blob_id (string, 40) | directory_id (string, 40) | path (string, 3..616) | content_id (string, 40) | detected_licenses (list, 0..112) | license_type (string, 2 classes) | repo_name (string, 5..115) | snapshot_id (string, 40) | revision_id (string, 40) | branch_name (string, 777 classes) | visit_date (timestamp[us], 2015-08-06 10:31:46 .. 2023-09-06 10:44:38) | revision_date (timestamp[us], 1970-01-01 02:38:32 .. 2037-05-03 13:00:00) | committer_date (timestamp[us], 1970-01-01 02:38:32 .. 2023-09-06 01:08:06) | github_id (int64, 4.92k..681M, nullable) | star_events_count (int64, 0..209k) | fork_events_count (int64, 0..110k) | gha_license_id (string, 22 classes) | gha_event_created_at (timestamp[us], 2012-06-04 01:52:49 .. 2023-09-14 21:59:50, nullable) | gha_created_at (timestamp[us], 2008-05-22 07:58:19 .. 2023-08-21 12:35:19, nullable) | gha_language (string, 149 classes) | src_encoding (string, 26 classes) | language (string, 1 class) | is_vendor (bool) | is_generated (bool) | length_bytes (int64, 3..10.2M) | extension (string, 188 classes) | content (string, 3..10.2M) | authors (list, 1..1) | author_id (string, 1..132) |
---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
6991dc207815ab48ae46791891d445b78c2359a0 | 9743d5fd24822f79c156ad112229e25adb9ed6f6 | /xai/brain/wordbase/otherforms/_recentest.py | f93941f4b13cefdd2333686e329e9b615cf9de18 | [
"MIT"
] | permissive | cash2one/xai | de7adad1758f50dd6786bf0111e71a903f039b64 | e76f12c9f4dcf3ac1c7c08b0cc8844c0b0a104b6 | refs/heads/master | 2021-01-19T12:33:54.964379 | 2017-01-28T02:00:50 | 2017-01-28T02:00:50 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 226 | py |
# class header
class _RECENTEST():
    def __init__(self,):
        self.name = "RECENTEST"
        self.definitions = "recent"  # assumed literal: the original used the bare, undefined name `recent`
        self.parents = []
        self.children = []
        self.properties = []
        self.jsondata = {}
        self.basic = ['recent']
| [
"[email protected]"
] | |
0582f2cb7055ba6e5d8133b7421aa813af80556e | c9ddbdb5678ba6e1c5c7e64adf2802ca16df778c | /cases/pa3/benchmarks/sieve-323.py | 156204efa4971bd39565e35b25809298e6391b87 | [] | no_license | Virtlink/ccbench-chocopy | c3f7f6af6349aff6503196f727ef89f210a1eac8 | c7efae43bf32696ee2b2ee781bdfe4f7730dec3f | refs/heads/main | 2023-04-07T15:07:12.464038 | 2022-02-03T15:42:39 | 2022-02-03T15:42:39 | 451,969,776 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,581 | py | # A resizable list of integers
class Vector(object):
items: [int] = None
size: int = 0
def __init__(self:"Vector"):
self.items = [0]
# Returns current capacity
def capacity(self:"Vector") -> int:
return len(self.items)
# Increases capacity of vector by one element
def increase_capacity(self:"Vector") -> int:
self.items = self.items + [0]
return self.capacity()
# Appends one item to end of vector
def append(self:"Vector", item: int) -> object:
if self.size == self.capacity():
self.increase_capacity()
self.items[self.size] = item
self.size = self.size + 1
# Appends many items to end of vector
def append_all(self:"Vector", new_items: [int]) -> object:
item:int = 0
for item in new_items:
self.append(item)
# Removes an item from the middle of vector
def remove_at(self:"Vector", idx: int) -> object:
if idx < 0:
return
while idx < self.size - 1:
self.items[idx] = $Exp[idx + 1]
idx = idx + 1
self.size = self.size - 1
# Retrieves an item at a given index
def get(self:"Vector", idx: int) -> int:
return self.items[idx]
# Retrieves the current size of the vector
def length(self:"Vector") -> int:
return self.size
# A faster (but more memory-consuming) implementation of vector
class DoublingVector(Vector):
doubling_limit:int = 1000
# Overriding to do fewer resizes
def increase_capacity(self:"DoublingVector") -> int:
if (self.capacity() <= self.doubling_limit // 2):
self.items = self.items + self.items
else:
# If doubling limit has been reached, fall back to
# standard capacity increases
self.items = self.items + [0]
return self.capacity()
# Makes a vector in the range [i, j)
def vrange(i:int, j:int) -> Vector:
v:Vector = None
v = DoublingVector()
while i < j:
v.append(i)
i = i + 1
return v
# Sieve of Eratosthenes (not really)
def sieve(v:Vector) -> object:
i:int = 0
j:int = 0
k:int = 0
while i < v.length():
k = v.get(i)
j = i + 1
while j < v.length():
if v.get(j) % k == 0:
v.remove_at(j)
else:
j = j + 1
i = i + 1
# Input parameter
n:int = 50
# Data
v:Vector = None
i:int = 0
# Crunch
v = vrange(2, n)
sieve(v)
# Print
while i < v.length():
print(v.get(i))
i = i + 1
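# Sanity note (assuming the driver above runs unmodified with n = 50):
# the final loop prints the primes below 50, one per line:
# 2 3 5 7 11 13 17 19 23 29 31 37 41 43 47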
| [
"[email protected]"
] | |
fb574b9eb168faba9a20e817021d5efb507b4117 | 1dacbf90eeb384455ab84a8cf63d16e2c9680a90 | /lib/python2.7/site-packages/conda_manager/widgets/table.py | ee4d3b59ef65d8779dacf64dfa73c5fa745a9e83 | [
"Python-2.0",
"Apache-2.0",
"BSD-3-Clause",
"LicenseRef-scancode-unknown"
] | permissive | wangyum/Anaconda | ac7229b21815dd92b0bd1c8b7ec4e85c013b8994 | 2c9002f16bb5c265e0d14f4a2314c86eeaa35cb6 | refs/heads/master | 2022-10-21T15:14:23.464126 | 2022-10-05T12:10:31 | 2022-10-05T12:10:31 | 76,526,728 | 11 | 10 | Apache-2.0 | 2022-10-05T12:10:32 | 2016-12-15T05:26:12 | Python | UTF-8 | Python | false | false | 26,817 | py | # -*- coding:utf-8 -*-
#
# Copyright © 2015 The Spyder Development Team
# Copyright © 2014 Gonzalo Peña-Castellanos (@goanpeca)
#
# Licensed under the terms of the MIT License
"""
"""
from __future__ import (division, print_function, unicode_literals,
with_statement)
# Standard library imports
import gettext
# Third party imports
from qtpy import PYQT5
from qtpy.QtCore import Qt, QPoint, QSize, QUrl, Signal, QEvent
from qtpy.QtGui import QColor, QDesktopServices, QIcon, QPen, QBrush
from qtpy.QtWidgets import (QAbstractItemView, QItemDelegate, QMenu,
QTableView)
# Local imports
from conda_manager.models.filter import MultiColumnSortFilterProxy
from conda_manager.models.packages import CondaPackagesModel
from conda_manager.utils import get_image_path
from conda_manager.utils import constants as const
from conda_manager.utils.py3compat import to_text_string
from conda_manager.utils.qthelpers import add_actions, create_action
_ = gettext.gettext
HIDE_COLUMNS = [const.COL_STATUS, const.COL_URL, const.COL_LICENSE,
const.COL_REMOVE, const.COL_ACTION_VERSION]
class CustomDelegate(QItemDelegate):
def paint(self, painter, option, index):
QItemDelegate.paint(self, painter, option, index)
column = index.column()
row = index.row()
rect = option.rect
# Draw borders
pen = QPen()
pen.setWidth(1)
pen.setColor(QColor('#cdcdcd'))
painter.setPen(pen)
painter.drawLine(rect.topLeft(), rect.topRight())
if (row == self.current_hover_row() or row == self.current_row() and
(self.has_focus_or_context())):
brush = QBrush(Qt.SolidPattern)
brush.setColor(QColor(255, 255, 255, 100))
painter.fillRect(rect, brush)
if row == self.current_row() and column in [const.COL_START]:
pen = QPen()
pen.setWidth(10)
pen.setColor(QColor('#7cbb4c'))
painter.setPen(pen)
dyt = QPoint(0, 5)
dyb = QPoint(0, 4)
painter.drawLine(rect.bottomLeft()-dyb, rect.topLeft()+dyt)
def sizeHint(self, style, model_index):
column = model_index.column()
        if column in [const.COL_PACKAGE_TYPE] + const.ACTION_COLUMNS:  # ACTION_COLUMNS is itself a list, so concatenate rather than nest it
return QSize(24, 24)
else:
return QItemDelegate.sizeHint(self, style, model_index)
class TableCondaPackages(QTableView):
""" """
WIDTH_TYPE = 24
WIDTH_NAME = 120
WIDTH_ACTIONS = 24
WIDTH_VERSION = 90
sig_status_updated = Signal(str, bool, list, bool)
sig_conda_action_requested = Signal(str, int, str, object, object)
sig_pip_action_requested = Signal(str, int)
sig_actions_updated = Signal(int)
sig_next_focus = Signal()
sig_previous_focus = Signal()
def __init__(self, parent):
super(TableCondaPackages, self).__init__(parent)
self._parent = parent
self._searchbox = u''
self._filterbox = const.ALL
self._delegate = CustomDelegate(self)
self.row_count = None
self._advanced_mode = True
self._current_hover_row = None
self._menu = None
self._palette = {}
# To manage icon states
self._model_index_clicked = None
self.valid = False
self.column_ = None
self.current_index = None
# To prevent triggering the keyrelease after closing a dialog
# but hititng enter on it
self.pressed_here = False
self.source_model = None
self.proxy_model = None
self.setSelectionBehavior(QAbstractItemView.SelectRows)
# self.setSelectionBehavior(QAbstractItemView.NoSelection)
# self.setSelectionMode(QAbstractItemView.SingleSelection)
self.setSelectionMode(QAbstractItemView.NoSelection)
self.verticalHeader().hide()
self.setSortingEnabled(True)
self.setMouseTracking(True)
# self.setAlternatingRowColors(True)
self._delegate.current_row = self.current_row
self._delegate.current_hover_row = self.current_hover_row
self._delegate.update_index = self.update
self._delegate.has_focus_or_context = self.has_focus_or_context
self.setItemDelegate(self._delegate)
self.setShowGrid(False)
self.setWordWrap(True)
self.setHorizontalScrollBarPolicy(Qt.ScrollBarAlwaysOff)
# Header setup
self._hheader = self.horizontalHeader()
if PYQT5:
self._hheader.setSectionResizeMode(self._hheader.Fixed)
else:
self._hheader.setResizeMode(self._hheader.Fixed)
self._hheader.setStyleSheet("""QHeaderView {border: 0px;
border-radius: 0px;};
""")
self.sortByColumn(const.COL_NAME, Qt.AscendingOrder)
self.setContextMenuPolicy(Qt.CustomContextMenu)
self.hide_columns()
def setup_model(self, packages, data, metadata_links={}):
""" """
self.proxy_model = MultiColumnSortFilterProxy(self)
self.source_model = CondaPackagesModel(self, packages, data)
self.proxy_model.setSourceModel(self.source_model)
self.setModel(self.proxy_model)
self.metadata_links = metadata_links
# FIXME: packages sizes... move to a better place?
packages_sizes = {}
for name in packages:
packages_sizes[name] = packages[name].get('size')
self._packages_sizes = packages_sizes
# Custom Proxy Model setup
self.proxy_model.setDynamicSortFilter(True)
filter_text = \
(lambda row, text, status: (
all([t in row[const.COL_NAME].lower() for t in
to_text_string(text).lower().split()]) or
all([t in row[const.COL_DESCRIPTION].lower() for t in
to_text_string(text).split()])))
filter_status = (lambda row, text, status:
to_text_string(row[const.COL_STATUS]) in
to_text_string(status))
self.model().add_filter_function('status-search', filter_status)
self.model().add_filter_function('text-search', filter_text)
# Signals and slots
self.verticalScrollBar().valueChanged.connect(self.resize_rows)
self.hide_columns()
self.resize_rows()
self.refresh_actions()
self.source_model.update_style_palette(self._palette)
def update_style_palette(self, palette={}):
self._palette = palette
def resize_rows(self):
""" """
delta_y = 10
height = self.height()
y = 0
while y < height:
row = self.rowAt(y)
self.resizeRowToContents(row)
row_height = self.rowHeight(row)
self.setRowHeight(row, row_height + delta_y)
y += self.rowHeight(row) + delta_y
def hide_columns(self):
""" """
for col in const.COLUMNS:
self.showColumn(col)
hide = HIDE_COLUMNS
if self._advanced_mode:
columns = const.ACTION_COLUMNS[:]
columns.remove(const.COL_ACTION)
hide += columns
else:
hide += [const.COL_ACTION]
for col in hide:
self.hideColumn(col)
def filter_changed(self):
"""Trigger the filter"""
group = self._filterbox
text = self._searchbox
if group in [const.ALL]:
group = ''.join([to_text_string(const.INSTALLED),
to_text_string(const.UPGRADABLE),
to_text_string(const.NOT_INSTALLED),
to_text_string(const.DOWNGRADABLE),
to_text_string(const.MIXGRADABLE)])
elif group in [const.INSTALLED]:
group = ''.join([to_text_string(const.INSTALLED),
to_text_string(const.UPGRADABLE),
to_text_string(const.DOWNGRADABLE),
to_text_string(const.MIXGRADABLE)])
elif group in [const.UPGRADABLE]:
group = ''.join([to_text_string(const.UPGRADABLE),
to_text_string(const.MIXGRADABLE)])
elif group in [const.DOWNGRADABLE]:
group = ''.join([to_text_string(const.DOWNGRADABLE),
to_text_string(const.MIXGRADABLE)])
else:
group = to_text_string(group)
if self.proxy_model is not None:
self.proxy_model.set_filter(text, group)
self.resize_rows()
# Update label count
count = self.verticalHeader().count()
if count == 0:
count_text = _("0 packages available ")
elif count == 1:
count_text = _("1 package available ")
elif count > 1:
count_text = to_text_string(count) + _(" packages available ")
if text != '':
count_text = count_text + _('matching "{0}"').format(text)
self.sig_status_updated.emit(count_text, False, [0, 0], True)
def search_string_changed(self, text):
""" """
text = to_text_string(text)
self._searchbox = text
self.filter_changed()
def filter_status_changed(self, text):
""" """
        if text in const.PACKAGE_STATUS:  # map display text to its status value when a mapping exists
            text = const.PACKAGE_STATUS[text]
for key in const.COMBOBOX_VALUES:
val = const.COMBOBOX_VALUES[key]
if to_text_string(val) == to_text_string(text):
group = val
break
self._filterbox = group
self.filter_changed()
def resizeEvent(self, event):
"""Override Qt method"""
w = self.width()
width_start = 20
width_end = width_start
if self._advanced_mode:
action_cols = [const.COL_ACTION]
else:
action_cols = [const.COL_UPGRADE, const.COL_INSTALL,
const.COL_REMOVE, const.COL_DOWNGRADE]
self.setColumnWidth(const.COL_START, width_start)
self.setColumnWidth(const.COL_PACKAGE_TYPE, self.WIDTH_TYPE)
self.setColumnWidth(const.COL_NAME, self.WIDTH_NAME)
self.setColumnWidth(const.COL_VERSION, self.WIDTH_VERSION)
w_new = w - (width_start + self.WIDTH_ACTIONS + self.WIDTH_TYPE +
self.WIDTH_NAME + self.WIDTH_VERSION +
(len(action_cols))*self.WIDTH_ACTIONS + width_end)
self.setColumnWidth(const.COL_DESCRIPTION, w_new)
self.setColumnWidth(const.COL_END, width_end)
for col in action_cols:
self.setColumnWidth(col, self.WIDTH_ACTIONS)
QTableView.resizeEvent(self, event)
self.resize_rows()
def update_visible_rows(self):
current_index = self.currentIndex()
row = current_index.row()
if self.proxy_model:
for r in range(row - 50, row + 50):
for co in const.COLUMNS:
index = self.proxy_model.index(r, co)
self.update(index)
self.resize_rows()
def current_row(self):
if self._menu and self._menu.isVisible():
return self.currentIndex().row()
elif self.hasFocus():
return self.currentIndex().row()
else:
return -1
def current_hover_row(self):
return self._current_hover_row
def has_focus_or_context(self):
return self.hasFocus() or (self._menu and self._menu.isVisible())
def mouseMoveEvent(self, event):
super(TableCondaPackages, self).mouseMoveEvent(event)
pos = event.pos()
self._current_hover_row = self.rowAt(pos.y())
def leaveEvent(self, event):
super(TableCondaPackages, self).leaveEvent(event)
self._current_hover_row = None
def keyPressEvent(self, event):
"""
Override Qt method.
"""
index = self.currentIndex()
key = event.key()
if key in [Qt.Key_Enter, Qt.Key_Return]:
# self.action_pressed(index)
self.setCurrentIndex(self.proxy_model.index(index.row(),
const.COL_ACTION))
self.pressed_here = True
elif key in [Qt.Key_Tab]:
new_row = index.row() + 1
if not self.proxy_model or new_row == self.proxy_model.rowCount():
self.sig_next_focus.emit()
else:
new_index = self.proxy_model.index(new_row, 0)
self.setCurrentIndex(new_index)
elif key in [Qt.Key_Backtab]:
new_row = index.row() - 1
if new_row < 0:
self.sig_previous_focus.emit()
else:
new_index = self.proxy_model.index(new_row, 0)
self.setCurrentIndex(new_index)
else:
QTableView.keyPressEvent(self, event)
self.update_visible_rows()
def keyReleaseEvent(self, event):
"""Override Qt method"""
QTableView.keyReleaseEvent(self, event)
key = event.key()
index = self.currentIndex()
if key in [Qt.Key_Enter, Qt.Key_Return] and self.pressed_here:
self.context_menu_requested(event)
# self.action_released()
elif key in [Qt.Key_Menu]:
self.setCurrentIndex(self.proxy_model.index(index.row(),
const.COL_ACTION))
self.context_menu_requested(event, right_click=True)
self.pressed_here = False
self.update_visible_rows()
def mousePressEvent(self, event):
"""Override Qt method"""
QTableView.mousePressEvent(self, event)
self.current_index = self.currentIndex()
column = self.current_index.column()
if event.button() == Qt.LeftButton and column == const.COL_ACTION:
pos = QPoint(event.x(), event.y())
index = self.indexAt(pos)
self.action_pressed(index)
self.context_menu_requested(event)
elif event.button() == Qt.RightButton:
self.context_menu_requested(event, right_click=True)
self.update_visible_rows()
def mouseReleaseEvent(self, event):
"""Override Qt method"""
if event.button() == Qt.LeftButton:
self.action_released()
self.update_visible_rows()
def action_pressed(self, index):
"""
DEPRECATED
"""
column = index.column()
if self.proxy_model is not None:
model_index = self.proxy_model.mapToSource(index)
model = self.source_model
self._model_index_clicked = model_index
self.valid = True
if (column == const.COL_INSTALL and
model.is_installable(model_index)):
model.update_row_icon(model_index.row(), const.COL_INSTALL)
elif (column == const.COL_INSTALL and
model.is_removable(model_index)):
model.update_row_icon(model_index.row(), const.COL_REMOVE)
elif ((column == const.COL_UPGRADE and
model.is_upgradable(model_index)) or
(column == const.COL_DOWNGRADE and
model.is_downgradable(model_index))):
model.update_row_icon(model_index.row(), model_index.column())
else:
self._model_index_clicked = None
self.valid = False
def action_released(self):
"""
DEPRECATED
"""
model = self.source_model
model_index = self._model_index_clicked
actions = {const.COL_INSTALL: const.ACTION_INSTALL,
const.COL_REMOVE: const.ACTION_REMOVE,
const.COL_UPGRADE: const.ACTION_UPGRADE,
const.COL_DOWNGRADE: const.ACTION_DOWNGRADE,
}
if model_index:
column = model_index.column()
if column == const.COL_INSTALL and model.is_removable(model_index):
column = const.COL_REMOVE
self.source_model.update_row_icon(model_index.row(), column)
if self.valid:
row_data = self.source_model.row(model_index.row())
type_ = row_data[const.COL_PACKAGE_TYPE]
name = row_data[const.COL_NAME]
version = self.source_model.get_package_version(name)
versions = self.source_model.get_package_versions(name)
if not versions:
versions = [version]
action = actions.get(column, None)
if type_ == const.CONDA_PACKAGE:
self.sig_conda_action_requested.emit(name, action, version,
versions,
self._packages_sizes)
elif type_ == const.PIP_PACKAGE:
self.sig_pip_action_requested.emit(name, action)
else:
pass
def set_advanced_mode(self, value=True):
self._advanced_mode = value
# self.resizeEvent(None)
def set_action_status(self, model_index, status=const.ACTION_NONE,
version=None):
self.source_model.set_action_status(model_index, status, version)
self.refresh_actions()
def context_menu_requested(self, event, right_click=False):
"""
Custom context menu.
"""
if self.proxy_model is None:
return
self._menu = QMenu(self)
index = self.currentIndex()
model_index = self.proxy_model.mapToSource(index)
row_data = self.source_model.row(model_index.row())
column = model_index.column()
name = row_data[const.COL_NAME]
# package_type = row_data[const.COL_PACKAGE_TYPE]
versions = self.source_model.get_package_versions(name)
current_version = self.source_model.get_package_version(name)
# if column in [const.COL_ACTION, const.COL_VERSION, const.COL_NAME]:
if column in [const.COL_ACTION] and not right_click:
is_installable = self.source_model.is_installable(model_index)
is_removable = self.source_model.is_removable(model_index)
is_upgradable = self.source_model.is_upgradable(model_index)
action_status = self.source_model.action_status(model_index)
actions = []
action_unmark = create_action(
self,
_('Unmark'),
triggered=lambda: self.set_action_status(model_index,
const.ACTION_NONE,
current_version))
action_install = create_action(
self,
_('Mark for installation'),
triggered=lambda: self.set_action_status(model_index,
const.ACTION_INSTALL,
versions[-1]))
action_upgrade = create_action(
self,
_('Mark for upgrade'),
triggered=lambda: self.set_action_status(model_index,
const.ACTION_UPGRADE,
versions[-1]))
action_remove = create_action(
self,
_('Mark for removal'),
triggered=lambda: self.set_action_status(model_index,
const.ACTION_REMOVE,
current_version))
version_actions = []
for version in reversed(versions):
def trigger(model_index=model_index,
action=const.ACTION_INSTALL,
version=version):
return lambda: self.set_action_status(model_index,
status=action,
version=version)
if version == current_version:
version_action = create_action(
self,
version,
icon=QIcon(),
triggered=trigger(model_index,
const.ACTION_INSTALL,
version))
if not is_installable:
version_action.setCheckable(True)
version_action.setChecked(True)
version_action.setDisabled(True)
elif version != current_version:
if ((version in versions and versions.index(version)) >
(current_version in versions and
versions.index(current_version))):
upgrade_or_downgrade_action = const.ACTION_UPGRADE
else:
upgrade_or_downgrade_action = const.ACTION_DOWNGRADE
if is_installable:
upgrade_or_downgrade_action = const.ACTION_INSTALL
version_action = create_action(
self,
version,
icon=QIcon(),
triggered=trigger(model_index,
upgrade_or_downgrade_action,
version))
version_actions.append(version_action)
install_versions_menu = QMenu('Mark for specific version '
'installation', self)
add_actions(install_versions_menu, version_actions)
actions = [action_unmark, action_install, action_upgrade,
action_remove]
actions += [None, install_versions_menu]
install_versions_menu.setEnabled(len(version_actions) > 1)
if action_status is const.ACTION_NONE:
action_unmark.setDisabled(True)
action_install.setDisabled(not is_installable)
action_upgrade.setDisabled(not is_upgradable)
action_remove.setDisabled(not is_removable)
install_versions_menu.setDisabled(False)
else:
action_unmark.setDisabled(False)
action_install.setDisabled(True)
action_upgrade.setDisabled(True)
action_remove.setDisabled(True)
install_versions_menu.setDisabled(True)
elif right_click:
license_ = row_data[const.COL_LICENSE]
metadata = self.metadata_links.get(name, {})
pypi = metadata.get('pypi', '')
home = metadata.get('home', '')
dev = metadata.get('dev', '')
docs = metadata.get('docs', '')
q_pypi = QIcon(get_image_path('python.png'))
q_home = QIcon(get_image_path('home.png'))
q_docs = QIcon(get_image_path('conda_docs.png'))
if 'git' in dev:
q_dev = QIcon(get_image_path('conda_github.png'))
elif 'bitbucket' in dev:
q_dev = QIcon(get_image_path('conda_bitbucket.png'))
else:
q_dev = QIcon()
if 'mit' in license_.lower():
lic = 'http://opensource.org/licenses/MIT'
            elif 'bsd' in license_.lower():
lic = 'http://opensource.org/licenses/BSD-3-Clause'
else:
lic = None
actions = []
if license_ != '':
actions.append(create_action(self, _('License: ' + license_),
icon=QIcon(), triggered=lambda:
self.open_url(lic)))
actions.append(None)
if pypi != '':
actions.append(create_action(self, _('Python Package Index'),
icon=q_pypi, triggered=lambda:
self.open_url(pypi)))
if home != '':
actions.append(create_action(self, _('Homepage'),
icon=q_home, triggered=lambda:
self.open_url(home)))
if docs != '':
actions.append(create_action(self, _('Documentation'),
icon=q_docs, triggered=lambda:
self.open_url(docs)))
if dev != '':
actions.append(create_action(self, _('Development'),
icon=q_dev, triggered=lambda:
self.open_url(dev)))
if actions and len(actions) > 1:
# self._menu = QMenu(self)
add_actions(self._menu, actions)
if event.type() == QEvent.KeyRelease:
rect = self.visualRect(index)
global_pos = self.viewport().mapToGlobal(rect.bottomRight())
else:
pos = QPoint(event.x(), event.y())
global_pos = self.viewport().mapToGlobal(pos)
self._menu.popup(global_pos)
def get_actions(self):
if self.source_model:
return self.source_model.get_actions()
def clear_actions(self):
index = self.currentIndex()
if self.source_model:
self.source_model.clear_actions()
self.refresh_actions()
self.setFocus()
self.setCurrentIndex(index)
def refresh_actions(self):
if self.source_model:
actions_per_package_type = self.source_model.get_actions()
number_of_actions = 0
for type_ in actions_per_package_type:
actions = actions_per_package_type[type_]
for key in actions:
data = actions[key]
number_of_actions += len(data)
self.sig_actions_updated.emit(number_of_actions)
def open_url(self, url):
"""
Open link from action in default operating system browser.
"""
if url is None:
return
QDesktopServices.openUrl(QUrl(url))
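# Minimal usage sketch (assumptions: a running Qt application, and `packages`/
# `data` shaped however CondaPackagesModel expects; the names below are
# illustrative, not part of this module):
#
#     app = QApplication(sys.argv)
#     table = TableCondaPackages(parent=None)
#     table.setup_model(packages, data)
#     table.show()
#     app.exec_()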
| [
"[email protected]"
] | |
143a2b435ce857dbd0475e94aef1e9bf00d0afb5 | 19236d9e966cf5bafbe5479d613a175211e1dd37 | /cohesity_management_sdk/models/role_update.py | 733ad80c72ec30c51a96e5f47fa441e5e2415f91 | [
"MIT"
] | permissive | hemanshu-cohesity/management-sdk-python | 236c44fbd9604809027f8ddd0ae6c36e4e727615 | 07c5adee58810979780679065250d82b4b2cdaab | refs/heads/master | 2020-04-29T23:22:08.909550 | 2019-04-10T02:42:16 | 2019-04-10T02:42:16 | 176,474,523 | 0 | 0 | NOASSERTION | 2019-03-19T09:27:14 | 2019-03-19T09:27:12 | null | UTF-8 | Python | false | false | 1,628 | py | # -*- coding: utf-8 -*-
# Copyright 2019 Cohesity Inc.
class RoleUpdate(object):
"""Implementation of the 'Role Update.' model.
Specifies parameters required to update a role.
Attributes:
description (string): Specifies a description about the role.
privileges (list of string): Array of Privileges. Specifies the list
of privileges to assign to the role.
"""
# Create a mapping from Model property names to API property names
_names = {
"description":'description',
"privileges":'privileges'
}
def __init__(self,
description=None,
privileges=None):
"""Constructor for the RoleUpdate class"""
# Initialize members of the class
self.description = description
self.privileges = privileges
@classmethod
def from_dictionary(cls,
dictionary):
"""Creates an instance of this model from a dictionary
Args:
dictionary (dictionary): A dictionary representation of the object as
obtained from the deserialization of the server's response. The keys
MUST match property names in the API description.
Returns:
object: An instance of this structure class.
"""
if dictionary is None:
return None
# Extract variables from the dictionary
description = dictionary.get('description')
privileges = dictionary.get('privileges')
# Return an object of this model
return cls(description,
privileges)
| [
"[email protected]"
] | |
24fee577c01fbd41cd14296cf06baa7ff9dd8694 | 76e62ddbfdfba19c80b37e855a4df67672ef0808 | /IVTa/2014/ALEKSEEV_I_S/task_12_50.py | ab19443ceba4961fb2b12b4689e1e91a89688863 | [
"Apache-2.0"
] | permissive | stasvorosh/pythonintask | 9d30f3cd492e89783b7221402375c1ebe4690baa | 8169ed26510022fe0d589f4013f11749131957df | refs/heads/master | 2021-01-17T16:49:32.778063 | 2016-10-10T14:08:04 | 2016-10-10T14:08:04 | 52,255,539 | 6 | 0 | null | 2016-02-22T07:33:16 | 2016-02-22T07:33:15 | null | UTF-8 | Python | false | false | 1,942 | py | # Task 12. Variant 50
# Develop the game "Tic-tac-toe".
# (see M. Dawson, Programming in Python, ch. 6).
# Alekseev I.S.
# 20.05.2016
board = list(range(1,10))
def draw_board(board):
print("-------------")
for i in range(3):
print( "|", board[0+i*3], "|", board[1+i*3], "|", board[2+i*3], "|")
print( "-------------")
def take_input(player_token):
valid = False
while not valid:
        player_answer = input("Where shall we put " + player_token + "? ")
try:
player_answer = int(player_answer)
except:
print( "Некорректный ввод. Вы уверены, что ввели число?")
continue
if player_answer >= 1 and player_answer <= 9:
if (str(board[player_answer-1]) not in "XO"):
board[player_answer-1] = player_token
valid = True
else:
print( "Эта клеточка уже занята")
else:
print( "Некорректный ввод. Введите число от 1 до 9 чтобы походить.")
def check_win(board):
win_coord = ((0,1,2),(3,4,5),(6,7,8),(0,3,6),(1,4,7),(2,5,8),(0,4,8),(2,4,6))
for each in win_coord:
if board[each[0]] == board[each[1]] == board[each[2]]:
return board[each[0]]
return False
def main(board):
counter = 0
win = False
while not win:
draw_board(board)
if counter % 2 == 0:
take_input("X")
else:
take_input("O")
counter += 1
if counter > 4:
tmp = check_win(board)
if tmp:
                print( tmp, "wins!")
win = True
break
if counter == 9:
print( "Ничья!")
break
draw_board(board)
main(board)
| [
"[email protected]"
] | |
5a39161daf1b0158febc6f1084a130433c1c9944 | bffd457e17dc250c81d7bd9e25c20a483f1a1ed5 | /pandatools/demo_hw.py | 609861d6783d01829e0f12b67428d2acf0459b39 | [] | no_license | Harzva/gigavision | 3121f183b8cfc66f9c89f4afe1bd0bdf1c1fe104 | 1fb1ad9b9d5aac6c18dc83184a52c484964be7fb | refs/heads/master | 2023-02-20T10:34:21.735085 | 2021-01-24T10:07:30 | 2021-01-24T10:07:30 | 332,416,570 | 2 | 2 | null | null | null | null | UTF-8 | Python | false | false | 5,892 | py |
import json
import cv2
import numpy as np
import os
# basepath="/root/data/gvision/dataset/train_all_annos/s0.3_t0.7_all"
# load_path="/root/data/gvision/dataset/output/my_pv_train/my_inference/coco_pv_inference_results.json"
# load_path_coco="/root/data/gvision/dataset/predict/s0.5_t0.8_141517/image_annos/person_bbox_test_141517_split.json"
""" "14_OCT_Habour_IMG_14_01___0.5__1408__3072.jpg": {
"image size": {
"height": 2049,
"width": 1025
},
"image id": 18
{
"file_name": "14_OCT_Habour_IMG_14_01___0.5__704__1024.jpg",
"height": 2049,
"width": 1025,
"id": 9
},
"""
# aaas=os.listdir("/root/data/rubzz/ruby/ruby_output3/split_train_person_panda_fafaxue_3category/img")
# for i in aaas:
# print(os.path.join("/root/data/rubzz/ruby/ruby_output3/split_train_person_panda_fafaxue_3category/img",i))
# im=cv2.imread(os.path.join("/root/data/rubzz/ruby/ruby_output3/split_train_person_panda_fafaxue_3category/img",i))
# print(im.shape)
# load_path="/root/data/rubzz/ruby/ruby_output2/train_person_unsure_cell/train_person_unsure_cell_3category.json"
# print(im.shape)
# with open(load_path,'r') as load_f:
# dataset_dicts = json.load(load_f)
# # print(dataset_dicts[0:100])
# with open(load_path_coco,'r') as load_path_coco:
# coco_dataset_dicts = json.load(load_path_coco)
# for coco_images_dict in coco_dataset_dicts["images"]:
# print(coco_images_dict["id"])
# for images_dict in dataset_dicts["images"]:
# if coco_images_dict["id"]==images_dict["id"]:
# h,w=images_dict["height"],images_dict["width"]
# coco_images_dict["height"]=h
# coco_images_dict["width"]=w
# with open(output_path, 'w') as load_f:
# COCO_dataset_dicts= json.dumps(coco_dataset_dicts,indent=2)
# load_f.write(COCO_dataset_dicts)
# with open("/root/data/gvision/dataset/train_all_annos/s0.3_t0.7_all/image_annos/coco_vehicle_train_hwnoi.json",'r') as load_f:
# dataset_dicts = json.load(load_f)
# print(len(dataset_dicts["annotations"]))
# # print(dataset_dicts)#1,2
# print("type",type(dataset_dicts))
"""
450558 coco_person_train_hwnoi.json
483276 coco_pv_train_bbox_hwnoi.json coco_pv_train_hwnoi.json
32718 coco_vehicle_train_bbox_hwnoi.json coco_vehicle_train_hwnoi
"""
def coco_hw(load_path_coco, save_path):
    """Fill in the real height/width for every image listed in a COCO
    annotation file by reading each image from disk, then save a copy."""
    with open(load_path_coco, 'r') as load_file:
        coco_dataset_dicts = json.load(load_file)
    for images_dict in coco_dataset_dicts["images"]:
        imagename = images_dict["file_name"]
        print(imagename)
        # cv2.imread returns an array shaped (height, width, channels)
        height, width = cv2.imread(os.path.join("/root/data/rubzz/ruby/ruby_output3/split_train_person_panda_fafaxue_3category/img", imagename)).shape[0:2]
        images_dict['height'] = height
        images_dict['width'] = width
    with open(save_path, 'w') as out_file:
        out_file.write(json.dumps(coco_dataset_dicts, indent=2))
coco_hw(load_path_coco="/root/data/rubzz/ruby/ruby_output3/split_train_person_panda_fafaxue_3category/split_train_person_panda_fafaxue_3category.json",
save_path="/root/data/rubzz/ruby/ruby_output3/split_train_person_panda_fafaxue_3category/split_train_person_panda_fafaxue_3category_hw.json")
# class MyEncoder(json.JSONEncoder):
# def default(self, obj):
# if isinstance(obj, np.integer):
# return int(obj)
# elif isinstance(obj, np.floating):
# return float(obj)
# elif isinstance(obj, np.ndarray):
# return obj.tolist()
# else:
# return super(MyEncoder, self).default(obj)
# load_path_coco="/root/data/gvision/dataset/d2_output/my_pv_mask/metrics.json"
# # target="/root/data/gvision/dataset/d2_output/my_pv_mask/my_predict/predict_all_0500.json"
# with open(load_path_coco,'r') as load_path_coco:
# result_list= json.load(load_path_coco)
# print(result_list)
# f=open(target,'w')
# f.write(json.dumps(result_list[0:500],cls=MyEncoder))
# a=[]
# for result_dict in result_list:
# result_dict.pop('segmentation')
# a.append(result_dict)
# f=open(target,'w')
# f.write(json.dumps(a,cls=MyEncoder))
# a=np.load("/root/data/gvision/dataset/d2_output/my_pv_mask/model_final_indexedresults.npy",allow_pickle=True)
# print(len(a))
# print(os.path.getsize("/root/data/gvision/dataset/d2_output/my_pv_mask/model_final_indexedresults.npy"))
# load_path_coco="/root/data/gvision/dataset/d2_output/my_pv_center_mask/metrics_18499.json"
# import json
# data = []
# a=[0,0]
# f=open(load_path_coco, 'r', encoding="utf-8")
# # 读取所有行 每行会是一个字符串
# loss=10
# for line,j in enumerate(f.readlines()):
# j = json.loads(j)
# if j["total_loss"]<loss:
# loss=j["total_loss"]
# a[0]=line+1
# # print(line)
# # print(loss)
# a[1]=loss
# print(a)
# img=cv2.imread("/root/data/gvision/panda_tools/panda-imgae-test.png")
# img18=img[0:238,0:423,:]
# img14=img[0:238,423:423*2,:]
# img17=img[0:238,423*2:423*3,:]
# print(img14.shape,img14.shape,img14.shape)
# cv2.imwrite("/root/data/gvision/panda_tools/test18.png",img18)
# cv2.imwrite("/root/data/gvision/panda_tools/test14.png",img14)
# cv2.imwrite("/root/data/gvision/panda_tools/test17.png",img17)
# img18=cv2.resize(img18,(423*50,238*50),interpolation=cv2.INTER_CUBIC)
# img14=cv2.resize(img14,(423*50,238*50),interpolation=cv2.INTER_CUBIC)
# img17=cv2.resize(img17,(423*50,238*50),interpolation=cv2.INTER_CUBIC)
# cv2.imwrite("/root/data/gvision/panda_tools/test_18.png",img18)
# cv2.imwrite("/root/data/gvision/panda_tools/test_14.png",img14,[int(cv2.IMWRITE_PNG_COMPRESSION), 9])
# cv2.imwrite("/root/data/gvision/panda_tools/test_17.png",img17,[int(cv2.IMWRITE_PNG_COMPRESSION), 9])
# import numpy as np
# a=[[1,2,3,4],[1,2,3,4],[1,2,3,4]]
# b=[1]
# c=[b,b,b,b]
# [old+new for old,new in zip(a,c)]
# print([old+new for old,new in zip(a,c)])
# print([1176.27, 637.9455, 1412.9817, 1139.9287] +[0.7856537]) | [
"[email protected]"
] | |
d74a7b8a8e2ea23d2b2855097c8f985640ed438f | 2fac796fa58c67fb5a4a95a6e7f28cbef169318b | /python/copy-books.py | 2e512fdb8dea53102e37f2f032524d38ae208b24 | [] | no_license | jwyx3/practices | f3fe087432e79c8e34f3af3a78dd10278b66dd38 | 6fec95b9b4d735727160905e754a698513bfb7d8 | refs/heads/master | 2021-03-12T20:41:59.816448 | 2019-04-14T06:47:30 | 2019-04-14T06:47:30 | 18,814,777 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,261 | py | class Solution:
# @param pages: a list of integers
# @param k: an integer
# @return: an integer
def copyBooks(self, pages, k):
# can k persons copy books within x minutes
def check(x):
total, kt = 0, 0
for p in pages:
# current person cannot copy any more
# add one more person
if total + p > x:
kt += 1
total = 0
total += p
return (kt + (0 if total == 0 else 1)) <= k
# no books
if not pages:
return 0
# has books but no person
if pages and k <= 0:
return -1
left, right = 0, 0
for p in pages:
# the time of book with max pages
left = max(left, p)
# the total time to copy books for one person
right += p
while left + 1 < right:
mid = left + (right - left) / 2
if check(mid):
right = mid
else:
left = mid + 1
if check(left):
return left
return right
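# Alternative version of the same idea below: binary search on the minimum
# feasible time, with the "can k people finish within t?" test factored out
# as a static method.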
class Solution:
# @param pages: a list of integers
# @param k: an integer
# @return: an integer
def copyBooks(self, pages, k):
# no book
if not pages:
return 0
# invalid
if pages and k <= 0:
return -1
start, end = max(pages), sum(pages)
while start + 1 < end:
mid = start + (end - start) / 2
# If mid is ok, then all x > mid is ok
if self.check(pages, k, mid):
end = mid
else:
start = mid
if self.check(pages, k, start):
return start
return end
# @param t: time used to copy book
# return: boolean, whether all books can be copied within t
@staticmethod
def check(pages, k, t):
total, k_tmp = 0, 0
for page in pages:
# this one can not read any more,
# add one more person
if total + page > t:
k_tmp += 1
total = 0
total += page
if total > 0:
k_tmp += 1
return k_tmp <= k
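# Quick sanity check (hand-picked values, LintCode-style call):
#   Solution().copyBooks([3, 2, 4], 2) -> 5
#   (one copier takes pages [3, 2], the other takes [4]; the slower one
#    needs 5 minutes, and no split does better)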
| [
"[email protected]"
] | |
bc665f583a10f65731382232186ceba08c47f3bc | 7a21986fecf560d6ce9e4b29391112a891039416 | /tests/test_routing_api.py | ee5c21cf6b42437f971e410a099908df1a8ba4d1 | [
"MIT"
] | permissive | HTenkanen/HerePy | 8048d96406ea966e769e81fb06dc96502a2dd3c7 | d20690b79f70e90faaecd59307c9c66bb355e9da | refs/heads/master | 2020-06-03T22:54:58.430817 | 2019-06-13T14:16:16 | 2019-06-13T14:16:16 | 191,765,172 | 0 | 0 | null | 2019-06-13T13:08:47 | 2019-06-13T13:08:47 | null | UTF-8 | Python | false | false | 11,305 | py | #!/usr/bin/env python
import os
import sys
import unittest
import responses
import codecs
import herepy
class RoutingApiTest(unittest.TestCase):
def setUp(self):
api = herepy.RoutingApi('app_id', 'app_code')
self._api = api
def test_initiation(self):
self.assertIsInstance(self._api, herepy.RoutingApi)
self.assertEqual(self._api._app_id, 'app_id')
self.assertEqual(self._api._app_code, 'app_code')
self.assertEqual(self._api._base_url, 'https://route.cit.api.here.com/routing/7.2/calculateroute.json')
@responses.activate
def test_carroute_whensucceed(self):
with codecs.open('testdata/models/routing.json', mode='r', encoding='utf-8') as f:
expectedResponse = f.read()
responses.add(responses.GET, 'https://route.cit.api.here.com/routing/7.2/calculateroute.json',
expectedResponse, status=200)
response = self._api.car_route([11.0, 12.0], [22.0, 23.0], [herepy.RouteMode.car, herepy.RouteMode.fastest])
self.assertTrue(response)
self.assertIsInstance(response, herepy.RoutingResponse)
@responses.activate
def test_carroute_withdefaultmodes_whensucceed(self):
with codecs.open('testdata/models/routing.json', mode='r', encoding='utf-8') as f:
expectedResponse = f.read()
responses.add(responses.GET, 'https://route.cit.api.here.com/routing/7.2/calculateroute.json',
expectedResponse, status=200)
response = self._api.car_route([11.0, 12.0], [22.0, 23.0])
self.assertTrue(response)
self.assertIsInstance(response, herepy.RoutingResponse)
@responses.activate
def test_carroute_whenerroroccured(self):
with open('testdata/models/routing_error.json', 'r') as f:
expectedResponse = f.read()
responses.add(responses.GET, 'https://route.cit.api.here.com/routing/7.2/calculateroute.json',
expectedResponse, status=200)
response = self._api.car_route([11.0, 12.0], [22.0, 23.0], [herepy.RouteMode.pedestrian, herepy.RouteMode.fastest])
self.assertIsInstance(response, herepy.HEREError)
@responses.activate
def test_pedastrianroute_whensucceed(self):
with codecs.open('testdata/models/routing.json', mode='r', encoding='utf-8') as f:
expectedResponse = f.read()
responses.add(responses.GET, 'https://route.cit.api.here.com/routing/7.2/calculateroute.json',
expectedResponse, status=200)
response = self._api.pedastrian_route([11.0, 12.0], [22.0, 23.0], [herepy.RouteMode.pedestrian, herepy.RouteMode.fastest])
self.assertTrue(response)
self.assertIsInstance(response, herepy.RoutingResponse)
@responses.activate
def test_pedastrianroute_withdefaultmodes_whensucceed(self):
with codecs.open('testdata/models/routing.json', mode='r', encoding='utf-8') as f:
expectedResponse = f.read()
responses.add(responses.GET, 'https://route.cit.api.here.com/routing/7.2/calculateroute.json',
expectedResponse, status=200)
response = self._api.pedastrian_route([11.0, 12.0], [22.0, 23.0])
self.assertTrue(response)
self.assertIsInstance(response, herepy.RoutingResponse)
@responses.activate
def test_pedastrianroute_whenerroroccured(self):
with open('testdata/models/routing_error.json', 'r') as f:
expectedResponse = f.read()
responses.add(responses.GET, 'https://route.cit.api.here.com/routing/7.2/calculateroute.json',
expectedResponse, status=200)
response = self._api.pedastrian_route([11.0, 12.0], [22.0, 23.0], [herepy.RouteMode.car, herepy.RouteMode.fastest])
self.assertIsInstance(response, herepy.HEREError)
@responses.activate
def test_intermediateroute_whensucceed(self):
with codecs.open('testdata/models/routing.json', mode='r', encoding='utf-8') as f:
expectedResponse = f.read()
responses.add(responses.GET, 'https://route.cit.api.here.com/routing/7.2/calculateroute.json',
expectedResponse, status=200)
response = self._api.intermediate_route([11.0, 12.0], [15.0, 16.0], [22.0, 23.0], [herepy.RouteMode.car, herepy.RouteMode.fastest])
self.assertTrue(response)
self.assertIsInstance(response, herepy.RoutingResponse)
@responses.activate
def test_intermediateroute_withdefaultmodes_whensucceed(self):
with codecs.open('testdata/models/routing.json', mode='r', encoding='utf-8') as f:
expectedResponse = f.read()
responses.add(responses.GET, 'https://route.cit.api.here.com/routing/7.2/calculateroute.json',
expectedResponse, status=200)
response = self._api.intermediate_route([11.0, 12.0], [15.0, 16.0], [22.0, 23.0])
self.assertTrue(response)
self.assertIsInstance(response, herepy.RoutingResponse)
@responses.activate
def test_intermediateroute_whenerroroccured(self):
with open('testdata/models/routing_error.json', 'r') as f:
expectedResponse = f.read()
responses.add(responses.GET, 'https://route.cit.api.here.com/routing/7.2/calculateroute.json',
expectedResponse, status=200)
response = self._api.intermediate_route([11.0, 12.0], [15.0, 16.0], [22.0, 23.0], [herepy.RouteMode.car, herepy.RouteMode.fastest])
self.assertIsInstance(response, herepy.HEREError)
@responses.activate
def test_publictransport_whensucceed(self):
with codecs.open('testdata/models/routing.json', mode='r', encoding='utf-8') as f:
expectedResponse = f.read()
responses.add(responses.GET, 'https://route.cit.api.here.com/routing/7.2/calculateroute.json',
expectedResponse, status=200)
response = self._api.public_transport([11.0, 12.0],
[15.0, 16.0],
True,
[herepy.RouteMode.publicTransport, herepy.RouteMode.fastest])
self.assertTrue(response)
self.assertIsInstance(response, herepy.RoutingResponse)
@responses.activate
def test_publictransport_withdefaultmodes_whensucceed(self):
with codecs.open('testdata/models/routing.json', mode='r', encoding='utf-8') as f:
expectedResponse = f.read()
responses.add(responses.GET, 'https://route.cit.api.here.com/routing/7.2/calculateroute.json',
expectedResponse, status=200)
response = self._api.public_transport([11.0, 12.0],
[15.0, 16.0],
True)
self.assertTrue(response)
self.assertIsInstance(response, herepy.RoutingResponse)
@responses.activate
def test_publictransport_whenerroroccured(self):
with open('testdata/models/routing_error.json', 'r') as f:
expectedResponse = f.read()
responses.add(responses.GET, 'https://route.cit.api.here.com/routing/7.2/calculateroute.json',
expectedResponse, status=200)
response = self._api.public_transport([11.0, 12.0],
[15.0, 16.0],
True,
[herepy.RouteMode.car, herepy.RouteMode.fastest])
self.assertIsInstance(response, herepy.HEREError)
@responses.activate
def test_locationnearmotorway_whensucceed(self):
with codecs.open('testdata/models/routing.json', mode='r', encoding='utf-8') as f:
expectedResponse = f.read()
responses.add(responses.GET, 'https://route.cit.api.here.com/routing/7.2/calculateroute.json',
expectedResponse, status=200)
response = self._api.location_near_motorway([11.0, 12.0],
[22.0, 23.0],
[herepy.RouteMode.car, herepy.RouteMode.fastest])
self.assertTrue(response)
self.assertIsInstance(response, herepy.RoutingResponse)
@responses.activate
def test_locationnearmotorway_withdefaultmodes_whensucceed(self):
with codecs.open('testdata/models/routing.json', mode='r', encoding='utf-8') as f:
expectedResponse = f.read()
responses.add(responses.GET, 'https://route.cit.api.here.com/routing/7.2/calculateroute.json',
expectedResponse, status=200)
response = self._api.location_near_motorway([11.0, 12.0],
[22.0, 23.0])
self.assertTrue(response)
self.assertIsInstance(response, herepy.RoutingResponse)
@responses.activate
def test_locationnearmotorway_whenerroroccured(self):
with open('testdata/models/routing_error.json', 'r') as f:
expectedResponse = f.read()
responses.add(responses.GET, 'https://route.cit.api.here.com/routing/7.2/calculateroute.json',
expectedResponse, status=200)
response = self._api.location_near_motorway([11.0, 12.0],
[22.0, 23.0],
[herepy.RouteMode.pedestrian, herepy.RouteMode.fastest])
self.assertIsInstance(response, herepy.HEREError)
@responses.activate
def test_truckroute_whensucceed(self):
with codecs.open('testdata/models/routing.json', mode='r', encoding='utf-8') as f:
expectedResponse = f.read()
responses.add(responses.GET, 'https://route.cit.api.here.com/routing/7.2/calculateroute.json',
expectedResponse, status=200)
response = self._api.truck_route([11.0, 12.0],
[22.0, 23.0],
[herepy.RouteMode.truck, herepy.RouteMode.fastest])
self.assertTrue(response)
self.assertIsInstance(response, herepy.RoutingResponse)
@responses.activate
def test_truckroute_withdefaultmodes_whensucceed(self):
with codecs.open('testdata/models/routing.json', mode='r', encoding='utf-8') as f:
expectedResponse = f.read()
responses.add(responses.GET, 'https://route.cit.api.here.com/routing/7.2/calculateroute.json',
expectedResponse, status=200)
response = self._api.truck_route([11.0, 12.0],
[22.0, 23.0])
self.assertTrue(response)
self.assertIsInstance(response, herepy.RoutingResponse)
@responses.activate
def test_truckroute_whenerroroccured(self):
with open('testdata/models/routing_error.json', 'r') as f:
expectedResponse = f.read()
responses.add(responses.GET, 'https://route.cit.api.here.com/routing/7.2/calculateroute.json',
expectedResponse, status=200)
response = self._api.truck_route([11.0, 12.0],
[22.0, 23.0],
[herepy.RouteMode.pedestrian, herepy.RouteMode.fastest])
self.assertIsInstance(response, herepy.HEREError)
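# Entry point so the file can also be run directly; assumes `responses`,
# `herepy` and the testdata/ fixtures referenced above are available.
if __name__ == '__main__':
    unittest.main()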
| [
"[email protected]"
] | |
4f286ff92811136f0e5a8eae86b3ab789f7cc07f | 786de89be635eb21295070a6a3452f3a7fe6712c | /CalibManager/tags/V00-00-02/src/GUICalibDirTree.py | 862c14460ee2b3dba4aeab4e61a61435098d92dd | [] | no_license | connectthefuture/psdmrepo | 85267cfe8d54564f99e17035efe931077c8f7a37 | f32870a987a7493e7bf0f0a5c1712a5a030ef199 | refs/heads/master | 2021-01-13T03:26:35.494026 | 2015-09-03T22:22:11 | 2015-09-03T22:22:11 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 8,840 | py | #--------------------------------------------------------------------------
# File and Version Information:
# $Id$
#
# Description:
# Module GUICalibDirTree...
#
#------------------------------------------------------------------------
"""GUI works with dark run"""
#------------------------------
# Module's version from CVS --
#------------------------------
__version__ = "$Revision: 4 $"
# $Source$
#-------------------
# Import modules --
#-------------------
import sys
import os
from PyQt4 import QtGui, QtCore
#import time # for sleep(sec)
from ConfigParametersForApp import cp
from Logger import logger
from FileNameManager import fnm
#-----------------------------
class GUICalibDirTree (QtGui.QWidget):
calib_types_cspad = [
'center'
,'center_corr_xxx'
,'center_global'
,'offset'
,'offset_corr'
,'marg_gap_shift'
,'quad_rotation'
,'quad_tilt'
,'rotation'
,'tilt'
,'beam_vector'
,'beam_intersect'
,'pedestals'
,'pixel_status'
,'common_mode'
,'filter'
,'pixel_gain'
]
calib_types_cspad2x2 = [
'center'
,'tilt'
,'pedestals'
,'pixel_status'
,'common_mode'
,'filter'
,'pixel_gain'
]
calib_dets_cspad = [
'XppGon.0:Cspad.0'
,'XcsEndstation.0:Cspad.0'
,'CxiDs1.0:Cspad.0'
,'CxiDsd.0:Cspad.0'
]
calib_dets_cspad2x2 = [
'XppGon.0:Cspad2x2.0'
,'XppGon.0:Cspad2x2.1'
,'MecTargetChamber.0:Cspad2x2.1'
,'MecTargetChamber.0:Cspad2x2.2'
,'MecTargetChamber.0:Cspad2x2.3'
,'MecTargetChamber.0:Cspad2x2.4'
,'MecTargetChamber.0:Cspad2x2.5'
,'CxiSc.0:Cspad2x2.0'
,'MecTargetChamber.0:Cspad2x2.1'
]
calib_vers = [
'CsPad::CalibV1'
,'CsPad2x2::CalibV1'
]
def __init__(self, parent=None) :
#super(GUIQTreeView, self).__init__(parent)
QtGui.QWidget.__init__(self, parent)
self.setGeometry(100, 100, 200, 600)
self.setWindowTitle('Item selection tree')
self.setFrame()
#self.icon_folder_open = QtGui.QIcon("icons/folder_open.gif")
#self.icon_folder_closed = QtGui.QIcon("icons/folder_closed.gif")
#self.icon_table = QtGui.QIcon("icons/table.gif")
self.fill_calib_dir_tree()
#self.view = QtGui.QListView()
#self.view = QtGui.QTableView()
self.view = QtGui.QTreeView()
self.view.setModel(self.model)
#self.view.setDragDropMode(QtGui.QAbstractItemView.InternalMove)
#self.view.expandAll()
self.view.setAnimated(True)
vbox = QtGui.QVBoxLayout()
vbox.addWidget(self.view)
if parent == None :
self.setLayout(vbox)
self.connect(self.view.selectionModel(), QtCore.SIGNAL('currentChanged(QModelIndex, QModelIndex)'), self.itemSelected)
#self.view.clicked.connect(self.someMethod1) # This works
#self.view.doubleClicked.connect(self.someMethod2) # This works
self.model.itemChanged.connect(self.itemChanged)
self.view.expanded.connect(self.itemExpanded)
self.view.collapsed.connect(self.itemCollapsed)
self.setStyle()
def fill_calib_dir_tree(self) :
self.model = QtGui.QStandardItemModel()
self.model.setHorizontalHeaderLabels('x')
#self.model.setHorizontalHeaderItem(1,QtGui.QStandardItem('Project Title'))
#self.model.setVerticalHeaderLabels('abc')
for v in self.calib_vers :
det, vers = v.split('::',1)
#print 'det, vers =', det, vers
parentItem = self.model.invisibleRootItem()
itemv = QtGui.QStandardItem(QtCore.QString(v))
itemv.setIcon(cp.icon_folder_closed)
#itemv.setCheckable(True)
parentItem.appendRow(itemv)
if det == 'CsPad' :
self.calib_type_list = self.calib_types_cspad
self.calib_det_list = self.calib_dets_cspad
elif det == 'CsPad2x2' :
self.calib_type_list = self.calib_types_cspad2x2
self.calib_det_list = self.calib_dets_cspad2x2
else :
print 'UNKNOWN DETECTOR'
for d in self.calib_det_list :
itemd = QtGui.QStandardItem(QtCore.QString(d))
itemd.setIcon(cp.icon_folder_closed)
#itemd.setCheckable(True)
itemv.appendRow(itemd)
for t in self.calib_type_list :
itemt = QtGui.QStandardItem(QtCore.QString(t))
itemt.setIcon(cp.icon_folder_closed)
itemt.setCheckable(True)
itemd.appendRow(itemt)
def getFullNameFromItem(self, item):
#item = self.model.itemFromIndex(ind)
ind = self.model.indexFromItem(item)
return self.getFullNameFromIndex(ind)
def getFullNameFromIndex(self, ind):
item = self.model.itemFromIndex(ind)
if item is None : return 'None'
self._full_name = item.text()
self._getFullName(ind)
return str(self._full_name)
def _getFullName(self, ind):
ind_par = self.model.parent(ind)
if(ind_par.column() == -1) :
item = self.model.itemFromIndex(ind)
            self._full_name = '/' + self._full_name
#print 'Item full name :' + self._full_name
return self._full_name
else:
item_par = self.model.itemFromIndex(ind_par)
self._full_name = item_par.text() + '/' + self._full_name
self._getFullName(ind_par)
def getTextListOfChildren(self, index):
item = self.model.itemFromIndex(index)
number_of_children = item.rowCount()
txt_list_of_children = []
for row in range(number_of_children) :
child_item = item.child(row)
txt_list_of_children.append(str(child_item.text()))
return txt_list_of_children
def itemChanged(self, item):
state = ['UNCHECKED', 'TRISTATE', 'CHECKED'][item.checkState()]
msg = 'Item with full name %s, is at state %s' % ( self.getFullNameFromItem(item), state)
#print msg
logger.info(msg, __name__)
def itemExpanded(self, ind):
item = self.model.itemFromIndex(ind)
item.setIcon(cp.icon_folder_open)
msg = 'Item expanded: %s' % item.text()
logger.info(msg, __name__)
def itemCollapsed(self, ind):
item = self.model.itemFromIndex(ind)
item.setIcon(cp.icon_folder_closed)
msg = 'Item collapsed: %s' % item.text()
logger.info(msg, __name__)
def itemSelected(self, selected, deselected):
selected_txt = self.getFullNameFromIndex(selected)
msg1 = 'Item selected: %s' % self.getFullNameFromIndex(selected)
txt_list_of_children = self.getTextListOfChildren(selected)
self.onSelectedItem(selected_txt, txt_list_of_children)
logger.info(msg1, __name__)
#deselected_txt = self.getFullNameFromIndex(deselected)
#msg2 = 'Item deselected: %s' % self.getFullNameFromIndex(deselected)
#logger.info(msg2, __name__)
#self.onDeselectedItem(deselected_txt)
def onSelectedItem(self, path_from_calib, list_expected) :
cp.guitabs.setTabByName('Status')
dir = os.path.join(fnm.path_to_calib_dir(), path_from_calib)
cp.guistatus.statusOfDir(dir, list_expected)
def setStyle(self):
pass
#self.setMinimumSize(100,400)
self.setMinimumWidth(150)
self.setMaximumWidth(500)
self.setMinimumHeight(500)
self.setContentsMargins (QtCore.QMargins(-9,-9,-9,-9))
def setFrame(self):
self.frame = QtGui.QFrame(self)
self.frame.setFrameStyle( QtGui.QFrame.Box | QtGui.QFrame.Sunken ) #Box, Panel | Sunken, Raised
self.frame.setLineWidth(0)
self.frame.setMidLineWidth(1)
self.frame.setGeometry(self.rect())
self.frame.setVisible(False)
def resizeEvent(self, e):
#logger.debug('resizeEvent', self.name)
self.frame.setGeometry(self.rect())
def moveEvent(self, e):
#logger.debug('moveEvent', self.name)
#self.position = self.mapToGlobal(self.pos())
#self.position = self.pos()
#logger.debug('moveEvent - pos:' + str(self.position), __name__)
pass
#-----------------------------
if __name__ == "__main__" :
app = QtGui.QApplication(sys.argv)
widget = GUICalibDirTree ()
widget.show()
app.exec_()
#-----------------------------
| [
"[email protected]@b967ad99-d558-0410-b138-e0f6c56caec7"
] | [email protected]@b967ad99-d558-0410-b138-e0f6c56caec7 |
4b304a6e1a76ef2a2f7c84a60648ce425b7fb6eb | 4ad94b71e30883d6df07a3277265bd6fb7457ba7 | /python/examples/doc_examples/export/animation_mpeg4.py | 7c73d2ff682be520d5d63b21b8a1722fb7631a24 | [
"MIT"
] | permissive | Tecplot/handyscripts | 7cb1d4c80f323c785d06b0c8d37aeb0acb67f58c | 84a89bfecff5479a0319f08eb8aa9df465283830 | refs/heads/master | 2023-08-22T15:29:22.629644 | 2023-08-12T01:19:59 | 2023-08-12T01:19:59 | 149,826,165 | 89 | 64 | MIT | 2022-01-13T01:11:02 | 2018-09-21T22:47:23 | Jupyter Notebook | UTF-8 | Python | false | false | 2,932 | py | import argparse, os
import tecplot as tp
from tecplot.constant import *
def parse_args():
"""
This script is to be run from the command line and accepts the
following command line arguments. Run this script with "--help"
to see usage and help information.
"""
parser = argparse.ArgumentParser()
parser.add_argument('-c', '--connect', action='store_true',
help='connect to TecUtil Server')
parser.add_argument('-p', '--port', type=int, default=7600,
help='port to use when connecting to TecUtil Server')
parser.add_argument('-n', '--nframes', type=int, default=360,
help='number of frames to produce in video')
parser.add_argument('outfile', nargs='?', default='aileron_roll.mp4',
help='output file name')
return parser.parse_args()
def setup_plot():
"""
Load the F-18 dataset from Tecplot 360's examples and show the
jet surface in 3D.
"""
tp.new_layout()
exdir = tp.session.tecplot_examples_directory()
datafile = os.path.join(exdir, 'SimpleData', 'F18.plt')
ds = tp.data.load_tecplot(datafile)
frame = tp.active_frame()
frame.show_border = False
plot = frame.plot(PlotType.Cartesian3D)
plot.activate()
plot.contour(0).variable = ds.variable('S')
plot.show_contour = True
return plot
def translate_view(view, x=0, y=0, z=0):
"""
Translate the viewer with respect to the data.
"""
p = view.position
view.position = p.x + x, p.y + y, p.z + z
def create_animation(outfile, plot, nframes):
"""
Using the tp.export.animation_mpeg4() context manager, the F-18 is
recorded doing an "aileron roll" by rotating and translating the
viewer with respect to the data by a small amount and capturing
each frame of the animation with a call to ani.export_animation_frame()
"""
with tp.session.suspend():
opts = dict(
width=400,
animation_speed=30,
supersample=3,
)
view = plot.view
translate_view(view, -15)
#{DOC:highlight}[
with tp.export.animation_mpeg4(outfile, **opts) as ani:
#]
for i in range(args.nframes):
view.rotate_axes(5, (1, 0, 0))
translate_view(view, 30 / args.nframes)
#{DOC:highlight}[
ani.export_animation_frame()
#]
"""
This script is meant to run on the command line. Run with "--help" to see
usage and help information about the options it understands. It loads
the F-18 dataset from Tecplot 360's examples directory and produces a
video of the model doing an "aileron roll" by manipulating the viewer
position.
"""
args = parse_args()
if args.connect:
tp.session.connect(port=args.port)
plot = setup_plot()
create_animation(args.outfile, plot, args.nframes)
print('video file created:', args.outfile)
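# Example invocation (flags as defined in parse_args above; the output file
# name is positional):
#   python animation_mpeg4.py --nframes 180 aileron_roll.mp4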
| [
"[email protected]"
] | |
826ff5fbda6157abe17679e09c88a2f5ee00718f | 7b2d14f78099fde6c4a35082c9c294d1771cb163 | /Week 8/class_test.py | 53f6096a898910fbe51a1f31c0374884107660a0 | [] | no_license | pinardy/Digital-World | 04c6ddb369ede7295a0891aaaa006486c557965e | dd0a351eb64f05b524b08c47cd0c0ad3eadd775c | refs/heads/master | 2020-12-30T22:45:02.448171 | 2018-01-30T03:06:08 | 2018-01-30T03:06:08 | 80,622,946 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 277 | py | class A:
x = []
def add(self):
self.x.append(1)
class B:
def __init__(self):
self.x = []
def add(self):
self.x.append(1)
x = A()
y = A()
x.add()
print x.x  # prints [1]: only x.add() has run so far (Python 2 print statement)
y.add()  # appends to the same shared list, so A.x becomes [1, 1]
# print "A's x:",x.x  # would print [1, 1]
x = B()
y = B()
x.add()
y.add()
# print "B's x:",x.x  # would print [1]: B instances do not share their lists
| [
"[email protected]"
] | |
236de0d625e60d74d181e3e4a7c9f70b19d02c0d | 5a809d40a91f5504b61626ec02a9304e62291bc0 | /env/lib/python3.6/site-packages/chargebee/version.py | 30cbf0d92a4e12680dbf782cbdd8e9bdc38f1300 | [
"MIT"
] | permissive | Mohitkaushal97/File | 5788bcfd4bb54ad703d82c8184efab8152ae2f09 | 0bc5cca9a557e242861a289af74dfe8b905bc3bd | refs/heads/master | 2022-12-19T03:19:29.213914 | 2020-10-01T06:28:36 | 2020-10-01T06:28:36 | 300,170,604 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 18 | py | VERSION = '2.7.4'
| [
"[email protected]"
] | |
2d959348fb53a3f73acfd66f0441f7e8c6026727 | 813284b9dac4477f4893cb6b30ffafab8e181cc4 | /contrib/linearize/linearize-hashes.py | d09065439ec8c13970e3d8289d3634fc41ad3053 | [
"MIT"
] | permissive | phoenixkonsole/xbtx | 609809c29c32e2c4373a26204480a0e2a9f0922e | 2f9db3d0ca34103e315a5bc9ef2fa2d42cb71810 | refs/heads/master | 2023-05-11T17:37:28.762478 | 2023-05-03T09:54:14 | 2023-05-03T09:54:14 | 243,993,348 | 3 | 11 | MIT | 2023-05-03T09:54:15 | 2020-02-29T15:30:47 | C++ | UTF-8 | Python | false | false | 4,698 | py | #!/usr/bin/env python3
#
# linearize-hashes.py: List blocks in a linear, no-fork version of the chain.
#
# Copyright (c) 2013-2016 The Bitcoin Core developers
# Copyright (c) 2017 The BitcoinSubsidium Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
#
from __future__ import print_function
try: # Python 3
import http.client as httplib
except ImportError: # Python 2
import httplib
import json
import re
import base64
import sys
import os
import os.path
settings = {}
##### Switch endian-ness #####
def hex_switchEndian(s):
""" Switches the endianness of a hex string (in pairs of hex chars) """
pairList = [s[i:i+2].encode() for i in range(0, len(s), 2)]
return b''.join(pairList[::-1]).decode()
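# e.g. hex_switchEndian('1a2b3c') == '3c2b1a'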
class BitcoinSubsidiumRPC:
def __init__(self, host, port, username, password):
authpair = "%s:%s" % (username, password)
authpair = authpair.encode('utf-8')
self.authhdr = b"Basic " + base64.b64encode(authpair)
self.conn = httplib.HTTPConnection(host, port=port, timeout=30)
def execute(self, obj):
try:
self.conn.request('POST', '/', json.dumps(obj),
{ 'Authorization' : self.authhdr,
'Content-type' : 'application/json' })
except ConnectionRefusedError:
print('RPC connection refused. Check RPC settings and the server status.',
file=sys.stderr)
return None
resp = self.conn.getresponse()
if resp is None:
print("JSON-RPC: no response", file=sys.stderr)
return None
body = resp.read().decode('utf-8')
resp_obj = json.loads(body)
return resp_obj
@staticmethod
def build_request(idx, method, params):
obj = { 'version' : '1.1',
'method' : method,
'id' : idx }
if params is None:
obj['params'] = []
else:
obj['params'] = params
return obj
@staticmethod
def response_is_error(resp_obj):
return 'error' in resp_obj and resp_obj['error'] is not None
def get_block_hashes(settings, max_blocks_per_call=10000):
rpc = BitcoinSubsidiumRPC(settings['host'], settings['port'],
settings['rpcuser'], settings['rpcpassword'])
height = settings['min_height']
while height < settings['max_height']+1:
num_blocks = min(settings['max_height']+1-height, max_blocks_per_call)
batch = []
for x in range(num_blocks):
batch.append(rpc.build_request(x, 'getblockhash', [height + x]))
reply = rpc.execute(batch)
if reply is None:
print('Cannot continue. Program will halt.')
return None
for x,resp_obj in enumerate(reply):
if rpc.response_is_error(resp_obj):
print('JSON-RPC: error at height', height+x, ': ', resp_obj['error'], file=sys.stderr)
sys.exit(1)
assert(resp_obj['id'] == x) # assume replies are in-sequence
if settings['rev_hash_bytes'] == 'true':
resp_obj['result'] = hex_switchEndian(resp_obj['result'])
print(resp_obj['result'])
height += num_blocks
def get_rpc_cookie():
# Open the cookie file
with open(os.path.join(os.path.expanduser(settings['datadir']), '.cookie'), 'r', encoding="ascii") as f:
combined = f.readline()
combined_split = combined.split(":")
settings['rpcuser'] = combined_split[0]
settings['rpcpassword'] = combined_split[1]
if __name__ == '__main__':
if len(sys.argv) != 2:
print("Usage: linearize-hashes.py CONFIG-FILE")
sys.exit(1)
f = open(sys.argv[1], encoding="utf8")
for line in f:
# skip comment lines
        m = re.search(r'^\s*#', line)
if m:
continue
# parse key=value lines
        m = re.search(r'^(\w+)\s*=\s*(\S.*)$', line)
if m is None:
continue
settings[m.group(1)] = m.group(2)
f.close()
if 'host' not in settings:
settings['host'] = '127.0.0.1'
if 'port' not in settings:
settings['port'] = 8766
if 'min_height' not in settings:
settings['min_height'] = 0
if 'max_height' not in settings:
settings['max_height'] = 313000
if 'rev_hash_bytes' not in settings:
settings['rev_hash_bytes'] = 'false'
use_userpass = True
use_datadir = False
if 'rpcuser' not in settings or 'rpcpassword' not in settings:
use_userpass = False
if 'datadir' in settings and not use_userpass:
use_datadir = True
if not use_userpass and not use_datadir:
print("Missing datadir or username and/or password in cfg file", file=sys.stderr)
sys.exit(1)
settings['port'] = int(settings['port'])
settings['min_height'] = int(settings['min_height'])
settings['max_height'] = int(settings['max_height'])
# Force hash byte format setting to be lowercase to make comparisons easier.
settings['rev_hash_bytes'] = settings['rev_hash_bytes'].lower()
# Get the rpc user and pass from the cookie if the datadir is set
if use_datadir:
get_rpc_cookie()
get_block_hashes(settings)
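# Example CONFIG-FILE contents (illustrative values; the defaults above apply
# to any key that is omitted):
#   host=127.0.0.1
#   port=8766
#   rpcuser=someuser
#   rpcpassword=somepassword
#   min_height=0
#   max_height=313000
#   rev_hash_bytes=false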
| [
"[email protected]"
] | |
db31b8d2ff15f45a463ad6fbc60c2cb57dc1f3f5 | 31e3e0ce6d8b8cd1b286971aa1ea3c56a338ca48 | /sunpy/util/sphinx/doctest.py | 5a29458fbf6228f895f78a2fab1182fc8684257c | [
"BSD-2-Clause"
] | permissive | sunpy/sunpy | d8df998cf7753834ffd7add6911c0e4f6828b5b8 | edd3ea805f4540d41ce2932a0e865cab2d6a4cf5 | refs/heads/main | 2023-09-01T12:05:09.343909 | 2023-08-31T13:36:35 | 2023-08-31T13:36:35 | 2,165,383 | 792 | 683 | BSD-2-Clause | 2023-09-14T14:03:09 | 2011-08-06T15:34:08 | Python | UTF-8 | Python | false | false | 1,966 | py | # Licensed under the Astropy 3-clause BSD license - see licenses/ASTROPY.rst
"""
This is a set of three directives that allow us to insert metadata
about doctests into the .rst files so the testing framework knows
which tests to skip.
This is quite different from the doctest extension in Sphinx itself,
which actually does something. For astropy, all of the testing is
centrally managed from py.test and Sphinx is not used for running
tests.
"""
import re
from docutils.nodes import literal_block
from docutils.parsers.rst import Directive
class DoctestSkipDirective(Directive):
has_content = True
def run(self):
# Check if there is any valid argument, and skip it. Currently only
# 'win32' is supported in astropy.tests.pytest_plugins.
if re.match('win32', self.content[0]):
self.content = self.content[2:]
code = '\n'.join(self.content)
return [literal_block(code, code)]
class DoctestOmitDirective(Directive):
has_content = True
def run(self):
# Simply do not add any content when this directive is encountered
return []
class DoctestRequiresDirective(DoctestSkipDirective):
# This is silly, but we really support an unbounded number of
# optional arguments
optional_arguments = 64
def setup(app):
app.add_directive('doctest-requires', DoctestRequiresDirective)
app.add_directive('doctest-skip', DoctestSkipDirective)
app.add_directive('doctest-skip-all', DoctestSkipDirective)
app.add_directive('doctest', DoctestSkipDirective)
# Code blocks that use this directive will not appear in the generated
# documentation. This is intended to hide boilerplate code that is only
# useful for testing documentation using doctest, but does not actually
# belong in the documentation itself.
app.add_directive('testsetup', DoctestOmitDirective)
return {'parallel_read_safe': True,
'parallel_write_safe': True}
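# Example use in a .rst file (illustrative):
#
#   .. doctest-skip::
#
#       >>> import datetime
#       >>> datetime.datetime.now()   # output varies, so skip this doctest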
| [
"[email protected]"
] | |
44846d39e03d4c86d424fa57d50d5d22b76e2b30 | de24f83a5e3768a2638ebcf13cbe717e75740168 | /moodledata/vpl_data/73/usersdata/214/39773/submittedfiles/triangulo.py | 69ff688cd3048f7f591723dc3954f22789a5366c | [] | no_license | rafaelperazzo/programacao-web | 95643423a35c44613b0f64bed05bd34780fe2436 | 170dd5440afb9ee68a973f3de13a99aa4c735d79 | refs/heads/master | 2021-01-12T14:06:25.773146 | 2017-12-22T16:05:45 | 2017-12-22T16:05:45 | 69,566,344 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 474 | py | # -*- coding: utf-8 -*-
import math
a=int(input('Digite a:'))
b=int(input('Digite b:'))
c=int(input('Digite c:'))
# Assumes the sides are entered largest-first: a >= b >= c > 0.
if a<b+c:
    print('S')
else:
    print('N')
if a<b+c:
    # Right triangle: Pythagorean check against the largest side a.
    if a**2==b**2+c**2:
        print('Re')
    # Equilateral, isosceles and scalene are mutually exclusive.
    if a==b==c:
        print('Eq')
    elif a==b or b==c:
        print('Is')
    else:
        print('Es')
| [
"[email protected]"
] | |
694a32db49cb1ca6e8ff77d71cfc2dbc980fad97 | a668806b052884b2f1faef6a28304c908a89fc68 | /test/fixtures/LoggerFixture.py | fcb55b6b5af8ab5768519c8566db92db68e92c05 | [
"MIT"
] | permissive | pip-services3-python/pip-services3-elasticsearch-python | aa2e49a70a0debcb2e77f59aefe144baf401b4ca | fe2fba2aeaef7ba80c17732d0065e5bcd60fcb82 | refs/heads/master | 2023-01-28T14:40:18.698083 | 2023-01-24T08:08:32 | 2023-01-24T08:08:32 | 140,886,199 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,151 | py | # -*- coding: utf-8 -*-
import time
from pip_services3_components.log import LogLevel
class LoggerFixture:
def __init__(self, logger):
self.__logger = logger
def test_log_level(self):
assert self.__logger.get_level() >= LogLevel.Nothing
assert self.__logger.get_level() <= LogLevel.Trace
def test_simple_logging(self):
self.__logger.set_level(LogLevel.Trace)
self.__logger.fatal(None, None, 'Fatal error message')
self.__logger.error(None, None, 'Error message')
self.__logger.warn(None, 'Warning message')
self.__logger.info(None, 'Information message')
self.__logger.debug(None, 'Debug message')
self.__logger.trace(None, 'Trace message')
self.__logger.dump()
time.sleep(1)
def test_error_logging(self):
try:
# Raise an exception
raise Exception('test')
except Exception as err:
self.__logger.fatal('123', err, 'Fatal error')
self.__logger.error('123', err, 'Recoverable error')
assert err is not None
self.__logger.dump()
time.sleep(1) | [
"[email protected]"
] | |
32f57999cceed14699a94052de464465d2c5f3f6 | 9743d5fd24822f79c156ad112229e25adb9ed6f6 | /xai/brain/wordbase/nouns/_mendicant.py | b54bd489ffe8ff7e7bd2a65fa8196b2a4fa0cc55 | [
"MIT"
] | permissive | cash2one/xai | de7adad1758f50dd6786bf0111e71a903f039b64 | e76f12c9f4dcf3ac1c7c08b0cc8844c0b0a104b6 | refs/heads/master | 2021-01-19T12:33:54.964379 | 2017-01-28T02:00:50 | 2017-01-28T02:00:50 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 390 | py |
#class header
class _MENDICANT():
def __init__(self,):
self.name = "MENDICANT"
self.definitions = [u'someone, especially a member of a religious group, who lives by asking people they do not know for money']
self.parents = []
self.childen = []
self.properties = []
self.jsondata = {}
self.specie = 'nouns'
def run(self, obj1 = [], obj2 = []):
return self.jsondata
| [
"[email protected]"
] | |
9c7997be9cd575f150d0f208a7ab48236e792676 | ffc6efca14efd126e9b0541e7b0c1f3a05ee90e1 | /algorithms/sorts.py | 639abd50908bf5d6bb9026c50b0d6391653e9e3c | [] | no_license | dlopes7/python-tests | 65b6687fcee9c6230d1fd13aa2941fef34cbaa8f | 93c175c717499f75a2f533c2bccf4e4e0b886e25 | refs/heads/master | 2021-01-13T00:16:32.516708 | 2015-12-28T17:45:43 | 2015-12-28T17:45:43 | 48,704,303 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,112 | py | import random
import pygame
from algorithms.colors import *
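# algorithms.colors is not shown here; a minimal version only needs the two
# RGB tuples used below (assumed values):
#   BLACK = (0, 0, 0)
#   WHITE = (255, 255, 255)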
def draw_array(array, nome, frame):
    # Draw the array as vertical bars; 'nome' (Portuguese for "name") is the
    # algorithm label rendered together with the current frame number.
DISPLAY.fill(BLACK)
aux_surf = DISPLAY_FONT.render(nome+ ' - ' + str(frame), True, WHITE)
aux_rect = aux_surf.get_rect()
aux_rect.topleft = (10, 10)
DISPLAY.blit(aux_surf, aux_rect)
for idx, value in enumerate(array):
x = 10 + idx * 2
pygame.draw.line(DISPLAY, WHITE, (x, WINDOW_HEIGHT-10), (x, WINDOW_HEIGHT - value - 10), 1)
CLOCK.tick(FPS)
pygame.display.update()
def selection_sort():
frame = 0
lista = list(range(0, 500))
random.shuffle(lista)
for i in range( len(lista) ):
frame += 1
draw_array(lista, 'Selection Sort', frame)
        menor = i  # 'menor': index of the smallest element found so far
for k in range( i + 1 , len(lista) ):
if lista[k] < lista[menor]:
menor = k
lista[menor],lista[i]=lista[i],lista[menor]
def bubble_sort():
frame = 0
badList = list(range(0, 500))
random.shuffle(badList)
length = len(badList)
for i in range(0,length):
frame += 1
draw_array(badList, 'Bubble Sort', frame)
swapped = False
for element in range(0, length-i-1):
if badList[element] > badList[element + 1]:
hold = badList[element + 1]
badList[element + 1] = badList[element]
badList[element] = hold
swapped = True
if not swapped: break
def heapsort():
frame = 0
lst = list(range(0, 501))
random.shuffle(lst)
for start in range(int((len(lst)-2)/2), -1, -1):
frame += 1
draw_array(lst, 'Heap Sort', frame)
siftdown(lst, start, len(lst)-1)
for end in range(len(lst)-1, 0, -1):
frame += 1
draw_array(lst, 'Heap Sort', frame)
lst[end], lst[0] = lst[0], lst[end]
siftdown(lst, 0, end - 1)
return lst
def siftdown(lst, start, end):
root = start
while True:
child = root * 2 + 1
if child > end: break
if child + 1 <= end and lst[child] < lst[child + 1]:
child += 1
if lst[root] < lst[child]:
lst[root], lst[child] = lst[child], lst[root]
root = child
else:
break
def gnome():
frame = 0
lista = list(range(0, 100))
random.shuffle(lista)
pivot = 0
lista_length = len(lista)
while pivot < lista_length - 1:
frame += 1
draw_array(lista, 'Gnome Sort', frame)
if lista[pivot] > lista[pivot + 1]:
lista[pivot + 1], lista[pivot] = lista[pivot], lista[pivot + 1]
if pivot > 0:
pivot -= 2
pivot += 1
if __name__ == '__main__':
pygame.init()
CLOCK = pygame.time.Clock()
WINDOW_WIDTH = 1100
WINDOW_HEIGHT = 600
DISPLAY = pygame.display.set_mode((WINDOW_WIDTH, WINDOW_HEIGHT))
DISPLAY.fill(BLACK)
pygame.font.init()
DISPLAY_FONT = pygame.font.SysFont('couriernew', 36)
pygame.display.set_caption("Sort Tests")
FPS = 60
selection_sort()
bubble_sort()
heapsort()
gnome()
| [
"[email protected]"
] | |
09abbc7def9184b80f439ee054f332587bccaf68 | 03e3138f99f275d15d41a5c5bfb212f85d64d02e | /source/res/scripts/common/Lib/distutils/tests/test_core.py | 2de38bab70119d63190c0ca07ab4ad94abb32f45 | [] | no_license | TrenSeP/WorldOfTanks-Decompiled | e428728e7901146d0b599d02c930d70532232a97 | 1faa748acec1b7e435b657fd054ecba23dd72778 | refs/heads/1.4.1 | 2020-04-27T08:07:49.813023 | 2019-03-05T17:37:06 | 2019-03-05T17:37:06 | 174,159,837 | 1 | 0 | null | 2019-03-06T14:33:33 | 2019-03-06T14:24:36 | Python | UTF-8 | Python | false | false | 2,628 | py | # Python bytecode 2.7 (decompiled from Python 2.7)
# Embedded file name: scripts/common/Lib/distutils/tests/test_core.py
import StringIO
import distutils.core
import os
import shutil
import sys
import test.test_support
from test.test_support import captured_stdout, run_unittest
import unittest
from distutils.tests import support
setup_using___file__ = '\n__file__\n\nfrom distutils.core import setup\nsetup()\n'
setup_prints_cwd = '\nimport os\nprint os.getcwd()\n\nfrom distutils.core import setup\nsetup()\n'
class CoreTestCase(support.EnvironGuard, unittest.TestCase):
def setUp(self):
super(CoreTestCase, self).setUp()
self.old_stdout = sys.stdout
self.cleanup_testfn()
self.old_argv = (sys.argv, sys.argv[:])
def tearDown(self):
sys.stdout = self.old_stdout
self.cleanup_testfn()
sys.argv = self.old_argv[0]
sys.argv[:] = self.old_argv[1]
super(CoreTestCase, self).tearDown()
def cleanup_testfn(self):
path = test.test_support.TESTFN
if os.path.isfile(path):
os.remove(path)
elif os.path.isdir(path):
shutil.rmtree(path)
def write_setup(self, text, path=test.test_support.TESTFN):
f = open(path, 'w')
try:
f.write(text)
finally:
f.close()
return path
def test_run_setup_provides_file(self):
distutils.core.run_setup(self.write_setup(setup_using___file__))
def test_run_setup_uses_current_dir(self):
sys.stdout = StringIO.StringIO()
cwd = os.getcwd()
os.mkdir(test.test_support.TESTFN)
setup_py = os.path.join(test.test_support.TESTFN, 'setup.py')
distutils.core.run_setup(self.write_setup(setup_prints_cwd, path=setup_py))
output = sys.stdout.getvalue()
if output.endswith('\n'):
output = output[:-1]
self.assertEqual(cwd, output)
def test_debug_mode(self):
sys.argv = ['setup.py', '--name']
with captured_stdout() as stdout:
distutils.core.setup(name='bar')
stdout.seek(0)
self.assertEqual(stdout.read(), 'bar\n')
distutils.core.DEBUG = True
try:
with captured_stdout() as stdout:
distutils.core.setup(name='bar')
finally:
distutils.core.DEBUG = False
stdout.seek(0)
wanted = 'options (after parsing config files):\n'
self.assertEqual(stdout.readlines()[0], wanted)
def test_suite():
return unittest.makeSuite(CoreTestCase)
if __name__ == '__main__':
run_unittest(test_suite())
| [
"[email protected]"
] | |
95d03c0871078dbaef667f5b4515468e49b15261 | d99e5b65624f115db6982dd88af9390e8d766042 | /tensorflow/contrib/slim/python/slim/nets/inception_v3.py | 77c95b155f3b20cb6702b87bcdebee7af607416f | [
"Apache-2.0"
] | permissive | golbin/tensorflow | 03dbecb6f093f5628c072086c780659bcc14dba8 | 8a58a304bdcf909f8b55ec49e9280fc3af01c7d3 | refs/heads/master | 2021-01-12T07:05:41.360503 | 2016-12-20T00:15:41 | 2016-12-20T00:15:41 | 76,907,006 | 2 | 0 | null | 2016-12-19T23:58:44 | 2016-12-19T23:58:43 | null | UTF-8 | Python | false | false | 28,924 | py | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Contains the definition for inception v3 classification network."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import tensorflow as tf
slim = tf.contrib.slim
trunc_normal = lambda stddev: tf.truncated_normal_initializer(0.0, stddev)
def inception_v3_base(inputs,
final_endpoint='Mixed_7c',
min_depth=16,
depth_multiplier=1.0,
scope=None):
"""Inception model from http://arxiv.org/abs/1512.00567.
Constructs an Inception v3 network from inputs to the given final endpoint.
This method can construct the network up to the final inception block
Mixed_7c.
Note that the names of the layers in the paper do not correspond to the names
of the endpoints registered by this function although they build the same
network.
Here is a mapping from the old_names to the new names:
Old name | New name
=======================================
conv0 | Conv2d_1a_3x3
conv1 | Conv2d_2a_3x3
conv2 | Conv2d_2b_3x3
pool1 | MaxPool_3a_3x3
conv3 | Conv2d_3b_1x1
conv4 | Conv2d_4a_3x3
pool2 | MaxPool_5a_3x3
mixed_35x35x256a | Mixed_5b
mixed_35x35x288a | Mixed_5c
mixed_35x35x288b | Mixed_5d
mixed_17x17x768a | Mixed_6a
mixed_17x17x768b | Mixed_6b
mixed_17x17x768c | Mixed_6c
mixed_17x17x768d | Mixed_6d
mixed_17x17x768e | Mixed_6e
mixed_8x8x1280a | Mixed_7a
mixed_8x8x2048a | Mixed_7b
mixed_8x8x2048b | Mixed_7c
Args:
inputs: a tensor of size [batch_size, height, width, channels].
final_endpoint: specifies the endpoint to construct the network up to. It
can be one of ['Conv2d_1a_3x3', 'Conv2d_2a_3x3', 'Conv2d_2b_3x3',
'MaxPool_3a_3x3', 'Conv2d_3b_1x1', 'Conv2d_4a_3x3', 'MaxPool_5a_3x3',
'Mixed_5b', 'Mixed_5c', 'Mixed_5d', 'Mixed_6a', 'Mixed_6b', 'Mixed_6c',
'Mixed_6d', 'Mixed_6e', 'Mixed_7a', 'Mixed_7b', 'Mixed_7c'].
min_depth: Minimum depth value (number of channels) for all convolution ops.
Enforced when depth_multiplier < 1, and not an active constraint when
depth_multiplier >= 1.
depth_multiplier: Float multiplier for the depth (number of channels)
for all convolution ops. The value must be greater than zero. Typical
usage will be to set this value in (0, 1) to reduce the number of
parameters or computation cost of the model.
scope: Optional variable_scope.
Returns:
tensor_out: output tensor corresponding to the final_endpoint.
end_points: a set of activations for external use, for example summaries or
losses.
Raises:
ValueError: if final_endpoint is not set to one of the predefined values,
or depth_multiplier <= 0
"""
# end_points will collect relevant activations for external use, for example
# summaries or losses.
end_points = {}
if depth_multiplier <= 0:
raise ValueError('depth_multiplier is not greater than zero.')
depth = lambda d: max(int(d * depth_multiplier), min_depth)
with tf.variable_scope(scope, 'InceptionV3', [inputs]):
with slim.arg_scope([slim.conv2d, slim.max_pool2d, slim.avg_pool2d],
stride=1, padding='VALID'):
# 299 x 299 x 3
end_point = 'Conv2d_1a_3x3'
net = slim.conv2d(inputs, depth(32), [3, 3], stride=2, scope=end_point)
end_points[end_point] = net
if end_point == final_endpoint: return net, end_points
# 149 x 149 x 32
end_point = 'Conv2d_2a_3x3'
net = slim.conv2d(net, depth(32), [3, 3], scope=end_point)
end_points[end_point] = net
if end_point == final_endpoint: return net, end_points
# 147 x 147 x 32
end_point = 'Conv2d_2b_3x3'
net = slim.conv2d(net, depth(64), [3, 3], padding='SAME', scope=end_point)
end_points[end_point] = net
if end_point == final_endpoint: return net, end_points
# 147 x 147 x 64
end_point = 'MaxPool_3a_3x3'
net = slim.max_pool2d(net, [3, 3], stride=2, scope=end_point)
end_points[end_point] = net
if end_point == final_endpoint: return net, end_points
# 73 x 73 x 64
end_point = 'Conv2d_3b_1x1'
net = slim.conv2d(net, depth(80), [1, 1], scope=end_point)
end_points[end_point] = net
if end_point == final_endpoint: return net, end_points
# 73 x 73 x 80.
end_point = 'Conv2d_4a_3x3'
net = slim.conv2d(net, depth(192), [3, 3], scope=end_point)
end_points[end_point] = net
if end_point == final_endpoint: return net, end_points
# 71 x 71 x 192.
end_point = 'MaxPool_5a_3x3'
net = slim.max_pool2d(net, [3, 3], stride=2, scope=end_point)
end_points[end_point] = net
if end_point == final_endpoint: return net, end_points
# 35 x 35 x 192.
# Inception blocks
with slim.arg_scope([slim.conv2d, slim.max_pool2d, slim.avg_pool2d],
stride=1, padding='SAME'):
# mixed: 35 x 35 x 256.
end_point = 'Mixed_5b'
with tf.variable_scope(end_point):
with tf.variable_scope('Branch_0'):
branch_0 = slim.conv2d(net, depth(64), [1, 1], scope='Conv2d_0a_1x1')
with tf.variable_scope('Branch_1'):
branch_1 = slim.conv2d(net, depth(48), [1, 1], scope='Conv2d_0a_1x1')
branch_1 = slim.conv2d(branch_1, depth(64), [5, 5],
scope='Conv2d_0b_5x5')
with tf.variable_scope('Branch_2'):
branch_2 = slim.conv2d(net, depth(64), [1, 1], scope='Conv2d_0a_1x1')
branch_2 = slim.conv2d(branch_2, depth(96), [3, 3],
scope='Conv2d_0b_3x3')
branch_2 = slim.conv2d(branch_2, depth(96), [3, 3],
scope='Conv2d_0c_3x3')
with tf.variable_scope('Branch_3'):
branch_3 = slim.avg_pool2d(net, [3, 3], scope='AvgPool_0a_3x3')
branch_3 = slim.conv2d(branch_3, depth(32), [1, 1],
scope='Conv2d_0b_1x1')
net = tf.concat_v2([branch_0, branch_1, branch_2, branch_3], 3)
end_points[end_point] = net
if end_point == final_endpoint: return net, end_points
# mixed_1: 35 x 35 x 288.
end_point = 'Mixed_5c'
with tf.variable_scope(end_point):
with tf.variable_scope('Branch_0'):
branch_0 = slim.conv2d(net, depth(64), [1, 1], scope='Conv2d_0a_1x1')
with tf.variable_scope('Branch_1'):
branch_1 = slim.conv2d(net, depth(48), [1, 1], scope='Conv2d_0b_1x1')
branch_1 = slim.conv2d(branch_1, depth(64), [5, 5],
scope='Conv_1_0c_5x5')
with tf.variable_scope('Branch_2'):
branch_2 = slim.conv2d(net, depth(64), [1, 1],
scope='Conv2d_0a_1x1')
branch_2 = slim.conv2d(branch_2, depth(96), [3, 3],
scope='Conv2d_0b_3x3')
branch_2 = slim.conv2d(branch_2, depth(96), [3, 3],
scope='Conv2d_0c_3x3')
with tf.variable_scope('Branch_3'):
branch_3 = slim.avg_pool2d(net, [3, 3], scope='AvgPool_0a_3x3')
branch_3 = slim.conv2d(branch_3, depth(64), [1, 1],
scope='Conv2d_0b_1x1')
net = tf.concat_v2([branch_0, branch_1, branch_2, branch_3], 3)
end_points[end_point] = net
if end_point == final_endpoint: return net, end_points
# mixed_2: 35 x 35 x 288.
end_point = 'Mixed_5d'
with tf.variable_scope(end_point):
with tf.variable_scope('Branch_0'):
branch_0 = slim.conv2d(net, depth(64), [1, 1], scope='Conv2d_0a_1x1')
with tf.variable_scope('Branch_1'):
branch_1 = slim.conv2d(net, depth(48), [1, 1], scope='Conv2d_0a_1x1')
branch_1 = slim.conv2d(branch_1, depth(64), [5, 5],
scope='Conv2d_0b_5x5')
with tf.variable_scope('Branch_2'):
branch_2 = slim.conv2d(net, depth(64), [1, 1], scope='Conv2d_0a_1x1')
branch_2 = slim.conv2d(branch_2, depth(96), [3, 3],
scope='Conv2d_0b_3x3')
branch_2 = slim.conv2d(branch_2, depth(96), [3, 3],
scope='Conv2d_0c_3x3')
with tf.variable_scope('Branch_3'):
branch_3 = slim.avg_pool2d(net, [3, 3], scope='AvgPool_0a_3x3')
branch_3 = slim.conv2d(branch_3, depth(64), [1, 1],
scope='Conv2d_0b_1x1')
net = tf.concat_v2([branch_0, branch_1, branch_2, branch_3], 3)
end_points[end_point] = net
if end_point == final_endpoint: return net, end_points
# mixed_3: 17 x 17 x 768.
end_point = 'Mixed_6a'
with tf.variable_scope(end_point):
with tf.variable_scope('Branch_0'):
branch_0 = slim.conv2d(net, depth(384), [3, 3], stride=2,
padding='VALID', scope='Conv2d_1a_1x1')
with tf.variable_scope('Branch_1'):
branch_1 = slim.conv2d(net, depth(64), [1, 1], scope='Conv2d_0a_1x1')
branch_1 = slim.conv2d(branch_1, depth(96), [3, 3],
scope='Conv2d_0b_3x3')
branch_1 = slim.conv2d(branch_1, depth(96), [3, 3], stride=2,
padding='VALID', scope='Conv2d_1a_1x1')
with tf.variable_scope('Branch_2'):
branch_2 = slim.max_pool2d(net, [3, 3], stride=2, padding='VALID',
scope='MaxPool_1a_3x3')
net = tf.concat_v2([branch_0, branch_1, branch_2], 3)
end_points[end_point] = net
if end_point == final_endpoint: return net, end_points
# mixed4: 17 x 17 x 768.
end_point = 'Mixed_6b'
with tf.variable_scope(end_point):
with tf.variable_scope('Branch_0'):
branch_0 = slim.conv2d(net, depth(192), [1, 1], scope='Conv2d_0a_1x1')
with tf.variable_scope('Branch_1'):
branch_1 = slim.conv2d(net, depth(128), [1, 1], scope='Conv2d_0a_1x1')
branch_1 = slim.conv2d(branch_1, depth(128), [1, 7],
scope='Conv2d_0b_1x7')
branch_1 = slim.conv2d(branch_1, depth(192), [7, 1],
scope='Conv2d_0c_7x1')
with tf.variable_scope('Branch_2'):
branch_2 = slim.conv2d(net, depth(128), [1, 1], scope='Conv2d_0a_1x1')
branch_2 = slim.conv2d(branch_2, depth(128), [7, 1],
scope='Conv2d_0b_7x1')
branch_2 = slim.conv2d(branch_2, depth(128), [1, 7],
scope='Conv2d_0c_1x7')
branch_2 = slim.conv2d(branch_2, depth(128), [7, 1],
scope='Conv2d_0d_7x1')
branch_2 = slim.conv2d(branch_2, depth(192), [1, 7],
scope='Conv2d_0e_1x7')
with tf.variable_scope('Branch_3'):
branch_3 = slim.avg_pool2d(net, [3, 3], scope='AvgPool_0a_3x3')
branch_3 = slim.conv2d(branch_3, depth(192), [1, 1],
scope='Conv2d_0b_1x1')
net = tf.concat_v2([branch_0, branch_1, branch_2, branch_3], 3)
end_points[end_point] = net
if end_point == final_endpoint: return net, end_points
# mixed_5: 17 x 17 x 768.
end_point = 'Mixed_6c'
with tf.variable_scope(end_point):
with tf.variable_scope('Branch_0'):
branch_0 = slim.conv2d(net, depth(192), [1, 1], scope='Conv2d_0a_1x1')
with tf.variable_scope('Branch_1'):
branch_1 = slim.conv2d(net, depth(160), [1, 1], scope='Conv2d_0a_1x1')
branch_1 = slim.conv2d(branch_1, depth(160), [1, 7],
scope='Conv2d_0b_1x7')
branch_1 = slim.conv2d(branch_1, depth(192), [7, 1],
scope='Conv2d_0c_7x1')
with tf.variable_scope('Branch_2'):
branch_2 = slim.conv2d(net, depth(160), [1, 1], scope='Conv2d_0a_1x1')
branch_2 = slim.conv2d(branch_2, depth(160), [7, 1],
scope='Conv2d_0b_7x1')
branch_2 = slim.conv2d(branch_2, depth(160), [1, 7],
scope='Conv2d_0c_1x7')
branch_2 = slim.conv2d(branch_2, depth(160), [7, 1],
scope='Conv2d_0d_7x1')
branch_2 = slim.conv2d(branch_2, depth(192), [1, 7],
scope='Conv2d_0e_1x7')
with tf.variable_scope('Branch_3'):
branch_3 = slim.avg_pool2d(net, [3, 3], scope='AvgPool_0a_3x3')
branch_3 = slim.conv2d(branch_3, depth(192), [1, 1],
scope='Conv2d_0b_1x1')
net = tf.concat_v2([branch_0, branch_1, branch_2, branch_3], 3)
end_points[end_point] = net
if end_point == final_endpoint: return net, end_points
# mixed_6: 17 x 17 x 768.
end_point = 'Mixed_6d'
with tf.variable_scope(end_point):
with tf.variable_scope('Branch_0'):
branch_0 = slim.conv2d(net, depth(192), [1, 1], scope='Conv2d_0a_1x1')
with tf.variable_scope('Branch_1'):
branch_1 = slim.conv2d(net, depth(160), [1, 1], scope='Conv2d_0a_1x1')
branch_1 = slim.conv2d(branch_1, depth(160), [1, 7],
scope='Conv2d_0b_1x7')
branch_1 = slim.conv2d(branch_1, depth(192), [7, 1],
scope='Conv2d_0c_7x1')
with tf.variable_scope('Branch_2'):
branch_2 = slim.conv2d(net, depth(160), [1, 1], scope='Conv2d_0a_1x1')
branch_2 = slim.conv2d(branch_2, depth(160), [7, 1],
scope='Conv2d_0b_7x1')
branch_2 = slim.conv2d(branch_2, depth(160), [1, 7],
scope='Conv2d_0c_1x7')
branch_2 = slim.conv2d(branch_2, depth(160), [7, 1],
scope='Conv2d_0d_7x1')
branch_2 = slim.conv2d(branch_2, depth(192), [1, 7],
scope='Conv2d_0e_1x7')
with tf.variable_scope('Branch_3'):
branch_3 = slim.avg_pool2d(net, [3, 3], scope='AvgPool_0a_3x3')
branch_3 = slim.conv2d(branch_3, depth(192), [1, 1],
scope='Conv2d_0b_1x1')
net = tf.concat_v2([branch_0, branch_1, branch_2, branch_3], 3)
end_points[end_point] = net
if end_point == final_endpoint: return net, end_points
# mixed_7: 17 x 17 x 768.
end_point = 'Mixed_6e'
with tf.variable_scope(end_point):
with tf.variable_scope('Branch_0'):
branch_0 = slim.conv2d(net, depth(192), [1, 1], scope='Conv2d_0a_1x1')
with tf.variable_scope('Branch_1'):
branch_1 = slim.conv2d(net, depth(192), [1, 1], scope='Conv2d_0a_1x1')
branch_1 = slim.conv2d(branch_1, depth(192), [1, 7],
scope='Conv2d_0b_1x7')
branch_1 = slim.conv2d(branch_1, depth(192), [7, 1],
scope='Conv2d_0c_7x1')
with tf.variable_scope('Branch_2'):
branch_2 = slim.conv2d(net, depth(192), [1, 1], scope='Conv2d_0a_1x1')
branch_2 = slim.conv2d(branch_2, depth(192), [7, 1],
scope='Conv2d_0b_7x1')
branch_2 = slim.conv2d(branch_2, depth(192), [1, 7],
scope='Conv2d_0c_1x7')
branch_2 = slim.conv2d(branch_2, depth(192), [7, 1],
scope='Conv2d_0d_7x1')
branch_2 = slim.conv2d(branch_2, depth(192), [1, 7],
scope='Conv2d_0e_1x7')
with tf.variable_scope('Branch_3'):
branch_3 = slim.avg_pool2d(net, [3, 3], scope='AvgPool_0a_3x3')
branch_3 = slim.conv2d(branch_3, depth(192), [1, 1],
scope='Conv2d_0b_1x1')
net = tf.concat_v2([branch_0, branch_1, branch_2, branch_3], 3)
end_points[end_point] = net
if end_point == final_endpoint: return net, end_points
# mixed_8: 8 x 8 x 1280.
end_point = 'Mixed_7a'
with tf.variable_scope(end_point):
with tf.variable_scope('Branch_0'):
branch_0 = slim.conv2d(net, depth(192), [1, 1], scope='Conv2d_0a_1x1')
branch_0 = slim.conv2d(branch_0, depth(320), [3, 3], stride=2,
padding='VALID', scope='Conv2d_1a_3x3')
with tf.variable_scope('Branch_1'):
branch_1 = slim.conv2d(net, depth(192), [1, 1], scope='Conv2d_0a_1x1')
branch_1 = slim.conv2d(branch_1, depth(192), [1, 7],
scope='Conv2d_0b_1x7')
branch_1 = slim.conv2d(branch_1, depth(192), [7, 1],
scope='Conv2d_0c_7x1')
branch_1 = slim.conv2d(branch_1, depth(192), [3, 3], stride=2,
padding='VALID', scope='Conv2d_1a_3x3')
with tf.variable_scope('Branch_2'):
branch_2 = slim.max_pool2d(net, [3, 3], stride=2, padding='VALID',
scope='MaxPool_1a_3x3')
net = tf.concat_v2([branch_0, branch_1, branch_2], 3)
end_points[end_point] = net
if end_point == final_endpoint: return net, end_points
# mixed_9: 8 x 8 x 2048.
end_point = 'Mixed_7b'
with tf.variable_scope(end_point):
with tf.variable_scope('Branch_0'):
branch_0 = slim.conv2d(net, depth(320), [1, 1], scope='Conv2d_0a_1x1')
with tf.variable_scope('Branch_1'):
branch_1 = slim.conv2d(net, depth(384), [1, 1], scope='Conv2d_0a_1x1')
branch_1 = tf.concat_v2(
[
slim.conv2d(
branch_1, depth(384), [1, 3], scope='Conv2d_0b_1x3'),
slim.conv2d(
branch_1, depth(384), [3, 1], scope='Conv2d_0b_3x1')
],
3)
with tf.variable_scope('Branch_2'):
branch_2 = slim.conv2d(net, depth(448), [1, 1], scope='Conv2d_0a_1x1')
branch_2 = slim.conv2d(
branch_2, depth(384), [3, 3], scope='Conv2d_0b_3x3')
branch_2 = tf.concat_v2(
[
slim.conv2d(
branch_2, depth(384), [1, 3], scope='Conv2d_0c_1x3'),
slim.conv2d(
branch_2, depth(384), [3, 1], scope='Conv2d_0d_3x1')
],
3)
with tf.variable_scope('Branch_3'):
branch_3 = slim.avg_pool2d(net, [3, 3], scope='AvgPool_0a_3x3')
branch_3 = slim.conv2d(
branch_3, depth(192), [1, 1], scope='Conv2d_0b_1x1')
net = tf.concat_v2([branch_0, branch_1, branch_2, branch_3], 3)
end_points[end_point] = net
if end_point == final_endpoint: return net, end_points
# mixed_10: 8 x 8 x 2048.
end_point = 'Mixed_7c'
with tf.variable_scope(end_point):
with tf.variable_scope('Branch_0'):
branch_0 = slim.conv2d(net, depth(320), [1, 1], scope='Conv2d_0a_1x1')
with tf.variable_scope('Branch_1'):
branch_1 = slim.conv2d(net, depth(384), [1, 1], scope='Conv2d_0a_1x1')
branch_1 = tf.concat_v2(
[
slim.conv2d(
branch_1, depth(384), [1, 3], scope='Conv2d_0b_1x3'),
slim.conv2d(
branch_1, depth(384), [3, 1], scope='Conv2d_0c_3x1')
],
3)
with tf.variable_scope('Branch_2'):
branch_2 = slim.conv2d(net, depth(448), [1, 1], scope='Conv2d_0a_1x1')
branch_2 = slim.conv2d(
branch_2, depth(384), [3, 3], scope='Conv2d_0b_3x3')
branch_2 = tf.concat_v2(
[
slim.conv2d(
branch_2, depth(384), [1, 3], scope='Conv2d_0c_1x3'),
slim.conv2d(
branch_2, depth(384), [3, 1], scope='Conv2d_0d_3x1')
],
3)
with tf.variable_scope('Branch_3'):
branch_3 = slim.avg_pool2d(net, [3, 3], scope='AvgPool_0a_3x3')
branch_3 = slim.conv2d(
branch_3, depth(192), [1, 1], scope='Conv2d_0b_1x1')
net = tf.concat_v2([branch_0, branch_1, branch_2, branch_3], 3)
end_points[end_point] = net
if end_point == final_endpoint: return net, end_points
raise ValueError('Unknown final endpoint %s' % final_endpoint)
def inception_v3(inputs,
num_classes=1000,
is_training=True,
dropout_keep_prob=0.8,
min_depth=16,
depth_multiplier=1.0,
prediction_fn=slim.softmax,
spatial_squeeze=True,
reuse=None,
scope='InceptionV3'):
"""Inception model from http://arxiv.org/abs/1512.00567.
"Rethinking the Inception Architecture for Computer Vision"
Christian Szegedy, Vincent Vanhoucke, Sergey Ioffe, Jonathon Shlens,
Zbigniew Wojna.
With the default arguments this method constructs the exact model defined in
the paper. However, one can experiment with variations of the inception_v3
network by changing arguments dropout_keep_prob, min_depth and
depth_multiplier.
The default image size used to train this network is 299x299.
Args:
inputs: a tensor of size [batch_size, height, width, channels].
num_classes: number of predicted classes.
is_training: whether is training or not.
dropout_keep_prob: the percentage of activation values that are retained.
min_depth: Minimum depth value (number of channels) for all convolution ops.
Enforced when depth_multiplier < 1, and not an active constraint when
depth_multiplier >= 1.
depth_multiplier: Float multiplier for the depth (number of channels)
for all convolution ops. The value must be greater than zero. Typical
usage will be to set this value in (0, 1) to reduce the number of
parameters or computation cost of the model.
prediction_fn: a function to get predictions out of logits.
spatial_squeeze: if True, logits is of shape is [B, C], if false logits is
of shape [B, 1, 1, C], where B is batch_size and C is number of classes.
reuse: whether or not the network and its variables should be reused. To be
able to reuse 'scope' must be given.
scope: Optional variable_scope.
Returns:
logits: the pre-softmax activations, a tensor of size
[batch_size, num_classes]
end_points: a dictionary from components of the network to the corresponding
activation.
Raises:
ValueError: if 'depth_multiplier' is less than or equal to zero.
"""
if depth_multiplier <= 0:
raise ValueError('depth_multiplier is not greater than zero.')
depth = lambda d: max(int(d * depth_multiplier), min_depth)
with tf.variable_scope(scope, 'InceptionV3', [inputs, num_classes],
reuse=reuse) as scope:
with slim.arg_scope([slim.batch_norm, slim.dropout],
is_training=is_training):
net, end_points = inception_v3_base(
inputs, scope=scope, min_depth=min_depth,
depth_multiplier=depth_multiplier)
# Auxiliary Head logits
with slim.arg_scope([slim.conv2d, slim.max_pool2d, slim.avg_pool2d],
stride=1, padding='SAME'):
aux_logits = end_points['Mixed_6e']
with tf.variable_scope('AuxLogits'):
aux_logits = slim.avg_pool2d(
aux_logits, [5, 5], stride=3, padding='VALID',
scope='AvgPool_1a_5x5')
aux_logits = slim.conv2d(aux_logits, depth(128), [1, 1],
scope='Conv2d_1b_1x1')
# Shape of feature map before the final layer.
kernel_size = _reduced_kernel_size_for_small_input(
aux_logits, [5, 5])
aux_logits = slim.conv2d(
aux_logits, depth(768), kernel_size,
weights_initializer=trunc_normal(0.01),
padding='VALID', scope='Conv2d_2a_{}x{}'.format(*kernel_size))
aux_logits = slim.conv2d(
aux_logits, num_classes, [1, 1], activation_fn=None,
normalizer_fn=None, weights_initializer=trunc_normal(0.001),
scope='Conv2d_2b_1x1')
if spatial_squeeze:
aux_logits = tf.squeeze(aux_logits, [1, 2], name='SpatialSqueeze')
end_points['AuxLogits'] = aux_logits
# Final pooling and prediction
with tf.variable_scope('Logits'):
kernel_size = _reduced_kernel_size_for_small_input(net, [8, 8])
net = slim.avg_pool2d(net, kernel_size, padding='VALID',
scope='AvgPool_1a_{}x{}'.format(*kernel_size))
# 1 x 1 x 2048
net = slim.dropout(net, keep_prob=dropout_keep_prob, scope='Dropout_1b')
end_points['PreLogits'] = net
# 2048
logits = slim.conv2d(net, num_classes, [1, 1], activation_fn=None,
normalizer_fn=None, scope='Conv2d_1c_1x1')
if spatial_squeeze:
logits = tf.squeeze(logits, [1, 2], name='SpatialSqueeze')
# 1000
end_points['Logits'] = logits
end_points['Predictions'] = prediction_fn(logits, scope='Predictions')
return logits, end_points
inception_v3.default_image_size = 299
def _reduced_kernel_size_for_small_input(input_tensor, kernel_size):
"""Define kernel size which is automatically reduced for small input.
If the shape of the input images is unknown at graph construction time this
function assumes that the input images are is large enough.
Args:
input_tensor: input tensor of size [batch_size, height, width, channels].
kernel_size: desired kernel size of length 2: [kernel_height, kernel_width]
Returns:
a tensor with the kernel size.
TODO(jrru): Make this function work with unknown shapes. Theoretically, this
can be done with the code below. Problems are two-fold: (1) If the shape was
known, it will be lost. (2) inception.slim.ops._two_element_tuple cannot
handle tensors that define the kernel size.
shape = tf.shape(input_tensor)
return = tf.stack([tf.minimum(shape[1], kernel_size[0]),
tf.minimum(shape[2], kernel_size[1])])
"""
shape = input_tensor.get_shape().as_list()
if shape[1] is None or shape[2] is None:
kernel_size_out = kernel_size
else:
kernel_size_out = [min(shape[1], kernel_size[0]),
min(shape[2], kernel_size[1])]
return kernel_size_out
def inception_v3_arg_scope(weight_decay=0.00004,
stddev=0.1,
batch_norm_var_collection='moving_vars'):
"""Defines the default InceptionV3 arg scope.
Args:
weight_decay: The weight decay to use for regularizing the model.
stddev: The standard deviation of the trunctated normal weight initializer.
batch_norm_var_collection: The name of the collection for the batch norm
variables.
Returns:
An `arg_scope` to use for the inception v3 model.
"""
batch_norm_params = {
# Decay for the moving averages.
'decay': 0.9997,
# epsilon to prevent 0s in variance.
'epsilon': 0.001,
# collection containing update_ops.
'updates_collections': tf.GraphKeys.UPDATE_OPS,
# collection containing the moving mean and moving variance.
'variables_collections': {
'beta': None,
'gamma': None,
'moving_mean': [batch_norm_var_collection],
'moving_variance': [batch_norm_var_collection],
}
}
# Set weight_decay for weights in Conv and FC layers.
with slim.arg_scope([slim.conv2d, slim.fully_connected],
weights_regularizer=slim.l2_regularizer(weight_decay)):
with slim.arg_scope(
[slim.conv2d],
weights_initializer=tf.truncated_normal_initializer(stddev=stddev),
activation_fn=tf.nn.relu,
normalizer_fn=slim.batch_norm,
normalizer_params=batch_norm_params) as sc:
return sc
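# Illustrative usage (TF1-style graph; a batch of 299x299 RGB images assumed):
#   images = tf.placeholder(tf.float32, [32, 299, 299, 3])
#   with slim.arg_scope(inception_v3_arg_scope()):
#     logits, end_points = inception_v3(images, num_classes=1000)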
| [
"[email protected]"
] | |
7c464d2064133dba060722b62dd9afa6f8efab4a | 7864ab2c567f5f3a98e7ab38ff38a3bd7c816fde | /fireplace/cards/gvg/spare_parts.py | eabe7299eaca68fb72038d7f15c9cb40af004331 | [] | no_license | gmagogsfm/fireplace | bfa1b57254b673317442518a997c635183bd3e61 | f16ee0659310a003d54552d0660ea3eb15c4da3f | refs/heads/master | 2021-01-09T09:06:35.035741 | 2015-02-09T14:30:24 | 2015-02-09T14:30:24 | 28,540,192 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 799 | py | """
Spare Parts
"""
from ..utils import *
# Armor Plating
class PART_001:
action = buffTarget("PART_001e")
class PART_001e:
Health = 1
# Time Rewinder
class PART_002:
action = bounceTarget
# Rusty Horn
class PART_003:
def action(self, target):
target.taunt = True
# Finicky Cloakfield
class PART_004:
action = buffTarget("PART_004e")
class PART_004e:
Stealth = True
def OWN_TURN_BEGIN(self):
self.destroy()
# Emergency Coolant
class PART_005:
def action(self, target):
target.frozen = True
# Reversing Switch
class PART_006:
action = buffTarget("PART_006a")
class PART_006a:
def apply(self, target):
atk = target.atk
self.setAtk(target.health)
self.setHealth(atk)
# Whirling Blades
class PART_007:
action = buffTarget("PART_007e")
class PART_007e:
Atk = 1
| [
"[email protected]"
] | |
6e8c6813730dff827293a1ea8bb73eac583c808b | 32a3396cf8d879c92c09f5411af946084ed2ca3c | /blog/company_name_context_processor.py | 945c41dda6a2e989d60d18204aab0c0e256507cc | [] | no_license | mPowering/django-mpowering-healthcare | 5ae527dd7abac8d2f9debc506b6cb197b4db0ab8 | 52cff8d864d9363f0115831963bfa43a92ee2b47 | refs/heads/master | 2020-12-25T18:16:32.992431 | 2014-05-23T15:52:46 | 2014-05-23T15:52:46 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 129 | py | # Django imports
from django.conf import settings
def get_company_name(request):
return {"company": settings.COMPANY_NAME}
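# To activate this processor (illustrative path), add
# 'blog.company_name_context_processor.get_company_name' to the
# context processors list in the project's template settings.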
| [
"[email protected]"
] | |
9582efa970cbb8fbcb5908d20387bee0ac01cdcb | 00b405a49ac6108d24986243c4b52fa53fb58acc | /0517_super_washing_machines.py | 2f24982ec5d0bc0f3747d006eebaca662cd97192 | [] | no_license | Shin-jay7/LeetCode | 0325983fff95bfbc43a528812582cbf9b7c0c2f2 | 953b0b19764744753f01c661da969bdab6521504 | refs/heads/master | 2023-07-19T07:17:21.513531 | 2023-07-15T06:05:06 | 2023-07-15T06:05:06 | 231,285,199 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 549 | py | from __future__ import annotations
from typing import List
class Solution:
def findMinMoves(self, machines: List[int]) -> int:
total, n = sum(machines), len(machines)
if total % n:
return -1
target, ans, to_right = total // n, 0, 0
# to_right: num of dresses to pass to the right machine
# dresses: num of dresses in the machine
for dresses in machines:
to_right = dresses + to_right - target
ans = max(ans, abs(to_right), dresses-target)
return ans
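# Worked example (illustrative): machines = [1, 0, 5] -> total 6, target 2;
# to_right takes the values -1, -3, 0, so findMinMoves returns 3.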
| [
"[email protected]"
] | |
8d615fa638b973f4aa9bfba25588878e9931efc2 | ae3f23efcdc4b7fdd1c224043d0ece002955956e | /xplace/xplace/network/domains/apps.py | 800f984530bd9ae92e8cb8591bf123ddcc22521a | [] | no_license | alexeysofin/xplace | 4466682fe76c808288d69f2808ddbca38a583bc4 | 9f12f066a62fae4e789bee94e5e554cc6de26d90 | refs/heads/master | 2023-01-12T01:02:40.137609 | 2021-02-14T20:41:30 | 2021-02-14T20:41:30 | 208,021,139 | 0 | 0 | null | 2023-01-04T10:18:46 | 2019-09-12T10:07:17 | Python | UTF-8 | Python | false | false | 104 | py | from django.apps import AppConfig
class DomainsConfig(AppConfig):
name = 'xplace.network.domains'
| [
"sofin.moffin"
] | sofin.moffin |
84d78b6245a076777dc308a6a42e78272b8479ec | ca7aa979e7059467e158830b76673f5b77a0f5a3 | /Python_codes/p03330/s929352693.py | 253b3802f55ad2c29dbbdab985c6ea0170a9fbee | [] | no_license | Aasthaengg/IBMdataset | 7abb6cbcc4fb03ef5ca68ac64ba460c4a64f8901 | f33f1c5c3b16d0ea8d1f5a7d479ad288bb3f48d8 | refs/heads/main | 2023-04-22T10:22:44.763102 | 2021-05-13T17:27:22 | 2021-05-13T17:27:22 | 367,112,348 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 712 | py | n,c=map(int,input().split())
irohen=[list(map(int,input().split())) for i in range(c)]  # cost matrix: repaint color i+1 as color j+1
grid=[list(map(int,input().split())) for i in range(n)]
rem0=[0]*c  # color histogram of cells with (i+j)%3 == 0
rem1=[0]*c  # color histogram of cells with (i+j)%3 == 1
rem2=[0]*c  # color histogram of cells with (i+j)%3 == 2
for i in range(n):
for j in range(n):
if (i+j)%3==0:
rem0[grid[i][j]-1]+=1
elif (i+j)%3==1:
rem1[grid[i][j]-1]+=1
elif (i+j)%3==2:
rem2[grid[i][j]-1]+=1
ans=10**10  # effectively infinity
for i in range(c):
for j in range(c):
for h in range(c):
chk=0
if i==j or i==h or j==h:
continue
for k in range(c):
chk+=rem0[k]*irohen[k][i]+rem1[k]*irohen[k][j]+rem2[k]*irohen[k][h]
            if chk < ans:
                ans=chk
print(ans)
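# The triple loop tries every assignment of three distinct colors to the three
# (i+j)%3 residue classes; chk sums the repaint costs, O(c**4) overall.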
| [
"[email protected]"
] | |
c6658efc9c3b1000d0b1be621573728ac5c30b16 | fc4f97918ac9366837cb05f51091178bbf37ac18 | /shelve_ex.py | d7cf2d49cdfc05a1eac38bbb50efef5cfafaa194 | [] | no_license | zoejane/automate-python | ae72ef7bed291b757ee41d578844c132cd1fc192 | 9c4e8ce69da21dc58e4fc85604d9e1fc848d8c3e | refs/heads/master | 2021-01-10T01:51:23.009746 | 2015-10-24T13:14:09 | 2015-10-24T13:14:09 | 43,808,231 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 272 | py | import shelve
# shelve persists Python objects (lists, dictionaries, etc.) to a file,
# accessed like a dictionary.
shelfFile = shelve.open('mydata')
shelfFile['cats']=['Pooka','Simon','Cleo']
shelfFile.close()
shelfFile = shelve.open('mydata')
print(shelfFile['cats'])
print(list(shelfFile.keys()))
print(list(shelfFile.values()))
shelfFile.close()
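# Expected output, given the shelf written above:
#   ['Pooka', 'Simon', 'Cleo']
#   ['cats']
#   [['Pooka', 'Simon', 'Cleo']]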
| [
"[email protected]"
] | |
7b53a6dde1bd8e550782891bc7ea6c8a288fb41e | b005369db9f220e8548a11fceab8543a726def3c | /stacked-autoencoder-pytorch/untitled.py | d5d81ba59073148107d09a96f751aa0269d6617a | [] | no_license | liuyanqi/shallow_learning | 9ec2b365986f66f2a3c07d377e3d979a07ebb2bd | b5fafb5b6ae5886bbd1a4ed03611eaee5481b627 | refs/heads/master | 2020-04-09T03:27:21.907715 | 2018-12-01T22:16:23 | 2018-12-01T22:16:23 | 159,983,237 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,552 | py | import os
import torch
import torch._utils
try:
torch._utils._rebuild_tensor_v2
except AttributeError:
def _rebuild_tensor_v2(storage, storage_offset, size, stride, requires_grad, backward_hooks):
tensor = torch._utils._rebuild_tensor(storage, storage_offset, size, stride)
tensor.requires_grad = requires_grad
tensor._backward_hooks = backward_hooks
return tensor
torch._utils._rebuild_tensor_v2 = _rebuild_tensor_v2
import torchvision
from torch import nn
from torch.autograd import Variable
from torch.utils.data import DataLoader
from torchvision import transforms
from torchvision.datasets import CIFAR10
from torchvision.utils import save_image
from model3 import VAE
if not os.path.exists('./mlp_img'):
os.mkdir('./mlp_img')
def to_img(x):
x = x.view(x.size(0), 3, 32, 32)
return x
num_epochs = 10
batch_size = 128
learning_rate = 1e-3
transform = transforms.Compose([transforms.ToTensor()])
dataset = torchvision.datasets.CIFAR10(root='./data', train=True, transform=transform)
dataloader = torch.utils.data.DataLoader(dataset, batch_size=batch_size,
shuffle=True, num_workers=2)
testset = torchvision.datasets.CIFAR10(root='./data', train=False, transform=transform)
testloader = torch.utils.data.DataLoader(testset, batch_size=batch_size,
shuffle=False, num_workers=2)
model = VAE().cuda()
model.train()
for epoch in range(20):
for i, data in enumerate(dataloader):
img, _ = data
# noisy_img = theano_rng.binomial(size=img.shape, n=1, p=0.1, dtype=theano.config.floatX) * img
img = Variable(img).cuda()
# ===================forward=====================
output = model(img, epoch)
# ===================log========================
# print("sparsity:", torch.sum(output.data > 0.0)*100 / output.data.numel())
x_reconstructed = model.reconstruct(output)
orig = to_img(img.cpu().data)
save_image(orig, './imgs_cifar/orig_1_{}.png'.format(epoch))
pic = to_img(x_reconstructed.cpu().data)
save_image(pic, './imgs_cifar/reconstruction_1_{}.png'.format(epoch))
##fine tuning
model.eval()
classifier = nn.Sequential(nn.Linear(8*8*200, 324), nn.ReLU(), nn.Linear(324, 10))  # no Softmax: CrossEntropyLoss expects raw logits
criterion = nn.CrossEntropyLoss()
params = list(model.encoder.parameters()) + list(classifier.parameters())  # parameters of the trained instance, not the VAE class
optimizer = torch.optim.SGD(params, lr=0.1)
for epoch in range(30):
    correct = 0  # reset the correct-prediction count each epoch
for i, data in enumerate(dataloader):
img, target = data
img = Variable(img).cuda()
target = Variable(target).cuda()
        feature = model(img, epoch)  # use the trained model instance; VAE(img) would construct a new model
feature = feature.view(feature.size(0), -1)
prediction = classifier(feature)
loss = criterion(prediction, target)
optimizer.zero_grad()
loss.backward()
optimizer.step()
pred = prediction.data.max(1, keepdim=True)[1]
correct += pred.eq(target.data.view_as(pred)).cpu().sum()
# if epoch % 10 == 0:
# x = to_img(img.cpu().data)
# x_hat = to_img(output.cpu().data)
# x_noisy = to_img(noisy_img.cpu().data)
# weights = to_img(model.encoder[0].weight.cpu().data)
# save_image(x, './mlp_img/x_{}.png'.format(epoch))
# save_image(x_hat, './mlp_img/x_hat_{}.png'.format(epoch))
# save_image(x_noisy, './mlp_img/x_noisy_{}.png'.format(epoch))
# save_image(weights, './filters/epoch_{}.png'.format(epoch))
# torch.save(model.state_dict(), './sim_autoencoder.pth') | [
"[email protected]"
] | |
ae620efea9987cc629259704e3869f4454db1c9c | 06164402e4a9c46a03d579175e588519dbd4048d | /experiments/experiments_toy/test_varying_missing/nmtf_icm.py | 4434c3df200e94d25a479b0008f59fced2e0e8ae | [
"Apache-2.0"
] | permissive | XuanHeIIIS/BNMTF | 19547e36466ecee8d45fb0002d305ee6b7ba6c23 | 34df0c3cebc5e67a5e39762b9305b75d73a2a0e0 | refs/heads/master | 2020-03-27T12:47:58.375964 | 2018-06-10T10:22:19 | 2018-06-10T10:22:19 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 22,991 | py | """
Test the performance of ICM for recovering a toy dataset, where
we vary the fraction of entries that are missing.
We use the correct number of latent factors and same priors as used to generate the data.
I, J, K, L = 100, 80, 5, 5
"""
import sys, os
project_location = os.path.dirname(__file__)+"/../../../../"
sys.path.append(project_location)
from BNMTF.code.models.nmtf_icm import nmtf_icm
from BNMTF.data_toy.bnmtf.generate_bnmtf import try_generate_M
from BNMTF.code.cross_validation.mask import calc_inverse_M
import numpy, matplotlib.pyplot as plt
##########
fractions_unknown = [0.05, 0.1, 0.15, 0.2, 0.25, 0.3, 0.35, 0.4, 0.45, 0.5, 0.55, 0.6, 0.65, 0.7, 0.75, 0.8, 0.85, 0.9, 0.95] #[ 0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9 ]
input_folder = project_location+"BNMTF/data_toy/bnmtf/"
repeats = 10 # number of times we try each fraction
iterations = 1000
I,J,K,L = 100, 80, 5, 5
alpha, beta = 1., 1.
lambdaF = numpy.ones((I,K))/10.
lambdaS = numpy.ones((K,L))/10.
lambdaG = numpy.ones((J,L))/10.
priors = { 'alpha':alpha, 'beta':beta, 'lambdaF':lambdaF, 'lambdaS':lambdaS, 'lambdaG':lambdaG }
init_S = 'random'
init_FG = 'kmeans'
minimum_TN = 0.1
metrics = ['MSE', 'R^2', 'Rp']
# Load in data
R = numpy.loadtxt(input_folder+"R.txt")
# Seed all of the methods the same
numpy.random.seed(3)
# Generate matrices M - one list of M's for each fraction
M_attempts = 100
all_Ms = [
[try_generate_M(I,J,fraction,M_attempts) for r in range(0,repeats)]
for fraction in fractions_unknown
]
all_Ms_test = [ [calc_inverse_M(M) for M in Ms] for Ms in all_Ms ]
# Make sure each M has no empty rows or columns
def check_empty_rows_columns(M,fraction):
sums_columns = M.sum(axis=0)
sums_rows = M.sum(axis=1)
for i,c in enumerate(sums_rows):
assert c != 0, "Fully unobserved row in M, row %s. Fraction %s." % (i,fraction)
for j,c in enumerate(sums_columns):
assert c != 0, "Fully unobserved column in M, column %s. Fraction %s." % (j,fraction)
for Ms,fraction in zip(all_Ms,fractions_unknown):
for M in Ms:
check_empty_rows_columns(M,fraction)
# We now run the ICM algorithm on each of the M's for each fraction.
all_performances = {metric:[] for metric in metrics}
average_performances = {metric:[] for metric in metrics} # averaged over repeats
for (fraction,Ms,Ms_test) in zip(fractions_unknown,all_Ms,all_Ms_test):
print "Trying fraction %s." % fraction
# Run the algorithm <repeats> times and store all the performances
for metric in metrics:
all_performances[metric].append([])
for (repeat,M,M_test) in zip(range(0,repeats),Ms,Ms_test):
print "Repeat %s of fraction %s." % (repeat+1, fraction)
        # Run the ICM algorithm
NMTF = nmtf_icm(R,M,K,L,priors)
NMTF.initialise(init_S,init_FG)
NMTF.run(iterations,minimum_TN=minimum_TN)
# Measure the performances
performances = NMTF.predict(M_test)
for metric in metrics:
# Add this metric's performance to the list of <repeat> performances for this fraction
all_performances[metric][-1].append(performances[metric])
# Compute the average across attempts
for metric in metrics:
average_performances[metric].append(sum(all_performances[metric][-1])/repeats)
print "repeats=%s \nfractions_unknown = %s \nall_performances = %s \naverage_performances = %s" % \
(repeats,fractions_unknown,all_performances,average_performances)
'''
repeats=10
fractions_unknown = [0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9]
all_performances = {'R^2': [[0.9975927090292699, 0.9980081023219172, 0.9978155521077262, 0.9975554561095054, 0.997923878035404, 0.9982099616576611, 0.9981625860413992, 0.9978359736733243, 0.9978779504510271, 0.997515342709776], [0.9975890536153215, 0.9980292910066693, 0.9979194412541915, 0.9979658918924728, 0.9975677425995196, 0.9980789962725235, 0.9978807816742185, 0.9979738215496476, 0.9977169179356461, 0.9979390527530128], [0.9977695576055731, 0.9977719145111632, 0.9977470654043835, 0.9975949936294892, 0.9978477764618622, 0.9974169395818809, 0.997792227822902, 0.9978193213459388, 0.9975648828625464, 0.9975486588574607], [0.9976570706503591, 0.997912315499696, 0.9975888360031283, 0.9975517834065201, 0.997401303631934, 0.9976416958006299, 0.9975701396700851, 0.9977735347994222, 0.9977381352638842, 0.9978347279701963], [0.9972048400356697, 0.9974121335180816, 0.9972725006709184, 0.9973945651110651, 0.9975860214597151, 0.9977031023255696, 0.9973354940256396, 0.9973366323745044, 0.9975166443424057, 0.9973194422122789], [0.9968876942549296, 0.9974666638945257, 0.9971232162590854, 0.9965339728630452, 0.9975274512294184, 0.9970835492426965, 0.9968105183626977, 0.9973085027696141, 0.9970350876802735, 0.9971047649757829], [0.9956004866700275, 0.9960425084997317, 0.9956908333301903, 0.9963534897720362, 0.9962563685498766, 0.996272429736938, 0.9963548678337069, 0.9958440798191359, 0.9957456641450465, 0.9958371822889462], [0.966490466863117, 0.9504011247182779, 0.9876873238870871, 0.9837743100507569, 0.9934731765762597, 0.9628010730795674, 0.9929675264889107, 0.9852611319618546, 0.980928068556346, 0.9846092579536968], [0.7448940841360358, 0.5644707788072548, 0.7317341967977111, 0.6268865264832666, 0.6904933275719349, 0.6529445005908574, 0.5883958546834347, 0.5221206128238185, 0.8335961861162272, 0.721881132667106]], 'MSE': [[1.2359111050754783, 1.3042928410580119, 1.2376394595262963, 1.2275988732827634, 1.2382569585089236, 1.1418971298061806, 1.1441518803914916, 1.2223920892944424, 1.2247789351682155, 1.2417987434632936], [1.2792307137209402, 1.2095820439287437, 1.204666514137299, 1.2176876197524185, 1.2450080035692148, 1.2369182353181538, 1.2530229409862728, 1.1919084524073353, 1.2828978056725004, 1.2545687977643687], [1.306426127698413, 1.2906457456653304, 1.3398826196253992, 1.3881057063319009, 1.2359863502075081, 1.3861514029887203, 1.2824578439380021, 1.368772494293425, 1.2942788237192839, 1.2678432597883094], [1.3495310437055388, 1.3641408699751489, 1.3558539963324807, 1.37733755299835, 1.3842753912555095, 1.3138440184813585, 1.4012705880437191, 1.2997572216472284, 1.3232244381774516, 1.3412297465782208], [1.4845439502286777, 1.4236733167015867, 1.4829075685316131, 1.5204074140569728, 1.4671178593650924, 1.296597105766925, 1.4165389674196776, 1.5220963946036925, 1.4511389025847241, 1.5223750280398956], [1.7278422464010317, 1.4371317131299706, 1.664199220301334, 1.8513490228163638, 1.4211713429603694, 1.6371637612297056, 1.7683467037268072, 1.5486671570896307, 1.7350633312338388, 1.6605389189725468], [2.6195785679210499, 2.3095470903110709, 2.4722099129359854, 2.1205864798805969, 2.1261213726651982, 2.1416350443412218, 2.1391932810654732, 2.3567048373783179, 2.4427368406574317, 2.3864977291757903], [19.054484310140925, 28.26033451828971, 7.1455382084831127, 9.5589530429614076, 3.744672733276559, 22.104946580343103, 3.930619863957872, 8.4495494068552048, 11.058583546825346, 8.8319755926652324], [144.22416058491476, 249.60357823247341, 153.54572226357712, 211.09141802208796, 176.71761117002131, 
194.69471829847672, 237.88233262720109, 275.26569225321771, 95.593068425483409, 162.0412584829115]], 'Rp': [[0.99880420951544502, 0.99900395021136434, 0.99890857558075508, 0.99878035694496503, 0.99896500446856951, 0.99910731663444752, 0.99908550333940471, 0.9989248480995444, 0.99893876976564466, 0.99876195708329107], [0.99879681787297547, 0.99901489703340884, 0.99895955595351549, 0.99898584690408676, 0.99878738899022479, 0.99904044836434747, 0.99894016179002976, 0.99898821843986496, 0.99885993625712199, 0.99897197416089323], [0.99888436491207322, 0.99888592938366239, 0.99887397749315432, 0.9987985413547712, 0.99892389506668577, 0.99870798064138, 0.99889670828874466, 0.99890921170628233, 0.99878353194075076, 0.99877442025883056], [0.99882953400638341, 0.99895874015184949, 0.99879652488340387, 0.99877596405665137, 0.99869999080131067, 0.99882300349094955, 0.99878850333901692, 0.99888736849994753, 0.99887119732067753, 0.99891742914772053], [0.99860239521475547, 0.99870576989184867, 0.99863740745075846, 0.99869652065113557, 0.99880231392030572, 0.99885117938926438, 0.99867060989233092, 0.99867208619182257, 0.99875800884615606, 0.99865894212951556], [0.99844309014561028, 0.99873292732912911, 0.99856256752744232, 0.99826775109421695, 0.99876308359431631, 0.99854175878031926, 0.99840826616033396, 0.99865428180438387, 0.99852227404052185, 0.99855668698014199], [0.99780564121926174, 0.99802260055713676, 0.99786671812837191, 0.99819140855105448, 0.99813263124688767, 0.99813520389691202, 0.99817671233486327, 0.99793440286027668, 0.99787319646144834, 0.99791773308442666], [0.98362796692557153, 0.97694437268254153, 0.993825075948362, 0.991854258729025, 0.99673194445485813, 0.98223469976977984, 0.99649303700878877, 0.99265967783514075, 0.99053006207596628, 0.99229389145788727], [0.89031297236990137, 0.83497912445121836, 0.87060834703560308, 0.83604847675271821, 0.87662483639964928, 0.85831295806558006, 0.83403806731021446, 0.81720594968054239, 0.92663402534958672, 0.88144323801393565]]}
average_performances = {'R^2': [0.9978497512137011, 0.9978660990553223, 0.9976873338083202, 0.9976669542695855, 0.9974081376075847, 0.9970881421532068, 0.9959997910645635, 0.9788393460135871, 0.6677417200677647], 'MSE': [1.2218718015575099, 1.2375491127257248, 1.3160550374256292, 1.3510464867195005, 1.4587396507298858, 1.64514734178616, 2.3114811156332133, 12.213965780379848, 190.06595603603648], 'Rp': [0.99892804916434308, 0.99893452457664689, 0.99884385610463355, 0.99883482556979108, 0.9987055233577895, 0.99854526874564153, 0.99800562483406396, 0.98971949868879217, 0.86262079954289494]}
repeats=10
fractions_unknown = [0.05, 0.1, 0.15, 0.2, 0.25, 0.3, 0.35, 0.4, 0.45, 0.5, 0.55, 0.6, 0.65, 0.7, 0.75, 0.8, 0.85, 0.9, 0.95]
all_performances = {'R^2': [[0.9977960716139789, 0.9973834994890584, 0.9983016043107281, 0.9975472592906569, 0.9978136425496568, 0.9982280201387091, 0.9981965828582579, 0.9982640230093016, 0.9983963415199908, 0.9986477681687802], [0.9980111539591642, 0.9975445304412317, 0.9978931295438996, 0.9974807190762393, 0.99757835772839, 0.9983210217988878, 0.9978340730406277, 0.9967995704214855, 0.9978108885059572, 0.9979979938512343], [0.9973146791772716, 0.9980396394604358, 0.9977687638443559, 0.997904081502049, 0.9975161817834871, 0.998121638140143, 0.9976928630836424, 0.9978737988025677, 0.9977120376906787, 0.9972076792556375], [0.9978259391246938, 0.9977884822776646, 0.9980475595309849, 0.9976484374258442, 0.9976216503705784, 0.99789535037804, 0.9978651088505368, 0.9979826727856517, 0.997544456824371, 0.9975883619808774], [0.9975896865555836, 0.9976926057034096, 0.9976484871022528, 0.9978103442988308, 0.9978474789857447, 0.9972883842908079, 0.997677596006807, 0.9977181786238531, 0.9977999285296558, 0.9977976626898056], [0.9975737461445836, 0.9975423188792942, 0.9977169797502722, 0.9979126318421085, 0.9975225209643708, 0.997757794048769, 0.9975233529826689, 0.9978989446831908, 0.9979259356750545, 0.9978514364388203], [0.9978665622537566, 0.9977476716261016, 0.9975760561841783, 0.99790900009907, 0.9977387640460015, 0.9976825507814665, 0.9977414083572004, 0.997848613997625, 0.9977360605400215, 0.9975028818526086], [0.9975234749065116, 0.9974798654620204, 0.9974761474309704, 0.9973728835895923, 0.9976878829209026, 0.9976997532110653, 0.9974807774566232, 0.9975172657032069, 0.9976588312092749, 0.9977835395758824], [0.9976916099058944, 0.9973953438727917, 0.997230212317283, 0.9973304698418283, 0.9977560222676292, 0.9976886347066465, 0.997663466707994, 0.997508865564486, 0.997724741393137, 0.9976413063374209], [0.9972797546513117, 0.9973940868507666, 0.9972898855173566, 0.9974036286735548, 0.9974003668260464, 0.9974139449622138, 0.9975140464267697, 0.9975152871072698, 0.997777046292111, 0.9975378762795625], [0.9971501213232983, 0.9971977857080206, 0.9975558498142384, 0.9974912047361075, 0.9972945765279337, 0.997352246105013, 0.9973115052336812, 0.9974688120116599, 0.9974422419135635, 0.9974107073837184], [0.9970995270455087, 0.9973392783898744, 0.9972834182452326, 0.9972311347022608, 0.9972923039059131, 0.997197545842884, 0.9970868777460126, 0.9973734774086613, 0.9974327173642094, 0.9971003899531427], [0.9971298838288146, 0.9967578636383226, 0.9966776909374893, 0.9970304781422635, 0.9960345037762506, 0.9968291959653174, 0.9967697623614128, 0.9968179355654329, 0.9967998002156155, 0.9968017819420039], [0.9910039919105932, 0.9960555506964023, 0.9962509552455636, 0.9938998176987858, 0.9966791511827771, 0.9966366624720836, 0.9967903317782414, 0.9942109857060469, 0.9962207623480237, 0.9960353282783987], [0.995842543538116, 0.9955620025360832, 0.995907521770374, 0.8829053844080554, 0.9959229055620908, 0.9904842012728244, 0.9949252711617799, 0.9958227145540727, 0.9960905531238834, 0.995899366309018], [0.9934617025608774, 0.7371740107582515, 0.9922826081721235, 0.9934648038262894, 0.9838012678489808, 0.9896178071196157, 0.9814919718710041, 0.9812151164783904, 0.9837925411381649, 0.982883296991726], [0.8675910902715176, 0.9719095961333374, 0.8932930234717759, 0.9164207092822406, 0.8919421316904165, 0.9005238308496155, 0.9441155496017423, 0.9774620451163136, 0.9355822571031298, 0.9798571574602224], [0.5891624711472134, 0.7648666894351978, 0.7844874522667273, 0.6699633816459498, 0.5912874125814198, 
0.7938940480533925, 0.7654709776302533, 0.372397391797735, 0.5689501208164378, 0.5707875297302283], [0.5356820022435773, 0.6425334772832034, 0.5279011792193716, 0.25505905517157423, 0.45448485610879363, 0.471030154278161, 0.48683253473635146, 0.6287791547754051, 0.6517576137524674, 0.49949659259123036]], 'MSE': [[1.1450246392114909, 1.2986670590230718, 1.262895968808295, 1.29825964658808, 1.4138665350704542, 1.0724022212929925, 1.2002489707376693, 1.2501162558286667, 1.1617217382766478, 1.2223468964174509], [1.1885623802538827, 1.2545136796398837, 1.4094536329111025, 1.329634618943272, 1.2521469466946074, 1.2387188840155623, 1.2177932802807248, 1.1976076890097385, 1.2328860687036776, 1.3004538688910794], [1.3091331188036246, 1.1699102110675565, 1.3689684255984995, 1.2638748078391044, 1.2833228665268406, 1.1564118609700051, 1.3209733440027862, 1.2762883138271994, 1.3163661787371224, 1.3859241245280605], [1.3419589522616153, 1.2464529617661497, 1.2874660589480103, 1.3935502398437702, 1.3637977502174352, 1.2224412621837066, 1.2211385805166233, 1.2027964656159795, 1.3864034937354353, 1.2795360946164827], [1.2856684807702792, 1.3319642545033934, 1.322338998508074, 1.2888487786435445, 1.3173376617480976, 1.4271941583652452, 1.3081757099208153, 1.4142075696016867, 1.291599600542312, 1.2296601238294209], [1.4561542408876869, 1.3548394884532, 1.2474253593911397, 1.2695165954654954, 1.3880053786827933, 1.3203555463773231, 1.3175642196349024, 1.2377602302549882, 1.2305351176261241, 1.2983851052958166], [1.2550550812445838, 1.2485573168105362, 1.3860457578407819, 1.272664109751503, 1.2067096392920351, 1.3227053456436386, 1.3503037860031089, 1.2710761506507637, 1.3608981059913599, 1.3585545973728475], [1.3565973014319042, 1.4078421372954384, 1.3681682904899881, 1.3815523823850144, 1.3430060224882041, 1.3671747729303292, 1.3566905483801273, 1.4243779991670056, 1.4010205397342466, 1.3502767881290783], [1.4450708655967792, 1.4169715322519969, 1.6678115220273113, 1.4225154528631265, 1.4015539722892949, 1.3414530355518277, 1.364043459535724, 1.3814473757333345, 1.3480544195167985, 1.3751031783066732], [1.4427006547855894, 1.494580879551638, 1.5942235427118878, 1.3599595844761085, 1.4943169638141514, 1.4702564716750393, 1.4855692701548133, 1.4435876091692579, 1.2815842421261499, 1.4210031425646643], [1.5772181401336689, 1.5654595031984846, 1.4069078858228075, 1.4777761152040079, 1.5552847083515597, 1.4840830577653927, 1.4825277592310759, 1.4862587456687619, 1.5207591062057491, 1.469974108855759], [1.6546548113350283, 1.4618935014338796, 1.6169883281110622, 1.5151526511788675, 1.5664548144455308, 1.6764140454342262, 1.6725771428458265, 1.4631902851047409, 1.5554480716787247, 1.6370529724262626], [1.6367548364088857, 1.8550847285144507, 1.9690461104081762, 1.6625927306332324, 2.2229930695515598, 1.8537641144518238, 1.7738650420783093, 1.782531880977406, 1.875946833165107, 1.8138575293636237], [5.1868853495123961, 2.2725482800187429, 2.2197745665901354, 3.5328223673119004, 1.8576404284898198, 1.9223835554790278, 1.7757916530539961, 3.3120375395504573, 2.1814882442868133, 2.3003375517405131], [2.3607550410019691, 2.5527398367559089, 2.3819341566204906, 66.333478408471791, 2.4509302605799901, 5.5157660021332902, 2.916088083254964, 2.3888283872327198, 2.2343597198439613, 2.2393921157266408], [3.8051959107529325, 156.05426900405925, 4.5022766451240486, 3.7695914924360454, 9.2785189900528913, 6.0528134400167906, 10.770496666154495, 10.867618932373919, 9.2052455851677433, 9.8885589088119588], [77.810189469115016, 
16.224653856134744, 61.59555950659238, 46.618829263148868, 60.578534656481047, 56.644269349693523, 33.140001594590785, 12.840300085843067, 37.187807127884774, 11.728983634417434], [236.7423844286273, 130.38743451940667, 123.03203486810101, 190.50870291106733, 231.82376771486133, 121.05394140124487, 134.42958943368492, 360.28596416203214, 244.40634070142011, 247.17188547620017], [270.0540370588659, 205.53242824756143, 267.84105795700276, 426.9337636314288, 315.32642727762124, 301.20774639438889, 291.25795132420939, 213.66670393856165, 198.75373213764905, 287.60860431144732]], 'Rp': [[0.99890935256924096, 0.99869185280726025, 0.99916276320407815, 0.99877676385742276, 0.99891310989709747, 0.99911797993527129, 0.99910187286913665, 0.99913797417060035, 0.99920288198206242, 0.999330249353373], [0.99900615287740258, 0.99877516828671953, 0.99896692788050023, 0.99874957459175939, 0.99878974075166183, 0.99916734184053047, 0.99891746995248221, 0.99840579799538876, 0.99891356947634524, 0.99900600476308365], [0.99866262837407427, 0.99902441774235584, 0.99888640359126069, 0.99895339611985856, 0.99876404317948186, 0.99906197807601993, 0.9988470436151502, 0.99893773743809144, 0.99886585638400172, 0.9986071721895754], [0.99892628533831729, 0.99889735003056468, 0.99902385095087365, 0.99882411479595268, 0.9988113114169318, 0.99894900047793445, 0.99893444048330937, 0.99899212367256651, 0.99877418661514783, 0.99879851371599571], [0.9987953766991271, 0.99884814006291944, 0.99882534167464987, 0.99890576518535468, 0.9989259470544104, 0.99865174291269776, 0.99883983190854908, 0.99886078239354525, 0.99889955170821354, 0.99889933271644105], [0.99879254271427154, 0.9987752750031017, 0.99885837198547611, 0.99895647699378343, 0.99876106272044007, 0.99887904382598092, 0.99876613627728883, 0.99894958822151481, 0.9989647243004387, 0.99892730066312319], [0.99894393852846219, 0.99887633512073926, 0.99879137255226835, 0.99895511888467603, 0.99886879198767853, 0.99884571172590897, 0.99887216862001826, 0.99892833150295557, 0.9988695322819654, 0.99875289107495691], [0.99876347416434419, 0.99874326175445161, 0.99874014046974335, 0.99868807892266076, 0.99884534157992444, 0.99885015965869572, 0.99874240642417889, 0.99876339961357352, 0.99882908830951045, 0.99889219125682949], [0.99884824688476104, 0.99869920655871125, 0.99861826203745563, 0.99867253197609851, 0.99888351349247551, 0.99884688875393157, 0.99883458545343429, 0.99875585601283423, 0.99886183194494327, 0.99882304720438408], [0.99864178242640111, 0.99870277918222594, 0.99865088053712014, 0.99870795220847663, 0.99870201114927393, 0.99871454181743746, 0.99875772047994216, 0.99875864683112903, 0.99888892324977241, 0.99877267666535341], [0.99857409963631272, 0.99860632623390699, 0.99877785214981529, 0.99874512485309175, 0.99865193974519373, 0.99868546619008902, 0.99865788495019348, 0.99873892626542049, 0.99872403401333754, 0.99870997987681687], [0.99855017065490947, 0.9986687622542203, 0.99864981832292632, 0.99861499275635679, 0.99864617085512219, 0.99860256927794799, 0.99854596594405087, 0.99869167154587724, 0.99872024010358373, 0.99855537373748726], [0.99856643106380216, 0.9983792922799114, 0.99835264964525361, 0.9985144270164984, 0.99803345855867265, 0.99842495141746068, 0.99838607906859389, 0.99840865279579272, 0.99841544942711424, 0.99840045147289369], [0.99551286695583363, 0.99803265155046572, 0.99812538967031328, 0.99694844039148545, 0.99833861008987512, 0.99831785625680958, 0.99839705249686583, 0.99710229637235048, 0.99811734952734865, 0.99801657217517592], 
[0.99792672579435393, 0.99778161866028414, 0.9979557395283718, 0.94540538085365777, 0.99796123624828348, 0.99523276726597965, 0.9974610328108009, 0.99791127988803696, 0.99804820787585291, 0.99795351389476661], [0.9967621481196125, 0.88463345249420922, 0.99615828401900974, 0.99672915103861981, 0.99197321428477458, 0.99482651130955702, 0.99080231904391824, 0.9907108255405126, 0.99192096418767595, 0.99145986525720042], [0.93278003598471049, 0.98635120492483941, 0.95379991091096294, 0.95832139107447212, 0.94976056779666818, 0.95109755380423389, 0.97315830799096914, 0.98869008271397618, 0.96837229130465574, 0.99002572508503617], [0.82899672390352575, 0.88897660021746838, 0.89733927334601338, 0.84756014823296355, 0.82447297652678064, 0.90705198593095693, 0.90587327909949378, 0.79879661536924096, 0.84499517565171556, 0.84721926048568019], [0.78411640788953207, 0.8306266710716893, 0.78598556365468686, 0.70230820340856248, 0.75017038856772689, 0.76068119503423637, 0.82938455655594701, 0.82229042998374346, 0.84833032220204641, 0.77084465228349131]]}
average_performances = {'R^2': [0.9980574812949119, 0.9977271438367117, 0.9977151362740269, 0.9977808019549244, 0.9976870352786751, 0.9977225661409133, 0.9977349569738028, 0.997568042146605, 0.9975630672915111, 0.9974525923586963, 0.9973675050757235, 0.9972436670603699, 0.9967648896372923, 0.9953783537316918, 0.9839362464236299, 0.9619185126765425, 0.927869739098031, 0.6471267475104554, 0.5153556620160135], 'MSE': [1.2325549931254818, 1.2621771049343531, 1.28511732519008, 1.294554185970521, 1.3216995336432869, 1.3120541282069469, 1.303256989060116, 1.3756706782431336, 1.4164024813672866, 1.44877823610293, 1.5026249130437268, 1.5819826623994149, 1.8446436875552574, 2.6561709536033797, 9.1374272011621738, 22.419458557495005, 41.436912854390165, 201.9842045616646, 277.81824522787366], 'Rp': [0.99903448006455431, 0.99886977484158757, 0.99886106767098704, 0.99889311774975942, 0.99884518123159083, 0.99886305227054195, 0.99887041922796294, 0.99878575421539106, 0.9987843970319028, 0.99872979145471308, 0.99868716339141772, 0.99862457354524836, 0.99838818427459919, 0.99769090854865239, 0.99236375028203871, 0.982597673529509, 0.96523570715905238, 0.859128203876384, 0.78847383906516622]}
'''
# Plot the MSE, R^2 and Rp
for metric in metrics:
plt.figure()
x = fractions_unknown
y = average_performances[metric]
plt.plot(x,y)
plt.xlabel("Fraction missing")
plt.ylabel(metric)
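
# quick textual summary to accompany the plots (illustrative sketch; assumes
# 'metrics' was defined earlier in this script, e.g. metrics = ['R^2', 'MSE', 'Rp'])
for metric in metrics:
    pairs = list(zip(fractions_unknown, average_performances[metric]))
    best = min(pairs, key=lambda t: t[1]) if metric == 'MSE' else max(pairs, key=lambda t: t[1])
    print("%s: best %.4f at fraction missing %.2f" % (metric, best[1], best[0]))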
"[email protected]"
] | |
9e6ee83397fceeb430e08c5252d3be5dfb030f62 | c5458f2d53d02cb2967434122183ed064e1929f9 | /sdks/python/setup.py | 0e7640cc7b4ed0f33efa8b96d3aad2210fed2a4f | [] | no_license | ross-weir/ergo-node-api-sdks | fd7a32f79784dbd336ef6ddb9702b9dd9a964e75 | 9935ef703b14760854b24045c1307602b282c4fb | refs/heads/main | 2023-08-24T05:12:30.761145 | 2021-11-08T10:28:10 | 2021-11-08T10:28:10 | 425,785,912 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,087 | py | """
Ergo Node API
API docs for Ergo Node. Models are shared between all Ergo products # noqa: E501
The version of the OpenAPI document: 4.0.15
Contact: [email protected]
Generated by: https://openapi-generator.tech
"""
from setuptools import setup, find_packages # noqa: H301
NAME = "ergo-node"
VERSION = "1.0.0"
# To install the library, run the following
#
# python setup.py install
#
# prerequisite: setuptools
# http://pypi.python.org/pypi/setuptools
REQUIRES = [
"urllib3 >= 1.25.3",
"python-dateutil",
]
setup(
name=NAME,
version=VERSION,
description="Ergo Node API",
author="Ergo Platform Team",
author_email="[email protected]",
url="",
keywords=["OpenAPI", "OpenAPI-Generator", "Ergo Node API"],
python_requires=">=3.6",
install_requires=REQUIRES,
packages=find_packages(exclude=["test", "tests"]),
include_package_data=True,
license="CC0 1.0 Universal",
long_description="""\
API docs for Ergo Node. Models are shared between all Ergo products # noqa: E501
"""
)
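
# equivalent modern install, run from the directory containing this file:
#   pip install .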

# ===== file: /tools/MolSurfGenService/MolSurfaceGen32/chimera/share/SimpleSession/versions/v25.py (repo: project-renard-survey/semanticscience) =====
# --- UCSF Chimera Copyright ---
# Copyright (c) 2000 Regents of the University of California.
# All rights reserved. This software provided pursuant to a
# license agreement containing restrictions on its disclosure,
# duplication and use. This notice must be embedded in or
# attached to all copies, including partial copies, of the
# software or any revisions or derivations thereof.
# --- UCSF Chimera Copyright ---
#
# $Id: v25.py 26655 2009-01-07 22:02:30Z gregc $
from v24 import RemapDialog, reportRestoreError, restoreWindowSize, \
restoreOpenModelsAttrs, noAutoRestore, autoRestorable, \
registerAfterModelsCB, makeAfterModelsCBs, restoreModelClip, \
restoreSelections, restoreCamera, getColor, findFile, \
setSessionIDparams, sessionID, idLookup, expandSummary, init, \
beginRestore, endRestore, restoreColors, restoreSurfaces, restoreVRML, \
restorePseudoBondGroups, restoreOpenStates, restoreFontInfo
import globals # so that various version files can easily access same variables
import chimera
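
# restoreMolecules rebuilds Molecule/Residue/Atom/Bond objects from summarized
# session state: each per-attribute "summary" list is expanded back to one
# value per object via expandSummary(), and session IDs are assigned in
# creation order through the shared sessionMap (sm).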
def restoreMolecules(molInfo, resInfo, atomInfo, bondInfo, crdInfo):
items = []
sm = globals.sessionMap
res2mol = []
atom2mol = []
openModelsArgs = {}
for ids, name, cid, display, lineWidth, pointSize, stickScale, \
pdbHeaders, surfaceOpacity, ballScale, vdwDensity, autochain, \
ribbonHidesMainchain in zip(
expandSummary(molInfo['ids']),
expandSummary(molInfo['name']),
expandSummary(molInfo['color']),
expandSummary(molInfo['display']),
expandSummary(molInfo['lineWidth']),
expandSummary(molInfo['pointSize']),
expandSummary(molInfo['stickScale']),
molInfo['pdbHeaders'],
expandSummary(molInfo['surfaceOpacity']),
expandSummary(molInfo['ballScale']),
expandSummary(molInfo['vdwDensity']),
expandSummary(molInfo['autochain']),
expandSummary(molInfo['ribbonHidesMainchain'])
):
m = chimera.Molecule()
sm[len(items)] = m
items.append(m)
m.name = name
from SimpleSession import modelMap, modelOffset
chimera.openModels.add([m],
baseId=ids[0]+modelOffset, subid=ids[1])
modelMap.setdefault(ids, []).append(m)
m.color = getColor(cid)
m.display = display
m.lineWidth = lineWidth
m.pointSize = pointSize
m.stickScale = stickScale
m.setAllPDBHeaders(pdbHeaders)
m.surfaceOpacity = surfaceOpacity
m.ballScale = ballScale
m.vdwDensity = vdwDensity
m.autochain = autochain
m.ribbonHidesMainchain = ribbonHidesMainchain
if molInfo['optional']:
for attrName, info in molInfo['optional'].items():
for a, val in zip(items, expandSummary(info)):
if val is not None:
setattr(a, attrName, val)
resStart = len(items)
for mid, name, chain, pos, insert, rcid, lcid, ss, ribbonDrawMode, \
ribbonDisplay, label in zip(
expandSummary(resInfo['molecule']),
expandSummary(resInfo['name']),
expandSummary(resInfo['chain']),
resInfo['position'],
expandSummary(resInfo['insert']),
expandSummary(resInfo['ribbonColor']),
expandSummary(resInfo['labelColor']),
expandSummary(resInfo['ss']),
expandSummary(resInfo['ribbonDrawMode']),
expandSummary(resInfo['ribbonDisplay']),
expandSummary(resInfo['label'])
):
m = idLookup(mid)
r = m.newResidue(name, chain, pos, insert)
sm[len(items)] = r
items.append(r)
r.ribbonColor = getColor(rcid)
r.labelColor = getColor(lcid)
r.isHelix, r.isStrand, r.isTurn = ss
r.ribbonDrawMode = ribbonDrawMode
r.ribbonDisplay = ribbonDisplay
r.label = label
if resInfo['optional']:
residues = items[resStart:]
for attrName, info in resInfo['optional'].items():
for a, val in zip(residues, expandSummary(info)):
if val is not None:
setattr(a, attrName, val)
atomStart = len(items)
for rid, name, element, cid, vcid, lcid, scid, drawMode, display, \
label, surfaceDisplay, surfaceCategory, surfaceOpacity, radius, vdw, \
idatmType in zip(
expandSummary(atomInfo['residue']),
expandSummary(atomInfo['name']),
expandSummary(atomInfo['element']),
expandSummary(atomInfo['color']),
expandSummary(atomInfo['vdwColor']),
expandSummary(atomInfo['labelColor']),
expandSummary(atomInfo['surfaceColor']),
expandSummary(atomInfo['drawMode']),
expandSummary(atomInfo['display']),
expandSummary(atomInfo['label']),
expandSummary(atomInfo['surfaceDisplay']),
expandSummary(atomInfo['surfaceCategory']),
expandSummary(atomInfo['surfaceOpacity']),
expandSummary(atomInfo['radius']),
expandSummary(atomInfo['vdw']),
expandSummary(atomInfo['idatmType'])
):
r = idLookup(rid)
a = r.molecule.newAtom(name, chimera.Element(element))
sm[len(items)] = a
items.append(a)
r.addAtom(a)
a.color = getColor(cid)
a.vdwColor = getColor(vcid)
a.labelColor = getColor(lcid)
a.surfaceColor = getColor(scid)
a.drawMode = drawMode
a.display = display
a.label = label
a.surfaceDisplay = surfaceDisplay
a.surfaceCategory = surfaceCategory
a.surfaceOpacity = surfaceOpacity
a.radius = radius
a.vdw = vdw
if idatmType:
a.idatmType = idatmType
if atomInfo['optional']:
atoms = items[atomStart:]
for attrName, info in atomInfo['optional'].items():
for a, val in zip(atoms, expandSummary(info)):
if val is not None:
setattr(a, attrName, val)
for atoms, drawMode, display in zip(
bondInfo['atoms'],
expandSummary(bondInfo['drawMode']),
expandSummary(bondInfo['display'])
):
a1, a2 = [idLookup(a) for a in atoms]
b = a1.molecule.newBond(a1, a2)
sm[len(items)] = b
items.append(b)
b.drawMode = drawMode
b.display = display
from chimera import Point
for mid, crdSets in crdInfo.items():
m = idLookup(mid)
active = crdSets.pop('active')
for key, crds in crdSets.items():
coordSet = m.newCoordSet(key, len(crds))
for aid, crdString in crds:
idLookup(aid).setCoord(Point(*tuple([float(c)
for c in crdString.split()])), coordSet)
if key == active:
m.activeCoordSet = coordSet

# ===== file: /docs/conf.py (repo: edeposit/cz-urnnbn-api, license: MIT) =====
#! /usr/bin/env python
# -*- coding: utf-8 -*-
#
import os
import sys
import urllib
import os.path
sys.path.insert(0, os.path.abspath('../src/'))
extensions = [
'sphinx.ext.autodoc',
'sphinxcontrib.napoleon',
'sphinx.ext.intersphinx'
]
intersphinx_mapping = {
'python': ('http://docs.python.org/2.7', None),
'amqp': ("http://edeposit-amqp.readthedocs.org/en/latest/", None),
}
# Napoleon settings
napoleon_google_docstring = True
napoleon_numpy_docstring = False
napoleon_include_private_with_doc = False
napoleon_include_special_with_doc = True
# Sorting of items
autodoc_member_order = "bysource"
# Document all methods in classes
autoclass_content = 'both'
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'cz-urnnbn-api'
copyright = u'2015 E-deposit team'
# The full version, including alpha/beta/rc tags.
try:
# read data from CHANGES.rst
sys.path.insert(0, os.path.abspath('../'))
from docs import getVersion
release = getVersion(open("../CHANGES.rst").read())
except Exception:
# this is here specially for readthedocs, which downloads only docs, not
# other files
fh = urllib.urlopen("https://pypi.python.org/pypi/" + project + "/")
release = filter(lambda x: "<title>" in x, fh.read().splitlines())
release = release[0].split(":")[0].split()[1]
# The short X.Y version.
version = ".".join(release.split(".")[:2])
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ['_build']
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = 'default'
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# If true, links to the reST sources are added to the pages.
html_show_sourcelink = True
# Output file base name for HTML help builder.
htmlhelp_basename = 'cz-urnnbn-api'

# ===== file: /33_searchRotatedSortedArray_V2.py (repo: jennyChing/leetCode) =====
'''
33. Search in Rotated Sorted Array
Suppose a sorted array is rotated at some pivot unknown to you beforehand.
(i.e., 0 1 2 4 5 6 7 might become 4 5 6 7 0 1 2).
You are given a target value to search. If found in the array return its index, otherwise return -1.
You may assume no duplicate exists in the array.
'''
class Solution(object):
def search(self, nums, target):
        # modified binary search on the rotated array: at each step decide
        # which half is sorted and whether target can lie in that half
        left, right = 0, len(nums) - 1
        while left + 1 < right:  # keep two candidates; they are checked after the loop
            mid = (left + right) // 2
if nums[mid] == target:
return mid
if nums[left] <= target < nums[mid]:
right = mid
elif nums[mid] <= target <= nums[right]:
left = mid
elif nums[mid] > nums[left]:
left = mid
else:
right = mid
if nums[left] == target: return left
if nums[right] == target: return right
return -1
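
def _brute_force(nums, target):
    # reference linear scan (illustrative helper, not in the original file);
    # used below to sanity-check the binary search under the stated
    # no-duplicates precondition
    for i, n in enumerate(nums):
        if n == target:
            return i
    return -1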
if __name__ == '__main__':
    # the original tests used arrays with duplicates ([4, 4, ...] and
    # [1, 1, 3, 1]), which violates the stated no-duplicates assumption
    # (that variant is LeetCode 81); use a valid rotated array instead and
    # sanity-check against the linear scan above
    nums = [4, 5, 6, 7, 0, 1, 2]
    for target in nums + [3, 8, -1]:
        assert Solution().search(nums, target) == _brute_force(nums, target)
    res = Solution().search(nums, 0)
print(res)

# ===== file: /modules/standard/whois/org_list_controller.py (repo: comatech/Tyrbot) =====
from core.buddy_service import BuddyService
from core.chat_blob import ChatBlob
from core.command_param_types import Int, Any, Character
from core.decorators import instance, command, event
from core.dict_object import DictObject
@instance()
class OrgListController:
ORGLIST_BUDDY_TYPE = "orglist"
def __init__(self):
self.orglist = None
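        # rank names for each org governing form (these mirror Anarchy Online's
        # in-game org government types)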
self.governing_types = DictObject({
"Anarchism": ["Anarchist"],
"Monarchy": ["Monarch", "Counsil", "Follower"],
"Feudalism": ["Lord", "Knight", "Vassal", "Peasant"],
"Republic": ["President", "Advisor", "Veteran", "Member", "Applicant"],
"Faction": ["Director", "Board Member", "Executive", "Member", "Applicant"],
"Department": ["President", "General", "Squad Commander", "Unit Commander", "Unit Leader", "Unit Member", "Applicant"]
})
def inject(self, registry):
self.bot = registry.get_instance("bot")
self.db = registry.get_instance("db")
self.util = registry.get_instance("util")
self.text = registry.get_instance("text")
self.pork_service = registry.get_instance("pork_service")
self.org_pork_service = registry.get_instance("org_pork_service")
self.pork_service = registry.get_instance("pork_service")
self.buddy_service: BuddyService = registry.get_instance("buddy_service")
self.character_service = registry.get_instance("character_service")
@command(command="orglist", params=[Int("org_id")], access_level="all",
description="Show online status of characters in an org")
def orglist_cmd(self, request, org_id):
self.start_orglist_lookup(request.reply, org_id)
@command(command="orglist", params=[Any("character|org_name|org_id")], access_level="all",
description="Show online status of characters in an org")
def orglist_character_cmd(self, request, search):
if search.isdigit():
org_id = int(search)
else:
orgs = self.pork_service.find_orgs(search)
num_orgs = len(orgs)
if num_orgs == 0:
char_info = self.pork_service.get_character_info(search)
if char_info:
if not char_info.org_id:
return "<highlight>%s<end> does not appear to belong to an org." % search.capitalize()
else:
org_id = char_info.org_id
else:
return "Could not find character or org <highlight>%s<end>." % search
elif num_orgs == 1:
org_id = orgs[0].org_id
else:
blob = ""
for org in orgs:
blob += self.text.make_chatcmd("%s (%d)" % (org.org_name, org.org_id), "/tell <myname> orglist %d" % org.org_id) + "\n"
return ChatBlob("Org List (%d)" % num_orgs, blob)
self.start_orglist_lookup(request.reply, org_id)
def start_orglist_lookup(self, reply, org_id):
if self.orglist:
reply("There is an orglist already in progress.")
return
reply("Downloading org roster for org id %d..." % org_id)
self.orglist = self.org_pork_service.get_org_info(org_id)
if not self.orglist:
reply("Could not find org with ID <highlight>%d<end>." % org_id)
return
self.orglist.reply = reply
self.orglist.waiting_org_members = {}
self.orglist.finished_org_members = {}
reply("Checking online status for %d members of <highlight>%s<end>..." % (len(self.orglist.org_members), self.orglist.org_info.name))
# process all name lookups
while self.bot.iterate():
pass
self.iterate_org_members()
self.check_for_orglist_end()
@event(event_type=BuddyService.BUDDY_LOGON_EVENT, description="Detect online buddies for orglist command", is_hidden=True)
def buddy_logon_event(self, event_type, event_data):
if self.orglist and event_data.char_id in self.orglist.waiting_org_members:
self.update_online_status(event_data.char_id, True)
self.buddy_service.remove_buddy(event_data.char_id, self.ORGLIST_BUDDY_TYPE)
self.check_for_orglist_end()
@event(event_type=BuddyService.BUDDY_LOGOFF_EVENT, description="Detect offline buddies for orglist command", is_hidden=True)
def buddy_logoff_event(self, event_type, event_data):
if self.orglist and event_data.char_id in self.orglist.waiting_org_members:
self.update_online_status(event_data.char_id, False)
self.buddy_service.remove_buddy(event_data.char_id, self.ORGLIST_BUDDY_TYPE)
self.check_for_orglist_end()
def update_online_status(self, char_id, status):
self.orglist.finished_org_members[char_id] = self.orglist.waiting_org_members[char_id]
self.orglist.finished_org_members[char_id].online = status
del self.orglist.waiting_org_members[char_id]
def check_for_orglist_end(self):
if self.orglist.org_members:
self.iterate_org_members()
return
if not self.orglist.waiting_org_members:
self.orglist.reply(self.format_result())
self.orglist = None
def format_result(self):
org_ranks = {}
for rank_name in self.governing_types[self.orglist.org_info.governing_type]:
org_ranks[rank_name] = DictObject({
"online_members": [],
"offline_members": []
})
for char_id, org_member in self.orglist.finished_org_members.items():
if org_member.online:
org_ranks[org_member.org_rank_name].online_members.append(org_member)
else:
org_ranks[org_member.org_rank_name].offline_members.append(org_member)
blob = ""
num_online = 0
num_total = 0
for rank_name, rank_info in org_ranks.items():
rank_num_online = len(rank_info.online_members)
rank_num_total = len(rank_info.offline_members) + rank_num_online
blob += "<header2>%s (%d / %d)<end>\n" % (rank_name, rank_num_online, rank_num_total)
num_online += rank_num_online
num_total += rank_num_total
for org_member in rank_info.online_members:
level = org_member.level if org_member.ai_level == 0 else "%d/<green>%d<end>" % (org_member.level, org_member.ai_level)
blob += "%s (Level <highlight>%s<end>, %s %s <highlight>%s<end>)\n" % (org_member.name, level, org_member.gender, org_member.breed, org_member.profession)
if rank_num_total < 200:
blob += "<font color='#555555'>" + ", ".join(map(lambda x: x.name, rank_info.offline_members)) + "<end>"
blob += "\n"
else:
blob += "<font color='#555555'>Offline members ommitted for brevity<end>\n"
blob += "\n"
return ChatBlob("Orglist for '%s' (%d / %d)" % (self.orglist.org_info.name, num_online, num_total), blob)
def iterate_org_members(self):
# add org_members that we don't have online status for as buddies
for char_id, org_member in self.orglist.org_members.copy().items():
self.orglist.waiting_org_members[char_id] = self.orglist.org_members[char_id]
del self.orglist.org_members[char_id]
is_online = self.buddy_service.is_online(char_id)
if is_online is None:
if self.character_service.resolve_char_to_id(org_member.name):
self.buddy_service.add_buddy(char_id, self.ORGLIST_BUDDY_TYPE)
else:
# character is inactive, set as offline
self.update_online_status(char_id, False)
else:
self.update_online_status(char_id, is_online)
if not self.buddy_list_has_available_slots():
break
def buddy_list_has_available_slots(self):
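        # keep a handful of buddy-list slots in reserve so other modules can
        # still add buddies while an orglist lookup is running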
return self.buddy_service.buddy_list_size - len(self.buddy_service.buddy_list) > 5

# ===== file: /notebook_format/formats.py (repo: rosdyana/programming) =====
import json
import warnings
import matplotlib.pyplot as plt
from IPython.core.display import HTML
def load_style(css_style = 'custom1.css'):
"""
custom1.css adapted from
https://github.com/rlabbe/ThinkBayes/blob/master/code/custom.css
custom2.css adapted from
https://github.com/neilpanchal/iPython-Notebook-Theme
"""
# recent matplotlibs are raising deprecation warnings that
# we don't worry about (it's the axes_prop_cycle).
warnings.filterwarnings('ignore')
# update the default matplotlib's formating
with open('plot.json') as f:
s = json.load(f)
plt.rcParams.update(s)
# load the styles for the notebooks
with open(css_style) as f:
styles = f.read()
return HTML(styles)
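
# typical notebook usage (illustrative; assumes plot.json and the css file are
# in the working directory, as the relative paths above require):
#   from formats import load_style
#   load_style(css_style='custom2.css')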

# ===== file: /Base_model/intention/intent_classifier.py (repo: BarryZM/Chatbot_Utils) =====
# -*- coding: utf-8 -*-
'''
@Author : Xu
@Software: ide
@File : domain_classifier.py
@Time : 2019-11-06
@Desc    : Domain classification model fine-tuned from BERT's sequence classifier; accuracy has been verified, but the model initialization method still needs rework
'''
import os, csv, random, collections, pickle
import tensorflow as tf
import numpy as np
import pickle as pkl
import pathlib
from queue import Queue
from threading import Thread
from Base_model.bert import modeling
from Base_model.bert import optimization
from Base_model.bert import tokenization
from Base_model.intention.config import Config
os.environ['CUDA_VISIBLE_DEVICES'] = '3'
basedir = str(pathlib.Path(os.path.abspath(__file__)).parent)
cf = Config()
class InputExample(object):
"""A single training/test example for simple sequence classification."""
def __init__(self, guid, text_a, text_b=None, label=None):
"""Constructs a InputExample.
Args:
guid: Unique id for the example.
text_a: string. The untokenized text of the first sequence. For single
sequence tasks, only this sequence must be specified.
text_b: (Optional) string. The untokenized text of the second sequence.
Only must be specified for sequence pair tasks.
label: (Optional) string. The label of the example. This should be
specified for train and dev examples, but not for test examples.
"""
self.guid = guid
self.text_a = text_a
self.text_b = text_b
self.label = label
class InputFeatures(object):
"""A single set of features of data."""
def __init__(self,
input_ids, input_mask, segment_ids, label_id, is_real_example=True):
self.input_ids = input_ids
self.input_mask = input_mask
self.segment_ids = segment_ids
self.label_id = label_id
self.is_real_example = is_real_example
class DataProcessor(object):
"""Base class for data converters for sequence classification data sets."""
def get_train_examples(self, data_dir):
"""Gets a collection of `InputExample`s for the train set."""
raise NotImplementedError()
def get_dev_examples(self, data_dir):
"""Gets a collection of `InputExample`s for the dev set."""
raise NotImplementedError()
def get_test_examples(self, data_dir):
"""Gets a collection of `InputExample`s for prediction."""
raise NotImplementedError()
def get_labels(self):
"""Gets the list of labels for this data set."""
raise NotImplementedError()
@classmethod
def _read_tsv(cls, input_file, quotechar=None):
"""Reads a tab separated value file."""
with tf.gfile.Open(input_file, "r") as f:
reader = csv.reader(f, delimiter="\t", quotechar=quotechar)
lines = []
for line in reader:
lines.append(line)
return lines
class IntentionProcessor(DataProcessor):
"""Processor for the FenLei data set (GLUE version)."""
def get_train_examples(self, data_dir):
file_path = os.path.join(data_dir, 'train.txt')
with open(file_path, 'r', encoding="utf-8") as f:
reader = f.readlines()
random.seed(0)
            random.shuffle(reader)  # note: the training data must be shuffled
examples, self.labels = [], []
for index, line in enumerate(reader):
guid = 'train-%d' % index
split_line = line.strip().split("\t")
text_a = tokenization.convert_to_unicode(split_line[1])
text_b = None
label = split_line[0]
examples.append(InputExample(guid=guid, text_a=text_a,
text_b=text_b, label=label))
self.labels.append(label)
return examples
def get_dev_examples(self, data_dir):
file_path = os.path.join(data_dir, 'val.txt')
with open(file_path, 'r', encoding="utf-8") as f:
reader = f.readlines()
random.shuffle(reader)
examples = []
for index, line in enumerate(reader):
guid = 'dev-%d' % index
split_line = line.strip().split('\t')
text_a = tokenization.convert_to_unicode(split_line[1])
text_b = None
label = split_line[0]
examples.append(InputExample(guid=guid, text_a=text_a,
text_b=text_b, label=label))
return examples
def get_test_examples(self, data_dir):
file_path = os.path.join(data_dir, 'cnews.test.txt')
with open(file_path, 'r', encoding="utf-8") as f:
reader = f.readlines()
            # random.shuffle(reader)  # keep the test set in order to make comparisons easier
examples = []
for index, line in enumerate(reader):
guid = 'test-%d' % index
split_line = line.strip().split("\t")
text_a = tokenization.convert_to_unicode(split_line[1])
text_b = None
label = split_line[0]
examples.append(InputExample(guid=guid, text_a=text_a,
text_b=text_b, label=label))
return examples
def get_sentence_examples(self, questions):
for index, data in enumerate(questions):
guid = 'test-%d' % index
text_a = tokenization.convert_to_unicode(str(data))
text_b = None
# label = str(0)
label = self.labels[0]
yield InputExample(guid=guid, text_a=text_a, text_b=text_b, label=label)
def one_example(self, sentence):
guid, label = 'pred-0', self.labels[0]
text_a, text_b = sentence, None
return InputExample(guid=guid, text_a=text_a, text_b=text_b, label=label)
def get_labels(self):
        return sorted(set(self.labels), key=self.labels.index)  # ordered, de-duplicated list (not a set) so the label-to-id mapping stays stable
class IntentionCLS():
def __init__(self, batch_size=cf.batch_size):
self.mode = None
self.max_seq_length = cf.max_seq_length
self.tokenizer = tokenization.FullTokenizer(vocab_file=cf.vocab_file, do_lower_case=True)
self.batch_size = batch_size
self.estimator = None
        self.processor = IntentionProcessor()  # processor that loads the train/dev/test data
tf.compat.v1.logging.set_verbosity(tf.compat.v1.logging.INFO)
def set_mode(self, mode):
self.mode = mode
self.estimator = self.get_estimator()
if mode == tf.estimator.ModeKeys.PREDICT:
self.input_queue = Queue(maxsize=1)
self.output_queue = Queue(maxsize=1)
            self.predict_thread = Thread(target=self.predict_from_queue, daemon=True)  # daemon thread: exits with the main process
self.predict_thread.start()
def create_model(self, bert_config, is_training, input_ids, input_mask, segment_ids, labels, num_labels, use_one_hot_embeddings):
"""
构建分类模型
:param bert_config:
:param is_training:
:param input_ids:
:param input_mask:
:param segment_ids:
:param labels:
:param num_labels:
:return:
"""
model = modeling.BertModel(
config=bert_config,
is_training=is_training,
input_ids=input_ids,
input_mask=input_mask,
token_type_ids=segment_ids,
use_one_hot_embeddings=use_one_hot_embeddings)
# In the demo, we are doing a simple classification task on the entire segment.
#
# If you want to use the token-level output, use model.get_sequence_output() instead.
        # embedding_layer = model.get_sequence_output()  # token-level embeddings (like embedding_lookup); a CRF layer could be stacked on top
output_layer = model.get_pooled_output()
hidden_size = output_layer.shape[-1].value
output_weights = tf.get_variable(
"output_weights", [num_labels, hidden_size],
initializer=tf.truncated_normal_initializer(stddev=0.02))
output_bias = tf.get_variable(
"output_bias", [num_labels], initializer=tf.zeros_initializer())
with tf.variable_scope("loss"):
if is_training:
# I.e., 0.1 dropout
output_layer = tf.nn.dropout(output_layer, keep_prob=0.9)
logits = tf.matmul(output_layer, output_weights, transpose_b=True)
logits = tf.nn.bias_add(logits, output_bias)
probabilities = tf.nn.softmax(logits, axis=-1)
            # class weights could be applied here to handle label imbalance;
            # as written, this is plain (unweighted) cross-entropy
log_probs = tf.nn.log_softmax(logits, axis=-1)
one_hot_labels = tf.one_hot(labels, depth=num_labels, dtype=tf.float32)
per_example_loss = -tf.reduce_sum(one_hot_labels * log_probs, axis=-1)
loss = tf.reduce_mean(per_example_loss)
return (loss, per_example_loss, logits, probabilities)
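
    # For reference (sketch, not in the original file): the head above is just
    # softmax(W . pooled + b) with mean cross-entropy, i.e. in NumPy:
    #   logits = pooled @ W.T + b
    #   probs  = np.exp(logits) / np.exp(logits).sum(-1, keepdims=True)
    #   loss   = -np.log(probs[np.arange(n), labels]).mean()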
def model_fn_builder(self, bert_config, num_labels, init_checkpoint, learning_rate, num_train_steps, num_warmup_steps, use_one_hot_embeddings):
"""Returns `model_fn` closure for GPU Estimator."""
def model_gpu(features, labels, mode, params): # pylint: disable=unused-argument
"""The `model_fn` for GPU 版本的 Estimator."""
tf.logging.info("*** Features ***")
for name in sorted(features.keys()):
tf.compat.v1.logging.info(" name = %s, shape = %s" % (name, features[name].shape))
input_ids = features["input_ids"]
input_mask = features["input_mask"]
segment_ids = features["segment_ids"]
label_ids = features["label_ids"]
is_training = (mode == tf.estimator.ModeKeys.TRAIN)
(total_loss, per_example_loss, logits, probabilities) = self.create_model(
bert_config, is_training, input_ids, input_mask, segment_ids, label_ids, num_labels, use_one_hot_embeddings)
tvars = tf.compat.v1.trainable_variables()
initialized_variable_names = {}
if init_checkpoint:
(assignment_map, initialized_variable_names) = modeling.get_assignment_map_from_checkpoint(tvars, init_checkpoint)
tf.compat.v1.train.init_from_checkpoint(init_checkpoint, assignment_map)
tf.compat.v1.logging.info("**** Trainable Variables ****")
for var in tvars:
init_string = ""
if var.name in initialized_variable_names:
init_string = ", *INIT_FROM_CKPT*"
tf.logging.info(" name = %s, shape = %s%s", var.name, var.shape, init_string)
if mode == tf.estimator.ModeKeys.TRAIN:
train_op = optimization.create_optimizer(total_loss, learning_rate, num_train_steps, num_warmup_steps, False)
output_spec = tf.estimator.EstimatorSpec(mode=mode, loss=total_loss, train_op=train_op, )
elif mode == tf.estimator.ModeKeys.EVAL:
def metric_fn(per_example_loss, label_ids, logits, is_real_example):
predictions = tf.argmax(logits, axis=-1, output_type=tf.int32)
accuracy = tf.compat.v1.metrics.accuracy(
labels=label_ids, predictions=predictions, weights=is_real_example)
loss = tf.compat.v1.metrics.mean(values=per_example_loss, weights=is_real_example)
return {"eval_accuracy": accuracy, "eval_loss": loss, }
metrics = metric_fn(per_example_loss, label_ids, logits, True)
output_spec = tf.estimator.EstimatorSpec(mode=mode, loss=total_loss, eval_metric_ops=metrics)
else:
output_spec = tf.estimator.EstimatorSpec(mode=mode, predictions={"probabilities": probabilities}, )
return output_spec
return model_gpu
def get_estimator(self):
from tensorflow.python.estimator.estimator import Estimator
from tensorflow.python.estimator.run_config import RunConfig
bert_config = modeling.BertConfig.from_json_file(cf.bert_config_file)
train_examples = self.processor.get_train_examples(cf.data_dir)
        label_list = self.processor.get_labels()  # must come from the processor here; self.get_label_list() would fail before label_list.pkl has been written
# label_list = self.get_label_list()
num_train_steps = int(len(train_examples) / self.batch_size * cf.num_train_epochs)
num_warmup_steps = int(num_train_steps * 0.1)
if self.mode == tf.estimator.ModeKeys.TRAIN:
init_checkpoint = cf.init_checkpoint
else:
            init_checkpoint = cf.output_dir  # in predict mode, load the fine-tuned checkpoint
model_fn = self.model_fn_builder(
bert_config=bert_config,
num_labels=len(label_list),
init_checkpoint=init_checkpoint,
learning_rate=cf.learning_rate,
num_train_steps=num_train_steps,
num_warmup_steps=num_warmup_steps,
use_one_hot_embeddings=False)
config = tf.compat.v1.ConfigProto()
config.gpu_options.allow_growth = True
config.gpu_options.per_process_gpu_memory_fraction = cf.gpu_memory_fraction
config.log_device_placement = False
return Estimator(model_fn=model_fn, config=RunConfig(session_config=config), model_dir=cf.output_dir, params={'batch_size': self.batch_size})
def get_label_list(self):
'''
读取模型训练是动态产生的label_list.pkl文件
:return:
'''
label_list = pkl.load(open(basedir + '/label_list.pkl', 'rb'))
return label_list
def predict_from_queue(self):
for i in self.estimator.predict(input_fn=self.queue_predict_input_fn, yield_single_examples=False):
self.output_queue.put(i)
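
    # The queue pair keeps a single estimator.predict() generator alive:
    # generate_from_queue() blocks on input_queue, so the TF graph is built
    # once and each later request is served without re-initializing the model.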
def queue_predict_input_fn(self):
return (tf.data.Dataset.from_generator(
self.generate_from_queue,
output_types={
'input_ids': tf.int32,
'input_mask': tf.int32,
'segment_ids': tf.int32,
'label_ids': tf.int32},
output_shapes={
'input_ids': (None, self.max_seq_length),
'input_mask': (None, self.max_seq_length),
'segment_ids': (None, self.max_seq_length),
'label_ids': (1,)}).prefetch(10))
def generate_from_queue(self):
while True:
predict_examples = self.processor.get_sentence_examples(self.input_queue.get())
features = list(self.convert_examples_to_features(predict_examples,
self.processor.get_labels(),
cf.max_seq_length,
self.tokenizer))
yield {
'input_ids': [f.input_ids for f in features],
'input_mask': [f.input_mask for f in features],
'segment_ids': [f.segment_ids for f in features],
'label_ids': [f.label_id for f in features]
}
def convert_examples_to_features(self, examples, label_list, max_seq_length, tokenizer):
"""Convert a set of `InputExample`s to a list of `InputFeatures`."""
for (ex_index, example) in enumerate(examples):
label_map = {}
for (i, label) in enumerate(label_list):
label_map[label] = i
tokens_a = tokenizer.tokenize(example.text_a)
tokens_b = None
if example.text_b:
tokens_b = tokenizer.tokenize(example.text_b)
if tokens_b:
# Modifies `tokens_a` and `tokens_b` in place so that the total
# length is less than the specified length.
# Account for [CLS], [SEP], [SEP] with "- 3"
self._truncate_seq_pair(tokens_a, tokens_b, max_seq_length - 3)
else:
# Account for [CLS] and [SEP] with "- 2"
if len(tokens_a) > max_seq_length - 2:
tokens_a = tokens_a[0:(max_seq_length - 2)]
tokens = []
segment_ids = []
tokens.append("[CLS]")
segment_ids.append(0)
for token in tokens_a:
tokens.append(token)
segment_ids.append(0)
tokens.append("[SEP]")
segment_ids.append(0)
if tokens_b:
for token in tokens_b:
tokens.append(token)
segment_ids.append(1)
tokens.append("[SEP]")
segment_ids.append(1)
input_ids = tokenizer.convert_tokens_to_ids(tokens)
# The mask has 1 for real tokens and 0 for padding tokens. Only real
# tokens are attended to.
input_mask = [1] * len(input_ids)
# Zero-pad up to the sequence length.
while len(input_ids) < max_seq_length:
input_ids.append(0)
input_mask.append(0)
segment_ids.append(0)
assert len(input_ids) == max_seq_length
assert len(input_mask) == max_seq_length
assert len(segment_ids) == max_seq_length
label_id = label_map[example.label]
if ex_index < 5:
tf.compat.v1.logging.info("*** Example ***")
tf.compat.v1.logging.info("guid: %s" % (example.guid))
tf.compat.v1.logging.info("tokens: %s" % " ".join([tokenization.printable_text(x) for x in tokens]))
tf.compat.v1.logging.info("input_ids: %s" % " ".join([str(x) for x in input_ids]))
tf.compat.v1.logging.info("input_mask: %s" % " ".join([str(x) for x in input_mask]))
tf.compat.v1.logging.info("segment_ids: %s" % " ".join([str(x) for x in segment_ids]))
tf.compat.v1.logging.info("label: %s (id = %d)" % (example.label, label_id))
feature = InputFeatures(
input_ids=input_ids,
input_mask=input_mask,
segment_ids=segment_ids,
label_id=label_id)
yield feature
def _truncate_seq_pair(self, tokens_a, tokens_b, max_length):
"""Truncates a sequence pair in place to the maximum length."""
# This is a simple heuristic which will always truncate the longer sequence
# one token at a time. This makes more sense than truncating an equal percent
# of tokens from each, since if one sequence is very short then each token
# that's truncated likely contains more information than a longer sequence.
while True:
total_length = len(tokens_a) + len(tokens_b)
if total_length <= max_length:
break
if len(tokens_a) > len(tokens_b):
tokens_a.pop()
else:
tokens_b.pop()
def convert_single_example(self, ex_index, example, label_list, max_seq_length, tokenizer):
"""Converts a single `InputExample` into a single `InputFeatures`."""
label_map = {}
for (i, label) in enumerate(label_list):
label_map[label] = i
tokens_a = tokenizer.tokenize(example.text_a)
tokens_b = None
if example.text_b:
tokens_b = tokenizer.tokenize(example.text_b)
if tokens_b:
# Modifies `tokens_a` and `tokens_b` in place so that the total
# length is less than the specified length.
# Account for [CLS], [SEP], [SEP] with "- 3"
self._truncate_seq_pair(tokens_a, tokens_b, max_seq_length - 3)
else:
# Account for [CLS] and [SEP] with "- 2"
if len(tokens_a) > max_seq_length - 2:
tokens_a = tokens_a[0:(max_seq_length - 2)]
tokens = []
segment_ids = []
tokens.append("[CLS]")
segment_ids.append(0)
for token in tokens_a:
tokens.append(token)
segment_ids.append(0)
tokens.append("[SEP]")
segment_ids.append(0)
if tokens_b:
for token in tokens_b:
tokens.append(token)
segment_ids.append(1)
tokens.append("[SEP]")
segment_ids.append(1)
input_ids = tokenizer.convert_tokens_to_ids(tokens)
# The mask has 1 for real tokens and 0 for padding tokens. Only real
# tokens are attended to.
input_mask = [1] * len(input_ids)
# Zero-pad up to the sequence length.
while len(input_ids) < max_seq_length:
input_ids.append(0)
input_mask.append(0)
segment_ids.append(0)
assert len(input_ids) == max_seq_length
assert len(input_mask) == max_seq_length
assert len(segment_ids) == max_seq_length
label_id = label_map[example.label]
if ex_index < 5:
tf.compat.v1.logging.info("*** Example ***")
tf.compat.v1.logging.info("guid: %s" % (example.guid))
tf.compat.v1.logging.info("tokens: %s" % " ".join([tokenization.printable_text(x) for x in tokens]))
tf.compat.v1.logging.info("input_ids: %s" % " ".join([str(x) for x in input_ids]))
tf.compat.v1.logging.info("input_mask: %s" % " ".join([str(x) for x in input_mask]))
tf.compat.v1.logging.info("segment_ids: %s" % " ".join([str(x) for x in segment_ids]))
tf.compat.v1.logging.info("label: %s (id = %d)" % (example.label, label_id))
feature = InputFeatures(
input_ids=input_ids,
input_mask=input_mask,
segment_ids=segment_ids,
label_id=label_id,
is_real_example=True)
return feature
def file_based_convert_examples_to_features(self, examples, label_list, max_seq_length, tokenizer, output_file):
"""Convert a set of `InputExample`s to a TFRecord file."""
writer = tf.io.TFRecordWriter(output_file)
for (ex_index, example) in enumerate(examples):
if ex_index % 10000 == 0:
tf.logging.info("Writing example %d of %d" % (ex_index, len(examples)))
feature = self.convert_single_example(ex_index, example, label_list,
max_seq_length, tokenizer)
def create_int_feature(values):
f = tf.train.Feature(int64_list=tf.train.Int64List(value=list(values)))
return f
features = collections.OrderedDict()
features["input_ids"] = create_int_feature(feature.input_ids)
features["input_mask"] = create_int_feature(feature.input_mask)
features["segment_ids"] = create_int_feature(feature.segment_ids)
features["label_ids"] = create_int_feature([feature.label_id])
features["is_real_example"] = create_int_feature(
[int(feature.is_real_example)])
tf_example = tf.train.Example(features=tf.train.Features(feature=features))
writer.write(tf_example.SerializeToString())
writer.close()
def file_based_input_fn_builder(self, input_file, seq_length, is_training,
drop_remainder):
"""Creates an `input_fn` closure to be passed to TPUEstimator."""
name_to_features = {
"input_ids": tf.io.FixedLenFeature([seq_length], tf.int64),
"input_mask": tf.io.FixedLenFeature([seq_length], tf.int64),
"segment_ids": tf.io.FixedLenFeature([seq_length], tf.int64),
"label_ids": tf.io.FixedLenFeature([], tf.int64),
"is_real_example": tf.io.FixedLenFeature([], tf.int64),
}
def _decode_record(record, name_to_features):
"""Decodes a record to a TensorFlow example."""
example = tf.parse_single_example(record, name_to_features)
# tf.Example only supports tf.int64, but the TPU only supports tf.int32.
# So cast all int64 to int32.
for name in list(example.keys()):
t = example[name]
if t.dtype == tf.int64:
t = tf.to_int32(t)
example[name] = t
return example
def input_fn(params):
"""The actual input function."""
batch_size = params["batch_size"]
# For training, we want a lot of parallel reading and shuffling.
# For eval, we want no shuffling and parallel reading doesn't matter.
d = tf.data.TFRecordDataset(input_file)
if is_training:
d = d.repeat()
d = d.shuffle(buffer_size=100)
d = d.apply(
tf.data.experimental.map_and_batch(
lambda record: _decode_record(record, name_to_features),
batch_size=batch_size,
drop_remainder=drop_remainder))
return d
return input_fn
# This function is not used by this file but is still used by the Colab and people who depend on it.
def input_fn_builder(self, features, seq_length, is_training, drop_remainder):
"""Creates an `input_fn` closure to be passed to TPUEstimator."""
all_input_ids = []
all_input_mask = []
all_segment_ids = []
all_label_ids = []
for feature in features:
all_input_ids.append(feature.input_ids)
all_input_mask.append(feature.input_mask)
all_segment_ids.append(feature.segment_ids)
all_label_ids.append(feature.label_id)
def input_fn(params):
"""The actual input function."""
batch_size = params["batch_size"]
num_examples = len(features)
# This is for demo purposes and does NOT scale to large data sets. We do
# not use Dataset.from_generator() because that uses tf.py_func which is
# not TPU compatible. The right way to load data is with TFRecordReader.
d = tf.data.Dataset.from_tensor_slices({
"input_ids":
tf.constant(all_input_ids, shape=[num_examples, seq_length],
dtype=tf.int32),
"input_mask":
tf.constant(all_input_mask, shape=[num_examples, seq_length],
dtype=tf.int32),
"segment_ids":
tf.constant(all_segment_ids, shape=[num_examples, seq_length],
dtype=tf.int32),
"label_ids":
tf.constant(all_label_ids, shape=[num_examples], dtype=tf.int32),
})
if is_training:
d = d.repeat()
d = d.shuffle(buffer_size=100)
d = d.batch(batch_size=batch_size, drop_remainder=drop_remainder)
return d
return input_fn
def create_classification_model(self, bert_config, is_training, input_ids, input_mask, segment_ids, labels, num_labels):
        # Build sentence representations from the training batch that is passed in
model = modeling.BertModel(
config=bert_config,
is_training=is_training,
input_ids=input_ids,
input_mask=input_mask,
token_type_ids=segment_ids,
)
embedding_layer = model.get_sequence_output()
output_layer = model.get_pooled_output()
hidden_size = output_layer.shape[-1].value
output_weights = tf.get_variable(
"output_weights", [num_labels, hidden_size],
initializer=tf.truncated_normal_initializer(stddev=0.02))
output_bias = tf.get_variable(
"output_bias", [num_labels], initializer=tf.zeros_initializer())
with tf.variable_scope("loss"):
if is_training:
# I.e., 0.1 dropout
output_layer = tf.nn.dropout(output_layer, keep_prob=0.9)
logits = tf.matmul(output_layer, output_weights, transpose_b=True)
logits = tf.nn.bias_add(logits, output_bias)
probabilities = tf.nn.softmax(logits, axis=-1)
log_probs = tf.nn.log_softmax(logits, axis=-1)
if labels is not None:
one_hot_labels = tf.one_hot(labels, depth=num_labels, dtype=tf.float32)
per_example_loss = -tf.reduce_sum(one_hot_labels * log_probs, axis=-1)
loss = tf.reduce_mean(per_example_loss)
else:
loss, per_example_loss = None, None
return (loss, per_example_loss, logits, probabilities)
def save_PBmodel(self, num_labels):
""" 保存PB格式中文分类模型 """
try:
            # If the PB file already exists, return its path; otherwise freeze the
            # model into a PB file and return the path it was written to.
pb_file = os.path.join(cf.pb_model_dir, 'classification_model.pb')
graph = tf.Graph()
with graph.as_default():
input_ids = tf.placeholder(tf.int32, (None, cf.max_seq_length), 'input_ids')
input_mask = tf.placeholder(tf.int32, (None, cf.max_seq_length), 'input_mask')
bert_config = modeling.BertConfig.from_json_file(cf.bert_config_file)
loss, per_example_loss, logits, probabilities = self.create_classification_model(
bert_config=bert_config,
is_training=False,
input_ids=input_ids,
input_mask=input_mask,
segment_ids=None,
labels=None,
num_labels=num_labels)
probabilities = tf.identity(probabilities, 'pred_prob')
saver = tf.train.Saver()
with tf.Session() as sess:
sess.run(tf.global_variables_initializer())
latest_checkpoint = tf.train.latest_checkpoint(cf.output_dir)
saver.restore(sess, latest_checkpoint)
tmp_g = tf.compat.v1.graph_util.convert_variables_to_constants(sess, graph.as_graph_def(), ['pred_prob'])
                # write the serialized binary graph to disk
with tf.gfile.GFile(pb_file, 'wb') as f:
f.write(tmp_g.SerializeToString())
return pb_file
except Exception as e:
            print('fail to optimize the graph! %s' % e)
def train(self):
'''
        Train the domain classification model.
:return:
'''
if self.mode is None:
raise ValueError("Please set the 'mode' parameter")
bert_config = modeling.BertConfig.from_json_file(cf.bert_config_file)
if cf.max_seq_length > bert_config.max_position_embeddings:
raise ValueError(
"Cannot use sequence length %d because the BERT model "
"was only trained up to sequence length %d" %
(cf.max_seq_length, bert_config.max_position_embeddings))
tf.gfile.MakeDirs(cf.output_dir)
train_examples = self.processor.get_train_examples(cf.data_dir)
        label_list = self.processor.get_labels()  # collect the label set from the training data and persist the mappings as pkl files
label_map = {}
for (i, label) in enumerate(label_list):
label_map[label] = i
with open('label_list.pkl', 'wb') as f:
pickle.dump(label_list, f)
with open('label2id.pkl', 'wb') as f:
pickle.dump(label_map, f)
num_train_steps = int(len(train_examples) / cf.batch_size * cf.num_train_epochs)
estimator = self.get_estimator()
train_file = os.path.join(cf.output_dir, "train.tf_record")
self.file_based_convert_examples_to_features(train_examples, label_list, cf.max_seq_length, self.tokenizer,
train_file)
tf.compat.v1.logging.info("***** Running training *****")
tf.compat.v1.logging.info(" Num examples = %d", len(train_examples))
tf.compat.v1.logging.info(" Batch size = %d", cf.batch_size)
tf.compat.v1.logging.info(" Num steps = %d", num_train_steps)
train_input_fn = self.file_based_input_fn_builder(input_file=train_file,
seq_length=cf.max_seq_length,
is_training=True,
drop_remainder=True)
# early_stopping = tf.contrib.estimator.stop_if_no_decrease_hook(
# estimator,
# metric_name='loss',
# max_steps_without_decrease=10,
# min_steps=num_train_steps)
# estimator.train(input_fn=train_input_fn, hooks=[early_stopping])
estimator.train(input_fn=train_input_fn, max_steps=num_train_steps)
def eval(self):
if self.mode is None:
raise ValueError("Please set the 'mode' parameter")
eval_examples = self.processor.get_dev_examples(cf.data_dir)
eval_file = os.path.join(cf.output_dir, "eval.tf_record")
label_list = self.processor.get_labels()
self.file_based_convert_examples_to_features(
eval_examples, label_list, cf.max_seq_length, self.tokenizer, eval_file)
tf.compat.v1.logging.info("***** Running evaluation *****")
tf.compat.v1.logging.info(" Num examples = %d", len(eval_examples))
tf.compat.v1.logging.info(" Batch size = %d", self.batch_size)
eval_input_fn = self.file_based_input_fn_builder(
input_file=eval_file,
seq_length=cf.max_seq_length,
is_training=False,
drop_remainder=False)
estimator = self.get_estimator()
result = estimator.evaluate(input_fn=eval_input_fn, steps=None)
output_eval_file = os.path.join(cf.output_dir, "eval_results.txt")
with tf.gfile.GFile(output_eval_file, "w") as writer:
tf.compat.v1.logging.info("***** Eval results *****")
for key in sorted(result.keys()):
tf.compat.v1.logging.info(" %s = %s", key, str(result[key]))
writer.write("%s = %s\n" % (key, str(result[key])))
def predict(self, sentence):
'''
        Predict the class of a sentence with the domain classification model.
:param sentence:
:return:
'''
if self.mode is None:
raise ValueError("Please set the 'mode' parameter")
self.input_queue.put([sentence])
label = self.get_label_list()
# prob = self.output_queue.get()['probabilities'].tolist()[0]
# intent = dict(zip(label, prob))
prediction = label[int(np.argmax(self.output_queue.get()['probabilities']))]
return prediction
    # save_PBmodel(len(label_list))  # generate a single pb model.
if __name__ == '__main__':
cls = IntentionCLS()
# if cf.do_train:
# cls.set_mode(tf.estimator.ModeKeys.TRAIN)
# cls.train()
# cls.set_mode(tf.estimator.ModeKeys.EVAL)
# cls.eval()
if cf.do_predict:
cls.set_mode(tf.estimator.ModeKeys.PREDICT)
sentence = '你好'
y = cls.predict(sentence)
print(y) | [
"[email protected]"
] | |
3bbddd1bbeb2a35468c28a37728f99bdf6f30c89 | 64bf39b96a014b5d3f69b3311430185c64a7ff0e | /intro-ansible/venv3/lib/python3.8/site-packages/ansible_collections/fortinet/fortios/plugins/modules/fortios_vpn_l2tp.py | 8a679a3190e555ba61d89d2bd7d5cafd9ce95faa | [
"MIT"
] | permissive | SimonFangCisco/dne-dna-code | 7072eba7da0389e37507b7a2aa5f7d0c0735a220 | 2ea7d4f00212f502bc684ac257371ada73da1ca9 | refs/heads/master | 2023-03-10T23:10:31.392558 | 2021-02-25T15:04:36 | 2021-02-25T15:04:36 | 342,274,373 | 0 | 0 | MIT | 2021-02-25T14:39:22 | 2021-02-25T14:39:22 | null | UTF-8 | Python | false | false | 9,014 | py | #!/usr/bin/python
from __future__ import (absolute_import, division, print_function)
# Copyright 2019-2020 Fortinet, Inc.
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <https://www.gnu.org/licenses/>.
__metaclass__ = type
ANSIBLE_METADATA = {'status': ['preview'],
'supported_by': 'community',
'metadata_version': '1.1'}
DOCUMENTATION = '''
---
module: fortios_vpn_l2tp
short_description: Configure L2TP in Fortinet's FortiOS and FortiGate.
description:
- This module is able to configure a FortiGate or FortiOS (FOS) device by allowing the
user to set and modify vpn feature and l2tp category.
Examples include all parameters and values need to be adjusted to datasources before usage.
Tested with FOS v6.0.0
version_added: "2.9"
author:
- Link Zheng (@chillancezen)
- Jie Xue (@JieX19)
- Hongbin Lu (@fgtdev-hblu)
- Frank Shen (@frankshen01)
- Miguel Angel Munoz (@mamunozgonzalez)
- Nicolas Thomas (@thomnico)
notes:
- Legacy fortiosapi has been deprecated, httpapi is the preferred way to run playbooks
requirements:
- ansible>=2.9.0
options:
access_token:
description:
- Token-based authentication.
Generated from GUI of Fortigate.
type: str
required: false
vdom:
description:
- Virtual domain, among those defined previously. A vdom is a
virtual instance of the FortiGate that can be configured and
used as a different unit.
type: str
default: root
vpn_l2tp:
description:
- Configure L2TP.
default: null
type: dict
suboptions:
eip:
description:
- End IP.
type: str
enforce_ipsec:
description:
- Enable/disable IPsec enforcement.
type: str
choices:
- enable
- disable
sip:
description:
- Start IP.
type: str
status:
description:
- Enable/disable FortiGate as a L2TP gateway.
type: str
choices:
- enable
- disable
usrgrp:
description:
- User group. Source user.group.name.
type: str
'''
EXAMPLES = '''
- hosts: fortigates
collections:
- fortinet.fortios
connection: httpapi
vars:
vdom: "root"
ansible_httpapi_use_ssl: yes
ansible_httpapi_validate_certs: no
ansible_httpapi_port: 443
tasks:
- name: Configure L2TP.
fortios_vpn_l2tp:
vdom: "{{ vdom }}"
vpn_l2tp:
eip: "<your_own_value>"
enforce_ipsec: "enable"
sip: "<your_own_value>"
status: "enable"
usrgrp: "<your_own_value> (source user.group.name)"
'''
RETURN = '''
build:
description: Build number of the fortigate image
returned: always
type: str
sample: '1547'
http_method:
description: Last method used to provision the content into FortiGate
returned: always
type: str
sample: 'PUT'
http_status:
description: Last result given by FortiGate on last operation applied
returned: always
type: str
sample: "200"
mkey:
description: Master key (id) used in the last call to FortiGate
returned: success
type: str
sample: "id"
name:
description: Name of the table used to fulfill the request
returned: always
type: str
sample: "urlfilter"
path:
description: Path of the table used to fulfill the request
returned: always
type: str
sample: "webfilter"
revision:
description: Internal revision number
returned: always
type: str
sample: "17.0.2.10658"
serial:
description: Serial number of the unit
returned: always
type: str
sample: "FGVMEVYYQT3AB5352"
status:
description: Indication of the operation's result
returned: always
type: str
sample: "success"
vdom:
description: Virtual domain used
returned: always
type: str
sample: "root"
version:
description: Version of the FortiGate
returned: always
type: str
sample: "v5.6.3"
'''
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.connection import Connection
from ansible_collections.fortinet.fortios.plugins.module_utils.fortios.fortios import FortiOSHandler
from ansible_collections.fortinet.fortios.plugins.module_utils.fortios.fortios import check_legacy_fortiosapi
from ansible_collections.fortinet.fortios.plugins.module_utils.fortimanager.common import FAIL_SOCKET_MSG
def filter_vpn_l2tp_data(json):
option_list = ['eip', 'enforce_ipsec', 'sip',
'status', 'usrgrp']
dictionary = {}
for attribute in option_list:
if attribute in json and json[attribute] is not None:
dictionary[attribute] = json[attribute]
return dictionary
def underscore_to_hyphen(data):
if isinstance(data, list):
for i, elem in enumerate(data):
data[i] = underscore_to_hyphen(elem)
elif isinstance(data, dict):
new_data = {}
for k, v in data.items():
new_data[k.replace('_', '-')] = underscore_to_hyphen(v)
data = new_data
return data
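# e.g. underscore_to_hyphen({'enforce_ipsec': 'enable'})
#      -> {'enforce-ipsec': 'enable'}; nested dicts and lists are converted
#      recursively (illustrative input).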
def vpn_l2tp(data, fos):
vdom = data['vdom']
vpn_l2tp_data = data['vpn_l2tp']
filtered_data = underscore_to_hyphen(filter_vpn_l2tp_data(vpn_l2tp_data))
return fos.set('vpn',
'l2tp',
data=filtered_data,
vdom=vdom)
def is_successful_status(status):
return status['status'] == "success" or \
status['http_method'] == "DELETE" and status['http_status'] == 404
def fortios_vpn(data, fos):
if data['vpn_l2tp']:
resp = vpn_l2tp(data, fos)
else:
fos._module.fail_json(msg='missing task body: %s' % ('vpn_l2tp'))
return not is_successful_status(resp), \
resp['status'] == "success" and \
(resp['revision_changed'] if 'revision_changed' in resp else True), \
resp
def main():
mkeyname = None
fields = {
"access_token": {"required": False, "type": "str", "no_log": True},
"vdom": {"required": False, "type": "str", "default": "root"},
"vpn_l2tp": {
"required": False, "type": "dict", "default": None,
"options": {
"eip": {"required": False, "type": "str"},
"enforce_ipsec": {"required": False, "type": "str",
"choices": ["enable",
"disable"]},
"sip": {"required": False, "type": "str"},
"status": {"required": False, "type": "str",
"choices": ["enable",
"disable"]},
"usrgrp": {"required": False, "type": "str"}
}
}
}
check_legacy_fortiosapi()
module = AnsibleModule(argument_spec=fields,
supports_check_mode=False)
versions_check_result = None
if module._socket_path:
connection = Connection(module._socket_path)
if 'access_token' in module.params:
connection.set_option('access_token', module.params['access_token'])
fos = FortiOSHandler(connection, module, mkeyname)
is_error, has_changed, result = fortios_vpn(module.params, fos)
versions_check_result = connection.get_system_version()
else:
module.fail_json(**FAIL_SOCKET_MSG)
if versions_check_result and versions_check_result['matched'] is False:
module.warn("Ansible has detected version mismatch between FortOS system and galaxy, see more details by specifying option -vvv")
if not is_error:
if versions_check_result and versions_check_result['matched'] is False:
module.exit_json(changed=has_changed, version_check_warning=versions_check_result, meta=result)
else:
module.exit_json(changed=has_changed, meta=result)
else:
if versions_check_result and versions_check_result['matched'] is False:
module.fail_json(msg="Error in repo", version_check_warning=versions_check_result, meta=result)
else:
module.fail_json(msg="Error in repo", meta=result)
if __name__ == '__main__':
main()
| [
"[email protected]"
] | |
59286046da3a22e4b8f901dfac7e4065db049967 | 42a0760a051935b2e765d57c445235221a28f49e | /791_custom_sort_string.py | b4bb0b9295a5d278dee0a6d9d69b7aff5e664df3 | [] | no_license | Th3Lourde/l33tcode | 3bea3a3e7c633a2d5a36f7d76d5d776d275d8ee3 | eb6b11f97a022b66716cb3890cc56c58f62e8aa4 | refs/heads/master | 2022-12-22T19:05:04.384645 | 2022-12-18T19:38:46 | 2022-12-18T19:38:46 | 232,450,369 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,020 | py | '''
S and T are strings that contain lowercase letters.
S is composed of unique chrs.
S is sorted. Sort the characters in T that are in S.
If there are characters in T not in S, they can be
put anywhere in the resulting permutation.
Idea: go through all elements of T, put them in a dictionary.
We will want to keep track of the elements that we haven't used
and append them later.
Get the keys of the dict.
Step through S. If c ∈ S is in T, append all instances of that
character to our new permutation. Also delete that character from
our dictionary.
When we have seen all elements in S, step through the remaining
elements and add them to our permutation.
"acdbf"
"aaabbbcccdddeeeefff" <-- Some random permutation.
{"a":3, "b":3, "c":3, "d":3, "e":4, "f":3}
keys = [a,b,c,d,e,f]
stepping through S
a, is a ∈ keys?
yes, ans = "aaa"
keys = [b,c,d,e,f]
c, is c ∈ keys?
yes, ans = "aaaccc"
keys = [b,d,e,f]
d, is d ∈ keys?
yes, ans = "aaacccddd"
keys = [b,e,f]
b, is b ∈ keys?
yes, ans = "aaacccdddbbb"
keys = [e,f]
f, is f ∈ keys?
yes, ans = "aaacccdddbbbfff"
keys = [e]
Step through e, append to ans.
ans = "aaacccdddbbbfffeeee"
Test cases: Vary # in S, T, overlap.
Had s,t at zero, not zero, varied
amount of overlap, looks good, let's run it.
'''
class Solution:
def customSortString(self, S, T):
d = {}
for c in T:
if c in d:
d[c] += 1
else:
d[c] = 1
ans = ""
keys = list(d.keys())
for c in S:
if c in d:
keys.remove(c)
ans = ans + "{}".format(c)*d[c]
for c in keys:
ans = ans + "{}".format(c)*d[c]
return ans
if __name__ == '__main__':
s = Solution()
# print(s.customSortString("cba", "aaaabalaadfahdflakjdvdcd"))
print(s.customSortString("", "aaaabalaadfahdflakjdvdcd"))
print(s.customSortString("cba", ""))
print(s.customSortString("bzadc", "aaaababbdbdbdbdbdlaadfahdflakjdvdcd"))
| [
"[email protected]"
] | |
8707126656c4925ca4d8fbc116ad308a37a5d15e | 964b063c2461aad267ddd991fefaf7ab53b1ca94 | /7-kyu/remove-the-minimum/python/solution.py | 11722bbad1643eb8888c64bafc9b75e0de5391b9 | [] | no_license | lucasbflopes/codewars-solutions | 26c4e2cd1be19db50cc8c1d9fc117c51c82a2259 | 72ef2c02dde7bd0d5a691e04e3b2a383e892f84b | refs/heads/master | 2022-03-14T01:26:41.816498 | 2019-11-23T17:17:19 | 2019-11-23T17:17:19 | 114,834,447 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 267 | py | def remove_smallest(numbers):
if not numbers:
return []
else:
min = numbers[0]
for number in numbers[1:]:
if number < min:
min = number
numbers.remove(min)
return numbers
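# Quick check with illustrative inputs:
#   remove_smallest([1, 2, 3, 1, 1])  # -> [2, 3, 1, 1]
#   remove_smallest([])               # -> []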
| [
"[email protected]"
] | |
d6fa72819b42cb6d6b28b3aa70ee781aee56d539 | e93cdd365b302dcbdb0dbef8accbd61473851354 | /core/migrations/0018_field_picture.py | f10c67a1c51ad0c9dc125f0fb51f1423dbc3a9ad | [] | no_license | jonerra/radcliff | 7bfd1004a2e1c789c4894b89df7d6408c5dc5034 | 13957e48a96175f11318187f3e180efa4fba2294 | refs/heads/master | 2021-01-21T13:53:45.409435 | 2016-05-16T23:21:51 | 2016-05-16T23:21:51 | 52,484,880 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 457 | py | # -*- coding: utf-8 -*-
# Generated by Django 1.9.2 on 2016-04-14 15:56
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('core', '0017_auto_20160414_1533'),
]
operations = [
migrations.AddField(
model_name='field',
name='Picture',
field=models.ImageField(null=True, upload_to=b''),
),
]
| [
"[email protected]"
] | |
0d97b7a0ec8e8b977b8369a64d5521329bae48f6 | 66c21d53a80f1ef1c53478b2c3aa2dc8ce8aed40 | /MODEL1310110044/model.py | 9e1259aa2a444fd57e70e89a3f234a8528b7fb5c | [
"CC0-1.0",
"LicenseRef-scancode-public-domain"
] | permissive | biomodels/MODEL1310110044 | a380b839eeb2c650ef712c65a47b46b77f589a02 | 18ca04fdc1bcae46976eb10e064fc7deed8297a4 | refs/heads/master | 2021-01-10T22:28:05.137897 | 2014-10-16T05:34:05 | 2014-10-16T05:34:05 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 427 | py | import os
path = os.path.dirname(os.path.realpath(__file__))
sbmlFilePath = os.path.join(path, 'MODEL1310110044.xml')
with open(sbmlFilePath,'r') as f:
sbmlString = f.read()
def module_exists(module_name):
try:
__import__(module_name)
except ImportError:
return False
else:
return True
if module_exists('libsbml'):
import libsbml
sbml = libsbml.readSBMLFromString(sbmlString) | [
"[email protected]"
] | |
0c056a6628998a8dee81f09c8ff8bf4f17b95073 | ed1165acc8937976142f00009df5a942c02dbd24 | /database.py | fe32b9a6206e5442caefce688ca62b803109ef8d | [] | no_license | Kha/shaderwall | 1cb116b41c36ef9b20e86bfe2e16aaf4bf24e164 | afac9d484fbee345500167cfc1e2edcf5c752b5c | refs/heads/master | 2021-01-15T12:30:50.704084 | 2014-12-28T21:23:54 | 2014-12-28T21:23:54 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,015 | py | from sqlalchemy import Column, ForeignKey, Integer, String, Text, DateTime
from sqlalchemy.ext.declarative import declarative_base
from sqlalchemy.orm import sessionmaker
from sqlalchemy import create_engine
import datetime
from config import connection_url
import random
import string
def generate_authcode():
    # SystemRandom draws from os.urandom, so authcodes are not predictable the
    # way the default Mersenne Twister stream is
    rng = random.SystemRandom()
    return ''.join(rng.choice(string.ascii_lowercase + string.digits) for _ in range(32))
Base = declarative_base()
class Shader(Base):
__tablename__ = 'shader'
id = Column(Integer, primary_key=True)
source = Column(Text)
authcode = Column(String(32), default=generate_authcode)
created = Column(DateTime, default=datetime.datetime.now)
updated = Column(DateTime, default=datetime.datetime.now)
views = Column(Integer, default=0)
def setup_db():
global engine
engine = create_engine(connection_url, pool_recycle=14400)
Base.metadata.create_all(engine)
def db_session():
DBSession = sessionmaker(bind=engine)
session = DBSession()
return session
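# Minimal usage sketch (assumes config.connection_url points at a reachable
# database; the shader source below is illustrative):
#   setup_db()
#   session = db_session()
#   session.add(Shader(source='void main() {}'))
#   session.commit()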
| [
"[email protected]"
] | |
2160e87b55ae05a8679e74bdf72ae4a4de990797 | 9743d5fd24822f79c156ad112229e25adb9ed6f6 | /xai/brain/wordbase/otherforms/_lofts.py | 8a42b14b52f7fdd76fb42f78e75a7c6bdbbbc98f | [
"MIT"
] | permissive | cash2one/xai | de7adad1758f50dd6786bf0111e71a903f039b64 | e76f12c9f4dcf3ac1c7c08b0cc8844c0b0a104b6 | refs/heads/master | 2021-01-19T12:33:54.964379 | 2017-01-28T02:00:50 | 2017-01-28T02:00:50 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 214 | py |
# class header
class _LOFTS():
    def __init__(self):
        self.name = "LOFTS"
        self.definitions = 'loft'
        self.parents = []
        self.children = []
        self.properties = []
        self.jsondata = {}
        self.basic = ['loft']
| [
"[email protected]"
] | |
3e8087f0c76fb300a58687be1be04060a5486a08 | e66fa131cff76fa3fe70e7b6649fa1332159c781 | /ch10/statisticsModule.py | 457ed3ce44eb38e2f21007c4ca924d22a6b5f722 | [] | no_license | chc1129/python_tutorial | c6d97c6671a7952d8a7b838ccb8aa3c352fa6881 | 2f8b389731bafbda73c766c095d1eaadb0f99a1c | refs/heads/main | 2023-08-24T07:00:43.424652 | 2021-10-28T16:07:57 | 2021-10-28T16:07:57 | 341,532,732 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 160 | py | import statistics
data = [2.74, 1.75, 1.25, 0.25, 0.5, 1.25, 3.5]
print(statistics.mean(data))
print(statistics.median(data))
print(statistics.variance(data))
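# the module also provides spread measures, e.g. sample standard deviation
print(statistics.stdev(data))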
| [
"[email protected]"
] | |
f00e73727670667a1e871603bb509b79a7a90568 | d190750d6cb34e9d86ae96724cf4b56a2f57a74a | /tests/r/test_saving.py | d5ffb1d760e2806452c3283ac3a9f4c3fa58f4c0 | [
"Apache-2.0"
] | permissive | ROAD2018/observations | a119f61a48213d791de0620804adb8d21c2ad9fb | 2c8b1ac31025938cb17762e540f2f592e302d5de | refs/heads/master | 2021-09-24T04:28:02.725245 | 2018-09-16T23:06:30 | 2018-09-16T23:06:30 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 510 | py | from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import shutil
import sys
import tempfile
from observations.r.saving import saving
def test_saving():
"""Test module saving.py by downloading
saving.csv and testing shape of
extracted data has 100 rows and 7 columns
"""
test_path = tempfile.mkdtemp()
x_train, metadata = saving(test_path)
try:
assert x_train.shape == (100, 7)
except:
shutil.rmtree(test_path)
    raise
| [
"[email protected]"
] | |
7f03e6648a2276ac0563039f20ed29c3515d2021 | c94f888541c0c430331110818ed7f3d6b27b788a | /ak_320bc483f2434f39a3af9ec9f04d3cc0/python/antchain_sdk_ak_320bc483f2434f39a3af9ec9f04d3cc0/client.py | 924c4fe29cce941b83d6292b00ebd148f377ec46 | [
"Apache-2.0",
"MIT"
] | permissive | alipay/antchain-openapi-prod-sdk | 48534eb78878bd708a0c05f2fe280ba9c41d09ad | 5269b1f55f1fc19cf0584dc3ceea821d3f8f8632 | refs/heads/master | 2023-09-03T07:12:04.166131 | 2023-09-01T08:56:15 | 2023-09-01T08:56:15 | 275,521,177 | 9 | 10 | MIT | 2021-03-25T02:35:20 | 2020-06-28T06:22:14 | PHP | UTF-8 | Python | false | false | 23,502 | py | # -*- coding: utf-8 -*-
# This file is auto-generated, don't edit it. Thanks.
import time
from Tea.exceptions import TeaException, UnretryableException
from Tea.request import TeaRequest
from Tea.core import TeaCore
from antchain_alipay_util.antchain_utils import AntchainUtils
from typing import Dict
from antchain_sdk_ak_320bc483f2434f39a3af9ec9f04d3cc0 import models as ak__320bc_483f_2434f_39a_3af_9ec_9f_04d_3cc_0_models
from alibabacloud_tea_util.client import Client as UtilClient
from alibabacloud_tea_util import models as util_models
from alibabacloud_rpc_util.client import Client as RPCUtilClient
class Client:
_endpoint: str = None
_region_id: str = None
_access_key_id: str = None
_access_key_secret: str = None
_protocol: str = None
_user_agent: str = None
_read_timeout: int = None
_connect_timeout: int = None
_http_proxy: str = None
_https_proxy: str = None
_socks_5proxy: str = None
_socks_5net_work: str = None
_no_proxy: str = None
_max_idle_conns: int = None
_security_token: str = None
_max_idle_time_millis: int = None
_keep_alive_duration_millis: int = None
_max_requests: int = None
_max_requests_per_host: int = None
def __init__(
self,
config: ak__320bc_483f_2434f_39a_3af_9ec_9f_04d_3cc_0_models.Config,
):
"""
Init client with Config
@param config: config contains the necessary information to create a client
"""
if UtilClient.is_unset(config):
raise TeaException({
'code': 'ParameterMissing',
'message': "'config' can not be unset"
})
self._access_key_id = config.access_key_id
self._access_key_secret = config.access_key_secret
self._security_token = config.security_token
self._endpoint = config.endpoint
self._protocol = config.protocol
self._user_agent = config.user_agent
self._read_timeout = UtilClient.default_number(config.read_timeout, 20000)
self._connect_timeout = UtilClient.default_number(config.connect_timeout, 20000)
self._http_proxy = config.http_proxy
self._https_proxy = config.https_proxy
self._no_proxy = config.no_proxy
self._socks_5proxy = config.socks_5proxy
self._socks_5net_work = config.socks_5net_work
self._max_idle_conns = UtilClient.default_number(config.max_idle_conns, 60000)
self._max_idle_time_millis = UtilClient.default_number(config.max_idle_time_millis, 5)
self._keep_alive_duration_millis = UtilClient.default_number(config.keep_alive_duration_millis, 5000)
self._max_requests = UtilClient.default_number(config.max_requests, 100)
self._max_requests_per_host = UtilClient.default_number(config.max_requests_per_host, 100)
def do_request(
self,
version: str,
action: str,
protocol: str,
method: str,
pathname: str,
request: dict,
headers: Dict[str, str],
runtime: util_models.RuntimeOptions,
) -> dict:
"""
Encapsulate the request and invoke the network
@param action: api name
@param protocol: http or https
@param method: e.g. GET
@param pathname: pathname of every api
@param request: which contains request params
@param runtime: which controls some details of call api, such as retry times
@return: the response
"""
runtime.validate()
_runtime = {
'timeouted': 'retry',
'readTimeout': UtilClient.default_number(runtime.read_timeout, self._read_timeout),
'connectTimeout': UtilClient.default_number(runtime.connect_timeout, self._connect_timeout),
'httpProxy': UtilClient.default_string(runtime.http_proxy, self._http_proxy),
'httpsProxy': UtilClient.default_string(runtime.https_proxy, self._https_proxy),
'noProxy': UtilClient.default_string(runtime.no_proxy, self._no_proxy),
'maxIdleConns': UtilClient.default_number(runtime.max_idle_conns, self._max_idle_conns),
'maxIdleTimeMillis': self._max_idle_time_millis,
'keepAliveDuration': self._keep_alive_duration_millis,
'maxRequests': self._max_requests,
'maxRequestsPerHost': self._max_requests_per_host,
'retry': {
'retryable': runtime.autoretry,
'maxAttempts': UtilClient.default_number(runtime.max_attempts, 3)
},
'backoff': {
'policy': UtilClient.default_string(runtime.backoff_policy, 'no'),
'period': UtilClient.default_number(runtime.backoff_period, 1)
},
'ignoreSSL': runtime.ignore_ssl,
            # signer information
}
_last_request = None
_last_exception = None
_now = time.time()
_retry_times = 0
while TeaCore.allow_retry(_runtime.get('retry'), _retry_times, _now):
if _retry_times > 0:
_backoff_time = TeaCore.get_backoff_time(_runtime.get('backoff'), _retry_times)
if _backoff_time > 0:
TeaCore.sleep(_backoff_time)
_retry_times = _retry_times + 1
try:
_request = TeaRequest()
_request.protocol = UtilClient.default_string(self._protocol, protocol)
_request.method = method
_request.pathname = pathname
_request.query = {
'method': action,
'version': version,
'sign_type': 'HmacSHA1',
'req_time': AntchainUtils.get_timestamp(),
'req_msg_id': AntchainUtils.get_nonce(),
'access_key': self._access_key_id,
'base_sdk_version': 'TeaSDK-2.0',
'sdk_version': '1.1.0',
'_prod_code': 'ak_320bc483f2434f39a3af9ec9f04d3cc0',
'_prod_channel': 'saas'
}
if not UtilClient.empty(self._security_token):
_request.query['security_token'] = self._security_token
_request.headers = TeaCore.merge({
'host': UtilClient.default_string(self._endpoint, 'openapi.antchain.antgroup.com'),
'user-agent': UtilClient.get_user_agent(self._user_agent)
}, headers)
tmp = UtilClient.anyify_map_value(RPCUtilClient.query(request))
_request.body = UtilClient.to_form_string(tmp)
_request.headers['content-type'] = 'application/x-www-form-urlencoded'
signed_param = TeaCore.merge(_request.query,
RPCUtilClient.query(request))
_request.query['sign'] = AntchainUtils.get_signature(signed_param, self._access_key_secret)
_last_request = _request
_response = TeaCore.do_action(_request, _runtime)
raw = UtilClient.read_as_string(_response.body)
obj = UtilClient.parse_json(raw)
res = UtilClient.assert_as_map(obj)
resp = UtilClient.assert_as_map(res.get('response'))
if AntchainUtils.has_error(raw, self._access_key_secret):
raise TeaException({
'message': resp.get('result_msg'),
'data': resp,
'code': resp.get('result_code')
})
return resp
except Exception as e:
if TeaCore.is_retryable(e):
_last_exception = e
continue
raise e
raise UnretryableException(_last_request, _last_exception)
async def do_request_async(
self,
version: str,
action: str,
protocol: str,
method: str,
pathname: str,
request: dict,
headers: Dict[str, str],
runtime: util_models.RuntimeOptions,
) -> dict:
"""
Encapsulate the request and invoke the network
@param action: api name
@param protocol: http or https
@param method: e.g. GET
@param pathname: pathname of every api
@param request: which contains request params
@param runtime: which controls some details of call api, such as retry times
@return: the response
"""
runtime.validate()
_runtime = {
'timeouted': 'retry',
'readTimeout': UtilClient.default_number(runtime.read_timeout, self._read_timeout),
'connectTimeout': UtilClient.default_number(runtime.connect_timeout, self._connect_timeout),
'httpProxy': UtilClient.default_string(runtime.http_proxy, self._http_proxy),
'httpsProxy': UtilClient.default_string(runtime.https_proxy, self._https_proxy),
'noProxy': UtilClient.default_string(runtime.no_proxy, self._no_proxy),
'maxIdleConns': UtilClient.default_number(runtime.max_idle_conns, self._max_idle_conns),
'maxIdleTimeMillis': self._max_idle_time_millis,
'keepAliveDuration': self._keep_alive_duration_millis,
'maxRequests': self._max_requests,
'maxRequestsPerHost': self._max_requests_per_host,
'retry': {
'retryable': runtime.autoretry,
'maxAttempts': UtilClient.default_number(runtime.max_attempts, 3)
},
'backoff': {
'policy': UtilClient.default_string(runtime.backoff_policy, 'no'),
'period': UtilClient.default_number(runtime.backoff_period, 1)
},
'ignoreSSL': runtime.ignore_ssl,
            # signer information
}
_last_request = None
_last_exception = None
_now = time.time()
_retry_times = 0
while TeaCore.allow_retry(_runtime.get('retry'), _retry_times, _now):
if _retry_times > 0:
_backoff_time = TeaCore.get_backoff_time(_runtime.get('backoff'), _retry_times)
if _backoff_time > 0:
TeaCore.sleep(_backoff_time)
_retry_times = _retry_times + 1
try:
_request = TeaRequest()
_request.protocol = UtilClient.default_string(self._protocol, protocol)
_request.method = method
_request.pathname = pathname
_request.query = {
'method': action,
'version': version,
'sign_type': 'HmacSHA1',
'req_time': AntchainUtils.get_timestamp(),
'req_msg_id': AntchainUtils.get_nonce(),
'access_key': self._access_key_id,
'base_sdk_version': 'TeaSDK-2.0',
'sdk_version': '1.1.0',
'_prod_code': 'ak_320bc483f2434f39a3af9ec9f04d3cc0',
'_prod_channel': 'saas'
}
if not UtilClient.empty(self._security_token):
_request.query['security_token'] = self._security_token
_request.headers = TeaCore.merge({
'host': UtilClient.default_string(self._endpoint, 'openapi.antchain.antgroup.com'),
'user-agent': UtilClient.get_user_agent(self._user_agent)
}, headers)
tmp = UtilClient.anyify_map_value(RPCUtilClient.query(request))
_request.body = UtilClient.to_form_string(tmp)
_request.headers['content-type'] = 'application/x-www-form-urlencoded'
signed_param = TeaCore.merge(_request.query,
RPCUtilClient.query(request))
_request.query['sign'] = AntchainUtils.get_signature(signed_param, self._access_key_secret)
_last_request = _request
_response = await TeaCore.async_do_action(_request, _runtime)
raw = await UtilClient.read_as_string_async(_response.body)
obj = UtilClient.parse_json(raw)
res = UtilClient.assert_as_map(obj)
resp = UtilClient.assert_as_map(res.get('response'))
if AntchainUtils.has_error(raw, self._access_key_secret):
raise TeaException({
'message': resp.get('result_msg'),
'data': resp,
'code': resp.get('result_code')
})
return resp
except Exception as e:
if TeaCore.is_retryable(e):
_last_exception = e
continue
raise e
raise UnretryableException(_last_request, _last_exception)
def sign_antsaas_staffingc_contract_send(
self,
request: ak__320bc_483f_2434f_39a_3af_9ec_9f_04d_3cc_0_models.SignAntsaasStaffingcContractSendRequest,
) -> ak__320bc_483f_2434f_39a_3af_9ec_9f_04d_3cc_0_models.SignAntsaasStaffingcContractSendResponse:
"""
        Description: API for initiating a contract signing request
        Summary: Initiate signing
"""
runtime = util_models.RuntimeOptions()
headers = {}
return self.sign_antsaas_staffingc_contract_send_ex(request, headers, runtime)
async def sign_antsaas_staffingc_contract_send_async(
self,
request: ak__320bc_483f_2434f_39a_3af_9ec_9f_04d_3cc_0_models.SignAntsaasStaffingcContractSendRequest,
) -> ak__320bc_483f_2434f_39a_3af_9ec_9f_04d_3cc_0_models.SignAntsaasStaffingcContractSendResponse:
"""
        Description: API for initiating a contract signing request
        Summary: Initiate signing
"""
runtime = util_models.RuntimeOptions()
headers = {}
return await self.sign_antsaas_staffingc_contract_send_ex_async(request, headers, runtime)
def sign_antsaas_staffingc_contract_send_ex(
self,
request: ak__320bc_483f_2434f_39a_3af_9ec_9f_04d_3cc_0_models.SignAntsaasStaffingcContractSendRequest,
headers: Dict[str, str],
runtime: util_models.RuntimeOptions,
) -> ak__320bc_483f_2434f_39a_3af_9ec_9f_04d_3cc_0_models.SignAntsaasStaffingcContractSendResponse:
"""
        Description: API for initiating a contract signing request
        Summary: Initiate signing
"""
if not UtilClient.is_unset(request.file_object):
upload_req = ak__320bc_483f_2434f_39a_3af_9ec_9f_04d_3cc_0_models.CreateAntcloudGatewayxFileUploadRequest(
auth_token=request.auth_token,
api_code='antsaas.staffingc.contract.send.sign',
file_name=request.file_object_name
)
upload_resp = self.create_antcloud_gatewayx_file_upload_ex(upload_req, headers, runtime)
if not AntchainUtils.is_success(upload_resp.result_code, 'ok'):
sign_antsaas_staffingc_contract_send_response = ak__320bc_483f_2434f_39a_3af_9ec_9f_04d_3cc_0_models.SignAntsaasStaffingcContractSendResponse(
req_msg_id=upload_resp.req_msg_id,
result_code=upload_resp.result_code,
result_msg=upload_resp.result_msg
)
return sign_antsaas_staffingc_contract_send_response
upload_headers = AntchainUtils.parse_upload_headers(upload_resp.upload_headers)
AntchainUtils.put_object(request.file_object, upload_headers, upload_resp.upload_url)
request.file_id = upload_resp.file_id
UtilClient.validate_model(request)
return TeaCore.from_map(
ak__320bc_483f_2434f_39a_3af_9ec_9f_04d_3cc_0_models.SignAntsaasStaffingcContractSendResponse(),
self.do_request('1.0', 'antsaas.staffingc.contract.send.sign', 'HTTPS', 'POST', f'/gateway.do', TeaCore.to_map(request), headers, runtime)
)
async def sign_antsaas_staffingc_contract_send_ex_async(
self,
request: ak__320bc_483f_2434f_39a_3af_9ec_9f_04d_3cc_0_models.SignAntsaasStaffingcContractSendRequest,
headers: Dict[str, str],
runtime: util_models.RuntimeOptions,
) -> ak__320bc_483f_2434f_39a_3af_9ec_9f_04d_3cc_0_models.SignAntsaasStaffingcContractSendResponse:
"""
        Description: API for initiating a contract signing request
        Summary: Initiate signing
"""
if not UtilClient.is_unset(request.file_object):
upload_req = ak__320bc_483f_2434f_39a_3af_9ec_9f_04d_3cc_0_models.CreateAntcloudGatewayxFileUploadRequest(
auth_token=request.auth_token,
api_code='antsaas.staffingc.contract.send.sign',
file_name=request.file_object_name
)
upload_resp = await self.create_antcloud_gatewayx_file_upload_ex_async(upload_req, headers, runtime)
if not AntchainUtils.is_success(upload_resp.result_code, 'ok'):
sign_antsaas_staffingc_contract_send_response = ak__320bc_483f_2434f_39a_3af_9ec_9f_04d_3cc_0_models.SignAntsaasStaffingcContractSendResponse(
req_msg_id=upload_resp.req_msg_id,
result_code=upload_resp.result_code,
result_msg=upload_resp.result_msg
)
return sign_antsaas_staffingc_contract_send_response
upload_headers = AntchainUtils.parse_upload_headers(upload_resp.upload_headers)
await AntchainUtils.put_object_async(request.file_object, upload_headers, upload_resp.upload_url)
request.file_id = upload_resp.file_id
UtilClient.validate_model(request)
return TeaCore.from_map(
ak__320bc_483f_2434f_39a_3af_9ec_9f_04d_3cc_0_models.SignAntsaasStaffingcContractSendResponse(),
await self.do_request_async('1.0', 'antsaas.staffingc.contract.send.sign', 'HTTPS', 'POST', f'/gateway.do', TeaCore.to_map(request), headers, runtime)
)
def query_antsaas_staffingc_contract_sign(
self,
request: ak__320bc_483f_2434f_39a_3af_9ec_9f_04d_3cc_0_models.QueryAntsaasStaffingcContractSignRequest,
) -> ak__320bc_483f_2434f_39a_3af_9ec_9f_04d_3cc_0_models.QueryAntsaasStaffingcContractSignResponse:
"""
        Description: Query the contract signing result
        Summary: Query signing result
"""
runtime = util_models.RuntimeOptions()
headers = {}
return self.query_antsaas_staffingc_contract_sign_ex(request, headers, runtime)
async def query_antsaas_staffingc_contract_sign_async(
self,
request: ak__320bc_483f_2434f_39a_3af_9ec_9f_04d_3cc_0_models.QueryAntsaasStaffingcContractSignRequest,
) -> ak__320bc_483f_2434f_39a_3af_9ec_9f_04d_3cc_0_models.QueryAntsaasStaffingcContractSignResponse:
"""
        Description: Query the contract signing result
        Summary: Query signing result
"""
runtime = util_models.RuntimeOptions()
headers = {}
return await self.query_antsaas_staffingc_contract_sign_ex_async(request, headers, runtime)
def query_antsaas_staffingc_contract_sign_ex(
self,
request: ak__320bc_483f_2434f_39a_3af_9ec_9f_04d_3cc_0_models.QueryAntsaasStaffingcContractSignRequest,
headers: Dict[str, str],
runtime: util_models.RuntimeOptions,
) -> ak__320bc_483f_2434f_39a_3af_9ec_9f_04d_3cc_0_models.QueryAntsaasStaffingcContractSignResponse:
"""
        Description: Query the contract signing result
        Summary: Query signing result
"""
UtilClient.validate_model(request)
return TeaCore.from_map(
ak__320bc_483f_2434f_39a_3af_9ec_9f_04d_3cc_0_models.QueryAntsaasStaffingcContractSignResponse(),
self.do_request('1.0', 'antsaas.staffingc.contract.sign.query', 'HTTPS', 'POST', f'/gateway.do', TeaCore.to_map(request), headers, runtime)
)
async def query_antsaas_staffingc_contract_sign_ex_async(
self,
request: ak__320bc_483f_2434f_39a_3af_9ec_9f_04d_3cc_0_models.QueryAntsaasStaffingcContractSignRequest,
headers: Dict[str, str],
runtime: util_models.RuntimeOptions,
) -> ak__320bc_483f_2434f_39a_3af_9ec_9f_04d_3cc_0_models.QueryAntsaasStaffingcContractSignResponse:
"""
        Description: Query the contract signing result
        Summary: Query signing result
"""
UtilClient.validate_model(request)
return TeaCore.from_map(
ak__320bc_483f_2434f_39a_3af_9ec_9f_04d_3cc_0_models.QueryAntsaasStaffingcContractSignResponse(),
await self.do_request_async('1.0', 'antsaas.staffingc.contract.sign.query', 'HTTPS', 'POST', f'/gateway.do', TeaCore.to_map(request), headers, runtime)
)
def create_antcloud_gatewayx_file_upload(
self,
request: ak__320bc_483f_2434f_39a_3af_9ec_9f_04d_3cc_0_models.CreateAntcloudGatewayxFileUploadRequest,
) -> ak__320bc_483f_2434f_39a_3af_9ec_9f_04d_3cc_0_models.CreateAntcloudGatewayxFileUploadResponse:
"""
        Description: Create a file upload submitted via HTTP PUT
        Summary: Create file upload
"""
runtime = util_models.RuntimeOptions()
headers = {}
return self.create_antcloud_gatewayx_file_upload_ex(request, headers, runtime)
async def create_antcloud_gatewayx_file_upload_async(
self,
request: ak__320bc_483f_2434f_39a_3af_9ec_9f_04d_3cc_0_models.CreateAntcloudGatewayxFileUploadRequest,
) -> ak__320bc_483f_2434f_39a_3af_9ec_9f_04d_3cc_0_models.CreateAntcloudGatewayxFileUploadResponse:
"""
        Description: Create a file upload submitted via HTTP PUT
        Summary: Create file upload
"""
runtime = util_models.RuntimeOptions()
headers = {}
return await self.create_antcloud_gatewayx_file_upload_ex_async(request, headers, runtime)
def create_antcloud_gatewayx_file_upload_ex(
self,
request: ak__320bc_483f_2434f_39a_3af_9ec_9f_04d_3cc_0_models.CreateAntcloudGatewayxFileUploadRequest,
headers: Dict[str, str],
runtime: util_models.RuntimeOptions,
) -> ak__320bc_483f_2434f_39a_3af_9ec_9f_04d_3cc_0_models.CreateAntcloudGatewayxFileUploadResponse:
"""
        Description: Create a file upload submitted via HTTP PUT
        Summary: Create file upload
"""
UtilClient.validate_model(request)
return TeaCore.from_map(
ak__320bc_483f_2434f_39a_3af_9ec_9f_04d_3cc_0_models.CreateAntcloudGatewayxFileUploadResponse(),
self.do_request('1.0', 'antcloud.gatewayx.file.upload.create', 'HTTPS', 'POST', f'/gateway.do', TeaCore.to_map(request), headers, runtime)
)
async def create_antcloud_gatewayx_file_upload_ex_async(
self,
request: ak__320bc_483f_2434f_39a_3af_9ec_9f_04d_3cc_0_models.CreateAntcloudGatewayxFileUploadRequest,
headers: Dict[str, str],
runtime: util_models.RuntimeOptions,
) -> ak__320bc_483f_2434f_39a_3af_9ec_9f_04d_3cc_0_models.CreateAntcloudGatewayxFileUploadResponse:
"""
        Description: Create a file upload submitted via HTTP PUT
        Summary: Create file upload
"""
UtilClient.validate_model(request)
return TeaCore.from_map(
ak__320bc_483f_2434f_39a_3af_9ec_9f_04d_3cc_0_models.CreateAntcloudGatewayxFileUploadResponse(),
await self.do_request_async('1.0', 'antcloud.gatewayx.file.upload.create', 'HTTPS', 'POST', f'/gateway.do', TeaCore.to_map(request), headers, runtime)
)
| [
"[email protected]"
] | |
119d98cbfe961151ff8f55209511e83d900e5f00 | 781e2692049e87a4256320c76e82a19be257a05d | /all_data/exercism_data/python/point-mutations/80163ab58f714c95a8b2ee96815a1fcb.py | 1d9ab6a4040ca28d3ccf3327a89a15e4fdbfb616 | [] | no_license | itsolutionscorp/AutoStyle-Clustering | 54bde86fe6dbad35b568b38cfcb14c5ffaab51b0 | be0e2f635a7558f56c61bc0b36c6146b01d1e6e6 | refs/heads/master | 2020-12-11T07:27:19.291038 | 2016-03-16T03:18:00 | 2016-03-16T03:18:42 | 59,454,921 | 4 | 0 | null | 2016-05-23T05:40:56 | 2016-05-23T05:40:56 | null | UTF-8 | Python | false | false | 210 | py | class DNA(object):
def __init__(self, strand):
self.strand = strand
def hamming_distance(self, strand):
return len([(x,y) for (x,y) in zip(strand, self.strand) if
x != y])
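# Usage sketch with illustrative strands:
#   DNA('GGACTGA').hamming_distance('GGACGGA')  # -> 1 (position 4 differs)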
| [
"[email protected]"
] | |
c4a8cd354d040c15afd379aff695a191ded6cdc5 | 9743d5fd24822f79c156ad112229e25adb9ed6f6 | /xai/brain/wordbase/verbs/_redistributing.py | 7376d9ca1ac4caa9854da9d4a805a8cb3a3c1b21 | [
"MIT"
] | permissive | cash2one/xai | de7adad1758f50dd6786bf0111e71a903f039b64 | e76f12c9f4dcf3ac1c7c08b0cc8844c0b0a104b6 | refs/heads/master | 2021-01-19T12:33:54.964379 | 2017-01-28T02:00:50 | 2017-01-28T02:00:50 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 282 | py |
from xai.brain.wordbase.verbs._redistribute import _REDISTRIBUTE
# class header
class _REDISTRIBUTING(_REDISTRIBUTE):
    def __init__(self):
        _REDISTRIBUTE.__init__(self)
        self.name = "REDISTRIBUTING"
        self.specie = 'verbs'
        self.basic = "redistribute"
        self.jsondata = {}
| [
"[email protected]"
] | |
9a8e84ef6d87a09c15df0221f95c9ccc202aa040 | f8a58ae93ff78c59296a136dff721c5ef666790b | /Starting small/Sorting a list using the sort- method.py | 4d32bda30c9c7681623df56368518f88d0b25c23 | [] | no_license | Anthonymcqueen21/Python-Programs | cb116b36e3c774ef51dba7f1fd29561767f89c7f | 127d5cbab4e7a2d0009d65075508cbaf5a6b6dc2 | refs/heads/master | 2021-06-26T18:03:30.212592 | 2017-09-15T19:59:21 | 2017-09-15T19:59:21 | 79,636,950 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 84 | py | cars = ['Ford', 'Subaru', 'Mitsubishi', 'Nissan', 'Pontiac]
cars.sort()
print(cars)
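# sort() also accepts reverse=True for descending alphabetical order
cars.sort(reverse=True)
print(cars)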
| [
"[email protected]"
] | |
39fa71b52214a84429c1e12d21c534e2b0f13a00 | de24f83a5e3768a2638ebcf13cbe717e75740168 | /moodledata/vpl_data/102/usersdata/195/49999/submittedfiles/av1_2.py | aa7d3b44f7edf2b0de15cd659ec5a4cc9d377f3f | [] | no_license | rafaelperazzo/programacao-web | 95643423a35c44613b0f64bed05bd34780fe2436 | 170dd5440afb9ee68a973f3de13a99aa4c735d79 | refs/heads/master | 2021-01-12T14:06:25.773146 | 2017-12-22T16:05:45 | 2017-12-22T16:05:45 | 69,566,344 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 199 | py | # -*- coding: utf-8 -*-
import math
a = int(input('digite a: '))
b = int(input('digite b: '))
c = int(input('digite c: '))
d = int(input('digite d: '))
if a != b and a == c and a != d:
    print('V')
else:
    print('F')
| [
"[email protected]"
] | |
6619ecbf5ce70002cdfdedb21633647ee067064e | 3d8aae8aa43e0fbd8a8cffc4fa2cd67419059d66 | /module_PyQt/xtest/test_echo_array.py | 33ca00c7a33595dfb60b15d778273c5fa491ec50 | [] | no_license | onitonitonito/k_mooc_reboot | b8273b7e9fa3fc5958bca57c39f2f3a9108964f1 | 68c8c6a94adc99005fb0fc8c38c416f902d37888 | refs/heads/main | 2021-07-21T22:32:26.080330 | 2021-07-04T02:22:08 | 2021-07-04T02:22:08 | 109,581,972 | 0 | 0 | null | 2020-05-05T22:28:26 | 2017-11-05T13:30:03 | Python | UTF-8 | Python | false | false | 945 | py | """
# how to stack echo file
"""
print(__doc__)
import random
import _add_syspath_root
from assets.config import dir_statics
from string import ascii_letters
SYMBOLS = [sym for sym in "!@#$%^&*()_-+=,.?/|;:{}~{}" + ascii_letters]
RANDOM_START = (0, 39)
RANDOM_END = (40, 78)
LINES = 50
REPEAT = 10
FILE_NAME = dir_statics + 'test_echo_array.txt'
def main():
for i in range(REPEAT):
print(*get_echo_array(write=True))
def get_echo_array(write=False):
echo_array = []
for i in range(LINES):
random.shuffle(SYMBOLS)
x1, x2 = (random.randint(*RANDOM_START), random.randint(*RANDOM_END))
string_shuffled = "".join(SYMBOLS)
add_string = string_shuffled[x1:x2]
echo_array.append(f"\n{add_string}")
if write:
with open(file=FILE_NAME, mode='w', encoding='utf8') as f:
f.write("".join(echo_array))
return echo_array
if __name__ == '__main__':
main()
| [
"[email protected]"
] | |
c4ea0a8154024f7e95ffa9605300406c7e7de34f | e1b8fb9a5500516f28d3d7e9a5f259c49ef35f14 | /top/api/rest/UserGetRequest.py | b3bd7048fb45379401d4afc382aa2472248e42f6 | [] | no_license | htom78/taobao_comet_py | 9224dbca1a413a54bcc5569873e4c7a9fc9ba059 | ad8b2e983a14d3ab7665244449f79dd72f390815 | refs/heads/master | 2020-05-17T10:47:28.369191 | 2013-08-27T08:50:59 | 2013-08-27T08:50:59 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 312 | py | '''
Created by auto_sdk on 2013-06-16 16:36:02
'''
from top.api.base import RestApi
class UserGetRequest(RestApi):
def __init__(self,domain='gw.api.taobao.com',port=80):
RestApi.__init__(self,domain, port)
self.fields = None
self.nick = None
def getapiname(self):
return 'taobao.user.get'
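# Minimal usage sketch; request execution lives in the RestApi base class and
# the field values below are purely illustrative:
#   req = UserGetRequest()
#   req.fields = 'user_id,nick'
#   req.nick = 'example_nick'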
| [
"[email protected]"
] | |
bf4d638465250a538af5ac031e32f9596dbd63e1 | 4b773103a5000a0a980739dd65426878c90dc098 | /core/models.py | 9e45dbcac258c6b24c9a9cf4ee079215ea16be1f | [] | no_license | gpchelkin/grading_system | 6ef693a89700fb86ce9567e33f697fb529c34297 | e34f85fd1d9ac6bad892222d68516bbab5d7cf23 | refs/heads/master | 2020-06-15T07:10:27.193190 | 2016-12-20T14:12:32 | 2016-12-20T14:12:32 | 75,315,919 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,319 | py | # coding=utf-8
from django.contrib.auth.base_user import AbstractBaseUser
from django.contrib.auth.models import AbstractUser
from django.core.validators import MaxValueValidator, MinValueValidator
from django.db import models
from curriculum.models import ClassesType
SEMESTER_CHOICES = (
('1', '1'),
('2', '2'),
('3', '3'),
('4', '4'),
('5', '5'),
('6', '6'),
('7', '7'),
('8', '8'),
('9', '9'),
('10', '10'),
('11', '11'),
('12', '12'),
)
COURSE_CHOICE = (
('1', '1'),
('2', '2'),
('3', '3'),
('4', '4'),
('5', '5'),
('6', '6'),
)
class User(AbstractUser):
is_student = models.BooleanField("Этот пользователь студент", default=False)
is_teacher = models.BooleanField("Этот пользователь учитель", default=False)
class Group(models.Model):
name = models.CharField(verbose_name=u'Группа', max_length=10)
def __unicode__(self):
return u'{}'.format(self.name)
class Student(models.Model):
year_start = models.IntegerField(verbose_name=u'Год поступления', validators=[MaxValueValidator(3000), MinValueValidator(1970)])
year_end = models.IntegerField(verbose_name=u'Год окончания', validators=[MaxValueValidator(3000), MinValueValidator(1970)])
user_group_full_name = models.ForeignKey(verbose_name=u'Группа студента', to=Group)
user_connection = models.OneToOneField(verbose_name=u'Пользователь', to=User)
def __unicode__(self):
return u'{} {}'.format(self.user_connection.first_name, self.user_connection.last_name)
class Subject(models.Model):
name = models.CharField(verbose_name=u'Предмет', max_length=50)
subject_group = models.ManyToManyField(verbose_name=u'Группы', to=Group)
subject_type = models.ForeignKey(verbose_name=u'Тип предмета', to=ClassesType)
def __unicode__(self):
return u'{} - {}'.format(self.name, self.subject_type)
class Teacher(models.Model):
all_subjects = models.ManyToManyField(verbose_name=u'Предметы', to=Subject)
user_connection = models.OneToOneField(User)
def __unicode__(self):
return u'{} {}'.format(self.user_connection.first_name, self.user_connection.last_name)
| [
"[email protected]"
] | |
0fd11a4dfafdc5db0041929e7300c6f3c2bac9da | 6fa7f99d3d3d9b177ef01ebf9a9da4982813b7d4 | /oepiudBYC7PT7TXAM_12.py | b4db6ac9bfd7b1baa70fc45eadc33d11817f752d | [] | no_license | daniel-reich/ubiquitous-fiesta | 26e80f0082f8589e51d359ce7953117a3da7d38c | 9af2700dbe59284f5697e612491499841a6c126f | refs/heads/master | 2023-04-05T06:40:37.328213 | 2021-04-06T20:17:44 | 2021-04-06T20:17:44 | 355,318,759 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 203 | py |
def parse_roman_numeral(num):
    d = {'M': 1000, 'D': 500, 'C': 100, 'L': 50, 'X': 10, 'V': 5, 'I': 1}
    # a digit is subtracted when a larger digit follows it (e.g. IV, XC)
    return sum(d[num[i]] if (i + 1 == len(num) or d[num[i]] >= d[num[i + 1]])
               else -d[num[i]]
               for i in range(len(num)))
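# e.g. parse_roman_numeral('XL') -> 40, parse_roman_numeral('MCMXCIV') -> 1994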
| [
"[email protected]"
] | |
aa9043d32112f48f44454ef11cc5e8715ec14cc7 | f320d83c1b6f4854cb808a17a2dbb8827051636e | /setfreq/ParaSetTest.py | d9afb304b9f8f27e4bf782ec4f1758761566c8d9 | [] | no_license | JiahuiSun/Digital-Signal-Analyzer-based-on-SDR | f5214f2b2b36d4a24896f7d0c4a712979c236fd0 | 1e57dbb9cfcec7c0cb0a3f2335f3e68ecd2694d6 | refs/heads/master | 2020-04-04T19:50:16.131461 | 2018-11-06T12:51:42 | 2018-11-06T12:51:42 | 156,222,484 | 2 | 1 | null | null | null | null | UTF-8 | Python | false | false | 179 | py | import ParaSetCliSock
import time
f1 = 1500
f2 = 1600
##while True:
ParaSetCliSock.set_param('rx_freq',f1)
##time.sleep(5)
ParaSetCliSock.set_param('tx_freq',f2)
##time.sleep(5)
| [
"[email protected]"
] | |
6cf93c00ab4fa724d6ba734a936d3ff553a95395 | 6e3f97742562ff3cdf9372f54320c78e5c72fe97 | /apps/partidos/serializers.py | 984cd9ae22ba0f32acb4b3a184c75e363f0e068c | [] | no_license | desarrollosimagos/exit_poll | 6892e9ad504691fa44eb5b599881c7fb044b260d | e4572e2b222cf6b5de8a221ac300ccb0062f8e41 | refs/heads/master | 2021-01-01T16:36:31.726898 | 2017-07-20T18:52:30 | 2017-07-20T18:52:30 | 97,868,339 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 500 | py | from rest_framework import serializers
from .models import Partidos
class PartidosSerializer(serializers.ModelSerializer):
"""
Clase donde llamamos al modelo `Partidos` y serializamos los campos
"""
class Meta:
model = Partidos
fields = ('id', 'n_partidos','siglas','foto_partido','nom_presidente',
'ape_presidente', 'correo','twitter','telefono','partido_binario',
'user_create','user_update','fecha_create','fecha_update',)
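# Hypothetical usage from a view or shell (instance name is illustrative):
#   PartidosSerializer(partido).data  # -> dict keyed by the fields above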
| [
"[email protected]"
] | |
699ab4ccd658741ab0ee7d42f5a80900e4a99ca3 | a137466dbaa5d704cd5a15ab9dfd17907b24be04 | /utility/aggregator.py | dd85c5817e5daa2bc1e2d53f3a9685154e121927 | [
"Apache-2.0"
] | permissive | xlnwel/g2rl | 92c15b8b9d0cd75b6d2dc8df20e6717e1a621ff6 | e1261fdd2ce70724a99ddd174616cf013917b241 | refs/heads/master | 2023-08-30T10:29:44.169523 | 2021-11-08T07:50:43 | 2021-11-08T07:50:43 | 422,582,891 | 1 | 1 | null | null | null | null | UTF-8 | Python | false | false | 437 | py | class Aggregator:
"""Allows accumulating values and computing their mean."""
def __init__(self):
self.total = 0
self.last = 0
self.reset()
def reset(self):
self.sum = 0.
self.count = 0
def average(self):
return self.sum / self.count if self.count else 0.
def add(self, v):
self.last = v
self.total += v
self.sum += v
self.count += 1
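# Usage sketch (illustrative values): reset() clears sum/count, so average()
# starts over, while total and last keep accumulating across resets.
#   agg = Aggregator()
#   agg.add(2.0); agg.add(4.0)
#   agg.average()  # -> 3.0
#   agg.reset()
#   agg.average()  # -> 0.0; agg.total is still 6.0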
| [
"[email protected]"
] | |
4d5ea0573f752d71751f6d8611db7e239774bfc2 | ea5a801283e5c8dd822d755aa8824e9fd17c9ecf | /nomuraholdings/spiders/nomura.py | 9dde6136aea2930291b6ca2abd5eacd2b258ca05 | [] | no_license | daniel-kanchev/nomuraholdings | 3a5c98c2540fac135346267504eedd8bc8375ee1 | 4cf5faeeba53cf1122b1efe7698bac71af21b8fb | refs/heads/main | 2023-03-03T14:54:43.454865 | 2021-02-11T07:33:46 | 2021-02-11T07:33:46 | 337,959,237 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,595 | py | import scrapy
from scrapy.loader import ItemLoader
from itemloaders.processors import TakeFirst
from datetime import datetime
from nomuraholdings.items import Article
class NomuraSpider(scrapy.Spider):
name = 'nomura'
start_urls = ['https://www.nomuraholdings.com/news/nr/index.html']
def parse(self, response):
links = response.xpath('//table[@class="js-selectList"]//a/@href').getall()
yield from response.follow_all(links, self.parse_year)
def parse_year(self, response):
links = response.xpath('//p[@class="c-List-info__link"]/a/@href').getall()
yield from response.follow_all(links, self.parse_article)
def parse_article(self, response):
if 'pdf' in response.url:
return
item = ItemLoader(Article())
item.default_output_processor = TakeFirst()
title = response.xpath('//h1[@class="u-h1"]/text()').get()
if title:
title = title.strip()
else:
return
date = response.xpath('//div[@class="news-header__date"]/p/text()[1]').get()
if date:
date = datetime.strptime(date.strip(), '%B %d, %Y')
date = date.strftime('%Y/%m/%d')
content = response.xpath('//p[@class="news-paragraph"]//text()').getall()
content = [text for text in content if text.strip()]
content = "\n".join(content).strip()
item.add_value('title', title)
item.add_value('date', date)
item.add_value('link', response.url)
item.add_value('content', content)
return item.load_item()
| [
"[email protected]"
] | |
22c627fae44f0079e535d66064c91480db572937 | 7bad6ecb04b57f4a692426bb23766cf0b5916d3d | /microdrop/core_plugins/command_plugin/plugin.py | 8fa27ab553a21e769406c2338ae015ca0488a243 | [
"BSD-3-Clause"
] | permissive | cfobel/microdrop | b943bed4a765c5419b6dead8344dbff420af283e | 721f2c9c040406bb3c70a9928923aad10a725b96 | refs/heads/master | 2020-04-15T18:50:20.035858 | 2018-10-23T14:26:58 | 2018-10-23T14:31:04 | 164,927,192 | 0 | 0 | BSD-3-Clause | 2019-01-09T19:47:11 | 2019-01-09T19:47:10 | null | UTF-8 | Python | false | false | 3,735 | py | from multiprocessing import Process
import logging
import sys
from zmq_plugin.plugin import Plugin as ZmqPlugin
from zmq_plugin.schema import decode_content_data
import pandas as pd
from logging_helpers import _L #: .. versionadded:: 2.20
logger = logging.getLogger(__name__)
class CommandZmqPlugin(ZmqPlugin):
'''
API for registering commands.
'''
def __init__(self, parent, *args, **kwargs):
self.parent = parent
self.control_board = None
self._commands = pd.DataFrame(None, columns=['namespace',
'plugin_name',
'command_name', 'title'])
super(CommandZmqPlugin, self).__init__(*args, **kwargs)
def on_execute__unregister_command(self, request):
data = decode_content_data(request)
commands = self._commands
ix = commands.loc[(commands.namespace == data['namespace']) &
(commands.plugin_name == data['plugin_name']) &
(commands.command_name == data['command_name']) &
(commands.title == data['title'])].index
self._commands.drop(ix, inplace=True)
self._commands.reset_index(drop=True, inplace=True)
return self.commands
def on_execute__register_command(self, request):
data = decode_content_data(request)
plugin_name = data.get('plugin_name', request['header']['source'])
return self.register_command(plugin_name, data['command_name'],
namespace=data.get('namespace', ''),
title=data.get('title'))
def on_execute__get_commands(self, request):
return self.commands
def register_command(self, plugin_name, command_name, namespace='',
title=None):
'''
Register command.
Each command is unique by:
(namespace, plugin_name, command_name)
'''
if title is None:
title = (command_name[:1].upper() +
command_name[1:]).replace('_', ' ')
row_i = dict(zip(self._commands, [namespace, plugin_name, command_name,
title]))
self._commands = self._commands.append(row_i, ignore_index=True)
return self.commands
@property
def commands(self):
'''
Returns
-------
pd.Series
Series of command groups, where each group name maps to a series of
commands.
'''
return self._commands.copy()
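
# Usage sketch (hypothetical hub URI; assumes a zmq-plugin hub is running):
#   plugin = CommandZmqPlugin(None, 'command_plugin', 'tcp://localhost:31000')
#   plugin.register_command('dmf_control', 'clear_electrodes',
#                           namespace='global', title='Clear electrodes')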
def parse_args(args=None):
"""Parses arguments, returns (options, args)."""
from argparse import ArgumentParser
    if args is None:
        # argparse expects the argument list without the program name.
        args = sys.argv[1:]
parser = ArgumentParser(description='ZeroMQ Plugin process.')
log_levels = ('critical', 'error', 'warning', 'info', 'debug', 'notset')
parser.add_argument('-l', '--log-level', type=str, choices=log_levels,
default='info')
parser.add_argument('hub_uri')
parser.add_argument('name', type=str)
    args = parser.parse_args(args)
args.log_level = getattr(logging, args.log_level.upper())
return args
if __name__ == '__main__':
from zmq_plugin.bin.plugin import run_plugin
    def run_plugin_process(uri, name, subscribe_options, log_level):
        # Construct the plugin task and run it in a separate process,
        # forwarding the task and log level to ``run_plugin``.
        task = CommandZmqPlugin(None, name, uri, subscribe_options)
        plugin_process = Process(target=run_plugin,
                                 args=(task, log_level))
        plugin_process.daemon = False
        plugin_process.start()
args = parse_args()
logging.basicConfig(level=args.log_level)
task = CommandZmqPlugin(None, args.name, args.hub_uri, {})
run_plugin(task, args.log_level)
| [
"[email protected]"
] | |
d84c34bf4d3fd0906684953b37dfca5d0bb16e5f | eb9f655206c43c12b497c667ba56a0d358b6bc3a | /python/testData/completion/dictLiteralCompletion/NotEmptyLiteralsInAssignments/main.py | d36bc2b54d3906268fceea50f594afaefc1e9506 | [
"Apache-2.0"
] | permissive | JetBrains/intellij-community | 2ed226e200ecc17c037dcddd4a006de56cd43941 | 05dbd4575d01a213f3f4d69aa4968473f2536142 | refs/heads/master | 2023-09-03T17:06:37.560889 | 2023-09-03T11:51:00 | 2023-09-03T12:12:27 | 2,489,216 | 16,288 | 6,635 | Apache-2.0 | 2023-09-12T07:41:58 | 2011-09-30T13:33:05 | null | UTF-8 | Python | false | false | 109 | py | from typing import TypedDict
class Point(TypedDict):
x: int
y: int
p: Point = {"x": 42, <caret>}
| [
"[email protected]"
] | |
6a59eb575683dbf700ea669d40428d6a1b5b91f2 | d74913eda69ee1799c887a645c574fa5a4da8fba | /code/metric_functions/metric_model1_func.py | 1b710aff7f2d5c9b1716dee526ab7dcf74bca1c0 | [
"Apache-2.0"
] | permissive | Fweek/pyMETRIC | efd6fe8c6ea74f5c87d19ecbb6653549fb3ba943 | 0e7eec57fedd33b81e6e7efe58290f50ebbebfab | refs/heads/master | 2021-05-03T10:23:15.066106 | 2018-02-06T19:32:36 | 2018-02-06T19:32:36 | 120,534,046 | 1 | 0 | null | 2018-02-06T23:00:49 | 2018-02-06T23:00:48 | null | UTF-8 | Python | false | false | 51,068 | py | #!/usr/bin/env python
#--------------------------------
# Name: metric_model1_func.py
# Purpose: Calculate METRIC Model 1
# Notes: GDAL Block Version
# Python: 2.7, 3.5, 3.6
#--------------------------------
import argparse
import datetime as dt
import logging
import math
import os
import random
import shutil
import sys
from time import sleep
import numpy as np
from osgeo import gdal
import gdal_common as gdc
import et_common
import et_image
import et_numpy
from python_common import open_ini, read_param, remove_file
def metric_model1(image_ws, ini_path, stats_flag=None, overwrite_flag=None):
"""METRIC Model 1 Version
Args:
image_ws (str): Image folder path
ini_path (str): METRIC config file path
ovewrite_flag (bool): if True, overwrite existing files
Returns:
True if successful
"""
logging.info('METRIC Model 1')
log_fmt = ' {:<18s} {}'
env = gdc.env
image = et_image.Image(image_ws, env)
np.seterr(invalid='ignore')
# env.cellsize = 463.313
# env.snap_xmin, env.snap_ymin = 231.6565, 231.6565
# # Check that image_ws is valid
# image_re = re.compile(
# '^(LT04|LT05|LE07|LC08)_(\d{3})(\d{3})_(\d{4})(\d{2})(\d{2})')
# if not os.path.isdir(image_ws) or not image_re.match(scene_id):
# logging.error('\nERROR: Image folder is invalid or does not exist\n')
# return False
# Open config file
config = open_ini(ini_path)
# Get input parameters
logging.debug(' Reading Input File')
# Arrays are processed by block
bs = read_param('block_size', 1024, config)
logging.info(log_fmt.format('Block Size:', bs))
# Raster pyramids/statistics
pyramids_flag = read_param('pyramids_flag', False, config)
if pyramids_flag:
gdal.SetConfigOption('HFA_USE_RRD', 'YES')
if stats_flag is None:
stats_flag = read_param('statistics_flag', False, config)
# Remove reflectance rasters after calculating Model 1
remove_refl_toa_flag = read_param('remove_refl_toa_flag', False, config)
remove_refl_sur_flag = read_param('remove_refl_sur_flag', False, config)
remove_ts_bt_flag = read_param('remove_ts_bt_flag', False, config)
# Check that common_area raster exists
if not os.path.isfile(image.common_area_raster):
logging.error('\nERROR: A common area raster was not found.')
logging.error('ERROR: Please rerun prep tool to build these files.\n')
return False
# Use common_area to set mask parameters
common_ds = gdal.Open(image.common_area_raster)
env.mask_geo = gdc.raster_ds_geo(common_ds)
env.mask_rows, env.mask_cols = gdc.raster_ds_shape(common_ds)
env.mask_extent = gdc.geo_extent(
env.mask_geo, env.mask_rows, env.mask_cols)
env.mask_array = gdc.raster_ds_to_array(common_ds, return_nodata=False)
env.mask_path = image.common_area_raster
common_ds = None
logging.debug(log_fmt.format('Mask Extent:', env.mask_extent))
# Set raster names
r_fmt = '.img'
raster_dict = dict()
raster_dict['dem'] = os.path.join(image.support_ws, 'dem' + r_fmt)
raster_dict['landuse'] = os.path.join(image.support_ws, 'landuse' + r_fmt)
raster_dict['slp'] = os.path.join(image.support_ws, 'slope' + r_fmt)
raster_dict['asp'] = os.path.join(image.support_ws, 'aspect' + r_fmt)
raster_dict['lat'] = os.path.join(image.support_ws, 'latitude' + r_fmt)
raster_dict['lon'] = os.path.join(image.support_ws, 'longitude' + r_fmt)
raster_dict['cos_theta'] = os.path.join(image.support_ws, 'cos_theta' + r_fmt)
raster_dict['albedo_sur'] = image.albedo_sur_raster
raster_dict['tau'] = os.path.join(image_ws, 'transmittance' + r_fmt)
raster_dict['ea'] = image.metric_ea_raster
raster_dict['ndvi_toa'] = image.ndvi_toa_raster
raster_dict['ndwi_toa'] = image.ndwi_toa_raster
raster_dict['lai_toa'] = image.lai_toa_raster
raster_dict['ndvi_sur'] = image.ndvi_sur_raster
raster_dict['lai_sur'] = image.lai_sur_raster
raster_dict['ndwi_sur'] = image.ndwi_sur_raster
raster_dict['savi_toa'] = os.path.join(image.indices_ws, 'savi_toa' + r_fmt)
raster_dict['savi_sur'] = os.path.join(image.indices_ws, 'savi' + r_fmt)
raster_dict['em_nb'] = os.path.join(image_ws, 'narrowband_em' + r_fmt)
raster_dict['em_0'] = os.path.join(image_ws, 'broadband_em' + r_fmt)
raster_dict['rc'] = os.path.join(image_ws, 'corrected_rad' + r_fmt)
raster_dict['ts_dem'] = os.path.join(image_ws, 'ts_dem' + r_fmt)
raster_dict['ts'] = image.ts_raster
raster_dict['ts_bt'] = image.ts_bt_raster
raster_dict['refl_toa'] = image.refl_toa_raster
raster_dict['refl_sur_ledaps'] = image.refl_sur_ledaps_raster
raster_dict['refl_sur_tasumi'] = image.refl_sur_tasumi_raster
raster_dict['refl_sur'] = '' # DEADBEEF - this is a sloppy work around
# to a KeyError that was being thrown under the comment
# 'Calculate refl_toa if any TOA indices flags are True'
# Read MODEL 1 raster flags
save_dict = dict()
save_dict['dem'] = read_param('save_dem_raster_flag', False, config)
save_dict['landuse'] = read_param('save_landuse_raster_flag', False, config)
save_dict['slp'] = read_param('save_mountain_rasters_flag', False, config)
save_dict['asp'] = read_param('save_mountain_rasters_flag', False, config)
save_dict['lat'] = read_param('save_mountain_rasters_flag', False, config)
save_dict['lon'] = read_param('save_mountain_rasters_flag', False, config)
save_dict['cos_theta'] = read_param('save_cos_theta_raster_flag', True, config)
# You can only save Tasumi, not LEDAPS, at-surface reflectance
save_dict['refl_sur_tasumi'] = read_param('save_refl_sur_raster_flag', True, config)
save_dict['tau'] = read_param('save_tau_raster_flag', True, config)
save_dict['albedo_sur'] = read_param('save_albedo_sur_raster_flag', True, config)
# Default for all TOA reflectance indices is True except SAVI
save_dict['ndvi_toa'] = read_param('save_ndvi_toa_raster_flag', True, config)
save_dict['ndwi_toa'] = read_param('save_ndwi_toa_raster_flag', True, config)
save_dict['savi_toa'] = read_param('save_savi_toa_raster_flag', False, config)
save_dict['lai_toa'] = read_param('save_lai_toa_raster_flag', True, config)
# Default for all at-surface reflectance indices is False
save_dict['ndvi_sur'] = read_param('save_ndvi_raster_flag', False, config)
save_dict['ndwi_sur'] = read_param('save_ndwi_raster_flag', False, config)
save_dict['savi_sur'] = read_param('save_savi_raster_flag', False, config)
save_dict['lai_sur'] = read_param('save_lai_raster_flag', False, config)
# Surface temperature and emissivity
save_dict['em_nb'] = read_param('save_em_nb_raster_flag', False, config)
save_dict['em_0'] = read_param('save_em_0_raster_flag', True, config)
save_dict['rc'] = read_param('save_rc_raster_flag', False, config)
save_dict['ts'] = read_param('save_ts_raster_flag', True, config)
save_dict['ts_dem'] = read_param('save_ts_dem_raster_flag', True, config)
# Clear SUR save flags if input rasters from prep_scene are not present
em_refl_type = read_param('em_refl_type', 'TOA', config).upper()
refl_sur_model_type = read_param(
'refl_sur_model_type', 'TASUMI', config).upper()
refl_sur_model_type_list = ['TASUMI', 'LEDAPS']
if refl_sur_model_type.upper() not in refl_sur_model_type_list:
logging.error(
('\nERROR: Surface reflectance model type {} is invalid.' +
'\nERROR: Set refl_sur_model_type to {}').format(
refl_sur_model_type, ','.join(refl_sur_model_type_list)))
return False
elif (refl_sur_model_type == 'LEDAPS' and
not os.path.isfile(raster_dict['refl_sur_ledaps'])):
logging.warning(
'\nLEDAPS at-surface refl. composite raster does not exist' +
'\nLEDAPS at-surface refl. products will not be calculated')
save_dict['refl_sur_ledaps'] = False
clear_refl_sur_flag = True
elif (refl_sur_model_type == 'TASUMI' and
not os.path.isfile(raster_dict['refl_toa'])):
logging.warning(
'\nTOA reflectance composite raster does not exist' +
'\nTasumi at-surface refl. products will not be calculated')
save_dict['refl_sur_tasumi'] = False
clear_refl_sur_flag = True
else:
clear_refl_sur_flag = False
if clear_refl_sur_flag:
save_dict['refl_sur'] = False
save_dict['ndvi_sur'] = False
save_dict['ndwi_sur'] = False
save_dict['savi_sur'] = False
save_dict['lai_sur'] = False
save_dict['albedo_sur'] = False
if em_refl_type == 'SUR':
save_dict['em_nb'] = False
save_dict['em_0'] = False
save_dict['rc'] = False
save_dict['ts'] = False
# Clear TOA save flags if input TOA raster is not present
if not os.path.isfile(raster_dict['refl_toa']):
logging.warning(
'\nTOA reflectance composite raster does not exist' +
'\nTOA reflectance products will not be calculated')
save_dict['ndvi_toa'] = False
save_dict['ndwi_toa'] = False
save_dict['savi_toa'] = False
save_dict['lai_toa'] = False
if em_refl_type == 'TOA':
save_dict['em_nb'] = False
save_dict['em_0'] = False
save_dict['rc'] = False
save_dict['ts'] = False
save_dict['ts_dem'] = False
# Clear Ts save flags if input Ts brightness raster is not present
if not os.path.isfile(raster_dict['ts_bt']):
logging.warning('\nTs brightness raster does not exist')
save_dict['rc'] = False
save_dict['ts'] = False
save_dict['ts_dem'] = False
# If overwrite, remove all existing rasters that can be saved
# DEADBEEF - changed the overwrite_flag or save_flag line to and. Not sure
# what else this will affect.
logging.debug('\nRemoving existing rasters')
for name, save_flag in sorted(save_dict.items()):
if ((overwrite_flag and save_flag) and
os.path.isfile(raster_dict[name])):
remove_file(raster_dict[name])
# If save flag is true, than calc flag has to be true
calc_dict = save_dict.copy()
# Initialize prep_scene rasters to False
calc_dict['refl_toa'] = False
calc_dict['refl_sur'] = False
calc_dict['refl_sur_ledaps'] = False
calc_dict['ts_bt'] = False
calc_dict['ea'] = False
# Working backwards,
# Adjust calc flags based on function dependencies
# Read in additional parameters based on calc flags
# Surface temperature
if calc_dict['ts_dem']:
calc_dict['ts'] = True
calc_dict['dem'] = True
lapse_flat_flt = read_param('lapse_flat', 6.5, config)
lapse_mtn_flt = read_param('lapse_mtn', 10.0, config)
lapse_elev_flt = read_param('lapse_elev', 99999.0, config)
if calc_dict['ts']:
calc_dict['rc'] = True
if calc_dict['rc']:
calc_dict['ts_bt'] = True
calc_dict['em_nb'] = True
rsky_flt = read_param('rsky', 1.32, config)
rp_flt = read_param('rp', 0.91, config)
tnb_flt = read_param('tnb', 0.866, config)
# Emissivity
if calc_dict['em_nb'] or calc_dict['em_0']:
# Emissivity is a function of TOA LAI or at-surface LAI
em_refl_type = read_param('em_refl_type', 'TOA', config).upper()
if em_refl_type == 'TOA':
calc_dict['lai_toa'] = True
elif em_refl_type == 'SUR':
calc_dict['lai_sur'] = True
else:
logging.error(
('\nERROR: The emissivity reflectance type {} is invalid.' +
'\nERROR: Set em_refl_type to TOA or SUR').format(
em_refl_type))
return False
# Emissivity of water can be set using either NDVI or NDWI
em_water_index_type = 'NDVI'
# em_water_index_type = read_param(
# 'em_water_index_type', 'NDVI', config).upper()
if em_water_index_type == 'NDVI' and em_refl_type == 'TOA':
calc_dict['ndvi_toa'] = True
elif em_water_index_type == 'NDVI' and em_refl_type == 'SUR':
calc_dict['ndvi_sur'] = True
# elif em_water_index_type == 'NDWI' and em_refl_type == 'TOA':
# calc_dict['ndwi_toa'] = True
# elif em_water_index_type == 'NDWI' and em_refl_type == 'SUR':
# calc_dict['ndwi_sur'] = True
else:
logging.error(
('\nERROR: The emissivity water type {} is invalid.' +
'\nERROR: Set em_water_index_type to NDVI').format(
em_water_index_type))
return False
# Vegetation indices
if calc_dict['lai_sur']:
lai_veg_index_type = read_param(
'lai_veg_index_type', 'SAVI', config).upper()
if lai_veg_index_type == 'SAVI':
calc_dict['savi_sur'] = True
elif lai_veg_index_type == 'NDVI':
calc_dict['ndvi_sur'] = True
else:
logging.error(
('\nERROR: The LAI veg. index type {} is invalid.' +
'\nERROR: Set lai_veg_index_type to SAVI or NDVI').format(
lai_veg_index_type))
return False
if calc_dict['lai_toa']:
lai_toa_veg_index_type = read_param(
'lai_toa_veg_index_type', 'SAVI', config).upper()
if lai_toa_veg_index_type == 'SAVI':
calc_dict['savi_toa'] = True
elif lai_toa_veg_index_type == 'NDVI':
calc_dict['ndvi_toa'] = True
else:
logging.error(
('\nERROR: The LAI TOA veg. index type {} is invalid.' +
'\nERROR: Set lai_toa_veg_index_type to SAVI or NDVI').format(
lai_toa_veg_index_type))
return False
if calc_dict['savi_toa'] or calc_dict['savi_sur']:
savi_l_flt = read_param('savi_l', 0.1, config)
# Calculate refl_toa if any TOA indices flags are True
if any([v for k, v in calc_dict.items()
if image.indices_ws in raster_dict[k] and '_toa' in k]):
calc_dict['refl_toa'] = True
# Calculate refl_sur if any non-TOA indices flags are True
refl_toa_index_flag = False
if any([v for k, v in calc_dict.items()
if image.indices_ws in raster_dict[k] and
('_toa' not in k)]):
refl_toa_index_flag = True
calc_dict['refl_sur'] = True
# At-surface albedo
if calc_dict['albedo_sur']:
calc_dict['refl_sur'] = True
# At-surface reflectance
if calc_dict['refl_sur']:
# Remove refl_sur key/value then set LEDAPS or Tasumi
del calc_dict['refl_sur']
refl_sur_model_type_list = ['LEDAPS', 'TASUMI']
refl_sur_model_type = read_param(
'refl_sur_model_type', 'TASUMI', config).upper()
if refl_sur_model_type.upper() not in refl_sur_model_type_list:
logging.error(
('\nERROR: Surface reflectance model type {} is invalid.' +
'\nERROR: Set refl_sur_model_type to {}').format(
refl_sur_model_type, ','.join(refl_sur_model_type_list)))
return False
elif refl_sur_model_type.upper() == 'LEDAPS':
calc_dict['refl_sur_ledaps'] = True
calc_dict['refl_sur_tasumi'] = False
elif refl_sur_model_type.upper() == 'TASUMI':
calc_dict['refl_toa'] = True
calc_dict['refl_sur_tasumi'] = True
calc_dict['refl_sur_ledaps'] = False
kt_flt = read_param('kt', 1.0, config)
# Tasumi at-surface reflectance and transmittance
if ((calc_dict['refl_sur_tasumi'] or calc_dict['tau']) and not
os.path.isfile(raster_dict['cos_theta'])):
calc_dict['cos_theta'] = True
kt_flt = read_param('kt', 1.0, config)
# Air pressure model dependent parameters
if calc_dict['refl_sur_tasumi'] or calc_dict['tau']:
pair_model_list = ['DATUM', 'DEM']
pair_model = read_param('pair_model', 'DEM', config).upper()
if pair_model not in pair_model_list:
logging.error(
('\nERROR: The Pair model {} is not a valid option.' +
'\nERROR: Set pair_model to DATUM or DEM').format(
pair_model))
return False
# Get Datum elevation
if pair_model == 'DATUM' or calc_dict['ts_dem']:
datum_flt = float(config.get('INPUTS', 'datum'))
# Get DEM elevation
if pair_model == 'DEM':
calc_dict['dem'] = True
else:
pair_model = None
# Calculate a centroid based cos_theta value
# DEADBEEF - Move this to image class?
if calc_dict['cos_theta']:
logging.debug('\nCos(theta)')
# Get mask extent center in decimal degrees
lon_center, lat_center = gdc.project_point(
env.mask_extent.center(), env.snap_osr, env.snap_gcs_osr)
cos_theta_centroid_flt = et_common.cos_theta_centroid_func(
image.acq_time, image.acq_doy, image.dr,
lon_center * math.pi / 180, lat_center * math.pi / 180)
del lon_center, lat_center
logging.debug(' Centroid: {}'.format(cos_theta_centroid_flt))
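        # cos_theta is the cosine of the solar incidence angle; METRIC uses it
        # to scale TOA reflectance down to at-surface reflectance and
        # transmittance for each pixel.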
# Spatial/Mountain model input rasters
if calc_dict['cos_theta']:
cos_theta_model_list = ['SOLAR', 'CENTROID', 'SPATIAL', 'MOUNTAIN']
cos_theta_model = read_param(
'cos_theta_model', 'CENTROID', config).upper()
if cos_theta_model not in cos_theta_model_list:
logging.error(
('\nERROR: The Cos(theta) model {} is not a valid option.' +
'\nERROR: Set cos_theta_model to {}').format(
cos_theta_model, ', '.join(cos_theta_model_list)))
return False
# I can't move these up since I have to read cos_theta_model first
if cos_theta_model == 'MOUNTAIN':
calc_dict['lon'] = True
calc_dict['lat'] = True
calc_dict['slp'] = True
calc_dict['asp'] = True
elif cos_theta_model == 'SPATIAL':
calc_dict['lon'] = True
calc_dict['lat'] = True
calc_dict['slp'] = False
calc_dict['asp'] = False
else:
calc_dict['lon'] = False
calc_dict['lat'] = False
calc_dict['slp'] = False
calc_dict['asp'] = False
# Rasters can be read from local copy or clipped from remote copy
for key, raster_name in [
['dem', 'dem_raster'],
['landuse', 'landuse_raster'],
['slp', 'slope_raster'],
['asp', 'aspect_raster'],
['lat', 'latitude_raster'],
['lon', 'longitude_raster']]:
# Skip if raster is not needed and reset save flag
if not calc_dict[key]:
save_dict[key] = False
# Read local raster if possible
elif (os.path.isfile(raster_dict[key]) and
gdc.raster_path_extent(raster_dict[key]) == env.mask_extent):
raster_dict[key + '_full'] = raster_dict[key]
save_dict[key] = False
# Otherwise try to read read full/external path
else:
raster_dict[key + '_full'] = config.get('INPUTS', raster_name)
if not os.path.isfile(raster_dict[key + '_full']):
logging.error(
'\nERROR: The raster path {} is not valid'.format(
raster_dict[key + '_full']))
return False
# Landuse type
if calc_dict['landuse']:
# For now only read NLCD landuse rasters
landuse_type = read_param(
'landuse_type', 'NLCD', config).upper()
landuse_type_list = ['NLCD']
# landuse_type_list = ['NLCD', 'CDL']
if landuse_type not in landuse_type_list:
logging.error(
('\nERROR: The landuse type {} is invalid.' +
'\nERROR: Set landuse_type to {}').format(
landuse_type, ', '.join(landuse_type_list)))
return False
# # Spatial/Mountain model input rasters
# if calc_dict['cos_theta']:
# cos_theta_model_list = ['SOLAR', 'CENTROID', 'SPATIAL', 'MOUNTAIN']
# cos_theta_model = read_param('cos_theta_model', 'CENTROID', config).upper()
# if cos_theta_model not in cos_theta_model_list:
# logging.error(
# ('\nERROR: The Cos(theta) model {} is not a valid option.' +
# '\nERROR: Set cos_theta_model to {}').format(
# cos_theta_model, ', '.join(cos_theta_model_list)))
# return False
# # I can't move these up since I have to read cos_theta_model first
# if cos_theta_model in ['SPATIAL', 'MOUNTAIN']:
# calc_dict['lon'] = True
# calc_dict['lat'] = True
# if cos_theta_model == 'MOUNTAIN':
# calc_dict['slp'] = True
# calc_dict['asp'] = True
# for local_key, full_key, raster_name in [
# ['slp', 'slp_full', 'slope_raster'],
# ['asp', 'asp_full', 'aspect_raster'],
# ['lat', 'lat_full', 'latitude_raster'],
# ['lon', 'lon_full', 'longitude_raster']]:
# # Check that the output/sub rasters exist
# # Check that they have the correct shape
# if calc_dict[local_key]:
# if (save_dict[local_key] or
# not os.path.isfile(raster_dict[local_key]) or
# gdc.raster_path_extent(raster_dict[local_key]) != env.mask_extent):
# save_dict[local_key] = True
# raster_dict[full_key] = config.get('INPUTS', raster_name)
# # Check that the input rasters exist
# if not os.path.isfile(raster_dict[full_key]):
# logging.error(
# '\nERROR: The raster path {} is not valid'.format(
# raster_dict[full_key]))
# return False
# # Otherwise script reads from "full" path,
# # so set full path to local path
# else:
# raster_dict[full_key] = raster_dict[local_key]
# # Terrain model dependent parameters
# # if True:
# # terrain_model_list = ['FLAT', 'MOUNTAIN']
# # terrain_model = read_param('terrain_model', 'FLAT', config).upper()
# # if terrain_model not in terrain_model_list:
# # logging.error(
# # ('\nERROR: The terrain model {} is not a valid option.' +
# # '\nERROR: Set terrain_model to FLAT or MOUNTAIN').format(
# # terrain_model))
# # return False
# # For elevation rasters, calc means it will be read locally
# # save means it will be extracted from remote location first
# # DEM
# if calc_dict['dem']:
# # Get the input file DEM raster path if needed
# if (save_dict['dem'] or
# not os.path.isfile(raster_dict['dem']) or
# gdc.raster_path_extent(raster_dict['dem']) != env.mask_extent):
# raster_dict['dem_full'] = config.get('INPUTS','dem_raster')
# if not os.path.isfile(raster_dict['dem_full']):
# logging.error(
# '\nERROR: The dem_raster path {} is not valid'.format(
# raster_dict['dem_full']))
# return False
# # Otherwise script reads from "full" path,
# # so set full path to local path
# else:
# raster_dict['dem_full'] = raster_dict['dem']
#
# # Landuse
# if calc_dict['landuse']:
# # Get the input file NLCD raster path if needed
# if (save_dict['nlcd'] or
# not os.path.isfile(raster_dict['nlcd']) or
# gdc.raster_path_extent(raster_dict['nlcd']) != env.mask_extent):
# raster_dict['landuse_full'] = config.get('INPUTS', 'landuse_raster')
# if not os.path.isfile(raster_dict['landuse_full']):
# logging.error(
# '\nERROR: The landuse raster {} does not exist'.format(
# raster_dict['landuse_full']))
# return False
# landuse_type = read_param('landuse_type', 'NLCD', config).upper()
# if landuse_type not in ['NLCD', 'CDL']:
# logging.error(
# ('\nERROR: The landuse type {} is invalid.' +
# '\nERROR: Set landuse_type to NLCD or CDL').format(
# landuse_type))
# return False
# # Otherwise script reads from "full" path,
# # so set full path to local path
# else:
# raster_dict['landuse_full'] = raster_dict['nlcd']
# Weather Data
if calc_dict['refl_sur_tasumi'] or calc_dict['tau']:
weather_data_source = config.get(
'INPUTS', 'weather_data_source').upper()
log_fmt = ' {:<18s} {}'
if weather_data_source not in ['NLDAS', 'REFET', 'MANUAL']:
logging.error(
                ('\nERROR: The weather data source {} is invalid.' +
                 '\nERROR: Set weather_data_source to NLDAS, REFET, or ' +
                 'MANUAL').format(weather_data_source))
return False
elif weather_data_source == 'NLDAS':
            logging.info('\nWeather parameters from NLDAS rasters')
# DEADBEEF - Testing where to store Landsat scene NLDAS Ea rasters
# Assuming Ea raster was clipped/projected into SUPPORT_RASTERS
if not os.path.isfile(raster_dict['ea']):
logging.error(
('\nERROR: NLDAS Ea raster does not exist\n' +
' {}').format(raster_dict['ea']))
return False
calc_dict['ea'] = True
elif weather_data_source == 'REFET':
gmt_offset_flt = float(config.get('INPUTS', 'gmt_offset'))
logging.debug('\n Weather parameters from RefET file')
refet_file = config.get('INPUTS', 'refet_file')
logging.debug(' {}'.format(refet_file))
if not os.path.isfile(refet_file):
logging.error('\nERROR: The refet_file path is not valid')
return False
# The RefET data is localtime, scene acquisition time is GMT
acq_localtime = image.acq_time + gmt_offset_flt
# Get RefET Data
(dew_point_flt, wind_speed_flt, ea_flt,
etr_flt, etr_24hr_flt) = et_common.read_refet_instantaneous_func(
refet_file, image.acq_year, image.acq_doy, acq_localtime)
ea_array = np.array([ea_flt])
# Output RefET Data
logging.debug('\n Interpolated Values:')
log_fmt = ' {:<22s} {}'
logging.debug(log_fmt.format('Scene Time:', acq_localtime))
logging.debug(log_fmt.format('Dew Point [C]:', dew_point_flt))
logging.debug(log_fmt.format('Wind Speed [m/s]:', wind_speed_flt))
logging.debug(log_fmt.format('Ea [kPa]:', ea_flt))
logging.debug(log_fmt.format('ETr [mm/hr]:', etr_flt))
logging.debug(log_fmt.format('ETr 24hr [mm/day]:', etr_24hr_flt))
elif weather_data_source == 'MANUAL':
logging.info('\n Weather parameters from INI file')
ea_flt = float(config.get('INPUTS', 'ea'))
ea_array = np.array([ea_flt])
logging.debug(log_fmt.format('Ea [kPa]:', ea_flt))
# Build necessary output folders
logging.debug('\nBuilding output folders')
if save_dict['refl_sur_tasumi']:
if not os.path.isdir(image.refl_sur_ws):
os.makedirs(image.refl_sur_ws)
if any([v for k, v in save_dict.items()
if image.indices_ws in raster_dict[k]]):
if not os.path.isdir(image.indices_ws):
os.makedirs(image.indices_ws)
# Remove existing and build new empty rasters if necessary
logging.debug('\nBuilding empty rasters')
for name, save_flag in sorted(save_dict.items()):
# logging.debug('{} {}'.format(name, save_flag))
if save_flag:
band_cnt, raster_type = 1, np.float32
if name == 'refl_sur_tasumi':
band_cnt = image.band_sur_cnt
            elif name == 'landuse':
                raster_type = np.uint8
logging.debug(raster_dict[name])
gdc.build_empty_raster(raster_dict[name], band_cnt, raster_type)
del band_cnt
# Process by block
logging.info('\nProcessing by block')
logging.debug(' Mask cols/rows: {}/{}'.format(
env.mask_cols, env.mask_rows))
for b_i, b_j in gdc.block_gen(env.mask_rows, env.mask_cols, bs):
logging.debug(' Block y: {:5d} x: {:5d}'.format(b_i, b_j))
        block_data_mask = gdc.array_to_block(
            env.mask_array, b_i, b_j, bs).astype(bool)
block_nodata_mask = ~block_data_mask
block_rows, block_cols = block_nodata_mask.shape
block_geo = gdc.array_offset_geo(env.mask_geo, b_j, b_i)
block_extent = gdc.geo_extent(block_geo, block_rows, block_cols)
logging.debug(' Block rows: {} cols: {}'.format(
block_rows, block_cols))
logging.debug(' Block extent: {}'.format(block_extent))
logging.debug(' Block geo: {}'.format(block_geo))
# Skips blocks that are entirely nodata
if not np.any(block_data_mask):
continue
# Prebuild Landuse array even though it isn't used in Model 1
if calc_dict['landuse']:
landuse_array, landuse_nodata = gdc.raster_to_array(
raster_dict['landuse_full'], 1, block_extent,
return_nodata=True)
landuse_array[block_nodata_mask] = landuse_nodata
if save_dict['landuse']:
gdc.block_to_raster(
landuse_array, raster_dict['landuse'], b_i, b_j, bs)
if calc_dict['landuse']:
del landuse_array, landuse_nodata
# Mountain rasters, and landuse by block
if calc_dict['slp']:
slope_array, slope_nodata = gdc.raster_to_array(
raster_dict['slp'], 1, block_extent, return_nodata=True)
slope_array[block_nodata_mask] = slope_nodata
if calc_dict['asp']:
aspect_array, aspect_nodata = gdc.raster_to_array(
raster_dict['asp'], 1, block_extent, return_nodata=True)
aspect_array[block_nodata_mask] = aspect_nodata
if calc_dict['lat']:
lat_array, lat_nodata = gdc.raster_to_array(
raster_dict['lat'], 1, block_extent, return_nodata=True)
lat_array[block_nodata_mask] = lat_nodata
if calc_dict['lon']:
lon_array, lon_nodata = gdc.raster_to_array(
raster_dict['lon'], 1, block_extent, return_nodata=True)
lon_array[block_nodata_mask] = lon_nodata
if save_dict['slp']:
gdc.block_to_raster(slope_array, raster_dict['slp'], b_i, b_j, bs)
if save_dict['asp']:
gdc.block_to_raster(aspect_array, raster_dict['asp'], b_i, b_j, bs)
if save_dict['lat']:
gdc.block_to_raster(lat_array, raster_dict['lat'], b_i, b_j, bs)
if save_dict['lon']:
gdc.block_to_raster(lon_array, raster_dict['lon'], b_i, b_j, bs)
# logging.info('Build Latitude/Longitude Rasters for Common Area')
# lat_lon_array_func(lat_sub_raster, lon_sub_raster)
# Cos(theta) by block
if calc_dict['cos_theta']:
if cos_theta_model == 'MOUNTAIN':
# lon_array = gdc.raster_to_block(
# raster_dict['lon_sub'],
# b_i, b_j, bs, return_nodata=False)
# lat_array = gdc.raster_to_block(
# raster_dict['lat_sub'],
# b_i, b_j, bs, return_nodata=False)
# slope_array = gdc.raster_to_block(
# raster_dict['slope_sub'],
# b_i, b_j, bs, return_nodata=False)
# aspect_array = gdc.raster_to_block(
# raster_dict['aspect_sub'],
# b_i, b_j, bs, return_nodata=False)
cos_theta_array = et_numpy.cos_theta_mountain_func(
image.acq_time, image.acq_doy, image.dr,
lon_array, lat_array, slope_array, aspect_array)
del lon_array, lat_array, slope_array, aspect_array
# Also build a simple cos(theta) array for refl_toa
cos_theta_toa_array = np.empty(
block_data_mask.shape).astype(np.float32)
cos_theta_toa_array[block_data_mask] = image.cos_theta_solar
cos_theta_toa_array[block_nodata_mask] = np.nan
elif cos_theta_model == 'SPATIAL':
# lon_array = gdc.raster_to_block(
# raster_dict['lon_sub'],
# b_i, b_j, bs, return_nodata=False)
# lat_array = gdc.raster_to_block(
# raster_dict['lat_sub'],
# b_i, b_j, bs, return_nodata=False)
cos_theta_array = et_numpy.cos_theta_spatial_func(
image.acq_time, image.acq_doy, image.dr,
lon_array, lat_array)
del lon_array, lat_array
elif cos_theta_model == 'CENTROID':
cos_theta_array = np.empty(
block_data_mask.shape).astype(np.float32)
cos_theta_array[block_data_mask] = cos_theta_centroid_flt
cos_theta_array[block_nodata_mask] = np.nan
elif cos_theta_model == 'SOLAR':
cos_theta_array = np.empty(
block_data_mask.shape).astype(np.float32)
cos_theta_array[block_data_mask] = image.cos_theta_solar
cos_theta_array[block_nodata_mask] = np.nan
if save_dict['cos_theta']:
gdc.block_to_raster(
cos_theta_array, raster_dict['cos_theta'],
b_i, b_j, bs)
if calc_dict['slp']:
del slope_array
if calc_dict['asp']:
del aspect_array
if calc_dict['lat']:
del lat_array
if calc_dict['lon']:
del lon_array
# Read in TOA Reflectance
if calc_dict['refl_toa']:
refl_toa_array = np.zeros(
(block_rows, block_cols, image.band_toa_cnt),
dtype=np.float32)
for band, band_i in sorted(image.band_toa_dict.items()):
refl_toa_array[:, :, band_i - 1] = gdc.raster_to_block(
raster_dict['refl_toa'], b_i, b_j, bs, band_i,
return_nodata=False)
refl_toa_array[block_nodata_mask, :] = np.nan
# METRIC default indices using TOA reflectance
# All other indices will use surface reflectance instead
# Don't remove NDVI or LAI
# NDVI
if calc_dict['ndvi_toa']:
ndvi_toa_array = et_numpy.ndi_func(
refl_toa_array[:, :, 4 - 1], refl_toa_array[:, :, 3 - 1])
if save_dict['ndvi_toa']:
gdc.block_to_raster(
ndvi_toa_array, raster_dict['ndvi_toa'], b_i, b_j, bs)
        # NDWI
        if calc_dict['ndwi_toa']:
            ndwi_toa_array = et_numpy.ndi_func(
                refl_toa_array[:, :, 5 - 1], refl_toa_array[:, :, 2 - 1])
        if save_dict['ndwi_toa']:
            gdc.block_to_raster(
                ndwi_toa_array, raster_dict['ndwi_toa'], b_i, b_j, bs)
# SAVI
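        # SAVI = (1 + L) * (NIR - red) / (NIR + red + L); the soil factor L
        # (savi_l) damps soil-background brightness effects (Huete, 1988).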
if calc_dict['savi_toa']:
savi_toa_array = et_numpy.ndi_func(
refl_toa_array[:, :, 4 - 1], refl_toa_array[:, :, 3 - 1],
savi_l_flt)
if save_dict['savi_toa']:
gdc.block_to_raster(
savi_toa_array, raster_dict['savi_toa'], b_i, b_j, bs)
# LAI (from SAVI or NDVI)
if calc_dict['lai_toa'] and lai_toa_veg_index_type == 'SAVI':
lai_toa_array = et_numpy.savi_lai_func(savi_toa_array)
elif calc_dict['lai_toa'] and lai_toa_veg_index_type == 'NDVI':
lai_toa_array = et_numpy.ndvi_lai_func(ndvi_toa_array)
if save_dict['lai_toa']:
gdc.block_to_raster(
lai_toa_array, raster_dict['lai_toa'], b_i, b_j, bs)
if calc_dict['savi_toa']:
del savi_toa_array
# DEM
if calc_dict['dem']:
elev_array = gdc.raster_to_array(
raster_dict['dem_full'], 1, block_extent, -9999.0,
return_nodata=False)
elev_array = elev_array.astype(np.float32)
elev_array[block_nodata_mask] = np.nan
if save_dict['dem']:
gdc.block_to_raster(
elev_array, raster_dict['dem'], b_i, b_j, bs)
# At surface reflectance, transmittance, & albedo
# Pre calculate air pressure and precipitable water
if calc_dict['refl_sur_tasumi'] or calc_dict['tau']:
# Air Pressure
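            # Standard-atmosphere pressure (FAO-56):
            #   Pair = 101.3 * ((293 - 0.0065 * z) / 293) ** 5.26  [kPa]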
if pair_model in ['DEM']:
pair_array = et_common.air_pressure_func(elev_array)
elif pair_model in ['DATUM']:
pair_array = np.empty(
block_data_mask.shape, dtype=np.float32)
pair_array[block_data_mask] = et_common.air_pressure_func(
datum_flt)
pair_array[block_nodata_mask] = np.nan
# Vapor pressure (Ea)
if calc_dict['ea']:
ea_array = gdc.raster_to_array(
raster_dict['ea'], 1, block_extent, return_nodata=False)
ea_array = ea_array.astype(np.float32)
ea_array[block_nodata_mask] = np.nan
else:
ea_array = np.array([ea_flt])
# Precipitable water
w_array = et_common.precipitable_water_func(pair_array, ea_array)
del ea_array
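            # (In METRIC, precipitable water is commonly
            #   W = 0.14 * ea * Pair + 2.1  [mm]
            # after Garrison & Adler (1990); the exact form lives in
            # et_common.precipitable_water_func.)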
# Transmittance can be pre-calculated for Model2 Rn calculation
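            # Broadband transmittance in METRIC (Allen et al., 2007) is roughly
            #   tau = 0.35 + 0.627 * exp(-0.00146 * Pair / (Kt * cos_theta)
            #                            - 0.075 * (W / cos_theta) ** 0.4)
            # et_numpy.tau_broadband_func implements the exact form.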
if calc_dict['tau'] or save_dict['tau']:
if (not calc_dict['cos_theta'] and
os.path.isfile(raster_dict['cos_theta'])):
# read in cos_theta
cos_theta_array = gdc.raster_to_block(
raster_dict['cos_theta'], b_i, b_j, bs,
return_nodata=False)
tau_array = et_numpy.tau_broadband_func(
pair_array, w_array, cos_theta_array)
gdc.block_to_raster(
tau_array, raster_dict['tau'], b_i, b_j, bs)
del tau_array
# Read in LEDAPS at-surface reflectance
if calc_dict['refl_sur_ledaps']:
refl_sur_array = np.zeros(
(block_rows, block_cols, image.band_sur_cnt),
dtype=np.float32)
for band, band_i in sorted(image.band_sur_dict.items()):
refl_sur_array[:, :, band_i - 1] = gdc.raster_to_block(
raster_dict['refl_sur_ledaps'], b_i, b_j, bs, band_i,
return_nodata=False)
refl_sur_array[block_nodata_mask, :] = np.nan
# Calculate Tasumi at-surface reflectance
elif calc_dict['refl_sur_tasumi']:
refl_sur_array = et_numpy.refl_sur_tasumi_func(
refl_toa_array[:, :, image.band_toa_sur_mask],
pair_array, w_array, cos_theta_array, kt_flt,
image.c1, image.c2, image.c3, image.c4, image.c5,
image.cb, image.band_sur_cnt)
if save_dict['refl_sur_tasumi']:
for band, band_i in sorted(image.band_sur_dict.items()):
gdc.block_to_raster(
refl_sur_array[:, :, band_i - 1],
raster_dict['refl_sur_tasumi'],
b_i, b_j, bs, band_i)
# Cleanup
if calc_dict['refl_sur_tasumi'] or calc_dict['tau']:
del pair_array, w_array
if calc_dict['refl_toa']:
del refl_toa_array
if calc_dict['cos_theta']:
del cos_theta_array
# Calculate at surface albedo
if calc_dict['albedo_sur']:
albedo_sur_array = et_numpy.albedo_sur_func(
refl_sur_array, image.wb)
if save_dict['albedo_sur']:
gdc.block_to_raster(
albedo_sur_array, raster_dict['albedo_sur'], b_i, b_j, bs)
del albedo_sur_array
# Non METRIC Indices (using surface reflectance)
if calc_dict['ndvi_sur']:
ndvi_sur_array = et_numpy.ndi_func(
refl_sur_array[:, :, 4 - 1], refl_sur_array[:, :, 3 - 1])
if save_dict['ndvi_sur']:
gdc.block_to_raster(
ndvi_sur_array, raster_dict['ndvi_sur'], b_i, b_j, bs)
        if calc_dict['ndwi_sur']:
            # This is the NDWI Rick Allen uses in the METRIC model;
            # it is equivalent to the MNDWI (green/SWIR) formulation
            ndwi_sur_array = et_numpy.ndi_func(
                refl_sur_array[:, :, 5 - 1], refl_sur_array[:, :, 2 - 1])
        if save_dict['ndwi_sur']:
            gdc.block_to_raster(
                ndwi_sur_array, raster_dict['ndwi_sur'], b_i, b_j, bs)
if calc_dict['savi_sur']:
savi_sur_array = et_numpy.ndi_func(
refl_sur_array[:, :, 4 - 1], refl_sur_array[:, :, 3 - 1],
savi_l_flt)
if save_dict['savi_sur']:
gdc.block_to_raster(
savi_sur_array, raster_dict['savi_sur'], b_i, b_j, bs)
if calc_dict['lai_sur']:
if lai_veg_index_type == 'SAVI':
lai_sur_array = et_numpy.savi_lai_func(savi_sur_array)
else:
lai_sur_array = et_numpy.ndvi_lai_func(ndvi_sur_array)
if save_dict['lai_sur']:
gdc.block_to_raster(
lai_sur_array, raster_dict['lai_sur'], b_i, b_j, bs)
if calc_dict['savi_sur']:
del savi_sur_array
# Narrowband emissivity
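        # (METRIC's narrowband emissivity is roughly 0.97 + 0.0033 * LAI for
        # LAI <= 3 and 0.98 above, with water flagged by the chosen water
        # index; the exact constants live in et_numpy.em_nb_func.)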
if calc_dict['em_nb']:
if em_refl_type == 'TOA' and em_water_index_type == 'NDVI':
em_nb_array = et_numpy.em_nb_func(
lai_toa_array, ndvi_toa_array)
elif em_refl_type == 'SUR' and em_water_index_type == 'NDVI':
em_nb_array = et_numpy.em_nb_func(
lai_sur_array, ndvi_sur_array)
elif em_refl_type == 'TOA' and em_water_index_type == 'NDWI':
em_nb_array = et_numpy.em_nb_func(
lai_toa_array, ndwi_toa_array)
elif em_refl_type == 'SUR' and em_water_index_type == 'NDWI':
em_nb_array = et_numpy.em_nb_func(
lai_sur_array, ndwi_sur_array)
if save_dict['em_nb']:
gdc.block_to_raster(
em_nb_array, raster_dict['em_nb'], b_i, b_j, bs)
# Broadband emissivity
if calc_dict['em_0']:
if em_refl_type == 'TOA' and em_water_index_type == 'NDVI':
em_0_array = et_numpy.em_0_func(lai_toa_array, ndvi_toa_array)
elif em_refl_type == 'SUR' and em_water_index_type == 'NDVI':
em_0_array = et_numpy.em_0_func(lai_sur_array, ndvi_sur_array)
# elif em_refl_type == 'TOA' and em_water_index_type == 'NDWI':
# em_0_array = em_0_func(lai_toa_array, ndwi_toa_array)
# elif em_refl_type == 'SUR' and em_water_index_type == 'NDWI':
# em_0_array = em_0_func(lai_array, ndwi_array)
if save_dict['em_0']:
gdc.block_to_raster(
em_0_array, raster_dict['em_0'], b_i, b_j, bs)
if calc_dict['em_0']:
del em_0_array
# Cleanup
if calc_dict['ndvi_sur']:
del ndvi_sur_array
if calc_dict['ndwi_sur']:
del ndwi_sur_array
if calc_dict['lai_sur']:
del lai_sur_array
if calc_dict['ndvi_toa']:
del ndvi_toa_array
if calc_dict['ndwi_toa']:
del ndwi_toa_array
if calc_dict['lai_toa']:
del lai_toa_array
# Corrected radiance
if calc_dict['ts_bt']:
ts_bt_array = gdc.raster_to_block(
raster_dict['ts_bt'], b_i, b_j, bs, return_nodata=False)
ts_bt_array[block_nodata_mask] = np.nan
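        # Atmospherically corrected thermal radiance, roughly
        #   Rc = (L_t - Rp) / tnb - (1 - em_nb) * Rsky   (Allen et al., 2007)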
if calc_dict['rc']:
thermal_rad_array = et_numpy.thermal_rad_func(
ts_bt_array, image.k1_dict[image.thermal_band],
image.k2_dict[image.thermal_band])
rc_array = et_numpy.rc_func(
thermal_rad_array, em_nb_array,
rp_flt, tnb_flt, rsky_flt)
del thermal_rad_array
if save_dict['rc']:
gdc.block_to_raster(rc_array, raster_dict['rc'], b_i, b_j, bs)
if calc_dict['ts_bt']:
del ts_bt_array
# Surface temperature
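        # Ts inverts the modified Planck calibration equation:
        #   Ts = K2 / ln(em_nb * K1 / Rc + 1)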
if calc_dict['ts']:
ts_array = et_numpy.ts_func(
em_nb_array, rc_array, image.k1_dict[image.thermal_band],
image.k2_dict[image.thermal_band])
if save_dict['ts']:
gdc.block_to_raster(ts_array, raster_dict['ts'], b_i, b_j, bs)
if calc_dict['rc']:
del rc_array
if calc_dict['em_nb']:
del em_nb_array
# Delapsed Surface temperature
# if calc_dict['ts_dem'] and calc_dict['ts']:
# ts_dem_array = et_numpy.ts_delapsed_func(
# ts_array, elev_array, datum_flt,
# lapse_elev_flt, lapse_flat_flt, lapse_mtn_flt)
# if calc_dict['ts_dem'] and not calc_dict['ts']:
# ts_array = gdc.raster_to_block(
# raster_dict['ts'], b_i, b_j, bs, return_nodata=False)
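        # "Delapsing" adjusts Ts to the datum elevation using the configured
        # flat/mountain lapse rates, so pixel temperatures are comparable
        # across elevation when selecting hot/cold calibration pixels.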
if calc_dict['ts_dem']:
ts_dem_array = et_numpy.ts_delapsed_func(
ts_array, elev_array, datum_flt,
lapse_elev_flt, lapse_flat_flt, lapse_mtn_flt)
if save_dict['ts_dem']:
gdc.block_to_raster(
ts_dem_array, raster_dict['ts_dem'], b_i, b_j, bs)
del ts_dem_array
if calc_dict['ts']:
del ts_array
# DEADBEEF - Brightness temp is provided by LEDAPS/ESPA
# Brightness temperature
# if calc_dict['ts_bt']:
# rc_bt_array = et_numpy.rc_func(
# thermal_rad_toa_array, 1., 0, 1., 1.)
# # em_nb is 1, but needs to be an array of
# ts_bt_array = et_numpy.ts_func(
# block_data_mask.astype(np.float32),
# rc_bt_array, image.k_dict)
# if save_dict['ts_bt']:
# gdc.block_to_raster(
# ts_bt_array, raster_dict['ts_bt'], b_i, b_j, bs)
# if calc_dict['ts_bt']:
# del ts_bt_array, rc_bt_array
# if calc_dict['rc'] or calc_dict['ts_bt']:
# del thermal_rad_toa_array
# Cleanup
if calc_dict['dem']:
del elev_array
del block_nodata_mask, block_data_mask, block_rows, block_cols
# Raster Pyramids
if pyramids_flag:
logging.info('\nBuild Pyramids')
for name, save_flag in sorted(save_dict.items()):
if save_flag:
logging.debug(' {}'.format(raster_dict[name]))
gdc.raster_pyramids(raster_dict[name])
# Raster Statistics
if stats_flag:
logging.info('\nCalculate Statistics')
for name, save_flag in sorted(save_dict.items()):
if save_flag:
logging.debug(' {}'.format(raster_dict[name]))
gdc.raster_statistics(raster_dict[name])
# Cleanup
if remove_refl_toa_flag and os.path.isdir(image.refl_toa_ws):
shutil.rmtree(image.refl_toa_ws)
if remove_refl_sur_flag and os.path.isdir(image.refl_sur_ws):
shutil.rmtree(image.refl_sur_ws)
if remove_ts_bt_flag and os.path.isfile(image.ts_bt_raster):
remove_file(image.ts_bt_raster)
del save_dict, calc_dict, image
return True
def arg_parse():
""""""
parser = argparse.ArgumentParser(
description='METRIC Model 1',
formatter_class=argparse.ArgumentDefaultsHelpFormatter)
parser.add_argument(
'workspace', nargs='?', default=os.getcwd(),
help='Landsat scene folder', metavar='FOLDER')
parser.add_argument(
'-i', '--ini', required=True,
help='METRIC input file', metavar='PATH')
parser.add_argument(
'--debug', default=logging.INFO, const=logging.DEBUG,
help='Debug level logging', action="store_const", dest="loglevel")
parser.add_argument(
'--delay', default=0, type=int, metavar='N',
help='Max random delay starting job in seconds')
parser.add_argument(
'--no_file_logging', default=False, action="store_true",
help='Turn off file logging')
parser.add_argument(
'-o', '--overwrite', default=None, action="store_true",
help='Force overwrite of existing files')
parser.add_argument(
'--stats', default=None, action="store_true",
help='Compute raster statistics')
args = parser.parse_args()
# Convert relative paths to absolute paths
if args.workspace and os.path.isdir(os.path.abspath(args.workspace)):
args.workspace = os.path.abspath(args.workspace)
if args.ini and os.path.isfile(os.path.abspath(args.ini)):
args.ini = os.path.abspath(args.ini)
return args
if __name__ == '__main__':
args = arg_parse()
logger = logging.getLogger()
logger.setLevel(logging.DEBUG)
log_console = logging.StreamHandler()
log_console.setLevel(args.loglevel)
formatter = logging.Formatter('%(message)s')
log_console.setFormatter(formatter)
logger.addHandler(log_console)
if not args.no_file_logging:
log_file_name = 'metric_model1_log.txt'
log_file = logging.FileHandler(
os.path.join(args.workspace, log_file_name), "w")
log_file.setLevel(logging.DEBUG)
formatter = logging.Formatter('%(message)s')
log_file.setFormatter(formatter)
logger.addHandler(log_file)
logging.info('\n{}'.format('#' * 80))
log_fmt = '{:<20s} {}'
logging.info(log_fmt.format(
'Run Time Stamp:', dt.datetime.now().isoformat(' ')))
logging.info(log_fmt.format('Current Directory:', args.workspace))
logging.info(log_fmt.format('Script:', os.path.basename(sys.argv[0])))
logging.info('')
# Delay
sleep(random.uniform(0, max([0, args.delay])))
# METRIC Model 1
metric_model1(image_ws=args.workspace, ini_path=args.ini,
stats_flag=args.stats, overwrite_flag=args.overwrite)
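
    # Usage sketch (hypothetical paths):
    #   python metric_model1_func.py C:\Landsat\LE07_043030_20000615 \
    #       -i metric.ini --stats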
| [
"[email protected]"
] | |
4506438a67a0403f0dbfc593c8410beb5c8794ac | 30c209d2e2ee59f5484197bb5ce8acd48f366a5b | /lib/icons.py | 6a6a4d54995f0b58c0c79df1e6dac2e404ce6940 | [] | no_license | anandu467/json_language_translator | 1907c697d0d93b7167a23f874caaec048a00e18d | 89478f69439ba5d5eb4f1a126dba8d6fa4173a2c | refs/heads/main | 2023-04-15T23:46:23.764526 | 2021-05-02T08:54:26 | 2021-05-02T08:54:26 | 361,663,977 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 15,951 | py | saveIcon=b'iVBORw0KGgoAAAANSUhEUgAAABQAAAAUCAYAAACNiR0NAAAByElEQVQ4jdXSz0/acBjH8f4netSQgIT667JdTZaZHXbQ0+LfMWMksuhEN4UpZZrMgy3REKNRYoiyDrpFajd0BEX6LfUHKCIS6V/w8aShtohczPYk79uTV57DQ1H/1dA+pY/2ya6X3ri4IGQuV4SktiIktWAsqc0Jaq66Fx5xp8uXbn4cYwhohuCVV0CKZBHiowjxUWzyMVxXNF09HgG0P3NSE21nlJFqMHGYxgzLYYbl4OMCRnBaAM0Q0P7Mif2L2mS80C+7aIbAwcjo/hiBfJYHt7GBxbV1rH/ndVihfIOO0QjuDnAw8mBNkGYILC4eAwsiAvEslnZVXdxOFu++xWH58ONx0D6dYu8XZo9gm5Rgm6jRpATHbPoetHsP5g1g21Ti591Co9k+JbafH+xlVbwNHJvWy6qNgcv7RcObPGzxT/FpYIefoFTRkLu8QipDTMsXiyiUK08DO78quK5ouCiVoZzmTSuUyri60RoD1dw5fu0mTDvOX+hA6+e/WwbQ6v4dqgbrpbvQLQUNoGVos986IcLh2cOapNRtVSJom9qDzS2ixRl5bQApiqJah8NvLK7w4Hgwul2vseXYVqsz/L7FGTbH/tm5Bdrf2B6CbEEjAAAAAElFTkSuQmCC'
browseIcon=b'iVBORw0KGgoAAAANSUhEUgAAABQAAAAUCAYAAACNiR0NAAABjElEQVQ4jaXTT0vCcBzH8S907di9TrI/P2V/DDr0BII61jPoeXyrkQuj0qbzEgndhnQL+5U2cClUFBbNlj6KoGt+u6RUbpRz8Lr82N784MMAAAAcZyqBXBCRs98S6M7AWI/jTInIWyJyivAmbJyv/7v3dbOo2JCA1bqI3AgjIUcROQMAABE5WyvUKVtp0O5pPNlKg1at2ruM7jQs5u+clfILLR0HE1kuv9DC4d0JzNtBP13q0W9aoUOa5f+gFzqkl7oj7w7M20EfomLSVp2SZnOIZTySNmvEthuUtl8jol0KD1o+Jc1m6Jmy/0ByZDRGMF3qkbL/QMy8niyoFwOSN2skGy7JhksSXsQIFoORYTTLJ91+Jdlwxw8qufaPYQY06zleUM0/USp7O0Kz/NCgbv8R1AodUg/aI/RiEC+o7N0PR/hOzT+NH2QZL3SQAXnr6v/BtN2l5E4rdJCBVPYm5LsegZp7DP2X41AO2h8g4eURy3jEzOvJZDwS8cIGAAABq3MJ5MIkRONsFgDgEyX9px77ofENAAAAAElFTkSuQmCC'
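# Usage sketch (Tkinter; Tk >= 8.6 decodes base64-encoded PNG data directly):
#   import tkinter as tk
#   root = tk.Tk()
#   save_img = tk.PhotoImage(data=saveIcon)
#   tk.Button(root, image=save_img).pack()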
icon =b'iVBORw0KGgoAAAANSUhEUgAAAIAAAACACAYAAADDPmHLAAAgAElEQVR4Xu19CZhcRbX/r+7S3bP1bFmY7BtJIAGyL4QAQdYYgiQkAVlUEBHUBz6RACqZEWRTFvH9RdEH/sGFF8RdHy4QECUsCYgRQiRhGyAhZCaT2Xq5S73v1L235/bte7tv98yECZn+Pj4y3VV1q+r86pzfOXWqLsPg56CeAXZQj35w8BgEwEEOgl4AgDPwA2z2xGjZgdbrfp3k4gDAOTu+CfLxgNnYyMx+7dlg4/tlBkIDYNV6Lj+0mhlOr1at55GhQGRfGtJ+6WkvHhKtAq9QwVLNSNxzCdN60dSHrmo4ADRyCY3MPP9+XhGtxEqD4RQYmMk4hoFB3W+z4ihvp9f0N/3b+X9QRzgMtRJyuhs/u28l+yzs8ey3fg/gBxUAgG3nGeOfepivkSU0yRFMYQwwNICb1twX9fEK0akc9D39nk/A+eq52pYVQEtijwpMumc12wdwNsgHrPXj/+GcNTaBka2/6Bf8TiWKy01dCF6YAQ4wxgewF+EFDYOpRCDrGlbcu5L98vgNXHl8CdOLAu+HsHAgABobuUTCv/Ahfkc0jitSHTA4B2Osn21+IXVeohA4hx6thKx14b7/PotdhPVchovTlNjsAV/NFwAO4SO1H4nhQS0BHRwy2ABe8YVFYUoKJEPDu6wdU++9iHUMmgFfE2DZxss28MpEC7YoKsbqafB+X/mFBdi7ElyYLa5EwbQ0lv/oLPbbVZzLD7Eez6Z3Dzgwa+dogMYNXGlcwvQLH+IXqOX4/1pC2Hw5Z3j9pKr7cxodM5Duwg/uPYtd4nVt+/PZA7XtXADYtv+ih/mDahlWp7tggEEZqAMosl/CDJg6mrUqHPbAKazrYDcD2QDgnBG3/8z3uarV40VFxWG6BpOhn4lfkVLsTXHOwZUImJHG0ntXsf892LWALwAuW88rEwxvyjLqTAPk7IULGPVGMvupLgf0WAXkVDfuvncl+9wgANwTb2uAc3/M49EYmpmEOP+QAQCAKSuQdB1vdJo4/KHVLHEwmwFfDSAAEEUzkz+UABDegKyAmQZOuncl+8vBrAXCA8DN+oM8AG9s3h0ndj8pTDg43zNIa3n3A7I0mY89ce0ZcBN6rApKqgt33XsWu3wQAJmYuUUCszQAhYBK5QBhXMUwsfw+5gecw5QjkIw0tldWYvp3lrLUwWoGAjVAxDEBvQFAWMGF3dUL216IcsIMyGCahhPuX8M2HKxaYGAAIKOBXKo9hBB7U8Tk0GOVULRO3H7vavalQQDQbLq8AKEBHC+ALG6QI1isCg/iBV4QhDEfQQjwyxvI4g2icWEG9DS2RfbiCCtR5ODbIg7WABGzmUlSnBumtQeYb7KLiRIUIpB9oQ3CgoczzmTA1PTjfnSO+uTBqAUCAaBGzGZJluKmngsANwHvjRoOU9ch72HKBpXxc0CELwiuxyolJd1p3nrfGnntIABcJoAAwGQpzn0A0Bth9FvdYk2RSGrhpqJKkqGZL40bKs1YdzyMpiawdeuKT3Tqt3EV0TCjFJ0iP8EaQD3AAFDkwHuKM84kE7ouLXzgHPZMyc0MgIqcc9q0MxkLn7GdHwCFOEBYWzsAJieYwnA9Ui4pioZv3XBc6/XvdNbJIytbM9nPA7jromt1dXV8504YI0aw7gx94pwytXkYjdA7AAyk2SkRjDQBaRMYW20aVx9rdkmshETXD3QeOOccnRKkHSbMjSZjD9VURDZTl9avXy+vXr06L5gDAaAoPSaA2V5Ajpm1GVqQ4XETuCDi6EfyvGXzORlZXqWrMYEHuyKnDFZbSOLf9IerLJ1wkRjD2uOBQ+tNpHQq4401u6NVbokXGoGXgvqNztu2K26deVSwb0t9jajWnn1nQjdkxn+a6Navra8vf3v9ei6vzpP7GAoAed3AfOgPsypDlDGtdC7xyXTYrkcrNvTHbw4p2YEBnWlgxTSDnzXNEP8uqt3QHejXgoRjUvtKVXkEKcN8LdlprK6piWzmnEtBvCA/AApxgGDDmi/hPPws2EKOKT2rmSo7mDFMIO0kdgcBIQTAqCqt+on1HFct9mSK70+ft9DMhOgLB+fc5EZlVUzRUsbubi2xpL6q6uUgEAQDQHZMgGEHggopdNt4kieSmXSvevSqtqCwoHXgwOAMJPymUxlqYj3NOmp82/sctz/Bhfru+biXuXeTIb+XRK1cdayBCXXcNgOFJDJwf+ec61WVUaWjO7U5Xh5dTBj3I4aBAJBlo1mS5bipOwDIPYXlnU4/MThTlG+BulW7e8EaHChTgG+eLqE6ljvZW3dz3PgohxzQuHfx54sQE4a60sCq6SZWTDMPVDOQNUkEgnhlVGnvTH2huir2X5xzmXmyoIMBIBnNTJHj3AWAwCNa7pnOp3KL+M3SABAa4JZlEmrLejQAcQKy0S+9x3HLY6QBCqzEQkiwuQWZgclDOK5cTGlQB/6HjEFZLMqSieTLr/47NnPOnNyDsfkBIMlxbvRogH6ZkoBlKc4f2gC49fQAAOziuDkMAEJ2nLpC2mTtcQbG1nKkjb6hMiEf3z/FaCLJU2RsXk1FZJPXNSwIANMwiFmKGGMWBwlYzW4r791C8nOAHFLndazob1rp0XwaYBfHrRuyNQDxA3dHxTMdemITyCxvwv6O6jnewNlHmjjjcBOdKUByH373oxfujvvZRL95yplMzzZ4EDVynhVW49IcmqZZVRmTuhPaxfGKyA8536AwtiTDdAMBIDGjmckeE1AkRmkFCYGU8HGbgG+fKaGuPNcEbNnJ8fU/hTABBZ5Pz4ooFgASOnDYUI4vkRkose8lDLc/q2jxyoi6rzP91Zqq6DcoXMxYz6HYfgEAzRtN5vg6BlXyRNZCuGXObJAGiMjA5xYxVPl4Aa+3cjywKY/PXuhZ9u/0HGrLIMAyQJaAa443MLqaQ6O1Qofl+lNE/dg251yrroqq7Z3ppuqqaOMmztU5rIcL5AeAlwME0X7XRIuVa1rk7ZvLJdTSynWib/040FKadrpN5O8/f22iLWEBjlT/OTNMLDvMFJ5BQZJZysP3Vx0bAG2d6abaYgDAoDczSYlzQxdxAD+P3s/8UTlaURYA5AMHAL8ysNcGQEIDpg3n+OJiQ4zlgP5wCA3Q1plsqq0qC68BvAAIOwlu2x0EgHzq1G++iylfSFW728/SAL800JYEFMoQMq3/f2WJgZFxLjaLCrUbdn72ezmhAWIlAMDUm5ncowHCdjwUAPLMZm+JV57ktRxSlwWAXxnCBCiSFXYm1X/eLBMfnWp7AwcqAggA8Zja1p5sqq0uRgPYADBtE1DwIiYbIQ4AKIL37RUyKqNhofPBlSPQfe7nlglwAEBm4MgGjiuOMQQ5LKQC3CbSay69I/Mzp0Heot+sFNU+51pNKQDgpt4skQbQdSvY7vWB84xKBFQkYOZIK5ZP21QOOSRAnDdHguq6ccAZ/PudwPoXStO31L5uAqdPZ8L78COev9zC8VYrF0TPueTQ6dfzb3OxscRsv58uwCLX8CsnGDi
kikMrrVsfHKozMYNSAWDYAHA0QAlDoVWUpdIpIGMCN3xUwrQGJgiWOwFDM4Brfmtg226LRAoheSOF7u/o37aLRsKvrwDuPFNGnFxG2/NwwEWr+/KHDbQnLXCKZu22CQQxNXvHkfrVnQbOn23itAPZDJSsAXQLAKapF+XEZSKB9j57BjckbAnoSAInT2W4ZJGUAQCVobAvhWGffoPj9g0mytXwACBhdaSA1TMY1szKbpcIHT330W0m7v4bR2XM8lLExwUuKudWw6QJEmlg5ggTly82oYcwAyWskf6vUhoAWuKmHrdMgKHlPxdQ5BBo8ssjwK3LZdRXZKtqEgAJ4mt/MPDv3RxlBIIQbhitdhEyXi5jWFWPZhEytus3PWJgy05YwPJlmrmWlTRQTAa+eqKBYRUINAPu8LfXNfb+5o3yhhieO3kp9Gz3hN25VhuPqa3tyab68CSwJW5q8WZJIQ7gAkAhphMULHCNklajCLTMkrDau1ptk/DC2xw3/skQQs2K7WfsWk9KF6lzUusrjmK4YJ6cpVUcE/PKexyNfzCEa+fdKxBNBoyL+kpm4JNzTJw0haOL9gYONG+Ac622ukQAWG5gsAbwzpujVUVOiGei3H9SpLCqjOHWM2TUlDm3Ttpa2TY4337cwOPbOSojPVrAy5SpTSJn1MbNyxXUlPu3dccGA3+ltkj92+zP3ZaXZji/CR6gAbNHcXzhGFOEhfO5maGX5/4sWCoAjLStAcKaAK+X4Ja4Z7aFFkgCK2cynD83e9U6K3RvF7D2N7pY3eSa5WgCmzN0pIHPHyvjxCnZqt9Z/a/u5vja7w1rV8/P1/IKw1OGAEOm6Gsn6RhSYXkaB5QSEAAoK94ECAD0AwdwNC4JiFTyTacrGFVrrXJHvToM/pk3OG75s4GySO6uIql+AtG8cQxrT7J8SlqdjlZy2rjxjwY2v8UF7wjDJ7x4INzQDuGn5hk4cTIX5ovIaiHbbcVDKEOv8HKnfssuT7twjXAlBFB7AwAyAcinAcKsKK/dtokZCZCibQvHM1x1YrYWoCoOIH7ynIn/ed4Urh15CvQhoZDqpzSxG5crGFqZveHkeBRPvc7xrb9YAKL2Slm5JJzuFDBvLMfnjzGsWEGIhkjw5REZMdUam18VGg6BPqkZ6E4bodoNJ3pXKc61uqI1wF0tcb2yqlmS1Dg30hzM8dbzxaAKxadyu07xpW6N44tLZBw7Kdt9o8lxVs9tjxp4cgeBgAkbTr9pBse1pyiYNdqH9VMoNwWs/ZWO3Z2AKodbidk97BkPCbAiwoQZoLyEQDNgV6GTGlFVxt+278YLza0oUxVKysmZANpiS2gGZo6uxTGThiGlEQhcasyPZDlEKzQSbAC0JZvqa8saN23iqjs1LBuYzuHQDAA8XoDfQ3v8jVyYezWEh22J6J0BVJcDt5yhoM7rFtqEsDvNQap8yzsc8TKgPQFcfIyMZdOzQePWHHc/aeCRl01URW3N4fTFr09Bfpw9XmEGNODTCzhOmFQ4YZRkSEA9794n8XzzXqEJ/FxPsQDSOmaPqccDnzqmfzQASgVABWkAJW6GJYGhEWkVdOac7CmZArLl15ysWDzNlYDh2PJ9CeDWP+t47k2O8+dJOH9+rtlwVP8Tr5q487Fs7hDCFPuOgPpJ+k+Yq3Ecly0ykTKCb86k6xQqY4pY/V9cv8k2Af736ltjZWLl37l6DhZNGobOpA65T33NEgGg2QDIuIHF2PsiwSBy8VLABfMlnDVTFgklxBEy9MHWBK3dwN92mFh+hJRD6B3O8NZejq/8Rhd5/WHIWt6uusZM7ZNLet0phjijIOKjPpVppVdGVTT97kU8tPlNVJepIFAEfSSJoT2RxurZ43HdsiPRmdI85xyKnMyc4lyrry5T97Qlm4YWYwK0cksDFIwE9gEwHG1AewFXnSRj/ngpEAR+0+FoifYkx7rfGnizlYvYfimsP2i6SSsldeCShRzHTvTPFKKpUCQJLV1JfOpHT6G1KwVFyv9yNWEGTY76iiju++TRqKuIQTfNkgirf997AQDKCMrnBWRk742qhGDJ7s46bJj4AO3ArVuqYPLwbHLncB8nezejHWxzQnVv+pOO596gFWjZ/SK7kXepOWZg0QSOy442BRgy7dv/ME2OeEzF+uffwNd/90+hCfzDztmPIi5AK3/dsqOwatZY7EtokB31FcSt/b73dTW4Vl9TggZIx6r6LQ4QqA6ZdUaP8gjXLVMwptZaPUGCdBRrSgO+/zcDj75iWsLvjxfa0TkF4XpyXHcyeSQ8AzI3v1Qlhise2oS/b39fcIF86t+ZB7L5HUkdiycNwx2r5yBtGJDQM3a/SKXDo7xz6V2LANeG1JSre9q6m4bWVoT3AtKxyv0OABoM5QlQgIc8g5vOUDGiJhgEjt3f+JqJ6/+gC7Uv9g9cLmRvLai7PgGRAPrZRSaOmcCzEkZppZepMl7Z1Y7P/Php6/06RTycNBut+h+ctwBThseFe5h95rGIxrKKkgYoV1uKBUAqVtksF+AAfWD+M111giu0+UJRu6XTZXxshizSwYPQ7lSmlbnlXRM/fdbA1l1cpKK7gdBX/XQOjhw3ieMSjxmglV5dFsF3NmzF9574N6rLI76rP6gvpAX2dadx6XFT8PklU7Evke4jb6BUAETzaIBiZ9Srw1wIdTwe8rPpM3uMhLPnSpg8zP99lPkeTXb/0a0mfv2iRQQp1EyJJfTJyQEohKqABUdgo82ndacZwitwuAYBOK2buPjHG7FjdyeiqpQVBhbcRWJCqJpuB3w885DUTEwcVoUfnrcQqmJtlff+0xsAhPECSughqUZKuKABJjVLOIc3SDhzhoQFEyzBu/cG6G8xF7Y76P3NW57a3LDNwCMvmXh9jzWLZB7ItRSXTfRiYh0z8LnFJo4eb5kBivJVxlQ8vu09XPmLTYgqclbkT9QxTIyuLceSKYfg/o07EFPkTFqaM4UiJqAbuG3lbBw/+RB0JDUBGPf6CYqt+YnB4olcG1Jbrr7f2t00vL4IDpAiDRAEgDwr2tsRd4edAA8xdtpgIcZ/+CESTpsu4ZhJPSveK2D33xSGde8Oeu2st+zTr5n4yysmXt5pCm5Bz6ScQNodJCA4YAir1BwzcMKhHJ9eaIoIIU1zRUTBut+9iF/9oznH9xfqPZHGx2aMweeOm4pzfvhXdGt6jo23ymn42IzRaFp2FLrSuWWKX29cq68tV/cUC4BkxMcE+K0ctwTcdNjuKQmdJo1UJxEoEiDF02ePlbBkioQZo4MFLwRkR+Ko3oPPGSCBfvJoGXPG+msKR1t43cXX9nBs3GHi2ddNNLdxke5FYCDSKYJO3BM3CHC/aDy0EVVfDlx3qiEyjCRJwvsdSVx4/9+FAL2+v4j5p3XccMYMrJg5Fpf+9Gls2LYLVbHsIJETE6gpi+C/Lzgaw6pi0HodEygZABW2BqCTQUURWiF6YrDEjGn3jAI85VFgwhAJR0+UsHCChOHxHuT4qXu3AF9618QDGw1B9MiuU81Tp8lYM5dOHlnt+JkFxxtwR1ZJ+2zdZe
L5tzj++baJ5lYrqEMfIp+ZT57YBj0xbTB8/lgTc8dQn1Q8uOl13PTIv4RQKR6QUeti59LEsKoyYdtH1ZXj55vfRNPv/ym0hjdOQDyBVP+1px6Bc+aOQ3tCE9yh9E+pAFAr7KNhVkaQe8+HOiP+tleJn59KqrFCBcbUM7HaZ4+Vcegweh2BNRT36nYG5/3uvXaOX7xg4M8vGyIbhwREmUZUjnb7RtYyrJ4j48SpcqZdPyAEPY+0UnMrx0s7Tbz8rqUd/OTuVWwiFyEFnDiF46KFJtKGhMv/51k892ZLjlAdtX7WrLH46tIjkNJN7O1K4cL7N2Jvd26kkBYOqf5544bgrjVzRZSwN+KnOEDRJuC0u1ri5R4AFINA6vCZsxSx0sfUEfPtqU2TToB276l7Bd/WzfHISwb+sMXEnk4rmYPquEO71CaZFNIu00dIOGu2jLnjXOaEMnfolI9Px5123AvrsW0Gbv+zLrJ/CpFE6jtpkrpyjpuXS9j+/l5c8tNnfJ/lELtvriBiN1ysaNIS1/32Rfzmn7l8wa05vvvx+ZjeUOvLF8LLg2v1deVqSzEcQABAydYA4oFeu+hjJ508uitPVnH8FGvTxplQ94T7qWcS9l+2GvjzSyZ27rPi+eKsnijskApXPr8NJLLnJJSjRklYfpSMOeOkLI0gvA4fJDhAaO3iuPKhtDgaJo6zh/ASxN6AxvG102T87dWX8Z3Ht6PW4/uT8NO6gTF1FSLAQ4EiymOgcPEft76La3/5AmKR3FwBR2tcePREXPGRwwRoSt8hJABUFA+AmOJwgDwmwJaJWzY0MbQyG6oZ7lgTESvKfb7eb/Vt320K//3vOwy832EFcYigOckf7lXhg4NMKhkFkYjdk0t5yjQJCyfK1vPtj1f7OObiu4/r+M0/DJFrQGXc5s7BeM7/6TIJjWHxRA1Pb38Kr7zXhZjH93fY/wXzJ+LKkw4Xtp1yayjM35kycNEDT2FXexIUPnZjzokpWMBZiDLVynso7dNbAFBauDsmWchfsg+AdCY5Lj9RxanTZaEuSTBuDUA2/NnXDTzxbxNb3jGFTacVT6zcMQnFDljkLbliC2R+jp5ELqaMCUOzVQCZDnoWuYfXPqzlpowHPTyL8EiQkMT29/4Kk9s3SXjqESGkvf6544aI5A9Bjk0uzMCNj2zBg5veQLwskkUchbJlDElNx80fm4WTDmtAux0TKHZOBAcoSQPI2RqgmAeTIFIax5h6CXecHcmcA6RJ3/K2iWdeN7HpDUvNk1hEkMa28SUD3dVBB2jkgaR0CtIwHHYIw4KJEuaMkzMeCIFl7c81AYKwh1Cy54F6K+PdtufQmdwFiZG6sUZg5fqZmDw8ju+dOz8r6ZPCxgSAJ7fvxpd/vgmqJ3BE9Ul7kOpfOn0krj9jhnAj8723I1g+FgAoDtAQNhBEHCAml/cEghwLGpKOOi4cpXJ9dkkEY+uY8MH/+baBt/dalzBSiFalG0CdMG0hzVIEAp2mHNtP4VoRcTQ5asoZJg2TsGgS7dsDP3vGIn7OAdZQj7EfQBFAWYpgb9cOvNf+L8hMpcibaMKJ7X9m8WR8fskUa4vXle8n1Lxhio2j11s6EVWyQ8fUhkhCiSq457wFGFldLsq7va+8fc3YMa7V11eoe/Z0NzUMDRkJFACQyrPcwFAT4ykkgjj2uT8CQ1RhmcuYvPa9lPaLqeNoBQooEQBJeBGZCdXfmw+DhLTRheaWv4PnBHeB7549H9NG1Ni7e24+QvmNKu58dCvue2q77+YRmYuOlIYrT5yG8+ePt0BUdEygLwFQzCp1O88i791aZVkMO4v5FBCDN9hQotTcHkGGawQFfcKMV+xPMLzT9hy6UruFGZAYF2neM0bX4Ttr5vlmBFuZxjI2v9WK/1j/rO/Wr3VCmbKG63BXQDuFp6FEAESZZQJKTQp1s2bqZF/Y9sKDLVyi7/tFHDmCvd3b8X7HSwIAFKOg1Xr5CVNx4dGTAleuY/4ue/AZbN3ZJtxEP7ZP2oqARIAiIlkMF2CUFVxfobYUawIIANYlUX17OriwiD7AEmFWfE73iMjKSOmdeKftKXBuCi2nypIgfxOHVgky6Ke5KWmkOhbB957chruf2CbyCbwZRBaX0HD+ggn4ku1KFpcoYmmA0gDAbAC4bwhxT4A3PlxIdj6Bo0JV+vX3UvvjV48xvNv2LJLaHrFDeMzEYfjWylki8BN0koh4UCwiY9uudlz6s6ehE8nzFHZiAiNrynHPuQtQEVVslzEkIyc3cEgpAMBBqAFKRBsxf2EGul5FS9fLMLiCUTVlGFVbBrpru2Awn1M8Yp/IBfBT785u4vXLj8Jph48sMiZAGqBSbWnpbGoYWhUuJ5C8gIgZa2YKHQ3TrDQML+C8BC7ob3e9ILIVduKLIYKF1LlfW4XqOP30DUtLSOkdeKdto0jn1AwTmr2RE4b/UIJIkKYg95G8gY9MbcCNZ8wQZwmDlLJ3Ki0OUKnuaelsGlk0AGQbAMWwjrDC/FCWY3i3/Vkk0i2QmVLUUa9C6eNEDikk/L2PLxB7C7SrGM4jtDTAIAD6GXBkBmThDfwbe7q2CpPQl76PExn8jyVT8amjJxYREygRAKoRa5ZkVbiB3uvi3W6dmw+FpSVuWXi1aRCv9G7QePvg/O3n5oWt64cRv/H5WTKLByhI6m3Y2f5MyNWZ+0QrZzHXaNBqp1TxaQ01+K+z51kbs6FMVi8AwAZNQNF6g+z4G61Po637fcgSHXYNwwDsnXa6Y1mVEfEJC1NHSOiUIHLHWXMwd2x9yJxBGwB7OptGDi+CBKq6hwQ6PfASIe/33inzjt+7RIPKBy0953lhCGG+SKN39Xgilz7ufm4+hKcv9FoN3ZBRX7ULo+t2wzTDx5kJKFFZxj/ebsW23e1W1rBHE1gHSTWcPXscrjp5miCG7piAn/Z0SGBLSQAY1ABFaQBLTVv5CNct88dLUIMUAKJEEcoSuvbXL6DcJ1+QBCzyCytj+P65C8VeAtXLZ3pLBoBCGmAQAEUBIKMcOcfVp3IcOoyLQ6Rh2Lo4GiZZ+YCX/ORpvNeRgCpZGVXuj5MzeN3SI3H6kaMKZguVDgCNAHCQhYJLErdHQPblVavncNB/4r1DWUs0OHxKkUE6UHrTH/+Fnz//lu/dAtZBUg3HHTpcJIv0BI/8uQZlsVMcoGgToAwCoCQ4iJQ4DTh0OMc1pwYQQC+/sYvR3gAdKadEkat/9TwiPokiwtuhV+koEr579jyMr6/MgMDPFFgaoEpEAosigbIee0uSlGq/zaAg/pWPlwW5e36zXEzA0Ccol5PT53URg3irt/9B/LUg77WvvLt2Kcf4IfYt5CF8ZHoebZvT5tGlDz6Dt1q7fEFgxQTSuPTYKfj0ouDdRmtuyQuoEoGg0QW9AJF1z/iq23lZwux6VVKiI0091ad3BZe0rA6wSiREurz6nHkcK2f5mYHgATn5gnc+thUPPPua7w6hk2526PAq/L81lG6WL+RUNACsANay27uekCPli/V0t8HAwvszB5iw+qO7jjdwWAPH2lOLu6KOA
kF0q9jmt1pw5cOboco09bnRBNqc0QwDt66YjfnjhuS5W6goAADHN3Ll8Uamn35b981KWdlaLdFFl6HYh6z7Y7o+nG066vyrH+UYW1/cW0idRJFP/2QjXnx7r2+iiGMGVswcgxuXz0S32CDyNajFmACgsZFLjY3MXHZb51GAtFkEoOj+wMKbmoUlWer+e+GWB1wJSoPvSgIfX8Bx5kwuTELQS669nacAEBHAja+9jy3v7kVM6TlDmJGxOHBr5RWePn0UFJnODvjFBIrUANSZDAi+1X2/WlZ2frq7SwMTOc8Ft7bzScKPVBWSXKFwtx9xC0bx1YAAAAffSURBVMG3ch7rFxh0xlsMKXUIp2WngcNHcFx1Cu9J9QqZRENMn0yBuGsgMJxsHcAltzD4YwGA3MDRDYVCwXYrAgDrwJd+E8Mhdz+rqOWj9XSnziANmoJCiHX9TsChuwy+toxjdAkvoybhBt0z7PZs8mcKlwAAMYZGLqGRmUtvaZsjqeW/l1V1mJ7spPsRJfuMr4VlcnydmLX76K/39Kf7N/ckBn3vtC1Gaq/zrLLOnbqe35y23XF0dx+9AnR+CyqT73t3W347eGQGUgznL+RYfhQXF1RkvYy6CDCVXrRUAABYtZ7LD61mxtKbW4+QIhX3KJHIAlM3YGhJUksGs3RTuO2u0kfQfzUZGOfoN63meANHjuK46lT2Ab13qBcAcPOB2Z/5vnrI1E9czExczGEeIatlsuw6eNl/Uuq/lukNZkY65Xv/RbF231HJXp5E7agywzWnpTGqlu4SoKMk+3PNEADiasue9qbRDdUFcgID5tohhfTz8Y0blHh83gzTxAzOQfd5qnTgw32nl/dvp9l831MZ/3vBrAMlQe07v7nLeO+JpLrOd/ZJJSYz8ESya2Kqq+08SuXOOfreF7izr8fpSJi4duVQLJlejq4kpXKVQlNL7ZAFgD172pvGlgoA69GcrVoPiUxCqV0ZgPWih1/4u3cgyfUwRfpuH0qGi6td2rt1rFjUgC+tnGJ70/txFsRo6HBoXN3T0msAZNgVa2wEexyQOho2M2D2fhxR3z1qwk6w9eugHXnpYw8q0YrVRqqzTwNeFMAj4X/kqCFo/DgJn5JFgs8I9N3IvC3ZGqDvANB/Xd2fLZMpe7xxiXHUpRvOldXoA4aeMERepx2sCtoYyrjxYtfE7rHLEaJv6MrXjoSBuZOqceMnpkCRLQIYJi+g7+dgEABBzEYCGs0jP/vHYYqsboOk1HBucHCfMxB+LfiwRFrldEVcV8rA5BEVuOWTUxAvV5AWr1/ue9GGa3EQAIHz1NjYKDU2NpqzLn/iYVmtXKGnunTGuOIOt2bfTWQve/v+QofxO4qA1EdCM9FQG8EtF0xGQ30MybSR80Ywb5ZyPo+jkCZyD85vO55uChVewKAJyMWBYwZmXbHxE2okcp+RThiMcZl2xcXuR463ZolKCND1O31LKzxt0M0fEm46bxIOHVmB7iS9r/ADW/r2gCkhJK62DgLATxE0CjMw+4q/NshM3sZkpYq8gQxV88btvUZfgICJ6B4RPLpsomnNeMyaGEdHoq/f/RNO4eeWGgRA3plzzMCCLz/9azlSudwyA3Z00F+nZnbEnKtwKF5vmiauPnM0jptei/Yu3Xrjh/DCfN5N7NXbhZSE10a467vr+vVXpIQNaoBAEDhmYNHVmy6S1LIf6KkugzHI+WJ12fKiG75NfOGUBiybXY/2hJF1MWap67bv6tkAoEDQyBIjgX3XmQHYUmOjhMZGc/5VT49SZPkVJikV3DQ4bXi5QZBjDey9qK6UiU8eOwTnLBqKzlTYA5v7cx4GAVB4tu0XZi7+you/V6LlS7VUp0iDy9rQ7HH5hV4XFzglDJw5twYXf2QYulPW5Q4ON/RqZu/3hSxDGM/T4aLestm5DVyrrY+rewc1QDAOnDS446578VI5WvldXQAAgXmQVjqWgRMPr8Tlpx0i7iIslLhSGIX9VsJKC9/T3jh+ZHVT/lfH9lsfBnjDdu7DCTe8PNbUja1MksvIDGRdjGHbA+ut5SbmTYjhyx8dLhieOJ7lhBAH2lA59OqauLJvX8eXxo2I3845VxhjdK2p+BTinwNtOP3XH9sMnPD1rX9UYhUn68lOg25/cogAyV8c3UqZOLxBxdWnDxUviKY7gD5wVz/PrHDOeSQaZYaufWzMIfFfb9iwQVmyZMkgALxz5piBE7+x9QtKLH6XnugQ3oDIiSWbLzEk0xyjayRcs2wIaisVofoHuvAVRWG6rrdDTU8ZP2zYLs4z1z0MaoAsEDhm4PpXJ8qy+RKT5Ci4uLWPkfBJ2LVlwDVL6zCqLoKEuEO7/xRSn7TMuR6vjiud7R0/HTOi+lzOucQYy0qXGOhD6JN5CN2IbQZOuXn7Y0qscgl5AzJjMl0vG5FNrD2lBlMaYuhKD0R3L3uUpPplWSaJ65qG+eNHVf5jEAAFkGAHhfSl33r9i3IsfruR2GeYjMn04oLLl1Rh9rgydKTMnvz+glE4+4H5cs6piN+OT1Db+XaH7OdwU+h5o6Y2rrS27vvqhFE13/AT/iAJ9ADCSX0787Ydk3XI/zIlWdXSOv/MMeVs8eRydCRN65V1HgbtFzX020vKFxvwCzp5seHnarrr0aqXANqBUqqrq9C2r/3u8SOqL1u/fr28atUqk2XHtgY5gK9CsM3A6Xe+80QK0YXnHJk2lx5RKZHrJw3IE5IitZmZHDIRvoqKCnR3d6c0w7xhwoiqG4j0iZXuI/xBDeCDAMcbOO225rUXnjTq5uVTgYTRt9mCoTlJyIK6DiQTSaSTqXZZVn6n6fodE8fUbKKNrnXr1pE5CNzaGCSBOWbAShK5+Tc7xqyZPeRCSZI47fTRIqL/7f+DHQVRQALu4Bw7mJZ6dty4oTuF6fBh/H4tDQKg4PweWAUsGiBUvjc73ncggwAIkC/Zzs2bnVNDdEh64GY/z56dOaHlS/TyQXgQAAfWAu/z3g4CoM+n9MBqcBAAB5a8+ry3/wcsJe68DYxwAwAAAABJRU5ErkJggg=='
red_x_base64=b'iVBORw0KGgoAAAANSUhEUgAAABQAAAAUCAYAAACNiR0NAAAACXBIWXMAAAsTAAALEwEAmpwYAAACV0lEQVQ4ja2UX0hTYRjGf2droxZtUDOVRNofQfxDhBeT4UQIBHFsplKXLrrroqtOF+1SN5JBRAhBhNObrEDmEaPwrgjJiwilVeS2nN0Eq6CbsC1ZF+dbHs88NaLn5vB97/s+33ne7/leif1hBYaAfuCALvYTWAYWgKJB/R6MAClgFLDtE7eJWErkGkIC4oAMmGo42CRy46K2CnEgXAORHkNArLIwi+8IcAiYFmsH0Ad8A77rCJxAACgAP4B3QBfQALwF9QJS7Mp01Lm922Oz95+e9PmzgE9D5nN1+zfHkveeHXO5i+JgRG0KsJjF360DaRHsO3fzdsuJzlOBzmD48Jf8Zk8hs/ESaGofCCZHb0w12xsa3Y2tbStrynwOyAJlYAdok4C7wGWNNKfHH3h1fupOPWABikr0SgYkKRxLeISi4tyli58+vFjpAj6LOhtwSwJmgIiuT76OwdBMaHzSUyEV+1b1ADmbfrx0AVjV1SWN7LH6+tFiRInKGUFmrYEMQPqj3yTJVMbAY0YwIvR1DIanQxOTLexKLgLWcCzhaR8IJtl7+xWUTahvU/vEnN6e3rnQ+PUKWUm5JueUqJzVkrq6/Q9RPVmBDdgxAwcBL/BGBALhicTpI8frXUBpMXo1l36yFClk3itft/K9rWf6HYD1aFPz+poyv4FqG1BfWQ7UZi9o5Nvr3N7tyOyD5x5/4CPVxs5rjG3XtC4lFAGquWVNoQN1dGkl/W6JiNk1ezIwrE/8L8NBi38dXzH+Yq1hah+wVTKNmC3AWdRemTV5lSGwLAhL+sJf07WqI9Q0faYAAAAASUVORK5CYII=' | [
"[email protected]"
] | |
c94b34d4d6623b867a7cc91d3672366334dd307f | 9cb6a655735b954eac4feeb006b174b8a5d759f4 | /test/test_sighash.py | 1e3dbf744d8ce0de53675357bfa42832889eef0d | [
"MIT",
"LicenseRef-scancode-public-domain"
] | permissive | Conio/pybitcointools | c68639439c5fc84694bb28213cc6d9c21198ad94 | 3025e73092789121ecf5aef4e6815be24f475735 | refs/heads/master | 2021-11-27T07:55:38.707876 | 2021-11-23T10:17:32 | 2021-11-23T10:17:32 | 53,937,108 | 20 | 13 | null | 2017-11-15T01:03:28 | 2016-03-15T10:55:22 | Python | UTF-8 | Python | false | false | 3,375 | py | import unittest
import bitcoin
from bitcoin import *
class TestTransaction(unittest.TestCase):
@classmethod
def setUpClass(cls):
print("Attempting transaction creation")
def test3(self):
print(
deserialize_script('52210248905f94419795ea33cd42474e10bfaddc3ee5f0f0c66ecc29238fea6555f29c2103fde505b2f67b2c8ec17c7540bbc9aafb527366c0863d655d03a00e5f3c4bbbd121023f96141f1bec4df22465539ecd807762e2c96b75e436540d3e7654d461b62a1953ae')
)
def test2(self):
pub = '029b06d73294a2fe59dd5d2156f9d7bf1cadc8e741b39fff834d39a055ab8f5c97'
addr = 'bcrt1q8s2hkukgulyf575hakxazset8v2z5ltxnvepy8'
self.assertEqual(pubkey_to_bech32_address(pub, prefix='bcrt'), addr)
print(deserialize_script('00141976a9141d0f172a0ecb48aee1be1f2687d2963ae33f71a188ac'))
print(hash160(binascii.unhexlify('025476c2e83188368da1ff3e292e7acafcdb3566bb0ad253f62fc70f07aeee6357')))
def test_multisig(self):
priv1 = sha256(b'sighash_priv_key_text')
priv2 = sha256(b'sighash_priv_key_text_2')
pub1 = compress(privtopub(priv1))
pub2 = compress(privtopub(priv2))
witness_program = mk_multisig_script([pub1, pub2], 2, 2)
addr = bech32_script_to_address(witness_program, prefix='bc')
print('addr', addr)
recipient = '3AbjFnwcChgaAGsPx28hnpDWF3yUobvTFT'
amount = 0.00028295
transaction_to_sign = mktx(
{
'output': '99911f6ddabc51290a45194f268d7e618284d7f42d79a2b57bee9bc5b11787c5:0',
'segregated': True
},
[
{'address': recipient, 'value': int(amount * 10**8) - 4500}
]
)
tx = bitcoin.deserialize(transaction_to_sign)
""" test big opreturn size"""
bigscript = [os.urandom(1024).hex() for _ in range(0, 1000)]
tx['outs'].append(
{
'value': 0,
'script': '00' + bitcoin.serialize_script(bigscript)
}
)
txs = bitcoin.serialize(tx)
tx = bitcoin.deserialize(txs)
s = bitcoin.deserialize_script(tx['outs'][-1]['script'])
self.assertEqual(s[0], None)
self.assertEqual(s[1:], bigscript)
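        # SIGHASH_NONE|SIGHASH_ANYONECANPAY commits only to this single input
        # and to none of the outputs; sig2 below uses the library's default
        # hashcode (presumably SIGHASH_ALL, which covers all inputs and outputs).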
sig1 = bech32_multisign(
transaction_to_sign, 0, priv1, int(amount * 10 ** 8),
witness_program, hashcode=SIGHASH_NONE|SIGHASH_ANYONECANPAY
)
sig2 = bech32_multisign(transaction_to_sign, 0, priv2, int(amount * 10 ** 8), witness_program)
tx = apply_bech32_multisignatures(transaction_to_sign, 0, witness_program, [sig1, sig2])
print(tx)
def test_hash_opreturn(self):
tx = '0100000000010122371ebb7a0432f0d506c35c8a78da70d29258dd50fc870426b3ced80839ebe50100000000fdffffff03983a00000000000017a9148380f47f331682e3683cc0628b04d3e1c918af8887464d00000000000017a914cc2008ff35eea6390b32dde0cf5998fd1016fcec8700000000000000005100160014636f6e696f5f66726f7a656e5f6f75747075747301010102040100000020e5eb3908d8ceb3260487fc50dd5892d270da788a5cc306d5f032047abb1e372202010008de8700000000000002020002000000000000'
txhash = bitcoin.segwit_txhash(tx)
print(txhash)
des_tx = bitcoin.deserialize(tx)
des_tx['outs'] = des_tx['outs'][:-1]
tx2 = bitcoin.serialize(des_tx)
txhash = bitcoin.segwit_txhash(tx2)
print(txhash)
| [
"[email protected]"
] | |
6717c5e8394bf8346701fd67ae56099633f0713c | e56214188faae8ebfb36a463e34fc8324935b3c2 | /test/test_hyperflex_hxdp_version_list.py | cc62ffaf9a3db32bf124709634b0401a115e5b90 | [
"Apache-2.0"
] | permissive | CiscoUcs/intersight-python | 866d6c63e0cb8c33440771efd93541d679bb1ecc | a92fccb1c8df4332ba1f05a0e784efbb4f2efdc4 | refs/heads/master | 2021-11-07T12:54:41.888973 | 2021-10-25T16:15:50 | 2021-10-25T16:15:50 | 115,440,875 | 25 | 18 | Apache-2.0 | 2020-03-02T16:19:49 | 2017-12-26T17:14:03 | Python | UTF-8 | Python | false | false | 1,965 | py | # coding: utf-8
"""
Cisco Intersight
Cisco Intersight is a management platform delivered as a service with embedded analytics for your Cisco and 3rd party IT infrastructure. This platform offers an intelligent level of management that enables IT organizations to analyze, simplify, and automate their environments in more advanced ways than the prior generations of tools. Cisco Intersight provides an integrated and intuitive management experience for resources in the traditional data center as well as at the edge. With flexible deployment options to address complex security needs, getting started with Intersight is quick and easy. Cisco Intersight has deep integration with Cisco UCS and HyperFlex systems allowing for remote deployment, configuration, and ongoing maintenance. The model-based deployment works for a single system in a remote location or hundreds of systems in a data center and enables rapid, standardized configuration and deployment. It also streamlines maintaining those systems whether you are working with small or very large configurations. # noqa: E501
The version of the OpenAPI document: 1.0.9-1295
Contact: [email protected]
Generated by: https://openapi-generator.tech
"""
from __future__ import absolute_import
import unittest
import intersight
from intersight.models.hyperflex_hxdp_version_list import HyperflexHxdpVersionList # noqa: E501
from intersight.rest import ApiException
class TestHyperflexHxdpVersionList(unittest.TestCase):
"""HyperflexHxdpVersionList unit test stubs"""
def setUp(self):
pass
def tearDown(self):
pass
def testHyperflexHxdpVersionList(self):
"""Test HyperflexHxdpVersionList"""
# FIXME: construct object with mandatory attributes with example values
# model = intersight.models.hyperflex_hxdp_version_list.HyperflexHxdpVersionList() # noqa: E501
pass
if __name__ == '__main__':
unittest.main()
| [
"[email protected]"
] | |
4074ccc9524357ff4d85c3bd86584bd3ef35977c | 2a54e8d6ed124c64abb9e075cc5524bb859ba0fa | /.history/1-Python-Basics/17-password-checker_20200413015442.py | c3dbd472448138f2e12538cd9a3c725139ae119c | [] | no_license | CaptainStorm21/Python-Foundation | 01b5fbaf7a913506518cf22e0339dd948e65cea1 | a385adeda74f43dd7fb2d99d326b0be23db25024 | refs/heads/master | 2021-05-23T01:29:18.885239 | 2020-04-23T19:18:06 | 2020-04-23T19:18:06 | 253,171,611 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 126 | py | # input ('testingpassword')
# input ('secret')
# print('{username}, your password {*******} is {6} letters long')
print('*') | [
"[email protected]"
] | |
09a20fc08bd4c36c320dd80bd12158ff8b3dd30e | 0549916a0d04943a0d944a2794e103aed2d1299c | /docs/conf.py | 1e9f35385b8904bab54fa8a27ed5d249e4bc4fcb | [
"MIT"
] | permissive | stephtdouglas/thejoker | 20d6eac36520477b0478ae84effa519fde625f2f | b1f2681cd72b6c04d19b24aadf818639c5f59ad0 | refs/heads/master | 2020-03-18T10:26:55.842576 | 2018-04-16T20:24:47 | 2018-04-16T20:24:47 | 134,612,547 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 7,806 | py | # -*- coding: utf-8 -*-
# Licensed under a 3-clause BSD style license - see LICENSE.rst
#
# Astropy documentation build configuration file.
#
# This file is execfile()d with the current directory set to its containing dir.
#
# Note that not all possible configuration values are present in this file.
#
# All configuration values have a default. Some values are defined in
# the global Astropy configuration which is loaded here before anything else.
# See astropy.sphinx.conf for which values are set there.
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
# sys.path.insert(0, os.path.abspath('..'))
# IMPORTANT: the above commented section was generated by sphinx-quickstart, but
# is *NOT* appropriate for astropy or Astropy affiliated packages. It is left
# commented out with this explanation to make it clear why this should not be
# done. If the sys.path entry above is added, when the astropy.sphinx.conf
# import occurs, it will import the *source* version of astropy instead of the
# version installed (if invoked as "make html" or directly with sphinx), or the
# version in the build directory (if "python setup.py build_sphinx" is used).
# Thus, any C-extensions that are needed to build the documentation will *not*
# be accessible, and the documentation will not build correctly.
import datetime
import os
import sys
try:
import astropy_helpers
except ImportError:
# Building from inside the docs/ directory?
if os.path.basename(os.getcwd()) == 'docs':
a_h_path = os.path.abspath(os.path.join('..', 'astropy_helpers'))
if os.path.isdir(a_h_path):
sys.path.insert(1, a_h_path)
# Load all of the global Astropy configuration
from astropy_helpers.sphinx.conf import *
# Get configuration information from setup.cfg
try:
from ConfigParser import ConfigParser
except ImportError:
from configparser import ConfigParser
conf = ConfigParser()
conf.read([os.path.join(os.path.dirname(__file__), '..', 'setup.cfg')])
setup_cfg = dict(conf.items('metadata'))
# see if we're running on travis
if 'CI' in os.environ:
ON_TRAVIS = True
else:
ON_TRAVIS = False
# -- General configuration ----------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.2'
# To perform a Sphinx version check that needs to be more specific than
# major.minor, call `check_sphinx_version("x.y.z")` here.
# check_sphinx_version("1.2.1")
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns.append('_templates')
exclude_patterns.append('**.ipynb_checkpoints')
# This is added to the end of RST files - a good place to put substitutions to
# be used globally.
# TODO: swap this once bugfix is in nbsphinx
# see: https://github.com/spatialaudio/nbsphinx/issues/38
# rst_epilog = ""
rst_epilog += """
.. |thejoker| replace:: *The Joker*
"""
# Add h5py to intersphinx mapping
intersphinx_mapping['h5py'] = ('http://docs.h5py.org/en/latest/', None)
# -- Project information ------------------------------------------------------
# This does not *have* to match the package name, but typically does
project = setup_cfg['package_name']
author = setup_cfg['author']
copyright = '{0}, {1}'.format(
datetime.datetime.now().year, setup_cfg['author'])
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
__import__(setup_cfg['package_name'])
package = sys.modules[setup_cfg['package_name']]
# The short X.Y version.
version = package.__version__.split('-', 1)[0]
# The full version, including alpha/beta/rc tags.
release = package.__version__
# Use astropy plot style
plot_rcparams = dict()
if not ON_TRAVIS:
plot_rcparams['text.usetex'] = True
plot_rcparams['savefig.facecolor'] = 'none'
plot_rcparams['savefig.bbox'] = 'tight'
plot_apply_rcparams = True
plot_formats = [('png', 512)]
# -- Options for HTML output --------------------------------------------------
# A NOTE ON HTML THEMES
# The global astropy configuration uses a custom theme, 'bootstrap-astropy',
# which is installed along with astropy. A different theme can be used or
# the options for this theme can be modified by overriding some of the
# variables set in the global configuration. The variables set in the
# global configuration are listed below, commented out.
# Please update these texts to match the name of your package.
html_theme_options = {
'logotext1': 'The', # white, semi-bold
'logotext2': 'Joker', # orange, light
'logotext3': ':docs' # white, light
}
# Add any paths that contain custom themes here, relative to this directory.
# To use a different custom theme, add the directory containing the theme.
#html_theme_path = []
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes. To override the custom theme, set this to the
# name of a builtin theme or the name of a custom theme in html_theme_path.
#html_theme = None
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
path = os.path.abspath(os.path.join(os.path.dirname(__file__), '_static'))
html_favicon = os.path.join(path, 'icon.ico')
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = ''
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
html_title = '{0} v{1}'.format(project, release)
# Output file base name for HTML help builder.
htmlhelp_basename = project + 'doc'
# Static files to copy after template files
html_static_path = ['_static']
html_style = 'thejoker.css'
# -- Options for LaTeX output -------------------------------------------------
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, documentclass [howto/manual]).
latex_documents = [('index', project + '.tex', project + u' Documentation',
author, 'manual')]
# -- Options for manual page output -------------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [('index', project.lower(), project + u' Documentation',
[author], 1)]
# -- Options for the edit_on_github extension ---------------------------------
if eval(setup_cfg.get('edit_on_github')):
extensions += ['astropy_helpers.sphinx.ext.edit_on_github']
versionmod = __import__(setup_cfg['package_name'] + '.version')
edit_on_github_project = setup_cfg['github_project']
if versionmod.version.release:
edit_on_github_branch = "v" + versionmod.version.version
else:
edit_on_github_branch = "master"
edit_on_github_source_root = ""
edit_on_github_doc_root = "docs"
# -- Resolving issue number to links in changelog -----------------------------
github_issues_url = 'https://github.com/{0}/issues/'.format(setup_cfg['github_project'])
# -- Custom --
# add nbsphinx extension
extensions += ['nbsphinx']
extensions += ['IPython.sphinxext.ipython_console_highlighting']
# try:
# source_parsers['.ipynb'] = 'nbsphinx.NotebookParser'
# except NameError:
# source_parsers = {'.ipynb': 'nbsphinx.NotebookParser'}
| [
"[email protected]"
] | |
198f417c20f548b5837d62bb3ea3650d6729a7b7 | a66460a46611483dfbdc94c7996893f427e60d97 | /ansible/my_env/lib/python2.7/site-packages/ansible/modules/source_control/github_hooks.py | df0f0f2199b9d4b047c892050d52ba99640ae995 | [
"MIT"
] | permissive | otus-devops-2019-02/yyashkin_infra | 06b57807dde26f94f501828c07503d6bf1d70816 | 0cd0c003884155ac922e3e301305ac202de7028c | refs/heads/master | 2020-04-29T02:42:22.056724 | 2019-05-15T16:24:35 | 2019-05-15T16:24:35 | 175,780,718 | 0 | 0 | MIT | 2019-05-15T16:24:36 | 2019-03-15T08:37:35 | HCL | UTF-8 | Python | false | false | 5,874 | py | #!/usr/bin/python
# -*- coding: utf-8 -*-
# (c) 2013, Phillip Gentry <[email protected]>
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: github_hooks
short_description: Manages GitHub service hooks.
description:
- Adds service hooks and removes service hooks that have an error status.
version_added: "1.4"
options:
user:
description:
- Github username.
required: true
oauthkey:
description:
- The oauth key provided by GitHub. It can be found/generated on GitHub under "Edit Your Profile" >> "Developer settings" >> "Personal Access Tokens"
required: true
repo:
description:
- >
This is the API url for the repository you want to manage hooks for. It should be in the form of: https://api.github.com/repos/user:/repo:.
Note this is different than the normal repo url.
required: true
hookurl:
description:
- When creating a new hook, this is the url that you want GitHub to post to. It is only required when creating a new hook.
required: false
action:
description:
- This tells the githooks module what you want it to do.
required: true
choices: [ "create", "cleanall", "list", "clean504" ]
validate_certs:
description:
- If C(no), SSL certificates for the target repo will not be validated. This should only be used
on personally controlled sites using self-signed certificates.
required: false
default: 'yes'
type: bool
content_type:
description:
- Content type to use for requests made to the webhook
required: false
default: 'json'
choices: ['json', 'form']
author: "Phillip Gentry, CX Inc (@pcgentry)"
'''
EXAMPLES = '''
# Example creating a new service hook. It ignores duplicates.
- github_hooks:
action: create
hookurl: http://11.111.111.111:2222
user: '{{ gituser }}'
oauthkey: '{{ oauthkey }}'
repo: https://api.github.com/repos/pcgentry/Github-Auto-Deploy
# Cleaning all hooks for this repo that had an error on the last update. Since this works for all hooks in a repo it is probably best that this would
# be called from a handler.
- github_hooks:
action: cleanall
user: '{{ gituser }}'
oauthkey: '{{ oauthkey }}'
repo: '{{ repo }}'
delegate_to: localhost
'''
import json
import base64
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.urls import fetch_url
from ansible.module_utils._text import to_bytes
def request(module, url, user, oauthkey, data='', method='GET'):
auth = base64.b64encode(to_bytes('%s:%s' % (user, oauthkey)).replace('\n', ''))
headers = {
'Authorization': 'Basic %s' % auth,
}
response, info = fetch_url(module, url, headers=headers, data=data, method=method)
return response, info
def _list(module, oauthkey, repo, user):
url = "%s/hooks" % repo
response, info = request(module, url, user, oauthkey)
if info['status'] != 200:
return False, ''
else:
return False, response.read()
def _clean504(module, oauthkey, repo, user):
current_hooks = _list(module, oauthkey, repo, user)[1]
decoded = json.loads(current_hooks)
for hook in decoded:
if hook['last_response']['code'] == 504:
_delete(module, oauthkey, repo, user, hook['id'])
return 0, current_hooks
def _cleanall(module, oauthkey, repo, user):
current_hooks = _list(module, oauthkey, repo, user)[1]
decoded = json.loads(current_hooks)
for hook in decoded:
if hook['last_response']['code'] != 200:
_delete(module, oauthkey, repo, user, hook['id'])
return 0, current_hooks
def _create(module, hookurl, oauthkey, repo, user, content_type):
url = "%s/hooks" % repo
values = {
"active": True,
"name": "web",
"config": {
"url": "%s" % hookurl,
"content_type": "%s" % content_type
}
}
data = json.dumps(values)
response, info = request(module, url, user, oauthkey, data=data, method='POST')
if info['status'] != 200:
return 0, '[]'
else:
return 0, response.read()
def _delete(module, oauthkey, repo, user, hookid):
url = "%s/hooks/%s" % (repo, hookid)
response, info = request(module, url, user, oauthkey, method='DELETE')
return response.read()
def main():
module = AnsibleModule(
argument_spec=dict(
action=dict(required=True, choices=['list', 'clean504', 'cleanall', 'create']),
hookurl=dict(required=False),
oauthkey=dict(required=True, no_log=True),
repo=dict(required=True),
user=dict(required=True),
validate_certs=dict(default='yes', type='bool'),
content_type=dict(default='json', choices=['json', 'form']),
)
)
action = module.params['action']
hookurl = module.params['hookurl']
oauthkey = module.params['oauthkey']
repo = module.params['repo']
user = module.params['user']
content_type = module.params['content_type']
if action == "list":
(rc, out) = _list(module, oauthkey, repo, user)
if action == "clean504":
(rc, out) = _clean504(module, oauthkey, repo, user)
if action == "cleanall":
(rc, out) = _cleanall(module, oauthkey, repo, user)
if action == "create":
(rc, out) = _create(module, hookurl, oauthkey, repo, user, content_type)
if rc != 0:
module.fail_json(msg="failed", result=out)
module.exit_json(msg="success", result=out)
if __name__ == '__main__':
main()
| [
"[email protected]"
] | |
7669acaa0139374e57a42a7a5e950e3fd981b1cf | 3ca6b34676a0adeaba85a2953a8c9abf5d6ef3e4 | /cap 5/pizza.py | 44790867aae04d53d68213989d73d8dcd4ef7e96 | [] | no_license | giusepper11/Curso-intensivo-Python | 34fb8e94c7c9afb09f54d8fc67136b337d0ef106 | 613cd502af3ff877dac0d62d9eb09b290d227838 | refs/heads/master | 2021-08-30T11:41:42.824065 | 2017-12-17T19:47:15 | 2017-12-17T19:47:15 | 114,535,941 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 637 | py | available_toppings = ['mushrooms', 'olives', 'green pepper', 'pepperoni', 'pinapple', 'extra cheese']
requested_toppings = ['mushrooms', 'extra cheese', 'french fries']
# if 'mushrooms' in requested_toppings:
# print('Adding mushrooms')
# if 'pepperoni' in requested_toppings:
# print('Adding pepperoni')
# if 'extra cheese' in requested_toppings:
# print('Adding extra cheese')
#
for requested_topping in requested_toppings:
if requested_topping in available_toppings:
print('Adding {} as requested'.format(requested_topping.title()))
else:
        print("We don't have {}".format(requested_topping.title()))
| [
"[email protected]"
] | |
a04ff5d4bae9109384d468f2375916651f0782c8 | fe1d3a2e3b51d1440a5c431c32afc334841dcdc6 | /view-point-server/tests/landmark_objects/perform_modeling.py | a351aa50fa3ffb0b0128eba0135d867485c7742d | [] | no_license | vyzuer/view_point | 5a9b2251880de93a6ac41058f7d05eac2f4d814d | 3ae071f8b5eca883f5d8790ad441d8ae419e9242 | refs/heads/master | 2020-06-15T23:35:26.027690 | 2016-12-01T05:35:33 | 2016-12-01T05:35:33 | 75,258,808 | 3 | 1 | null | null | null | null | UTF-8 | Python | false | false | 1,189 | py | import sys
# add the package
sys.path.append('/home/vyzuer/Copy/Research/Project/code/view-point/view-point-python')
import landmark_object.classify_objects as cl_obj
import landmark_object.gmm_modeling as gmm_model
import landmark_object.geo_pixel_map as gpmap
def process(cluster_model_path, dump_path, model_type):
# preprocess
# cl_obj.process_dataset(cluster_model_path, dump_path)
# perform modeling
# model_type = "weather"
ext = "gmm_" + model_type
gmm_model.process_context(cluster_model_path, dump_path, ext, model_type=model_type)
gmm_model.process_human_object(cluster_model_path, dump_path, ext, model_type=model_type)
def process_geo_pixel_map(cluster_model_path, dump_path):
gpmap.process_lmo(cluster_model_path, dump_path, dump_map=True)
if __name__ == '__main__':
if len(sys.argv) != 4:
print "Usage : cluster_model dump_path gmm_type"
sys.exit(0)
cluster_model_path = sys.argv[1]
dump_path = sys.argv[2]
gmm_type = sys.argv[3]
# process(cluster_model_path, dump_path, gmm_type)
# dump the geo-pixel map for each landmark object
process_geo_pixel_map(cluster_model_path, dump_path)
| [
"[email protected]"
] | |
b216d0f072c2e1c156b59d7618b849f5928627d9 | 5d5f6ba3bdcb52b4750a5f28afa8a1a1019bfc9e | /django/extras/miniRegisterProject/miniRegisterProject/wsgi.py | 611c6dc7765e6df4a6c4145df88f0e835082e8e1 | [] | no_license | eDiazGtz/pythonLearning | 06e96f2f5a6e48ac314cb815cf9fbf65d0b7c2c8 | 57d7b2292cf5d9769cce9adf765962c3c0930d6c | refs/heads/master | 2023-06-18T02:16:09.293375 | 2021-05-03T18:09:52 | 2021-05-03T18:09:52 | 335,090,531 | 0 | 0 | null | 2021-05-03T18:09:53 | 2021-02-01T21:35:24 | Python | UTF-8 | Python | false | false | 415 | py | """
WSGI config for miniRegisterProject project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/3.1/howto/deployment/wsgi/
"""
import os
from django.core.wsgi import get_wsgi_application
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'miniRegisterProject.settings')
application = get_wsgi_application()
| [
"[email protected]"
] | |
1aa5f30f75d756e2d60d09e99a08e0c7a06a8549 | cd5746f8cc7aee1f20606a65b4fae0d5e8ee78dc | /Python Books/Athena/training/exercises/exercises/software_craftsmanship/code_check/code_check_solution.py | 503dc3833b752d73aa4ecca182f07f0d07cda69b | [] | no_license | theGreenJedi/Path | df24fca355590efef0c6cb5c52e7216c6b5d2464 | b5ed2805dbb046480929e49e550bfd8af5bb4d6f | refs/heads/master | 2023-07-27T14:23:37.694546 | 2021-07-16T01:38:55 | 2021-07-16T01:38:55 | 87,686,563 | 8 | 2 | null | 2023-07-11T22:49:03 | 2017-04-09T05:57:30 | Jupyter Notebook | UTF-8 | Python | false | false | 1,073 | py | """
Code Check
----------
This code has an assortment of bugs, and its style doesn't
conform to PEP-8. Use pyflakes and pep8 to find and fix
the code.
You may have to install pep8 with the command:
$ easy_install pep8
It might take a few iterations before pyflakes doesn't
complain about something.
"""
from math import acos, sqrt
class Vector(object):
def __init__(self, x, y, z):
""" Constructor method.
"""
self.x = x
self.y = y
self.z = z
def dot(self, v):
d = self.x * v.x + self.y * v.y + self.z * v.z
return d
def abs(self):
m = sqrt(self.x ** 2 + self.y ** 2 + self.z ** 2)
return m
def angle(self, v):
theta = acos(self.dot(v) / (self.abs() * v.abs()))
return theta
def __repr__(self):
s = "Vector(x=%s, y=%s, z=%s)" % (self.x, self.y, self.z)
return s
if __name__ == "__main__":
v1 = Vector(2.0, 13.0, -1.0)
print v1, " magnitude is", v1.abs()
v2 = Vector(1.0, 2.0, 3.0)
print "v1.angle(v2) =", v1.angle(v2)
| [
"[email protected]"
] | |
78294f6a8aef669474858e616f2609a6d163080a | bef7c41e7b51417f9cc5c3d30a7f94b59286e2b7 | /Algorithms/subsets.py | eb73b6b90fe89a9c354b65f360339b14b82bdd11 | [] | no_license | algometrix/LeetCode | 40dd6ea93c370cabe57ba672d820f261e0595cae | 3dc885ac2a93781c36fbe2735061da29194caba4 | refs/heads/master | 2021-07-01T19:45:08.018784 | 2021-06-03T05:38:13 | 2021-06-03T05:38:13 | 235,496,179 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 474 | py |
import pprint
def generateSubset(array):
temp = []
result = []
def search(k):
if k == len(array):
result.append([array[index] for index in temp])
else:
temp.append(k)
search(k+1)
temp.pop()
search(k+1)
search(0)
return result
if __name__ == "__main__":
array = [2,5,9]
result = generateSubset(array)
print('All possible permuations')
pprint.pprint(result) | [
"[email protected]"
] | |
453b1c254a73d6ca3f23e25571f460f0270bd009 | 1816378da612c7db376934b033e4fd64951338b6 | /gui/system/migrations/0053_auto__add_registration.py | 4a22f895de85bfa55c644ca8410ecc3045f62741 | [] | no_license | quater/freenas-9.2-xen | 46517a7a23546764347d3c91108c70a8bd648ec6 | 96e580055fa97575f0a0cb23a72495860467bcfb | refs/heads/master | 2021-01-16T22:21:38.781962 | 2014-02-07T05:59:13 | 2014-02-07T05:59:13 | 16,609,785 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 18,183 | py | # -*- coding: utf-8 -*-
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Adding model 'Registration'
db.create_table(u'system_registration', (
(u'id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
('reg_firstname', self.gf('django.db.models.fields.CharField')(max_length=120)),
('reg_lastname', self.gf('django.db.models.fields.CharField')(max_length=120)),
('reg_company', self.gf('django.db.models.fields.CharField')(max_length=120, blank=True)),
('reg_address', self.gf('django.db.models.fields.CharField')(max_length=120)),
('reg_city', self.gf('django.db.models.fields.CharField')(max_length=120)),
('reg_state', self.gf('django.db.models.fields.CharField')(max_length=120)),
('reg_zip', self.gf('django.db.models.fields.CharField')(max_length=120)),
('reg_email', self.gf('django.db.models.fields.CharField')(max_length=120, blank=True)),
('reg_homephone', self.gf('django.db.models.fields.CharField')(max_length=120, blank=True)),
('reg_cellphone', self.gf('django.db.models.fields.CharField')(max_length=120, blank=True)),
('reg_workphone', self.gf('django.db.models.fields.CharField')(max_length=120, blank=True)),
))
db.send_create_signal(u'system', ['Registration'])
def backwards(self, orm):
# Deleting model 'Registration'
db.delete_table(u'system_registration')
models = {
u'storage.disk': {
'Meta': {'ordering': "['disk_name']", 'object_name': 'Disk'},
'disk_acousticlevel': ('django.db.models.fields.CharField', [], {'default': "'Disabled'", 'max_length': '120'}),
'disk_advpowermgmt': ('django.db.models.fields.CharField', [], {'default': "'Disabled'", 'max_length': '120'}),
'disk_description': ('django.db.models.fields.CharField', [], {'max_length': '120', 'blank': 'True'}),
'disk_enabled': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'disk_hddstandby': ('django.db.models.fields.CharField', [], {'default': "'Always On'", 'max_length': '120'}),
'disk_identifier': ('django.db.models.fields.CharField', [], {'max_length': '42'}),
'disk_multipath_member': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'disk_multipath_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'disk_name': ('django.db.models.fields.CharField', [], {'max_length': '120'}),
'disk_serial': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'disk_smartoptions': ('django.db.models.fields.CharField', [], {'max_length': '120', 'blank': 'True'}),
'disk_togglesmart': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'disk_transfermode': ('django.db.models.fields.CharField', [], {'default': "'Auto'", 'max_length': '120'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'})
},
u'system.advanced': {
'Meta': {'object_name': 'Advanced'},
'adv_advancedmode': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'adv_anonstats': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'adv_anonstats_token': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'adv_autotune': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'adv_consolemenu': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'adv_consolemsg': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'adv_consolescreensaver': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'adv_debugkernel': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'adv_firmwarevc': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'adv_motd': ('django.db.models.fields.TextField', [], {'max_length': '1024'}),
'adv_powerdaemon': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'adv_serialconsole': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'adv_serialspeed': ('django.db.models.fields.CharField', [], {'default': "'9600'", 'max_length': '120'}),
'adv_swapondrive': ('django.db.models.fields.IntegerField', [], {'default': '2'}),
'adv_systembeep': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'adv_traceback': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'adv_tuning': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'adv_zeroconfbonjour': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'})
},
u'system.alert': {
'Meta': {'object_name': 'Alert'},
'dismiss': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'message_id': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '100'})
},
u'system.cronjob': {
'Meta': {'ordering': "['cron_description', 'cron_user']", 'object_name': 'CronJob'},
'cron_command': ('django.db.models.fields.TextField', [], {}),
'cron_daymonth': ('django.db.models.fields.CharField', [], {'default': "'*'", 'max_length': '100'}),
'cron_dayweek': ('django.db.models.fields.CharField', [], {'default': "'1,2,3,4,5,6,7'", 'max_length': '100'}),
'cron_description': ('django.db.models.fields.CharField', [], {'max_length': '200', 'blank': 'True'}),
'cron_enabled': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'cron_hour': ('django.db.models.fields.CharField', [], {'default': "'*'", 'max_length': '100'}),
'cron_minute': ('django.db.models.fields.CharField', [], {'default': "'00'", 'max_length': '100'}),
'cron_month': ('django.db.models.fields.CharField', [], {'default': "'1,2,3,4,5,6,7,8,9,a,b,c'", 'max_length': '100'}),
'cron_stderr': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'cron_stdout': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'cron_user': ('freenasUI.freeadmin.models.UserField', [], {'max_length': '60'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'})
},
u'system.email': {
'Meta': {'object_name': 'Email'},
'em_fromemail': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '120'}),
'em_outgoingserver': ('django.db.models.fields.CharField', [], {'max_length': '120', 'blank': 'True'}),
'em_pass': ('django.db.models.fields.CharField', [], {'max_length': '120', 'null': 'True', 'blank': 'True'}),
'em_port': ('django.db.models.fields.IntegerField', [], {'default': '25'}),
'em_security': ('django.db.models.fields.CharField', [], {'default': "'plain'", 'max_length': '120'}),
'em_smtp': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'em_user': ('django.db.models.fields.CharField', [], {'max_length': '120', 'null': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'})
},
u'system.initshutdown': {
'Meta': {'object_name': 'InitShutdown'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'ini_command': ('django.db.models.fields.CharField', [], {'max_length': '300', 'blank': 'True'}),
'ini_script': ('freenasUI.freeadmin.models.PathField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'ini_type': ('django.db.models.fields.CharField', [], {'default': "'command'", 'max_length': '15'}),
'ini_when': ('django.db.models.fields.CharField', [], {'max_length': '15'})
},
u'system.ntpserver': {
'Meta': {'ordering': "['ntp_address']", 'object_name': 'NTPServer'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'ntp_address': ('django.db.models.fields.CharField', [], {'max_length': '120'}),
'ntp_burst': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'ntp_iburst': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'ntp_maxpoll': ('django.db.models.fields.IntegerField', [], {'default': '10'}),
'ntp_minpoll': ('django.db.models.fields.IntegerField', [], {'default': '6'}),
'ntp_prefer': ('django.db.models.fields.BooleanField', [], {'default': 'False'})
},
u'system.registration': {
'Meta': {'object_name': 'Registration'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'reg_address': ('django.db.models.fields.CharField', [], {'max_length': '120'}),
'reg_cellphone': ('django.db.models.fields.CharField', [], {'max_length': '120', 'blank': 'True'}),
'reg_city': ('django.db.models.fields.CharField', [], {'max_length': '120'}),
'reg_company': ('django.db.models.fields.CharField', [], {'max_length': '120', 'blank': 'True'}),
'reg_email': ('django.db.models.fields.CharField', [], {'max_length': '120', 'blank': 'True'}),
'reg_firstname': ('django.db.models.fields.CharField', [], {'max_length': '120'}),
'reg_homephone': ('django.db.models.fields.CharField', [], {'max_length': '120', 'blank': 'True'}),
'reg_lastname': ('django.db.models.fields.CharField', [], {'max_length': '120'}),
'reg_state': ('django.db.models.fields.CharField', [], {'max_length': '120'}),
'reg_workphone': ('django.db.models.fields.CharField', [], {'max_length': '120', 'blank': 'True'}),
'reg_zip': ('django.db.models.fields.CharField', [], {'max_length': '120'})
},
u'system.rsync': {
'Meta': {'ordering': "['rsync_path', 'rsync_desc']", 'object_name': 'Rsync'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'rsync_archive': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'rsync_compress': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'rsync_daymonth': ('django.db.models.fields.CharField', [], {'default': "'*'", 'max_length': '100'}),
'rsync_dayweek': ('django.db.models.fields.CharField', [], {'default': "'1,2,3,4,5,6,7'", 'max_length': '100'}),
'rsync_delete': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'rsync_desc': ('django.db.models.fields.CharField', [], {'max_length': '120', 'blank': 'True'}),
'rsync_direction': ('django.db.models.fields.CharField', [], {'default': "'push'", 'max_length': '10'}),
'rsync_enabled': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'rsync_extra': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'rsync_hour': ('django.db.models.fields.CharField', [], {'default': "'*'", 'max_length': '100'}),
'rsync_minute': ('django.db.models.fields.CharField', [], {'default': "'00'", 'max_length': '100'}),
'rsync_mode': ('django.db.models.fields.CharField', [], {'default': "'module'", 'max_length': '20'}),
'rsync_month': ('django.db.models.fields.CharField', [], {'default': "'1,2,3,4,5,6,7,8,9,a,b,c'", 'max_length': '100'}),
'rsync_path': ('freenasUI.freeadmin.models.PathField', [], {'max_length': '255'}),
'rsync_preserveattr': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'rsync_preserveperm': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'rsync_quiet': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'rsync_recursive': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'rsync_remotehost': ('django.db.models.fields.CharField', [], {'max_length': '120'}),
'rsync_remotemodule': ('django.db.models.fields.CharField', [], {'max_length': '120', 'blank': 'True'}),
'rsync_remotepath': ('django.db.models.fields.CharField', [], {'max_length': '120', 'blank': 'True'}),
'rsync_remoteport': ('django.db.models.fields.SmallIntegerField', [], {'default': '22'}),
'rsync_times': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'rsync_user': ('freenasUI.freeadmin.models.UserField', [], {'max_length': '60'})
},
u'system.settings': {
'Meta': {'object_name': 'Settings'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'stg_directoryservice': ('django.db.models.fields.CharField', [], {'max_length': '120', 'blank': 'True'}),
'stg_guiaddress': ('django.db.models.fields.CharField', [], {'default': "'0.0.0.0'", 'max_length': '120', 'blank': 'True'}),
'stg_guiport': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '120', 'blank': 'True'}),
'stg_guiprotocol': ('django.db.models.fields.CharField', [], {'default': "'http'", 'max_length': '120'}),
'stg_guiv6address': ('django.db.models.fields.CharField', [], {'default': "'::'", 'max_length': '120', 'blank': 'True'}),
'stg_kbdmap': ('django.db.models.fields.CharField', [], {'max_length': '120', 'blank': 'True'}),
'stg_language': ('django.db.models.fields.CharField', [], {'default': "'en'", 'max_length': '120'}),
'stg_syslogserver': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '120', 'blank': 'True'}),
'stg_timezone': ('django.db.models.fields.CharField', [], {'default': "'America/Los_Angeles'", 'max_length': '120'})
},
u'system.smarttest': {
'Meta': {'ordering': "['smarttest_type']", 'object_name': 'SMARTTest'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'smarttest_daymonth': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'smarttest_dayweek': ('django.db.models.fields.CharField', [], {'default': "'1,2,3,4,5,6,7'", 'max_length': '100'}),
'smarttest_desc': ('django.db.models.fields.CharField', [], {'max_length': '120', 'blank': 'True'}),
'smarttest_disks': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['storage.Disk']", 'symmetrical': 'False'}),
'smarttest_hour': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'smarttest_month': ('django.db.models.fields.CharField', [], {'default': "'1,2,3,4,5,6,7,8,9,10,a,b,c'", 'max_length': '100'}),
'smarttest_type': ('django.db.models.fields.CharField', [], {'max_length': '2'})
},
u'system.ssl': {
'Meta': {'object_name': 'SSL'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'ssl_certfile': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'ssl_city': ('django.db.models.fields.CharField', [], {'max_length': '120', 'null': 'True', 'blank': 'True'}),
'ssl_common': ('django.db.models.fields.CharField', [], {'max_length': '120', 'null': 'True', 'blank': 'True'}),
'ssl_country': ('django.db.models.fields.CharField', [], {'max_length': '120', 'null': 'True', 'blank': 'True'}),
'ssl_email': ('django.db.models.fields.CharField', [], {'max_length': '120', 'null': 'True', 'blank': 'True'}),
'ssl_org': ('django.db.models.fields.CharField', [], {'max_length': '120', 'null': 'True', 'blank': 'True'}),
'ssl_passphrase': ('django.db.models.fields.CharField', [], {'max_length': '120', 'null': 'True', 'blank': 'True'}),
'ssl_state': ('django.db.models.fields.CharField', [], {'max_length': '120', 'null': 'True', 'blank': 'True'}),
'ssl_unit': ('django.db.models.fields.CharField', [], {'max_length': '120', 'null': 'True', 'blank': 'True'})
},
u'system.sysctl': {
'Meta': {'ordering': "['sysctl_mib']", 'object_name': 'Sysctl'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'sysctl_comment': ('django.db.models.fields.CharField', [], {'max_length': '100', 'blank': 'True'}),
'sysctl_enabled': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'sysctl_mib': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '50'}),
'sysctl_value': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
u'system.tunable': {
'Meta': {'ordering': "['tun_var']", 'object_name': 'Tunable'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'tun_comment': ('django.db.models.fields.CharField', [], {'max_length': '100', 'blank': 'True'}),
'tun_enabled': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'tun_value': ('django.db.models.fields.CharField', [], {'max_length': '50'}),
'tun_var': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '50'})
}
}
complete_apps = ['system'] | [
"[email protected]"
] | |
1def5be0b51e055f3389540b66364e0974814105 | 15f321878face2af9317363c5f6de1e5ddd9b749 | /solutions_python/Problem_159/624.py | cf0d31f0a91c8e0ded44e2339ac0e8cf0e446e08 | [] | no_license | dr-dos-ok/Code_Jam_Webscraper | c06fd59870842664cd79c41eb460a09553e1c80a | 26a35bf114a3aa30fc4c677ef069d95f41665cc0 | refs/heads/master | 2020-04-06T08:17:40.938460 | 2018-10-14T10:12:47 | 2018-10-14T10:12:47 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,099 | py | #codejam 4/17/15
import math as m
import time
#import codejam
import sys
sys.setrecursionlimit(100)#1100) #we need 1000 max
#filename = r'c:\g\1A\1-test.in.txt'
filename = r'c:\g\1A\A-large.in'
#filename = r'c:\g\A1\1-large.in'
foutname = r'c:\g\1A\1-out-large.txt'
#foutname = r'c:\g\1A\1-out-large.txt'
FILE = open(filename)
FOUT = open(foutname,"w")
T = int(FILE.readline())
def ceildiv(x, d):#like x//d but ceiling, for positives only
return (x + (d-1)) // d
def sol1(M, dbg): #first method, given samples in array M, which is of length 2 to 1000
S = M[0] #number at start
E = 0 #total eaten
pmj = M[0] #previous mj
for mj in M[1:]:
D = mj - pmj #delta
if D>0: #more were put on plate, none eaten
pass
elif D<0: #some were removed, must have been eaten
if dbg: print "D<0: D=",D,", ate",-D," so total eaten=",(E-D)
E -= D
else: #no change
pass
pmj = mj
return E
def sol2(M, dbg): #second method, eats at constant rate
#first find minimum eating rate - largest decline
changes = [b-a for a,b in zip(M[:-1],M[1:])]
R = abs(min(changes))
E = 0 #number eaten
if dbg: print "sol2 R=",R #minimum eating rate
P = M[0] #number on plate at start
pmj = M[0] #previous mj
for mj in M[1:]:
P2 = max(0,P - R) #she would eat down to this if none were added
#if dbg: print "See mj=",mj,"so ate",(P-P2)," P2=",P2
E += (P - P2)
#if mj > P2: #more were added, assumed an instant before time sample (for minimum)
# pass
#else: #some (or none) were removed
# pass #must have been eaten
P = mj
pmj = mj
return E
dbg=0
if dbg: print ""
if 1:
t0 = time.time()
sumz = 0
for i in range(1,T+1):
rawline = FILE.readline().split(' ')
D = int(rawline[0]) #number of samples at 10 second intervals
if len(rawline)>1: #trick to check known answers
manual_ans = [int(a) for a in rawline[-2:]]
else:
manual_ans = None
s = FILE.readline()
if s[-1]<'0': s=s[:-1]#strip newline
P = [int(ps) for ps in s.split(' ')]
if dbg: print "Case #" + str(i)+": D=",D," ["+(' '.join([str(xp) for xp in P]))+']',("manual_ans="+str(manual_ans) if manual_ans else "")
#if dbg and manual_ans: print "manual_ans = ",manual_ans
z1 = sol1(P, 0)
z2 = sol2(P, dbg)
if dbg: print " ==> ",z1,z2
sumz += z1
msg = 'Case #' + str(i) + ': ' + str(z1)+' '+str(z2)
if dbg:
if manual_ans: print msg+ (" 1 is OK!" if manual_ans[0]==z1 else "1 DIFF!") + (" 2 is OK!" if manual_ans[1]==z2 else "2 DIFF!")
else: print msg
if not dbg and i%10==1: print msg
FOUT.write(msg + "\n")
if manual_ans!=None:
if manual_ans[0]!=z1 or manual_ans[1]!=z2: print "...DIFFERENT! ",manual_ans," but we got: ",(z1,z2)
if dbg: print ""
print "finished",T,"cases,", round(time.time() - t0,3),"s, sumz:",sumz
FOUT.close()
FILE.close()
| [
"[email protected]"
] | |
2a06374a0a793b1371880df1dcb25fa45b93da2c | aea74a8c1d4ad17eb65b7c70da5342c01fd1a930 | /websites_mongo/scraper_military_shop.py | b0d8f9173ceff9468d92e846a3b1818f303f0b09 | [] | no_license | savusebastian/angular_project | 4e6d8b398e17ca91842d7579d8f4da8650e7a13a | 9c28c25e4b9875abf346f7e9a7e8baa34bc3f9ee | refs/heads/main | 2023-04-17T07:03:32.016850 | 2021-05-09T09:07:55 | 2021-05-09T09:07:55 | 365,710,891 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,161 | py | from bs4 import BeautifulSoup
from bson.objectid import ObjectId
from pymongo import MongoClient
import pymongo
import requests
def military_shop_DB():
cluster = MongoClient('mongodb://localhost:27017/vrem_reduceri_db')
db = cluster['vrem_reduceri_db']
collection = db['military_shop_products']
all_links = [
'https://www.military-shop.ro/sitemap_cat_85.xml',
'https://www.military-shop.ro/sitemap_cat_67.xml',
'https://www.military-shop.ro/sitemap_cat_2.xml',
'https://www.military-shop.ro/sitemap_cat_4.xml',
'https://www.military-shop.ro/sitemap_cat_101.xml',
'https://www.military-shop.ro/sitemap_cat_40.xml',
'https://www.military-shop.ro/sitemap_cat_119.xml',
'https://www.military-shop.ro/sitemap_cat_37.xml',
'https://www.military-shop.ro/sitemap_cat_39.xml',
'https://www.military-shop.ro/sitemap_cat_120.xml',
'https://www.military-shop.ro/sitemap_cat_147.xml',
'https://www.military-shop.ro/sitemap_cat_171.xml',
'https://www.military-shop.ro/sitemap_cat_44.xml',
'https://www.military-shop.ro/sitemap_cat_35.xml',
'https://www.military-shop.ro/sitemap_cat_148.xml',
'https://www.military-shop.ro/sitemap_cat_36.xml',
'https://www.military-shop.ro/sitemap_cat_141.xml',
'https://www.military-shop.ro/sitemap_cat_100.xml',
'https://www.military-shop.ro/sitemap_cat_41.xml',
'https://www.military-shop.ro/sitemap_cat_38.xml',
'https://www.military-shop.ro/sitemap_cat_42.xml',
'https://www.military-shop.ro/sitemap_cat_43.xml',
]
for text in all_links:
URL = text
shop = URL.split('/')[2].split('.')[1]
page = requests.get(URL)
soup = BeautifulSoup(page.content, 'html.parser')
available_data = soup.find_all('loc')
links = [item.get_text() for item in available_data]
for link in links[60:]:
try:
web_page = requests.get(link)
web_soup = BeautifulSoup(web_page.content, 'html.parser')
schemaorg_data = web_soup.find_all(type='application/ld+json')[0].contents[0]
split_data = schemaorg_data.split('"')
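				# The ld+json blob is parsed by splitting on double quotes, so for a
				# key token at index i its value usually sits at split_data[i + 2]
				# (the token in between is the ': ' separator). Brittle, but it
				# tolerates payloads that are not strictly valid JSON.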
data = {}
data['_id'] = ObjectId()
i = 0
for item in split_data:
					if item == 'name' and split_data[i - 2] == 'Product':
data[item] = split_data[i + 2]
data['slug'] = split_data[i + 2].lower().replace('"', '').replace(',', '').replace('.', '-').replace(' ', '-')
if item == 'image' or item == 'sku' or item == 'priceCurrency':
data[item] = split_data[i + 2]
if item == 'price':
data[item] = split_data[i + 1][1:-1]
if item == 'brand':
data[item] = split_data[i + 8]
if item == 'availability':
data[item] = split_data[i + 2].split('/')[-1]
i += 1
data['url'] = link
data['shop'] = shop
print(len(data))
if len(data) > 5:
result = collection.find_one({'name': data['name']})
if result == None:
# print('Insert', link)
collection.insert_one(data)
else:
# print('Update', link)
data['_id'] = result['_id']
collection.replace_one({'name': data['name']}, data)
except:
print(link)
# for item in data:
# print(item, ':', data[item])
print('military_shop_DB')
if __name__ == '__main__':
military_shop_DB()
| [
"[email protected]"
] | |
9f7c7cef1726910db585906e0c6bc38c69a04522 | dff47f2ac7671e074fec2f6f1590c8c994e1be27 | /1_map/PyGreentea/PyGreentea.py | efb564f538f3dcdd0a4e9deb6eb3b856a3c7b5d3 | [
"BSD-2-Clause"
] | permissive | VCG/parallel_unet | a4976bd23569389810b4075793c0394bdd976ed8 | 87f39a6f93992aaacb2796d134b57fdd7273b164 | refs/heads/master | 2021-01-17T23:07:53.103134 | 2017-06-09T19:42:04 | 2017-06-09T19:42:04 | 84,212,594 | 1 | 1 | null | null | null | null | UTF-8 | Python | false | false | 40,186 | py | import os, sys, inspect, gc
import h5py
import numpy as np
from scipy import io
import math
import threading
import png
from Crypto.Random.random import randint
import numpy.random
import pdb
# Determine where PyGreentea is
pygtpath = os.path.normpath(os.path.realpath(os.path.abspath(os.path.split(inspect.getfile(inspect.currentframe()))[0])))
# Determine where PyGreentea gets called from
cmdpath = os.getcwd()
sys.path.append(pygtpath)
sys.path.append(cmdpath)
from numpy import float32, int32, uint8
# Load the configuration file
import config
# Load the setup module
import setup
class bcolors:
HEADER = '\033[95m'
OKBLUE = '\033[94m'
OKGREEN = '\033[92m'
WARNING = '\033[93m'
FAIL = '\033[91m'
ENDC = '\033[0m'
BOLD = '\033[1m'
UNDERLINE = '\033[4m'
# Direct call to PyGreentea, set up everything
if __name__ == "__main__":
if (pygtpath != cmdpath):
os.chdir(pygtpath)
if (os.geteuid() != 0):
print(bcolors.WARNING + "PyGreentea setup should probably be executed with root privileges!" + bcolors.ENDC)
if config.install_packages:
print(bcolors.HEADER + ("==== PYGT: Installing OS packages ====").ljust(80,"=") + bcolors.ENDC)
setup.install_dependencies()
print(bcolors.HEADER + ("==== PYGT: Updating Caffe/Greentea repository ====").ljust(80,"=") + bcolors.ENDC)
setup.clone_caffe(config.caffe_path, config.clone_caffe, config.update_caffe)
print(bcolors.HEADER + ("==== PYGT: Updating Malis repository ====").ljust(80,"=") + bcolors.ENDC)
setup.clone_malis(config.malis_path, config.clone_malis, config.update_malis)
if config.compile_caffe:
print(bcolors.HEADER + ("==== PYGT: Compiling Caffe/Greentea ====").ljust(80,"=") + bcolors.ENDC)
setup.compile_caffe(config.caffe_path)
if config.compile_malis:
print(bcolors.HEADER + ("==== PYGT: Compiling Malis ====").ljust(80,"=") + bcolors.ENDC)
setup.compile_malis(config.malis_path)
if (pygtpath != cmdpath):
os.chdir(cmdpath)
print(bcolors.OKGREEN + ("==== PYGT: Setup finished ====").ljust(80,"=") + bcolors.ENDC)
sys.exit(0)
#pdb.set_trace()
setup.setup_paths(config.caffe_path, config.malis_path)
setup.set_environment_vars()
# Import Caffe
import caffe as caffe
# Import the network generator
import network_generator as netgen
# Import Malis
import malis as malis
# Wrapper around a network's set_input_arrays to prevent memory leaks of locked-up arrays
class NetInputWrapper:
def __init__(self, net, shapes):
self.net = net
self.shapes = shapes
self.dummy_slice = np.ascontiguousarray([0]).astype(float32)
self.inputs = []
for i in range(0,len(shapes)):
# Pre-allocate arrays that will persist with the network
self.inputs += [np.zeros(tuple(self.shapes[i]), dtype=float32)]
def setInputs(self, data):
#pdb.set_trace()
for i in range(0,len(self.shapes)):
np.copyto(self.inputs[i], np.ascontiguousarray(data[i]).astype(float32))
self.net.set_input_arrays(i, self.inputs[i], self.dummy_slice)
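# Usage sketch (hypothetical shapes; assumes the Greentea Caffe fork's indexed
# set_input_arrays, which binds one preallocated array per input index):
#   shapes = [[1, 1, 44, 132, 132], [1, 2, 44, 132, 132]]
#   net_io = NetInputWrapper(net, shapes)
#   net_io.setInputs([raw_volume, label_volume])
#   net.forward()
# Copying into the persistent self.inputs arrays keeps Caffe pointing at live
# memory instead of temporaries that Python could garbage-collect.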
# Transfer network weights from one network to another
def net_weight_transfer(dst_net, src_net):
    print('===>transferring weights...')
# Go through all source layers/weights
for layer_key in src_net.params:
# Test existence of the weights in destination network
if (layer_key in dst_net.params):
print('---', layer_key)
# Copy weights + bias
for i in range(0, min(len(dst_net.params[layer_key]), len(src_net.params[layer_key]))):
np.copyto(dst_net.params[layer_key][i].data, src_net.params[layer_key][i].data)
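# Usage sketch: typically used to seed a test/deploy net from a training net,
# e.g. net_weight_transfer(test_net, train_net); layers whose names do not
# match are simply left at their current initialization.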
class ClassWeight:
def __init__(self, aug_datalbl, recent_iter):
self.pred_thd = 0.0
self.alpha = 2
nz0idx = np.where(aug_datalbl[0]['label'] == 0)
self.const_wt0 = aug_datalbl[0]['label'].size*1.0/len(nz0idx[2])
nz1idx = np.where(aug_datalbl[0]['label'] == 1)
self.const_wt1 = aug_datalbl[0]['label'].size*1.0/len(nz1idx[2])
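        # const_wt0/const_wt1 are inverse class frequencies measured on the
        # first stack only (assuming all stacks are similarly balanced), so
        # the rarer class starts out with the larger weight.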
self.nclass = np.unique(aug_datalbl[0]['label']).size
self.class_ind = []
for i in range(0,(len(aug_datalbl))):
self.class_ind.append([])
actual_labels = aug_datalbl[i]['label']
indmat = []
for cc in range(0,self.nclass):
indmat.append([])
indmat[-1] = (actual_labels == cc).astype('uint8')
self.class_ind[-1] = indmat
#pdb.set_trace()
self.class_weights = []
weight_filename = 'weights_itr'+str(recent_iter)+'.h5'
if os.path.exists(weight_filename):
fp = h5py.File(weight_filename)
ndsets = fp.keys()
for i in range(len(ndsets)):
dataset_name = 'stack'+str(i)
self.class_weights.append([])
self.class_weights[i] = np.array(fp[dataset_name]).astype(np.float32)
fp.close()
else:
for i in range(0,(len(aug_datalbl))):
self.class_weights.append([])
self.class_weights[i] = (self.const_wt0 * self.class_ind[i][0]) + (self.const_wt1 * self.class_ind[i][1])
self.class_weights[i] = self.class_weights[i].astype(np.float32)
## # toufiq debug
#pdb.set_trace()
##for i in range(0,(len(aug_datalbl))):
#savename = 'tst-weights520.h5'
#fp = h5py.File(savename,'w')
#fp.create_dataset('stack1',data=self.class_weights[1])
#fp.create_dataset('stack5',data=self.class_weights[5])
#fp.create_dataset('stack10',data=self.class_weights[10])
#fp.create_dataset('stack15',data=self.class_weights[15])
#fp.close()
#pdb.set_trace()
def recompute_weight(self, trn_pred_array, trn_itr):
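        # Update rule, per call: on every class-0 voxel the weight is nudged
        # by alpha * (prediction - pred_thd) and clipped to
        # [const_wt0, 50 * const_wt0]; class-1 voxels keep const_wt1.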
#pdb.set_trace()
for i in range(0,(len(trn_pred_array))):
pred0_diff = (trn_pred_array[i] - self.pred_thd)
wt0 = self.class_weights[i] + (self.alpha * pred0_diff)
wt0_clipped = np.clip(wt0, self.const_wt0, 50*self.const_wt0 ) # membrane weight cannot be less than cyto weights
self.class_weights[i] = (wt0_clipped * self.class_ind[i][0] ) + ( self.const_wt1 * self.class_ind[i][1] )
## # toufiq debug
#savename = 'weights_itr'+str(trn_itr)+'.h5'
#fp = h5py.File(savename,'w')
#for i in range(len(self.class_weights)):
#dataset_name = 'stack'+str(i)
#fp.create_dataset(dataset_name,data=self.class_weights[i],compression='gzip',compression_opts=9)
#fp.close()
def normalize(dataset, newmin=-1, newmax=1):
maxval = dataset
while len(maxval.shape) > 0:
maxval = maxval.max(0)
minval = dataset
while len(minval.shape) > 0:
minval = minval.min(0)
return ((dataset - minval) / (maxval - minval)) * (newmax - newmin) + newmin
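# e.g. normalize(np.array([0., 5., 10.])) -> array([-1., 0., 1.]) with the
# default target range; the while loops reduce max/min over every axis in turn.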
def getSolverStates(prefix):
files = [f for f in os.listdir('.') if os.path.isfile(f)]
    print(files)
solverstates = []
for file in files:
if(prefix+'_iter_' in file and '.solverstate' in file):
solverstates += [(int(file[len(prefix+'_iter_'):-len('.solverstate')]),file)]
return sorted(solverstates)
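# e.g. with prefix 'net', a file 'net_iter_2000.solverstate' yields the tuple
# (2000, 'net_iter_2000.solverstate') in the sorted result (illustrative).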
def getCaffeModels(prefix):
files = [f for f in os.listdir('.') if os.path.isfile(f)]
    print(files)
caffemodels = []
for file in files:
if(prefix+'_iter_' in file and '.caffemodel' in file):
caffemodels += [(int(file[len(prefix+'_iter_'):-len('.caffemodel')]),file)]
return sorted(caffemodels)
def error_scale(data, factor_low, factor_high):
scale = np.add((data >= 0.5) * factor_high, (data < 0.5) * factor_low)
return scale
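# Example (illustrative): entries >= 0.5 get factor_high, the rest factor_low
#   scale = error_scale(label_slice, 1.0, 3.0)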
def error_scale_overall(data, weight_vec):
#pdb.set_trace()
scale = np.zeros(data.shape)
nclass = weight_vec.shape[0]
for cc in range(nclass):
binary_indicator = np.array(data == cc)
scale += ((1.0/weight_vec[cc]) * binary_indicator)
return scale
def class_balance_distribution(label_array):
#pdb.set_trace()
nclass = np.unique(label_array).shape[0]
weight_vec = []
for cc in range(nclass):
binary_indicator = np.array(label_array == cc)
frac_cc = np.clip(binary_indicator.mean(),0.05,0.95) #for binary labels
weight_vec.append(frac_cc)
return(np.array(weight_vec))
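# Example (illustrative): binary labels with ~10% positives give
#   class_balance_distribution(labels) -> array([0.9, 0.1])
# (fractions are clipped to [0.05, 0.95]; callers invert them as weights)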
def count_affinity(dataset):
aff_high = np.sum(dataset >= 0.5)
aff_low = np.sum(dataset < 0.5)
return aff_high, aff_low
def border_reflect(dataset, border):
return np.pad(dataset,((border, border)),'reflect')
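# Example (illustrative): border_reflect(np.zeros((4, 4)), 2).shape == (8, 8)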
def augment_data_simple(dataset,trn_method='affinity'):
nset = len(dataset)
for iset in range(nset):
for reflectz in range(2):
for reflecty in range(2):
for reflectx in range(2):
for swapxy in range(2):
if reflectz==0 and reflecty==0 and reflectx==0 and swapxy==0:
continue
dataset.append({})
if trn_method == 'affinity':
dataset[-1]['name'] = dataset[iset]['name']
dataset[-1]['nhood'] = dataset[iset]['nhood']
dataset[-1]['data'] = dataset[iset]['data'][:]
dataset[-1]['components'] = dataset[iset]['components'][:]
if reflectz:
dataset[-1]['data'] = dataset[-1]['data'][::-1,:,:]
dataset[-1]['components'] = dataset[-1]['components'][::-1,:,:]
if reflecty:
dataset[-1]['data'] = dataset[-1]['data'][:,::-1,:]
dataset[-1]['components'] = dataset[-1]['components'][:,::-1,:]
if reflectx:
dataset[-1]['data'] = dataset[-1]['data'][:,:,::-1]
dataset[-1]['components'] = dataset[-1]['components'][:,:,::-1]
if swapxy:
dataset[-1]['data'] = dataset[-1]['data'].transpose((0,2,1))
dataset[-1]['components'] = dataset[-1]['components'].transpose((0,2,1))
dataset[-1]['label'] = malis.seg_to_affgraph(dataset[-1]['components'],dataset[-1]['nhood'])
elif trn_method == 'pixel':
dataset[-1]['name'] = dataset[iset]['name']
dataset[-1]['nhood'] = dataset[iset]['nhood']
dataset[-1]['data'] = dataset[iset]['data'][:]
dataset[-1]['label'] = dataset[iset]['label'][:]
#dataset[-1]['components'] = dataset[iset]['components'][:]
if reflectz:
dataset[-1]['data'] = dataset[-1]['data'][::-1,:,:]
if len(dataset[-1]['label'].shape)==3:
dataset[-1]['label'] = dataset[-1]['label'][::-1,:,:]
elif len(dataset[-1]['label'].shape)==4:
dataset[-1]['label'] = dataset[-1]['label'][:,::-1,:,:]
if reflecty:
dataset[-1]['data'] = dataset[-1]['data'][:,::-1,:]
if len(dataset[-1]['label'].shape)==3:
dataset[-1]['label'] = dataset[-1]['label'][:,::-1,:]
elif len(dataset[-1]['label'].shape)==4:
dataset[-1]['label'] = dataset[-1]['label'][:,:,::-1,:]
if reflectx:
dataset[-1]['data'] = dataset[-1]['data'][:,:,::-1]
if len(dataset[-1]['label'].shape)==3:
dataset[-1]['label'] = dataset[-1]['label'][:,:,::-1]
elif len(dataset[-1]['label'].shape)==4:
dataset[-1]['label'] = dataset[-1]['label'][:,:,:,::-1]
if swapxy:
dataset[-1]['data'] = dataset[-1]['data'].transpose((0,2,1))
if len(dataset[-1]['label'].shape)==3:
dataset[-1]['label'] = dataset[-1]['label'].transpose((0,2,1))
elif len(dataset[-1]['label'].shape)==4:
dataset[-1]['label'] = dataset[-1]['label'].transpose((0,1,3,2))
#dataset[-1]['label'] = malis.seg_to_affgraph(dataset[-1]['components'],dataset[-1]['nhood'])
####dataset[-1]['transform'] = dataset[iset]['transform']
dataset[-1]['reflectz']=reflectz
dataset[-1]['reflecty']=reflecty
dataset[-1]['reflectx']=reflectx
dataset[-1]['swapxy']=swapxy
#pdb.set_trace()
return dataset
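# Note: the four binary options above (reflect z/y/x, swap xy) give 2**4 = 16
# combinations; the identity is skipped, so each input stack gains 15 augmented
# copies appended in place.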
def augment_data_elastic(dataset,ncopy_per_dset):
dsetout = []
nset = len(dataset)
for iset in range(nset):
for icopy in range(ncopy_per_dset):
reflectz = np.random.rand()>.5
reflecty = np.random.rand()>.5
reflectx = np.random.rand()>.5
swapxy = np.random.rand()>.5
dataset.append({})
dataset[-1]['reflectz']=reflectz
dataset[-1]['reflecty']=reflecty
dataset[-1]['reflectx']=reflectx
dataset[-1]['swapxy']=swapxy
dataset[-1]['name'] = dataset[iset]['name']
dataset[-1]['nhood'] = dataset[iset]['nhood']
dataset[-1]['data'] = dataset[iset]['data'][:]
dataset[-1]['components'] = dataset[iset]['components'][:]
if reflectz:
dataset[-1]['data'] = dataset[-1]['data'][::-1,:,:]
dataset[-1]['components'] = dataset[-1]['components'][::-1,:,:]
if reflecty:
dataset[-1]['data'] = dataset[-1]['data'][:,::-1,:]
dataset[-1]['components'] = dataset[-1]['components'][:,::-1,:]
if reflectx:
dataset[-1]['data'] = dataset[-1]['data'][:,:,::-1]
dataset[-1]['components'] = dataset[-1]['components'][:,:,::-1]
if swapxy:
dataset[-1]['data'] = dataset[-1]['data'].transpose((0,2,1))
dataset[-1]['components'] = dataset[-1]['components'].transpose((0,2,1))
# elastic deformations
dataset[-1]['label'] = malis.seg_to_affgraph(dataset[-1]['components'],dataset[-1]['nhood'])
return dataset
def slice_data(data, offsets, sizes):
if (len(offsets) == 1):
return data[offsets[0]:offsets[0] + sizes[0]]
if (len(offsets) == 2):
return data[offsets[0]:offsets[0] + sizes[0], offsets[1]:offsets[1] + sizes[1]]
if (len(offsets) == 3):
return data[offsets[0]:offsets[0] + sizes[0], offsets[1]:offsets[1] + sizes[1], offsets[2]:offsets[2] + sizes[2]]
if (len(offsets) == 4):
d = data[offsets[0]:offsets[0] + sizes[0], offsets[1]:offsets[1] + sizes[1], offsets[2]:offsets[2] + sizes[2], offsets[3]:offsets[3] + sizes[3]]
#print ('data:', d.shape)
return d
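# Example (illustrative): slice_data(vol, [0, 10, 20, 30], [1, 16, 64, 64])
# returns vol[0:1, 10:26, 20:84, 30:94].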
def set_slice_data(data, insert_data, offsets, sizes):
if (len(offsets) == 1):
data[offsets[0]:offsets[0] + sizes[0]] = insert_data
if (len(offsets) == 2):
data[offsets[0]:offsets[0] + sizes[0], offsets[1]:offsets[1] + sizes[1]] = insert_data
if (len(offsets) == 3):
data[offsets[0]:offsets[0] + sizes[0], offsets[1]:offsets[1] + sizes[1], offsets[2]:offsets[2] + sizes[2]] = insert_data
if (len(offsets) == 4):
data[offsets[0]:offsets[0] + sizes[0], offsets[1]:offsets[1] + sizes[1], offsets[2]:offsets[2] + sizes[2], offsets[3]:offsets[3] + sizes[3]] = insert_data
def sanity_check_net_blobs(net):
for key in net.blobs.keys():
dst = net.blobs[key]
data = np.ndarray.flatten(dst.data[0].copy())
        print('Blob: %s; %s' % (key, data.shape))
failure = False
first = -1
for i in range(0,data.shape[0]):
if abs(data[i]) > 1000:
failure = True
if first == -1:
first = i
                print('Failure, location %d; objective %d' % (i, data[i]))
        print('Failure: %s, first at %d, mean %3.5f' % (failure, first, np.mean(data)))
if failure:
break
def dump_feature_maps(net, folder):
for key in net.blobs.keys():
dst = net.blobs[key]
norm = normalize(dst.data[0], 0, 255)
# print(norm.shape)
for f in range(0,norm.shape[0]):
outfile = open(folder+'/'+key+'_'+str(f)+'.png', 'wb')
writer = png.Writer(norm.shape[2], norm.shape[1], greyscale=True)
# print(np.uint8(norm[f,:]).shape)
writer.write(outfile, np.uint8(norm[f,:]))
outfile.close()
def get_net_input_specs(net, test_blobs = ['data', 'label', 'scale', 'label_affinity', 'affinty_edges']):
shapes = []
# The order of the inputs is strict in our network types
for blob in test_blobs:
if (blob in net.blobs):
shapes += [[blob, np.shape(net.blobs[blob].data)]]
return shapes
def get_spatial_io_dims(net):
out_primary = 'label'
if ('prob' in net.blobs):
out_primary = 'prob'
shapes = get_net_input_specs(net, test_blobs=['data', out_primary])
dims = len(shapes[0][1]) - 2
print(dims)
input_dims = list(shapes[0][1])[2:2+dims]
output_dims = list(shapes[1][1])[2:2+dims]
#padding = [input_dims[i]-output_dims[i] for i in range(0,dims)]
# felix addition
if len(input_dims) == 3 and len(input_dims) > len(output_dims):
offsets = output_dims + [output_dims[-1]]
padding = [input_dims[i]-offsets[i] for i in range(0,dims)]
else:
padding = [input_dims[i]-output_dims[i] for i in range(0,dims)]
return input_dims, output_dims, padding
def get_fmap_io_dims(net):
out_primary = 'label'
if ('prob' in net.blobs):
out_primary = 'prob'
shapes = get_net_input_specs(net, test_blobs=['data', out_primary])
input_fmaps = list(shapes[0][1])[1]
output_fmaps = list(shapes[1][1])[1]
return input_fmaps, output_fmaps
def get_net_output_specs(net):
return np.shape(net.blobs['prob'].data)
def process(net, data_arrays, shapes=None, net_io=None):
input_dims, output_dims, input_padding = get_spatial_io_dims(net)
fmaps_in, fmaps_out = get_fmap_io_dims(net)
dims = len(output_dims)
#pdb.set_trace()
if (shapes == None):
shapes = []
# Raw data slice input (n = 1, f = 1, spatial dims)
shapes += [[1,fmaps_in] + input_dims]
if (net_io == None):
net_io = NetInputWrapper(net, shapes)
dst = net.blobs['prob']
dummy_slice = [0]
i_out = 0
pred_arrays = []
for i in range(0, len(data_arrays)):
data_array = data_arrays[i]['data']
data_dims = len(data_array.shape)
offsets = []
in_dims = []
out_dims = []
for d in range(0, dims):
offsets += [0]
in_dims += [data_array.shape[data_dims-dims+d]]
out_dims += [data_array.shape[data_dims-dims+d] - input_padding[d]]
plane_id = 0
if dims==2:
in_dims = [data_array.shape[1]] + in_dims
out_dims = [data_array.shape[1]] + out_dims
offsets = [plane_id] + offsets
pred_array = np.zeros(tuple([fmaps_out] + out_dims))
#pdb.set_trace()
while(True):
if dims==3:
data_slice = slice_data(data_array, [0] + offsets, [fmaps_in] + [output_dims[di] + input_padding[di] for di in range(0, dims)])
elif dims==2:
data_slice = slice_data(data_array, [0] + offsets, [fmaps_in,1] + [output_dims[di] + input_padding[di] for di in range(0, dims)])
net_io.setInputs([data_slice])
net.forward_iters(output_dims[-1])
output = dst.data[0].copy()
'''
if i_out < 5:
with h5py.File('output_%0d.h5'%(i_out), 'w') as f:
f.create_dataset('main', data=output)
print('saving output...%d', i_out)
with h5py.File('data_%0d.h5'%(i_out), 'w') as f:
f.create_dataset('main', data=data_slice)
i_out += 1
'''
if dims==3:
set_slice_data(pred_array, output, [0] + offsets, [fmaps_out] + output_dims)
                print(offsets)
                print(output.mean())
elif dims==2:
output = np.expand_dims(output,axis=1)
set_slice_data(pred_array, output, [0] + offsets, [fmaps_out,1] + output_dims)
incremented = False
#pdb.set_trace()
#if offsets[0]==124:
#print offsets
#print output.mean()
for d in range(0, dims):
##if (offsets[dims - 1 - d] == out_dims[dims - 1 - d] - output_dims[dims - 1 - d]):
### Reset direction
##offsets[dims - 1 - d] = 0
##else:
### Increment direction
##offsets[dims - 1 - d] = min(offsets[dims - 1 - d] + output_dims[dims - 1 - d], out_dims[dims - 1 - d] - output_dims[dims - 1 - d])
##incremented = True
##break
ninp_dims = len(in_dims)
if (offsets[ninp_dims - 1 - d] == out_dims[ninp_dims - 1 - d] - output_dims[dims - 1 - d]):
# Reset direction
offsets[ninp_dims - 1 - d] = 0
else:
# Increment direction
offsets[ninp_dims - 1 - d] = min(offsets[ninp_dims - 1 - d] + output_dims[dims - 1 - d], out_dims[ninp_dims - 1 - d] - output_dims[dims - 1 - d])
incremented = True
break
# Processed the whole input block, or, in case of 2D, the slice
if not incremented:
if dims==2 and plane_id < (in_dims[0]-1):
                    print(offsets)
                    print(output.mean())
plane_id = plane_id + 1
offsets[0] = plane_id
incremented = True
else:
break
#pdb.set_trace()
mask = np.zeros(tuple([fmaps_out] + in_dims))
if dims==3:
            startz = (input_dims[0]-output_dims[0])/2
            endz = in_dims[0] - startz
            starty = (input_dims[1]-output_dims[1])/2
            endy = in_dims[1] - starty
            startx = (input_dims[2]-output_dims[2])/2
            endx = in_dims[2] - startx
mask[:,startz:endz, starty:endy, startx:endx] = 1
elif dims==2:
            starty = (input_dims[0]-output_dims[0])/2
            endy = in_dims[1] - starty
            startx = (input_dims[1]-output_dims[1])/2
            endx = in_dims[2] - startx
mask[:,:, starty:endy, startx:endx] = 1
#pred_arrays += [pred_array]
pred_arrays += [pred_array]
pred_arrays += [mask]
return pred_arrays
# Wrapper around a networks
class TestNetEvaluator:
def __init__(self, test_net, train_net, data_arrays, options):
self.options = options
self.test_net = test_net
self.train_net = train_net
self.data_arrays = data_arrays
self.thread = None
input_dims, output_dims, input_padding = get_spatial_io_dims(self.test_net)
fmaps_in, fmaps_out = get_fmap_io_dims(self.test_net)
self.shapes = []
self.shapes += [[1,fmaps_in] + input_dims]
self.net_io = NetInputWrapper(self.test_net, self.shapes)
def run_test(self, iteration):
caffe.select_device(self.options.test_device, False)
self.pred_arrays = process(self.test_net, self.data_arrays, shapes=self.shapes, net_io=self.net_io)
for i in range(0, 1):
#for i in range(0, len(self.data_arrays)):
if ('name' in self.data_arrays[i]):
h5file = self.data_arrays[i]['name'] + '.h5'
else:
h5file = 'test_out_' + repr(i) + '.h5'
outhdf5 = h5py.File(h5file, 'w')
outdset = outhdf5.create_dataset('main', self.pred_arrays[i*2].shape, np.float32, data=self.pred_arrays[i*2])
# outdset.attrs['nhood'] = np.string_('-1,0,0;0,-1,0;0,0,-1')
outhdf5.close()
count=0
#pdb.set_trace()
self.pred_arrays_samesize = []
for i in range(0, len(self.pred_arrays),2):
pred_array1 = self.pred_arrays[i]
pred_mask = self.pred_arrays[i+1]
nz_idx = np.where(pred_mask[0,...]>0)
pred_array1_samesize = np.zeros(pred_mask.shape).astype(np.float32)
for cc in range(pred_array1_samesize.shape[0]):
pred_array1_samesize[cc,nz_idx[0],nz_idx[1],nz_idx[2]] = pred_array1[cc,...].ravel()
self.pred_arrays_samesize.append([])
self.pred_arrays_samesize[-1] = pred_array1_samesize
def evaluate(self, iteration):
# Test/wait if last test is done
if not(self.thread is None):
try:
self.thread.join()
except:
self.thread = None
# Weight transfer
net_weight_transfer(self.test_net, self.train_net)
# Run test
# # Toufiq -- debug check
self.run_test(iteration)
#self.thread = threading.Thread(target=self.run_test, args=[iteration])
#self.thread.start()
def init_solver(solver_config, options):
caffe.set_mode_gpu()
caffe.select_device(options.train_device, False)
solver_inst = caffe.get_solver(solver_config)
#print(caffe.enumerate_devices(False))
if (options.test_net == None):
return (solver_inst, None)
else:
return (solver_inst, init_testnet(options.test_net, test_device=options.test_device))
def init_testnet(test_net, trained_model=None, test_device=0):
print('--->init_testnet')
caffe.set_mode_gpu()
print('--->selecting test device...', test_device)
caffe.select_device(test_device, False)
print('--->going to create nets...')
if(trained_model == None):
print('--->creating test net...')
return caffe.Net(test_net, caffe.TEST)
else:
print('--->creating test and train net...')
return caffe.Net(test_net, trained_model, caffe.TEST)
def oldtrain(solver, test_net, data_arrays, train_data_arrays, options):
caffe.select_device(options.train_device, False)
print('====> in training....')
net = solver.net
net.debug_info = True
#pdb.set_trace()
clwt=None
test_eval = None
if options.scale_error == 2:
clwt = ClassWeight(data_arrays, solver.iter)
test_eval = TestNetEvaluator(test_net, net, data_arrays, options)
test_eval2 = None
if (options.test_net != None):
test_eval2 = TestNetEvaluator(test_net, net, train_data_arrays, options)
input_dims, output_dims, input_padding = get_spatial_io_dims(net)
fmaps_in, fmaps_out = get_fmap_io_dims(net)
print('input_dims:', input_dims)
print('output_dims:', output_dims)
print('input_padding:', input_padding)
print('fmaps_out:', fmaps_out)
dims = len(output_dims)
losses = []
shapes = []
# Raw data slice input (n = 1, f = 1, spatial dims)
shapes += [[1,fmaps_in] + input_dims]
# Label data slice input (n = 1, f = #edges, spatial dims)
shapes += [[1,fmaps_out] + output_dims]
if (options.loss_function == 'malis'):
# Connected components input (n = 1, f = 1, spatial dims)
shapes += [[1,1] + output_dims]
if (options.loss_function == 'euclid'):
# Error scale input (n = 1, f = #edges, spatial dims)
shapes += [[1,fmaps_out] + output_dims]
# Nhood specifications (n = #edges, f = 3)
if (('nhood' in data_arrays[0]) and (options.loss_function == 'malis')):
shapes += [[1,1] + list(np.shape(data_arrays[0]['nhood']))]
net_io = NetInputWrapper(net, shapes)
weight_vec = []
if (options.loss_function == 'softmax' or options.loss_function == 'euclid') and options.scale_error == 1:
#pdb.set_trace()
weight_vec = class_balance_distribution(data_arrays[0]['label'])
#weight_vec[2] = weight_vec[1]*4.0 #for 3 class, inversed during weighting
#pdb.set_trace()
# Loop from current iteration to last iteration
for i in range(solver.iter, solver.max_iter):
if (options.test_net != None and i % options.test_interval == 0 and i>1):
#pdb.set_trace()
test_eval2.evaluate(i)
if options.scale_error == 2:
test_eval.evaluate(i)
clwt.recompute_weight(test_eval.pred_arrays_samesize, i)
# First pick the dataset to train with
dataset = randint(0, len(data_arrays) - 1)
if dims==3:
offsets = []
for j in range(0, dims):
offsets.append(randint(0, data_arrays[dataset]['data'].shape[1+j] - (output_dims[j] + input_padding[j])))
# These are the raw data elements
#pdb.set_trace()
data_slice = slice_data(data_arrays[dataset]['data'], [0]+offsets, [fmaps_in]+[output_dims[di] + input_padding[di] for di in range(0, dims)])
label_slice = slice_data(data_arrays[dataset]['label'], [0] + [offsets[di] + int(math.ceil(input_padding[di] / float(2))) for di in range(0, dims)], [fmaps_out] + output_dims)
if options.scale_error ==2 and clwt != None:
weight_slice = slice_data(clwt.class_weights[dataset], [0] + [offsets[di] + int(math.ceil(input_padding[di] / float(2))) for di in range(0, dims)], [fmaps_out] + output_dims)
elif dims==2:
offsets = []
offsets.append(randint(0,data_arrays[dataset]['data'].shape[1]-1))
for j in range(0, dims):
offsets.append(randint(0, data_arrays[dataset]['data'].shape[1+j] - (output_dims[j] + input_padding[j])))
# These are the raw data elements
#pdb.set_trace()
data_slice = slice_data(data_arrays[dataset]['data'], [0]+offsets, [fmaps_in,1]+[output_dims[di] + input_padding[di] for di in range(0, dims)])
label_slice = slice_data(data_arrays[dataset]['label'], [0, offsets[0]] + [offsets[di+1] + int(math.ceil(input_padding[di] / float(2))) for di in range(0, dims)], [fmaps_out,1] + output_dims)
data_slice = np.squeeze(data_slice)
label_slice = np.squeeze(label_slice)
#offsets=np.zeros(dims);
if (data_slice.shape[0]<1) or (label_slice.shape[0]<2):
pp=1
#print('pid:', os.getpid(), 'offsets:', offsets, 'dims:', dims, 'shape:', data_arrays[dataset]['data'].shape)
#exit(1)
#pdb.set_trace()
#if(np.unique(label_slice).shape[0]<2):
# continue;
# transform the input
# this code assumes that the original input pixel values are scaled between (0,1)
if 'transform' in data_arrays[dataset]:
# print('Pre:',(data_slice.min(),data_slice.mean(),data_slice.max()))
data_slice_mean = data_slice.mean()
lo, hi = data_arrays[dataset]['transform']['scale']
data_slice = data_slice_mean + (data_slice-data_slice_mean)*np.random.uniform(low=lo,high=hi)
lo, hi = data_arrays[dataset]['transform']['shift']
data_slice = data_slice + np.random.uniform(low=lo,high=hi)
# print('Post:',(data_slice.min(),data_slice.mean(),data_slice.max()))
data_slice = np.clip(data_slice, 0.0, 0.95)
if options.loss_function == 'malis':
components_slice,ccSizes = malis.connected_components_affgraph(label_slice.astype(int32), data_arrays[dataset]['nhood'])
# Also recomputing the corresponding labels (connected components)
net_io.setInputs([data_slice, label_slice, components_slice, data_arrays[0]['nhood']])
if options.loss_function == 'euclid':
###if(options.scale_error == True):
###frac_pos = np.clip(label_slice.mean(),0.05,0.95) #for binary labels
###w_pos = 1.0/(2.0*frac_pos)
###w_neg = 1.0/(2.0*(1.0-frac_pos))
###else:
###w_pos = 1
###w_neg = 1
###net_io.setInputs([data_slice, label_slice, error_scale(label_slice,w_neg,w_pos)])
if(options.scale_error == 3):
frac_pos = np.clip(label_slice.mean(),0.01,0.99) #for binary labels
w_pos = 1.0/(2.0*frac_pos)
w_neg = 1.0/(2.0*(1.0-frac_pos))
net_io.setInputs([data_slice, label_slice, error_scale(label_slice,w_neg,w_pos)])
elif(options.scale_error == 1):
frac_pos = weight_vec[0]
w_pos = 1./frac_pos
label_weights = error_scale_overall(label_slice, weight_vec)
net_io.setInputs([data_slice, label_slice, label_weights])
elif options.scale_error == 2:
net_io.setInputs([data_slice, label_slice, weight_slice])
elif options.scale_error == 0:
net_io.setInputs([data_slice, label_slice])
if options.loss_function == 'softmax':
net_io.setInputs([data_slice, label_slice])
#pdb.set_trace()
print('data_slice dims:', data_slice.shape)
# Single step
n_slices = output_dims[-1]
loss = solver.step(1) #n_slices)
#for i in range(n_slices):
# loss = solver.stepForward(1)
#solver.stepBackward()
# sanity_check_net_blobs(net)
while gc.collect():
pass
if (options.loss_function == 'euclid' or options.loss_function == 'euclid_aniso') and options.scale_error ==1 :
print("[Iter %i] Loss: %f, frac_pos=%f, w_pos=%f" % (i,loss,frac_pos,w_pos))
else:
print("[Iter %i] Loss: %f" % (i,loss))
# TODO: Store losses to file
losses += [loss]
if hasattr(options, 'loss_snapshot') and ((i % options.loss_snapshot) == 0):
io.savemat('loss.mat',{'loss':losses})
#pdb.set_trace()
def train(solver, test_net, data_arrays, train_data_arrays, options):
caffe.select_device(options.train_device, False)
print('====> in training....')
net = solver.net
net.debug_info = True
#pdb.set_trace()
'''
clwt=None
test_eval = None
if options.scale_error == 2:
clwt = ClassWeight(data_arrays, solver.iter)
test_eval = TestNetEvaluator(test_net, net, data_arrays, options)
test_eval2 = None
if (options.test_net != None):
test_eval2 = TestNetEvaluator(test_net, net, train_data_arrays, options)
'''
input_dims, output_dims, input_padding = get_spatial_io_dims(net)
fmaps_in, fmaps_out = get_fmap_io_dims(net)
print('input_dims:', input_dims)
print('output_dims:', output_dims)
print('input_padding:', input_padding)
print('fmaps_out:', fmaps_out)
dims = len(output_dims)
losses = []
shapes = []
# Raw data slice input (n = 1, f = 1, spatial dims)
shapes += [[1,fmaps_in] + input_dims]
# Label data slice input (n = 1, f = #edges, spatial dims)
shapes += [[1,fmaps_out] + output_dims]
if (options.loss_function == 'malis'):
# Connected components input (n = 1, f = 1, spatial dims)
shapes += [[1,1] + output_dims]
if (options.loss_function == 'euclid'):
# Error scale input (n = 1, f = #edges, spatial dims)
shapes += [[1,fmaps_out] + output_dims]
# Nhood specifications (n = #edges, f = 3)
if (('nhood' in data_arrays[0]) and (options.loss_function == 'malis')):
shapes += [[1,1] + list(np.shape(data_arrays[0]['nhood']))]
net_io = NetInputWrapper(net, shapes)
weight_vec = []
if (options.loss_function == 'softmax' or options.loss_function == 'euclid') and options.scale_error == 1:
#pdb.set_trace()
weight_vec = class_balance_distribution(data_arrays[0]['label'])
#weight_vec[2] = weight_vec[1]*4.0 #for 3 class, inversed during weighting
#pdb.set_trace()
'''
dims3d = dims + 1
output_dims3d = output_dims + [output_dims[-1]]
input_padding3d = input_padding + [input_padding[-1]]
output_dims3d = output_dims + [output_dims[-1]]
'''
n_slices = output_dims[-1]
i_slice = 0
# Loop from current iteration to last iteration
for i in range(solver.iter, solver.max_iter):
'''
if (options.test_net != None and i % options.test_interval == 0 and i>1):
#pdb.set_trace()
test_eval2.evaluate(i)
if options.scale_error == 2:
test_eval.evaluate(i)
clwt.recompute_weight(test_eval.pred_arrays_samesize, i)
'''
# First pick the dataset to train with
dataset = randint(0, len(data_arrays) - 1)
#print('dataset shape:', data_arrays[dataset]['data'].shape)
if i_slice == 0 or i_slice == n_slices:
i_slice = 0
offsets = []
for j in range(0, dims):
offsets.append(randint(0, data_arrays[dataset]['data'].shape[1+j] - (output_dims[j] + input_padding[j])))
data_slice = slice_data(data_arrays[dataset]['data'], [0]+offsets, [fmaps_in]+[output_dims[di] + input_padding[di] for di in range(0, dims)])
label_slice = slice_data(data_arrays[dataset]['label'], [0] + [offsets[di] + int(math.ceil(input_padding[di] / float(2))) for di in range(0, dims)], [fmaps_out] + output_dims)
print(data_slice.shape)
print(label_slice.shape)
#data_slice = np.squeeze(data_slice)
label_slice = np.squeeze(label_slice)
#print(label_slice)
#offsets=np.zeros(dims);
if (data_slice.shape[0]<1) or (label_slice.shape[0]<2):
pp=1
# transform the input
# this code assumes that the original input pixel values are scaled between (0,1)
if 'transform' in data_arrays[dataset]:
# print('Pre:',(data_slice.min(),data_slice.mean(),data_slice.max()))
data_slice_mean = data_slice.mean()
lo, hi = data_arrays[dataset]['transform']['scale']
data_slice = data_slice_mean + (data_slice-data_slice_mean)*np.random.uniform(low=lo,high=hi)
lo, hi = data_arrays[dataset]['transform']['shift']
data_slice = data_slice + np.random.uniform(low=lo,high=hi)
# print('Post:',(data_slice.min(),data_slice.mean(),data_slice.max()))
data_slice = np.clip(data_slice, 0.0, 0.95)
if options.loss_function == 'malis':
components_slice,ccSizes = malis.connected_components_affgraph(label_slice.astype(int32), data_arrays[dataset]['nhood'])
# Also recomputing the corresponding labels (connected components)
net_io.setInputs([data_slice, label_slice, components_slice, data_arrays[0]['nhood']])
if options.loss_function == 'euclid':
###net_io.setInputs([data_slice, label_slice, error_scale(label_slice,w_neg,w_pos)])
if(options.scale_error == 3):
frac_pos = np.clip(label_slice.mean(),0.01,0.99) #for binary labels
w_pos = 1.0/(2.0*frac_pos)
w_neg = 1.0/(2.0*(1.0-frac_pos))
net_io.setInputs([data_slice, label_slice, error_scale(label_slice,w_neg,w_pos)])
elif(options.scale_error == 1):
frac_pos = weight_vec[0]
w_pos = 1./frac_pos
label_weights = error_scale_overall(label_slice, weight_vec)
net_io.setInputs([data_slice, label_slice, label_weights])
elif options.scale_error == 2:
net_io.setInputs([data_slice, label_slice, weight_slice])
elif options.scale_error == 0:
net_io.setInputs([data_slice, label_slice])
if options.loss_function == 'softmax':
net_io.setInputs([data_slice, label_slice])
#pdb.set_trace()
#print('training slice#: ', i_slice)
# Single step
n_slices = output_dims[-1]
#loss = solver.stepForward(1)
loss = solver.stepParallel( n_slices )
i_slice = n_slices
# do backward when all slices have been processed.
#if i_slice == n_slices:
# solver.stepBackward(1)
#loss = solver.step(1) #n_slices)
#for i in range(n_slices):
# loss = solver.stepForward(1)
#solver.stepBackward()
# sanity_check_net_blobs(net)
while gc.collect():
pass
if (options.loss_function == 'euclid' or options.loss_function == 'euclid_aniso') and options.scale_error ==1 :
print("[Iter %i] Loss: %f, frac_pos=%f, w_pos=%f" % (i,loss,frac_pos,w_pos))
else:
print("[Iter %i] Loss: %f" % (i,loss))
# TODO: Store losses to file
losses += [loss]
if hasattr(options, 'loss_snapshot') and ((i % options.loss_snapshot) == 0):
io.savemat('loss.mat',{'loss':losses})
#pdb.set_trace()
| [
"[email protected]"
] | |
4e09b2610a1de447484dfa0b2a454a2e60fbe606 | c1c7f9e400f788c296d9464117ba6cac553b03ca | /src/datasets/soilmoist.py | 4985c8c3541822a678276e354a53aac02b638597 | [
"MIT"
] | permissive | nasa/RHEAS | 1d8e0d6cb2df13713d458db07c0348fcf18eb9e1 | 27d0abcaeefd8760ce68e05e52905aea5f8f3a51 | refs/heads/master | 2023-08-03T23:05:47.535575 | 2023-08-01T16:55:13 | 2023-08-01T16:55:13 | 46,281,533 | 88 | 63 | MIT | 2023-08-01T16:55:15 | 2015-11-16T14:57:18 | Python | UTF-8 | Python | false | false | 4,223 | py | """Definition for abstract soil moisture class.
.. module:: soilmoist
:synopsis: Definition of the Soilmoist class
.. moduleauthor:: Kostas Andreadis <[email protected]>
"""
import numpy as np
import dbio
import logging
class Soilmoist(object):
def __init__(self, uncert=None):
"""Initialize SMOS soil moisture object."""
self.statevar = ["soil_moist"]
self.obsvar = "soil_moist"
self.uncert = uncert
def x(self, dt, models):
"""Retrieve state variable from database."""
data = {}
db = dbio.connect(models.dbname)
cur = db.cursor()
for s in self.statevar:
sql = "select ensemble,st_x(geom),st_y(geom),sum(val) from (select ensemble,layer,(ST_PixelAsCentroids(rast)).* from {0}.{1} where fdate=date '{2}-{3}-{4}') foo group by ensemble,geom order by ensemble".format(
models.name, s, dt.year, dt.month, dt.day)
cur.execute(sql)
e, lon, lat, vals = zip(*cur.fetchall())
gid = [models[0].lgid[(l[0], l[1])] for l in zip(lat, lon)]
nens = max(e)
data[s] = np.array(vals).reshape((len(vals) / nens, nens))
lat = np.array(lat).reshape((len(lat) / nens, nens))
lon = np.array(lon).reshape((len(lon) / nens, nens))
gid = np.array(gid).reshape((len(gid) / nens, nens))
cur.close()
db.close()
return data, lat, lon, gid
def get(self, dt, models):
"""Retrieve observations from database for date *dt*."""
db = dbio.connect(models.dbname)
cur = db.cursor()
sql = "select st_x(geom),st_y(geom),val from (select (st_pixelascentroids(st_clip(rast,geom))).* from {0},{1}.basin where st_intersects(rast,geom) and fdate=date '{2}-{3}-{4}') foo".format(
self.tablename, models.name, dt.year, dt.month, dt.day)
cur.execute(sql)
if bool(cur.rowcount):
lon, lat, data = zip(*cur.fetchall())
data = np.array(data).reshape((len(data), 1))
lat = np.array(lat).reshape((len(lat), 1))
lon = np.array(lon).reshape((len(lon), 1))
self.nobs = len(data)
else:
data = lat = lon = None
cur.close()
db.close()
return data, lat, lon
def hx(self, models, dt):
"""Retrieve observed variable from database and resample to observation resolution."""
db = dbio.connect(models.dbname)
cur = db.cursor()
sql = "with f as (select st_union(st_clip(rast,geom)) as rast from {0},{1}.basin where st_intersects(rast,geom) and fdate=date '{2}-{3}-{4}') select ensemble,st_x(geom),st_y(geom),val from (select ensemble,(st_pixelascentroids(st_resample(b.rast,f.rast,'average'))).* from f,{1}.{5} as b where layer=1 and fdate=date '{2}-{3}-{4}') foo order by ensemble".format(
self.tablename, models.name, dt.year, dt.month, dt.day, self.obsvar)
cur.execute(sql)
e, lon, lat, data = zip(*cur.fetchall())
nens = max(e)
lat = np.array(lat).reshape((len(lat) / nens, nens))
lon = np.array(lon).reshape((len(lon) / nens, nens))
data = np.array(data).reshape((len(data) / nens, nens))
sql = "select depths from {0}.basin order by geom <-> st_geomfromtext('POINT(%(lon)s %(lat)s)',4326) limit 1".format(
models.name)
for i in range(len(data) / nens):
for e in range(nens):
cur.execute(sql, {'lat': lat[i, e], 'lon': lon[i, e]})
z = cur.fetchone()[0][0]
# convert to volumetric soil moisture
data[i, e] /= (1000.0 * z)
cur.close()
db.close()
return data, lat, lon
def E(self, nens):
"""Generate observation error vector."""
log = logging.getLogger(__name__)
e = None
if self.uncert is not None:
try:
e = self.uncert(size=(self.nobs, nens))
except:
log.warning("Error using provided parameters in observation error PDF. Reverting to default.")
if e is None:
e = np.random.normal(0.0, self.stddev, (self.nobs, nens))
return e
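    # Note: the default branch assumes subclasses define self.stddev; this
    # abstract base class does not set it in __init__.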
| [
"[email protected]"
] | |
c09e738a65a63a9205d71eecf0d10d4efcb5b816 | cd2aaf0097f2e244aa4a22c9da7133dd0e2f2fb8 | /Saylani/python-code-master/23July2017/hello/first.py | 3447c8c7a278396583b817b155ad18eece3784af | [] | no_license | EnggQasim/SSUET-2017-Module-I | 349ea6e9b0554fa8c55899622bf0ee97fd19b685 | cd41ab8e768616ca56ddaa1d7662283f653674f9 | refs/heads/master | 2020-03-25T10:36:38.330710 | 2018-09-30T13:17:38 | 2018-09-30T13:17:38 | 143,698,684 | 1 | 2 | null | null | null | null | UTF-8 | Python | false | false | 190 | py | print("Hello New world")
name = "Mughal"
name = 'Mughal'
age = 45
email = "[email protected]"
print(name)
name = 67
print(name)
name1 = "My father\"s name is M. Aslam"
print(name1)
| [
"[email protected]"
] | |
5fb8b2e6dd69ef1453ce691638668749ee32b12b | 29e1133741b339c2e6c4c0385a103f68baa32a11 | /findata/gbif/gbif.py | 94ef7024bcdaf8ce800a3eefbe841ba0e9df5a59 | [] | no_license | Gscsd8527/AllProject | b406935dd1e969d1f45a62f870fb409f81ba4200 | 10b56c432b6f433e3a37967b7c717840e726765c | refs/heads/master | 2023-02-21T20:25:48.397668 | 2022-03-04T14:01:27 | 2022-03-04T14:01:27 | 199,461,253 | 13 | 6 | null | 2023-02-15T20:47:23 | 2019-07-29T13:45:25 | Python | UTF-8 | Python | false | false | 1,903 | py | import requests
from concurrent.futures import ThreadPoolExecutor, as_completed
import json
import pymongo
from loguru import logger
myclient = pymongo.MongoClient('mongodb://*********:27017/')
mydb = myclient['dataset'] # 数据库
mycol = mydb['gbif'] # 表
class Gbif:
def __init__(self):
self.url = 'https://www.gbif.org/api/dataset/search?facet=type&facet=publishing_org&facet=hosting_org&facet=publishing_country&facet=project_id&facet=license&locale=en&offset={offset}'
        self.count = 54976  # total number of records
        self.page_num = 20  # records per page
self.pages = self.get_pages()
def get_pages(self):
"""
获取页数
:return:
"""
pages = self.count // self.page_num
ys = self.count % self.page_num
if ys > 0:
pages += 1
print(pages)
return pages
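    # Equivalent ceiling division (illustrative):
    #   pages = -(-self.count // self.page_num)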
def get_works(self):
works = [self.url.format(offset=page*self.page_num) for page in range(self.pages)]
return works
def request(self, url):
response = requests.get(url)
if response.status_code == 200:
text = response.text
data_json = json.loads(text)
results = data_json['results']
return results
        else:
            print('Unexpected response status code:', response.status_code)
            return []  # empty result set so callers can still iterate safely
def main():
"""
https://www.gbif.org/dataset/search
:return:
"""
gbif = Gbif()
works = gbif.get_works()
pool = ThreadPoolExecutor(max_workers=10)
jobs = []
for work in works:
        p = pool.submit(gbif.request, work)  # submit the task asynchronously
jobs.append(p)
for _ in as_completed(jobs):
for result in _.result():
logger.info(result['title'])
# mycol.insert_one(result)
if __name__ == '__main__':
main()
| [
"[email protected]"
] | |
b8732654492ceef372cc4e82ca927642071ce0f8 | 47ec91bedb4ca9d69bf288fd25484b08e013a8ac | /themylog/config/processors.py | ade5bc4eb60a7c781f37f1697dc4a98080629c66 | [] | no_license | themylogin/themylog | 23d1238866240d168cf3ce828bbb85d38276a226 | d4de99f08f066972a06c1463a1e2440a56513bfa | refs/heads/master | 2020-04-15T17:29:36.318428 | 2016-04-17T14:53:14 | 2016-04-17T14:53:14 | 14,795,831 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 626 | py | # -*- coding=utf-8 -*-
from __future__ import absolute_import, division, unicode_literals
from collections import namedtuple
import sys
from themylog.config.scripts import find_scripts
Processor = namedtuple("Processor", ["name", "process"])
def get_processors(config):
processors = []
directory = config.get("processors", {}).get("directory")
if directory:
sys.path.insert(0, directory)
for script in find_scripts(directory, {}):
processors.append(Processor(name=script.name,
process=__import__(script.name).process))
return processors
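# Note: each discovered script <name>.py is expected to expose a module-level
# process() callable, which is imported by name above.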
| [
"[email protected]"
] | |
e5f2f94778e5364a8c9c19af7062bf8a1f7f02e9 | 2734b77a68f6d7e22e8b823418ad1c59fe1a34af | /opengever/workspaceclient/tests/test_keys.py | 96b2015989f56a8230f5fa71a7c7b1f05fd50952 | [] | no_license | 4teamwork/opengever.core | 5963660f5f131bc12fd0a5898f1d7c8f24a5e2b1 | a01bec6c00d203c21a1b0449f8d489d0033c02b7 | refs/heads/master | 2023-08-30T23:11:27.914905 | 2023-08-25T14:27:15 | 2023-08-25T14:27:15 | 9,788,097 | 19 | 8 | null | 2023-09-14T13:28:56 | 2013-05-01T08:28:16 | Python | UTF-8 | Python | false | false | 2,895 | py | from contextlib import contextmanager
from ftw.builder import Builder
from ftw.builder import create
from opengever.testing import IntegrationTestCase
from opengever.workspaceclient.exceptions import ServiceKeyMissing
from opengever.workspaceclient.keys import key_registry
from plone.restapi.serializer.converters import json_compatible
import json
import shutil
import tempfile
class TestKeyRegistry(IntegrationTestCase):
@contextmanager
def temp_fs_key(self, key):
temp_dir = tempfile.mkdtemp()
original_key_directory = key_registry.key_directory
original_keys = key_registry.keys
key_registry.key_directory = temp_dir
file_ = tempfile.NamedTemporaryFile(
dir=temp_dir, suffix=".json", delete=False)
file_.write(json.dumps(json_compatible(key)))
file_.close()
try:
key_registry.load_file_system_keys()
yield temp_dir
finally:
shutil.rmtree(temp_dir)
key_registry.key_directory = original_key_directory
key_registry.keys = original_keys
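    # The context manager above points the global key_registry at a temporary
    # directory containing a single JSON key file and restores the original
    # registry state afterwards.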
def test_raises_an_error_if_the_key_file_not_found_for_a_specific_url(self):
service_key_client = create(Builder('workspace_token_auth_app')
.uri('http://example.com/plone/'))
with self.temp_fs_key(service_key_client) as path:
with self.assertRaises(ServiceKeyMissing) as cm:
key_registry.get_key_for('http://example.de/plone/')
self.maxDiff = None
self.assertEqual(
"No workspace service key found for URL http://example.de/plone.\n"
"Found keys ('http://example.com/plone',) in the folder: {}".format(path),
str(cm.exception))
def test_skip_fs_keys_without_a_token_uri(self):
service_key_client = create(Builder('workspace_token_auth_app')
.uri('http://example.com/plone/'))
del service_key_client['token_uri']
with self.temp_fs_key(service_key_client):
key_registry.load_file_system_keys()
self.assertEqual([], key_registry.keys)
def test_return_registered_keys_on_the_filesystem(self):
service_key_client = create(Builder('workspace_token_auth_app')
.uri('http://example.com/plone'))
with self.temp_fs_key(service_key_client):
self.assertEqual(
['http://example.com/plone'],
key_registry.keys_by_token_uri.keys())
def test_get_key_for(self):
service_key_client = create(Builder('workspace_token_auth_app')
.uri('http://example.com/plone/'))
self.assertDictContainsSubset(
service_key_client,
key_registry.get_key_for('http://example.com/plone/'))
| [
"[email protected]"
] | |
cb8b5b0617c9db4aef66e16cb81ce5a3fcd33305 | 2f98aa7e5bfc2fc5ef25e4d5cfa1d7802e3a7fae | /python/python_29358.py | 0321469737aa07d31d022e05273ebf1901e76801 | [] | no_license | AK-1121/code_extraction | cc812b6832b112e3ffcc2bb7eb4237fd85c88c01 | 5297a4a3aab3bb37efa24a89636935da04a1f8b6 | refs/heads/master | 2020-05-23T08:04:11.789141 | 2015-10-22T19:19:40 | 2015-10-22T19:19:40 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 65 | py | # Python coveralls not testing "if name == __main__"
if __name__
| [
"[email protected]"
] | |
eb28e0f54441e884f4107a7771de1bbcac4b2f24 | 3b60e6f4bbc011003ac4929f01eb7409918deb79 | /Analysis_v1/Simulation/Pythia/Unparticles/CP2UnparticlesPythia8fragments-2018PSWeights/UnparToGG_Spin2_du1p1_LambdaU-2000_pT70_M2000_TuneCP2_13TeV_pythia8_cfi.py | f5943fddccc2bb59b36b498dd9eadffdd636bb94 | [] | no_license | uzzielperez/Analyses | d1a64a4e8730325c94e2bc8461544837be8a179d | 1d66fa94763d7847011ea551ee872936c4c401be | refs/heads/master | 2023-02-09T04:54:01.854209 | 2020-09-07T14:57:54 | 2020-09-07T14:57:54 | 120,850,137 | 0 | 0 | null | 2020-06-17T16:48:16 | 2018-02-09T03:14:04 | C++ | UTF-8 | Python | false | false | 1,586 | py | import FWCore.ParameterSet.Config as cms
from Configuration.Generator.Pythia8CommonSettings_cfi import *
from Configuration.Generator.MCTunes2017.PythiaCP2Settings_cfi import *
from Configuration.Generator.PSweightsPythia.PythiaPSweightsSettings_cfi import *
generator = cms.EDFilter("Pythia8GeneratorFilter",
maxEventsToPrint = cms.untracked.int32(1),
pythiaPylistVerbosity = cms.untracked.int32(1),
filterEfficiency = cms.untracked.double(1.0),
pythiaHepMCVerbosity = cms.untracked.bool(False),
comEnergy = cms.double(13000.),
PythiaParameters = cms.PSet(
pythia8CommonSettingsBlock,
pythia8CP2SettingsBlock,
pythia8PSweightsSettingsBlock,
processParameters = cms.vstring(
'ExtraDimensionsUnpart:ffbar2gammagamma = on',
'ExtraDimensionsUnpart:gg2gammagamma = on',
'PromptPhoton:gg2gammagamma = on',
#'PromptPhoton:ffbar2gammagamma = on',
'ExtraDimensionsUnpart:LambdaU = 2000.0',
'ExtraDimensionsUnpart:lambda = 1.0',
'ExtraDimensionsUnpart:dU = 1.1',
'ExtraDimensionsUnpart:spinU = 2',
'PhaseSpace:pTHatMin = 70',
'PhaseSpace:mHatMin = 2000',
'PhaseSpace:mHatMax = 1',
),
parameterSets = cms.vstring('pythia8CommonSettings',
'pythia8CP2Settings',
'processParameters',
'pythia8PSweightsSettings',
)
)
)
ProductionFilterSequence = cms.Sequence(generator)
| [
"[email protected]"
] | |
dc7b67abce3d12e28bf013bb0871e37d84a241c6 | 40eb57784dc62179eafcf21f796a7d0c43cf55e5 | /calliope/constraints/planning.py | 39e9ce39f71b7a76af050a81764360e4c2893cc3 | [
"Apache-2.0"
] | permissive | sjpfenninger/calliope | 61b202f8519076a95ee8bad3d0d2215043e1b497 | a4e49c3b7d37f908bafc84543510eec0b4cf5d9f | refs/heads/master | 2020-06-11T01:01:36.709420 | 2016-12-06T14:47:20 | 2016-12-06T14:47:20 | 75,827,649 | 1 | 1 | null | 2016-12-07T11:01:51 | 2016-12-07T11:01:49 | Python | UTF-8 | Python | false | false | 1,944 | py | """
Copyright (C) 2013-2016 Stefan Pfenninger.
Licensed under the Apache 2.0 License (see LICENSE file).
planning.py
~~~~~~~~~~~
Planning constraints.
"""
import numpy as np
import pyomo.core as po
def node_constraints_build_total(model):
"""
"""
m = model.m
# Constraint rules
def c_e_cap_total_systemwide_rule(m, y):
total_max = model.get_option(y + '.constraints.e_cap.total_max')
total_equals = model.get_option(y + '.constraints.e_cap.total_equals')
scale = model.get_option(y + '.constraints.e_cap_scale')
if np.isinf(total_max) and not total_equals:
return po.Constraint.NoConstraint
sum_expr = sum(m.e_cap[y, x] for x in m.x)
total_expr = total_equals * scale if total_equals else total_max * scale
if total_equals:
return sum_expr == total_expr
else:
return sum_expr <= total_expr
# Constraints
m.c_e_cap_total_systemwide = \
po.Constraint(m.y, rule=c_e_cap_total_systemwide_rule)
def system_margin(model):
"""
"""
m = model.m
time_res = model.data['_time_res'].to_series()
def carrier(y):
return model.get_option(y + '.carrier')
# Constraint rules
def c_system_margin_rule(m, c):
# If no margin defined for a carrier, use 0 (i.e. no margin)
margin = model.config_model.system_margin.get_key(c, default=0)
if margin:
t = model.t_max_demand[c]
return (sum(m.es_prod[c, y, x, t] for y in m.y for x in m.x)
* (1 + margin)
<= time_res.at[t]
* sum((m.e_cap[y, x] / model.get_eff_ref('e', y, x))
for y in m.y if carrier(y) == c
for x in m.x))
else:
return po.Constraint.NoConstraint
# Constraints
m.c_system_margin = po.Constraint(m.c, rule=c_system_margin_rule)
| [
"[email protected]"
] | |
a727ae60692c2636d6abd360bd56330c24e06fee | df2cbe914f463ad050d7ed26194424afbe3a0a52 | /addons/hr_maintenance/models/res_users.py | c97a2bb60b15c15017414adf202109752bb76078 | [
"Apache-2.0"
] | permissive | SHIVJITH/Odoo_Machine_Test | 019ed339e995be980606a2d87a63312ddc18e706 | 310497a9872db7844b521e6dab5f7a9f61d365a4 | refs/heads/main | 2023-07-16T16:23:14.300656 | 2021-08-29T11:48:36 | 2021-08-29T11:48:36 | 401,010,175 | 0 | 0 | Apache-2.0 | 2021-08-29T10:13:58 | 2021-08-29T10:13:58 | null | UTF-8 | Python | false | false | 1,195 | py | from odoo import api, models, fields
class Users(models.Model):
_inherit = 'res.users'
equipment_ids = fields.One2many('maintenance.equipment', 'owner_user_id', string="Managed Equipments")
equipment_count = fields.Integer(related='employee_id.equipment_count', string="Assigned Equipments")
def __init__(self, pool, cr):
""" Override of __init__ to add access rights.
Access rights are disabled by default, but allowed
on some specific fields defined in self.SELF_{READ/WRITE}ABLE_FIELDS.
"""
init_res = super(Users, self).__init__(pool, cr)
# duplicate list to avoid modifying the original reference
type(self).SELF_READABLE_FIELDS = type(self).SELF_READABLE_FIELDS + ['equipment_count']
return init_res
class Employee(models.Model):
_inherit = 'hr.employee'
equipment_ids = fields.One2many('maintenance.equipment', 'employee_id')
equipment_count = fields.Integer('Equipments', compute='_compute_equipment_count')
@api.depends('equipment_ids')
def _compute_equipment_count(self):
for employee in self:
employee.equipment_count = len(employee.equipment_ids)
| [
"[email protected]"
] | |
8aa5ecca68340cd50012898952cf72b3f349e83d | 4c5608f20fa2580774d734d94198dd10648e4339 | /src/vpp-api/vapi/vapi_json_parser.py | a9d2c8186bc30c2e9985c89375e1e97426ff3a7d | [
"Apache-2.0"
] | permissive | mojtaba-eshghie/VPP-In-Situ-IOAM | 3d1c3d01752a7934d2f060326674280e0bd93413 | efebd91195eb1b0d98a4a1f5efd962ae79c77be6 | refs/heads/master | 2022-12-10T13:37:04.644952 | 2020-05-29T11:42:36 | 2020-05-29T11:42:36 | 194,249,816 | 2 | 0 | Apache-2.0 | 2022-12-08T05:17:31 | 2019-06-28T09:50:05 | C | UTF-8 | Python | false | false | 18,764 | py | #!/usr/bin/env python2
import json
class ParseError (Exception):
pass
magic_prefix = "vl_api_"
magic_suffix = "_t"
def remove_magic(what):
if what.startswith(magic_prefix) and what.endswith(magic_suffix):
return what[len(magic_prefix): - len(magic_suffix)]
return what
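# e.g. remove_magic('vl_api_ip4_address_t') -> 'ip4_address' (illustrative)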
class Field(object):
def __init__(self, field_name, field_type, array_len=None,
nelem_field=None):
self.name = field_name
self.type = field_type
self.len = array_len
self.nelem_field = nelem_field
def __str__(self):
if self.len is None:
return "Field(name: %s, type: %s)" % (self.name, self.type)
elif self.len > 0:
return "Field(name: %s, type: %s, length: %s)" % (self.name,
self.type,
self.len)
else:
return (
"Field(name: %s, type: %s, variable length stored in: %s)" %
(self.name, self.type, self.nelem_field))
def is_vla(self):
return self.nelem_field is not None
def has_vla(self):
return self.is_vla() or self.type.has_vla()
class Alias(Field):
pass
class Type(object):
def __init__(self, name):
self.name = name
def __str__(self):
return self.name
class SimpleType (Type):
def has_vla(self):
return False
def get_msg_header_defs(struct_type_class, field_class, json_parser, logger):
return [
struct_type_class(['msg_header1_t',
['u16', '_vl_msg_id'],
['u32', 'context'],
],
json_parser, field_class, logger
),
struct_type_class(['msg_header2_t',
['u16', '_vl_msg_id'],
['u32', 'client_index'],
['u32', 'context'],
],
json_parser, field_class, logger
),
]
class Struct(object):
def __init__(self, name, fields):
self.name = name
self.fields = fields
self.field_names = [n.name for n in self.fields]
self.depends = [f.type for f in self.fields]
def __str__(self):
return "[%s]" % "], [".join([str(f) for f in self.fields])
def has_vla(self):
for f in self.fields:
if f.has_vla():
return True
return False
class Enum(SimpleType):
def __init__(self, name, value_pairs, enumtype):
super(Enum, self).__init__(name)
self.type = enumtype
self.value_pairs = value_pairs
def __str__(self):
return "Enum(%s, [%s])" % (
self.name,
"], [" .join(["%s => %s" % (i, j) for i, j in self.value_pairs])
)
class Union(Type):
def __init__(self, name, type_pairs, crc):
Type.__init__(self, name)
self.crc = crc
self.type_pairs = type_pairs
self.depends = [t for t, _ in self.type_pairs]
def __str__(self):
return "Union(%s, [%s])" % (
self.name,
"], [" .join(["%s %s" % (i, j) for i, j in self.type_pairs])
)
def has_vla(self):
return False
class Message(object):
def __init__(self, logger, definition, json_parser):
struct_type_class = json_parser.struct_type_class
field_class = json_parser.field_class
self.request = None
self.logger = logger
m = definition
logger.debug("Parsing message definition `%s'" % m)
name = m[0]
self.name = name
logger.debug("Message name is `%s'" % name)
ignore = True
self.header = None
self.is_reply = json_parser.is_reply(self.name)
self.is_event = json_parser.is_event(self.name)
fields = []
for header in get_msg_header_defs(struct_type_class, field_class,
json_parser, logger):
logger.debug("Probing header `%s'" % header.name)
if header.is_part_of_def(m[1:]):
self.header = header
logger.debug("Found header `%s'" % header.name)
fields.append(field_class(field_name='header',
field_type=self.header))
ignore = False
break
if ignore and not self.is_event and not self.is_reply:
raise ParseError("While parsing message `%s': could not find all "
"common header fields" % name)
for field in m[1:]:
if len(field) == 1 and 'crc' in field:
self.crc = field['crc']
logger.debug("Found CRC `%s'" % self.crc)
continue
else:
field_type = json_parser.lookup_type_like_id(field[0])
logger.debug("Parsing message field `%s'" % field)
l = len(field)
if any(type(n) is dict for n in field):
l -= 1
if l == 2:
if self.header is not None and\
self.header.has_field(field[1]):
continue
p = field_class(field_name=field[1],
field_type=field_type)
elif l == 3:
if field[2] == 0:
raise ParseError(
"While parsing message `%s': variable length "
"array `%s' doesn't have reference to member "
"containing the actual length" % (
name, field[1]))
p = field_class(
field_name=field[1],
field_type=field_type,
array_len=field[2])
elif l == 4:
nelem_field = None
for f in fields:
if f.name == field[3]:
nelem_field = f
if nelem_field is None:
raise ParseError(
"While parsing message `%s': couldn't find "
"variable length array `%s' member containing "
"the actual length `%s'" % (
name, field[1], field[3]))
p = field_class(
field_name=field[1],
field_type=field_type,
array_len=field[2],
nelem_field=nelem_field)
else:
raise Exception("Don't know how to parse message "
"definition for message `%s': `%s'" %
(m, m[1:]))
logger.debug("Parsed field `%s'" % p)
fields.append(p)
self.fields = fields
self.depends = [f.type for f in self.fields]
logger.debug("Parsed message: %s" % self)
def __str__(self):
return "Message(%s, [%s], {crc: %s}" % \
(self.name,
"], [".join([str(f) for f in self.fields]),
self.crc)
class StructType (Type, Struct):
def __init__(self, definition, json_parser, field_class, logger):
t = definition
logger.debug("Parsing struct definition `%s'" % t)
name = t[0]
fields = []
for field in t[1:]:
if len(field) == 1 and 'crc' in field:
self.crc = field['crc']
continue
field_type = json_parser.lookup_type_like_id(field[0])
logger.debug("Parsing type field `%s'" % field)
if len(field) == 2:
p = field_class(field_name=field[1],
field_type=field_type)
elif len(field) == 3:
if field[2] == 0:
raise ParseError("While parsing type `%s': array `%s' has "
"variable length" % (name, field[1]))
p = field_class(field_name=field[1],
field_type=field_type,
array_len=field[2])
elif len(field) == 4:
nelem_field = None
for f in fields:
if f.name == field[3]:
nelem_field = f
if nelem_field is None:
raise ParseError(
"While parsing message `%s': couldn't find "
"variable length array `%s' member containing "
"the actual length `%s'" % (
name, field[1], field[3]))
p = field_class(field_name=field[1],
field_type=field_type,
array_len=field[2],
nelem_field=nelem_field)
else:
raise ParseError(
"Don't know how to parse field `%s' of type definition "
"for type `%s'" % (field, t))
fields.append(p)
Type.__init__(self, name)
Struct.__init__(self, name, fields)
def __str__(self):
return "StructType(%s, %s)" % (Type.__str__(self),
Struct.__str__(self))
def has_field(self, name):
return name in self.field_names
def is_part_of_def(self, definition):
for idx in range(len(self.fields)):
field = definition[idx]
p = self.fields[idx]
if field[1] != p.name:
return False
if field[0] != p.type.name:
raise ParseError(
"Unexpected field type `%s' (should be `%s'), "
"while parsing msg/def/field `%s/%s/%s'" %
(field[0], p.type, p.name, definition, field))
return True
class JsonParser(object):
def __init__(self, logger, files, simple_type_class=SimpleType,
enum_class=Enum, union_class=Union,
struct_type_class=StructType, field_class=Field,
message_class=Message, alias_class=Alias):
self.services = {}
self.messages = {}
self.enums = {}
self.unions = {}
self.aliases = {}
self.types = {
x: simple_type_class(x) for x in [
'i8', 'i16', 'i32', 'i64',
'u8', 'u16', 'u32', 'u64',
'f64', 'bool'
]
}
self.types['string'] = simple_type_class('vl_api_string_t')
self.replies = set()
self.events = set()
self.simple_type_class = simple_type_class
self.enum_class = enum_class
self.union_class = union_class
self.struct_type_class = struct_type_class
self.field_class = field_class
self.alias_class = alias_class
self.message_class = message_class
self.exceptions = []
self.json_files = []
self.types_by_json = {}
self.enums_by_json = {}
self.unions_by_json = {}
self.aliases_by_json = {}
self.messages_by_json = {}
self.logger = logger
for f in files:
self.parse_json_file(f)
self.finalize_parsing()
def parse_json_file(self, path):
self.logger.info("Parsing json api file: `%s'" % path)
self.json_files.append(path)
self.types_by_json[path] = []
self.enums_by_json[path] = []
self.unions_by_json[path] = []
self.aliases_by_json[path] = []
self.messages_by_json[path] = {}
with open(path) as f:
j = json.load(f)
for k in j['services']:
if k in self.services:
raise ParseError("Duplicate service `%s'" % k)
self.services[k] = j['services'][k]
self.replies.add(self.services[k]["reply"])
if "events" in self.services[k]:
for x in self.services[k]["events"]:
self.events.add(x)
for e in j['enums']:
name = e[0]
value_pairs = e[1:-1]
enumtype = self.types[e[-1]["enumtype"]]
enum = self.enum_class(name, value_pairs, enumtype)
self.enums[enum.name] = enum
self.logger.debug("Parsed enum: %s" % enum)
self.enums_by_json[path].append(enum)
exceptions = []
progress = 0
last_progress = 0
while True:
for u in j['unions']:
name = u[0]
if name in self.unions:
progress = progress + 1
continue
try:
type_pairs = [[self.lookup_type_like_id(t), n]
for t, n in u[1:]]
union = self.union_class(name, type_pairs, 0)
progress = progress + 1
except ParseError as e:
exceptions.append(e)
continue
self.unions[union.name] = union
self.logger.debug("Parsed union: %s" % union)
self.unions_by_json[path].append(union)
for name, body in j['aliases'].iteritems():
if name in self.aliases:
progress = progress + 1
continue
if 'length' in body:
array_len = body['length']
else:
array_len = None
t = self.types[body['type']]
alias = self.alias_class(name, t, array_len)
self.aliases[name] = alias
self.logger.debug("Parsed alias: %s" % alias)
self.aliases_by_json[path].append(alias)
for t in j['types']:
if t[0] in self.types:
progress = progress + 1
continue
try:
type_ = self.struct_type_class(t, self,
self.field_class,
self.logger)
if type_.name in self.types:
raise ParseError(
"Duplicate type `%s'" % type_.name)
progress = progress + 1
except ParseError as e:
exceptions.append(e)
continue
self.types[type_.name] = type_
self.types_by_json[path].append(type_)
self.logger.debug("Parsed type: %s" % type_)
if not exceptions:
# finished parsing
break
if progress <= last_progress:
# cannot make forward progress
self.exceptions.extend(exceptions)
break
exceptions = []
last_progress = progress
progress = 0
prev_length = len(self.messages)
processed = []
while True:
exceptions = []
for m in j['messages']:
if m in processed:
continue
try:
msg = self.message_class(self.logger, m, self)
if msg.name in self.messages:
raise ParseError(
"Duplicate message `%s'" % msg.name)
except ParseError as e:
exceptions.append(e)
continue
self.messages[msg.name] = msg
self.messages_by_json[path][msg.name] = msg
processed.append(m)
if prev_length == len(self.messages):
# cannot make forward progress ...
self.exceptions.extend(exceptions)
break
prev_length = len(self.messages)
def lookup_type_like_id(self, name):
mundane_name = remove_magic(name)
if name in self.types:
return self.types[name]
elif name in self.enums:
return self.enums[name]
elif name in self.unions:
return self.unions[name]
elif name in self.aliases:
return self.aliases[name]
elif mundane_name in self.types:
return self.types[mundane_name]
elif mundane_name in self.enums:
return self.enums[mundane_name]
elif mundane_name in self.unions:
return self.unions[mundane_name]
elif mundane_name in self.aliases:
return self.aliases[mundane_name]
raise ParseError(
"Could not find type, enum or union by magic name `%s' nor by "
"mundane name `%s'" % (name, mundane_name))
def is_reply(self, message):
return message in self.replies
def is_event(self, message):
return message in self.events
def get_reply(self, message):
return self.messages[self.services[message]['reply']]
def finalize_parsing(self):
if len(self.messages) == 0:
for e in self.exceptions:
self.logger.warning(e)
for jn, j in self.messages_by_json.items():
remove = []
for n, m in j.items():
try:
if not m.is_reply and not m.is_event:
try:
m.reply = self.get_reply(n)
if "stream" in self.services[m.name]:
m.reply_is_stream = \
self.services[m.name]["stream"]
else:
m.reply_is_stream = False
m.reply.request = m
except:
raise ParseError(
"Cannot find reply to message `%s'" % n)
except ParseError as e:
self.exceptions.append(e)
remove.append(n)
self.messages_by_json[jn] = {
k: v for k, v in j.items() if k not in remove}
| [
"[email protected]"
] | |
ee44bd9403d965734f8cccd64c37a5995b76953c | f07a42f652f46106dee4749277d41c302e2b7406 | /Data Set/bug-fixing-1/89d09bae21e22374af7fcaf39c189233621e7ed2-<main>-fix.py | fadbbe0da4b2f852c816a0aebf9ae4773b9e8aa0 | [] | no_license | wsgan001/PyFPattern | e0fe06341cc5d51b3ad0fe29b84098d140ed54d1 | cc347e32745f99c0cd95e79a18ddacc4574d7faa | refs/heads/main | 2023-08-25T23:48:26.112133 | 2021-10-23T14:11:22 | 2021-10-23T14:11:22 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 13,267 | py |
def main():
argument_spec = ec2_argument_spec()
argument_spec.update(dict(name=dict(), group_id=dict(), description=dict(), vpc_id=dict(), rules=dict(type='list'), rules_egress=dict(type='list'), state=dict(default='present', type='str', choices=['present', 'absent']), purge_rules=dict(default=True, required=False, type='bool'), purge_rules_egress=dict(default=True, required=False, type='bool'), tags=dict(required=False, type='dict', aliases=['resource_tags']), purge_tags=dict(default=True, required=False, type='bool')))
module = AnsibleModule(argument_spec=argument_spec, supports_check_mode=True, required_one_of=[['name', 'group_id']], required_if=[['state', 'present', ['name']]])
if (not HAS_BOTO3):
module.fail_json(msg='boto3 required for this module')
name = module.params['name']
group_id = module.params['group_id']
description = module.params['description']
vpc_id = module.params['vpc_id']
rules = deduplicate_rules_args(rules_expand_sources(rules_expand_ports(module.params['rules'])))
rules_egress = deduplicate_rules_args(rules_expand_sources(rules_expand_ports(module.params['rules_egress'])))
state = module.params.get('state')
purge_rules = module.params['purge_rules']
purge_rules_egress = module.params['purge_rules_egress']
tags = module.params['tags']
purge_tags = module.params['purge_tags']
if ((state == 'present') and (not description)):
module.fail_json(msg='Must provide description when state is present.')
changed = False
(region, ec2_url, aws_connect_params) = get_aws_connection_info(module, boto3=True)
if (not region):
module.fail_json(msg='The AWS region must be specified as an environment variable or in the AWS credentials profile.')
client = boto3_conn(module, conn_type='client', resource='ec2', endpoint=ec2_url, region=region, **aws_connect_params)
group = None
groups = dict()
security_groups = []
try:
response = get_security_groups_with_backoff(client)
security_groups = response.get('SecurityGroups', [])
except botocore.exceptions.NoCredentialsError as e:
module.fail_json(msg=('Error in describe_security_groups: %s' % 'Unable to locate credentials'), exception=traceback.format_exc())
except botocore.exceptions.ClientError as e:
module.fail_json(msg=('Error in describe_security_groups: %s' % e), exception=traceback.format_exc(), **camel_dict_to_snake_dict(e.response))
for sg in security_groups:
groups[sg['GroupId']] = sg
groupName = sg['GroupName']
if (groupName in groups):
if (groups[groupName].get('VpcId') == vpc_id):
pass
elif ((vpc_id is None) and (groups[groupName].get('VpcId') is None)):
pass
else:
groups[groupName] = sg
else:
groups[groupName] = sg
if (group_id and (sg['GroupId'] == group_id)):
group = sg
elif ((groupName == name) and ((vpc_id is None) or (sg.get('VpcId') == vpc_id))):
group = sg
if (state == 'absent'):
if group:
try:
if (not module.check_mode):
client.delete_security_group(GroupId=group['GroupId'])
except botocore.exceptions.ClientError as e:
module.fail_json(msg=("Unable to delete security group '%s' - %s" % (group, e)), exception=traceback.format_exc(), **camel_dict_to_snake_dict(e.response))
else:
group = None
changed = True
else:
pass
elif (state == 'present'):
if group:
if (group['Description'] != description):
module.fail_json(msg='Group description does not match existing group. ec2_group does not support this case.')
else:
if (not module.check_mode):
params = dict(GroupName=name, Description=description)
if vpc_id:
params['VpcId'] = vpc_id
group = client.create_security_group(**params)
while True:
group = get_security_groups_with_backoff(client, GroupIds=[group['GroupId']])['SecurityGroups'][0]
if (group.get('VpcId') and (not group.get('IpPermissionsEgress'))):
pass
else:
break
changed = True
if (tags is not None):
current_tags = boto3_tag_list_to_ansible_dict(group.get('Tags', []))
(tags_need_modify, tags_to_delete) = compare_aws_tags(current_tags, tags, purge_tags)
if tags_to_delete:
try:
client.delete_tags(Resources=[group['GroupId']], Tags=[{
'Key': tag,
} for tag in tags_to_delete])
except botocore.exceptions.ClientError as e:
module.fail_json(msg=e.message, exception=traceback.format_exc(), **camel_dict_to_snake_dict(e.response))
changed = True
if tags_need_modify:
try:
client.create_tags(Resources=[group['GroupId']], Tags=ansible_dict_to_boto3_tag_list(tags_need_modify))
except botocore.exceptions.ClientError as e:
module.fail_json(msg=e.message, exception=traceback.format_exc(), **camel_dict_to_snake_dict(e.response))
changed = True
else:
module.fail_json(msg=('Unsupported state requested: %s' % state))
ip_permission = []
if group:
groupRules = {
}
add_rules_to_lookup(group['IpPermissions'], group['GroupId'], 'in', groupRules)
if (rules is not None):
for rule in rules:
validate_rule(module, rule)
(group_id, ip, ipv6, target_group_created) = get_target_from_rule(module, client, rule, name, group, groups, vpc_id)
if target_group_created:
changed = True
if (rule['proto'] in ('all', '-1', (- 1))):
rule['proto'] = (- 1)
rule['from_port'] = None
rule['to_port'] = None
if group_id:
rule_id = make_rule_key('in', rule, group['GroupId'], group_id)
if (rule_id in groupRules):
del groupRules[rule_id]
else:
if (not module.check_mode):
ip_permission = serialize_group_grant(group_id, rule)
if ip_permission:
ips = ip_permission
if vpc_id:
[useridpair.update({
'VpcId': vpc_id,
}) for useridpair in ip_permission.get('UserIdGroupPairs', [])]
try:
client.authorize_security_group_ingress(GroupId=group['GroupId'], IpPermissions=[ips])
except botocore.exceptions.ClientError as e:
module.fail_json(msg=("Unable to authorize ingress for group %s security group '%s' - %s" % (group_id, group['GroupName'], e)), exception=traceback.format_exc(), **camel_dict_to_snake_dict(e.response))
changed = True
elif ip:
if (ip and (not isinstance(ip, list))):
ip = [ip]
(changed, ip_permission) = authorize_ip('in', changed, client, group, groupRules, ip, ip_permission, module, rule, 'ipv4')
elif ipv6:
if (not isinstance(ipv6, list)):
ipv6 = [ipv6]
(changed, ip_permission) = authorize_ip('in', changed, client, group, groupRules, ipv6, ip_permission, module, rule, 'ipv6')
if purge_rules:
for (rule, grant) in groupRules.values():
ip_permission = serialize_revoke(grant, rule)
if (not module.check_mode):
try:
client.revoke_security_group_ingress(GroupId=group['GroupId'], IpPermissions=[ip_permission])
except botocore.exceptions.ClientError as e:
module.fail_json(msg=("Unable to revoke ingress for security group '%s' - %s" % (group['GroupName'], e)), exception=traceback.format_exc(), **camel_dict_to_snake_dict(e.response))
changed = True
groupRules = {
}
add_rules_to_lookup(group['IpPermissionsEgress'], group['GroupId'], 'out', groupRules)
if (rules_egress is not None):
for rule in rules_egress:
validate_rule(module, rule)
(group_id, ip, ipv6, target_group_created) = get_target_from_rule(module, client, rule, name, group, groups, vpc_id)
if target_group_created:
changed = True
if (rule['proto'] in ('all', '-1', (- 1))):
rule['proto'] = (- 1)
rule['from_port'] = None
rule['to_port'] = None
if group_id:
rule_id = make_rule_key('out', rule, group['GroupId'], group_id)
if (rule_id in groupRules):
del groupRules[rule_id]
else:
if (not module.check_mode):
ip_permission = serialize_group_grant(group_id, rule)
if ip_permission:
ips = ip_permission
if vpc_id:
[useridpair.update({
'VpcId': vpc_id,
}) for useridpair in ip_permission.get('UserIdGroupPairs', [])]
try:
client.authorize_security_group_egress(GroupId=group['GroupId'], IpPermissions=[ips])
except botocore.exceptions.ClientError as e:
module.fail_json(msg=("Unable to authorize egress for group %s security group '%s' - %s" % (group_id, group['GroupName'], e)), exception=traceback.format_exc(), **camel_dict_to_snake_dict(e.response))
changed = True
elif ip:
if (not isinstance(ip, list)):
ip = [ip]
(changed, ip_permission) = authorize_ip('out', changed, client, group, groupRules, ip, ip_permission, module, rule, 'ipv4')
elif ipv6:
if (not isinstance(ipv6, list)):
ipv6 = [ipv6]
(changed, ip_permission) = authorize_ip('out', changed, client, group, groupRules, ipv6, ip_permission, module, rule, 'ipv6')
elif (vpc_id is not None):
default_egress_rule = (('out--1-None-None-' + group['GroupId']) + '-0.0.0.0/0')
if (default_egress_rule not in groupRules):
if (not module.check_mode):
ip_permission = [{
'IpProtocol': '-1',
'IpRanges': [{
'CidrIp': '0.0.0.0/0',
}],
}]
try:
client.authorize_security_group_egress(GroupId=group['GroupId'], IpPermissions=ip_permission)
except botocore.exceptions.ClientError as e:
module.fail_json(msg=("Unable to authorize egress for ip %s security group '%s' - %s" % ('0.0.0.0/0', group['GroupName'], e)), exception=traceback.format_exc(), **camel_dict_to_snake_dict(e.response))
changed = True
else:
del groupRules[default_egress_rule]
if (purge_rules_egress and (vpc_id is not None)):
for (rule, grant) in groupRules.values():
if (grant != '0.0.0.0/0'):
ip_permission = serialize_revoke(grant, rule)
if (not module.check_mode):
try:
client.revoke_security_group_egress(GroupId=group['GroupId'], IpPermissions=[ip_permission])
except botocore.exceptions.ClientError as e:
module.fail_json(msg=("Unable to revoke egress for ip %s security group '%s' - %s" % (grant, group['GroupName'], e)), exception=traceback.format_exc(), **camel_dict_to_snake_dict(e.response))
changed = True
if group:
security_group = get_security_groups_with_backoff(client, GroupIds=[group['GroupId']])['SecurityGroups'][0]
security_group = camel_dict_to_snake_dict(security_group)
security_group['tags'] = boto3_tag_list_to_ansible_dict(security_group.get('tags', []), tag_name_key_name='key', tag_value_key_name='value')
module.exit_json(changed=changed, **security_group)
else:
module.exit_json(changed=changed, group_id=None)
| [
"[email protected]"
] | |
caaf5af5646fa7468387887b3bcc943ccfb9293f | a2e638cd0c124254e67963bda62c21351881ee75 | /Extensions/FrontCache/FPythonCode/FC_TCOLL_01_ATS_48.py | cc9794df7b6147718d9bfd202883a84d9f122953 | [] | no_license | webclinic017/fa-absa-py3 | 1ffa98f2bd72d541166fdaac421d3c84147a4e01 | 5e7cc7de3495145501ca53deb9efee2233ab7e1c | refs/heads/main | 2023-04-19T10:41:21.273030 | 2021-05-10T08:50:05 | 2021-05-10T08:50:05 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 6,150 | py |
'''----------------------------------------------------------------------------------------------------------
MODULE : FC_TCOLL_01_ATS_48
PROJECT : FX onto Front Arena
PURPOSE : This module is the entry point for the Trade Collection ATSs. These ATSs will
subscribe to Trade Collection Requests. They will pull the relevant Front Cache
data from the Front Cache Trading Manager Template for the specific trades in the
incoming request. Once a Request and/or Batch is complete, a Response message
will be posted onto the AMB so that the Response can be sent to subscribing
consumers to notify them that the data for the Request or Batch is available
for consumption.
DEPARTMENT AND DESK : All Departments and all Desks.
REQUESTER : FX onto Front Arena Project
DEVELOPER : Heinrich Cronje
CR NUMBER : XXXXXX
-------------------------------------------------------------------------------------------------------------
'''
'''----------------------------------------------------------------------------------------------------------
Importing all relevant Python and custom modules needed for the ATS to start up. Initializing the FC_UTILS
module to load all Parameters, Logging, Error Handler.
----------------------------------------------------------------------------------------------------------'''
import FC_ERROR_HANDLER_DEFAULT as ERROR_HANDLER_DEFAULT
import traceback
try:
from FC_UTILS import FC_UTILS as UTILS
except ImportError, e:
ERROR_HANDLER_DEFAULT.handelError('Import Error in module %s.' %__name__, e, traceback)
raise ImportError('Import Error in module %s. ERROR: %s.' %(__name__, str(e)))
try:
UTILS.Initialize(__name__)
except Exception, e:
ERROR_HANDLER_DEFAULT.handelError('Initialization Error in module %s. FC_UTILS could not be initialized. '
'No Parameters, Logging or Error Handling could be loaded. '
'The ATS will not start until the root issue is resolved.' %__name__, e, traceback)
raise Exception('Initialization Error in module %s. FC_UTILS could not be initialized. '
'No Parameters, Logging or Error Handling could be loaded. '
'The ATS will not start until the root issue is resolved. ERROR: %s. ' %(__name__, str(e)))
try:
from FC_EXCEPTION import FC_EXCEPTION as EXCEPTION
except ImportError, e:
ERROR_HANDLER_DEFAULT.handelError('Import Error in module %s. FC_EXCEPTION could not be imported. '
'No Error Handling could be loaded. '
'The ATS will not start until the root issue is resolved.' %__name__, e, traceback)
raise Exception('Import Error in module %s. FC_EXCEPTION could not be imported. '
'No Error Handling could be loaded. '
'The ATS will not start until the root issue is resolved. ERROR: %s. ' %(__name__, str(e)))
try:
from datetime import datetime
except ImportError, e:
UTILS.ErrorHandler.processError(None, EXCEPTION('Import Error in module %s. Module datetime could not be imported. '
'The ATS will not start until the root issue is resolved.' %__name__, traceback, 'CRITICAL', e), __name__)
raise Exception('Import Error in module %s. Module datetime could not be imported. '
'The ATS will not start until the root issue is resolved. ERROR: %s' %(__name__, str(e)))
try:
from FC_TCOLL_ATS_WORKER import FC_TCOLL_ATS_WORKER as TCOLL_ATS_WORKER
except ImportError, e:
UTILS.ErrorHandler.processError(None, EXCEPTION('Could not import the worker module in module %s' %__name__, traceback, 'CRITICAL', None), __name__)
raise Exception('Could not import the worker module in module %s. ERROR: %s' %(__name__, str(e)))
'''----------------------------------------------------------------------------------------------------------
Global variables
-------------------------------------------------------------------------------------------------------------
'''
global worker
worker = None
'''----------------------------------------------------------------------------------------------------------
work function which the ATS will call once started.
-------------------------------------------------------------------------------------------------------------
'''
def work():
global worker
if not worker:
UTILS.ErrorHandler.processError(None, EXCEPTION(UTILS.Constants.fcExceptionConstants.WORKER_VARIABLE_S_IS_NOT_INSTANTIATED %__name__, traceback, UTILS.Constants.fcGenericConstants.CRITICAL, None), __name__)
else:
worker.work()
'''----------------------------------------------------------------------------------------------------------
start function which the ATS will call when the ATS is starting.
-------------------------------------------------------------------------------------------------------------
'''
def start():
UTILS.Logger.flogger.info(UTILS.Constants.fcFloggerConstants.STARTING_ATS_S_AT_S %(__name__, datetime.now()))
global worker
if not worker:
worker = TCOLL_ATS_WORKER()
worker.start()
'''----------------------------------------------------------------------------------------------------------
stop function which the ATS will call when the ATS is stopping.
-------------------------------------------------------------------------------------------------------------
'''
def stop():
global worker
if not worker:
UTILS.ErrorHandler.processError(None, EXCEPTION(UTILS.Constants.fcExceptionConstants.WORKER_VARIABLE_IN_S_IS_NOT_INSTANTIATED_STOP %__name__, traceback, UTILS.Constants.fcGenericConstants.MEDIUM, None), __name__)
else:
worker.stop()
#start()
#work()
#stop()
| [
"[email protected]"
] | |
2de6c9501b1b8560c72788d40905ffe4818ba046 | f305f84ea6f721c2391300f0a60e21d2ce14f2a5 | 5_map/构造O(1)复杂度数组.py | 4022012734ec37223659443e2deaa1ed6ec62b0f | [] | no_license | 981377660LMT/algorithm-study | f2ada3e6959338ae1bc21934a84f7314a8ecff82 | 7e79e26bb8f641868561b186e34c1127ed63c9e0 | refs/heads/master | 2023-09-01T18:26:16.525579 | 2023-09-01T12:21:58 | 2023-09-01T12:21:58 | 385,861,235 | 225 | 24 | null | null | null | null | UTF-8 | Python | false | false | 665 | py | # Design a special array in which each of the following three operations on the data structure runs in O(1) time:
# 1. read the element at a given position in the array
# 2. set the element at a given position to a specified value
# 3. set all elements of the array to a specified value
from collections import defaultdict
class SpecialArray:
__slots__ = "_data"
def __init__(self) -> None:
self._data = defaultdict(int)
def get(self, index: int) -> int:
return self._data[index]
def set(self, index: int, value: int) -> None:
self._data[index] = value
def setAll(self, value: int) -> None:
self._data = defaultdict(lambda: value)
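# A minimal usage sketch (added for illustration, not part of the original
# solution): setAll works by swapping in a fresh defaultdict whose default
# is the new value, so untouched indices read it too.
if __name__ == "__main__":
    arr = SpecialArray()
    arr.set(3, 7)
    assert arr.get(3) == 7
    arr.setAll(5)
    assert arr.get(3) == 5 and arr.get(100) == 5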
| [
"[email protected]"
] | |
d6f0dd5c587a5205dc3e3b19517b90443f991d4e | 97e349765284a1239580f4ae6943f597797fdc0d | /dingweitest/test1.py | 88b98797ba89775967126c4b52b4988562f63047 | [] | no_license | chenhanfang/test2 | 716aa9b1f875a6c88bfc6fb45ddc9879441c3c34 | 5d9d44086815bdf514636a1fc14bcd2c1f4284a5 | refs/heads/master | 2021-01-20T14:22:51.885745 | 2017-05-09T01:59:34 | 2017-05-09T01:59:34 | 90,597,906 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,244 | py | #coding=utf-8
from selenium import webdriver
from selenium.webdriver.common.action_chains import ActionChains#######class for mouse events
import time
from selenium.webdriver.common.desired_capabilities import DesiredCapabilities
driver=webdriver.Remote(desired_capabilities=DesiredCapabilities.CHROME)
driver.get('http://www.baidu.com/')
time.sleep(1)
driver.find_element_by_xpath('//a[@href="http://www.baidu.com/gaoji/preferences.html" and @class="pf"]').click()###Settings
driver.find_element_by_xpath('//a[@class="setpref" and @href="javascript:;"]').click()###Search settings
time.sleep(1)
m=driver.find_element_by_xpath("//select[@name='NR']")####dropdown (select) operation
m.find_element_by_xpath("//option[@value='20']").click()
time.sleep(1)
driver.find_element_by_xpath("//a[@class='prefpanelgo']").click()
time.sleep(1)
date=driver.switch_to.alert.text####returns the text shown in the alert/confirm/prompt
print(date)
driver.switch_to.alert.accept()####accept the popup that has an OK button, i.e. confirm the prompt
'''dismiss clicks the Cancel button, if a Cancel button exists; send_keys types a value, but it
cannot be used on an alert/confirm that has no input box, otherwise it raises an error'''
cookie=driver.get_cookies()#get cookies
print(cookie)
driver.find_element_by_xpath("//input[@id='kw']").send_keys('selenium')
driver.find_element_by_xpath("//input[@id='su']").click()
time.sleep(2)
js="var q=document.documentElement.scrollTop=1000"###drag the page scrollbar to the bottom
driver.execute_script(js)
time.sleep(2)
# data=driver.find_element_by_xpath('//p[@id="cp"]').text####get the element's text
# print(data)
# driver.find_element_by_xpath('//a[@name="tj_mp3"]').click()
print(driver.title)####print the browser title
# driver.set_window_size(480,800)
# driver.back()####go back
# time.sleep(2)
# driver.forward()#####go forward
'''
qqq=driver.find_element_by_xpath("///")
ActionChains(driver).context_click(qqq).perform()####right-click event
ActionChains(driver).double_click(qqq).perform()####double-click event
ppp=driver.find_element_by_xpath("///")
ActionChains(driver).drag_and_drop(qqq,ppp).perform()####drag-and-drop event; perform() executes all stored actions
switch_to_frame()#####locating a frame or a window
switch_to_window()
'''
| [
"[email protected]"
] | |
6f66ef247d249d472ab1dc47b7de6f50ed703bd3 | bfaa8a34f0d954fd91d57e101556aab01099d0ea | /beary/__init__.py | 97da988c56cb1c2f294b7bd6ea5be11500110eda | [] | no_license | vtmer/bearychat | cf9167385c4451c45c9ea2caffd25298ec950855 | 8223151d74a0f859274e60032b1dfe8be3cd7db2 | refs/heads/master | 2016-09-05T16:09:35.369036 | 2015-06-29T08:05:42 | 2015-06-29T08:05:42 | 33,439,045 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 97 | py | # coding: utf-8
'''
beary
~~~~~
The naughty kid in vtmer ʅ(´◔౪◔)ʃ
'''
| [
"[email protected]"
] | |
914fa86716ed865bb5eabf6824fd0f4239243ca5 | 163c66e58f04268c884335ed66461d5ddf513280 | /hw2/quicksort.py | 9d115b440faadc402a3c34e630c76fdcad1375f1 | [] | no_license | pz325/Coursera_ADA | 4ca0d8273c0571b45364b951d52a5d06cbdc652c | b968dd6b60f73d1ebe34195ddfa7fc39df3726cd | refs/heads/master | 2016-09-05T22:16:13.865655 | 2014-11-18T21:27:54 | 2014-11-18T21:27:54 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,269 | py | def chooseFirstElementAsPivot(A, l, r):
return A[l]
def chooseLastElementAsPivot(A, l, r):
tmp = A[l]
A[l] = A[r]
A[r] = tmp
return A[l]
def chooseMedianOfThreeAsPivot(A, l, r):
if r - l == 1:
return chooseFirstElementAsPivot(A, l, r)
mid = (r - l) / 2 + l
# print(l, mid, r)
# print(A[l], A[mid], A[r])
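    # A[mid] is the median of the three (assuming distinct values) exactly
    # when it lies between A[l] and A[r], i.e. the two differences below
    # have opposite signs and their product is negative.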
if (A[mid]-A[l])*(A[mid]-A[r]) < 0:
tmp = A[l]
A[l] = A[mid]
A[mid] = tmp
if (A[r]-A[l])*(A[r]-A[mid]) < 0:
tmp = A[l]
A[l] = A[r]
A[r] = tmp
return A[l]
def quicksort(A, l, r, choosePivot):
# print('========')
# print('before sort', A)
compares = r - l
if r - l <= 0: return 0
pivot = choosePivot(A, l, r)
# print('pivot', pivot)
# print('choose pivot', A)
l1, r1, l2, r2 = partition(A, l, r, pivot)
# print(A[l1:r1+1], A[l2:r2+1])
# print('after partition', A)
compares += quicksort(A, l1, r1, choosePivot)
# print('sort 1st part', A)
compares += quicksort(A, l2, r2, choosePivot)
# print('sort 2nd part', A)
return compares
def partition(A, l, r, pivot):
i = l + 1
for j in range(l+1, r+1):
if A[j] < pivot:
tmp = A[j]
A[j] = A[i]
A[i] = tmp
i += 1
tmp = A[l]
A[l] = A[i-1]
A[i-1] = tmp
l1 = l
r1 = i-2
l2 = i
r2 = r
return l1, r1, l2, r2
def test():
A = [3, 8, 2, 5, 1, 4, 7, 6]
compares = quicksort(A, 0, 7, chooseFirstElementAsPivot)
print(compares)
solution('10.txt')
solution('100.txt')
solution('1000.txt')
def solution(source):
print(source)
A = [int(l.strip()) for l in open(source).readlines()]
compares = quicksort(A, 0, len(A)-1, chooseFirstElementAsPivot)
print('choose 1st element', compares)
A = [int(l.strip()) for l in open(source).readlines()]
compares = quicksort(A, 0, len(A)-1, chooseLastElementAsPivot)
print('choose last element', compares)
A = [int(l.strip()) for l in open(source).readlines()]
compares = quicksort(A, 0, len(A)-1, chooseMedianOfThreeAsPivot)
print('choose median of three', compares)
def main():
test()
solution('QuickSort.txt')
if __name__ == '__main__':
main() | [
"[email protected]"
] | |
9b1aec656d50ff842d5761e6a750df7afab50cad | 2e682fd72e3feaa70e3f7bf2a3b83c50d783ec02 | /PyTorch/built-in/nlp/Bert-text-classification_for_PyTorch/transformers/tests/bartpho/test_tokenization_bartpho.py | 3e35ad15c1ee543473709c7f66f9c1e22cda20ae | [
"Apache-2.0",
"BSD-2-Clause",
"MIT",
"BSD-3-Clause",
"LicenseRef-scancode-generic-cla",
"LicenseRef-scancode-unknown-license-reference",
"GPL-1.0-or-later"
] | permissive | Ascend/ModelZoo-PyTorch | 4c89414b9e2582cef9926d4670108a090c839d2d | 92acc188d3a0f634de58463b6676e70df83ef808 | refs/heads/master | 2023-07-19T12:40:00.512853 | 2023-07-17T02:48:18 | 2023-07-17T02:48:18 | 483,502,469 | 23 | 6 | Apache-2.0 | 2022-10-15T09:29:12 | 2022-04-20T04:11:18 | Python | UTF-8 | Python | false | false | 2,656 | py | # coding=utf-8
# Copyright 2021 HuggingFace Inc. team.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import unittest
from os.path import dirname
from transformers.models.bartpho.tokenization_bartpho import VOCAB_FILES_NAMES, BartphoTokenizer
from ..test_tokenization_common import TokenizerTesterMixin
SAMPLE_VOCAB = os.path.join(dirname(dirname(os.path.abspath(__file__))), "fixtures/test_sentencepiece_bpe.model")
class BartphoTokenizerTest(TokenizerTesterMixin, unittest.TestCase):
tokenizer_class = BartphoTokenizer
test_rust_tokenizer = False
test_sentencepiece = True
def setUp(self):
super().setUp()
vocab = ["▁This", "▁is", "▁a", "▁t", "est"]
vocab_tokens = dict(zip(vocab, range(len(vocab))))
self.special_tokens_map = {"unk_token": "<unk>"}
self.monolingual_vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["monolingual_vocab_file"])
with open(self.monolingual_vocab_file, "w", encoding="utf-8") as fp:
for token in vocab_tokens:
fp.write(f"{token} {vocab_tokens[token]}\n")
tokenizer = BartphoTokenizer(SAMPLE_VOCAB, self.monolingual_vocab_file, **self.special_tokens_map)
tokenizer.save_pretrained(self.tmpdirname)
def get_tokenizer(self, **kwargs):
kwargs.update(self.special_tokens_map)
return BartphoTokenizer.from_pretrained(self.tmpdirname, **kwargs)
def get_input_output_texts(self, tokenizer):
input_text = "This is a là test"
output_text = "This is a<unk><unk> test"
return input_text, output_text
def test_full_tokenizer(self):
tokenizer = BartphoTokenizer(SAMPLE_VOCAB, self.monolingual_vocab_file, **self.special_tokens_map)
text = "This is a là test"
bpe_tokens = "▁This ▁is ▁a ▁l à ▁t est".split()
tokens = tokenizer.tokenize(text)
self.assertListEqual(tokens, bpe_tokens)
input_tokens = tokens + [tokenizer.unk_token]
input_bpe_tokens = [4, 5, 6, 3, 3, 7, 8, 3]
self.assertListEqual(tokenizer.convert_tokens_to_ids(input_tokens), input_bpe_tokens)
| [
"[email protected]"
] | |
75869a80c9112aec95c89f8cafe5d4bc41012d64 | 74b7b062b4a7b759845adc2121fa8530d55aa452 | /record_notebook_py/nicks_dataset.py | cde19e80bb298b7a9418d4931d6c7bfcd03b71f4 | [] | no_license | drschwenk/tableparse_notebooks | a817dca6bde0e4e41078f76bf756c087b49e8ea8 | 07f5daa760c5e01349fba5a360bd4aa4b6d0956c | refs/heads/master | 2021-01-22T06:18:56.168313 | 2017-06-22T22:32:22 | 2017-06-22T22:32:22 | 92,539,999 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 18,543 | py | # -*- coding: utf-8 -*-
# <nbformat>4</nbformat>
# <markdowncell>
# # Table of Contents
# * [Alternate tables](#Alternate-tables)
# * [pairing images and annotations](#pairing-images-and-annotations)
# * [sampling images](#sampling-images)
# * [Regents tables](#Regents-tables)
# * [Hough lines experiment](#Hough-lines-experiment)
# * [End](#End)
# <codecell>
%%capture
from __future__ import division
import numpy as np
import pandas as pd
import scipy.stats as st
import itertools
import math
from collections import Counter, defaultdict
%load_ext autoreload
%autoreload 2
import os
import shutil
import cv2
import PIL.Image as Image
# <markdowncell>
# # Alternate tables
# <codecell>
image_path_prefix = '../data/small_table_training/'
anno_path_prefix = '../data/exp_nicks_data/table-research/ground_truth/alternate/'
image_files = os.listdir(image_path_prefix)
anno_files = os.listdir(anno_path_prefix)
# <markdowncell>
# ## pairing images and annotations
# <codecell>
image_bases = ['.'.join(f.split('.')[:-1]) for f in image_files]
anno_bases = ['.'.join(f.split('.')[:-2]) for f in anno_files]
# <codecell>
images_with_anno = [f for f in image_files if '.'.join(f.split('.')[:-1]) in anno_bases]
# <codecell>
bases_intersection = set(image_bases).intersection(set(anno_bases))
# <codecell>
print(len(bases_intersection), len(anno_bases), len(images_with_anno))
# <markdowncell>
# images missing annotations
# <codecell>
len(image_bases)
# <codecell>
set(image_bases[:100]).difference(set(anno_bases))
# <markdowncell>
# ## sampling images
# <codecell>
sample_n = 30
# <codecell>
sample_image = images_with_anno[sample_n]
# <codecell>
Image.open(image_path_prefix + sample_image)
# <codecell>
with open(anno_path_prefix + anno_files[sample_n]) as f:
sample_anno = f.readlines()
split_lines = [l.split(',', maxsplit=4) for l in sample_anno]
# <markdowncell>
# # Regents tables
# <codecell>
regents_image_path_prefix = '../data/exp_nicks_data/regents_images/'
regents_anno_path_prefix = '../data/exp_nicks_data/regents_anno/'
# <codecell>
regents_anno = os.listdir(regents_anno_path_prefix)
# <codecell>
regents_anno_8th = {an: ".PNG" for an in regents_anno if '_8_' in an}
regents_anno_4th = {an: ".PNG" for an in regents_anno if '_4_' in an}
regents_anno_other = {an: an.replace('.jpg.txt', '.png') for an in regents_anno if an not in regents_anno_4th and an not in regents_anno_8th}
# <codecell>
# assert(set(regents_anno_other + regents_anno_8th + regents_anno_4th) == set(regents_anno))
# <codecell>
regents_images_4 = os.listdir(regents_image_path_prefix + '/4th')
regents_images_8 = os.listdir(regents_image_path_prefix + '/8th')
# regents_images_8 = [ri for ri in regents_anno_other if '2011' in ri]
# <codecell>
name_mapping = {
'2007_4_15.jpg.txt': '2007_4th_Grade_09.PNG',
'2009_4_31b.jpg.txt': '2009_4th_Grade_11.PNG',
'2009_4_40.jpg.txt': '2009_4th_Grade_18.PNG',
'2011_4_32.jpg.txt': '2011_4th_Grade_16.PNG',
'2004_8_55_2.jpg.txt': '2004_8th_Grade_53.PNG',
'2004_8_64-65.jpg.txt': '2004_8th_Grade_55.PNG',
'2005_8_38.jpg.txt': '2005_8th_Grade_26.PNG',
'2005_8_46-48.jpg.txt': '2005_8th_Grade_29.PNG',
'2005_8_79.jpg.txt': '2005_8th_Grade_44.PNG',
'2007_8_49-50.jpg.txt': '2007_8th_Grade_20.PNG',
'2007_8_60.jpg.txt': '2007_8th_Grade_27.PNG',
'2009_8_33.jpg.txt': '2009_8th_Grade_16.PNG',
'2009_8_79-81.jpg.txt': '2009_8th_Grade_41.PNG',
'2009_8_82-83b.jpg.txt': '2009_8th_Grade_43.PNG',
'2011_8_56.jpg.txt': '2011_8th_Grade_33.PNG',
'2011_8_79-80.jpg.txt': '2011_8th_Grade_46.PNG',
'2007-01-24_12_54-56.jpg.txt': '2007-01-24_12_54-56.png',
'2007-01-24_12_77-79.jpg.txt': '2007-01-24_12_77-79.png',
'2007-08-16_12_16_3.jpg.txt': '2007-08-16_12_16_3.png',
'2007-08-16_12_20.jpg.txt': '2007-08-16_12_20.png',
'2007-08-16_12_75-77.jpg.txt': '2007-08-16_12_75-77.png',
'2009-01-28_12_13_1.jpg.txt': '2009-01-28_12_13_1.png',
'2009-01-28_12_13_4.jpg.txt': '2009-01-28_12_13_4.png',
'2009-01-28_12_71-74.jpg.txt': '2009-01-28_12_71-74.png',
'2009-06-17_12_13.jpg.txt': '2009-06-17_12_13.png',
'2009-06-17_12_33_2.jpg.txt': '2009-06-17_12_33_2.png',
'2009-06-17_12_34.jpg.txt': '2009-06-17_12_34.png',
'2009-06-17_12_54-57.jpg.txt': '2009-06-17_12_54-57.png',
'2009-08-13_12_35_1.jpg.txt': '2009-08-13_12_35_1.png',
'2009-08-13_12_35_4.jpg.txt': '2009-08-13_12_35_4.png',
'2009-08-13_12_45-47.jpg.txt': '2009-08-13_12_45-47.png',
'2011-06-17_12_36-40.jpg.txt': '2011-06-17_12_36-40.png',
'2011-06-17_12_47-50.jpg.txt': '2011-06-17_12_47-50.png'
}
# <codecell>
# with open('image_anno_mapping.json', 'w') as f:
# json.dump(name_mapping, f)
# <markdowncell>
# # Build new dataset
# <codecell>
new_data_dir = '/Users/schwenk/wrk/tableparse/data/test_data/'
regents_path_prefix = '/Users/schwenk/wrk/tableparse/data/exp_nicks_data/regents_images/all_images/'
# <codecell>
def read_image_anno(img_f, anno_f=None):
if not anno_f:
ann_ext = '.jpg.txt'
anno_f = anno_path_prefix + os.path.splitext(fb)[0] + ann_ext
with open(anno_f) as f:
sample_anno = f.readlines()
split_lines = [l.split(',', maxsplit=4) for l in sample_anno]
build_image_anno = [{'text': line[-1].strip() , 'rectangle': list(map(int, line[:4]))} for line in split_lines]
image_number = str(img_counter).zfill(3)
new_img_name = 'table_' + image_number + '.png'
image_anno = {
'annotations': build_image_anno,
'imageName': new_img_name,
'tableID': 'T_' + image_number,
'legacyName': os.path.split(fb)[1],
}
return {image_anno['tableID']: image_anno}, new_img_name
# <codecell>
import ai2.vision.utils as ai2vu
# <codecell>
img_counter = 0
image_annotations = {}
# <markdowncell>
# building image annotations and standardizing images
# <codecell>
for fb in images_with_anno:
img_counter += 1
img_f = image_path_prefix + fb
img_anno, new_img_name = read_image_anno(img_f)
image_annotations.update(img_anno)
new_img = new_data_dir + 'images/' + new_img_name
# standardized_img, _ = ai2vu.standardize_images.standardize_image(img_f)
# cv2.imwrite(new_img, standardized_img)
for anno_file, img_file in name_mapping.items():
img_counter += 1
img_f = regents_path_prefix + img_file
anno_file = os.path.join(regents_anno_path_prefix, anno_file)
img_anno, new_img_name = read_image_anno(img_f, anno_file)
image_annotations.update(img_anno)
new_img = new_data_dir + 'images/' + new_img_name
print(img_f, new_img_name)
standardized_img, _ = ai2vu.standardize_images.standardize_image(img_f)
cv2.imwrite(new_img, standardized_img)
# <markdowncell>
# ### image_annotations['T_101']
# <codecell>
def random_color():
import random
return random.randint(0, 255), random.randint(0, 255), random.randint(0, 255)
def draw_detections(gt_anno):
image = cv2.imread(new_data_dir + 'images/' + gt_anno['imageName'])
color_counter = 0
for cell in gt_anno['annotations']:
cell = cell['rectangle']
start_x = cell[0]
start_y = cell[1]
end_x = cell[0] + cell[2]
end_y = cell[1] + cell[3]
cv2.rectangle(image, (start_x, start_y), (end_x, end_y), color=random_color(), thickness=2)
color_counter += 1
return Image.fromarray(image)
# <codecell>
# with open(new_data_dir + 'table_ground_truth.json', 'w') as f:
# json.dump(image_annotations, f, sort_keys=True, indent=4)
# <codecell>
test_anno = image_annotations['T_100']
draw_detections(test_anno)
# <codecell>
test_anno
# <codecell>
# <markdowncell>
# ## looking at resized images
# <codecell>
def get_max_dim(img_anno):
boxes = [(box['rectangle'][0] + box['rectangle'][2], box['rectangle'][1] + box['rectangle'][3]) for box in list(img_anno.values())[0]['annotations']]
xs, ys = list(zip(*boxes))
max_x = max(xs)
max_y = max(ys)
return max_x, max_y
# <codecell>
img_counter = 0
# <codecell>
for fb in images_with_anno:
img_counter += 1
img_f = image_path_prefix + fb
img_anno, new_img_name = read_image_anno(img_f)
max_x, max_y = get_max_dim(img_anno)
image_shape = Image.open(img_f).size
new_img = new_data_dir + 'images/' + new_img_name
standardized_img, _ = ai2vu.standardize_images.standardize_image(img_f)
resized_shape = standardized_img.shape[:2][::-1]
if image_shape != resized_shape:
print(img_counter, image_shape, resized_shape)
# <codecell>
for anno_file, img_file in name_mapping.items():
img_counter += 1
img_f = regents_path_prefix + img_file
img_anno, new_img_name = read_image_anno(img_f)
max_x, max_y = get_max_dim(img_anno)
image_shape = Image.open(img_f).size
new_img = new_data_dir + 'images/' + new_img_name
standardized_img, _ = ai2vu.standardize_images.standardize_image(img_f)
resized_shape = standardized_img.shape[:2][::-1]
if image_shape != resized_shape:
print(img_counter, image_shape, resized_shape)
# <codecell>
# <codecell>
# <markdowncell>
# # Hough lines experiment
# <codecell>
easy_image = '/Users/schwenk/wrk/tableparse/vision-tableparse/examples/example_1.png'
# img = cv2.imread(image_path_prefix + sample_image)
img = cv2.imread(easy_image)
gray = cv2.cvtColor(img,cv2.COLOR_BGR2GRAY)
edges = cv2.Canny(gray, 100, 200, apertureSize=3, L2gradient=1)
minLineLength = 30
maxLineGap = 10
lines = cv2.HoughLinesP(edges, 1, np.pi / 180, 120, minLineLength=10, maxLineGap=2)
for x in range(0, len(lines)):
for x1,y1,x2,y2 in lines[x]:
cv2.line(img,(x1,y1),(x2,y2),(0,255,0),2)
# <codecell>
Image.fromarray(edges)
# <codecell>
# You need to choose 4 or 8 for connectivity type
connectivity = 4
ret, thresh = cv2.threshold(gray,0,255,cv2.THRESH_BINARY+cv2.THRESH_OTSU)
# Perform the operation
output = cv2.connectedComponentsWithStats(thresh, connectivity, cv2.CV_32S)
# <codecell>
output[0]  # assuming the intent was to inspect the label count returned by connectedComponentsWithStats
# <codecell>
lines.shape
# <codecell>
Image.fromarray(img)
# <codecell>
import cv2
import numpy as np
import os.path
from collections import defaultdict
def ik(x, y):
return '.'.join([str(x), str(y)])
def boxes_from_intersections(image_bw, h_intersections, v_intersections, all_intersections):
boxes = []
for x_i, y_i in all_intersections:
i_key = ik(x_i, y_i)
nearest_y = 99999999
nearest_x = 99999999
found_point = False
for x_j, y_j in all_intersections:
j_key = ik(x_j, y_j)
if x_j > x_i and y_j > y_i and (h_intersections[i_key] & v_intersections[j_key]) and \
(v_intersections[i_key] & h_intersections[j_key]) and x_j <= nearest_x and y_j <= nearest_y:
nearest_x = x_j
nearest_y = y_j
found_point = True
if found_point:
# x, y, width, height, text
height = nearest_y - y_i
width = nearest_x - x_i
avg_color = (np.average(image_bw[y_i:nearest_y, x_i:nearest_x]))
if (width <= 15 or height <= 15) and avg_color == 0.0:
continue
boxes.append((x_i, y_i, width, height, []))
return boxes
def get_intersections(img, horiz_lines, vert_lines):
h_intersections = defaultdict(set)
v_intersections = defaultdict(set)
all_intersections = set()
for h_x1, h_y1, h_x2, h_y2 in horiz_lines:
intersect_set = set()
for v_x1, v_y1, v_x2, v_y2 in vert_lines:
if v_x1 >= h_x1 and v_x1 <= h_x2 and v_y1 <= h_y1 and v_y2 >= h_y1:
i_key = ik(v_x1, h_y1)
intersect_set.add(i_key)
if len(intersect_set) > 2:
for s in intersect_set:
all_intersections.add(tuple(map(int, s.split('.'))))
h_intersections[s] = intersect_set
for v_x1, v_y1, v_x2, v_y2 in vert_lines:
intersect_set = set()
for h_x1, h_y1, h_x2, h_y2 in horiz_lines:
if v_x1 >= h_x1 and v_x1 <= h_x2 and v_y1 <= h_y1 and v_y2 >= h_y1:
i_key = ik(v_x1, h_y1)
intersect_set.add(i_key)
if len(intersect_set) > 2:
for s in intersect_set:
all_intersections.add(tuple(map(int, s.split('.'))))
v_intersections[s] = intersect_set
return h_intersections, v_intersections, list(all_intersections)
def supress_lines(lines):
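    # Suppress near-duplicate lines: drop line_a when a later line_b overlaps
    # it by more than half of their combined span and lies within 8 pixels in
    # the perpendicular direction.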
new_lines = []
for i, line_a in enumerate(lines):
suppressed = False
for j, line_b in enumerate(lines):
if i >= j:
continue
if line_a[0] == line_a[2]:
min_x = min([line_a[1], line_b[1]])
max_x = max([line_a[3], line_b[3]])
intersection = min([line_a[3], line_b[3]]) - max([line_a[1], line_b[1]])
delta = abs(line_a[0] - line_b[0])
else:
min_x = min([line_a[0], line_b[0]])
max_x = max([line_a[2], line_b[2]])
intersection = min([line_a[2], line_b[2]]) - max([line_a[0], line_b[0]])
delta = abs(line_a[1] - line_b[1])
if intersection > 0 and (intersection/float(max_x - min_x)) > 0.5 and delta < 8:
suppressed = True
break
if not suppressed:
new_lines.append(line_a)
return new_lines
# <codecell>
def get_boxes(image_name, base_path):
horiz_lines = []
vert_lines = []
img = cv2.imread(os.path.join(base_path, image_name))
#img = cv2.resize(img,(2*img.shape[1], 2*img.shape[0]), interpolation = cv2.INTER_CUBIC)
gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
(thresh, im_bw) = cv2.threshold(gray, 128, 255, cv2.THRESH_OTSU)
edges = cv2.Canny(gray, 50, 250, apertureSize=3)
# edges = cv2.Canny(gray, 100, 200, apertureSize=3, L2gradient=1)
# return Image.fromarray(edges)
lines = cv2.HoughLinesP(edges, 1, np.pi / 180, 200, minLineLength=20, maxLineGap=3)
# lines = cv2.HoughLinesP(edges, 1, np.pi / 180, 120, minLineLength=100, maxLineGap=2)
if lines is None:
lines = []
for info in lines:
x1, y1, x2, y2 = info[0]
if y2 < y1:
y1 = info[0][3]
y2 = info[0][1]
# horizontal line
offsets = [-1, 0, 1]
if y1 - y2 == 0:
avg_above = avg_below = 256
avg_center = np.average(gray[y1:y2 + 1, x1:x2 + 1])
if y1 > 0:
avg_above = np.average(gray[y1 - 1:y2, x1:x2 + 1])
if y2 + 1 < gray.shape[0]:
avg_below = np.average(gray[y1 + 1:y2 + 2, x1:x2 + 1])
# assuming black lines, could do something to check for background color
# this occurs from edges detected in gray areas that aren't cell boundaries
if np.min([avg_above, avg_center, avg_below]) > 192:
continue
y1 += offsets[np.argmin([avg_above, avg_center, avg_below])]
y2 = y1
while x2 + 1 < im_bw.shape[1] and abs(im_bw[y1:y2 + 1, x2 + 1:x2 + 2][0,0] - np.average(im_bw[y1:y2 + 1, x1:x2 + 1])) < 16:
x2 += 1
while x1 > 0 and abs(im_bw[y1:y2 + 1, x1 - 1:x1][0,0] - np.average(im_bw[y1:y2 + 1, x1:x2 + 1])) < 16:
x1 -= 1
horiz_lines.append((x1, y1, x2, y2))
elif x1 - x2 == 0:
avg_right = avg_left = 256
avg_center = np.average(gray[y1:y2 + 1, x1:x2 + 1])
if x1 > 0:
avg_left = np.average(gray[y1:y2 + 1, x1 - 1:x2])
if x2 + 1 < gray.shape[1]:
avg_right = np.average(gray[y1:y2 + 1, x1 + 1: x2 + 2])
x1 += offsets[np.argmin([avg_left, avg_center, avg_right])]
x2 = x1
while y2 + 1 < im_bw.shape[0] and abs(im_bw[y2 + 1:y2 + 2, x1:x2 + 1][0,0] - np.average(im_bw[y1:y2 + 1, x1:x2 + 1])) < 16:
y2 += 1
while y1 > 0 and abs(im_bw[y1 - 1:y1, x1:x2 + 1][0,0] - np.average(im_bw[y1:y2 + 1, x1:x2 + 1])) < 16:
y1 -= 1
vert_lines.append((x1, y1, x2, y2))
horiz_lines = supress_lines(horiz_lines)
vert_lines = supress_lines(vert_lines)
sorted_h_lines = sorted(horiz_lines, key=lambda l: l[1])
sorted_v_lines = sorted(vert_lines, key=lambda l: l[0])
h_intersections, v_intersections, all_intersections = get_intersections(img, sorted_h_lines, sorted_v_lines)
return boxes_from_intersections(im_bw, h_intersections, v_intersections, all_intersections)
# <codecell>
def random_color():
import random
return random.randint(0, 255), random.randint(0, 255), random.randint(0, 255)
def draw_detections(img_path, found_cells):
colors = [
(255, 0, 0),
(0, 255, 0),
(0, 0, 255),
(255, 255, 0),
(0, 255, 255),
(255, 0, 255),
(128, 0, 0),
(0, 128, 0),
(0, 0, 128),
(128, 128, 0),
(0, 128, 128),
(128, 0, 128),
(255, 128, 0),
(0, 128, 255),
(128, 255, 0),
(0, 255, 128),
(255, 0, 128),
(128, 0, 255)]
image = cv2.imread(img_path)
color_counter = 0
for cell in found_cells:
start_x = cell[0]
start_y = cell[1]
end_x = cell[0] + cell[2]
end_y = cell[1] + cell[3]
cv2.rectangle(image, (start_x, start_y), (end_x, end_y), color=random_color(), thickness=2)
color_counter += 1
return Image.fromarray(image)
# <codecell>
old_boxes = get_boxes(sample_image, image_path_prefix)
# <codecell>
new_boxes = get_boxes(sample_image, image_path_prefix)
# <codecell>
len(new_boxes)
# <codecell>
import random
# <codecell>
draw_detections(image_path_prefix + sample_image, random.sample(new_boxes, 10))
# <codecell>
# <markdowncell>
# # End
# <codecell>
# img_n = 0
# anno_n = 0
# # img_n +=1
# # print(regents_images_8[img_n])
# # Image.open(regents_image_path_prefix + '/8th/' + regents_images_8[img_n])
# # anno_n += 1
# # with open(regents_anno_path_prefix + list(regents_anno_other.keys())[anno_n]) as f:
# # print(list(regents_anno_other.keys())[anno_n])
# # print()
# # print(f.read())
# # anno_n += 1
# # with open(regents_anno_path_prefix + list(regents_anno_other.keys())[anno_n]) as f:
# # print(list(regents_anno_other.keys())[anno_n])
# # print()
# # print(f.read())
# <codecell>
| [
"[email protected]"
] | |
d6667c371f5635050e24804b3548edbb78015a8e | d2e69d4d3d1e11a87f5a377e4a423422fe0a7058 | /FullStack/12/celery_stuff/periodic_task.py | 5d5dcb1b71af4eff0549304787a59963e751cecf | [] | no_license | oJacker/_python | 6f30dd4a60c1593d27c00ac485163fc0ba77dd8c | 8086d0cd78e156abfff9819a56384149dd431c56 | refs/heads/master | 2021-05-06T03:13:29.167281 | 2018-02-01T09:41:42 | 2018-02-01T09:41:42 | 114,827,084 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 773 | py | from celery import Celery
from celery.schedules import crontab
app = Celery()
@app.on_after_configure.connect
def setup_periodic_tasks(sender,**kwargs):
# Calls test('hello) every 10 seconds
sender.add_periodic_task(10.0, test.s('hello'),name='add every 10')
# Calls test('world') every 30 seconds
sender.add_periodic_task(30.0.test.s('world'),expires=10)
# Executes every Monday moring at 7:30 a.m
sender.add_periodic_task(
crontab(hour=7,minute=30,day_of_week=1),
test.s('Happy Mondays!'),
)
# app.conf.beat_schedule = {
# 'add-every-30-seconds':{
# 'task': 'tasks.add',
# 'schedule': 30.0,
# 'args': (16, 16)
# },
# }
# app.conf.timezone = 'UTC'
@app.task
def test(arg):
print(arg) | [
"[email protected]"
] | |
fd48de6ef94c04e1dc45c14888a710d6d932a6a8 | 85a9ffeccb64f6159adbd164ff98edf4ac315e33 | /pysnmp/DATASMART-MIB.py | 9e7acdf50065bc5ffabe955bbdafb82db30439ad | [
"Apache-2.0"
] | permissive | agustinhenze/mibs.snmplabs.com | 5d7d5d4da84424c5f5a1ed2752f5043ae00019fb | 1fc5c07860542b89212f4c8ab807057d9a9206c7 | refs/heads/master | 2020-12-26T12:41:41.132395 | 2019-08-16T15:51:41 | 2019-08-16T15:53:57 | 237,512,469 | 0 | 0 | Apache-2.0 | 2020-01-31T20:41:36 | 2020-01-31T20:41:35 | null | UTF-8 | Python | false | false | 105,595 | py | #
# PySNMP MIB module DATASMART-MIB (http://snmplabs.com/pysmi)
# ASN.1 source file:///Users/davwang4/Dev/mibs.snmplabs.com/asn1/DATASMART-MIB
# Produced by pysmi-0.3.4 at Mon Apr 29 18:21:38 2019
# On host DAVWANG4-M-1475 platform Darwin version 18.5.0 by user davwang4
# Using Python version 3.7.3 (default, Mar 27 2019, 09:23:15)
#
ObjectIdentifier, Integer, OctetString = mibBuilder.importSymbols("ASN1", "ObjectIdentifier", "Integer", "OctetString")
NamedValues, = mibBuilder.importSymbols("ASN1-ENUMERATION", "NamedValues")
ConstraintsIntersection, SingleValueConstraint, ValueRangeConstraint, ValueSizeConstraint, ConstraintsUnion = mibBuilder.importSymbols("ASN1-REFINEMENT", "ConstraintsIntersection", "SingleValueConstraint", "ValueRangeConstraint", "ValueSizeConstraint", "ConstraintsUnion")
ModuleCompliance, NotificationGroup = mibBuilder.importSymbols("SNMPv2-CONF", "ModuleCompliance", "NotificationGroup")
TimeTicks, Unsigned32, MibIdentifier, enterprises, NotificationType, Bits, ObjectIdentity, Counter64, Gauge32, NotificationType, IpAddress, Counter32, iso, Integer32, ModuleIdentity, MibScalar, MibTable, MibTableRow, MibTableColumn = mibBuilder.importSymbols("SNMPv2-SMI", "TimeTicks", "Unsigned32", "MibIdentifier", "enterprises", "NotificationType", "Bits", "ObjectIdentity", "Counter64", "Gauge32", "NotificationType", "IpAddress", "Counter32", "iso", "Integer32", "ModuleIdentity", "MibScalar", "MibTable", "MibTableRow", "MibTableColumn")
DisplayString, TextualConvention = mibBuilder.importSymbols("SNMPv2-TC", "DisplayString", "TextualConvention")
class DLCI(Integer32):
subtypeSpec = Integer32.subtypeSpec + ValueRangeConstraint(1, 1023)
class Counter32(Counter32):
pass
class DisplayString(OctetString):
pass
datasmart = MibIdentifier((1, 3, 6, 1, 4, 1, 181, 2, 2))
dsSs = MibIdentifier((1, 3, 6, 1, 4, 1, 181, 2, 2, 1))
dsRp = MibIdentifier((1, 3, 6, 1, 4, 1, 181, 2, 2, 2))
dsLm = MibIdentifier((1, 3, 6, 1, 4, 1, 181, 2, 2, 3))
dsRm = MibIdentifier((1, 3, 6, 1, 4, 1, 181, 2, 2, 4))
dsAc = MibIdentifier((1, 3, 6, 1, 4, 1, 181, 2, 2, 5))
dsCc = MibIdentifier((1, 3, 6, 1, 4, 1, 181, 2, 2, 6))
dsDc = MibIdentifier((1, 3, 6, 1, 4, 1, 181, 2, 2, 7))
dsFc = MibIdentifier((1, 3, 6, 1, 4, 1, 181, 2, 2, 8))
dsFmc = MibIdentifier((1, 3, 6, 1, 4, 1, 181, 2, 2, 9))
dsMc = MibIdentifier((1, 3, 6, 1, 4, 1, 181, 2, 2, 10))
dsNc = MibIdentifier((1, 3, 6, 1, 4, 1, 181, 2, 2, 11))
dsSc = MibIdentifier((1, 3, 6, 1, 4, 1, 181, 2, 2, 12))
dsTc = MibIdentifier((1, 3, 6, 1, 4, 1, 181, 2, 2, 13))
dsFp = MibIdentifier((1, 3, 6, 1, 4, 1, 181, 2, 2, 14))
dsSsAlarmSource = MibScalar((1, 3, 6, 1, 4, 1, 181, 2, 2, 1, 1), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2, 3, 4, 5, 6))).clone(namedValues=NamedValues(("ssSourceNone", 1), ("ssSourceNi", 2), ("ssSourceTi", 3), ("ssSourceDp1", 4), ("ssSourceDp2", 5), ("ssSourceSystem", 6)))).setMaxAccess("readonly")
if mibBuilder.loadTexts: dsSsAlarmSource.setStatus('mandatory')
dsSsAlarmState = MibScalar((1, 3, 6, 1, 4, 1, 181, 2, 2, 1, 2), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13))).clone(namedValues=NamedValues(("ssStateNone", 1), ("ssStateEcf", 2), ("ssStateLos", 3), ("ssStateAis", 4), ("ssStateOof", 5), ("ssStateBer", 6), ("ssStateYel", 7), ("ssStateRfa", 8), ("ssStateRma", 9), ("ssStateOmf", 10), ("ssStateEer", 11), ("ssStateDds", 12), ("ssStateOos", 13)))).setMaxAccess("readonly")
if mibBuilder.loadTexts: dsSsAlarmState.setStatus('mandatory')
dsSsLoopback = MibScalar((1, 3, 6, 1, 4, 1, 181, 2, 2, 1, 3), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16))).clone(namedValues=NamedValues(("ssLbkNone", 1), ("ssLbkRemLlb", 2), ("ssLbkRemPlb", 3), ("ssLbkRemDp1", 4), ("ssLbkRemDp2", 5), ("ssLbkLlb", 6), ("ssLbkLoc", 7), ("ssLbkPlb", 8), ("ssLbkTlb", 9), ("ssLbkDp1", 10), ("ssLbkDp2", 11), ("ssLbkDt1", 12), ("ssLbkDt2", 13), ("ssLbkCsu", 14), ("ssLbkDsu", 15), ("ssLbkDpdt", 16)))).setMaxAccess("readonly")
if mibBuilder.loadTexts: dsSsLoopback.setStatus('mandatory')
dsSsPowerStatus = MibScalar((1, 3, 6, 1, 4, 1, 181, 2, 2, 1, 4), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2, 3, 4))).clone(namedValues=NamedValues(("ssBothOff", 1), ("ssAOnBOff", 2), ("ssAOffBOn", 3), ("ssBothOn", 4)))).setMaxAccess("readonly")
if mibBuilder.loadTexts: dsSsPowerStatus.setStatus('mandatory')
dsRpUsr = MibIdentifier((1, 3, 6, 1, 4, 1, 181, 2, 2, 2, 1))
dsRpCar = MibIdentifier((1, 3, 6, 1, 4, 1, 181, 2, 2, 2, 2))
dsRpStat = MibIdentifier((1, 3, 6, 1, 4, 1, 181, 2, 2, 2, 3))
dsRpPl = MibIdentifier((1, 3, 6, 1, 4, 1, 181, 2, 2, 2, 4))
dsRpFr = MibIdentifier((1, 3, 6, 1, 4, 1, 181, 2, 2, 2, 10))
dsRpUsrTmCntTable = MibTable((1, 3, 6, 1, 4, 1, 181, 2, 2, 2, 1, 1), )
if mibBuilder.loadTexts: dsRpUsrTmCntTable.setStatus('mandatory')
dsRpUsrTmCntEntry = MibTableRow((1, 3, 6, 1, 4, 1, 181, 2, 2, 2, 1, 1, 1), ).setIndexNames((0, "DATASMART-MIB", "dsRpUsrTmCntIndex"))
if mibBuilder.loadTexts: dsRpUsrTmCntEntry.setStatus('mandatory')
dsRpUsrTmCntIndex = MibTableColumn((1, 3, 6, 1, 4, 1, 181, 2, 2, 2, 1, 1, 1, 1), Integer32().subtype(subtypeSpec=ValueRangeConstraint(1, 3))).setMaxAccess("readonly")
if mibBuilder.loadTexts: dsRpUsrTmCntIndex.setStatus('mandatory')
dsRpUsrTmCntSecs = MibTableColumn((1, 3, 6, 1, 4, 1, 181, 2, 2, 2, 1, 1, 1, 2), Integer32().subtype(subtypeSpec=ValueRangeConstraint(0, 899))).setMaxAccess("readonly")
if mibBuilder.loadTexts: dsRpUsrTmCntSecs.setStatus('mandatory')
dsRpUsrTmCnt15Mins = MibTableColumn((1, 3, 6, 1, 4, 1, 181, 2, 2, 2, 1, 1, 1, 3), Integer32().subtype(subtypeSpec=ValueRangeConstraint(0, 96))).setMaxAccess("readonly")
if mibBuilder.loadTexts: dsRpUsrTmCnt15Mins.setStatus('mandatory')
dsRpUsrTmCntDays = MibTableColumn((1, 3, 6, 1, 4, 1, 181, 2, 2, 2, 1, 1, 1, 4), Integer32().subtype(subtypeSpec=ValueRangeConstraint(0, 7))).setMaxAccess("readonly")
if mibBuilder.loadTexts: dsRpUsrTmCntDays.setStatus('mandatory')
dsRpUsrCurTable = MibTable((1, 3, 6, 1, 4, 1, 181, 2, 2, 2, 1, 2), )
if mibBuilder.loadTexts: dsRpUsrCurTable.setStatus('mandatory')
dsRpUsrCurEntry = MibTableRow((1, 3, 6, 1, 4, 1, 181, 2, 2, 2, 1, 2, 1), ).setIndexNames((0, "DATASMART-MIB", "dsRpUsrCurIndex"))
if mibBuilder.loadTexts: dsRpUsrCurEntry.setStatus('mandatory')
dsRpUsrCurIndex = MibTableColumn((1, 3, 6, 1, 4, 1, 181, 2, 2, 2, 1, 2, 1, 1), Integer32().subtype(subtypeSpec=ValueRangeConstraint(1, 3))).setMaxAccess("readonly")
if mibBuilder.loadTexts: dsRpUsrCurIndex.setStatus('mandatory')
dsRpUsrCurEE = MibTableColumn((1, 3, 6, 1, 4, 1, 181, 2, 2, 2, 1, 2, 1, 2), Gauge32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: dsRpUsrCurEE.setStatus('mandatory')
dsRpUsrCurES = MibTableColumn((1, 3, 6, 1, 4, 1, 181, 2, 2, 2, 1, 2, 1, 3), Gauge32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: dsRpUsrCurES.setStatus('mandatory')
dsRpUsrCurBES = MibTableColumn((1, 3, 6, 1, 4, 1, 181, 2, 2, 2, 1, 2, 1, 4), Gauge32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: dsRpUsrCurBES.setStatus('mandatory')
dsRpUsrCurSES = MibTableColumn((1, 3, 6, 1, 4, 1, 181, 2, 2, 2, 1, 2, 1, 5), Gauge32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: dsRpUsrCurSES.setStatus('mandatory')
dsRpUsrCurUAS = MibTableColumn((1, 3, 6, 1, 4, 1, 181, 2, 2, 2, 1, 2, 1, 6), Gauge32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: dsRpUsrCurUAS.setStatus('mandatory')
dsRpUsrCurCSS = MibTableColumn((1, 3, 6, 1, 4, 1, 181, 2, 2, 2, 1, 2, 1, 7), Gauge32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: dsRpUsrCurCSS.setStatus('mandatory')
dsRpUsrCurDM = MibTableColumn((1, 3, 6, 1, 4, 1, 181, 2, 2, 2, 1, 2, 1, 8), Gauge32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: dsRpUsrCurDM.setStatus('mandatory')
dsRpUsrCurStatus = MibTableColumn((1, 3, 6, 1, 4, 1, 181, 2, 2, 2, 1, 2, 1, 9), DisplayString().subtype(subtypeSpec=ValueSizeConstraint(0, 10))).setMaxAccess("readonly")
if mibBuilder.loadTexts: dsRpUsrCurStatus.setStatus('mandatory')
dsRpUsrIntvlTable = MibTable((1, 3, 6, 1, 4, 1, 181, 2, 2, 2, 1, 3), )
if mibBuilder.loadTexts: dsRpUsrIntvlTable.setStatus('mandatory')
dsRpUsrIntvlEntry = MibTableRow((1, 3, 6, 1, 4, 1, 181, 2, 2, 2, 1, 3, 1), ).setIndexNames((0, "DATASMART-MIB", "dsRpUsrIntvlIndex"), (0, "DATASMART-MIB", "dsRpUsrIntvlNum"))
if mibBuilder.loadTexts: dsRpUsrIntvlEntry.setStatus('mandatory')
dsRpUsrIntvlIndex = MibTableColumn((1, 3, 6, 1, 4, 1, 181, 2, 2, 2, 1, 3, 1, 1), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: dsRpUsrIntvlIndex.setStatus('mandatory')
dsRpUsrIntvlNum = MibTableColumn((1, 3, 6, 1, 4, 1, 181, 2, 2, 2, 1, 3, 1, 2), Integer32().subtype(subtypeSpec=ValueRangeConstraint(1, 96))).setMaxAccess("readonly")
if mibBuilder.loadTexts: dsRpUsrIntvlNum.setStatus('mandatory')
dsRpUsrIntvlEE = MibTableColumn((1, 3, 6, 1, 4, 1, 181, 2, 2, 2, 1, 3, 1, 3), Gauge32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: dsRpUsrIntvlEE.setStatus('mandatory')
dsRpUsrIntvlES = MibTableColumn((1, 3, 6, 1, 4, 1, 181, 2, 2, 2, 1, 3, 1, 4), Gauge32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: dsRpUsrIntvlES.setStatus('mandatory')
dsRpUsrIntvlBES = MibTableColumn((1, 3, 6, 1, 4, 1, 181, 2, 2, 2, 1, 3, 1, 5), Gauge32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: dsRpUsrIntvlBES.setStatus('mandatory')
dsRpUsrIntvlSES = MibTableColumn((1, 3, 6, 1, 4, 1, 181, 2, 2, 2, 1, 3, 1, 6), Gauge32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: dsRpUsrIntvlSES.setStatus('mandatory')
dsRpUsrIntvlUAS = MibTableColumn((1, 3, 6, 1, 4, 1, 181, 2, 2, 2, 1, 3, 1, 7), Gauge32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: dsRpUsrIntvlUAS.setStatus('mandatory')
dsRpUsrIntvlCSS = MibTableColumn((1, 3, 6, 1, 4, 1, 181, 2, 2, 2, 1, 3, 1, 8), Gauge32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: dsRpUsrIntvlCSS.setStatus('mandatory')
dsRpUsrIntvlDM = MibTableColumn((1, 3, 6, 1, 4, 1, 181, 2, 2, 2, 1, 3, 1, 9), Gauge32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: dsRpUsrIntvlDM.setStatus('mandatory')
dsRpUsrIntvlStatus = MibTableColumn((1, 3, 6, 1, 4, 1, 181, 2, 2, 2, 1, 3, 1, 10), DisplayString().subtype(subtypeSpec=ValueSizeConstraint(0, 10))).setMaxAccess("readonly")
if mibBuilder.loadTexts: dsRpUsrIntvlStatus.setStatus('mandatory')
dsRpUsrTotalTable = MibTable((1, 3, 6, 1, 4, 1, 181, 2, 2, 2, 1, 4), )
if mibBuilder.loadTexts: dsRpUsrTotalTable.setStatus('mandatory')
dsRpUsrTotalEntry = MibTableRow((1, 3, 6, 1, 4, 1, 181, 2, 2, 2, 1, 4, 1), ).setIndexNames((0, "DATASMART-MIB", "dsRpUsrTotalIndex"))
if mibBuilder.loadTexts: dsRpUsrTotalEntry.setStatus('mandatory')
dsRpUsrTotalIndex = MibTableColumn((1, 3, 6, 1, 4, 1, 181, 2, 2, 2, 1, 4, 1, 1), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: dsRpUsrTotalIndex.setStatus('mandatory')
dsRpUsrTotalEE = MibTableColumn((1, 3, 6, 1, 4, 1, 181, 2, 2, 2, 1, 4, 1, 2), Gauge32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: dsRpUsrTotalEE.setStatus('mandatory')
dsRpUsrTotalES = MibTableColumn((1, 3, 6, 1, 4, 1, 181, 2, 2, 2, 1, 4, 1, 3), Gauge32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: dsRpUsrTotalES.setStatus('mandatory')
dsRpUsrTotalBES = MibTableColumn((1, 3, 6, 1, 4, 1, 181, 2, 2, 2, 1, 4, 1, 4), Gauge32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: dsRpUsrTotalBES.setStatus('mandatory')
dsRpUsrTotalSES = MibTableColumn((1, 3, 6, 1, 4, 1, 181, 2, 2, 2, 1, 4, 1, 5), Gauge32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: dsRpUsrTotalSES.setStatus('mandatory')
dsRpUsrTotalUAS = MibTableColumn((1, 3, 6, 1, 4, 1, 181, 2, 2, 2, 1, 4, 1, 6), Gauge32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: dsRpUsrTotalUAS.setStatus('mandatory')
dsRpUsrTotalCSS = MibTableColumn((1, 3, 6, 1, 4, 1, 181, 2, 2, 2, 1, 4, 1, 7), Gauge32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: dsRpUsrTotalCSS.setStatus('mandatory')
dsRpUsrTotalDM = MibTableColumn((1, 3, 6, 1, 4, 1, 181, 2, 2, 2, 1, 4, 1, 8), Gauge32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: dsRpUsrTotalDM.setStatus('mandatory')
dsRpUsrTotalStatus = MibTableColumn((1, 3, 6, 1, 4, 1, 181, 2, 2, 2, 1, 4, 1, 9), DisplayString().subtype(subtypeSpec=ValueSizeConstraint(0, 10))).setMaxAccess("readonly")
if mibBuilder.loadTexts: dsRpUsrTotalStatus.setStatus('mandatory')
dsRpUsrDayTable = MibTable((1, 3, 6, 1, 4, 1, 181, 2, 2, 2, 1, 5), )
if mibBuilder.loadTexts: dsRpUsrDayTable.setStatus('mandatory')
dsRpUsrDayEntry = MibTableRow((1, 3, 6, 1, 4, 1, 181, 2, 2, 2, 1, 5, 1), ).setIndexNames((0, "DATASMART-MIB", "dsRpUsrDayIndex"), (0, "DATASMART-MIB", "dsRpUsrDayNum"))
if mibBuilder.loadTexts: dsRpUsrDayEntry.setStatus('mandatory')
dsRpUsrDayIndex = MibTableColumn((1, 3, 6, 1, 4, 1, 181, 2, 2, 2, 1, 5, 1, 1), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: dsRpUsrDayIndex.setStatus('mandatory')
dsRpUsrDayNum = MibTableColumn((1, 3, 6, 1, 4, 1, 181, 2, 2, 2, 1, 5, 1, 2), Integer32().subtype(subtypeSpec=ValueRangeConstraint(1, 7))).setMaxAccess("readonly")
if mibBuilder.loadTexts: dsRpUsrDayNum.setStatus('mandatory')
dsRpUsrDayEE = MibTableColumn((1, 3, 6, 1, 4, 1, 181, 2, 2, 2, 1, 5, 1, 3), Gauge32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: dsRpUsrDayEE.setStatus('mandatory')
dsRpUsrDayES = MibTableColumn((1, 3, 6, 1, 4, 1, 181, 2, 2, 2, 1, 5, 1, 4), Gauge32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: dsRpUsrDayES.setStatus('mandatory')
dsRpUsrDayBES = MibTableColumn((1, 3, 6, 1, 4, 1, 181, 2, 2, 2, 1, 5, 1, 5), Gauge32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: dsRpUsrDayBES.setStatus('mandatory')
dsRpUsrDaySES = MibTableColumn((1, 3, 6, 1, 4, 1, 181, 2, 2, 2, 1, 5, 1, 6), Gauge32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: dsRpUsrDaySES.setStatus('mandatory')
dsRpUsrDayUAS = MibTableColumn((1, 3, 6, 1, 4, 1, 181, 2, 2, 2, 1, 5, 1, 7), Gauge32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: dsRpUsrDayUAS.setStatus('mandatory')
dsRpUsrDayCSS = MibTableColumn((1, 3, 6, 1, 4, 1, 181, 2, 2, 2, 1, 5, 1, 8), Gauge32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: dsRpUsrDayCSS.setStatus('mandatory')
dsRpUsrDayDM = MibTableColumn((1, 3, 6, 1, 4, 1, 181, 2, 2, 2, 1, 5, 1, 9), Gauge32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: dsRpUsrDayDM.setStatus('mandatory')
dsRpUsrDayStatus = MibTableColumn((1, 3, 6, 1, 4, 1, 181, 2, 2, 2, 1, 5, 1, 10), DisplayString().subtype(subtypeSpec=ValueSizeConstraint(0, 10))).setMaxAccess("readonly")
if mibBuilder.loadTexts: dsRpUsrDayStatus.setStatus('mandatory')
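# -- dsRpCar*: carrier-side performance report -- elapsed second/interval
# -- counts, current counters, 96 15-minute interval buckets and totals.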
dsRpCarCntSecs = MibScalar((1, 3, 6, 1, 4, 1, 181, 2, 2, 2, 2, 1), Integer32().subtype(subtypeSpec=ValueRangeConstraint(0, 899))).setMaxAccess("readonly")
if mibBuilder.loadTexts: dsRpCarCntSecs.setStatus('mandatory')
dsRpCarCnt15Mins = MibScalar((1, 3, 6, 1, 4, 1, 181, 2, 2, 2, 2, 2), Integer32().subtype(subtypeSpec=ValueRangeConstraint(0, 96))).setMaxAccess("readonly")
if mibBuilder.loadTexts: dsRpCarCnt15Mins.setStatus('mandatory')
dsRpCarCur = MibIdentifier((1, 3, 6, 1, 4, 1, 181, 2, 2, 2, 2, 3))
dsRpCarCurEE = MibScalar((1, 3, 6, 1, 4, 1, 181, 2, 2, 2, 2, 3, 1), Gauge32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: dsRpCarCurEE.setStatus('mandatory')
dsRpCarCurES = MibScalar((1, 3, 6, 1, 4, 1, 181, 2, 2, 2, 2, 3, 2), Gauge32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: dsRpCarCurES.setStatus('mandatory')
dsRpCarCurBES = MibScalar((1, 3, 6, 1, 4, 1, 181, 2, 2, 2, 2, 3, 3), Gauge32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: dsRpCarCurBES.setStatus('mandatory')
dsRpCarCurSES = MibScalar((1, 3, 6, 1, 4, 1, 181, 2, 2, 2, 2, 3, 4), Gauge32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: dsRpCarCurSES.setStatus('mandatory')
dsRpCarCurUAS = MibScalar((1, 3, 6, 1, 4, 1, 181, 2, 2, 2, 2, 3, 5), Gauge32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: dsRpCarCurUAS.setStatus('mandatory')
dsRpCarCurCSS = MibScalar((1, 3, 6, 1, 4, 1, 181, 2, 2, 2, 2, 3, 6), Gauge32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: dsRpCarCurCSS.setStatus('mandatory')
dsRpCarCurLOFC = MibScalar((1, 3, 6, 1, 4, 1, 181, 2, 2, 2, 2, 3, 7), Gauge32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: dsRpCarCurLOFC.setStatus('mandatory')
dsRpCarIntvlTable = MibTable((1, 3, 6, 1, 4, 1, 181, 2, 2, 2, 2, 4), )
if mibBuilder.loadTexts: dsRpCarIntvlTable.setStatus('mandatory')
dsRpCarIntvlEntry = MibTableRow((1, 3, 6, 1, 4, 1, 181, 2, 2, 2, 2, 4, 1), ).setIndexNames((0, "DATASMART-MIB", "dsRpCarIntvlNum"))
if mibBuilder.loadTexts: dsRpCarIntvlEntry.setStatus('mandatory')
dsRpCarIntvlNum = MibTableColumn((1, 3, 6, 1, 4, 1, 181, 2, 2, 2, 2, 4, 1, 1), Integer32().subtype(subtypeSpec=ValueRangeConstraint(1, 96))).setMaxAccess("readonly")
if mibBuilder.loadTexts: dsRpCarIntvlNum.setStatus('mandatory')
dsRpCarIntvlEE = MibTableColumn((1, 3, 6, 1, 4, 1, 181, 2, 2, 2, 2, 4, 1, 2), Gauge32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: dsRpCarIntvlEE.setStatus('mandatory')
dsRpCarIntvlES = MibTableColumn((1, 3, 6, 1, 4, 1, 181, 2, 2, 2, 2, 4, 1, 3), Gauge32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: dsRpCarIntvlES.setStatus('mandatory')
dsRpCarIntvlBES = MibTableColumn((1, 3, 6, 1, 4, 1, 181, 2, 2, 2, 2, 4, 1, 4), Gauge32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: dsRpCarIntvlBES.setStatus('mandatory')
dsRpCarIntvlSES = MibTableColumn((1, 3, 6, 1, 4, 1, 181, 2, 2, 2, 2, 4, 1, 5), Gauge32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: dsRpCarIntvlSES.setStatus('mandatory')
dsRpCarIntvlUAS = MibTableColumn((1, 3, 6, 1, 4, 1, 181, 2, 2, 2, 2, 4, 1, 6), Gauge32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: dsRpCarIntvlUAS.setStatus('mandatory')
dsRpCarIntvlCSS = MibTableColumn((1, 3, 6, 1, 4, 1, 181, 2, 2, 2, 2, 4, 1, 7), Gauge32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: dsRpCarIntvlCSS.setStatus('mandatory')
dsRpCarIntvlLOFC = MibTableColumn((1, 3, 6, 1, 4, 1, 181, 2, 2, 2, 2, 4, 1, 8), Gauge32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: dsRpCarIntvlLOFC.setStatus('mandatory')
dsRpCarTotal = MibIdentifier((1, 3, 6, 1, 4, 1, 181, 2, 2, 2, 2, 5))
dsRpCarTotalEE = MibScalar((1, 3, 6, 1, 4, 1, 181, 2, 2, 2, 2, 5, 1), Gauge32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: dsRpCarTotalEE.setStatus('mandatory')
dsRpCarTotalES = MibScalar((1, 3, 6, 1, 4, 1, 181, 2, 2, 2, 2, 5, 2), Gauge32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: dsRpCarTotalES.setStatus('mandatory')
dsRpCarTotalBES = MibScalar((1, 3, 6, 1, 4, 1, 181, 2, 2, 2, 2, 5, 3), Gauge32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: dsRpCarTotalBES.setStatus('mandatory')
dsRpCarTotalSES = MibScalar((1, 3, 6, 1, 4, 1, 181, 2, 2, 2, 2, 5, 4), Gauge32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: dsRpCarTotalSES.setStatus('mandatory')
dsRpCarTotalUAS = MibScalar((1, 3, 6, 1, 4, 1, 181, 2, 2, 2, 2, 5, 5), Gauge32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: dsRpCarTotalUAS.setStatus('mandatory')
dsRpCarTotalCSS = MibScalar((1, 3, 6, 1, 4, 1, 181, 2, 2, 2, 2, 5, 6), Gauge32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: dsRpCarTotalCSS.setStatus('mandatory')
dsRpCarTotalLOFC = MibScalar((1, 3, 6, 1, 4, 1, 181, 2, 2, 2, 2, 5, 7), Gauge32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: dsRpCarTotalLOFC.setStatus('mandatory')
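# -- dsRpSt*: raw line statistics counters (ESF/CRC/OOF/frame-bit errors,
# -- BPVs, controlled slips, yellow/AIS/LOF/LOS events) with a
# -- start-to-zero control column.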
dsRpStTable = MibTable((1, 3, 6, 1, 4, 1, 181, 2, 2, 2, 3, 1), )
if mibBuilder.loadTexts: dsRpStTable.setStatus('mandatory')
dsRpStEntry = MibTableRow((1, 3, 6, 1, 4, 1, 181, 2, 2, 2, 3, 1, 1), ).setIndexNames((0, "DATASMART-MIB", "dsRpStIndex"))
if mibBuilder.loadTexts: dsRpStEntry.setStatus('mandatory')
dsRpStIndex = MibTableColumn((1, 3, 6, 1, 4, 1, 181, 2, 2, 2, 3, 1, 1, 1), Integer32().subtype(subtypeSpec=ValueRangeConstraint(1, 3))).setMaxAccess("readonly")
if mibBuilder.loadTexts: dsRpStIndex.setStatus('mandatory')
dsRpStEsfErrors = MibTableColumn((1, 3, 6, 1, 4, 1, 181, 2, 2, 2, 3, 1, 1, 2), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: dsRpStEsfErrors.setStatus('mandatory')
dsRpStCrcErrors = MibTableColumn((1, 3, 6, 1, 4, 1, 181, 2, 2, 2, 3, 1, 1, 3), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: dsRpStCrcErrors.setStatus('mandatory')
dsRpStOofErrors = MibTableColumn((1, 3, 6, 1, 4, 1, 181, 2, 2, 2, 3, 1, 1, 4), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: dsRpStOofErrors.setStatus('mandatory')
dsRpStFrameBitErrors = MibTableColumn((1, 3, 6, 1, 4, 1, 181, 2, 2, 2, 3, 1, 1, 5), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: dsRpStFrameBitErrors.setStatus('mandatory')
dsRpStBPVs = MibTableColumn((1, 3, 6, 1, 4, 1, 181, 2, 2, 2, 3, 1, 1, 6), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: dsRpStBPVs.setStatus('mandatory')
dsRpStControlledSlips = MibTableColumn((1, 3, 6, 1, 4, 1, 181, 2, 2, 2, 3, 1, 1, 7), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: dsRpStControlledSlips.setStatus('mandatory')
dsRpStYellowEvents = MibTableColumn((1, 3, 6, 1, 4, 1, 181, 2, 2, 2, 3, 1, 1, 8), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: dsRpStYellowEvents.setStatus('mandatory')
dsRpStAISEvents = MibTableColumn((1, 3, 6, 1, 4, 1, 181, 2, 2, 2, 3, 1, 1, 9), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: dsRpStAISEvents.setStatus('mandatory')
dsRpStLOFEvents = MibTableColumn((1, 3, 6, 1, 4, 1, 181, 2, 2, 2, 3, 1, 1, 10), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: dsRpStLOFEvents.setStatus('mandatory')
dsRpStLOSEvents = MibTableColumn((1, 3, 6, 1, 4, 1, 181, 2, 2, 2, 3, 1, 1, 11), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: dsRpStLOSEvents.setStatus('mandatory')
dsRpStFarEndBlkErrors = MibTableColumn((1, 3, 6, 1, 4, 1, 181, 2, 2, 2, 3, 1, 1, 12), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: dsRpStFarEndBlkErrors.setStatus('mandatory')
dsRpStRemFrameAlmEvts = MibTableColumn((1, 3, 6, 1, 4, 1, 181, 2, 2, 2, 3, 1, 1, 13), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: dsRpStRemFrameAlmEvts.setStatus('mandatory')
dsRpStRemMFrameAlmEvts = MibTableColumn((1, 3, 6, 1, 4, 1, 181, 2, 2, 2, 3, 1, 1, 14), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: dsRpStRemMFrameAlmEvts.setStatus('mandatory')
dsRpStLOTS16MFrameEvts = MibTableColumn((1, 3, 6, 1, 4, 1, 181, 2, 2, 2, 3, 1, 1, 15), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: dsRpStLOTS16MFrameEvts.setStatus('mandatory')
dsRpStZeroCounters = MibTableColumn((1, 3, 6, 1, 4, 1, 181, 2, 2, 2, 3, 1, 1, 16), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2))).clone(namedValues=NamedValues(("rpStZeroCountersIdle", 1), ("rpStZeroCountersStart", 2)))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: dsRpStZeroCounters.setStatus('mandatory')
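# -- dsPl*: report pagination settings -- break behaviour (line feed or
# -- "more" prompt) and page length.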
dsPlBreak = MibScalar((1, 3, 6, 1, 4, 1, 181, 2, 2, 2, 4, 1), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2))).clone(namedValues=NamedValues(("rpPlLineFeed", 1), ("rpPlMorePrompt", 2)))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: dsPlBreak.setStatus('mandatory')
dsPlLen = MibScalar((1, 3, 6, 1, 4, 1, 181, 2, 2, 2, 4, 2), Integer32().subtype(subtypeSpec=ValueRangeConstraint(0, 70))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: dsPlLen.setStatus('mandatory')
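# -- dsRpAhr*: alarm history report, one display string per entry.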
dsRpAhrTable = MibTable((1, 3, 6, 1, 4, 1, 181, 2, 2, 2, 5), )
if mibBuilder.loadTexts: dsRpAhrTable.setStatus('mandatory')
dsRpAhrEntry = MibTableRow((1, 3, 6, 1, 4, 1, 181, 2, 2, 2, 5, 1), ).setIndexNames((0, "DATASMART-MIB", "dsRpAhrIndex"))
if mibBuilder.loadTexts: dsRpAhrEntry.setStatus('mandatory')
dsRpAhrIndex = MibTableColumn((1, 3, 6, 1, 4, 1, 181, 2, 2, 2, 5, 1, 1), Integer32().subtype(subtypeSpec=ValueRangeConstraint(1, 20))).setMaxAccess("readonly")
if mibBuilder.loadTexts: dsRpAhrIndex.setStatus('mandatory')
dsRpAhrStr = MibTableColumn((1, 3, 6, 1, 4, 1, 181, 2, 2, 2, 5, 1, 2), DisplayString().subtype(subtypeSpec=ValueSizeConstraint(0, 80))).setMaxAccess("readonly")
if mibBuilder.loadTexts: dsRpAhrStr.setStatus('mandatory')
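# -- dsRpShr*: security history report -- timestamped entries for failed
# -- Telnet password, source-IP screening and community string events.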
dsRpShrTable = MibTable((1, 3, 6, 1, 4, 1, 181, 2, 2, 2, 6), )
if mibBuilder.loadTexts: dsRpShrTable.setStatus('mandatory')
dsRpShrEntry = MibTableRow((1, 3, 6, 1, 4, 1, 181, 2, 2, 2, 6, 1), ).setIndexNames((0, "DATASMART-MIB", "dsRpShrIndex"))
if mibBuilder.loadTexts: dsRpShrEntry.setStatus('mandatory')
dsRpShrIndex = MibTableColumn((1, 3, 6, 1, 4, 1, 181, 2, 2, 2, 6, 1, 1), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: dsRpShrIndex.setStatus('mandatory')
dsRpShrDateTime = MibTableColumn((1, 3, 6, 1, 4, 1, 181, 2, 2, 2, 6, 1, 2), DisplayString().subtype(subtypeSpec=ValueSizeConstraint(0, 80))).setMaxAccess("readonly")
if mibBuilder.loadTexts: dsRpShrDateTime.setStatus('mandatory')
dsRpShrEventType = MibTableColumn((1, 3, 6, 1, 4, 1, 181, 2, 2, 2, 6, 1, 3), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2, 3, 4))).clone(namedValues=NamedValues(("rpShrTelnetPassword", 1), ("rpShrSrcIpAddressScreen", 2), ("rpShrReadCommString", 3), ("rpShrWriteCommString", 4)))).setMaxAccess("readonly")
if mibBuilder.loadTexts: dsRpShrEventType.setStatus('mandatory')
dsRpShrComments = MibTableColumn((1, 3, 6, 1, 4, 1, 181, 2, 2, 2, 6, 1, 4), DisplayString().subtype(subtypeSpec=ValueSizeConstraint(0, 80))).setMaxAccess("readonly")
if mibBuilder.loadTexts: dsRpShrComments.setStatus('mandatory')
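# -- dsRpBes/dsRpSes/dsRpDm: writable error-count thresholds used to
# -- classify bursty/severely errored seconds and degraded minutes.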
dsRpBes = MibScalar((1, 3, 6, 1, 4, 1, 181, 2, 2, 2, 7), Integer32().subtype(subtypeSpec=ValueRangeConstraint(2, 63999))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: dsRpBes.setStatus('mandatory')
dsRpSes = MibScalar((1, 3, 6, 1, 4, 1, 181, 2, 2, 2, 8), Integer32().subtype(subtypeSpec=ValueRangeConstraint(3, 64000))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: dsRpSes.setStatus('mandatory')
dsRpDm = MibScalar((1, 3, 6, 1, 4, 1, 181, 2, 2, 2, 9), Integer32().subtype(subtypeSpec=ValueRangeConstraint(1, 64000))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: dsRpDm.setStatus('mandatory')
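# -- dsRpFr*: frame relay performance report group; the time-count table
# -- below records how much of each collection period (seconds, 2-hour
# -- intervals, days) has elapsed per direction.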
dsRpFrTmCntTable = MibTable((1, 3, 6, 1, 4, 1, 181, 2, 2, 2, 10, 1), )
if mibBuilder.loadTexts: dsRpFrTmCntTable.setStatus('mandatory')
dsRpFrTmCntEntry = MibTableRow((1, 3, 6, 1, 4, 1, 181, 2, 2, 2, 10, 1, 1), ).setIndexNames((0, "DATASMART-MIB", "dsRpFrTmCntDir"))
if mibBuilder.loadTexts: dsRpFrTmCntEntry.setStatus('mandatory')
dsRpFrTmCntDir = MibTableColumn((1, 3, 6, 1, 4, 1, 181, 2, 2, 2, 10, 1, 1, 1), Integer32().subtype(subtypeSpec=ValueRangeConstraint(1, 2))).setMaxAccess("readonly")
if mibBuilder.loadTexts: dsRpFrTmCntDir.setStatus('mandatory')
dsRpFrTmCntSecs = MibTableColumn((1, 3, 6, 1, 4, 1, 181, 2, 2, 2, 10, 1, 1, 2), Integer32().subtype(subtypeSpec=ValueRangeConstraint(0, 7200))).setMaxAccess("readonly")
if mibBuilder.loadTexts: dsRpFrTmCntSecs.setStatus('mandatory')
dsRpFrTmCnt2Hrs = MibTableColumn((1, 3, 6, 1, 4, 1, 181, 2, 2, 2, 10, 1, 1, 3), Integer32().subtype(subtypeSpec=ValueRangeConstraint(0, 12))).setMaxAccess("readonly")
if mibBuilder.loadTexts: dsRpFrTmCnt2Hrs.setStatus('mandatory')
dsRpFrTmCntDays = MibTableColumn((1, 3, 6, 1, 4, 1, 181, 2, 2, 2, 10, 1, 1, 4), Integer32().subtype(subtypeSpec=ValueRangeConstraint(0, 7))).setMaxAccess("readonly")
if mibBuilder.loadTexts: dsRpFrTmCntDays.setStatus('mandatory')
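# -- dsRpFrPre15M*: previous 15-minute frame relay statistics per VC and
# -- direction (frames, octets, kbps, fping max/avg/lost/sent).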
dsRpFrPre15MTable = MibTable((1, 3, 6, 1, 4, 1, 181, 2, 2, 2, 10, 2), )
if mibBuilder.loadTexts: dsRpFrPre15MTable.setStatus('mandatory')
dsRpFrPre15MEntry = MibTableRow((1, 3, 6, 1, 4, 1, 181, 2, 2, 2, 10, 2, 1), ).setIndexNames((0, "DATASMART-MIB", "dsRpFrPre15MDir"), (0, "DATASMART-MIB", "dsRpFrPre15MVcIndex"))
if mibBuilder.loadTexts: dsRpFrPre15MEntry.setStatus('mandatory')
dsRpFrPre15MDir = MibTableColumn((1, 3, 6, 1, 4, 1, 181, 2, 2, 2, 10, 2, 1, 1), Integer32().subtype(subtypeSpec=ValueRangeConstraint(1, 2))).setMaxAccess("readonly")
if mibBuilder.loadTexts: dsRpFrPre15MDir.setStatus('mandatory')
dsRpFrPre15MVcIndex = MibTableColumn((1, 3, 6, 1, 4, 1, 181, 2, 2, 2, 10, 2, 1, 2), Integer32().subtype(subtypeSpec=ValueRangeConstraint(1, 65))).setMaxAccess("readonly")
if mibBuilder.loadTexts: dsRpFrPre15MVcIndex.setStatus('mandatory')
dsRpFrPre15MVc = MibTableColumn((1, 3, 6, 1, 4, 1, 181, 2, 2, 2, 10, 2, 1, 3), Integer32().subtype(subtypeSpec=ValueRangeConstraint(1, 8388607))).setMaxAccess("readonly")
if mibBuilder.loadTexts: dsRpFrPre15MVc.setStatus('mandatory')
dsRpFrPre15MFrames = MibTableColumn((1, 3, 6, 1, 4, 1, 181, 2, 2, 2, 10, 2, 1, 4), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: dsRpFrPre15MFrames.setStatus('mandatory')
dsRpFrPre15MOctets = MibTableColumn((1, 3, 6, 1, 4, 1, 181, 2, 2, 2, 10, 2, 1, 5), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: dsRpFrPre15MOctets.setStatus('mandatory')
dsRpFrPre15MKbps = MibTableColumn((1, 3, 6, 1, 4, 1, 181, 2, 2, 2, 10, 2, 1, 6), Gauge32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: dsRpFrPre15MKbps.setStatus('mandatory')
dsRpFrPre15MFpMax = MibTableColumn((1, 3, 6, 1, 4, 1, 181, 2, 2, 2, 10, 2, 1, 7), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: dsRpFrPre15MFpMax.setStatus('mandatory')
dsRpFrPre15MFpAvg = MibTableColumn((1, 3, 6, 1, 4, 1, 181, 2, 2, 2, 10, 2, 1, 8), Gauge32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: dsRpFrPre15MFpAvg.setStatus('mandatory')
dsRpFrPre15MFpLost = MibTableColumn((1, 3, 6, 1, 4, 1, 181, 2, 2, 2, 10, 2, 1, 9), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: dsRpFrPre15MFpLost.setStatus('mandatory')
dsRpFrPre15MFpSent = MibTableColumn((1, 3, 6, 1, 4, 1, 181, 2, 2, 2, 10, 2, 1, 10), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: dsRpFrPre15MFpSent.setStatus('mandatory')
dsRpFrPre15MStatus = MibTableColumn((1, 3, 6, 1, 4, 1, 181, 2, 2, 2, 10, 2, 1, 11), DisplayString().subtype(subtypeSpec=ValueSizeConstraint(0, 7))).setMaxAccess("readonly")
if mibBuilder.loadTexts: dsRpFrPre15MStatus.setStatus('mandatory')
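# -- dsRpFrCur15M*: current 15-minute frame relay statistics per VC,
# -- including the remote IP and VC learned from fping responses.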
dsRpFrCur15MTable = MibTable((1, 3, 6, 1, 4, 1, 181, 2, 2, 2, 10, 3), )
if mibBuilder.loadTexts: dsRpFrCur15MTable.setStatus('mandatory')
dsRpFrCur15MEntry = MibTableRow((1, 3, 6, 1, 4, 1, 181, 2, 2, 2, 10, 3, 1), ).setIndexNames((0, "DATASMART-MIB", "dsRpFrCur15MDir"), (0, "DATASMART-MIB", "dsRpFrCur15MVcIndex"))
if mibBuilder.loadTexts: dsRpFrCur15MEntry.setStatus('mandatory')
dsRpFrCur15MDir = MibTableColumn((1, 3, 6, 1, 4, 1, 181, 2, 2, 2, 10, 3, 1, 1), Integer32().subtype(subtypeSpec=ValueRangeConstraint(1, 2))).setMaxAccess("readonly")
if mibBuilder.loadTexts: dsRpFrCur15MDir.setStatus('mandatory')
dsRpFrCur15MVcIndex = MibTableColumn((1, 3, 6, 1, 4, 1, 181, 2, 2, 2, 10, 3, 1, 2), Integer32().subtype(subtypeSpec=ValueRangeConstraint(1, 65))).setMaxAccess("readonly")
if mibBuilder.loadTexts: dsRpFrCur15MVcIndex.setStatus('mandatory')
dsRpFrCur15MVc = MibTableColumn((1, 3, 6, 1, 4, 1, 181, 2, 2, 2, 10, 3, 1, 3), Integer32().subtype(subtypeSpec=ValueRangeConstraint(1, 8388607))).setMaxAccess("readonly")
if mibBuilder.loadTexts: dsRpFrCur15MVc.setStatus('mandatory')
dsRpFrCur15MFrames = MibTableColumn((1, 3, 6, 1, 4, 1, 181, 2, 2, 2, 10, 3, 1, 4), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: dsRpFrCur15MFrames.setStatus('mandatory')
dsRpFrCur15MOctets = MibTableColumn((1, 3, 6, 1, 4, 1, 181, 2, 2, 2, 10, 3, 1, 5), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: dsRpFrCur15MOctets.setStatus('mandatory')
dsRpFrCur15MKbps = MibTableColumn((1, 3, 6, 1, 4, 1, 181, 2, 2, 2, 10, 3, 1, 6), Gauge32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: dsRpFrCur15MKbps.setStatus('mandatory')
dsRpFrCur15MFpMax = MibTableColumn((1, 3, 6, 1, 4, 1, 181, 2, 2, 2, 10, 3, 1, 7), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: dsRpFrCur15MFpMax.setStatus('mandatory')
dsRpFrCur15MFpAvg = MibTableColumn((1, 3, 6, 1, 4, 1, 181, 2, 2, 2, 10, 3, 1, 8), Gauge32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: dsRpFrCur15MFpAvg.setStatus('mandatory')
dsRpFrCur15MFpLost = MibTableColumn((1, 3, 6, 1, 4, 1, 181, 2, 2, 2, 10, 3, 1, 9), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: dsRpFrCur15MFpLost.setStatus('mandatory')
dsRpFrCur15MFpSent = MibTableColumn((1, 3, 6, 1, 4, 1, 181, 2, 2, 2, 10, 3, 1, 10), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: dsRpFrCur15MFpSent.setStatus('mandatory')
dsRpFrCur15MFpRmtIp = MibTableColumn((1, 3, 6, 1, 4, 1, 181, 2, 2, 2, 10, 3, 1, 11), IpAddress()).setMaxAccess("readonly")
if mibBuilder.loadTexts: dsRpFrCur15MFpRmtIp.setStatus('mandatory')
dsRpFrCur15MFpRmtVc = MibTableColumn((1, 3, 6, 1, 4, 1, 181, 2, 2, 2, 10, 3, 1, 12), Integer32().subtype(subtypeSpec=ValueRangeConstraint(0, 8388607))).setMaxAccess("readonly")
if mibBuilder.loadTexts: dsRpFrCur15MFpRmtVc.setStatus('mandatory')
dsRpFrCur15MStatus = MibTableColumn((1, 3, 6, 1, 4, 1, 181, 2, 2, 2, 10, 3, 1, 13), DisplayString().subtype(subtypeSpec=ValueSizeConstraint(0, 7))).setMaxAccess("readonly")
if mibBuilder.loadTexts: dsRpFrCur15MStatus.setStatus('mandatory')
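# -- dsRpFrCur2H*: current 2-hour frame relay statistics per VC.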
dsRpFrCur2HTable = MibTable((1, 3, 6, 1, 4, 1, 181, 2, 2, 2, 10, 4), )
if mibBuilder.loadTexts: dsRpFrCur2HTable.setStatus('mandatory')
dsRpFrCur2HEntry = MibTableRow((1, 3, 6, 1, 4, 1, 181, 2, 2, 2, 10, 4, 1), ).setIndexNames((0, "DATASMART-MIB", "dsRpFrCur2HDir"), (0, "DATASMART-MIB", "dsRpFrCur2HVcIndex"))
if mibBuilder.loadTexts: dsRpFrCur2HEntry.setStatus('mandatory')
dsRpFrCur2HDir = MibTableColumn((1, 3, 6, 1, 4, 1, 181, 2, 2, 2, 10, 4, 1, 1), Integer32().subtype(subtypeSpec=ValueRangeConstraint(1, 2))).setMaxAccess("readonly")
if mibBuilder.loadTexts: dsRpFrCur2HDir.setStatus('mandatory')
dsRpFrCur2HVcIndex = MibTableColumn((1, 3, 6, 1, 4, 1, 181, 2, 2, 2, 10, 4, 1, 2), Integer32().subtype(subtypeSpec=ValueRangeConstraint(1, 65))).setMaxAccess("readonly")
if mibBuilder.loadTexts: dsRpFrCur2HVcIndex.setStatus('mandatory')
dsRpFrCur2HVc = MibTableColumn((1, 3, 6, 1, 4, 1, 181, 2, 2, 2, 10, 4, 1, 3), Integer32().subtype(subtypeSpec=ValueRangeConstraint(1, 8388607))).setMaxAccess("readonly")
if mibBuilder.loadTexts: dsRpFrCur2HVc.setStatus('mandatory')
dsRpFrCur2HFrames = MibTableColumn((1, 3, 6, 1, 4, 1, 181, 2, 2, 2, 10, 4, 1, 4), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: dsRpFrCur2HFrames.setStatus('mandatory')
dsRpFrCur2HOctets = MibTableColumn((1, 3, 6, 1, 4, 1, 181, 2, 2, 2, 10, 4, 1, 5), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: dsRpFrCur2HOctets.setStatus('mandatory')
dsRpFrCur2HKbps = MibTableColumn((1, 3, 6, 1, 4, 1, 181, 2, 2, 2, 10, 4, 1, 6), Gauge32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: dsRpFrCur2HKbps.setStatus('mandatory')
dsRpFrCur2HFpMax = MibTableColumn((1, 3, 6, 1, 4, 1, 181, 2, 2, 2, 10, 4, 1, 7), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: dsRpFrCur2HFpMax.setStatus('mandatory')
dsRpFrCur2HFpAvg = MibTableColumn((1, 3, 6, 1, 4, 1, 181, 2, 2, 2, 10, 4, 1, 8), Gauge32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: dsRpFrCur2HFpAvg.setStatus('mandatory')
dsRpFrCur2HFpLost = MibTableColumn((1, 3, 6, 1, 4, 1, 181, 2, 2, 2, 10, 4, 1, 9), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: dsRpFrCur2HFpLost.setStatus('mandatory')
dsRpFrCur2HFpSent = MibTableColumn((1, 3, 6, 1, 4, 1, 181, 2, 2, 2, 10, 4, 1, 10), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: dsRpFrCur2HFpSent.setStatus('mandatory')
dsRpFrCur2HStatus = MibTableColumn((1, 3, 6, 1, 4, 1, 181, 2, 2, 2, 10, 4, 1, 11), DisplayString().subtype(subtypeSpec=ValueSizeConstraint(0, 7))).setMaxAccess("readonly")
if mibBuilder.loadTexts: dsRpFrCur2HStatus.setStatus('mandatory')
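# -- dsRpFrIntvl2H*: completed 2-hour frame relay interval buckets,
# -- up to 12 per VC and direction.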
dsRpFrIntvl2HTable = MibTable((1, 3, 6, 1, 4, 1, 181, 2, 2, 2, 10, 5), )
if mibBuilder.loadTexts: dsRpFrIntvl2HTable.setStatus('mandatory')
dsRpFrIntvl2HEntry = MibTableRow((1, 3, 6, 1, 4, 1, 181, 2, 2, 2, 10, 5, 1), ).setIndexNames((0, "DATASMART-MIB", "dsRpFrIntvl2HDir"), (0, "DATASMART-MIB", "dsRpFrIntvl2HVcIndex"), (0, "DATASMART-MIB", "dsRpFrIntvl2HNum"))
if mibBuilder.loadTexts: dsRpFrIntvl2HEntry.setStatus('mandatory')
dsRpFrIntvl2HDir = MibTableColumn((1, 3, 6, 1, 4, 1, 181, 2, 2, 2, 10, 5, 1, 1), Integer32().subtype(subtypeSpec=ValueRangeConstraint(1, 2))).setMaxAccess("readonly")
if mibBuilder.loadTexts: dsRpFrIntvl2HDir.setStatus('mandatory')
dsRpFrIntvl2HVcIndex = MibTableColumn((1, 3, 6, 1, 4, 1, 181, 2, 2, 2, 10, 5, 1, 2), Integer32().subtype(subtypeSpec=ValueRangeConstraint(1, 65))).setMaxAccess("readonly")
if mibBuilder.loadTexts: dsRpFrIntvl2HVcIndex.setStatus('mandatory')
dsRpFrIntvl2HNum = MibTableColumn((1, 3, 6, 1, 4, 1, 181, 2, 2, 2, 10, 5, 1, 3), Integer32().subtype(subtypeSpec=ValueRangeConstraint(1, 12))).setMaxAccess("readonly")
if mibBuilder.loadTexts: dsRpFrIntvl2HNum.setStatus('mandatory')
dsRpFrIntvl2HVc = MibTableColumn((1, 3, 6, 1, 4, 1, 181, 2, 2, 2, 10, 5, 1, 4), Integer32().subtype(subtypeSpec=ValueRangeConstraint(1, 8388607))).setMaxAccess("readonly")
if mibBuilder.loadTexts: dsRpFrIntvl2HVc.setStatus('mandatory')
dsRpFrIntvl2HFrames = MibTableColumn((1, 3, 6, 1, 4, 1, 181, 2, 2, 2, 10, 5, 1, 5), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: dsRpFrIntvl2HFrames.setStatus('mandatory')
dsRpFrIntvl2HOctets = MibTableColumn((1, 3, 6, 1, 4, 1, 181, 2, 2, 2, 10, 5, 1, 6), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: dsRpFrIntvl2HOctets.setStatus('mandatory')
dsRpFrIntvl2HKbps = MibTableColumn((1, 3, 6, 1, 4, 1, 181, 2, 2, 2, 10, 5, 1, 7), Gauge32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: dsRpFrIntvl2HKbps.setStatus('mandatory')
dsRpFrIntvl2HFpMax = MibTableColumn((1, 3, 6, 1, 4, 1, 181, 2, 2, 2, 10, 5, 1, 8), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: dsRpFrIntvl2HFpMax.setStatus('mandatory')
dsRpFrIntvl2HFpAvg = MibTableColumn((1, 3, 6, 1, 4, 1, 181, 2, 2, 2, 10, 5, 1, 9), Gauge32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: dsRpFrIntvl2HFpAvg.setStatus('mandatory')
dsRpFrIntvl2HFpLost = MibTableColumn((1, 3, 6, 1, 4, 1, 181, 2, 2, 2, 10, 5, 1, 10), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: dsRpFrIntvl2HFpLost.setStatus('mandatory')
dsRpFrIntvl2HFpSent = MibTableColumn((1, 3, 6, 1, 4, 1, 181, 2, 2, 2, 10, 5, 1, 11), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: dsRpFrIntvl2HFpSent.setStatus('mandatory')
dsRpFrIntvl2HStatus = MibTableColumn((1, 3, 6, 1, 4, 1, 181, 2, 2, 2, 10, 5, 1, 12), DisplayString().subtype(subtypeSpec=ValueSizeConstraint(0, 7))).setMaxAccess("readonly")
if mibBuilder.loadTexts: dsRpFrIntvl2HStatus.setStatus('mandatory')
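# -- dsRpFrTotal*: frame relay statistics per VC totalled over the whole
# -- collection period.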
dsRpFrTotalTable = MibTable((1, 3, 6, 1, 4, 1, 181, 2, 2, 2, 10, 6), )
if mibBuilder.loadTexts: dsRpFrTotalTable.setStatus('mandatory')
dsRpFrTotalEntry = MibTableRow((1, 3, 6, 1, 4, 1, 181, 2, 2, 2, 10, 6, 1), ).setIndexNames((0, "DATASMART-MIB", "dsRpFrTotalDir"), (0, "DATASMART-MIB", "dsRpFrTotalVcIndex"))
if mibBuilder.loadTexts: dsRpFrTotalEntry.setStatus('mandatory')
dsRpFrTotalDir = MibTableColumn((1, 3, 6, 1, 4, 1, 181, 2, 2, 2, 10, 6, 1, 1), Integer32().subtype(subtypeSpec=ValueRangeConstraint(1, 2))).setMaxAccess("readonly")
if mibBuilder.loadTexts: dsRpFrTotalDir.setStatus('mandatory')
dsRpFrTotalVcIndex = MibTableColumn((1, 3, 6, 1, 4, 1, 181, 2, 2, 2, 10, 6, 1, 2), Integer32().subtype(subtypeSpec=ValueRangeConstraint(1, 65))).setMaxAccess("readonly")
if mibBuilder.loadTexts: dsRpFrTotalVcIndex.setStatus('mandatory')
dsRpFrTotalVc = MibTableColumn((1, 3, 6, 1, 4, 1, 181, 2, 2, 2, 10, 6, 1, 3), Integer32().subtype(subtypeSpec=ValueRangeConstraint(1, 8388607))).setMaxAccess("readonly")
if mibBuilder.loadTexts: dsRpFrTotalVc.setStatus('mandatory')
dsRpFrTotalFrames = MibTableColumn((1, 3, 6, 1, 4, 1, 181, 2, 2, 2, 10, 6, 1, 4), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: dsRpFrTotalFrames.setStatus('mandatory')
dsRpFrTotalOctets = MibTableColumn((1, 3, 6, 1, 4, 1, 181, 2, 2, 2, 10, 6, 1, 5), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: dsRpFrTotalOctets.setStatus('mandatory')
dsRpFrTotalKbps = MibTableColumn((1, 3, 6, 1, 4, 1, 181, 2, 2, 2, 10, 6, 1, 6), Gauge32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: dsRpFrTotalKbps.setStatus('mandatory')
dsRpFrTotalFpMax = MibTableColumn((1, 3, 6, 1, 4, 1, 181, 2, 2, 2, 10, 6, 1, 7), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: dsRpFrTotalFpMax.setStatus('mandatory')
dsRpFrTotalFpAvg = MibTableColumn((1, 3, 6, 1, 4, 1, 181, 2, 2, 2, 10, 6, 1, 8), Gauge32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: dsRpFrTotalFpAvg.setStatus('mandatory')
dsRpFrTotalFpLost = MibTableColumn((1, 3, 6, 1, 4, 1, 181, 2, 2, 2, 10, 6, 1, 9), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: dsRpFrTotalFpLost.setStatus('mandatory')
dsRpFrTotalFpSent = MibTableColumn((1, 3, 6, 1, 4, 1, 181, 2, 2, 2, 10, 6, 1, 10), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: dsRpFrTotalFpSent.setStatus('mandatory')
dsRpFrTotalStatus = MibTableColumn((1, 3, 6, 1, 4, 1, 181, 2, 2, 2, 10, 6, 1, 11), DisplayString().subtype(subtypeSpec=ValueSizeConstraint(0, 7))).setMaxAccess("readonly")
if mibBuilder.loadTexts: dsRpFrTotalStatus.setStatus('mandatory')
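# -- dsRpFrDay*: per-day frame relay statistics per VC and direction.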
dsRpFrDayTable = MibTable((1, 3, 6, 1, 4, 1, 181, 2, 2, 2, 10, 7), )
if mibBuilder.loadTexts: dsRpFrDayTable.setStatus('mandatory')
dsRpFrDayEntry = MibTableRow((1, 3, 6, 1, 4, 1, 181, 2, 2, 2, 10, 7, 1), ).setIndexNames((0, "DATASMART-MIB", "dsRpFrDayDir"), (0, "DATASMART-MIB", "dsRpFrDayVcIndex"), (0, "DATASMART-MIB", "dsRpFrDayNum"))
if mibBuilder.loadTexts: dsRpFrDayEntry.setStatus('mandatory')
dsRpFrDayDir = MibTableColumn((1, 3, 6, 1, 4, 1, 181, 2, 2, 2, 10, 7, 1, 1), Integer32().subtype(subtypeSpec=ValueRangeConstraint(1, 2))).setMaxAccess("readonly")
if mibBuilder.loadTexts: dsRpFrDayDir.setStatus('mandatory')
dsRpFrDayVcIndex = MibTableColumn((1, 3, 6, 1, 4, 1, 181, 2, 2, 2, 10, 7, 1, 2), Integer32().subtype(subtypeSpec=ValueRangeConstraint(1, 65))).setMaxAccess("readonly")
if mibBuilder.loadTexts: dsRpFrDayVcIndex.setStatus('mandatory')
dsRpFrDayNum = MibTableColumn((1, 3, 6, 1, 4, 1, 181, 2, 2, 2, 10, 7, 1, 3), Integer32().subtype(subtypeSpec=ValueRangeConstraint(1, 12))).setMaxAccess("readonly")
if mibBuilder.loadTexts: dsRpFrDayNum.setStatus('mandatory')
dsRpFrDayVc = MibTableColumn((1, 3, 6, 1, 4, 1, 181, 2, 2, 2, 10, 7, 1, 4), Integer32().subtype(subtypeSpec=ValueRangeConstraint(1, 8388607))).setMaxAccess("readonly")
if mibBuilder.loadTexts: dsRpFrDayVc.setStatus('mandatory')
dsRpFrDayFrames = MibTableColumn((1, 3, 6, 1, 4, 1, 181, 2, 2, 2, 10, 7, 1, 5), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: dsRpFrDayFrames.setStatus('mandatory')
dsRpFrDayOctets = MibTableColumn((1, 3, 6, 1, 4, 1, 181, 2, 2, 2, 10, 7, 1, 6), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: dsRpFrDayOctets.setStatus('mandatory')
dsRpFrDayKbps = MibTableColumn((1, 3, 6, 1, 4, 1, 181, 2, 2, 2, 10, 7, 1, 7), Gauge32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: dsRpFrDayKbps.setStatus('mandatory')
dsRpFrDayFpMax = MibTableColumn((1, 3, 6, 1, 4, 1, 181, 2, 2, 2, 10, 7, 1, 8), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: dsRpFrDayFpMax.setStatus('mandatory')
dsRpFrDayFpAvg = MibTableColumn((1, 3, 6, 1, 4, 1, 181, 2, 2, 2, 10, 7, 1, 9), Gauge32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: dsRpFrDayFpAvg.setStatus('mandatory')
dsRpFrDayFpLost = MibTableColumn((1, 3, 6, 1, 4, 1, 181, 2, 2, 2, 10, 7, 1, 10), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: dsRpFrDayFpLost.setStatus('mandatory')
dsRpFrDayFpSent = MibTableColumn((1, 3, 6, 1, 4, 1, 181, 2, 2, 2, 10, 7, 1, 11), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: dsRpFrDayFpSent.setStatus('mandatory')
dsRpFrDayStatus = MibTableColumn((1, 3, 6, 1, 4, 1, 181, 2, 2, 2, 10, 7, 1, 12), DisplayString().subtype(subtypeSpec=ValueSizeConstraint(0, 7))).setMaxAccess("readonly")
if mibBuilder.loadTexts: dsRpFrDayStatus.setStatus('mandatory')
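# -- dsRpFrUr*: usage-rate report per VC -- frame and octet counts that
# -- exceeded the committed (CIR) and excess (EIR) information rates.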
dsRpFrUrTable = MibTable((1, 3, 6, 1, 4, 1, 181, 2, 2, 2, 10, 8), )
if mibBuilder.loadTexts: dsRpFrUrTable.setStatus('mandatory')
dsRpFrUrEntry = MibTableRow((1, 3, 6, 1, 4, 1, 181, 2, 2, 2, 10, 8, 1), ).setIndexNames((0, "DATASMART-MIB", "dsRpFrUrDir"), (0, "DATASMART-MIB", "dsRpFrUrVcIndex"))
if mibBuilder.loadTexts: dsRpFrUrEntry.setStatus('mandatory')
dsRpFrUrDir = MibTableColumn((1, 3, 6, 1, 4, 1, 181, 2, 2, 2, 10, 8, 1, 1), Integer32().subtype(subtypeSpec=ValueRangeConstraint(1, 2))).setMaxAccess("readonly")
if mibBuilder.loadTexts: dsRpFrUrDir.setStatus('mandatory')
dsRpFrUrVcIndex = MibTableColumn((1, 3, 6, 1, 4, 1, 181, 2, 2, 2, 10, 8, 1, 2), Integer32().subtype(subtypeSpec=ValueRangeConstraint(1, 65))).setMaxAccess("readonly")
if mibBuilder.loadTexts: dsRpFrUrVcIndex.setStatus('mandatory')
dsRpFrUrVc = MibTableColumn((1, 3, 6, 1, 4, 1, 181, 2, 2, 2, 10, 8, 1, 3), Integer32().subtype(subtypeSpec=ValueRangeConstraint(1, 8388607))).setMaxAccess("readonly")
if mibBuilder.loadTexts: dsRpFrUrVc.setStatus('mandatory')
dsRpFrUrCIRExceeded = MibTableColumn((1, 3, 6, 1, 4, 1, 181, 2, 2, 2, 10, 8, 1, 4), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: dsRpFrUrCIRExceeded.setStatus('mandatory')
dsRpFrUrCIRExceededOctets = MibTableColumn((1, 3, 6, 1, 4, 1, 181, 2, 2, 2, 10, 8, 1, 5), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: dsRpFrUrCIRExceededOctets.setStatus('mandatory')
dsRpFrUrEIRExceeded = MibTableColumn((1, 3, 6, 1, 4, 1, 181, 2, 2, 2, 10, 8, 1, 6), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: dsRpFrUrEIRExceeded.setStatus('mandatory')
dsRpFrUrEIRExceededOctets = MibTableColumn((1, 3, 6, 1, 4, 1, 181, 2, 2, 2, 10, 8, 1, 7), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: dsRpFrUrEIRExceededOctets.setStatus('mandatory')
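# -- dsRpDds*: DDS report -- accumulated duration plus per-interface
# -- available seconds, total seconds and BPV counts.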
dsRpDdsDuration = MibScalar((1, 3, 6, 1, 4, 1, 181, 2, 2, 2, 11), TimeTicks()).setMaxAccess("readonly")
if mibBuilder.loadTexts: dsRpDdsDuration.setStatus('mandatory')
dsRpDdsTable = MibTable((1, 3, 6, 1, 4, 1, 181, 2, 2, 2, 12), )
if mibBuilder.loadTexts: dsRpDdsTable.setStatus('mandatory')
dsRpDdsEntry = MibTableRow((1, 3, 6, 1, 4, 1, 181, 2, 2, 2, 12, 1), ).setIndexNames((0, "DATASMART-MIB", "dsRpDdsIfIndex"))
if mibBuilder.loadTexts: dsRpDdsEntry.setStatus('mandatory')
dsRpDdsIfIndex = MibTableColumn((1, 3, 6, 1, 4, 1, 181, 2, 2, 2, 12, 1, 1), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: dsRpDdsIfIndex.setStatus('mandatory')
dsRpDdsAvailableSecs = MibTableColumn((1, 3, 6, 1, 4, 1, 181, 2, 2, 2, 12, 1, 2), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: dsRpDdsAvailableSecs.setStatus('mandatory')
dsRpDdsTotalSecs = MibTableColumn((1, 3, 6, 1, 4, 1, 181, 2, 2, 2, 12, 1, 3), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: dsRpDdsTotalSecs.setStatus('mandatory')
dsRpDdsBPVs = MibTableColumn((1, 3, 6, 1, 4, 1, 181, 2, 2, 2, 12, 1, 4), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: dsRpDdsBPVs.setStatus('mandatory')
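# -- dsLm*: local maintenance -- loopback selection plus self-test
# -- start control and a textual results string.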
dsLmLoopback = MibScalar((1, 3, 6, 1, 4, 1, 181, 2, 2, 3, 1), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12))).clone(namedValues=NamedValues(("lmLbkNone", 1), ("lmLbkLine", 2), ("lmLbkPayload", 3), ("lmLbkLocal", 4), ("lmLbkTiTest", 5), ("lmLbkDp1", 6), ("lmLbkDp2", 7), ("lmLbkDt1", 8), ("lmLbkDt2", 9), ("lmLbkCsu", 10), ("lmLbkDsu", 11), ("lmLbkDpdt", 12)))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: dsLmLoopback.setStatus('mandatory')
dsLmSelfTestState = MibScalar((1, 3, 6, 1, 4, 1, 181, 2, 2, 3, 2), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2))).clone(namedValues=NamedValues(("lmSelfTestIdle", 1), ("lmSelfTestStart", 2)))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: dsLmSelfTestState.setStatus('mandatory')
dsLmSelfTestResults = MibScalar((1, 3, 6, 1, 4, 1, 181, 2, 2, 3, 3), DisplayString().subtype(subtypeSpec=ValueSizeConstraint(0, 255))).setMaxAccess("readonly")
if mibBuilder.loadTexts: dsLmSelfTestResults.setStatus('mandatory')
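# -- dsRm*: remote maintenance -- remote loopback codes, BERT pattern
# -- selection, state and result counters.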
dsRmLbkCode = MibScalar((1, 3, 6, 1, 4, 1, 181, 2, 2, 4, 1), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2, 3, 4, 5, 6))).clone(namedValues=NamedValues(("rmRNone", 1), ("rmRst1", 2), ("rmRLine", 3), ("rmRPayload", 4), ("rmRDp1", 5), ("rmRDp2", 6)))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: dsRmLbkCode.setStatus('mandatory')
dsRmTestCode = MibScalar((1, 3, 6, 1, 4, 1, 181, 2, 2, 4, 2), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11))).clone(namedValues=NamedValues(("rmTestNone", 1), ("rmTestQrs", 2), ("rmTest324", 3), ("rmTestOnes", 4), ("rmTestZeros", 5), ("rmTest511Dp1", 6), ("rmTest511Dp2", 7), ("rmTest2047Dp1", 8), ("rmTest2047Dp2", 9), ("rmTest2toThe23", 10), ("rmTest2toThe15", 11)))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: dsRmTestCode.setStatus('mandatory')
dsRmBertState = MibScalar((1, 3, 6, 1, 4, 1, 181, 2, 2, 4, 3), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2, 3, 4))).clone(namedValues=NamedValues(("rmBertIdle", 1), ("rmBertOtherStart", 2), ("rmBertSearching", 3), ("rmBertFound", 4)))).setMaxAccess("readonly")
if mibBuilder.loadTexts: dsRmBertState.setStatus('mandatory')
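# NOTE: the last two labels below ("rmTest2toThe23"/"rmTest2toThe15") keep
# the "rmTest" prefix used by dsRmTestCode rather than "rmBert"; they are
# preserved as-is to match the source MIB's named values.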
dsRmBertCode = MibScalar((1, 3, 6, 1, 4, 1, 181, 2, 2, 4, 4), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11))).clone(namedValues=NamedValues(("rmBertNone", 1), ("rmBertQrs", 2), ("rmBert324", 3), ("rmBertOnes", 4), ("rmBertZeros", 5), ("rmBert511Dp1", 6), ("rmBert511Dp2", 7), ("rmBert2047Dp1", 8), ("rmBert2047Dp2", 9), ("rmTest2toThe23", 10), ("rmTest2toThe15", 11)))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: dsRmBertCode.setStatus('mandatory')
dsRmBertTestSecs = MibScalar((1, 3, 6, 1, 4, 1, 181, 2, 2, 4, 5), Integer32().subtype(subtypeSpec=ValueRangeConstraint(0, 2147483647))).setMaxAccess("readonly")
if mibBuilder.loadTexts: dsRmBertTestSecs.setStatus('mandatory')
dsRmBertBitErrors = MibScalar((1, 3, 6, 1, 4, 1, 181, 2, 2, 4, 6), Integer32().subtype(subtypeSpec=ValueRangeConstraint(0, 2147483647))).setMaxAccess("readonly")
if mibBuilder.loadTexts: dsRmBertBitErrors.setStatus('mandatory')
dsRmBertErrdSecs = MibScalar((1, 3, 6, 1, 4, 1, 181, 2, 2, 4, 7), Integer32().subtype(subtypeSpec=ValueRangeConstraint(0, 2147483647))).setMaxAccess("readonly")
if mibBuilder.loadTexts: dsRmBertErrdSecs.setStatus('mandatory')
dsRmBertTotalErrors = MibScalar((1, 3, 6, 1, 4, 1, 181, 2, 2, 4, 8), Integer32().subtype(subtypeSpec=ValueRangeConstraint(0, 2147483647))).setMaxAccess("readonly")
if mibBuilder.loadTexts: dsRmBertTotalErrors.setStatus('mandatory')
dsRmBertReSync = MibScalar((1, 3, 6, 1, 4, 1, 181, 2, 2, 4, 9), Integer32().subtype(subtypeSpec=ValueRangeConstraint(0, 2147483647))).setMaxAccess("readonly")
if mibBuilder.loadTexts: dsRmBertReSync.setStatus('mandatory')
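# -- dsRmFping*: frame relay "flow ping" test -- start/stop control,
# -- target VC, send frequency and length, and round-trip time results.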
dsRmFping = MibIdentifier((1, 3, 6, 1, 4, 1, 181, 2, 2, 4, 10))
dsRmFpingAction = MibScalar((1, 3, 6, 1, 4, 1, 181, 2, 2, 4, 10, 1), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2))).clone(namedValues=NamedValues(("rmFpingStart", 1), ("rmFpingStop", 2)))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: dsRmFpingAction.setStatus('mandatory')
dsRmFpingState = MibScalar((1, 3, 6, 1, 4, 1, 181, 2, 2, 4, 10, 2), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2, 3))).clone(namedValues=NamedValues(("rmFpingIdle", 1), ("rmFpingOtherStart", 2), ("rmFpingRunning", 3)))).setMaxAccess("readonly")
if mibBuilder.loadTexts: dsRmFpingState.setStatus('mandatory')
dsRmFpingVc = MibScalar((1, 3, 6, 1, 4, 1, 181, 2, 2, 4, 10, 3), Integer32().subtype(subtypeSpec=ValueRangeConstraint(0, 8388607))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: dsRmFpingVc.setStatus('mandatory')
dsRmFpingFreq = MibScalar((1, 3, 6, 1, 4, 1, 181, 2, 2, 4, 10, 4), Integer32().subtype(subtypeSpec=ValueRangeConstraint(5, 255))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: dsRmFpingFreq.setStatus('mandatory')
dsRmFpingLen = MibScalar((1, 3, 6, 1, 4, 1, 181, 2, 2, 4, 10, 5), Integer32().subtype(subtypeSpec=ValueRangeConstraint(0, 1400))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: dsRmFpingLen.setStatus('mandatory')
dsRmFpingCur = MibScalar((1, 3, 6, 1, 4, 1, 181, 2, 2, 4, 10, 6), Integer32().subtype(subtypeSpec=ValueRangeConstraint(0, 2000))).setMaxAccess("readonly")
if mibBuilder.loadTexts: dsRmFpingCur.setStatus('mandatory')
dsRmFpingMin = MibScalar((1, 3, 6, 1, 4, 1, 181, 2, 2, 4, 10, 7), Integer32().subtype(subtypeSpec=ValueRangeConstraint(0, 2000))).setMaxAccess("readonly")
if mibBuilder.loadTexts: dsRmFpingMin.setStatus('mandatory')
dsRmFpingMax = MibScalar((1, 3, 6, 1, 4, 1, 181, 2, 2, 4, 10, 8), Integer32().subtype(subtypeSpec=ValueRangeConstraint(0, 2000))).setMaxAccess("readonly")
if mibBuilder.loadTexts: dsRmFpingMax.setStatus('mandatory')
dsRmFpingAvg = MibScalar((1, 3, 6, 1, 4, 1, 181, 2, 2, 4, 10, 9), Integer32().subtype(subtypeSpec=ValueRangeConstraint(0, 2000))).setMaxAccess("readonly")
if mibBuilder.loadTexts: dsRmFpingAvg.setStatus('mandatory')
dsRmFpingLost = MibScalar((1, 3, 6, 1, 4, 1, 181, 2, 2, 4, 10, 10), Integer32().subtype(subtypeSpec=ValueRangeConstraint(0, 65535))).setMaxAccess("readonly")
if mibBuilder.loadTexts: dsRmFpingLost.setStatus('mandatory')
dsRmFpingTotal = MibScalar((1, 3, 6, 1, 4, 1, 181, 2, 2, 4, 10, 11), Integer32().subtype(subtypeSpec=ValueRangeConstraint(0, 65535))).setMaxAccess("readonly")
if mibBuilder.loadTexts: dsRmFpingTotal.setStatus('mandatory')
dsRmFpingRmtVc = MibScalar((1, 3, 6, 1, 4, 1, 181, 2, 2, 4, 10, 12), DisplayString().subtype(subtypeSpec=ValueSizeConstraint(0, 8))).setMaxAccess("readonly")
if mibBuilder.loadTexts: dsRmFpingRmtVc.setStatus('mandatory')
dsRmFpingRmtIp = MibScalar((1, 3, 6, 1, 4, 1, 181, 2, 2, 4, 10, 13), IpAddress()).setMaxAccess("readonly")
if mibBuilder.loadTexts: dsRmFpingRmtIp.setStatus('mandatory')
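# A minimal polling sketch for the fping results above (kept as a comment
# so this generated module stays import-safe); the agent address and
# community string are assumptions:
#
#   from pysnmp.hlapi import (SnmpEngine, CommunityData, UdpTransportTarget,
#                             ContextData, ObjectType, ObjectIdentity, getCmd)
#   errInd, errStat, errIdx, varBinds = next(getCmd(
#       SnmpEngine(), CommunityData('public'),
#       UdpTransportTarget(('192.0.2.1', 161)), ContextData(),
#       ObjectType(ObjectIdentity('DATASMART-MIB', 'dsRmFpingAvg', 0)),
#       ObjectType(ObjectIdentity('DATASMART-MIB', 'dsRmFpingLost', 0))))
#   # varBinds then holds the average round-trip time and lost-frame count.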
dsRmInsertBitError = MibScalar((1, 3, 6, 1, 4, 1, 181, 2, 2, 4, 11), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2))).clone(namedValues=NamedValues(("insertBitError", 1), ("noInsertBitError", 2)))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: dsRmInsertBitError.setStatus('mandatory')
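# -- dsAc*: alarm configuration -- per-condition alarm enables
# -- (message/yellow/BER/RFA/AIS), timing thresholds and the power
# -- transition notifications below.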
dsAcAlmMsg = MibScalar((1, 3, 6, 1, 4, 1, 181, 2, 2, 5, 1), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2))).clone(namedValues=NamedValues(("acAlmMsgEnable", 1), ("acAlmMsgDisable", 2)))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: dsAcAlmMsg.setStatus('mandatory')
dsAcYelAlm = MibScalar((1, 3, 6, 1, 4, 1, 181, 2, 2, 5, 2), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2))).clone(namedValues=NamedValues(("acYelAlmEnable", 1), ("acYelAlmDisable", 2)))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: dsAcYelAlm.setStatus('mandatory')
dsAcDeact = MibScalar((1, 3, 6, 1, 4, 1, 181, 2, 2, 5, 3), Integer32().subtype(subtypeSpec=ValueRangeConstraint(1, 15))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: dsAcDeact.setStatus('mandatory')
dsAcEst = MibScalar((1, 3, 6, 1, 4, 1, 181, 2, 2, 5, 4), Integer32().subtype(subtypeSpec=ValueRangeConstraint(0, 900))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: dsAcEst.setStatus('mandatory')
dsAcUst = MibScalar((1, 3, 6, 1, 4, 1, 181, 2, 2, 5, 5), Integer32().subtype(subtypeSpec=ValueRangeConstraint(0, 900))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: dsAcUst.setStatus('mandatory')
dsAcSt = MibScalar((1, 3, 6, 1, 4, 1, 181, 2, 2, 5, 6), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2))).clone(namedValues=NamedValues(("acSt15", 1), ("acSt60", 2)))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: dsAcSt.setStatus('mandatory')
dsAcBerAlm = MibScalar((1, 3, 6, 1, 4, 1, 181, 2, 2, 5, 7), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2))).clone(namedValues=NamedValues(("acBerAlmEnable", 1), ("acBerAlmDisable", 2)))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: dsAcBerAlm.setStatus('mandatory')
dsAcRfaAlm = MibScalar((1, 3, 6, 1, 4, 1, 181, 2, 2, 5, 8), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2))).clone(namedValues=NamedValues(("acRfaAlmEnable", 1), ("acRfaAlmDisable", 2)))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: dsAcRfaAlm.setStatus('mandatory')
dsAcAisAlm = MibScalar((1, 3, 6, 1, 4, 1, 181, 2, 2, 5, 9), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2))).clone(namedValues=NamedValues(("acAisAlmEnable", 1), ("acAisAlmDisable", 2)))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: dsAcAisAlm.setStatus('mandatory')
dsAcOnPowerTransition = NotificationType((1, 3, 6, 1, 4, 1, 181, 2, 2) + (0, 5005)).setObjects(("DATASMART-MIB", "dsSsPowerStatus"))
dsAcOffPowerTransition = NotificationType((1, 3, 6, 1, 4, 1, 181, 2, 2) + (0, 5006)).setObjects(("DATASMART-MIB", "dsSsPowerStatus"))
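# -- dsCc*: control-port configuration -- echo and DCE/DTE role are
# -- writable; baud, parity, data/stop bits and lead states are read-only.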
dsCcEcho = MibScalar((1, 3, 6, 1, 4, 1, 181, 2, 2, 6, 1), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2))).clone(namedValues=NamedValues(("ccEchoEnable", 1), ("ccEchoDisable", 2)))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: dsCcEcho.setStatus('mandatory')
dsCcControlPort = MibScalar((1, 3, 6, 1, 4, 1, 181, 2, 2, 6, 2), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2))).clone(namedValues=NamedValues(("ccDce", 1), ("ccDte", 2)))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: dsCcControlPort.setStatus('mandatory')
dsCcBaud = MibScalar((1, 3, 6, 1, 4, 1, 181, 2, 2, 6, 3), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2, 3, 4))).clone(namedValues=NamedValues(("cc2400", 1), ("cc9600", 2), ("cc19200", 3), ("cc38400", 4)))).setMaxAccess("readonly")
if mibBuilder.loadTexts: dsCcBaud.setStatus('mandatory')
dsCcParity = MibScalar((1, 3, 6, 1, 4, 1, 181, 2, 2, 6, 4), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2, 3))).clone(namedValues=NamedValues(("ccNone", 1), ("ccEven", 2), ("ccOdd", 3)))).setMaxAccess("readonly")
if mibBuilder.loadTexts: dsCcParity.setStatus('mandatory')
dsCcDataBits = MibScalar((1, 3, 6, 1, 4, 1, 181, 2, 2, 6, 5), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2))).clone(namedValues=NamedValues(("cc7Bit", 1), ("cc8Bit", 2)))).setMaxAccess("readonly")
if mibBuilder.loadTexts: dsCcDataBits.setStatus('mandatory')
dsCcStopBits = MibScalar((1, 3, 6, 1, 4, 1, 181, 2, 2, 6, 6), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2))).clone(namedValues=NamedValues(("cc1Bit", 1), ("cc2Bit", 2)))).setMaxAccess("readonly")
if mibBuilder.loadTexts: dsCcStopBits.setStatus('mandatory')
dsCcDceIn = MibScalar((1, 3, 6, 1, 4, 1, 181, 2, 2, 6, 7), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2, 3, 4))).clone(namedValues=NamedValues(("ccBothOff", 1), ("ccRtsOnDtrOff", 2), ("ccRtsOffDtrOn", 3), ("ccBothOn", 4)))).setMaxAccess("readonly")
if mibBuilder.loadTexts: dsCcDceIn.setStatus('mandatory')
dsCcDteIn = MibScalar((1, 3, 6, 1, 4, 1, 181, 2, 2, 6, 8), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2, 3, 4))).clone(namedValues=NamedValues(("ccBothOff", 1), ("ccCtsOnDcdOff", 2), ("ccCtsOffDcdOn", 3), ("ccBothOn", 4)))).setMaxAccess("readonly")
if mibBuilder.loadTexts: dsCcDteIn.setStatus('mandatory')
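# -- dsDc*: data-port configuration table -- data/clock inversion,
# -- interface type, clock source, idle character and LOS input leads.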
dsDcTable = MibTable((1, 3, 6, 1, 4, 1, 181, 2, 2, 7, 1), )
if mibBuilder.loadTexts: dsDcTable.setStatus('mandatory')
dsDcEntry = MibTableRow((1, 3, 6, 1, 4, 1, 181, 2, 2, 7, 1, 1), ).setIndexNames((0, "DATASMART-MIB", "dsDcIndex"))
if mibBuilder.loadTexts: dsDcEntry.setStatus('mandatory')
dsDcIndex = MibTableColumn((1, 3, 6, 1, 4, 1, 181, 2, 2, 7, 1, 1, 1), Integer32().subtype(subtypeSpec=ValueRangeConstraint(1, 2))).setMaxAccess("readonly")
if mibBuilder.loadTexts: dsDcIndex.setStatus('mandatory')
dsDcDataInvert = MibTableColumn((1, 3, 6, 1, 4, 1, 181, 2, 2, 7, 1, 1, 2), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2))).clone(namedValues=NamedValues(("dcDataInvertEnable", 1), ("dcDataInvertDisable", 2)))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: dsDcDataInvert.setStatus('mandatory')
dsDcInterface = MibTableColumn((1, 3, 6, 1, 4, 1, 181, 2, 2, 7, 1, 1, 3), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2, 3))).clone(namedValues=NamedValues(("dcV35Interface", 1), ("dcEia530Interface", 2), ("dcV35DSInterface", 3)))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: dsDcInterface.setStatus('mandatory')
dsDcClockSource = MibTableColumn((1, 3, 6, 1, 4, 1, 181, 2, 2, 7, 1, 1, 4), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2))).clone(namedValues=NamedValues(("dcInternalClock", 1), ("dcExternalClock", 2)))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: dsDcClockSource.setStatus('mandatory')
dsDcXmtClkInvert = MibTableColumn((1, 3, 6, 1, 4, 1, 181, 2, 2, 7, 1, 1, 5), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2))).clone(namedValues=NamedValues(("dcXClkInvertEnable", 1), ("dcXClkInvertDisable", 2)))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: dsDcXmtClkInvert.setStatus('mandatory')
dsDcRcvClkInvert = MibTableColumn((1, 3, 6, 1, 4, 1, 181, 2, 2, 7, 1, 1, 6), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2))).clone(namedValues=NamedValues(("dcRClkInvertEnable", 1), ("dcRClkInvertDisable", 2)))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: dsDcRcvClkInvert.setStatus('mandatory')
dsDcIdleChar = MibTableColumn((1, 3, 6, 1, 4, 1, 181, 2, 2, 7, 1, 1, 7), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2, 3))).clone(namedValues=NamedValues(("dc7eIdleChar", 1), ("dc7fIdleChar", 2), ("dcffIdleChar", 3)))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: dsDcIdleChar.setStatus('mandatory')
dsDcLOSInput = MibTableColumn((1, 3, 6, 1, 4, 1, 181, 2, 2, 7, 1, 1, 8), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2, 3, 4))).clone(namedValues=NamedValues(("dcLosNone", 1), ("dcLosRTS", 2), ("dcLosDTR", 3), ("dcLosBoth", 4)))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: dsDcLOSInput.setStatus('mandatory')
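# -- dsFc*: fractional channel configuration -- load/execute control for
# -- the stored maps (A/B) and per-channel assignments across the three
# -- indexed map tables.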
dsFcLoadXcute = MibScalar((1, 3, 6, 1, 4, 1, 181, 2, 2, 8, 1), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2, 3))).clone(namedValues=NamedValues(("fcLoadXcuteIdle", 1), ("fcLoadXcuteStartA", 2), ("fcLoadXcuteStartB", 3)))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: dsFcLoadXcute.setStatus('mandatory')
dsFcTable = MibTable((1, 3, 6, 1, 4, 1, 181, 2, 2, 8, 2), )
if mibBuilder.loadTexts: dsFcTable.setStatus('mandatory')
dsFcEntry = MibTableRow((1, 3, 6, 1, 4, 1, 181, 2, 2, 8, 2, 1), ).setIndexNames((0, "DATASMART-MIB", "dsFcTableIndex"), (0, "DATASMART-MIB", "dsFcChanIndex"))
if mibBuilder.loadTexts: dsFcEntry.setStatus('mandatory')
dsFcTableIndex = MibTableColumn((1, 3, 6, 1, 4, 1, 181, 2, 2, 8, 2, 1, 1), Integer32().subtype(subtypeSpec=ValueRangeConstraint(1, 3))).setMaxAccess("readonly")
if mibBuilder.loadTexts: dsFcTableIndex.setStatus('mandatory')
dsFcChanIndex = MibTableColumn((1, 3, 6, 1, 4, 1, 181, 2, 2, 8, 2, 1, 2), Integer32().subtype(subtypeSpec=ValueRangeConstraint(1, 31))).setMaxAccess("readonly")
if mibBuilder.loadTexts: dsFcChanIndex.setStatus('mandatory')
dsFcChanMap = MibTableColumn((1, 3, 6, 1, 4, 1, 181, 2, 2, 8, 2, 1, 3), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2, 3, 4, 5, 6, 7, 8, 9, 10))).clone(namedValues=NamedValues(("fcChanIdle", 1), ("fcChanTiData", 2), ("fcChanTiVoice", 3), ("fcChan56Dp1", 4), ("fcChan64Dp1", 5), ("fcChan56Dp2", 6), ("fcChan64Dp2", 7), ("fcChanDLNK", 8), ("fcChanDPDL", 9), ("fcChanUnav", 10)))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: dsFcChanMap.setStatus('mandatory')
dsFcMap16 = MibScalar((1, 3, 6, 1, 4, 1, 181, 2, 2, 8, 3), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2))).clone(namedValues=NamedValues(("fcMap16Used", 1), ("fcMap16Unused", 2)))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: dsFcMap16.setStatus('mandatory')
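# -- dsFmc*: frame monitor configuration -- frame type, address and FCS
# -- sizes, upper bandwidth threshold, fping operation and VC add/delete,
# -- plus the threshold/link notifications that follow.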
dsFmcFrameType = MibScalar((1, 3, 6, 1, 4, 1, 181, 2, 2, 9, 1), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2, 3, 4, 5))).clone(namedValues=NamedValues(("fmcFrNlpid", 1), ("fmcFrEther", 2), ("fmcAtmNlpid", 3), ("fmcAtmLlcSnap", 4), ("fmcAtmVcMux", 5)))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: dsFmcFrameType.setStatus('mandatory')
dsFmcAddrOctets = MibScalar((1, 3, 6, 1, 4, 1, 181, 2, 2, 9, 2), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2))).clone(namedValues=NamedValues(("fmcTwoOctets", 1), ("fmcFourOctets", 2)))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: dsFmcAddrOctets.setStatus('mandatory')
dsFmcFcsBits = MibScalar((1, 3, 6, 1, 4, 1, 181, 2, 2, 9, 3), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2))).clone(namedValues=NamedValues(("fmc16Bits", 1), ("fmc32Bits", 2)))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: dsFmcFcsBits.setStatus('mandatory')
dsFmcUpperBW = MibScalar((1, 3, 6, 1, 4, 1, 181, 2, 2, 9, 4), Integer32().subtype(subtypeSpec=ValueRangeConstraint(5, 95))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: dsFmcUpperBW.setStatus('mandatory')
dsFmcFpingOper = MibScalar((1, 3, 6, 1, 4, 1, 181, 2, 2, 9, 5), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2))).clone(namedValues=NamedValues(("fmcFpoEnable", 1), ("fmcFpoDisable", 2)))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: dsFmcFpingOper.setStatus('mandatory')
dsFmcFpingGen = MibScalar((1, 3, 6, 1, 4, 1, 181, 2, 2, 9, 6), Integer32().subtype(subtypeSpec=ValueRangeConstraint(0, 64))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: dsFmcFpingGen.setStatus('mandatory')
dsFmcFpingThres = MibScalar((1, 3, 6, 1, 4, 1, 181, 2, 2, 9, 7), Integer32().subtype(subtypeSpec=ValueRangeConstraint(20, 2000))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: dsFmcFpingThres.setStatus('mandatory')
dsFmcFpingRst = MibScalar((1, 3, 6, 1, 4, 1, 181, 2, 2, 9, 8), Integer32().subtype(subtypeSpec=ValueRangeConstraint(0, 8388607))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: dsFmcFpingRst.setStatus('mandatory')
dsFmcAddVc = MibScalar((1, 3, 6, 1, 4, 1, 181, 2, 2, 9, 9), Integer32().subtype(subtypeSpec=ValueRangeConstraint(0, 8388607))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: dsFmcAddVc.setStatus('mandatory')
dsFmcDelVc = MibScalar((1, 3, 6, 1, 4, 1, 181, 2, 2, 9, 10), Integer32().subtype(subtypeSpec=ValueRangeConstraint(0, 8388607))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: dsFmcDelVc.setStatus('mandatory')
dsFmcSetNiRcvUpperBwThresh = NotificationType((1, 3, 6, 1, 4, 1, 181, 2, 2) + (0, 9001)).setObjects(("DATASMART-MIB", "dsRpFrCur15MVc"))
dsFmcClrNiRcvUpperBwThresh = NotificationType((1, 3, 6, 1, 4, 1, 181, 2, 2) + (0, 9002)).setObjects(("DATASMART-MIB", "dsRpFrCur15MVc"))
dsFmcSetNiXmtUpperBwThresh = NotificationType((1, 3, 6, 1, 4, 1, 181, 2, 2) + (0, 9003)).setObjects(("DATASMART-MIB", "dsRpFrCur15MVc"))
dsFmcClrNiXmtUpperBwThresh = NotificationType((1, 3, 6, 1, 4, 1, 181, 2, 2) + (0, 9004)).setObjects(("DATASMART-MIB", "dsRpFrCur15MVc"))
dsFmcFpingLinkDown = NotificationType((1, 3, 6, 1, 4, 1, 181, 2, 2) + (0, 9005)).setObjects(("DATASMART-MIB", "dsRpFrCur15MVc"))
dsFmcFpingLinkUp = NotificationType((1, 3, 6, 1, 4, 1, 181, 2, 2) + (0, 9006)).setObjects(("DATASMART-MIB", "dsRpFrCur15MVc"))
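# -- dsMc*: management configuration -- network interface selection,
# -- T1 datalink path, default route and management IP addresses/masks.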
dsMcNetif = MibScalar((1, 3, 6, 1, 4, 1, 181, 2, 2, 10, 1), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11))).clone(namedValues=NamedValues(("mcNetNone", 1), ("mcNetEthernet", 2), ("mcNetPppSlip", 3), ("mcNetSlip", 4), ("mcNetDatalink", 5), ("mcNetES", 6), ("mcNetED", 7), ("mcNetESD", 8), ("mcNetPSD", 9), ("mcNetSD", 10), ("mcNetInband", 11)))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: dsMcNetif.setStatus('mandatory')
dsMcT1DLPath = MibScalar((1, 3, 6, 1, 4, 1, 181, 2, 2, 10, 2), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49))).clone(namedValues=NamedValues(("mcDLPathFdl", 1), ("mcDLPathTS1-64", 2), ("mcDLPathTS2-64", 3), ("mcDLPathTS3-64", 4), ("mcDLPathTS4-64", 5), ("mcDLPathTS5-64", 6), ("mcDLPathTS6-64", 7), ("mcDLPathTS7-64", 8), ("mcDLPathTS8-64", 9), ("mcDLPathTS9-64", 10), ("mcDLPathTS10-64", 11), ("mcDLPathTS11-64", 12), ("mcDLPathTS12-64", 13), ("mcDLPathTS13-64", 14), ("mcDLPathTS14-64", 15), ("mcDLPathTS15-64", 16), ("mcDLPathTS16-64", 17), ("mcDLPathTS17-64", 18), ("mcDLPathTS18-64", 19), ("mcDLPathTS19-64", 20), ("mcDLPathTS20-64", 21), ("mcDLPathTS21-64", 22), ("mcDLPathTS22-64", 23), ("mcDLPathTS23-64", 24), ("mcDLPathTS24-64", 25), ("mcDLPathTS1-56", 26), ("mcDLPathTS2-56", 27), ("mcDLPathTS3-56", 28), ("mcDLPathTS4-56", 29), ("mcDLPathTS5-56", 30), ("mcDLPathTS6-56", 31), ("mcDLPathTS7-56", 32), ("mcDLPathTS8-56", 33), ("mcDLPathTS9-56", 34), ("mcDLPathTS10-56", 35), ("mcDLPathTS11-56", 36), ("mcDLPathTS12-56", 37), ("mcDLPathTS13-56", 38), ("mcDLPathTS14-56", 39), ("mcDLPathTS15-56", 40), ("mcDLPathTS16-56", 41), ("mcDLPathTS17-56", 42), ("mcDLPathTS18-56", 43), ("mcDLPathTS19-56", 44), ("mcDLPathTS20-56", 45), ("mcDLPathTS21-56", 46), ("mcDLPathTS22-56", 47), ("mcDLPathTS23-56", 48), ("mcDLPathTS24-56", 49)))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: dsMcT1DLPath.setStatus('mandatory')
dsMcDefRoute = MibScalar((1, 3, 6, 1, 4, 1, 181, 2, 2, 10, 3), IpAddress()).setMaxAccess("readwrite")
if mibBuilder.loadTexts: dsMcDefRoute.setStatus('mandatory')
dsMcCIpAddr = MibScalar((1, 3, 6, 1, 4, 1, 181, 2, 2, 10, 4), IpAddress()).setMaxAccess("readwrite")
if mibBuilder.loadTexts: dsMcCIpAddr.setStatus('mandatory')
dsMcDIpAddr = MibScalar((1, 3, 6, 1, 4, 1, 181, 2, 2, 10, 5), IpAddress()).setMaxAccess("readwrite")
if mibBuilder.loadTexts: dsMcDIpAddr.setStatus('mandatory')
dsMcCDIpMask = MibScalar((1, 3, 6, 1, 4, 1, 181, 2, 2, 10, 6), IpAddress()).setMaxAccess("readwrite")
if mibBuilder.loadTexts: dsMcCDIpMask.setStatus('mandatory')
dsMcEIpAddr = MibScalar((1, 3, 6, 1, 4, 1, 181, 2, 2, 10, 7), IpAddress()).setMaxAccess("readwrite")
if mibBuilder.loadTexts: dsMcEIpAddr.setStatus('mandatory')
dsMcEIpMask = MibScalar((1, 3, 6, 1, 4, 1, 181, 2, 2, 10, 8), IpAddress()).setMaxAccess("readwrite")
if mibBuilder.loadTexts: dsMcEIpMask.setStatus('mandatory')
dsMcIIpAddr = MibScalar((1, 3, 6, 1, 4, 1, 181, 2, 2, 10, 9), IpAddress()).setMaxAccess("readwrite")
if mibBuilder.loadTexts: dsMcIIpAddr.setStatus('mandatory')
dsMcIIpMask = MibScalar((1, 3, 6, 1, 4, 1, 181, 2, 2, 10, 10), IpAddress()).setMaxAccess("readwrite")
if mibBuilder.loadTexts: dsMcIIpMask.setStatus('mandatory')
dsAmc = MibIdentifier((1, 3, 6, 1, 4, 1, 181, 2, 2, 10, 11))
dsAmcAgent = MibScalar((1, 3, 6, 1, 4, 1, 181, 2, 2, 10, 11, 1), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2))).clone(namedValues=NamedValues(("amcEnabled", 1), ("amcDisabled", 2)))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: dsAmcAgent.setStatus('mandatory')
dsAmcSourceScreen = MibScalar((1, 3, 6, 1, 4, 1, 181, 2, 2, 10, 11, 2), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2))).clone(namedValues=NamedValues(("mcIpScreen", 1), ("mcNoScreen", 2)))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: dsAmcSourceScreen.setStatus('mandatory')
dsAmcTrapTable = MibTable((1, 3, 6, 1, 4, 1, 181, 2, 2, 10, 11, 3), )
if mibBuilder.loadTexts: dsAmcTrapTable.setStatus('mandatory')
dsAmcTrapEntry = MibTableRow((1, 3, 6, 1, 4, 1, 181, 2, 2, 10, 11, 3, 1), ).setIndexNames((0, "DATASMART-MIB", "dsAmcTrapType"))
if mibBuilder.loadTexts: dsAmcTrapEntry.setStatus('mandatory')
dsAmcTrapType = MibTableColumn((1, 3, 6, 1, 4, 1, 181, 2, 2, 10, 11, 3, 1, 1), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2, 3, 4))).clone(namedValues=NamedValues(("mcStartTraps", 1), ("mcLinkTraps", 2), ("mcAuthenTraps", 3), ("mcEnterpriseTraps", 4)))).setMaxAccess("readonly")
if mibBuilder.loadTexts: dsAmcTrapType.setStatus('mandatory')
dsAmcTrapStatus = MibTableColumn((1, 3, 6, 1, 4, 1, 181, 2, 2, 10, 11, 3, 1, 2), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2))).clone(namedValues=NamedValues(("amcEnabled", 1), ("amcDisabled", 2)))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: dsAmcTrapStatus.setStatus('mandatory')
dsAmcScrnTable = MibTable((1, 3, 6, 1, 4, 1, 181, 2, 2, 10, 11, 4), )
if mibBuilder.loadTexts: dsAmcScrnTable.setStatus('mandatory')
dsAmcScrnEntry = MibTableRow((1, 3, 6, 1, 4, 1, 181, 2, 2, 10, 11, 4, 1), ).setIndexNames((0, "DATASMART-MIB", "dsAmcScrnIndex"))
if mibBuilder.loadTexts: dsAmcScrnEntry.setStatus('mandatory')
dsAmcScrnIndex = MibTableColumn((1, 3, 6, 1, 4, 1, 181, 2, 2, 10, 11, 4, 1, 1), Integer32().subtype(subtypeSpec=ValueRangeConstraint(1, 10))).setMaxAccess("readonly")
if mibBuilder.loadTexts: dsAmcScrnIndex.setStatus('mandatory')
dsAmcScrnIpAddr = MibTableColumn((1, 3, 6, 1, 4, 1, 181, 2, 2, 10, 11, 4, 1, 2), IpAddress()).setMaxAccess("readwrite")
if mibBuilder.loadTexts: dsAmcScrnIpAddr.setStatus('mandatory')
dsAmcScrnIpMask = MibTableColumn((1, 3, 6, 1, 4, 1, 181, 2, 2, 10, 11, 4, 1, 3), IpAddress()).setMaxAccess("readwrite")
if mibBuilder.loadTexts: dsAmcScrnIpMask.setStatus('mandatory')
dsAmcTrapDestTable = MibTable((1, 3, 6, 1, 4, 1, 181, 2, 2, 10, 11, 5), )
if mibBuilder.loadTexts: dsAmcTrapDestTable.setStatus('mandatory')
dsAmcTrapDestEntry = MibTableRow((1, 3, 6, 1, 4, 1, 181, 2, 2, 10, 11, 5, 1), ).setIndexNames((0, "DATASMART-MIB", "dsAmcTrapDestIndex"))
if mibBuilder.loadTexts: dsAmcTrapDestEntry.setStatus('mandatory')
dsAmcTrapDestIndex = MibTableColumn((1, 3, 6, 1, 4, 1, 181, 2, 2, 10, 11, 5, 1, 1), Integer32().subtype(subtypeSpec=ValueRangeConstraint(1, 10))).setMaxAccess("readonly")
if mibBuilder.loadTexts: dsAmcTrapDestIndex.setStatus('mandatory')
dsAmcTrapDestIpAddr = MibTableColumn((1, 3, 6, 1, 4, 1, 181, 2, 2, 10, 11, 5, 1, 2), IpAddress()).setMaxAccess("readwrite")
if mibBuilder.loadTexts: dsAmcTrapDestIpAddr.setStatus('mandatory')
dsAmcTrapDestVc = MibTableColumn((1, 3, 6, 1, 4, 1, 181, 2, 2, 10, 11, 5, 1, 3), Integer32().subtype(subtypeSpec=ValueRangeConstraint(0, 8388607))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: dsAmcTrapDestVc.setStatus('mandatory')
dsAmcTrapDestPort = MibTableColumn((1, 3, 6, 1, 4, 1, 181, 2, 2, 10, 11, 5, 1, 4), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2))).clone(namedValues=NamedValues(("amcNIPort", 1), ("amcDPPort", 2)))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: dsAmcTrapDestPort.setStatus('mandatory')
dsMcIVc = MibScalar((1, 3, 6, 1, 4, 1, 181, 2, 2, 10, 12), DLCI()).setMaxAccess("readwrite")
if mibBuilder.loadTexts: dsMcIVc.setStatus('mandatory')
dsMcIPort = MibScalar((1, 3, 6, 1, 4, 1, 181, 2, 2, 10, 13), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2))).clone(namedValues=NamedValues(("amcNiPort", 1), ("amcDPPort", 2)))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: dsMcIPort.setStatus('mandatory')
dsNcFraming = MibScalar((1, 3, 6, 1, 4, 1, 181, 2, 2, 11, 1), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2, 3))).clone(namedValues=NamedValues(("ncSF", 1), ("ncESF", 2), ("ncEricsson", 3)))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: dsNcFraming.setStatus('mandatory')
dsNcCoding = MibScalar((1, 3, 6, 1, 4, 1, 181, 2, 2, 11, 2), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2))).clone(namedValues=NamedValues(("ncAmi", 1), ("ncB8zs", 2)))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: dsNcCoding.setStatus('mandatory')
dsNcT1403 = MibScalar((1, 3, 6, 1, 4, 1, 181, 2, 2, 11, 3), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2))).clone(namedValues=NamedValues(("ncT1403Enable", 1), ("ncT1403Disable", 2)))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: dsNcT1403.setStatus('mandatory')
dsNcYellow = MibScalar((1, 3, 6, 1, 4, 1, 181, 2, 2, 11, 4), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2))).clone(namedValues=NamedValues(("ncYelEnable", 1), ("ncYelDisable", 2)))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: dsNcYellow.setStatus('mandatory')
dsNcAddr54 = MibScalar((1, 3, 6, 1, 4, 1, 181, 2, 2, 11, 5), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2, 3))).clone(namedValues=NamedValues(("ncAddrCsu", 1), ("ncAddrDsu", 2), ("ncAddrBoth", 3)))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: dsNcAddr54.setStatus('mandatory')
dsNc54016 = MibScalar((1, 3, 6, 1, 4, 1, 181, 2, 2, 11, 6), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2))).clone(namedValues=NamedValues(("nc54016Enable", 1), ("nc54016Disable", 2)))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: dsNc54016.setStatus('mandatory')
dsNcLbo = MibScalar((1, 3, 6, 1, 4, 1, 181, 2, 2, 11, 7), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2, 3))).clone(namedValues=NamedValues(("ncLbo0", 1), ("ncLbo1", 2), ("ncLbo2", 3)))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: dsNcLbo.setStatus('mandatory')
dsNcMF16 = MibScalar((1, 3, 6, 1, 4, 1, 181, 2, 2, 11, 8), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2))).clone(namedValues=NamedValues(("ncMF16Enable", 1), ("ncMF16Disable", 2)))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: dsNcMF16.setStatus('mandatory')
dsNcCRC = MibScalar((1, 3, 6, 1, 4, 1, 181, 2, 2, 11, 9), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2))).clone(namedValues=NamedValues(("ncCrcEnable", 1), ("ncCrcDisable", 2)))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: dsNcCRC.setStatus('mandatory')
dsNcFasAlign = MibScalar((1, 3, 6, 1, 4, 1, 181, 2, 2, 11, 10), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2))).clone(namedValues=NamedValues(("ncFasWord", 1), ("ncNonFasWord", 2)))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: dsNcFasAlign.setStatus('mandatory')
dsNcE1DLPath = MibScalar((1, 3, 6, 1, 4, 1, 181, 2, 2, 11, 11), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37))).clone(namedValues=NamedValues(("ncSaNone", 1), ("ncSaBit4", 2), ("ncSaBit5", 3), ("ncSaBit6", 4), ("ncSaBit7", 5), ("ncSaBit8", 6), ("ncTS1", 7), ("ncTS2", 8), ("ncTS3", 9), ("ncTS4", 10), ("ncTS5", 11), ("ncTS6", 12), ("ncTS7", 13), ("ncTS8", 14), ("ncTS9", 15), ("ncTS10", 16), ("ncTS11", 17), ("ncTS12", 18), ("ncTS13", 19), ("ncTS14", 20), ("ncTS15", 21), ("ncTS16", 22), ("ncTS17", 23), ("ncTS18", 24), ("ncTS19", 25), ("ncTS20", 26), ("ncTS21", 27), ("ncTS22", 28), ("ncTS23", 29), ("ncTS24", 30), ("ncTS25", 31), ("ncTS26", 32), ("ncTS27", 33), ("ncTS28", 34), ("ncTS29", 35), ("ncTS30", 36), ("ncTS31", 37)))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: dsNcE1DLPath.setStatus('mandatory')
dsNcKA = MibScalar((1, 3, 6, 1, 4, 1, 181, 2, 2, 11, 12), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2))).clone(namedValues=NamedValues(("ncFramedKeepAlive", 1), ("ncUnFramedKeepAlive", 2)))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: dsNcKA.setStatus('mandatory')
dsNcGenRfa = MibScalar((1, 3, 6, 1, 4, 1, 181, 2, 2, 11, 13), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2))).clone(namedValues=NamedValues(("ncGenRfaEnable", 1), ("ncGenRfaDisable", 2)))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: dsNcGenRfa.setStatus('mandatory')
dsNcPassTiRfa = MibScalar((1, 3, 6, 1, 4, 1, 181, 2, 2, 11, 14), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2))).clone(namedValues=NamedValues(("ncPassTiRfaEnable", 1), ("ncPassTiRfaDisable", 2)))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: dsNcPassTiRfa.setStatus('mandatory')
dsNcIdle = MibScalar((1, 3, 6, 1, 4, 1, 181, 2, 2, 11, 15), Integer32().subtype(subtypeSpec=ValueRangeConstraint(0, 255))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: dsNcIdle.setStatus('mandatory')
dsNcDdsType = MibScalar((1, 3, 6, 1, 4, 1, 181, 2, 2, 11, 16), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2))).clone(namedValues=NamedValues(("scDds56K", 1), ("scDds64K", 2)))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: dsNcDdsType.setStatus('mandatory')
dsScMonth = MibScalar((1, 3, 6, 1, 4, 1, 181, 2, 2, 12, 1), Integer32().subtype(subtypeSpec=ValueRangeConstraint(1, 12))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: dsScMonth.setStatus('mandatory')
dsScDay = MibScalar((1, 3, 6, 1, 4, 1, 181, 2, 2, 12, 2), Integer32().subtype(subtypeSpec=ValueRangeConstraint(1, 31))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: dsScDay.setStatus('mandatory')
dsScYear = MibScalar((1, 3, 6, 1, 4, 1, 181, 2, 2, 12, 3), Integer32().subtype(subtypeSpec=ValueRangeConstraint(0, 99))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: dsScYear.setStatus('mandatory')
dsScHour = MibScalar((1, 3, 6, 1, 4, 1, 181, 2, 2, 12, 4), Integer32().subtype(subtypeSpec=ValueRangeConstraint(0, 23))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: dsScHour.setStatus('mandatory')
dsScMinutes = MibScalar((1, 3, 6, 1, 4, 1, 181, 2, 2, 12, 5), Integer32().subtype(subtypeSpec=ValueRangeConstraint(0, 59))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: dsScMinutes.setStatus('mandatory')
dsScName = MibScalar((1, 3, 6, 1, 4, 1, 181, 2, 2, 12, 6), DisplayString().subtype(subtypeSpec=ValueSizeConstraint(0, 15))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: dsScName.setStatus('mandatory')
dsScSlotAddr = MibScalar((1, 3, 6, 1, 4, 1, 181, 2, 2, 12, 7), Integer32().subtype(subtypeSpec=ValueRangeConstraint(0, 15))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: dsScSlotAddr.setStatus('mandatory')
dsScShelfAddr = MibScalar((1, 3, 6, 1, 4, 1, 181, 2, 2, 12, 8), Integer32().subtype(subtypeSpec=ValueRangeConstraint(0, 15))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: dsScShelfAddr.setStatus('mandatory')
dsScGroupAddr = MibScalar((1, 3, 6, 1, 4, 1, 181, 2, 2, 12, 9), Integer32().subtype(subtypeSpec=ValueRangeConstraint(0, 255))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: dsScGroupAddr.setStatus('mandatory')
dsScFrontPanel = MibScalar((1, 3, 6, 1, 4, 1, 181, 2, 2, 12, 10), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2))).clone(namedValues=NamedValues(("scFpEnable", 1), ("scFpDisable", 2)))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: dsScFrontPanel.setStatus('mandatory')
dsScDSCompatible = MibScalar((1, 3, 6, 1, 4, 1, 181, 2, 2, 12, 11), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2))).clone(namedValues=NamedValues(("scDSEnable", 1), ("scDSDisable", 2)))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: dsScDSCompatible.setStatus('mandatory')
dsScClockSource = MibScalar((1, 3, 6, 1, 4, 1, 181, 2, 2, 12, 12), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2, 3, 4, 5, 6))).clone(namedValues=NamedValues(("scTerminalTiming", 1), ("scThroughTiming", 2), ("scInternalTiming", 3), ("scLoopTiming", 4), ("scDP1Timing", 5), ("scDP2Timing", 6)))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: dsScClockSource.setStatus('mandatory')
dsScAutologout = MibScalar((1, 3, 6, 1, 4, 1, 181, 2, 2, 12, 13), Integer32().subtype(subtypeSpec=ValueRangeConstraint(0, 60))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: dsScAutologout.setStatus('mandatory')
dsScZeroPerData = MibScalar((1, 3, 6, 1, 4, 1, 181, 2, 2, 12, 14), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2))).clone(namedValues=NamedValues(("scZallIdle", 1), ("scZallStart", 2)))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: dsScZeroPerData.setStatus('mandatory')
dsScWyv = MibScalar((1, 3, 6, 1, 4, 1, 181, 2, 2, 12, 15), DisplayString().subtype(subtypeSpec=ValueSizeConstraint(0, 255))).setMaxAccess("readonly")
if mibBuilder.loadTexts: dsScWyv.setStatus('mandatory')
dsScAutoCfg = MibScalar((1, 3, 6, 1, 4, 1, 181, 2, 2, 12, 16), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2))).clone(namedValues=NamedValues(("scAcEnable", 1), ("scAcDisable", 2)))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: dsScAutoCfg.setStatus('mandatory')
dsScTftpSwdl = MibScalar((1, 3, 6, 1, 4, 1, 181, 2, 2, 12, 17), DisplayString().subtype(subtypeSpec=ValueSizeConstraint(0, 255))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: dsScTftpSwdl.setStatus('mandatory')
dsScBoot = MibScalar((1, 3, 6, 1, 4, 1, 181, 2, 2, 12, 18), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2, 3))).clone(namedValues=NamedValues(("scBootIdle", 1), ("scBootActive", 2), ("scBootInactive", 3)))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: dsScBoot.setStatus('mandatory')
dsScOperMode = MibScalar((1, 3, 6, 1, 4, 1, 181, 2, 2, 12, 19), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2))).clone(namedValues=NamedValues(("scTransparentMode", 1), ("scMonitorMode", 2)))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: dsScOperMode.setStatus('mandatory')
dsScYearExtention = MibScalar((1, 3, 6, 1, 4, 1, 181, 2, 2, 12, 20), Integer32().subtype(subtypeSpec=ValueRangeConstraint(1992, 2091))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: dsScYearExtention.setStatus('mandatory')
dsScMonthExtention = MibScalar((1, 3, 6, 1, 4, 1, 181, 2, 2, 12, 21), Integer32().subtype(subtypeSpec=ValueRangeConstraint(1, 12))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: dsScMonthExtention.setStatus('mandatory')
dsScDayExtention = MibScalar((1, 3, 6, 1, 4, 1, 181, 2, 2, 12, 22), Integer32().subtype(subtypeSpec=ValueRangeConstraint(1, 31))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: dsScDayExtention.setStatus('mandatory')
dsScHourExtention = MibScalar((1, 3, 6, 1, 4, 1, 181, 2, 2, 12, 23), Integer32().subtype(subtypeSpec=ValueRangeConstraint(0, 24))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: dsScHourExtention.setStatus('mandatory')
dsScMinExtention = MibScalar((1, 3, 6, 1, 4, 1, 181, 2, 2, 12, 24), Integer32().subtype(subtypeSpec=ValueRangeConstraint(0, 59))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: dsScMinExtention.setStatus('mandatory')
dsScSecExtention = MibScalar((1, 3, 6, 1, 4, 1, 181, 2, 2, 12, 25), Integer32().subtype(subtypeSpec=ValueRangeConstraint(0, 59))).setMaxAccess("readonly")
if mibBuilder.loadTexts: dsScSecExtention.setStatus('mandatory')
dsScPinK = MibScalar((1, 3, 6, 1, 4, 1, 181, 2, 2, 12, 26), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2))).clone(namedValues=NamedValues(("pinKEnabled", 1), ("pinKDisabled", 2)))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: dsScPinK.setStatus('mandatory')
dsTcFraming = MibScalar((1, 3, 6, 1, 4, 1, 181, 2, 2, 13, 1), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2, 3))).clone(namedValues=NamedValues(("tcSF", 1), ("tcESF", 2), ("tcEricsson", 3)))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: dsTcFraming.setStatus('mandatory')
dsTcCoding = MibScalar((1, 3, 6, 1, 4, 1, 181, 2, 2, 13, 2), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2))).clone(namedValues=NamedValues(("tcAmi", 1), ("tcB8zs", 2)))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: dsTcCoding.setStatus('mandatory')
dsTcIdle = MibScalar((1, 3, 6, 1, 4, 1, 181, 2, 2, 13, 3), Integer32().subtype(subtypeSpec=ValueRangeConstraint(0, 255))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: dsTcIdle.setStatus('mandatory')
dsTcEqual = MibScalar((1, 3, 6, 1, 4, 1, 181, 2, 2, 13, 4), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2, 3, 4, 5))).clone(namedValues=NamedValues(("tcTe0", 1), ("tcTe1", 2), ("tcTe2", 3), ("tcTe3", 4), ("tcTe4", 5)))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: dsTcEqual.setStatus('mandatory')
dsTcMF16 = MibScalar((1, 3, 6, 1, 4, 1, 181, 2, 2, 13, 5), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2))).clone(namedValues=NamedValues(("tcMF16Enable", 1), ("tcMF16Disable", 2)))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: dsTcMF16.setStatus('mandatory')
dsTcCRC = MibScalar((1, 3, 6, 1, 4, 1, 181, 2, 2, 13, 6), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2))).clone(namedValues=NamedValues(("tcCrcEnable", 1), ("tcCrcDisable", 2)))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: dsTcCRC.setStatus('mandatory')
dsTcFasAlign = MibScalar((1, 3, 6, 1, 4, 1, 181, 2, 2, 13, 7), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2))).clone(namedValues=NamedValues(("tcFasWord", 1), ("tcNonFasWord", 2)))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: dsTcFasAlign.setStatus('mandatory')
dsTcAis = MibScalar((1, 3, 6, 1, 4, 1, 181, 2, 2, 13, 8), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2))).clone(namedValues=NamedValues(("tcAisEnable", 1), ("tcAisDisable", 2)))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: dsTcAis.setStatus('mandatory')
dsTcGenRfa = MibScalar((1, 3, 6, 1, 4, 1, 181, 2, 2, 13, 9), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2))).clone(namedValues=NamedValues(("tcGenRfaEnable", 1), ("tcGenRfaDisable", 2)))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: dsTcGenRfa.setStatus('mandatory')
dsTcPassTiRfa = MibScalar((1, 3, 6, 1, 4, 1, 181, 2, 2, 13, 10), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2))).clone(namedValues=NamedValues(("tcPassTiRfaEnable", 1), ("tcPassTiRfaDisable", 2)))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: dsTcPassTiRfa.setStatus('mandatory')
dsFpFr56 = MibIdentifier((1, 3, 6, 1, 4, 1, 181, 2, 2, 14, 1))
dsFpFr56PwrLed = MibScalar((1, 3, 6, 1, 4, 1, 181, 2, 2, 14, 1, 1), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2, 3, 4))).clone(namedValues=NamedValues(("fpLedIndeterminate", 1), ("fpLedOff", 2), ("fpLedOnGreen", 3), ("fpLedBlinkGreen", 4)))).setMaxAccess("readonly")
if mibBuilder.loadTexts: dsFpFr56PwrLed.setStatus('mandatory')
dsFpFr56DnldFailLed = MibScalar((1, 3, 6, 1, 4, 1, 181, 2, 2, 14, 1, 2), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2, 3, 4))).clone(namedValues=NamedValues(("fpLedIndeterminate", 1), ("fpLedOff", 2), ("fpLedOnRed", 3), ("fpLedBlinkRed", 4)))).setMaxAccess("readonly")
if mibBuilder.loadTexts: dsFpFr56DnldFailLed.setStatus('mandatory')
dsFpFr56NiAlarmLed = MibScalar((1, 3, 6, 1, 4, 1, 181, 2, 2, 14, 1, 3), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2, 3))).clone(namedValues=NamedValues(("fpLedIndeterminate", 1), ("fpLedOff", 2), ("fpLedOnRed", 3)))).setMaxAccess("readonly")
if mibBuilder.loadTexts: dsFpFr56NiAlarmLed.setStatus('mandatory')
dsFpFr56NiDataLed = MibScalar((1, 3, 6, 1, 4, 1, 181, 2, 2, 14, 1, 4), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2, 3))).clone(namedValues=NamedValues(("fpLedIndeterminate", 1), ("fpLedOff", 2), ("fpLedOnGreen", 3)))).setMaxAccess("readonly")
if mibBuilder.loadTexts: dsFpFr56NiDataLed.setStatus('mandatory')
dsFpFr56TestLed = MibScalar((1, 3, 6, 1, 4, 1, 181, 2, 2, 14, 1, 5), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2, 3))).clone(namedValues=NamedValues(("fpLedIndeterminate", 1), ("fpLedOff", 2), ("fpLedOnYellow", 3)))).setMaxAccess("readonly")
if mibBuilder.loadTexts: dsFpFr56TestLed.setStatus('mandatory')
dsFpFr56DpCtsTxLed = MibScalar((1, 3, 6, 1, 4, 1, 181, 2, 2, 14, 1, 6), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2, 3))).clone(namedValues=NamedValues(("fpLedIndeterminate", 1), ("fpLedOff", 2), ("fpLedOnYellow", 3)))).setMaxAccess("readonly")
if mibBuilder.loadTexts: dsFpFr56DpCtsTxLed.setStatus('mandatory')
dsFpFr56DpRtsRxLed = MibScalar((1, 3, 6, 1, 4, 1, 181, 2, 2, 14, 1, 7), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2, 3))).clone(namedValues=NamedValues(("fpLedIndeterminate", 1), ("fpLedOff", 2), ("fpLedOnYellow", 3)))).setMaxAccess("readonly")
if mibBuilder.loadTexts: dsFpFr56DpRtsRxLed.setStatus('mandatory')
dsFpFr56FrLinkLed = MibScalar((1, 3, 6, 1, 4, 1, 181, 2, 2, 14, 1, 8), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2, 3, 4))).clone(namedValues=NamedValues(("fpLedIndeterminate", 1), ("fpLedOff", 2), ("fpLedOnGreen", 3), ("fpLedBlinkGreen", 4)))).setMaxAccess("readonly")
if mibBuilder.loadTexts: dsFpFr56FrLinkLed.setStatus('mandatory')
mibBuilder.exportSymbols("DATASMART-MIB", dsRpUsrDayES=dsRpUsrDayES, dsDcInterface=dsDcInterface, dsScBoot=dsScBoot, dsRpFrUrVc=dsRpFrUrVc, dsScWyv=dsScWyv, dsFp=dsFp, dsRpDdsTotalSecs=dsRpDdsTotalSecs, dsFmcFpingLinkDown=dsFmcFpingLinkDown, dsScYear=dsScYear, dsFcTableIndex=dsFcTableIndex, dsRpUsrTotalEntry=dsRpUsrTotalEntry, dsScPinK=dsScPinK, dsLmLoopback=dsLmLoopback, DLCI=DLCI, dsRpUsrCurCSS=dsRpUsrCurCSS, dsAcOnPowerTransition=dsAcOnPowerTransition, dsMcIIpMask=dsMcIIpMask, dsRmBertCode=dsRmBertCode, dsRpFrTotalTable=dsRpFrTotalTable, dsAmcScrnTable=dsAmcScrnTable, dsRpFrCur2HEntry=dsRpFrCur2HEntry, dsRpUsrDayStatus=dsRpUsrDayStatus, dsRpFrPre15MVc=dsRpFrPre15MVc, dsRmFpingRmtVc=dsRmFpingRmtVc, dsRpDdsTable=dsRpDdsTable, dsRpFrIntvl2HFpMax=dsRpFrIntvl2HFpMax, dsAcDeact=dsAcDeact, dsRpFrPre15MFpAvg=dsRpFrPre15MFpAvg, dsRpFrPre15MFpMax=dsRpFrPre15MFpMax, dsFmcAddVc=dsFmcAddVc, dsNcGenRfa=dsNcGenRfa, dsNcDdsType=dsNcDdsType, dsRpUsrCurIndex=dsRpUsrCurIndex, dsRpUsrIntvlCSS=dsRpUsrIntvlCSS, dsRpDdsEntry=dsRpDdsEntry, dsRpFrPre15MFrames=dsRpFrPre15MFrames, dsRpUsrTotalSES=dsRpUsrTotalSES, dsCcEcho=dsCcEcho, dsRpFrUrCIRExceeded=dsRpFrUrCIRExceeded, dsRpAhrStr=dsRpAhrStr, dsFmc=dsFmc, dsRpFrCur15MOctets=dsRpFrCur15MOctets, dsTcIdle=dsTcIdle, dsRpFrCur15MFpRmtVc=dsRpFrCur15MFpRmtVc, dsDcDataInvert=dsDcDataInvert, dsLmSelfTestResults=dsLmSelfTestResults, dsFmcDelVc=dsFmcDelVc, dsTcCRC=dsTcCRC, dsRpUsrCurSES=dsRpUsrCurSES, dsRpFrDayFpMax=dsRpFrDayFpMax, dsMcIPort=dsMcIPort, dsRpFrIntvl2HDir=dsRpFrIntvl2HDir, dsRpFrDayVcIndex=dsRpFrDayVcIndex, dsFpFr56NiDataLed=dsFpFr56NiDataLed, datasmart=datasmart, dsRpUsrDayBES=dsRpUsrDayBES, dsRpUsrCurStatus=dsRpUsrCurStatus, dsRpDdsAvailableSecs=dsRpDdsAvailableSecs, dsRpFrIntvl2HOctets=dsRpFrIntvl2HOctets, dsRpFrCur2HOctets=dsRpFrCur2HOctets, dsRpFrUrDir=dsRpFrUrDir, dsRpUsrDayDM=dsRpUsrDayDM, dsAmcTrapDestIndex=dsAmcTrapDestIndex, dsRpCarTotal=dsRpCarTotal, dsRpFrDayFrames=dsRpFrDayFrames, dsRpUsrTotalIndex=dsRpUsrTotalIndex, dsSs=dsSs, dsRmBertErrdSecs=dsRmBertErrdSecs, dsRpCarIntvlTable=dsRpCarIntvlTable, dsRpUsrCurUAS=dsRpUsrCurUAS, dsScMinExtention=dsScMinExtention, dsRpUsrIntvlIndex=dsRpUsrIntvlIndex, dsRpFrTotalVcIndex=dsRpFrTotalVcIndex, dsDcLOSInput=dsDcLOSInput, dsTcFraming=dsTcFraming, dsRpCarIntvlEntry=dsRpCarIntvlEntry, dsRmFping=dsRmFping, dsCcBaud=dsCcBaud, dsAmcAgent=dsAmcAgent, dsRpCarCurCSS=dsRpCarCurCSS, dsFmcFpingThres=dsFmcFpingThres, dsRpDdsDuration=dsRpDdsDuration, dsRpFrTmCntDays=dsRpFrTmCntDays, dsRpCarTotalCSS=dsRpCarTotalCSS, dsRpUsrTmCntTable=dsRpUsrTmCntTable, dsRpUsrIntvlUAS=dsRpUsrIntvlUAS, dsRpStOofErrors=dsRpStOofErrors, dsRpFrCur15MFpRmtIp=dsRpFrCur15MFpRmtIp, dsRpFrDayNum=dsRpFrDayNum, dsCc=dsCc, dsRp=dsRp, dsFcTable=dsFcTable, dsRpUsrCurEE=dsRpUsrCurEE, dsRpShrEventType=dsRpShrEventType, dsRpFrIntvl2HKbps=dsRpFrIntvl2HKbps, dsRpFrCur15MVcIndex=dsRpFrCur15MVcIndex, dsRpUsrTmCntSecs=dsRpUsrTmCntSecs, dsRpStLOFEvents=dsRpStLOFEvents, dsScMonth=dsScMonth, dsRpStBPVs=dsRpStBPVs, dsRmBertState=dsRmBertState, dsTcCoding=dsTcCoding, dsRpFrCur2HStatus=dsRpFrCur2HStatus, dsRpUsrIntvlEE=dsRpUsrIntvlEE, dsRpUsrTmCnt15Mins=dsRpUsrTmCnt15Mins, dsAmcTrapStatus=dsAmcTrapStatus, dsScSecExtention=dsScSecExtention, dsDc=dsDc, dsRpUsrIntvlEntry=dsRpUsrIntvlEntry, dsRpFrIntvl2HStatus=dsRpFrIntvl2HStatus, dsRpFrCur15MEntry=dsRpFrCur15MEntry, dsRpFrPre15MEntry=dsRpFrPre15MEntry, dsRmBertReSync=dsRmBertReSync, dsRpStFrameBitErrors=dsRpStFrameBitErrors, dsNc54016=dsNc54016, dsRpStCrcErrors=dsRpStCrcErrors, dsDcRcvClkInvert=dsDcRcvClkInvert, 
dsRmFpingCur=dsRmFpingCur, dsRpStTable=dsRpStTable, dsRpFrIntvl2HTable=dsRpFrIntvl2HTable, dsRpFrUrEntry=dsRpFrUrEntry, dsRpCarCurSES=dsRpCarCurSES, dsRpFrTmCntSecs=dsRpFrTmCntSecs, dsDcEntry=dsDcEntry, dsScSlotAddr=dsScSlotAddr, dsScZeroPerData=dsScZeroPerData, dsRpFrCur2HFpLost=dsRpFrCur2HFpLost, dsFpFr56=dsFpFr56, dsScYearExtention=dsScYearExtention, dsMcCIpAddr=dsMcCIpAddr, dsNcT1403=dsNcT1403, dsAmcTrapDestIpAddr=dsAmcTrapDestIpAddr, dsTcMF16=dsTcMF16, dsRmBertBitErrors=dsRmBertBitErrors, dsRpFrCur15MFrames=dsRpFrCur15MFrames, dsRmFpingState=dsRmFpingState, dsRpStFarEndBlkErrors=dsRpStFarEndBlkErrors, dsRpCarTotalUAS=dsRpCarTotalUAS, dsRpFrCur2HVc=dsRpFrCur2HVc, dsRpFrDayFpSent=dsRpFrDayFpSent, dsRmFpingRmtIp=dsRmFpingRmtIp, dsScHour=dsScHour, dsRpFrTotalVc=dsRpFrTotalVc, dsRpStat=dsRpStat, dsRpFrDayOctets=dsRpFrDayOctets, dsRpStEsfErrors=dsRpStEsfErrors, dsRpFrUrEIRExceededOctets=dsRpFrUrEIRExceededOctets, dsAmcTrapDestEntry=dsAmcTrapDestEntry, dsRpFrTotalEntry=dsRpFrTotalEntry, dsTcPassTiRfa=dsTcPassTiRfa, dsRpFrDayDir=dsRpFrDayDir, dsRpFrCur2HVcIndex=dsRpFrCur2HVcIndex, dsRpDdsBPVs=dsRpDdsBPVs, dsRpFrCur15MKbps=dsRpFrCur15MKbps, dsRpCarIntvlCSS=dsRpCarIntvlCSS, dsPlLen=dsPlLen, dsNcKA=dsNcKA, dsFpFr56PwrLed=dsFpFr56PwrLed, dsRpUsrTotalTable=dsRpUsrTotalTable, dsRpUsrDayCSS=dsRpUsrDayCSS, dsNcE1DLPath=dsNcE1DLPath, dsRpUsrTmCntIndex=dsRpUsrTmCntIndex, dsRpFrPre15MStatus=dsRpFrPre15MStatus, dsAcRfaAlm=dsAcRfaAlm, dsRpFrTotalFpMax=dsRpFrTotalFpMax, dsAmcTrapTable=dsAmcTrapTable, dsRpAhrTable=dsRpAhrTable, dsMcDefRoute=dsMcDefRoute, dsRpStZeroCounters=dsRpStZeroCounters, dsRpFrIntvl2HEntry=dsRpFrIntvl2HEntry, dsScGroupAddr=dsScGroupAddr, dsRpCarIntvlBES=dsRpCarIntvlBES, dsRmFpingAction=dsRmFpingAction, dsNcLbo=dsNcLbo, dsScHourExtention=dsScHourExtention, dsRpUsrDaySES=dsRpUsrDaySES, dsDcIndex=dsDcIndex, dsRpFrDayKbps=dsRpFrDayKbps, dsAmcScrnIpMask=dsAmcScrnIpMask, dsTc=dsTc, dsRpFrTmCnt2Hrs=dsRpFrTmCnt2Hrs, dsRpFrDayVc=dsRpFrDayVc, dsRpUsrIntvlBES=dsRpUsrIntvlBES, dsRpUsrTotalUAS=dsRpUsrTotalUAS, dsRpFrCur15MStatus=dsRpFrCur15MStatus, dsRpFrTmCntDir=dsRpFrTmCntDir, dsDcXmtClkInvert=dsDcXmtClkInvert, dsFmcFpingGen=dsFmcFpingGen, dsFmcFpingRst=dsFmcFpingRst, dsRpFrIntvl2HNum=dsRpFrIntvl2HNum, dsSc=dsSc, dsRpFrTotalFpSent=dsRpFrTotalFpSent, dsRmFpingMax=dsRmFpingMax, dsRmFpingAvg=dsRmFpingAvg, dsRpFrPre15MTable=dsRpFrPre15MTable, dsAcEst=dsAcEst, dsRpFrUrEIRExceeded=dsRpFrUrEIRExceeded, dsRpFrIntvl2HFrames=dsRpFrIntvl2HFrames, dsRpCarTotalEE=dsRpCarTotalEE, dsMcT1DLPath=dsMcT1DLPath, dsRpStLOSEvents=dsRpStLOSEvents, dsRpCarTotalBES=dsRpCarTotalBES, dsScDSCompatible=dsScDSCompatible, dsRpCarIntvlEE=dsRpCarIntvlEE, dsRpCarCnt15Mins=dsRpCarCnt15Mins, dsRpFrUrVcIndex=dsRpFrUrVcIndex, dsLmSelfTestState=dsLmSelfTestState, dsRpUsrDayTable=dsRpUsrDayTable, dsRpShrComments=dsRpShrComments, dsRpFrDayFpLost=dsRpFrDayFpLost, dsAcOffPowerTransition=dsAcOffPowerTransition, dsRpAhrIndex=dsRpAhrIndex, dsMcIIpAddr=dsMcIIpAddr, dsCcDteIn=dsCcDteIn, dsNcPassTiRfa=dsNcPassTiRfa, dsFcChanMap=dsFcChanMap, dsFpFr56FrLinkLed=dsFpFr56FrLinkLed, dsRpUsrDayUAS=dsRpUsrDayUAS, dsRmFpingMin=dsRmFpingMin, dsRpCarIntvlSES=dsRpCarIntvlSES, dsRpCarCurLOFC=dsRpCarCurLOFC, dsScMinutes=dsScMinutes, dsRpFrTmCntTable=dsRpFrTmCntTable, dsRpFrTotalDir=dsRpFrTotalDir, dsLm=dsLm, dsMcCDIpMask=dsMcCDIpMask, dsNcCRC=dsNcCRC, dsRpDdsIfIndex=dsRpDdsIfIndex, dsRpFrCur2HFpSent=dsRpFrCur2HFpSent, dsRpFrPre15MKbps=dsRpFrPre15MKbps, dsRpFrPre15MFpLost=dsRpFrPre15MFpLost, dsScAutoCfg=dsScAutoCfg, dsRpFrTotalOctets=dsRpFrTotalOctets, 
dsAcUst=dsAcUst, dsRmFpingTotal=dsRmFpingTotal, dsRpUsrIntvlStatus=dsRpUsrIntvlStatus, dsAcYelAlm=dsAcYelAlm, dsMc=dsMc, dsRpUsrCurBES=dsRpUsrCurBES, dsRpCarCur=dsRpCarCur, dsRmLbkCode=dsRmLbkCode, dsRpFrPre15MFpSent=dsRpFrPre15MFpSent, dsFcEntry=dsFcEntry, dsRpCarCurEE=dsRpCarCurEE, dsRpFrCur15MFpLost=dsRpFrCur15MFpLost, dsRpCarCurBES=dsRpCarCurBES, dsRpDm=dsRpDm, dsRpStLOTS16MFrameEvts=dsRpStLOTS16MFrameEvts, dsRpFrDayEntry=dsRpFrDayEntry, dsRpFrCur2HTable=dsRpFrCur2HTable, dsRpUsrDayNum=dsRpUsrDayNum, dsRpStRemFrameAlmEvts=dsRpStRemFrameAlmEvts, dsRpUsrCurTable=dsRpUsrCurTable, dsRpStIndex=dsRpStIndex)
mibBuilder.exportSymbols("DATASMART-MIB", dsRpFrPre15MOctets=dsRpFrPre15MOctets, dsRpUsrCurES=dsRpUsrCurES, dsCcControlPort=dsCcControlPort, dsAmc=dsAmc, dsCcStopBits=dsCcStopBits, dsFmcFpingOper=dsFmcFpingOper, dsRm=dsRm, dsRmFpingLen=dsRmFpingLen, dsMcIVc=dsMcIVc, dsCcDataBits=dsCcDataBits, dsScFrontPanel=dsScFrontPanel, dsRpFrCur2HDir=dsRpFrCur2HDir, dsRpUsrTotalES=dsRpUsrTotalES, dsRpUsrTotalCSS=dsRpUsrTotalCSS, dsRpFrCur15MFpSent=dsRpFrCur15MFpSent, dsRmFpingVc=dsRmFpingVc, dsRpFrCur2HFrames=dsRpFrCur2HFrames, dsRpShrTable=dsRpShrTable, dsRpFrTmCntEntry=dsRpFrTmCntEntry, dsNcMF16=dsNcMF16, dsAmcTrapDestPort=dsAmcTrapDestPort, dsRmFpingLost=dsRmFpingLost, dsFmcSetNiXmtUpperBwThresh=dsFmcSetNiXmtUpperBwThresh, dsFpFr56DnldFailLed=dsFpFr56DnldFailLed, dsRpCarTotalLOFC=dsRpCarTotalLOFC, dsDcTable=dsDcTable, dsAcAlmMsg=dsAcAlmMsg, dsRpFrDayTable=dsRpFrDayTable, dsFmcUpperBW=dsFmcUpperBW, dsRpCarCurUAS=dsRpCarCurUAS, dsMcEIpAddr=dsMcEIpAddr, dsDcClockSource=dsDcClockSource, dsRpUsrIntvlES=dsRpUsrIntvlES, dsPlBreak=dsPlBreak, dsRpFrCur2HFpAvg=dsRpFrCur2HFpAvg, dsRmBertTestSecs=dsRmBertTestSecs, dsRpStYellowEvents=dsRpStYellowEvents, dsRpUsrTotalBES=dsRpUsrTotalBES, dsNcFasAlign=dsNcFasAlign, dsRpFrIntvl2HFpSent=dsRpFrIntvl2HFpSent, dsScDay=dsScDay, dsRpUsrIntvlNum=dsRpUsrIntvlNum, dsFpFr56TestLed=dsFpFr56TestLed, dsFmcSetNiRcvUpperBwThresh=dsFmcSetNiRcvUpperBwThresh, dsTcGenRfa=dsTcGenRfa, dsRpSes=dsRpSes, dsCcParity=dsCcParity, dsRpFrPre15MDir=dsRpFrPre15MDir, dsRpCarCntSecs=dsRpCarCntSecs, dsRpStAISEvents=dsRpStAISEvents, dsFcLoadXcute=dsFcLoadXcute, dsAc=dsAc, dsDcIdleChar=dsDcIdleChar, dsFmcFrameType=dsFmcFrameType, dsRpUsrTotalDM=dsRpUsrTotalDM, dsAmcTrapDestTable=dsAmcTrapDestTable, dsAcSt=dsAcSt, dsSsAlarmSource=dsSsAlarmSource, dsRpStEntry=dsRpStEntry, dsNc=dsNc, dsRpFrIntvl2HVcIndex=dsRpFrIntvl2HVcIndex, dsRpFrTotalFrames=dsRpFrTotalFrames, dsFmcFcsBits=dsFmcFcsBits, dsRpFrDayFpAvg=dsRpFrDayFpAvg, dsNcCoding=dsNcCoding, dsRpUsrTotalStatus=dsRpUsrTotalStatus, dsRpUsrDayIndex=dsRpUsrDayIndex, dsRpUsrIntvlTable=dsRpUsrIntvlTable, dsFmcAddrOctets=dsFmcAddrOctets, dsAmcScrnIndex=dsAmcScrnIndex, dsSsPowerStatus=dsSsPowerStatus, dsRpFrDayStatus=dsRpFrDayStatus, dsRpAhrEntry=dsRpAhrEntry, dsFpFr56DpRtsRxLed=dsFpFr56DpRtsRxLed, dsAmcTrapEntry=dsAmcTrapEntry, dsRpCar=dsRpCar, dsRpUsrTotalEE=dsRpUsrTotalEE, dsRpFrCur15MDir=dsRpFrCur15MDir, dsRpFrTotalFpLost=dsRpFrTotalFpLost, dsFmcFpingLinkUp=dsFmcFpingLinkUp, dsAmcTrapDestVc=dsAmcTrapDestVc, dsRpStRemMFrameAlmEvts=dsRpStRemMFrameAlmEvts, dsRpShrIndex=dsRpShrIndex, dsMcDIpAddr=dsMcDIpAddr, dsRpUsrIntvlDM=dsRpUsrIntvlDM, dsFpFr56DpCtsTxLed=dsFpFr56DpCtsTxLed, dsAmcScrnEntry=dsAmcScrnEntry, dsFcMap16=dsFcMap16, dsFpFr56NiAlarmLed=dsFpFr56NiAlarmLed, dsRpCarIntvlUAS=dsRpCarIntvlUAS, dsScName=dsScName, dsRpFrIntvl2HFpLost=dsRpFrIntvl2HFpLost, dsRpCarIntvlLOFC=dsRpCarIntvlLOFC, dsFmcClrNiXmtUpperBwThresh=dsFmcClrNiXmtUpperBwThresh, dsRpStControlledSlips=dsRpStControlledSlips, dsScMonthExtention=dsScMonthExtention, dsScOperMode=dsScOperMode, dsAmcSourceScreen=dsAmcSourceScreen, dsTcAis=dsTcAis, dsAcBerAlm=dsAcBerAlm, dsRpUsr=dsRpUsr, dsRpCarCurES=dsRpCarCurES, dsFmcClrNiRcvUpperBwThresh=dsFmcClrNiRcvUpperBwThresh, dsRpFrPre15MVcIndex=dsRpFrPre15MVcIndex, dsRmInsertBitError=dsRmInsertBitError, dsSsAlarmState=dsSsAlarmState, dsRpUsrDayEE=dsRpUsrDayEE, dsRpFrCur15MVc=dsRpFrCur15MVc, dsRpFrTotalStatus=dsRpFrTotalStatus, dsRpUsrCurEntry=dsRpUsrCurEntry, dsNcYellow=dsNcYellow, dsRpCarTotalSES=dsRpCarTotalSES, dsAcAisAlm=dsAcAisAlm, 
dsNcFraming=dsNcFraming, dsRpFrUrCIRExceededOctets=dsRpFrUrCIRExceededOctets, dsSsLoopback=dsSsLoopback, dsRpUsrIntvlSES=dsRpUsrIntvlSES, dsRpCarTotalES=dsRpCarTotalES, dsTcFasAlign=dsTcFasAlign, dsRpFr=dsRpFr, dsRpUsrDayEntry=dsRpUsrDayEntry, dsScDayExtention=dsScDayExtention, dsTcEqual=dsTcEqual, dsAmcScrnIpAddr=dsAmcScrnIpAddr, dsRpBes=dsRpBes, dsRpFrTotalFpAvg=dsRpFrTotalFpAvg, dsAmcTrapType=dsAmcTrapType, dsRpUsrCurDM=dsRpUsrCurDM, dsRpShrEntry=dsRpShrEntry, dsNcAddr54=dsNcAddr54, dsFc=dsFc, dsRpFrUrTable=dsRpFrUrTable, dsRpCarIntvlNum=dsRpCarIntvlNum, dsRpPl=dsRpPl, dsRmTestCode=dsRmTestCode, dsRmFpingFreq=dsRmFpingFreq, dsScTftpSwdl=dsScTftpSwdl, dsRpFrCur15MFpMax=dsRpFrCur15MFpMax, dsRpUsrTmCntEntry=dsRpUsrTmCntEntry, dsScShelfAddr=dsScShelfAddr, dsRpFrCur15MFpAvg=dsRpFrCur15MFpAvg, dsScAutologout=dsScAutologout, DisplayString=DisplayString, dsCcDceIn=dsCcDceIn, dsRmBertTotalErrors=dsRmBertTotalErrors, dsFcChanIndex=dsFcChanIndex, dsRpFrCur2HKbps=dsRpFrCur2HKbps, dsRpShrDateTime=dsRpShrDateTime, dsRpCarIntvlES=dsRpCarIntvlES, Counter32=Counter32, dsMcNetif=dsMcNetif, dsRpUsrTmCntDays=dsRpUsrTmCntDays, dsRpFrTotalKbps=dsRpFrTotalKbps, dsRpFrIntvl2HFpAvg=dsRpFrIntvl2HFpAvg, dsRpFrCur15MTable=dsRpFrCur15MTable, dsScClockSource=dsScClockSource, dsRpFrCur2HFpMax=dsRpFrCur2HFpMax, dsNcIdle=dsNcIdle, dsRpFrIntvl2HVc=dsRpFrIntvl2HVc, dsMcEIpMask=dsMcEIpMask)
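# Illustrative only (not part of the generated module): once this file is on a
# MibBuilder's MIB search path, any of the symbols exported above can be
# resolved by name. The OID shown comes from the dsScName definition earlier
# in this module.
#
#   from pysnmp.smi import builder
#   mib = builder.MibBuilder()
#   mib.loadModules('DATASMART-MIB')
#   dsScName, = mib.importSymbols('DATASMART-MIB', 'dsScName')
#   dsScName.getName()  # -> (1, 3, 6, 1, 4, 1, 181, 2, 2, 12, 6)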
| [
"[email protected]"
] | |
97f136b14681008e20c099f12631a94a0fc21e33 | b7203262280b8fabcf5573ea494e8e2408d8d2b9 | /turtle/star.py | 9282c3b8c1bd90f12603fc067e860a8e5d21d5fd | [
"Apache-2.0"
] | permissive | MDGSF/PythonPractice | 1c11994a047ecb01c74b0cf0b320b6ffc570209d | 77e81d7c965c5de1629df223cb27dd541d128eb1 | refs/heads/master | 2021-06-16T13:49:00.310063 | 2021-04-15T11:32:24 | 2021-04-15T11:32:24 | 177,229,019 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 304 | py | #!/usr/bin/env python
# -*- coding: UTF-8 -*-
import turtle as t
def main():
t.color('red', 'yellow')
t.begin_fill()
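    # Turning left 170 degrees per 200-unit segment traces a 36-point star;
    # the loop stops once the turtle is back within 1 unit of the origin.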
while True:
t.forward(200)
t.left(170)
if abs(t.pos()) < 1:
break
t.end_fill()
t.done()
if __name__ == "__main__":
main()
| [
"[email protected]"
] | |
c351193f7c13944665260375b74e52d614f9e126 | ad13583673551857615498b9605d9dcab63bb2c3 | /output/instances/nistData/list/double/Schema+Instance/NISTXML-SV-IV-list-double-maxLength-3-3.py | b87e6657a0667e04224d685b68de4abfd4acfadf | [
"MIT"
] | permissive | tefra/xsdata-w3c-tests | 397180205a735b06170aa188f1f39451d2089815 | 081d0908382a0e0b29c8ee9caca6f1c0e36dd6db | refs/heads/main | 2023-08-03T04:25:37.841917 | 2023-07-29T17:10:13 | 2023-07-30T12:11:13 | 239,622,251 | 2 | 0 | MIT | 2023-07-25T14:19:04 | 2020-02-10T21:59:47 | Python | UTF-8 | Python | false | false | 415 | py | from output.models.nist_data.list_pkg.double.schema_instance.nistschema_sv_iv_list_double_max_length_3_xsd.nistschema_sv_iv_list_double_max_length_3 import NistschemaSvIvListDoubleMaxLength3
obj = NistschemaSvIvListDoubleMaxLength3(
value=[
6.828163737338829e+162,
4.3832452374445357e+167,
4.21622419951358e+263,
4.477423873143575e+138,
7.653382762597696e+277,
]
)
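# Illustrative only (not part of the generated test instance): the object can
# be rendered back to XML with xsdata's dataclass serializer, assuming xsdata
# is installed.
#
#   from xsdata.formats.dataclass.serializers import XmlSerializer
#   print(XmlSerializer().render(obj))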
| [
"[email protected]"
] | |
5b89597467106b28a80cea60757167381bfd8edc | 2aee45d23b47c6adba9eafc5a84d606a021f9300 | /web_dashboard/models/models.py | fec9ca50de1abcd2af54f1f4e7a897ee9cb90ce6 | [] | no_license | charles-123456/Primoris-System | 23b183460ea79bfa8d896556aa35d62460154567 | 0880b8266eedfd0016a3b365c9939c34ad301155 | refs/heads/main | 2023-08-21T06:24:42.840026 | 2021-10-25T06:10:48 | 2021-10-25T06:10:48 | 385,922,247 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 555 | py | # -*- coding: utf-8 -*-
from odoo import models, api
from lxml.builder import E
class BaseModel(models.AbstractModel):
_inherit = 'base'
@api.model
def _get_default_dashboard_view(self):
""" Generates a default dashboard view containing default sub graph and
pivot views.
:returns: a dashboard view as an lxml document
:rtype: etree._Element
"""
dashboard = E.dashboard()
dashboard.append(E.view(type="graph"))
dashboard.append(E.view(type="pivot"))
return dashboard
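
# Illustrative only (not part of the module): the element built above
# serializes to the minimal dashboard arch that the web client then renders.
#
#   from lxml import etree
#   etree.tostring(dashboard)
#   # b'<dashboard><view type="graph"/><view type="pivot"/></dashboard>'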
| [
"[email protected]"
] | |
1b4899ddb490787b92f042f6d5e21fcde84aa21a | 2aace9bb170363e181eb7520e93def25f38dbe5c | /build/idea-sandbox/system/python_stubs/cache/1935d5ba538766606877fea3170e8be386a9677d1874a1cd4f21bb4b15b507a4/_cython_0_29_16.py | 134551b94817f810b1dd85bf60fdae6b79d3c44b | [] | no_license | qkpqkp/PlagCheck | 13cb66fd2b2caa2451690bb72a2634bdaa07f1e6 | d229904674a5a6e46738179c7494488ca930045e | refs/heads/master | 2023-05-28T15:06:08.723143 | 2021-06-09T05:36:34 | 2021-06-09T05:36:34 | 375,235,940 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,282 | py | # encoding: utf-8
# module _cython_0_29_16
# from C:\Users\Doly\Anaconda3\lib\site-packages\scipy\integrate\_test_multivariate.cp37-win_amd64.pyd
# by generator 1.147
# no doc
# no imports
# Variables with simple values
__loader__ = None
__spec__ = None
# no functions
# classes
class cython_function_or_method(object):
def __call__(self, *args, **kwargs): # real signature unknown
""" Call self as a function. """
pass
def __get__(self, *args, **kwargs): # real signature unknown
""" Return an attribute of instance, which is of type owner. """
pass
def __init__(self, *args, **kwargs): # real signature unknown
pass
def __reduce__(self, *args, **kwargs): # real signature unknown
pass
def __repr__(self, *args, **kwargs): # real signature unknown
""" Return repr(self). """
pass
func_closure = property(lambda self: object(), lambda self, v: None, lambda self: None) # default
func_code = property(lambda self: object(), lambda self, v: None, lambda self: None) # default
func_defaults = property(lambda self: object(), lambda self, v: None, lambda self: None) # default
func_dict = property(lambda self: object(), lambda self, v: None, lambda self: None) # default
func_doc = property(lambda self: object(), lambda self, v: None, lambda self: None) # default
func_globals = property(lambda self: object(), lambda self, v: None, lambda self: None) # default
func_name = property(lambda self: object(), lambda self, v: None, lambda self: None) # default
__annotations__ = property(lambda self: object(), lambda self, v: None, lambda self: None) # default
__closure__ = property(lambda self: object(), lambda self, v: None, lambda self: None) # default
__code__ = property(lambda self: object(), lambda self, v: None, lambda self: None) # default
__defaults__ = property(lambda self: object(), lambda self, v: None, lambda self: None) # default
__globals__ = property(lambda self: object(), lambda self, v: None, lambda self: None) # default
__kwdefaults__ = property(lambda self: object(), lambda self, v: None, lambda self: None) # default
__self__ = property(lambda self: object(), lambda self, v: None, lambda self: None) # default
__dict__ = None # (!) real value is "mappingproxy({'__repr__': <slot wrapper '__repr__' of 'cython_function_or_method' objects>, '__call__': <slot wrapper '__call__' of 'cython_function_or_method' objects>, '__get__': <slot wrapper '__get__' of 'cython_function_or_method' objects>, '__reduce__': <method '__reduce__' of 'cython_function_or_method' objects>, '__module__': <member '__module__' of 'cython_function_or_method' objects>, 'func_doc': <attribute 'func_doc' of 'cython_function_or_method' objects>, '__doc__': <attribute '__doc__' of 'cython_function_or_method' objects>, 'func_name': <attribute 'func_name' of 'cython_function_or_method' objects>, '__name__': <attribute '__name__' of 'cython_function_or_method' objects>, '__qualname__': <attribute '__qualname__' of 'cython_function_or_method' objects>, '__self__': <attribute '__self__' of 'cython_function_or_method' objects>, 'func_dict': <attribute 'func_dict' of 'cython_function_or_method' objects>, '__dict__': <attribute '__dict__' of 'cython_function_or_method' objects>, 'func_globals': <attribute 'func_globals' of 'cython_function_or_method' objects>, '__globals__': <attribute '__globals__' of 'cython_function_or_method' objects>, 'func_closure': <attribute 'func_closure' of 'cython_function_or_method' objects>, '__closure__': <attribute '__closure__' of 'cython_function_or_method' objects>, 'func_code': <attribute 'func_code' of 'cython_function_or_method' objects>, '__code__': <attribute '__code__' of 'cython_function_or_method' objects>, 'func_defaults': <attribute 'func_defaults' of 'cython_function_or_method' objects>, '__defaults__': <attribute '__defaults__' of 'cython_function_or_method' objects>, '__kwdefaults__': <attribute '__kwdefaults__' of 'cython_function_or_method' objects>, '__annotations__': <attribute '__annotations__' of 'cython_function_or_method' objects>})"
__name__ = 'cython_function_or_method'
__qualname__ = 'cython_function_or_method'
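# Illustrative only (not part of the generated stub): stubs like this exist so
# static tooling can resolve attributes of compiled extension modules; at
# runtime the real values come from the .pyd named above, e.g.
#
#   from scipy.integrate import _test_multivariate  # assumes that SciPy build
#   _test_multivariate.__loader__  # an ExtensionFileLoader, not the stubbed None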
| [
"[email protected]"
] | |
523d863369652fae4e71eb3c3f18e20eae041782 | e0980f704a573894350e285f66f4cf390837238e | /.history/menus/wagtail_hooks_20201030120504.py | b3b8766746430b42c76ff1e9a244cfa6de30ca5e | [] | no_license | rucpata/WagtailWebsite | 28008474ec779d12ef43bceb61827168274a8b61 | 5aa44f51592f49c9a708fc5515ad877c6a29dfd9 | refs/heads/main | 2023-02-09T15:30:02.133415 | 2021-01-05T14:55:45 | 2021-01-05T14:55:45 | 303,961,094 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 116 | py | """ Kategoria zostanie dodana w pasku bocznym u admina"""
# The Polish docstring above translates to: "The category will be added to the
# admin sidebar." Completed sketch; assumes a Menu model in menus/models.py.
from wagtail.contrib.modeladmin.options import ModelAdmin, modeladmin_register
from .models import Menu


class MenuAdmin(ModelAdmin):
    model = Menu


modeladmin_register(MenuAdmin)
| [
"[email protected]"
] | |