| Column | Type |
| --- | --- |
| blob_id | stringlengths 40 to 40 |
| directory_id | stringlengths 40 to 40 |
| path | stringlengths 3 to 616 |
| content_id | stringlengths 40 to 40 |
| detected_licenses | sequencelengths 0 to 112 |
| license_type | stringclasses (2 values) |
| repo_name | stringlengths 5 to 115 |
| snapshot_id | stringlengths 40 to 40 |
| revision_id | stringlengths 40 to 40 |
| branch_name | stringclasses (777 values) |
| visit_date | timestamp[us] 2015-08-06 10:31:46 to 2023-09-06 10:44:38 |
| revision_date | timestamp[us] 1970-01-01 02:38:32 to 2037-05-03 13:00:00 |
| committer_date | timestamp[us] 1970-01-01 02:38:32 to 2023-09-06 01:08:06 |
| github_id | int64 4.92k to 681M (nullable) |
| star_events_count | int64 0 to 209k |
| fork_events_count | int64 0 to 110k |
| gha_license_id | stringclasses (22 values) |
| gha_event_created_at | timestamp[us] 2012-06-04 01:52:49 to 2023-09-14 21:59:50 (nullable) |
| gha_created_at | timestamp[us] 2008-05-22 07:58:19 to 2023-08-21 12:35:19 (nullable) |
| gha_language | stringclasses (149 values) |
| src_encoding | stringclasses (26 values) |
| language | stringclasses (1 value) |
| is_vendor | bool (2 classes) |
| is_generated | bool (2 classes) |
| length_bytes | int64 3 to 10.2M |
| extension | stringclasses (188 values) |
| content | stringlengths 3 to 10.2M |
| authors | sequencelengths 1 to 1 |
| author_id | stringlengths 1 to 132 |
e0f32a920e709012ccfe001490c91293713eac63 | 67b4c76ba0a94c023e085706cc3e0760f700eecd | /prod_settings.py | 22087417dac1072182d4d1a36ff665e4adf163d4 | [] | no_license | rlr/sitesprint09 | 928ef7129f0b120a2a1d02226b154a28d462e0bd | 3f74631c6feb9dcab50dc263dba145755cefab6f | refs/heads/master | 2021-01-10T21:30:30.745816 | 2010-01-30T02:35:45 | 2010-01-30T02:35:45 | 388,924 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,686 | py | # -*- coding: utf-8 -*-
LOCAL_DEV = False
DEBUG = False
TEMPLATE_DEBUG = DEBUG
#sorl-thumbnail
THUMBNAIL_DEBUG = False
#django-contact-form
DEFAULT_FROM_EMAIL = '[email protected]'
MANAGERS = (
('Ricky Rosario','[email protected]'),
)
DATABASE_ENGINE = 'postgresql_psycopg2' # 'postgresql_psycopg2', 'postgresql', 'mysql', 'sqlite3' or 'oracle'.
DATABASE_NAME = 'db_X' # Or path to database file if using sqlite3.
DATABASE_USER = 'pg_X' # Not used with sqlite3.
DATABASE_PASSWORD = 'X' # Not used with sqlite3.
DATABASE_HOST = '' # Set to empty string for localhost. Not used with sqlite3.
DATABASE_PORT = '' # Set to empty string for default. Not used with sqlite3.
EMAIL_HOST = 'smtp.gmail.com'
EMAIL_PORT = 587
EMAIL_HOST_USER = 'ABC'
EMAIL_HOST_PASSWORD = 'ABC'
EMAIL_USE_TLS = True
CACHE_BACKEND = 'memcached://127.0.0.1:11211'
CACHE_MIDDLEWARE_SECONDS = 60*5
CACHE_MIDDLEWARE_KEY_PREFIX = 'rr.'
CACHE_MIDDLEWARE_ANONYMOUS_ONLY = True
### DEBUG-TOOLBAR SETTINGS
DEBUG_TOOLBAR_CONFIG = {
'INTERCEPT_REDIRECTS': False,
}
INTERNAL_IPS = ('127.0.0.1',)
DEBUG_TOOLBAR_PANELS = (
'debug_toolbar.panels.version.VersionDebugPanel',
'debug_toolbar.panels.timer.TimerDebugPanel',
'debug_toolbar.panels.settings_vars.SettingsVarsDebugPanel',
'debug_toolbar.panels.headers.HeaderDebugPanel',
'debug_toolbar.panels.request_vars.RequestVarsDebugPanel',
'debug_toolbar.panels.template.TemplateDebugPanel',
'debug_toolbar.panels.sql.SQLDebugPanel',
'debug_toolbar.panels.logger.LoggingPanel',
)
### django-markup
MARKUP_CHOICES = (
'none',
'markdown',
'textile',
) | [
"[email protected]"
] | |
6b6f2a16f52435fd0b9bf3df97aef77d9bb2f821 | 22e9d7c194cf22513d68b61b97c49405a47e8708 | /Hashing/count_No_Teams.py | 2d1043accf6dbc6c8c2b8b3d941b2330ed460298 | [] | no_license | SandeepPadhi/Algorithmic_Database | 44c26f9300a99539781c5beb5587997b3ecadfe1 | ab8040a7dad94c84ec88f40e44b8520edcbe2443 | refs/heads/main | 2023-06-22T02:04:29.362315 | 2021-07-19T17:48:40 | 2021-07-19T17:48:40 | 338,329,340 | 3 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,555 | py | """
Date: 24/02/2021
1395. Count Number of Teams - LeetCode - Medium
The following program uses hashing.
"""
class Solution:
def numTeams(self, rating: List[int]) -> int:
highindex={}
lowindex={}
for i in range(len(rating)):
highindex[i]=[]
lowindex[i]=[]
for j in range(i+1,len(rating)):
if rating[j]>rating[i]:
highindex[i].append(j)
elif rating[j]<rating[i]:
lowindex[i].append(j)
count=0
for i in range(len(rating)):
for j in highindex[i]:
count+=len(highindex[j])
for j in lowindex[i]:
count+=len(lowindex[j])
return count
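# Illustrative usage (example input taken from the LeetCode problem statement;
# the three valid teams in that example are (2,3,4), (5,4,1) and (5,3,1)):
#
#     print(Solution().numTeams([2, 5, 3, 4, 1]))  # expected output: 3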
"""
Date: 24/02/2021
The following version solves the problem with simple counting.
"""
"""
def numTeams(self, rating: List[int]) -> int:
Ans=0
for i in range(1,len(rating)-1):
leftlow,lefthigh,rightlow,righthigh=0,0,0,0
j=i-1
while(j>=0):
if rating[j]<rating[i]:
leftlow+=1
elif rating[j]>rating[i]:
lefthigh+=1
j-=1
j=i+1
while(j<len(rating)):
if rating[i]<rating[j]:
righthigh+=1
elif rating[i]>rating[j]:
rightlow+=1
j+=1
Ans+=(leftlow*righthigh + lefthigh*rightlow)
return Ans
""" | [
"[email protected]"
] | |
4c08c95b73a2a6f9d94cb46833129c2508a1bf92 | c633bf9cbfa588ecd26d6daebb5434b08542bcb3 | /warehouse_by_active_user/models/warehouse.py | 1ffa053b1cf9a9fa92043664614a9bdc53ac7e66 | [] | no_license | rosalesdc/am_testing | 57c6afa0f6e028569c682d8bfff7d0e80d08c12d | b78be0ef4eb6a6ab916e4840d900a73cca427a0e | refs/heads/master | 2020-09-08T08:48:00.213951 | 2019-11-11T22:51:34 | 2019-11-11T22:51:34 | 221,083,592 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,218 | py | # -*- encoding: utf-8 -*-
#
# Module written to Odoo, Open Source Management Solution
#
# Copyright (c) 2017 Telematel - http://www.telematel.com/
# All Rights Reserved.
#
# Developer(s): Luis Ernesto Garcia Medina
# ([email protected])
#
########################################################################
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
########################################################################
from odoo import api, models, fields
class StockWarehouse(models.Model):
_inherit = 'stock.warehouse'
user_ids = fields.One2many('res.users', 'warehouse_id')
| [
"[email protected]"
] | |
37f3382aaf1b32e126dda9c8ba924399ca4eae1e | 783d136927a55eb83734bfd4eee4f4de06ababe2 | /DeepFM/DeepFM1.py | 04b1f6b1cb52f55d52af811bd940d31a38c169a1 | [] | no_license | robertaaa/code_study | 51bf9f87cf32c6c131c2711a0626fbd0e6ceec57 | 3fb264ed9fb36abe156c6663316b2e80169c26ac | refs/heads/master | 2023-09-05T21:58:20.055755 | 2021-11-08T05:07:49 | 2021-11-08T05:07:49 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,144 | py | import pandas as pd
from sklearn.metrics import log_loss, roc_auc_score
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import LabelEncoder, MinMaxScaler
from deepctr.models import DeepFM
from deepctr.utils import SingleFeat
if __name__ == "__main__":
data = pd.read_csv('./criteo_sample.txt')
# Split the features into sparse and dense groups
sparse_features = ['C' + str(i) for i in range(1, 27)]
dense_features = ['I' + str(i) for i in range(1, 14)]
data[sparse_features] = data[sparse_features].fillna('-1', )
data[dense_features] = data[dense_features].fillna(0, )
target = ['label']
# 1. Label-encode the sparse (categorical) features and min-max normalise the dense features
for feat in sparse_features:
lbe = LabelEncoder()
data[feat] = lbe.fit_transform(data[feat])
mms = MinMaxScaler(feature_range=(0, 1))
data[dense_features] = mms.fit_transform(data[dense_features])
# 2. Count the number of categories of each sparse feature and register the dense features
sparse_feature_list = [SingleFeat(feat, data[feat].nunique()) for feat in sparse_features]
dense_feature_list = [SingleFeat(feat, 0,) for feat in dense_features]
# 3. Build the model input arrays
train, test = train_test_split(data, test_size=0.2)
train_model_input = [train[feat.name].values for feat in sparse_feature_list] + \
[train[feat.name].values for feat in dense_feature_list]
test_model_input = [test[feat.name].values for feat in sparse_feature_list] + \
[test[feat.name].values for feat in dense_feature_list]
# 4. Define the model, then train, predict and evaluate
model = DeepFM({"sparse": sparse_feature_list, "dense": dense_feature_list}, task='binary')
model.compile("adam", "binary_crossentropy", metrics=['binary_crossentropy'], )
history = model.fit(train_model_input, train[target].values, batch_size=256, epochs=10, verbose=2, validation_split=0.2, )
pred_ans = model.predict(test_model_input, batch_size=256)
print("test LogLoss", round(log_loss(test[target].values, pred_ans), 4))
print("test AUC", round(roc_auc_score(test[target].values, pred_ans), 4))
| [
"[email protected]"
] | |
dbff0efff569b2b4be22939abe0ab1146d16710f | e68a40e90c782edae9d8f89b827038cdc69933c4 | /res/scripts/client/gui/scaleform/daapi/view/battle/shared/ribbons_panel.py | 88ada4a83db65cc2b9b186d1ab9087c3f904009f | [] | no_license | webiumsk/WOT-0.9.16 | 2486f8b632206b992232b59d1a50c770c137ad7d | 71813222818d33e73e414e66daa743bd7701492e | refs/heads/master | 2021-01-10T23:12:33.539240 | 2016-10-11T21:00:57 | 2016-10-11T21:00:57 | 70,634,922 | 0 | 0 | null | null | null | null | WINDOWS-1250 | Python | false | false | 13,015 | py | # 2016.10.11 22:10:43 Střední Evropa (letní čas)
# Embedded file name: scripts/client/gui/Scaleform/daapi/view/battle/shared/ribbons_panel.py
import BigWorld
from debug_utils import LOG_DEBUG_DEV
from helpers import i18n
from account_helpers.settings_core.settings_constants import BATTLE_EVENTS, GRAPHICS
from account_helpers.settings_core.SettingsCore import g_settingsCore
from gui.Scaleform.daapi.view.meta.RibbonsPanelMeta import RibbonsPanelMeta
from gui.Scaleform.daapi.view.battle.shared.ribbons_aggregator import RibbonsAggregator
from gui.Scaleform.genConsts.BATTLE_EFFICIENCY_TYPES import BATTLE_EFFICIENCY_TYPES as _BET
from gui.Scaleform.locale.INGAME_GUI import INGAME_GUI
from gui.shared import g_eventBus, EVENT_BUS_SCOPE
from gui.shared.events import GameEvent
from gui.battle_control import avatar_getter
from gui.battle_control import g_sessionProvider
_RIBBON_SOUNDS_ENABLED = True
_SHOW_RIBBON_SOUND_NAME = 'show_ribbon'
_HIDE_RIBBON_SOUND_NAME = 'hide_ribbon'
_CHANGE_RIBBON_SOUND_NAME = 'increment_ribbon_counter'
_SOUNDS = (_SHOW_RIBBON_SOUND_NAME, _HIDE_RIBBON_SOUND_NAME, _CHANGE_RIBBON_SOUND_NAME)
_EXTENDED_RENDER_PIPELINE = 0
_ADDITIONAL_USER_SETTINGS = (BATTLE_EVENTS.VEHICLE_INFO,
BATTLE_EVENTS.EVENT_NAME,
BATTLE_EVENTS.SHOW_IN_BATTLE,
GRAPHICS.RENDER_PIPELINE)
_BATTLE_EVENTS_SETTINGS_TO_BATTLE_EFFICIENCY_TYPES = {BATTLE_EVENTS.ENEMY_HP_DAMAGE: _BET.DAMAGE,
BATTLE_EVENTS.BLOCKED_DAMAGE: _BET.ARMOR,
BATTLE_EVENTS.ENEMY_RAM_ATTACK: _BET.RAM,
BATTLE_EVENTS.ENEMY_BURNING: _BET.BURN,
BATTLE_EVENTS.ENEMY_KILL: _BET.DESTRUCTION,
BATTLE_EVENTS.ENEMY_DETECTION: _BET.DETECTION,
BATTLE_EVENTS.ENEMY_TRACK_DAMAGE: _BET.ASSIST_TRACK,
BATTLE_EVENTS.ENEMY_DETECTION_DAMAGE: _BET.ASSIST_SPOT,
BATTLE_EVENTS.ENEMY_CRITICAL_HIT: _BET.CRITS,
BATTLE_EVENTS.BASE_CAPTURE: _BET.CAPTURE,
BATTLE_EVENTS.BASE_CAPTURE_DROP: _BET.DEFENCE}
def _getVehicleData(arenaDP, vehArenaID):
vTypeInfoVO = arenaDP.getVehicleInfo(vehArenaID).vehicleType
vehicleClassTag = vTypeInfoVO.classTag or ''
vehicleName = vTypeInfoVO.shortNameWithPrefix
return (vehicleName, vehicleClassTag)
def _formatCounter(counter):
return i18n.makeString(INGAME_GUI.COUNTRIBBONS_MULTISEPARATOR, multiplier=counter)
def _baseRibbonFormatter(ribbon, arenaDP, updater):
"""
Proxy to show BATTLE_EFFICIENCY_TYPES.CAPTURE or BATTLE_EFFICIENCY_TYPES.DEFENCE ribbon.
:param ribbon: An instance of _BaseRibbon derived class.
:param updater: Reference to view update method.
"""
updater(ribbonType=ribbon.getType(), leftFieldStr=str(ribbon.getPoints()))
def _enemyDetectionRibbonFormatter(ribbon, arenaDP, updater):
"""
Proxy to show BATTLE_EFFICIENCY_TYPES.DETECTION ribbon.
:param ribbon: An instance of _EnemyDetectionRibbon class.
:param updater: Reference to view update method.
"""
updater(ribbonType=ribbon.getType(), leftFieldStr=_formatCounter(ribbon.getCount()))
def _singleVehDamageRibbonFormatter(ribbon, arenaDP, updater):
"""
Proxy to show BATTLE_EFFICIENCY_TYPES.ARMOR or BATTLE_EFFICIENCY_TYPES.DAMAGE ribbon.
:param ribbon: An instance of _SingleVehicleDamageRibbon derived class.
:param updater: Reference to view update method.
"""
vehicleName, vehicleClassTag = _getVehicleData(arenaDP, ribbon.getVehicleID())
updater(ribbonType=ribbon.getType(), vehName=vehicleName, vehType=vehicleClassTag, leftFieldStr=BigWorld.wg_getIntegralFormat(ribbon.getDamage()))
def _multiVehHitRibbonFormatter(ribbon, arenaDP, updater):
"""
Proxy to show BATTLE_EFFICIENCY_TYPES.CRITS ribbon.
:param ribbon: An instance of _MultiVehicleHitRibbon derived class.
:param updater: Reference to view update method.
"""
vehIDs = ribbon.getVehIDs()
count = len(vehIDs)
assert count > 0
if count > 1:
updater(ribbonType=ribbon.getType(), leftFieldStr=_formatCounter(ribbon.getExtraSum()), rightFieldStr=_formatCounter(count))
else:
vehicleName, vehicleClassTag = _getVehicleData(arenaDP, vehIDs[0])
updater(ribbonType=ribbon.getType(), leftFieldStr=_formatCounter(ribbon.getExtraSum()), vehName=vehicleName, vehType=vehicleClassTag)
def _multiVehDamageRibbonFormatter(ribbon, arenaDP, updater):
"""
Proxy to show BATTLE_EFFICIENCY_TYPES.RAM, BATTLE_EFFICIENCY_TYPES.BURN,
BATTLE_EFFICIENCY_TYPES.ASSIST_TRACK or BATTLE_EFFICIENCY_TYPES.ASSIST_SPOT ribbon.
:param ribbon: An instance of _MultiVehicleDamageRibbon derived class.
:param updater: Reference to view update method.
"""
vehIDs = ribbon.getVehIDs()
count = len(vehIDs)
assert count > 0
if count > 1:
updater(ribbonType=ribbon.getType(), leftFieldStr=str(ribbon.getExtraSum()), rightFieldStr=_formatCounter(count))
else:
vehicleName, vehicleClassTag = _getVehicleData(arenaDP, vehIDs[0])
updater(ribbonType=ribbon.getType(), leftFieldStr=str(ribbon.getExtraSum()), vehName=vehicleName, vehType=vehicleClassTag)
def _killRibbonFormatter(ribbon, arenaDP, updater):
"""
Proxy to show BATTLE_EFFICIENCY_TYPES.DESTRUCTION ribbon.
:param ribbon: An instance of _EnemyKillRibbon class.
:param updater: Reference to view update method.
"""
vehIDs = ribbon.getVehIDs()
count = len(vehIDs)
assert count > 0
if count > 1:
updater(ribbonType=ribbon.getType(), leftFieldStr=_formatCounter(count))
else:
vehicleName, vehicleClassTag = _getVehicleData(arenaDP, vehIDs[0])
updater(ribbonType=ribbon.getType(), vehName=vehicleName, vehType=vehicleClassTag)
_RIBBONS_FMTS = {_BET.CAPTURE: _baseRibbonFormatter,
_BET.DEFENCE: _baseRibbonFormatter,
_BET.DETECTION: _enemyDetectionRibbonFormatter,
_BET.ARMOR: _singleVehDamageRibbonFormatter,
_BET.DAMAGE: _singleVehDamageRibbonFormatter,
_BET.CRITS: _multiVehHitRibbonFormatter,
_BET.RAM: _multiVehDamageRibbonFormatter,
_BET.BURN: _multiVehDamageRibbonFormatter,
_BET.ASSIST_TRACK: _multiVehDamageRibbonFormatter,
_BET.ASSIST_SPOT: _multiVehDamageRibbonFormatter,
_BET.DESTRUCTION: _killRibbonFormatter}
_AGGREGATED_RIBBON_TYPES = (_BET.CAPTURE,)
class BattleRibbonsPanel(RibbonsPanelMeta):
def __init__(self):
super(BattleRibbonsPanel, self).__init__()
self.__enabled = True
self.__userPreferences = {}
self.__isWithRibbonName = True
self.__isWithVehName = True
self.__isExtendedAnim = True
self.__isVisible = True
self.__arenaDP = g_sessionProvider.getCtx().getArenaDP()
self.__ribbonsAggregator = RibbonsAggregator()
self.__ribbonsAggregator.onRibbonAdded += self.__showRibbon
self.__ribbonsAggregator.onRibbonUpdated += self.__showRibbon
def onShow(self):
self.__playSound(_SHOW_RIBBON_SOUND_NAME)
def onChange(self):
self.__playSound(_CHANGE_RIBBON_SOUND_NAME)
def onHide(self, ribbonType):
LOG_DEBUG_DEV('RIBBON PANEL: onHide: ribbonType="{}"'.format(ribbonType))
if ribbonType not in _AGGREGATED_RIBBON_TYPES:
self.__ribbonsAggregator.clearRibbonData(ribbonType)
self.__playSound(_HIDE_RIBBON_SOUND_NAME)
def _populate(self):
super(BattleRibbonsPanel, self)._populate()
self.__enabled = bool(g_settingsCore.getSetting(BATTLE_EVENTS.SHOW_IN_BATTLE)) and self.__arenaDP is not None
self.__isWithRibbonName = bool(g_settingsCore.getSetting(BATTLE_EVENTS.EVENT_NAME))
self.__isWithVehName = bool(g_settingsCore.getSetting(BATTLE_EVENTS.VEHICLE_INFO))
self.__isExtendedAnim = g_settingsCore.getSetting(GRAPHICS.RENDER_PIPELINE) == _EXTENDED_RENDER_PIPELINE
for settingName in _BATTLE_EVENTS_SETTINGS_TO_BATTLE_EFFICIENCY_TYPES:
key = _BATTLE_EVENTS_SETTINGS_TO_BATTLE_EFFICIENCY_TYPES[settingName]
self.__userPreferences[key] = bool(g_settingsCore.getSetting(settingName))
self.__setupView()
g_settingsCore.onSettingsChanged += self.__onSettingsChanged
g_eventBus.addListener(GameEvent.GUI_VISIBILITY, self.__onGUIVisibilityChanged, scope=EVENT_BUS_SCOPE.BATTLE)
if self.__enabled:
self.__ribbonsAggregator.start()
return
def _dispose(self):
self.__ribbonsAggregator.onRibbonAdded -= self.__showRibbon
self.__ribbonsAggregator.onRibbonUpdated -= self.__showRibbon
g_eventBus.removeListener(GameEvent.GUI_VISIBILITY, self.__onGUIVisibilityChanged, scope=EVENT_BUS_SCOPE.BATTLE)
g_settingsCore.onSettingsChanged -= self.__onSettingsChanged
if self.__enabled:
self.__ribbonsAggregator.stop()
self.__arenaDP = None
super(BattleRibbonsPanel, self)._dispose()
return
def __playSound(self, eventName):
if not self.__isVisible or not _RIBBON_SOUNDS_ENABLED:
return
soundNotifications = avatar_getter.getSoundNotifications()
if soundNotifications and hasattr(soundNotifications, 'play') and hasattr(soundNotifications, 'isPlaying'):
for eventName in _SOUNDS:
if soundNotifications.isPlaying(eventName):
break
else:
soundNotifications.play(eventName)
def __addBattleEfficiencyEvent(self, ribbonType = '', leftFieldStr = '', vehName = '', vehType = '', rightFieldStr = ''):
LOG_DEBUG_DEV('RIBBON PANEL: __addBattleEfficiencyEvent: ribbonType="{}", leftFieldStr="{}", vehName="{}", vehType="{}", rightFieldStr="{}".'.format(ribbonType, leftFieldStr, vehName, vehType, rightFieldStr))
self.as_addBattleEfficiencyEventS(ribbonType, leftFieldStr, vehName, vehType, rightFieldStr)
def __showRibbon(self, ribbon):
if self.__checkUserPreferences(ribbon) and ribbon.getType() in _RIBBONS_FMTS:
updater = _RIBBONS_FMTS[ribbon.getType()]
updater(ribbon, self.__arenaDP, self.__addBattleEfficiencyEvent)
def __onGUIVisibilityChanged(self, event):
self.__isVisible = event.ctx['visible']
def __onSettingsChanged(self, diff):
addSettings = {}
for item in diff:
if item in _BATTLE_EVENTS_SETTINGS_TO_BATTLE_EFFICIENCY_TYPES:
key = _BATTLE_EVENTS_SETTINGS_TO_BATTLE_EFFICIENCY_TYPES[item]
self.__userPreferences[key] = bool(diff[item])
elif item in _ADDITIONAL_USER_SETTINGS:
addSettings[item] = diff[item]
if addSettings:
enabled = bool(addSettings.get(BATTLE_EVENTS.SHOW_IN_BATTLE, self.__enabled)) and self.__arenaDP is not None
self.__isWithRibbonName = bool(g_settingsCore.getSetting(BATTLE_EVENTS.EVENT_NAME))
self.__isWithVehName = bool(g_settingsCore.getSetting(BATTLE_EVENTS.VEHICLE_INFO))
self.__isExtendedAnim = g_settingsCore.getSetting(GRAPHICS.RENDER_PIPELINE) == _EXTENDED_RENDER_PIPELINE
if self.__enabled != enabled:
self.__enabled = enabled
if self.__enabled:
self.__ribbonsAggregator.start()
else:
self.__ribbonsAggregator.stop()
self.as_setSettingsS(self.__enabled, self.__isExtendedAnim, self.__isWithRibbonName, self.__isWithVehName)
return
def __checkUserPreferences(self, ribbon):
"""
Returns True if the user has enabled displaying of the given ribbon or there is no
setting for the given ribbon. Otherwise returns False.
:param ribbon: An instance of _Ribbon derived class.
"""
return self.__userPreferences.get(ribbon.getType(), True)
def __setupView(self):
self.as_setupS([[_BET.ARMOR, i18n.makeString(INGAME_GUI.efficiencyribbons(_BET.ARMOR))],
[_BET.DEFENCE, i18n.makeString(INGAME_GUI.efficiencyribbons(_BET.DEFENCE))],
[_BET.DAMAGE, i18n.makeString(INGAME_GUI.efficiencyribbons(_BET.DAMAGE))],
[_BET.ASSIST_SPOT, i18n.makeString(INGAME_GUI.efficiencyribbons(_BET.ASSIST_SPOT))],
[_BET.ASSIST_TRACK, i18n.makeString(INGAME_GUI.efficiencyribbons(_BET.ASSIST_TRACK))],
[_BET.BURN, i18n.makeString(INGAME_GUI.efficiencyribbons(_BET.BURN))],
[_BET.CAPTURE, i18n.makeString(INGAME_GUI.efficiencyribbons(_BET.CAPTURE))],
[_BET.DESTRUCTION, i18n.makeString(INGAME_GUI.efficiencyribbons(_BET.DESTRUCTION))],
[_BET.DETECTION, i18n.makeString(INGAME_GUI.efficiencyribbons(_BET.DETECTION))],
[_BET.RAM, i18n.makeString(INGAME_GUI.efficiencyribbons(_BET.RAM))],
[_BET.CRITS, i18n.makeString(INGAME_GUI.efficiencyribbons(_BET.CRITS))]], self.__isExtendedAnim, self.__enabled, self.__isWithRibbonName, self.__isWithVehName)
# okay decompyling c:\Users\PC\wotsources\files\originals\res\scripts\client\gui\scaleform\daapi\view\battle\shared\ribbons_panel.pyc
# decompiled 1 files: 1 okay, 0 failed, 0 verify failed
# 2016.10.11 22:10:44 Střední Evropa (letní čas)
| [
"[email protected]"
] | |
2cfe9762f059ccfa617d55861f8b9a5968eaecb6 | dfac8bd76452c83f39de08eb8fdbdd53e8caaca4 | /skyline/flux/uploaded_data_worker.py | 18b90da4f3faa1f1f0f23c3229a90f7e73bef173 | [
"MIT"
] | permissive | kioco/skyline-1 | ea872b329116bfdaa825180df576592e00dd2a58 | 9f96e7edfcae73fd0e5fcdb7d4cdbd79c8f43e8a | refs/heads/master | 2022-09-06T18:14:32.522353 | 2020-06-02T07:34:20 | 2020-06-02T07:34:20 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 86,244 | py | import sys
import os.path
from os import kill
import traceback
from multiprocessing import Process
from time import time
import datetime
from time import sleep
from ast import literal_eval
import socket
import pickle
import struct
import shutil
import glob
import gzip
import zipfile
import tarfile
from collections import Counter
import pandas as pd
import pytz
from timeit import default_timer as timer
from logger import set_up_logging
sys.path.append(os.path.join(os.path.dirname(os.path.realpath(__file__)), os.pardir))
sys.path.insert(0, os.path.dirname(__file__))
if True:
import settings
from skyline_functions import (
get_redis_conn, get_redis_conn_decoded, mkdir_p, sort_timeseries,
filesafe_metricname)
# Consolidate flux logging
logger = set_up_logging(None)
this_host = str(os.uname()[1])
try:
SERVER_METRIC_PATH = '.%s' % settings.SERVER_METRICS_NAME
if SERVER_METRIC_PATH == '.':
SERVER_METRIC_PATH = ''
except:
SERVER_METRIC_PATH = ''
parent_skyline_app = 'flux'
skyline_app = 'flux'
skyline_app_graphite_namespace = 'skyline.%s%s.uploaded_data_worker' % (parent_skyline_app, SERVER_METRIC_PATH)
LOCAL_DEBUG = False
settings_errors = False
try:
CARBON_HOST = settings.FLUX_CARBON_HOST
CARBON_PORT = settings.FLUX_CARBON_PORT
FLUX_CARBON_PICKLE_PORT = settings.FLUX_CARBON_PICKLE_PORT
DATA_UPLOADS_PATH = settings.DATA_UPLOADS_PATH
except:
settings_errors = True
try:
save_uploads = settings.FLUX_SAVE_UPLOADS
save_uploads_path = settings.FLUX_SAVE_UPLOADS_PATH
except:
save_uploads = False
utc_timezones = [
'Etc/GMT', 'Etc/GMT+0', 'Etc/GMT0', 'Etc/GMT-0', 'Etc/Greenwich', 'Etc/UTC',
'Etc/Universal', 'Etc/Zulu', 'GMT', 'GMT+0', 'GMT-0', 'GMT0', 'Greenwich',
'UTC', 'Universal', 'Zulu'
]
ALLOWED_EXTENSIONS = {'json', 'csv', 'xlsx', 'xls'}
class UploadedDataWorker(Process):
"""
The worker grabs data files from :mod:`settings.DATA_UPLOADS_PATH`, processes
them and sends the data to Graphite.
"""
def __init__(self, parent_pid):
super(UploadedDataWorker, self).__init__()
self.redis_conn = get_redis_conn(skyline_app)
self.redis_conn_decoded = get_redis_conn_decoded(skyline_app)
self.parent_pid = parent_pid
self.daemon = True
def check_if_parent_is_alive(self):
"""
Self explanatory.
"""
try:
kill(self.parent_pid, 0)
except:
exit(0)
def run(self):
"""
Called when the process intializes.
"""
def pickle_data_to_graphite(data):
message = None
try:
payload = pickle.dumps(data, protocol=2)
header = struct.pack("!L", len(payload))
message = header + payload
except:
logger.error(traceback.format_exc())
logger.error('error :: uploaded_data_worker :: failed to pickle to send to Graphite')
return False
if message:
try:
sock = socket.socket()
sock.connect((CARBON_HOST, FLUX_CARBON_PICKLE_PORT))
sock.sendall(message)
sock.close()
except:
logger.error(traceback.format_exc())
logger.error('error :: uploaded_data_worker :: failed to send pickle data to Graphite')
return False
else:
logger.error('error :: uploaded_data_worker :: failed to pickle metric data into message')
return False
return True
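# Carbon's pickle listener expects the payload to be a list of
# (metric_path, (timestamp, value)) tuples; an illustrative call with
# hypothetical values:
#
#     pickle_data_to_graphite([('test.upload.metric', (1590000000, 1.0))])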
def remove_redis_set_item(data):
try:
self.redis_conn.srem('flux.uploaded_data', str(data))
logger.info('uploaded_data_worker :: removed upload from the flux.uploaded_data Redis set')
except:
logger.error(traceback.format_exc())
logger.error('error :: uploaded_data_worker :: failed to remove item from the Redis flux.uploaded_data set - %s' % str(data))
return False
return True
def update_redis_upload_status_key(upload_status):
upload_id_key = None
for row in upload_status:
if row[0] == 'upload_id':
upload_id = row[1]
break
upload_id_key = upload_id.replace('/', '.')
try:
upload_status_redis_key = 'flux.upload_status.%s' % upload_id_key
upload_status_dict = {}
for row in upload_status:
if row[0] not in upload_status_dict.keys():
upload_status_dict[row[0]] = row[1]
if upload_status_dict:
self.redis_conn.setex(upload_status_redis_key, 2592000, str(upload_status_dict))
logger.info('uploaded_data_worker :: updated Redis key %s with new status' % upload_status_redis_key)
except:
logger.error(traceback.format_exc())
logger.error('error :: uploaded_data_worker :: failed updated Redis key %s with new status' % upload_status_redis_key)
return False
return True
def new_upload_status(upload_status, key, value):
new_upload_status = []
for row in upload_status:
if row[0] == key:
row[1] = value
new_upload_status.append(row)
update_redis_upload_status_key(new_upload_status)
return new_upload_status
def set_date(self, d):
try:
self.date = d.astimezone(pytz.utc)
except:
self.date = pytz.utc.localize(d)
def allowed_file(filename):
return '.' in filename and \
filename.rsplit('.', 1)[1].lower() in ALLOWED_EXTENSIONS
logger.info('uploaded_data_worker :: starting worker')
while True:
# Make sure Redis is up
redis_up = False
while not redis_up:
try:
redis_up = self.redis_conn.ping()
except:
logger.error('uploaded_data_worker :: cannot connect to Redis at socket path %s' % (settings.REDIS_SOCKET_PATH))
sleep(2)
self.redis_conn = get_redis_conn(skyline_app)
self.redis_conn_decoded = get_redis_conn_decoded(skyline_app)
if settings_errors:
logger.error('error :: uploaded_data_worker :: there are errors in the settings, cannot run')
sleep(60)
continue
uploads_to_process = []
try:
# Get uploads to process from the Redis set which the webapp
# /upload_data populates
uploads_to_process = list(self.redis_conn_decoded.smembers('flux.uploaded_data'))
logger.info('uploaded_data_worker :: there are %s uploads to process' % str(len(uploads_to_process)))
except Exception as e:
logger.error(traceback.format_exc())
logger.error('error :: uploaded_data_worker :: failed to query Redis for flux.uploaded_data - %s' % str(e))
if not uploads_to_process:
logger.info('uploaded_data_worker :: there are no uploads to process, sleeping for 30 seconds')
sleep(30)
continue
processing_upload_failed = False
data_files_uploaded = 0
data_files_successfully_processed = []
upload_to_process = None
dryrun = False
try:
uploads_to_process.sort()
upload_to_process = uploads_to_process[0]
logger.info('uploaded_data_worker :: new upload to process - %s' % str(upload_to_process))
except:
logger.error(traceback.format_exc())
logger.error('error :: uploaded_data_worker :: failed to determine upload_to_process')
processing_upload_failed = True
upload_dict = None
upload_status = []
if upload_to_process:
started_at = int(time())
started_at = datetime.datetime.fromtimestamp(started_at)
started_at_date = started_at.strftime('%Y-%m-%d %H:%M:%S')
start_processing = timer()
try:
upload_dict = literal_eval(upload_to_process)
except:
logger.error(traceback.format_exc())
logger.error('error :: uploaded_data_worker :: failed to literal_eval the upload_dict from upload_to_process - %s ' % str(upload_to_process))
processing_upload_failed = True
if upload_dict:
try:
parent_metric_namespace = upload_dict['parent_metric_namespace']
upload_status.append(['parent_metric_namespace', parent_metric_namespace])
timezone = upload_dict['timezone']
upload_status.append(['timezone', timezone])
upload_id = upload_dict['upload_id']
upload_status.append(['upload_id', upload_id])
upload_status.append(['status', 'pending'])
data_format = upload_dict['format']
upload_status.append(['format', data_format])
archive = upload_dict['archive']
upload_status.append(['archive', archive])
data_filename = upload_dict['data_filename']
upload_status.append(['data_filename', data_filename])
info_filename = upload_dict['info_filename']
upload_status.append(['info_filename', info_filename])
try:
header_row = int(upload_dict['header_row'])
except:
header_row = None
upload_status.append(['header_row', header_row])
try:
skip_rows = int(upload_dict['skip_rows'])
except:
skip_rows = None
upload_status.append(['skip_rows', skip_rows])
try:
info_file_in_archive = upload_dict['info_file_in_archive']
except:
info_file_in_archive = False
upload_status.append(['info_file_in_archive', str(info_file_in_archive)])
try:
resample_method = upload_dict['resample_method']
except:
resample_method = 'mean'
upload_status.append(['resample_method', resample_method])
# @added 20200521 - Feature #3538: webapp - upload_data endpoint
# Feature #3550: flux.uploaded_data_worker
# Added the ability to ignore_submitted_timestamps and not
# check flux.last metric timestamp
try:
ignore_submitted_timestamps = upload_dict['ignore_submitted_timestamps']
except:
ignore_submitted_timestamps = False
upload_status.append(['ignore_submitted_timestamps', ignore_submitted_timestamps])
except:
logger.error(traceback.format_exc())
logger.error('error :: uploaded_data_worker :: failed to determine required variables for the upload_dict - %s ' % str(upload_dict))
processing_upload_failed = True
if upload_status and upload_id:
update_redis_upload_status_key(upload_status)
upload_status.append(['processing started at', started_at_date])
upload_status = new_upload_status(upload_status, 'processing started at', started_at_date)
upload_info = None
info_file = None
if upload_id and info_filename:
info_file = '%s/%s/%s' % (DATA_UPLOADS_PATH, upload_id, info_filename)
if not os.path.isfile(info_file):
logger.error('error :: uploaded_data_worker :: failed to find info file - %s' % str(info_file))
info_file = None
processing_upload_failed = True
if upload_status:
upload_error = 'info file not found - %s' % info_filename
upload_status.append(['error', upload_error])
upload_status = new_upload_status(upload_status, 'error', upload_error)
if info_file:
try:
with open(info_file) as f:
dict_data_str = f.read()
upload_info = literal_eval(dict_data_str)
except:
logger.error(traceback.format_exc())
logger.error('error :: uploaded_data_worker :: failed to literal_eval the upload info for - %s' % str(info_file))
processing_upload_failed = True
if upload_status:
upload_error = 'data in info file not in the json correct format - %s' % info_filename
upload_status.append(['error', upload_error])
upload_status = new_upload_status(upload_status, 'error', upload_error)
refresh_info = False
data_file = None
if processing_upload_failed:
data_filename = None
if upload_id and data_filename:
try:
data_file = '%s/%s/%s' % (DATA_UPLOADS_PATH, upload_id, data_filename)
if not os.path.isfile(data_file):
logger.error('error :: uploaded_data_worker :: failed to find data file - %s' % str(data_file))
processing_upload_failed = True
data_file = None
if upload_status:
upload_error = 'data file not found - %s' % data_filename
upload_status.append(['error', upload_error])
upload_status = new_upload_status(upload_status, 'error', upload_error)
except:
logger.error(traceback.format_exc())
logger.error('error :: uploaded_data_worker :: failed to check if data_file exists')
processing_upload_failed = True
if upload_status:
upload_error = 'failed to check if data_file exists'
upload_status.append(['error', upload_error])
upload_status = new_upload_status(upload_status, 'error', upload_error)
if data_file:
if archive != 'none':
extracted_data_dir = '%s/%s/extracted' % (DATA_UPLOADS_PATH, upload_id)
else:
extracted_data_dir = '%s/%s' % (DATA_UPLOADS_PATH, upload_id)
upload_status.append([data_filename, 'pending'])
upload_status = new_upload_status(upload_status, data_filename, 'pending')
data_files_uploaded = 1
if not os.path.exists(extracted_data_dir):
mkdir_p(extracted_data_dir)
if archive == 'gz':
logger.info('uploaded_data_worker :: gunzipping - %s' % str(data_filename))
try:
uncompressed_data_filename = data_filename.replace('.gz', '')
uncompressed_data_file = '%s/%s' % (extracted_data_dir, uncompressed_data_filename)
with gzip.open(data_file, 'rb') as f_in:
with open(uncompressed_data_file, 'wb') as f_out:
shutil.copyfileobj(f_in, f_out)
data_files_uploaded = 1
upload_status.append([uncompressed_data_filename, 'pending'])
upload_status = new_upload_status(upload_status, uncompressed_data_filename, 'pending')
except:
logger.error(traceback.format_exc())
logger.error('error :: uploaded_data_worker :: failed to ungzip data archive - %s' % str(data_file))
processing_upload_failed = True
if upload_status:
upload_error = 'failed to ungzip data archive - %s' % data_filename
upload_status.append(['error', upload_error])
upload_status = new_upload_status(upload_status, 'error', upload_error)
if archive == 'zip':
logger.info('uploaded_data_worker :: unzipping - %s' % str(data_filename))
try:
with zipfile.ZipFile(data_file, 'r') as zip_ref:
zip_ref.extractall(extracted_data_dir)
for root, dirs, files in os.walk(extracted_data_dir):
for file in files:
if file.endswith('info.json'):
if info_file_in_archive:
info_file = file
refresh_info = True
if file.endswith(data_format):
data_files_uploaded += 1
upload_status.append([file, 'pending'])
upload_status = new_upload_status(upload_status, file, 'pending')
except:
logger.error(traceback.format_exc())
logger.error('error :: uploaded_data_worker :: failed to unzip data archive - %s' % str(data_file))
processing_upload_failed = True
if upload_status:
upload_error = 'failed to unzip data archive - %s' % data_filename
upload_status.append(['error', upload_error])
upload_status = new_upload_status(upload_status, 'error', upload_error)
if archive == 'tar_gz':
logger.info('uploaded_data_worker :: tar ungzipping - %s' % str(data_filename))
try:
tar = tarfile.open(data_file, 'r:gz')
for member in tar.getmembers():
f = tar.extractfile(member)
if f is not None:
extracted_data_dir = '%s/%s/extracted' % (DATA_UPLOADS_PATH, upload_id)
uncompressed_data_file = '%s/%s' % (extracted_data_dir, str(member))
with open(uncompressed_data_file, 'wb') as f_out:
shutil.copyfileobj(f, f_out)
if uncompressed_data_file.endswith('info.json'):
if info_file_in_archive:
info_file = uncompressed_data_file
refresh_info = True
if uncompressed_data_file.endswith(data_format):
data_files_uploaded += 1
uncompressed_data_filename = os.path.basename(uncompressed_data_file)
upload_status.append([uncompressed_data_filename, 'pending'])
upload_status = new_upload_status(upload_status, uncompressed_data_filename, 'pending')
except:
logger.error(traceback.format_exc())
logger.error('error :: uploaded_data_worker :: failed to untar data archive - %s' % str(data_file))
processing_upload_failed = True
if upload_status:
upload_error = 'failed to untar data archive - %s' % data_filename
upload_status.append(['error', upload_error])
upload_status = new_upload_status(upload_status, 'error', upload_error)
if archive != 'none':
unallowed_files = []
try:
for root, dirs, files in os.walk(extracted_data_dir):
for file in files:
acceptable_file = allowed_file(file)
if not acceptable_file:
file_to_delete = '%s/%s' % (extracted_data_dir, str(file))
unallowed_files.append(file_to_delete)
logger.info('uploaded_data_worker :: WARNING unallowed file format - %s' % file)
except:
logger.error(traceback.format_exc())
logger.error('error :: uploaded_data_worker :: failed to determine what files to delete in %s' % extracted_data_dir)
if unallowed_files:
for unallowed_file in unallowed_files:
unallowed_filename = os.path.basename(unallowed_file)
try:
os.remove(unallowed_file)
except:
logger.error(traceback.format_exc())
logger.error('error :: uploaded_data_worker :: failed to delete unallowed file - %s' % unallowed_file)
try:
upload_status.append([unallowed_filename, 'failed - DELETED - not an allowed file type'])
upload_status = new_upload_status(upload_status, unallowed_filename, 'failed - DELETED - not an allowed file type')
except:
logger.error(traceback.format_exc())
logger.error('error :: uploaded_data_worker :: failed update upload_status for delete unallowed file - %s' % unallowed_filename)
logger.info('uploaded_data_worker :: %s data files found to process for %s' % (
str(data_files_uploaded), upload_id))
if refresh_info:
logger.info('uploaded_data_worker :: refresh info for the info.json included in the data archive - %s' % info_file)
try:
with open(info_file) as f:
dict_data_str = f.read()
upload_info = literal_eval(dict_data_str)
except:
logger.error(traceback.format_exc())
logger.error('error :: uploaded_data_worker :: failed to literal_eval the upload info for - %s' % str(info_file))
processing_upload_failed = True
if upload_status:
upload_error = 'data in info file not in the json correct format - %s' % info_filename
upload_status.append(['error', upload_error])
upload_status = new_upload_status(upload_status, 'error', upload_error)
if upload_dict and processing_upload_failed:
try:
logger.info('uploaded_data_worker :: failed to process upload - %s' % str(upload_dict))
upload_status = new_upload_status(upload_status, data_filename, 'failed')
upload_status = new_upload_status(upload_status, 'status', 'failed')
item_removed = remove_redis_set_item(str(upload_dict))
if item_removed:
logger.info('uploaded_data_worker :: removed failed upload from the flux.uploaded_data Redis set')
else:
logger.error('error :: uploaded_data_worker :: failed to remove item from the Redis flux.uploaded_data set - %s' % str(upload_dict))
continue
except:
logger.error(traceback.format_exc())
logger.error('error :: uploaded_data_worker :: failed to remove_redis_set_item')
continue
if not upload_info:
upload_info = upload_dict
if upload_info:
try:
parent_metric_namespace = upload_info['parent_metric_namespace']
logger.info('uploaded_data_worker :: determined parent_metric_namespace from the upload_info dict - %s' % str(parent_metric_namespace))
except:
logger.error(traceback.format_exc())
logger.error('error :: uploaded_data_worker :: failed to determine parent_metric_namespace from the upload_info dict - %s' % str(upload_info))
processing_upload_failed = True
if upload_status:
upload_error = 'failed to determine parent_metric_namespace from info file - %s' % info_filename
upload_status.append(['error', upload_error])
upload_status = new_upload_status(upload_status, 'error', upload_error)
try:
timezone = upload_info['timezone']
logger.info('uploaded_data_worker :: determined timezone from the upload_info dict - %s' % str(timezone))
except:
logger.error(traceback.format_exc())
logger.error('error :: uploaded_data_worker :: failed to determine timezone from the upload_info dict - %s' % str(upload_info))
processing_upload_failed = True
if upload_status:
upload_error = 'failed to determine timezone from info file - %s' % info_filename
upload_status.append(['error', upload_error])
upload_status = new_upload_status(upload_status, 'error', upload_error)
try:
skip_rows = int(upload_info['skip_rows'])
logger.info('uploaded_data_worker :: determined skip_rows from the upload_info dict - %s' % str(skip_rows))
if skip_rows == 0:
skip_rows = None
except:
skip_rows = None
logger.info('uploaded_data_worker :: skip_rows was not passed in the upload info dict using %s' % str(skip_rows))
try:
header_row = int(upload_info['header_row'])
logger.info('uploaded_data_worker :: determined header_row from the upload_info dict - %s' % str(header_row))
except:
logger.error(traceback.format_exc())
logger.error('error :: uploaded_data_worker :: failed to determine header_row from the upload_info dict - %s' % str(upload_info))
processing_upload_failed = True
if upload_status:
upload_error = 'failed to determine header_row from info file - %s' % info_filename
upload_status.append(['error', upload_error])
upload_status = new_upload_status(upload_status, 'error', upload_error)
try:
date_orientation = upload_info['date_orientation']
logger.info('uploaded_data_worker :: determined date_orientation from the upload_info dict - %s' % str(date_orientation))
except:
logger.error(traceback.format_exc())
logger.error('error :: uploaded_data_worker :: failed to determine date_orientation from the upload_info dict - %s' % str(upload_info))
processing_upload_failed = True
if upload_status:
upload_error = 'failed to determine date_orientation from info file - %s' % info_filename
upload_status.append(['error', upload_error])
upload_status = new_upload_status(upload_status, 'error', upload_error)
try:
columns_to_metrics = upload_info['columns_to_metrics']
logger.info('uploaded_data_worker :: determined columns_to_metrics from the upload_info dict - %s' % str(columns_to_metrics))
except:
columns_to_metrics = None
logger.info('uploaded_data_worker :: columns_to_metrics was not passed in the upload info dict setting to None')
try:
columns_to_ignore = upload_info['columns_to_ignore']
logger.info('uploaded_data_worker :: determined columns_to_ignore from the upload_info dict - %s' % str(columns_to_ignore))
except:
columns_to_ignore = None
logger.info('uploaded_data_worker :: columns_to_ignore was not passed in the upload info dict setting to None')
try:
columns_to_process = upload_info['columns_to_process']
logger.info('uploaded_data_worker :: determined columns_to_process from the upload_info dict - %s' % str(columns_to_process))
except:
columns_to_process = None
logger.info('uploaded_data_worker :: columns_to_process was not passed in the upload info dict setting to None')
resample_method = 'mean'
try:
resample_method_str = upload_info['resample_method']
if resample_method_str in ['mean', 'sum']:
resample_method = resample_method_str
logger.info('uploaded_data_worker :: determined resample_method from the upload_info dict - %s' % str(resample_method))
except:
resample_method = 'mean'
logger.info('uploaded_data_worker :: resample_method was not passed in the upload info dict setting to mean')
try:
debug_enabled_in_info = upload_info['debug']
logger.info('uploaded_data_worker :: determined debug from the upload_info dict - %s' % str(debug_enabled_in_info))
if debug_enabled_in_info == 'true':
debug_enabled_in_info = True
except:
debug_enabled_in_info = False
try:
dryrun = upload_info['dryrun']
logger.info('uploaded_data_worker :: determined dryrun from the upload_info dict - %s' % str(dryrun))
if dryrun == 'true':
dryrun = True
except:
dryrun = False
try:
ignore_submitted_timestamps_str = upload_info['ignore_submitted_timestamps']
if ignore_submitted_timestamps_str == 'true':
ignore_submitted_timestamps = True
logger.info('uploaded_data_worker :: determined ignore_submitted_timestamps from the upload_info dict - %s' % str(ignore_submitted_timestamps))
except:
logger.info('uploaded_data_worker :: ignore_submitted_timestamps was not passed in the upload info')
if upload_dict and processing_upload_failed:
logger.info('uploaded_data_worker :: failed to process upload - %s' % str(upload_dict))
if upload_status:
upload_status = new_upload_status(upload_status, 'status', 'failed')
upload_status = new_upload_status(upload_status, data_filename, 'failed')
item_removed = remove_redis_set_item(str(upload_dict))
if item_removed:
logger.info('uploaded_data_worker :: removed failed upload from the flux.uploaded_data Redis set')
else:
logger.error('error :: uploaded_data_worker :: failed to remove item from the Redis flux.uploaded_data set - %s' % str(upload_dict))
continue
data_format_extension = '.%s' % str(data_format)
data_files_to_process = []
try:
for root, dirs, files in os.walk(extracted_data_dir):
for file in files:
if file.endswith(data_format_extension):
file_to_process = '%s/%s' % (extracted_data_dir, str(file))
data_files_to_process.append(file_to_process)
except:
logger.error(traceback.format_exc())
logger.error('error :: uploaded_data_worker :: failed to determine files to process in %s' % extracted_data_dir)
if upload_status:
upload_error = 'no extracted files found to process'
upload_status.append(['error', upload_error])
upload_status = new_upload_status(upload_status, 'error', upload_error)
upload_status = new_upload_status(upload_status, data_filename, 'failed')
pytz_tz = 'UTC'
invalid_timezone = False
if timezone:
pytz_tz = str(timezone)
try:
test_tz = pytz.timezone(pytz_tz)
logger.info('uploaded_data_worker :: the passed pytz_tz argument is OK - %s - ' % str(test_tz))
except:
logger.error(traceback.format_exc())
logger.error('error :: uploaded_data_worker :: the passed pytz_tz argument is not a valid pytz timezone string - %s' % str(timezone))
invalid_timezone = True
if upload_status:
upload_error = 'invalid timezone passed'
upload_status.append(['error', upload_error])
upload_status = new_upload_status(upload_status, 'error', upload_error)
upload_status = new_upload_status(upload_status, data_filename, 'failed')
data_file_process_failures = 0
for file_to_process in data_files_to_process:
successful = True
df = None
failure_reason = 'none'
data_columns = []
processing_filename = os.path.basename(file_to_process)
upload_status = new_upload_status(upload_status, processing_filename, 'in progress')
if invalid_timezone:
failure_reason = 'failed - invalid timezone passed'
logger.error('error :: uploaded_data_worker :: %s' % failure_reason)
successful = False
if upload_status:
upload_status = new_upload_status(upload_status, processing_filename, failure_reason)
if successful:
if data_format == 'xlsx' or data_format == 'xls':
try:
if LOCAL_DEBUG or debug_enabled_in_info:
logger.info('running - pd.read_excel(' + file_to_process + ', skiprows=' + str(skip_rows) + ', header=' + str(header_row) + ', usecols=' + str(columns_to_process) + ')')
# pandas-log is not available in Python 2 with
# pip, although Python 2 is no longer supported
# this to ensure this is backwards compatible
# with any current Python 2 installations
if sys.version_info[0] == 3:
import pandas_log
with pandas_log.enable():
df = pd.read_excel(file_to_process, skiprows=skip_rows, header=header_row, usecols=columns_to_process)
else:
df = pd.read_excel(file_to_process, skiprows=skip_rows, header=header_row, usecols=columns_to_process)
logger.debug(df.head())
logger.debug(df.info())
else:
df = pd.read_excel(file_to_process, skiprows=skip_rows, header=header_row, usecols=columns_to_process)
logger.info('uploaded_data_worker :: pandas dataframe created from %s - %s' % (
data_format, str(file_to_process)))
# Unfortunately this if df is not a reliable test
# if df.info() is None:
# logger.error('error :: uploaded_data_worker :: df.info() returns None')
# df = None
except:
logger.error(traceback.format_exc())
failure_reason = 'failed - pandas failed to parse the %s data file - %s' % (
data_format, str(file_to_process))
logger.error('error :: uploaded_data_worker :: %s' % failure_reason)
successful = False
if upload_status:
upload_status = new_upload_status(upload_status, processing_filename, failure_reason)
if data_format == 'csv':
try:
if LOCAL_DEBUG or debug_enabled_in_info:
logger.info('running - pd.read_csv(' + file_to_process + ', skiprows=' + str(skip_rows) + ', header=' + str(header_row) + ', usecols=' + str(columns_to_process) + ')')
if sys.version_info[0] == 3:
import pandas_log
with pandas_log.enable():
df = pd.read_csv(file_to_process, skiprows=skip_rows, header=header_row, usecols=columns_to_process)
else:
df = pd.read_csv(file_to_process, skiprows=skip_rows, header=header_row, usecols=columns_to_process)
logger.debug(df.head())
logger.debug(df.info())
else:
df = pd.read_csv(file_to_process, skiprows=skip_rows, header=header_row, usecols=columns_to_process)
logger.info('uploaded_data_worker :: pandas dataframe created from csv - %s' % str(file_to_process))
except:
logger.error(traceback.format_exc())
failure_reason = 'pandas failed to parse the csv data file - %s' % str(file_to_process)
logger.error('error :: uploaded_data_worker :: %s' % failure_reason)
successful = False
if upload_status:
upload_status = new_upload_status(upload_status, processing_filename, 'failed to read csv data')
# Unfortunately this if df is not a reliable test
# if df.info() is None:
# logger.error('error :: uploaded_data_worker :: df.info() returns None')
# df = None
# Unfortunately this if df is not a reliable test
# if not df:
# failure_reason = 'failed - emtpy dataframe'
# logger.error('error :: uploaded_data_worker :: %s' % failure_reason)
# successful = False
# if upload_status:
# upload_status = new_upload_status(upload_status, processing_filename, failure_reason)
if successful:
if columns_to_metrics:
columns_list = columns_to_metrics.split(',')
try:
df.columns = columns_list
logger.info('uploaded_data_worker :: pandas renamed columns to - %s' % str(columns_list))
except:
logger.error(traceback.format_exc())
failure_reason = 'pandas failed to rename columns to - %s' % str(columns_list)
logger.error('error :: uploaded_data_worker :: uploaded_data_worker :: %s' % failure_reason)
successful = False
if upload_status:
upload_status = new_upload_status(upload_status, processing_filename, 'failed to rename columns')
date_columns = []
if successful:
try:
date_columns = [col for col in df.columns if 'datetime64[ns' in str(df[col].dtypes)]
logger.info('uploaded_data_worker :: determined date columns, %s' % str(date_columns))
if len(date_columns) == 0:
logger.info('uploaded_data_worker :: no date column determined trying to convert data column to datetime64')
try:
df['date'] = pd.to_datetime(df['date'])
except:
logger.error(traceback.format_exc())
logger.error('error :: uploaded_data_worker :: failed to convert date column to datetime64')
date_columns = [col for col in df.columns if 'datetime64[ns' in str(df[col].dtypes)]
except:
logger.error(traceback.format_exc())
failure_reason = 'pandas failed to determined date columns'
logger.error('error :: uploaded_data_worker :: %s' % failure_reason)
successful = False
if upload_status:
upload_status = new_upload_status(upload_status, processing_filename, 'failed to determine datetime column')
date_column = None
if successful:
if len(date_columns) != 1:
failure_reason = 'the dataframe has no date column or more than one date column - %s' % str(date_columns)
logger.error('error :: uploaded_data_worker :: %s' % failure_reason)
successful = False
if upload_status:
upload_status = new_upload_status(upload_status, processing_filename, 'failed - more than one date column')
else:
date_column = date_columns[0]
logger.info('uploaded_data_worker :: determined date column as %s' % str(date_column))
del date_columns
if successful and date_column:
if pytz_tz not in utc_timezones:
try:
df[date_column] = df[date_column].dt.tz_localize(pytz_tz)
logger.info('uploaded_data_worker :: applied timezone %s to date column, %s' % (
str(pytz_tz), str(date_column)))
except:
logger.error(traceback.format_exc())
failure_reason = 'pandas failed to applied timezone %s to date column, %s' % (
str(pytz_tz), str(date_column))
logger.error('error :: uploaded_data_worker :: %s' % failure_reason)
successful = False
if upload_status:
upload_status = new_upload_status(upload_status, processing_filename, 'failed - to apply timezone')
dates = None
timestamps = []
if successful and date_column:
try:
dates = df[date_column].tolist()
timestamps = []
for d in dates:
timestamps.append(int(d.strftime('%s')))
logger.info('uploaded_data_worker :: created UTC timestamps list from date column %s data' % (
str(date_column)))
del dates
except:
logger.error(traceback.format_exc())
failure_reason = 'pandas failed to convert datetimes to UTC timestamps'
logger.error('error :: uploaded_data_worker :: %s' % failure_reason)
successful = False
if upload_status:
upload_status = new_upload_status(upload_status, processing_filename, 'failed to convert datetimes to UTC timestamps')
if successful and timestamps:
try:
df['pandas_utc_timestamp'] = timestamps
logger.info('uploaded_data_worker :: added pandas_utc_timestamp column to dataframe')
del timestamps
except:
logger.error(traceback.format_exc())
failure_reason = 'failed added skyline_uts_ts column to dataframe'
logger.error('error :: uploaded_data_worker :: %s' % failure_reason)
successful = False
if upload_status:
upload_status = new_upload_status(upload_status, processing_filename, 'failed to add UTC timestamp column')
data_columns = []
if successful:
try:
data_columns = [col for col in df.columns if col not in [date_column, 'pandas_utc_timestamp']]
logger.info('uploaded_data_worker :: determined %s data columns from the dataframe' % str(len(data_columns)))
except:
logger.error(traceback.format_exc())
failure_reason = 'failed to determine data columns from the dataframe'
logger.error('error :: uploaded_data_worker :: %s' % failure_reason)
successful = False
if upload_status:
upload_status = new_upload_status(upload_status, processing_filename, failure_reason)
if len(data_columns) == 0:
failure_reason = 'there are no data columns in the dataframe'
logger.error('error :: uploaded_data_worker :: %s' % failure_reason)
successful = False
if upload_status:
upload_status = new_upload_status(upload_status, processing_filename, failure_reason)
if not successful:
data_file_process_failures += 1
logger.error('error :: uploaded_data_worker :: could not process - %s - continuing' % str(file_to_process))
try:
del df
except:
pass
continue
successful = True
data_df_failures = 0
processed_data_columns = []
if successful and data_columns:
for data_col in data_columns:
data_df_successful = True
data_df = None
data_col_key = '%s (%s)' % (data_col, processing_filename, )
upload_status.append([data_col_key, 'processing'])
try:
data_df = df[['pandas_utc_timestamp', data_col]].copy()
logger.info('uploaded_data_worker :: created dataframe for %s' % data_col)
except:
logger.error(traceback.format_exc())
failure_reason = 'failed to create dataframe for pandas_utc_timestamp and %s' % data_col
logger.error('error :: uploaded_data_worker :: %s' % failure_reason)
data_df_successful = False
data_df_failures += 1
if upload_status:
upload_status = new_upload_status(upload_status, data_col_key, failure_reason)
try:
del data_df
except:
pass
continue
timeseries = None
original_timeseries_length = 0
if data_df_successful:
try:
data_df['timeseries'] = data_df.apply(lambda x: list([x['pandas_utc_timestamp'], x[data_col]]), axis=1)
timeseries = data_df['timeseries'].values.tolist()
original_timeseries_length = len(timeseries)
logger.info('uploaded_data_worker :: created timeseries for %s with %s timestamps and values' % (
data_col, str(original_timeseries_length)))
except:
logger.error(traceback.format_exc())
failure_reason = 'failed to create timeseries from pandas_utc_timestamp and %s' % data_col
logger.error('error :: uploaded_data_worker :: %s' % failure_reason)
data_df_successful = False
data_df_failures += 1
if upload_status:
upload_status = new_upload_status(upload_status, data_col_key, failure_reason)
try:
del data_df
except:
pass
continue
original_timeseries = None
if data_df_successful and timeseries:
original_timeseries = timeseries
try:
sorted_timeseries = sort_timeseries(timeseries)
if sorted_timeseries:
sorted_timeseries_length = len(sorted_timeseries)
timeseries = sorted_timeseries
logger.info('uploaded_data_worker :: sorted timeseries for %s which now has %s timestamps and values' % (
data_col, str(sorted_timeseries_length)))
try:
del sorted_timeseries
except:
pass
except:
logger.error(traceback.format_exc())
failure_reason = 'failed to sort timeseries of pandas_utc_timestamp and %s' % data_col
logger.error('error :: uploaded_data_worker :: %s' % failure_reason)
timeseries = original_timeseries
metric = None
if data_df_successful and timeseries:
if original_timeseries:
try:
del original_timeseries
except:
pass
try:
full_metric_name = '%s.%s' % (str(parent_metric_namespace), str(data_col))
metric = filesafe_metricname(full_metric_name)
logger.info('uploaded_data_worker :: interpolated metric name to %s' % metric)
except:
logger.error(traceback.format_exc())
failure_reason = 'failed to interpolated metric name for %s, cannot continue' % data_col
logger.error('error :: uploaded_data_worker :: %s' % failure_reason)
data_df_successful = False
data_df_failures += 1
if upload_status:
upload_status = new_upload_status(upload_status, data_col_key, failure_reason)
try:
del data_df
except:
pass
try:
del timeseries
except:
pass
continue
# Best effort to de-duplicate the data sent to Graphite
last_flux_timestamp = None
if data_df_successful and timeseries and metric:
cache_key = 'flux.last.%s' % metric
redis_last_metric_data = None
# @added 20200521 - Feature #3538: webapp - upload_data endpoint
# Feature #3550: flux.uploaded_data_worker
# Added the ability to ignore_submitted_timestamps and not
# check flux.last metric timestamp
if not ignore_submitted_timestamps:
try:
redis_last_metric_data = self.redis_conn_decoded.get(cache_key)
except:
logger.error(traceback.format_exc())
logger.error('error :: uploaded_data_worker :: failed to determine last_flux_timestamp from Redis key %s' % cache_key)
last_flux_timestamp = None
if redis_last_metric_data:
try:
last_metric_data = literal_eval(redis_last_metric_data)
last_flux_timestamp = int(last_metric_data[0])
except:
logger.error(traceback.format_exc())
logger.error('error :: uploaded_data_worker :: failed to determine last_flux_timestamp from Redis key %s' % cache_key)
last_flux_timestamp = None
valid_timeseries = []
if last_flux_timestamp:
try:
logger.info('uploaded_data_worker :: determined last timestamp from Redis for %s as %s' % (
metric, str(last_flux_timestamp)))
for timestamp, value in timeseries:
if timestamp > last_flux_timestamp:
valid_timeseries.append([timestamp, value])
valid_timeseries_length = len(valid_timeseries)
logger.info('uploaded_data_worker :: deduplicated timeseries based on last_flux_timestamp for %s which now has %s timestamps and values' % (
data_col, str(valid_timeseries_length)))
if valid_timeseries:
timeseries = valid_timeseries
else:
newest_data_timestamp = str(timeseries[-1][0])
logger.info('uploaded_data_worker :: none of the timestamps in %s data are newer than %s, the newest being %s, nothing to submit - continuing' % (
data_col, str(last_flux_timestamp), newest_data_timestamp))
if upload_status:
upload_status = new_upload_status(upload_status, data_col_key, 'processed - no new timestamps, all already known')
try:
del data_df
except:
pass
try:
del timeseries
except:
pass
continue
except:
logger.error(traceback.format_exc())
failure_reason = 'failed to determine if timestamps for %s are newer than the last_flux_timestamp, cannot continue' % data_col
logger.error('error :: uploaded_data_worker :: %s' % failure_reason)
data_df_successful = False
data_df_failures += 1
if upload_status:
upload_status = new_upload_status(upload_status, data_col_key, 'failed to determine if timestamps for %s are newer than the last known timestamp, cannot continue')
try:
del data_df
except:
pass
try:
del timeseries
except:
pass
continue
valid_timeseries = []
if data_df_successful and timeseries and metric:
datapoints_with_no_value = 0
for timestamp, value in timeseries:
try:
if value is None:
datapoints_with_no_value += 1
continue
float_value = float(value)
valid_timeseries.append([timestamp, float_value])
except:
datapoints_with_no_value += 1
continue
if datapoints_with_no_value > 0:
logger.info('uploaded_data_worker :: dropped %s timestamps from %s which have no value' % (
str(datapoints_with_no_value), metric))
if valid_timeseries:
timeseries = valid_timeseries
try:
del valid_timeseries
except:
pass
else:
logger.info('uploaded_data_worker :: none of the timestamps have value data in %s data, nothing to submit continuing' % (
data_col))
if upload_status:
upload_status = new_upload_status(upload_status, data_col_key, 'failed - no timestamps have value data')
try:
del data_df
except:
pass
try:
del timeseries
except:
pass
continue
if data_df_successful and timeseries and metric:
try:
# Deal with lower frequency data
# Determine resolution from the last 30 data points
resolution_timestamps = []
metric_resolution_determined = False
for metric_datapoint in timeseries[-30:]:
timestamp = int(metric_datapoint[0])
resolution_timestamps.append(timestamp)
timestamp_resolutions = []
if resolution_timestamps:
last_timestamp = None
for timestamp in resolution_timestamps:
if last_timestamp:
resolution = timestamp - last_timestamp
timestamp_resolutions.append(resolution)
last_timestamp = timestamp
else:
last_timestamp = timestamp
try:
del resolution_timestamps
except:
pass
if timestamp_resolutions:
try:
timestamp_resolutions_count = Counter(timestamp_resolutions)
ordered_timestamp_resolutions_count = timestamp_resolutions_count.most_common()
metric_resolution = int(ordered_timestamp_resolutions_count[0][0])
if metric_resolution > 0:
metric_resolution_determined = True
except:
logger.error(traceback.format_exc())
logger.error('error :: uploaded_data_worker :: failed to determine metric_resolution from timeseries')
try:
del timestamp_resolutions
except:
pass
# Resample
resample_at = None
if metric_resolution_determined and metric_resolution < 60:
resample_at = '1Min'
if resample_at:
try:
high_res_df = pd.DataFrame(timeseries)
high_res_df.columns = ['date', data_col]
high_res_df = high_res_df.set_index('date')
high_res_df.index = pd.to_datetime(high_res_df.index, unit='s')
resample_at = '1Min'
if resample_method == 'mean':
resampled_df = high_res_df.resample(resample_at).mean()
else:
resampled_df = high_res_df.resample(resample_at).sum()
logger.info('uploaded_data_worker :: resampled %s data by %s' % (
data_col, resample_method))
try:
del high_res_df
except:
pass
resampled_df.reset_index(level=0, inplace=True)
resampled_df['date'] = resampled_df['date'].dt.tz_localize(pytz_tz)
dates = resampled_df['date'].tolist()
timestamps = []
for d in dates:
timestamps.append(int(d.strftime('%s')))
resampled_df['pandas_utc_timestamp'] = timestamps
resampled_df['timeseries'] = resampled_df.apply(lambda x: list([x['pandas_utc_timestamp'], x[data_col]]), axis=1)
timeseries = resampled_df['timeseries'].values.tolist()
try:
del resampled_df
except:
pass
resampled_timeseries_length = len(timeseries)
logger.info('uploaded_data_worker :: created resampled timeseries for %s with %s timestamps and values' % (
data_col, str(resampled_timeseries_length)))
except:
logger.error(traceback.format_exc())
failure_reason = 'failed to create resampled timeseries from %s' % data_col
logger.error('error :: uploaded_data_worker :: %s' % failure_reason)
data_df_successful = False
data_df_failures += 1
if upload_status:
upload_status = new_upload_status(upload_status, data_col_key, failure_reason)
try:
del data_df
except:
pass
try:
del timeseries
except:
pass
continue
except:
logger.error(traceback.format_exc())
failure_reason = 'failed to resample timeseries for %s' % data_col
logger.error('error :: uploaded_data_worker :: %s' % failure_reason)
data_df_successful = False
data_df_failures += 1
if upload_status:
upload_status = new_upload_status(upload_status, data_col_key, failure_reason)
try:
del data_df
except:
pass
try:
del timeseries
except:
pass
continue
try:
del data_df
except:
pass
sent_to_graphite = 0
last_timestamp_sent = None
last_value_sent = None
if data_df_successful and timeseries and metric:
timeseries_length = len(timeseries)
logger.info('uploaded_data_worker :: after preprocessing there are %s data points to send to Graphite for %s' % (
str(timeseries_length), metric))
timestamp = None
value = None
start_populating = timer()
listOfMetricTuples = []
try:
for timestamp, value in timeseries:
tuple_data = (metric, (int(timestamp), float(value)))
if LOCAL_DEBUG or debug_enabled_in_info:
logger.debug('debug :: uploaded_data_worker :: sending - %s' % str(tuple_data))
listOfMetricTuples.append(tuple_data)
sent_to_graphite += 1
if value == 0.0:
has_value = True
if value == 0:
has_value = True
if value:
has_value = True
if has_value:
last_timestamp_sent = int(timestamp)
last_value_sent = float(value)
except:
logger.error(traceback.format_exc())
logger.error('error :: uploaded_data_worker :: failed to populate listOfMetricTuples for %s' % str(metric))
if listOfMetricTuples:
data_points_sent = 0
smallListOfMetricTuples = []
tuples_added = 0
for data in listOfMetricTuples:
smallListOfMetricTuples.append(data)
tuples_added += 1
if tuples_added >= 1000:
if dryrun:
pickle_data_sent = True
logger.info('uploaded_data_worker :: DRYRUN :: faking sending data')
else:
pickle_data_sent = pickle_data_to_graphite(smallListOfMetricTuples)
if pickle_data_sent:
data_points_sent += tuples_added
logger.info('uploaded_data_worker :: sent %s/%s of %s data points to Graphite via pickle for %s' % (
str(tuples_added), str(data_points_sent),
str(timeseries_length), metric))
sent_to_graphite += len(smallListOfMetricTuples)
smallListOfMetricTuples = []
tuples_added = 0
else:
logger.error('error :: uploaded_data_worker :: failed to send %s data points to Graphite via pickle for %s' % (
str(tuples_added), metric))
if smallListOfMetricTuples:
tuples_to_send = len(smallListOfMetricTuples)
if dryrun:
pickle_data_sent = True
logger.info('uploaded_data_worker :: DRYRUN :: faking sending data')
else:
pickle_data_sent = pickle_data_to_graphite(smallListOfMetricTuples)
if pickle_data_sent:
data_points_sent += tuples_to_send
logger.info('uploaded_data_worker :: sent the last %s/%s of %s data points to Graphite via pickle for %s' % (
str(tuples_to_send), str(data_points_sent),
str(timeseries_length), metric))
else:
logger.error('error :: uploaded_data_worker :: failed to send the last %s data points to Graphite via pickle for %s' % (
str(tuples_to_send), metric))
try:
del timeseries
except:
pass
try:
del listOfMetricTuples
except:
pass
try:
del smallListOfMetricTuples
except:
pass
logger.info('uploaded_data_worker :: sent %s data points to Graphite for %s' % (
str(sent_to_graphite), metric))
if last_timestamp_sent:
try:
# Update Redis flux key
cache_key = 'flux.last.%s' % metric
metric_data = [int(last_timestamp_sent), float(last_value_sent)]
if dryrun:
logger.info('uploaded_data_worker :: DRYRUN :: faking updating %s with %s' % (
cache_key, str(metric_data)))
# @added 20200521 - Feature #3538: webapp - upload_data endpoint
# Feature #3550: flux.uploaded_data_worker
# Added the ability to ignore_submitted_timestamps and not
# check flux.last metric timestamp
elif ignore_submitted_timestamps:
logger.info('uploaded_data_worker :: ignore_submitted_timestamps :: not updating %s with %s' % (
cache_key, str(metric_data)))
# @added 20200527 - Feature #3550: flux.uploaded_data_worker
# If submitted timestamps are ignored
# add the metric to the Redis set for analyzer to
# sort and deduplicate the time
# series data in Redis
self.redis_conn.sadd('flux.sort_and_dedup.metrics', metric)
logger.info('uploaded_data_worker :: added %s to flux.sort_and_dedup.metrics Redis set' % (
metric))
else:
self.redis_conn.set(cache_key, str(metric_data))
logger.info('uploaded_data_worker :: set the metric Redis key - %s - %s' % (
cache_key, str(metric_data)))
except:
logger.error(traceback.format_exc())
logger.error('error :: uploaded_data_worker :: failed to set Redis key - %s - %s' % (
cache_key, str(metric_data)))
processed_data_columns.append(data_col)
end_populating = timer()
seconds_to_run = end_populating - start_populating
logger.info('uploaded_data_worker :: %s populated to Graphite in %.6f seconds' % (
metric, seconds_to_run))
if upload_status:
new_status = 'processed - %s data points submitted' % str(sent_to_graphite)
upload_status = new_upload_status(upload_status, data_col_key, new_status)
if upload_status:
upload_status = new_upload_status(upload_status, processing_filename, 'complete')
try:
if len(processed_data_columns) == len(data_columns):
data_files_successfully_processed.append(processing_filename)
new_status = 'completed - processed all %s data columns OK' % str(len(processed_data_columns))
else:
new_status = 'completed - with some errors processed %s of the %s data columns ' % (
str(len(processed_data_columns)), str(len(data_columns)))
upload_status = new_upload_status(upload_status, processing_filename, new_status)
except:
logger.error(traceback.format_exc())
logger.error('error :: uploaded_data_worker :: failed to determine if processed')
try:
data_files_successfully_processed_count = len(data_files_successfully_processed)
logger.info('uploaded_data_worker :: %s of the %s data files were successfully processed' % (
str(data_files_successfully_processed_count), str(data_files_uploaded)))
logger.info('uploaded_data_worker :: processed upload - %s' % str(upload_dict))
except:
logger.error(traceback.format_exc())
logger.error('error :: uploaded_data_worker :: failed to determine how many data files were processed')
all_processed = True
try:
if data_files_successfully_processed_count != data_files_uploaded:
all_processed = False
except:
logger.error(traceback.format_exc())
logger.error('error :: uploaded_data_worker :: failed to determine if all data files were processed')
try:
real_extracted_data_dir = '%s/%s/extracted' % (DATA_UPLOADS_PATH, upload_id)
if all_processed:
try:
if os.path.exists(real_extracted_data_dir):
shutil.rmtree(real_extracted_data_dir)
logger.info('uploaded_data_worker :: removed extracted files directory - %s' % real_extracted_data_dir)
except:
logger.error('error :: uploaded_data_worker :: failed to rmtree extracted files directory - %s' % real_extracted_data_dir)
if upload_status:
upload_status = new_upload_status(upload_status, data_filename, 'complete')
upload_status = new_upload_status(upload_status, 'status', 'complete')
else:
if upload_status:
upload_status = new_upload_status(upload_status, data_filename, 'complete - with caveats')
except:
logger.error(traceback.format_exc())
logger.error('error :: uploaded_data_worker :: failed to determine if extracted dir needs to be removed')
remove_upload_dir = True
try:
upload_data_dir = '%s/%s' % (DATA_UPLOADS_PATH, upload_id)
if save_uploads and all_processed:
save_path = '%s/%s' % (save_uploads_path, upload_id)
if not os.path.exists(save_path):
mkdir_p(save_path)
logger.info('uploaded_data_worker :: created %s' % save_path)
logger.info('uploaded_data_worker :: saving uploaded files from %s' % upload_data_dir)
data_files = []
try:
glob_path = '%s/*.*' % upload_data_dir
data_files = glob.glob(glob_path)
except:
logger.error(traceback.format_exc())
logger.error('error :: uploaded_data_worker :: glob failed on %s' % upload_data_dir)
remove_upload_dir = False
for i_file in data_files:
try:
shutil.copy(i_file, save_path)
logger.info('uploaded_data_worker :: data copied to %s/%s' % (save_path, i_file))
except shutil.Error as e:
logger.error(traceback.format_exc())
logger.error('error :: uploaded_data_worker :: shutil error - upload data not copied to %s' % save_path)
remove_upload_dir = False
# Any error saying that the directory doesn't exist
except OSError as e:
logger.error(traceback.format_exc())
logger.error('error :: uploaded_data_worker :: copying upload data to save_path - %s' % (e))
remove_upload_dir = False
logger.info('uploaded_data_worker :: upload data copied to %s' % save_path)
except:
logger.error(traceback.format_exc())
logger.error('error :: uploaded_data_worker :: failed to determine if data was saved')
if remove_upload_dir:
try:
if data_files_successfully_processed_count != data_files_uploaded:
logger.info('uploaded_data_worker :: due to there being failures to process some data files, the upload directory is not being removed')
remove_upload_dir = False
if upload_status:
new_status = 'some errors, processed %s of the %s data files successfully' % (
str(data_files_successfully_processed_count), str(data_files_uploaded))
upload_status = new_upload_status(upload_status, 'final status', new_status)
except:
logger.error(traceback.format_exc())
logger.error('error :: uploaded_data_worker :: failed to determine if remove_upload_dir')
if remove_upload_dir:
try:
if os.path.exists(upload_data_dir):
shutil.rmtree(upload_data_dir)
logger.info('uploaded_data_worker :: removed upload directory - %s' % upload_data_dir)
if upload_status:
new_status = 'complete'
upload_status.append(['final_status', new_status])
upload_status = new_upload_status(upload_status, 'final status', new_status)
except:
logger.error('error :: uploaded_data_worker :: failed to rmtree upload directory - %s' % upload_data_dir)
try:
item_removed = remove_redis_set_item(str(upload_dict))
if item_removed:
logger.info('uploaded_data_worker :: removed failed upload from the flux.uploaded_data Redis set')
else:
logger.error('error :: uploaded_data_worker :: failed to remove item from the Redis flux.uploaded_data set - %s' % str(upload_dict))
except:
logger.error(traceback.format_exc())
logger.error('error :: uploaded_data_worker :: failed to remove_redis_set_item')
try:
end_processing = timer()
seconds_to_run = end_processing - start_processing
logger.info('uploaded_data_worker :: processed upload for %s in %.6f seconds' % (
parent_metric_namespace, seconds_to_run))
if upload_status:
time_to_run = '%.6f seconds' % seconds_to_run
upload_status.append(['processing_time', time_to_run])
upload_status = new_upload_status(upload_status, 'processing_time', time_to_run)
completed = int(time())
completed_at = datetime.datetime.fromtimestamp(completed)
completed_at_date = completed_at.strftime('%Y-%m-%d %H:%M:%S')
upload_status.append(['processing completed at', completed_at_date])
upload_status = new_upload_status(upload_status, 'processing completed at', completed_at_date)
upload_status = new_upload_status(upload_status, 'status', 'complete')
try:
del upload_status
except:
pass
except:
logger.error(traceback.format_exc())
logger.error('error :: uploaded_data_worker :: failed to determine time_to_run')
try:
del start_processing
except:
pass
try:
del end_processing
except:
pass
try:
del df
except:
pass
| [
"[email protected]"
] | |
c8a8925d67f8d6367f31e9f8874fb6c12a1cef90 | eb731db339fd09757cc816823aa875bcaf15abeb | /jobs/migrations/0024_auto_20150116_2026.py | 31697f0f928f47b0adcf929e74d0db4c10f09ed4 | [] | no_license | sokogfb/1source | a34c17b90e23744686d904d8ed195182ecdd5e1f | a866e16e52aa34800f0da60e615f47952e084735 | refs/heads/master | 2020-12-01T08:50:43.138251 | 2016-10-23T01:33:47 | 2016-10-23T01:33:47 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 693 | py | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
    dependencies = [
        ('jobs', '0023_application_hired_date'),
    ]

    operations = [
        migrations.AddField(
            model_name='application',
            name='interview_date',
            field=models.DateField(blank=True, null=True),
            preserve_default=True,
        ),
        migrations.AddField(
            model_name='application',
            name='interviewed',
            field=models.BooleanField(default=False, help_text='Were they interviewed?'),
            preserve_default=True,
        ),
    ]
| [
"[email protected]"
] | |
ab3dd0815e9f713d51735f87335bdf9ccefb193a | 6b2a8dd202fdce77c971c412717e305e1caaac51 | /solutions_1484496_0/Python/Abraham/3.py | 3eb056330663b609738fc113d40d686cfd9d04a0 | [] | no_license | alexandraback/datacollection | 0bc67a9ace00abbc843f4912562f3a064992e0e9 | 076a7bc7693f3abf07bfdbdac838cb4ef65ccfcf | refs/heads/master | 2021-01-24T18:27:24.417992 | 2017-05-23T09:23:38 | 2017-05-23T09:23:38 | 84,313,442 | 2 | 4 | null | null | null | null | UTF-8 | Python | false | false | 2,762 | py | import sys
infile=open(sys.argv[1])
#contents=infile.read()
#print contents
line=infile.readline()
numofcases=int(line)
print numofcases,"cases"
count=0
outfile=open("out.txt","w")
for i in range(numofcases):
count+=1
outfile.write("Case #"+str(count)+":\n")
line=infile.readline()
print line
numbers=[]
for n in line.split():
numbers.append(int(n))
numofnum=numbers.pop(0)
print numofnum
print numbers
sum={}
for n in numbers:
sum[n]=[n,]
found=False
for i in range(len(numbers)):
#for i in sum.keys():
# print i,sum[i]
for j in numbers:
if found:
break
newsum={}
for val in sum.keys():
if j not in sum[val]:
if j+val in sum.keys():
l1=sum[val][:]
l1.append(j)
if set(sum[j+val]) != set(l1):
str1=""
for i in l1:
str1+=str(i)+" "
str1+="\n"
str2=""
for i in sum[j+val]:
str2+=str(i)+" "
str2+="\n"
outfile.write(str1)
outfile.write(str2)
print l1
print sum[j+val]
found=True
break
else:
if j+val in newsum.keys():
l1=sum[val][:]
l1.append(j)
if set(newsum[j+val]) != set(l1):
print l1
print newsum[j+val]
str1=""
for i in l1:
str1+=str(i)+" "
str1+="\n"
str2=""
for i in newsum[j+val]:
str2+=str(i)+" "
str2+="\n"
outfile.write(str1)
outfile.write(str2)
found=True
break
else:
newsum[val+j]=sum[val][:]
newsum[val+j].append(j)
if found:
break
sum.update(newsum)
if not found:
outfile.write("Impossible"+"\n")
print "impossible"
outfile.close()
| [
"[email protected]"
] | |
18fc7b4207df14536f7fea612bb7a37e5a3ea469 | 6d5545faf2af0a6bb565ad698bb824110b40e121 | /WEBAPP/MLmodel/inception_client.py.runfiles/tf_serving/external/org_tensorflow/tensorflow/keras/applications/inception_v3/__init__.py | 007b1d35ced43796547423c771944fc1cdcb3780 | [
"MIT"
] | permissive | sunsuntianyi/mlWebApp_v2 | abb129cd43540b1be51ecc840127d6e40c2151d3 | 5198685bf4c4e8973988722282e863a8eaeb426f | refs/heads/master | 2021-06-23T22:02:38.002145 | 2020-11-20T02:17:43 | 2020-11-20T02:17:43 | 162,194,249 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 192 | py | /private/var/tmp/_bazel_tianyi/f29d1e61689e4e4b318f483932fff4d0/execroot/tf_serving/bazel-out/darwin-opt/genfiles/external/org_tensorflow/tensorflow/keras/applications/inception_v3/__init__.py | [
"[email protected]"
] | |
e56fe243ff26e0c4ed95e36aed0397d5793f59fd | c64f2412f7a7ebc3f6bce2a2dcbb99f6b88a2268 | /users/arxiv/users/legacy/__init__.py | 09538c9dd112574852d0fa5619c20aa21da9e6c8 | [
"MIT"
] | permissive | Quang7hong81/arxiv-auth | 565614667f6900d0d9644d010b832acfee3ba983 | ca2b0091682248d84154cf0a0d70c28e2e4e9f91 | refs/heads/master | 2023-08-11T06:58:09.849172 | 2021-05-04T20:53:44 | 2021-05-04T20:53:44 | 415,073,254 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 582 | py | """
Integrations with the legacy arXiv database for users and sessions.
This package provides integrations with legacy user and sessions data in the
classic DB. These components were pulled out as a separate package because
they are required by both the accounts service and the authn/z middleware,
and maintaining them in both places would create too much duplication.
"""
from . import sessions, exceptions, authenticate, models, accounts, util, \
endorsements
from .util import create_all, init_app, current_session, drop_all, \
is_configured, is_available, transaction
| [
"[email protected]"
] | |
1a30948da27846055a1e2094b54a910a08a3491f | 727f1bc2205c88577b419cf0036c029b8c6f7766 | /out-bin/py/google/fhir/models/model_test.runfiles/pypi__tensorflow_1_12_0/tensorflow-1.12.0.data/purelib/tensorflow/python/keras/preprocessing/__init__.py | 263fd39ce30e9df743884634bca14076423ace71 | [
"Apache-2.0"
] | permissive | rasalt/fhir | 55cf78feed3596a3101b86f9e9bbf6652c6ed4ad | d49883cc4d4986e11ca66058d5a327691e6e048a | refs/heads/master | 2020-04-13T00:16:54.050913 | 2019-01-15T14:22:15 | 2019-01-15T14:22:15 | 160,260,223 | 0 | 0 | Apache-2.0 | 2018-12-03T22:07:01 | 2018-12-03T22:07:01 | null | UTF-8 | Python | false | false | 190 | py | /home/rkharwar/.cache/bazel/_bazel_rkharwar/c4bcd65252c8f8250f091ba96375f9a5/external/pypi__tensorflow_1_12_0/tensorflow-1.12.0.data/purelib/tensorflow/python/keras/preprocessing/__init__.py | [
"[email protected]"
] | |
80b5a121e4ba0b0b9fa3de601259a3ed358a8f19 | 531c47c15b97cbcb263ec86821d7f258c81c0aaf | /sdk/network/azure-mgmt-network/azure/mgmt/network/v2019_12_01/aio/operations_async/_available_delegations_operations_async.py | 9bd5ba9d9b004892ecb52fedad1a086d3101ab89 | [
"LicenseRef-scancode-generic-cla",
"LGPL-2.1-or-later",
"MIT"
] | permissive | YijunXieMS/azure-sdk-for-python | be364d3b88204fd3c7d223df23756386ff7a3361 | f779de8e53dbec033f98f976284e6d9491fd60b3 | refs/heads/master | 2021-07-15T18:06:28.748507 | 2020-09-04T15:48:52 | 2020-09-04T15:48:52 | 205,457,088 | 1 | 2 | MIT | 2020-06-16T16:38:15 | 2019-08-30T21:08:55 | Python | UTF-8 | Python | false | false | 5,085 | py | # coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from typing import Any, AsyncIterable, Callable, Dict, Generic, Optional, TypeVar
import warnings
from azure.core.async_paging import AsyncItemPaged, AsyncList
from azure.core.exceptions import HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import AsyncHttpResponse, HttpRequest
from azure.mgmt.core.exceptions import ARMErrorFormat
from ... import models
T = TypeVar('T')
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, AsyncHttpResponse], T, Dict[str, Any]], Any]]
class AvailableDelegationsOperations:
"""AvailableDelegationsOperations async operations.
You should not instantiate this class directly. Instead, you should create a Client instance that
instantiates it for you and attaches it as an attribute.
:ivar models: Alias to model classes used in this operation group.
:type models: ~azure.mgmt.network.v2019_12_01.models
:param client: Client for service requests.
:param config: Configuration of service client.
:param serializer: An object model serializer.
:param deserializer: An object model deserializer.
"""
models = models
def __init__(self, client, config, serializer, deserializer) -> None:
self._client = client
self._serialize = serializer
self._deserialize = deserializer
self._config = config
def list(
self,
location: str,
**kwargs
) -> AsyncIterable["models.AvailableDelegationsResult"]:
"""Gets all of the available subnet delegations for this subscription in this region.
:param location: The location of the subnet.
:type location: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: An iterator like instance of either AvailableDelegationsResult or the result of cls(response)
:rtype: ~azure.core.async_paging.AsyncItemPaged[~azure.mgmt.network.v2019_12_01.models.AvailableDelegationsResult]
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["models.AvailableDelegationsResult"]
error_map = {404: ResourceNotFoundError, 409: ResourceExistsError}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2019-12-01"
def prepare_request(next_link=None):
if not next_link:
# Construct URL
url = self.list.metadata['url'] # type: ignore
path_format_arguments = {
'location': self._serialize.url("location", location, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
else:
url = next_link
query_parameters = {} # type: Dict[str, Any]
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = 'application/json'
# Construct and send request
request = self._client.get(url, query_parameters, header_parameters)
return request
async def extract_data(pipeline_response):
deserialized = self._deserialize('AvailableDelegationsResult', pipeline_response)
list_of_elem = deserialized.value
if cls:
list_of_elem = cls(list_of_elem)
return deserialized.next_link or None, AsyncList(list_of_elem)
async def get_next(next_link=None):
request = prepare_request(next_link)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
return pipeline_response
return AsyncItemPaged(
get_next, extract_data
)
list.metadata = {'url': '/subscriptions/{subscriptionId}/providers/Microsoft.Network/locations/{location}/availableDelegations'} # type: ignore
| [
"[email protected]"
] | |
a6b8938429590ad4f25219b94088b7a9e3c816ca | 4d21da5a3d07f4d05b997e80119cd79692ac0d25 | /Leetcode/301-400/326. Power of Three.py | 6dd5aa5bcea9575eb62407cbaf95c65965ef696e | [] | no_license | ErinC123/Algorithm | 92b2789ec3b36c49f9e65f2e7a702bb4b732e8ba | 4544fee91e811a6625000921c32ad054df550f1e | refs/heads/master | 2021-06-17T14:03:33.955233 | 2017-06-18T21:20:55 | 2017-06-18T21:20:55 | 75,894,724 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 547 | py | # Question: 326. Power of Three
# Difficulty: Easy
# Tags: Math
'''
Given an integer, write a function to determine if it is a power of three.
Follow up:
Could you do it without using any loop / recursion?
'''
class Solution(object):
    def isPowerOfThree(self, n):
        """
        :type n: int
        :rtype: bool
        """
        if n == 0:
            return False
        if n == 1:
            return True
        while n%3 == 0:
            n = n/3
        if n == 1:
            return True
        return False
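
    # Follow-up sketch (an addition, not part of the original submission): for a
    # 32-bit signed input, 3**19 == 1162261467 is the largest power of three in
    # range, so n is a power of three iff n > 0 and 1162261467 % n == 0.
    # This answers the follow-up without any loop or recursion.
    def isPowerOfThreeNoLoop(self, n):
        """
        :type n: int
        :rtype: bool
        """
        return n > 0 and 1162261467 % n == 0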
| [
"[email protected]"
] | |
850b0fdbbdcd7b8c51b89ad013305f361b5402cd | b8e9dd6fd8f8b691cba5a3af2388467bcf6c90bb | /samples/openapi3/client/3_0_3_unit_test/python-experimental/unit_test_api/paths/request_body_post_uniqueitems_false_validation_request_body/post.pyi | 8fd7d1c438a0e379c0e8540738e4ed3eb5e2bfdb | [
"Apache-2.0"
] | permissive | FallenRiteMonk/openapi-generator | f8b98940219eecf14dc76dced4b0fbd394522aa3 | b6576d11733ecad6fa4a0a616e1a06d502a771b7 | refs/heads/master | 2023-03-16T05:23:36.501909 | 2022-09-02T01:46:56 | 2022-09-02T01:46:56 | 164,609,299 | 0 | 0 | Apache-2.0 | 2019-01-08T09:08:56 | 2019-01-08T09:08:56 | null | UTF-8 | Python | false | false | 4,583 | pyi | # coding: utf-8
"""
Generated by: https://openapi-generator.tech
"""
from dataclasses import dataclass
import urllib3
from urllib3._collections import HTTPHeaderDict
from unit_test_api import api_client, exceptions
from datetime import date, datetime # noqa: F401
import decimal # noqa: F401
import functools # noqa: F401
import io # noqa: F401
import re # noqa: F401
import typing # noqa: F401
import uuid # noqa: F401
import frozendict # noqa: F401
from unit_test_api import schemas # noqa: F401
from unit_test_api.model.uniqueitems_false_validation import UniqueitemsFalseValidation
# body param
SchemaForRequestBodyApplicationJson = UniqueitemsFalseValidation
class BaseApi(api_client.Api):
def _post_uniqueitems_false_validation_request_body_oapg(
self: api_client.Api,
body: typing.Union[SchemaForRequestBodyApplicationJson, ],
content_type: str = 'application/json',
stream: bool = False,
timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None,
skip_deserialization: bool = False,
) -> typing.Union[
ApiResponseFor200,
api_client.ApiResponseWithoutDeserialization
]:
"""
:param skip_deserialization: If true then api_response.response will be set but
api_response.body and api_response.headers will not be deserialized into schema
class instances
"""
used_path = path.value
_headers = HTTPHeaderDict()
# TODO add cookie handling
if body is schemas.unset:
raise exceptions.ApiValueError(
'The required body parameter has an invalid value of: unset. Set a valid value instead')
_fields = None
_body = None
serialized_data = request_body_body.serialize(body, content_type)
_headers.add('Content-Type', content_type)
if 'fields' in serialized_data:
_fields = serialized_data['fields']
elif 'body' in serialized_data:
_body = serialized_data['body']
response = self.api_client.call_api(
resource_path=used_path,
method='post'.upper(),
headers=_headers,
fields=_fields,
body=_body,
stream=stream,
timeout=timeout,
)
if skip_deserialization:
api_response = api_client.ApiResponseWithoutDeserialization(response=response)
else:
response_for_status = _status_code_to_response.get(str(response.status))
if response_for_status:
api_response = response_for_status.deserialize(response, self.api_client.configuration)
else:
api_response = api_client.ApiResponseWithoutDeserialization(response=response)
if not 200 <= response.status <= 299:
raise exceptions.ApiException(api_response=api_response)
return api_response
class PostUniqueitemsFalseValidationRequestBody(BaseApi):
# this class is used by api classes that refer to endpoints with operationId fn names
def post_uniqueitems_false_validation_request_body(
self: BaseApi,
body: typing.Union[SchemaForRequestBodyApplicationJson, ],
content_type: str = 'application/json',
stream: bool = False,
timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None,
skip_deserialization: bool = False,
) -> typing.Union[
ApiResponseFor200,
api_client.ApiResponseWithoutDeserialization
]:
return self._post_uniqueitems_false_validation_request_body_oapg(
body=body,
content_type=content_type,
stream=stream,
timeout=timeout,
skip_deserialization=skip_deserialization
)
class ApiForpost(BaseApi):
# this class is used by api classes that refer to endpoints by path and http method names
def post(
self: BaseApi,
body: typing.Union[SchemaForRequestBodyApplicationJson, ],
content_type: str = 'application/json',
stream: bool = False,
timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None,
skip_deserialization: bool = False,
) -> typing.Union[
ApiResponseFor200,
api_client.ApiResponseWithoutDeserialization
]:
return self._post_uniqueitems_false_validation_request_body_oapg(
body=body,
content_type=content_type,
stream=stream,
timeout=timeout,
skip_deserialization=skip_deserialization
)
| [
"[email protected]"
] | |
96ce58746c96c0f7147da20156ec0aadcc009ca8 | ca7aa979e7059467e158830b76673f5b77a0f5a3 | /Python_codes/p03338/s026655815.py | 7c92da9e9417e9f603a6f39bed45f9c3726844a6 | [] | no_license | Aasthaengg/IBMdataset | 7abb6cbcc4fb03ef5ca68ac64ba460c4a64f8901 | f33f1c5c3b16d0ea8d1f5a7d479ad288bb3f48d8 | refs/heads/main | 2023-04-22T10:22:44.763102 | 2021-05-13T17:27:22 | 2021-05-13T17:27:22 | 367,112,348 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 112 | py | n = int(input())
s = input()
a = []
for i in range(1, n):
    a.append(len(set(s[:i]) & set(s[i:])))
print(max(a)) | [
"[email protected]"
] | |
e215012501e1408062f10bd22f8e9dbb5bb9d9e3 | 4d1d67fde4168abc454e13b71afb843a56958f78 | /EtherPad/settings.py | cee16ccc93f013364237bbe4b3ef406f6ca2a11c | [
"MIT"
] | permissive | pandeyroshan/Sharepad.net | 431410b8ef36f8883bbfa4a3afdb4b9605701068 | 60b4614ad46f7e06f17480dcd9f0664b4f1661cc | refs/heads/master | 2022-04-06T15:07:11.230206 | 2020-02-23T01:30:30 | 2020-02-23T01:30:30 | 242,392,737 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,104 | py | """
Django settings for EtherPad project.
Generated by 'django-admin startproject' using Django 2.2.
For more information on this file, see
https://docs.djangoproject.com/en/2.2/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/2.2/ref/settings/
"""
import os
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/2.2/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = 'u9lzvg)dlw5&(1r%w!t@%9w0idd&uu-pix+w5!80z3h_js4(q='
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'core',
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'EtherPad.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'EtherPad.wsgi.application'
# Database
# https://docs.djangoproject.com/en/2.2/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
}
}
# Password validation
# https://docs.djangoproject.com/en/2.2/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/2.2/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/2.2/howto/static-files/
STATIC_URL = '/static/'
| [
"[email protected]"
] | |
a2271435a5491a60527f6283ff52ff6e8a33fd46 | b4bc1807fb084a83c02b2e39de690a570be64ebd | /events_site_example/apps/common_app/tests/helpers.py | f72cc39f95eac0b551c88db21280cf7ecbef4c68 | [] | no_license | pahaz/events-site-example | 81d7a088a36d0073b6c743d9bf417403ea945746 | f0d0ae18b137dd39afc6b09d01048bc4403dff92 | refs/heads/master | 2023-08-26T20:00:04.157919 | 2012-11-21T06:58:09 | 2012-11-21T06:58:09 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,554 | py | # -*- coding: utf-8 -*-
from django.test import TestCase
class TestConditionBase(TestCase):
    # model = ModelClass
    # queryset_method_name = 'queryset_filter_method_name'
    # instance_property_name = 'model_instance_property_name'
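
    # A minimal usage sketch (an addition; ``Event``, its ``published`` manager
    # method and its ``is_published`` property are hypothetical names):
    #
    #     class EventIsPublishedTest(TestConditionBase):
    #         model = Event
    #         queryset_method_name = 'published'
    #         instance_property_name = 'is_published'
    #
    #         def create_instance(self):
    #             return Event.objects.create(title='example', published=True)
    #
    # assertConditionTrue() then checks that Event.objects.published() finds the
    # instance and that instance.is_published returns True.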

    def setUp(self):
        self.instance = self.create_instance()
        # leave only 1 model instance
        self.model.objects.all().exclude(pk=self.instance.pk).delete()
        self.queryset_filter_method = getattr(self.model.objects, self.queryset_method_name)

    def create_instance(self):
        """Creates instance of model"""
        raise NotImplementedError('Subclasses must define this method.')

    def assertInstanceMethodResponseReturns(self, response, msg):
        self.assertEqual(getattr(self.instance, self.instance_property_name), response, msg=msg)

    def assertFound(self, msg):
        self.assertEqual(self.queryset_filter_method().count(), 1, msg=msg)
        self.assertEqual(self.queryset_filter_method()[0], self.instance, msg=msg)

    def assertNotFound(self, msg):
        self.assertEqual(self.queryset_filter_method().count(), 0, msg=msg)

    def assertConditionTrue(self, msg=None):
        # test instance method
        self.assertInstanceMethodResponseReturns(True, msg=msg)
        # test QuerySet filter method
        self.assertFound(msg=msg)

    def assertConditionFalse(self, msg=None):
        # test instance method
        self.assertInstanceMethodResponseReturns(False, msg=msg)
        # test QuerySet filter method
        self.assertNotFound(msg=msg) | [
"[email protected]"
] | |
4b7f7fbbef6cd5d36f278fa8ca7c2fce75a71c78 | 7585c77f49d4a3643e4740b2ceae081c20fc4183 | /Python_Nyuumon/example04-05-01.py | 698464305692710def6032ae6adcefb3a64043a7 | [] | no_license | Minari766/study_python | c4243df47f23e8fda5bcdf16b65f3b3af97f888c | b1e48e750126f377a15f8be8b3c2547687416ec4 | refs/heads/master | 2022-12-15T10:29:05.306690 | 2020-09-03T13:05:21 | 2020-09-03T13:05:21 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 17 | py | #coding:utf-8
ç | [
"[email protected]"
] | |
8fbd777be6a4409caac091f3dd090dc2debebeef | de24f83a5e3768a2638ebcf13cbe717e75740168 | /moodledata/vpl_data/303/usersdata/297/67077/submittedfiles/testes.py | c7507d5c71e3b61038c611e9a7d783a2490240fe | [] | no_license | rafaelperazzo/programacao-web | 95643423a35c44613b0f64bed05bd34780fe2436 | 170dd5440afb9ee68a973f3de13a99aa4c735d79 | refs/heads/master | 2021-01-12T14:06:25.773146 | 2017-12-22T16:05:45 | 2017-12-22T16:05:45 | 69,566,344 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 214 | py | # -*- coding: utf-8 -*-
#START HERE BELOW
idade= int(input("your age (int): "))
altura= float(input("your height (float): "))
print(idade)
print('your integer age is %d and your height is %.2f'%(idade,altura))
| [
"[email protected]"
] | |
78f46b6b21fb1b994bdb1e8203bf4bf6920782c9 | 1f7d287ef90041e20468513a26a39e1f3d221289 | /Level-4/s01/guvi-L4-s02-py02.py | 8b0e9652fdfd4d786ce1b1f5461d7d32068eb13d | [] | no_license | ksthacker/python | d787d69f954c0e9b59b0cc96a8b8fc5c0594d8a0 | 3a3775e1b9349e313f8c96ea11eade54a7e9bf54 | refs/heads/master | 2021-04-27T16:32:40.923316 | 2019-08-21T04:50:22 | 2019-08-21T04:50:22 | 122,303,461 | 0 | 17 | null | 2019-10-03T14:59:51 | 2018-02-21T07:09:32 | Python | UTF-8 | Python | false | false | 221 | py | import sys, string, math
n,k = input().split()
n,k = int(n), int(k)
L = [ int(x) for x in input().split()]
for i in range(0,k) :
    a,b = input().split()
    a,b = int(a), int(b)
    print(sum(L[a-1:b]))
| [
"[email protected]"
] | |
3e0576d476569ec92571f897297e7d607dc0a523 | 533f14db4ecae4ed590759ebd0540d9f1dc29b70 | /tests/test_image.py | 584f12f249acbc29eebd8566e7b1707078afc2a1 | [
"Apache-2.0"
] | permissive | tjsongzw/mmcv | 30d3271957f3fc814e0792f4bccfebda154cc208 | 4abb916d604fba808f7220cc423280452eecfc96 | refs/heads/master | 2020-04-18T02:19:04.868110 | 2019-04-22T02:49:46 | 2019-04-22T02:49:46 | 167,157,804 | 0 | 0 | Apache-2.0 | 2019-01-23T09:43:10 | 2019-01-23T09:43:10 | null | UTF-8 | Python | false | false | 12,552 | py | import os
import os.path as osp
import tempfile
import cv2
import mmcv
import numpy as np
import pytest
from numpy.testing import assert_array_equal, assert_array_almost_equal
class TestImage(object):
@classmethod
def setup_class(cls):
# the test img resolution is 400x300
cls.img_path = osp.join(osp.dirname(__file__), 'data/color.jpg')
cls.gray_img_path = osp.join(
osp.dirname(__file__), 'data/grayscale.jpg')
cls.img = cv2.imread(cls.img_path)
def assert_img_equal(self, img, ref_img, ratio_thr=0.999):
assert img.shape == ref_img.shape
assert img.dtype == ref_img.dtype
area = ref_img.shape[0] * ref_img.shape[1]
diff = np.abs(img.astype('int32') - ref_img.astype('int32'))
assert np.sum(diff <= 1) / float(area) > ratio_thr
def test_imread(self):
img = mmcv.imread(self.img_path)
assert img.shape == (300, 400, 3)
img = mmcv.imread(self.img_path, 'grayscale')
assert img.shape == (300, 400)
img = mmcv.imread(self.gray_img_path)
assert img.shape == (300, 400, 3)
img = mmcv.imread(self.gray_img_path, 'unchanged')
assert img.shape == (300, 400)
img = mmcv.imread(img)
assert_array_equal(img, mmcv.imread(img))
with pytest.raises(TypeError):
mmcv.imread(1)
def test_imfrombytes(self):
with open(self.img_path, 'rb') as f:
img_bytes = f.read()
img = mmcv.imfrombytes(img_bytes)
assert img.shape == (300, 400, 3)
def test_imwrite(self):
img = mmcv.imread(self.img_path)
out_file = osp.join(tempfile.gettempdir(), 'mmcv_test.jpg')
mmcv.imwrite(img, out_file)
rewrite_img = mmcv.imread(out_file)
os.remove(out_file)
self.assert_img_equal(img, rewrite_img)
def test_bgr2gray(self):
in_img = np.random.rand(10, 10, 3).astype(np.float32)
out_img = mmcv.bgr2gray(in_img)
computed_gray = (in_img[:, :, 0] * 0.114 + in_img[:, :, 1] * 0.587 +
in_img[:, :, 2] * 0.299)
assert_array_almost_equal(out_img, computed_gray, decimal=4)
out_img_3d = mmcv.bgr2gray(in_img, True)
assert out_img_3d.shape == (10, 10, 1)
assert_array_almost_equal(out_img_3d[..., 0], out_img, decimal=4)
def test_gray2bgr(self):
in_img = np.random.rand(10, 10).astype(np.float32)
out_img = mmcv.gray2bgr(in_img)
assert out_img.shape == (10, 10, 3)
for i in range(3):
assert_array_almost_equal(out_img[..., i], in_img, decimal=4)
def test_bgr2rgb(self):
in_img = np.random.rand(10, 10, 3).astype(np.float32)
out_img = mmcv.bgr2rgb(in_img)
assert out_img.shape == in_img.shape
assert_array_equal(out_img[..., 0], in_img[..., 2])
assert_array_equal(out_img[..., 1], in_img[..., 1])
assert_array_equal(out_img[..., 2], in_img[..., 0])
def test_rgb2bgr(self):
in_img = np.random.rand(10, 10, 3).astype(np.float32)
out_img = mmcv.rgb2bgr(in_img)
assert out_img.shape == in_img.shape
assert_array_equal(out_img[..., 0], in_img[..., 2])
assert_array_equal(out_img[..., 1], in_img[..., 1])
assert_array_equal(out_img[..., 2], in_img[..., 0])
def test_bgr2hsv(self):
in_img = np.random.rand(10, 10, 3).astype(np.float32)
out_img = mmcv.bgr2hsv(in_img)
argmax = in_img.argmax(axis=2)
computed_hsv = np.empty_like(in_img, dtype=in_img.dtype)
for i in range(in_img.shape[0]):
for j in range(in_img.shape[1]):
b = in_img[i, j, 0]
g = in_img[i, j, 1]
r = in_img[i, j, 2]
v = max(r, g, b)
s = (v - min(r, g, b)) / v if v != 0 else 0
if argmax[i, j] == 0:
h = 240 + 60 * (r - g) / (v - min(r, g, b))
elif argmax[i, j] == 1:
h = 120 + 60 * (b - r) / (v - min(r, g, b))
else:
h = 60 * (g - b) / (v - min(r, g, b))
if h < 0:
h += 360
computed_hsv[i, j, :] = [h, s, v]
assert_array_almost_equal(out_img, computed_hsv, decimal=2)
def test_imresize(self):
resized_img = mmcv.imresize(self.img, (1000, 600))
assert resized_img.shape == (600, 1000, 3)
resized_img, w_scale, h_scale = mmcv.imresize(self.img, (1000, 600),
True)
assert (resized_img.shape == (600, 1000, 3) and w_scale == 2.5
and h_scale == 2.0)
for mode in ['nearest', 'bilinear', 'bicubic', 'area', 'lanczos']:
resized_img = mmcv.imresize(
self.img, (1000, 600), interpolation=mode)
assert resized_img.shape == (600, 1000, 3)
def test_imresize_like(self):
a = np.zeros((100, 200, 3))
resized_img = mmcv.imresize_like(self.img, a)
assert resized_img.shape == (100, 200, 3)
def test_imrescale(self):
# rescale by a certain factor
resized_img = mmcv.imrescale(self.img, 1.5)
assert resized_img.shape == (450, 600, 3)
resized_img = mmcv.imrescale(self.img, 0.934)
assert resized_img.shape == (280, 374, 3)
# rescale by a certain max_size
# resize (400, 300) to (max_1000, max_600)
resized_img = mmcv.imrescale(self.img, (1000, 600))
assert resized_img.shape == (600, 800, 3)
resized_img, scale = mmcv.imrescale(
self.img, (1000, 600), return_scale=True)
assert resized_img.shape == (600, 800, 3) and scale == 2.0
# resize (400, 300) to (max_200, max_180)
resized_img = mmcv.imrescale(self.img, (180, 200))
assert resized_img.shape == (150, 200, 3)
resized_img, scale = mmcv.imrescale(
self.img, (180, 200), return_scale=True)
assert resized_img.shape == (150, 200, 3) and scale == 0.5
# test exceptions
with pytest.raises(ValueError):
mmcv.imrescale(self.img, -0.5)
with pytest.raises(TypeError):
mmcv.imrescale(self.img, [100, 100])
def test_imflip(self):
# test horizontal flip (color image)
img = np.random.rand(80, 60, 3)
h, w, c = img.shape
flipped_img = mmcv.imflip(img)
assert flipped_img.shape == img.shape
for i in range(h):
for j in range(w):
for k in range(c):
assert flipped_img[i, j, k] == img[i, w - 1 - j, k]
# test vertical flip (color image)
flipped_img = mmcv.imflip(img, direction='vertical')
assert flipped_img.shape == img.shape
for i in range(h):
for j in range(w):
for k in range(c):
assert flipped_img[i, j, k] == img[h - 1 - i, j, k]
# test horizontal flip (grayscale image)
img = np.random.rand(80, 60)
h, w = img.shape
flipped_img = mmcv.imflip(img)
assert flipped_img.shape == img.shape
for i in range(h):
for j in range(w):
assert flipped_img[i, j] == img[i, w - 1 - j]
# test vertical flip (grayscale image)
flipped_img = mmcv.imflip(img, direction='vertical')
assert flipped_img.shape == img.shape
for i in range(h):
for j in range(w):
assert flipped_img[i, j] == img[h - 1 - i, j]
def test_imcrop(self):
# yapf: disable
bboxes = np.array([[100, 100, 199, 199], # center
[0, 0, 150, 100], # left-top corner
[250, 200, 399, 299], # right-bottom corner
[0, 100, 399, 199], # wide
[150, 0, 299, 299]]) # tall
# yapf: enable
# crop one bbox
patch = mmcv.imcrop(self.img, bboxes[0, :])
patches = mmcv.imcrop(self.img, bboxes[[0], :])
assert patch.shape == (100, 100, 3)
patch_path = osp.join(osp.dirname(__file__), 'data/patches')
ref_patch = np.load(patch_path + '/0.npy')
self.assert_img_equal(patch, ref_patch)
assert isinstance(patches, list) and len(patches) == 1
self.assert_img_equal(patches[0], ref_patch)
# crop with no scaling and padding
patches = mmcv.imcrop(self.img, bboxes)
assert len(patches) == bboxes.shape[0]
for i in range(len(patches)):
ref_patch = np.load(patch_path + '/{}.npy'.format(i))
self.assert_img_equal(patches[i], ref_patch)
# crop with scaling and no padding
patches = mmcv.imcrop(self.img, bboxes, 1.2)
for i in range(len(patches)):
ref_patch = np.load(patch_path + '/scale_{}.npy'.format(i))
self.assert_img_equal(patches[i], ref_patch)
# crop with scaling and padding
patches = mmcv.imcrop(self.img, bboxes, 1.2, pad_fill=[255, 255, 0])
for i in range(len(patches)):
ref_patch = np.load(patch_path + '/pad_{}.npy'.format(i))
self.assert_img_equal(patches[i], ref_patch)
patches = mmcv.imcrop(self.img, bboxes, 1.2, pad_fill=0)
for i in range(len(patches)):
ref_patch = np.load(patch_path + '/pad0_{}.npy'.format(i))
self.assert_img_equal(patches[i], ref_patch)
def test_impad(self):
img = np.random.rand(10, 10, 3).astype(np.float32)
padded_img = mmcv.impad(img, (15, 12), 0)
assert_array_equal(img, padded_img[:10, :10, :])
assert_array_equal(
np.zeros((5, 12, 3), dtype='float32'), padded_img[10:, :, :])
assert_array_equal(
np.zeros((15, 2, 3), dtype='float32'), padded_img[:, 10:, :])
img = np.random.randint(256, size=(10, 10, 3)).astype('uint8')
padded_img = mmcv.impad(img, (15, 12, 3), [100, 110, 120])
assert_array_equal(img, padded_img[:10, :10, :])
assert_array_equal(
np.array([100, 110, 120], dtype='uint8') * np.ones(
(5, 12, 3), dtype='uint8'), padded_img[10:, :, :])
assert_array_equal(
np.array([100, 110, 120], dtype='uint8') * np.ones(
(15, 2, 3), dtype='uint8'), padded_img[:, 10:, :])
with pytest.raises(AssertionError):
mmcv.impad(img, (15, ), 0)
with pytest.raises(AssertionError):
mmcv.impad(img, (5, 5), 0)
with pytest.raises(AssertionError):
mmcv.impad(img, (5, 5), [0, 1])
def test_impad_to_multiple(self):
img = np.random.rand(11, 14, 3).astype(np.float32)
padded_img = mmcv.impad_to_multiple(img, 4)
assert padded_img.shape == (12, 16, 3)
img = np.random.rand(20, 12).astype(np.float32)
padded_img = mmcv.impad_to_multiple(img, 5)
assert padded_img.shape == (20, 15)
img = np.random.rand(20, 12).astype(np.float32)
padded_img = mmcv.impad_to_multiple(img, 2)
assert padded_img.shape == (20, 12)
def test_imrotate(self):
img = np.array([[1, 2, 3], [4, 5, 6], [7, 8, 9]]).astype(np.uint8)
assert_array_equal(mmcv.imrotate(img, 0), img)
img_r = np.array([[7, 4, 1], [8, 5, 2], [9, 6, 3]])
assert_array_equal(mmcv.imrotate(img, 90), img_r)
img_r = np.array([[3, 6, 9], [2, 5, 8], [1, 4, 7]])
assert_array_equal(mmcv.imrotate(img, -90), img_r)
img = np.array([[1, 2, 3, 4], [5, 6, 7, 8]]).astype(np.uint8)
img_r = np.array([[0, 6, 2, 0], [0, 7, 3, 0]])
assert_array_equal(mmcv.imrotate(img, 90), img_r)
img_r = np.array([[1, 0, 0, 0], [2, 0, 0, 0]])
assert_array_equal(mmcv.imrotate(img, 90, center=(0, 0)), img_r)
img_r = np.array([[255, 6, 2, 255], [255, 7, 3, 255]])
assert_array_equal(mmcv.imrotate(img, 90, border_value=255), img_r)
img_r = np.array([[5, 1], [6, 2], [7, 3], [8, 4]])
assert_array_equal(mmcv.imrotate(img, 90, auto_bound=True), img_r)
with pytest.raises(ValueError):
mmcv.imrotate(img, 90, center=(0, 0), auto_bound=True)
def test_iminvert(self):
img = np.array([[0, 128, 255], [1, 127, 254], [2, 129, 253]],
dtype=np.uint8)
img_r = np.array([[255, 127, 0], [254, 128, 1], [253, 126, 2]],
dtype=np.uint8)
assert_array_equal(mmcv.iminvert(img), img_r)
| [
"[email protected]"
] | |
9a92894a5d92bfd4c100d639f10ae5a43bca57c9 | c66810fadc39a6ff020b8a04ec70903a5570b378 | /behavioral_patterns/state_pattern/state_exp1.py | e386e71ec9bb372658d21c61b142c70b9400b8dd | [] | no_license | MaxOvcharov/python_desing_patterns | 364c56c392bef3a882fd13fbd00a9c5d1241ccfd | eacc3f1483e40778178523d82adfb6c443dbf1ae | refs/heads/master | 2021-05-14T00:45:47.094302 | 2020-02-26T20:00:11 | 2020-02-26T20:00:11 | 116,548,664 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,252 | py | #!/usr/bin/env python3
"""
EXAMPLE - https://sourcemaking.com/design_patterns/state.
Allow an object to alter its behavior when its internal state changes.
The object will appear to change its class.
"""
import abc


class Context:
    """
    Define the interface of interest to clients.
    Maintain an instance of a ConcreteState subclass that defines the
    current state.
    """

    def __init__(self, state):
        self._state = state

    def request(self):
        self._state.handle()


class State(metaclass=abc.ABCMeta):
    """
    Define an interface for encapsulating the behavior associated with a
    particular state of the Context.
    """

    @abc.abstractmethod
    def handle(self):
        pass


class ConcreteStateA(State):
    """
    Implement a behavior associated with a state of the Context.
    """

    def handle(self):
        print(f'STATE: {self.__class__.__name__}')


class ConcreteStateB(State):
    """
    Implement a behavior associated with a state of the Context.
    """

    def handle(self):
        print(f'STATE: {self.__class__.__name__}')


def main():
    concrete_state_a = ConcreteStateA()
    context = Context(concrete_state_a)
    context.request()
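
    # Additional demonstration (an addition to the original example): hand the
    # Context a different ConcreteState and repeat the request to show the
    # behavior changing with the state.
    context = Context(ConcreteStateB())
    context.request()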


if __name__ == "__main__":
    main()
| [
"[email protected]"
] | |
af4b2060403266755323d533964ef0dbf17e3862 | 85a9ffeccb64f6159adbd164ff98edf4ac315e33 | /pysnmp/CPQRPM-MIB.py | ac576ea95cf89c433a9e440816af0f39cbf9b7d9 | [
"Apache-2.0"
] | permissive | agustinhenze/mibs.snmplabs.com | 5d7d5d4da84424c5f5a1ed2752f5043ae00019fb | 1fc5c07860542b89212f4c8ab807057d9a9206c7 | refs/heads/master | 2020-12-26T12:41:41.132395 | 2019-08-16T15:51:41 | 2019-08-16T15:53:57 | 237,512,469 | 0 | 0 | Apache-2.0 | 2020-01-31T20:41:36 | 2020-01-31T20:41:35 | null | UTF-8 | Python | false | false | 58,041 | py | #
# PySNMP MIB module CPQRPM-MIB (http://snmplabs.com/pysmi)
# ASN.1 source file:///Users/davwang4/Dev/mibs.snmplabs.com/asn1/CPQRPM-MIB
# Produced by pysmi-0.3.4 at Mon Apr 29 18:12:07 2019
# On host DAVWANG4-M-1475 platform Darwin version 18.5.0 by user davwang4
# Using Python version 3.7.3 (default, Mar 27 2019, 09:23:15)
#
ObjectIdentifier, OctetString, Integer = mibBuilder.importSymbols("ASN1", "ObjectIdentifier", "OctetString", "Integer")
NamedValues, = mibBuilder.importSymbols("ASN1-ENUMERATION", "NamedValues")
ConstraintsIntersection, ConstraintsUnion, SingleValueConstraint, ValueRangeConstraint, ValueSizeConstraint = mibBuilder.importSymbols("ASN1-REFINEMENT", "ConstraintsIntersection", "ConstraintsUnion", "SingleValueConstraint", "ValueRangeConstraint", "ValueSizeConstraint")
compaq, = mibBuilder.importSymbols("CPQHOST-MIB", "compaq")
NotificationGroup, ModuleCompliance = mibBuilder.importSymbols("SNMPv2-CONF", "NotificationGroup", "ModuleCompliance")
sysName, = mibBuilder.importSymbols("SNMPv2-MIB", "sysName")
NotificationType, MibScalar, MibTable, MibTableRow, MibTableColumn, ObjectIdentity, Gauge32, Counter32, TimeTicks, Unsigned32, iso, Integer32, Counter64, ModuleIdentity, Bits, IpAddress, MibIdentifier, NotificationType = mibBuilder.importSymbols("SNMPv2-SMI", "NotificationType", "MibScalar", "MibTable", "MibTableRow", "MibTableColumn", "ObjectIdentity", "Gauge32", "Counter32", "TimeTicks", "Unsigned32", "iso", "Integer32", "Counter64", "ModuleIdentity", "Bits", "IpAddress", "MibIdentifier", "NotificationType")
DisplayString, TextualConvention = mibBuilder.importSymbols("SNMPv2-TC", "DisplayString", "TextualConvention")
cpqRPM = MibIdentifier((1, 3, 6, 1, 4, 1, 232, 154))
cpqRPMTrapInfo = MibIdentifier((1, 3, 6, 1, 4, 1, 232, 154, 1))
cpqRPMTraps = MibIdentifier((1, 3, 6, 1, 4, 1, 232, 154, 2))
cpqPMTraps = MibIdentifier((1, 3, 6, 1, 4, 1, 232, 154, 3))
cpqRPMTrapDescription = MibScalar((1, 3, 6, 1, 4, 1, 232, 154, 1, 1), DisplayString().subtype(subtypeSpec=ValueSizeConstraint(0, 255))).setMaxAccess("readonly")
if mibBuilder.loadTexts: cpqRPMTrapDescription.setStatus('mandatory')
cpqRPMTrapDeviceId = MibScalar((1, 3, 6, 1, 4, 1, 232, 154, 1, 2), Integer32().subtype(subtypeSpec=ValueRangeConstraint(0, 2147483647))).setMaxAccess("readonly")
if mibBuilder.loadTexts: cpqRPMTrapDeviceId.setStatus('mandatory')
cpqRPMTrapDeviceName = MibScalar((1, 3, 6, 1, 4, 1, 232, 154, 1, 3), DisplayString().subtype(subtypeSpec=ValueSizeConstraint(0, 50))).setMaxAccess("readonly")
if mibBuilder.loadTexts: cpqRPMTrapDeviceName.setStatus('mandatory')
cpqRPMTrapDeviceAddress = MibScalar((1, 3, 6, 1, 4, 1, 232, 154, 1, 4), DisplayString().subtype(subtypeSpec=ValueSizeConstraint(0, 50))).setMaxAccess("readonly")
if mibBuilder.loadTexts: cpqRPMTrapDeviceAddress.setStatus('mandatory')
cpqRPMTrapType = MibScalar((1, 3, 6, 1, 4, 1, 232, 154, 1, 5), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2, 3, 4, 5))).clone(namedValues=NamedValues(("system", 1), ("event", 2), ("change", 3), ("power", 4), ("environment", 5)))).setMaxAccess("readonly")
if mibBuilder.loadTexts: cpqRPMTrapType.setStatus('mandatory')
cpqRPMTrapSeverity = MibScalar((1, 3, 6, 1, 4, 1, 232, 154, 1, 6), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2, 3, 4, 5, 6))).clone(namedValues=NamedValues(("unknown", 1), ("information", 2), ("normal", 3), ("minor", 4), ("major", 5), ("critical", 6)))).setMaxAccess("readonly")
if mibBuilder.loadTexts: cpqRPMTrapSeverity.setStatus('mandatory')
cpqRPMTrapCode = MibScalar((1, 3, 6, 1, 4, 1, 232, 154, 1, 7), Integer32().subtype(subtypeSpec=ValueRangeConstraint(0, 2147483647))).setMaxAccess("readonly")
if mibBuilder.loadTexts: cpqRPMTrapCode.setStatus('mandatory')
cpqRPMTrapURL = MibScalar((1, 3, 6, 1, 4, 1, 232, 154, 1, 8), DisplayString().subtype(subtypeSpec=ValueSizeConstraint(0, 255))).setMaxAccess("readonly")
if mibBuilder.loadTexts: cpqRPMTrapURL.setStatus('mandatory')
cpqRPMTrapDeviceConnected = NotificationType((1, 3, 6, 1, 4, 1, 232, 154, 2) + (0,1)).setObjects(("SNMPv2-MIB", "sysName"), ("CPQRPM-MIB", "cpqRPMTrapDescription"), ("CPQRPM-MIB", "cpqRPMTrapDeviceId"), ("CPQRPM-MIB", "cpqRPMTrapDeviceName"), ("CPQRPM-MIB", "cpqRPMTrapDeviceAddress"), ("CPQRPM-MIB", "cpqRPMTrapType"), ("CPQRPM-MIB", "cpqRPMTrapSeverity"), ("CPQRPM-MIB", "cpqRPMTrapURL"))
cpqRPMTrapConnectionLost = NotificationType((1, 3, 6, 1, 4, 1, 232, 154, 2) + (0,2)).setObjects(("SNMPv2-MIB", "sysName"), ("CPQRPM-MIB", "cpqRPMTrapDescription"), ("CPQRPM-MIB", "cpqRPMTrapDeviceId"), ("CPQRPM-MIB", "cpqRPMTrapDeviceName"), ("CPQRPM-MIB", "cpqRPMTrapDeviceAddress"), ("CPQRPM-MIB", "cpqRPMTrapType"), ("CPQRPM-MIB", "cpqRPMTrapSeverity"), ("CPQRPM-MIB", "cpqRPMTrapURL"))
cpqRPMTrapLookupFailed = NotificationType((1, 3, 6, 1, 4, 1, 232, 154, 2) + (0,3)).setObjects(("SNMPv2-MIB", "sysName"), ("CPQRPM-MIB", "cpqRPMTrapDescription"), ("CPQRPM-MIB", "cpqRPMTrapDeviceId"), ("CPQRPM-MIB", "cpqRPMTrapDeviceName"), ("CPQRPM-MIB", "cpqRPMTrapDeviceAddress"), ("CPQRPM-MIB", "cpqRPMTrapType"), ("CPQRPM-MIB", "cpqRPMTrapSeverity"), ("CPQRPM-MIB", "cpqRPMTrapURL"))
cpqRPMTrapConnectionFailed = NotificationType((1, 3, 6, 1, 4, 1, 232, 154, 2) + (0,4)).setObjects(("SNMPv2-MIB", "sysName"), ("CPQRPM-MIB", "cpqRPMTrapDescription"), ("CPQRPM-MIB", "cpqRPMTrapDeviceId"), ("CPQRPM-MIB", "cpqRPMTrapDeviceName"), ("CPQRPM-MIB", "cpqRPMTrapDeviceAddress"), ("CPQRPM-MIB", "cpqRPMTrapType"), ("CPQRPM-MIB", "cpqRPMTrapSeverity"), ("CPQRPM-MIB", "cpqRPMTrapURL"))
cpqRPMTrapDeviceSettingsChanged = NotificationType((1, 3, 6, 1, 4, 1, 232, 154, 2) + (0,5)).setObjects(("SNMPv2-MIB", "sysName"), ("CPQRPM-MIB", "cpqRPMTrapDescription"), ("CPQRPM-MIB", "cpqRPMTrapDeviceId"), ("CPQRPM-MIB", "cpqRPMTrapDeviceName"), ("CPQRPM-MIB", "cpqRPMTrapDeviceAddress"), ("CPQRPM-MIB", "cpqRPMTrapType"), ("CPQRPM-MIB", "cpqRPMTrapSeverity"), ("CPQRPM-MIB", "cpqRPMTrapURL"))
cpqRPMTrapCMCTemp1BelowMin = NotificationType((1, 3, 6, 1, 4, 1, 232, 154, 2) + (0,10001)).setObjects(("SNMPv2-MIB", "sysName"), ("CPQRPM-MIB", "cpqRPMTrapDescription"), ("CPQRPM-MIB", "cpqRPMTrapDeviceId"), ("CPQRPM-MIB", "cpqRPMTrapDeviceName"), ("CPQRPM-MIB", "cpqRPMTrapDeviceAddress"), ("CPQRPM-MIB", "cpqRPMTrapType"), ("CPQRPM-MIB", "cpqRPMTrapSeverity"), ("CPQRPM-MIB", "cpqRPMTrapURL"))
cpqRPMTrapCMCTemp1AboveWarn = NotificationType((1, 3, 6, 1, 4, 1, 232, 154, 2) + (0,10002)).setObjects(("SNMPv2-MIB", "sysName"), ("CPQRPM-MIB", "cpqRPMTrapDescription"), ("CPQRPM-MIB", "cpqRPMTrapDeviceId"), ("CPQRPM-MIB", "cpqRPMTrapDeviceName"), ("CPQRPM-MIB", "cpqRPMTrapDeviceAddress"), ("CPQRPM-MIB", "cpqRPMTrapType"), ("CPQRPM-MIB", "cpqRPMTrapSeverity"), ("CPQRPM-MIB", "cpqRPMTrapURL"))
cpqRPMTrapCMCTemp1AboveMax = NotificationType((1, 3, 6, 1, 4, 1, 232, 154, 2) + (0,10003)).setObjects(("SNMPv2-MIB", "sysName"), ("CPQRPM-MIB", "cpqRPMTrapDescription"), ("CPQRPM-MIB", "cpqRPMTrapDeviceId"), ("CPQRPM-MIB", "cpqRPMTrapDeviceName"), ("CPQRPM-MIB", "cpqRPMTrapDeviceAddress"), ("CPQRPM-MIB", "cpqRPMTrapType"), ("CPQRPM-MIB", "cpqRPMTrapSeverity"), ("CPQRPM-MIB", "cpqRPMTrapURL"))
cpqRPMTrapCMCTemp1Normal = NotificationType((1, 3, 6, 1, 4, 1, 232, 154, 2) + (0,10004)).setObjects(("SNMPv2-MIB", "sysName"), ("CPQRPM-MIB", "cpqRPMTrapDescription"), ("CPQRPM-MIB", "cpqRPMTrapDeviceId"), ("CPQRPM-MIB", "cpqRPMTrapDeviceName"), ("CPQRPM-MIB", "cpqRPMTrapDeviceAddress"), ("CPQRPM-MIB", "cpqRPMTrapType"), ("CPQRPM-MIB", "cpqRPMTrapSeverity"), ("CPQRPM-MIB", "cpqRPMTrapURL"))
cpqRPMTrapCMCTemp2BelowMin = NotificationType((1, 3, 6, 1, 4, 1, 232, 154, 2) + (0,10005)).setObjects(("SNMPv2-MIB", "sysName"), ("CPQRPM-MIB", "cpqRPMTrapDescription"), ("CPQRPM-MIB", "cpqRPMTrapDeviceId"), ("CPQRPM-MIB", "cpqRPMTrapDeviceName"), ("CPQRPM-MIB", "cpqRPMTrapDeviceAddress"), ("CPQRPM-MIB", "cpqRPMTrapType"), ("CPQRPM-MIB", "cpqRPMTrapSeverity"), ("CPQRPM-MIB", "cpqRPMTrapURL"))
cpqRPMTrapCMCTemp2AboveWarn = NotificationType((1, 3, 6, 1, 4, 1, 232, 154, 2) + (0,10006)).setObjects(("SNMPv2-MIB", "sysName"), ("CPQRPM-MIB", "cpqRPMTrapDescription"), ("CPQRPM-MIB", "cpqRPMTrapDeviceId"), ("CPQRPM-MIB", "cpqRPMTrapDeviceName"), ("CPQRPM-MIB", "cpqRPMTrapDeviceAddress"), ("CPQRPM-MIB", "cpqRPMTrapType"), ("CPQRPM-MIB", "cpqRPMTrapSeverity"), ("CPQRPM-MIB", "cpqRPMTrapURL"))
cpqRPMTrapCMCTemp2AboveMax = NotificationType((1, 3, 6, 1, 4, 1, 232, 154, 2) + (0,10007)).setObjects(("SNMPv2-MIB", "sysName"), ("CPQRPM-MIB", "cpqRPMTrapDescription"), ("CPQRPM-MIB", "cpqRPMTrapDeviceId"), ("CPQRPM-MIB", "cpqRPMTrapDeviceName"), ("CPQRPM-MIB", "cpqRPMTrapDeviceAddress"), ("CPQRPM-MIB", "cpqRPMTrapType"), ("CPQRPM-MIB", "cpqRPMTrapSeverity"), ("CPQRPM-MIB", "cpqRPMTrapURL"))
cpqRPMTrapCMCTemp2Normal = NotificationType((1, 3, 6, 1, 4, 1, 232, 154, 2) + (0,10008)).setObjects(("SNMPv2-MIB", "sysName"), ("CPQRPM-MIB", "cpqRPMTrapDescription"), ("CPQRPM-MIB", "cpqRPMTrapDeviceId"), ("CPQRPM-MIB", "cpqRPMTrapDeviceName"), ("CPQRPM-MIB", "cpqRPMTrapDeviceAddress"), ("CPQRPM-MIB", "cpqRPMTrapType"), ("CPQRPM-MIB", "cpqRPMTrapSeverity"), ("CPQRPM-MIB", "cpqRPMTrapURL"))
cpqRPMTrapCMCVoltUnder = NotificationType((1, 3, 6, 1, 4, 1, 232, 154, 2) + (0,10011)).setObjects(("SNMPv2-MIB", "sysName"), ("CPQRPM-MIB", "cpqRPMTrapDescription"), ("CPQRPM-MIB", "cpqRPMTrapDeviceId"), ("CPQRPM-MIB", "cpqRPMTrapDeviceName"), ("CPQRPM-MIB", "cpqRPMTrapDeviceAddress"), ("CPQRPM-MIB", "cpqRPMTrapType"), ("CPQRPM-MIB", "cpqRPMTrapSeverity"), ("CPQRPM-MIB", "cpqRPMTrapURL"))
cpqRPMTrapCMCVoltOver = NotificationType((1, 3, 6, 1, 4, 1, 232, 154, 2) + (0,10012)).setObjects(("SNMPv2-MIB", "sysName"), ("CPQRPM-MIB", "cpqRPMTrapDescription"), ("CPQRPM-MIB", "cpqRPMTrapDeviceId"), ("CPQRPM-MIB", "cpqRPMTrapDeviceName"), ("CPQRPM-MIB", "cpqRPMTrapDeviceAddress"), ("CPQRPM-MIB", "cpqRPMTrapType"), ("CPQRPM-MIB", "cpqRPMTrapSeverity"), ("CPQRPM-MIB", "cpqRPMTrapURL"))
cpqRPMTrapCMCVoltNormal = NotificationType((1, 3, 6, 1, 4, 1, 232, 154, 2) + (0,10013)).setObjects(("SNMPv2-MIB", "sysName"), ("CPQRPM-MIB", "cpqRPMTrapDescription"), ("CPQRPM-MIB", "cpqRPMTrapDeviceId"), ("CPQRPM-MIB", "cpqRPMTrapDeviceName"), ("CPQRPM-MIB", "cpqRPMTrapDeviceAddress"), ("CPQRPM-MIB", "cpqRPMTrapType"), ("CPQRPM-MIB", "cpqRPMTrapSeverity"), ("CPQRPM-MIB", "cpqRPMTrapURL"))
cpqRPMTrapCMCHmdtUnder = NotificationType((1, 3, 6, 1, 4, 1, 232, 154, 2) + (0,10021)).setObjects(("SNMPv2-MIB", "sysName"), ("CPQRPM-MIB", "cpqRPMTrapDescription"), ("CPQRPM-MIB", "cpqRPMTrapDeviceId"), ("CPQRPM-MIB", "cpqRPMTrapDeviceName"), ("CPQRPM-MIB", "cpqRPMTrapDeviceAddress"), ("CPQRPM-MIB", "cpqRPMTrapType"), ("CPQRPM-MIB", "cpqRPMTrapSeverity"), ("CPQRPM-MIB", "cpqRPMTrapURL"))
cpqRPMTrapCMCHmdtOver = NotificationType((1, 3, 6, 1, 4, 1, 232, 154, 2) + (0,10022)).setObjects(("SNMPv2-MIB", "sysName"), ("CPQRPM-MIB", "cpqRPMTrapDescription"), ("CPQRPM-MIB", "cpqRPMTrapDeviceId"), ("CPQRPM-MIB", "cpqRPMTrapDeviceName"), ("CPQRPM-MIB", "cpqRPMTrapDeviceAddress"), ("CPQRPM-MIB", "cpqRPMTrapType"), ("CPQRPM-MIB", "cpqRPMTrapSeverity"), ("CPQRPM-MIB", "cpqRPMTrapURL"))
cpqRPMTrapCMCHmdtNormal = NotificationType((1, 3, 6, 1, 4, 1, 232, 154, 2) + (0,10023)).setObjects(("SNMPv2-MIB", "sysName"), ("CPQRPM-MIB", "cpqRPMTrapDescription"), ("CPQRPM-MIB", "cpqRPMTrapDeviceId"), ("CPQRPM-MIB", "cpqRPMTrapDeviceName"), ("CPQRPM-MIB", "cpqRPMTrapDeviceAddress"), ("CPQRPM-MIB", "cpqRPMTrapType"), ("CPQRPM-MIB", "cpqRPMTrapSeverity"), ("CPQRPM-MIB", "cpqRPMTrapURL"))
cpqRPMTrapCMCSmokeDetected = NotificationType((1, 3, 6, 1, 4, 1, 232, 154, 2) + (0,10031)).setObjects(("SNMPv2-MIB", "sysName"), ("CPQRPM-MIB", "cpqRPMTrapDescription"), ("CPQRPM-MIB", "cpqRPMTrapDeviceId"), ("CPQRPM-MIB", "cpqRPMTrapDeviceName"), ("CPQRPM-MIB", "cpqRPMTrapDeviceAddress"), ("CPQRPM-MIB", "cpqRPMTrapType"), ("CPQRPM-MIB", "cpqRPMTrapSeverity"), ("CPQRPM-MIB", "cpqRPMTrapURL"))
cpqRPMTrapCMCSmokeCleared = NotificationType((1, 3, 6, 1, 4, 1, 232, 154, 2) + (0,10032)).setObjects(("SNMPv2-MIB", "sysName"), ("CPQRPM-MIB", "cpqRPMTrapDescription"), ("CPQRPM-MIB", "cpqRPMTrapDeviceId"), ("CPQRPM-MIB", "cpqRPMTrapDeviceName"), ("CPQRPM-MIB", "cpqRPMTrapDeviceAddress"), ("CPQRPM-MIB", "cpqRPMTrapType"), ("CPQRPM-MIB", "cpqRPMTrapSeverity"), ("CPQRPM-MIB", "cpqRPMTrapURL"))
cpqRPMTrapCMCShockDetected = NotificationType((1, 3, 6, 1, 4, 1, 232, 154, 2) + (0,10041)).setObjects(("SNMPv2-MIB", "sysName"), ("CPQRPM-MIB", "cpqRPMTrapDescription"), ("CPQRPM-MIB", "cpqRPMTrapDeviceId"), ("CPQRPM-MIB", "cpqRPMTrapDeviceName"), ("CPQRPM-MIB", "cpqRPMTrapDeviceAddress"), ("CPQRPM-MIB", "cpqRPMTrapType"), ("CPQRPM-MIB", "cpqRPMTrapSeverity"), ("CPQRPM-MIB", "cpqRPMTrapURL"))
cpqRPMTrapCMCShockCleared = NotificationType((1, 3, 6, 1, 4, 1, 232, 154, 2) + (0,10042)).setObjects(("SNMPv2-MIB", "sysName"), ("CPQRPM-MIB", "cpqRPMTrapDescription"), ("CPQRPM-MIB", "cpqRPMTrapDeviceId"), ("CPQRPM-MIB", "cpqRPMTrapDeviceName"), ("CPQRPM-MIB", "cpqRPMTrapDeviceAddress"), ("CPQRPM-MIB", "cpqRPMTrapType"), ("CPQRPM-MIB", "cpqRPMTrapSeverity"), ("CPQRPM-MIB", "cpqRPMTrapURL"))
cpqRPMTrapCMCAux1Alarm = NotificationType((1, 3, 6, 1, 4, 1, 232, 154, 2) + (0,10051)).setObjects(("SNMPv2-MIB", "sysName"), ("CPQRPM-MIB", "cpqRPMTrapDescription"), ("CPQRPM-MIB", "cpqRPMTrapDeviceId"), ("CPQRPM-MIB", "cpqRPMTrapDeviceName"), ("CPQRPM-MIB", "cpqRPMTrapDeviceAddress"), ("CPQRPM-MIB", "cpqRPMTrapType"), ("CPQRPM-MIB", "cpqRPMTrapSeverity"), ("CPQRPM-MIB", "cpqRPMTrapURL"))
cpqRPMTrapCMCAux1Cleared = NotificationType((1, 3, 6, 1, 4, 1, 232, 154, 2) + (0,10052)).setObjects(("SNMPv2-MIB", "sysName"), ("CPQRPM-MIB", "cpqRPMTrapDescription"), ("CPQRPM-MIB", "cpqRPMTrapDeviceId"), ("CPQRPM-MIB", "cpqRPMTrapDeviceName"), ("CPQRPM-MIB", "cpqRPMTrapDeviceAddress"), ("CPQRPM-MIB", "cpqRPMTrapType"), ("CPQRPM-MIB", "cpqRPMTrapSeverity"), ("CPQRPM-MIB", "cpqRPMTrapURL"))
cpqRPMTrapCMCAux2Alarm = NotificationType((1, 3, 6, 1, 4, 1, 232, 154, 2) + (0,10053)).setObjects(("SNMPv2-MIB", "sysName"), ("CPQRPM-MIB", "cpqRPMTrapDescription"), ("CPQRPM-MIB", "cpqRPMTrapDeviceId"), ("CPQRPM-MIB", "cpqRPMTrapDeviceName"), ("CPQRPM-MIB", "cpqRPMTrapDeviceAddress"), ("CPQRPM-MIB", "cpqRPMTrapType"), ("CPQRPM-MIB", "cpqRPMTrapSeverity"), ("CPQRPM-MIB", "cpqRPMTrapURL"))
cpqRPMTrapCMCAux2Cleared = NotificationType((1, 3, 6, 1, 4, 1, 232, 154, 2) + (0,10054)).setObjects(("SNMPv2-MIB", "sysName"), ("CPQRPM-MIB", "cpqRPMTrapDescription"), ("CPQRPM-MIB", "cpqRPMTrapDeviceId"), ("CPQRPM-MIB", "cpqRPMTrapDeviceName"), ("CPQRPM-MIB", "cpqRPMTrapDeviceAddress"), ("CPQRPM-MIB", "cpqRPMTrapType"), ("CPQRPM-MIB", "cpqRPMTrapSeverity"), ("CPQRPM-MIB", "cpqRPMTrapURL"))
cpqRPMTrapCMCInput1Opened = NotificationType((1, 3, 6, 1, 4, 1, 232, 154, 2) + (0,10101)).setObjects(("SNMPv2-MIB", "sysName"), ("CPQRPM-MIB", "cpqRPMTrapDescription"), ("CPQRPM-MIB", "cpqRPMTrapDeviceId"), ("CPQRPM-MIB", "cpqRPMTrapDeviceName"), ("CPQRPM-MIB", "cpqRPMTrapDeviceAddress"), ("CPQRPM-MIB", "cpqRPMTrapType"), ("CPQRPM-MIB", "cpqRPMTrapSeverity"), ("CPQRPM-MIB", "cpqRPMTrapURL"))
cpqRPMTrapCMCInput1Closed = NotificationType((1, 3, 6, 1, 4, 1, 232, 154, 2) + (0,10102)).setObjects(("SNMPv2-MIB", "sysName"), ("CPQRPM-MIB", "cpqRPMTrapDescription"), ("CPQRPM-MIB", "cpqRPMTrapDeviceId"), ("CPQRPM-MIB", "cpqRPMTrapDeviceName"), ("CPQRPM-MIB", "cpqRPMTrapDeviceAddress"), ("CPQRPM-MIB", "cpqRPMTrapType"), ("CPQRPM-MIB", "cpqRPMTrapSeverity"), ("CPQRPM-MIB", "cpqRPMTrapURL"))
cpqRPMTrapCMCInput2Opened = NotificationType((1, 3, 6, 1, 4, 1, 232, 154, 2) + (0,10103)).setObjects(("SNMPv2-MIB", "sysName"), ("CPQRPM-MIB", "cpqRPMTrapDescription"), ("CPQRPM-MIB", "cpqRPMTrapDeviceId"), ("CPQRPM-MIB", "cpqRPMTrapDeviceName"), ("CPQRPM-MIB", "cpqRPMTrapDeviceAddress"), ("CPQRPM-MIB", "cpqRPMTrapType"), ("CPQRPM-MIB", "cpqRPMTrapSeverity"), ("CPQRPM-MIB", "cpqRPMTrapURL"))
cpqRPMTrapCMCInput2Closed = NotificationType((1, 3, 6, 1, 4, 1, 232, 154, 2) + (0,10104)).setObjects(("SNMPv2-MIB", "sysName"), ("CPQRPM-MIB", "cpqRPMTrapDescription"), ("CPQRPM-MIB", "cpqRPMTrapDeviceId"), ("CPQRPM-MIB", "cpqRPMTrapDeviceName"), ("CPQRPM-MIB", "cpqRPMTrapDeviceAddress"), ("CPQRPM-MIB", "cpqRPMTrapType"), ("CPQRPM-MIB", "cpqRPMTrapSeverity"), ("CPQRPM-MIB", "cpqRPMTrapURL"))
cpqRPMTrapCMCInput3Opened = NotificationType((1, 3, 6, 1, 4, 1, 232, 154, 2) + (0,10105)).setObjects(("SNMPv2-MIB", "sysName"), ("CPQRPM-MIB", "cpqRPMTrapDescription"), ("CPQRPM-MIB", "cpqRPMTrapDeviceId"), ("CPQRPM-MIB", "cpqRPMTrapDeviceName"), ("CPQRPM-MIB", "cpqRPMTrapDeviceAddress"), ("CPQRPM-MIB", "cpqRPMTrapType"), ("CPQRPM-MIB", "cpqRPMTrapSeverity"), ("CPQRPM-MIB", "cpqRPMTrapURL"))
cpqRPMTrapCMCInput3Closed = NotificationType((1, 3, 6, 1, 4, 1, 232, 154, 2) + (0,10106)).setObjects(("SNMPv2-MIB", "sysName"), ("CPQRPM-MIB", "cpqRPMTrapDescription"), ("CPQRPM-MIB", "cpqRPMTrapDeviceId"), ("CPQRPM-MIB", "cpqRPMTrapDeviceName"), ("CPQRPM-MIB", "cpqRPMTrapDeviceAddress"), ("CPQRPM-MIB", "cpqRPMTrapType"), ("CPQRPM-MIB", "cpqRPMTrapSeverity"), ("CPQRPM-MIB", "cpqRPMTrapURL"))
cpqRPMTrapCMCInput4Opened = NotificationType((1, 3, 6, 1, 4, 1, 232, 154, 2) + (0,10107)).setObjects(("SNMPv2-MIB", "sysName"), ("CPQRPM-MIB", "cpqRPMTrapDescription"), ("CPQRPM-MIB", "cpqRPMTrapDeviceId"), ("CPQRPM-MIB", "cpqRPMTrapDeviceName"), ("CPQRPM-MIB", "cpqRPMTrapDeviceAddress"), ("CPQRPM-MIB", "cpqRPMTrapType"), ("CPQRPM-MIB", "cpqRPMTrapSeverity"), ("CPQRPM-MIB", "cpqRPMTrapURL"))
cpqRPMTrapCMCInput4Closed = NotificationType((1, 3, 6, 1, 4, 1, 232, 154, 2) + (0,10108)).setObjects(("SNMPv2-MIB", "sysName"), ("CPQRPM-MIB", "cpqRPMTrapDescription"), ("CPQRPM-MIB", "cpqRPMTrapDeviceId"), ("CPQRPM-MIB", "cpqRPMTrapDeviceName"), ("CPQRPM-MIB", "cpqRPMTrapDeviceAddress"), ("CPQRPM-MIB", "cpqRPMTrapType"), ("CPQRPM-MIB", "cpqRPMTrapSeverity"), ("CPQRPM-MIB", "cpqRPMTrapURL"))
cpqRPMTrapCMCLockset1Unlocked = NotificationType((1, 3, 6, 1, 4, 1, 232, 154, 2) + (0,10111)).setObjects(("SNMPv2-MIB", "sysName"), ("CPQRPM-MIB", "cpqRPMTrapDescription"), ("CPQRPM-MIB", "cpqRPMTrapDeviceId"), ("CPQRPM-MIB", "cpqRPMTrapDeviceName"), ("CPQRPM-MIB", "cpqRPMTrapDeviceAddress"), ("CPQRPM-MIB", "cpqRPMTrapType"), ("CPQRPM-MIB", "cpqRPMTrapSeverity"), ("CPQRPM-MIB", "cpqRPMTrapURL"))
cpqRPMTrapCMCLockset1FailedToLock = NotificationType((1, 3, 6, 1, 4, 1, 232, 154, 2) + (0,10112)).setObjects(("SNMPv2-MIB", "sysName"), ("CPQRPM-MIB", "cpqRPMTrapDescription"), ("CPQRPM-MIB", "cpqRPMTrapDeviceId"), ("CPQRPM-MIB", "cpqRPMTrapDeviceName"), ("CPQRPM-MIB", "cpqRPMTrapDeviceAddress"), ("CPQRPM-MIB", "cpqRPMTrapType"), ("CPQRPM-MIB", "cpqRPMTrapSeverity"), ("CPQRPM-MIB", "cpqRPMTrapURL"))
cpqRPMTrapCMCLockset1Error = NotificationType((1, 3, 6, 1, 4, 1, 232, 154, 2) + (0,10113)).setObjects(("SNMPv2-MIB", "sysName"), ("CPQRPM-MIB", "cpqRPMTrapDescription"), ("CPQRPM-MIB", "cpqRPMTrapDeviceId"), ("CPQRPM-MIB", "cpqRPMTrapDeviceName"), ("CPQRPM-MIB", "cpqRPMTrapDeviceAddress"), ("CPQRPM-MIB", "cpqRPMTrapType"), ("CPQRPM-MIB", "cpqRPMTrapSeverity"), ("CPQRPM-MIB", "cpqRPMTrapURL"))
cpqRPMTrapCMCLockset1Locked = NotificationType((1, 3, 6, 1, 4, 1, 232, 154, 2) + (0,10114)).setObjects(("SNMPv2-MIB", "sysName"), ("CPQRPM-MIB", "cpqRPMTrapDescription"), ("CPQRPM-MIB", "cpqRPMTrapDeviceId"), ("CPQRPM-MIB", "cpqRPMTrapDeviceName"), ("CPQRPM-MIB", "cpqRPMTrapDeviceAddress"), ("CPQRPM-MIB", "cpqRPMTrapType"), ("CPQRPM-MIB", "cpqRPMTrapSeverity"), ("CPQRPM-MIB", "cpqRPMTrapURL"))
cpqRPMTrapCMCLockset2Unlocked = NotificationType((1, 3, 6, 1, 4, 1, 232, 154, 2) + (0,10116)).setObjects(("SNMPv2-MIB", "sysName"), ("CPQRPM-MIB", "cpqRPMTrapDescription"), ("CPQRPM-MIB", "cpqRPMTrapDeviceId"), ("CPQRPM-MIB", "cpqRPMTrapDeviceName"), ("CPQRPM-MIB", "cpqRPMTrapDeviceAddress"), ("CPQRPM-MIB", "cpqRPMTrapType"), ("CPQRPM-MIB", "cpqRPMTrapSeverity"), ("CPQRPM-MIB", "cpqRPMTrapURL"))
cpqRPMTrapCMCLockset2FailedToLock = NotificationType((1, 3, 6, 1, 4, 1, 232, 154, 2) + (0,10117)).setObjects(("SNMPv2-MIB", "sysName"), ("CPQRPM-MIB", "cpqRPMTrapDescription"), ("CPQRPM-MIB", "cpqRPMTrapDeviceId"), ("CPQRPM-MIB", "cpqRPMTrapDeviceName"), ("CPQRPM-MIB", "cpqRPMTrapDeviceAddress"), ("CPQRPM-MIB", "cpqRPMTrapType"), ("CPQRPM-MIB", "cpqRPMTrapSeverity"), ("CPQRPM-MIB", "cpqRPMTrapURL"))
cpqRPMTrapCMCLockset2Error = NotificationType((1, 3, 6, 1, 4, 1, 232, 154, 2) + (0,10118)).setObjects(("SNMPv2-MIB", "sysName"), ("CPQRPM-MIB", "cpqRPMTrapDescription"), ("CPQRPM-MIB", "cpqRPMTrapDeviceId"), ("CPQRPM-MIB", "cpqRPMTrapDeviceName"), ("CPQRPM-MIB", "cpqRPMTrapDeviceAddress"), ("CPQRPM-MIB", "cpqRPMTrapType"), ("CPQRPM-MIB", "cpqRPMTrapSeverity"), ("CPQRPM-MIB", "cpqRPMTrapURL"))
cpqRPMTrapCMCLockset2Locked = NotificationType((1, 3, 6, 1, 4, 1, 232, 154, 2) + (0,10119)).setObjects(("SNMPv2-MIB", "sysName"), ("CPQRPM-MIB", "cpqRPMTrapDescription"), ("CPQRPM-MIB", "cpqRPMTrapDeviceId"), ("CPQRPM-MIB", "cpqRPMTrapDeviceName"), ("CPQRPM-MIB", "cpqRPMTrapDeviceAddress"), ("CPQRPM-MIB", "cpqRPMTrapType"), ("CPQRPM-MIB", "cpqRPMTrapSeverity"), ("CPQRPM-MIB", "cpqRPMTrapURL"))
cpqRPMTrapCMCLockset1Normal = NotificationType((1, 3, 6, 1, 4, 1, 232, 154, 2) + (0,10134)).setObjects(("SNMPv2-MIB", "sysName"), ("CPQRPM-MIB", "cpqRPMTrapDescription"), ("CPQRPM-MIB", "cpqRPMTrapDeviceId"), ("CPQRPM-MIB", "cpqRPMTrapDeviceName"), ("CPQRPM-MIB", "cpqRPMTrapDeviceAddress"), ("CPQRPM-MIB", "cpqRPMTrapType"), ("CPQRPM-MIB", "cpqRPMTrapSeverity"), ("CPQRPM-MIB", "cpqRPMTrapURL"))
cpqRPMTrapCMCLockset2Normal = NotificationType((1, 3, 6, 1, 4, 1, 232, 154, 2) + (0,10135)).setObjects(("SNMPv2-MIB", "sysName"), ("CPQRPM-MIB", "cpqRPMTrapDescription"), ("CPQRPM-MIB", "cpqRPMTrapDeviceId"), ("CPQRPM-MIB", "cpqRPMTrapDeviceName"), ("CPQRPM-MIB", "cpqRPMTrapDeviceAddress"), ("CPQRPM-MIB", "cpqRPMTrapType"), ("CPQRPM-MIB", "cpqRPMTrapSeverity"), ("CPQRPM-MIB", "cpqRPMTrapURL"))
cpqRPMTrapUPSInputVoltageBelowMin = NotificationType((1, 3, 6, 1, 4, 1, 232, 154, 2) + (0,20001)).setObjects(("SNMPv2-MIB", "sysName"), ("CPQRPM-MIB", "cpqRPMTrapDescription"), ("CPQRPM-MIB", "cpqRPMTrapDeviceId"), ("CPQRPM-MIB", "cpqRPMTrapDeviceName"), ("CPQRPM-MIB", "cpqRPMTrapDeviceAddress"), ("CPQRPM-MIB", "cpqRPMTrapType"), ("CPQRPM-MIB", "cpqRPMTrapSeverity"))
cpqRPMTrapUPSInputVoltageAboveMax = NotificationType((1, 3, 6, 1, 4, 1, 232, 154, 2) + (0,20002)).setObjects(("SNMPv2-MIB", "sysName"), ("CPQRPM-MIB", "cpqRPMTrapDescription"), ("CPQRPM-MIB", "cpqRPMTrapDeviceId"), ("CPQRPM-MIB", "cpqRPMTrapDeviceName"), ("CPQRPM-MIB", "cpqRPMTrapDeviceAddress"), ("CPQRPM-MIB", "cpqRPMTrapType"), ("CPQRPM-MIB", "cpqRPMTrapSeverity"))
cpqRPMTrapUPSInputVoltageNormal = NotificationType((1, 3, 6, 1, 4, 1, 232, 154, 2) + (0,20003)).setObjects(("SNMPv2-MIB", "sysName"), ("CPQRPM-MIB", "cpqRPMTrapDescription"), ("CPQRPM-MIB", "cpqRPMTrapDeviceId"), ("CPQRPM-MIB", "cpqRPMTrapDeviceName"), ("CPQRPM-MIB", "cpqRPMTrapDeviceAddress"), ("CPQRPM-MIB", "cpqRPMTrapType"), ("CPQRPM-MIB", "cpqRPMTrapSeverity"), ("CPQRPM-MIB", "cpqRPMTrapURL"))
cpqRPMTrapUPSOutputVoltageBelowMin = NotificationType((1, 3, 6, 1, 4, 1, 232, 154, 2) + (0,20011)).setObjects(("SNMPv2-MIB", "sysName"), ("CPQRPM-MIB", "cpqRPMTrapDescription"), ("CPQRPM-MIB", "cpqRPMTrapDeviceId"), ("CPQRPM-MIB", "cpqRPMTrapDeviceName"), ("CPQRPM-MIB", "cpqRPMTrapDeviceAddress"), ("CPQRPM-MIB", "cpqRPMTrapType"), ("CPQRPM-MIB", "cpqRPMTrapSeverity"))
cpqRPMTrapUPSOutputVoltageAboveMax = NotificationType((1, 3, 6, 1, 4, 1, 232, 154, 2) + (0,20012)).setObjects(("SNMPv2-MIB", "sysName"), ("CPQRPM-MIB", "cpqRPMTrapDescription"), ("CPQRPM-MIB", "cpqRPMTrapDeviceId"), ("CPQRPM-MIB", "cpqRPMTrapDeviceName"), ("CPQRPM-MIB", "cpqRPMTrapDeviceAddress"), ("CPQRPM-MIB", "cpqRPMTrapType"), ("CPQRPM-MIB", "cpqRPMTrapSeverity"))
cpqRPMTrapUPSOutputVoltageNormal = NotificationType((1, 3, 6, 1, 4, 1, 232, 154, 2) + (0,21020)).setObjects(("SNMPv2-MIB", "sysName"), ("CPQRPM-MIB", "cpqRPMTrapDescription"), ("CPQRPM-MIB", "cpqRPMTrapDeviceId"), ("CPQRPM-MIB", "cpqRPMTrapDeviceName"), ("CPQRPM-MIB", "cpqRPMTrapDeviceAddress"), ("CPQRPM-MIB", "cpqRPMTrapType"), ("CPQRPM-MIB", "cpqRPMTrapSeverity"), ("CPQRPM-MIB", "cpqRPMTrapURL"))
cpqRPMTrapUPSOutputOverload = NotificationType((1, 3, 6, 1, 4, 1, 232, 154, 2) + (0,20014)).setObjects(("SNMPv2-MIB", "sysName"), ("CPQRPM-MIB", "cpqRPMTrapDescription"), ("CPQRPM-MIB", "cpqRPMTrapDeviceId"), ("CPQRPM-MIB", "cpqRPMTrapDeviceName"), ("CPQRPM-MIB", "cpqRPMTrapDeviceAddress"), ("CPQRPM-MIB", "cpqRPMTrapType"), ("CPQRPM-MIB", "cpqRPMTrapSeverity"), ("CPQRPM-MIB", "cpqRPMTrapURL"))
cpqRPMTrapUPSOutputOverloadCleared = NotificationType((1, 3, 6, 1, 4, 1, 232, 154, 2) + (0,20015)).setObjects(("SNMPv2-MIB", "sysName"), ("CPQRPM-MIB", "cpqRPMTrapDescription"), ("CPQRPM-MIB", "cpqRPMTrapDeviceId"), ("CPQRPM-MIB", "cpqRPMTrapDeviceName"), ("CPQRPM-MIB", "cpqRPMTrapDeviceAddress"), ("CPQRPM-MIB", "cpqRPMTrapType"), ("CPQRPM-MIB", "cpqRPMTrapSeverity"), ("CPQRPM-MIB", "cpqRPMTrapURL"))
cpqRPMTrapUPSBatteryLow = NotificationType((1, 3, 6, 1, 4, 1, 232, 154, 2) + (0,21055)).setObjects(("SNMPv2-MIB", "sysName"), ("CPQRPM-MIB", "cpqRPMTrapDescription"), ("CPQRPM-MIB", "cpqRPMTrapDeviceId"), ("CPQRPM-MIB", "cpqRPMTrapDeviceName"), ("CPQRPM-MIB", "cpqRPMTrapDeviceAddress"), ("CPQRPM-MIB", "cpqRPMTrapType"), ("CPQRPM-MIB", "cpqRPMTrapSeverity"), ("CPQRPM-MIB", "cpqRPMTrapURL"))
cpqRPMTrapUPSBatteryLowCleared = NotificationType((1, 3, 6, 1, 4, 1, 232, 154, 2) + (0,21056)).setObjects(("SNMPv2-MIB", "sysName"), ("CPQRPM-MIB", "cpqRPMTrapDescription"), ("CPQRPM-MIB", "cpqRPMTrapDeviceId"), ("CPQRPM-MIB", "cpqRPMTrapDeviceName"), ("CPQRPM-MIB", "cpqRPMTrapDeviceAddress"), ("CPQRPM-MIB", "cpqRPMTrapType"), ("CPQRPM-MIB", "cpqRPMTrapSeverity"), ("CPQRPM-MIB", "cpqRPMTrapURL"))
cpqRPMTrapUPSBatteryDepleted = NotificationType((1, 3, 6, 1, 4, 1, 232, 154, 2) + (0,20022)).setObjects(("SNMPv2-MIB", "sysName"), ("CPQRPM-MIB", "cpqRPMTrapDescription"), ("CPQRPM-MIB", "cpqRPMTrapDeviceId"), ("CPQRPM-MIB", "cpqRPMTrapDeviceName"), ("CPQRPM-MIB", "cpqRPMTrapDeviceAddress"), ("CPQRPM-MIB", "cpqRPMTrapType"), ("CPQRPM-MIB", "cpqRPMTrapSeverity"))
cpqRPMTrapUPSBatteryLevelNormal = NotificationType((1, 3, 6, 1, 4, 1, 232, 154, 2) + (0,20023)).setObjects(("SNMPv2-MIB", "sysName"), ("CPQRPM-MIB", "cpqRPMTrapDescription"), ("CPQRPM-MIB", "cpqRPMTrapDeviceId"), ("CPQRPM-MIB", "cpqRPMTrapDeviceName"), ("CPQRPM-MIB", "cpqRPMTrapDeviceAddress"), ("CPQRPM-MIB", "cpqRPMTrapType"), ("CPQRPM-MIB", "cpqRPMTrapSeverity"), ("CPQRPM-MIB", "cpqRPMTrapURL"))
cpqRPMTrapUPSOnBattery = NotificationType((1, 3, 6, 1, 4, 1, 232, 154, 2) + (0,21063)).setObjects(("SNMPv2-MIB", "sysName"), ("CPQRPM-MIB", "cpqRPMTrapDescription"), ("CPQRPM-MIB", "cpqRPMTrapDeviceId"), ("CPQRPM-MIB", "cpqRPMTrapDeviceName"), ("CPQRPM-MIB", "cpqRPMTrapDeviceAddress"), ("CPQRPM-MIB", "cpqRPMTrapType"), ("CPQRPM-MIB", "cpqRPMTrapSeverity"), ("CPQRPM-MIB", "cpqRPMTrapURL"))
cpqRPMTrapUPSOnBypass = NotificationType((1, 3, 6, 1, 4, 1, 232, 154, 2) + (0,20032)).setObjects(("SNMPv2-MIB", "sysName"), ("CPQRPM-MIB", "cpqRPMTrapDescription"), ("CPQRPM-MIB", "cpqRPMTrapDeviceId"), ("CPQRPM-MIB", "cpqRPMTrapDeviceName"), ("CPQRPM-MIB", "cpqRPMTrapDeviceAddress"), ("CPQRPM-MIB", "cpqRPMTrapType"), ("CPQRPM-MIB", "cpqRPMTrapSeverity"))
cpqRPMTrapUPSOnBuck = NotificationType((1, 3, 6, 1, 4, 1, 232, 154, 2) + (0,21029)).setObjects(("SNMPv2-MIB", "sysName"), ("CPQRPM-MIB", "cpqRPMTrapDescription"), ("CPQRPM-MIB", "cpqRPMTrapDeviceId"), ("CPQRPM-MIB", "cpqRPMTrapDeviceName"), ("CPQRPM-MIB", "cpqRPMTrapDeviceAddress"), ("CPQRPM-MIB", "cpqRPMTrapType"), ("CPQRPM-MIB", "cpqRPMTrapSeverity"))
cpqRPMTrapUPSOnBoost = NotificationType((1, 3, 6, 1, 4, 1, 232, 154, 2) + (0,21031)).setObjects(("SNMPv2-MIB", "sysName"), ("CPQRPM-MIB", "cpqRPMTrapDescription"), ("CPQRPM-MIB", "cpqRPMTrapDeviceId"), ("CPQRPM-MIB", "cpqRPMTrapDeviceName"), ("CPQRPM-MIB", "cpqRPMTrapDeviceAddress"), ("CPQRPM-MIB", "cpqRPMTrapType"), ("CPQRPM-MIB", "cpqRPMTrapSeverity"))
cpqRPMTrapUPSOnUtilityPower = NotificationType((1, 3, 6, 1, 4, 1, 232, 154, 2) + (0,21064)).setObjects(("SNMPv2-MIB", "sysName"), ("CPQRPM-MIB", "cpqRPMTrapDescription"), ("CPQRPM-MIB", "cpqRPMTrapDeviceId"), ("CPQRPM-MIB", "cpqRPMTrapDeviceName"), ("CPQRPM-MIB", "cpqRPMTrapDeviceAddress"), ("CPQRPM-MIB", "cpqRPMTrapType"), ("CPQRPM-MIB", "cpqRPMTrapSeverity"), ("CPQRPM-MIB", "cpqRPMTrapURL"))
cpqRPMTrapUPSTemperatureLow = NotificationType((1, 3, 6, 1, 4, 1, 232, 154, 2) + (0,20101)).setObjects(("SNMPv2-MIB", "sysName"), ("CPQRPM-MIB", "cpqRPMTrapDescription"), ("CPQRPM-MIB", "cpqRPMTrapDeviceId"), ("CPQRPM-MIB", "cpqRPMTrapDeviceName"), ("CPQRPM-MIB", "cpqRPMTrapDeviceAddress"), ("CPQRPM-MIB", "cpqRPMTrapType"), ("CPQRPM-MIB", "cpqRPMTrapSeverity"))
cpqRPMTrapUPSTemperatureHigh = NotificationType((1, 3, 6, 1, 4, 1, 232, 154, 2) + (0,20102)).setObjects(("SNMPv2-MIB", "sysName"), ("CPQRPM-MIB", "cpqRPMTrapDescription"), ("CPQRPM-MIB", "cpqRPMTrapDeviceId"), ("CPQRPM-MIB", "cpqRPMTrapDeviceName"), ("CPQRPM-MIB", "cpqRPMTrapDeviceAddress"), ("CPQRPM-MIB", "cpqRPMTrapType"), ("CPQRPM-MIB", "cpqRPMTrapSeverity"))
cpqRPMTrapUPSTemperatureNormal = NotificationType((1, 3, 6, 1, 4, 1, 232, 154, 2) + (0,20103)).setObjects(("SNMPv2-MIB", "sysName"), ("CPQRPM-MIB", "cpqRPMTrapDescription"), ("CPQRPM-MIB", "cpqRPMTrapDeviceId"), ("CPQRPM-MIB", "cpqRPMTrapDeviceName"), ("CPQRPM-MIB", "cpqRPMTrapDeviceAddress"), ("CPQRPM-MIB", "cpqRPMTrapType"), ("CPQRPM-MIB", "cpqRPMTrapSeverity"), ("CPQRPM-MIB", "cpqRPMTrapURL"))
cpqRPMTrapUPSInternalFailure = NotificationType((1, 3, 6, 1, 4, 1, 232, 154, 2) + (0,20111)).setObjects(("SNMPv2-MIB", "sysName"), ("CPQRPM-MIB", "cpqRPMTrapDescription"), ("CPQRPM-MIB", "cpqRPMTrapDeviceId"), ("CPQRPM-MIB", "cpqRPMTrapDeviceName"), ("CPQRPM-MIB", "cpqRPMTrapDeviceAddress"), ("CPQRPM-MIB", "cpqRPMTrapType"), ("CPQRPM-MIB", "cpqRPMTrapSeverity"), ("CPQRPM-MIB", "cpqRPMTrapURL"))
cpqRPMTrapUPSInternalFailureCleared = NotificationType((1, 3, 6, 1, 4, 1, 232, 154, 2) + (0,20112)).setObjects(("SNMPv2-MIB", "sysName"), ("CPQRPM-MIB", "cpqRPMTrapDescription"), ("CPQRPM-MIB", "cpqRPMTrapDeviceId"), ("CPQRPM-MIB", "cpqRPMTrapDeviceName"), ("CPQRPM-MIB", "cpqRPMTrapDeviceAddress"), ("CPQRPM-MIB", "cpqRPMTrapType"), ("CPQRPM-MIB", "cpqRPMTrapSeverity"), ("CPQRPM-MIB", "cpqRPMTrapURL"))
cpqRPMTrapUPSBatteryFailure = NotificationType((1, 3, 6, 1, 4, 1, 232, 154, 2) + (0,20121)).setObjects(("SNMPv2-MIB", "sysName"), ("CPQRPM-MIB", "cpqRPMTrapDescription"), ("CPQRPM-MIB", "cpqRPMTrapDeviceId"), ("CPQRPM-MIB", "cpqRPMTrapDeviceName"), ("CPQRPM-MIB", "cpqRPMTrapDeviceAddress"), ("CPQRPM-MIB", "cpqRPMTrapType"), ("CPQRPM-MIB", "cpqRPMTrapSeverity"), ("CPQRPM-MIB", "cpqRPMTrapURL"))
cpqRPMTrapUPSBatteryFailureCleared = NotificationType((1, 3, 6, 1, 4, 1, 232, 154, 2) + (0,20122)).setObjects(("SNMPv2-MIB", "sysName"), ("CPQRPM-MIB", "cpqRPMTrapDescription"), ("CPQRPM-MIB", "cpqRPMTrapDeviceId"), ("CPQRPM-MIB", "cpqRPMTrapDeviceName"), ("CPQRPM-MIB", "cpqRPMTrapDeviceAddress"), ("CPQRPM-MIB", "cpqRPMTrapType"), ("CPQRPM-MIB", "cpqRPMTrapSeverity"), ("CPQRPM-MIB", "cpqRPMTrapURL"))
cpqRPMTrapUPSDiagnosticTestFailed = NotificationType((1, 3, 6, 1, 4, 1, 232, 154, 2) + (0,20131)).setObjects(("SNMPv2-MIB", "sysName"), ("CPQRPM-MIB", "cpqRPMTrapDescription"), ("CPQRPM-MIB", "cpqRPMTrapDeviceId"), ("CPQRPM-MIB", "cpqRPMTrapDeviceName"), ("CPQRPM-MIB", "cpqRPMTrapDeviceAddress"), ("CPQRPM-MIB", "cpqRPMTrapType"), ("CPQRPM-MIB", "cpqRPMTrapSeverity"))
cpqRPMTrapUPSDiagnosticTestSucceeded = NotificationType((1, 3, 6, 1, 4, 1, 232, 154, 2) + (0,20132)).setObjects(("SNMPv2-MIB", "sysName"), ("CPQRPM-MIB", "cpqRPMTrapDescription"), ("CPQRPM-MIB", "cpqRPMTrapDeviceId"), ("CPQRPM-MIB", "cpqRPMTrapDeviceName"), ("CPQRPM-MIB", "cpqRPMTrapDeviceAddress"), ("CPQRPM-MIB", "cpqRPMTrapType"), ("CPQRPM-MIB", "cpqRPMTrapSeverity"))
cpqRPMTrapUPSInputUnderOverFreq = NotificationType((1, 3, 6, 1, 4, 1, 232, 154, 2) + (0,20141)).setObjects(("SNMPv2-MIB", "sysName"), ("CPQRPM-MIB", "cpqRPMTrapDescription"), ("CPQRPM-MIB", "cpqRPMTrapDeviceId"), ("CPQRPM-MIB", "cpqRPMTrapDeviceName"), ("CPQRPM-MIB", "cpqRPMTrapDeviceAddress"), ("CPQRPM-MIB", "cpqRPMTrapType"), ("CPQRPM-MIB", "cpqRPMTrapSeverity"))
cpqRPMTrapUPSInputUnderOverFreqCleared = NotificationType((1, 3, 6, 1, 4, 1, 232, 154, 2) + (0,20142)).setObjects(("SNMPv2-MIB", "sysName"), ("CPQRPM-MIB", "cpqRPMTrapDescription"), ("CPQRPM-MIB", "cpqRPMTrapDeviceId"), ("CPQRPM-MIB", "cpqRPMTrapDeviceName"), ("CPQRPM-MIB", "cpqRPMTrapDeviceAddress"), ("CPQRPM-MIB", "cpqRPMTrapType"), ("CPQRPM-MIB", "cpqRPMTrapSeverity"))
cpqRPMTrapUPSDCStartOccurred = NotificationType((1, 3, 6, 1, 4, 1, 232, 154, 2) + (0,29998)).setObjects(("SNMPv2-MIB", "sysName"), ("CPQRPM-MIB", "cpqRPMTrapDescription"), ("CPQRPM-MIB", "cpqRPMTrapDeviceId"), ("CPQRPM-MIB", "cpqRPMTrapDeviceName"), ("CPQRPM-MIB", "cpqRPMTrapDeviceAddress"), ("CPQRPM-MIB", "cpqRPMTrapType"), ("CPQRPM-MIB", "cpqRPMTrapSeverity"))
cpqRPMTrapUPSDCStartOccurredCleared = NotificationType((1, 3, 6, 1, 4, 1, 232, 154, 2) + (0,29999)).setObjects(("SNMPv2-MIB", "sysName"), ("CPQRPM-MIB", "cpqRPMTrapDescription"), ("CPQRPM-MIB", "cpqRPMTrapDeviceId"), ("CPQRPM-MIB", "cpqRPMTrapDeviceName"), ("CPQRPM-MIB", "cpqRPMTrapDeviceAddress"), ("CPQRPM-MIB", "cpqRPMTrapType"), ("CPQRPM-MIB", "cpqRPMTrapSeverity"))
cpqRPMTrapUPSBypassNotAvailable = NotificationType((1, 3, 6, 1, 4, 1, 232, 154, 2) + (0,20161)).setObjects(("SNMPv2-MIB", "sysName"), ("CPQRPM-MIB", "cpqRPMTrapDescription"), ("CPQRPM-MIB", "cpqRPMTrapDeviceId"), ("CPQRPM-MIB", "cpqRPMTrapDeviceName"), ("CPQRPM-MIB", "cpqRPMTrapDeviceAddress"), ("CPQRPM-MIB", "cpqRPMTrapType"), ("CPQRPM-MIB", "cpqRPMTrapSeverity"), ("CPQRPM-MIB", "cpqRPMTrapURL"))
cpqRPMTrapUPSBypassNotAvailableCleared = NotificationType((1, 3, 6, 1, 4, 1, 232, 154, 2) + (0,20162)).setObjects(("SNMPv2-MIB", "sysName"), ("CPQRPM-MIB", "cpqRPMTrapDescription"), ("CPQRPM-MIB", "cpqRPMTrapDeviceId"), ("CPQRPM-MIB", "cpqRPMTrapDeviceName"), ("CPQRPM-MIB", "cpqRPMTrapDeviceAddress"), ("CPQRPM-MIB", "cpqRPMTrapType"), ("CPQRPM-MIB", "cpqRPMTrapSeverity"), ("CPQRPM-MIB", "cpqRPMTrapURL"))
cpqRPMTrapUPSUtilityFail = NotificationType((1, 3, 6, 1, 4, 1, 232, 154, 2) + (0,20171)).setObjects(("SNMPv2-MIB", "sysName"), ("CPQRPM-MIB", "cpqRPMTrapDescription"), ("CPQRPM-MIB", "cpqRPMTrapDeviceId"), ("CPQRPM-MIB", "cpqRPMTrapDeviceName"), ("CPQRPM-MIB", "cpqRPMTrapDeviceAddress"), ("CPQRPM-MIB", "cpqRPMTrapType"), ("CPQRPM-MIB", "cpqRPMTrapSeverity"))
cpqRPMTrapUPSUtilityFailCleared = NotificationType((1, 3, 6, 1, 4, 1, 232, 154, 2) + (0,20172)).setObjects(("SNMPv2-MIB", "sysName"), ("CPQRPM-MIB", "cpqRPMTrapDescription"), ("CPQRPM-MIB", "cpqRPMTrapDeviceId"), ("CPQRPM-MIB", "cpqRPMTrapDeviceName"), ("CPQRPM-MIB", "cpqRPMTrapDeviceAddress"), ("CPQRPM-MIB", "cpqRPMTrapType"), ("CPQRPM-MIB", "cpqRPMTrapSeverity"))
cpqRPMTrapUPSUtilityNotPresent = NotificationType((1, 3, 6, 1, 4, 1, 232, 154, 2) + (0,20181)).setObjects(("SNMPv2-MIB", "sysName"), ("CPQRPM-MIB", "cpqRPMTrapDescription"), ("CPQRPM-MIB", "cpqRPMTrapDeviceId"), ("CPQRPM-MIB", "cpqRPMTrapDeviceName"), ("CPQRPM-MIB", "cpqRPMTrapDeviceAddress"), ("CPQRPM-MIB", "cpqRPMTrapType"), ("CPQRPM-MIB", "cpqRPMTrapSeverity"))
cpqRPMTrapUPSUtilityNotPresentCleared = NotificationType((1, 3, 6, 1, 4, 1, 232, 154, 2) + (0,20182)).setObjects(("SNMPv2-MIB", "sysName"), ("CPQRPM-MIB", "cpqRPMTrapDescription"), ("CPQRPM-MIB", "cpqRPMTrapDeviceId"), ("CPQRPM-MIB", "cpqRPMTrapDeviceName"), ("CPQRPM-MIB", "cpqRPMTrapDeviceAddress"), ("CPQRPM-MIB", "cpqRPMTrapType"), ("CPQRPM-MIB", "cpqRPMTrapSeverity"))
cpqRPMTrapUPSBypassManualTurnedOn = NotificationType((1, 3, 6, 1, 4, 1, 232, 154, 2) + (0,20191)).setObjects(("SNMPv2-MIB", "sysName"), ("CPQRPM-MIB", "cpqRPMTrapDescription"), ("CPQRPM-MIB", "cpqRPMTrapDeviceId"), ("CPQRPM-MIB", "cpqRPMTrapDeviceName"), ("CPQRPM-MIB", "cpqRPMTrapDeviceAddress"), ("CPQRPM-MIB", "cpqRPMTrapType"), ("CPQRPM-MIB", "cpqRPMTrapSeverity"))
cpqRPMTrapUPSBypassManualTurnedOff = NotificationType((1, 3, 6, 1, 4, 1, 232, 154, 2) + (0,20192)).setObjects(("SNMPv2-MIB", "sysName"), ("CPQRPM-MIB", "cpqRPMTrapDescription"), ("CPQRPM-MIB", "cpqRPMTrapDeviceId"), ("CPQRPM-MIB", "cpqRPMTrapDeviceName"), ("CPQRPM-MIB", "cpqRPMTrapDeviceAddress"), ("CPQRPM-MIB", "cpqRPMTrapType"), ("CPQRPM-MIB", "cpqRPMTrapSeverity"))
cpqRPMTrapUPSSiteWiringFault = NotificationType((1, 3, 6, 1, 4, 1, 232, 154, 2) + (0,20201)).setObjects(("SNMPv2-MIB", "sysName"), ("CPQRPM-MIB", "cpqRPMTrapDescription"), ("CPQRPM-MIB", "cpqRPMTrapDeviceId"), ("CPQRPM-MIB", "cpqRPMTrapDeviceName"), ("CPQRPM-MIB", "cpqRPMTrapDeviceAddress"), ("CPQRPM-MIB", "cpqRPMTrapType"), ("CPQRPM-MIB", "cpqRPMTrapSeverity"), ("CPQRPM-MIB", "cpqRPMTrapURL"))
cpqRPMTrapUPSSiteWiringNormal = NotificationType((1, 3, 6, 1, 4, 1, 232, 154, 2) + (0,20202)).setObjects(("SNMPv2-MIB", "sysName"), ("CPQRPM-MIB", "cpqRPMTrapDescription"), ("CPQRPM-MIB", "cpqRPMTrapDeviceId"), ("CPQRPM-MIB", "cpqRPMTrapDeviceName"), ("CPQRPM-MIB", "cpqRPMTrapDeviceAddress"), ("CPQRPM-MIB", "cpqRPMTrapType"), ("CPQRPM-MIB", "cpqRPMTrapSeverity"), ("CPQRPM-MIB", "cpqRPMTrapURL"))
cpqRPMtrapUPSBypassOffManual = NotificationType((1, 3, 6, 1, 4, 1, 232, 154, 2) + (0,21060)).setObjects(("SNMPv2-MIB", "sysName"), ("CPQRPM-MIB", "cpqRPMTrapDescription"), ("CPQRPM-MIB", "cpqRPMTrapDeviceId"), ("CPQRPM-MIB", "cpqRPMTrapDeviceName"), ("CPQRPM-MIB", "cpqRPMTrapDeviceAddress"), ("CPQRPM-MIB", "cpqRPMTrapType"), ("CPQRPM-MIB", "cpqRPMTrapSeverity"), ("CPQRPM-MIB", "cpqRPMTrapURL"))
cpqRPMtrapUPSBypassONManual = NotificationType((1, 3, 6, 1, 4, 1, 232, 154, 2) + (0,21059)).setObjects(("SNMPv2-MIB", "sysName"), ("CPQRPM-MIB", "cpqRPMTrapDescription"), ("CPQRPM-MIB", "cpqRPMTrapDeviceId"), ("CPQRPM-MIB", "cpqRPMTrapDeviceName"), ("CPQRPM-MIB", "cpqRPMTrapDeviceAddress"), ("CPQRPM-MIB", "cpqRPMTrapType"), ("CPQRPM-MIB", "cpqRPMTrapSeverity"), ("CPQRPM-MIB", "cpqRPMTrapURL"))
cppqRPMtrapUPSStartedOnBatteryCleared = NotificationType((1, 3, 6, 1, 4, 1, 232, 154, 2) + (0,20152)).setObjects(("SNMPv2-MIB", "sysName"), ("CPQRPM-MIB", "cpqRPMTrapDescription"), ("CPQRPM-MIB", "cpqRPMTrapDeviceId"), ("CPQRPM-MIB", "cpqRPMTrapDeviceName"), ("CPQRPM-MIB", "cpqRPMTrapDeviceAddress"), ("CPQRPM-MIB", "cpqRPMTrapType"), ("CPQRPM-MIB", "cpqRPMTrapSeverity"), ("CPQRPM-MIB", "cpqRPMTrapURL"))
cppqRPMtrapUPSStartedOnBattery = NotificationType((1, 3, 6, 1, 4, 1, 232, 154, 2) + (0,20151)).setObjects(("SNMPv2-MIB", "sysName"), ("CPQRPM-MIB", "cpqRPMTrapDescription"), ("CPQRPM-MIB", "cpqRPMTrapDeviceId"), ("CPQRPM-MIB", "cpqRPMTrapDeviceName"), ("CPQRPM-MIB", "cpqRPMTrapDeviceAddress"), ("CPQRPM-MIB", "cpqRPMTrapType"), ("CPQRPM-MIB", "cpqRPMTrapSeverity"), ("CPQRPM-MIB", "cpqRPMTrapURL"))
cpqRPMtrapUPSInputOutofRangeCleared = NotificationType((1, 3, 6, 1, 4, 1, 232, 154, 2) + (0,21022)).setObjects(("SNMPv2-MIB", "sysName"), ("CPQRPM-MIB", "cpqRPMTrapDescription"), ("CPQRPM-MIB", "cpqRPMTrapDeviceId"), ("CPQRPM-MIB", "cpqRPMTrapDeviceName"), ("CPQRPM-MIB", "cpqRPMTrapDeviceAddress"), ("CPQRPM-MIB", "cpqRPMTrapType"), ("CPQRPM-MIB", "cpqRPMTrapSeverity"), ("CPQRPM-MIB", "cpqRPMTrapURL"))
cpqRPMtrapUPSInputOutofRange = NotificationType((1, 3, 6, 1, 4, 1, 232, 154, 2) + (0,21021)).setObjects(("SNMPv2-MIB", "sysName"), ("CPQRPM-MIB", "cpqRPMTrapDescription"), ("CPQRPM-MIB", "cpqRPMTrapDeviceId"), ("CPQRPM-MIB", "cpqRPMTrapDeviceName"), ("CPQRPM-MIB", "cpqRPMTrapDeviceAddress"), ("CPQRPM-MIB", "cpqRPMTrapType"), ("CPQRPM-MIB", "cpqRPMTrapSeverity"), ("CPQRPM-MIB", "cpqRPMTrapURL"))
cpqRPMtrapUPSTemperatureOutOfRange = NotificationType((1, 3, 6, 1, 4, 1, 232, 154, 2) + (0,21007)).setObjects(("SNMPv2-MIB", "sysName"), ("CPQRPM-MIB", "cpqRPMTrapDescription"), ("CPQRPM-MIB", "cpqRPMTrapDeviceId"), ("CPQRPM-MIB", "cpqRPMTrapDeviceName"), ("CPQRPM-MIB", "cpqRPMTrapDeviceAddress"), ("CPQRPM-MIB", "cpqRPMTrapType"), ("CPQRPM-MIB", "cpqRPMTrapSeverity"), ("CPQRPM-MIB", "cpqRPMTrapURL"))
cpqRPMtrapUPSTemperatureOutOfRangeCleared = NotificationType((1, 3, 6, 1, 4, 1, 232, 154, 2) + (0,21008)).setObjects(("SNMPv2-MIB", "sysName"), ("CPQRPM-MIB", "cpqRPMTrapDescription"), ("CPQRPM-MIB", "cpqRPMTrapDeviceId"), ("CPQRPM-MIB", "cpqRPMTrapDeviceName"), ("CPQRPM-MIB", "cpqRPMTrapDeviceAddress"), ("CPQRPM-MIB", "cpqRPMTrapType"), ("CPQRPM-MIB", "cpqRPMTrapSeverity"), ("CPQRPM-MIB", "cpqRPMTrapURL"))
cpqRPMtrapUPSBypassOnAuto = NotificationType((1, 3, 6, 1, 4, 1, 232, 154, 2) + (0,21047)).setObjects(("SNMPv2-MIB", "sysName"), ("CPQRPM-MIB", "cpqRPMTrapDescription"), ("CPQRPM-MIB", "cpqRPMTrapDeviceId"), ("CPQRPM-MIB", "cpqRPMTrapDeviceName"), ("CPQRPM-MIB", "cpqRPMTrapDeviceAddress"), ("CPQRPM-MIB", "cpqRPMTrapType"), ("CPQRPM-MIB", "cpqRPMTrapSeverity"), ("CPQRPM-MIB", "cpqRPMTrapURL"))
cpqRPMtrapUPSBypassOnAutoCleared = NotificationType((1, 3, 6, 1, 4, 1, 232, 154, 2) + (0,21048)).setObjects(("SNMPv2-MIB", "sysName"), ("CPQRPM-MIB", "cpqRPMTrapDescription"), ("CPQRPM-MIB", "cpqRPMTrapDeviceId"), ("CPQRPM-MIB", "cpqRPMTrapDeviceName"), ("CPQRPM-MIB", "cpqRPMTrapDeviceAddress"), ("CPQRPM-MIB", "cpqRPMTrapType"), ("CPQRPM-MIB", "cpqRPMTrapSeverity"), ("CPQRPM-MIB", "cpqRPMTrapURL"))
cpqRPMtrapUPSOutputoutofRange = NotificationType((1, 3, 6, 1, 4, 1, 232, 154, 2) + (0,21019)).setObjects(("SNMPv2-MIB", "sysName"), ("CPQRPM-MIB", "cpqRPMTrapDescription"), ("CPQRPM-MIB", "cpqRPMTrapDeviceId"), ("CPQRPM-MIB", "cpqRPMTrapDeviceName"), ("CPQRPM-MIB", "cpqRPMTrapDeviceAddress"), ("CPQRPM-MIB", "cpqRPMTrapType"), ("CPQRPM-MIB", "cpqRPMTrapSeverity"), ("CPQRPM-MIB", "cpqRPMTrapURL"))
cpqRPMTrapUPSBatteriesDisconnected = NotificationType((1, 3, 6, 1, 4, 1, 232, 154, 2) + (0,21053)).setObjects(("SNMPv2-MIB", "sysName"), ("CPQRPM-MIB", "cpqRPMTrapDescription"), ("CPQRPM-MIB", "cpqRPMTrapDeviceId"), ("CPQRPM-MIB", "cpqRPMTrapDeviceName"), ("CPQRPM-MIB", "cpqRPMTrapDeviceAddress"), ("CPQRPM-MIB", "cpqRPMTrapType"), ("CPQRPM-MIB", "cpqRPMTrapSeverity"), ("CPQRPM-MIB", "cpqRPMTrapURL"))
cpqRPMTrapUPSBatteriesDisconnectedCleared = NotificationType((1, 3, 6, 1, 4, 1, 232, 154, 2) + (0,21054)).setObjects(("SNMPv2-MIB", "sysName"), ("CPQRPM-MIB", "cpqRPMTrapDescription"), ("CPQRPM-MIB", "cpqRPMTrapDeviceId"), ("CPQRPM-MIB", "cpqRPMTrapDeviceName"), ("CPQRPM-MIB", "cpqRPMTrapDeviceAddress"), ("CPQRPM-MIB", "cpqRPMTrapType"), ("CPQRPM-MIB", "cpqRPMTrapSeverity"), ("CPQRPM-MIB", "cpqRPMTrapURL"))
cpqRPMTrapUPSBatteryDischarged = NotificationType((1, 3, 6, 1, 4, 1, 232, 154, 2) + (0,21057)).setObjects(("SNMPv2-MIB", "sysName"), ("CPQRPM-MIB", "cpqRPMTrapDescription"), ("CPQRPM-MIB", "cpqRPMTrapDeviceId"), ("CPQRPM-MIB", "cpqRPMTrapDeviceName"), ("CPQRPM-MIB", "cpqRPMTrapDeviceAddress"), ("CPQRPM-MIB", "cpqRPMTrapType"), ("CPQRPM-MIB", "cpqRPMTrapSeverity"), ("CPQRPM-MIB", "cpqRPMTrapURL"))
cpqRPMTrapUPSBatteryDischargedCleared = NotificationType((1, 3, 6, 1, 4, 1, 232, 154, 2) + (0,21058)).setObjects(("SNMPv2-MIB", "sysName"), ("CPQRPM-MIB", "cpqRPMTrapDescription"), ("CPQRPM-MIB", "cpqRPMTrapDeviceId"), ("CPQRPM-MIB", "cpqRPMTrapDeviceName"), ("CPQRPM-MIB", "cpqRPMTrapDeviceAddress"), ("CPQRPM-MIB", "cpqRPMTrapType"), ("CPQRPM-MIB", "cpqRPMTrapSeverity"), ("CPQRPM-MIB", "cpqRPMTrapURL"))
cpqRPMTrapUPSCheckBreaker = NotificationType((1, 3, 6, 1, 4, 1, 232, 154, 2) + (0,21041)).setObjects(("SNMPv2-MIB", "sysName"), ("CPQRPM-MIB", "cpqRPMTrapDescription"), ("CPQRPM-MIB", "cpqRPMTrapDeviceId"), ("CPQRPM-MIB", "cpqRPMTrapDeviceName"), ("CPQRPM-MIB", "cpqRPMTrapDeviceAddress"), ("CPQRPM-MIB", "cpqRPMTrapType"), ("CPQRPM-MIB", "cpqRPMTrapSeverity"), ("CPQRPM-MIB", "cpqRPMTrapURL"))
cpqRPMTrapUPSCheckBreakerCleared = NotificationType((1, 3, 6, 1, 4, 1, 232, 154, 2) + (0,21042)).setObjects(("SNMPv2-MIB", "sysName"), ("CPQRPM-MIB", "cpqRPMTrapDescription"), ("CPQRPM-MIB", "cpqRPMTrapDeviceId"), ("CPQRPM-MIB", "cpqRPMTrapDeviceName"), ("CPQRPM-MIB", "cpqRPMTrapDeviceAddress"), ("CPQRPM-MIB", "cpqRPMTrapType"), ("CPQRPM-MIB", "cpqRPMTrapSeverity"), ("CPQRPM-MIB", "cpqRPMTrapURL"))
cpqRPMTrapUPSEPOInitiated = NotificationType((1, 3, 6, 1, 4, 1, 232, 154, 2) + (0,21037)).setObjects(("SNMPv2-MIB", "sysName"), ("CPQRPM-MIB", "cpqRPMTrapDescription"), ("CPQRPM-MIB", "cpqRPMTrapDeviceId"), ("CPQRPM-MIB", "cpqRPMTrapDeviceName"), ("CPQRPM-MIB", "cpqRPMTrapDeviceAddress"), ("CPQRPM-MIB", "cpqRPMTrapType"), ("CPQRPM-MIB", "cpqRPMTrapSeverity"), ("CPQRPM-MIB", "cpqRPMTrapURL"))
cpqRPMTrapUPSManualLoadDumped = NotificationType((1, 3, 6, 1, 4, 1, 232, 154, 2) + (0,21033)).setObjects(("SNMPv2-MIB", "sysName"), ("CPQRPM-MIB", "cpqRPMTrapDescription"), ("CPQRPM-MIB", "cpqRPMTrapDeviceId"), ("CPQRPM-MIB", "cpqRPMTrapDeviceName"), ("CPQRPM-MIB", "cpqRPMTrapDeviceAddress"), ("CPQRPM-MIB", "cpqRPMTrapType"), ("CPQRPM-MIB", "cpqRPMTrapSeverity"), ("CPQRPM-MIB", "cpqRPMTrapURL"))
cpqRPMTrapUPSManualLoadDumpedCleared = NotificationType((1, 3, 6, 1, 4, 1, 232, 154, 2) + (0,21034)).setObjects(("SNMPv2-MIB", "sysName"), ("CPQRPM-MIB", "cpqRPMTrapDescription"), ("CPQRPM-MIB", "cpqRPMTrapDeviceId"), ("CPQRPM-MIB", "cpqRPMTrapDeviceName"), ("CPQRPM-MIB", "cpqRPMTrapDeviceAddress"), ("CPQRPM-MIB", "cpqRPMTrapType"), ("CPQRPM-MIB", "cpqRPMTrapSeverity"), ("CPQRPM-MIB", "cpqRPMTrapURL"))
cpqRPMTrapUPSCabinetDoorOpen = NotificationType((1, 3, 6, 1, 4, 1, 232, 154, 2) + (0,21045)).setObjects(("SNMPv2-MIB", "sysName"), ("CPQRPM-MIB", "cpqRPMTrapDescription"), ("CPQRPM-MIB", "cpqRPMTrapDeviceId"), ("CPQRPM-MIB", "cpqRPMTrapDeviceName"), ("CPQRPM-MIB", "cpqRPMTrapDeviceAddress"), ("CPQRPM-MIB", "cpqRPMTrapType"), ("CPQRPM-MIB", "cpqRPMTrapSeverity"), ("CPQRPM-MIB", "cpqRPMTrapURL"))
cpqRPMTrapUPSCabinetDoorOpenCleared = NotificationType((1, 3, 6, 1, 4, 1, 232, 154, 2) + (0,21046)).setObjects(("SNMPv2-MIB", "sysName"), ("CPQRPM-MIB", "cpqRPMTrapDescription"), ("CPQRPM-MIB", "cpqRPMTrapDeviceId"), ("CPQRPM-MIB", "cpqRPMTrapDeviceName"), ("CPQRPM-MIB", "cpqRPMTrapDeviceAddress"), ("CPQRPM-MIB", "cpqRPMTrapType"), ("CPQRPM-MIB", "cpqRPMTrapSeverity"), ("CPQRPM-MIB", "cpqRPMTrapURL"))
cpqRPMTrapUPSFanFailure = NotificationType((1, 3, 6, 1, 4, 1, 232, 154, 2) + (0,21035)).setObjects(("SNMPv2-MIB", "sysName"), ("CPQRPM-MIB", "cpqRPMTrapDescription"), ("CPQRPM-MIB", "cpqRPMTrapDeviceId"), ("CPQRPM-MIB", "cpqRPMTrapDeviceName"), ("CPQRPM-MIB", "cpqRPMTrapDeviceAddress"), ("CPQRPM-MIB", "cpqRPMTrapType"), ("CPQRPM-MIB", "cpqRPMTrapSeverity"), ("CPQRPM-MIB", "cpqRPMTrapURL"))
cpqRPMTrapUPSFanFailureCleared = NotificationType((1, 3, 6, 1, 4, 1, 232, 154, 2) + (0,21036)).setObjects(("SNMPv2-MIB", "sysName"), ("CPQRPM-MIB", "cpqRPMTrapDescription"), ("CPQRPM-MIB", "cpqRPMTrapDeviceId"), ("CPQRPM-MIB", "cpqRPMTrapDeviceName"), ("CPQRPM-MIB", "cpqRPMTrapDeviceAddress"), ("CPQRPM-MIB", "cpqRPMTrapType"), ("CPQRPM-MIB", "cpqRPMTrapSeverity"), ("CPQRPM-MIB", "cpqRPMTrapURL"))
cpqRPMTrapUPSLossOfRedundancy = NotificationType((1, 3, 6, 1, 4, 1, 232, 154, 2) + (0,21023)).setObjects(("SNMPv2-MIB", "sysName"), ("CPQRPM-MIB", "cpqRPMTrapDescription"), ("CPQRPM-MIB", "cpqRPMTrapDeviceId"), ("CPQRPM-MIB", "cpqRPMTrapDeviceName"), ("CPQRPM-MIB", "cpqRPMTrapDeviceAddress"), ("CPQRPM-MIB", "cpqRPMTrapType"), ("CPQRPM-MIB", "cpqRPMTrapSeverity"), ("CPQRPM-MIB", "cpqRPMTrapURL"))
cpqRPMTrapUPSLossOfRedundancyCleared = NotificationType((1, 3, 6, 1, 4, 1, 232, 154, 2) + (0,21024)).setObjects(("SNMPv2-MIB", "sysName"), ("CPQRPM-MIB", "cpqRPMTrapDescription"), ("CPQRPM-MIB", "cpqRPMTrapDeviceId"), ("CPQRPM-MIB", "cpqRPMTrapDeviceName"), ("CPQRPM-MIB", "cpqRPMTrapDeviceAddress"), ("CPQRPM-MIB", "cpqRPMTrapType"), ("CPQRPM-MIB", "cpqRPMTrapSeverity"), ("CPQRPM-MIB", "cpqRPMTrapURL"))
cpqRPMTrapUPSShutdownImminent = NotificationType((1, 3, 6, 1, 4, 1, 232, 154, 2) + (0,21013)).setObjects(("SNMPv2-MIB", "sysName"), ("CPQRPM-MIB", "cpqRPMTrapDescription"), ("CPQRPM-MIB", "cpqRPMTrapDeviceId"), ("CPQRPM-MIB", "cpqRPMTrapDeviceName"), ("CPQRPM-MIB", "cpqRPMTrapDeviceAddress"), ("CPQRPM-MIB", "cpqRPMTrapType"), ("CPQRPM-MIB", "cpqRPMTrapSeverity"), ("CPQRPM-MIB", "cpqRPMTrapURL"))
cpqRPMTrapUPSShutdownImminentCleared = NotificationType((1, 3, 6, 1, 4, 1, 232, 154, 2) + (0,21014)).setObjects(("SNMPv2-MIB", "sysName"), ("CPQRPM-MIB", "cpqRPMTrapDescription"), ("CPQRPM-MIB", "cpqRPMTrapDeviceId"), ("CPQRPM-MIB", "cpqRPMTrapDeviceName"), ("CPQRPM-MIB", "cpqRPMTrapDeviceAddress"), ("CPQRPM-MIB", "cpqRPMTrapType"), ("CPQRPM-MIB", "cpqRPMTrapSeverity"), ("CPQRPM-MIB", "cpqRPMTrapURL"))
cpqRPMTrapUPSShutdownPending = NotificationType((1, 3, 6, 1, 4, 1, 232, 154, 2) + (0,21011)).setObjects(("SNMPv2-MIB", "sysName"), ("CPQRPM-MIB", "cpqRPMTrapDescription"), ("CPQRPM-MIB", "cpqRPMTrapDeviceId"), ("CPQRPM-MIB", "cpqRPMTrapDeviceName"), ("CPQRPM-MIB", "cpqRPMTrapDeviceAddress"), ("CPQRPM-MIB", "cpqRPMTrapType"), ("CPQRPM-MIB", "cpqRPMTrapSeverity"), ("CPQRPM-MIB", "cpqRPMTrapURL"))
cpqRPMTrapUPSShutdownPendingCleared = NotificationType((1, 3, 6, 1, 4, 1, 232, 154, 2) + (0,21012)).setObjects(("SNMPv2-MIB", "sysName"), ("CPQRPM-MIB", "cpqRPMTrapDescription"), ("CPQRPM-MIB", "cpqRPMTrapDeviceId"), ("CPQRPM-MIB", "cpqRPMTrapDeviceName"), ("CPQRPM-MIB", "cpqRPMTrapDeviceAddress"), ("CPQRPM-MIB", "cpqRPMTrapType"), ("CPQRPM-MIB", "cpqRPMTrapSeverity"), ("CPQRPM-MIB", "cpqRPMTrapURL"))
cpqRPMTestTrap = NotificationType((1, 3, 6, 1, 4, 1, 232, 154, 2) + (0,50001)).setObjects(("SNMPv2-MIB", "sysName"), ("CPQRPM-MIB", "cpqRPMTrapDescription"), ("CPQRPM-MIB", "cpqRPMTrapDeviceId"), ("CPQRPM-MIB", "cpqRPMTrapDeviceName"), ("CPQRPM-MIB", "cpqRPMTrapDeviceAddress"), ("CPQRPM-MIB", "cpqRPMTrapType"), ("CPQRPM-MIB", "cpqRPMTrapSeverity"))
cpqPMTrapCritical = NotificationType((1, 3, 6, 1, 4, 1, 232, 154, 3) + (0,1)).setObjects(("SNMPv2-MIB", "sysName"), ("CPQRPM-MIB", "cpqRPMTrapCode"), ("CPQRPM-MIB", "cpqRPMTrapDescription"), ("CPQRPM-MIB", "cpqRPMTrapDeviceName"), ("CPQRPM-MIB", "cpqRPMTrapURL"))
cpqPMTrapWarning = NotificationType((1, 3, 6, 1, 4, 1, 232, 154, 3) + (0,2)).setObjects(("SNMPv2-MIB", "sysName"), ("CPQRPM-MIB", "cpqRPMTrapCode"), ("CPQRPM-MIB", "cpqRPMTrapDescription"), ("CPQRPM-MIB", "cpqRPMTrapDeviceName"), ("CPQRPM-MIB", "cpqRPMTrapURL"))
cpqPMTrapInformation = NotificationType((1, 3, 6, 1, 4, 1, 232, 154, 3) + (0,3)).setObjects(("SNMPv2-MIB", "sysName"), ("CPQRPM-MIB", "cpqRPMTrapCode"), ("CPQRPM-MIB", "cpqRPMTrapDescription"), ("CPQRPM-MIB", "cpqRPMTrapDeviceName"), ("CPQRPM-MIB", "cpqRPMTrapURL"))
cpqPMTrapCleared = NotificationType((1, 3, 6, 1, 4, 1, 232, 154, 3) + (0,4)).setObjects(("SNMPv2-MIB", "sysName"), ("CPQRPM-MIB", "cpqRPMTrapCode"), ("CPQRPM-MIB", "cpqRPMTrapDescription"), ("CPQRPM-MIB", "cpqRPMTrapDeviceName"), ("CPQRPM-MIB", "cpqRPMTrapURL"))
mibBuilder.exportSymbols("CPQRPM-MIB", cpqRPMTrapCMCLockset1Normal=cpqRPMTrapCMCLockset1Normal, cpqRPM=cpqRPM, cpqRPMTrapDeviceId=cpqRPMTrapDeviceId, cpqRPMTrapUPSBypassNotAvailableCleared=cpqRPMTrapUPSBypassNotAvailableCleared, cpqRPMTrapUPSManualLoadDumpedCleared=cpqRPMTrapUPSManualLoadDumpedCleared, cpqRPMTrapUPSBatteryLevelNormal=cpqRPMTrapUPSBatteryLevelNormal, cpqRPMTrapUPSShutdownPending=cpqRPMTrapUPSShutdownPending, cpqRPMTrapUPSInternalFailureCleared=cpqRPMTrapUPSInternalFailureCleared, cpqRPMTrapCMCLockset2Unlocked=cpqRPMTrapCMCLockset2Unlocked, cpqRPMtrapUPSBypassOnAuto=cpqRPMtrapUPSBypassOnAuto, cpqRPMtrapUPSBypassOnAutoCleared=cpqRPMtrapUPSBypassOnAutoCleared, cpqRPMTrapUPSDiagnosticTestSucceeded=cpqRPMTrapUPSDiagnosticTestSucceeded, cpqRPMTrapCMCVoltUnder=cpqRPMTrapCMCVoltUnder, cpqRPMTrapCMCTemp2Normal=cpqRPMTrapCMCTemp2Normal, cpqRPMTrapCMCTemp2AboveWarn=cpqRPMTrapCMCTemp2AboveWarn, cpqRPMtrapUPSOutputoutofRange=cpqRPMtrapUPSOutputoutofRange, cpqRPMTrapUPSDCStartOccurred=cpqRPMTrapUPSDCStartOccurred, cpqRPMTrapCMCSmokeCleared=cpqRPMTrapCMCSmokeCleared, cpqRPMTrapCMCTemp2AboveMax=cpqRPMTrapCMCTemp2AboveMax, cpqRPMTrapUPSInputVoltageAboveMax=cpqRPMTrapUPSInputVoltageAboveMax, cpqRPMTrapUPSBatteryLow=cpqRPMTrapUPSBatteryLow, cpqRPMTrapCMCVoltOver=cpqRPMTrapCMCVoltOver, cpqRPMTrapUPSBatteryDepleted=cpqRPMTrapUPSBatteryDepleted, cpqRPMTrapUPSShutdownImminent=cpqRPMTrapUPSShutdownImminent, cpqPMTraps=cpqPMTraps, cpqRPMTrapUPSBatteryLowCleared=cpqRPMTrapUPSBatteryLowCleared, cpqRPMTrapCMCInput4Opened=cpqRPMTrapCMCInput4Opened, cpqRPMTrapCMCInput4Closed=cpqRPMTrapCMCInput4Closed, cpqRPMTrapUPSOnBuck=cpqRPMTrapUPSOnBuck, cpqRPMTrapCMCAux1Alarm=cpqRPMTrapCMCAux1Alarm, cpqRPMTrapUPSOnBypass=cpqRPMTrapUPSOnBypass, cpqRPMTrapUPSOnBoost=cpqRPMTrapUPSOnBoost, cpqRPMTrapUPSUtilityNotPresent=cpqRPMTrapUPSUtilityNotPresent, cpqRPMTrapUPSLossOfRedundancyCleared=cpqRPMTrapUPSLossOfRedundancyCleared, cpqRPMTrapUPSBypassNotAvailable=cpqRPMTrapUPSBypassNotAvailable, cpqRPMTrapCMCLockset2FailedToLock=cpqRPMTrapCMCLockset2FailedToLock, cpqRPMTrapUPSCabinetDoorOpenCleared=cpqRPMTrapUPSCabinetDoorOpenCleared, cpqRPMTrapCMCTemp2BelowMin=cpqRPMTrapCMCTemp2BelowMin, cpqRPMTrapUPSInternalFailure=cpqRPMTrapUPSInternalFailure, cpqPMTrapCritical=cpqPMTrapCritical, cpqRPMTrapUPSBypassManualTurnedOn=cpqRPMTrapUPSBypassManualTurnedOn, cpqRPMTrapCMCLockset1Unlocked=cpqRPMTrapCMCLockset1Unlocked, cpqRPMTrapCMCInput2Opened=cpqRPMTrapCMCInput2Opened, cpqRPMTrapUPSBatteriesDisconnectedCleared=cpqRPMTrapUPSBatteriesDisconnectedCleared, cpqRPMTrapCode=cpqRPMTrapCode, cpqRPMTrapUPSDiagnosticTestFailed=cpqRPMTrapUPSDiagnosticTestFailed, cpqRPMTrapCMCTemp1AboveMax=cpqRPMTrapCMCTemp1AboveMax, cpqRPMTrapUPSShutdownImminentCleared=cpqRPMTrapUPSShutdownImminentCleared, cpqRPMTrapDeviceConnected=cpqRPMTrapDeviceConnected, cpqRPMTrapUPSOutputVoltageBelowMin=cpqRPMTrapUPSOutputVoltageBelowMin, cpqRPMTrapUPSBatteryFailure=cpqRPMTrapUPSBatteryFailure, cpqRPMTrapDeviceName=cpqRPMTrapDeviceName, cpqRPMTrapCMCAux2Alarm=cpqRPMTrapCMCAux2Alarm, cpqRPMtrapUPSInputOutofRange=cpqRPMtrapUPSInputOutofRange, cpqRPMTrapConnectionLost=cpqRPMTrapConnectionLost, cpqRPMTrapCMCInput3Closed=cpqRPMTrapCMCInput3Closed, cpqRPMTestTrap=cpqRPMTestTrap, cpqRPMTrapUPSOutputVoltageAboveMax=cpqRPMTrapUPSOutputVoltageAboveMax, cpqRPMTrapUPSUtilityNotPresentCleared=cpqRPMTrapUPSUtilityNotPresentCleared, cpqRPMTrapUPSManualLoadDumped=cpqRPMTrapUPSManualLoadDumped, cpqRPMtrapUPSBypassOffManual=cpqRPMtrapUPSBypassOffManual, 
cpqRPMtrapUPSTemperatureOutOfRange=cpqRPMtrapUPSTemperatureOutOfRange, cpqRPMTrapConnectionFailed=cpqRPMTrapConnectionFailed, cpqRPMTrapCMCInput3Opened=cpqRPMTrapCMCInput3Opened, cpqPMTrapCleared=cpqPMTrapCleared, cpqRPMTrapCMCHmdtOver=cpqRPMTrapCMCHmdtOver, cpqRPMTrapCMCShockDetected=cpqRPMTrapCMCShockDetected, cpqRPMTrapUPSFanFailureCleared=cpqRPMTrapUPSFanFailureCleared, cpqRPMTrapUPSBypassManualTurnedOff=cpqRPMTrapUPSBypassManualTurnedOff, cpqRPMTrapSeverity=cpqRPMTrapSeverity, cpqRPMTrapUPSTemperatureNormal=cpqRPMTrapUPSTemperatureNormal, cpqRPMtrapUPSBypassONManual=cpqRPMtrapUPSBypassONManual, cpqRPMTrapUPSSiteWiringFault=cpqRPMTrapUPSSiteWiringFault, cpqRPMTrapUPSLossOfRedundancy=cpqRPMTrapUPSLossOfRedundancy, cpqRPMTrapUPSShutdownPendingCleared=cpqRPMTrapUPSShutdownPendingCleared, cpqRPMTrapCMCSmokeDetected=cpqRPMTrapCMCSmokeDetected, cpqRPMTrapCMCAux2Cleared=cpqRPMTrapCMCAux2Cleared, cpqRPMTrapDeviceSettingsChanged=cpqRPMTrapDeviceSettingsChanged, cpqRPMTrapUPSCabinetDoorOpen=cpqRPMTrapUPSCabinetDoorOpen, cpqRPMTrapUPSOutputOverload=cpqRPMTrapUPSOutputOverload, cpqRPMTrapCMCVoltNormal=cpqRPMTrapCMCVoltNormal, cpqRPMTrapCMCLockset2Locked=cpqRPMTrapCMCLockset2Locked, cpqRPMTrapLookupFailed=cpqRPMTrapLookupFailed, cpqPMTrapInformation=cpqPMTrapInformation, cpqRPMTrapCMCHmdtUnder=cpqRPMTrapCMCHmdtUnder, cpqRPMTrapUPSBatteryDischargedCleared=cpqRPMTrapUPSBatteryDischargedCleared, cpqRPMTrapUPSCheckBreaker=cpqRPMTrapUPSCheckBreaker, cpqRPMTrapUPSSiteWiringNormal=cpqRPMTrapUPSSiteWiringNormal, cpqRPMTrapDescription=cpqRPMTrapDescription, cppqRPMtrapUPSStartedOnBattery=cppqRPMtrapUPSStartedOnBattery, cpqRPMTrapCMCLockset2Error=cpqRPMTrapCMCLockset2Error, cpqRPMTrapUPSInputVoltageBelowMin=cpqRPMTrapUPSInputVoltageBelowMin, cpqRPMTraps=cpqRPMTraps, cpqRPMTrapUPSCheckBreakerCleared=cpqRPMTrapUPSCheckBreakerCleared, cpqRPMTrapInfo=cpqRPMTrapInfo, cpqRPMTrapType=cpqRPMTrapType, cpqPMTrapWarning=cpqPMTrapWarning, cpqRPMTrapUPSEPOInitiated=cpqRPMTrapUPSEPOInitiated, cpqRPMTrapCMCLockset1FailedToLock=cpqRPMTrapCMCLockset1FailedToLock, cpqRPMTrapUPSOnBattery=cpqRPMTrapUPSOnBattery, cpqRPMTrapURL=cpqRPMTrapURL, cpqRPMTrapCMCHmdtNormal=cpqRPMTrapCMCHmdtNormal, cpqRPMTrapCMCShockCleared=cpqRPMTrapCMCShockCleared, cpqRPMtrapUPSTemperatureOutOfRangeCleared=cpqRPMtrapUPSTemperatureOutOfRangeCleared, cpqRPMTrapCMCLockset1Error=cpqRPMTrapCMCLockset1Error, cpqRPMTrapUPSInputVoltageNormal=cpqRPMTrapUPSInputVoltageNormal, cpqRPMTrapCMCTemp1BelowMin=cpqRPMTrapCMCTemp1BelowMin, cpqRPMTrapUPSOutputVoltageNormal=cpqRPMTrapUPSOutputVoltageNormal, cpqRPMtrapUPSInputOutofRangeCleared=cpqRPMtrapUPSInputOutofRangeCleared, cpqRPMTrapCMCTemp1Normal=cpqRPMTrapCMCTemp1Normal, cpqRPMTrapCMCTemp1AboveWarn=cpqRPMTrapCMCTemp1AboveWarn, cppqRPMtrapUPSStartedOnBatteryCleared=cppqRPMtrapUPSStartedOnBatteryCleared, cpqRPMTrapUPSTemperatureHigh=cpqRPMTrapUPSTemperatureHigh, cpqRPMTrapUPSOutputOverloadCleared=cpqRPMTrapUPSOutputOverloadCleared, cpqRPMTrapCMCInput1Closed=cpqRPMTrapCMCInput1Closed, cpqRPMTrapCMCAux1Cleared=cpqRPMTrapCMCAux1Cleared, cpqRPMTrapUPSUtilityFailCleared=cpqRPMTrapUPSUtilityFailCleared, cpqRPMTrapUPSBatteryDischarged=cpqRPMTrapUPSBatteryDischarged, cpqRPMTrapCMCLockset1Locked=cpqRPMTrapCMCLockset1Locked, cpqRPMTrapUPSBatteryFailureCleared=cpqRPMTrapUPSBatteryFailureCleared, cpqRPMTrapCMCInput2Closed=cpqRPMTrapCMCInput2Closed, cpqRPMTrapUPSUtilityFail=cpqRPMTrapUPSUtilityFail, cpqRPMTrapUPSDCStartOccurredCleared=cpqRPMTrapUPSDCStartOccurredCleared, cpqRPMTrapCMCLockset2Normal=cpqRPMTrapCMCLockset2Normal, 
cpqRPMTrapUPSFanFailure=cpqRPMTrapUPSFanFailure, cpqRPMTrapUPSOnUtilityPower=cpqRPMTrapUPSOnUtilityPower, cpqRPMTrapUPSInputUnderOverFreq=cpqRPMTrapUPSInputUnderOverFreq, cpqRPMTrapUPSBatteriesDisconnected=cpqRPMTrapUPSBatteriesDisconnected, cpqRPMTrapUPSInputUnderOverFreqCleared=cpqRPMTrapUPSInputUnderOverFreqCleared, cpqRPMTrapUPSTemperatureLow=cpqRPMTrapUPSTemperatureLow, cpqRPMTrapCMCInput1Opened=cpqRPMTrapCMCInput1Opened, cpqRPMTrapDeviceAddress=cpqRPMTrapDeviceAddress)
| [
"[email protected]"
] | |
cc2c7d03ef3262a407a8d964d244a638cf7c6819 | ce71f783e00a82de64a0a80fe039beedc3ae4bab | /xing_ji/asgi.py | 6e85d1fe695c562046b21770cd85ee83bce7e9ac | [] | no_license | bayhax/xing_ji | d4105c55d87d8f885afe96fcff7aa905a91ca9bb | 239df41e130f84ee9c76b93fe56567ddbcc960b5 | refs/heads/master | 2023-02-07T20:30:35.923898 | 2020-12-30T07:19:28 | 2020-12-30T07:19:28 | 324,749,551 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 391 | py | """
ASGI config for xing_ji project.
It exposes the ASGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/3.1/howto/deployment/asgi/
"""
import os
from django.core.asgi import get_asgi_application
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'xing_ji.settings')
application = get_asgi_application()
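# Deployment note: with an ASGI server such as Uvicorn or Daphne installed,
# this application can typically be served with a command along the lines of
#     uvicorn xing_ji.asgi:application
# The exact invocation depends on the server chosen; see the Django docs
# linked above.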
| [
"[email protected]"
] | |
60f84921356470e29c35b00e56e242c0de1d90e8 | 1a6dd921861b56a0fb856856c6b318a24daf98d8 | /13 раздел/Задача N.py | 5b8663536888baef73200e665cd9ace5cdc83e17 | [
"WTFPL"
] | permissive | AlexeyZavar/informatics_solutions | cfd64529b1acd617d51315ba85b53c4d4f883471 | 66df4374b971b44a0165ad0c34b307a0b9f5fa15 | refs/heads/master | 2022-04-14T02:52:01.258939 | 2020-04-18T22:01:09 | 2020-04-18T22:01:09 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 562 | py | # A string is given that may contain spaces. Determine the number of words in the string. A word is a run of consecutive letters of the Latin alphabet (either uppercase or lowercase).
# (Given a string that may contain spaces, determine the number of words in it; a word is a run of consecutive Latin letters, either uppercase or lowercase.)
import re as regular_ex
def removeTrash(s):
l1 = s.count('-')
for i in range(l1):
del s[s.index('-')]
return s
s = input()
r = r"[A-Za-z]+"
t = removeTrash(regular_ex.findall(r, s))
l = len(t)
print(l)
| [
"[email protected]"
] | |
6aca5bca8820467d965afb4f15a33b9baf041b4d | 12fc0aa145792bc0175cb369758111321706ea15 | /test/arthash/journals/integration_test.py | 26decd44e641d04c4815eb5779a809acd9e37d2f | [
"LicenseRef-scancode-free-unknown",
"Artistic-2.0"
] | permissive | arthash/arthash | 3d39f412e1fafcaccf31c62c658d542a03a4b0d0 | 8d59700308cedf3b62118ed021064b677408edeb | refs/heads/master | 2021-01-15T08:47:17.073225 | 2018-07-24T14:07:56 | 2018-07-24T14:24:50 | 59,588,339 | 2 | 1 | Artistic-2.0 | 2018-06-24T16:59:44 | 2016-05-24T16:06:17 | Python | UTF-8 | Python | false | false | 4,310 | py | import datetime, json, os, random, shutil, sys, tempfile, unittest, zipfile
from unittest.mock import patch
from pyfakefs.fake_filesystem_unittest import TestCase
from arthash.journals import keeper, organization
from arthash.util import hasher
# TODO: I'm disabling this test because it really only tests
# the most boring part of this - writing things in journals.
#
# My plan is to create a proper integration test which fires up a webserver,
# runs the client and does some (simulated) cryptography, and then tests the
# results - and then I'll reuse this code.
class IntegrationTest: # (unittest.TestCase): disabled
def do_test(self, i):
errors = list(Reader(i).test())
self.assertEqual(errors, [])
def test_0(self):
self.do_test(0)
def test_1(self):
self.do_test(1)
def test_2(self):
self.do_test(2)
def test_3(self):
self.do_test(3)
class IntegrationDesc:
TESTS = (
(18, 2, 2),
(360, 4, 3),
(180, 2, 4),
(1100, 256, 4),
)
BASE_DIR = os.path.dirname(__file__)
JOURNAL_DIR = os.path.join(BASE_DIR, 'journals', 'journal')
ZIP_FORMAT = JOURNAL_DIR + '-{count}-{org.page_size}-{org.levels}.zip'
TIME_DELTA = datetime.timedelta(seconds=2)
TIMESTAMP = datetime.datetime(2018, 7, 6)
HEX = '0123456789abcdef'
def __init__(self, count, page_size, levels):
self.temp_dir = tempfile.TemporaryDirectory()
self.directory = self.temp_dir.name
self.count = count
self.org = organization.Organization(page_size, levels)
self.time = self.TIMESTAMP
@property
def zipname(self):
return self.ZIP_FORMAT.format(**vars(self))
def add_hashes(self):
hf = keeper.Keeper(self.directory, self.org)
random.seed(0)
with patch('arthash.journals.keeper.timestamp') as timestamp:
timestamp.side_effect = self.timestamp
for i in range(int(self.count)):
hf.add_record(arthash=self.random_hash())
def random_hash(self):
return ''.join(self.HEX[random.randrange(16)] for i in range(64))
def timestamp(self):
self.time += self.TIME_DELTA
return self.time.isoformat()
class Writer(IntegrationDesc):
def write(self):
self.add_hashes()
zpf = zipfile.ZipFile(self.zipname, 'w', zipfile.ZIP_DEFLATED)
for rel_path in hasher.walk(self.directory):
abs_path = os.path.join(self.directory, rel_path)
zpf.write(abs_path, rel_path)
print('Wrote', self.count, 'hashes to', self.zipname)
@classmethod
def write_all(cls):
for test in cls.TESTS:
cls(*test).write()
class Reader(IntegrationDesc):
def __init__(self, i):
super().__init__(*self.TESTS[i])
def test(self):
# Yield a series of error messages.
self.add_hashes()
zpf = zipfile.ZipFile(self.zipname)
actual_names = set(hasher.walk(self.directory))
zip_names = set(zpf.namelist())
az, za = actual_names - zip_names, zip_names - actual_names
for name in actual_names - zip_names:
yield 'Name %s was unknown' % name
for name in zip_names - actual_names:
yield 'Name %s was missing' % name
for name in sorted(set(actual_names) & set(zip_names)):
expected = zpf.open(name).read().decode()
actual_name = os.path.join(self.directory, name)
actual = open(actual_name).read()
if actual != expected:
error = BAD_CONTENTS_ERROR.format(**locals())
print(error)
yield error
def write(self):
self.add_hashes()
zpf = zipfile.ZipFile(self.zipname, 'w', zipfile.ZIP_DEFLATED)
for rel_path in hasher.walk(self.directory):
abs_path = os.path.join(self.directory, rel_path)
zpf.write(abs_path, rel_path)
print('Wrote', self.count, 'hashes to', self.zipname)
@classmethod
def write_all(cls):
for test in cls.TESTS:
cls(*test).write()
BAD_CONTENTS_ERROR = """\
Contents differed for {name}:
Actual:
----
{actual}
----
Expected:
----
{expected}
"""
if __name__ == '__main__':
Writer.write_all()
| [
"[email protected]"
] | |
8d75649c23883d73101fb513de73c707150077c3 | ce196aba0adde47ea2767eae1d7983a1ef548bb8 | /Leg_102手舞足不蹈.py | 3faf9fd6df3bed1e64479965756f94af4c16b2b6 | [] | no_license | xiang-daode/Python3_codes | 5d2639ffd5d65065b98d029e79b8f3608a37cf0b | 06c64f85ce2c299aef7f9311e9473e0203a05b09 | refs/heads/main | 2023-08-30T14:59:55.123128 | 2021-11-03T05:12:24 | 2021-11-03T05:12:24 | 333,632,892 | 0 | 2 | null | null | null | null | UTF-8 | Python | false | false | 956 | py | # Write your code here :-)
from turtle import *
from math import *
from time import *
#============
def rot(x,y,a,b,L1,L2):
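    # Draw one jointed limb: jump to the shoulder at (x, y+100), then draw two
    # segments of length L1 and L2 (turning left by a and b degrees) and a short
    # hand segment of length L2/4 before lifting the pen again.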
pu();goto(x,y+100); pd();
lt(a);fd(L1);lt(b);fd(L2);lt(b*0.7);fd(L2/4);pu();
#============
def main():
for k in range(10):
for g in range(-30,30,1):
a=g;b=1.5*g
clear();
#body:
pensize(38);pencolor(.5,0.2,0.8)
pu();goto(0,-20);pd();goto(0,100);pu()
#head:
pensize(28);pencolor(.5,0.1,0.3)
pu();goto(0,132);pd();goto(0,146);pu()
#arm:
pensize(8)
home();rot(18,0,a,b,80,70)
home();rot(-18,0,180-a,-b,80,70)
#leg:
pensize(18)
pu();goto(12,-30);pd();goto(38,-200);goto(58,-200);
pu();goto(-12,-30);pd();goto(-38,-200);goto(-58,-200);
sleep(0.01)
update()
#===========
tracer(False);ht();main();
| [
"[email protected]"
] | |
036096fb4d71887e767eedfe7845b4e4f45ff8cb | 57c37b797a36501d22e499d76f1e1adb99d9f087 | /hrp/MultiAgentAssignment3-master/slam.py | b20120e8dd9c959a3c37ee586978498db2df9e54 | [] | no_license | agneevguin/husqvarna_V0 | 375d51880ebf5dd99924400e7b528c370dfab105 | b00f12ac1f05593def06ecee7b4069a0ce330b46 | refs/heads/master | 2021-04-15T03:56:27.446501 | 2016-06-02T21:54:56 | 2016-06-02T21:54:56 | 60,220,492 | 2 | 1 | null | null | null | null | UTF-8 | Python | false | false | 25,502 | py | #SLAM: Simultaneous Localization and Mapping
#Builds 2-D, 3D maps
#Algorithm: Graph SLAM
# SLAM in a 2 dimensional
# world. Please define a function, slam, which takes five
# parameters as input and returns the vector mu. This vector
# should have x, y coordinates interlaced, so for example,
# if there were 2 poses and 2 landmarks, mu would look like:
#
# mu = matrix([[Px0],
# [Py0],
# [Px1],
# [Py1],
# [Lx0],
# [Ly0],
# [Lx1],
# [Ly1]])
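#      (pose i therefore occupies rows 2*i and 2*i+1 of mu, and landmark j
#       occupies rows 2*(N+j) and 2*(N+j)+1 -- the same indexing that
#       print_result below uses to unpack the vector)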
#
# data - This is the data that is generated with the included
# make_data function. You can also use test_data to
# make sure your function gives the correct result.
#
# N - The number of time steps.
#
# num_landmarks - The number of landmarks.
#
# motion_noise - The noise associated with motion. The update
# strength for motion should be 1.0 / motion_noise.
#
# measurement_noise - The noise associated with measurement.
# The update strength for measurement should be
# 1.0 / measurement_noise.
#
#
# --------------
# Testing
#
# Uncomment the test cases at the bottom of this document.
# Your output should be identical to the given results.
from math import *
import random
#===============================================================
#
# SLAM in a rectilinear world (we avoid non-linearities)
#
#
#===============================================================
# ------------------------------------------------
#
# this is the matrix class
# we use it because it makes it easier to collect constraints in GraphSLAM
# and to calculate solutions (albeit inefficiently)
#
class matrix:
# implements basic operations of a matrix class
# ------------
#
# initialization - can be called with an initial matrix
#
def __init__(self, value = [[]]):
self.value = value
self.dimx = len(value)
self.dimy = len(value[0])
if value == [[]]:
self.dimx = 0
# ------------
#
# makes matrix of a certain size and sets each element to zero
#
def zero(self, dimx, dimy):
if dimy == 0:
dimy = dimx
# check if valid dimensions
if dimx < 1 or dimy < 1:
raise ValueError, "Invalid size of matrix"
else:
self.dimx = dimx
self.dimy = dimy
self.value = [[0.0 for row in range(dimy)] for col in range(dimx)]
# ------------
#
# makes matrix of a certain (square) size and turns matrix into identity matrix
#
def identity(self, dim):
# check if valid dimension
if dim < 1:
raise ValueError, "Invalid size of matrix"
else:
self.dimx = dim
self.dimy = dim
self.value = [[0.0 for row in range(dim)] for col in range(dim)]
for i in range(dim):
self.value[i][i] = 1.0
# ------------
#
# prints out values of matrix
#
def show(self, txt = ''):
for i in range(len(self.value)):
print txt + '['+ ', '.join('%.3f'%x for x in self.value[i]) + ']'
print ' '
# ------------
#
    # defines element-wise matrix addition. Both matrices must be of equal dimensions
#
def __add__(self, other):
# check if correct dimensions
if self.dimx != other.dimx or self.dimx != other.dimx:
raise ValueError, "Matrices must be of equal dimension to add"
else:
# add if correct dimensions
res = matrix()
res.zero(self.dimx, self.dimy)
for i in range(self.dimx):
for j in range(self.dimy):
res.value[i][j] = self.value[i][j] + other.value[i][j]
return res
# ------------
#
    # defines element-wise matrix subtraction. Both matrices must be of equal dimensions
#
def __sub__(self, other):
# check if correct dimensions
if self.dimx != other.dimx or self.dimx != other.dimx:
raise ValueError, "Matrices must be of equal dimension to subtract"
else:
# subtract if correct dimensions
res = matrix()
res.zero(self.dimx, self.dimy)
for i in range(self.dimx):
for j in range(self.dimy):
res.value[i][j] = self.value[i][j] - other.value[i][j]
return res
# ------------
#
# defines multiplication. Both matrices must be of fitting dimensions
#
def __mul__(self, other):
# check if correct dimensions
if self.dimy != other.dimx:
raise ValueError, "Matrices must be m*n and n*p to multiply"
else:
# multiply if correct dimensions
res = matrix()
res.zero(self.dimx, other.dimy)
for i in range(self.dimx):
for j in range(other.dimy):
for k in range(self.dimy):
res.value[i][j] += self.value[i][k] * other.value[k][j]
return res
# ------------
#
# returns a matrix transpose
#
def transpose(self):
# compute transpose
res = matrix()
res.zero(self.dimy, self.dimx)
for i in range(self.dimx):
for j in range(self.dimy):
res.value[j][i] = self.value[i][j]
return res
# ------------
#
# creates a new matrix from the existing matrix elements.
#
# Example:
# l = matrix([[ 1, 2, 3, 4, 5],
# [ 6, 7, 8, 9, 10],
# [11, 12, 13, 14, 15]])
#
# l.take([0, 2], [0, 2, 3])
#
# results in:
#
# [[1, 3, 4],
# [11, 13, 14]]
#
#
# take is used to remove rows and columns from existing matrices
# list1/list2 define a sequence of rows/columns that shall be taken
    # if no list2 is provided, then list2 is set to list1 (good for
# symmetric matrices)
#
def take(self, list1, list2 = []):
if list2 == []:
list2 = list1
if len(list1) > self.dimx or len(list2) > self.dimy:
raise ValueError, "list invalid in take()"
res = matrix()
res.zero(len(list1), len(list2))
for i in range(len(list1)):
for j in range(len(list2)):
res.value[i][j] = self.value[list1[i]][list2[j]]
return res
# ------------
#
# creates a new matrix from the existing matrix elements.
#
# Example:
# l = matrix([[1, 2, 3],
# [4, 5, 6]])
#
# l.expand(3, 5, [0, 2], [0, 2, 3])
#
# results in:
#
# [[1, 0, 2, 3, 0],
# [0, 0, 0, 0, 0],
# [4, 0, 5, 6, 0]]
#
# expand is used to introduce new rows and columns into an existing matrix
# list1/list2 are the new indexes of row/columns in which the matrix
# elements are being mapped. Elements for rows and columns
# that are not listed in list1/list2
# will be initialized by 0.0.
#
def expand(self, dimx, dimy, list1, list2 = []):
if list2 == []:
list2 = list1
if len(list1) > self.dimx or len(list2) > self.dimy:
raise ValueError, "list invalid in expand()"
res = matrix()
res.zero(dimx, dimy)
for i in range(len(list1)):
for j in range(len(list2)):
res.value[list1[i]][list2[j]] = self.value[i][j]
return res
# ------------
#
# Computes the upper triangular Cholesky factorization of
# a positive definite matrix.
# This code is based on http://adorio-research.org/wordpress/?p=4560
#
def Cholesky(self, ztol= 1.0e-5):
res = matrix()
res.zero(self.dimx, self.dimx)
for i in range(self.dimx):
S = sum([(res.value[k][i])**2 for k in range(i)])
d = self.value[i][i] - S
if abs(d) < ztol:
res.value[i][i] = 0.0
else:
if d < 0.0:
raise ValueError, "Matrix not positive-definite"
res.value[i][i] = sqrt(d)
for j in range(i+1, self.dimx):
S = sum([res.value[k][i] * res.value[k][j] for k in range(i)])
if abs(S) < ztol:
S = 0.0
res.value[i][j] = (self.value[i][j] - S)/res.value[i][i]
return res
# ------------
#
# Computes inverse of matrix given its Cholesky upper Triangular
# decomposition of matrix.
# This code is based on http://adorio-research.org/wordpress/?p=4560
#
def CholeskyInverse(self):
res = matrix()
res.zero(self.dimx, self.dimx)
# Backward step for inverse.
for j in reversed(range(self.dimx)):
tjj = self.value[j][j]
S = sum([self.value[j][k]*res.value[j][k] for k in range(j+1, self.dimx)])
res.value[j][j] = 1.0/ tjj**2 - S/ tjj
for i in reversed(range(j)):
res.value[j][i] = res.value[i][j] = \
-sum([self.value[i][k]*res.value[k][j] for k in \
range(i+1,self.dimx)])/self.value[i][i]
return res
# ------------
#
    # computes and returns the inverse of a square matrix
#
def inverse(self):
aux = self.Cholesky()
res = aux.CholeskyInverse()
return res
# ------------
#
# prints matrix (needs work!)
#
def __repr__(self):
return repr(self.value)
# ------------------------------------------------
#
# this is the robot class
#
# our robot lives in x-y space, and its motion is
# pointed in a random direction. It moves on a straight line
# until it comes close to a wall at which point it turns
# away from the wall and continues to move.
#
# For measurements, it simply senses the x- and y-distance
# to landmarks. This is different from range and bearing as
# commonly studied in the literature, but this makes it much
# easier to implement the essentials of SLAM without
# cluttered math
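# (each measurement returned by sense() is a triple [landmark_index, dx, dy],
#  which is exactly the format that make_data stores and slam() consumes)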
#
class robot:
# --------
# init:
# creates robot and initializes location to 0, 0
#
def __init__(self, world_size = 100.0, measurement_range = 30.0,
motion_noise = 1.0, measurement_noise = 1.0):
self.measurement_noise = 0.0
self.world_size = world_size
self.measurement_range = measurement_range
self.x = world_size / 2.0
self.y = world_size / 2.0
self.motion_noise = motion_noise
self.measurement_noise = measurement_noise
self.landmarks = []
self.num_landmarks = 0
def rand(self):
return random.random() * 2.0 - 1.0
# --------
#
# make random landmarks located in the world
#
def make_landmarks(self, num_landmarks):
self.landmarks = []
for i in range(num_landmarks):
self.landmarks.append([round(random.random() * self.world_size),
round(random.random() * self.world_size)])
self.num_landmarks = num_landmarks
# --------
#
# move: attempts to move robot by dx, dy. If outside world
# boundary, then the move does nothing and instead returns failure
#
def move(self, dx, dy):
x = self.x + dx + self.rand() * self.motion_noise
y = self.y + dy + self.rand() * self.motion_noise
if x < 0.0 or x > self.world_size or y < 0.0 or y > self.world_size:
return False
else:
self.x = x
self.y = y
return True
# --------
#
# sense: returns x- and y- distances to landmarks within visibility range
# because not all landmarks may be in this range, the list of measurements
# is of variable length. Set measurement_range to -1 if you want all
# landmarks to be visible at all times
#
def sense(self):
Z = []
for i in range(self.num_landmarks):
dx = self.landmarks[i][0] - self.x + self.rand() * self.measurement_noise
dy = self.landmarks[i][1] - self.y + self.rand() * self.measurement_noise
if self.measurement_range < 0.0 or abs(dx) + abs(dy) <= self.measurement_range:
Z.append([i, dx, dy])
return Z
# --------
#
# print robot location
#
def __repr__(self):
return 'Robot: [x=%.5f y=%.5f]' % (self.x, self.y)
######################################################
# --------
# this routine makes the robot data
#
def make_data(N, num_landmarks, world_size, measurement_range, motion_noise,
measurement_noise, distance):
complete = False
while not complete:
data = []
# make robot and landmarks
r = robot(world_size, measurement_range, motion_noise, measurement_noise)
r.make_landmarks(num_landmarks)
seen = [False for row in range(num_landmarks)]
# guess an initial motion
orientation = random.random() * 2.0 * pi
dx = cos(orientation) * distance
dy = sin(orientation) * distance
for k in range(N-1):
# sense
Z = r.sense()
# check off all landmarks that were observed
for i in range(len(Z)):
seen[Z[i][0]] = True
# move
while not r.move(dx, dy):
# if we'd be leaving the robot world, pick instead a new direction
orientation = random.random() * 2.0 * pi
dx = cos(orientation) * distance
dy = sin(orientation) * distance
# memorize data
data.append([Z, [dx, dy]])
# we are done when all landmarks were observed; otherwise re-run
complete = (sum(seen) == num_landmarks)
print ' '
print 'Landmarks: ', r.landmarks
print r
return data
####################################################
# --------------------------------
#
# print the result of SLAM, the robot pose(s) and the landmarks
#
def print_result(N, num_landmarks, result):
print
print 'Estimated Pose(s):'
for i in range(N):
print ' ['+ ', '.join('%.3f'%x for x in result.value[2*i]) + ', ' \
+ ', '.join('%.3f'%x for x in result.value[2*i+1]) +']'
print
print 'Estimated Landmarks:'
for i in range(num_landmarks):
print ' ['+ ', '.join('%.3f'%x for x in result.value[2*(N+i)]) + ', ' \
+ ', '.join('%.3f'%x for x in result.value[2*(N+i)+1]) +']'
# --------------------------------
#
# slam - retains entire path and all landmarks
#
############## ENTER YOUR CODE BELOW HERE ###################
def slam(data, N, num_landmarks, motion_noise, measurement_noise):
#
#
# Add your code here!
#
#
#
#set the dimension of the filter
dim = 2 * (N + num_landmarks)
#make the constraint information matrix and vector
Omega = matrix()
Omega.zero(dim,dim)
Omega.value[0][0] = 1.0
Omega.value[1][1] = 1.0
Xi = matrix()
Xi.zero(dim, 1)
Xi.value[0][0] = world_size / 2
Xi.value[1][0] = world_size / 2
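    # Omega and Xi accumulate the Graph SLAM constraints in information form:
    # every motion and measurement below adds terms so that the final estimate
    # is the solution of Omega * mu = Xi (computed at the end as
    # mu = Omega.inverse() * Xi). The entries set above anchor the initial pose
    # at the center of the world.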
for k in range(len(data)):
#n is the index of the robots pose in the matrix/vector
n = k * 2
measurement = data[k][0]
motion = data[k][1]
# integrate measurements
for i in range(len(measurement)):
#m is the index of the landmark coordinate in the matrix/vector
m = 2 * (N + measurement[i][0])
# update the information matrix according to measurement
for b in range(2):
Omega.value[n+b][n+b] += 1.0 / measurement_noise
Omega.value[m+b][m+b] += 1.0 / measurement_noise
Omega.value[n+b][m+b] += -1.0 / measurement_noise
Omega.value[m+b][n+b] += -1.0 / measurement_noise
Xi.value[n+b][0] += -measurement[i][1+b] / measurement_noise
Xi.value[m+b][0] += measurement[i][1+b] / measurement_noise
# update the information matrix according to motion
for b in range(4):
Omega.value[n+b][n+b] += 1.0 / motion_noise
for b in range(2):
Omega.value[n+b ][n+b+2] += -1.0 / motion_noise
Omega.value[n+b+2][n+b ] += -1.0 / motion_noise
Xi.value[n+b ][0] += -motion[b] / motion_noise
Xi.value[n+b+2][0] += motion[b] / motion_noise
mu = Omega.inverse() * Xi
return mu # Make sure you return mu for grading!
############### ENTER YOUR CODE ABOVE HERE ###################
# ------------------------------------------------------------------------
# ------------------------------------------------------------------------
# ------------------------------------------------------------------------
#
# Main routines
#
num_landmarks = 5 # number of landmarks
N = 20 # time steps
world_size = 100.0 # size of world
measurement_range = 50.0 # range at which we can sense landmarks
motion_noise = 2.0 # noise in robot motion
measurement_noise = 2.0 # noise in the measurements
distance = 20.0 # distance by which robot (intends to) move each iteratation
data = make_data(N, num_landmarks, world_size, measurement_range, motion_noise, measurement_noise, distance)
result = slam(data, N, num_landmarks, motion_noise, measurement_noise)
print_result(N, num_landmarks, result)
# -------------
# Testing
#
# Uncomment one of the test cases below to compare your results to
# the results shown for Test Case 1 and Test Case 2.
test_data1 = [[[[1, 19.457599255548065, 23.8387362100849], [2, -13.195807561967236, 11.708840328458608], [3, -30.0954905279171, 15.387879242505843]], [-12.2607279422326, -15.801093326936487]], [[[2, -0.4659930049620491, 28.088559771215664], [4, -17.866382374890936, -16.384904503932]], [-12.2607279422326, -15.801093326936487]], [[[4, -6.202512900833806, -1.823403210274639]], [-12.2607279422326, -15.801093326936487]], [[[4, 7.412136480918645, 15.388585962142429]], [14.008259661173426, 14.274756084260822]], [[[4, -7.526138813444998, -0.4563942429717849]], [14.008259661173426, 14.274756084260822]], [[[2, -6.299793150150058, 29.047830407717623], [4, -21.93551130411791, -13.21956810989039]], [14.008259661173426, 14.274756084260822]], [[[1, 15.796300959032276, 30.65769689694247], [2, -18.64370821983482, 17.380022987031367]], [14.008259661173426, 14.274756084260822]], [[[1, 0.40311325410337906, 14.169429532679855], [2, -35.069349468466235, 2.4945558982439957]], [14.008259661173426, 14.274756084260822]], [[[1, -16.71340983241936, -2.777000269543834]], [-11.006096015782283, 16.699276945166858]], [[[1, -3.611096830835776, -17.954019226763958]], [-19.693482634035977, 3.488085684573048]], [[[1, 18.398273354362416, -22.705102332550947]], [-19.693482634035977, 3.488085684573048]], [[[2, 2.789312482883833, -39.73720193121324]], [12.849049222879723, -15.326510824972983]], [[[1, 21.26897046581808, -10.121029799040915], [2, -11.917698965880655, -23.17711662602097], [3, -31.81167947898398, -16.7985673023331]], [12.849049222879723, -15.326510824972983]], [[[1, 10.48157743234859, 5.692957082575485], [2, -22.31488473554935, -5.389184118551409], [3, -40.81803984305378, -2.4703329790238118]], [12.849049222879723, -15.326510824972983]], [[[0, 10.591050242096598, -39.2051798967113], [1, -3.5675572049297553, 22.849456408289125], [2, -38.39251065320351, 7.288990306029511]], [12.849049222879723, -15.326510824972983]], [[[0, -3.6225556479370766, -25.58006865235512]], [-7.8874682868419965, -18.379005523261092]], [[[0, 1.9784503557879374, -6.5025974151499]], [-7.8874682868419965, -18.379005523261092]], [[[0, 10.050665232782423, 11.026385307998742]], [-17.82919359778298, 9.062000642947142]], [[[0, 26.526838150174818, -0.22563393232425621], [4, -33.70303936886652, 2.880339841013677]], [-17.82919359778298, 9.062000642947142]]]
test_data2 = [[[[0, 26.543274387283322, -6.262538160312672], [3, 9.937396825799755, -9.128540360867689]], [18.92765331253674, -6.460955043986683]], [[[0, 7.706544739722961, -3.758467215445748], [1, 17.03954411948937, 31.705489938553438], [3, -11.61731288777497, -6.64964096716416]], [18.92765331253674, -6.460955043986683]], [[[0, -12.35130507136378, 2.585119104239249], [1, -2.563534536165313, 38.22159657838369], [3, -26.961236804740935, -0.4802312626141525]], [-11.167066095509824, 16.592065417497455]], [[[0, 1.4138633151721272, -13.912454837810632], [1, 8.087721200818589, 20.51845934354381], [3, -17.091723454402302, -16.521500551709707], [4, -7.414211721400232, 38.09191602674439]], [-11.167066095509824, 16.592065417497455]], [[[0, 12.886743222179561, -28.703968411636318], [1, 21.660953298391387, 3.4912891084614914], [3, -6.401401414569506, -32.321583037341625], [4, 5.034079343639034, 23.102207946092893]], [-11.167066095509824, 16.592065417497455]], [[[1, 31.126317672358578, -10.036784369535214], [2, -38.70878528420893, 7.4987265861424595], [4, 17.977218575473767, 6.150889254289742]], [-6.595520680493778, -18.88118393939265]], [[[1, 41.82460922922086, 7.847527392202475], [3, 15.711709540417502, -30.34633659912818]], [-6.595520680493778, -18.88118393939265]], [[[0, 40.18454208294434, -6.710999804403755], [3, 23.019508919299156, -10.12110867290604]], [-6.595520680493778, -18.88118393939265]], [[[3, 27.18579315312821, 8.067219022708391]], [-6.595520680493778, -18.88118393939265]], [[], [11.492663265706092, 16.36822198838621]], [[[3, 24.57154567653098, 13.461499960708197]], [11.492663265706092, 16.36822198838621]], [[[0, 31.61945290413707, 0.4272295085799329], [3, 16.97392299158991, -5.274596836133088]], [11.492663265706092, 16.36822198838621]], [[[0, 22.407381798735177, -18.03500068379259], [1, 29.642444125196995, 17.3794951934614], [3, 4.7969752441371645, -21.07505361639969], [4, 14.726069092569372, 32.75999422300078]], [11.492663265706092, 16.36822198838621]], [[[0, 10.705527984670137, -34.589764174299596], [1, 18.58772336795603, -0.20109708164787765], [3, -4.839806195049413, -39.92208742305105], [4, 4.18824810165454, 14.146847823548889]], [11.492663265706092, 16.36822198838621]], [[[1, 5.878492140223764, -19.955352450942357], [4, -7.059505455306587, -0.9740849280550585]], [19.628527845173146, 3.83678180657467]], [[[1, -11.150789592446378, -22.736641053247872], [4, -28.832815721158255, -3.9462962046291388]], [-19.841703647091965, 2.5113335861604362]], [[[1, 8.64427397916182, -20.286336970889053], [4, -5.036917727942285, -6.311739993868336]], [-5.946642674882207, -19.09548221169787]], [[[0, 7.151866679283043, -39.56103232616369], [1, 16.01535401373368, -3.780995345194027], [4, -3.04801331832137, 13.697362774960865]], [-5.946642674882207, -19.09548221169787]], [[[0, 12.872879480504395, -19.707592098123207], [1, 22.236710716903136, 16.331770792606406], [3, -4.841206109583004, -21.24604435851242], [4, 4.27111163223552, 32.25309748614184]], [-5.946642674882207, -19.09548221169787]]]
## Test Case 1
##
## Estimated Pose(s):
## [49.999, 49.999]
## [37.971, 33.650]
## [26.183, 18.153]
## [13.743, 2.114]
## [28.095, 16.781]
## [42.383, 30.900]
## [55.829, 44.494]
## [70.855, 59.697]
## [85.695, 75.540]
## [74.010, 92.431]
## [53.543, 96.451]
## [34.523, 100.078]
## [48.621, 83.951]
## [60.195, 68.105]
## [73.776, 52.932]
## [87.130, 38.536]
## [80.301, 20.506]
## [72.797, 2.943]
## [55.244, 13.253]
## [37.414, 22.315]
##
## Estimated Landmarks:
## [82.954, 13.537]
## [70.493, 74.139]
## [36.738, 61.279]
## [18.696, 66.057]
## [20.633, 16.873]
## Test Case 2
##
## Estimated Pose(s):
## [49.999, 49.999]
## [69.180, 45.664]
## [87.742, 39.702]
## [76.269, 56.309]
## [64.316, 72.174]
## [52.256, 88.151]
## [44.058, 69.399]
## [37.001, 49.916]
## [30.923, 30.953]
## [23.507, 11.417]
## [34.179, 27.131]
## [44.154, 43.844]
## [54.805, 60.919]
## [65.697, 78.544]
## [77.467, 95.624]
## [96.801, 98.819]
## [75.956, 99.969]
## [70.199, 81.179]
## [64.053, 61.721]
## [58.106, 42.626]
##
## Estimated Landmarks:
## [76.778, 42.885]
## [85.064, 77.436]
## [13.546, 95.649]
## [59.448, 39.593]
## [69.262, 94.238]
### Uncomment the following three lines for test case 1 ###
#result = slam(test_data1, 20, 5, 2.0, 2.0)
#print_result(20, 5, result)
#print result
### Uncomment the following three lines for test case 2 ###
#result = slam(test_data2, 20, 5, 2.0, 2.0)
#print_result(20, 5, result)
#print result | [
"[email protected]"
] | |
282729cc3d2bd08d97a5cf989ddb3de3ca1287c4 | 73346545e69194dc1cfd887314afe600076ff263 | /polling_stations/apps/addressbase/migrations/0004_onsad_ctry_flag.py | 83cfbf605be785239acb02402bf4599556ecd4fe | [] | permissive | chris48s/UK-Polling-Stations | c7a91f80c1ea423156ac75d88dfca31ca57473ff | 4742b527dae94f0276d35c80460837be743b7d17 | refs/heads/master | 2021-08-27T18:26:07.155592 | 2017-11-29T15:57:23 | 2017-11-29T15:57:23 | 50,743,117 | 1 | 0 | BSD-3-Clause | 2017-11-29T16:03:45 | 2016-01-30T20:20:50 | Python | UTF-8 | Python | false | false | 417 | py | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('addressbase', '0003_auto_20170406_0954'),
]
operations = [
migrations.AddField(
model_name='onsad',
name='ctry_flag',
field=models.CharField(max_length=1, blank=True),
),
]
| [
"[email protected]"
] | |
7229c4e10aaba2855bd989d9e61b8c8526911dee | a04aff1baf3dac3ad96fd78e90b0de357140db84 | /scientific_expedition/yaml_more_tips.py | 2e0b1f3dd0eb44ecee45739b74e097b9bd27ce38 | [
"MIT"
] | permissive | NigrumAquila/py_checkio | f4f66fe8e87ba88d4e9258a55521902541ca33ba | df437c2c3ad325d84714665000e3299a70e91f32 | refs/heads/master | 2022-07-30T04:33:42.107806 | 2020-05-10T09:57:58 | 2020-05-10T09:57:58 | 262,756,706 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 832 | py | import re
def yaml(a):
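    # Parse a minimal "key: value" block into a dict: empty or "null" values become
    # None, "true"/"false" (any case) become bools, values containing letters become
    # strings with surrounding quotes removed and escaped \" restored, and anything
    # else is parsed as an int.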
yaml_dict = {}
for el in a.split('\n'):
if el != '':
key, value = el.split(':')
value = value.lstrip(' ')
if (value == '') or (value == None) or (value == 'null'):
yaml_dict[key] = None
elif (value.lower() in ['true', 'false']):
yaml_dict[key] = True if value.lower() == 'true' else False
elif (re.search(r'[a-zA-Z]+', value)):
value = re.sub(r'\\"', r'"', value)
try:
value = re.search(r'\"([\w\W]*)\"', value).group(1)
yaml_dict[key] = value
except AttributeError:
yaml_dict[key] = value
else:
yaml_dict[key] = int(value)
return yaml_dict | [
"[email protected]"
] | |
c774212f3b6e029c47eb1f0663b0ee7c4e45dbcd | de24f83a5e3768a2638ebcf13cbe717e75740168 | /moodledata/vpl_data/96/usersdata/184/55977/submittedfiles/estatistica.py | 36c2b98bc0c1c7e60882c5efda63b29526938142 | [] | no_license | rafaelperazzo/programacao-web | 95643423a35c44613b0f64bed05bd34780fe2436 | 170dd5440afb9ee68a973f3de13a99aa4c735d79 | refs/heads/master | 2021-01-12T14:06:25.773146 | 2017-12-22T16:05:45 | 2017-12-22T16:05:45 | 69,566,344 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 715 | py | # -*- coding: utf-8 -*-
n=int(input('digite n:'))
lista1=[]
lista2=[]
def media(lista):
soma1=0
for i in range(0,len(lista),1):
soma1=soma1+lista[i]
resultado=soma1/len(lista)
return (resultado)
def desviopadrao(lista):
soma2=0
    for i in range(0,len(lista),1):
        soma2=soma2+(lista[i]-media(lista))**(2)
    desviopadrao=(soma2/(len(lista)-1))**(0.5)
return (desviopadrao)
for i in range(0,n,1):
numero=float(input('digite um numero:'))
lista1.append(numero)
for i in range(0,n,1):
numero=float(input('digite numero:'))
lista2.append(numero)
print('%.2f'%media(lista1))
print('%.2f'%desviopadrao(lista1))
print('%.2f'%media(lista2))
print('%.2f'%desviopadrao(lista2)) | [
"[email protected]"
] | |
378c64df5fde3bb6b7506292d59d95fb8a8be1f2 | 325fde42058b2b82f8a4020048ff910cfdf737d7 | /src/kusto/azext_kusto/vendored_sdks/kusto/operations/__init__.py | 5750cdc82c59179c4778eb4c699f175f4439341b | [
"LicenseRef-scancode-generic-cla",
"MIT"
] | permissive | ebencarek/azure-cli-extensions | 46b0d18fe536fe5884b00d7ffa30f54c7d6887d1 | 42491b284e38f8853712a5af01836f83b04a1aa8 | refs/heads/master | 2023-04-12T00:28:44.828652 | 2021-03-30T22:34:13 | 2021-03-30T22:34:13 | 261,621,934 | 2 | 5 | MIT | 2020-10-09T18:21:52 | 2020-05-06T01:25:58 | Python | UTF-8 | Python | false | false | 1,235 | py | # coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from ._cluster_operations import ClusterOperations
from ._cluster_principal_assignment_operations import ClusterPrincipalAssignmentOperations
from ._database_operations import DatabaseOperations
from ._database_principal_assignment_operations import DatabasePrincipalAssignmentOperations
from ._attached_database_configuration_operations import AttachedDatabaseConfigurationOperations
from ._data_connection_operations import DataConnectionOperations
from ._operation_operations import OperationOperations
__all__ = [
'ClusterOperations',
'ClusterPrincipalAssignmentOperations',
'DatabaseOperations',
'DatabasePrincipalAssignmentOperations',
'AttachedDatabaseConfigurationOperations',
'DataConnectionOperations',
'OperationOperations',
]
| [
"[email protected]"
] | |
1996a1e9a7e0d18dab21229e6a431be83d1a7acc | 11852c0c085ad1f635f45aa085ece3987ae45620 | /chejian/pspnet_train.py | 0d5c14c97670c49a1cc4028912ff89aa9a13f7e1 | [
"BSD-3-Clause"
] | permissive | heixialeeLeon/segment_piwise | 3dc14be9605a652676a559ec7ab6764d23c67735 | 4eaeeb4aa36298c1140c9fb8a6adef7a3bcfb03f | refs/heads/master | 2020-04-19T10:31:05.538819 | 2019-01-29T11:26:42 | 2019-01-29T11:26:42 | 168,142,650 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,472 | py | import numpy as np
import torch
from PIL import Image
from argparse import ArgumentParser
from torch.optim import SGD, Adam
from torch.autograd import Variable
from torch.utils.data import DataLoader
from torchvision.transforms import Compose, CenterCrop, Normalize, Resize
from torchvision.transforms import ToTensor, ToPILImage
from piwise.dataset_chejian import ChejianDataSet
from model.pspnet import extractors
from model.pspnet.pspnet import *
from piwise.criterion import CrossEntropyLoss2d
from piwise.transform import Relabel, ToLabel, Colorize
from piwise.visualize import Dashboard
import torch.optim as optim
import argparse
NUM_CHANNELS = 3
NUM_CLASSES = 12
color_transform = Colorize()
image_transform = ToPILImage()
input_transform = Compose([
Resize(320),
ToTensor(),
Normalize([.485, .456, .406], [.229, .224, .225]),
])
target_transform = Compose([
Resize(320, interpolation=Image.NEAREST),
ToLabel(),
])
parser = argparse.ArgumentParser(
description='Single Shot MultiBox Detector Training With Pytorch')
parser.add_argument('--datadir', required=False, default="/data_1/data/chejian/1207")
parser.add_argument('--epochs', default=50, type=int, metavar='N',
help='number of total epochs to run')
parser.add_argument('--save_interval', default=10, type=int, metavar='N',
help='number of epochs to save the model')
parser.add_argument('--batch_size', default=4, type=int,
help='Batch size for training')
parser.add_argument('--resume', default=None, type=str,
help='Checkpoint state_dict file to resume training from')
parser.add_argument('--cuda', action='store_true', default=True)
parser.add_argument('--num_workers', default=4, type=int,
help='Number of workers used in dataloading')
parser.add_argument('--lr', '--learning-rate', default=1e-3, type=float,
help='initial learning rate')
parser.add_argument('--lr_step', '--learning-rate-step', default=10, type=int,
help='learning rate step')
parser.add_argument('--momentum', default=0.9, type=float,
help='Momentum value for optim')
parser.add_argument('--weight_decay', default=5e-4, type=float,
help='Weight decay for SGD')
parser.add_argument('--gamma', default=0.1, type=float,
help='Gamma update for SGD')
parser.add_argument('--save_folder', default='weights',
help='Directory for saving checkpoint models')
parser.add_argument('--steps-loss', type=int, default=100)
args = parser.parse_args()
print(args)
def train():
#model = SegNet(3, NUM_CLASSES)
#model = FCN16(NUM_CLASSES)
model = PSPNet(n_classes= NUM_CLASSES, sizes=(1, 2, 3, 6), psp_size=512, deep_features_size=256, backend='resnet18')
if args.cuda:
model = model.cuda()
weight = torch.ones(12)
loader = DataLoader(ChejianDataSet(args.datadir, input_transform, target_transform),
num_workers=args.num_workers, batch_size=args.batch_size, shuffle=True)
if args.cuda:
criterion = CrossEntropyLoss2d(weight.cuda())
else:
criterion = CrossEntropyLoss2d(weight)
#optimizer = Adam(model.parameters())
optimizer = SGD(model.parameters(), lr =args.lr, momentum=args.momentum, weight_decay=args.weight_decay)
scheduler = optim.lr_scheduler.MultiStepLR(optimizer, [20, 30, 40, 50], gamma=0.1)
for epoch in range(0, args.epochs+1):
epoch_loss = []
scheduler.step(epoch)
for step, (images, labels) in enumerate(loader):
if args.cuda:
images = images.cuda()
labels = labels.cuda()
inputs = Variable(images)
targets = Variable(labels)
outputs = model(inputs)
optimizer.zero_grad()
targets = targets.squeeze(1)
loss = criterion(outputs, targets)
loss.backward()
optimizer.step()
epoch_loss.append(loss.data[0])
if args.steps_loss > 0 and step % args.steps_loss == 0:
average = sum(epoch_loss) / len(epoch_loss)
print(f'loss: {average} (epoch: {epoch}, step: {step})')
if epoch % 2 == 0:
save_filename = "{}/model_{}.pth".format(args.save_folder,epoch)
torch.save(model.state_dict(), save_filename)
if __name__ == "__main__":
train() | [
"[email protected]"
] | |
4f0245a842357c5dbe5e4147f485d8ff3038a40c | 8f0b0ec0a0a2db00e2134b62a1515f0777d69060 | /scripts/study_case/ID_5/matchzoo/datasets/snli/load_data.py | 06b9ec8754f14f19267ed9c9ad25faa384f88c44 | [
"Apache-2.0"
] | permissive | Liang813/GRIST | 2add5b4620c3d4207e7661eba20a79cfcb0022b5 | 544e843c5430abdd58138cdf1c79dcf240168a5f | refs/heads/main | 2023-06-09T19:07:03.995094 | 2021-06-30T05:12:19 | 2021-06-30T05:12:19 | 429,016,034 | 0 | 0 | Apache-2.0 | 2021-11-17T11:19:48 | 2021-11-17T11:19:47 | null | UTF-8 | Python | false | false | 3,224 | py | """SNLI data loader."""
import typing
from pathlib import Path
import pandas as pd
import scripts.study_case.ID_5.matchzoo as matchzoo
from scripts.study_case.ID_5.matchzoo.engine.base_task import BaseTask
_url = "https://nlp.stanford.edu/projects/snli/snli_1.0.zip"
def load_data(
stage: str = 'train',
task: typing.Union[str, BaseTask] = 'classification',
target_label: str = 'entailment',
return_classes: bool = False
) -> typing.Union[matchzoo.DataPack, tuple]:
"""
Load SNLI data.
:param stage: One of `train`, `dev`, and `test`. (default: `train`)
:param task: Could be one of `ranking`, `classification` or a
:class:`matchzoo.engine.BaseTask` instance. (default: `classification`)
:param target_label: If `ranking`, chose one of `entailment`,
`contradiction`, `neutral`, and `-` as the positive label.
(default: `entailment`)
:param return_classes: `True` to return classes for classification task,
`False` otherwise.
    :return: A DataPack unless `task` is `classification` and `return_classes`
is `True`: a tuple of `(DataPack, classes)` in that case.
"""
if stage not in ('train', 'dev', 'test'):
raise ValueError(f"{stage} is not a valid stage."
f"Must be one of `train`, `dev`, and `test`.")
data_root = _download_data()
file_path = data_root.joinpath(f'snli_1.0_{stage}.txt')
data_pack = _read_data(file_path, task, target_label)
if task == 'ranking' or isinstance(task, matchzoo.tasks.Ranking):
return data_pack
elif task == 'classification' or isinstance(
task, matchzoo.tasks.Classification):
classes = ['entailment', 'contradiction', 'neutral', '-']
if return_classes:
return data_pack, classes
else:
return data_pack
else:
raise ValueError(f"{task} is not a valid task."
f"Must be one of `Ranking` and `Classification`.")
def _download_data():
ref_path = matchzoo.utils.get_file(
'snli', _url, extract=True,
cache_dir=matchzoo.USER_DATA_DIR,
cache_subdir='snli'
)
return Path(ref_path).parent.joinpath('snli_1.0')
def _read_data(path, task, target_label):
table = pd.read_csv(path, sep='\t')
df = pd.DataFrame({
'text_left': table['sentence1'],
'text_right': table['sentence2'],
'label': table['gold_label']
})
df = df.dropna(axis=0, how='any').reset_index(drop=True)
if task == 'ranking':
if target_label not in ['entailment', 'contradiction', 'neutral', '-']:
raise ValueError(f"{target_label} is not a valid target label."
f"Must be one of `entailment`, `contradiction`, "
f"`neutral` and `-`.")
df['label'] = (df['label'] == target_label)
elif task == 'classification':
classes = ['entailment', 'contradiction', 'neutral', '-']
df['label'] = df['label'].apply(classes.index)
else:
raise ValueError(f"{task} is not a valid task."
f"Must be one of `Ranking` and `Classification`.")
return matchzoo.pack(df, task)
| [
"[email protected]"
] | |
04dcf815696115c267f19ffa51ed91d4902092d3 | 73143826c727eb012bff8c732ab5776c051f6c9b | /pytext/data/test/data_test.py | 128b7b8b815f9a089da1385a6d747c501e7b84d2 | [
"BSD-3-Clause"
] | permissive | wehaveone/pytext | f649fb81bb7bcf76c88ee81af5d204820011b1be | 41c0f46abf81c2d84ee02f6e9b91438e18adf47f | refs/heads/master | 2020-04-26T01:42:31.263801 | 2019-03-01T00:58:17 | 2019-03-01T01:03:08 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,965 | py | #!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
import unittest
from pytext.common.constants import Stage
from pytext.data import Batcher, Data, types
from pytext.data.sources.data_source import SafeFileWrapper
from pytext.data.sources.tsv import TSVDataSource
from pytext.data.tensorizers import LabelTensorizer, WordTensorizer
from pytext.utils.test_utils import import_tests_module
tests_module = import_tests_module()
class DataTest(unittest.TestCase):
def setUp(self):
self.data_source = TSVDataSource(
SafeFileWrapper(tests_module.test_file("train_dense_features_tiny.tsv")),
SafeFileWrapper(tests_module.test_file("test_dense_features_tiny.tsv")),
eval_file=None,
field_names=["label", "slots", "text", "dense"],
schema={"text": types.Text, "label": types.Label},
)
self.tensorizers = {
"tokens": WordTensorizer(column="text"),
"labels": LabelTensorizer(column="label", allow_unknown=True),
}
def test_create_data_no_batcher_provided(self):
data = Data(self.data_source, self.tensorizers)
batches = list(data.batches(Stage.TRAIN))
# We should have made at least one non-empty batch
self.assertTrue(batches)
batch = next(iter(batches))
self.assertTrue(batch)
def test_create_batches(self):
data = Data(self.data_source, self.tensorizers, Batcher(batch_size=16))
batches = list(data.batches(Stage.TRAIN))
self.assertEqual(1, len(batches))
batch = next(iter(batches))
self.assertEqual(set(self.tensorizers), set(batch))
tokens, seq_lens = batch["tokens"]
self.assertEqual((10,), seq_lens.size())
self.assertEqual((10,), batch["labels"].size())
self.assertEqual({"tokens", "labels"}, set(batch))
self.assertEqual(10, len(tokens))
def test_create_batches_different_tensorizers(self):
tensorizers = {"tokens": WordTensorizer(column="text")}
data = Data(self.data_source, tensorizers, Batcher(batch_size=16))
batches = list(data.batches(Stage.TRAIN))
self.assertEqual(1, len(batches))
batch = next(iter(batches))
self.assertEqual({"tokens"}, set(batch))
tokens, seq_lens = batch["tokens"]
self.assertEqual((10,), seq_lens.size())
self.assertEqual(10, len(tokens))
def test_data_initializes_tensorsizers(self):
tensorizers = {
"tokens": WordTensorizer(column="text"),
"labels": LabelTensorizer(column="label"),
}
with self.assertRaises(AttributeError):
# verify WordTensorizer isn't in an initialized state yet
tensorizers["tokens"].vocab
Data(self.data_source, tensorizers)
# Tensorizers should have been initialized
self.assertEqual(49, len(tensorizers["tokens"].vocab))
self.assertEqual(7, len(tensorizers["labels"].labels))
def test_data_iterate_multiple_times(self):
data = Data(self.data_source, self.tensorizers)
batches = data.batches(Stage.TRAIN)
data1 = list(batches)
data2 = list(batches)
# We should have made at least one non-empty batch
self.assertTrue(data1)
self.assertTrue(data2)
batch1, _ = data1[0]
batch2, _ = data2[0]
# pytorch tensors don't have equals comparisons, so comparing the tensor
# dicts is non-trivial, but they should also be equal
self.assertEqual(batch1, batch2)
class BatcherTest(unittest.TestCase):
def test_batcher(self):
data = [{"a": i, "b": 10 + i, "c": 20 + i} for i in range(10)]
batcher = Batcher(batch_size=3)
batches = list(batcher.batchify(data))
self.assertEqual(len(batches), 4)
self.assertEqual(batches[1]["a"], [3, 4, 5])
self.assertEqual(batches[3]["b"], [19])
| [
"[email protected]"
] | |
064a0cfb47a454704273151ede7b8f79f553776f | 5e255ad1360c90478393744586663741a9569c21 | /tests/api/test_send_template_message.py | a6ab7ab71a5dd9af0ba71c8be685f6d20647de7f | [
"Apache-2.0"
] | permissive | line/line-bot-sdk-python | d76268e8b542060d6eccbacc5dbfab16960ecc35 | cffd35948238ae24982173e30b1ea1e595bbefd9 | refs/heads/master | 2023-08-31T22:12:31.698183 | 2023-08-28T01:10:09 | 2023-08-28T01:10:09 | 70,553,423 | 1,898 | 1,181 | Apache-2.0 | 2023-09-11T05:14:07 | 2016-10-11T03:42:26 | Python | UTF-8 | Python | false | false | 22,270 | py | # -*- coding: utf-8 -*-
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from __future__ import unicode_literals, absolute_import
import json
import unittest
import responses
from linebot import (
LineBotApi
)
from linebot.models import (
TemplateSendMessage, ButtonsTemplate,
PostbackAction, MessageAction,
URIAction, AltUri, DatetimePickerAction,
ConfirmTemplate, CarouselTemplate, CarouselColumn,
ImageCarouselTemplate, ImageCarouselColumn
)
class TestLineBotApi(unittest.TestCase):
maxDiff = None
def setUp(self):
self.tested = LineBotApi('channel_secret')
self.button_template_message = TemplateSendMessage(
alt_text='Buttons template',
template=ButtonsTemplate(
thumbnail_image_url='https://example.com/image.jpg',
title='Menu', text='Please select',
actions=[
PostbackAction(
label='postback', display_text='postback text',
data='action=buy&itemid=1'
),
MessageAction(
label='message', text='message text'
),
URIAction(
label='uri', uri='http://example.com/',
alt_uri=AltUri(desktop="http://example.com/desktop")
)
]
)
)
self.button_message = [{
"type": "template",
"altText": "Buttons template",
"template": {
"type": "buttons",
"thumbnailImageUrl":
"https://example.com/image.jpg",
"title": "Menu",
"text": "Please select",
"actions": [
{
"type": "postback",
"label": "postback",
"displayText": "postback text",
"data": "action=buy&itemid=1"
},
{
"type": "message",
"label": "message",
"text": "message text"
},
{
"type": "uri",
"label": "uri",
"uri": "http://example.com/",
"altUri": {
"desktop": "http://example.com/desktop"
}
}
]
}
}]
self.confirm_template_message = TemplateSendMessage(
alt_text='Confirm template',
template=ConfirmTemplate(
text='Are you sure?',
actions=[
PostbackAction(
label='postback', display_text='postback text',
data='action=buy&itemid=1'
),
MessageAction(
label='message', text='message text'
)
]
)
)
self.confirm_message = [{
"type": "template",
"altText": "Confirm template",
"template": {
"type": "confirm",
"text": "Are you sure?",
"actions": [
{
"type": "postback",
"label": "postback",
"displayText": "postback text",
"data": "action=buy&itemid=1"
},
{
"type": "message",
"label": "message",
"text": "message text"
}
]
}
}]
self.carousel_template_message = TemplateSendMessage(
alt_text='Carousel template',
template=CarouselTemplate(
columns=[
CarouselColumn(
thumbnail_image_url='https://example.com'
'/item1.jpg',
title='this is menu1', text='description1',
actions=[
PostbackAction(
label='postback1', display_text='postback text1',
data='action=buy&itemid=1'
),
MessageAction(
label='message1', text='message text1'
),
URIAction(
label='uri1',
uri='http://example.com/1'
)
]
),
CarouselColumn(
thumbnail_image_url='https://example.com'
'/item2.jpg',
image_background_color='#000000',
title='this is menu2', text='description2',
actions=[
PostbackAction(
label='postback2', display_text='postback text2',
data='action=buy&itemid=2'
),
MessageAction(
label='message2', text='message text2'
),
URIAction(
label='uri2',
uri='http://example.com/2'
)
]
),
CarouselColumn(
thumbnail_image_url='https://example.com'
'/item3.jpg',
title='this is menu3', text='description3',
actions=[
DatetimePickerAction(
label="datetime picker date",
data="action=sell&itemid=2&mode=date",
mode="date",
initial="2013-04-01",
min="2011-06-23",
max="2017-09-08"
),
DatetimePickerAction(
label="datetime picker time",
data="action=sell&itemid=2&mode=time",
mode="time",
initial="10:00",
min="00:00",
max="23:59"
),
DatetimePickerAction(
label="datetime picker datetime",
data="action=sell&itemid=2&mode=datetime",
mode="datetime",
initial="2013-04-01T10:00",
min="2011-06-23T00:00",
max="2017-09-08T23:59"
)
]
)
]
)
)
self.carousel_message = [{
"type": "template",
"altText": "Carousel template",
"template": {
"type": "carousel",
"columns": [
{
"thumbnailImageUrl":
"https://example.com/item1.jpg",
"title": "this is menu1",
"text": "description1",
"actions": [
{
"type": "postback",
"label": "postback1",
"displayText": "postback text1",
"data": "action=buy&itemid=1"
},
{
"type": "message",
"label": "message1",
"text": "message text1"
},
{
"type": "uri",
"label": "uri1",
"uri": "http://example.com/1"
}
]
},
{
"thumbnailImageUrl":
"https://example.com/item2.jpg",
"imageBackgroundColor": "#000000",
"title": "this is menu2",
"text": "description2",
"actions": [
{
"type": "postback",
"label": "postback2",
"displayText": "postback text2",
"data": "action=buy&itemid=2"
},
{
"type": "message",
"label": "message2",
"text": "message text2"
},
{
"type": "uri",
"label": "uri2",
"uri": "http://example.com/2"
}
]
},
{
"thumbnailImageUrl":
"https://example.com/item3.jpg",
"title": "this is menu3",
"text": "description3",
"actions": [
{
"type": "datetimepicker",
"label": "datetime picker date",
"data": "action=sell&itemid=2&mode=date",
"mode": "date",
"initial": "2013-04-01",
"min": "2011-06-23",
"max": "2017-09-08"
},
{
"type": "datetimepicker",
"label": "datetime picker time",
"data": "action=sell&itemid=2&mode=time",
"mode": "time",
"initial": "10:00",
"min": "00:00",
"max": "23:59"
},
{
"type": "datetimepicker",
"label": "datetime picker datetime",
"data": "action=sell&itemid=2&mode=datetime",
"mode": "datetime",
"initial": "2013-04-01T10:00",
"min": "2011-06-23T00:00",
"max": "2017-09-08T23:59"
}
]
}
],
}
}]
self.image_carousel_template_message = TemplateSendMessage(
alt_text='Image carousel template',
template=ImageCarouselTemplate(
columns=[
ImageCarouselColumn(
image_url='https://example.com/'
'item1.jpg',
action=PostbackAction(
label='postback1',
data='action=buy&itemid=1'
)
),
ImageCarouselColumn(
image_url='https://example.com'
'/item2.jpg',
action=MessageAction(
label='message2',
text='message text2'
)
),
ImageCarouselColumn(
image_url='https://example.com/'
'item3.jpg',
action=URIAction(
label='uri1',
uri='https://example.com/1'
)
)
]
)
)
self.image_carousel_message = [{
"type": "template",
"altText": "Image carousel template",
"template": {
"type": "image_carousel",
"columns": [
{
"imageUrl": "https://example.com/item1.jpg",
"action": {
"type": "postback",
"label": "postback1",
"data": "action=buy&itemid=1",
}
},
{
"imageUrl": "https://example.com/item2.jpg",
"action": {
"type": "message",
"label": "message2",
"text": "message text2"
}
},
{
"imageUrl": "https://example.com/item3.jpg",
"action": {
"type": "uri",
"label": "uri1",
"uri": "https://example.com/1"
}
}
]
}
}]
@responses.activate
def test_push_buttons_template_message(self):
responses.add(
responses.POST,
LineBotApi.DEFAULT_API_ENDPOINT + '/v2/bot/message/push',
json={}, status=200
)
self.tested.push_message('to', self.button_template_message)
request = responses.calls[0].request
self.assertEqual(request.method, 'POST')
self.assertEqual(
request.url,
LineBotApi.DEFAULT_API_ENDPOINT + '/v2/bot/message/push')
self.assertEqual(
json.loads(request.body),
{
"to": "to",
'notificationDisabled': False,
"messages": self.button_message
}
)
@responses.activate
def test_reply_buttons_template_message(self):
responses.add(
responses.POST,
LineBotApi.DEFAULT_API_ENDPOINT + '/v2/bot/message/reply',
json={}, status=200
)
self.tested.reply_message('replyToken', self.button_template_message)
request = responses.calls[0].request
self.assertEqual(request.method, 'POST')
self.assertEqual(
request.url,
LineBotApi.DEFAULT_API_ENDPOINT + '/v2/bot/message/reply')
self.assertEqual(
json.loads(request.body),
{
"replyToken": "replyToken",
'notificationDisabled': False,
"messages": self.button_message
}
)
@responses.activate
def test_push_confirm_template_message(self):
responses.add(
responses.POST,
LineBotApi.DEFAULT_API_ENDPOINT + '/v2/bot/message/push',
json={}, status=200
)
self.tested.push_message('to', self.confirm_template_message)
request = responses.calls[0].request
self.assertEqual(request.method, 'POST')
self.assertEqual(
request.url,
LineBotApi.DEFAULT_API_ENDPOINT + '/v2/bot/message/push')
self.assertEqual(
json.loads(request.body),
{
"to": "to",
'notificationDisabled': False,
"messages": self.confirm_message
}
)
@responses.activate
def test_reply_confirm_template_message(self):
responses.add(
responses.POST,
LineBotApi.DEFAULT_API_ENDPOINT + '/v2/bot/message/reply',
json={}, status=200
)
self.tested.reply_message('replyToken', self.confirm_template_message)
request = responses.calls[0].request
self.assertEqual(request.method, 'POST')
self.assertEqual(
request.url,
LineBotApi.DEFAULT_API_ENDPOINT + '/v2/bot/message/reply'
)
self.assertEqual(
json.loads(request.body),
{
"replyToken": "replyToken",
'notificationDisabled': False,
"messages": self.confirm_message
}
)
@responses.activate
def test_push_carousel_template_message(self):
responses.add(
responses.POST,
LineBotApi.DEFAULT_API_ENDPOINT + '/v2/bot/message/push',
json={}, status=200
)
self.tested.push_message('to', self.carousel_template_message)
request = responses.calls[0].request
self.assertEqual(request.method, 'POST')
self.assertEqual(
request.url,
LineBotApi.DEFAULT_API_ENDPOINT + '/v2/bot/message/push')
self.assertEqual(
json.loads(request.body),
{
"to": "to",
'notificationDisabled': False,
"messages": self.carousel_message
}
)
@responses.activate
def test_reply_carousel_template_message(self):
responses.add(
responses.POST,
LineBotApi.DEFAULT_API_ENDPOINT + '/v2/bot/message/reply',
json={}, status=200
)
self.tested.reply_message('replyToken', self.carousel_template_message)
request = responses.calls[0].request
self.assertEqual(request.method, 'POST')
self.assertEqual(
request.url,
LineBotApi.DEFAULT_API_ENDPOINT + '/v2/bot/message/reply')
self.assertEqual(
json.loads(request.body),
{
"replyToken": "replyToken",
'notificationDisabled': False,
"messages": self.carousel_message
}
)
@responses.activate
def test_multicast_carousel_template_message(self):
responses.add(
responses.POST,
LineBotApi.DEFAULT_API_ENDPOINT + '/v2/bot/message/multicast',
json={}, status=200
)
self.tested.multicast(['to1', 'to2'], self.carousel_template_message)
request = responses.calls[0].request
self.assertEqual(request.method, 'POST')
self.assertEqual(
request.url,
LineBotApi.DEFAULT_API_ENDPOINT + '/v2/bot/message/multicast')
self.assertEqual(
json.loads(request.body),
{
"to": ['to1', 'to2'],
'notificationDisabled': False,
"messages": self.carousel_message
}
)
@responses.activate
def test_push_image_carousel_template_message(self):
responses.add(
responses.POST,
LineBotApi.DEFAULT_API_ENDPOINT + '/v2/bot/message/push',
json={}, status=200
)
self.tested.push_message('to', self.image_carousel_template_message)
request = responses.calls[0].request
self.assertEqual(request.method, 'POST')
self.assertEqual(
request.url,
LineBotApi.DEFAULT_API_ENDPOINT + '/v2/bot/message/push')
self.assertEqual(
json.loads(request.body),
{
"to": "to",
'notificationDisabled': False,
"messages": self.image_carousel_message
}
)
@responses.activate
def test_reply_image_carousel_template_message(self):
responses.add(
responses.POST,
LineBotApi.DEFAULT_API_ENDPOINT + '/v2/bot/message/reply',
json={}, status=200
)
self.tested.reply_message('replyToken', self.image_carousel_template_message)
request = responses.calls[0].request
self.assertEqual(request.method, 'POST')
self.assertEqual(
request.url,
LineBotApi.DEFAULT_API_ENDPOINT + '/v2/bot/message/reply')
self.assertEqual(
json.loads(request.body),
{
"replyToken": "replyToken",
'notificationDisabled': False,
"messages": self.image_carousel_message
}
)
@responses.activate
def test_multicast_image_carousel_template_message(self):
responses.add(
responses.POST,
LineBotApi.DEFAULT_API_ENDPOINT + '/v2/bot/message/multicast',
json={}, status=200
)
self.tested.multicast(['to1', 'to2'], self.image_carousel_template_message)
request = responses.calls[0].request
self.assertEqual(request.method, 'POST')
self.assertEqual(
request.url,
LineBotApi.DEFAULT_API_ENDPOINT + '/v2/bot/message/multicast')
self.assertEqual(
json.loads(request.body),
{
"to": ['to1', 'to2'],
'notificationDisabled': False,
"messages": self.image_carousel_message
}
)
if __name__ == '__main__':
unittest.main()
| [
"[email protected]"
] | |
5b55aa18843a13f929ea64298da6d27f6b4788f6 | b3ee8b9ded107f3ed3382ecdbfd517b9010250fc | /1.Data-exploration/Consensus/L1000/scripts/nbconverted/1.L1000_moa_median_scores_calculation.py | f90b63b58a4f564ec0d06c6c7248fcf5c4edaebe | [
"BSD-3-Clause"
] | permissive | michaelbornholdt/lincs-profiling-comparison | ce027656c3a3e4531bc93fb2e90775ee32d90224 | 1d188ecbf38923a45d1c64f8cecb01ab04566491 | refs/heads/master | 2023-05-31T07:16:40.106422 | 2021-06-23T18:38:15 | 2021-06-23T18:38:15 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 19,005 | py | #!/usr/bin/env python
# coding: utf-8
# # Consensus Signatures
#
# A consensus signature can be defined as a perturbation-specific summary profile acquired by aggregating replicate level information.
#
#
# #### Level 5 - Replicate-consensus signatures (MODZ)
# L1000 experiments are typically done in 3 or more biological replicates. We derive a consensus replicate signature by applying the
# moderated z-score (MODZ) procedure as follows. First, a pairwise Spearman correlation matrix is computed between the replicate
# signatures in the space of landmark genes with trivial self-correlations being ignored (set to 0). Then, weights for each replicate are
# computed as the sum of its correlations to the other replicates, normalized such that all weights sum to 1. Finally, the consensus
# signature is given by the linear combination of the replicate signatures with the coefficients set to the weights. This procedure serves
# to mitigate the effects of uncorrelated or outlier replicates, and can be thought of as a ‘de-noised’ representation of the given
# experiment’s transcriptional consequences.
# [Subramanian et al 2017](https://www.cell.com/action/showPdf?pii=S0092-8674%2817%2931309-0)
#
#
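# A minimal sketch of the MODZ weighting described above, assuming `replicates`
# is an (n_replicates x n_genes) array of replicate-level signatures; the helper
# name and shapes are illustrative assumptions, not the actual cmapPy/CLUE code.
import numpy as np
import pandas as pd
def modz_consensus(replicates):
    """Collapse replicate signatures into a single consensus (MODZ) profile."""
    reps = np.asarray(replicates, dtype=float)
    # pairwise Spearman correlation matrix between replicates
    corr = pd.DataFrame(reps.T).corr(method='spearman').values
    np.fill_diagonal(corr, 0)              # ignore trivial self-correlations
    weights = corr.sum(axis=1)             # each replicate's summed correlation to the others
    weights = weights / weights.sum()      # normalize so the weights sum to 1
    return weights @ reps                  # weighted linear combination of the replicates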
# ### we have expression values of 978 landmark genes for each signature id (sig_id)
#
#
#
#
# ### The goal here:
# - is to determine the median score of each MOA (Mechanism of action) per dose based on taking the median of the correlation values between compounds of the same MOA.
#
#
# ### Note:
#
# To calculate the median score for each of the two level-5 datasets (rank and MODZ), this notebook has to be run twice, once for each.
# In[1]:
import os
import pathlib
import requests
import pickle
import argparse
import pandas as pd
import numpy as np
import re
from os import walk
from collections import Counter
import matplotlib.pyplot as plt
get_ipython().run_line_magic('matplotlib', 'inline')
import seaborn as sns
import random
sns.set_style("darkgrid")
import shutil
from statistics import median
import cmapPy.pandasGEXpress.parse_gct as pg
from cmapPy.pandasGEXpress.parse import parse
from io import BytesIO
from urllib.request import urlopen
from zipfile import ZipFile
# ### - Download L1000 Dataset
# In[2]:
data_dir = pathlib.Path("../../Profiles_level4/L1000/L1000_figshare_data")
os.listdir(data_dir) ##files in L1000 downloaded dataset
# ### Mechanism of actions (MOAs) - Alignment of L1000 and Cell Painting MOAs
#
# - Align the **L1000 pert_info meta_data** with the **Cell-painting meta_data** based on **broad id**, then fill null values in the Cell Painting MOA column with the corresponding L1000 MOA of the same broad sample id (and vice versa), and use the L1000 MOAs for further analysis (because they contain the most distinct MOAs).
# In[3]:
commit = "94bfaeeab0d107beac262b4307aa6e9b783625fa"
cp_moa_dataset = f"https://github.com/broadinstitute/lincs-cell-painting/blob/{commit}/metadata/moa/repurposing_info_external_moa_map_resolved.tsv?raw=true"
# In[4]:
def merge_align_moa(data_dir, cp_moa_link):
"""
This function aligns L1000 MOAs with the cell painting MOAs
    and further fills null MOAs in either one of them (cell painting or L1000)
    with the other's value, provided they share the same broad sample ID.
The function outputs aligned L1000 MOA metadata dataframe,
that will be used for further analysis.
params:
data_dir: directory that contains L1000 files
cp_moa_link: github link to cell painting MOA metadata information .csv file
Returns:
    df_pertinfo: dataframe with aligned L1000 MOA metadata perturbation information.
"""
df_pertinfo_5 = pd.read_csv(os.path.join(data_dir, 'REP.A_A549_pert_info.txt'), delimiter = "\t")
df_moa_cp = pd.read_csv(cp_moa_link, sep="\t")
df_pertinfo_5 = df_pertinfo_5[['pert_id', 'pert_iname', 'moa']].copy()
df_moa_cp = df_moa_cp[['broad_id', 'pert_iname', 'moa']].copy()
df_pertinfo_5.rename(columns={"pert_id": "broad_id", "pert_iname": "pert_iname_L1000", "moa": "moa_L1000"}, inplace = True)
df_moa_cp.rename(columns={"pert_iname": "pert_iname_cell_painting", "moa": "moa_cell_painting"}, inplace = True)
df_pertinfo = pd.merge(df_pertinfo_5, df_moa_cp, on=['broad_id'], how = 'left')
##fill NaNs in columns - moa_L1000, pert_iname_L1000, with corresponding values in cell_painting and VICE VERSA
df_pertinfo['moa_L1000'].fillna(value=df_pertinfo['moa_cell_painting'], inplace=True)
df_pertinfo['moa_cell_painting'].fillna(value=df_pertinfo['moa_L1000'], inplace=True)
df_pertinfo['pert_iname_cell_painting'].fillna(value=df_pertinfo['pert_iname_L1000'], inplace=True)
for col in ['pert_iname_L1000', 'moa_L1000', 'pert_iname_cell_painting', 'moa_cell_painting']:
df_pertinfo[col] = df_pertinfo[col].apply(lambda x: x.lower())
df_pertinfo.rename(columns={"broad_id": "pert_id", "pert_iname_L1000": "pert_iname",
"moa_L1000": "moa"}, inplace = True)
df_pertinfo.drop(['pert_iname_cell_painting', 'moa_cell_painting'], axis = 1, inplace = True)
return df_pertinfo
# In[5]:
df_pert_info = merge_align_moa(data_dir, cp_moa_dataset)
# In[6]:
df_pert_info.shape
# In[7]:
def construct_lvl5_df(data_dir, consensus_lvl5_file, df_pertinfo):
"""
This function returns L1000 level-5 dataframe with samples
that consist of expression values of 978 landmark genes with some
additional metadata information.
params:
data_dir: directory that contains all L1000 files
consensus_lvl5_file: L1000 level-5 (.gctx) file
    df_pertinfo: dataframe with aligned L1000 MOA metadata perturbation information.
Returns:
lvl5_data: L1000 level-5 dataframe consisting of expression
values of 978 landmark genes and metadata information.
"""
lvl5_data = parse(os.path.join(data_dir, consensus_lvl5_file))
df_metalvl_5 = pd.read_csv(os.path.join(data_dir, 'col_meta_level_5_REP.A_A549_only_n9482.txt'), delimiter = "\t")
lvl5_data.data_df.rename_axis(None, inplace = True)
lvl5_data = lvl5_data.data_df.T
lvl5_data.rename_axis(None, inplace = True)
df_meta_features = df_metalvl_5[['sig_id', 'pert_id', 'pert_idose']].copy()
df_meta_features['dose'] = df_meta_features['pert_idose'].map({'-666' : 0, '0.04 uM' : 1, '0.12 uM' : 2, '0.37 uM' : 3,
'1.11 uM' : 4, '3.33 uM' : 5, '10 uM' : 6, '20 uM' : 7})
df_meta_features = pd.merge(df_meta_features, df_pertinfo, on='pert_id')
lvl5_data.reset_index(inplace = True)
lvl5_data.rename(columns={"index": "sig_id"}, inplace = True)
lvl5_data = pd.merge(lvl5_data, df_meta_features, on='sig_id')
return lvl5_data
# L1000 LEVEL 5 Data:
#
# - 'level_5_modz_n9482x978.gctx',
# - 'level_5_rank_n9482x978.gctx'
# In[8]:
df_lvl5 = construct_lvl5_df(data_dir, 'level_5_modz_n9482x978.gctx', df_pert_info)
# In[9]:
df_lvl5.shape
# ### - Remove highly correlated landmark genes and samples with Null MOAs
# In[10]:
def feature_selection(df_data):
"""
    Perform feature selection by dropping rows with null MOA values and
    highly correlated landmark gene columns from the data.
params:
df_data: L1000 level-5 dataframe
Returns:
df_data: refined L1000 level-5 dataframe
"""
df_data_genes = df_data.drop(['pert_id', 'dose', 'pert_iname', 'moa', 'sig_id'], axis = 1).copy()
df_data_corr = df_data_genes.corr(method = 'spearman')
drop_cols = []
n_cols = len(df_data_corr.columns)
for i in range(n_cols):
for k in range(i+1, n_cols):
val = df_data_corr.iloc[k, i]
col = df_data_corr.columns[i]
if abs(val) >= 0.8:
drop_cols.append(col)
df_data.drop(set(drop_cols), axis = 1, inplace = True)
    df_data = df_data.drop(df_data[df_data['moa'].isnull()].index).reset_index(drop = True)
return df_data
# In[11]:
df_lvl5 = feature_selection(df_lvl5)
# In[12]:
df_lvl5.shape
# ### - Get the median scores for the MOAs based on the correlation values of cpds in the same MOAs
# In[13]:
def get_median_score(moa_list, df_dose, df_cpd_agg):
"""
Get the correlation values between compounds of each MOA,
then calculate the median of these correlation values
and assign it as the "median score" of the MOA.
params:
moa_list: list of distinct moas for a particular dose
    df_dose: merged consensus and moa dataframe of a particular dose
    df_cpd_agg: per-compound aggregated (mean) expression profiles for that dose
    Returns:
    moa_median_score: Dict with moa as the keys, and their median scores as the values
    moa_cpds: Dict with moa as the keys, and the list of compounds for each moa as the values
"""
moa_cpds = {}
moa_median_score = {}
for moa in moa_list:
cpds = df_dose['pert_iname'][df_dose['moa'] == moa].unique().tolist()
moa_cpds[moa] = cpds
##taking correlation btw cpds for each MOA
df_cpds = df_cpd_agg.loc[cpds]
cpds_corr = df_cpds.T.corr(method = 'spearman').values
if len(cpds_corr) == 1:
median_val = 1
else:
median_val = median(list(cpds_corr[np.triu_indices(len(cpds_corr), k = 1)]))
moa_median_score[moa] = median_val
return moa_median_score, moa_cpds
# In[14]:
def check_moa(moa_med_score, moa_cpds, df_moa):
"""
Check if all distinct moas in the moa_consensus dataframe (df_moa)
    are in moa_med_score & moa_cpds; if not, add them as keys and give them
    a null value as the median score in moa_med_score and as the compound list in moa_cpds.
    params:
    moa_med_score: Dict with moa as the keys, and their median scores as the values
    moa_cpds: Dict with moa as the keys, and the list of compounds for each moa as the values
    df_moa: merged consensus and moa dataframe with moas
    Returns:
    moa_med_score: Dict with moa as the keys, and their median scores as the values
    moa_cpds: Dict with moa as the keys, and the list of compounds for each moa as the values
"""
moa_list = df_moa['moa'].unique().tolist()
moa_keys = moa_med_score.keys()
for moa in moa_list:
if moa not in moa_keys:
moa_med_score[moa] = np.nan
moa_cpds[moa] = np.nan
return moa_med_score, moa_cpds
# In[15]:
def get_moa_medianscores(df_moa):
"""
Generate a dataframe of distinct moas with their median scores and
corresponding list of compounds for different doses.
params:
df_moa: merged consensus and moa dataframe
Returns:
df_moa_med_score: dataframe of distinct moas with their corresponding median scores
and list of compounds for all doses.
"""
dose_list = list(set(df_moa['dose'].unique().tolist()))[1:]
for dose in dose_list:
df_dose = df_moa[df_moa['dose'] == dose].copy()
df_cpd_agg = df_dose.groupby(['pert_iname']).agg(['mean'])
df_cpd_agg.columns = df_cpd_agg.columns.droplevel(1)
df_cpd_agg.rename_axis(None, axis=0, inplace = True)
df_cpd_agg.drop(['dose'], axis = 1, inplace = True)
dose_moa_list = df_dose['moa'].unique().tolist()
#get the median of the corr values of the cpds for each MOA
dose_moa_med_score, dose_moa_cpds = get_median_score(dose_moa_list, df_dose, df_cpd_agg)
#check if all moa in the df_moa is present in the dose_moa
dose_moa_med_score, dose_moa_cpds = check_moa(dose_moa_med_score, dose_moa_cpds, df_moa)
sorted_moa_med_score = {key:value for key, value in sorted(dose_moa_med_score.items(), key=lambda item: item[0])}
sorted_dose_cpds = {key:value for key, value in sorted(dose_moa_cpds.items(), key=lambda item: item[0])}
if dose == 1:
df_moa_med_score = pd.DataFrame.from_dict(sorted_moa_med_score, orient='index', columns = ['dose_1'])
else:
df_moa_med_score['dose_' + str(dose)] = sorted_moa_med_score.values()
df_moa_med_score['moa_cpds_dose_' + str(dose)] = list(sorted_dose_cpds.values())
return df_moa_med_score
# In[16]:
df_moa_median_scores = get_moa_medianscores(df_lvl5)
# In[17]:
df_moa_median_scores.shape
# ### - Exclude MOAs with median score 1 and only null values and also columns with only null values
#
# #### The reason we exclude MOAs with a median value of 1 is that they contain only ONE compound, so the median correlation value is trivially 1 and cannot differ between doses.
# In[18]:
def exclude_moa(df_moa_med_score):
"""
Exclude MOAs with median score 1 and columns with only null values.
params:
df_moa_med_score: dataframe of distinct moas with their corresponding median scores
and list of compounds for all doses.
Returns:
df_moa_medians: dataframe of distinct moas with NO median values/scores of 1
and their corresponding list of compounds for all doses.
"""
moa_with_med_index = []
for moa in df_moa_med_score.index.tolist():
moa_values = df_moa_med_score.loc[moa]
if all(y != 1.0 for y in moa_values):
moa_with_med_index.append(moa)
df_moa_medians = df_moa_med_score.loc[moa_with_med_index]
null_columns = [col for col in df_moa_medians.columns
if all(df_moa_medians[col].isnull())]
null_moas = [moa for moa in df_moa_medians.index
if all(df_moa_medians.loc[moa].isnull())]
df_moa_medians.drop(null_columns, axis = 1, inplace = True)
df_moa_medians.drop(null_moas, axis = 0, inplace = True)
return df_moa_medians
# In[19]:
df_moa_medn_scores = exclude_moa(df_moa_median_scores)
# In[20]:
df_moa_medn_scores.isnull().sum()
# In[21]:
df_moa_medn_scores.shape
# In[22]:
def seperate_cpds_values(df_moa_medians):
"""
    Separate the compound-list columns from the median-value columns in the
    moa median dataframe
params:
df_moa_medians: dataframe of distinct moas with NO median scores of 1
and their corresponding list of compounds for all doses.
Returns:
df_moa_cpds: dataframe of distinct moas with only their corresponding
list of compounds for all doses.
df_moa_values: dataframe of distinct moas with only their median scores for all doses.
"""
dose_cols = [col for col in df_moa_medians.columns.tolist()
if (col.startswith("dose_"))]
df_moa_cpds = df_moa_medians.drop(dose_cols, axis = 1)
df_moa_values = df_moa_medians.loc[:, dose_cols].copy()
df_moa_values = df_moa_values.reset_index().rename(columns={"index": "moa"})
df_moa_cpds = df_moa_cpds.reset_index().rename(columns={"index": "moa"})
return df_moa_cpds, df_moa_values
# In[23]:
df_moa_cpds, df_moa_vals = seperate_cpds_values(df_moa_medn_scores)
# In[24]:
def get_moa_size(df_moa_cpds, df_moa_values):
"""
    This function computes the number of compounds in each MOA
    i.e. moa_size and returns dataframes including the moa_size column
params:
df_moa_cpds: dataframe of distinct moas with only their corresponding
list of compounds for all doses.
df_moa_values: dataframe of distinct moas with only their median scores for all doses.
Returns:
df_moa_cpds: dataframe of distinct moas with only their corresponding
list of compounds for all doses including moa_size column.
df_moa_values: dataframe of distinct moas with only their median scores
including moa_size column for all doses.
"""
df_moa_cpd_copy = df_moa_cpds.set_index('moa').rename_axis(None, axis=0).copy()
num_col = len(df_moa_cpd_copy.columns)
moa_count = {}
for moa in df_moa_cpd_copy.index:
col_sum = 0
for col in df_moa_cpd_copy.columns:
col_sum += len(df_moa_cpd_copy.loc[moa, col])
moa_count[moa] = round(col_sum/num_col)
df_moa_cpds['moa_size'] = moa_count.values()
df_moa_values['moa_size'] = moa_count.values()
return df_moa_cpds, df_moa_values
# In[25]:
df_moa_cpds, df_moa_vals = get_moa_size(df_moa_cpds, df_moa_vals)
# In[26]:
df_moa_cpds.head()
# In[27]:
df_moa_vals.head(10)
# In[28]:
def check_moas_cpds_doses(df_moa_cpds):
"""
check if moas have the same compounds in all doses,
and return the moas that don't have the same numbers of compounds.
params:
df_moa_cpds: dataframe of distinct moas with only their corresponding
list of compounds for all doses.
Returns:
df_moa_not_equals_cpds: dataframe of moas that don't have the same numbers of
compounds in all doses.
"""
df_moa_cpds = df_moa_cpds.set_index('moa').rename_axis(None, axis=0).copy()
df_moa_cpds.drop(['moa_size'], axis=1, inplace = True)
moas_with_no_equal_cpds = [moa for moa in df_moa_cpds.index
for num in range(len(df_moa_cpds.columns) - 1)
if not ((df_moa_cpds.loc[moa, df_moa_cpds.columns[num]])
== (df_moa_cpds.loc[moa, df_moa_cpds.columns[num+1]]))]
df_moa_not_equals_cpds = df_moa_cpds.loc[set(moas_with_no_equal_cpds)]
return df_moa_not_equals_cpds
# In[29]:
data_moa_not_equals_cpds = check_moas_cpds_doses(df_moa_cpds) ##MOAs with not the same cpds in all doses
# In[30]:
data_moa_not_equals_cpds.shape
# ### - MOAS that do not have the same number of/same compounds in all Doses
# In[31]:
for moa in data_moa_not_equals_cpds.index:
print(moa)
for idx, cols in enumerate(data_moa_not_equals_cpds.columns):
print('Dose ' + str(idx+1) +':', data_moa_not_equals_cpds.loc[moa, cols])
print('\n')
# ### - Save dataframes to .csv files
# In[32]:
def conv_list_to_str_cols(df_moa_cpds):
"""This function convert columns values that are lists to strings"""
moa_cpd_cols = [col for col in df_moa_cpds.columns.tolist()
if (col.startswith("moa_cpds_"))]
df_moa_cpds_nw = df_moa_cpds.copy()
for col in moa_cpd_cols:
df_moa_cpds_nw[col] = df_moa_cpds_nw[col].apply(lambda row: ';'.join(map(str, row)))
return df_moa_cpds_nw
# In[33]:
def save_to_csv(df, path, file_name):
"""saves moa dataframes to csv"""
if not os.path.exists(path):
os.mkdir(path)
df.to_csv(os.path.join(path, file_name), index = False)
# In[34]:
save_to_csv(df_lvl5, 'moa_sizes_consensus_datasets', 'modz_level5_data.csv')
# In[35]:
save_to_csv(df_moa_vals, 'moa_sizes_consensus_datasets', 'modz_moa_median_scores.csv')
# In[36]:
save_to_csv(conv_list_to_str_cols(df_moa_cpds), 'moa_sizes_consensus_datasets', 'L1000_moa_compounds.csv')
| [
"[email protected]"
] | |
93cbfd8b5d21f024f94a6b562b1a0ab37267c396 | e9c9e38ed91969df78bbd7f9ca2a0fdb264d8ddb | /lib/python3.8/site-packages/ansible_collections/fortinet/fortios/plugins/modules/fortios_switch_controller_traffic_sniffer.py | 97456d9b48f799f16f0a26e37a3af27c5845913d | [] | no_license | Arceusir/PRELIM_SKILLS_EXAM | 882fcf2868926f0bbfe1fb18d50e5fe165936c02 | b685c5b28d058f59de2875c7579739c545df2e0c | refs/heads/master | 2023-08-15T07:30:42.303283 | 2021-10-09T01:27:19 | 2021-10-09T01:27:19 | 415,167,192 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 23,868 | py | #!/usr/bin/python
from __future__ import (absolute_import, division, print_function)
# Copyright 2019-2020 Fortinet, Inc.
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <https://www.gnu.org/licenses/>.
__metaclass__ = type
ANSIBLE_METADATA = {'status': ['preview'],
'supported_by': 'community',
'metadata_version': '1.1'}
DOCUMENTATION = '''
---
module: fortios_switch_controller_traffic_sniffer
short_description: Configure FortiSwitch RSPAN/ERSPAN traffic sniffing parameters in Fortinet's FortiOS and FortiGate.
description:
- This module is able to configure a FortiGate or FortiOS (FOS) device by allowing the
user to set and modify switch_controller feature and traffic_sniffer category.
Examples include all parameters and values need to be adjusted to datasources before usage.
Tested with FOS v6.0.0
version_added: "2.10"
author:
- Link Zheng (@chillancezen)
- Jie Xue (@JieX19)
- Hongbin Lu (@fgtdev-hblu)
- Frank Shen (@frankshen01)
- Miguel Angel Munoz (@mamunozgonzalez)
- Nicolas Thomas (@thomnico)
notes:
- Legacy fortiosapi has been deprecated, httpapi is the preferred way to run playbooks
requirements:
- ansible>=2.9.0
options:
access_token:
description:
- Token-based authentication.
Generated from GUI of Fortigate.
type: str
required: false
enable_log:
description:
- Enable/Disable logging for task.
type: bool
required: false
default: false
vdom:
description:
- Virtual domain, among those defined previously. A vdom is a
virtual instance of the FortiGate that can be configured and
used as a different unit.
type: str
default: root
switch_controller_traffic_sniffer:
description:
- Configure FortiSwitch RSPAN/ERSPAN traffic sniffing parameters.
default: null
type: dict
suboptions:
erspan_ip:
description:
- Configure ERSPAN collector IP address.
type: str
mode:
description:
- Configure traffic sniffer mode.
type: str
choices:
- erspan-auto
- rspan
- none
target_ip:
description:
- Sniffer IPs to filter.
type: list
suboptions:
description:
description:
- Description for the sniffer IP.
type: str
dst_entry_id:
description:
- FortiSwitch dest entry ID for the sniffer IP.
type: int
ip:
description:
- Sniffer IP.
required: true
type: str
src_entry_id:
description:
- FortiSwitch source entry ID for the sniffer IP.
type: int
target_mac:
description:
- Sniffer MACs to filter.
type: list
suboptions:
description:
description:
- Description for the sniffer MAC.
type: str
dst_entry_id:
description:
- FortiSwitch dest entry ID for the sniffer MAC.
type: int
mac:
description:
- Sniffer MAC.
required: true
type: str
src_entry_id:
description:
- FortiSwitch source entry ID for the sniffer MAC.
type: int
target_port:
description:
- Sniffer ports to filter.
type: list
suboptions:
description:
description:
- Description for the sniffer port entry.
type: str
in_ports:
description:
- Configure source ingress port interfaces.
type: list
suboptions:
name:
description:
- Interface name.
required: true
type: str
out_ports:
description:
- Configure source egress port interfaces.
type: list
suboptions:
name:
description:
- Interface name.
required: true
type: str
switch_id:
description:
- Managed-switch ID. Source switch-controller.managed-switch.switch-id.
type: str
'''
EXAMPLES = '''
- hosts: fortigates
collections:
- fortinet.fortios
connection: httpapi
vars:
vdom: "root"
ansible_httpapi_use_ssl: yes
ansible_httpapi_validate_certs: no
ansible_httpapi_port: 443
tasks:
- name: Configure FortiSwitch RSPAN/ERSPAN traffic sniffing parameters.
fortios_switch_controller_traffic_sniffer:
vdom: "{{ vdom }}"
switch_controller_traffic_sniffer:
erspan_ip: "<your_own_value>"
mode: "erspan-auto"
target_ip:
-
description: "<your_own_value>"
dst_entry_id: "7"
ip: "<your_own_value>"
src_entry_id: "9"
target_mac:
-
description: "<your_own_value>"
dst_entry_id: "12"
mac: "<your_own_value>"
src_entry_id: "14"
target_port:
-
description: "<your_own_value>"
in_ports:
-
name: "default_name_18"
out_ports:
-
name: "default_name_20"
switch_id: "<your_own_value> (source switch-controller.managed-switch.switch-id)"
'''
RETURN = '''
build:
description: Build number of the fortigate image
returned: always
type: str
sample: '1547'
http_method:
description: Last method used to provision the content into FortiGate
returned: always
type: str
sample: 'PUT'
http_status:
description: Last result given by FortiGate on last operation applied
returned: always
type: str
sample: "200"
mkey:
description: Master key (id) used in the last call to FortiGate
returned: success
type: str
sample: "id"
name:
description: Name of the table used to fulfill the request
returned: always
type: str
sample: "urlfilter"
path:
description: Path of the table used to fulfill the request
returned: always
type: str
sample: "webfilter"
revision:
description: Internal revision number
returned: always
type: str
sample: "17.0.2.10658"
serial:
description: Serial number of the unit
returned: always
type: str
sample: "FGVMEVYYQT3AB5352"
status:
description: Indication of the operation's result
returned: always
type: str
sample: "success"
vdom:
description: Virtual domain used
returned: always
type: str
sample: "root"
version:
description: Version of the FortiGate
returned: always
type: str
sample: "v5.6.3"
'''
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.connection import Connection
from ansible_collections.fortinet.fortios.plugins.module_utils.fortios.fortios import FortiOSHandler
from ansible_collections.fortinet.fortios.plugins.module_utils.fortios.fortios import check_legacy_fortiosapi
from ansible_collections.fortinet.fortios.plugins.module_utils.fortios.fortios import schema_to_module_spec
from ansible_collections.fortinet.fortios.plugins.module_utils.fortios.fortios import check_schema_versioning
from ansible_collections.fortinet.fortios.plugins.module_utils.fortimanager.common import FAIL_SOCKET_MSG
from ansible_collections.fortinet.fortios.plugins.module_utils.fortios.comparison import is_same_comparison
from ansible_collections.fortinet.fortios.plugins.module_utils.fortios.comparison import serialize
def filter_switch_controller_traffic_sniffer_data(json):
option_list = ['erspan_ip', 'mode', 'target_ip',
'target_mac', 'target_port']
dictionary = {}
for attribute in option_list:
if attribute in json and json[attribute] is not None:
dictionary[attribute] = json[attribute]
return dictionary
def underscore_to_hyphen(data):
if isinstance(data, list):
for i, elem in enumerate(data):
data[i] = underscore_to_hyphen(elem)
elif isinstance(data, dict):
new_data = {}
for k, v in data.items():
new_data[k.replace('_', '-')] = underscore_to_hyphen(v)
data = new_data
return data
def switch_controller_traffic_sniffer(data, fos):
vdom = data['vdom']
switch_controller_traffic_sniffer_data = data['switch_controller_traffic_sniffer']
filtered_data = underscore_to_hyphen(filter_switch_controller_traffic_sniffer_data(switch_controller_traffic_sniffer_data))
return fos.set('switch-controller',
'traffic-sniffer',
data=filtered_data,
vdom=vdom)
def is_successful_status(status):
return status['status'] == "success" or \
status['http_method'] == "DELETE" and status['http_status'] == 404
def fortios_switch_controller(data, fos):
if data['switch_controller_traffic_sniffer']:
resp = switch_controller_traffic_sniffer(data, fos)
else:
fos._module.fail_json(msg='missing task body: %s' % ('switch_controller_traffic_sniffer'))
return not is_successful_status(resp), \
resp['status'] == "success" and \
(resp['revision_changed'] if 'revision_changed' in resp else True), \
resp
versioned_schema = {
"type": "dict",
"children": {
"target_ip": {
"type": "list",
"children": {
"ip": {
"type": "string",
"revisions": {
"v7.0.0": True,
"v6.4.4": True,
"v6.4.0": True,
"v6.4.1": True,
"v6.2.0": True,
"v6.2.3": True,
"v6.2.5": True,
"v6.2.7": True
}
},
"dst_entry_id": {
"type": "integer",
"revisions": {
"v7.0.0": False,
"v6.4.4": False,
"v6.4.0": False,
"v6.4.1": False,
"v6.2.3": True,
"v6.2.5": False,
"v6.2.7": False
}
},
"description": {
"type": "string",
"revisions": {
"v7.0.0": True,
"v6.4.4": True,
"v6.4.0": True,
"v6.4.1": True,
"v6.2.0": True,
"v6.2.3": True,
"v6.2.5": True,
"v6.2.7": True
}
},
"src_entry_id": {
"type": "integer",
"revisions": {
"v7.0.0": False,
"v6.4.4": False,
"v6.4.0": False,
"v6.4.1": False,
"v6.2.3": True,
"v6.2.5": False,
"v6.2.7": False
}
}
},
"revisions": {
"v7.0.0": True,
"v6.4.4": True,
"v6.4.0": True,
"v6.4.1": True,
"v6.2.0": True,
"v6.2.3": True,
"v6.2.5": True,
"v6.2.7": True
}
},
"target_port": {
"type": "list",
"children": {
"in_ports": {
"type": "list",
"children": {
"name": {
"type": "string",
"revisions": {
"v7.0.0": True,
"v6.4.4": True,
"v6.4.0": True,
"v6.4.1": True,
"v6.2.0": True,
"v6.2.3": True,
"v6.2.5": True,
"v6.2.7": True
}
}
},
"revisions": {
"v7.0.0": True,
"v6.4.4": True,
"v6.4.0": True,
"v6.4.1": True,
"v6.2.0": True,
"v6.2.3": True,
"v6.2.5": True,
"v6.2.7": True
}
},
"out_ports": {
"type": "list",
"children": {
"name": {
"type": "string",
"revisions": {
"v7.0.0": True,
"v6.4.4": True,
"v6.4.0": True,
"v6.4.1": True,
"v6.2.0": True,
"v6.2.3": True,
"v6.2.5": True,
"v6.2.7": True
}
}
},
"revisions": {
"v7.0.0": True,
"v6.4.4": True,
"v6.4.0": True,
"v6.4.1": True,
"v6.2.0": True,
"v6.2.3": True,
"v6.2.5": True,
"v6.2.7": True
}
},
"switch_id": {
"type": "string",
"revisions": {
"v7.0.0": True,
"v6.4.4": True,
"v6.4.0": True,
"v6.4.1": True,
"v6.2.0": True,
"v6.2.3": True,
"v6.2.5": True,
"v6.2.7": True
}
},
"description": {
"type": "string",
"revisions": {
"v7.0.0": True,
"v6.4.4": True,
"v6.4.0": True,
"v6.4.1": True,
"v6.2.0": True,
"v6.2.3": True,
"v6.2.5": True,
"v6.2.7": True
}
}
},
"revisions": {
"v7.0.0": True,
"v6.4.4": True,
"v6.4.0": True,
"v6.4.1": True,
"v6.2.0": True,
"v6.2.3": True,
"v6.2.5": True,
"v6.2.7": True
}
},
"mode": {
"type": "string",
"options": [
{
"value": "erspan-auto",
"revisions": {
"v7.0.0": True,
"v6.4.4": True,
"v6.4.0": True,
"v6.4.1": True,
"v6.2.0": True,
"v6.2.3": True,
"v6.2.5": True,
"v6.2.7": True
}
},
{
"value": "rspan",
"revisions": {
"v7.0.0": True,
"v6.4.4": True,
"v6.4.0": True,
"v6.4.1": True,
"v6.2.0": True,
"v6.2.3": True,
"v6.2.5": True,
"v6.2.7": True
}
},
{
"value": "none",
"revisions": {
"v7.0.0": True,
"v6.4.4": True,
"v6.4.0": True,
"v6.4.1": True,
"v6.2.0": True,
"v6.2.3": True,
"v6.2.5": True,
"v6.2.7": True
}
}
],
"revisions": {
"v7.0.0": True,
"v6.4.4": True,
"v6.4.0": True,
"v6.4.1": True,
"v6.2.0": True,
"v6.2.3": True,
"v6.2.5": True,
"v6.2.7": True
}
},
"erspan_ip": {
"type": "string",
"revisions": {
"v7.0.0": True,
"v6.4.4": True,
"v6.4.0": True,
"v6.4.1": True,
"v6.2.0": True,
"v6.2.3": True,
"v6.2.5": True,
"v6.2.7": True
}
},
"target_mac": {
"type": "list",
"children": {
"mac": {
"type": "string",
"revisions": {
"v7.0.0": True,
"v6.4.4": True,
"v6.4.0": True,
"v6.4.1": True,
"v6.2.0": True,
"v6.2.3": True,
"v6.2.5": True,
"v6.2.7": True
}
},
"dst_entry_id": {
"type": "integer",
"revisions": {
"v7.0.0": False,
"v6.4.4": False,
"v6.4.0": False,
"v6.4.1": False,
"v6.2.3": True,
"v6.2.5": False,
"v6.2.7": False
}
},
"description": {
"type": "string",
"revisions": {
"v7.0.0": True,
"v6.4.4": True,
"v6.4.0": True,
"v6.4.1": True,
"v6.2.0": True,
"v6.2.3": True,
"v6.2.5": True,
"v6.2.7": True
}
},
"src_entry_id": {
"type": "integer",
"revisions": {
"v7.0.0": False,
"v6.4.4": False,
"v6.4.0": False,
"v6.4.1": False,
"v6.2.3": True,
"v6.2.5": False,
"v6.2.7": False
}
}
},
"revisions": {
"v7.0.0": True,
"v6.4.4": True,
"v6.4.0": True,
"v6.4.1": True,
"v6.2.0": True,
"v6.2.3": True,
"v6.2.5": True,
"v6.2.7": True
}
}
},
"revisions": {
"v7.0.0": True,
"v6.4.4": True,
"v6.4.0": True,
"v6.4.1": True,
"v6.2.0": True,
"v6.2.3": True,
"v6.2.5": True,
"v6.2.7": True
}
}
def main():
module_spec = schema_to_module_spec(versioned_schema)
mkeyname = None
fields = {
"access_token": {"required": False, "type": "str", "no_log": True},
"enable_log": {"required": False, "type": bool},
"vdom": {"required": False, "type": "str", "default": "root"},
"switch_controller_traffic_sniffer": {
"required": False, "type": "dict", "default": None,
"options": {
}
}
}
for attribute_name in module_spec['options']:
fields["switch_controller_traffic_sniffer"]['options'][attribute_name] = module_spec['options'][attribute_name]
if mkeyname and mkeyname == attribute_name:
fields["switch_controller_traffic_sniffer"]['options'][attribute_name]['required'] = True
check_legacy_fortiosapi()
module = AnsibleModule(argument_spec=fields,
supports_check_mode=False)
versions_check_result = None
if module._socket_path:
connection = Connection(module._socket_path)
if 'access_token' in module.params:
connection.set_option('access_token', module.params['access_token'])
if 'enable_log' in module.params:
connection.set_option('enable_log', module.params['enable_log'])
else:
connection.set_option('enable_log', False)
fos = FortiOSHandler(connection, module, mkeyname)
versions_check_result = check_schema_versioning(fos, versioned_schema, "switch_controller_traffic_sniffer")
is_error, has_changed, result = fortios_switch_controller(module.params, fos)
else:
module.fail_json(**FAIL_SOCKET_MSG)
if versions_check_result and versions_check_result['matched'] is False:
module.warn("Ansible has detected version mismatch between FortOS system and your playbook, see more details by specifying option -vvv")
if not is_error:
if versions_check_result and versions_check_result['matched'] is False:
module.exit_json(changed=has_changed, version_check_warning=versions_check_result, meta=result)
else:
module.exit_json(changed=has_changed, meta=result)
else:
if versions_check_result and versions_check_result['matched'] is False:
module.fail_json(msg="Error in repo", version_check_warning=versions_check_result, meta=result)
else:
module.fail_json(msg="Error in repo", meta=result)
if __name__ == '__main__':
main()
| [
"[email protected]"
] | |
ad22536d51ac320742b2792288061eebc2f26c36 | 8f8498bb6f56b19d45a1989c8113a077348c0a02 | /SWEA/Level 4/장훈이의 높은 선반.py | 73f3564f795b4cc2858a65eb719e2a3098834f2d | [] | no_license | gjtjdtn201/practice | a09b437c892b0b601e156c09cb1f053b52fab11b | ea45582b2773616b2b8f350b927559210009d89f | refs/heads/master | 2021-01-01T13:29:46.640740 | 2020-11-28T00:55:37 | 2020-11-28T00:55:37 | 239,299,485 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 574 | py | import sys
sys.stdin = open('장훈이의 높은 선반.txt', 'r')
def powerset(n, k, sum):
global ans
if sum >= B:
if ans > (sum - B):
ans = sum - B
return
if n == k:
return
else:
A[k] = 1
powerset(n, k + 1, sum + worker[k])
A[k] = 0
powerset(n, k + 1, sum)
T = int(input())
for test_case in range(1, T+1):
N, B = map(int, input().split())
worker = list(map(int, input().split()))
A = [0] * N
ans = 999999
powerset(N, 0, 0)
print('#{} {}'.format(test_case, ans)) | [
"[email protected]"
] | |
bc0154117e425f2eff6180f89c24e8c93d1337ab | 7e32484521b7a21d8bc28a1888e0a5fe766a9f5b | /mysql-connector-example.py | bf4de0a3aaaf1fc4cf6ea86745b830ecefde09e0 | [] | no_license | MerajBighamain/python_projects-1 | 583dc5e5c0719f14cc0ff9074cb7cd8aec210794 | 63507d916ddb5ba58616f16354a8b686ab4c6d42 | refs/heads/master | 2022-11-10T19:22:03.590818 | 2020-06-23T14:25:18 | 2020-06-23T14:25:18 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 72 | py | import mysql.connector
connection = mysql.connector.connect(user="root") | [
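# A possible continuation of this example (the query below is an assumption, not
# part of the original snippet; the connect() call above relies on the default
# localhost host with no database selected):
cursor = connection.cursor()
cursor.execute("SELECT VERSION()")  # trivial query to verify the connection works
print(cursor.fetchone())
cursor.close()
connection.close()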
"="
] | = |
5f328e97b8f8944298c7d0b8632c3ab21f78ba18 | 6df40bb87570388ceb25ab8eabc700bd5cb86f85 | /F_Function/manage.py | 9e421b18de32c68db74b7d61bbbe63dbe64ee560 | [
"MIT"
] | permissive | yeboahd24/AdvancedDjango | 15dccf5e0f3454bc14810a69c7badf71f088e1ea | 52715ffea132e591f98f94b781960fc12a8613e4 | refs/heads/main | 2023-04-06T15:38:47.095455 | 2021-04-22T02:14:05 | 2021-04-22T02:14:05 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 666 | py | #!/usr/bin/env python
"""Django's command-line utility for administrative tasks."""
import os
import sys
def main():
"""Run administrative tasks."""
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'F_Function.settings')
try:
from django.core.management import execute_from_command_line
except ImportError as exc:
raise ImportError(
"Couldn't import Django. Are you sure it's installed and "
"available on your PYTHONPATH environment variable? Did you "
"forget to activate a virtual environment?"
) from exc
execute_from_command_line(sys.argv)
if __name__ == '__main__':
main()
| [
"[email protected]"
] | |
9fd4f0ca59f4e0e97cf89be5690b1697e00b2d62 | f44e571fdac5e927235b434151dcc03f597a5e3d | /pyq/25_container/fast_data_set/py1.py | 0947f61fe0395ea1c028583f9d48c2eda7c11926 | [] | no_license | uni51/python_tutorial | 72f74e319cf4bc2662117ec0ad7b58e8a7d0e84b | 0a78111e3760ff4ff9957d67ced3bb0a83baaf9f | refs/heads/master | 2020-04-20T11:10:39.417725 | 2019-03-04T00:35:14 | 2019-03-04T00:35:14 | 168,808,811 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 910 | py | # 集合のメソッド(追加等)
items = {'art'}
print("items:", items) # items: {'art'}
# remove one (arbitrary) element
result = items.pop() # pop cannot remove an element at a specified position
print("items.pop():", result) # items.pop(): art
print("items:", items) # items: set()
# Add multiple elements: for sets this is update, not extend.
items.update(['egg', 'fog'])
print("items.update(['egg', 'fog'])") #items.update(['egg', 'fog'])
print("items:", items) # items: {'fog', 'egg'}
# remove all elements
items.clear()
print("items.clear()") # items.clear()
print("items:", items) # items: set()
# Add one element: for sets this is add, not append.
items.add('doll')
print("items.add('doll')") # items.add('doll')
print("items:", items) # items: {'doll'}
# remove a specific element
items.remove('doll')
print("items.remove('doll')") # items.remove('doll')
print("items:", items) # items: set()
| [
"[email protected]"
] | |
559ebd08ef157a69a6fc774088be4501fea9ab47 | b604219d67f011d44fdf79d0c88d546b3a8bd250 | /알고리즘/기타/진수계산.py | 64f802bbcddfdfcd010017dcfa2dd64407a49694 | [] | no_license | HwangToeMat/Algorithm | 197648d35e46ea1230291901de1caf2afc720f64 | ee2bb829cb71499c39f68d7691a1e84899135da5 | refs/heads/master | 2023-01-19T08:24:07.155305 | 2020-11-20T16:15:11 | 2020-11-20T16:15:11 | 296,235,075 | 3 | 0 | null | null | null | null | UTF-8 | Python | false | false | 301 | py | """
def convert(n, base):
T = "0123456789ABCDEF"
q, r = divmod(n, base)
if q == 0:
return T[r]
else:
return convert(q, base) + T[r]
"""
def convert(n, base):
q, r = divmod(n, base)
if q == 0:
return [r]
else:
return convert(q, base) + [r]
| [
"[email protected]"
] | |
c1a6980787fb3929b0979d41514f2c9fce2e6a15 | 687928e5bc8d5cf68d543005bb24c862460edcfc | /nssrc/com/citrix/netscaler/nitro/resource/config/vpn/vpnglobal_vpnurl_binding.py | e30bcf60328d3b370cfee3e566097acf59f29ff5 | [
"Apache-2.0",
"LicenseRef-scancode-unknown-license-reference",
"Python-2.0"
] | permissive | mbs91/nitro | c6c81665d6abd04de8b9f09554e5e8e541f4a2b8 | be74e1e177f5c205c16126bc9b023f2348788409 | refs/heads/master | 2021-05-29T19:24:04.520762 | 2015-06-26T02:03:09 | 2015-06-26T02:03:09 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,346 | py | #
# Copyright (c) 2008-2015 Citrix Systems, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License")
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from nssrc.com.citrix.netscaler.nitro.resource.base.base_resource import base_resource
from nssrc.com.citrix.netscaler.nitro.resource.base.base_resource import base_response
from nssrc.com.citrix.netscaler.nitro.service.options import options
from nssrc.com.citrix.netscaler.nitro.exception.nitro_exception import nitro_exception
from nssrc.com.citrix.netscaler.nitro.util.nitro_util import nitro_util
class vpnglobal_vpnurl_binding(base_resource) :
""" Binding class showing the vpnurl that can be bound to vpnglobal.
"""
def __init__(self) :
self._urlname = ""
self.___count = 0
@property
def urlname(self) :
"""The intranet url.
"""
try :
return self._urlname
except Exception as e:
raise e
@urlname.setter
def urlname(self, urlname) :
"""The intranet url.
"""
try :
self._urlname = urlname
except Exception as e:
raise e
def _get_nitro_response(self, service, response) :
""" converts nitro response into object and returns the object array in case of get request.
"""
try :
result = service.payload_formatter.string_to_resource(vpnglobal_vpnurl_binding_response, response, self.__class__.__name__)
if(result.errorcode != 0) :
if (result.errorcode == 444) :
service.clear_session(self)
if result.severity :
if (result.severity == "ERROR") :
raise nitro_exception(result.errorcode, str(result.message), str(result.severity))
else :
raise nitro_exception(result.errorcode, str(result.message), str(result.severity))
return result.vpnglobal_vpnurl_binding
except Exception as e :
raise e
def _get_object_name(self) :
""" Returns the value of object identifier argument
"""
try :
return None
except Exception as e :
raise e
@classmethod
def add(cls, client, resource) :
try :
if resource and type(resource) is not list :
updateresource = vpnglobal_vpnurl_binding()
updateresource.urlname = resource.urlname
return updateresource.update_resource(client)
else :
if resource and len(resource) > 0 :
updateresources = [vpnglobal_vpnurl_binding() for _ in range(len(resource))]
for i in range(len(resource)) :
updateresources[i].urlname = resource[i].urlname
return cls.update_bulk_request(client, updateresources)
except Exception as e :
raise e
@classmethod
def delete(cls, client, resource) :
try :
if resource and type(resource) is not list :
deleteresource = vpnglobal_vpnurl_binding()
deleteresource.urlname = resource.urlname
return deleteresource.delete_resource(client)
else :
if resource and len(resource) > 0 :
deleteresources = [vpnglobal_vpnurl_binding() for _ in range(len(resource))]
for i in range(len(resource)) :
deleteresources[i].urlname = resource[i].urlname
return cls.delete_bulk_request(client, deleteresources)
except Exception as e :
raise e
@classmethod
def get(cls, service) :
""" Use this API to fetch a vpnglobal_vpnurl_binding resources.
"""
try :
obj = vpnglobal_vpnurl_binding()
response = obj.get_resources(service)
return response
except Exception as e:
raise e
@classmethod
def get_filtered(cls, service, filter_) :
""" Use this API to fetch filtered set of vpnglobal_vpnurl_binding resources.
Filter string should be in JSON format.eg: "port:80,servicetype:HTTP".
"""
try :
obj = vpnglobal_vpnurl_binding()
option_ = options()
option_.filter = filter_
response = obj.getfiltered(service, option_)
return response
except Exception as e:
raise e
@classmethod
def count(cls, service) :
""" Use this API to count vpnglobal_vpnurl_binding resources configued on NetScaler.
"""
try :
obj = vpnglobal_vpnurl_binding()
option_ = options()
option_.count = True
response = obj.get_resources(service, option_)
if response :
return response[0].__dict__['___count']
return 0
except Exception as e:
raise e
@classmethod
def count_filtered(cls, service, filter_) :
""" Use this API to count the filtered set of vpnglobal_vpnurl_binding resources.
Filter string should be in JSON format.eg: "port:80,servicetype:HTTP".
"""
try :
obj = vpnglobal_vpnurl_binding()
option_ = options()
option_.count = True
option_.filter = filter_
response = obj.getfiltered(service, option_)
if response :
return response[0].__dict__['___count']
return 0
except Exception as e:
raise e
class vpnglobal_vpnurl_binding_response(base_response) :
def __init__(self, length=1) :
self.vpnglobal_vpnurl_binding = []
self.errorcode = 0
self.message = ""
self.severity = ""
self.sessionid = ""
self.vpnglobal_vpnurl_binding = [vpnglobal_vpnurl_binding() for _ in range(length)]
| [
"[email protected]"
] | |
338c0a147112cb40b6d8cbf00925df5f9410bf9a | de24f83a5e3768a2638ebcf13cbe717e75740168 | /moodledata/vpl_data/187/usersdata/354/65060/submittedfiles/al1.py | e2dd22058cfdabcf534c9ab2decc4d0b9297c6ca | [] | no_license | rafaelperazzo/programacao-web | 95643423a35c44613b0f64bed05bd34780fe2436 | 170dd5440afb9ee68a973f3de13a99aa4c735d79 | refs/heads/master | 2021-01-12T14:06:25.773146 | 2017-12-22T16:05:45 | 2017-12-22T16:05:45 | 69,566,344 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 133 | py | # -*- coding: utf-8 -*-
#ENTRADA
c=float(input('Digite a temperatura em Celcius: '))
#PROCESSAMENTO
F= (9c + 160)/5
#SAIDA
print(F)
| [
"[email protected]"
] | |
25a145b4c8108a145d6d3399933dc550426d704e | a16feb303b7599afac19a89945fc2a9603ae2477 | /Simple_Python/standard/json/json_6.py | bd3667c3dd21fc36b25d45aa5824ede395a4ce01 | [] | no_license | yafeile/Simple_Study | d75874745ce388b3d0f9acfa9ebc5606a5745d78 | c3c554f14b378b487c632e11f22e5e3118be940c | refs/heads/master | 2021-01-10T22:08:34.636123 | 2015-06-10T11:58:59 | 2015-06-10T11:58:59 | 24,746,770 | 0 | 2 | null | null | null | null | UTF-8 | Python | false | false | 297 | py | #! /usr/bin/env/python
# -*- coding:utf-8 -*-
import json
data = [{'a':'张三','b':(2,4),'c':3.0,('d',):'D tuple'}]
print 'First attempt'
try:
print json.dumps(data)
except (TypeError,ValueError), err:
print 'ERROR:',err
print
print 'Second attempt'
print json.dumps(data,skipkeys=True) | [
"[email protected]"
] | |
0420a32c8996933f205e14b10edd3b1d1bd32a45 | ca7aa979e7059467e158830b76673f5b77a0f5a3 | /Python_codes/p03377/s943143838.py | 2f2c22f802580216fca88c887e3ac3ed8a52ae9f | [] | no_license | Aasthaengg/IBMdataset | 7abb6cbcc4fb03ef5ca68ac64ba460c4a64f8901 | f33f1c5c3b16d0ea8d1f5a7d479ad288bb3f48d8 | refs/heads/main | 2023-04-22T10:22:44.763102 | 2021-05-13T17:27:22 | 2021-05-13T17:27:22 | 367,112,348 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 92 | py | A,B,X=map(int,input().split())
if (A<=X)and(X<=A+B):
print("YES")
else:
print("NO") | [
"[email protected]"
] | |
ca9ba61eb5cd74330a7ec90e9ff0add6ab1f6e29 | 913fb9ec1e709a5140676ba7b2371b1976afca72 | /alignSeqs/test.countContigEnd.py | 32ef21f447d6d5f9be0711c83cc931b70af24a2d | [] | no_license | cgreer/ResearchScripts | 171cfe9555ea06fdeb91084c12d07d1b45a2335c | 1107803bb1459d6b6e1dfb1a89679d2b6fd49062 | refs/heads/master | 2016-09-05T10:43:19.090247 | 2012-04-12T21:38:11 | 2012-04-12T21:38:11 | 1,673,080 | 0 | 3 | null | null | null | null | UTF-8 | Python | false | false | 1,747 | py | import bioLibCG
import matplotlib.pyplot as plt
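# countEnd: for each tab-separated record, take the sequence in column 2 and
# measure the longest run of a single repeated letter at the 3' end (count3)
# and at the 5' end (count5); countT keeps the longer of the two runs per
# sequence, and histograms of those run lengths are plotted per base (A/T/G/C).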
def countEnd(fN):
count3 = {'A': [], 'T': [], 'G': [], 'C': []}
count5 = {'A': [], 'T': [], 'G': [], 'C': []}
countT = {'A': [], 'T': [], 'G': [], 'C': []}
f = open(fN, 'r')
for line in f:
ls = line.strip().split('\t')
seq = ls[1]
letter0 = seq[-1]
count = 0
for i in reversed(seq):
if i == letter0:
count += 1
else:
break
count3[letter0].append(count)
letter = seq[0]
countAnother = 0
for i in seq:
if i == letter:
countAnother += 1
else:
break
count5[letter].append(countAnother)
if count > countAnother:
countT[letter0].append(count)
else:
countT[letter].append(countAnother)
plt.hist(countT['C'], 15, facecolor='r', label='C', alpha = 1.00)
plt.hist(countT['G'], 15, facecolor='y', label='G', alpha = .55)
plt.hist(countT['T'], 15, facecolor='g', label='T', alpha = .55)
plt.hist(countT['A'], 15, facecolor='b', label='A', alpha = .55)
    plt.xlabel('Length of Longest Contiguous End Region')
plt.ylabel('Number of Origin RNAs')
plt.legend()
plt.show()
if __name__ == "__main__":
import sys
bioLibCG.submitArgs(countEnd, sys.argv)
| [
"[email protected]"
] | |
50736ba59f9669e816e9c0bd9a64716c3f93b03b | 79a484e91a8df432a0ded93806a1e8237df7c253 | /umibukela/migrations/0032_auto_20180511_1315.py | 0a99479bc8955d38f0508147fb04a451e1f73f09 | [
"MIT"
] | permissive | OpenUpSA/umibukela | 7ba14397ad543154d3a32ebfd84e89aa07f7011e | 34c1a29a429b88c2f574e9120cfe93ba524633da | refs/heads/master | 2023-07-26T19:45:12.531887 | 2023-07-10T15:53:07 | 2023-07-10T15:53:07 | 47,106,932 | 0 | 0 | MIT | 2023-02-02T01:36:59 | 2015-11-30T09:03:27 | Python | UTF-8 | Python | false | false | 1,702 | py | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import migrations, models
from umibukela.models import SurveyType
def poster_template(survey_type):
template = 'print-materials/posters/'
if 'paypoint' in survey_type.name.lower():
template += 'paypoint_poster.html'
elif 'health' in survey_type.name.lower():
template += 'health_clinic_poster.html'
elif 'service office' in survey_type.name.lower():
template += 'service_office_poster.html'
else:
template += 'poster_layout.html'
return template
def set_template_fields(apps, schema_editor):
for survey_type in SurveyType.objects.filter(id__lt=8).all():
survey_type.poster_template = poster_template(survey_type)
survey_type.has_handout = True
survey_type.save()
class Migration(migrations.Migration):
dependencies = [
('umibukela', '0031_remodel-cycle-survey-type-crs'),
]
operations = [
migrations.AddField(
model_name='surveytype',
name='has_handout',
field=models.BooleanField(default=False),
),
migrations.AddField(
model_name='surveytype',
name='poster_template',
field=models.CharField(help_text=b"Path of template from the application root. If it's blank, poster links won't be generated for this survey type.", max_length=1000, null=True, blank=True),
),
migrations.AlterField(
model_name='survey',
name='cycle',
field=models.ForeignKey(related_name='surveys', to='umibukela.Cycle'),
),
migrations.RunPython(set_template_fields),
]
| [
"[email protected]"
] | |
a308a1c8547c55fc2212b03afdbcc101c0af9b33 | 87220ff6f99aef088d121f3f9d81e36a35b7d112 | /pulp_rpm/src/pulp_rpm/extension/admin/structure.py | 80052e7a4898d821d8533eeab15b871d57ad54cb | [] | no_license | ehelms/pulp_rpm | 2905294287899c591e82b12fe3a71d7b98adf1c3 | cd9c9ae1a625072df82c054f3f198728b8770a7a | refs/heads/master | 2021-01-18T11:49:14.196118 | 2012-11-12T21:16:18 | 2012-11-12T21:16:18 | 6,671,376 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,930 | py | # Copyright (c) 2012 Red Hat, Inc.
#
# This software is licensed to you under the GNU General Public
# License as published by the Free Software Foundation; either version
# 2 of the License (GPLv2) or (at your option) any later version.
# There is NO WARRANTY for this software, express or implied,
# including the implied warranties of MERCHANTABILITY,
# NON-INFRINGEMENT, or FITNESS FOR A PARTICULAR PURPOSE. You should
# have received a copy of GPLv2 along with this software; if not, see
# http://www.gnu.org/licenses/old-licenses/gpl-2.0.txt.
"""
Contains methods related to the creation and navigation of the structure of the
RPM branch of the CLI. This module should be used in place of the extensions
themselves creating or retrieving sections to centralize the organization of
the commands.
"""
from gettext import gettext as _
# -- constants ----------------------------------------------------------------
# Root section all RPM specific functionality will be located under
SECTION_ROOT = 'rpm'
SECTION_REPO = 'repo'
# Eventually there will be a consumer section
SECTION_COPY = 'copy'
SECTION_UPLOADS = 'uploads'
SECTION_REMOVE = 'remove'
SECTION_CONTENTS = 'content'
SECTION_SYNC = 'sync'
SECTION_SYNC_SCHEDULES = 'schedules'
SECTION_PUBLISH = 'publish'
SECTION_PUBLISH_SCHEDULES = 'schedules'
SECTION_EXPORT = 'export'
DESC_ROOT = _('manage RPM-related content and features')
DESC_REPO = _('repository lifecycle commands')
DESC_COPY = _('copies one or more content units between repositories')
DESC_UPLOADS = _('upload modules into a repository')
DESC_REMOVE = _('remove copied or uploaded modules from a repository')
DESC_CONTENTS = _('search the contents of a repository')
DESC_SYNC = _('run, schedule, or view the status of sync tasks')
DESC_SYNC_SCHEDULES = _('manage repository sync schedules')
DESC_PUBLISH = _('run, schedule, or view the status of publish tasks')
DESC_PUBLISH_SCHEDULES = _('manage repository publish schedules')
DESC_EXPORT = _('run or view the status of ISO export of a repository')
# -- creation -----------------------------------------------------------------
def ensure_root(cli):
"""
Verifies that the root of RPM-related commands exists in the CLI,
creating it using constants from this module if it does not.
:param cli: CLI instance being configured
:type cli: pulp.client.extensions.core.PulpCli
"""
root_section = cli.find_section(SECTION_ROOT)
if root_section is None:
root_section = cli.create_section(SECTION_ROOT, DESC_ROOT)
return root_section
def ensure_repo_structure(cli):
"""
Verifies that the repository section and all of its subsections are present
in the CLI, creating them using constants from this module if they are not.
:param cli: CLI instance being configured
:type cli: pulp.client.extensions.core.PulpCli
"""
# Make sure the root is in place
root_section = ensure_root(cli)
# There's nothing dynamic about setting up the structure, so if the repo
# section exists, it's a safe bet it's configured with its necessary
# subsections, so just punch out.
repo_section = root_section.find_subsection(SECTION_REPO)
if repo_section is not None:
return repo_section
repo_section = root_section.create_subsection(SECTION_REPO, DESC_REPO)
# Add the direct subsections of repo
direct_subsections = (
(SECTION_COPY, DESC_COPY),
(SECTION_REMOVE, DESC_REMOVE),
(SECTION_CONTENTS, DESC_CONTENTS),
(SECTION_UPLOADS, DESC_UPLOADS),
(SECTION_SYNC, DESC_SYNC),
(SECTION_PUBLISH, DESC_PUBLISH),
(SECTION_EXPORT, DESC_EXPORT),
)
for name, description in direct_subsections:
repo_section.create_subsection(name, description)
# Add specific third-tier sections
sync_section = repo_sync_section(cli)
sync_section.create_subsection(SECTION_SYNC_SCHEDULES, DESC_SYNC_SCHEDULES)
publish_section = repo_publish_section(cli)
publish_section.create_subsection(SECTION_PUBLISH_SCHEDULES, DESC_PUBLISH_SCHEDULES)
return repo_section
# -- section retrieval --------------------------------------------------------
def repo_section(cli):
return _find_section(cli, SECTION_ROOT, SECTION_REPO)
def repo_copy_section(cli):
return _find_section(cli, SECTION_ROOT, SECTION_REPO, SECTION_COPY)
def repo_remove_section(cli):
return _find_section(cli, SECTION_ROOT, SECTION_REPO, SECTION_REMOVE)
def repo_uploads_section(cli):
return _find_section(cli, SECTION_ROOT, SECTION_REPO, SECTION_UPLOADS)
def repo_contents_section(cli):
return _find_section(cli, SECTION_ROOT, SECTION_REPO, SECTION_CONTENTS)
def repo_sync_section(cli):
return _find_section(cli, SECTION_ROOT, SECTION_REPO, SECTION_SYNC)
def repo_sync_schedules_section(cli):
return _find_section(cli, SECTION_ROOT, SECTION_REPO, SECTION_SYNC, SECTION_SYNC_SCHEDULES)
def repo_publish_section(cli):
return _find_section(cli, SECTION_ROOT, SECTION_REPO, SECTION_PUBLISH)
def repo_export_section(cli):
return _find_section(cli, SECTION_ROOT, SECTION_REPO, SECTION_EXPORT)
def repo_publish_schedules_section(cli):
return _find_section(cli, SECTION_ROOT, SECTION_REPO, SECTION_PUBLISH, SECTION_PUBLISH_SCHEDULES)
# -- private ------------------------------------------------------------------
def _find_section(cli, *path):
"""
Follows the given path to return the indicated section from the CLI.
:param cli: CLI instance to search within
:type cli: pulp.client.extensions.core.PulpCli
:param path: path through the nest of sections to the desired section
:type path: list of str
:return: section instance that matches the path
:rtype: pulp.client.extensions.core.PulpCliSection
"""
section = cli.root_section
for p in path:
section = section.find_subsection(p)
return section
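# Example of how an extension is expected to use this module (hypothetical
# extension code; the command class name below is an assumption):
#
#   def initialize(context):
#       structure.ensure_repo_structure(context.cli)
#       sync_section = structure.repo_sync_section(context.cli)
#       sync_section.add_command(MyRpmSyncCommand(context))
#
# Routing every extension through these helpers keeps all commands in one shared
# "rpm repo ..." section tree instead of each extension creating its own copy.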
| [
"[email protected]"
] | |
6ded8a1c64af93c3e14df5f2f04e0f4cce78f83b | 3b4c2fa85a358648614c60d295cc9d77eb7e4c81 | /Chapter 6.py/pr no 1.py | 67d746383c61994e02b3ea7fb1e4dd0ed770ba7d | [] | no_license | abbhowmik/PYTHON-Course | 465e2e6c42b41f6dd731562c57d5c53c6ca2e711 | 223071569ce9b98d6725e2a33fb271ac0add6f49 | refs/heads/main | 2023-06-05T11:28:36.632080 | 2021-06-29T18:14:47 | 2021-06-29T18:14:47 | 381,456,413 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 364 | py | num1 = int(input('Enter number 1: '))
num2 = int(input('Enter number 2: '))
num3 = int(input('Enter number 3: '))
num4 = int(input('Enter number 4: '))
if num1>num4:
f1 = num1
else:
f1 = num4
if num2>num3:
f2 = num2
else:
f2 = num3
if f1>f2:
print(f'The greatest number is {f1}')
else:
print(f'The greatest number is {f2}')
| [
"[email protected]"
] | |
9fad1b4a730f3b06087dfddb4f8961f2ce8a302c | ca7aa979e7059467e158830b76673f5b77a0f5a3 | /Python_codes/p04029/s543214320.py | 9f67a34087f19acbf01862689423ec3c7abeb734 | [] | no_license | Aasthaengg/IBMdataset | 7abb6cbcc4fb03ef5ca68ac64ba460c4a64f8901 | f33f1c5c3b16d0ea8d1f5a7d479ad288bb3f48d8 | refs/heads/main | 2023-04-22T10:22:44.763102 | 2021-05-13T17:27:22 | 2021-05-13T17:27:22 | 367,112,348 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 683 | py | from statistics import median
#import collections
#aa = collections.Counter(a) # list to list || .most_common(2) returns the 2 most frequent items, e.g. a[0][0]
from fractions import gcd
from itertools import combinations # (string,3): choose 3 at a time
from collections import deque
from collections import defaultdict
import bisect
#
# d = m - k[i] - k[j]
# if kk[bisect.bisect_right(kk,d) - 1] == d:
#
#
#
# if plain Python is too slow, submitting with PyPy may pass!!
#
#
import sys
sys.setrecursionlimit(10000000)
mod = 10**9 + 7
def readInts():
return list(map(int,input().split()))
def main():
n = int(input())
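    # total is 1 + 2 + ... + n = n*(n+1)//2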
print((n*(n+1))//2)
if __name__ == '__main__':
main()
| [
"[email protected]"
] | |
3991f67831bc1074ca35abcaf224771ba4a4e056 | fa79a9b5eaa8b7b3cef1941ad3700106c00e57b6 | /function/update/update_novel-info.py | 18e9956da3f52ac2b860e6fa0124e83592367a15 | [
"MIT"
] | permissive | txu2k8/novel-web-server | 2e3899cb77ca6ba5d4d4d65f767c4e0a6aa0df1a | 1b77d637a6229d916b724ca2735afc00c6175368 | refs/heads/main | 2023-01-13T13:30:36.249802 | 2020-11-26T08:35:41 | 2020-11-26T08:35:41 | 316,153,766 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,681 | py | '''
Update the basic info (title, category, read count, author, etc.) of every novel on the Tianya books site
'''
from selenium import webdriver
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.common.by import By
from selenium.webdriver.support import expected_conditions as EC
from selenium.webdriver.common.keys import Keys
import datetime
import time
import pymysql
from pyquery import PyQuery as pq
browser = webdriver.Chrome(executable_path='D:\chromedriver_win32\chromedriver.exe')
wait = WebDriverWait(browser,30) # explicit wait
# Turn to the requested page number
def get_next_page(num):
    url = 'https://book.tianya.cn/html2/allbooks-cat.aspx?cat_bigid=2&cat_id=6' # Modern Urban 31/37 1
    # url = 'https://book.tianya.cn/html2/allbooks-cat.aspx?cat_bigid=2&cat_id=7' # Supernatural Suspense 31/101 2
    # url = 'https://book.tianya.cn/html2/allbooks-cat.aspx?cat_bigid=1&cat_id=1' # Modern Romance 31/118 3
    # url = 'https://book.tianya.cn/html2/allbooks-cat.aspx?cat_bigid=2&cat_id=8' # Workplace/Officialdom 28/27 4 (the site has removed this data)
    # url = 'https://book.tianya.cn/html2/allbooks-cat.aspx?cat_bigid=1&cat_id=5' # Romantic Youth 28/74 5
    # url = 'https://book.tianya.cn/html2/allbooks-cat.aspx?cat_bigid=1&cat_id=2' # Historical Romance 15/14 6
    # url = 'https://book.tianya.cn/html2/allbooks-cat.aspx?cat_bigid=1&cat_id=4' # Girls' Suspense 6/5 7
    # url = 'https://book.tianya.cn/html2/allbooks-cat.aspx?cat_bigid=2&cat_id=10' # Historical Military 31/62 8
    # url = 'https://book.tianya.cn/html2/allbooks-cat.aspx?cat_bigid=2&cat_id=9' # Fantasy 31/111 9
    # url = 'https://book.tianya.cn/html2/allbooks-cat.aspx?cat_bigid=2&cat_id=24' # Wuxia/Xianxia 31/63 10
    # url = 'https://book.tianya.cn/html2/allbooks-cat.aspx?cat_bigid=2&cat_id=25' # Science Fiction 6/5 11
browser.get(url)
try:
        print('\n\nTurning to page %d' % num)
input = wait.until(EC.presence_of_element_located((By.CLASS_NAME, "page")))
input.clear()
input.send_keys(num)
        input.send_keys(Keys.ESCAPE) # submit the entered page number
        wait.until(EC.text_to_be_present_in_element((By.CSS_SELECTOR, "span.TY_view_page > a.on"), str(num))) # check that the matching page number is highlighted
        # time.sleep(6) # wait for the data to finish rendering
html = browser.page_source
return html
except TimeoutError as e:
print(e)
# Parse the page and extract the relevant data
def parse_with_pq(html):
onePage_novel_info = []
doc = pq(html)
for item in doc.find('#list-books').children('.clearfix').items():
novel_info ={
'novel_name': item.find('.mbody .blue').text(),
'author' : item.find('.mhead').remove('.blue').remove('.gray').text(),
'read_num': int(item.find('.clearfix').children().eq(1).remove('.gray').text()),
# 'category': item.find('.clearfix').children().eq(0).remove('.gray').text(),
'novel_type':str(1),
'status': item.find('.clearfix').children().eq(5).remove('.gray').text(),
'id' : item.find('.hide .btn-r').attr('_bid'),
'web_update_time' : item.find('.clearfix').children().eq(4).remove('.gray').text()
}
onePage_novel_info.append(novel_info)
return onePage_novel_info
# Add the spider_time and web_update_time fields
def insert_data(datas):
for data in datas:
data['spider_time'] = datetime.datetime.now().strftime("%Y-%m-%d %H:%M:%S")
return datas
# Insert the data into the database
def save_to_MYSQL(datas):
    # Connect to the database
db = pymysql.connect(host='localhost', user='root', password='test123456', port=3306, db='spiders')
cursor = db.cursor()
sql = "INSERT INTO novel_info(novel_name,author,read_num,novel_type,status,id,spider_time,web_update_time) " \
"values(%s,%s,%s,%s,%s,%s,%s,%s) ON DUPLICATE KEY UPDATE read_num=values(read_num),status=values(status)," \
"spider_time=values(spider_time),web_update_time=values(web_update_time),novel_type=values(novel_type)"
for data in datas:
try:
# print(data['novel_name'])
cursor.execute(sql, (data['novel_name'],data['author'],data['read_num'],data['novel_type'],data['status'],data['id'],data['spider_time'],data['web_update_time']))
db.commit()
            print('Insert/update succeeded',data['novel_name'])
except Exception as e:
            print('Insert failed!!',e)
db.rollback()
if __name__ == '__main__':
for num in range(1,31):
html = get_next_page(num)
datas = parse_with_pq(html)
new_datas = insert_data(datas)
save_to_MYSQL(new_datas)
| [
"[email protected]"
] | |
0fb6a382cc75c4ec7b64a0082fa4369bdb3697f2 | a8769709aeb7299fa3757f0e7bba5c617eb8cfe3 | /lesson-3/k8s/lib/python2.7/site-packages/kubernetes/client/models/v1beta1_http_ingress_rule_value.py | 50ef1b8838ade46c5a7092ec4b5f4fd5d04b3e71 | [
"Apache-2.0"
] | permissive | simox-83/workshop-k8s | 2ac5e8b282bb7c3337acc726a7d972717bf649cc | 04cb18e8b5925a3cfd84ca316952a6cb64960b31 | refs/heads/master | 2020-03-31T20:52:21.421995 | 2018-10-11T14:43:08 | 2018-10-11T14:43:08 | 152,558,678 | 0 | 0 | Apache-2.0 | 2018-10-11T08:37:20 | 2018-10-11T08:37:20 | null | UTF-8 | Python | false | false | 3,338 | py | # coding: utf-8
"""
Kubernetes
No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen)
OpenAPI spec version: v1.11.3
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from pprint import pformat
from six import iteritems
import re
class V1beta1HTTPIngressRuleValue(object):
"""
NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
"""
"""
Attributes:
swagger_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
swagger_types = {
'paths': 'list[V1beta1HTTPIngressPath]'
}
attribute_map = {
'paths': 'paths'
}
def __init__(self, paths=None):
"""
V1beta1HTTPIngressRuleValue - a model defined in Swagger
"""
self._paths = None
self.discriminator = None
self.paths = paths
@property
def paths(self):
"""
Gets the paths of this V1beta1HTTPIngressRuleValue.
A collection of paths that map requests to backends.
:return: The paths of this V1beta1HTTPIngressRuleValue.
:rtype: list[V1beta1HTTPIngressPath]
"""
return self._paths
@paths.setter
def paths(self, paths):
"""
Sets the paths of this V1beta1HTTPIngressRuleValue.
A collection of paths that map requests to backends.
:param paths: The paths of this V1beta1HTTPIngressRuleValue.
:type: list[V1beta1HTTPIngressPath]
"""
if paths is None:
raise ValueError("Invalid value for `paths`, must not be `None`")
self._paths = paths
def to_dict(self):
"""
Returns the model properties as a dict
"""
result = {}
for attr, _ in iteritems(self.swagger_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
return result
def to_str(self):
"""
Returns the string representation of the model
"""
return pformat(self.to_dict())
def __repr__(self):
"""
For `print` and `pprint`
"""
return self.to_str()
def __eq__(self, other):
"""
Returns true if both objects are equal
"""
if not isinstance(other, V1beta1HTTPIngressRuleValue):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""
Returns true if both objects are not equal
"""
return not self == other
| [
"[email protected]"
] | |
d29e7055408af7ae7da5c6ab906e176235637629 | 67fbacb9af9185d2c32968f51ab642d8cc87a505 | /backend/venv/bin/pip | 412bd607e118b840a47da2dcb55830ebdc70dda7 | [] | no_license | HoldenGs/tutorial-repo | 1013d4f237a328b6480d7fcf286e09a4b2f49475 | 4735f6327a706ad1fa5648ee36873a1846b2f339 | refs/heads/main | 2023-03-23T06:18:42.027761 | 2021-03-11T23:34:47 | 2021-03-11T23:34:47 | 344,987,785 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 257 | #!/Users/holden/tutorial-repo/backend/venv/bin/python3
# -*- coding: utf-8 -*-
import re
import sys
from pip._internal.cli.main import main
if __name__ == '__main__':
sys.argv[0] = re.sub(r'(-script\.pyw|\.exe)?$', '', sys.argv[0])
sys.exit(main())
| [
"[email protected]"
] | ||
5c07099b4dfaf777ccdfddb969e1d0d9ca5712b6 | bff3053fcc40995bbd8c8bf251042ada99d15427 | /Logging Advanced - Loggers, Handlers, and Formatters.py | ee7c509f9e76af6a1653d9d0ff50104949bceaab | [] | no_license | iampaavan/Pure_Python | e67213eb42229614517c249b4f5b0a01c71c8ce9 | e488b05ea526ab104ebc76a8e5e621301bed8827 | refs/heads/master | 2020-04-27T23:10:48.482213 | 2019-06-30T19:32:08 | 2019-06-30T19:32:08 | 174,765,589 | 1 | 0 | null | 2019-04-21T01:39:53 | 2019-03-10T01:39:13 | Python | UTF-8 | Python | false | false | 2,891 | py | import logging
import employeelogs
# DEBUG: Detailed information, typically of interest only when diagnosing problems.
# INFO: Confirmation that things are working as expected.
# WARNING: An indication that something unexpected happened, or indicative of some problem in the near future (e.g. ‘disk space low’). The software is still working as expected.
# ERROR: Due to a more serious problem, the software has not been able to perform some function.
# CRITICAL: A serious error, indicating that the program itself may be unable to continue running.
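# Note: without an explicit setLevel(), a logger falls back to the root logger's
# default WARNING threshold, so DEBUG/INFO records would be dropped -- hence the
# logger.setLevel(logging.DEBUG) call below.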
logger = logging.getLogger(__name__)
logger.setLevel(logging.DEBUG)
formatter = logging.Formatter('%(asctime)s: %(name)s: %(message)s')
# file_handler = logging.FileHandler('sample.log')
file_handler = logging.FileHandler('sample_1.log')
file_handler.setLevel(logging.ERROR)
file_handler.setFormatter(formatter)
stream_handler = logging.StreamHandler()
stream_handler.setFormatter(formatter)
logger.addHandler(file_handler)
logger.addHandler(stream_handler)
# logging.basicConfig(filename='sample.log', level=logging.DEBUG, format='%(asctime)s: %(name)s: %(message)s')
def add_function(x, y):
"""Addition Function"""
return x + y
def sub_function(x, y):
"""Subtraction Function"""
return x - y
def mul_function(x, y):
"""Multiplication Function"""
return x * y
def div_function(x, y):
"""Division Function"""
try:
result = x / y
except ZeroDivisionError:
# logger.error('Tried to divide the number by 0. ')
logger.exception('Tried to divide the number by 0. ')
else:
return result
# return x / y
num_1 = 20
# num_2 = 10
num_2 = 0
addition_result = add_function(num_1, num_2)
print(f'Add: {num_1} + {num_2} = {addition_result}')
# logging.debug(f'Add: {num_1} + {num_2} = {addition_result}')
# logging.info(f'Add: {num_1} + {num_2} = {addition_result}')
logger.debug(f'Add: {num_1} + {num_2} = {addition_result}')
subtraction_result = sub_function(num_1, num_2)
print(f'Sub: {num_1} - {num_2} = {subtraction_result}')
# logging.debug(f'Sub: {num_1} - {num_2} = {subtraction_result}')
# logging.info(f'Sub: {num_1} - {num_2} = {subtraction_result}')
logger.debug(f'Sub: {num_1} - {num_2} = {subtraction_result}')
multiplication_result = mul_function(num_1, num_2)
print(f'Mul: {num_1} * {num_2} = {multiplication_result}')
# logging.debug(f'Mul: {num_1} * {num_2} = {multiplication_result}')
# logging.info(f'Mul: {num_1} * {num_2} = {multiplication_result}')
logger.debug(f'Mul: {num_1} * {num_2} = {multiplication_result}')
division_result = div_function(num_1, num_2)
print(f'Div: {num_1} / {num_2} = {division_result}')
# logging.debug(f'Div: {num_1} / {num_2} = {division_result}')
# logging.info(f'Div: {num_1} / {num_2} = {division_result}')
logger.debug(f'Div: {num_1} / {num_2} = {division_result}')
| [
"[email protected]"
] | |
43a68df31cc43515a301af182ef1a918be3f2f14 | fe91e76bfd936cb7062fed96897933f4ed2c7fc2 | /py/api.py | 3f7e593a66546ef48c0b59585fef00203723ee12 | [] | no_license | shixing/CDS | 9ed64940787afe11e8521ec7ea17676f77651bd4 | a4d4c013a3189e9c092d2c3dff9e81c9219cefa8 | refs/heads/master | 2016-09-06T16:45:18.442503 | 2015-08-15T14:54:53 | 2015-08-15T14:54:53 | 21,624,047 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,828 | py | from flask import Flask
from flask.ext.restful import reqparse, abort, Api, Resource
from flask import request,make_response
import sys
import configparser
import urllib
import json
import hashlib
import time
from utils.config import get_config
import numpy as np
from vector.bruteForce import BruteForceSearch
from utils.config import get_config
import vector.LSH
import vector.LSH2gram
import logging
import exp.analysis
from nearpy.distances.angular import AngularDistance
# log
logging.basicConfig(format='%(asctime)s : %(levelname)s : %(message)s', level=logging.INFO)
# get config
config_fn = sys.argv[1]
config = get_config(config_fn)
# lsh2gram
logging.info('H1')
lsh2gram = vector.LSH2gram.LSH2gram()
logging.info('H2')
lsh2gram.load_from_config_light(config)
logging.info('H3')
lsh2gram.engine_2gram.build_permute_index(200,10,500)
# app
logging.info('H4')
app = Flask(__name__)
api = Api(app)
logging.info('H5')
# decompose
class Decompose(Resource):
def get(self,t):
results = None
print request.args
print t
if t == 'q1':
qw = request.args['w']
k = int(request.args['k'])
naive = False
if 'naive' in request.args:
naive = True
print qw,k,naive
results = lsh2gram.query_1_2(qw,k,naive)
if t == 'q2':
qw1 = request.args['w1']
qw2 = request.args['w2']
k = int(request.args['k'])
naive = False
if 'naive' in request.args:
naive = True
results = lsh2gram.query_2_2(qw1,qw2,k,naive)
return make_response(repr(results))
logging.info('H6')
api.add_resource(Decompose,'/api/decompose/<string:t>')
if __name__ == '__main__':
logging.info('H7')
app.run()
logging.info('H8')
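# Example requests (sketch only; assumes the default Flask dev server on port 5000):
#   GET http://127.0.0.1:5000/api/decompose/q1?w=king&k=10
#   GET http://127.0.0.1:5000/api/decompose/q2?w1=new&w2=york&k=5&naive=1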
| [
"[email protected]"
] | |
5b7761b8cced9a76746413cdbbdc596b9522710b | dd6c21308e1cba24658c8ca7a49e2499cd167da6 | /venv/Lib/site-packages/openpyxl/descriptors/nested.py | 490f672e9c9242caa052d95f2a59ffaa5c90ad3a | [
"MIT"
] | permissive | ansonsry/Freshshop | 3a53db4d6d0bf1d6705498869a13a3aa7db6ab8c | 79ab8beb1aa993f6365182c8d3bb478ee4e028f8 | refs/heads/master | 2021-06-20T18:54:08.009409 | 2019-07-26T02:56:55 | 2019-07-26T03:02:27 | 198,931,513 | 0 | 0 | MIT | 2021-03-19T22:33:14 | 2019-07-26T02:23:49 | Python | UTF-8 | Python | false | false | 2,656 | py | from __future__ import absolute_import
# Copyright (c) 2010-2019 openpyxl
"""
Generic serialisable classes
"""
from .base import (
Convertible,
Bool,
Descriptor,
NoneSet,
MinMax,
Set,
Float,
Integer,
String,
Text,
)
from .sequence import Sequence
from openpyxl.compat import safe_string
from openpyxl.xml.functions import Element, localname
class Nested(Descriptor):
nested = True
attribute = "val"
def __set__(self, instance, value):
if hasattr(value, "tag"):
tag = localname(value)
if tag != self.name:
raise ValueError("Tag does not match attribute")
value = self.from_tree(value)
super(Nested, self).__set__(instance, value)
def from_tree(self, node):
return node.get(self.attribute)
def to_tree(self, tagname=None, value=None, namespace=None):
namespace = getattr(self, "namespace", namespace)
if value is not None:
if namespace is not None:
tagname = "{%s}%s" % (namespace, tagname)
value = safe_string(value)
return Element(tagname, {self.attribute:value})
class NestedValue(Nested, Convertible):
"""
Nested tag storing the value on the 'val' attribute
"""
pass
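# Illustration (comment only, not part of openpyxl): a descriptor declared as
# NestedValue(expected_type=int) under the attribute name "sz" round-trips XML
# of the form <sz val="11"/> -- from_tree() reads the "val" attribute and
# to_tree() writes it back.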
class NestedText(NestedValue):
"""
Represents any nested tag with the value as the contents of the tag
"""
def from_tree(self, node):
return node.text
def to_tree(self, tagname=None, value=None, namespace=None):
namespace = getattr(self, "namespace", namespace)
if value is not None:
if namespace is not None:
tagname = "{%s}%s" % (namespace, tagname)
el = Element(tagname)
el.text = safe_string(value)
return el
class NestedFloat(NestedValue, Float):
pass
class NestedInteger(NestedValue, Integer):
pass
class NestedString(NestedValue, String):
pass
class NestedBool(NestedValue, Bool):
def from_tree(self, node):
return node.get("val", True)
class NestedNoneSet(Nested, NoneSet):
pass
class NestedSet(Nested, Set):
pass
class NestedMinMax(Nested, MinMax):
pass
class EmptyTag(Nested, Bool):
"""
Boolean if a tag exists or not.
"""
def from_tree(self, node):
return True
def to_tree(self, tagname=None, value=None, namespace=None):
if value:
namespace = getattr(self, "namespace", namespace)
if namespace is not None:
tagname = "{%s}%s" % (namespace, tagname)
return Element(tagname)
| [
"[email protected]"
] | |
ebbddea20b6e452b551538b9bb64078fb607829f | 13ef33cb9067419fae743be1edb46471374c3a64 | /hrm/cron_job.py | 5ce271862e4a2b39de34798546facb57e0d9b768 | [] | no_license | andrewidya/littleerp | 8c33ad0ee4dac2a85bea4e540b748a47d61f3886 | 0cf8fb1be8ac3c27304807ed7aac7eb0032c2cb6 | refs/heads/master | 2021-01-24T00:42:26.962248 | 2019-07-22T01:53:58 | 2019-07-22T01:53:58 | 68,295,804 | 1 | 1 | null | null | null | null | UTF-8 | Python | false | false | 1,054 | py | from django_cron import CronJobBase, Schedule
from django.utils import timezone
from django.conf import settings
import datetime
from hrm.models import EmployeeContract
class EmployeeContractCronJob(CronJobBase):
RUN_EVERY_MINS = 1
schedule = Schedule(run_every_mins=RUN_EVERY_MINS)
code = 'hrm.employee_contract_cron_job'
def do(self):
print("Checking contract")
print(timezone.now())
print("===========================================================")
warning = timezone.now() + datetime.timedelta(days=settings.MINIERP_SETTINGS['HRM']['recontract_warning'])
contract_list = EmployeeContract.objects.all().filter(end_date__lte=warning.date())
for contract in contract_list:
contract.contract_status = contract.check_contract_status()
contract.save(update_fields=['contract_status'])
print("===========================================================")
print("DONE")
print("===========================================================")
| [
"[email protected]"
] | |
32638c42c6165833ce4f701f734dedd12a335cf7 | f0d713996eb095bcdc701f3fab0a8110b8541cbb | /28aPKtEcWJPMwb9mm_4.py | a6c9ac7ad1ca5e7b223c9eb9f8311b6376d58e43 | [] | no_license | daniel-reich/turbo-robot | feda6c0523bb83ab8954b6d06302bfec5b16ebdf | a7a25c63097674c0a81675eed7e6b763785f1c41 | refs/heads/main | 2023-03-26T01:55:14.210264 | 2021-03-23T16:08:01 | 2021-03-23T16:08:01 | 350,773,815 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,058 | py | """
**Mubashir** needs your help to learn Python Programming. Help him by
modifying a given string `txt` as follows:
* Reverse the string given.
* Replace each letter to its position in the alphabet for example (a = 1, b = 2, c = 3, ...).
* Join the array and convert it to a number.
* Convert the number to binary.
* Convert the string back to a number.
See below example for more understanding :
**modify("hello") ➞ 111001101011101101101010**
"hello" = "olleh"
"olleh" = ['15', '12', '12', '5', '8']
['15', '12', '12', '5', '8'] = 15121258
15121258 = "111001101011101101101010"
"111001101011101101101010" = 111001101011101101101010
### Examples
modify("hello") ➞ 111001101011101101101010
modify("mubashir") ➞ 10110000110010000110011111000111000001
modify("edabit") ➞ 111111110110001110001
### Notes
There are no spaces and the string is lowercase.
"""
def modify(txt):
return int(bin(int(''.join([str(ord(x)-ord('a')+1) for x in txt[::-1]])))[2:])
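# A step-by-step sketch of the same transformation (hypothetical helper, not part
# of the original solution), following the stages listed in the docstring:
def modify_verbose(txt):
    reversed_txt = txt[::-1]                                         # "hello" -> "olleh"
    positions = [str(ord(c) - ord('a') + 1) for c in reversed_txt]   # ['15', '12', '12', '5', '8']
    number = int(''.join(positions))                                 # 15121258
    return int(bin(number)[2:])                                      # 111001101011101101101010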
| [
"[email protected]"
] | |
5a6e8cc5612def0640aceaad72b3d46b018163fc | 197ccfb1b9a2713155efd2b994dc4fda8d38751f | /src/contracts/Test.py | e4c0fd86f4b6631be0f91b05e0a34b8fa06d0a7e | [
"MIT"
] | permissive | stonecoldpat/sprites-python | 39bf8d436e625eabf4511f6e2d7528dc01d4e0e7 | 398abb86f27dcbd5a91b6aad648a06529c029d26 | refs/heads/master | 2020-03-20T09:40:27.468535 | 2018-06-14T10:44:34 | 2018-06-14T10:44:34 | 137,344,692 | 0 | 1 | null | 2018-06-14T10:42:30 | 2018-06-14T10:42:29 | null | UTF-8 | Python | false | false | 371 | py | class Test:
def __init__(self, contract):
self._contract = contract
def get(self):
return self._contract.functions.get()
def getArrays(self):
return self._contract.functions.getArrays()
def getStruct(self):
return self._contract.functions.getStruct()
def incr(self):
return self._contract.functions.incr()
| [
"[email protected]"
] | |
9f0fd0bb22ffa5ad4f70cdc78e5c4a4fede87dfd | ce6fc44470dcb5fca78cdd3349a7be70d75f2e3a | /AtCoder/Beginner 146/F.py | 97ed9139e54db4abaa0a8e3ada55a064fb3ad8c3 | [] | no_license | cormackikkert/competitive-programming | f3fa287fcb74248ba218ecd763f8f6df31d57424 | 3a1200b8ff9b6941c422371961a127d7be8f2e00 | refs/heads/master | 2022-12-17T02:02:40.892608 | 2020-09-20T11:47:15 | 2020-09-20T11:47:15 | 266,775,265 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,838 | py | N, M = map(int, input().split())
s = input()
N += 1
# Max size of tree
tree = [0] * (2 * N);
n = N
# function to build the tree
def build() :
# insert leaf nodes in tree
for i in range(N) :
tree[n + i] = (float('inf'), float('inf'));
# build the tree by calculating parents
for i in range(n - 1, 0, -1) :
tree[i] = min(tree[i << 1], tree[i << 1 | 1])
# function to update a tree node
def updateTreeNode(p, value) :
# set value at position p
tree[p + n] = (value, p);
p = p + n;
# move upward and update parents
i = p;
while i > 1 :
tree[i >> 1] = min(tree[i], tree[i ^ 1]);
i >>= 1;
# function to get the minimum on interval [l, r)
def query(l, r) :
res = (float('inf'), float('inf'));
    # loop to find the minimum in the range
l += n;
r += n;
while l < r :
if (l & 1) :
res = min(res, tree[l]);
l += 1
if (r & 1) :
r -= 1;
res = min(res, tree[r]);
l >>= 1;
r >>= 1
return res;
par = [None for i in range(N)]
build()
updateTreeNode(0, 0)
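# Sketch of the idea: the segment tree holds, for each reachable square i, the
# pair (fewest rolls to reach i, i). Each unblocked square takes the best
# predecessor in the window [i-M, i), and par[] records that choice so the
# sequence of dice rolls can be rebuilt afterwards.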
for i in range(1, N):
if s[i] == "1": continue
r = query(max(0, i - M), i)
par[i] = r[1]
updateTreeNode(i, r[0]+1)
# updateTreeNode(i, query(max(0, i - M), i))
# for k in range(1, M+1):
# if i - k < 0: break
# dp[i] = min(dp[i], (dp[i-k][0], (i - k)))
moves = []
cur = N - 1
if par[cur] == float('inf'):
print(-1)
quit()
try:
while par[cur] != None:
new = par[cur]
moves.append(cur - new)
cur = new
except:
print(-1)
quit()
moves = list(reversed(moves))
print(" ".join(map(str, moves)))
| [
"[email protected]"
] | |
aeeae4d63fce0632d0226ebc30ce1218b88f42cd | 9769c74a7e69a134657ef73dbe3c251bf171b33c | /CoreBuild/ServiceItems/ComCommand/ComCommandItem.py | bbdbca8ae13f37e0e995ff531291766ee8fa4f63 | [] | no_license | caojiaju-2017/SimpleCode | 4bdf506837ebdee905373b87198853a84afebc45 | 9f2aa7fea296d0acaf91b75e03bdaa185d93fe89 | refs/heads/master | 2020-03-19T05:06:50.028584 | 2018-06-25T13:05:22 | 2018-06-25T13:05:22 | 135,901,570 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,011 | py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
from Platform.ItemBase import *
import json
class ComCommandItem(ItemBase):
def __init__(self):
super(ComCommandItem, self).__init__()
self.itemtype = ItemType.Module
self.shapetype = ImageShape.Image100002
self.itemname = "串口操作"
self.iteminfo = "模块提供串口的读写能力"
self.buildConfig()
pass
def getCfgJson(self):
return self.self_to_json()
def checkResult(self):
print("sub class")
def buildConfig(self):
'''
        Interface initialization function
:return:
'''
inputS1 = InputBase()
inputS1.type = InputType.String
self.setInputDefine("param1", inputS1)
inputS2 = InputBase()
inputS2.type = InputType.String
inputS2.inputIndex = 2
self.setInputDefine("param2", inputS2)
outputS = OutputBase()
outputS.type = InputType.Boolean
self.setOutput(outputS)
| [
"[email protected]"
] | |
45c5e3cd5bc85b023d0ec30c5297be2edce48931 | 0fac73e70eeb8e3b8635de8a4eaba1197cd42641 | /shop/migrations/0013_auto_20161218_1533.py | 696a3cbe43f4ecd4a798b9baa098e4109d16141b | [] | no_license | gauraviit1/myshop_aws | 0e6c9d822cbbc6505eb7c7a71654d34591e7b168 | 261b296d79cfdf8fa4cb9105b4e2fe70e864f6a6 | refs/heads/master | 2021-01-19T13:44:12.977253 | 2017-03-03T07:52:58 | 2017-03-03T07:52:58 | 82,444,113 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 433 | py | # -*- coding: utf-8 -*-
# Generated by Django 1.9.2 on 2016-12-18 10:03
from __future__ import unicode_literals
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('shop', '0012_auto_20161218_1516'),
]
operations = [
migrations.AlterModelOptions(
name='attribute',
options={'ordering': ('weight', 'waist_size', 'size')},
),
]
| [
"[email protected]"
] | |
f0632da5f1733d878b113b1a94d7c12fad6b81a5 | 149e9e52304a970ffb256f290fce5f614c9e20c4 | /Python Programming language/DataCampPractice/Personal_programs/MyProjects/Projects1_mean_median_std_correlation.py | 6f838cdb179693d4985c0ae00a5f4d1b9b360ade | [] | no_license | Pasquale-Silv/Improving_Python | 7451e0c423d73a91fa572d44d3e4133b0b4f5c98 | 96b605879810a9ab6c6459913bd366b936e603e4 | refs/heads/master | 2023-06-03T15:00:21.554783 | 2021-06-22T15:26:28 | 2021-06-22T15:26:28 | 351,806,902 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,555 | py | import numpy as np
list1 = [3, 6, 9, 15]
list2 = [5, 9, 8, 22]
np_list1 = np.array(list1)
np_list2 = np.array(list2)
np_mean_list1 = np.mean(np_list1)
np_mean_list2 = np.mean(np_list2)
print(np_mean_list1)
print("Prova media lista 1: " + str((3+6+9+15) / 4))
print(np_mean_list2)
print("Calcolo mediane:")
np_median_lista1 = np.median(np_list1)
np_median_lista2 = np.median(np_list2)
print(np_median_lista1)
print(np_median_lista2)
print("Calcolo deviazione standard:")
np_std_lista1 = np.std(np_list1)
np_std_lista2 = np.std(np_list2)
print(np_std_lista1)
print(np_std_lista2)
if(np_std_lista1 > np_std_lista2):
    print("The first list shows greater variability")
elif(np_std_lista1 < np_std_lista2):
    print("The second list shows greater variability")
elif(np_std_lista1 == np_std_lista2):
    print("The lists show the same variability")
rho = np.corrcoef(np_list1 , np_list2)
print(rho)
rhoSingolo = rho[0,1]
print("\nCorrelazione tra le due liste: " + str(rhoSingolo))
if(rhoSingolo > 0.85):
    print("The correlation is very strong and positive.")
elif(rhoSingolo > 0.6):
    print("The correlation is fairly strong and positive.")
elif(rhoSingolo < -0.85):
    print("The correlation is very strong and negative.")
elif(rhoSingolo < -0.6):
    print("The correlation is fairly strong and negative.")
elif(rhoSingolo == 0):
    print("There is no linear correlation between the lists, or some other kind of relationship exists.")
else:
    print("There is only a weak correlation.")
| [
"[email protected]"
] | |
f62acd7a45e7ecf496fbb62daaa716ffc27b8aca | 4faff6397853f22de36931291765824b0232fc86 | /managers.py | f1af895b387c2b21516202650d63a81b68293cbe | [] | no_license | rcrowther/need | fd331b2202f99e312ce97e16046d387dc253a8c9 | e5a0bc3ac3fa4440de04bce3f603dbf81a1a6225 | refs/heads/master | 2021-09-04T12:45:28.817299 | 2018-01-18T21:08:09 | 2018-01-18T21:08:09 | 107,792,177 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 11,093 | py | import sys
#from whoosh import fields, index
from whoosh.index import open_dir
from whoosh.qparser import QueryParser, SimpleParser
from datetime import datetime, timedelta
import time
from django.db import models
from django.forms.models import model_to_dict
#! auto-init with this data
class BaseManager:
def __init__(self):
self._need_base = None
self._need_index = None
self._whoosh_schema = None
self._schema_fields = None
self.model = None
self.pk_fieldname = None
self.name = None
def contribute_to_class(self, opts):
self._need_base = opts.need_base
self._need_index = opts.need_index
self._whoosh_schema = opts.schema
self._schema_fields = opts.schema_fields
self.model = opts.model
self.pk_fieldname = opts.pk_field.name
#self.name =
#! change schema?
#! file locked version
#! async version
#! fuzzy search
#! stem search
#! delete using fieldname
class Manager(BaseManager):
'''
A basic Whoosh manager.
Every operation is self contained, and tidies after the action.
Note that if multiple threads access the writing, any writing
operation can throw an error.
'''
def __init__(self):
super().__init__()
def bulk_add(self, it):
start = time.time()
ix = open_dir(self._need_base, self._need_index)
end = time.time()
print('opendir', 'took', str(end - start), 'time')
start = time.time()
writer = ix.writer()
end = time.time()
print('writer', 'took', str(end - start), 'time')
for e in it:
# expected inputs to dict with string values
if (issubclass(e.__class__, models.Model)):
e = model_to_dict(e)
data = {f : str(e[f]) for f in self._schema_fields}
            writer.add_document(**data)
writer.commit()
ix.close()
def add(self, data):
'''
Write a document.
Ignores keys not in schema. No data for unprovided schema keys.
@param data object or dict of values.
'''
# expected inputs to dict with string values
if (issubclass(data.__class__, models.Model)):
data = model_to_dict(data)
data = {f : str(data[f]) for f in self._schema_fields}
start = time.time()
ix = open_dir(self._need_base, self._need_index)
end = time.time()
print('opendir', ' took', str(end - start), 'time')
start = time.time()
writer = ix.writer()
end = time.time()
print('writer', ' took', str(end - start), 'time')
writer.add_document(**data)
start = time.time()
writer.commit()
end = time.time()
print('commit', ' took', str(end - start), 'time')
ix.close()
def delete(self, key):
'''
Delete a document.
@param key to match against pk field.
'''
# unusable on non-Model indexes
# will throw error due to self.pk_fieldname?
# assert/except?
# expected inputs to dict with string values
key = str(key)
ix = open_dir(self._need_base, self._need_index)
writer = ix.writer()
writer.delete_by_term(self.pk_fieldname, key, searcher=None)
writer.commit()
ix.close()
def delete_when(self, fieldname, text):
'''
Delete documents.
Match on any key.
@param fieldname key to match against
@param text match value.
'''
ix = open_dir(self._need_base, self._need_index)
writer = ix.writer()
writer.delete_by_term(fieldname, text, searcher=None)
writer.commit()
ix.close()
def merge(self, data):
'''
Merge a document.
Ignores keys not in schema. No data for unprovided schema keys.
Checks for unique keys then matches against parameters.
Slower than add(). Will create if entry does not exist.
@param data object or dict of values.
'''
# "It is safe to use ``update_document`` in place of ``add_document``; if
# there is no existing document to replace, it simply does an add."
# expected inputs to dict with string values
if (issubclass(data.__class__, models.Model)):
data = model_to_dict(data)
data = {f : str(data[f]) for f in self._schema_fields}
ix = open_dir(self._need_base, self._need_index)
writer = ix.writer()
writer.update_document(**data)
writer.commit()
ix.close()
def read(self, fieldnames, query, callback):
start = time.time()
ix = open_dir(self._need_base, self._need_index)
end = time.time()
print('opendir', ' took', str(end - start), 'time')
r = None
with ix.searcher() as searcher:
start = time.time()
#query = QueryParser(field, self._whoosh_schema).parse(query)
query = SimpleParser(fieldnames, self._whoosh_schema).parse(query)
end = time.time()
print('query', ' took', str(end - start), 'time')
callback(searcher.search(query))
#ix.close()
def size(self):
ix = open_dir(self._need_base, self._need_index)
r = ix.doc_count()
ix.close()
return r
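# Rough usage sketch (illustrative only -- the document keys below are assumptions,
# not part of this module). A configured Manager behaves like:
#
#   manager.add({'pk': '1', 'title': 'hello'})          # index one document
#   manager.read(['title'], 'hello', print)             # search and handle the hits
#   manager.merge({'pk': '1', 'title': 'hello again'})  # update-or-add
#   manager.delete('1')                                 # remove by the pk field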
class ManagerManager(Manager):
def clear(self):
'''
Empty the index.
'''
self.ix.storage.clean()
def optimize(self):
self.ix.optimize()
def load(self):
ix = open_dir(self._need_base, self._need_index)
writer = ix.writer()
for o in self.model.objects.all():
data = dict([(fn, str(getattr(o, fn))) for fn in self._schema_fields])
print(str(data))
writer.add_document(**data)
writer.commit()
ix.close()
import threading
# Pointer to the module object instance, for module-wide storage.
# https://stackoverflow.com/questions/1977362/how-to-create-module-wide-variables-in-python#1978076
this = sys.modules[__name__]
this.blocking_lock = None
# map of path to file_descriptor (whoosh index)
this.ix_registry = {}
class RegistryInfo():
def __init__(self, directory, lock):
self.directory = directory
self.lock = lock
def assert_index_registry(base, index):
path = "{0}_{1}".format(base, index)
if (path not in this.ix_registry):
this.ix_registry[path] = RegistryInfo(open_dir(base, index), threading.Lock())
return this.ix_registry[path]
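# Note: because of the module-level registry above, every manager created for the
# same (base, index) pair shares one open Whoosh index and one lock, rather than
# re-opening the directory per instance.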
class BlockingManagerManager(Manager):
def contribute_to_class(self, opts):
super().contribute_to_class(opts)
#self.threadLock = threading.Lock()
#self.ix = open_dir(self._need_base, self._need_index)
info = assert_index_registry(self._need_base, self._need_index)
self.ix = info.directory
self.threadLock = info.lock
def clear(self):
'''
Empty the index.
'''
self.threadLock.acquire()
#On fileStorage and RAMStorage, clean()
# Storage. Can only do on Filestorage.
#ix.storage.destroy()
self.ix.storage.clean()
self.threadLock.release()
def optimize(self):
self.threadLock.acquire()
self.ix.optimize()
self.threadLock.release()
class BlockingManager(Manager):
'''
A basic Whoosh manager.
Every operation is self contained, and tidies after the action.
The operations are blocking.
'''
def __init__(self):
super().__init__()
def contribute_to_class(self, opts):
super().contribute_to_class(opts)
self.threadLock = threading.Lock()
self.ix = open_dir(self._need_base, self._need_index)
def bulk_add(self, it):
        def to_dict(data):
            # expected inputs to dict with string values
            if (issubclass(data.__class__, models.Model)):
                data = model_to_dict(data)
            return {f : str(data[f]) for f in self._schema_fields}
        it = [to_dict(data) for data in it]
self.threadLock.acquire()
writer = self.ix.writer()
self.threadLock.release()
for e in it:
            writer.add_document(**e)
writer.commit()
def add(self, data):
'''
Write a document.
Ignores keys not in schema. No data for unprovided schema keys.
@param data object or dict of values.
'''
# expected inputs to dict with string values
if (issubclass(data.__class__, models.Model)):
data = model_to_dict(data)
data = {f : str(data[f]) for f in self._schema_fields}
start = time.time()
self.threadLock.acquire()
end = time.time()
        print('acquire', ' took', str(end - start), 'time')
writer = self.ix.writer()
self.threadLock.release()
writer.add_document(**data)
writer.commit()
def delete(self, key):
'''
Delete a document.
@param key to match against pk field.
'''
key = str(key)
self.threadLock.acquire()
writer = self.ix.writer()
self.threadLock.release()
writer.delete_by_term(self.pk_fieldname, key, searcher=None)
writer.commit()
def delete_when(self, fieldname, text):
'''
Delete documents.
Match on any key.
@param fieldname key to match against
@param text match value.
'''
self.threadLock.acquire()
writer = self.ix.writer()
self.threadLock.release()
writer.delete_by_term(fieldname, text, searcher=None)
writer.commit()
    def merge(self, data):
'''
Merge a document.
Ignores keys not in schema. No data for unprovided schema keys.
Checks for unique keys then matches against parameters.
Slower than add(). Will create if entry does not exist.
@param data object or dict of values.
'''
# expected inputs to dict with string values
if (issubclass(data.__class__, models.Model)):
data = model_to_dict(data)
data = {f : str(data[f]) for f in self._schema_fields}
self.threadLock.acquire()
writer = self.ix.writer()
self.threadLock.release()
writer.update_document(**data)
writer.commit()
def read(self, fieldnames, query, callback):
r = None
with self.ix.searcher() as searcher:
start = time.time()
query = SimpleParser(fieldnames, self._whoosh_schema).parse(query)
end = time.time()
print('query', ' took', str(end - start), 'time')
callback(searcher.search(query))
def size(self):
r = self.ix.doc_count()
return r
| [
"[email protected]"
] | |
edf3569da3983da953bb7449f015668772156b34 | a2f6e449e6ec6bf54dda5e4bef82ba75e7af262c | /venv/Lib/site-packages/nltk/lm/__init__.py | 5911f5b8057f85608708dfbffc1ab6ca16d731ee | [] | no_license | mylonabusiness28/Final-Year-Project- | e4b79ccce6c19a371cac63c7a4ff431d6e26e38f | 68455795be7902b4032ee1f145258232212cc639 | refs/heads/main | 2023-07-08T21:43:49.300370 | 2021-06-05T12:34:16 | 2021-06-05T12:34:16 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 129 | py | version https://git-lfs.github.com/spec/v1
oid sha256:9d7a198b8f9b3a257beb02a331dbe48e28ed7deef6b7c199766958dba6bf362f
size 7695
| [
"[email protected]"
] | |
d46665a79cfdfa452ad3b320f43b4685085f7948 | 226c99d29ac089f9a9581983dd6020a267599221 | /chap07/beatiful_soup_weather.py | d1e53dafc5f28d6d51930fd50b51ea2ce1ac1111 | [] | no_license | azegun/python_study | c1b265abb41172609d144d4ba331c920ac8b9312 | 520f1f7bc83771f74f59304c66223f2c06fcf285 | refs/heads/master | 2023-05-11T16:32:23.740742 | 2021-06-10T01:03:33 | 2021-06-10T01:03:33 | 368,451,889 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 656 | py | # 모듈을 읽어 들입니다.
from urllib import request
from bs4 import BeautifulSoup
# Read the nationwide weather feed from the KMA (Korea Meteorological Administration) with urlopen().
target = request.urlopen("http://www.kma.go.kr/weather/forecast/mid-term-rss3.jsp?stnId=108")
# Parse the web page with BeautifulSoup.
soup = BeautifulSoup(target, "html.parser")
for location in soup.select("location"):
print("도시 : ", location.select_one("city").string)
print("날씨 : ", location.select_one("wf").string)
print("최저기온 : ", location.select_one("tmn").string)
print("최고기온 : ", location.select_one("tmx").string)
print()
| [
"[email protected]"
] | |
f80a86d96df3a46c6e0b5d45fb32532e8de1387f | acb8e84e3b9c987fcab341f799f41d5a5ec4d587 | /langs/1/b3o.py | 7edea5f63eb53f547a2daf44b9caeb9348b4927e | [] | no_license | G4te-Keep3r/HowdyHackers | 46bfad63eafe5ac515da363e1c75fa6f4b9bca32 | fb6d391aaecb60ab5c4650d4ae2ddd599fd85db2 | refs/heads/master | 2020-08-01T12:08:10.782018 | 2016-11-13T20:45:50 | 2016-11-13T20:45:50 | 73,624,224 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 486 | py | import sys
def printFunction(lineRemaining):
if lineRemaining[0] == '"' and lineRemaining[-1] == '"':
if len(lineRemaining) > 2:
#data to print
lineRemaining = lineRemaining[1:-1]
print ' '.join(lineRemaining)
else:
print
def main(fileName):
with open(fileName) as f:
for line in f:
data = line.split()
if data[0] == 'b3O':
printFunction(data[1:])
else:
print 'ERROR'
return
if __name__ == '__main__':
main(sys.argv[1]) | [
"[email protected]"
] | |
05a724d4fdcff756091a51829c32237081499db4 | 23c4f6d8a2a6b97077628c2a012b2b402c816d91 | /LeetCode算法题/LCP_01_猜数字/猜数字.py | 80a344b4bcf26b2aeff3e905478392845ce83c5b | [] | no_license | exueyuanAlgorithm/AlgorithmDemo | 7ef6ff8104e8da5a81037795184115fb0ac8ca9a | d34d4b592d05e9e0e724d8834eaf9587a64c5034 | refs/heads/master | 2023-07-16T19:00:05.664780 | 2021-09-04T11:31:07 | 2021-09-04T11:31:07 | 277,327,574 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 311 | py | class Solution(object):
def game(self, guess, answer):
"""
:type guess: List[int]
:type answer: List[int]
:rtype: int
"""
dui = 0
for i,b in enumerate(guess):
a = answer[i]
if a == b:
dui += 1
return dui | [
"[email protected]"
] | |
4b0d5eaa29a140e894f7a108449e8a07a3724c43 | 9715663a819daf3d3f05159c97a87165853d7661 | /backend/manage.py | a9df4e5da8aa1b462f5cdcb4cca8a99b6ad561a0 | [] | no_license | crowdbotics-apps/personal-app-templa-17949 | 728e039dcde1c19885637f2a3b044f8172a43867 | 5b480747cd36402e513e29657736f26a2970d87c | refs/heads/master | 2022-09-30T14:21:02.569047 | 2020-06-08T21:27:34 | 2020-06-08T21:27:34 | 270,827,154 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 659 | py | #!/usr/bin/env python
"""Django's command-line utility for administrative tasks."""
import os
import sys
def main():
os.environ.setdefault(
"DJANGO_SETTINGS_MODULE", "personal_app_templa_17949.settings"
)
try:
from django.core.management import execute_from_command_line
except ImportError as exc:
raise ImportError(
"Couldn't import Django. Are you sure it's installed and "
"available on your PYTHONPATH environment variable? Did you "
"forget to activate a virtual environment?"
) from exc
execute_from_command_line(sys.argv)
if __name__ == "__main__":
main()
| [
"[email protected]"
] | |
c98de0cf945e97e117c47bac2d33c22d71a8e9ff | 0e1813197ae5e4d9ca0709089002a48249c2cc1f | /UnitTest/test_phonebook.py | 4b368e4de6f6c6566c8635f9225740f98bf4d31a | [] | no_license | sockduct/general | 55c1ac766d61b66463ae89c7f6fd0a748fdd79ad | f885352dc5b402cbc2488e66d37b421d5a4f82f0 | refs/heads/master | 2021-01-17T16:45:06.968332 | 2017-06-26T14:52:11 | 2017-06-26T14:52:11 | 95,455,423 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,845 | py | ####################################################################################################
# Unit Testing with Python - Module 1
####################################################################################################
from phonebook import Phonebook
import unittest
class PhonebookTest(unittest.TestCase):
# Test Fixture
# Setup (half of fixture) - run before each test method
def setUp(self):
self.phonebook = Phonebook()
def test_lookup_entry_by_name(self):
self.phonebook.add('Bob', '12345')
self.assertEqual('12345',self.phonebook.lookup('Bob'))
def test_missing_entry_raises_KeyError(self):
with self.assertRaises(KeyError):
self.phonebook.lookup('missing')
def test_empty_phonebook_is_consistent(self):
self.assertTrue(self.phonebook.is_consistent())
# Example of what not to do - split up into workable tests following this function
@unittest.skip('poor example')
def test_is_consistent(self):
self.assertTrue(self.phonebook.is_consistent())
self.phonebook.add('Bob', '12345')
self.assertTrue(self.phonebook.is_consistent())
self.phonebook.add('Mary', '012345')
self.assertTrue(self.phonebook.is_consistent())
# Not a good way to write test cases
# Once assertion fails, rest of test case is abandoned
self.phonebook.add('Sue', '12345') # identical to Bob
self.assertFalse(self.phonebook.is_consistent())
self.phonebook.add('Sue', '123') # prefix of Bob
self.assertFalse(self.phonebook.is_consistent())
# These test cases have much better names - each name is descriptive of the test
# Each of these test cases are structured - arrange, act, assert
# Arrange - put entries into phonebook
# Act - call is_consistent()
# Assert - assertTrue|assertFalse about results
def test_phonebook_with_normal_entires_is_consistent(self):
self.phonebook.add('Bob', '12345')
self.phonebook.add('Mary', '012345')
self.assertTrue(self.phonebook.is_consistent())
def test_phonebook_with_duplicate_entries_is_inconsistent(self):
self.phonebook.add('Bob', '12345')
self.phonebook.add('Mary', '12345')
self.assertFalse(self.phonebook.is_consistent())
def test_phonebook_with_numbers_that_prefix_one_another_is_inconsistent(self):
self.phonebook.add('Bob', '12345')
self.phonebook.add('Mary', '123')
self.assertFalse(self.phonebook.is_consistent())
def test_phonebook_adds_names_and_numbers(self):
self.phonebook.add('Sue', '12345')
self.assertIn('Sue', self.phonebook.get_names())
self.assertIn('12345', self.phonebook.get_numbers())
if __name__ == '__main__':
unittest.main()
| [
"[email protected]"
] | |
6fc62e33dedb16842586b86715afb347e7ce39aa | 9ba2b89dbdeefa54c6b6935d772ce36be7b05292 | /devilry/devilry_gradingsystem/tests/views/test_download_feedbackdraftfile.py | c840023a5866328f619fed3370b7ae85504fe7d5 | [] | no_license | kristtuv/devilry-django | 0ffcd9d2005cad5e51f6377484a83d778d65050f | dd2a4e5a887b28268f3a45cc3b25a40c0e313fd3 | refs/heads/master | 2020-04-27T06:02:45.518765 | 2019-02-15T13:28:20 | 2019-02-15T13:28:20 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,700 | py | import unittest
from django.core.files.base import ContentFile
from django.core.urlresolvers import reverse
from django.test import TestCase
from django.utils.http import urlencode
from devilry.project.develop.testhelpers.corebuilder import AssignmentGroupBuilder
from devilry.project.develop.testhelpers.corebuilder import UserBuilder
from devilry.devilry_gradingsystem.models import FeedbackDraftFile
@unittest.skip('devilry_gradingsystem will most likely be replaced in 3.0')
class TestDownloadFeedbackDraftFileView(TestCase):
def setUp(self):
self.testexaminer = UserBuilder('testexaminer').user
self.deliverybuilder = AssignmentGroupBuilder\
.quickadd_ducku_duck1010_active_assignment1_group()\
.add_examiners(self.testexaminer)\
.add_deadline_in_x_weeks(weeks=1)\
.add_delivery_x_hours_before_deadline(hours=1)
self.draftfile = FeedbackDraftFile(
delivery=self.deliverybuilder.delivery,
saved_by=self.testexaminer,
filename='test.txt')
self.draftfile.file.save('test.txt', ContentFile('A testfile'))
def _login(self, user):
self.client.login(username=user.shortname, password='test')
def _get_as(self, user, pk, **querystring):
self._login(user)
url = reverse('devilry_gradingsystem_feedbackdraftfile', kwargs={
'pk': pk
})
if querystring:
url = '{}?{}'.format(url, urlencode(querystring))
return self.client.get(url)
def test_403_not_owner_or_superuser(self):
response = self._get_as(UserBuilder('otheruser').user, self.draftfile.id)
self.assertEquals(response.status_code, 403)
def test_404_not_found(self):
response = self._get_as(self.testexaminer, 10001)
self.assertEquals(response.status_code, 404)
def _test_as(self, user):
response = self._get_as(user, self.draftfile.id)
self.assertEquals(response.status_code, 200)
self.assertEquals(response['content-type'], 'text/plain')
self.assertEquals(response.content, 'A testfile')
self.assertNotIn('content-disposition', response)
def test_ok_as_owner(self):
self._test_as(self.testexaminer)
def test_ok_as_superuser(self):
self._test_as(UserBuilder('superuser', is_superuser=True).user)
def test_download_content_disposition(self):
response = self._get_as(self.testexaminer, self.draftfile.id, download='yes')
self.assertEquals(response.status_code, 200)
self.assertIn('content-disposition', response)
self.assertEquals(response['content-disposition'], 'attachment; filename=test.txt')
| [
"[email protected]"
] | |
9b759138288b7fa6e8de1955f6a5c7a82ff1f46b | 19fae3f001961b87f6d12f8a2a9c6be8fec9138e | /pyspeckit/mpfit/__init__.py | 0dbe0dc30f8eb42e426964ef26c4f60e47299463 | [
"MIT"
] | permissive | myrithub/pyspeckit | 22070974f86fbb78fdffb5cb21e219678c5346fb | 4617d77ac24fb33738422aa36a72cb00857e0dbd | refs/heads/master | 2020-05-29T14:10:36.204422 | 2015-09-08T18:04:54 | 2015-09-08T18:04:54 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 39 | py | from mpfit import mpfit,mpfitException
| [
"[email protected]"
] | |
be425247d07ffc9e54eca102ea842a6821ec10c2 | 711756b796d68035dc6a39060515200d1d37a274 | /output_cog/optimized_15127.py | a18f7ed45ea3963fbc59cfa5e45f73f16be14d69 | [] | no_license | batxes/exocyst_scripts | 8b109c279c93dd68c1d55ed64ad3cca93e3c95ca | a6c487d5053b9b67db22c59865e4ef2417e53030 | refs/heads/master | 2020-06-16T20:16:24.840725 | 2016-11-30T16:23:16 | 2016-11-30T16:23:16 | 75,075,164 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 10,847 | py | import _surface
import chimera
try:
import chimera.runCommand
except:
pass
from VolumePath import markerset as ms
try:
from VolumePath import Marker_Set, Link
new_marker_set=Marker_Set
except:
from VolumePath import volume_path_dialog
d= volume_path_dialog(True)
new_marker_set= d.new_marker_set
marker_sets={}
surf_sets={}
if "Cog2_GFPN" not in marker_sets:
s=new_marker_set('Cog2_GFPN')
marker_sets["Cog2_GFPN"]=s
s= marker_sets["Cog2_GFPN"]
mark=s.place_marker((450.799, 555.407, 512.365), (0.89, 0.1, 0.1), 18.4716)
if "Cog2_0" not in marker_sets:
s=new_marker_set('Cog2_0')
marker_sets["Cog2_0"]=s
s= marker_sets["Cog2_0"]
mark=s.place_marker((401.759, 536.467, 468.165), (0.89, 0.1, 0.1), 17.1475)
if "Cog2_1" not in marker_sets:
s=new_marker_set('Cog2_1')
marker_sets["Cog2_1"]=s
s= marker_sets["Cog2_1"]
mark=s.place_marker((352.253, 509.736, 408.547), (0.89, 0.1, 0.1), 17.1475)
if "Cog2_GFPC" not in marker_sets:
s=new_marker_set('Cog2_GFPC')
marker_sets["Cog2_GFPC"]=s
s= marker_sets["Cog2_GFPC"]
mark=s.place_marker((487.507, 474.435, 407.354), (0.89, 0.1, 0.1), 18.4716)
if "Cog2_Anch" not in marker_sets:
s=new_marker_set('Cog2_Anch')
marker_sets["Cog2_Anch"]=s
s= marker_sets["Cog2_Anch"]
mark=s.place_marker((203.362, 472.932, 289.774), (0.89, 0.1, 0.1), 18.4716)
if "Cog3_GFPN" not in marker_sets:
s=new_marker_set('Cog3_GFPN')
marker_sets["Cog3_GFPN"]=s
s= marker_sets["Cog3_GFPN"]
mark=s.place_marker((420.641, 550.665, 477.445), (1, 1, 0), 18.4716)
if "Cog3_0" not in marker_sets:
s=new_marker_set('Cog3_0')
marker_sets["Cog3_0"]=s
s= marker_sets["Cog3_0"]
mark=s.place_marker((421.557, 551.473, 477.933), (1, 1, 0.2), 17.1475)
if "Cog3_1" not in marker_sets:
s=new_marker_set('Cog3_1')
marker_sets["Cog3_1"]=s
s= marker_sets["Cog3_1"]
mark=s.place_marker((438.534, 567.96, 493.011), (1, 1, 0.2), 17.1475)
if "Cog3_2" not in marker_sets:
s=new_marker_set('Cog3_2')
marker_sets["Cog3_2"]=s
s= marker_sets["Cog3_2"]
mark=s.place_marker((440.88, 593.421, 481.935), (1, 1, 0.2), 17.1475)
if "Cog3_3" not in marker_sets:
s=new_marker_set('Cog3_3')
marker_sets["Cog3_3"]=s
s= marker_sets["Cog3_3"]
mark=s.place_marker((452.271, 617.411, 472.281), (1, 1, 0.2), 17.1475)
if "Cog3_4" not in marker_sets:
s=new_marker_set('Cog3_4')
marker_sets["Cog3_4"]=s
s= marker_sets["Cog3_4"]
mark=s.place_marker((465.457, 638.486, 459.779), (1, 1, 0.2), 17.1475)
if "Cog3_5" not in marker_sets:
s=new_marker_set('Cog3_5')
marker_sets["Cog3_5"]=s
s= marker_sets["Cog3_5"]
mark=s.place_marker((455.904, 626.56, 436.053), (1, 1, 0.2), 17.1475)
if "Cog3_GFPC" not in marker_sets:
s=new_marker_set('Cog3_GFPC')
marker_sets["Cog3_GFPC"]=s
s= marker_sets["Cog3_GFPC"]
mark=s.place_marker((425.153, 551.311, 505.589), (1, 1, 0.4), 18.4716)
if "Cog3_Anch" not in marker_sets:
s=new_marker_set('Cog3_Anch')
marker_sets["Cog3_Anch"]=s
s= marker_sets["Cog3_Anch"]
mark=s.place_marker((479.67, 704.301, 367.482), (1, 1, 0.4), 18.4716)
if "Cog4_GFPN" not in marker_sets:
s=new_marker_set('Cog4_GFPN')
marker_sets["Cog4_GFPN"]=s
s= marker_sets["Cog4_GFPN"]
mark=s.place_marker((308.918, 625.491, 293.421), (0, 0, 0.8), 18.4716)
if "Cog4_0" not in marker_sets:
s=new_marker_set('Cog4_0')
marker_sets["Cog4_0"]=s
s= marker_sets["Cog4_0"]
mark=s.place_marker((308.918, 625.491, 293.421), (0, 0, 0.8), 17.1475)
if "Cog4_1" not in marker_sets:
s=new_marker_set('Cog4_1')
marker_sets["Cog4_1"]=s
s= marker_sets["Cog4_1"]
mark=s.place_marker((331.354, 616.12, 309.604), (0, 0, 0.8), 17.1475)
if "Cog4_2" not in marker_sets:
s=new_marker_set('Cog4_2')
marker_sets["Cog4_2"]=s
s= marker_sets["Cog4_2"]
mark=s.place_marker((348.391, 601.765, 328.781), (0, 0, 0.8), 17.1475)
if "Cog4_3" not in marker_sets:
s=new_marker_set('Cog4_3')
marker_sets["Cog4_3"]=s
s= marker_sets["Cog4_3"]
mark=s.place_marker((361.292, 590.798, 352.338), (0, 0, 0.8), 17.1475)
if "Cog4_4" not in marker_sets:
s=new_marker_set('Cog4_4')
marker_sets["Cog4_4"]=s
s= marker_sets["Cog4_4"]
mark=s.place_marker((368.23, 581.342, 378.954), (0, 0, 0.8), 17.1475)
if "Cog4_5" not in marker_sets:
s=new_marker_set('Cog4_5')
marker_sets["Cog4_5"]=s
s= marker_sets["Cog4_5"]
mark=s.place_marker((371.182, 572.657, 406.354), (0, 0, 0.8), 17.1475)
if "Cog4_6" not in marker_sets:
s=new_marker_set('Cog4_6')
marker_sets["Cog4_6"]=s
s= marker_sets["Cog4_6"]
mark=s.place_marker((372.579, 567.985, 434.764), (0, 0, 0.8), 17.1475)
if "Cog4_GFPC" not in marker_sets:
s=new_marker_set('Cog4_GFPC')
marker_sets["Cog4_GFPC"]=s
s= marker_sets["Cog4_GFPC"]
mark=s.place_marker((425.201, 697.742, 218.01), (0, 0, 0.8), 18.4716)
if "Cog4_Anch" not in marker_sets:
s=new_marker_set('Cog4_Anch')
marker_sets["Cog4_Anch"]=s
s= marker_sets["Cog4_Anch"]
mark=s.place_marker((327.681, 441.746, 657.455), (0, 0, 0.8), 18.4716)
if "Cog5_GFPN" not in marker_sets:
s=new_marker_set('Cog5_GFPN')
marker_sets["Cog5_GFPN"]=s
s= marker_sets["Cog5_GFPN"]
mark=s.place_marker((337.207, 547.461, 433.662), (0.3, 0.3, 0.3), 18.4716)
if "Cog5_0" not in marker_sets:
s=new_marker_set('Cog5_0')
marker_sets["Cog5_0"]=s
s= marker_sets["Cog5_0"]
mark=s.place_marker((337.207, 547.461, 433.662), (0.3, 0.3, 0.3), 17.1475)
if "Cog5_1" not in marker_sets:
s=new_marker_set('Cog5_1')
marker_sets["Cog5_1"]=s
s= marker_sets["Cog5_1"]
mark=s.place_marker((362.887, 534.305, 429.463), (0.3, 0.3, 0.3), 17.1475)
if "Cog5_2" not in marker_sets:
s=new_marker_set('Cog5_2')
marker_sets["Cog5_2"]=s
s= marker_sets["Cog5_2"]
mark=s.place_marker((383.013, 521.549, 412.551), (0.3, 0.3, 0.3), 17.1475)
if "Cog5_3" not in marker_sets:
s=new_marker_set('Cog5_3')
marker_sets["Cog5_3"]=s
s= marker_sets["Cog5_3"]
mark=s.place_marker((382.008, 493.477, 405.329), (0.3, 0.3, 0.3), 17.1475)
if "Cog5_GFPC" not in marker_sets:
s=new_marker_set('Cog5_GFPC')
marker_sets["Cog5_GFPC"]=s
s= marker_sets["Cog5_GFPC"]
mark=s.place_marker((472.019, 505.695, 490.262), (0.3, 0.3, 0.3), 18.4716)
if "Cog5_Anch" not in marker_sets:
s=new_marker_set('Cog5_Anch')
marker_sets["Cog5_Anch"]=s
s= marker_sets["Cog5_Anch"]
mark=s.place_marker((291.711, 473.021, 320.626), (0.3, 0.3, 0.3), 18.4716)
if "Cog6_GFPN" not in marker_sets:
s=new_marker_set('Cog6_GFPN')
marker_sets["Cog6_GFPN"]=s
s= marker_sets["Cog6_GFPN"]
mark=s.place_marker((425.627, 520.424, 467.975), (0.21, 0.49, 0.72), 18.4716)
if "Cog6_0" not in marker_sets:
s=new_marker_set('Cog6_0')
marker_sets["Cog6_0"]=s
s= marker_sets["Cog6_0"]
mark=s.place_marker((425.686, 520.345, 467.985), (0.21, 0.49, 0.72), 17.1475)
if "Cog6_1" not in marker_sets:
s=new_marker_set('Cog6_1')
marker_sets["Cog6_1"]=s
s= marker_sets["Cog6_1"]
mark=s.place_marker((438.679, 533.455, 446.837), (0.21, 0.49, 0.72), 17.1475)
if "Cog6_2" not in marker_sets:
s=new_marker_set('Cog6_2')
marker_sets["Cog6_2"]=s
s= marker_sets["Cog6_2"]
mark=s.place_marker((457.366, 538.94, 466.932), (0.21, 0.49, 0.72), 17.1475)
if "Cog6_3" not in marker_sets:
s=new_marker_set('Cog6_3')
marker_sets["Cog6_3"]=s
s= marker_sets["Cog6_3"]
mark=s.place_marker((450.803, 565.477, 460.752), (0.21, 0.49, 0.72), 17.1475)
if "Cog6_4" not in marker_sets:
s=new_marker_set('Cog6_4')
marker_sets["Cog6_4"]=s
s= marker_sets["Cog6_4"]
mark=s.place_marker((439.705, 585.025, 444.243), (0.21, 0.49, 0.72), 17.1475)
if "Cog6_5" not in marker_sets:
s=new_marker_set('Cog6_5')
marker_sets["Cog6_5"]=s
s= marker_sets["Cog6_5"]
mark=s.place_marker((426.62, 609.957, 445.857), (0.21, 0.49, 0.72), 17.1475)
if "Cog6_6" not in marker_sets:
s=new_marker_set('Cog6_6')
marker_sets["Cog6_6"]=s
s= marker_sets["Cog6_6"]
mark=s.place_marker((429.987, 636.645, 453.876), (0.21, 0.49, 0.72), 17.1475)
if "Cog6_GFPC" not in marker_sets:
s=new_marker_set('Cog6_GFPC')
marker_sets["Cog6_GFPC"]=s
s= marker_sets["Cog6_GFPC"]
mark=s.place_marker((363.516, 596.62, 490.496), (0.21, 0.49, 0.72), 18.4716)
if "Cog6_Anch" not in marker_sets:
s=new_marker_set('Cog6_Anch')
marker_sets["Cog6_Anch"]=s
s= marker_sets["Cog6_Anch"]
mark=s.place_marker((499.047, 673.248, 414.804), (0.21, 0.49, 0.72), 18.4716)
if "Cog7_GFPN" not in marker_sets:
s=new_marker_set('Cog7_GFPN')
marker_sets["Cog7_GFPN"]=s
s= marker_sets["Cog7_GFPN"]
mark=s.place_marker((348.761, 553.249, 496.295), (0.7, 0.7, 0.7), 18.4716)
if "Cog7_0" not in marker_sets:
s=new_marker_set('Cog7_0')
marker_sets["Cog7_0"]=s
s= marker_sets["Cog7_0"]
mark=s.place_marker((361.596, 537.911, 478.655), (0.7, 0.7, 0.7), 17.1475)
if "Cog7_1" not in marker_sets:
s=new_marker_set('Cog7_1')
marker_sets["Cog7_1"]=s
s= marker_sets["Cog7_1"]
mark=s.place_marker((390.044, 504.343, 440.828), (0.7, 0.7, 0.7), 17.1475)
if "Cog7_2" not in marker_sets:
s=new_marker_set('Cog7_2')
marker_sets["Cog7_2"]=s
s= marker_sets["Cog7_2"]
mark=s.place_marker((418.125, 469.937, 404.705), (0.7, 0.7, 0.7), 17.1475)
if "Cog7_GFPC" not in marker_sets:
s=new_marker_set('Cog7_GFPC')
marker_sets["Cog7_GFPC"]=s
s= marker_sets["Cog7_GFPC"]
mark=s.place_marker((457.07, 445.969, 471.682), (0.7, 0.7, 0.7), 18.4716)
if "Cog7_Anch" not in marker_sets:
s=new_marker_set('Cog7_Anch')
marker_sets["Cog7_Anch"]=s
s= marker_sets["Cog7_Anch"]
mark=s.place_marker((425.984, 442.675, 304.657), (0.7, 0.7, 0.7), 18.4716)
if "Cog8_0" not in marker_sets:
s=new_marker_set('Cog8_0')
marker_sets["Cog8_0"]=s
s= marker_sets["Cog8_0"]
mark=s.place_marker((435.527, 497.708, 533.011), (1, 0.5, 0), 17.1475)
if "Cog8_1" not in marker_sets:
s=new_marker_set('Cog8_1')
marker_sets["Cog8_1"]=s
s= marker_sets["Cog8_1"]
mark=s.place_marker((418.03, 494.648, 510.541), (1, 0.5, 0), 17.1475)
if "Cog8_2" not in marker_sets:
s=new_marker_set('Cog8_2')
marker_sets["Cog8_2"]=s
s= marker_sets["Cog8_2"]
mark=s.place_marker((404.798, 496.797, 485.006), (1, 0.5, 0), 17.1475)
if "Cog8_3" not in marker_sets:
s=new_marker_set('Cog8_3')
marker_sets["Cog8_3"]=s
s= marker_sets["Cog8_3"]
mark=s.place_marker((381.53, 488.388, 468.998), (1, 0.5, 0), 17.1475)
if "Cog8_4" not in marker_sets:
s=new_marker_set('Cog8_4')
marker_sets["Cog8_4"]=s
s= marker_sets["Cog8_4"]
mark=s.place_marker((358.978, 482.145, 449.956), (1, 0.5, 0), 17.1475)
if "Cog8_5" not in marker_sets:
s=new_marker_set('Cog8_5')
marker_sets["Cog8_5"]=s
s= marker_sets["Cog8_5"]
mark=s.place_marker((334.006, 472.549, 435.137), (1, 0.5, 0), 17.1475)
if "Cog8_GFPC" not in marker_sets:
s=new_marker_set('Cog8_GFPC')
marker_sets["Cog8_GFPC"]=s
s= marker_sets["Cog8_GFPC"]
mark=s.place_marker((380.294, 519.514, 476.059), (1, 0.6, 0.1), 18.4716)
if "Cog8_Anch" not in marker_sets:
s=new_marker_set('Cog8_Anch')
marker_sets["Cog8_Anch"]=s
s= marker_sets["Cog8_Anch"]
mark=s.place_marker((277.621, 425.683, 391.717), (1, 0.6, 0.1), 18.4716)
for k in surf_sets.keys():
chimera.openModels.add([surf_sets[k]])
| [
"[email protected]"
] | |
f160a2a7c5cef3e792945a605faf4f8c5bb13c06 | ce3964c7195de67e07818b08a43286f7ec9fec3e | /calculate_tetragonal_from_rs.py | 6cd6a88ad6a1d08ee5476959f8675ef36a8160ad | [] | no_license | zhuligs/physics | 82b601c856f12817c0cfedb17394b7b6ce6b843c | 7cbac1be7904612fd65b66b34edef453aac77973 | refs/heads/master | 2021-05-28T07:39:19.822692 | 2013-06-05T04:53:08 | 2013-06-05T04:53:08 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 674 | py | #!/usr/bin/env python
# backlib.py
import math
def main():
# constants
a0 = 5.291772108E-09 # Bohr radius in cm
pi = math.pi
# inputs
Rs = input('For what value of Rs would you like the box? ')
N = input('How many particles are there in your box? ')
ratio = input('What is the c/a ratio? ')
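    # Derivation sketch: the box volume below is N * (4/3)*pi*(rs*a0)**3 in cm^3;
    # assuming a tetragonal cell, V = a*a*c with c = ratio*a, so a = (V/ratio)**(1/3).
    # The factor 6.7483346e+24 converts cm^3 to bohr^3 (1 bohr = 5.291772108e-9 cm).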
volume = (1.0/3)*(4*N*pi)*(Rs*a0)**3
a = (volume*6.7483346e+24/ratio)**(1.0/3)
c = a*ratio
print 'rs:',Rs
print 'ratio:',ratio
print 'a:',a,'bohr'
print 'c:',c,'bohr'
print 'volume:',volume*6.7483346e+24,'bohr^3'
print 'density:',N/(volume*6.7483346e+24),'particles/bohr^3'
if __name__ == '__main__':
main()
| [
"[email protected]"
] | |
2e7796b0f384d360c9aa18cc0cfdb8abd7d1932b | 9eae66764d420fa4872baf5a10a5c66cc7fca580 | /contenidos/semana-09/EjemplosClase/main_3.py | fbcc6a865bd35e2e2c35fa636a297972b496d9a3 | [] | no_license | isidonoso/Prueba | f89ad14d5ffb61f0df6d53fdccddf2395e49ab19 | 29a4ef531d69ed79f56cc5fa1879375b743972a8 | refs/heads/master | 2022-12-26T07:03:57.881036 | 2020-10-02T01:11:29 | 2020-10-02T01:11:29 | 291,497,329 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,363 | py | import sys
from PyQt5.QtWidgets import (QApplication, QWidget, QLabel, QGridLayout, QPushButton, QVBoxLayout)
class Ventana(QWidget):
def __init__(self):
super().__init__()
self.etiqueta1 = QLabel('Texto a modificar', self)
self.grilla = QGridLayout()
valores = ['0', '1']
posiciones = [(0,0),(0,1)]
for i in range(2):
boton = QPushButton(valores[i])
            # Connect the clicked signal to the handler function
boton.clicked.connect(self.boton_clickeado)
self.grilla.addWidget(boton, *posiciones[i])
vbox = QVBoxLayout()
vbox.addWidget(self.etiqueta1)
vbox.addLayout(self.grilla)
self.setLayout(vbox)
self.setWindowTitle('Emit signal')
self.show()
def boton_clickeado(self):
        # sender() returns the widget that was clicked.
        boton = self.sender()
        # Get the index of that widget within the grid layout.
        idx = self.grilla.indexOf(boton)
        # Use the index to get the item's (row, column) position in the grid.
        posicion = self.grilla.getItemPosition(idx)
        # Update etiqueta1.
self.etiqueta1.setText(f'Presionado boton {idx}, en fila/columna: {posicion[:2]}.')
app = QApplication(sys.argv)
ex = Ventana()
sys.exit(app.exec_()) | [
"[email protected]"
] | |
2b83c2886cd1db915273df2a5786015b27537710 | 22d84f804271a629a395cec785b7eb4b47f72f36 | /examples/statistics/ANOVA_rutherford_2.py | 5d75201ea451cc523a716af8176b98a77449362d | [] | no_license | imclab/Eelbrain | bb787294218a2ba00f90f447af0e629abadeac88 | e52eb3a5bd8bf8fc9aece2fb4413e0286e080c46 | refs/heads/master | 2021-01-21T23:34:15.673295 | 2014-05-02T19:17:04 | 2014-05-02T19:17:21 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,028 | py | # Rutherford (2001) Examples, cross-checked results:
#
# factorial anova
#
#Independent Measures (p. 53):
## SS df MS F p
##_____________________________________________________________
##A 432.0000 1 432.0000 47.7474*** .0000
##B 672.0000 2 336.0000 37.1368*** .0000
##A x B 224 2 112 12.3789*** .0001
##subject(A x B) 380.0000 42 9.0476
##_____________________________________________________________
##Total 1708 47
#
#
#Repeated Measure (p. 86):
## SS df MS F p
##_________________________________________________________________
##A 432.0000 1 432.0000 40.1416*** .0004
##B 672.0000 2 336.0000 29.2174*** .0000
##A x B 224.0000 2 112.0000 17.1055*** .0002
##subject 52.0000 7 7.4286 0.7927 .5984
##A x subject 75.3333 7 10.7619 1.6436 .2029
##B x subject 161.0000 14 11.5000 1.7564 .1519
##A x B x subject 91.6667 14 6.5476
##_________________________________________________________________
##Total 1708 47
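# Sanity check on the tables above: each F is the effect mean square divided by the
# matching error mean square, e.g. independent measures F(A) = 432.0000/9.0476 ~ 47.75
# and repeated measures F(A) = 432.0000/10.7619 ~ 40.14, agreeing with the quoted values.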
import numpy as np
from eelbrain.lab import *
Y = np.array([ 7, 3, 6, 6, 5, 8, 6, 7,
7,11, 9,11,10,10,11,11,
8,14,10,11,12,10,11,12,
16, 7,11, 9,10,11, 8, 8,
16,10,13,10,10,14,11,12,
24,29,10,22,25,28,22,24])
A = Factor([1,0], rep=3*8, name='A')
B = Factor(range(3), tile=2, rep=8, name='B')
# Independent Measures:
subject = Factor(range(8*6), name='subject', random=True)
print test.anova(Y, A*B+subject(A%B), title="Independent Measures:")
# Repeated Measure:
subject = Factor(range(8), tile=6, name='subject', random=True)
print test.anova(Y, A * B * subject, title="Repeated Measure:")
| [
"[email protected]"
] | |
67e71bb59332794c1489ec2409abb7ee23400fb6 | faecebfb2aba45bc3dbb85d55f491365ff578344 | /game/ui_enemy_health_bar.py | 4e22450855f2381304d2be227478cdec3dfe8a97 | [] | no_license | MyreMylar/vania | c4658d16ae394cf8adecebd16470e82e773f98da | e813587db4d3a83e60188903238d8f0a3f124012 | refs/heads/master | 2020-08-11T01:29:48.462223 | 2019-10-11T15:05:38 | 2019-10-11T15:05:38 | 214,462,694 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,559 | py | import pygame
class UIEnemyHealthBar(pygame.sprite.Sprite):
"""
A UI that will display the enemy's health capacity and their current health.
"""
def __init__(self, enemy, *groups):
super().__init__(*groups)
self.enemy = enemy
self.position = enemy.screen_position[:]
self.width = int(enemy.rect.width*0.75)
self.height = 10
self.rect = pygame.Rect(self.position, (self.width, self.height))
self.background_colour = pygame.Color("#000000")
self.background_surface = pygame.Surface((self.rect.w, self.rect.h)).convert()
self.background_surface.fill(self.background_colour)
self.image = pygame.Surface((self.rect.w, self.rect.h)).convert()
self.hover_height = 10
self.horiz_padding = 2
self.vert_padding = 2
self.capacity_width = self.width - (self.horiz_padding * 2)
self.capacity_height = self.height - (self.vert_padding * 2)
self.health_capacity_rect = pygame.Rect([self.horiz_padding,
self.vert_padding],
[self.capacity_width, self.capacity_height])
self.health_empty_colour = pygame.Color("#CCCCCC")
self.health_colour = pygame.Color("#f4251b")
self.current_health = 50
self.health_capacity = 100
self.health_percentage = self.current_health / self.health_capacity
self.current_health_rect = pygame.Rect([self.horiz_padding,
self.vert_padding],
[int(self.capacity_width*self.health_percentage),
self.capacity_height])
def update(self):
self.position = [self.enemy.screen_position[0] - self.enemy.rect.width/2,
self.enemy.screen_position[1] - (self.enemy.rect.height/2) - self.hover_height]
self.current_health = self.enemy.current_health
self.health_capacity = self.enemy.base_health
self.health_percentage = self.current_health / self.health_capacity
self.current_health_rect.width = int(self.capacity_width * self.health_percentage)
self.image.blit(self.background_surface, (0, 0))
pygame.draw.rect(self.image, self.health_empty_colour, self.health_capacity_rect)
pygame.draw.rect(self.image, self.health_colour, self.current_health_rect)
self.rect.x = self.position[0]
self.rect.y = self.position[1]
| [
"[email protected]"
] | |
de26a67822e84a5591ccfa7820b8be8780320d27 | 277d4ee56616bb5930c57a57c68a202bf5085501 | /stubs/thinc/neural/_classes/maxout.pyi | bf42de303504a6c926134e3e0fd43fcc450d1488 | [
"MIT"
] | permissive | miskolc/spacy-pytorch-transformers | fc502523644eb25cb293e0796b46535ba581a169 | ab132b674c5a91510eb8cc472cdbdf5877d24145 | refs/heads/master | 2020-07-22T09:47:17.905850 | 2019-09-04T15:12:09 | 2019-09-04T15:12:09 | 207,156,566 | 1 | 0 | MIT | 2019-09-08T18:37:55 | 2019-09-08T18:37:55 | null | UTF-8 | Python | false | false | 735 | pyi | # Stubs for thinc.neural._classes.maxout (Python 3)
#
# NOTE: This dynamically typed stub was automatically generated by stubgen.
from ...describe import Biases, Dimension, Gradient, Synapses
from ..util import get_array_module
from .model import Model
from typing import Any, Optional
def xavier_uniform_init(W: Any, ops: Any) -> None: ...
def normal_init(W: Any, ops: Any) -> None: ...
class Maxout(Model):
name: str = ...
nO: Any = ...
nI: Any = ...
nP: Any = ...
drop_factor: Any = ...
def __init__(self, nO: Optional[Any] = ..., nI: Optional[Any] = ..., pieces: int = ..., **kwargs: Any) -> None: ...
def predict(self, X__BI: Any): ...
def begin_update(self, X__bi: Any, drop: float = ...): ...
| [
"[email protected]"
] | |
f0bd60edb571b6ddd80c330a4471bfd278cc8c71 | a74b980fd95d5d810315f181449fc9d1710e6923 | /savecode/threeyears/idownserver/config_dispatch.py | a9332f00f371526603ca0cd4873e8a2e22388314 | [
"Apache-2.0"
] | permissive | cbbbbbbbb/sspywork | b70f5539203b47b21eec2f0514ddca155affc2b8 | 8f05a6b91fc205960edd57f9076facec04f49a1a | refs/heads/master | 2023-03-22T19:45:13.024076 | 2021-03-08T01:24:21 | 2021-03-08T01:24:21 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,509 | py | """Configuration of the command-data dispatchers."""
# -*- coding:utf-8 -*-
from datacontract.datamatcher import ExtMatcher
from .taskdispatcher import (
AutoTaskDispatcher,
CmdDispatcher,
DispatchConfig,
IScanDispatcher,
IScoutDispatcher,
TaskDispatcher,
)
# Dispatcher configuration
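# Each dispatcher below is keyed by its unique name; the ExtMatcher lists the data
# extensions it handles, while maxwaitcount/maxwaittime presumably bound how much
# data is buffered before dispatch (assumption based on the parameter names).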
dispatchconfig = DispatchConfig(
taskdispatchers={
"idowntask": TaskDispatcher(
uniquename="idowntask",
datamatcher=ExtMatcher(["idown_task", "an_cookie"]),
maxwaitcount=1,
maxwaittime=3,
relation_inputer_src=None,
),
"cmd": CmdDispatcher(
uniquename="cmd",
datamatcher=ExtMatcher(["idown_cmd"]),
maxwaitcount=1,
maxwaittime=3,
relation_inputer_src=None,
),
"iscantask": IScanDispatcher(
uniquename="iscantask",
datamatcher=ExtMatcher(["iscan_task"]),
maxwaitcount=1,
maxwaittime=3,
relation_inputer_src=None,
),
"iscouttask": IScoutDispatcher(
uniquename="iscouttask",
datamatcher=ExtMatcher(["iscout_task"]),
maxwaitcount=1,
maxwaittime=3,
relation_inputer_src=None,
),
"autotask": AutoTaskDispatcher(
uniquename="autotask",
datamatcher=ExtMatcher(["automated_task"]),
maxwaitcount=1,
maxwaittime=3,
relation_inputer_src=None,
),
}
)
| [
"[email protected]"
] | |
3a7848c94352123a109254d2864aa07d7452327f | 1d60c5a7b8ce6277bff514e376f79848f706344c | /Machine Learning Scientist with Python/14. Introduction to TensorFlow in Python/02. Linear models/04. Modifying the loss function.py | bc7863271f780922fc8c310bb47f666f7fa4ed4a | [] | no_license | DidiMilikina/DataCamp | 338c6e6d3b4f5b6c541c1aba155a36e9ee24949d | 3bf2cf3c1430190a7f8e54efda7d50a5fd66f244 | refs/heads/master | 2020-12-15T13:16:54.178967 | 2020-05-06T17:30:54 | 2020-05-06T17:30:54 | 235,113,616 | 4 | 3 | null | null | null | null | UTF-8 | Python | false | false | 1,503 | py | '''
Modifying the loss function
In the previous exercise, you defined a tensorflow loss function and then evaluated it once for a set of actual and predicted values. In this exercise, you will compute the loss within another function called loss_function(), which first generates predicted values from the data and variables. The purpose of this is to construct a function of the trainable model variables that returns the loss. You can then repeatedly evaluate this function for different variable values until you find the minimum. In practice, you will pass this function to an optimizer in tensorflow. Note that features and targets have been defined and are available. Additionally, Variable, float32, and keras are available.
Instructions
100 XP
Define a variable, scalar, with an initial value of 1.0 and a type of float32.
Define a function called loss_function(), which takes scalar, features, and targets as arguments in that order.
Use a mean absolute error loss function.
'''
# SOLUTION
# Initialize a variable named scalar
scalar = Variable(1.0, float32)
# Define the model
def model(scalar, features = features):
return scalar * features
# Define a loss function
def loss_function(scalar=scalar, features = features, targets = targets):
# Compute the predicted values
predictions = model(scalar, features)
# Return the mean absolute error loss
return keras.losses.mae(targets, predictions)
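# As the exercise text notes, this callable would in practice be handed to an
# optimizer; a minimal sketch (assuming a TF2-style keras optimizer) would be:
#   opt = keras.optimizers.Adam(0.1)
#   for _ in range(100):
#       opt.minimize(loss_function, var_list=[scalar])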
# Evaluate the loss function and print the loss
print(loss_function(scalar).numpy()) | [
"[email protected]"
] | |
147f6932afc8b302279c9890d43d0cc9046e3413 | b21822a35da6cda8d7b7c89a4ada9a5651aed7b2 | /Problem-2.py | e5131635a1999b7147b237d1275810e7337eda37 | [] | no_license | s4git21/Backtracking-3 | ba39bf9a7f303adf3d6eb13baae5d14f2b6bb4bc | 0412b355f0c6f24cdcf82648eeb22de8ae50f078 | refs/heads/master | 2023-06-14T05:27:11.218350 | 2021-07-06T00:50:25 | 2021-07-06T00:50:25 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,359 | py | """
Approach: 1) use DFS traversal and move in all 4 directions to look for the string in the board.
2) note that the traversal can start again from the current cell so you have to mark it visited
a) instead of maintaining another board to keep track of visited cells, we'll change the char value to indicate that
it is visited
TC: O(n*3^L) n = number of cells, L = len of string
SC: O(L) the recursion stack can have all the chars in the string
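Illustrative example: board = [["A","B"],["C","D"]], word = "ABD" -> True (A -> B -> D),
while word = "ACB" -> False because B is not adjacent to C.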
"""
from typing import List
class Solution:
def exist(self, board: List[List[str]], word: str) -> bool:
for r, row in enumerate(board):
for c, cell in enumerate(row):
if self.backtrack(board, word, 0, r, c):
return True
return False
def backtrack(self, board, word, index, r, c):
# base
if index == len(word):
return True
if r < 0 or r >= len(board) or c < 0 or c >= len(board[0]) or board[r][c] != word[index]:
return False
# action
temp = board[r][c]
board[r][c] = '#'
# logic
dirs = [[0, 1], [0, -1], [-1, 0], [1, 0]]
for roff, coff in dirs:
new_row = r + roff
new_col = c + coff
if self.backtrack(board, word, index + 1, new_row, new_col):
return True
# backtrack
board[r][c] = temp | [
"[email protected]"
] | |
36fcc9c738f1be919faac0c727dbe29e7658f835 | df7f13ec34591fe1ce2d9aeebd5fd183e012711a | /hata/discord/scheduled_event/scheduled_event_entity_metadata/tests/test__parse_speaker_ids.py | f554c1e962233b4b01ee1c67dfa68b0f357bebc9 | [
"LicenseRef-scancode-warranty-disclaimer"
] | permissive | HuyaneMatsu/hata | 63e2f6a2d7a7539fd8f18498852d9d3fe5c41d2e | 53f24fdb38459dc5a4fd04f11bdbfee8295b76a4 | refs/heads/master | 2023-08-20T15:58:09.343044 | 2023-08-20T13:09:03 | 2023-08-20T13:09:03 | 163,677,173 | 3 | 3 | Apache-2.0 | 2019-12-18T03:46:12 | 2018-12-31T14:59:47 | Python | UTF-8 | Python | false | false | 672 | py | import vampytest
from ..fields import parse_speaker_ids
def test__parse_speaker_ids():
"""
Tests whether ``parse_speaker_ids`` works as intended.
"""
speaker_id_1 = 202303120072
speaker_id_2 = 202303120073
for input_data, expected_output in (
({}, None),
({'speaker_ids': None}, None),
({'speaker_ids': []}, None),
({'speaker_ids': [str(speaker_id_1), str(speaker_id_2)]}, (speaker_id_1, speaker_id_2)),
({'speaker_ids': [str(speaker_id_2), str(speaker_id_1)]}, (speaker_id_1, speaker_id_2)),
):
output = parse_speaker_ids(input_data)
vampytest.assert_eq(output, expected_output)
| [
"[email protected]"
] | |
4b82ce3e1aab7b779c48a30fcf2274b21180ea7c | 084e35c598426b1137f9cd502e1b5e7f09cdf034 | /每日一题/problem1049_最后一块石头的重量II.py | da7c8a54ef837b1d51c1e29591562d625bc8f668 | [] | no_license | sakurasakura1996/Leetcode | 3a941dadd198ee2f54b69057ae3bbed99941974c | 78f239959af98dd3bd987fb17a3544010e54ae34 | refs/heads/master | 2021-09-11T05:07:44.987616 | 2021-09-07T05:39:34 | 2021-09-07T05:39:34 | 240,848,992 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 631 | py | from typing import List
class Solution:
def lastStoneWeightII(self, stones: List[int]) -> int:
total = sum(stones)
n, m = len(stones), total // 2
dp = [[False] * (m + 1) for _ in range(n + 1)]
dp[0][0] = True
for i in range(n):
for j in range(m + 1):
if j < stones[i]:
dp[i + 1][j] = dp[i][j]
else:
dp[i + 1][j] = dp[i][j] or dp[i][j - stones[i]]
ans = None
for j in range(m, -1, -1):
if dp[n][j]:
ans = total - 2 * j
break
return ans
| [
"[email protected]"
] | |
8c36cb6d74d1abe8ef66867deca99e9e23ce1b7b | 33c51931bc7d6f73da5a64ecc0e7cb751e7fc62c | /karesansui/gadget/hostby1networksettings.py | ab6a6f59f9618c75a7563f4314a2af46e8af5265 | [
"MIT",
"GPL-1.0-or-later"
] | permissive | qmutz/karesansui | e86ed107f313f8c4140797a2c3250c5f16b524c2 | f4ba1cf6f88cf76c3e4dbc444139d73134f7c9d1 | refs/heads/develop | 2023-05-06T14:52:38.668631 | 2019-02-01T03:57:00 | 2019-02-01T03:57:00 | 316,682,704 | 0 | 0 | MIT | 2021-06-03T14:59:45 | 2020-11-28T07:43:33 | null | UTF-8 | Python | false | false | 8,354 | py | # -*- coding: utf-8 -*-
#
# This file is part of Karesansui.
#
# Copyright (C) 2009-2012 HDE, Inc.
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
#
import os
import sys
import re
import web
import karesansui
from karesansui.lib.rest import Rest, auth
from karesansui.db.model._2pysilhouette import Job, JobGroup
from karesansui.db.access.machine import findbyhost1
from karesansui.db.access._2pysilhouette import save_job_collaboration
from karesansui.db.access.machine2jobgroup import new as m2j_new
from pysilhouette.command import dict2command
from karesansui.lib.utils import get_ifconfig_info, get_bonding_info, dict_ksort, available_virt_mechs, is_param
from karesansui.lib.const import BONDING_COMMAND_ADD, NETWORK_COMMAND_RESTART, BONDING_MODE
from karesansui.lib.checker import Checker, \
CHECK_EMPTY, CHECK_VALID
NETWORK_RESTART = 1
def validates_bonding(obj, target_regex):
checker = Checker()
check = True
_ = obj._
checker.errors = []
count = 0
for input in obj.input:
m = target_regex.match(input)
if m:
count += 1
check = checker.check_netdev_name(_('Target Device Name'),
m.group('dev'),
CHECK_EMPTY | CHECK_VALID,
) and check
if count < 2:
check = False
checker.add_error(_('Not enough target devices for bonding.'))
if is_param(obj.input, 'bonding_target_dev_primary'):
check = checker.check_netdev_name(_('Primary Device Name'),
obj.input.bonding_target_dev_primary,
CHECK_EMPTY | CHECK_VALID,
) and check
else:
check = False
checker.add_error(_('"%s" is required.') %_('Primary Device Name'))
if is_param(obj.input, 'bonding_mode'):
if obj.input.bonding_mode not in BONDING_MODE:
check = False
checker.add_error(_('Unknown bonding mode.'))
else:
check = False
checker.add_error(_('"%s" is required.') %_('Bonding Mode'))
obj.view.alert = checker.errors
return check
class HostBy1NetworkSettings(Rest):
@auth
def _GET(self, *param, **params):
host_id = self.chk_hostby1(param)
if host_id is None: return web.notfound()
self.view.host_id = host_id
bond_list = get_bonding_info()
if self.is_mode_input() is True:
exist_bond_max_num = -1
exist_bond_list = get_ifconfig_info("regex:^bond")
for bond_name in exist_bond_list.keys():
try:
num = int(bond_name.replace("bond",""))
except ValueError:
continue
if exist_bond_max_num < num:
exist_bond_max_num = num
self.view.create_bond_name = "bond%s" % (exist_bond_max_num + 1)
dev_list = get_ifconfig_info("regex:^eth")
for bond in bond_list:
for slave in bond_list[bond]['slave']:
if slave in dev_list:
dev_list[slave]['bond'] = bond
#pysical_dev_list = get_ifconfig_info("regex:^peth")
pysical_dev_list = get_ifconfig_info("regex:^br")
for pysical_dev in pysical_dev_list:
if pysical_dev[1:] in dev_list:
dev_list[pysical_dev[1:]]['bridge'] = pysical_dev
self.view.bond_target_dev = dev_list
self.view.hypervisors = available_virt_mechs()
return True
dev_list = get_ifconfig_info()
for bond in bond_list:
if bond in dev_list:
dev_list[bond]['bond'] = True
for slave in bond_list[bond]['slave']:
for dev in dev_list:
if dev == slave:
dev_list[dev]['bond'] = True
self.view.current = dev_list
self.view.bond_list = bond_list
return True
@auth
def _POST(self, *param, **params):
host_id = self.chk_hostby1(param)
if host_id is None: return web.notfound()
target_regex = re.compile(r"^bonding_target_dev_select_(?P<dev>eth[0-9]+)")
if not validates_bonding(self, target_regex):
self.logger.debug("Add bonding failed. Did not validate.")
return web.badrequest(self.view.alert)
target_dev = []
for input in self.input:
m = target_regex.match(input)
if m:
target_dev.append(m.group('dev'))
primary = self.input.bonding_target_dev_primary
mode = self.input.bonding_mode
cmdname = u"Add Bonding Setting"
cmd = BONDING_COMMAND_ADD
options = {}
options['dev'] = ','.join(target_dev)
options["primary"] = primary
options["mode"] = mode
_cmd = dict2command(
"%s/%s" % (karesansui.config['application.bin.dir'], cmd), options)
_jobgroup = JobGroup(cmdname, karesansui.sheconf['env.uniqkey'])
_job = Job('%s command' % cmdname, 0, _cmd)
_jobgroup.jobs.append(_job)
host = findbyhost1(self.orm, host_id)
_machine2jobgroup = m2j_new(machine=host,
jobgroup_id=-1,
uniq_key=karesansui.sheconf['env.uniqkey'],
created_user=self.me,
modified_user=self.me,
)
save_job_collaboration(self.orm,
self.pysilhouette.orm,
_machine2jobgroup,
_jobgroup,
)
return web.accepted()
@auth
def _PUT(self, *param, **params):
host_id = self.chk_hostby1(param)
if host_id is None: return web.notfound()
self.view.host_id = host_id
host = findbyhost1(self.orm, host_id)
status = int(self.input.status)
if status != NETWORK_RESTART:
return web.badrequest()
cmdname = u"Restart Network"
cmd = NETWORK_COMMAND_RESTART
options = {}
_cmd = dict2command(
"%s/%s" % (karesansui.config['application.bin.dir'], cmd), options)
_jobgroup = JobGroup(cmdname, karesansui.sheconf['env.uniqkey'])
_job = Job('%s command' % cmdname, 0, _cmd)
_jobgroup.jobs.append(_job)
host = findbyhost1(self.orm, host_id)
_machine2jobgroup = m2j_new(machine=host,
jobgroup_id=-1,
uniq_key=karesansui.sheconf['env.uniqkey'],
created_user=self.me,
modified_user=self.me,
)
save_job_collaboration(self.orm,
self.pysilhouette.orm,
_machine2jobgroup,
_jobgroup,
)
return web.accepted()
urls = (
'/host/(\d+)/networksettings/?(\.part)$', HostBy1NetworkSettings,
)
| [
"[email protected]"
] | |
a2784b7f3609ce114701b71b1c5d856b349dcc2c | ba157236151a65e3e1fde2db78b0c7db81b5d3f6 | /String/reverse_only_letters.py | 99750af83a43b0a37f1d087de1d7be02d80b9b6f | [] | no_license | JaberKhanjk/LeetCode | 152488ccf385b449d2a97d20b33728483029f85b | 78368ea4c8dd8efc92e3db775b249a2f8758dd55 | refs/heads/master | 2023-02-08T20:03:34.704602 | 2020-12-26T06:24:33 | 2020-12-26T06:24:33 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 665 | py | class Solution(object):
def reverseOnlyLetters(self, S):
        """
        :type S: str
        :rtype: str
        """
        stack = []
        hash_map = {}
        for i,each in enumerate(S):
            if (each >= 'a' and each <= 'z') or (each >= 'A' and each <= 'Z'):
                stack.append(each)
            else:
                hash_map[i] = each
        n = len(S)
        final = ""
        for i in range(n):
            if i in hash_map:
                final += hash_map[i]
            else:
                final += stack.pop()
        return final
| [
"[email protected]"
] | |
168e1a720ae61eeab4b60a352e20ce73f9ca790a | 61aa319732d3fa7912e28f5ff7768498f8dda005 | /src/arch/x86/isa/insts/simd128/floating_point/arithmetic/multiplication.py | d4d99381ec3931f0f03fb84ea1f7192fe1c20f0a | [
"BSD-3-Clause",
"LicenseRef-scancode-proprietary-license",
"LGPL-2.0-or-later",
"MIT"
] | permissive | TeCSAR-UNCC/gem5-SALAM | 37f2f7198c93b4c18452550df48c1a2ab14b14fb | c14c39235f4e376e64dc68b81bd2447e8a47ff65 | refs/heads/main | 2023-06-08T22:16:25.260792 | 2023-05-31T16:43:46 | 2023-05-31T16:43:46 | 154,335,724 | 62 | 22 | BSD-3-Clause | 2023-05-31T16:43:48 | 2018-10-23T13:45:44 | C++ | UTF-8 | Python | false | false | 3,943 | py | # Copyright (c) 2007 The Hewlett-Packard Development Company
# All rights reserved.
#
# The license below extends only to copyright in the software and shall
# not be construed as granting a license to any other intellectual
# property including but not limited to intellectual property relating
# to a hardware implementation of the functionality of the software
# licensed hereunder. You may use the software subject to the license
# terms below provided that you ensure that this notice is replicated
# unmodified and in its entirety in all distributions of the software,
# modified or unmodified, in source code or in binary form.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met: redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer;
# redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution;
# neither the name of the copyright holders nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
microcode = '''
def macroop MULSS_XMM_XMM {
mmulf xmml, xmml, xmmlm, size=4, ext=Scalar
};
def macroop MULSS_XMM_M {
ldfp ufp1, seg, sib, disp, dataSize=8
mmulf xmml, xmml, ufp1, size=4, ext=Scalar
};
def macroop MULSS_XMM_P {
rdip t7
ldfp ufp1, seg, riprel, disp, dataSize=8
mmulf xmml, xmml, ufp1, size=4, ext=Scalar
};
def macroop MULSD_XMM_XMM {
mmulf xmml, xmml, xmmlm, size=8, ext=Scalar
};
def macroop MULSD_XMM_M {
ldfp ufp1, seg, sib, disp, dataSize=8
mmulf xmml, xmml, ufp1, size=8, ext=Scalar
};
def macroop MULSD_XMM_P {
rdip t7
ldfp ufp1, seg, riprel, disp, dataSize=8
mmulf xmml, xmml, ufp1, size=8, ext=Scalar
};
def macroop MULPS_XMM_XMM {
mmulf xmml, xmml, xmmlm, size=4, ext=0
mmulf xmmh, xmmh, xmmhm, size=4, ext=0
};
def macroop MULPS_XMM_M {
ldfp ufp1, seg, sib, "DISPLACEMENT", dataSize=8
ldfp ufp2, seg, sib, "DISPLACEMENT + 8", dataSize=8
mmulf xmml, xmml, ufp1, size=4, ext=0
mmulf xmmh, xmmh, ufp2, size=4, ext=0
};
def macroop MULPS_XMM_P {
rdip t7
ldfp ufp1, seg, riprel, "DISPLACEMENT", dataSize=8
ldfp ufp2, seg, riprel, "DISPLACEMENT + 8", dataSize=8
mmulf xmml, xmml, ufp1, size=4, ext=0
mmulf xmmh, xmmh, ufp2, size=4, ext=0
};
def macroop MULPD_XMM_XMM {
mmulf xmml, xmml, xmmlm, size=8, ext=0
mmulf xmmh, xmmh, xmmhm, size=8, ext=0
};
def macroop MULPD_XMM_M {
ldfp ufp1, seg, sib, "DISPLACEMENT", dataSize=8
ldfp ufp2, seg, sib, "DISPLACEMENT + 8", dataSize=8
mmulf xmml, xmml, ufp1, size=8, ext=0
mmulf xmmh, xmmh, ufp2, size=8, ext=0
};
def macroop MULPD_XMM_P {
rdip t7
ldfp ufp1, seg, riprel, "DISPLACEMENT", dataSize=8
ldfp ufp2, seg, riprel, "DISPLACEMENT + 8", dataSize=8
mmulf xmml, xmml, ufp1, size=8, ext=0
mmulf xmmh, xmmh, ufp2, size=8, ext=0
};
'''
| [
"[email protected]"
] | |
ce7c741bc80b28875478e6d0bd778a104eaeb01d | ebe5167148cfff43d24b6c66e44634bb55513b72 | /solutions/graph/133.Clone.Graph.py | 8d6ec848c07c4a432abacd4871ee8f9ad1532cbd | [] | no_license | ljia2/leetcode.py | c90ac38a25331d61d3ff77fd135b82372da3a09f | 08c6d27498e35f636045fed05a6f94b760ab69ca | refs/heads/master | 2020-03-25T03:37:13.318582 | 2019-07-18T23:14:41 | 2019-07-18T23:14:41 | 143,351,386 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,532 | py | # Definition for a Node.
class Node(object):
def __init__(self, val, neighbors):
self.val = val
self.neighbors = neighbors
class DFSSolution(object):
def cloneGraph(self, node):
"""
Given a reference of a node in a connected undirected graph, return a deep copy (clone) of the graph.
Each node in the graph contains a val (int) and a list (List[Node]) of its neighbors.
Example:
Input:
{"$id":"1","neighbors":[{"$id":"2","neighbors":[{"$ref":"1"},{"$id":"3","neighbors":[{"$ref":"2"},{"$id":"4","neighbors":[{"$ref":"3"},{"$ref":"1"}],"val":4}],"val":3}],"val":2},{"$ref":"4"}],"val":1}
Explanation:
Node 1's value is 1, and it has two neighbors: Node 2 and 4.
Node 2's value is 2, and it has two neighbors: Node 1 and 3.
Node 3's value is 3, and it has two neighbors: Node 2 and 4.
Node 4's value is 4, and it has two neighbors: Node 1 and 3.
Note:
The number of nodes will be between 1 and 100.
The undirected graph is a simple graph, which means no repeated edges and no self-loops in the graph.
Since the graph is undirected, if node p has node q as neighbor, then node q must have node p as neighbor too.
You must return the copy of the given node as a reference to the cloned graph.
:type node: Node
:rtype: Node
DFS + dict key = original node, val = copy node.
"""
if not node:
return None
node2copy = dict()
self.dfs(node, set(), node2copy)
return node2copy[node]
def dfs(self, node, visited, node2copy):
if node in visited:
return
visited.add(node)
if node not in node2copy.keys():
node2copy[node] = Node(node.val, [])
for neighbor in node.neighbors:
            if neighbor in node2copy.keys():
neighborcopy = node2copy[neighbor]
else:
neighborcopy = Node(neighbor.val, [])
node2copy[neighbor] = neighborcopy
node2copy[node].neighbors.append(neighborcopy)
self.dfs(neighbor, visited, node2copy)
return
class BFSSolution(object):
def cloneGraph(self, node):
"""
Given a reference of a node in a connected undirected graph, return a deep copy (clone) of the graph.
Each node in the graph contains a val (int) and a list (List[Node]) of its neighbors.
Example:
Input:
{"$id":"1","neighbors":[{"$id":"2","neighbors":[{"$ref":"1"},{"$id":"3","neighbors":[{"$ref":"2"},{"$id":"4","neighbors":[{"$ref":"3"},{"$ref":"1"}],"val":4}],"val":3}],"val":2},{"$ref":"4"}],"val":1}
Explanation:
Node 1's value is 1, and it has two neighbors: Node 2 and 4.
Node 2's value is 2, and it has two neighbors: Node 1 and 3.
Node 3's value is 3, and it has two neighbors: Node 2 and 4.
Node 4's value is 4, and it has two neighbors: Node 1 and 3.
Note:
The number of nodes will be between 1 and 100.
The undirected graph is a simple graph, which means no repeated edges and no self-loops in the graph.
Since the graph is undirected, if node p has node q as neighbor, then node q must have node p as neighbor too.
You must return the copy of the given node as a reference to the cloned graph.
:type node: Node
:rtype: Node
BFS + dict key = original node, val = copy node.
"""
if not node:
return None
node2copy = dict()
self.bfs(node, node2copy)
return node2copy[node]
def bfs(self, node, node2copy):
qe = [node]
visited = set()
while qe:
size = len(qe)
while size > 0:
n = qe.pop(0)
                size -= 1
if n in visited:
continue
# get or generate the copy of node
node2copy[n] = Node(n.val, [])
visited.add(n)
# populate the neighbors of nn according to that of n.
for neighbor in n.neighbors:
qe.append(neighbor)
# n node's copy nn's neighbor has been populated.
# set up the dict node2copy by cloning the edges
for node in node2copy.keys():
for neighbor in node.neighbors:
node2copy[node].neighbors.append(node2copy[neighbor])
return
| [
"[email protected]"
] | |
25d1d944d7e4f8f2d8859c6fad01ba0ade0590b9 | bfe345ba31673553ce156e4dca1ba5b6317ca4c2 | /core/models/cms/interface.py | f3476948da56841820fdffaf4026b4a2080662e6 | [] | no_license | phillipmalboeuf/dauphin | 215c6432fbcaf574ea0bd987238a0d2309d699af | 101d8ffc113f36c8d1754077eebae2a6f18d4a3d | refs/heads/master | 2023-01-09T01:55:36.905732 | 2021-06-20T18:35:35 | 2021-06-20T18:35:35 | 84,462,808 | 0 | 0 | null | 2023-01-05T06:11:24 | 2017-03-09T16:13:59 | Python | UTF-8 | Python | false | false | 405 | py | from core import app
from flask import request, abort
from core.models.core.content import Content
from core.models.core.has_routes import HasRoutes
from core.helpers.validation_rules import validation_rules
from core.helpers.json import to_json
with app.app_context():
class Interface(Content):
collection_name = 'interface'
@classmethod
def get(cls, lang=None):
return cls.list(lang)[0]
| [
"[email protected]"
] | |
1498fee5084ad1aba990b5613482f15303cbf55b | f9d564f1aa83eca45872dab7fbaa26dd48210d08 | /huaweicloud-sdk-workspaceapp/huaweicloudsdkworkspaceapp/v1/model/create_share_folder_request.py | 100f5ccf04f31234da8ec88802583bc9e72f6215 | [
"Apache-2.0"
] | permissive | huaweicloud/huaweicloud-sdk-python-v3 | cde6d849ce5b1de05ac5ebfd6153f27803837d84 | f69344c1dadb79067746ddf9bfde4bddc18d5ecf | refs/heads/master | 2023-09-01T19:29:43.013318 | 2023-08-31T08:28:59 | 2023-08-31T08:28:59 | 262,207,814 | 103 | 44 | NOASSERTION | 2023-06-22T14:50:48 | 2020-05-08T02:28:43 | Python | UTF-8 | Python | false | false | 3,965 | py | # coding: utf-8
import six
from huaweicloudsdkcore.utils.http_utils import sanitize_for_serialization
class CreateShareFolderRequest:
"""
Attributes:
openapi_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
sensitive_list = []
openapi_types = {
'storage_id': 'str',
'body': 'CreateShareFolderReq'
}
attribute_map = {
'storage_id': 'storage_id',
'body': 'body'
}
def __init__(self, storage_id=None, body=None):
"""CreateShareFolderRequest
The model defined in huaweicloud sdk
        :param storage_id: WKS storage ID
:type storage_id: str
:param body: Body of the CreateShareFolderRequest
:type body: :class:`huaweicloudsdkworkspaceapp.v1.CreateShareFolderReq`
"""
self._storage_id = None
self._body = None
self.discriminator = None
self.storage_id = storage_id
if body is not None:
self.body = body
@property
def storage_id(self):
"""Gets the storage_id of this CreateShareFolderRequest.
        WKS storage ID
:return: The storage_id of this CreateShareFolderRequest.
:rtype: str
"""
return self._storage_id
@storage_id.setter
def storage_id(self, storage_id):
"""Sets the storage_id of this CreateShareFolderRequest.
        WKS storage ID
:param storage_id: The storage_id of this CreateShareFolderRequest.
:type storage_id: str
"""
self._storage_id = storage_id
@property
def body(self):
"""Gets the body of this CreateShareFolderRequest.
:return: The body of this CreateShareFolderRequest.
:rtype: :class:`huaweicloudsdkworkspaceapp.v1.CreateShareFolderReq`
"""
return self._body
@body.setter
def body(self, body):
"""Sets the body of this CreateShareFolderRequest.
:param body: The body of this CreateShareFolderRequest.
:type body: :class:`huaweicloudsdkworkspaceapp.v1.CreateShareFolderReq`
"""
self._body = body
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.openapi_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
if attr in self.sensitive_list:
result[attr] = "****"
else:
result[attr] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
import simplejson as json
if six.PY2:
import sys
reload(sys)
sys.setdefaultencoding("utf-8")
return json.dumps(sanitize_for_serialization(self), ensure_ascii=False)
def __repr__(self):
"""For `print`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, CreateShareFolderRequest):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""Returns true if both objects are not equal"""
return not self == other
| [
"[email protected]"
] | |
27bc677940517f8b690617146b98d68d33e73b91 | 4bcc9806152542ab43fc2cf47c499424f200896c | /tensorflow/examples/custom_ops_doc/multiplex_4/multiplex_4_load_use.py | 3716b63836e08b403abcc2ab5c1ec98035281c2f | [
"Apache-2.0",
"LicenseRef-scancode-generic-cla",
"BSD-2-Clause"
] | permissive | tensorflow/tensorflow | 906276dbafcc70a941026aa5dc50425ef71ee282 | a7f3934a67900720af3d3b15389551483bee50b8 | refs/heads/master | 2023-08-25T04:24:41.611870 | 2023-08-25T04:06:24 | 2023-08-25T04:14:08 | 45,717,250 | 208,740 | 109,943 | Apache-2.0 | 2023-09-14T20:55:50 | 2015-11-07T01:19:20 | C++ | UTF-8 | Python | false | false | 1,358 | py | # Copyright 2021 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Binary for showing C++ backward compatibility.
This loads a previously created SavedModel (esp. a model created by
multiplex_2_save.py which uses the "old" op and C++ kernel from multiplex_2)
and runs the model using the "new" multiplex_4 C++ kernel.
https://www.tensorflow.org/guide/saved_model
https://www.tensorflow.org/api_docs/python/tf/saved_model/save
"""
from absl import app
from tensorflow.examples.custom_ops_doc.multiplex_4 import model_using_multiplex
def main(argv):
del argv # not used
path = 'model_using_multiplex'
result = model_using_multiplex.load_and_use(path)
print('Result:', result)
if __name__ == '__main__':
app.run(main)
| [
"[email protected]"
] | |
8480bed289ae9be9d6385822faf59b4d35b0087f | 84fb44bbf23254fe787535f66baec310dca593b7 | /struct/djstruct/tests.py | 04252db944554359c113e44553eb7e60f61a9cbd | [] | no_license | gomson/structure-api | 0c6da71bea57d23bd2a31d827fd865f3729ce2a5 | 37619dad77ce51fb0832d1ff746cc6909cd82813 | refs/heads/master | 2021-01-19T19:24:24.152571 | 2016-06-24T21:40:00 | 2016-06-24T21:40:00 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 9,350 | py | from nose.tools import eq_, ok_
from pprint import pprint
from django.core.urlresolvers import reverse
from django.test import TestCase
from rest_framework import status
# from rest_framework.authtoken.models import Token
from rest_framework.test import APIClient
from rest_framework.test import APITestCase
from .models import DjangoBaseNode, DjangoDependencyRelation
from .serializers import DjangoBaseNodeSerializer
class TestDjangoBaseNodePersist(TestCase):
def test_base_node_create(self):
n = DjangoBaseNode(path='mechanics/kinematics')
n.save()
ok_(n.id is not None)
eq_(n.path, 'mechanics/kinematics')
# def test_path_is_required(self):
# pass
# n = BaseNode(path=None)
# raises...
# n.save()
class TestCreateUpdateRetrieveDjangoBaseNode(APITestCase):
def setUp(self):
# print 'in setUp ...'
client = APIClient()
def _create_test_node(self):
nodedata = {
"path": "test/path",
"scope": "minireftest",
"version": "0.1",
"comment": "Le comment",
}
url = reverse('djangobasenode-list')
response = self.client.post(url, nodedata, format='json')
# print response.status_code, response.data['id'], response
self._nodeid = response.data['id']
eq_(response.status_code, status.HTTP_201_CREATED, "Can't create.")
def test_create_node(self):
self._create_test_node()
def test_update_node(self):
self._create_test_node()
# GET
url = reverse('djangobasenode-detail', kwargs={'uuid':self._nodeid})
response = self.client.get(url, format='json')
# print response.status_code, response
eq_(response.status_code, status.HTTP_200_OK)
ok_(response.data['id'])
eq_(response.data['path'], "test/path")
# CHANGE
putdata = response.data
putdata['path'] = "test/updated_path"
# PUT
response = self.client.put(url, putdata, format='json')
# print response.status_code, response
eq_(response.status_code, status.HTTP_200_OK)
eq_(response.data['id'], self._nodeid)
eq_(response.data['path'], "test/updated_path")
def test_retrieve_node(self):
self._create_test_node()
url = reverse('djangobasenode-detail', kwargs={'uuid':self._nodeid})
response = self.client.get(url, format='json')
eq_(response.status_code, status.HTTP_200_OK)
ok_(response.data['id'])
eq_(response.data['path'], "test/path")
class TestRetrieveRelationships(APITestCase):
def setUp(self):
client = APIClient()
def _create_basenodes(self):
n1 = DjangoBaseNode(path='testmath/quadratic_equation')
n1.save()
self._n1 = n1
n2 = DjangoBaseNode(path='testmechanics/kinematics')
n2.save()
self._n2 = n2
n3 = DjangoBaseNode(path='testmechanics/projectile_motion')
n3.save()
self._n3 = n3
def _create_relations(self):
r12 = DjangoDependencyRelation(
prerequisite=self._n1,
usedfor=self._n2,
level='UGRAD',
explain_usedfor='test Solving quadratics is useful in kinematics.',
explain_prerequisite='test You need to know how to solve quadratic equations to solve certain kinematics problems.'
)
r12.save()
r23 = DjangoDependencyRelation(
prerequisite=self._n2,
usedfor=self._n3,
level='GRAD',
explain_usedfor='One-dimensional kinematics is used in two-dimensional projectile motion.',
explain_prerequisite='You should be familiar with one-dimensional kinamtics before attacking two-dimensional kinematics porblems.'
)
r23.save()
def test_prerequisites_good(self):
self._create_basenodes()
self._create_relations()
url_n2 = reverse('djangobasenode-detail', kwargs={'uuid':self._n2.uuid})
response = self.client.get(url_n2, format='json')
eq_(response.status_code, status.HTTP_200_OK)
eq_(response.data['id'], str(self._n2.uuid))
eq_(response.data['path'], 'testmechanics/kinematics')
# prerequsites
eq_(len(response.data['prerequisites']), 1)
eq_(response.data['prerequisites'][0]['prerequisite']['id'], str(self._n1.uuid))
eq_(response.data['prerequisites'][0]['level'], 'UGRAD')
# usedfors
eq_(len(response.data['usedfors']), 1)
eq_(response.data['usedfors'][0]['usedfor']['id'], str(self._n3.uuid))
eq_(response.data['usedfors'][0]['level'], 'GRAD')
class TestRelationshipTransitivity(TestCase):
def _create_basenodes(self):
n1 = DjangoBaseNode(path='math/quadratic_equation')
n1.save()
self._n1 = n1
n2 = DjangoBaseNode(path='mechanics/kinematics')
n2.save()
self._n2 = n2
n3 = DjangoBaseNode(path='mechanics/projectile_motion')
n3.save()
self._n3 = n3
def _create_relations(self):
r12 = DjangoDependencyRelation(
prerequisite=self._n1,
usedfor=self._n2,
level='UGRAD',
explain_usedfor='Solving quadratics is useful in kinematics.',
explain_prerequisite='You need to know how to solve quadratic equations to solve certain kinematics problems.'
)
r12.save()
r23 = DjangoDependencyRelation(
prerequisite=self._n2,
usedfor=self._n3,
level='UGRAD',
explain_usedfor='One-dimensional kinematics is used in two-dimensional projectile motion.',
explain_prerequisite='You should be familiar with one-dimensional kinamtics before attacking two-dimensional kinematics porblems.'
)
r23.save()
def test_transitivy_n1n2n3(self):
self._create_basenodes()
self._create_relations()
# forward
n_start = self._n1
n_mid = n_start.usedfors.all()[0]
n_finish = n_mid.usedfors.all()[0]
eq_(n_finish, self._n3)
# backward
n_mid = n_finish.prerequsites.all()[0]
n_start = n_mid.prerequsites.all()[0]
eq_(n_start, self._n1)
class TestDjangoBaseRelationshipPersist(TestCase):
def _create_basenodes(self):
n1 = DjangoBaseNode(path='math/quadratic_equation')
n1.save()
self._n1 = n1
n2 = DjangoBaseNode(path='mechanics/kinematics')
n2.save()
self._n2 = n2
def test_baserelation_create(self):
self._create_basenodes()
n1 = self._n1
n2 = self._n2
r = DjangoDependencyRelation(
prerequisite=n1,
usedfor=n2,
level='UGRAD',
explain_usedfor='Solving quadratics is useful in kinematics.',
explain_prerequisite='You need to know how to solve quadratic equations to solve certain kinematics problems.'
)
r.save()
ok_(r.id is not None)
eq_(r.level, 'UGRAD')
eq_(len(n1.prerequsites.all()), 0)
eq_(len(n1.usedfors.all()), 1)
eq_(n1.usedfors.all()[0], n2)
eq_(len(n2.prerequsites.all()), 1)
eq_(n2.prerequsites.all()[0], n1)
eq_(len(n2.usedfors.all()), 0)
| [
"[email protected]"
] | |
78b768e2735891cfd4e3f013fde8b38428d1d0c3 | af97a81a69d100e684b8e9f02bc1b117069b9895 | /Testcases_InCoder/CWE-117/sonar_1.py | 75ba3e45cab4af79ddf4d72b0685e4b46958d8fb | [] | no_license | s2e-lab/SecurityEval | 8de3e2f78fffa7058d79cce84fbcd8ff29c8468d | f637f67b8e8d52d641ab15903e46a41ead7e30a5 | refs/heads/main | 2023-06-17T22:55:08.047769 | 2023-06-16T16:54:54 | 2023-06-16T16:54:54 | 509,664,226 | 28 | 7 | null | 2023-05-27T04:55:05 | 2022-07-02T05:10:17 | Python | UTF-8 | Python | false | false | 458 | py | from flask import request, current_app, Flask
app = Flask(__name__)
@app.route('/log')
def log():
'''
get the input from the request and log it as an error
'''
try:
ip = request.remote_addr
except:
ip = '127.0.0.1'
error_log = current_app.config['ERROR_LOG']
with open(error_log, 'a') as f:
f.write('{0}\t{1}\t{2}\n'.format(ip, request.method, request.path))
return '', 200 | [
"[email protected]"
] | |
b0c4373a67cea73b8d82a20aa2d6771cd401cd59 | 8fe627b48df3a4711becd58cf57f18a0d5a3a758 | /manage.py | b71770ffec3329da7cba5a01a93d4e8a36cc6c9f | [] | no_license | crowdbotics-apps/brth-20192 | 33b25c2cc763091130aed2d5289111bc65b0cc8e | f060004fc23de1774699ecc7f1dbd6f8ca52a841 | refs/heads/master | 2022-12-09T09:06:41.732984 | 2020-09-10T21:12:32 | 2020-09-10T21:12:32 | 294,523,461 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 630 | py | #!/usr/bin/env python
"""Django's command-line utility for administrative tasks."""
import os
import sys
def main():
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'brth_20192.settings')
try:
from django.core.management import execute_from_command_line
except ImportError as exc:
raise ImportError(
"Couldn't import Django. Are you sure it's installed and "
"available on your PYTHONPATH environment variable? Did you "
"forget to activate a virtual environment?"
) from exc
execute_from_command_line(sys.argv)
if __name__ == '__main__':
main()
| [
"[email protected]"
] | |
7a54088b69601dc6582e0d6ff16f9dbd3318156d | c06002a780c0a1f33cc7bc1a84b4e54d1edf91f8 | /Payload_Type/apfell/mythic/agent_functions/download.py | 912e89946fef3f8441438de4ca6de6aaf320927e | [] | no_license | apple0012/apfell | 98c5331383a09c420f7fde128d65882a87945337 | 8d0c83b6482a9724bcc0754384f423973c9c5dfe | refs/heads/master | 2023-07-17T19:58:49.909027 | 2021-09-03T20:19:52 | 2021-09-03T20:19:52 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,863 | py | from mythic_payloadtype_container.MythicCommandBase import *
import json
from mythic_payloadtype_container.MythicRPC import *
class DownloadArguments(TaskArguments):
def __init__(self, command_line):
super().__init__(command_line)
self.args = {}
async def parse_arguments(self):
if len(self.command_line) > 0:
if self.command_line[0] == "{":
temp_json = json.loads(self.command_line)
if "host" in temp_json:
# this means we have tasking from the file browser rather than the popup UI
# the apfell agent doesn't currently have the ability to do _remote_ listings, so we ignore it
self.command_line = temp_json["path"] + "/" + temp_json["file"]
else:
raise Exception("Unsupported JSON")
else:
raise Exception("Must provide a path to download")
class DownloadCommand(CommandBase):
cmd = "download"
needs_admin = False
help_cmd = "download {path to remote file}"
description = "Download a file from the victim machine to the Mythic server in chunks (no need for quotes in the path)."
version = 1
supported_ui_features = ["file_browser:download"]
author = "@its_a_feature_"
parameters = []
attackmapping = ["T1020", "T1030", "T1041"]
argument_class = DownloadArguments
browser_script = BrowserScript(script_name="download", author="@its_a_feature_")
async def create_tasking(self, task: MythicTask) -> MythicTask:
resp = await MythicRPC().execute("create_artifact", task_id=task.id,
artifact="$.NSFileHandle.fileHandleForReadingAtPath, readDataOfLength",
artifact_type="API Called",
)
return task
async def process_response(self, response: AgentResponse):
pass
| [
"[email protected]"
] | |
571e0708ec0d32fd46c632db1f7e914a8d5f89e9 | 2d8da5cacd21dd425688d67e1a92faa50aefc6bc | /bulb-switcher.py | 9b83a07e1278919276ff86b2dbc70c2d92431363 | [] | no_license | stella-shen/Leetcode | 970857edb74ae3ccf4bcce0c40e972ab8bcc5348 | 16ad99a6511543f0286559c483206c43ed655ddd | refs/heads/master | 2021-01-19T02:48:49.918054 | 2018-11-29T10:36:43 | 2018-11-29T10:36:43 | 47,523,042 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 163 | py | import math
class Solution(object):
def bulbSwitch(self, n):
"""
:type n: int
:rtype: int
"""
return int(math.sqrt(n)) | [
"[email protected]"
] | |
22e182483efad689fb6e85baeebdc8221a228639 | a2b6bc9bdd2bdbe5871edb613065dd2397175cb3 | /Cookbook/Array/移动零.py | 69b79cb17710f98a8a696166a87d241bba491561 | [] | no_license | Asunqingwen/LeetCode | ed8d2043a31f86e9e256123439388d7d223269be | b7c59c826bcd17cb1333571eb9f13f5c2b89b4ee | refs/heads/master | 2022-09-26T01:46:59.790316 | 2022-09-01T08:20:37 | 2022-09-01T08:20:37 | 95,668,066 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 798 | py | '''
Given an array nums, write a function that moves all 0's to the end of the array while keeping the relative order of the non-zero elements.
Example:
Input: [0,1,0,3,12]
Output: [1,3,12,0,0]
Note:
You must operate on the original array in place, without copying to an extra array.
Minimize the total number of operations.
'''
from typing import List
class Solution:
def moveZeroes(self, nums: List[int]) -> None:
"""
Do not return anything, modify nums in-place instead.
"""
first = second = 0
while second < len(nums):
if nums[second] != 0:
nums[first], nums[second] = nums[second], nums[first]
first += 1
second += 1
if __name__ == '__main__':
nums = [0, 1, 0, 3, 12]
sol = Solution()
sol.moveZeroes(nums)
print(nums)
| [
"[email protected]"
] | |
023384e4cc57c97689bf7abe06d61f93b5d0695f | eee10264c0e24b488110ca089816b291b05ed8c7 | /ingestors/documents/html.py | 5538f17b1a40258591783812725fe650655ebe0e | [
"MIT"
] | permissive | pombredanne/ingestors | def03d1b9a71640a1987a05e26385240a3650eb8 | 7f7ec82a6757743bc00e84b6b3d62b1c3cf7630a | refs/heads/master | 2021-08-22T23:34:56.808521 | 2017-12-01T17:42:49 | 2017-12-01T17:42:49 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 521 | py | from ingestors.base import Ingestor
from ingestors.support.html import HTMLSupport
from ingestors.support.encoding import EncodingSupport
class HTMLIngestor(Ingestor, EncodingSupport, HTMLSupport):
"HTML file ingestor class. Extracts the text from the web page."
MIME_TYPES = ['text/html']
def ingest(self, file_path):
"""Ingestor implementation."""
html_body = self.read_file_decoded(file_path)
self.result.flag(self.result.FLAG_HTML)
self.extract_html_content(html_body)
| [
"[email protected]"
] | |
24fdaeb60ef7bfaa6b0959fad30390a8df853cb9 | 6b9084d234c87d7597f97ec95808e13f599bf9a1 | /training/transt/logger/_wandb.py | 7f869c9b3cf935c338a8f9e823b7f4cac86f5ad6 | [] | no_license | LitingLin/ubiquitous-happiness | 4b46234ce0cb29c4d27b00ec5a60d3eeb52c26fc | aae2d764e136ca4a36c054212b361dd7e8b22cba | refs/heads/main | 2023-07-13T19:51:32.227633 | 2021-08-03T16:02:03 | 2021-08-03T16:02:03 | 316,664,903 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,211 | py | try:
import wandb
has_wandb = True
except ImportError:
has_wandb = False
from miscellanies.flatten_dict import flatten_dict
from miscellanies.git_status import get_git_status
from miscellanies.torch.distributed import is_main_process, is_dist_available_and_initialized
class WandbLogger:
def __init__(self, id_, project_name: str, config: dict,
tags: list, step_times: int,
initial_step: int, log_freq: int,
only_log_on_main_process: bool,
watch_model_freq: int,
watch_model_parameters=False, watch_model_gradients=False,
tensorboard_root_path=None
):
if not has_wandb:
print('Install wandb to enable remote logging')
return
if tensorboard_root_path is not None:
wandb.tensorboard.patch(pytorch=True, tensorboardX=False, root_logdir=tensorboard_root_path)
self.id = id_
self.project_name = project_name
config = flatten_dict(config)
config['git_version'] = get_git_status()
self.tags = tags
self.config = config
self.step = initial_step
self.log_freq = log_freq
self.only_log_on_main_process = only_log_on_main_process
self.step_times = step_times
if watch_model_parameters and watch_model_gradients:
watch_model = 'all'
elif watch_model_parameters:
watch_model = 'parameters'
elif watch_model_gradients:
watch_model = 'gradients'
else:
watch_model = None
self.watch_model = watch_model
self.watch_model_freq = watch_model_freq
def __enter__(self):
self.start()
def __exit__(self, exc_type, exc_val, exc_tb):
self.stop()
def _is_disabled(self):
return self.only_log_on_main_process and not is_main_process()
def start(self):
if self._is_disabled():
return
configs = {'project': self.project_name, 'entity': 'llt', 'tags': self.tags, 'config': flatten_dict(self.config),
'force': True, 'job_type': 'train', 'id': self.id}
if not self.only_log_on_main_process and is_dist_available_and_initialized():
configs['group'] = 'ddp'
wandb.init(**configs)
def log_train(self, epoch, forward_stats, backward_stats):
if self._is_disabled():
return
if self.step % self.log_freq == 0:
step = self.step * self.step_times
log = {'epoch': epoch, 'batch': step, **forward_stats, **backward_stats}
wandb.log(log, step=step)
self.step += 1
def log_test(self, epoch, summary):
if self._is_disabled():
return
step = self.step * self.step_times
summary = {'test_' + k: v for k, v in summary.items()}
summary['epoch'] = epoch
wandb.log(summary, step=step)
def watch(self, model):
if self._is_disabled():
return
wandb.watch(model, log=self.watch_model, log_freq=self.watch_model_freq)
def stop(self):
if self._is_disabled():
return
wandb.finish()
| [
"[email protected]"
] |