ext stringclasses 9 values | sha stringlengths 40 40 | content stringlengths 3 1.04M |
---|---|---|
py | b4059dbbca5cf289e31b5750e4135e415f0b4ccf | import torch, os, errno
from torch.autograd import Variable
def mask_(matrices, maskval=0.0, mask_diagonal=True):
"""
Masks out all values in the given batch of matrices where i <= j holds
(or where i < j if mask_diagonal is False).
In-place operation.
:param matrices: batch of matrices of shape (b, h, w)
:param maskval: value written into the masked positions
:param mask_diagonal: whether the diagonal itself is masked
"""
b, h, w = matrices.size()
indices = torch.triu_indices(h, w, offset=0 if mask_diagonal else 1)
matrices[:, indices[0], indices[1]] = maskval
def d(tensor=None):
"""
Returns a device string either for the best available device,
or for the device corresponding to the argument
:param tensor:
:return:
"""
if tensor is None:
return 'cuda' if torch.cuda.is_available() else 'cpu'
return 'cuda' if tensor.is_cuda else 'cpu'
def here(subpath=None):
"""
:return: the path in which the package resides (the directory containing the 'former' dir)
"""
if subpath is None:
return os.path.abspath(os.path.join(os.path.dirname(__file__), '../..'))
return os.path.abspath(os.path.join(os.path.dirname(__file__), '../..', subpath))
def contains_nan(tensor):
return bool((tensor != tensor).sum() > 0)
def makedirs(directory):
try:
os.makedirs(directory)
except OSError as e:
if e.errno != errno.EEXIST:
raise |
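A brief usage sketch of the helpers above (an illustration, assuming PyTorch is installed and mask_, d and contains_nan are in scope):

import torch

batch = torch.ones(2, 4, 4)                      # batch of two 4x4 matrices
mask_(batch, maskval=0.0, mask_diagonal=False)   # zero out entries strictly above the diagonal
assert batch[0, 0, 1] == 0.0 and batch[0, 1, 0] == 1.0
print(d())                  # 'cuda' if a GPU is available, otherwise 'cpu'
print(contains_nan(batch))  # False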
py | b4059e5e9a13916d50fce9fee4c85966df558a5a | import pygame
import numpy
import random
import copy
import sys
class Obstacles():
def __init__(self):
self.obstaclesList = []
def makeObstacles(self, pos, r):
pos = pygame.math.Vector2(pos)
self.obstaclesList.append((pos, r))
def drawObstacles(self, DISPLAY):
for pos, r in self.obstaclesList:
pygame.draw.circle(DISPLAY, (255, 0, 0),
(int(pos[0]), int(pos[1])), int(r), 0)
|
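A minimal, hypothetical driver for the Obstacles class above (window size and circle positions are arbitrary illustration values):

import pygame

pygame.init()
display = pygame.display.set_mode((400, 300))
obstacles = Obstacles()
obstacles.makeObstacles((100, 150), 20)   # red circle at (100, 150) with radius 20
obstacles.makeObstacles((250, 80), 35)
display.fill((0, 0, 0))
obstacles.drawObstacles(display)
pygame.display.flip()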
py | b4059f8c4e17fca56620d880bf4ed5cc0e572938 | #!/usr/bin/env python
# This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this
# file, You can obtain one at http://mozilla.org/MPL/2.0/.
"""tests for ManifestDestiny"""
import doctest
import os
import sys
from optparse import OptionParser
def run_tests(raise_on_error=False, report_first=False):
# add results here
results = {}
# doctest arguments
directory = os.path.dirname(os.path.abspath(__file__))
extraglobs = {}
doctest_args = dict(extraglobs=extraglobs,
module_relative=False,
raise_on_error=raise_on_error)
if report_first:
doctest_args['optionflags'] = doctest.REPORT_ONLY_FIRST_FAILURE
# gather tests
directory = os.path.dirname(os.path.abspath(__file__))
tests = [ test for test in os.listdir(directory)
if test.endswith('.txt') and test.startswith('test_')]
os.chdir(directory)
# run the tests
for test in tests:
try:
results[test] = doctest.testfile(test, **doctest_args)
except doctest.DocTestFailure, failure:
raise
except doctest.UnexpectedException, failure:
raise failure.exc_info[0], failure.exc_info[1], failure.exc_info[2]
return results
def main(args=sys.argv[1:]):
# parse command line options
parser = OptionParser(description=__doc__)
parser.add_option('--raise', dest='raise_on_error',
default=False, action='store_true',
help="raise on first error")
parser.add_option('--report-first', dest='report_first',
default=False, action='store_true',
help="report the first error only (all tests will still run)")
parser.add_option('-q', '--quiet', dest='quiet',
default=False, action='store_true',
help="minimize output")
options, args = parser.parse_args(args)
quiet = options.__dict__.pop('quiet')
# run the tests
results = run_tests(**options.__dict__)
# check for failure
failed = False
for result in results.values():
if result[0]: # failure count; http://docs.python.org/library/doctest.html#basic-api
failed = True
break
if failed:
sys.exit(1) # error
if not quiet:
# print results
print "manifestparser.py: All tests pass!"
for test in sorted(results.keys()):
result = results[test]
print "%s: failed=%s, attempted=%s" % (test, result[0], result[1])
if __name__ == '__main__':
main()
|
py | b405a042e4f2bec41d358ce8d32c186667485b8e | from mimetypes import guess_type
from fastapi import APIRouter, File, UploadFile, Depends
from app.crud.auth.UserDao import UserDao
from app.crud.oss.PityOssDao import PityOssDao
from app.handler.fatcory import PityResponse
from app.middleware.oss import OssClient
from app.models.oss_file import PityOssFile
from app.routers import Permission, get_session
from config import Config
router = APIRouter(prefix="/oss")
@router.post("/upload")
async def create_oss_file(filepath: str, file: UploadFile = File(...), user_info=Depends(Permission(Config.MEMBER))):
try:
file_content = await file.read()
client = OssClient.get_oss_client()
record = await PityOssDao.query_record(file_path=await client.get_real_path(filepath),
deleted_at=0)
if record is not None:
# Exceptions are not well defined here; ideally this would be a dedicated exception class, e.g. FileExistsException
raise Exception("文件已存在")
# OSS upload. WARNING: data may get out of sync if the OSS write succeeds but the local write fails
file_url, file_size, sha = await client.create_file(filepath, file_content)
# Also keep a copy of the record in the local database
model = PityOssFile(user_info['id'], filepath, file_url, PityOssFile.get_size(file_size), sha)
await PityOssDao.insert_record(model, True)
return PityResponse.success()
except Exception as e:
return PityResponse.failed(f"上传失败: {e}")
@router.post("/avatar")
async def upload_avatar(file: UploadFile = File(...), user_info=Depends(Permission(Config.MEMBER))):
try:
file_content = await file.read()
suffix = file.filename.split(".")[-1]
filepath = f"user_{user_info['id']}.{suffix}"
client = OssClient.get_oss_client()
file_url, _, _ = await client.create_file(filepath, file_content, base_path="avatar")
await UserDao.update_avatar(user_info['id'], file_url)
return PityResponse.success(file_url)
except Exception as e:
return PityResponse.failed(f"上传头像失败: {e}")
@router.get("/list")
async def list_oss_file(filepath: str = '', user_info=Depends(Permission(Config.MEMBER))):
try:
records = await PityOssDao.list_record(condition=[PityOssFile.file_path.like(f'%{filepath}%')])
return PityResponse.records(records)
except Exception as e:
return PityResponse.failed(f"获取失败: {e}")
@router.get("/delete")
async def delete_oss_file(filepath: str, user_info=Depends(Permission(Config.MANAGER)), session=Depends(get_session)):
try:
# Fetch the local record first to obtain its sha value
record = await PityOssDao.query_record(file_path=filepath, deleted_at=0)
if record is None:
raise Exception("文件不存在或已被删除")
result = await PityOssDao.delete_record_by_id(session, user_info["id"], record.id, log=True)
client = OssClient.get_oss_client()
f_path = f"{filepath}${result.sha}" if result.sha else filepath
await client.delete_file(f_path)
return PityResponse.success()
except Exception as e:
return PityResponse.failed(f"删除失败: {e}")
@router.post("/update")
async def update_oss_file(filepath: str, file: UploadFile = File(...), user_info=Depends(Permission(Config.MEMBER))):
"""
Update an OSS file; the file path must not change.
:param user_info:
:param filepath:
:param file:
:return:
"""
try:
client = OssClient.get_oss_client()
file_content = await file.read()
await client.update_file(filepath, file_content)
return PityResponse.success()
except Exception as e:
return PityResponse.failed(f"删除失败: {e}")
@router.get("/download")
async def download_oss_file(filepath: str):
"""
Download an OSS file by its path.
:param filepath:
:return:
"""
try:
client = OssClient.get_oss_client()
# Split the path to obtain the file name
path, filename = await client.download_file(filepath)
return PityResponse.file(path, filename)
except Exception as e:
return PityResponse.failed(f"下载失败: {e}")
|
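A hedged sketch of how an APIRouter like the one above is typically mounted; the application object and its module layout are assumptions for illustration:

from fastapi import FastAPI

app = FastAPI()
# 'router' is the APIRouter defined above; this exposes /oss/upload, /oss/avatar,
# /oss/list, /oss/delete, /oss/update and /oss/download on the application.
app.include_router(router)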
py | b405a113b5d78c6a77a6c50be4674e39f6709d19 | import mimetypes
import os.path
import random
import string
from django.core.files.storage import default_storage
from django.template.defaultfilters import slugify
def slugify_filename(filename):
""" Slugify filename """
name, ext = os.path.splitext(filename)
slugified = get_slugified_name(name)
return slugified + ext
def get_slugified_name(filename):
slugified = slugify(filename)
return slugified or get_random_string()
def get_random_string():
return ''.join(random.sample(string.ascii_lowercase*6, 6))
def get_thumb_filename(file_name):
"""
Generate thumb filename by adding _thumb to end of
filename before . (if present)
"""
return u'{0}_thumb{1}'.format(*os.path.splitext(file_name))
def get_image_format(extension):
mimetypes.init()
return mimetypes.types_map[extension]
def get_media_url(path):
"""
Determine system file's media URL.
"""
return default_storage.url(path)
|
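Illustrative calls to the helpers above (slugify_filename, get_thumb_filename and get_image_format work on plain strings; get_media_url needs a configured Django storage backend):

print(slugify_filename('My Photo 1.JPG'))     # 'my-photo-1.JPG'
print(get_thumb_filename('uploads/pic.png'))  # 'uploads/pic_thumb.png'
print(get_image_format('.png'))               # 'image/png'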
py | b405a12ad38971b947eeb0d32bbfc66323feb27e | # Copyright (c) 2008, 2010 Aldo Cortesi
# Copyright (c) 2009 Ben Duffield
# Copyright (c) 2010 aldo
# Copyright (c) 2010-2012 roger
# Copyright (c) 2011 Florian Mounier
# Copyright (c) 2011 Kenji_Takahashi
# Copyright (c) 2011-2015 Tycho Andersen
# Copyright (c) 2012-2013 dequis
# Copyright (c) 2012 Craig Barnes
# Copyright (c) 2013 xarvh
# Copyright (c) 2013 Tao Sauvage
# Copyright (c) 2014 Sean Vig
# Copyright (c) 2014 Filipe Nepomuceno
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
import itertools
from .. import bar, hook
from . import base
from typing import Any, List, Tuple # noqa: F401
class _GroupBase(base._TextBox, base.PaddingMixin, base.MarginMixin):
defaults = [
("borderwidth", 3, "Current group border width"),
("center_aligned", True, "center-aligned group box"),
] # type: List[Tuple[str, Any, str]]
def __init__(self, **config):
base._TextBox.__init__(self, width=bar.CALCULATED, **config)
self.add_defaults(_GroupBase.defaults)
self.add_defaults(base.PaddingMixin.defaults)
self.add_defaults(base.MarginMixin.defaults)
def box_width(self, groups):
width, _ = self.drawer.max_layout_size(
[i.label for i in groups],
self.font,
self.fontsize
)
return width + self.padding_x * 2 + self.borderwidth * 2
def _configure(self, qtile, bar):
base._Widget._configure(self, qtile, bar)
if self.fontsize is None:
calc = self.bar.height - self.margin_y * 2 - \
self.borderwidth * 2 - self.padding_y * 2
self.fontsize = max(calc, 1)
self.layout = self.drawer.textlayout(
"",
"ffffff",
self.font,
self.fontsize,
self.fontshadow
)
self.setup_hooks()
def setup_hooks(self):
def hook_response(*args, **kwargs):
self.bar.draw()
hook.subscribe.client_managed(hook_response)
hook.subscribe.client_urgent_hint_changed(hook_response)
hook.subscribe.client_killed(hook_response)
hook.subscribe.setgroup(hook_response)
hook.subscribe.group_window_add(hook_response)
hook.subscribe.current_screen_change(hook_response)
hook.subscribe.changegroup(hook_response)
def drawbox(self, offset, text, bordercolor, textcolor, highlight_color=None,
width=None, rounded=False, block=False, line=False, highlighted=False):
self.layout.text = text
self.layout.font_family = self.font
self.layout.font_size = self.fontsize
self.layout.colour = textcolor
if width is not None:
self.layout.width = width
if line:
pad_y = [
(self.bar.height - self.layout.height - self.borderwidth) / 2,
(self.bar.height - self.layout.height + self.borderwidth) / 2
]
else:
pad_y = self.padding_y
framed = self.layout.framed(
self.borderwidth,
bordercolor,
0,
pad_y,
highlight_color
)
y = self.margin_y
if self.center_aligned:
for t in base.MarginMixin.defaults:
if t[0] == 'margin':
y += (self.bar.height - framed.height) / 2 - t[1]
break
if block:
framed.draw_fill(offset, y, rounded)
elif line:
framed.draw_line(offset, y, highlighted)
else:
framed.draw(offset, y, rounded)
class AGroupBox(_GroupBase):
"""A widget that graphically displays the current group"""
orientations = base.ORIENTATION_HORIZONTAL
defaults = [("border", "000000", "group box border color")]
def __init__(self, **config):
_GroupBase.__init__(self, **config)
self.add_defaults(AGroupBox.defaults)
def button_press(self, x, y, button):
self.bar.screen.cmd_next_group()
def calculate_length(self):
return self.box_width(self.qtile.groups) + self.margin_x * 2
def draw(self):
self.drawer.clear(self.background or self.bar.background)
e = next(
i for i in self.qtile.groups
if i.name == self.bar.screen.group.name
)
self.drawbox(self.margin_x, e.name, self.border, self.foreground)
self.drawer.draw(offsetx=self.offset, width=self.width)
class GroupBox(_GroupBase):
"""
A widget that graphically displays the current group.
All groups are displayed by their label.
If the label of a group is the empty string that group will not be displayed.
"""
orientations = base.ORIENTATION_HORIZONTAL
defaults = [
("active", "FFFFFF", "Active group font colour"),
("inactive", "404040", "Inactive group font colour"),
(
"highlight_method",
"border",
"Method of highlighting ('border', 'block', 'text', or 'line')"
"Uses `*_border` color settings"
),
("rounded", True, "To round or not to round box borders"),
(
"this_current_screen_border",
"215578",
"Border or line colour for group on this screen when focused."
),
(
"this_screen_border",
"215578",
"Border or line colour for group on this screen when unfocused."
),
(
"other_current_screen_border",
"404040",
"Border or line colour for group on other screen when focused."
),
(
"other_screen_border",
"404040",
"Border or line colour for group on other screen when unfocused."
),
(
"highlight_color",
["000000", "282828"],
"Active group highlight color when using 'line' highlight method."
),
(
"urgent_alert_method",
"border",
"Method for alerting you of WM urgent "
"hints (one of 'border', 'text', 'block', or 'line')"
),
("urgent_text", "FF0000", "Urgent group font color"),
("urgent_border", "FF0000", "Urgent border or line color"),
(
"disable_drag",
False,
"Disable dragging and dropping of group names on widget"
),
("invert_mouse_wheel", False, "Whether to invert mouse wheel group movement"),
("use_mouse_wheel", True, "Whether to use mouse wheel events"),
(
"visible_groups",
None,
"Groups that will be visible. "
"If set to None or [], all groups will be visible."
"Visible groups are identified by name not by their displayed label."
),
(
"hide_unused",
False,
"Hide groups that have no windows and that are not displayed on any screen."
),
(
"spacing",
None,
"Spacing between groups"
"(if set to None, will be equal to margin_x)")
]
def __init__(self, **config):
_GroupBase.__init__(self, **config)
self.add_defaults(GroupBox.defaults)
if self.spacing is None:
self.spacing = self.margin_x
self.clicked = None
@property
def groups(self):
"""
returns list of visible groups.
The existing groups are filtered by the visible_groups attribute and
their label. Groups with an empty string as label are never contained.
Groups that are not named in visible_groups are not returned.
"""
if self.hide_unused:
if self.visible_groups:
return [g for g in self.qtile.groups
if g.label and (g.windows or g.screen) and
g.name in self.visible_groups]
else:
return [g for g in self.qtile.groups if g.label and
(g.windows or g.screen)]
else:
if self.visible_groups:
return [g for g in self.qtile.groups
if g.label and g.name in self.visible_groups]
else:
return [g for g in self.qtile.groups if g.label]
def get_clicked_group(self, x, y):
group = None
new_width = self.margin_x - self.spacing / 2.0
width = 0
for g in self.groups:
new_width += self.box_width([g]) + self.spacing
if width <= x <= new_width:
group = g
break
width = new_width
return group
def button_press(self, x, y, button):
self.clicked = None
group = None
current_group = self.qtile.current_group
if button == (5 if not self.invert_mouse_wheel else 4):
if self.use_mouse_wheel:
i = itertools.cycle(self.qtile.groups)
while next(i) != current_group:
pass
while group is None or group not in self.groups:
group = next(i)
elif button == (4 if not self.invert_mouse_wheel else 5):
if self.use_mouse_wheel:
i = itertools.cycle(reversed(self.qtile.groups))
while next(i) != current_group:
pass
while group is None or group not in self.groups:
group = next(i)
else:
group = self.get_clicked_group(x, y)
if not self.disable_drag:
self.clicked = group
if group:
if self.bar.screen.group != group or not self.disable_drag:
self.bar.screen.set_group(group)
else:
self.bar.screen.toggle_group(group)
def button_release(self, x, y, button):
if button not in (5, 4):
group = self.get_clicked_group(x, y)
if group and self.clicked:
group.cmd_switch_groups(self.clicked.name)
self.clicked = None
def calculate_length(self):
width = self.margin_x * 2 + (len(self.groups) - 1) * self.spacing
for g in self.groups:
width += self.box_width([g])
return width
def group_has_urgent(self, group):
return len([w for w in group.windows if w.urgent]) > 0
def draw(self):
self.drawer.clear(self.background or self.bar.background)
offset = self.margin_x
for i, g in enumerate(self.groups):
to_highlight = False
is_block = (self.highlight_method == 'block')
is_line = (self.highlight_method == 'line')
bw = self.box_width([g])
if self.group_has_urgent(g) and self.urgent_alert_method == "text":
text_color = self.urgent_text
elif g.windows:
text_color = self.active
else:
text_color = self.inactive
if g.screen:
if self.highlight_method == 'text':
border = self.bar.background
text_color = self.this_current_screen_border
else:
if self.bar.screen.group.name == g.name:
if self.qtile.current_screen == self.bar.screen:
border = self.this_current_screen_border
to_highlight = True
else:
border = self.this_screen_border
else:
if self.qtile.current_screen == g.screen:
border = self.other_current_screen_border
else:
border = self.other_screen_border
elif self.group_has_urgent(g) and \
self.urgent_alert_method in ('border', 'block', 'line'):
border = self.urgent_border
if self.urgent_alert_method == 'block':
is_block = True
elif self.urgent_alert_method == 'line':
is_line = True
else:
border = self.background or self.bar.background
self.drawbox(
offset,
g.label,
border,
text_color,
highlight_color=self.highlight_color,
width=bw,
rounded=self.rounded,
block=is_block,
line=is_line,
highlighted=to_highlight
)
offset += bw + self.spacing
self.drawer.draw(offsetx=self.offset, width=self.width)
|
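A hedged configuration sketch showing how GroupBox is normally placed in a qtile bar (typical config.py usage; the widget parameters shown are illustrative picks from the defaults listed above):

from libqtile import bar, widget
from libqtile.config import Screen

screens = [
    Screen(
        top=bar.Bar(
            [
                widget.GroupBox(highlight_method='line', hide_unused=True, rounded=False),
                widget.WindowName(),
            ],
            24,  # bar height in pixels
        )
    )
]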
py | b405a1b13e464b65e5cfac3d972ccd0a753dd00f | # coding: utf-8
"""
Kubernetes
No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen)
OpenAPI spec version: v1.13.1
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from pprint import pformat
from six import iteritems
import re
class V1PortworxVolumeSource(object):
"""
NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
"""
"""
Attributes:
swagger_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
swagger_types = {
'fs_type': 'str',
'read_only': 'bool',
'volume_id': 'str'
}
attribute_map = {
'fs_type': 'fsType',
'read_only': 'readOnly',
'volume_id': 'volumeID'
}
def __init__(self, fs_type=None, read_only=None, volume_id=None):
"""
V1PortworxVolumeSource - a model defined in Swagger
"""
self._fs_type = None
self._read_only = None
self._volume_id = None
self.discriminator = None
if fs_type is not None:
self.fs_type = fs_type
if read_only is not None:
self.read_only = read_only
self.volume_id = volume_id
@property
def fs_type(self):
"""
Gets the fs_type of this V1PortworxVolumeSource.
FSType represents the filesystem type to mount Must be a filesystem type supported by the host operating system. Ex. \"ext4\", \"xfs\". Implicitly inferred to be \"ext4\" if unspecified.
:return: The fs_type of this V1PortworxVolumeSource.
:rtype: str
"""
return self._fs_type
@fs_type.setter
def fs_type(self, fs_type):
"""
Sets the fs_type of this V1PortworxVolumeSource.
FSType represents the filesystem type to mount Must be a filesystem type supported by the host operating system. Ex. \"ext4\", \"xfs\". Implicitly inferred to be \"ext4\" if unspecified.
:param fs_type: The fs_type of this V1PortworxVolumeSource.
:type: str
"""
self._fs_type = fs_type
@property
def read_only(self):
"""
Gets the read_only of this V1PortworxVolumeSource.
Defaults to false (read/write). ReadOnly here will force the ReadOnly setting in VolumeMounts.
:return: The read_only of this V1PortworxVolumeSource.
:rtype: bool
"""
return self._read_only
@read_only.setter
def read_only(self, read_only):
"""
Sets the read_only of this V1PortworxVolumeSource.
Defaults to false (read/write). ReadOnly here will force the ReadOnly setting in VolumeMounts.
:param read_only: The read_only of this V1PortworxVolumeSource.
:type: bool
"""
self._read_only = read_only
@property
def volume_id(self):
"""
Gets the volume_id of this V1PortworxVolumeSource.
VolumeID uniquely identifies a Portworx volume
:return: The volume_id of this V1PortworxVolumeSource.
:rtype: str
"""
return self._volume_id
@volume_id.setter
def volume_id(self, volume_id):
"""
Sets the volume_id of this V1PortworxVolumeSource.
VolumeID uniquely identifies a Portworx volume
:param volume_id: The volume_id of this V1PortworxVolumeSource.
:type: str
"""
if volume_id is None:
raise ValueError("Invalid value for `volume_id`, must not be `None`")
self._volume_id = volume_id
def to_dict(self):
"""
Returns the model properties as a dict
"""
result = {}
for attr, _ in iteritems(self.swagger_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
return result
def to_str(self):
"""
Returns the string representation of the model
"""
return pformat(self.to_dict())
def __repr__(self):
"""
For `print` and `pprint`
"""
return self.to_str()
def __eq__(self, other):
"""
Returns true if both objects are equal
"""
if not isinstance(other, V1PortworxVolumeSource):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""
Returns true if both objects are not equal
"""
return not self == other
|
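An illustrative construction of the generated model above (volume_id is the only required field):

source = V1PortworxVolumeSource(volume_id='pxd-123', fs_type='ext4', read_only=False)
print(source.to_dict())  # {'fs_type': 'ext4', 'read_only': False, 'volume_id': 'pxd-123'}
print(source == V1PortworxVolumeSource(volume_id='pxd-123', fs_type='ext4', read_only=False))  # True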
py | b405a24d75cdb0554a638097921b6385bcc273b8 | import tweepy
import csv
#Twitter API credentials
consumer_key = "UKbRRwBrmbNJe99x7AUilL7lm"
consumer_secret = "YvdWDgRSMpxknxEKBz7LrSZjcR1CkKPgdDaIft4NjpngIobfmD"
access_key = "855282597085069312-VrXGJQONyrk9V43EtnqV9LjbZaVbOde"
access_secret = "LyS1w3VINgqbhA8JqW3rOfCAWefOByc0YbjcFw2y8zucy"
def get_all_tweets(screen_name):
#Twitter only allows access to a user's most recent 3240 tweets with this method
#authorize twitter, initialize tweepy
auth = tweepy.OAuthHandler(consumer_key, consumer_secret)
auth.set_access_token(access_key, access_secret)
api = tweepy.API(auth)
#initialize a list to hold all the tweepy Tweets
alltweets = []
#make initial request for most recent tweets (200 is the maximum allowed count)
new_tweets = api.user_timeline(screen_name = screen_name,count=200)
#save most recent tweets
alltweets.extend(new_tweets)
#save the id of the oldest tweet less one
oldest = alltweets[-1].id - 1
#keep grabbing tweets until there are no tweets left to grab
while len(new_tweets) > 0:
#all subsequent requests use the max_id param to prevent duplicates
new_tweets = api.user_timeline(screen_name = screen_name,count=200,max_id=oldest)
#save most recent tweets
alltweets.extend(new_tweets)
#update the id of the oldest tweet less one
oldest = alltweets[-1].id - 1
#transform the tweepy tweets into a 2D array that will populate the csv
outtweets = [[tweet.id_str, tweet.created_at, tweet.text.encode("utf-8")] for tweet in alltweets]
#write the csv
with open('%s_tweets.csv' % screen_name, 'w') as f:
writer = csv.writer(f)
writer.writerow(["id","created_at","text"])
writer.writerows(outtweets)
pass
if __name__ == '__main__':
#pass in the username of the account you want to download
get_all_tweets("DummyBbmp") |
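A hedged alternative to the manual max_id loop above, using tweepy's Cursor pagination helper (tweepy 3.x-style API; treat the exact behaviour as an assumption, and note the ~3200-tweet limit still applies):

def get_all_tweets_cursor(screen_name, api):
    # Cursor handles the max_id bookkeeping that the function above does by hand.
    return [
        [tweet.id_str, tweet.created_at, tweet.text.encode("utf-8")]
        for tweet in tweepy.Cursor(api.user_timeline, screen_name=screen_name, count=200).items()
    ]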
py | b405a2f32d1d3c07c09abfb8277824ab88dfb310 | # -*- coding: utf-8 -*-
from challenge.grid import Position, Grid
import unittest
class PositionTestSuite(unittest.TestCase):
"""Advanced test cases."""
def test_instantion(self):
""" Test if we can instantiate a grid object """
position = Position('dummy-grid', '33E')
self.assertEqual(type(position), Position)
def test_init(self):
""" Test initialization of parameters in init method """
pos_str = '31E'
position = Position('dummy-grid', pos_str)
self.assertEqual(position.x_coord, 3)
self.assertEqual(position.y_coord, 1)
self.assertEqual(position.direction, 'E')
self.assertEqual(position.grid, 'dummy-grid')
def test_turn_left(self):
""" Test a turn to the left """
pos_str = '31E'
grid = Grid(5, 5)
position = Position(grid, pos_str)
coords = position.turn('L')
self.assertEqual(position.direction, 'N')
self.assertEqual(coords, '31N')
def test_turn_right(self):
""" Test a turn to the right """
pos_str = '31E'
grid = Grid(5, 5)
position = Position(grid, pos_str)
coords = position.turn('R')
self.assertEqual(position.direction, 'S')
self.assertEqual(coords, '31S')
def test_turn_edge_left(self):
pos_str = '31N'
grid = Grid(5, 5)
position = Position(grid, pos_str)
coords = position.turn('L')
self.assertEqual(position.direction, 'W')
self.assertEqual(coords, '31W')
def test_turn_edge_right(self):
pos_str = '31W'
grid = Grid(5, 5)
position = Position(grid, pos_str)
coords = position.turn('R')
self.assertEqual(position.direction, 'N')
self.assertEqual(coords, '31N')
def test_move_north(self):
pos_str = '12N'
grid = Grid(5, 5)
position = Position(grid, pos_str)
coords = position.move()
self.assertEqual(position.direction, 'N')
self.assertEqual(coords, '13N')
def test_move_east(self):
pos_str = '12E'
grid = Grid(5, 5)
position = Position(grid, pos_str)
coords = position.move()
self.assertEqual(position.direction, 'E')
def test_move_south(self):
pos_str = '12S'
grid = Grid(5, 5)
position = Position(grid, pos_str)
coords = position.move()
self.assertEqual(position.direction, 'S')
self.assertEqual(coords, '11S')
def test_move_west(self):
pos_str = '12W'
grid = Grid(5, 5)
position = Position(grid, pos_str)
coords = position.move()
self.assertEqual(position.direction, 'W')
self.assertEqual(coords, '02W')
if __name__ == '__main__':
unittest.main() |
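The challenge.grid module exercised by these tests is not included in this dump; a minimal sketch (an assumption, not the real implementation) that would satisfy the assertions above could look like this:

class Grid:
    def __init__(self, width, height):
        self.width, self.height = width, height

class Position:
    DIRECTIONS = ['N', 'E', 'S', 'W']  # clockwise order, so L/R turns are index steps of -1/+1
    MOVES = {'N': (0, 1), 'E': (1, 0), 'S': (0, -1), 'W': (-1, 0)}

    def __init__(self, grid, pos_str):
        self.grid = grid
        self.x_coord = int(pos_str[0])
        self.y_coord = int(pos_str[1])
        self.direction = pos_str[2]

    def _coords(self):
        return '%d%d%s' % (self.x_coord, self.y_coord, self.direction)

    def turn(self, side):
        step = -1 if side == 'L' else 1
        self.direction = self.DIRECTIONS[(self.DIRECTIONS.index(self.direction) + step) % 4]
        return self._coords()

    def move(self):
        dx, dy = self.MOVES[self.direction]
        self.x_coord += dx
        self.y_coord += dy
        return self._coords()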
py | b405a37cf0738e9e97b421e5ef2361c1876c8825 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# @Time : 2018/9/8 下午8:58
# @Author : Vassago
# @File : __init__.py.py
# @Software: PyCharm |
py | b405a3a0abb834584774c48821b6723f0b8653c3 | #!/usr/bin/python3
"""
# --------------------------------------------------------------------------------------------------
# Rittman Mead Markdown to Confluence Tool
# --------------------------------------------------------------------------------------------------
# Create or Update Atlas pages remotely using markdown files.
#
# --------------------------------------------------------------------------------------------------
# Usage: rest_md2conf.py markdown spacekey
# --------------------------------------------------------------------------------------------------
"""
import logging
import sys
import os
import re
import json
import collections
import mimetypes
import codecs
import argparse
import urllib
import webbrowser
import requests
import markdown
logging.basicConfig(level=logging.INFO, format='%(asctime)s - \
%(levelname)s - %(funcName)s [%(lineno)d] - \
%(message)s')
LOGGER = logging.getLogger(__name__)
# ArgumentParser to parse arguments and options
PARSER = argparse.ArgumentParser()
PARSER.add_argument("markdownFile", help="Full path of the markdown file to convert and upload.")
PARSER.add_argument('spacekey',
help="Confluence Space key for the page. If omitted, will use user space.")
PARSER.add_argument('-u', '--username', help='Confluence username if $CONFLUENCE_USERNAME not set.')
PARSER.add_argument('-p', '--apikey', help='Confluence API key if $CONFLUENCE_API_KEY not set.')
PARSER.add_argument('-o', '--orgname',
help='Confluence organisation if $CONFLUENCE_ORGNAME not set. '
'e.g. https://XXX.atlassian.net/wiki'
'If orgname contains a dot, considered as the fully qualified domain name.'
'e.g. https://XXX')
PARSER.add_argument('-a', '--ancestor',
help='Parent page under which page will be created or moved.')
PARSER.add_argument('-t', '--attachment', nargs='+',
help='Attachment(s) to upload to page. Paths relative to the markdown file.')
PARSER.add_argument('-c', '--contents', action='store_true', default=False,
help='Use this option to generate a contents page.')
PARSER.add_argument('-g', '--nogo', action='store_true', default=False,
help='Use this option to skip navigation after upload.')
PARSER.add_argument('-n', '--nossl', action='store_true', default=False,
help='Use this option if NOT using SSL. Will use HTTP instead of HTTPS.')
PARSER.add_argument('-d', '--delete', action='store_true', default=False,
help='Use this option to delete the page instead of create it.')
PARSER.add_argument('-l', '--loglevel', default='INFO',
help='Use this option to set the log verbosity.')
PARSER.add_argument('-s', '--simulate', action='store_true', default=False,
help='Use this option to only show conversion result.')
ARGS = PARSER.parse_args()
# Assign global variables
try:
# Set log level
LOGGER.setLevel(getattr(logging, ARGS.loglevel.upper(), None))
MARKDOWN_FILE = ARGS.markdownFile
SPACE_KEY = ARGS.spacekey
USERNAME = os.getenv('CONFLUENCE_USERNAME', ARGS.username)
API_KEY = os.getenv('CONFLUENCE_API_KEY', ARGS.apikey)
ORGNAME = os.getenv('CONFLUENCE_ORGNAME', ARGS.orgname)
ANCESTOR = ARGS.ancestor
NOSSL = ARGS.nossl
DELETE = ARGS.delete
SIMULATE = ARGS.simulate
ATTACHMENTS = ARGS.attachment
GO_TO_PAGE = not ARGS.nogo
CONTENTS = ARGS.contents
if USERNAME is None:
LOGGER.error('Error: Username not specified by environment variable or option.')
sys.exit(1)
if API_KEY is None:
LOGGER.error('Error: API key not specified by environment variable or option.')
sys.exit(1)
if not os.path.exists(MARKDOWN_FILE):
LOGGER.error('Error: Markdown file: %s does not exist.', MARKDOWN_FILE)
sys.exit(1)
if SPACE_KEY is None:
SPACE_KEY = '~%s' % (USERNAME)
if ORGNAME is not None:
if ORGNAME.find('.') != -1:
CONFLUENCE_API_URL = 'https://%s' % ORGNAME
else:
CONFLUENCE_API_URL = 'https://%s.atlassian.net/wiki' % ORGNAME
else:
LOGGER.error('Error: Org Name not specified by environment variable or option.')
sys.exit(1)
if NOSSL:
CONFLUENCE_API_URL = CONFLUENCE_API_URL.replace('https://', 'http://')
except Exception as err:
LOGGER.error('\n\nException caught:\n%s ', err)
LOGGER.error('\nFailed to process command line arguments. Exiting.')
sys.exit(1)
def convert_comment_block(html):
"""
Convert markdown comment blocks to Confluence hidden comments
:param html: string
:return: modified html string
"""
open_tag = '<ac:placeholder>'
close_tag = '</ac:placeholder>'
html = html.replace('<!--', open_tag).replace('-->', close_tag)
return html
def convert_code_block(html):
"""
Convert html code blocks to Confluence macros
:param html: string
:return: modified html string
"""
code_blocks = re.findall(r'<pre><code.*?>.*?</code></pre>', html, re.DOTALL)
if code_blocks:
for tag in code_blocks:
conf_ml = '<ac:structured-macro ac:name="code">'
conf_ml = conf_ml + '<ac:parameter ac:name="theme">Midnight</ac:parameter>'
conf_ml = conf_ml + '<ac:parameter ac:name="linenumbers">true</ac:parameter>'
lang = re.search('code class="(.*)"', tag)
if lang:
lang = lang.group(1)
else:
lang = 'none'
conf_ml = conf_ml + '<ac:parameter ac:name="language">' + lang + '</ac:parameter>'
content = re.search(r'<pre><code.*?>(.*?)</code></pre>', tag, re.DOTALL).group(1)
content = '<ac:plain-text-body><![CDATA[' + content + ']]></ac:plain-text-body>'
conf_ml = conf_ml + content + '</ac:structured-macro>'
conf_ml = conf_ml.replace('&lt;', '<').replace('&gt;', '>')
conf_ml = conf_ml.replace('&quot;', '"').replace('&amp;', '&')
html = html.replace(tag, conf_ml)
return html
def convert_info_macros(html):
"""
Converts html for info, note or warning macros
:param html: html string
:return: modified html string
"""
info_tag = '<p><ac:structured-macro ac:name="info"><ac:rich-text-body><p>'
note_tag = info_tag.replace('info', 'note')
warning_tag = info_tag.replace('info', 'warning')
close_tag = '</p></ac:rich-text-body></ac:structured-macro></p>'
# Custom tags converted into macros
html = html.replace('<p>~?', info_tag).replace('?~</p>', close_tag)
html = html.replace('<p>~!', note_tag).replace('!~</p>', close_tag)
html = html.replace('<p>~%', warning_tag).replace('%~</p>', close_tag)
# Convert block quotes into macros
quotes = re.findall('<blockquote>(.*?)</blockquote>', html, re.DOTALL)
if quotes:
for quote in quotes:
note = re.search('^<.*>Note', quote.strip(), re.IGNORECASE)
warning = re.search('^<.*>Warning', quote.strip(), re.IGNORECASE)
if note:
clean_tag = strip_type(quote, 'Note')
macro_tag = clean_tag.replace('<p>', note_tag).replace('</p>', close_tag).strip()
elif warning:
clean_tag = strip_type(quote, 'Warning')
macro_tag = clean_tag.replace('<p>', warning_tag).replace('</p>', close_tag).strip()
else:
macro_tag = quote.replace('<p>', info_tag).replace('</p>', close_tag).strip()
html = html.replace('<blockquote>%s</blockquote>' % quote, macro_tag)
# Convert doctoc to toc confluence macro
html = convert_doctoc(html)
return html
def convert_doctoc(html):
"""
Convert doctoc to confluence macro
:param html: html string
:return: modified html string
"""
toc_tag = '''<p>
<ac:structured-macro ac:name="toc">
<ac:parameter ac:name="printable">true</ac:parameter>
<ac:parameter ac:name="style">disc</ac:parameter>
<ac:parameter ac:name="maxLevel">7</ac:parameter>
<ac:parameter ac:name="minLevel">1</ac:parameter>
<ac:parameter ac:name="type">list</ac:parameter>
<ac:parameter ac:name="outline">clear</ac:parameter>
<ac:parameter ac:name="include">.*</ac:parameter>
</ac:structured-macro>
</p>'''
html = re.sub('\<\!\-\- START doctoc.*END doctoc \-\-\>', toc_tag, html, flags=re.DOTALL)
return html
def strip_type(tag, tagtype):
"""
Strips Note or Warning tags from html in various formats
:param tag: tag name
:param tagtype: tag type
:return: modified tag
"""
tag = re.sub('%s:\s' % tagtype, '', tag.strip(), flags=re.IGNORECASE)
tag = re.sub('%s\s:\s' % tagtype, '', tag.strip(), flags=re.IGNORECASE)
tag = re.sub('<.*?>%s:\s<.*?>' % tagtype, '', tag, flags=re.IGNORECASE)
tag = re.sub('<.*?>%s\s:\s<.*?>' % tagtype, '', tag, flags=re.IGNORECASE)
tag = re.sub('<(em|strong)>%s:<.*?>\s' % tagtype, '', tag, flags=re.IGNORECASE)
tag = re.sub('<(em|strong)>%s\s:<.*?>\s' % tagtype, '', tag, flags=re.IGNORECASE)
tag = re.sub('<(em|strong)>%s<.*?>:\s' % tagtype, '', tag, flags=re.IGNORECASE)
tag = re.sub('<(em|strong)>%s\s<.*?>:\s' % tagtype, '', tag, flags=re.IGNORECASE)
string_start = re.search('<.*?>', tag)
tag = upper_chars(tag, [string_start.end()])
return tag
def upper_chars(string, indices):
"""
Make characters uppercase in string
:param string: string to modify
:param indices: character indice to change to uppercase
:return: uppercased string
"""
upper_string = "".join(c.upper() if i in indices else c for i, c in enumerate(string))
return upper_string
def process_refs(html):
"""
Process references
:param html: html string
:return: modified html string
"""
refs = re.findall('\n(\[\^(\d)\].*)|<p>(\[\^(\d)\].*)', html)
if refs:
for ref in refs:
if ref[0]:
full_ref = ref[0].replace('</p>', '').replace('<p>', '')
ref_id = ref[1]
else:
full_ref = ref[2]
ref_id = ref[3]
full_ref = full_ref.replace('</p>', '').replace('<p>', '')
html = html.replace(full_ref, '')
href = re.search('href="(.*?)"', full_ref).group(1)
superscript = '<a id="test" href="%s"><sup>%s</sup></a>' % (href, ref_id)
html = html.replace('[^%s]' % ref_id, superscript)
return html
def get_page(title):
"""
Retrieve page details by title
:param title: page tile
:return: Confluence page info
"""
LOGGER.info('\tRetrieving page information: %s', title)
url = '%s/rest/api/content?title=%s&spaceKey=%s&expand=version,ancestors' % (
CONFLUENCE_API_URL, urllib.parse.quote_plus(title), SPACE_KEY)
session = requests.Session()
session.auth = (USERNAME, API_KEY)
response = session.get(url)
# Check for errors
try:
response.raise_for_status()
except requests.RequestException as err:
LOGGER.error('err.response: %s', err)
if response.status_code == 404:
LOGGER.error('Error: Page not found. Check the following are correct:')
LOGGER.error('\tSpace Key : %s', SPACE_KEY)
LOGGER.error('\tOrganisation Name: %s', ORGNAME)
else:
LOGGER.error('Error: %d - %s', response.status_code, response.content)
sys.exit(1)
data = response.json()
LOGGER.debug("data: %s", str(data))
if len(data[u'results']) >= 1:
page_id = data[u'results'][0][u'id']
version_num = data[u'results'][0][u'version'][u'number']
link = '%s%s' % (CONFLUENCE_API_URL, data[u'results'][0][u'_links'][u'webui'])
page_info = collections.namedtuple('PageInfo', ['id', 'version', 'link'])
page = page_info(page_id, version_num, link)
return page
return False
# Scan for images and upload as attachments if found
def add_images(page_id, html):
"""
Scan for images and upload as attachments if found
:param page_id: Confluence page id
:param html: html string
:return: html with modified image reference
"""
source_folder = os.path.dirname(os.path.abspath(MARKDOWN_FILE))
for tag in re.findall('<img(.*?)\/>', html):
rel_path = re.search('src="(.*?)"', tag).group(1)
alt_text = re.search('alt="(.*?)"', tag).group(1)
abs_path = os.path.join(source_folder, rel_path)
basename = os.path.basename(rel_path)
upload_attachment(page_id, abs_path, alt_text)
if re.search('http.*', rel_path) is None:
if CONFLUENCE_API_URL.endswith('/wiki'):
html = html.replace('%s' % (rel_path),
'/wiki/download/attachments/%s/%s' % (page_id, basename))
else:
html = html.replace('%s' % (rel_path),
'/download/attachments/%s/%s' % (page_id, basename))
return html
def add_contents(html):
"""
Add contents page
:param html: html string
:return: modified html string
"""
contents_markup = '<ac:structured-macro ac:name="toc">\n<ac:parameter ac:name="printable">' \
'true</ac:parameter>\n<ac:parameter ac:name="style">disc</ac:parameter>'
contents_markup = contents_markup + '<ac:parameter ac:name="maxLevel">5</ac:parameter>\n' \
'<ac:parameter ac:name="minLevel">1</ac:parameter>'
contents_markup = contents_markup + '<ac:parameter ac:name="class">rm-contents</ac:parameter>\n' \
'<ac:parameter ac:name="exclude"></ac:parameter>\n' \
'<ac:parameter ac:name="type">list</ac:parameter>'
contents_markup = contents_markup + '<ac:parameter ac:name="outline">false</ac:parameter>\n' \
'<ac:parameter ac:name="include"></ac:parameter>\n' \
'</ac:structured-macro>'
html = contents_markup + '\n' + html
return html
def add_attachments(page_id, files):
"""
Add attachments for an array of files
:param page_id: Confluence page id
:param files: list of files to attach to the given Confluence page
:return: None
"""
source_folder = os.path.dirname(os.path.abspath(MARKDOWN_FILE))
if files:
for file in files:
upload_attachment(page_id, os.path.join(source_folder, file), '')
def create_page(title, body, ancestors):
"""
Create a new page
:param title: confluence page title
:param body: confluence page content
:param ancestors: confluence page ancestor
:return:
"""
LOGGER.info('Creating page...')
url = '%s/rest/api/content/' % CONFLUENCE_API_URL
session = requests.Session()
session.auth = (USERNAME, API_KEY)
session.headers.update({'Content-Type': 'application/json'})
new_page = {'type': 'page', \
'title': title, \
'space': {'key': SPACE_KEY}, \
'body': { \
'storage': { \
'value': body, \
'representation': 'storage' \
} \
}, \
'ancestors': ancestors \
}
LOGGER.debug("data: %s", json.dumps(new_page))
response = session.post(url, data=json.dumps(new_page))
try:
response.raise_for_status()
except requests.exceptions.HTTPError as excpt:
LOGGER.error("error: %s - %s", excpt, response.content)
exit(1)
if response.status_code == 200:
data = response.json()
space_name = data[u'space'][u'name']
page_id = data[u'id']
version = data[u'version'][u'number']
link = '%s%s' % (CONFLUENCE_API_URL, data[u'_links'][u'webui'])
LOGGER.info('Page created in %s with ID: %s.', space_name, page_id)
LOGGER.info('URL: %s', link)
img_check = re.search('<img(.*?)\/>', body)
if img_check or ATTACHMENTS:
LOGGER.info('\tAttachments found, update procedure called.')
update_page(page_id, title, body, version, ancestors, ATTACHMENTS)
else:
if GO_TO_PAGE:
webbrowser.open(link)
else:
LOGGER.error('Could not create page.')
sys.exit(1)
def delete_page(page_id):
"""
Delete a page
:param page_id: confluence page id
:return: None
"""
LOGGER.info('Deleting page...')
url = '%s/rest/api/content/%s' % (CONFLUENCE_API_URL, page_id)
session = requests.Session()
session.auth = (USERNAME, API_KEY)
session.headers.update({'Content-Type': 'application/json'})
response = session.delete(url)
response.raise_for_status()
if response.status_code == 204:
LOGGER.info('Page %s deleted successfully.', page_id)
else:
LOGGER.error('Page %s could not be deleted.', page_id)
def update_page(page_id, title, body, version, ancestors, attachments):
"""
Update a page
:param page_id: confluence page id
:param title: confluence page title
:param body: confluence page content
:param version: confluence page version
:param ancestors: confluence page ancestor
:param attachments: confluence page attachments
:return: None
"""
LOGGER.info('Updating page...')
# Add images and attachments
body = add_images(page_id, body)
add_attachments(page_id, attachments)
url = '%s/rest/api/content/%s' % (CONFLUENCE_API_URL, page_id)
session = requests.Session()
session.auth = (USERNAME, API_KEY)
session.headers.update({'Content-Type': 'application/json'})
page_json = { \
"id": page_id, \
"type": "page", \
"title": title, \
"space": {"key": SPACE_KEY}, \
"body": { \
"storage": { \
"value": body, \
"representation": "storage" \
} \
}, \
"version": { \
"number": version + 1, \
"minorEdit" : True \
}, \
'ancestors': ancestors \
}
response = session.put(url, data=json.dumps(page_json))
response.raise_for_status()
if response.status_code == 200:
data = response.json()
link = '%s%s' % (CONFLUENCE_API_URL, data[u'_links'][u'webui'])
LOGGER.info("Page updated successfully.")
LOGGER.info('URL: %s', link)
if GO_TO_PAGE:
webbrowser.open(link)
else:
LOGGER.error("Page could not be updated.")
def get_attachment(page_id, filename):
"""
Get page attachment
:param page_id: confluence page id
:param filename: attachment filename
:return: attachment info in case of success, False otherwise
"""
url = '%s/rest/api/content/%s/child/attachment?filename=%s' % (CONFLUENCE_API_URL, page_id, filename)
session = requests.Session()
session.auth = (USERNAME, API_KEY)
response = session.get(url)
response.raise_for_status()
data = response.json()
if len(data[u'results']) >= 1:
att_id = data[u'results'][0]['id']
att_info = collections.namedtuple('AttachmentInfo', ['id'])
attr_info = att_info(att_id)
return attr_info
return False
def upload_attachment(page_id, file, comment):
"""
Upload an attachment
:param page_id: confluence page id
:param file: attachment file
:param comment: attachment comment
:return: boolean
"""
if re.search('http.*', file):
return False
content_type = mimetypes.guess_type(file)[0]
filename = os.path.basename(file)
if not os.path.isfile(file):
LOGGER.error('File %s cannot be found --> skip ', file)
return False
file_to_upload = {
'comment': comment,
'file': (filename, open(file, 'rb'), content_type, {'Expires': '0'})
}
attachment = get_attachment(page_id, filename)
if attachment:
url = '%s/rest/api/content/%s/child/attachment/%s/data' % (CONFLUENCE_API_URL, page_id, attachment.id)
else:
url = '%s/rest/api/content/%s/child/attachment/' % (CONFLUENCE_API_URL, page_id)
session = requests.Session()
session.auth = (USERNAME, API_KEY)
session.headers.update({'X-Atlassian-Token': 'no-check'})
LOGGER.info('\tUploading attachment %s...', filename)
response = session.post(url, files=file_to_upload)
response.raise_for_status()
return True
def main():
"""
Main program
:return:
"""
LOGGER.info('\t\t----------------------------------')
LOGGER.info('\t\tMarkdown to Confluence Upload Tool')
LOGGER.info('\t\t----------------------------------\n\n')
LOGGER.info('Markdown file:\t%s', MARKDOWN_FILE)
LOGGER.info('Space Key:\t%s', SPACE_KEY)
with open(MARKDOWN_FILE, 'r') as mdfile:
title = mdfile.readline().lstrip('#').strip()
mdfile.seek(0)
LOGGER.info('Title:\t\t%s', title)
with codecs.open(MARKDOWN_FILE, 'r', 'utf-8') as mdfile:
html = markdown.markdown(mdfile.read(), extensions=['markdown.extensions.tables',
'markdown.extensions.fenced_code'])
html = '\n'.join(html.split('\n')[1:])
html = convert_info_macros(html)
html = convert_comment_block(html)
html = convert_code_block(html)
if CONTENTS:
html = add_contents(html)
html = process_refs(html)
LOGGER.debug('html: %s', html)
if SIMULATE:
LOGGER.info("Simulate mode is active - stop processing here.")
sys.exit(0)
LOGGER.info('Checking if Atlas page exists...')
page = get_page(title)
if DELETE and page:
delete_page(page.id)
sys.exit(1)
if ANCESTOR:
parent_page = get_page(ANCESTOR)
if parent_page:
ancestors = [{'type': 'page', 'id': parent_page.id}]
else:
LOGGER.error('Error: Parent page does not exist: %s', ANCESTOR)
sys.exit(1)
else:
ancestors = []
if page:
update_page(page.id, title, html, page.version, ancestors, ATTACHMENTS)
else:
create_page(title, html, ancestors)
LOGGER.info('Markdown Converter completed successfully.')
if __name__ == "__main__":
main()
|
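Illustrative calls to two of the pure helper functions above (no Confluence connection required):

print(upper_chars('note: check this', [0]))           # 'Note: check this'
print(convert_comment_block('x <!-- hidden --> y'))   # 'x <ac:placeholder> hidden </ac:placeholder> y'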
py | b405a4ca9e2a50651bf08bb6cfe391943b2f4708 | import gc
import inspect
exclude = [
"function",
"type",
"list",
"dict",
"tuple",
"wrapper_descriptor",
"module",
"method_descriptor",
"member_descriptor",
"instancemethod",
"builtin_function_or_method",
"frame",
"classmethod",
"classmethod_descriptor",
"_Environ",
"MemoryError",
"_Printer",
"_Helper",
"getset_descriptor",
]
def dumpObjects():
gc.collect()
oo = gc.get_objects()
for o in oo:
if getattr(o, "__class__", None):
name = o.__class__.__name__
if name not in exclude:
filename = inspect.getabsfile(o.__class__)
print "Object of class:", name, "...",
print "defined in file:", filename
if __name__=="__main__":
class TestClass:
pass
testObject1 = TestClass()
testObject2 = TestClass()
dumpObjects()
|
py | b405a5a6a96fdb17eac05ccf4678efc54a8d0a3d | # Copyright 2017, OpenCensus Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
try:
import pkg_resources
pkg_resources.declare_namespace(__name__)
except ImportError: # pragma: NO COVER
import pkgutil
__path__ = pkgutil.extend_path(__path__, __name__)
|
py | b405a5c75ce151350bfa126760ee26e523e8faf3 | # Generated by Django 3.1.5 on 2021-02-23 15:56
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('archiv', '0004_spatialcoverage'),
]
operations = [
migrations.AlterModelOptions(
name='spatialcoverage',
options={'ordering': ['id'], 'verbose_name': 'Spatial Coverage'},
),
migrations.RemoveField(
model_name='spatialcoverage',
name='exactish_geom',
),
migrations.AddField(
model_name='spatialcoverage',
name='fuzzyness',
field=models.IntegerField(blank=True, choices=[(1, 1), (2, 2), (3, 3), (4, 4), (5, 5), (6, 6), (7, 7), (8, 8), (9, 9)], default=1, help_text='1 sehr sicher, 10 sehr unsicher', verbose_name='Sicherheitsindikator'),
),
]
|
py | b405a6308423028b520f5e868ea98a2e287111a7 | #!/usr/bin/env python
"""
Custom test runner
If no args or options are given, we run the testsuite as quickly as possible.
If args but no options, we default to using the spec plugin and aborting on
first error/failure.
If options, we ignore defaults and pass options onto Nose.
Examples:
Run all tests (as fast as possible)
$ ./runtests.py
Run all unit tests (using spec output)
$ ./runtests.py tests/unit
Run all checkout unit tests (using spec output)
$ ./runtests.py tests/unit/checkout
Re-run failing tests (requires pytest-cache)
$ ./runtests.py ... --lf
Drop into pdb when a test fails
$ ./runtests.py ... --pdb
"""
import os
import multiprocessing
import sys
import logging
import warnings
import pytest
from django.utils.six.moves import map
# No logging
logging.disable(logging.CRITICAL)
if __name__ == '__main__':
args = sys.argv[1:]
verbosity = 1
if not args:
# If run with no args, try and run the testsuite as fast as possible.
# That means across all cores and with no high-falutin' plugins.
try:
cpu_count = int(multiprocessing.cpu_count())
except ValueError:
cpu_count = 1
args = [
'--capture=no', '--nomigrations', '-n=%d' % cpu_count,
'tests'
]
else:
# Some args/options specified. Check to see if any nose options have
# been specified. If they have, then don't set any
has_options = any(map(lambda x: x.startswith('--'), args))
if not has_options:
# Default options:
# --exitfirst Abort on first error/failure
# --capture=no Don't capture STDOUT
args.extend(['--capture=no', '--nomigrations', '--exitfirst'])
else:
args = [arg for arg in args if not arg.startswith('-')]
with warnings.catch_warnings():
# The warnings module in default configuration will never cause tests
# to fail, as it never raises an exception. We alter that behaviour by
# turning DeprecationWarnings into exceptions, but exclude warnings
# triggered by third-party libs. Note: The context manager is not thread
# safe. Behaviour with multiple threads is undefined.
warnings.filterwarnings('error', category=DeprecationWarning)
warnings.filterwarnings('error', category=RuntimeWarning)
libs = r'(sorl\.thumbnail.*|bs4.*|webtest.*)'
warnings.filterwarnings(
'ignore', r'.*', DeprecationWarning, libs)
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'tests.settings')
result_code = pytest.main(args)
sys.exit(result_code)
|
py | b405a65c67644384531ac0e5fd96b61d1cc7ab53 | from manimlib.imports import *
class Test1(Scene):
CONFIG = {
}
def construct(self):
lines = StreamLines(lambda x: np.array([1, 0, 0]),
**{"x_min": -4,
"x_max": 0,
"y_min": -2,
"y_max": 2,
"delta_x": 0.5,
"delta_y": 0.25,
"n_repeats": 1,
"noise_factor": 1,})
anim = AnimatedStreamLines(lines,
**{"lag_range": 2,
"line_anim_class": ShowPassingFlash,
"line_anim_config": {
"run_time": 1,
"rate_func": linear,
"time_width": 0.4,
},})
self.add(anim)
self.wait(2)
class Test2(Scene):
CONFIG = {
"path_1": r"C:\Manim\manim-26_05_2020\science_animations\animals_being_served_linear.png",
"path_2": r"C:\Manim\manim-26_05_2020\science_animations\animals_being_served_sloped.png",
}
def construct(self):
image_1 = ImageMobject(self.path_1).scale(4)
self.add(image_1)
self.wait()
g = FunctionGraph(lambda x: 0.17*x - 0.2, x_min=0, x_max=8.4, color=RED, stroke_width=4).shift(4*LEFT)
self.play(ShowCreation(g))
self.wait(2)
self.play(FadeOut(g))
self.wait(2)
image_2 = ImageMobject(self.path_2).scale(4)
self.add(image_2)
self.wait()
self.play(FadeIn(g))
g2 = FunctionGraph(lambda x: 0.17*x**(0.8) - 0.2, x_min=0, x_max=8.4, color=DARK_BLUE, stroke_width=4).shift(4*LEFT)
self.play(TransformFromCopy(g, g2))
self.wait(5)
class Test2B(Scene):
CONFIG = {
"path_1": r"C:\Manim\manim-26_05_2020\science_animations\animals_being_served_linear.png",
"path_2": r"C:\Manim\manim-26_05_2020\science_animations\animals_being_served_sloped.png",
}
def construct(self):
# image_1 = ImageMobject(self.path_1).scale(4)
# self.add(image_1)
# self.wait()
g = FunctionGraph(lambda x: 0.17 * x - 0.2, x_min=0, x_max=8.4, color=RED, stroke_width=4).shift(4 * LEFT)
# self.play(ShowCreation(g))
# self.wait(2)
# self.play(FadeOut(g))
#
# self.wait(2)
image_2 = ImageMobject(self.path_2).scale(4)
self.add(image_2)
self.wait()
self.play(FadeIn(g))
g2 = FunctionGraph(lambda x: 0.17 * x ** (0.8) - 0.2, x_min=0, x_max=8.4, color=DARK_BLUE,
stroke_width=4).shift(4 * LEFT)
g3 = FunctionGraph(lambda x: 0.17*3 * math.log(x+1) - 0.2 + 0.0*x, x_min=0, x_max=8.4, color=ORANGE,
stroke_width=4).shift(4*LEFT)
self.play(TransformFromCopy(g, g2))
self.play(TransformFromCopy(g, g3))
self.wait(5)
class Test3(Scene):
CONFIG = {
"camera_config": {"background_color": "#2F2F2E", },
"path_1": r"C:\Manim\manim-26_05_2020\science_animations\man_stairs.png",
"path_2": r"C:\Manim\manim-26_05_2020\science_animations\man_with_goat_stairs.png",
"back": r"C:\Manim\manim-26_05_2020\science_animations\blackboard_texture.png",
}
def construct(self):
back = ImageMobject(self.back).scale(4)
#self.add(back)
image_1 = ImageMobject(self.path_1).scale(4)
self.play(FadeIn(image_1))
self.wait(2)
image_2 = ImageMobject(self.path_2).scale(4)
self.play(Transform(image_1, image_2))
self.wait(3)
class Test4(Scene):
CONFIG = {
"camera_config": {"background_color": "#2F2F2E", },
"text_config": {"stroke_color": "#2F2F2E", "fill_color": WHITE},
"path_1": r"C:\Manim\manim-26_05_2020\science_animations\green_check.png",
"back": r"C:\Manim\manim-26_05_2020\science_animations\blackboard_texture.png",
}
def construct(self):
back = ImageMobject(self.back).scale(4)
#self.add(back)
top_text = TextMobject("\\underline{Good Theory Checklist}", **self.text_config).shift(1.5*UP)
list1 = TextMobject("$\\square$ Accurately predict real-world data", **self.text_config)
list2 = TextMobject("$\\square$ Make reasonable assumptions", **self.text_config).align_to(list1, LEFT, alignment_vect=UP).shift(1*DOWN)
self.play(Write(top_text))
self.play(Write(list1))
self.play(Write(list2))
self.wait(2)
check1 = ImageMobject(self.path_1).scale(0.25).move_to(list1.get_left()).shift(0.2 * RIGHT + 0.1*UP)
check2 = ImageMobject(self.path_1).scale(0.25).move_to(list2.get_left()).shift(0.2 * RIGHT + 0.1*UP)
self.play(FadeIn(check1))
self.wait(2)
self.play(FadeOut(check1))
self.wait(2)
self.play(FadeIn(check2))
self.wait(2)
self.play(FadeOut(check2))
# self.play(FadeIn(check1), FadeIn(check2))
self.wait(3)
class Test5(Scene):
CONFIG = {
"camera_config": {"background_color": GREEN, },
"text_config": {"stroke_color": "#2F2F2E", "fill_color": WHITE},
"back": r"C:\Manim\manim-26_05_2020\science_animations\blackboard_texture.png",
"path_1": r"C:\Manim\manim-26_05_2020\science_animations\elephant.png",
"path_2": r"C:\Manim\manim-26_05_2020\science_animations\mouse.png",
"path_3": r"C:\Manim\manim-26_05_2020\science_animations\energy.png",
}
def construct(self):
back = ImageMobject(self.back).scale(4)
#self.add(back)
elephant = ImageMobject(self.path_1).scale(4)
mouse = ImageMobject(self.path_2).scale(4)
energy = TextMobject("Energy?", **self.text_config).scale(3.5).move_to(1.25*UP + 3.6*LEFT)
self.play(FadeIn(elephant))
self.wait()
self.play(FadeIn(mouse))
self.wait()
self.play(Write(energy))
self.wait(3)
class Test6(MovingCameraScene):
CONFIG = {
"back": r"C:\Manim\manim-26_05_2020\science_animations\blackboard_texture.png",
}
def construct(self):
back = ImageMobject(self.back).scale(4)
self.add(back)
l = Line(color=WHITE)
self.play(ShowCreation(l))
self.play(self.camera_frame.set_width, 20,
back.scale, 1.40625)
self.wait(2)
class Credits(Scene):
CONFIG = {
#"camera_config": {"background_color": "#2F2F2E", },
#"text_config": {"stroke_color": "#2F2F2E", "fill_color": WHITE},
"back": r"C:\Manim\manim-26_05_2020\science_animations\blackboard_texture.png",
}
def construct(self):
text1 = TextMobject("By Dhruv Bhatia,", "Joe Emmetti,", "and Thomas Patti").arrange(DOWN)
text2 = TextMobject("With thanks to", "Steven Subotnick,", "John Stein,", "and Faye Thomas").arrange(DOWN)
text3 = TextMobject("Made with Manim,", "an open source animation library", "by Grant Sanderson").arrange(DOWN)
self.play(ShowCreation(text1), run_time=2)
self.wait(3)
self.play(Uncreate(text1), run_time=2)
self.play(ShowCreation(text2), run_time=2)
self.wait(3)
self.play(Uncreate(text2), run_time=2)
self.play(ShowCreation(text3), run_time=2)
self.wait(3)
self.play(Uncreate(text3), run_time=2)
self.wait(3)
|
py | b405a6a3e368217d855f9092cc8a18da628ef363 | from datacatalog import settings
__all__ = ['STORES']
STORES = [
{'level': '0', 'prefix': '/uploads',
'storage_system': settings.STORAGE_SYSTEM,
'manager': settings.TACC_MANAGER_ACCOUNT,
'database': settings.MONGODB_DATABASE},
{'level': '1', 'prefix': '/products',
'storage_system': settings.STORAGE_SYSTEM,
'manager': settings.TACC_MANAGER_ACCOUNT,
'database': settings.MONGODB_DATABASE},
{'level': '2', 'prefix': '/products',
'storage_system': settings.STORAGE_SYSTEM,
'manager': settings.TACC_MANAGER_ACCOUNT,
'database': settings.MONGODB_DATABASE},
{'level': '3', 'prefix': '/products',
'storage_system': settings.STORAGE_SYSTEM,
'manager': settings.TACC_MANAGER_ACCOUNT,
'database': settings.MONGODB_DATABASE},
{'level': 'Reference', 'prefix': '/reference',
'storage_system': settings.STORAGE_SYSTEM,
'manager': settings.TACC_MANAGER_ACCOUNT,
'database': settings.MONGODB_DATABASE}
]
|
py | b405a6b72696e06878146af708af812a6230d325 | #!/usr/bin/env python2
from __future__ import print_function
import json
import argparse
import jinja2
import os
import os.path
import sys
import ssg.build_profile
import ssg.constants
import ssg.xml
import ssg.build_yaml
def parse_args():
script_desc = \
"Obtains and displays XCCDF profile statistics. Namely number " + \
"of rules in the profile, how many of these rules have their OVAL " + \
"check implemented, how many have a remediation available, ..."
parser = argparse.ArgumentParser(description="Profile statistics and utilities tool")
subparsers = parser.add_subparsers(title='subcommands', dest="subcommand")
parser_stats = subparsers.add_parser("stats", description=script_desc,
help=("Show profile statistics"))
parser_stats.add_argument("--profile", "-p",
action="store",
help="Show statistics for this XCCDF Profile only. If "
"not provided the script will show stats for all "
"available profiles.")
parser_stats.add_argument("--benchmark", "-b", required=True,
action="store",
help="Specify XCCDF file to act on. Must be a plain "
"XCCDF file, doesn't work on source datastreams yet!")
parser_stats.add_argument("--implemented-ovals", default=False,
action="store_true", dest="implemented_ovals",
help="Show IDs of implemented OVAL checks.")
parser_stats.add_argument("--missing-stig-ids", default=False,
action="store_true", dest="missing_stig_ids",
help="Show rules in STIG profiles that don't have STIG IDs.")
parser_stats.add_argument("--missing-ovals", default=False,
action="store_true", dest="missing_ovals",
help="Show IDs of unimplemented OVAL checks.")
parser_stats.add_argument("--implemented-fixes", default=False,
action="store_true", dest="implemented_fixes",
help="Show IDs of implemented remediations.")
parser_stats.add_argument("--missing-fixes", default=False,
action="store_true", dest="missing_fixes",
help="Show IDs of unimplemented remediations.")
parser_stats.add_argument("--assigned-cces", default=False,
action="store_true", dest="assigned_cces",
help="Show IDs of rules having CCE assigned.")
parser_stats.add_argument("--missing-cces", default=False,
action="store_true", dest="missing_cces",
help="Show IDs of rules missing CCE element.")
parser_stats.add_argument("--implemented", default=False,
action="store_true",
help="Equivalent of --implemented-ovals, "
"--implemented_fixes and --assigned-cves "
"all being set.")
parser_stats.add_argument("--missing", default=False,
action="store_true",
help="Equivalent of --missing-ovals, --missing-fixes"
" and --missing-cces all being set.")
parser_stats.add_argument("--all", default=False,
action="store_true", dest="all",
help="Show all available statistics.")
parser_stats.add_argument("--format", default="plain",
choices=["plain", "json", "csv", "html"],
help="Which format to use for output.")
parser_stats.add_argument("--output",
help="If defined, statistics will be stored under this directory.")
subtracted_profile_desc = \
"Subtract rules and variable selections from profile1 based on rules present in " + \
"profile2. As a result, a new profile is generated. It doesn't support profile " + \
"inheritance, this means that only rules explicitly " + \
"listed in the profiles will be taken in account."
parser_sub = subparsers.add_parser("sub", description=subtracted_profile_desc,
help=("Subtract rules and variables from profile1 "
"based on selections present in profile2."))
parser_sub.add_argument('--profile1', type=str, dest="profile1",
required=True, help='YAML profile')
parser_sub.add_argument('--profile2', type=str, dest="profile2",
required=True, help='YAML profile')
args = parser.parse_args()
if args.subcommand == "stats":
if args.all:
args.implemented = True
args.missing = True
if args.implemented:
args.implemented_ovals = True
args.implemented_fixes = True
args.assigned_cces = True
if args.missing:
args.missing_ovals = True
args.missing_fixes = True
args.missing_cces = True
args.missing_stig_ids = True
return args
def main():
args = parse_args()
if args.subcommand == "sub":
try:
profile1 = ssg.build_yaml.Profile.from_yaml(args.profile1)
profile2 = ssg.build_yaml.Profile.from_yaml(args.profile2)
except jinja2.exceptions.TemplateNotFound as e:
print("Error: Profile {} could not be found.".format(str(e)))
exit(1)
subtracted_profile = profile1 - profile2
exclusive_rules = len(subtracted_profile.get_rule_selectors())
exclusive_vars = len(subtracted_profile.get_variable_selectors())
if exclusive_rules > 0:
print("{} rules were left after subtraction.".format(exclusive_rules))
if exclusive_vars > 0:
print("{} variables were left after subtraction.".format(exclusive_vars))
if exclusive_rules > 0 or exclusive_vars > 0:
profile1_basename = os.path.splitext(
os.path.basename(args.profile1))[0]
profile2_basename = os.path.splitext(
os.path.basename(args.profile2))[0]
subtracted_profile_filename = "{}_sub_{}.profile".format(
profile1_basename, profile2_basename)
print("Creating a new profile containing the exclusive selections: {}".format(
subtracted_profile_filename))
subtracted_profile.title = profile1.title + " subtracted by " + profile2.title
subtracted_profile.dump_yaml(subtracted_profile_filename)
print("Profile {} was created successfully".format(
subtracted_profile_filename))
else:
print("Subtraction would produce an empty profile. No new profile was generated")
exit(0)
benchmark = ssg.build_profile.XCCDFBenchmark(args.benchmark)
ret = []
if args.profile:
ret.append(benchmark.show_profile_stats(args.profile, args))
else:
all_profile_elems = benchmark.tree.findall("./{%s}Profile" % (ssg.constants.XCCDF11_NS))
ret = []
for elem in all_profile_elems:
profile = elem.get('id')
if profile is not None:
ret.append(benchmark.show_profile_stats(profile, args))
if args.format == "json":
print(json.dumps(ret, indent=4))
if args.format == "html":
from json2html import json2html
filtered_output = []
output_path = "./"
if args.output:
output_path = args.output
if not os.path.exists(output_path):
os.mkdir(output_path)
content_path = os.path.join(output_path, "content")
if not os.path.exists(content_path):
os.mkdir(content_path)
content_list = [
'rules',
'missing_stig_ids',
'missing_ovals',
'missing_bash_fixes',
'missing_ansible_fixes',
'missing_puppet_fixes',
'missing_anaconda_fixes',
'missing_cces'
]
link = """<a href="{}"><div style="height:100%;width:100%">{}</div></a>"""
for profile in ret:
for content in content_list:
content_file = "{}_{}.txt".format(profile['profile_id'], content)
content_filepath = os.path.join("content", content_file)
count = len(profile[content])
if count > 0:
count_href_element = link.format(content_filepath, count)
profile['{}_count'.format(content)] = count_href_element
with open(os.path.join(content_path, content_file), 'w+') as f:
f.write('\n'.join(profile[content]))
else:
profile['{}_count'.format(content)] = count
del profile[content]
filtered_output.append(profile)
with open(os.path.join(output_path, "statistics.html"), 'w+') as f:
f.write(json2html.convert(json=json.dumps(filtered_output), escape=False))
elif args.format == "csv":
# we can assume ret has at least one element
# CSV header
print(",".join(ret[0].keys()))
for line in ret:
print(",".join([str(value) for value in line.values()]))
if __name__ == '__main__':
main()
|
py | b405a8392c2cb90b479ffe736ee34bb466b41d95 | # model settings
temperature = 0.01
model = dict(
type='UVCTrackerRecursive',
backbone=dict(
type='ResNet',
pretrained=None,
depth=18,
out_indices=(3, ),
strides=(1, 2, 1, 1),
norm_eval=False,
zero_init_residual=True),
cls_head=dict(
type='UVCHead',
loss_feat=dict(type='CosineSimLoss'),
loss_aff=dict(
type='ConcentrateLoss',
win_len=8,
stride=8,
temperature=temperature,
with_norm=True,
loss_weight=1.),
loss_bbox=dict(type='MSELoss', loss_weight=10.),
in_channels=512,
channels=128,
temperature=temperature,
with_norm=True,
init_std=0.01,
num_convs=0,
spatial_type=None,
track_type='center'))
# model training and testing settings
train_cfg = dict(
patch_size=96,
img_as_ref=True,
img_as_tar=True,
strong_aug=False,
diff_crop=False,
center_ratio=0.,
recursive_times=2)
test_cfg = dict(
precede_frames=7,
topk=5,
temperature=temperature,
strides=(1, 2, 1, 1),
out_indices=(2, ),
output_dir='eval_results')
# dataset settings
dataset_type = 'VideoDataset'
dataset_type_val = 'DavisDataset'
data_prefix = 'data/kinetics400/videos_train'
ann_file_train = 'data/kinetics400/kinetics400_train_list_videos.txt'
data_prefix_val = 'data/davis/DAVIS/JPEGImages/480p'
anno_prefix_val = 'data/davis/DAVIS/Annotations/480p'
data_root_val = 'data/davis/DAVIS'
ann_file_val = 'data/davis/DAVIS/ImageSets/davis2017_val_list_rawframes.txt'
img_norm_cfg = dict(
mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_bgr=False)
# img_norm_cfg = dict(
# mean=[50, 0, 0], std=[50, 127, 127], to_bgr=False)
train_pipeline = [
dict(type='DecordInit'),
dict(type='SampleFrames', clip_len=2, frame_interval=8, num_clips=1),
dict(type='DecordDecode'),
# dict(type='Resize', scale=(-1, 256)),
# dict(type='RandomResizedCrop'),
dict(type='Resize', scale=(256, 256), keep_ratio=False),
dict(type='Flip', flip_ratio=0.5),
# dict(type='PhotoMetricDistortion'),
# dict(type='RGB2LAB'),
dict(type='Normalize', **img_norm_cfg),
dict(type='FormatShape', input_format='NCTHW'),
dict(type='Collect', keys=['imgs', 'label'], meta_keys=['img_norm_cfg']),
dict(type='ToTensor', keys=['imgs', 'label'])
]
val_pipeline = [
dict(type='SequentialSampleFrames', frame_interval=1),
dict(type='RawFrameDecode'),
dict(type='Resize', scale=(-1, 480), keep_ratio=True),
dict(type='Flip', flip_ratio=0),
# dict(type='RGB2LAB'),
dict(type='Normalize', **img_norm_cfg),
dict(type='FormatShape', input_format='NCTHW'),
dict(
type='Collect',
keys=['imgs', 'ref_seg_map'],
meta_keys=('frame_dir', 'frame_inds', 'original_shape', 'seg_map')),
dict(type='ToTensor', keys=['imgs', 'ref_seg_map'])
]
data = dict(
videos_per_gpu=32,
workers_per_gpu=4,
val_workers_per_gpu=1,
train=dict(
type=dataset_type,
ann_file=ann_file_train,
data_prefix=data_prefix,
pipeline=train_pipeline),
val=dict(
type=dataset_type_val,
ann_file=ann_file_val,
data_prefix=data_prefix_val,
data_root=data_root_val,
anno_prefix=anno_prefix_val,
pipeline=val_pipeline,
test_mode=True),
test=dict(
type=dataset_type_val,
ann_file=ann_file_val,
data_prefix=data_prefix_val,
data_root=data_root_val,
anno_prefix=anno_prefix_val,
pipeline=val_pipeline,
test_mode=True))
# optimizer
optimizer = dict(type='Adam', lr=1e-4)
optimizer_config = dict(grad_clip=None)
# learning policy
# lr_config = dict(policy='CosineAnnealing', min_lr=0)
lr_config = dict(policy='Fixed')
total_epochs = 50
checkpoint_config = dict(interval=1)
evaluation = dict(
interval=1, metrics='davis', key_indicator='J&F-Mean', rule='greater')
log_config = dict(
interval=50,
hooks=[
dict(type='TextLoggerHook'),
# dict(type='TensorboardLoggerHook'),
# dict(type='WandbLoggerHook', init_kwargs=dict(
# project='uvc', name='{{fileBasenameNoExtension}}', config=dict(
# model=model, train_cfg=train_cfg, test_cfg=test_cfg,
# data=data))),
])
# runtime settings
dist_params = dict(backend='nccl')
log_level = 'INFO'
load_from = None
resume_from = None
workflow = [('train', 1)]
find_unused_parameters = False
|
py | b405a877ce83c8877cdfd061447e8d4743e76f71 | """Asynchronous WSGI_ Remote Procedure Calls middleware. It implements a
JSON-RPC_ server and client. Check out the
:ref:`json-rpc tutorial <tutorials-calculator>` if you want to get started
quickly with a working example.
To quickly setup a server::
class MyRpc(rpc.JSONRPC):
def rpc_ping(self, request):
return 'pong'
class Wsgi(wsgi.LazyWsgi):
def handler(self, environ=None):
app = wsgi.Router('/',
post=MyRpc(),
response_content_types=['application/json'])
return wsgi.WsgiHandler([app])
if __name__ == '__main__':
wsgi.WSGIServer(Wsgi()).start()
* The ``MyRpc`` handler handles the requests
* Routing is delegated to the :class:`.Router`, which handles only ``post``
requests with content type ``application/json``.
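A client can call the exposed methods through :class:`JsonProxy`. The snippet
below is a minimal sketch; it assumes the server above is reachable at
``http://127.0.0.1:8060`` and, depending on the calling context, the proxy call
may return a coroutine that needs to be awaited::
    from pulsar.apps import rpc
    proxy = rpc.JsonProxy('http://127.0.0.1:8060')
    pong = proxy.ping()   # dispatched to MyRpc.rpc_ping on the server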
API
===========
.. module:: pulsar.apps.rpc.handlers
RpcHandler
~~~~~~~~~~~~~~
.. autoclass:: RpcHandler
:members:
:member-order: bysource
rpc method decorator
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
.. autofunction:: rpc_method
.. module:: pulsar.apps.rpc.jsonrpc
JSON RPC
~~~~~~~~~~~~~~~~
.. autoclass:: JSONRPC
:members:
:member-order: bysource
JsonProxy
~~~~~~~~~~~~~~~~
.. autoclass:: JsonProxy
:members:
:member-order: bysource
.. module:: pulsar.apps.rpc.mixins
Server Commands
~~~~~~~~~~~~~~~~~~~~~~~~~~~~
.. autoclass:: PulsarServerCommands
:members:
:member-order: bysource
.. _JSON-RPC: http://www.jsonrpc.org/specification
.. _WSGI: http://www.python.org/dev/peps/pep-3333/
"""
from .handlers import (
RpcHandler, rpc_method, InvalidRequest, InvalidParams,
NoSuchFunction, InternalError
)
from .jsonrpc import JSONRPC, JsonProxy, JsonBatchProxy
from .mixins import PulsarServerCommands
__all__ = [
'RpcHandler',
'rpc_method',
'InvalidRequest',
'InvalidParams',
'NoSuchFunction',
'InternalError',
'JSONRPC',
'JsonProxy',
'JsonBatchProxy',
'PulsarServerCommands'
]
|
py | b405a954210fcb131ab40a0ae606b12b835ef05b | import logging
import os
from urllib.request import urlopen
from django.core.files import File
from django.core.files.base import ContentFile
from django.core.serializers.json import DjangoJSONEncoder
from django.core.validators import FileExtensionValidator
from django.db import models
from django.utils.timezone import now
from django.utils.translation import gettext_lazy as _
from jutil.format import is_media_full_path, strip_media_root, get_media_full_path
from jutil.modelfields import SafeCharField, SafeTextField
from jsanctions.helpers import get_country_iso2_code
logger = logging.getLogger(__name__)
REMARK_BRIEF_LENGTH = 128
DEFAULT_DESCRIPTION_TYPE = {"blank": True, "max_length": 512, "default": ""}
DEFAULT_REMARK_TYPE = {
"verbose_name": _("remark"),
"null": True,
"default": None,
"blank": True,
"on_delete": models.SET_NULL,
}
DEFAULT_CODE_TYPE = {"blank": True, "max_length": 32, "default": ""}
DEFAULT_DATE_TYPE = {"blank": True, "null": True, "default": None}
LANGUAGE_CODE_TYPE = {"blank": True, "default": "", "max_length": 5}
COUNTRY_CODE_TYPE = {"blank": True, "default": "", "max_length": 3}
DEFAULT_BOOLEAN_TYPE = {"blank": True, "default": None, "null": True}
DEFAULT_INT_TYPE = {"blank": True, "default": None, "null": True}
REGULATION_SUMMARY_TYPE = {
"verbose_name": _("regulation summary"),
"blank": True,
"default": None,
"null": True,
"on_delete": models.PROTECT,
}
class SanctionsListFileManager(models.Manager):
def create_from_filename(self, filename: str, **kwargs):
full_path = os.path.realpath(filename)
file = self.create(**kwargs)
assert isinstance(file, SanctionsListFile)
if is_media_full_path(full_path):
file.file.name = strip_media_root(full_path)
file.save()
logger.info("%s used as is", file.file)
else:
with open(full_path, "rb") as fp:
plain_filename = os.path.basename(filename)
file.file.save(plain_filename, File(fp))
logger.info("%s written", file.file)
return file
def create_from_url(self, url: str, filename: str, **kwargs):
response = urlopen(url)
body = response.read()
plain_filename = os.path.basename(filename)
file = self.create(**kwargs)
file.file.save(plain_filename, ContentFile(body))
logger.info("%s written", file.file)
return file
class SanctionListObject(models.Model):
pass
class SanctionsListFile(SanctionListObject):
objects = SanctionsListFileManager() # type: ignore
created = models.DateTimeField(verbose_name=_("created"), default=now, blank=True, editable=False, db_index=True)
imported = models.DateTimeField(verbose_name=_("imported"), default=None, null=True, blank=True, editable=False, db_index=True)
generation_date = models.DateField(verbose_name=_("generation date"), default=None, blank=True, null=True, editable=False, db_index=True)
file = models.FileField(verbose_name=_("file"), upload_to="uploads", validators=[FileExtensionValidator(["xml"])])
list_type = SafeCharField(verbose_name=_("list type"), max_length=128, db_index=True)
global_file_id = SafeCharField(verbose_name=_("global file id"), **DEFAULT_DESCRIPTION_TYPE) # type: ignore
class Meta:
verbose_name = _("sanction list")
verbose_name_plural = _("sanction lists")
def __str__(self):
return "{}".format(os.path.basename(self.file.name))
@property
def full_path(self) -> str:
return get_media_full_path(self.file.name)
class Remark(models.Model):
container = models.ForeignKey(SanctionListObject, on_delete=models.CASCADE)
text = SafeTextField(verbose_name=_("text"), blank=True)
class Meta:
verbose_name = _("remark")
verbose_name_plural = _("remarks")
def __str__(self) -> str:
return str(self.text)
@property
def text_brief(self) -> str:
return self.text if len(self.text) < REMARK_BRIEF_LENGTH else self.text[:REMARK_BRIEF_LENGTH] + "..."
class SubjectType(SanctionListObject):
PERSON = "P"
ENTERPRISE = "E"
VESSEL = "V"
AIRCRAFT = "A"
CLASSIFICATION_CODES = [
(PERSON, _("person")),
(ENTERPRISE, _("enterprise")),
(VESSEL, _("vessel")),
(AIRCRAFT, _("aircraft")),
]
classification_code = SafeCharField(verbose_name=_("classification code"), **DEFAULT_CODE_TYPE) # type: ignore
code = SafeCharField(verbose_name=_("code"), **DEFAULT_DESCRIPTION_TYPE) # type: ignore
class Meta:
verbose_name = _("subject type")
verbose_name_plural = _("subject types")
def __str__(self) -> str:
return str(self.code)
class Regulation(SanctionListObject):
sanction = models.ForeignKey("SanctionEntity", verbose_name=_("sanction entity"), on_delete=models.CASCADE)
regulation_type = SafeCharField(verbose_name=_("regulation type"), **DEFAULT_DESCRIPTION_TYPE) # type: ignore
organisation_type = SafeCharField(verbose_name=_("organization type"), **DEFAULT_DESCRIPTION_TYPE) # type: ignore
publication_date = models.DateField(verbose_name=_("publication date"), **DEFAULT_DATE_TYPE) # type: ignore
publication_url = models.URLField(verbose_name=_("url"), blank=True, default="")
entry_into_force_date = models.DateField(verbose_name=_("entry into force date"), **DEFAULT_DATE_TYPE) # type: ignore
number_title = SafeCharField(verbose_name=_("number title"), **DEFAULT_DESCRIPTION_TYPE) # type: ignore
    programme = SafeCharField(verbose_name=_("programme"), **DEFAULT_DESCRIPTION_TYPE)  # type: ignore
logical_id = models.BigIntegerField(verbose_name=_("logical id"), blank=True, null=True, default=None)
class Meta:
verbose_name = _("regulation")
verbose_name_plural = _("regulations")
class RegulationSummary(SanctionListObject):
regulation_type = SafeCharField(verbose_name=_("regulation type"), **DEFAULT_CODE_TYPE) # type: ignore
number_title = SafeCharField(verbose_name=_("number title"), **DEFAULT_DESCRIPTION_TYPE) # type: ignore
publication_date = models.DateField(verbose_name=_("publication date"), **DEFAULT_DATE_TYPE) # type: ignore
publication_url = models.URLField(verbose_name=_("url"), blank=True, default="")
class Meta:
verbose_name = _("regulation summary")
verbose_name_plural = _("regulation summaries")
def __str__(self) -> str:
return "{} {}".format(self.regulation_type, self.number_title)
class NameAlias(SanctionListObject):
sanction = models.ForeignKey("SanctionEntity", verbose_name=_("sanction entity"), on_delete=models.CASCADE)
first_name = SafeCharField(verbose_name=_("first name"), **DEFAULT_DESCRIPTION_TYPE) # type: ignore
middle_name = SafeCharField(verbose_name=_("middle name"), **DEFAULT_DESCRIPTION_TYPE) # type: ignore
last_name = SafeCharField(verbose_name=_("last name"), **DEFAULT_DESCRIPTION_TYPE) # type: ignore
whole_name = SafeCharField(verbose_name=_("whole name"), **DEFAULT_DESCRIPTION_TYPE) # type: ignore
name_language = SafeCharField(verbose_name=_("name language"), **LANGUAGE_CODE_TYPE) # type: ignore
function = SafeCharField(verbose_name=_("function"), **DEFAULT_DESCRIPTION_TYPE) # type: ignore
title = SafeCharField(verbose_name=_("title"), **DEFAULT_DESCRIPTION_TYPE) # type: ignore
regulation_language = SafeCharField(verbose_name=_("regulation language"), **LANGUAGE_CODE_TYPE) # type: ignore
logical_id = models.BigIntegerField(verbose_name=_("logical id"), blank=True, null=True, default=None)
regulation_summary = models.ForeignKey(RegulationSummary, **REGULATION_SUMMARY_TYPE) # type: ignore
class Meta:
verbose_name = _("name alias")
verbose_name_plural = _("name aliases")
def __str__(self) -> str:
return str(self.whole_name)
def clean(self):
if len(self.function) > 256:
self.function = self.function[:256]
if not self.whole_name:
self.whole_name = (self.first_name + " " + self.last_name).strip()
class Identification(SanctionListObject):
sanction = models.ForeignKey("SanctionEntity", verbose_name=_("sanction entity"), on_delete=models.CASCADE)
diplomatic = models.BooleanField(verbose_name=_("diplomatic"), **DEFAULT_BOOLEAN_TYPE) # type: ignore
known_expired = models.BooleanField(verbose_name=_("known expired"), **DEFAULT_BOOLEAN_TYPE) # type: ignore
known_false = models.BooleanField(verbose_name=_("known false"), **DEFAULT_BOOLEAN_TYPE) # type: ignore
reported_lost = models.BooleanField(verbose_name=_("reported lost"), **DEFAULT_BOOLEAN_TYPE) # type: ignore
revoked_by_issuer = models.BooleanField(verbose_name=_("revoked by issuer"), **DEFAULT_BOOLEAN_TYPE) # type: ignore
issue_date = models.DateField(verbose_name=_("issue date"), **DEFAULT_DATE_TYPE) # type: ignore
issued_by = SafeCharField(verbose_name=_("issued by"), **DEFAULT_DESCRIPTION_TYPE) # type: ignore
latin_number = SafeCharField(verbose_name=_("latin number"), **DEFAULT_DESCRIPTION_TYPE) # type: ignore
name_on_document = SafeCharField(verbose_name=_("name on document"), **DEFAULT_DESCRIPTION_TYPE) # type: ignore
number = SafeCharField(verbose_name=_("number"), **DEFAULT_DESCRIPTION_TYPE) # type: ignore
region = SafeCharField(verbose_name=_("region"), **DEFAULT_DESCRIPTION_TYPE) # type: ignore
country_iso2_code = SafeCharField(verbose_name=_("issued by"), **COUNTRY_CODE_TYPE) # type: ignore
country_description = SafeCharField(verbose_name=_("country description"), **DEFAULT_DESCRIPTION_TYPE) # type: ignore
identification_type_code = SafeCharField(verbose_name=_("identification type code"), **DEFAULT_CODE_TYPE) # type: ignore
    identification_type_description = SafeCharField(verbose_name=_("identification type description"), **DEFAULT_DESCRIPTION_TYPE)  # type: ignore
regulation_language = SafeCharField(verbose_name=_("regional language"), **LANGUAGE_CODE_TYPE) # type: ignore
logical_id = models.BigIntegerField(verbose_name=_("logical id"), blank=True, null=True, default=None)
regulation_summary = models.ForeignKey(RegulationSummary, **REGULATION_SUMMARY_TYPE) # type: ignore
class Meta:
verbose_name = _("identification")
verbose_name_plural = _("identifications")
def clean(self):
if self.country_description and not self.country_iso2_code:
self.country_iso2_code = get_country_iso2_code(self.country_description)
class BirthDate(SanctionListObject):
sanction = models.ForeignKey("SanctionEntity", verbose_name=_("sanction entity"), on_delete=models.CASCADE)
circa = models.BooleanField(verbose_name=_("circa"), **DEFAULT_BOOLEAN_TYPE) # type: ignore
calendar_type = SafeCharField(verbose_name=_("calendar type"), **DEFAULT_CODE_TYPE) # type: ignore
city = SafeCharField(verbose_name=_("city"), **DEFAULT_DESCRIPTION_TYPE) # type: ignore
zip_code = SafeCharField(verbose_name=_("zip code"), **DEFAULT_DESCRIPTION_TYPE) # type: ignore
birth_date = models.DateField(verbose_name=_("birth date"), **DEFAULT_DATE_TYPE) # type: ignore
    birth_date_description = SafeCharField(verbose_name=_("birth date description"), **DEFAULT_DESCRIPTION_TYPE)  # type: ignore
day_of_month = models.IntegerField(verbose_name=_("day of month"), **DEFAULT_INT_TYPE) # type: ignore
month_of_year = models.IntegerField(verbose_name=_("month of year"), **DEFAULT_INT_TYPE) # type: ignore
year = models.IntegerField(verbose_name=_("year"), **DEFAULT_INT_TYPE) # type: ignore
region = SafeCharField(verbose_name=_("region"), **DEFAULT_DESCRIPTION_TYPE) # type: ignore
place = SafeCharField(verbose_name=_("place"), **DEFAULT_DESCRIPTION_TYPE) # type: ignore
country_iso2_code = SafeCharField(verbose_name=_("country"), **COUNTRY_CODE_TYPE) # type: ignore
country_description = SafeCharField(verbose_name=_("country description"), **DEFAULT_DESCRIPTION_TYPE) # type: ignore
regulation_language = SafeCharField(verbose_name=_("regional language"), **LANGUAGE_CODE_TYPE) # type: ignore
logical_id = models.BigIntegerField(verbose_name=_("logical id"), blank=True, null=True, default=None)
class Meta:
verbose_name = _("birth date")
verbose_name_plural = _("birth dates")
class Citizenship(SanctionListObject):
sanction = models.ForeignKey("SanctionEntity", verbose_name=_("sanction entity"), on_delete=models.CASCADE)
region = SafeCharField(verbose_name=_("region"), **DEFAULT_DESCRIPTION_TYPE) # type: ignore
country_iso2_code = SafeCharField(verbose_name=_("country"), **COUNTRY_CODE_TYPE) # type: ignore
country_description = SafeCharField(verbose_name=_("country description"), **DEFAULT_DESCRIPTION_TYPE) # type: ignore
regulation_language = SafeCharField(verbose_name=_("regional language"), **LANGUAGE_CODE_TYPE) # type: ignore
logical_id = models.BigIntegerField(verbose_name=_("logical id"), blank=True, null=True, default=None)
regulation_summary = models.ForeignKey(RegulationSummary, **REGULATION_SUMMARY_TYPE) # type: ignore
class Meta:
verbose_name = _("citizenship")
verbose_name_plural = _("citizenships")
class Address(SanctionListObject):
sanction = models.ForeignKey("SanctionEntity", verbose_name=_("sanction entity"), on_delete=models.CASCADE)
city = SafeCharField(verbose_name=_("city"), **DEFAULT_DESCRIPTION_TYPE) # type: ignore
street = SafeCharField(verbose_name=_("street"), **DEFAULT_DESCRIPTION_TYPE) # type: ignore
po_box = SafeCharField(verbose_name=_("p.o. box"), **DEFAULT_DESCRIPTION_TYPE) # type: ignore
zip_code = SafeCharField(verbose_name=_("zip code"), **DEFAULT_DESCRIPTION_TYPE) # type: ignore
as_at_listing_time = models.BooleanField(_("as at listing time"), **DEFAULT_BOOLEAN_TYPE) # type: ignore
place = SafeCharField(verbose_name=_("place"), **DEFAULT_DESCRIPTION_TYPE) # type: ignore
region = SafeCharField(verbose_name=_("region"), **DEFAULT_DESCRIPTION_TYPE) # type: ignore
country_iso2_code = SafeCharField(verbose_name=_("country"), **COUNTRY_CODE_TYPE) # type: ignore
country_description = SafeCharField(verbose_name=_("country description"), **DEFAULT_DESCRIPTION_TYPE) # type: ignore
regulation_language = SafeCharField(verbose_name=_("regional language"), **LANGUAGE_CODE_TYPE) # type: ignore
logical_id = models.BigIntegerField(verbose_name=_("logical id"), blank=True, null=True, default=None)
regulation_summary = models.ForeignKey(RegulationSummary, **REGULATION_SUMMARY_TYPE) # type: ignore
class Meta:
verbose_name = _("address")
verbose_name_plural = _("addresses")
def clean(self):
if self.country_description and not self.country_iso2_code:
self.country_iso2_code = get_country_iso2_code(self.country_description)
class SanctionEntity(SanctionListObject):
source = models.ForeignKey(SanctionsListFile, verbose_name=_("source"), on_delete=models.CASCADE)
designation_details = SafeCharField(verbose_name=_("designation details"), **DEFAULT_DESCRIPTION_TYPE) # type: ignore
united_nation_id = SafeCharField(verbose_name=_("United Nation identifier"), **DEFAULT_DESCRIPTION_TYPE) # type: ignore
eu_reference_number = SafeCharField(verbose_name=_("EU reference number"), **DEFAULT_DESCRIPTION_TYPE) # type: ignore
logical_id = models.BigIntegerField(verbose_name=_("logical id"), blank=True, null=True, default=None)
subject_type = models.ForeignKey(SubjectType, verbose_name=_("subject type"), on_delete=models.PROTECT, null=True, default=None, blank=True)
data = models.JSONField(_("data"), default=dict, blank=True, encoder=DjangoJSONEncoder) # type: ignore
class Meta:
verbose_name = _("sanction entity")
verbose_name_plural = _("sanction entities")
def __str__(self):
return "{}-{}".format(self.source.list_type, self.logical_id)
|
py | b405a98ecb569591433037f19e277088db1250e4 | import graphene
from .posts import schema as posts_schema
class Query(posts_schema.PostsQuery, graphene.ObjectType):
# This class will inherit from multiple Queries
# as we begin to add more apps to our project
pass
schema = graphene.Schema(query=Query) |
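# Example usage (sketch): run a query against the schema. The query field below
# is hypothetical and depends on what posts_schema.PostsQuery actually defines.
#
#   result = schema.execute("{ allPosts { id title } }")
#   if result.errors:
#       print(result.errors)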
py | b405aa0163b047b0a486fdfddcfd6b2919be04bf | import urllib
import re
def get_quote(symbol):
base_url = 'http://finance.google.com/finance?q='
content = urllib.urlopen(base_url + symbol).read()
    # NOTE: the element id below is specific to the scraped page's markup and
    # may change at any time; this is a fragile screen-scraping approach.
    m = re.search('id="ref_694653_l".*?>(.*?)<', content)
if m:
quote = m.group(1)
else:
quote = 'no quote available for: ' + symbol
return quote
|
py | b405abdd30eef8bbca05eeeef633fc404a0ca8df | import unittest
import numpy as np
import ndhist
class Test(unittest.TestCase):
def test_ndhist_clear_method(self):
"""Tests if the clear method of the ndhist class works properly.
"""
axis_0 = ndhist.axes.linear(0,10)
h = ndhist.ndhist((axis_0,))
self.assertTrue(h.axes[0].has_underflow_bin)
self.assertTrue(h.axes[0].has_overflow_bin)
self.assertTrue(h.shape == (12,))
self.assertTrue(h.nbins == (10,))
self.assertTrue(np.all(h.axes[0].binedges == np.array(
[-np.inf,0,1,2,3,4,5,6,7,8,9,10,+np.inf]
)))
h.fill(([0,1,2,3,4,5,6,7,8,9,10,10,10],))
self.assertTrue(np.all(h.bincontent == 1))
h2 = h[3:6]
self.assertFalse(h2.axes[0].has_underflow_bin)
self.assertFalse(h2.axes[0].has_overflow_bin)
self.assertTrue(h2.shape == (3,))
self.assertTrue(h2.nbins == (3,))
self.assertTrue(np.all(h2.axes[0].binedges == np.array(
[2., 3., 4., 5.]
)))
self.assertTrue(np.all(h2.bincontent == 1))
h2.clear()
self.assertTrue(np.all(h2.bincontent == 0))
self.assertTrue(np.all(h.bincontent == np.array(
[ 1., 1., 0., 0., 0., 1., 1., 1., 1., 1.]
)))
if __name__ == "__main__":
unittest.main()
|
py | b405ac46d79c42086f2d1c3b703dad116f1ca0fb | import contextlib
from google import auth
from google.auth.transport import grpc as google_auth_transport_grpc
from google.auth.transport import requests as google_auth_transport_requests
BIGSTORE_SCOPES = [
'https://www.googleapis.com/auth/devstorage.write_only',
]
RESULTSTORE_SCOPES = [
"https://www.googleapis.com/auth/cloud-source-tools",
"https://www.googleapis.com/auth/cloud-platform"
]
ALL_SCOPES = BIGSTORE_SCOPES + RESULTSTORE_SCOPES
class Credentials():
""" Credentials container/helper for resultstoreui"""
def __init__(self):
"""
Initialize Credentials
"""
self.channel = None
self.scopes = ALL_SCOPES
@contextlib.contextmanager
def create_secure_channel(self, addr):
"""
        Creates a secure channel using GOOGLE_APPLICATION_CREDENTIALS from the
        user's environment
        Args:
            addr (str): The host and port of the service
        Yields:
            A gRPC channel
"""
credentials, _ = auth.default(scopes=self.scopes)
request = google_auth_transport_requests.Request()
channel = google_auth_transport_grpc.secure_authorized_channel(
credentials, request, addr)
self.channel = channel
yield channel
def get_active_channel(self):
"""Returns current active channel"""
return self.channel
def get_scopes(self):
"""Returns scopes"""
return self.scopes
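# Example usage (sketch; assumes application-default Google credentials are
# configured in the environment and the address points at a reachable gRPC
# service):
#
#   creds = Credentials()
#   with creds.create_secure_channel("resultstore.googleapis.com:443") as channel:
#       ...  # build gRPC service stubs on `channel`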
|
py | b405ace309915e1f4a94f8dc6f76712e5d3d2422 | from importmagician import import_from
with import_from('./'):
# Data pipeline
from configs.lane_detection.common.datasets.culane_seg import dataset
from configs.lane_detection.common.datasets.train_level0_288 import train_augmentation
from configs.lane_detection.common.datasets.test_288 import test_augmentation
# Optimization pipeline
from configs.lane_detection.common.optims.segloss_5class import loss
from configs.lane_detection.common.optims.sgd0048 import optimizer
from configs.lane_detection.common.optims.ep12_poly_warmup600 import lr_scheduler
train = dict(
exp_name='resnet101_resa_culane',
workers=2,
batch_size=2,
checkpoint=None,
# Device args
world_size=8,
dist_url='tcp://localhost:12345',
device='cuda',
val_num_steps=0, # Seg IoU validation (mostly useless)
save_dir='./checkpoints',
input_size=(288, 800),
original_size=(590, 1640),
num_classes=5,
num_epochs=12,
collate_fn=None, # 'dict_collate_fn' for LSTR
seg=True # Seg-based method or not
)
test = dict(
exp_name='resnet101_resa_culane',
workers=2,
batch_size=8,
checkpoint='./checkpoints/resnet101_resa_culane/model.pt',
# Device args
device='cuda',
save_dir='./checkpoints',
seg=True,
gap=20,
ppl=18,
thresh=0.3,
collate_fn=None, # 'dict_collate_fn' for LSTR
input_size=(288, 800),
original_size=(590, 1640),
max_lane=4,
dataset_name='culane'
)
model = dict(
name='RESA_Net',
backbone_cfg=dict(
name='predefined_resnet_backbone',
backbone_name='resnet101',
return_layer='layer3',
pretrained=True,
replace_stride_with_dilation=[False, True, True]
),
reducer_cfg=dict(
name='RESAReducer',
in_channels=1024,
reduce=128
),
spatial_conv_cfg=dict(
name='RESA',
num_channels=128,
iteration=5,
alpha=2.0
),
classifier_cfg=dict(
name='BUSD',
in_channels=128,
num_classes=5
),
lane_classifier_cfg=dict(
name='EDLaneExist',
num_output=5 - 1,
flattened_size=4500,
dropout=0.1,
pool='avg'
)
)
|
py | b405aeda84251c56857c2f0da138af8f4d488325 | """Tests for istanbul-setup.py.
"""
import functools
istanbul_setup = __import__('istanbul-setup')
Board = functools.partial(istanbul_setup.Board, shuffle=False)
def test_inner():
assert len(Board().inner) == 4, "A 4x4 Base board should have 4 Fountain spaces."
assert len(Board(mocha=True).inner) == 6, "A 5x4 Mocha board should have 6 Fountain spaces."
assert len(Board(letters=True).inner) == 6, "A 5x4 Letters board should have 6 Fountain spaces."
assert len(Board(mocha=True, letters=True).inner) == 1, "A 5x5 Bazaar board should have 1 Fountain space."
def test_islegal():
assert Board().islegal(), "Unshuffled Base board should be legal."
assert not Board(mocha=True).islegal(), "Unshuffled Mocha board shouldn't be legal due to tea house."
assert not Board(letters=True).islegal(), "Unshuffled Letters board shouldn't be legal due to tea house."
assert not Board(mocha=True, letters=True).islegal(), "Unshuffled Bazaar board shouldn't be legal due to fountain and tea house."
def test_main():
istanbul_setup.main([]) |
py | b405b039546c23a08559747231d884ab339f14b6 | import unittest
import sys
sys.path.append("..")
from Graph import Graph
from Dijkstra import *
class Graph_Test(unittest.TestCase):
def setUp(self):
self.graph = Graph()
def test_add_nodes(self):
self.graph.add_node('A')
self.graph.add_node('B')
self.graph.add_node('C')
self.graph.add_node('D')
self.graph.add_node('E')
self.graph.add_node('F')
self.graph.add_node('G')
        self.assertEqual(self.graph.nodes, set(['A', 'B', 'C', 'D', 'E', 'F', 'G']))
    def test_shortest_path(self):
self.graph.add_node('A')
self.graph.add_node('B')
self.graph.add_node('C')
self.graph.add_node('D')
self.graph.add_node('E')
self.graph.add_node('F')
self.graph.add_node('G')
self.graph.add_edge('A', 'B', 10)
self.graph.add_edge('A', 'C', 20)
self.graph.add_edge('B', 'D', 15)
self.graph.add_edge('C', 'D', 30)
self.graph.add_edge('B', 'E', 50)
self.graph.add_edge('D', 'E', 30)
self.graph.add_edge('E', 'F', 5)
self.graph.add_edge('F', 'G', 2)
dijkstra_output = dijkstra(self.graph, 'A')
self.assertEqual(shortest_path(self.graph, dijkstra_output,'A', 'E'), (55, ['A', 'B', 'D', 'E']))
self.assertEqual(shortest_path(self.graph, dijkstra_output,'A', 'G'), (62, ['A', 'B', 'D', 'E', 'F', 'G']))
    def test_for_one(self):
self.graph.add_node('A')
dijkstra_output = dijkstra(self.graph, 'A')
self.assertEqual(shortest_path(self.graph, dijkstra_output,'A', 'G'), "There is no sense in your request!")
def test_for_dijkstra_heap(self):
self.graph.add_node('A')
self.graph.add_node('B')
self.graph.add_node('C')
self.graph.add_node('D')
self.graph.add_node('E')
self.graph.add_node('F')
self.graph.add_node('G')
self.graph.add_edge('A', 'B', 10)
self.graph.add_edge('A', 'C', 20)
self.graph.add_edge('B', 'D', 15)
self.graph.add_edge('C', 'D', 30)
self.graph.add_edge('B', 'E', 50)
self.graph.add_edge('D', 'E', 30)
self.graph.add_edge('E', 'F', 5)
self.graph.add_edge('F', 'G', 2)
self.assertEqual(dijkstra_with_heap(self.graph, 'A', 'E'), (55, ['A', 'B', 'D', 'E']))
self.assertEqual(dijkstra_with_heap(self.graph, 'A', 'G'), (62, ['A', 'B', 'D', 'E', 'F', 'G']))
suite = unittest.TestLoader().loadTestsFromTestCase(Graph_Test)
unittest.TextTestRunner(verbosity=2).run(suite)
|
py | b405b08c4a0b78593b041f45ee3d49d82703bde2 | """
Enums for edge cases of polarization.
NOTE!
Enums presented here conform to the IEEE convention of left-/right-handedness.
"""
from enum import Enum
from math import sqrt, pi
from pylarization.ellipse import PolarizationEllipse
from pylarization.vectors import JonesVector, StokesVector
class JonesVectorState(Enum):
LINEAR_HORIZONTAL = JonesVector(1, 0)
LINEAR_VERTICAL = JonesVector(0, 1)
LINEAR_DIAGONAL = JonesVector(sqrt(2) * 0.5, sqrt(2) * 0.5)
LINEAR_ANTIDIAGONAL = JonesVector(sqrt(2) * 0.5, -sqrt(2) * 0.5)
CIRCULAR_LEFT_HANDED = JonesVector(sqrt(2) * 0.5, sqrt(2) * 0.5 * 1j)
CIRCULAR_RIGHT_HANDED = JonesVector(sqrt(2) * 0.5, -sqrt(2) * 0.5 * 1j)
class StokesVectorState(Enum):
LINEAR_HORIZONTAL = StokesVector(1, 1, 0, 0)
LINEAR_VERTICAL = StokesVector(1, -1, 0, 0)
LINEAR_DIAGONAL = StokesVector(1, 0, 1, 0)
LINEAR_ANTIDIAGONAL = StokesVector(1, 0, -1, 0)
CIRCULAR_LEFT_HANDED = StokesVector(1, 0, 0, 1)
CIRCULAR_RIGHT_HANDED = StokesVector(1, 0, 0, -1)
class PolarizationEllipseState(Enum):
LINEAR_HORIZONTAL = PolarizationEllipse(1.0, 0.0, 0.0)
LINEAR_VERTICAL = PolarizationEllipse(0.0, 1.0, 0.0)
LINEAR_DIAGONAL = PolarizationEllipse(sqrt(2) * 0.5, sqrt(2) * 0.5, 0.0)
LINEAR_ANTIDIAGONAL = PolarizationEllipse(sqrt(2) * 0.5, sqrt(2) * 0.5, pi)
CIRCULAR_LEFT_HANDED = PolarizationEllipse(sqrt(2) * 0.5, sqrt(2) * 0.5, pi/2)
CIRCULAR_RIGHT_HANDED = PolarizationEllipse(sqrt(2) * 0.5, sqrt(2) * 0.5, -pi/2) |
py | b405b1770cd34d66420b5bbcf7d2674907f95b1f | """
=====================================
Blind source separation using FastICA
=====================================
An example of estimating sources from noisy data.
:ref:`ICA` is used to estimate sources given noisy measurements.
Imagine 3 instruments playing simultaneously and 3 microphones
recording the mixed signals. ICA is used to recover the sources
ie. what is played by each instrument. Importantly, PCA fails
at recovering our `instruments` since the related signals reflect
non-Gaussian processes.
"""
print(__doc__)
import numpy as np
import matplotlib.pyplot as plt
from scipy import signal
from sklearn.decomposition import FastICA, PCA
# #############################################################################
# Generate sample data
np.random.seed(0)
n_samples = 2000
time = np.linspace(0, 8, n_samples)
s1 = np.sin(2 * time) # Signal 1 : sinusoidal signal
s2 = np.sign(np.sin(3 * time)) # Signal 2 : square signal
s3 = signal.sawtooth(2 * np.pi * time) # Signal 3: saw tooth signal
S = np.c_[s1, s2, s3]
S += 0.2 * np.random.normal(size=S.shape) # Add noise
S /= S.std(axis=0) # Standardize data
# Mix data
A = np.array([[1, 1, 1], [0.5, 2, 1.0], [1.5, 1.0, 2.0]]) # Mixing matrix
X = np.dot(S, A.T) # Generate observations
# Compute ICA
ica = FastICA(n_components=3)
S_ = ica.fit_transform(X) # Reconstruct signals
A_ = ica.mixing_ # Get estimated mixing matrix
# We can `prove` that the ICA model applies by reverting the unmixing.
assert np.allclose(X, np.dot(S_, A_.T) + ica.mean_)
# For comparison, compute PCA
pca = PCA(n_components=3)
H = pca.fit_transform(X) # Reconstruct signals based on orthogonal components
# #############################################################################
# Plot results
plt.figure()
models = [X, S, S_, H]
names = ['Observations (mixed signal)',
'True Sources',
'ICA recovered signals',
'PCA recovered signals']
colors = ['red', 'steelblue', 'orange']
for ii, (model, name) in enumerate(zip(models, names), 1):
plt.subplot(4, 1, ii)
plt.title(name)
for sig, color in zip(model.T, colors):
plt.plot(sig, color=color)
plt.tight_layout()
plt.show()
|
py | b405b1ef752a1702183bea0b47a0bc6616babde1 | # -*- coding: utf-8 -*-
"""User functions to streamline working with selected pymer4 LMER fit
attributes from lme4::lmer and lmerTest for ``fitgrid.lmer`` grids.
"""
import functools
import re
import warnings
import numpy as np
import pandas as pd
import matplotlib as mpl
from matplotlib import pyplot as plt
import fitgrid
from fitgrid.fitgrid import LMERFitGrid
def get_lmer_dfbetas(epochs, factor, **kwargs):
r"""Fit lmers leaving out factor levels one by one, compute DBETAS.
Parameters
----------
epochs : Epochs
Epochs object
factor : str
column name of the factor of interest
**kwargs
keyword arguments to pass on to ``fitgrid.lmer``, like ``RHS``
Returns
-------
dfbetas : pandas.DataFrame
dataframe containing DFBETAS values
Examples
--------
Example calculation showing how to pass in model fitting parameters::
dfbetas = fitgrid.utils.lmer.get_lmer_dfbetas(
epochs=epochs,
factor='subject_id',
            RHS='x + (x|a)'
)
Notes
-----
DFBETAS is computed according to the following formula [NieGroPel2012]_:
.. math::
DFBETAS_{ij} = \frac{\hat{\gamma}_i - \hat{\gamma}_{i(-j)}}{se\left(\hat{\gamma}_{i(-j)}\right)}
for parameter :math:`i` and level :math:`j` of ``factor``.
"""
# get the factor levels
table = epochs.table.reset_index().set_index(
[epochs.epoch_id, epochs.time]
)
levels = table[factor].unique()
# produce epochs tables with each level left out
looo_epochs = (
fitgrid.epochs_from_dataframe(
table[table[factor] != level],
time=epochs.time,
epoch_id=epochs.epoch_id,
channels=epochs.channels,
)
for level in levels
)
# fit lmer on these epochs
fitter = functools.partial(fitgrid.lmer, **kwargs)
grids = map(fitter, looo_epochs)
coefs = (grid.coefs for grid in grids)
# get coefficient estimates and se from leave one out fits
looo_coefs = pd.concat(coefs, keys=levels, axis=1)
looo_estimates = looo_coefs.loc[pd.IndexSlice[:, :, 'Estimate'], :]
looo_se = looo_coefs.loc[pd.IndexSlice[:, :, 'SE'], :]
# get coefficient estimates from regular fit (all levels included)
all_levels_coefs = fitgrid.lmer(epochs, **kwargs).coefs
all_levels_estimates = all_levels_coefs.loc[
pd.IndexSlice[:, :, 'Estimate'], :
]
# drop outer level of index for convenience
for df in (looo_estimates, looo_se, all_levels_estimates):
df.index = df.index.droplevel(level=-1)
# (all_levels_estimate - level_excluded_estimate) / level_excluded_se
dfbetas = all_levels_estimates.sub(looo_estimates, level=1).div(
looo_se, level=1
)
return dfbetas.stack(level=0)
def get_lmer_warnings(lmer_grid):
"""grid the LMERFitGrid lme4::lmer4 warnings by type
lmer warnings are a mishmash of characters, punctuation, and digits, some with
numerical values specific to the message, for instance,
| Model failed to converge with max|grad| = 0.00222262 (tol = 0.002, component 1)
| unable to evaluate scaled gradient
| boundary (singular) fit: see ?isSingular
| np.nan
The warning strings are returned as-is except for stripping
leading and trailing whitespace and the "= N.NNNNNNNN" portion of the
max \|grad\| convergence failure.
Parameters
----------
lmer_grid : fitgrid.LMERFitGrid
as returned by ``fitgrid.lmer()``, shape = time x channel
Returns
-------
warning_grids : dict
A dictionary, the keys are lmer warning strings, each value
is a `pandas.DataFrame` indicator grid where grid.loc[time, channel] == 1 if the
lmer warning == key, otherwise 0.
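    Examples
    --------
    a minimal sketch, assuming ``lmer_grid`` was returned by ``fitgrid.lmer()``
    >>> warning_grids = get_lmer_warnings(lmer_grid)
    >>> for warning, grid in warning_grids.items():
    ...     print(warning, int(grid.to_numpy().sum()), "flagged grid cells")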
"""
if not isinstance(lmer_grid, LMERFitGrid):
msg = (
"get_lmer_warnings() must be called on an "
f"LMERFitGrid not {type(lmer_grid)}"
)
raise ValueError(msg)
# In pymer4 0.7.1+ and lme4::lmer 0.22+ warnings come back from
# lme4::lmer via pymer4 as list of strings and each LMERFitgrid
# cell may have a list of 0, 1, 2, ... ? warnings. This means
# LMERFitGrid.warnings time index may have missing time stamps (= no
# warnings), a single time stamp (one warning), or duplicate time
# stamps (> 1 warning) and np.nan at channels where there is no
# warning at that timestamp.
# strip reported decimal values so max|grad| convergence failures are one kind
tidy_strings = lmer_grid.warnings.applymap(
lambda x: re.sub(
r"max\|grad\|\s+=\s+\d+\.\d+\s+", "max|grad| ", x
).strip()
if isinstance(x, str)
else x # no warning == np.nan
).rename_axis([lmer_grid.time, "wdx", "_empty"], axis=0)
# the number and types of warning generally vary by time and/or channel
warning_kinds = (
pd.Series(tidy_strings.to_numpy().flatten()).dropna().unique()
)
# collect messy gappy, multiple warnings as a dict of key==warning,
# value==tidy time x channel indicator grid (0, 1)
warning_grids = {}
assert lmer_grid._grid.shape == lmer_grid.has_warning.shape
for warning_kind in warning_kinds:
# empty grid w/ correct shape, row index and columns
warning_grid = pd.DataFrame(
np.zeros(lmer_grid._grid.shape, dtype=int),
index=lmer_grid._grid.index.copy(),
columns=lmer_grid._grid.columns.copy(),
)
# select rows w/ at least one non-na
warning_rows = tidy_strings[tidy_strings == warning_kind].dropna(
axis=0, how="all"
)
assert warning_rows.index.names[0] == lmer_grid._grid.index.name
assert all(
warning_rows.index.get_level_values(0)
== warning_rows.index.get_level_values(0).unique()
)
for rdx, row in warning_rows.iterrows():
warning_grid.loc[rdx[0], :] = (row == warning_kind).astype(int)
assert all(warning_grid.index == lmer_grid._grid.index)
assert all(warning_grid.columns == lmer_grid._grid.columns)
warning_grids[warning_kind] = warning_grid
return warning_grids
def plot_lmer_warnings(lmer_grid, which="each", verbose=True):
"""Raster plot lme4::lmer warning grids
Parameters
----------
lmer_grid : fitgrid.LMERFitGrid
as returned by ``fitgrid.lmer()``, shape = time x channel
which : {"each", "all", or list of str}
select the types of warnings to plot. `each` (default) plots
each type of warning separately. `all` plots one grid showing
where any type of warning occurred. A list of strings searches
the lmer warnings and plots those that match.
verbose : bool, default=True
If `True` warn of failed matches for warnings keywords.
Examples
--------
default, plot each warning grid separately
>>> plot_lmer_warnings(lmer_grid)
one plot shows everywhere there is a warning
>>> plot_lmer_warnings(lmer_grid, which="all")
plot just warnings that match these strings
>>> plot_lmer_warnings(lmer_grid, which=["converge", "singular"])
"""
def _plot_warnings(warning, warning_grid):
# masked array non-values are transparent in pcolormesh
_, axi = plt.subplots(figsize=(12, len(warning_grid.columns) / 2))
axi.set_title(warning)
ylabels = warning_grid.columns
axi.yaxis.set_major_locator(
mpl.ticker.FixedLocator(np.arange(len(ylabels)))
)
axi.yaxis.set_major_formatter(mpl.ticker.FixedFormatter(ylabels))
axi.pcolormesh(
warning_grid.index,
np.arange(len(ylabels)),
np.ma.masked_not_equal(warning_grid.T.to_numpy(), 1),
shading="nearest",
cmap=mpl.colors.ListedColormap(['red']),
)
# validate kwarg
if not (
isinstance(which, str)
or (
isinstance(which, list)
and all((isinstance(wrn, str) for wrn in which))
)
):
raise ValueError(
"The value for which=value must be 'any', 'each', a warning "
f"string pattern to match or list of them, not this: {which}"
)
warning_grids = get_lmer_warnings(lmer_grid)
warning_grids["all"] = lmer_grid.has_warning.astype(int)
keys = None
if which == "all":
keys = ["all"]
elif which == "each":
keys = list(warning_grids.keys())
else:
# lookup matching patterns var so as to not step on original kwarg
patterns = [which] if isinstance(which, str) else which
keys = []
for pattern in patterns:
matches = [key for key in warning_grids if pattern in key]
keys += matches # may be []
if verbose and not matches:
warnings.warn(f"warning pattern '{pattern}' not found")
assert isinstance(keys, list), f"this should be type list: {type(keys)}"
for key in keys:
if verbose:
print(f"{key}")
_plot_warnings(key, warning_grids[key])
if verbose and not keys:
warnings.warn(f"no model warnings match {which}")
|
py | b405b2f7c239cac5d7d007c2312e9505f9513525 | """``tornado.gen`` is a generator-based interface to make it easier to
work in an asynchronous environment. Code using the ``gen`` module
is technically asynchronous, but it is written as a single generator
instead of a collection of separate functions.
For example, the following asynchronous handler:
.. testcode::
class AsyncHandler(RequestHandler):
@asynchronous
def get(self):
http_client = AsyncHTTPClient()
http_client.fetch("http://example.com",
callback=self.on_fetch)
def on_fetch(self, response):
do_something_with_response(response)
self.render("template.html")
.. testoutput::
:hide:
could be written with ``gen`` as:
.. testcode::
class GenAsyncHandler(RequestHandler):
@gen.coroutine
def get(self):
http_client = AsyncHTTPClient()
response = yield http_client.fetch("http://example.com")
do_something_with_response(response)
self.render("template.html")
.. testoutput::
:hide:
Most asynchronous functions in Tornado return a `.Future`;
yielding this object returns its `~.Future.result`.
You can also yield a list or dict of ``Futures``, which will be
started at the same time and run in parallel; a list or dict of results will
be returned when they are all finished:
.. testcode::
@gen.coroutine
def get(self):
http_client = AsyncHTTPClient()
response1, response2 = yield [http_client.fetch(url1),
http_client.fetch(url2)]
response_dict = yield dict(response3=http_client.fetch(url3),
response4=http_client.fetch(url4))
response3 = response_dict['response3']
response4 = response_dict['response4']
.. testoutput::
:hide:
If the `~functools.singledispatch` library is available (standard in
Python 3.4, available via the `singledispatch
<https://pypi.python.org/pypi/singledispatch>`_ package on older
versions), additional types of objects may be yielded. Tornado includes
support for ``asyncio.Future`` and Twisted's ``Deferred`` class when
``tornado.platform.asyncio`` and ``tornado.platform.twisted`` are imported.
See the `convert_yielded` function to extend this mechanism.
.. versionchanged:: 3.2
Dict support added.
.. versionchanged:: 4.1
Support added for yielding ``asyncio`` Futures and Twisted Deferreds
via ``singledispatch``.
"""
from __future__ import absolute_import, division, print_function, with_statement
import collections
import functools
import itertools
import sys
import types
import weakref
from tornado.concurrent import Future, TracebackFuture, is_future, chain_future
from tornado.ioloop import IOLoop
from tornado.log import app_log
from tornado import stack_context
try:
from functools import singledispatch # py34+
except ImportError as e:
try:
from singledispatch import singledispatch # backport
except ImportError:
singledispatch = None
class KeyReuseError(Exception):
pass
class UnknownKeyError(Exception):
pass
class LeakedCallbackError(Exception):
pass
class BadYieldError(Exception):
pass
class ReturnValueIgnoredError(Exception):
pass
class TimeoutError(Exception):
"""Exception raised by ``with_timeout``."""
def engine(func):
"""Callback-oriented decorator for asynchronous generators.
This is an older interface; for new code that does not need to be
compatible with versions of Tornado older than 3.0 the
`coroutine` decorator is recommended instead.
This decorator is similar to `coroutine`, except it does not
return a `.Future` and the ``callback`` argument is not treated
specially.
In most cases, functions decorated with `engine` should take
a ``callback`` argument and invoke it with their result when
they are finished. One notable exception is the
`~tornado.web.RequestHandler` :ref:`HTTP verb methods <verbs>`,
which use ``self.finish()`` in place of a callback argument.
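    A minimal sketch of the callback style (the HTTP fetch stands in for any
    asynchronous operation that returns a `.Future`)::
        @gen.engine
        def fetch_status(url, callback):
            response = yield AsyncHTTPClient().fetch(url)
            callback(response.code)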
"""
func = _make_coroutine_wrapper(func, replace_callback=False)
@functools.wraps(func)
def wrapper(*args, **kwargs):
future = func(*args, **kwargs)
def final_callback(future):
if future.result() is not None:
raise ReturnValueIgnoredError(
"@gen.engine functions cannot return values: %r" %
(future.result(),))
# The engine interface doesn't give us any way to return
# errors but to raise them into the stack context.
# Save the stack context here to use when the Future has resolved.
future.add_done_callback(stack_context.wrap(final_callback))
return wrapper
def coroutine(func, replace_callback=True):
"""Decorator for asynchronous generators.
Any generator that yields objects from this module must be wrapped
in either this decorator or `engine`.
Coroutines may "return" by raising the special exception
`Return(value) <Return>`. In Python 3.3+, it is also possible for
the function to simply use the ``return value`` statement (prior to
Python 3.3 generators were not allowed to also return values).
In all versions of Python a coroutine that simply wishes to exit
early may use the ``return`` statement without a value.
Functions with this decorator return a `.Future`. Additionally,
they may be called with a ``callback`` keyword argument, which
will be invoked with the future's result when it resolves. If the
coroutine fails, the callback will not be run and an exception
will be raised into the surrounding `.StackContext`. The
``callback`` argument is not visible inside the decorated
function; it is handled by the decorator itself.
From the caller's perspective, ``@gen.coroutine`` is similar to
the combination of ``@return_future`` and ``@gen.engine``.
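    A minimal sketch (the coroutine can be yielded from another coroutine, or
    driven at the top level with `.IOLoop.run_sync`)::
        @gen.coroutine
        def fetch_body(url):
            response = yield AsyncHTTPClient().fetch(url)
            raise gen.Return(response.body)
        body = IOLoop.current().run_sync(lambda: fetch_body("http://example.com"))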
.. warning::
When exceptions occur inside a coroutine, the exception
information will be stored in the `.Future` object. You must
examine the result of the `.Future` object, or the exception
may go unnoticed by your code. This means yielding the function
if called from another coroutine, using something like
`.IOLoop.run_sync` for top-level calls, or passing the `.Future`
to `.IOLoop.add_future`.
"""
return _make_coroutine_wrapper(func, replace_callback=True)
def _make_coroutine_wrapper(func, replace_callback):
"""The inner workings of ``@gen.coroutine`` and ``@gen.engine``.
The two decorators differ in their treatment of the ``callback``
argument, so we cannot simply implement ``@engine`` in terms of
``@coroutine``.
"""
@functools.wraps(func)
def wrapper(*args, **kwargs):
future = TracebackFuture()
if replace_callback and 'callback' in kwargs:
callback = kwargs.pop('callback')
IOLoop.current().add_future(
future, lambda future: callback(future.result()))
try:
result = func(*args, **kwargs)
except (Return, StopIteration) as e:
result = getattr(e, 'value', None)
except Exception:
future.set_exc_info(sys.exc_info())
return future
else:
if isinstance(result, types.GeneratorType):
# Inline the first iteration of Runner.run. This lets us
# avoid the cost of creating a Runner when the coroutine
# never actually yields, which in turn allows us to
# use "optional" coroutines in critical path code without
# performance penalty for the synchronous case.
try:
orig_stack_contexts = stack_context._state.contexts
yielded = next(result)
if stack_context._state.contexts is not orig_stack_contexts:
yielded = TracebackFuture()
yielded.set_exception(
stack_context.StackContextInconsistentError(
'stack_context inconsistency (probably caused '
'by yield within a "with StackContext" block)'))
except (StopIteration, Return) as e:
future.set_result(getattr(e, 'value', None))
except Exception:
future.set_exc_info(sys.exc_info())
else:
Runner(result, future, yielded)
try:
return future
finally:
# Subtle memory optimization: if next() raised an exception,
# the future's exc_info contains a traceback which
# includes this stack frame. This creates a cycle,
# which will be collected at the next full GC but has
# been shown to greatly increase memory usage of
# benchmarks (relative to the refcount-based scheme
# used in the absence of cycles). We can avoid the
# cycle by clearing the local variable after we return it.
future = None
future.set_result(result)
return future
return wrapper
class Return(Exception):
"""Special exception to return a value from a `coroutine`.
If this exception is raised, its value argument is used as the
result of the coroutine::
@gen.coroutine
def fetch_json(url):
response = yield AsyncHTTPClient().fetch(url)
raise gen.Return(json_decode(response.body))
In Python 3.3, this exception is no longer necessary: the ``return``
statement can be used directly to return a value (previously
``yield`` and ``return`` with a value could not be combined in the
same function).
By analogy with the return statement, the value argument is optional,
but it is never necessary to ``raise gen.Return()``. The ``return``
statement can be used with no arguments instead.
"""
def __init__(self, value=None):
super(Return, self).__init__()
self.value = value
class WaitIterator(object):
"""Provides an iterator to yield the results of futures as they finish.
Yielding a set of futures like this:
``results = yield [future1, future2]``
pauses the coroutine until both ``future1`` and ``future2``
return, and then restarts the coroutine with the results of both
futures. If either future is an exception, the expression will
raise that exception and all the results will be lost.
If you need to get the result of each future as soon as possible,
or if you need the result of some futures even if others produce
errors, you can use ``WaitIterator``::
wait_iterator = gen.WaitIterator(future1, future2)
while not wait_iterator.done():
try:
result = yield wait_iterator.next()
except Exception as e:
print("Error {} from {}".format(e, wait_iterator.current_future))
else:
print("Result {} recieved from {} at {}".format(
result, wait_iterator.current_future,
wait_iterator.current_index))
Because results are returned as soon as they are available the
output from the iterator *will not be in the same order as the
input arguments*. If you need to know which future produced the
current result, you can use the attributes
``WaitIterator.current_future``, or ``WaitIterator.current_index``
to get the index of the future from the input list. (if keyword
arguments were used in the construction of the `WaitIterator`,
``current_index`` will use the corresponding keyword).
.. versionadded:: 4.1
"""
def __init__(self, *args, **kwargs):
if args and kwargs:
raise ValueError(
"You must provide args or kwargs, not both")
if kwargs:
self._unfinished = dict((f, k) for (k, f) in kwargs.items())
futures = list(kwargs.values())
else:
self._unfinished = dict((f, i) for (i, f) in enumerate(args))
futures = args
self._finished = collections.deque()
self.current_index = self.current_future = None
self._running_future = None
self_ref = weakref.ref(self)
for future in futures:
future.add_done_callback(functools.partial(
self._done_callback, self_ref))
def done(self):
"""Returns True if this iterator has no more results."""
if self._finished or self._unfinished:
return False
# Clear the 'current' values when iteration is done.
self.current_index = self.current_future = None
return True
def next(self):
"""Returns a `.Future` that will yield the next available result.
Note that this `.Future` will not be the same object as any of
the inputs.
"""
self._running_future = TracebackFuture()
if self._finished:
self._return_result(self._finished.popleft())
return self._running_future
@staticmethod
def _done_callback(self_ref, done):
self = self_ref()
if self is not None:
if self._running_future and not self._running_future.done():
self._return_result(done)
else:
self._finished.append(done)
def _return_result(self, done):
"""Called set the returned future's state that of the future
we yielded, and set the current future for the iterator.
"""
chain_future(done, self._running_future)
self.current_future = done
self.current_index = self._unfinished.pop(done)
class YieldPoint(object):
"""Base class for objects that may be yielded from the generator.
.. deprecated:: 4.0
Use `Futures <.Future>` instead.
"""
def start(self, runner):
"""Called by the runner after the generator has yielded.
No other methods will be called on this object before ``start``.
"""
raise NotImplementedError()
def is_ready(self):
"""Called by the runner to determine whether to resume the generator.
Returns a boolean; may be called more than once.
"""
raise NotImplementedError()
def get_result(self):
"""Returns the value to use as the result of the yield expression.
This method will only be called once, and only after `is_ready`
has returned true.
"""
raise NotImplementedError()
class Callback(YieldPoint):
"""Returns a callable object that will allow a matching `Wait` to proceed.
The key may be any value suitable for use as a dictionary key, and is
used to match ``Callbacks`` to their corresponding ``Waits``. The key
must be unique among outstanding callbacks within a single run of the
generator function, but may be reused across different runs of the same
function (so constants generally work fine).
The callback may be called with zero or one arguments; if an argument
is given it will be returned by `Wait`.
.. deprecated:: 4.0
Use `Futures <.Future>` instead.
"""
def __init__(self, key):
self.key = key
def start(self, runner):
self.runner = runner
runner.register_callback(self.key)
def is_ready(self):
return True
def get_result(self):
return self.runner.result_callback(self.key)
class Wait(YieldPoint):
"""Returns the argument passed to the result of a previous `Callback`.
.. deprecated:: 4.0
Use `Futures <.Future>` instead.
"""
def __init__(self, key):
self.key = key
def start(self, runner):
self.runner = runner
def is_ready(self):
return self.runner.is_ready(self.key)
def get_result(self):
return self.runner.pop_result(self.key)
class WaitAll(YieldPoint):
"""Returns the results of multiple previous `Callbacks <Callback>`.
The argument is a sequence of `Callback` keys, and the result is
a list of results in the same order.
`WaitAll` is equivalent to yielding a list of `Wait` objects.
.. deprecated:: 4.0
Use `Futures <.Future>` instead.
"""
def __init__(self, keys):
self.keys = keys
def start(self, runner):
self.runner = runner
def is_ready(self):
return all(self.runner.is_ready(key) for key in self.keys)
def get_result(self):
return [self.runner.pop_result(key) for key in self.keys]
def Task(func, *args, **kwargs):
"""Adapts a callback-based asynchronous function for use in coroutines.
Takes a function (and optional additional arguments) and runs it with
those arguments plus a ``callback`` keyword argument. The argument passed
to the callback is returned as the result of the yield expression.
.. versionchanged:: 4.0
``gen.Task`` is now a function that returns a `.Future`, instead of
a subclass of `YieldPoint`. It still behaves the same way when
yielded.
"""
future = Future()
def handle_exception(typ, value, tb):
if future.done():
return False
future.set_exc_info((typ, value, tb))
return True
def set_result(result):
if future.done():
return
future.set_result(result)
with stack_context.ExceptionStackContext(handle_exception):
func(*args, callback=_argument_adapter(set_result), **kwargs)
return future
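# A hedged usage sketch of ``Task`` inside a coroutine. ``legacy_call`` stands
# for any hypothetical callback-style function of the form
# legacy_call(arg, callback=...) that invokes callback(result) when finished:
#
#     @coroutine
#     def wrapper(arg):
#         result = yield Task(legacy_call, arg)
#         raise Return(result)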
class YieldFuture(YieldPoint):
def __init__(self, future, io_loop=None):
"""Adapts a `.Future` to the `YieldPoint` interface.
.. versionchanged:: 4.1
The ``io_loop`` argument is deprecated.
"""
self.future = future
self.io_loop = io_loop or IOLoop.current()
def start(self, runner):
if not self.future.done():
self.runner = runner
self.key = object()
runner.register_callback(self.key)
self.io_loop.add_future(self.future, runner.result_callback(self.key))
else:
self.runner = None
self.result = self.future.result()
def is_ready(self):
if self.runner is not None:
return self.runner.is_ready(self.key)
else:
return True
def get_result(self):
if self.runner is not None:
return self.runner.pop_result(self.key).result()
else:
return self.result
class Multi(YieldPoint):
"""Runs multiple asynchronous operations in parallel.
Takes a list of ``YieldPoints`` or ``Futures`` and returns a list of
their responses. It is not necessary to call `Multi` explicitly,
since the engine will do so automatically when the generator yields
a list of ``YieldPoints`` or a mixture of ``YieldPoints`` and ``Futures``.
Instead of a list, the argument may also be a dictionary whose values are
Futures, in which case a parallel dictionary is returned mapping the same
keys to their results.
"""
def __init__(self, children):
self.keys = None
if isinstance(children, dict):
self.keys = list(children.keys())
children = children.values()
self.children = []
for i in children:
if is_future(i):
i = YieldFuture(i)
self.children.append(i)
assert all(isinstance(i, YieldPoint) for i in self.children)
self.unfinished_children = set(self.children)
def start(self, runner):
for i in self.children:
i.start(runner)
def is_ready(self):
finished = list(itertools.takewhile(
lambda i: i.is_ready(), self.unfinished_children))
self.unfinished_children.difference_update(finished)
return not self.unfinished_children
def get_result(self):
result = (i.get_result() for i in self.children)
if self.keys is not None:
return dict(zip(self.keys, result))
else:
return list(result)
def multi_future(children):
"""Wait for multiple asynchronous futures in parallel.
Takes a list of ``Futures`` (but *not* other ``YieldPoints``) and returns
a new Future that resolves when all the other Futures are done.
If all the ``Futures`` succeeded, the returned Future's result is a list
of their results. If any failed, the returned Future raises the exception
of the first one to fail.
Instead of a list, the argument may also be a dictionary whose values are
Futures, in which case a parallel dictionary is returned mapping the same
keys to their results.
    It is not necessary to call `multi_future` explicitly, since the engine will
do so automatically when the generator yields a list of `Futures`.
This function is faster than the `Multi` `YieldPoint` because it does not
require the creation of a stack context.
.. versionadded:: 4.0
"""
if isinstance(children, dict):
keys = list(children.keys())
children = children.values()
else:
keys = None
assert all(is_future(i) for i in children)
unfinished_children = set(children)
future = Future()
if not children:
future.set_result({} if keys is not None else [])
def callback(f):
unfinished_children.remove(f)
if not unfinished_children:
try:
result_list = [i.result() for i in children]
except Exception:
future.set_exc_info(sys.exc_info())
else:
if keys is not None:
future.set_result(dict(zip(keys, result_list)))
else:
future.set_result(result_list)
for f in children:
f.add_done_callback(callback)
return future
def maybe_future(x):
"""Converts ``x`` into a `.Future`.
If ``x`` is already a `.Future`, it is simply returned; otherwise
it is wrapped in a new `.Future`. This is suitable for use as
``result = yield gen.maybe_future(f())`` when you don't know whether
``f()`` returns a `.Future` or not.
"""
if is_future(x):
return x
else:
fut = Future()
fut.set_result(x)
return fut
def with_timeout(timeout, future, io_loop=None, quiet_exceptions=()):
"""Wraps a `.Future` in a timeout.
Raises `TimeoutError` if the input future does not complete before
``timeout``, which may be specified in any form allowed by
`.IOLoop.add_timeout` (i.e. a `datetime.timedelta` or an absolute time
relative to `.IOLoop.time`)
If the wrapped `.Future` fails after it has timed out, the exception
will be logged unless it is of a type contained in ``quiet_exceptions``
(which may be an exception type or a sequence of types).
Currently only supports Futures, not other `YieldPoint` classes.
.. versionadded:: 4.0
.. versionchanged:: 4.1
Added the ``quiet_exceptions`` argument and the logging of unhandled
exceptions.
"""
# TODO: allow yield points in addition to futures?
# Tricky to do with stack_context semantics.
#
# It's tempting to optimize this by cancelling the input future on timeout
# instead of creating a new one, but A) we can't know if we are the only
# one waiting on the input future, so cancelling it might disrupt other
# callers and B) concurrent futures can only be cancelled while they are
# in the queue, so cancellation cannot reliably bound our waiting time.
result = Future()
chain_future(future, result)
if io_loop is None:
io_loop = IOLoop.current()
def error_callback(future):
try:
future.result()
except Exception as e:
if not isinstance(e, quiet_exceptions):
app_log.error("Exception in Future %r after timeout",
future, exc_info=True)
def timeout_callback():
result.set_exception(TimeoutError("Timeout"))
# In case the wrapped future goes on to fail, log it.
future.add_done_callback(error_callback)
timeout_handle = io_loop.add_timeout(
timeout, timeout_callback)
if isinstance(future, Future):
# We know this future will resolve on the IOLoop, so we don't
# need the extra thread-safety of IOLoop.add_future (and we also
        # don't care about StackContext here).
future.add_done_callback(
lambda future: io_loop.remove_timeout(timeout_handle))
else:
# concurrent.futures.Futures may resolve on any thread, so we
# need to route them back to the IOLoop.
io_loop.add_future(
future, lambda future: io_loop.remove_timeout(timeout_handle))
return result
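# A hedged usage sketch (the fetch call is illustrative; datetime and
# AsyncHTTPClient are assumed to be imported by the caller):
#
#     @coroutine
#     def fetch_with_deadline(url):
#         try:
#             response = yield with_timeout(datetime.timedelta(seconds=5),
#                                           AsyncHTTPClient().fetch(url))
#         except TimeoutError:
#             response = None
#         raise Return(response)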
def sleep(duration):
"""Return a `.Future` that resolves after the given number of seconds.
When used with ``yield`` in a coroutine, this is a non-blocking
analogue to `time.sleep` (which should not be used in coroutines
because it is blocking)::
yield gen.sleep(0.5)
Note that calling this function on its own does nothing; you must
wait on the `.Future` it returns (usually by yielding it).
.. versionadded:: 4.1
"""
f = Future()
IOLoop.current().call_later(duration, lambda: f.set_result(None))
return f
_null_future = Future()
_null_future.set_result(None)
moment = Future()
moment.__doc__ = \
"""A special object which may be yielded to allow the IOLoop to run for
one iteration.
This is not needed in normal use but it can be helpful in long-running
coroutines that are likely to yield Futures that are ready instantly.
Usage: ``yield gen.moment``
.. versionadded:: 4.0
"""
moment.set_result(None)
class Runner(object):
"""Internal implementation of `tornado.gen.engine`.
Maintains information about pending callbacks and their results.
The results of the generator are stored in ``result_future`` (a
`.TracebackFuture`)
"""
def __init__(self, gen, result_future, first_yielded):
self.gen = gen
self.result_future = result_future
self.future = _null_future
self.yield_point = None
self.pending_callbacks = None
self.results = None
self.running = False
self.finished = False
self.had_exception = False
self.io_loop = IOLoop.current()
# For efficiency, we do not create a stack context until we
# reach a YieldPoint (stack contexts are required for the historical
# semantics of YieldPoints, but not for Futures). When we have
# done so, this field will be set and must be called at the end
# of the coroutine.
self.stack_context_deactivate = None
if self.handle_yield(first_yielded):
self.run()
def register_callback(self, key):
"""Adds ``key`` to the list of callbacks."""
if self.pending_callbacks is None:
# Lazily initialize the old-style YieldPoint data structures.
self.pending_callbacks = set()
self.results = {}
if key in self.pending_callbacks:
raise KeyReuseError("key %r is already pending" % (key,))
self.pending_callbacks.add(key)
def is_ready(self, key):
"""Returns true if a result is available for ``key``."""
if self.pending_callbacks is None or key not in self.pending_callbacks:
raise UnknownKeyError("key %r is not pending" % (key,))
return key in self.results
def set_result(self, key, result):
"""Sets the result for ``key`` and attempts to resume the generator."""
self.results[key] = result
if self.yield_point is not None and self.yield_point.is_ready():
try:
self.future.set_result(self.yield_point.get_result())
except:
self.future.set_exc_info(sys.exc_info())
self.yield_point = None
self.run()
def pop_result(self, key):
"""Returns the result for ``key`` and unregisters it."""
self.pending_callbacks.remove(key)
return self.results.pop(key)
def run(self):
"""Starts or resumes the generator, running until it reaches a
yield point that is not ready.
"""
if self.running or self.finished:
return
try:
self.running = True
while True:
future = self.future
if not future.done():
return
self.future = None
try:
orig_stack_contexts = stack_context._state.contexts
exc_info = None
try:
value = future.result()
except Exception:
self.had_exception = True
exc_info = sys.exc_info()
if exc_info is not None:
yielded = self.gen.throw(*exc_info)
exc_info = None
else:
yielded = self.gen.send(value)
if stack_context._state.contexts is not orig_stack_contexts:
self.gen.throw(
stack_context.StackContextInconsistentError(
'stack_context inconsistency (probably caused '
'by yield within a "with StackContext" block)'))
except (StopIteration, Return) as e:
self.finished = True
self.future = _null_future
if self.pending_callbacks and not self.had_exception:
# If we ran cleanly without waiting on all callbacks
# raise an error (really more of a warning). If we
# had an exception then some callbacks may have been
# orphaned, so skip the check in that case.
raise LeakedCallbackError(
"finished without waiting for callbacks %r" %
self.pending_callbacks)
self.result_future.set_result(getattr(e, 'value', None))
self.result_future = None
self._deactivate_stack_context()
return
except Exception:
self.finished = True
self.future = _null_future
self.result_future.set_exc_info(sys.exc_info())
self.result_future = None
self._deactivate_stack_context()
return
if not self.handle_yield(yielded):
return
finally:
self.running = False
def handle_yield(self, yielded):
# Lists containing YieldPoints require stack contexts;
# other lists are handled via multi_future in convert_yielded.
if (isinstance(yielded, list) and
any(isinstance(f, YieldPoint) for f in yielded)):
yielded = Multi(yielded)
elif (isinstance(yielded, dict) and
any(isinstance(f, YieldPoint) for f in yielded.values())):
yielded = Multi(yielded)
if isinstance(yielded, YieldPoint):
# YieldPoints are too closely coupled to the Runner to go
# through the generic convert_yielded mechanism.
self.future = TracebackFuture()
def start_yield_point():
try:
yielded.start(self)
if yielded.is_ready():
self.future.set_result(
yielded.get_result())
else:
self.yield_point = yielded
except Exception:
self.future = TracebackFuture()
self.future.set_exc_info(sys.exc_info())
if self.stack_context_deactivate is None:
# Start a stack context if this is the first
# YieldPoint we've seen.
with stack_context.ExceptionStackContext(
self.handle_exception) as deactivate:
self.stack_context_deactivate = deactivate
def cb():
start_yield_point()
self.run()
self.io_loop.add_callback(cb)
return False
else:
start_yield_point()
else:
try:
self.future = convert_yielded(yielded)
except BadYieldError:
self.future = TracebackFuture()
self.future.set_exc_info(sys.exc_info())
if not self.future.done() or self.future is moment:
self.io_loop.add_future(
self.future, lambda f: self.run())
return False
return True
def result_callback(self, key):
return stack_context.wrap(_argument_adapter(
functools.partial(self.set_result, key)))
def handle_exception(self, typ, value, tb):
if not self.running and not self.finished:
self.future = TracebackFuture()
self.future.set_exc_info((typ, value, tb))
self.run()
return True
else:
return False
def _deactivate_stack_context(self):
if self.stack_context_deactivate is not None:
self.stack_context_deactivate()
self.stack_context_deactivate = None
Arguments = collections.namedtuple('Arguments', ['args', 'kwargs'])
def _argument_adapter(callback):
"""Returns a function that when invoked runs ``callback`` with one arg.
If the function returned by this function is called with exactly
one argument, that argument is passed to ``callback``. Otherwise
the args tuple and kwargs dict are wrapped in an `Arguments` object.
"""
def wrapper(*args, **kwargs):
if kwargs or len(args) > 1:
callback(Arguments(args, kwargs))
elif args:
callback(args[0])
else:
callback(None)
return wrapper
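# Behaviour sketch (illustrative only; ``callback`` is any one-argument callable):
#
#     adapted = _argument_adapter(callback)
#     adapted("x")           # callback("x")
#     adapted("x", "y")      # callback(Arguments(args=("x", "y"), kwargs={}))
#     adapted(key="value")   # callback(Arguments(args=(), kwargs={"key": "value"}))
#     adapted()              # callback(None)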
def convert_yielded(yielded):
"""Convert a yielded object into a `.Future`.
The default implementation accepts lists, dictionaries, and Futures.
If the `~functools.singledispatch` library is available, this function
may be extended to support additional types. For example::
@convert_yielded.register(asyncio.Future)
def _(asyncio_future):
return tornado.platform.asyncio.to_tornado_future(asyncio_future)
.. versionadded:: 4.1
"""
# Lists and dicts containing YieldPoints were handled separately
# via Multi().
if isinstance(yielded, (list, dict)):
return multi_future(yielded)
elif is_future(yielded):
return yielded
else:
raise BadYieldError("yielded unknown object %r" % (yielded,))
if singledispatch is not None:
convert_yielded = singledispatch(convert_yielded)
|
py | b405b47ec67860900675fa7f967cac677fc5a403 | """
File adapted from https://github.com/ds4dm/learn2branch
"""
import os
import argparse
import pickle
import glob
import shutil
import gzip
import math
import numpy as np
import multiprocessing as mp
import pyscipopt as scip
import utilities
class VanillaFullstrongBranchingDataCollector(scip.Branchrule):
"""
    Implements a branching policy to be used by SCIP such that the data collection required for hybrid models is embedded in it.
"""
def __init__(self, rng, query_expert_prob=0.60):
self.khalil_root_buffer = {}
self.obss = []
self.targets = []
self.obss_feats = []
self.exploration_policy = "pscost"
self.query_expert_prob = query_expert_prob
self.rng = rng
self.iteration_counter = 0
def branchinit(self):
self.ndomchgs = 0
self.ncutoffs = 0
self.khalil_root_buffer = {}
def branchexeclp(self, allowaddcons):
self.iteration_counter += 1
query_expert = self.rng.rand() < self.query_expert_prob
if query_expert or self.model.getNNodes() == 1:
candidate_vars, *_ = self.model.getPseudoBranchCands()
candidate_mask = [var.getCol().getLPPos() for var in candidate_vars]
state = utilities.extract_state(self.model)
state_khalil = utilities.extract_khalil_variable_features(self.model, candidate_vars, self.khalil_root_buffer)
result = self.model.executeBranchRule('vanillafullstrong', allowaddcons)
cands_, scores, npriocands, bestcand = self.model.getVanillafullstrongData()
best_var = cands_[bestcand]
self.add_obs(best_var, (state, state_khalil), (cands_, scores))
if self.model.getNNodes() == 1:
self.state = [state, state_khalil, self.obss[0]]
self.model.branchVar(best_var)
result = scip.SCIP_RESULT.BRANCHED
else:
result = self.model.executeBranchRule(self.exploration_policy, allowaddcons)
# fair node counting
if result == scip.SCIP_RESULT.REDUCEDDOM:
self.ndomchgs += 1
elif result == scip.SCIP_RESULT.CUTOFF:
self.ncutoffs += 1
return {'result': result}
def add_obs(self, best_var, state_, cands_scores=None): # TODO: return to normalcy
"""
Adds sample to the `self.obs` to be processed later at the end of optimization.
Parameters
----------
best_var : pyscipopt.Variable
object representing variable in LP
state_ : tuple
extracted features of constraints and variables at a node
cands_scores : np.array
scores of each of the candidate variable on which expert policy was executed
Return
------
        (bool): True if the sample is added successfully, False otherwise.
"""
if self.model.getNNodes() == 1:
self.obss = []
self.targets = []
self.obss_feats = []
self.map = sorted([x.getCol().getIndex() for x in self.model.getVars(transformed=True)])
cands, scores = cands_scores
# Do not record inconsistent scores. May happen if SCIP was early stopped (time limit).
if any([s < 0 for s in scores]):
return False
state, state_khalil = state_
var_features = state[2]['values']
cons_features = state[0]['values']
edge_features = state[1]
# add more features to variables
cands_index = [x.getCol().getIndex() for x in cands]
khalil_features = -np.ones((var_features.shape[0], state_khalil.shape[1]))
cand_ind = np.zeros((var_features.shape[0], 1))
khalil_features[cands_index] = state_khalil
cand_ind[cands_index] = 1
var_features = np.concatenate([var_features, khalil_features, cand_ind], axis=1)
tmp_scores = -np.ones(len(self.map))
if scores:
tmp_scores[cands_index] = scores
self.targets.append(best_var.getCol().getIndex())
self.obss.append([var_features, cons_features, edge_features])
        print(len(self.obss))
depth = self.model.getCurrentNode().getDepth()
self.obss_feats.append({'depth': depth, 'scores': np.array(tmp_scores), 'iteration': self.iteration_counter})
return True
def make_samples(in_queue, out_queue):
"""
Worker loop: fetch an instance, run an episode and record samples.
Parameters
----------
in_queue : multiprocessing.Queue
Input queue from which orders are received.
out_queue : multiprocessing.Queue
Output queue in which to send samples.
"""
while True:
episode, instance, seed, time_limit, outdir, rng = in_queue.get()
m = scip.Model()
m.setIntParam('display/verblevel', 0)
m.readProblem(f'{instance}')
utilities.init_scip_params(m, seed=seed)
m.setIntParam('timing/clocktype', 2)
m.setRealParam('limits/time', time_limit)
m.setLongintParam('limits/nodes', node_limit)
branchrule = VanillaFullstrongBranchingDataCollector(rng, node_record_prob)
m.includeBranchrule(
branchrule=branchrule,
name="Sampling branching rule", desc="",
priority=666666, maxdepth=-1, maxbounddist=1)
m.setBoolParam('branching/vanillafullstrong/integralcands', True)
m.setBoolParam('branching/vanillafullstrong/scoreall', True)
m.setBoolParam('branching/vanillafullstrong/collectscores', True)
m.setBoolParam('branching/vanillafullstrong/donotbranch', True)
m.setBoolParam('branching/vanillafullstrong/idempotent', True)
out_queue.put({
"type": 'start',
"episode": episode,
"instance": instance,
"seed": seed
})
m.optimize()
# data storage - root and node data are saved separately.
# node data carries a reference to the root filename.
        if m.getNNodes() >= 1 and len(branchrule.obss) > 0:
filenames = []
max_depth = max(x['depth'] for x in branchrule.obss_feats)
stats = {'nnodes': m.getNNodes(), 'time': m.getSolvingTime(), 'gap': m.getGap(), 'nobs': len(branchrule.obss)}
# prepare root data
sample_state, sample_khalil_state, root_obss = branchrule.state
sample_cand_scores = branchrule.obss_feats[0]['scores']
sample_cands = np.where(sample_cand_scores != -1)[0]
sample_cand_scores = sample_cand_scores[sample_cands]
cand_choice = np.where(sample_cands == branchrule.targets[0])[0][0]
root_filename = f"{outdir}/sample_root_0_{episode}.pkl"
filenames.append(root_filename)
with gzip.open(root_filename, 'wb') as f:
pickle.dump({
'type': 'root',
'episode': episode,
'instance': instance,
'seed': seed,
'stats': stats,
'root_state': [sample_state, sample_khalil_state, sample_cands, cand_choice, sample_cand_scores],
'obss': [branchrule.obss[0], branchrule.targets[0], branchrule.obss_feats[0], None],
'max_depth': max_depth
}, f)
# node data
for i in range(1, len(branchrule.obss)):
iteration_counter = branchrule.obss_feats[i]['iteration']
filenames.append(f"{outdir}/sample_node_{iteration_counter}_{episode}.pkl")
with gzip.open(filenames[-1], 'wb') as f:
pickle.dump({
'type' : 'node',
'episode': episode,
'instance': instance,
'seed': seed,
'stats': stats,
'root_state': f"{outdir}/sample_root_0_{episode}.pkl",
'obss': [branchrule.obss[i], branchrule.targets[i], branchrule.obss_feats[i], None],
'max_depth': max_depth
}, f)
out_queue.put({
"type": "done",
"episode": episode,
"instance": instance,
"seed": seed,
"filenames": filenames,
"nnodes": len(filenames),
})
m.freeProb()
def send_orders(orders_queue, instances, seed, time_limit, outdir, start_episode):
"""
    Dispatcher loop: continuously sample (instance, seed) pairs and put sampling orders on the queue for the workers.
Parameters
----------
orders_queue : multiprocessing.Queue
Input queue from which orders are received.
instances : list
list of filepaths of instances which are solved by SCIP to collect data
seed : int
        initial seed used to initialize the random number generator
time_limit : int
maximum time for which to solve an instance while collecting data
outdir : str
directory where to save data
start_episode : int
episode to resume data collection. It is used if the data collection process was stopped earlier for some reason.
"""
rng = np.random.RandomState(seed)
episode = 0
while True:
instance = rng.choice(instances)
seed = rng.randint(2**32)
        # skip episodes that were already processed, so a resumed (broken) run does not repeat instances and seeds
if episode <= start_episode:
episode += 1
continue
orders_queue.put([episode, instance, seed, time_limit, outdir, rng])
episode += 1
def collect_samples(instances, outdir, rng, n_samples, n_jobs, time_limit):
"""
    Main loop: dispatches sampling orders to worker processes and moves finished sample files out of the temporary directory until n_samples have been collected.
Parameters
----------
instances : list
filepaths of instances which will be solved to collect data
outdir : str
directory where to save data
rng : np.random.RandomState
random number generator
n_samples : int
total number of samples to collect.
n_jobs : int
number of CPUs to utilize or number of instances to solve in parallel.
time_limit : int
maximum time for which to solve an instance while collecting data
"""
os.makedirs(outdir, exist_ok=True)
# start workers
orders_queue = mp.Queue(maxsize=2*n_jobs)
answers_queue = mp.SimpleQueue()
workers = []
for i in range(n_jobs):
p = mp.Process(
target=make_samples,
args=(orders_queue, answers_queue),
daemon=True)
workers.append(p)
p.start()
    # dir to keep samples temporarily; helps keep a perfect count
tmp_samples_dir = f'{outdir}/tmp'
os.makedirs(tmp_samples_dir, exist_ok=True)
# if the process breaks due to some reason, resume from this last_episode.
existing_samples = glob.glob(f"{outdir}/*.pkl")
last_episode, last_i = -1, 0
if existing_samples:
last_episode = max(int(x.split("/")[-1].split(".pkl")[0].split("_")[-2]) for x in existing_samples) # episode is 2nd last
last_i = max(int(x.split("/")[-1].split(".pkl")[0].split("_")[-1]) for x in existing_samples) # sample number is the last
# start dispatcher
dispatcher = mp.Process(
target=send_orders,
args=(orders_queue, instances, rng.randint(2**32), time_limit, tmp_samples_dir, last_episode),
daemon=True)
dispatcher.start()
i = last_i # for a broken process
in_buffer = 0
while i <= n_samples:
sample = answers_queue.get()
if sample['type'] == 'start':
in_buffer += 1
if sample['type'] == 'done':
for filename in sample['filenames']:
x = filename.split('/')[-1].split(".pkl")[0]
os.rename(filename, f"{outdir}/{x}.pkl")
i += 1
print(f"[m {os.getpid()}] {i} / {n_samples} samples written, ep {sample['episode']} ({in_buffer} in buffer).")
if i == n_samples:
# early stop dispatcher (hard)
if dispatcher.is_alive():
dispatcher.terminate()
print(f"[m {os.getpid()}] dispatcher stopped...")
break
if not dispatcher.is_alive():
break
# stop all workers (hard)
for p in workers:
p.terminate()
shutil.rmtree(tmp_samples_dir, ignore_errors=True)
if __name__ == "__main__":
parser = argparse.ArgumentParser()
# problem parameters
parser.add_argument(
'problem',
help='MILP instance type to process.',
choices=['setcover', 'cauctions', 'facilities', 'indset'],
)
parser.add_argument(
'-s', '--seed',
help='Random generator seed.',
type=utilities.valid_seed,
default=0,
)
parser.add_argument(
'-j', '--njobs',
help='Number of parallel jobs.',
type=int,
default=1,
)
args = parser.parse_args()
train_size = 150000
valid_size = 30000
test_size = 30000
time_limit = 3600
node_limit = 500
node_record_prob = 1.0
basedir = "data/samples"
# get instance filenames
if args.problem == 'setcover':
instances_train = glob.glob('data/instances/setcover/train_500r_1000c_0.05d/*.lp')
instances_valid = glob.glob('data/instances/setcover/valid_500r_1000c_0.05d/*.lp')
instances_test = glob.glob('data/instances/setcover/test_500r_1000c_0.05d/*.lp')
out_dir = f'{basedir}/setcover/500r_1000c_0.05d'
elif args.problem == 'cauctions':
instances_train = glob.glob('data/instances/cauctions/train_100_500/*.lp')
instances_valid = glob.glob('data/instances/cauctions/valid_100_500/*.lp')
instances_test = glob.glob('data/instances/cauctions/test_100_500/*.lp')
out_dir = f'{basedir}/cauctions/test' # TODO: f'{basedir}/cauctions/100_500'
elif args.problem == 'indset':
instances_train = glob.glob('data/instances/indset/train_750_4/*.lp')
instances_valid = glob.glob('data/instances/indset/valid_750_4/*.lp')
instances_test = glob.glob('data/instances/indset/test_750_4/*.lp')
out_dir = f'{basedir}/indset/750_4'
elif args.problem == 'facilities':
instances_train = glob.glob('data/instances/facilities/train_100_100_5/*.lp')
instances_valid = glob.glob('data/instances/facilities/valid_100_100_5/*.lp')
instances_test = glob.glob('data/instances/facilities/test_100_100_5/*.lp')
out_dir = f'{basedir}/facilities/100_100_5'
time_limit = 600
else:
raise NotImplementedError
print(f"{len(instances_train)} train instances for {train_size} samples")
print(f"{len(instances_valid)} validation instances for {valid_size} samples")
print(f"{len(instances_test)} test instances for {test_size} samples")
rng = np.random.RandomState(args.seed + 1)
collect_samples(instances_train, out_dir + "/train", rng, train_size, args.njobs, time_limit)
print("Success: Train data collection")
rng = np.random.RandomState(args.seed + 1)
collect_samples(instances_valid, out_dir + "/valid", rng, valid_size, args.njobs, time_limit)
print("Success: Valid data collection")
rng = np.random.RandomState(args.seed + 1)
collect_samples(instances_test, out_dir + "/test", rng, test_size, args.njobs, time_limit)
print("Success: Test data collection")
if args.problem == "indset":
mediumvalid_size = 2000
instances_mediumvalid = glob.glob('data/instances/indset/mediumvalid_1000_4/*.lp')
out_dir = f'{basedir}/indset/1000_4'
print(f"{len(instances_mediumvalid)} medium validation instances for {mediumvalid_size} samples")
rng = np.random.RandomState(args.seed + 1)
collect_samples(instances_mediumvalid, out_dir + "/mediumvalid", rng, mediumvalid_size, args.njobs, time_limit)
print("Success: Medium validation data collection")
|
py | b405b4de6fc38b84a9d19efd1937953045182584 | from unittest import TestCase
from tests import get_data
from pytezos.michelson.converter import build_schema, decode_micheline, encode_micheline, micheline_to_michelson
class StorageTestKT1SfnYe9wAXbhLomtJQmHyYWRkkv5ZAU7yg(TestCase):
@classmethod
def setUpClass(cls):
cls.maxDiff = None
cls.contract = get_data('storage/carthagenet/KT1SfnYe9wAXbhLomtJQmHyYWRkkv5ZAU7yg.json')
def test_storage_encoding_KT1SfnYe9wAXbhLomtJQmHyYWRkkv5ZAU7yg(self):
type_expr = self.contract['script']['code'][1]
val_expr = self.contract['script']['storage']
schema = build_schema(type_expr)
decoded = decode_micheline(val_expr, type_expr, schema)
actual = encode_micheline(decoded, schema)
self.assertEqual(val_expr, actual)
def test_storage_schema_KT1SfnYe9wAXbhLomtJQmHyYWRkkv5ZAU7yg(self):
_ = build_schema(self.contract['script']['code'][0])
def test_storage_format_KT1SfnYe9wAXbhLomtJQmHyYWRkkv5ZAU7yg(self):
_ = micheline_to_michelson(self.contract['script']['code'])
_ = micheline_to_michelson(self.contract['script']['storage'])
|
py | b405b53c3445eac2572b19f3f07cb850f5cf5fd6 | from enum import Enum
from typing import Tuple
import varint
from peerdid.types import VerificationMethodType, VerificationMethodTypeAgreement
class Codec(Enum):
X25519 = 0xEC
ED25519 = 0xED
def to_multicodec(value: bytes, key_type: VerificationMethodType) -> bytes:
codec = _get_codec(key_type)
prefix = varint.encode(codec.value)
return b"".join([prefix, value])
def from_multicodec(value: bytes) -> Tuple[bytes, Codec]:
try:
prefix_int = varint.decode_bytes(value)
except Exception:
raise ValueError(
"Invalid key: Invalid multicodec prefix in {}".format(str(value))
)
try:
codec = Codec(prefix_int)
except ValueError:
raise ValueError(
"Invalid key: Unknown multicodec prefix {} in {}".format(
str(prefix_int), str(value)
)
)
prefix = varint.encode(prefix_int)
return value[len(prefix) :], codec
def _get_codec(key_type: VerificationMethodType) -> Codec:
if isinstance(key_type, VerificationMethodTypeAgreement):
return Codec.X25519
else:
return Codec.ED25519
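# A hedged round-trip sketch; ``some_signing_key_type`` stands for whichever
# non-agreement VerificationMethodType member the caller uses (hypothetical name):
#
#     raw = bytes(32)  # placeholder public key bytes
#     prefixed = to_multicodec(raw, some_signing_key_type)
#     raw_back, codec = from_multicodec(prefixed)
#     assert raw_back == raw and codec is Codec.ED25519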
|
py | b405b550931f2603bafe15633f6cf1d9fe45c06a | # Copyright 2019 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Functions for TF 1.X and 2.X compatibility."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import tensorflow as tf
def assign(ref, value, name=None):
if hasattr(tf.compat.v1, 'assign'):
return tf.compat.v1.assign(ref, value, name=name)
else:
return ref.assign(value, name=name)
def initialize_variables(testcase):
"""Handle global variable initialization in TF 1.X.
Arguments:
testcase: instance of tf.test.TestCase
"""
if hasattr(tf, 'global_variables_initializer') and not tf.executing_eagerly():
testcase.evaluate(tf.global_variables_initializer())
def is_v1_apis():
return hasattr(tf, 'assign')
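# A hedged usage sketch inside a tf.test.TestCase subclass (the variable and
# value are illustrative):
#
#     class AssignTest(tf.test.TestCase):
#         def test_assign(self):
#             v = tf.Variable(0.0)
#             initialize_variables(self)      # no-op under TF 2.x / eager
#             self.evaluate(assign(v, 1.0))
#             self.assertAllClose(self.evaluate(v), 1.0)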
|
py | b405b5aff731daa40d3c037ef3e89f3a7f28f69e | from mcculw import ul
from mcculw.enums import ULRange, InfoType, AnalogInputMode
from mcculw.enums import ScanOptions, BoardInfo, TriggerEvent, TrigType, FunctionType
from mcculw.ul import ULError
import ctypes
import numpy as np
import matplotlib.pyplot as plt
import pandas as pd
from warnings import warn
import os
from ..instruments.USB_1208HS_4AO import *
__all__ = ('RUN',)
class RUN():
def __init__(self, daq):
self.daq = daq.configure()
self.board_num = daq.board_num
self.ul_range = daq.ul_range
return
def load_waveform(self,wvfrm_vstack):
"""load a waveform for daq
----
        wvfrm_vstack: numpy.ndarray (e.g. the output of np.vstack), one row per output channel
"""
wf_1d, nzeros_front, nzeros_back = waveforms_to_1d_array(wvfrm_vstack)
self.wf_1d = wf_1d
self.nzeros_front = nzeros_front
self.nzeros_back = nzeros_back
self.input_wfm_df = pd.DataFrame({i:wvfrm_vstack[i,:] for i in range(wvfrm_vstack.shape[0])})
def config(self, out_channel_start,out_channel_end,in_channel_start,in_channel_end,nave,quiet = False):
"""configure run
----
        out_channel_start: int, first analog output channel on which the waveform is written
out_channel_end: int
in_channel_start: int
in_channel_end: int
"""
self.out_channel_end = out_channel_end
self.out_channel_start = out_channel_start
self.in_channel_end = in_channel_end
self.in_channel_start = in_channel_start
self.nave = nave
self.quiet = quiet
def go(self):
"""start the run"""
to_average = []
        # stop any old background input/output processes, just in case
ul.stop_background(self.board_num, FunctionType.AOFUNCTION)
ul.stop_background(self.board_num, FunctionType.AIFUNCTION)
nchannels_out = self.out_channel_end - self.out_channel_start + 1
for i in range(self.nave):
returned = apply_and_listen(self.wf_1d, self.nzeros_front, self.nzeros_back,
in_channel_start=self.in_channel_start, in_channel_end=self.in_channel_end,
out_channel_start=self.out_channel_start, out_channel_end=self.out_channel_end,
board_num=self.board_num, quiet=self.quiet)
memhandle_in, memhandle_out, data_array_in, data_array_out, count_in, time = returned
# Free the buffer and set the data_array to None
ul.win_buf_free(memhandle_out)
data_array_out = None
#now that it is done convert from data_array back to numpy data:
out = []
for i in range(0, count_in):
out.append(ul.to_eng_units(self.board_num, self.ul_range, data_array_in[i]))
out = np.array(out)
#clear memory
ul.win_buf_free(memhandle_in)
data_array_in = None
#append data
to_average.append(out)
data = np.array(to_average)
means = np.mean(data, axis = 0)
out = waveform_1d_to_array(means, nchannels_in=nchannels_out)
self.waveform_collected = out
self.time = time
return
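    # A hedged end-to-end usage sketch (the daq object and waveform arrays are
    # illustrative, not defined in this module):
    #
    #     run = RUN(daq)
    #     run.load_waveform(np.vstack([wf_ch0, wf_ch1]))
    #     run.config(out_channel_start=0, out_channel_end=1,
    #                in_channel_start=0, in_channel_end=1, nave=10)
    #     run.go()
    #     fig, ax = run.plot()
    #     run.get_df()
    #     run.save('data/', 'run0.csv')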
def plot(self,**kwargs):
"""plot waveform_collected"""
if not hasattr(self, 'time'):
raise AttributeError('no data has been collected, suggest self.go()')
fig, ax = plt.subplots(**kwargs)
for i in range(self.waveform_collected.shape[0]):
ax.plot(self.time*1e6, self.waveform_collected[i,:])
ax.set_xlabel('time (us)')
return fig, ax
def get_df(self):
"""return pandas dataframe of waveform_collected"""
if not hasattr(self, 'waveform_collected'):
raise AttributeError('no data has been collected, suggest self.go()')
nchannels_in = self.in_channel_end - self.in_channel_start + 1
#for time so divide by how many channels in
nzeros_front_for_time = int(self.nzeros_front/nchannels_in)
nzeros_back_for_time = int(self.nzeros_back/nchannels_in)
time = self.time[nzeros_front_for_time:-nzeros_back_for_time]
data = pd.DataFrame({
'time':time,
})
warn("You are getting the input wfm data (which is perfect), not measured current input to the coil")
for i in self.input_wfm_df:
data['AOUT_{}'.format(i)] = 10*(self.input_wfm_df[i]-2047)/2047
for i, x in enumerate(self.waveform_collected):
x_for_data = x[nzeros_front_for_time:-nzeros_back_for_time]
data['AIN_{}'.format(i)] = x_for_data
self.data = data
return
def save(self, path, name):
"""save waveform_collected too file"""
if not hasattr(self, 'data'):
            self.get_df()
#check if file name exists:
file_set = set(os.listdir(path))
if name in file_set:
yn = input('file already exists. Overwrite? (y/n)')
if yn == 'y':
self.data.to_csv(path+name, index = False)
else:
print('Ok. Skipping.')
else:
self.data.to_csv(path+name, index = False)
return |
py | b405b7f987fbb7b8f9dfa3f7640dbdcf78cb1914 | from torch._six import container_abcs
import collections.abc
from itertools import repeat
def as_triple(x, d_value=1):
if isinstance(x, container_abcs.Iterable):
x = list(x)
if len(x)==2:
x = [d_value] + x
return x
else:
return [d_value] + [x] * 2
def _ntuple_same(n):
def parse(x):
if isinstance(x, int):
return tuple(repeat(x, n))
elif isinstance(x, collections.abc.Iterable):
assert len(set(x))==1, 'the size of kernel must be the same for each side'
return tuple(repeat(x[0], n))
return parse
def _to_ntuple(n):
def parse(x):
if isinstance(x, int):
return tuple(repeat(x, n))
elif isinstance(x, collections.abc.Iterable):
if len(set(x))==1:
return tuple(repeat(x[0], n))
else:
                assert len(x) == n, 'wrong format'
return x
return parse
_pair_same = _ntuple_same(2)
_triple_same = _ntuple_same(3)
_to_pair = _to_ntuple(2)
_to_triple = _to_ntuple(3)
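# A hedged behaviour sketch of the helpers above:
#
#     as_triple(3)             # -> [1, 3, 3]   (d_value is prepended)
#     as_triple((5, 5))        # -> [1, 5, 5]
#     as_triple((2, 5, 5))     # -> [2, 5, 5]
#     _to_triple(3)            # -> (3, 3, 3)
#     _to_triple((2, 5, 5))    # -> (2, 5, 5)
#     _triple_same((7, 7, 7))  # -> (7, 7, 7)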
|
py | b405b87f4f7baa3ec8c75e61582e230f7a59739f | import sys
class AppTestMemory:
spaceconfig = dict(usemodules=('_multiprocessing', 'mmap',
'_rawffi', 'itertools',
'signal', 'select',
'binascii'))
if sys.platform == 'win32':
spaceconfig['usemodules'] += ('_cffi_backend',)
else:
spaceconfig['usemodules'] += ('fcntl',)
def test_address_of(self):
import _multiprocessing
raises(TypeError, _multiprocessing.address_of_buffer, None)
raises(TypeError, _multiprocessing.address_of_buffer, "a")
if sys.platform == "win32":
test_address_of.dont_track_allocations = True
def test_mmap_address(self):
import mmap
import _multiprocessing
# This is a bit faster than importing ctypes
import _ctypes
class c_double(_ctypes._SimpleCData):
_type_ = "d"
sizeof_double = _ctypes.sizeof(c_double)
buf = mmap.mmap(-1, 300)
buf[0:300] = '\0' * 300
# Get the address of shared memory
address, length = _multiprocessing.address_of_buffer(buf)
assert length == 300
# build a ctypes object from it
var = c_double.from_address(address)
assert buf[0:sizeof_double] == '\0' * sizeof_double
assert var.value == 0
# check that both objects share the same memory
var.value = 123.456
assert buf[0:sizeof_double] != '\0' * sizeof_double
buf[0:sizeof_double] = '\0' * sizeof_double
assert var.value == 0
|
py | b405b901e75dc52f5c0926adafcb594ede78d3a9 |
"""
Context object of incoming request
"""
class Context(object):
"""
Context stores model relevant worker information
    Some fixed during load times and some set by the service.
"""
def __init__(self, model_name, model_dir, manifest, batch_size, gpu, mms_version, limit_max_image_pixels=True):
self.model_name = model_name
self.manifest = manifest
print("manifest: %s", self.manifest)
self._system_properties = {
"model_dir": model_dir,
"gpu_id": gpu,
"batch_size": batch_size,
"server_name": "MMS",
"server_version": mms_version,
"limit_max_image_pixels": limit_max_image_pixels,
}
print("system_properties: %s", self._system_properties)
self.request_ids = None
self.request_processor = None
self._metrics = None
        self._limit_max_image_pixels = limit_max_image_pixels
print("All args: " + "\n".join([str(model_name), str(model_dir), str(
manifest), str(batch_size), str(gpu), str(mms_version)]))
@property
def system_properties(self):
return self._system_properties
@property
def request_processor(self):
return self._request_processor
@request_processor.setter
def request_processor(self, request_processor):
self._request_processor = request_processor
@property
def metrics(self):
return self._metrics
@metrics.setter
def metrics(self, metrics):
self._metrics = metrics
def get_request_id(self, idx=0):
return self.request_ids.get(idx)
def get_request_header(self, idx, key):
return self._request_processor[idx].get_request_property(key)
def get_all_request_header(self, idx):
return self._request_processor[idx].get_request_properties()
def set_response_content_type(self, idx, value):
self.set_response_header(idx, 'content-type', value)
def get_response_content_type(self, idx):
return self.get_response_headers(idx).get('content-type')
def get_response_status(self, idx):
return self._request_processor[idx].get_response_status_code(), \
self._request_processor[idx].get_response_status_phrase()
def set_response_status(self, code=200, phrase="", idx=0):
"""
Set the status code of individual requests
:param phrase:
:param idx: The index data in the list(data) that is sent to the handle() method
:param code:
:return:
"""
if self._request_processor is not None and self._request_processor[idx] is not None:
self._request_processor[idx].report_status(code,
reason_phrase=phrase)
def set_all_response_status(self, code=200, phrase=""):
"""
        Set the status code of all requests
:param phrase:
:param code:
:return:
"""
for idx, _ in enumerate(self._request_processor):
self._request_processor[idx].report_status(code, reason_phrase=phrase)
def get_response_headers(self, idx):
return self._request_processor[idx].get_response_headers()
def set_response_header(self, idx, key, value):
self._request_processor[idx].add_response_property(key, value)
# TODO: Should we add "add_header()" interface, to have multiple values for a single header. EG: Accept headers.
def __eq__(self, other):
return isinstance(other, Context) and self.__dict__ == other.__dict__
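# A hedged sketch of how a model handler might use a Context instance
# (the handler shape and payload are illustrative):
#
#     def handle(data, context):
#         context.set_response_content_type(0, "application/json")
#         if not data:
#             context.set_response_status(code=204, phrase="No Content", idx=0)
#             return [""]
#         return ["{}"]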
class RequestProcessor(object):
"""
Request processor
"""
def __init__(self, request_header):
self._status_code = 200
self._reason_phrase = None
self._response_header = {}
self._request_header = request_header
def get_request_property(self, key):
return self._request_header.get(key)
def report_status(self, code, reason_phrase=None):
self._status_code = code
self._reason_phrase = reason_phrase
def get_response_status_code(self):
return self._status_code
def get_response_status_phrase(self):
return self._reason_phrase
def add_response_property(self, key, value):
self._response_header[key] = value
def get_response_headers(self):
return self._response_header
def get_response_header(self, key):
return self._response_header.get(key)
def get_request_properties(self):
return self._request_header
|
py | b405ba308d397c53f503b60dbd7fd1d83cf72245 | import contextlib
import functools
import json
import random
import logging
from operator import attrgetter
import faker
import factory
import factory.fuzzy
from share.schema import ShareV2Schema
from share.schema.shapes import RelationShape
from share.transform.chain.links import IRILink
from share.util import TopologicalSorter
from share.util.graph import MutableGraph, MutableNode
logger = logging.getLogger(__name__)
sharev2_schema = ShareV2Schema()
used_ids = set()
_Faker = faker.Faker()
def format_id(type_name, id):
return '_:{}--{}'.format(type_name, id)
class FactoryGraph(MutableGraph):
# Override to ignore IDs
def topologically_sorted(self):
def sort_key(node):
return (
node.type,
*(node.attrs().items())
)
return TopologicalSorter(
sorted(self, key=sort_key),
dependencies=lambda n: sorted(
self.successors(n.id),
key=lambda id: sort_key(self.get_node(id)),
),
key=attrgetter('id'),
).sorted()
# Within tests, `graph1 == graph2` compares their contents
def __eq__(self, other):
if not isinstance(other, self.__class__):
return False
return self.to_jsonld(in_edges=False) == other.to_jsonld(in_edges=False)
# Normalize IDs to ease comparison
def to_jsonld(self, *args, **kwargs):
jsonld = super().to_jsonld(*args, **kwargs)
id_map = {
node['@id']: '_:__{}'.format(i)
for i, node in enumerate(jsonld['@graph'])
}
def map_id(value):
if isinstance(value, dict):
value['@id'] = id_map[value['@id']]
elif isinstance(value, list):
for v in value:
map_id(v)
for node in jsonld['@graph']:
for v in node.values():
map_id(v)
map_id(node)
return jsonld
# More readable test output
def __repr__(self):
return '{}({})'.format(
self.__class__.__name__,
json.dumps(self.to_jsonld(in_edges=False), indent=4, sort_keys=True),
)
class FactoryNode(MutableNode):
def __new__(cls, graph, id, type=None, **attrs):
return super().__new__(cls, graph, id, type, attrs)
def __init__(self, graph, id, type, **attrs):
super().__init__(graph, id, type, attrs)
class RandomStateManager:
def __init__(self, randoms, seed=None):
self._randoms = randoms
self._seed = seed or random.random()
self._states = {}
def get_states(self):
return tuple(r.getstate() for r in self._randoms)
def set_states(self, states):
for r, state in zip(self._randoms, states):
r.setstate(state)
def reseed(self, seed=None):
self._seed = seed or random.random()
for r in self._randoms:
r.seed(self._seed)
# factory.fuzzy.reseed_random(self._seed)
self._states = {}
@contextlib.contextmanager
def seed(self, name=None, seed=None):
old_states = self.get_states()
new_states = self._states.get(name) if name else None
if new_states is None:
initial_seed = seed or self._seed
for r in self._randoms:
r.seed(initial_seed)
new_states = self.get_states()
if name:
self._states[name] = new_states
self.set_states(new_states)
yield hash(new_states)
# Save the new state if it was advanced/used
if name:
self._states[name] = self.get_states()
# Leave random(s) untouched upon exiting
self.set_states(old_states)
class GraphBuilder:
def __init__(self):
self.random_states = RandomStateManager([random, _Faker.random])
def reseed(self):
self.random_states.reseed()
def build(self, *nodes, normalize_fields=False):
        # Reset all seeds at the beginning of each graph generation
# Ensures that graphs will be comparable
self.random_states.reseed(self.random_states._seed)
graph = FactoryGraph()
NodeBuilder(graph, self.random_states, normalize_fields).build_nodes(nodes)
return graph
def __call__(self, *args, **kwargs):
return self.build(*args, **kwargs)
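# A hedged usage sketch; the node type and attributes are illustrative:
#
#     build_graph = GraphBuilder()
#     graph = build_graph(
#         {'type': 'CreativeWork', 'title': 'An example title'},
#     )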
class NodeBuilder:
def __init__(self, graph, random_states, normalize_fields=False):
self.graph = graph
self.random_states = random_states
self.normalize_fields = normalize_fields
def get_factory(self, schema_type):
return {
'AbstractCreativeWork': AbstractCreativeWorkFactory,
'AbstractAgent': AbstractAgentFactory,
'AbstractAgentWorkRelation': AbstractAgentWorkRelationFactory,
'AbstractWorkRelation': AbstractWorkRelationFactory,
# 'AbstractAgentRelation': AbstractAgentRelationFactory,
'WorkIdentifier': WorkIdentifierFactory,
'AgentIdentifier': AgentIdentifierFactory,
'Subject': SubjectFactory,
'ThroughSubjects': ThroughSubjectsFactory,
'Tag': TagFactory,
'ThroughTags': ThroughTagsFactory,
# 'Award': AwardFactory,
# 'ThroughAwards': ThroughAwardsFactory,
}[schema_type.concrete_type]
def build_nodes(self, nodes):
for n in nodes:
if isinstance(n, list):
self.build_nodes(n)
else:
self.build(n)
def build(self, attrs):
assert 'type' in attrs, 'Must provide "type" when constructing a node'
attrs = {**attrs} # make a copy to avoid mutating the arg
node_type = attrs.pop('type')
sparse = attrs.pop('sparse', False)
seed = attrs.pop('seed', None)
if 'id' in attrs and attrs['id'] in self.graph:
id = attrs.pop('id')
assert not attrs, 'Cannot reference a previously defined node by id and set attrs'
return self.graph.get_node(id)
if self.normalize_fields:
attrs['parse'] = True
relations = {}
for key in tuple(attrs.keys()):
if isinstance(attrs[key], (dict, list)):
relations[key] = attrs.pop(key)
schema_type = sharev2_schema.get_type(node_type.replace('Abstract', ''))
# If it's a specific type, pass it along, otherwise let the factory choose a subtype
if node_type == schema_type.concrete_type:
attrs['type'] = random.choice(
list(sharev2_schema.get_type_names(schema_type.concrete_type))
)
else:
attrs['type'] = schema_type.name
# Extract/generate required relations.
# e.g. WorkIdentifier requires a work, Creator requires work and agent
for field_name in schema_type.explicit_fields:
field = sharev2_schema.get_field(node_type, field_name)
if (
field_name not in attrs
and field.is_relation
and field.is_required
):
try:
relation = relations.pop(field_name)
except KeyError:
# Value missing for required relation; generate a fake one
relation = {'type': field.related_concrete_type}
attrs[field_name] = self.build(relation)
if sparse:
# Don't generate fake data for missing fields
node = FactoryNode(self.graph, **attrs)
else:
if seed:
seed_ctx = self.random_states.seed(seed=str(seed) + schema_type.concrete_type)
else:
seed_ctx = self.random_states.seed(name=schema_type.concrete_type)
with seed_ctx:
node = self.get_factory(schema_type)(graph=self.graph, **attrs)
# Build specified *-to-many relations
for key, value in sorted(relations.items(), key=lambda x: x[0]):
field = sharev2_schema.get_field(node_type, key)
if isinstance(value, list):
if field.relation_shape == RelationShape.MANY_TO_MANY:
related = [self.build(v) for v in value]
for rel in related:
self.build({
'type': field.through_concrete_type,
field.incoming_through_relation: node,
field.outgoing_through_relation: rel,
})
else:
reverse_name = field.inverse_relation
for v in value:
v[reverse_name] = node
self.build(v)
else:
node[key] = self.build(value)
return node
class GraphNodeFactory(factory.Factory):
id = None # Let the graph generate an ID
graph = factory.SelfAttribute('..graph') # Subfactories use the parent's graph
class Meta:
abstract = True
model = FactoryNode
inline_args = ('graph',)
@factory.lazy_attribute
def type(self):
raise NotImplementedError('must give a `type`!')
@factory.post_generation
def parse(self, _, parse, **kwargs):
# Override this to parse fields like the regulator is expected to
pass
class AbstractAgentFactory(GraphNodeFactory):
@factory.lazy_attribute
def name(self):
if self.type == 'Person':
if any(getattr(self, n, None) for n in ('given_name', 'family_name', 'suffix', 'additional_name')):
return None
return _Faker.name()
return _Faker.company()
class Meta:
model = FactoryNode
@factory.post_generation
def parse(self, _, parse, **kwargs):
if not parse or self.type != 'Person':
return
name = self['name']
if not name:
self['name'] = ' '.join(filter(None, (
self[k]
for k in ['given_name', 'additional_name', 'family_name', 'suffix']
)))
class TagFactory(GraphNodeFactory):
name = factory.Faker('word')
class SubjectFactory(GraphNodeFactory):
name = factory.Faker('word')
class AbstractCreativeWorkFactory(GraphNodeFactory):
title = factory.Faker('sentence')
description = factory.Faker('paragraph')
language = factory.Faker('language_code')
# related_agents = factory.SubFactory(AgentWorkRelationFactory)
# identifiers = factory.SubFactory('tests.share.normalize.factories.WorkIdentifierFactory')
# related_works = factory.SubFactory(RelatedWorkFactory)
date_updated = factory.Faker('date', pattern='%Y-%m-%dT%H:%M:%SZ')
date_published = factory.Faker('date', pattern='%Y-%m-%dT%H:%M:%SZ')
rights = factory.Faker('paragraph')
free_to_read_type = factory.Faker('url')
free_to_read_date = factory.Faker('date', pattern='%Y-%m-%dT%H:%M:%SZ')
is_deleted = False
class Meta:
model = FactoryNode
class AbstractAgentWorkRelationFactory(GraphNodeFactory):
# lazy attr
# agent = factory.SubFactory(AbstractAgentFactory)
# creative_work = factory.SubFactory(AbstractCreativeWorkFactory)
# order_cited = factory.Faker('pyint')
@factory.lazy_attribute
def cited_as(self):
return self.agent['name']
# lazy attr base on type
# award = factory.SubFactory(AwardFactory)
class Meta:
model = FactoryNode
class AbstractWorkRelationFactory(GraphNodeFactory):
# related = factory.SubFactory(AbstractCreativeWorkFactory)
# subject = factory.SubFactory(AbstractCreativeWorkFactory)
class Meta:
model = FactoryNode
class ThroughTagsFactory(GraphNodeFactory):
pass
# tag = factory.SubFactory(TagFactory)
# creative_work = factory.SubFactory(AbstractCreativeWorkFactory)
class ThroughSubjectsFactory(GraphNodeFactory):
pass
# subject = factory.SubFactory(SubjectFactory)
# creative_work = factory.SubFactory(AbstractCreativeWorkFactory)
class WorkIdentifierFactory(GraphNodeFactory):
uri = factory.Faker('url')
# creative_work = factory.SubFactory(AbstractCreativeWorkFactory)
@factory.post_generation
def parse(self, _, parse, **kwargs):
if parse:
parsed = IRILink().execute(self['uri'])
self['uri'] = parsed['IRI']
self['scheme'] = parsed['scheme']
self['host'] = parsed['authority']
class AgentIdentifierFactory(GraphNodeFactory):
uri = factory.Faker('url')
# agent = factory.SubFactory(AbstractAgentFactory)
@factory.post_generation
def parse(self, _, parse, **kwargs):
if parse:
parsed = IRILink().execute(self['uri'])
self['uri'] = parsed['IRI']
self['scheme'] = parsed['scheme']
self['host'] = parsed['authority']
def _get_node_builder_params(seed=None, id=None, schema_type=None, model=None, **kwargs):
ret = {'type': schema_type.name.lower(), **kwargs}
if id is not None:
ret['id'] = format_id(schema_type.concrete_type.lower().replace('abstract', ''), id)
if seed is not None:
ret['seed'] = seed
return ret
__all__ = ()
for schema_type in sharev2_schema.schema_types.values():
locals()[schema_type.name] = functools.partial(_get_node_builder_params, schema_type=schema_type)
|
py | b405be17d77012eb99bd26b33dc4f2782afee878 | # This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this
# file, You can obtain one at http://mozilla.org/MPL/2.0/.
from gaiatest import GaiaTestCase
from gaiatest.apps.music.app import Music
class TestMusic(GaiaTestCase):
def setUp(self):
GaiaTestCase.setUp(self)
# add track to storage
self.push_resource('MUS_0001.mp3')
def test_select_album_play(self):
# https://moztrap.mozilla.org/manage/case/4031/
music_app = Music(self.marionette)
music_app.launch()
# switch to albums view
list_view = music_app.tap_albums_tab()
# check that albums (at least one) are available
albums = list_view.media
self.assertGreater(len(albums), 0, 'The mp3 file could not be found')
# select an album
sublist_view = albums[0].tap_first_album()
# select play
# This wait is timing out because of bug 862156
player_view = sublist_view.tap_play()
# play for a short duration
self.wait_for_condition(
lambda m: player_view.player_elapsed_time == '00:05',
message='Mp3 sample did not start playing')
# validate playback
self.assertTrue(player_view.is_player_playing(), 'The player is not playing')
# select stop
player_view.tap_play()
# validate stopped playback
self.assertFalse(player_view.is_player_playing(), 'The player did not stop playing')
|
py | b405bec5ce7711258a2ca61aa15c1e227c9cde86 | # Iyzipay API python client
# API docs at https://iyzico.com
# Authors:
# Yalcin Yenigun <[email protected]>
# Nurettin Bakkal <[email protected]>
# Configuration variables
api_key = 'your api key'
secret_key = 'your secret key'
base_url = 'sandbox-api.iyzipay.com'
# Resource
from iyzipay.iyzipay_resource import ( # noqa
ApiTest,
BinNumber,
InstallmentInfo,
Approval,
Disapproval,
CheckoutFormInitialize,
CheckoutForm,
Payment,
ThreedsInitialize,
ThreedsPayment,
Cancel,
Refund,
Card,
CardList,
Bkm,
BkmInitialize,
PeccoInitialize,
PeccoPayment,
CheckoutFormInitializePreAuth,
PaymentPreAuth,
PaymentPostAuth,
ThreedsInitializePreAuth,
RefundChargedFromMerchant,
PayoutCompletedTransactionList,
BouncedBankTransferList,
SubMerchant,
CrossBookingToSubMerchant,
CrossBookingFromSubMerchant,
BasicPayment,
BasicPaymentPreAuth,
BasicPaymentPostAuth,
BasicThreedsInitialize,
BasicThreedsInitializePreAuth,
BasicThreedsPayment,
BasicBkm,
BasicBkmInitialize,
RetrievePaymentDetails,
RetrieveTransactions,
IyziLinkProduct,
SubscriptionProduct,
SubscriptionPlan,
SubscriptionCustomer,
SubscriptionCheckoutForm,
SubscriptionCheckoutDirect,
IyziFileBase64Encoder)
from iyzipay.pki_builder import ( # noqa
PKIBuilder,
)
|
py | b405c3d17d2da6a42c208a19e2fbd09f6e1b9d7e | # -*- coding: utf-8 -*-
import unittest
import datetime
from pyboleto.bank.sicredi import BoletoSicredi
from .testutils import BoletoTestCase
class TestBancoSicredi(BoletoTestCase):
def setUp(self):
self.dados = []
for i in range(3):
d = BoletoSicredi()
d.carteira = '1'
d.posto = '08'
d.aceite = 'Sim'
d.especie_documento = 'DI'
d.agencia_cedente = '0434'
d.conta_cedente = '36699'
d.data_vencimento = datetime.date(2018, 1, 25)
d.data_documento = datetime.date(2017, 11, 24)
d.data_processamento = datetime.date(2017, 11, 24)
d.valor_documento = 90.75
d.nosso_numero = '18324121'
d.numero_documento = '33287-1/12'
self.dados.append(d)
def test_linha_digitavel(self):
self.assertEqual(
self.dados[0].linha_digitavel,
'74891.11836 24121.904346 08366.991068 1 74150000009075'
)
def test_codigo_de_barras(self):
self.assertEqual(
self.dados[0].barcode,
'74891741500000090751118324121904340836699106'
)
def test_agencia(self):
self.assertEqual(self.dados[0].agencia_cedente, '0434')
def test_conta(self):
self.assertEqual(self.dados[0].conta_cedente, '36699')
def test_dv_nosso_numero(self):
self.assertEqual(self.dados[0].dv_nosso_numero, 9)
suite = unittest.TestLoader().loadTestsFromTestCase(TestBancoSicredi)
if __name__ == '__main__':
unittest.main()
|
py | b405c41cfc3555bc4306b19637b71b4dd1b0d3e4 | # Copyright 2020 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Model defination for the Object Localization Network (OLN) Model."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import tensorflow as tf
from official.vision.detection.dataloader import anchor
from official.vision.detection.dataloader import mode_keys
from official.vision.detection.modeling import losses
from official.vision.detection.modeling.architecture import factory
from official.vision.detection.modeling.architecture import keras_utils
from official.vision.detection.modeling.maskrcnn_model import MaskrcnnModel
from official.vision.detection.ops import postprocess_ops
from official.vision.detection.ops import roi_ops
from official.vision.detection.ops import spatial_transform_ops
from official.vision.detection.ops import target_ops
from official.vision.detection.utils import box_utils
class OlnMaskModel(MaskrcnnModel):
"""OLN-Mask model function."""
def __init__(self, params):
super(OlnMaskModel, self).__init__(params)
self._params = params
# Different heads and layers.
self._include_rpn_class = params.architecture.include_rpn_class
self._include_mask = params.architecture.include_mask
self._include_frcnn_class = params.architecture.include_frcnn_class
self._include_frcnn_box = params.architecture.include_frcnn_box
self._include_centerness = params.rpn_head.has_centerness
self._include_box_score = (params.frcnn_head.has_scoring and
params.architecture.include_frcnn_box)
self._include_mask_score = (params.mrcnn_head.has_scoring and
params.architecture.include_mask)
# Architecture generators.
self._backbone_fn = factory.backbone_generator(params)
self._fpn_fn = factory.multilevel_features_generator(params)
self._rpn_head_fn = factory.rpn_head_generator(params)
if self._include_centerness:
self._rpn_head_fn = factory.oln_rpn_head_generator(params)
else:
self._rpn_head_fn = factory.rpn_head_generator(params)
self._generate_rois_fn = roi_ops.OlnROIGenerator(params.roi_proposal)
self._sample_rois_fn = target_ops.ROIScoreSampler(params.roi_sampling)
self._sample_masks_fn = target_ops.MaskSampler(
params.architecture.mask_target_size,
params.mask_sampling.num_mask_samples_per_image)
if self._include_box_score:
self._frcnn_head_fn = factory.oln_box_score_head_generator(params)
else:
self._frcnn_head_fn = factory.fast_rcnn_head_generator(params)
if self._include_mask:
if self._include_mask_score:
self._mrcnn_head_fn = factory.oln_mask_score_head_generator(params)
else:
self._mrcnn_head_fn = factory.mask_rcnn_head_generator(params)
# Loss function.
self._rpn_score_loss_fn = losses.RpnScoreLoss(params.rpn_score_loss)
self._rpn_box_loss_fn = losses.RpnBoxLoss(params.rpn_box_loss)
if self._include_centerness:
self._rpn_iou_loss_fn = losses.OlnRpnIoULoss()
self._rpn_center_loss_fn = losses.OlnRpnCenterLoss()
self._frcnn_class_loss_fn = losses.FastrcnnClassLoss()
self._frcnn_box_loss_fn = losses.FastrcnnBoxLoss(params.frcnn_box_loss)
if self._include_box_score:
self._frcnn_box_score_loss_fn = losses.OlnBoxScoreLoss(
params.frcnn_box_score_loss)
if self._include_mask:
self._mask_loss_fn = losses.MaskrcnnLoss()
self._generate_detections_fn = postprocess_ops.OlnDetectionGenerator(
params.postprocess)
self._transpose_input = params.train.transpose_input
    assert not self._transpose_input, 'Transpose input is not supported.'
def build_outputs(self, inputs, mode):
is_training = mode == mode_keys.TRAIN
model_outputs = {}
image = inputs['image']
_, image_height, image_width, _ = image.get_shape().as_list()
backbone_features = self._backbone_fn(image, is_training)
fpn_features = self._fpn_fn(backbone_features, is_training)
# rpn_centerness.
if self._include_centerness:
rpn_score_outputs, rpn_box_outputs, rpn_center_outputs = (
self._rpn_head_fn(fpn_features, is_training))
model_outputs.update({
'rpn_center_outputs':
tf.nest.map_structure(lambda x: tf.cast(x, tf.float32),
rpn_center_outputs),
})
object_scores = rpn_center_outputs
else:
rpn_score_outputs, rpn_box_outputs = self._rpn_head_fn(
fpn_features, is_training)
object_scores = None
model_outputs.update({
'rpn_score_outputs':
tf.nest.map_structure(lambda x: tf.cast(x, tf.float32),
rpn_score_outputs),
'rpn_box_outputs':
tf.nest.map_structure(lambda x: tf.cast(x, tf.float32),
rpn_box_outputs),
})
input_anchor = anchor.Anchor(self._params.architecture.min_level,
self._params.architecture.max_level,
self._params.anchor.num_scales,
self._params.anchor.aspect_ratios,
self._params.anchor.anchor_size,
(image_height, image_width))
rpn_rois, rpn_roi_scores = self._generate_rois_fn(
rpn_box_outputs,
rpn_score_outputs,
input_anchor.multilevel_boxes,
inputs['image_info'][:, 1, :],
is_training,
is_box_lrtb=self._include_centerness,
object_scores=object_scores,
)
if (not self._include_frcnn_class and
not self._include_frcnn_box and
not self._include_mask):
# if not is_training:
# For direct RPN detection,
# use dummy box_outputs = (dy,dx,dh,dw = 0,0,0,0)
box_outputs = tf.zeros_like(rpn_rois)
box_outputs = tf.concat([box_outputs, box_outputs], -1)
boxes, scores, classes, valid_detections = self._generate_detections_fn(
box_outputs, rpn_roi_scores, rpn_rois,
inputs['image_info'][:, 1:2, :],
is_single_fg_score=True, # if no_background, no softmax is applied.
keep_nms=True)
model_outputs.update({
'num_detections': valid_detections,
'detection_boxes': boxes,
'detection_classes': classes,
'detection_scores': scores,
})
return model_outputs
# ---- OLN-Proposal finishes here. ----
if is_training:
rpn_rois = tf.stop_gradient(rpn_rois)
rpn_roi_scores = tf.stop_gradient(rpn_roi_scores)
# Sample proposals.
(rpn_rois, rpn_roi_scores, matched_gt_boxes, matched_gt_classes,
matched_gt_indices) = (
self._sample_rois_fn(rpn_rois, rpn_roi_scores, inputs['gt_boxes'],
inputs['gt_classes']))
# Create bounding box training targets.
box_targets = box_utils.encode_boxes(
matched_gt_boxes, rpn_rois, weights=[10.0, 10.0, 5.0, 5.0])
# If the target is background, the box target is set to all 0s.
box_targets = tf.where(
tf.tile(
tf.expand_dims(tf.equal(matched_gt_classes, 0), axis=-1),
[1, 1, 4]), tf.zeros_like(box_targets), box_targets)
model_outputs.update({
'class_targets': matched_gt_classes,
'box_targets': box_targets,
})
# Create Box-IoU targets. {
box_ious = box_utils.bbox_overlap(
rpn_rois, inputs['gt_boxes'])
matched_box_ious = tf.reduce_max(box_ious, 2)
model_outputs.update({
'box_iou_targets': matched_box_ious,}) # }
roi_features = spatial_transform_ops.multilevel_crop_and_resize(
fpn_features, rpn_rois, output_size=7)
if not self._include_box_score:
class_outputs, box_outputs = self._frcnn_head_fn(
roi_features, is_training)
else:
class_outputs, box_outputs, score_outputs = self._frcnn_head_fn(
roi_features, is_training)
model_outputs.update({
'box_score_outputs':
tf.nest.map_structure(lambda x: tf.cast(x, tf.float32),
score_outputs),})
model_outputs.update({
'class_outputs':
tf.nest.map_structure(lambda x: tf.cast(x, tf.float32),
class_outputs),
'box_outputs':
tf.nest.map_structure(lambda x: tf.cast(x, tf.float32),
box_outputs),
})
# Add this output to train to make the checkpoint loadable in predict mode.
# If we skip it in train mode, the heads will be out-of-order and checkpoint
# loading will fail.
if not self._include_frcnn_box:
box_outputs = tf.zeros_like(box_outputs) # dummy zeros.
if self._include_box_score:
score_outputs = tf.cast(tf.squeeze(score_outputs, -1),
rpn_roi_scores.dtype)
# box-score = (rpn-centerness * box-iou)^(1/2)
# TR: rpn_roi_scores: b,1000, score_outputs: b,512
# TS: rpn_roi_scores: b,1000, score_outputs: b,1000
box_scores = tf.pow(
rpn_roi_scores * tf.sigmoid(score_outputs), 1/2.)
if not self._include_frcnn_class:
boxes, scores, classes, valid_detections = self._generate_detections_fn(
box_outputs,
box_scores,
rpn_rois,
inputs['image_info'][:, 1:2, :],
is_single_fg_score=True,
keep_nms=True,)
else:
boxes, scores, classes, valid_detections = self._generate_detections_fn(
box_outputs, class_outputs, rpn_rois,
inputs['image_info'][:, 1:2, :],
keep_nms=True,)
model_outputs.update({
'num_detections': valid_detections,
'detection_boxes': boxes,
'detection_classes': classes,
'detection_scores': scores,
})
# ---- OLN-Box finishes here. ----
if not self._include_mask:
return model_outputs
if is_training:
rpn_rois, classes, mask_targets = self._sample_masks_fn(
rpn_rois, matched_gt_boxes, matched_gt_classes, matched_gt_indices,
inputs['gt_masks'])
mask_targets = tf.stop_gradient(mask_targets)
classes = tf.cast(classes, dtype=tf.int32)
model_outputs.update({
'mask_targets': mask_targets,
'sampled_class_targets': classes,
})
else:
rpn_rois = boxes
classes = tf.cast(classes, dtype=tf.int32)
mask_roi_features = spatial_transform_ops.multilevel_crop_and_resize(
fpn_features, rpn_rois, output_size=14)
mask_outputs = self._mrcnn_head_fn(mask_roi_features, classes, is_training)
if is_training:
model_outputs.update({
'mask_outputs':
tf.nest.map_structure(lambda x: tf.cast(x, tf.float32),
mask_outputs),
})
else:
model_outputs.update({'detection_masks': tf.nn.sigmoid(mask_outputs)})
return model_outputs
def build_loss_fn(self):
if self._keras_model is None:
raise ValueError('build_loss_fn() must be called after build_model().')
filter_fn = self.make_filter_trainable_variables_fn()
trainable_variables = filter_fn(self._keras_model.trainable_variables)
def _total_loss_fn(labels, outputs):
if self._include_rpn_class:
rpn_score_loss = self._rpn_score_loss_fn(outputs['rpn_score_outputs'],
labels['rpn_score_targets'])
else:
rpn_score_loss = 0.0
if self._include_centerness:
rpn_center_loss = self._rpn_center_loss_fn(
outputs['rpn_center_outputs'], labels['rpn_center_targets'])
rpn_box_loss = self._rpn_iou_loss_fn(
outputs['rpn_box_outputs'], labels['rpn_box_targets'],
labels['rpn_center_targets'])
else:
rpn_center_loss = 0.0
rpn_box_loss = self._rpn_box_loss_fn(
outputs['rpn_box_outputs'], labels['rpn_box_targets'])
if self._include_frcnn_class:
frcnn_class_loss = self._frcnn_class_loss_fn(
outputs['class_outputs'], outputs['class_targets'])
else:
frcnn_class_loss = 0.0
if self._include_frcnn_box:
frcnn_box_loss = self._frcnn_box_loss_fn(
outputs['box_outputs'], outputs['class_targets'],
outputs['box_targets'])
else:
frcnn_box_loss = 0.0
if self._include_box_score:
box_score_loss = self._frcnn_box_score_loss_fn(
outputs['box_score_outputs'], outputs['box_iou_targets'])
else:
box_score_loss = 0.0
if self._include_mask:
mask_loss = self._mask_loss_fn(outputs['mask_outputs'],
outputs['mask_targets'],
outputs['sampled_class_targets'])
else:
mask_loss = 0.0
model_loss = (
rpn_score_loss + rpn_box_loss + rpn_center_loss +
frcnn_class_loss + frcnn_box_loss + box_score_loss +
mask_loss)
l2_regularization_loss = self.weight_decay_loss(trainable_variables)
total_loss = model_loss + l2_regularization_loss
return {
'total_loss': total_loss,
'loss': total_loss,
'fast_rcnn_class_loss': frcnn_class_loss,
'fast_rcnn_box_loss': frcnn_box_loss,
'fast_rcnn_box_score_loss': box_score_loss,
'mask_loss': mask_loss,
'model_loss': model_loss,
'l2_regularization_loss': l2_regularization_loss,
'rpn_score_loss': rpn_score_loss,
'rpn_box_loss': rpn_box_loss,
'rpn_center_loss': rpn_center_loss,
}
return _total_loss_fn
def build_input_layers(self, params, mode):
is_training = mode == mode_keys.TRAIN
input_shape = (
params.olnmask_parser.output_size +
[params.olnmask_parser.num_channels])
if is_training:
batch_size = params.train.batch_size
input_layer = {
'image':
tf.keras.layers.Input(
shape=input_shape,
batch_size=batch_size,
name='image',
dtype=tf.bfloat16 if self._use_bfloat16 else tf.float32),
'image_info':
tf.keras.layers.Input(
shape=[4, 2],
batch_size=batch_size,
name='image_info',
),
'gt_boxes':
tf.keras.layers.Input(
shape=[params.olnmask_parser.max_num_instances, 4],
batch_size=batch_size,
name='gt_boxes'),
'gt_classes':
tf.keras.layers.Input(
shape=[params.olnmask_parser.max_num_instances],
batch_size=batch_size,
name='gt_classes',
dtype=tf.int64),
}
if self._include_mask:
input_layer['gt_masks'] = tf.keras.layers.Input(
shape=[
params.olnmask_parser.max_num_instances,
params.olnmask_parser.mask_crop_size,
params.olnmask_parser.mask_crop_size
],
batch_size=batch_size,
name='gt_masks')
else:
batch_size = params.eval.batch_size
input_layer = {
'image':
tf.keras.layers.Input(
shape=input_shape,
batch_size=batch_size,
name='image',
dtype=tf.bfloat16 if self._use_bfloat16 else tf.float32),
'image_info':
tf.keras.layers.Input(
shape=[4, 2],
batch_size=batch_size,
name='image_info',
),
}
return input_layer
def build_model(self, params, mode):
if self._keras_model is None:
input_layers = self.build_input_layers(self._params, mode)
with keras_utils.maybe_enter_backend_graph():
outputs = self.model_outputs(input_layers, mode)
model = tf.keras.models.Model(
inputs=input_layers, outputs=outputs, name='olnmask')
assert model is not None, 'Fail to build tf.keras.Model.'
model.optimizer = self.build_optimizer()
self._keras_model = model
return self._keras_model
|
py | b405c46b59aeef6e0d40162cf714c66dea858996 | """Example script to define and execute a series of Salish Sea NEMO model runs.
"""
from __future__ import absolute_import
import os
import salishsea_cmd.api
def main():
run_desc = base_run_description()
runs = ('all_forcing', 'ssh_only')
    tides = ('lateral', 'lateral')
    surface = ('surface', 'surface.nosurge')
    for run_id, tide_id, surface_id in zip(runs, tides, surface):
do_run(run_id, run_desc, tide_id, surface_id)
def do_run(run_id, run_desc, tide_id, surface_id):
run_desc['run_id'] = run_id
run_desc['namelists'][3] = (
'namelist.{}'.format(tide_id))
run_desc['namelists'][2] = (
'namelist.{}'.format(surface_id))
salishsea_cmd.api.run_in_subprocess(
run_id,
run_desc,
'iodef.xml',
os.path.join('/home/nksoonti/MEOPAR/SalishSea/results/storm_surges/revisions/feb2006/', run_id))
def base_run_description():
# Relative paths from SS-run-sets/SalishSea/storm_surges/new_config
run_desc = salishsea_cmd.api.run_description(
walltime='20:00:00',
NEMO_code='../../../../NEMO-code/',
forcing='../../../../NEMO-forcing/',
runs_dir='../../../../SalishSea/',
init_conditions=(
'/home/nksoonti/MEOPAR/SalishSea/results/spin-up/31jan9feb'),
)
run_desc['email'] = '[email protected]'
# Relative paths to namelist section files
run_desc['namelists'] = [
'namelist.feb2006.time',
'namelist.feb2006.domain',
'namelist.surface',
'namelist.lateral',
'namelist.bottom',
'namelist.tracers',
'namelist.dynamics',
'namelist.compute.6x14',
]
return run_desc
if __name__ == '__main__':
main()
|
py | b405c63df1278d62b81bd1a4cd46a295f0aa92e4 | # -*- coding: utf-8 -*-
"""
This runner is used only for test purposes and serves no production purpose
"""
from __future__ import absolute_import, print_function, unicode_literals
# Import python libs
import time
from salt.ext import six
from salt.ext.six.moves import range
def arg(*args, **kwargs):
"""
Output the given args and kwargs
Kwargs will be filtered for 'private' keynames.
"""
kwargs = dict((k, v) for k, v in six.iteritems(kwargs) if not k.startswith("__"))
ret = {
"args": args,
"kwargs": kwargs,
}
return ret
def raw_arg(*args, **kwargs):
"""
Output the given args and kwargs
"""
ret = {
"args": args,
"kwargs": kwargs,
}
return ret
def metasyntactic(locality="us"):
"""
Return common metasyntactic variables for the given locality
"""
lookup = {
"us": [
"foo",
"bar",
"baz",
"qux",
"quux",
"quuz",
"corge",
"grault",
"garply",
"waldo",
"fred",
"plugh",
"xyzzy",
"thud",
],
"uk": ["wibble", "wobble", "wubble", "flob"],
}
return lookup.get(locality, None)
def stdout_print():
"""
Print 'foo' and return 'bar'
"""
print("foo")
return "bar"
def sleep(s_time=10):
"""
    Sleep s_time seconds, then return True
"""
print(s_time)
time.sleep(s_time)
return True
def stream():
"""
Return True
"""
ret = True
for i in range(1, 100):
__jid_event__.fire_event(
{"message": "Runner is {0}% done".format(i)}, "progress"
)
time.sleep(0.1)
return ret
def get_opts():
"""
.. versionadded:: 2018.3.0
Return the configuration options of the master.
CLI Example:
.. code-block:: bash
salt-run test.get_opts
"""
return __opts__
|
py | b405c8b01efe8a9e5b06e71cb4d89063a4536eca | def predict(data_set, model, **kwargs):
"""
Produces predictions with a trained Keras model where inputs are arrays
:param DataSet data_set: A DataSet with a NumpyDataWrapper called "features"
:param model: A (fitted) Keras model
:param kwargs: Additional parameters to be passed to model.predict
:return: A NumpyDataWrapper with an array of predictions as its underlying
"""
prediction_array = model.predict(x=data_set.features.underlying, **kwargs)
from mercury_ml.common.data_wrappers.numpy import NumpyDataWrapper
return NumpyDataWrapper(underlying=prediction_array, field_names=data_set.targets.field_names)
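# --- Added usage sketch (not part of the original module): one way `predict` above
# could be called. The stand-in DataSet is hypothetical and only mimics the two
# attributes the function actually reads (features.underlying, targets.field_names);
# a real mercury_ml DataSet would normally be used, and any Keras-style model with a
# .predict method works.
def _example_predict_usage():
    import numpy as np
    from types import SimpleNamespace
    from tensorflow import keras
    data_set = SimpleNamespace(
        features=SimpleNamespace(underlying=np.random.rand(8, 4)),
        targets=SimpleNamespace(field_names=["y"]),
    )
    model = keras.Sequential([keras.layers.Dense(1)])  # built on first predict call
    predictions = predict(data_set, model, verbose=0)
    return predictions.underlying.shape  # expected: (8, 1)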
def predict_generator(data_set, model, **kwargs):
"""
Produces predictions with a trained Keras model where inputs are generators
:param DataSet data_set: A DataSet with a KerasIteratorFeaturesDataWrapper called "features"
:param model: A (fitted) Keras model
    :param kwargs: Additional parameters to be passed to model.predict_generator
:return: A NumpyDataWrapper with an array of predictions as its underlying
"""
prediction_array = model.predict_generator(generator=data_set.features.underlying, **kwargs)
from mercury_ml.common.data_wrappers.numpy import NumpyDataWrapper
return NumpyDataWrapper(underlying=prediction_array, field_names=data_set.features.underlying.get_labels_dummies()) |
py | b405c9848df9cf315d68d341bb603874b14d6f4b | # ++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
# Copyright (c) 2021-2021. Jason Cameron +
# All rights reserved. +
# This file is part of the edoC discord bot project , +
# and is released under the "MIT License Agreement". Please see the LICENSE +
# file that should have been included as part of this package. +
# ++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
from os import environ, listdir
from utils.default import edoC, config
config = config()
# TODO add a fully not erroring get_prefix
bot = edoC()
environ["JISHAKU_HIDE"] = "True"
environ["JISHAKU_NO_UNDERSCORE"] = "True"
NO_LOAD_COG = ''
#async def process_commands(self, message):
# ctx = await self.get_context(message, cls=Context)
#
# if ctx.command is not None and ctx.guild is not None:
# if message.author.id in self.banlist:
# await ctx.send("You are banned from using commands.")
#
# elif not self.ready:
# await ctx.send("I'm not ready to receive commands. Please wait a few seconds.")
#
# else:
# await self.invoke(ctx)
#
try:
bot.load_extension("jishaku")
for file in listdir("cogs"):
if NO_LOAD_COG:
if file.startswith(NO_LOAD_COG):
continue
if file.endswith(".py"):
name = file[:-3]
bot.load_extension(f"cogs.{name}")
except Exception as e:
    raise ChildProcessError("Problem with one of the cogs/utils") from e
try:
bot.run(config["token"], reconnect=True)
except Exception as e:
print(f"Error when logging in: {e}")
|
py | b405ca5c19bd60bffd27ebed33907aa4cbf83da9 | import os
import json
from hashlib import md5
from tornado import web
from notebook.utils import url_path_join
from notebook.base.handlers import IPythonHandler
__all__ = ['load_jupyter_server_extension']
STATIC_DIR = os.path.join(os.path.dirname(__file__), 'nbextension', 'static')
CONFIG = os.path.expanduser('~/.pyesasky')
class ESASkyFileHandler(IPythonHandler):
def get(self, filename):
filename = os.path.basename(filename)
# First we check if this is a standard file in the static directory
if os.path.exists(os.path.join(STATIC_DIR, filename)):
path = os.path.join(STATIC_DIR, filename)
else:
# If not, we open the config file which should contain a JSON
# dictionary with filenames and paths.
if not os.path.exists(CONFIG):
raise web.HTTPError(404)
with open(CONFIG) as f:
config = json.load(f)
if filename in config['paths']:
path = config['paths'][filename]
else:
raise web.HTTPError(404)
with open(path, 'rb') as f:
content = f.read()
self.finish(content)
def serve_file(path, extension=''):
if not os.path.exists(path):
raise ValueError("Path {0} does not exist".format(path))
hash = md5(path.encode('utf-8')).hexdigest() + extension
with open(CONFIG) as f:
config = json.load(f)
if hash not in config['paths']:
config['paths'][hash] = os.path.abspath(path)
with open(CONFIG, 'w') as f:
json.dump(config, f)
return '/esasky/' + hash
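# --- Added usage sketch (not part of the original module): serve_file registers a local
# file in the ~/.pyesasky config and returns the /esasky/<hash> URL path that
# ESASkyFileHandler above resolves back to the file. The path below is illustrative and
# assumes the config file has already been created by load_jupyter_server_extension.
def _example_serve_file_usage():
    url_path = serve_file('/tmp/example_catalogue.csv', extension='.csv')
    return url_path  # e.g. '/esasky/<md5-of-path>.csv'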
def load_jupyter_server_extension(nb_server_app):
web_app = nb_server_app.web_app
host_pattern = '.*$'
if not os.path.exists(CONFIG):
config = {'paths': {}}
with open(CONFIG, 'w') as f:
json.dump(config, f)
route_pattern = url_path_join(web_app.settings['base_url'], '/esasky/(.*)')
web_app.add_handlers(host_pattern, [(route_pattern, ESASkyFileHandler)])
|
py | b405cb276fdad9c4d85068b027d5609da5e551cd | from datetime import datetime
from google.protobuf.timestamp_pb2 import Timestamp
from th2_grpc_common.common_pb2 import MessageGroupBatch, MessageGroup, AnyMessage, Message, EventID, Value, \
MessageMetadata, MessageID, RawMessage, RawMessageMetadata
from th2_common.schema.message.impl.rabbitmq.parsed.rabbit_parsed_batch_sender import RabbitParsedBatchSender
from th2_common.schema.message.impl.rabbitmq.parsed.rabbit_parsed_batch_subscriber import RabbitParsedBatchSubscriber
from th2_common.schema.message.impl.rabbitmq.raw.rabbit_raw_batch_sender import RabbitRawBatchSender
from th2_common.schema.message.impl.rabbitmq.raw.rabbit_raw_batch_subscriber import RabbitRawBatchSubscriber
class TestCheckParsingMessageGroupBatch:
def test_parse_message_group_batch_to_message_batch_and_back(self):
start_time = datetime.now()
seconds = int(start_time.timestamp())
nanos = int(start_time.microsecond * 1000)
test_message = Message(parent_event_id=EventID(id='1'),
metadata=MessageMetadata(
id=MessageID(),
timestamp=Timestamp(seconds=seconds, nanos=nanos),
message_type='test_type',
properties={'property1': 'property1_value',
'property2': 'property2_value'},
protocol='protocol_name'
),
fields={'field1': Value(simple_value='value1'),
'field2': Value(simple_value='value2')})
messages = [AnyMessage(message=test_message), AnyMessage(message=test_message)]
groups = [MessageGroup(messages=messages)]
group_batch = MessageGroupBatch(groups=groups)
original_bytes = group_batch.SerializeToString()
value_from_bytes = RabbitParsedBatchSubscriber.value_from_bytes(original_bytes)
bytes_from_value = RabbitParsedBatchSender.value_to_bytes(value_from_bytes[0])
assert original_bytes == bytes_from_value
def test_parse_message_group_batch_to_raw_message_batch_and_back(self):
start_time = datetime.now()
seconds = int(start_time.timestamp())
nanos = int(start_time.microsecond * 1000)
test_message = RawMessage(metadata=RawMessageMetadata(id=MessageID(),
timestamp=Timestamp(seconds=seconds, nanos=nanos),
properties={'property1': 'property1_value',
'property2': 'property2_value'},
protocol='protocol_name'
),
body=str.encode('12345'))
messages = [AnyMessage(raw_message=test_message), AnyMessage(raw_message=test_message)]
groups = [MessageGroup(messages=messages)]
group_batch = MessageGroupBatch(groups=groups)
original_bytes = group_batch.SerializeToString()
value_from_bytes = RabbitRawBatchSubscriber.value_from_bytes(original_bytes)
bytes_from_value = RabbitRawBatchSender.value_to_bytes(value_from_bytes[0])
assert original_bytes == bytes_from_value
|
py | b405cbcd7a0219e4e33242f195a9b00e686e8dcc | from enum import Enum
# region Declaration of some implications
class ContinuousImplication(Enum):
"""
Object that stores the values of the most known implications defined in [0, 1]^2.
Note: Although the name of the object is ContinuousImplication, it does not mean that the defined implications are
continuous functions. The name is taken to differentiate them from discrete implication functions.
"""
LUKASIEWICZ = "continuous_lukasiewicz_implication"
GODEL = "continuous_godel_implication"
REICHENBACH = "continous_reichenbach_implication"
KLEENE_DIENES = "continous_kleenedienes_implication"
GOGUEN = "continuous_goguen_implication"
RESCHER = "continuous_rescher_implication"
YAGER = "continuous_yager_implication"
WEBER = "continuous_weber_implication"
FODOR = "continuous_fodor_implication"
# endregion
# region Implications
def get_continuous_implication(implication: ContinuousImplication, n: int):
"""
Returns a DiscreteFuzzyImplicationOperator object representing the selected implication.
Args:
        implication: A ContinuousImplication value, representing the chosen implication.
        n: An integer, representing the dimension of the domain where the implication is defined.
Returns:
A DiscreteFuzzyImplicationOperator object.
"""
pass
# endregion
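# --- Added illustrative sketch (not part of the library API): closed-form definitions of
# a few of the implications named in ContinuousImplication, written as plain functions on
# [0, 1]^2 so the formulas are concrete. The library presumably wraps such formulas in
# its own operator objects; these helpers are for reference only.
def _lukasiewicz_example(x: float, y: float) -> float:
    # I_LK(x, y) = min(1, 1 - x + y)
    return min(1.0, 1.0 - x + y)
def _godel_example(x: float, y: float) -> float:
    # I_GD(x, y) = 1 if x <= y, otherwise y
    return 1.0 if x <= y else y
def _kleene_dienes_example(x: float, y: float) -> float:
    # I_KD(x, y) = max(1 - x, y)
    return max(1.0 - x, y)
def _reichenbach_example(x: float, y: float) -> float:
    # I_RC(x, y) = 1 - x + x * y
    return 1.0 - x + x * y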
|
py | b405cc1fec62edd5eb3b3d6a459cdddb8ff93f51 | """
ASGI config for plotly_django_tutorial project.
It exposes the ASGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/2.2/howto/deployment/asgi/
"""
import os
from django.core.asgi import get_asgi_application
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'plotly_django_tutorial.settings')
application = get_asgi_application()
|
py | b405cd52a12ff4bdefb0e055f1c7fb0e5433781e | # -*- coding: utf-8 -*-
from __future__ import print_function
import scrapy
from scrapy.linkextractors import LinkExtractor
from scrapy.spiders import CrawlSpider, Rule
from phone_hunter.items import Phone
class SymphonySpider(CrawlSpider):
name = 'symphony'
allowed_domains = ['www.symphony-mobile.com']
start_urls = [
'https://www.symphony-mobile.com/product.php?cat=1&sub_cat=5'
]
rules = (
Rule(
LinkExtractor(
allow=r'/product-details.php\?id=\d+'
),
callback='parse_product',
follow=True),
Rule(
LinkExtractor(
restrict_css=('ul.pagination li a',)
),
follow=True),
)
def parse_product(self, response):
item = Phone()
item['model'] = response.css('h1.phonename::text').extract()
if type(item['model']) == list:
item['model'] = item['model'][0].strip()
item['price'] = response.css('h4.pTag::text').extract()[0].strip()
specs = response.xpath('//*[@id="spec"]/div/div[1]/div/table//tr')
for spec in specs:
feature, prop = spec.xpath('td/text()').extract()
item["_".join(feature.lower().split())] = prop.strip()
return item
|
py | b405cdc18e0bcf1074cd52ff04ab5f3a19d2e46c | import numpy as np
import mxnet as mx
from mxnet import nd, gluon
from ..utils.nn_architecture_utils import get_embed_sizes
class NumericBlock(gluon.HybridBlock):
""" Single Dense layer that jointly embeds all numeric and one-hot features """
def __init__(self, params, **kwargs):
super(NumericBlock, self).__init__(**kwargs)
with self.name_scope():
self.body = gluon.nn.Dense(params['numeric_embed_dim'], activation=params['activation'])
def hybrid_forward(self, F, x):
return self.body(x)
class EmbedBlock(gluon.HybridBlock):
""" Used to embed a single embedding feature. """
def __init__(self, embed_dim, num_categories, **kwargs):
super(EmbedBlock, self).__init__(**kwargs)
with self.name_scope():
self.body = gluon.nn.Embedding(input_dim=num_categories, output_dim=embed_dim,
weight_initializer=mx.init.Orthogonal(scale=0.1, rand_type='uniform')) # for Xavier-style: scale = np.sqrt(3/float(embed_dim))
def hybrid_forward(self, F, x):
return self.body(x)
class FeedforwardBlock(gluon.HybridBlock):
""" Standard Feedforward layers """
def __init__(self, params, num_net_outputs, **kwargs):
super(FeedforwardBlock, self).__init__(**kwargs)
layers = params['layers']
with self.name_scope():
self.body = gluon.nn.HybridSequential()
if params['use_batchnorm']:
self.body.add(gluon.nn.BatchNorm())
if params['dropout_prob'] > 0:
self.body.add(gluon.nn.Dropout(params['dropout_prob']))
for i in range(len(layers)):
layer_width = layers[i]
if layer_width < 1 or int(layer_width) != layer_width:
raise ValueError("layers must be ints >= 1")
self.body.add(gluon.nn.Dense(layer_width, activation=params['activation']))
if params['use_batchnorm']:
self.body.add(gluon.nn.BatchNorm())
if params['dropout_prob'] > 0:
self.body.add(gluon.nn.Dropout(params['dropout_prob']))
self.body.add(gluon.nn.Dense(num_net_outputs, activation=None))
def hybrid_forward(self, F, x):
return self.body(x)
class WideAndDeepBlock(gluon.HybridBlock):
""" Standard feedforward layers with a single skip connection from output directly to input (ie. deep and wide network).
"""
def __init__(self, params, num_net_outputs, **kwargs):
super(WideAndDeepBlock, self).__init__(**kwargs)
self.deep = FeedforwardBlock(params, num_net_outputs, **kwargs)
with self.name_scope(): # Skip connection, ie. wide network branch
self.wide = gluon.nn.Dense(num_net_outputs, activation=None)
def hybrid_forward(self, F, x):
return self.deep(x) + self.wide(x)
class EmbedNet(gluon.Block): # TODO: hybridize?
""" Gluon net with input layers to handle numerical data & categorical embeddings
which are concatenated together after input layer and then passed into feedforward network.
If architecture_desc != None, then we assume EmbedNet has already been previously created,
and we create a new EmbedNet based on the provided architecture description
(thus ignoring train_dataset, params, num_net_outputs).
"""
def __init__(self, train_dataset=None, params=None, num_net_outputs=None, architecture_desc=None, ctx=None, **kwargs):
if (architecture_desc is None) and (train_dataset is None or params is None or num_net_outputs is None):
raise ValueError("train_dataset, params, num_net_outputs cannot = None if architecture_desc=None")
super(EmbedNet, self).__init__(**kwargs)
        if architecture_desc is None: # Adaptively specify network architecture based on training dataset
self.from_logits = False
self.has_vector_features = train_dataset.has_vector_features()
self.has_embed_features = train_dataset.num_embed_features() > 0
if self.has_embed_features:
num_categs_per_feature = train_dataset.getNumCategoriesEmbeddings()
embed_dims = get_embed_sizes(train_dataset, params, num_categs_per_feature)
else: # Ignore train_dataset, params, etc. Recreate architecture based on description:
self.architecture_desc = architecture_desc
self.has_vector_features = architecture_desc['has_vector_features']
self.has_embed_features = architecture_desc['has_embed_features']
self.from_logits = architecture_desc['from_logits']
num_net_outputs = architecture_desc['num_net_outputs']
params = architecture_desc['params']
if self.has_embed_features:
num_categs_per_feature = architecture_desc['num_categs_per_feature']
embed_dims = architecture_desc['embed_dims']
# Define neural net parameters:
if self.has_vector_features:
self.numeric_block = NumericBlock(params)
if self.has_embed_features:
self.embed_blocks = gluon.nn.HybridSequential()
for i in range(len(num_categs_per_feature)):
self.embed_blocks.add(EmbedBlock(embed_dims[i], num_categs_per_feature[i]))
if params['network_type'] == 'feedforward':
self.output_block = FeedforwardBlock(params, num_net_outputs)
elif params['network_type'] == 'widedeep':
self.output_block = WideAndDeepBlock(params, num_net_outputs)
else:
raise ValueError("unknown network_type specified: %s" % params['network_type'])
y_range = params['y_range'] # Used specifically for regression. = None for classification.
self.y_constraint = None # determines if Y-predictions should be constrained
if y_range is not None:
if y_range[0] == -np.inf and y_range[1] == np.inf:
self.y_constraint = None # do not worry about Y-range in this case
elif y_range[0] >= 0 and y_range[1] == np.inf:
self.y_constraint = 'nonnegative'
elif y_range[0] == -np.inf and y_range[1] <= 0:
self.y_constraint = 'nonpositive'
else:
self.y_constraint = 'bounded'
self.y_lower = nd.array(params['y_range'][0]).reshape(1,)
self.y_upper = nd.array(params['y_range'][1]).reshape(1,)
if ctx is not None:
self.y_lower = self.y_lower.as_in_context(ctx)
self.y_upper = self.y_upper.as_in_context(ctx)
self.y_span = self.y_upper - self.y_lower
if architecture_desc is None: # Save Architecture description
self.architecture_desc = {'has_vector_features': self.has_vector_features,
'has_embed_features': self.has_embed_features,
'params': params, 'num_net_outputs': num_net_outputs,
'from_logits': self.from_logits}
if self.has_embed_features:
self.architecture_desc['num_categs_per_feature'] = num_categs_per_feature
self.architecture_desc['embed_dims'] = embed_dims
def forward(self, data_batch):
if self.has_vector_features:
numerical_data = data_batch['vector'] # NDArray
numerical_activations = self.numeric_block(numerical_data)
input_activations = numerical_activations
if self.has_embed_features:
embed_data = data_batch['embed'] # List
# TODO: Remove below lines or write logic to switch between using these lines and the multithreaded version once multithreaded version is optimized
embed_activations = self.embed_blocks[0](embed_data[0])
for i in range(1, len(self.embed_blocks)):
embed_activations = nd.concat(embed_activations,
self.embed_blocks[i](embed_data[i]), dim=2)
# TODO: Optimize below to perform better before using
# lock = threading.Lock()
# results = {}
#
# def _worker(i, results, embed_block, embed_data, is_recording, is_training, lock):
# if is_recording:
# with mx.autograd.record(is_training):
# output = embed_block(embed_data)
# else:
# output = embed_block(embed_data)
# output.wait_to_read()
# with lock:
# results[i] = output
#
# is_training = mx.autograd.is_training()
# is_recording = mx.autograd.is_recording()
# threads = [threading.Thread(target=_worker,
# args=(i, results, embed_block, embed_data,
# is_recording, is_training, lock),
# )
# for i, (embed_block, embed_data) in
# enumerate(zip(self.embed_blocks, embed_data))]
#
# for thread in threads:
# thread.start()
# for thread in threads:
# thread.join()
#
# embed_activations = []
# for i in range(len(results)):
# output = results[i]
# embed_activations.append(output)
#
# #embed_activations = []
# #for i in range(len(self.embed_blocks)):
# # embed_activations.append(self.embed_blocks[i](embed_data[i]))
# embed_activations = nd.concat(*embed_activations, dim=2)
embed_activations = embed_activations.flatten()
if not self.has_vector_features:
input_activations = embed_activations
else:
input_activations = nd.concat(embed_activations, input_activations)
if self.y_constraint is None:
return self.output_block(input_activations)
else:
unscaled_pred = self.output_block(input_activations)
if self.y_constraint == 'nonnegative':
return self.y_lower + nd.abs(unscaled_pred)
elif self.y_constraint == 'nonpositive':
return self.y_upper - nd.abs(unscaled_pred)
else:
"""
print("unscaled_pred",unscaled_pred)
print("nd.sigmoid(unscaled_pred)", nd.sigmoid(unscaled_pred))
print("self.y_span", self.y_span)
print("self.y_lower", self.y_lower)
print("self.y_lower.shape", self.y_lower.shape)
print("nd.sigmoid(unscaled_pred).shape", nd.sigmoid(unscaled_pred).shape)
"""
return nd.sigmoid(unscaled_pred) * self.y_span + self.y_lower
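# --- Added smoke-test sketch (not part of the original module): a minimal shape check of
# the small building blocks defined above, on toy dimensions chosen arbitrarily.
if __name__ == '__main__':
    _numeric = NumericBlock({'numeric_embed_dim': 8, 'activation': 'relu'})
    _numeric.initialize()
    print(_numeric(nd.random.uniform(shape=(2, 5))).shape)  # -> (2, 8)
    _embed = EmbedBlock(embed_dim=4, num_categories=10)
    _embed.initialize()
    print(_embed(nd.array([[1], [7]])).shape)  # -> (2, 1, 4)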
|
py | b405cdf7b653878aabc3dbd1bb26b03bb3616234 | import numpy as np
default_Q_t = np.identity(3) * np.random.rand(3, 1) * 0.1
default_R = np.identity(3) * np.random.rand(3, 1) * 0.1
class kalman_filter():
def __init__(self, Q_t = default_Q_t, R = default_R):
"""
        :param Q_t: Covariance matrix defining noise of motion model delta x
        :param R: Covariance matrix defining noise of motion model epsilon
"""
self.Q_t = Q_t
self.R = R
def run_filter(self, state, covariance, control, observation):
"""
        :param state: Previous belief state
        :param covariance: Covariance matrix
        :param control: Kinematics (control) values
        :param observation: Measurement/observation vector
        :return: Predicted state, corrected state and corrected covariance
"""
        # Initializing distributions
A = np.identity(3)
B = np.array([[np.cos(state[2]), 0],
[np.sin(state[2]), 0],
[0, 1]])
C = np.identity(3)
# Prediction
state = np.matmul(A, state) + np.matmul(B, control) # mu_t
covariance = np.matmul(np.matmul(A, covariance), A.T) + self.R # sum_t
# Correction
        K_t = np.matmul(np.matmul(covariance, C.T), np.linalg.inv(np.matmul(np.matmul(C, covariance), C.T) + self.Q_t.T))  # Kalman gain
try:
new_state = state + np.matmul(K_t, (observation - np.matmul(C, state)))
except:
print("ERROR")
new_covariance = np.matmul((np.identity(3) - np.matmul(K_t, C)), covariance)
return state, new_state, new_covariance
# def kalman_filter(state, covariance, control, observation, Q_t = default_Q_t, R = default_R):
# """
# :param state: Previous believe state
# :param covariance: Covariance matrix
# :param control: kinematics values
# :param observation:
# :param Q_t: Covariance matrix defining noise of motion model deltax§
# :param R: Covariance matrix defining noise of motion model epsilon
# :return: Corrected state and covariance
# """
#
# # Initialing distributions
# A = np.identity(3)
# B = np.array([[np.cos(state[2]), 0],
# [np.sin(state[2]), 0],
# [0, 1]])
# C = np.identity(3)
#
# # Prediction
# state = np.matmul(A, state) + np.matmul(B, control) # mu_t
# covariance = np.matmul(np.matmul(A, covariance), A.T) + R # sum_t
#
# # Correction
# K_t = covariance * C.T * np.linalg.inv(np.matmul(np.matmul(C, covariance), C.T) + Q_t.T) # Kalman gain
# try:
# new_state = state + np.matmul(K_t, (observation - np.matmul(C, state)))
# except:
# print("ERROR")
# new_covariance = np.matmul((np.identity(3) - np.matmul(K_t, C)), covariance)
#
# return state, new_state, new_covariance
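# --- Added usage sketch (not part of the original module): a single predict/correct step
# of the filter above on toy values. The state is (x, y, theta) and the control is
# (linear velocity, angular velocity); all numbers are arbitrary examples.
if __name__ == '__main__':
    kf = kalman_filter()
    state = np.array([0.0, 0.0, 0.0])
    covariance = np.identity(3) * 0.1
    control = np.array([1.0, 0.1])
    observation = np.array([0.95, 0.05, 0.09])
    predicted, corrected, new_covariance = kf.run_filter(state, covariance, control, observation)
    print(predicted, corrected, new_covariance, sep='\n')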
|
py | b405cf7ac37791049465ddb424f69f53a154fe71 | import os
UTIL_DIR = os.path.dirname(__file__)
SRC_DIR = os.path.dirname(UTIL_DIR)
ROOT_DIR = os.path.dirname(SRC_DIR)
DATA_DIR = os.path.join(ROOT_DIR, "data")
OUTPUT_DIR = os.path.join(ROOT_DIR, "output")
GUI_DIR = os.path.join(SRC_DIR, "gui")
GUI_STATIC_DIR = os.path.join(GUI_DIR, "static")
CTL_PANEL_FILE = os.path.join(ROOT_DIR, "control_panel.xlsx")
print(f"working with data found in {DATA_DIR}")
|
py | b405cfbbdb6608b8d24ff4d85ebdaeb843075cff | import logging
class ConnRefCounter(object):
"""A simple connection reference counter to keep
track of active connections and enable reuse."""
def __init__(self):
self._counter = {}
self._logr = logging.getLogger(__name__)
def increase(self, conn_id, ref_id):
"""Increases the reference counter for the connection."""
if conn_id not in self._counter:
self._counter[conn_id] = set()
self._counter[conn_id].add(ref_id)
self._logr.debug("Added ref {} to conn <{}> (current: {})".format(
ref_id, conn_id, len(self._counter[conn_id])))
def decrease(self, conn_id, ref_id):
"""Decreases the reference counter for the connection."""
if conn_id not in self._counter:
self._logr.warning("Attempted to decrease ref of unknown conn: {}".format(conn_id))
return
try:
self._counter[conn_id].remove(ref_id)
self._logr.debug("Removed ref {} from conn <{}> (current: {})".format(
ref_id, conn_id, len(self._counter[conn_id])))
except KeyError:
self._logr.warning("Attempted to remove unknown reference: {}".format(ref_id))
def has_any(self, conn_id):
"""Returns True if the connection has any references pointing to it."""
        return conn_id in self._counter and len(self._counter[conn_id]) > 0
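# --- Added usage sketch (not part of the original module): reference counting around a
# shared connection so it is only torn down once the last holder releases it. The
# identifiers are arbitrary examples.
if __name__ == '__main__':
    logging.basicConfig(level=logging.DEBUG)
    counter = ConnRefCounter()
    counter.increase('backend-conn', 'client-a')
    counter.increase('backend-conn', 'client-b')
    counter.decrease('backend-conn', 'client-a')
    print(counter.has_any('backend-conn'))  # True: 'client-b' still holds a reference
    counter.decrease('backend-conn', 'client-b')
    print(counter.has_any('backend-conn'))  # False: safe to close the connection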
|
py | b405d16e7adf626668de582a5a37683f9d04317c | from gaphor import UML
from gaphor.tests import TestCase
from gaphor.UML.classes import AssociationItem, ClassItem
class GaphasTest(TestCase):
services = TestCase.services + ["sanitizer_service", "undo_manager"]
def test_remove_class_with_association(self):
c1 = self.create(ClassItem, UML.Class)
c1.name = "klassitem1"
c2 = self.create(ClassItem, UML.Class)
c2.name = "klassitem2"
a = self.create(AssociationItem)
assert len(self.diagram.canvas.get_all_items()) == 3
self.connect(a, a.head, c1)
self.connect(a, a.tail, c2)
assert a.subject
assert self.element_factory.lselect(UML.Association)[0] is a.subject
c1.unlink()
self.diagram.canvas.update_now()
|
py | b405d28452db07ae4496467be9431f09a800f73c | # -*- coding: utf-8 -*-
"""
Created on Wed May 17 01:21:53 2017
@author: konodera
"""
import warnings
warnings.filterwarnings("ignore")
import pandas as pd
import numpy as np
from glob import glob
import os
from tqdm import tqdm
from sklearn.model_selection import KFold
#import pickle
from time import time
from datetime import datetime
import gc
#from itertools import chain
# =============================================================================
# def
# =============================================================================
def start(fname):
global st_time
st_time = time()
print("""
#==============================================================================
# START!!! {} PID: {} time: {}
#==============================================================================
""".format( fname, os.getpid(), datetime.today() ))
# send_line(f'START {fname} time: {elapsed_minute():.2f}min')
return
def end(fname):
print("""
#==============================================================================
# SUCCESS !!! {}
#==============================================================================
""".format(fname))
print('time: {:.2f}min'.format( elapsed_minute() ))
# send_line(f'FINISH {fname} time: {elapsed_minute():.2f}min')
return
def elapsed_minute():
return (time() - st_time)/60
def mkdir_p(path):
try:
os.stat(path)
except:
os.mkdir(path)
def to_pickles(df, path, split_size=3, inplace=True):
"""
path = '../output/mydf'
    write '../output/mydf/0.p'
'../output/mydf/1.p'
'../output/mydf/2.p'
"""
if inplace==True:
df.reset_index(drop=True, inplace=True)
else:
df = df.reset_index(drop=True)
gc.collect()
mkdir_p(path)
kf = KFold(n_splits=split_size)
for i, (train_index, val_index) in enumerate(tqdm(kf.split(df))):
df.iloc[val_index].to_pickle(str(path) + '/' + str(i).zfill(3) + '.p')
return
def read_pickles(path, col=None):
if col is None:
df = pd.concat([pd.read_pickle(f) for f in tqdm(sorted(glob(path+'/*')))])
else:
df = pd.concat([pd.read_pickle(f)[col] for f in tqdm(sorted(glob(path+'/*')))])
return df
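# --- Added usage sketch (not part of the original module): round-trip a DataFrame
# through the split-pickle helpers above. The output path is illustrative and assumes
# its parent directory exists.
def _example_pickle_round_trip():
    df = pd.DataFrame({'order_id': range(10), 'value': np.random.rand(10)})
    to_pickles(df, '../output/mydf', split_size=3)  # writes 000.p, 001.p, 002.p
    restored = read_pickles('../output/mydf')
    return restored.shape  # -> (10, 2)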
def reduce_memory(df, ix_start=0):
df.fillna(-1, inplace=True)
df_ = df.sample(9999, random_state=71)
## int
col_int8 = []
col_int16 = []
col_int32 = []
for c in tqdm(df.columns[ix_start:], miniters=20):
if df[c].dtype=='O':
continue
if (df_[c] == df_[c].astype(np.int8)).all():
col_int8.append(c)
elif (df_[c] == df_[c].astype(np.int16)).all():
col_int16.append(c)
elif (df_[c] == df_[c].astype(np.int32)).all():
col_int32.append(c)
df[col_int8] = df[col_int8].astype(np.int8)
df[col_int16] = df[col_int16].astype(np.int16)
df[col_int32] = df[col_int32].astype(np.int32)
## float
col = [c for c in df.dtypes[df.dtypes==np.float64].index if '_id' not in c]
df[col] = df[col].astype(np.float32)
gc.collect()
#==============================================================================
# main
#==============================================================================
if __name__ == "__main__":
files = sorted(glob('../input/*'))
data = {}
for f in files:
if os.path.isfile(f):
data[f.split('/')[-1]] = pd.read_csv(f)
print("""
#==============================================================================
# SUCCESS !!! {}
#==============================================================================
""".format(__file__))
|
py | b405d436c02374b40ca530745c7c06b8eb90ab9d | # This file is part of the MapProxy project.
# Copyright (C) 2013 Omniscale <http://omniscale.de>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import
import threading
import hashlib
from io import BytesIO
from mapproxy.image import ImageSource
from mapproxy.cache.tile import Tile
from mapproxy.cache.base import TileCacheBase, tile_buffer, CacheBackendError
try:
import riak
except ImportError:
riak = None
except TypeError:
import warnings
warnings.warn("riak version not compatible with this Python version")
riak = None
import logging
log = logging.getLogger(__name__)
class UnexpectedResponse(CacheBackendError):
pass
class RiakCache(TileCacheBase):
def __init__(self, nodes, protocol, bucket, tile_grid, use_secondary_index=False, timeout=60):
if riak is None:
raise ImportError("Riak backend requires 'riak' package.")
self.nodes = nodes
self.protocol = protocol
self.lock_cache_id = 'riak-' + hashlib.md5(bucket.encode('utf-8')).hexdigest()
self.request_timeout = timeout * 1000
self.bucket_name = bucket
self.tile_grid = tile_grid
self.use_secondary_index = use_secondary_index
self._db_conn_cache = threading.local()
@property
def connection(self):
if not getattr(self._db_conn_cache, 'connection', None):
self._db_conn_cache.connection = riak.RiakClient(protocol=self.protocol, nodes=self.nodes)
return self._db_conn_cache.connection
@property
def bucket(self):
if not getattr(self._db_conn_cache, 'bucket', None):
self._db_conn_cache.bucket = self.connection.bucket(self.bucket_name)
return self._db_conn_cache.bucket
def _get_object(self, coord):
(x, y, z) = coord
key = '%(z)d_%(x)d_%(y)d' % locals()
obj = False
try:
obj = self.bucket.get(key, r=1, timeout=self.request_timeout)
except Exception as e:
log.warning('error while requesting %s: %s', key, e)
if not obj:
obj = self.bucket.new(key=key, data=None, content_type='application/octet-stream')
return obj
def _get_timestamp(self, obj):
metadata = obj.usermeta
timestamp = metadata.get('timestamp')
        if timestamp is not None:
return float(timestamp)
obj.usermeta = {'timestamp': '0'}
return 0.0
def is_cached(self, tile):
return self.load_tile(tile, True)
def _store_bulk(self, tiles):
for tile in tiles:
res = self._get_object(tile.coord)
with tile_buffer(tile) as buf:
data = buf.read()
res.encoded_data = data
res.usermeta = {
'timestamp': str(tile.timestamp),
'size': str(tile.size),
}
if self.use_secondary_index:
x, y, z = tile.coord
res.add_index('tile_coord_bin', '%02d-%07d-%07d' % (z, x, y))
try:
res.store(w=1, dw=1, pw=1, return_body=False, timeout=self.request_timeout)
except riak.RiakError as ex:
log.warning('unable to store tile: %s', ex)
return False
return True
def store_tile(self, tile):
if tile.stored:
return True
return self._store_bulk([tile])
def store_tiles(self, tiles):
tiles = [t for t in tiles if not t.stored]
return self._store_bulk(tiles)
def load_tile_metadata(self, tile):
if tile.timestamp:
return
# is_cached loads metadata
self.load_tile(tile, True)
def load_tile(self, tile, with_metadata=False):
if tile.timestamp is None:
tile.timestamp = 0
if tile.source or tile.coord is None:
return True
res = self._get_object(tile.coord)
if res.exists:
tile_data = BytesIO(res.encoded_data)
tile.source = ImageSource(tile_data)
if with_metadata:
tile.timestamp = self._get_timestamp(res)
tile.size = len(res.encoded_data)
return True
return False
def remove_tile(self, tile):
if tile.coord is None:
return True
res = self._get_object(tile.coord)
if not res.exists:
# already removed
return True
try:
res.delete(w=1, r=1, dw=1, pw=1, timeout=self.request_timeout)
except riak.RiakError as ex:
log.warning('unable to remove tile: %s', ex)
return False
return True
def _fill_metadata_from_obj(self, obj, tile):
tile_md = obj.usermeta
timestamp = tile_md.get('timestamp')
if timestamp:
tile.timestamp = float(timestamp)
def _key_iterator(self, level):
"""
Generator for all tile keys in `level`.
"""
# index() returns a list of all keys so we check for tiles in
# batches of `chunk_size`*`chunk_size`.
grid_size = self.tile_grid.grid_sizes[level]
chunk_size = 256
        for x in range(grid_size[0] // chunk_size):
start_x = x * chunk_size
end_x = start_x + chunk_size - 1
            for y in range(grid_size[1] // chunk_size):
start_y = y * chunk_size
end_y = start_y + chunk_size - 1
query = self.bucket.get_index('tile_coord_bin',
'%02d-%07d-%07d' % (level, start_x, start_y),
'%02d-%07d-%07d' % (level, end_x, end_y))
for link in query.run():
yield link.get_key()
def remove_tiles_for_level(self, level, before_timestamp=None):
bucket = self.bucket
client = self.connection
for key in self._key_iterator(level):
if before_timestamp:
obj = self.bucket.get(key, r=1)
dummy_tile = Tile((0, 0, 0))
self._fill_metadata_from_obj(obj, dummy_tile)
if dummy_tile.timestamp < before_timestamp:
obj.delete()
else:
riak.RiakObject(client, bucket, key).delete()
|
py | b405d467f926f04a4ffc4ba001ff4ec33db78ae3 | import led_panel
import numpy
import time
x = 0
y = 0
while True:
frame = numpy.zeros((led_panel.height, led_panel.width), dtype=numpy.uint8)
frame[y, x] = 255
led_panel.send(brightness=50, packed_frame=led_panel.pack(frame))
if x < led_panel.width - 1:
x += 1
else:
x = 0
if y < led_panel.height - 1:
y += 1
else:
y = 0
|
py | b405d484b61c0c1ee761e046be98e3403469b4e1 | mypi = 3.14
def add(a, b):
return a + b
def area(r):
return mypi * r * r
print(area(4.0)) |
py | b405d4fb5d6c98d5787cc88dc895b9060684e0e9 | """About package."""
|
py | b405d50a2070ac46a28b8e51d9f27c27e1c2d8c6 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
{
"name": u"Módulo de Documentos (UD)",
"version": "1.1",
"category": u"Documentos",
"description": u"""Módulo de Documentos
====================
Permite a criação de documentos personalizáveis além de dar acesso à setores específicos.
Obs.: Ainda não oferece suporte para envio de E-mails.
Instalação do WeasyPrint:
-------------------------
* sudo apt-get install python-dev build-essential
* sudo apt-get install python-pip python-lxml libcairo2 libpango1.0-0 libgdk-pixbuf2.0-0 libffi-dev shared-mime-info
* sudo pip install cffi lxml html5lib cairocffi tinycss cssselect cairosvg
* sudo pip install WeasyPrint
Se necessário, instale: sudo apt-get install python-setuptools
Maiores informações, acesse: http://weasyprint.org/docs/install/""",
"author": u"Cloves Oliveira",
"data": ["security/ud_documentos_security.xml",
"security/ir.model.access.csv",
"ud_documentos_view.xml",
],
"depends": ["base", "ud"],
"installable": True,
"auto_install": False,
# "application": True,
"css": ["static/src/css/documentos.css"],
"js": [],
"qweb": [],
"update_xml":[],
}
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
|
py | b405d50e9523f05191a573068290a1dd685bfcd5 | import random
import os
from typing import Type
menu = """
===================================================================================================================================
===================================================================================================================================
(/) (/)
(/) (/)
(/) (/)
(/) 7MMF' `7MMF' (/)
(/) MM MM (/)
(/) MM MM ,6"Yb. `7MMpMMMb. .P"Ybmmm `7MMpMMMb.pMMMb. ,6"Yb. `7MMpMMMb. (/)
(/)) MMmmmmmmMM 8) MM MM MM :MI I8 MM MM MM 8) MM MM MM (/)
(/)(/) MM MM ,pm9MM MM MM WmmmP" MM MM MM ,pm9MM MM MM (/)(/)
(/)'`(/) MM MM 8M MM MM MM 8M MM MM MM 8M MM MM MM (/)'`(/)
(/) (/) JMML. .JMML.`Moo9^Yo..JMML JMML. YMMMMMb .JMML JMML JMML.`Moo9^Yo..JMML JMML. (/) (/)
(/) (/) 6' dP (/) (/)
(/) (/) Ybmmmd' (/) (/)
(/) (/) (/) (/)
(/) (/) ╔═════════════════════════════════════╗ (/) (/)
(/)(/) Desarrollado por Carlos Valencia 🦊 (/)(/)
`""` ╚═════════════════════════════════════╝ `""`
===================================================================================================================================
===================================================================================================================================
Bienvenido al juego del ahorcado!!!
En este juego tendrás que adivinar una palabra que la computadora elegió al azar.
Al iniciar el juego contars con 7 vidas ❤️❤️❤️❤️❤️❤️❤️
Por cada intento fallido perderás una vida, elige con cuidado 👀
Buena suerte 🍀
"""
IMAGES = [ """
____
|/ |
| (_)
| /|\\
| |
| | |
|
|_____
""",
"""
____
|/ |
| (_)
| \|/
| |
    |   / \\
|
|_____
""",
"""
____
|/ |
| (_)
| \|/
| |
| /
|
|_____
""",
"""
____
|/ |
| (_)
| \|/
| |
|
|
|_____
""",
"""
____
|/ |
| (_)
| \|
| |
|
|
|_____
""",
"""
____
|/ |
| (_)
| |
| |
|
|
|_____
""",
"""
____
|/ |
| (_)
|
|
|
|
|_____
""",
"""
____
|/ |
|
|
|
|
|
|_____
""",
]
win = """
.--------.
.: : : :___`. sSSSSs .S_SSSs .S_sSSs .S_SSSs sSSs sdSS_SSSSSSbs sSSs
.'!!::::: \\_\ `. d%%%%SP .SS~SSSSS .SS~YS%%b .SS~SSSSS d%%SP YSSS~S%SSSSSP d%%SP
/%O!!::::::::\\_\. \ d%S' S%S SSSS S%S `S%b S%S SSSS d%S' S%S d%S'
/%%O!!::::::::: : . \ S%S S%S S%S S%S S%S S%S S%S S%| S%S S%S
|%%OO!!::::::::::: : . | S&S S%S SSSS%S S%S S&S S%S SSSS%S S&S S&S S&S
|%%OO!!::::::::::::: :| S&S S&S SSS%S S&S S&S S&S SSS%S Y&Ss S&S S&S_Ss
|%%OO!!!::::::::::::: :| S&S S&S S&S S&S S&S S&S S&S `S&&S S&S S&S~SP
\%%OO!!!:::::::::::: :| S&S sSSs S&S S&S S&S S&S S&S S&S `S*S S&S S&S
\%%OO!!!::::::::::::/ S*b `S%% S*S S&S S*S S*S S*S S&S l*S S*S S*b
\%OO!!!!::::::::::/ S*S S% S*S S*S S*S S*S S*S S*S .S*P S*S S*S.
;%%OO!!!!!!:::::' SS_sSSS S*S S*S S*S S*S S*S S*S sSS*S S*S SSSbs
`%%%OO!!!!!!:' Y~YSSY SSS S*S S*S SSS SSS S*S YSS' S*S YSSP
`%%%OO!%%' SP SP SP SP
`%%%%' Y Y Y Y
/__\`-. =================================================================================
/ =================================================================================
(
       \\
"""
"""
lose = """
...
;::::;
;::::; :;
;:::::' :; .S_sSSs sSSs .S_sSSs .S_sSSs .S sSSs sdSS_SSSSSSbs sSSs
;:::::; ;. .SS~YS%%b d%%SP .SS~YS%%b .SS~YS%%b .SS d%%SP YSSS~S%SSSSSP d%%SP
,:::::' ; OOO S%S `S%b d%S' S%S `S%b S%S `S%b S%S d%S' S%S d%S'
::::::; ; OOOOO S%S S%S S%S S%S S%S S%S S%S S%S S%| S%S S%S
;:::::; ; OOOOOOOO S%S d*S S&S S%S d*S S%S S&S S&S S&S S&S S&S
,;::::::; ;' / OOOOOOO S&S .S*S S&S_Ss S&S .S*S S&S S&S S&S Y&Ss S&S S&S_Ss
;:::::::::`. ,,,;. / / DOOOOOO S&S_sdSSS S&S~SP S&S_sdSSS S&S S&S S&S `S&&S S&S S&S~SP
.';:::::::::::::::::;, / / DOOOO S&S~YSSY S&S S&S~YSY%b S&S S&S S&S `S*S S&S S&S
,::::::;::::::;;;;::::;, / / DOOO S*S S*b S*S `S%b S*S d*S S*S l*S S*S S*b
;`::::::`'::::::;;;::::: ,#/ / DOOO S*S S*S. S*S S%S S*S .S*S S*S .S*P S*S S*S.
:`:::::::`;::::::;;::: ;::# / DOOO S*S SSSbs S*S S&S S*S_sdSSS S*S sSS*S S*S SSSbs
::`:::::::`;:::::::: ;::::# / DOO S*S YSSP S*S SSS SSS~YSSY S*S YSS' S*S YSSP
`:`:::::::`;:::::: ;::::::#/ DOO SP SP SP SP
:::`:::::::`;; ;:::::::::## OO Y Y Y Y
::::`:::::::`;::::::::;:::# OO =================================================================================
`:::::`::::::::::::;'`:;::# O =================================================================================
`:::::`::::::::;' / / `:#
::::::`:::::;' / / `#
"""
def word_transformation():
replacements = (
("á", "a"),
("é", "e"),
("í", "i"),
("ó", "o"),
("ú", "u")
)
with open("./archivos/data.txt", "r", encoding="utf-8") as f:
words = [i.replace("\n", "") for i in f]
word_selected = random.choice(words)
for a, b in replacements:
word_selected = word_selected.replace(a, b)
return word_selected
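# e.g. a picked word such as "murciélago" is returned as "murcielago"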
def run():
attemps = 7
word_selected = word_transformation()
spaces = ["_"] * len(word_selected)
while True:
os.system("clear")
print(menu)
for character in spaces:
print(character, end=" ")
print(IMAGES[attemps])
print("Te quedan", attemps, "vidas ❤️")
try:
letter = input("Ingresa una letra y presiona Enter: ").lower()
assert letter.isalpha(), input("¡Solo se puede ingresar letras! 👀, Presiona la tecla Enter para volver a ingresar un valor.")
assert len(letter) == 1, input("¡Solo se puede ingresar una letra a la vez! 👀, Presiona la tecla Enter para volver a ingresar un valor.")
except AssertionError as ae:
print(ae)
continue
found = False
for idx, character in enumerate(word_selected):
if character == letter:
spaces[idx] = letter
found = True
if not found:
attemps -= 1
if "_" not in spaces:
os.system("clear")
print(win)
print("Felicidades!!! encontraste la palabra 🦊", word_selected, "🦊")
            input()
            break
if attemps == 0:
os.system("clear")
print(lose)
print("Oh oh!!! la palabra que debías adivinar era 😲", word_selected, "😲")
            input()
            break
if __name__ == "__main__":
run() |
py | b405d7ae3b8f69922f2e55cab49976d5615e1fdb | import argparse
import os
import re
def get_imports(lines):
regex_packages = [
re.compile(r'^\s*import\s+(?P<p>[\w\d_]+)'),
re.compile(r'^\s*from\s+(?P<p>[\w\d_]+)')
]
imports = []
for line in lines:
for search in regex_packages:
imports.extend(re.findall(search, line))
return imports
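# Example (illustrative):
#   get_imports(["import os", "from sys import path"]) -> ["os", "sys"]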
def main():
parser = argparse.ArgumentParser()
parser.add_argument('entry', type=argparse.FileType('r'))
args = parser.parse_args()
entry_path = os.path.abspath(args.entry.name)
print('Entry file:\t', entry_path)
entry_dir = os.path.dirname(entry_path)
print('Entry dir:\t', entry_dir)
entry_point_lines = [line.strip() for line in args.entry.readlines()]
print('Imports:\t', get_imports(entry_point_lines))
if __name__ == '__main__':
main()
|
py | b405d7e3e32bf6926b236bcc8f68a5f8267c6372 | import itertools
import os
import mingus.extra.lilypond as LilyPond
from mingus.core import progressions
from mingus.containers import Bar,Track
def chunks(lst, n):
"""Yield successive n-sized chunks from lst."""
for i in range(0, len(lst), n):
yield lst[i:i + n]
#basic blues progression
my_progression = [
"I", "I", "I", "I",
"IV", "IV", "I", "I",
"V", "V", "I", "I",
]
#getting the chords for the progression
my_chords = progressions.to_chords(my_progression)
#add a 4th note on each chord
my_chords = [ my_chord + [my_chord[1]] for my_chord in my_chords ]
#grouping notes as 4x4
all_notes = list(itertools.chain(*my_chords))
all_notes_4x4 = chunks(all_notes,4)
#generating the track
my_track = Track()
for my4notes in all_notes_4x4:
my_bar = Bar()
for my_note in my4notes:
my_bar + my_note
my_track + my_bar
#exporting
my_lilypond = LilyPond.from_Track(my_track)
_folder = "Scores"
_file = "poc"
_path = os.path.join(_folder,_file)
LilyPond.to_png(my_lilypond, _path ) |
py | b405d88c644540d0daf10ddc1e16656d92755364 | # Copyright (c) Facebook, Inc. and its affiliates.
# Modifications Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import math
import torch
from fairseq import search, utils
from fairseq.data import data_utils
from fairseq.models import FairseqIncrementalDecoder
class SequenceGenerator(object):
def __init__(
self,
tgt_dict,
beam_size=1,
max_len_a=0,
max_len_b=200,
min_len=1,
normalize_scores=True,
len_penalty=1.,
unk_penalty=0.,
retain_dropout=False,
temperature=1.,
match_source_len=False,
no_repeat_ngram_size=0,
search_strategy=None,
eos=None
):
"""Generates translations of a given source sentence.
Args:
tgt_dict (~fairseq.data.Dictionary): target dictionary
beam_size (int, optional): beam width (default: 1)
max_len_a/b (int, optional): generate sequences of maximum length
ax + b, where x is the source length
min_len (int, optional): the minimum length of the generated output
(not including end-of-sentence)
normalize_scores (bool, optional): normalize scores by the length
of the output (default: True)
len_penalty (float, optional): length penalty, where <1.0 favors
shorter, >1.0 favors longer sentences (default: 1.0)
unk_penalty (float, optional): unknown word penalty, where <0
produces more unks, >0 produces fewer (default: 0.0)
retain_dropout (bool, optional): use dropout when generating
(default: False)
temperature (float, optional): temperature, where values
>1.0 produce more uniform samples and values <1.0 produce
sharper samples (default: 1.0)
match_source_len (bool, optional): outputs should match the source
length (default: False)
"""
self.pad = tgt_dict.pad()
self.unk = tgt_dict.unk()
self.eos = tgt_dict.eos() if eos is None else eos
self.vocab_size = len(tgt_dict)
self.beam_size = beam_size
# the max beam size is the dictionary size - 1, since we never select pad
self.beam_size = min(beam_size, self.vocab_size - 1)
self.max_len_a = max_len_a
self.max_len_b = max_len_b
self.min_len = min_len
self.normalize_scores = normalize_scores
self.len_penalty = len_penalty
self.unk_penalty = unk_penalty
self.retain_dropout = retain_dropout
self.temperature = temperature
self.match_source_len = match_source_len
self.no_repeat_ngram_size = no_repeat_ngram_size
assert temperature > 0, '--temperature must be greater than 0'
self.search = (
search.BeamSearch(tgt_dict) if search_strategy is None else search_strategy
)
@torch.no_grad()
def generate(self, models, sample, **kwargs):
"""Generate a batch of translations.
Args:
models (List[~fairseq.models.FairseqModel]): ensemble of models
sample (dict): batch
prefix_tokens (torch.LongTensor, optional): force decoder to begin
with these tokens
bos_token (int, optional): beginning of sentence token
(default: self.eos)
"""
model = EnsembleModel(models)
return self._generate(model, sample, **kwargs)
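    # Illustrative usage (the variable names below are assumptions, not part of this file):
    #   generator = SequenceGenerator(tgt_dict, beam_size=5)
    #   hypos = generator.generate([model], sample)
    #   best_tokens = hypos[0][0]['tokens']  # top-scoring hypothesis for the first sentence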
@torch.no_grad()
def _generate(
self,
model,
sample,
prefix_tokens=None,
bos_token=None,
**kwargs
):
if not self.retain_dropout:
model.eval()
# model.forward normally channels prev_output_tokens into the decoder
# separately, but SequenceGenerator directly calls model.encoder
encoder_input = {
k: v for k, v in sample['net_input'].items()
if k != 'prev_output_tokens'
}
src_tokens = encoder_input['src_tokens']
src_lengths = (src_tokens.ne(self.eos) & src_tokens.ne(self.pad)).long().sum(dim=1)
input_size = src_tokens.size()
# batch dimension goes first followed by source lengths
bsz = input_size[0]
src_len = input_size[1]
beam_size = self.beam_size
if self.match_source_len:
max_len = src_lengths.max().item()
else:
max_len = min(
int(self.max_len_a * src_len + self.max_len_b),
# exclude the EOS marker
model.max_decoder_positions() - 1,
)
assert self.min_len <= max_len, 'min_len cannot be larger than max_len, please adjust these!'
# compute the encoder output for each beam
encoder_outs = model.forward_encoder(encoder_input)
new_order = torch.arange(bsz).view(-1, 1).repeat(1, beam_size).view(-1)
new_order = new_order.to(src_tokens.device).long()
encoder_outs = model.reorder_encoder_out(encoder_outs, new_order)
# initialize buffers
scores = src_tokens.new(bsz * beam_size, max_len + 1).float().fill_(0)
scores_buf = scores.clone()
tokens = src_tokens.new(bsz * beam_size, max_len + 2).long().fill_(self.pad)
tokens_buf = tokens.clone()
tokens[:, 0] = self.eos if bos_token is None else bos_token
attn, attn_buf = None, None
# The blacklist indicates candidates that should be ignored.
# For example, suppose we're sampling and have already finalized 2/5
# samples. Then the blacklist would mark 2 positions as being ignored,
# so that we only finalize the remaining 3 samples.
blacklist = src_tokens.new_zeros(bsz, beam_size).eq(-1) # forward and backward-compatible False mask
# list of completed sentences
finalized = [[] for i in range(bsz)]
finished = [False for i in range(bsz)]
num_remaining_sent = bsz
# number of candidate hypos per step
cand_size = 2 * beam_size # 2 x beam size in case half are EOS
# offset arrays for converting between different indexing schemes
bbsz_offsets = (torch.arange(0, bsz) * beam_size).unsqueeze(1).type_as(tokens)
cand_offsets = torch.arange(0, cand_size).type_as(tokens)
# helper function for allocating buffers on the fly
buffers = {}
def buffer(name, type_of=tokens): # noqa
if name not in buffers:
buffers[name] = type_of.new()
return buffers[name]
def is_finished(sent, step, unfin_idx):
"""
Check whether we've finished generation for a given sentence, by
comparing the worst score among finalized hypotheses to the best
possible score among unfinalized hypotheses.
"""
assert len(finalized[sent]) <= beam_size
if len(finalized[sent]) == beam_size or step == max_len:
return True
return False
def finalize_hypos(step, bbsz_idx, eos_scores):
"""
Finalize the given hypotheses at this step, while keeping the total
number of finalized hypotheses per sentence <= beam_size.
Note: the input must be in the desired finalization order, so that
hypotheses that appear earlier in the input are preferred to those
that appear later.
Args:
step: current time step
bbsz_idx: A vector of indices in the range [0, bsz*beam_size),
indicating which hypotheses to finalize
eos_scores: A vector of the same size as bbsz_idx containing
scores for each hypothesis
"""
assert bbsz_idx.numel() == eos_scores.numel()
# clone relevant token and attention tensors
tokens_clone = tokens.index_select(0, bbsz_idx)
tokens_clone = tokens_clone[:, 1:step + 2] # skip the first index, which is EOS
assert not tokens_clone.eq(self.eos).any()
tokens_clone[:, step] = self.eos
attn_clone = attn.index_select(0, bbsz_idx)[:, :, 1:step+2] if attn is not None else None
# compute scores per token position
pos_scores = scores.index_select(0, bbsz_idx)[:, :step+1]
pos_scores[:, step] = eos_scores
# convert from cumulative to per-position scores
pos_scores[:, 1:] = pos_scores[:, 1:] - pos_scores[:, :-1]
# normalize sentence-level scores
if self.normalize_scores:
eos_scores /= (step + 1) ** self.len_penalty
cum_unfin = []
prev = 0
for f in finished:
if f:
prev += 1
else:
cum_unfin.append(prev)
sents_seen = set()
for i, (idx, score) in enumerate(zip(bbsz_idx.tolist(), eos_scores.tolist())):
unfin_idx = idx // beam_size
sent = unfin_idx + cum_unfin[unfin_idx]
sents_seen.add((sent, unfin_idx))
if self.match_source_len and step > src_lengths[unfin_idx]:
score = -math.inf
if self.normalize_scores:
normalized_score = score
unnormalized_score = score * ((step + 1) ** self.len_penalty)
else:
normalized_score = score / ((step + 1) ** self.len_penalty)
unnormalized_score = score
def get_hypo():
if attn_clone is not None:
# remove padding tokens from attn scores
hypo_attn = attn_clone[i]
else:
hypo_attn = None
return {
'tokens': tokens_clone[i],
'score': normalized_score,
'unnormalized_score': unnormalized_score,
'attention': hypo_attn, # src_len x tgt_len
'alignment': None,
'positional_scores': pos_scores[i],
}
if len(finalized[sent]) < beam_size:
finalized[sent].append(get_hypo())
newly_finished = []
for sent, unfin_idx in sents_seen:
# check termination conditions for this sentence
if not finished[sent] and is_finished(sent, step, unfin_idx):
finished[sent] = True
newly_finished.append(unfin_idx)
return newly_finished
reorder_state = None
batch_idxs = None
for step in range(max_len + 1): # one extra step for EOS marker
# reorder decoder internal states based on the prev choice of beams
if reorder_state is not None:
if batch_idxs is not None:
# update beam indices to take into account removed sentences
corr = batch_idxs - torch.arange(batch_idxs.numel()).type_as(batch_idxs)
reorder_state.view(-1, beam_size).add_(corr.unsqueeze(-1) * beam_size)
model.reorder_incremental_state(reorder_state)
encoder_outs = model.reorder_encoder_out(encoder_outs, reorder_state)
lprobs, avg_attn_scores = model.forward_decoder(
tokens[:, :step + 1], encoder_outs, temperature=self.temperature,
)
lprobs[lprobs != lprobs] = -math.inf
lprobs[:, self.pad] = -math.inf # never select pad
lprobs[:, self.unk] -= self.unk_penalty # apply unk penalty
# handle max length constraint
if step >= max_len:
lprobs[:, :self.eos] = -math.inf
lprobs[:, self.eos + 1:] = -math.inf
# handle prefix tokens (possibly with different lengths)
if prefix_tokens is not None and step < prefix_tokens.size(1) and step < max_len:
prefix_toks = prefix_tokens[:, step].unsqueeze(-1).repeat(1, beam_size).view(-1)
prefix_lprobs = lprobs.gather(-1, prefix_toks.unsqueeze(-1))
prefix_mask = prefix_toks.ne(self.pad)
lprobs[prefix_mask] = -math.inf
lprobs[prefix_mask] = lprobs[prefix_mask].scatter_(
-1, prefix_toks[prefix_mask].unsqueeze(-1), prefix_lprobs[prefix_mask]
)
# if prefix includes eos, then we should make sure tokens and
# scores are the same across all beams
eos_mask = prefix_toks.eq(self.eos)
if eos_mask.any():
# validate that the first beam matches the prefix
first_beam = tokens[eos_mask].view(-1, beam_size, tokens.size(-1))[:, 0, 1:step + 1]
eos_mask_batch_dim = eos_mask.view(-1, beam_size)[:, 0]
target_prefix = prefix_tokens[eos_mask_batch_dim][:, :step]
assert (first_beam == target_prefix).all()
def replicate_first_beam(tensor, mask):
tensor = tensor.view(-1, beam_size, tensor.size(-1))
tensor[mask] = tensor[mask][:, :1, :]
return tensor.view(-1, tensor.size(-1))
# copy tokens, scores and lprobs from the first beam to all beams
tokens = replicate_first_beam(tokens, eos_mask_batch_dim)
scores = replicate_first_beam(scores, eos_mask_batch_dim)
lprobs = replicate_first_beam(lprobs, eos_mask_batch_dim)
elif step < self.min_len:
# minimum length constraint (does not apply if using prefix_tokens)
lprobs[:, self.eos] = -math.inf
if self.no_repeat_ngram_size > 0:
# for each beam and batch sentence, generate a list of previous ngrams
gen_ngrams = [{} for bbsz_idx in range(bsz * beam_size)]
cpu_tokens = tokens.cpu()
for bbsz_idx in range(bsz * beam_size):
gen_tokens = cpu_tokens[bbsz_idx].tolist()
for ngram in zip(*[gen_tokens[i:] for i in range(self.no_repeat_ngram_size)]):
if ngram[-1] != self.pad:
gen_ngrams[bbsz_idx][tuple(ngram[:-1])] = \
gen_ngrams[bbsz_idx].get(tuple(ngram[:-1]), []) + [ngram[-1]]
# Record attention scores
if type(avg_attn_scores) is list:
avg_attn_scores = avg_attn_scores[0]
if avg_attn_scores is not None:
if attn is None:
attn = scores.new(bsz * beam_size, avg_attn_scores.size(1), max_len + 2)
attn_buf = attn.clone()
attn[:, :, step + 1].copy_(avg_attn_scores)
scores = scores.type_as(lprobs)
scores_buf = scores_buf.type_as(lprobs)
eos_bbsz_idx = buffer('eos_bbsz_idx')
eos_scores = buffer('eos_scores', type_of=scores)
self.search.set_src_lengths(src_lengths)
if self.no_repeat_ngram_size > 0:
def calculate_banned_tokens(bbsz_idx):
# before decoding the next token, prevent decoding of ngrams that have already appeared
ngram_index = tuple(cpu_tokens[bbsz_idx, step + 2 - self.no_repeat_ngram_size:step + 1].tolist())
banned_tokens_per_sample = gen_ngrams[bbsz_idx].get(ngram_index, [])
banned_tokens_per_sample = [(bbsz_idx, t) for t in banned_tokens_per_sample]
return banned_tokens_per_sample
banned_tokens = []
if step + 2 - self.no_repeat_ngram_size >= 0:
# no banned tokens if we haven't generated no_repeat_ngram_size tokens yet
for bbsz_idx in range(bsz * beam_size):
banned_tokens.extend(calculate_banned_tokens(bbsz_idx))
if banned_tokens:
banned_tokens = torch.LongTensor(banned_tokens)
lprobs.index_put_(tuple(banned_tokens.t()), lprobs.new_tensor([-math.inf] * len(banned_tokens)))
cand_scores, cand_indices, cand_beams = self.search.step(
step,
lprobs.view(bsz, -1, self.vocab_size),
scores.view(bsz, beam_size, -1)[:, :, :step],
)
# cand_bbsz_idx contains beam indices for the top candidate
# hypotheses, with a range of values: [0, bsz*beam_size),
# and dimensions: [bsz, cand_size]
cand_bbsz_idx = cand_beams.add(bbsz_offsets)
# finalize hypotheses that end in eos, except for blacklisted ones
# or candidates with a score of -inf
eos_mask = cand_indices.eq(self.eos) & cand_scores.ne(-math.inf)
eos_mask[:, :beam_size][blacklist] = 0
# only consider eos when it's among the top beam_size indices
torch.masked_select(
cand_bbsz_idx[:, :beam_size],
mask=eos_mask[:, :beam_size],
out=eos_bbsz_idx,
)
finalized_sents = set()
if eos_bbsz_idx.numel() > 0:
torch.masked_select(
cand_scores[:, :beam_size],
mask=eos_mask[:, :beam_size],
out=eos_scores,
)
finalized_sents = finalize_hypos(step, eos_bbsz_idx, eos_scores)
num_remaining_sent -= len(finalized_sents)
assert num_remaining_sent >= 0
if num_remaining_sent == 0:
break
assert step < max_len
if len(finalized_sents) > 0:
new_bsz = bsz - len(finalized_sents)
# construct batch_idxs which holds indices of batches to keep for the next pass
batch_mask = cand_indices.new_ones(bsz)
batch_mask[cand_indices.new(finalized_sents)] = 0
batch_idxs = batch_mask.nonzero().squeeze(-1)
eos_mask = eos_mask[batch_idxs]
cand_beams = cand_beams[batch_idxs]
bbsz_offsets.resize_(new_bsz, 1)
cand_bbsz_idx = cand_beams.add(bbsz_offsets)
cand_scores = cand_scores[batch_idxs]
cand_indices = cand_indices[batch_idxs]
if prefix_tokens is not None:
prefix_tokens = prefix_tokens[batch_idxs]
src_lengths = src_lengths[batch_idxs]
blacklist = blacklist[batch_idxs]
scores = scores.view(bsz, -1)[batch_idxs].view(new_bsz * beam_size, -1)
scores_buf.resize_as_(scores)
tokens = tokens.view(bsz, -1)[batch_idxs].view(new_bsz * beam_size, -1)
tokens_buf.resize_as_(tokens)
if attn is not None:
attn = attn.view(bsz, -1)[batch_idxs].view(new_bsz * beam_size, attn.size(1), -1)
attn_buf.resize_as_(attn)
bsz = new_bsz
else:
batch_idxs = None
# Set active_mask so that values > cand_size indicate eos or
# blacklisted hypos and values < cand_size indicate candidate
# active hypos. After this, the min values per row are the top
# candidate active hypos.
active_mask = buffer('active_mask')
eos_mask[:, :beam_size] |= blacklist
torch.add(
eos_mask.type_as(cand_offsets) * cand_size,
cand_offsets[:eos_mask.size(1)],
out=active_mask,
)
# get the top beam_size active hypotheses, which are just the hypos
# with the smallest values in active_mask
active_hypos, new_blacklist = buffer('active_hypos'), buffer('new_blacklist')
torch.topk(
active_mask, k=beam_size, dim=1, largest=False,
out=(new_blacklist, active_hypos)
)
# update blacklist to ignore any finalized hypos
blacklist = new_blacklist.ge(cand_size)[:, :beam_size]
assert (~blacklist).any(dim=1).all()
active_bbsz_idx = buffer('active_bbsz_idx')
torch.gather(
cand_bbsz_idx, dim=1, index=active_hypos,
out=active_bbsz_idx,
)
active_scores = torch.gather(
cand_scores, dim=1, index=active_hypos,
out=scores[:, step].view(bsz, beam_size),
)
active_bbsz_idx = active_bbsz_idx.view(-1)
active_scores = active_scores.view(-1)
# copy tokens and scores for active hypotheses
torch.index_select(
tokens[:, :step + 1], dim=0, index=active_bbsz_idx,
out=tokens_buf[:, :step + 1],
)
torch.gather(
cand_indices, dim=1, index=active_hypos,
out=tokens_buf.view(bsz, beam_size, -1)[:, :, step + 1],
)
if step > 0:
torch.index_select(
scores[:, :step], dim=0, index=active_bbsz_idx,
out=scores_buf[:, :step],
)
torch.gather(
cand_scores, dim=1, index=active_hypos,
out=scores_buf.view(bsz, beam_size, -1)[:, :, step],
)
# copy attention for active hypotheses
if attn is not None:
torch.index_select(
attn[:, :, :step + 2], dim=0, index=active_bbsz_idx,
out=attn_buf[:, :, :step + 2],
)
# swap buffers
tokens, tokens_buf = tokens_buf, tokens
scores, scores_buf = scores_buf, scores
if attn is not None:
attn, attn_buf = attn_buf, attn
# reorder incremental state in decoder
reorder_state = active_bbsz_idx
# sort by score descending
for sent in range(len(finalized)):
finalized[sent] = sorted(finalized[sent], key=lambda r: r['score'], reverse=True)
return finalized
class EnsembleModel(torch.nn.Module):
"""A wrapper around an ensemble of models."""
def __init__(self, models):
super().__init__()
self.models = torch.nn.ModuleList(models)
self.incremental_states = None
if all(hasattr(m, 'decoder') and isinstance(m.decoder, FairseqIncrementalDecoder) for m in models):
self.incremental_states = {m: {} for m in models}
def has_encoder(self):
return hasattr(self.models[0], 'encoder')
def max_decoder_positions(self):
return min(m.max_decoder_positions() for m in self.models)
@torch.no_grad()
def forward_encoder(self, encoder_input):
if not self.has_encoder():
return None
return [model.encoder(**encoder_input) for model in self.models]
@torch.no_grad()
def forward_decoder(self, tokens, encoder_outs, temperature=1.):
if len(self.models) == 1:
return self._decode_one(
tokens,
self.models[0],
encoder_outs[0] if self.has_encoder() else None,
self.incremental_states,
log_probs=True,
temperature=temperature,
)
log_probs = []
avg_attn = None
for model, encoder_out in zip(self.models, encoder_outs):
probs, attn = self._decode_one(
tokens,
model,
encoder_out,
self.incremental_states,
log_probs=True,
temperature=temperature,
)
log_probs.append(probs)
if attn is not None:
if avg_attn is None:
avg_attn = attn
else:
avg_attn.add_(attn)
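        # logsumexp over the model dimension minus log(N) is the log of the
        # mean probability, i.e. the ensemble average taken in log space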
avg_probs = torch.logsumexp(torch.stack(log_probs, dim=0), dim=0) - math.log(len(self.models))
if avg_attn is not None:
avg_attn.div_(len(self.models))
return avg_probs, avg_attn
def _decode_one(
self, tokens, model, encoder_out, incremental_states, log_probs,
temperature=1.,
):
if self.incremental_states is not None:
decoder_out = list(model.forward_decoder(
tokens, encoder_out=encoder_out, incremental_state=self.incremental_states[model],
))
else:
decoder_out = list(model.forward_decoder(tokens, encoder_out=encoder_out))
decoder_out[0] = decoder_out[0][:, -1:, :]
if temperature != 1.:
decoder_out[0].div_(temperature)
attn = decoder_out[1] if len(decoder_out) > 1 else None
if type(attn) is dict:
attn = attn.get('attn', None)
if type(attn) is list:
attn = attn[0]
if attn is not None:
attn = attn[:, -1, :]
probs = model.get_normalized_probs(decoder_out, log_probs=log_probs)
probs = probs[:, -1, :]
return probs, attn
def reorder_encoder_out(self, encoder_outs, new_order):
if not self.has_encoder():
return
return [
model.encoder.reorder_encoder_out(encoder_out, new_order)
for model, encoder_out in zip(self.models, encoder_outs)
]
def reorder_incremental_state(self, new_order):
if self.incremental_states is None:
return
for model in self.models:
model.decoder.reorder_incremental_state(self.incremental_states[model], new_order)
class SequenceGeneratorWithAlignment(SequenceGenerator):
def __init__(self, tgt_dict, left_pad_target=False, **kwargs):
"""Generates translations of a given source sentence.
Produces alignments following "Jointly Learning to Align and
Translate with Transformer Models" (Garg et al., EMNLP 2019).
Args:
            left_pad_target (bool, optional): Whether the hypotheses
                should be left-padded when they are teacher-forced for
                generating alignments.
"""
super().__init__(tgt_dict, **kwargs)
self.left_pad_target = left_pad_target
@torch.no_grad()
def generate(self, models, sample, **kwargs):
model = EnsembleModelWithAlignment(models)
finalized = super()._generate(model, sample, **kwargs)
src_tokens = sample['net_input']['src_tokens']
bsz = src_tokens.shape[0]
beam_size = self.beam_size
src_tokens, src_lengths, prev_output_tokens, tgt_tokens = \
self._prepare_batch_for_alignment(sample, finalized)
if any(getattr(m, 'full_context_alignment', False) for m in model.models):
attn = model.forward_align(src_tokens, src_lengths, prev_output_tokens)
else:
attn = [
finalized[i // beam_size][i % beam_size]['attention'].transpose(1, 0)
for i in range(bsz * beam_size)
]
# Process the attn matrix to extract hard alignments.
for i in range(bsz * beam_size):
alignment = utils.extract_hard_alignment(attn[i], src_tokens[i], tgt_tokens[i], self.pad, self.eos)
finalized[i // beam_size][i % beam_size]['alignment'] = alignment
return finalized
def _prepare_batch_for_alignment(self, sample, hypothesis):
src_tokens = sample['net_input']['src_tokens']
bsz = src_tokens.shape[0]
src_tokens = src_tokens[:, None, :].expand(-1, self.beam_size, -1).contiguous().view(bsz * self.beam_size, -1)
src_lengths = sample['net_input']['src_lengths']
src_lengths = src_lengths[:, None].expand(-1, self.beam_size).contiguous().view(bsz * self.beam_size)
prev_output_tokens = data_utils.collate_tokens(
[beam['tokens'] for example in hypothesis for beam in example],
self.pad, self.eos, self.left_pad_target, move_eos_to_beginning=True,
)
tgt_tokens = data_utils.collate_tokens(
[beam['tokens'] for example in hypothesis for beam in example],
self.pad, self.eos, self.left_pad_target, move_eos_to_beginning=False,
)
return src_tokens, src_lengths, prev_output_tokens, tgt_tokens
class EnsembleModelWithAlignment(EnsembleModel):
"""A wrapper around an ensemble of models."""
def __init__(self, models):
super().__init__(models)
def forward_align(self, src_tokens, src_lengths, prev_output_tokens):
avg_attn = None
for model in self.models:
decoder_out = model(src_tokens, src_lengths, prev_output_tokens)
attn = decoder_out[1]['attn']
if avg_attn is None:
avg_attn = attn
else:
avg_attn.add_(attn)
if len(self.models) > 1:
avg_attn.div_(len(self.models))
return avg_attn
def _decode_one(
self, tokens, model, encoder_out, incremental_states, log_probs,
temperature=1.,
):
if self.incremental_states is not None:
decoder_out = list(model.forward_decoder(
tokens,
encoder_out=encoder_out,
incremental_state=self.incremental_states[model],
))
else:
decoder_out = list(model.forward_decoder(tokens, encoder_out=encoder_out))
decoder_out[0] = decoder_out[0][:, -1:, :]
if temperature != 1.:
decoder_out[0].div_(temperature)
attn = decoder_out[1] if len(decoder_out) > 1 else None
if type(attn) is dict:
attn = attn.get('attn', None)
if type(attn) is list:
attn = attn[0]
if attn is not None:
attn = attn[:, -1, :]
probs = model.get_normalized_probs(decoder_out, log_probs=log_probs)
probs = probs[:, -1, :]
return probs, attn
|
py | b405d89c8bc8368cda5483ed908572fa06608c04 | #!/usr/bin/env python3
# Copyright (c) 2014-2017 The Bitcoin Core developers
# Copyright (c) 2017-2018 The Fecal E.coli developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Test mempool persistence.
By default, fecald will dump mempool on shutdown and
then reload it on startup. This can be overridden with
the -persistmempool=0 command line option.
Test is as follows:
- start node0, node1 and node2. node1 has -persistmempool=0
- create 5 transactions on node2 to its own address. Note that these
are not sent to node0 or node1 addresses because we don't want
them to be saved in the wallet.
- check that node0 and node1 have 5 transactions in their mempools
- shutdown all nodes.
- startup node0. Verify that it still has 5 transactions
in its mempool. Shutdown node0. This tests that by default the
mempool is persistent.
- startup node1. Verify that its mempool is empty. Shutdown node1.
This tests that with -persistmempool=0, the mempool is not
dumped to disk when the node is shut down.
- Restart node0 with -persistmempool=0. Verify that its mempool is
empty. Shutdown node0. This tests that with -persistmempool=0,
the mempool is not loaded from disk on start up.
- Restart node0 with -persistmempool. Verify that it has 5
transactions in its mempool. This tests that -persistmempool=0
does not overwrite a previously valid mempool stored on disk.
- Remove node0 mempool.dat and verify savemempool RPC recreates it
and verify that node1 can load it and has 5 transaction in its
mempool.
- Verify that savemempool throws when the RPC is called if
node1 can't write to disk.
"""
import os
import time
from test_framework.test_framework import RavenTestFramework
from test_framework.util import *
class MempoolPersistTest(RavenTestFramework):
def set_test_params(self):
self.num_nodes = 3
self.extra_args = [[], ["-persistmempool=0"], []]
def run_test(self):
chain_height = self.nodes[0].getblockcount()
assert_equal(chain_height, 200)
self.log.debug("Mine a single block to get out of IBD")
self.nodes[0].generate(1)
self.sync_all()
self.log.debug("Send 5 transactions from node2 (to its own address)")
for i in range(5):
self.nodes[2].sendtoaddress(self.nodes[2].getnewaddress(), Decimal("10"))
self.sync_all()
self.log.debug("Verify that node0 and node1 have 5 transactions in their mempools")
assert_equal(len(self.nodes[0].getrawmempool()), 5)
assert_equal(len(self.nodes[1].getrawmempool()), 5)
self.log.debug("Stop-start node0 and node1. Verify that node0 has the transactions in its mempool and node1 does not.")
self.stop_nodes()
self.start_node(0)
self.start_node(1)
# Give fecald a second to reload the mempool
time.sleep(1)
wait_until(lambda: len(self.nodes[0].getrawmempool()) == 5)
assert_equal(len(self.nodes[1].getrawmempool()), 0)
self.log.debug("Stop-start node0 with -persistmempool=0. Verify that it doesn't load its mempool.dat file.")
self.stop_nodes()
self.start_node(0, extra_args=["-persistmempool=0"])
# Give fecald a second to reload the mempool
time.sleep(1)
assert_equal(len(self.nodes[0].getrawmempool()), 0)
self.log.debug("Stop-start node0. Verify that it has the transactions in its mempool.")
self.stop_nodes()
self.start_node(0)
wait_until(lambda: len(self.nodes[0].getrawmempool()) == 5)
mempooldat0 = os.path.join(self.options.tmpdir, 'node0', 'regtest', 'mempool.dat')
mempooldat1 = os.path.join(self.options.tmpdir, 'node1', 'regtest', 'mempool.dat')
self.log.debug("Remove the mempool.dat file. Verify that savemempool to disk via RPC re-creates it")
os.remove(mempooldat0)
self.nodes[0].savemempool()
assert os.path.isfile(mempooldat0)
self.log.debug("Stop nodes, make node1 use mempool.dat from node0. Verify it has 5 transactions")
os.rename(mempooldat0, mempooldat1)
self.stop_nodes()
self.start_node(1, extra_args=[])
wait_until(lambda: len(self.nodes[1].getrawmempool()) == 5)
self.log.debug("Prevent fecald from writing mempool.dat to disk. Verify that `savemempool` fails")
# to test the exception we are setting bad permissions on a tmp file called mempool.dat.new
# which is an implementation detail that could change and break this test
mempooldotnew1 = mempooldat1 + '.new'
with os.fdopen(os.open(mempooldotnew1, os.O_CREAT, 0o000), 'w'):
pass
assert_raises_rpc_error(-1, "Unable to dump mempool to disk", self.nodes[1].savemempool)
os.remove(mempooldotnew1)
if __name__ == '__main__':
MempoolPersistTest().main()
|
py | b405da7a0e9180048cef17bea5fe539c5ec25f65 | """ Passive Markers
Fluid simulation with additional marker fields that are passively transported with the fluid.
The dense marker is sampled on a regular grid while the sparse marker is a collection of particles.
"""
from phi.physics._boundaries import Domain, STICKY as CLOSED
from phi.flow import *
math.seed(0)
def checkerboard(size=8, offset=2):
return math.to_float(math.all((DOMAIN.cells.center - offset) % (2 * size) < size, 'vector'))
DOMAIN = Domain(x=126, y=160, boundaries=CLOSED)
DT = 0.2
velocity = DOMAIN.staggered_grid(Noise(vector=2, scale=100)) * 4
dense_marker = CenteredGrid(checkerboard(), DOMAIN.boundaries['scalar'], DOMAIN.bounds)
points = math.join_dimensions(DOMAIN.cells.center.x[::4].y[::4], ('x', 'y'), collection('points')).points.as_batch()
sparse_marker = DOMAIN.points(points)
for _ in view(framerate=10, play=False).range():
velocity, _ = fluid.make_incompressible(velocity)
dense_marker = advect.advect(dense_marker, velocity, DT)
sparse_marker = advect.advect(sparse_marker, velocity, DT)
velocity = advect.semi_lagrangian(velocity, velocity, DT)
|
py | b405dab1964133efa9365d5a9dda658eb433c985 | # -*- coding: utf-8 -*-
"""Kubeflow notebook server utility functions."""
import json
import os
import random
import re
import tarfile
import time
import uuid
import warnings
from tempfile import TemporaryFile
from ast import literal_eval
from kubernetes import client
from kubernetes.client.rest import ApiException
from kubernetes.stream import stream
from kube_config import load_kube_config
KF_PIPELINES_NAMESPACE = os.getenv("KF_PIPELINES_NAMESPACE", "anonymous")
NOTEBOOK_NAME = "server"
NOTEBOOK_NAMESPACE = "anonymous"
NOTEBOOK_POD_NAME = "server-0"
NOTEBOOK_CONAINER_NAME = "server"
class ApiClientForJsonPatch(client.ApiClient):
def call_api(self, resource_path, method,
path_params=None, query_params=None, header_params=None,
body=None, post_params=None, files=None,
response_type=None, auth_settings=None, async_req=None,
_return_http_data_only=None, collection_formats=None,
_preload_content=True, _request_timeout=None):
header_params["Content-Type"] = self.select_header_content_type(["application/json-patch+json"])
return super().call_api(resource_path, method, path_params, query_params, header_params, body,
post_params, files, response_type, auth_settings, async_req, _return_http_data_only,
collection_formats, _preload_content, _request_timeout)
def create_persistent_volume_claim(name):
"""
Creates a persistent volume claim.
Parameters
----------
name : str
"""
print(f"Creating volume {name}...", flush=True)
load_kube_config()
api_instance = client.CoreV1Api()
try:
api_instance.read_namespaced_persistent_volume_claim(
name=name,
namespace=NOTEBOOK_NAMESPACE,
)
warnings.warn(f"Volume {name} already exists...")
return
except ApiException:
pass
try:
body = {
"metadata": {
"name": name,
},
"spec": {
"accessModes": [
"ReadWriteOnce",
],
"resources": {
"requests": {
"storage": "10Gi",
},
}
},
}
api_instance.create_namespaced_persistent_volume_claim(
namespace=NOTEBOOK_NAMESPACE,
body=body,
)
except ApiException as e:
body = literal_eval(e.body)
message = body["message"]
raise Exception(f"Error while trying to patch notebook server: {message}")
def create_config_map(task_id, experiment_notebook_content):
"""
Create a ConfigMap with the notebook of the given task.
Parameters
----------
task_id : str
experiment_notebook_content : str
"""
config_map_name = f"configmap-{task_id}"
load_kube_config()
v1 = client.CoreV1Api()
body = {
"metadata": {
"name": config_map_name,
},
"data": {
"Experiment.ipynb": experiment_notebook_content
}
}
v1.create_namespaced_config_map(
namespace=KF_PIPELINES_NAMESPACE,
body=body,
)
warnings.warn(f"ConfigMap of task {task_id} created!")
def patch_notebook_server(volume_mounts):
"""
Adds a list of volume mounts to the notebook server.
Parameters
----------
volume_mounts : list
"""
print("Adding volumes to notebook server...", flush=True)
load_kube_config()
api_instance = client.CoreV1Api()
custom_api = client.CustomObjectsApi(api_client=ApiClientForJsonPatch())
try:
body = custom_api.get_namespaced_custom_object(
group="kubeflow.org",
version="v1",
namespace=NOTEBOOK_NAMESPACE,
plural="notebooks",
name=NOTEBOOK_NAME,
)
# filters volume mounts that were already added
volume_mounts = [m for m in volume_mounts if not any(v for v in body["spec"]["template"]["spec"]["volumes"] if m["name"] == v["name"])]
except ApiException as e:
body = literal_eval(e.body)
message = body["message"]
raise Exception(f"Error while trying to patch notebook server: {message}")
body = []
for v in volume_mounts:
body.extend([
{
"op": "add",
"path": "/spec/template/spec/volumes/-",
"value": {
"name": v["name"],
"persistentVolumeClaim": {
"claimName": v["name"],
},
},
},
{
"op": "add",
"path": "/spec/template/spec/containers/0/volumeMounts/-",
"value": {
"mountPath": v["mount_path"],
"name": v["name"],
},
},
])
if len(body) > 0:
try:
custom_api.patch_namespaced_custom_object(
group="kubeflow.org",
version="v1",
namespace=NOTEBOOK_NAMESPACE,
plural="notebooks",
name=NOTEBOOK_NAME,
body=body,
_request_timeout=5,
)
except ApiException as e:
body = literal_eval(e.body)
message = body["message"]
raise Exception(f"Error while trying to patch notebook server: {message}")
# Wait for the pod to be ready and have all containers running
while True:
try:
pod = api_instance.read_namespaced_pod(
name=NOTEBOOK_POD_NAME,
namespace=NOTEBOOK_NAMESPACE,
_request_timeout=5,
)
if pod.status.phase == "Running" \
and all([c.state.running for c in pod.status.container_statuses]):
print("Mounted volumes in notebook server!", flush=True)
break
except ApiException:
pass
finally:
warnings.warn("Waiting for notebook server to be ready...")
time.sleep(5)
def copy_files_inside_pod(local_path, destination_path, task_name):
"""
Copies local files to a pod in notebook server.
Based on this example:
https://github.com/prafull01/Kubernetes-Utilities/blob/master/kubectl_cp_as_python_client.py
Parameters
----------
local_path : str
destination_path : str
    task_name : str
"""
print(f"Copying {local_path} to {destination_path}...", flush=True)
load_kube_config()
api_instance = client.CoreV1Api()
# The following command extracts the contents of STDIN to /home/jovyan/tasks
exec_command = ["tar", "xvf", "-", "-C", "/home/jovyan/tasks"]
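    # Roughly the same idea as the usual shell pattern (illustrative only, not executed here):
    #   tar -C <local_path> -cf - . | kubectl exec -i <pod> -- tar xvf - -C /home/jovyan/tasks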
container_stream = stream(
api_instance.connect_get_namespaced_pod_exec,
name=NOTEBOOK_POD_NAME,
namespace=NOTEBOOK_NAMESPACE,
command=exec_command,
container=NOTEBOOK_CONAINER_NAME,
stderr=True,
stdin=True,
stdout=True,
tty=False,
_preload_content=False,
)
with TemporaryFile() as tar_buffer:
# Prepares an uncompressed tarfile that will be written to STDIN
with tarfile.open(fileobj=tar_buffer, mode="w") as tar:
for root, dirs, files in os.walk(local_path):
for filename in files:
# Local filepath
filepath = os.path.join(root, filename)
# Filepath inside pod
                    pod_root = root[len(local_path):].lstrip(os.sep)  # strip the local_path prefix (str.lstrip strips characters, not a prefix)
                    pod_path = os.path.join(task_name, pod_root, filename)  # use a new name so the destination_path argument is not clobbered
                    tar.add(filepath, arcname=pod_path)
        # Rewinds to beginning of tarfile
tar_buffer.seek(0)
# WARNING:
# Attempts to write the entire tarfile caused connection errors for large files
# The loop below reads/writes small chunks to prevent these errors
data = tar_buffer.read(1000000)
while container_stream.is_open():
container_stream.update(timeout=10)
if container_stream.peek_stdout():
print("STDOUT: %s" % container_stream.read_stdout(), flush=True)
if container_stream.peek_stderr():
print("STDERR: %s" % container_stream.read_stderr(), flush=True)
if data:
container_stream.write_stdin(data)
data = tar_buffer.read(1000000)
else:
break
container_stream.close()
print(f"Copied {local_path} to {destination_path}!", flush=True)
def set_notebook_metadata(notebook_path, task_id, experiment_id, operator_id):
"""
Sets metadata values in notebook file.
Parameters
----------
notebook_path : str
task_id : str
experiment_id : str
operator_id : str
"""
print(f"Setting metadata in {notebook_path}...", flush=True)
load_kube_config()
api_instance = client.CoreV1Api()
    # The following command sets task_id, experiment_id and operator_id in the notebook metadata
python_script = (
f"import json; "
f"f = open('/home/jovyan/tasks/{notebook_path}'); "
f"n = json.load(f); "
f"n['metadata']['task_id'] = '{task_id}'; "
f"n['metadata']['experiment_id'] = '{experiment_id}'; "
f"n['metadata']['operator_id'] = '{operator_id}'; "
f"f.close(); "
f"f = open('/home/jovyan/tasks/{notebook_path}', 'w'); "
f"json.dump(n, f, indent=1); "
f"f.close()"
)
exec_command = [
"python",
"-c",
python_script,
]
container_stream = stream(
api_instance.connect_get_namespaced_pod_exec,
name=NOTEBOOK_POD_NAME,
namespace=NOTEBOOK_NAMESPACE,
command=exec_command,
container=NOTEBOOK_CONAINER_NAME,
stderr=True,
stdin=False,
stdout=True,
tty=False,
_preload_content=False,
)
while container_stream.is_open():
container_stream.update(timeout=10)
if container_stream.peek_stdout():
warnings.warn("STDOUT: %s" % container_stream.read_stdout())
if container_stream.peek_stderr():
warnings.warn("STDERR: %s" % container_stream.read_stderr())
container_stream.close()
print(f"Set metadata in {notebook_path}!", flush=True)
def uuid_alpha():
"""
Generates an uuid that always starts with an alpha char.
Returns
-------
str
"""
uuid_ = str(uuid.uuid4())
if not uuid_[0].isalpha():
c = random.choice(["a", "b", "c", "d", "e", "f"])
uuid_ = f"{c}{uuid_[1:]}"
return uuid_
def parse_parameters(notebook_path):
"""
Parses and returns the parameters declared in a notebook.
Parameters
----------
notebook_path : str
Returns
-------
list:
A list of parameters (name, default, type, label, description).
"""
if not os.path.exists(notebook_path):
return []
with open(notebook_path) as f:
notebook = json.load(f)
parameters = []
cells = notebook.get("cells", [])
for cell in cells:
cell_type = cell["cell_type"]
tags = cell["metadata"].get("tags", [])
if cell_type == "code" and "parameters" in tags:
source = cell["source"]
parameters.extend(read_parameters_from_source(source))
return parameters
def read_parameters_from_source(source):
"""
Lists the parameters declared in source code.
Parameters
----------
source : list
Source code lines.
Returns
-------
list:
A list of parameters (name, default, type, label, description).
"""
parameters = []
# Regex to capture a parameter declaration
# Inspired by Google Colaboratory Forms
# Example of a parameter declaration:
# name = "value" #@param ["1st option", "2nd option"] {type:"string", label:"Foo Bar", description:"Foo Bar"}
pattern = re.compile(r"^(\w+)\s*=\s*(.+)\s*#@param(?:(\s+\[.*\]))?(\s+\{.*\})")
for line in source:
match = pattern.search(line)
if match:
try:
name = match.group(1)
default = match.group(2)
options = match.group(3)
metadata = match.group(4)
parameter = {"name": name}
if default and default != "None":
if default in ["True", "False"]:
default = default.lower()
parameter["default"] = json.loads(default)
if options:
parameter["options"] = json.loads(options)
# adds quotes to metadata keys
metadata = re.sub(r"(\w+):", r'"\1":', metadata)
parameter.update(json.loads(metadata))
parameters.append(parameter)
except json.JSONDecodeError:
pass
return parameters
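# Example (illustrative):
#   read_parameters_from_source(['alpha = 0.5 #@param {type:"number"}'])
#   returns [{'name': 'alpha', 'default': 0.5, 'type': 'number'}]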
|
py | b405dacbb4eb74987cd9537d05d5af1912e1f17d | #!/usr/bin/env python3
# Copyright (c) 2015-2017 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Test block processing.
This reimplements tests from the bitcoinj/FullBlockTestGenerator used
by the pull-tester.
We use the testing framework in which we expect a particular answer from
each test.
"""
from test_framework.test_framework import ComparisonTestFramework
from test_framework.util import *
from test_framework.comptool import TestManager, TestInstance, RejectResult
from test_framework.blocktools import *
import time
from test_framework.key import CECKey
from test_framework.script import *
from test_framework.mininode import network_thread_start
import struct
class PreviousSpendableOutput():
def __init__(self, tx = CTransaction(), n = -1):
self.tx = tx
self.n = n # the output we're spending
# Use this class for tests that require behavior other than normal "mininode" behavior.
# For now, it is used to serialize a bloated varint (b64).
class CBrokenBlock(CBlock):
def __init__(self, header=None):
super(CBrokenBlock, self).__init__(header)
def initialize(self, base_block):
self.vtx = copy.deepcopy(base_block.vtx)
self.hashMerkleRoot = self.calc_merkle_root()
def serialize(self, with_witness=False):
r = b""
r += super(CBlock, self).serialize()
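        # A leading 0xff byte forces the transaction count to be written as a
        # full 8-byte CompactSize, i.e. a non-canonically bloated varint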
r += struct.pack("<BQ", 255, len(self.vtx))
for tx in self.vtx:
if with_witness:
r += tx.serialize_with_witness()
else:
r += tx.serialize_without_witness()
return r
def normal_serialize(self):
r = b""
r += super(CBrokenBlock, self).serialize()
return r
class FullBlockTest(ComparisonTestFramework):
# Can either run this test as 1 node with expected answers, or two and compare them.
# Change the "outcome" variable from each TestInstance object to only do the comparison.
def set_test_params(self):
self.num_nodes = 1
self.setup_clean_chain = True
self.block_heights = {}
self.coinbase_key = CECKey()
self.coinbase_key.set_secretbytes(b"horsebattery")
self.coinbase_pubkey = self.coinbase_key.get_pubkey()
self.tip = None
self.blocks = {}
def add_options(self, parser):
super().add_options(parser)
parser.add_option("--runbarelyexpensive", dest="runbarelyexpensive", default=True)
def run_test(self):
self.test = TestManager(self, self.options.tmpdir)
self.test.add_all_connections(self.nodes)
network_thread_start()
self.test.run()
def add_transactions_to_block(self, block, tx_list):
[ tx.rehash() for tx in tx_list ]
block.vtx.extend(tx_list)
# this is a little handier to use than the version in blocktools.py
def create_tx(self, spend_tx, n, value, script=CScript([OP_TRUE, OP_DROP] * 15 + [OP_TRUE])):
tx = create_transaction(spend_tx, n, b"", value, script)
return tx
# sign a transaction, using the key we know about
# this signs input 0 in tx, which is assumed to be spending output n in spend_tx
def sign_tx(self, tx, spend_tx, n):
scriptPubKey = bytearray(spend_tx.vout[n].scriptPubKey)
if (scriptPubKey[0] == OP_TRUE): # an anyone-can-spend
tx.vin[0].scriptSig = CScript()
return
(sighash, err) = SignatureHash(spend_tx.vout[n].scriptPubKey, tx, 0, SIGHASH_ALL)
tx.vin[0].scriptSig = CScript([self.coinbase_key.sign(sighash) + bytes(bytearray([SIGHASH_ALL]))])
def create_and_sign_transaction(self, spend_tx, n, value, script=CScript([OP_TRUE])):
tx = self.create_tx(spend_tx, n, value, script)
self.sign_tx(tx, spend_tx, n)
tx.rehash()
return tx
def next_block(self, number, spend=None, additional_coinbase_value=0, script=CScript([OP_TRUE]), solve=True):
if self.tip == None:
base_block_hash = self.genesis_hash
block_time = int(time.time())+1
else:
base_block_hash = self.tip.sha256
block_time = self.tip.nTime + 1
# First create the coinbase
height = self.block_heights[base_block_hash] + 1
coinbase = create_coinbase(height, self.coinbase_pubkey)
coinbase.vout[0].nValue += additional_coinbase_value
coinbase.rehash()
if spend == None:
block = create_block(base_block_hash, coinbase, block_time)
block.nVersion = 0x20000000
else:
coinbase.vout[0].nValue += spend.tx.vout[spend.n].nValue - 1 # all but one satoshi to fees
coinbase.rehash()
block = create_block(base_block_hash, coinbase, block_time)
block.nVersion = 0x20000000
tx = create_transaction(spend.tx, spend.n, b"", 1, script) # spend 1 satoshi
self.sign_tx(tx, spend.tx, spend.n)
self.add_transactions_to_block(block, [tx])
block.hashMerkleRoot = block.calc_merkle_root()
if solve:
block.solve()
self.tip = block
self.block_heights[block.sha256] = height
assert number not in self.blocks
self.blocks[number] = block
return block
def get_tests(self):
self.genesis_hash = int(self.nodes[0].getbestblockhash(), 16)
self.block_heights[self.genesis_hash] = 0
spendable_outputs = []
# save the current tip so it can be spent by a later block
def save_spendable_output():
spendable_outputs.append(self.tip)
# get an output that we previously marked as spendable
def get_spendable_output():
return PreviousSpendableOutput(spendable_outputs.pop(0).vtx[0], 0)
# returns a test case that asserts that the current tip was accepted
def accepted():
return TestInstance([[self.tip, True]])
# returns a test case that asserts that the current tip was rejected
def rejected(reject = None):
if reject is None:
return TestInstance([[self.tip, False]])
else:
return TestInstance([[self.tip, reject]])
# move the tip back to a previous block
def tip(number):
self.tip = self.blocks[number]
# adds transactions to the block and updates state
def update_block(block_number, new_transactions):
block = self.blocks[block_number]
self.add_transactions_to_block(block, new_transactions)
old_sha256 = block.sha256
block.hashMerkleRoot = block.calc_merkle_root()
block.solve()
# Update the internal state just like in next_block
self.tip = block
if block.sha256 != old_sha256:
self.block_heights[block.sha256] = self.block_heights[old_sha256]
del self.block_heights[old_sha256]
self.blocks[block_number] = block
return block
# shorthand for functions
block = self.next_block
create_tx = self.create_tx
create_and_sign_tx = self.create_and_sign_transaction
# these must be updated if consensus changes
MAX_BLOCK_SIGOPS = 20000
# Create a new block
block(0)
save_spendable_output()
yield accepted()
# Now we need that block to mature so we can spend the coinbase.
test = TestInstance(sync_every_block=False)
for i in range(99):
block(5000 + i)
test.blocks_and_transactions.append([self.tip, True])
save_spendable_output()
yield test
# collect spendable outputs now to avoid cluttering the code later on
out = []
for i in range(33):
out.append(get_spendable_output())
# Start by building a couple of blocks on top (which output is spent is
# in parentheses):
# genesis -> b1 (0) -> b2 (1)
block(1, spend=out[0])
save_spendable_output()
yield accepted()
block(2, spend=out[1])
yield accepted()
save_spendable_output()
# so fork like this:
#
# genesis -> b1 (0) -> b2 (1)
# \-> b3 (1)
#
# Nothing should happen at this point. We saw b2 first so it takes priority.
tip(1)
b3 = block(3, spend=out[1])
txout_b3 = PreviousSpendableOutput(b3.vtx[1], 0)
yield rejected()
# Now we add another block to make the alternative chain longer.
#
# genesis -> b1 (0) -> b2 (1)
# \-> b3 (1) -> b4 (2)
block(4, spend=out[2])
yield accepted()
# ... and back to the first chain.
# genesis -> b1 (0) -> b2 (1) -> b5 (2) -> b6 (3)
# \-> b3 (1) -> b4 (2)
tip(2)
block(5, spend=out[2])
save_spendable_output()
yield rejected()
block(6, spend=out[3])
yield accepted()
# Try to create a fork that double-spends
# genesis -> b1 (0) -> b2 (1) -> b5 (2) -> b6 (3)
# \-> b7 (2) -> b8 (4)
# \-> b3 (1) -> b4 (2)
tip(5)
block(7, spend=out[2])
yield rejected()
block(8, spend=out[4])
yield rejected()
# Try to create a block that has too much fee
# genesis -> b1 (0) -> b2 (1) -> b5 (2) -> b6 (3)
# \-> b9 (4)
# \-> b3 (1) -> b4 (2)
tip(6)
block(9, spend=out[4], additional_coinbase_value=1)
yield rejected(RejectResult(16, b'bad-cb-amount'))
# Create a fork that ends in a block with too much fee (the one that causes the reorg)
# genesis -> b1 (0) -> b2 (1) -> b5 (2) -> b6 (3)
# \-> b10 (3) -> b11 (4)
# \-> b3 (1) -> b4 (2)
tip(5)
block(10, spend=out[3])
yield rejected()
block(11, spend=out[4], additional_coinbase_value=1)
yield rejected(RejectResult(16, b'bad-cb-amount'))
# Try again, but with a valid fork first
# genesis -> b1 (0) -> b2 (1) -> b5 (2) -> b6 (3)
# \-> b12 (3) -> b13 (4) -> b14 (5)
# (b12 added last)
# \-> b3 (1) -> b4 (2)
tip(5)
b12 = block(12, spend=out[3])
save_spendable_output()
b13 = block(13, spend=out[4])
# Deliver the block header for b12, and the block b13.
# b13 should be accepted but the tip won't advance until b12 is delivered.
yield TestInstance([[CBlockHeader(b12), None], [b13, False]])
save_spendable_output()
# b14 is invalid, but the node won't know that until it tries to connect
# Tip still can't advance because b12 is missing
block(14, spend=out[5], additional_coinbase_value=1)
yield rejected()
yield TestInstance([[b12, True, b13.sha256]]) # New tip should be b13.
# Add a block with MAX_BLOCK_SIGOPS and one with one more sigop
# genesis -> b1 (0) -> b2 (1) -> b5 (2) -> b6 (3)
# \-> b12 (3) -> b13 (4) -> b15 (5) -> b16 (6)
# \-> b3 (1) -> b4 (2)
# Test that a block with a lot of checksigs is okay
lots_of_checksigs = CScript([OP_CHECKSIG] * (MAX_BLOCK_SIGOPS - 1))
tip(13)
block(15, spend=out[5], script=lots_of_checksigs)
yield accepted()
save_spendable_output()
# Test that a block with too many checksigs is rejected
too_many_checksigs = CScript([OP_CHECKSIG] * (MAX_BLOCK_SIGOPS))
block(16, spend=out[6], script=too_many_checksigs)
yield rejected(RejectResult(16, b'bad-blk-sigops'))
# Attempt to spend a transaction created on a different fork
# genesis -> b1 (0) -> b2 (1) -> b5 (2) -> b6 (3)
# \-> b12 (3) -> b13 (4) -> b15 (5) -> b17 (b3.vtx[1])
# \-> b3 (1) -> b4 (2)
tip(15)
block(17, spend=txout_b3)
yield rejected(RejectResult(16, b'bad-txns-inputs-missingorspent'))
# Attempt to spend a transaction created on a different fork (on a fork this time)
# genesis -> b1 (0) -> b2 (1) -> b5 (2) -> b6 (3)
# \-> b12 (3) -> b13 (4) -> b15 (5)
# \-> b18 (b3.vtx[1]) -> b19 (6)
# \-> b3 (1) -> b4 (2)
tip(13)
block(18, spend=txout_b3)
yield rejected()
block(19, spend=out[6])
yield rejected()
# Attempt to spend a coinbase at depth too low
# genesis -> b1 (0) -> b2 (1) -> b5 (2) -> b6 (3)
# \-> b12 (3) -> b13 (4) -> b15 (5) -> b20 (7)
# \-> b3 (1) -> b4 (2)
tip(15)
block(20, spend=out[7])
yield rejected(RejectResult(16, b'bad-txns-premature-spend-of-coinbase'))
# Attempt to spend a coinbase at depth too low (on a fork this time)
# genesis -> b1 (0) -> b2 (1) -> b5 (2) -> b6 (3)
# \-> b12 (3) -> b13 (4) -> b15 (5)
# \-> b21 (6) -> b22 (5)
# \-> b3 (1) -> b4 (2)
tip(13)
block(21, spend=out[6])
yield rejected()
block(22, spend=out[5])
yield rejected()
        # Create a block on either side of MAX_BLOCK_BASE_SIZE and make sure it's accepted/rejected
# genesis -> b1 (0) -> b2 (1) -> b5 (2) -> b6 (3)
# \-> b12 (3) -> b13 (4) -> b15 (5) -> b23 (6)
# \-> b24 (6) -> b25 (7)
# \-> b3 (1) -> b4 (2)
tip(15)
b23 = block(23, spend=out[6])
tx = CTransaction()
script_length = MAX_BLOCK_BASE_SIZE - len(b23.serialize()) - 69
script_output = CScript([b'\x00' * script_length])
tx.vout.append(CTxOut(0, script_output))
tx.vin.append(CTxIn(COutPoint(b23.vtx[1].sha256, 0)))
b23 = update_block(23, [tx])
# Make sure the math above worked out to produce a max-sized block
assert_equal(len(b23.serialize()), MAX_BLOCK_BASE_SIZE)
yield accepted()
save_spendable_output()
# Make the next block one byte bigger and check that it fails
tip(15)
b24 = block(24, spend=out[6])
script_length = MAX_BLOCK_BASE_SIZE - len(b24.serialize()) - 69
script_output = CScript([b'\x00' * (script_length+1)])
tx.vout = [CTxOut(0, script_output)]
b24 = update_block(24, [tx])
assert_equal(len(b24.serialize()), MAX_BLOCK_BASE_SIZE+1)
yield rejected(RejectResult(16, b'bad-blk-length'))
block(25, spend=out[7])
yield rejected()
# Create blocks with a coinbase input script size out of range
# genesis -> b1 (0) -> b2 (1) -> b5 (2) -> b6 (3)
# \-> b12 (3) -> b13 (4) -> b15 (5) -> b23 (6) -> b30 (7)
# \-> ... (6) -> ... (7)
# \-> b3 (1) -> b4 (2)
tip(15)
b26 = block(26, spend=out[6])
b26.vtx[0].vin[0].scriptSig = b'\x00'
b26.vtx[0].rehash()
# update_block causes the merkle root to get updated, even with no new
# transactions, and updates the required state.
b26 = update_block(26, [])
yield rejected(RejectResult(16, b'bad-cb-length'))
# Extend the b26 chain to make sure bitcoind isn't accepting b26
block(27, spend=out[7])
yield rejected(False)
# Now try a too-large-coinbase script
tip(15)
b28 = block(28, spend=out[6])
b28.vtx[0].vin[0].scriptSig = b'\x00' * 101
b28.vtx[0].rehash()
b28 = update_block(28, [])
yield rejected(RejectResult(16, b'bad-cb-length'))
# Extend the b28 chain to make sure bitcoind isn't accepting b28
block(29, spend=out[7])
yield rejected(False)
# b30 has a max-sized coinbase scriptSig.
tip(23)
b30 = block(30)
b30.vtx[0].vin[0].scriptSig = b'\x00' * 100
b30.vtx[0].rehash()
b30 = update_block(30, [])
yield accepted()
save_spendable_output()
# b31 - b35 - check sigops of OP_CHECKMULTISIG / OP_CHECKMULTISIGVERIFY / OP_CHECKSIGVERIFY
#
# genesis -> ... -> b30 (7) -> b31 (8) -> b33 (9) -> b35 (10)
# \-> b36 (11)
# \-> b34 (10)
# \-> b32 (9)
#
# MULTISIG: each op code counts as 20 sigops. To create the edge case, pack another 19 sigops at the end.
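# With MAX_BLOCK_SIGOPS = 20,000 that works out to 999 OP_CHECKMULTISIG * 20 = 19,980 sigops,
# plus 19 OP_CHECKSIG = 19,999, plus the coinbase's single sigop = exactly MAX_BLOCK_SIGOPS
# (verified by the assert below).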
lots_of_multisigs = CScript([OP_CHECKMULTISIG] * ((MAX_BLOCK_SIGOPS-1) // 20) + [OP_CHECKSIG] * 19)
b31 = block(31, spend=out[8], script=lots_of_multisigs)
assert_equal(get_legacy_sigopcount_block(b31), MAX_BLOCK_SIGOPS)
yield accepted()
save_spendable_output()
# this goes over the limit because the coinbase has one sigop
too_many_multisigs = CScript([OP_CHECKMULTISIG] * (MAX_BLOCK_SIGOPS // 20))
b32 = block(32, spend=out[9], script=too_many_multisigs)
assert_equal(get_legacy_sigopcount_block(b32), MAX_BLOCK_SIGOPS + 1)
yield rejected(RejectResult(16, b'bad-blk-sigops'))
# CHECKMULTISIGVERIFY
tip(31)
lots_of_multisigs = CScript([OP_CHECKMULTISIGVERIFY] * ((MAX_BLOCK_SIGOPS-1) // 20) + [OP_CHECKSIG] * 19)
block(33, spend=out[9], script=lots_of_multisigs)
yield accepted()
save_spendable_output()
too_many_multisigs = CScript([OP_CHECKMULTISIGVERIFY] * (MAX_BLOCK_SIGOPS // 20))
block(34, spend=out[10], script=too_many_multisigs)
yield rejected(RejectResult(16, b'bad-blk-sigops'))
# CHECKSIGVERIFY
tip(33)
lots_of_checksigs = CScript([OP_CHECKSIGVERIFY] * (MAX_BLOCK_SIGOPS - 1))
b35 = block(35, spend=out[10], script=lots_of_checksigs)
yield accepted()
save_spendable_output()
too_many_checksigs = CScript([OP_CHECKSIGVERIFY] * (MAX_BLOCK_SIGOPS))
block(36, spend=out[11], script=too_many_checksigs)
yield rejected(RejectResult(16, b'bad-blk-sigops'))
# Check spending of a transaction in a block which failed to connect
#
# b6 (3)
# b12 (3) -> b13 (4) -> b15 (5) -> b23 (6) -> b30 (7) -> b31 (8) -> b33 (9) -> b35 (10)
# \-> b37 (11)
# \-> b38 (11/37)
#
# save 37's spendable output, but then double-spend out11 to invalidate the block
tip(35)
b37 = block(37, spend=out[11])
txout_b37 = PreviousSpendableOutput(b37.vtx[1], 0)
tx = create_and_sign_tx(out[11].tx, out[11].n, 0)
b37 = update_block(37, [tx])
yield rejected(RejectResult(16, b'bad-txns-inputs-missingorspent'))
# attempt to spend b37's first non-coinbase tx, at which point b37 was still considered valid
tip(35)
block(38, spend=txout_b37)
yield rejected(RejectResult(16, b'bad-txns-inputs-missingorspent'))
# Check P2SH SigOp counting
#
#
# b13 (4) -> b15 (5) -> b23 (6) -> b30 (7) -> b31 (8) -> b33 (9) -> b35 (10) -> b39 (11) -> b41 (12)
# \-> b40 (12)
#
# b39 - create some P2SH outputs that will require 6 sigops to spend:
#
# redeem_script = COINBASE_PUBKEY, (OP_2DUP+OP_CHECKSIGVERIFY) * 5, OP_CHECKSIG
# p2sh_script = OP_HASH160, ripemd160(sha256(script)), OP_EQUAL
#
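# Spending one of these outputs runs the redeem script, which contains 5 OP_CHECKSIGVERIFY
# plus 1 OP_CHECKSIG, i.e. the 6 sigops per output tracked in b39_sigops_per_output below.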
tip(35)
b39 = block(39)
b39_outputs = 0
b39_sigops_per_output = 6
# Build the redeem script, hash it, use hash to create the p2sh script
redeem_script = CScript([self.coinbase_pubkey] + [OP_2DUP, OP_CHECKSIGVERIFY]*5 + [OP_CHECKSIG])
redeem_script_hash = hash160(redeem_script)
p2sh_script = CScript([OP_HASH160, redeem_script_hash, OP_EQUAL])
# Create a transaction that spends one satoshi to the p2sh_script, the rest to OP_TRUE
# This must be signed because it is spending a coinbase
spend = out[11]
tx = create_tx(spend.tx, spend.n, 1, p2sh_script)
tx.vout.append(CTxOut(spend.tx.vout[spend.n].nValue - 1, CScript([OP_TRUE])))
self.sign_tx(tx, spend.tx, spend.n)
tx.rehash()
b39 = update_block(39, [tx])
b39_outputs += 1
# Until block is full, add tx's with 1 satoshi to p2sh_script, the rest to OP_TRUE
tx_new = None
tx_last = tx
total_size=len(b39.serialize())
while(total_size < MAX_BLOCK_BASE_SIZE):
tx_new = create_tx(tx_last, 1, 1, p2sh_script)
tx_new.vout.append(CTxOut(tx_last.vout[1].nValue - 1, CScript([OP_TRUE])))
tx_new.rehash()
total_size += len(tx_new.serialize())
if total_size >= MAX_BLOCK_BASE_SIZE:
break
b39.vtx.append(tx_new) # add tx to block
tx_last = tx_new
b39_outputs += 1
b39 = update_block(39, [])
yield accepted()
save_spendable_output()
# Test sigops in P2SH redeem scripts
#
# b40 creates 3333 tx's spending the 6-sigop P2SH outputs from b39 for a total of 19998 sigops.
# The first tx has one sigop and then at the end we add 2 more to put us just over the max.
#
# b41 does the same, less one, so it has the maximum sigops permitted.
#
tip(39)
b40 = block(40, spend=out[12])
sigops = get_legacy_sigopcount_block(b40)
numTxes = (MAX_BLOCK_SIGOPS - sigops) // b39_sigops_per_output
assert_equal(numTxes <= b39_outputs, True)
lastOutpoint = COutPoint(b40.vtx[1].sha256, 0)
new_txs = []
for i in range(1, numTxes+1):
tx = CTransaction()
tx.vout.append(CTxOut(1, CScript([OP_TRUE])))
tx.vin.append(CTxIn(lastOutpoint, b''))
# second input is corresponding P2SH output from b39
tx.vin.append(CTxIn(COutPoint(b39.vtx[i].sha256, 0), b''))
# Note: must pass the redeem_script (not p2sh_script) to the signature hash function
(sighash, err) = SignatureHash(redeem_script, tx, 1, SIGHASH_ALL)
sig = self.coinbase_key.sign(sighash) + bytes(bytearray([SIGHASH_ALL]))
scriptSig = CScript([sig, redeem_script])
tx.vin[1].scriptSig = scriptSig
tx.rehash()
new_txs.append(tx)
lastOutpoint = COutPoint(tx.sha256, 0)
b40_sigops_to_fill = MAX_BLOCK_SIGOPS - (numTxes * b39_sigops_per_output + sigops) + 1
tx = CTransaction()
tx.vin.append(CTxIn(lastOutpoint, b''))
tx.vout.append(CTxOut(1, CScript([OP_CHECKSIG] * b40_sigops_to_fill)))
tx.rehash()
new_txs.append(tx)
update_block(40, new_txs)
yield rejected(RejectResult(16, b'bad-blk-sigops'))
# same as b40, but one less sigop
tip(39)
block(41, spend=None)
update_block(41, b40.vtx[1:-1])
b41_sigops_to_fill = b40_sigops_to_fill - 1
tx = CTransaction()
tx.vin.append(CTxIn(lastOutpoint, b''))
tx.vout.append(CTxOut(1, CScript([OP_CHECKSIG] * b41_sigops_to_fill)))
tx.rehash()
update_block(41, [tx])
yield accepted()
# Fork off of b39 to create a constant base again
#
# b23 (6) -> b30 (7) -> b31 (8) -> b33 (9) -> b35 (10) -> b39 (11) -> b42 (12) -> b43 (13)
# \-> b41 (12)
#
tip(39)
block(42, spend=out[12])
yield rejected()
save_spendable_output()
block(43, spend=out[13])
yield accepted()
save_spendable_output()
# Test a number of really invalid scenarios
#
# -> b31 (8) -> b33 (9) -> b35 (10) -> b39 (11) -> b42 (12) -> b43 (13) -> b44 (14)
# \-> ??? (15)
# The next few blocks are going to be created "by hand" since they'll do funky things, such as having
# the first transaction be non-coinbase, etc. The purpose of b44 is to make sure this works.
height = self.block_heights[self.tip.sha256] + 1
coinbase = create_coinbase(height, self.coinbase_pubkey)
b44 = CBlock()
b44.nVersion = 0x20000000
b44.nTime = self.tip.nTime + 1
b44.hashPrevBlock = self.tip.sha256
b44.nBits = 0x207fffff
b44.vtx.append(coinbase)
b44.hashMerkleRoot = b44.calc_merkle_root()
b44.solve()
self.tip = b44
self.block_heights[b44.sha256] = height
self.blocks[44] = b44
yield accepted()
# A block with a non-coinbase as the first tx
non_coinbase = create_tx(out[15].tx, out[15].n, 1)
b45 = CBlock()
b45.nVersion = 0x20000000
b45.nTime = self.tip.nTime + 1
b45.hashPrevBlock = self.tip.sha256
b45.nBits = 0x207fffff
b45.vtx.append(non_coinbase)
b45.hashMerkleRoot = b45.calc_merkle_root()
b45.calc_sha256()
b45.solve()
self.block_heights[b45.sha256] = self.block_heights[self.tip.sha256]+1
self.tip = b45
self.blocks[45] = b45
yield rejected(RejectResult(16, b'bad-cb-missing'))
# A block with no txns
tip(44)
b46 = CBlock()
b46.nVersion = 0x20000000
b46.nTime = b44.nTime+1
b46.hashPrevBlock = b44.sha256
b46.nBits = 0x207fffff
b46.vtx = []
b46.hashMerkleRoot = 0
b46.solve()
self.block_heights[b46.sha256] = self.block_heights[b44.sha256]+1
self.tip = b46
assert 46 not in self.blocks
self.blocks[46] = b46
s = ser_uint256(b46.hashMerkleRoot)
yield rejected(RejectResult(16, b'bad-blk-length'))
# NOTE: the following test is temporarily disabled
# A block with invalid work
#tip(44)
#b47 = block(47, solve=False)
#target = uint256_from_compact(b47.nBits)
#while b47.scrypt256 < target: #changed > to <
# b47.nNonce += 1
# b47.rehash()
#yield rejected(RejectResult(16, b'high-hash'))
# A block with timestamp > 2 hrs in the future
tip(44)
b48 = block(48, solve=False)
b48.nTime = int(time.time()) + 60 * 60 * 3
b48.solve()
yield rejected(RejectResult(16, b'time-too-new'))
# A block with an invalid merkle hash
tip(44)
b49 = block(49)
b49.hashMerkleRoot += 1
b49.solve()
yield rejected(RejectResult(16, b'bad-txnmrklroot'))
# A block with an incorrect POW limit
tip(44)
b50 = block(50)
b50.nBits = b50.nBits - 1
b50.solve()
yield rejected(RejectResult(16, b'bad-diffbits'))
# A block with two coinbase txns
tip(44)
b51 = block(51)
cb2 = create_coinbase(51, self.coinbase_pubkey)
b51 = update_block(51, [cb2])
yield rejected(RejectResult(16, b'bad-cb-multiple'))
# A block w/ duplicate txns
# Note: txns have to be in the right position in the merkle tree to trigger this error
tip(44)
b52 = block(52, spend=out[15])
tx = create_tx(b52.vtx[1], 0, 1)
b52 = update_block(52, [tx, tx])
yield rejected(RejectResult(16, b'bad-txns-duplicate'))
# Test block timestamps
# -> b31 (8) -> b33 (9) -> b35 (10) -> b39 (11) -> b42 (12) -> b43 (13) -> b53 (14) -> b55 (15)
# \-> b54 (15)
#
tip(43)
block(53, spend=out[14])
yield rejected() # rejected since b44 is at same height
save_spendable_output()
# invalid timestamp (b35 is 5 blocks back, so its time is MedianTimePast)
b54 = block(54, spend=out[15])
b54.nTime = b35.nTime - 1
b54.solve()
yield rejected(RejectResult(16, b'time-too-old'))
# valid timestamp
tip(53)
b55 = block(55, spend=out[15])
b55.nTime = b35.nTime
update_block(55, [])
yield accepted()
save_spendable_output()
# Test CVE-2012-2459
#
# -> b42 (12) -> b43 (13) -> b53 (14) -> b55 (15) -> b57p2 (16)
# \-> b57 (16)
# \-> b56p2 (16)
# \-> b56 (16)
#
# Merkle tree malleability (CVE-2012-2459): repeating sequences of transactions in a block without
# affecting the merkle root of a block, while still invalidating it.
# See: src/consensus/merkle.h
#
# b57 has three txns: coinbase, tx, tx1. The merkle root computation will duplicate tx1.
# Result: OK
#
# b56 copies b57 but duplicates tx1 and does not recalculate the block hash. So it has a valid merkle
# root but duplicate transactions.
# Result: Fails
#
# b57p2 has six transactions in its merkle tree:
# - coinbase, tx, tx1, tx2, tx3, tx4
# Merkle root calculation will duplicate as necessary.
# Result: OK.
#
# b56p2 copies b57p2 but re-adds both tx3 and tx4 (two non-consecutive duplicates). The purpose of the test is to make sure the code catches
# duplicate txns that are not next to one another with the "bad-txns-duplicate" error (which indicates
# that the error was caught early, avoiding a DOS vulnerability.)
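# Concretely: with vtx = [coinbase, tx, tx1] the odd leaf count makes the merkle computation
# pair tx1's hash with itself, so [coinbase, tx, tx1] and [coinbase, tx, tx1, tx1] share the
# same merkle root; that is the malleability b56 exploits.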
# b57 - a good block with 2 txs, don't submit until end
tip(55)
b57 = block(57)
tx = create_and_sign_tx(out[16].tx, out[16].n, 1)
tx1 = create_tx(tx, 0, 1)
b57 = update_block(57, [tx, tx1])
# b56 - copy b57, add a duplicate tx
tip(55)
b56 = copy.deepcopy(b57)
self.blocks[56] = b56
assert_equal(len(b56.vtx),3)
b56 = update_block(56, [tx1])
assert_equal(b56.hash, b57.hash)
yield rejected(RejectResult(16, b'bad-txns-duplicate'))
# b57p2 - a good block with 6 tx'es, don't submit until end
tip(55)
b57p2 = block("57p2")
tx = create_and_sign_tx(out[16].tx, out[16].n, 1)
tx1 = create_tx(tx, 0, 1)
tx2 = create_tx(tx1, 0, 1)
tx3 = create_tx(tx2, 0, 1)
tx4 = create_tx(tx3, 0, 1)
b57p2 = update_block("57p2", [tx, tx1, tx2, tx3, tx4])
# b56p2 - copy b57p2, duplicate two non-consecutive tx's
tip(55)
b56p2 = copy.deepcopy(b57p2)
self.blocks["b56p2"] = b56p2
assert_equal(b56p2.hash, b57p2.hash)
assert_equal(len(b56p2.vtx),6)
b56p2 = update_block("b56p2", [tx3, tx4])
yield rejected(RejectResult(16, b'bad-txns-duplicate'))
tip("57p2")
yield accepted()
tip(57)
yield rejected() #rejected because 57p2 seen first
save_spendable_output()
# Test a few invalid tx types
#
# -> b35 (10) -> b39 (11) -> b42 (12) -> b43 (13) -> b53 (14) -> b55 (15) -> b57 (16) -> b60 (17)
# \-> ??? (17)
#
# tx with prevout.n out of range
tip(57)
b58 = block(58, spend=out[17])
tx = CTransaction()
assert(len(out[17].tx.vout) < 42)
tx.vin.append(CTxIn(COutPoint(out[17].tx.sha256, 42), CScript([OP_TRUE]), 0xffffffff))
tx.vout.append(CTxOut(0, b""))
tx.calc_sha256()
b58 = update_block(58, [tx])
yield rejected(RejectResult(16, b'bad-txns-inputs-missingorspent'))
# tx with output value > input value out of range
tip(57)
b59 = block(59)
tx = create_and_sign_tx(out[17].tx, out[17].n, 51*COIN)
b59 = update_block(59, [tx])
yield rejected(RejectResult(16, b'bad-txns-in-belowout'))
# reset to good chain
tip(57)
b60 = block(60, spend=out[17])
yield accepted()
save_spendable_output()
# Test BIP30
#
# -> b39 (11) -> b42 (12) -> b43 (13) -> b53 (14) -> b55 (15) -> b57 (16) -> b60 (17)
# \-> b61 (18)
#
# Blocks are not allowed to contain a transaction whose id matches that of an earlier,
# not-fully-spent transaction in the same chain. To test, make identical coinbases;
# the second one should be rejected.
#
tip(60)
b61 = block(61, spend=out[18])
b61.vtx[0].vin[0].scriptSig = b60.vtx[0].vin[0].scriptSig #equalize the coinbases
b61.vtx[0].rehash()
b61 = update_block(61, [])
assert_equal(b60.vtx[0].serialize(), b61.vtx[0].serialize())
yield rejected(RejectResult(16, b'bad-txns-BIP30'))
# Test tx.isFinal is properly rejected (not an exhaustive tx.isFinal test, that should be in data-driven transaction tests)
#
# -> b39 (11) -> b42 (12) -> b43 (13) -> b53 (14) -> b55 (15) -> b57 (16) -> b60 (17)
# \-> b62 (18)
#
tip(60)
b62 = block(62)
tx = CTransaction()
tx.nLockTime = 0xffffffff #this locktime is non-final
assert(out[18].n < len(out[18].tx.vout))
tx.vin.append(CTxIn(COutPoint(out[18].tx.sha256, out[18].n))) # don't set nSequence
tx.vout.append(CTxOut(0, CScript([OP_TRUE])))
assert(tx.vin[0].nSequence < 0xffffffff)
tx.calc_sha256()
b62 = update_block(62, [tx])
yield rejected(RejectResult(16, b'bad-txns-nonfinal'))
# Test a non-final coinbase is also rejected
#
# -> b39 (11) -> b42 (12) -> b43 (13) -> b53 (14) -> b55 (15) -> b57 (16) -> b60 (17)
# \-> b63 (-)
#
tip(60)
b63 = block(63)
b63.vtx[0].nLockTime = 0xffffffff
b63.vtx[0].vin[0].nSequence = 0xDEADBEEF
b63.vtx[0].rehash()
b63 = update_block(63, [])
yield rejected(RejectResult(16, b'bad-txns-nonfinal'))
# This checks that a block with a bloated VARINT between the block_header and the array of tx such that
# the block is > MAX_BLOCK_BASE_SIZE with the bloated varint, but <= MAX_BLOCK_BASE_SIZE without the bloated varint,
# does not cause a subsequent, identical block with canonical encoding to be rejected. The test does not
# care whether the bloated block is accepted or rejected; it only cares that the second block is accepted.
#
# What matters is that the receiving node should not reject the bloated block, and then reject the canonical
# block on the basis that it's the same as an already-rejected block (which would be a consensus failure.)
#
# -> b39 (11) -> b42 (12) -> b43 (13) -> b53 (14) -> b55 (15) -> b57 (16) -> b60 (17) -> b64 (18)
# \
# b64a (18)
# b64a is a bloated block (non-canonical varint)
# b64 is a good block (same as b64a but w/ canonical varint)
#
tip(60)
regular_block = block("64a", spend=out[18])
# make it a "broken_block," with non-canonical serialization
b64a = CBrokenBlock(regular_block)
b64a.initialize(regular_block)
self.blocks["64a"] = b64a
self.tip = b64a
tx = CTransaction()
# use canonical serialization to calculate size
script_length = MAX_BLOCK_BASE_SIZE - len(b64a.normal_serialize()) - 69
script_output = CScript([b'\x00' * script_length])
tx.vout.append(CTxOut(0, script_output))
tx.vin.append(CTxIn(COutPoint(b64a.vtx[1].sha256, 0)))
b64a = update_block("64a", [tx])
assert_equal(len(b64a.serialize()), MAX_BLOCK_BASE_SIZE + 8)
yield TestInstance([[self.tip, None]])
# comptool workaround: to make sure b64 is delivered, manually erase b64a from blockstore
self.test.block_store.erase(b64a.sha256)
tip(60)
b64 = CBlock(b64a)
b64.vtx = copy.deepcopy(b64a.vtx)
assert_equal(b64.hash, b64a.hash)
assert_equal(len(b64.serialize()), MAX_BLOCK_BASE_SIZE)
self.blocks[64] = b64
update_block(64, [])
yield accepted()
save_spendable_output()
# Spend an output created in the block itself
#
# -> b42 (12) -> b43 (13) -> b53 (14) -> b55 (15) -> b57 (16) -> b60 (17) -> b64 (18) -> b65 (19)
#
tip(64)
block(65)
tx1 = create_and_sign_tx(out[19].tx, out[19].n, out[19].tx.vout[0].nValue)
tx2 = create_and_sign_tx(tx1, 0, 0)
update_block(65, [tx1, tx2])
yield accepted()
save_spendable_output()
# Attempt to spend an output created later in the same block
#
# -> b43 (13) -> b53 (14) -> b55 (15) -> b57 (16) -> b60 (17) -> b64 (18) -> b65 (19)
# \-> b66 (20)
tip(65)
block(66)
tx1 = create_and_sign_tx(out[20].tx, out[20].n, out[20].tx.vout[0].nValue)
tx2 = create_and_sign_tx(tx1, 0, 1)
update_block(66, [tx2, tx1])
yield rejected(RejectResult(16, b'bad-txns-inputs-missingorspent'))
# Attempt to double-spend a transaction created in a block
#
# -> b43 (13) -> b53 (14) -> b55 (15) -> b57 (16) -> b60 (17) -> b64 (18) -> b65 (19)
# \-> b67 (20)
#
#
tip(65)
block(67)
tx1 = create_and_sign_tx(out[20].tx, out[20].n, out[20].tx.vout[0].nValue)
tx2 = create_and_sign_tx(tx1, 0, 1)
tx3 = create_and_sign_tx(tx1, 0, 2)
update_block(67, [tx1, tx2, tx3])
yield rejected(RejectResult(16, b'bad-txns-inputs-missingorspent'))
# More tests of block subsidy
#
# -> b43 (13) -> b53 (14) -> b55 (15) -> b57 (16) -> b60 (17) -> b64 (18) -> b65 (19) -> b69 (20)
# \-> b68 (20)
#
# b68 - coinbase with an extra 10 satoshis,
# creates a tx that has 9 satoshis from out[20] go to fees
# this fails because the coinbase is trying to claim 1 satoshi too much in fees
#
# b69 - coinbase with extra 10 satoshis, and a tx that gives a 10 satoshi fee
# this succeeds
#
tip(65)
block(68, additional_coinbase_value=10)
tx = create_and_sign_tx(out[20].tx, out[20].n, out[20].tx.vout[0].nValue-9)
update_block(68, [tx])
yield rejected(RejectResult(16, b'bad-cb-amount'))
tip(65)
b69 = block(69, additional_coinbase_value=10)
tx = create_and_sign_tx(out[20].tx, out[20].n, out[20].tx.vout[0].nValue-10)
update_block(69, [tx])
yield accepted()
save_spendable_output()
# Test spending the outpoint of a non-existent transaction
#
# -> b53 (14) -> b55 (15) -> b57 (16) -> b60 (17) -> b64 (18) -> b65 (19) -> b69 (20)
# \-> b70 (21)
#
tip(69)
block(70, spend=out[21])
bogus_tx = CTransaction()
bogus_tx.sha256 = uint256_from_str(b"23c70ed7c0506e9178fc1a987f40a33946d4ad4c962b5ae3a52546da53af0c5c")
tx = CTransaction()
tx.vin.append(CTxIn(COutPoint(bogus_tx.sha256, 0), b"", 0xffffffff))
tx.vout.append(CTxOut(1, b""))
update_block(70, [tx])
yield rejected(RejectResult(16, b'bad-txns-inputs-missingorspent'))
# Test accepting an invalid block which has the same hash as a valid one (via merkle tree tricks)
#
# -> b53 (14) -> b55 (15) -> b57 (16) -> b60 (17) -> b64 (18) -> b65 (19) -> b69 (20) -> b72 (21)
# \-> b71 (21)
#
# b72 is a good block.
# b71 is a copy of 72, but re-adds one of its transactions. However, it has the same hash as b72.
#
tip(69)
b72 = block(72)
tx1 = create_and_sign_tx(out[21].tx, out[21].n, 2)
tx2 = create_and_sign_tx(tx1, 0, 1)
b72 = update_block(72, [tx1, tx2]) # now tip is 72
b71 = copy.deepcopy(b72)
b71.vtx.append(tx2) # add duplicate tx2
self.block_heights[b71.sha256] = self.block_heights[b69.sha256] + 1 # b71 builds off b69
self.blocks[71] = b71
assert_equal(len(b71.vtx), 4)
assert_equal(len(b72.vtx), 3)
assert_equal(b72.sha256, b71.sha256)
tip(71)
yield rejected(RejectResult(16, b'bad-txns-duplicate'))
tip(72)
yield accepted()
save_spendable_output()
# Test some invalid scripts and MAX_BLOCK_SIGOPS
#
# -> b55 (15) -> b57 (16) -> b60 (17) -> b64 (18) -> b65 (19) -> b69 (20) -> b72 (21)
# \-> b** (22)
#
# b73 - tx with excessive sigops that are placed after an excessively large script element.
# The purpose of the test is to make sure those sigops are counted.
#
# script is a bytearray of size 20,526
#
# bytearray[0-19,998] : OP_CHECKSIG
# bytearray[19,999] : OP_PUSHDATA4
# bytearray[20,000-20,003]: 521 (max_script_element_size+1, in little-endian format)
# bytearray[20,004-20,524]: unread data (script_element, 521 bytes)
# bytearray[20,525] : OP_CHECKSIG (this puts us over the limit)
#
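# Size check, assuming MAX_BLOCK_SIGOPS = 20,000 and MAX_SCRIPT_ELEMENT_SIZE = 520:
# 19,999 + 1 + 4 + 521 + 1 = 20,526 bytes; legacy counting sees 19,999 + 1 = 20,000 sigops
# in this script plus 1 in the coinbase, i.e. MAX_BLOCK_SIGOPS + 1 (matching the assert below).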
tip(72)
b73 = block(73)
size = MAX_BLOCK_SIGOPS - 1 + MAX_SCRIPT_ELEMENT_SIZE + 1 + 5 + 1
a = bytearray([OP_CHECKSIG] * size)
a[MAX_BLOCK_SIGOPS - 1] = int("4e",16) # OP_PUSHDATA4
element_size = MAX_SCRIPT_ELEMENT_SIZE + 1
a[MAX_BLOCK_SIGOPS] = element_size % 256
a[MAX_BLOCK_SIGOPS+1] = element_size // 256
a[MAX_BLOCK_SIGOPS+2] = 0
a[MAX_BLOCK_SIGOPS+3] = 0
tx = create_and_sign_tx(out[22].tx, 0, 1, CScript(a))
b73 = update_block(73, [tx])
assert_equal(get_legacy_sigopcount_block(b73), MAX_BLOCK_SIGOPS+1)
yield rejected(RejectResult(16, b'bad-blk-sigops'))
# b74/75 - if we push an invalid script element, all previous sigops are counted,
# but sigops after the element are not counted.
#
# The invalid script element is that the push_data indicates that
# there will be a large amount of data (0xffffff bytes), but we only
# provide a much smaller number. These bytes are CHECKSIGS so they would
# cause b75 to fail for excessive sigops, if those bytes were counted.
#
# b74 fails because we put MAX_BLOCK_SIGOPS+1 before the element
# b75 succeeds because we put MAX_BLOCK_SIGOPS before the element
#
#
tip(72)
b74 = block(74)
size = MAX_BLOCK_SIGOPS - 1 + MAX_SCRIPT_ELEMENT_SIZE + 42 # total = 20,561
a = bytearray([OP_CHECKSIG] * size)
a[MAX_BLOCK_SIGOPS] = 0x4e
a[MAX_BLOCK_SIGOPS+1] = 0xfe
a[MAX_BLOCK_SIGOPS+2] = 0xff
a[MAX_BLOCK_SIGOPS+3] = 0xff
a[MAX_BLOCK_SIGOPS+4] = 0xff
tx = create_and_sign_tx(out[22].tx, 0, 1, CScript(a))
b74 = update_block(74, [tx])
yield rejected(RejectResult(16, b'bad-blk-sigops'))
tip(72)
b75 = block(75)
size = MAX_BLOCK_SIGOPS - 1 + MAX_SCRIPT_ELEMENT_SIZE + 42
a = bytearray([OP_CHECKSIG] * size)
a[MAX_BLOCK_SIGOPS-1] = 0x4e
a[MAX_BLOCK_SIGOPS] = 0xff
a[MAX_BLOCK_SIGOPS+1] = 0xff
a[MAX_BLOCK_SIGOPS+2] = 0xff
a[MAX_BLOCK_SIGOPS+3] = 0xff
tx = create_and_sign_tx(out[22].tx, 0, 1, CScript(a))
b75 = update_block(75, [tx])
yield accepted()
save_spendable_output()
# Check that if we push an element filled with CHECKSIGs, they are not counted
tip(75)
b76 = block(76)
size = MAX_BLOCK_SIGOPS - 1 + MAX_SCRIPT_ELEMENT_SIZE + 1 + 5
a = bytearray([OP_CHECKSIG] * size)
a[MAX_BLOCK_SIGOPS-1] = 0x4e # PUSHDATA4, but leave the following bytes as just checksigs
tx = create_and_sign_tx(out[23].tx, 0, 1, CScript(a))
b76 = update_block(76, [tx])
yield accepted()
save_spendable_output()
# Test transaction resurrection
#
# -> b77 (24) -> b78 (25) -> b79 (26)
# \-> b80 (25) -> b81 (26) -> b82 (27)
#
# b78 creates a tx, which is spent in b79. After b82, both should be in mempool
#
# The tx'es must be unsigned and pass the node's mempool policy. It is unsigned for the
# rather obscure reason that the Python signature code does not distinguish between
# Low-S and High-S values (whereas the bitcoin code has custom code which does so);
# as a result of which, the odds are 50% that the python code will use the right
# value and the transaction will be accepted into the mempool. Until we modify the
# test framework to support low-S signing, we are out of luck.
#
# To get around this issue, we construct transactions which are not signed and which
# spend to OP_TRUE. If the standard-ness rules change, this test would need to be
# updated. (Perhaps to spend to a P2SH OP_TRUE script)
#
tip(76)
block(77)
tx77 = create_and_sign_tx(out[24].tx, out[24].n, 10*COIN)
update_block(77, [tx77])
yield accepted()
save_spendable_output()
block(78)
tx78 = create_tx(tx77, 0, 9*COIN)
update_block(78, [tx78])
yield accepted()
block(79)
tx79 = create_tx(tx78, 0, 8*COIN)
update_block(79, [tx79])
yield accepted()
# mempool should be empty
assert_equal(len(self.nodes[0].getrawmempool()), 0)
tip(77)
block(80, spend=out[25])
yield rejected()
save_spendable_output()
block(81, spend=out[26])
yield rejected() # other chain is same length
save_spendable_output()
block(82, spend=out[27])
yield accepted() # now this chain is longer, triggers re-org
save_spendable_output()
# now check that tx78 and tx79 have been put back into the peer's mempool
mempool = self.nodes[0].getrawmempool()
assert_equal(len(mempool), 2)
assert(tx78.hash in mempool)
assert(tx79.hash in mempool)
# Test invalid opcodes in dead execution paths.
#
# -> b81 (26) -> b82 (27) -> b83 (28)
#
block(83)
op_codes = [OP_IF, OP_INVALIDOPCODE, OP_ELSE, OP_TRUE, OP_ENDIF]
script = CScript(op_codes)
tx1 = create_and_sign_tx(out[28].tx, out[28].n, out[28].tx.vout[0].nValue, script)
tx2 = create_and_sign_tx(tx1, 0, 0, CScript([OP_TRUE]))
tx2.vin[0].scriptSig = CScript([OP_FALSE])
tx2.rehash()
update_block(83, [tx1, tx2])
yield accepted()
save_spendable_output()
# Reorg on/off blocks that have OP_RETURN in them (and try to spend them)
#
# -> b81 (26) -> b82 (27) -> b83 (28) -> b84 (29) -> b87 (30) -> b88 (31)
# \-> b85 (29) -> b86 (30) \-> b89a (32)
#
#
block(84)
tx1 = create_tx(out[29].tx, out[29].n, 0, CScript([OP_RETURN]))
tx1.vout.append(CTxOut(0, CScript([OP_TRUE])))
tx1.vout.append(CTxOut(0, CScript([OP_TRUE])))
tx1.vout.append(CTxOut(0, CScript([OP_TRUE])))
tx1.vout.append(CTxOut(0, CScript([OP_TRUE])))
tx1.calc_sha256()
self.sign_tx(tx1, out[29].tx, out[29].n)
tx1.rehash()
tx2 = create_tx(tx1, 1, 0, CScript([OP_RETURN]))
tx2.vout.append(CTxOut(0, CScript([OP_RETURN])))
tx3 = create_tx(tx1, 2, 0, CScript([OP_RETURN]))
tx3.vout.append(CTxOut(0, CScript([OP_TRUE])))
tx4 = create_tx(tx1, 3, 0, CScript([OP_TRUE]))
tx4.vout.append(CTxOut(0, CScript([OP_RETURN])))
tx5 = create_tx(tx1, 4, 0, CScript([OP_RETURN]))
update_block(84, [tx1,tx2,tx3,tx4,tx5])
yield accepted()
save_spendable_output()
tip(83)
block(85, spend=out[29])
yield rejected()
block(86, spend=out[30])
yield accepted()
tip(84)
block(87, spend=out[30])
yield rejected()
save_spendable_output()
block(88, spend=out[31])
yield accepted()
save_spendable_output()
# trying to spend the OP_RETURN output is rejected
block("89a", spend=out[32])
tx = create_tx(tx1, 0, 0, CScript([OP_TRUE]))
update_block("89a", [tx])
yield rejected()
# Test re-org of a week's worth of blocks (1088 blocks)
# This test takes a minute or two and can be accomplished in memory
#
if self.options.runbarelyexpensive:
tip(88)
LARGE_REORG_SIZE = 1088
test1 = TestInstance(sync_every_block=False)
spend=out[32]
for i in range(89, LARGE_REORG_SIZE + 89):
b = block(i, spend)
tx = CTransaction()
script_length = MAX_BLOCK_BASE_SIZE - len(b.serialize()) - 69
script_output = CScript([b'\x00' * script_length])
tx.vout.append(CTxOut(0, script_output))
tx.vin.append(CTxIn(COutPoint(b.vtx[1].sha256, 0)))
b = update_block(i, [tx])
assert_equal(len(b.serialize()), MAX_BLOCK_BASE_SIZE)
test1.blocks_and_transactions.append([self.tip, True])
save_spendable_output()
spend = get_spendable_output()
yield test1
chain1_tip = i
# now create alt chain of same length
tip(88)
test2 = TestInstance(sync_every_block=False)
for i in range(89, LARGE_REORG_SIZE + 89):
block("alt"+str(i))
test2.blocks_and_transactions.append([self.tip, False])
yield test2
# extend alt chain to trigger re-org
block("alt" + str(chain1_tip + 1))
yield accepted()
# ... and re-org back to the first chain
tip(chain1_tip)
block(chain1_tip + 1)
yield rejected()
block(chain1_tip + 2)
yield accepted()
chain1_tip += 2
if __name__ == '__main__':
FullBlockTest().main()
|
py | b405db692d1aa80f8dbe9b9ce10363a868d4523f | import time
import click
import cv2
from PIL import Image
from .processors import PreviewNativeProcessor, PreviewOpenCVProcessor
@click.command()
@click.argument('file_path', type=click.Path(exists=True))
@click.option('--output', type=click.Path(), default='output.jpg')
@click.option('--preview-height', type=int, default=750)
@click.option('--preview-width', type=int, default=1000)
@click.option('--fov', type=float, default=55.0)
@click.option('--latitude', type=float, default=0.0)
@click.option('--longitude', type=float, default=0.0)
@click.option('--no-opencv', type=bool, default=False)
def main(file_path, output, preview_height, preview_width, fov, latitude, longitude, no_opencv):
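"""Render a flat preview of FILE_PATH (assumed to be an equirectangular panorama) at the
requested size and field of view, centered on (latitude, longitude), and write it to --output."""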
click.echo(click.style("Image will be loaded from path '{}'".format(file_path), fg='blue'))
preview_size = (preview_width, preview_height)
click.echo(click.style("Preview will be generated at size '{}x{}'".format(preview_size[0], preview_size[1]), fg='blue'))
click.echo(click.style(" with fov = {} degrees".format(fov), fg='blue'))
click.echo(click.style(" at (latitude, longitude) = ({}, {})".format(latitude, longitude), fg='blue'))
click.echo("Loading image...", nl=False)
t0 = time.time()
if not no_opencv:
p = PreviewOpenCVProcessor(file_path, preview_size=preview_size, fov=fov, latitude=latitude, longitude=longitude)
else:
click.echo(click.style("Warning: very inefficient implementation, it might take more than a minute", fg='yellow')) # click has no 'orange'; 'yellow' is the closest supported color
p = PreviewNativeProcessor(file_path, preview_size=preview_size, fov=fov, latitude=latitude, longitude=longitude)
click.echo(" executed in {:.3f}s".format(time.time() - t0))
click.echo("Generating preview...", nl=False)
t0 = time.time()
out = p.generate()
click.echo(" executed in {:.3f}s".format(time.time() - t0))
click.echo("Saving the preview...", nl=False)
t0 = time.time()
cv2.imwrite(output, out)
click.echo(" executed in {:.3f}s".format(time.time() - t0))
click.echo(click.style("Done!", fg='green'))
if __name__ == "__main__":
main()
|
py | b405db85a7c24621a71e2fd93844ac6c99e5a691 | from django.contrib.contenttypes.fields import GenericForeignKey
from django.contrib.contenttypes.models import ContentType
from django.db import models
from .fields import TestField
class TestM2M(models.Model):
slug = models.SlugField()
class Meta:
app_label = "generic_plus"
class TestRelated(models.Model):
slug = models.SlugField()
class Meta:
app_label = "generic_plus"
class TestFileModel(models.Model):
content_type = models.ForeignKey(ContentType, on_delete=models.CASCADE)
object_id = models.PositiveIntegerField(null=True, blank=True)
content_object = GenericForeignKey('content_type', 'object_id')
field_identifier = models.SlugField(null=False, blank=True, default="")
file = models.FileField(upload_to="test")
description = models.TextField(blank=True)
related = models.ForeignKey(TestRelated, null=True, blank=True,
on_delete=models.CASCADE)
m2m = models.ManyToManyField(TestM2M, blank=True)
class Meta:
app_label = "generic_plus"
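# save() below mirrors the stored file name onto the matching TestField column of the
# instance referenced by content_type/object_id (fields are matched via field_identifier).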
def save(self, **kwargs):
super(TestFileModel, self).save(**kwargs)
model_class = self.content_type.model_class()
fields_with_models = [
(f, f.model if f.model != model_class else None)
for f in model_class._meta.get_fields()
if not f.is_relation
or f.one_to_one
or (f.many_to_one and f.related_model)]
for field, field_model_class in fields_with_models:
field_model_class = field_model_class or model_class
if (isinstance(field, TestField) and field.field_identifier == self.field_identifier):
field_model_class.objects.filter(pk=self.object_id).update(**{
field.attname: self.file.name or '',
})
class TestGenericPlusModel(models.Model):
slug = models.SlugField()
test_file = TestField(upload_to="test")
class Meta:
app_label = "generic_plus"
class SecondTestGenericPlusModel(models.Model):
slug = models.SlugField()
test_file = TestField(upload_to="test")
class Meta:
app_label = "generic_plus"
class OtherGenericRelatedModel(models.Model):
content_type = models.ForeignKey(ContentType, on_delete=models.CASCADE)
object_id = models.PositiveIntegerField(null=True, blank=True)
content_object = GenericForeignKey('content_type', 'object_id')
slug = models.SlugField()
class Meta:
app_label = "generic_plus"
|
py | b405dbbce808aa7b1e6cc0d0893cbdb8746adeea | #!/usr/bin/env python
#Author: Duy Tin Truong ([email protected])
# at CIBIO, University of Trento, Italy
__author__ = 'Duy Tin Truong ([email protected])'
__version__ = '0.1'
__date__ = '4 May 2015'
# import sys
# import os
import argparse as ap
import dendropy
from StringIO import StringIO
import re
from collections import defaultdict
# import ConfigParser
import matplotlib.colors as colors
import subprocess
def read_params():
p = ap.ArgumentParser()
p.add_argument('--ifn_tree',
required=True,
default=None,
type=str,
help='The input tree in newick format.')
p.add_argument('--colorized_metadata',
required=False,
default='unset',
type=str,
help='The metadata field to colorize. Default "unset".')
p.add_argument('--fig_size',
required=False,
default=8,
type=float,
help='The figure size. Default "8".')
p.add_argument('--legend_marker_size',
required=False,
default=20,
type=int,
help='The legend marker size. Default "20".'
)
p.add_argument('--legend_font_size',
required=False,
default=10,
type=int,
help='The legend font size. Default "10".'
)
p.add_argument('--legend_marker_edge_width',
required=False,
default=0.2,
type=float,
help='The legend marker edge width. Default "0.2".'
)
p.add_argument('--leaf_marker_size',
required=False,
default=20,
type=int,
help='The legend marker size. Default "20".'
)
p.add_argument('--leaf_marker_edge_width',
required=False,
default=0.2,
type=float,
help='The legend marker edge width. Default "0.2".'
)
p.add_argument('--dpi',
required=False,
default=300,
type=int,
help='The figure dpi.')
p.add_argument('--figure_extension',
required=False,
default='.png',
type=str,
help='The figure extension. Default ".png".')
p.add_argument('--ofn_prefix',
required=False,
default=None,
type=str,
help='The prefix of output files.')
return p.parse_args()
def run(cmd):
print cmd
subprocess.call(cmd.split())
def main(args):
tree = dendropy.Tree.get_from_path(args.ifn_tree, schema='newick',
preserve_underscores=True)
tree.reroot_at_midpoint()
count = 0
metadatas = set([])
node2metadata = {}
for node in tree.preorder_node_iter():
nodestr = node.get_node_str().strip("'")
if node.is_leaf():
if '.' in nodestr:
nodestr = nodestr.replace('.',',')
node.taxon = dendropy.Taxon(label=nodestr)
substrs = re.findall(
'%s-[a-zA-Z0-9.]*'%args.colorized_metadata,
nodestr)
if substrs:
md = substrs[0].replace(args.colorized_metadata + '-', '')
metadatas.add(md)
node2metadata[nodestr] = md
else:
count += 1
node.taxon = dendropy.Taxon(label='node_%d'%count)
metadatas = sorted(list(metadatas))
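# Map each metadata value to a matplotlib named color, cycling through the available
# names if there are more values than colors.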
color_names = colors.cnames.keys()
metadata2color = {}
for i, md in enumerate(metadatas):
metadata2color[md] = color_names[i % len(color_names)]
if not args.ofn_prefix:
args.ofn_prefix = args.ifn_tree
ofn_tree = args.ofn_prefix + '.graphlantree'
tree.write_to_path(ofn_tree, 'newick')
ofn_annot = args.ofn_prefix + '.annot'
with open(ofn_annot, 'w') as ofile:
#ofile.write('clade_separation\t0\n')
ofile.write('branch_bracket_width\t0\n')
#ofile.write('clade_separation\t0.15\n')
ofile.write('branch_bracket_depth\t0\n')
#ofile.write('branch_thickness\t1.25\n')
ofile.write('annotation_background_width\t0\n')
# legend
ofile.write('#legends\n')
ofile.write('class_legend_font_size\t%d\n'%args.legend_font_size)
for md in metadata2color:
ofile.write('%s\tclade_marker_size\t%d\n'%(md, args.legend_marker_size))
ofile.write('%s\tclade_marker_color\t%s\n'%(md, metadata2color[md]))
ofile.write('%s\tclade_marker_edge_width\t%f\n'%(md, args.legend_marker_edge_width))
# remove intermediate nodes
for node in tree.preorder_node_iter():
if not node.is_leaf():
nodestr = node.get_node_str().strip("'")
ofile.write('%s\tclade_marker_size\t0\n'%(nodestr))
# colorize leaf nodes
for node in tree.seed_node.leaf_nodes():
nodestr = node.get_node_str().strip("'")
if nodestr in node2metadata:
leaf_color = metadata2color[node2metadata[nodestr]]
ofile.write('%s\tclade_marker_size\t%d\n'%(nodestr, args.leaf_marker_size))
ofile.write('%s\tclade_marker_color\t%s\n'%(nodestr, leaf_color))
ofile.write('%s\tclade_marker_edge_width\t%f\n'%(nodestr, args.leaf_marker_edge_width))
ofn_xml = args.ofn_prefix + '.xml'
cmd = 'graphlan_annotate.py --annot %s %s %s'%(ofn_annot, ofn_tree, ofn_xml)
run(cmd)
ofn_fig = args.ofn_prefix + args.figure_extension
cmd = 'graphlan.py %s %s --dpi %d --size %f'%(ofn_xml, ofn_fig, args.dpi, args.fig_size)
run(cmd)
print 'Output file: %s'%ofn_fig
if __name__ == "__main__":
args = read_params()
main(args)
#test()
|
py | b405dc5b973afc9f6b4c8f5faa833712863b4d17 | # -*- coding:utf-8 -*-
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from django import forms
from CadVlan.messages import error_messages
class UserGroupForm(forms.Form):
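# The 'users' choices are filled in at runtime from the API response passed to __init__
# (a dict whose "users" list holds entries with "id" and "nome" keys).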
def __init__(self, user_list, *args, **kwargs):
super(UserGroupForm, self).__init__(*args, **kwargs)
if user_list is not None:
self.fields['users'].choices = [
(st['id'], st['nome']) for st in user_list["users"]]
users = forms.MultipleChoiceField(
label=u'Usuários', required=True, error_messages=error_messages, widget=forms.SelectMultiple(attrs={'style': "width: 310px"}))
class PermissionGroupForm(forms.Form):
def __init__(self, function_list, *args, **kwargs):
super(PermissionGroupForm, self).__init__(*args, **kwargs)
self.fields['function'].choices = [
(st['id'], st['function']) for st in function_list]
id_group_perms = forms.IntegerField(
label="", required=False, widget=forms.HiddenInput(), error_messages=error_messages)
function = forms.ChoiceField(
label=u'Função', required=True, error_messages=error_messages)
read = forms.BooleanField(
label=u'Leitura', required=False, error_messages=error_messages)
write = forms.BooleanField(
label=u'Escrita', required=False, error_messages=error_messages)
class IndividualPermsGroupUserEditForm(forms.Form):
id = forms.IntegerField(label='', widget=forms.HiddenInput(), required=False)
id_obj = forms.IntegerField(label="", widget=forms.HiddenInput(), required=False)
id_type_obj = forms.IntegerField(label="", widget=forms.HiddenInput(), required=False)
id_group = forms.IntegerField(label="", widget=forms.HiddenInput(), required=False)
read = forms.BooleanField(widget=forms.CheckboxInput(), required=False)
write = forms.BooleanField(widget=forms.CheckboxInput(), required=False)
change_config = forms.BooleanField(widget=forms.CheckboxInput(), required=False)
delete = forms.BooleanField(widget=forms.CheckboxInput(), required=False)
class IndividualPermsGroupUserCreateForm(forms.Form):
def __init__(self, fields, *args, **kwargs):
super(IndividualPermsGroupUserCreateForm, self).__init__(*args, **kwargs)
if fields is not None:
self.fields['user_group'].choices = [
(ch['id'], ch['nome']) for ch in fields['user_group']]
self.fields['object_type'].choices = [
(ch['id'], ch['name']) for ch in fields['object_type']]
id = forms.IntegerField(label='', widget=forms.HiddenInput(), required=False)
id_obj = forms.IntegerField(label="", widget=forms.HiddenInput(), required=False)
user_group = forms.ChoiceField(
label=u'Grupo do Usuário',
choices=[],
required=True,
error_messages=error_messages,
widget=forms.Select(attrs={
'style': 'width: 310px',
'class': 'select2'}
)
)
object_type = forms.ChoiceField(
label=u'Tipo do Objeto',
choices=[],
required=True,
error_messages=error_messages,
widget=forms.Select(attrs={
'style': 'width: 310px',
'class': 'select2'}
)
)
read = forms.BooleanField(widget=forms.CheckboxInput(), required=False)
write = forms.BooleanField(widget=forms.CheckboxInput(), required=False)
change_config = forms.BooleanField(widget=forms.CheckboxInput(), required=False)
delete = forms.BooleanField(widget=forms.CheckboxInput(), required=False)
class GeneralPermsGroupUserEditForm(forms.Form):
id = forms.IntegerField(label='', widget=forms.HiddenInput(), required=False)
id_type_obj = forms.IntegerField(label="", widget=forms.HiddenInput(), required=False)
id_group = forms.IntegerField(label="", widget=forms.HiddenInput(), required=False)
read = forms.BooleanField(widget=forms.CheckboxInput(), required=False)
write = forms.BooleanField(widget=forms.CheckboxInput(), required=False)
change_config = forms.BooleanField(widget=forms.CheckboxInput(), required=False)
delete = forms.BooleanField(widget=forms.CheckboxInput(), required=False)
class DeletePermsForm(forms.Form):
ids_perms = forms.CharField(widget=forms.HiddenInput(), label='', required=False)
id_ugroup = forms.IntegerField(widget=forms.HiddenInput, label='', required=False)
id_type_obj = forms.IntegerField(widget=forms.HiddenInput, label='', required=False)
id_obj = forms.IntegerField(widget=forms.HiddenInput, label='', required=False)
class HiddenIdsPermsForm(forms.Form):
id_ugroup = forms.IntegerField(widget=forms.HiddenInput, label='', required=False)
id_type_obj = forms.IntegerField(widget=forms.HiddenInput, label='', required=False)
|
py | b405dce7228e75f01e326c818eebd06964be5349 | import fnmatch
import os
import shutil
import subprocess
from settings import model_dir, path_to_pipeline_config, \
path_to_od_lib, path_to_od_dir
from object_detector.util import update_finetune_checkpoint
def train():
"""
Retrain the current version of the object detection model.
Normally, this function is only called by train_models() in flask_label/api.py.
It is assumed that a train record called 'train.record' exists in the metadata directory and
that the pipeline.config in the metadata directory is updated accordingly.
These assumptions are fulfilled when api.py/train_models is used.
"""
new_ckpt = ''
od_model_dir = os.path.join(model_dir, 'ssd_mobilenet_v2_coco_2018_03_29')
existent_checkpoints = [name for name in os.listdir(od_model_dir) if
os.path.isdir(os.path.join(od_model_dir, name))]
existent_checkpoints.sort(key=int)
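# Checkpoint directories are named with consecutive integers, so a numeric sort puts them
# in chronological order (the newest checkpoint is the last element).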
# remove incomplete checkpoints
for ckpt in list(existent_checkpoints): # iterate over a copy, since incomplete checkpoints are removed from the list in the loop body
is_legit = False
path_to_ckpt = os.path.join(od_model_dir, ckpt)
chkpt_files = os.listdir(path_to_ckpt)
for f in chkpt_files:
if fnmatch.fnmatch(f, 'saved_model.pb'):
is_legit = True
break
if not is_legit:
shutil.rmtree(path_to_ckpt)
existent_checkpoints.remove(ckpt)
# only keep last 10 checkpoints
if len(existent_checkpoints) > 10:
path_to_ckpt = os.path.join(od_model_dir, existent_checkpoints[0])
shutil.rmtree(path_to_ckpt)
actual_checkpoint = existent_checkpoints[-1]
actual_checkpoint_dir = os.path.join(od_model_dir, actual_checkpoint)
new_checkpoint_dir = os.path.join(od_model_dir, str(int(actual_checkpoint) + 1))
files_in_actual_ckpt = os.listdir(actual_checkpoint_dir)
for f in files_in_actual_ckpt:
if fnmatch.fnmatch(f, 'model.ckpt*'):
update_finetune_checkpoint(os.path.join(actual_checkpoint_dir, 'model.ckpt'))
break
path_to_train_script = os.path.join(path_to_od_dir, 'model_main.py')
path_to_export_script = os.path.join(path_to_od_lib, 'export_inference_graph.py')
train_command = ['python', path_to_train_script, '--pipeline_config_path',
path_to_pipeline_config, '--model_dir', new_checkpoint_dir]
p = subprocess.Popen(train_command, shell=False, stdout=subprocess.PIPE)
p.communicate()
files_in_new_ckpt = os.listdir(new_checkpoint_dir)
for f in files_in_new_ckpt:
if fnmatch.fnmatch(f, 'model.ckpt*'):
new_ckpt = os.path.splitext(f)[0]
break
path_to_model_ckpt = os.path.join(new_checkpoint_dir, new_ckpt)
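# Export the freshly trained checkpoint as an inference graph; export_inference_graph.py
# writes a saved_model/ directory, which is flattened into the checkpoint dir below.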
export_command = ['python', path_to_export_script, '--input_type', 'image_tensor',
'--pipeline_config_path', path_to_pipeline_config,
'--trained_checkpoint_prefix', path_to_model_ckpt,
'--output_directory', new_checkpoint_dir]
p = subprocess.Popen(export_command, shell=False, stdout=subprocess.PIPE)
p.communicate()
shutil.move(os.path.join(new_checkpoint_dir, 'saved_model', 'saved_model.pb'),
os.path.join(new_checkpoint_dir, 'saved_model.pb'))
if __name__ == '__main__':
train()
|
py | b405dd389e167f2294e6b19ea1e3431cf9578995 | import gym
import pybullet_envs
import numpy
import time
import sys
sys.path.insert(0, '../..')
import libs_agents
from libs_common.Training import *
import models.ddpg_baseline.model.src.model_critic as ModelCritic
import models.ddpg_baseline.model.src.model_actor as ModelActor
import models.ddpg_baseline.model.src.config as Config
path = "models/ddpg_baseline/model/"
env = pybullet_envs.make("AntBulletEnv-v0")
env.render()
agent = libs_agents.AgentDDPG(env, ModelCritic, ModelActor, Config)
max_iterations = 4*(10**6)
#trainig = TrainingIterations(env, agent, max_iterations, path, 10000)
#trainig.run()
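# Training is left commented out above; load the pre-trained weights and run the agent
# in inference mode purely for visualization.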
agent.load(path)
agent.disable_training()
while True:
reward, done = agent.main()
env.render()
time.sleep(0.01)
|
py | b405dd8f9065a43c189ce77ddf163cf5a6bfc417 | from django.contrib import admin
from django.contrib.auth.admin import UserAdmin as BaseUserAdmin
from django.utils.translation import gettext as _
from core import models
class UserAdmin(BaseUserAdmin):
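# Admin layout for the custom User model: list and order users by email, and edit email,
# password, name, the permission flags and last_login (no username field is exposed).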
ordering = ['id']
list_display = ['email', 'name']
fieldsets = (
(
None, {'fields': ('email', 'password')}
),
(
_('Personal Info'), {'fields': ('name',)}
),
(
_('Permissions'),
{
'fields': (
'is_active',
'is_staff',
'is_superuser',
)
}
),
(
_('Important dates'), {'fields': ('last_login',)}
),
)
add_fieldsets = (
(None, {
'classes': ('wide',),
'fields': ('email', 'password1', 'password2')
}),
)
admin.site.register(models.User, UserAdmin)
|
py | b405de29ac7f656d4faee33a57619935219d0dee | """
Django settings for photoalbum project.
Generated by 'django-admin startproject' using Django 3.1.2.
For more information on this file, see
https://docs.djangoproject.com/en/3.1/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/3.1/ref/settings/
"""
import os
from pathlib import Path
import cloudinary
# Build paths inside the project like this: BASE_DIR / 'subdir'.
BASE_DIR = Path(__file__).resolve().parent.parent
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/3.1/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = 'l4l36m*3wx+r4ctvo8i_@mqhj^v)1uj8m%(+7w5zson1-!z)^7'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = ['*']
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'gallery',
'bootstrap3',
'cloudinary',
]
MIDDLEWARE = [
'whitenoise.middleware.WhiteNoiseMiddleware',
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'photoalbum.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
'django.template.context_processors.media',
],
},
},
]
WSGI_APPLICATION = 'photoalbum.wsgi.application'
# Database
# https://docs.djangoproject.com/en/3.1/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.postgresql',
'NAME': os.environ.get('DB_NAME'),
'USER': os.environ.get('DB_USER'),
'PASSWORD': os.environ.get('DB_PASSWORD'),
'HOST':os.environ.get('DB_HOST', '127.0.0.1'),
'PORT':os.environ.get('DB_PORT', 5432),
}
}
# Password validation
# https://docs.djangoproject.com/en/3.1/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/3.1/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'Africa/Nairobi'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/3.1/howto/static-files/
STATIC_URL = '/static/'
STATIC_ROOT = os.path.join(BASE_DIR, 'staticfiles')
STATICFILES_DIRS = [
os.path.join(BASE_DIR, "static"),
]
MEDIA_URL = '/media/'
MEDIA_ROOT = os.path.join(BASE_DIR, 'media')
# cloudinary.config(
# cloud_name = os.environ.get('CLOUDINARY_CLOUD_NAME'),
# api_key = os.environ.get('CLOUDINARY_API_KEY'),
# api_secret = os.environ.get('CLOUDINARY_API_SECRET')
# )
cloudinary.config(
cloud_name = 'siderra',
api_key = '575213331593564',
api_secret = 'n0Xt_UrUvM6EmbmzRM04PpCR5Rs',
) |
py | b405dfd2cf072158fecab9214943050c822664d2 | #!/usr/bin/env python3
import subprocess
import argparse
import gzip
import sys
import os
import re
"""
GTF example rows:
StringTie:
chr1 StringTie transcript 11869 14409 . + . transcript_id "ENST00000456328.2"; gene_id "MSTRG.47"; gene_name "DDX11L1"; xloc "XLOC_000047"; ref_gene_id "ENSG00000223972.5"; cmp_ref "ENST00000456328.2"; class_code "="; tss_id "TSS47";
Ensembl GRCh38.103:
1 havana transcript 11869 14409 . + . gene_id "ENSG00000223972"; gene_version "5"; transcript_id "ENST00000456328"; transcript_version "2"; gene_name "DDX11L1"; gene_source "havana"; gene_biotype "transcribed_unprocessed_pseudogene"; transcript_name "DDX11L1-202"; transcript_source "havana"; transcript_biotype "processed_transcript"; tag "basic"; transcript_support_level "1";
gencode.v37:
chr1 HAVANA transcript 11869 14409 . + . gene_id "ENSG00000223972.5"; transcript_id "ENST00000456328.2"; gene_type "transcribed_unprocessed_pseudogene"; gene_name "DDX11L1"; transcript_type "processed_transcript"; transcript_name "DDX11L1-202"; level 2; transcript_support_level "1"; hgnc_id "HGNC:37102"; tag "basic"; havana_gene "OTTHUMG00000000961.2"; havana_transcript "OTTHUMT00000362751.1";
"""
################################################################################
def setup_argument_parser():
"""Setup argparse parser."""
help_description = """
Extract transcript biotype info from --in-gtf,
and add it to --add-gtf file. Write new GTF to --out-gtf.
"""
# Define argument parser.
p = argparse.ArgumentParser(add_help=False,
prog="gtf_add_transcript_biotype_info.py",
description=help_description,
formatter_class=argparse.MetavarTypeHelpFormatter)
# Argument groups.
p_man = p.add_argument_group("REQUIRED ARGUMENTS")
p_opt = p.add_argument_group("OPTIONAL ARGUMENTS")
# Arguments.
p_opt.add_argument("-h", "--help",
action="help",
help="Print help message")
p_man.add_argument("--in-gtf",
dest="in_gtf",
type=str,
required = True,
help = "GTF file to take gene and transcript infos from")
p_man.add_argument("--add-gtf",
dest="add_gtf",
type=str,
required = True,
help = "GTF file to add gene and transcript infos to")
p_man.add_argument("--out-gtf",
dest="out_gtf",
type=str,
required = True,
help = "Output GTF file to write -add-gtf with added infos to")
p_opt.add_argument("--only-std-chr",
dest="only_std_chr",
default = False,
action = "store_true",
help = "Output only standard chromosome entries (chr1, chr2 ... ) (default: False)")
return p
################################################################################
def gtf_get_transcript_biotypes(in_gtf):
"""
Get transcript biotype info for each transript ID.
Return transcript ID -> transcript biotype mapping.
"""
# Biotype to count dic.
trid2tbt_dic = {}
# Open GTF either as .gz or as text file.
if re.search(".+\.gz$", in_gtf):
f = gzip.open(in_gtf, 'rt')
else:
f = open(in_gtf, "r")
for line in f:
# Skip header.
if re.search("^#", line):
continue
cols = line.strip().split("\t")
feature = cols[2]
infos = cols[8]
if feature != "transcript":
continue
# Extract transcript ID.
m = re.search('transcript_id "(.+?)"', infos)
assert m, "transcript_id entry missing in GTF file \"%s\", line \"%s\"" %(in_gtf, line)
transcript_id = m.group(1)
# Extract transcript biotype.
m = re.search('transcript_biotype "(.+?)"', infos)
if not m:
m = re.search('transcript_type "(.+?)"', infos)
assert m, "transcript_biotype or transcript_type entry missing in GTF file \"%s\", line \"%s\"" %(in_gtf, line)
transcript_biotype = m.group(1)
trid2tbt_dic[transcript_id] = transcript_biotype
f.close()
# Check and return to shack.
assert trid2tbt_dic, "no transcript biotype information read in"
return trid2tbt_dic
################################################################################
def check_convert_chr_id(chr_id):
"""
Check and convert chromosome IDs to format:
chr1, chr2, chrX, ...
If chromosome IDs like 1,2,X, .. given, convert to chr1, chr2, chrX ..
Return False if given chr_id not standard and not convertable.
Filter out scaffold IDs like:
GL000009.2, KI270442.1, chr14_GL000009v2_random
chrUn_KI270442v1 ...
>>> chr_id = "chrX"
>>> check_convert_chr_id(chr_id)
'chrX'
>>> chr_id = "4"
>>> check_convert_chr_id(chr_id)
'chr4'
>>> chr_id = "MT"
>>> check_convert_chr_id(chr_id)
'chrM'
>>> chr_id = "GL000009.2"
>>> check_convert_chr_id(chr_id)
False
>>> chr_id = "chrUn_KI270442v1"
>>> check_convert_chr_id(chr_id)
False
"""
assert chr_id, "given chr_id empty"
if re.search("^chr", chr_id):
if not re.search("^chr[\dMXY]+$", chr_id):
chr_id = False
else:
# Convert to "chr" IDs.
if chr_id == "MT":
chr_id = "M"
if re.search("^[\dMXY]+$", chr_id):
chr_id = "chr" + chr_id
else:
chr_id = False
return chr_id
################################################################################
def gtf_add_infos(add_gtf, out_gtf, trid2tbt_dic,
only_std_chr=False):
"""
Add infos to add_gtf. Output to out_gtf.
"""
GTFOUT = open(out_gtf, "w")
c_added_info = 0
c_no_new_info = 0
if re.search(".+\.gz$", add_gtf):
f = gzip.open(add_gtf, 'rt')
else:
f = open(add_gtf, "r")
for line in f:
if re.search("^#", line):
GTFOUT.write(line)
continue
cols = line.strip().split("\t")
col1 = cols[0]
col2 = cols[1]
feature = cols[2]
col4 = cols[3]
col5 = cols[4]
col6 = cols[5]
col7 = cols[6]
col8 = cols[7]
infos = cols[8]
if only_std_chr:
new_chr_id = check_convert_chr_id(col1)
if not new_chr_id:
continue
else:
col1 = new_chr_id
if feature == "transcript":
m = re.search('transcript_id "(.+?)"', infos)
assert m, "transcript_id entry missing in GTF file \"%s\", line \"%s\"" %(add_gtf, line)
tr_id = m.group(1)
tr_biotype_str = 'transcript_type "-";'
if tr_id in trid2tbt_dic:
c_added_info += 1
tr_biotype = trid2tbt_dic[tr_id]
tr_biotype_str = 'transcript_type "' + tr_biotype + '";'
else:
c_no_new_info += 1
new_infos = infos + " " + tr_biotype_str
GTFOUT.write("%s\t%s\t%s\t%s\t%s\t%s\t%s\t%s\t%s\n" %(col1, col2, feature, col4, col5, col6, col7, col8, new_infos))
else:
GTFOUT.write(line)
f.close()
GTFOUT.close()
print("# --add-gtf transcript IDs with added info from --in-gtf: %s" %(c_added_info))
print("# --add-gtf transcript IDs not appearing in --in-gtf: %s" %(c_no_new_info))
################################################################################
if __name__ == '__main__':
parser = setup_argument_parser()
args = parser.parse_args()
assert os.path.exists(args.in_gtf), "--in-gtf %s not found" %(args.in_gtf)
assert os.path.exists(args.add_gtf), "--add-gtf %s not found" %(args.add_gtf)
assert args.in_gtf != args.add_gtf, "--in-gtf == --add-gtf (!)"
print("Get transcript biotype info from --in-gtf ... ")
trid2tbt_dic = gtf_get_transcript_biotypes(args.in_gtf)
c_trids = len(trid2tbt_dic)
tbt2c_dic = {}
for tr_id in trid2tbt_dic:
tbt = trid2tbt_dic[tr_id]
if tbt not in tbt2c_dic:
tbt2c_dic[tbt] = 1
else:
tbt2c_dic[tbt] += 1
c_tbts = len(tbt2c_dic)
print("# of transcript IDs read in from --in-gtf: %i" %(c_trids))
print("# of associated transcript biotypes: %i" %(c_tbts))
print("")
if tbt2c_dic:
print("Encountered transcript biotypes and counts:")
for tbt, tbt_c in sorted(tbt2c_dic.items(), key=lambda item: item[1], reverse=True):
print("\"%s\" %i" %(tbt, tbt_c))
print("")
print("Add info to --add-gtf and store in new --out-gtf ...")
gtf_add_infos(args.add_gtf, args.out_gtf, trid2tbt_dic,
only_std_chr=args.only_std_chr)
print("")
|
py | b405dfe499f2cd599a6c84f4b877e2a3e46f9ba1 | class Solution:
def reformatDate(self, date: str) -> str:
month_idxes = {month : idx for idx, month in enumerate(["Jan", "Feb", "Mar", "Apr", "May", "Jun", "Jul", "Aug", "Sep", "Oct", "Nov", "Dec"], 1)}
day, month, year = date.split()
d = int(day[:-2])
month = month_idxes[month]
year = int(year)
return f"{year}-{month:02d}-{d:02d}"
|
py | b405e154c9d246b39f2fb5eae553d715a1f7bcb0 |
import json
import re
import copy
import warnings
warnings.simplefilter(action='ignore', category=FutureWarning)
from skopt import BayesSearchCV
from skopt.space import Real, Integer
from sklearn.svm import SVC
from sklearn.model_selection import StratifiedKFold
import pandas as pd
import numpy as np
def main():
df_train = pd.read_csv('../train_dataset.csv')
df_test = pd.read_csv('../test_dataset.csv')
X_train, y_train = df_train.iloc[:, 2:].values, df_train.iloc[:, 0].values
X_test, y_test = df_test.iloc[:, 2:].values, df_test.iloc[:, 0].values
# log-uniform: understand as search over p = exp(x) by varying x
opt = BayesSearchCV(
estimator=SVC(),
# ref: https://github.com/automl/auto-sklearn/blob/master/autosklearn/pipeline/components/classification/libsvm_svc.py
search_spaces={
'C': Real(1e-6, 1e+6, 'log-uniform'),
'gamma': Real(3.0517578125e-05, 8, 'log-uniform'),
'kernel': ['rbf', 'poly', 'sigmoid'], # categorical parameter
'decision_function_shape': ['ovo', 'ovr'],
'degree': Integer(2, 5),
'coef0': Real(-1, 1, 'uniform'),
'tol': Real(1e-5, 1e-1, 'log-uniform')
},
cv = StratifiedKFold(
n_splits=10,
shuffle=True
),
n_jobs=3,
n_iter=100,
verbose=0,
refit=True
)
def status_print(_):
"""Status callback durring bayesian hyperparameter search"""
# Get all the models tested so far in DataFrame format
all_models = pd.DataFrame(opt.cv_results_)
best_parap_copy = copy.deepcopy(opt.best_params_)
for k, v in opt.best_params_.items():
best_parap_copy[k] = v if isinstance(
v, str) or isinstance(v, float) else v.item()
param_list = []
for each in json.dumps(best_parap_copy)[1:-1].split(', '):
param_list.append('='.join(each[1:].split('": ')))
if hasattr(opt.estimator, 'verbose'):
param_list.append('verbose=True')
param = opt.estimator.__class__.__name__ + \
'(' + ', '.join(param_list) + ')'
# Get current parameters and the best parameters
print('Model #{}\nBest CV score: {}\nBest params: {}\n'.format(
len(all_models),
np.round(opt.best_score_, 4),
param
))
opt.fit(X_train, y_train, callback=status_print)
print("val. score: %s" % opt.best_score_)
print("test score: %s" % opt.score(X_test, y_test))
if __name__ == '__main__':
main()
|
py | b405e15a7b783c74bba05a9fc0756b4b96c998c5 | import logging
from airflow.utils.task_group import TaskGroup
from airflow.operators.python import PythonOperator
from debussy_framework.v2.operators.basic import StartOperator
from debussy_concert.core.motif.motif_base import MotifBase
class StartMotif(MotifBase):
def __init__(self, name=None) -> None:
super().__init__(name=name)
def build(self, dag, parent_task_group):
task_group = TaskGroup(group_id=self.name, parent_group=parent_task_group)
start_dag = StartOperator(phase="dag", dag=dag, task_group=task_group)
log_input = PythonOperator(
task_id='log_input',
python_callable=lambda **x: logging.info(x),
op_kwargs={'config': self.config},
dag=dag,
task_group=task_group
)
start_dag >> log_input
return task_group
|
py | b405e3dc0500a8bbb789712f5ec1ecfa6424019b | """pynetdicom configuration options"""
# Default (non-user) event logging
# * If 'none' then events will not be logged at all, however there will still
# be some logging (warnings, errors, etc)
# * If 'standard' then certain events will be logged (association
# negotiation, DIMSE messaging, etc)
# Usage:
# from pynetdicom import _config
# _config.LOG_HANDLER_LEVEL = ('none'|'standard')
LOG_HANDLER_LEVEL = 'standard'
# Enforce UID conformance
# * If True then UIDs will be checked to ensure they're conformant to the
# DICOM Standard and if not then an appropriate response sent.
# * If False then UIDs will only be checked to ensure they're no longer
# than 64 characters and if not then an appropriate response sent.
# Usage:
# from pynetdicom import _config
# _config.ENFORCE_UID_CONFORMANCE = (True|False)
ENFORCE_UID_CONFORMANCE = False
|
py | b405e3e1068e2dc4e11be2c2418d0cd9ebdab312 | # Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import datetime
from ironic_lib import metrics_utils
from oslo_log import log
from oslo_utils import timeutils
import pecan
from pecan import rest
import wsme
from wsme import types as wtypes
from ironic.api.controllers import base
from ironic.api.controllers import link
from ironic.api.controllers.v1 import collection
from ironic.api.controllers.v1 import types
from ironic.api.controllers.v1 import utils as api_utils
from ironic.api import expose
from ironic.common import exception
from ironic.common.i18n import _
from ironic.common import policy
import ironic.conf
from ironic import objects
CONF = ironic.conf.CONF
LOG = log.getLogger(__name__)
METRICS = metrics_utils.get_metrics_logger(__name__)
_DEFAULT_RETURN_FIELDS = ('hostname', 'conductor_group', 'alive')
class Conductor(base.APIBase):
"""API representation of a bare metal conductor."""
hostname = wsme.wsattr(wtypes.text)
"""The hostname for this conductor"""
conductor_group = wsme.wsattr(wtypes.text)
"""The conductor group this conductor belongs to"""
alive = types.boolean
"""Indicates whether this conductor is considered alive"""
drivers = wsme.wsattr([wtypes.text])
"""The drivers enabled on this conductor"""
links = wsme.wsattr([link.Link])
"""A list containing a self link and associated conductor links"""
def __init__(self, **kwargs):
self.fields = []
fields = list(objects.Conductor.fields)
# NOTE(kaifeng): alive is not part of objects.Conductor.fields
# because it's an API-only attribute.
fields.append('alive')
for field in fields:
# Skip fields we do not expose.
if not hasattr(self, field):
continue
self.fields.append(field)
setattr(self, field, kwargs.get(field, wtypes.Unset))
@staticmethod
def _convert_with_links(conductor, url, fields=None):
conductor.links = [link.Link.make_link('self', url, 'conductors',
conductor.hostname),
link.Link.make_link('bookmark', url, 'conductors',
conductor.hostname,
bookmark=True)]
return conductor
@classmethod
def convert_with_links(cls, rpc_conductor, fields=None):
conductor = Conductor(**rpc_conductor.as_dict())
conductor.alive = not timeutils.is_older_than(
conductor.updated_at, CONF.conductor.heartbeat_timeout)
if fields is not None:
api_utils.check_for_invalid_fields(fields, conductor.as_dict())
conductor = cls._convert_with_links(conductor,
pecan.request.public_url,
fields=fields)
conductor.sanitize(fields)
return conductor
def sanitize(self, fields):
"""Removes sensitive and unrequested data.
Will only keep the fields specified in the ``fields`` parameter.
:param fields:
list of fields to preserve, or ``None`` to preserve them all
:type fields: list of str
"""
if fields is not None:
self.unset_fields_except(fields)
@classmethod
def sample(cls, expand=True):
time = datetime.datetime(2000, 1, 1, 12, 0, 0)
sample = cls(hostname='computer01',
conductor_group='',
alive=True,
drivers=['ipmi'],
created_at=time,
updated_at=time)
fields = None if expand else _DEFAULT_RETURN_FIELDS
return cls._convert_with_links(sample, 'http://localhost:6385',
fields=fields)
class ConductorCollection(collection.Collection):
"""API representation of a collection of conductors."""
conductors = [Conductor]
"""A list containing conductor objects"""
def __init__(self, **kwargs):
self._type = 'conductors'
# NOTE(kaifeng) Override because conductors use hostname instead of uuid.
@classmethod
def get_key_field(cls):
return 'hostname'
@staticmethod
def convert_with_links(conductors, limit, url=None, fields=None, **kwargs):
collection = ConductorCollection()
collection.conductors = [Conductor.convert_with_links(c, fields=fields)
for c in conductors]
collection.next = collection.get_next(limit, url=url, **kwargs)
for conductor in collection.conductors:
conductor.sanitize(fields)
return collection
@classmethod
def sample(cls):
sample = cls()
conductor = Conductor.sample(expand=False)
sample.conductors = [conductor]
return sample
class ConductorsController(rest.RestController):
"""REST controller for conductors."""
invalid_sort_key_list = ['alive', 'drivers']
def _get_conductors_collection(self, marker, limit, sort_key, sort_dir,
resource_url=None, fields=None,
detail=None):
limit = api_utils.validate_limit(limit)
sort_dir = api_utils.validate_sort_dir(sort_dir)
if sort_key in self.invalid_sort_key_list:
raise exception.InvalidParameterValue(
_("The sort_key value %(key)s is an invalid field for "
"sorting") % {'key': sort_key})
marker_obj = None
if marker:
marker_obj = objects.Conductor.get_by_hostname(
pecan.request.context, marker, online=None)
conductors = objects.Conductor.list(pecan.request.context, limit=limit,
marker=marker_obj,
sort_key=sort_key,
sort_dir=sort_dir)
parameters = {'sort_key': sort_key, 'sort_dir': sort_dir}
if detail is not None:
parameters['detail'] = detail
return ConductorCollection.convert_with_links(conductors, limit,
url=resource_url,
fields=fields,
**parameters)
@METRICS.timer('ConductorsController.get_all')
@expose.expose(ConductorCollection, types.name, int, wtypes.text,
wtypes.text, types.listtype, types.boolean)
def get_all(self, marker=None, limit=None, sort_key='id', sort_dir='asc',
fields=None, detail=None):
"""Retrieve a list of conductors.
:param marker: pagination marker for large data sets.
:param limit: maximum number of resources to return in a single result.
This value cannot be larger than the value of max_limit
in the [api] section of the ironic configuration, or only
max_limit resources will be returned.
:param sort_key: column to sort results by. Default: id.
:param sort_dir: direction to sort. "asc" or "desc". Default: asc.
:param fields: Optional, a list with a specified set of fields
of the resource to be returned.
:param detail: Optional, boolean to indicate whether retrieve a list
of conductors with detail.
"""
cdict = pecan.request.context.to_policy_values()
policy.authorize('baremetal:conductor:get', cdict, cdict)
if not api_utils.allow_expose_conductors():
raise exception.NotFound()
api_utils.check_allow_specify_fields(fields)
api_utils.check_allowed_fields(fields)
api_utils.check_allowed_fields([sort_key])
fields = api_utils.get_request_return_fields(fields, detail,
_DEFAULT_RETURN_FIELDS)
return self._get_conductors_collection(marker, limit, sort_key,
sort_dir, fields=fields,
detail=detail)
@METRICS.timer('ConductorsController.get_one')
@expose.expose(Conductor, types.name, types.listtype)
def get_one(self, hostname, fields=None):
"""Retrieve information about the given conductor.
:param hostname: hostname of a conductor.
:param fields: Optional, a list with a specified set of fields
of the resource to be returned.
"""
cdict = pecan.request.context.to_policy_values()
policy.authorize('baremetal:conductor:get', cdict, cdict)
if not api_utils.allow_expose_conductors():
raise exception.NotFound()
api_utils.check_allow_specify_fields(fields)
api_utils.check_allowed_fields(fields)
conductor = objects.Conductor.get_by_hostname(pecan.request.context,
hostname, online=None)
return Conductor.convert_with_links(conductor, fields=fields)
|
py | b405e3e75906cbd3ffde6d2da5214628029bf7b2 | import os
import torch
import torch.utils.data as data
import torchvision.transforms as transforms
import torchvision.datasets as datasets
from .misc import get_cifar_models
from collections import OrderedDict
__all__ = [
'load_optimizer',
'load_learning_rate_schedule',
'load_checkpoint',
# cifar
'load_transform',
'load_dataset',
'load_model',
# detection
'load_state_dict_path',
'load_checkpoint_path',
'load_ensemble_path',
]
def __process_state_dict(state_dict):
new_state_dict = OrderedDict()
for k, v in state_dict.items():
name = k[7:] if k[:7] == "module." else k
new_state_dict[name] = v
return new_state_dict
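# Illustrative example (key name is assumed, not taken from this repo): a
# DataParallel checkpoint key such as "module.conv1.weight" is remapped to
# "conv1.weight", while keys without the "module." prefix pass through as-is:
#   __process_state_dict({"module.conv1.weight": w})  # -> {"conv1.weight": w}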
# General loaders compatible with cifar and imagenet
def load_optimizer(args, model):
# Get optimiser name
opt_name = args.optim.lower()
# Print message to LOG
print("==> Creating '{}' optimiser".format(opt_name))
# Supports SGD, Adam and RMSprop
if opt_name.startswith("sgd"):
optimizer = torch.optim.SGD(
model.parameters(),
lr = args.lr,
momentum = args.momentum,
weight_decay = args.weight_decay,
nesterov = "nesterov" in opt_name,
)
elif opt_name.startswith("adam"):
optimizer = torch.optim.Adam(
model.parameters(),
lr = args.lr,
weight_decay = args.weight_decay,
)
elif opt_name == "rmsprop":
optimizer = torch.optim.RMSprop(
model.parameters(),
lr = args.lr,
momentum = args.momentum,
weight_decay = args.weight_decay,
eps = 0.0316,
alpha = 0.9,
)
else:
msg = "Invalid optimizer {}. Only SGD and RMSprop are supported."
raise RuntimeError(msg.format(args.opt))
return optimizer
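# Hedged usage sketch: the attribute names mirror the ones this function reads
# (args.optim, args.lr, args.momentum, args.weight_decay); the concrete values
# and the model variable are placeholders only.
#   from argparse import Namespace
#   args = Namespace(optim="sgd", lr=0.1, momentum=0.9, weight_decay=5e-4)
#   optimizer = load_optimizer(args, model)  # model: any torch.nn.Module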
def load_learning_rate_schedule(args, optimizer):
args.lr_scheduler = args.lr_scheduler.lower()
# Print message to LOG
print("==> Creating '{}' learning rate scheduler".format(args.lr_scheduler))
# Supports MultiStepLR, StepLR, ExponentialLR and CyclicLR schedules
if args.lr_scheduler == "multisteplr":
main_lr_scheduler = torch.optim.lr_scheduler.MultiStepLR(
optimizer, milestones = args.schedule, gamma = args.gamma)
elif args.lr_scheduler == "steplr":
main_lr_scheduler = torch.optim.lr_scheduler.StepLR(
optimizer, step_size=args.schedule_step, gamma = args.gamma)
elif args.lr_scheduler == "exponentiallr":
main_lr_scheduler = torch.optim.lr_scheduler.ExponentialLR(
optimizer, gamma = args.gamma)
elif args.lr_scheduler == "cycliclr":
step_size_up = args.total_steps // 2
step_size_down = args.total_steps - step_size_up
main_lr_scheduler = torch.optim.lr_scheduler.CyclicLR(
optimizer, base_lr = args.base_lr, max_lr = args.max_lr,
step_size_up=step_size_up, step_size_down=step_size_down)
else:
raise RuntimeError(
"Invalid lr scheduler '{}'. Only MultiStepLR, StepLR and ExponentialLR "
"are supported.".format(args.lr_scheduler)
)
return main_lr_scheduler
# Use this when training models
def load_checkpoint(args, model, optimizer, reset = False):
# Defaults
best_acc = 0.0
start_epoch = 0
# Load checkpoint
# args.checkpoint = os.path.dirname(args.resume)
checkpoint = torch.load(args.resume)
# Extract information
if not reset:
best_acc = checkpoint['best_acc']
start_epoch = checkpoint['epoch']
# For DataParallel and loading issues
try: model.load_state_dict(__process_state_dict(checkpoint['state_dict']))
except RuntimeError: model.model.load_state_dict(__process_state_dict(checkpoint['state_dict']))
# optimizer.load_state_dict(checkpoint['optimizer'])
return model, optimizer, best_acc, start_epoch
# Loaders only compatible with cifar
def load_transform(args):
# Let the normalisation layer be different for daf
normalize = transforms.Normalize((0.4914, 0.4822, 0.4465), (0.2023, 0.1994, 0.2010))
if args.arch.startswith('daf'): normalize = transforms.Normalize((0.50, 0.50, 0.50), (0.50, 0.50, 0.50))
# Default transformation
transform_train = transform_test = transforms.Compose([
transforms.ToTensor(),
normalize,
])
# And with data augmentation
if args.augment:
transform_train = transforms.Compose([
transforms.RandomCrop(32, padding = 4),
transforms.RandomHorizontalFlip(),
transforms.ToTensor(),
normalize,
])
return transform_train, transform_test
def load_dataset(args, transform_train, transform_test, return_sets = False):
if args.dataset == 'cifar10':
dataloader = datasets.CIFAR10
num_classes = 10
else:
dataloader = datasets.CIFAR100
num_classes = 100
trainloader = None
if transform_train is not None:
trainset = dataloader(root='./data', train=True, download=True, transform=transform_train)
trainloader = data.DataLoader(trainset, batch_size=args.train_batch, shuffle=True, num_workers=args.workers)
testloader = None
if transform_test is not None:
testset = dataloader(root='./data', train=False, download=False, transform=transform_test)
testloader = data.DataLoader(testset, batch_size=args.test_batch, shuffle=False, num_workers=args.workers)
if return_sets: return trainloader, testloader, num_classes, (trainset, testset)
return trainloader, testloader, num_classes
def load_model(args, models, num_classes):
if 'densenet' in args.arch:
model = models.__dict__[args.arch](args = args)
elif 'daf' in args.arch:
model = models.__dict__[args.arch](args = args)
else: raise ValueError("==> Model architecture can not be loaded.")
return model
# These loaders are used for detection
def load_state_dict_path(path):
# Load checkpoint
assert os.path.isfile(path) or os.path.islink(path), 'Error: no checkpoint directory found!'
# Get checkpoint dict
checkpoint = torch.load(path)
# Get attributes
best_acc = checkpoint['best_acc']
start_epoch = checkpoint['epoch']
state_dict = checkpoint['state_dict']
return __process_state_dict(state_dict), {'best_acc': best_acc, 'start_epoch': start_epoch}
def load_checkpoint_path(args, num_classes, path, use_cuda):
# Get model directory
if 'cifar' in args.dataset:
models = get_cifar_models()
model = load_model(args, models, num_classes)
if use_cuda: model = model.cuda()
# Get state dict
state_dict, info = load_state_dict_path(path)
model.load_state_dict(state_dict)
return model
def load_ensemble_path(args, num_classes, path, use_cuda):
# Load every model in ensemble
ensemble = []
for file in os.listdir(path):
# Create full path to file
filepath = os.path.join(path, file)
print("Loading model from:", filepath)
ensemble.append(load_checkpoint_path(args, num_classes, filepath, use_cuda))
return ensemble
|
py | b405e4068039467dbbffe41ae09beb58b202d3aa | import sys
DOCKER_SOURCE_HEADER = """
################################### SOURCE #####################################
"""
DEPENDENCIES = """
RUN apt-get -o Acquire::ForceIPv4=true update \\
&& apt-get -o Acquire::ForceIPv4=true install -yq --no-install-recommends \\
%s && apt-get clean \\
&& rm -rf /var/lib/apt/lists/*
"""
CLONE = """
RUN git clone %s /%s \\
&& cd /%s \\"""
CHECKOUT = """
&& git checkout %s \\"""
UNZIP = """
RUN mkdir /%s && cd /%s \\
&& wget %s \\
&& unzip %s && rm %s \\
&& cd %s-%s \\"""
BUILD_CMAKE = """
&& mkdir build \\
&& cd build \\
&& cmake %s %s \\
&& %s \\"""
BUILD_CATKIN_MAKE = """
&& mkdir -p ${HOME}/catkin_ws/src \\
&& cp -R /%s ${HOME}/catkin_ws/src/. \\
&& cd ${HOME}/catkin_ws \\
&& apt-get -o Acquire::ForceIPv4=true update \\
&& /bin/bash -c "source /opt/ros/${ROS_DISTRO}/setup.bash && rosdep update && rosdep install --as-root apt:false --from-paths src --ignore-src -r -y" \\
&& apt-get clean \\
&& rm -rf /var/lib/apt/lists/* \\
&& /bin/bash -c "source /opt/ros/${ROS_DISTRO}/setup.bash && catkin_make" \\"""
BUILD_CATKIN_BUILD = """
&& mkdir -p ${HOME}/catkin_ws/src \\
&& cp -R /%s ${HOME}/catkin_ws/src/. \\
&& cd ${HOME}/catkin_ws \\
&& apt-get -o Acquire::ForceIPv4=true update \\
&& /bin/bash -c "source /opt/ros/${ROS_DISTRO}/setup.bash && rosdep update && rosdep install --as-root apt:false --from-paths src --ignore-src -r -y" \\
&& apt-get clean \\
&& rm -rf /var/lib/apt/lists/* \\
&& /bin/bash -c "source /opt/ros/${ROS_DISTRO}/setup.bash && catkin build" \\"""
KEEP_SOURCE = """
&& cp -R /%s ${HOME}/. \\"""
DELETE = """
&& rm -fr /%s
"""
def write(DOCKER_FILE, package_list):
s = DOCKER_SOURCE_HEADER
for p in package_list:
if 'depends' in p.keys() and p['depends']:
pstr = ''
for apt_pack in p['depends']:
pstr += ' ' + apt_pack + ' \\\n'
s += DEPENDENCIES % pstr
if 'repo' in p.keys():
s += CLONE % (p['repo'], p['name'], p['name'])
elif 'zip' in p.keys():
url = p['zip']
archive = url.split('/')[-1]
version = '.'.join(archive.split('.')[:-1])
s += UNZIP % (p['name'], p['name'], url, archive, archive, p['name'], version)
else:
print('source: cannot get source for package %s' % p['name'])
sys.exit(1)
if 'checkout' in p.keys():
s += CHECKOUT % p['checkout']
if p['build'] == 'cmake':
if 'cmake_options' in p.keys():
cmake_options = p['cmake_options']
else:
cmake_options = ''
if 'cmake_folder' in p.keys():
cmake_folder = p['cmake_folder']
else:
cmake_folder = '../'
if 'make_command' in p.keys():
make_command = p['make_command']
else:
make_command = 'make -j4 install'
s += BUILD_CMAKE % (cmake_options, cmake_folder, make_command)
if 'keep_source' in p.keys() and p['keep_source'] == True:
s += KEEP_SOURCE % p['name']
elif p['build'] == 'catkin_build':
s += BUILD_CATKIN_BUILD % p['name']
elif p['build'] == 'catkin_make':
s += BUILD_CATKIN_MAKE % p['name']
else:
print("Warning: build method '%s' not defined for package '%s'" % (p['build'], p['name']))
s += DELETE % p['name']
with open(DOCKER_FILE, "a") as dockerfile:
dockerfile.write(s)
return
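# Hedged example of the package_list structure write() expects; the field names
# are inferred from the checks above and the values are purely illustrative.
#   package_list = [{
#       'name': 'mylib',
#       'repo': 'https://github.com/example/mylib.git',
#       'checkout': 'v1.0.0',
#       'depends': ['build-essential', 'cmake'],
#       'build': 'cmake',
#       'cmake_options': '-DCMAKE_BUILD_TYPE=Release',
#   }]
#   write('Dockerfile', package_list)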
|
py | b405e69f8ec132e9309fd63c6e0cccbea467c0e3 | import os
import os.path as osp
import argparse
import cv2
import numpy as np
from PIL import Image
from skimage.filters import gaussian
import torch
import torchvision.transforms as transforms
def convert_linear(img):
return img.convert('L')
def convert_canny(img, th1=100, th2=200):
# https://learnopencv.com/edge-detection-using-opencv/
img_blur = cv2.GaussianBlur(np.array(img.convert('L')), (3, 3), 0)
edges = cv2.Canny(img_blur, th1, th2)
inv = cv2.bitwise_not(edges)
return Image.fromarray(inv.astype('uint8'), 'L')
def convert_xdog(img, sigma=0.8, k=1.6, gamma=0.98, eps=-0.1, phi=200,
thresh=False):
'''
https://github.com/aaroswings/XDoG-Python/blob/main/XDoG.py
sigma=0.8, k=1.6, gamma=0.98, eps=-0.1, phi=200
https://github.com/heitorrapela/xdog/blob/master/main.py
sigma=0.5, k=1.6, gamma=1, eps=1, phi=1
these values do not work and lead to all black results (designed for uint8)
https://subscription.packtpub.com/book/data/9781789537147/1/ch01lvl1sec06/creating-pencil-sketches-from-images
sigma=0.5, k=200, gamma=1, eps=0.01, phi=10
these values do get edges but does not look like a sketch or manga
'''
img = np.array(img.convert('L'))
g_filtered_1 = gaussian(img, sigma)
g_filtered_2 = gaussian(img, sigma * k)
z = g_filtered_1 - gamma * g_filtered_2
z[z < eps] = 1.
mask = z >= eps
z[mask] = 1. + np.tanh(phi * z[mask])
if thresh:
mean = z.mean()
z[z < mean] = 0.
z[z >= mean] = 1.
z = cv2.normalize(src=z, dst=None, alpha=0, beta=255,
norm_type=cv2.NORM_MINMAX, dtype=cv2.CV_8U)
return Image.fromarray(z.astype('uint8'), 'L')
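# Hedged usage sketch (file names are placeholders): convert_xdog takes a PIL
# image and returns a single-channel sketch-like PIL image.
#   img = Image.open('input.jpg')
#   sketch = convert_xdog(img, thresh=False)
#   sketch.save('xdog_sketch.png')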
def convert_xdog_serial(img, alg='xdog_serial_0.5', k=4.5, gamma=19, eps=0.01, phi=10**9):
'''
https://github.com/SerialLain3170/Colorization/blob/c920440413429af588e0b6bd6799640d1feda68e/nohint_pix2pix/xdog.py
sigma_range=[0.3, 0.4, 0.5], k_sigma=4.5, p=19, eps=0.01, phi=10**9,
sigma_large = sigma * k_sigma
p is similar to gamma but also multiplies by first gaussian
'''
sigma = float(alg.split('_')[2])
img = np.array(img.convert('L'))
g_filtered_1 = gaussian(img, sigma)
g_filtered_2 = gaussian(img, sigma * k)
z = (1+gamma) * g_filtered_1 - gamma * g_filtered_2
si = np.multiply(img, z)
edges = np.zeros(si.shape)
si_bright = si >= eps
si_dark = si < eps
edges[si_bright] = 1.0
edges[si_dark] = 1.0 + np.tanh(phi * (si[si_dark] - eps))
edges = cv2.normalize(src=edges, dst=None, alpha=0, beta=255,
norm_type=cv2.NORM_MINMAX, dtype=cv2.CV_8U)
return Image.fromarray(edges.astype('uint8'), 'L')
def convert_sketchkeras(img, model, thresh=0.1, image_size=512):
'''
https://github.com/lllyasviel/sketchKeras
https://github.com/higumax/sketchKeras-pytorch
'''
device = next(model.parameters()).device
width, height = img.size
tsfm_resize_og = transforms.Resize((height, width),
transforms.InterpolationMode.BICUBIC)
if width > height:
new_width, new_height = (image_size, int(image_size / width * height))
else:
new_width, new_height = (int(image_size / height * width), image_size)
img_array = np.array(img.convert('RGB'))
img_resized = cv2.resize(img_array, (new_width, new_height))
new_height, new_width, c = img_resized.shape
blurred = cv2.GaussianBlur(img_resized, (0, 0), 3)
highpass = img_resized.astype(int) - blurred.astype(int)
highpass = highpass.astype(float) / 128.0
highpass /= np.max(highpass)
ret = np.zeros((image_size, image_size, 3), dtype=float)
ret[0:new_height, 0:new_width, 0:c] = highpass
x = ret.reshape(1, *ret.shape).transpose(3, 0, 1, 2)
x = torch.tensor(x).float().to(device)
with torch.no_grad():
pred = model(x).squeeze()
pred = pred.cpu().detach().numpy()
pred = np.amax(pred, 0)
pred[pred < thresh] = 0
pred = 1 - pred
pred *= 255
pred = np.clip(pred, 0, 255).astype(np.uint8)
pred = pred[:new_height, :new_width]
pred = Image.fromarray(pred, 'L')
output_og_size = tsfm_resize_og(pred)
return output_og_size
def convert_aoda(img, model, image_size=512):
'''
Adversarial Open Domain Adaptation for Sketch-to-Photo Synthesis
https://github.com/Mukosame/Anime2Sketch
https://github.com/Mukosame/AODA
'''
device = next(model.parameters()).device
tsfm = transforms.Compose([
transforms.Resize((image_size, image_size),
transforms.InterpolationMode.BICUBIC),
transforms.ToTensor(),
transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))])
width, height = img.size
tsfm_resize_og = transforms.Resize((height, width),
transforms.InterpolationMode.BICUBIC)
img_tensor = tsfm(img.convert('RGB')).unsqueeze(0).to(device)
with torch.no_grad():
out = model(img_tensor)
# tensor to img
out = out.data[0].cpu().float().numpy()
out = np.tile(out, (3, 1, 1))
out = (np.transpose(out, (1, 2, 0)) + 1) / 2.0 * 255.0
out = Image.fromarray(out.astype(np.uint8), 'RGB')
out_og_size = tsfm_resize_og(out)
return out_og_size.convert('L')
def convert_pidinet(img, model, alg):
'''
Pixel Difference Convolutional Networks
https://github.com/zhuoinoulu/pidinet
https://arxiv.org/abs/2108.07009
'''
device = next(model.parameters()).device
filt = int(alg.split('_')[1])
tsfm = transforms.Compose([
transforms.ToTensor(),
transforms.Normalize(
mean=[0.485, 0.456, 0.406],
std=[0.229, 0.224, 0.225])])
img = tsfm(img.convert('RGB')).unsqueeze(0)
img = img.to(device)
with torch.no_grad():
results = model(img)
result = torch.squeeze(results[filt]).cpu().numpy()
result = cv2.normalize(src=result, dst=None, alpha=0, beta=255,
norm_type=cv2.NORM_MINMAX, dtype=cv2.CV_8U)
inv = cv2.bitwise_not(result)
return Image.fromarray(inv.astype(np.uint8), 'L')
def convert_image(args, img, model=None):
if args.preprocess == 'linear':
img_new = convert_linear(img)
elif args.preprocess == 'canny':
img_new = convert_canny(img)
elif args.preprocess == 'xdog':
img_new = convert_xdog(img)
elif args.preprocess == 'xdog_th':
img_new = convert_xdog(img, thresh=True)
elif 'xdog_serial' in args.preprocess:
img_new = convert_xdog_serial(img, args.preprocess)
elif args.preprocess == 'sketchkeras':
img_new = convert_sketchkeras(img, model)
elif args.preprocess == 'aoda':
img_new = convert_aoda(img, model)
elif 'pidinet' in args.preprocess:
img_new = convert_pidinet(img, model, args.preprocess)
else:
raise NotImplementedError
return img_new
if __name__ == '__main__':
algos = ['linear', 'canny', 'xdog', 'xdog_th', 'xdog_serial_0.3',
'xdog_serial_0.4', 'xdog_serial_0.5', 'sketchkeras', 'aoda',
'pidinet_-1', 'pidinet_0', 'pidinet_1', 'pidinet_2', 'pidinet_3']
parser = argparse.ArgumentParser()
parser.add_argument('--test_path', type=str,
default=osp.join('samples', 'test.jpg'),
help='test img path')
parser.add_argument('--save', type=str, default='results',
help='save folder')
args = parser.parse_args()
os.makedirs(args.save, exist_ok=True)
img = Image.open(args.test_path)
fn = osp.splitext(osp.split(osp.normpath(args.test_path))[1])[0]
for alg in algos:
args.preprocess = alg
if alg == 'sketchkeras':
from build_sketchkeras import get_sketchkeras
model = get_sketchkeras()
img_new = convert_image(args, img, model)
img_new.save(osp.join(args.save, f'{fn}_{alg}.jpg'))
elif alg == 'aoda':
from build_aoda import get_aoda
model = get_aoda()
img_new = convert_image(args, img, model)
img_new.save(osp.join(args.save, f'{fn}_{alg}.jpg'))
elif 'pidinet' in alg:
from build_pidinet import get_pidinet
model = get_pidinet()
img_new = convert_image(args, img, model)
img_new.save(osp.join(args.save, f'{fn}_{alg}.jpg'))
else:
img_new = convert_image(args, img)
img_new.save(osp.join(args.save, f'{fn}_{alg}.jpg'))
|
py | b405e7132bb0df0543959eb9fa6b937a55c1a134 | from setuptools import setup, find_packages
from setuptools.command.test import test as TestCommand
import os
import sys
import golem
here = os.path.abspath(os.path.dirname(__file__))
class PyTest(TestCommand):
def finalize_options(self):
TestCommand.finalize_options(self)
self.test_args = ['tests']
self.test_suite = True
def run_tests(self):
import pytest
errcode = pytest.main(self.test_args)
sys.exit(errcode)
setup(
name='golem-framework',
version=golem.__version__,
description='Test automation framework for functional tests using Selenium',
# long_description=long_description,
url='https://github.com/golemhq/golem',
author='Luciano Renzi',
author_email='[email protected]',
license='MIT',
classifiers=[
'Development Status :: 4 - Beta',
'Programming Language :: Python :: 3.4',
'Programming Language :: Python :: 3.5',
'Programming Language :: Python :: 3.6',
'Natural Language :: English',
'Intended Audience :: Developers',
'License :: OSI Approved :: MIT License',
'Operating System :: OS Independent',
'Topic :: Software Development :: Quality Assurance',
'Topic :: Software Development :: Testing',
],
keywords='test automation framework selenium webdriver',
packages=find_packages(),
setup_requires=['setuptools-pep8'],
install_requires=['Flask>=0.12.2',
'Flask-login>=0.4.0',
'selenium>=3.6.0, <4.0.0a1',
'requests>=2.18.4',
'py-webdriver-manager'
],
tests_require=['pytest'],
entry_points={
'console_scripts': [
'golem-admin = golem.bin.golem_admin:main',
'golem = golem.bin.golem_init:main'
]
},
cmdclass={'test': PyTest},
include_package_data=True,
platforms='any',
test_suite='',
extras_require={
'testing': ['pytest'],
}
)
|
py | b405e73feaff2b8a5d9a2615f095efb37ab62271 | import logging
import time
from typing import Any, Dict, List, Optional, Set
from blspy import G1Element
from staicoin.consensus.cost_calculator import calculate_cost_of_program, NPCResult
from staicoin.full_node.bundle_tools import simple_solution_generator
from staicoin.full_node.mempool_check_conditions import get_name_puzzle_conditions
from staicoin.types.blockchain_format.coin import Coin
from staicoin.types.blockchain_format.program import Program, SerializedProgram
from staicoin.types.announcement import Announcement
from staicoin.types.blockchain_format.sized_bytes import bytes32
from staicoin.types.coin_spend import CoinSpend
from staicoin.types.generator_types import BlockGenerator
from staicoin.types.spend_bundle import SpendBundle
from staicoin.util.ints import uint8, uint32, uint64, uint128
from staicoin.util.hash import std_hash
from staicoin.wallet.derivation_record import DerivationRecord
from staicoin.wallet.puzzles.p2_delegated_puzzle_or_hidden_puzzle import (
DEFAULT_HIDDEN_PUZZLE_HASH,
calculate_synthetic_secret_key,
puzzle_for_pk,
solution_for_conditions,
)
from staicoin.wallet.puzzles.puzzle_utils import (
make_assert_coin_announcement,
make_assert_puzzle_announcement,
make_assert_my_coin_id_condition,
make_assert_absolute_seconds_exceeds_condition,
make_create_coin_announcement,
make_create_puzzle_announcement,
make_create_coin_condition,
make_reserve_fee_condition,
)
from staicoin.wallet.secret_key_store import SecretKeyStore
from staicoin.wallet.sign_coin_spends import sign_coin_spends
from staicoin.wallet.transaction_record import TransactionRecord
from staicoin.wallet.util.transaction_type import TransactionType
from staicoin.wallet.util.wallet_types import WalletType
from staicoin.wallet.wallet_coin_record import WalletCoinRecord
from staicoin.wallet.wallet_info import WalletInfo
class Wallet:
wallet_state_manager: Any
log: logging.Logger
wallet_id: uint32
secret_key_store: SecretKeyStore
cost_of_single_tx: Optional[int]
@staticmethod
async def create(
wallet_state_manager: Any,
info: WalletInfo,
name: str = None,
):
self = Wallet()
self.log = logging.getLogger(name if name else __name__)
self.wallet_state_manager = wallet_state_manager
self.wallet_id = info.id
self.secret_key_store = SecretKeyStore()
self.cost_of_single_tx = None
return self
async def get_max_send_amount(self, records=None):
spendable: List[WalletCoinRecord] = list(
await self.wallet_state_manager.get_spendable_coins_for_wallet(self.id(), records)
)
if len(spendable) == 0:
return 0
spendable.sort(reverse=True, key=lambda record: record.coin.amount)
if self.cost_of_single_tx is None:
coin = spendable[0].coin
tx = await self.generate_signed_transaction(
coin.amount, coin.puzzle_hash, coins={coin}, ignore_max_send_amount=True
)
program: BlockGenerator = simple_solution_generator(tx.spend_bundle)
# npc contains names of the coins removed, puzzle_hashes and their spend conditions
result: NPCResult = get_name_puzzle_conditions(
program,
self.wallet_state_manager.constants.MAX_BLOCK_COST_CLVM,
cost_per_byte=self.wallet_state_manager.constants.COST_PER_BYTE,
safe_mode=True,
)
cost_result: uint64 = calculate_cost_of_program(
program.program, result, self.wallet_state_manager.constants.COST_PER_BYTE
)
self.cost_of_single_tx = cost_result
self.log.info(f"Cost of a single tx for standard wallet: {self.cost_of_single_tx}")
max_cost = self.wallet_state_manager.constants.MAX_BLOCK_COST_CLVM / 5 # avoid full block TXs
current_cost = 0
total_amount = 0
total_coin_count = 0
for record in spendable:
current_cost += self.cost_of_single_tx
total_amount += record.coin.amount
total_coin_count += 1
if current_cost + self.cost_of_single_tx > max_cost:
break
return total_amount
@classmethod
def type(cls) -> uint8:
return uint8(WalletType.STANDARD_WALLET)
def id(self) -> uint32:
return self.wallet_id
async def get_confirmed_balance(self, unspent_records=None) -> uint128:
return await self.wallet_state_manager.get_confirmed_balance_for_wallet(self.id(), unspent_records)
async def get_unconfirmed_balance(self, unspent_records=None) -> uint128:
return await self.wallet_state_manager.get_unconfirmed_balance(self.id(), unspent_records)
async def get_spendable_balance(self, unspent_records=None) -> uint128:
spendable = await self.wallet_state_manager.get_confirmed_spendable_balance_for_wallet(
self.id(), unspent_records
)
return spendable
async def get_pending_change_balance(self) -> uint64:
unconfirmed_tx = await self.wallet_state_manager.tx_store.get_unconfirmed_for_wallet(self.id())
addition_amount = 0
for record in unconfirmed_tx:
if not record.is_in_mempool():
self.log.warning(f"Record: {record} not in mempool")
continue
our_spend = False
for coin in record.removals:
if await self.wallet_state_manager.does_coin_belong_to_wallet(coin, self.id()):
our_spend = True
break
if our_spend is not True:
continue
for coin in record.additions:
if await self.wallet_state_manager.does_coin_belong_to_wallet(coin, self.id()):
addition_amount += coin.amount
return uint64(addition_amount)
def puzzle_for_pk(self, pubkey: bytes) -> Program:
return puzzle_for_pk(pubkey)
async def hack_populate_secret_key_for_puzzle_hash(self, puzzle_hash: bytes32) -> G1Element:
maybe = await self.wallet_state_manager.get_keys(puzzle_hash)
if maybe is None:
error_msg = f"Wallet couldn't find keys for puzzle_hash {puzzle_hash}"
self.log.error(error_msg)
raise ValueError(error_msg)
# Get puzzle for pubkey
public_key, secret_key = maybe
# HACK
synthetic_secret_key = calculate_synthetic_secret_key(secret_key, DEFAULT_HIDDEN_PUZZLE_HASH)
self.secret_key_store.save_secret_key(synthetic_secret_key)
return public_key
async def hack_populate_secret_keys_for_coin_spends(self, coin_spends: List[CoinSpend]) -> None:
"""
This hack forces secret keys into the `_pk2sk` lookup. This should eventually be replaced
by a persistent DB table that can do this look-up directly.
"""
for coin_spend in coin_spends:
await self.hack_populate_secret_key_for_puzzle_hash(coin_spend.coin.puzzle_hash)
async def puzzle_for_puzzle_hash(self, puzzle_hash: bytes32) -> Program:
public_key = await self.hack_populate_secret_key_for_puzzle_hash(puzzle_hash)
return puzzle_for_pk(bytes(public_key))
async def get_new_puzzle(self) -> Program:
dr = await self.wallet_state_manager.get_unused_derivation_record(self.id())
return puzzle_for_pk(bytes(dr.pubkey))
async def get_puzzle_hash(self, new: bool) -> bytes32:
if new:
return await self.get_new_puzzlehash()
else:
record: Optional[
DerivationRecord
] = await self.wallet_state_manager.get_current_derivation_record_for_wallet(self.id())
if record is None:
return await self.get_new_puzzlehash()
return record.puzzle_hash
async def get_new_puzzlehash(self, in_transaction: bool = False) -> bytes32:
return (await self.wallet_state_manager.get_unused_derivation_record(self.id(), in_transaction)).puzzle_hash
def make_solution(
self,
primaries: Optional[List[Dict[str, Any]]] = None,
min_time=0,
me=None,
coin_announcements: Optional[Set[bytes32]] = None,
coin_announcements_to_assert: Optional[Set[bytes32]] = None,
puzzle_announcements: Optional[Set[bytes32]] = None,
puzzle_announcements_to_assert: Optional[Set[bytes32]] = None,
fee=0,
) -> Program:
assert fee >= 0
condition_list = []
if primaries:
for primary in primaries:
condition_list.append(make_create_coin_condition(primary["puzzlehash"], primary["amount"]))
if min_time > 0:
condition_list.append(make_assert_absolute_seconds_exceeds_condition(min_time))
if me:
condition_list.append(make_assert_my_coin_id_condition(me["id"]))
if fee:
condition_list.append(make_reserve_fee_condition(fee))
if coin_announcements:
for announcement in coin_announcements:
condition_list.append(make_create_coin_announcement(announcement))
if coin_announcements_to_assert:
for announcement_hash in coin_announcements_to_assert:
condition_list.append(make_assert_coin_announcement(announcement_hash))
if puzzle_announcements:
for announcement in puzzle_announcements:
condition_list.append(make_create_puzzle_announcement(announcement))
if puzzle_announcements_to_assert:
for announcement_hash in puzzle_announcements_to_assert:
condition_list.append(make_assert_puzzle_announcement(announcement_hash))
return solution_for_conditions(condition_list)
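# Illustrative call (values are placeholders, not from the original code):
# make_solution expects each primary as a dict with "puzzlehash" and "amount"
# keys, e.g.
#   solution = self.make_solution(
#       primaries=[{"puzzlehash": target_puzzle_hash, "amount": 1000}],
#       fee=10,
#   )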
async def select_coins(self, amount, exclude: List[Coin] = None) -> Set[Coin]:
"""
Returns a set of coins that can be used for generating a new transaction.
Note: This must be called under a wallet state manager lock
"""
if exclude is None:
exclude = []
spendable_amount = await self.get_spendable_balance()
if amount > spendable_amount:
error_msg = (
f"Can't select amount higher than our spendable balance. Amount: {amount}, spendable: "
f" {spendable_amount}"
)
self.log.warning(error_msg)
raise ValueError(error_msg)
self.log.info(f"About to select coins for amount {amount}")
unspent: List[WalletCoinRecord] = list(
await self.wallet_state_manager.get_spendable_coins_for_wallet(self.id())
)
sum_value = 0
used_coins: Set = set()
# Use the largest coins first
unspent.sort(reverse=True, key=lambda r: r.coin.amount)
# Try to use coins from the store, if there isn't enough of "unused"
# coins use change coins that are not confirmed yet
unconfirmed_removals: Dict[bytes32, Coin] = await self.wallet_state_manager.unconfirmed_removals_for_wallet(
self.id()
)
for coinrecord in unspent:
if sum_value >= amount and len(used_coins) > 0:
break
if coinrecord.coin.name() in unconfirmed_removals:
continue
if coinrecord.coin in exclude:
continue
sum_value += coinrecord.coin.amount
used_coins.add(coinrecord.coin)
self.log.debug(f"Selected coin: {coinrecord.coin.name()} at height {coinrecord.confirmed_block_height}!")
# This happens when we couldn't use one of the coins because it's already used
# but unconfirmed, and we are waiting for the change. (unconfirmed_additions)
if sum_value < amount:
raise ValueError(
"Can't make this transaction at the moment. Waiting for the change from the previous transaction."
)
self.log.debug(f"Successfully selected coins: {used_coins}")
return used_coins
async def _generate_unsigned_transaction(
self,
amount: uint64,
newpuzzlehash: bytes32,
fee: uint64 = uint64(0),
origin_id: bytes32 = None,
coins: Set[Coin] = None,
primaries_input: Optional[List[Dict[str, Any]]] = None,
ignore_max_send_amount: bool = False,
announcements_to_consume: Set[Announcement] = None,
) -> List[CoinSpend]:
"""
Generates an unsigned transaction in the form of List(Puzzle, Solutions)
Note: this must be called under a wallet state manager lock
"""
if primaries_input is None:
primaries: Optional[List[Dict]] = None
total_amount = amount + fee
else:
primaries = primaries_input.copy()
primaries_amount = 0
for prim in primaries:
primaries_amount += prim["amount"]
total_amount = amount + fee + primaries_amount
if not ignore_max_send_amount:
max_send = await self.get_max_send_amount()
if total_amount > max_send:
raise ValueError(f"Can't send more than {max_send} in a single transaction")
if coins is None:
coins = await self.select_coins(total_amount)
assert len(coins) > 0
self.log.info(f"coins is not None {coins}")
spend_value = sum([coin.amount for coin in coins])
change = spend_value - total_amount
assert change >= 0
spends: List[CoinSpend] = []
primary_announcement_hash: Optional[bytes32] = None
# Check for duplicates
if primaries is not None:
all_primaries_list = [(p["puzzlehash"], p["amount"]) for p in primaries] + [(newpuzzlehash, amount)]
if len(set(all_primaries_list)) != len(all_primaries_list):
raise ValueError("Cannot create two identical coins")
for coin in coins:
self.log.info(f"coin from coins {coin}")
puzzle: Program = await self.puzzle_for_puzzle_hash(coin.puzzle_hash)
# Only one coin creates outputs
if primary_announcement_hash is None and origin_id in (None, coin.name()):
if primaries is None:
primaries = [{"puzzlehash": newpuzzlehash, "amount": amount}]
else:
primaries.append({"puzzlehash": newpuzzlehash, "amount": amount})
if change > 0:
change_puzzle_hash: bytes32 = await self.get_new_puzzlehash()
primaries.append({"puzzlehash": change_puzzle_hash, "amount": change})
message_list: List[bytes32] = [c.name() for c in coins]
for primary in primaries:
message_list.append(Coin(coin.name(), primary["puzzlehash"], primary["amount"]).name())
message: bytes32 = std_hash(b"".join(message_list))
solution: Program = self.make_solution(
primaries=primaries,
fee=fee,
coin_announcements={message},
coin_announcements_to_assert=announcements_to_consume,
)
primary_announcement_hash = Announcement(coin.name(), message).name()
else:
solution = self.make_solution(coin_announcements_to_assert={primary_announcement_hash})
spends.append(
CoinSpend(
coin, SerializedProgram.from_bytes(bytes(puzzle)), SerializedProgram.from_bytes(bytes(solution))
)
)
self.log.info(f"Spends is {spends}")
return spends
async def sign_transaction(self, coin_spends: List[CoinSpend]) -> SpendBundle:
return await sign_coin_spends(
coin_spends,
self.secret_key_store.secret_key_for_public_key,
self.wallet_state_manager.constants.AGG_SIG_ME_ADDITIONAL_DATA,
self.wallet_state_manager.constants.MAX_BLOCK_COST_CLVM,
)
async def generate_signed_transaction(
self,
amount: uint64,
puzzle_hash: bytes32,
fee: uint64 = uint64(0),
origin_id: bytes32 = None,
coins: Set[Coin] = None,
primaries: Optional[List[Dict[str, bytes32]]] = None,
ignore_max_send_amount: bool = False,
announcements_to_consume: Set[Announcement] = None,
) -> TransactionRecord:
"""
Use this to generate a transaction.
Note: this must be called under a wallet state manager lock
"""
if primaries is None:
non_change_amount = amount
else:
non_change_amount = uint64(amount + sum(p["amount"] for p in primaries))
transaction = await self._generate_unsigned_transaction(
amount, puzzle_hash, fee, origin_id, coins, primaries, ignore_max_send_amount, announcements_to_consume
)
assert len(transaction) > 0
self.log.info("About to sign a transaction")
await self.hack_populate_secret_keys_for_coin_spends(transaction)
spend_bundle: SpendBundle = await sign_coin_spends(
transaction,
self.secret_key_store.secret_key_for_public_key,
self.wallet_state_manager.constants.AGG_SIG_ME_ADDITIONAL_DATA,
self.wallet_state_manager.constants.MAX_BLOCK_COST_CLVM,
)
now = uint64(int(time.time()))
add_list: List[Coin] = list(spend_bundle.additions())
rem_list: List[Coin] = list(spend_bundle.removals())
assert sum(a.amount for a in add_list) + fee == sum(r.amount for r in rem_list)
return TransactionRecord(
confirmed_at_height=uint32(0),
created_at_time=now,
to_puzzle_hash=puzzle_hash,
amount=uint64(non_change_amount),
fee_amount=uint64(fee),
confirmed=False,
sent=uint32(0),
spend_bundle=spend_bundle,
additions=add_list,
removals=rem_list,
wallet_id=self.id(),
sent_to=[],
trade_id=None,
type=uint32(TransactionType.OUTGOING_TX.value),
name=spend_bundle.name(),
)
async def push_transaction(self, tx: TransactionRecord) -> None:
"""Use this API to send transactions."""
await self.wallet_state_manager.add_pending_transaction(tx)
# This is to be aggregated together with a coloured coin offer to ensure that the trade happens
async def create_spend_bundle_relative_staicoin(self, staicoin_amount: int, exclude: List[Coin]) -> SpendBundle:
list_of_solutions = []
utxos = None
# If we're losing value then get coins with at least that much value
# If we're gaining value then our amount doesn't matter
if staicoin_amount < 0:
utxos = await self.select_coins(abs(staicoin_amount), exclude)
else:
utxos = await self.select_coins(0, exclude)
assert len(utxos) > 0
# Calculate output amount given sum of utxos
spend_value = sum([coin.amount for coin in utxos])
staicoin_amount = spend_value + staicoin_amount
# Create coin solutions for each utxo
output_created = None
for coin in utxos:
puzzle = await self.puzzle_for_puzzle_hash(coin.puzzle_hash)
if output_created is None:
newpuzhash = await self.get_new_puzzlehash()
primaries = [{"puzzlehash": newpuzhash, "amount": staicoin_amount}]
solution = self.make_solution(primaries=primaries)
output_created = coin
list_of_solutions.append(CoinSpend(coin, puzzle, solution))
await self.hack_populate_secret_keys_for_coin_spends(list_of_solutions)
spend_bundle = await sign_coin_spends(
list_of_solutions,
self.secret_key_store.secret_key_for_public_key,
self.wallet_state_manager.constants.AGG_SIG_ME_ADDITIONAL_DATA,
self.wallet_state_manager.constants.MAX_BLOCK_COST_CLVM,
)
return spend_bundle
|
py | b405e74af77a51bc63aeeb919038ee81153260fd | #!/usr/bin/env python3
'''
FlowsLogger.py
Logging facility module for flows
----------------------------------
Copyright 2016 Davide Mastromatteo
License: Apache-2.0
'''
import logging
import threading
from flows import Global
class FlowsLogger:
"""
FlowsLogger class - Logger Factory
"""
_instance = None
_instance_lock = threading.Lock()
_logger_instance = None
@classmethod
def default_instance(cls):
"""
For use like a singleton, return the existing instance of the object
or a new instance
"""
if cls._instance is None:
with cls._instance_lock:
if cls._instance is None:
cls._instance = FlowsLogger()
return cls._instance
def get_logger(self):
"""
Returns the standard logger
"""
if Global.LOGGER:
Global.LOGGER.debug('configuring a logger')
if self._logger_instance is not None:
return self._logger_instance
self._logger_instance = logging.getLogger("flowsLogger")
self._logger_instance.setLevel(logging.DEBUG)
log_format = '%(asctime)s - [%(levelname)s]|%(thread)d\t%(message)s'
log_date_format = '%Y-%m-%d %H:%M:%S'
formatter = logging.Formatter(log_format, log_date_format)
new_log_stream_handler = logging.StreamHandler()
new_log_stream_handler.setFormatter(formatter)
new_log_stream_handler.setLevel(logging.INFO)
self._logger_instance.addHandler(new_log_stream_handler)
return self._logger_instance
def reconfigure_log_level(self):
"""
Returns a new standard logger instance
"""
if Global.LOGGER:
Global.LOGGER.debug('reconfiguring logger level')
stream_handlers = filter(lambda x: type(x) is logging.StreamHandler,
self._logger_instance.handlers)
for x in stream_handlers:
x.level = Global.CONFIG_MANAGER.log_level
return self.get_logger()
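# Hedged usage sketch: the class is intended to be used through its singleton
# factory, e.g.
#   logger = FlowsLogger.default_instance().get_logger()
#   logger.info("flows logger configured")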
|
py | b405e96a4461f7b07f165e19856d01e47b1f1c1d | #!/usr/bin/python3
# EmreOvunc - BerkayIpek
# 28.04.2020
# pip3 install pandas requests xlrd xlwt
from os import mkdir
from os import remove
from os import listdir
from time import sleep
from xlwt import Workbook
from pandas import ExcelFile
from os.path import exists
from requests import get
from datetime import datetime
date = str(datetime.now()).split(' ')[0]
time = str(datetime.now()).split(' ')[1].split(":")[0] + "_" +\
str(datetime.now()).split(' ')[1].split(":")[1] + "_" +\
str(datetime.now()).split(' ')[1].split(":")[2].split('.')[0]
dirName = date + "_" + time
mkdir(dirName)
try:
for files in listdir("."):
if files.endswith(".xlsx"):
file = files
except:
exit(0)
try:
file_resultName = "results.txt"
error_resultName = "errors.txt"
error_result = open(dirName + "/" + error_resultName, 'a')
except:
remove(dirName)
exit(0)
try:
excel = ExcelFile(file)
sheet = excel.parse("Sheet1")
except:
remove(dirName)
exit(0)
ipList = []
for data in sheet['DestinationIP_1']:
ip = data.strip().split('=')[1]
ipList.append((ip, 0))
site1 = 'https://www.whois.com/whois/'
parse1 = 'registryData'
site2 = 'https://who.is/whois-ip/ip-address/'
parse2 = 'col-md-12 queryResponseBodyKey'
global result, result_, counter
result_ = ""
result = []
counter = 1
subnetmask = {
"8": "255.0.0.0",
"9": "255.128.0.0",
"10": "255.192.0.0",
"11": "255.224.0.0",
"12": "255.240.0.0",
"13": "255.248.0.0",
"14": "255.252.0.0",
"15": "255.254.0.0",
"16": "255.255.0.0",
"17": "255.255.128.0",
"18": "255.255.192.0",
"19": "255.255.224.0",
"20": "255.255.240.0",
"21": "255.255.248.0",
"22": "255.255.252.0",
"23": "255.255.254.0",
"24": "255.255.255.0",
"25": "255.255.255.128",
"26": "255.255.255.192",
"27": "255.255.255.224",
"28": "255.255.255.240",
"29": "255.255.255.248",
"30": "255.255.255.252"
}
# IP & Subnet
def Int2Bin(integer):
binary = '.'.join([bin(int(x) + 256)[3:] for x in integer.split('.')])
return binary
# Wild Card
def complement(number):
if number == '0':
number = '1'
elif number == '.':
pass
else:
number = '0'
return number
def find_wildcard(binary_subnet):
binary_list = list(binary_subnet)
wildcard = ''.join(complement(binary_list[y]) for y in range(len(binary_list)))
return wildcard
def convert_decimal(wildcard_Binary):
binary = {}
for x in range(4):
binary[x] = int(wildcard_Binary.split(".")[x], 2)
dec = ".".join(str(binary[x]) for x in range(4))
return dec
# Network ID
def andOP(IP1, IP2):
ID_list = {}
for y in range(4):
ID_list[y] = int(IP1.split(".")[y]) & int(IP2.split(".")[y])
ID = ".".join(str(ID_list[z]) for z in range(4))
return ID
# Broadcast IP
def orOP(IP1, IP2):
Broadcast_list = {}
for z in range(4):
Broadcast_list[z] = int(IP1.split(".")[z]) | int(IP2.split(".")[z])
broadcast = ".".join(str(Broadcast_list[c]) for c in range(4))
return broadcast
# Max IP
def maxiIP(brdcstIP):
maxIPs = brdcstIP.split(".")
if int(brdcstIP.split(".")[3]) - 1 == 0:
if int(brdcstIP.split(".")[2]) - 1 == 0:
if int(brdcstIP.split(".")[1]) - 1 == 0:
maxIPs[0] = int(brdcstIP.split(".")[0]) - 1
else:
maxIPs[1] = int(brdcstIP.split(".")[1]) - 1
else:
maxIPs[2] = int(brdcstIP.split(".")[2]) - 1
else:
maxIPs[3] = int(brdcstIP.split(".")[3]) - 1
return ".".join(str(maxIPs[x]) for x in range(4))
# Min IP
def miniIP(ntwrkID):
miniIPs = ntwrkID.split(".")
if int(ntwrkID.split(".")[3]) + 1 == 256:
if int(ntwrkID.split(".")[2]) + 1 == 256:
if int(ntwrkID.split(".")[1]) + 1 == 256:
miniIPs[0] = int(ntwrkID.split(".")[0]) + 1
miniIPs[1] = 0
miniIPs[2] = 0
miniIPs[3] = 0
else:
miniIPs[1] = int(ntwrkID.split(".")[1]) + 1
miniIPs[2] = 0
miniIPs[3] = 0
else:
miniIPs[2] = int(ntwrkID.split(".")[2]) + 1
miniIPs[3] = 0
else:
miniIPs[3] = int(ntwrkID.split(".")[3]) + 1
return ".".join(str(miniIPs[x]) for x in range(4))
def web(site, parseKey, ip):
fl = 0
for res in result:
if res[0] == ip:
fl = 1
break
if fl == 0:
req = get(site + str(ip))
if req.status_code == 200:
runflag = 0
try:
try:
try:
result_ = \
str(req.content).split(parseKey)[1].split('OrgName')[1].split('OrgId')[0].split('\\n')[0].split(
':')[1].strip()
except:
try:
cidr = \
str(req.content).split(parseKey)[1].split('CIDR')[1].split('NetName')[0].split('\\n')[
0].split(':')[1].strip()
result_ = \
str(req.content).split(parseKey)[1].split('netname')[1].split('country')[0].split('\\n')[
0].split(':')[1].strip()
except:
result_ = \
str(req.content).split(parseKey)[1].split('org-name')[1].split('org-type')[0].split('\\n')[
0].split(':')[1].strip()
except:
try:
result_ = str(req.content).split(parseKey)[1].split('NetName')[1].split('NetHandle')[0].strip()
except:
try:
result_ = str(req.content).split(parseKey)[1].split('<pre>')[1].split('\\n')[0]
except:
try:
result_ = str(req.content).split(parseKey)[1].split('netname')[1].split('descr')[0].split('\\n')[
0].split(':')[1].strip()
except:
if site == site2:
web(site1, parseKey, ip)
else:
result_ = "Captcha ERROR"
error_result.write(str("\n" + result_ + "=" + ip))
runflag += 1
except:
error_result.write(str("\n" + ip))
result_ = ""
runflag += 1
if "div" in result_:
result_ = 'Parsing ERROR'
error_result.write(str("\n" + result_ + "=" + ip))
runflag += 1
try:
if runflag == 0:
subnet(req, parseKey, ip, result_)
except:
subnet(req, parseKey, ip, result_)
else:
error_result.write(str("\n" + ip + ":" + req.status_code))
def subnet(req, parseKey, ip, result_):
try:
cidr = str(req.content).split(parseKey)[1].split('CIDR')[1].split('NetName')[0].split('\\n')[0].split(':')[
1].strip()
netmask = str(req.content).split(parseKey)[1].split('CIDR')[1].split('NetName')[0].split('\\n')[0].split(':')[
1].strip().split('/')[1]
except:
cidr = ""
netmask = "8"
if not len(cidr.split(',')) > 1:
if not netmask == "8":
try:
netmask = subnetmask[netmask]
IP_binary = Int2Bin(ip)
Subnet_binary = Int2Bin(netmask)
wildcard_binary = find_wildcard(Int2Bin(netmask))
WildCard = convert_decimal(wildcard_binary)
networkID = andOP(ip, netmask)
network_Binary = Int2Bin(networkID)
broadcastIP = orOP(networkID, WildCard)
broadcastIP_binary = Int2Bin(broadcastIP)
maxIP = maxiIP(broadcastIP)
maxIP_binary = Int2Bin(maxIP)
minIP = miniIP(networkID)
minIP_binary = Int2Bin(networkID)
while True:
maxIP, minIP, result_ = searchList(maxIP, minIP, result_)
if maxIP == minIP:
break
except:
error_result.write(str("\nNetmask ERROR=" + ip))
else:
maxIP, minIP, result_ = searchList(ip, ip, result_)
else:
maxIP, minIP, result_ = searchList(ip, ip, result_)
def searchvalid(minip, result_):
global counter
for nb in range(0, len(ipList)):
if ipList[nb][1] == 0:
if ipList[nb][0] == minip:
ipList[nb] = (ipList[nb][0], 1)
file_result = open(dirName + "/" + file_resultName, 'a')
file_result.write("\n" + ipList[nb][0] + ":" + result_)
file_result.close()
result.append((ipList[nb][0], result_))
print('Progress: ' + str(counter))
counter += 1
def searchIP(maxip, minip, result_):
if maxip.split('.')[0] == minip.split('.')[0]:
if maxip.split('.')[1] == minip.split('.')[1]:
if maxip.split('.')[2] == minip.split('.')[2]:
if not maxip == minip:
increasePart = int(minip.split('.')[3]) + 1
minip = minip.split('.')[0] + "." + minip.split('.')[1] + "." + minip.split('.')[2] + "." + str(
increasePart)
else:
pass
else:
if minip.split('.')[3] == "255":
increasePart = int(minip.split('.')[2]) + 1
minip = minip.split('.')[0] + "." + minip.split('.')[1] + "." + str(increasePart) + ".0"
else:
increasePart = int(minip.split('.')[3]) + 1
minip = minip.split('.')[0] + "." + minip.split('.')[1] + "." + minip.split('.')[2] + "." + str(
increasePart)
else:
if minip.split('.')[3] == "255":
if not minip.split('.')[2] == "255":
increasePart = int(minip.split('.')[2]) + 1
minip = minip.split('.')[0] + "." + minip.split('.')[1] + "." + str(increasePart) + ".0"
else:
increasePart = int(minip.split('.')[1]) + 1
minip = minip.split('.')[0] + "." + str(increasePart) + ".0" + ".0"
else:
if minip.split('.')[2] == "255":
increasePart = int(minip.split('.')[1]) + 1
minip = minip.split('.')[0] + "." + str(increasePart) + ".0" + ".0"
else:
if minip.split('.')[3] == "255":
increasePart = int(minip.split('.')[2]) + 1
minip = minip.split('.')[0] + "." + minip.split('.')[1] + "." + str(increasePart) + ".0"
else:
increasePart = int(minip.split('.')[3]) + 1
minip = minip.split('.')[0] + "." + minip.split('.')[1] + "." + minip.split('.')[2] + "." + str(
increasePart)
return maxip, minip, result_
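# searchList(): record the current address, then step to the next one in the range.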
def searchList(maxip, minip, result_):
searchvalid(minip, result_)
return searchIP(maxip, minip, result_)
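# Main loop: read destination IPs from the input sheet (loaded earlier; each cell is
# expected to look like "<label>=<ip>"), alternate between the two lookup sites to
# spread the load, throttle with a short sleep, then dump the collected
# (ip, organisation) pairs to results.xls and clean up the temporary workbook copy.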
flag = 0
for data in sheet['DestinationIP_1']:
ip = data.strip().split('=')[1]
if flag % 2 == 0:
web(site1, parse1, ip)
else:
web(site2, parse2, ip)
flag += 1
sleep(2)
error_result.close()
workbook = Workbook()
sheet = workbook.add_sheet('results')
for nbr in range(0, len(result)):
sheet.write(nbr, 0, str(result[nbr][0]))
sheet.write(nbr, 1, str(result[nbr][1]))
workbook.save(dirName + '/results.xls')
if exists("~" + file):
remove("~" + file)
exit()
|
py | b405e96c661d706ce6f193b9fb552d54d9b2571e |
"""Test the performance of simple HTTP serving and client using the Tornado
framework.
A trivial "application" is generated which generates a number of chunks of
data as a HTTP response's body.
"""
import sys
import socket
import pyperf
from tornado.httpclient import AsyncHTTPClient
from tornado.httpserver import HTTPServer
from tornado.gen import coroutine
from tornado.ioloop import IOLoop
from tornado.netutil import bind_sockets
from tornado.web import RequestHandler, Application
HOST = "127.0.0.1"
FAMILY = socket.AF_INET
CHUNK = b"Hello world\n" * 1000
NCHUNKS = 5
CONCURRENCY = 150
class MainHandler(RequestHandler):
@coroutine
def get(self):
for _ in range(NCHUNKS):
self.write(CHUNK)
yield self.flush()
def compute_etag(self):
        # Overridden to avoid stressing hashlib in this benchmark
return None
def make_application():
return Application([
(r"/", MainHandler),
])
def make_http_server(request_handler):
server = HTTPServer(request_handler)
sockets = bind_sockets(0, HOST, family=FAMILY)
assert len(sockets) == 1
server.add_sockets(sockets)
sock = sockets[0]
return server, sock
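# bench_tornado(): start the local server above, then for each of `loops` iterations
# issue CONCURRENCY concurrent fetches and verify every response body has the
# expected size; returns the measured wall-clock time.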
def bench_tornado(loops):
server, sock = make_http_server(make_application())
host, port = sock.getsockname()
url = "http://%s:%s/" % (host, port)
namespace = {}
@coroutine
def run_client():
client = AsyncHTTPClient()
range_it = range(loops)
t0 = pyperf.perf_counter()
for _ in range_it:
futures = [client.fetch(url) for j in range(CONCURRENCY)]
for fut in futures:
resp = yield fut
buf = resp.buffer
buf.seek(0, 2)
assert buf.tell() == len(CHUNK) * NCHUNKS
namespace['dt'] = pyperf.perf_counter() - t0
client.close()
IOLoop.current().run_sync(run_client)
server.stop()
return namespace['dt']
if __name__ == "__main__":
# 3.8 changed the default event loop to ProactorEventLoop which doesn't
# implement everything required by tornado and breaks this benchmark.
# Restore the old WindowsSelectorEventLoop default for now.
# https://bugs.python.org/issue37373
# https://github.com/python/pyperformance/issues/61
# https://github.com/tornadoweb/tornado/pull/2686
if sys.platform == 'win32' and sys.version_info[:2] >= (3, 8):
import asyncio
asyncio.set_event_loop_policy(asyncio.WindowsSelectorEventLoopPolicy())
kw = {}
if pyperf.python_has_jit():
# PyPy needs to compute more warmup values to warmup its JIT
kw['warmups'] = 30
runner = pyperf.Runner(**kw)
runner.metadata['description'] = ("Test the performance of HTTP requests "
"with Tornado.")
runner.bench_time_func('tornado_http', bench_tornado)
|
py | b405ea08b056dba84ea4d643536ba3627316661a | from torch.utils.data.sampler import \
(Sampler, SequentialSampler, RandomSampler,
SubsetRandomSampler, WeightedRandomSampler, BatchSampler)
from torch.utils.data.dataset import \
(Dataset, IterableDataset, TensorDataset, ConcatDataset, ChainDataset,
Subset, random_split)
from torch.utils.data.dataset import IterableDataset as IterDataPipe
from torch.utils.data.distributed import DistributedSampler
from torch.utils.data.dataloader import DataLoader, _DatasetKind, get_worker_info
from torch.utils.data.decorator import functional_datapipe, guaranteed_datapipes_determinism, non_deterministic
__all__ = ['Sampler', 'SequentialSampler', 'RandomSampler',
'SubsetRandomSampler', 'WeightedRandomSampler', 'BatchSampler',
'DistributedSampler', 'Dataset', 'IterableDataset', 'TensorDataset',
'ConcatDataset', 'ChainDataset', 'Subset', 'random_split',
'DataLoader', '_DatasetKind', 'get_worker_info',
'IterDataPipe', 'functional_datapipe', 'guaranteed_datapipes_determinism',
'non_deterministic']
################################################################################
# import subpackage
################################################################################
from torch.utils.data import datapipes
|
py | b405eaa26090d819f5afe6bca3a6886286c4e904 | #!/usr/bin/env python
# -*- encoding: utf-8 -*-
#
# Copyright (c) 2017 Stephen Bunn ([email protected])
# MIT License <https://opensource.org/licenses/MIT>
from ._common import BaseRuleTest
class TranslateTextRuleTest(BaseRuleTest):
""" Tests the ``translate_text`` rule.
"""
@property
def rule_name(self):
""" The name of the rule.
"""
return 'translate_text'
@property
def rule_arguments(self):
""" The arguments for this rule's application.
"""
return (
[{r'He(L+)o': "L's: {0}"}],
{},
)
@property
def rule_group(self):
""" The group type of the rule.
"""
return 'value_rules'
|
py | b405eaa409a85d7e22bfa1b19eca3f5848cf814d | from MediaPlayer.Util.Enums import ExtensionType, ExtensionName, ExtensionProtocolMessageType
from MediaPlayer.Util import Bencode
class ProtocolExtension:
def __init__(self, extension_name, id_name, extension_type, reserved_id, bit_mask, supported):
self.extension_name = extension_name
self.id_name = id_name
self.extension_type = extension_type
self.reserved_id = reserved_id
self.bit_mask = bit_mask
self.supported = supported
class ProtocolExtensionManager:
known_extensions = [
ProtocolExtension(ExtensionName.FastExtension, "", ExtensionType.Basic, 7, 0x04, True),
ProtocolExtension(ExtensionName.ExtensionProtocol, "", ExtensionType.Basic, 5, 0x10, True),
ProtocolExtension(ExtensionName.DHT, "", ExtensionType.Basic, 7, 0x01, True),
ProtocolExtension(ExtensionName.PeerExchange, "ut_pex", ExtensionType.Extension, ExtensionProtocolMessageType.PeerExchange, 0, True),
ProtocolExtension(ExtensionName.Metadata, "ut_metadata", ExtensionType.Extension, ExtensionProtocolMessageType.Metadata, 0, True)
]
@staticmethod
def get_extension(name):
filtered = [x for x in ProtocolExtensionManager.known_extensions if x.extension_name == name]
if len(filtered) == 1:
return filtered[0]
@staticmethod
def get_extension_by_id_name(name):
filtered = [x for x in ProtocolExtensionManager.known_extensions if x.id_name == name]
if len(filtered) == 1:
return filtered[0]
@staticmethod
def get_extensions():
return ProtocolExtensionManager.known_extensions.copy()
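    # Set the reserved-bit flags of the supported basic extensions (DHT, fast
    # extension, extension protocol) in the handshake's reserved bytes.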
@staticmethod
def add_extensions_to_handshake(byte):
for ext in [x for x in ProtocolExtensionManager.known_extensions if x.extension_type == ExtensionType.Basic and x.supported]:
byte[ext.reserved_id] |= ext.bit_mask
return byte
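    # Build the bencoded extended-handshake dictionary ({'m': {name: message id}})
    # advertising the supported extension-protocol messages (BEP 10 style).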
@staticmethod
def create_extension_dictionary():
dic = dict()
m_dic = dict()
for ext in [x for x in ProtocolExtensionManager.known_extensions if x.extension_type == ExtensionType.Extension and x.supported]:
m_dic[ext.id_name.encode('utf8')] = int(ext.reserved_id)
dic[b'm'] = m_dic
return Bencode.bencode(dic)
|
py | b405ec407ecd1f4a4963c461f2ce5f5a6fbb1858 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
#
#
# Copyright © 2018 Dell Inc. or its subsidiaries. All rights reserved.
# Dell, EMC, and other trademarks are trademarks of Dell Inc. or its subsidiaries.
# Other trademarks may be trademarks of their respective owners.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Authors: Vaideeswaran Ganesan
#
import os
import sys
import logging
from omsdk.sdkdevice import iDeviceRegistry, iDeviceDriver, iDeviceDiscovery
from omsdk.sdkdevice import iDeviceTopologyInfo
from omsdk.sdkproto import PWSMAN,PREDFISH, PSNMP, ProtocolEnum, ProtocolOptionsFactory
from omdrivers.enums.iDRAC.iDRACEnums import *
from omsdk.idracmsgdb import eemiregistry
from omsdk.sdkcenum import TypeHelper
from omsdk.http.sdkredfishbase import RedfishOptions
from omsdk.http.sdkwsmanbase import WsManOptions
logger = logging.getLogger(__name__)
class NoConfig:
def __init__(self, arg1):
logger.debug("iDRAC:Not implemented")
PY2 = sys.version_info[0] == 2
PY3 = sys.version_info[0] == 3
try:
from pysnmp.hlapi import *
from pysnmp.smi import *
PySnmpPresent = True
except ImportError:
PySnmpPresent = False
try:
from omdrivers.lifecycle.iDRAC.iDRACJobs import iDRACJobs
from omdrivers.lifecycle.iDRAC.iDRACConfig import iDRACConfig
from omdrivers.lifecycle.iDRAC.iDRACConfig import iDRACRedfishCmds
from omdrivers.lifecycle.iDRAC.iDRACConfig import iDRACWsManCmds
from omdrivers.lifecycle.iDRAC.iDRACLogs import iDRACLogs
from omdrivers.lifecycle.iDRAC.iDRACUpdate import iDRACUpdate
from omdrivers.lifecycle.iDRAC.iDRACLicense import iDRACLicense
from omdrivers.lifecycle.iDRAC.iDRACSecurity import iDRACSecurity
from omdrivers.lifecycle.iDRAC.iDRACStreaming import iDRACStreaming
from omdrivers.lifecycle.iDRAC.iDRACCredsMgmt import iDRACCredsMgmt
except ImportError as ex:
logger.debug(str(ex))
iDRACJobs = NoConfig
iDRACConfig = NoConfig
iDRACLogs = NoConfig
iDRACUpdate = NoConfig
iDRACRedfishCmds = {}
iDRACWsManCmds = {}
iDRACCompEnum = EnumWrapper("iDRACCompEnum", {
"System" : "System",
"Memory" : "Memory",
"CPU" : "CPU",
"iDRAC" : "iDRAC",
"FC" : "FC",
"NIC" : "NIC",
"HostNIC" : "HostNIC",
"PCIDevice" : "PCIDevice",
"Fan" : "Fan",
"PowerSupply" : "PowerSupply",
"Enclosure" : "Enclosure",
"EnclosureEMM" : "EnclosureEMM",
"EnclosurePSU" : "EnclosurePSU",
"EnclosureSensor" : "EnclosureSensor",
"EnclosureFanSensor" : "EnclosureFanSensor",
"EnclosureTempSensor" : "EnclosureTempSensor",
"VFlash" : "VFlash",
"Video" : "Video",
"ControllerBattery" : "ControllerBattery" ,
"Controller" : "Controller",
"ControllerSensor" : "ControllerSensor",
"VirtualDisk" : "VirtualDisk",
"PhysicalDisk" : "PhysicalDisk",
"PCIeSSDExtender" : "PCIeSSDExtender",
"PCIeSSDBackPlane" : "PCIeSSDBackPlane",
"PCIeSSDDisk" : "PCIeSSDDisk",
"Sensors_Amperage" : "Sensors_Amperage",
"Sensors_Temperature" : "Sensors_Temperature",
"Sensors_Voltage" : "Sensors_Voltage",
"Sensors_Intrusion" : "Sensors_Intrusion",
"Sensors_Battery" : "Sensors_Battery",
"Sensors_Fan" : "Sensors_Fan",
"LogicalSystem" : "LogicalSystem",
"License" : "License",
"iDRACNIC" : "iDRACNIC",
"BIOS" : "BIOS",
"SystemMetrics" : "SystemMetrics",
"SystemBoardMetrics" : "SystemBoardMetrics",
"PresenceAndStatusSensor" : "PresenceAndStatusSensor"
}).enum_type
iDRACSensorEnum = EnumWrapper("iDRACSensorEnum", {
"ServerSensor" : "ServerSensor",
"NumericSensor" : "NumericSensor",
"PSNumericSensor" : "PSNumericSensor",
}).enum_type
iDRACMiscEnum = EnumWrapper("iDRACMiscEnum", {
"SystemString" : "SystemString",
"NICString" : "NICString",
"NICEnumeration" : "NICEnumeration",
"iDRACString" : "iDRACString",
"iDRACEnumeration" : "iDRACEnumeration",
"NICStatistics" : "NICStatistics",
"NICCapabilities" : "NICCapabilities",
"SwitchConnection" : "SwitchConnection",
"FCStatistics" : "FCStatistics",
"HostNICView" : "HostNICView",
"RAIDEnumeration" : "RAIDEnumeration",
"LCString" : "LCString",
"ChassisRF" : "ChassisRF",
"DellAttributes" : "DellAttributes"
}).enum_type
iDRACMetricsEnum = EnumWrapper("iDRACMetricsEnum", {
"AggregationMetric" : "AggregationMetric",
"BaseMetricValue" : "BaseMetricValue",
}).enum_type
#iDRACFirmEnum.SelLog : "http://schemas.dell.com/wbem/wscim/1/cim-schema/2/DCIM_SELLogEntry",
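# Parent/child containment hierarchy used to assemble the component tree of a server.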
iDRACComponentTree = {
iDRACCompEnum.System : [
iDRACCompEnum.Memory,
iDRACCompEnum.CPU,
iDRACCompEnum.iDRAC,
iDRACCompEnum.FC,
iDRACCompEnum.NIC,
iDRACCompEnum.PCIDevice,
iDRACCompEnum.Fan,
iDRACCompEnum.PowerSupply,
iDRACCompEnum.VFlash,
iDRACCompEnum.Video,
iDRACCompEnum.License,
iDRACCompEnum.HostNIC,
iDRACCompEnum.BIOS,
"Sensors",
"Storage"
],
iDRACCompEnum.iDRAC : [
iDRACCompEnum.iDRACNIC,
],
"Storage" : [
iDRACCompEnum.Controller,
],
"Sensors" : [
iDRACCompEnum.Sensors_Amperage,
iDRACCompEnum.Sensors_Temperature,
iDRACCompEnum.Sensors_Voltage,
iDRACCompEnum.Sensors_Intrusion,
iDRACCompEnum.Sensors_Battery,
iDRACCompEnum.Sensors_Fan,
iDRACCompEnum.PresenceAndStatusSensor
],
iDRACCompEnum.Controller : [
iDRACCompEnum.Enclosure, # Enclosure.RAID.Modular.3-1
iDRACCompEnum.VirtualDisk, #VirtualDisk.RAID.Modular.3-1
iDRACCompEnum.PhysicalDisk, #DirectDisk.RAID
iDRACCompEnum.ControllerSensor
],
iDRACCompEnum.VirtualDisk : [
iDRACCompEnum.PhysicalDisk
],
iDRACCompEnum.ControllerSensor : [
iDRACCompEnum.ControllerBattery,
],
iDRACCompEnum.Enclosure : [
iDRACCompEnum.EnclosureEMM,
iDRACCompEnum.EnclosurePSU,
iDRACCompEnum.PhysicalDisk,
iDRACCompEnum.EnclosureSensor,
iDRACCompEnum.PCIeSSDExtender
],
iDRACCompEnum.PCIeSSDExtender : [
iDRACCompEnum.PCIeSSDBackPlane
],
iDRACCompEnum.PCIeSSDBackPlane : [
iDRACCompEnum.PCIeSSDDisk
],
iDRACCompEnum.EnclosureSensor : [
iDRACCompEnum.EnclosureFanSensor,
iDRACCompEnum.EnclosureTempSensor
]
}
iDRACSWCompMapping = {
'BIOS' : 'BIOS.*',
'CMC' : 'CMC.*',
'CPLD' : 'CPLD.*',
'LC' : '.*LC.Embedded.*',
'PhysicalDisk' : 'Disk.*',
'DriverPack' : 'DriverPack.*',
'Enclosure' : 'Enclosure.*',
'NIC' : 'NIC.*',
'OSCollector' : 'OSCollector.*',
'RAID' : 'RAID.*',
'iDRAC' : 'iDRAC.*',
'Chassis' : '.*Chassis.*'
}
# http://schemas.dmtf.org/wbem/wscim/1/cim-schema/2/root/dcim/DCIM_NICStatistics
iDRACWsManViews = {
iDRACCompEnum.System: "http://schemas.dmtf.org/wbem/wscim/1/cim-schema/2/root/dcim/DCIM_SystemView",
iDRACCompEnum.Memory : "http://schemas.dmtf.org/wbem/wscim/1/cim-schema/2/root/dcim/DCIM_MemoryView",
iDRACCompEnum.CPU : "http://schemas.dmtf.org/wbem/wscim/1/cim-schema/2/root/dcim/DCIM_CPUView",
iDRACCompEnum.Fan : "http://schemas.dmtf.org/wbem/wscim/1/cim-schema/2/root/dcim/DCIM_FanView",
iDRACCompEnum.iDRAC : "http://schemas.dmtf.org/wbem/wscim/1/cim-schema/2/root/dcim/DCIM_iDRACCardView",
iDRACCompEnum.iDRACNIC : "http://schemas.dmtf.org/wbem/wscim/1/cim-schema/2/root/dcim/DCIM_iDRACCardView",
iDRACCompEnum.FC : "http://schemas.dmtf.org/wbem/wscim/1/cim-schema/2/root/dcim/DCIM_FCView",
iDRACCompEnum.NIC : "http://schemas.dmtf.org/wbem/wscim/1/cim-schema/2/root/dcim/DCIM_NICView",
iDRACCompEnum.HostNIC : "http://schemas.dmtf.org/wbem/wscim/1/cim-schema/2/root/dcim/DCIM_HostNetworkInterfaceView",
iDRACCompEnum.PowerSupply : "http://schemas.dmtf.org/wbem/wscim/1/cim-schema/2/root/dcim/DCIM_PowerSupplyView",
iDRACCompEnum.VFlash : "http://schemas.dmtf.org/wbem/wscim/1/cim-schema/2/root/dcim/DCIM_VFlashView",
iDRACCompEnum.Video : "http://schemas.dmtf.org/wbem/wscim/1/cim-schema/2/root/dcim/DCIM_VideoView",
iDRACCompEnum.PhysicalDisk : "http://schemas.dmtf.org/wbem/wscim/1/cim-schema/2/root/dcim/DCIM_PhysicalDiskView",
iDRACCompEnum.PCIeSSDExtender : "http://schemas.dmtf.org/wbem/wscim/1/cim-schema/2/root/dcim/DCIM_PCIeSSDExtenderView",
iDRACCompEnum.PCIeSSDBackPlane : "http://schemas.dmtf.org/wbem/wscim/1/cim-schema/2/root/dcim/DCIM_PCIeSSDBackPlaneView",
iDRACCompEnum.PCIeSSDDisk : "http://schemas.dmtf.org/wbem/wscim/1/cim-schema/2/root/dcim/DCIM_PCIeSSDView",
iDRACCompEnum.ControllerBattery : "http://schemas.dmtf.org/wbem/wscim/1/cim-schema/2/root/dcim/DCIM_ControllerBatteryView",
iDRACCompEnum.Controller : "http://schemas.dmtf.org/wbem/wscim/1/cim-schema/2/root/dcim/DCIM_ControllerView",
iDRACCompEnum.ControllerSensor : "http://schemas.dmtf.org/wbem/wscim/1/cim-schema/2/root/dcim/DCIM_ControllerView",
iDRACCompEnum.EnclosureEMM : "http://schemas.dmtf.org/wbem/wscim/1/cim-schema/2/root/dcim/DCIM_EnclosureEMMView",
iDRACCompEnum.EnclosurePSU : "http://schemas.dmtf.org/wbem/wscim/1/cim-schema/2/root/dcim/DCIM_EnclosurePSUView",
iDRACCompEnum.Enclosure : "http://schemas.dmtf.org/wbem/wscim/1/cim-schema/2/root/dcim/DCIM_EnclosureView",
iDRACCompEnum.EnclosureSensor : "http://schemas.dmtf.org/wbem/wscim/1/cim-schema/2/root/dcim/DCIM_EnclosureView",
iDRACCompEnum.PCIDevice : "http://schemas.dmtf.org/wbem/wscim/1/cim-schema/2/root/dcim/DCIM_PCIDeviceView",
iDRACCompEnum.VirtualDisk : "http://schemas.dmtf.org/wbem/wscim/1/cim-schema/2/root/dcim/DCIM_VirtualDiskView",
iDRACSensorEnum.ServerSensor : "http://schemas.dmtf.org/wbem/wscim/1/cim-schema/2/root/dcim/DCIM_Sensor",
iDRACSensorEnum.NumericSensor : "http://schemas.dmtf.org/wbem/wscim/1/cim-schema/2/root/dcim/DCIM_NumericSensor",
iDRACSensorEnum.PSNumericSensor : "http://schemas.dmtf.org/wbem/wscim/1/cim-schema/2/root/dcim/DCIM_PSNumericSensor",
iDRACFirmEnum.Firmware : "http://schemas.dmtf.org/wbem/wscim/1/cim-schema/2/root/dcim/DCIM_SoftwareIdentity",
iDRACJobsEnum.Jobs : "http://schemas.dmtf.org/wbem/wscim/1/cim-schema/2/root/dcim/DCIM_LifecycleJob",
iDRACOSDJobsEnum.OSDJobs : "http://schemas.dell.com/wbem/wscim/1/cim-schema/2/DCIM_OSDConcreteJob",
# iDRACMiscEnum.SystemString : "http://schemas.dmtf.org/wbem/wscim/1/cim-schema/2/root/dcim/DCIM_SystemString",
iDRACMiscEnum.SystemString : ["http://schemas.dmtf.org/wbem/wscim/1/cim-schema/2/root/dcim/DCIM_SystemString", "select FQDD,InstanceID,AttributeName,CurrentValue from DCIM_SystemString WHERE AttributeName = 'OSName' or AttributeName = 'OSVersion'"],
# iDRACMiscEnum.NICString : "http://schemas.dmtf.org/wbem/wscim/1/cim-schema/2/root/dcim/DCIM_NICString",
iDRACMiscEnum.NICString: ["http://schemas.dmtf.org/wbem/wscim/1/cim-schema/2/root/dcim/DCIM_NICString", "select FQDD,InstanceID,AttributeName,CurrentValue from DCIM_NICString WHERE AttributeName = 'VirtWWN' or AttributeName = 'VirtWWPN' or AttributeName = 'WWN' or AttributeName = 'WWPN'"],
iDRACMiscEnum.NICEnumeration : "http://schemas.dmtf.org/wbem/wscim/1/cim-schema/2/root/dcim/DCIM_NICEnumeration",
# iDRACMiscEnum.iDRACString : "http://schemas.dmtf.org/wbem/wscim/1/cim-schema/2/root/dcim/DCIM_iDRACCardString",
iDRACMiscEnum.iDRACString : ["http://schemas.dmtf.org/wbem/wscim/1/cim-schema/2/root/dcim/DCIM_iDRACCardString", "select FQDD,InstanceID,AttributeName,CurrentValue from DCIM_iDRACCardString WHERE InstanceID = 'iDRAC.Embedded.1#IPv4.1#Address' or InstanceID = 'iDRAC.Embedded.1#Info.1#Product' or InstanceID = 'iDRAC.Embedded.1#CurrentNIC.1#MACAddress' or InstanceID = 'iDRAC.Embedded.1#CurrentIPv6.1#Address1' or InstanceID = 'iDRAC.Embedded.1#GroupManager.1#GroupName' or InstanceID = 'iDRAC.Embedded.1#NIC.1#SwitchConnection' or InstanceID = 'iDRAC.Embedded.1#NIC.1#SwitchPortConnection' or AttributeName = 'Destination'"],
# iDRACMiscEnum.iDRACEnumeration : "http://schemas.dmtf.org/wbem/wscim/1/cim-schema/2/root/dcim/DCIM_iDRACCardEnumeration",
iDRACMiscEnum.iDRACEnumeration : ["http://schemas.dmtf.org/wbem/wscim/1/cim-schema/2/root/dcim/DCIM_iDRACCardEnumeration", "select FQDD,InstanceID,AttributeName,CurrentValue from DCIM_iDRACCardEnumeration WHERE InstanceID='iDRAC.Embedded.1#GroupManager.1#Status' or InstanceID='iDRAC.Embedded.1#NIC.1#Duplex' or InstanceID='iDRAC.Embedded.1#NIC.1#Speed' or InstanceID='iDRAC.Embedded.1#NIC.1#Enable' or InstanceID='iDRAC.Embedded.1#Lockdown.1#SystemLockdown' or AttributeName = 'State'"],
iDRACMiscEnum.NICStatistics : "http://schemas.dmtf.org/wbem/wscim/1/cim-schema/2/root/dcim/DCIM_NICStatistics",
iDRACMiscEnum.NICCapabilities : "http://schemas.dmtf.org/wbem/wscim/1/cim-schema/2/root/dcim/DCIM_NICCapabilities",
iDRACMiscEnum.SwitchConnection : "http://schemas.dmtf.org/wbem/wscim/1/cim-schema/2/root/dcim/DCIM_SwitchConnectionView",
iDRACMiscEnum.FCStatistics : "http://schemas.dmtf.org/wbem/wscim/1/cim-schema/2/root/dcim/DCIM_FCStatistics",
iDRACMiscEnum.HostNICView : "http://schemas.dmtf.org/wbem/wscim/1/cim-schema/2/root/dcim/DCIM_HostNetworkInterfaceView",
iDRACCompEnum.License : "http://schemas.dell.com/wbem/wscim/1/cim-schema/2/DCIM_License",
iDRACLicenseEnum.LicensableDevice : "http://schemas.dell.com/wbem/wscim/1/cim-schema/2/DCIM_LicensableDevice",
iDRACLogsEnum.SELLog : "http://schemas.dell.com/wbem/wscim/1/cim-schema/2/DCIM_SELLogEntry",
iDRACCompEnum.BIOS : "http://schemas.dmtf.org/wbem/wscim/1/cim-schema/2/root/dcim/DCIM_SoftwareIdentity",
iDRACCompEnum.EnclosureFanSensor : "http://schemas.dmtf.org/wbem/wscim/1/cim-schema/2/root/dcim/DCIM_EnclosureFanSensor",
iDRACMiscEnum.RAIDEnumeration : ["http://schemas.dmtf.org/wbem/wscim/1/cim-schema/2/root/dcim/DCIM_RAIDEnumeration","select FQDD,InstanceID,AttributeName,CurrentValue from DCIM_RAIDEnumeration WHERE AttributeName = 'RAIDNegotiatedSpeed'"],
iDRACCompEnum.EnclosureTempSensor : "http://schemas.dmtf.org/wbem/wscim/1/cim-schema/2/root/dcim/DCIM_EnclosureTemperatureSensor",
iDRACMetricsEnum.BaseMetricValue : "http://schemas.dell.com/wbem/wscim/1/cim-schema/2/DCIM_BaseMetricValue",
iDRACMetricsEnum.AggregationMetric : "http://schemas.dell.com/wbem/wscim/1/cim-schema/2/DCIM_AggregationMetricValue",
iDRACCompEnum.PresenceAndStatusSensor : "http://schemas.dell.com/wbem/wscim/1/cim-schema/2/DCIM_PresenceAndStatusSensor",
iDRACMiscEnum.LCString : ["http://schemas.dell.com/wbem/wscim/1/cim-schema/2/DCIM_LCString","select InstanceID,AttributeName,CurrentValue from DCIM_LCString WHERE AttributeName = 'VirtualAddressManagementApplication'"]
}
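# Per-class post-processing rules for WS-Man attributes: 'Rename' changes the reported
# attribute name, 'Type'/'InUnits'/'OutUnits' drive unit conversion, and
# 'Lookup'/'Values' map raw numeric codes to readable strings.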
iDRACWsManViews_FieldSpec = {
iDRACCompEnum.Memory : {
"Size" : { 'Type' : 'Bytes', 'InUnits' : "MB" },
"CurrentOperatingSpeed" : { 'Type' : 'ClockSpeed', 'InUnits' : "MHz", 'OutUnits' : 'MHz' },
"Speed" : { 'Type' : 'ClockSpeed', 'InUnits' : "MHz" },
"PrimaryStatus" : {
'Lookup' : 'True',
'Values' : {
"0" : "Unknown",
"1" : "Healthy",
"2" : "Warning",
"3" : "Critical",
"0x8000" : "Unknown",
"0xFFFF" : "Unknown"
}
}
},
iDRACCompEnum.Fan : {
"PrimaryStatus" : {
'Lookup' : 'True',
'Values' : {
"0" : "Unknown",
"1" : "Healthy",
"2" : "Warning",
"3" : "Critical"
}
},
"RedundancyStatus": {
'Lookup': 'True',
'Values': {
"0": "Unknown",
"1": "DMTF Reserved",
"2": "Fully Redundant",
"3": "Degraded Redundancy",
"4": "Redundancy Lost",
"5": "Overall Failure",
"6": "Not Applicable"
}
}
},
iDRACCompEnum.FC : {
"LinkStatus" : {
'Lookup' : 'True',
'Values' : {
"0" : "Down",
"1" : "Up",
"2" : "Unknown",
}
}
},
iDRACCompEnum.Controller : {
"CacheSizeInMB" : { 'Rename' : 'CacheSize', 'Type' : 'Bytes', 'InUnits' : 'MB', 'OutUnits' : 'GB' },
"SecurityStatus" : {
'Lookup' : 'True',
'Values' : {
"0" : "Encryption Not Capable",
"1" : "Encryption Capable",
"2" : "Security Key Assigned"
}
},
"EncryptionMode" : {
'Lookup' : 'True',
'Values' : {
"0" : "None",
"1" : "Local Key Management",
"2" : "Dell Key Management",
"3" : "Pending Dell Key Management"
}
},
"EncryptionCapability" : {
'Lookup' : 'True',
'Values' : {
"0" : "None",
"1" : "Local Key Management Capable",
"2" : "Dell Key Management Capable",
"3" : "Local Key Management and Dell Key Management Capable"
}
},
"SlicedVDCapability" : {
'Lookup' : 'True',
'Values' : {
"0" : "Sliced Virtual Disk creation not supported",
"1" : "Sliced Virtual Disk creation supported"
}
},
"CachecadeCapability" : {
'Lookup' : 'True',
'Values' : {
"0" : "Cachecade Virtual Disk not supported",
"1" : "Cachecade Virtual Disk supported"
}
},
"PrimaryStatus" : {
'Lookup' : 'True',
'Values' : {
"0" : "Unknown",
"1" : "Healthy",
"2" : "Warning",
"3" : "Critical",
"0x8000" : "Unknown",
"0xFFFF" : "Unknown"
}
},
"RollupStatus" : {
'Lookup' : 'True',
'Values' : {
"0" : "Unknown",
"1" : "Healthy",
"2" : "Warning",
"3" : "Critical",
}
}
},
iDRACCompEnum.CPU : {
"CPUFamily" : {
'Lookup' : 'True',
'Values' : {
"1" : "Other",
"2" : "Unknown",
"3" : "8086",
"4" : "80286",
"5" : "80386",
"6" : "80486",
"7" : "8087",
"8" : "80287",
"9" : "80387",
"A" : "80487",
"B" : "Pentium(R)brand",
"C" : "Pentium(R)Pro",
"D" : "pentium(R) II",
"E" : "Pentium(R) Processor with MMX(TM) technology",
"F" : "Celeron(TM)",
"10" : "Pentium(R) II Xeon(TM)",
"11" : "Pentium(R) III",
"12" : "M1 Family",
"13" : "M2 Family",
"14" : "Intel(R) Celeron(R) M processor",
"15" : "Intel(R) Pentium(R) 4 HT processor",
"18" : "K5 Family",
"19" : "K6 Family" ,
"1A" : "K6-2",
"1B" : "K6-3",
"1C" : "AMD Athlon(TM) Processor Family",
"1D" : "AMD(R) Duron(TM) Processor",
"1E" : "AMD29000 Family",
"1F" : "K6-2+",
"20" : "Power PC Family",
"21" : "Power PC 601",
"22" : "Power PC 603",
"23" : "Power PC 603+",
"24" : "Power PC 604",
"25" : "Power PC 620",
"26" : "Power PC X704",
"27" : "Power PC 750",
"28" : "Intel(R) Core(TM) Duo processor",
"29" : "Intel(R) Core(TM) Duo mobile processor",
"2A" : "Intel(R) Core(TM) Solo mobile processor",
"2B" : "Intel(R) Atom(TM) processor",
"30" : "Alpha Family",
"31" : "Alpha 21064",
"32" : "Alpha 21066",
"33" : "Alpha 21164",
"34" : "Alpha 21164PC",
"35" : "Alpha 21164a",
"36" : "Alpha 21264",
"37" : "Alpha 21364",
"38" : "AMD Turion(TM) II Ultra Dual-Core Mobile M Processor Family",
"39" : "AMD Turion(TM) II Dual-Core Mobile M Processor Family",
"3A" : "AMD Athlon(TM) II Dual-Core Mobile M Processor Family",
"3B" : "AMD Opteron(TM) 6100 Series Processor",
"3C" : "AMD Opteron(TM) 4100 Series Processor",
"3D" : "AMD Opteron(TM) 6200 Series Processor",
"3E" : "AMD Opteron(TM) 4200 Series Processor",
"40" : "MIPS Family",
"41" : "MIPS R4000",
"42" : "MIPS R4200",
"43" : "MIPS R4400",
"44" : "MIPS R4600",
"45" : "MIPS R10000",
"46" : "AMD C-Series Processor",
"47" : "AMD E-Series Processor",
"48" : "AMD S-Series Processor",
"49" : "AMD G-Series Processor",
"50" : "SPARC Family",
"51" : "SuperSPARC",
"52" : "microSPARC II",
"53" : "microSPARC IIep",
"54" : "UltraSPARC",
"55" : "UltraSPARC II",
"56" : "UltraSPARC IIi",
"57" : "UltraSPARC III",
"58" : "UltraSPARC IIIi",
"60" : "68040",
"61" : "68xxx Family",
"62" : "68000",
"63" : "68010",
"64" : "68020",
"65" : "68030",
"70" : "Hobbit Family",
"78" : "Crusoe(TM) TM5000 Family",
"79" : "Crusoe(TM) TM3000 Family",
"7A" : "Efficeon(TM) TM8000 Family",
"80" : "Weitek",
"82" : "Itanium(TM) Processor",
"83" : "AMD Athlon(TM) 64 Processor Family",
"84" : "AMD Opteron(TM) Processor Family",
"85" : "AMD Sempron(TM) Processor Family",
"86" : "AMD Turion(TM) 64 Mobile Technology",
"87" : "Dual-Core AMD Opteron(TM) Processor Family",
"88" : "AMD Athlon(TM) 64 X2 Dual-Core Processor Family",
"89" : "AMD Turion(TM) 64 X2 Mobile Technology",
"8A" : "Quad-Core AMD Opteron(TM) Processor Family",
"8B" : "Third Generation AMD Opteron(TM) Processor Family",
"8C" : "AMD Phenom(TM) FX Quad-Core Processor Family",
"8D" : "AMD Phenom(TM) X4 Quad-Core Processor Family",
"8E" : "AMD Phenom(TM) X2 Dual-Core Processor Family",
"8F" : "AMD Athlon(TM) X2 Dual-Core Processor Family",
"90" : "PA-RISC Family",
"91" : "PA-RISC 8500",
"92" : "PA-RISC 8000",
"93" : "PA-RISC 7300LC",
"94" : "PA-RISC 7200",
"95" : "PA-RISC 7100LC",
"96" : "PA-RISC 7100",
"A0" : "V30 Family",
"A1" : "Quad-Core Intel(R) Xeon(R) processor 3200 Series",
"A2" : "Dual-Core Intel(R) Xeon(R) processor 3000 Series",
"A3" : "Quad-Core Intel(R) Xeon(R) processor 5300 Series",
"A4" : "Dual-Core Intel(R) Xeon(R) processor 5100 Series",
"A5" : "Dual-Core Intel(R) Xeon(R) processor 5000 Series",
"A6" : "Dual-Core Intel(R) Xeon(R) processor LV",
"A7" : "Dual-Core Intel(R) Xeon(R) processor ULV",
"A8" : "Dual-Core Intel(R) Xeon(R) processor 7100 Series",
"A9" : "Quad-Core Intel(R) Xeon(R) processor 5400 Series",
"AA" : "Quad-Core Intel(R) Xeon(R) processor",
"AB" : "Dual-Core Intel(R) Xeon(R) processor 5200 Series",
"AC" : "Dual-Core Intel(R) Xeon(R) processor 7200 Series",
"AD" : "Quad-Core Intel(R) Xeon(R) processor 7300 Series",
"AE" : "Quad-Core Intel(R) Xeon(R) processor 7400 Series",
"AF" : "Multi-Core Intel(R) Xeon(R) processor 7400 Series",
"B0" : "Pentium(R) III Xeon(TM)",
"B1" : "Pentium(R) III Processor with Intel(R) SpeedStep(TM) Technology",
"B2" : "Pentium(R) 4",
"B3" : "Intel(R) Xeon(TM)",
"B4" : "AS400 Family",
"B5" : "Intel(R) Xeon(TM) Processor MP",
"B6" : "AMD Athlon(TM) XP Family",
"B7" : "AMD Athlon(TM) MP Family",
"B8" : "Intel(R) Itanium(R) 2",
"B9" : "Intel(R) Pentium(R) M Processor",
"BA" : "Intel(R) Celeron(R) D Processor",
"BB" : "Intel(R) Pentium(R) D Processor",
"BC" : "Intel(R) Pentium(R) Processor Extreme Edition",
"BD" : "Intel(R) Core(TM) Solo Processor",
"BE" : "K7",
"BF" : "Intel(R) Core(TM) 2 Duo Processor",
"C0" : "Intel(R) Core(TM) 2 Solo Processor",
"C1" : "Intel(R) Core(TM) 2 Extreme Processor",
"C2" : "Intel(R) Core(TM) 2 Quad Processor",
"C3" : "Intel(R) Core(TM) 2 Extreme mobile Processor",
"C4" : "Intel(R) Core(TM) 2 Duo mobile Processor",
"C5" : "Intel(R) Core(TM) 2 solo mobile Processor",
"C6" : "Intel(R) Core(TM) i7 processor",
"C7" : "Dual-Core Intel(R) Celeron(R) Processor",
"C8" : "S/390 and zSeries Family",
"C9" : "ESA/390 G4",
"CA" : "ESA/390 G5",
"CB" : "ESA/390 G6",
"CC" : "z/Architecture base",
"CD" : "Intel(R) Core(TM) i5 processor",
"CE" : "Intel(R) Core(TM) i3 processor",
"D2" : "VIA C7(TM)-M Processor Family",
"D3" : "VIA C7(TM)-D Processor Family",
"D4" : "VIA C7(TM) Processor Family",
"D5" : "VIA Eden(TM) Processor Family",
"D6" : "Multi-Core Intel(R) Xeon(R) processor",
"D7" : "Dual-Core Intel(R) Xeon(R) processor 3xxx Series",
"D8" : "Quad-Core Intel(R) Xeon(R) processor 3xxx Series",
"D9" : "VIA Nano(TM) Processor Family",
"DA" : "Dual-Core Intel(R) Xeon(R) processor 5xxx Series",
"DB" : "Quad-Core Intel(R) Xeon(R) processor 5xxx Series",
"DD" : "Dual-Core Intel(R) Xeon(R) processor 7xxx Series",
"DE" : "Quad-Core Intel(R) Xeon(R) processor 7xxx Series",
"DF" : "Multi-Core Intel(R) Xeon(R) processor 7xxx Series",
"E0" : "Multi-Core Intel(R) Xeon(R) processor 3400 Series",
"E6" : "Embedded AMD Opteron(TM) Quad-Core Processor Family",
"E7" : "AMD Phenom(TM) Triple-Core Processor Family",
"E8" : "AMD Turion(TM) Ultra Dual-Core Mobile Processor Family",
"E9" : "AMD Turion(TM) Dual-Core Mobile Processor Family",
"EA" : "AMD Athlon(TM) Dual-Core Processor Family",
"EB" : "AMD Sempron(TM) SI Processor Family",
"EC" : "AMD Phenom(TM) II Processor Family",
"ED" : "AMD Athlon(TM) II Processor Family",
"EE" : "Six-Core AMD Opteron(TM) Processor Family",
"EF" : "AMD Sempron(TM) M Processor Family",
"FA" : "i860",
"FB" : "i960",
"FE" : "Reserved (SMBIOS Extension)",
"FF" : "Reserved (Un-initialized Flash Content - Lo)",
"104" : "SH-3",
"105" : "SH-4",
"118" : "ARM",
"119" : "StrongARM",
"12C" : "6x86",
"12D" : "MediaGX",
"12E" : "MII",
"140" : "WinChip",
"15E" : "DSP",
"1F4" : "Video Processor",
"FFFE" : "Reserved (For Future Special Purpose Assignment)",
"FFFF" : "Reserved (Un-initialized Flash Content - Hi)",
"E5" : "AMD AMD Sempron(TM) II Processor",
"66" : "AMD Athlon(TM) X4 Quad-Core Processor Family",
"3F" : "AMD FX(TM) Series Processor",
"4F" : "AMD FirePro(TM) Series Processor",
"E4" : "AMD Opteron(TM) 3000 Series Processor",
"4E" : "AMD Opteron(TM) 3300 Series Processor",
"4C" : "AMD Opteron(TM) 4300 Series Processor",
"4D" : "AMD Opteron(TM) 6300 Series Processor",
"69" : "AMD Opteron(TM) A-Series Processor",
"67" : "AMD Opteron(TM) X1000 Series Processor",
"68" : "AMD Opteron(TM) X2000 Series APU",
"6A" : "AMD Opteron(TM) X3000 Series APU",
"4B" : "AMD R-Series Processor",
"4A" : "AMD Z-Series Processor",
"6B" : "AMD Zen Processor Family",
"2C" : "Intel(R) Core(TM) M processor",
"2D" : "Intel(R) Core(TM) m3 processor",
"2E" : "Intel(R) Core(TM) m5 processor",
"2F" : "Intel(R) Core(TM) m7 processor"
}
},
"HyperThreadingCapable" : {
'Lookup' : 'True',
'Values' : {
"0" : "No",
"1" : "Yes"
}
},
"VirtualizationTechnologyCapable": {
'Lookup' : 'True',
'Values' : {
"0" : "No",
"1" : "Yes"
}
},
"TurboModeCapable": {
'Lookup' : 'True',
'Values' : {
"0" : "No",
"1" : "Yes"
}
},
"HyperThreadingEnabled": {
'Lookup' : 'True',
'Values' : {
"0" : "No",
"1" : "Yes"
}
},
"TurboModeEnabled": {
'Lookup' : 'True',
'Values' : {
"0" : "No",
"1" : "Yes"
}
},
"VirtualizationTechnologyEnabled": {
'Lookup' : 'True',
'Values' : {
"0" : "No",
"1" : "Yes"
}
},
"ExecuteDisabledEnabled": {
'Lookup' : 'True',
'Values' : {
"0" : "No",
"1" : "Yes",
"2" : "Not Applicable"
}
},
"ExecuteDisabledCapable": {
'Lookup' : 'True',
'Values' : {
"0" : "No",
"1" : "Yes"
}
},
"MaxClockSpeed" : { 'Type' : 'ClockSpeed', 'InUnits' : "MHz" },
"CurrentClockSpeed" : { 'Type' : 'ClockSpeed', 'InUnits' : "MHz", 'OutUnits' : 'GHz' },
"PrimaryStatus" : {
'Lookup' : 'True',
'Values' : {
"0" : "Unknown",
"1" : "Healthy",
"2" : "Warning",
"3" : "Critical",
"0x8000" : "Unknown",
"0xFFFF" : "Unknown"
}
}
},
iDRACMiscEnum.NICStatistics : {
"LinkStatus" : {
'Lookup' : 'True',
'Values' : {
'0' : "Unknown",
'1' : "Up",
'3' : "Down"
}
}
},
iDRACCompEnum.PhysicalDisk : {
"SizeInBytes" : { 'Rename' : 'Size', 'Type' : 'Bytes' , 'InUnits' : 'B', 'Metrics' : 'GB' },
"UsedSizeInBytes" : { 'Rename' : 'UsedSize', 'Type' : 'Bytes' , 'InUnits' : 'B', 'Metrics' : 'GB' },
"FreeSizeInBytes" : { 'Rename' : 'FreeSize', 'Type' : 'Bytes' , 'InUnits' : 'B', 'Metrics' : 'GB' },
"BlockSizeInBytes": { 'Rename' : 'BlockSize', 'Type' : 'Bytes' , 'InUnits' : 'B', 'Metrics' : 'B' },
"RemainingRatedWriteEndurance": {
'Lookup' : 'True',
'Values' : {
'255' : "Not Available"
}
},
"MediaType" : {
'Lookup' : 'True',
'Values' : {
'0' : "HDD",
'1' : "SSD"
}
},
"BusProtocol" : {
'Lookup' : 'True',
'Values' : {
"0" : "Unknown",
"1" : "SCSI",
"2" : "PATA",
"3" : "FIBRE",
"4" : "USB",
"5" : "SATA",
"6" : "SAS",
"7" : "PCIe",
"8" : "NVME"
}
},
"PrimaryStatus" : {
'Lookup' : 'True',
'Values' : {
"0" : "Unknown",
"1" : "Healthy",
"2" : "Warning",
"3" : "Critical",
"0x8000" : "Unknown",
"0xFFFF" : "Unknown"
}
},
"RaidStatus" : {
'Lookup' : 'True',
'Values' : {
"0" : "Unknown",
"1" : "Ready",
"2" : "Online",
"3" : "Foreign",
"4" : "Offline",
"5" : "Blocked",
"6" : "Failed",
"7" : "Degraded",
"8" : "Non-RAID",
"9" : "Missing"
}
},
"PredictiveFailureState" : {
'Lookup' : 'True',
'Values' : {
"0" : "Healthy",
"1" : "Warning"
}
},
"FailurePredicted": {
'Rename' : 'PredictiveFailureState',
'Lookup' : 'True',
'Values' : {
"YES" : "Warning",
"NO" : "Healthy"
}
},
"MaxCapableSpeed" : {
'Lookup': 'True',
'Values': {
"0":"Unknown", "1": "1.5 Gbps", "2": "3 Gbps", "3": "6 Gbps","4": "12 Gbps"
}
},
"T10PICapability": {
'Lookup': 'True',
'Values': {
"0": "T10 PI not supported", "1": "T10 PI supported"
}
},
},
iDRACCompEnum.PCIeSSDDisk : {
"SizeInBytes" : { 'Rename' : 'Size', 'Type' : 'Bytes' , 'InUnits' : 'B', 'Metrics' : 'GB' },
"UsedSizeInBytes" : { 'Rename' : 'UsedSize', 'Type' : 'Bytes' , 'InUnits' : 'B', 'Metrics' : 'GB' },
"FreeSizeInBytes" : { 'Rename' : 'FreeSize', 'Type' : 'Bytes' , 'InUnits' : 'B', 'Metrics' : 'GB' },
"BlockSizeInBytes": { 'Rename' : 'BlockSize', 'Type' : 'Bytes' , 'InUnits' : 'B', 'Metrics' : 'B' },
"RemainingRatedWriteEndurance": {
'Lookup' : 'True',
'Values' : {
'255' : "Unknown"
}
},
"MediaType" : {
'Lookup' : 'True',
'Values' : {
'0' : "HDD",
'1' : "SSD"
}
},
"BusProtocol" : {
'Lookup' : 'True',
'Values' : {
"0" : "Unknown",
"1" : "SCSI",
"2" : "PATA",
"3" : "FIBRE",
"4" : "USB",
"5" : "SATA",
"6" : "SAS",
"7" : "PCIe",
"8" : "NVME"
}
},
"PrimaryStatus" : {
'Lookup' : 'True',
'Values' : {
"0" : "Unknown",
"1" : "Healthy",
"2" : "Warning",
"3" : "Critical",
"0x8000" : "Unknown",
"0xFFFF" : "Unknown"
}
},
"RaidStatus" : {
'Lookup' : 'True',
'Values' : {
"0" : "Unknown",
"1" : "Ready",
"2" : "Online",
"3" : "Foreign",
"4" : "Offline",
"5" : "Blocked",
"6" : "Failed",
"7" : "Degraded",
"8" : "Non-RAID",
"9" : "Missing"
}
},
"FailurePredicted": {
'Rename' : 'PredictiveFailureState',
'Lookup' : 'True',
'Values' : {
"YES" : "Warning",
"NO" : "Healthy"
}
},
"DriveFormFactor" : {
'Lookup' : 'True',
'Values' : {
"0" : "Unknown",
"1" : "1.8 inch",
"2" : "2.5 inch",
"3" : "3.5 inch",
"4" : "2.5 inch Add-in card"
}
}
},
iDRACCompEnum.PCIeSSDExtender : {
"PrimaryStatus" : {
'Lookup' : 'True',
'Values' : {
"0" : "Unknown",
"1" : "Healthy",
"2" : "Warning",
"3" : "Critical",
"0x8000" : "Unknown",
"0xFFFF" : "Unknown"
}
}
},
iDRACCompEnum.PCIeSSDBackPlane : {
"RollupStatus" : {'Rename' : 'PrimaryStatus',
'Lookup' : 'True',
'Values' : {
"0" : "Unknown",
"1" : "Healthy",
"2" : "Warning",
"3" : "Critical",
"0x8000" : "Unknown",
"0xFFFF" : "Unknown"
}
}
},
iDRACCompEnum.System: {
"SysMemMaxCapacitySize" : { 'Type' : 'Bytes' , 'InUnits' : 'MB', 'OutUnits' : 'TB' },
"SysMemTotalSize" : { 'Type' : 'Bytes' , 'InUnits' : 'MB', 'OutUnits' : 'GB' },
"CurrentRollupStatus" : {
'Lookup' : 'True',
'Values' : {
'0' : 'Unknown',
'1' : 'Healthy',
'2' : 'Warning',
'3' : 'Critical'
}
},
"FanRollupStatus" : {
'Lookup' : 'True',
'Values' : {
"0" : "Unknown",
"1" : "Healthy",
"2" : "Warning",
"3" : "Critical"
}
},
"RollupStatus" : {
'Rename': 'PrimaryStatus',
'Lookup' : 'True',
'Values' : {
"0" : "Unknown",
"1" : "Healthy",
"2" : "Warning",
"3" : "Critical"
}
},
"PowerCapEnabledState": {
'Lookup': 'True',
'Values': {
"2": "Enabled",
"3": "Disabled"
}
},
"PowerState": {
'Lookup': 'True',
'Values': {
"2": "On",
"8": "Off - Soft"
}
}
},
iDRACCompEnum.VirtualDisk : {
"SizeInBytes" : { 'Rename' : 'Size', 'Type' : 'Bytes' , 'InUnits' : 'B' , 'Metrics' : 'GB'},
"BlockSizeInBytes": { 'Rename' : 'BlockSize', 'Type' : 'Bytes' , 'InUnits' : 'B', 'Metrics' : 'B' },
"RAIDTypes" : {
'Lookup' : 'True',
'Values' : {
'1' : 'No RAID',
'2' : 'RAID 0',
'4' : 'RAID 1',
'64' : 'RAID 5',
'128' : 'RAID 6',
'2048' : 'RAID 10',
'8192' : 'RAID 50',
'16384' : 'RAID 60'
}
},
"RAIDStatus" : {
'Lookup' : 'True',
'Values' : {
"0" : "Unknown",
"1" : "Ready",
"2" : "Online",
"3" : "Foreign",
"4" : "Offline",
"5" : "Blocked",
"6" : "Failed",
"7" : "Degraded"
}
},
"PrimaryStatus" : {
'Lookup' : 'True',
'Values' : {
"0" : "Unknown",
"1" : "Healthy",
"2" : "Warning",
"3" : "Critical"
}
},
"StripeSize" : {
'Lookup' : 'True',
'Values' : {
"0" : "Default",
"1" : "512",
"2" : "1024",
"4" : "2048",
"8" : "4096",
"16" : "8192",
"32" : "16384",
"64" : "32768",
"128" : "65536",
"256" : "131072",
"512" : "262144",
"1024" : "524288",
"2048" : "1048576",
"4096" : "2097152",
"8192" : "4194304",
"16384" :"8388608",
"32768" : "16777216"
}
},
"EnabledState" : { 'Rename' : 'State',
'Lookup' : 'True',
'Values' : {
"0" : "Unknown",
"1" : "Other",
"2" : "Enabled",
"3" : "Disabled",
"4" : "Shutting Down",
"5" : "Not Applicable",
"6" : "Enabled but Offline",
"7" : "In Test",
"8" : "Deferred",
"9" : "Quiesce",
"10" : "Starting"
}
}
},
iDRACCompEnum.VFlash : {
"Capacity" : { 'Type' : 'Bytes', 'InUnits' : 'MB' },
"AvailableSize" : { 'Type' : 'Bytes', 'InUnits' : 'MB' },
"HealthStatus" : { 'Rename' : 'PrimaryStatus',
'Lookup' : 'True',
'Values' : {
"OK" : "Healthy",
"Error" : "Critical",
"Critical" : "Critical"
}
}
},
iDRACSensorEnum.ServerSensor : {
"PrimaryStatus" : {
'Lookup' : 'True',
'Values' : {
"0" : "Unknown",
"1" : "Healthy",
"2" : "Warning",
"3" : "Critical"
}
},
"ElementName": { 'Rename' : 'Location'},
"EnabledState" : { 'Rename' : 'State',
'Lookup' : 'True',
'Values' : {
"0" : "Unknown",
"1" : "Other",
"2" : "Enabled",
"3" : "Disabled",
"4" : "Shutting Down",
"5" : "Not Applicable",
"6" : "Enabled but Offline",
"7" : "In Test",
"8" : "Deferred",
"9" : "Quiesce",
"10" : "Starting"
}
}
},
iDRACSensorEnum.NumericSensor : {
"CurrentReading" : {'UnitModify': 'UnitModifier',
'UnitName' : 'BaseUnits',
'BaseUnits' : {
'6' : None, #'Amps',
'7' : None, #'Watts',
'2' : None, #'Degrees C',
'5' : None, #'Volts',
'19' : None, #'RPM',
'65' : None, #'Percentage'
}
},
"PrimaryStatus" : {
'Lookup' : 'True',
'Values' : {
"0" : "Unknown",
"1" : "Healthy",
"2" : "Warning",
"3" : "Critical"
}
},
"ElementName": { 'Rename' : 'Location'},
"EnabledState" : { 'Rename' : 'State',
'Lookup' : 'True',
'Values' : {
"0" : "Unknown",
"1" : "Other",
"2" : "Enabled",
"3" : "Disabled",
"4" : "Shutting Down",
"5" : "Not Applicable",
"6" : "Enabled but Offline",
"7" : "In Test",
"8" : "Deferred",
"9" : "Quiesce",
"10" : "Starting"
}
}
},
iDRACSensorEnum.PSNumericSensor : {
"CurrentReading" : {'UnitModify': 'UnitModifier',
'UnitName' : 'BaseUnits',
'BaseUnits' : {
'6' : None, #'Amps',
'7' : None, #'Watts',
'2' : None, # 'Degrees C',
'5' : None, # 'Volts',
'19' : None, # 'RPM',
'65' : None, #'Percentage'
}
},
"ElementName": { 'Rename' : 'Location'},
"PrimaryStatus" : {
'Lookup' : 'True',
'Values' : {
"0" : "Unknown",
"1" : "Healthy",
"2" : "Warning",
"3" : "Critical"
}
},
"EnabledState" : { 'Rename' : 'State',
'Lookup' : 'True',
'Values' : {
"0" : "Unknown",
"1" : "Other",
"2" : "Enabled",
"3" : "Disabled",
"4" : "Shutting Down",
"5" : "Not Applicable",
"6" : "Enabled but Offline",
"7" : "In Test",
"8" : "Deferred",
"9" : "Quiesce",
"10" : "Starting"
}
}
},
iDRACMiscEnum.iDRACEnumeration : {
"InstanceID" : {
'Lookup' : 'True',
'Values' : {
"iDRAC.Embedded.1#GroupManager.1#Status" : 'GroupStatus',
"iDRAC.Embedded.1#NIC.1#Duplex" : 'NICDuplex',
"iDRAC.Embedded.1#NIC.1#Speed": 'NICSpeed',
"iDRAC.Embedded.1#Lockdown.1#SystemLockdown" : 'SystemLockDown',
"iDRAC.Embedded.1#NIC.1#Enable" : 'NICEnabled'
}
}
},
iDRACMiscEnum.iDRACString : {
"InstanceID" : {
'Lookup' : 'True',
'Values' : {
"iDRAC.Embedded.1#IPv4.1#Address" : 'IPv4Address',
"iDRAC.Embedded.1#Info.1#Product" : 'ProductInfo',
"iDRAC.Embedded.1#CurrentNIC.1#MACAddress" : 'MACAddress',
"iDRAC.Embedded.1#CurrentIPv6.1#Address1" : 'IPv6Address',
"iDRAC.Embedded.1#GroupManager.1#GroupName" : 'GroupName',
"iDRAC.Embedded.1#NIC.1#SwitchConnection" : 'SwitchConnection',
"iDRAC.Embedded.1#NIC.1#SwitchPortConnection" : 'SwitchPortConnection'
}
}
},
iDRACCompEnum.PowerSupply : {
"TotalOutputPower" : {'UnitScale': '0', 'UnitAppend' : 'W'},
"Range1MaxInputPower" : {'UnitScale': '0', 'UnitAppend' : 'W'},
"PrimaryStatus" : {
'Lookup' : 'True',
'Values' : {
"0" : "Unknown",
"1" : "Healthy",
"2" : "Warning",
"3" : "Critical"
}
},
"RedundancyStatus" : { 'Rename' : 'Redundancy',
'Lookup' : 'True',
'Values' : {
"0" : "Unknown",
"1" : "DMTF Reserved",
"2" : "Fully Redundant",
"3" : "Degraded Redundancy",
"4" : "Redundancy Lost",
"5" : "Overall Failure"
}
}
},
iDRACMetricsEnum.BaseMetricValue : {
"InstanceID" : {
'Lookup' : 'True',
'Values' : {
"DCIM:System:Point:Energy:Cont" : 'EnergyConsumption',
"DCIM:System:Point:PowerHdrm:Cont" : 'PowerConsumption',
"DCIM:System:Point:InletTempWarnPerc:Cont" : 'InletTempWarnPerc',
"DCIM:System:Point:InletTempCriticalPerc:Cont" : 'InletTempCriticalPerc'
}
}
},
iDRACMetricsEnum.AggregationMetric: {
"InstanceID" : {
'Lookup' : 'True',
'Values' : {
"DCIM:SystemBoard:Min:CPUUsage:1H" : 'CPUUsageMin1H',
"DCIM:SystemBoard:Min:CPUUsage:1D" : 'CPUUsageMin1D',
"DCIM:SystemBoard:Min:CPUUsage:1W" : 'CPUUsageMin1W',
"DCIM:SystemBoard:Max:CPUUsage:1H" : 'CPUUsageMax1H',
"DCIM:SystemBoard:Max:CPUUsage:1D" : 'CPUUsageMax1D',
"DCIM:SystemBoard:Max:CPUUsage:1W" : 'CPUUsageMax1W',
"DCIM:SystemBoard:Avg:CPUUsage:1H" : 'CPUUsageAvg1H',
"DCIM:SystemBoard:Avg:CPUUsage:1D" : 'CPUUsageAvg1D',
"DCIM:SystemBoard:Avg:CPUUsage:1W" : 'CPUUsageAvg1W',
"DCIM:SystemBoard:Min:MemoryUsage:1H" : 'MemoryUsageMin1H',
"DCIM:SystemBoard:Min:MemoryUsage:1D" : 'MemoryUsageMin1D',
"DCIM:SystemBoard:Min:MemoryUsage:1W" : 'MemoryUsageMin1W',
"DCIM:SystemBoard:Max:MemoryUsage:1H" : 'MemoryUsageMax1H',
"DCIM:SystemBoard:Max:MemoryUsage:1D" : 'MemoryUsageMax1D',
"DCIM:SystemBoard:Max:MemoryUsage:1W" : 'MemoryUsageMax1W',
"DCIM:SystemBoard:Avg:MemoryUsage:1H" : 'MemoryUsageAvg1H',
"DCIM:SystemBoard:Avg:MemoryUsage:1D" : 'MemoryUsageAvg1D',
"DCIM:SystemBoard:Avg:MemoryUsage:1W" : 'MemoryUsageAvg1W',
"DCIM:SystemBoard:Min:IOUsage:1H" : 'IOUsageMin1H',
"DCIM:SystemBoard:Min:IOUsage:1D" : 'IOUsageMin1D',
"DCIM:SystemBoard:Min:IOUsage:1W" : 'IOUsageMin1W',
"DCIM:SystemBoard:Max:IOUsage:1H" : 'IOUsageMax1H',
"DCIM:SystemBoard:Max:IOUsage:1D" : 'IOUsageMax1D',
"DCIM:SystemBoard:Max:IOUsage:1W" : 'IOUsageMax1W',
"DCIM:SystemBoard:Avg:IOUsage:1H" : 'IOUsageAvg1H',
"DCIM:SystemBoard:Avg:IOUsage:1D" : 'IOUsageAvg1D',
"DCIM:SystemBoard:Avg:IOUsage:1W" : 'IOUsageAvg1W',
"DCIM:SystemBoard:Min:SYSUsage:1H" : 'SYSUsageMin1H',
"DCIM:SystemBoard:Min:SYSUsage:1D" : 'SYSUsageMin1D',
"DCIM:SystemBoard:Min:SYSUsage:1W" : 'SYSUsageMin1W',
"DCIM:SystemBoard:Max:SYSUsage:1H" : 'SYSUsageMax1H',
"DCIM:SystemBoard:Max:SYSUsage:1D" : 'SYSUsageMax1D',
"DCIM:SystemBoard:Max:SYSUsage:1W" : 'SYSUsageMax1W',
"DCIM:SystemBoard:Avg:SYSUsage:1H" : 'SYSUsageAvg1H',
"DCIM:SystemBoard:Avg:SYSUsage:1D" : 'SYSUsageAvg1D',
"DCIM:SystemBoard:Avg:SYSUsage:1W" : 'SYSUsageAvg1W',
"DCIM:System:Max:Current:Cont" : 'PeakAmperage',
"DCIM:System:Max:Power:Cont" : 'PeakPower',
"DCIM:System:Max:PowerHdrm:Cont" : 'PeakHeadroom',
"DCIM:SystemBoard:Peak:CPUUsage" : 'SYSPeakCPUUsage',
"DCIM:SystemBoard:Peak:IOUsage" : 'SYSPeakIOUsage',
"DCIM:SystemBoard:Peak:MemoryUsage" : 'SYSPeakMemoryUsage',
"DCIM:SystemBoard:Peak:SYSUsage" : 'SYSPeakSYSUsage'
}
}
},
iDRACCompEnum.License : {
"LicenseInstallDate" : {'DateTime' : None},
"LicenseSoldDate" : {'DateTime' : None},
"LicensePrimaryStatus": {
'Rename' : 'PrimaryStatus',
'Lookup': 'True',
'Values': {
"0": "Unknown",
"1": "Healthy",
"2": "Warning",
"3": "Critical"
}
},
"LicenseType": {
'Lookup': 'True',
'Values': {
"1": "Perpetual",
"2": "Leased",
"3": "Evaluation",
"4": "Site"
}
}
},
iDRACCompEnum.EnclosureFanSensor: {
"PrimaryStatus": {
'Lookup': 'True',
'Values': {
"0": "Unknown",
"1": "Healthy",
"2": "Warning",
"3": "Critical"
}
}
},
iDRACCompEnum.EnclosureTempSensor: {
"PrimaryStatus": {
'Lookup': 'True',
'Values': {
"0": "Unknown",
"1": "Healthy",
"2": "Warning",
"3": "Critical"
}
}
},
iDRACCompEnum.HostNIC: {
"Status": {
'Rename': 'PrimaryStatus',
'Lookup': 'True',
'Values': {
"0": "Healthy",
"1": "Critical",
"2": "Warning",
"3": "Warning",
"4": "Warning",
"5": "Warning",
"6": "Critical"
}
}
},
iDRACCompEnum.PresenceAndStatusSensor: {
"CurrentState":{
'Rename': 'PrimaryStatus',
'Lookup': 'True',
'Values': {
"OK": "Healthy",
"Critical": "Critical"
}
}
},
iDRACMiscEnum.NICCapabilities: {
"FCoEBootSupport": {
'Lookup': 'True',
'Values': {
"0": "Unknown",
"2": "Supported",
"3": "Not Supported"
}
},
"PXEBootSupport": {
'Lookup': 'True',
'Values': {
"0": "Unknown",
"2": "Supported",
"3": "Not Supported"
}
},
"iSCSIBootSupport": {
'Lookup': 'True',
'Values': {
"0": "Unknown",
"2": "Supported",
"3": "Not Supported"
}
},
"WOLSupport": {
'Lookup': 'True',
'Values': {
"0": "Unknown",
"2": "Supported",
"3": "Not Supported"
}
},
"FlexAddressingSupport": {
'Lookup': 'True',
'Values': {
"0": "Unknown",
"2": "Supported",
"3": "Not Supported"
}
},
"VFSRIOVSupport": {
'Lookup': 'True',
'Values': {
"0": "Unknown",
"2": "Supported",
"3": "Not Supported"
}
},
"iSCSIOffloadSupport": {
'Lookup': 'True',
'Values': {
"0": "Unknown",
"2": "Supported",
"3": "Not Supported"
}
},
"FCoEOffloadSupport": {
'Lookup': 'True',
'Values': {
"0": "Unknown",
"2": "Supported",
"3": "Not Supported"
}
},
"NicPartitioningSupport": {
'Lookup': 'True',
'Values': {
"0": "Unknown",
"2": "Supported",
"3": "Not Supported"
}
},
"TCPChimneySupport": {
'Lookup': 'True',
'Values': {
"0": "Unknown",
"2": "Supported",
"3": "Not Supported"
}
},
"DCBExchangeProtocol": {
'Lookup': 'True',
'Values': {
"0": "Unknown",
"2": "Supported",
"3": "Not Supported"
}
}
},
iDRACCompEnum.NIC: {
"FCoEOffloadMode": {
'Lookup': 'True',
'Values': {
"0": "Unknown",
"2": "Enabled",
"3": "Disabled"
}
},
"iScsiOffloadMode": {
'Lookup': 'True',
'Values': {
"0": "Unknown",
"2": "Enabled",
"3": "Disabled"
}
},
"AutoNegotiation": {
'Lookup': 'True',
'Values': {
"0": "Unknown",
"2": "Enabled",
"3": "Disabled"
}
}
},
iDRACCompEnum.ControllerBattery: {
"RAIDState": {
'Lookup': 'True',
'Values': {
"0" : "Unknown",
"1" : "Ready",
"6" : "Failed",
"7" : "Degraded",
"9" : "Missing",
"10" : "Charging",
"12" : "Below Threshold"
}
}
},
iDRACCompEnum.Enclosure: {
"PrimaryStatus": {
'Lookup' : 'True',
'Values' : {
"0" : "Warning",
"1" : "Healthy",
"2" : "Warning",
"3" : "Critical"
}
}
}
}
iDRACClassifier = [ iDRACCompEnum.System ]
iDRACRedfishViews = {
iDRACCompEnum.System: ["Systems","Members"],
iDRACCompEnum.NIC : ["Systems","Members","EthernetInterfaces","Members"],
iDRACCompEnum.CPU : ["Systems","Members","Processors","Members"],
iDRACCompEnum.Sensors_Fan : ["Systems","Members","Links","CooledBy"],
iDRACCompEnum.PowerSupply : ["Systems","Members","Links","PoweredBy"],
iDRACCompEnum.Sensors_Voltage : ["Chassis","Members","Power","Voltages"],
iDRACCompEnum.Sensors_Temperature : ["Chassis","Members","Thermal","Temperatures"],
iDRACCompEnum.Controller : ["Systems","Members","SimpleStorage","Members"],
iDRACCompEnum.iDRAC : ["Managers", "Members"],
iDRACMiscEnum.ChassisRF : ["Chassis","Members"],
iDRACMiscEnum.DellAttributes : ["Managers", "Members", "Links", "Oem", "Dell", "DellAttributes"]
}
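# Redfish field specs use the same 'Rename'/'Lookup' conventions and additionally
# support 'Create', which synthesises flat attributes from nested JSON properties
# (optionally translating values via '_Mapping').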
iDRACRedfishViews_FieldSpec = {
iDRACCompEnum.System : {
"MemorySummary" : {'Create' : {
'SysMemTotalSize' : {'_Attribute' : 'TotalSystemMemoryGiB'},
'SysMemPrimaryStatus' :{ '_Attribute' : {'Status':'Health'}}
}
},
"ProcessorSummary" : {'Create' : {
'CPURollupStatus' : {'_Attribute' : {'Status':'Health'}}
}
},
"Status" : {'Create' : { 'PrimaryStatus' : {'_Attribute' : 'Health'}}},
"SKU" : { 'Rename' : 'ServiceTag'},
"BiosVersion" : { 'Rename' : 'BIOSVersionString'},
"PartNumber" : { 'Rename' : 'BoardPartNumber'},
"SerialNumber" : { 'Rename' : 'BoardSerialNumber'}
},
iDRACCompEnum.NIC : {
"Id" : { 'Rename' : 'FQDD'},
"SpeedMbps" : {'Rename' : 'LinkSpeed', 'UnitScale': '0', 'UnitAppend' : 'Mbps'},
# "Name" : {'Rename' : 'ProductName'},
"AutoNeg" : {'Rename' : 'AutoNegotiation'},
"MACAddress" : {'Rename' : 'CurrentMACAddress'},
"Status" : {'Create' : {
'PrimaryStatus' : {'_Attribute' : 'Health'},
'LinkStatus' : {'_Attribute' : 'State',
'_Mapping' : {'Enabled' : 'Up',
'Disabled' : 'Down',
'StandbyOffline': 'Down',
'StandbySpare' : 'Down',
'InTest' : 'Down',
'Starting' : 'Down',
'Absent' : 'Down',
'UnavailableOffline' : 'Down',
'Deferring' : 'Down',
'Quiesced' : 'Down',
'Updating' : 'Down'}
}
}
},
"Description" : {'Rename' : 'DeviceDescription'}
},
iDRACCompEnum.CPU : {
"Id" : { 'Rename' : 'FQDD'},
"ProcessorId" : {'Create' : {
'VendorId' : {'_Attribute' : 'VendorID'},
'CPUFamily': {'_Attribute' : 'EffectiveFamily'}
}
},
"Status" : {'Create' : {'PrimaryStatus' : {'_Attribute' : 'Health'}}},
"TotalCores" : { 'Rename' : 'NumberOfProcessorCores'},
"TotalThreads" : { 'Rename' : 'NumberOfEnabledThreads'},
"MaxSpeedMHz" : {'Rename' : 'MaxClockSpeed'},
"Name" : {'Rename' : 'DeviceDescription'}
},
iDRACCompEnum.Sensors_Fan : {
"MemberID" : { 'Rename' : 'Key'},
"MemberId" : { 'Rename' : 'Key'},
"FanName" : { 'Rename' : 'Location'},
"Status" : {'Create' : {'PrimaryStatus' : {'_Attribute' : 'Health'},
'State' : {'_Attribute' : 'State'}}
},
"Reading" : { 'Rename' : 'CurrentReading'}
},
iDRACCompEnum.Sensors_Voltage : {
"MemberID" : { 'Rename' : 'Key'},
"MemberId" : { 'Rename' : 'Key'},
"Name" : { 'Rename' : 'Location'},
"Status" : {'Create' : {'PrimaryStatus' : {'_Attribute' : 'Health'},
'State' : {'_Attribute' : 'State'}}
},
"ReadingVolts" : { 'Rename' : 'Reading(V)'}
},
iDRACCompEnum.Sensors_Temperature : {
"MemberID" : { 'Rename' : 'Key'},
"MemberId" : { 'Rename' : 'Key'},
"Name" : { 'Rename' : 'Location'},
"Status" : {'Create' : {'PrimaryStatus' : {'_Attribute' : 'Health'},
'State' : {'_Attribute' : 'State'}}
},
"ReadingCelsius" : { 'Rename' : 'CurrentReading(Degree Celsius)'}
},
iDRACCompEnum.PowerSupply : {
"MemberID" : { 'Rename' : 'FQDD'},
"LastPowerOutputWatts" : {'Rename' : 'TotalOutputPower'},
"LineInputVoltage" : { 'Rename' : 'InputVoltage'},
"Status" : {'Create' : {'PrimaryStatus' : {'_Attribute' : 'Health'}}
},
"Redundancy" : { 'Rename' : 'RedfishRedundancy'}#This is to not show redundancy
},
iDRACCompEnum.Controller : {
"Id" : { 'Rename' : 'FQDD'},
"Name" : {'Rename' : 'ProductName'},
"Status" : {'Create' : {'PrimaryStatus' : {'_Attribute' : 'Health'}}
}
},
iDRACMiscEnum.DellAttributes : {
"Attributes" : {'Create' : {'GroupName' : {'_Attribute' : 'GroupManager.1.GroupName'},
'GroupStatus' : {'_Attribute' : 'GroupManager.1.Status'},
'OSName' : {'_Attribute' : 'ServerOS.1.OSName'},
'OSVersion' : {'_Attribute' : 'ServerOS.1.OSVersion'},
'SystemLockDown' : {'_Attribute' : 'Lockdown.1.SystemLockdown'},
'LifecycleControllerVersion' : {'_Attribute' : 'Info.1.Version'},
'IPv4Address' : {'_Attribute' : 'CurrentIPv4.1.Address'},
'ProductInfo' : {'_Attribute' : 'Info.1.Product'},
'MACAddress' : {'_Attribute' : 'CurrentNIC.1.MACAddress'},
'NICDuplex' : {'_Attribute' : 'NIC.1.Duplex'},
'NICSpeed' : {'_Attribute' : 'NIC.1.Speed'},
'DNSDomainName' : {'_Attribute' : 'NIC.1.DNSDomainName'},
'DNSRacName' : {'_Attribute' : 'NIC.1.DNSRacName'},
'IPv6Address' : {'_Attribute' : 'IPv6.1.Address1'},
'PermanentMACAddress': {'_Attribute' : 'NIC.1.MACAddress'},
'VirtualAddressManagementApplication' : {'_Attribute' : 'LCAttributes.1.VirtualAddressManagementApplication'},
'ChassisServiceTag' : {'_Attribute' : 'ChassisInfo.1.ChassisServiceTag'}}
}
},
iDRACMiscEnum.ChassisRF : {
"Location" : {'Create' : {'ChassisLocation' : {'_Attribute' : 'Info'}}},
'SKU' : {'Rename' : 'ChassisServiceTag'},
'Model' : {'Rename' : 'ChassisModel'},
'Name' : {'Rename' : 'ChassisName'},
'PhysicalSecurity' : {'Create' : {'IntrusionRollupStatus' : {'_Attribute' :'IntrusionSensor'}}}
}
# iDRACCompEnum.iDRAC : {
# "Links" : {'Create' : {'DellHealth' : {'Oem':{'Dell' : '@odata.type'}}}
# }
# }
}
def chk_classifier(myListoFDict, cls=None):
valid = False
flist = []
for sys in myListoFDict:
id = sys.get('Id', 'None')
if 'System.Embedded' in id:
flist.append(sys)
if flist:
valid = True
return (valid, flist)
classify_cond = {
iDRACCompEnum.System :
{
ProtocolEnum.REDFISH : chk_classifier
}
}
if PySnmpPresent:
iDRACSNMPViews = {
iDRACCompEnum.System : {
'SysObjectID' : ObjectIdentity('SNMPv2-MIB', 'sysObjectID'),
"ServiceTag" : ObjectIdentity('1.3.6.1.4.1.674.10892.5.1.3.1'),
"NodeID" : ObjectIdentity('1.3.6.1.4.1.674.10892.5.1.3.18'),
"Model" : ObjectIdentity('1.3.6.1.4.1.674.10892.5.1.3.12'),
"SystemGeneration" : ObjectIdentity('1.3.6.1.4.1.674.10892.5.1.1.7'),
"ChassisServiceTag" : ObjectIdentity('1.3.6.1.4.1.674.10892.5.1.2.1'),
"PrimaryStatus" : ObjectIdentity('1.3.6.1.4.1.674.10892.5.2.1'),
'ChassisModel' : ObjectIdentity("1.3.6.1.4.1.674.10892.5.4.300.10.1.6"),
'StateSettings' : ObjectIdentity("1.3.6.1.4.1.674.10892.5.4.300.10.1.3"),
'Manufacturer' : ObjectIdentity("1.3.6.1.4.1.674.10892.5.4.300.10.1.8"),
'ChassisName' : ObjectIdentity("1.3.6.1.4.1.674.10892.5.4.300.10.1.7"),
'parentIndexReference' : ObjectIdentity("1.3.6.1.4.1.674.10892.5.4.300.10.1.5"),
'StateCapabilities' : ObjectIdentity("1.3.6.1.4.1.674.10892.5.4.300.10.1.2"),
'Status' : ObjectIdentity("1.3.6.1.4.1.674.10892.5.4.300.10.1.4"),
'HostName' : ObjectIdentity("1.3.6.1.4.1.674.10892.5.4.300.10.1.15"),
"OSVersion" : ObjectIdentity('1.3.6.1.4.1.674.10892.5.1.3.14'),
'ServiceTag' : ObjectIdentity("1.3.6.1.4.1.674.10892.5.4.300.10.1.11"),
'SystemRevisionName' : ObjectIdentity("1.3.6.1.4.1.674.10892.5.4.300.10.1.48"),
'SystemRevisionNumber' : ObjectIdentity("1.3.6.1.4.1.674.10892.5.4.300.10.1.47"),
'ExpressServiceCodeName' : ObjectIdentity("1.3.6.1.4.1.674.10892.5.4.300.10.1.49"),
'AssetTag' : ObjectIdentity("1.3.6.1.4.1.674.10892.5.4.300.10.1.10"),
'SysMemPrimaryStatus' : ObjectIdentity("1.3.6.1.4.1.674.10892.5.4.200.10.1.27"),
"CPURollupStatus" : ObjectIdentity("1.3.6.1.4.1.674.10892.5.4.200.10.1.50"),
"FanRollupStatus" : ObjectIdentity(".1.3.6.1.4.1.674.10892.5.4.200.10.1.21"),
"PSRollupStatus" : ObjectIdentity("1.3.6.1.4.1.674.10892.5.4.200.10.1.9"),
"StorageRollupStatus" : ObjectIdentity("1.3.6.1.4.1.674.10892.5.2.3"),
"VoltRollupStatus" : ObjectIdentity("1.3.6.1.4.1.674.10892.5.4.200.10.1.12"),
"TempRollupStatus" : ObjectIdentity("1.3.6.1.4.1.674.10892.5.4.200.10.1.24"),
"CurrentRollupStatus" : ObjectIdentity("1.3.6.1.4.1.674.10892.5.4.200.10.1.15"),
"BatteryRollupStatus" : ObjectIdentity("1.3.6.1.4.1.674.10892.5.4.200.10.1.52"),
"SDCardRollupStatus" : ObjectIdentity("1.3.6.1.4.1.674.10892.5.4.200.10.1.56"),
"IDSDMRollupStatus" : ObjectIdentity("1.3.6.1.4.1.674.10892.5.4.200.10.1.58"),
"ChassisIntrusion" : ObjectIdentity("1.3.6.1.4.1.674.10892.5.4.200.10.1.30"),
"ChassisStatus" : ObjectIdentity("1.3.6.1.4.1.674.10892.5.4.200.10.1.4"),
"CoolingUnit" : ObjectIdentity("1.3.6.1.4.1.674.10892.5.4.200.10.1.44"),
"PowerUnit" : ObjectIdentity("1.3.6.1.4.1.674.10892.5.4.200.10.1.42"),
"LifecycleControllerVersion" : ObjectIdentity("1.3.6.1.4.1.674.10892.5.1.1.8"),
"OSName" : ObjectIdentity("1.3.6.1.4.1.674.10892.5.1.3.6"),
"iDRACURL" : ObjectIdentity("1.3.6.1.4.1.674.10892.5.1.1.6"),
# "RollupStatus" : ObjectIdentity("1.3.6.1.4.1.674.10892.5.2.1"),
"DeviceType" : ObjectIdentity("1.3.6.1.4.1.674.10892.5.1.1.2"),
"SysName" : ObjectIdentity("1.3.6.1.2.1.1.5")
},
iDRACCompEnum.CPU : {
'Index' : ObjectIdentity("1.3.6.1.4.1.674.10892.5.4.1100.30.1.2"),
'PrimaryStatus' : ObjectIdentity("1.3.6.1.4.1.674.10892.5.4.1100.30.1.5"),
'Type' : ObjectIdentity("1.3.6.1.4.1.674.10892.5.4.1100.30.1.7"),
'Manufacturer' : ObjectIdentity("1.3.6.1.4.1.674.10892.5.4.1100.30.1.8"),
'CPUFamily' : ObjectIdentity("1.3.6.1.4.1.674.10892.5.4.1100.30.1.10"),
'MaxClockSpeed' : ObjectIdentity("1.3.6.1.4.1.674.10892.5.4.1100.30.1.11"),
'CurrentClockSpeed' : ObjectIdentity("1.3.6.1.4.1.674.10892.5.4.1100.30.1.12"),
'ExternalClockSpeed' : ObjectIdentity("1.3.6.1.4.1.674.10892.5.4.1100.30.1.13"),
'Voltage' : ObjectIdentity("1.3.6.1.4.1.674.10892.5.4.1100.30.1.14"),
'Version' : ObjectIdentity("1.3.6.1.4.1.674.10892.5.4.1100.30.1.16"),
"NumberOfProcessorCores" : ObjectIdentity("1.3.6.1.4.1.674.10892.5.4.1100.30.1.17"),
"CoreEnabledCount" : ObjectIdentity("1.3.6.1.4.1.674.10892.5.4.1100.30.1.18"),
"ThreadCount" : ObjectIdentity("1.3.6.1.4.1.674.10892.5.4.1100.30.1.19"),
"Characteristics" : ObjectIdentity("1.3.6.1.4.1.674.10892.5.4.1100.30.1.20"),
"ExtendedCapabilities" : ObjectIdentity("1.3.6.1.4.1.674.10892.5.4.1100.30.1.21"),
"ExtendedEnabled" : ObjectIdentity("1.3.6.1.4.1.674.10892.5.4.1100.30.1.22"),
'Model' : ObjectIdentity("1.3.6.1.4.1.674.10892.5.4.1100.30.1.23"),
'FQDD' : ObjectIdentity("1.3.6.1.4.1.674.10892.5.4.1100.30.1.26"),
'processorDeviceStateSettings' : ObjectIdentity("1.3.6.1.4.1.674.10892.5.4.1100.30.1.4"),
},
iDRACCompEnum.Memory : {
'FQDD' : ObjectIdentity("1.3.6.1.4.1.674.10892.5.4.1100.50.1.26"),
'PrimaryStatus' : ObjectIdentity("1.3.6.1.4.1.674.10892.5.4.1100.50.1.5"),
'MemoryType' : ObjectIdentity("1.3.6.1.4.1.674.10892.5.4.1100.50.1.7"),
'LocationName' : ObjectIdentity("1.3.6.1.4.1.674.10892.5.4.1100.50.1.8"),
'BankLabel' : ObjectIdentity("1.3.6.1.4.1.674.10892.5.4.1100.50.1.10"),
'Size' : ObjectIdentity("1.3.6.1.4.1.674.10892.5.4.1100.50.1.14"),
'CurrentOperatingSpeed' : ObjectIdentity("1.3.6.1.4.1.674.10892.5.4.1100.50.1.15"),
'Manufacturer' : ObjectIdentity("1.3.6.1.4.1.674.10892.5.4.1100.50.1.21"),
'PartNumber' : ObjectIdentity("1.3.6.1.4.1.674.10892.5.4.1100.50.1.22"),
'SerialNumber' : ObjectIdentity("1.3.6.1.4.1.674.10892.5.4.1100.50.1.23"),
'StateCapabilities' : ObjectIdentity("1.3.6.1.4.1.674.10892.5.4.1100.50.1.3"),
'memoryDeviceStateSettings' : ObjectIdentity("1.3.6.1.4.1.674.10892.5.4.1100.50.1.4"),
# 'Rank' : ObjectIdentity("1.3.6.1.4.1.674.10892.5.4.1100.50.1.2"),
# The OID above corresponds to Index and Rank not in iDRAC MIB
},
iDRACCompEnum.NIC : {
'Index' : ObjectIdentity("1.3.6.1.4.1.674.10892.5.4.1100.90.1.2"),
'PrimaryStatus' : ObjectIdentity("1.3.6.1.4.1.674.10892.5.4.1100.90.1.3"),
'LinkStatus' : ObjectIdentity("1.3.6.1.4.1.674.10892.5.4.1100.90.1.4"),
'ProductName' : ObjectIdentity("1.3.6.1.4.1.674.10892.5.4.1100.90.1.6"),
'Vendor' : ObjectIdentity("1.3.6.1.4.1.674.10892.5.4.1100.90.1.7"),
'CurrentMACAddress' : ObjectIdentity("1.3.6.1.4.1.674.10892.5.4.1100.90.1.15"),
'PermanentMACAddress' : ObjectIdentity("1.3.6.1.4.1.674.10892.5.4.1100.90.1.16"),
'PCIBusNumber' : ObjectIdentity("1.3.6.1.4.1.674.10892.5.4.1100.90.1.17"),
'PCIDeviceNumber' : ObjectIdentity("1.3.6.1.4.1.674.10892.5.4.1100.90.1.18"),
'PCIFunctionNumber' : ObjectIdentity("1.3.6.1.4.1.674.10892.5.4.1100.90.1.19"),
'FQDD' : ObjectIdentity("1.3.6.1.4.1.674.10892.5.4.1100.90.1.30"),
"TOECapabilityFlags" : ObjectIdentity("1.3.6.1.4.1.674.10892.5.4.1100.90.1.23"),
"iSCSICapabilityFlags" : ObjectIdentity("1.3.6.1.4.1.674.10892.5.4.1100.90.1.27"),
"iSCSIEnabled" : ObjectIdentity("1.3.6.1.4.1.674.10892.5.4.1100.90.1.28"),
},
iDRACCompEnum.PCIDevice : {
'Index' : ObjectIdentity("1.3.6.1.4.1.674.10892.5.4.1100.80.1.2"),
'PrimaryStatus' : ObjectIdentity("1.3.6.1.4.1.674.10892.5.4.1100.80.1.5"),
'DataBusWidth' : ObjectIdentity("1.3.6.1.4.1.674.10892.5.4.1100.80.1.7"),
'Manufacturer' : ObjectIdentity("1.3.6.1.4.1.674.10892.5.4.1100.80.1.8"),
'Description' : ObjectIdentity("1.3.6.1.4.1.674.10892.5.4.1100.80.1.9"),
'FQDD' : ObjectIdentity("1.3.6.1.4.1.674.10892.5.4.1100.80.1.12"),
},
iDRACCompEnum.Sensors_Fan : {
'Index' : ObjectIdentity("1.3.6.1.4.1.674.10892.5.4.700.12.1.2"),
'PrimaryStatus' : ObjectIdentity("1.3.6.1.4.1.674.10892.5.4.700.12.1.5"),
'coolingUnitIndexReference' : ObjectIdentity("1.3.6.1.4.1.674.10892.5.4.700.12.1.15"),
'CurrentReading' : ObjectIdentity("1.3.6.1.4.1.674.10892.5.4.700.12.1.6"),
'Type' : ObjectIdentity("1.3.6.1.4.1.674.10892.5.4.700.12.1.7"),
'State' : ObjectIdentity("1.3.6.1.4.1.674.10892.5.4.700.12.1.4"),
'Location' : ObjectIdentity("1.3.6.1.4.1.674.10892.5.4.700.12.1.8"),
'SubType' : ObjectIdentity("1.3.6.1.4.1.674.10892.5.4.700.12.1.16"),
'FQDD' : ObjectIdentity("1.3.6.1.4.1.674.10892.5.4.700.12.1.19"),
},
iDRACCompEnum.PowerSupply : {
'Index' : ObjectIdentity("1.3.6.1.4.1.674.10892.5.4.600.12.1.2"),
'PrimaryStatus' : ObjectIdentity("1.3.6.1.4.1.674.10892.5.4.600.12.1.5"),
"TotalOutputPower" : ObjectIdentity("1.3.6.1.4.1.674.10892.5.4.600.12.1.6"),
'Type' : ObjectIdentity("1.3.6.1.4.1.674.10892.5.4.600.12.1.7"),
'Location' : ObjectIdentity("1.3.6.1.4.1.674.10892.5.4.600.12.1.8"),
'InputVoltage' : ObjectIdentity("1.3.6.1.4.1.674.10892.5.4.600.12.1.9"),
'Range1MaxInputPower' : ObjectIdentity("1.3.6.1.4.1.674.10892.5.4.600.12.1.14"),
'FQDD' : ObjectIdentity("1.3.6.1.4.1.674.10892.5.4.600.12.1.15"),
'IndexReference' : ObjectIdentity("1.3.6.1.4.1.674.10892.5.4.600.12.1.10"),
'powerSupplyStateCapabilitiesUnique' : ObjectIdentity("1.3.6.1.4.1.674.10892.5.4.600.12.1.3"),
'PowerSupplySensorState' : ObjectIdentity("1.3.6.1.4.1.674.10892.5.4.600.12.1.11"),
},
iDRACCompEnum.Enclosure : {
'Number' : ObjectIdentity("1.3.6.1.4.1.674.10892.5.5.1.20.130.3.1.1"),
'ProductName' : ObjectIdentity("1.3.6.1.4.1.674.10892.5.5.1.20.130.3.1.2"),
'ServiceTag' : ObjectIdentity("1.3.6.1.4.1.674.10892.5.5.1.20.130.3.1.8"),
'AssetTag' : ObjectIdentity("1.3.6.1.4.1.674.10892.5.5.1.20.130.3.1.9"),
'State' : ObjectIdentity("1.3.6.1.4.1.674.10892.5.5.1.20.130.3.1.4"),
'PrimaryStatus' : ObjectIdentity("1.3.6.1.4.1.674.10892.5.5.1.20.130.3.1.24"),
'Version' : ObjectIdentity("1.3.6.1.4.1.674.10892.5.5.1.20.130.3.1.26"),
'SASAddress' : ObjectIdentity("1.3.6.1.4.1.674.10892.5.5.1.20.130.3.1.30"),
'DriveCount' : ObjectIdentity("1.3.6.1.4.1.674.10892.5.5.1.20.130.3.1.31"),
'TotalSlots' : ObjectIdentity("1.3.6.1.4.1.674.10892.5.5.1.20.130.3.1.32"),
'FanCount' : ObjectIdentity("1.3.6.1.4.1.674.10892.5.5.1.20.130.3.1.40"),
'PSUCount' : ObjectIdentity("1.3.6.1.4.1.674.10892.5.5.1.20.130.3.1.41"),
'EMMCount' : ObjectIdentity("1.3.6.1.4.1.674.10892.5.5.1.20.130.3.1.42"),
'TempProbeCount' : ObjectIdentity("1.3.6.1.4.1.674.10892.5.5.1.20.130.3.1.43"),
'Position' : ObjectIdentity("1.3.6.1.4.1.674.10892.5.5.1.20.130.3.1.45"),
'FQDD' : ObjectIdentity("1.3.6.1.4.1.674.10892.5.5.1.20.130.3.1.47"),
'DeviceDescription' : ObjectIdentity("1.3.6.1.4.1.674.10892.5.5.1.20.130.3.1.48"),
'RollUpStatus' : ObjectIdentity("1.3.6.1.4.1.674.10892.5.5.1.20.130.3.1.23"),
},
iDRACCompEnum.EnclosureEMM : {
'FQDD' : ObjectIdentity("1.3.6.1.4.1.674.10892.5.5.1.20.130.13.1.15"),
'Name' : ObjectIdentity("1.3.6.1.4.1.674.10892.5.5.1.20.130.13.1.2"),
'Number' : ObjectIdentity("1.3.6.1.4.1.674.10892.5.5.1.20.130.13.1.1"),
'State' : ObjectIdentity("1.3.6.1.4.1.674.10892.5.5.1.20.130.13.1.4"),
'PartNumber' : ObjectIdentity("1.3.6.1.4.1.674.10892.5.5.1.20.130.13.1.6"),
'FWVersion' : ObjectIdentity("1.3.6.1.4.1.674.10892.5.5.1.20.130.13.1.8"),
'DeviceDescription' : ObjectIdentity("1.3.6.1.4.1.674.10892.5.5.1.20.130.13.1.16"),
'PrimaryStatus' : ObjectIdentity("1.3.6.1.4.1.674.10892.5.5.1.20.130.13.1.11"),
},
"EnclosureFan" : {
'Number' : ObjectIdentity("1.3.6.1.4.1.674.10892.5.5.1.20.130.7.1.1"),
'Name' : ObjectIdentity("1.3.6.1.4.1.674.10892.5.5.1.20.130.7.1.2"),
'State' : ObjectIdentity("1.3.6.1.4.1.674.10892.5.5.1.20.130.7.1.4"),
'CurrentSpeed' : ObjectIdentity("1.3.6.1.4.1.674.10892.5.5.1.20.130.7.1.11"),
'PartNumber' : ObjectIdentity("1.3.6.1.4.1.674.10892.5.5.1.20.130.7.1.7"),
'PrimaryStatus' : ObjectIdentity("1.3.6.1.4.1.674.10892.5.5.1.20.130.7.1.15"),
'FQDD' : ObjectIdentity("1.3.6.1.4.1.674.10892.5.5.1.20.130.7.1.20"),
'DeviceDescription' : ObjectIdentity("1.3.6.1.4.1.674.10892.5.5.1.20.130.7.1.21"),
},
iDRACCompEnum.EnclosurePSU : {
'Name' : ObjectIdentity("1.3.6.1.4.1.674.10892.5.5.1.20.130.9.1.2"),
'Number' : ObjectIdentity("1.3.6.1.4.1.674.10892.5.5.1.20.130.9.1.1"),
'State' : ObjectIdentity("1.3.6.1.4.1.674.10892.5.5.1.20.130.9.1.4"),
'FQDD' : ObjectIdentity("1.3.6.1.4.1.674.10892.5.5.1.20.130.9.1.15"),
'PartNumber' : ObjectIdentity("1.3.6.1.4.1.674.10892.5.5.1.20.130.9.1.7"),
'PrimaryStatus' : ObjectIdentity("1.3.6.1.4.1.674.10892.5.5.1.20.130.9.1.9"),
'DeviceDescription' : ObjectIdentity("1.3.6.1.4.1.674.10892.5.5.1.20.130.9.1.16"),
},
iDRACCompEnum.ControllerBattery : {
'Number' : ObjectIdentity("1.3.6.1.4.1.674.10892.5.5.1.20.130.15.1.1"),
'State' : ObjectIdentity("1.3.6.1.4.1.674.10892.5.5.1.20.130.15.1.4"),
'PrimaryStatus' : ObjectIdentity("1.3.6.1.4.1.674.10892.5.5.1.20.130.15.1.6"),
'PredictedCapacity' : ObjectIdentity("1.3.6.1.4.1.674.10892.5.5.1.20.130.15.1.10"),
'FQDD' : ObjectIdentity("1.3.6.1.4.1.674.10892.5.5.1.20.130.15.1.20"),
'DeviceDescription' : ObjectIdentity("1.3.6.1.4.1.674.10892.5.5.1.20.130.15.1.21"),
},
iDRACCompEnum.Controller : {
'ProductName' : ObjectIdentity("1.3.6.1.4.1.674.10892.5.5.1.20.130.1.1.2"),
'ControllerFirmwareVersion' : ObjectIdentity("1.3.6.1.4.1.674.10892.5.5.1.20.130.1.1.8"),
'CacheSize' : ObjectIdentity("1.3.6.1.4.1.674.10892.5.5.1.20.130.1.1.9"),
'RollupStatus' : ObjectIdentity("1.3.6.1.4.1.674.10892.5.5.1.20.130.1.1.37"),
'PrimaryStatus' : ObjectIdentity("1.3.6.1.4.1.674.10892.5.5.1.20.130.1.1.38"),
'DriverVersion' : ObjectIdentity("1.3.6.1.4.1.674.10892.5.5.1.20.130.1.1.41"),
'PCISlot' : ObjectIdentity("1.3.6.1.4.1.674.10892.5.5.1.20.130.1.1.42"),
'HotspareStatus' : ObjectIdentity("1.3.6.1.4.1.674.10892.5.5.1.20.130.1.1.59"),
'CopyBackMode' : ObjectIdentity("1.3.6.1.4.1.674.10892.5.5.1.20.130.1.1.71"),
'SecurityStatus' : ObjectIdentity("1.3.6.1.4.1.674.10892.5.5.1.20.130.1.1.72"),
'EncryptionCapability' : ObjectIdentity("1.3.6.1.4.1.674.10892.5.5.1.20.130.1.1.74"),
'LoadBalancingMode' : ObjectIdentity("1.3.6.1.4.1.674.10892.5.5.1.20.130.1.1.75"),
'MaxSpeed' : ObjectIdentity("1.3.6.1.4.1.674.10892.5.5.1.20.130.1.1.76"),
'SASAddress' : ObjectIdentity("1.3.6.1.4.1.674.10892.5.5.1.20.130.1.1.77"),
'Number' : ObjectIdentity("1.3.6.1.4.1.674.10892.5.5.1.20.130.1.1.1"),
'FQDD' : ObjectIdentity("1.3.6.1.4.1.674.10892.5.5.1.20.130.1.1.78"),
'DeviceDescription' : ObjectIdentity("1.3.6.1.4.1.674.10892.5.5.1.20.130.1.1.79"),
'T10PICapability': ObjectIdentity("1.3.6.1.4.1.674.10892.5.5.1.20.130.1.1.80"),
'SupportRAID10UnevenSpans': ObjectIdentity("1.3.6.1.4.1.674.10892.5.5.1.20.130.1.1.81"),
'SupportEnhancedAutoForeignImport': ObjectIdentity("1.3.6.1.4.1.674.10892.5.5.1.20.130.1.1.82"),
'SupportControllerBootMode': ObjectIdentity("1.3.6.1.4.1.674.10892.5.5.1.20.130.1.1.83"),
},
iDRACCompEnum.VirtualDisk : {
'Number' : ObjectIdentity("1.3.6.1.4.1.674.10892.5.5.1.20.140.1.1.1"),
'Name' : ObjectIdentity("1.3.6.1.4.1.674.10892.5.5.1.20.140.1.1.2"),
'RAIDStatus' : ObjectIdentity("1.3.6.1.4.1.674.10892.5.5.1.20.140.1.1.4"),
'Size' : ObjectIdentity("1.3.6.1.4.1.674.10892.5.5.1.20.140.1.1.6"),
'WriteCachePolicy' : ObjectIdentity("1.3.6.1.4.1.674.10892.5.5.1.20.140.1.1.10"),
'ReadCachePolicy' : ObjectIdentity("1.3.6.1.4.1.674.10892.5.5.1.20.140.1.1.11"),
'RAIDTypes' : ObjectIdentity("1.3.6.1.4.1.674.10892.5.5.1.20.140.1.1.13"),
'StripeSize' : ObjectIdentity("1.3.6.1.4.1.674.10892.5.5.1.20.140.1.1.14"),
'PrimaryStatus' : ObjectIdentity("1.3.6.1.4.1.674.10892.5.5.1.20.140.1.1.20"),
'Secured' : ObjectIdentity("1.3.6.1.4.1.674.10892.5.5.1.20.140.1.1.24"),
'IsCacheCade' : ObjectIdentity("1.3.6.1.4.1.674.10892.5.5.1.20.140.1.1.25"),
'DiskCachePolicy' : ObjectIdentity("1.3.6.1.4.1.674.10892.5.5.1.20.140.1.1.26"),
'MediaType' : ObjectIdentity("1.3.6.1.4.1.674.10892.5.5.1.20.140.1.1.33"),
'RemainingRedundancy' : ObjectIdentity("1.3.6.1.4.1.674.10892.5.5.1.20.140.1.1.34"),
'OperationalState' : ObjectIdentity("1.3.6.1.4.1.674.10892.5.5.1.20.140.1.1.30"),
'FQDD' : ObjectIdentity("1.3.6.1.4.1.674.10892.5.5.1.20.140.1.1.35"),
'DeviceDescription' : ObjectIdentity("1.3.6.1.4.1.674.10892.5.5.1.20.140.1.1.36"),
},
iDRACCompEnum.PhysicalDisk : {
'Number' : ObjectIdentity("1.3.6.1.4.1.674.10892.5.5.1.20.130.4.1.1"),
'Name' : ObjectIdentity("1.3.6.1.4.1.674.10892.5.5.1.20.130.4.1.2"),
'RaidStatus' : ObjectIdentity("1.3.6.1.4.1.674.10892.5.5.1.20.130.4.1.4"),
'Model' : ObjectIdentity("1.3.6.1.4.1.674.10892.5.5.1.20.130.4.1.6"),
'SerialNumber' : ObjectIdentity("1.3.6.1.4.1.674.10892.5.5.1.20.130.4.1.7"),
'Revision' : ObjectIdentity("1.3.6.1.4.1.674.10892.5.5.1.20.130.4.1.8"),
'Size' : ObjectIdentity("1.3.6.1.4.1.674.10892.5.5.1.20.130.4.1.11"),
'UsedSize' : ObjectIdentity("1.3.6.1.4.1.674.10892.5.5.1.20.130.4.1.17"),
'FreeSize' : ObjectIdentity("1.3.6.1.4.1.674.10892.5.5.1.20.130.4.1.19"),
'BusProtocol' : ObjectIdentity("1.3.6.1.4.1.674.10892.5.5.1.20.130.4.1.21"),
'HotSpareStatus' : ObjectIdentity("1.3.6.1.4.1.674.10892.5.5.1.20.130.4.1.22"),
'PrimaryStatus' : ObjectIdentity("1.3.6.1.4.1.674.10892.5.5.1.20.130.4.1.24"),
'PPID' : ObjectIdentity("1.3.6.1.4.1.674.10892.5.5.1.20.130.4.1.27"),
'SASAddress' : ObjectIdentity("1.3.6.1.4.1.674.10892.5.5.1.20.130.4.1.28"),
'RAIDNegotiatedSpeed' : ObjectIdentity("1.3.6.1.4.1.674.10892.5.5.1.20.130.4.1.29"),
'PredictiveFailureState' : ObjectIdentity("1.3.6.1.4.1.674.10892.5.5.1.20.130.4.1.31"),
'MaxCapableSpeed' : ObjectIdentity("1.3.6.1.4.1.674.10892.5.5.1.20.130.4.1.30"),
'MediaType' : ObjectIdentity("1.3.6.1.4.1.674.10892.5.5.1.20.130.4.1.35"),
'PowerState' : ObjectIdentity("1.3.6.1.4.1.674.10892.5.5.1.20.130.4.1.42"),
'DriveFormFactor' : ObjectIdentity("1.3.6.1.4.1.674.10892.5.5.1.20.130.4.1.53"),
'Manufacturer' : ObjectIdentity("1.3.6.1.4.1.674.10892.5.5.1.20.130.4.1.3"),
'ManufacturingDay' : ObjectIdentity("1.3.6.1.4.1.674.10892.5.5.1.20.130.4.1.32"),
'ManufacturingWeek' : ObjectIdentity("1.3.6.1.4.1.674.10892.5.5.1.20.130.4.1.33"),
'ManufacturingYear' : ObjectIdentity("1.3.6.1.4.1.674.10892.5.5.1.20.130.4.1.34"),
'OperationalState' : ObjectIdentity("1.3.6.1.4.1.674.10892.5.5.1.20.130.4.1.50"),
'SecurityState' : ObjectIdentity("1.3.6.1.4.1.674.10892.5.5.1.20.130.4.1.52"),
'FQDD' : ObjectIdentity("1.3.6.1.4.1.674.10892.5.5.1.20.130.4.1.54"),
'DeviceDescription' : ObjectIdentity("1.3.6.1.4.1.674.10892.5.5.1.20.130.4.1.55"),
'T10PICapability': ObjectIdentity("1.3.6.1.4.1.674.10892.5.5.1.20.130.4.1.57"),
'BlockSize': ObjectIdentity("1.3.6.1.4.1.674.10892.5.5.1.20.130.4.1.58"),
},
"FRU" : {
'FQDD' : ObjectIdentity("1.3.6.1.4.1.674.10892.5.4.2000.10.1.12"),
'ChassisIndex' : ObjectIdentity("1.3.6.1.4.1.674.10892.5.4.2000.10.1.1"),
'SerialNumberName' : ObjectIdentity("1.3.6.1.4.1.674.10892.5.4.2000.10.1.7"),
'RevisionName' : ObjectIdentity("1.3.6.1.4.1.674.10892.5.4.2000.10.1.9"),
'InformationStatus' : ObjectIdentity("1.3.6.1.4.1.674.10892.5.4.2000.10.1.3"),
'ManufacturerName' : ObjectIdentity("1.3.6.1.4.1.674.10892.5.4.2000.10.1.6"),
'PartNumberName' : ObjectIdentity("1.3.6.1.4.1.674.10892.5.4.2000.10.1.8"),
'Index' : ObjectIdentity("1.3.6.1.4.1.674.10892.5.4.2000.10.1.2"),
},
# "systemBattery" : {
# "ChassisIndex" : ObjectIdentity('IDRAC-MIB-SMIv2', 'systemBatteryChassisIndex'),
# "Index" : ObjectIdentity('IDRAC-MIB-SMIv2', 'systemBatteryIndex'),
# "Status" : ObjectIdentity('IDRAC-MIB-SMIv2', 'systemBatteryStatus'),
# "Reading" : ObjectIdentity('IDRAC-MIB-SMIv2', 'systemBatteryReading'),
# "LocationName" : ObjectIdentity('IDRAC-MIB-SMIv2', 'systemBatteryLocationName'),
# },
"firmware" : {
'Status' : ObjectIdentity("1.3.6.1.4.1.674.10892.5.4.300.60.1.5"),
'VersionName' : ObjectIdentity("1.3.6.1.4.1.674.10892.5.4.300.60.1.11"),
'StateSettings' : ObjectIdentity("1.3.6.1.4.1.674.10892.5.4.300.60.1.4"),
'Type' : ObjectIdentity("1.3.6.1.4.1.674.10892.5.4.300.60.1.7"),
'Size' : ObjectIdentity("1.3.6.1.4.1.674.10892.5.4.300.60.1.6"),
'chassisIndex' : ObjectIdentity("1.3.6.1.4.1.674.10892.5.4.300.60.1.1"),
'TypeName' : ObjectIdentity("1.3.6.1.4.1.674.10892.5.4.300.60.1.8"),
'StateCapabilities' : ObjectIdentity("1.3.6.1.4.1.674.10892.5.4.300.60.1.3"),
'Index' : ObjectIdentity("1.3.6.1.4.1.674.10892.5.4.300.60.1.2"),
},
"SystemBIOS" : {
"chassisIndex" : ObjectIdentity('IDRAC-MIB-SMIv2', 'systemBIOSchassisIndex'),
"Index" : ObjectIdentity('IDRAC-MIB-SMIv2', 'systemBIOSIndex'),
# "Status" : ObjectIdentity('IDRAC-MIB-SMIv2', 'systemBIOSStatus'),
# "ReleaseDateName" : ObjectIdentity('IDRAC-MIB-SMIv2', 'systemBIOSReleaseDateName'),
"VersionName" : ObjectIdentity('IDRAC-MIB-SMIv2', 'systemBIOSVersionName'),
"ManufacturerName" : ObjectIdentity('IDRAC-MIB-SMIv2', 'systemBIOSManufacturerName'),
},
iDRACCompEnum.Sensors_Amperage : {
"Location" : ObjectIdentity('1.3.6.1.4.1.674.10892.5.4.600.30.1.8'),
"PrimaryStatus" : ObjectIdentity('1.3.6.1.4.1.674.10892.5.4.600.30.1.5'),
"State" : ObjectIdentity('1.3.6.1.4.1.674.10892.5.4.600.30.1.4'),
"ProbeReading" : ObjectIdentity('1.3.6.1.4.1.674.10892.5.4.600.30.1.6'),
"ProbeType" : ObjectIdentity('1.3.6.1.4.1.674.10892.5.4.600.30.1.7'),
"CurrentReading" : ObjectIdentity('1.3.6.1.4.1.674.10892.5.4.600.30.1.16'),
},
iDRACCompEnum.Sensors_Battery : {
"State" : ObjectIdentity('1.3.6.1.4.1.674.10892.5.4.600.50.1.4'),
"PrimaryStatus" : ObjectIdentity('1.3.6.1.4.1.674.10892.5.4.600.50.1.5'),
"CurrentReading" : ObjectIdentity('1.3.6.1.4.1.674.10892.5.4.600.50.1.6'),
"Location" : ObjectIdentity('1.3.6.1.4.1.674.10892.5.4.600.50.1.7'),
},
iDRACCompEnum.Sensors_Intrusion : {
"State" : ObjectIdentity('1.3.6.1.4.1.674.10892.5.4.300.70.1.4'),
"Type" : ObjectIdentity('1.3.6.1.4.1.674.10892.5.4.300.70.1.7'),
"PrimaryStatus" : ObjectIdentity('1.3.6.1.4.1.674.10892.5.4.300.70.1.5'),
"CurrentReading" : ObjectIdentity('1.3.6.1.4.1.674.10892.5.4.300.70.1.6'),
"Location" : ObjectIdentity('1.3.6.1.4.1.674.10892.5.4.300.70.1.8'),
},
iDRACCompEnum.Sensors_Voltage : {
"State" : ObjectIdentity('1.3.6.1.4.1.674.10892.5.4.600.20.1.4'),
"CurrentReading" : ObjectIdentity('1.3.6.1.4.1.674.10892.5.4.600.20.1.16'),
"PrimaryStatus" : ObjectIdentity('1.3.6.1.4.1.674.10892.5.4.600.20.1.5'),
"Reading(V)" : ObjectIdentity('1.3.6.1.4.1.674.10892.5.4.600.20.1.6'),
"Location" : ObjectIdentity('1.3.6.1.4.1.674.10892.5.4.600.20.1.8'),
},
iDRACCompEnum.Sensors_Temperature : {
"State" : ObjectIdentity('1.3.6.1.4.1.674.10892.5.4.700.20.1.4'),
"CurrentReading" : ObjectIdentity('1.3.6.1.4.1.674.10892.5.4.700.20.1.16'),
"PrimaryStatus" : ObjectIdentity('1.3.6.1.4.1.674.10892.5.4.700.20.1.5'),
"CurrentReading(Degree Celsius)" : ObjectIdentity('1.3.6.1.4.1.674.10892.5.4.700.20.1.6'),
"Location" : ObjectIdentity('1.3.6.1.4.1.674.10892.5.4.700.20.1.8'),
"SensorType": ObjectIdentity('1.3.6.1.4.1.674.10892.5.4.700.20.1.7.1'),
},
}
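    # Field specifications for the SNMP views above. Two patterns appear here:
    #   - {'Lookup': 'True', 'Values': {...}} maps a raw SNMP integer to a
    #     human-readable string (e.g. PrimaryStatus 3 -> "Healthy").
    #   - {'Type': 'Bytes'/'ClockSpeed', 'InUnits': ..., 'OutUnits': ...}
    #     requests a unit conversion of the raw value.
    # 'UnitScale'/'UnitAppend', 'Macedit' and 'Rename' entries used below follow
    # the same convention as the other protocol field specs in this module.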
iDRACSNMPViews_FieldSpec = {
iDRACCompEnum.Memory : {
"Size" : { 'Type' : 'Bytes', 'InUnits' : "KB" },
"CurrentOperatingSpeed" : { 'Type' : 'ClockSpeed', 'InUnits' : "MHz", 'OutUnits' : 'MHz' },
"memoryDeviceStateSettings" : {
'Lookup' : 'True',
'Values' : {
"1" : "Unknown",
"2" : "Enabled",
"4" : "Not Ready",
"6" : "Enabled Not Ready"
}
},
"PrimaryStatus" : {
'Lookup' : 'True',
'Values' : {
"1" : "Unknown",
"2" : "Unknown",
"3" : "Healthy",
"4" : "Warning",
"5" : "Critical",
"6" : "Critical"
}
},
},
iDRACCompEnum.Controller : {
"CacheSize" : { 'Type' : 'Bytes', 'InUnits' : 'MB', 'OutUnits' : 'GB' },
"PrimaryStatus" : {
'Lookup' : 'True',
'Values' : {
"1" : "Unknown",
"2" : "Unknown",
"3" : "Healthy",
"4" : "Warning",
"5" : "Critical",
"6" : "Critical"
}
},
"RollupStatus": {
'Lookup': 'True',
'Values': {
"1": "Unknown",
"2": "Unknown",
"3": "Healthy",
"4": "Warning",
"5": "Critical",
"6": "Critical"
}
},
"T10PICapability": {
'Lookup': 'True',
'Values': {
"1": "Other", "2": "Capable", "3": "Not Capable"
}
},
"EncryptionCapability": {
'Lookup': 'True',
'Values': {
"1": "Other",
"2": "None",
"3": "LKM"
}
},
"SecurityStatus": {
'Lookup': 'True',
'Values': {
"1": "Unknown",
"2": "None",
"3": "LKM"
}
},
"SupportEnhancedAutoForeignImport": {
'Lookup': 'True',
'Values': {
"1": "Other",
"2": "Not Supported",
"3": "Disabled",
"4": "Enabled"
}
},
"SupportControllerBootMode": {
'Lookup': 'True',
'Values': {
"0": "Not Supported",
"1": "Supported"
}
},
"SupportRAID10UnevenSpans": {
'Lookup': 'True',
'Values': {
"0": "Uneven span for RAID10 not supported",
"1": "Uneven span for RAID10 supported"
}
}
},
iDRACCompEnum.Enclosure: {
"State": {
'Lookup' : 'True',
'Values' : {
"1": "Unknown",
"2": "Ready",
"3": "Failed",
"4": "Missing",
"5": "Degraded"
}
},
"PrimaryStatus": {
'Lookup' : 'True',
'Values' : {
"1" : "Unknown",
"2" : "Unknown",
"3" : "Healthy",
"4" : "Warning",
"5" : "Critical",
"6" : "Critical"
}
}
},
iDRACCompEnum.Sensors_Fan: {
"State" : {
'Lookup' : 'True',
'Values' : {
"1" : "Unknown",
"2" : "Enabled",
"4" : "Not Ready",
"6" : "Enabled Not Ready"
}
},
"PrimaryStatus" : {
'Lookup' : 'True',
'Values' : {
"1" : "Unknown",
"2" : "Unknown",
"3" : "Healthy",
"4" : "Warning",
"5" : "Critical",
"6" : "Critical",
"7" : "Warning",
"8" : "Critical",
"9" : "Critical",
"10" : "Critical"
}
},
"Type" : {
'Lookup' : 'True',
'Values' : {
"1" : "coolingDeviceTypeIsOther",
"2" : "coolingDeviceTypeIsUnknown",
"3" : "coolingDeviceTypeIsAFan",
"4" : "coolingDeviceTypeIsABlower",
"5" : "coolingDeviceTypeIsAChipFan",
"6" : "coolingDeviceTypeIsACabinetFan",
"7" : "coolingDeviceTypeIsAPowerSupplyFan",
"8" : "coolingDeviceTypeIsAHeatPipe",
"9" : "coolingDeviceTypeIsRefrigeration",
"10" : "coolingDeviceTypeIsActiveCooling",
"11" : "coolingDeviceTypeIsPassiveCooling"
}
},
"SubType" : {
'Lookup' : 'True',
'Values' : {
"1" : "coolingDeviceSubTypeIsOther",
"2" : "coolingDeviceSubTypeIsUnknown",
"3" : "coolingDeviceSubTypeIsAFanThatReadsInRPM",
"4" : "coolingDeviceSubTypeIsAFanReadsONorOFF",
"5" : "coolingDeviceSubTypeIsAPowerSupplyFanThatReadsinRPM",
"6" : "coolingDeviceSubTypeIsAPowerSupplyFanThatReadsONorOFF",
"16" : "coolingDeviceSubTypeIsDiscrete"
}
},
},
iDRACCompEnum.CPU : {
"processorDeviceStateSettings" : {
'Lookup' : 'True',
'Values' : {
"1" : "Unknown",
"2" : "Enabled",
"4" : "Not Ready",
"6" : "Enabled Not Ready"
}
},
"PrimaryStatus" : {
'Lookup' : 'True',
'Values' : {
"1" : "Unknown",
"2" : "Unknown",
"3" : "Healthy",
"4" : "Warning",
"5" : "Critical",
"6" : "Critical"
}
},
"CPUFamily" : {
'Lookup' : 'True',
'Values' : {
"1" : "Other",
"2" : "Unknown",
"3" : "8086",
"4" : "80286",
"5" : "Intel386 processor",
"6" : "Intel486 processor",
"7" : "8087",
"8" : "80287",
"9" : "80387",
"10" : "80487",
"11" : "Pentium processor Family",
"12" : "Pentium Pro processor",
"13" : "Pentium II processor",
"14" : "Pentium processor with MMX technology",
"15" : "Celeron processor",
"16" : "Pentium II Xeon processor",
"17" : "Pentium III processor",
"18" : "Pentium III Xeon processor",
"19" : "Pentium III Processor with Intel SpeedStep Technology",
"20" : "Itanium processor",
"21" : "Intel Xeon",
"22" : "Pentium 4 Processor",
"23" : "Intel Xeon processor MP",
"24" : "Intel Itanium 2 processor",
"25" : "K5 Family",
"26" : "K6 Family",
"27" : "K6-2",
"28" : "K6-3",
"29" : "AMD Athlon Processor Family",
"30" : "AMD2900 Family",
"31" : "K6-2+",
"32" : "Power PC Family",
"33" : "Power PC 601",
"34" : "Power PC 603",
"35" : "Power PC 603+",
"36" : "Power PC 604",
"37" : "Power PC 620",
"38" : "Power PC x704",
"39" : "Power PC 750",
"40" : "Intel(R) Core(TM) Duo processor",
"41" : "Intel(R) Core(TM) Duo mobile processor",
"42" : "Intel(R) Core(TM) Solo mobile processor",
"43" : "Intel(R) Atom(TM) processor",
"48" : "Alpha Family",
"49" : "Alpha 21064",
"50" : "Alpha 21066",
"51" : "Alpha 21164",
"52" : "Alpha 21164PC",
"53" : "Alpha 21164a",
"54" : "Alpha 21264",
"55" : "Alpha 21364",
"56" : "AMD Turion(TM) II Ultra Dual-Core Mobile M Processor Family",
"57" : "AMD Turion(TM) II Dual-Core Mobile M Processor Family",
"58" : "AMD Athlon(TM) II Dual-Core Mobile M Processor Family",
"59" : "AMD Opteron(TM) 6100 Series Processor",
"60" : "AMD Opteron(TM) 4100 Series Processor",
"61" : "AMD Opteron(TM) 6200 Series Processor",
"62" : "AMD Opteron(TM) 4200 Series Processor",
"64" : "MIPS Family",
"65" : "MIPS R4000",
"66" : "MIPS R4200",
"67" : "MIPS R4400",
"68" : "MIPS R4600",
"69" : "MIPS R10000",
"80" : "SPARC Family",
"81" : "SuperSPARC",
"82" : "microSPARC II",
"83" : "microSPARC IIep",
"84" : "UltraSPARC",
"85" : "UltraSPARC II",
"86" : "UltraSPARC IIi",
"87" : "UltraSPARC III",
"88" : "UltraSPARC IIIi",
"96" : "68040 Family",
"97" : "68xxx",
"98" : "68000",
"99" : "68010",
"100" : "68020",
"101" : "68030",
"112" : "Hobbit Family",
"120" : "Crusoe TM5000 Family",
"121" : "Crusoe TM3000 Family",
"122" : "Efficeon TM8000 Family",
"128" : "Weitek",
"130" : "Intel(R) Celeron(R) M processor",
"131" : "AMD Athlon 64 Processor Family",
"132" : "AMD Opteron Processor Family",
"133" : "AMD Sempron Processor Family",
"134" : "AMD Turion 64 Mobile Technology",
"135" : "Dual-Core AMD Opteron(TM) Processor Family",
"136" : "AMD Athlon 64 X2 Dual-Core Processor Family",
"137" : "AMD Turion(TM) 64 X2 Mobile Technology",
"138" : "Quad-Core AMD Opteron(TM) Processor Family",
"139" : "Third-Generation AMD Opteron(TM) Processor Family",
"140" : "AMD Phenom(TM) FX Quad-Core Processor Family",
"141" : "AMD Phenom(TM) X4 Quad-Core Processor Family",
"142" : "AMD Phenom(TM) X2 Dual-Core Processor Family",
"143" : "AMD Athlon(TM) X2 Dual-Core Processor Family",
"144" : "PA-RISC Family",
"145" : "PA-RISC 8500",
"146" : "PA-RISC 8000",
"147" : "PA-RISC 7300LC",
"148" : "PA-RISC 7200",
"149" : "PA-RISC 7100LC",
"150" : "PA-RISC 7100",
"160" : "V30 Family",
"161" : "Quad-Core Intel(R) Xeon(R) processor 3200 Series",
"162" : "Dual-Core Intel(R) Xeon(R) processor 3000 Series",
"163" : "Quad-Core Intel(R) Xeon(R) processor 5300 Series",
"164" : "Dual-Core Intel(R) Xeon(R) processor 5100 Series",
"165" : "Dual-Core Intel(R) Xeon(R) processor 5000 Series",
"166" : "Dual-Core Intel(R) Xeon(R) processor LV",
"167" : "Dual-Core Intel(R) Xeon(R) processor ULV",
"168" : "Dual-Core Intel(R) Xeon(R) processor 7100 Series",
"169" : "Quad-Core Intel(R) Xeon(R) processor 5400 Series",
"170" : "Quad-Core Intel(R) Xeon(R) processor",
"171" : "Dual-Core Intel(R) Xeon(R) processor 5200 Series",
"172" : "Dual-Core Intel(R) Xeon(R) processor 7200 Series",
"173" : "Quad-Core Intel(R) Xeon(R) processor 7300 Series",
"174" : "Quad-Core Intel(R) Xeon(R) processor 7400 Series",
"175" : "Multi-Core Intel(R) Xeon(R) processor 7400 Series",
"176" : "M1 Family",
"177" : "M2 Family",
"179" : "Intel(R) Pentium(R) 4 HT processor",
"180" : "AS400 Family",
"182" : "AMD Athlon XP Processor Family",
"183" : "AMD Athlon MP Processor Family",
"184" : "AMD Duron Processor Family",
"185" : "Intel Pentium M processor",
"186" : "Intel Celeron D processor",
"187" : "Intel Pentium D processor",
"188" : "Intel Pentium Processor Extreme Edition",
"189" : "Intel(R) Core(TM) Solo processor",
"190" : "Intel(R) Core(TM)2 processor",
"191" : "Intel(R) Core(TM)2 Duo processor",
"192" : "Intel(R) Core(TM)2 Solo processor",
"193" : "Intel(R) Core(TM)2 Extreme processor",
"194" : "Intel(R) Core(TM)2 Quad processor",
"195" : "Intel(R) Core(TM)2 Extreme mobile processor",
"196" : "Intel(R) Core(TM)2 Duo mobile processor",
"197" : "Intel(R) Core(TM)2 Solo mobile processor",
"198" : "Intel(R) Core(TM) i7 processor",
"199" : "Dual-Core Intel(R) Celeron(R) Processor",
"200" : "IBM390 Family",
"201" : "G4",
"202" : "G5",
"203" : "ESA/390 G6",
"204" : "z/Architectur base",
"205" : "Intel(R) Core(TM) i5 processor",
"206" : "Intel(R) Core(TM) i3 processor",
"210" : "VIA C7(TM)-M Processor Family",
"211" : "VIA C7(TM)-D Processor Family",
"212" : "VIA C7(TM) Processor Family",
"213" : "VIA Eden(TM) Processor Family",
"214" : "Multi-Core Intel(R) Xeon(R) processor",
"215" : "Dual-Core Intel(R) Xeon(R) processor 3xxx Series",
"216" : "Quad-Core Intel(R) Xeon(R) processor 3xxx Series",
"217" : "VIA Nano(TM) Processor Family",
"218" : "Dual-Core Intel(R) Xeon(R) processor 5xxx Series",
"219" : "Quad-Core Intel(R) Xeon(R) processor 5xxx Series",
"221" : "Dual-Core Intel(R) Xeon(R) processor 7xxx Series",
"222" : "Quad-Core Intel(R) Xeon(R) processor 7xxx Series",
"223" : "Multi-Core Intel(R) Xeon(R) processor 7xxx Series",
"224" : "Multi-Core Intel(R) Xeon(R) processor 3400 Series ",
"230" : "Embedded AMD Opteron(TM) Quad-Core Processor Family",
"231" : "AMD Phenom(TM) Triple-Core Processor Family",
"232" : "AMD Turion(TM) Ultra Dual-Core Mobile Processor Family",
"233" : "AMD Turion(TM) Dual-Core Mobile Processor Family",
"234" : "AMD Athlon(TM) Dual-Core Processor Family",
"235" : "AMD Sempron(TM) SI Processor Family",
"236" : "AMD Phenom(TM) II Processor Family",
"237" : "AMD Athlon(TM) II Processor Family",
"238" : "Six-Core AMD Opteron(TM) Processor Family",
"239" : "AMD Sempron(TM) M Processor Family",
"250" : "i860",
"251" : "i960"
}
}
},
iDRACCompEnum.NIC : {
"LinkStatus" : {
'Lookup' : 'True',
'Values' : {
"1" : "Up",
"2" : "Down",
"3" : "Down",
"4" : "Down",
"10" : "Down",
"11" : "Down",
"12" : "Down",
"13" : "Down",
}
},
"PrimaryStatus" : {
'Lookup' : 'True',
'Values' : {
"1" : "Unknown",
"2" : "Unknown",
"3" : "Healthy",
"4" : "Warning",
"5" : "Critical",
"6" : "Critical"
}
},
"CurrentMACAddress" : {
'Macedit' : 'True'
},
"PermanentMACAddress" : {
'Macedit' : 'True'
}
},
iDRACCompEnum.VirtualDisk : {
"Size" : { 'Type' : 'Bytes', 'InUnits' : 'MB' },
"RAIDStatus" : {
'Lookup' : 'True',
'Values' : {
'1' : 'Unknown',
'2' : 'Online',
'3' : 'Failed',
'4' : 'Degraded'
}
},
"RAIDTypes" : {
'Lookup' : 'True',
'Values' : {
'1' : 'Other',
'2' : 'RAID 0',
'3' : 'RAID 1',
'4' : 'RAID 5',
'5' : 'RAID 6',
'6' : 'RAID 10',
'7' : 'RAID 50',
'8' : 'RAID 60',
'9' : 'Concatenated RAID 1',
'10' : 'Concatenated RAID 5'
}
},
"ReadCachePolicy" : {
'Lookup' : 'True',
'Values' : {
'1' : 'No Read Ahead',
'2' : 'Read Ahead',
'3' : 'Adaptive Read Ahead',
}
},
"DiskCachePolicy" : {
'Lookup' : 'True',
'Values' : {
'1' : 'Enabled',
'2' : 'Disabled',
'3' : 'Default',
}
},
"WriteCachePolicy" : {
'Lookup' : 'True',
'Values' : {
'1' : 'Write Through',
'2' : 'Write Back',
'3' : 'Write Back Force',
}
},
"PrimaryStatus" : {
'Lookup' : 'True',
'Values' : {
"1" : "Unknown",
"2" : "Unknown",
"3" : "Healthy",
"4" : "Warning",
"5" : "Critical",
"6" : "Critical"
}
},
"StripeSize" : {
'Lookup' : 'True',
'Values' : {
"1" : "Other",
"2" : "default",
"3" : "512",
"4" : "1024",
"5" : "2048",
"6" : "4096",
"7" : "8192",
"8" : "16384",
"9" : "32768",
"10" : "65536",
"11" : "131072",
"12" : "262144",
"13" : "524288",
"14" : "1048576",
"15" : "2097152",
"16" : "4194304",
"17" : "8388608",
"18" : "16777216",
}
}
},
iDRACCompEnum.PhysicalDisk : {
"Size" : { 'Type' : 'Bytes' , 'InUnits' : 'MB' },
"UsedSize" : { 'Type' : 'Bytes' , 'InUnits' : 'MB' , 'Metrics' : 'GB'},
"FreeSize" : { 'Type' : 'Bytes' , 'InUnits' : 'MB', 'Metrics' : 'GB' },
"BlockSize": {'Type': 'Bytes', 'InUnits': 'B', 'OutUnits': 'B'},
"RaidStatus" : {
'Lookup' : 'True',
'Values' : {
"1" : "Unknown",
"2" : "Ready",
"3" : "Online",
"4" : "Foreign",
"5" : "Offline",
"6" : "Blocked",
"7" : "Failed",
"8" : "Nonraid",
"9" : "Removed",
"10" : "Readonly"
}
},
"PrimaryStatus" : {
'Lookup' : 'True',
'Values' : {
"1" : "Unknown",
"2" : "Unknown",
"3" : "Healthy",
"4" : "Warning",
"5" : "Critical",
"6" : "Critical",
"7" : "Warning",
"8" : "Critical",
"9" : "Critical",
"10" : "Critical",
}
},
"MediaType" : {
'Lookup' : 'True',
'Values' : {
"1" : "unknown",
"2" : "HDD",
"3" : "SSD"
}
},
"ManufacturingDay": {
'Lookup': 'True',
'Values': {
"1": "Sunday",
"2": "Monday",
"3": "Tuesday",
"4": "Wednesday",
"5": "Thursday",
"6": "Friday",
"7": "Saturday"
}
},
"MaxCapableSpeed": {
'Lookup': 'True',
'Values': {
"1": "Unknown",
"2": "1.5 Gbps",
"3": "3.0 Gbps",
"4": "6.0 Gbps",
"5": "12.0 Gbps",
"6": "5 GT/s",
"7": "8 GT/s"
}
},
"T10PICapability": {
'Lookup': 'True',
'Values': {
"1": "Other", "2": "Capable", "3": "Not Capable"
}
},
"RAIDNegotiatedSpeed": {
'Lookup': 'True',
'Values': {
"1": "Unknown",
"2": "1.5 Gbps",
"3": "3.0 Gbps",
"4": "6.0 Gbps",
"5": "12.0 Gbps",
"6": "5 GT/s",
"7": "8 GT/s"
}
},
"SecurityState": {
'Lookup': 'True',
'Values': {
"1": "Supported",
"2": "Not Supported",
"3": "Secured",
"4": "Locked",
"5": "Foreign"
}
},
"HotSpareStatus": {
'Lookup': 'True',
'Values': {
"1": "Not A Spare",
"2": "Dedicated Hot Spare",
"3": "Global Hot Spare"
}
},
"PredictiveFailureState": {
'Lookup': 'True',
'Values': {
"0": "Smart Alert Absent",
"1": "Smart Alert Present"
}
},
"BusProtocol": {
'Lookup': 'True',
'Values': {
"1": "Unknown",
"2": "SCSI",
"3": "SAS",
"4": "SATA",
"5": "Fibre Channel",
"6": "PCIe"
}
},
"DriveFormFactor": {
'Lookup': 'True',
'Values': {
"1": "Unknown",
"2": "1.8 inch",
"3": "2.5 inch",
"4": "3.5 inch"
}
}
},
iDRACCompEnum.System : {
"SystemGeneration" : {
'Lookup' : 'True',
'Values' : {
"1" : "other",
"2" : "unknown",
"16" : "12G Monolithic",
"17" : "12G Modular",
"21" : "12G DCS",
"32" : "13G Monolithic",
"33" : "13G Modular",
"34" : "13G DCS",
"48" : "14G Monolithic",
"49" : "14G Modular",
"50" : "14G DCS"
}
},
"PrimaryStatus" : {
'Lookup' : 'True',
'Values' : {
"1" : "Unknown",
"2" : "Unknown",
"3" : "Healthy",
"4" : "Warning",
"5" : "Critical",
"6" : "Critical"
}
},
"RollupStatus" : {
'Lookup' : 'True',
'Values' : {
"1" : "Unknown",
"2" : "Unknown",
"3" : "Healthy",
"4" : "Warning",
"5" : "Critical",
"6" : "Critical"
}
},
"PSRollupStatus" : {
'Lookup' : 'True',
'Values' : {
"1" : "Unknown",
"2" : "Unknown",
"3" : "Healthy",
"4" : "Warning",
"5" : "Critical",
"6" : "Critical"
}
},
"CPURollupStatus" : {
'Lookup' : 'True',
'Values' : {
"1" : "Unknown",
"2" : "Unknown",
"3" : "Healthy",
"4" : "Warning",
"5" : "Critical",
"6" : "Critical"
}
},
"SysMemPrimaryStatus" : {
'Lookup' : 'True',
'Values' : {
"1" : "Unknown",
"2" : "Unknown",
"3" : "Healthy",
"4" : "Warning",
"5" : "Critical",
"6" : "Critical"
}
},
"FanRollupStatus" : {
'Lookup' : 'True',
'Values' : {
"1" : "Unknown",
"2" : "Unknown",
"3" : "Healthy",
"4" : "Warning",
"5" : "Critical",
"6" : "Critical"
}
},
"BatteryRollupStatus" : {
'Lookup' : 'True',
'Values' : {
"1" : "Unknown",
"2" : "Unknown",
"3" : "Healthy",
"4" : "Warning",
"5" : "Critical",
"6" : "Critical"
}
},
"SDCardRollupStatus" : {
'Lookup' : 'True',
'Values' : {
"1" : "Unknown",
"2" : "Unknown",
"3" : "Healthy",
"4" : "Warning",
"5" : "Critical",
"6" : "Critical"
}
},
"ChassisIntrusion" : { 'Rename' : 'IntrusionRollupStatus',
'Lookup' : 'True',
'Values' : {
"1" : "Unknown",
"2" : "Unknown",
"3" : "Healthy",
"4" : "Warning",
"5" : "Critical",
"6" : "Critical"
}
},
"CoolingUnit" : {
'Lookup' : 'True',
'Values' : {
"1" : "Unknown",
"2" : "Unknown",
"3" : "Healthy",
"4" : "Warning",
"5" : "Critical",
"6" : "Critical"
}
},
"PowerUnit" : {
'Lookup' : 'True',
'Values' : {
"1" : "Unknown",
"2" : "Unknown",
"3" : "Healthy",
"4" : "Warning",
"5" : "Critical",
"6" : "Critical"
}
},
"ChassisStatus" : {
'Lookup' : 'True',
'Values' : {
"1" : "Unknown",
"2" : "Unknown",
"3" : "Healthy",
"4" : "Warning",
"5" : "Critical",
"6" : "Critical"
}
},
"StorageRollupStatus" : {
'Lookup' : 'True',
'Values' : {
"1" : "Unknown",
"2" : "Unknown",
"3" : "Healthy",
"4" : "Warning",
"5" : "Critical",
"6" : "Critical"
}
},
"VoltRollupStatus" : {
'Lookup' : 'True',
'Values' : {
"1" : "Unknown",
"2" : "Unknown",
"3" : "Healthy",
"4" : "Warning",
"5" : "Critical",
"6" : "Critical"
}
},
"CurrentRollupStatus" : {
'Lookup' : 'True',
'Values' : {
"1" : "Unknown",
"2" : "Unknown",
"3" : "Healthy",
"4" : "Warning",
"5" : "Critical",
"6" : "Critical"
}
},
"TempRollupStatus" : {
'Lookup' : 'True',
'Values' : {
"1" : "Unknown",
"2" : "Unknown",
"3" : "Healthy",
"4" : "Warning",
"5" : "Critical",
"6" : "Critical"
}
}
},
iDRACCompEnum.PowerSupply : {
'TotalOutputPower' : {'UnitScale': '-1', 'UnitAppend' : 'W'},
'Range1MaxInputPower' : {'UnitScale': '-1', 'UnitAppend' : 'W'},
"powerSupplyStateCapabilitiesUnique" : {
'Lookup' : 'True',
'Values' : {
"0" : "No Power Supply State",
"1" : "unknown",
"2" : "onlineCapable",
"4" : "notReadyCapable",
}
},
"PrimaryStatus" : {
'Lookup' : 'True',
'Values' : {
"1" : "Unknown",
"2" : "Unknown",
"3" : "Healthy",
"4" : "Warning",
"5" : "Critical",
"6" : "Critical"
}
},
"PowerSupplySensorState" : {
'Lookup' : 'True',
'Values' : {
"1" : "presenceDetected",
"2" : "psFailureDetected",
"4" : "predictiveFailure",
"8" : "psACLost",
"16" : "acLostOrOutOfRange",
"32" : "acOutOfRangeButPresent",
"64" : "configurationError"
}
}
},
iDRACCompEnum.Sensors_Battery : {
"CurrentReading" : {
'Lookup' : 'True',
'Values' : {
"1" : "predictiveFailure",
"2" : "failed",
"4" : "presenceDetected",
}
},
"PrimaryStatus" : {
'Lookup' : 'True',
'Values' : {
"1" : "Unknown",
"2" : "Unknown",
"3" : "Healthy",
"4" : "Warning",
"5" : "Critical",
"6" : "Critical"
}
},
"State" : {
'Lookup' : 'True',
'Values' : {
"1" : "Unknown",
"2" : "Enabled",
"4" : "Not Ready",
"6" : "Enabled Not Ready"
}
}
},
iDRACCompEnum.Sensors_Intrusion: {
"CurrentReading" : {
'Lookup' : 'True',
'Values' : {
"1" : "chassisNotBreached",
"2" : "chassisBreached",
"3" : "chassisBreachedPrior",
"4" : "chassisBreachSensorFailure",
}
},
"PrimaryStatus" : {
'Lookup' : 'True',
'Values' : {
"1" : "Unknown",
"2" : "Unknown",
"3" : "Healthy",
"4" : "Warning",
"5" : "Critical",
"6" : "Critical"
}
},
"Type" : {
'Lookup' : 'True',
'Values' : {
"1" : "chassisBreachDetectionWhenPowerON",
"2" : "chassisBreachDetectionWhenPowerOFF",
}
},
"State" : {
'Lookup' : 'True',
'Values' : {
"1" : "Unknown",
"2" : "Enabled",
"4" : "Not Ready",
"6" : "Enabled Not Ready"
}
}
},
iDRACCompEnum.Sensors_Amperage : {
"PrimaryStatus" : {
'Lookup' : 'True',
'Values' : {
"1" : "Unknown",
"2" : "Unknown",
"3" : "Healthy",
"4" : "Warning",
"5" : "Critical",
"6" : "Critical",
"7" : "Warning",
"8" : "Critical",
"9" : "Critical",
"10" : "Critical"
}
},
"ProbeType": {
'Lookup': 'True',
'Values': {
"1": "amperageProbeTypeIsOther",
"2": "amperageProbeTypeIsUnknown",
"3": "amperageProbeTypeIs1Point5Volt",
"4": "amperageProbeTypeIs3Point3volt",
"5": "amperageProbeTypeIs5Volt",
"6": "amperageProbeTypeIsMinus5Volt",
"7": "amperageProbeTypeIs12Volt",
"8": "amperageProbeTypeIsMinus12Volt",
"9": "amperageProbeTypeIsIO",
"10": "amperageProbeTypeIsCore",
"11": "amperageProbeTypeIsFLEA",
"12": "amperageProbeTypeIsBattery",
"13": "amperageProbeTypeIsTerminator",
"14": "amperageProbeTypeIs2Point5Volt",
"15": "amperageProbeTypeIsGTL",
"16": "amperageProbeTypeIsDiscrete",
"23": "amperageProbeTypeIsPowerSupplyAmps",
"24": "amperageProbeTypeIsPowerSupplyWatts",
"25": "amperageProbeTypeIsSystemAmps",
"26": "amperageProbeTypeIsSystemWatts"
}
},
"State" : {
'Lookup' : 'True',
'Values' : {
"1" : "Unknown",
"2" : "Enabled",
"4" : "Not Ready",
"6" : "Enabled Not Ready"
}
},
"CurrentReading": {
'Lookup': 'True',
'Values': {
"1": "Good",
"2": "Bad"
}
}
},
iDRACCompEnum.Sensors_Temperature : {
'Reading(Degree Celsius)' : {'UnitScale': '-1', 'UnitAppend' : 'Degree Celsius'},
'CurrentReading(Degree Celsius)': {'UnitScale': '-1'},
"PrimaryStatus" : {
'Lookup' : 'True',
'Values' : {
"1" : "Unknown",
"2" : "Unknown",
"3" : "Healthy",
"4" : "Warning",
"5" : "Critical",
"6" : "Critical",
"7" : "Warning",
"8" : "Critical",
"9" : "Critical",
"10" : "Critical"
}
},
"State" : {
'Lookup' : 'True',
'Values' : {
"1" : "Unknown",
"2" : "Enabled",
"4" : "Not Ready",
"6" : "Enabled Not Ready"
}
},
"SensorType": {
'Lookup': 'True',
'Values': {
"1": "Other",
"2": "Unknown",
"3": "Ambient ESM",
"16": "Discrete"
}
}
},
iDRACCompEnum.Sensors_Voltage : {
'Reading(V)' : {'UnitScale': '-3', 'UnitAppend' : 'V'},
"CurrentReading" : {
'Lookup' : 'True',
'Values' : {
"1" : "voltageIsGood",
"2" : "voltageIsBad",
}
},
"PrimaryStatus" : {
'Lookup' : 'True',
'Values' : {
"1" : "Unknown",
"2" : "Unknown",
"3" : "Healthy",
"4" : "Warning",
"5" : "Critical",
"6" : "Critical",
"7" : "Warning",
"8" : "Critical",
"9" : "Critical",
"10" : "Critical"
}
},
"State" : {
'Lookup' : 'True',
'Values' : {
"1" : "Unknown",
"2" : "Enabled",
"4" : "Not Ready",
"6" : "Enabled Not Ready"
}
}
},
iDRACCompEnum.ControllerBattery: {
"PrimaryStatus": {
'Lookup': 'True',
'Values': {
"1": "Unknown",
"2": "Unknown",
"3": "Healthy",
"4": "Warning",
"5": "Critical",
"6": "Critical"
}
}
}
}
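    # SNMP classifier: a device is treated as an iDRAC when its sysObjectID
    # matches the Dell enterprise OID (674.10892.5) / outOfBandGroup pattern below.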
iDRACSNMPClassifier = {
iDRACCompEnum.System : {
'SysObjectID' : 'SNMPv2-SMI::enterprises\\.674\\.10892\\.5|IDRAC-MIB-SMIv2::outOfBandGroup'
}
}
# Agnostic of protocols
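# Each subsystem's health is read from a rollup/status field of the System
# component, so the same spec works for WS-Man, Redfish and SNMP data.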
iDRACSubsystemHealthSpec = {
iDRACCompEnum.System : { "Component" : iDRACCompEnum.System, "Field": 'PrimaryStatus' },
iDRACCompEnum.Memory : { "Component" : iDRACCompEnum.System, "Field" : 'SysMemPrimaryStatus' },
iDRACCompEnum.CPU : { "Component" : iDRACCompEnum.System, "Field": 'CPURollupStatus' },
'Sensors_Fan' : { "Component" : iDRACCompEnum.System, "Field": 'FanRollupStatus' },
# iDRACCompEnum.iDRAC : { "Component" : iDRACCompEnum.System, "Field": 'RollupStatus' },
iDRACCompEnum.PowerSupply : { "Component" : iDRACCompEnum.System, "Field": 'PSRollupStatus' },
'Storage' : { "Component" : iDRACCompEnum.System, "Field": 'StorageRollupStatus' },
'License' : { "Component" : iDRACCompEnum.System, "Field": 'LicensingRollupStatus' },
'Sensors_Voltage' : { "Component" : iDRACCompEnum.System, "Field": 'VoltRollupStatus' },
'Sensors_Temperature' : { "Component" : iDRACCompEnum.System, "Field": 'TempRollupStatus' },
'Sensors_Battery' : { "Component" : iDRACCompEnum.System, "Field": 'BatteryRollupStatus' },
'VFlash' : { "Component" : iDRACCompEnum.System, "Field": 'SDCardRollupStatus' },
'Sensors_Intrusion' : { "Component" : iDRACCompEnum.System, "Field": 'IntrusionRollupStatus' },
'Sensors_Amperage' : { "Component" : iDRACCompEnum.System, "Field": 'CurrentRollupStatus' },
'Chassis' : { "Component" : iDRACCompEnum.System, "Field": 'ChassisStatus' },
'Cooling_Unit' : { "Component" : iDRACCompEnum.System, "Field": 'CoolingUnit' },
'Power_Unit' : { "Component" : iDRACCompEnum.System, "Field": 'PowerUnit' },
}
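# Union spec: the three sensor classes below are merged into a single "Sensors"
# component, de-duplicated, and then pivoted on SensorType so that instances
# surface as Sensors_Battery, Sensors_Temperature, Sensors_Voltage, etc.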
iDRACUnionCompSpec = {
"Sensors":{
"_components": [
"ServerSensor",
"NumericSensor",
"PSNumericSensor"
],
"_components_enum": [
iDRACSensorEnum.ServerSensor,
iDRACSensorEnum.NumericSensor,
iDRACSensorEnum.PSNumericSensor
],
"_remove_duplicates" : True,
"_pivot" : "SensorType",
"SensorType" : {
"1": "Battery",
"2" : "Temperature",
"3" : "Voltage",
"5" : "Fan",
"13" : "Amperage",
"16" : "Intrusion"
}
}
}
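# Dynamic-value union spec: attribute-style classes (name/value pairs such as
# SystemString or iDRACEnumeration) are folded into the owning component,
# keyed by the listed [FQDD, attribute-name, current-value] columns.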
iDRACDynamicValUnion = {
"System":{
"_complexkeys": {
"SystemString" : ["FQDD", "AttributeName", "CurrentValue"],
"LCString" :[None, "AttributeName", "CurrentValue" ],
"iDRACEnumeration" :[None, "InstanceID", "CurrentValue"]
},
"_components_enum": [
iDRACMiscEnum.SystemString,
iDRACMiscEnum.LCString,
iDRACMiscEnum.iDRACEnumeration
],
"_createFlag" : False
},
"NIC":{
"_complexkeys": {
"NICString" :["FQDD", "AttributeName", "CurrentValue"],
},
"_components_enum": [
iDRACMiscEnum.NICString,
]
},
"iDRAC":{
"_complexkeys": {
"iDRACEnumeration" :["FQDD", "InstanceID", "CurrentValue"],
"iDRACString" :["FQDD", "InstanceID", "CurrentValue"]
},
"_components_enum": [
iDRACMiscEnum.iDRACEnumeration,
iDRACMiscEnum.iDRACString
]
},
"iDRACNIC":{
"_complexkeys": {
"iDRACEnumeration" :["FQDD", "InstanceID", "CurrentValue"],
"iDRACString" :["FQDD", "InstanceID", "CurrentValue"]
},
"_components_enum": [
iDRACMiscEnum.iDRACEnumeration,
iDRACMiscEnum.iDRACString
],
"_createFlag" : False
},
"PhysicalDisk":{
"_complexkeys": {
"RAIDEnumeration" :["FQDD", "AttributeName", "CurrentValue"],
},
"_components_enum": [
iDRACMiscEnum.RAIDEnumeration
],
"_createFlag" : False
},
"SystemMetrics":{
"_complexkeys": {
"BaseMetricValue" :[None, "InstanceID", "MetricValue" ]
},
"_components_enum": [
iDRACMetricsEnum.BaseMetricValue
],
"_createFlag" : True
},
"SystemBoardMetrics":{
"_complexkeys": {
"AggregationMetric" :[None, "InstanceID", "MetricValue" ]
},
"_components_enum": [
iDRACMetricsEnum.AggregationMetric
],
"_createFlag" : True
}
}
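# Merge-join spec: statistics/capability views are joined onto their parent
# component by matching the listed key fields (e.g. NICStatistics.FQDD -> NIC.FQDD).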
iDRACMergeJoinCompSpec = {
"NIC" : {
"_components" : [
["NIC", "FQDD", "NICStatistics", "FQDD"],
["NIC", "FQDD", "NICCapabilities", "FQDD"],
["NIC", "FQDD", "SwitchConnection", "FQDD"],
["NIC", "FQDD", "HostNICView", "DeviceFQDD"]
],
"_components_enum": [
iDRACCompEnum.NIC,
iDRACMiscEnum.NICStatistics,
iDRACMiscEnum.NICCapabilities,
iDRACMiscEnum.SwitchConnection,
iDRACMiscEnum.HostNICView
],
"_overwrite" : False
},
"FC" : {
"_components" : [
["FC", "FQDD", "FCStatistics", "FQDD"]
],
"_components_enum": [
iDRACCompEnum.FC,
iDRACMiscEnum.FCStatistics
],
"_overwrite" : False
}
}
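# Additional views consulted when building the detailed System and iDRAC output.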
iDRAC_more_details_spec = {
"System":{
"_components_enum": [
iDRACCompEnum.System,
iDRACMiscEnum.ChassisRF,
iDRACMiscEnum.DellAttributes,
iDRACCompEnum.iDRAC,
iDRACMiscEnum.iDRACString
]
},
"iDRAC":{
"_components_enum": [
iDRACCompEnum.iDRAC,
iDRACMiscEnum.DellAttributes
]
}
}
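# Discovery driver: registers the WS-Man, Redfish and (optionally) SNMP
# protocol handlers with their respective views, field specs and classifiers.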
class iDRAC(iDeviceDiscovery):
def __init__(self, srcdir):
if PY2:
super(iDRAC, self).__init__(iDeviceRegistry("iDRAC", srcdir, iDRACCompEnum))
else:
super().__init__(iDeviceRegistry("iDRAC", srcdir, iDRACCompEnum))
self.srcdir = srcdir
self.protofactory.add(PWSMAN(
selectors = {"__cimnamespace" : "root/dcim" },
views = iDRACWsManViews,
view_fieldspec = iDRACWsManViews_FieldSpec,
compmap = iDRACSWCompMapping,
cmds = iDRACWsManCmds
))
self.protofactory.add(PREDFISH(
views=iDRACRedfishViews,
cmds=iDRACRedfishCmds,
view_fieldspec=iDRACRedfishViews_FieldSpec,
classifier_cond=classify_cond
))
if PySnmpPresent:
self.protofactory.add(PSNMP(
views = iDRACSNMPViews,
classifier = iDRACSNMPClassifier,
view_fieldspec = iDRACSNMPViews_FieldSpec
))
self.protofactory.addCTree(iDRACComponentTree)
self.protofactory.addSubsystemSpec(iDRACSubsystemHealthSpec)
self.protofactory.addClassifier(iDRACClassifier)
self.prefDiscOrder = 1
def my_entitytype(self, pinfra, ipaddr, creds, protofactory):
return iDRACEntity(self.ref, protofactory, ipaddr, creds, self.srcdir, 'iDRAC')
def my_aliases(self):
return ['Server']
class iDRACEntity(iDeviceDriver):
def __init__(self, ref, protofactory, ipaddr, creds, srcdir, name):
if PY2:
super(iDRACEntity, self).__init__(ref, protofactory, ipaddr, creds)
else:
super().__init__(ref, protofactory, ipaddr, creds)
self.config_dir = os.path.join(srcdir, name, "Config")
self.ePowerStateEnum = PowerStateEnum
self.job_mgr = iDRACJobs(self)
self.use_redfish = False
self.config_mgr = iDRACConfig(self)
self.log_mgr = iDRACLogs(self)
self.update_mgr = iDRACUpdate(self)
self.license_mgr = iDRACLicense(self)
self.security_mgr = iDRACSecurity(self)
self.streaming_mgr = iDRACStreaming(self)
self.user_mgr = iDRACCredsMgmt(self)
self.comp_union_spec = iDRACUnionCompSpec
self.comp_misc_join_spec = iDRACDynamicValUnion
self.comp_merge_join_spec = iDRACMergeJoinCompSpec
self.more_details_spec = iDRAC_more_details_spec
self.device_type = 'Server'
self.eemi_registry = eemiregistry
def my_reset(self):
if hasattr(self, 'update_mgr'):
self.update_mgr.reset()
#self.config_mgr.reset()
#self.log_mgr.reset()
def my_fix_obj_index(self, clsName, key, js):
retval = None
if clsName == "System":
if 'ServiceTag' not in js or js['ServiceTag'] is None:
js['ServiceTag'] = self.ipaddr
retval = js['ServiceTag']
else:
idlist = ['Id','DeviceID', 'MemberID', 'MemberId', '@odata.id']
retval = clsName + "_null"
for id in idlist:
if id in js:
retval = js[id]
return retval
return retval
def _isin(self, parentClsName, parent, childClsName, child):
if TypeHelper.resolve(parentClsName) == "Controller" and \
TypeHelper.resolve(childClsName) == "PhysicalDisk" and \
("Disk.Direct" not in self._get_obj_index(childClsName, child)):
return False
if TypeHelper.resolve(parentClsName) == "VirtualDisk" and \
TypeHelper.resolve(childClsName) == "PhysicalDisk":
if 'PhysicalDiskIDs' in parent:
parentdiskListStr = parent['PhysicalDiskIDs'].strip("[]")
if (self._get_obj_index(childClsName, child) in parentdiskListStr):
return True
else:
return False
return self._get_obj_index(parentClsName, parent) in \
self._get_obj_index(childClsName, child)
def _get_MemoryType(self, idx):
ty = self._get_field_device("Memory", "type", idx)
return self.ref.Translate("Memory", "type", ty)
@property
def ServiceTag(self):
return self._get_field_device(self.ComponentEnum.System, "ServiceTag")
@property
def SystemID(self):
return self._get_field_device(self.ComponentEnum.System, "SystemID")
@property
def SystemIDInHex(self):
sid = self._get_field_device(self.ComponentEnum.System, "SystemID")
# following line is kludge for reflection api
        if sid is None or sid == '<not_found>': sid = '0'
return (('0000' + str(hex(int(sid)))[2:])[-4:])
@property
def Model(self):
return self._get_field_device(self.ComponentEnum.System, "Model")
@property
def ServerGeneration(self):
return self._get_field_device(self.ComponentEnum.System, "SystemGeneration")
@property
def CMCIPAddress(self):
val = self._get_field_device(self.ComponentEnum.System, "CMCIP")
if val is None or val in ['<not_found>', "Not Available", '']:
return None
return val
@property
def IsRackStyleManaged(self):
# return true if rack services, pounce platform
if not "Modular" in self.get_server_generation():
return True
# check if psu is enumerable from idrac. if yes, it is rsm mode
self.get_partial_entityjson(self.ComponentEnum.PowerSupply)
psfq= self._get_field_device(self.ComponentEnum.PowerSupply, "FQDD", 0)
if psfq is None or psfq in ['<not_found>', "Not Available", '']:
return False
return True
@property
def AssetTag(self):
return self._get_field_device(self.ComponentEnum.System, "AssetTag")
@property
def IDRACURL(self):
self.get_partial_entityjson(self.ComponentEnum.iDRAC)
return self._get_field_device(self.ComponentEnum.iDRAC, "URLString")
@property
def IDRACFirmwareVersion(self):
self.get_partial_entityjson(self.ComponentEnum.iDRAC)
return self._get_field_device(self.ComponentEnum.iDRAC, "LifecycleControllerVersion")
@property
def PowerCap(self):
return self._get_field_device(self.ComponentEnum.System, "PowerCap")
@property
def PowerState(self):
pstate = self._get_field_device(self.ComponentEnum.System, "PowerState")
return TypeHelper.convert_to_enum(int(pstate), PowerStateEnum)
@property
def IDRACDNSName(self):
self.get_partial_entityjson(self.ComponentEnum.iDRAC)
return self._get_field_device(self.ComponentEnum.iDRAC, "DnsDRACName")
def _should_i_include(self, component, entry):
#if component in ["PhysicalDisk"]:
# if entry["RollupStatus"] == 0 or entry["PrimaryStatus"] == 0:
# return False
if component == 'System':
if self.cfactory.work_protocols[0].name == "WSMAN":
port = 443
if isinstance(self.pOptions, ProtocolOptionsFactory):
pOptions = self.pOptions.get(ProtocolEnum.REDFISH)
if pOptions:
port = pOptions.port
elif isinstance(self.pOptions, WsManOptions):
port = self.pOptions.port
if ':' in self.ipaddr:
entry['iDRACURL'] = "https://["+str(self.ipaddr) +"]:"+str(port)
else:
entry['iDRACURL'] = "https://" + str(self.ipaddr) + ":" +str(port)
if 'ChassisRF' in self.entityjson:
ChaSysDict = self.entityjson['ChassisRF'][0]
ChassisDict = None
if len(self.entityjson['ChassisRF']) > 1:
for chinst in self.entityjson['ChassisRF']:
if 'Chassis/System.' in chinst['@odata.id']:
ChaSysDict = chinst
if 'Chassis/Chassis.' in chinst['@odata.id']:
ChassisDict = chinst
chassisAttr = ['ChassisServiceTag', 'ChassisLocation', 'ChassisName', 'IntrusionRollupStatus']
for attr in chassisAttr:
# if attr in ChasysDict and ChasysDict[attr]:
entry.update({attr: ChaSysDict.get(attr, 'Not Available')})
chassisAttr = ['ChassisServiceTag', 'ChassisModel', 'ChassisName']
if ChassisDict:
for attr in chassisAttr:
# if attr in ChassisDict and ChassisDict[attr]:
entry.update({attr : ChassisDict.get(attr, 'Not Available')})
# del self.entityjson['ChassisRF']
if 'DellAttributes' in self.entityjson:
dellAttrList = self.entityjson['DellAttributes']
needAttr = ['ChassisServiceTag', 'OSName' ,'OSVersion','SystemLockDown','LifecycleControllerVersion','VirtualAddressManagementApplication']
for dAttr in dellAttrList:
for attr in needAttr:
if attr in dAttr and dAttr[attr]:
entry.update({attr: dAttr[attr]})
if component == 'iDRAC':
if 'DellAttributes' in self.entityjson:
dellAttrList = self.entityjson['DellAttributes']
needAttr = ['GroupName','GroupStatus','SystemLockDown','IPv4Address','ProductInfo','MACAddress',
'NICDuplex', 'NICSpeed' ,'DNSDomainName','DNSRacName','IPv6Address', 'PermanentMACAddress']
for dAttr in dellAttrList:
for attr in needAttr:
if dAttr.get(attr,None):
entry.update({attr: dAttr[attr]})
if self.cfactory.work_protocols[0].name == "REDFISH":
port = 443
if isinstance(self.pOptions, ProtocolOptionsFactory):
pOptions = self.pOptions.get(ProtocolEnum.REDFISH)
if pOptions:
port = pOptions.port
elif isinstance(self.pOptions, RedfishOptions):
port = self.pOptions.port
if ':' in self.ipaddr:
entry['URLString'] = "https://["+str(self.ipaddr) +"]:"+str(port)
else:
entry['URLString'] = "https://" + str(self.ipaddr) + ":" +str(port)
if 'System' in self.entityjson:
self.entityjson["System"][0]["iDRACURL"] = entry['URLString']
if component == 'iDRACNIC':
if 'NICEnabled' in entry:
h_map = {"Enabled" : "Healthy", "Disabled" : "Critical", "Unknown" : "Unknown"}
entry['PrimaryStatus'] = h_map.get(entry.get('NICEnabled', "Unknown"),"Unknown")
if component in ["Sensors_Battery"]:
if "OtherSensorTypeDescription" in entry:
if not entry["OtherSensorTypeDescription"] == 'Battery':
return False
if component == "NIC":
supportedBootProtocol = ""
sbpDict = {"FCoEBootSupport" : "FCOE,",
"PXEBootSupport" : "PXE,",
"iSCSIBootSupport" : "iSCSI,"}
for bootproto in sbpDict:
if(bootproto in entry) and (entry[bootproto] == "2"):
supportedBootProtocol=supportedBootProtocol+sbpDict[bootproto]
if(supportedBootProtocol != ""):
entry["SupportedBootProtocol"] = supportedBootProtocol.rstrip(',')
nicCapabilities = ""
ncpDict = {"WOLSupport" : "WOL,",
"FlexAddressingSupport" : "FlexAddressing,",
"VFSRIOVSupport" : "SR-IOV,",
"iSCSIOffloadSupport" : "iSCSI Offload,",
"FCoEOffloadSupport" : "FCoE Offload,",
"NicPartitioningSupport" : "Partitioning,",
"TCPChimneySupport" : "TOE,",
"DCBExchangeProtocol" : "DCB,"}
for ncp in ncpDict:
if(ncp in entry) and (entry[ncp] == "2"):
nicCapabilities=nicCapabilities+ncpDict[ncp]
if(nicCapabilities != ""):
entry["NICCapabilities"] = nicCapabilities.rstrip(',')
if 'PrimaryStatus' not in entry:
entry['PrimaryStatus'] = {'Up':'Healthy','Down':'Critical', 'Unknown':'Unknown'}.get(entry.get('LinkStatus','Unknown'))
if component == "BIOS":
# SCOM Requirement to get 1 instance
if not (entry["ComponentType"] == 'BIOS') or not ("INSTALLED#" in entry["InstanceID"]):
return False
else:
if 'System' in self.entityjson:
entry['SMBIOSPresent'] = 'True'
entry['BIOSReleaseDate'] = self.entityjson['System'][0]['BIOSReleaseDate']
if component == "HostNIC":
if not entry["DeviceFQDD"] or entry["DeviceFQDD"] == "Not Available":
return False
if component == 'Sensors_Fan':
cl = ['Key','Location']
for x in cl:
if x in entry:
s = entry[x]
if '|' in s:
entry[x] = s.split('|')[-1]
cl = None
if component == "VFlash":
if 'PrimaryStatus' in entry:
if entry["PrimaryStatus"] == "Not Available":
entry["PrimaryStatus"] = "Unknown"
if component == 'SystemBoardMetrics':
try:
entry['PeakAmperage'] = float(entry.get('PeakAmperage',0))/10
except ValueError:
logger.info(self.ipaddr+" Warning: Converting PeakAmperage not a number "+entry.get('PeakAmperage', 'Not Present'))
entry['PeakAmperage'] = "0"
if component == "PresenceAndStatusSensor":
if entry.get('ElementName') != "Chassis Controller":
return False
if 'Sensors_' in component:
if (entry.get('DeviceID', None)):
entry['DeviceID'] = entry.get('Key')#Redfish Case for SCOM to Adapt
# or change in Monitor file Sensor/NumericSensor/PSNumericSensor to ElementName
entry['Key'] = entry.get('Location', entry.get('Key', component))
if (entry.get('SensorType', "Not Available")):
entry["SensorType"] = component.split('_')[-1]
return True
def _should_i_modify_component(self, finalretjson, component):
if 'Sensors_' in component:
pkeys = ["Key"]
filtered = {tuple((k, d[k]) for k in sorted(d) if k in pkeys): d for d in finalretjson[component]}
finalretjson[component] = list(filtered.values())
if component == 'ChassisRF' or component == 'DellAttributes':
del finalretjson[component]
# if component == 'Subsystem':
# component = finalretjson.keys()
# subsystem = finalretjson["Subsystem"]
# finalretjson["Subsystem"] = list(filter(lambda eachdict: eachdict['Key'] in component, subsystem))
def _get_topology_info(self):
return iDRACTopologyInfo(self.get_json_device())
def _get_topology_influencers(self):
return { 'System' : [
'ServiceTag',
'SystemGeneration',
'Model',
'GroupManager'
] }
@property
def ContainmentTree(self):
"""
Removing PowerSupply, Sensors_Fan and Sensor_intrusion Groups
:return: JSON
"""
device_json = self.get_json_device()
ctree = self._build_ctree(self.protofactory.ctree, device_json)
syslist = self.entityjson.get('System', [{}])
sysdict = syslist[0]
blademodel = sysdict.get('Model', 'Not Available')
#logger.info(self.ipaddr+" BLAde Model "+blademodel)
if blademodel:
if ('poweredge m' in str(blademodel).lower()):
systree = ctree.get('System', {})
systree.pop('PowerSupply',None)
sensdict = systree.get('Sensors',{})
sensdict.pop('Sensors_Fan', None)
sensdict.pop('Sensors_Intrusion', None)
return ctree
class iDRACTopologyInfo(iDeviceTopologyInfo):
def __init__(self, json):
if PY2:
super(iDeviceTopologyInfo, self).__init__('Server', json)
else:
super().__init__('Server', json)
def my_static_groups(self, tbuild):
tbuild.add_group('Dell', static=True)
tbuild.add_group('Dell Servers', 'Dell', static=True)
tbuild.add_group('Dell Rack Workstations', 'Dell', static=True)
tbuild.add_group('Dell Modular Servers', 'Dell Servers', static=True)
tbuild.add_group('Dell Monolithic Servers', 'Dell Servers', static=True)
tbuild.add_group('Dell Sled Servers', 'Dell Servers', static=True)
tbuild.add_group('Dell FM Servers', 'Dell Sled Servers', static=True)
tbuild.add_group('Dell Unmanaged Servers', 'Dell Servers', static=True)
tbuild.add_group('Dell iDRAC GMs', 'Dell', static=True)
def my_groups(self, tbuild):
if 'ServiceTag' not in self.system:
return False
serviceTag = self.system['ServiceTag']
grpname = 'Dell Unmanaged Servers'
if 'SystemGeneration' in self.system:
if 'Modular' in self.system['SystemGeneration']:
grpname = 'Dell Modular Servers'
            elif 'Monolithic' in self.system['SystemGeneration'] or 'DCS' in self.system['SystemGeneration']:
grpname = 'Dell Monolithic Servers'
if 'Model' in self.system:
if 'FM' in self.system['Model']:
fmgrp = 'FMServer-' + serviceTag
tbuild.add_group(fmgrp, 'Dell FM Servers')
self._add_myself(tbuild, fmgrp)
grpname = fmgrp
if 'FC' in self.system['Model']:
grpname = 'Dell Sled Servers'
self._add_myself(tbuild, grpname)
# if 'GroupManager' in self.system and self.system['GroupManager']:
# fmgrp = 'iGM-' + self.system['GroupManager']
# tbuild.add_group(fmgrp, 'Dell iDRAC GMs')
# self._add_myself(tbuild, fmgrp)
return True
def my_assoc(self, tbuild):
if 'ServiceTag' not in self.system:
return False
serviceTag = self.system['ServiceTag']
if 'ChassisServiceTag' not in self.system:
# Rack Server or Rack Station or Tower system
return True
chassisSvcTag = self.system['ChassisServiceTag']
if chassisSvcTag is None or chassisSvcTag == serviceTag:
return True
### Commented out this section as slot
### returned by iDRAC is different from CMC-Slot FQDD
#slot = 'undef'
#if 'BaseBoardChassisSlot' in self.system:
# slot = self.system['BaseBoardChassisSlot']
#
#self._add_assoc(tbuild, ['CMC', chassisSvcTag],
# ['ComputeModule', slot],
# [self.mytype, self.system['Key']])
return True
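# ---------------------------------------------------------------------------
# Minimal usage sketch (a hedged example, not part of this module's API
# surface): it assumes the usual omsdk entry points -- sdkinfra and
# UserCredentials -- are importable, which is not shown in this file.
#
#   from omsdk.sdkinfra import sdkinfra
#   from omsdk.sdkcreds import UserCredentials
#
#   sd = sdkinfra()
#   sd.importPath()                 # loads the device drivers, incl. iDRAC
#   idrac = sd.get_driver(sd.driver_enum.iDRAC, '192.168.0.120',
#                         UserCredentials('root', 'calvin'))
#   if idrac:
#       idrac.get_entityjson()      # populate idrac.entityjson via the views above
#       print(idrac.ServiceTag, idrac.Model)
# ---------------------------------------------------------------------------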