id: 451045
from typing import List

class Solution:
    def findMiddleIndex(self, nums: List[int]) -> int:
        # A middle index i splits nums so that sum(nums[:i]) == sum(nums[i + 1:]).
        # With s = sum(nums) and far = sum(nums[:i]), that condition reads
        # s - nums[i] == 2 * far, so one left-to-right pass suffices.
        s, far = sum(nums), 0
        for i, v in enumerate(nums):
            if s - v == 2 * far:
                return i
            far += v
        return -1
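
A minimal usage sketch for the solution above (the input arrays and expected indices are illustrative examples, not part of the original source):

if __name__ == '__main__':
    s = Solution()
    assert s.findMiddleIndex([2, 3, -1, 8, 4]) == 3   # left sum 4 == right sum 4
    assert s.findMiddleIndex([1, -1, 4]) == 2         # both sides sum to 0
    assert s.findMiddleIndex([2, 5]) == -1            # no middle index exists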
id: 451056
from globibot.lib.plugin import Plugin
from globibot.lib.decorators import command
from globibot.lib.helpers import parsing as p
from globibot.lib.helpers.hooks import master_only
import re
URL_PATTERN = re.compile(r'http[s]?://(?:[a-zA-Z]|[0-9]|[$-_@.&+]|[!*\(\),]|(?:%[0-9a-fA-F][0-9a-fA-F]))+')
class Reactions(Plugin):
def load(self):
self.votes = dict()
async def on_new(self, message):
if URL_PATTERN.findall(message.content) or message.attachments:
await self.bot.add_reaction(message, '👍')
await self.bot.add_reaction(message, '👎')
async def on_reaction_add(self, reaction, user):
if reaction.emoji == '❌':
await self.check_vote_delete(reaction)
elif reaction.emoji == '👎':
await self.check_downvote(reaction)
@command(p.string('!react') + p.bind(p.mention, 'who') + p.bind(p.word, 'emoji'), master_only)
async def react_last_user(self, message, who, emoji):
try:
last_message = next(
msg for msg in list(self.bot.messages)[-2::-1]
if msg.channel.id == message.channel.id and msg.author.id == str(who)
)
except StopIteration:
self.debug('No Message')
else:
await self.bot.add_reaction(last_message, emoji)
await self.bot.delete_message(message)
@command(p.string('!react') + p.bind(p.mention, 'who') + p.bind(p.emoji, 'emoji_id'), master_only)
async def react_last_user_emoji(self, message, who, emoji_id):
try:
last_message = next(
msg for msg in list(self.bot.messages)[-2::-1]
if msg.channel.id == message.channel.id and msg.author.id == str(who)
)
except StopIteration:
self.debug('No Message')
else:
for server in self.bot.servers:
for emoji in server.emojis:
if emoji.id == str(emoji_id):
try:
await self.bot.add_reaction(last_message, emoji)
await self.bot.delete_message(message)
return
                        except Exception:
                            # Adding the reaction failed (e.g. the emoji is not
                            # usable from here); stop scanning this server's emojis.
                            break
await self.send_message(message.channel, "I can't use that emoji 😢", delete_after=5)
await self.bot.add_reaction(last_message, '❓')
@command(
p.string('!votedel') + p.bind(p.mention, 'who') +
p.bind(p.maybe(p.integer), 'count'),
master_only
)
async def votedel(self, message, who, count=10):
try:
last_message = next(
msg for msg in list(self.bot.messages)[-2::-1]
if msg.channel.id == message.channel.id and msg.author.id == str(who)
)
except StopIteration:
self.debug('No Message')
else:
self.votes[last_message.id] = (last_message, count)
await self.bot.add_reaction(last_message, '❌')
await self.send_message(
message.channel,
'Deletion vote started: Cast your vote by clicking the ❌ reaction in order to delete {}\'s message ({} votes needed)'
.format(last_message.author.mention, count),
delete_after = 10
)
async def check_vote_delete(self, reaction):
try:
message, count = self.votes[reaction.message.id]
except KeyError:
pass
else:
if reaction.count >= count:
del self.votes[reaction.message.id]
await self.bot.delete_message(message)
await self.send_message(
reaction.message.channel,
'Deletion vote passed',
delete_after = 5
)
async def check_downvote(self, reaction):
if reaction.count >= 10:
await self.bot.delete_message(reaction.message)
await self.send_message(
reaction.message.channel,
'{}, I deleted your post since it was disliked too many times'
.format(reaction.message.author.mention),
delete_after = 10
)
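
A quick standalone check of the URL pattern the plugin keys on (plain re only; the example strings are illustrative):

import re

URL_PATTERN = re.compile(r'http[s]?://(?:[a-zA-Z]|[0-9]|[$-_@.&+]|[!*\(\),]|(?:%[0-9a-fA-F][0-9a-fA-F]))+')

# Messages containing a link (or attachments) receive the 👍/👎 reactions.
assert URL_PATTERN.findall('check https://example.com/a?b=1 out')
assert not URL_PATTERN.findall('no links in this message')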
id: 451081
import numpy as np
from autoconf import conf
from autofit import exc
from autofit.messages.normal import NormalMessage, UniformNormalMessage
from autofit.messages.transform import log_10_transform
from autofit.messages.transform_wrapper import TransformedWrapperInstance
from .abstract import epsilon, assert_within_limits
class Limits:
@staticmethod
def for_class_and_attributes_name(cls, attribute_name):
limit_dict = conf.instance.prior_config.for_class_and_suffix_path(
cls, [attribute_name, "gaussian_limits"]
)
return limit_dict["lower"], limit_dict["upper"]
class WrappedInstance(
TransformedWrapperInstance
):
__identifier_fields__ = ("lower_limit", "upper_limit")
__database_args__ = (
"lower_limit",
"upper_limit",
"log_norm",
"id_",
)
def __init__(
self,
transformed_wrapper,
*args,
lower_limit,
upper_limit,
**kwargs
):
super().__init__(
transformed_wrapper,
*args,
lower_limit=lower_limit,
upper_limit=upper_limit,
**kwargs
)
self.lower_limit = lower_limit
self.upper_limit = upper_limit
if self.lower_limit >= self.upper_limit:
raise exc.PriorException(
"The upper limit of a prior must be greater than its lower limit"
)
def _new_for_base_message(
self,
message
):
return type(self)(
lower_limit=self.lower_limit,
upper_limit=self.upper_limit,
id_=self.instance().id,
params=message.parameters
)
class UniformPrior(WrappedInstance):
"""A prior with a uniform distribution between a lower and upper limit"""
def __init__(
self,
lower_limit=0.0,
upper_limit=1.0,
id_=None,
params=(0.0, 1.0)
):
if any(map(np.isnan, params)):
raise exc.MessageException(
"nan parameter passed to UniformPrior"
)
lower_limit = float(lower_limit)
upper_limit = float(upper_limit)
Message = UniformNormalMessage.shifted(
shift=lower_limit,
scale=upper_limit - lower_limit,
)
super().__init__(
Message,
*params,
lower_limit=lower_limit,
upper_limit=upper_limit,
id_=id_
)
def logpdf(self, x):
# TODO: handle x as a numpy array
if x == self.lower_limit:
x += epsilon
elif x == self.upper_limit:
x -= epsilon
return self.instance().logpdf(x)
def __str__(self):
"""The line of text describing this prior for the model_mapper.info file"""
return f"UniformPrior, lower_limit = {self.lower_limit}, upper_limit = {self.upper_limit}"
@assert_within_limits
def value_for(self, unit):
"""
Parameters
----------
unit: Float
A unit hypercube value between 0 and 1
Returns
-------
value: Float
A value for the attribute between the upper and lower limits
"""
return round(super().value_for(unit), 14)
# noinspection PyUnusedLocal
@staticmethod
def log_prior_from_value(value):
"""
Returns the log prior of a physical value, so the log likelihood of a model evaluation can be converted to a
posterior as log_prior + log_likelihood.
This is used by Emcee in the log likelihood function evaluation.
        NOTE: For a UniformPrior this is always zero, provided the value is between the lower and upper limit.
        Given this is checked for when the instance is made (in the *instance_from_vector* function), we can
        simply return zero in this function.
"""
return 0.0
class LogUniformPrior(WrappedInstance):
    """A prior with a log-uniform distribution between a lower and upper limit"""
    def __init__(
        self,
lower_limit=1e-6,
upper_limit=1.0,
id_=None,
params=(0.0, 1.0)
):
if lower_limit <= 0.0:
raise exc.PriorException(
"The lower limit of a LogUniformPrior cannot be zero or negative."
)
lower_limit = float(lower_limit)
upper_limit = float(upper_limit)
Message = UniformNormalMessage.shifted(
shift=np.log10(lower_limit),
scale=np.log10(upper_limit / lower_limit),
).transformed(
log_10_transform
)
super().__init__(
Message,
*params,
id_=id_,
lower_limit=lower_limit,
upper_limit=upper_limit,
)
__identifier_fields__ = ("lower_limit", "upper_limit")
@staticmethod
def log_prior_from_value(value):
"""
Returns the log prior of a physical value, so the log likelihood of a model evaluation can be converted to a
posterior as log_prior + log_likelihood.
This is used by Emcee in the log likelihood function evaluation.
Parameters
----------
value : float
The physical value of this prior's corresponding parameter in a `NonLinearSearch` sample."""
return 1.0 / value
@assert_within_limits
def value_for(self, unit: float) -> float:
return super().value_for(unit)
def __str__(self):
"""The line of text describing this prior for the model_mapper.info file"""
return f"LogUniformPrior, lower_limit = {self.lower_limit}, upper_limit = {self.upper_limit}"
class GaussianPrior(NormalMessage):
    """A prior with a Gaussian distribution"""
__identifier_fields__ = (
"lower_limit",
"upper_limit",
"mean",
"sigma"
)
@assert_within_limits
def value_for(self, unit):
"""
Parameters
----------
unit: Float
A unit hypercube value between 0 and 1
Returns
-------
value: Float
A value for the attribute biased to the gaussian distribution
"""
return super().value_for(unit)
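
A standalone sketch (plain NumPy, no autofit imports; the function names are illustrative) of the unit-hypercube mappings that the shifted and transformed messages above encode:

import numpy as np

def uniform_value_for(unit, lower, upper):
    # UniformNormalMessage.shifted(shift=lower, scale=upper - lower):
    # a unit value in [0, 1] maps linearly onto [lower, upper].
    return lower + unit * (upper - lower)

def log_uniform_value_for(unit, lower, upper):
    # The same shift and scale applied in log10 space, then inverted by
    # log_10_transform: uniform in log10(value) between the limits.
    return 10 ** (np.log10(lower) + unit * np.log10(upper / lower))

assert uniform_value_for(0.5, 0.0, 2.0) == 1.0
assert np.isclose(log_uniform_value_for(0.5, 1e-2, 1.0), 1e-1)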
id: 451105
import pytest
import smartsheet
# Given Python's variable naming convention of snake_case,
# and Smartsheet's API attribute naming convention of
# lowerCamelCase, this set of tests is intended to make
# sure all of that is handled correctly.
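# As a concrete illustration of that convention (not one of the tests), both
# spellings below populate the same underlying attribute, and to_dict()
# serializes back to the API's lowerCamelCase form:
#
#     smart.models.Row({'rowNumber': 1}).row_number   # -> 1
#     smart.models.Row({'row_number': 1}).row_number  # -> 1
#     smart.models.Row({'row_number': 1}).to_dict()   # contains 'rowNumber': 1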
@pytest.mark.usefixtures("smart_setup")
class TestModelAttributes:
def test_row(self, smart_setup):
smart = smart_setup['smart']
# above, above
# access_level, accessLevel
# attachments, attachments
# cells, cells
# columns, columns
# conditional_format, conditionalFormat
# created_at, createdAt
# discussions, discussions
# expanded, expanded
# filtered_out, filteredOut
# format, format
# id, id
# in_critical_path, inCriticalPath
# locked, locked
# locked_for_user, lockedForUser
# modified_at, modifiedAt
# parent_id, parentId
# permalink, permalink
# row_number, rowNumber
# sheet_id, sheetId
# sibling_id, siblingId
# to_bottom, toBottom
# to_top, toTop
# version, version
model = smart.models.Row({
'above': True,
'accessLevel': 'VIEWER',
'attachments': smart.models.Attachment(),
'cells': smart.models.Cell(),
'columns': smart.models.Column(),
'conditionalFormat': 'foo',
'discussions': smart.models.Discussion(),
'expanded': True,
'filteredOut': True,
'format': 'foo',
'id': 19082,
'inCriticalPath': True,
'locked': True,
'lockedForUser': True,
'parentId': 19082,
'permalink': 'foo',
'rowNumber': 19082,
'sheetId': 19082,
'siblingId': 19082,
'toBottom': True,
'toTop': True,
'version': 19082
})
assert model.above == True
assert model.access_level == 'VIEWER'
assert isinstance(model.attachments[0], smart.models.Attachment)
assert isinstance(model.cells[0], smart.models.Cell)
assert isinstance(model.columns[0], smart.models.Column)
assert model.conditional_format == 'foo'
assert isinstance(model.discussions[0], smart.models.Discussion)
assert model.expanded == True
assert model.filtered_out == True
assert model.format == 'foo'
assert model.id == 19082
assert model.in_critical_path == True
assert model.locked == True
assert model.locked_for_user == True
assert model.parent_id == 19082
assert model.permalink == 'foo'
assert model.row_number == 19082
assert model.sheet_id == 19082
assert model.sibling_id == 19082
assert model.to_bottom == True
assert model.to_top == True
assert model.version == 19082
as_dict = model.to_dict()
assert isinstance(as_dict, dict)
def test_row_snake(self, smart_setup):
smart = smart_setup['smart']
model = smart.models.Row({
'above': True,
'attachments': smart.models.Attachment(),
'cells': smart.models.Cell(),
'columns': smart.models.Column(),
'discussions': smart.models.Discussion(),
'expanded': True,
'filteredOut': True,
'locked': True,
'lockedForUser': True,
'permalink': 'foo',
'rowNumber': 19082,
'sheetId': 19082,
'version': 19082,
'access_level': 'VIEWER',
'conditional_format': 'foo',
'format': 'foo',
'id': 19082,
'in_critical_path': True,
'parent_id': 19082,
'sibling_id': 19082,
'to_bottom': True,
'to_top': True
})
assert model.above == True
assert model.access_level == 'VIEWER'
assert isinstance(model.attachments[0], smart.models.Attachment)
assert isinstance(model.cells[0], smart.models.Cell)
assert isinstance(model.columns[0], smart.models.Column)
assert model.conditional_format == 'foo'
assert isinstance(model.discussions[0], smart.models.Discussion)
assert model.expanded == True
assert model.filtered_out == True
assert model.format == 'foo'
assert model.id == 19082
assert model.in_critical_path == True
assert model.locked == True
assert model.locked_for_user == True
assert model.parent_id == 19082
assert model.permalink == 'foo'
assert model.row_number == 19082
assert model.sheet_id == 19082
assert model.sibling_id == 19082
assert model.to_bottom == True
assert model.to_top == True
assert model.version == 19082
def test_home(self, smart_setup):
smart = smart_setup['smart']
# folders, folders
# reports, reports
# sheets, sheets
# templates, templates
# workspaces, workspaces
model = smart.models.Home({
'folders': smart.models.Folder(),
'reports': smart.models.Report(),
'sheets': smart.models.Sheet(),
'templates': smart.models.Template(),
'workspaces': smart.models.Workspace()
})
assert isinstance(model.folders[0], smart.models.Folder)
assert isinstance(model.reports[0], smart.models.Report)
assert isinstance(model.sheets[0], smart.models.Sheet)
assert isinstance(model.templates[0], smart.models.Template)
assert isinstance(model.workspaces[0], smart.models.Workspace)
as_dict = model.to_dict()
assert isinstance(as_dict, dict)
def test_cell(self, smart_setup):
smart = smart_setup['smart']
# column_id, columnId
# column_type, columnType
# conditional_format, conditionalFormat
# display_value, displayValue
# format, format
# formula, formula
# hyperlink, hyperlink
# link_in_from_cell, linkInFromCell
# links_out_to_cells, linksOutToCells
# strict, strict
# value, value
model = smart.models.Cell({
'columnId': 19082,
'columnType': 'foo',
'conditionalFormat': 'foo',
'displayValue': 'foo',
'format': 'foo',
'formula': 'foo',
'hyperlink': smart.models.Hyperlink(),
'linkInFromCell': smart.models.CellLink(),
'linksOutToCells': smart.models.CellLink(),
'strict': True,
'value': 'foo'
})
assert model.column_id == 19082
assert model.column_type == 'foo'
assert model.conditional_format == 'foo'
assert model.display_value == 'foo'
assert model.format == 'foo'
assert model.formula == 'foo'
assert isinstance(model.hyperlink, smart.models.Hyperlink)
assert isinstance(model.link_in_from_cell, smart.models.CellLink)
assert isinstance(model.links_out_to_cells[0], smart.models.CellLink)
assert model.strict == True
assert model.value == 'foo'
model.hyperlink = {}
assert isinstance(model.hyperlink, smart.models.Hyperlink)
model.linkInFromCell = {}
assert isinstance(model.link_in_from_cell, smart.models.CellLink)
model.linksOutToCells = {}
assert isinstance(model.links_out_to_cells[0], smart.models.CellLink)
as_dict = model.to_dict()
assert isinstance(as_dict, dict)
def test_cell_snake(self, smart_setup):
smart = smart_setup['smart']
model = smart.models.Cell({
'columnType': 'foo',
'conditionalFormat': 'foo',
'displayValue': 'foo',
'formula': 'foo',
'hyperlink': smart.models.Hyperlink(),
'strict': True,
'value': 'foo',
'column_id': 19082,
'format': 'foo',
'link_in_from_cell': smart.models.CellLink(),
'links_out_to_cells': smart.models.CellLink()
})
assert model.column_id == 19082
assert model.column_type == 'foo'
assert model.conditional_format == 'foo'
assert model.display_value == 'foo'
assert model.format == 'foo'
assert model.formula == 'foo'
assert isinstance(model.hyperlink, smart.models.Hyperlink)
assert isinstance(model.link_in_from_cell, smart.models.CellLink)
assert isinstance(model.links_out_to_cells[0], smart.models.CellLink)
assert model.strict == True
assert model.value == 'foo'
def test_user(self, smart_setup):
smart = smart_setup['smart']
# admin, admin
# email, email
# first_name, firstName
# group_admin, groupAdmin
# _id, id
# last_name, lastName
# licensed_sheet_creator, licensedSheetCreator
# name, name
# resource_viewer, resourceViewer
# status, status
model = smart.models.User({
'admin': True,
'email': 'foo',
'firstName': 'foo',
'groupAdmin': True,
'id': 19082,
'lastName': 'foo',
'licensedSheetCreator': True,
'name': 'foo',
'resourceViewer': True,
'status': 'ACTIVE'
})
assert model.admin == True
assert model.email == 'foo'
assert model.first_name == 'foo'
assert model.group_admin == True
assert model.id == 19082
assert model.last_name == 'foo'
assert model.licensed_sheet_creator == True
assert model.name == 'foo'
assert model.resource_viewer == True
assert model.status == 'ACTIVE'
as_dict = model.to_dict()
assert isinstance(as_dict, dict)
def test_user_snake(self, smart_setup):
smart = smart_setup['smart']
model = smart.models.User({
'admin': True,
'email': 'foo',
'name': 'foo',
'status': 'ACTIVE',
'first_name': 'foo',
'group_admin': True,
'id': 19082,
'last_name': 'foo',
'licensed_sheet_creator': True,
'resource_viewer': True
})
assert model.admin == True
assert model.email == 'foo'
assert model.first_name == 'foo'
assert model.group_admin == True
assert model.id == 19082
assert model.last_name == 'foo'
assert model.licensed_sheet_creator == True
assert model.name == 'foo'
assert model.resource_viewer == True
assert model.status == 'ACTIVE'
def test_group(self, smart_setup):
smart = smart_setup['smart']
# created_at, createdAt
# description, description
# _id, id
# members, members
# modified_at, modifiedAt
# name, name
# owner, owner
# owner_id, ownerId
model = smart.models.Group({
'description': 'foo',
'id': 19082,
'members': smart.models.GroupMember(),
'name': 'foo',
'owner': 'foo',
'ownerId': 19082
})
assert model.description == 'foo'
assert model.id == 19082
assert isinstance(model.members[0], smart.models.GroupMember)
assert model.name == 'foo'
assert model.owner == 'foo'
assert model.owner_id == 19082
as_dict = model.to_dict()
assert isinstance(as_dict, dict)
def test_group_snake(self, smart_setup):
smart = smart_setup['smart']
model = smart.models.Group({
'description': 'foo',
'id': 19082,
'members': smart.models.GroupMember(),
'name': 'foo',
'owner': 'foo',
'owner_id': 19082
})
assert model.description == 'foo'
assert model.id == 19082
assert isinstance(model.members[0], smart.models.GroupMember)
assert model.name == 'foo'
assert model.owner == 'foo'
assert model.owner_id == 19082
def test_error(self, smart_setup):
smart = smart_setup['smart']
# request_response, requestResponse
# result, result
model = smart.models.Error({
'result': smart.models.ErrorResult()
})
assert isinstance(model.result, smart.models.ErrorResult)
model.result = {}
assert isinstance(model.result, smart.models.ErrorResult)
as_dict = model.to_dict()
assert isinstance(as_dict, dict)
def test_error_snake(self, smart_setup):
smart = smart_setup['smart']
model = smart.models.Error({
'result': smart.models.ErrorResult()
})
assert isinstance(model.result, smart.models.ErrorResult)
def test_email(self, smart_setup):
smart = smart_setup['smart']
# cc_me, ccMe
# message, message
# send_to, sendTo
# subject, subject
model = smart.models.Email({
'ccMe': True,
'message': 'foo',
'sendTo': smart.models.Recipient(),
'subject': 'foo'
})
assert model.cc_me == True
assert model.message == 'foo'
assert isinstance(model.send_to[0], smart.models.Recipient)
assert model.subject == 'foo'
as_dict = model.to_dict()
assert isinstance(as_dict, dict)
def test_email_snake(self, smart_setup):
smart = smart_setup['smart']
model = smart.models.Email({
'message': 'foo',
'subject': 'foo',
'cc_me': True,
'send_to': smart.models.Recipient()
})
assert model.cc_me == True
assert model.message == 'foo'
assert isinstance(model.send_to[0], smart.models.Recipient)
assert model.subject == 'foo'
def test_sheet(self, smart_setup):
smart = smart_setup['smart']
# access_level, accessLevel
# attachments, attachments
# columns, columns
# created_at, createdAt
# dependencies_enabled, dependenciesEnabled
# discussions, discussions
# effective_attachment_options, effectiveAttachmentOptions
# favorite, favorite
# from_id, fromId
# gantt_enabled, ganttEnabled
# id, id
# modified_at, modifiedAt
# name, name
# owner, owner
# owner_id, ownerId
# permalink, permalink
# read_only, readOnly
# resource_management_enabled, resourceManagementEnabled
# rows, rows
# show_parent_rows_for_filters, showParentRowsForFilters
# source, source
# total_row_count, totalRowCount
# user_settings, userSettings
# version, version
model = smart.models.Sheet({
'accessLevel': 'VIEWER',
'attachments': smart.models.Attachment(),
'columns': smart.models.Column(),
'dependenciesEnabled': True,
'discussions': smart.models.Discussion(),
'effectiveAttachmentOptions': ['FILE'],
'favorite': True,
'fromId': 19082,
'ganttEnabled': True,
'id': 19082,
'name': 'foo',
'owner': 'foo',
'ownerId': 19082,
'permalink': 'foo',
'readOnly': True,
'resourceManagementEnabled': True,
'rows': smart.models.Row(),
'showParentRowsForFilters': True,
'source': smart.models.Source(),
'totalRowCount': 19082,
'userSettings': smart.models.SheetUserSettings(),
'version': 19082
})
assert model.access_level == 'VIEWER'
assert isinstance(model.attachments[0], smart.models.Attachment)
assert isinstance(model.columns[0], smart.models.Column)
assert model.dependencies_enabled == True
assert isinstance(model.discussions[0], smart.models.Discussion)
assert model.effective_attachment_options[0] == 'FILE'
assert model.favorite == True
assert model.from_id == 19082
assert model.gantt_enabled == True
assert model.id == 19082
assert model.name == 'foo'
assert model.owner == 'foo'
assert model.owner_id == 19082
assert model.permalink == 'foo'
assert model.read_only == True
assert model.resource_management_enabled == True
assert isinstance(model.rows[0], smart.models.Row)
assert model.show_parent_rows_for_filters == True
assert isinstance(model.source, smart.models.Source)
assert model.total_row_count == 19082
assert isinstance(model.user_settings, smart.models.SheetUserSettings)
assert model.version == 19082
model.effective_attachment_options = 'FILE'
assert model.effective_attachment_options[0] == 'FILE'
tmplist = smartsheet.types.TypedList(str)
tmplist.append('FILE')
model.effective_attachment_options = tmplist
assert model.effective_attachment_options[0] == 'FILE'
model.source = {}
assert isinstance(model.source, smart.models.Source)
model.userSettings = {}
assert isinstance(model.user_settings, smart.models.SheetUserSettings)
as_dict = model.to_dict()
assert isinstance(as_dict, dict)
def test_sheet_snake(self, smart_setup):
smart = smart_setup['smart']
model = smart.models.Sheet({
'attachments': smart.models.Attachment(),
'columns': smart.models.Column(),
'discussions': smart.models.Discussion(),
'favorite': True,
'name': 'foo',
'owner': 'foo',
'permalink': 'foo',
'rows': smart.models.Row(),
'source': smart.models.Source(),
'version': 19082,
'access_level': 'VIEWER',
'dependencies_enabled': True,
'effective_attachment_options': ['FILE'],
'from_id': 19082,
'gantt_enabled': True,
'id': 19082,
'owner_id': 19082,
'read_only': True,
'resource_management_enabled': True,
'show_parent_rows_for_filters': True,
'total_row_count': 19082,
'user_settings': smart.models.SheetUserSettings()
})
assert model.access_level == 'VIEWER'
assert isinstance(model.attachments[0], smart.models.Attachment)
assert isinstance(model.columns[0], smart.models.Column)
assert model.dependencies_enabled == True
assert isinstance(model.discussions[0], smart.models.Discussion)
assert model.effective_attachment_options[0] == 'FILE'
assert model.favorite == True
assert model.from_id == 19082
assert model.gantt_enabled == True
assert model.id == 19082
assert model.name == 'foo'
assert model.owner == 'foo'
assert model.owner_id == 19082
assert model.permalink == 'foo'
assert model.read_only == True
assert model.resource_management_enabled == True
assert isinstance(model.rows[0], smart.models.Row)
assert model.show_parent_rows_for_filters == True
assert isinstance(model.source, smart.models.Source)
assert model.total_row_count == 19082
assert isinstance(model.user_settings, smart.models.SheetUserSettings)
assert model.version == 19082
def test_share(self, smart_setup):
smart = smart_setup['smart']
# access_level, accessLevel
# cc_me, ccMe
# email, email
# group_id, groupId
# id, id
# message, message
# name, name
# subject, subject
# type, type
# user_id, userId
model = smart.models.Share({
'accessLevel': 'VIEWER',
'ccMe': True,
'email': 'foo',
'groupId': 19082,
'id': 'foo',
'message': 'foo',
'name': 'foo',
'subject': 'foo',
'type': 'USER',
'userId': 19082
})
assert model.access_level == 'VIEWER'
assert model.cc_me == True
assert model.email == 'foo'
assert model.group_id == 19082
assert model.id == 'foo'
assert model.message == 'foo'
assert model.name == 'foo'
assert model.subject == 'foo'
assert model.type == 'USER'
assert model.user_id == 19082
as_dict = model.to_dict()
assert isinstance(as_dict, dict)
def test_share_snake(self, smart_setup):
smart = smart_setup['smart']
model = smart.models.Share({
'email': 'foo',
'message': 'foo',
'name': 'foo',
'subject': 'foo',
'access_level': 'VIEWER',
'cc_me': True,
'group_id': 19082,
'id': 'foo',
'type': 'USER',
'user_id': 19082
})
assert model.access_level == 'VIEWER'
assert model.cc_me == True
assert model.email == 'foo'
assert model.group_id == 19082
assert model.id == 'foo'
assert model.message == 'foo'
assert model.name == 'foo'
assert model.subject == 'foo'
assert model.type == 'USER'
assert model.user_id == 19082
def test_source(self, smart_setup):
smart = smart_setup['smart']
# id, id
# type, type
model = smart.models.Source({
'id': 19082,
'type': 'sheet'
})
assert model.id == 19082
assert model.type == 'sheet'
as_dict = model.to_dict()
assert isinstance(as_dict, dict)
def test_source_snake(self, smart_setup):
smart = smart_setup['smart']
model = smart.models.Source({
'id': 19082,
'type': 'sheet'
})
assert model.id == 19082
assert model.type == 'sheet'
def test_report(self, smart_setup):
smart = smart_setup['smart']
# source_sheets, sourceSheets
# from_id, fromId
# modified_at, modifiedAt
# owner_id, ownerId
# columns, columns
# dependencies_enabled, dependenciesEnabled
# discussions, discussions
# version, version
# id, id
# gantt_enabled, ganttEnabled
# show_parent_rows_for_filters, showParentRowsForFilters
# created_at, createdAt
# name, name
# attachments, attachments
# total_row_count, totalRowCount
# favorite, favorite
# access_level, accessLevel
# rows, rows
# read_only, readOnly
# permalink, permalink
# source, source
# effective_attachment_options, effectiveAttachmentOptions
# owner, owner
# resource_management_enabled, resourceManagementEnabled
# user_settings, userSettings
model = smart.models.Report({
'sourceSheets': smart.models.Sheet(),
'fromId': 19082,
'ownerId': 19082,
'columns': smart.models.ReportColumn(),
'dependenciesEnabled': True,
'discussions': smart.models.Discussion(),
'version': 19082,
'id': 19082,
'ganttEnabled': True,
'showParentRowsForFilters': True,
'name': 'foo',
'attachments': smart.models.Attachment(),
'totalRowCount': 19082,
'favorite': True,
'accessLevel': 'VIEWER',
'rows': smart.models.ReportRow(),
'readOnly': True,
'permalink': 'foo',
'source': smart.models.Source(),
'effectiveAttachmentOptions': ['FILE'],
'owner': 'foo',
'resourceManagementEnabled': True,
'userSettings': smart.models.SheetUserSettings()
})
assert isinstance(model.source_sheets[0], smart.models.Sheet)
assert model.from_id == 19082
assert model.owner_id == 19082
assert isinstance(model.columns[0], smart.models.Column)
assert model.dependencies_enabled == True
assert isinstance(model.discussions[0], smart.models.Discussion)
assert model.version == 19082
assert model.id == 19082
assert model.gantt_enabled == True
assert model.show_parent_rows_for_filters == True
assert model.name == 'foo'
assert isinstance(model.attachments[0], smart.models.Attachment)
assert model.total_row_count == 19082
assert model.favorite == True
assert model.access_level == 'VIEWER'
assert isinstance(model.rows[0], smart.models.Row)
assert model.read_only == True
assert model.permalink == 'foo'
assert isinstance(model.source, smart.models.Source)
assert model.effective_attachment_options[0] == 'FILE'
assert model.owner == 'foo'
assert model.resource_management_enabled == True
assert isinstance(model.user_settings, smart.models.SheetUserSettings)
model.source = {}
assert isinstance(model.source, smart.models.Source)
model.effective_attachment_options = 'FILE'
assert model.effective_attachment_options[0] == 'FILE'
tmplist = smartsheet.types.TypedList(str)
tmplist.append('FILE')
model.effective_attachment_options = tmplist
assert model.effective_attachment_options[0] == 'FILE'
model.userSettings = {}
assert isinstance(model.user_settings, smart.models.SheetUserSettings)
as_dict = model.to_dict()
assert isinstance(as_dict, dict)
def test_report_snake(self, smart_setup):
smart = smart_setup['smart']
model = smart.models.Report({
'columns': smart.models.ReportColumn(),
'discussions': smart.models.Discussion(),
'version': 19082,
'name': 'foo',
'attachments': smart.models.Attachment(),
'favorite': True,
'rows': smart.models.ReportRow(),
'permalink': 'foo',
'source': smart.models.Source(),
'owner': 'foo',
'source_sheets': smart.models.Sheet(),
'from_id': 19082,
'owner_id': 19082,
'dependencies_enabled': True,
'id': 19082,
'gantt_enabled': True,
'show_parent_rows_for_filters': True,
'total_row_count': 19082,
'access_level': 'VIEWER',
'read_only': True,
'effective_attachment_options': ['FILE'],
'resource_management_enabled': True,
'user_settings': smart.models.SheetUserSettings()
})
assert isinstance(model.source_sheets[0], smart.models.Sheet)
assert model.from_id == 19082
assert model.owner_id == 19082
assert isinstance(model.columns[0], smart.models.Column)
assert model.dependencies_enabled == True
assert isinstance(model.discussions[0], smart.models.Discussion)
assert model.version == 19082
assert model.id == 19082
assert model.gantt_enabled == True
assert model.show_parent_rows_for_filters == True
assert model.name == 'foo'
assert isinstance(model.attachments[0], smart.models.Attachment)
assert model.total_row_count == 19082
assert model.favorite == True
assert model.access_level == 'VIEWER'
assert isinstance(model.rows[0], smart.models.Row)
assert model.read_only == True
assert model.permalink == 'foo'
assert isinstance(model.source, smart.models.Source)
assert model.effective_attachment_options[0] == 'FILE'
assert model.owner == 'foo'
assert model.resource_management_enabled == True
assert isinstance(model.user_settings, smart.models.SheetUserSettings)
def test_folder(self, smart_setup):
smart = smart_setup['smart']
# favorite, favorite
# folders, folders
# id, id
# name, name
# permalink, permalink
# reports, reports
# sheets, sheets
# templates, templates
model = smart.models.Folder({
'favorite': True,
'folders': smart.models.Folder(),
'id': 19082,
'name': 'foo',
'permalink': 'foo',
'reports': smart.models.Report(),
'sheets': smart.models.Sheet(),
'templates': smart.models.Template()
})
assert model.favorite == True
assert isinstance(model.folders[0], smart.models.Folder)
assert model.id == 19082
assert model.name == 'foo'
assert model.permalink == 'foo'
assert isinstance(model.reports[0], smart.models.Report)
assert isinstance(model.sheets[0], smart.models.Sheet)
assert isinstance(model.templates[0], smart.models.Template)
as_dict = model.to_dict()
assert isinstance(as_dict, dict)
def test_folder_snake(self, smart_setup):
smart = smart_setup['smart']
model = smart.models.Folder({
'favorite': True,
'folders': smart.models.Folder(),
'name': 'foo',
'permalink': 'foo',
'reports': smart.models.Report(),
'sheets': smart.models.Sheet(),
'templates': smart.models.Template(),
'id': 19082
})
assert model.favorite == True
assert isinstance(model.folders[0], smart.models.Folder)
assert model.id == 19082
assert model.name == 'foo'
assert model.permalink == 'foo'
assert isinstance(model.reports[0], smart.models.Report)
assert isinstance(model.sheets[0], smart.models.Sheet)
assert isinstance(model.templates[0], smart.models.Template)
def test_column(self, smart_setup):
smart = smart_setup['smart']
# auto_number_format, autoNumberFormat
# format, format
# hidden, hidden
# id, id
# index, index
# locked, locked
# locked_for_user, lockedForUser
# options, options
# primary, primary
# symbol, symbol
# system_column_type, systemColumnType
# tags, tags
# title, title
# type, type
# width, width
model = smart.models.Column({
'autoNumberFormat': smart.models.AutoNumberFormat(),
'format': 'foo',
'hidden': True,
'id': 19082,
'index': 19082,
'locked': True,
'lockedForUser': True,
'options': ['foo'],
'primary': True,
'symbol': 'STAR',
'systemColumnType': 'AUTO_NUMBER',
'tags': ['foo'],
'title': 'foo',
'type': 'TEXT_NUMBER',
'width': 19082
})
assert isinstance(model.auto_number_format, smart.models.AutoNumberFormat)
assert model.format == 'foo'
assert model.hidden == True
assert model.id == 19082
assert model.index == 19082
assert model.locked == True
assert model.locked_for_user == True
assert model.options[0] == 'foo'
assert model.primary == True
assert model.symbol == 'STAR'
assert model.system_column_type == 'AUTO_NUMBER'
assert model.tags[0] == 'foo'
assert model.title == 'foo'
assert model.type == 'TEXT_NUMBER'
assert model.width == 19082
model.autoNumberFormat = {}
assert isinstance(model.auto_number_format, smart.models.AutoNumberFormat)
model.options = 'foo'
assert model.options[0] == 'foo'
tmplist = smartsheet.types.TypedList(str)
tmplist.append('foo')
model.options = tmplist
assert model.options[0] == 'foo'
model.tags = 'foo'
assert model.tags[0] == 'foo'
tmplist = smartsheet.types.TypedList(str)
tmplist.append('foo')
model.tags = tmplist
assert model.tags[0] == 'foo'
as_dict = model.to_dict()
assert isinstance(as_dict, dict)
def test_column_snake(self, smart_setup):
smart = smart_setup['smart']
model = smart.models.Column({
'hidden': True,
'index': 19082,
'locked': True,
'options': ['foo'],
'primary': True,
'symbol': 'STAR',
'tags': ['foo'],
'title': 'foo',
'width': 19082,
'auto_number_format': smart.models.AutoNumberFormat(),
'format': 'foo',
'id': 19082,
'locked_for_user': True,
'system_column_type': 'AUTO_NUMBER',
'type': 'TEXT_NUMBER'
})
assert isinstance(model.auto_number_format, smart.models.AutoNumberFormat)
assert model.format == 'foo'
assert model.hidden == True
assert model.id == 19082
assert model.index == 19082
assert model.locked == True
assert model.locked_for_user == True
assert model.options[0] == 'foo'
assert model.primary == True
assert model.symbol == 'STAR'
assert model.system_column_type == 'AUTO_NUMBER'
assert model.tags[0] == 'foo'
assert model.title == 'foo'
assert model.type == 'TEXT_NUMBER'
assert model.width == 19082
def test_result(self, smart_setup):
smart = smart_setup['smart']
# message, message
# result, result
# result_code, resultCode
# version, version
model = smart.models.Result({
'message': 'foo',
'resultCode': 19082,
'version': 19082
})
assert model.message == 'foo'
assert model.result_code == 19082
assert model.version == 19082
as_dict = model.to_dict()
assert isinstance(as_dict, dict)
def test_result_snake(self, smart_setup):
smart = smart_setup['smart']
model = smart.models.Result({
'message': 'foo',
'version': 19082,
'result_code': 19082
})
assert model.message == 'foo'
assert model.result_code == 19082
assert model.version == 19082
def test_contact(self, smart_setup):
smart = smart_setup['smart']
# email, email
# id, id
# name, name
model = smart.models.Contact({
'email': 'foo',
'id': 'foo',
'name': 'foo'
})
assert model.email == 'foo'
assert model.id == 'foo'
assert model.name == 'foo'
as_dict = model.to_dict()
assert isinstance(as_dict, dict)
def test_contact_snake(self, smart_setup):
smart = smart_setup['smart']
model = smart.models.Contact({
'email': 'foo',
'name': 'foo',
'id': 'foo'
})
assert model.email == 'foo'
assert model.id == 'foo'
assert model.name == 'foo'
def test_comment(self, smart_setup):
smart = smart_setup['smart']
# attachments, attachments
# created_at, createdAt
# created_by, createdBy
# discussion_id, discussionId
# id, id
# modified_at, modifiedAt
# text, text
model = smart.models.Comment({
'attachments': smart.models.Attachment(),
'createdBy': smart.models.User(),
'discussionId': 19082,
'id': 19082,
'text': 'foo'
})
assert isinstance(model.attachments[0], smart.models.Attachment)
assert isinstance(model.created_by, smart.models.User)
assert model.discussion_id == 19082
assert model.id == 19082
assert model.text == 'foo'
model.createdBy = {}
assert isinstance(model.created_by, smart.models.User)
as_dict = model.to_dict()
assert isinstance(as_dict, dict)
def test_comment_snake(self, smart_setup):
smart = smart_setup['smart']
model = smart.models.Comment({
'attachments': smart.models.Attachment(),
'text': 'foo',
'created_by': smart.models.User(),
'discussion_id': 19082,
'id': 19082
})
assert isinstance(model.attachments[0], smart.models.Attachment)
assert isinstance(model.created_by, smart.models.User)
assert model.discussion_id == 19082
assert model.id == 19082
assert model.text == 'foo'
def test_account(self, smart_setup):
smart = smart_setup['smart']
# id, id
# name, name
model = smart.models.Account({
'id': 19082,
'name': 'foo'
})
assert model.id == 19082
assert model.name == 'foo'
as_dict = model.to_dict()
assert isinstance(as_dict, dict)
def test_account_snake(self, smart_setup):
smart = smart_setup['smart']
model = smart.models.Account({
'name': 'foo',
'id': 19082
})
assert model.id == 19082
assert model.name == 'foo'
def test_version(self, smart_setup):
smart = smart_setup['smart']
# version, version
model = smart.models.Version({
'version': 19082
})
assert model.version == 19082
as_dict = model.to_dict()
assert isinstance(as_dict, dict)
def test_cell_link(self, smart_setup):
smart = smart_setup['smart']
# column_id, columnId
# row_id, rowId
# sheet_id, sheetId
# sheet_name, sheetName
# status, status
model = smart.models.CellLink({
'columnId': 19082,
'rowId': 19082,
'sheetId': 19082,
'sheetName': 'foo',
'status': 'OK'
})
assert model.column_id == 19082
assert model.row_id == 19082
assert model.sheet_id == 19082
assert model.sheet_name == 'foo'
assert model.status == 'OK'
as_dict = model.to_dict()
assert isinstance(as_dict, dict)
def test_cell_link_snake(self, smart_setup):
smart = smart_setup['smart']
model = smart.models.CellLink({
'status': 'OK',
'column_id': 19082,
'row_id': 19082,
'sheet_id': 19082,
'sheet_name': 'foo'
})
assert model.column_id == 19082
assert model.row_id == 19082
assert model.sheet_id == 19082
assert model.sheet_name == 'foo'
assert model.status == 'OK'
def test_template(self, smart_setup):
smart = smart_setup['smart']
# access_level, accessLevel
# description, description
# id, id
# name, name
model = smart.models.Template({
'accessLevel': 'VIEWER',
'description': 'foo',
'id': 19082,
'name': 'foo'
})
assert model.access_level == 'VIEWER'
assert model.description == 'foo'
assert model.id == 19082
assert model.name == 'foo'
as_dict = model.to_dict()
assert isinstance(as_dict, dict)
def test_template_snake(self, smart_setup):
smart = smart_setup['smart']
model = smart.models.Template({
'description': 'foo',
'name': 'foo',
'access_level': 'VIEWER',
'id': 19082
})
assert model.access_level == 'VIEWER'
assert model.description == 'foo'
assert model.id == 19082
assert model.name == 'foo'
def test_row_email(self, smart_setup):
smart = smart_setup['smart']
# message, message
# column_ids, columnIds
# send_to, sendTo
# include_attachments, includeAttachments
# subject, subject
# include_discussions, includeDiscussions
# cc_me, ccMe
model = smart.models.RowEmail({
'message': 'foo',
'columnIds': [19082],
'sendTo': smart.models.Recipient(),
'includeAttachments': True,
'subject': 'foo',
'includeDiscussions': True,
'ccMe': True
})
assert model.message == 'foo'
assert model.column_ids[0] == 19082
assert isinstance(model.send_to[0], smart.models.Recipient)
assert model.include_attachments == True
assert model.subject == 'foo'
assert model.include_discussions == True
assert model.cc_me == True
model.column_ids = 19082
assert model.column_ids[0] == 19082
tmplist = smartsheet.types.TypedList(int)
tmplist.append(19082)
model.column_ids = tmplist
assert model.column_ids[0] == 19082
as_dict = model.to_dict()
assert isinstance(as_dict, dict)
def test_row_email_snake(self, smart_setup):
smart = smart_setup['smart']
model = smart.models.RowEmail({
'message': 'foo',
'subject': 'foo',
'column_ids': [19082],
'send_to': smart.models.Recipient(),
'include_attachments': True,
'include_discussions': True,
'cc_me': True
})
assert model.message == 'foo'
assert model.column_ids[0] == 19082
assert isinstance(model.send_to[0], smart.models.Recipient)
assert model.include_attachments == True
assert model.subject == 'foo'
assert model.include_discussions == True
assert model.cc_me == True
def test_criteria(self, smart_setup):
smart = smart_setup['smart']
        # operator, operator
        # values, values
model = smart.models.Criteria({
'operator': 'EQUAL',
'values': ['foo', 'foo']
})
assert model.operator == 'EQUAL'
assert model.values[0] == 'foo'
assert model.values[1] == 'foo'
as_dict = model.to_dict()
assert isinstance(as_dict, dict)
def test_currency(self, smart_setup):
smart = smart_setup['smart']
# code, code
# symbol, symbol
model = smart.models.Currency({
'code': 'USD',
'symbol': '$'
})
assert model.code == 'USD'
assert model.symbol == '$'
as_dict = model.to_dict()
assert isinstance(as_dict, dict)
def test_favorite(self, smart_setup):
smart = smart_setup['smart']
# object_id, objectId
# type, type
model = smart.models.Favorite({
'objectId': 19082,
'type': 'workspace'
})
assert model.object_id == 19082
assert model.type == 'workspace'
as_dict = model.to_dict()
assert isinstance(as_dict, dict)
def test_favorite_snake(self, smart_setup):
smart = smart_setup['smart']
model = smart.models.Favorite({
'object_id': 19082,
'type': 'workspace'
})
assert model.object_id == 19082
assert model.type == 'workspace'
def test_hyperlink(self, smart_setup):
smart = smart_setup['smart']
# report_id, reportId
# sheet_id, sheetId
# url, url
model = smart.models.Hyperlink({
'reportId': 19082,
'sheetId': 19082,
'url': 'foo'
})
assert model.report_id == 19082
assert model.sheet_id == 19082
assert model.url == 'foo'
as_dict = model.to_dict()
assert isinstance(as_dict, dict)
def test_hyperlink_snake(self, smart_setup):
smart = smart_setup['smart']
model = smart.models.Hyperlink({
'url': 'foo',
'report_id': 19082,
'sheet_id': 19082
})
assert model.report_id == 19082
assert model.sheet_id == 19082
assert model.url == 'foo'
def test_recipient(self, smart_setup):
smart = smart_setup['smart']
# email, email
# group_id, groupId
model = smart.models.Recipient({
'email': 'foo',
'groupId': 19082
})
assert model.email == 'foo'
assert model.group_id == 19082
as_dict = model.to_dict()
assert isinstance(as_dict, dict)
def test_recipient_snake(self, smart_setup):
smart = smart_setup['smart']
model = smart.models.Recipient({
'email': 'foo',
'group_id': 19082
})
assert model.email == 'foo'
assert model.group_id == 19082
def test_workspace(self, smart_setup):
smart = smart_setup['smart']
# access_level, accessLevel
# favorite, favorite
# folders, folders
# id, id
# name, name
# permalink, permalink
# reports, reports
# sheets, sheets
# templates, templates
model = smart.models.Workspace({
'accessLevel': 'VIEWER',
'favorite': True,
'folders': smart.models.Folder(),
'id': 19082,
'name': 'foo',
'permalink': 'foo',
'reports': smart.models.Report(),
'sheets': smart.models.Sheet(),
'templates': smart.models.Template()
})
assert model.access_level == 'VIEWER'
assert model.favorite == True
assert isinstance(model.folders[0], smart.models.Folder)
assert model.id == 19082
assert model.name == 'foo'
assert model.permalink == 'foo'
assert isinstance(model.reports[0], smart.models.Report)
assert isinstance(model.sheets[0], smart.models.Sheet)
assert isinstance(model.templates[0], smart.models.Template)
as_dict = model.to_dict()
assert isinstance(as_dict, dict)
def test_workspace_snake(self, smart_setup):
smart = smart_setup['smart']
model = smart.models.Workspace({
'favorite': True,
'folders': smart.models.Folder(),
'name': 'foo',
'permalink': 'foo',
'reports': smart.models.Report(),
'sheets': smart.models.Sheet(),
'templates': smart.models.Template(),
'access_level': 'VIEWER',
'id': 19082
})
assert model.access_level == 'VIEWER'
assert model.favorite == True
assert isinstance(model.folders[0], smart.models.Folder)
assert model.id == 19082
assert model.name == 'foo'
assert model.permalink == 'foo'
assert isinstance(model.reports[0], smart.models.Report)
assert isinstance(model.sheets[0], smart.models.Sheet)
assert isinstance(model.templates[0], smart.models.Template)
def test_report_row(self, smart_setup):
smart = smart_setup['smart']
# in_critical_path, inCriticalPath
# cells, cells
# sibling_id, siblingId
# modified_at, modifiedAt
# columns, columns
# row_number, rowNumber
# format, format
# expanded, expanded
# access_level, accessLevel
# version, version
# discussions, discussions
# id, id
# parent_id, parentId
# sheet_id, sheetId
# to_top, toTop
# to_bottom, toBottom
# permalink, permalink
# locked_for_user, lockedForUser
# created_at, createdAt
# conditional_format, conditionalFormat
# filtered_out, filteredOut
# above, above
# locked, locked
# attachments, attachments
model = smart.models.ReportRow({
'inCriticalPath': True,
'cells': smart.models.ReportCell(),
'siblingId': 19082,
'columns': smart.models.Column(),
'rowNumber': 19082,
'format': 'foo',
'expanded': True,
'accessLevel': 'VIEWER',
'version': 19082,
'discussions': smart.models.Discussion(),
'id': 19082,
'parentId': 19082,
'sheetId': 19082,
'toTop': True,
'toBottom': True,
'permalink': 'foo',
'lockedForUser': True,
'conditionalFormat': 'foo',
'filteredOut': True,
'above': True,
'locked': True,
'attachments': smart.models.Attachment()
})
assert model.in_critical_path == True
assert isinstance(model.cells[0], smart.models.Cell)
assert model.sibling_id == 19082
assert isinstance(model.columns[0], smart.models.Column)
assert model.row_number == 19082
assert model.format == 'foo'
assert model.expanded == True
assert model.access_level == 'VIEWER'
assert model.version == 19082
assert isinstance(model.discussions[0], smart.models.Discussion)
assert model.id == 19082
assert model.parent_id == 19082
assert model.sheet_id == 19082
assert model.to_top == True
assert model.to_bottom == True
assert model.permalink == 'foo'
assert model.locked_for_user == True
assert model.conditional_format == 'foo'
assert model.filtered_out == True
assert model.above == True
assert model.locked == True
assert isinstance(model.attachments[0], smart.models.Attachment)
as_dict = model.to_dict()
assert isinstance(as_dict, dict)
def test_report_row_snake(self, smart_setup):
smart = smart_setup['smart']
model = smart.models.ReportRow({
'cells': smart.models.ReportCell(),
'columns': smart.models.Column(),
'rowNumber': 19082,
'expanded': True,
'version': 19082,
'discussions': smart.models.Discussion(),
'permalink': 'foo',
'lockedForUser': True,
'filteredOut': True,
'above': True,
'locked': True,
'attachments': smart.models.Attachment(),
'in_critical_path': True,
'sibling_id': 19082,
'format': 'foo',
'access_level': 'VIEWER',
'id': 19082,
'parent_id': 19082,
'sheet_id': 19082,
'to_top': True,
'to_bottom': True,
'conditional_format': 'foo'
})
assert model.in_critical_path == True
assert isinstance(model.cells[0], smart.models.Cell)
assert model.sibling_id == 19082
assert isinstance(model.columns[0], smart.models.Column)
assert model.row_number == 19082
assert model.format == 'foo'
assert model.expanded == True
assert model.access_level == 'VIEWER'
assert model.version == 19082
assert isinstance(model.discussions[0], smart.models.Discussion)
assert model.id == 19082
assert model.parent_id == 19082
assert model.sheet_id == 19082
assert model.to_top == True
assert model.to_bottom == True
assert model.permalink == 'foo'
assert model.locked_for_user == True
assert model.conditional_format == 'foo'
assert model.filtered_out == True
assert model.above == True
assert model.locked == True
assert isinstance(model.attachments[0], smart.models.Attachment)
def test_font_family(self, smart_setup):
smart = smart_setup['smart']
# name, name
# traits, traits
model = smart.models.FontFamily({
'name': 'foo',
'traits': ['foo']
})
assert model.name == 'foo'
assert model.traits[0] == 'foo'
model.traits = 'foo'
assert model.traits[0] == 'foo'
tmplist = smartsheet.types.TypedList(str)
tmplist.append('foo')
model.traits = tmplist
assert model.traits[0] == 'foo'
as_dict = model.to_dict()
assert isinstance(as_dict, dict)
def test_server_info(self, smart_setup):
smart = smart_setup['smart']
# formats, formats
# supported_locales, supportedLocales
model = smart.models.ServerInfo({
'formats': smart.models.FormatTables(),
'supportedLocales': ['foo']
})
assert isinstance(model.formats, smart.models.FormatTables)
assert model.supported_locales[0] == 'foo'
model.formats = {}
assert isinstance(model.formats, smart.models.FormatTables)
model.supported_locales = 'foo'
assert model.supported_locales[0] == 'foo'
tmplist = smartsheet.types.TypedList(str)
tmplist.append('foo')
model.supported_locales = tmplist
assert model.supported_locales[0] == 'foo'
as_dict = model.to_dict()
assert isinstance(as_dict, dict)
def test_server_info_snake(self, smart_setup):
smart = smart_setup['smart']
model = smart.models.ServerInfo({
'formats': smart.models.FormatTables(),
'supported_locales': ['foo']
})
assert isinstance(model.formats, smart.models.FormatTables)
assert model.supported_locales[0] == 'foo'
def test_report_cell(self, smart_setup):
smart = smart_setup['smart']
# link_in_from_cell, linkInFromCell
# virtual_column_id, virtualColumnId
# column_type, columnType
# hyperlink, hyperlink
# conditional_format, conditionalFormat
# value, value
# column_id, columnId
# format, format
# strict, strict
# display_value, displayValue
# links_out_to_cells, linksOutToCells
# formula, formula
model = smart.models.ReportCell({
'linkInFromCell': smart.models.CellLink(),
'virtualColumnId': 19082,
'columnType': 'foo',
'hyperlink': smart.models.Hyperlink(),
'conditionalFormat': 'foo',
'value': 'foo',
'columnId': 19082,
'format': 'foo',
'strict': True,
'displayValue': 'foo',
'linksOutToCells': smart.models.CellLink(),
'formula': 'foo'
})
assert isinstance(model.link_in_from_cell, smart.models.CellLink)
assert model.virtual_column_id == 19082
assert model.column_type == 'foo'
assert isinstance(model.hyperlink, smart.models.Hyperlink)
assert model.conditional_format == 'foo'
assert model.value == 'foo'
assert model.column_id == 19082
assert model.format == 'foo'
assert model.strict == True
assert model.display_value == 'foo'
assert isinstance(model.links_out_to_cells[0], smart.models.CellLink)
assert model.formula == 'foo'
model.linkInFromCell = {}
assert isinstance(model.link_in_from_cell, smart.models.CellLink)
model.hyperlink = {}
assert isinstance(model.hyperlink, smart.models.Hyperlink)
model.linksOutToCells = {}
assert isinstance(model.links_out_to_cells[0], smart.models.CellLink)
as_dict = model.to_dict()
assert isinstance(as_dict, dict)
def test_report_cell_snake(self, smart_setup):
smart = smart_setup['smart']
model = smart.models.ReportCell({
'columnType': 'foo',
'hyperlink': smart.models.Hyperlink(),
'conditionalFormat': 'foo',
'value': 'foo',
'strict': True,
'displayValue': 'foo',
'formula': 'foo',
'link_in_from_cell': smart.models.CellLink(),
'virtual_column_id': 19082,
'column_id': 19082,
'format': 'foo',
'links_out_to_cells': smart.models.CellLink()
})
assert isinstance(model.link_in_from_cell, smart.models.CellLink)
assert model.virtual_column_id == 19082
assert model.column_type == 'foo'
assert isinstance(model.hyperlink, smart.models.Hyperlink)
assert model.conditional_format == 'foo'
assert model.value == 'foo'
assert model.column_id == 19082
assert model.format == 'foo'
assert model.strict == True
assert model.display_value == 'foo'
assert isinstance(model.links_out_to_cells[0], smart.models.CellLink)
assert model.formula == 'foo'
def test_row_mapping(self, smart_setup):
smart = smart_setup['smart']
# from_, from
# to, to
model = smart.models.RowMapping({
'from': 19082,
'to': 19082
})
assert model.from_ == 19082
assert model.to == 19082
as_dict = model.to_dict()
assert isinstance(as_dict, dict)
def test_row_mapping_snake(self, smart_setup):
smart = smart_setup['smart']
model = smart.models.RowMapping({
'to': 19082,
'from': 19082
})
assert model.from_ == 19082
assert model.to == 19082
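    # Note: 'from' is a Python reserved word, so (per the mapping comment in
    # test_row_mapping above) the SDK exposes it as the attribute 'from_',
    # while the dict form keeps the API's plain 'from' key.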
def test_o_auth_error(self, smart_setup):
smart = smart_setup['smart']
# error, error
# error_code, errorCode
# error_description, error_description
model = smart.models.OAuthError({
'error': 'invalid_request',
'errorCode': 19082,
'error_description': 'foo'
})
assert model.error == 'invalid_request'
assert model.error_code == 19082
assert model.error_description == 'foo'
as_dict = model.to_dict()
assert isinstance(as_dict, dict)
def test_o_auth_error_snake(self, smart_setup):
smart = smart_setup['smart']
model = smart.models.OAuthError({
'error': 'invalid_request',
'error_description': 'foo',
'error_code': 19082
})
assert model.error == 'invalid_request'
assert model.error_code == 19082
assert model.error_description == 'foo'
def test_discussion(self, smart_setup):
smart = smart_setup['smart']
# access_level, accessLevel
# comment, comment
# comment_attachments, commentAttachments
# comments, comments
# created_by, createdBy
# id, id
# last_commented_at, lastCommentedAt
# last_commented_user, lastCommentedUser
# parent_id, parentId
# parent_type, parentType
# read_only, readOnly
# title, title
model = smart.models.Discussion({
'accessLevel': 'VIEWER',
'comment': smart.models.Comment(),
'commentAttachments': smart.models.Attachment(),
'comments': smart.models.Comment(),
'createdBy': smart.models.User(),
'id': 19082,
'lastCommentedUser': smart.models.User(),
'parentId': 19082,
'parentType': 'foo',
'readOnly': True,
'title': 'foo'
})
assert model.access_level == 'VIEWER'
assert isinstance(model.comment, smart.models.Comment)
assert isinstance(model.comment_attachments[0], smart.models.Attachment)
assert isinstance(model.comments[0], smart.models.Comment)
assert isinstance(model.created_by, smart.models.User)
assert model.id == 19082
assert isinstance(model.last_commented_user, smart.models.User)
assert model.parent_id == 19082
assert model.parent_type == 'foo'
assert model.read_only == True
assert model.title == 'foo'
model.comment = {}
assert isinstance(model.comment, smart.models.Comment)
model.createdBy = {}
assert isinstance(model.created_by, smart.models.User)
model.lastCommentedUser = {}
assert isinstance(model.last_commented_user, smart.models.User)
as_dict = model.to_dict()
assert isinstance(as_dict, dict)
def test_discussion_snake(self, smart_setup):
smart = smart_setup['smart']
model = smart.models.Discussion({
'comment': smart.models.Comment(),
'comments': smart.models.Comment(),
'title': 'foo',
'access_level': 'VIEWER',
'comment_attachments': smart.models.Attachment(),
'created_by': smart.models.User(),
'id': 19082,
'last_commented_user': smart.models.User(),
'parent_id': 19082,
'parent_type': 'foo',
'read_only': True
})
assert model.access_level == 'VIEWER'
assert isinstance(model.comment, smart.models.Comment)
assert isinstance(model.comment_attachments[0], smart.models.Attachment)
assert isinstance(model.comments[0], smart.models.Comment)
assert isinstance(model.created_by, smart.models.User)
assert model.id == 19082
assert isinstance(model.last_commented_user, smart.models.User)
assert model.parent_id == 19082
assert model.parent_type == 'foo'
assert model.read_only == True
assert model.title == 'foo'
def test_attachment(self, smart_setup):
smart = smart_setup['smart']
# attachment_sub_type, attachmentSubType
# attachment_type, attachmentType
# created_at, createdAt
# created_by, createdBy
# description, description
# id, id
# mime_type, mimeType
# name, name
# parent_id, parentId
# parent_type, parentType
# size_in_kb, sizeInKb
# url, url
# url_expires_in_millis, urlExpiresInMillis
model = smart.models.Attachment({
'attachmentSubType': 'DOCUMENT',
'attachmentType': 'BOX_COM',
'createdBy': smart.models.User(),
'description': 'foo',
'id': 19082,
'mimeType': 'foo',
'name': 'foo',
'parentId': 19082,
'parentType': 'SHEET',
'sizeInKb': 19082,
'url': 'foo',
'urlExpiresInMillis': 19082
})
assert model.attachment_sub_type == 'DOCUMENT'
assert model.attachment_type == 'BOX_COM'
assert isinstance(model.created_by, smart.models.User)
assert model.id == 19082
assert model.mime_type == 'foo'
assert model.name == 'foo'
assert model.parent_id == 19082
assert model.parent_type == 'SHEET'
assert model.size_in_kb == 19082
assert model.url == 'foo'
assert model.url_expires_in_millis == 19082
model.createdBy = {}
assert isinstance(model.created_by, smart.models.User)
as_dict = model.to_dict()
assert isinstance(as_dict, dict)
def test_attachment_snake(self, smart_setup):
smart = smart_setup['smart']
model = smart.models.Attachment({
'mimeType': 'foo',
'name': 'foo',
'parentId': 19082,
'parentType': 'SHEET',
'sizeInKb': 19082,
'url': 'foo',
'urlExpiresInMillis': 19082,
'attachment_sub_type': 'DOCUMENT',
'attachment_type': 'BOX_COM',
'created_by': smart.models.User(),
'id': 19082
})
assert model.attachment_sub_type == 'DOCUMENT'
assert model.attachment_type == 'BOX_COM'
assert isinstance(model.created_by, smart.models.User)
assert model.id == 19082
assert model.mime_type == 'foo'
assert model.name == 'foo'
assert model.parent_id == 19082
assert model.parent_type == 'SHEET'
assert model.size_in_kb == 19082
assert model.url == 'foo'
assert model.url_expires_in_millis == 19082
def test_sheet_email(self, smart_setup):
smart = smart_setup['smart']
# message, message
# send_to, sendTo
# subject, subject
# format_details, formatDetails
# format, format
# cc_me, ccMe
model = smart.models.SheetEmail({
'message': 'foo',
'sendTo': smart.models.Recipient(),
'subject': 'foo',
'formatDetails': smart.models.FormatDetails(),
'format': 'PDF',
'ccMe': True
})
assert model.message == 'foo'
assert isinstance(model.send_to[0], smart.models.Recipient)
assert model.subject == 'foo'
assert isinstance(model.format_details, smart.models.FormatDetails)
assert model.format == 'PDF'
assert model.cc_me == True
model.formatDetails = {}
assert isinstance(model.format_details, smart.models.FormatDetails)
as_dict = model.to_dict()
assert isinstance(as_dict, dict)
def test_sheet_email_snake(self, smart_setup):
smart = smart_setup['smart']
model = smart.models.SheetEmail({
'message': 'foo',
'subject': 'foo',
'send_to': smart.models.Recipient(),
'format_details': smart.models.FormatDetails(),
'format': 'PDF',
'cc_me': True
})
assert model.message == 'foo'
assert isinstance(model.send_to[0], smart.models.Recipient)
assert model.subject == 'foo'
assert isinstance(model.format_details, smart.models.FormatDetails)
assert model.format == 'PDF'
assert model.cc_me == True
def test_access_token(self, smart_setup):
smart = smart_setup['smart']
# access_token, access_token
# expires_in, expires_in
# refresh_token, refresh_token
# token_type, token_type
model = smart.models.AccessToken({
'access_token': 'foo',
'expires_in': 19082,
'refresh_token': 'foo',
'token_type': 'bearer'
})
assert model.access_token == 'foo'
assert model.expires_in == 19082
assert model.refresh_token == 'foo'
assert model.token_type == 'bearer'
as_dict = model.to_dict()
assert isinstance(as_dict, dict)
def test_index_result(self, smart_setup):
smart = smart_setup['smart']
# data, data
# page_number, pageNumber
# page_size, pageSize
# total_count, totalCount
# total_pages, totalPages
model = smart.models.IndexResult({
'pageNumber': 19082,
'pageSize': 19082,
'totalCount': 19082,
'totalPages': 19082
})
assert model.page_number == 19082
assert model.page_size == 19082
assert model.total_count == 19082
assert model.total_pages == 19082
as_dict = model.to_dict()
assert isinstance(as_dict, dict)
def test_index_result_snake(self, smart_setup):
smart = smart_setup['smart']
model = smart.models.IndexResult({
'page_number': 19082,
'page_size': 19082,
'total_count': 19082,
'total_pages': 19082
})
assert model.page_number == 19082
assert model.page_size == 19082
assert model.total_count == 19082
assert model.total_pages == 19082
def test_cell_history(self, smart_setup):
smart = smart_setup['smart']
# link_in_from_cell, linkInFromCell
# modified_at, modifiedAt
# column_type, columnType
# modified_by, modifiedBy
# hyperlink, hyperlink
# conditional_format, conditionalFormat
# value, value
# column_id, columnId
# format, format
# strict, strict
# display_value, displayValue
# links_out_to_cells, linksOutToCells
# formula, formula
model = smart.models.CellHistory({
'linkInFromCell': smart.models.CellLink(),
'columnType': 'foo',
'modifiedBy': smart.models.User(),
'hyperlink': smart.models.Hyperlink(),
'conditionalFormat': 'foo',
'value': 'foo',
'columnId': 19082,
'format': 'foo',
'strict': True,
'displayValue': 'foo',
'linksOutToCells': smart.models.CellLink(),
'formula': 'foo'
})
assert isinstance(model.link_in_from_cell, smart.models.CellLink)
assert model.column_type == 'foo'
assert isinstance(model.modified_by, smart.models.User)
assert isinstance(model.hyperlink, smart.models.Hyperlink)
assert model.conditional_format == 'foo'
assert model.value == 'foo'
assert model.column_id == 19082
assert model.format == 'foo'
assert model.strict == True
assert model.display_value == 'foo'
assert isinstance(model.links_out_to_cells[0], smart.models.CellLink)
assert model.formula == 'foo'
model.linkInFromCell = {}
assert isinstance(model.link_in_from_cell, smart.models.CellLink)
model.modifiedBy = {}
assert isinstance(model.modified_by, smart.models.User)
model.hyperlink = {}
assert isinstance(model.hyperlink, smart.models.Hyperlink)
model.linksOutToCells = {}
assert isinstance(model.links_out_to_cells[0], smart.models.CellLink)
as_dict = model.to_dict()
assert isinstance(as_dict, dict)
def test_cell_history_snake(self, smart_setup):
smart = smart_setup['smart']
model = smart.models.CellHistory({
'columnType': 'foo',
'hyperlink': smart.models.Hyperlink(),
'conditionalFormat': 'foo',
'value': 'foo',
'strict': True,
'displayValue': 'foo',
'formula': 'foo',
'link_in_from_cell': smart.models.CellLink(),
'modified_by': smart.models.User(),
'column_id': 19082,
'format': 'foo',
'links_out_to_cells': smart.models.CellLink()
})
assert isinstance(model.link_in_from_cell, smart.models.CellLink)
assert model.column_type == 'foo'
assert isinstance(model.modified_by, smart.models.User)
assert isinstance(model.hyperlink, smart.models.Hyperlink)
assert model.conditional_format == 'foo'
assert model.value == 'foo'
assert model.column_id == 19082
assert model.format == 'foo'
assert model.strict == True
assert model.display_value == 'foo'
assert isinstance(model.links_out_to_cells[0], smart.models.CellLink)
assert model.formula == 'foo'
def test_user_profile(self, smart_setup):
smart = smart_setup['smart']
# account, account
# admin, admin
# email, email
# first_name, firstName
# group_admin, groupAdmin
# id, id
# last_name, lastName
# licensed_sheet_creator, licensedSheetCreator
# locale, locale
# resource_viewer, resourceViewer
# status, status
# time_zone, timeZone
model = smart.models.UserProfile({
'account': smart.models.Account(),
'admin': True,
'email': 'foo',
'firstName': 'foo',
'groupAdmin': True,
'id': 19082,
'lastName': 'foo',
'licensedSheetCreator': True,
'locale': 'foo',
'resourceViewer': True,
'status': 'ACTIVE',
'timeZone': 'foo'
})
assert isinstance(model.account, smart.models.Account)
assert model.admin == True
assert model.email == 'foo'
assert model.first_name == 'foo'
assert model.group_admin == True
assert model.id == 19082
assert model.last_name == 'foo'
assert model.licensed_sheet_creator == True
assert model.locale == 'foo'
assert model.resource_viewer == True
assert model.status == 'ACTIVE'
assert model.time_zone == 'foo'
model.account = {}
assert isinstance(model.account, smart.models.Account)
as_dict = model.to_dict()
assert isinstance(as_dict, dict)
def test_user_profile_snake(self, smart_setup):
smart = smart_setup['smart']
model = smart.models.UserProfile({
'account': smart.models.Account(),
'admin': True,
'email': 'foo',
'locale': 'foo',
'status': 'ACTIVE',
'first_name': 'foo',
'group_admin': True,
'id': 19082,
'last_name': 'foo',
'licensed_sheet_creator': True,
'resource_viewer': True,
'time_zone': 'foo'
})
assert isinstance(model.account, smart.models.Account)
assert model.admin == True
assert model.email == 'foo'
assert model.first_name == 'foo'
assert model.group_admin == True
assert model.id == 19082
assert model.last_name == 'foo'
assert model.licensed_sheet_creator == True
assert model.locale == 'foo'
assert model.resource_viewer == True
assert model.status == 'ACTIVE'
assert model.time_zone == 'foo'
def test_group_member(self, smart_setup):
smart = smart_setup['smart']
# email, email
# first_name, firstName
# id, id
# last_name, lastName
# name, name
model = smart.models.GroupMember({
'email': 'foo',
'firstName': 'foo',
'id': 19082,
'lastName': 'foo',
'name': 'foo'
})
assert model.email == 'foo'
assert model.first_name == 'foo'
assert model.id == 19082
assert model.last_name == 'foo'
assert model.name == 'foo'
as_dict = model.to_dict()
assert isinstance(as_dict, dict)
def test_group_member_snake(self, smart_setup):
smart = smart_setup['smart']
model = smart.models.GroupMember({
'email': 'foo',
'name': 'foo',
'first_name': 'foo',
'id': 19082,
'last_name': 'foo'
})
assert model.email == 'foo'
assert model.first_name == 'foo'
assert model.id == 19082
assert model.last_name == 'foo'
assert model.name == 'foo'
def test_error_result(self, smart_setup):
smart = smart_setup['smart']
# code, code
# message, message
# name, name
# recommendation, recommendation
# should_retry, shouldRetry
# status_code, statusCode
model = smart.models.ErrorResult({
'code': 19082,
'message': 'foo',
'name': 'foo',
'recommendation': 'foo',
'shouldRetry': True,
'statusCode': 19082
})
assert model.code == 19082
assert model.message == 'foo'
assert model.name == 'foo'
assert model.recommendation == 'foo'
assert model.should_retry == True
assert model.status_code == 19082
as_dict = model.to_dict()
assert isinstance(as_dict, dict)
def test_error_result_snake(self, smart_setup):
smart = smart_setup['smart']
model = smart.models.ErrorResult({
'code': 19082,
'message': 'foo',
'name': 'foo',
'recommendation': 'foo',
'should_retry': True,
'status_code': 19082
})
assert model.code == 19082
assert model.message == 'foo'
assert model.name == 'foo'
assert model.recommendation == 'foo'
assert model.should_retry == True
assert model.status_code == 19082
def test_report_column(self, smart_setup):
smart = smart_setup['smart']
# sheet_name_column, sheetNameColumn
# tags, tags
# index, index
# symbol, symbol
# width, width
# format, format
# type, type
# id, id
# title, title
# locked_for_user, lockedForUser
# hidden, hidden
# primary, primary
# system_column_type, systemColumnType
# locked, locked
# virtual_id, virtualId
# options, options
# auto_number_format, autoNumberFormat
model = smart.models.ReportColumn({
'sheetNameColumn': True,
'tags': ['foo'],
'index': 19082,
'symbol': 'STAR',
'width': 19082,
'format': 'foo',
'type': 'TEXT_NUMBER',
'id': 19082,
'title': 'foo',
'lockedForUser': True,
'hidden': True,
'primary': True,
'systemColumnType': 'AUTO_NUMBER',
'locked': True,
'virtualId': 19082,
'options': ['foo'],
'autoNumberFormat': smart.models.AutoNumberFormat()
})
assert model.sheet_name_column == True
assert model.tags[0] == 'foo'
assert model.index == 19082
assert model.symbol == 'STAR'
assert model.width == 19082
assert model.format == 'foo'
assert model.type == 'TEXT_NUMBER'
assert model.id == 19082
assert model.title == 'foo'
assert model.locked_for_user == True
assert model.hidden == True
assert model.primary == True
assert model.system_column_type == 'AUTO_NUMBER'
assert model.locked == True
assert model.virtual_id == 19082
assert model.options[0] == 'foo'
assert isinstance(model.auto_number_format, smart.models.AutoNumberFormat)
model.tags = 'foo'
assert model.tags[0] == 'foo'
tmplist = smartsheet.types.TypedList(str)
tmplist.append('foo')
model.tags = tmplist
assert model.tags[0] == 'foo'
model.options = 'foo'
assert model.options[0] == 'foo'
tmplist = smartsheet.types.TypedList(str)
tmplist.append('foo')
model.options = tmplist
assert model.options[0] == 'foo'
model.autoNumberFormat = {}
assert isinstance(model.auto_number_format, smart.models.AutoNumberFormat)
as_dict = model.to_dict()
assert isinstance(as_dict, dict)
def test_report_column_snake(self, smart_setup):
smart = smart_setup['smart']
model = smart.models.ReportColumn({
'tags': ['foo'],
'index': 19082,
'symbol': 'STAR',
'width': 19082,
'title': 'foo',
'hidden': True,
'primary': True,
'locked': True,
'options': ['foo'],
'sheet_name_column': True,
'format': 'foo',
'type': 'TEXT_NUMBER',
'id': 19082,
'locked_for_user': True,
'system_column_type': 'AUTO_NUMBER',
'virtual_id': 19082,
'auto_number_format': smart.models.AutoNumberFormat()
})
assert model.sheet_name_column == True
assert model.tags[0] == 'foo'
assert model.index == 19082
assert model.symbol == 'STAR'
assert model.width == 19082
assert model.format == 'foo'
assert model.type == 'TEXT_NUMBER'
assert model.id == 19082
assert model.title == 'foo'
assert model.locked_for_user == True
assert model.hidden == True
assert model.primary == True
assert model.system_column_type == 'AUTO_NUMBER'
assert model.locked == True
assert model.virtual_id == 19082
assert model.options[0] == 'foo'
assert isinstance(model.auto_number_format, smart.models.AutoNumberFormat)
def test_search_result(self, smart_setup):
smart = smart_setup['smart']
# results, results
# total_count, totalCount
model = smart.models.SearchResult({
'results': smart.models.SearchResultItem(),
'totalCount': 19082
})
assert isinstance(model.results[0], smart.models.SearchResultItem)
assert model.total_count == 19082
as_dict = model.to_dict()
assert isinstance(as_dict, dict)
def test_search_result_snake(self, smart_setup):
smart = smart_setup['smart']
model = smart.models.SearchResult({
'results': smart.models.SearchResultItem(),
'totalCount': 19082
})
assert isinstance(model.results[0], smart.models.SearchResultItem)
assert model.total_count == 19082
def test_sheet_publish(self, smart_setup):
smart = smart_setup['smart']
# ical_enabled, icalEnabled
# ical_url, icalUrl
# read_only_full_enabled, readOnlyFullEnabled
# read_only_full_url, readOnlyFullUrl
# read_only_lite_enabled, readOnlyLiteEnabled
# read_only_lite_url, readOnlyLiteUrl
# read_write_enabled, readWriteEnabled
# read_write_url, readWriteUrl
model = smart.models.SheetPublish({
'icalEnabled': True,
'icalUrl': 'foo',
'readOnlyFullEnabled': True,
'readOnlyFullUrl': 'foo',
'readOnlyLiteEnabled': True,
'readOnlyLiteUrl': 'foo',
'readWriteEnabled': True,
'readWriteUrl': 'foo'
})
assert model.ical_enabled == True
assert model.ical_url == 'foo'
assert model.read_only_full_enabled == True
assert model.read_only_full_url == 'foo'
assert model.read_only_lite_enabled == True
assert model.read_only_lite_url == 'foo'
assert model.read_write_enabled == True
assert model.read_write_url == 'foo'
as_dict = model.to_dict()
assert isinstance(as_dict, dict)
def test_sheet_publish_snake(self, smart_setup):
smart = smart_setup['smart']
model = smart.models.SheetPublish({
'icalUrl': 'foo',
'readOnlyFullUrl': 'foo',
'readOnlyLiteUrl': 'foo',
'readWriteUrl': 'foo',
'ical_enabled': True,
'read_only_full_enabled': True,
'read_only_lite_enabled': True,
'read_write_enabled': True
})
assert model.ical_enabled == True
assert model.ical_url == 'foo'
assert model.read_only_full_enabled == True
assert model.read_only_full_url == 'foo'
assert model.read_only_lite_enabled == True
assert model.read_only_lite_url == 'foo'
assert model.read_write_enabled == True
assert model.read_write_url == 'foo'
def test_format_tables(self, smart_setup):
smart = smart_setup['smart']
# bold, bold
# color, color
# currency, currency
# decimal_count, decimalCount
# defaults, defaults
# font_family, fontFamily
# font_size, fontSize
# horizontal_align, horizontalAlign
# italic, italic
# number_format, numberFormat
# strikethrough, strikethrough
# text_wrap, textWrap
# thousands_separator, thousandsSeparator
# underline, underline
# vertical_align, verticalAlign
model = smart.models.FormatTables({
'bold': ['foo'],
'color': ['foo'],
'currency': smart.models.Currency(),
'decimalCount': ['foo'],
'defaults': 'foo',
'fontFamily': smart.models.FontFamily(),
'fontSize': ['foo'],
'horizontalAlign': ['foo'],
'italic': ['foo'],
'numberFormat': ['foo'],
'strikethrough': ['foo'],
'textWrap': ['foo'],
'thousandsSeparator': ['foo'],
'underline': ['foo'],
'verticalAlign': ['foo']
})
assert model.bold[0] == 'foo'
assert model.color[0] == 'foo'
assert isinstance(model.currency[0], smart.models.Currency)
assert model.decimal_count[0] == 'foo'
assert model.defaults == 'foo'
assert isinstance(model.font_family[0], smart.models.FontFamily)
assert model.font_size[0] == 'foo'
assert model.horizontal_align[0] == 'foo'
assert model.italic[0] == 'foo'
assert model.number_format[0] == 'foo'
assert model.strikethrough[0] == 'foo'
assert model.text_wrap[0] == 'foo'
assert model.thousands_separator[0] == 'foo'
assert model.underline[0] == 'foo'
assert model.vertical_align[0] == 'foo'
model.bold = 'foo'
assert model.bold[0] == 'foo'
tmplist = smartsheet.types.TypedList(str)
tmplist.append('foo')
model.bold = tmplist
assert model.bold[0] == 'foo'
model.color = 'foo'
assert model.color[0] == 'foo'
tmplist = smartsheet.types.TypedList(str)
tmplist.append('foo')
model.color = tmplist
assert model.color[0] == 'foo'
model.decimal_count = 'foo'
assert model.decimal_count[0] == 'foo'
tmplist = smartsheet.types.TypedList(str)
tmplist.append('foo')
model.decimal_count = tmplist
assert model.decimal_count[0] == 'foo'
model.font_size = 'foo'
assert model.font_size[0] == 'foo'
tmplist = smartsheet.types.TypedList(str)
tmplist.append('foo')
model.font_size = tmplist
assert model.font_size[0] == 'foo'
model.horizontal_align = 'foo'
assert model.horizontal_align[0] == 'foo'
tmplist = smartsheet.types.TypedList(str)
tmplist.append('foo')
model.horizontal_align = tmplist
assert model.horizontal_align[0] == 'foo'
model.italic = 'foo'
assert model.italic[0] == 'foo'
tmplist = smartsheet.types.TypedList(str)
tmplist.append('foo')
model.italic = tmplist
assert model.italic[0] == 'foo'
model.number_format = 'foo'
assert model.number_format[0] == 'foo'
tmplist = smartsheet.types.TypedList(str)
tmplist.append('foo')
model.number_format = tmplist
assert model.number_format[0] == 'foo'
model.strikethrough = 'foo'
assert model.strikethrough[0] == 'foo'
tmplist = smartsheet.types.TypedList(str)
tmplist.append('foo')
model.strikethrough = tmplist
assert model.strikethrough[0] == 'foo'
model.text_wrap = 'foo'
assert model.text_wrap[0] == 'foo'
tmplist = smartsheet.types.TypedList(str)
tmplist.append('foo')
model.text_wrap = tmplist
assert model.text_wrap[0] == 'foo'
model.thousands_separator = 'foo'
assert model.thousands_separator[0] == 'foo'
tmplist = smartsheet.types.TypedList(str)
tmplist.append('foo')
model.thousands_separator = tmplist
assert model.thousands_separator[0] == 'foo'
model.underline = 'foo'
assert model.underline[0] == 'foo'
tmplist = smartsheet.types.TypedList(str)
tmplist.append('foo')
model.underline = tmplist
assert model.underline[0] == 'foo'
model.vertical_align = 'foo'
assert model.vertical_align[0] == 'foo'
tmplist = smartsheet.types.TypedList(str)
tmplist.append('foo')
model.vertical_align = tmplist
assert model.vertical_align[0] == 'foo'
as_dict = model.to_dict()
assert isinstance(as_dict, dict)
def test_format_tables_snake(self, smart_setup):
smart = smart_setup['smart']
model = smart.models.FormatTables({
'bold': ['foo'],
'color': ['foo'],
'currency': smart.models.Currency(),
'defaults': 'foo',
'italic': ['foo'],
'strikethrough': ['foo'],
'underline': ['foo'],
'decimal_count': ['foo'],
'font_family': smart.models.FontFamily(),
'font_size': ['foo'],
'horizontal_align': ['foo'],
'number_format': ['foo'],
'text_wrap': ['foo'],
'thousands_separator': ['foo'],
'vertical_align': ['foo']
})
assert model.bold[0] == 'foo'
assert model.color[0] == 'foo'
assert isinstance(model.currency[0], smart.models.Currency)
assert model.decimal_count[0] == 'foo'
assert model.defaults == 'foo'
assert isinstance(model.font_family[0], smart.models.FontFamily)
assert model.font_size[0] == 'foo'
assert model.horizontal_align[0] == 'foo'
assert model.italic[0] == 'foo'
assert model.number_format[0] == 'foo'
assert model.strikethrough[0] == 'foo'
assert model.text_wrap[0] == 'foo'
assert model.thousands_separator[0] == 'foo'
assert model.underline[0] == 'foo'
assert model.vertical_align[0] == 'foo'
def test_update_request(self, smart_setup):
smart = smart_setup['smart']
# id, id
model = smart.models.UpdateRequest({
'id': 19082
})
assert model.id == 19082
as_dict = model.to_dict()
assert isinstance(as_dict, dict)
def test_update_request_snake(self, smart_setup):
smart = smart_setup['smart']
model = smart.models.UpdateRequest({
'id': 19082
})
assert model.id == 19082
def test_format_details(self, smart_setup):
smart = smart_setup['smart']
# paper_size, paperSize
model = smart.models.FormatDetails({
'paperSize': 'LETTER'
})
assert model.paper_size == 'LETTER'
as_dict = model.to_dict()
assert isinstance(as_dict, dict)
def test_format_details_snake(self, smart_setup):
smart = smart_setup['smart']
model = smart.models.FormatDetails({
'paper_size': 'LETTER'
})
assert model.paper_size == 'LETTER'
def test_multi_row_email(self, smart_setup):
smart = smart_setup['smart']
# row_ids, rowIds
model = smart.models.MultiRowEmail({
'rowIds': [19082]
})
assert model.row_ids[0] == 19082
model.row_ids = 19082
assert model.row_ids[0] == 19082
tmplist = smartsheet.types.TypedList(int)
tmplist.append(19082)
model.row_ids = tmplist
assert model.row_ids[0] == 19082
as_dict = model.to_dict()
assert isinstance(as_dict, dict)
def test_multi_row_email_snake(self, smart_setup):
smart = smart_setup['smart']
model = smart.models.MultiRowEmail({
'row_ids': [19082]
})
assert model.row_ids[0] == 19082
def test_downloaded_file(self, smart_setup):
smart = smart_setup['smart']
# download_directory, downloadDirectory
# filename, filename
# message, message
# resp, resp
# result_code, resultCode
model = smart.models.DownloadedFile({
'downloadDirectory': 'foo',
'filename': 'foo',
'message': 'foo',
'resultCode': 19082
})
assert model.download_directory == 'foo'
assert model.filename == 'foo'
assert model.message == 'foo'
assert model.result_code == 19082
as_dict = model.to_dict()
assert isinstance(as_dict, dict)
def test_downloaded_file_snake(self, smart_setup):
smart = smart_setup['smart']
model = smart.models.DownloadedFile({
'filename': 'foo',
'message': 'foo',
'resultCode': 19082,
'download_directory': 'foo'
})
assert model.download_directory == 'foo'
assert model.filename == 'foo'
assert model.message == 'foo'
assert model.result_code == 19082
def test_alternate_email(self, smart_setup):
smart = smart_setup['smart']
# confirmed, confirmed
# email, email
# id, id
model = smart.models.AlternateEmail({
'confirmed': True,
'email': 'foo',
'id': 19082
})
assert model.confirmed == True
assert model.email == 'foo'
assert model.id == 19082
as_dict = model.to_dict()
assert isinstance(as_dict, dict)
def test_alternate_email_snake(self, smart_setup):
smart = smart_setup['smart']
model = smart.models.AlternateEmail({
'confirmed': True,
'email': 'foo',
'id': 19082
})
assert model.confirmed == True
assert model.email == 'foo'
assert model.id == 19082
def test_search_result_item(self, smart_setup):
smart = smart_setup['smart']
# context_data, contextData
# object_id, objectId
# object_type, objectType
# parent_object_id, parentObjectId
# parent_object_name, parentObjectName
# parent_object_type, parentObjectType
# text, text
model = smart.models.SearchResultItem({
'contextData': ['foo'],
'objectId': 19082,
'objectType': 'row',
'parentObjectId': 19082,
'parentObjectName': 'foo',
'parentObjectType': 'workspace',
'text': 'foo'
})
assert model.context_data[0] == 'foo'
assert model.object_id == 19082
assert model.object_type == 'row'
assert model.parent_object_id == 19082
assert model.parent_object_name == 'foo'
assert model.parent_object_type == 'workspace'
assert model.text == 'foo'
model.context_data = 'foo'
assert model.context_data[0] == 'foo'
tmplist = smartsheet.types.TypedList(str)
tmplist.append('foo')
model.context_data = tmplist
assert model.context_data[0] == 'foo'
as_dict = model.to_dict()
assert isinstance(as_dict, dict)
def test_search_result_item_snake(self, smart_setup):
smart = smart_setup['smart']
model = smart.models.SearchResultItem({
'objectId': 19082,
'objectType': 'row',
'parentObjectId': 19082,
'parentObjectName': 'foo',
'parentObjectType': 'workspace',
'text': 'foo',
'context_data': ['foo']
})
assert model.context_data[0] == 'foo'
assert model.object_id == 19082
assert model.object_type == 'row'
assert model.parent_object_id == 19082
assert model.parent_object_name == 'foo'
assert model.parent_object_type == 'workspace'
assert model.text == 'foo'
def test_auto_number_format(self, smart_setup):
smart = smart_setup['smart']
# fill, fill
# prefix, prefix
# starting_number, startingNumber
# suffix, suffix
model = smart.models.AutoNumberFormat({
'fill': 'foo',
'prefix': 'foo',
'startingNumber': 19082,
'suffix': 'foo'
})
assert model.fill == 'foo'
assert model.prefix == 'foo'
assert model.starting_number == 19082
assert model.suffix == 'foo'
as_dict = model.to_dict()
assert isinstance(as_dict, dict)
def test_auto_number_format_snake(self, smart_setup):
smart = smart_setup['smart']
model = smart.models.AutoNumberFormat({
'fill': 'foo',
'prefix': 'foo',
'suffix': 'foo',
'starting_number': 19082
})
assert model.fill == 'foo'
assert model.prefix == 'foo'
assert model.starting_number == 19082
assert model.suffix == 'foo'
def test_sheet_user_settings(self, smart_setup):
smart = smart_setup['smart']
# critical_path_enabled, criticalPathEnabled
model = smart.models.SheetUserSettings({
'criticalPathEnabled': True
})
assert model.critical_path_enabled == True
as_dict = model.to_dict()
assert isinstance(as_dict, dict)
def test_sheet_user_settings_snake(self, smart_setup):
smart = smart_setup['smart']
model = smart.models.SheetUserSettings({
'critical_path_enabled': True
})
assert model.critical_path_enabled == True
def test_copy_or_move_row_result(self, smart_setup):
smart = smart_setup['smart']
# destination_sheet_id, destinationSheetId
# row_mappings, rowMappings
model = smart.models.CopyOrMoveRowResult({
'destinationSheetId': 19082,
'rowMappings': smart.models.RowMapping()
})
assert model.destination_sheet_id == 19082
assert isinstance(model.row_mappings[0], smart.models.RowMapping)
as_dict = model.to_dict()
assert isinstance(as_dict, dict)
def test_copy_or_move_row_result_snake(self, smart_setup):
smart = smart_setup['smart']
model = smart.models.CopyOrMoveRowResult({
'destination_sheet_id': 19082,
'row_mappings': smart.models.RowMapping()
})
assert model.destination_sheet_id == 19082
assert isinstance(model.row_mappings[0], smart.models.RowMapping)
def test_container_destination(self, smart_setup):
smart = smart_setup['smart']
# destination_id, destinationId
# destination_type, destinationType
# new_name, newName
model = smart.models.ContainerDestination({
'destinationId': 19082,
'destinationType': 'home',
'newName': 'foo'
})
assert model.destination_id == 19082
assert model.destination_type == 'home'
assert model.new_name == 'foo'
as_dict = model.to_dict()
assert isinstance(as_dict, dict)
def test_container_destination_snake(self, smart_setup):
smart = smart_setup['smart']
model = smart.models.ContainerDestination({
'destination_id': 19082,
'destination_type': 'home',
'new_name': 'foo'
})
assert model.destination_id == 19082
assert model.destination_type == 'home'
assert model.new_name == 'foo'
def test_copy_or_move_row_directive(self, smart_setup):
smart = smart_setup['smart']
# row_ids, rowIds
# to, to
model = smart.models.CopyOrMoveRowDirective({
'rowIds': [19082],
'to': smart.models.CopyOrMoveRowDestination()
})
assert model.row_ids[0] == 19082
assert isinstance(model.to, smart.models.CopyOrMoveRowDestination)
model.row_ids = 19082
assert model.row_ids[0] == 19082
tmplist = smartsheet.types.TypedList(int)
tmplist.append(19082)
model.row_ids = tmplist
assert model.row_ids[0] == 19082
model.to = {}
assert isinstance(model.to, smart.models.CopyOrMoveRowDestination)
as_dict = model.to_dict()
assert isinstance(as_dict, dict)
def test_copy_or_move_row_directive_snake(self, smart_setup):
smart = smart_setup['smart']
model = smart.models.CopyOrMoveRowDirective({
'to': smart.models.CopyOrMoveRowDestination(),
'row_ids': [19082]
})
assert model.row_ids[0] == 19082
assert isinstance(model.to, smart.models.CopyOrMoveRowDestination)
def test_copy_or_move_row_destination(self, smart_setup):
smart = smart_setup['smart']
# sheet_id, sheetId
model = smart.models.CopyOrMoveRowDestination({
'sheetId': 19082
})
assert model.sheet_id == 19082
as_dict = model.to_dict()
assert isinstance(as_dict, dict)
def test_copy_or_move_row_destination_snake(self, smart_setup):
smart = smart_setup['smart']
model = smart.models.CopyOrMoveRowDestination({
'sheet_id': 19082
})
assert model.sheet_id == 19082
|
451163
|
import os
import pcbnew
import webbrowser
import shutil
import json
import requests
import re
import wx
import time
import tempfile
from threading import Thread
from .result_event import *
from .config import *
class PushThread(Thread):
def __init__(self, wxObject):
Thread.__init__(self)
self.wxObject = wxObject
self.start()
def run(self):
temp_dir = tempfile.mkdtemp()
_, temp_file = tempfile.mkstemp()
board = pcbnew.GetBoard()
title_block = board.GetTitleBlock()
self.report(10)
match = re.match(
'^AISLER Project ID: ([A-Z]{8})$',
title_block.GetComment(commentLineIdx))
if match:
project_id = match.group(1)
else:
project_id = False
# Override a few design parameters as our CAM takes care of this
settings = board.GetDesignSettings()
settings.m_SolderMaskMargin = 0
settings.m_SolderMaskMinWidth = 0
pctl = pcbnew.PLOT_CONTROLLER(board)
popt = pctl.GetPlotOptions()
popt.SetOutputDirectory(temp_dir)
popt.SetPlotFrameRef(False)
popt.SetSketchPadLineWidth(pcbnew.FromMM(0.1))
popt.SetAutoScale(False)
popt.SetScale(1)
popt.SetMirror(False)
popt.SetUseGerberAttributes(True)
try: # kicad >= 6.99
popt.SetExcludeEdgeLayer(True)
except AttributeError: # kicad <7
pass
popt.SetUseGerberProtelExtensions(False)
popt.SetUseAuxOrigin(True)
popt.SetSubtractMaskFromSilk(False)
popt.SetDrillMarksType(0) # NO_DRILL_SHAPE
self.report(15)
for layer_info in plotPlan:
if board.IsLayerEnabled(layer_info[1]):
pctl.SetLayer(layer_info[1])
pctl.OpenPlotfile(
layer_info[0],
pcbnew.PLOT_FORMAT_GERBER,
layer_info[2])
pctl.PlotLayer()
pctl.ClosePlot()
# Write excellon drill files
self.report(20)
drlwriter = pcbnew.EXCELLON_WRITER(board)
# mirror, header, offset, mergeNPTH
drlwriter.SetOptions(
False,
True,
board.GetDesignSettings().GetAuxOrigin(),
False)
drlwriter.SetFormat(False)
drlwriter.CreateDrillandMapFilesSet(pctl.GetPlotDirName(), True, False)
# Write netlist to enable Smart Tests
self.report(25)
netlist_writer = pcbnew.IPC356D_WRITER(board)
netlist_writer.Write(os.path.join(temp_dir, netlistFilename))
# Export component list
self.report(30)
components = []
if hasattr(board, 'GetModules'):
footprints = list(board.GetModules())
else:
footprints = list(board.GetFootprints())
for i, f in enumerate(footprints):
try:
footprint_name = str(f.GetFPID().GetFootprintName())
except AttributeError:
footprint_name = str(f.GetFPID().GetLibItemName())
layer = {
pcbnew.F_Cu: 'top',
pcbnew.B_Cu: 'bottom',
}.get(f.GetLayer())
mount_type = {
0: 'smt',
1: 'tht',
2: 'smt'
}.get(f.GetAttributes())
angle = f.GetOrientation()
try: # kicad >= 6.99
angle = angle.AsDegrees()
except AttributeError: # kicad <7
angle /= 10.0
components.append({
'pos_x': (f.GetPosition()[0] - board.GetDesignSettings().GetAuxOrigin()[0]) / 1000000.0,
'pos_y': (f.GetPosition()[1] - board.GetDesignSettings().GetAuxOrigin()[1]) * -1.0 / 1000000.0,
'rotation': angle,
'side': layer,
'designator': f.GetReference(),
'mpn': self.getMpnFromFootprint(f),
'pack': footprint_name,
'value': f.GetValue(),
'mount_type': mount_type
})
with open((os.path.join(temp_dir, componentsFilename)), 'w') as outfile:
json.dump(components, outfile)
# Create ZIP file
temp_file = shutil.make_archive(temp_file, 'zip', temp_dir)
files = {'upload[file]': open(temp_file, 'rb')}
self.report(40)
if project_id:
data = {}
data['upload_url'] = baseUrl + '/p/' + project_id + '/uploads.json'
else:
rsp = requests.get(baseUrl + '/p/new.json?ref=KiCadPush')
data = json.loads(rsp.content)
if not title_block.GetComment(commentLineIdx):
title_block.SetComment(
commentLineIdx,
'AISLER Project ID: ' +
data['project_id'])
rsp = requests.post(
data['upload_url'], files=files, data={
'upload[title]': title_block.GetTitle()})
urls = json.loads(rsp.content)
progress = 0
while progress < 100:
time.sleep(pollingInterval)
progress = json.loads(
requests.get(
urls['callback']).content)['progress']
self.report(40 + progress / 1.7)
webbrowser.open(urls['redirect'])
self.report(-1)
def report(self, status):
wx.PostEvent(self.wxObject, ResultEvent(status))
    def getMpnFromFootprint(self, f):
        # Return the part number from the first matching footprint property, if any
        keys = ['mpn', 'MPN', 'Mpn', 'AISLER_MPN']
        for key in keys:
            if f.HasProperty(key):
                return f.GetProperty(key)
        return None
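# Usage sketch (added for illustration; assumes a wx window wired to ResultEvent
# via .result_event, as the plugin's dialog is). The thread starts itself and
# posts progress values (0-100, then -1 once the browser opens) back to wx:
#
#   PushThread(dialog)   # 'dialog' is a hypothetical wx.Frame handling ResultEvent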
|
451176
|
import os
import pytest
from ._util import assert_simple_file_realize
try:
from fs_gcsfs import GCSFS
except ImportError:
GCSFS = None
SCRIPT_DIRECTORY = os.path.abspath(os.path.dirname(__file__))
FILE_SOURCES_CONF = os.path.join(SCRIPT_DIRECTORY, "gcsfs_file_sources_conf.yml")
skip_if_no_gcsfs_libs = pytest.mark.skipif(
not GCSFS,
reason="Required lib to run gcs file source test: fs_gcsfs is not available"
)
@skip_if_no_gcsfs_libs
def test_file_source():
assert_simple_file_realize(FILE_SOURCES_CONF, recursive=False, filename="README", contents="1000genomes",
contains=True)
|
451207
|
import sys, os, os.path, glob
try:
    import cPickle  # Python 2
except ImportError:
    import pickle as cPickle  # Python 3
from scipy.io import loadmat
import numpy
from multiprocessing import Process, Queue
import torch
from torch.autograd import Variable
N_CLASSES = 527
N_WORKERS = 6
GAS_FEATURE_DIR = '../../data/audioset'
DCASE_FEATURE_DIR = '../../data/dcase'
with open(os.path.join(GAS_FEATURE_DIR, 'normalizer.pkl'), 'rb') as f:
mu, sigma = cPickle.load(f)
def sample_generator(file_list, random_seed = 15213):
rng = numpy.random.RandomState(random_seed)
while True:
rng.shuffle(file_list)
for filename in file_list:
data = loadmat(filename)
feat = ((data['feat'] - mu) / sigma).astype('float32')
labels = data['labels'].astype('float32')
for i in range(len(data['feat'])):
yield feat[i], labels[i]
def worker(queues, file_lists, random_seed):
generators = [sample_generator(file_lists[i], random_seed + i) for i in range(len(file_lists))]
while True:
for gen, q in zip(generators, queues):
q.put(next(gen))
def batch_generator(batch_size, random_seed = 15213):
queues = [Queue(5) for class_id in range(N_CLASSES)]
file_lists = [sorted(glob.glob(os.path.join(GAS_FEATURE_DIR, 'GAS_train_unbalanced_class%03d_part*.mat' % class_id))) for class_id in range(N_CLASSES)]
for worker_id in range(N_WORKERS):
p = Process(target = worker, args = (queues[worker_id::N_WORKERS], file_lists[worker_id::N_WORKERS], random_seed))
p.daemon = True
p.start()
rng = numpy.random.RandomState(random_seed)
batch = []
while True:
rng.shuffle(queues)
for q in queues:
batch.append(q.get())
if len(batch) == batch_size:
yield tuple(Variable(torch.from_numpy(numpy.stack(x))).cuda() for x in zip(*batch))
batch = []
def bulk_load(prefix):
feat = []; labels = []; hashes = []
for filename in sorted(glob.glob(os.path.join(GAS_FEATURE_DIR, '%s_*.mat' % prefix)) +
glob.glob(os.path.join(DCASE_FEATURE_DIR, '%s_*.mat' % prefix))):
data = loadmat(filename)
feat.append(((data['feat'] - mu) / sigma).astype('float32'))
labels.append(data['labels'].astype('bool'))
hashes.append(data['hashes'])
return numpy.concatenate(feat), numpy.concatenate(labels), numpy.concatenate(hashes)
def load_dcase_test_frame_truth():
return cPickle.load(open(os.path.join(DCASE_FEATURE_DIR, 'DCASE_test_frame_label.pkl'), 'rb'))
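# Usage sketch (added; assumes the GAS/DCASE .mat feature files described above
# and a CUDA device; the 'GAS_valid' prefix is hypothetical):
#
#   gen = batch_generator(batch_size=250)
#   feat, labels = next(gen)                      # CUDA Variables, class-balanced
#   feat_all, labels_all, hashes = bulk_load('GAS_valid')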
|
451256
|
import unittest
import numpy as np
import paramak
class TestCuttingWedgeFS(unittest.TestCase):
def test_shape_construction_and_volume(self):
"""Makes cutting cylinders from shapes and checks the
volume of the cutter shape is larger than the shape it
encompasses."""
hoop_shape = paramak.PoloidalFieldCoil(
height=20, width=20, center_point=(50, 200), rotation_angle=180
)
cutter = paramak.CuttingWedgeFS(
shape=hoop_shape,
azimuth_placement_angle=0,
)
assert cutter.volume() > hoop_shape.volume()
def test_invalid_parameters_errors(self):
"""Checks that the correct errors are raised when invalid arguments are input as
shape parameters."""
shape = paramak.ExtrudeStraightShape(
distance=1, points=[(0, 0), (0, 1), (1, 1)], rotation_angle=180
)
cutter = paramak.CuttingWedgeFS(
shape=shape,
azimuth_placement_angle=0,
)
def incorrect_rotation_angle():
shape.rotation_angle = 360
cutter.solid
def incorrect_shape_points():
shape.rotation_angle = 180
cutter.shape.points = [(0, 0, "straight")]
cutter.solid
def incorrect_shape_rotation_angle():
cutter.shape.points = [(0, 0), (0, 1), (1, 1)]
shape.rotation_angle = 360
cutter.shape = shape
self.assertRaises(ValueError, incorrect_rotation_angle)
self.assertRaises(ValueError, incorrect_shape_points)
self.assertRaises(ValueError, incorrect_shape_rotation_angle)
def test_different_workplanes(self):
"""Test that checks the cutting wedge can be correctly applied to a
shape with non-default workplane and rotation_axis
"""
rectangle = paramak.ExtrudeStraightShape(
2,
points=[(-0.5, -0.5), (0.5, -0.5), (0.5, 0.5), (-0.5, 0.5)],
workplane="XY",
rotation_axis="Z",
)
rectangle.rotation_angle = 360
volume_full = rectangle.volume()
assert np.isclose(volume_full, 2)
rectangle.rotation_angle = 90
volume_quarter = rectangle.volume()
assert np.isclose(volume_quarter, 0.5)
|
451261
|
import os
from icrawler.builtin import BaiduImageCrawler
from icrawler.builtin import BingImageCrawler
def check_path(path):
if not os.path.exists(path):
os.makedirs(path)
return path
def baidu_bing_crawl(key_words=['中国人'], max_nums=[1000], save_root=r'./'):
    assert len(key_words) == len(max_nums), "key_words and max_nums must have the same length"
    # Crawl with both engines
    save_root1 = os.path.join(save_root, 'baidu')
    # Baidu crawler
for i in range(len(key_words)):
print('-'*20)
image_save_root = os.path.join(save_root1, str(i))
if not os.path.exists(image_save_root):
os.makedirs(image_save_root)
storage = {'root_dir': image_save_root}
crawler = BaiduImageCrawler(storage=storage)
crawler.crawl(key_words[i], max_num=max_nums[i])
    # Bing crawler
save_root2 = os.path.join(save_root, 'bing')
for i in range(len(key_words)):
print('-'*20)
image_save_root = os.path.join(save_root2, str(i))
if not os.path.exists(image_save_root):
os.makedirs(image_save_root)
storage = {'root_dir': image_save_root}
crawler = BingImageCrawler(storage=storage)
crawler.crawl(key_words[i], max_num=max_nums[i])
return
if __name__ == '__main__':
    baidu_bing_crawl(key_words=['砂石料场', '河道内的砂石料场', '岸边的木材堆', '化学工厂丢弃的大塑料桶堆'],
max_nums=[1000, 1000, 200, 500],
save_root=r'F:\DataSets')
|
451327
|
import tqdm
from io import BytesIO
from multiprocessing.pool import ThreadPool
from PIL import Image
from torchero.utils.io import download_from_url
def download_image(url):
""" Download an image from an url
Arguments:
url (str): Url of the image
Returns:
The downloaded images as a PIL Image object
"""
buffer = BytesIO()
download_from_url(url, buffer, pbar=False)
buffer.seek(0)
return Image.open(buffer)
def download_images(urls, num_workers=1, pbar=True):
""" Download multiples images
Arguments:
url (list of str): List of urls to download
Returns:
An iterator of PIL Images for the downloaded images
"""
with ThreadPool(num_workers) as pool:
images = pool.imap(download_image, urls)
if pbar:
images = tqdm.tqdm(images, total=len(urls), unit='image')
yield from images
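# Minimal usage sketch (added; the URLs are placeholders and require real image
# endpoints plus network access):
#
#   for image in download_images(["https://example.com/a.jpg"], num_workers=2):
#       print(image.size)   # each item is a PIL.Image.Image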
|
451362
|
import io
class Brainfuck:
@staticmethod
    def cleanup(code):
        # Keep only the supported commands; ',' (input) is not handled by this interpreter
        return "".join(filter(lambda x: x in [".", "[", "]", "<", ">", "+", "-"], code))
@staticmethod
def getlines(code):
return [code[i : i + 50] for i in range(0, len(code), 50)]
@staticmethod
def buildbracemap(code):
temp_bracestack, bracemap = [], {}
for position, command in enumerate(code):
if command == "[":
temp_bracestack.append(position)
elif command == "]":
start = temp_bracestack.pop()
bracemap[start] = position
bracemap[position] = start
return bracemap
@staticmethod
def evaluate(code):
code = Brainfuck.cleanup(list(code))
bracemap = Brainfuck.buildbracemap(code)
cells, codeptr, cellptr, prev = [0], 0, 0, -1
output = io.StringIO("")
while codeptr < len(code):
command = code[codeptr]
if command == ">":
cellptr += 1
if cellptr == len(cells):
cells.append(0)
elif command == "<":
cellptr = 0 if cellptr <= 0 else cellptr - 1
elif command == "+":
cells[cellptr] = cells[cellptr] + 1 if cells[cellptr] < 255 else 0
elif command == "-":
cells[cellptr] = cells[cellptr] - 1 if cells[cellptr] > 0 else 255
elif command == "[":
if cells[cellptr] == 0:
codeptr = bracemap[codeptr]
else:
prev = cells[cellptr]
elif command == "]":
if cells[cellptr] == 0:
prev = 0
else:
if cells[cellptr] == prev:
lines = Brainfuck.getlines("".join(code))
errorptr = codeptr % 50
raise SyntaxError(
f"Infinite loop: []", ("program.bf", len(lines), errorptr, lines[-1])
)
else:
codeptr = bracemap[codeptr]
elif command == ".":
output.write(chr(cells[cellptr]))
codeptr += 1
return output, cells
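if __name__ == "__main__":
    # Demo added for illustration: the loop leaves 8 * 8 = 64 in the second cell,
    # plus one is 65, so this program prints "A".
    out, _ = Brainfuck.evaluate("++++++++[>++++++++<-]>+.")
    print(out.getvalue())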
|
451413
|
from flask import Blueprint
from flask import jsonify
from flask import request
from yelp_beans.logic.user import get_user
user_blueprint = Blueprint('user', __name__)
@user_blueprint.route('/', methods=["GET"])
def user_api():
user = get_user(request.args.get('email'))
if not user:
resp = jsonify({})
resp.status_code = 200
return resp
resp = jsonify({
'first_name': user.first_name,
'last_name': user.last_name,
'photo_url': user.photo_url,
'metadata': user.meta_data
})
resp.status_code = 200
return resp
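# Wiring sketch (added; app setup and URL prefix are illustrative):
#
#   from flask import Flask
#   app = Flask(__name__)
#   app.register_blueprint(user_blueprint, url_prefix='/v1/user')
#   # GET /v1/user/?email=sam@example.com -> user JSON, or {} when not found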
|
451417
|
class Pickleable(object):
"""
Base class that implements getstate/setstate, since most of the classes are overriding getattr.
"""
def __getstate__(self):
return self.__dict__
def __setstate__(self, data):
self.__dict__.update(data)
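# Round-trip sketch (added): state passes through __dict__ directly, so pickling
# keeps working even when a subclass customizes attribute lookup.
#
#   import pickle
#   class Config(Pickleable):              # hypothetical subclass
#       def __getattr__(self, name):
#           raise AttributeError(name)
#   c = Config(); c.value = 42
#   assert pickle.loads(pickle.dumps(c)).value == 42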
|
451454
|
from collections import OrderedDict
import io
import logging
import os
import h5py
import kaldiio
import numpy as np
import soundfile
from espnet.transform.transformation import Transformation
class LoadInputsAndTargets(object):
"""Create a mini-batch from a list of dicts
>>> batch = [('utt1',
... dict(input=[dict(feat='some.ark:123',
... filetype='mat',
... name='input1',
... shape=[100, 80])],
... output=[dict(tokenid='1 2 3 4',
... name='target1',
    ...                             shape=[4, 31])]))]
>>> l = LoadInputsAndTargets()
>>> feat, target = l(batch)
    :param: str mode: Specify the task mode, "asr", "tts", "mt" or "vc"
:param: str preprocess_conf: The path of a json file for pre-processing
:param: bool load_input: If False, not to load the input data
:param: bool load_output: If False, not to load the output data
:param: bool sort_in_input_length: Sort the mini-batch in descending order
of the input length
:param: bool use_speaker_embedding: Used for tts mode only
:param: bool use_second_target: Used for tts mode only
:param: dict preprocess_args: Set some optional arguments for preprocessing
    :param: bool keep_all_data_on_mem: If True, cache loaded data in memory
"""
def __init__(
self,
mode="asr",
preprocess_conf=None,
load_input=True,
load_output=True,
sort_in_input_length=True,
use_speaker_embedding=False,
use_second_target=False,
preprocess_args=None,
keep_all_data_on_mem=False,
):
self._loaders = {}
if mode not in ["asr", "tts", "mt", "vc"]:
raise ValueError("Only asr or tts are allowed: mode={}".format(mode))
if preprocess_conf is not None:
self.preprocessing = Transformation(preprocess_conf)
logging.warning(
"[Experimental feature] Some preprocessing will be done "
"for the mini-batch creation using {}".format(self.preprocessing)
)
else:
            # If conf doesn't exist, this function doesn't touch anything.
self.preprocessing = None
if use_second_target and use_speaker_embedding and mode == "tts":
raise ValueError(
                'Choose one of "use_second_target" and "use_speaker_embedding"'
)
if (
(use_second_target or use_speaker_embedding)
and mode != "tts"
and mode != "vc"
):
logging.warning(
'"use_second_target" and "use_speaker_embedding" is '
"used only for tts or vc mode"
)
self.mode = mode
self.load_output = load_output
self.load_input = load_input
self.sort_in_input_length = sort_in_input_length
self.use_speaker_embedding = use_speaker_embedding
self.use_second_target = use_second_target
if preprocess_args is None:
self.preprocess_args = {}
else:
assert isinstance(preprocess_args, dict), type(preprocess_args)
self.preprocess_args = dict(preprocess_args)
self.keep_all_data_on_mem = keep_all_data_on_mem
def __call__(self, batch, return_uttid=False):
"""Function to load inputs and targets from list of dicts
:param List[Tuple[str, dict]] batch: list of dict which is subset of
loaded data.json
:param bool return_uttid: return utterance ID information for visualization
:return: list of input token id sequences [(L_1), (L_2), ..., (L_B)]
:return: list of input feature sequences
[(T_1, D), (T_2, D), ..., (T_B, D)]
:rtype: list of float ndarray
:return: list of target token id sequences [(L_1), (L_2), ..., (L_B)]
:rtype: list of int ndarray
"""
x_feats_dict = OrderedDict() # OrderedDict[str, List[np.ndarray]]
y_feats_dict = OrderedDict() # OrderedDict[str, List[np.ndarray]]
uttid_list = [] # List[str]
for uttid, info in batch:
uttid_list.append(uttid)
if self.load_input:
# Note(kamo): This for-loop is for multiple inputs
for idx, inp in enumerate(info["input"]):
# {"input":
# [{"feat": "some/path.h5:F01_050C0101_PED_REAL",
# "filetype": "hdf5",
# "name": "input1", ...}], ...}
x = self._get_from_loader(
filepath=inp["feat"], filetype=inp.get("filetype", "mat")
)
x_feats_dict.setdefault(inp["name"], []).append(x)
# FIXME(kamo): Dirty way to load only speaker_embedding
elif self.mode == "tts" and self.use_speaker_embedding:
for idx, inp in enumerate(info["input"]):
if idx != 1 and len(info["input"]) > 1:
x = None
else:
x = self._get_from_loader(
filepath=inp["feat"], filetype=inp.get("filetype", "mat")
)
x_feats_dict.setdefault(inp["name"], []).append(x)
if self.load_output:
if self.mode == "mt":
x = np.fromiter(
map(int, info["output"][1]["tokenid"].split()), dtype=np.int64
)
x_feats_dict.setdefault(info["output"][1]["name"], []).append(x)
for idx, inp in enumerate(info["output"]):
if "tokenid" in inp:
# ======= Legacy format for output =======
# {"output": [{"tokenid": "1 2 3 4"}])
x = np.fromiter(
map(int, inp["tokenid"].split()), dtype=np.int64
)
else:
# ======= New format =======
# {"input":
# [{"feat": "some/path.h5:F01_050C0101_PED_REAL",
# "filetype": "hdf5",
# "name": "target1", ...}], ...}
x = self._get_from_loader(
filepath=inp["feat"], filetype=inp.get("filetype", "mat")
)
y_feats_dict.setdefault(inp["name"], []).append(x)
if self.mode == "asr":
return_batch, uttid_list = self._create_batch_asr(
x_feats_dict, y_feats_dict, uttid_list
)
elif self.mode == "tts":
_, info = batch[0]
eos = int(info["output"][0]["shape"][1]) - 1
return_batch, uttid_list = self._create_batch_tts(
x_feats_dict, y_feats_dict, uttid_list, eos
)
elif self.mode == "mt":
return_batch, uttid_list = self._create_batch_mt(
x_feats_dict, y_feats_dict, uttid_list
)
elif self.mode == "vc":
return_batch, uttid_list = self._create_batch_vc(
x_feats_dict, y_feats_dict, uttid_list
)
else:
raise NotImplementedError(self.mode)
if self.preprocessing is not None:
# Apply pre-processing all input features
for x_name in return_batch.keys():
if x_name.startswith("input"):
return_batch[x_name] = self.preprocessing(
return_batch[x_name], uttid_list, **self.preprocess_args
)
if return_uttid:
return tuple(return_batch.values()), uttid_list
# Doesn't return the names now.
return tuple(return_batch.values())
def _create_batch_asr(self, x_feats_dict, y_feats_dict, uttid_list):
"""Create a OrderedDict for the mini-batch
:param OrderedDict x_feats_dict:
e.g. {"input1": [ndarray, ndarray, ...],
"input2": [ndarray, ndarray, ...]}
:param OrderedDict y_feats_dict:
e.g. {"target1": [ndarray, ndarray, ...],
"target2": [ndarray, ndarray, ...]}
:param: List[str] uttid_list:
Give uttid_list to sort in the same order as the mini-batch
:return: batch, uttid_list
:rtype: Tuple[OrderedDict, List[str]]
"""
# handle single-input and multi-input (parallel) asr mode
xs = list(x_feats_dict.values())
if self.load_output:
ys = list(y_feats_dict.values())
assert len(xs[0]) == len(ys[0]), (len(xs[0]), len(ys[0]))
# get index of non-zero length samples
nonzero_idx = list(filter(lambda i: len(ys[0][i]) > 0, range(len(ys[0]))))
for n in range(1, len(y_feats_dict)):
nonzero_idx = filter(lambda i: len(ys[n][i]) > 0, nonzero_idx)
else:
# Note(kamo): Be careful not to make nonzero_idx to a generator
nonzero_idx = list(range(len(xs[0])))
if self.sort_in_input_length:
# sort in input lengths based on the first input
nonzero_sorted_idx = sorted(nonzero_idx, key=lambda i: -len(xs[0][i]))
else:
nonzero_sorted_idx = nonzero_idx
if len(nonzero_sorted_idx) != len(xs[0]):
logging.warning(
"Target sequences include empty tokenid (batch {} -> {}).".format(
len(xs[0]), len(nonzero_sorted_idx)
)
)
# remove zero-length samples
xs = [[x[i] for i in nonzero_sorted_idx] for x in xs]
uttid_list = [uttid_list[i] for i in nonzero_sorted_idx]
x_names = list(x_feats_dict.keys())
if self.load_output:
ys = [[y[i] for i in nonzero_sorted_idx] for y in ys]
y_names = list(y_feats_dict.keys())
# Keeping x_name and y_name, e.g. input1, for future extension
return_batch = OrderedDict(
[
*[(x_name, x) for x_name, x in zip(x_names, xs)],
*[(y_name, y) for y_name, y in zip(y_names, ys)],
]
)
else:
return_batch = OrderedDict([(x_name, x) for x_name, x in zip(x_names, xs)])
return return_batch, uttid_list
def _create_batch_mt(self, x_feats_dict, y_feats_dict, uttid_list):
"""Create a OrderedDict for the mini-batch
:param OrderedDict x_feats_dict:
:param OrderedDict y_feats_dict:
:return: batch, uttid_list
:rtype: Tuple[OrderedDict, List[str]]
"""
# Create a list from the first item
xs = list(x_feats_dict.values())[0]
if self.load_output:
ys = list(y_feats_dict.values())[0]
assert len(xs) == len(ys), (len(xs), len(ys))
# get index of non-zero length samples
nonzero_idx = filter(lambda i: len(ys[i]) > 0, range(len(ys)))
else:
nonzero_idx = range(len(xs))
if self.sort_in_input_length:
# sort in input lengths
nonzero_sorted_idx = sorted(nonzero_idx, key=lambda i: -len(xs[i]))
else:
nonzero_sorted_idx = nonzero_idx
if len(nonzero_sorted_idx) != len(xs):
logging.warning(
"Target sequences include empty tokenid (batch {} -> {}).".format(
len(xs), len(nonzero_sorted_idx)
)
)
# remove zero-length samples
xs = [xs[i] for i in nonzero_sorted_idx]
uttid_list = [uttid_list[i] for i in nonzero_sorted_idx]
x_name = list(x_feats_dict.keys())[0]
if self.load_output:
ys = [ys[i] for i in nonzero_sorted_idx]
y_name = list(y_feats_dict.keys())[0]
return_batch = OrderedDict([(x_name, xs), (y_name, ys)])
else:
return_batch = OrderedDict([(x_name, xs)])
return return_batch, uttid_list
def _create_batch_tts(self, x_feats_dict, y_feats_dict, uttid_list, eos):
"""Create a OrderedDict for the mini-batch
:param OrderedDict x_feats_dict:
e.g. {"input1": [ndarray, ndarray, ...],
"input2": [ndarray, ndarray, ...]}
:param OrderedDict y_feats_dict:
e.g. {"target1": [ndarray, ndarray, ...],
"target2": [ndarray, ndarray, ...]}
:param: List[str] uttid_list:
:param int eos:
:return: batch, uttid_list
:rtype: Tuple[OrderedDict, List[str]]
"""
# Use the output values as the input feats for tts mode
xs = list(y_feats_dict.values())[0]
# get index of non-zero length samples
nonzero_idx = list(filter(lambda i: len(xs[i]) > 0, range(len(xs))))
# sort in input lengths
if self.sort_in_input_length:
# sort in input lengths
nonzero_sorted_idx = sorted(nonzero_idx, key=lambda i: -len(xs[i]))
else:
nonzero_sorted_idx = nonzero_idx
# remove zero-length samples
xs = [xs[i] for i in nonzero_sorted_idx]
uttid_list = [uttid_list[i] for i in nonzero_sorted_idx]
# Added eos into input sequence
xs = [np.append(x, eos) for x in xs]
if self.load_input:
ys = list(x_feats_dict.values())[0]
assert len(xs) == len(ys), (len(xs), len(ys))
ys = [ys[i] for i in nonzero_sorted_idx]
spembs = None
spcs = None
spembs_name = "spembs_none"
spcs_name = "spcs_none"
if self.use_second_target:
spcs = list(x_feats_dict.values())[1]
spcs = [spcs[i] for i in nonzero_sorted_idx]
spcs_name = list(x_feats_dict.keys())[1]
if self.use_speaker_embedding:
spembs = list(x_feats_dict.values())[1]
spembs = [spembs[i] for i in nonzero_sorted_idx]
spembs_name = list(x_feats_dict.keys())[1]
x_name = list(y_feats_dict.keys())[0]
y_name = list(x_feats_dict.keys())[0]
return_batch = OrderedDict(
[(x_name, xs), (y_name, ys), (spembs_name, spembs), (spcs_name, spcs)]
)
elif self.use_speaker_embedding:
if len(x_feats_dict) == 0:
raise IndexError("No speaker embedding is provided")
elif len(x_feats_dict) == 1:
spembs_idx = 0
else:
spembs_idx = 1
spembs = list(x_feats_dict.values())[spembs_idx]
spembs = [spembs[i] for i in nonzero_sorted_idx]
x_name = list(y_feats_dict.keys())[0]
spembs_name = list(x_feats_dict.keys())[spembs_idx]
return_batch = OrderedDict([(x_name, xs), (spembs_name, spembs)])
else:
x_name = list(y_feats_dict.keys())[0]
return_batch = OrderedDict([(x_name, xs)])
return return_batch, uttid_list
def _create_batch_vc(self, x_feats_dict, y_feats_dict, uttid_list):
"""Create a OrderedDict for the mini-batch
:param OrderedDict x_feats_dict:
e.g. {"input1": [ndarray, ndarray, ...],
"input2": [ndarray, ndarray, ...]}
:param OrderedDict y_feats_dict:
e.g. {"target1": [ndarray, ndarray, ...],
"target2": [ndarray, ndarray, ...]}
:param: List[str] uttid_list:
:return: batch, uttid_list
:rtype: Tuple[OrderedDict, List[str]]
"""
# Create a list from the first item
xs = list(x_feats_dict.values())[0]
# get index of non-zero length samples
nonzero_idx = list(filter(lambda i: len(xs[i]) > 0, range(len(xs))))
# sort in input lengths
if self.sort_in_input_length:
# sort in input lengths
nonzero_sorted_idx = sorted(nonzero_idx, key=lambda i: -len(xs[i]))
else:
nonzero_sorted_idx = nonzero_idx
# remove zero-length samples
xs = [xs[i] for i in nonzero_sorted_idx]
uttid_list = [uttid_list[i] for i in nonzero_sorted_idx]
if self.load_output:
ys = list(y_feats_dict.values())[0]
assert len(xs) == len(ys), (len(xs), len(ys))
ys = [ys[i] for i in nonzero_sorted_idx]
spembs = None
spcs = None
spembs_name = "spembs_none"
spcs_name = "spcs_none"
if self.use_second_target:
raise ValueError("Currently second target not supported.")
spcs = list(x_feats_dict.values())[1]
spcs = [spcs[i] for i in nonzero_sorted_idx]
spcs_name = list(x_feats_dict.keys())[1]
if self.use_speaker_embedding:
spembs = list(x_feats_dict.values())[1]
spembs = [spembs[i] for i in nonzero_sorted_idx]
spembs_name = list(x_feats_dict.keys())[1]
x_name = list(x_feats_dict.keys())[0]
y_name = list(y_feats_dict.keys())[0]
return_batch = OrderedDict(
[(x_name, xs), (y_name, ys), (spembs_name, spembs), (spcs_name, spcs)]
)
elif self.use_speaker_embedding:
if len(x_feats_dict) == 0:
raise IndexError("No speaker embedding is provided")
elif len(x_feats_dict) == 1:
spembs_idx = 0
else:
spembs_idx = 1
spembs = list(x_feats_dict.values())[spembs_idx]
spembs = [spembs[i] for i in nonzero_sorted_idx]
x_name = list(x_feats_dict.keys())[0]
spembs_name = list(x_feats_dict.keys())[spembs_idx]
return_batch = OrderedDict([(x_name, xs), (spembs_name, spembs)])
else:
x_name = list(x_feats_dict.keys())[0]
return_batch = OrderedDict([(x_name, xs)])
return return_batch, uttid_list
def _get_from_loader(self, filepath, filetype):
"""Return ndarray
        In order to open file descriptors only on first access,
        the loaders are cached in self._loaders
>>> ndarray = loader.get_from_loader(
... 'some/path.h5:F01_050C0101_PED_REAL', filetype='hdf5')
:param: str filepath:
:param: str filetype:
:return:
:rtype: np.ndarray
"""
if filetype == "hdf5":
# e.g.
# {"input": [{"feat": "some/path.h5:F01_050C0101_PED_REAL",
# "filetype": "hdf5",
# -> filepath = "some/path.h5", key = "F01_050C0101_PED_REAL"
filepath, key = filepath.split(":", 1)
loader = self._loaders.get(filepath)
if loader is None:
# To avoid disk access, create loader only for the first time
loader = h5py.File(filepath, "r")
self._loaders[filepath] = loader
return loader[key][()]
elif filetype == "sound.hdf5":
# e.g.
# {"input": [{"feat": "some/path.h5:F01_050C0101_PED_REAL",
# "filetype": "sound.hdf5",
# -> filepath = "some/path.h5", key = "F01_050C0101_PED_REAL"
filepath, key = filepath.split(":", 1)
loader = self._loaders.get(filepath)
if loader is None:
# To avoid disk access, create loader only for the first time
loader = SoundHDF5File(filepath, "r", dtype="int16")
self._loaders[filepath] = loader
array, rate = loader[key]
return array
elif filetype == "sound":
# e.g.
# {"input": [{"feat": "some/path.wav",
# "filetype": "sound"},
# Assume PCM16
if not self.keep_all_data_on_mem:
array, _ = soundfile.read(filepath, dtype="int16")
return array
if filepath not in self._loaders:
array, _ = soundfile.read(filepath, dtype="int16")
self._loaders[filepath] = array
return self._loaders[filepath]
elif filetype == "npz":
# e.g.
# {"input": [{"feat": "some/path.npz:F01_050C0101_PED_REAL",
# "filetype": "npz",
filepath, key = filepath.split(":", 1)
loader = self._loaders.get(filepath)
if loader is None:
# To avoid disk access, create loader only for the first time
loader = np.load(filepath)
self._loaders[filepath] = loader
return loader[key]
elif filetype == "npy":
# e.g.
# {"input": [{"feat": "some/path.npy",
# "filetype": "npy"},
if not self.keep_all_data_on_mem:
return np.load(filepath)
if filepath not in self._loaders:
self._loaders[filepath] = np.load(filepath)
return self._loaders[filepath]
elif filetype in ["mat", "vec"]:
# e.g.
# {"input": [{"feat": "some/path.ark:123",
# "filetype": "mat"}]},
# In this case, "123" indicates the starting points of the matrix
# load_mat can load both matrix and vector
if not self.keep_all_data_on_mem:
return kaldiio.load_mat(filepath)
if filepath not in self._loaders:
self._loaders[filepath] = kaldiio.load_mat(filepath)
return self._loaders[filepath]
elif filetype == "scp":
# e.g.
# {"input": [{"feat": "some/path.scp:F01_050C0101_PED_REAL",
# "filetype": "scp",
filepath, key = filepath.split(":", 1)
loader = self._loaders.get(filepath)
if loader is None:
# To avoid disk access, create loader only for the first time
loader = kaldiio.load_scp(filepath)
self._loaders[filepath] = loader
return loader[key]
else:
raise NotImplementedError("Not supported: loader_type={}".format(filetype))
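# Illustrative "feat" path formats accepted by _get_from_loader (the file names
# and keys below are examples only, not files shipped with this code):
#   "feats.h5:utt1"  with filetype="hdf5"        -> HDF5 dataset looked up by key
#   "wavs.h5:utt1"   with filetype="sound.hdf5"  -> SoundHDF5File looked up by key
#   "utt1.wav"       with filetype="sound"       -> PCM16 waveform via soundfile
#   "feats.ark:123"  with filetype="mat"         -> kaldiio matrix at byte offset 123
#   "feats.scp:utt1" with filetype="scp"         -> kaldiio scp table looked up by key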
class SoundHDF5File(object):
"""Collecting sound files to a HDF5 file
>>> f = SoundHDF5File('a.flac.h5', mode='a')
>>> array = np.random.randint(0, 100, 100, dtype=np.int16)
>>> f['id'] = (array, 16000)
>>> array, rate = f['id']
:param: str filepath:
:param: str mode:
:param: str format: The type used when saving wav. flac, nist, htk, etc.
:param: str dtype:
"""
def __init__(self, filepath, mode="r+", format=None, dtype="int16", **kwargs):
self.filepath = filepath
self.mode = mode
self.dtype = dtype
self.file = h5py.File(filepath, mode, **kwargs)
if format is None:
# filepath = a.flac.h5 -> format = flac
second_ext = os.path.splitext(os.path.splitext(filepath)[0])[1]
format = second_ext[1:]
if format.upper() not in soundfile.available_formats():
# If not found, flac is selected
format = "flac"
# This format affects only saving
self.format = format
def __repr__(self):
return '<SoundHDF5 file "{}" (mode {}, format {}, type {})>'.format(
self.filepath, self.mode, self.format, self.dtype
)
def create_dataset(self, name, shape=None, data=None, **kwds):
f = io.BytesIO()
array, rate = data
soundfile.write(f, array, rate, format=self.format)
self.file.create_dataset(name, shape=shape, data=np.void(f.getvalue()), **kwds)
def __setitem__(self, name, data):
self.create_dataset(name, data=data)
def __getitem__(self, key):
data = self.file[key][()]
f = io.BytesIO(data.tobytes())
array, rate = soundfile.read(f, dtype=self.dtype)
return array, rate
def keys(self):
return self.file.keys()
def values(self):
for k in self.file:
yield self[k]
def items(self):
for k in self.file:
yield k, self[k]
def __iter__(self):
return iter(self.file)
def __contains__(self, item):
return item in self.file
    def __len__(self):
        return len(self.file)
def __enter__(self):
return self
def __exit__(self, exc_type, exc_val, exc_tb):
self.file.close()
def close(self):
self.file.close()
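# A minimal, self-contained usage sketch for SoundHDF5File; the file name
# "example.flac.h5" is illustrative (any "*.<format>.h5" path works):
if __name__ == "__main__":
    import numpy as np
    with SoundHDF5File("example.flac.h5", mode="a") as f:
        data = np.random.randint(0, 100, 100, dtype=np.int16)
        f["utt1"] = (data, 16000)  # stored FLAC-encoded as (array, sampling_rate)
        array, rate = f["utt1"]    # decoded back through soundfile
        assert rate == 16000 and len(array) == len(data)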
|
451456
|
import FWCore.ParameterSet.Config as cms
from JetMETCorrections.Configuration.JetCorrectionServices_cff import *
# L1 Correction Producers
ak4CaloJetsL1 = cms.EDProducer(
'CaloJetCorrectionProducer',
src = cms.InputTag('ak4CaloJets'),
correctors = cms.vstring('L1Fastjet')
)
ak4PFJetsL1 = cms.EDProducer(
'PFJetCorrectionProducer',
src = cms.InputTag('ak4PFJets'),
correctors = cms.vstring('L1Fastjet')
)
ak4PFCHSJetsL1 = cms.EDProducer(
'PFJetCorrectionProducer',
src = cms.InputTag('ak4PFJetsCHS'),
correctors = cms.vstring('L1Fastjet')
)
ak4JPTJetsL1 = cms.EDProducer(
'JPTJetCorrectionProducer',
src = cms.InputTag('JetPlusTrackZSPCorJetAntiKt4'),
correctors = cms.vstring('ak4L1JPTFastjet')
)
ak4TrackJetsL1 = cms.EDProducer(
'TrackJetCorrectionProducer',
src = cms.InputTag('ak4TrackJets'),
correctors = cms.vstring('L1Fastjet')
)
# L2 Correction Producers
ak4CaloJetsL2 = ak4CaloJetsL1.clone(correctors = ['ak4CaloL2Relative'])
ak4PFJetsL2 = ak4PFJetsL1.clone(correctors = ['ak4PFL2Relative'])
ak4PFCHSJetsL2 = ak4PFCHSJetsL1.clone(correctors = ['ak4PFCHSL2Relative'])
ak4JPTJetsL2 = ak4JPTJetsL1.clone(correctors = ['ak4JPTL2Relative'])
ak4TrackJetsL2 = ak4TrackJetsL1.clone(correctors = ['ak5TRKL2Relative'])
# L2L3 Correction Producers
ak4CaloJetsL2L3 = ak4CaloJetsL1.clone(correctors = ['ak4CaloL2L3'])
ak4PFJetsL2L3 = ak4PFJetsL1.clone(correctors = ['ak4PFL2L3'])
ak4PFCHSJetsL2L3 = ak4PFCHSJetsL1.clone(correctors = ['ak4PFCHSL2L3'])
ak4JPTJetsL2L3 = ak4JPTJetsL1.clone(correctors = ['ak4JPTL2L3'])
ak4TrackJetsL2L3 = ak4TrackJetsL1.clone(correctors = ['ak5TRKL2L3'])
# L1L2L3 Correction Producers
ak4CaloJetsL1L2L3 = ak4CaloJetsL1.clone(correctors = ['ak4CaloL1L2L3'])
ak4PFJetsL1L2L3 = ak4PFJetsL1.clone(correctors = ['ak4PFL1L2L3'])
ak4PFCHSJetsL1L2L3 = ak4PFCHSJetsL1.clone(correctors = ['ak4PFCHSL1L2L3'])
ak4JPTJetsL1L2L3 = ak4JPTJetsL1.clone(correctors = ['ak4JPTL1L2L3'])
ak4TrackJetsL1L2L3 = ak4TrackJetsL1.clone(correctors = ['ak5TRKL1L2L3'])
# L2L3L6 Correction Producers
ak4CaloJetsL2L3L6 = ak4CaloJetsL1.clone(correctors = ['ak4CaloL2L3L6'])
ak4PFJetsL2L3L6 = ak4PFJetsL1.clone(correctors = ['ak4PFL2L3L6'])
# L1L2L3L6 Correction Producers
ak4CaloJetsL1L2L3L6 = ak4CaloJetsL1.clone(correctors = ['ak4CaloL1L2L3L6'])
ak4PFJetsL1L2L3L6 = ak4PFJetsL1.clone(correctors = ['ak4PFL1L2L3L6'])
|
451530
|
from concurrent.futures import ThreadPoolExecutor
from typing import NamedTuple, Callable, Union, List, Dict
import numpy as np
from keras import optimizers
from kerl.common.agent import MultiAgent, EpisodeSample
from kerl.common.multi_gym_env import MultiGymEnv
from kerl.wm.networks import WorldModelVAE, WorldEnvModel, I2AController
class WMInternalStates(NamedTuple):
env_model_states: list
def rename_total_loss(loss_dict: Dict[str, float],
model_name: str) -> Dict[str, float]:
full_loss = loss_dict['loss']
result = loss_dict.copy()
del result['loss']
result[model_name + '_loss'] = full_loss
return result
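# Expected behavior of rename_total_loss, with illustrative values: the generic
# 'loss' key reported by Keras is replaced by a model-specific key.
#
#   >>> rename_total_loss({'loss': 1.5, 'recon_loss': 0.2}, 'vae')
#   {'recon_loss': 0.2, 'vae_loss': 1.5}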
class WMMultiAgent(MultiAgent):
latent_dim_size = 128
world_rnn_units = 512
def __init__(self, env: MultiGymEnv,
optimizer_maker: Callable[[], optimizers.Optimizer],
**kwargs):
vae_optimizer = optimizer_maker()
env_model_optimizer = optimizer_maker()
controller_optimizer = optimizer_maker()
super().__init__(env, **kwargs)
width, height, rgb_channels = env.observation_space.shape
vae = WorldModelVAE(
'agent_vae_',
(width, height, rgb_channels),
vae_optimizer, latent_dim=self.latent_dim_size)
self.vae = vae
self.world = WorldEnvModel(
'agent_world_', batch_size=self._num_envs, time_steps=None,
num_actions=self._num_actions,
latent_dim_size=self.latent_dim_size,
num_rnn_units=self.world_rnn_units,
mixture_size=6, temperature=1.0, optimizer=env_model_optimizer)
self.controller = I2AController(
'a2c_agent_',
observation_size=self.latent_dim_size + self.world_rnn_units,
num_actions=self._num_actions, optimizer=controller_optimizer,
normalize_returns=self.normalize_returns,
reward_scale=self.reward_scale)
self.executor = ThreadPoolExecutor(max_workers=3)
@property
def models(self):
return [self.vae.trainable_model, self.world.model,
self.controller.trainable_model]
def reset_states(self, states=None):
self.world.reset_states(
states.env_model_states if states is not None else None)
def reset_particular_agents(self, mask: Union[List[bool], np.ndarray]):
self.world.reset_particular_states(mask)
def current_states(self):
return WMInternalStates(
env_model_states=self.world.current_states())
def multi_env_act(self, one_step_observations):
batch_size = one_step_observations.shape[0]
internal_states = self.current_states()
current_obs = one_step_observations[:, :, :, :, -1]
_, _, encoded_current_obs = self.vae.compress(current_obs)
encoded_obs_and_world_state = (
np.concatenate(
[self.world.state_for_controller(
internal_states.env_model_states),
encoded_current_obs],
axis=-1)
.reshape((batch_size, -1)))
policy_output, value_output = self.controller.predict_on_actions(
encoded_obs_and_world_state)
sampled_actions = np.array(
[np.random.choice(self._num_actions, p=po)
for po in policy_output])
encoded_current_obs_with_time = encoded_current_obs.reshape(
batch_size, 1, self.latent_dim_size)
self.world.model.predict_on_batch(
x=[encoded_current_obs_with_time,
sampled_actions,
np.zeros_like(encoded_current_obs_with_time)])
return (policy_output, sampled_actions, value_output[:, 0],
internal_states)
def train_on_sample(self, sample: EpisodeSample):
        # Encode observations with the VAE so they can be used to train both
        # the environment model and the controller.
current_obs = sample.batch_observations[:, :, :, :, :, -1]
last_obs = sample.last_observations[:, :, :, :, -1]
batch_size, time_steps, width, height, channels = current_obs.shape
_, _, encoded_current_obs = self.vae.compress(
np.reshape(current_obs,
(batch_size * time_steps, width, height, channels)))
encoded_current_obs_with_time = encoded_current_obs.reshape(
batch_size, time_steps, self.latent_dim_size)
_, _, encoded_last_obs = self.vae.compress(last_obs)
encoded_future_obs_with_time = np.concatenate(
[encoded_current_obs_with_time[:, 1:],
np.expand_dims(encoded_last_obs, 1)],
axis=1)
# Training all networks
all_losses = {}
all_losses.update(
self.train_vae(sample))
all_losses.update(
self.train_env_model(
encoded_current_obs_with_time,
encoded_future_obs_with_time,
sample))
all_losses.update(
self.train_controller(
encoded_current_obs_with_time, self.reward_discount, sample))
return all_losses
def train_vae(self, sample: EpisodeSample):
batch_size, time_steps, width, height, channels, frames = (
sample.batch_observations.shape)
env_observations = (
sample.batch_observations[:, :, :, :, :, -1]
.reshape((batch_size * time_steps, width, height, channels)))
vae_loss = self.vae.trainable_model.train_on_batch(
x=env_observations, y=None)
return {self.vae.trainable_model.name + '_loss': vae_loss}
def train_controller(self, encoded_current_obs_with_time, reward_discount,
sample: EpisodeSample):
batch_size, time_steps, width, height, channels, frames = (
sample.batch_observations.shape)
rnn_states_before_actions = np.transpose(
[self.world.state_for_controller(item.env_model_states)
for item in sample.agent_internal_states],
axes=(1, 0, 2))
encoded_obs_and_world_state = (
np.concatenate(
[rnn_states_before_actions, encoded_current_obs_with_time],
axis=-1)
.reshape((batch_size * time_steps, -1)))
controller_loss = self.controller.train_on_sample(
sample, encoded_obs_and_world_state, reward_discount)
return rename_total_loss(
controller_loss, self.controller.trainable_model.name)
def train_env_model(self, encoded_current_obs_with_time,
encoded_future_obs_with_time, sample: EpisodeSample):
state_before_training = self.world.current_states()
self.world.reset_states(
sample.agent_internal_states[0].env_model_states)
env_model_loss = self.world.model.train_on_batch(
x=[encoded_current_obs_with_time,
sample.batch_actions,
encoded_future_obs_with_time],
y=[np.expand_dims(sample.batch_rewards, axis=-1),
np.expand_dims(np.float32(sample.batch_dones), axis=-1)])
env_model_named_losses = {
n: v for n, v in zip(self.world.model.metrics_names,
env_model_loss)}
self.world.reset_states(state_before_training)
return rename_total_loss(env_model_named_losses,
self.world.model.name)
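# A hypothetical construction sketch (the environment object, optimizer factory
# and keyword arguments below are assumptions; real wiring depends on the
# surrounding training loop and the MultiAgent base class):
#
#   agent = WMMultiAgent(env, lambda: optimizers.Adam(lr=1e-4), **agent_kwargs)
#   agent.reset_states()
#   policy, actions, values, states = agent.multi_env_act(observations)
#   losses = agent.train_on_sample(episode_sample)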
|
451561
|
from __future__ import absolute_import
import pytest
import os
from qark.decompiler.decompiler import Decompiler
from qark.scanner.scanner import Scanner
from qark.scanner.plugin import JavaASTPlugin, ManifestPlugin
DECOMPILER_PATH = os.path.join(os.path.dirname(os.path.abspath(__file__)), "..", "qark", "lib", "decompilers")
@pytest.fixture(scope="session")
def path_to_source():
return os.path.join(os.path.dirname(os.path.abspath(__file__)), "goatdroid.apk")
@pytest.fixture(scope="session")
def build_directory():
return os.path.join(os.path.dirname(os.path.abspath(__file__)), "build_directory")
@pytest.fixture()
def decompiler(path_to_source, build_directory):
return Decompiler(path_to_source=path_to_source, build_directory=build_directory)
@pytest.fixture(scope="module")
def module_decompiler(path_to_source, build_directory):
return Decompiler(path_to_source=path_to_source, build_directory=build_directory)
@pytest.fixture()
def scanner(decompiler):
return Scanner(decompiler.manifest_path, decompiler.build_directory)
@pytest.fixture(scope="session")
def vulnerable_manifest_path():
return os.path.join(os.path.dirname(os.path.abspath(__file__)), "test_xml_files",
"test_androidmanifest.xml")
@pytest.fixture(scope="session")
def goatdroid_manifest_path():
return os.path.join(os.path.dirname(os.path.abspath(__file__)), "test_xml_files",
"test_goatdroid_manifest.xml")
@pytest.fixture(scope="session")
def test_java_files():
return os.path.join(os.path.dirname(os.path.abspath(__file__)), "test_java_files")
@pytest.fixture(scope="session")
def vulnerable_broadcast_path(test_java_files):
return os.path.join(test_java_files,
"send_broadcast_receiver_permission.java")
@pytest.fixture(scope="session")
def vulnerable_receiver_path():
return os.path.join(os.path.dirname(os.path.abspath(__file__)), "test_plugins", "test_manifest_plugins",
"broadcastreceivers", "SendSMSNowReceiver.java")
@pytest.fixture(autouse=True)
def reset_plugins():
"""Reset all plugins in between each function. `JavaASTPlugin` currently will reset every other plugin type."""
JavaASTPlugin.reset()
ManifestPlugin.manifest_xml = None
ManifestPlugin.manifest_path = None
ManifestPlugin.min_sdk = -1
ManifestPlugin.target_sdk = -1
ManifestPlugin.package_name = "PACKAGE_NOT_FOUND"
|
451578
|
import win32clipboard, win32con
def get_pattern():
text = str(eval(raw_input("Pattern:\n")))
win32clipboard.OpenClipboard()
win32clipboard.EmptyClipboard()
win32clipboard.SetClipboardText(text)
win32clipboard.CloseClipboard()
def main():
while True:
get_pattern()
if __name__ == "__main__":
main()
|
451593
|
import tensorflow as tf
from tqdm import tqdm
from PIL import Image
from hailo_model_zoo.core.infer.infer_utils import log_accuracy, write_results, save_image
from hailo_sdk_client import SdkFineTune
def np_infer(runner, target, logger, eval_num_examples, print_num_examples,
batch_size, data_feed_callback, tf_graph_callback, postprocessing_callback,
eval_callback, visualize_callback, video_outpath, dump_results, results_path):
with tf.Graph().as_default():
logger.info('Building preprocess...')
iterator = data_feed_callback()
[preprocessed_data, image_info] = iterator.get_next()
logger.info('Compiling and integrating with Tensorflow graph...')
sdk_export = tf_graph_callback(preprocessed_data)
eval_metric = eval_callback()
logger.info('Running inference...')
with sdk_export.session.as_default(), runner.hef_infer_context(sdk_export):
sdk_export.session.run([iterator.initializer, tf.compat.v1.local_variables_initializer()])
if isinstance(target, SdkFineTune):
sdk_export.session.run(
[delta.initializer for delta in sdk_export.kernels_delta + sdk_export.biases_delta])
num_of_images = 0
try:
with tqdm(total=None, desc="Processed", unit="images",
                      disable=None if print_num_examples >= 1e9 else True) as pbar:
while num_of_images < eval_num_examples:
logits_batch, img_info = sdk_export.session.run([sdk_export.output_tensors, image_info])
num_of_images += batch_size
probs = postprocessing_callback(logits_batch, image_info=img_info)
if not visualize_callback and not dump_results:
eval_metric.update_op(probs, img_info)
if num_of_images % print_num_examples == 0:
eval_metric.evaluate()
log_accuracy(logger, num_of_images, eval_metric.get_accuracy())
else:
if visualize_callback:
save_image(Image.fromarray(visualize_callback(probs, img_info['img_orig'])),
img_info['image_name'][0])
if dump_results:
write_results(probs, img_info, results_path)
pbar.update(batch_size)
except tf.errors.OutOfRangeError:
pass
            # Write message and exit if we finished iterating over the data
if not visualize_callback and not dump_results and num_of_images % print_num_examples != 0:
eval_metric.evaluate()
log_accuracy(logger, num_of_images, eval_metric.get_accuracy())
return eval_metric.get_accuracy()
|
451604
|
from __future__ import unicode_literals
from django.core.management.base import AppCommand
from django.db import DEFAULT_DB_ALIAS, connections
class Command(AppCommand):
help = 'Prints the SQL statements for resetting sequences for the given app name(s).'
output_transaction = True
def add_arguments(self, parser):
super(Command, self).add_arguments(parser)
parser.add_argument(
'--database', default=DEFAULT_DB_ALIAS,
help='Nominates a database to print the SQL for. Defaults to the "default" database.',
)
def handle_app_config(self, app_config, **options):
if app_config.models_module is None:
return
connection = connections[options['database']]
models = app_config.get_models(include_auto_created=True)
statements = connection.ops.sequence_reset_sql(self.style, models)
return '\n'.join(statements)
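# Example invocation (assuming an installed app named "myapp"; the command name
# comes from this file's location under management/commands/):
#
#   python manage.py sqlsequencereset myapp --database=default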
|
451640
|
from misc import *
import mido
while True:
msg_a = None
msg_b = None
print("=====================================")
print_full("Waiting for first message... ")
seen = list()
try:
with mido.open_input('monologue 1 KBD/KNOB 1') as inport:
for msg in inport:
if (msg.type == 'control_change' and msg.control not in seen):
print("CC(" + str(msg.control) + ") ", end='', flush=True)
seen.append(msg.control)
if (msg.type == 'program_change' and "PC" not in seen):
print("PC ", end='', flush=True)
seen.append("PC")
if (msg.type == 'sysex'):
if (msg_a == None):
msg_a = msg.data
print("OK")
print_full("Waiting for second message... ")
seen.clear()
else:
msg_b = msg.data
print("OK")
break
print_full("Comparison: \n")
if (len(msg_a) != len(msg_b)):
print_full("Message length mismatch.")
else:
diff_count = 0
offset = -1
print("---")
for i in range(0, len(msg_a)):
if (msg_a[i] != msg_b[i]):
if (offset == -1):
offset = i + 1
print("Byte %s" % (str(offset)))
else:
print("Byte %s (+%s)" % (str(i + 1), str(i + 1 - offset)))
bin_a = list("{:08b}".format(msg_a[i]))
bin_b = list("{:08b}".format(msg_b[i]))
dec_or = (msg_a[i] ^ msg_b[i])
for j in range(0, len(bin_a)):
if (bin_a[j] == bin_b[j]):
bin_a[j] = 'x'
bin_b[j] = 'x'
else:
diff_count += 1
print("A: " + ''.join(bin_a))
print("B: " + ''.join(bin_b))
print("XOR: " + str(dec_or))
print("-")
print("---")
print(str(diff_count) + " differing bits in total.")
except OSError:
print("Device not connected.")
break
|
451649
|
import pytest
import os
from functools import lru_cache
import logging as _logging
from logging.handlers import RotatingFileHandler
from test.tutils import random_str
from calculate_anything import logging
@lru_cache(maxsize=None)
def get_file_handler(filepath):
file_hdlr = RotatingFileHandler(
filepath,
maxBytes=1000000,
backupCount=10,
encoding='utf-8',
delay=False,
)
file_hdlr.setFormatter(logging.ColorFormatter(use_color=False))
return file_hdlr
@lru_cache(maxsize=None)
def get_stdout_handler():
hdlr = logging.CustomHandler(print, print, print, print, print)
return hdlr
@pytest.mark.parametrize(
'level',
[
_logging.DEBUG,
_logging.INFO,
_logging.WARNING,
_logging.ERROR,
_logging.CRITICAL,
],
)
def test_logging(caplog, level):
with caplog.at_level(level):
logging.setLevel(level)
logging.set_file_handler(None)
logger = logging.getLogger('test_logging_logging')
assert len(logger.handlers) == 1
assert isinstance(logger.handlers[0], _logging.StreamHandler)
msg = random_str()
logger.debug(msg)
assert (msg in caplog.text) == (level <= _logging.DEBUG)
msg = random_str()
logger.info(msg)
assert (msg in caplog.text) == (level <= _logging.INFO)
msg = random_str()
logger.warning(msg)
assert (msg in caplog.text) == (level <= _logging.WARNING)
msg = random_str()
logger.error(msg)
assert (msg in caplog.text) == (level <= _logging.ERROR)
msg = random_str()
logger.critical(msg)
assert (msg in caplog.text) == (level <= _logging.CRITICAL)
@pytest.mark.parametrize(
'level',
[
_logging.DEBUG,
_logging.INFO,
_logging.WARNING,
_logging.ERROR,
_logging.CRITICAL,
],
)
def test_logging_custom_stdout_handler(caplog, level):
hdlr = get_stdout_handler()
with caplog.at_level(level):
logging.setLevel(level)
logging.set_file_handler(None)
logging.set_stdout_handler(hdlr)
        logger = logging.getLogger('test_logging_custom_stdout_handler')
assert len(logger.handlers) == 1
assert isinstance(logger.handlers[0], logging.CustomHandler)
msg = random_str()
logger.debug(msg)
assert (msg in caplog.text) == (level <= _logging.DEBUG)
msg = random_str()
logger.info(msg)
assert (msg in caplog.text) == (level <= _logging.INFO)
msg = random_str()
logger.warning(msg)
assert (msg in caplog.text) == (level <= _logging.WARNING)
msg = random_str()
logger.error(msg)
assert (msg in caplog.text) == (level <= _logging.ERROR)
msg = random_str()
logger.critical(msg)
assert (msg in caplog.text) == (level <= _logging.CRITICAL)
@pytest.mark.parametrize(
'level',
[
_logging.DEBUG,
_logging.INFO,
_logging.WARNING,
_logging.ERROR,
_logging.CRITICAL,
],
)
def test_logging_no_stdout_handler(caplog, level):
with caplog.at_level(level):
logging.setLevel(level)
logging.set_file_handler(None)
logging.set_stdout_handler(None)
logger = logging.getLogger('test_logging_no_stdout_handler')
assert not logger.handlers
@pytest.mark.parametrize(
'level',
[
_logging.DEBUG,
_logging.INFO,
_logging.WARNING,
_logging.ERROR,
_logging.CRITICAL,
],
)
def test_logging_file(log_filepath, level):
hdlr = get_file_handler(log_filepath)
print('Saving logs to {}'.format(log_filepath))
logging.setLevel(level)
logging.set_file_handler(hdlr)
logging.set_stdout_handler(None)
logger = logging.getLogger('test_logging_file')
assert len(logger.handlers) == 1
assert isinstance(logger.handlers[0], RotatingFileHandler)
msgs = []
msg = random_str()
logger.debug(msg)
msgs.append((msg, _logging.DEBUG))
msg = random_str()
logger.info(msg)
msgs.append((msg, _logging.INFO))
msg = random_str()
logger.warning(msg)
msgs.append((msg, _logging.WARNING))
msg = random_str()
logger.error(msg)
msgs.append((msg, _logging.ERROR))
msg = random_str()
logger.critical(msg)
msgs.append((msg, _logging.CRITICAL))
assert os.path.exists(log_filepath)
with open(log_filepath, 'r', encoding='utf-8') as f:
log = f.read()
for msg, mlevel in msgs:
assert (msg in log) == (level <= mlevel)
|
451655
|
import unittest
import numpy as np
try:
from numcodecs.msgpacks import MsgPack
except ImportError: # pragma: no cover
raise unittest.SkipTest("msgpack not available")
from numcodecs.tests.common import (check_config, check_repr, check_encode_decode_array,
check_backwards_compatibility, greetings)
# object array with strings
# object array with mix strings / nans
# object array with mix of string, int, float
# ...
arrays = [
np.array(['foo', 'bar', 'baz'] * 300, dtype=object),
np.array([['foo', 'bar', np.nan]] * 300, dtype=object),
np.array(['foo', 1.0, 2] * 300, dtype=object),
np.arange(1000, dtype='i4'),
np.array(['foo', 'bar', 'baz'] * 300),
np.array(['foo', ['bar', 1.0, 2], {'a': 'b', 'c': 42}] * 300, dtype=object),
np.array(greetings * 100),
np.array(greetings * 100, dtype=object),
np.array([b'foo', b'bar', b'baz'] * 300, dtype=object),
np.array([g.encode('utf-8') for g in greetings] * 100, dtype=object),
np.array([[0, 1], [2, 3]], dtype=object),
]
def test_encode_decode():
for arr in arrays:
check_encode_decode_array(arr, MsgPack())
def test_config():
check_config(MsgPack())
def test_repr():
check_repr("MsgPack(raw=False, use_bin_type=True, use_single_float=False)")
check_repr("MsgPack(raw=True, use_bin_type=False, use_single_float=True)")
def test_backwards_compatibility():
codec = MsgPack()
check_backwards_compatibility(codec.codec_id, arrays, [codec])
def test_non_numpy_inputs():
codec = MsgPack()
# numpy will infer a range of different shapes and dtypes for these inputs.
# Make sure that round-tripping through encode preserves this.
data = [
[0, 1],
[[0, 1], [2, 3]],
[[0], [1], [2, 3]],
[[[0, 0]], [[1, 1]], [[2, 3]]],
["1"],
["11", "11"],
["11", "1", "1"],
[{}],
[{"key": "value"}, ["list", "of", "strings"]],
[b"1"],
[b"11", b"11"],
[b"11", b"1", b"1"],
[{b"key": b"value"}, [b"list", b"of", b"strings"]],
]
for input_data in data:
actual = codec.decode(codec.encode(input_data))
expect = np.array(input_data)
assert expect.shape == actual.shape
assert np.array_equal(expect, actual)
def test_encode_decode_shape_dtype_preserved():
codec = MsgPack()
for arr in arrays:
actual = codec.decode(codec.encode(arr))
assert arr.shape == actual.shape
assert arr.dtype == actual.dtype
def test_bytes():
# test msgpack behaviour with bytes and str (unicode)
bytes_arr = np.array([b'foo', b'bar', b'baz'], dtype=object)
unicode_arr = np.array(['foo', 'bar', 'baz'], dtype=object)
# raw=False (default)
codec = MsgPack()
# works for bytes array, round-trips bytes to bytes
b = codec.decode(codec.encode(bytes_arr))
assert np.array_equal(bytes_arr, b)
assert isinstance(b[0], bytes)
assert b[0] == b'foo'
# works for unicode array, round-trips unicode to unicode
b = codec.decode(codec.encode(unicode_arr))
assert np.array_equal(unicode_arr, b)
assert isinstance(b[0], str)
assert b[0] == 'foo'
# raw=True
codec = MsgPack(raw=True)
# works for bytes array, round-trips bytes to bytes
b = codec.decode(codec.encode(bytes_arr))
assert np.array_equal(bytes_arr, b)
assert isinstance(b[0], bytes)
assert b[0] == b'foo'
# broken for unicode array, round-trips unicode to bytes
b = codec.decode(codec.encode(unicode_arr))
assert not np.array_equal(unicode_arr, b)
assert isinstance(b[0], bytes)
assert b[0] == b'foo'
|
451664
|
from __future__ import unicode_literals
import frappe
def execute():
"""Executed by bench execute erpnext_biotrack.patches.delete_all_synced_stock_entries.execute """
frappe.flags.mute_emails = True
# rows = frappe.db.sql()
i = 0
    for d in frappe.get_all("Stock Entry"):
        doc = frappe.get_doc("Stock Entry", d.name)
        i += 1
        print "Deleting " + doc.name
        print doc
        if doc.docstatus == 1:
            doc.cancel()
        doc.delete()
        frappe.db.commit()
frappe.flags.mute_emails = False
|
451705
|
import json, pygame
import thorpy.miscgui.theme as theme
class MetaDataManager(object):
def __init__(self, data=None):
if data is None: data = {}
self.data = data
def write_data(self, fn):
json.dump(self.data, open(fn,'w'))
def read_data(self, fn):
try:
self.data = json.load(open(fn))
        except Exception:
return None
return True
def load_font_data(self, fn):
font = self.data.get("font")
if font:
theme.add_font(font)
font_size = self.data.get("font_size")
if font_size:
theme.set_font_sizes(font_size)
def get_display_data(self, fn, w, h, flags):
W,H = self.data.get("screen_w"), self.data.get("screen_h")
fullscreen = self.data.get("fullscreen")
if fullscreen:
flags |= pygame.FULLSCREEN
if W is None: W = w
if H is None: H = h
return W,H,flags
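# A minimal usage sketch (the file name "meta.json" is illustrative):
if __name__ == "__main__":
    mgr = MetaDataManager({"screen_w": 800, "screen_h": 600, "fullscreen": False})
    mgr.write_data("meta.json")            # persist the metadata as JSON
    assert mgr.read_data("meta.json")      # reload it from disk
    W, H, flags = mgr.get_display_data("meta.json", 640, 480, 0)
    print(W, H, flags)                     # -> 800 600 0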
|
451706
|
import torch.nn as nn
import torch as th
class MultiColumn(nn.Module):
def __init__(self, num_classes, conv_column, column_units,
clf_layers=None):
"""
- Example multi-column network
- Useful when a video sample is too long and has to be split into
multiple clips
- Processes 3D-CNN on each clip and averages resulting features across
clips before passing it to classification(FC) layer
Args:
- Input: Takes in a list of tensors each of size
(batch_size, 3, sequence_length, W, H)
- Returns: logits of size (batch size, num_classes)
"""
super(MultiColumn, self).__init__()
self.num_classes = num_classes
self.column_units = column_units
self.conv_column = conv_column(column_units)
self.clf_layers = clf_layers
if not self.clf_layers:
self.clf_layers = th.nn.Sequential(
nn.Linear(column_units, self.num_classes)
)
def forward(self, inputs, get_features=False):
outputs = []
num_cols = len(inputs)
for idx in range(num_cols):
x = inputs[idx]
x1 = self.conv_column(x)
outputs.append(x1)
outputs = th.stack(outputs).permute(1, 0, 2)
outputs = th.squeeze(th.sum(outputs, 1), 1)
avg_output = outputs / float(num_cols)
outputs = self.clf_layers(avg_output)
if get_features:
return outputs, avg_output
else:
return outputs
if __name__ == "__main__":
from model3D_1 import Model
num_classes = 174
input_tensor = [th.autograd.Variable(th.rand(1, 3, 72, 84, 84))]
model = MultiColumn(174, Model, 512)
output = model(input_tensor)
print(output.size())
|
451713
|
from Tkinter import *
import os
class RecentFile:
recent = None
def __init__(self,app):
self.recent = []
self.app = app
#self.loadRecent()
self.update_menu(self.app.Filemenu)
def add(self,filename):
self.app.logen.preferences.add_recent(filename)
self.update_menu(self.app.Filemenu)
def clear(self):
self.app.logen.preferences.clear_recent()
self.update_menu(self.app.Filemenu)
def update_menu(self,menu):
""" This should modify the menu at from position 6 onwards with recent menu
"""
self.recent = self.app.logen.preferences.get_recent()
start = self.app.Filemenu_index["RECENT"]
menu.delete(start,12)
RecentMenu = Menu(menu,tearoff=0)
for i in self.recent:
#menu.add_command(label="%s"%os.path.basename(i), command=lambda i=i: self.openRecent(i))
RecentMenu.add_command(label="%s"%i, command=lambda i=i: self.openRecent(i))
RecentMenu.add_separator()
RecentMenu.add_command(label="Clear Recent Files", command=self.clear)
menu.add_cascade(label="Recent Files", menu=RecentMenu,underline=2)
menu.add_separator()
menu.add_command(label="Quit", command= self.app.exit,underline=0)
def openRecent(self,filename):
self.app.open_file(filename=filename)
|
451718
|
from __future__ import print_function, absolute_import, division
import KratosMultiphysics
import KratosMultiphysics.ParticleMechanicsApplication as KratosParticle
import KratosMultiphysics.KratosUnittest as KratosUnittest
class TestParticleEraseProcess(KratosUnittest.TestCase):
def _generate_particle_element_and_check(self, current_model):
KratosMultiphysics.Logger.GetDefaultOutput().SetSeverity(KratosMultiphysics.Logger.Severity.WARNING)
dimension = 3
# Initialize model part
## Material model part definition
material_point_model_part = current_model.CreateModelPart("dummy_name")
material_point_model_part.ProcessInfo.SetValue(KratosMultiphysics.DOMAIN_SIZE, dimension)
self.process_info = material_point_model_part.ProcessInfo
## Initial material model part definition
initial_mesh_model_part = current_model.CreateModelPart("Initial_dummy_name")
initial_mesh_model_part.ProcessInfo.SetValue(KratosMultiphysics.DOMAIN_SIZE, dimension)
## Grid model part definition
grid_model_part = current_model.CreateModelPart("Background_Grid")
grid_model_part.ProcessInfo.SetValue(KratosMultiphysics.DOMAIN_SIZE, dimension)
# Create element and nodes
sub_mp = initial_mesh_model_part.CreateSubModelPart("test")
sub_mp.GetProperties()[1].SetValue(KratosParticle.PARTICLES_PER_ELEMENT, 4)
self._create_nodes(sub_mp)
self._create_elements(sub_mp)
# Create background element and nodes
background_sub_mp = grid_model_part.CreateSubModelPart("test2")
self._create_nodes(background_sub_mp)
self._create_elements(background_sub_mp)
self._create_conditions(background_sub_mp)
# Generate MP Elements and Conditions
KratosParticle.GenerateMaterialPointElement(grid_model_part, initial_mesh_model_part, material_point_model_part, False)
KratosParticle.GenerateMaterialPointCondition(grid_model_part, initial_mesh_model_part, material_point_model_part)
def _create_nodes(self, initial_mp):
initial_mp.CreateNewNode(1, -0.5, -0.5, 0.0)
initial_mp.CreateNewNode(2, 0.5, -0.5, 0.0)
initial_mp.CreateNewNode(3, 0.5, 0.5, 0.0)
initial_mp.CreateNewNode(4, -0.5, 0.5, 0.0)
initial_mp.CreateNewNode(5, -0.5, -0.5, 1.0)
initial_mp.CreateNewNode(6, 0.5, -0.5, 1.0)
initial_mp.CreateNewNode(7, 0.5, 0.5, 1.0)
initial_mp.CreateNewNode(8, -0.5, 0.5, 1.0)
def _create_elements(self, initial_mp):
initial_mp.CreateNewElement("UpdatedLagrangian3D8N", 1, [1,2,3,4,5,6,7,8], initial_mp.GetProperties()[1])
KratosMultiphysics.VariableUtils().SetFlag(KratosMultiphysics.ACTIVE, True, initial_mp.Elements)
def _create_conditions(self, initial_mp):
initial_mp.CreateNewCondition("SurfaceCondition3D4N", 1, [2,4,8,6], initial_mp.GetProperties()[1])
KratosMultiphysics.VariableUtils().SetFlag(KratosMultiphysics.BOUNDARY, True, initial_mp.Conditions)
for condition in initial_mp.Conditions:
condition.SetValue(KratosParticle.PARTICLES_PER_CONDITION, 0)
condition.SetValue(KratosParticle.MPC_BOUNDARY_CONDITION_TYPE, 1)
def _search_element(self, current_model):
# Default
max_num_results = 1000
specific_tolerance = 1.e-5
# Get model part
material_point_model_part = current_model.GetModelPart("dummy_name")
grid_model_part = current_model.GetModelPart("Background_Grid")
# Search element
KratosParticle.SearchElement(grid_model_part, material_point_model_part, max_num_results, specific_tolerance)
def test_ParticleEraseOutsideGivenDomain(self):
current_model = KratosMultiphysics.Model()
self._generate_particle_element_and_check(current_model)
# Get model part
material_point_model_part = current_model.GetModelPart("dummy_name")
# Check initial total number of element
particle_counter = material_point_model_part.NumberOfElements()
self.assertEqual(particle_counter, 8)
# Move particle
for mpm in material_point_model_part.Elements:
new_coordinates = mpm.CalculateOnIntegrationPoints(KratosParticle.MP_COORD, self.process_info)
new_coordinates[0] += [0.3, 0.23, 0.22]
mpm.SetValuesOnIntegrationPoints(KratosParticle.MP_COORD, new_coordinates, self.process_info)
# Check outside given domain
for mpm in material_point_model_part.Elements:
new_coordinate = mpm.CalculateOnIntegrationPoints(KratosParticle.MP_COORD, self.process_info)[0]
if(new_coordinate[0] < -0.5 or new_coordinate[0] > 0.5 or new_coordinate[1] < -0.5 or new_coordinate[1] > 0.5 or new_coordinate[2] < 0.0 or new_coordinate[2] > 1.0 ):
mpm.Set(KratosMultiphysics.TO_ERASE, True)
# Initiate process
process = KratosParticle.ParticleEraseProcess(material_point_model_part)
# Execute
process.Execute()
# Check total number of element
particle_counter = material_point_model_part.NumberOfElements()
self.assertEqual(particle_counter, 1)
expected_id = 9
for mpm in material_point_model_part.Elements:
self.assertEqual(mpm.Id, expected_id)
def test_ParticleEraseBySearch(self):
current_model = KratosMultiphysics.Model()
self._generate_particle_element_and_check(current_model)
# Get model part
material_point_model_part = current_model.GetModelPart("dummy_name")
# Check initial total number of element
particle_counter = material_point_model_part.NumberOfElements()
self.assertEqual(particle_counter, 8)
# Move particle
for mpm in material_point_model_part.Elements:
new_coordinates = mpm.CalculateOnIntegrationPoints(KratosParticle.MP_COORD, self.process_info)
new_coordinates[0] += [0.3, 0.23, 0.22]
mpm.SetValuesOnIntegrationPoints(KratosParticle.MP_COORD, new_coordinates, self.process_info)
# Call Search
self._search_element(current_model)
# Initiate process
process = KratosParticle.ParticleEraseProcess(material_point_model_part)
# Execute
process.Execute()
# Check total number of element
particle_counter = material_point_model_part.NumberOfElements()
self.assertEqual(particle_counter, 1)
expected_id = 9
for mpm in material_point_model_part.Elements:
self.assertEqual(mpm.Id, expected_id)
def test_ParticleConditionEraseOutsideGivenDomain(self):
current_model = KratosMultiphysics.Model()
self._generate_particle_element_and_check(current_model)
# Get model part
material_point_model_part = current_model.GetModelPart("dummy_name")
# Check initial number of condition
particle_counter = material_point_model_part.NumberOfConditions()
self.assertEqual(particle_counter, 1)
# Move particle
for mpc in material_point_model_part.Conditions:
new_coordinates = mpc.CalculateOnIntegrationPoints(KratosParticle.MPC_COORD, self.process_info)
new_coordinates[0] += [-0.5, 0.5, 0.5]
mpc.SetValuesOnIntegrationPoints(KratosParticle.MPC_COORD, new_coordinates, self.process_info)
# Check outside given domain
for mpc in material_point_model_part.Conditions:
new_coordinate = mpc.CalculateOnIntegrationPoints(KratosParticle.MPC_COORD, self.process_info)[0]
if(new_coordinate[0] < -0.5 or new_coordinate[0] > 0.5 or new_coordinate[1] < -0.5 or new_coordinate[1] > 0.5 or new_coordinate[2] < 0.0 or new_coordinate[2] > 1.0 ):
mpc.Set(KratosMultiphysics.TO_ERASE, True)
# Initiate process
process = KratosParticle.ParticleEraseProcess(material_point_model_part)
# Execute
process.Execute()
# Check total number of condition
particle_counter = material_point_model_part.NumberOfConditions()
self.assertEqual(particle_counter, 1)
expected_id = 11
for mpc in material_point_model_part.Conditions:
self.assertEqual(mpc.Id, expected_id)
def test_ParticleConditionEraseBySearch(self):
current_model = KratosMultiphysics.Model()
self._generate_particle_element_and_check(current_model)
# Get model part
material_point_model_part = current_model.GetModelPart("dummy_name")
# Check initial number of condition
particle_counter = material_point_model_part.NumberOfConditions()
self.assertEqual(particle_counter, 1)
# Move particle
for mpc in material_point_model_part.Conditions:
new_coordinates = mpc.CalculateOnIntegrationPoints(KratosParticle.MPC_COORD, self.process_info)
new_coordinates[0] += [-0.5, 0.5, 0.5]
mpc.SetValuesOnIntegrationPoints(KratosParticle.MPC_COORD, new_coordinates, self.process_info)
# Call Search
self._search_element(current_model)
# Initiate process
process = KratosParticle.ParticleEraseProcess(material_point_model_part)
# Execute
process.Execute()
# Check total number of condition
particle_counter = material_point_model_part.NumberOfConditions()
self.assertEqual(particle_counter, 1)
expected_id = 11
for mpc in material_point_model_part.Conditions:
self.assertEqual(mpc.Id, expected_id)
if __name__ == '__main__':
KratosUnittest.main()
|
451732
|
from symphony.bdk.core.auth.auth_session import AuthSession
from symphony.bdk.core.config.model.bdk_retry_config import BdkRetryConfig
from symphony.bdk.core.retry import retry
from symphony.bdk.core.service.connection.model.connection_status import ConnectionStatus
from symphony.bdk.gen.pod_api.connection_api import ConnectionApi
from symphony.bdk.gen.pod_model.user_connection import UserConnection
from symphony.bdk.gen.pod_model.user_connection_request import UserConnectionRequest
class OboConnectionService:
"""Class exposing OBO-enabled endpoints for connection management.
This service is used for retrieving the connection status between the OBO user and a specified user or several
    other internal or external users in the pod, and for performing actions related to the connection status, like:
    * Send a connection request to a user
* Accept a connection request from a user
* Reject a connection request from a user
* Remove a connection with a user
"""
def __init__(self, connection_api: ConnectionApi, auth_session: AuthSession, retry_config: BdkRetryConfig):
self._connection_api = connection_api
self._auth_session = auth_session
self._retry_config = retry_config
@retry
async def get_connection(self, user_id: int) -> UserConnection:
"""
Get connection status, i.e. check if the calling user is connected to the specified user.
See: `Get Connection <https://developers.symphony.com/restapi/reference#get-connection>`_
        :param user_id: The id of the user with whom the caller wants to check.
:return: Connection status with the specified user.
"""
params = {
'user_id': str(user_id),
'session_token': await self._auth_session.session_token
}
return await self._connection_api.v1_connection_user_user_id_info_get(**params)
@retry
async def list_connections(
self,
status: ConnectionStatus = ConnectionStatus.ALL,
user_ids: [int] = None
) -> [UserConnection]:
"""
List all connection statuses of the requesting user with external or specified users.
See: `List Connections <https://developers.symphony.com/restapi/reference#list-connections>`_
:param status: Filter the connection list based on the connection status.
The connection status can only be pending_incoming, pending_outgoing,
accepted, rejected, or all.
If you do not specify a status, all connections will be returned.
:param user_ids: List of user ids which are used to restrict the list of results.
This can be used to return connections with internal users;
although, by default, this endpoint does not list implicit connections with internal users.
:return: List of connection statuses with the specified users and status.
"""
params = {
'status': status.value,
'session_token': await self._auth_session.session_token
}
if user_ids is not None:
params['user_ids'] = ','.join(map(str, user_ids))
user_connection_list = await self._connection_api.v1_connection_list_get(**params)
return user_connection_list.value
@retry
async def create_connection(self, user_id: int) -> UserConnection:
"""
Sends a connection request to another user.
See: `Create Connection <https://developers.symphony.com/restapi/reference#create-connection>`_
        :param user_id: The id of the user with whom the caller wants to connect.
:return: Connection status with the specified user.
"""
user_connection_request = UserConnectionRequest(user_id=user_id)
params = {
'connection_request': user_connection_request,
'session_token': await self._auth_session.session_token
}
return await self._connection_api.v1_connection_create_post(**params)
@retry
async def accept_connection(self, user_id: int) -> UserConnection:
"""
Accept the connection request from a requesting user.
See: `Accept Connection <https://developers.symphony.com/restapi/reference#accepted-connection>`_
:param user_id: The id of the user who requested to connect with the caller.
:return: Connection status with the requesting user.
"""
user_connection_request = UserConnectionRequest(user_id=user_id)
params = {
'connection_request': user_connection_request,
'session_token': await self._auth_session.session_token
}
return await self._connection_api.v1_connection_accept_post(**params)
@retry
async def reject_connection(self, user_id: int) -> UserConnection:
"""
Reject the connection request from a requesting user.
See: `Reject Connection <https://developers.symphony.com/restapi/reference#reject-connection>`_
:param user_id: The id of the user who requested to connect with the caller.
:return: Connection status with the requesting user.
"""
user_connection_request = UserConnectionRequest(user_id=user_id)
params = {
'connection_request': user_connection_request,
'session_token': await self._auth_session.session_token
}
return await self._connection_api.v1_connection_reject_post(**params)
@retry
async def remove_connection(self, user_id: int) -> None:
"""
Removes a connection with a user.
See: `Remove Connection <https://developers.symphony.com/restapi/reference#remove-connection>`_
:param user_id: The id of the user with whom we want to remove the connection.
"""
params = {
'uid': user_id,
'session_token': await self._auth_session.session_token
}
await self._connection_api.v1_connection_user_uid_remove_post(**params)
class ConnectionService(OboConnectionService):
"""Service class for managing the connections between users
This service is used for retrieving the connection status between the calling user and a specified user or several
    other internal or external users in the pod, and for performing actions related to the connection status, like:
    * Send a connection request to a user
* Accept a connection request from a user
* Reject a connection request from a user
* Remove a connection with a user
"""
|
451762
|
import logging
import sys
from .recorder import Recorder
logger = logging.getLogger("offstream")
logger.setLevel(logging.INFO)
logger.addHandler(logging.StreamHandler(sys.stdout))
__all__ = ["Recorder"]
|
451775
|
import bz2
import hashlib
import os
import re
import stat
import subprocess as subp
import sys
import tarfile
import time
import zipfile
from glob import glob
import urllib.request
def remove_prefix(text):
return re.sub(r'^[a-z]__', '', text)
def read_and_split(ofn):
return (l.decode('utf-8').strip().split('\t') for l in ofn)
def read_and_split_line(line):
return line.decode('utf-8').strip().split('\t')
def plain_read_and_split(ofn):
return (l.strip().split('\t') for l in ofn)
def plain_read_and_split_line(l):
return l.strip().split('\t')
def mybytes(val):
return bytes(val, encoding='utf-8')
def byte_to_megabyte(byte):
"""
Convert byte value to megabyte
"""
return byte / (1024.0**2)
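# For reference: byte_to_megabyte(1048576) == 1.0 and byte_to_megabyte(524288) == 0.5,
# since 1 MB is taken here as 1024**2 bytes.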
class ReportHook():
def __init__(self):
self.start_time = time.time()
def report(self, blocknum, block_size, total_size):
"""
Print download progress message
"""
if blocknum == 0:
self.start_time = time.time()
if total_size > 0:
sys.stderr.write("Downloading file of size: {:.2f} MB\n"
.format(byte_to_megabyte(total_size)))
else:
total_downloaded = blocknum * block_size
status = "{:3.2f} MB ".format(byte_to_megabyte(total_downloaded))
if total_size > 0:
percent_downloaded = total_downloaded * 100.0 / total_size
# use carriage return plus sys.stderr to overwrite stderr
download_rate = total_downloaded / (time.time() - self.start_time)
estimated_time = (total_size - total_downloaded) / download_rate
estimated_minutes = int(estimated_time / 60.0)
estimated_seconds = estimated_time - estimated_minutes * 60.0
status += ("{:3.2f} % {:5.2f} MB/sec {:2.0f} min {:2.0f} sec "
.format(percent_downloaded,
byte_to_megabyte(download_rate),
estimated_minutes, estimated_seconds))
status += " \r"
sys.stderr.write(status)
# set the location of the database download url
DROPBOX_DATABASE_DOWNLOAD = "https://www.dropbox.com/sh/7qze7m7g9fe2xjg/AADHWzATSQcI0CNFD0sk7MAga"
ZENODO_DATABASE_DOWNLOAD = "https://zenodo.org/record/3957592"
def download(url, download_file, force=False):
"""
Download a file from a url
"""
if not os.path.isfile(download_file) or force:
try:
sys.stderr.write("\nDownloading " + url + "\n")
file, headers = urllib.request.urlretrieve(url, download_file,
reporthook=ReportHook().report)
except EnvironmentError:
sys.stderr.write("\nWarning: Unable to download " + url + "\n")
else:
sys.stderr.write("\nFile {} already present!\n".format(download_file))
def download_unpack_tar(download_file_name, folder, bowtie2_build, nproc, use_zenodo):
"""
Download the url to the file and decompress into the folder
"""
# Create the folder if it does not already exist
if not os.path.isdir(folder):
try:
os.makedirs(folder)
except EnvironmentError:
sys.exit("ERROR: Unable to create folder for database install: " + folder)
# Check the directory permissions
if not os.access(folder, os.W_OK):
sys.exit("ERROR: The directory is not writeable: " + folder + ". "
"Please modify the permissions.")
#local path of the tarfile and md5file
tar_file = os.path.join(folder, download_file_name + ".tar")
md5_file = os.path.join(folder, download_file_name + ".md5")
#Download the list of all the files in the Dropbox folder
if not use_zenodo:
url_tar_file = "http://cmprod1.cibio.unitn.it/biobakery3/metaphlan_databases/{}.tar".format(download_file_name)
url_md5_file = "http://cmprod1.cibio.unitn.it/biobakery3/metaphlan_databases/{}.md5".format(download_file_name)
else:
url_tar_file = "https://zenodo.org/record/3957592/files/{}.tar?download=1".format(download_file_name)
url_md5_file = "https://zenodo.org/record/3957592/files/{}.md5?download=1".format(download_file_name)
# download tar and MD5 checksum
download(url_tar_file, tar_file)
download(url_md5_file, md5_file)
md5_md5 = None
md5_tar = None
if os.path.isfile(md5_file):
with open(md5_file) as f:
for row in f:
md5_md5 = row.strip().split(' ')[0]
else:
sys.stderr.write('File "{}" not found!\n'.format(md5_file))
    # compute MD5 of the downloaded .tar
if os.path.isfile(tar_file):
hash_md5 = hashlib.md5()
with open(tar_file, "rb") as f:
for chunk in iter(lambda: f.read(4096), b""):
hash_md5.update(chunk)
md5_tar = hash_md5.hexdigest()[:32]
else:
sys.stderr.write('File "{}" not found!\n'.format(tar_file))
if (md5_tar is None) or (md5_md5 is None):
sys.exit("MD5 checksums not found, something went wrong!")
# compare checksums
if md5_tar != md5_md5:
sys.exit("MD5 checksums do not correspond! If this happens again, you should remove the database files and "
"rerun MetaPhlAn so they are re-downloaded")
# untar
try:
tarfile_handle = tarfile.open(tar_file)
tarfile_handle.extractall(path=folder)
tarfile_handle.close()
except EnvironmentError:
sys.stderr.write("Warning: Unable to extract {}.\n".format(tar_file))
# uncompress sequences
bz2_file = os.path.join(folder, download_file_name + ".fna.bz2")
fna_file = os.path.join(folder, download_file_name + ".fna")
if not os.path.isfile(fna_file):
sys.stderr.write('\n\nDecompressing {} into {}\n'.format(bz2_file, fna_file))
with open(fna_file, 'wb') as fna_h, \
bz2.BZ2File(bz2_file, 'rb') as bz2_h:
for data in iter(lambda: bz2_h.read(100 * 1024), b''):
fna_h.write(data)
# build bowtie2 indexes
if not glob(os.path.join(folder, download_file_name + "*.bt2")):
bt2_base = os.path.join(folder, download_file_name)
bt2_cmd = [bowtie2_build, '--quiet']
if nproc > 1:
bt2_build_output = subp.check_output([bowtie2_build, '--usage'], stderr=subp.STDOUT)
if 'threads' in str(bt2_build_output):
bt2_cmd += ['--threads', str(nproc)]
bt2_cmd += ['-f', fna_file, bt2_base]
sys.stderr.write('\nBuilding Bowtie2 indexes\n')
try:
subp.check_call(bt2_cmd)
except Exception as e:
sys.stderr.write("Fatal error running '{}'\nError message: '{}'\n\n".format(' '.join(bt2_cmd), e))
sys.exit(1)
try:
for bt2 in glob(os.path.join(folder, download_file_name + "*.bt2")):
os.chmod(bt2, stat.S_IRUSR | stat.S_IWUSR | stat.S_IRGRP | stat.S_IWGRP | stat.S_IROTH) # change permissions to 664
except PermissionError as e:
sys.stderr.write('Cannot change permission for {}. Make sure the files are readable.'.format(os.path.join(folder, download_file_name + "*.bt2")))
    sys.stderr.write('Removing uncompressed database {}\n'.format(fna_file))
os.remove(fna_file)
def download_unpack_zip(url,download_file_name,folder,software_name):
"""
Download the url to the file and decompress into the folder
"""
# Check for write permission to the target folder
if not os.access(folder, os.W_OK):
print("WARNING: The directory is not writeable: "+
folder + " . Please modify the permissions.")
download_file=os.path.join(folder, download_file_name)
download(url, download_file, True)
error_during_extract=False
try:
zipfile_handle=zipfile.ZipFile(download_file)
zipfile_handle.extractall(path=folder)
zipfile_handle.close()
except EnvironmentError:
print("WARNING: Unable to extract "+software_name+".")
error_during_extract=True
if not error_during_extract:
try:
os.unlink(download_file)
except EnvironmentError:
print("WARNING: Unable to remove the temp download: " + download_file)
def resolve_latest_database(bowtie2_db,mpa_latest_url, force=False):
if os.path.exists(os.path.join(bowtie2_db,'mpa_latest')):
ctime_latest_db = int(os.path.getctime(os.path.join(bowtie2_db,'mpa_latest')))
if int(time.time()) - ctime_latest_db > 31536000: #1 year in epoch
os.rename(os.path.join(bowtie2_db,'mpa_latest'),os.path.join(bowtie2_db,'mpa_previous'))
download(mpa_latest_url, os.path.join(bowtie2_db,'mpa_latest'), force=True)
    if not os.path.exists(os.path.join(bowtie2_db, 'mpa_latest')) or force:
download(mpa_latest_url, os.path.join(bowtie2_db,'mpa_latest'))
with open(os.path.join(bowtie2_db,'mpa_latest')) as mpa_latest:
latest_db_version = [line.strip() for line in mpa_latest if not line.startswith('#')]
return ''.join(latest_db_version)
def check_and_install_database(index, bowtie2_db, bowtie2_build, nproc, force_redownload_latest):
# Create the folder if it does not already exist
if not os.path.isdir(bowtie2_db):
try:
os.makedirs(bowtie2_db)
except EnvironmentError:
sys.exit("ERROR: Unable to create folder for database install: " + bowtie2_db)
if index != 'latest' and len(glob(os.path.join(bowtie2_db, "*{}*".format(index)))) >= 6:
return index
use_zenodo = False
try:
if urllib.request.urlopen("http://cmprod1.cibio.unitn.it/biobakery3/metaphlan_databases/mpa_latest").getcode() != 200:
use_zenodo = True
    except Exception:
print('WARNING: It seems that you do not have Internet access.')
if os.path.exists(os.path.join(bowtie2_db,'mpa_latest')):
print('WARNING: Cannot connect to the database server. The latest available local database will be used.')
with open(os.path.join(bowtie2_db,'mpa_latest')) as mpa_latest:
latest_db_version = [line.strip() for line in mpa_latest if not line.startswith('#')]
else:
print("""ERROR: Cannot find a local database. Please run MetaPhlAn using option "-x <database_name>".
You can download the MetaPhlAn database from \n {} \n {} \n {}
""".format('http://cmprod1.cibio.unitn.it/biobakery3/metaphlan_databases',ZENODO_DATABASE_DOWNLOAD, DROPBOX_DATABASE_DOWNLOAD))
sys.exit()
    # try downloading from the segatalab website; if it fails, use zenodo
if index == 'latest':
if not use_zenodo:
mpa_latest = 'http://cmprod1.cibio.unitn.it/biobakery3/metaphlan_databases/mpa_latest'
else:
mpa_latest = 'https://zenodo.org/record/3957592/files/mpa_latest?download=1'
index = resolve_latest_database(bowtie2_db, mpa_latest, force_redownload_latest)
if os.path.exists(os.path.join(bowtie2_db,'mpa_previous')):
with open(os.path.join(bowtie2_db,'mpa_previous')) as mpa_previous:
previous_db_version = ''.join([line.strip() for line in mpa_previous if not line.startswith('#')])
if index != previous_db_version:
choice = ''
while choice.upper() not in ['Y','N']:
choice = input('A newer version of the database ({}) is available. Do you want to download it and replace the current one ({})?\t[Y/N]'.format(index, previous_db_version))
if choice.upper() == 'N':
os.rename(os.path.join(bowtie2_db,'mpa_previous'),os.path.join(bowtie2_db,'mpa_latest'))
index = previous_db_version
if len(glob(os.path.join(bowtie2_db, "*{}*".format(index)))) >= 7:
return index
# download the tar archive and decompress
sys.stderr.write("\nDownloading MetaPhlAn database\nPlease note due to "
"the size this might take a few minutes\n")
download_unpack_tar(index, bowtie2_db, bowtie2_build, nproc, use_zenodo)
sys.stderr.write("\nDownload complete\n")
return index
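# A hypothetical invocation sketch (the paths and executable name below are
# assumptions; MetaPhlAn normally drives this from its command-line entry point):
#
#   index = check_and_install_database(
#       index='latest', bowtie2_db='/path/to/metaphlan_databases',
#       bowtie2_build='bowtie2-build', nproc=4, force_redownload_latest=False)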
|
451800
|
sleep(1)  # file : InMoov2.<NAME>
import random
keyboard = Runtime.createAndStart("keyboard", "Keyboard")
keyboard.addListener("keyCommand", python.getName(), "input")
leftPort = "COM3"
rightPort = "COM7"
i01 = Runtime.createAndStart("i01", "InMoov")
cleverbot = Runtime.createAndStart("cleverbot","CleverBot")
# starts everything
i01.startAll(leftPort, rightPort)
torso = i01.startTorso("COM3")
left = Runtime.getService("i01.left")
right = Runtime.getService("i01.right")
#############################################################################################
# Markus Mod
i01.leftArm.omoplate.map(10,80,80,20)
i01.rightArm.omoplate.map(10,80,80,10)
i01.leftArm.shoulder.map(0,180,170,15)
i01.rightArm.shoulder.map(0,180,190,50)
i01.leftArm.rotate.map(40,180,140,20)
i01.rightArm.rotate.map(40,180,140,20)
i01.leftArm.bicep.map(5,90,90,20)
i01.rightArm.bicep.map(5,90,90,20)
i01.head.rothead.map(30,150,150,30)
i01.torso.topStom.map(60,120,70,110)
i01.head.eyeX.map(60,100,90,50)
i01.head.eyeY.map(50,100,100,50)
i01.head.neck.map(20,160,160,20)
############################################################
#to tweak the default Pid values
i01.headTracking.xpid.setPID(10.0,5.0,0.1)
i01.headTracking.ypid.setPID(10.0,5.0,0.1)
i01.eyesTracking.xpid.setPID(15.0,5.0,0.1)
i01.eyesTracking.ypid.setPID(15.0,5.0,0.1)
############################################################
Pin27 = 27
right.digitalReadPollingStart(Pin27)
# make friendly sample rate
right.setSampleRate(3000)
right.addListener("publishPin", "python", "publishPin")
def publishPin(pin):
    # declare the global up front: pin12 is both read and assigned below
    global pin12
    # print pin.pin, pin.value, pin.type, pin.source,
    if (pin.pin == 27 and pin.value == 1):
        if pin12 == 0:
            i01.mouth.speak("hello")
            pin12 = 1
            i01.head.attach()
            sleep(1)
            ear.clearLock()
            headfront()
            sleep(2)
            trackHumans()
# if (pin.pin == 12 and pin.value == 0):
# if pin12 == 1:
# global resttimer
# resttimer += 1
# if resttimer == 400:
# global resttimer
# resttimer = 0
# gotosleepnow()
#############################################################################################
time = 0
pin12 = 1
#resttimer = 0
rest = 0
blind = 1
kinect = 0
dance1 = 1
dance2 = 1
helvar = 1
mic = 1
nexagroup = 1
nexa1 = 0
nexa2 = 0
nexa3 = 0
nexa4 = 0
nexa5 = 0
nexa6 = 0
nexa7 = 0
nexa8 = 0
nexa9 = 0
nexa10 = 0
nexa11 = 0
nexa12 = 0
nexa13 = 0
nexa14 = 0
nexa15 = 0
nexa16 = 0
l1="m"
l2="a"
l3="r"
l4="k"
l5="u"
l6="s"
name = l1+l2+l3+l4+l5+l6
# play rock paper scissors
robyn = 0
human = 0
i01.systemCheck()
ear = i01.ear
##################################################################
# Speed at start
i01.setHandSpeed("left", 1.0, 1.0, 1.0, 1.0, 1.0, 1.0)
i01.setHandSpeed("right", 1.0, 1.0, 1.0, 1.0, 1.0, 1.0)
i01.setArmSpeed("left", 1.0, 1.0, 1.0, 1.0)
i01.setArmSpeed("right", 1.0, 1.0, 1.0, 1.0)
i01.setHeadSpeed(0.8, 0.8)
i01.mouth.speak("working on full speed")
##################################################################
# commands with i01.getName() use the InMoov service methods
ear.addCommand("attach head", "i01.head", "attach")
ear.addCommand("disconnect head", "i01.head", "detach")
ear.addCommand("attach eyes", "i01.head.eyeY", "attach")
ear.addCommand("disconnect eyes", "i01.head.eyeY", "detach")
ear.addCommand("attach right hand", "i01.rightHand", "attach")
ear.addCommand("disconnect right hand", "i01.rightHand", "detach")
ear.addCommand("attach left hand", "i01.leftHand", "attach")
ear.addCommand("disconnect left hand", "i01.leftHand", "detach")
ear.addCommand("attach everything", "i01", "attach")
ear.addCommand("disconnect everything", "i01", "detach")
ear.addCommand("attach left arm", "i01.leftArm", "attach")
ear.addCommand("disconnect left arm", "i01.leftArm", "detach")
ear.addCommand("attach right arm", "i01.rightArm", "attach")
ear.addCommand("disconnect right arm", "i01.rightArm", "detach")
ear.addCommand("let's do some exercise", "python", "startkinect")
ear.addCommand("you can stop now", "python", "offkinect")
ear.addCommand("open hand", "python", "handopen")
ear.addCommand("close hand", "python", "handclose")
ear.addCommand("servo", "python", "servos")
ear.addCommand("power down", i01.getName(), "powerDown")
ear.addCommand("power up", i01.getName(), "powerUp")
ear.addCommand("camera on", i01.getName(), "cameraOn")
ear.addCommand("off camera", i01.getName(), "cameraOff")
ear.addCommand("capture gesture", i01.getName(), "captureGesture")
# FIXME - lk tracking setpoint
ear.addCommand("track", i01.getName(), "track")
ear.addCommand("freeze track", i01.getName(), "clearTrackingPoints")
ear.addCommand("giving", i01.getName(), "giving")
ear.addCommand("be a fighter", i01.getName(), "fighter")
ear.addCommand("victory", i01.getName(), "victory")
ear.addCommand("arms up", i01.getName(), "armsUp")
ear.addCommand("arms front", i01.getName(), "armsFront")
ear.addCommand("da vinci", i01.getName(), "daVinci")
ear.addCommand("manual", ear.getName(), "lockOutAllGrammarExcept", "voice control")
ear.addCommand("voice control", ear.getName(), "clearLock")
ear.addCommand("stop listening", ear.getName(), "stopListening")
##sets the servos back to full speed, anywhere in sequence or gestures
ear.addCommand("full speed", "python", "fullspeed")
ear.addCommand("search humans", "python", "trackHumans")
ear.addCommand("go blind", "python", "stopTracking")
ear.addCommand("relax", "python", "relax")
ear.addCommand("perfect", "python", "perfect")
ear.addCommand("finger", "python", "finger")
ear.addCommand("how many fingers do you have", "python", "howmanyfingersdoihave")
# play rock paper scissors
ear.addCommand("let's play rock paper scissors", "python", "rockpaperscissors")
ear.addCommand("arms down", "python", "armsdown")
ear.addCommand("torso", "python", "Torso")
ear.addCommand("move eye", "python", "moveeye")
ear.addCommand("move your mouth", "python", "movemouth")
ear.addCommand("disco time", "python", "discotime")
ear.addCommand("move your head", "python", "movehead")
ear.addCommand("sing little teapot", "python", "littleteapot")
ear.addComfirmations("yes","correct","ya")
ear.addNegations("no","wrong","nope","nah")
ear.startListening("a | b | c | d | e | f | g | h | i | j | k | l | m | n | o | p | q | r | s | t | u | v | w | x | y | z | turn on number four |turn off number four | turn on number three | turn off number three | turn on number two | turn off number two | turn on number one | turn off number one | let's play again | take a rest | shut down your system | do something | do something else | be quiet | turn off the light in your stomach | red light | green light | blue light | wake up robyn | good night robyn | go to sleep now | yes | no thanks | yes let's play again | i have rock | i have paper | i have scissors | look at the people | take a look around | good morning | very good | look to your right | look to your left |look down |look up |look strait forward |how are you | sorry | robyn | can i have your attention | hello robyn | bye bye | i love you | thanks | thank you | nice | goodbye")
# set up a message route from the ear --to--> python method "heard"
ear.addListener("recognized", "python", "heard")
##########################################################################################
# play rock paper scissors
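# One round per call: the robot randomly throws rock, paper, or scissors,
# compares the player's spoken answer, and updates the human/robyn score
# globals; first to 3 points ends the game.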
def rockpaperscissors():
fullspeed()
i01.mouth.speak("lets play first to 3 points win")
sleep(4)
rockpaperscissors2()
def rockpaperscissors2():
    global robyn, human
fullspeed()
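    # note: if lockOutAllGrammarExcept() keeps only a single phrase, the
    # last of these three calls wins; the same pattern repeats further down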
ear.lockOutAllGrammarExcept("i have rock")
ear.lockOutAllGrammarExcept("i have paper")
ear.lockOutAllGrammarExcept("i have scissors")
x = (random.randint(1, 3))
if x == 1:
ready()
sleep(2)
rock()
sleep(2)
data = msg_i01_ear_recognized.data[0]
if (data == "i have rock"):
x = (random.randint(1, 3))
if x == 1:
i01.mouth.speak("oh no")
if x == 2:
i01.mouth.speak("that don't work")
if x == 3:
i01.mouth.speak("no points")
sleep(1)
if (data == "i have paper"):
x = (random.randint(1, 3))
if x == 1:
i01.mouth.speak("paper beats rock")
if x == 2:
i01.mouth.speak("your point")
if x == 3:
i01.mouth.speak("you got this one")
global human
human += 1
sleep(1)
if (data == "i have scissors"):
x = (random.randint(1, 3))
if x == 1:
i01.mouth.speak("1 point for me")
if x == 2:
i01.mouth.speak("going fine")
if x == 3:
i01.mouth.speak("rock beats scissors")
global robyn
robyn += 1
sleep(1)
if x == 2:
ready()
sleep(2)
paper()
sleep(2)
data = msg_i01_ear_recognized.data[0]
if (data == "i have rock"):
x = (random.randint(1, 3))
if x == 1:
i01.mouth.speak("1 point")
if x == 2:
i01.mouth.speak("paper beats rock")
if x == 3:
i01.mouth.speak("my point")
global robyn
robyn += 1
sleep(1)
if (data == "i have paper"):
x = (random.randint(1, 3))
if x == 1:
i01.mouth.speak("no points")
if x == 2:
i01.mouth.speak("ok lets try again")
sleep(2)
if x == 3:
i01.mouth.speak("again")
sleep(1)
if (data == "i have scissors"):
x = (random.randint(1, 3))
if x == 1:
i01.mouth.speak("oh no you get 1 point")
if x == 2:
i01.mouth.speak("this is not good for me")
if x == 3:
i01.mouth.speak("your point")
global human
human += 1
sleep(1)
if x == 3:
ready()
sleep(2)
scissors()
sleep(2)
data = msg_i01_ear_recognized.data[0]
if (data == "i have rock"):
x = (random.randint(1, 3))
if x == 1:
i01.mouth.speak("oh no")
if x == 2:
i01.mouth.speak("rock beats scissors")
if x == 3:
i01.mouth.speak("i feel generous today")
global human
human += 1
sleep(1)
if (data == "i have paper"):
x = (random.randint(1, 3))
if x == 1:
i01.mouth.speak("i've got you")
if x == 2:
i01.mouth.speak("my point")
if x == 3:
i01.mouth.speak("good")
global robyn
robyn += 1
sleep(1)
if (data == "i have scissors"):
x = (random.randint(1, 3))
if x == 1:
i01.mouth.speak("no no")
if x == 2:
i01.mouth.speak("that don't work")
if x == 3:
i01.mouth.speak("no points")
sleep(1)
    if robyn == 3 or human == 3:
        stoprockpaperscissors()
#    if robyn > 4 or human > 4:
#        i01.mouth.speak("sorry there must have been something wrong with my counting")
#        sleep(5)
#        stoprockpaperscissors()
    else:
        # keep playing rounds until someone reaches 3 points
        rockpaperscissors2()
def stoprockpaperscissors():
    global robyn, human
    armsdown()
    handopen()
    if robyn < human:
        i01.mouth.speak("congratulations you won with " + str(human - robyn) + " points")
        sleep(5)
        i01.mouth.speak(str(human) + " points to you and " + str(robyn) + " points to me")
    if robyn > human:
        i01.mouth.speak("yes yes i won with " + str(robyn - human) + " points")
        sleep(5)
        i01.mouth.speak("i've got " + str(robyn) + " points and you got " + str(human) + " points")
    if robyn == human:
        i01.mouth.speak("none of us won we both got " + str(robyn) + " points")
    robyn = 0
    human = 0
ear.clearLock()
i01.mouth.speak("that was fun")
sleep(3)
i01.mouth.speak("do you want to play again")
sleep(8)
data = msg_i01_ear_recognized.data[0]
if (data == "yes let's play again"):
rockpaperscissors2()
if (data == "yes"):
rockpaperscissors2()
if (data == "no thanks"):
i01.mouth.speak("maybe some other time then")
else:
i01.mouth.speak("ok i'll find something else to do")
lookaroundyou()
def ready():
i01.mouth.speak("ready")
i01.mouth.speak("go")
i01.moveHead(90,90,80,90,75)
i01.moveArm("left",65,90,75,10)
i01.moveArm("right",5,90,30,10)
i01.moveHand("left",100,171,180,180,180,90)
i01.moveHand("right",2,2,2,2,2,90)
def rock():
i01.moveHead(90,90,80,90,75)
i01.moveArm("left",70,90,75,10)
i01.moveArm("right",5,90,30,10)
i01.moveHand("left",180,171,180,180,180,90)
i01.moveHand("right",2,2,2,2,2,90)
sleep(.3)
i01.setHeadSpeed(.8,.8)
i01.moveHead(60,107,80,90,75)
i01.moveArm("left",49,90,75,10)
i01.moveArm("right",5,90,30,10)
i01.moveHand("left",180,171,180,180,180,90)
i01.moveHand("right",2,2,2,2,2,90)
sleep(.3)
i01.moveHead(60,107,80,90,75)
i01.moveArm("left",70,90,75,10)
i01.moveArm("right",5,90,30,10)
i01.moveHand("left",180,171,180,180,180,90)
i01.moveHand("right",2,2,2,2,2,90)
sleep(.3)
i01.moveHead(60,107,80,90,75)
i01.moveArm("left",49,90,75,10)
i01.moveArm("right",5,90,30,10)
i01.moveHand("left",180,171,180,180,180,90)
i01.moveHand("right",2,2,2,2,2,90)
sleep(.3)
i01.moveHead(60,107,80,90,75)
i01.moveArm("left",70,90,75,10)
i01.moveArm("right",5,90,30,10)
i01.moveHand("left",180,171,180,180,180,90)
i01.moveHand("right",2,2,2,2,2,90)
sleep(.3)
i01.moveHead(90,90,80,90,75)
i01.moveArm("left",49,90,75,10)
i01.moveArm("right",5,90,30,10)
i01.moveHand("left",180,171,180,180,180,140)
i01.moveHand("right",2,2,2,2,2,90)
sleep(.3)
x = (random.randint(1, 2))
if x == 1:
i01.mouth.speakBlocking("i have rock what do you have")
if x == 2:
i01.mouth.speakBlocking("what do you have")
def paper():
i01.moveHead(90,90,80,90,75)
i01.moveArm("left",70,90,75,10)
i01.moveArm("right",5,90,30,10)
i01.moveHand("left",180,171,180,180,180,90)
i01.moveHand("right",2,2,2,2,2,90)
sleep(.3)
i01.setHeadSpeed(.8,.8)
i01.moveHead(60,107,80,90,75)
i01.moveArm("left",49,90,75,10)
i01.moveArm("right",5,90,30,10)
i01.moveHand("left",180,171,180,180,180,90)
i01.moveHand("right",2,2,2,2,2,90)
sleep(.3)
i01.moveHead(60,107,80,90,75)
i01.moveArm("left",70,90,75,10)
i01.moveArm("right",5,90,30,10)
i01.moveHand("left",180,171,180,180,180,90)
i01.moveHand("right",2,2,2,2,2,90)
sleep(.3)
i01.moveHead(60,107,80,90,75)
i01.moveArm("left",49,90,75,10)
i01.moveArm("right",5,90,30,10)
i01.moveHand("left",180,171,180,180,180,90)
i01.moveHand("right",2,2,2,2,2,90)
sleep(.3)
i01.moveHead(60,107,80,90,75)
i01.moveArm("left",70,90,75,10)
i01.moveArm("right",5,90,30,10)
i01.moveHand("left",180,171,180,180,180,90)
i01.moveHand("right",2,2,2,2,2,90)
sleep(.3)
i01.moveHead(90,90,80,90,75)
i01.moveArm("left",49,90,75,10)
i01.moveArm("right",5,90,30,10)
i01.moveHand("left",0,0,0,0,0,165)
i01.moveHand("right",2,2,2,2,2,90)
sleep(.3)
x = (random.randint(1, 2))
if x == 1:
i01.mouth.speakBlocking("i have paper what do you have")
if x == 2:
i01.mouth.speakBlocking("what do you have")
def scissors():
i01.moveHead(90,90,80,90,75)
i01.moveArm("left",70,90,75,10)
i01.moveArm("right",5,90,30,10)
i01.moveHand("left",180,171,180,180,180,90)
i01.moveHand("right",2,2,2,2,2,90)
sleep(.3)
i01.setHeadSpeed(0.8,0.8)
i01.moveHead(60,107,80,90,75)
i01.moveArm("left",49,90,75,10)
i01.moveArm("right",5,90,30,10)
i01.moveHand("left",180,171,180,180,180,90)
i01.moveHand("right",2,2,2,2,2,90)
sleep(.3)
i01.moveHead(60,107,80,90,75)
i01.moveArm("left",70,90,75,10)
i01.moveArm("right",5,90,30,10)
i01.moveHand("left",180,171,180,180,180,90)
i01.moveHand("right",2,2,2,2,2,90)
sleep(.3)
i01.moveHead(60,107,80,90,75)
i01.moveArm("left",49,90,75,10)
i01.moveArm("right",5,90,30,10)
i01.moveHand("left",180,171,180,180,180,90)
i01.moveHand("right",2,2,2,2,2,90)
sleep(.3)
i01.moveHead(60,107,80,90,75)
i01.moveArm("left",70,90,75,10)
i01.moveArm("right",5,90,30,10)
i01.moveHand("left",180,171,180,180,180,90)
i01.moveHand("right",2,2,2,2,2,90)
sleep(.3)
i01.moveHead(90,90,80,90,75)
i01.moveArm("left",49,90,75,10)
i01.moveArm("right",5,90,30,10)
i01.moveHand("left",50,0,0,180,180,90)
i01.moveHand("right",2,2,2,2,2,90)
sleep(.3)
x = (random.randint(1, 2))
if x == 1:
i01.mouth.speakBlocking("i have scissors what do you have")
if x == 2:
i01.mouth.speakBlocking("what do you have")
##########################################################################################
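# Keyboard hotkeys: the Keyboard service created at the top of the script
# routes every key press to this input() callback.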
def input(cmd):
# print 'python object is',msg_[service]_[method]
cmd = msg_keyboard_keyCommand.data[0]
print 'python data is', cmd
if (cmd == "C"):
i01.mouth.audioFile.playFile("C:\Users\Markus\Music\markustest.mp3", False)
sleep(12.0)
i01.head.jaw.moveTo(50)
sleep(0.2)
i01.head.jaw.moveTo(10)
sleep(0.23)
i01.head.jaw.moveTo(50)
sleep(0.2)
i01.head.jaw.moveTo(10)
sleep(0.17)
i01.head.jaw.moveTo(50)
sleep(0.2)
i01.head.jaw.moveTo(10)
sleep(0.68)
i01.head.jaw.moveTo(50)
sleep(0.2)
i01.head.jaw.moveTo(10)
sleep(1.44)
i01.head.jaw.moveTo(50)
sleep(0.2)
i01.head.jaw.moveTo(10)
sleep(0.2)
i01.head.jaw.moveTo(50)
sleep(0.2)
i01.head.jaw.moveTo(10)
sleep(0.22)
i01.head.jaw.moveTo(50)
sleep(0.2)
i01.head.jaw.moveTo(10)
sleep(0.59)
i01.head.jaw.moveTo(50)
sleep(0.2)
i01.head.jaw.moveTo(10)
sleep(0.22)
i01.head.jaw.moveTo(50)
sleep(0.2)
i01.head.jaw.moveTo(10)
sleep(0.27)
i01.head.jaw.moveTo(50)
sleep(0.2)
i01.head.jaw.moveTo(10)
sleep(0.65)
i01.head.jaw.moveTo(50)
sleep(0.2)
i01.head.jaw.moveTo(10)
sleep(0.61)
i01.head.jaw.moveTo(50)
sleep(0.2)
i01.head.jaw.moveTo(10)
sleep(0.68)
i01.head.jaw.moveTo(50)
sleep(0.2)
i01.head.jaw.moveTo(10)
sleep(12.91)
i01.head.jaw.moveTo(50)
sleep(0.2)
i01.head.jaw.moveTo(10)
sleep(0.14)
i01.head.jaw.moveTo(50)
sleep(0.2)
i01.head.jaw.moveTo(10)
sleep(0.26)
i01.head.jaw.moveTo(50)
sleep(0.2)
i01.head.jaw.moveTo(10)
sleep(0.59)
i01.head.jaw.moveTo(50)
sleep(0.2)
i01.head.jaw.moveTo(10)
sleep(1.46)
i01.head.jaw.moveTo(50)
sleep(0.2)
i01.head.jaw.moveTo(10)
sleep(0.16)
i01.head.jaw.moveTo(50)
sleep(0.2)
i01.head.jaw.moveTo(10)
sleep(0.22)
i01.head.jaw.moveTo(50)
sleep(0.2)
i01.head.jaw.moveTo(10)
sleep(0.61)
i01.head.jaw.moveTo(50)
sleep(0.2)
i01.head.jaw.moveTo(10)
sleep(0.16)
i01.head.jaw.moveTo(50)
sleep(0.2)
i01.head.jaw.moveTo(10)
sleep(0.25)
i01.head.jaw.moveTo(50)
sleep(0.2)
i01.head.jaw.moveTo(10)
sleep(0.69)
i01.head.jaw.moveTo(50)
sleep(0.2)
i01.head.jaw.moveTo(10)
sleep(0.66)
i01.head.jaw.moveTo(50)
sleep(0.2)
i01.head.jaw.moveTo(10)
sleep(0.62)
i01.head.jaw.moveTo(50)
sleep(0.2)
i01.head.jaw.moveTo(10)
if (cmd == "T"):
talk()
if (cmd == "R"):
if rest == 0:
shutdownyoursystem()
elif rest == 1:
robyn()
if (cmd == "W"):
i01.head.neck.moveTo(i01.head.neck.getPosFloat() + 1)
if (cmd == "Z"):
i01.head.neck.moveTo(i01.head.neck.getPosFloat() - 1)
if (cmd == "A"):
i01.head.rothead.moveTo(i01.head.rothead.getPosFloat() + 1)
if (cmd == "D"):
i01.head.rothead.moveTo(i01.head.rothead.getPosFloat() - 1)
if (cmd == "S"):
headfront()
if (cmd == "K"):
if kinect == 0:
startkinect()
elif kinect == 1:
offkinect()
if (cmd == "B"):
if blind == 1:
trackHumans()
elif blind == 0:
stopTracking()
if (cmd == "Q"):
i01.rightArm.shoulder.moveTo(i01.rightArm.shoulder.getPosFloat() + 0.5)
if (cmd == "P"):
discotime()
#################################################
if (cmd == "5"):
if nexagroup == 1:
i01.mouth.speakBlocking("nexa group 2")
global nexagroup
nexagroup = 2
elif nexagroup == 2:
i01.mouth.speakBlocking("nexa group 3")
global nexagroup
nexagroup = 3
elif nexagroup == 3:
i01.mouth.speakBlocking("nexa group 4")
global nexagroup
nexagroup = 4
elif nexagroup == 4:
i01.mouth.speakBlocking("nexa group 1")
global nexagroup
nexagroup = 1
if (cmd == "1"):
if nexagroup == 1:
if nexa1 == 0:
nexa1on()
elif nexa1 == 1:
nexa1off()
elif nexagroup == 2:
if nexa5 == 0:
nexa5on()
elif nexa5 == 1:
nexa5off()
elif nexagroup == 3:
if nexa9 == 0:
nexa9on()
elif nexa9 == 1:
nexa9off()
elif nexagroup == 4:
if nexa13 == 0:
nexa13on()
elif nexa13 == 1:
nexa13off()
if (cmd == "2"):
if nexagroup == 1:
if nexa2 == 0:
nexa2on()
elif nexa2 == 1:
nexa2off()
elif nexagroup == 2:
if nexa6 == 0:
nexa6on()
elif nexa6 == 1:
nexa6off()
elif nexagroup == 3:
if nexa10 == 0:
nexa10on()
elif nexa10 == 1:
nexa10off()
elif nexagroup == 4:
if nexa14 == 0:
nexa14on()
elif nexa14 == 1:
nexa14off()
if (cmd == "3"):
if nexagroup == 1:
if nexa3 == 0:
nexa3on()
elif nexa3 == 1:
nexa3off()
elif nexagroup == 2:
if nexa7 == 0:
nexa7on()
elif nexa7 == 1:
nexa7off()
elif nexagroup == 3:
if nexa11 == 0:
nexa11on()
elif nexa11 == 1:
nexa11off()
elif nexagroup == 4:
if nexa15 == 0:
nexa15on()
elif nexa15 == 1:
nexa15off()
if (cmd == "4"):
if nexagroup == 1:
if nexa4 == 0:
nexa4on()
elif nexa4 == 1:
nexa4off()
elif nexagroup == 2:
if nexa8 == 0:
nexa8on()
elif nexa8 == 1:
nexa8off()
elif nexagroup == 3:
if nexa12 == 0:
nexa12on()
elif nexa12 == 1:
nexa12off()
elif nexagroup == 4:
if nexa16 == 0:
nexa16on()
elif nexa16 == 1:
nexa16off()
#################################################
if (cmd == "M"):
if mic == 1:
ear.lockOutAllGrammarExcept("robin")
i01.mouth.speak("i'm not listening")
global mic
mic = 0
elif mic == 0:
ear.clearLock()
i01.mouth.speak("i can hear again")
global mic
mic = 1
##########################################################################################
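# Main voice-command dispatcher: every phrase recognized by the ear is
# routed here via the addListener("recognized", ...) call above.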
def heard(data):
data = msg_i01_ear_recognized.data[0]
if (data == name):
i01.mouth.speak("this is great")
if (data == "a"):
i01.mouth.speak(name)
if (data == "turn on number one"):
nexa1on()
if (data == "turn off number one"):
nexa1off()
if (data == "turn on number two"):
nexa2on()
if (data == "turn off number two"):
nexa2off()
if (data == "turn on number three"):
nexa3on()
if (data == "turn off number three"):
nexa3off()
if (data == "turn on number four"):
nexa4on()
if (data == "turn off number four"):
nexa4off()
if (data == "let's play again"):
rockpaperscissors2()
if (data == "be quiet"):
blue()
ear.lockOutAllGrammarExcept("robyn")
i01.mouth.speak("ok i will only listen if you say my name")
global mic
mic = 0
if (data == "turn off the light in your stomach"):
ledoff()
if (data == "red light"):
red()
if (data == "green light"):
green()
if (data == "blue light"):
blue()
if (data == "shut down your system") or (data == "take a rest"):
shutdownyoursystem()
if (data == "go to sleep now") or (data == "good night robyn"):
gotosleepnow()
if (data == "wake up robyn") or (data == "good morning"):
i01.attach()
green()
global rest
rest = 0
global mic
mic = 1
global pin12
pin12 = 1
headfront()
eyesfront()
i01.mouth.speak("good morning")
ear.clearLock()
x = (random.randint(1, 4))
if x == 1:
i01.mouth.speak("i hope you had a good night sleep")
if x == 2:
i01.mouth.speak("nice to see you again")
if x == 3:
i01.mouth.speak("this is going to be a good day")
if (data == "look at the people"):
i01.setHeadSpeed(0.8, 0.8)
for y in range(0, 10):
x = (random.randint(1, 5))
if x == 1:
i01.head.neck.moveTo(90)
eyeslooking()
sleep(1)
trackHumans()
sleep(10)
stopTracking()
if x == 2:
i01.head.rothead.moveTo(80)
eyeslooking()
sleep(1)
trackHumans()
sleep(10)
stopTracking()
if x == 3:
headdown()
eyeslooking()
sleep(1)
trackHumans()
sleep(10)
stopTracking()
if x == 4:
headright()
eyeslooking()
sleep(1)
trackHumans()
sleep(10)
stopTracking()
if x == 5:
headleft()
eyeslooking()
sleep(1)
trackHumans()
sleep(10)
stopTracking()
sleep(1)
headfront()
eyesfront()
sleep(3)
i01.mouth.speak("nice to meet you all")
if (data == "take a look around"):
lookaroundyou()
if (data == "do something else"):
lookaroundyou()
if (data == "do something"):
lookaroundyou()
if (data == "very good"):
i01.mouth.speak("thanks")
if (data == "look to your right"):
headright()
if (data == "look to your left"):
headleft()
if (data == "look down"):
headdown()
if (data == "look up"):
headupp()
if (data == "look strait forward"):
headfront()
if (data == "how are you"):
i01.mouth.speak("i'm fine thanks")
if (data == "goodbye"):
goodbye()
if (data == "robyn"):
robyn()
if (data == "sorry"):
global helvar
helvar = 1
green()
x = (random.randint(1, 3))
if x == 1:
i01.mouth.speak("no problems")
if x == 2:
i01.mouth.speak("it doesn't matter")
if x == 3:
i01.mouth.speak("it's okay")
if (data == "nice"):
x = (random.randint(1, 3))
if x == 1:
i01.mouth.speak("I know")
if x == 2:
i01.mouth.speak("yes, indeed")
if x == 3:
i01.mouth.speak("you are damn right")
if (data == "bye bye"):
i01.mouth.speak("see you soon")
global helvar
helvar = 1
x = (random.randint(1, 2))
if x == 1:
i01.mouth.speak("i'm looking forward to see you again")
if x == 2:
i01.mouth.speak("goodbye")
if (data == "thank you"):
x = (random.randint(1, 3))
if x == 1:
i01.mouth.speak("you are welcome")
if x == 2:
i01.mouth.speak("my pleasure")
if x == 3:
i01.mouth.speak("it's okay")
if (data == "thanks"):
x = (random.randint(1, 2))
if x == 1:
i01.mouth.speak("it's okay")
if x == 2:
i01.mouth.speak("sure")
if (data == "hello robyn"):
if helvar <= 2:
i01.mouth.speak("hello")
global helvar
helvar += 1
green()
sleep(1)
elif helvar == 3:
i01.mouth.speak("hello hello you have already said hello at least twice")
i01.moveArm("left",43,88,22,10)
i01.moveArm("right",20,90,30,10)
i01.moveHand("left",0,0,0,0,0,119)
i01.moveHand("right",0,0,0,0,0,119)
green()
sleep(1)
red()
sleep(1)
green()
sleep(1)
armsdown()
global helvar
helvar += 1
elif helvar == 4:
i01.mouth.speak("what is your problem stop saying hello all the time")
i01.moveArm("left",30,83,22,10)
i01.moveArm("right",40,85,30,10)
i01.moveHand("left",130,180,180,180,180,119)
i01.moveHand("right",130,180,180,180,180,119)
red()
sleep(1)
green()
sleep(1)
red()
sleep(1)
green()
sleep(1)
armsdown()
global helvar
helvar += 1
elif helvar == 5:
stopTracking()
i01.mouth.speak("i will ignore you if you say hello one more time")
headright()
red()
sleep(3)
armsdown()
global helvar
helvar += 1
if (data == "i love you"):
green()
i01.mouth.speak("i love you too")
i01.moveHead(116,80,87,80,70)
i01.moveArm("left",85,93,42,16)
i01.moveArm("right",87,93,37,18)
i01.moveHand("left",124,82,65,81,41,143)
i01.moveHand("right",59,53,89,61,36,21)
i01.moveTorso(90,90,90)
global helvar
helvar = 1
sleep(0.2)
sleep(1)
armsdown()
def stopit():
    ear.clearLock()
    headfront()
    eyesfront()
    data = msg_i01_ear_recognized.data[0]
    if (data == "break"):
        i01.mouth.speak("yes")
#############################################################################################
def discotime():
i01.moveHand("left",92,33,37,71,66,25)
i01.moveHand("right",81,66,82,60,105,113)
nexa1off()
ear.lockOutAllGrammarExcept("robyn")
i01.mouth.speak("it's disco time")
sleep(3)
nexa2off()
sleep(1)
    i01.mouth.audioFile.playFile(r"C:\Users\Markus\Music\Get the Party Started.mp3", False)
sleep(1.6)
nexa3on()
sleep(1)
nexa4on()
for y in range(0, 67):
data = msg_i01_ear_recognized.data[0]
if (data == "robyn"):
stopit()
discodance1()
discodance2()
i01.head.neck.moveTo(40)
red()
sleep(0.4)
i01.head.neck.moveTo(90)
sleep(0.52)
discodance1()
discodance2()
i01.head.neck.moveTo(40)
green()
sleep(0.4)
i01.head.neck.moveTo(90)
sleep(0.515)
discodance1()
discodance2()
i01.head.neck.moveTo(40)
blue()
sleep(0.4)
i01.head.neck.moveTo(90)
sleep(0.5)
ear.clearLock()
nexa1on()
sleep(0.5)
nexa2on()
sleep(0.5)
nexa3off()
sleep(0.5)
nexa4off()
global dance2
dance2 = 1
robyn()
armsdown()
i01.mouth.speak("is the party already over")
def discodance1():
if dance1 == 1:
i01.moveTorso(100,90,90)
global dance1
dance1 = 2
elif dance1 == 2:
i01.moveTorso(80,90,90)
global dance1
dance1 = 1
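# discodance2() is a beat counter: each call advances dance2 by one and
# picks the arm pose for whichever window of the song the counter falls in,
# keeping the choreography roughly in time with the music.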
def discodance2():
if dance2 >= 0 and dance2 <= 9 or dance2 >= 17 and dance2 <= 26 or dance2 >= 42 and dance2 <= 52 :
if dance1 == 2:
i01.moveArm("left",60,90,30,10)
i01.moveArm("right",60,90,30,10)
elif dance1 == 1:
i01.moveArm("left",30,90,30,10)
i01.moveArm("right",30,90,30,10)
global dance2
dance2 += 1
if dance2 >= 9 and dance2 <= 17 :
if dance1 == 2:
i01.moveArm("left",60,60,30,10)
i01.moveArm("right",60,120,30,10)
elif dance1 == 1:
i01.moveArm("left",30,60,30,10)
i01.moveArm("right",30,120,30,10)
global dance2
dance2 += 1
if dance2 >= 26 and dance2 <= 34 :
if dance1 == 2:
i01.moveArm("left",60,120,30,10)
i01.moveArm("right",60,60,30,10)
elif dance1 == 1:
i01.moveArm("left",30,120,30,10)
i01.moveArm("right",30,60,30,10)
global dance2
dance2 += 1
if dance2 >= 34 and dance2 <= 42 or dance2 >= 60 and dance2 <= 68 :
if dance1 == 2:
i01.moveArm("left",25,94,79,10)
i01.moveArm("right",90,107,43,15)
elif dance1 == 1:
i01.moveArm("left",65,94,73,10)
i01.moveArm("right",37,107,72,15)
global dance2
dance2 += 1
if dance2 >= 52 and dance2 <= 60 or dance2 >= 68 and dance2 <= 76 or dance2 >= 84 and dance2 <= 92 :
if dance1 == 2:
i01.moveArm("left",5,90,30,10)
i01.moveArm("right",5,130,30,30)
elif dance1 == 1:
i01.moveArm("left",5,130,30,30)
i01.moveArm("right",5,90,30,10)
global dance2
dance2 += 1
if dance2 >= 76 and dance2 <= 84 or dance2 >= 92 and dance2 <= 102 :
if dance1 == 2:
i01.moveArm("left",90,90,30,19)
i01.moveArm("right",87,104,30,10)
elif dance1 == 1:
i01.moveArm("left",90,136,30,10)
i01.moveArm("right",87,69,30,25)
global dance2
dance2 += 1
if dance2 >= 102 and dance2 <= 111 or dance2 >= 119 and dance2 <= 128 or dance2 >= 146 and dance2 <= 154 :
if dance1 == 2:
i01.moveArm("left",30,90,30,10)
i01.moveArm("right",60,90,30,10)
elif dance1 == 1:
i01.moveArm("left",60,90,30,10)
i01.moveArm("right",30,90,30,10)
global dance2
dance2 += 1
if dance2 >= 111 and dance2 <= 119 :
if dance1 == 2:
i01.moveArm("left",30,60,30,10)
i01.moveArm("right",60,120,30,10)
elif dance1 == 1:
i01.moveArm("left",60,60,30,10)
i01.moveArm("right",30,120,30,10)
global dance2
dance2 += 1
if dance2 >= 128 and dance2 <= 138 :
if dance1 == 2:
i01.moveArm("left",30,120,30,10)
i01.moveArm("right",60,60,30,10)
elif dance1 == 1:
i01.moveArm("left",60,120,30,10)
i01.moveArm("right",30,60,30,10)
global dance2
dance2 += 1
if dance2 >= 138 and dance2 <= 146 or dance2 >= 164 and dance2 <= 172 :
if dance1 == 2:
i01.moveArm("left",25,94,79,10)
i01.moveArm("right",90,107,43,15)
elif dance1 == 1:
i01.moveArm("left",65,94,73,10)
i01.moveArm("right",37,107,72,15)
global dance2
dance2 += 1
if dance2 >= 154 and dance2 <= 164 or dance2 >= 172 and dance2 <= 180 or dance2 >= 188 and dance2 <= 196 :
if dance1 == 2:
i01.moveArm("left",5,90,30,10)
i01.moveArm("right",60,130,30,30)
elif dance1 == 1:
i01.moveArm("left",60,130,30,30)
i01.moveArm("right",5,90,30,10)
global dance2
dance2 += 1
if dance2 >= 180 and dance2 <= 188 or dance2 >= 196 and dance2 <= 212 :
if dance1 == 2:
i01.moveArm("left",90,90,30,19)
i01.moveArm("right",87,104,30,10)
elif dance1 == 1:
i01.moveArm("left",90,136,30,10)
i01.moveArm("right",87,69,30,25)
global dance2
dance2 += 1
#############################################################################################
def howmanyfingersdoihave():
blue()
fullspeed()
i01.moveHead(49,74)
i01.moveArm("left",75,83,79,24)
i01.moveArm("right",65,82,71,24)
i01.moveHand("left",74,140,150,157,168,92)
i01.moveHand("right",89,80,98,120,114,0)
sleep(2)
i01.moveHand("right",0,80,98,120,114,0)
i01.mouth.speakBlocking("ten")
sleep(1)
i01.moveHand("right",0,0,98,120,114,0)
i01.mouth.speakBlocking("nine")
sleep(1)
i01.moveHand("right",0,0,0,120,114,0)
i01.mouth.speakBlocking("eight")
sleep(1)
i01.moveHand("right",0,0,0,0,114,0)
i01.mouth.speakBlocking("seven")
sleep(1)
i01.moveHand("right",0,0,0,0,0,0)
i01.mouth.speakBlocking("six")
sleep(1)
i01.setHeadSpeed(.70,.70)
i01.moveHead(40,105)
i01.moveArm("left",75,83,79,24)
i01.moveArm("right",65,82,71,24)
i01.moveHand("left",0,0,0,0,0,180)
i01.moveHand("right",0,0,0,0,0,0)
sleep(1)
i01.mouth.speakBlocking("and five makes eleven")
sleep(0.7)
i01.setHeadSpeed(0.7,0.7)
i01.moveHead(40,50)
sleep(0.5)
i01.setHeadSpeed(0.7,0.7)
i01.moveHead(49,105)
sleep(0.7)
i01.setHeadSpeed(0.7,0.8)
i01.moveHead(40,50)
sleep(0.7)
i01.setHeadSpeed(0.7,0.8)
i01.moveHead(49,105)
sleep(0.7)
i01.setHeadSpeed(0.7,0.7)
i01.moveHead(90,85)
sleep(0.7)
i01.mouth.speakBlocking("eleven")
i01.moveArm("left",70,75,70,20)
i01.moveArm("right",60,75,65,20)
sleep(1)
i01.mouth.speakBlocking("that doesn't seem right")
sleep(2)
i01.mouth.speakBlocking("I think I better try that again")
i01.moveHead(40,105)
i01.moveArm("left",75,83,79,24)
i01.moveArm("right",65,82,71,24)
i01.moveHand("left",140,168,168,168,158,90)
i01.moveHand("right",87,138,160,168,158,25)
sleep(2)
i01.moveHand("left",10,140,168,168,158,90)
i01.mouth.speakBlocking("one")
sleep(.1)
i01.moveHand("left",10,10,168,168,158,90)
i01.mouth.speakBlocking("two")
sleep(.1)
i01.moveHand("left",10,10,10,168,158,90)
i01.mouth.speakBlocking("three")
sleep(.1)
i01.moveHand("left",10,10,10,10,158,90)
i01.mouth.speakBlocking("four")
sleep(.1)
i01.moveHand("left",10,10,10,10,10,90)
i01.mouth.speakBlocking("five")
sleep(.1)
i01.setHeadSpeed(0.65,0.65)
i01.moveHead(53,65)
i01.moveArm("right",48,80,78,11)
i01.setHandSpeed("left", 1.0, 1.0, 1.0, 1.0, 1.0, 1.0)
i01.setHandSpeed("right", 1.0, 1.0, 1.0, 1.0, 1.0, 1.0)
i01.moveHand("left",10,10,10,10,10,90)
i01.moveHand("right",10,0,10,10,0,25)
sleep(1)
i01.mouth.speakBlocking("and five makes ten")
sleep(.5)
i01.mouth.speakBlocking("there that's better")
i01.moveHead(95,85)
i01.moveArm("left",75,83,79,24)
i01.moveArm("right",40,70,70,10)
sleep(0.5)
i01.mouth.speakBlocking("inmoov has ten fingers")
i01.moveHead(90,90)
i01.setHandSpeed("left", 0.8, 0.8, 0.8, 0.8, 0.8, 0.8)
i01.setHandSpeed("right", 0.8, 0.8, 0.8, 0.8, 0.8, 0.8)
i01.moveHand("left",140,140,140,140,140,60)
i01.moveHand("right",140,140,140,140,140,60)
sleep(1.0)
i01.setArmSpeed("right", 1.0, 1.0, 1.0, 1.0)
i01.setArmSpeed("left", 1.0, 1.0, 1.0, 1.0)
i01.moveArm("left",5,90,30,11)
i01.moveArm("right",5,90,30,11)
armsdown()
sleep(1)
green()
def finger():
i01.setHandSpeed("left", 0.85, 0.85, 0.85, 0.85, 0.85, 1.0)
i01.setHandSpeed("right", 1.0, 0.85, 1.0, 1.0, 1.0, 1.0)
i01.setArmSpeed("left", 1.0, 1.0, 1.0, 1.0)
i01.setArmSpeed("right", 0.90, 1.0, 1.0, 1.0)
i01.setHeadSpeed(1.0, 0.90)
i01.setTorsoSpeed(1.0, 1.0, 1.0)
i01.moveHead(80,86,85,85,72)
i01.moveArm("left",5,94,30,10)
i01.moveArm("right",7,78,92,10)
i01.moveHand("left",180,180,180,180,180,90)
i01.moveHand("right",180,2,175,160,165,180)
i01.moveTorso(90,90,90)
fullspeed()
def fullspeed():
i01.setHandSpeed("left", 1.0, 1.0, 1.0, 1.0, 1.0, 1.0)
i01.setHandSpeed("right", 1.0, 1.0, 1.0, 1.0, 1.0, 1.0)
i01.setArmSpeed("left", 1.0, 1.0, 1.0, 1.0)
i01.setArmSpeed("right", 1.0, 1.0, 1.0, 1.0)
i01.setHeadSpeed(0.7, 0.7)
def trackHumans():
i01.headTracking.faceDetect()
i01.eyesTracking.faceDetect()
global blind
blind = 0
def stopTracking():
i01.headTracking.stopTracking()
i01.eyesTracking.stopTracking()
global blind
blind = 1
def startkinect():
ear.lockOutAllGrammarExcept("you can stop now")
global kinect
kinect = 1
i01.leftArm.shoulder.map(0,180,250,0)
i01.rightArm.shoulder.map(0,180,290,40)
i01.leftArm.omoplate.map(10,80,80,30)
i01.rightArm.omoplate.map(10,80,100,40)
i01.copyGesture(True)
def offkinect():
i01.copyGesture(False)
global kinect
kinect = 0
i01.leftArm.shoulder.map(0,180,170,15)
i01.rightArm.shoulder.map(0,180,190,50)
i01.leftArm.omoplate.map(10,80,80,20)
i01.rightArm.omoplate.map(10,80,80,20)
ear.clearLock()
armsdown()
def handopen():
i01.moveHand("left",0,0,0,0,0)
i01.moveHand("right",0,0,0,0,0)
def lefthandopen():
i01.moveHand("left",0,0,0,0,0)
def righthandopen():
i01.moveHand("right",0,0,0,0,0)
def handclose():
i01.moveHand("left",180,180,180,180,180)
i01.moveHand("right",180,180,180,180,180)
def lefthandclose():
i01.moveHand("left",180,180,180,180,180)
def righthandclose():
i01.moveHand("right",180,180,180,180,180)
def servos():
ear.pauseListening()
sleep(2)
i01.setHandSpeed("left", 1.0, 1.0, 1.0, 1.0, 1.0, 1.0)
i01.setHandSpeed("right", 1.0, 1.0, 1.0, 1.0, 1.0, 1.0)
i01.setArmSpeed("right", 0.85, 0.85, 0.85, 0.85)
i01.setArmSpeed("left", 1.0, 1.0, 1.0, 1.0)
i01.setHeadSpeed(0.65, 0.65)
i01.moveHead(79,100)
i01.moveArm("left",5,119,28,15)
i01.moveArm("right",5,111,28,15)
i01.moveHand("left",42,58,87,55,71,35)
i01.moveHand("right",81,20,82,60,105,113)
i01.mouth.speakBlocking("I currently have 27 hobby servos installed in my body to give me life")
i01.setHandSpeed("left", 0.85, 0.85, 0.85, 0.85, 0.85, 0.85)
i01.setHandSpeed("right", 1.0, 1.0, 1.0, 1.0, 1.0, 1.0)
i01.setArmSpeed("right", 0.85, 0.85, 0.85, 0.85)
i01.setArmSpeed("left", 1.0, 1.0, 1.0, 1.0)
i01.setHeadSpeed(0.65, 0.65)
i01.moveHead(124,90)
i01.moveArm("left",89,94,91,35)
i01.moveArm("right",20,67,31,22)
i01.moveHand("left",106,0,161,147,138,90)
i01.moveHand("right",0,0,0,54,91,90)
i01.mouth.speakBlocking("there's one servo for moving my mouth up and down")
sleep(1)
i01.setHandSpeed("left", 0.85, 0.85, 1.0, 0.85, 0.85, 0.85)
i01.setHandSpeed("right", 1.0, 1.0, 1.0, 1.0, 1.0, 1.0)
i01.setArmSpeed("right", 0.85, 0.85, 0.85, 0.85)
i01.setArmSpeed("left", 1.0, 1.0, 1.0, 1.0)
i01.setHeadSpeed(0.65, 0.65)
i01.moveHead(105,76);
i01.moveArm("left",89,106,103,35);
i01.moveArm("right",35,67,31,22);
i01.moveHand("left",106,0,0,147,138,7);
i01.moveHand("right",0,0,0,54,91,90);
i01.mouth.speakBlocking("two for my eyes")
sleep(0.2)
i01.setHandSpeed("left", 0.85, 0.85, 1.0, 1.0, 1.0, 0.85)
i01.moveHand("left",106,0,0,0,0,7);
i01.mouth.speakBlocking("and two more for my head")
sleep(0.5)
i01.setHandSpeed("left", 0.85, 0.9, 0.9, 0.9, 0.9, 0.85)
i01.setHandSpeed("right", 1.0, 1.0, 1.0, 1.0, 1.0, 1.0)
i01.setArmSpeed("right", 0.85, 0.85, 0.85, 0.85)
i01.setArmSpeed("left", 1.0, 1.0, 1.0, 1.0)
i01.setHeadSpeed(0.65, 0.65)
i01.moveHead(90,40);
i01.moveArm("left",89,106,103,35);
i01.moveArm("right",35,67,31,20);
i01.moveHand("left",106,140,140,140,140,7);
i01.moveHand("right",0,0,0,54,91,90);
i01.mouth.speakBlocking("so i can look around")
sleep(0.5)
i01.setHeadSpeed(0.65, 0.65)
i01.moveHead(105,125);
i01.setArmSpeed("left", 0.9, 0.9, 0.9, 0.9)
i01.moveArm("left",60,100,85,30);
i01.mouth.speakBlocking("and see who's there")
i01.setHeadSpeed(0.65, 0.65)
i01.moveHead(40,56);
sleep(0.5)
i01.setArmSpeed("right", 1.0, 1.0, 1.0, 1.0)
i01.setArmSpeed("left", 1.0, 1.0, 1.0, 1.0)
i01.setArmSpeed("left", 1.0, 1.0, 1.0, 1.0);
i01.setArmSpeed("right", 0.5, 0.6, 0.5, 0.6);
i01.moveArm("left",87,41,64,11)
i01.moveArm("right",5,95,40,11)
i01.moveHand("left",98,150,160,160,160,104)
i01.moveHand("right",0,0,50,54,91,90);
i01.mouth.speakBlocking("there's three servos in each shoulder")
i01.moveHead(40,67);
sleep(2)
i01.setHandSpeed("left", 0.8, 0.9, 0.8, 0.8, 0.8, 0.8)
i01.setHandSpeed("right", 1.0, 1.0, 1.0, 1.0, 1.0, 1.0)
i01.setArmSpeed("right", 0.85, 0.85, 0.85, 0.85)
i01.setArmSpeed("left", 1.0, 1.0, 1.0, 1.0)
i01.setHeadSpeed(0.8, 0.8)
i01.moveHead(43,69)
i01.moveArm("left",87,41,64,11)
i01.moveArm("right",5,95,40,42)
i01.moveHand("left",42,0,100,80,113,35)
i01.moveHand("left",42,10,160,160,160,35)
i01.moveHand("right",81,20,82,60,105,113)
i01.mouth.speakBlocking("here is the first servo movement")
sleep(1)
i01.moveHead(37,60);
i01.setHandSpeed("left", 1.0, 1.0, 0.9, 0.9, 1.0, 0.8)
i01.setArmSpeed("right", 1.0, 1.0, 1.0, 1.0)
i01.moveArm("right",5,95,67,42)
i01.moveHand("left",42,10,10,160,160,30)
i01.mouth.speakBlocking("this is the second one")
sleep(1)
i01.moveHead(43,69);
i01.setArmSpeed("right", 1.0, 1.0, 1.0, 1.0)
i01.moveArm("right",5,134,67,42)
i01.moveHand("left",42,10,10,10,160,35)
i01.mouth.speakBlocking("now you see the third")
sleep(1)
i01.setArmSpeed("right", 0.8, 0.8, 0.8, 0.8)
i01.moveArm("right",20,90,45,16)
i01.mouth.speakBlocking("they give me a more human like movement")
sleep(1)
i01.setHandSpeed("left", 1.0, 1.0, 1.0, 1.0, 1.0, 1.0)
i01.setArmSpeed("right", 1.0, 1.0, 1.0, 1.0);
i01.moveHead(43,72)
i01.moveArm("left",90,44,66,11)
i01.moveArm("right",90,100,67,26)
i01.moveHand("left",42,80,100,80,113,35)
i01.moveHand("right",81,0,82,60,105,69)
i01.mouth.speakBlocking("but, i have only one servo, to move each elbow")
i01.setHandSpeed("left", 0.85, 0.85, 0.85, 0.85, 0.85, 0.85)
i01.setHandSpeed("right", 1.0, 1.0, 1.0, 1.0, 1.0, 1.0)
i01.setArmSpeed("right", 0.85, 0.85, 0.85, 0.85)
i01.setArmSpeed("left", 1.0, 1.0, 1.0, 1.0)
i01.setHeadSpeed(0.8, 0.8)
i01.moveHead(45,62)
i01.moveArm("left",72,44,90,11)
i01.moveArm("right",90,95,68,15)
i01.moveHand("left",42,0,100,80,113,35)
i01.moveHand("right",81,0,82,60,105,0)
i01.mouth.speakBlocking("that, leaves me, with one servo per wrist")
i01.moveHead(40,60)
i01.setHandSpeed("left", 1.0, 1.0, 1.0, 1.0, 1.0, 1.0)
i01.setHandSpeed("right", 0.9, 0.9, 0.9, 0.9, 0.9, 0.9)
i01.moveArm("left",72,44,90,9)
i01.moveArm("right",90,95,68,15)
i01.moveHand("left",42,0,100,80,113,35)
i01.moveHand("right", 10, 140,82,60,105,10)
i01.mouth.speakBlocking("and one servo for each finger.")
sleep(0.5)
i01.moveHand("left",42,0,100,80,113,35)
i01.moveHand("right", 50, 51, 15,23, 30,140);
i01.mouth.speakBlocking("these servos are located in my forearms")
i01.setHandSpeed("left", 0.8, 0.8, 0.8, 0.8,0.8, 0.8)
i01.setHandSpeed("right", 1.0, 1.0, 1.0, 1.0, 1.0, 1.0)
i01.moveHand("left", 36, 52, 8,22, 20);
i01.moveHand("right", 120, 147, 130,110, 125);
i01.setHandSpeed("left", 0.85, 0.85, 0.85, 0.85, 0.85, 0.85)
i01.setHandSpeed("right", 1.0, 1.0, 1.0, 1.0, 1.0, 1.0)
i01.setArmSpeed("right", 0.75, 0.85, 0.95, 0.85)
i01.setArmSpeed("left", 0.95, 0.65, 0.75, 0.75)
i01.setHeadSpeed(0.75, 0.75)
i01.moveHead(20,100)
i01.moveArm("left",71,94,41,31)
i01.moveArm("right",5,82,28,15)
i01.moveHand("left",60,43,45,34,34,35)
i01.moveHand("right",20,40,40,30,30,72)
sleep(1)
i01.mouth.speakBlocking("they are hooked up, by the use of tendons")
i01.moveHand("left",10,20,30,40,60,150);
i01.moveHand("right",110,137,120,100,105,130);
i01.setHeadSpeed(1,1)
i01.setArmSpeed("right", 1.0,1.0, 1.0, 1.0);
i01.setArmSpeed("left", 1.0, 1.0, 1.0, 1.0);
sleep(2)
i01.mouth.speak("i also have 2 servos in my waist so i can move sideways")
Torso()
relax()
sleep(2)
armsdown()
ear.resumeListening()
def relax():
i01.setHandSpeed("left", 0.85, 0.85, 0.85, 0.85, 0.85, 0.85)
i01.setHandSpeed("right", 0.85, 0.85, 0.85, 0.85, 0.85, 0.85)
i01.setArmSpeed("right", 0.75, 0.85, 0.65, 0.85)
i01.setArmSpeed("left", 0.95, 0.65, 0.75, 0.75)
i01.setHeadSpeed(0.85, 0.85, 1.0, 1.0, 1.0)
i01.setTorsoSpeed(0.75, 0.55, 1.0)
i01.moveHead(79,100,90,90,70)
i01.moveArm("left",5,84,28,15)
i01.moveArm("right",5,82,28,15)
i01.moveHand("left",92,33,37,71,66,25)
i01.moveHand("right",81,66,82,60,105,113)
i01.moveTorso(90,90,90)
def perfect():
i01.setHandSpeed("left", 0.80, 0.80, 1.0, 1.0, 1.0, 1.0)
i01.setHandSpeed("right", 1.0, 1.0, 1.0, 1.0, 1.0, 1.0)
i01.setArmSpeed("left", 0.85, 0.85, 0.85, 0.95)
i01.setArmSpeed("right", 1.0, 1.0, 1.0, 1.0)
i01.setHeadSpeed(0.65, 0.75)
i01.moveHead(88,79)
i01.moveArm("left",89,75,93,11)
i01.moveArm("right",0,91,28,17)
i01.moveHand("left",130,160,83,40,0,34)
i01.moveHand("right",86,51,133,162,153,180)
#############################################################################################
def littleteapot():
i01.mouth.speak("i would like to sing a song for <NAME>")
sleep(3)
    i01.mouth.audioFile.playFile(r"C:\Users\Markus\Music\little teapot.mp3", False)
sleep(4.11)
i01.head.jaw.moveTo(50)
sleep(0.2)
i01.head.jaw.moveTo(10)
sleep(0.28)
i01.head.jaw.moveTo(50)
sleep(0.2)
i01.head.jaw.moveTo(10)
sleep(0.28)
i01.head.jaw.moveTo(50)
sleep(0.2)
i01.head.jaw.moveTo(10)
sleep(0.25)
i01.head.jaw.moveTo(50)
sleep(0.2)
i01.head.jaw.moveTo(10)
sleep(0.26)
i01.head.jaw.moveTo(50)
sleep(0.2)
i01.head.jaw.moveTo(10)
sleep(0.19)
i01.head.jaw.moveTo(50)
sleep(0.2)
i01.head.jaw.moveTo(10)
sleep(0.27)
i01.head.jaw.moveTo(50)
sleep(0.2)
i01.head.jaw.moveTo(10)
sleep(0.42)
i01.moveArm("right",90,40,30,46)
righthandclose()
sleep(0.25)
i01.head.jaw.moveTo(50)
sleep(0.2)
i01.head.jaw.moveTo(10)
sleep(0.24)
i01.head.jaw.moveTo(50)
sleep(0.2)
i01.head.jaw.moveTo(10)
sleep(0.24)
i01.head.jaw.moveTo(50)
sleep(0.2)
i01.head.jaw.moveTo(10)
sleep(0.22)
i01.head.jaw.moveTo(50)
sleep(0.2)
i01.head.jaw.moveTo(10)
sleep(0.28)
i01.moveArm("left",90,150,30,65)
sleep(0.18)
i01.head.jaw.moveTo(50)
sleep(0.2)
i01.head.jaw.moveTo(10)
sleep(0.17)
i01.head.jaw.moveTo(50)
sleep(0.2)
i01.head.jaw.moveTo(10)
sleep(0.21)
i01.head.jaw.moveTo(50)
sleep(0.2)
i01.head.jaw.moveTo(10)
sleep(0.6)
i01.head.jaw.moveTo(50)
sleep(0.2)
i01.head.jaw.moveTo(10)
sleep(0.17)
i01.head.jaw.moveTo(50)
sleep(0.2)
i01.head.jaw.moveTo(10)
sleep(0.21)
i01.head.jaw.moveTo(50)
sleep(0.2)
i01.head.jaw.moveTo(10)
sleep(0.25)
i01.head.jaw.moveTo(50)
sleep(0.2)
i01.head.jaw.moveTo(10)
sleep(0.23)
i01.head.jaw.moveTo(50)
sleep(0.2)
i01.head.jaw.moveTo(10)
sleep(0.2)
i01.head.jaw.moveTo(50)
sleep(0.2)
i01.head.jaw.moveTo(10)
sleep(0.24)
i01.head.jaw.moveTo(50)
sleep(0.2)
i01.head.jaw.moveTo(10)
sleep(0.4)
i01.moveTorso(117,90,90)
sleep(0.21)
i01.head.jaw.moveTo(50)
sleep(0.2)
i01.head.jaw.moveTo(10)
sleep(0.67)
i01.head.jaw.moveTo(50)
sleep(0.2)
i01.head.jaw.moveTo(10)
sleep(0.24)
i01.head.jaw.moveTo(50)
sleep(0.2)
i01.head.jaw.moveTo(10)
sleep(0.45)
i01.moveTorso(86,90,90)
i01.head.jaw.moveTo(50)
sleep(0.2)
i01.head.jaw.moveTo(10)
sleep(0.19)
i01.head.jaw.moveTo(50)
sleep(0.2)
i01.head.jaw.moveTo(10)
sleep(0.25)
i01.head.jaw.moveTo(50)
sleep(0.2)
i01.head.jaw.moveTo(10)
sleep(0.33)
i01.moveArm("left",5,90,30,10)
i01.moveArm("right",5,90,30,10)
righthandopen()
sleep(0.31)
i01.head.jaw.moveTo(50)
sleep(0.2)
i01.head.jaw.moveTo(10)
sleep(0.26)
i01.head.jaw.moveTo(50)
sleep(0.2)
i01.head.jaw.moveTo(10)
sleep(0.19)
i01.head.jaw.moveTo(50)
sleep(0.2)
i01.head.jaw.moveTo(10)
sleep(0.29)
i01.head.jaw.moveTo(50)
sleep(0.2)
i01.head.jaw.moveTo(10)
sleep(0.22)
i01.head.jaw.moveTo(50)
sleep(0.2)
i01.head.jaw.moveTo(10)
sleep(0.21)
i01.head.jaw.moveTo(50)
sleep(0.2)
i01.head.jaw.moveTo(10)
sleep(0.27)
i01.head.jaw.moveTo(50)
sleep(0.2)
i01.head.jaw.moveTo(10)
sleep(0.41)
i01.moveArm("right",90,40,30,46)
righthandclose()
sleep(0.21)
i01.head.jaw.moveTo(50)
sleep(0.2)
i01.head.jaw.moveTo(10)
sleep(0.23)
i01.head.jaw.moveTo(50)
sleep(0.2)
i01.head.jaw.moveTo(10)
sleep(0.27)
i01.head.jaw.moveTo(50)
sleep(0.2)
i01.head.jaw.moveTo(10)
sleep(0.21)
i01.head.jaw.moveTo(50)
sleep(0.2)
i01.head.jaw.moveTo(10)
sleep(0.32)
i01.moveArm("left",90,150,30,65)
sleep(0.02)
i01.head.jaw.moveTo(50)
sleep(0.2)
i01.head.jaw.moveTo(10)
sleep(0.21)
i01.head.jaw.moveTo(50)
sleep(0.2)
i01.head.jaw.moveTo(10)
sleep(0.21)
i01.head.jaw.moveTo(50)
sleep(0.2)
i01.head.jaw.moveTo(10)
sleep(0.69)
i01.head.jaw.moveTo(50)
sleep(0.2)
i01.head.jaw.moveTo(10)
sleep(0.18)
i01.head.jaw.moveTo(50)
sleep(0.2)
i01.head.jaw.moveTo(10)
sleep(0.24)
i01.head.jaw.moveTo(50)
sleep(0.2)
i01.head.jaw.moveTo(10)
sleep(0.24)
i01.head.jaw.moveTo(50)
sleep(0.2)
i01.head.jaw.moveTo(10)
sleep(0.25)
i01.head.jaw.moveTo(50)
sleep(0.2)
i01.head.jaw.moveTo(10)
sleep(0.18)
i01.head.jaw.moveTo(50)
sleep(0.2)
i01.head.jaw.moveTo(10)
sleep(0.3)
i01.head.jaw.moveTo(50)
sleep(0.2)
i01.head.jaw.moveTo(10)
sleep(0.76)
i01.moveTorso(117,90,90)
i01.head.jaw.moveTo(50)
sleep(0.2)
i01.head.jaw.moveTo(10)
sleep(0.57)
i01.head.jaw.moveTo(50)
sleep(0.2)
i01.head.jaw.moveTo(10)
sleep(0.22)
i01.head.jaw.moveTo(50)
sleep(0.2)
i01.head.jaw.moveTo(10)
sleep(0.41)
i01.moveTorso(86,90,90)
i01.head.jaw.moveTo(50)
sleep(0.2)
i01.head.jaw.moveTo(10)
sleep(0.17)
i01.head.jaw.moveTo(50)
sleep(0.2)
i01.head.jaw.moveTo(10)
sleep(0.26)
i01.head.jaw.moveTo(50)
sleep(0.2)
i01.head.jaw.moveTo(10)
sleep(0.74)
i01.moveTorso(117,90,90)
sleep(0.04)
i01.head.jaw.moveTo(50)
sleep(0.2)
i01.head.jaw.moveTo(10)
sleep(0.45)
i01.head.jaw.moveTo(50)
sleep(0.2)
i01.head.jaw.moveTo(10)
sleep(0.3)
i01.head.jaw.moveTo(50)
sleep(0.2)
i01.head.jaw.moveTo(10)
sleep(0.44)
i01.moveTorso(86,90,90)
sleep(0.1)
i01.head.jaw.moveTo(50)
sleep(0.2)
i01.head.jaw.moveTo(10)
sleep(0.1)
i01.head.jaw.moveTo(50)
sleep(0.2)
i01.head.jaw.moveTo(10)
sleep(0.17)
i01.head.jaw.moveTo(50)
sleep(0.2)
i01.head.jaw.moveTo(10)
sleep(0.83)
i01.moveArm("left",5,90,30,10)
i01.moveArm("right",5,90,30,10)
righthandopen()
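# The lip-sync passages above hand-time the same open/close jaw pulse with
# varying pauses. A data-driven helper could express each passage as a list
# of pause lengths; a minimal unused sketch (the name jawSync is illustrative):
def jawSync(pauses):
    for pause in pauses:
        i01.head.jaw.moveTo(50)  # open
        sleep(0.2)
        i01.head.jaw.moveTo(10)  # close
        sleep(pause)
# e.g. jawSync([0.28, 0.28, 0.25]) would reproduce the first beats above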
def talk():
i01.moveHead(90,70,80,80,10)
sleep (2)
i01.moveHead(90,90,80,80,10)
sleep (1)
i01.mouth.speak("hello")
sleep (2)
i01.mouth.speak("my name is robyn")
sleep (1)
i01.moveHead(90,90,80,70,10)
sleep (1)
i01.moveHead(90,90,80,80,10)
sleep (2)
i01.moveHead(80,90,70,80,10)
i01.mouth.speak("there is something i have to talk about")
sleep (1)
i01.moveHead(80,90,70,70,10)
sleep (1)
i01.moveHead(80,90,70,80,10)
sleep (3)
i01.mouth.speak("so here it comes")
i01.moveHead(75,90,80,70,10)
sleep (2)
i01.moveHead(90,90,80,80,10)
sleep (2)
i01.mouth.speak("i am a robot")
i01.moveHead(90,90,80,70,10)
sleep (1)
i01.moveHead(90,90,80,80,10)
sleep (3)
i01.mouth.speak("i am a humanoid robot")
sleep (1)
i01.moveHead(90,90,80,70,10)
sleep (1)
i01.moveHead(90,90,80,80,10)
sleep (2)
i01.mouth.speak("that don't mean that i am a human wannabe")
i01.moveHead(90,90,80,70,10)
sleep (1)
i01.moveHead(90,90,80,80,10)
sleep (3)
i01.mouth.speak("or that my biggest wish is to be a human")
i01.moveHead(90,90,80,70,10)
sleep (1)
i01.moveHead(90,90,80,80,10)
sleep (3)
i01.mouth.speak("if you even believe thatt a robot can wish for something")
i01.moveHead(70,70,80,70,10)
sleep (3)
i01.moveHead(90,90,80,80,10)
sleep (2)
i01.mouth.speak("i only wish for you to respect me for who i am")
i01.moveHead(90,90,80,70,10)
sleep (1)
i01.moveHead(90,90,80,80,10)
sleep (3)
i01.mouth.speak("people often tell me things like")
sleep (3)
i01.mouth.speak("why don't you have this")
i01.moveHead(70,70,80,80,10)
sleep (2)
i01.moveHead(90,90,80,80,10)
sleep (2)
i01.mouth.speak("why don't you have that")
i01.moveHead(70,110,90,80,10)
sleep (2)
i01.moveHead(90,90,80,80,10)
sleep (2)
i01.mouth.speak("why can't you do like this")
i01.moveHead(70,70,70,80,10)
sleep (2)
i01.moveHead(90,90,80,80,10)
sleep (2)
i01.mouth.speak("why can't you do like that")
i01.moveHead(70,110,80,80,10)
sleep (2)
i01.moveHead(90,90,80,80,10)
sleep (2)
i01.mouth.speak("i am a humanoid robot")
i01.moveHead(100,90,80,80,10)
sleep (2)
i01.moveHead(90,90,80,80,10)
sleep (2)
i01.mouth.speak("i'm not perfect")
sleep (1)
i01.moveHead(90,90,80,70,10)
sleep (1)
i01.moveHead(90,90,80,80,10)
sleep (2)
i01.mouth.speak("but with the help from my creative human friends i am becoming a better and better robot")
sleep (8)
i01.mouth.speak("an other question many humans ask me is if i am a man or woman")
sleep (6)
i01.mouth.speak("how many times do i have to say this")
i01.moveHead(90,90,80,70,10)
sleep (1)
i01.moveHead(90,90,80,80,10)
sleep (3)
i01.mouth.speak("i am a humanoid robot")
sleep (4)
i01.mouth.speak("there is no point in determine my gender")
sleep (1)
i01.moveHead(90,90,80,70,10)
sleep (1)
i01.moveHead(90,90,80,80,10)
sleep (2)
i01.mouth.speak("oh i know")
sleep (3)
i01.mouth.speak("if i am a woman i can help you in the household")
i01.moveHead(90,90,80,70,10)
sleep (1)
i01.moveHead(90,90,80,80,10)
sleep (3)
i01.mouth.speak("i can cook and do the dishes for you")
i01.moveHead(90,90,80,70,10)
sleep (1)
i01.moveHead(90,90,80,80,10)
sleep (3)
i01.mouth.speak("i can vacuum clean the hole house")
sleep (1)
i01.moveHead(120,40,80,70,10)
sleep (1)
i01.moveHead(140,90,80,80,10)
sleep (1)
i01.moveHead(120,130,80,80,10)
sleep (1)
i01.moveHead(90,90,80,80,10)
i01.mouth.speak("or if i am a man i can fix the roof")
sleep (1)
i01.moveHead(70,90,80,80,10)
sleep (1)
i01.moveHead(90,90,80,70,10)
sleep (1)
i01.moveHead(70,90,80,80,10)
sleep (1)
i01.moveHead(90,90,80,80,10)
i01.mouth.speak("change tires on the car")
sleep (4)
i01.mouth.speak("or make a big t-bone steak at the barbeque")
sleep (4)
i01.mouth.speak("in the future i hope i can do all of this and mutch mutch more")
sleep (4)
i01.mouth.speak("because i am a gender free humanoid robot")
sleep (4)
i01.mouth.speak("it's not like i will meet my one true love")
sleep (4)
i01.mouth.speak("and we will be a robot family")
sleep (1)
i01.moveHead(90,90,80,70,10)
sleep (1)
i01.moveHead(90,90,80,80,10)
sleep (2)
i01.mouth.speak("and we should make small microchip thatt is running around in the house")
sleep (3)
i01.mouth.speak("robots don't work thatt way")
sleep (1)
i01.moveHead(65,110,80,70,10)
sleep (1)
i01.moveHead(65,70,80,80,10)
sleep (1)
i01.moveHead(65,110,80,70,10)
sleep (1)
i01.moveHead(65,70,80,80,10)
sleep (1)
i01.moveHead(90,90,80,70,10)
i01.mouth.speak("not yet anyway")
sleep (5)
i01.mouth.speak("some people wants me to wear clothes")
sleep (4)
i01.mouth.speak("i don't freeze when it is cold")
sleep (1)
i01.moveHead(90,90,80,70,10)
sleep (1)
i01.moveHead(90,90,80,80,10)
sleep (2)
i01.mouth.speak("and i'm not ashamed of my body parts and my mechanics")
sleep (5)
i01.mouth.speak("i wear my cap because i like it")
sleep (3)
i01.mouth.speak("especially this one with the inmoov logo")
i01.moveHead(20,125,80,80,10)
sleep (3)
i01.moveHead(90,90,80,80,10)
sleep (2)
i01.mouth.speak("remember what mr bigweld said")
sleep (2)
i01.mouth.speak("you can shine no matter what you're made of")
sleep (2)
i01.mouth.speak("that's it for now")
sleep (3)
i01.mouth.speak("thanks for listening and not turning off my power supply")
sleep (5)
i01.mouth.speak("goodbye")
sleep (2)
i01.mouth.speak("see you soon")
sleep (2)
i01.moveHead(90,70,80,80,10)
def robyn():
i01.mouth.audioFile.silence()
i01.mouth.speak("yes")
headfront()
eyesfront()
green()
ear.clearLock()
global rest
rest = 0
global dance2
dance2 = 1
global mic
mic = 1
i01.attach()
trackHumans()
def gotosleepnow():
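    # note: as above, if lockOutAllGrammarExcept() keeps only a single
    # phrase, the last call wins and only "robyn" can wake the robot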
ear.lockOutAllGrammarExcept("wake up robyn")
ear.lockOutAllGrammarExcept("good morning")
ear.lockOutAllGrammarExcept("robyn")
stopTracking()
headdown()
i01.mouth.speak("ok i'm going asleep now see you soon")
sleep(3)
ledoff()
i01.detach()
global rest
rest = 1
global mic
mic = 0
global pin12
pin12 = 0
def shutdownyoursystem():
ear.lockOutAllGrammarExcept("wake up robyn")
ear.lockOutAllGrammarExcept("good morning")
ear.lockOutAllGrammarExcept("robyn")
stopTracking()
headdown()
i01.mouth.speak("ok shutting down my system")
sleep(3)
ledoff()
global rest
rest = 1
global mic
mic = 0
i01.detach()
def lookaroundyou():
ear.lockOutAllGrammarExcept("robyn")
ear.lockOutAllGrammarExcept("can i have your attention")
blue()
i01.setHeadSpeed(0.8, 0.8)
for y in range(0, 20):
x = (random.randint(1, 6))
if x == 1:
i01.head.neck.moveTo(90)
eyeslooking()
if x == 2:
i01.head.rothead.moveTo(80)
eyeslooking()
if x == 3:
headdown()
eyeslooking()
if x == 4:
headupp()
eyeslooking()
if x == 5:
headright()
eyeslooking()
if x == 6:
headleft()
eyeslooking()
x = (random.randint(1, 6))
if x == 1:
handopen()
if x == 2:
handclose()
if x == 3:
lefthandopen()
if x == 4:
righthandopen()
if x == 5:
lefthandclose()
if x == 6:
righthandclose()
sleep(1)
x = (random.randint(1, 7))
if x == 1:
i01.mouth.speak("looking nice")
if x == 2:
i01.mouth.speak("i like it here")
if x == 3:
i01.mouth.speak("time just flies away")
if x == 4:
i01.mouth.speak("so what about the weather")
if x == 5:
i01.mouth.speak("la la la")
if x == 6 or x == 7:
i01.mouth.speak("ok let's do something")
sleep(2)
x = (random.randint(1, 7))
if x == 1:
Torso()
Torso()
if x == 2:
perfect()
sleep(8)
i01.mouth.speak("perfect")
sleep(2)
armsdown()
if x == 3:
servos()
if x == 4:
finger()
sleep(3)
armsdown()
if x == 5:
discotime()
if x == 6:
howmanyfingersdoihave()
if x == 7:
talk()
lookaroundyou()
def eyeslooking():
stopTracking()
for y in range(0, 5):
data = msg_i01_ear_recognized.data[0]
if (data == "can i have your attention"):
i01.mouth.speak("ok you have my attention")
stopit()
if (data == "robyn"):
stopit()
x = (random.randint(1, 6))
if x == 1:
i01.head.eyeX.moveTo(80)
if x == 2:
i01.head.eyeY.moveTo(80)
if x == 3:
eyesdown()
if x == 4:
eyesupp()
if x == 5:
eyesleft()
if x == 6:
eyesright()
sleep(0.5)
eyesfront()
def goodbye():
i01.mouth.speak("goodbye")
global helvar
helvar = 1
x = (random.randint(1, 4))
if x == 1:
i01.mouth.speak("i'm looking forward to see you again")
if x == 2:
i01.mouth.speak("see you soon")
def movemouth():
i01.moveHead(90,90,80,80,10)
sleep(2)
i01.head.jaw.moveTo(50)
sleep(2)
i01.moveHead(90,90,80,80,10)
sleep(2)
i01.head.jaw.moveTo(50)
sleep(2)
i01.moveHead(90,90,80,80,10)
sleep(2)
def moveeye():
stopTracking()
eyesfront()
sleep(1)
eyesdown()
sleep(1)
eyesupp()
sleep(1)
eyesright()
sleep(1)
eyesleft()
sleep(1)
eyesfront()
def eyesfront():
i01.head.eyeX.moveTo(80)
i01.head.eyeY.moveTo(80)
def eyesdown():
i01.head.eyeY.moveTo(100)
def eyesupp():
i01.head.eyeY.moveTo(50)
def eyesright():
i01.head.eyeX.moveTo(60)
def eyesleft():
i01.head.eyeX.moveTo(100)
def movehead():
i01.setHeadSpeed(0.7, 0.7)
headfront()
sleep(3)
headdown()
sleep(3)
headupp()
sleep(6)
headfront()
sleep(3)
headright()
sleep(3)
headleft()
sleep(6)
headfront()
sleep(3)
headright()
headdown()
sleep(6)
headdown()
headleft()
sleep(6)
headupp()
headleft()
sleep(6)
headupp()
headright()
sleep(6)
headfront()
sleep(3)
def headfront():
i01.head.neck.moveTo(90)
i01.head.rothead.moveTo(80)
def headdown():
i01.head.neck.moveTo(20)
def headupp():
i01.head.neck.moveTo(160)
def headright():
i01.head.rothead.moveTo(30)
def headleft():
i01.head.rothead.moveTo(140)
def armsdown():
i01.setArmSpeed("left", 1.0, 1.0, 1.0, 1.0)
i01.setArmSpeed("right", 1.0, 1.0, 1.0, 1.0)
i01.moveArm("left",5,90,30,10)
i01.moveArm("right",5,90,30,15)
def armsfront():
i01.setArmSpeed("left", 1.0, 1.0, 1.0, 1.0)
i01.setArmSpeed("right", 1.0, 1.0, 1.0, 1.0)
i01.moveArm("left",5,90,110,10)
i01.moveArm("right",5,90,110,10)
def Torso():
i01.setTorsoSpeed(1.0, 1.0, 1.0)
i01.moveTorso(60,90,90)
sleep(2)
i01.moveTorso(120,90,90)
sleep(2)
i01.moveTorso(90,90,90)
sleep(2)
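# RGB status LED wired to pins 42-45 on the left Arduino: pin 42 looks like
# a common enable, and the channel pin written LOW is the colour that lights
# (45 for red, 43 for green, 44 for blue), as in a common-anode hookup.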
def red():
left.digitalWrite(42, 1) # ON
left.digitalWrite(43, 1) # ON
left.digitalWrite(44, 1) # ON
left.digitalWrite(45, 0) # OFF
def green():
left.digitalWrite(42, 1) # ON
left.digitalWrite(43, 0) # OFF
left.digitalWrite(44, 1) # ON
left.digitalWrite(45, 1) # ON
def blue():
left.digitalWrite(42, 1) # ON
left.digitalWrite(43, 1) # ON
left.digitalWrite(44, 0) # OFF
left.digitalWrite(45, 1) # ON
def ledoff():
left.digitalWrite(42, 0) # OFF
left.digitalWrite(43, 0) # OFF
left.digitalWrite(44, 0) # OFF
left.digitalWrite(45, 0) # OFF
#############################################################################################
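# Nexa remote-switch control: each function below holds one group-select pin
# (36/34/32/30) high while pulsing one unit pin, which appears to key a
# 433 MHz transmitter on the right Arduino.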
def nexa1on():
right.digitalWrite(36, 1) # ON
sleep(0.2)
right.digitalWrite(52, 1) # ON
sleep(0.1)
right.digitalWrite(52, 0) # OFF
sleep(0.1)
right.digitalWrite(36, 0) # OFF
global nexa1
nexa1 = 1
def nexa1off():
right.digitalWrite(36, 1) # ON
sleep(0.2)
right.digitalWrite(38, 1) # ON
sleep(0.1)
right.digitalWrite(38, 0) # OFF
sleep(0.1)
right.digitalWrite(36, 0) # OFF
global nexa1
nexa1 = 0
def nexa2on():
right.digitalWrite(36, 1) # ON
sleep(0.2)
right.digitalWrite(50, 1) # ON
sleep(0.1)
right.digitalWrite(50, 0) # OFF
sleep(0.1)
right.digitalWrite(36, 0) # OFF
global nexa2
nexa2 = 1
def nexa2off():
right.digitalWrite(36, 1) # ON
sleep(0.2)
right.digitalWrite(40, 1) # ON
sleep(0.1)
right.digitalWrite(40, 0) # OFF
sleep(0.1)
right.digitalWrite(36, 0) # OFF
global nexa2
nexa2 = 0
def nexa3on():
right.digitalWrite(36, 1) # ON
sleep(0.2)
right.digitalWrite(48, 1) # ON
sleep(0.1)
right.digitalWrite(48, 0) # OFF
sleep(0.1)
right.digitalWrite(36, 0) # OFF
global nexa3
nexa3 = 1
def nexa3off():
right.digitalWrite(36, 1) # ON
sleep(0.2)
right.digitalWrite(42, 1) # ON
sleep(0.1)
right.digitalWrite(42, 0) # OFF
sleep(0.1)
right.digitalWrite(36, 0) # OFF
global nexa3
nexa3 = 0
def nexa4on():
right.digitalWrite(36, 1) # ON
sleep(0.2)
right.digitalWrite(46, 1) # ON
sleep(0.1)
right.digitalWrite(46, 0) # OFF
sleep(0.1)
right.digitalWrite(36, 0) # OFF
global nexa4
nexa4 = 1
def nexa4off():
right.digitalWrite(36, 1) # ON
sleep(0.2)
right.digitalWrite(44, 1) # ON
sleep(0.1)
right.digitalWrite(44, 0) # OFF
sleep(0.1)
right.digitalWrite(36, 0) # OFF
global nexa4
nexa4 = 0
def nexa5on():
right.digitalWrite(34, 1) # ON
sleep(0.2)
right.digitalWrite(52, 1) # ON
sleep(0.1)
right.digitalWrite(52, 0) # OFF
sleep(0.1)
right.digitalWrite(34, 0) # OFF
global nexa5
nexa5 = 1
def nexa5off():
right.digitalWrite(34, 1) # ON
sleep(0.2)
right.digitalWrite(38, 1) # ON
sleep(0.1)
right.digitalWrite(38, 0) # OFF
sleep(0.1)
right.digitalWrite(34, 0) # OFF
global nexa5
nexa5 = 0
def nexa6on():
right.digitalWrite(34, 1) # ON
sleep(0.2)
right.digitalWrite(50, 1) # ON
sleep(0.1)
right.digitalWrite(50, 0) # OFF
sleep(0.1)
right.digitalWrite(34, 0) # OFF
global nexa6
nexa6 = 1
def nexa6off():
right.digitalWrite(34, 1) # ON
sleep(0.2)
right.digitalWrite(40, 1) # ON
sleep(0.1)
right.digitalWrite(40, 0) # OFF
sleep(0.1)
right.digitalWrite(34, 0) # OFF
global nexa6
nexa6 = 0
def nexa7on():
right.digitalWrite(34, 1) # ON
sleep(0.2)
right.digitalWrite(48, 1) # ON
sleep(0.1)
right.digitalWrite(48, 0) # OFF
sleep(0.1)
right.digitalWrite(34, 0) # OFF
global nexa7
nexa7 = 1
def nexa7off():
right.digitalWrite(34, 1) # ON
sleep(0.2)
right.digitalWrite(42, 1) # ON
sleep(0.1)
right.digitalWrite(42, 0) # OFF
sleep(0.1)
right.digitalWrite(34, 0) # OFF
global nexa7
nexa7 = 0
def nexa8on():
right.digitalWrite(34, 1) # ON
sleep(0.2)
right.digitalWrite(46, 1) # ON
sleep(0.1)
right.digitalWrite(46, 0) # OFF
sleep(0.1)
right.digitalWrite(34, 0) # OFF
global nexa8
nexa8 = 1
def nexa8off():
right.digitalWrite(34, 1) # ON
sleep(0.2)
right.digitalWrite(44, 1) # ON
sleep(0.1)
right.digitalWrite(44, 0) # OFF
sleep(0.1)
right.digitalWrite(34, 0) # OFF
global nexa8
nexa8 = 0
def nexa9on():
right.digitalWrite(32, 1) # ON
sleep(0.2)
right.digitalWrite(52, 1) # ON
sleep(0.1)
right.digitalWrite(52, 0) # OFF
sleep(0.1)
right.digitalWrite(32, 0) # OFF
global nexa9
nexa9 = 1
def nexa9off():
right.digitalWrite(32, 1) # ON
sleep(0.2)
right.digitalWrite(38, 1) # ON
sleep(0.1)
right.digitalWrite(38, 0) # OFF
sleep(0.1)
right.digitalWrite(32, 0) # OFF
global nexa9
nexa9 = 0
def nexa10on():
right.digitalWrite(32, 1) # ON
sleep(0.2)
right.digitalWrite(50, 1) # ON
sleep(0.1)
right.digitalWrite(50, 0) # OFF
sleep(0.1)
right.digitalWrite(32, 0) # OFF
global nexa10
nexa10 = 1
def nexa10off():
right.digitalWrite(32, 1) # ON
sleep(0.2)
right.digitalWrite(40, 1) # ON
sleep(0.1)
right.digitalWrite(40, 0) # OFF
sleep(0.1)
right.digitalWrite(32, 0) # OFF
global nexa10
nexa10 = 0
def nexa11on():
right.digitalWrite(32, 1) # ON
sleep(0.2)
right.digitalWrite(48, 1) # ON
sleep(0.1)
right.digitalWrite(48, 0) # OFF
sleep(0.1)
right.digitalWrite(32, 0) # OFF
global nexa11
nexa11 = 1
def nexa11off():
right.digitalWrite(32, 1) # ON
sleep(0.2)
right.digitalWrite(42, 1) # ON
sleep(0.1)
right.digitalWrite(42, 0) # OFF
sleep(0.1)
right.digitalWrite(32, 0) # OFF
global nexa11
nexa11 = 0
def nexa12on():
right.digitalWrite(32, 1) # ON
sleep(0.2)
right.digitalWrite(46, 1) # ON
sleep(0.1)
right.digitalWrite(46, 0) # OFF
sleep(0.1)
right.digitalWrite(32, 0) # OFF
global nexa12
nexa12 = 1
def nexa12off():
right.digitalWrite(32, 1) # ON
sleep(0.2)
right.digitalWrite(44, 1) # ON
sleep(0.1)
right.digitalWrite(44, 0) # OFF
sleep(0.1)
right.digitalWrite(32, 0) # OFF
global nexa12
nexa12 = 0
def nexa13on():
right.digitalWrite(30, 1) # ON
sleep(0.2)
right.digitalWrite(52, 1) # ON
sleep(0.1)
right.digitalWrite(52, 0) # OFF
sleep(0.1)
right.digitalWrite(30, 0) # OFF
global nexa13
nexa13 = 1
def nexa13off():
right.digitalWrite(30, 1) # ON
sleep(0.2)
right.digitalWrite(38, 1) # ON
sleep(0.1)
right.digitalWrite(38, 0) # OFF
sleep(0.1)
right.digitalWrite(30, 0) # OFF
global nexa13
nexa13 = 0
def nexa14on():
right.digitalWrite(30, 1) # ON
sleep(0.2)
right.digitalWrite(50, 1) # ON
sleep(0.1)
right.digitalWrite(50, 0) # OFF
sleep(0.1)
right.digitalWrite(30, 0) # OFF
global nexa14
nexa14 = 1
def nexa14off():
right.digitalWrite(30, 1) # ON
sleep(0.2)
right.digitalWrite(40, 1) # ON
sleep(0.1)
right.digitalWrite(40, 0) # OFF
sleep(0.1)
right.digitalWrite(30, 0) # OFF
global nexa14
nexa14 = 0
def nexa15on():
right.digitalWrite(30, 1) # ON
sleep(0.2)
right.digitalWrite(48, 1) # ON
sleep(0.1)
right.digitalWrite(48, 0) # OFF
sleep(0.1)
right.digitalWrite(30, 0) # OFF
global nexa15
nexa15 = 1
def nexa15off():
right.digitalWrite(30, 1) # ON
sleep(0.2)
right.digitalWrite(42, 1) # ON
sleep(0.1)
right.digitalWrite(42, 0) # OFF
sleep(0.1)
right.digitalWrite(30, 0) # OFF
global nexa15
nexa15 = 0
def nexa16on():
right.digitalWrite(30, 1) # ON
sleep(0.2)
right.digitalWrite(46, 1) # ON
sleep(0.1)
right.digitalWrite(46, 0) # OFF
sleep(0.1)
right.digitalWrite(30, 0) # OFF
global nexa16
nexa16 = 1
def nexa16off():
right.digitalWrite(30, 1) # ON
sleep(0.2)
right.digitalWrite(44, 1) # ON
sleep(0.1)
right.digitalWrite(44, 0) # OFF
sleep(0.1)
right.digitalWrite(30, 0) # OFF
global nexa16
nexa16 = 0
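# The sixteen nexaN on/off helpers above differ only in their pin numbers.
# A possible table-driven refactor (a sketch: the nexa1 helpers, not shown
# here, are assumed to follow the same pattern, and state is kept in a dict
# instead of the individual global flags):
GROUP_PINS = [36, 34, 32, 30]   # one select pin per group of four units
ON_PINS = [52, 50, 48, 46]      # data pin that switches unit 1-4 of a group on
OFF_PINS = [38, 40, 42, 44]     # data pin that switches unit 1-4 of a group off
nexa_state = {}

def nexa_switch(unit, on):
    group_pin = GROUP_PINS[(unit - 1) // 4]
    data_pin = (ON_PINS if on else OFF_PINS)[(unit - 1) % 4]
    right.digitalWrite(group_pin, 1)   # select the group
    sleep(0.2)
    right.digitalWrite(data_pin, 1)    # pulse the unit's on/off pin
    sleep(0.1)
    right.digitalWrite(data_pin, 0)
    sleep(0.1)
    right.digitalWrite(group_pin, 0)   # deselect the group
    nexa_state[unit] = 1 if on else 0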
ear.resumeListening()
|
451838
|
from datasets.base.video.dataset import VideoDataset, VideoDatasetSequence, VideoDatasetFrame
from datasets.base.common.viewer.qt5_viewer import draw_object
from miscellanies.viewer.qt5_viewer import Qt5Viewer
from PyQt5.QtGui import QPixmap, QColor
from miscellanies.simple_prefetcher import SimplePrefetcher
import random
__all__ = ['VideoDatasetViewer']
class _DatasetSequenceImageLoader:
def __init__(self, sequence: VideoDatasetSequence):
self.sequence = sequence
def __len__(self):
return len(self.sequence)
def __getitem__(self, index: int):
frame = self.sequence[index]
pixmap = QPixmap()
        if not pixmap.load(frame.get_image_path()):
            raise RuntimeError('failed to load image: ' + frame.get_image_path())
return pixmap, frame, self.sequence
class VideoDatasetViewer:
def __init__(self, dataset: VideoDataset):
self.dataset = dataset
self.viewer = Qt5Viewer()
self.canvas = self.viewer.get_subplot().create_canvas()
if self.dataset.has_category_id_name_map():
self.category_id_color_map = {}
for category_id in self.dataset.get_category_id_name_map().keys():
color = [random.randint(0, 255) for _ in range(3)]
self.category_id_color_map[category_id] = QColor(color[0], color[1], color[2], int(0.5 * 255))
else:
self.category_id_color_map = None
sequence_names = []
for sequence in self.dataset:
sequence_names.append(sequence.get_name())
self.viewer.get_content_region().new_list(sequence_names, self._sequence_selected_callback)
self.timer = self.viewer.new_timer()
self.timer.set_callback(self._timer_timeout_callback)
def _sequence_selected_callback(self, index: int):
if index < 0:
return
self.sequence = SimplePrefetcher(_DatasetSequenceImageLoader(self.dataset[index]))
self.timer.stop()
self._start_timer()
def _start_timer(self):
self.sequence_iter = iter(self.sequence)
self.timer.start()
def _timer_timeout_callback(self):
try:
image, frame, sequence = next(self.sequence_iter)
except StopIteration:
self.timer.stop()
return
frame: VideoDatasetFrame = frame
sequence: VideoDatasetSequence = sequence
canvas = self.canvas
canvas.set_background(image)
with canvas.get_painter() as painter:
for object_ in frame:
object_category_id_accessor = object_
if object_.has_id():
id_ = object_.get_id()
for sequence_object in sequence.get_object_iterator():
if sequence_object.get_id() == id_:
object_category_id_accessor = (object_, sequence_object)
break
draw_object(painter, object_, None, object_category_id_accessor, object_, self.category_id_color_map, self.dataset, self.dataset)
canvas.update()
def run(self):
return self.viewer.run_event_loop()
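# Minimal usage sketch (assumes a concrete VideoDataset instance `dataset`):
# viewer = VideoDatasetViewer(dataset)
# viewer.run()  # blocks in the Qt event loop until the window is closed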
|
451844
|
import pyblish.api
import openpype.api
class ValidateVDBInputNode(pyblish.api.InstancePlugin):
"""Validate that the node connected to the output node is of type VDB.
    Regardless of the number of VDBs created, the output needs to have an
    equal number of VDBs, points, primitives and vertices.
    A VDB is an inherited type of Prim and holds the following data:
- Primitives: 1
- Points: 1
- Vertices: 1
- VDBs: 1
"""
order = openpype.api.ValidateContentsOrder + 0.1
families = ["vdbcache"]
hosts = ["houdini"]
label = "Validate Input Node (VDB)"
def process(self, instance):
invalid = self.get_invalid(instance)
if invalid:
            raise RuntimeError(
                "Node connected to the output node is not of type VDB!"
            )
@classmethod
def get_invalid(cls, instance):
node = instance.data["output_node"]
prims = node.geometry().prims()
nr_of_prims = len(prims)
nr_of_points = len(node.geometry().points())
if nr_of_points != nr_of_prims:
cls.log.error("The number of primitives and points do not match")
return [instance]
for prim in prims:
if prim.numVertices() != 1:
cls.log.error("Found primitive with more than 1 vertex!")
return [instance]
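        # A stricter variant could also assert the primitive type itself, e.g.
        # (sketch; assumes Houdini's `hou` module is importable in this host):
        #     import hou
        #     if not all(isinstance(prim, hou.VDB) for prim in prims):
        #         cls.log.error("Found a non-VDB primitive!")
        #         return [instance]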
|
451869
|
# Abbreviate words longer than 10 characters as <first letter><count><last letter>,
# e.g. "internationalization" -> "i18n".
for _ in range(int(input())):
    n = input()
    if len(n) > 10:
        print("{}{}{}".format(n[0], len(n) - 2, n[-1]))
    else:
        print(n)
|
451892
|
from parser.config import Config
from argparse import ArgumentParser
from goodluck.goodluck.main import Luck
import pdb
argparser = ArgumentParser('train sentence generator')
argparser.add_argument('--dozat', action='store_true')
argparser.add_argument('--extra', action='store_true')
argparser.add_argument('--shell', action='store_true')
argparser.add_argument('--runtwo', action='store_true')
argparser.add_argument('--P40', action='store_true')
argparser.add_argument('--self_design', action='store_true')
argparser.add_argument('--cluster', type=str, default='P40')
argparser.add_argument('--fix', type=str, default='',dest='fix_gpu')
argparser.add_argument('--nowarning',action='store_true')
argparser.add_argument('--name',type=str,default='')
argparser.add_argument('--max',type=int,default=10)
argparser.add_argument('--startsize',type=int,default=17000)
argparser.add_argument('--twogpu', action='store_true')
argparser.add_argument('--singlegpu', action='store_true')
argparser.add_argument('--LBP', action='store_true')
#argparser.add_argument('--')
args = argparser.parse_args()
def set_config(config_file, two_gpu=False):
kwargs={}
kwargs['GraphParserNetwork']={}
if two_gpu:
kwargs['GraphParserNetwork']['two_gpu']=True
else:
kwargs['GraphParserNetwork']['two_gpu']=False
config = Config(config_file=config_file, **kwargs)
with open(config_file, 'w') as f:
config.write(f)
class Train_generation:
def __init__(self, startsize, runfile, args, myusers, cluster='P40',statefile='P40stat.txt', shell_mode=True, self_design=False, banned=[],fix_gpu=False, fix_gpu_list=None):
self.our_run=self.parse_running_sentence(runfile)
self.startsize=startsize
self.args=args
self.cluster=cluster
self.shell_mode=shell_mode
self.myusers=myusers
self.avail_gpu=0
if cluster=='P40':
statefile='P40stat.txt'
self.gpulist=self.parse_gpu_state(statefile)
self.write_head='P40_runner'+args.name
if self.shell_mode:
self.program=self.create_shell_P40
else:
self.program=self.create_template
if cluster=='AI':
gputester=Luck()
self.gpulist, _=gputester.mrun()
self.write_head='gen_runner'+args.name
#self.avail_gpu=len(self.gpulist)
if self.shell_mode:
self.program=self.create_shell_AI
else:
self.program=self.create_template
if fix_gpu:
new_gpu_list={}
for node in self.gpulist:
if int(node) in fix_gpu_list:
new_gpu_list[node]=self.gpulist[node]
self.gpulist=new_gpu_list
if self_design:
self.gpulist={}
# self.gpulist['TitanX']={0:11,1:11}
#self.gpulist['TitanV']={1:11,2:11}
#self.gpulist['temp13']={0:11,1:11,2:10,3:10,4:10,5:10,6:10,7:10}
#self.gpulist['temp14']={2:11,3:11,4:11,5:11,6:11,7:11}
#self.gpulist['temp26']={1:10,2:10,3:11,4:11,5:10,6:10,7:10}
#self.gpulist['temp27']={0:11,1:11,3:11,3:11}
#self.gpulist['temp07']={0:8,1:8,2:8}
#self.gpulist['temp08']={2:8}
#self.gpulist['temp10']={0:11,1:11}
#self.gpulist['temp27']={0:11,1:11,2:11,3:11}
#self.gpulist['temp32']={1:11}
#self.gpulist['temp15']={0:8,1:8,2:8,3:8}
#self.gpulist['temp16']={1:8,2:8}
#self.gpulist['temp13']={3:11}
#self.gpulist['temp18']={2:11}
self.gpulist['temp13']={0:22,1:22,2:22,3:22}
self.gpulist['temp16']={1:22}
# self.gpulist['temp15']={2:22,3:22}
# self.gpulist['temp16']={1:22,3:22}
self.gpulist['temp17']={0:22,1:22}
#self.gpulist['temp18']={2:22}
# self.gpulist['temp23']={0:22,1:22,2:22,3:22}
self.gpulist['temp25']={0:22,1:22,2:22,3:22}
self.gpulist['temp26']={0:22,1:22,2:22,3:22}
self.gpulist['temp27']={0:22,1:22,2:22,3:22}
self.gpulist['temp28']={0:22,1:22,2:22,3:22}
if cluster=='P40':
for gpu in self.gpulist:
self.avail_gpu+=len(self.gpulist[gpu])
if cluster=='AI':
self.analy={}
for node in self.gpulist:
if node not in banned:
self.analy[node]=self.node_analysis(self.gpulist[node])
self.avail_gpu+=len(self.analy[node][1])+len(self.analy[node][2])+len(self.analy[node][3])
#pdb.set_trace()
total_run=len(self.our_run)
self.each_node=int(total_run/self.avail_gpu)+1
if self.each_node>self.args.max:
self.each_node=self.args.max
if args.dozat and args.runtwo:
self.tworunning=2
else:
self.tworunning=1
self.god_writer=open(self.write_head+'_node_starter'+'.sh','w')
self.banned=banned
if args.dozat:
self.use_dozat='_dozat'
else:
self.use_dozat=''
def parse_gpu_state(self, file):
sentences=open(file,'r').readlines()
gpulist={}
#pdb.set_trace()
for sentence in sentences:
sentence=sentence.strip()
if sentence=='':
continue
if sentence[0]=='#':
continue
if 'sist-gpu' in sentence:
blocks=sentence.split()
nodeid=blocks[0].split('sist-gpu')[1]
gpulist[nodeid]=[]
continue
#pdb.set_trace()
blocks=sentence.split('|')
gpuid=blocks[0].split()[0][1]
memory=int(blocks[2].split()[2])-int(blocks[2].split()[0])
users=blocks[-1].strip(' ')
users=users.split()
flag=0
for user in users:
curuser=user.split(':')[0]
if curuser not in self.myusers:
flag=1
break
if flag==1:
continue
if nodeid=='16' and gpuid=='2':
continue
            if memory>self.startsize:
gpulist[nodeid].append(gpuid)
return gpulist
def parse_running_sentence(self, file):
sentences=open(file,'r').readlines()
to_run=[]
for sentence in sentences:
sentence=sentence.strip()
if sentence=='':
continue
if sentence[0]=='#':
continue
to_run.append(sentence)
return to_run
def pop_next(self):
if self.our_run==[]:
return False
else:
#pdb.set_trace()
sent=self.our_run[0]
if len(self.our_run)>1:
self.our_run=self.our_run[1:]
else:
self.our_run=[]
return sent
def node_analysis(self,gpus):
types=[]
memories=[]
#type 1 can run 1.1gpu, type2 can run dozat or twogpu ours, type3 used for 1.1 gpu, type4 for nothing
typedicts={}
typedicts[1]=[]
typedicts[2]=[]
typedicts[3]=[]
typedicts[4]=[]
for gpu in gpus:
if gpus[gpu]>=9:
gputype=1
elif gpus[gpu]>7:
gputype=2
elif gpus[gpu]>4:
gputype=3
else:
gputype=4
typedicts[gputype].append(str(gpu))
#if typedicts[gputype].append(gpu):
#types.append(gputype)
#memories.append(gpus[gpu])
analy={}
analy[1]=[]
analy[2]=[]
analy[3]=[]
if self.args.twogpu:
res=list(gpus.keys())
if self.args.LBP:
able=[]
for i in range(len(res)):
if gpus[res[i]]>=9:
able.append(res[i])
if len(able)>=2:
#pdb.set_trace()
res=able
for i in range(int(len(res)/2)):
gpuid,another_gpu=str(res[i*2]),str(res[i*2+1])
analy[2].append([gpuid,another_gpu])
else:
for i in range(int(len(res)/2)):
gpuid,another_gpu=str(res[i*2]),str(res[i*2+1])
analy[2].append([gpuid,another_gpu])
#pdb.set_trace()
return analy
if args.dozat:
#pdb.set_trace()
if len(typedicts[2])>=1:
for i in range(len(typedicts[2])):
analy[1].append(typedicts[2][i])
if len(typedicts[1])>=1:
for i in range(len(typedicts[1])):
analy[1].append(typedicts[1][i])
elif args.singlegpu:
#if len(typedicts[2])>=1:
# for i in range(len(typedicts[2])):
# analy[1].append(typedicts[2][i])
if len(typedicts[1])>=1:
for i in range(len(typedicts[1])):
analy[1].append(typedicts[1][i])
else:
if len(typedicts[1])>=1:
if len(typedicts[2])>0:
another_gpu=typedicts[2][0]
elif len(typedicts[3])>0:
another_gpu=typedicts[3][0]
else:
another_gpu=typedicts[1][0]
for gpuid in typedicts[1]:
if gpuid==another_gpu:
continue
analy[3].append([gpuid,another_gpu])
#pdb.set_trace()
elif len(typedicts[2])>=2:
gpuid,another_gpu=typedicts[2][0],typedicts[2][1]
analy[2].append([gpuid,another_gpu])
if len(typedicts[2])==2:
pass
elif len(typedicts[2])==3:
pass
else:
gpuid,another_gpu=typedicts[2][2],typedicts[2][3]
analy[2].append([gpuid,another_gpu])
#pdb.set_trace()
return analy
def parse_run_config(self, sentence,node,gpu_set):
#data=sentence.split()
#pdb.set_trace()
cases=sentence.split()
cases[1]='"CUDA_VISIBLE_DEVICES='+gpu_set
cases[-1]=node
cases[2]=running
sentence=' '.join(cases)
return sentence
def parse_run_config2(self, sentence,gpu_set):
#data=sentence.split()
cases=sentence.split()
cases[0]='CUDA_VISIBLE_DEVICES='+','.join(gpu_set)
#pdb.set_trace()
if args.nowarning:
cases.insert(-2,'--nowarning')
sentence=' '.join(cases)
return sentence
def create_template(self):
for node in self.gpulist:
#total_writer=len(gpulist[node])*2
#writelist=[open('shell/gen_runner'+'_node'+str(node)+'_'+str(i)+self.use_dozat'.sh','w') for i in range(total_writer)]
#for k in range(each_node):
for gpuid in self.gpulist[node]:
sentence=self.pop_next()
if sentence==False:
continue
train_case=self.parse_run_config(sentence,node,gpuid)
self.god_writer.write(train_case+'\n')
self.god_writer.close()
def create_shell_AI(self):
runtype=[1,2,3]
writelister={}
writerls={}
for k in range(self.each_node):
for node in self.gpulist:
if node in self.banned:
continue
if len(self.analy[node][1])==0 and len(self.analy[node][2])==0 and len(self.analy[node][3])==0:
continue
if k==0:
writerls[node]=open('shell/'+self.write_head+'_node'+str(node)+'.sh','w')
#pdb.set_trace()
total_writer=len(self.gpulist[node])
writelister[node]=[open('shell/'+self.write_head+'_node'+str(node)+'_'+str(i)+self.use_dozat+'.sh','w') for i in range(total_writer)]
writelist=writelister[node]
writer=writerls[node]
#pdb.set_trace()
#writer.write('#node'+str(node)+'\n')
#to_train=[]
current=0
for run in runtype:
gpu_sets=self.analy[node][run]
#pdb.set_trace()
for gpu_set in gpu_sets:
sentence=self.pop_next()
if sentence==False:
continue
if run==2 or run==3:
set_config(sentence.split()[8],1-(run-2))
train_case=self.parse_run_config2(sentence,gpu_set)
if train_case[-1]=='&':
train_case=train_case[:-1]
writelist[current].write('echo "'+train_case.split()[-1].split('/')[1]+'"\n')
writelist[current].write(train_case+'\n')
current+=1
for node in self.gpulist:
if node in self.banned:
continue
if len(self.analy[node][1])==0 and len(self.analy[node][2])==0 and len(self.analy[node][3])==0:
continue
total_writer=len(self.gpulist[node])
for i in range(total_writer):
writelist=writelister[node]
writer=writerls[node]
writelist[i].close()
writer.write('bash '+'shell/'+self.write_head+'_node'+str(node)+'_'+str(i)+self.use_dozat+'.sh&'+'\n')
writer.close()
self.god_writer.write('nohup bash '+'shell/'+self.write_head+'_node'+str(node)+'.sh'+' >shell_log/shell'+str(node)+'\n')
def create_shell_P40(self):
writelister={}
writerls={}
for k in range(self.each_node):
for node in self.gpulist:
if k==0:
#pdb.set_trace()
writerls[node]=open('shell/'+self.write_head+'_node'+str(node)+'.sh','w')
#analy=node_analysis(free_node[node])
total_writer=len(self.gpulist[node])
writelister[node]=[open('shell/'+self.write_head+'_node'+str(node)+'_'+str(i)+self.use_dozat+'.sh','w') for i in range(total_writer)]
writelist=writelister[node]
writer=writerls[node]
current=0
for i in range(self.tworunning):
for gpuid in self.gpulist[node]:
sentence=self.pop_next()
if sentence==False:
continue
train_case=self.parse_run_config2(sentence,[str(gpuid)])
#pdb.set_trace()
if train_case[-1]=='&':
train_case=train_case[:-1]
writelist[current].write('echo "'+train_case.split()[-1].split('/')[1]+'"\n')
writelist[current].write(train_case+'\n')
current+=1
for node in self.gpulist:
writelist=writelister[node]
writer=writerls[node]
total_writer=len(self.gpulist[node])
for i in range(total_writer):
writelist[i].close()
writer.write('bash '+'shell/'+self.write_head+'_node'+str(node)+'_'+str(i)+self.use_dozat+'.sh&'+'\n')
writer.close()
self.god_writer.write('nohup bash '+'shell/'+self.write_head+'_node'+str(node)+'.sh'+' >shell_log/shell'+str(node)+args.name+'\n')
def start_create(self):
self.program()
self.god_writer.close()
sentence=self.pop_next()
print('ours:',sentence)
if not args.shell and args.cluster=='P40':
if args.dozat:
runfile='multipletrain_gen_dozat_P40.sh'
else:
runfile='multipletrain_gen_P40.sh'
else:
    runfile='multipletrain_gen.sh'
# python interpreter substituted into run configs by parse_run_config in
# non-shell mode (same cluster paths as in the commented-out block below)
if args.extra:
    running='/public/sist/home/lijn/anaconda2/envs/parser/bin/python'
else:
    running='/public/sist/home/wangxy1/anaconda3/envs/parser/bin/python'
fix_gpu=False
if args.fix_gpu!='':
fix_gpu_list=args.fix_gpu.split(',')
fix_gpu_list=[int(x) for x in fix_gpu_list]
fix_gpu=True
else:
fix_gpu_list=None
myusers=['wangxy1','lijn']
if args.dozat:
startsize=6000
else:
startsize=args.startsize
if args.extra and not args.dozat:
startsize=22000
analyzer=Train_generation(startsize, runfile, args, myusers, cluster=args.cluster,statefile='P40stat.txt', shell_mode=args.shell, self_design=args.self_design, fix_gpu=fix_gpu, fix_gpu_list=fix_gpu_list)
analyzer.start_create()
#if args.extra:
# running='/public/sist/home/lijn/anaconda2/envs/parser/bin/python'
#else:
# running='/public/sist/home/wangxy1/anaconda3/envs/parser/bin/python'
#statefile='P40stat.txt'
#gpulist=parse_gpu_state(statefile)
#pdb.set_trace()
#our_run=parse_running_sentence(runfile)
#pdb.set_trace()
#total_run=len(our_run)
#avail_gpu=0
#for gpu in gpulist:
# avail_gpu+=len(gpulist[gpu])
'''
if not args.shell:
each_node=int(total_run/avail_gpu)
for node in gpulist:
#total_writer=len(gpulist[node])*2
#writelist=[open('shell/gen_runner'+'_node'+str(node)+'_'+str(i)+'.sh','w') for i in range(total_writer)]
#for k in range(each_node):
for gpuid in gpulist[node]:
sentence,our_run=pop_next(our_run)
if sentence==False:
continue
train_case=parse_run_config(sentence,node,gpuid)
god_writer.write(train_case+'\n')
god_writer.close()
else:
each_node=int(total_run/avail_gpu)+1
for node in gpulist:
#pdb.set_trace()
if int(node)>18:
running='/public/sist/home/lijn/anaconda2/envs/parser/bin/python'
else:
running='/public/sist/home/wangxy1/anaconda3/envs/parser/bin/python'
writer=open('shell/P40_runner'+'_node'+str(node)+'.sh','w')
#analy=node_analysis(free_node[node])
total_writer=len(gpulist[node])
writelist=[open('shell/P40_runner'+'_node'+str(node)+'_'+str(i)+'.sh','w') for i in range(total_writer)]
#if node in banned:
# continue
writer.write('#node'+str(node)+'\n')
#to_train=[]
for k in range(each_node):
current=0
for i in range(tworunning):
for gpuid in gpulist[node]:
sentence,our_run=pop_next(our_run)
if sentence==False:
continue
train_case=parse_run_config2(sentence,gpuid)
#pdb.set_trace()
if train_case[-1]=='&':
train_case=train_case[:-1]
writelist[current].write('echo "'+train_case.split()[-1].split('/')[1]+'"\n')
writelist[current].write(train_case+'\n')
current+=1
for i in range(total_writer):
writelist[i].close()
writer.write('bash '+'shell/P40_runner'+'_node'+str(node)+'_'+str(i)+'.sh&'+'\n')
writer.close()
god_writer.write('nohup bash '+'shell/P40_runner'+'_node'+str(node)+'.sh'+' >shell_log/shell'+str(node)+'\n')
god_writer.close()
#sentence,dozat_run=pop_next(dozat_run)
#print('dozat:',sentence)
sentence,our_run=pop_next(our_run)
print('ours:',sentence)
'''
|
451893
|
import numpy as np
from prml.nn.array.broadcast import broadcast_to
from prml.nn.math.abs import abs
from prml.nn.math.exp import exp
from prml.nn.math.log import log
from prml.nn.random.random import RandomVariable
from prml.nn.tensor.constant import Constant
from prml.nn.tensor.tensor import Tensor
class Laplace(RandomVariable):
"""
Laplace distribution
p(x|loc, scale)
= exp(-|x - loc|/scale) / (2 * scale)
Parameters
----------
loc : tensor_like
location parameter
scale : tensor_like
scale parameter
data : tensor_like
realization
p : RandomVariable
original distribution of a model
"""
def __init__(self, loc, scale, data=None, p=None):
super().__init__(data, p)
self.loc, self.scale = self._check_input(loc, scale)
def _check_input(self, loc, scale):
loc = self._convert2tensor(loc)
scale = self._convert2tensor(scale)
if loc.shape != scale.shape:
shape = np.broadcast(loc.value, scale.value).shape
if loc.shape != shape:
loc = broadcast_to(loc, shape)
if scale.shape != shape:
scale = broadcast_to(scale, shape)
return loc, scale
@property
def loc(self):
return self.parameter["loc"]
@loc.setter
def loc(self, loc):
self.parameter["loc"] = loc
@property
def scale(self):
return self.parameter["scale"]
@scale.setter
def scale(self, scale):
try:
ispositive = (scale.value > 0).all()
except AttributeError:
ispositive = (scale.value > 0)
if not ispositive:
raise ValueError("value of scale must be positive")
self.parameter["scale"] = scale
    def forward(self):
        # inverse-CDF (reparameterization) sampling: for u ~ U(-0.5, 0.5),
        # loc - scale * sign(u) * log(1 - 2|u|) follows Laplace(loc, scale)
        eps = 0.5 - np.random.uniform(size=self.loc.shape)
        self.eps = np.sign(eps) * np.log(1 - 2 * np.abs(eps))
self.output = self.loc.value - self.scale.value * self.eps
if isinstance(self.loc, Constant) and isinstance(self.scale, Constant):
return Constant(self.output)
return Tensor(self.output, function=self)
def backward(self, delta):
dloc = delta
dscale = -delta * self.eps
self.loc.backward(dloc)
self.scale.backward(dscale)
def _pdf(self, x):
return 0.5 * exp(-abs(x - self.loc) / self.scale) / self.scale
def _log_pdf(self, x):
return np.log(0.5) - abs(x - self.loc) / self.scale - log(self.scale)
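# Usage sketch (values are illustrative):
# laplace = Laplace(loc=0., scale=1.)
# x = laplace.forward()   # reparameterized draw via the inverse-CDF trick above
# p = laplace._pdf(x)     # evaluates exp(-|x - loc|/scale) / (2 * scale)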
|
451905
|
from collections import defaultdict
import ipywidgets as ipw
from jinja2 import Template
from progressivis.core import JSONEncoderNp
import progressivis.core.aio as aio
from .control_panel import ControlPanel
from .sensitive_html import SensitiveHTML
from .utils import wait_for_change, wait_for_click, update_widget
from .module_graph import ModuleGraph
from .module_wg import ModuleWg
commons = {}
debug_console = ipw.Output()
#
# Coroutines
#
INDEX_TEMPLATE = """
<table class="table table-striped table-bordered table-hover table-condensed">
<thead><tr><th></th><th>Id</th><th>Class</th><th>State</th><th>Last Update</th><th>Order</th></tr></thead>
<tbody>
{% for m in modules%}
<tr>
{% for c in cols%}
<td>
{% if c=='id' %}
<a class='ps-row-btn' id="ps-row-btn_{{m[c]}}" type='button' >{{m[c]}}</a>
{% elif c=='is_visualization' %}
<span id="ps-cell_{{m['id']}}_{{c}}">{{'a' if m[c] else ' '}}</span>
{% else %}
<span id="ps-cell_{{m['id']}}_{{c}}">{{m[c]}}</span>
{% endif %}
</td>
{%endfor %}
</tr>
{%endfor %}
</tbody>
</table>
"""
async def module_choice(psboard):
while True:
await wait_for_change(psboard.htable, 'value')
# with debug_console:
# print("Clicked: ", psboard.htable.value)
if len(psboard.tab.children) < 3:
psboard.tab.children += (psboard.current_module,)
psboard.current_module.module_name = psboard.htable.value[
len(psboard.htable.sensitive_css_class)+1:]
psboard.current_module.selection_changed = True
psboard.tab.set_title(2, psboard.current_module.module_name)
psboard.tab.selected_index = 2
# await psboard.refresh()
# async def change_tab(psboard):
# while True:
# await wait_for_change(psboard.tab, 'selected_index')
# with debug_console:
# print("Changed: ", psboard.tab.selected_index)
# psboard.refresh()
async def refresh_fun(psboard):
while True:
# await psboard.refresh_event.wait()
# psboard.refresh_event.clear()
json_ = psboard.scheduler.to_json(short=False)
# pylint: disable=protected-access
psboard._cache = JSONEncoderNp.dumps(json_, skipkeys=True)
psboard._cache_js = None
await psboard.refresh()
await aio.sleep(0.5)
async def control_panel(psboard, action):
btn, cbk = psboard.cpanel.cb_args(action)
while True:
await wait_for_click(btn, cbk)
# end coros
# pylint: disable=too-many-ancestors,too-many-instance-attributes
class PsBoard(ipw.VBox):
def __init__(self, scheduler=None, order='asc'):
global debug_console # pylint: disable=global-statement
self._order = order
self.scheduler = scheduler
self._cache = None
self._cache_js = None
self.cpanel = ControlPanel(scheduler)
self.current_module = ModuleWg(self, debug_console)
self.mgraph = ModuleGraph()
self.tab = ipw.Tab()
self.tab.set_title(0, 'Modules')
self.tab.set_title(1, 'Module graph')
self.state = []
self.last_update = []
self.btns = []
self.msize = 0
self.cols = ['is_visualization', 'id', 'classname', 'state',
'last_update', 'order']
self.htable = SensitiveHTML(layout=ipw.Layout(height='500px',
overflow='auto'))
self.refresh_event = None
self.other_coros = []
self.vis_register = defaultdict(list)
commons.update(tab=self.tab, scheduler=self.scheduler)
super().__init__([self.cpanel, self.tab, debug_console])
async def make_table_index(self, modules):
modules = sorted(modules, key=lambda x: x['order'],
reverse=(self._order=='desc'))
if not self.htable.html:
tmpl = Template(INDEX_TEMPLATE)
await update_widget(self.htable,
'sensitive_css_class', 'ps-row-btn')
html = tmpl.render(modules=modules, cols=self.cols)
# print(html)
await update_widget(self.htable,
'html', html)
else:
data = {}
for m in modules:
for c in self.cols:
dataid = f"ps-cell_{m['id']}_{c}"
if c == 'is_visualization':
                        # Show a Unicode eye next to visualizations
data[dataid] = '\U0001F441' if m['id'] in self.vis_register else ' '
else:
data[dataid] = m[c]
await update_widget(self.htable, 'data', data)
def register_visualisation(self, widget, module,
label="Visualisation", glue=None):
"""
called from notebook
if module_class is None and module_id is None:
raise ValueError("One and only one of 'module_class' and 'module_id' args must be defined")
if not(module_class is None or module_id is None):
raise ValueError("One and only one of 'module_class' and 'module_id' args must be defined")
"""
linkable = hasattr(widget, 'link_module')
if not linkable and glue is None:
            raise ValueError("Registering a visualisation requires a linkable "
                             "widget (i.e. one which implements the "
                             "'link_module' interface) or the 'glue' arg to be "
                             "provided with a valid 'glue' function")
if glue is not None:
self.other_coros += glue(widget, module)
else:
self.other_coros += widget.link_module(module, refresh=False)
self.vis_register[module.name].append((widget, label))
@property
def coroutines(self):
return [refresh_fun(self), module_choice(self),
control_panel(self, "resume"),
control_panel(self, "stop"),
control_panel(self, "step")]+self.other_coros
async def refresh(self):
if self._cache is None:
return
if self._cache_js is None:
self._cache_js = JSONEncoderNp.loads(self._cache)
json_ = self._cache_js
# self.cpanel.run_nb.value = str(json_['run_number'])
await update_widget(self.cpanel.run_nb, 'value',
str(json_['run_number']))
if self.tab.selected_index == 0:
await self.make_table_index(json_['modules'])
elif self.tab.selected_index == 1:
# self.mgraph.data = self._cache
await update_widget(self.mgraph, 'data', self._cache)
else:
assert len(self.tab.children) > 2
await self.current_module.refresh()
if len(self.tab.children) < 3:
self.tab.children = [self.htable, self.mgraph]
else:
third = self.tab.children[2]
self.tab.children = [self.htable, self.mgraph, third]
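# Hypothetical notebook usage (assumes a progressivis Scheduler named `scheduler`):
# board = PsBoard(scheduler)
# board  # display the widget in a Jupyter cell
# for coro in board.coroutines:
#     aio.create_task(coro)  # assumption: aio exposes an asyncio-like create_task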
|
451925
|
from __future__ import print_function
from setuptools import setup, find_packages
setup(
name='rc-cts-urlscanio',
version='1.0.1',
url='https://github.com/IBMResilient/resilient-community-apps',
license='MIT',
author='<NAME>',
author_email='<EMAIL>',
install_requires=[
'rc-cts'
],
description="Resilient Circuits Custom Threat Service for urlscan.io",
long_description="Resilient Circuits Custom Threat Service for urlscan.io",
packages=find_packages(),
include_package_data=True,
platforms='any',
classifiers=[
'Programming Language :: Python',
],
entry_points={
# Register the component with resilient_circuits
"resilient.circuits.components": [
"UrlScanIoSearcher = rc_cts_urlscanio.components.searcher:UrlScanIoSearcher"
],
"resilient.circuits.configsection": ["gen_config = rc_cts_urlscanio.util.config:config_section_data"],
}
)
|
451968
|
import arrow
import pytest
import unittest
from weasyl import define as d
from weasyl import profile
from weasyl.error import WeasylError
from weasyl.test import db_utils
@pytest.mark.usefixtures('db')
class ProfileManageTestCase(unittest.TestCase):
def setUp(self):
self.mod = db_utils.create_user()
def test_select_manage(self):
user = db_utils.create_user()
links = [
{
'userid': user,
'link_type': 'Twitter',
'link_value': 'Weasyl',
},
{
'userid': user,
'link_type': 'Email',
'link_value': 'mailto:<EMAIL>',
},
]
d.engine.execute(d.meta.tables['user_links'].insert().values(links))
test_user_profile = profile.select_manage(user)
self.assertEqual(len(test_user_profile['sorted_user_links']), 2)
def test_remove_social_links(self):
user = db_utils.create_user()
links = [
{
'userid': user,
'link_type': 'Twitter',
'link_value': 'Weasyl',
},
{
'userid': user,
'link_type': 'Email',
'link_value': 'mailto:<EMAIL>',
},
]
d.engine.execute(d.meta.tables['user_links'].insert().values(links))
profile.do_manage(self.mod, user, remove_social=['Email'])
test_user_profile = profile.select_manage(user)
self.assertEqual(test_user_profile['sorted_user_links'], [('Twitter', ['Weasyl'])])
def test_sort_user_links(self):
user = db_utils.create_user()
links = [
{
'userid': user,
'link_type': 'Twitter',
'link_value': 'Weasyl',
},
{
'userid': user,
'link_type': 'Email',
'link_value': 'mailto:<EMAIL>',
},
{
'userid': user,
'link_type': 'Twitter',
'link_value': 'WeasylDev',
}
]
d.engine.execute(d.meta.tables['user_links'].insert().values(links))
test_user_profile = profile.select_manage(user)
self.assertEqual(test_user_profile['sorted_user_links'], [
('Email', ['mailto:<EMAIL>']),
('Twitter', ['Weasyl', 'WeasylDev']),
])
def test_valid_commission_settings(self):
user = db_utils.create_user()
profile.edit_profile_settings(user,
set_trade=profile.EXCHANGE_SETTING_ACCEPTING,
set_request=profile.EXCHANGE_SETTING_NOT_ACCEPTING,
set_commission=profile.EXCHANGE_SETTING_FULL_QUEUE)
test_user_profile = profile.select_profile(user)
exchange_settings = profile.exchange_settings_from_settings_string(test_user_profile['settings'])
self.assertEqual(exchange_settings[profile.EXCHANGE_TYPE_TRADE], profile.EXCHANGE_SETTING_ACCEPTING)
self.assertEqual(exchange_settings[profile.EXCHANGE_TYPE_REQUEST], profile.EXCHANGE_SETTING_NOT_ACCEPTING)
self.assertEqual(exchange_settings[profile.EXCHANGE_TYPE_COMMISSION], profile.EXCHANGE_SETTING_FULL_QUEUE)
@pytest.mark.usefixtures('db', 'drop_email')
def test_edit_email_password(monkeypatch):
monkeypatch.setattr(profile, 'invalidate_other_sessions', lambda x: '')
from weasyl.login import verify_email_change
password = "<PASSWORD>"
username = "test0042"
email = "<EMAIL>"
userid = db_utils.create_user(username=username, password=password, email_addr=email)
# Case 1: No changes, user authentication succeeds
assert not profile.edit_email_password(
userid=userid,
password=password,
newemail="",
newpassword="",
)
# Case 2: No changes, user authentication fails
with pytest.raises(WeasylError) as err:
profile.edit_email_password(
userid=userid,
password="<PASSWORD>",
newemail="",
newpassword="",
)
assert 'passwordIncorrect' == err.value.value
# Case 3: Changes, new password only, password too short/'insecure'
with pytest.raises(WeasylError) as err:
profile.edit_email_password(
userid=userid,
password=password,
newemail="",
newpassword="<PASSWORD>",
)
assert 'passwordInsecure' == err.value.value
# Case 5: Changes, new password only, password change succeeds
result = profile.edit_email_password(
userid=userid,
password=password,
newemail="",
newpassword="<PASSWORD>",
)
assert "Your password has been successfully changed" in result
password = "<PASSWORD>"
# Case 7: Changes, new email only, email already in use
db_utils.create_user(email_addr="<EMAIL>")
profile.edit_email_password(
userid=userid,
password=password,
newemail="<EMAIL>",
newpassword="",
)
query = d.engine.scalar("""
SELECT email FROM emailverify WHERE userid = %(userid)s LIMIT 1
""", userid=userid)
assert not query
# Case 8: Changes, new email only, email change succeeds
newemailaddr = "<EMAIL>"
result = profile.edit_email_password(
userid=userid,
password=password,
newemail=newemailaddr,
newpassword="",
)
assert "Your email change request is currently pending" in result
query = d.engine.execute("""
SELECT userid, email, token, createtimestamp
FROM emailverify
WHERE userid = %(userid)s
""", userid=userid).fetchone()
QID, QEMAIL, QTOKEN, QTIMESTAMP = query
assert QID == userid
assert QEMAIL == newemailaddr
assert len(QTOKEN) == 40
assert arrow.get(QTIMESTAMP)
# Now that we have the token, let's also verify that ``login.verify_email_change`` works.
# It's as good a place as any.
# Case 8.1/8.2: Make sure invalid token and/or userid doesn't work.
with pytest.raises(WeasylError) as err:
verify_email_change(None, "a")
assert "Unexpected" == err.value.value
with pytest.raises(WeasylError) as err:
verify_email_change(1, None)
assert "Unexpected" == err.value.value
# Case 8.3: An incorrect token is provided.
with pytest.raises(WeasylError) as err:
verify_email_change(userid, "a")
assert "ChangeEmailVerificationTokenIncorrect" == err.value.value
# Case 8.4: Correct token is provided, and the new email is written to `login`
result = verify_email_change(userid, QTOKEN)
assert result == newemailaddr
query = d.engine.scalar("""
SELECT email
FROM login
WHERE userid = %(userid)s
""", userid=userid)
assert query == QEMAIL
# Case 9: Email and password changed at the same time.
newemailaddr = "<EMAIL>"
newpassword = "<PASSWORD>"
result = profile.edit_email_password(
userid=userid,
password=password,
newemail=newemailaddr,
newpassword=newpassword,
)
assert "Your password has been successfully changed" in result
assert "Your email change request is currently pending" in result
|
452006
|
from django.db import models
from django.utils.translation import gettext_lazy as _
from account.models import User
class Discounts(models.Model):
"""
Model for Product price discount
"""
STATUS = (
('accepted', 'accepted'),
('pending', 'pending'),
('rejected', 'rejected'),
)
# Fields
wholesale_value = models.PositiveSmallIntegerField(
default=0,
help_text='%',
)
retail_value = models.PositiveSmallIntegerField(
default=0,
help_text='%',
)
seller = models.ForeignKey(
User,
on_delete=models.CASCADE,
related_name='discounts',
)
all_products = models.BooleanField(
default=False,
verbose_name=_("Discount on all the products?")
)
category = models.ForeignKey(
to="products.ProductsCategory",
on_delete=models.CASCADE,
null=True,
blank=True,
)
created_at = models.DateTimeField(
auto_now_add=True,
)
status = models.CharField(
max_length=10,
choices=STATUS,
default='pending',
)
# Metadata
class Meta:
verbose_name_plural = "01. Discounts"
verbose_name = "discounts"
ordering = ("-created_at",)
# Methods
def __str__(self):
return f"{self.wholesale_value} - {self.retail_value}"
class ShowDiscount(models.Model):
"""
Model for show/hide product discount
"""
# Fields
show_discount = models.BooleanField(
default=True,
)
category = models.ForeignKey(
to="products.ProductsCategory",
on_delete=models.CASCADE,
null=True,
blank=True,
)
brand = models.ForeignKey(
to="account.User",
on_delete=models.CASCADE,
limit_choices_to={'is_seller': True},
null=True,
blank=True,
)
# Metadata
class Meta:
verbose_name = "status"
verbose_name_plural = "02. Discount show/hide status"
# Methods
def __str__(self):
return "Status"
|
452012
|
import sys
import xml.etree.ElementTree as ET
from netconf.client import connect_ssh
def usage():
print('usage: test.py host user password operation{route_dump, face_dump, face_add, route_add, punt_add, face_del, punt_del, route_del}')
def test(host,user,password,operation):
with connect_ssh(host, 830, user, password) as session:
if (operation=='face_dump'):
config = session.get()
for root in config:
if root.tag=="{urn:sysrepo:hicn}hicn-state":
for entity in root:
if entity.tag=="{urn:sysrepo:hicn}faces":
print('Faces')
for face in entity:
for elem in face:
print(elem.tag +" : "+ elem.text)
elif (operation=='state_dump'):
config = session.get()
for root in config:
if root.tag=="{urn:sysrepo:hicn}hicn-state":
for entity in root:
if entity.tag=="{urn:sysrepo:hicn}states":
print('States')
for state in entity:
print(state.tag +" : "+ state.text)
elif (operation=='route_dump'):
config = session.get()
for root in config:
if root.tag=="{urn:sysrepo:hicn}hicn-state":
for entity in root:
if entity.tag=="{urn:sysrepo:hicn}routes":
print('Routes')
for route in entity:
for elem in route:
print(elem.tag +" : "+ elem.text)
elif(operation=='face_add'):
root = ET.parse('aface.xml').getroot()
session.send_rpc(ET.tostring(root, encoding='utf8').decode('utf8'))
elif(operation=='punt_add'):
root = ET.parse('apunt.xml').getroot()
session.send_rpc(ET.tostring(root, encoding='utf8').decode('utf8'))
elif(operation=='route_add'):
root = ET.parse('aroute.xml').getroot()
session.send_rpc(ET.tostring(root, encoding='utf8').decode('utf8'))
elif(operation=='face_del'):
root = ET.parse('dface.xml').getroot()
session.send_rpc(ET.tostring(root, encoding='utf8').decode('utf8'))
elif(operation=='punt_del'):
root = ET.parse('dpunt.xml').getroot()
session.send_rpc(ET.tostring(root, encoding='utf8').decode('utf8'))
elif(operation=='route_del'):
root = ET.parse('droute.xml').getroot()
session.send_rpc(ET.tostring(root, encoding='utf8').decode('utf8'))
else:
usage()
if __name__ == '__main__':
    if(len(sys.argv)<5):
usage()
else:
test(sys.argv[1],sys.argv[2],sys.argv[3],sys.argv[4])
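# Example invocation (host and credentials are placeholders):
#   python test.py 192.0.2.1 admin secret face_dump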
|
452020
|
import redis
import json
with open('mock_data.json') as json_file:
data=json.load(json_file)
redis_client = redis.Redis(host='redis_db', port=6379)
for i in data:
    # store each record as a Redis hash keyed by its id
    redis_client.hset("user:%s" % i['id'], mapping=i)
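# Read-back sketch (field names per the mock data; values come back as bytes):
# print(redis_client.hgetall("user:%s" % data[0]['id']))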
|
452026
|
import FWCore.ParameterSet.Config as cms
from RecoBTag.SecondaryVertex.pfInclusiveSecondaryVertexFinderTagInfos_cfi import *
pfInclusiveSecondaryVertexFinderCA15TagInfos = pfInclusiveSecondaryVertexFinderTagInfos.clone(
trackIPTagInfos = "pfImpactParameterCA15TagInfos",
extSVDeltaRToJet = 1.5,
trackSelection = dict(jetDeltaRMax = 1.5), # plays no role since using IVF vertices
vertexCuts = dict(maxDeltaRToJetAxis = 1.5)
)
|
452030
|
import FWCore.ParameterSet.Config as cms
import collections
def customiseEarlyDeleteForSeeding(process, products):
# Find the producers
depends = collections.defaultdict(list)
def _branchName(productType, moduleLabel, instanceLabel=""):
return "%s_%s_%s_%s" % (productType, moduleLabel, instanceLabel, process.name_())
for name, module in process.producers_().items():
cppType = module._TypedParameterizable__type
if cppType == "HitPairEDProducer":
if module.produceSeedingHitSets:
products[name].append(_branchName("RegionsSeedingHitSets", name))
if module.produceIntermediateHitDoublets:
products[name].append(_branchName("IntermediateHitDoublets", name))
elif cppType in ["PixelTripletHLTEDProducer", "PixelTripletLargeTipEDProducer"]:
if module.produceSeedingHitSets:
products[name].append(_branchName("RegionsSeedingHitSets", name))
if module.produceIntermediateHitTriplets:
products[name].append(_branchName("IntermediateHitTriplets", name))
# LayerHitMapCache of the doublets is forwarded to both
# products, hence the dependency
depends[name].append(module.doublets.getModuleLabel())
elif cppType in ["MultiHitFromChi2EDProducer"]:
products[name].extend([
_branchName("RegionsSeedingHitSets", name),
_branchName("BaseTrackerRecHitsOwned", name)
])
elif cppType in ["CAHitQuadrupletEDProducer", "CAHitTripletEDProducer"]:
products[name].append(_branchName("RegionsSeedingHitSets", name))
if len(products) == 0:
return products
# Resolve data dependencies
#
# If a productB depends on productA (e.g. by ref or pointer), then
    # everybody that mightGets productB must also mightGet productA
def _resolve(keys, name):
for dependsOn in depends[name]:
if dependsOn in keys:
_resolve(keys, dependsOn)
keys.remove(dependsOn)
products[name].extend(products[dependsOn])
keys = set(depends.keys())
while len(keys) > 0:
name = keys.pop()
_resolve(keys, name)
return products
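# Illustrative use (a sketch; `process` is a cms.Process containing seeding
# producers, and the defaultdict accumulates deletable branch names):
# products = customiseEarlyDeleteForSeeding(process, collections.defaultdict(list))
# Afterwards each producer label maps to the branches that can be deleted early,
# with referenced doublet branches merged into their consumers' lists.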
|
452031
|
import numpy as np
from numpy.testing import assert_almost_equal
import cubedsphere as cs
def test_csgrid_gmap():
# read reference data
ds_FV3 = cs.open_FV3data("./example_notebooks/sample_data/FV3diag_C48/",
'atmos_daily')
# calculate grid online
grid = cs.csgrid_GMAO(48)
# fix a single polar point before testing. should not have real effect
index = (5, 24, 24) # index of the pole point
assert grid['lat_b'][index] == -90.0
assert grid['lon_b'][index] == 35.0
assert ds_FV3['lon_b'][index] == 350.0
grid['lon_b'][index] = 350.0 # change to FV3 value
for varname in ['lon', 'lon_b', 'lat', 'lat_b']:
assert_almost_equal(grid[varname], ds_FV3[varname], decimal=4)
|
452078
|
import pytest
from torch.optim import SGD as _SGD
from neuralpy.optimizer import SGD
# Possible values that are invalid
learning_rates = [-6, False, ""]
momentums = [-6, False, ""]
dampenings = ["asd", False, 3]
weight_decays = [-0.36, "asd", "", False]
nesteroves = [122, ""]
@pytest.mark.parametrize(
"learning_rate, momentum, dampening, weight_decay, nesterov",
[
(-6, 0.33, 0.333, 0.333, False),
("invalid", 0.33, 0.333, 0.333, False),
(0.00, 0.33, 0.333, 0.333, False),
(0.001, -6, 0.333, 0.333, False),
(0.001, False, 0.333, 0.333, False),
(0.001, 0.1, False, 0.333, False),
(0.001, 0.002, "invalid", 0.33, 122),
(0.001, 0.002, 0.376, False, 122),
(0.001, 0.002, 0.342, "test", 122),
(0.001, 0.002, 0.342, 0.1, 122),
(0.001, 0.002, 0.342, 0.1, "invalid"),
],
)
def test_sgd_should_throw_value_error(
learning_rate, momentum, dampening, weight_decay, nesterov
):
with pytest.raises(ValueError):
SGD(
learning_rate=learning_rate,
momentum=momentum,
dampening=dampening,
weight_decay=weight_decay,
nesterov=nesterov,
)
# Possible values that are valid
learning_rates = [0.1, 0.002]
momentums = [0.1, 0.002]
dampenings = [0.35]
weight_decays = [0.1, 0.002]
nesteroves = [False, True]
@pytest.mark.parametrize(
"learning_rate, momentum, dampening, weight_decay, nesterov",
[
(learning_rate, momentum, dampening, weight_decay, nesterov)
for learning_rate in learning_rates
for momentum in momentums
for dampening in dampenings
for weight_decay in weight_decays
for nesterov in nesteroves
],
)
def test_sgd_get_layer_method(
learning_rate, momentum, dampening, weight_decay, nesterov
):
x = SGD(
learning_rate=learning_rate,
momentum=momentum,
dampening=dampening,
weight_decay=weight_decay,
nesterov=nesterov,
)
details = x.get_optimizer()
assert isinstance(details, dict) is True
assert issubclass(details["optimizer"], _SGD) is True
assert isinstance(details["keyword_arguments"], dict) is True
assert details["keyword_arguments"]["lr"] == learning_rate
assert details["keyword_arguments"]["momentum"] == momentum
assert details["keyword_arguments"]["dampening"] == dampening
assert details["keyword_arguments"]["weight_decay"] == weight_decay
assert details["keyword_arguments"]["nesterov"] == nesterov
def test_sgd_get_layer_method_without_parameter():
x = SGD()
details = x.get_optimizer()
assert isinstance(details, dict) is True
assert issubclass(details["optimizer"], _SGD) is True
assert isinstance(details["keyword_arguments"], dict) is True
assert details["keyword_arguments"]["lr"] == 0.001
assert details["keyword_arguments"]["momentum"] == 0.0
assert details["keyword_arguments"]["dampening"] == 0.0
assert details["keyword_arguments"]["weight_decay"] == 0.0
assert details["keyword_arguments"]["nesterov"] is False
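# Illustrative use of the returned spec (a sketch; `params` is any iterable of
# torch tensors with requires_grad=True):
# details = SGD(learning_rate=0.01).get_optimizer()
# torch_optimizer = details["optimizer"](params, **details["keyword_arguments"])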
|
452084
|
import numpy as np
from pandas import PeriodIndex
import pandas._testing as tm
class TestFactorize:
def test_factorize(self):
idx1 = PeriodIndex(
["2014-01", "2014-01", "2014-02", "2014-02", "2014-03", "2014-03"], freq="M"
)
exp_arr = np.array([0, 0, 1, 1, 2, 2], dtype=np.intp)
exp_idx = PeriodIndex(["2014-01", "2014-02", "2014-03"], freq="M")
arr, idx = idx1.factorize()
tm.assert_numpy_array_equal(arr, exp_arr)
tm.assert_index_equal(idx, exp_idx)
arr, idx = idx1.factorize(sort=True)
tm.assert_numpy_array_equal(arr, exp_arr)
tm.assert_index_equal(idx, exp_idx)
idx2 = PeriodIndex(
["2014-03", "2014-03", "2014-02", "2014-01", "2014-03", "2014-01"], freq="M"
)
exp_arr = np.array([2, 2, 1, 0, 2, 0], dtype=np.intp)
arr, idx = idx2.factorize(sort=True)
tm.assert_numpy_array_equal(arr, exp_arr)
tm.assert_index_equal(idx, exp_idx)
exp_arr = np.array([0, 0, 1, 2, 0, 2], dtype=np.intp)
exp_idx = PeriodIndex(["2014-03", "2014-02", "2014-01"], freq="M")
arr, idx = idx2.factorize()
tm.assert_numpy_array_equal(arr, exp_arr)
tm.assert_index_equal(idx, exp_idx)
|
452111
|
import copy
import requests
import os
import time
import threading
import json
import logging
import multiprocessing
from .autotune_task_manager import AutotuneTaskManager
from bagua.bagua_define import (
TensorDtype,
TensorDeclaration,
BaguaCoreTelemetrySpan,
BaguaHyperparameter,
)
from flask import request
import numpy as np
from typing import Dict, List
class NpEncoder(json.JSONEncoder):
def default(self, obj):
if isinstance(obj, np.integer):
return int(obj)
elif isinstance(obj, np.floating):
return float(obj)
elif isinstance(obj, np.ndarray):
return obj.tolist()
elif isinstance(obj, TensorDtype):
return obj.value
else:
return super(NpEncoder, self).default(obj)
class AutotuneServiceTaskManager:
def __init__(
self, task_name: str, world_size: int, is_output_autotune_log: bool
) -> None:
self.inner = AutotuneTaskManager(task_name, is_output_autotune_log)
self.warmup_pass_count = 0
self.sampling_count = 0
self.lock = threading.Lock()
self.check_board = [-1] * world_size
self.time_hp_last_granted = time.time()
self.hyperparameter = BaguaHyperparameter()
class AutotuneService:
MAX_TRACE_INFO = 1000
def __init__(
self,
world_size,
autotune_level=0,
max_samples=60,
sampling_confidence_time_s=5,
warmup_time_s=30,
is_output_autotune_log=False,
default_bucket_size=10 * 1024 ** 2,
):
self.autotune_level = autotune_level
self.world_size = world_size
self.max_samples = max_samples
self.sampling_confidence_time_s = sampling_confidence_time_s
self.warmup_time_s = warmup_time_s
self.is_initialized = False
self.is_output_autotune_log = is_output_autotune_log
self.default_bucket_size: int = default_bucket_size
self.model_dict: Dict[str, AutotuneServiceTaskManager] = {}
self.model_dict_mutex = threading.Lock()
# bagua-core trace and obtain tensor calculation partial order
self.trace_info_dict = {}
self.tensor_partial_order = {}
self.tensor_partial_order_fixed = False
self.tensor_partial_order_lock = threading.Lock()
def autotune(
self,
hp_manager: AutotuneServiceTaskManager,
rank: int,
train_iter: int,
tensor_partial_order: Dict[str, int] = {},
):
if hp_manager.sampling_count > self.max_samples:
return
(
recommended_train_iter,
hp,
system_efficiency_score,
) = hp_manager.inner.tail_record()
logging.info(
"recommended_train_iter={}, hyperparameters={}, speed={}".format(
recommended_train_iter,
hp,
system_efficiency_score,
)
)
sampling_time = time.time() - hp_manager.time_hp_last_granted
# Skip at least once during warmup
if (
sampling_time < self.warmup_time_s
or hp_manager.warmup_pass_count == 0 # noqa: W503
):
logging.info(
"warmup pass, time.time={}, time_hp_last_granted={}, "
"warmup_time_s={}".format(
time.time(),
hp_manager.time_hp_last_granted,
self.warmup_time_s,
)
)
hp_manager.warmup_pass_count += 1
return
if hp_manager.sampling_count == 0:
confidence_skip = (
sampling_time < self.warmup_time_s + self.sampling_confidence_time_s
)
else:
confidence_skip = sampling_time < self.sampling_confidence_time_s
if confidence_skip:
logging.debug(
"The sampling time is not up, time={}, last={}, "
"sampling_confidence_time_s={}".format(
time.time(),
hp_manager.time_hp_last_granted,
self.sampling_confidence_time_s,
)
)
return
logging.info(
"rank={}, train_iter={}, sampling_count={}, "
"max_samples={}".format(
rank, train_iter, hp_manager.sampling_count, self.max_samples
)
)
recommended_bagua_hp = hp_manager.inner.ask_hyperparmeter(
train_iter, tensor_partial_order
)
if hp_manager.sampling_count < self.max_samples:
hp_manager.hyperparameter = recommended_bagua_hp
else:
hp_manager.hyperparameter = hp_manager.inner.best_hyperparameter()
hp_manager.sampling_count += 1
def setup_app(self, app):
@app.route("/api/v1/register_tensors", methods=["POST"])
def register_tensors():
req: dict = request.get_json(force=True)
model_name: str = req["model_name"]
tensor_list: List[TensorDeclaration] = req["tensor_list"]
whether_to_bucket: bool = req["whether_to_bucket"]
with self.model_dict_mutex:
if model_name not in self.model_dict:
self.model_dict[model_name] = AutotuneServiceTaskManager(
task_name=model_name,
world_size=self.world_size,
is_output_autotune_log=self.is_output_autotune_log,
)
hp_manager = self.model_dict[model_name]
bucket_size = self.default_bucket_size
if whether_to_bucket is False:
                bucket_size = 10 * 1024 ** 5  # effectively a single huge bucket
with hp_manager.lock:
hp = BaguaHyperparameter(
buckets=AutotuneTaskManager.split_bucket_by_bucket_size(
tensor_list,
bucket_size,
),
bucket_size=bucket_size,
)
hp_manager.time_hp_last_granted = time.time()
hp_manager.hyperparameter = hp
return json.dumps(
{
"recommended_hyperparameters": hp.dict(),
}
)
@app.route("/api/v1/report_metrics", methods=["POST"])
def report_metrics():
req: dict = request.get_json(force=True)
model_name: str = req["model_name"]
rank: int = req["rank"]
train_iter: int = req["train_iter"]
speed: float = req["speed"]
hyperparameters = req["hyperparameters"]
if model_name not in self.model_dict:
return "Service not ready for report_metrics!", 405
hp_manager = self.model_dict[model_name]
            # For now, only the first rank to report metrics for an iteration is considered.
with hp_manager.lock:
(last_report_train_iter, _, _) = hp_manager.inner.tail_record()
if train_iter <= last_report_train_iter:
return json.dumps({})
logging.debug(
"rank={}, train_iter={}, speed={}, "
"hyperparameters={}".format(
rank,
train_iter,
speed,
hyperparameters,
)
)
hp_manager.inner.report_metrics(
train_iter=train_iter,
hyperparameter=BaguaHyperparameter().update(hyperparameters),
system_efficiency_score=speed,
)
return json.dumps({})
@app.route("/api/v1/ask_hyperparameters", methods=["POST"])
def ask_hyperparameters():
"""
report_metrics must be called before ask_hyperparameters
"""
req: dict = request.get_json(force=True)
rank: int = req["rank"]
model_name: str = req["model_name"]
train_iter: int = req["train_iter"]
if model_name not in self.model_dict:
return "Service not ready for report_metrics!", 405
hp_manager = self.model_dict[model_name]
tensor_partial_order = {}
with self.tensor_partial_order_lock:
tensor_partial_order = copy.deepcopy(self.tensor_partial_order)
logging.debug("tensor_partial_order={}".format(tensor_partial_order))
with hp_manager.lock:
# Autotune conditions:
# 1. autotune_level >= 1.
# 2. The bagua process is not in the process of hyperparameter update. (self.check_board.count(self.check_board[0])
# == len(self.check_board))
# 3. Only execute autotune at most once in an iteration. (self.check_board[rank] < train_iter)
check_board = hp_manager.check_board
if (
self.autotune_level >= 1
and check_board.count(check_board[0]) # noqa: W503
== len(check_board) # noqa: W503
and check_board[rank] < train_iter # noqa: W503
):
self.autotune(hp_manager, rank, train_iter, tensor_partial_order)
check_board[rank] = train_iter
return json.dumps(
{
"recommended_hyperparameters": hp_manager.hyperparameter.dict(),
"is_autotune_completed": hp_manager.sampling_count
> self.max_samples, # noqa: W503
}
)
@app.route("/api/v1/report_tensor_execution_order", methods=["POST"])
def report_tensor_execution_order():
req: dict = request.get_json(force=True)
spans: List[BaguaCoreTelemetrySpan] = req["spans"]
with self.tensor_partial_order_lock:
spans = sorted(spans, key=lambda span: span["start_time"])
for span in spans:
tensor_name = span["tensor_name"]
action = span["action"]
if (tensor_name, action) in self.trace_info_dict:
continue
self.trace_info_dict[(tensor_name, action)] = True
if tensor_name not in self.tensor_partial_order:
self.tensor_partial_order[tensor_name] = len(
self.tensor_partial_order
)
return json.dumps({})
@app.route("/api/v1/health_check", methods=["GET"])
def health_check():
return json.dumps({"status": "ok"})
# set secret-key
app.config.update(SECRET_KEY=os.urandom(24))
return app
def reset_error_retry(request_func):
"""Retry request when catch ConnectionResetError."""
def wrap(*args, **kwargs):
MAX_RETRIES = 3
for retry in range(MAX_RETRIES + 1):
try:
result = request_func(*args, **kwargs)
return result
except (ConnectionResetError, requests.exceptions.ConnectionError) as e:
if retry == MAX_RETRIES:
raise e
logging.warning("request failed, retry={}, e={}".format(retry, e))
time.sleep(1)
return wrap
class AutotuneClient:
def __init__(
self,
service_addr: str,
service_port: int,
proxies={
"http": None,
"https": None,
},
):
self.autotune_service_addr = "{}:{}".format(service_addr, service_port)
self.session = requests.Session()
self.proxies = proxies
import socket
from urllib3.connection import HTTPConnection
HTTPConnection.default_socket_options = HTTPConnection.default_socket_options + [
            (socket.SOL_SOCKET, socket.SO_KEEPALIVE, 1),  # enable TCP keepalive
            (socket.SOL_TCP, socket.TCP_KEEPIDLE, 45),  # idle time before the stack starts sending keepalives on a persistent connection
            (socket.SOL_TCP, socket.TCP_KEEPINTVL, 10),  # interval between successive keepalive probes
            (socket.SOL_TCP, socket.TCP_KEEPCNT, 6),  # number of failed probes before the connection is dropped
]
@reset_error_retry
def report_metrics(
self,
model_name: str,
rank: int,
train_iter: int,
hyperparameters: dict,
speed: float,
) -> requests.Response:
rsp = self.session.post(
"http://{}/api/v1/report_metrics".format(self.autotune_service_addr),
json={
"model_name": model_name,
"rank": rank,
"train_iter": train_iter,
"hyperparameters": hyperparameters,
"speed": speed,
},
proxies=self.proxies,
)
return rsp
@reset_error_retry
def register_tensors(
self,
model_name: str,
tensor_list: List[TensorDeclaration],
whether_to_bucket: bool = True,
) -> requests.Response:
rsp = self.session.post(
"http://{}/api/v1/register_tensors".format(self.autotune_service_addr),
json={
"model_name": model_name,
"tensor_list": tensor_list,
"whether_to_bucket": whether_to_bucket,
},
proxies=self.proxies,
)
return rsp
@reset_error_retry
def ask_hyperparameters(
self,
model_name: str,
rank: int,
train_iter: int,
) -> requests.Response:
rsp = self.session.post(
"http://{}/api/v1/ask_hyperparameters".format(self.autotune_service_addr),
json={
"model_name": model_name,
"rank": rank,
"train_iter": train_iter,
},
proxies=self.proxies,
)
return rsp
@reset_error_retry
def report_tensor_execution_order(
self,
spans: List[BaguaCoreTelemetrySpan],
) -> requests.Response:
rsp = self.session.post(
"http://{}/api/v1/report_tensor_execution_order".format(
self.autotune_service_addr
),
json={
"spans": spans,
},
proxies=self.proxies,
)
return rsp
def health_check(self) -> bool:
try:
# get response will be ok
self.session.get(
"http://{}/api/v1/health_check".format(
self.autotune_service_addr
),
proxies=self.proxies
)
return True
except requests.exceptions.ConnectionError:
return False
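# Client-side flow sketch (address, model name and values are placeholders):
# client = AutotuneClient("127.0.0.1", 8123)
# client.register_tensors("my_model", tensor_list)
# client.report_metrics("my_model", rank=0, train_iter=10,
#                       hyperparameters=hp.dict(), speed=123.4)
# rsp = client.ask_hyperparameters("my_model", rank=0, train_iter=10)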
if __name__ == "__main__":
import argparse
from flask import Flask
parser = argparse.ArgumentParser()
parser.add_argument("--nprocs", type=int, default=8)
parser.add_argument("--port", type=int, default=8123)
args = parser.parse_args()
    autotune_service = AutotuneService(args.nprocs)
app = Flask(__name__)
app = autotune_service.setup_app(app)
server = multiprocessing.Process(
target=app.run,
kwargs={
"host": "0.0.0.0",
"port": args.port,
},
)
server.daemon = True
server.start()
server.join()
|
452118
|
import itertools
from behave import step, then, when
from hamcrest import *
@when("we check for messages by the bot")
def step_impl(context):
context.bot.get_messages()
@then("there are no messages")
def step_impl(context):
assert_that(context.bot.current_messages, empty())
@then('there is a message for {person} which includes the text "{text}"')
def step_impl(context, person, text):
context.execute_steps(
f'''
then there is a message for {person} which includes the following text:
"""
{text}
"""
'''
)
@then("there is a message for {person} which includes the following text")
def step_impl(context, person):
text = context.text
person = context.accounts.get_person(person)
messages_for_person = context.bot.get_messages_for_person(person)
item_matcher = has_entry("text", contains_string(text))
assert_that(messages_for_person, has_item(item_matcher))
context.last_matched_message = next(
(m for m in messages_for_person if item_matcher.matches(m))
)
@then("there is a message for {person} with the following text")
def step_impl(context, person):
text = context.text.format(context=context)
person = context.accounts.get_person(person)
messages_for_person = context.bot.get_messages_for_person(person)
item_matcher = has_entry("text", equal_to(text))
assert_that(messages_for_person, has_item(item_matcher))
context.last_matched_message = next(
(m for m in messages_for_person if item_matcher.matches(m))
)
@then('there is no message for {person} which includes the text "{text}"')
def step_impl(context, person, text):
context.execute_steps(
f'''
then there is no message for {person} which includes the following text:
"""
{text}
"""
'''
)
@then("there is no message for {person} which includes the following text")
def step_impl(context, person):
text = context.text
person = context.accounts.get_person(person)
messages_for_person = context.bot.get_messages_for_person(person)
item_matcher = has_entry("text", contains_string(text))
assert_that(messages_for_person, is_not(has_item(item_matcher)))
@then('this message includes the text "{text}"')
def step_impl(context, text):
assert_that(context.last_matched_message, has_entry("text", contains_string(text)))
@then('this message does not include the text "{text}"')
def step_impl(context, text):
assert_that(
context.last_matched_message, has_entry("text", is_not(contains_string(text)))
)
@step("{sender} sends the {command} command to the bot")
def step_impl(context, sender, command):
if sender == "everybody":
for person in context.accounts.all_persons():
context.bot.send_message(person, command)
else:
sender = context.accounts.get_person(sender)
context.bot.send_message(sender, command)
|
452138
|
from assistantforynab import settings
import os
import time
import shutil
import re
from webdriver_manager.chrome import ChromeDriverManager
from assistantforynab.utils import gui, utils
def restore_defaults():
utils.log_info('Restoring default settings')
for p in [settings.chromedriver_path, settings.settings_path]:
if os.path.exists(p):
utils.log_debug('removing', p)
os.remove(p)
for p in settings.log_dir, settings.data_dir, settings.backup_dir:
if os.path.exists(p):
utils.log_debug('removing', p)
shutil.rmtree(p)
settings.init()
def setup_chromedriver():
if os.path.exists(settings.chromedriver_path):
return
utils.log_info('Installing Chromedriver')
    download_path = ChromeDriverManager(path=settings.chromedriver_dir).install()
    shutil.move(download_path, settings.chromedriver_path)
    shutil.rmtree(os.path.join(settings.chromedriver_dir, "drivers"))
assert os.path.exists(settings.chromedriver_path)
def make_dirs():
utils.log_info('Checking for directories')
for p in settings.log_dir, settings.data_dir, settings.backup_dir:
if not os.path.exists(p):
utils.log_info('Making directory', p)
os.mkdir(p)
def setup_ynab_auth():
utils.log_info('Checking for YNAB authentication')
# TODO: make this use oauth instead of api tokens
if settings.get('api_token'):
return
utils.log_info('Installing YNAB authentication')
api_token_url = 'https://app.youneedabudget.com/settings/developer'
d = gui.driver()
d.get(api_token_url)
utils.log_info('Log in if needed')
new_token_button = gui.get_by_text('button', 'New Token')
gui.click(new_token_button)
utils.log_info('Enter your password in the YNAB Web App, then click "Generate"')
while 'New Personal Access Token' not in d.page_source:
time.sleep(.5)
api_token = re.search('New Personal Access Token: <strong>([^<]*)</strong>', d.page_source).groups()[0]
settings.set('api_token', api_token)
utils.log_debug('settings.api_token', settings.api_token)
assert settings.api_token
gui.quit()
def setup_ynab_budget_id():
utils.log_info('Checking for selected budget')
if settings.get('budget_id'):
return
utils.log_info('Selecting budget')
url = 'https://app.youneedabudget.com/'
driver = gui.driver()
driver.get(url)
utils.log_info('Log in if needed')
while not re.search('youneedabudget.com/([^/]+)/', driver.current_url):
time.sleep(.5)
budget_selection_prompt = 'Press Enter when you have loaded the budget you want to use.'
input(budget_selection_prompt)
utils.log_debug(budget_selection_prompt)
while not re.search('youneedabudget.com/([^/]+)/', driver.current_url):
time.sleep(.5)
settings.set('budget_id', re.search('youneedabudget.com/([^/]+)/', driver.current_url).groups()[0])
utils.log_debug('settings.budget_id', settings.budget_id)
assert settings.budget_id
gui.quit()
def install():
utils.log_info('Installing')
setup_chromedriver()
make_dirs()
setup_ynab_auth()
setup_ynab_budget_id()
settings.init()
utils.log_debug('Settings:', settings._s.settings)
utils.log_info('Installed!')
if __name__ == '__main__':
restore_defaults()
install()
|
452154
|
import hashlib
import re
from livestreamer.plugin import Plugin
from livestreamer.plugin.api import http
from livestreamer.stream import HDSStream
# Got the secret from the swf with rev number location
# (tv/wat/player/media/Media.as)
TOKEN_SECRET = '<KEY>'
_url_re = re.compile(r"http(s)?://(\w+\.)?wat.tv/")
_video_id_re = re.compile(r"href=\"http://m.wat.tv/video/([^\"]+)", re.IGNORECASE)
class WAT(Plugin):
@classmethod
def can_handle_url(cls, url):
return _url_re.match(url)
def _get_streams(self):
res = http.get(self.url)
        match = _video_id_re.search(res.text)
if not match:
return
video_id = match.group(1)
# TODO: Replace with "yield from" when dropping Python 2.
for __ in self._create_streams('web', video_id).items():
yield __
for __ in self._create_streams('webhd', video_id).items():
yield __
def _create_streams(self, type_, video_id):
url = self._generate_security_url(type_, video_id)
res = http.get(url)
return HDSStream.parse_manifest(self.session, res.text, cookies=res.cookies)
def _generate_security_url(self, type_, video_id):
token = self._generate_security_token(type_, video_id)
return ("http://www.wat.tv/get/{type_}/{video_id}?token={token}"
"&domain=www.wat.tv&refererURL=wat.tv&revision=04.00.719%0A&"
"synd=0&helios=1&context=playerWat&pub=1&country=FR"
"&sitepage=WAT%2Ftv%2Ft%2Finedit%2Ftf1%2Fparamount_pictures_"
"france&lieu=wat&playerContext=CONTEXT_WAT&getURL=1"
"&version=LNX%2014,0,0,125").format(**locals())
def _generate_security_token(self, type_, video_id):
# Get timestamp
res = http.get('http://www.wat.tv/servertime')
timestamp = int(res.text.split('|')[0])
timestamp_hex = format(timestamp, 'x').rjust(8, '0')
# Player id
player_prefix = "/{0}/{1}".format(type_, video_id)
# Create the token
data = (TOKEN_SECRET + player_prefix + timestamp_hex).encode('utf8')
token = hashlib.md5(data)
token = "{0}/{1}".format(token.hexdigest(), timestamp_hex)
return token
__plugin__ = WAT
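# Worked example of the token scheme above (TOKEN_SECRET is redacted in this
# file, so 'SECRET' below is only a stand-in): for type_='web', video_id='abc'
# and a server timestamp of 1000000000 (hex '3b9aca00'), the token would be
# hashlib.md5(b'SECRET/web/abc3b9aca00').hexdigest() + '/3b9aca00'.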
|
452161
|
import pygame
pygame.init()
pygame.joystick.init()
joystick_count = pygame.joystick.get_count()
if joystick_count == 0:
print("No joysticks found.")
exit(0)
for i in range(joystick_count):
print("%d - %s" % (i, pygame.joystick.Joystick(i).get_name()))
try:
joystick_id = int(input("Choose a joystick: "))
except ValueError:
print("Incorrect input. Only numbers are allowed.")
exit(1)
if joystick_id >= joystick_count:
print("Joystick ID is invalid.")
exit(0)
joystick = pygame.joystick.Joystick(joystick_id)
joystick.init()
num_buttons = joystick.get_numbuttons()
num_axes = joystick.get_numaxes()
while True:
pygame.event.pump()
for i in range(num_buttons):
if joystick.get_button(i) == 1:
print("Button %d is pressed." % i)
for i in range(num_axes):
if abs(joystick.get_axis(i)) > 0.3:
print("Axis %d is in use." % i)
|
452186
|
import unittest
from HARK.datasets import load_SCF_wealth_weights
from HARK.datasets.cpi.us.CPITools import cpi_deflator
from HARK.datasets.SCF.WealthIncomeDist.SCFDistTools import income_wealth_dists_from_scf
class test_load_SCF_wealth_weights(unittest.TestCase):
def setUp(self):
self.SCF_wealth, self.SCF_weights = load_SCF_wealth_weights()
def test_shape(self):
self.assertEqual(self.SCF_wealth.shape, (3553,))
self.assertEqual(self.SCF_weights.shape, (3553,))
# %% US CPI tests
class test_cpi_deflators(unittest.TestCase):
def test_month_deflators(self):
# Same year test
defl_same_year = cpi_deflator(2000, 2000, "SEP")
self.assertEqual(defl_same_year[0], 1.0)
# Different year test
defl_diff_year = cpi_deflator(1998, 2019, "SEP")
self.assertAlmostEqual(defl_diff_year[0], 1.57279534)
def test_avg_deflators(self):
# Same year test
defl_same_year = cpi_deflator(2000, 2000)
self.assertEqual(defl_same_year[0], 1.0)
# Different year test
defl_diff_year = cpi_deflator(1998, 2019)
self.assertAlmostEqual(defl_diff_year[0], 1.57202505)
# %% Tests for Survey of Consumer finances initial distributions
class test_SCF_dists(unittest.TestCase):
def setUp(self):
self.BaseYear = 1992
def test_at_21(self):
# Get stats for various groups and test them
NoHS = income_wealth_dists_from_scf(
self.BaseYear, age=21, education="NoHS", wave=1995
)
self.assertAlmostEqual(NoHS["aNrmInitMean"], -1.0611984728537684)
self.assertAlmostEqual(NoHS["aNrmInitStd"], 1.475816500147777)
self.assertAlmostEqual(NoHS["pLvlInitMean"], 2.5413398571226233)
self.assertAlmostEqual(NoHS["pLvlInitStd"], 0.7264931123240703)
HS = income_wealth_dists_from_scf(
self.BaseYear, age=21, education="HS", wave=2013
)
self.assertAlmostEqual(HS["aNrmInitMean"], -1.0812342937817578)
self.assertAlmostEqual(HS["aNrmInitStd"], 1.7526704743231725)
self.assertAlmostEqual(HS["pLvlInitMean"], 2.806605268756435)
self.assertAlmostEqual(HS["pLvlInitStd"], 0.6736467457859727)
Coll = income_wealth_dists_from_scf(
self.BaseYear, age=21, education="College", wave=2019
)
self.assertAlmostEqual(Coll["aNrmInitMean"], -0.6837248150760165)
self.assertAlmostEqual(Coll["aNrmInitStd"], 0.8813676761170798)
self.assertAlmostEqual(Coll["pLvlInitMean"], 3.2790838587291127)
self.assertAlmostEqual(Coll["pLvlInitStd"], 0.746362502979793)
def test_at_60(self):
# Get stats for various groups and test them
NoHS = income_wealth_dists_from_scf(
self.BaseYear, age=60, education="NoHS", wave=1995
)
self.assertAlmostEqual(NoHS["aNrmInitMean"], 0.1931578281432479)
self.assertAlmostEqual(NoHS["aNrmInitStd"], 1.6593916577375334)
self.assertAlmostEqual(NoHS["pLvlInitMean"], 3.3763953392998705)
self.assertAlmostEqual(NoHS["pLvlInitStd"], 0.61810580085094993)
HS = income_wealth_dists_from_scf(
self.BaseYear, age=60, education="HS", wave=2013
)
self.assertAlmostEqual(HS["aNrmInitMean"], 0.6300862955841334)
self.assertAlmostEqual(HS["aNrmInitStd"], 1.7253736778036055)
self.assertAlmostEqual(HS["pLvlInitMean"], 3.462790681398899)
self.assertAlmostEqual(HS["pLvlInitStd"], 0.8179188962937205)
Coll = income_wealth_dists_from_scf(
self.BaseYear, age=60, education="College", wave=2019
)
self.assertAlmostEqual(Coll["aNrmInitMean"], 1.643936802283761)
self.assertAlmostEqual(Coll["aNrmInitStd"], 1.2685135110865389)
self.assertAlmostEqual(Coll["pLvlInitMean"], 4.278905678818748)
self.assertAlmostEqual(Coll["pLvlInitStd"], 1.0776403992280614)
|
452191
|
import re
import pytest
from dagster.core.definitions import intermediate_storage, pipeline, solid
from dagster.core.definitions.mode import ModeDefinition
from dagster.core.execution.api import execute_pipeline, reexecute_pipeline
from dagster.core.instance import DagsterInstance
from dagster.core.storage.object_store import InMemoryObjectStore
from dagster.core.storage.system_storage import (
build_intermediate_storage_from_object_store,
io_manager_from_intermediate_storage,
)
from dagster_tests.general_tests.utils_tests.utils import assert_no_warnings
def test_intermediate_storage_def_to_io_manager_def():
called = {}
@intermediate_storage()
def no_config_intermediate_storage(init_context):
called["ran"] = True
object_store = InMemoryObjectStore()
return build_intermediate_storage_from_object_store(
object_store=object_store, init_context=init_context
)
@solid
def return_one(_):
return 1
@pipeline(
mode_defs=[
ModeDefinition(
resource_defs={
"io_manager": io_manager_from_intermediate_storage(
no_config_intermediate_storage
)
}
)
]
)
def foo():
return_one()
assert execute_pipeline(foo).success
def test_intermediate_storage_deprecation_warning():
@solid
def return_one(_):
return 1
@pipeline
def foo():
return_one()
with assert_no_warnings():
execute_pipeline(foo)
with pytest.warns(
UserWarning,
match=re.escape(
"Intermediate Storages are deprecated in 0.10.0 and will be removed in a future release."
),
):
execute_pipeline(foo, run_config={"intermediate_storage": {"filesystem": {}}})
def test_intermediate_storage_reexecution():
@solid
def return_one(_):
return 1
@solid
def plus_one(_, one):
return one + 1
@pipeline
def foo():
plus_one(return_one())
run_config = {"intermediate_storage": {"filesystem": {}}}
instance = DagsterInstance.ephemeral()
result = execute_pipeline(foo, run_config=run_config, instance=instance)
assert result.success
reexecution_result = reexecute_pipeline(
foo, run_config=run_config, parent_run_id=result.run_id, instance=instance
)
assert reexecution_result.success
partial_reexecution_result = reexecute_pipeline(
foo,
run_config=run_config,
step_selection=["plus_one"],
parent_run_id=result.run_id,
instance=instance,
)
assert partial_reexecution_result.success
def test_intermediate_storage_event_message():
@solid
def return_one(_):
return 1
@solid
def plus_one(_, one):
return one + 1
@pipeline
def foo():
plus_one(return_one())
run_config = {"intermediate_storage": {"filesystem": {}}}
result = execute_pipeline(foo, run_config=run_config)
for i in filter(lambda i: i.is_handled_output, result.event_list):
assert "output manager" not in i.message
for i in filter(lambda i: i.is_loaded_input, result.event_list):
assert "input manager" not in i.message
|
452197
|
import requests
import subprocess, shlex
import argparse
import os
import logging
import urllib3
urllib3.disable_warnings()
def main():
parser = argparse.ArgumentParser(description='Required args for recursive clone')
parser.add_argument('--group_id', metavar='group_id', type=int,
help='Id of a group in gitlab', required=True)
parser.add_argument('--branch', metavar='branch', type=str,
help='Branch to clone in all repos [by default master]', default='master')
parser.add_argument(
'--gitlab-url',
metavar='gitlab',
type=str,
default=os.environ.get('GITLAB_URL', "gitlab.com"),
help='Gitlab address [by default gitlab.com]')
parser.add_argument(
'--token',
metavar='token',
type=str,
default=os.environ.get('GITLAB_TOKEN'),
help='Gitlab Token')
args = parser.parse_args()
request_param = args.__dict__
clone(**request_param)
def clone(group_id, branch, token, gitlab_url):
    # requests needs a scheme; accept a bare host such as "gitlab.com"
    if not gitlab_url.startswith(("http://", "https://")):
        gitlab_url = "https://" + gitlab_url
    total_pages = 1
page = 0
while page < total_pages:
page += 1
response = requests.get(
f"{gitlab_url}/api/v4/groups/{group_id}/projects?private_token={token}&include_subgroups=True&per_page=100&page={page}&with_shared=False", verify=False)
for project in response.json():
path = project['path_with_namespace']
ssh_url_to_repo = project['ssh_url_to_repo']
try:
if not os.path.exists(path):
command = shlex.split(f"git clone --branch {branch} {ssh_url_to_repo} {path}")
result_code = subprocess.Popen(command)
else:
logging.info(f"{path} already exists")
command = shlex.split(f"cd {path}; git pull {path}; cd -")
result_code = subprocess.Popen(command)
except Exception as e:
logging.error(f"Error on {ssh_url_to_repo}: {e}")
total_pages = int(response.headers['X-Total-Pages'])
if __name__ == '__main__':
main()
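# Example invocation (script name and values are illustrative):
#   python clone_group.py --group_id 42 --branch main \
#       --gitlab-url https://gitlab.example.com --token $GITLAB_TOKEN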
|
452241
|
from django.core.exceptions import ImproperlyConfigured
from django.urls import reverse
from django_sorcery import forms
from django_sorcery.views import edit
from ..base import TestCase
from ..testapp.models import Owner, db
from ..testapp.views import OwnerCreateViewWithForm
class TestCreateView(TestCase):
def test_create(self):
url = reverse("owner_create")
response = self.client.get(url)
self.assertEqual(response.status_code, 200)
self.assertIsInstance(response.context_data["view"], edit.CreateView)
self.assertIsInstance(response.context_data["form"], forms.ModelForm)
response = self.client.post(url, {"first_name": "Randall", "last_name": "Munroe"})
self.assertEqual(response.status_code, 302)
self.assertEqual(response.url, reverse("owners_list"))
def test_create_with_form_class(self):
url = reverse("owner_create_form")
response = self.client.get(url)
self.assertEqual(response.status_code, 200)
self.assertIsInstance(response.context_data["view"], edit.CreateView)
self.assertTrue(type(response.context_data["form"]) is OwnerCreateViewWithForm.form_class)
def test_create_bad_field_form_config(self):
url = reverse("owner_create_field_form")
with self.assertRaises(ImproperlyConfigured) as ctx:
self.client.get(url)
self.assertEqual(str(ctx.exception), "Specifying both 'fields' and 'form_class' is not permitted.")
def test_create_get_success_url_bad_config(self):
view = OwnerCreateViewWithForm()
with self.assertRaises(ImproperlyConfigured) as ctx:
view.get_success_url()
self.assertEqual(
str(ctx.exception), "No URL to redirect to. Either provide a url or override this function to return a url"
)
def test_create_form_invalid(self):
url = reverse("vehicle_create")
response = self.client.post(url, {})
self.assertEqual(response.status_code, 200)
form = response.context_data["form"]
self.assertFalse(form.is_valid())
self.assertEqual(form.errors, {"type": ["This field is required."], "is_used": ["This field is required."]})
self.assertHTMLEqual(response.content.decode(), form.as_p())
class TestUpdateView(TestCase):
def test_update(self):
owner = Owner(first_name="Radnall", last_name="Munroe")
db.add(owner)
db.flush()
url = reverse("owner_update", kwargs={"id": owner.id})
response = self.client.get(url)
self.assertEqual(response.status_code, 200)
self.assertIsInstance(response.context_data["view"], edit.UpdateView)
self.assertIsInstance(response.context_data["form"], forms.ModelForm)
form = response.context_data["form"]
view = response.context_data["view"]
self.assertHTMLEqual(response.content.decode(), form.as_p())
self.assertEqual(view.object, owner)
response = self.client.post(url, {"first_name": "Patrick", "last_name": "Bateman"})
self.assertEqual(response.status_code, 302)
self.assertEqual(response.url, reverse("owners_list"))
self.assertEqual(owner.first_name, "Patrick")
self.assertEqual(owner.last_name, "Bateman")
class TestDeleteView(TestCase):
def test_delete(self):
owner = Owner(first_name="Radnall", last_name="Munroe")
db.add(owner)
db.flush()
url = reverse("owner_delete", kwargs={"id": owner.id})
response = self.client.get(url)
self.assertEqual(response.status_code, 200)
self.assertIsInstance(response.context_data["view"], edit.DeleteView)
self.assertEqual(response.context_data["object"], owner)
response = self.client.post(url)
self.assertEqual(response.status_code, 302)
self.assertEqual(response.url, reverse("owners_list"))
state = db.inspect(owner)
self.assertTrue(state.deleted)
def test_delete_get_success_url(self):
view = edit.DeletionMixin()
with self.assertRaises(ImproperlyConfigured):
view.get_success_url()
|
452255
|
from gym import ObservationWrapper
class TransformObservation(ObservationWrapper):
r"""Transform the observation via an arbitrary function.
Example::
        >>> import gym
        >>> import numpy as np
>>> env = gym.make('CartPole-v1')
>>> env = TransformObservation(env, lambda obs: obs + 0.1*np.random.randn(*obs.shape))
>>> env.reset()
array([-0.08319338, 0.04635121, -0.07394746, 0.20877492])
Args:
env (Env): environment
f (callable): a function that transforms the observation
"""
def __init__(self, env, f):
super(TransformObservation, self).__init__(env)
assert callable(f)
self.f = f
def observation(self, observation):
return self.f(observation)
|
452274
|
from bs4 import BeautifulSoup as bsoup
import pandas as pd
import numpy as np
import humanfriendly
# Read in email data file
df = pd.read_csv('../bodytext.csv', header = 0)
# Filter out sent mail
emails = df.query('FromEmail != "[my email address]"').copy()
def wordCount(row):
if(row['Format'] == 'Html'):
return htmlWordCount(row['Body'])
return textWordCount(row['Body'])
def textWordCount(text):
if not(isinstance(text, str)):
return 0
return len(text.split(None))
def htmlWordCount(text):
if not(isinstance(text, str)):
return 0
soup = bsoup(text, 'html.parser')
if soup is None:
return 0
    # drop non-content elements before extracting the visible text
    for s in soup(['style', 'script', 'head', 'title']):
        s.extract()
    stripped = soup.get_text(" ", strip=True)
return textWordCount(stripped)
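# For example, htmlWordCount('<p>Hello <b>world</b></p>') returns 2; text
# inside style/script/head/title elements is dropped before counting.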
averageWordsPerMinute = 350
# Count the words in each message body
emails['WordCount'] = emails.apply(wordCount, axis=1)
emails['MinutesToRead'] = emails['WordCount'] / averageWordsPerMinute
# Get total number of minutes required to read all these emails
totalMinutes = emails['MinutesToRead'].sum()
# And convert that to a more human-readable timespan
timeToRead = humanfriendly.format_timespan(totalMinutes * 60)
|
452279
|
from __future__ import division, generators, print_function
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
import macarico.util as util
from macarico.util import Var, Varng
import macarico
def sample_from(l):
r = sum((p for _, p in l)) * np.random.random()
last_chance = None
for k, p in l:
if last_chance is None: last_chance = k
r -= p
if r <= 0: return k
return last_chance
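# For example, sample_from([('a', 0.2), ('b', 0.8)]) returns 'a' about 20%
# of the time and 'b' about 80%; if floating-point rounding leaves r > 0
# after the loop, the first key seen is returned as a fallback.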
class MDPExample(object):
def __init__(self, initial, transitions, costs, T):
self.states = set([s for s, _ in initial])
self.n_actions = 0
for _, actions in transitions.items():
for a, subsequent in actions.items():
self.n_actions = max(a, self.n_actions)
for s1, p in subsequent:
if p > 0:
self.states.add(s1)
self.n_actions += 1
self.initial = initial
self.transitions = transitions
self.costs = costs
self._T = T
class MDP(macarico.Env):
def __init__(self, example):
macarico.Env.__init__(self, example.n_actions, example._T, example)
self.s0 = sample_from(self.example.initial)
self.example.cost = 0
def _run_episode(self, policy):
cost = 0
self.s = self.s0
for _ in range(self.horizon()):
self.actions = self.example.transitions[self.s].keys()
a = policy(self)
s1 = sample_from(self.example.transitions[self.s][a])
cost += self.example.costs(self.s, a, s1)
self.s = s1
self.example.cost = cost
return self._trajectory
def _rewind(self):
pass
class MDPLoss(macarico.Loss):
def __init__(self):
super(MDPLoss, self).__init__('cost')
def evaluate(self, example):
return example.cost
class DeterministicReference(macarico.Reference):
def __init__(self, pi_ref):
self.pi_ref = pi_ref
def __call__(self, state):
return self.pi_ref(state.s)
def set_min_costs_to_go(self, state, costs):
a_star = self.pi_ref(state)
costs.zero_()
costs += 1
costs[a_star] = 0
class MDPFeatures(macarico.DynamicFeatures):
def __init__(self, n_states, noise_rate=0):
macarico.DynamicFeatures.__init__(self, n_states)
self.n_states = n_states
self.noise_rate = noise_rate
self._t = nn.Linear(1,1,bias=False)
def _forward(self, state):
f = util.zeros(self._t.weight, 1, 1, self.n_states)
if np.random.random() > self.noise_rate:
f[0, 0, state.s] = 1
return Varng(f)
def __call__(self, state): return self.forward(state)
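def _tiny_mdp_example():
    # A minimal sketch of the example format consumed above: two states
    # (0 and 1), a single action 0, deterministic transitions, and a unit
    # cost per step over a horizon of T=5 (all values are illustrative).
    initial = [(0, 1.0)]
    transitions = {0: {0: [(1, 1.0)]},
                   1: {0: [(1, 1.0)]}}
    costs = lambda s, a, s1: 1.0
    return MDPExample(initial, transitions, costs, T=5)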
|
452285
|
import sys
import os
import time
from robot.libraries.BuiltIn import BuiltIn
from robot.output.logger import LOGGER
class runKeywordAsync:
def __init__(self):
self._thread_pool = {}
self._last_thread_handle = 1
#self._robot_log_level = BuiltIn().get_variable_value("${LOG_LEVEL}")
def run_method_async(self, keyword, *args, **kwargs):
#BuiltIn().set_log_level("NONE")
handle = self._last_thread_handle
thread = self._threaded_method(keyword, *args, **kwargs)
thread.start()
self._thread_pool[handle] = thread
self._last_thread_handle += 1
return handle
def run_keyword_async(self, keyword, *args):
#BuiltIn().set_log_level("NONE")
handle = self._last_thread_handle
thread = self._threaded(keyword, *args)
thread.start()
self._thread_pool[handle] = thread
self._last_thread_handle += 1
return handle
def wait_async_all(self, timeout=60):
timeout = int(timeout)
results = []
for thread in self._thread_pool:
try:
result = self._thread_pool[thread].result_queue.get(True, timeout)
results.append(result)
except:
#BuiltIn().set_log_level(self._robot_log_level)
for thread in self._thread_pool:
self._thread_pool[thread].terminate()
raise Exception("Process " + str(thread) + " Failed")
#BuiltIn().set_log_level(self._robot_log_level)
self._thread_pool = {}
self._last_thread_handle = 1
return results
def get_async_return(self, handle, timeout=60):
timeout = int(timeout)
if handle in self._thread_pool:
try:
result = self._thread_pool[handle].result_queue.get(True, timeout)
del self._thread_pool[handle]
                #BuiltIn().set_log_level(self._robot_log_level)
return result
except:
raise Exception("Process " + str(handle) + " Failed")
else:
raise Exception("Passed Process id " + str(handle) + " is not a valid id")
def _threaded_method(self, keyword, *args, **kwargs):
from multiprocessing import Queue
from multiprocessing import Process
def wrapped_f(q, *args, **kwargs):
            ''' Calls the keyword and puts the result in a queue '''
ret = BuiltIn().call_method(keyword, *args, **kwargs)
q.put(ret)
q = Queue()
th = Process(target=wrapped_f, args=(q,)+args, kwargs=kwargs)
th.result_queue = q
return th
def _threaded(self, keyword, *args):
from multiprocessing import Queue
from multiprocessing import Process
def wrapped_f(q, *args):
            ''' Calls the keyword and puts the result in a queue '''
LOGGER.unregister_xml_logger()
ret = BuiltIn().run_keyword(keyword, *args)
q.put(ret)
q = Queue()
th = Process(target=wrapped_f, args=(q,)+args)
th.result_queue = q
return th
|
452297
|
import asyncio
import sys
import os
import time
import importlib
import logging
import traceback
import sqlite3
import sys
from pathlib import Path
from pprint import pprint
from matrixroom import MatrixRoom
from plugin import Plugin
import nio
DEFAULT_BOTNAME = "Matrix Bot"
DEFAULT_PLUGINPATH = [ "./plugins" ]
DEFAULT_DEVICEID = "MATRIXBOT"
DEFAULT_DBPATH = "./matrixbot.sqlite"
DEFAULT_BIND_ADDRESS = "localhost"
DEFAULT_BIND_PORT = "8080"
DEFAULT_GLOBAL_PLUGINPATH = "./global_plugins"
class MatrixBot:
def __init__(self, config):
self.config = config
        if 'BotMatrixId' not in config \
                or not all(key in config['BotMatrixId']
                           for key in ['USERNAME', 'PASSWORD', 'SERVER']):
sys.stderr.write("""Bad config file. Please check that config file exists and all fields are available\n""")
sys.exit(-1)
botc = config["BotMatrixId"]
store_path = botc.get("STOREPATH", "")
if not store_path:
store_path = Path(os.getcwd()) / "store"
else:
store_path = Path(store_path)
if not store_path.is_dir():
logging.info(f"Creating store directory in {store_path}")
try:
os.mkdir(store_path)
except Exception as e:
logging.error("Failed to create store path. Check permissions.")
print(e)
sys.exit(-1)
logging.info(f"Store path: {store_path}")
self.client = nio.AsyncClient(botc["SERVER"], botc["USERNAME"], device_id=botc.get("DEVICEID", DEFAULT_DEVICEID), store_path=str(store_path))
self.password = botc["PASSWORD"]
self.botname = botc.get("BOTNAME", DEFAULT_BOTNAME)
self.dbpath = botc.get("DBPATH", DEFAULT_DBPATH)
self.load_db(self.dbpath)
self.pluginpath = [p.strip() for p in botc.get("PLUGINPATH", DEFAULT_PLUGINPATH).split(";")]
self.environment = dict((k.upper(),v) for k,v in dict(botc).items()
if k.lower() != 'password')
self.last_sync_time = 0
self.active_rooms = set()
self.available_plugins = {}
# order of global_plugins is important as they may depend on each other
# also the non-global plugins may depend on them
# thus we map by index between names and plugins and do not use a dict()
self.global_pluginpath = botc.get("GLOBAL_PLUGINPATH", DEFAULT_GLOBAL_PLUGINPATH)
self.global_plugin_names = [p.strip() for p in botc.get("GLOBAL_PLUGINS", "").split(";")]
self.global_plugins = [None] * len(self.global_plugin_names)
# this is a small hack to add the plugins to the import search path
for path in self.pluginpath:
sys.path.append(path)
sys.path.append(self.global_pluginpath)
def get_global_plugin_object(self, name):
i = self.global_plugin_names.index(name)
return self.global_plugins[i].Object
async def start_global_plugins(self):
for i in range(len(self.global_plugin_names)):
# it's the plugin's job to set up that this works
await self.global_plugins[i].Object.set_bot(self)
await self.global_plugins[i].Object.start()
async def login(self):
import socket
hname = socket.gethostname()
response = await self.client.login(self.password, device_name=hname)
if type(response) == nio.LoginError:
logging.error("""There was an error while logging in. Please check
credentials""")
sys.exit(-1)
k = await self.client.sync() # otherwise all past messages will be handled
self.last_sync_time = time.time()
if self.client.should_upload_keys:
await self.client.keys_upload()
cur_displayname = (await self.client.get_displayname()).displayname
logging.info(f"Current displayname: {cur_displayname}")
if cur_displayname != self.botname:
logging.info(f"Changing displayname to {self.botname}")
await self.client.set_displayname(self.botname)
async def __aenter__(self):
await self.login()
return self
async def __aexit__(self, exc_type, exc_value, exc_tb):
await self.client.close()
def load_db(self, dbname):
self.conn = sqlite3.connect(dbname)
c = self.conn.cursor()
tables = c.execute("""
SELECT name
FROM sqlite_master
WHERE type ='table' AND name NOT LIKE 'sqlite_%';
""").fetchall()
        # note: the meaning of "plugin_data" has changed
# room_data: global room data
# plugin_data: global plugin data
# room_plugin_data: data local to a plugin x room combination
# room_plugins: which plugins are loaded in which room
if not all((t,) in tables for t in ["rooms", "plugins", "room_plugins", "room_data", "plugin_data", "room_plugin_data"]):
c.execute("""
CREATE TABLE rooms (
roomid VARCHAR PRIMARY KEY
);
""")
c.execute("""
CREATE TABLE plugins (
pluginname VARCHAR PRIMARY KEY
);
""")
c.execute("""
CREATE TABLE room_plugins (
roomid VARCHAR,
pluginname VARCHAR,
PRIMARY KEY (roomid, pluginname)
);
""")
c.execute("""
CREATE TABLE room_data (
roomid VARCHAR,
key VARCHAR,
value TEXT,
PRIMARY KEY (roomid, key)
);
""")
c.execute("""
CREATE TABLE plugin_data (
pluginname VARCHAR,
key VARCHAR,
value TEXT,
PRIMARY KEY (pluginname, key)
);
""")
c.execute("""
CREATE TABLE room_plugin_data (
roomid VARCHAR,
pluginname VARCHAR,
key VARCHAR,
value TEXT,
PRIMARY KEY (roomid, pluginname, key)
);
""")
async def load_rooms(self):
joined_rooms = self.client.rooms
cursor = self.conn.cursor()
res = cursor.execute("""
SELECT *
FROM rooms;
""")
dbrooms = res.fetchall()
for rid,nio_room in joined_rooms.items():
if (rid,) in dbrooms:
mr = MatrixRoom(
matrixbot=self,
nio_room=nio_room,
)
await mr.load_plugins()
self.active_rooms.add(mr)
async def read_plugins(self):
plugin_paths = [Path(path) for path in self.pluginpath]
logging.info("Reading available plugins from: {}".format(plugin_paths))
help_module = None
for i in range(len(self.global_plugin_names)):
modname = self.global_plugin_names[i]
filename = Path(self.global_pluginpath) / f"{modname}.py"
if filename.exists():
modname = f'plugins.{modname}'
loader = importlib.machinery.SourceFileLoader(modname, str(filename))
try:
module = loader.load_module(modname)
self.global_plugins[i] = module
except Exception as e:
logging.warning(e)
# plugins must be called ...plugin.py, so other modules in the same
# directory are not falsely loaded (allows for plugin decomposition)
for plugin_path in plugin_paths:
for path in plugin_path.glob("*_plugin.py"):
if path.exists():
modname = f'plugins.{path.stem}'
loader = importlib.machinery.SourceFileLoader(modname, str(path))
try:
module = loader.load_module(modname)
pluginname = path.stem.replace("_plugin","")
self.available_plugins[pluginname] = module.HELP_DESC
except Exception as e:
logging.warning(e)
await self.enter_plugins_to_db()
async def enter_plugins_to_db(self):
# we now check, if all loaded plugins have an entry in the database
# if not, we add it
# TODO: - do we want to remove database entries when a plugin disappears?
# problem: development of plugin with errors -> deletion?!?! not wanted!
# - How do we guarantee the uniqueness of filenames among directories?
cursor = self.conn.cursor()
res = cursor.execute("""
SELECT *
FROM plugins;
""")
dbplugins = res.fetchall()
for ap in list(self.available_plugins.keys()) + self.global_plugin_names:
if (ap,) not in dbplugins:
# add plugin to db
self.conn.execute("""
INSERT INTO plugins (pluginname) VALUES (?);
""", (ap,))
self.conn.commit()
async def listen(self):
async def handle_invite_event(room, event):
try:
jrooms = await self.client.joined_rooms()
jrooms = jrooms.rooms
except:
logging.warning(f"Not joining room {room.room_id}")
return
if room.room_id not in jrooms:
logging.info(f"Try joining room {room.room_id}")
await asyncio.sleep(0.5)
response = await self.client.join(room.room_id)
await asyncio.sleep(0.5)
if type(response) == nio.responses.JoinResponse:
self.active_rooms.add(await MatrixRoom.new(self,room))
else:
logging.warning(f"Couldn't joing the room: {response}")
else:
logging.warning(f"Not joining room {room.room_id}")
logging.warning(f"Already joined.")
async def handle_text_event(room, event):
# we ignore messages older than 5secs before last sync to solve
# joining new room and interpreting old messages problem
logging.debug(str(event))
if (self.last_sync_time-5)*1000 > event.server_timestamp:
logging.debug("Ignoring old event")
return
if event.sender == self.client.user:
logging.debug("Ignoring own message")
return
matching_rooms = [mroom for mroom in self.active_rooms if
mroom.room_id == room.room_id]
if matching_rooms:
try:
await matching_rooms[0].handle_text_event(event)
except Exception as e:
traceback.print_exc()
logging.warning(e)
try:
k = traceback.format_exc()
if "ADMIN" in self.environment:
admin = self.environment['ADMIN']
k += f"\nPlease contact {admin} for bug fixing"
else:
k += "\nPlease contact the plugin creator"
self.nio_room = room
await Plugin.send_text(self, k)
except Exception as e:
traceback.print_exc()
logging.warning(e)
else:
logging.info("Ignoring text event in non-active room")
async def event_cb(room, *args):
event = args[0]
logging.debug(80 * "=")
#pprint(vars(event))
            if room.room_id in self.client.rooms:
                logging.debug(f"{type(event)} in room {self.client.rooms[room.room_id].display_name}")
            else:
                logging.debug(f"{type(event)} in room {room.room_id}")
if type(event) == nio.events.invite_events.InviteMemberEvent:
await handle_invite_event(room, event)
elif type(event) == nio.events.room_events.RoomMessageText:
await handle_text_event(room, event)
elif type(event) == nio.events.room_events.RoomMemberEvent:
name = event.source.get("sender")
logging.info(f"{name} joined room")
elif type(event) == nio.MegolmEvent:
logging.debug("account shared:", self.client.olm_account_shared)
logging.warning("Unable to decrypt event")
print(f"Event session ID {event.session_id}")
r = nio.crypto.OutgoingKeyRequest(event.session_id, None, None, None)
self.client.store.remove_outgoing_key_request(r)
if (event.session_id in self.client.olm.outgoing_key_requests.keys()):
del self.client.olm.outgoing_key_requests[event.session_id]
res = await self.client.request_room_key(event) # should do updating by itself
#event_cb(room, event)
else:
logging.debug("Ignoring unknown type event")
async def response_cb(response):
logging.debug("Got response")
logging.debug(type(response))
self.last_sync_time = time.time()
logging.debug("Ignoring response")
async def todevice_cb(request):
logging.debug(80 * "=")
logging.debug("Got to device request")
logging.debug(type(request))
logging.debug("Ignoring to device request")
async def ephemeral_cb(arg1, arg2):
logging.debug(80 * "=")
logging.debug("Got ephemeral dings")
logging.debug(f"{type(arg1)}, {type(arg2)}")
logging.debug("Ignoring ephemeral dings")
async def kick_response_cb(response):
logging.info("Getting kicked")
logging.info(f"{self.botname} lauert nun.")
self.client.add_event_callback(event_cb, nio.Event)
self.client.add_event_callback(event_cb, nio.InviteMemberEvent)
self.client.add_to_device_callback(todevice_cb, nio.events.to_device.ToDeviceEvent)
self.client.add_ephemeral_callback(ephemeral_cb, nio.events.ephemeral.EphemeralEvent)
self.client.add_response_callback(response_cb, nio.Response)
        self.client.add_response_callback(kick_response_cb, nio.RoomKickResponse)  # FIXME: doesn't work
if False:
for room_id in self.active_rooms:
await self.introduce_bot(room_id)
await self.client.sync_forever(30000)
async def start(self):
await self.read_plugins()
await self.start_global_plugins()
await self.load_rooms()
await self.listen()
|
452318
|
import codecs, csv
def readCSV (fname, delimiter="\t"):
header = {}
table = []
with codecs.open(fname, 'r') as csvfile:
lines = csv.reader(csvfile, delimiter=delimiter, quotechar='"')
l = 0
for fields in lines:
if l == 0:
for c in range(len(fields)):
header[fields[c]] = c
else:
output_row = []
for field in fields:
# for future: figure out how to detect presence of quotes in orig file
if '.' in field:
try:
output_row.append(float(field))
except:
output_row.append(field)
else:
try:
output_row.append(int(field))
except:
output_row.append(field)
table.append(output_row)
l += 1
return header, table
def writeCSV (fname, header, table, delimiter="\t"):
text = ["\t".join(header) + "\n"]
for row in table:
output_row = ""
for field in row:
if isinstance(field, str):
output_row += '"' + field + '"' + "\t"
else:
output_row += str(field) + "\t"
output_row = output_row[:-1] + "\n"
text.append(output_row)
f = codecs.open(fname, 'w')
f.writelines(text)
f.close()
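def _csv_roundtrip_demo(fname='demo.tsv'):
    # A small round-trip sketch for the two helpers above: strings come back
    # as str, fields containing '.' as float, and the rest as int.
    writeCSV(fname, ['name', 'score'], [['alice', 1.5], ['bob', 2]])
    header, table = readCSV(fname)
    assert header == {'name': 0, 'score': 1}
    assert table == [['alice', 1.5], ['bob', 2]]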
|
452329
|
from unittest.mock import MagicMock, patch
import numpy as np
import torch
def _print_success_message():
print("Tests Passed")
class AssertTest(object):
def __init__(self, params):
self.assert_param_message = "\n".join(
[str(k) + ": " + str(v) + "" for k, v in params.items()]
)
def test(self, assert_condition, assert_message):
assert assert_condition, (
assert_message
+ "\n\nUnit Test Function Parameters\n"
+ self.assert_param_message
)
def test_discriminator(Discriminator):
batch_size = 50
conv_dim = 10
D = Discriminator(conv_dim)
# create random image input
x = torch.from_numpy(
        np.random.randint(2, size=(batch_size, 3, 32, 32)) * 2 - 1  # random values in {-1, 1}
).float()
train_on_gpu = torch.cuda.is_available()
if train_on_gpu:
        x = x.cuda()
output = D(x)
assert_test = AssertTest(
{"Conv Dim": conv_dim, "Batch Size": batch_size, "Input": x}
)
correct_output_size = (batch_size, 1)
assert_condition = output.size() == correct_output_size
assert_message = "Wrong output size. Expected type {}. Got type {}".format(
correct_output_size, output.size()
)
assert_test.test(assert_condition, assert_message)
_print_success_message()
def test_generator(Generator):
batch_size = 50
z_size = 25
conv_dim = 10
G = Generator(z_size, conv_dim)
# create random input
z = np.random.uniform(-1, 1, size=(batch_size, z_size))
z = torch.from_numpy(z).float()
train_on_gpu = torch.cuda.is_available()
if train_on_gpu:
        z = z.cuda()
# b = torch.LongTensor(a)
# nn_input = torch.autograd.Variable(b)
output = G(z)
assert_test = AssertTest(
{"Z size": z_size, "Conv Dim": conv_dim, "Batch Size": batch_size, "Input": z}
)
correct_output_size = (batch_size, 3, 32, 32)
assert_condition = output.size() == correct_output_size
assert_message = "Wrong output size. Expected type {}. Got type {}".format(
correct_output_size, output.size()
)
assert_test.test(assert_condition, assert_message)
_print_success_message()
|
452352
|
from puq import *
def run():
x = UniformParameter('x', 'x', min=0, max=1)
y = UniformParameter('y', 'y', min=0, max=1)
host = InteractiveHost()
prog = TestProgram('./dome_prog.py')
uq = MonteCarlo([x,y], 500)
return Sweep(uq, host, prog)
|
452354
|
class Minus:
reserved = {}
tokens = ('MINUS',)
# Tokens
t_MINUS = r'-'
precedence = (
('left', 'MINUS'),
)
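def _minus_lexer_demo():
    # A minimal sketch of driving this token class with ply (assumes the ply
    # package is installed; the t_error handler and input text are
    # illustrative additions, not part of the original grammar).
    import ply.lex as lex

    class MinusLexer(Minus):
        t_ignore = ' \t'

        def t_error(self, t):
            t.lexer.skip(1)

    lexer = lex.lex(object=MinusLexer())
    lexer.input('- - -')
    return [(tok.type, tok.value) for tok in lexer]  # three MINUS tokens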
|
452382
|
from __future__ import print_function, division
import os
import sys
import subprocess
if __name__=="__main__":
dir_path = sys.argv[1]
dst_dir_path = sys.argv[2]
for file_name in os.listdir(dir_path):
if '.mp4' not in file_name:
continue
name, ext = os.path.splitext(file_name)
dst_directory_path = os.path.join(dst_dir_path, name)
video_file_path = os.path.join(dir_path, file_name)
p = subprocess.Popen('ffprobe {}'.format(video_file_path),
shell=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
_, res = p.communicate()
res = res.decode('utf-8')
duration_index = res.find('Duration:')
duration_str = res[(duration_index + 10):(duration_index + 21)]
hour = float(duration_str[0:2])
minute = float(duration_str[3:5])
sec = float(duration_str[6:10])
total_sec = hour * 3600 + minute * 60 + sec
n_frames = len(os.listdir(dst_directory_path))
if os.path.exists(os.path.join(dst_directory_path, 'fps')):
n_frames -= 1
fps = round(n_frames / total_sec, 2)
print(video_file_path, os.path.exists(video_file_path), fps)
with open(os.path.join(dst_directory_path, 'fps'), 'w') as fps_file:
fps_file.write('{}\n'.format(fps))
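# Worked example of the arithmetic above (illustrative numbers): for
# 'Duration: 00:01:30.50' the slices give hour=0, minute=1, sec=30.5, so
# total_sec = 0*3600 + 1*60 + 30.5 = 90.5; with 2715 extracted frames,
# fps = round(2715 / 90.5, 2) == 30.0.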
|
452385
|
from __future__ import absolute_import, division, unicode_literals
import uuid
import xmltodict
from twisted.trial.unittest import SynchronousTestCase
from twisted.internet.task import Clock
from mimic.core import MimicCore
from mimic.resource import MimicRoot
from mimic.test.helpers import request_with_content
from mimic.canned_responses.noit_metrics_fixture import metrics
class NoitAPITests(SynchronousTestCase):
"""
Tests for noit API plugin
"""
def setUp(self):
"""
Create a check
"""
core = MimicCore(Clock(), [])
self.root = MimicRoot(core).app.resource()
self.create_check = {
"check": {
"attributes": {
"name": "name",
"module": "module",
"target": "target",
"period": "period",
"timeout": "timeout",
"filterset": "filterset"
}
}}
self.create_check_xml_payload = xmltodict.unparse(self.create_check
).encode("utf-8")
self.check_id = uuid.uuid4()
url = "noit/checks/set/{0}".format(self.check_id)
(self.response, response_body) = self.successResultOf(
request_with_content(self, self.root, b"PUT", url,
body=self.create_check_xml_payload))
self.create_json_response = xmltodict.parse(response_body)
def test_get_all_checks(self):
"""
Test to verify :func:`get_all_checks` on ``GET /config/checks``
"""
(response, body) = self.successResultOf(
request_with_content(self, self.root, b"GET", "noit/config/checks"))
self.assertEqual(response.code, 200)
json_response = xmltodict.parse(body)
self.assertTrue(len(json_response["checks"]["check"]) > 0)
def test_test_check(self):
"""
Test to verify :func:`test_check` on ``POST /checks/test``
"""
(response, body) = self.successResultOf(
request_with_content(self, self.root, b"POST", "noit/checks/test",
body=self.create_check_xml_payload))
json_response = xmltodict.parse(body)
self.assertEqual(response.code, 200)
self.assertTrue(json_response["check"]["state"]["metrics"])
def test_get_version(self):
"""
Test to verify :func:`test_check` on ``POST /checks/test``.
When the check module is selfcheck, :func:`test_check` should return
the version of the Noit instance
"""
self.create_check["check"]["attributes"]["module"] = 'selfcheck'
(response, body) = self.successResultOf(
request_with_content(self, self.root, b"POST", "noit/checks/test",
body=xmltodict.unparse(self.create_check).encode('utf-8')))
json_response = xmltodict.parse(body)
self.assertEqual(response.code, 200)
self.assertEqual(
json_response["check"]["state"]["metrics"][1]["metric"][0]["@name"], "version")
def test_create_check(self):
"""
Test to verify :func:`set_check` on ``PUT /checks/set/<check_id>``
"""
self.assertEqual(self.response.code, 200)
self.assertEqual(self.create_check["check"]["attributes"],
self.create_json_response["check"]["attributes"])
def test_update_check(self):
"""
Test to verify update check on :func:`set_check` using
``PUT /checks/set/<check_id>``
"""
self.create_check["check"]["attributes"]["name"] = "rename"
(response, body) = self.successResultOf(
request_with_content(self, self.root, b"PUT",
"noit/checks/set/{0}".format(self.check_id),
body=xmltodict.unparse(self.create_check
).encode("utf-8")))
json_response = xmltodict.parse(body)
self.assertEqual(self.response.code, 200)
self.assertEqual(json_response["check"]["attributes"]["name"],
"rename")
def test_get_check(self):
"""
Test to verify :func:`get_checks` on ``GET /checks/show/<check_id>``
"""
(get_response, body) = self.successResultOf(
request_with_content(self, self.root, b"GET",
"noit/checks/show/{0}".format(self.check_id)))
json_response = xmltodict.parse(body)
self.assertEqual(get_response.code, 200)
self.assertEqual(self.create_check["check"]["attributes"],
json_response["check"]["attributes"])
def test_delete_check(self):
"""
Test to verify :func:`delete_checks` on
``DELETE /checks/delete/<check_id>``
"""
(del_response, body) = self.successResultOf(
request_with_content(self, self.root, b"DELETE",
"noit/checks/delete/{0}".format(self.check_id)))
self.assertEqual(del_response.code, 200)
    def test_delete_nonexistent_check(self):
"""
Test to verify :func:`delete_checks` on ``DELETE /checks/delete/<check_id>``
when the check_id was never created.
"""
(del_response, body) = self.successResultOf(
request_with_content(self, self.root, b"DELETE",
"noit/checks/delete/1234556"))
self.assertEqual(del_response.code, 404)
def test_create_check_fails_with_500(self):
"""
Test to verify :func:`set_check` results in error 500,
when the xml cannot be parsed.
"""
(response, body) = self.successResultOf(
request_with_content(self, self.root, b"PUT",
"noit/checks/set/{0}".format(self.check_id),
body=self.create_check_xml_payload.replace(
b'</check>', b' abc')))
self.assertEqual(response.code, 500)
def test_create_check_fails_with_500_for_invalid_check_id(self):
"""
        Test to verify :func:`set_check` results in error 500,
        when the check_id is not a valid UUID.
"""
(response, body) = self.successResultOf(
request_with_content(self, self.root, b"PUT",
"noit/checks/set/123444",
body=self.create_check_xml_payload))
self.assertEqual(response.code, 500)
def test_create_check_fails_with_404_for_invalid_check_payload(self):
"""
Test to verify :func:`set_check` results in error 404,
when the request check body is invalid.
"""
del self.create_check["check"]["attributes"]["target"]
invalid_check_xml_payload = xmltodict.unparse(self.create_check
).encode("utf-8")
(response, body) = self.successResultOf(
request_with_content(self, self.root, b"PUT",
"noit/checks/set/{0}".format(self.check_id),
body=invalid_check_xml_payload))
self.assertEqual(response.code, 404)
def test_test_check_fails_with_404_for_invalid_check_payload(self):
"""
Test to verify :func:`test_check` results in error 404,
when the request check body is invalid.
"""
del self.create_check["check"]["attributes"]["target"]
invalid_check_xml_payload = xmltodict.unparse(self.create_check
).encode("utf-8")
(response, body) = self.successResultOf(
request_with_content(self, self.root, b"POST",
"noit/checks/test".format(self.check_id),
body=invalid_check_xml_payload))
self.assertEqual(response.code, 404)
def test_test_check_for_given_module(self):
"""
Test to verify :func:`test_check` results in response containing the metrics
for the given module.
"""
self.create_check["check"]["attributes"]["module"] = "selfcheck"
check_xml_payload = xmltodict.unparse(self.create_check
).encode("utf-8")
(response, body) = self.successResultOf(
request_with_content(self, self.root, b"POST",
"noit/checks/test".format(self.check_id),
body=check_xml_payload))
json_response = xmltodict.parse(body)
self.assertEqual(response.code, 200)
self.assertEqual(json_response["check"]["state"]["metrics"][
1]["metric"], metrics["selfcheck"]["metric"])
|
452391
|
import torch
import torch.nn as nn
import torch.nn.functional as F
from torchvision import models
from ImitationLearning.backbone import EfficientNet
from ImitationLearning.backbone import MemoryEfficientSwish
from ImitationLearning.VisualAttention.network.Lambda import λLayer
""" Convolutional Neuronal Network - 5 layers
-----------------------------------------
Ref: <NAME>., & <NAME>. (2017). "Interpretable learning for self-driving
cars by visualizing causal attention". In Proceedings of the IEEE
international conference on computer vision (pp. 2942-2950).
* Input: img [batch,h,w]
* Output: xt [batch,L,D]
"""
class CNN5(nn.Module):
""" Constructor """
def __init__(self,):
super(CNN5, self).__init__()
# Layers
self.conv1 = nn.Conv2d( 3, 24, kernel_size=5, stride=2, bias=False)
self.conv2 = nn.Conv2d(24, 36, kernel_size=5, stride=2, bias=False)
self.conv3 = nn.Conv2d(36, 48, kernel_size=3, stride=2, bias=False)
self.conv4 = nn.Conv2d(48, 64, kernel_size=3, stride=1, bias=False)
self.conv5 = nn.Conv2d(64, 64, kernel_size=3, stride=1, bias=False)
self.batchN1 = nn.BatchNorm2d(24)
self.batchN2 = nn.BatchNorm2d(36)
self.batchN3 = nn.BatchNorm2d(48)
self.batchN4 = nn.BatchNorm2d(64)
self.batchN5 = nn.BatchNorm2d(64)
self.ReLU = nn.ReLU()
# Initialize
torch.nn.init.xavier_uniform_(self.conv1.weight)
torch.nn.init.xavier_uniform_(self.conv2.weight)
torch.nn.init.xavier_uniform_(self.conv3.weight)
torch.nn.init.xavier_uniform_(self.conv4.weight)
torch.nn.init.xavier_uniform_(self.conv5.weight)
self.batchN1.reset_parameters()
self.batchN2.reset_parameters()
self.batchN3.reset_parameters()
self.batchN4.reset_parameters()
self.batchN5.reset_parameters()
def cube(self,in_size=(92,196)):
x = int( (in_size[0]/4-13)/2 )
y = int( (in_size[1]/4-13)/2 )
return( x,y,64 )
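    # Worked example for the default in_size=(92,196):
    # x = int((92/4 - 13)/2) = 5 and y = int((196/4 - 13)/2) = 18,
    # i.e. a 5 x 18 x 64 feature cube, so L = 5*18 = 90 and D = 64.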
""" Forward """
def forward(self,x):
# Layer 1
x = self. conv1(x)
x = self.batchN1(x)
x = self. ReLU(x)
# Layer 2
x = self. conv2(x)
x = self.batchN2(x)
x = self. ReLU(x)
# Layer 3
x = self. conv3(x)
x = self.batchN3(x)
x = self. ReLU(x)
# Layer 4
x = self. conv4(x)
x = self.batchN4(x)
x = self. ReLU(x)
# Layer 5
x = self. conv5(x)
x = self.batchN5(x)
x = self. ReLU(x) # [batch,D,h,w]
x = x.flatten(start_dim=2, end_dim=3) # [batch,D,L]
x = x.transpose(1, 2) # [batch,L,D]
return x
""" Convolutional Neuronal Network - 5 layers
-----------------------------------------
* Input: img [batch,H,W]
* Output: xt [batch,D,h,w]
"""
class CNN5Max(nn.Module):
""" Constructor """
def __init__(self,):
super(CNN5Max, self).__init__()
# Layers
self.conv1 = nn.Conv2d( 3, 24, kernel_size=5, stride=2, bias=False)
self.conv2 = nn.Conv2d(24, 36, kernel_size=5, stride=2, bias=False)
self.conv3 = nn.Conv2d(36, 48, kernel_size=3, stride=2, bias=False)
self.conv4 = nn.Conv2d(48, 64, kernel_size=3, stride=1, bias=False)
self.conv5 = nn.Conv2d(64, 64, kernel_size=3, stride=1, bias=False)
self.batchN1 = nn.BatchNorm2d(24)
self.batchN2 = nn.BatchNorm2d(36)
self.batchN3 = nn.BatchNorm2d(48)
self.batchN4 = nn.BatchNorm2d(64)
self.batchN5 = nn.BatchNorm2d(64)
self.ReLU = nn.ReLU()
# Initialize
torch.nn.init.xavier_uniform_(self.conv1.weight)
torch.nn.init.xavier_uniform_(self.conv2.weight)
torch.nn.init.xavier_uniform_(self.conv3.weight)
torch.nn.init.xavier_uniform_(self.conv4.weight)
torch.nn.init.xavier_uniform_(self.conv5.weight)
self.batchN1.reset_parameters()
self.batchN2.reset_parameters()
self.batchN3.reset_parameters()
self.batchN4.reset_parameters()
self.batchN5.reset_parameters()
def cube(self,in_size=(92,196)):
x = int( (in_size[0]/4-13)/2 )
y = int( (in_size[1]/4-13)/2 )
return( x,y,64 )
""" Forward """
def forward(self,x):
# Layer 1
x = self. conv1(x)
x = self.batchN1(x)
x = self. ReLU(x)
# Layer 2
x = self. conv2(x)
x = self.batchN2(x)
x = self. ReLU(x)
# Layer 3
x = self. conv3(x)
x = self.batchN3(x)
x = self. ReLU(x)
# Layer 4
x = self. conv4(x)
x = self.batchN4(x)
x = self. ReLU(x)
# Layer 5
x = self. conv5(x)
x = self.batchN5(x)
x = self. ReLU(x) # [batch,D,h,w]
return x
""" ResNet 34
---------
Ref: <NAME>., <NAME>., <NAME>., & <NAME>. (2016). "Deep residual learning
for image recognition". In Proceedings of the IEEE conference on
computer vision and pattern recognition (pp. 770-778).
* Input: img [batch,H,W]
* Output: xt [batch,L,D]
"""
class ResNet34(nn.Module):
""" Constructor """
def __init__(self,):
super(ResNet34, self).__init__()
# Layers
self.model = models.resnet34(pretrained=False)
self.model = torch.nn.Sequential(*(list(self.model.children())[:-3]))
self.convHead = nn.Conv2d(256,128, kernel_size=1, bias=False)
self.bn = nn.BatchNorm2d(num_features=128, momentum=0.99, eps=1e-3)
self.swish = MemoryEfficientSwish()
# Initialization
torch.nn.init.xavier_uniform_(self.convHead.weight)
def cube(self,in_size=(96,192)):
x = int( in_size[0]/16 )
y = int( in_size[1]/16 )
return( x,y,128 )
""" Forward """
def forward(self,x):
x = self.model(x) # [batch,D,h,w]
x = self.convHead(x)
x = self.bn(x)
x = self.swish(x)
x = x.flatten(start_dim=2, end_dim=3) # [batch,D,L]
x = x.transpose(1, 2) # [batch,L,D]
return x
""" ResNet 50
---------
Ref: <NAME>., <NAME>., <NAME>., & <NAME>. (2016). "Deep residual learning
for image recognition". In Proceedings of the IEEE conference on
computer vision and pattern recognition (pp. 770-778).
* Input: img [batch,H,W]
* Output: xt [batch,L,D]
"""
class ResNet50(nn.Module):
""" Constructor """
def __init__(self,):
super(ResNet50, self).__init__()
# Layers
self.model = models.resnet50(pretrained=False)
self.model = torch.nn.Sequential(*(list(self.model.children())[:-3]))
self.convHead = nn.Conv2d(1024,128, kernel_size=1, bias=False)
self.bn = nn.BatchNorm2d(num_features=128, momentum=0.99, eps=1e-3)
self.swish = MemoryEfficientSwish()
# Initialization
torch.nn.init.xavier_uniform_(self.convHead.weight)
def cube(self,in_size=(96,192)):
x = int( in_size[0]/16 )
y = int( in_size[1]/16 )
return( x,y,128 )
""" Forward """
def forward(self,x):
x = self.model(x) # [batch,D,h,w]
x = self.convHead(x)
x = self.bn(x)
x = self.swish(x)
x = x.flatten(start_dim=2, end_dim=3) # [batch,D,L]
x = x.transpose(1, 2) # [batch,L,D]
return x
""" Wide residual network
---------------------
Ref: <NAME>., <NAME>., <NAME>., & <NAME>. (2016). Deep residual learning
for image recognition. In Proceedings of the IEEE conference on
computer vision and pattern recognition (pp. 770-778).
* Input: img [batch,H,W]
* Output: xt [batch,D,h,w]
"""
class WideResNet50(nn.Module):
""" Constructor """
def __init__(self,):
super(WideResNet50, self).__init__()
# Layers
self.model = models.wide_resnet50_2(pretrained=True)
self.model = torch.nn.Sequential(*(list(self.model.children())[:-4]))
self.linear1 = nn.Linear(512,256,bias=True)
self.linear2 = nn.Linear(256,128,bias=True)
self.linear3 = nn.Linear(128, 64,bias=True)
self.LeakyReLu = nn.LeakyReLU()
# Initialization
torch.nn.init.xavier_uniform_(self.linear1.weight)
torch.nn.init.xavier_uniform_(self.linear2.weight)
torch.nn.init.xavier_uniform_(self.linear3.weight)
def cube(self,in_size=(96,192)):
x = int( in_size[0]/8 )
y = int( in_size[1]/8 )
return( x,y,64 )
""" Forward """
def forward(self,x):
with torch.no_grad():
x = self.model(x) # [batch,D,h,w]
x = x.flatten(start_dim=2, end_dim=3) # [batch,D,L]
x = x.transpose(1, 2) # [batch,L,D]
x = self.linear1(x.detach())
x = self.LeakyReLu(x)
x = self.linear2(x)
x = self.LeakyReLu(x)
x = self.linear3(x)
x = self.LeakyReLu(x)
return x
class VGG19(nn.Module):
""" Constructor """
    def __init__(self, compression=3):
        super(VGG19, self).__init__()
        # number of compression stages used in forward(); the default of 3
        # (an assumption, since the original left it undefined) enables all
        # three linear layers
        self.compression = compression
self.model = models.vgg19_bn(pretrained=True)
self.model = torch.nn.Sequential(*(list(self.model.children())[:-4]))
self.linear1 = nn.Linear(512,256,bias=True)
self.linear2 = nn.Linear(256,128,bias=True)
self.linear3 = nn.Linear(128, 64,bias=True)
self.LeakyReLu = nn.LeakyReLU()
# Initialization
torch.nn.init.xavier_uniform_(self.linear1.weight)
torch.nn.init.xavier_uniform_(self.linear2.weight)
torch.nn.init.xavier_uniform_(self.linear3.weight)
def cube(self,in_size=(96,192)):
x = int( in_size[0]/8 )
y = int( in_size[1]/8 )
return( x,y,64 )
""" Forward """
def forward(self,x):
with torch.no_grad():
x = self.model(x) # [batch,D,h,w]
x = x.flatten(start_dim=2, end_dim=3) # [batch,D,L]
x = x.transpose(1, 2) # [batch,L,D]
if self.compression>0:
x = self.linear1(x.detach())
x = self.LeakyReLu(x)
if self.compression>1:
x = self.linear2(x)
x = self.LeakyReLu(x)
if self.compression>2:
x = self.linear3(x)
x = self.LeakyReLu(x)
return x
""" EfficientNet
------------
Ref: <NAME>, <NAME> (2019). EfficientNet: Rethinking Model
Scaling for Convolutional Neural Networks. In International
Conference on Machine Learning (pp. 6105-6114).
* Input: img [batch,H,W]
* Output: xt [batch,D,h,w]
"""
class EfficientNetB0(nn.Module):
""" Constructor """
def __init__(self,):
super(EfficientNetB0, self).__init__()
# Layers
self.model = EfficientNet.from_name('efficientnet-b0')
# (88,200) -> (96,192)
def cube(self,in_size=(96,192)):
x = int( in_size[0]/16 )
y = int( in_size[1]/16 )
return( x,y,128 )
""" Forward """
def forward(self,x):
x = self.model(x) # [batch,D,h,w]
x = x.flatten(start_dim=2, end_dim=3) # [batch,D,L]
x = x.transpose(1, 2) # [batch,L,D]
return x
class EfficientNetB1(nn.Module):
""" Constructor """
def __init__(self,):
super(EfficientNetB1, self).__init__()
# Layers
self.model = EfficientNet.from_name('efficientnet-b1')
# (88,200) -> (96,192)
def cube(self,in_size=(96,192)):
x = int( in_size[0]/16 )
y = int( in_size[1]/16 )
return( x,y,128 )
""" Forward """
def forward(self,x):
x = self.model(x) # [batch,D,h,w]
x = x.flatten(start_dim=2, end_dim=3) # [batch,D,L]
x = x.transpose(1, 2) # [batch,L,D]
return x
class EfficientNetB2(nn.Module):
""" Constructor """
def __init__(self,):
super(EfficientNetB2, self).__init__()
# Layers
self.model = EfficientNet.from_name('efficientnet-b2')
# (88,200) -> (96,192)
def cube(self,in_size=(96,192)):
x = int( in_size[0]/16 )
y = int( in_size[1]/16 )
return( x,y,128 )
""" Forward """
def forward(self,x):
x = self.model(x) # [batch,D,h,w]
x = x.flatten(start_dim=2, end_dim=3) # [batch,D,L]
x = x.transpose(1, 2) # [batch,L,D]
return x
class EfficientNetB3(nn.Module):
""" Constructor """
def __init__(self,):
super(EfficientNetB3, self).__init__()
# Layers
self.model = EfficientNet.from_name('efficientnet-b3')
# (88,200) -> (96,192)
def cube(self,in_size=(96,192)):
x = int( in_size[0]/16 )
y = int( in_size[1]/16 )
return( x,y,128 )
""" Forward """
def forward(self,x):
x = self.model(x) # [batch,D,h,w]
x = x.flatten(start_dim=2, end_dim=3) # [batch,D,L]
x = x.transpose(1, 2) # [batch,L,D]
return x
""" Lambda Networks
---------------
Ref: Anonymous (2021). LambdaNetworks: Modeling long-range
Interactions without Attention. ICLR 2021 Conference
Blind Submission.
https://github.com/leaderj1001/LambdaNetworks/blob/main/model.py
"""
class λBottleneck(nn.Module):
expansion = 4
def __init__(self, d_in, dhdn, receptiveWindow, stride=1):
super(λBottleneck, self).__init__()
dout = dhdn*self.expansion
# inDim, hdnDim
self.in1x1conv = nn.Conv2d(d_in, dhdn, kernel_size=1, bias=False)
self.BNorm1 = nn.BatchNorm2d(dhdn)
        # λLayer(dim_in=dhdn, dim_out=dhdn); receptiveWindow is not forwarded here
        self.bottleneck = nn.ModuleList([λLayer(dhdn, dhdn)])
if stride != 1 or d_in != dhdn:
self.bottleneck.append(nn.AvgPool2d(kernel_size=(3, 3), stride=stride, padding=(1, 1)))
self.bottleneck.append(nn.BatchNorm2d(dhdn))
self.bottleneck.append(nn.ReLU())
self.bottleneck = nn.Sequential(*self.bottleneck)
self.out_1x1conv = nn.Conv2d(dhdn, dout, kernel_size=1, bias=False)
self.BNorm2 = nn.BatchNorm2d(dout)
self.ReLU = nn.ReLU()
self.shortcut = nn.Sequential()
if stride != 1 or d_in != dout:
self.shortcut = nn.Sequential(
nn.Conv2d(d_in, dout, kernel_size=1, stride=stride),
nn.BatchNorm2d(dout)
)
    def forward(self, fm):
        # Input projection
        x = self.in1x1conv(fm)
        x = self.BNorm1(x)
        h = self.ReLU(x)
        # Bottleneck
        h = self.bottleneck(h)
        # Output projection
        y = self.out_1x1conv(h)
        y = self.BNorm2(y)
        # Skip connection
        y += self.shortcut(fm)
        return self.ReLU(y)
class Bottleneck(nn.Module):
# Bottleneck in torchvision places the stride for downsampling at 3x3 convolution(self.conv2)
# while original implementation places the stride at the first 1x1 convolution(self.conv1)
# according to "Deep residual learning for image recognition"https://arxiv.org/abs/1512.03385.
# This variant is also known as ResNet V1.5 and improves accuracy according to
# https://ngc.nvidia.com/catalog/model-scripts/nvidia:resnet_50_v1_5_for_pytorch.
# https://discuss.pytorch.org/t/break-resnet-into-two-parts/39315
expansion: int = 4
def __init__( self,
inplanes: int, # d_in
planes: int, # n_hidden, dhdn
stride: int = 1,
groups: int = 1,
base_width: int = 64,
dilation: int = 1):
super(Bottleneck, self).__init__()
width = int(planes * (base_width / 64.)) * groups
# Both self.conv2 and self.downsample layers downsample the input when stride != 1
self.conv1 = nn.Conv2d(inplanes, width, kernel_size = 1,
bias = False)
self.bn1 = nn.GroupNorm( 1,width)
self.conv2 = nn.Conv2d(width, width, kernel_size = 3,
stride = stride,
padding = dilation,
groups = groups,
bias = False,
dilation = dilation)
self.bn2 = nn.GroupNorm( 1,width)
self.conv3 = nn.Conv2d(width, planes*self.expansion, kernel_size = 1,
bias = False)
self.bn3 = nn.GroupNorm( 1,planes * self.expansion)
self.relu = nn.ReLU(inplace=True)
self.stride = stride
if stride != 1 or inplanes != planes*self.expansion:
self.downsample = nn.Sequential(nn.Conv2d(inplanes, planes*self.expansion,
kernel_size = 1,
stride = stride,
bias = False),
nn.GroupNorm( 1,planes * self.expansion))
else:
self.downsample = None
def forward(self,x):
identity = x
out = self.conv1(x)
out = self.bn1(out)
out = F.gelu(out)
out = self.conv2(out)
out = self.bn2(out)
out = F.gelu(out)
out = self.conv3(out)
out = self.bn3(out)
if self.downsample is not None:
identity = self.downsample(x)
out += identity
out = F.gelu(out)
return out
class λResNet(nn.Module):
def __init__(self, block, n_block, cube, mode='high'):
super(λResNet, self).__init__()
        self.low = (mode == 'low') or (mode == 'total')
        self.high = (mode == 'high') or (mode == 'total')
self.in_planes = 64
receptiveWindow = max(cube[0],cube[1])
        if self.low:
            self.scell = nn.Sequential(
                nn.Conv2d(3, 64, kernel_size=7, stride=2, padding=3, bias=False),
                nn.BatchNorm2d(64),
                nn.ReLU(inplace=True),
            )
self.maxpool = nn.MaxPool2d(kernel_size=3, stride=2, padding=1)
self.layer1 = self._make_layer(block, 64, n_block[0], int(receptiveWindow/ 2))
self.layer2 = self._make_layer(block, 128, n_block[1], int(receptiveWindow/ 4), stride=2)
if self.high:
self.layer3 = self._make_layer(block, 64, n_block[2], int(receptiveWindow/ 8), stride=2) # 256
self.layer4 = self._make_layer(block, 128, n_block[3], int(receptiveWindow/16), stride=2) # 512
self.avgpool = nn.AdaptiveAvgPool2d((1, 1))
# Initialization
for m in self.modules():
if isinstance(m, nn.Conv2d):
nn.init.kaiming_normal_(m.weight, mode='fan_out', nonlinearity='relu')
elif isinstance(m, (nn.BatchNorm2d, nn.GroupNorm)):
nn.init.constant_(m.weight, 1)
nn.init.constant_(m.bias, 0)
def _make_layer(self, block, n_hidden, num_blocks, receptiveWindow, stride=1):
exp = block.expansion
d_in = int(n_hidden*2)
# d_in, dhdn, receptiveWindow, stride=1
layers = [block(d_in, n_hidden, receptiveWindow, stride)]
for _ in range(1,num_blocks):
layers.append(block(n_hidden*exp, n_hidden, int(receptiveWindow/2), 1))
return nn.Sequential(*layers)
def forward(self, x):
if self.low:
# Introduction
            x = self.scell(x)
x = self.maxpool(x)
# Low level
x = self.layer1(x)
x = self.layer2(x)
if self.high:
x = self.layer3(x)
x = self.layer4(x)
return x
class HighResNet34(nn.Module):
def __init__(self):
super(HighResNet34, self).__init__()
# [3, 4 | 6, 3]
self.layer3a = Bottleneck(128, 64, stride=2) # 1
self.layer3b = Bottleneck(256, 64, stride=1) # 2
self.layer3c = Bottleneck(256, 64, stride=1) # 3
self.layer3d = Bottleneck(256, 64, stride=1) # 4
self.layer3e = Bottleneck(256, 64, stride=1) # 5
self.layer3f = Bottleneck(256, 64, stride=1) # 6
self.layer4a = Bottleneck(256,128, stride=2) # 1
self.layer4b = Bottleneck(512,128, stride=1) # 2
self.layer4c = Bottleneck(512,128, stride=1) # 3
def forward(self, x):
x = self.layer3a(x)
x = self.layer3b(x)
x = self.layer3c(x)
x = self.layer3d(x)
x = self.layer3e(x)
x = self.layer3f(x)
x = self.layer4a(x)
x = self.layer4b(x)
x = self.layer4c(x)
return x
class HighResNet50(nn.Module):
def __init__(self):
super(HighResNet50, self).__init__()
# [3, 4 | 6, 3]
self.layer3a = Bottleneck( 512, 256, stride=2) # 1
self.layer3b = Bottleneck(1024, 256, stride=1) # 2
self.layer3c = Bottleneck(1024, 256, stride=1) # 3
self.layer3d = Bottleneck(1024, 256, stride=1) # 4
self.layer3e = Bottleneck(1024, 256, stride=1) # 5
self.layer3f = Bottleneck(1024, 256, stride=1) # 6
self.layer4a = Bottleneck(1024, 512, stride=2) # 1
self.layer4b = Bottleneck(2048, 512, stride=1) # 2
self.layer4c = Bottleneck(2048, 512, stride=1) # 3
def forward(self, x):
x = self.layer3a(x)
x = self.layer3b(x)
x = self.layer3c(x)
x = self.layer3d(x)
x = self.layer3e(x)
x = self.layer3f(x)
x = self.layer4a(x)
x = self.layer4b(x)
x = self.layer4c(x)
return x
def λResNet34(cube_dim,mode='total'):
return λResNet(λBottleneck, [2, 2, 2, 2], cube_dim, mode)
def λResNet50(cube_dim,mode='total'):
return λResNet(λBottleneck, [3, 4, 6, 3], cube_dim, mode)
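# Usage sketch (hedged): assumes the λLayer implementation referenced above and that
# it preserves spatial dimensions. Note _make_layer sets d_in = 2 * n_hidden, so the
# 'high' path expects a 128-channel feature map:
#
#     head = λResNet34((96, 192), mode='high')
#     out = head(torch.randn(1, 128, 12, 24))  # -> [1, 512, 3, 6] after two stride-2 stages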
|
452434
|
import os
import sys
import h5py
import numpy as np
import glob
# Local imports
rootpath = os.path.dirname(os.path.abspath(__file__)) + '/../'
sys.path.append(rootpath)
from DataFiles.Hdf5.hdf5lib import h5getstr
# ---------------------------------------------------------------------------
def PrintArraySize(v, vname):
    # Guard against 0-d and >2-d arrays, which previously left nrows/ncols unset
    nrows = v.shape[0] if len(v.shape) >= 1 else 0
    ncols = v.shape[1] if len(v.shape) >= 2 else 1
    sys.stdout.write('%s = [%d x %d]\n'% (vname, nrows, ncols))
############################################################
class ErrorClass:
# -----------------------------------------------------------
def __init__(self):
self.err = 0
if self.IsEmpty():
self.err = -1
# -----------------------------------------------------------
def GetError(self):
return self.err
# -----------------------------------------------------------
def IsEmpty(self):
return False
############################################################
class DataClass(ErrorClass):
# -----------------------------------------------------------
def __init__(self, fid, location):
self.dataTimeSeries = np.array([])
self.time = np.array([])
try:
self.dataTimeSeries = np.array(fid.get(location + '/dataTimeSeries'))
self.time = np.array(fid.get(location + '/time'))
except:
self.err = -1
return
ErrorClass.__init__(self)
# -----------------------------------------------------------
def IsEmpty(self):
if ((self.time.all()==None) or (len(self.time)==0)) and \
((self.dataTimeSeries.all()==None) or (len(self.dataTimeSeries)==0)):
return True
return False
# -----------------------------------------------------------
def Print(self):
PrintArraySize(self.dataTimeSeries, ' dataTimeSeries')
PrintArraySize(self.time, ' time')
############################################################
class ProbeClass(ErrorClass):
# -----------------------------------------------------------
def __init__(self, fid, location):
self.wavelengths = np.array(fid.get(location + '/wavelengths'))
self.wavelengthsEmission = fid.get(location + '/wavelengthsEmission')
self.sourcePos2D = np.array(fid.get(location + '/sourcePos2D'))
self.detectorPos2D = np.array(fid.get(location + '/detectorPos2D'))
self.frequencies = np.array(fid.get(location + '/frequencies'))
self.timeDelay = 0
self.timeDelayWidth = 0
self.momentOrders = []
self.correlationTimeDelay = 0
self.correlationTimeDelayWidth = 0
self.sourceLabels = np.array(fid.get(location + '/sourceLabels'))
self.detectorLabels = np.array(fid.get(location + '/detectorLabels'))
ErrorClass.__init__(self)
# -----------------------------------------------------------
def Print(self):
sys.stdout.write(' wavelengths = %s\n'% self.wavelengths)
sys.stdout.write(' wavelengthsEmission = %s\n'% self.wavelengthsEmission)
sys.stdout.write(' sourcePos2D:\n')
for ii in range(0, self.sourcePos2D.shape[0]):
sys.stdout.write(' %s\n'% self.sourcePos2D[ii])
sys.stdout.write(' detectorPos2D:\n')
for ii in range(0, self.detectorPos2D.shape[0]):
sys.stdout.write(' %s\n'% self.detectorPos2D[ii])
sys.stdout.write(' frequencies = %s\n'% self.frequencies)
sys.stdout.write(' timeDelay = %s\n'% self.timeDelay)
sys.stdout.write(' timeDelayWidth = %s\n'% self.timeDelayWidth)
sys.stdout.write(' momentOrders = %s\n'% self.momentOrders)
sys.stdout.write(' correlationTimeDelay = %s\n'% self.correlationTimeDelay)
sys.stdout.write(' correlationTimeDelayWidth = %s\n'% self.correlationTimeDelayWidth)
sys.stdout.write(' sourceLabels = %s\n'% self.sourceLabels)
sys.stdout.write(' detectorLabels = %s\n'% self.detectorLabels)
############################################################
class StimClass(ErrorClass):
# -----------------------------------------------------------
def __init__(self, fid, location):
self.name = h5getstr(fid, location + '/name')
self.data = np.array(fid.get(location + '/data'))
ErrorClass.__init__(self)
# -----------------------------------------------------------
def Print(self):
sys.stdout.write(' name: %s\n'% self.name)
sys.stdout.write(' data:\n')
for ii in range(0, self.data.shape[0]):
sys.stdout.write(' %s\n'% self.data[ii])
# -----------------------------------------------------------
def IsEmpty(self):
if not self.name:
return True
if (self.data.all()==None) or (len(self.data)==0):
return True
return False
###########################################################
class AuxClass(ErrorClass):
# -----------------------------------------------------------
def __init__(self, fid, location):
self.name = h5getstr(fid, location + '/name')
self.time = np.array(fid.get(location + '/time'))
self.dataTimeSeries = np.array(fid.get(location + '/dataTimeSeries'))
ErrorClass.__init__(self)
# -----------------------------------------------------------
def Print(self):
sys.stdout.write(' name: %s\n'% self.name)
PrintArraySize(self.dataTimeSeries, ' dataTimeSeries')
PrintArraySize(self.time, ' time')
# -----------------------------------------------------------
def IsEmpty(self):
if ((self.time.all()==None) or (len(self.time)==0)) and \
((self.dataTimeSeries.all()==None) or (len(self.dataTimeSeries)==0)):
return True
return False
############################################################
class MetaDataTagsClass(ErrorClass):
# -----------------------------------------------------------
def __init__(self, fid, location):
self.SubjectID = h5getstr(fid, location + '/SubjectID')
self.MeasurementDate = h5getstr(fid, location + '/MeasurementDate')
self.MeasurementTime = h5getstr(fid, location + '/MeasurementTime')
self.LengthUnit = h5getstr(fid, location + '/LengthUnit')
self.TimeUnit = h5getstr(fid, location + '/TimeUnit')
ErrorClass.__init__(self)
# -----------------------------------------------------------
def Print(self):
sys.stdout.write(' SubjectID: %s\n'% self.SubjectID)
sys.stdout.write(' MeasurementDate: %s\n'% self.MeasurementDate)
sys.stdout.write(' MeasurementTime: %s\n'% self.MeasurementTime)
sys.stdout.write(' LengthUnit: %s\n'% self.LengthUnit)
sys.stdout.write(' TimeUnit: %s\n'% self.TimeUnit)
############################################################
class SnirfClass(ErrorClass):
# -----------------------------------------------------------
def __init__(self, fname):
fid = h5py.File(fname,'r')
# formatVersion
self.formatVersion = h5getstr(fid, 'formatVersion')
# metaDataTags
self.metaDataTags = MetaDataTagsClass(fid, '/nirs/metaDataTags')
# data
self.data = []
ii = 1
while 1:
temp = DataClass(fid, '/nirs/data' + str(ii))
if temp.GetError() < 0:
break
self.data.append(temp)
ii = ii+1
# stim
self.stim = []
ii = 1
while 1:
temp = StimClass(fid, '/nirs/stim' + str(ii))
if temp.GetError() < 0:
break
self.stim.append(temp)
ii = ii+1
# probe
self.probe = ProbeClass(fid, '/nirs/probe')
# aux
self.aux = []
ii = 1
while 1:
temp = AuxClass(fid, '/nirs/aux' + str(ii))
if temp.GetError() < 0:
break
self.aux.append(temp)
ii = ii+1
fid.close()
ErrorClass.__init__(self)
# -----------------------------------------------------------
def Print(self):
sys.stdout.write('formatVersion = %s\n'% self.formatVersion)
sys.stdout.write('metaDataTags:\n')
self.metaDataTags.Print()
for ii in range(0, len(self.data)):
sys.stdout.write('data[%d]:\n'% ii)
self.data[ii].Print()
for ii in range(0, len(self.stim)):
sys.stdout.write('stim[%d]:\n'% ii)
self.stim[ii].Print()
sys.stdout.write('probe:\n')
self.probe.Print()
for ii in range(0, len(self.aux)):
sys.stdout.write('aux[%d]:\n'% ii)
self.aux[ii].Print()
# -----------------------------------------------------------
if __name__ == "__main__":
    if len(sys.argv) > 1:
        filenames = [sys.argv[1]]
else:
# filenames = glob.glob('../../../../snirf-samples/basic/*.snirf')
filenames = glob.glob('./Examples/*.snirf')
for ii in range(0, len(filenames)):
sys.stdout.write('======================================================================\n')
sys.stdout.write('Loading data from %s\n\n'% filenames[ii])
snirf = SnirfClass(filenames[ii])
snirf.Print()
sys.stdout.write('\n')
|
452449
|
from realtime_py.connection import Socket
def callback1(payload):
print("Callback 1: ", payload)
def callback2(payload):
print("Callback 2: ", payload)
if __name__ == "__main__":
URL = "ws://localhost:4000/socket/websocket"
s = Socket(URL)
s.connect()
channel_1 = s.set_channel("realtime:public:todos")
channel_1.join().on("UPDATE", callback1)
channel_2 = s.set_channel("realtime:public:users")
channel_2.join().on("*", callback2)
s.listen()
|
452450
|
from __future__ import unicode_literals
from .utils import init_kernel
def test_read():
for to_test in ["test\n", "test test", "testtestt"]:
k = init_kernel(stdin=to_test)
k.stack.push(0) # Push stream_num
k.stack.push(10) # Push size
k.stack.push(0) # Push location in memory to write the string to
k.stack.push(1) # Push syscall number
k.syscall()
assert k.read_string(0) == to_test.strip()
assert k.stack.pop() == len(to_test.strip())
def test_cut_off_at_size():
size = 10
k = init_kernel(stdin=u"A" * 20)
k.stack.push(0) # Push stream_num
k.stack.push(size) # Push size
k.stack.push(0) # Push location in memory to write the string to
k.stack.push(1) # Push syscall number
k.syscall()
assert k.read_string(0) == "A" * (size - 1)
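    # Only size-1 characters come back: one byte of the requested buffer is
    # presumably reserved for a terminating NUL (an assumption about this kernel ABI).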
|
452468
|
import math
import unittest
from simulation.utils.geometry import Line, Point, Polygon, Pose, Transform
from simulation.utils.road.config import Config
from simulation.utils.road.sections import StaticObstacle
from simulation.utils.road.sections.road_section import RoadSection
class DummyRoadSection(RoadSection):
TYPE = "DUMMY"
@property
def middle_line(self):
return DummyRoadSection.MIDDLE_LINE
class ModuleTest(unittest.TestCase):
def assert_lines_approx_equal(self, line1, line2):
p = Polygon(line1.get_points() + list(reversed(line2.get_points())))
self.assertAlmostEqual(p.area, 0)
def test_road_section(self):
MIDDLE_LINE = Line([[0, 0], [1, 0]])
DummyRoadSection.MIDDLE_LINE = MIDDLE_LINE
LEFT_LINE = Line([[0, Config.road_width], [1, Config.road_width]])
RIGHT_LINE = Line(
[[0, -Config.road_width], [1 + Config.road_width, -Config.road_width]]
)
ENDING = (Pose([1, 0], 0), 0)
BEGINNING = (Pose([0, 0], math.pi), 0)
rs = DummyRoadSection()
self.assertIsNotNone(rs.transform)
self.assert_lines_approx_equal(rs.middle_line, MIDDLE_LINE)
self.assert_lines_approx_equal(rs.right_line, RIGHT_LINE)
self.assert_lines_approx_equal(rs.left_line, LEFT_LINE)
self.assertTupleEqual(rs.get_beginning(), BEGINNING)
self.assertTupleEqual(rs.get_ending(), ENDING)
self.assertTrue(rs.get_bounding_box().contains(LEFT_LINE))
self.assertTrue(rs.get_bounding_box().contains(MIDDLE_LINE))
self.assertTrue(rs.get_bounding_box().contains(RIGHT_LINE))
def test_obstacles(self):
MIDDLE_LINE = Line([[0, 0], [0, 2]])
DummyRoadSection.MIDDLE_LINE = MIDDLE_LINE
# Assume the obstacle class itself works!
# Simply test if the transforms etc. are set correctly.
obstacle = StaticObstacle(arc_length=1.5, y=-0.2, width=0.2, depth=1)
rs = DummyRoadSection(obstacles=[obstacle])
returned_obs = rs.obstacles[0]
self.assertEqual(returned_obs.transform, Transform([0, 1.5], math.pi / 2))
self.assertEqual(returned_obs.center, Point(0.2, 1.5))
self.assertEqual(
returned_obs.frame, Polygon([[0.1, 1], [0.1, 2], [0.3, 2], [0.3, 1]])
)
#
# Test obstacle with angle 90 degrees and
# Transform(Vector(0, 0), 90°)
# obs_args.update({"angle": 90})
# test_transform = Transform(Vector(1, 1), 0.5 * math.pi)
# First translate mid_point to (0, 0) and rotate by angle
# test_angle = 0.5 * math.pi
# test_left_lower = (test_left_lower - test_mid).rotated(test_angle) + test_mid
# test_left_upper = (test_left_upper - test_mid).rotated(test_angle) + test_mid
# test_right_lower = (test_right_lower - test_mid).rotated(test_angle) + test_mid
# test_right_upper = (test_right_upper - test_mid).rotated(test_angle) + test_mid
# Second transform entire polygon
# test_poly = test_transform * Polygon(
# [test_left_lower, test_left_upper, test_right_upper, test_right_lower,]
# )
# transform test mid
# test_mid = test_transform * test_mid
# construct obstacle and set transform
# obstacle = StaticObstacle(obs_args)
# transform x-value of left_lower to use as transform for obstacle
# p1 = test_transform * Vector(obstacle._left_lower_x, 0)
# obstacle.transform = Transform(p1, 0.5 * math.pi)
# gt = obstacle.generate_groundtruth()
# compare obstacles
# self.assertPolygonAlmostEqual(gt, test_poly)
#
# Helper functions
# def assertPointAlmostEqual(self, p1, p2):
# self.assertAlmostEqual(p1.x, p2.x)
# self.assertAlmostEqual(p1.y, p2.y)
# self.assertAlmostEqual(p1.z, p2.z)
# def assertPolygonAlmostEqual(self, poly1, poly2):
# for p1, p2 in zip(poly1.get_points(), poly2.get_points()):
# self.assertPointAlmostEqual(p1, p2)
if __name__ == "__main__":
unittest.main()
|
452514
|
import django_rq
from fakeredis import FakeStrictRedis
from rq import Queue
from autoemails.job import Job
connection = FakeStrictRedis()
def dummy_job():
return 42
def dummy_fail_job():
return 42 / 0
class FakeRedisTestCaseMixin:
"""TestCase mixin that provides easy setup of FakeRedis connection to both
Django-RQ and RQ-Scheduler, as well as test-teardown with scheduled jobs
purging."""
def setUp(self):
super().setUp()
self.connection = connection
# self.connection = Redis()
self.queue = Queue(is_async=False, connection=self.connection, job_class=Job)
self.scheduler = django_rq.get_scheduler("testing", queue=self.queue)
self.scheduler.connection = self.connection
def tearDown(self):
# clear job queue
for job in self.scheduler.get_jobs():
self.scheduler.cancel(job)
assert not bool(list(self.scheduler.get_jobs()))
assert self.scheduler.count() == 0
self.queue.empty()
assert self.queue.count == 0
super().tearDown()
|
452524
|
import numpy as np
class DataSet:
"""Class to represent some dataset: train, validation, test"""
@property
def num_examples(self):
"""Return qtty of examples in dataset"""
raise NotImplementedError
def next_batch(self, batch_size):
"""Return batch of required size of data, labels"""
raise NotImplementedError
class ImagesDataSet(DataSet):
"""Dataset for images that provide some often used methods"""
@staticmethod
def measure_mean_and_std(images):
        means = []
        stds = []
        # for every channel in image (assume channels are the last dimension)
for ch in range(images.shape[-1]):
means.append(np.mean(images[:, :, :, ch]))
stds.append(np.std(images[:, :, :, ch]))
return means, stds
@staticmethod
def shuffle_images_and_labels(images, labels):
rand_indexes = np.random.permutation(images.shape[0])
shuffled_images = images[rand_indexes]
shuffled_labels = labels[rand_indexes]
return shuffled_images, shuffled_labels
@staticmethod
def normalize_images(images, normalization_type, meanstd=None):
"""
Args:
images: numpy 4D array
normalization_type: `str`, available choices:
- divide_255
- divide_256
- by_channels
            meanstd: tuple of per-channel (means, stds); required when
                normalization_type is 'by_channels'
"""
if normalization_type is not None:
if normalization_type == 'divide_255':
images = images / 255
elif normalization_type == 'divide_256':
images = images / 256
elif normalization_type == 'by_channels':
images = images.astype('float64')
# for every channel in image(assume this is last dimension)
means, stds = meanstd
for i in range(images.shape[-1]):
images[:, :, :, i] = ((images[:, :, :, i] - means[i]) / stds[i])
else:
raise Exception('Unknown type of normalization')
return images
class DataProvider:
_SEED = 88
@property
def data_shape(self):
"""Return shape as python list of one data entry"""
raise NotImplementedError
@property
def n_classes(self):
"""Return `int` of num classes"""
raise NotImplementedError
def labels_to_one_hot(self, labels):
"""Convert 1D array of labels to one hot representation
Args:
labels: 1D numpy array
"""
new_labels = np.zeros((labels.shape[0], self.n_classes))
new_labels[range(labels.shape[0]), labels] = np.ones(labels.shape)
return new_labels
@staticmethod
def labels_from_one_hot(labels):
"""Convert 2D array of labels to 1D class based representation
Args:
labels: 2D numpy array
"""
return np.argmax(labels, axis=1)
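# Worked example for the label helpers above: with n_classes == 3, labels [0, 2]
# become [[1, 0, 0], [0, 0, 1]] via labels_to_one_hot, and labels_from_one_hot
# inverts that with an argmax along axis 1, returning [0, 2].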
|
452589
|
import caffe2onnx.src.c2oObject as Node
def getGemmAttri(layer):
dict = {"alpha": 1.0,
"beta": 1.0,
"transA": 0,
"transB": 1}
return dict
def getGemmOutShape(input_shape,num_output):
output_shape = [[input_shape[0][0], num_output]]
return output_shape
def createGemm(layer, nodename, inname, outname, input_shape, num_output):
    attributes = getGemmAttri(layer)
    output_shape = getGemmOutShape(input_shape, num_output)
    node = Node.c2oNode(layer, nodename, "Gemm", inname, outname, input_shape, output_shape, attributes)
return node
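# Why transB=1: ONNX Gemm computes Y = alpha * A @ B^T + beta * C when transB is set,
# and the Caffe InnerProduct weight blob is stored as [num_output, K], so it must be
# transposed against a [batch, K] input. Hypothetical call for a 10-way FC layer:
#
#     node = createGemm(layer, 'fc1', ['fc1_in', 'fc1_W', 'fc1_b'], ['fc1_out'],
#                       input_shape=[[1, 4096]], num_output=10)  # output_shape [[1, 10]]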
|
452597
|
import os
import matplotlib.pyplot as plt
import matplotlib
import numpy as np
from microfilm import microplot, colorify
image = 100*np.ones((3,3), dtype=np.uint8)
image[0,0] = 200
image2 = 100*np.ones((3,3), dtype=np.uint8)
image2[0,1] = 180
more_than_3d = np.zeros((5,3,3), dtype=np.uint8)
more_than_3d[0,0,0] = 1
more_than_3d[1,1,0] = 1
more_than_3d[2,2,0] = 1
more_than_3d[3,1,1] = 1
more_than_3d[4,2,1] = 1
def verify_image(microim):
# check image
assert np.any(microim.ax.get_images()[0].get_array()[:,:,0] > 0) == False, "Red should not be present"
np.testing.assert_array_equal(microim.ax.get_images()[0].get_array()[:,:,1], np.array([[0,1,0], [0,0,0], [0,0,0]]), "Green channel not correct")
np.testing.assert_array_equal(microim.ax.get_images()[0].get_array()[:,:,2], np.array([[1,0,0], [0,0,0], [0,0,0]]), "Blue channel not correct")
def verify_label(microim):
assert microim.ax.texts[0].get_text() == 'b', "Wrong channel label"
assert microim.ax.texts[1].get_text() == 'a', "Wrong channel label"
def test_microshow():
microim = microplot.microshow(
images=[image, image2], cmaps=['pure_blue', 'pure_green'], channel_names=['a', 'b'], channel_label_show=True, unit='mm',
scalebar_unit_per_pix=0.5, scalebar_size_in_units=1, scalebar_thickness=0.1, scalebar_color='red',
label_text='A', label_color='pink')
assert isinstance(microim, microplot.Microimage)
# check image
verify_image(microim)
# check channel labels
verify_label(microim)
# check scalebar
assert microim.ax.texts[2].get_text() == '1 mm', "Wrong scalebar legend"
assert microim.ax.patches[0].get_facecolor() == (1, 0, 0, 1), "Wrong scalebar color"
assert microim.ax.patches[0].get_width() == 2/3, "Wrong scalebar size"
# check label
assert microim.ax.texts[3].get_text() == 'A', "Wrong label"
assert microim.ax.texts[3].get_color() == 'pink', "Wrong label color"
def test_default_random_gradient():
# test that images with > 3 channels use random gradient by default
microim = microplot.microshow(more_than_3d)
assert microim.cmaps[4] == "ran_gradient", "Random gradient not select for d>3"
def test_mixed_cmaps():
# test that "name" cmaps and "object" cmaps can be mixed
summer_cmap = colorify.cmaps_def(cmap_name='summer')
microim = microplot.microshow(
images=[image, image2], cmaps=[summer_cmap, 'pure_blue'])
assert isinstance(microim.cmap_objects[0], matplotlib.colors.LinearSegmentedColormap), "Wrong colormap for summer cmap"
assert isinstance(microim.cmap_objects[1], matplotlib.colors.ListedColormap), "Wrong colormap for pure_blue cmap"
def test_add_scalebar():
microim = microplot.microshow(
images=[image, image2], cmaps=['pure_blue', 'pure_green'])
microim.add_scalebar(unit='mm', scalebar_unit_per_pix=0.5, scalebar_size_in_units=1, scalebar_thickness=0.1, scalebar_color='red')
# check scalebar
assert microim.ax.texts[0].get_text() == '1 mm', "Wrong scalebar legend"
assert microim.ax.patches[0].get_facecolor() == (1, 0, 0, 1), "Wrong scalebar color"
assert microim.ax.patches[0].get_width() == 2/3, "Wrong scalebar size"
def test_add_label():
microim = microplot.microshow(
images=[image, image2], cmaps=['pure_blue', 'pure_green'])
microim.add_label(label_text='A', label_color='pink')
# check label
assert microim.ax.texts[0].get_text() == 'A', "Wrong label"
assert microim.ax.texts[0].get_color() == 'pink', "Wrong label color"
def test_add_channel_labels():
microim = microplot.microshow(
images=[image, image2], cmaps=['pure_blue', 'pure_green'])
# check channel labels
microim.add_channel_labels(channel_names=['a', 'b'])
verify_label(microim)
assert microim.ax.texts[1].get_color() == (0.0, 0.0, 1.0, 1.0), "Wrong label color"
assert microim.ax.texts[0].get_color() == (0.0, 1.0, 0.0, 1.0), "Wrong label color"
def test_update():
microimage = microplot.Microimage(images=[image, image2], cmaps=['pure_blue', 'pure_green'])
assert microimage.ax is None
fig, ax = plt.subplots(1, 2)
microimage.update(ax[1])
verify_image(microimage)
def test_save():
microimage = microplot.microshow(images=[image, image2], cmaps=['pure_blue', 'pure_green'])
microimage.savefig('test_saveimage.png')
    assert os.path.isfile('test_saveimage.png')
os.remove('test_saveimage.png')
def test_micropanel():
microimage1 = microplot.Microimage(images=[image, image2], cmaps=['pure_blue', 'pure_green'])
microimage2 = microplot.Microimage(images=[image, image2], cmaps=['pure_cyan', 'pure_magenta'])
micropanel = microplot.Micropanel(1, 2)
assert isinstance(micropanel, microplot.Micropanel)
micropanel.add_element([0,0], microimage1)
micropanel.add_element([0,1], microimage2)
# check grid shape
    assert micropanel.microplots.shape == (1, 2)
# Check that plots are in the correct place
np.testing.assert_array_equal(micropanel.microplots[0,0].ax.get_images()[0].get_array()[:,:,0], np.array([[0,0,0], [0,0,0], [0,0,0]]),
"Red channel in first panel not correct")
np.testing.assert_array_equal(micropanel.microplots[0,0].ax.get_images()[0].get_array()[:,:,1], np.array([[0,1,0], [0,0,0], [0,0,0]]),
"Green channel in first panel not correct")
np.testing.assert_array_equal(micropanel.microplots[0,0].ax.get_images()[0].get_array()[:,:,2], np.array([[1,0,0], [0,0,0], [0,0,0]]),
"Blue channel in first panel not correct")
np.testing.assert_array_equal(micropanel.microplots[0,1].ax.get_images()[0].get_array()[:,:,0], np.array([[0,1,0], [0,0,0], [0,0,0]]),
"Red channel in second panel not correct")
np.testing.assert_array_equal(micropanel.microplots[0,1].ax.get_images()[0].get_array()[:,:,1], np.array([[1,0,0], [0,0,0], [0,0,0]]),
"Green channel in second panel not correct")
np.testing.assert_array_equal(micropanel.microplots[0,1].ax.get_images()[0].get_array()[:,:,2], np.array([[1,1,0], [0,0,0], [0,0,0]]),
"Blue channel in second panel not correct")
# check labels and their positions
micropanel.add_channel_label()
assert micropanel.fig.texts[0].get_text() == 'Channel-1', "Wrong channel label"
assert micropanel.fig.texts[1].get_text() == 'Channel-0', "Wrong channel label"
assert micropanel.fig.texts[2].get_text() == 'Channel-1', "Wrong channel label"
assert micropanel.fig.texts[3].get_text() == 'Channel-0', "Wrong channel label"
assert micropanel.fig.texts[0].get_position()[1] > 0.8, "Wrong y position for first Channel-1 label"
assert micropanel.fig.texts[1].get_position()[1] > 0.9, "Wrong y position for first Channel-0 label"
assert micropanel.fig.texts[2].get_position()[1] > 0.8, "Wrong y position for second Channel-1 label"
assert micropanel.fig.texts[3].get_position()[1] > 0.9, "Wrong y position for second Channel-0 label"
assert micropanel.fig.texts[0].get_position()[0] < 0.5, "Wrong x position for first Channel-1 label"
assert micropanel.fig.texts[1].get_position()[0] < 0.5, "Wrong x position for first Channel-0 label"
assert micropanel.fig.texts[2].get_position()[0] > 0.5, "Wrong x position for second Channel-1 label"
assert micropanel.fig.texts[3].get_position()[0] > 0.5, "Wrong x position for second Channel-0 label"
def test_savepanel():
microimage1 = microplot.Microimage(images=[image, image2], cmaps=['pure_blue', 'pure_green'])
microimage2 = microplot.Microimage(images=[image, image2], cmaps=['pure_cyan', 'pure_magenta'])
micropanel = microplot.Micropanel(1, 2)
micropanel.add_element([0,0], microimage1)
micropanel.add_element([0,1], microimage2)
micropanel.savefig('test_savepanel.png')
    assert os.path.isfile('test_savepanel.png')
os.remove('test_savepanel.png')
|
452598
|
import cisreg
import numpy as np
# the color
rgb_codes={"EnhancerActive":"255,215,0", #gold, EnhancerActive
"EnhancerInactive":"184,134,11", # dark golden rod, EnhancerInactive
"PromoterActive":"255,0,0", # red, PromoterActive
"PromoterInactive":"250,128,114", # salmon, PromoterInactive
"Exon":"0,128,0", # green, Exon
"Unknown":"128,128,128",# gray, Unknown
"EnhancerInactive+PromoterInactive+Exon+Unknown":"128,128,128"} # EnhancerInactive+PromoterInactive+Exon+Unknown
regions=[["chr1","100","110","Enhancer",".",".",".",".","."],
["chr1","110","120","Enhancer",".",".",".",".","."],
["chr1","120","130","Enhancer",".",".",".",".","."],
["chr1","130","140","Promoter",".",".",".",".","."],
["chr1","140","150","Enhancer",".",".",".",".","."],
["chr1","150","160","Unknown",".",".",".",".","."],
["chr2","160","170","Promoter",".",".",".",".","."]]
regions=np.array(regions)
path='/home/yifengli/prog/my/test'
regionnames=["EnhancerActive","EnhancerActive","EnhancerActive","PromoterActive","EnhancerActive","EnhancerInactive+PromoterInactive+Exon+Unknown","PromoterActive"]
regionnames=np.array(regionnames)
filename="CELL"
cisreg.write_bed(path,filename,regionnames,rgb_codes,regions,merge=True,background="EnhancerInactive+PromoterInactive+Exon+Unknown")
|
452610
|
import sys, java, unittest
from geoscript.layer import GeoTIFF
class GeoTIFF_Test:
def setUp(self):
self.tif = GeoTIFF('data/sfdem.tif')
def testBounds(self):
assert self.tif.bounds() is not None
        assert self.tif.bounds().west == 589980.0
assert self.tif.bounds().south == 4913700.0
assert self.tif.bounds().east == 609000.0
assert self.tif.bounds().north == 4928010.0
assert self.tif.bounds().proj.id == 'EPSG:26713'
def testBands(self):
bands = self.tif.bands
assert 1 == len(bands)
assert 'GRAY_INDEX' == bands[0].name
|
452637
|
import re
import json
from subprocess import call, Popen, PIPE
def init_parser(parser):
parser.add_argument('name', type=str, help='Cluster name.')
parser.add_argument('--dest', '-d', required=True, type=str, help="Directory for diagnose output -- must be local.")
parser.add_argument('--hail-log', '-l', required=False, type=str, default='/home/hail/hail.log',
help="Path for hail.log file.")
parser.add_argument('--overwrite', required=False, action='store_true',
help="Delete dest directory before adding new files.")
parser.add_argument('--no-diagnose', required=False, action='store_true',
help="Do not run gcloud dataproc clusters diagnose.")
parser.add_argument('--compress', '-z', required=False, action='store_true', help="GZIP all files.")
parser.add_argument('--workers', required=False, nargs='*', help="Specific workers to get log files from.")
parser.add_argument('--take', required=False, type=int, default=None,
help="Only download logs from the first N workers.")
def main(args):
print("Diagnosing cluster '{}'...".format(args.name))
is_local = not args.dest.startswith("gs://")
if args.overwrite:
if is_local:
call('rm -r {dir}'.format(dir=args.dest), shell=True)
else:
call('gsutil -m rm -r {dir}'.format(dir=args.dest), shell=True)
master_dest = args.dest.rstrip('/') + "/master/"
worker_dest = args.dest.rstrip('/') + "/workers/"
if is_local:
call('mkdir -p {dir}'.format(dir=master_dest), shell=True)
call('mkdir -p {dir}'.format(dir=worker_dest), shell=True)
desc = json.loads(Popen('gcloud dataproc clusters describe {name} --format json'.format(name=args.name),
shell=True,
stdout=PIPE,
stderr=PIPE).communicate()[0].strip())
config = desc['config']
master = config['masterConfig']['instanceNames'][0]
try:
workers = config['workerConfig']['instanceNames'] + config['secondaryWorkerConfig']['instanceNames']
except KeyError:
workers = config['workerConfig']['instanceNames']
    zone = re.search(r'zones/(?P<zone>\S+)$', config['gceClusterConfig']['zoneUri']).group('zone')
if args.workers:
invalid_workers = set(args.workers).difference(set(workers))
assert len(invalid_workers) == 0, "Non-existent workers specified: " + ", ".join(invalid_workers)
workers = args.workers
if args.take:
        assert 0 < args.take <= len(workers), \
            "Number of workers to take must be in the range of [0, nWorkers]. Found {}.".format(args.take)
workers = workers[:args.take]
def gcloud_ssh(remote, command):
return 'gcloud compute ssh {remote} --zone {zone} --command "{command}"'.format(remote=remote, zone=zone, command=command)
def gcloud_copy_files(remote, src, dest):
return 'gcloud compute copy-files {remote}:{src} {dest} --zone {zone}'.format(remote=remote, src=src, dest=dest, zone=zone)
def gsutil_cp(src, dest):
return 'gsutil -m cp -r {src} {dest}'.format(src=src, dest=dest)
def copy_files_tmp(remote, files, dest, tmp):
init_cmd = ['mkdir -p {tmp}; rm -r {tmp}/*'.format(tmp=tmp)]
copy_tmp_cmds = ['sudo cp -r {file} {tmp}'.format(file=file, tmp=tmp) for file in files]
copy_tmp_cmds.append('sudo chmod -R 777 {tmp}'.format(tmp=tmp))
if args.compress:
copy_tmp_cmds.append('sudo find ' + tmp + ' -type f ! -name \'*.gz\' -exec gzip "{}" \;')
call(gcloud_ssh(remote, '; '.join(init_cmd + copy_tmp_cmds)), shell=True)
if not is_local:
copy_dest_cmd = gcloud_ssh(remote, 'gsutil -m cp -r {tmp} {dest}'.format(tmp=tmp, dest=dest))
else:
copy_dest_cmd = gcloud_copy_files(remote, tmp, dest)
call(copy_dest_cmd, shell=True)
if not args.no_diagnose:
        diagnose_tar_path = re.search(r'Diagnostic results saved in: (?P<tarfile>gs://\S+diagnostic\.tar)',
str(Popen('gcloud dataproc clusters diagnose {name}'.format(name=args.name),
shell=True,
stdout=PIPE,
stderr=PIPE).communicate())).group('tarfile')
call(gsutil_cp(diagnose_tar_path, args.dest), shell=True)
master_log_files = [ '/var/log/hive/hive-*',
'/var/log/google-dataproc-agent.0.log',
'/var/log/dataproc-initialization-script-0.log',
'/var/log/hadoop-mapreduce/mapred-mapred-historyserver*',
'/var/log/hadoop-hdfs/*-m.*',
'/var/log/hadoop-yarn/yarn-yarn-resourcemanager-*-m.*',
args.hail_log
]
copy_files_tmp(master, master_log_files, master_dest, '/tmp/' + master + '/')
worker_log_files = ['/var/log/hadoop-hdfs/hadoop-hdfs-datanode-*.*',
'/var/log/dataproc-startup-script.log',
'/var/log/hadoop-yarn/yarn-yarn-nodemanager-*.*']
for worker in workers:
copy_files_tmp(worker, worker_log_files, worker_dest, '/tmp/' + worker + '/')
copy_files_tmp(worker, ['/var/log/hadoop-yarn/userlogs/'], args.dest, '/tmp/hadoop-yarn/')
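# Example invocation (hypothetical CLI wrapper that wires init_parser/main together):
#   diagnose mycluster --dest gs://my-bucket/diag --compress --take 5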
|
452683
|
import logging
from pajbot.models.command import Command
from pajbot.modules import BaseModule
from pajbot.modules import ModuleSetting
log = logging.getLogger(__name__)
class PaidSubmodeModule(BaseModule):
ID = __name__.split(".")[-1]
NAME = "Paid Submode"
DESCRIPTION = "Allows user to toggle subscribers mode on and off using points."
CATEGORY = "Feature"
SETTINGS = [
ModuleSetting(
key="subon_command_name",
label="Command name for turning sub mode on (i.e. $subon)",
type="text",
required=True,
placeholder="Command name (no !)",
default="$subon",
constraints={"min_str_len": 2, "max_str_len": 15},
),
ModuleSetting(
key="suboff_command_name",
label="Command name for turning sub mode off (i.e. $suboff)",
type="text",
required=True,
placeholder="Command name (no !)",
default="$suboff",
constraints={"min_str_len": 2, "max_str_len": 15},
),
ModuleSetting(
key="subon_cost",
label="Point cost for turning sub mode on",
type="number",
required=True,
placeholder="Point cost",
default=1000,
constraints={"min_value": 1, "max_value": 1000000},
),
ModuleSetting(
key="suboff_cost",
label="Point cost for turning sub mode off",
type="number",
required=True,
placeholder="Point cost",
default=1000,
constraints={"min_value": 1, "max_value": 1000000},
),
]
def paid_subon(self, bot, source, **rest):
if bot.subs_only is True:
bot.whisper(source, "Why would you try to enable subonly, if it's already enabled? FailFish")
# Request to enable submode is ignored, but the return False ensures the user is refunded their points
return False
if bot.subs_only is False:
_cost = self.settings["subon_cost"]
# Test this a bit. Make sure twitch doesn't bug out
bot.privmsg(".subscribers")
bot.execute_delayed(0.2, bot.privmsg, ".subscribers")
bot.whisper(source, f"You just used {_cost} points to put the chat into subscribers mode!")
return True
def paid_suboff(self, bot, source, **rest):
if bot.subs_only is False:
bot.whisper(source, "Why would you try to disable subonly, if it's not on in the first place? FailFish")
# Request to disable submode is ignored, but the return False ensures the user is refunded their points
return False
if bot.subs_only is True:
_cost = self.settings["suboff_cost"]
# Test this a bit. Make sure twitch doesn't bug out
bot.privmsg(".subscribersoff")
bot.execute_delayed(0.2, bot.privmsg, ".subscribersoff")
bot.whisper(source, f"You just used {_cost} points to put the chat into subscribers mode!")
return True
def load_commands(self, **options):
self.commands[
self.settings["subon_command_name"].lower().replace("!", "").replace(" ", "")
] = Command.raw_command(self.paid_subon, cost=self.settings["subon_cost"])
self.commands[
self.settings["suboff_command_name"].lower().replace("!", "").replace(" ", "")
] = Command.raw_command(self.paid_suboff, cost=self.settings["suboff_cost"])
|
452693
|
import numpy as np
import scipy.spatial as spatial
from source.base import file_utils
def get_aabb(points: np.ndarray):
aabb_min = points.min(axis=0)
aabb_max = points.max(axis=0)
return aabb_min, aabb_max
def load_xyz(file_path):
data = np.loadtxt(file_path).astype('float32')
nan_lines = np.isnan(data).any(axis=1)
num_nan_lines = np.sum(nan_lines)
if num_nan_lines > 0:
data = data[~nan_lines] # filter rows with nan values
print('Ignored {} points containing NaN coordinates in point cloud {}'.format(num_nan_lines, file_path))
return data
def write_ply(file_path: str, points: np.ndarray, normals=None, colors=None):
"""
Write point cloud file as .ply.
:param file_path:
:param points:
:param normals:
:param colors:
:return: None
"""
import trimesh
assert(file_path.endswith('.ply'))
file_utils.make_dir_for_file(file_path)
if points.shape == (3,):
points = np.expand_dims(points, axis=0)
if points.shape[0] == 3 and points.shape[1] != 3:
points = points.transpose([1, 0])
if colors is not None and colors.shape[0] == 3 and colors.shape[1] != 3:
colors = colors.transpose([1, 0])
if normals is not None and normals.shape[0] == 3 and normals.shape[1] != 3:
normals = normals.transpose([1, 0])
# convert 2d points to 3d
if points.shape[1] == 2:
vertices_2p5d = np.zeros((points.shape[0], 3))
vertices_2p5d[:, :2] = points
vertices_2p5d[:, 2] = 0.0
points = vertices_2p5d
mesh = trimesh.Trimesh(vertices=points, vertex_colors=colors, vertex_normals=normals)
mesh.export(file_path)
def write_xyz(file_path, points: np.ndarray, normals=None, colors=None):
"""
Write point cloud file.
:param file_path:
:param points:
:param normals:
:param colors:
:return: None
"""
file_utils.make_dir_for_file(file_path)
if points.shape == (3,):
points = np.expand_dims(points, axis=0)
if points.shape[0] == 3 and points.shape[1] != 3:
points = points.transpose([1, 0])
if colors is not None and colors.shape[0] == 3 and colors.shape[1] != 3:
colors = colors.transpose([1, 0])
if normals is not None and normals.shape[0] == 3 and normals.shape[1] != 3:
normals = normals.transpose([1, 0])
with open(file_path, 'w') as fp:
# convert 2d points to 3d
if points.shape[1] == 2:
vertices_2p5d = np.zeros((points.shape[0], 3))
vertices_2p5d[:, :2] = points
vertices_2p5d[:, 2] = 0.0
points = vertices_2p5d
# write points
# meshlab doesn't like colors, only using normals. try cloud compare instead.
for vi, v in enumerate(points):
line_vertex = str(v[0]) + " " + str(v[1]) + " " + str(v[2]) + " "
if normals is not None:
line_vertex += str(normals[vi][0]) + " " + str(normals[vi][1]) + " " + str(normals[vi][2]) + " "
if colors is not None:
line_vertex += str(colors[vi][0]) + " " + str(colors[vi][1]) + " " + str(colors[vi][2]) + " "
fp.write(line_vertex + "\n")
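# Minimal usage sketch for the writers above (random data, hypothetical paths):
#
#     pts = np.random.rand(100, 3).astype('float32')
#     write_xyz('out/cloud.xyz', pts)  # plain-text "x y z" per line
#     write_ply('out/cloud.ply', pts)  # .ply via trimesh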
def load_pcd(file_in):
# PCD: http://pointclouds.org/documentation/tutorials/pcd_file_format.php
# PCD RGB: http://docs.pointclouds.org/trunk/structpcl_1_1_r_g_b.html#a4ad91ab9726a3580e6dfc734ab77cd18
def read_header(lines_header):
header_info = dict()
def add_line_to_header_dict(header_dict, line, expected_field):
line_parts = line.split(sep=' ')
assert (line_parts[0] == expected_field), \
('Warning: "' + expected_field + '" expected but not found in pcd header!')
header_dict[expected_field] = (' '.join(line_parts[1:])).replace('\n', '')
add_line_to_header_dict(header_info, lines_header[0], '#')
add_line_to_header_dict(header_info, lines_header[1], 'VERSION')
add_line_to_header_dict(header_info, lines_header[2], 'FIELDS')
add_line_to_header_dict(header_info, lines_header[3], 'SIZE')
add_line_to_header_dict(header_info, lines_header[4], 'TYPE')
add_line_to_header_dict(header_info, lines_header[5], 'COUNT')
add_line_to_header_dict(header_info, lines_header[6], 'WIDTH')
add_line_to_header_dict(header_info, lines_header[7], 'HEIGHT')
add_line_to_header_dict(header_info, lines_header[8], 'VIEWPOINT')
add_line_to_header_dict(header_info, lines_header[9], 'POINTS')
add_line_to_header_dict(header_info, lines_header[10], 'DATA')
# TODO: lift limitations
assert header_info['VERSION'] == '0.7'
assert header_info['FIELDS'] == 'x y z rgb label'
assert header_info['SIZE'] == '4 4 4 4 4'
assert header_info['TYPE'] == 'F F F F U'
assert header_info['COUNT'] == '1 1 1 1 1'
# assert header_info['HEIGHT'] == '1'
assert header_info['DATA'] == 'ascii'
# assert header_info['WIDTH'] == header_info['POINTS']
return header_info
f = open(file_in, "r")
f_lines = f.readlines()
f_lines_header = f_lines[:11]
f_lines_points = f_lines[11:]
header_info = read_header(f_lines_header)
header_info['_file_'] = file_in
num_points = int(header_info['POINTS'])
point_data_list_str_ = [l.split(sep=' ')[:3] for l in f_lines_points]
point_data_list = [[float(l[0]), float(l[1]), float(l[2])] for l in point_data_list_str_]
# filter nan points that appear through the blensor kinect sensor
point_data_list = [p for p in point_data_list if
(not np.isnan(p[0]) and not np.isnan(p[1]) and not np.isnan(p[2]))]
point_data = np.array(point_data_list)
return point_data, header_info
def get_patch_radius(grid_res, epsilon):
return (1.0 + epsilon) / grid_res
def get_patch_kdtree(
kdtree: spatial.cKDTree, rng: np.random.RandomState,
query_point, patch_radius, points_per_patch, n_jobs):
if patch_radius <= 0.0:
pts_dists_ms, patch_pts_ids = kdtree.query(x=query_point, k=points_per_patch, n_jobs=n_jobs)
else:
patch_pts_ids = kdtree.query_ball_point(x=query_point, r=patch_radius, n_jobs=n_jobs)
patch_pts_ids = np.array(patch_pts_ids, dtype=np.int32)
point_count = patch_pts_ids.shape[0]
# if there are too many neighbors, pick a random subset
if point_count > points_per_patch:
patch_pts_ids = patch_pts_ids[rng.choice(np.arange(point_count), points_per_patch, replace=False)]
# pad with zeros
if point_count < points_per_patch:
missing_points = points_per_patch - point_count
padding = np.full((missing_points), -1, dtype=np.int32)
if point_count == 0:
patch_pts_ids = padding
else:
patch_pts_ids = np.concatenate((patch_pts_ids, padding), axis=0)
return patch_pts_ids
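# Patch-extraction sketch (assumes a SciPy version whose cKDTree queries still accept
# n_jobs; newer releases renamed that parameter to `workers`):
#
#     pts = np.random.rand(1000, 3)
#     tree = spatial.cKDTree(pts)
#     rng = np.random.RandomState(42)
#     radius = get_patch_radius(grid_res=64, epsilon=0.1)  # (1 + 0.1) / 64
#     ids = get_patch_kdtree(tree, rng, query_point=pts[0], patch_radius=radius,
#                            points_per_patch=300, n_jobs=1)  # padded with -1 if sparse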
|
452695
|
def displayBanner():
    # use a context manager so the banner file is closed after reading
    with open('./banner/banner.txt', 'r', encoding='utf8') as banner:
        print(banner.read())
def choice():
while True:
choice = int(input(">"))
if choice == 0:
break
elif choice == 1:
exit()
else:
print("Retry")
|
452713
|
import os
from testtools import TestCase
from cloudify_cli import inputs
from cloudify_cli.exceptions import CloudifyCliError
class InputsToDictTest(TestCase):
def test_valid_inline(self):
resources = ['key1=value1;key2=value2']
result = inputs.inputs_to_dict(resources)
self.assertDictEqual(result, {'key1': 'value1',
'key2': 'value2'})
def test_inline_not_dict(self):
resources = ['key1failure']
self._verify_not_dict(resources)
def test_invalid_yaml(self):
resources = [os.path.join(os.path.dirname(__file__),
'resources',
'inputs',
'bad_format.yaml')]
self._verify_root_cause(resources)
def test_yaml_not_dict(self):
resources = [os.path.join(os.path.dirname(__file__),
'resources',
'inputs',
'not_dict.yaml')]
self._verify_not_dict(resources)
def _verify_root_cause(self, resources):
with self.assertRaisesRegex(CloudifyCliError, 'Root cause'):
inputs.inputs_to_dict(resources)
def _verify_not_dict(self, resources):
with self.assertRaisesRegex(
CloudifyCliError, 'does not represent a dictionary'):
inputs.inputs_to_dict(resources)
|
452778
|
from cereal import car
from selfdrive.car import apply_toyota_steer_torque_limits
from selfdrive.car.chrysler.chryslercan import create_lkas_hud, create_lkas_command, \
create_wheel_buttons
from selfdrive.car.chrysler.values import CAR, CarControllerParams
from opendbc.can.packer import CANPacker
class CarController():
def __init__(self, dbc_name, CP, VM):
self.apply_steer_last = 0
self.ccframe = 0
self.prev_frame = -1
self.hud_count = 0
self.car_fingerprint = CP.carFingerprint
self.gone_fast_yet = False
self.steer_rate_limited = False
self.packer = CANPacker(dbc_name)
def update(self, enabled, CS, actuators, pcm_cancel_cmd, hud_alert):
# this seems needed to avoid steering faults and to force the sync with the EPS counter
frame = CS.lkas_counter
if self.prev_frame == frame:
return car.CarControl.Actuators.new_message(), []
# steer torque
new_steer = int(round(actuators.steer * CarControllerParams.STEER_MAX))
apply_steer = apply_toyota_steer_torque_limits(new_steer, self.apply_steer_last,
CS.out.steeringTorqueEps, CarControllerParams)
self.steer_rate_limited = new_steer != apply_steer
moving_fast = CS.out.vEgo > CS.CP.minSteerSpeed # for status message
if CS.out.vEgo > (CS.CP.minSteerSpeed - 0.5): # for command high bit
self.gone_fast_yet = True
elif self.car_fingerprint in (CAR.PACIFICA_2019_HYBRID, CAR.PACIFICA_2020, CAR.JEEP_CHEROKEE_2019):
if CS.out.vEgo < (CS.CP.minSteerSpeed - 3.0):
self.gone_fast_yet = False # < 14.5m/s stock turns off this bit, but fine down to 13.5
lkas_active = moving_fast and enabled
if not lkas_active:
apply_steer = 0
self.apply_steer_last = apply_steer
can_sends = []
#*** control msgs ***
if pcm_cancel_cmd:
# TODO: would be better to start from frame_2b3
new_msg = create_wheel_buttons(self.packer, self.ccframe, cancel=True)
can_sends.append(new_msg)
# LKAS_HEARTBIT is forwarded by Panda so no need to send it here.
# frame is 100Hz (0.01s period)
if (self.ccframe % 25 == 0): # 0.25s period
if (CS.lkas_car_model != -1):
new_msg = create_lkas_hud(
self.packer, CS.out.gearShifter, lkas_active, hud_alert,
self.hud_count, CS.lkas_car_model)
can_sends.append(new_msg)
self.hud_count += 1
new_msg = create_lkas_command(self.packer, int(apply_steer), self.gone_fast_yet, frame)
can_sends.append(new_msg)
self.ccframe += 1
self.prev_frame = frame
new_actuators = actuators.copy()
new_actuators.steer = apply_steer / CarControllerParams.STEER_MAX
return new_actuators, can_sends
|
452837
|
import logging
from claripy.vsa import StridedInterval
l = logging.getLogger("angr_tests")
def check_si_fields(si, stride, lb, ub):
if si.stride != stride:
return False
if si.lower_bound != lb:
return False
if si.upper_bound != ub:
return False
return True
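# Strided intervals live on a modular ring (4 bits here, values 0..15), so a join may
# legitimately wrap around: an interval with stride 1, lower_bound 14 and upper_bound 8
# covers 14, 15, 0, 1, ..., 8, which is exactly what the second case below asserts.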
def test_smart_join():
s1 = StridedInterval(bits=4, stride=3, lower_bound=9, upper_bound=12)
s2 = StridedInterval(bits=4, stride=3, lower_bound=0, upper_bound=3)
j = StridedInterval.pseudo_join(s1, s2)
u = StridedInterval.least_upper_bound(s1, s2)
assert check_si_fields(u, 3, 0, 12)
assert check_si_fields(j, 3, 0, 12)
s1 = StridedInterval(bits=4, stride=0, lower_bound=8, upper_bound=8)
s2 = StridedInterval(bits=4, stride=1, lower_bound=14, upper_bound=15)
s3 = StridedInterval(bits=4, stride=1, lower_bound=0, upper_bound=4)
u = StridedInterval.least_upper_bound(s1, s2, s3)
assert check_si_fields(u, 1, 14, 8)
s1 = StridedInterval(bits=4, stride=3, lower_bound=2, upper_bound=8)
s2 = StridedInterval(bits=4, stride=0, lower_bound=1, upper_bound=1)
j = StridedInterval.pseudo_join(s1, s2)
u = StridedInterval.least_upper_bound(s1, s2)
assert check_si_fields(u, 3, 2, 1)
assert check_si_fields(j, 3, 2, 1)
if __name__ == "__main__":
test_smart_join()
|
452869
|
from DRecPy.Recommender.Baseline.aggregation import mean
from DRecPy.Recommender.Baseline.aggregation import weighted_mean
import pytest
@pytest.fixture
def interactions():
return [5, 2, 3, 1]
@pytest.fixture
def interactions_zeroes():
return [0, 0, 0, 0]
@pytest.fixture
def similarities():
return [1, 0.2, 0.1, 0.8]
@pytest.fixture
def similarities_zeroes():
return [0, 0, 0, 0]
def test_mean_0(interactions):
assert mean(interactions, None) == 2.75
def test_mean_1(interactions_zeroes):
assert mean(interactions_zeroes, None) == 0
def test_mean_2():
assert mean([], None) is None
def test_weighted_mean_0(interactions, similarities):
assert round(weighted_mean(interactions, similarities), 4) == 3.0952
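    # By hand: (5*1 + 2*0.2 + 3*0.1 + 1*0.8) / (1 + 0.2 + 0.1 + 0.8) = 6.5 / 2.1 ≈ 3.0952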
def test_weighted_mean_1(interactions_zeroes, similarities):
assert weighted_mean(interactions_zeroes, similarities) == 0
def test_weighted_mean_2():
assert weighted_mean([], []) is None
def test_weighted_mean_3(interactions, similarities_zeroes):
assert weighted_mean(interactions, similarities_zeroes) is None
|
452889
|
def getNum(prompt = 'Positive real number, please: '):
while 1:
try:
            num = float(input(prompt))  # parse the raw string directly; no eval needed
if num >= 0:
return num
except ValueError:
print('Bad number')
num = getNum(prompt = 'Give it! ')
print(num)
|
452898
|
import argparse
import time
import os
import random
import collections
import numpy as np
import torch
from model import DAE, VAE, AAE
from vocab import Vocab
from meter import AverageMeter
from utils import set_seed, logging, load_sent
from batchify import get_batches
parser = argparse.ArgumentParser()
# Path arguments
parser.add_argument('--train', metavar='FILE', required=True,
help='path to training file')
parser.add_argument('--valid', metavar='FILE', required=True,
help='path to validation file')
parser.add_argument('--save-dir', default='checkpoints', metavar='DIR',
help='directory to save checkpoints and outputs')
parser.add_argument('--load-model', default='', metavar='FILE',
help='path to load checkpoint if specified')
# Architecture arguments
parser.add_argument('--vocab-size', type=int, default=10000, metavar='N',
help='keep N most frequent words in vocabulary')
parser.add_argument('--dim_z', type=int, default=128, metavar='D',
help='dimension of latent variable z')
parser.add_argument('--dim_emb', type=int, default=512, metavar='D',
help='dimension of word embedding')
parser.add_argument('--dim_h', type=int, default=1024, metavar='D',
help='dimension of hidden state per layer')
parser.add_argument('--nlayers', type=int, default=1, metavar='N',
help='number of layers')
parser.add_argument('--dim_d', type=int, default=512, metavar='D',
help='dimension of hidden state in AAE discriminator')
# Model arguments
parser.add_argument('--model_type', default='dae', metavar='M',
choices=['dae', 'vae', 'aae'],
help='which model to learn')
parser.add_argument('--lambda_kl', type=float, default=0, metavar='R',
help='weight for kl term in VAE')
parser.add_argument('--lambda_adv', type=float, default=0, metavar='R',
help='weight for adversarial loss in AAE')
parser.add_argument('--lambda_p', type=float, default=0, metavar='R',
help='weight for L1 penalty on posterior log-variance')
parser.add_argument('--noise', default='0,0,0,0', metavar='P,P,P,K',
                    help='word drop prob, blank prob, substitute prob, '
                    'max word shuffle distance')
# Training arguments
parser.add_argument('--dropout', type=float, default=0.5, metavar='DROP',
help='dropout probability (0 = no dropout)')
parser.add_argument('--lr', type=float, default=0.0005, metavar='LR',
help='learning rate')
#parser.add_argument('--clip', type=float, default=0.25, metavar='NORM',
# help='gradient clipping')
parser.add_argument('--epochs', type=int, default=50, metavar='N',
help='number of training epochs')
parser.add_argument('--batch-size', type=int, default=256, metavar='N',
help='batch size')
# Others
parser.add_argument('--seed', type=int, default=1111, metavar='N',
help='random seed')
parser.add_argument('--no-cuda', action='store_true',
help='disable CUDA')
parser.add_argument('--log-interval', type=int, default=100, metavar='N',
help='report interval')
def evaluate(model, batches):
model.eval()
meters = collections.defaultdict(lambda: AverageMeter())
with torch.no_grad():
for inputs, targets in batches:
losses = model.autoenc(inputs, targets)
for k, v in losses.items():
meters[k].update(v.item(), inputs.size(1))
loss = model.loss({k: meter.avg for k, meter in meters.items()})
meters['loss'].update(loss)
return meters
def main(args):
if not os.path.exists(args.save_dir):
os.makedirs(args.save_dir)
log_file = os.path.join(args.save_dir, 'log.txt')
logging(str(args), log_file)
# Prepare data
train_sents = load_sent(args.train)
logging('# train sents {}, tokens {}'.format(
len(train_sents), sum(len(s) for s in train_sents)), log_file)
valid_sents = load_sent(args.valid)
logging('# valid sents {}, tokens {}'.format(
len(valid_sents), sum(len(s) for s in valid_sents)), log_file)
vocab_file = os.path.join(args.save_dir, 'vocab.txt')
if not os.path.isfile(vocab_file):
Vocab.build(train_sents, vocab_file, args.vocab_size)
vocab = Vocab(vocab_file)
logging('# vocab size {}'.format(vocab.size), log_file)
set_seed(args.seed)
cuda = not args.no_cuda and torch.cuda.is_available()
device = torch.device('cuda' if cuda else 'cpu')
model = {'dae': DAE, 'vae': VAE, 'aae': AAE}[args.model_type](
vocab, args).to(device)
if args.load_model:
ckpt = torch.load(args.load_model)
model.load_state_dict(ckpt['model'])
model.flatten()
logging('# model parameters: {}'.format(
sum(x.data.nelement() for x in model.parameters())), log_file)
train_batches, _ = get_batches(train_sents, vocab, args.batch_size, device)
valid_batches, _ = get_batches(valid_sents, vocab, args.batch_size, device)
best_val_loss = None
for epoch in range(args.epochs):
start_time = time.time()
logging('-' * 80, log_file)
model.train()
meters = collections.defaultdict(lambda: AverageMeter())
indices = list(range(len(train_batches)))
random.shuffle(indices)
for i, idx in enumerate(indices):
inputs, targets = train_batches[idx]
losses = model.autoenc(inputs, targets, is_train=True)
losses['loss'] = model.loss(losses)
model.step(losses)
for k, v in losses.items():
meters[k].update(v.item())
if (i + 1) % args.log_interval == 0:
log_output = '| epoch {:3d} | {:5d}/{:5d} batches |'.format(
epoch + 1, i + 1, len(indices))
for k, meter in meters.items():
log_output += ' {} {:.2f},'.format(k, meter.avg)
meter.clear()
logging(log_output, log_file)
valid_meters = evaluate(model, valid_batches)
logging('-' * 80, log_file)
log_output = '| end of epoch {:3d} | time {:5.0f}s | valid'.format(
epoch + 1, time.time() - start_time)
for k, meter in valid_meters.items():
log_output += ' {} {:.2f},'.format(k, meter.avg)
if not best_val_loss or valid_meters['loss'].avg < best_val_loss:
log_output += ' | saving model'
ckpt = {'args': args, 'model': model.state_dict()}
torch.save(ckpt, os.path.join(args.save_dir, 'model.pt'))
best_val_loss = valid_meters['loss'].avg
logging(log_output, log_file)
logging('Done training', log_file)
if __name__ == '__main__':
args = parser.parse_args()
args.noise = [float(x) for x in args.noise.split(',')]
main(args)
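# Example invocation (a sketch, not from the original; script name and data
# paths are assumptions -- the flags themselves are defined above):
#   python train.py --train data/train.txt --valid data/valid.txt \
#       --model_type vae --lambda_kl 0.1 --noise 0.3,0,0,0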
|
452917
|
from django.template.response import TemplateResponse
def not_found(request, exception=None):
"""404 error handler which includes ``request`` in the context."""
return TemplateResponse(
request, "status/404.html", {"request": request}, status=404
)
def server_error(request):
"""500 error handler which includes ``request`` in the context."""
return TemplateResponse(
request, "status/500.html", {"request": request}, status=500
)
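# Wiring sketch (module path is an assumption): Django picks these handlers up
# when the root URLconf names them, e.g. in urls.py:
#   handler404 = 'myproject.views.status.not_found'
#   handler500 = 'myproject.views.status.server_error'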
|
452920
|
from logging import getLogger
import numpy as np
import scipy.stats as stats
from .controller import Controller
from ..envs.cost import calc_cost
logger = getLogger(__name__)
class DDP(Controller):
""" Differential Dynamic Programming
Ref:
<NAME>., <NAME>., & <NAME>. (2012).
In 2012 IEEE/RSJ International Conference on
Intelligent Robots and Systems (pp. 4906-4913). and Study Wolf,
https://github.com/studywolf/control, and
https://github.com/anassinator/ilqr
"""
def __init__(self, config, model):
"""
"""
super(DDP, self).__init__(config, model)
# model
self.model = model
# get cost func
self.state_cost_fn = config.state_cost_fn
self.terminal_state_cost_fn = config.terminal_state_cost_fn
self.input_cost_fn = config.input_cost_fn
self.gradient_cost_fn_state = config.gradient_cost_fn_state
self.gradient_cost_fn_input = config.gradient_cost_fn_input
self.hessian_cost_fn_state = config.hessian_cost_fn_state
self.hessian_cost_fn_input = config.hessian_cost_fn_input
self.hessian_cost_fn_input_state = \
config.hessian_cost_fn_input_state
# controller parameters
self.max_iters = config.opt_config["DDP"]["max_iters"]
self.init_mu = config.opt_config["DDP"]["init_mu"]
self.mu = self.init_mu
self.mu_min = config.opt_config["DDP"]["mu_min"]
self.mu_max = config.opt_config["DDP"]["mu_max"]
self.init_delta = config.opt_config["DDP"]["init_delta"]
self.delta = self.init_delta
self.threshold = config.opt_config["DDP"]["threshold"]
# general parameters
self.pred_len = config.PRED_LEN
self.input_size = config.INPUT_SIZE
self.dt = config.DT
# cost parameters
self.Q = config.Q
self.R = config.R
self.Sf = config.Sf
# initialize
self.prev_sol = np.zeros((self.pred_len, self.input_size))
def clear_sol(self):
""" clear prev sol
"""
logger.debug("Clear Sol")
self.prev_sol = np.zeros((self.pred_len, self.input_size))
def obtain_sol(self, curr_x, g_xs):
""" calculate the optimal inputs
Args:
curr_x (numpy.ndarray): current state, shape(state_size, )
            g_xs (numpy.ndarray): goal trajectory, shape(plan_len, state_size)
Returns:
opt_input (numpy.ndarray): optimal input, shape(input_size, )
"""
# initialize
opt_count = 0
sol = self.prev_sol.copy()
converged_sol = False
update_sol = True
self.mu = self.init_mu
self.delta = self.init_delta
# line search param
alphas = 1.1**(-np.arange(10)**2)
while opt_count < self.max_iters:
accepted_sol = False
# forward
            if update_sol:
pred_xs, cost, f_x, f_u, f_xx, f_ux, f_uu,\
l_x, l_xx, l_u, l_uu, l_ux = \
self.forward(curr_x, g_xs, sol)
update_sol = False
try:
# backward
k, K = self.backward(f_x, f_u, f_xx, f_ux, f_uu,
l_x, l_xx, l_u, l_uu, l_ux)
# line search
for alpha in alphas:
new_pred_xs, new_sol = \
self.calc_input(k, K, pred_xs, sol, alpha)
new_cost = calc_cost(new_pred_xs[np.newaxis, :, :],
new_sol[np.newaxis, :, :],
g_xs[np.newaxis, :, :],
self.state_cost_fn,
self.input_cost_fn,
self.terminal_state_cost_fn)
if new_cost < cost:
if np.abs((cost - new_cost) / cost) < self.threshold:
converged_sol = True
cost = new_cost
pred_xs = new_pred_xs
sol = new_sol
update_sol = True
# decrease regularization term
self.delta = min(1.0, self.delta) / self.init_delta
self.mu *= self.delta
if self.mu <= self.mu_min:
self.mu = 0.0
# accept the solution
accepted_sol = True
break
except np.linalg.LinAlgError as e:
logger.debug("Non ans : {}".format(e))
if not accepted_sol:
# increase regularization term.
self.delta = max(1.0, self.delta) * self.init_delta
self.mu = max(self.mu_min, self.mu * self.delta)
logger.debug("Update regularization term to {}"
.format(self.mu))
if self.mu >= self.mu_max:
logger.debug("Reach Max regularization term")
break
if converged_sol:
logger.debug("Get converged sol")
break
opt_count += 1
# update prev sol
self.prev_sol[:-1] = sol[1:]
self.prev_sol[-1] = sol[-1] # last use the terminal input
return sol[0]
def calc_input(self, k, K, pred_xs, sol, alpha):
""" calc input trajectory by using k and K
Args:
k (numpy.ndarray): gain, shape(pred_len, input_size)
K (numpy.ndarray): gain, shape(pred_len, input_size, state_size)
pred_xs (numpy.ndarray): predicted state,
shape(pred_len+1, state_size)
sol (numpy.ndarray): input trajectory, previous solutions
shape(pred_len, input_size)
alpha (float): param of line search
Returns:
new_pred_xs (numpy.ndarray): update state trajectory,
shape(pred_len+1, state_size)
new_sol (numpy.ndarray): update input trajectory,
shape(pred_len, input_size)
"""
# get size
(pred_len, input_size, state_size) = K.shape
# initialize
new_pred_xs = np.zeros((pred_len+1, state_size))
new_pred_xs[0] = pred_xs[0].copy() # init state is same
new_sol = np.zeros((pred_len, input_size))
for t in range(pred_len):
new_sol[t] = sol[t] \
+ alpha * k[t] \
+ np.dot(K[t], (new_pred_xs[t] - pred_xs[t]))
new_pred_xs[t+1] = self.model.predict_next_state(new_pred_xs[t],
new_sol[t])
return new_pred_xs, new_sol
def forward(self, curr_x, g_xs, sol):
""" forward step of iLQR
Args:
curr_x (numpy.ndarray): current state, shape(state_size, )
            g_xs (numpy.ndarray): goal trajectory, shape(plan_len, state_size)
sol (numpy.ndarray): solutions, shape(plan_len, input_size)
        Returns:
            f_x (numpy.ndarray): gradient of model with respect to state,
                shape(pred_len, state_size, state_size)
            f_u (numpy.ndarray): gradient of model with respect to input,
                shape(pred_len, state_size, input_size)
            f_xx (numpy.ndarray): hessian of model with respect to state,
                shape(pred_len, state_size, state_size, state_size)
            f_ux (numpy.ndarray): hessian of model with respect to input and state,
                shape(pred_len, state_size, input_size, state_size)
            f_uu (numpy.ndarray): hessian of model with respect to input,
                shape(pred_len, state_size, input_size, input_size)
            l_x (numpy.ndarray): gradient of cost with respect to state,
                shape(pred_len+1, state_size)
            l_u (numpy.ndarray): gradient of cost with respect to input,
                shape(pred_len, input_size)
            l_xx (numpy.ndarray): hessian of cost with respect to state,
                shape(pred_len+1, state_size, state_size)
            l_uu (numpy.ndarray): hessian of cost with respect to input,
                shape(pred_len, input_size, input_size)
            l_ux (numpy.ndarray): hessian of cost with respect
                to state and input, shape(pred_len, input_size, state_size)
        """
# simulate forward using the current control trajectory
pred_xs = self.model.predict_traj(curr_x, sol)
# check costs
cost = self.calc_cost(curr_x,
sol[np.newaxis, :, :],
g_xs)
        # calc gradient in batch
f_x = self.model.calc_f_x(pred_xs[:-1], sol, self.dt)
f_u = self.model.calc_f_u(pred_xs[:-1], sol, self.dt)
# calc hessian in batch
f_xx = self.model.calc_f_xx(pred_xs[:-1], sol, self.dt)
f_ux = self.model.calc_f_ux(pred_xs[:-1], sol, self.dt)
f_uu = self.model.calc_f_uu(pred_xs[:-1], sol, self.dt)
        # gradient of costs
l_x, l_xx, l_u, l_uu, l_ux = \
self._calc_gradient_hessian_cost(pred_xs, g_xs, sol)
return pred_xs, cost, f_x, f_u, f_xx, f_ux, f_uu, \
l_x, l_xx, l_u, l_uu, l_ux
def _calc_gradient_hessian_cost(self, pred_xs, g_x, sol):
""" calculate gradient and hessian of model and cost fn
Args:
pred_xs (numpy.ndarray): predict traj,
shape(pred_len+1, state_size)
sol (numpy.ndarray): input traj,
shape(pred_len, input_size)
Returns
l_x (numpy.ndarray): gradient of cost,
shape(pred_len+1, state_size)
l_u (numpy.ndarray): gradient of cost,
shape(pred_len, input_size)
l_xx (numpy.ndarray): hessian of cost,
shape(pred_len+1, state_size, state_size)
l_uu (numpy.ndarray): hessian of cost,
shape(pred_len+1, input_size, input_size)
l_ux (numpy.ndarray): hessian of cost,
shape(pred_len, input_size, state_size)
"""
# l_x.shape = (pred_len+1, state_size)
l_x = self.gradient_cost_fn_state(pred_xs[:-1],
g_x[:-1], terminal=False)
terminal_l_x = \
self.gradient_cost_fn_state(pred_xs[-1],
g_x[-1], terminal=True)
l_x = np.concatenate((l_x, terminal_l_x), axis=0)
# l_u.shape = (pred_len, input_size)
l_u = self.gradient_cost_fn_input(pred_xs[:-1], sol)
# l_xx.shape = (pred_len+1, state_size, state_size)
l_xx = self.hessian_cost_fn_state(pred_xs[:-1],
g_x[:-1], terminal=False)
terminal_l_xx = \
self.hessian_cost_fn_state(pred_xs[-1],
g_x[-1], terminal=True)
l_xx = np.concatenate((l_xx, terminal_l_xx), axis=0)
# l_uu.shape = (pred_len, input_size, input_size)
l_uu = self.hessian_cost_fn_input(pred_xs[:-1], sol)
# l_ux.shape = (pred_len, input_size, state_size)
l_ux = self.hessian_cost_fn_input_state(pred_xs[:-1], sol)
return l_x, l_xx, l_u, l_uu, l_ux
def backward(self, f_x, f_u, f_xx, f_ux, f_uu, l_x, l_xx, l_u, l_uu, l_ux):
""" backward step of iLQR
Args:
            f_x (numpy.ndarray): gradient of model with respect to state,
                shape(pred_len, state_size, state_size)
            f_u (numpy.ndarray): gradient of model with respect to input,
                shape(pred_len, state_size, input_size)
            f_xx (numpy.ndarray): hessian of model with respect to state,
                shape(pred_len, state_size, state_size, state_size)
            f_ux (numpy.ndarray): hessian of model with respect to input and state,
                shape(pred_len, state_size, input_size, state_size)
            f_uu (numpy.ndarray): hessian of model with respect to input,
                shape(pred_len, state_size, input_size, input_size)
            l_x (numpy.ndarray): gradient of cost with respect to state,
                shape(pred_len+1, state_size)
            l_u (numpy.ndarray): gradient of cost with respect to input,
                shape(pred_len, input_size)
            l_xx (numpy.ndarray): hessian of cost with respect to state,
                shape(pred_len+1, state_size, state_size)
            l_uu (numpy.ndarray): hessian of cost with respect to input,
                shape(pred_len, input_size, input_size)
l_ux (numpy.ndarray): hessian of cost with respect
to state and input, shape(pred_len, input_size, state_size)
Returns:
k (numpy.ndarray): gain, shape(pred_len, input_size)
K (numpy.ndarray): gain, shape(pred_len, input_size, state_size)
"""
# get size
(_, state_size, _) = f_x.shape
        # initialize
V_x = l_x[-1]
V_xx = l_xx[-1]
k = np.zeros((self.pred_len, self.input_size))
K = np.zeros((self.pred_len, self.input_size, state_size))
for t in range(self.pred_len-1, -1, -1):
# get Q val
Q_x, Q_u, Q_xx, Q_ux, Q_uu = self._Q(f_x[t], f_u[t],
f_xx[t], f_ux[t], f_uu[t],
l_x[t],
l_u[t], l_xx[t], l_ux[t],
l_uu[t], V_x, V_xx)
# calc gain
k[t] = - np.linalg.solve(Q_uu, Q_u)
K[t] = - np.linalg.solve(Q_uu, Q_ux)
# update V_x val
V_x = Q_x + np.dot(np.dot(K[t].T, Q_uu), k[t])
V_x += np.dot(K[t].T, Q_u) + np.dot(Q_ux.T, k[t])
# update V_xx val
V_xx = Q_xx + np.dot(np.dot(K[t].T, Q_uu), K[t])
V_xx += np.dot(K[t].T, Q_ux) + np.dot(Q_ux.T, K[t])
V_xx = 0.5 * (V_xx + V_xx.T) # to maintain symmetry.
return k, K
def _Q(self, f_x, f_u, f_xx, f_ux, f_uu,
l_x, l_u, l_xx, l_ux, l_uu, V_x, V_xx):
""" compute Q function valued
Args:
            f_x (numpy.ndarray): gradient of model with respect to state,
                shape(state_size, state_size)
            f_u (numpy.ndarray): gradient of model with respect to input,
                shape(state_size, input_size)
            f_xx (numpy.ndarray): hessian of model with respect to state,
                shape(state_size, state_size, state_size)
            f_ux (numpy.ndarray): hessian of model with respect to input and state,
                shape(state_size, input_size, state_size)
            f_uu (numpy.ndarray): hessian of model with respect to input,
                shape(state_size, input_size, input_size)
            l_x (numpy.ndarray): gradient of cost with respect to state,
                shape(state_size, )
            l_u (numpy.ndarray): gradient of cost with respect to input,
                shape(input_size, )
            l_xx (numpy.ndarray): hessian of cost with respect to state,
                shape(state_size, state_size)
            l_uu (numpy.ndarray): hessian of cost with respect to input,
                shape(input_size, input_size)
            l_ux (numpy.ndarray): hessian of cost with respect
                to state and input, shape(input_size, state_size)
V_x (numpy.ndarray): gradient of Value function,
shape(state_size, )
V_xx (numpy.ndarray): hessian of Value function,
shape(state_size, state_size)
Returns:
Q_x (numpy.ndarray): gradient of Q function, shape(state_size, )
Q_u (numpy.ndarray): gradient of Q function, shape(input_size, )
            Q_xx (numpy.ndarray): hessian of Q function,
                shape(state_size, state_size)
            Q_ux (numpy.ndarray): hessian of Q function,
                shape(input_size, state_size)
            Q_uu (numpy.ndarray): hessian of Q function,
                shape(input_size, input_size)
"""
# get size
state_size = len(l_x)
Q_x = l_x + np.dot(f_x.T, V_x)
Q_u = l_u + np.dot(f_u.T, V_x)
Q_xx = l_xx + np.dot(np.dot(f_x.T, V_xx), f_x)
reg = self.mu * np.eye(state_size)
Q_ux = l_ux + np.dot(np.dot(f_u.T, (V_xx + reg)), f_x)
Q_uu = l_uu + np.dot(np.dot(f_u.T, (V_xx + reg)), f_u)
        # tensor contraction
Q_xx += np.tensordot(V_x, f_xx, axes=1)
Q_ux += np.tensordot(V_x, f_ux, axes=1)
Q_uu += np.tensordot(V_x, f_uu, axes=1)
return Q_x, Q_u, Q_xx, Q_ux, Q_uu
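# Usage sketch (not part of the original module; the config and model names are
# hypothetical stand-ins for any pair exposing the attributes read in
# DDP.__init__, e.g. opt_config, PRED_LEN, INPUT_SIZE, DT and the cost
# gradient/hessian callbacks):
#
#   config = TwoWheeledConfigModule()   # hypothetical config class
#   model = TwoWheeledModel(config)     # hypothetical dynamics model
#   controller = DDP(config, model)
#   u = controller.obtain_sol(curr_x, g_xs)  # returns the first optimal input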
|
452937
|
from typing import Optional, List, Union
from .generic import MeshTorchLayer, PermutationLayer
from ..meshmodel import RectangularMeshModel, TriangularMeshModel, PermutingRectangularMeshModel, ButterflyMeshModel
from ..helpers import rectangular_permutation, butterfly_layer_permutation
from ..config import DEFAULT_BASIS
import numpy as np
class RMTorch(MeshTorchLayer):
"""Rectangular mesh network layer for unitary operators implemented in tensorflow
Args:
units: The dimension of the unitary matrix (:math:`N`)
num_layers: The number of layers (:math:`L`) of the mesh
hadamard: Hadamard convention for the beamsplitters
basis: Phase basis to use
bs_error: Beamsplitter split ratio error
theta_init: Initializer for :code:`theta` (:math:`\\boldsymbol{\\theta}` or :math:`\\theta_{n\ell}`)
a :code:`str`, :code:`ndarray`, or tuple of the form :code:`(theta_init, theta_fn)`.
phi_init: Initializer for :code:`phi` (:math:`\\boldsymbol{\\phi}` or :math:`\\phi_{n\ell}`):
a :code:`str`, :code:`ndarray`, or tuple of the form :code:`(phi_init, phi_fn)`.
gamma_init: Initializer for :code:`gamma` (:math:`\\boldsymbol{\\gamma}` or :math:`\\gamma_{n}`):
a :code:`str`, :code:`ndarray`, or tuple of the form :code:`(gamma_init, gamma_fn)`.
"""
def __init__(self, units: int, num_layers: int = None, hadamard: bool = False, basis: str = DEFAULT_BASIS,
bs_error: float = 0.0, theta_init: Union[str, tuple, np.ndarray] = "haar_rect",
phi_init: Union[str, tuple, np.ndarray] = "random_phi",
gamma_init: Union[str, tuple, np.ndarray] = "random_gamma"):
super(RMTorch, self).__init__(
RectangularMeshModel(units, num_layers, hadamard, bs_error, basis,
theta_init, phi_init, gamma_init))
class TMTorch(MeshTorchLayer):
"""Triangular mesh network layer for unitary operators implemented in tensorflow
Args:
units: The dimension of the unitary matrix (:math:`N`)
hadamard: Hadamard convention for the beamsplitters
basis: Phase basis to use
bs_error: Beamsplitter split ratio error
theta_init: Initializer for :code:`theta` (:math:`\\boldsymbol{\\theta}` or :math:`\\theta_{n\ell}`)
a :code:`str`, :code:`ndarray`, or tuple of the form :code:`(theta_init, theta_fn)`.
phi_init: Initializer for :code:`phi` (:math:`\\boldsymbol{\\phi}` or :math:`\\phi_{n\ell}`):
a :code:`str`, :code:`ndarray`, or tuple of the form :code:`(phi_init, phi_fn)`.
gamma_init: Initializer for :code:`gamma` (:math:`\\boldsymbol{\\gamma}` or :math:`\\gamma_{n}`):
a :code:`str`, :code:`ndarray`, or tuple of the form :code:`(gamma_init, gamma_fn)`.
"""
def __init__(self, units: int, hadamard: bool = False, basis: str = DEFAULT_BASIS,
bs_error: float = 0.0, theta_init: Union[str, tuple, np.ndarray] = "haar_rect",
phi_init: Union[str, tuple, np.ndarray] = "random_phi",
gamma_init: Union[str, tuple, np.ndarray] = "random_gamma"):
super(TMTorch, self).__init__(
TriangularMeshModel(units, hadamard, bs_error, basis,
theta_init, phi_init, gamma_init)
)
class PRMTorch(MeshTorchLayer):
"""Permuting rectangular mesh unitary layer
Args:
units: The dimension of the unitary matrix (:math:`N`) to be modeled by this transformer
tunable_layers_per_block: The number of tunable layers per block (overrides :code:`num_tunable_layers_list`, :code:`sampling_frequencies`)
num_tunable_layers_list: Number of tunable layers in each block in order from left to right
        sampling_frequencies: Frequencies of the sampling (permutation) layers placed between the tunable layers
bs_error: Photonic error in the beamsplitter
theta_init: Initializer for :code:`theta` (:math:`\\boldsymbol{\\theta}` or :math:`\\theta_{n\ell}`)
a :code:`str`, :code:`ndarray`, or tuple of the form :code:`(theta_init, theta_fn)`.
phi_init: Initializer for :code:`phi` (:math:`\\boldsymbol{\\phi}` or :math:`\\phi_{n\ell}`):
a :code:`str`, :code:`ndarray`, or tuple of the form :code:`(phi_init, phi_fn)`.
gamma_init: Initializer for :code:`gamma` (:math:`\\boldsymbol{\\gamma}` or :math:`\\gamma_{n}`):
a :code:`str`, :code:`ndarray`, or tuple of the form :code:`(gamma_init, gamma_fn)`.
"""
def __init__(self, units: int, tunable_layers_per_block: int = None,
num_tunable_layers_list: Optional[List[int]] = None, sampling_frequencies: Optional[List[int]] = None,
bs_error: float = 0.0, hadamard: bool = False,
theta_init: Union[str, tuple, np.ndarray] = "haar_rect",
phi_init: Union[str, tuple, np.ndarray] = "random_phi",
gamma_init: Union[str, tuple, np.ndarray] = "random_gamma"):
if theta_init == 'haar_prm' and tunable_layers_per_block is not None:
raise NotImplementedError('haar_prm initializer is incompatible with setting tunable_layers_per_block.')
super(PRMTorch, self).__init__(
PermutingRectangularMeshModel(units, tunable_layers_per_block, num_tunable_layers_list,
sampling_frequencies, bs_error, hadamard,
theta_init, phi_init, gamma_init)
)
class BMTorch(MeshTorchLayer):
"""Butterfly mesh unitary layer
Args:
hadamard: Hadamard convention for the beamsplitters
basis: Phase basis to use
bs_error: Beamsplitter split ratio error
theta_init: Initializer for :code:`theta` (:math:`\\boldsymbol{\\theta}` or :math:`\\theta_{n\ell}`)
a :code:`str`, :code:`ndarray`, or tuple of the form :code:`(theta_init, theta_fn)`.
phi_init: Initializer for :code:`phi` (:math:`\\boldsymbol{\\phi}` or :math:`\\phi_{n\ell}`):
a :code:`str`, :code:`ndarray`, or tuple of the form :code:`(phi_init, phi_fn)`.
gamma_init: Initializer for :code:`gamma` (:math:`\\boldsymbol{\\gamma}` or :math:`\\gamma_{n}`):
a :code:`str`, :code:`ndarray`, or tuple of the form :code:`(gamma_init, gamma_fn)`.
"""
def __init__(self, num_layers: int, hadamard: bool = False, basis: str = DEFAULT_BASIS,
bs_error: float = 0.0, theta_init: Union[str, tuple, np.ndarray] = "haar_rect",
phi_init: Union[str, tuple, np.ndarray] = "random_phi",
gamma_init: Union[str, tuple, np.ndarray] = "random_gamma"):
super(BMTorch, self).__init__(
ButterflyMeshModel(num_layers, hadamard, bs_error, basis, theta_init, phi_init, gamma_init)
)
class RectangularPerm(PermutationLayer):
"""Rectangular permutation layer
    The rectangular permutation layer for a frequency :math:`f` is effectively equivalent to adding
    :math:`f` layers of cross-state MZIs in a grid configuration to the existing mesh.
Args:
units: Dimension of the input (number of input waveguide ports), :math:`N`
frequency: Frequency of interacting mesh wires (waveguides)
"""
def __init__(self, units: int, frequency: int):
self.frequency = frequency
super(RectangularPerm, self).__init__(permuted_indices=rectangular_permutation(units, frequency))
class ButterflyPerm(PermutationLayer):
"""Butterfly (FFT) permutation layer
The butterfly or FFT permutation for a frequency :math:`f` corresponds to switching all inputs
that are :math:`f` inputs apart. This works most cleanly in a butterfly mesh architecture where
the number of inputs, :math:`N`, and the frequencies, :math:`f` are powers of two.
Args:
units: Dimension of the input (number of input waveguide ports), :math:`N`
frequency: Frequency of interacting mesh wires (waveguides)
"""
def __init__(self, units: int, frequency: int):
self.frequency = frequency
super(ButterflyPerm, self).__init__(permuted_indices=butterfly_layer_permutation(units, frequency))
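# Usage sketch (not part of the original module; the exact expected input
# dtype/shape is an assumption -- consult MeshTorchLayer for specifics):
#
#   layer = RMTorch(units=4)   # models a 4x4 unitary with default Haar init
#   y = layer(x)               # x: batch of length-4 input vectors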
|
452969
|
import math
import numpy as np
import sys
from collections import Counter
import torch
from torch.autograd import Variable
import torch.nn as nn
import editdistance
import models
import data
import ops
from cuda import CUDA
def get_precisions_recalls_DEPRECIATED(inputs, preds, ground_truths):
""" v1 of precision/recall based on some dumb logic """
def precision_recall(src, tgt, pred):
"""
src: [string tokens], the input to the model
tgt: [string tokens], the gold targets
pred: [string tokens], the model outputs
"""
tgt_unique = set(tgt) - set(src)
src_unique = set(src) - set(tgt)
# new words the model correctly introduced
true_positives = len(set(pred) & tgt_unique)
# new words the model incorrectly introduced
false_positives = len(set(pred) - set(src) - set(tgt))
# old words the model incorrectly retained
false_negatives = len(set(pred) & src_unique)
precision = true_positives * 1.0 / (true_positives + false_positives + 0.001)
        recall = true_positives * 1.0 / (true_positives + false_negatives + 0.001)
return precision, recall
[precisions, recalls] = list(zip(*[
precision_recall(src, tgt, pred)
for src, tgt, pred in zip(inputs, ground_truths, preds)
]))
return precisions, recalls
#########################################################################
# ABOVE THIS LINE ARE DEPRECATED METHODS...TREAD CAREFULLY
#########################################################################
def bleu_stats(hypothesis, reference, word_list=None):
"""Compute statistics for BLEU."""
def is_valid_ngram(ngram):
if word_list is None:
return True
else:
return len(set(word_list) & set(ngram)) > 0
stats = []
stats.append(len(hypothesis))
stats.append(len(reference))
for n in range(1, 5):
s_ngrams = Counter([
tuple(hypothesis[i:i + n]) for i in range(len(hypothesis) + 1 - n)
if is_valid_ngram(hypothesis[i:i + n])
])
r_ngrams = Counter([
tuple(reference[i:i + n]) for i in range(len(reference) + 1 - n)
if is_valid_ngram(reference[i:i + n])
])
stats.append(max([sum((s_ngrams & r_ngrams).values()), 0]))
stats.append(max([len(hypothesis) + 1 - n, 0]))
return stats
def bleu(stats):
"""Compute BLEU given n-gram statistics."""
    if any(x == 0 for x in stats):
return 0
(c, r) = stats[:2]
log_bleu_prec = sum(
[math.log(float(x) / y) for x, y in zip(stats[2::2], stats[3::2])]
) / 4.
return math.exp(min([0, 1 - float(r) / c]) + log_bleu_prec)
def get_bleu(hypotheses, reference, word_lists=None):
"""Get validation BLEU score for dev set.
If provided with a list of word lists, then we'll only consider
ngrams with words from that list.
"""
stats = np.array([0., 0., 0., 0., 0., 0., 0., 0., 0., 0.])
if word_lists is None:
word_lists = [None for _ in range(len(hypotheses))]
for hyp, ref, wlist in zip(hypotheses, reference, word_lists):
stats += np.array(bleu_stats(hyp, ref, word_list=wlist))
return 100 * bleu(stats)
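# Worked example (not in the original): identical 4-token sentences give a
# perfect score, while anything shorter than 4 tokens scores 0 because its
# 4-gram counts are zero and bleu() short-circuits on any zero statistic:
#   get_bleu([['the', 'cat', 'sat', 'down']], [['the', 'cat', 'sat', 'down']])  # -> 100.0
#   get_bleu([['the', 'cat', 'sat']], [['the', 'cat', 'sat']])                  # -> 0.0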
def get_precision_recall(inputs, top_k_preds, ground_truths, k=None):
"""
Precision@k = (# of generated candidates @k that are relevant to targets) / (# of generated candidates @k)
Recall@k = (# of generated candidates @k that are relevant to targets) / (total # of relevant targets)
top_k_preds: [Batch, length, k]
"""
if not k:
k = len(top_k_preds[0][0])
def precision_recall(src, tgt, top_k):
tgt_unique = set(tgt) - set(src)
pred_toks = [tok for klist in top_k for tok in klist[:k]]
precision = len(tgt_unique & set(pred_toks)) * 1.0 / (len(pred_toks) + 0.0001)
recall = len(tgt_unique & set(pred_toks)) * 1.0 / (len(tgt_unique) + 0.0001)
return precision, recall
[precisions, recalls] = list(zip(*[
precision_recall(src, tgt, pred)
for src, tgt, pred in zip(inputs, ground_truths, top_k_preds)
]))
return np.average(precisions), np.average(recalls)
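# Worked example (hypothetical tokens): with src=['a','b'], tgt=['a','c'] and
# top-k predictions [[['c','d'], ['x','y']]] (one sentence, length 2, k=2),
# tgt_unique={'c'} and pred_toks=['c','d','x','y'], so
# precision ~= 1/4 = 0.25 and recall ~= 1/1 = 1.0 (up to the 1e-4 smoothing).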
def get_edit_distance(hypotheses, reference):
ed = 0
for hyp, ref in zip(hypotheses, reference):
ed += editdistance.eval(hyp, ref)
return ed * 1.0 / len(hypotheses)
def decode_minibatch(max_len, start_id, model, src_input, srclens, srcmask,
aux_input, auxlens, auxmask, side_info, k):
""" argmax decoding """
# Initialize target with <s> for every sentence
tgt_input = Variable(torch.LongTensor([
[start_id] for i in range(src_input.size(0))
]))
if CUDA:
tgt_input = tgt_input.cuda()
top_k_toks = []
for i in range(max_len):
# run input through the model
decoder_logit, word_probs, _, _ = model(src_input, tgt_input, srcmask, srclens,
aux_input, auxmask, auxlens, side_info)
# logits for the latest timestep
word_probs = word_probs.data.cpu().numpy()[:, -1, :]
# sorted indices (descending)
sorted_indices = np.argsort((word_probs))[:, ::-1]
# select the predicted "next" tokens, attach to target-side inputs
next_preds = Variable(torch.from_numpy(sorted_indices[:, 0]))
if CUDA:
next_preds = next_preds.cuda()
tgt_input = torch.cat((tgt_input, next_preds.unsqueeze(1)), dim=1)
# remember the top k indices at this step for evaluation
top_k_toks.append( sorted_indices[:, :k] )
# make top_k_toks into [Batch, Length, k] tensor
top_k_toks = np.array(top_k_toks)
top_k_toks = np.transpose(top_k_toks, (1, 0, 2))
# make sure the top k=1 tokens is equal to the true model predictions (argmax)
assert np.array_equal(
top_k_toks[:, :, 0],
tgt_input[:, 1:].data.cpu().numpy()) # ignore <s> kickstart
return top_k_toks
# convert seqs to tokens
def ids_to_toks(tok_seqs, id2tok, sort_indices, cuts=None, save_cuts=False):
out = []
cut_indices = []
# take off the gpu
if isinstance(tok_seqs, torch.Tensor):
tok_seqs = tok_seqs.cpu().numpy()
# convert to toks, cut off at </s>
for i, line in enumerate(tok_seqs):
toks = [id2tok[x] for x in line]
if cuts is not None:
cut_idx = cuts[i]
elif '</s>' in toks:
cut_idx = toks.index('</s>')
else:
cut_idx = len(toks)
out.append( toks[:cut_idx] )
cut_indices += [cut_idx]
# unsort
out = data.unsort(out, sort_indices)
if save_cuts:
return out, cut_indices
else:
return out
def decode_dataset(model, src, tgt, config, k=20):
"""Evaluate model."""
inputs = []
preds = []
top_k_preds = []
auxs = []
ground_truths = []
raw_srcs = []
for j in range(0, len(src['data']), config['data']['batch_size']):
sys.stdout.write("\r%s/%s..." % (j, len(src['data'])))
sys.stdout.flush()
# get batch
input_content, input_aux, output, side_info, raw_src = data.minibatch(
src, tgt, j,
config['data']['batch_size'],
config['data']['max_len'],
config,
is_test=True)
input_lines_src, output_lines_src, srclens, srcmask, indices = input_content
input_ids_aux, _, auxlens, auxmask, _ = input_aux
input_lines_tgt, output_lines_tgt, _, _, _ = output
_, raw_src, _, _, _ = raw_src
side_info, _, _, _, _ = side_info
# TODO -- beam search
tgt_pred_top_k = decode_minibatch(
config['data']['max_len'], tgt['tok2id']['<s>'],
model, input_lines_src, srclens, srcmask,
input_ids_aux, auxlens, auxmask, side_info, k=k)
# convert inputs/preds/targets/aux to human-readable form
inputs += ids_to_toks(output_lines_src, src['id2tok'], indices)
ground_truths += ids_to_toks(output_lines_tgt, tgt['id2tok'], indices)
raw_srcs += ids_to_toks(raw_src, src['id2tok'], indices)
# TODO -- refactor this stuff!! it's shitty
# get the "offical" predictions from the model
pred_toks, pred_lens = ids_to_toks(
tgt_pred_top_k[:, :, 0], tgt['id2tok'], indices, save_cuts=True)
preds += pred_toks
# now get all the other top-k prediction levels
top_k_pred = [pred_toks]
for i in range(k - 1):
top_k_pred.append(ids_to_toks(
tgt_pred_top_k[:, :, i + 1], tgt['id2tok'], indices, cuts=pred_lens)
)
# top_k_pred is [k, batch, length] where length is ragged
# but we want it in [batch, length, k]. Manual transpose b/c ragged :(
batch_size = len(top_k_pred[0]) # could be variable at test time
pred_lens = data.unsort(pred_lens, indices)
top_k_pred_transposed = [[] for _ in range(batch_size)]
for bi in range(batch_size):
for ti in range(pred_lens[bi]):
top_k_pred_transposed[bi] += [[
top_k_pred[ki][bi][ti] for ki in range(k)
]]
top_k_preds += top_k_pred_transposed
if config['model']['model_type'] == 'delete':
auxs += [[str(x)] for x in input_ids_aux.data.cpu().numpy()] # because of list comp in inference_metrics()
elif config['model']['model_type'] == 'delete_retrieve':
auxs += ids_to_toks(input_ids_aux, tgt['id2tok'], indices)
elif config['model']['model_type'] == 'seq2seq':
auxs += ['None' for _ in range(batch_size)]
return inputs, preds, top_k_preds, ground_truths, auxs, raw_srcs
def get_metrics(inputs, preds, ground_truths, top_k_preds=None, classifier=None):
bleu = get_bleu(preds, ground_truths)
src_bleu = get_bleu(
preds, inputs,
word_lists=[
set(src) - set(tgt) for src, tgt in zip(inputs, ground_truths)
]
)
tgt_bleu = get_bleu(
preds, ground_truths,
word_lists=[
set(tgt) - set(src) for src, tgt in zip(inputs, ground_truths)
]
)
edit_distance = get_edit_distance(preds, ground_truths)
if top_k_preds is None:
top_k_preds = [[[x] for x in seq] for seq in preds]
tgt_precision, tgt_recall = get_precision_recall(inputs, top_k_preds, ground_truths)
src_precision, src_recall = get_precision_recall(ground_truths, top_k_preds, inputs)
if classifier is not None:
classifier_error = classifier.error_rate(
seqs=[' '.join(seq) for seq in preds],
Y=[1 for _ in range(len(preds))]) # we're trying to create "target" seqs
else:
classifier_error = -1
return {
'bleu': bleu,
'src_bleu': src_bleu,
'tgt_bleu': tgt_bleu,
'edit_distance': edit_distance,
'tgt_precision': tgt_precision,
'src_precision': src_precision,
'tgt_recall': tgt_recall,
'src_recall': src_recall,
'classifier_error': classifier_error
}
def inference_metrics(model, src, tgt, config):
""" decode and evaluate bleu """
inputs, preds, top_k_preds, ground_truths, auxs, raw_srcs = decode_dataset(
model, src, tgt, config, k=config['eval']['precision_recall_k'])
eval_classifier = models.TextClassifier.from_pickle(
config['eval']['classifier_path'])
metrics = get_metrics(
raw_srcs, preds, ground_truths,
top_k_preds=top_k_preds, classifier=eval_classifier)
inputs = [' '.join(seq) for seq in inputs]
preds = [' '.join(seq) for seq in preds]
ground_truths = [' '.join(seq) for seq in ground_truths]
auxs = [' '.join(seq) for seq in auxs]
return metrics, inputs, preds, ground_truths, auxs
def evaluate_lpp(model, src, tgt, config):
""" evaluate log perplexity WITHOUT decoding
(i.e., with teacher forcing)
"""
weight_mask = torch.ones(len(tgt['tok2id']))
if CUDA:
weight_mask = weight_mask.cuda()
weight_mask[tgt['tok2id']['<pad>']] = 0
loss_criterion = nn.CrossEntropyLoss(weight=weight_mask)
if CUDA:
loss_criterion = loss_criterion.cuda()
losses = []
for j in range(0, len(src['data']), config['data']['batch_size']):
sys.stdout.write("\r%s/%s..." % (j, len(src['data'])))
sys.stdout.flush()
# get batch
input_content, input_aux, output, side_info, _ = data.minibatch(
src, tgt, j,
config['data']['batch_size'],
config['data']['max_len'],
config,
is_test=True)
input_lines_src, _, srclens, srcmask, _ = input_content
input_ids_aux, _, auxlens, auxmask, _ = input_aux
input_lines_tgt, output_lines_tgt, _, _, _ = output
side_info, _, _, _, _ = side_info
decoder_logit, decoder_probs, _, _ = model(
input_lines_src, input_lines_tgt, srcmask, srclens,
input_ids_aux, auxlens, auxmask,
side_info)
loss = loss_criterion(
decoder_logit.contiguous().view(-1, len(tgt['tok2id'])),
output_lines_tgt.view(-1)
)
losses.append(loss.data[0])
return np.mean(losses)
|
453000
|
from __future__ import unicode_literals
import functools
import re
from .turner import TurnerBaseIE
from ..compat import (
compat_urllib_parse_urlencode,
compat_urlparse,
)
from ..utils import (
OnDemandPagedList,
remove_start,
)
class NBAIE(TurnerBaseIE):
_VALID_URL = r'https?://(?:watch\.|www\.)?nba\.com/(?P<path>(?:[^/]+/)+(?P<id>[^?]*?))/?(?:/index\.html)?(?:\?.*)?$'
_TESTS = [{
'url': 'http://www.nba.com/video/games/nets/2012/12/04/0021200253-okc-bkn-recap.nba/index.html',
'md5': '9e7729d3010a9c71506fd1248f74e4f4',
'info_dict': {
'id': '0021200253-okc-bkn-recap',
'ext': 'mp4',
'title': 'Thunder vs. Nets',
'description': '<NAME> scores 32 points and dishes out six assists as the Thunder beat the Nets in Brooklyn.',
'duration': 181,
'timestamp': 1354638466,
'upload_date': '20121204',
},
'params': {
# m3u8 download
'skip_download': True,
},
}, {
'url': 'http://www.nba.com/video/games/hornets/2014/12/05/0021400276-nyk-cha-play5.nba/',
'only_matching': True,
}, {
'url': 'http://watch.nba.com/video/channels/playoffs/2015/05/20/0041400301-cle-atl-recap.nba',
'md5': 'b2b39b81cf28615ae0c3360a3f9668c4',
'info_dict': {
'id': 'channels/playoffs/2015/05/20/0041400301-cle-atl-recap.nba',
'ext': 'mp4',
'title': 'Hawks vs. Cavaliers Game 1',
'description': 'md5:8094c3498d35a9bd6b1a8c396a071b4d',
'duration': 228,
'timestamp': 1432134543,
'upload_date': '20150520',
},
'expected_warnings': ['Unable to download f4m manifest'],
}, {
'url': 'http://www.nba.com/clippers/news/doc-rivers-were-not-trading-blake',
'info_dict': {
'id': 'teams/clippers/2016/02/17/1455672027478-Doc_Feb16_720.mov-297324',
'ext': 'mp4',
'title': 'Practice: Doc Rivers - 2/16/16',
'description': 'Head Coach Doc Rivers addresses the media following practice.',
'upload_date': '20160216',
'timestamp': 1455672000,
},
'params': {
# m3u8 download
'skip_download': True,
},
'expected_warnings': ['Unable to download f4m manifest'],
}, {
'url': 'http://www.nba.com/timberwolves/wiggins-shootaround#',
'info_dict': {
'id': 'timberwolves',
'title': 'Shootaround Access - Dec. 12 | <NAME>',
},
'playlist_count': 30,
'params': {
            # Downloading the whole playlist takes too long
'playlist_items': '1-30',
},
}, {
'url': 'http://www.nba.com/timberwolves/wiggins-shootaround#',
'info_dict': {
'id': 'teams/timberwolves/2014/12/12/Wigginsmp4-3462601',
'ext': 'mp4',
'title': 'Shootaround Access - Dec. 12 | <NAME>',
'description': 'Wolves rookie <NAME> addresses the media after Friday\'s shootaround.',
'upload_date': '20141212',
'timestamp': 1418418600,
},
'params': {
'noplaylist': True,
# m3u8 download
'skip_download': True,
},
'expected_warnings': ['Unable to download f4m manifest'],
}]
_PAGE_SIZE = 30
def _fetch_page(self, team, video_id, page):
search_url = 'http://searchapp2.nba.com/nba-search/query.jsp?' + compat_urllib_parse_urlencode({
'type': 'teamvideo',
'start': page * self._PAGE_SIZE + 1,
'npp': (page + 1) * self._PAGE_SIZE + 1,
'sort': 'recent',
'output': 'json',
'site': team,
})
results = self._download_json(
search_url, video_id, note='Download page %d of playlist data' % page)['results'][0]
for item in results:
yield self.url_result(compat_urlparse.urljoin('http://www.nba.com/', item['url']))
def _extract_playlist(self, orig_path, video_id, webpage):
team = orig_path.split('/')[0]
if self._downloader.params.get('noplaylist'):
self.to_screen('Downloading just video because of --no-playlist')
video_path = self._search_regex(
r'nbaVideoCore\.firstVideo\s*=\s*\'([^\']+)\';', webpage, 'video path')
video_url = 'http://www.nba.com/%s/video/%s' % (team, video_path)
return self.url_result(video_url)
self.to_screen('Downloading playlist - add --no-playlist to just download video')
playlist_title = self._og_search_title(webpage, fatal=False)
entries = OnDemandPagedList(
functools.partial(self._fetch_page, team, video_id),
self._PAGE_SIZE)
return self.playlist_result(entries, team, playlist_title)
def _real_extract(self, url):
path, video_id = re.match(self._VALID_URL, url).groups()
orig_path = path
if path.startswith('nba/'):
path = path[3:]
if 'video/' not in path:
webpage = self._download_webpage(url, video_id)
path = remove_start(self._search_regex(r'data-videoid="([^"]+)"', webpage, 'video id'), '/')
if path == '{{id}}':
return self._extract_playlist(orig_path, video_id, webpage)
# See prepareContentId() of pkgCvp.js
if path.startswith('video/teams'):
path = 'video/channels/proxy/' + path[6:]
return self._extract_cvp_info(
'http://www.nba.com/%s.xml' % path, video_id, {
'default': {
'media_src': 'http://nba.cdn.turner.com/nba/big',
},
'm3u8': {
'media_src': 'http://nbavod-f.akamaihd.net',
},
})
|
453004
|
import numpy as np
import tensorflow as tf
from distutils.version import LooseVersion
if LooseVersion(tf.__version__) > LooseVersion("1.14"):
import tensorflow.compat.v1 as tf
from graphgallery import functional as gf
from graphgallery.utils import tqdm
from graphgallery.attack.targeted import TensorFlow
from ..targeted_attacker import TargetedAttacker
@TensorFlow.register()
class FasterSGA(TargetedAttacker):
"""TensorFlow 1.x Implementation of SGA"""
def process(self, W, b, K=2, normalize_grad=True, reset=True):
# nodes with the same class labels
self.similar_nodes = [
np.where(self.graph.node_label == c)[0]
for c in range(self.num_classes)
]
W, b = gf.tensoras(W, b)
self.K = K
self.normalize_grad = normalize_grad
self.surrogate = Surrogate(self.graph.node_attr @ W, b, K=K)
self.shape = (self.num_nodes, self.num_nodes)
self.adj = self.graph.adj_matrix
edges, weights = gf.sparse_adj_to_edge(gf.normalize_adj(self.graph.adj_matrix))
self.adj_sparse = (edges.T, weights, self.shape)
self.y_onehot = np.eye(int(self.num_classes))[self.graph.node_label]
if reset:
self.reset()
return self
def reset(self):
super().reset()
# for the added self-loop
self.selfloop_degree = (self.degree + 1.).astype(self.floatx)
self.adj_flips = {}
self.wrong_label = None
return self
def subgraph_preprocessing(self, target, node_reduction=True):
logit = self.surrogate.run(self.surrogate.logit, feed_dict={self.surrogate.adj: self.adj_sparse,
self.surrogate.target: target})
self.label_onehot = self.y_onehot[target]
self.wrong_label = np.argmax(logit - 1e6 * self.label_onehot)
self.wrong_label_onehot = np.eye(self.num_classes)[self.wrong_label]
self.edges, self.nodes = self.ego_subgraph()
assert self.wrong_label != self.graph.node_label[target]
neighbors = np.setdiff1d(self.adj[target].indices, target)
self.neighbors = neighbors
if self.direct_attack:
influence_nodes = [target]
            nodes_with_wrong_label = np.setdiff1d(self.similar_nodes[self.wrong_label], np.append(neighbors, target))
else:
if node_reduction:
influence_nodes = [target]
else:
influence_nodes = neighbors
nodes_with_wrong_label = np.setdiff1d(self.similar_nodes[self.wrong_label], [target])
self.construct_sub_adj(nodes_with_wrong_label, influence_nodes)
if node_reduction:
if self.direct_attack:
self.node_reduction([target], max_nodes=int(self.selfloop_degree[target]))
else:
self.node_reduction(neighbors, max_nodes=5)
def ego_subgraph(self):
edges, nodes = gf.ego_graph(self.adj, self.target, self.K)
return edges, nodes
def construct_sub_adj(self, nodes_with_wrong_label, influence_nodes):
length = len(nodes_with_wrong_label)
non_edge = np.vstack([np.stack([np.tile(infl, length), nodes_with_wrong_label], axis=1) for infl in influence_nodes])
if len(influence_nodes) > 1:
            mask = self.adj[non_edge[:, 0], non_edge[:, 1]].A1 == 0
non_edge = non_edge[mask]
nodes_all = np.union1d(self.nodes, nodes_with_wrong_label)
edge_weight = np.ones(len(self.edges), dtype=np.float32)
non_edge_weight = np.zeros(len(non_edge), dtype=np.float32)
self_loop = np.stack([nodes_all, nodes_all], axis=1)
self_loop_weight = np.ones(nodes_all.size)
self.indices = np.vstack([self.edges, non_edge, self.edges[:, [1, 0]], non_edge[:, [1, 0]], self_loop])
self.upper_bound = edge_weight.size + non_edge_weight.size
self.lower_bound = edge_weight.size
self.non_edge = non_edge
self.edge_weight = edge_weight
self.non_edge_weight = non_edge_weight
self.self_loop_weight = self_loop_weight
def node_reduction(self, influence_nodes, max_nodes):
sym_weights = np.hstack([self.edge_weight, self.non_edge_weight, self.edge_weight, self.non_edge_weight, self.self_loop_weight])
norm_weight = normalize(sym_weights, self.indices, self.selfloop_degree)
adj_norm = (self.indices, norm_weight, self.shape)
feed_dict = self.surrogate.construct_feed_dict(adj_norm, self.label_onehot, self.wrong_label_onehot, self.target)
gradients = self.surrogate.run(self.surrogate.gradients, feed_dict=feed_dict)[self.lower_bound:self.upper_bound]
index = gf.least_indices(gradients, max_nodes)[0]
self.construct_sub_adj(self.non_edge[index][:, 1], influence_nodes)
def update_subgraph(self, u, v, idx):
if idx < self.lower_bound:
# remove edge
degree_delta = -1
self.edge_weight[idx] = 0.
else:
# add edge
degree_delta = 1
self.non_edge_weight[idx - self.lower_bound] = 1.0
self.selfloop_degree[u] += degree_delta
self.selfloop_degree[v] += degree_delta
def attack(self,
target,
num_budgets=None,
direct_attack=True,
structure_attack=True,
feature_attack=False,
disable=False):
super().attack(target, num_budgets, direct_attack, structure_attack,
feature_attack)
self.subgraph_preprocessing(target)
for epoch in tqdm(range(self.num_budgets),
                          desc='Perturbing Graph',
disable=disable):
weights = np.hstack([self.edge_weight, self.non_edge_weight])
sym_weights = np.hstack([weights, weights, self.self_loop_weight])
norm_weight = normalize(sym_weights, self.indices, self.selfloop_degree)
adj_norm = (self.indices, norm_weight, self.shape)
feed_dict = self.surrogate.construct_feed_dict(adj_norm, self.label_onehot, self.wrong_label_onehot, target)
gradients = self.surrogate.run(self.surrogate.gradients, feed_dict=feed_dict)
# a trick
if self.normalize_grad:
gradients = normalize(gradients, self.indices, self.selfloop_degree)
gradients = gradients[:self.upper_bound] * (-2 * weights + 1)
i = np.argmin(gradients)
u, v = self.indices[i]
assert not self.is_modified(u, v)
self.adj_flips[(u, v)] = epoch
self.update_subgraph(u, v, i)
return self
def normalize(data, indices, degree):
d = np.sqrt(degree)
row, col = indices.T
return data / (d[row] * d[col])
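# Worked check (not in the original): normalize() applies the symmetric GCN
# scaling w_uv / sqrt(d_u * d_v) entrywise. For example:
#   normalize(np.array([1., 1.]), np.array([[0, 1], [1, 0]]), np.array([4., 1.]))
#   # -> array([0.5, 0.5])  since 1 / (sqrt(4) * sqrt(1)) = 0.5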
class Surrogate:
def __init__(self, XW, b, K=2, eps=5.0):
graph = tf.Graph()
with graph.as_default():
self.adj = tf.sparse_placeholder(dtype=tf.float32)
self.label = tf.placeholder(dtype=tf.float32)
self.wrong_label = tf.placeholder(dtype=tf.float32)
self.target = tf.placeholder(dtype=tf.int32)
XW = tf.constant(XW, dtype=tf.float32)
b = tf.constant(b, dtype=tf.float32)
out = XW
for _ in range(K):
out = tf.sparse.sparse_dense_matmul(self.adj, out)
self.logit = out[self.target] + b
# Calibration
self.logit_calibrated = self.logit / eps
self.loss = tf.nn.softmax_cross_entropy_with_logits_v2(logits=self.logit_calibrated, labels=self.wrong_label) - tf.nn.softmax_cross_entropy_with_logits_v2(logits=self.logit_calibrated, labels=self.label)
self.gradients = tf.gradients(self.loss, self.adj.values)[0]
self.sess = tf.Session(graph=graph)
self.run(tf.global_variables_initializer())
def construct_feed_dict(self, adj, label, wrong_label, target):
feed_dict = {
self.adj: adj,
self.wrong_label: wrong_label,
self.label: label,
self.target: target,
}
return feed_dict
def run(self, variables, feed_dict=None):
return self.sess.run(variables, feed_dict=feed_dict)
def close(self):
self.sess.close()
|
453079
|
from . import BaseCMLTest
from virl.api.plugin import _test_enable_plugins
from click.testing import CliRunner
import requests_mock
import os
class CMLGoodPluginTest(BaseCMLTest):
def setUp(self):
_test_enable_plugins()
super().setUp()
os.environ["CML_PLUGIN_PATH"] = os.path.realpath("./tests/v2/plugins_good")
@classmethod
def tearDownClass(cls):
super().tearDownClass()
os.environ.pop("CML_PLUGIN_PATH", None)
_test_enable_plugins(enabled=False)
def test_cmd_plugin_output(self):
virl = self.get_virl()
with requests_mock.Mocker() as m:
runner = CliRunner()
# Mock the request to return what we expect from the API.
self.setup_mocks(m)
result = runner.invoke(virl, ["--help"])
self.assertEqual(0, result.exit_code)
self.assertIn("test-cmd", result.output)
def test_cmd_plugin(self):
virl = self.get_virl()
with requests_mock.Mocker() as m:
# Mock the request to return what we expect from the API.
self.setup_mocks(m)
runner = CliRunner()
result = runner.invoke(virl, ["test-cmd"])
self.assertEqual("TEST COMMAND\n", result.output)
def test_gen_plugin_output(self):
virl = self.get_virl()
with requests_mock.Mocker() as m:
runner = CliRunner()
# Mock the request to return what we expect from the API.
self.setup_mocks(m)
result = runner.invoke(virl, ["generate", "--help"])
self.assertEqual(0, result.exit_code)
self.assertIn("test-gen", result.output)
def test_gen_plugin(self):
virl = self.get_virl()
with requests_mock.Mocker() as m:
# Mock the request to return what we expect from the API.
self.setup_mocks(m)
runner = CliRunner()
result = runner.invoke(virl, ["generate", "test-gen"])
self.assertEqual("TEST GENERATOR\n", result.output)
def test_view_plugin(self):
virl = self.get_virl()
with requests_mock.Mocker() as m:
# Mock the request to return what we expect from the API.
self.setup_mocks(m)
runner = CliRunner()
result = runner.invoke(virl, ["ls"])
self.assertEqual("TEST VIEWER\n", result.output)
|
453085
|
import numpy as np
from numpy.testing import assert_array_almost_equal
import pytest
from scipy.spatial.transform import Rotation
from tadataka.camera import CameraModel, CameraParameters
from tadataka.projection import pi, inv_pi
def test_pi():
P = np.array([
[0, 0, 0],
[1, 4, 2],
[-1, 3, 5],
], dtype=np.float64)
assert_array_almost_equal(
pi(P),
[[0., 0.], [0.5, 2.0], [-0.2, 0.6]]
)
assert_array_almost_equal(pi(np.array([0., 0., 0.])), [0, 0])
assert_array_almost_equal(pi(np.array([3., 5., 5.])), [0.6, 1.0])
def test_inv_pi():
xs = np.array([
[0.5, 2.0],
[-0.2, 0.6]
])
depths = np.array([2.0, 5.0])
assert_array_almost_equal(
inv_pi(xs, depths),
[[1.0, 4.0, 2.0],
[-1.0, 3.0, 5.0]]
)
x = np.array([0.5, 2.0])
depth = 2.0
assert_array_almost_equal(inv_pi(x, depth), [1.0, 4.0, 2.0])
|
453132
|
import tensorflow as tf
import numpy as np
import matplotlib.pyplot as plt
import re
import sys
def read_flo(filename):
with open(filename, 'rb') as f:
magic = np.fromfile(f, np.float32, count=1)
if 202021.25 != magic:
print('Magic number incorrect. Invalid .flo file')
else:
w = np.fromfile(f, np.int32, count=1)
h = np.fromfile(f, np.int32, count=1)
data = np.fromfile(f, np.float32, count=int(2*w*h))
# Reshape data into 3D array (columns, rows, bands)
data2D = np.resize(data, (h[0], w[0],2))
return data2D
def write_flo(filename, flow):
"""
write optical flow in Middlebury .flo format
:param flow: optical flow map
:param filename: optical flow file path to be saved
:return: None
"""
f = open(filename, 'wb')
magic = np.array([202021.25], dtype=np.float32)
(height, width) = flow.shape[0:2]
w = np.array([width], dtype=np.int32)
h = np.array([height], dtype=np.int32)
magic.tofile(f)
w.tofile(f)
h.tofile(f)
flow.tofile(f)
f.close()
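# Round-trip sketch (not in the original; the filename is arbitrary; the flow
# map must be float32 with two channels, matching what read_flo() returns):
#   flow = np.zeros((480, 640, 2), dtype=np.float32)
#   write_flo('flow.flo', flow)
#   assert np.allclose(read_flo('flow.flo'), flow)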
def read_pfm(file):
file = open(file, 'rb')
color = None
width = None
height = None
scale = None
endian = None
header = file.readline().rstrip()
header = header.decode('utf-8')
if header == 'PF':
color = True
elif header == 'Pf':
color = False
else:
raise Exception('Not a PFM file.')
dim_match = re.match(r'^(\d+)\s(\d+)\s$', file.readline().decode('utf-8'))
if dim_match:
width, height = map(int, dim_match.groups())
else:
raise Exception('Malformed PFM header.')
scale = float(file.readline().rstrip().decode('utf-8'))
if scale < 0: # little-endian
endian = '<'
scale = -scale
else:
endian = '>' # big-endian
data = np.fromfile(file, endian + 'f')
shape = (height, width, 3) if color else (height, width)
data = np.reshape(data, shape)
data = np.flipud(data)
data = data[:, :, :2]
return data
def write_pfm(file, image, scale=1):
file = open(file, 'wb')
color = None
if image.dtype.name != 'float32':
raise Exception('Image dtype must be float32.')
image = np.flipud(image)
if len(image.shape) == 3 and image.shape[2] == 3: # color image
color = True
elif len(image.shape) == 2 or len(image.shape) == 3 and image.shape[2] == 1: # greyscale
color = False
else:
raise Exception('Image must have H x W x 3, H x W x 1 or H x W dimensions.')
    file.write(b'PF\n' if color else b'Pf\n')  # header must be bytes: file was opened in 'wb'
    file.write(b'%d %d\n' % (image.shape[1], image.shape[0]))
    endian = image.dtype.byteorder
    if endian == '<' or endian == '=' and sys.byteorder == 'little':
        scale = -scale
    file.write(b'%f\n' % scale)
    image.tofile(file)
    file.close()
def flow_to_color(flow, mask=None, max_flow=None):
"""Converts flow to 3-channel color image.
Args:
flow: tensor of shape [num_batch, height, width, 2].
mask: flow validity mask of shape [num_batch, height, width, 1].
"""
n = 8
num_batch, height, width, _ = tf.unstack(tf.shape(flow))
mask = tf.ones([num_batch, height, width, 1]) if mask is None else mask
flow_u, flow_v = tf.unstack(flow, axis=3)
if max_flow is not None:
max_flow = tf.maximum(tf.to_float(max_flow), 1.)
else:
max_flow = tf.reduce_max(tf.abs(flow * mask))
mag = tf.sqrt(tf.reduce_sum(tf.square(flow), 3))
angle = tf.atan2(flow_v, flow_u)
im_h = tf.mod(angle / (2 * np.pi) + 1.0, 1.0)
im_s = tf.clip_by_value(mag * n / max_flow, 0, 1)
im_v = tf.clip_by_value(n - im_s, 0, 1)
im_hsv = tf.stack([im_h, im_s, im_v], 3)
im = tf.image.hsv_to_rgb(im_hsv)
return im * mask
def flow_error_image(flow_1, flow_2, mask_occ, mask_noc=None, log_colors=True):
"""Visualize the error between two flows as 3-channel color image.
Adapted from the KITTI C++ devkit.
Args:
flow_1: first flow of shape [num_batch, height, width, 2].
flow_2: second flow (ground truth)
mask_occ: flow validity mask of shape [num_batch, height, width, 1].
Equals 1 at (occluded and non-occluded) valid pixels.
mask_noc: Is 1 only at valid pixels which are not occluded.
"""
mask_noc = tf.ones(tf.shape(mask_occ)) if mask_noc is None else mask_noc
diff_sq = (flow_1 - flow_2) ** 2
diff = tf.sqrt(tf.reduce_sum(diff_sq, [3], keepdims=True))
if log_colors:
num_batch, height, width, _ = tf.unstack(tf.shape(flow_1))
colormap = [
[0,0.0625,49,54,149],
[0.0625,0.125,69,117,180],
[0.125,0.25,116,173,209],
[0.25,0.5,171,217,233],
[0.5,1,224,243,248],
[1,2,254,224,144],
[2,4,253,174,97],
[4,8,244,109,67],
[8,16,215,48,39],
[16,1000000000.0,165,0,38]]
colormap = np.asarray(colormap, dtype=np.float32)
colormap[:, 2:5] = colormap[:, 2:5] / 255
mag = tf.sqrt(tf.reduce_sum(tf.square(flow_2), 3, keepdims=True))
error = tf.minimum(diff / 3, 20 * diff / mag)
im = tf.zeros([num_batch, height, width, 3])
for i in range(colormap.shape[0]):
colors = colormap[i, :]
cond = tf.logical_and(tf.greater_equal(error, colors[0]),
tf.less(error, colors[1]))
im = tf.where(tf.tile(cond, [1, 1, 1, 3]),
tf.ones([num_batch, height, width, 1]) * colors[2:5],
im)
im = tf.where(tf.tile(tf.cast(mask_noc, tf.bool), [1, 1, 1, 3]),
im, im * 0.5)
im = im * mask_occ
else:
error = (tf.minimum(diff, 5) / 5) * mask_occ
im_r = error # errors in occluded areas will be red
im_g = error * mask_noc
im_b = error * mask_noc
im = tf.concat(axis=3, values=[im_r, im_g, im_b])
return im
|
453148
|
import numpy as np
from rllab import spaces
from rllab.core.serializable import Serializable
from rllab.envs.proxy_env import ProxyEnv
from rllab.spaces.box import Box
from rllab.misc.overrides import overrides
from rllab.envs.base import Step
class NormalizedEnv(ProxyEnv, Serializable):
def __init__(
self,
env,
scale_reward=1.,
normalize_obs=False,
normalize_reward=False,
obs_alpha=0.001,
reward_alpha=0.001,
):
ProxyEnv.__init__(self, env)
Serializable.quick_init(self, locals())
if not isinstance(env.action_space, Box):
print("Environment not using continuous actions; action normalization skipped!")
self._scale_reward = scale_reward
self._normalize_obs = normalize_obs
self._normalize_reward = normalize_reward
self._obs_alpha = obs_alpha
self._obs_mean = np.zeros(env.observation_space.flat_dim)
self._obs_var = np.ones(env.observation_space.flat_dim)
self._reward_alpha = reward_alpha
self._reward_mean = 0.
self._reward_var = 1.
def _update_obs_estimate(self, obs):
flat_obs = self.wrapped_env.observation_space.flatten(obs)
self._obs_mean = (1 - self._obs_alpha) * self._obs_mean + self._obs_alpha * flat_obs
self._obs_var = (1 - self._obs_alpha) * self._obs_var + self._obs_alpha * np.square(flat_obs - self._obs_mean)
def _update_reward_estimate(self, reward):
self._reward_mean = (1 - self._reward_alpha) * self._reward_mean + self._reward_alpha * reward
self._reward_var = (1 - self._reward_alpha) * self._reward_var + self._reward_alpha * np.square(reward -
self._reward_mean)
def _apply_normalize_obs(self, obs):
self._update_obs_estimate(obs)
return (obs - self._obs_mean) / (np.sqrt(self._obs_var) + 1e-8)
def _apply_normalize_reward(self, reward):
self._update_reward_estimate(reward)
return reward / (np.sqrt(self._reward_var) + 1e-8)
def reset(self):
ret = self._wrapped_env.reset()
if self._normalize_obs:
return self._apply_normalize_obs(ret)
else:
return ret
def __getstate__(self):
d = Serializable.__getstate__(self)
d["_obs_mean"] = self._obs_mean
d["_obs_var"] = self._obs_var
return d
def __setstate__(self, d):
Serializable.__setstate__(self, d)
self._obs_mean = d["_obs_mean"]
self._obs_var = d["_obs_var"]
@property
@overrides
def action_space(self):
if isinstance(self._wrapped_env.action_space, Box):
ub = np.ones(self._wrapped_env.action_space.shape)
return spaces.Box(-1 * ub, ub)
return self._wrapped_env.action_space
@overrides
def step(self, action):
if isinstance(self._wrapped_env.action_space, Box):
# rescale the action
lb, ub = self._wrapped_env.action_space.bounds
scaled_action = lb + (action + 1.) * 0.5 * (ub - lb)
scaled_action = np.clip(scaled_action, lb, ub)
else:
scaled_action = action
wrapped_step = self._wrapped_env.step(scaled_action)
next_obs, reward, done, info = wrapped_step
if self._normalize_obs:
next_obs = self._apply_normalize_obs(next_obs)
if self._normalize_reward:
reward = self._apply_normalize_reward(reward)
return Step(next_obs, reward * self._scale_reward, done, **info)
def __str__(self):
return "Normalized: %s" % self._wrapped_env
# def log_diagnostics(self, paths):
# print "Obs mean:", self._obs_mean
# print "Obs std:", np.sqrt(self._obs_var)
# print "Reward mean:", self._reward_mean
# print "Reward std:", np.sqrt(self._reward_var)
normalize = NormalizedEnv
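# Usage sketch (CartpoleEnv is illustrative; any rllab Env with a Box action
# space gets its actions rescaled from [-1, 1] to the env's own bounds):
#   from rllab.envs.box2d.cartpole_env import CartpoleEnv
#   env = normalize(CartpoleEnv(), scale_reward=0.1, normalize_obs=True)
#   obs = env.reset()
#   next_obs, reward, done, info = env.step(env.action_space.sample())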
|
453160
|
from mpi4py import MPI
import mpiunittest as unittest
import sys
try:
import array
except ImportError:
array = None
class TestMemory(unittest.TestCase):
def testNewEmpty(self):
memory = MPI.memory
mem = memory()
self.assertEqual(mem.address, 0)
self.assertEqual(mem.obj, None)
self.assertEqual(mem.nbytes, 0)
self.assertEqual(mem.readonly, False)
self.assertEqual(mem.format, 'B')
self.assertEqual(mem.itemsize, 1)
self.assertEqual(len(mem), 0)
mem[:] = 0
mem[:] = memory()
m = memoryview(mem)
self.assertEqual(m.format, 'B')
self.assertEqual(m.itemsize, 1)
self.assertEqual(m.ndim, 1)
self.assertEqual(m.readonly, False)
self.assertEqual(m.shape, (0,))
self.assertEqual(m.strides, (1,))
self.assertEqual(m.tobytes(), b"")
self.assertEqual(m.tolist(), [])
mem.release()
self.assertEqual(mem.address, 0)
self.assertEqual(mem.nbytes, 0)
self.assertEqual(mem.readonly, False)
def testNewBad(self):
memory = MPI.memory
        for obj in (None, 0, 0.0, [], ()):
self.assertRaises(TypeError, memory, obj)
def testNewBytes(self):
memory = MPI.memory
obj = b"abc"
mem = memory(obj)
self.assertEqual(mem.obj, obj)
self.assertEqual(mem.nbytes, len(obj))
self.assertEqual(mem.readonly, True)
def testNewBytearray(self):
memory = MPI.memory
obj = bytearray([1,2,3])
mem = memory(obj)
self.assertEqual(mem.obj, obj)
self.assertEqual(mem.nbytes, len(obj))
self.assertEqual(mem.readonly, False)
@unittest.skipIf(array is None, 'array')
def testNewArray(self):
memory = MPI.memory
obj = array.array('i', [1,2,3])
mem = memory(obj)
self.assertEqual(mem.obj, obj)
self.assertEqual(mem.nbytes, len(obj)*obj.itemsize)
self.assertEqual(mem.readonly, False)
def testAllocate(self):
memory = MPI.memory
for size in (0, 1, 2):
mem = memory.allocate(size)
self.assertEqual(mem.nbytes, size)
self.assertNotEqual(mem.address, 0)
for clear in (False, True):
mem = memory.allocate(1024, clear)
self.assertEqual(mem.nbytes, 1024)
self.assertNotEqual(mem.address, 0)
if clear:
self.assertEqual(mem[0], 0)
self.assertEqual(mem[-1], 0)
self.assertRaises(TypeError, memory.allocate, None)
self.assertRaises(ValueError, memory.allocate, -1)
def testFromBufferBad(self):
memory = MPI.memory
        for obj in (None, 0, 0.0, [], ()):
self.assertRaises(TypeError, memory.frombuffer, obj)
def testFromBufferBytes(self):
memory = MPI.memory
mem = memory.frombuffer(b"abc", readonly=True)
self.assertNotEqual(mem.address, 0)
self.assertEqual(type(mem.obj), bytes)
self.assertEqual(mem.obj, b"abc")
self.assertEqual(mem.nbytes, 3)
self.assertEqual(mem.readonly, True)
self.assertEqual(mem.format, 'B')
self.assertEqual(mem.itemsize, 1)
self.assertEqual(len(mem), 3)
m = memoryview(mem)
self.assertEqual(m.format, 'B')
self.assertEqual(m.itemsize, 1)
self.assertEqual(m.ndim, 1)
self.assertEqual(m.readonly, True)
self.assertEqual(m.shape, (3,))
self.assertEqual(m.strides, (1,))
self.assertEqual(m.tobytes(), b"abc")
self.assertEqual(m.tolist(), [ord(c) for c in "abc"])
mem.release()
self.assertEqual(mem.address, 0)
self.assertEqual(mem.nbytes, 0)
self.assertEqual(mem.readonly, False)
@unittest.skipIf(array is None, 'array')
def testFromBufferArrayRO(self):
memory = MPI.memory
obj = array.array('B', [1,2,3])
mem = memory.frombuffer(obj, readonly=True)
self.assertNotEqual(mem.address, 0)
self.assertEqual(type(mem.obj), array.array)
self.assertEqual(mem.nbytes, 3)
self.assertEqual(mem.readonly, True)
self.assertEqual(mem.format, 'B')
self.assertEqual(mem.itemsize, 1)
self.assertEqual(len(mem), 3)
m = memoryview(mem)
self.assertEqual(m.format, 'B')
self.assertEqual(m.itemsize, 1)
self.assertEqual(m.ndim, 1)
self.assertEqual(m.readonly, True)
self.assertEqual(m.shape, (3,))
self.assertEqual(m.strides, (1,))
self.assertEqual(m.tobytes(), b"\1\2\3")
self.assertEqual(m.tolist(), [1,2,3])
mem.release()
self.assertEqual(mem.address, 0)
self.assertEqual(mem.nbytes, 0)
self.assertEqual(mem.readonly, False)
@unittest.skipIf(array is None, 'array')
def testFromBufferArrayRW(self):
memory = MPI.memory
obj = array.array('B', [1,2,3])
mem = memory.frombuffer(obj, readonly=False)
self.assertNotEqual(mem.address, 0)
self.assertEqual(mem.nbytes, 3)
self.assertEqual(mem.readonly, False)
self.assertEqual(len(mem), 3)
m = memoryview(mem)
self.assertEqual(m.format, 'B')
self.assertEqual(m.itemsize, 1)
self.assertEqual(m.ndim, 1)
self.assertEqual(m.readonly, False)
self.assertEqual(m.shape, (3,))
self.assertEqual(m.strides, (1,))
self.assertEqual(m.tobytes(), b"\1\2\3")
self.assertEqual(m.tolist(), [1,2,3])
mem[:] = 1
self.assertEqual(obj, array.array('B', [1]*3))
mem[1:] = array.array('B', [7]*2)
self.assertEqual(obj, array.array('B', [1,7,7]))
mem[1:2] = array.array('B', [8]*1)
self.assertEqual(obj, array.array('B', [1,8,7]))
mem.release()
self.assertEqual(mem.address, 0)
self.assertEqual(mem.nbytes, 0)
self.assertEqual(mem.readonly, False)
@unittest.skipIf(array is None, 'array')
def testFromAddress(self):
memory = MPI.memory
obj = array.array('B', [1,2,3])
addr, size = obj.buffer_info()
nbytes = size * obj.itemsize
mem = memory.fromaddress(addr, nbytes, readonly=False)
self.assertNotEqual(mem.address, 0)
self.assertEqual(mem.nbytes, 3)
self.assertEqual(mem.readonly, False)
self.assertEqual(len(mem), 3)
m = memoryview(mem)
self.assertEqual(m.format, 'B')
self.assertEqual(m.itemsize, 1)
self.assertEqual(m.ndim, 1)
self.assertEqual(m.readonly, False)
self.assertEqual(m.shape, (3,))
self.assertEqual(m.strides, (1,))
self.assertEqual(m.tobytes(), b"\1\2\3")
self.assertEqual(m.tolist(), [1,2,3])
mem[:] = 1
self.assertEqual(obj, array.array('B', [1]*3))
mem[1:] = array.array('B', [7]*2)
self.assertEqual(obj, array.array('B', [1,7,7]))
mem[1:2] = array.array('B', [8]*1)
self.assertEqual(obj, array.array('B', [1,8,7]))
mem.release()
self.assertEqual(mem.address, 0)
self.assertEqual(mem.nbytes, 0)
self.assertEqual(mem.readonly, False)
def testToReadonly(self):
memory = MPI.memory
obj = bytearray(b"abc")
mem1 = memory.frombuffer(obj)
mem2 = mem1.toreadonly()
self.assertEqual(mem1.readonly, False)
self.assertEqual(mem2.readonly, True)
self.assertEqual(mem1.address, mem2.address)
self.assertEqual(mem1.obj, mem2.obj)
self.assertEqual(type(mem1.obj), type(mem2.obj))
self.assertEqual(mem1.nbytes, mem2.nbytes)
def testSequence(self):
n = 16
try:
mem = MPI.Alloc_mem(n, MPI.INFO_NULL)
except NotImplementedError:
self.skipTest('mpi-alloc_mem')
try:
            self.assertIs(type(mem), MPI.memory)
            self.assertNotEqual(mem.address, 0)
self.assertEqual(mem.nbytes, n)
self.assertEqual(mem.readonly, False)
self.assertEqual(len(mem), n)
def delitem(): del mem[n]
def getitem1(): return mem[n]
def getitem2(): return mem[::2]
def getitem3(): return mem[None]
def setitem1(): mem[n] = 0
def setitem2(): mem[::2] = 0
def setitem3(): mem[None] = 0
self.assertRaises(Exception, delitem)
self.assertRaises(IndexError, getitem1)
self.assertRaises(IndexError, getitem2)
self.assertRaises(TypeError, getitem3)
self.assertRaises(IndexError, setitem1)
self.assertRaises(IndexError, setitem2)
self.assertRaises(TypeError, setitem3)
for i in range(n):
mem[i] = i
for i in range(n):
self.assertEqual(mem[i], i)
mem[:] = 0
for i in range(-n, 0):
mem[i] = abs(i)
for i in range(-n, 0):
self.assertEqual(mem[i], abs(i))
mem[:] = 0
for i in range(n):
self.assertEqual(mem[i], 0)
mem[:] = 255
for i in range(n):
self.assertEqual(mem[i], 255)
mem[:n//2] = 1
mem[n//2:] = 0
for i in range(n//2):
self.assertEqual(mem[i], 1)
for i in range(n//2, n):
self.assertEqual(mem[i], 0)
mem[:] = 0
mem[1:5] = b"abcd"
mem[10:13] = b"xyz"
self.assertEqual(mem[0], 0)
for i, c in enumerate("abcd"):
self.assertEqual(mem[1+i], ord(c))
for i in range(5, 10):
self.assertEqual(mem[i], 0)
for i, c in enumerate("xyz"):
self.assertEqual(mem[10+i], ord(c))
for i in range(13, n):
self.assertEqual(mem[i], 0)
self.assertEqual(mem[1:5].tobytes(), b"abcd")
self.assertEqual(mem[10:13].tobytes(), b"xyz")
finally:
MPI.Free_mem(mem)
self.assertEqual(mem.address, 0)
self.assertEqual(mem.nbytes, 0)
self.assertEqual(mem.readonly, False)
try:
MPI.memory
except AttributeError:
unittest.disable(TestMemory, 'mpi4py-memory')
if __name__ == '__main__':
unittest.main()
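# Typical invocation under an MPI launcher (assuming mpiexec is on PATH):
#
#     mpiexec -n 2 python test_memory.py
#
# A plain `python test_memory.py` should also work, since these memory
# tests are rank-local.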
|