# Generated by Django 2.0 on 2017-12-12 16:58
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('app', '0013_auto_20171212_1955'),
]
operations = [
migrations.AlterField(
model_name='driverprofile',
name='car_pic',
field=models.ImageField(blank=True, default='car_pic/default_car.jpg', upload_to='car_pic'),
),
migrations.AlterField(
model_name='driverprofile',
name='prof_pic',
field=models.ImageField(blank=True, default='Driver/prof_pic/prof_pic.png', upload_to='Driver/prof_pic'),
),
migrations.AlterField(
model_name='riderprofile',
name='prof_pic',
field=models.ImageField(blank=True, default='Rider/prof_pic/prof_pic.png', upload_to='Rider/prof_pic'),
),
]
|
#!/usr/bin/env python
# This work was created by participants in the DataONE project, and is
# jointly copyrighted by participating institutions in DataONE. For
# more information on DataONE, see our web site at http://dataone.org.
#
# Copyright 2009-2019 DataONE
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Pretty print an operation."""
import d1_cli.impl.util
LEVEL_INDENT = 2
TAB = 30
class OperationFormatter(object):
"""Print an operation according to the template.
The template contains all parameters that can be in any of the operations and
determines the relative position of each parameter that is present in the operation.
"""
def __init__(self):
self._template = (
"comment",
"operation",
("CLI", "verbose", "editor"),
("Target Nodes", "cn-url", "mn-url"),
("Authentication", "anonymous", "cert-file", "key-file"),
("Slicing", "start", "count"),
(
"Searching",
"query",
"query-type",
"from-date",
"to-date",
"search-format-id",
),
(
"Parameters",
"identifier",
"identifier-new",
"identifier-old",
"identifier-package",
"identifier-science-meta",
"identifier-science-data",
"science-file",
("Misc", "format-id", "algorithm"),
("Reference Nodes", "authoritative-mn"),
("Subjects", "rights-holder"),
("Access Control", "allow"),
(
"Replication",
"replication-allowed",
"number-of-replicas",
"blocked-nodes",
"preferred-nodes",
),
),
)
def print_operation(self, operation):
# pprint.pprint(operation)
for line in self._format_operation(operation, self._template, 0):
d1_cli.impl.util.print_info(line)
def _format_operation(self, operation, template, indent):
lines = []
for v in template:
if isinstance(v, str):
lines.extend(self._format_value(operation, v, indent))
else:
lines_section = self._format_operation(
operation, v[1:], indent + LEVEL_INDENT
)
if len(lines_section):
lines.append(" " * indent + v[0] + ":")
lines.extend(lines_section)
return lines
def _format_value(self, operation, key, indent):
"""A value that exists in the operation but has value None is displayed.
A value that does not exist in the operation is left out entirely. The value
name in the operation must match the value name in the template, but the
location does not have to match.
"""
v = self._find_value(operation, key)
if v == "NOT_FOUND":
return []
if not isinstance(v, list):
v = [v]
if not len(v):
v = [None]
key = key + ":"
lines = []
for s in v:
# Access control rules are stored in tuples.
if isinstance(s, tuple):
s = "{}: {}".format(*s)
lines.append(
"{}{}{}{}".format(
" " * indent, key, " " * (TAB - indent - len(key) - 1), s
)
)
key = ""
return lines
def _find_value(self, operation, key):
for k in list(operation.keys()):
if k == key:
return operation[k]
if isinstance(operation[k], dict):
r = self._find_value(operation[k], key)
if r != "NOT_FOUND":
return r
return "NOT_FOUND"
|
from elmo.modules.elmo import Elmo, batch_to_ids
|
from django.conf.urls import url
from rest_framework import renderers
from .views import AssetViewSet, CatalogViewSet, CategoryViewSet,\
ProductViewSet, SiteViewSet, ProductByCategory, AssetsBulk
app_name = 'API'
category_list = CategoryViewSet.as_view({
'get': 'list'
})
category_detail = CategoryViewSet.as_view({
'get': 'retrieve'
})
product_list = ProductViewSet.as_view({
'get': 'list'
})
product_detail = ProductViewSet.as_view({
'get': 'retrieve'
})
catalog_list = CatalogViewSet.as_view({
'get': 'list'
})
catalog_detail = CatalogViewSet.as_view({
'get': 'retrieve'
})
site_list = SiteViewSet.as_view({
'get': 'list'
})
site_detail = SiteViewSet.as_view({
'get': 'retrieve'
})
asset_list = AssetViewSet.as_view({
'get': 'list'
})
asset_detail = AssetViewSet.as_view({
'get': 'retrieve'
})
urlpatterns = [
url(r'^product/(?P<product_id>[\w\- ]+)/$', product_detail,
name='product-detail'),
url(r'^products/$', product_list, name='product-list'),
url(r'^products/(?P<category_name>[\w\- ]+)/$',
ProductByCategory.as_view(), name='products-category'),
url(r'^categories/$', category_list, name='category-list'),
url(r'^catalogs/$', catalog_list, name='catalog-list'),
url(r'^asset/(?P<pk>[0-9]+)/$', asset_detail, name='asset-detail'),
url(r'^site/(?P<config_name>[\w\- ]+)/$', site_detail,
name='site-config'),
url(r'^assets-bulk/$', AssetsBulk.as_view(), name='assets-bulk'),
]
|
"""
Code to test the hydrotools module.
"""
# Import the nwm Client
from hydrotools.nwm_client import http as nwm
import pandas as pd
# Path to server (NOMADS in this case)
server = "https://nomads.ncep.noaa.gov/pub/data/nccf/com/nwm/prod/"
# Instantiate model data service
model_data_service = nwm.NWMDataService(server)
# Set reference time
yesterday = pd.Timestamp.utcnow() - pd.Timedelta("1D")
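# Note: the "-" (no zero-padding) strftime flag below is platform-dependent;
# it works with glibc/macOS strftime but not with the Windows C runtime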
reference_time = yesterday.strftime("%Y%m%dT%-HZ")
# Retrieve forecast data
# By default, only retrieves data at USGS gaging sites in
# CONUS that are used for model assimilation
forecast_data = model_data_service.get(
configuration = "short_range",
reference_time = reference_time
)
# Look at the data
print(forecast_data.info(memory_usage='deep'))
print(forecast_data[['value_time', 'value']].head())
|
import os
from dotenv import load_dotenv
load_dotenv()
BOT_TOKEN = os.getenv('bot_token')
|
program1 = [ [] ,
[] ,
[] ,
[] ]
programs_dict = {"program1" : program1}
|
# Generated by Django 2.2.11 on 2020-05-19 16:16
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('contentPages', '0036_auto_20200519_1615'),
]
operations = [
migrations.RenameField(
model_name='createnewresourcetype',
old_name='new_resource_type',
new_name='resource_type',
),
migrations.RemoveField(
model_name='createnewresourcetype',
name='available_resource_types',
),
]
|
"""
conference_helper.py -- Udacity conference server-side Python App Engine
HTTP controller handlers for memcache & task queue access
created by @Robert_Avram on 2015 June 6
"""
import endpoints
from google.appengine.ext import ndb
from google.appengine.api import search
from google.appengine.api import taskqueue
from google.appengine.api import memcache
from google.net.proto.ProtocolBuffer import ProtocolBufferDecodeError
from webapp2 import cached_property
from models import Conference
from models import ConferenceSession
from models import ConferenceSpeaker
from models import Profile
from settings import MEMCACHE_ANNOUNCEMENTS_KEY
from settings import MEMCACHE_FEATURED_SPEAKER_KEY
from settings import ANNOUNCEMENT_TPL
from settings import DEFAULTS
from settings import OPERATORS
from settings import FIELDS
import message_models as mm
import logging
import utils
from datetime import datetime
def user_required(handler):
"""Decorator that checks if there's a user associated with the current session."""
def check_login(self, *args, **kwargs):
# Make sure there is a user authenticated
if not self.auth_user:
raise endpoints.UnauthorizedException('Authorization Required')
# Make sure the current user has a profile in our DB
if not self.user:
            raise endpoints.UnauthorizedException('Even though you are authorized, you do not have a Profile. \
                Please update your account first; a Profile is required.')
else:
return handler(self, *args, **kwargs)
return check_login
class BaseHandler(object):
''' Basic Handler functions that can be inherited by any api '''
@cached_property
def user(self):
''' helper function that computes and caches current user profile
relies on auth_user, returns Profile or None'''
# check if there is a current user logged in
if self.auth_user:
# get user_id of current logged in auth_user
user_id = utils.getUserId(self.auth_user)
p_key = ndb.Key(Profile, user_id)
# get Profile from datastore
return p_key.get()
else:
return None
@cached_property
def auth_user(self):
''' helper function that computes and caches current_user '''
return endpoints.get_current_user()
class ApiHelper(BaseHandler):
''' Class meant to help the conference Api. Base: BaseHandler '''
@staticmethod
def get_websafe_key(urlsafeKey, modelkind):
        ''' takes a urlsafeKey and the kind of key it should be and
        returns the ndb.Key or raises a BadRequestException if the key
        is not the proper format or not the right kind '''
try:
s_key = ndb.Key(urlsafe=urlsafeKey)
except ProtocolBufferDecodeError:
raise endpoints.BadRequestException(
'the key received is not a valid urlsafe key')
        if (not s_key) or (not s_key.kind() == modelkind):
            raise endpoints.BadRequestException(
                'the key received is not a valid key for the kind %s' %
                modelkind)
return s_key
@user_required
def _add_session_to_wishlist(self, request):
''' adds a session to the user's wishlist '''
# make sure that the websafeSessionKey is actually valid
s_key = self.get_websafe_key(
request.websafeSessionKey,
'ConferenceSession')
# check if the session exists in the db
session = s_key.get()
if not session:
raise endpoints.NotFoundException(
'The session you want to add does not exist')
# make sure the keys are not in the wishList already
if session.key.parent() not in self.user.wishList.conferences:
# if this conference doesn't exist in the wishList,
# add it since the session belongs to it
self.user.wishList.conferences.append(session.key.parent())
# this also implies that this session does not exist in the
# wishList
self.user.wishList.sessions.append(session.key)
self.user.put()
elif session.key not in self.user.wishList.sessions:
self.user.wishList.sessions.append(session.key)
self.user.put()
else:
raise endpoints.BadRequestException(
'the session is already in the wish list')
return True
    def _query_index(self, qry):
        ''' Query the search index for sessions,
        takes in search.Query '''
        # Query the index.
        index = search.Index(name='sessions')
        # initialize up front so a search.Error still returns an empty list
        items = []
        try:
            results = index.search(qry)
            # Iterate through the search results.
            for scored_document in results:
                items.append(self._copy_session_doc_to_form(scored_document))
        except search.Error as e:
            logging.error(e)
        return items
    def _add_to_search_index(self, session, speaker, conference):
        ''' Create a search document based on session, speaker and conference,
        and add it to the search index '''
# define the index
index = search.Index(name='sessions')
# create the document object
doc = search.Document(
# the doc_id will be set to the key of the session
doc_id=session.key.urlsafe(),
fields=[
search.TextField(name='name', value=session.name),
search.TextField(name='type', value=session.type),
search.NumberField(name='duration', value=session.duration),
search.DateField(name="startDate", value=session.startDate),
search.NumberField(
name="startTime",
value=utils.time_to_minutes(
session.startTime)),
search.TextField(name='highlights', value=session.highlights),
search.TextField(
name='speakerName',
value=speaker.displayName),
search.TextField(name='conferenceName', value=conference.name),
search.TextField(name='conferenceTopics', value=" ".join(
[topic for topic in conference.topics])),
search.TextField(name='conferenceCity', value=conference.city),
search.TextField(
name='conferenceDescription',
value=conference.description),
])
try:
index.put(doc)
        except search.PutError as e:
            result = e.results[0]
            if result.code == search.OperationResult.TRANSIENT_ERROR:
                # if TRANSIENT_ERROR, retry putting the document:
                try:
                    index.put(doc)
                except search.Error as e:
                    logging.error(e)
except search.Error as e:
logging.error(e)
@user_required
def _remove_session_from_wishlist(
self, conf_sessionKey, removeConference=False):
''' Removes a session from the wishList '''
# make sure that the websafeSessionKey is actually valid
s_key = self.get_websafe_key(conf_sessionKey, 'ConferenceSession')
# check if the session exists in the db
session = s_key.get()
        if not session:
            raise endpoints.NotFoundException(
                'The session you want to remove does not exist')
# if key is in the wishList remove it otherwise BadRequestException
if session.key in self.user.wishList.sessions:
self.user.wishList.sessions.remove(session.key)
else:
raise endpoints.BadRequestException(
'the session is not in the wish list')
# if the user wants to remove the conference as well
if removeConference:
# check if there are any other sessions in the wishlist with the
# same conference
for sesskey in self.user.wishList.sessions:
if sesskey.parent() == session.key.parent():
raise endpoints.ConflictException(
"cannot remove conference because there are other sessions from this conference in the wish list")
self.user.wishList.conferences.remove(session.key.parent())
self.user.put()
return True
# cross-group needed because the speaker is not related to the session
@ndb.transactional(xg=True)
def _putSessionAndSpeaker(self, my_session, conf, speaker):
''' transactional put for session and speaker '''
my_session.put()
if conf.key not in speaker.conferences:
speaker.conferences.append(conf.key)
speaker.conferenceSessions.append(my_session.key)
speaker.put()
return (my_session, conf, speaker)
@user_required
def _get_wishlist(self):
return self.user.wishList.to_form()
@user_required
def _createSession(self, request):
'''creates a ConferenceSession, adds it as a child of the conference, returns the stored object'''
# make sure the speakerKey is for a valid speaker
speaker_key = self.get_websafe_key(
request.speakerKey,
"ConferenceSpeaker")
speaker = speaker_key.get()
        # make sure the speaker exists in the DB
if not speaker:
raise endpoints.NotFoundException(
"The speaker you requested was not found, \
Please register a speaker first")
# get Conference from the DB
wsck = self.get_websafe_key(request.websafeConferenceKey, "Conference")
conf = wsck.get()
# make sure conference exists and that it belongs to current user
if not conf:
raise endpoints.NotFoundException(
'No conference found with key: %s' % wsck)
if not conf.key.parent() == self.user.key:
raise endpoints.ForbiddenException(
'This conference was organized by a different user')
# get a key for the new session
s_id = ConferenceSession.allocate_ids(size=1, parent=conf.key)[0]
session_key = ndb.Key(ConferenceSession, s_id, parent=conf.key)
# put the session in the db and update conference
my_session = ConferenceSession.from_form(request, session_key)
# TODO: make sure that the session times fall between the conference
# times
# check if speaker already has a session within this conference
if conf.key in speaker.conferences:
            # if yes, retrieve all the other session keys for this speaker in
            # this conference; the current session is not included because we
            # don't want to retrieve it again, we can just pass the name
sessions_in_conference = [
skey.urlsafe() for skey in speaker.conferenceSessions if skey.parent() == conf.key]
            # make this a featured speaker for this conference, as asked in
            # task 4 of the project, by setting up a task to do this
taskqueue.add(params={"speaker_name": speaker.displayName,
"sess_keys": sessions_in_conference,
"current_sess_name": my_session.name,
"conf": conf.name,
"conf_loc": conf.city},
url='/tasks/add_featured_speaker')
        # use a transactional helper to make the updates; the current function
        # itself could not be transactional because of the id allocation
self._putSessionAndSpeaker(my_session, conf, speaker)
# create an indexed document for the search API based on this session
self._add_to_search_index(my_session, speaker, conf)
return my_session.to_form(speaker)
@staticmethod
def _setFeaturedSpeaker(
speaker_name, sess_keys, current_sess_name, conf, conf_loc):
        ''' Sets the featured speaker in memcache '''
# get the sessions from sess_keys, we can assume that the sess_keys are valid since they
# are passed by the task
sessions = ndb.get_multi([ndb.Key(urlsafe=sk) for sk in sess_keys])
s_names = [s.name for s in sessions]
s_names.append(current_sess_name)
memcache.set(key=MEMCACHE_FEATURED_SPEAKER_KEY, value={"name": speaker_name,
"sessions": s_names,
"conf": conf,
"conf_loc": conf_loc})
@user_required
def _registerSpeaker(self, request):
        '''registers a speaker; user needs to be logged in and a conference organizer to register a speaker'''
# make sure the displayName received is valid format
if not utils.is_valid_name(request.displayName):
raise endpoints.BadRequestException(
"displayName is not valid: it must be between 3 and 50 characters with no special characters and title case")
        # make sure user has organizer privileges, i.e. has organized at least
        # one conference
cnt = Conference.query(ancestor=self.user.key).count(limit=1)
if not cnt:
raise endpoints.ForbiddenException(
"You need to have organized at least one conference in order to register speakers")
speaker = ConferenceSpeaker(displayName=request.displayName)
speaker.put()
return speaker.to_form()
    def _queryproblem(self, request):
        ''' session query method that filters out sessions starting after a certain
        time (in whole-hour blocks) and excludes up to 3 types of sessions '''
# to reduce friction we will only allow 3 excludes
if len(request.exclude) > 3:
raise endpoints.BadRequestException(
"You are only allowed to exclude up to 3 types of Sessions.")
# list of all allowed timeslots
# ideally this list is created in order from the most popular session
# times
allowed_timeslots = [i for i in range(24)]
# compose a list of unavailable times
        if request.afterTime:
            disallowed_timeslots = [i for i in range(request.afterTime, 24)]
        else:
            disallowed_timeslots = []
        # exclude disallowed timeslots
        query_times = [
            i for i in allowed_timeslots if i not in disallowed_timeslots]
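        # e.g. afterTime=18 disallows slots 18..23, leaving query_times [0..17]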
q = ConferenceSession.query()
q = q.filter(ConferenceSession.startTimeSlot.IN(query_times))
# filter out all excludes
for s_type in request.exclude:
q = q.filter(ConferenceSession.type != s_type)
        # order by session type first since that is the inequality filter
        q = q.order(ConferenceSession.type)
        q = q.order(ConferenceSession.startTime)
        # fetch max 100 records
        sessions = q.fetch(100)
        speaker_keys = []
        for sess in sessions:
            speaker_keys.append(sess.speakerKey)
# get speakers for every session in order
speakers = ndb.get_multi(speaker_keys)
return mm.ConferenceSessionForms(
items=[sessions[i].to_form(speakers[i]) for i in range(len(sessions))])
def _copy_session_doc_to_form(self, doc):
''' copies a ScoredDocument to ConferenceSessionForm_search '''
form_out = mm.ConferenceSessionForm_search()
setattr(form_out, "websafeSessionKey", doc.doc_id)
for field in doc.fields:
if isinstance(field, search.NumberField):
if field.name == "startTime":
setattr(form_out,
field.name,
utils.minutes_to_timestring(int(field.value)))
continue
setattr(form_out, field.name, int(field.value))
elif isinstance(field, search.DateField):
setattr(form_out, field.name, str(field.value))
else:
setattr(form_out, field.name, field.value)
form_out.check_initialized()
return form_out
def _queryproblem2(self, request):
''' use the search API to query for specific sessions '''
# only allow up to 3 excludes and 3 includes
if len(request.exclude_types) > 3 or len(request.include_types) > 3:
raise endpoints.BadRequestException(
"you can only exclude or include max 3 types")
# limit the length of the search fields that someone sends
if (request.search_highlights and len(request.search_highlights) > 50)\
or (request.search_general and len(request.search_general) > 50):
raise endpoints.BadRequestException(
"your search query strings can only be up to 50 characters, longer blocks are useless anyway")
# start forming the query string qs
qs = ''
# check if the variables were passed in and update the qs accordingly
if request.before_time:
qs += 'startTime < ' + \
str(utils.time_to_minutes(request.before_time))
if request.after_time:
qs += ' startTime > ' + \
str(utils.time_to_minutes(request.after_time))
if request.exclude_types:
qs += " NOT type: ("
for i in range(len(request.exclude_types)):
qs += utils.clean_s(request.exclude_types[i])
if not i == len(request.exclude_types) - 1:
qs += " OR "
continue
qs += ")"
if request.include_types:
qs += " type: ("
for i in range(len(request.include_types)):
qs += utils.clean_s(request.include_types[i])
if not i == len(request.include_types) - 1:
qs += " OR "
continue
qs += ")"
if request.search_highlights:
qs += " highlights:" + utils.clean_s(request.search_highlights)
if request.search_general:
qs += " " + utils.clean_s(request.search_general)
# add some sorting options
sort1 = search.SortExpression(
expression='startDate',
direction=search.SortExpression.ASCENDING,
default_value=0)
# compose the sort options
# attn: Using match_scorer is more expensive to run but it sorts the
# documents based on relevance better.
sort_opts = search.SortOptions(
expressions=[sort1],
match_scorer=search.MatchScorer())
# add some query options, limit on 25 results
query_options = search.QueryOptions(
limit=25,
sort_options=sort_opts)
# compose the query
qry = search.Query(query_string=qs, options=query_options)
return self._query_index(qry)
# PREVIOUSLY EXISTING METHODS - - - - - - - - - - - - - - - - - - - - -
# - - - - - - -
@staticmethod
def _cacheAnnouncement():
"""Create Announcement & assign to memcache; used by
memcache cron job & putAnnouncement().
"""
confs = Conference.query(ndb.AND(
Conference.seatsAvailable <= 5,
Conference.seatsAvailable > 0)
).fetch(projection=[Conference.name])
if confs:
# If there are almost sold out conferences,
# format announcement and set it in memcache
announcement = ANNOUNCEMENT_TPL % (
', '.join(conf.name for conf in confs))
memcache.set(MEMCACHE_ANNOUNCEMENTS_KEY, announcement)
else:
# If there are no sold out conferences,
# delete the memcache announcements entry
announcement = ""
memcache.delete(MEMCACHE_ANNOUNCEMENTS_KEY)
return announcement
def _createConferenceObject(self, request):
"""Create or update Conference object, returning ConferenceForm/request."""
# preload necessary data items
user = endpoints.get_current_user()
if not user:
raise endpoints.UnauthorizedException('Authorization required')
user_id = utils.getUserId(user)
# modify function to not allow creation of conferences without having a
# profile
if not self.user:
            raise endpoints.ForbiddenException(
                "Before creating conferences, you need a profile; run the getProfile method first")
if not request.name:
raise endpoints.BadRequestException(
"Conference 'name' field required")
# copy ConferenceForm/ProtoRPC Message into dict
data = {
field.name: getattr(
request,
field.name) for field in request.all_fields()}
del data['websafeKey']
del data['organizerDisplayName']
# add default values for those missing (both data model & outbound
# Message)
for df in DEFAULTS:
if data[df] in (None, []):
data[df] = DEFAULTS[df]
setattr(request, df, DEFAULTS[df])
# convert dates from strings to Date objects; set month based on
# start_date
if data['startDate']:
data['startDate'] = datetime.strptime(
data['startDate'][
:10],
"%Y-%m-%d").date()
data['month'] = data['startDate'].month
else:
data['month'] = 0
if data['endDate']:
data['endDate'] = datetime.strptime(
data['endDate'][
:10],
"%Y-%m-%d").date()
# set seatsAvailable to be same as maxAttendees on creation
if data["maxAttendees"] > 0:
data["seatsAvailable"] = data["maxAttendees"]
# generate Profile Key based on user ID and Conference
# ID based on Profile key get Conference key from ID
p_key = ndb.Key(Profile, user_id)
c_id = Conference.allocate_ids(size=1, parent=p_key)[0]
c_key = ndb.Key(Conference, c_id, parent=p_key)
data['key'] = c_key
data['organizerUserId'] = request.organizerUserId = user_id
# create Conference, send email to organizer confirming
# creation of Conference & return (modified) ConferenceForm
Conference(**data).put()
taskqueue.add(params={'email': user.email(),
'conferenceInfo': repr(request)},
url='/tasks/send_confirmation_email'
)
return request
@ndb.transactional()
def _updateConferenceObject(self, request):
user = endpoints.get_current_user()
if not user:
raise endpoints.UnauthorizedException('Authorization required')
user_id = utils.getUserId(user)
# copy ConferenceForm/ProtoRPC Message into dict
data = {
field.name: getattr(
request,
field.name) for field in request.all_fields()}
# update existing conference
conf = ndb.Key(urlsafe=request.websafeConferenceKey).get()
# check that conference exists
if not conf:
raise endpoints.NotFoundException(
'No conference found with key: %s' % request.websafeConferenceKey)
# check that user is owner
if user_id != conf.organizerUserId:
raise endpoints.ForbiddenException(
'Only the owner can update the conference.')
# Not getting all the fields, so don't create a new object; just
# copy relevant fields from ConferenceForm to Conference object
for field in request.all_fields():
data = getattr(request, field.name)
# only copy fields where we get data
if data not in (None, []):
# special handling for dates (convert string to Date)
if field.name in ('startDate', 'endDate'):
data = datetime.strptime(data, "%Y-%m-%d").date()
if field.name == 'startDate':
conf.month = data.month
# write to Conference object
setattr(conf, field.name, data)
conf.put()
prof = ndb.Key(Profile, user_id).get()
# by @Robert_Avram: replaced the self._copyConferenceToForm with
# conf.to_form
return conf.to_form(getattr(prof, 'displayName'))
def _getQuery(self, request):
"""Return formatted query from the submitted filters."""
q = Conference.query()
inequality_filter, filters = self._formatFilters(request.filters)
# If exists, sort on inequality filter first
if not inequality_filter:
q = q.order(Conference.name)
else:
q = q.order(ndb.GenericProperty(inequality_filter))
q = q.order(Conference.name)
filters = sorted(filters, key=lambda k: k['field'])
for filtr in filters:
if filtr["field"] in ["month", "maxAttendees"]:
filtr["value"] = int(filtr["value"])
formatted_query = ndb.query.FilterNode(
filtr["field"],
filtr["operator"],
filtr["value"])
q = q.filter(formatted_query)
return q
def _formatFilters(self, filters):
"""Parse, check validity and format user supplied filters."""
current_fields = []
formatted_filters = []
inequality_field = None
for f in filters:
filtr = {
field.name: getattr(
f,
field.name) for field in f.all_fields()}
try:
filtr["field"] = FIELDS[filtr["field"]]
filtr["operator"] = OPERATORS[filtr["operator"]]
except KeyError:
raise endpoints.BadRequestException(
"Filter contains invalid field or operator.")
# Every operation except "=" is an inequality
if filtr["operator"] != "=":
# check if inequality operation has been used in previous filters
# disallow the filter if inequality was performed on a different field before
# track the field on which the inequality operation is
# performed
if inequality_field and inequality_field != filtr["field"]:
raise endpoints.BadRequestException(
"Inequality filter is allowed on only one field.")
elif filtr["field"] in ["city", "topics"]:
raise endpoints.BadRequestException(
"Inequality filter not allowed on city or topics.")
else:
inequality_field = filtr["field"]
if filtr["field"] in current_fields:
raise endpoints.BadRequestException(
"You cannot query multiple fields of one type, %s" %
filtr['field'])
current_fields.append(filtr['field'])
formatted_filters.append(filtr)
return (inequality_field, formatted_filters)
# - - - Profile objects - - - - - - - - - - - - - - - - - - -
# TODO: replace _copyProfileToForm with a to_form method on the Profile
# model
def _copyProfileToForm(self, prof):
"""Copy relevant fields from Profile to ProfileForm."""
# copy relevant fields from Profile to ProfileForm
pf = mm.ProfileForm()
for field in pf.all_fields():
if hasattr(prof, field.name):
# convert t-shirt string to Enum; just copy others
if field.name == 'teeShirtSize':
setattr(
pf,
field.name,
getattr(
mm.TeeShirtSize,
getattr(
prof,
field.name)))
else:
setattr(pf, field.name, getattr(prof, field.name))
pf.check_initialized()
return pf
def _getProfileFromUser(self):
"""Return user Profile from datastore, creating new one if non-existent."""
# make sure user is authed
user = endpoints.get_current_user()
if not user:
raise endpoints.UnauthorizedException('Authorization required')
# get Profile from datastore
user_id = utils.getUserId(user)
p_key = ndb.Key(Profile, user_id)
profile = p_key.get()
# create new Profile if not there
if not profile:
profile = Profile(
key=p_key,
displayName=user.nickname(),
mainEmail=user.email(),
teeShirtSize=str(mm.TeeShirtSize.NOT_SPECIFIED),
)
profile.put()
return profile # return Profile
def _doProfile(self, save_request=None):
"""Get user Profile and return to user, possibly updating it first."""
# get user Profile
prof = self._getProfileFromUser()
        # if saveProfile(), process user-modifiable fields
if save_request:
for field in ('displayName', 'teeShirtSize'):
if hasattr(save_request, field):
val = getattr(save_request, field)
if val:
setattr(prof, field, str(val))
# if field == 'teeShirtSize':
# setattr(prof, field, str(val).upper())
# else:
# setattr(prof, field, val)
prof.put()
# return ProfileForm
return self._copyProfileToForm(prof)
|
# Zip Function
""" Zip function combines two or more into one... """
friend = {'Rolf','Jen','Bob','Anne'}
time_since_seen = [3, 7, 15, 4]
long_timers = dict(zip(friend, time_since_seen))
print(long_timers) # O/P - {'Anne': 3, 'Jen': 7, 'Rolf': 15, 'Bob': 4}
# Also we can make it in list too...
long_timers = list(zip(friend, time_since_seen))
print(long_timers) # O/P - [('Rolf', 3), ('Anne', 7), ('Bob', 15), ('Jen', 4)]
long_timers = list(zip(friend, time_since_seen, [1, 2, 3, 4, 5]))
print(long_timers) # O/P - [('Bob', 3, 1), ('Jen', 7, 2), ('Anne', 15, 3), ('Rolf', 4, 4)]
# Here zip will ignore the last element ( 5 ) in the list, and ignore matching to time_since_seen list...
# An interesting Fact -
long_timers = zip(friend, time_since_seen)
print(long_timers) # O/P - <zip object at 0x0000015FD8496DC0>
# we'll look at this lazy zip object in the example below...
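# For example, a zip object is an iterator, so it can only be consumed once:
pairs = zip(friend, time_since_seen)
print(list(pairs)) # O/P - [('Rolf', 3), ('Jen', 7), ('Bob', 15), ('Anne', 4)]
print(list(pairs)) # O/P - [] (the iterator is already exhausted)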
|
"""
workplane
"""
from __future__ import absolute_import, division, print_function, unicode_literals
import logging
from OpenGL import GL
from mcedit2.rendering.scenegraph import scenenode
from mcedit2.rendering.scenegraph.matrix import Translate
from mcedit2.rendering.scenegraph.vertex_array import VertexNode
from mcedit2.rendering.vertexarraybuffer import VertexArrayBuffer
log = logging.getLogger(__name__)
class WorkplaneNode(scenenode.Node):
def __init__(self):
super(WorkplaneNode, self).__init__()
self.translateState = Translate()
self.addState(self.translateState)
self.axis = 1
vertexNode = None
_axis = 1
@property
def axis(self):
return self._axis
@axis.setter
def axis(self, axis):
self._axis = axis
self.dirty = True
gridSize = 64
left = -gridSize//2
right = gridSize//2
gridArrayBuffer = VertexArrayBuffer((gridSize * 4,),
GL.GL_LINES, textures=False, lights=False)
gridArrayBuffer.rgba[:] = 255, 255, 255, 100
# y=0, move by translating
gridArrayBuffer.vertex[:, axis] = 0
axis1 = (axis-1) % 3
axis2 = (axis+1) % 3
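        # for the default axis=1 (Y up): axis1 = 0 (X) and axis2 = 2 (Z), so the
        # grid lies in the XZ plane; the even/odd vertex indices below are the
        # two endpoints of each GL_LINES segment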
# left edge
gridArrayBuffer.vertex[0:gridSize*2:2, axis2] = left
gridArrayBuffer.vertex[0:gridSize*2:2, axis1] = range(left, right)
# right edge
gridArrayBuffer.vertex[1:gridSize*2:2, axis2] = right-1
gridArrayBuffer.vertex[1:gridSize*2:2, axis1] = range(left, right)
# bottom edge
gridArrayBuffer.vertex[gridSize*2::2, axis1] = left
gridArrayBuffer.vertex[gridSize*2::2, axis2] = range(left, right)
# top edge
gridArrayBuffer.vertex[gridSize*2+1::2, axis1] = right-1
gridArrayBuffer.vertex[gridSize*2+1::2, axis2] = range(left, right)
if self.vertexNode:
self.removeChild(self.vertexNode)
self.vertexNode = VertexNode([gridArrayBuffer])
self.addChild(self.vertexNode)
@property
def position(self):
return self.translateState.translateOffset
@position.setter
def position(self, value):
self.translateState.translateOffset = value
|
import argparse
import yaml
import os
from common import MacrobaseArgAction
from macrobase_cmd import run_macrobase
from time import strftime
def bench_directory(workload):
sub_dir = os.path.join(
os.getcwd(),
'bench',
'workflows',
workload['macrobase.query.name'],
strftime('%m-%d-%H:%M:%S'))
os.system('mkdir -p %s' % sub_dir)
return sub_dir
def parse_args():
parser = argparse.ArgumentParser("""Example usage:
python script/benchmark/run_sweeping.py bench/conf/treekde-test.yaml \\
--macrobase-analysis-transformType MCD MAD KDE TREE_KDE
""")
parser.add_argument('experiment_yaml', type=argparse.FileType('r'))
parser.add_argument('--macrobase-analysis-transformType',
nargs='*', const='sweeping_args',
action=MacrobaseArgAction)
return parser.parse_args()
if __name__ == '__main__':
args = parse_args()
experiment = yaml.load(args.experiment_yaml)
workload = experiment['macrobase']
all_experiments = {}
[(attr, values)] = args.sweeping_args.items()
for value in values:
kwargs = workload.copy()
kwargs[attr] = value
        print(kwargs)
_dir = bench_directory(kwargs)
config_filename = os.path.join(_dir, 'config.yaml')
with open(config_filename, 'w') as outfile:
yaml.dump(kwargs, outfile)
os.system('cat {}'.format(config_filename))
run_macrobase(conf=config_filename)
|
from PIL.PngImagePlugin import PngImageFile, PngInfo
from utils import *
import numpy as np
import itertools
import hashlib
import math
import PIL
import os
def rgbChannels(inFile: PIL.PngImagePlugin.PngImageFile, message: str="", quantizationWidths: list=[],
traversalOrder: list=[], outFile="./IO/outColor.png", verify: bool=False, verbose: bool=False):
"""
    This function takes an image of the form
[
[<RGB 1>, <RGB 2>, <RGB 3>, ... ],
[<RGB a>, <RGB a+1>, <RGB a+2>, ... ],
[<RGB b>, <RGB b+1>, <RGB b+2>, ... ],
...
]
where RGB <index> is of the form [R, G, B] (if there is an Alpha channel present, it is ignored)
    And utilizes a modified version of Wu and Tsai's algorithm to encode a message into this nested array structure.
Because this image is RGB, an order of traversal is needed to ensure the correct encoding/retrieval order
while traversing the structure.
Define a general pair of RGB pixels as [[R1, G1, B1], [R2, G2, B2]] and flatten it into [R1, G1, B1, R2, G2, B2]
    The traversal order is an array that maps each value to the location it should be sorted to.
After mapping and sorting the pixel values, pair adjacent pixels
For example, a possible traversal order is the standard [1, 3, 5, 2, 4, 6]
Applying this traversal order concept to the RGB pixel pair
[[185, 75, 250], [255, 80, 200]]
results in these encodable groups of values:
[[185, 255], [75, 80], [250, 200]]
"""
# Verify image data
if verify:
print("Beginning verification...")
if message == "":
try:
verificationData = inFile.text["png:fingerprint"].split(":")
except:
raise Exception(f"No verification data found.")
# Retrieve verifiable data from image properties
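            # rosenburgStrongPairing is presumably the Rosenberg-Strong pairing
            # function from utils: it reversibly packs two ints into one, so
            # reverse=True recovers the original (width, height) pair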
imageWidth, imageHeight = rosenburgStrongPairing(int(verificationData[0]), reverse=True)
bitLength, messageHash = retrieveLength(verificationData[1])
# Image dimensions are incorrect
if inFile.size[0] != imageWidth or inFile.size[1] != imageHeight:
raise Exception(f"Image verification failed. Image dimensions don't match encoded verification data.")
# Execute function without verifying data option
retrievedBinary = rgbChannels(inFile, message, quantizationWidths, traversalOrder, outFile, verbose=verbose)
# Ensure entire message was encoded
if len(retrievedBinary) >= bitLength:
retrievedBinary = retrievedBinary[:bitLength]
# Ensure hashes match
if hashlib.sha256(retrievedBinary.encode()).hexdigest() == messageHash:
print("\nVerified.")
return retrievedBinary
else:
raise Exception(f"Message verification failed. Hash of retrieved binary doesn't match encoded verification data.")
raise Exception("Message verification failed. Length of retrieved message binary doesn't match encoded verification data.")
else:
# Get binary of message
if sorted(set(message)) == ["0", "1"]:
messageBinary = message
else:
messageBinary = "0" + str(bin(int.from_bytes(message.encode(), "big")))[2:]
returnValue = rgbChannels(inFile, messageBinary, quantizationWidths, traversalOrder, outFile, verbose=verbose)
# Build verification data to place in loaded image properties
verificationBuilder = ""
verificationBuilder += f"{str(rosenburgStrongPairing([inFile.size[0], inFile.size[1]]))}:"
verificationBuilder += f"{embedLength(str(len(messageBinary)), messageBinary)}"
# Edit PNG metadata to include fingerprint of this PVD algorithm
modifyMetadata = PngImageFile(outFile)
metadata = PngInfo()
metadata.add_text("png:fingerprint", f"{verificationBuilder}")
modifyMetadata.save(outFile, pnginfo=metadata)
print("\nVerified.")
return returnValue
print()
if message == "":
if verbose:
print("Verbose message: no message given, assuming retrieval of message")
else:
# Get binary of message
if sorted(set(message)) == ["0", "1"]:
messageBinary = message
if verbose:
print("Verbose message: message contains only binary values, assuming binary message")
else:
messageBinary = "0" + str(bin(int.from_bytes(message.encode(), "big")))[2:]
if verbose:
print("Verbose message: message contains non-binary values, assuming ascii message")
quantizationWidths = validateQuantization(quantizationWidths, verbose)
traversalOrder = validateTraversal(traversalOrder, verbose)
print()
# If there is an Alpha channel present in the image, it is ignored
pixelPairs = pixelArrayToZigZag(inFile, 3, 2)
# If function is run without message, assume retrieval of message
if message == "":
print(f"Retrieving binary from file \"{inFile.filename}\"")
print()
# Retrieval function
messageBinary = ""
currentPairCounter = 0
for pixelPair in pixelPairs:
currentPairCounter += 1
if len(pixelPair) == 2:
# Flatten pixel pair array into un-nested list
pixelArray = [pixel for pair in pixelPair for pixel in pair]
# Sort pixel array given traversal order and group into calculation ready pairs
pixelIndicesDict = dict(sorted(dict(zip(traversalOrder, pixelArray)).items()))
traversedPixelPairs = list(groupImagePixels(list(pixelIndicesDict.values()), 2))
currentTraversedCounter = 0
for traversedPixelPair in traversedPixelPairs:
currentTraversedCounter += 1
# d value
difference = traversedPixelPair[1] - traversedPixelPair[0]
# Determine number of bits storable between pixels
for width in quantizationWidths:
if width[0] <= abs(difference) <= width[1]:
lowerBound = width[0]
upperBound = width[1]
break
                    # Falling-off-boundary check; ensure 0 <= calculated pixel value <= 255
testingPair = pixelPairEncode(traversedPixelPair, upperBound, difference)
if testingPair[0] < 0 or testingPair[1] < 0 or testingPair[0] > 255 or testingPair[1] > 255:
# One of the values "falls-off" the range from 0 to 255 and hence is invalid
if verbose == True:
print(f"Verbose message: channel pair number {currentTraversedCounter} in pixel pair number {currentPairCounter} has the possibility of falling off, skipping")
else:
# Passes the check, continue with decoding
# Number of storable bits between two pixels
storableCount = int(math.log(upperBound - lowerBound + 1, 2))
# Extract encoded decimal
retrievedDecimal = difference - lowerBound if difference >= 0 else - difference - lowerBound
retrievedBinary = bin(retrievedDecimal).replace("0b", "")
# Edge case in which embedded data began with 0's
if storableCount > len(retrievedBinary):
retrievedBinary = "0" * (storableCount-len(retrievedBinary)) + retrievedBinary
messageBinary += retrievedBinary
return messageBinary
else:
print(f"Encoding binary \"{messageBinary}\" into file \"{inFile.filename}\"")
print()
# Encoding function
newPixels = []
currentMessageIndex = 0
currentPairCounter = 0
for pixelPair in pixelPairs:
currentPairCounter += 1
if len(pixelPair) == 2 and currentMessageIndex < len(messageBinary) - 1:
# Flatten pixel pair array into un-nested list
pixelArray = [pixel for pair in pixelPair for pixel in pair]
# Sort pixel array given traversal order and group into calculation ready pairs
traversalIndiceDict = list(zip(traversalOrder, [0,1,2,3,4,5]))
pixelIndicesDict = dict(sorted(dict(zip(traversalIndiceDict, pixelArray)).items()))
traversedPixelPairs = list(groupImagePixels(list(pixelIndicesDict.values()), 2))
postEncodingValues = []
currentTraversedCounter = 0
for traversedPixelPair in traversedPixelPairs:
currentTraversedCounter += 1
# d value
difference = traversedPixelPair[1] - traversedPixelPair[0]
# Determine number of bits storable between pixels
for width in quantizationWidths:
# Only need to check upper bound because widths are sorted
if abs(difference) <= width[1]:
lowerBound = width[0]
upperBound = width[1]
break
                    # Falling-off-boundary check; ensure 0 <= calculated pixel value <= 255
testingPair = pixelPairEncode(traversedPixelPair, upperBound, difference)
if testingPair[0] < 0 or testingPair[1] < 0 or testingPair[0] > 255 or testingPair[1] > 255:
# One of the values "falls-off" the range from 0 to 255 and hence is invalid
# Append original pixel pair and skip encoding
postEncodingValues += traversedPixelPair
if verbose:
print(f"Verbose message: channel pair number {currentTraversedCounter} in pixel pair number {currentPairCounter} has the possibility of falling off, skipping")
else:
# Passes the check, continue with encoding
# Number of storable bits between two pixels
storableCount = int(math.log(upperBound - lowerBound + 1, 2))
# Ensure haven't already finished encoding entire message
if currentMessageIndex + storableCount <= len(messageBinary):
# Encode as normal
storableBits = messageBinary[currentMessageIndex:currentMessageIndex+storableCount]
currentMessageIndex += storableCount
else:
if currentMessageIndex == len(messageBinary):
# Finished encoding entire message
postEncodingValues += traversedPixelPair
continue
else:
# Can encode more bits than available, encode what's left
storableBits = messageBinary[currentMessageIndex:]
# Ensure last bit doesn't get corrupted, fill empty space with 0's
storableBits += "0" * (storableCount - len(messageBinary[currentMessageIndex:]))
currentMessageIndex = len(messageBinary)
# Get value of the chunk of message binary
storableBitsValue = int(storableBits, 2)
# d' value
differencePrime = lowerBound + storableBitsValue if difference >= 0 else -(lowerBound + storableBitsValue)
# Calculate new pixel pair
newPixelPair = pixelPairEncode(traversedPixelPair, differencePrime, difference)
postEncodingValues += newPixelPair
                # Un-sort pixel array given traversal order and group back into the original RGB channels
pixelIndicesDict = dict(sorted(dict(zip([ key[1] for key in pixelIndicesDict.keys() ], postEncodingValues)).items()))
reversedPaired = list(groupImagePixels([pixel for pixel in pixelIndicesDict.values()], 3))
newPixels += reversedPaired
else:
# For case in which there's an odd number of pixels; append lone pixel value
newPixels += pixelPair
returnValue = True
if currentMessageIndex != len(messageBinary):
print(f"Warning: only encoded {len(messageBinary[0:currentMessageIndex])} of {len(messageBinary)} bits ({round(100*len(messageBinary[0:currentMessageIndex])/len(messageBinary), 2)}%)")
returnValue = False
# Verbose errors
if verbose == True:
# Underline section of binary that was encoded
# Get max printable width in current terminal
width = os.get_terminal_size()[0]
if len(messageBinary) > width * 5:
print("Unable to print verbose warning, return binary exceeds maximum length")
# Create array groupings of message lines and underlinings
printableMessageLines = list(groupImagePixels(messageBinary, width))
printableUnderlinings = list(groupImagePixels("~"*len(messageBinary[0:currentMessageIndex]), width))
# Zip and print
print("\nVerbose warning: only encoded underlined section of message:")
for printableMessageLine, printableUnderlining in itertools.zip_longest(printableMessageLines, printableUnderlinings, fillvalue=""):
print(f"{printableMessageLine}")
if printableUnderlining:
print(f"{printableUnderlining}")
# Create new image structure, save file
newPixels = list(groupImagePixels(newPixels, inFile.size[0]))
newPixels = pixelArrayToZigZag(newPixels, 1, inFile.size[0], inFile.size[0], inFile.size[1])
array = np.array(newPixels, dtype=np.uint8)
savedImage = PIL.Image.fromarray(array)
savedImage.save(outFile)
    return returnValue
|
'''
To convert this Fetch code to be a member function of a class:
1. The sqliteConnection should be a data member of the class and connected
2. The function should take a string parameter for select statement. See line 16.
3. It should return 'true' if connected, otherwise 'false'
'''
import sqlite3
# con should be a data member
con = sqlite3.connect('SQLite_Python.db') # this name should be a variable
def Fetch(condb, nm):
    cursorObj = con.cursor()
    # parameterize the value to avoid SQL injection; a table name cannot be
    # a placeholder, so it is still formatted in
    sel = 'SELECT id FROM {0} WHERE name == ?'.format(condb)
    cursorObj.execute(sel, (nm,))
    rows = cursorObj.fetchall()
for row in rows:
print(row)
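# A minimal sketch of the class-based version described in the header comment
# above (illustrative; the class and parameter names are assumptions):
class Database:
    def __init__(self, db_name='SQLite_Python.db'):
        # the connection is a data member (point 1)
        self.con = sqlite3.connect(db_name)
    def Fetch(self, select_statement):
        # takes the full select statement as a string (point 2) and
        # returns True on success, otherwise False (point 3)
        try:
            cursorObj = self.con.cursor()
            cursorObj.execute(select_statement)
            for row in cursorObj.fetchall():
                print(row)
            return True
        except sqlite3.Error:
            return False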
Fetch('Database', 'name')
|
def inverse(x: int, p: int) -> int:
inv1 = 1
inv2 = 0
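    # iterative extended Euclid: returns x**-1 (mod p), possibly negative;
    # callers reduce their final coordinates mod p, so that is fine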
while p != 1 and p != 0:
inv1, inv2 = inv2, inv1 - inv2 * (x // p)
x, p = p, x % p
return inv2
def double(pt: tuple, p: int) -> tuple:
    if pt is None:
        return None
    (x, y) = pt
    if y == 0:
        # doubling a point with y == 0 yields the point at infinity
        return None
    # Calculate the tangent slope 3*x^2 / (2*y) modulo p
slope = 3 * pow(x, 2, p) * inverse(2 * y, p)
xsum = pow(slope, 2, p) - 2 * x
ysum = slope * (x - xsum) - y
return xsum % p, ysum % p
def add(p1: tuple, p2: tuple, p: int) -> tuple:
if p1 is None or p2 is None:
return None
(x1, y1) = p1
(x2, y2) = p2
    if x1 == x2:
        if (y1 + y2) % p == 0:
            # P + (-P) is the point at infinity (represented as None)
            return None
        return double(p1, p)
slope = (y1 - y2) * inverse(x1 - x2, p)
xsum = pow(slope, 2, p) - (x1 + x2)
ysum = slope * (x1 - xsum) - y1
return xsum % p, ysum % p
def multiply(gen: tuple, a: int, p: int) -> tuple:
scale = gen
acc = None
while a:
if a & 1:
if acc is None:
acc = scale
else:
acc = add(acc, scale, p)
scale = double(scale, p)
a >>= 1
return acc
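# A small sanity check (illustrative: the curve y^2 = x^3 + 7 over GF(17) and
# the generator point are chosen just for this sketch; the slope formula in
# double() assumes a curve of the form y^2 = x^3 + b):
if __name__ == "__main__":
    p = 17
    gen = (1, 5)  # 5**2 == 1**3 + 7 (mod 17), so the point is on the curve
    assert multiply(gen, 2, p) == double(gen, p) == (2, 10)
    # 3G computed two ways must agree: 2G + G and G + 2G
    assert add(double(gen, p), gen, p) == add(gen, double(gen, p), p)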
|
# Generated by Django 2.2.1 on 2021-02-17 05:02
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('webapi', '0052_image_video'),
]
operations = [
migrations.RemoveField(
model_name='image',
name='image_url',
),
migrations.AddField(
model_name='image',
name='image',
field=models.ImageField(default='', upload_to='images/'),
preserve_default=False,
),
]
|
import bpy
from math import radians
from . import utils
from .utils import update
from .utils import generate_armature
from .utils import generate_shapekey_dict
from .utils import bone_convert
from .armature_rename import armature_rename, bone_rename
def anim_armature(action):
satproperties = bpy.context.scene.satproperties
satinfo = bpy.context.scene.satinfo
def generate_rigify(action): #Creates Rigify armature and fills in all the Rigify parameters
#Armature creation
generate_armature('anim', action)
unit = satinfo.unit
#Creation
if action == 0:
armature = utils.arm.animation_armature
#Selects animation armature
update(1, armature)
#Hides all but the first layer
for i in [1,2,3,5,4,6,7]:
armature.data.layers[i] = False
#Checks how many spine bones there are for the spines.basic_spine Rigify parameter (At least 3 are required)
spines = 0
for container, bone in utils.arm.central_bones.items():
if container.count('spine'):
if bone:
spines += 1
#Creates 2 pelvis bones for whatever Rigify does with em
rigify_pelvis = ['Pelvis_L', 'Pelvis_R']
if spines > 2:
if utils.arm.central_bones['pelvis']:
prefix, bone = bone_convert(utils.arm.central_bones['pelvis'][0])
ppelvis = armature.pose.bones[prefix + bone]
epelvis = armature.data.edit_bones[prefix + bone]
for index, bone in enumerate(rigify_pelvis):
ebone = armature.data.edit_bones.new(bone)
ebone.head = epelvis.head
ebone.parent = epelvis
ebone.layers[3] = True
ebone.layers[0] = False
ebone.layers[8] = False
ebone.layers[9] = False
#New pelvis bone positioning
if satinfo.sbox:
ebone.tail.yz = ppelvis.head.y-10*unit, ppelvis.head.z+12*unit
else:
ebone.tail.yz = ppelvis.head.y-3*unit, ppelvis.head.z+4*unit
if index == 0:
if satinfo.sbox:
ebone.tail.x = ppelvis.head.x+10*unit
else:
ebone.tail.x = ppelvis.head.x+3.25*unit
elif index == 1:
if satinfo.sbox:
ebone.tail.x = ppelvis.head.x-10*unit
else:
ebone.tail.x = ppelvis.head.x-3.25*unit
rigify_palm = {}
if utils.arm.symmetrical_bones['fingers'].get('indexmeta') or utils.arm.symmetrical_bones['fingers'].get('middlemeta') or utils.arm.symmetrical_bones['fingers'].get('ringmeta'):
for container, bone in utils.arm.symmetrical_bones['fingers'].items():
if container.count('meta'):
for bone in bone:
if bone:
prefix, bone = bone_convert(bone)
pbone = armature.pose.bones[prefix + bone]
if container.count('indexmeta'):
pbone.rigify_type = 'limbs.super_palm'
else:
pbone.rigify_type = ''
elif utils.arm.symmetrical_bones['fingers'].get('fingercarpal'):
for bone in utils.arm.symmetrical_bones['fingers']['fingercarpal']:
if bone:
prefix, bone = bone_convert(bone)
pbone = armature.pose.bones[prefix + bone]
pbone.rigify_type = 'basic.super_copy'
pbone.rigify_parameters.super_copy_widget_type = 'bone'
#Disabled, more trouble than they're worth and rather purposeless anyways, only for S&Box armatures (Since they already have palm bones)
'''else:
#Creates multiple palm bones for fingers
rigify_palm = {'finger1': [], 'finger2': [], 'finger3': [], 'finger4': []}
#How many finger roots there are (There must be at least 2 for palm bones)
fingers = 0
for container, bone in utils.arm.symmetrical_bones['fingers'].items():
if container == 'finger1' or container == 'finger2' or container == 'finger3' or container == 'finger3' or container == 'finger4':
if bone:
fingers += 1
if fingers > 1:
for container, bone in utils.arm.symmetrical_bones['fingers'].items():
if container == 'finger1' or container == 'finger2' or container == 'finger3' or container == 'finger3' or container == 'finger4':
for index, bone in enumerate(bone):
if bone:
prefix, bone = bone_convert(bone)
if satinfo.scheme == 0 and not satinfo.sbox:
bone2 = bone_rename(1, bone, index)
palm = 'Palm_' + bone2
else:
palm = 'Palm_' + bone
efinger = armature.data.edit_bones[prefix + bone]
epalm = armature.data.edit_bones.new(palm)
rigify_palm[container].append(palm)
efinger.layers[5] = True
efinger.layers[0] = False
epalm.layers[5] = True
epalm.layers[0] = False
epalm.layers[8] = False
efinger.parent = epalm
if utils.arm.symmetrical_bones['arms']['hand'] and utils.arm.symmetrical_bones['arms']['hand'][index]:
prefix, bone = bone_convert(utils.arm.symmetrical_bones['arms']['hand'][index])
ehand = armature.data.edit_bones[prefix + bone]
epalm.parent = ehand
epalm.tail = efinger.head
epalm.head.xyz = ehand.head.x, epalm.tail.y, ehand.head.z'''
#Creates heels for easier leg tweaking
rigify_heel = ['Heel_L', 'Heel_R']
#Creates heel bone if none are present
rigify_toe = ['Toe_L', 'Toe_R']
if utils.arm.symmetrical_bones['legs']['foot']:
for index, bone in enumerate(utils.arm.symmetrical_bones['legs']['foot']):
prefix, bone = bone_convert(utils.arm.symmetrical_bones['legs']['foot'][index])
pfoot = armature.pose.bones[prefix + bone]
efoot = armature.data.edit_bones[prefix + bone]
ebone = armature.data.edit_bones.new(rigify_heel[index])
if index == 0:
if satinfo.goldsource:
ebone.head.xyz = efoot.head.x - 2*unit, efoot.head.y + 3*unit, 0
ebone.tail.xyz = efoot.head.x + 2*unit, efoot.head.y + 3*unit, 0
elif satinfo.sbox:
ebone.head.xyz = efoot.head.x - 2*unit, efoot.head.y + 5*unit, 0
ebone.tail.xyz = efoot.head.x + 2*unit, efoot.head.y + 5*unit, 0
else:
ebone.head.xyz = efoot.head.x - 2*unit, efoot.head.y + 2.4*unit, 0
ebone.tail.xyz = efoot.head.x + 2*unit, efoot.head.y + 2.4*unit, 0
elif index == 1:
if satinfo.goldsource:
ebone.head.xyz = efoot.head.x + 2*unit, efoot.head.y + 3*unit, 0
ebone.tail.xyz = efoot.head.x - 2*unit, efoot.head.y + 3*unit, 0
elif satinfo.sbox:
ebone.head.xyz = efoot.head.x + 2*unit, efoot.head.y + 5*unit, 0
ebone.tail.xyz = efoot.head.x - 2*unit, efoot.head.y + 5*unit, 0
else:
ebone.head.xyz = efoot.head.x + 2*unit, efoot.head.y + 2.4*unit, 0
ebone.tail.xyz = efoot.head.x - 2*unit, efoot.head.y + 2.4*unit, 0
ebone.parent = efoot
if index == 0:
ebone.layers[13] = True
elif index == 1:
ebone.layers[16] = True
ebone.layers[0] = False
ebone.layers[8] = False
ebone.layers[9] = False
if not utils.arm.symmetrical_bones['legs']['toe0']:
ebone = armature.data.edit_bones.new(rigify_toe[index])
ebone.head = pfoot.tail
if pfoot.tail.y < 0:
ebone.tail.xyz = pfoot.tail.x, pfoot.tail.y*1.25, pfoot.tail.z
elif pfoot.tail.y > 0:
ebone.tail.xyz = pfoot.tail.x, pfoot.tail.y*-1.25, pfoot.tail.z
ebone.parent = efoot
ebone.use_connect = True
if index == 0:
ebone.layers[13] = True
elif index == 1:
ebone.layers[16] = True
ebone.layers[0] = False
ebone.layers[8] = False
#Creates hand bones
rigify_hands = ['Hand_L', 'Hand_R']
if utils.arm.symmetrical_bones['arms']['forearm']:
for index, bone in enumerate(utils.arm.symmetrical_bones['arms']['forearm']):
prefix, bone = bone_convert(bone)
eforearm = armature.data.edit_bones[prefix + bone]
if not utils.arm.symmetrical_bones['arms']['hand'] or not utils.arm.symmetrical_bones['arms']['hand'][index]:
ebone = armature.data.edit_bones.new(rigify_hands[index])
ebone.head = eforearm.tail
length = eforearm.length
eforearm.length = eforearm.length*1.4
ebone.tail = eforearm.tail
eforearm.length = length
ebone.parent = eforearm
ebone.use_connect = True
#Creates camera target if armature is a viewmodel
if satinfo.viewmodel:
marked = False
if utils.arm.attachment_bones['viewmodel'].get('attach_camera'):
bone = utils.arm.attachment_bones['viewmodel']['attach_camera'][0]
marked = True
elif utils.arm.attachment_bones['viewmodel'].get('camera'):
bone = utils.arm.attachment_bones['viewmodel']['camera'][0]
marked = True
if marked:
prefix, bone = bone_convert(bone)
pcamera = armature.pose.bones[prefix + bone]
etarget = armature.data.edit_bones.new('Camera_Target')
prefix, bone = bone_convert(utils.arm.central_bones['pelvis'][0])
ppelvis = armature.pose.bones[prefix + bone]
etarget.head.xyz = pcamera.head.x, -ppelvis.head.z, pcamera.head.z
etarget.tail.xyz = etarget.head.x, etarget.head.y*0.5*unit, etarget.head.z
etarget.length = 2*unit
update(0)
#Parent and rigify parameters
if spines > 2:
#Rigify pelvis
if utils.arm.central_bones['pelvis']:
for bone in rigify_pelvis:
pbone = armature.pose.bones[bone]
pbone.rigify_type = 'basic.super_copy'
pbone.rigify_parameters.make_control = False
#Rigify palm
if rigify_palm:
for bone in rigify_palm['finger1']:
pbone = armature.pose.bones[bone]
pbone.rigify_type = 'limbs.super_palm'
#For reference:
#Face (Primary) = Layer 0
#Face (Secondary) = Layer 1
#Central bones + Clavicle = Layer 3
#Finger = Layer 5
#Left arm = Layer 7
#Right arm = Layer 10
#Left leg = Layer 13
#Right leg = Layer 16
#Symmetrical
for cat in utils.arm.symmetrical_bones.keys():
if cat == 'fingers':
for container, bone in utils.arm.symmetrical_bones[cat].items():
for bone in bone:
if bone:
prefix, bone = bone_convert(bone)
ebone = armature.data.edit_bones[prefix + bone]
pbone = armature.pose.bones[prefix + bone]
param = pbone.rigify_parameters
if container == 'finger0' or container == 'finger1' or container == 'finger2' or container == 'finger3' or container == 'finger4':
if ebone.children:
pbone.rigify_type = 'limbs.super_finger'
if utils.arm.symmetrical_bones['legs'].get('thighlow'):
if container == 'finger0':
param.primary_rotation_axis = 'Z'
else:
param.primary_rotation_axis = '-X'
elif satinfo.viewmodel and not satinfo.special_viewmodel:
if container == 'finger0':
param.primary_rotation_axis = '-Z'
else:
pbone.rigify_type = 'basic.super_copy'
param.tweak_layers[6] = True
param.tweak_layers[1] = False
ebone.layers[5] = True
ebone.layers[0] = False
ebone.layers[1] = False
ebone.layers[2] = False
else:
ebone.layers[5] = True
ebone.layers[0] = False
ebone.layers[1] = False
ebone.layers[2] = False
elif cat == 'arms':
for container, bone in utils.arm.symmetrical_bones[cat].items():
for index, bone in enumerate(bone):
if bone:
prefix, bone = bone_convert(bone)
ebone = armature.data.edit_bones[prefix + bone]
pbone = armature.pose.bones[prefix + bone]
param = pbone.rigify_parameters
if index == 0:
ebone.layers[7] = True
elif index == 1:
ebone.layers[10] = True
if container == 'clavicle':
pbone.rigify_type = 'basic.super_copy'
param.make_widget = False
if not satinfo.viewmodel:
ebone.layers[3] = True
elif container == 'upperarm':
pbone.rigify_type = 'limbs.super_limb'
param.tweak_layers[1] = False
param.fk_layers[1] = False
if index == 0:
param.fk_layers[8] = True
param.tweak_layers[9] = True
elif index == 1:
param.fk_layers[11] = True
param.tweak_layers[12] = True
param.segments = 1
ebone.layers[0] = False
ebone.layers[1] = False
ebone.layers[2] = False
elif cat == 'legs':
for container, bone in utils.arm.symmetrical_bones[cat].items():
for index, bone in enumerate(bone):
if bone:
prefix, bone = bone_convert(bone)
ebone = armature.data.edit_bones[prefix + bone]
pbone = armature.pose.bones[prefix + bone]
param = pbone.rigify_parameters
if index == 0:
ebone.layers[13] = True
elif index == 1:
ebone.layers[16] = True
if container == 'thigh':
if utils.arm.symmetrical_bones['legs']['calf'] and utils.arm.symmetrical_bones['legs']['foot']:
pbone.rigify_type = 'limbs.super_limb'
if utils.arm.symmetrical_bones['legs'].get('thighlow'):
param.limb_type = 'paw'
else:
param.limb_type = 'leg'
param.tweak_layers[1] = False
param.fk_layers[1] = False
else:
pbone.rigify_type = 'basic.copy_chain'
if index == 0:
param.fk_layers[14] = True
param.tweak_layers[15] = True
elif index == 1:
param.fk_layers[17] = True
param.tweak_layers[18] = True
param.segments = 1
elif container == 'hip':
pbone.rigify_type = 'basic.super_copy'
ebone.layers[0] = False
ebone.layers[3] = False
ebone.layers[4] = False
#Central
for container, bone in utils.arm.central_bones.items():
for bone in bone:
if bone:
prefix, bone = bone_convert(bone)
ebone = armature.data.edit_bones[prefix + bone]
pbone = armature.pose.bones[prefix + bone]
param = pbone.rigify_parameters
ebone.layers[3] = True
if not satinfo.viewmodel:
if container == 'pelvis':
if spines > 2:
pbone.rigify_type = 'spines.basic_spine'
param.pivot_pos = 2
param.tweak_layers[1] = False
param.tweak_layers[4] = True
param.fk_layers[1] = False
param.fk_layers[4] = True
else:
pbone.rigify_type = 'basic.copy_chain'
if container == 'neck':
if utils.arm.central_bones['head']:
pbone.rigify_type = 'spines.super_head'
if utils.arm.central_bones['pelvis']:
param.connect_chain = True
param.tweak_layers[1] = False
param.tweak_layers[4] = True
else:
pbone.rigify_type = 'basic.super_copy'
ebone.layers[0] = False
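#Helpers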
for cat in utils.arm.helper_bones.keys():
for container, bone in utils.arm.helper_bones[cat].items():
for bone in bone:
if bone:
prefix, bone = bone_convert(bone)
ebone = armature.data.edit_bones[prefix + bone]
pbone = armature.pose.bones[prefix + bone]
param = pbone.rigify_parameters
ebone.layers[28] = True
ebone.layers[0] = False
ebone.layers[5] = False
pbone.rigify_type = 'basic.super_copy'
param.super_copy_widget_type = 'bone'
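#Attachments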
for cat in utils.arm.attachment_bones.keys():
for container, bone in utils.arm.attachment_bones[cat].items():
for bone in bone:
if bone:
prefix, bone = bone_convert(bone)
ebone = armature.data.edit_bones[prefix + bone]
pbone = armature.pose.bones[prefix + bone]
param = pbone.rigify_parameters
if cat == 'weapon':
ebone.layers[20] = True
ebone.layers[0] = False
ebone.layers[7] = False
pbone.rigify_type = 'basic.super_copy'
param.super_copy_widget_type = 'bone'
elif cat == 'attachment':
ebone.layers[19] = True
ebone.layers[0] = False
ebone.layers[6] = False
ebone.layers[7] = False
pbone.rigify_type = 'basic.super_copy'
param.super_copy_widget_type = 'bone'
marked = False
if satinfo.viewmodel:
if utils.arm.attachment_bones['viewmodel'].get('attach_camera'):
bone = utils.arm.attachment_bones['viewmodel']['attach_camera'][0]
marked = True
elif utils.arm.attachment_bones['viewmodel'].get('camera'):
bone = utils.arm.attachment_bones['viewmodel']['camera'][0]
marked = True
if marked:
prefix, bone = bone_convert(bone)
pbone = armature.pose.bones[prefix + bone]
param = pbone.rigify_parameters
ebone = armature.data.edit_bones[prefix + bone]
ebone.layers[24] = True
ebone.layers[0] = False
ebone.layers[8] = False
pbone.rigify_type = 'basic.super_copy'
param.super_copy_widget_type = 'bone'
etarget = armature.data.edit_bones['Camera_Target']
ptarget = armature.pose.bones['Camera_Target']
param = ptarget.rigify_parameters
ptarget.rigify_type = 'basic.raw_copy'
param.optional_widget_type = 'circle'
ptarget.lock_location[1] = True
ptarget.lock_rotation[0] = True
ptarget.lock_rotation[2] = True
etarget.layers[24] = True
etarget.layers[0] = False
etarget.layers[7] = False
etarget.layers[8] = False
#Custom bones
for container, bone in utils.arm.custom_bones.items():
for bone in bone:
if bone:
if bone.count('eye') or bone.count('lid_upper') or bone.count('lid_lower'):
continue
prefix, bone2 = bone_convert(bone)
ebone = armature.data.edit_bones[prefix + bone2]
pbone = armature.pose.bones[prefix + bone2]
param = pbone.rigify_parameters
ebone.layers[21] = True
ebone.layers[0] = False
ebone.layers[9] = False
if utils.arm.chain_start.count(bone):
pbone.rigify_type = 'basic.copy_chain'
param.super_copy_widget_type = 'bone'
elif utils.arm.chainless_bones.count(bone):
pbone.rigify_type = 'basic.super_copy'
param.super_copy_widget_type = 'bone'
armature = utils.arm.animation_armature_real
#Creates bone groups
for group in ['Root', 'IK', 'Special', 'Tweak', 'FK', 'Extra']:
color = armature.rigify_colors.add()
color.name = group
armature.rigify_colors[group].select = (0.3140000104904175, 0.7839999794960022, 1.0)
armature.rigify_colors[group].active = (0.5490000247955322, 1.0, 1.0)
armature.rigify_colors[group].standard_colors_lock = True
if group == 'Root':
armature.rigify_colors[group].normal = (0.43529415130615234, 0.18431372940540314, 0.41568630933761597)
elif group == 'IK':
armature.rigify_colors[group].normal = (0.6039215922355652, 0.0, 0.0)
elif group == 'Special':
armature.rigify_colors[group].normal = (0.9568628072738647, 0.7882353663444519, 0.0470588281750679)
elif group == 'Tweak':
armature.rigify_colors[group].normal = (0.03921568766236305, 0.21176472306251526, 0.5803921818733215)
elif group == 'FK':
armature.rigify_colors[group].normal = (0.11764706671237946, 0.5686274766921997, 0.03529411926865578)
elif group == 'Extra':
armature.rigify_colors[group].normal = (0.9686275124549866, 0.250980406999588, 0.0941176563501358)
#Creates layers
for i in range(29):
armature.rigify_layers.add()
#Rigify layers
names = ['Face', 'Face (Primary)','Face (Secondary)','Torso', 'Torso (Tweak)', 'Fingers', 'Fingers (Detail)', 'Arm.L (IK)', 'Arm.L (FK)', 'Arm.L (Tweak)', 'Arm.R (IK)', 'Arm.R (FK)', 'Arm.R (Tweak)', 'Leg.L (IK)', 'Leg.L (FK)', 'Leg.L (Tweak)', 'Leg.R (IK)', 'Leg.R (FK)', 'Leg.R (Tweak)', 'Attachments', 'Weapon', 'Custom (FK)', 'Custom (IK)', 'Custom (Tweak)', 'Others']
row_groups = [1,2,2,3,4,5,6,7,8,9,7,8,9,10,11,12,10,11,12,13,13,14,14,15,15]
layer_groups = [5,2,3,3,4,6,5,2,5,4,2,5,4,2,5,4,2,5,4,6,6,5,2,4,6]
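#row = which row of the Rigify layers panel the button appears on; group = index into the bone groups created above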
for i, name, row, group in zip(range(25), names, row_groups, layer_groups):
armature.rigify_layers[i].name = name
armature.rigify_layers[i].row = row
armature.rigify_layers[i]['group_prop'] = group
armature.rigify_layers[28].name = 'Root'
armature.rigify_layers[28].row = 14
armature.rigify_layers[28]['group_prop'] = 1
for i in range(32):
armature.layers[i] = False
for i in [1,2,3,5,7,10,13,16,19,20,21,22]:
armature.layers[i] = True
bpy.ops.object.mode_set(mode='OBJECT')
#Renames bones so the armature is compatible with pose symmetry
if satinfo.scheme == 0 and not satinfo.sbox:
armature_rename(1, utils.arm.animation_armature)
print("Animation armature created!")
elif action == 1:
print("Animation armature deleted")
def face_flex_setup(): #Sets up drivers for face flexes that will be controlled by face bones
unit = satinfo.unit
armature = utils.arm.animation_armature
armature['target_object'] = None
armature['material_eyes'] = False
armature['has_shapekeys'] = False
bpy.ops.object.mode_set(mode='EDIT')
#Shapekey drivers
if satproperties.target_object:
armature['target_object'] = satproperties.target_object
armature['has_shapekeys'] = True
satproperties.target_object = None
target_object = armature['target_object']
try:
shapekeys_raw = target_object.data.shape_keys.key_blocks.keys()
except:
shapekeys_raw = None
print("No shape keys detected")
utils.arm.facial_bones = []
utils.arm.unused_shapekeys = ['AU6L+AU6R', 'AU25L+AU25R', 'AU22L+AU22R', 'AU20L+AU20R', 'AU18L+AU18R', 'AU26ZL+AU26ZR', 'AU12AU25L+AU12AU25R', 'upper_right', 'upper_right.001', 'lower_right', 'lower_right.001', 'upper_left', 'upper_left.001', 'lower_left', 'lower_left.001']
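#Maps generic flex names to the model's actual shapekey names; filled in by generate_shapekey_dict() below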
utils.arm.shapekeys = {
    'basis': {'basis': ''},
    'eyebrows': {'inner_eyebrow_raise': '', 'outer_eyebrow_raise': '', 'eyebrow_drop': '', 'eyebrow_raise': '', 'outer_eyebrow_drop': '', 'inner_eyebrow_drop': ''},
    'eyes': {'upper_eyelid_close': '', 'upper_eyelid_raise': '', 'lower_eyelid_drop': '', 'lower_eyelid_raise': '', 'upper_eyelid_drop': ''},
    'cheek': {'squint': '', 'cheek_puff': ''},
    'nose': {'nose_wrinkler': '', 'breath': ''},
    'mouth': {'smile': '', 'frown': '', 'upper_lip_raise': '', 'lower_lip_raise': '', 'lower_lip_drop': '', 'bite': '', 'tightener': '', 'puckerer': '', 'light_puckerer': '', 'mouth_left': '', 'mouth_right': ''},
    'chin': {'chin_clench': '', 'light_chin_drop': '', 'medium_chin_drop': '', 'full_chin_drop': '', 'chin_left': '', 'chin_right': '', 'chin_raise': ''}}
if shapekeys_raw:
object_data = target_object.data.copy()
object_data.name = target_object.data.name + '.anim'
object_data['original_data'] = target_object.data
target_object.data = object_data
utils.arm.shapekeys = generate_shapekey_dict(utils.arm.shapekeys, shapekeys_raw)
#Generates widgets for easier representation of every driver bone
create_widgets()
#Flags to make sure driver bones aren't created twice
eyebrows = False
eyes = False
cheek = False
nose = False
mouth = False
lower_lip = False
upper_lip = False
middle_lip = False
chin = False
## Bone creation ##
for cat in utils.arm.shapekeys.keys():
for container, shapekey in utils.arm.shapekeys[cat].items():
if cat == 'eyebrows':
if container:
if not eyebrows:
eyebrows = True
#Inner, outer and full eyebrows
for bone in ['Eyebrow_L', 'Eyebrow_R', 'Inner_Eyebrow_L', 'Inner_Eyebrow_R', 'Outer_Eyebrow_L', 'Outer_Eyebrow_R']:
utils.arm.facial_bones.append(bone)
ebone = armature.data.edit_bones.new(bone)
ebone.use_deform = False
if bone == 'Eyebrow_L':
ebone.head.xyz = 1.18783*unit, -4.17032*unit, 68.5886*unit
elif bone == 'Eyebrow_R':
ebone.head.xyz = -1.18783*unit, -4.17032*unit, 68.5886*unit
elif bone == 'Inner_Eyebrow_L':
ebone.head.xyz = 0.574764*unit, -4.17032*unit, 68.3012*unit
elif bone == 'Inner_Eyebrow_R':
ebone.head.xyz = -0.574764*unit, -4.17032*unit, 68.3012*unit
elif bone == 'Outer_Eyebrow_L':
ebone.head.xyz = 1.82008*unit, -4.17032*unit, 68.3012*unit
elif bone == 'Outer_Eyebrow_R':
ebone.head.xyz = -1.82008*unit, -4.17032*unit, 68.3012*unit
ebone.tail.xyz = ebone.head.x, ebone.head.y + 0.5*unit, ebone.head.z
elif cat == 'eyes':
if container:
if not eyes:
eyes = True
#Upper and lower eyelids
for bone in ['UpperEye_L', 'UpperEye_R', 'LowerEye_L', 'LowerEye_R']:
utils.arm.facial_bones.append(bone)
ebone = armature.data.edit_bones.new(bone)
ebone.use_deform = False
if bone == 'UpperEye_L':
ebone.head.xyz = 1.18783*unit, -3.53386*unit, 68.0906*unit
elif bone == 'UpperEye_R':
ebone.head.xyz = -1.18783*unit, -3.53386*unit, 68.0906*unit
elif bone == 'LowerEye_L':
ebone.head.xyz = 1.18783*unit, -3.53386*unit, 67.5157*unit
elif bone == 'LowerEye_R':
ebone.head.xyz = -1.18783*unit, -3.53386*unit, 67.5157*unit
ebone.tail.xyz = ebone.head.x, ebone.head.y + 0.5*unit, ebone.head.z
elif cat == 'cheek':
if container:
if not cheek:
cheek = True
#Cheeks for cheek_puffing and squinting
for bone in ['Cheek_L', 'Cheek_R']:
utils.arm.facial_bones.append(bone)
ebone = armature.data.edit_bones.new(bone)
ebone.use_deform = False
if bone == 'Cheek_L':
ebone.head.xyz = 1.91587*unit, -3.25701*unit, 65.6189*unit
ebone.tail.xyz = 1.76452*unit, -2.78046*unit, ebone.head.z
elif bone == 'Cheek_R':
ebone.head.xyz = -1.91587*unit, -3.25701*unit, 65.6189*unit
ebone.tail.xyz = -1.76452*unit, -2.78046*unit, ebone.head.z
ebone.length = 0.5*unit
elif cat == 'nose':
if container:
if not nose:
nose = True
#Nostrils
for bone in ['Nostril_L', 'Nostril_R']:
utils.arm.facial_bones.append(bone)
ebone = armature.data.edit_bones.new(bone)
ebone.use_deform = False
if bone == 'Nostril_L':
ebone.head.xyz = 0.766339*unit, -3.92756*unit, 65.6*unit
elif bone == 'Nostril_R':
ebone.head.xyz = -0.766339*unit, -3.92756*unit, 65.6*unit
ebone.tail.xyz = ebone.head.x, ebone.head.y + 0.5*unit, ebone.head.z
elif cat == 'mouth':
if container:
if not mouth:
#Mouth corners
if container in ('smile', 'frown', 'tightener', 'puckerer', 'light_puckerer'):
mouth = True
for bone in ['MouthCorner_L', 'MouthCorner_R']:
utils.arm.facial_bones.append(bone)
ebone = armature.data.edit_bones.new(bone)
ebone.use_deform = False
if bone == 'MouthCorner_L':
ebone.head.xyz = 1.20563*unit, -3.80961*unit, 64.8528*unit
ebone.tail.xyz = 0.976012*unit, -3.36545*unit, ebone.head.z
elif bone == 'MouthCorner_R':
ebone.head.xyz = -1.20563*unit, -3.80961*unit, 64.8528*unit
ebone.tail.xyz = -0.976012*unit, -3.36545*unit, ebone.head.z
ebone.length = 0.5*unit
elif not upper_lip:
#Upper lip
if container == 'upper_lip_raise':
upper_lip = True
for bone in ['UpperLip_L', 'UpperLip_R']:
utils.arm.facial_bones.append(bone)
ebone = armature.data.edit_bones.new(bone)
ebone.use_deform = False
if bone == 'UpperLip_L':
ebone.head.xyz = 0.459803*unit, -4.21496*unit, 65.1402*unit
elif bone == 'UpperLip_R':
ebone.head.xyz = -0.459803*unit, -4.21496*unit, 65.1402*unit
ebone.tail.xyz = ebone.head.x, ebone.head.y + 0.5*unit, ebone.head.z
elif not lower_lip:
#Lower lip
if container == 'lower_lip_raise' or container == 'lower_lip_drop' or container == 'bite':
lower_lip = True
for bone in ['LowerLip_L', 'LowerLip_R']:
utils.arm.facial_bones.append(bone)
ebone = armature.data.edit_bones.new(bone)
ebone.use_deform = False
if bone == 'LowerLip_L':
ebone.head.xyz = 0.459803*unit, -4.13831*unit, 64.5654*unit
elif bone == 'LowerLip_R':
ebone.head.xyz = -0.459803*unit, -4.13831*unit, 64.5654*unit
ebone.tail.xyz = ebone.head.x, ebone.head.y + 0.5*unit, ebone.head.z
elif not middle_lip:
#Middle lip
if container == 'mouth_left' or container == 'mouth_right':
middle_lip = True
for bone in ['MiddleLip']:
utils.arm.facial_bones.append(bone)
ebone = armature.data.edit_bones.new(bone)
ebone.use_deform = False
ebone.head.xyz = 0, -4.40654*unit, 64.8528*unit
ebone.tail.xyz = ebone.head.x, ebone.head.y + 0.5*unit, ebone.head.z
elif cat == 'chin':
if container:
if not chin:
chin = True
for bone in ['Chin']:
utils.arm.facial_bones.append(bone)
ebone = armature.data.edit_bones.new(bone)
ebone.use_deform = False
ebone.head.xyz = 0, -4.31075*unit, 62.8409*unit
ebone.tail.xyz = ebone.head.x, -3.83612*unit, 62.9982*unit
ebone.length = 0.5*unit
## Linking ##
keyblocks = target_object.data.shape_keys.key_blocks
#Vertex group creation
left_group = target_object.vertex_groups.new(name='Left')
right_group = target_object.vertex_groups.new(name='Right')
#Left side
for vertex in target_object.data.vertices:
#Left side
if vertex.co[0] > 0.005*unit:
left_group.add([vertex.index], 1, 'REPLACE')
#Right side
elif vertex.co[0] < -0.005*unit:
right_group.add([vertex.index], 1, 'REPLACE')
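#Vertices near the midline get partial weight in both groups so the split L/R shapekeys blend across the center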
elif vertex.co[0] < 0.005*unit and vertex.co[0] > -0.005*unit:
left_group.add([vertex.index], 0.75, 'REPLACE')
right_group.add([vertex.index], 0.75, 'REPLACE')
#Divides old shapekeys from generated ones
target_object.shape_key_add(name='----------', from_mix=False)
target_object.show_only_shape_key = False
utils.arm.rigify_shapekeys = {
    'basis': {'basis': ''},
    'eyebrows': {'inner_eyebrow_raise': [], 'outer_eyebrow_raise': [], 'eyebrow_drop': [], 'eyebrow_raise': [], 'outer_eyebrow_drop': [], 'inner_eyebrow_drop': []},
    'eyes': {'upper_eyelid_close': [], 'upper_eyelid_raise': [], 'lower_eyelid_drop': [], 'lower_eyelid_raise': [], 'upper_eyelid_drop': []},
    'cheek': {'squint': [], 'cheek_puff': []},
    'nose': {'nose_wrinkler': [], 'breath': []},
    'mouth': {'smile': [], 'frown': [], 'upper_lip_raise': [], 'lower_lip_raise': [], 'lower_lip_drop': [], 'bite': [], 'tightener': [], 'puckerer': [], 'light_puckerer': [], 'mouth_left': [], 'mouth_right': []},
    'chin': {'chin_clench': [], 'light_chin_drop': [], 'medium_chin_drop': [], 'full_chin_drop': [], 'chin_left': [], 'chin_right': [], 'chin_raise': []}}
for cat in utils.arm.shapekeys.keys():
for container, shapekey in utils.arm.shapekeys[cat].items():
if shapekey:
#Makes sure no other shapekey is active
if container != 'basis':
keyblocks[shapekey].value = 0
#Appends central shapekeys directly, since they don't need L/R versions
if container in ('chin_raise', 'light_chin_drop', 'medium_chin_drop', 'full_chin_drop', 'chin_left', 'chin_right', 'light_puckerer', 'mouth_left', 'mouth_right'):
utils.arm.rigify_shapekeys[cat][container].append(shapekey)
continue
if container != 'basis':
keyblocks[shapekey].value = 1
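#With only this shapekey enabled, from_mix=True bakes it into separate L/R copies that the vertex groups then mask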
left_shapekey = target_object.shape_key_add(name=shapekey + '_L', from_mix=True)
right_shapekey = target_object.shape_key_add(name=shapekey + '_R', from_mix=True)
utils.arm.rigify_shapekeys[cat][container].append(left_shapekey.name)
utils.arm.rigify_shapekeys[cat][container].append(right_shapekey.name)
#Masks each new shapekey with its side's vertex group
left_shapekey.vertex_group = left_group.name
right_shapekey.vertex_group = right_group.name
keyblocks[shapekey].value = 0
#Removes the original shapekeys (now split into L/R copies) as well as unused ones
for container, shapekey in utils.arm.shapekeys[cat].items():
if shapekey:
if container in ('basis', 'chin_raise', 'light_chin_drop', 'medium_chin_drop', 'full_chin_drop', 'chin_left', 'chin_right', 'light_puckerer', 'mouth_left', 'mouth_right'):
continue
else:
shapekey = target_object.data.shape_keys.key_blocks[shapekey]
target_object.shape_key_remove(shapekey)
for shapekey in utils.arm.unused_shapekeys:
try:
shapekey = target_object.data.shape_keys.key_blocks[shapekey]
target_object.shape_key_remove(shapekey)
except:
pass
del utils.arm.shapekeys
del utils.arm.unused_shapekeys
utils.arm.eye_left = ''
utils.arm.eye_right = ''
## Material eyes ##
for material in target_object.data.materials:
if material.name.title().count('Eyeball'):
armature['material_eyes'] = True
name = material.name
edriver = armature.data.edit_bones.new('driver_' + name)
edriver.use_deform = False
if name.title().count('L_') or name.title().count('_L'):
edriver.head.xyz = 1.18783*unit, -15*unit, 67.8032*unit
elif name.title().count('R_') or name.title().count('_R'):
edriver.head.xyz = -1.18783*unit, -15*unit, 67.8032*unit
edriver.tail.xyz = edriver.head.x, edriver.head.y+0.5*unit, edriver.head.z
if utils.arm.central_bones['head']:
prefix, bone = bone_convert(utils.arm.central_bones['head'][0])
edriver.parent = armature.data.edit_bones[prefix + bone]
edriver.layers[1] = True
edriver.layers[0] = False
edriver.layers[8] = False
edriver.layers[9] = False
update(0)
pdriver = armature.pose.bones['driver_' + name]
param = pdriver.rigify_parameters
#Locks rotation and scale since they aren't meant to be used
pdriver.lock_location = False, True, False
pdriver.lock_rotation_w = True
pdriver.lock_rotation = True, True, True
pdriver.lock_scale = True, True, True
pdriver.custom_shape_scale = 3
param.optional_widget_type = 'circle'
pdriver.rigify_type = 'basic.raw_copy'
eye_texture = False
if not material.use_nodes:
material.use_nodes = True
link = material.node_tree.links
node = material.node_tree.nodes
try:
imgtexture = node['Image Texture']
output_loc = imgtexture.location
eye_texture = True
except:
try:
output_loc = node['Material Output'].location
except:
output_loc = (0,0)
#Checks if mapping node already exists
try:
mapping = node['SAT Eye Movement']
except:
mapping = node.new('ShaderNodeMapping')
mapping.name = "SAT Eye Movement"
mapping.width = 315 #So the whole label is visible
if eye_texture:
mapping.location = output_loc[0] - 400, output_loc[1]
else:
mapping.location = output_loc[0], output_loc[1] + 420
mapping.label = "Connect to iris(+Normal/Specular) texture's vector input"
#Checks if texture coordinates node already exists
try:
texcoord = node['SAT Eye Movement Origin']
except:
texcoord = node.new('ShaderNodeTexCoord')
texcoord.name = "SAT Eye Movement Origin"
texcoord.location = mapping.location[0] - 200, mapping.location[1]
if not texcoord.outputs['UV'].links:
link.new(texcoord.outputs['UV'], mapping.inputs['Vector'])
if eye_texture:
if not mapping.outputs['Vector'].links:
link.new(mapping.outputs['Vector'], imgtexture.inputs['Vector'])
imgtexture.extension = 'EXTEND'
#Driver portion
driver = mapping.inputs['Location'].driver_add('default_value')
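#driver_add() on a vector input returns one FCurve per component (0 = X, 1 = Y, 2 = Z)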
if not driver[0].driver.variables:
variable = driver[0].driver.variables.new() #Creates a new variable on the driver
else:
variable = driver[0].driver.variables[0]
variable.name = "eye_x"
driver[0].driver.expression = variable.name #Changes expression to created variable's name
variable.type = 'TRANSFORMS' #Changes type of variable to transform
target = variable.targets[0]
target.id = utils.arm.animation_armature
target.transform_space = 'LOCAL_SPACE'
target.transform_type = 'LOC_X'
target.bone_target = 'driver_' + material.name
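#coefficients[1] is the slope of the FCurve's default Generator modifier (output = c0 + c1*input)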
if material.name.title().count('L_') or material.name.title().count('_L'):
driver[0].modifiers[0].coefficients[1] = -0.25/unit
utils.arm.eye_left = 'driver_' + material.name
elif material.name.title().count('R_') or material.name.title().count('_R'):
driver[0].modifiers[0].coefficients[1] = 0.25/unit
utils.arm.eye_right = 'driver_' + material.name
if not driver[1].driver.variables:
variable = driver[1].driver.variables.new() #Creates a new variable on the driver
else:
variable = driver[1].driver.variables[0]
variable.name = "eye_z"
driver[1].driver.expression = variable.name #Changes expression to created variable's name
variable.type = 'TRANSFORMS' #Changes type of variable to transform
target = variable.targets[0]
target.id = utils.arm.animation_armature
target.transform_space = 'LOCAL_SPACE'
target.transform_type = 'LOC_Z'
driver[1].modifiers[0].coefficients[1] = -0.25/unit
target.bone_target = 'driver_' + material.name
## Driver bone constraints ##
for bone in utils.arm.facial_bones:
pbone = armature.pose.bones[bone]
ebone = armature.data.edit_bones[bone]
pbone.rigify_type = 'basic.raw_copy'
ebone.layers[1] = True
if utils.arm.central_bones['head']:
prefix2, bone2 = bone_convert(utils.arm.central_bones['head'][0])
elif utils.arm.central_bones['neck']:
prefix2, bone2 = bone_convert(utils.arm.central_bones['neck'][0])
ebone.parent = armature.data.edit_bones[prefix2 + bone2]
#Locks rotation and scale since they aren't meant to be used
pbone.lock_rotation_w = True
pbone.lock_rotation[0] = True
pbone.lock_rotation[1] = True
pbone.lock_rotation[2] = True
pbone.lock_scale[0] = True
pbone.lock_scale[1] = True
pbone.lock_scale[2] = True
#Locks location axes for bones that don't need them
if bone.count('Cheek') or bone.count('LowerLip') or bone.count('UpperLip'):
pbone.lock_location[0] = True
elif bone.count('Eyebrow') or bone.count('UpperEye') or bone.count('LowerEye'):
pbone.lock_location[0] = True
pbone.lock_location[1] = True
elif bone.count('MiddleLip'):
pbone.lock_location[2] = True
elif bone.count('MouthCorner'):
pass
else:
pbone.lock_location[1] = True
limit_loc = pbone.constraints.new('LIMIT_LOCATION')
limit_loc.owner_space = 'LOCAL'
limit_loc.use_transform_limit = True
#Min X
limit_loc.use_min_x = True
if bone.count('MouthCorner'):
limit_loc.min_x = -0.5*unit
elif bone == 'Nostril_L':
limit_loc.min_x = 0
elif bone == 'Nostril_R':
limit_loc.min_x = -0.5*unit
else:
limit_loc.min_x = -1*unit
#Max X
limit_loc.use_max_x = True
if bone.count('MouthCorner') or bone == 'Nostril_L':
limit_loc.max_x = 0.5*unit
elif bone == 'Nostril_R':
limit_loc.max_x = 0
else:
limit_loc.max_x = 1*unit
#Min Y
limit_loc.use_min_y = True
if bone.count('Cheek'):
limit_loc.min_y = -1*unit
elif bone.count('MiddleLip') or bone.count('LowerLip'):
limit_loc.min_y = -0.5*unit
else:
limit_loc.min_y = 0
#Max Y
limit_loc.use_max_y = True
limit_loc.max_y = 0
#Min Z
limit_loc.use_min_z = True
if bone.count('Cheek') or bone.count('Nostril') or bone.count('UpperLip'):
limit_loc.min_z = 0
elif bone.count('UpperEye') or bone.count('LowerEye'):
limit_loc.min_z = -0.2*unit
elif bone.count('Eyebrow') or bone.count('LowerLip') or bone.count('MouthCorner'):
limit_loc.min_z = -0.5*unit
elif bone.count('Chin'):
limit_loc.min_z = -1.5*unit
else:
limit_loc.min_z = -1*unit
#Max Z
limit_loc.use_max_z = True
if bone.count('UpperEye') or bone.count('LowerEye'):
limit_loc.max_z = 0.2*unit
elif bone.count('Eyebrow') or bone.count('UpperLip') or bone.count('MouthCorner') or bone.count('Nostril') or bone.count('LowerLip'):
limit_loc.max_z = 0.5*unit
else:
limit_loc.max_z = 1*unit
#Assigns widgets to the driver bones
if bone.count('Eyebrow') or bone.count('UpperEye') or bone.count('LowerEye'):
widget = bpy.data.objects['UpDown']
pbone.custom_shape = widget
if bone.count('Eyebrow'):
pbone.custom_shape_scale = 0.3
elif bone.count('UpperEye') or bone.count('LowerEye'):
pbone.custom_shape_scale = 0.25
ebone.layers[1] = True
ebone.layers[0] = False
elif bone.count('Cheek'):
widget = bpy.data.objects['Cheek']
pbone.custom_shape = widget
pbone.custom_shape_scale = 0.5
ebone.layers[1] = True
ebone.layers[0] = False
elif bone.count('Nostril'):
if bone.endswith('_L'):
widget = bpy.data.objects['Nostril_L']
elif bone.endswith('_R'):
widget = bpy.data.objects['Nostril_R']
pbone.custom_shape = widget
pbone.custom_shape_scale = 0.35
ebone.layers[1] = True
ebone.layers[0] = False
elif bone.count('UpperLip'):
widget = bpy.data.objects['UpperLip']
pbone.custom_shape = widget
pbone.custom_shape_scale = 0.25
ebone.layers[1] = True
ebone.layers[0] = False
elif bone.count('MiddleLip'):
widget = bpy.data.objects['MiddleLip']
pbone.custom_shape = widget
pbone.custom_shape_scale = 0.35
ebone.layers[1] = True
ebone.layers[0] = False
elif bone.count('LowerLip'):
widget = bpy.data.objects['LowerLip']
pbone.custom_shape = widget
pbone.custom_shape_scale = 0.25
ebone.layers[1] = True
ebone.layers[0] = False
elif bone.count('Chin'):
widget = bpy.data.objects['4Directions']
pbone.custom_shape = widget
pbone.custom_shape_scale = 0.7
ebone.layers[1] = True
ebone.layers[0] = False
elif bone.count('MouthCorner'):
widget = bpy.data.objects['4Directions']
pbone.custom_shape = widget
pbone.custom_shape_scale = 0.4
ebone.layers[1] = True
ebone.layers[0] = False
if bone.count('Inner_Eyebrow') or bone.count('Outer_Eyebrow') or bone.count('Nostril') or bone.count('Cheek') or bone.count('MiddleLip'):
ebone.layers[2] = True
ebone.layers[0] = False
ebone.layers[1] = False
ebone.layers[8] = False
ebone.layers[9] = False
armature.data.layers[1] = True
for i in range(2, 28):
armature.data.layers[i] = False
## Eye drivers ##
if not armature['material_eyes']:
if not satinfo.titanfall and not utils.arm.symmetrical_bones['legs'].get('thighlow'):
for container, bone in utils.arm.custom_bones.items():
for bone in bone:
if bone:
if bone.count('eye'):
if bone.count('eyebrow') or bone.count('eyelid'):
pass
else:
prefix, bone = bone_convert(bone)
ebone = armature.data.edit_bones[prefix + bone]
edriver = armature.data.edit_bones.new('driver_' + bone)
edriver.layers[1] = True
edriver.layers[0] = False
edriver.head.xyz = ebone.head.x, -50*unit, ebone.head.z
edriver.tail.xyz = edriver.head.x, edriver.head.y - 5*unit, edriver.head.z
#The enclosing check already rules out Titanfall armatures, so only the head parenting applies
if utils.arm.central_bones['head']:
prefix2, bone2 = bone_convert(utils.arm.central_bones['head'][0])
edriver.parent = armature.data.edit_bones[prefix2 + bone2]
update(0)
pbone = armature.pose.bones[prefix + bone]
pdriver = armature.pose.bones['driver_' + bone]
param = pdriver.rigify_parameters
#Locks rotation and scale since they aren't meant to be used
pdriver.lock_location = False, True, False
pdriver.lock_rotation_w = True
pdriver.lock_rotation = True, True, True
pdriver.lock_scale = True, True, True
pdriver.custom_shape_scale = 1
param.optional_widget_type = 'circle'
pbone.rigify_type = ''
pdriver.rigify_type = 'basic.raw_copy'
## S&Box Eyelid drivers ##
if satinfo.sbox:
create_widgets(['UpDown'])
for container, bone in utils.arm.custom_bones.items():
for bone in bone:
if bone:
if bone.count('lid_upper') or bone.count('lid_lower'):
prefix, bone = bone_convert(bone)
ebone = armature.data.edit_bones[prefix + bone]
edriver = armature.data.edit_bones.new('driver_' + prefix + bone)
edriver.head.xyz = ebone.head.x, ebone.head.y - 5*unit, ebone.head.z
if bone.count('lid_upper'):
edriver.head.z = edriver.head.z + 1*unit
edriver.tail.xyz = edriver.head.x, edriver.head.y - 5*unit, edriver.head.z
elif bone.count('lid_lower'):
edriver.head.z = edriver.head.z - 5*unit
edriver.tail.xyz = edriver.head.x, edriver.head.y - 5*unit, edriver.head.z
edriver.length = 5*unit
if utils.arm.central_bones['head']:
prefix2, bone2 = bone_convert(utils.arm.central_bones['head'][0])
edriver.parent = armature.data.edit_bones[prefix2 + bone2]
edriver.layers[1] = True
edriver.layers[9] = False
update(0)
pbone = armature.pose.bones[prefix + bone]
pdriver = armature.pose.bones['driver_' + prefix + bone]
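#Maps the driver bone's vertical travel (-4 to 4 units) to a -90 to 90 degree rotation on the eyelid bone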
transform = pbone.constraints.new('TRANSFORM')
transform.target = armature
transform.subtarget = pdriver.name
transform.target_space = 'LOCAL'
transform.owner_space = 'LOCAL'
transform.from_min_z = -4*unit
transform.from_max_z = 4*unit
transform.map_to = 'ROTATION'
transform.to_min_z_rot = radians(-90)
transform.to_max_z_rot = radians(90)
limit_loc = pdriver.constraints.new('LIMIT_LOCATION')
limit_loc.owner_space = 'LOCAL'
limit_loc.use_transform_limit = True
limit_loc.use_min_z = True
limit_loc.use_max_z = True
limit_loc.min_z = -4*unit
limit_loc.max_z = 4*unit
#Bone should only be able to move on the Z axis
pdriver.lock_location = True, True, False
pdriver.lock_rotation_w = True
pdriver.lock_rotation = True, True, True
pdriver.lock_scale = True, True, True
pbone.rigify_type = ''
pdriver.rigify_type = 'basic.raw_copy'
pdriver.custom_shape = bpy.data.objects['UpDown']
pdriver.custom_shape_scale = 0.1
for i in [1,2,3,5,7,10,13,16,19,20,21,22,23,24]:
armature.data.layers[i] = True
bpy.ops.object.mode_set(mode='OBJECT')
def link(): #Creates retarget empties and organizes the armature afterwards
def retarget(bone, type=''): #Creates empties and links them to Rigify armature/Source armature
armature = bpy.data.objects[utils.arm.armature.name + '.anim']
#Retarget empties creation
try:
collection = bpy.data.collections["Retarget Empties ({})".format(utils.arm.armature.name)[0:60]] #Name length limit
except:
collection = bpy.data.collections.new("Retarget Empties ({})".format(utils.arm.armature.name)[0:60])
bpy.context.scene.collection.children.link(collection)
collection.hide_viewport = True
#Creates base empty and links
base = bpy.data.objects.new('base_{} ({})'.format(bone, utils.arm.armature.name)[0:60], None)
collection.objects.link(base)
base.empty_display_type = 'CUBE'
base.hide_select = True
base.empty_display_size = unit
#Location constraint
loc = base.constraints.new('COPY_LOCATION')
loc.name = "Location Retarget"
loc.target = armature
if type == 'helper':
loc.subtarget = 'ORG-' + prefix + bone
else:
loc.subtarget = 'ORG-' + prefix + bone + '.isolated'
if type != 'helper':
#Rotation constraint
rot = base.constraints.new('COPY_ROTATION')
rot.name = "Rotation Retarget"
rot.target = armature
rot.subtarget = 'ORG-' + prefix + bone + '.isolated'
#Creates target empty and links
target = bpy.data.objects.new('target_{} ({})'.format(bone, utils.arm.armature.name)[0:60], None)
collection.objects.link(target)
target.empty_display_type = 'SPHERE'
target.empty_display_size = unit
#Parent to base
base.parent = parent
target.parent = base
#Bone connection
armature = utils.arm.armature
loc = armature.pose.bones[prefix + bone].constraints.new('COPY_LOCATION')
loc.name = "Retarget Location"
loc.target = target
if type != 'helper':
rot = armature.pose.bones[prefix + bone].constraints.new('COPY_ROTATION')
rot.name = "Retarget Rotation"
rot.target = target
unit = satinfo.unit
#Creates parent for all bases for easier storage/manipulation
parent = bpy.data.objects.new('parent_' + utils.arm.armature.name, None)
for cat in utils.arm.symmetrical_bones:
for container, bone in utils.arm.symmetrical_bones[cat].items():
for bone in bone:
if bone:
prefix, bone = bone_convert(bone)
retarget(bone)
for container, bone in utils.arm.central_bones.items():
for bone in bone:
if bone:
prefix, bone = bone_convert(bone)
retarget(bone)
for cat in utils.arm.helper_bones:
for container, bone in utils.arm.helper_bones[cat].items():
for bone in bone:
if bone:
prefix, bone = bone_convert(bone)
retarget(bone, 'helper')
for cat in utils.arm.attachment_bones.keys():
for container, bone in utils.arm.attachment_bones[cat].items():
for bone in bone:
if bone:
prefix, bone = bone_convert(bone)
retarget(bone)
for container, bone in utils.arm.custom_bones.items():
for bone in bone:
if bone:
prefix, bone = bone_convert(bone)
retarget(bone)
#Connects parent to collection
collection = bpy.data.collections["Retarget Empties ({})".format(utils.arm.armature.name)[0:60]]
collection.objects.link(parent)
armature = bpy.data.objects[utils.arm.armature.name + '.anim']
armature.data.name = utils.arm.armature_real.name + '.anim'
update(1, armature)
#Parents isolated bones
for cat in utils.arm.symmetrical_bones.keys():
for container, bone in utils.arm.symmetrical_bones[cat].items():
for bone in bone:
if bone:
prefix, bone = bone_convert(bone)
try:
armature.data.edit_bones['ORG-' + prefix + bone + '.isolated'].parent = armature.data.edit_bones['DEF-' + prefix + bone]
except:
pass
for cat in utils.arm.helper_bones.keys():
for container, bone in utils.arm.helper_bones[cat].items():
for bone in bone:
if bone:
prefix, bone = bone_convert(bone)
ebone = armature.data.edit_bones[prefix + bone]
try:
ebone.parent = armature.data.edit_bones[ebone.parent.name.replace('ORG-', 'DEF-')]
except:
pass
#Deletes generated armature
generate_armature('anim', 2)
armature = utils.arm.animation_armature
#Creates camera at camera bone if armature is a viewmodel
if satinfo.viewmodel:
marked = False
if utils.arm.attachment_bones['viewmodel'].get('attach_camera'):
bone = utils.arm.attachment_bones['viewmodel']['attach_camera'][0]
marked = True
elif utils.arm.attachment_bones['viewmodel'].get('camera'):
bone = utils.arm.attachment_bones['viewmodel']['camera'][0]
marked = True
if marked:
prefix, bone = bone_convert(bone)
pcamera = armature.pose.bones[prefix + bone]
ecamera = armature.data.edit_bones[prefix + bone]
track = pcamera.constraints.new('DAMPED_TRACK')
track.target = armature
track.subtarget = 'Camera_Target'
track.track_axis = 'TRACK_Z'
rot = pcamera.constraints.new('COPY_ROTATION')
rot.target = armature
rot.subtarget = 'Camera_Target'
rot.use_x = False
rot.use_z = False
rot.target_space = 'LOCAL'
etarget = armature.data.edit_bones['Camera_Target']
#ecamera.parent = None
#etarget.parent = None
#Relocates the position of the camera to where it would be in game
if ecamera.head.z >= 0.25:
if prefix.count('attach'):
pcamera.location[0] = -ecamera.head.z*0.0713206
else:
pcamera.location[2] = -ecamera.head.z*0.0713206
#pcamera.location[1] = -ecamera.head.z
#armature.pose.bones['Camera_Target'].location[2] = -ecamera.head.z
armature.pose.bones['root'].location[2] = -ecamera.head.z
##Camera##
camera_data = bpy.data.cameras.new('viewmodel_camera')
camera = bpy.data.objects.new('viewmodel_camera', camera_data)
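#0.942478 rad is roughly 54 degrees, the Source engine's default viewmodel FOV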
camera.data.angle = 0.942478
camera.rotation_euler[0] = radians(90)
if prefix.count('attach'):
camera.rotation_euler[2] = radians(90)
else:
camera.rotation_euler[2] = radians(180)
loc = camera.constraints.new('COPY_LOCATION')
loc.target = utils.arm.animation_armature
loc.subtarget = pcamera.name
rot = camera.constraints.new('COPY_ROTATION')
rot.target = utils.arm.animation_armature
rot.subtarget = pcamera.name
rot.invert_x = True
rot.invert_z = True
rot.target_space = 'LOCAL'
rot.owner_space = 'LOCAL'
collection = utils.arm.armature.users_collection[0]
collection.objects.link(camera)
bpy.context.scene.camera = camera
if not armature['material_eyes']:
for container, bone in utils.arm.custom_bones.items():
for bone in bone:
if bone:
if bone.count('eye'):
if bone.count('eyebrow') or bone.count('eyelid'):
continue
else:
prefix, bone = bone_convert(bone)
ebone = armature.data.edit_bones['ORG-' + prefix + bone]
pbone = armature.pose.bones['ORG-' + prefix + bone]
trackto = pbone.constraints.new('TRACK_TO')
trackto.target = armature
trackto.subtarget = 'driver_' + bone
if satinfo.sbox:
trackto.track_axis = 'TRACK_X'
ebone.layers[27] = True
ebone.layers[0] = False
utils.arm.animation_armature_real.layers[24] = False
if armature['target_object'] and armature['has_shapekeys']:
keyblocks = armature['target_object'].data.shape_keys.key_blocks
## Driver bone parameters ##
for cat in utils.arm.rigify_shapekeys.keys():
for container, shapekey in utils.arm.rigify_shapekeys[cat].items():
for shapekey in shapekey:
#Creates driver
driver = keyblocks[shapekey].driver_add('value') #Creates new driver for shapekey
#Parameters and target
variable = driver.driver.variables.new() #Creates a new variable on the shapekey's driver
variable.name = "flex"
driver.driver.expression = variable.name #Changes expression to created variable's name
variable.type = 'TRANSFORMS' #Changes type of variable to transform
target = variable.targets[0]
target.id = utils.arm.animation_armature #Links variable to animation armature
#Specific tweaks for each bone
#Eyebrows
if cat == 'eyebrows':
target.transform_space = 'LOCAL_SPACE'
target.transform_type = 'LOC_Z'
if container == 'inner_eyebrow_raise' or container == 'inner_eyebrow_drop':
if shapekey.endswith('_L'):
target.bone_target = "Inner_Eyebrow_L"
elif shapekey.endswith('_R'):
target.bone_target = "Inner_Eyebrow_R"
elif container == 'outer_eyebrow_raise' or container == 'outer_eyebrow_drop':
if shapekey.endswith('_L'):
target.bone_target = "Outer_Eyebrow_L"
elif shapekey.endswith('_R'):
target.bone_target = "Outer_Eyebrow_R"
elif container == 'eyebrow_drop' or container == 'eyebrow_raise':
if shapekey.endswith('_L'):
target.bone_target = "Eyebrow_L"
elif shapekey.endswith('_R'):
target.bone_target = "Eyebrow_R"
if container == 'eyebrow_drop' or container == 'outer_eyebrow_drop' or container == 'inner_eyebrow_drop':
driver.modifiers[0].coefficients[1] = -2/unit
elif container == 'inner_eyebrow_raise' or container == 'outer_eyebrow_raise' or container == 'eyebrow_raise':
driver.modifiers[0].coefficients[1] = 2/unit
elif cat == 'eyes':
target.transform_space = 'LOCAL_SPACE'
target.transform_type = 'LOC_Z'
if container == 'upper_eyelid_drop':
driver.driver.expression = variable.name + '/5'
elif container != 'upper_eyelid_close':
#Creates another driver controlled by the corresponding eye bone
variable2 = driver.driver.variables.new()
variable2.name = "eye"
driver.driver.expression = '{} + {}/5'.format(variable.name, variable2.name) #Combines the old driver with the new driver, making the latter have less influence
variable2.type = 'TRANSFORMS'
target2 = variable2.targets[0]
target2.id = utils.arm.animation_armature
target2.transform_space = 'LOCAL_SPACE'
target2.transform_type = 'LOC_Z'
if container == 'upper_eyelid_close':
if shapekey.endswith('_L'):
target.bone_target = "UpperEye_L"
elif shapekey.endswith('_R'):
target.bone_target = "UpperEye_R"
elif container == 'upper_eyelid_raise':
if shapekey.endswith('_L'):
target.bone_target = "UpperEye_L"
if utils.arm.eye_left:
target2.bone_target = utils.arm.eye_left
elif shapekey.endswith('_R'):
target.bone_target = "UpperEye_R"
if utils.arm.eye_right:
target2.bone_target = utils.arm.eye_right
elif container == 'lower_eyelid_drop' or container == 'lower_eyelid_raise':
if shapekey.endswith('_L'):
target.bone_target = "LowerEye_L"
if utils.arm.eye_left:
target2.bone_target = utils.arm.eye_left
elif shapekey.endswith('_R'):
target.bone_target = "LowerEye_R"
if utils.arm.eye_right:
target2.bone_target = utils.arm.eye_right
elif container == 'upper_eyelid_drop':
if shapekey.endswith('_L'):
if utils.arm.eye_left:
target.bone_target = utils.arm.eye_left
elif shapekey.endswith('_R'):
if utils.arm.eye_right:
target.bone_target = utils.arm.eye_right
if container == 'upper_eyelid_close' or container == 'lower_eyelid_drop' or container == 'upper_eyelid_drop':
driver.modifiers[0].coefficients[1] = -5/unit
elif container == 'upper_eyelid_raise' or container == 'lower_eyelid_raise':
driver.modifiers[0].coefficients[1] = 5/unit
elif cat == 'cheek':
target.transform_space = 'LOCAL_SPACE'
if shapekey.endswith('_L'):
target.bone_target = "Cheek_L"
elif shapekey.endswith('_R'):
target.bone_target = "Cheek_R"
if container == 'squint':
target.transform_type = 'LOC_Z'
driver.modifiers[0].coefficients[1] = 1/unit
elif container == 'cheek_puff':
target.transform_type = 'LOC_Y'
driver.modifiers[0].coefficients[1] = -1/unit
elif cat == 'nose':
target.transform_space = 'LOCAL_SPACE'
if shapekey.endswith('_L'):
target.bone_target = "Nostril_L"
elif shapekey.endswith('_R'):
target.bone_target = "Nostril_R"
if container == 'nose_wrinkler':
target.transform_type = 'LOC_Z'
driver.modifiers[0].coefficients[1] = 2/unit
elif container == 'breath':
target.transform_type = 'LOC_X'
if shapekey.endswith('_L'):
driver.modifiers[0].coefficients[1] = 2/unit
elif shapekey.endswith('_R'):
driver.modifiers[0].coefficients[1] = -2/unit
elif cat == 'mouth':
target.transform_space = 'LOCAL_SPACE'
#Mouth corners
if container in ('smile', 'frown', 'tightener', 'puckerer'):
if shapekey.endswith('_L'):
target.bone_target = "MouthCorner_L"
elif shapekey.endswith('_R'):
target.bone_target = "MouthCorner_R"
if container == 'smile' or container == 'frown':
target.transform_type = 'LOC_Z'
if container == 'smile':
driver.modifiers[0].coefficients[1] = 2/unit
elif container == 'frown':
driver.modifiers[0].coefficients[1] = -2/unit
elif container == 'tightener' or container == 'puckerer':
target.transform_type = 'LOC_X'
if container == 'tightener':
if shapekey.endswith('_L'):
driver.modifiers[0].coefficients[1] = 2/unit
elif shapekey.endswith('_R'):
driver.modifiers[0].coefficients[1] = -2/unit
elif container == 'puckerer':
if shapekey.endswith('_L'):
driver.modifiers[0].coefficients[1] = -2/unit
elif shapekey.endswith('_R'):
driver.modifiers[0].coefficients[1] = 2/unit
#Upper lips
elif container == 'upper_lip_raise':
target.transform_type = 'LOC_Z'
target.transform_space = 'LOCAL_SPACE'
if shapekey.endswith('_L'):
target.bone_target = "UpperLip_L"
elif shapekey.endswith('_R'):
target.bone_target = "UpperLip_R"
driver.modifiers[0].coefficients[1] = 2/unit
#Lower lips
elif container == 'lower_lip_raise' or container == 'lower_lip_drop' or container == 'bite':
target.transform_space = 'LOCAL_SPACE'
if shapekey.endswith('_L'):
target.bone_target = "LowerLip_L"
elif shapekey.endswith('_R'):
target.bone_target = "LowerLip_R"
if container == 'lower_lip_raise' or container == 'lower_lip_drop':
target.transform_type = 'LOC_Z'
if container == 'lower_lip_drop':
driver.modifiers[0].coefficients[1] = -3/unit
keyblocks[shapekey].slider_max = 1.5
elif container == 'lower_lip_raise':
driver.modifiers[0].coefficients[1] = 2/unit
elif container == 'bite':
target.transform_type = 'LOC_Y'
driver.modifiers[0].coefficients[1] = -2/unit
#MiddleLip
elif container == 'mouth_left' or container == 'mouth_right' or container == 'light_puckerer':
target.bone_target = "MiddleLip"
target.transform_space = 'LOCAL_SPACE'
if container == 'mouth_left' or container == 'mouth_right':
target.transform_type = 'LOC_X'
if shapekey.endswith('L'):
driver.modifiers[0].coefficients[1] = 1/unit
elif shapekey.endswith('R'):
driver.modifiers[0].coefficients[1] = -1/unit
elif container == 'light_puckerer':
target.transform_type = 'LOC_Y'
driver.modifiers[0].coefficients[1] = -2/unit
elif cat == 'chin':
target.bone_target = "Chin"
target.transform_space = 'LOCAL_SPACE'
#Upwards/Downwards movement
if container in ('chin_raise', 'light_chin_drop', 'medium_chin_drop', 'full_chin_drop'):
target.transform_type = 'LOC_Z'
#Documentation (since I may be the first human on earth to find and/or utilize this):
#driver.keyframe_points.add(count) = Adds 'count' keyframes
#driver.keyframe_points[index] = Accesses a keyframe
#driver.keyframe_points[0].co_ui[0] = Horizontal position
#driver.keyframe_points[0].co_ui[1] = Vertical position
#driver.keyframe_points[0].handle_(left/right) = Keyframe handle location
#driver.keyframe_points[0].handle_(left/right)_type = Handle interpolation type
if container == 'chin_raise':
driver.modifiers[0].coefficients[1] = 1/unit
#Chin lowers
if container == 'light_chin_drop':
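#Bell-shaped curve: zero at rest, full at -0.6*unit, back to zero by -1.2*unit so deeper drops hand off to the medium/full shapekeys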
driver.keyframe_points.add(3)
#Keyframe positions and values
driver.keyframe_points[0].co_ui[0] = 0
driver.keyframe_points[0].co_ui[1] = 0
driver.keyframe_points[1].co_ui[0] = -0.6*unit
driver.keyframe_points[1].co_ui[1] = 1
driver.keyframe_points[2].co_ui[0] = -1.2*unit
driver.keyframe_points[2].co_ui[1] = 0
#Handles
driver.keyframe_points[1].handle_left_type = 'FREE'
driver.keyframe_points[1].handle_right_type = 'FREE'
driver.keyframe_points[1].handle_left[0] = -0.75*unit
driver.keyframe_points[1].handle_left[1] = 1
driver.keyframe_points[1].handle_right[0] = -0.45*unit
driver.keyframe_points[1].handle_right[1] = 0.5
driver.keyframe_points[2].handle_left_type = 'ALIGNED'
driver.keyframe_points[2].handle_right_type = 'ALIGNED'
driver.keyframe_points[2].handle_left[0] = -1.3*unit
driver.keyframe_points[2].handle_left[1] = 0
driver.keyframe_points[2].handle_right[0] = -0.945*unit
driver.keyframe_points[2].handle_right[1] = 0
#Forces refresh
driver.auto_smoothing = 'CONT_ACCEL'
try:
driver.modifiers.remove(driver.modifiers[0])
except:
pass
elif container == 'medium_chin_drop':
driver.keyframe_points.add(4)
driver.keyframe_points[0].co_ui[0] = 0
driver.keyframe_points[0].co_ui[1] = 0
driver.keyframe_points[1].co_ui[0] = -0.6*unit
driver.keyframe_points[1].co_ui[1] = 0
driver.keyframe_points[2].co_ui[0] = -1*unit
driver.keyframe_points[2].co_ui[1] = 0.95
driver.keyframe_points[3].co_ui[0] = -1.5*unit
driver.keyframe_points[3].co_ui[1] = 0
driver.keyframe_points[1].handle_left_type = 'AUTO'
driver.keyframe_points[1].handle_right_type = 'AUTO'
driver.keyframe_points[2].handle_left_type = 'ALIGNED'
driver.keyframe_points[2].handle_right_type = 'ALIGNED'
driver.keyframe_points[2].handle_left[0] = -1.1*unit
driver.keyframe_points[2].handle_left[1] = 0.95
driver.keyframe_points[2].handle_right[0] = -0.9*unit
driver.keyframe_points[2].handle_right[1] = 0.95
driver.keyframe_points[3].handle_left_type = 'ALIGNED'
driver.keyframe_points[3].handle_right_type = 'ALIGNED'
driver.keyframe_points[3].handle_left[0] = -2*unit
driver.keyframe_points[3].handle_left[1] = 0
driver.keyframe_points[3].handle_right[0] = -1.3*unit
driver.keyframe_points[3].handle_right[1] = 0
#Forces refresh
driver.auto_smoothing = 'CONT_ACCEL'
try:
driver.modifiers.remove(driver.modifiers[0])
except:
pass
elif container == 'full_chin_drop':
driver.keyframe_points.add(2)
driver.keyframe_points[0].co_ui[0] = -0.95*unit
driver.keyframe_points[0].co_ui[1] = 0
driver.keyframe_points[1].co_ui[0] = -1.5*unit
driver.keyframe_points[1].co_ui[1] = 1
driver.keyframe_points[0].handle_left_type = 'ALIGNED'
driver.keyframe_points[0].handle_right_type = 'ALIGNED'
driver.keyframe_points[0].handle_left[0] = -1.135*unit
driver.keyframe_points[0].handle_left[1] = 0.275
driver.keyframe_points[0].handle_right[0] = -0.825*unit
driver.keyframe_points[0].handle_right[1] = -0.185
driver.keyframe_points[1].handle_left_type = 'ALIGNED'
driver.keyframe_points[1].handle_right_type = 'ALIGNED'
driver.keyframe_points[1].handle_left[0] = -1.6*unit
driver.keyframe_points[1].handle_left[1] = 1
driver.keyframe_points[1].handle_right[0] = -1.3*unit
driver.keyframe_points[1].handle_right[1] = 1
#Forces refresh
driver.auto_smoothing = 'CONT_ACCEL'
try:
driver.modifiers.remove(driver.modifiers[0])
except:
pass
#Sideways movement
elif container == 'chin_left' or container == 'chin_right':
target.transform_type = 'LOC_X'
if container == 'chin_left':
driver.modifiers[0].coefficients[1] = 1/unit
elif container == 'chin_right':
driver.modifiers[0].coefficients[1] = -1/unit
#Updates bone list in case it was modified
utils.arm.get_bones(False)
if action == 0 or action == 1: #Usual creation/deletion
generate_rigify(action)
if action == 0:
face_flex_setup()
elif action == 2: #Creates empties and links them to the Source armature, also creates widgets and sets up facial flexes
if satinfo.scheme == 0 and not satinfo.sbox:
armature_rename(1, utils.arm.armature)
link()
armature_rename(0, utils.arm.armature)
else:
link()
satproperties.retarget_constraints = True
if utils.arm.armature.visible_get():
utils.arm.armature.hide_set(True)
try:
bpy.context.scene.collection.objects.unlink(utils.arm.animation_armature)
except:
pass
try:
collection = utils.arm.armature.users_collection[0]
collection.objects.link(utils.arm.animation_armature)
except:
pass
bpy.context.view_layer.objects.active = utils.arm.animation_armature
bpy.ops.object.mode_set(mode='OBJECT')
def create_widgets(widgets=None):
#Creates widgets collection before Rigify
try:
collection = bpy.data.collections['Widgets']
except:
collection = bpy.data.collections.new('Widgets')
bpy.context.scene.collection.children.link(collection)
collection.hide_viewport = True
#Empty that stores all the generated widgets for easier storage/manipulation
try:
parent = bpy.data.objects['parent_widgets']
except:
parent = bpy.data.objects.new('parent_widgets', None)
collection.objects.link(parent)
if not widgets:
widgets = ['4Directions', 'Cheek', 'LowerLip', 'MiddleLip', 'UpperLip', 'Nostril_L', 'Nostril_R', 'UpDown']
for widget in widgets:
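#Only builds the widget mesh if an object with that name doesn't already exist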
try:
bpy.data.objects[widget]
except:
#Creates mesh datablock and object
mesh = bpy.data.meshes.new(widget)
object = bpy.data.objects.new(widget, mesh)
object.parent = parent
#Gets Rigify's collection and links to it
collection = bpy.data.collections['Widgets']
collection.objects.link(object)
faces = []
if widget == '4Directions':
vertices = [(0.0000, 0.0000, 1.0000), (-0.3827, 0.0000, 0.9239), (-0.7071, 0.0000, 0.7071), (-0.9239, 0.0000, 0.3827), (-1.0000, 0.0000, 0.0000), (-0.9239, -0.0000, -0.3827), (-0.7071, -0.0000, -0.7071), (-0.3827, -0.0000, -0.9239), (0.0000, -0.0000, -1.0000), (0.3827, -0.0000, -0.9239), (0.7071, -0.0000, -0.7071), (0.9239, -0.0000, -0.3827), (1.0000, 0.0000, 0.0000), (0.9239, 0.0000, 0.3827), (0.7071, 0.0000, 0.7071), (0.3827, 0.0000, 0.9239), (-0.3718, 0.0000, 1.8058), (-0.6592, 0.0000, 1.3381), (0.3718, 0.0000, 1.8058), (0.6592, 0.0000, 1.3381), (0.4179, 0.0000, 1.0882), (0.7722, 0.0000, 0.8515), (-0.7722, 0.0000, 0.8515), (-0.4179, 0.0000, 1.0882), (0.0000, 0.0000, 1.1805), (0.0000, 0.0000, 2.2000), (-2.2000, 0.0000, 0.0000), (-1.1805, 0.0000, 0.0000), (-1.0882, 0.0000, -0.4179), (-0.8515, 0.0000, -0.7722), (-0.8515, 0.0000, 0.7722), (-1.0882, 0.0000, 0.4179), (-1.3381, 0.0000, 0.6592), (-1.8058, 0.0000, 0.3718), (-1.3381, 0.0000, -0.6592), (-1.8058, 0.0000, -0.3718), (2.2000, 0.0000, -0.0000), (1.1805, 0.0000, -0.0000), (1.0882, 0.0000, 0.4179), (0.8515, 0.0000, 0.7722), (0.8515, 0.0000, -0.7722), (1.0882, 0.0000, -0.4179), (1.3381, 0.0000, -0.6592), (1.8058, 0.0000, -0.3718), (1.3381, 0.0000, 0.6592), (1.8058, 0.0000, 0.3718), (0.3718, 0.0000, -1.8058), (0.6592, 0.0000, -1.3381), (-0.3718, 0.0000, -1.8058), (-0.6592, 0.0000, -1.3381), (-0.4179, 0.0000, -1.0882), (-0.7722, 0.0000, -0.8515), (0.7722, 0.0000, -0.8515), (0.4179, 0.0000, -1.0882), (-0.0000, 0.0000, -1.1805), (-0.0000, 0.0000, -2.2000)]
edges = [(1, 0), (2, 1), (3, 2), (4, 3), (5, 4), (6, 5), (7, 6), (8, 7), (9, 8), (10, 9), (11, 10), (12, 11), (13, 12), (14, 13), (15, 14), (0, 15), (16, 25), (17, 22), (18, 25), (19, 21), (17, 16), (19, 18), (24, 20), (20, 21), (22, 23), (23, 24), (28, 27), (29, 28), (31, 30), (27, 31), (32, 33), (34, 35), (32, 30), (33, 26), (34, 29), (35, 26), (38, 37), (39, 38), (41, 40), (37, 41), (42, 43), (44, 45), (42, 40), (43, 36), (44, 39), (45, 36), (46, 55), (47, 52), (48, 55), (49, 51), (47, 46), (49, 48), (54, 50), (50, 51), (52, 53), (53, 54)]
elif widget == 'Cheek':
vertices = [(0.0000, 0.0000, -1.0000), (0.3827, 0.0000, -0.9239), (0.7071, 0.0000, -0.7071), (0.9239, 0.0000, -0.3827), (1.0000, 0.0000, 0.0000), (0.9239, -0.0000, 0.3827), (0.7071, -0.0000, 0.7071), (0.3827, -0.0000, 0.9239), (-0.0000, -0.0000, 1.0000), (-0.3827, -0.0000, 0.9239), (-0.7071, -0.0000, 0.7071), (-0.9239, -0.0000, 0.3827), (-1.0000, 0.0000, -0.0000), (-0.9239, 0.0000, -0.3827), (-0.7071, 0.0000, -0.7071), (-0.3827, 0.0000, -0.9239), (-0.2679, 0.0000, -0.6467), (-0.4950, 0.0000, -0.4950), (-0.6467, 0.0000, -0.2679), (-0.7000, 0.0000, -0.0000), (-0.6467, -0.0000, 0.2679), (-0.4950, -0.0000, 0.4950), (-0.2679, -0.0000, 0.6467), (-0.0000, -0.0000, 0.7000), (0.2679, -0.0000, 0.6467), (0.4950, -0.0000, 0.4950), (0.6467, -0.0000, 0.2679), (0.7000, 0.0000, 0.0000), (0.6467, 0.0000, -0.2679), (0.4950, 0.0000, -0.4950), (0.2679, 0.0000, -0.6467), (0.0000, 0.0000, -0.7000), (-0.3718, 0.0000, 1.8058), (-0.6592, 0.0000, 1.3381), (0.3718, 0.0000, 1.8058), (0.6592, 0.0000, 1.3381), (0.4179, 0.0000, 1.0882), (0.7722, 0.0000, 0.8515), (-0.7722, 0.0000, 0.8515), (-0.4179, 0.0000, 1.0882), (0.0000, 0.0000, 1.1805), (-0.0000, 0.0000, 2.2000)]
edges = [(1, 0), (2, 1), (3, 2), (4, 3), (5, 4), (6, 5), (7, 6), (8, 7), (9, 8), (10, 9), (11, 10), (12, 11), (13, 12), (14, 13), (15, 14), (0, 15), (31, 16), (17, 18), (19, 20), (20, 21), (18, 19), (16, 17), (23, 24), (25, 26), (27, 28), (24, 25), (29, 30), (30, 31), (28, 29), (21, 22), (26, 27), (22, 23), (32, 41), (33, 38), (34, 41), (35, 37), (33, 32), (35, 34), (40, 36), (36, 37), (38, 39)]
elif widget == 'LowerLip':
vertices = [(0.0000, 0.0000, 1.0000), (-0.3827, 0.0000, 0.9239), (-0.7071, 0.0000, 0.7071), (-0.9239, 0.0000, 0.3827), (-1.0000, 0.0000, 0.0000), (-0.9239, -0.0000, -0.3827), (-0.7071, -0.0000, -0.7071), (-0.3827, -0.0000, -0.9239), (0.0000, -0.0000, -1.0000), (0.3827, -0.0000, -0.9239), (0.7071, -0.0000, -0.7071), (0.9239, -0.0000, -0.3827), (1.0000, 0.0000, 0.0000), (0.9239, 0.0000, 0.3827), (0.7071, 0.0000, 0.7071), (0.3827, 0.0000, 0.9239), (0.2679, 0.0000, 0.6467), (0.4950, 0.0000, 0.4950), (0.6467, 0.0000, 0.2679), (0.7000, 0.0000, 0.0000), (0.6467, -0.0000, -0.2679), (0.4950, -0.0000, -0.4950), (0.2679, -0.0000, -0.6467), (0.0000, -0.0000, -0.7000), (-0.2679, -0.0000, -0.6467), (-0.4950, -0.0000, -0.4950), (-0.6467, -0.0000, -0.2679), (-0.7000, 0.0000, 0.0000), (-0.6467, 0.0000, 0.2679), (-0.4950, 0.0000, 0.4950), (-0.2679, 0.0000, 0.6467), (0.0000, 0.0000, 0.7000), (0.3718, 0.0000, -1.8058), (0.6592, 0.0000, -1.3381), (-0.3718, 0.0000, -1.8058), (-0.6592, 0.0000, -1.3381), (-0.4179, 0.0000, -1.0882), (-0.7722, 0.0000, -0.8515), (0.7722, 0.0000, -0.8515), (0.4179, 0.0000, -1.0882), (-0.0000, 0.0000, -1.1805), (-0.0000, 0.0000, -2.2000), (-0.3718, 0.0000, 1.8058), (-0.6592, 0.0000, 1.3381), (0.3718, 0.0000, 1.8058), (0.6592, 0.0000, 1.3381), (0.4179, 0.0000, 1.0882), (0.7722, 0.0000, 0.8515), (-0.7722, 0.0000, 0.8515), (-0.4179, 0.0000, 1.0882), (0.0000, 0.0000, 1.1805), (-0.0000, 0.0000, 2.2000)]
edges = [(1, 0), (2, 1), (3, 2), (4, 3), (5, 4), (6, 5), (7, 6), (8, 7), (9, 8), (10, 9), (11, 10), (12, 11), (13, 12), (14, 13), (15, 14), (0, 15), (31, 16), (17, 18), (19, 20), (20, 21), (18, 19), (16, 17), (23, 24), (25, 26), (27, 28), (24, 25), (29, 30), (30, 31), (28, 29), (21, 22), (26, 27), (22, 23), (32, 41), (33, 38), (34, 41), (35, 37), (33, 32), (35, 34), (40, 36), (36, 37), (38, 39), (39, 40), (42, 51), (43, 48), (44, 51), (45, 47), (43, 42), (45, 44), (50, 46), (46, 47), (48, 49), (49, 50)]
elif widget == 'MiddleLip':
vertices = [(0.0000, 0.0000, -1.0000), (0.3827, 0.0000, -0.9239), (0.7071, 0.0000, -0.7071), (0.9239, 0.0000, -0.3827), (1.0000, 0.0000, 0.0000), (0.9239, -0.0000, 0.3827), (0.7071, -0.0000, 0.7071), (0.3827, -0.0000, 0.9239), (-0.0000, -0.0000, 1.0000), (-0.3827, -0.0000, 0.9239), (-0.7071, -0.0000, 0.7071), (-0.9239, -0.0000, 0.3827), (-1.0000, 0.0000, -0.0000), (-0.9239, 0.0000, -0.3827), (-0.7071, 0.0000, -0.7071), (-0.3827, 0.0000, -0.9239), (-0.2679, 0.0000, -0.6467), (-0.4950, 0.0000, -0.4950), (-0.6467, 0.0000, -0.2679), (-0.7000, 0.0000, -0.0000), (-0.6467, -0.0000, 0.2679), (-0.4950, -0.0000, 0.4950), (2.2000, 0.0000, 0.0000), (1.1805, 0.0000, -0.0000), (1.0882, 0.0000, 0.4179), (0.8515, 0.0000, 0.7722), (0.8515, 0.0000, -0.7722), (1.0882, 0.0000, -0.4179), (1.3381, 0.0000, -0.6592), (1.8058, 0.0000, -0.3718), (1.3381, 0.0000, 0.6592), (1.8058, 0.0000, 0.3718), (-2.2000, 0.0000, -0.0000), (-1.1805, 0.0000, 0.0000), (-1.0882, 0.0000, -0.4179), (-0.8515, 0.0000, -0.7722), (-0.8515, 0.0000, 0.7722), (-1.0882, 0.0000, 0.4179), (-1.3381, 0.0000, 0.6592), (-1.8058, 0.0000, 0.3718), (-1.3381, 0.0000, -0.6592), (-1.8058, 0.0000, -0.3718), (-0.2679, -0.0000, 0.6467), (-0.0000, -0.0000, 0.7000), (0.2679, -0.0000, 0.6467), (0.4950, -0.0000, 0.4950), (0.6467, -0.0000, 0.2679), (0.7000, 0.0000, 0.0000), (0.6467, 0.0000, -0.2679), (0.4950, 0.0000, -0.4950), (0.2679, 0.0000, -0.6467), (0.0000, 0.0000, -0.7000)]
edges = [(1, 0), (2, 1), (3, 2), (4, 3), (5, 4), (6, 5), (7, 6), (8, 7), (9, 8), (10, 9), (11, 10), (12, 11), (13, 12), (14, 13), (15, 14), (0, 15), (16, 17), (51, 16), (17, 18), (18, 19), (19, 20), (20, 21), (24, 23), (25, 24), (27, 26), (23, 27), (28, 29), (30, 31), (28, 26), (29, 22), (30, 25), (31, 22), (34, 33), (35, 34), (37, 36), (33, 37), (38, 39), (40, 41), (38, 36), (39, 32), (40, 35), (41, 32), (21, 42), (43, 44), (44, 45), (46, 47), (42, 43), (45, 46), (47, 48), (48, 49), (49, 50), (50, 51)]
elif widget == 'UpperLip':
vertices = [(0.0000, 0.0000, -1.0000), (0.3827, 0.0000, -0.9239), (0.7071, 0.0000, -0.7071), (0.9239, 0.0000, -0.3827), (1.0000, 0.0000, 0.0000), (0.9239, -0.0000, 0.3827), (0.7071, -0.0000, 0.7071), (0.3827, -0.0000, 0.9239), (-0.0000, -0.0000, 1.0000), (-0.3827, -0.0000, 0.9239), (-0.7071, -0.0000, 0.7071), (-0.9239, -0.0000, 0.3827), (-1.0000, 0.0000, -0.0000), (-0.9239, 0.0000, -0.3827), (-0.7071, 0.0000, -0.7071), (-0.3827, 0.0000, -0.9239), (-0.3718, 0.0000, 1.8058), (-0.6592, 0.0000, 1.3381), (0.3718, 0.0000, 1.8058), (0.6592, 0.0000, 1.3381), (0.4179, 0.0000, 1.0882), (0.7722, 0.0000, 0.8515), (-0.7722, 0.0000, 0.8515), (-0.4179, 0.0000, 1.0882), (0.0000, 0.0000, 1.1805), (-0.0000, 0.0000, 2.2000)]
edges = [(1, 0), (2, 1), (3, 2), (4, 3), (5, 4), (6, 5), (7, 6), (8, 7), (9, 8), (10, 9), (11, 10), (12, 11), (13, 12), (14, 13), (15, 14), (0, 15), (16, 25), (17, 22), (18, 25), (19, 21), (17, 16), (19, 18), (24, 20), (20, 21), (22, 23), (23, 24)]
elif widget == 'Nostril_L':
vertices = [(0.0000, 0.0000, -1.0000), (0.3827, 0.0000, -0.9239), (0.7071, 0.0000, -0.7071), (0.9239, 0.0000, -0.3827), (1.0000, 0.0000, 0.0000), (0.9239, -0.0000, 0.3827), (0.7071, -0.0000, 0.7071), (0.3827, -0.0000, 0.9239), (-0.0000, -0.0000, 1.0000), (-0.3827, -0.0000, 0.9239), (-0.7071, -0.0000, 0.7071), (-0.9239, -0.0000, 0.3827), (-1.0000, 0.0000, -0.0000), (-0.9239, 0.0000, -0.3827), (-0.7071, 0.0000, -0.7071), (-0.3827, 0.0000, -0.9239), (-0.3718, 0.0000, 1.8058), (-0.6592, 0.0000, 1.3381), (0.3718, 0.0000, 1.8058), (0.6592, 0.0000, 1.3381), (0.4179, 0.0000, 1.0882), (0.7722, 0.0000, 0.8515), (-0.7722, 0.0000, 0.8515), (-0.4179, 0.0000, 1.0882), (0.0000, 0.0000, 1.1805), (-0.0000, 0.0000, 2.2000), (1.8058, 0.0000, 0.3718), (1.3381, 0.0000, 0.6592), (1.8058, 0.0000, -0.3718), (1.3381, 0.0000, -0.6592), (1.0882, 0.0000, -0.4179), (0.8515, 0.0000, -0.7722), (0.8515, 0.0000, 0.7722), (1.0882, 0.0000, 0.4179), (1.1805, 0.0000, -0.0000), (2.2000, 0.0000, 0.0000)]
edges = [(1, 0), (2, 1), (3, 2), (4, 3), (5, 4), (6, 5), (7, 6), (8, 7), (9, 8), (10, 9), (11, 10), (12, 11), (13, 12), (14, 13), (15, 14), (0, 15), (16, 25), (17, 22), (18, 25), (19, 21), (17, 16), (19, 18), (24, 20), (20, 21), (22, 23), (23, 24), (26, 35), (27, 32), (28, 35), (29, 31), (27, 26), (29, 28), (34, 30), (30, 31), (32, 33), (33, 34)]
elif widget == 'Nostril_R':
vertices = [(-0.0000, 0.0000, -1.0000), (-0.3827, 0.0000, -0.9239), (-0.7071, 0.0000, -0.7071), (-0.9239, 0.0000, -0.3827), (-1.0000, 0.0000, 0.0000), (-0.9239, -0.0000, 0.3827), (-0.7071, -0.0000, 0.7071), (-0.3827, -0.0000, 0.9239), (0.0000, -0.0000, 1.0000), (0.3827, -0.0000, 0.9239), (0.7071, -0.0000, 0.7071), (0.9239, -0.0000, 0.3827), (1.0000, 0.0000, -0.0000), (0.9239, 0.0000, -0.3827), (0.7071, 0.0000, -0.7071), (0.3827, 0.0000, -0.9239), (0.3718, 0.0000, 1.8058), (0.6592, 0.0000, 1.3381), (-0.3718, 0.0000, 1.8058), (-0.6592, 0.0000, 1.3381), (-0.4179, 0.0000, 1.0882), (-0.7722, 0.0000, 0.8515), (0.7722, 0.0000, 0.8515), (0.4179, 0.0000, 1.0882), (-0.0000, 0.0000, 1.1805), (0.0000, 0.0000, 2.2000), (-1.8058, 0.0000, 0.3718), (-1.3381, 0.0000, 0.6592), (-1.8058, 0.0000, -0.3718), (-1.3381, 0.0000, -0.6592), (-1.0882, 0.0000, -0.4179), (-0.8515, 0.0000, -0.7722), (-0.8515, 0.0000, 0.7722), (-1.0882, 0.0000, 0.4179), (-1.1805, 0.0000, -0.0000), (-2.2000, 0.0000, 0.0000)]
edges = [(1, 0), (2, 1), (3, 2), (4, 3), (5, 4), (6, 5), (7, 6), (8, 7), (9, 8), (10, 9), (11, 10), (12, 11), (13, 12), (14, 13), (15, 14), (0, 15), (16, 25), (17, 22), (18, 25), (19, 21), (17, 16), (19, 18), (24, 20), (20, 21), (22, 23), (23, 24), (26, 35), (27, 32), (28, 35), (29, 31), (27, 26), (29, 28), (34, 30), (30, 31), (32, 33), (33, 34)]
elif widget == 'UpDown':
vertices = [(0.0000, 0.0000, 1.0000), (-0.3827, 0.0000, 0.9239), (-0.7071, 0.0000, 0.7071), (-0.9239, 0.0000, 0.3827), (-1.0000, 0.0000, 0.0000), (-0.9239, -0.0000, -0.3827), (-0.7071, -0.0000, -0.7071), (-0.3827, -0.0000, -0.9239), (0.0000, -0.0000, -1.0000), (0.3827, -0.0000, -0.9239), (0.7071, -0.0000, -0.7071), (0.9239, -0.0000, -0.3827), (1.0000, 0.0000, 0.0000), (0.9239, 0.0000, 0.3827), (0.7071, 0.0000, 0.7071), (0.3827, 0.0000, 0.9239), (0.3718, 0.0000, -1.8058), (0.6592, 0.0000, -1.3381), (-0.3718, 0.0000, -1.8058), (-0.6592, 0.0000, -1.3381), (-0.4179, 0.0000, -1.0882), (-0.7722, 0.0000, -0.8515), (0.7722, 0.0000, -0.8515), (0.4179, 0.0000, -1.0882), (-0.0000, 0.0000, -1.1805), (-0.0000, 0.0000, -2.2000), (-0.0000, 0.0000, 2.2000), (0.0000, 0.0000, 1.1805), (-0.4179, 0.0000, 1.0882), (-0.7722, 0.0000, 0.8515), (0.7722, 0.0000, 0.8515), (0.4179, 0.0000, 1.0882), (0.6592, 0.0000, 1.3381), (0.3718, 0.0000, 1.8058), (-0.6592, 0.0000, 1.3381), (-0.3718, 0.0000, 1.8058)]
edges = [(1, 0), (2, 1), (3, 2), (4, 3), (5, 4), (6, 5), (7, 6), (8, 7), (9, 8), (10, 9), (11, 10), (12, 11), (13, 12), (14, 13), (15, 14), (0, 15), (16, 25), (17, 22), (18, 25), (19, 21), (17, 16), (19, 18), (24, 20), (20, 21), (22, 23), (23, 24), (28, 27), (31, 30), (27, 31), (34, 35), (29, 28), (32, 33), (32, 30), (33, 26), (34, 29), (35, 26)]
object.data.from_pydata(vertices, edges, faces)
print("Facial flex widgets generated!")
def bake(mode):
#mode 0 = single
#mode 1 = all
def bake_strip(nla_track):
satproperties = bpy.context.scene.satproperties
        if not armature.animation_data.action or armature.animation_data.nla_tracks.active:
strip = nla_track.strips[0]
action = strip.action
#Unmutes track and strip in case they were muted
nla_track.mute = False
strip.mute = False
else:
action = armature.animation_data.action
#Changes the anim armature's action name so the original armature can have it
if action.name.startswith('_'):
name = action.name[1:]
else:
name = action.name
action.name = '_' + action.name
#Original armature's NLA tracks
nla_track2 = armature2.animation_data.nla_tracks
#Creates new action to store baked data in, or overrides existing one
        #Reconstructs actions, strips (and tracks if in bulk) to account for any change made in the animation armature track
try:
action2 = bpy.data.actions[name]
bpy.data.actions.remove(action2)
action2 = bpy.data.actions.new(name)
        except KeyError:
action2 = bpy.data.actions.new(name)
try:
bake_track = nla_track2[name]
if mode == 1:
nla_track2.remove(bake_track)
bake_track = nla_track2.new()
bake_track.name = name
        except KeyError:
bake_track = nla_track2.new()
bake_track.name = name
try:
bake_strip = bake_track.strips[name]
bake_track.strips.remove(bake_strip)
bake_strip = bake_track.strips.new(name, 0, action2)
        except KeyError:
bake_strip = bake_track.strips.new(name, 0, action2)
#Selection, tweak mode and bake
bake_track.select = True
bake_strip.select = True
armature2.animation_data.use_tweak_mode = True
if not satproperties.bake_helper_bones or utils.arm.other_bones.get('ik'):
bpy.ops.pose.select_all(action='SELECT')
if utils.arm.other_bones.get('ik'):
for bone in utils.arm.other_bones['ik']:
if bone:
prefix, bone = bone_convert(bone)
armature2.data.bones[prefix + bone].select = False
bpy.ops.nla.bake(frame_start=action.frame_range[0], frame_end=action.frame_range[1], only_selected=True, visual_keying=True, use_current_action=True, bake_types={'POSE'})
#Removes the baked rotation values, only keeping positional values
armature2.animation_data.nla_tracks[0].strips[0].select = True
armature2.animation_data.use_tweak_mode = True
for cat in utils.arm.helper_bones.keys():
for container, bone in utils.arm.helper_bones[cat].items():
for bone in bone:
if bone:
prefix, bone = bone_convert(bone)
for i in range(int(action.frame_range[0]), int(action.frame_range[1])+1):
armature2.pose.bones[prefix + bone].keyframe_delete('rotation_quaternion', -1, i)
armature2.animation_data.use_tweak_mode = False
else:
bpy.ops.nla.bake(frame_start=action.frame_range[0], frame_end=action.frame_range[1], only_selected=False, visual_keying=True, use_current_action=True, bake_types={'POSE'})
armature2.animation_data.use_tweak_mode = False
if mode == 1:
            nla_track.mute = True
bake_track.mute = True
bake_track.select = False
bpy.ops.pose.transforms_clear()
armature = utils.arm.animation_armature
armature2 = utils.arm.armature
#Gets current mode to set it back after operation completion
current_mode = bpy.context.object.mode
selected_objects = bpy.context.selected_objects
active_object = bpy.context.view_layer.objects.active
bpy.ops.object.mode_set(mode='OBJECT')
#Resets the position of all bones for both armatures to avoid other actions from messing with the selected action
if armature.hide_get() or not armature.select_get():
armature.hide_set(False)
armature.select_set(True)
bpy.context.view_layer.objects.active = armature
bpy.ops.object.mode_set(mode='POSE')
if armature.animation_data.use_tweak_mode == True:
armature.animation_data.use_tweak_mode = False
#Selects the bones and resets any bone translations
bpy.ops.pose.select_all(action='SELECT')
bpy.ops.pose.transforms_clear()
bpy.ops.object.mode_set(mode='OBJECT')
armature.select_set(False)
if armature2.hide_get() or not armature2.select_get():
armature2.hide_set(False)
armature2.select_set(True)
bpy.context.view_layer.objects.active = armature2
if not armature2.animation_data:
armature2.animation_data_create()
bpy.ops.object.mode_set(mode='POSE')
if armature2.animation_data.use_tweak_mode == True:
armature2.animation_data.use_tweak_mode = False
#Selects the bones and resets any bone translations
bpy.ops.pose.select_all(action='SELECT')
bpy.ops.pose.transforms_clear()
unmuted_track = []
if not armature.animation_data.action:
for track in armature.animation_data.nla_tracks:
if track.mute == False:
unmuted_track.append(track.name)
track.mute = True
for track in armature2.animation_data.nla_tracks:
if track.mute == False:
track.mute = True
if mode == 0:
bake_strip(armature.animation_data.nla_tracks.active)
elif mode == 1:
for track in armature.animation_data.nla_tracks:
bake_strip(track)
#Reenables disabled tracks
for track in unmuted_track:
track = armature.animation_data.nla_tracks[track]
track.mute = False
for object in selected_objects:
object.select_set(True)
bpy.context.view_layer.objects.active = active_object
bpy.ops.object.mode_set(mode=current_mode)
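# Example (illustrative): bake(0) bakes only the active NLA track of the
# animation armature, while bake(1) bakes every track in bulk.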
def export():
armature = utils.arm.armature
current_mode = bpy.context.object.mode
bpy.ops.object.mode_set(mode='OBJECT')
bpy.ops.object.select_all(action='DESELECT')
if armature.hide_get() or not armature.select_get():
armature.hide_set(False)
armature.select_set(True)
bpy.context.view_layer.objects.active = armature
if armature.animation_data.use_tweak_mode == True:
armature.animation_data.use_tweak_mode = False
#Workaround due to exporting updating the scene invalidating the armature variable
track_count = len(armature.animation_data.nla_tracks)
for track in armature.animation_data.nla_tracks:
track.select = False
for index in range(track_count):
track = armature.animation_data.nla_tracks[index]
strip = track.strips[0]
armature.animation_data.nla_tracks.active = track
#Selection, tweak mode and bake
track.select = True
strip.select = True
armature.animation_data.use_tweak_mode = True
bpy.ops.export_scene.smd()
armature = utils.arm.armature
armature.animation_data.use_tweak_mode = False
track.select = False
strip.select = False
bpy.ops.object.mode_set(mode=current_mode)
def retarget_constraints(self, context):
satproperties = bpy.context.scene.satproperties
satinfo = bpy.context.scene.satinfo
armature = utils.arm.armature
armature2 = utils.arm.animation_armature
if satproperties.retarget_constraints:
value = False
else:
value = True
if satinfo.animation_armature and not satinfo.animation_armature_setup:
for cat in utils.arm.symmetrical_bones.keys():
for container, bone in utils.arm.symmetrical_bones[cat].items():
for bone in bone:
if bone:
prefix, bone = bone_convert(bone)
try:
armature.pose.bones[prefix + bone].constraints['Retarget Location'].mute = value
except:
pass
try:
armature.pose.bones[prefix + bone].constraints['Retarget Rotation'].mute = value
except:
pass
for container, bone in utils.arm.central_bones.items():
for bone in bone:
if bone:
prefix, bone = bone_convert(bone)
try:
armature.pose.bones[prefix + bone].constraints['Retarget Location'].mute = value
except:
pass
try:
armature.pose.bones[prefix + bone].constraints['Retarget Rotation'].mute = value
except:
pass
for cat in utils.arm.helper_bones.keys():
for container, bone in utils.arm.helper_bones[cat].items():
for bone in bone:
if bone:
prefix, bone = bone_convert(bone)
if satinfo.viewmodel:
try:
armature.pose.bones[prefix + bone].constraints['Procedural Bone'].mute = value
except:
pass
try:
armature.pose.bones[prefix + bone].constraints['Retarget Location'].mute = value
except:
pass
for cat in utils.arm.attachment_bones.keys():
for container, bone in utils.arm.attachment_bones[cat].items():
for bone in bone:
if bone:
prefix, bone = bone_convert(bone)
try:
armature.pose.bones[prefix + bone].constraints['Retarget Location'].mute = value
except:
pass
try:
armature.pose.bones[prefix + bone].constraints['Retarget Rotation'].mute = value
except:
pass
for container, bone in utils.arm.custom_bones.items():
for bone in bone:
if bone:
prefix, bone = bone_convert(bone)
try:
armature.pose.bones[prefix + bone].constraints['Retarget Location'].mute = value
except:
pass
try:
armature.pose.bones[prefix + bone].constraints['Retarget Rotation'].mute = value
except:
pass
if armature.animation_data:
for track in armature.animation_data.nla_tracks:
if satproperties.retarget_constraints:
if track.mute == False:
track.mute = True
else:
if track.mute == True:
track.mute = False
current_mode = bpy.context.object.mode
bpy.ops.object.mode_set(mode='OBJECT')
if not satproperties.retarget_constraints:
if armature.hide_get():
armature.hide_set(False)
if armature.visible_get():
bpy.ops.object.select_all(action='DESELECT')
armature.select_set(True)
bpy.context.view_layer.objects.active = armature
bpy.ops.object.mode_set(mode=current_mode)
else:
if armature2.hide_get():
armature2.hide_set(True)
if armature2.visible_get():
bpy.ops.object.select_all(action='DESELECT')
armature2.select_set(True)
bpy.context.view_layer.objects.active = armature2
bpy.ops.object.mode_set(mode=current_mode) |
"""Support for Velbus light."""
from __future__ import annotations
from typing import Any
from velbusaio.channels import (
Button as VelbusButton,
Channel as VelbusChannel,
Dimmer as VelbusDimmer,
)
from homeassistant.components.light import (
ATTR_BRIGHTNESS,
ATTR_FLASH,
ATTR_TRANSITION,
FLASH_LONG,
FLASH_SHORT,
SUPPORT_BRIGHTNESS,
SUPPORT_FLASH,
SUPPORT_TRANSITION,
LightEntity,
)
from homeassistant.config_entries import ConfigEntry
from homeassistant.core import HomeAssistant
from homeassistant.helpers.entity import Entity, EntityCategory
from homeassistant.helpers.entity_platform import AddEntitiesCallback
from . import VelbusEntity
from .const import DOMAIN
async def async_setup_entry(
hass: HomeAssistant,
entry: ConfigEntry,
async_add_entities: AddEntitiesCallback,
) -> None:
"""Set up Velbus switch based on config_entry."""
await hass.data[DOMAIN][entry.entry_id]["tsk"]
cntrl = hass.data[DOMAIN][entry.entry_id]["cntrl"]
entities: list[Entity] = []
for channel in cntrl.get_all("light"):
entities.append(VelbusLight(channel))
for channel in cntrl.get_all("led"):
entities.append(VelbusButtonLight(channel))
async_add_entities(entities)
class VelbusLight(VelbusEntity, LightEntity):
"""Representation of a Velbus light."""
_channel: VelbusDimmer
_attr_supported_features = SUPPORT_BRIGHTNESS | SUPPORT_TRANSITION
@property
def is_on(self) -> bool:
"""Return true if the light is on."""
return self._channel.is_on()
@property
def brightness(self) -> int:
"""Return the brightness of the light."""
return int((self._channel.get_dimmer_state() * 255) / 100)
async def async_turn_on(self, **kwargs: Any) -> None:
"""Instruct the Velbus light to turn on."""
if ATTR_BRIGHTNESS in kwargs:
# Make sure a low but non-zero value is not rounded down to zero
if kwargs[ATTR_BRIGHTNESS] == 0:
brightness = 0
else:
brightness = max(int((kwargs[ATTR_BRIGHTNESS] * 100) / 255), 1)
attr, *args = (
"set_dimmer_state",
brightness,
kwargs.get(ATTR_TRANSITION, 0),
)
else:
attr, *args = (
"restore_dimmer_state",
kwargs.get(ATTR_TRANSITION, 0),
)
await getattr(self._channel, attr)(*args)
async def async_turn_off(self, **kwargs: Any) -> None:
"""Instruct the velbus light to turn off."""
attr, *args = (
"set_dimmer_state",
0,
kwargs.get(ATTR_TRANSITION, 0),
)
await getattr(self._channel, attr)(*args)
class VelbusButtonLight(VelbusEntity, LightEntity):
"""Representation of a Velbus light."""
_channel: VelbusButton
_attr_entity_registry_enabled_default = False
_attr_entity_category = EntityCategory.CONFIG
_attr_supported_features = SUPPORT_FLASH
def __init__(self, channel: VelbusChannel) -> None:
"""Initialize the button light (led)."""
super().__init__(channel)
self._attr_name = f"LED {self._channel.get_name()}"
@property
def is_on(self) -> Any:
"""Return true if the light is on."""
return self._channel.is_on()
async def async_turn_on(self, **kwargs: Any) -> None:
"""Instruct the Velbus light to turn on."""
if ATTR_FLASH in kwargs:
if kwargs[ATTR_FLASH] == FLASH_LONG:
attr, *args = "set_led_state", "slow"
elif kwargs[ATTR_FLASH] == FLASH_SHORT:
attr, *args = "set_led_state", "fast"
else:
attr, *args = "set_led_state", "on"
else:
attr, *args = "set_led_state", "on"
await getattr(self._channel, attr)(*args)
async def async_turn_off(self, **kwargs: Any) -> None:
"""Instruct the velbus light to turn off."""
attr, *args = "set_led_state", "off"
await getattr(self._channel, attr)(*args)
|
'''
Given an array of integers nums and an integer k, return the total number of continuous subarrays whose sum equals to k.
Example 1:
Input: nums = [1,1,1], k = 2
Output: 2
Example 2:
Input: nums = [1,2,3], k = 3
Output: 2
Constraints:
1 <= nums.length <= 2 * 10^4
-1000 <= nums[i] <= 1000
-10^7 <= k <= 10^7
'''
from typing import List

class Solution:
def subarraySum(self, nums: List[int], k: int) -> int:
sumCount = {}
count = 0
sumValue = 0
for i in nums:
sumValue = sumValue+i
if sumValue == k:
count = count + 1
if (sumValue - k) in sumCount.keys():
count = count + sumCount[sumValue-k]
if sumValue in sumCount.keys():
sumCount[sumValue] = sumCount[sumValue] + 1
else:
sumCount[sumValue] = 1
return count
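# Quick sanity check of the prefix-sum approach above, using the examples from
# the problem statement (illustrative only): the running sum is stored in
# sumCount, so every subarray ending at the current index with sum k is found
# in O(1), giving O(n) time overall.
# Solution().subarraySum([1, 1, 1], 2)  # -> 2
# Solution().subarraySum([1, 2, 3], 3)  # -> 2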
|
N = 5
def add_3(a1, a2, a3):
return a1 + a2 + a3
|
# -----------------------------------------------------------------------------
# lex_optimize4.py
# -----------------------------------------------------------------------------
import re
import sys
if ".." not in sys.path: sys.path.insert(0,"..")
import ply.lex as lex
tokens = [
"PLUS",
"MINUS",
"NUMBER",
]
t_PLUS = r'\+?'
t_MINUS = r'-'
t_NUMBER = r'(\d+)'
def t_error(t):
pass
# Build the lexer
lex.lex(optimize=True, lextab="opt4tab", reflags=re.UNICODE)
lex.runmain(data="3+4")
|
from aiogram import Dispatcher
from loguru import logger
from .common import register_common_handlers
from .decryption import register_decryption_handlers
from .encryption import register_encryption_handlers
from .exception import register_exception_handlers
def register_handlers(dp: Dispatcher):
"""Sets all bot handlers."""
logger.info("Configuring handlers...")
register_common_handlers(dp)
register_encryption_handlers(dp)
register_decryption_handlers(dp)
register_exception_handlers(dp)
|
# coding: utf8
from __future__ import unicode_literals, print_function
import pytest
import time
import os
from wasabi.printer import Printer
from wasabi.util import MESSAGES, NO_UTF8, supports_ansi
SUPPORTS_ANSI = supports_ansi()
def test_printer():
p = Printer(no_print=True)
text = "This is a test."
good = p.good(text)
fail = p.fail(text)
warn = p.warn(text)
info = p.info(text)
assert p.text(text) == text
if SUPPORTS_ANSI and not NO_UTF8:
assert good == "\x1b[38;5;2m\u2714 {}\x1b[0m".format(text)
assert fail == "\x1b[38;5;1m\u2718 {}\x1b[0m".format(text)
assert warn == "\x1b[38;5;3m\u26a0 {}\x1b[0m".format(text)
assert info == "\x1b[38;5;4m\u2139 {}\x1b[0m".format(text)
if SUPPORTS_ANSI and NO_UTF8:
assert good == "\x1b[38;5;2m[+] {}\x1b[0m".format(text)
assert fail == "\x1b[38;5;1m[x] {}\x1b[0m".format(text)
assert warn == "\x1b[38;5;3m[!] {}\x1b[0m".format(text)
assert info == "\x1b[38;5;4m[i] {}\x1b[0m".format(text)
if not SUPPORTS_ANSI and not NO_UTF8:
assert good == "\u2714 {}".format(text)
assert fail == "\u2718 {}".format(text)
assert warn == "\u26a0 {}".format(text)
assert info == "\u2139 {}".format(text)
if not SUPPORTS_ANSI and NO_UTF8:
assert good == "[+] {}".format(text)
assert fail == "[x] {}".format(text)
assert warn == "[!] {}".format(text)
assert info == "[i] {}".format(text)
def test_printer_print():
p = Printer()
text = "This is a test."
p.good(text)
p.fail(text)
p.info(text)
p.text(text)
def test_printer_no_pretty():
p = Printer(no_print=True, pretty=False)
text = "This is a test."
assert p.good(text) == text
assert p.fail(text) == text
assert p.warn(text) == text
assert p.info(text) == text
assert p.text(text) == text
def test_printer_custom():
colors = {"yellow": 220, "purple": 99}
icons = {"warn": "\u26a0\ufe0f", "question": "?"}
p = Printer(no_print=True, colors=colors, icons=icons)
text = "This is a test."
purple_question = p.text(text, color="purple", icon="question")
warning = p.warn(text)
if SUPPORTS_ANSI and not NO_UTF8:
assert purple_question == "\x1b[38;5;99m? {}\x1b[0m".format(text)
assert warning == "\x1b[38;5;3m\u26a0\ufe0f {}\x1b[0m".format(text)
if SUPPORTS_ANSI and NO_UTF8:
assert purple_question == "\x1b[38;5;99m? {}\x1b[0m".format(text)
assert warning == "\x1b[38;5;3m?? {}\x1b[0m".format(text)
if not SUPPORTS_ANSI and not NO_UTF8:
assert purple_question == "? {}".format(text)
assert warning == "\u26a0\ufe0f {}".format(text)
if not SUPPORTS_ANSI and NO_UTF8:
assert purple_question == "? {}".format(text)
assert warning == "?? {}".format(text)
def test_printer_counts():
p = Printer()
text = "This is a test."
for i in range(2):
p.good(text)
for i in range(1):
p.fail(text)
for i in range(4):
p.warn(text)
assert p.counts[MESSAGES.GOOD] == 2
assert p.counts[MESSAGES.FAIL] == 1
assert p.counts[MESSAGES.WARN] == 4
def test_printer_divider():
p = Printer(line_max=20, no_print=True)
p.divider() == "\x1b[1m\n================\x1b[0m"
p.divider("test") == "\x1b[1m\n====== test ======\x1b[0m"
p.divider("test", char="*") == "\x1b[1m\n****** test ******\x1b[0m"
assert (
p.divider("This is a very long text, it is very long")
== "\x1b[1m\n This is a very long text, it is very long \x1b[0m"
)
with pytest.raises(ValueError):
p.divider("test", char="~.")
@pytest.mark.parametrize("hide_animation", [False, True])
def test_printer_loading(hide_animation):
p = Printer(hide_animation=hide_animation)
print("\n")
with p.loading("Loading..."):
time.sleep(1)
p.good("Success!")
with p.loading("Something else..."):
time.sleep(2)
p.good("Yo!")
with p.loading("Loading..."):
time.sleep(1)
p.good("Success!")
def test_printer_loading_raises_exception():
def loading_with_exception():
p = Printer()
print("\n")
with p.loading():
raise Exception("This is an error.")
with pytest.raises(Exception):
loading_with_exception()
def test_printer_loading_no_print():
p = Printer(no_print=True)
with p.loading("Loading..."):
time.sleep(1)
p.good("Success!")
def test_printer_log_friendly():
text = "This is a test."
ENV_LOG_FRIENDLY = "WASABI_LOG_FRIENDLY"
os.environ[ENV_LOG_FRIENDLY] = "True"
p = Printer(no_print=True)
assert p.good(text) in ("\u2714 This is a test.", "[+] This is a test.")
del os.environ[ENV_LOG_FRIENDLY]
def test_printer_log_friendly_prefix():
text = "This is a test."
ENV_LOG_FRIENDLY = "CUSTOM_LOG_FRIENDLY"
os.environ[ENV_LOG_FRIENDLY] = "True"
p = Printer(no_print=True, env_prefix="CUSTOM")
assert p.good(text) in ("\u2714 This is a test.", "[+] This is a test.")
print(p.good(text))
del os.environ[ENV_LOG_FRIENDLY]
@pytest.mark.skip(reason="Now seems to raise TypeError: readonly attribute?")
def test_printer_none_encoding(monkeypatch):
"""Test that printer works even if sys.stdout.encoding is set to None. This
previously caused a very confusing error."""
monkeypatch.setattr("sys.stdout.encoding", None)
p = Printer() # noqa: F841
|
import pandas as pd
import matplotlib.pyplot as plt
from asreview.state.utils import open_state
from scipy.stats import spearmanr
def probability_matrix_from_h5_state(state_fp):
"""Get the probability matrix from an .h5 state file.
Arguments
----------
state_fp: str
Path to state file.
Returns
-------
pandas.DataFrame:
        A dataframe of shape (num_papers, num_queries), where entry (i, j) is the
        probability that paper i was relevant according to the model at query j. Note
        that the row index starts at 0, but the column index starts at 1.
"""
proba_dict = {}
with open_state(state_fp, read_only=True) as state:
queries = [int(num) for num in state.f['results'].keys()]
total_queries = max(queries)
for i in range(1, total_queries+1):
proba_dict[i] = state.f[f'results/{i}/proba'][:]
proba_matrix = pd.DataFrame.from_dict(proba_dict)
return proba_matrix
def probability_plot(proba_matrix, row_nums, gap=1, **kwargs):
"""Plot the probability of a document being relevant, at different iterations of
the model.
Arguments
----------
proba_matrix: pandas.DataFrame
Probability matrix.
row_nums: list
List containing the row numbers you want to plot.
gap: int
Step size on the x-axis (the queries).
Returns
-------
    Plot of the probability of the documents in 'row_nums' of the probability matrix at
    query_{gap*i}.
"""
plt.plot(proba_matrix.iloc[row_nums, ::gap].T, **kwargs)
plt.show()
def rank_plot(proba_matrix, row_nums, gap, **kwargs):
"""Plot the rank of the document, at different iterations of the model.
Arguments
----------
proba_matrix: pandas.DataFrame
Probability matrix.
row_nums: list
List containing the row numbers you want to plot.
gap: int
Step size on the x-axis (the queries).
Returns
-------
    Plot of the rank of the documents in 'row_nums' of the probability matrix at
    query_{gap*i}.
"""
rank_matrix = proba_matrix.rank()
plt.plot(rank_matrix.iloc[row_nums, ::gap].T, **kwargs)
plt.show()
def rho_plot(proba_matrix, gap=1, **kwargs):
"""Plot the value of Spearman's rho, comparing different iterations of the active
learning model.
Arguments
----------
proba_matrix: pandas.DataFrame
Probability matrix.
gap: int
        Calculate the rho of query_i and query_(i-gap), for i in
        range(gap + 1, num_queries, gap).
Returns
-------
Plot of value of rho. Note that per step on the x-axis, we take a step of 'gap'
queries.
"""
rho_list = [spearmanr(proba_matrix[i], proba_matrix[i-gap])
for i in range(gap + 1, proba_matrix.shape[1], gap)]
plt.plot(rho_list, **kwargs)
plt.show()
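# Example usage (illustrative; 'project.h5' is a hypothetical state file path):
# proba = probability_matrix_from_h5_state('project.h5')
# probability_plot(proba, row_nums=[0, 1, 2], gap=5)
# rank_plot(proba, row_nums=[0, 1, 2], gap=5)
# rho_plot(proba, gap=10)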
|
from asyncio import AbstractEventLoop
from collections import OrderedDict
from functools import partial
from typing import Any, Iterator, List
from aiohttp.http_writer import HttpVersion11
from aiohttp.web import BaseRunner
from aiohttp.web import Server as WebServer
from aiohttp.web_exceptions import HTTPExpectationFailed
from ..apps import AbstractApp
from ..middlewares import RootMiddleware
from ..requests import Request
from ..resolvers import Resolver
from ..responses import Response
__all__ = ("AppRunner",)
class AppRunner(BaseRunner):
def __init__(self, app: AbstractApp,
resolver: Resolver,
middlewares: List[RootMiddleware],
loop: AbstractEventLoop,
**kwargs: Any) -> None:
super().__init__(**kwargs)
self._app = app
self._resolver = resolver
self._middlewares = middlewares
self._loop = loop
def _merge_middlewares(self, root_middlewares: List[Any],
app_middlewares: List[Any],
handler_middlewares: List[Any]) -> Iterator[Any]:
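        # Deduplicate middlewares by the class of their bound instance:
        # app-level entries are added first, handler-level entries of the
        # same class replace them, and root middlewares are kept only when
        # that class is not already present. Returned in reverse insertion
        # order for the wrapping loop in _handle.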
middlewares: OrderedDict[Any, Any] = OrderedDict()
for middleware in app_middlewares:
middlewares[type(middleware.__self__)] = middleware
for middleware in handler_middlewares:
middlewares[type(middleware.__self__)] = middleware
for middleware in root_middlewares:
unique_key = type(middleware.__self__)
if unique_key not in middlewares:
middlewares[unique_key] = middleware
return reversed(middlewares.values())
async def _handle(self, request: Request) -> Response:
expect = request.headers.get("EXPECT")
if (expect is not None) and (request.version == HttpVersion11):
if expect.lower() == "100-continue":
await request.writer.write(b"HTTP/1.1 100 Continue\r\n\r\n")
else:
raise HTTPExpectationFailed()
try:
resolver = self._app.resolver # type: ignore
except (AttributeError, NotImplementedError):
resolver = self._resolver
handler = await resolver.resolve(request, self._app)
unwrapped = resolver.unwrap(handler)
root_middlewares = [middleware(handler) for middleware in self._middlewares]
app_middlewares = resolver.get_attribute("middlewares", type(self._app), [])
handler_middlewares = resolver.get_attribute("middlewares", unwrapped, [])
middlewares = self._merge_middlewares(root_middlewares,
app_middlewares, handler_middlewares)
for middleware in middlewares:
handler = partial(middleware, handler=handler, app=self._app)
response = await handler(request)
return response # type: ignore
def _make_request(self, *args: Any, **kwargs: Any) -> Request:
return Request(*args, **kwargs, loop=self._loop)
async def _make_server(self) -> WebServer:
return WebServer(self._handle, request_factory=self._make_request) # type: ignore
async def shutdown(self) -> None:
pass
async def _cleanup_server(self) -> None:
pass
|
import sys
#import cv2
import numpy as np
import os
import Calibration
WORKING_DIR = os.getcwd()
VIDEO_DIR = os.path.join(WORKING_DIR,'Projects')
mode = 'none'
if __name__ == '__main__':
if len(sys.argv) > 1:
if str(sys.argv[1]) == 'help':
print('')
print('============================================================================================')
print('=========================== Printing a list of possible modes... ===========================')
print('============================================================================================ \n')
print('--------------------------------------- MODE #1 -------------------------------------------- \n')
print('Save a calibration file to a given name inside the project folder')
print('<py file path> \"saveCalib\" <projectName> <videoName> <writeNameData = calibration_data> \n')
print('--------------------------------------- MODE #2 -------------------------------------------- \n')
print('Dewarp a given video using a given dewarp calibration file')
print('Note: file will be saved as \"Dewarped<videoName>\.MP4" ')
print('<py file path> \"dewarpVideo\" <projectName> <videoName> <dataFile = calibration_data> \n')
print('-------------------------------------------------------------------------------------------- \n')
exit()
if str(sys.argv[1].lower()) == 'savecalib':
        mode = sys.argv[1].lower()          #Argument 1 ... mode
projectName = sys.argv[2] #Argument 2 ... project name
readNameVideo = sys.argv[3] #Argument 3 ... video name
if len(sys.argv) >= 5:
writeNameData = sys.argv[4] #Argument 4 ... calibration data write name
else:
writeNameData = 'calibration_data'
if len(sys.argv) > 5 :
print('Too many arguments... Exiting.'); exit()
readPathVideo = os.path.join(VIDEO_DIR,projectName,readNameVideo+'.MP4')
writePathData = os.path.join(VIDEO_DIR,projectName,(writeNameData+'.npz'))
if str(sys.argv[1].lower()) == 'dewarpvideo':
        mode = str(sys.argv[1]).lower()
projectName = str(sys.argv[2])
readNameVideo = str(sys.argv[3])
#print("readNameVideo: ",readNameVideo)
if len(sys.argv) >= 5:
readNameData = str(sys.argv[4])
else:
readNameData = 'calibration_data'
if len(sys.argv) > 5 :
print('Too many arguments... Exiting.'); exit()
readPathVideo = os.path.join(VIDEO_DIR,projectName,readNameVideo)
readPathData = os.path.join(VIDEO_DIR,projectName,(readNameData + '.npz'))
writeNameVideo = ('Dewarped' + readNameVideo)
writePathVideo = os.path.join(VIDEO_DIR,projectName,('Dewarped' + readNameVideo))
try:
open(readPathData)
except IOError :
print('====================================================================================')
print('================ Could not find data file at given location ====================')
print('========== Have you saved the calibration file for this project yet? ===========')
print('====================================================================================')
exit()
if str(sys.argv[1]) == 'nextMode':
mode = str(sys.argv[1]).lower()
#whatever else...
    if mode == 'none':
        print(' ')
        print('Hello, fellow idiot. I\'m not sure what you\'re trying to do. Exiting.')
        print(' ')
        exit()
if mode == 'savecalib':
print(' ')
print(('Generating calibration file for '+readNameVideo+' and saving as '+writeNameData))
print(' ')
Calibration.ImageCollect(readPathVideo)
Calibration.ImageProcessing(writePathData)
if mode == 'dewarpvideo':
print(' ')
print(('Dewarping movie file '+readNameVideo+' and saving as '+writeNameVideo))
print(' ')
Calibration.DewarpMovie(readPathVideo, writePathVideo, readPathData)
|
from __future__ import absolute_import, division, print_function
from libtbx import Auto
import os.path
import sys
master_phil_str = """
map_type = *2mFo-DFc mFo-DFc anom anom_residual llg
.type = choice
exclude_free_r_reflections = True
.type = bool
fill_missing_f_obs = False
.type = bool
b_sharp = None
.type = float
output_file = Auto
.type = path
"""
map_type_labels = {
"2mFo-DFc" : "2FOFCWT",
"mFo-DFc" : "FOFCWT",
"anom" : "ANOM",
"anom_residual" : "ANOM_DIFF",
"llg" : "LLG",
}
def master_phil():
from mmtbx.command_line import generate_master_phil_with_inputs
return generate_master_phil_with_inputs(
phil_string=master_phil_str,
enable_automatic_twin_detection=True)
def run(args, out=sys.stdout):
master_params = master_phil()
usage_string = """\
mmtbx.compute_map_coefficients model.pdb data.mtz map_type=MAP_TYPE [options]
Utility to compute a single set of map coefficients with minimal input.
"""
import mmtbx.command_line
from mmtbx import map_tools
import iotbx.mtz
cmdline = mmtbx.command_line.load_model_and_data(
args=args,
master_phil=master_params,
process_pdb_file=False,
prefer_anomalous=True,
usage_string=usage_string,
set_wavelength_from_model_header=True,
set_inelastic_form_factors="sasaki",
out=out)
fmodel = cmdline.fmodel
xray_structure = fmodel.xray_structure
params = cmdline.params
map_coeffs = fmodel.map_coefficients(
map_type=params.map_type,
exclude_free_r_reflections=params.exclude_free_r_reflections,
fill_missing=params.fill_missing_f_obs)
if (params.b_sharp is not None):
if (params.b_sharp is Auto):
params.b_sharp = - map_coeffs.d_min() * 10
map_coeffs = map_tools.sharp_map(
sites_frac=None,
map_coeffs=map_coeffs,
      b_sharp=params.b_sharp)
dec = iotbx.mtz.label_decorator(phases_prefix="PH")
mtz_dataset = map_coeffs.as_mtz_dataset(
column_root_label=map_type_labels[params.map_type],
label_decorator=dec)
if (params.output_file is Auto):
pdb_file = os.path.basename(params.input.pdb.file_name[0])
params.output_file = os.path.splitext(pdb_file)[0] + "_%s.mtz" % \
params.map_type
mtz_dataset.mtz_object().write(params.output_file)
print("Wrote %s" % params.output_file, file=out)
if (__name__ == "__main__"):
run(sys.argv[1:])
|
import urllib.request
import os
import sys
def downloadImages(strQueryString, arrUrls):
for url in arrUrls:
downloadImage(strQueryString, url)
def downloadImage(strQueryString, url):
try:
strPath = setup(strQueryString)
print(f"Downloading {url} to {strQueryString}")
image_name = str(url).split('/')[-1]
download_jpg(url, strPath, image_name)
except Exception as error:
print(error)
def download_jpg(url, filePath, fileName):
fullPath = f"{filePath}/{fileName}"
urllib.request.urlretrieve(url, fullPath)
def setup(strQueryString):
dirName = 'images_' + strQueryString.replace(' ', '_')
strPath = f"{dirName}"
try:
os.mkdir(strPath)
    except FileExistsError:
pass
return strPath
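# Example usage (illustrative URLs):
# downloadImages('cute cats', ['https://example.com/cat1.jpg',
#                              'https://example.com/cat2.jpg'])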
|
# Layout / Size
# How to adjust the size of cards on a page.
# ---
from h2o_wave import site, ui
# Every page has a grid system in place.
# The grid has 12 columns and 10 rows.
# A column is 134 pixels wide.
# A row is 76 pixels high.
# The gap between rows and columns is set to 15 pixels.
# Cards have a `box` attribute that specifies its column, row, width and height.
# box = 'column row width height'
# They indicate the 1-based column/row to position the top-left corner of the card.
# In this example, we place multiple cards on a page to demonstrate their `box` values.
page = site['/demo']
boxes = [
'1 1 1 1',
'2 1 2 1',
'4 1 3 1',
'7 1 4 1',
'11 1 2 2',
'1 2 1 9',
'2 2 1 4',
'3 2 1 2',
'2 6 1 5',
'3 4 1 7',
'4 2 7 9',
'11 9 2 2',
'11 3 2 6',
]
for box in boxes:
page[f'card_{box.replace(" ", "_")}'] = ui.markdown_card(box=box, title=box, content='')
page.save()
|
from machine import Pin
import machine
from time import sleep
# Global value to communicate with the IRQ routine
isAwake = True
# GPIO16 (D0) is the internal LED for NodeMCU
led = Pin(16, Pin.OUT)
# Callback should be as short as possible
def timer_callback(timer):
    # We must declare isAwake and led as global to modify them in this function
global isAwake, led
isAwake = True
# Invert LED value
led.value(not led.value())
# Using Timer object specify timer type, period in ms and ISR function
timer = machine.Timer(0)
timer.init(period=10000, mode=machine.Timer.PERIODIC, callback=timer_callback)
while True:
    # we want to disable interrupts before using/changing the shared global value
    if isAwake:
        state = machine.disable_irq()
        isAwake = False
        machine.enable_irq(state)
        print("Just Woke Up!")
    sleep(0.1)  # brief sleep so the main loop does not busy-wait between ticks
"""
Copyright 2019, Aleksandar Stojimirovic <[email protected]>
Licence: MIT
Source: https://github.com/hooppler/wormnet/
"""
import pygame
class Button(object):
def __init__(self, width=60, height=20, pos_x=0, pos_y=0, title='Button'):
self.width = width
self.height = height
self.pos_x = pos_x
self.pos_y = pos_y
self.title = title
self.font_size = 20
self.state = 0
self.surface = pygame.Surface((width, height))
# Text Initialization
pygame.font.init()
font = pygame.font.SysFont('Arial', self.font_size)
self.text_surface = font.render(title, False, (0,0,0))
def update(self, events):
        left_pressed, middle_pressed, right_pressed = pygame.mouse.get_pressed()
self.state = 0
for event in events:
if left_pressed:
pos_x, pos_y = pygame.mouse.get_pos()
pos_x = pos_x - self.pos_x
pos_y = pos_y - self.pos_y
if (pos_x > 0 and pos_x < self.width) and (pos_y > 0 and pos_y < self.height):
self.state = 1
pygame.draw.rect(self.surface, pygame.Color(130,130,74), (0, 0, self.width, self.height))
else:
pygame.draw.rect(self.surface, pygame.Color(195,192,105), (0, 0, self.width, self.height))
else:
pygame.draw.rect(self.surface, pygame.Color(195,192,105), (0, 0, self.width, self.height))
self.surface.blit(self.text_surface, ((self.width-(self.font_size/2)*len(self.title))/2, (self.height-self.font_size)/2-2))
def get_surface(self):
return self.surface
def get_state(self):
return self.state
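# Example usage (illustrative): draw the button and poll its state each frame.
# screen = pygame.display.set_mode((200, 100))
# button = Button(pos_x=20, pos_y=20, title='OK')
# ...inside the main loop:
#     button.update(pygame.event.get())
#     screen.blit(button.get_surface(), (button.pos_x, button.pos_y))
#     if button.get_state():  # 1 while the button is being pressed
#         print('clicked')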
|
import os
import sys
import scipy
import numpy as np
import imageio
import tensorflow as tf
from cityscapelabels import trainId2RGB
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '2'
def load_graph(frozen_graph_filename):
# We load the protobuf file from the disk and parse it to retrieve the
# unserialized graph_def
with tf.gfile.GFile(frozen_graph_filename, "rb") as f:
graph_def = tf.GraphDef()
graph_def.ParseFromString(f.read())
with tf.Graph().as_default() as graph:
tf.import_graph_def(graph_def, name="prefix")
return graph
def logits2image(logits):
logits = logits.astype(np.uint8)
image = np.empty([logits.shape[0],logits.shape[1],3],dtype=float)
for i in range(image.shape[0]):
for j in range(image.shape[1]):
image[i,j,:] = trainId2RGB[logits[i,j]]
image = image.astype(np.uint8)
return image
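# A vectorized sketch of the same mapping (assumes trainId2RGB can be turned
# into a (num_classes, 3) palette array); it avoids the per-pixel Python loop:
# palette = np.array([trainId2RGB[i] for i in range(len(trainId2RGB))], dtype=np.uint8)
# image = palette[logits.astype(np.uint8)]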
graph = load_graph("model/frozen_inference_graph.pb")#load_graph(sys.argv[1])
image_dir = "sample-images/"#sys.argv[2]
# DeepLabv3+ input and output tensors
image_input = graph.get_tensor_by_name('prefix/ImageTensor:0')
softmax = graph.get_tensor_by_name('prefix/SemanticPredictions:0')
# Create output directories in the image folder
if not os.path.exists(image_dir+'/segmented_images/'):
os.mkdir(image_dir+'/segmented_images/')
if not os.path.exists(image_dir+'/segmented_images_colored/'):
os.mkdir(image_dir+'/segmented_images_colored/')
image_dir_segmented = image_dir+'segmented_images/'
image_dir_segmented_colored = image_dir+'segmented_images_colored/'
with tf.Session(graph=graph) as sess:
for fname in sorted(os.listdir(image_dir)):
if fname.endswith(".png"):
img = imageio.imread(os.path.join(image_dir, fname))
img = np.expand_dims(img, axis=0)
probs = sess.run(softmax, {image_input: img})
img = tf.squeeze(probs).eval()
print(img.shape)
img_colored = logits2image(img)
print(img_colored.shape)
# print(os.path.join(image_dir_segmented+fname))
imageio.imwrite(os.path.join(image_dir_segmented+fname), img.astype(np.uint8))
imageio.imwrite(os.path.join(image_dir_segmented_colored+fname), img_colored.astype(np.uint8))
# print(fname)
|
from rest_framework import request, response
from rest_framework.decorators import action
from rest_framework.exceptions import NotFound
from ee.clickhouse.models.person import delete_person
from ee.clickhouse.queries.clickhouse_retention import ClickhouseRetention
from ee.clickhouse.queries.clickhouse_stickiness import ClickhouseStickiness
from ee.clickhouse.queries.funnels.funnel_persons import ClickhouseFunnelPersons
from ee.clickhouse.queries.funnels.funnel_trends_persons import ClickhouseFunnelTrendsPersons
from ee.clickhouse.queries.trends.lifecycle import ClickhouseLifecycle
from posthog.api.person import PersonViewSet
from posthog.api.utils import format_next_absolute_url, format_next_url
from posthog.models import Event, Filter, Person
# TODO: Move grabbing all this to Clickhouse. See WIP-people-from-clickhouse branch.
class ClickhousePersonViewSet(PersonViewSet):
lifecycle_class = ClickhouseLifecycle
retention_class = ClickhouseRetention
stickiness_class = ClickhouseStickiness
@action(methods=["GET"], detail=False)
def funnel(self, request: request.Request, **kwargs) -> response.Response:
if request.user.is_anonymous or not request.user.team:
return response.Response(data=[])
filter = Filter(request=request)
team = request.user.team
results = ClickhouseFunnelPersons(filter, team).run()
next_url = format_next_absolute_url(request, filter.offset, 100) if len(results) > 99 else None
return response.Response(data={"results": results, "next": next_url})
@action(methods=["GET"], detail=False)
def funnel_trends(self, request: request.Request, **kwargs) -> response.Response:
if request.user.is_anonymous or not request.user.team:
return response.Response(data=[])
filter = Filter(request=request)
team = request.user.team
results = ClickhouseFunnelTrendsPersons(filter, team).run()
next_url = format_next_absolute_url(request, filter.offset, 100) if len(results) > 99 else None
return response.Response(data={"results": results, "next": next_url})
def destroy(self, request: request.Request, pk=None, **kwargs): # type: ignore
try:
person = Person.objects.get(team=self.team, pk=pk)
events = Event.objects.filter(team=self.team, distinct_id__in=person.distinct_ids)
events.delete()
delete_person(
person.uuid, person.properties, person.is_identified, delete_events=True, team_id=self.team.pk
)
person.delete()
return response.Response(status=204)
except Person.DoesNotExist:
raise NotFound(detail="Person not found.")
|
from domain.database import DBSession
from domain.subreply import SubReply
import datetime
# Get n comments of a reply, starting from position x
def selectSubReplyFromXGetN(replyId, x, n):
try:
session = DBSession()
offset = x
num = x + n
subreply = session.query(SubReply).filter(SubReply.replyId == replyId).slice(offset, num).all()
if subreply is None:
result = None
else:
result = subreply
except Exception:
raise
else:
session.close()
return result
# Post a comment on a reply
def insertSubReply(userId, replyId, content):
effect_raw = 0
try:
session = DBSession()
subreplyTime = datetime.datetime.now()
adSubReply = SubReply(content=content, subreplyTime=subreplyTime, replyId=replyId, userId=userId, likes=0)
session.add(adSubReply)
except Exception:
session.rollback()
raise
else:
session.commit()
session.close()
effect_raw = 1
return effect_raw
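# Example usage (illustrative):
# insertSubReply(userId=1, replyId=42, content='Nice post!')
# page = selectSubReplyFromXGetN(replyId=42, x=0, n=10)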
# Update the like count of a sub-reply
def updateLikes(subreplyId, likes):
try:
session = DBSession()
subreply = session.query(SubReply).filter(SubReply.subreplyId == subreplyId).first()
if subreply is None:
return 0
else:
subreply.likes = subreply.likes + likes
except Exception:
session.rollback()
raise
else:
session.commit()
session.close()
return 1
# Query sub-replies by userId
def selectFromXGetNSubReplyByUserId(userId, x, n):
try:
session = DBSession()
offset = x
num = x + n
subreply = session.query(SubReply).filter(SubReply.userId == userId).order_by(SubReply.subreplyTime.desc()).slice(offset, num).all()
if subreply is None:
result = None
else:
result = subreply
except Exception:
raise
else:
session.close()
return result
# Get the total number of sub-replies
def selectSumSubReply():
result = 0
try:
session = DBSession()
result = session.query(SubReply).count()
except Exception:
raise
else:
session.close()
return result
# Get the total number of a user's sub-replies
def selectSumSubReplyByUserId(userId):
result = 0
try:
session = DBSession()
result = session.query(SubReply).filter(SubReply.userId == userId).count()
except Exception:
raise
else:
session.close()
return result
# Get the total number of comments on a given reply
def selectSumSubReplyByReplyId(replyId):
result = 0
try:
session = DBSession()
result = session.query(SubReply).filter(SubReply.replyId == replyId).count()
except Exception:
raise
else:
session.close()
return result
# Query a sub-reply by its ID
def selectSubReplyBySubReplyId(subreplyId):
try:
session = DBSession()
subreply = session.query(SubReply).filter(SubReply.subreplyId == subreplyId).first()
if subreply is None:
result = None
else:
result = subreply
except Exception:
raise
else:
session.close()
return result
# Delete a sub-reply
def deleteSubReply(subreplyId):
effect_raw = 0
try:
session = DBSession()
subreply = session.query(SubReply).filter(SubReply.subreplyId == subreplyId).first()
if subreply is None:
effect_raw = 0
return effect_raw
else:
session.query(SubReply).filter_by(subreplyId=subreplyId).delete()
effect_raw = 1
    except Exception:
session.rollback()
effect_raw = 0
else:
session.commit()
session.close()
return effect_raw |
import smtplib
from email.message import EmailMessage
from string import Template
from pathlib import Path
html=Template(Path('ht.html').read_text())
email=EmailMessage()
email['from']='Vasudeva'
email['to']='[email protected]'
email['subject']='Hi'
#email.set_content('greetings:hiiiii!')
email.set_content(html.substitute({'name':'dingDing'}),'html')
with smtplib.SMTP(host='smtp.gmail.com', port=587) as smtp:
smtp.ehlo()
smtp.starttls()
smtp.login('[email protected]','xxxxx@yyyy')
smtp.send_message(email)
print('ran well')
|
import tkinter as tk
from tkinter.ttk import *
from tkinter import ttk
from pandas import DataFrame
import matplotlib.pyplot as plt
from matplotlib.backends.backend_tkagg import FigureCanvasTkAgg
import os
import sys
import matplotlib
import numpy as np
import pandas as pd
from matplotlib import interactive
import matplotlib.ticker as plticker
from matplotlib import cm
from matplotlib.colors import ListedColormap
import random
root= tk.Tk()
root.title("Security Metrics I")
lbl = Label(root, text="Security Metrics I", font=("Times", 18), foreground="#000280")
lbl.pack()
windows_paths = tk.Tk()
windows_paths.title("Security Metrics II")
lbl = Label(windows_paths, text="Security Metrics II", font=("Times", 18), foreground="#000280")
lbl.pack()
windows_average = tk.Tk()
windows_average.title("Security Metrics III")
lbl = Label(windows_average, text="Security Metrics III", font=("Times", 18), foreground="#000280")
lbl.pack()
def add_value_labels(ax, spacing=0):
"""Add labels to the end of each bar in a bar chart.
Arguments:
ax (matplotlib.axes.Axes): The matplotlib object containing the axes
of the plot to annotate.
spacing (int): The distance between the labels and the bars.
"""
# For each bar: Place a label
for rect in ax.patches:
# Get X and Y placement of label from rect.
y_value = rect.get_height()
x_value = rect.get_x() + rect.get_width() / 2
# Number of points between bar and label. Change to your liking.
space = spacing
# Vertical alignment for positive values
va = 'bottom'
# If value of bar is negative: Place label below bar
if y_value < 0:
# Invert space to place label below
space *= -1
            # Vertically center the label below the bar
            va = 'center'
# Use Y value as label and format number with one decimal place
label = "{:.2f}".format(y_value)
# Create annotation
ax.annotate(
label, # Use `label` as label
(x_value, y_value), # Place label at end of the bar
xytext=(3, space), # Vertically shift label by `space`
textcoords="offset points", # Interpret `xytext` as offset in points
ha='center', # Horizontally center label
va=va) # Vertically align label differently for
# positive and negative values.
def plotMetrics(file):
my_colormap = ['tab:blue', 'tab:orange', 'tab:green', 'tab:red', 'tab:purple', 'tab:brown', 'tab:pink', 'tab:gray', 'tab:olive', 'tab:cyan', 'b', 'g', 'r', 'c', 'm', 'y', 'k'] # red, green, blue, black, etc.
# Plot Attack Cost, Attack Impact, Attack Risk
figure = plt.Figure(figsize=(5, 6), dpi=100)
ax1 = figure.add_subplot(221)
bar1 = FigureCanvasTkAgg(figure, root)
bar1.get_tk_widget().pack(side=tk.TOP, fill=tk.BOTH, expand=True)
figure.subplots_adjust(hspace=0.4, wspace=0.4)
ax2 = figure.add_subplot(222)
ax3 = figure.add_subplot(223)
ax4 = figure.add_subplot(224)
# Plot Paths
figure_paths = plt.Figure(figsize=(5, 6), dpi=100)
ax5 = figure_paths.add_subplot(231)
bar_paths = FigureCanvasTkAgg(figure_paths, windows_paths)
bar_paths.get_tk_widget().pack(side=tk.TOP, fill=tk.BOTH, expand=True)
figure_paths.subplots_adjust(hspace=0.4, wspace=0.4)
ax6 = figure_paths.add_subplot(232)
ax7 = figure_paths.add_subplot(233)
ax8 = figure_paths.add_subplot(234)
ax9 = figure_paths.add_subplot(235)
ax10 = figure_paths.add_subplot(236)
# Plot average
figure_average = plt.Figure(figsize=(5, 6), dpi=100)
bar_average = FigureCanvasTkAgg(figure_average, windows_average)
bar_average.get_tk_widget().pack(side=tk.TOP, fill=tk.BOTH, expand=True)
figure_average.subplots_adjust(hspace=0.4, wspace=0.4)
ax11 = figure_average.add_subplot(121)
ax12 = figure_average.add_subplot(122)
    # Load the CSV once; each ax.bar call below plots every case-study row.
    for _ in range(1):
        data_loading = pd.read_csv(file, delimiter=',', skiprows=1,
names=["time",
"case_study_name",
"streets_and_houses_vector",
"attack_success_probability",
"attack_cost",
"attack_impact",
"attack_risk",
"number_of_devices",
"average_attack_cost",
"average_attack_impact",
"number_of_paths",
"rare_paths",
"unlilkely_paths",
"possible_paths",
"likely_paths",
"almost_certain_paths"
])
data_loading.head(5)
# print(data_loading)
ax1.bar("case_study_name", 'attack_success_probability', data=data_loading, color=my_colormap)
ax1.set_title('Attack Success Probability')
add_value_labels(ax1)
plt.setp(ax1.get_xticklabels(), rotation=15, horizontalalignment='right')
ax1.set_xlabel('Case Study')
ax1.set_ylabel('Attack Success Probability')
#Enable to save figure
#figure.savefig('Attack_Success_Probability.png')
ax2.bar("case_study_name", 'attack_cost', data=data_loading, color=my_colormap)
ax2.set_title('Attack Cost')
add_value_labels(ax2)
plt.setp(ax2.get_xticklabels(), rotation=15, horizontalalignment='right')
ax2.set_xlabel('Case Study')
ax2.set_ylabel('Attack Cost')
#Enable to save figure
#figure.savefig('Attack_Cost.png')
ax3.bar("case_study_name", 'attack_impact', data=data_loading, color=my_colormap)
ax3.set_title('Attack Impact')
plt.setp(ax3.get_xticklabels(), rotation=15, horizontalalignment='right')
ax3.set_xlabel('Case Study')
ax3.set_ylabel('Attack Impact')
add_value_labels(ax3)
#Enable to save figure
#figure.savefig('Attack_Impact.png')
ax4.bar("case_study_name", 'attack_risk', data=data_loading, color=my_colormap)
ax4.set_title('Attack Risk')
plt.setp(ax4.get_xticklabels(), rotation=15, horizontalalignment='right')
ax4.set_xlabel('Case Study')
        ax4.set_ylabel('Attack Risk')
add_value_labels(ax4)
#Enable to save figure
#figure.savefig('Attack_Risk.png')
# Number of Paths
ax5.bar("case_study_name", 'number_of_paths', data=data_loading, color=my_colormap)
ax5.set_title('Number of Paths')
plt.setp(ax5.get_xticklabels(), rotation=15, horizontalalignment='right')
ax5.set_xlabel('Case Study')
ax5.set_ylabel('Number of Paths')
add_value_labels(ax5)
#Enable to save figure
#figure_paths.savefig('Number_of_Paths.png')
# Rare Paths
ax6.bar("case_study_name", 'rare_paths', data=data_loading, color=my_colormap)
ax6.set_title('Rare Paths')
plt.setp(ax6.get_xticklabels(), rotation=15, horizontalalignment='right')
ax6.set_xlabel('Case Study')
ax6.set_ylabel('Rare Paths')
add_value_labels(ax6)
#Enable to save figure
#figure_paths.savefig('Rare_Paths.png')
        # unlikely_paths
        ax7.bar("case_study_name", 'unlikely_paths', data=data_loading, color=my_colormap)
        ax7.set_title('Unlikely Paths')
        plt.setp(ax7.get_xticklabels(), rotation=15, horizontalalignment='right')
        ax7.set_xlabel('Case Study')
        ax7.set_ylabel('Unlikely Paths')
        add_value_labels(ax7)
        #Enable to save figure
        #figure_paths.savefig('Unlikely_Paths.png')
# possible_paths
ax8.bar("case_study_name", 'possible_paths', data=data_loading, color=my_colormap)
ax8.set_title('Possible Paths')
plt.setp(ax8.get_xticklabels(), rotation=15, horizontalalignment='right')
ax8.set_xlabel('Case Study')
ax8.set_ylabel('Possible Paths')
add_value_labels(ax8)
#Enable to save figure
#figure_paths.savefig('Possible_Paths.png')
# likely_paths
ax9.bar("case_study_name", 'likely_paths', data=data_loading, color=my_colormap)
ax9.set_title('Likely Paths')
plt.setp(ax9.get_xticklabels(), rotation=15, horizontalalignment='right')
ax9.set_xlabel('Case Study')
ax9.set_ylabel('Likely Paths')
add_value_labels(ax9)
#Enable to save figure
#figure_paths.savefig('Likely_Paths.png')
        # almost_certain_paths
ax10.bar("case_study_name", 'almost_certain_paths', data=data_loading, color=my_colormap)
ax10.set_title('Almost Certain Paths')
plt.setp(ax10.get_xticklabels(), rotation=15, horizontalalignment='right')
ax10.set_xlabel('Case Study')
ax10.set_ylabel('Almost Certain Paths')
add_value_labels(ax10)
#Enable to save figure
#figure_paths.savefig('Almost_Certain_Paths.png')
# Average Attack Cost
ax11.bar("case_study_name", 'average_attack_cost', data=data_loading, color=my_colormap)
ax11.set_title('Average Attack Cost')
plt.setp(ax11.get_xticklabels(), rotation=15, horizontalalignment='right')
ax11.set_xlabel('Case Study')
ax11.set_ylabel('Average Attack Cost')
add_value_labels(ax11)
#Enable to save figure
#figure_average.savefig('Average_Attack_Cost.png')
# average_attack_impact
ax12.bar("case_study_name", 'average_attack_impact', data=data_loading, color=my_colormap)
ax12.set_title('Average Attack Impact')
plt.setp(ax12.get_xticklabels(), rotation=15, horizontalalignment='right')
ax12.set_xlabel('Case Study')
ax12.set_ylabel('Average Attack Impact')
add_value_labels(ax12)
#Enable to save figure
#figure_average.savefig('Average_Attack_Impact.png')
if __name__ == '__main__':
plotMetrics("Results/Results.csv")
root.mainloop()
windows_paths.mainloop()
windows_average.mainloop()
|
import errno
import os
import shutil
import subprocess
from pathlib import Path
class Utils:
def create_dir(self, path, permissions=0o755):
if not os.path.exists(path):
os.makedirs(path, permissions)
def write_to_file(self, file, content=""):
        with open(file, 'w') as f:
f.write(content)
def read_file(self, file):
file_path = Path(file)
if not file_path.is_file():
raise FileNotFoundError(errno.ENOENT, os.strerror(errno.ENOENT), file_path)
with open(file, 'r') as f:
return f.read()
def zip_file(self, name, path):
file_path = Path(path)
if not file_path.exists():
raise FileNotFoundError(errno.ENOENT, os.strerror(errno.ENOENT), file_path)
shutil.make_archive(f"/tmp/{name}", 'zip', f"{path}")
def get_hostname_fqdn(self):
result = subprocess.Popen(["hostname", "--fqdn"], stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
out, err = result.communicate()
return [out.decode('utf-8'), err.decode('utf-8')]
def run_cmd(self, command):
result = subprocess.Popen(command, stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
out, err = result.communicate()
return [out.decode('utf-8'), err.decode('utf-8')]
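# Example usage (illustrative paths):
# u = Utils()
# u.create_dir('/tmp/demo')
# u.write_to_file('/tmp/demo/hello.txt', 'hello')
# print(u.read_file('/tmp/demo/hello.txt'))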
|
from Jumpscale import j
# import asyncio
from urllib.parse import urlparse
# import logging
# class AllHandler(logging.Handler):
# def emit(self, record):
# print(record)
# class WgetReturnProtocol(asyncio.SubprocessProtocol):
# def __init__(self, exit_future):
# self.exit_future = exit_future
# self.output = bytearray()
# def pipe_data_received(self, fd, data):
# self.output.extend(data)
# print(data)
# def process_exited(self):
# self.exit_future.set_result(True)
# h = AllHandler()
# logging.getLogger("asyncio").addHandler(h)
JSBASE = j.baseclasses.object
class Offliner(j.baseclasses.object):
"""
functionality to inspect objectr structure and generate apifile
"""
__jslocation__ = "j.tools.offliner"
# @asyncio.coroutine
def getSiteDownloadCmd(self, url, dest="", level=5, docElementsOnly=True, restrictToDomain=True):
"""
download all docs from url to dest
if dest=="" then use current dir and use url as subdir
"""
cmd = ""
if dest != "":
cmd += "cd %s;" % dest
j.sal.fs.createDir(dest)
cmd += "wget --execute robots=off --recursive --no-clobber --page-requisites --html-extension --convert-links"
cmd += " --restrict-file-names=windows --continue --user-agent=Mozilla"
cmd += " --no-parent"
cmd += " -l %s" % level
if docElementsOnly:
cmd += " --accept jpg,gif,png,jpeg,html,htm,css,js"
if restrictToDomain:
parsed = urlparse(url)
domain = parsed.netloc
cmd += " --domains %s" % domain
cmd += " %s" % url
self._log_debug(cmd)
return cmd
# # Create the subprocess, redirect the standard output into a pipe
# create = asyncio.create_subprocess_shell(cmd,stdout=asyncio.subprocess.PIPE)
# exit_future = asyncio.Future(loop=loop)
# # Create the subprocess controlled by the protocol DateProtocol,
# # redirect the standard output into a pipe
# create = loop.subprocess_exec(lambda: DateProtocol(exit_future),
# sys.executable, '-c', code,
# stdin=None, stderr=None)
# transport, protocol = yield from create
# # Wait for the subprocess exit using the process_exited() method
# # of the protocol
# yield from exit_future
# # Close the stdout pipe
# transport.close()
# # Read the output which was collected by the pipe_data_received()
# # method of the protocol
# data = bytes(protocol.output)
# return data.decode('ascii').rstrip()
    def getPDFs(self, url, dest=""):
        """download only PDFs from url to dest (stub; wget would need "--accept=pdf")"""
# def getSites(self,urls,dest="",level=5,docElementsOnly=True,restrictToDomain=True):
# # loop = asyncio.get_event_loop()
# # tasks=[]
# # for url in urls:
# # tasks.append(asyncio.ensure_future(self._getSite(url,dest,level,docElementsOnly,restrictToDomain)))
# # loop.run_until_complete(asyncio.wait(tasks))
# # loop.close()
# for url in urls:
# examples from http://www.labnol.org/software/wget-command-examples/28750/
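# Usage sketch (hypothetical; assumes a Jumpscale shell where this tool is
# registered under j.tools.offliner):
#   cmd = j.tools.offliner.getSiteDownloadCmd("https://example.com",
#                                             dest="/tmp/mirror", level=3)
#   j.sal.process.execute(cmd)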
|
#!/usr/bin/env python
import angr, claripy, time, sys, os, simuvex
import os, datetime, ntpath, struct, shutil
from xml.dom import minidom
TIMEOUT = 7
result_dir = os.path.join(os.getcwd(), "angr-out-" + datetime.datetime.now().strftime('%Y-%m-%d_%H-%M-%S'))
def check_argv(argv):
xml_path = list()
if len(argv) == 2:
input_xml = argv[1]
if not os.path.isfile(input_xml):
print "[ERROR] input file \'" + input_xml + "\' does not exist!"
sys.exit()
xml_path.append(input_xml)
elif (len(argv) == 3) and (argv[1] == '-b'):
input_folder = os.path.abspath(argv[2])
if not os.path.isdir(input_folder):
print "[ERROR] Invalid input folder for batch mode: \'" + input_folder + "\'!"
sys.exit()
for f in os.listdir(input_folder):
if f.endswith('.xml'):
serialize_file_path = os.path.join(input_folder, (f + '.serialized'))
if not os.path.isfile(serialize_file_path):
print "[Warning] \'" + f + "\' does not have corresponding \'.serialized\'"
continue
xml_path.append(os.path.join(input_folder, f))
else:
print "[ERROR] Invalid argument!"
sys.exit()
return xml_path
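# Invocation (mirrors the argument handling above; the script name is illustrative):
#   python angr_xml.py <config.xml>    single-config mode
#   python angr_xml.py -b <folder>     batch mode over every *.xml in <folder>
#                                      that has a matching *.xml.serialized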
def parse_xml_exe(parsed_xml):
target_exe = parsed_xml.getElementsByTagName('exec')[0].childNodes[0].nodeValue
if not os.path.isfile(target_exe):
print "[ERROR] target executable \'" + target_exe + "\' does not exist!"
sys.exit()
print "Target executable \'" + target_exe + "\' ..."
return target_exe
def parse_xml_argv(parsed_xml, dic_args):
xml_args = parsed_xml.getElementsByTagName('arg')
for s in xml_args:
index = int(s.attributes['index'].value)
value = str(s.attributes['value'].value)
size = 0
if s.attributes['size'].value:
size = int(s.attributes['size'].value)
if size < len(value):
size = len(value)
concolic = s.attributes['concolic'].value
if concolic == "true":
dic_args["argv_{0}".format(index)] = claripy.BVS("argv_{0}".format(index), size*8)
else:
dic_args["argv_{0}".format(index)] = value
for k,v in dic_args.iteritems():
if isinstance(v, str):
print k + ": string instance, concrete argument, value = " + v
elif isinstance(v, claripy.ast.Bits):
print k + ": claripy AST instance, symbolic argument"
else:
print "[Error] " + k + ": wrong type"
print type(v)
sys.exit()
return dic_args
def parse_xml_file(parsed_xml):
xml_files = parsed_xml.getElementsByTagName('file')
list_files = list()
for f in xml_files:
path = str(f.attributes['path'].value)
size = int(f.attributes['size'].value)
concolic = f.attributes['concolic'].value
if concolic == "true":
# filename = ntpath.basename(path)
list_files.append([path, size])
else:
print "[Error] Non-concolic file is given: " + path
sys.exit()
    ## XXX: the order of the files is not necessarily the same as in the xml file
for k in list_files:
print "concolic file: " + k[0] + ", " + str(k[1])
return list_files
def parse_xml_stdin(parsed_xml):
xml_stdin = parsed_xml.getElementsByTagName('stdin')
if not xml_stdin:
return 0
if not len(xml_stdin) == 1:
print "[Error] more than one stdin is given, please check input xml file"
sys.exit()
size = int(xml_stdin[0].attributes['size'].value)
concolic = xml_stdin[0].attributes['concolic'].value
if not concolic == "true":
print "[Error] Non-concolic stdin is given"
sys.exit()
print "stdin_size from xml: " + str(size)
return size
def exec_angr(target_exe, dic_args, list_files, stdin_size):
print "========="
print "exec_angr"
print "========="
p = angr.Project(target_exe, load_options={'auto_load_libs':True})
## prepare arguments
arguments=list()
for i in range(0, len(dic_args)):
key = "argv_{0}".format(i)
if not key in dic_args:
print "[ERROR] incomplete argv list from xml: \'" + key + "\' not found"
sys.exit()
arguments.append(dic_args[key])
## prepare files
files = {}
for f in list_files:
file_path = f[0]
file_size = f[1]
files[file_path] = angr.storage.file.SimFile(file_path, "r", size = file_size)
arguments.append(file_path)
## prepare stdin
if not stdin_size == 0:
files['/dev/stdin'] = angr.storage.file.SimFile("/dev/stdin", "r", size = stdin_size)
## debug prints
for v in arguments:
if isinstance(v, str):
print "concrete argument: " + v
elif isinstance(v, claripy.ast.Bits):
print "symbolic argument"
else:
print "[Error] " + v + ": wrong type"
print type(v)
sys.exit()
for k,v in files.iteritems():
print "symbolic file: " + k
state = p.factory.entry_state(args=arguments, fs=files)
sm = p.factory.simgr(state)
start_time = time.time()
# sm.step(until=lambda lpg: (time.time() - start_time) > TIMEOUT)
sm.step(until=lambda lpg: len(lpg.active) > 1)
return sm
def get_simfile_content(s, file_path):
fd = s.posix.filename_to_fd(file_path)
    if fd is None:
print "No fd found, use dump_file_by_path(): " + file_path
return s.posix.dump_file_by_path(file_path)
else:
print "fd found \'" + str(fd) + "\': " + file_path
return s.posix.dumps(fd)
def get_test_case(s, dic_args, list_files, stdin_size, count):
print "--------------"
print "get_test_case:"
print "--------------"
output = open("{}.bin".format(str(count)), "wb")
elem_count = 0
output.write(struct.pack("i", 0))
for k,v in dic_args.iteritems():
if not isinstance(v, claripy.ast.Bits):
continue
elem_count = elem_count + 1
concrete_value = s.solver.any_str(v)
#4B for the size of the arg name
output.write(struct.pack("i", len(k)))
#name
output.write(str(k))
#4B for size of value
output.write(struct.pack("i", len(concrete_value)))
#value
output.write(str(concrete_value))
for f in list_files:
elem_count = elem_count + 1
file_path = f[0]
file_size = f[1]
concrete_value = get_simfile_content(s, file_path)
# print file_path + ": " + concrete_value + ", " + str(len(concrete_value))
filename = str(ntpath.basename(file_path))
#4B for the size of the file name
output.write(struct.pack("i", len(filename)))
#name
output.write(filename)
#4B for size of value
output.write(struct.pack("i", file_size))
#check if the string is longer than the file size, if so only take the first file_size bytes
if (len(concrete_value) > file_size):
strip = len(concrete_value) - file_size
#output.write(concrete_value)
concrete_value = concrete_value[:-strip]
# print str(len(concrete_value))
#write value
output.write(concrete_value)
#if string is shorter than the file size, fill the byte difference with 00
if (len(concrete_value) < file_size):
amount = file_size - len(concrete_value)
output.write(b'\x00' * amount)
if not stdin_size == 0:
elem_count = elem_count + 1
stdin_content = get_simfile_content(s, "/dev/stdin")
# print "stdin_content: " + stdin_content + ", " + str(len(stdin_content))
#4B for the size of the name
output.write(struct.pack("i", len("crete-stdin")))
#name
output.write("crete-stdin")
#4B for size of value
output.write(struct.pack("i", stdin_size))
#check if the string is longer than the file size, if so only take the first stdin_size bytes
if (len(stdin_content) > stdin_size):
strip = len(stdin_content) - stdin_size
stdin_content = stdin_content[:-strip]
#write value
output.write(stdin_content)
#if string is shorter than the stdin size, fill the byte difference with 00
if (len(stdin_content) < stdin_size):
amount = stdin_size - len(stdin_content)
output.write(b'\x00' * amount)
    output.seek(0)
    output.write(struct.pack("i", elem_count))
    output.close()
def setup_crete_out_folder(input_xml):
output_folder = os.path.join(result_dir, ntpath.basename(input_xml))
os.makedirs(output_folder)
## copy serialized xml to the correct place
guest_config_folder = os.path.join(output_folder, "guest-data")
os.makedirs(guest_config_folder)
serialized_xml = str(input_xml) + ".serialized"
# print "copy " + serialized_xml + " to " + os.path.join(guest_config_folder, "crete-guest-config.serialized")
shutil.copyfile(serialized_xml, os.path.join(guest_config_folder, "crete-guest-config.serialized"))
tc_folder = os.path.join(output_folder, "test-case-parsed")
os.makedirs(tc_folder)
os.chdir(tc_folder)
def collect_angr_result(sm, dic_args, list_files, stdin_size):
print "==================="
print "collect_angr_result"
print "==================="
print "deadended: " + str(len(sm.deadended))
print "active: " + str(len(sm.active))
tc_count = 0
for s in sm.deadended:
tc_count = tc_count + 1
get_test_case(s, dic_args, list_files, stdin_size, tc_count)
for s in sm.active:
tc_count = tc_count + 1
get_test_case(s, dic_args, list_files, stdin_size, tc_count)
def run_angr_with_xml(input_xml):
parsed_xml = minidom.parse(input_xml)
dic_args = {}
## 1. parse target executable
target_exe = parse_xml_exe(parsed_xml)
dic_args["argv_0"] = str(target_exe)
## 2. parse xml arguments
dic_args = parse_xml_argv(parsed_xml, dic_args)
## 3. parse xml files
list_files = parse_xml_file(parsed_xml)
## 4. parse xml stdin
stdin_size = parse_xml_stdin(parsed_xml)
## 5. start angr with parsed args, files and stdin
sm = exec_angr(target_exe, dic_args, list_files, stdin_size)
## 6. collect angr's result
setup_crete_out_folder(input_xml)
collect_angr_result(sm, dic_args, list_files, stdin_size)
return
def angr_xml_ui(argv):
list_xml = check_argv(argv)
os.makedirs(result_dir)
for xml in list_xml:
run_angr_with_xml(xml)
if __name__ == '__main__':
angr_xml_ui(sys.argv)
|
from primes import sieve
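# `sieve()` is assumed to come from a local `primes` helper module and to yield
# primes in increasing order (Project Euler 10: sum the primes below 2,000,000).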
primes = sieve()
result = 0
for prime in primes:
if prime > 2000000:
break
result += prime
print result, prime
|
# Copyright 2018 Division of Medical Image Computing, German Cancer Research Center (DKFZ).
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Script to preprocess the Cityscapes dataset."""
import os
import numpy as np
from tqdm import tqdm
from PIL import Image
import imp
resolution_map = {1.0: 'full', 0.5: 'half', 0.25: 'quarter'}
def resample(img, scale_factor=1.0, interpolation=Image.BILINEAR):
"""
Resample PIL.Image objects.
:param img: PIL.Image object
:param scale_factor: float
    :param interpolation: PIL.Image interpolation method
:return: PIL.Image object
"""
width, height = img.size
basewidth = width * scale_factor
basewidth = int(basewidth)
wpercent = (basewidth / float(width))
hsize = int((float(height) * wpercent))
return img.resize((basewidth, hsize), interpolation)
def recursive_mkdir(nested_dir_list):
"""
Make the full nested path of directories provided. Order in list implies nesting depth.
:param nested_dir_list: list of strings
:return:
"""
nested_dir = ''
for dir in nested_dir_list:
nested_dir = os.path.join(nested_dir, dir)
if not os.path.isdir(nested_dir):
os.mkdir(nested_dir)
return
def preprocess(cf):
for set in list(cf.settings.keys()):
print('Processing {} set.'.format(set))
# image dir
image_dir = os.path.join(cf.raw_data_dir, 'leftImg8bit', set)
city_names = os.listdir(image_dir)
for city in city_names:
print('Processing {}'.format(city))
city_dir = os.path.join(image_dir, city)
image_names = os.listdir(city_dir)
image_specifiers = ['_'.join(img.split('_')[:3]) for img in image_names]
for img_spec in tqdm(image_specifiers):
for scale in cf.settings[set]['resolutions']:
recursive_mkdir([cf.out_dir, resolution_map[scale], set, city])
# image
img_path = os.path.join(city_dir, img_spec + '_leftImg8bit.png')
img = Image.open(img_path)
if scale != 1.0:
img = resample(img, scale_factor=scale, interpolation=Image.BILINEAR)
img_out_path = os.path.join(cf.out_dir, resolution_map[scale], set, city, img_spec + '_leftImg8bit.npy')
img_arr = np.array(img).astype(np.float32)
channel_axis = 0 if img_arr.shape[0] == 3 else 2
if cf.data_format == 'NCHW' and channel_axis != 0:
img_arr = np.transpose(img_arr, axes=[2,0,1])
np.save(img_out_path, img_arr)
# labels
for label_density in cf.settings[set]['label_densities']:
label_dir = os.path.join(cf.raw_data_dir, label_density, set, city)
for mod in cf.settings[set]['label_modalities']:
label_spec = img_spec + '_{}_{}'.format(label_density, mod)
label_path = os.path.join(label_dir, label_spec + '.png')
label = Image.open(label_path)
if scale != 1.0:
label = resample(label, scale_factor=scale, interpolation=Image.NEAREST)
label_out_path = os.path.join(cf.out_dir, resolution_map[scale], set, city, label_spec + '.npy')
np.save(label_out_path, np.array(label).astype(np.uint8))
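# A minimal sketch of the expected preprocessing_config.py, with field names
# inferred from their use above (values are illustrative assumptions):
#
#   raw_data_dir = '/data/cityscapes/raw'
#   out_dir = '/data/cityscapes/preprocessed'
#   data_format = 'NCHW'
#   settings = {
#       'train': {'resolutions': [0.25],
#                 'label_densities': ['gtFine'],
#                 'label_modalities': ['labelIds']},
#   }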
if __name__ == "__main__":
cf = imp.load_source('cf', 'preprocessing_config.py')
    preprocess(cf)
|
import tensorflow as tf
in_ = tf.compat.v1.placeholder(tf.float32, shape=[1, 2, 2, 1], name="Hole")
pd_ = tf.constant([[0, 0], [0, 0]], name="Hole")
op_ = tf.compat.v1.space_to_batch(in_, pd_, 2)
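# Under the TF1 signature space_to_batch(input, paddings, block_size): with a
# [1, 2, 2, 1] input, zero paddings and block size 2, each 2x2 spatial block is
# moved into the batch dimension, so the output shape here is [4, 1, 1, 1].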
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
@author: lingquan
"""
from statsmodels.tsa.arima_model import ARMA
import pandas
import numpy
import statsmodels.api as sm
prices = pandas.read_csv("prices.csv",parse_dates=['Date'],index_col=0)
tickers = prices.columns[:-2]
prices = prices.resample('W').agg(lambda x:x[-1])
prices.dropna(axis=0, how='any', inplace=True)
rf = prices['^TNX'].values[:-1]
rf /= (52*100)
returns = prices.iloc[:,:-1].pct_change()[1:]
rm = returns['^GSPC'].values
ri = returns.iloc[:,:-1].values
Ri = ri-rf[:,numpy.newaxis]
Rm = rm-rf
model = sm.OLS(Ri, sm.add_constant(Rm))
results = model.fit()
alpha,beta = results.params
epsilon = numpy.sqrt(Ri.var(axis=0) - beta**2*Rm.var(axis=0))
output = pandas.DataFrame(
columns=['alpha','beta','epsilon'],
index = tickers,
data=numpy.array([alpha,beta,epsilon]).T
)
output.to_csv("coefficients.csv")
from arch.univariate import ARX, GARCH
arx = ARX(rm, lags=1)
arx.volatility = GARCH()
res = arx.fit(disp='off')
pandas.DataFrame(res.params).to_csv("parameters.csv")
|
# -----------------------------------------------------.
# For duplicate vertices of a polygon mesh, keep only one vertex selected.
#
# @title \en Select single vertex of polygon mesh \enden
# @title \ja ポリゴンメッシュの重複頂点の選択を単一に \endja
# -----------------------------------------------------.
import math
scene = xshade.scene()
# Walk the hierarchy and collect polygon meshes.
def getPolygonmeshes (shape, polygonmeshList):
if shape.type == 7:
polygonmeshList.append(shape)
if shape.has_son:
s = shape.son
while s.has_bro:
s = s.bro
getPolygonmeshes(s, polygonmeshList)
# Check whether two vertex positions are effectively the same.
def isSamePosition (p1, p2):
fMin = 0.0001
return (math.fabs(p1[0] - p2[0]) < fMin and math.fabs(p1[1] - p2[1]) < fMin and math.fabs(p1[2] - p2[2]) < fMin)
#-------------------------------------------------.
# Walk the hierarchy and collect only the polygon meshes.
polygonmeshList = []
for shape in scene.active_shapes:
getPolygonmeshes(shape, polygonmeshList)
if len(polygonmeshList) == 0:
    xshade.show_message_box('Please select a polygon mesh.', False)
chkF = False
for shape in polygonmeshList:
if shape.number_of_active_control_points == 0:
continue
    # Array of the selected vertex indices.
aVers = shape.active_vertex_indices
aVersCou = len(aVers)
    # When two vertices share the same position, deselect one of them.
for i in range(aVersCou):
index0 = aVers[i]
if shape.vertex(index0).active == False: continue
v0 = shape.vertex(index0).position
for j in range(i + 1, aVersCou):
index1 = aVers[j]
if shape.vertex(index1).active == False: continue
v1 = shape.vertex(index1).position
if isSamePosition(v0, v1):
                # Deselect the vertex at index1.
shape.vertex(index1).active = False
chkF = True
if chkF == False:
    xshade.show_message_box('No duplicate vertices were found among the selected vertices.', False)
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
对半搜索,
"""
import random
def test(search):
l = sorted(random.sample(range(99), 10))
i = random.randrange(len(l))
assert search(l, l[i]) == i
assert search(l, 999) == None
def _bsearch(l, x, a, b):
if a >= b:
return None
m = (a + b) // 2
if (x < l[m]):
return _bsearch(l, x, a, m)
elif (x > l[m]):
return _bsearch(l, x, m + 1, b)
else:
return m
def bsearch(l, x):
return _bsearch(l, x, 0, len(l))
test(bsearch)
def _bsearch1(l, x, a, b):
    """
    Splitting half-and-half like this corresponds more closely to the decision
    binary tree: one comparison per level, with the equality check at the leaf.
    """
    if a >= b:
        return None
    if b - a == 1:
        return a if l[a] == x else None
    m = (a + b) // 2
    if x < l[m]:
        return _bsearch1(l, x, a, m)
    else:
        return _bsearch1(l, x, m, b)
def bsearch1(l, x):
    return _bsearch1(l, x, 0, len(l))
test(bsearch1)
|
"""mapper.py - defines mappers for domain objects, mapping operations"""
import zblog.tables as tables
import zblog.user as user
from zblog.blog import *
from sqlalchemy import *
import sqlalchemy.util as util
import zblog.database
import threading
# thread-local holder for the per-request session (see start_session()/session() below)
trans = threading.local()
def zblog_mappers():
# User mapper. Here, we redefine the names of some of the columns
# to different property names. normally the table columns are all
# sucked in automatically.
mapper(user.User, tables.users, properties={
'id':tables.users.c.user_id,
'name':tables.users.c.user_name,
'group':tables.users.c.groupname,
'crypt_password':tables.users.c.password,
})
# blog mapper. this contains a reference to the user mapper,
# and also installs a "backreference" on that relationship to handle it
# in both ways. this will also attach a 'blogs' property to the user mapper.
mapper(Blog, tables.blogs, properties={
'id':tables.blogs.c.blog_id,
'owner':relation(user.User, lazy=False, backref=backref('blogs', cascade="all, delete-orphan")),
})
# topic mapper. map all topic columns to the Topic class.
mapper(Topic, tables.topics)
# TopicAssocation mapper. This is an "association" object, which is similar to
# a many-to-many relationship except extra data is associated with each pair
    # of related data. because the topic_xref table doesn't have a primary key,
# the "primary key" columns of a TopicAssociation are defined manually here.
mapper(TopicAssociation,tables.topic_xref,
primary_key=[tables.topic_xref.c.post_id, tables.topic_xref.c.topic_id],
properties={
'topic':relation(Topic, lazy=False),
})
# Post mapper, these are posts within a blog.
# since we want the count of comments for each post, create a select that will get the posts
# and count the comments in one query.
posts_with_ccount = select(
[c for c in tables.posts.c if c.key != 'body'] + [
func.count(tables.comments.c.comment_id).label('comment_count')
],
from_obj = [
outerjoin(tables.posts, tables.comments)
],
group_by=[
c for c in tables.posts.c if c.key != 'body'
]
    ).alias('postswcount')
    # then create a Post mapper on that query.
    # the body is "deferred" so that it loads only when needed;
    # the user is a lazy load, since the lazy load runs only once per user and
    # usually only one user's posts are needed per page;
    # the owning blog is a lazy load, since it is also probably loaded into the
    # identity map already; and topics is an eager load, since that query has to
    # be done per post in any case.
mapper(Post, posts_with_ccount, properties={
'id':posts_with_ccount.c.post_id,
'body':deferred(tables.posts.c.body),
'user':relation(user.User, lazy=True, backref=backref('posts', cascade="all, delete-orphan")),
'blog':relation(Blog, lazy=True, backref=backref('posts', cascade="all, delete-orphan")),
'topics':relation(TopicAssociation, lazy=False, private=True, association=Topic, backref='post')
}, order_by=[desc(posts_with_ccount.c.datetime)])
# comment mapper. This mapper is handling a hierarchical relationship on itself, and contains
# a lazy reference both to its parent comment and its list of child comments.
mapper(Comment, tables.comments, properties={
'id':tables.comments.c.comment_id,
'post':relation(Post, lazy=True, backref=backref('comments', cascade="all, delete-orphan")),
'user':relation(user.User, lazy=False, backref=backref('comments', cascade="all, delete-orphan")),
'parent':relation(Comment, primaryjoin=tables.comments.c.parent_comment_id==tables.comments.c.comment_id, foreignkey=tables.comments.c.comment_id, lazy=True, uselist=False),
'replies':relation(Comment,primaryjoin=tables.comments.c.parent_comment_id==tables.comments.c.comment_id, lazy=True, uselist=True, cascade="all"),
})
# we define one special find-by for the comments of a post, which is going to make its own "noload"
# mapper and organize the comments into their correct hierarchy in one pass. hierarchical
# data normally needs to be loaded by separate queries for each set of children, unless you
# use a proprietary extension like CONNECT BY.
def find_by_post(post):
"""returns a hierarchical collection of comments based on a given criterion.
uses a mapper that does not lazy load replies or parents, and instead
organizes comments into a hierarchical tree when the result is produced.
"""
q = session().query(Comment).options(noload('replies'), noload('parent'))
comments = q.select_by(post_id=post.id)
result = []
d = {}
for c in comments:
d[c.id] = c
if c.parent_comment_id is None:
result.append(c)
c.parent=None
else:
parent = d[c.parent_comment_id]
parent.replies.append(c)
c.parent = parent
return result
Comment.find_by_post = staticmethod(find_by_post)
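    # e.g., given a loaded Post instance `post` inside a session:
    #   for comment in Comment.find_by_post(post):
    #       ...  # top-level comments; child comments are in comment.replies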
def start_session():
"""creates a new session for the start of a request."""
    trans.session = create_session(bind_to=zblog.database.engine)
def session():
return trans.session
|
class DexChangeEvent:
"""
    General event emitted when the dex is updated.
    It must be emitted whenever no more specific event describes a modification of the dex.
"""
NAME = "DEX_CHANGE_EVENT"
def __init__(self, directory, key, prevData, newData):
self.directory = directory
self.key = key
self.prevData = prevData
self.newData = newData
def serialize(self):
return {
"name": self.NAME,
"data": {
"directory": self.directory,
"key": self.key,
"prevData": self.prevData,
"newData": self.newData
}
}
@staticmethod
def deserialize(serializedHook):
data = serializedHook["data"]
directory = data["directory"] if "directory" in data else None
key = data["key"] if "key" in data else None
prevData = data["prevData"] if "prevData" in data else None
newData = data["newData"] if "newData" in data else None
return DexChangeEvent(directory, key, prevData, newData)
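# Serialization round-trip sketch (values are illustrative):
#   event = DexChangeEvent("dex/", "025", None, {"name": "Pikachu"})
#   restored = DexChangeEvent.deserialize(event.serialize())
#   assert restored.key == "025" and restored.newData == {"name": "Pikachu"}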
|
# Script to retrieve results in offline mode
import json, pickle
from authentication import ws
import pandas as pd
from azureml.core import Model, Dataset
from azureml.core.run import Run  # , _OfflineRun
run_id = 'f96fd978-f615-4e34-8ae5-6a0c668d10e8' # main run, under Experiment
experiment = ws.experiments['double-pipeline']
run = next(run for run in experiment.get_runs() if run.id == run_id)
automl_run = next(r for r in run.get_children() if r.name =='Time_Series_Forecasting')
outputs = automl_run.get_outputs()
#print(type(outputs)) # <class 'dict'>
#print(outputs)
#{
# 'metrics_data': <azureml.pipeline.core.run.StepRunOutput object at 0x7f8d609d03d0>,
# 'default_model_Time_Series_Forecasting': <azureml.pipeline.core.run.StepRunOutput object at 0x7f8d609d0790>,
# 'best_model_data': <azureml.pipeline.core.run.StepRunOutput object at 0x7f8d609d05e0>,
# 'default_metrics_Time_Series_Forecasting': <azureml.pipeline.core.run.StepRunOutput object at 0x7f8d70b73fd0>
# }
metrics = outputs['default_metrics_Time_Series_Forecasting']
model = outputs['default_model_Time_Series_Forecasting']
metrics.get_port_data_reference().download('.')
model.get_port_data_reference().download('.')
|
from werkzeug.utils import secure_filename
from datetime import datetime as DT
from flask.helpers import flash, send_file
from getpass import getpass
from pathlib import Path
from passlib.hash import pbkdf2_sha256
from subprocess import Popen
from markupsafe import escape
from functools import wraps
from flask import Flask, abort, session, redirect, url_for, request, render_template
# from concurrent.futures.process import ProcessPoolExecutor
import os
import sys
import json
import signal
import logging
import shutil
import glob
import traceback
import argparse
import string
import random
import sqlite3
import concurrent.futures
from flask.blueprints import Blueprint
# Allows importing from: ../../
parent = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
print("In [{0}], appending [{1}] to system path.".format(__file__, parent))
sys.path.append(parent)
if parent:
import books_scraper.scraper as SCR
vbp = Blueprint('bp', __name__, template_folder='templates')
CONFIG = None
UPLOAD_FOLDER = os.path.join(os.getcwd(), "gs_uploads")
HTML_DIR = "{0}/html/".format(os.getcwd())
Path(HTML_DIR).mkdir(parents=True, exist_ok=True)
logging.basicConfig(filename='scraper.log', level=logging.INFO)
Path(UPLOAD_FOLDER).mkdir(parents=True, exist_ok=True)
TPE = concurrent.futures.ThreadPoolExecutor(max_workers=5)
def get_ts_str():
return DT.now().strftime("%Y%m%d_%H%M%S")
def random_str(size=10):
return ''.join(random.choices(string.ascii_uppercase + string.digits, k=size))
def auth_check(f):
@wraps(f)
def wrapper(*args, **kwargs):
if "login_id" not in session:
logging.warning("Illegal access to operation. Login required.")
return redirect(url_for('bp.login'))
else:
logging.info("User is authenticated already.")
return f(*args, **kwargs)
return wrapper
def _init_db():
with sqlite3.connect('app.db') as conn:
c = conn.cursor()
# Create table
c.execute('''CREATE TABLE IF NOT EXISTS users
(id INTEGER PRIMARY KEY AUTOINCREMENT,
login_id text NOT NULL UNIQUE,
pass_hashed text NOT NULL, full_name text NOT NULL,
role text NOT NULL)''')
conn.commit()
logging.info("DB initialized.")
def _authenticate(login_id, plain_pass):
valid = False
try:
with sqlite3.connect('app.db') as conn:
c = conn.cursor()
# Create table
c.execute(
'SELECT pass_hashed FROM users WHERE login_id=?', (login_id,))
row = c.fetchone()
if row:
valid = pbkdf2_sha256.verify(plain_pass, row[0])
except Exception as ex:
logging.exception("Error occurred when authenticating.")
return valid
def _add_user(login_id, pass_hashed, full_name, role="USER"):
with sqlite3.connect('app.db') as conn:
c = conn.cursor()
c.execute('SELECT count(*) FROM users WHERE login_id=?', (login_id,))
if c.fetchone()[0] != 0:
raise Exception("Login ID already exists.")
c.execute("""INSERT INTO users(login_id, pass_hashed, full_name, role)
VALUES (?,?,?,?)""", (login_id, pass_hashed, full_name, role))
conn.commit()
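# Bootstrap sketch for creating the first user from a Python shell (module path
# and credentials are illustrative):
#   >>> from webapp import _init_db, _add_user
#   >>> from passlib.hash import pbkdf2_sha256
#   >>> _init_db()
#   >>> _add_user("admin", pbkdf2_sha256.hash("change-me"), "Site Admin")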
def _scrape_goodreads(query, max_rec, out_dir, dont_ucb, login_id):
pid_file = os.path.join(out_dir, "pid")
SCR.LOG_FILE = os.path.join(out_dir, "task.log")
try:
with open(pid_file, "w") as pif:
pif.write(query)
bs = SCR.BookScraper(query,
max_recs=max_rec,
use_cached_books=not dont_ucb,
out_dir=out_dir,
html_dir=HTML_DIR,
gr_login=CONFIG["gr_login"],
gr_password=CONFIG["gr_password"],
web_browser=CONFIG["browser"],
timeout=CONFIG["timeout"],
http_delay_sec=CONFIG["http_delay_sec"])
bs.scrape_goodreads_books()
    except Exception:
        # This runs on a worker thread (no request context), so just log the
        # failure instead of trying to render a template here.
        logging.exception("Error occurred when processing goodreads scraping.")
finally:
try:
if os.path.exists(pid_file):
os.remove(pid_file)
logging.info("Closed pending task for user "+login_id)
except Exception as ex2:
logging.exception("Failed to close the task.")
def _fmt_date_str(dt_str):
try:
d2 = DT.strptime(dt_str, "%Y%m%d_%H%M%S")
return d2.strftime("%Y-%b-%d@%H:%M:%S")
except Exception as ex:
logging.exception("Failed to format date.")
return dt_str
def _existing_tasks(login_id):
base_path = os.path.join(os.getcwd(), session['login_id'])
pid_files = glob.glob("{0}/**/pid".format(base_path))
pids = []
for pfile in pid_files:
with open(pfile, "r") as pf:
pids.append(pf.read())
return pids
@auth_check
def start():
login_id = session['login_id']
out_dir = os.path.join(os.getcwd(), login_id, get_ts_str())
Path(out_dir).mkdir(parents=True, exist_ok=True)
logging.info("Starting command in: "+os.getcwd())
max_rec = request.form['max_rec']
query = request.form['query']
timeout = request.form['timeout']
dont_ucb = request.form.get('dont_ucb') == "on"
TPE.submit(_scrape_goodreads, query, max_rec, out_dir, dont_ucb, login_id)
return redirect(url_for('bp.task_status'))
@auth_check
def task_status():
path = "{0}/{1}".format(os.getcwd(), session['login_id'])
if os.path.exists(path):
subfolders = [f.path for f in os.scandir(path) if f.is_dir()]
subfolders.sort(reverse=True)
data = [
{"folder": d.split("/")[-1],
"folder_label": _fmt_date_str(d.split("/")[-1]),
"files": [f.path for f in os.scandir(d) if f.is_file()],
"status": "RUNNING" if glob.glob(d+"/pid") else "FINISHED",
} for d in subfolders]
else:
data = []
return render_template('status.html',
data=data,
out_dir=path,
name=escape(session['login_id']))
@auth_check
def get_file(file_path):
base_path = "{0}/{1}".format(os.getcwd(), session['login_id'])
abs_path = os.path.join(base_path, file_path)
# Check if path is a file and serve
if os.path.isfile(abs_path):
if abs_path.endswith("task.log"):
with open(abs_path, "r") as logs_file:
return render_template('show_logs.html',
name=escape(session['login_id']),
data=escape(logs_file.read()))
else:
return send_file(abs_path, as_attachment=True)
else:
return abort(404)
@auth_check
def clear_dir(dir_path):
logging.debug("dir_path = "+dir_path)
if "/" in dir_path or ".." in dir_path:
logging.error("!!! Invalid path: "+dir_path)
return abort(401)
else:
base_path = "{0}/{1}".format(os.getcwd(), session['login_id'])
abs_path = os.path.join(base_path, dir_path)
for pid in _existing_tasks(session['login_id']):
try:
os.kill(int(pid), signal.SIGTERM)
except Exception as ex:
logging.exception("Error when deleting process.")
logging.info("!!! Deleting: "+abs_path)
shutil.rmtree(abs_path)
return redirect(url_for('bp.task_status'))
# def signup():
# error = None
# try:
# if request.method == 'POST':
# pw_hashed = pbkdf2_sha256.hash(request.form['password'])
# _add_user(request.form['login_id'], pw_hashed,
# request.form['full_name'])
# return render_template("index.html",
# error="User created. Please login with your credentials.")
# except Exception as ex:
# logging.exception("Error occurred when signing up.")
# error = str(ex)
# return render_template('signup.html', error=error)
def login():
error = None
try:
if request.method == 'POST':
if _authenticate(request.form['login_id'],
request.form['password']):
logging.info("Login successful.")
session['login_id'] = request.form['login_id']
return redirect(url_for('bp.home'))
else:
error = 'Invalid username/password'
except Exception as ex:
logging.exception("Error occurred when logging in.")
error = str(ex)
# the code below is executed if the request method
# was GET or the credentials were invalid
return render_template('index.html', error=error)
def index():
if "login_id" in session:
return redirect(url_for('bp.home'))
return render_template('index.html')
@auth_check
def home():
pids = _existing_tasks(session['login_id'])
return render_template('home.html',
name=escape(session['login_id']), pids=pids)
def logout():
# remove the username from the session if it's there
session.pop('login_id', None)
return redirect(url_for('bp.index'))
def _process_zip_upload(file_path, login_id, src_type):
logging.info("Processing: "+file_path)
out_dir = "{0}/{1}/{2}".format(os.getcwd(), login_id, get_ts_str())
Path(out_dir).mkdir(parents=True, exist_ok=True)
bs = SCR.BookScraper("", html_dir=HTML_DIR, out_dir=out_dir,
http_delay_sec=CONFIG["http_delay_sec"])
bs.extract_from_zip(file_path, src_type)
@auth_check
def upload_file():
login_id = session['login_id']
logging.info("Upload destination: "+UPLOAD_FOLDER)
try:
if request.method == 'POST':
# check if the post request has the file part
if 'zip_file' not in request.files:
logging.info("No file part found in request.")
return render_template('upload_gs.html',
error="No file part found!",
name=escape(login_id))
file = request.files['zip_file']
# if user does not select file, browser also
# submit an empty part without filename
if file.filename == '':
return render_template('upload_gs.html',
error="No file data found!",
name=escape(login_id))
if file and file.filename.endswith(".zip"):
sfn = secure_filename(file.filename)
file_path = os.path.join(UPLOAD_FOLDER, sfn)
file.save(file_path)
src_type = request.form.get("src_type")
_process_zip_upload(file_path, login_id, src_type)
return redirect(url_for('bp.task_status'))
else:
logging.error("File type not allowed!")
return render_template('upload_gs.html',
error="File type not allowed!",
name=escape(login_id))
else:
logging.info("GET request for upload.")
return render_template('upload_gs.html',
name=escape(login_id))
except Exception as ex:
logging.exception("Error when uploading.")
return render_template('upload_gs.html', error=str(ex),
name=escape(login_id))
def validate_fuzzy_request():
msg = []
if 'libcsv' not in request.files or request.files['libcsv'].filename == '':
msg.append("Library data CSV is missing.")
if 'grcsv' not in request.files or request.files['grcsv'].filename == '':
msg.append("Goodreads data CSV is missing.")
return msg
def _process_fuzzy_match(login_id, fp_gr, fp_lib, score, match_mode):
from webapp.fuzzy_match import find_fuzz
try:
out_file = os.path.join(os.getcwd(), login_id,
"{0}_fuzzy_result.csv".format(get_ts_str()))
log_file = os.path.join(os.getcwd(), login_id,
"fuzzy_task.log")
find_fuzz(fp_gr, fp_lib, score, match_mode,
out_file=out_file, log_file=log_file)
logging.info("Fuzzy check complete.")
except Exception as ex:
logging.exception("Error occurred.")
@auth_check
def fuzzy_check():
login_id = session['login_id']
out_dir = os.path.join(os.getcwd(), login_id)
Path(out_dir).mkdir(parents=True, exist_ok=True)
logs_file = os.path.join(out_dir, "fuzzy_task.log")
try:
if request.method == 'POST':
msg = validate_fuzzy_request()
if len(msg) > 0:
logging.error(". ".join(msg))
return render_template('fuzzy.html',
error=". ".join(msg),
name=escape(login_id))
lib_csv, gr_csv = request.files['libcsv'], request.files['grcsv']
sfn_libcsv = secure_filename(lib_csv.filename)
sfn_grcsv = secure_filename(gr_csv.filename)
fp_lib = os.path.join(out_dir, sfn_libcsv)
lib_csv.save(fp_lib)
fp_gr = os.path.join(out_dir, sfn_grcsv)
gr_csv.save(fp_gr)
match_mode = request.form['match_mode']
score = request.form['score']
TPE.submit(_process_fuzzy_match, login_id,
fp_gr, fp_lib, score, match_mode)
return redirect(url_for('bp.fuzzy_check'))
else:
logging.info("GET request for upload.")
res_files = glob.glob("{0}/*_fuzzy_result.csv".format(out_dir))
return render_template('fuzzy.html', name=escape(login_id),
data=res_files, logs_file=logs_file if Path(logs_file).exists() else None)
except Exception as ex:
logging.exception("Error when handling fuzzy check.")
return render_template('fuzzy.html', error=str(ex),
name=escape(login_id),
logs_file=logs_file if Path(logs_file).exists() else None)
# Add the view routes
vbp.add_url_rule('/fuzz', view_func=fuzzy_check, methods=['GET', 'POST'])
vbp.add_url_rule('/clear', view_func=clear_dir, methods=['GET'])
vbp.add_url_rule('/clear/<path:dir_path>', view_func=clear_dir, methods=['GET'])
vbp.add_url_rule('/<path:file_path>', view_func=get_file, methods=['GET'])
vbp.add_url_rule('/status', view_func=task_status, methods=['GET'])
vbp.add_url_rule('/start', view_func=start, methods=['GET', 'POST'])
vbp.add_url_rule('/logout', view_func=logout, methods=['GET'])
vbp.add_url_rule('/gs', view_func=upload_file, methods=['GET', 'POST'])
vbp.add_url_rule('/gr', view_func=home, methods=['GET'])
vbp.add_url_rule('/', view_func=index, methods=['GET'])
vbp.add_url_rule('/login', view_func=login, methods=['GET', 'POST'])
# vbp.add_url_rule('/signup', view_func=signup, methods=['GET', 'POST'])
|
#!/usr/bin/python3
# -*- coding: utf-8 -*-
##===-----------------------------------------------------------------------------*- Python -*-===##
##
## S E R I A L B O X
##
## This file is distributed under terms of BSD license.
## See LICENSE.txt for more information.
##
##===------------------------------------------------------------------------------------------===##
from numpy import logical_not, isclose, nditer
class ErrorList(object):
def __init__(self, input_field, reference_field, atol, rtol):
        # Pass tolerances by keyword: numpy.isclose's positional order is
        # (a, b, rtol, atol), which would otherwise silently swap them here.
        self.__error_positions = logical_not(
            isclose(input_field, reference_field, rtol=rtol, atol=atol))
self.__error_list = []
it_value = nditer(self.__error_positions, flags=["multi_index"])
it_input = nditer(input_field)
it_reference = nditer(reference_field)
while not it_value.finished:
if it_value.value:
self.__error_list += [[it_value.multi_index, it_input.value, it_reference.value]]
it_value.iternext()
it_input.iternext()
it_reference.iternext()
def list(self):
return self.__error_list
def positions(self):
return self.__error_positions
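# Usage sketch (synthetic fields; tolerances are illustrative):
#   import numpy as np
#   errors = ErrorList(np.array([1.0, 2.0]), np.array([1.0, 2.5]),
#                      atol=1e-8, rtol=1e-5)
#   for index, value, reference in errors.list():
#       print(index, value, reference)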
|
from django.conf import settings
from django.utils import timezone
from django.utils.encoding import python_2_unicode_compatible
from swingers import models
from swingers.models import Audit
from swingers.utils.auth import make_nonce
from model_utils import Choices
from reversion import revision
import threading
import requests
import hashlib
class Job(models.Model):
STATES = Choices("queued", "running", "completed")
name = models.CharField(max_length=320, unique=True)
args = models.TextField(null=True, blank=True)
output = models.TextField(null=True, blank=True)
state = models.CharField(choices=STATES, default=STATES.queued,
max_length=64)
class Meta:
app_label = 'swingers'
class ApplicationLink(models.Audit):
AUTH_METHOD = Choices('basic', 'md5', 'sha1', 'sha224', 'sha256', 'sha384',
'sha512')
client_name = models.CharField(
max_length=320,
help_text="project/host of client, this app is {0}".format(
settings.SITE_NAME))
server_name = models.CharField(
max_length=320,
help_text="project/host of server, this app is {0}".format(
settings.SITE_NAME))
server_url = models.TextField(
help_text="URL service backend requests should be made to")
identifier = models.CharField(
max_length=320, null=True, blank=True,
help_text="IP or Hostname, optional for added security")
secret = models.CharField(max_length=320, help_text="Application secret")
timeout = models.IntegerField(default=600,
help_text="Timeout of oauth tokens in "
"seconds")
auth_method = models.CharField(choices=AUTH_METHOD,
default=AUTH_METHOD.sha256, max_length=20)
class Meta(Audit.Meta):
unique_together = ("client_name", "server_name")
app_label = 'swingers'
def natural_key(self):
return (self.client_name, self.server_name)
def get_by_natural_key(self, client_name, server_name):
return self.get(client_name=client_name, server_name=server_name)
def get_access_token(self, user_id, expires=600):
"""
        Returns an access token for the current user.
Note: uses a hardcoded URL when determining where to send the request.
"""
url = self.server_url + "/api/swingers/v1/{0}/request_token"
nonce = make_nonce()
r = requests.get(url.format(self.server_name), params={
"user_id": user_id,
"nonce": nonce,
"client_secret": self.get_client_secret(user_id, nonce),
"client_id": self.client_name,
"expires": expires
})
if r.ok:
return r.content
else:
r.raise_for_status()
def get_client_secret(self, user_id, nonce):
"""
Returns the client secret based on a user and a nonce.
"""
stringtohash = "{0}{1}{2}".format(self.secret, user_id, nonce)
return getattr(hashlib, self.auth_method)(stringtohash).hexdigest()
@python_2_unicode_compatible
class Token(models.Model):
link = models.ForeignKey(ApplicationLink)
user = models.ForeignKey(
settings.AUTH_USER_MODEL, related_name="%(app_label)s_%(class)s_user",
help_text="User token authenticates as")
url = models.TextField(help_text="Suburl this token is restricted to, "
"relative e.g. (/my/single/service/entrypoint)",
default="/")
secret = models.CharField(max_length=320, help_text="Token Secret",
unique=True)
modified = models.DateTimeField(default=timezone.now, editable=False)
timeout = models.IntegerField(default=600, help_text="Timeout token in "
"seconds, 0 means never times out")
class Meta:
app_label = 'swingers'
def save(self, *args, **kwargs):
try:
revision.unregister(self.__class__)
        except Exception:
            # the model may not be registered with reversion; ignore
            pass
super(Token, self).save(*args, **kwargs)
def natural_key(self):
return (self.secret, )
def get_by_natural_key(self, secret):
return self.get(secret=secret)
def __str__(self):
return "{0} - {1}:{2}@{3}".format(self.pk, self.user, self.secret,
self.link.client_name)[:320]
|
#!/usr/bin/env python3
# Copyright 2009-2017 BHG http://bw.org/
# str is a built-in class.
# RevStr inherits from str and overrides the string representation method so
# that, instead of returning the string itself, it returns a slice with a
# negative step, which reverses the string.
class RevStr(str):
def __str__(self):
return self[::-1]
def main():
hello = RevStr('Hello, World.')
print(hello)
if __name__ == '__main__':
main()
|
#!/usr/bin/python3
#
# MIT License
#
# Copyright (c) 2018 Erriez
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
#
##
# This is a wxPython GUI example to control a single R421A08 relay board with a USB - RS485
# dongle.
#
# Source: https://github.com/Erriez/R421A08-rs485-8ch-relay-board
#
import os
import sys
import wx
import wx.adv
# Add system path to find relay_ Python packages
sys.path.append('.')
sys.path.append('..')
import relay_modbus
import relay_boards
# Required: Configure serial port, for example:
# On Windows: 'COMx'
# On Linux: '/dev/ttyUSB0'
SERIAL_PORT = 'COM3'
# Default relay board address (DIP switch 1 ON, others OFF)
ADDRESS = 1
# Path to window icon
ICO_PATH = '../images/relay.ico'
class RelayFrame(wx.Frame):
def __init__(self, parent, serial_port, address):
# Create frame
wx.Frame.__init__(self, parent, id=wx.ID_ANY, title=u'GUI example', pos=wx.DefaultPosition,
size=wx.Size(275, 280), style=wx.DEFAULT_FRAME_STYLE | wx.TAB_TRAVERSAL)
# Define variables
self.m_relay_modbus = None
self.m_relay_board = None
self.m_statusBar = None
# Create modbus and relay object
self.CreateObjects(serial_port, address)
# Create window
self.CreateWindow(parent)
def CreateObjects(self, serial_port, address):
# Check argument types
assert type(serial_port) == str
assert type(address) == int
# Create MODBUS object and open serial port
self.m_relay_modbus = relay_modbus.Modbus(serial_port)
try:
self.m_relay_modbus.open()
except relay_modbus.SerialOpenException as err:
wx.MessageBox(str(err), u'Failure', style=wx.OK | wx.ICON_STOP)
sys.exit(1)
# Create relay board object
self.m_relay_board = relay_boards.R421A08(self.m_relay_modbus)
if address < 0 or address >= self.m_relay_board.num_addresses:
wx.MessageBox(u'Invalid address {}'.format(address), u'Failure',
style=wx.OK | wx.ICON_STOP)
sys.exit(1)
self.m_relay_board.address = address
# Set window title
self.SetTitle(u'{} address {}'.format(self.m_relay_modbus.serial_port,
self.m_relay_board.address))
def CreateWindow(self, parent):
self.SetSizeHints(wx.DefaultSize, wx.DefaultSize)
self.SetMinSize(wx.Size(250, 250))
self.SetBackgroundColour(wx.Colour(240, 240, 240))
if os.path.exists(ICO_PATH):
self.SetIcon(wx.Icon(ICO_PATH))
self.CreateMenuBar()
self.CreateRelayButtons()
self.CreateStatusbar()
self.Layout()
self.Centre(wx.BOTH)
def CreateMenuBar(self):
# Create menu
m_menubar = wx.MenuBar(0)
# File menu
m_menuFile = wx.Menu()
m_menuItemQuit = wx.MenuItem(m_menuFile, wx.ID_ANY, u'&Quit' + u'\t' + u'Ctrl+Q',
wx.EmptyString, wx.ITEM_NORMAL)
m_menuFile.Append(m_menuItemQuit)
m_menubar.Append(m_menuFile, u'&File')
# About menu
m_menuAbout = wx.Menu()
m_menuItemAbout = wx.MenuItem(m_menuAbout, wx.ID_ANY, u'&About' + u'\t' + u'Shift+?',
wx.EmptyString, wx.ITEM_NORMAL)
m_menuAbout.Append(m_menuItemAbout)
m_menubar.Append(m_menuAbout, u'&Help')
# Set menu
self.SetMenuBar(m_menubar)
self.Bind(wx.EVT_MENU, self.OnMenuQuit, id=m_menuItemQuit.GetId())
self.Bind(wx.EVT_MENU, self.OnMenuAbout, id=m_menuItemAbout.GetId())
def CreateRelayButtons(self):
gSizer = wx.GridSizer(0, 2, 0, 0)
# Create button 'all on'
m_btnAllOn = wx.Button(self, 9, u'All on', wx.DefaultPosition, wx.DefaultSize, 0)
gSizer.Add(m_btnAllOn, 0,
wx.ALL | wx.ALIGN_CENTER_HORIZONTAL | wx.ALIGN_CENTER_VERTICAL, 5)
m_btnAllOn.Bind(wx.EVT_BUTTON, self.OnBtnAllOnClick)
# Create button 'all off'
m_btnAllOff = wx.Button(self, 10, u'All off', wx.DefaultPosition, wx.DefaultSize, 0)
gSizer.Add(m_btnAllOff, 0,
wx.ALL | wx.ALIGN_CENTER_HORIZONTAL | wx.ALIGN_CENTER_VERTICAL, 5)
m_btnAllOff.Bind(wx.EVT_BUTTON, self.OnBtnAllOffClick)
# Create toggle buttons
for relay in range(self.m_relay_board.num_relays):
            # Map relay index to grid position: even indices fill the first
            # column (ids 0..3), odd indices the second column (ids 4..7).
if relay & 1:
relay = 4 + int((relay - 1) / 2)
else:
relay = int(relay / 2)
button_text = u'Toggle ' + str(relay + 1)
m_btnToggleRelay = wx.Button(self, relay, button_text,
wx.DefaultPosition, wx.DefaultSize, 0)
gSizer.Add(m_btnToggleRelay, 0,
wx.ALL | wx.ALIGN_CENTER_HORIZONTAL | wx.ALIGN_CENTER_VERTICAL, 5)
m_btnToggleRelay.Bind(wx.EVT_BUTTON, self.OnBtnToggleClick)
self.SetSizer(gSizer)
def CreateStatusbar(self):
self.m_statusBar = self.CreateStatusBar(1, wx.STB_SIZEGRIP, wx.ID_ANY)
self.m_statusBar.SetStatusText(u'Click on a button')
def OnMenuQuit(self, event):
self.Close()
def OnMenuAbout(self, event):
info = wx.adv.AboutDialogInfo()
info.SetName('Relay example GUI')
if os.path.exists(ICO_PATH):
info.SetIcon(wx.Icon(ICO_PATH))
info.SetVersion('v1.0')
info.SetCopyright('(C) 2018 by Erriez')
info.SetDescription('Relay example with wxPython {}'.format(wx.version()))
info.SetWebSite('https://github.com/Erriez/R421A08-rs485-8ch-relay-board',
'Source & Documentation')
info.AddDeveloper('Erriez')
info.SetLicense('MIT License: Completely and totally open source!')
wx.adv.AboutBox(info)
def OnBtnAllOnClick(self, event):
try:
retval = self.m_relay_board.on_all()
except relay_modbus.TransferException as err:
self.m_statusBar.SetStatusText(u'Relays on - {}'.format(str(err)))
else:
if retval:
self.m_statusBar.SetStatusText('All relays on.')
else:
self.m_statusBar.SetStatusText(u'Error: Could not turn on relays!')
def OnBtnAllOffClick(self, event):
try:
retval = self.m_relay_board.off_all()
except relay_modbus.TransferException as err:
self.m_statusBar.SetStatusText(u'Relays off - {}'.format(str(err)))
else:
if retval:
self.m_statusBar.SetStatusText('All relays off.')
else:
self.m_statusBar.SetStatusText(u'Error: Could not turn off relays!')
def OnBtnToggleClick(self, event):
relay = event.GetId() + 1
try:
retval = self.m_relay_board.toggle(relay)
except relay_modbus.TransferException as err:
self.m_statusBar.SetStatusText(u'Relay {} - {}'.format(relay, str(err)))
else:
if retval:
self.m_statusBar.SetStatusText('Relay {} toggled.'.format(relay))
else:
self.m_statusBar.SetStatusText(u'Error: Could not toggle relay {}!'.format(relay))
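# Command-line usage (serial port and address are positional; the script name
# is illustrative): python relay_gui.py COM3 1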
def main():
if len(sys.argv) == 3:
serial_port = str(sys.argv[1])
address = int(sys.argv[2])
else:
serial_port = SERIAL_PORT
address = ADDRESS
app = wx.App()
form = RelayFrame(None, serial_port, address)
form.Show()
app.MainLoop()
if __name__ == '__main__':
main()
|
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Functionality for loading events from a record file."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import inspect
from .record_reader import RecordReader
from .proto import event_pb2
class raise_exception_on_not_ok_status(object):
"""Context manager to check for C API status."""
def __enter__(self):
return "Status not OK"
def __exit__(self, type_arg, value_arg, traceback_arg):
return False # False values do not suppress exceptions
class RawEventFileLoader(object):
"""An iterator that yields Event protos as serialized bytestrings."""
def __init__(self, file_path):
if file_path is None:
raise ValueError('A file path is required')
with raise_exception_on_not_ok_status() as status:
self._reader = RecordReader(str(file_path), 0, None, status)
self._file_path = file_path
if not self._reader:
raise IOError('Failed to open a record reader pointing to %s' % file_path)
def Load(self):
"""Loads all new events from disk as raw serialized proto bytestrings.
Calling Load multiple times in a row will not 'drop' events as long as the
return value is not iterated over.
Yields: All event proto bytestrings in the file that have not been yielded yet.
"""
get_next_args = inspect.getargspec(self._reader.GetNext).args
legacy_get_next = (len(get_next_args) > 1)
while True:
try:
if legacy_get_next:
with raise_exception_on_not_ok_status() as status:
self._reader.GetNext(status)
else:
self._reader.GetNext()
except:
break
yield self._reader.record()
class EventFileLoader(RawEventFileLoader):
"""An iterator that yields parsed Event protos."""
def Load(self):
"""Loads all new events from disk.
Yields: All events in the file that have not been yielded yet.
"""
for record in super(EventFileLoader, self).Load():
yield event_pb2.Event.FromString(record)
class TimestampedEventFileLoader(EventFileLoader):
"""An iterator that yields (UNIX timestamp float, Event proto) pairs."""
def Load(self):
"""Loads all new events and their wall time values from disk.
Yields: Pairs of (UNIX timestamp float, Event proto) for all events
in the file that have not been yielded yet.
"""
for event in super(TimestampedEventFileLoader, self).Load():
yield (event.wall_time, event)
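# Usage sketch (the event file path is illustrative):
#   loader = EventFileLoader('events.out.tfevents.1234567890.myhost')
#   for event in loader.Load():
#       print(event.step, event.wall_time)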
|
# -*- coding: utf-8 -*-
"""
Created on Tue Jan 12 11:53:50 2021
@author: user
"""
import os
import tensorflow as tf
import numpy as np
from keras.models import load_model
from keras.optimizers import Adam
from keras.initializers import RandomNormal
from keras.models import Model
from keras.models import Input
from keras.layers import Conv2D
from keras.layers import ReLU
from keras.layers import Activation
from keras.layers import BatchNormalization
from os import listdir
from numpy import asarray
from keras.preprocessing.image import img_to_array
from keras.preprocessing.image import load_img
from flask import Flask, request
from flask import render_template
# from keras.backend import set_session
app = Flask(__name__)
UPLOAD_FOLDER = 'D:\\Paritosh\\_workspace\\webapp\\static'
global model
model = load_model("c_model_043080.h5")
# @app.route('/')
# def welc_msg():
# return "Stairway to heaven..."
@app.route('/', methods=['GET', 'POST'])
def upload_predict():
if request.method == 'POST':
image_file = request.files['image']
if image_file:
image_location = os.path.join(
UPLOAD_FOLDER,
image_file.filename
)
image_file.save(image_location)
img = load_img(os.path.join(UPLOAD_FOLDER, image_file.filename))
image_arr = img_to_array(img)
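            # keep a single channel, scale to [0, 1], then reshape to add the
            # batch and channel dims (the model expects 256x256 grayscale input)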
image_arr = image_arr[:,:,0]/255.0
image_arr = image_arr.reshape((1,256,256,1))
pred = model.predict(image_arr)
pred = np.around(pred, decimals=3)
return render_template("index.html", prediction=pred)
return render_template("index.html", prediction=0)
if __name__ == '__main__':
    app.run(host='0.0.0.0')
|
# Generated by Django 3.0.7 on 2020-06-12 18:04
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('kpis', '0002_auto_20200611_1535'),
]
operations = [
migrations.CreateModel(
name='KPICategory',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(max_length=250)),
('description', models.CharField(max_length=600)),
],
),
migrations.AddField(
model_name='kpi',
name='kind',
field=models.IntegerField(choices=[(0, 'Bar Chart'), (1, 'Line Graph'), (2, 'Scatter Plot')], default=0),
),
migrations.AlterField(
model_name='kpientry',
name='beamline',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='entries', to='kpis.Beamline'),
),
migrations.AlterField(
model_name='kpientry',
name='kpi',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='entries', to='kpis.KPI'),
),
migrations.AddField(
model_name='kpi',
name='category',
field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='kpis', to='kpis.KPICategory'),
),
]
|
__author__ = 'waziz'
import numpy as np
import sys
from collections import defaultdict
from chisel.smt import Solution
from chisel.util import npvec2str, fmap_dot
from chisel.smt import groupby
from chisel.util import obj2id
class EmpiricalDistribution(object):
"""
"""
def __init__(self,
derivations,
q_wmap, # q_features
p_wmap, # p_features
get_yield=lambda d: d.tree.projection,
empirical_q=True
):
"""
:param support: list of DerivationGroup objects, each of which represents derivations sharing the same yield (Dy)
:param p_features: list of features of the target
:param q_features: list of features of the proxy
:param get_yield: a function that returns the yield of a derivation
"""
# 0) organise the support
# group derivations by yield
support = groupby(derivations, key=get_yield)
# assign sequential ids to yields
y2i = defaultdict()
[obj2id(Dy.projection, y2i) for Dy in support]
# support of derivations
D = np.arange(len(derivations))
# support of strings
Y = np.arange(len(y2i))
# map derivation id to yield id
d2y = np.array([y2i[get_yield(d)] for d in derivations], int)
# these are the indices of the derivations projecting onto a certain string y
        y2D = [[] for _ in range(len(Y))]
for d, y in enumerate(d2y):
y2D[y].append(d)
# helper function which selects statistics (from a given array) associated with derivations for which gamma_y(d) == 1 for a given y
select = lambda array, y: array[y2D[y]]
# 1) dot products
q_dot = np.array([fmap_dot(d.vector, q_wmap) for d in derivations])
p_dot = np.array([fmap_dot(d.vector, p_wmap) for d in derivations])
r_dot = p_dot - q_dot
# 2) counts: n(d) and n(y)
nd = np.array([d.count for d in derivations], float)
ny = np.array([select(nd, y).sum() for y in Y])
# 3) instrumental probability: q(d) and q(y)
if empirical_q:
Zn = nd.sum()
qd = nd / Zn # simply, the empirical distribution
log_qd = np.log(qd)
qy = ny / Zn
else:
log_uqd = np.log(nd) + q_dot
log_qd = log_uqd - np.logaddexp.reduce(log_uqd)
qd = np.exp(log_qd)
qy = np.array([select(qd, y).sum() for y in Y])
# 4) importance weight: r(d) = ur(d)/Zr
log_urd = r_dot + np.log(nd)
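        # normalise in log space (logaddexp.reduce is a stable log-sum-exp) to avoid overflow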
log_rd = log_urd - np.logaddexp.reduce(log_urd)
rd = np.exp(log_rd)
# 5) p(y)
# where log up(y) = \sum_{d in Dy} log ur(d)
log_upy = np.array([np.logaddexp.reduce(select(log_urd, y)) for y in Y])
log_py = log_upy - np.logaddexp.reduce(log_upy)
py = np.exp(log_py)
# 6) r_y(d) = ur(d)/sum_Dy ur(d)
log_rd_y = [log_urd[d] - log_upy[d2y[d]] for d in D]
rd_y = np.exp(log_rd_y)
# 7) expected feature vectors
fd = np.array([d.vector.as_array(p_wmap.features) for d in derivations])
fdpd = fd * rd[:,np.newaxis]
fdpd_y = fd * rd_y[:,np.newaxis]
# <f(d)>_p
p_expected_f = fdpd.sum(0)
# <\gamma_y(d) f(d)>_p
p_expected_f_y = np.array([select(fdpd_y, y).sum(0) for y in Y])
dpdt = (p_expected_f_y - p_expected_f) * py[:,np.newaxis]
gd = np.array([d.vector.as_array(q_wmap.features) for d in derivations])
gdqd = gd * qd[:,np.newaxis]
# <g(d)>_q
q_expected_g = gdqd.sum(0)
# 8) KL(q||up) where up(d) = exp(theta f(d)) = exp(p_dot(d))
# = \sum_d q(d) log (q(d)/up(d))
# = \sum_d q(d) (log q(d) - log up(d))
# = \sum_d q(d) (log q(d) - p_dot(d))
KL = (qd * (log_qd - log_rd)).sum()
# dKL/dlambda = \sum_d q(d)(g(d) - <g(d)>_q)(log q(d) - log up(d) + 1)
dKLdl = (((gd - q_expected_g).transpose() * qd) * (log_qd - log_rd + 1)).transpose().sum(0)
# Evidence lower bound
# = <log ~p(d)>_q - <log q(d)>_q
# = < theta * f(d) >_q - <log q(d)>_q
self.ELB_ = ((p_dot - log_qd) * qd).sum()
#dqdl = ((gd - q_expected_g).transpose() * qd).transpose()
dqdl = (gd - q_expected_g) * qd[:,np.newaxis]
#self.dELB_ = (dqdl.transpose() * (p_dot - log_qd - 1)).transpose().sum(0)
self.dELB_ = (dqdl * (p_dot - log_qd - 1)[:, np.newaxis]).sum(0)
# H(p)
# H(q)
#self.Hq_ = - (qd * log_qd).sum(0)
#self.dHq_ = - (((gd - q_expected_g).transpose() * qd) * log_qd).transpose().sum(0)
# 9) store data
self.support_ = support
self.p_wmap_ = p_wmap
self.q_wmap_ = q_wmap
self.ny_ = ny
self.qy_ = qy
self.py_ = py
self.dpdt_ = dpdt
self.kl_ = KL
self.dkldl_ = dKLdl
self.upy_ = np.exp(log_upy)
def __iter__(self):
return iter(self.support_)
def __getitem__(self, i):
return self.support_[i]
def __len__(self):
return len(self.support_)
@property
def support(self):
return self.support_
@property
def p_wmap(self):
return self.p_wmap_
@property
def q_wmap(self):
return self.q_wmap_
def n(self, i):
"""
        Absolute counts of the i-th derivation group (0-based).
Note that in the case of importance sampling, this is only meaningful wrt the instrumental distribution,
in which case the normalised version represents the posterior q(y).
"""
return self.ny_[i]
def q(self, i):
"""a synonym for n(i)"""
return self.qy_[i]
def p(self, i):
"""
Posterior of the i-th derivation group (0-based).
That is, p(y) where support[i] = Dy = {d \in D: yield(d) = y}."""
return self.py_[i]
def copy_posterior(self):
return self.py_.copy()
def copy_dpdt(self):
return self.dpdt_.copy()
def kl(self):
return self.kl_, self.dkldl_
def elb(self):
return self.ELB_, self.dELB_
#def Hq(self):
# return self.Hq_, self.dHq_
def __str__(self):
strs = ['#p(y)\t#q(y)\t#y']
for i, Dy in enumerate(self.support_):
strs.append('{0}\t{1}\t{2}'.format(self.p(i),
self.q(i),
Dy.projection))
return '\n'.join(strs)
def solution(self, i):
return Solution(Dy=self.support_[i],
p=self.p(i),
q=self.q(i))
|
# mock backend classes from peeringdb-py client
from django_peeringdb.models import all_models
def reftag_to_cls(fn):
return fn
class Resource:
def __init__(self, tag):
self.tag = tag
class Interface:
REFTAG_RESOURCE = {
model.HandleRef.tag: Resource(model.HandleRef.tag) for model in all_models
}
def get_resource(self, concrete):
return self.REFTAG_RESOURCE[concrete.HandleRef.tag]
|
import pytest
from libband.commands.facilities import Facility
from libband.commands.helpers import (
lookup_command, make_command, lookup_packet
)
EXAMPLE_COMMANDS = [
# GET_TILES
(54400, Facility.ModuleInstalledAppList, True, 0),
# SET_THEME_COLOR
(55296, Facility.ModuleThemeColor, False, 0),
# SUBSCRIPTION_UNSUBSCRIBE_ID
(36616, Facility.LibraryRemoteSubscription, False, 8)
]
@pytest.mark.parametrize(
('command', 'facility', 'tx', 'index'), EXAMPLE_COMMANDS
)
def test_lookup_command(command, facility, tx, index):
assert lookup_command(command) == (facility, tx, index)
@pytest.mark.parametrize(
('command', 'facility', 'tx', 'index'), EXAMPLE_COMMANDS
)
def test_make_command(command, facility, tx, index):
assert make_command(facility, tx, index) == command
@pytest.mark.parametrize(
('packet', 'expected_result'),
[
(
'08f92e837601000000',
{
'arguments': b'',
'command': (Facility.LibraryJutil, True, 3),
'data_stage_size': 1
}
),
(
'08f92e88780c000000',
{
'arguments': b'',
'command': (Facility.LibraryConfiguration, True, 8),
'data_stage_size': 12
}
),
(
'0cf92e86c58000000080000000',
{
'arguments': b'\x80\x00\x00\x00',
'command': (Facility.ModuleProfile, True, 6),
'data_stage_size': 128
}
)
]
)
def test_lookup_packet(packet, expected_result):
assert lookup_packet(packet) == expected_result
|
default_app_config = 'common.apps.CommonConfig' |
class UserException(Exception):
def __init__(self, value):
self.value = value
def __str__(self):
return repr(self.value)
class ServiceUnreachable(Exception):
def __init__(self, service_name: str):
self.value = service_name
def __str__(self):
        return self.value + ' Service Unreachable'
class StoriesException(Exception):
def __init__(self, value):
self.value = value
def __str__(self):
return repr(self.value)
class NoUser(dict):
def __init__(self):
super().__init__()
self['code'] = 404
self['status'] = 'error'
        self['data'] = 'User does not exist'
class NoStats(dict):
def __init__(self):
super().__init__()
self['code'] = 403
self['status'] = 'error'
self['data'] = 'Stats not ready'
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
import boto3
import json
import time
import traceback
from ecs_crd.canaryReleaseDeployStep import CanaryReleaseDeployStep
from ecs_crd.destroyGreenStackStep import DestroyGreenStackStep
from ecs_crd.applyStrategyStep import CheckGreenHealthStep
from ecs_crd.defaultJSONEncoder import DefaultJSONEncoder
class ScaleUpServiceStep(CanaryReleaseDeployStep):
def __init__(self, infos, logger):
"""initializes a new instance of the class"""
super().__init__(infos,'Scale Up Service', logger)
def _on_execute(self):
"""operation containing the processing performed by this step"""
try:
service_arn = self._find_service_arn()
self.logger.info('')
self._log_information(key='Service', value=self.infos.green_infos.stack_name)
self._log_information(key='Arn', value=service_arn)
self.logger.info('')
self.logger.info('Scaling up in progress ...')
self.logger.info('')
client = boto3.client('ecs', region_name=self.infos.region)
if self.infos.scale_infos.desired > 1:
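                # cap the deployment so ECS may start up to (100 * desired)% tasks
                # while keeping every existing task healthy during the rollout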
client.update_service(
cluster=self.infos.cluster,
service=service_arn,
desiredCount=self.infos.scale_infos.desired,
deploymentConfiguration={
'maximumPercent': 100 * self.infos.scale_infos.desired,
'minimumHealthyPercent': 100
},
                    forceNewDeployment=True
)
else:
client.update_service(
cluster=self.infos.cluster,
service=service_arn,
desiredCount=self.infos.scale_infos.desired,
                    forceNewDeployment=True)
self._wait(self.infos.scale_infos.wait, 'Scaling up in progress')
self.logger.info('')
self.logger.info(f'Desired instances : {self.infos.scale_infos.desired}')
return CheckGreenHealthStep(self.infos, self.logger)
except Exception as e:
self.logger.error('ScaleUpServiceStep', exc_info=True)
self.infos.exit_exception = e
self.infos.exit_code = 13
return DestroyGreenStackStep(self.infos, self.logger)
def _find_service_arn(self):
"""find AWS ARN of service"""
client = boto3.client('cloudformation', region_name=self.infos.region)
response = client.describe_stacks(StackName= self.infos.green_infos.stack_name)
output = next(x for x in response['Stacks'][0]['Outputs'] if x['OutputKey']=='ServiceArn')
return output['OutputValue'] |
#!/usr/bin/env python3
#######################################################################################
# #
# Program purpose: Get the size of a file. #
# Program Author : Happi Yvan <[email protected]> #
# Creation Date : August 29, 2019 #
# #
#######################################################################################
import os
if __name__ == "__main__":
try:
file_path = input("Enter path to file: ")
print(f"\nThe size of {file_path} is: {os.path.getsize(file_path)} bytes")
except FileNotFoundError as fileNotFound:
print(f"Path is not valid file.\n{fileNotFound}")
|
import numpy as np
import csv
from scipy.stats import circmean, circvar, binned_statistic
def load_channel(mouse='28', session='140313', channel='1', freq=20000):
'''
freq is frequency in Hz
'''
mouse = 'Mouse' + mouse + '-' + session
basedata = mouse + '/'
data = []
with open(basedata + mouse + '.clu.' + channel) as csvfile:
reader = csv.reader(csvfile, quoting=csv.QUOTE_NONNUMERIC)
clusts = np.array([val[0] for val in reader])
nclusts = int(clusts[0])
clusts = clusts[1:].astype(int)
with open(basedata + mouse + '.res.' + channel) as csvfile:
reader = csv.reader(csvfile, quoting=csv.QUOTE_NONNUMERIC)
spiketimes = np.array([val[0] for val in reader])
for cluster in range(nclusts):
cluster_times = spiketimes[clusts == cluster]
data.append(cluster_times / freq)
return data
def load_channels(mouse='28', session='140313', channels=range(1, 9)):
data = []
for channel in channels:
data = data + load_channel(
mouse=mouse, session=session, channel=str(channel))
return data
def load_states(mouse='28', session='140313'):
'''
wake, REM and SWS
'''
mouse = 'Mouse' + mouse + '-' + session
basedata = mouse + '/'
data = []
for state in ['Wake', 'REM', 'SWS']:
with open(basedata + mouse + '.states.' + state) as csvfile:
reader = csv.reader(csvfile,
quoting=csv.QUOTE_NONNUMERIC,
delimiter='\t')
data.append(np.array([val for val in reader]))
return data[0], data[1], data[2]
def load_angs(mouse='28', session='140313'):
mouse = 'Mouse' + mouse + '-' + session
with open(mouse + '.ang') as csvfile:
reader = csv.reader(csvfile,
delimiter='\t',
quoting=csv.QUOTE_NONNUMERIC)
data = np.array([val for val in reader])
inds = (data[:, 1] >= 0) # only take timepoints with data
times = data[inds, 0]
angs = data[inds, 1]
return times, angs
def bin_data(data, binsize=1):
maxval = np.amax([dat[-1] for dat in data])
minval = 0
bins = np.arange(minval, np.ceil(maxval), binsize)
bindata = [np.histogram(dat, bins=bins)[0] for dat in data]
return bindata, bins
def bin_angs(times, angs, bins):
'''
can also do scipy.circvar if we want to check that the variability is reasonable
'''
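    # binned_statistic(x, values, ...) bins by its first argument and applies the
    # statistic to its second: times select the time bin, angles are averaged within it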
def fmean(samples):
return circmean(samples, nan_policy='propagate')
    binangs = binned_statistic(times, angs, statistic=fmean, bins=bins)[0]
def fvar(samples):
return circvar(samples, nan_policy='propagate')
    binvars = binned_statistic(times, angs, statistic=fvar, bins=bins)[0]
return binangs, np.sqrt(binvars) # return mean and std
def bin_states(states, bins):
data = []
for state in states:
binstates = np.zeros(len(bins) - 1)
for row in state: # add data corresponding to each row
cond1 = (bins[:-1] > row[0])
cond2 = (bins[1:] < row[1])
binstates[(cond1 & cond2)] = 1
data.append(binstates.astype(bool))
return data[0], data[1], data[2]
|
from gensim.models import Word2Vec
import numpy as np
import pickle
import multiprocessing
import time
data_root = '/nosave/lange/cu-ssp/data/'
'''
EMB_DIM = 50
WINDOW_SIZE = 20
NB_NEG = 5
NB_ITER = 10
'''
EMB_DIM = 230
WINDOW_SIZE = 20
NB_NEG = 3
NB_ITER = 24
N_GRAM = 1
def onehot_to_seq(oh_seq, index):
s = ''
for o in oh_seq:
if np.max(o) != 0:
i = np.argmax(o)
s += index[i]
else:
break
return s
#def AA2index():
#def index2AA():
print('Program has started...')
primary_list_princeton = list('ACEDGFIHKMLNQPSRTWVYX') + ['NoSeq']
q8_list_princeton = list('LBEGIHST') + ['NoSeq']
def get_princeton_data(filename, max_len=700):
    ### filename = cb6133 for train, cb513 for test
path = data_root+'data_princeton/'
primary_list=list('ACEDGFIHKMLNQPSRTWVYX') + ['NoSeq']
q8_list = list('LBEGIHST') + ['NoSeq']
data = np.load(path+filename+".npy")
data_reshape = data.reshape(data.shape[0], max_len, -1)
residue_onehot = data_reshape[:, :, 0:22]
residue_q8_onehot = data_reshape[:, :, 22:31]
profile = data_reshape[:, :, 35:57]
# pad profiles to same length
zero_arr = np.zeros((profile.shape[0], max_len - profile.shape[1], profile.shape[2]))
profile_padded = np.concatenate([profile, zero_arr], axis=1)
residue_array = np.array(primary_list)[residue_onehot.argmax(2)]
q8_array = np.array(q8_list)[residue_q8_onehot.argmax(2)]
residue_str_list = []
q8_str_list = []
for vec in residue_array:
x = ''.join(vec[vec != 'NoSeq'])
residue_str_list.append(x)
for vec in q8_array:
x = ''.join(vec[vec != 'NoSeq'])
q8_str_list.append(x)
return residue_str_list, residue_onehot, q8_str_list, residue_q8_onehot, profile_padded
def get_netsurf_data(filename, max_len=None):
    # filename = train_full / cb513_full / casp12_full / ts115_full
seq_list = list('ACDEFGHIKLMNPQRSTVWY')
path = data_root + 'netsurfp/'
input_onehot = np.load(path + filename + '_input.npy')
q8_onehot = np.load(path + filename + '_q9.npy')
profiles = np.load(path + filename + '_hmm.npy')
prim_seq = np.load(path + filename + '_q9_AA_str.npy')
'''
prim_seq = []
for i, oh in enumerate(input_onehot):
seq = onehot_to_seq(oh, seq_list)
prim_seq.append(seq)
np.save(path+filename + '_q9_AA_str.npy', prim_seq)
print('saved AA '+filename+' to disk.')
'''
return prim_seq, input_onehot, q8_onehot, profiles
def get_qzlshy_data(filename, maxlen=700):
### filename = train or test
path = data_root+'data_qzlshy/'
pssm = np.load(path+filename+'_pssm.npy')
hmm = np.load(path+filename+'_hmm.npy')
input_AA = np.load(path+filename+'_input.npy')
q8_AA = np.load(path+filename+'_q8.npy')
return input_AA, q8_AA, pssm, hmm
def load_data(dataname, mode):
if dataname=='princeton':
if mode=='train':
filename = 'cb6133'
if mode == 'test':
filename= 'cb513'
return get_princeton_data(filename)
if dataname == 'netsurfp':
if mode == 'train':
filename = 'train_full'
if mode == 'test':
filename = 'cb513_full'
return get_netsurf_data(filename)
if dataname == 'qzlshy':
return get_qzlshy_data(mode)
def seq2ngrams2(seqs, n = 3):
if n==1:
return seqs
else:
result = []
n_begin = int((n-1)/2)
n_end = (n-1) - n_begin
for seq in seqs:
seq = ('C'*n_begin)+seq+('C'*n_end)
            result.append([seq[i:i + n] for i in range(len(seq) - n + 1)])
return np.array(result)
def seq2ngrams(seqs, n):
"""
'AGAMQSASM' => [['AGA', 'MQS', 'ASM'], ['GAM','QSA'], ['AMQ', 'SAS']]
"""
result = []
for seq in seqs:
a, b, c = zip(*[iter(seq)] * n), zip(*[iter(seq[1:])] * n), zip(*[iter(seq[2:])] * n)
str_ngrams = []
for ngrams in [a, b, c]:
for ngram in ngrams:
str_ngrams.append("".join(ngram))
result.append(str_ngrams)
return result
def get_embedding(dataname='netsurfp', mode='train', data=None, n_gram = N_GRAM):
start_time=time.time()
# load input data
if data is None:
data = load_data(dataname, mode)
print('Load data..')
#onehot2AA
seqs = data[0]
#create n-grams from AA sequence
print('Create n-grams for n = {}...'.format(n_gram))
ngram_seq = seq2ngrams(seqs, n=n_gram)
print('Perform Word2Vec embedding...')
w2v = Word2Vec(ngram_seq, size=EMB_DIM, window=WINDOW_SIZE,
negative=NB_NEG, iter= NB_ITER, min_count=1, sg=1,
workers = multiprocessing.cpu_count())
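    # NOTE: gensim >= 4.0 renamed size -> vector_size and iter -> epochs; this call targets the gensim 3.x API.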
embed_time = time.time()
m,s = divmod(embed_time-start_time, 60)
print("Needed {:.0f}min {:.0f}s for W2V embedding.".format(m, s))
word_vectors = w2v.wv
print('We have '+str(len(word_vectors.vocab))+ ' n-grams.')
return w2v
def embed_data(seqs, model, n_gram=N_GRAM):
embed_seq = np.zeros((len(seqs), 700, EMB_DIM))
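    # sequences are truncated to 700 residues; shorter ones remain zero-padded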
ngram_seq = seq2ngrams(seqs, n=n_gram)
#new_model = model.train(ngram_seq)
for i, grams in enumerate(ngram_seq):
for j, g in enumerate(grams[:700]):
embed_seq[i, j, :] = model.wv[g]
print(embed_seq.shape)
return embed_seq
datanames = ['princeton', 'netsurfp', 'qzlshy']
nb_try = 4
try:
w2v_dict = Word2Vec.load('word2vec'+str(nb_try)+'.model')
print('Found model! Load...')
except Exception:
print('No w2v model found. Get new embedding...')
start_time = time.time()
w2v_dict = get_embedding(mode='train')
time_end = time.time() - start_time
m, s = divmod(time_end, 60)
print("The program needed {:.0f}min {:.0f}s to generate the embedding.".format(m, s))
w2v_dict.save('word2vec' + str(nb_try) + '.model')
print('Saved model to '+'word2vec' + str(nb_try) + '.model')
start_time = time.time()
w2v_input = embed_data(get_netsurf_data('train_full')[0], w2v_dict)
np.save(data_root+'netsurfp/embedding/train_full_700_input_word2vec_'+str(nb_try)+'.npy', w2v_input)
print('Data has been saved to '+data_root+'netsurfp/embedding/train_full_700_input_word2vec_'+str(nb_try)+'.npy')
time_end = time.time()-start_time
m, s = divmod(time_end, 60)
print("The program needed {:.0f}min {:.0f}s to embed training data.".format(m, s))
w2v_input = embed_data(get_netsurf_data('cb513_full')[0], w2v_dict)
np.save(data_root+'netsurfp/embedding/cb513_full_700_input_word2vec_'+str(nb_try)+'.npy', w2v_input)
print('Data has been saved to '+data_root+'netsurfp/embedding/cb513_full_700_input_word2vec_'+str(nb_try)+'.npy')
w2v_input = embed_data(get_netsurf_data('casp12_full')[0], w2v_dict)
np.save(data_root+'netsurfp/embedding/casp12_full_700_input_word2vec_'+str(nb_try)+'.npy', w2v_input)
print('Data has been saved to '+data_root+'netsurfp/embedding/casp12_full_700_input_word2vec_'+str(nb_try)+'.npy')
w2v_input = embed_data(get_netsurf_data('ts115_full')[0], w2v_dict)
np.save(data_root+'netsurfp/embedding/ts115_full_700_input_word2vec_'+str(nb_try)+'.npy', w2v_input)
print('Data has been saved to '+data_root+'netsurfp/embedding/ts115_full_700_input_word2vec_'+str(nb_try)+'.npy')
|
import re
import socket
import requests
from BaseSearch import BaseSearch
from BeautifulSoup import BeautifulSoup, BeautifulStoneSoup
socket.setdefaulttimeout(15)
def getLink(url):
f = None
try:
r = requests.get(url,headers={'Accept-encoding':'gzip'})
f = r.text
soup = BeautifulSoup(f)
        # Look for a Cloudflare redirect?
formNode = soup.findAll('form', {'id' : 'challenge-form'})[0]
if formNode is not None:
urlpath = formNode['action']
params = ''
first = True
for child in soup.findAll('input', {'type' : 'hidden'}):
iname = child['name']
ivalue = None
try:
ivalue = child['value']
except:
pass
if ivalue is None:
ivalue = "wtf"
if not first:
params = params + '&'
params = params + iname + '=' + ivalue
first = False
newUrl = url + urlpath + '?' + params
print 'redirect to: %s' % newUrl
r = requests.get(newUrl,headers={'Accept-encoding':'gzip'})
f = r.text
except:
pass
return f
class Search(BaseSearch):
def __init__(self):
self.search_uris = ['https://limetorrents.cc/search/all/'
]
def search(self, terms, settings={}):
torrents = []
f = None
for url in self.search_uris:
final_url = url + terms.replace(' ','%20') + '/seeds/1/'
print 'search URL: %s' % final_url
f = getLink(final_url)
if f is not None:
                break
if not f:
raise Exception('Out of proxies')
soup = BeautifulSoup(f)
links = []
for details in soup.findAll('div', {'class': 'tt-name'}):
            sub = details.findAll('a')
for a in sub:
if a['href'].find('.torrent?') == -1:
par = details.parent.parent
seedNode = par.find('td', {'class':'tdseed'})
leechNode = par.find('td', {'class':'tdleech'})
if seedNode is None or leechNode is None:
                        break
name = a.text
seeds = int(seedNode.text.replace(',',''))
leechers = int(leechNode.text.replace(',',''))
trusted = False
if par.find('img', {'title':'Verified torrent'}) is not None:
trusted = True
turl = a['href']
# Follow the new link
baseUrl = final_url.split('/')[0:3]
turl = '/'.join(baseUrl) + turl
f = getLink(turl)
if not f:
raise Exception('Invalid link')
newSoup = BeautifulSoup(f)
for mag in newSoup.findAll('a'):
if mag['href'].startswith('magnet:?'):
url = mag['href']
# print "name : %s, seeds: %d, trusted: %s" % (name,seeds,trusted)
if trusted or 'trusted_uploaders' not in settings.keys() or str(settings['trusted_uploaders']).lower() != 'true':
torrents.append({
'url': url,
'name': name,
'seeds': seeds,
'leechers': leechers,
})
sorted_torrents = sorted(torrents,key = lambda k: k['seeds'], reverse=True)
return sorted_torrents
if __name__ == '__main__':
s = Search()
results = s.search('deadliest catch')
print results
|
# Author: He Ye (Alex)
# Date: 2019-11-13
import cv2
#import copy
from threading import Thread
from queue import Queue
#from Queue import Queue
VDEV = "/dev/video0"
def showVideoInfo(cap):
try:
fps = cap.get(cv2.CAP_PROP_FPS)
fourcc = cap.get(cv2.CAP_PROP_FOURCC)
#count = cap.get(cv2.CAP_PROP_FRAME_COUNT)
size = (int(cap.get(cv2.CAP_PROP_FRAME_WIDTH)),
int(cap.get(cv2.CAP_PROP_FRAME_HEIGHT)))
ret, firstframe = cap.read()
if ret:
print("FPS: %.2f" % fps)
print("FOURCC: %s" % fourcc)
#print("COUNT: %.2f" % count)
print("WIDTH: %d" % size[0])
print("HEIGHT: %d" % size[1])
return cap, fps, size, firstframe
else:
print("Video can not read!")
    except Exception:
        print("Error in showVideoInfo")
def setVideoInfo(cap, fps, width, height):
fourcc = cv2.VideoWriter_fourcc('M','J','P','G')
#fourcc = cv2.CV_FOURCC('M','J','P','G')
print("set fourcc, MJPG: 1196444237")
print(fourcc)
cap.set(cv2.CAP_PROP_FOURCC, fourcc)
cap.set(cv2.CAP_PROP_FPS, fps)
cap.set(cv2.CAP_PROP_FRAME_WIDTH, width)
cap.set(cv2.CAP_PROP_FRAME_HEIGHT, height)
def putVideoFrame(cap, qf):
while (True):
        ret, frame = cap.read()
        if not ret:
            qf.put(None)
            break
        #cv2.imshow("preview", frame)
        gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
rotate_frame = cv2.rotate(gray, cv2.ROTATE_90_CLOCKWISE)
cv2.imshow("rotate", rotate_frame)
qf.put(rotate_frame)
if cv2.waitKey(1) & 0xFF == ord('q'):
qf.put(None)
break
print("Quit putVideoFrame")
#print(" Producer: Lost Frame Queue Size: %d" %(qf.qsize()))
def initCamera(cap, fps):
showVideoInfo(cap)
setVideoInfo(cap, fps, 640, 480)
showVideoInfo(cap)
class ImgCap(Thread):
def __init__(self, cap, queue_list, signal):
Thread.__init__(self)
self.cap = cap
self.qlist = queue_list
self.signal = signal
self.frame_count = 0
def run(self):
while (True):
for queue in self.qlist:
                ret, frame = self.cap.read()
                if not ret:
                    continue
'''
Note:
Only one cv2 windows can be enabled for multi-thread
'''
cv2.imshow("Original", frame)
if cv2.waitKey(1) & 0xFF == ord('q'):
self.signal = "OFF"
break
gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
rotate_frame = cv2.rotate(gray, cv2.ROTATE_90_CLOCKWISE)
queue.put(rotate_frame)
self.frame_count += 1
if self.signal == "OFF":
for queue in self.qlist:
queue.put(None)
break
print("Quit ImgCap")
def terminate(self):
print("ImgCap OFF")
self.signal = "OFF"
def count_frame(self):
return self.frame_count
|
import MySQLdb
from model.endereco import Endereco
class EnderecoDao:
conexao = MySQLdb.connect(host='localhost', database='aulabd', user='root', passwd='')
cursor = conexao.cursor()
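    # NOTE: building SQL with f-strings is open to SQL injection; parameterized queries (cursor.execute(sql, params)) would be safer.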
def listar_todos(self):
comando_sql_select = "SELECT * FROM endereco"
self.cursor.execute(comando_sql_select)
resultado = self.cursor.fetchall()
lista_enderecos_classe = self.converter_tabela_classe(resultado)
return lista_enderecos_classe
def buscar_por_id(self, id):
comando_sql_select = f"SELECT * FROM endereco WHERE ID= {id}"
self.cursor.execute(comando_sql_select)
resultado = self.cursor.fetchone()
return resultado
    def converter_tabela_classe(self, lista_tuplas):
        lista_enderecos = []
        for e in lista_tuplas:
            e1 = Endereco()
            e1.id = e[0]
            e1.logradouro = e[1]
            e1.numero = e[2]
            e1.complemento = e[3]
            e1.bairro = e[4]
            e1.cidade = e[5]
            lista_enderecos.append(e1)
        return lista_enderecos
def salvar(self,endereco):
comando=f"INSERT INTO endereco (LOGRADOURO,NUMERO,COMPLEMENTO,BAIRRO,CIDADE) VALUES ('{endereco.logradouro}','{endereco.numero}','{endereco.complemento}','{endereco.bairro}','{endereco.cidade}')"
self.cursor.execute(comando)
self.conexao.commit()
    def alterar(self, endereco):
        comando = f"UPDATE endereco SET LOGRADOURO='{endereco.logradouro}',NUMERO={endereco.numero},COMPLEMENTO='{endereco.complemento}',BAIRRO='{endereco.bairro}',CIDADE='{endereco.cidade}' WHERE ID={endereco.id}"
        self.cursor.execute(comando)
        self.conexao.commit()
    def deletar(self, endereco):
        comando = f'DELETE FROM endereco WHERE ID={endereco.id}'
self.cursor.execute(comando)
self.conexao.commit() |
class HeaderMismatchException(Exception): pass
class ChunkFailedToExist(Exception): pass
class EverythingWentFine(Exception): pass
class ProcessingOverlapError(Exception): pass
class BadTimecodeError(Exception): pass
class BadFileNameError(Exception): pass
|
#- Python 3 source code
#- compute-csc108-max-nodes.py ~~
#
# This program is for verifying claims made in the PMBS 2018 submission.
# There are prettier ways to do it, but I'm in a hurry.
#
# ~~ (c) SRW, 28 Aug 2018
# ~~ last updated 05 Dec 2018
import os
import sqlite3
###
def analyze(connection):
cursor = connection.cursor()
query = """
SELECT SampleID,
strftime("%d-%m-%Y %H:%M", SampleTime, "unixepoch") AS time,
max(nodes) AS n
FROM (
SELECT *,
CASE
WHEN ReqNodes IS NULL THEN
ReqProcs / 16
ELSE
ReqNodes
END nodes
FROM active
WHERE
Account = "CSC108"
AND User = "doleynik"
AND JobName LIKE "SAGA-Python-PBSJobScript.%"
)
;
"""
id = None
nodes = None
time = None
for row in cursor.execute(query):
id = row["SampleID"]
nodes = row["n"]
time = row["time"]
print("SampleID: %s (%s)" % (id, time))
print("# of nodes: %s" % (nodes))
query = """
SELECT count(DISTINCT JobID) AS n
FROM active
WHERE
SampleID = "{id}"
AND Account = "CSC108"
AND User = "doleynik"
AND JobName LIKE "SAGA-Python-PBSJobScript.%"
;
""".format(id = id)
for row in cursor.execute(query):
print("# of jobs: %s" % (row["n"]))
###
def main():
# Store current working directory.
cwd = os.getcwd()
# Find the data directory, where this script is running remotely at OLCF and
# locally on a personal laptop, for example.
if os.path.isdir("/lustre/atlas/proj-shared/csc108/data/moab/"):
data_dir = "/lustre/atlas/proj-shared/csc108/data/moab/"
elif os.path.isdir(os.path.join(cwd, "moab")):
data_dir = os.path.join(cwd, "moab")
else:
raise Exception("Data directory not found.")
# Create string to represent path to database file.
dbfilename = os.path.join(data_dir, "moab-data.sqlite")
# Open connection to the database (file).
connection = sqlite3.connect(dbfilename)
# Enable users to access columns by name instead of by index.
connection.row_factory = sqlite3.Row
# Ensure read-only access to the database
connection.execute("PRAGMA query_only = true;")
    # Run custom analysis code.
analyze(connection)
# Commit any changes and close the connection to the database.
connection.commit()
connection.close()
###
if __name__ == "__main__":
main()
#- vim:set syntax=python:
|
"""Functions for interacting with the IAM service."""
import json
import boto3
from boto3_assistant import account
IAM = boto3.client('iam')
def create_role(name, description):
"""
Create a role that is assumable by the callers account.
The role will be created with no policies.
Parameters:
name (str): The name of the role to create.
description (str): The description for the role to create.
Returns:
role: Information about the role that is created.
"""
basic_policy = {
'Statement': [
{
'Principal': {
'AWS': account.get_account_id()
},
'Effect': 'Allow',
'Action': ['sts:AssumeRole']
}
]
}
response = IAM.create_role(
Path='/',
RoleName=name,
AssumeRolePolicyDocument=json.dumps(basic_policy),
Description=description
)
return response['Role']
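# Example usage (hypothetical role name; AmazonS3ReadOnlyAccess is an AWS managed policy):
#   role = create_role('data-reader', 'Reads the data buckets')
#   add_policy('data-reader', 'AmazonS3ReadOnlyAccess')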
def add_policy(role_name, policy_name):
"""
Attach an IAM Policy to a Role.
Parameters:
role_name (str): The name of the role to attach the policy to.
policy_name (str): The name of the policy to attach to the role.
"""
IAM.attach_role_policy(
RoleName=role_name,
PolicyArn='arn:aws:iam::aws:policy/{}'.format(policy_name)
)
def list_roles(prefix):
"""
List all the roles for the given path prefix.
Parameters:
prefix (str): The prefix to filter by.
"""
response = IAM.list_roles(
PathPrefix=prefix
)
return response['Roles']
def put_role_policy(role_name, policy_name, policy):
"""
Add or update an inline role policy document.
Parameters:
role_name (str): The name of the role to update.
policy_name (str): The name of the policy to update.
policy (obj): The policy document to update.
"""
IAM.put_role_policy(
RoleName=role_name,
PolicyName=policy_name,
PolicyDocument=json.dumps(policy)
)
|
"""
Test cases targeting streamingphish/database.py.
"""
import pytest
@pytest.fixture(scope="module")
def artifacts():
return {'classifier_name': 'classifier_v1', 'key_one': 'value_one'}
### Helper function ###
def spoofed_find_function():
return [{'_id': 'something', 'classifier_v1': 'blob'}]
def test_save(db, artifacts):
"""Verify transformation of artifacts and save to db."""
db.save_classifier(artifacts)
db._classifiers.insert_one.assert_called_with({'classifier_v1': {'key_one': 'value_one'}})
def test_delete(db):
db.delete_classifier('classifier_v1')
assert db._classifiers.delete_many.call_count == 1
def test_available_classifiers(db, monkeypatch):
"""Spoofing the return of the find method, verifying classifier name is returned."""
monkeypatch.setattr(db._classifiers, 'find', spoofed_find_function)
result = db.available_classifiers()
assert result == ['classifier_v1']
@pytest.mark.parametrize("evaluate, result",
[
('classifier_v1', True),
('nopenopenope', False)
])
def test_verify_classifier(db, monkeypatch, evaluate, result):
"""Spoofing the return of the find method, verifying classifier existence is correct."""
monkeypatch.setattr(db._classifiers, 'find', spoofed_find_function)
returned = db.verify_classifier_existence(evaluate)
assert returned == result
def test_fetch_classifier(db, monkeypatch):
def spoofed_find_one(name):
return {'_id': 'who_cares', 'classifier_v5': 'blah'}
monkeypatch.setattr(db._classifiers, 'find_one', spoofed_find_one)
result = db.fetch_classifier('classifier_v5')
assert 'classifier_v5' in result.keys()
|
import os
def pytest_configure():
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'django_test_settings')
|
from __future__ import absolute_import
from __future__ import unicode_literals
import warnings
import hashlib
from quickcache.django_quickcache import get_django_quickcache
from quickcache import ForceSkipCache
from celery._state import get_current_task
from corehq.util.global_request import get_request
from corehq.util.soft_assert import soft_assert
quickcache_soft_assert = soft_assert(
notify_admins=True,
fail_if_debug=False,
skip_frames=5,
)
def get_session_key():
"""
returns a 7 character string that varies with the current "session" (task or request)
"""
session_id = None
current_task = get_current_task()
if current_task:
# at least in tests, current_task may have the same id between different tasks!
session_id = current_task.request.id
if not session_id:
request = get_request()
if request:
session_id = str(id(get_request()))
else:
session_id = None
if session_id:
session_id = session_id.encode('utf-8')
# hash it so that similar numbers end up very different (esp. because we're truncating)
return hashlib.md5(session_id).hexdigest()[:7]
else:
# quickcache catches this and skips the cache
# this happens during tests (outside a fake task/request context)
# and during management commands
raise ForceSkipCache("Not part of a session")
quickcache = get_django_quickcache(timeout=5 * 60, memoize_timeout=10,
assert_function=quickcache_soft_assert,
session_function=get_session_key)
__all__ = ['quickcache']
|
#!/usr/bin/env python
COPY_GOOGLE_DOC_KEY = '1exAtJogmunF-R70TvaWDUeSI_l3hBNGAQqIlfiz7uP8'
|
# reformat.py
# Trevor Pottinger
# Tue Sep 22 00:22:49 PDT 2020
import sys
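# Reads tab-separated (start, end, length, word) lines from stdin and re-emits
# them with three-decimal floats and a lower-cased word, e.g.
#   "1.5\t2.25\t0.75\tHello"  ->  "1.500\t2.250\t0.750\thello"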
def main() -> None:
for line in sys.stdin:
start_str, end_str, len_str, word = line.rstrip('\n').split('\t')
print('{start:0.3f}\t{end:0.3f}\t{duration:0.3f}\t{content}'.format(
start=float(start_str),
end=float(end_str),
duration=float(len_str),
content=word.lower()
))
return
if __name__ == '__main__':
main()
|
# Generated by Django 3.0.5 on 2020-10-07 04:34
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('moex', '0029_auto_20200816_2114'),
]
operations = [
migrations.AlterField(
model_name='security',
name='faceunit',
field=models.CharField(blank=True, choices=[('SUR', 'РУБ'), ('USD', 'USD'), ('EUR', 'EUR'), ('GBP', 'GBP'), ('CNY', 'CNY')], default='SUR', max_length=20, null=True),
),
]
|
from pandas import DataFrame
#---------------------------------------------------------------------------------------------------
#
# Order class
#---------------------------------------------------------------------------------------------------
class Order:
def __init__(self, exec_time, type, amount, uprice) -> None:
self._exec_time = exec_time
self._type = type
self._amount = amount
self._uprice = uprice
self._volume = 0
self._value = 0
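        # for a 'sell' order, `amount` is the base-asset volume being sold;
        # otherwise it is the quote-asset value being spent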
if self._type == 'sell':
self._volume = amount
self._value = amount * uprice
else:
self._value = amount
self._volume = amount / uprice
@property
def exec_time(self) :
return self._exec_time
@property
def is_buy(self) :
        return self._type == 'buy'
@property
def volume(self) :
return self._volume
@property
def uprice(self) :
return self._uprice
@property
def value(self) :
return self._value
def as_array(self):
        '''
        returns the order as an array [time, is_buy, volume, price, value]
        is_buy is True for a buy order, otherwise False
        '''
return [self.exec_time, self.is_buy, self.volume, self.uprice, self.value]
#---------------------------------------------------------------------------------------------------
#
# Wallet class
#---------------------------------------------------------------------------------------------------
class Wallet:
def __init__(self, base_assets, quote_assets) -> None:
self._base_assets = base_assets
self._quote_assets = quote_assets
self._orders = []
#---------------------------------------------------------------------------------
def sell(self, price, exec_time):
if self._base_assets <= 0:
            print('Could not sell: no base assets available')
return
order = Order(exec_time, 'sell', self._base_assets, price )
self._base_assets = 0.0
self._quote_assets = order._value
self._orders.append(order)
#---------------------------------------------------------------------------------
def buy(self, price, exec_time):
if self._quote_assets <= 0:
            print('Could not buy: no quote assets available')
return
order = Order(exec_time, 'buy', self._quote_assets, price )
self._base_assets = order._volume
self._quote_assets = 0.0
self._orders.append(order)
def orders_as_dataframe(self):
orders = []
for order in self._orders:
orders.append(order.as_array())
df = DataFrame(orders)
df.columns = ['time', 'buy', 'volume', 'price', 'value']
return df
|
#!/usr/bin/python
import os
import sys
import fnmatch
import glob
import shutil
import subprocess
def glob_recursive(base_path, pattern):
matches = []
for root, dirnames, filenames in os.walk(base_path):
for filename in fnmatch.filter(filenames, pattern):
matches.append(os.path.join(root, filename))
return matches
src_dir = 'browser'
build_dir = 'browser/build'
def run(exe):
p = os.popen(exe)
return p.read()
def compress(in_name, out_name):
# os.system('yui-compressor --type js --preserve-semi -o ' + out_name + ' ' + in_name)
os.system('node_modules/uglify-js2/bin/uglifyjs2 -c dead-code=false,unused=false -o ' + out_name + ' ' + in_name)
def compress_css(in_name, out_name):
cmd = 'java -jar node_modules/yui-compressor/lib/vendor/yuicompressor.jar --type css -o ' + out_name + ' ' + in_name
# print cmd
os.system(cmd)
# Make sure the output directory exists
if not os.path.exists(build_dir):
os.system('mkdir ' + build_dir)
# Debugger statements will make uglify barf, Make sure we haven't left any behind by accident.
print 'Checking for residual debugger statements...'
if os.system('grep -n "debugger;" ' + src_dir + '/plugins/*.js') == 0:
sys.exit(0)
if os.system('grep -n "debugger;" ' + src_dir + '/scripts/*.js') == 0:
sys.exit(0)
print 'Rebuilding...'
shutil.rmtree(build_dir)
os.mkdir(build_dir)
print 'Compressing scripts...'
scripts_path = src_dir + '/scripts/'
os.mkdir(build_dir + '/scripts/')
scripts = map(lambda x: x[len(scripts_path):], glob.glob(scripts_path + '*.js'))
for script in scripts:
print '\t' + script
compress(scripts_path + script, build_dir + '/scripts/' + script)
print 'Compressing presets...'
presets_path = src_dir + '/presets/'
os.mkdir(build_dir + '/presets/')
presets = map(lambda x: x[len(presets_path):], glob.glob(presets_path + '*.json'))
for preset in presets:
shutil.copy(presets_path + preset, build_dir + '/presets/' + preset)
plugins_path = src_dir + '/plugins/'
os.mkdir(build_dir + '/plugins/')
plugins = map(lambda x: x[len(plugins_path):], glob.glob(plugins_path + '*.js'))
print 'Concatenating plugins...'
plugin_data = []
for plugin in plugins:
    print '\tMunching ' + plugin
for line in open(plugins_path + plugin, 'r'):
plugin_data.append(line)
plugs_concat_filename = build_dir + '/plugins/all.plugins'
plugs_concat_file = open(plugs_concat_filename, 'w')
plugs_concat_file.write(''.join(plugin_data))
plugs_concat_file.close()
print '\tMinifying plugin package.'
compress(plugs_concat_filename, plugs_concat_filename + '.js')
os.remove(plugs_concat_filename)
print '\tCopying plugin catalogue.'
os.system('cp ' + plugins_path + 'plugins.json ' + build_dir + '/plugins')
print '\tProcessing plugin dependencies'
print '\t\tOSC Proxy'
oscproxy_file = '/plugins/osc/osc-proxy.js'
os.system('mkdir ' + os.path.dirname(build_dir + oscproxy_file))
compress(src_dir + oscproxy_file, build_dir + oscproxy_file)
print '\t\tACE Editor + plugins'
os.system('mkdir ' + build_dir + '/plugins/ace')
ace_files = glob.glob(src_dir + '/plugins/ace/*.js')
for ace_file in ace_files:
print('\t\t+ ' + ace_file)
compress(ace_file, build_dir + '/plugins/ace/' + os.path.basename(ace_file))
print '\t\tConstructive solid geometry'
csg_file = '/plugins/csg/csg.js'
os.system('mkdir ' + os.path.dirname(build_dir + csg_file))
compress(src_dir + csg_file, build_dir + csg_file)
print '\t\tWebSocket channel'
wschannel_file = '/plugins/wschannel/wschannel.js'
os.system('mkdir ' + os.path.dirname(build_dir + wschannel_file))
compress(src_dir + wschannel_file, build_dir + wschannel_file)
print '\t\tToggle button style'
os.system('mkdir ' + build_dir + '/plugins/toggle-button')
compress_css(plugins_path + 'toggle-button/style.css', build_dir + '/plugins/toggle-button/style.css')
print '\t\tModule player'
module_player_file = '/plugins/module_player/pt.js'
os.system('mkdir ' + os.path.dirname(build_dir + module_player_file))
compress(src_dir + module_player_file, build_dir + module_player_file)
print 'Compressing stylesheets...'
css_path = src_dir + '/style/'
os.mkdir(build_dir + '/style')
cssfiles = map(lambda x: x[len(css_path):], glob.glob(css_path + '*.css'))
for cssfile in cssfiles:
print '\tCompressing ' + cssfile
compress_css(css_path + cssfile, build_dir + '/style/' + os.path.basename(cssfile))
print 'Copying TrueType fonts.'
os.system('cp ' + css_path + '*.ttf ' + build_dir + '/style')
# Take care of files included directly from node modules.
print 'Copying relevant files from node_modules...'
os.system('mkdir -p ' + build_dir + '/node_modules/jquery/dist')
os.system('cp node_modules/jquery/dist/jquery.min.js ' + build_dir + '/node_modules/jquery/dist')
os.system('cp node_modules/jquery/dist/jquery.min.map ' + build_dir + '/node_modules/jquery/dist')
os.system('mkdir -p ' + build_dir + '/node_modules/handlebars/dist')
os.system('cp node_modules/handlebars/dist/handlebars.min.js ' + build_dir + '/node_modules/handlebars/dist')
print 'Compressing plugin icons to CSS sprite sheet...'
icons_path = build_dir + '/style/icons'
os.system('mkdir ' + icons_path)
os.system('tools/compress-plugin-icons.py')
# Copy the result back to the debug version.
shutil.copy(icons_path + '/style.css', src_dir + '/style/icons/style.css')
shutil.copy(icons_path + '/icons.png', src_dir + '/style/icons/icons.png')
print 'Copying remaining required files...'
print '\tCopying images folder.'
shutil.copytree(src_dir + '/images/', build_dir + '/images/')
print '\tCopying data folder.'
shutil.copytree(src_dir + '/data/', build_dir + '/data/')
print '\tCopying vendor folder.'
shutil.copytree(src_dir + '/vendor/', build_dir + '/vendor/')
print '\tCopying help folder.'
shutil.copytree(src_dir + '/help/', build_dir + '/help/')
print '\tCopying index.html.'
os.system('cp ' + src_dir + '/*.html ' + build_dir)
print '\tCopying scene.json.'
os.system('cp ' + src_dir + '/scene.json ' + build_dir)
print '\tCopying favicon.'
os.system('cp ' + src_dir + '/favicon.ico ' + build_dir)
|
from chainer import function
from chainer.utils import type_check
class Swapaxes(function.Function):
"""Swap two axes of an array."""
def __init__(self, axis1, axis2):
self.axis1 = axis1
self.axis2 = axis2
def check_type_forward(self, in_types):
type_check.expect(in_types.size() == 1,)
@property
def label(self):
return 'Swapaxes'
def forward(self, inputs):
self.retain_inputs(())
x = inputs[0]
y = x.swapaxes(self.axis1, self.axis2)
return y,
def backward(self, inputs, grad_outputs):
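        # swapping axes is its own inverse, so the gradient is the upstream
        # gradient with the same two axes swapped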
gy = grad_outputs[0]
gx = gy.swapaxes(self.axis1, self.axis2)
return gx,
def swapaxes(x, axis1, axis2):
"""Swap two axes of a variable.
Args:
x (~chainer.Variable): Input variable.
axis1 (int): The first axis to swap.
axis2 (int): The second axis to swap.
Returns:
~chainer.Variable: Variable whose axes are swapped.
"""
return Swapaxes(axis1, axis2)(x)
|
# -*- coding: utf-8 -*-
from numpy import *
import numpy as np
import matplotlib.pyplot as plt
def loadDataSet(fileName):
dataMat=[];labelMat=[]
fr=open(fileName)
for line in fr.readlines():
lineArr=line.strip().split()
dataMat.append([1.0,float(lineArr[0]),float(lineArr[1])])
labelMat.append(int(float(lineArr[2])))
return dataMat,labelMat
def sigmoid(inX):
return 1.0/(1+exp(-inX))
def plotBestFit(dataArr, labelMat,weights,recognitionLate,name):
xMat = np.mat(dataArr)
plt.figure()
xcord1 = []
ycord1 = []
xcord2 = []
ycord2 = []
for i in range(shape(xMat)[0]):
if labelMat[i] == 1:
xcord1.append(xMat[i, 1])
ycord1.append(xMat[i, 2])
else:
xcord2.append(xMat[i, 1])
ycord2.append(xMat[i, 2])
plt.scatter(xcord1, ycord1, s=10, c='blue', marker='+', label='x1')
plt.scatter(xcord2, ycord2, s=10, c='red', marker='^', label='x2')
min_x = xMat[:, 1].min() - 0.3
max_x = xMat[:, 1].max() + 0.3
x = linspace(min_x, max_x, 1000)
yHat = (-weights[0] - weights[1] * x) / weights[2]
    # plt.rcParams['font.sans-serif'] = 'SimHei'  # needed to render CJK labels
    # plt.rcParams['axes.unicode_minus'] = False  # needed to render minus signs with CJK fonts
    plt.plot(mat(x).T, yHat.T, 'r', label='regression line')
    plt.xlabel('$\mathrm{X}_{1}$', fontsize=12)
    plt.ylabel('$\mathrm{X}_{2}$', fontsize=12)
    plt.legend(loc='best', prop={'family': 'SimHei', 'size': 12})
    plt.title('Linear logistic regression (' + name + ': ' + str(shape(xMat)[0]) + ', recognition rate: ' + str(recognitionLate) + '%)',
              fontproperties='SimHei', fontsize=15)
plt.grid(True, linestyle="--", color="k")
plt.show()
def gradAscent(dataMatIn,classLabels):
dataMatrix=mat(dataMatIn)
labelMat=mat(classLabels).transpose()
m,n=shape(dataMatrix)
alpha=0.001
maxCycles=500
weights=ones((n,1))
for k in range(maxCycles):
h=sigmoid(dataMatrix*weights)
error=(labelMat-h)
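        # gradient ascent on the log-likelihood: w <- w + alpha * X^T (y - sigmoid(Xw))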
weights=weights+alpha*dataMatrix.transpose()*error
return weights
def CaculateMse(xArr,yArr,weights):
dataMatrix=mat(xArr)
labelMat=mat(yArr).transpose()
yHat = dataMatrix * weights
ErroMat = np.zeros((len(set(yArr)), len(set(yArr))))
for i in range(shape(dataMatrix)[0]):
if ((labelMat[i] == 1) and (yHat[i] >= 0)):
ErroMat[0, 0] = ErroMat[0, 0] + 1
elif (labelMat[i] == 1 and yHat[i] <= 0):
ErroMat[0, 1] = ErroMat[0, 1] + 1
elif (labelMat[i] == -1 and yHat[i] >= 0):
ErroMat[1, 0] = ErroMat[1, 0] + 1
elif (labelMat[i] == -1 and yHat[i] <= 0):
ErroMat[1, 1] = ErroMat[1, 1] + 1
recognitionLate = ((ErroMat[0, 0] + ErroMat[1, 1]) / (
ErroMat[0, 0] + ErroMat[0, 1] + ErroMat[1, 0] + ErroMat[1, 1])) * 100
return recognitionLate
def main():
TrainDataMat, TrainLabelMat = loadDataSet('train_data.text')
print('TrainDataMat:\n', TrainDataMat)
print('TrainLabelMat:\n', TrainLabelMat)
weights = gradAscent(TrainDataMat, TrainLabelMat)
print('weights:\n', weights.tolist())
recognitionLate = CaculateMse(TrainDataMat, TrainLabelMat, weights)
    plotBestFit(TrainDataMat, TrainLabelMat, weights, recognitionLate, "training samples")
TestDataMat, TestLabelMat = loadDataSet('test_data.text')
recognitionLate = CaculateMse(TestDataMat, TestLabelMat, weights)
    plotBestFit(TestDataMat, TestLabelMat, weights, recognitionLate, "test samples")
if __name__ == '__main__':
main()
|
#
# Copyright (C) 2020 Red Hat, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import mock
import unittest
import os
from dcipipeline.main import (
process_args,
overload_dicts,
get_prev_stages,
pre_process_stage,
post_process_stage,
upload_junit_files_from_dir,
)
class TestMain(unittest.TestCase):
def test_process_args_empty(self):
args = ["dci-pipeline"]
result, args = process_args(args)
self.assertEqual(args, [])
self.assertEqual(result, [])
def test_process_args_single(self):
args = ["dci-pipeline", "stage:key=value"]
result, args = process_args(args)
self.assertEqual(args, [])
self.assertEqual(result, [{"stage": {"key": "value"}}])
def test_process_args_list(self):
args = ["dci-pipeline", "stage:key=value=toto,value2"]
result, args = process_args(args)
self.assertEqual(args, [])
self.assertEqual(result, [{"stage": {"key": ["value=toto", "value2"]}}])
def test_process_args_dict(self):
args = ["dci-pipeline", "stage:key=subkey:value", "stage:key=subkey2:value2"]
result, args = process_args(args)
self.assertEqual(args, [])
self.assertEqual(
result,
[
{"stage": {"key": {"subkey": "value"}}},
{"stage": {"key": {"subkey2": "value2"}}},
],
)
def test_process_args_dict_list(self):
args = ["dci-pipeline", "stage:key=subkey:value,value2"]
result, args = process_args(args)
self.assertEqual(args, [])
self.assertEqual(result, [{"stage": {"key": {"subkey": ["value", "value2"]}}}])
def test_process_args_list1(self):
args = ["dci-pipeline", "stage:key=value=toto,"]
result, args = process_args(args)
self.assertEqual(args, [])
self.assertEqual(result, [{"stage": {"key": ["value=toto"]}}])
def test_process_args_only_files(self):
args = ["dci-pipeline", "file1", "file2"]
result, args = process_args(args)
self.assertEqual(args, ["file1", "file2"])
self.assertEqual(result, [])
def test_process_args_http(self):
args = ["dci-pipeline", "stage:key=http://lwn.net/"]
result, args = process_args(args)
self.assertEqual(args, [])
self.assertEqual(result, [{"stage": {"key": "http://lwn.net/"}}])
def test_process_args_https(self):
args = ["dci-pipeline", "stage:key=https://lwn.net/"]
result, args = process_args(args)
self.assertEqual(args, [])
self.assertEqual(result, [{"stage": {"key": "https://lwn.net/"}}])
def test_overload_dicts_add(self):
stage = {"first": "value"}
overload = {"key": ["value=toto", "value2"]}
self.assertEqual(
overload_dicts(overload, stage),
{"first": "value", "key": ["value=toto", "value2"]},
)
def test_overload_dicts_replace_list(self):
overload = {"components": ["ocp=12", "ose-tests"]}
stage = {"components": ["ocp", "cnf-tests"], "topic": "OCP-4.4"}
self.assertEqual(
overload_dicts(overload, stage),
{"components": ["ocp=12", "cnf-tests", "ose-tests"], "topic": "OCP-4.4"},
)
def test_overload_dicts_add_dict(self):
overload = {"ansible_extravars": {"dci_comment": "universal answer"}}
stage = {"ansible_extravars": {"answer": 42}}
self.assertEqual(
overload_dicts(overload, stage),
{"ansible_extravars": {"answer": 42, "dci_comment": "universal answer"}},
)
def test_overload_dicts_add_list_in_dict(self):
overload = {"ansible_extravars": {"dci_comment": "universal answer"}}
stage = {"ansible_extravars": {"answer": 42}}
self.assertEqual(
overload_dicts(overload, stage),
{"ansible_extravars": {"answer": 42, "dci_comment": "universal answer"}},
)
def test_prev_stages(self):
stage1 = {"name": "1", "type": "ocp"}
stage2 = {
"name": "2",
"type": "ocp-upgrade",
"prev_stages": ["ocp-upgrade", "ocp"],
}
stage3 = {
"name": "3",
"type": "ocp-upgrade2",
"prev_stages": ["ocp-upgrade", "ocp"],
}
stage4 = {"name": "4", "type": "cnf2"}
pipeline = [stage1, stage2, stage3, stage4]
prev_stages = get_prev_stages(stage3, pipeline)
self.assertEqual(prev_stages, [stage2, stage1])
@mock.patch("dcipipeline.main.tempfile.mkdtemp")
def test_pre_process_stage(self, m):
stage = {"ansible_envvars": {"envvar": "/@tmpdir"}}
m.return_value = "/tmp/tmppath"
stage_metas, stage = pre_process_stage(stage)
self.assertEqual(stage_metas["tmpdirs"][0]["path"], "/tmp/tmppath")
@mock.patch("dcipipeline.main.shutil.rmtree")
@mock.patch("dcipipeline.main.upload_junit_files_from_dir")
def test_post_process_stage(self, m_upload_junit, m_rmtree):
metas = {
"tmpdirs": [{"name": "JUNIT_OUTPUT_DIR", "path": "/tmp/junit_tmppath"}]
}
post_process_stage("context", "stage", metas)
m_upload_junit.assert_called_with("context", "stage", "/tmp/junit_tmppath")
m_rmtree.assert_called_with("/tmp/junit_tmppath")
m_upload_junit.reset_mock()
m_rmtree.reset_mock()
metas = {"tmpdirs": [{"name": "envvar1", "path": "/tmp/tmppath"}]}
post_process_stage("context", "stage", metas)
self.assertTrue(not m_upload_junit.called)
m_rmtree.assert_called_with("/tmp/tmppath")
@mock.patch("dcipipeline.main.dci_file.create")
def test_upload_junit_files_from_dir(self, m):
try:
os.makedirs("/tmp/junit-tmppath")
except Exception:
pass
open("/tmp/junit-tmppath/junit-tests.xml", "a+").close()
stage = {"job_info": {"job": {"id": "1"}}}
upload_junit_files_from_dir("context", stage, "/tmp/junit-tmppath")
m.assert_called_with(
"context",
"junit-tests",
file_path="/tmp/junit-tmppath/junit-tests.xml",
mime="application/junit",
job_id="1",
)
if __name__ == "__main__":
unittest.main()
# test_main.py ends here
|
from neat import nn
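# Render images by sampling a NEAT feed-forward network over a 2-D coordinate grid (CPPN-style).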
def eval_mono_image(genome, width, height):
net = nn.create_feed_forward_phenotype(genome)
image = []
for r in range(height):
y = -2.0 + 4.0 * r / (height - 1)
row = []
for c in range(width):
x = -2.0 + 4.0 * c / (width - 1)
output = net.serial_activate([x, y])
gray = 255 if output[0] > 0.0 else 0
row.append(gray)
image.append(row)
return image
def eval_gray_image(genome, width, height):
net = nn.create_feed_forward_phenotype(genome)
image = []
for r in range(height):
y = -1.0 + 2.0 * r / (height - 1)
row = []
for c in range(width):
x = -1.0 + 2.0 * c / (width - 1)
output = net.serial_activate([x, y])
gray = int(round((output[0] + 1.0) * 255 / 2.0))
gray = max(0, min(255, gray))
row.append(gray)
image.append(row)
return image
def eval_color_image(genome, width, height):
net = nn.create_feed_forward_phenotype(genome)
image = []
for r in range(height):
y = -1.0 + 2.0 * r / (height - 1)
row = []
for c in range(width):
x = -1.0 + 2.0 * c / (width - 1)
output = net.serial_activate([x, y])
red = int(round((output[0] + 1.0) * 255 / 2.0))
green = int(round((output[1] + 1.0) * 255 / 2.0))
blue = int(round((output[2] + 1.0) * 255 / 2.0))
red = max(0, min(255, red))
green = max(0, min(255, green))
blue = max(0, min(255, blue))
row.append((red, green, blue))
image.append(row)
return image |
#!/usr/bin/env python3
from pathlib import Path
import re
#from itertools import groupby
from io import StringIO
def updatePathContents(path, newContents):
originalContents = ''
try:
with path.open('r') as tio:
originalContents = tio.read()
except FileNotFoundError:
pass
if originalContents != newContents:
with path.open('w') as tio:
tio.write(newContents)
class Signature(object):
def __init__(self, name, source, linenum):
self.source = source
self.name = name
self.linenum = linenum
@property
def prototype(self):
return 'extern void {}(void);'.format(self.name)
@property
def runCall(self):
return '\tUnityDefaultTestRun({}, "{}", {});\n'.format(self.name, self.name, self.linenum)
class TestRun(object):
def __init__(self, path):
self.path = path
@property
def cSource(self):
return self.path.parent / self.path.stem
@property
def testRunFunctionName(self):
return self.cSource.stem
def signatures(self):
        pattern = re.compile(r'void\s+(test[A-Z]+\w*)\s*\(\s*void\s*\)')
for index, line in enumerate(self.cSource.open()):
match = pattern.search(line)
if match:
yield Signature(match.group(1), self.cSource, index)
def computeTestRunSource(self):
buffer = StringIO()
buffer.write('void {}(void) {{\n'.format(self.testRunFunctionName))
buffer.write('\tUnity.TestFile = "{}";\n'.format(self.cSource))
for signature in self.signatures():
            buffer.write(signature.runCall)
        buffer.write('}\n')
return buffer.getvalue()
def update(self):
updatePathContents(self.path, self.computeTestRunSource())
class AllTestsRun(object):
def __init__(self, path):
self.path = path
def update(self):
runFunctions = []
for runFile in self.path.glob('*.run'):
testRun = TestRun(runFile)
testRun.update()
runFunctions.append(testRun.testRunFunctionName)
buffer = StringIO()
buffer.write('#include "unity.h"\n\n')
for each in runFunctions:
buffer.write('void {}(void);\n'.format(each))
buffer.write('\nvoid allTestsRun(void *_) {\n')
buffer.write('\tUnityBegin("");\n')
for each in runFunctions:
buffer.write('\t{}();\n'.format(each))
buffer.write('\tUnityEnd();\n')
        buffer.write('}\n')
updatePathContents(self.path / 'allTestsRun.c', buffer.getvalue())
if __name__ == '__main__':
AllTestsRun(Path('.')).update()
|
from flask import Flask, request, Response, jsonify
from sesamutils import sesam_logger, VariablesConfig, Dotdictify
from sesamutils.flask import serve
import sys
import msal
import pem
import json
from cryptography import x509
from cryptography.hazmat.backends import default_backend
import requests
app = Flask(__name__)
required_env_vars = ["client_id", "certificate", "cert_thumbprint", "token_url", "resource"]
optional_env_vars = ["LOG_LEVEL"]
config = VariablesConfig(required_env_vars, optional_env_vars=optional_env_vars)
if not config.validate():
sys.exit(1)
if hasattr(config, "LOG_LEVEL") and config.LOG_LEVEL == "DEBUG":
logger = sesam_logger("sharepoint", app=app)
else:
logger = sesam_logger("sharepoint")
class data_access_layer:
def __init__(self, config):
self.session = None
self.auth_token = None
self.config = config
def get_token(self):
client_credential = {"private_key": config.certificate,"thumbprint": config.cert_thumbprint}
try:
app = msal.ConfidentialClientApplication(client_id=config.client_id, client_credential=client_credential, authority=config.token_url, validate_authority=True, token_cache=None, verify=True, proxies=None, timeout=None, client_claims=None, app_name=None, app_version=None)
result = app.acquire_token_for_client([config.resource])
if not result["access_token"]:
logger.error(f"Access token request failed. Error: {resp.content}")
raise
except Exception as e:
logger.error(f"Failed to aquire access_token. Error: {e}")
raise
self.auth_token = result["access_token"]
logger.debug("token: " + self.auth_token)
def get_entities(self, sites):
if not self.session:
self.session = requests.Session()
self.get_token()
for site in sites:
weburl_key = str("webUrl")
logger.debug(site)
weburl = self.get_value(site, weburl_key)
logger.debug(weburl)
url = weburl + "/_api/web"
logger.debug("url: " + url)
req = requests.Request("GET", url, headers={'accept': 'application/json;odata=nometadata', 'Authorization': 'Bearer ' + self.auth_token})
try:
resp = self.session.send(req.prepare())
except Exception as e:
logger.error(f"Failed to send request. Error: {e}")
raise
if resp.status_code == 401:
logger.warning("Received status 401. Requesting new access token.")
self.get_token()
req = requests.Request("GET", url, headers={'accept': 'application/json;odata=nometadata', 'Authorization': 'Bearer ' + self.auth_token})
try:
resp = self.session.send(req.prepare())
except Exception as e:
logger.error(f"Failed to send request. Error: {e}")
raise
if not resp.ok:
error_text = f"Unexpected response status code: {resp.status_code} with response text {resp.text}"
logger.error(error_text)
continue
res = Dotdictify(resp.json())
id_key = str("_id")
res['_id'] = self.get_value(site, id_key)
logger.debug(res)
yield res
    def get_value(self, entity, target_key):
        res = None
        for k, v in entity.items():
            if k.split(":")[-1] == target_key:
                res = v
        return res
DAL = data_access_layer(config)
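# Stream entities as a JSON array without materialising the whole result set in memory.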
def stream_json(entities):
first = True
yield '['
for i, row in enumerate(entities):
if not first:
yield ','
else:
first = False
yield json.dumps(row)
yield ']'
@app.route("/<path:path>", methods=["POST"])
def get(path):
sites = request.get_json()
logger.debug(sites)
entities = DAL.get_entities(sites)
logger.debug(type(entities))
logger.debug(entities)
return Response(stream_json(entities), mimetype='application/json')
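# Example request (hypothetical payload and port; keys may be
# namespace-prefixed, get_value matches on the segment after the last ':'):
#   curl -X POST http://localhost:5000/sites \
#        -H 'Content-Type: application/json' \
#        -d '[{"webUrl": "https://contoso.sharepoint.com/sites/demo", "_id": "1"}]'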
if __name__ == '__main__':
serve(app) |
from django.contrib.auth.models import Permission
from django.contrib.contenttypes.models import ContentType
from django.core.exceptions import ObjectDoesNotExist
from django.test import TestCase
from models import Survey
class SurveyTestCase(TestCase):
def test_basic(self):
instance = Survey.objects.get(name='Personal Data')
qs = [e for e in instance.entity.objects.all()]
self.assertEqual(len(qs), 1)
johndoe = qs.pop()
self.assertEqual(johndoe.first_name, 'John')
self.assertEqual(johndoe.last_name, 'Doe')
self.assertEqual(johndoe.age, 31)
def test_remove_add_age(self):
age = Survey._attribute.objects.get(name='Age')
age.delete()
instance = Survey.objects.get(name='Personal Data')
qs = [e for e in instance.entity.objects.all()]
self.assertEqual(len(qs), 1)
johndoe = qs.pop()
self.assertEqual(johndoe.first_name, 'John')
self.assertEqual(johndoe.last_name, 'Doe')
self.assertRaises(AttributeError, getattr, johndoe, 'age')
Survey._attribute.objects.create(
name = 'Age',
slug = 'age',
attr_type = 'Integer',
schema = instance,
)
qs = [e for e in instance.entity.objects.all()]
self.assertEqual(len(qs), 1)
johndoe = qs.pop()
self.assertEqual(johndoe.first_name, 'John')
self.assertEqual(johndoe.last_name, 'Doe')
self.assertEqual(johndoe.age, None)
johndoe.age = 31
johndoe.save()
qs = [e for e in instance.entity.objects.all()]
self.assertEqual(len(qs), 1)
johndoe = qs.pop()
self.assertEqual(johndoe.first_name, 'John')
self.assertEqual(johndoe.last_name, 'Doe')
self.assertEqual(johndoe.age, 31)
def test_remove_add_rename_model(self):
instance = Survey.objects.get(name='Personal Data')
entity = instance.entity
app_label = entity._meta.app_label
object_name = entity._meta.object_name.lower()
ctype = ContentType.objects.get(app_label=app_label, model=object_name)
perms = Permission.objects.filter(content_type=ctype)
self.assertEqual(len(perms), 3)
instance.delete()
self.assertRaises(ObjectDoesNotExist, ContentType.objects.get, app_label=app_label, model=object_name)
perms = Permission.objects.filter(content_type=ctype)
self.assertEqual(len(perms), 0)
instance = Survey.objects.create(name='Personal Data', slug='personal-data')
Survey._attribute.objects.get_or_create(
name = 'First Name',
slug = 'first-name',
attr_type = 'ShortText',
schema = instance,
required = True,
)
Survey._attribute.objects.get_or_create(
name = 'Last Name',
slug = 'last-name',
attr_type = 'ShortText',
schema = instance,
required = True,
)
Survey._attribute.objects.get_or_create(
name = 'Age',
slug = 'age',
attr_type = 'Integer',
schema = instance,
)
        # Intentionally repeated: get_or_create must not create a duplicate 'Age'.
        Survey._attribute.objects.get_or_create(
name = 'Age',
slug = 'age',
attr_type = 'Integer',
schema = instance,
)
self.assertEqual(len(Survey._attribute.objects.all()), 3)
entity = instance.entity
app_label = entity._meta.app_label
object_name = entity._meta.object_name.lower()
ctype = ContentType.objects.get(app_label=app_label, model=object_name)
perms = Permission.objects.filter(content_type=ctype)
self.assertEqual(len(perms), 3)
johndoe = instance.entity.objects.create(
first_name = 'John',
last_name = 'Doe',
age = 31,
)
self.assertEqual(johndoe.first_name, 'John')
self.assertEqual(johndoe.last_name, 'Doe')
self.assertEqual(johndoe.age, 31)
age = Survey._attribute.objects.get(name='Age')
age.name = 'Eta'
age.slug = 'eta'
age.save()
instance = Survey.objects.get(name='Personal Data')
qs = [e for e in instance.entity.objects.all()]
self.assertEqual(len(qs), 1)
johndoe = qs.pop()
self.assertEqual(johndoe.first_name, 'John')
self.assertEqual(johndoe.last_name, 'Doe')
self.assertRaises(AttributeError, getattr, johndoe, 'age')
self.assertEqual(johndoe.eta, 31)
age.name = 'Age'
age.slug = 'age'
age.save()
self.assertEqual(len(Survey._attribute.objects.all()), 3) |
'''
T-primes
Determine how many divisors an integer has.
Output:
exactly 3 divisors -> print YES, else NO
3 important facts:
first, it must be a perfect square: integer = x*x
second, it must have no divisors besides 1, x and itself
third: an integer is a T-prime exactly when its square root is prime (4 = 2*2 is the smallest example)
'''
import math
# find all prime no larger than 10^6
def find_all_primes(limit=1000000):
primes = [True] * (limit + 1)
primes[0] = False
primes[1] = False
for i in range(2, limit + 1):
if primes[i] == True:
for j in range(i * i, limit + 1, i):
primes[j] = False
return primes
def check_t_prime(number):
    # A T-prime has exactly 3 divisors (1, x and x*x), i.e. it is the
    # square of a prime; 4 = 2*2 is the smallest one.
    sqrt = int(math.sqrt(number))
    return sqrt * sqrt == number and prime_nums[sqrt]
prime_nums = find_all_primes()
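# Quick sanity checks (hypothetical, derived from the rule above):
# the first few T-primes are the squares of primes.
#   assert all(check_t_prime(p * p) for p in [2, 3, 5, 7, 11])
#   assert not any(check_t_prime(n) for n in [1, 2, 8, 16, 100])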
n = int(input())
integers = list(map(int, input().split(' ')))
for integer in integers:
    if check_t_prime(integer):
print('YES')
else:
print('NO')
|
"""
Created on Mar 27, 2018
@author: lubo
"""
import pytest
import numpy as np
from dae.utils.regions import Region
@pytest.mark.parametrize(
"region,worst_effect",
[
(Region("1", 878109, 878109), ("missense", "missense")),
(Region("1", 901921, 901921), ("synonymous", "missense")),
(Region("1", 905956, 905956), ("frame-shift", "missense")),
],
)
def test_multi_alt_allele_genotype(variants_vcf, region, worst_effect):
fvars = variants_vcf("backends/effects_trio")
vs = list(fvars.query_variants(regions=[region]))
assert len(vs) == 1
for v in vs:
assert v.gt.shape == (2, 3)
assert np.all(v.gt[:, 0] == np.array([0, 1]))
assert np.all(v.gt[:, 2] == np.array([0, 1]))
assert len(v.genotype) == 3
assert v.genotype[0] == [0, 1]
assert v.genotype[1] == [0, 0]
assert v.genotype[2] == [0, 1]
@pytest.mark.parametrize(
"region,worst_effect",
[
(Region("1", 878109, 878109), ("missense", "missense")),
(Region("1", 901921, 901921), ("synonymous", "missense")),
(Region("1", 905956, 905956), ("frame-shift", "missense")),
],
)
def test_multi_alt_allele_genotype2(variants_vcf, region, worst_effect):
fvars = variants_vcf("backends/effects_trio_multi")
vs = list(fvars.query_variants(regions=[region]))
assert len(vs) == 1
for v in vs:
assert len(v.genotype) == 3
assert v.genotype[0] == [0, 1]
assert v.genotype[1] == [0, 2]
assert v.genotype[2] == [1, 2]
@pytest.mark.parametrize(
"region,gt",
[
(Region("1", 11500, 11500), np.array([[0, 1], [0, 0], [0, 0]])),
(Region("1", 11501, 11501), np.array([[0, 2], [0, 0], [0, 0]])),
(Region("1", 11502, 11502), np.array([[0, 0], [0, 0], [0, 0]])),
(Region("1", 11503, 11503), np.array([[0, -1], [0, 0], [0, 0]])),
(Region("1", 11504, 11504), np.array([[0, 1], [0, 2], [0, 0]])),
(Region("1", 11505, 11505), np.array([[0, 1], [0, 2], [0, 3]])),
],
)
def test_trios_multi_alt_allele_genotype2(variants_vcf, region, gt):
fvars = variants_vcf("backends/trios_multi")
vs = list(
fvars.query_variants(
regions=[region], return_reference=True, return_unknown=True
)
)
assert len(vs) == 1
for v in vs:
assert len(v.genotype) == 3
assert v.genotype[0] == list(gt[0, :])
assert v.genotype[1] == list(gt[1, :])
assert v.genotype[2] == list(gt[2, :])
|
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import re
import unittest
from datetime import datetime
from urllib.parse import parse_qs
from bs4 import BeautifulSoup
from airflow.www import utils
from airflow.www.utils import wrapped_markdown
class TestUtils(unittest.TestCase):
def check_generate_pages_html(self, current_page, total_pages, window=7, check_middle=False):
extra_links = 4 # first, prev, next, last
search = "'>\"/><img src=x onerror=alert(1)>"
html_str = utils.generate_pages(current_page, total_pages, search=search)
assert search not in html_str, "The raw search string shouldn't appear in the output"
assert 'search=%27%3E%22%2F%3E%3Cimg+src%3Dx+onerror%3Dalert%281%29%3E' in html_str
assert callable(html_str.__html__), "Should return something that is HTML-escaping aware"
dom = BeautifulSoup(html_str, 'html.parser')
assert dom is not None
ulist = dom.ul
ulist_items = ulist.find_all('li')
assert min(window, total_pages) + extra_links == len(ulist_items)
page_items = ulist_items[2:-2]
mid = int(len(page_items) / 2)
for i, item in enumerate(page_items):
a_node = item.a
href_link = a_node['href']
node_text = a_node.string
if node_text == str(current_page + 1):
if check_middle:
assert mid == i
assert 'javascript:void(0)' == href_link
assert 'active' in item['class']
else:
assert re.search(r'^\?', href_link), 'Link is page-relative'
query = parse_qs(href_link[1:])
assert query['page'] == [str(int(node_text) - 1)]
assert query['search'] == [search]
def test_generate_pager_current_start(self):
self.check_generate_pages_html(current_page=0, total_pages=6)
def test_generate_pager_current_middle(self):
self.check_generate_pages_html(current_page=10, total_pages=20, check_middle=True)
def test_generate_pager_current_end(self):
self.check_generate_pages_html(current_page=38, total_pages=39)
def test_params_no_values(self):
"""Should return an empty string if no params are passed"""
assert '' == utils.get_params()
def test_params_search(self):
assert 'search=bash_' == utils.get_params(search='bash_')
def test_params_none_and_zero(self):
query_str = utils.get_params(a=0, b=None, c='true')
# The order won't be consistent, but that doesn't affect behaviour of a browser
pairs = list(sorted(query_str.split('&')))
assert ['a=0', 'c=true'] == pairs
def test_params_all(self):
query = utils.get_params(tags=['tag1', 'tag2'], status='active', page=3, search='bash_')
assert {
'tags': ['tag1', 'tag2'],
'page': ['3'],
'search': ['bash_'],
'status': ['active'],
} == parse_qs(query)
def test_params_escape(self):
assert 'search=%27%3E%22%2F%3E%3Cimg+src%3Dx+onerror%3Dalert%281%29%3E' == utils.get_params(
search="'>\"/><img src=x onerror=alert(1)>"
)
def test_state_token(self):
        # It shouldn't be possible to set these odd values anymore, but let's
        # ensure they are escaped!
html = str(utils.state_token('<script>alert(1)</script>'))
assert '<script>alert(1)</script>' in html
assert '<script>alert(1)</script>' not in html
def test_task_instance_link(self):
from airflow.www.app import cached_app
with cached_app(testing=True).test_request_context():
html = str(
utils.task_instance_link(
{'dag_id': '<a&1>', 'task_id': '<b2>', 'execution_date': datetime.now()}
)
)
assert '%3Ca%261%3E' in html
assert '%3Cb2%3E' in html
assert '<a&1>' not in html
assert '<b2>' not in html
def test_dag_link(self):
from airflow.www.app import cached_app
with cached_app(testing=True).test_request_context():
html = str(utils.dag_link({'dag_id': '<a&1>', 'execution_date': datetime.now()}))
assert '%3Ca%261%3E' in html
assert '<a&1>' not in html
def test_dag_link_when_dag_is_none(self):
"""Test that when there is no dag_id, dag_link does not contain hyperlink"""
from airflow.www.app import cached_app
with cached_app(testing=True).test_request_context():
html = str(utils.dag_link({}))
assert 'None' in html
assert '<a href=' not in html
def test_dag_run_link(self):
from airflow.www.app import cached_app
with cached_app(testing=True).test_request_context():
html = str(
utils.dag_run_link({'dag_id': '<a&1>', 'run_id': '<b2>', 'execution_date': datetime.now()})
)
assert '%3Ca%261%3E' in html
assert '%3Cb2%3E' in html
assert '<a&1>' not in html
assert '<b2>' not in html
class TestAttrRenderer(unittest.TestCase):
def setUp(self):
self.attr_renderer = utils.get_attr_renderer()
def test_python_callable(self):
def example_callable(unused_self):
print("example")
rendered = self.attr_renderer["python_callable"](example_callable)
assert '"example"' in rendered
def test_python_callable_none(self):
rendered = self.attr_renderer["python_callable"](None)
assert "" == rendered
def test_markdown(self):
markdown = "* foo\n* bar"
rendered = self.attr_renderer["doc_md"](markdown)
assert "<li>foo</li>" in rendered
assert "<li>bar</li>" in rendered
def test_markdown_none(self):
rendered = self.attr_renderer["python_callable"](None)
assert "" == rendered
class TestWrappedMarkdown(unittest.TestCase):
def test_wrapped_markdown_with_docstring_curly_braces(self):
rendered = wrapped_markdown("{braces}", css_class="a_class")
assert (
'''<div class="a_class" ><p>{braces}</p>
</div>'''
== rendered
)
def test_wrapped_markdown_with_some_markdown(self):
rendered = wrapped_markdown(
"""*italic*
**bold**
""",
css_class="a_class",
)
assert (
'''<div class="a_class" ><p><em>italic</em>
<strong>bold</strong></p>
</div>'''
== rendered
)
def test_wrapped_markdown_with_table(self):
rendered = wrapped_markdown(
"""
| Job | Duration |
| ----------- | ----------- |
| ETL | 14m |
"""
)
assert (
'''<div class="rich_doc" ><table>
<thead>
<tr>
<th>Job</th>
<th>Duration</th>
</tr>
</thead>
<tbody>
<tr>
<td>ETL</td>
<td>14m</td>
</tr>
</tbody>
</table>
</div>'''
== rendered
)
def test_wrapped_markdown_with_indented_lines(self):
rendered = wrapped_markdown(
"""
# header
1st line
2nd line
"""
)
assert (
'''<div class="rich_doc" ><h1>header</h1>\n<p>1st line\n2nd line</p>
</div>'''
== rendered
)
def test_wrapped_markdown_with_raw_code_block(self):
rendered = wrapped_markdown(
"""\
# Markdown code block
Inline `code` works well.
Code block
does not
respect
newlines
"""
)
assert (
'''<div class="rich_doc" ><h1>Markdown code block</h1>
<p>Inline <code>code</code> works well.</p>
<pre><code>Code block\ndoes not\nrespect\nnewlines\n</code></pre>
</div>'''
== rendered
)
def test_wrapped_markdown_with_nested_list(self):
rendered = wrapped_markdown(
"""
### Docstring with a code block
- And
- A nested list
"""
)
assert (
'''<div class="rich_doc" ><h3>Docstring with a code block</h3>
<ul>
<li>And
<ul>
<li>A nested list</li>
</ul>
</li>
</ul>
</div>'''
== rendered
)
def test_wrapped_markdown_with_collapsible_section(self):
rendered = wrapped_markdown(
"""
# A collapsible section with markdown
<details>
<summary>Click to expand!</summary>
## Heading
1. A numbered
2. list
* With some
* Sub bullets
</details>
"""
)
assert (
'''<div class="rich_doc" ><h1>A collapsible section with markdown</h1>
<details>
<summary>Click to expand!</summary>
<h2>Heading</h2>
<ol>
<li>A numbered</li>
<li>list
<ul>
<li>With some</li>
<li>Sub bullets</li>
</ul>
</li>
</ol>
</details>
</div>'''
== rendered
)
|
import random
import cv2
import numpy as np
'''
COCO order:
0: "nose",
1: "left_eye",
2: "right_eye",
3: "left_ear",
4: "right_ear",
5: "left_shoulder",
6: "right_shoulder",
7: "left_elbow",
8: "right_elbow",
9: "left_wrist",
10: "right_wrist",
11: "left_hip",
12: "right_hip",
13: "left_knee",
14: "right_knee",
15: "left_ankle",
16: "right_ankle"
Internal Order:
0: 'nose',
1: 'neck',
2: 'right_shoulder',
3: 'right_elbow',
4: 'right_wrist',
5: 'left_shoulder',
6: 'left_elbow',
7: 'left_wrist',
8: 'right_hip',
9: 'right_knee',
10: 'right_ankle',
11: 'left_hip',
12: 'left_knee',
13: 'left_ankle',
14: 'right_eye',
15: 'left_eye',
16: 'right_ear',
17: 'left_ear'
'''
class ConvertKeypoints:
def __call__(self, sample):
label = sample['label']
h, w, _ = sample['image'].shape
keypoints = label['keypoints']
for keypoint in keypoints: # keypoint[2] == 0: occluded, == 1: visible, == 2: not in image
if keypoint[0] == keypoint[1] == 0:
keypoint[2] = 2
if (keypoint[0] < 0
or keypoint[0] >= w
or keypoint[1] < 0
or keypoint[1] >= h):
keypoint[2] = 2
for other_label in label['processed_other_annotations']:
keypoints = other_label['keypoints']
for keypoint in keypoints:
if keypoint[0] == keypoint[1] == 0:
keypoint[2] = 2
if (keypoint[0] < 0
or keypoint[0] >= w
or keypoint[1] < 0
or keypoint[1] >= h):
keypoint[2] = 2
label['keypoints'] = self._convert(label['keypoints'], w, h)
for other_label in label['processed_other_annotations']:
other_label['keypoints'] = self._convert(other_label['keypoints'], w, h)
return sample
def _convert(self, keypoints, w, h):
# Nose, Neck, R hand, L hand, R leg, L leg, Eyes, Ears
reorder_map = [1, 7, 9, 11, 6, 8, 10, 13, 15, 17, 12, 14, 16, 3, 2, 5, 4]
converted_keypoints = list(keypoints[i - 1] for i in reorder_map)
converted_keypoints.insert(1, [(keypoints[5][0] + keypoints[6][0]) / 2,
(keypoints[5][1] + keypoints[6][1]) / 2, 0]) # Add neck as a mean of shoulders
if keypoints[5][2] == 2 or keypoints[6][2] == 2:
converted_keypoints[1][2] = 2
elif keypoints[5][2] == 1 and keypoints[6][2] == 1:
converted_keypoints[1][2] = 1
if (converted_keypoints[1][0] < 0
or converted_keypoints[1][0] >= w
or converted_keypoints[1][1] < 0
or converted_keypoints[1][1] >= h):
converted_keypoints[1][2] = 2
return converted_keypoints
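# Worked example of ConvertKeypoints._convert's mapping (reorder_map entries
# are 1-based COCO indices): converted[0] = keypoints[0] ('nose'); the neck
# is then inserted at slot 1 as the shoulder midpoint, so COCO
# 'right_shoulder' (index 6) lands at internal index 2, matching the two
# tables in the module docstring.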
class Scale:
def __init__(self, prob=1, min_scale=0.5, max_scale=1.1, target_dist=0.6):
self._prob = prob
self._min_scale = min_scale
self._max_scale = max_scale
self._target_dist = target_dist
def __call__(self, sample):
prob = random.random()
scale_multiplier = 1
if prob <= self._prob:
prob = random.random()
scale_multiplier = (self._max_scale - self._min_scale) * prob + self._min_scale
label = sample['label']
scale_abs = self._target_dist / label['scale_provided']
scale = scale_abs * scale_multiplier
sample['image'] = cv2.resize(sample['image'], dsize=(0, 0), fx=scale, fy=scale)
label['img_height'], label['img_width'], _ = sample['image'].shape
sample['mask'] = cv2.resize(sample['mask'], dsize=(0, 0), fx=scale, fy=scale)
label['objpos'][0] *= scale
label['objpos'][1] *= scale
for keypoint in sample['label']['keypoints']:
keypoint[0] *= scale
keypoint[1] *= scale
for other_annotation in sample['label']['processed_other_annotations']:
other_annotation['objpos'][0] *= scale
other_annotation['objpos'][1] *= scale
for keypoint in other_annotation['keypoints']:
keypoint[0] *= scale
keypoint[1] *= scale
return sample
class Rotate:
def __init__(self, pad, max_rotate_degree=40):
self._pad = pad
self._max_rotate_degree = max_rotate_degree
def __call__(self, sample):
prob = random.random()
degree = (prob - 0.5) * 2 * self._max_rotate_degree
h, w, _ = sample['image'].shape
img_center = (w / 2, h / 2)
R = cv2.getRotationMatrix2D(img_center, degree, 1)
abs_cos = abs(R[0, 0])
abs_sin = abs(R[0, 1])
bound_w = int(h * abs_sin + w * abs_cos)
bound_h = int(h * abs_cos + w * abs_sin)
dsize = (bound_w, bound_h)
R[0, 2] += dsize[0] / 2 - img_center[0]
R[1, 2] += dsize[1] / 2 - img_center[1]
sample['image'] = cv2.warpAffine(sample['image'], R, dsize=dsize,
borderMode=cv2.BORDER_CONSTANT, borderValue=self._pad)
sample['label']['img_height'], sample['label']['img_width'], _ = sample['image'].shape
sample['mask'] = cv2.warpAffine(sample['mask'], R, dsize=dsize,
borderMode=cv2.BORDER_CONSTANT, borderValue=(1, 1, 1)) # border is ok
label = sample['label']
label['objpos'] = self._rotate(label['objpos'], R)
for keypoint in label['keypoints']:
point = [keypoint[0], keypoint[1]]
point = self._rotate(point, R)
keypoint[0], keypoint[1] = point[0], point[1]
for other_annotation in label['processed_other_annotations']:
for keypoint in other_annotation['keypoints']:
point = [keypoint[0], keypoint[1]]
point = self._rotate(point, R)
keypoint[0], keypoint[1] = point[0], point[1]
return sample
def _rotate(self, point, R):
return [R[0, 0] * point[0] + R[0, 1] * point[1] + R[0, 2],
R[1, 0] * point[0] + R[1, 1] * point[1] + R[1, 2]]
class CropPad:
def __init__(self, pad, center_perterb_max=40, crop_x=368, crop_y=368):
self._pad = pad
self._center_perterb_max = center_perterb_max
self._crop_x = crop_x
self._crop_y = crop_y
def __call__(self, sample):
prob_x = random.random()
prob_y = random.random()
offset_x = int((prob_x - 0.5) * 2 * self._center_perterb_max)
offset_y = int((prob_y - 0.5) * 2 * self._center_perterb_max)
label = sample['label']
shifted_center = (label['objpos'][0] + offset_x, label['objpos'][1] + offset_y)
offset_left = -int(shifted_center[0] - self._crop_x / 2)
offset_up = -int(shifted_center[1] - self._crop_y / 2)
cropped_image = np.empty(shape=(self._crop_y, self._crop_x, 3), dtype=np.uint8)
for i in range(3):
cropped_image[:, :, i].fill(self._pad[i])
cropped_mask = np.empty(shape=(self._crop_y, self._crop_x), dtype=np.uint8)
cropped_mask.fill(1)
image_x_start = int(shifted_center[0] - self._crop_x / 2)
image_y_start = int(shifted_center[1] - self._crop_y / 2)
image_x_finish = image_x_start + self._crop_x
image_y_finish = image_y_start + self._crop_y
crop_x_start = 0
crop_y_start = 0
crop_x_finish = self._crop_x
crop_y_finish = self._crop_y
w, h = label['img_width'], label['img_height']
should_crop = True
if image_x_start < 0: # Adjust crop area
crop_x_start -= image_x_start
image_x_start = 0
if image_x_start >= w:
should_crop = False
if image_y_start < 0:
crop_y_start -= image_y_start
image_y_start = 0
        if image_y_start >= h:
should_crop = False
if image_x_finish > w:
diff = image_x_finish - w
image_x_finish -= diff
crop_x_finish -= diff
if image_x_finish < 0:
should_crop = False
if image_y_finish > h:
diff = image_y_finish - h
image_y_finish -= diff
crop_y_finish -= diff
if image_y_finish < 0:
should_crop = False
if should_crop:
cropped_image[crop_y_start:crop_y_finish, crop_x_start:crop_x_finish, :] =\
sample['image'][image_y_start:image_y_finish, image_x_start:image_x_finish, :]
cropped_mask[crop_y_start:crop_y_finish, crop_x_start:crop_x_finish] =\
sample['mask'][image_y_start:image_y_finish, image_x_start:image_x_finish]
sample['image'] = cropped_image
sample['mask'] = cropped_mask
label['img_width'] = self._crop_x
label['img_height'] = self._crop_y
label['objpos'][0] += offset_left
label['objpos'][1] += offset_up
for keypoint in label['keypoints']:
keypoint[0] += offset_left
keypoint[1] += offset_up
for other_annotation in label['processed_other_annotations']:
for keypoint in other_annotation['keypoints']:
keypoint[0] += offset_left
keypoint[1] += offset_up
return sample
def _inside(self, point, width, height):
if point[0] < 0 or point[1] < 0:
return False
if point[0] >= width or point[1] >= height:
return False
return True
class Flip:
def __init__(self, prob=0.5):
self._prob = prob
def __call__(self, sample):
prob = random.random()
do_flip = prob <= self._prob
if not do_flip:
return sample
sample['image'] = cv2.flip(sample['image'], 1)
sample['mask'] = cv2.flip(sample['mask'], 1)
label = sample['label']
w, h = label['img_width'], label['img_height']
label['objpos'][0] = w - 1 - label['objpos'][0]
for keypoint in label['keypoints']:
keypoint[0] = w - 1 - keypoint[0]
label['keypoints'] = self._swap_left_right(label['keypoints'])
for other_annotation in label['processed_other_annotations']:
other_annotation['objpos'][0] = w - 1 - other_annotation['objpos'][0]
for keypoint in other_annotation['keypoints']:
keypoint[0] = w - 1 - keypoint[0]
other_annotation['keypoints'] = self._swap_left_right(other_annotation['keypoints'])
return sample
def _swap_left_right(self, keypoints):
right = [2, 3, 4, 8, 9, 10, 14, 16]
left = [5, 6, 7, 11, 12, 13, 15, 17]
for r, l in zip(right, left):
keypoints[r], keypoints[l] = keypoints[l], keypoints[r]
return keypoints
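# Hypothetical composition sketch (names assumed, not from this file): the
# transforms all operate on the same sample dict, so they can be chained:
#   pipeline = [ConvertKeypoints(), Scale(), Rotate(pad=(128, 128, 128)),
#               CropPad(pad=(128, 128, 128)), Flip()]
#   for t in pipeline:
#       sample = t(sample)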
|
import os
import flanautils
os.environ |= flanautils.find_environment_variables('../.env')
import sys
import uvicorn
from fastapi import FastAPI
import flanaapis.geolocation.routes
import flanaapis.scraping.routes
import flanaapis.weather.routes
sub_app = FastAPI()
sub_app.include_router(flanaapis.geolocation.routes.router)
sub_app.include_router(flanaapis.scraping.routes.router)
sub_app.include_router(flanaapis.weather.routes.router)
app = FastAPI()
app.mount('/flanaapis', sub_app)
if __name__ == '__main__':
try:
host = sys.argv[1]
except IndexError:
host = os.environ.get('FLANAAPIS_HOST')
try:
port = sys.argv[2]
except IndexError:
port = os.environ.get('FLANAAPIS_PORT')
uvicorn.run('main:app', host=host, port=int(port))
|
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.nn import Module
from typing import Tuple
from ProcessData.NormData import *
class LSTMCell(nn.Module):
def __init__(self, input_size, hidden_size):
super(LSTMCell, self).__init__()
self.input_size = input_size
self.hidden_size = hidden_size
self.input_to_ingate = nn.Linear(input_size, hidden_size)
self.input_to_forgetgate = nn.Linear(input_size, hidden_size)
self.input_to_cellgate = nn.Linear(input_size, hidden_size)
self.input_to_outgate = nn.Linear(input_size, hidden_size)
self.hidden_to_ingate = nn.Linear(hidden_size, hidden_size, bias=False)
self.hidden_to_forgetgate = nn.Linear(hidden_size, hidden_size, bias=False)
self.hidden_to_cellgate = nn.Linear(hidden_size, hidden_size, bias=False)
self.hidden_to_outgate = nn.Linear(hidden_size, hidden_size, bias=False)
    def forward(self, inp:torch.Tensor, hx:torch.Tensor, cx:torch.Tensor) -> Tuple[torch.Tensor, torch.Tensor, torch.Tensor]:
ingate = self.input_to_ingate(inp) + self.hidden_to_ingate(hx)
forgetgate = self.input_to_forgetgate(inp) + self.hidden_to_forgetgate(hx)
cellgate = self.input_to_cellgate(inp) + self.hidden_to_cellgate(hx)
outgate = self.input_to_outgate(inp) + self.hidden_to_outgate(hx)
ingate = torch.sigmoid(ingate)
forgetgate = torch.sigmoid(forgetgate)
cellgate = torch.tanh(cellgate)
outgate = torch.sigmoid(outgate)
cy = (forgetgate * cx) + (ingate * cellgate)
hy = outgate * torch.tanh(cy)
return hy, hy, cy
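# Hypothetical usage sketch (not part of the original file): one step of the cell.
#   cell = LSTMCell(input_size=8, hidden_size=16)
#   x, h, c = torch.zeros(4, 8), torch.zeros(4, 16), torch.zeros(4, 16)
#   out, h, c = cell(x, h, c)  # forward returns (output, new_hidden, new_cell)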
class MHUCell(nn.Module):
def __init__(self, input_size, hidden_size):
super(MHUCell, self).__init__()
assert (input_size == hidden_size)
        self.Wz = nn.Linear(input_size, hidden_size)
self.Wv = nn.Linear(hidden_size, hidden_size)
self.Uvh = nn.Linear(hidden_size, hidden_size)
    def forward(self, inp:torch.Tensor, hx:torch.Tensor) -> Tuple[torch.Tensor, torch.Tensor]:
v = self.Wv(torch.relu(self.Uvh(hx)))
z = torch.sigmoid(self.Wz(torch.relu(inp)))
out = (1-z) * v + z * inp
return out, v
class GRUCell(nn.Module):
def __init__(self, input_size, hidden_size):
super(GRUCell, self).__init__()
self.input_size = input_size
self.hidden_size = hidden_size
self.input_to_reset = nn.Linear(input_size, hidden_size)
self.input_to_update = nn.Linear(input_size, hidden_size)
self.input_to_candidate = nn.Linear(input_size, hidden_size)
self.hidden_to_reset = nn.Linear(hidden_size, hidden_size)
self.hidden_to_update = nn.Linear(hidden_size, hidden_size)
self.outhidden_to_candidate = nn.Linear(hidden_size, hidden_size)
    def forward(self, inp:torch.Tensor, hx:torch.Tensor) -> Tuple[torch.Tensor, torch.Tensor]:
resetgate = torch.sigmoid(self.input_to_reset(inp) + self.hidden_to_reset(hx))
updategate = torch.sigmoid(self.input_to_update(inp) + self.hidden_to_update(hx))
candidategate = torch.tanh(self.input_to_candidate(inp) + self.outhidden_to_candidate(resetgate * hx))
hiddengate = (1-updategate) * hx + updategate * candidategate
return hiddengate, hiddengate
class HighFrequency(Module):
def __init__(self, latentDim:int, featureDim:int, feature_red:int = 10, latent_red:int = 512):
super(HighFrequency, self).__init__()
self.latentDim = latentDim
self.featureDim = featureDim
self.latent_red = latent_red
self.feature_red = feature_red
self.Time_embedding_add = nn.Linear(3, 3 * self.latentDim + 3)
self.LL_in = nn.Linear(3 * self.latentDim + 3, self.latent_red)
self.LSTM = LSTMCell(self.latent_red , self.latent_red)
self.LL_out = nn.Linear(self.latent_red, self.latentDim)
    def forward(self, feature:torch.Tensor, lastlatent:torch.Tensor, nextlatent:torch.Tensor, time:torch.Tensor, h1:torch.Tensor, h2:torch.Tensor, current_state:torch.Tensor) -> Tuple[torch.Tensor, torch.Tensor, torch.Tensor, torch.Tensor]:
t = time[...,1]
time[...,1] = (t - Format.epRange[0]) / (Format.epRange[1] - Format.epRange[0])
t = time[...,0].unsqueeze(-1)
time = torch.cat((time,t), dim=-1)
inp = torch.cat((current_state, nextlatent, nextlatent - current_state, time), dim=-1) + self.Time_embedding_add(time)
in1 = torch.tanh(self.LL_in(inp))
out1, h1o, h2o = self.LSTM(in1, h1, h2)
correction = self.LL_out(out1)
current_state_up = current_state + correction
return current_state_up, h1o, h2o, current_state_up
def export(self, folder = "ONNX_networks", file = "HF"):
torch.onnx.export(self,
(torch.zeros(1, self.featureDim), torch.zeros(1, self.latentDim), torch.zeros(1, self.latentDim), torch.zeros(1, 2), torch.zeros(1, self.latent_red), torch.zeros(1, self.latent_red), torch.zeros(1, self.latent_red)),
"{}/{}.onnx".format(folder, file),
verbose=True,
input_names= ['Feature', 'LatentLast', 'LatentNext', 'Time_Frames', 'hidden1_in', 'hidden2_in', 'hidden3_in'],
output_names= ['Output', 'hidden1_out', 'hidden2_out', 'hidden3_out'],
export_params=True,
opset_version=10) |
from math import ceil
from ea import individual, musicPlayer, modelTrainer, initialisation, crossover, mutation, fitness, selection, \
constants, metrics, modelUpdater
from ea.individual import Individual
import random
import time
import sys
class Simulation:
pitch_matrix = None
backoff_matrix = None
duration_matrix = None
simulation = None
def __init__(self, duration_matrix=None, pitch_matrix=None):
if self.simulation is not None:
print('Two instances of simulation, killing one')
self.duration_matrix = duration_matrix
self.pitch_matrix = pitch_matrix
self.population: [Individual] = []
self.elitist_population: [Individual] = []
self.simulation = self
def run(self, pitch_matrix, duration_matrix, backoff_matrix):
print('Starting generation')
if pitch_matrix is None:
self.pitch_matrix = modelTrainer.train_pitch_matrix(None)
if duration_matrix is None:
self.duration_matrix = modelTrainer.train_duration_matrix(None)
if backoff_matrix is None:
self.backoff_matrix = modelTrainer.get_backoff_matrix()
initialisation.pitch_matrix = self.pitch_matrix
initialisation.duration_matrix = self.duration_matrix
initialisation.backoff_matrix = self.backoff_matrix
print('Initializing population')
self.population = initialisation.initialize_population(constants.POPULATION_SIZE)
converged_counter = 0.0
converged_iteration = -1
for i in range(constants.ITERATIONS):
self.population.sort(key=lambda x: x.fitness)
self.elitist_population = self.population[0:constants.ELITISM_SIZE]
next_generation = []
if constants.SYSTEM == "GA" or constants.SYSTEM == "HYBRID":
random.shuffle(self.population)
if constants.CROSSOVER == "NONE":
self.mutation_only()
next_generation.extend(self.population)
else:
crossover_generation = self.crossover_mutation()
crossover_generation.sort(key=lambda x: x.fitness)
if constants.SYSTEM == "HYBRID":
next_generation.extend(crossover_generation[0:constants.CROSSOVER_POPULATION])
else:
next_generation.extend(crossover_generation)
if constants.SYSTEM == "MODEL" or constants.SYSTEM == "HYBRID":
# Elitism
next_generation.extend(self.elitist_population)
sel = self.population[0:constants.SELECTION_SIZE]
if constants.LEARNING_RATE != 0.0:
self.update_matrices(sel)
if constants.SYSTEM == "HYBRID":
next_generation.extend(initialisation.initialize_population(constants.MODEL_POPULATION))
else:
next_generation.extend(
initialisation.initialize_population(constants.POPULATION_SIZE))
next_generation.sort(key=lambda x: x.fitness)
next_generation = next_generation[0:constants.POPULATION_SIZE]
self.population = next_generation
# Metrics
            if constants.SYSTEM != "MULTIPLE" and constants.METRIC_MODE != "ALL":
metrics.write_population_metrics(i, self.population)
if constants.METRIC_MODE == "ALL":
metrics.write_individual_metrics(i, population=self.population)
if i % 25 == 0:
print(f"Iteration {i} done")
print(f'Highest fitness: {self.population[0].fitness}')
sys.stdout.flush()
self.population.sort(key=lambda x: x.fitness)
if constants.RUN_MODE == 'MULTIPLE':
metrics.write_average_runs(converged_iteration, self.population)
if constants.SYSTEM != 'GA':
metrics.write_matrices(self.pitch_matrix, self.backoff_matrix, self.duration_matrix)
play_pieces = [self.population[0], self.population[ceil(len(self.population) / 2)], self.population[-1]]
musicPlayer.write_music_midi(play_pieces)
if constants.RUN_MODE == "SINGLE":
print('-------------------------------------------------')
print('Done evolving, playing songs')
print(f'Population size: {constants.POPULATION_SIZE}')
print(f'Elitist population size: {len(self.elitist_population)}')
print(f'Tournament size: {constants.TOURNAMENT_SIZE}')
print(f'Iterations: {constants.ITERATIONS}')
print(f'Model updating: None, ratio = N/A')
sys.stdout.flush()
def crossover_mutation(self):
next_generation = []
random.shuffle(self.population)
for j in range(1, len(self.population), 2):
family = []
p1 = self.population[j - 1]
p2 = self.population[j]
c1, c2 = crossover.measure_crossover(p1, p2)
mutation.applyMutation(c1, self.elitist_population)
mutation.applyMutation(c2, self.elitist_population)
fitness.set_fitness(c1)
fitness.set_fitness(c2)
            family.extend([c1, c2, p1, p2])
            # Children compete with their parents: keep the two fittest of the four.
            family.sort(key=lambda x: x.fitness)
            next_generation.extend(family[0:2])
return next_generation
|
import unittest
from c_largest_prime_factor import *
class TestLargestPrimeFactor(unittest.TestCase):
def test_problem_statement(self):
self.assertEqual(29, solve(13195))
def test_given(self):
self.assertEqual(5, solve(10))
self.assertEqual(17, solve(17))
def test_one(self):
self.assertEqual(1, solve(1))
if __name__ == '__main__':
unittest.main() |
from bubble_sort import Solution
example = Solution()
def test_single_sort_c1():
assert example.single_sort([4, 3, 2, 1], 4) == [3, 2, 1, 4]
def test_single_sort_c2():
assert example.single_sort([2, 4, 3, 1], 4) == [2, 3, 1, 4]
def test_single_sort_c3():
assert example.single_sort([3, 1, 2, 4], 4) == [1, 2, 3, 4]
def test_single_sort_c4():
assert example.single_sort([3, 2, 1, 4], 4) == [2, 1, 3, 4]
def test_complete_sort_c1():
assert example.complete_sort([4, 3, 2, 1]) == [1, 2, 3, 4]
def test_complete_sort_c2():
assert example.complete_sort([2, 4, 3, 1]) == [1, 2, 3, 4]
def test_complete_sort_c3():
assert example.complete_sort([3, 1, 2, 4]) == [1, 2, 3, 4]
def test_complete_sort_c4():
assert example.complete_sort([3, 2, 1, 4]) == [1, 2, 3, 4]
|
import os
import json
import time
import datetime
from flask import Flask
from flask_redis import FlaskRedis
import requests
from .twilio_utils import send_runair_sms
# See .env.example for required environment variables
from dotenv import load_dotenv
load_dotenv()
app = Flask(__name__)
app.config['REDIS_URL'] = os.getenv("REDIS_URL")
redis_client = FlaskRedis(app, decode_responses=True)
AREAS = json.load(open("data/areas.json"))
try:
ACCEPTABLE_AQI = int(os.getenv("ACCEPTABLE_AQI", 100))
except ValueError:
ACCEPTABLE_AQI = 100
try:
GOOD_AQI = int(os.getenv("GOOD_AQI", 50))
except ValueError:
GOOD_AQI = 50
NOTIFICATION_INTERVAL_S = 60 * 60 * 24
ALERTS = [
{
'threshold': GOOD_AQI,
'key': 'last-notified-good',
'message': "AQI at {} is now {} 💚 (US EPA)! Green means GOOOO 🟢 \n{}\n{}",
},
{
'threshold': ACCEPTABLE_AQI,
'key': 'last-notified',
'message': "AQI at {} is now {} 💛 (US EPA)! Please still exercise caution!\n{}\n{}",
},
]
# From Purple
# r.prototype.aqiFromPM = function(t) {
# return t < 0 ? t : 350.5 < t ? n(t, 500, 401, 500, 350.5) : 250.5 < t ? n(t, 400, 301, 350.4, 250.5) : 150.5 < t ? n(t, 300, 201, 250.4, 150.5) : 55.5 < t ? n(t, 200, 151, 150.4, 55.5) : 35.5 < t ? n(t, 150, 101, 55.4, 35.5) : 12.1 < t ? n(t, 100, 51, 35.4, 12.1) : 0 <= t ? n(t, 50, 0, 12, 0) : -1
# }
# ,
# https://github.com/SANdood/homebridge-purpleair/blob/master/index.js#L178
PM_AND_AQI_RANGES = {
350.5: [350.5, 500, 401, 500],
250.5: [250.5, 350.4, 301, 400],
150.5: [150.5, 250.4, 201, 300],
55.5: [55.5, 150.4, 151, 200],
35.5: [35.5, 55.4, 101, 150],
12.0: [12.0, 35.4, 51, 100],
0.0: [0.0, 11.9, 1, 50],
}
def pm_to_aqi(pm_val, pm_low, pm_high, aqi_low, aqi_high):
"""PM2.5 reading to AQI via https://forum.airnowtech.org/t/the-aqi-equation/169"""
aqi_range = aqi_high - aqi_low
pm_range = pm_high - pm_low
scale_factor = aqi_range / pm_range
return (pm_val - pm_low) * scale_factor + aqi_low
# TODO 2020-09-15 Add some tests for this using known values
# See https://github.com/skalnik/aqi-wtf/blob/450ffb9163f840e101ee50e8ec7f658f99e5712a/app.js#L233
def calculate_aqi(pm):
"""PM2.5 reading to AQI via The AQI equation https://forum.airnowtech.org/t/the-aqi-equation/169"""
if pm > 500:
return 500
    for pm_range_low in sorted(PM_AND_AQI_RANGES, reverse=True):
if pm >= pm_range_low:
return pm_to_aqi(pm, *PM_AND_AQI_RANGES[pm_range_low])
return 0.0
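# Hypothetical sanity checks for calculate_aqi (per the TODO above), using
# breakpoint values from the AQI equation table; not in the original file:
#   assert int(calculate_aqi(35.4)) == 100  # top of the 51-100 band
#   assert int(calculate_aqi(55.4)) == 150  # top of the 101-150 band
#   assert calculate_aqi(501) == 500        # readings above 500 are capped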
def to_aqandu(val):
return .778 * val + 2.65
# From https://www.reddit.com/r/PurpleAir/comments/irs1j7/any_api_usage_tips_examples/
def to_lrapa(val):
return 0.5 * val - 0.66
# From PurpleAir Site
# See Paper: https://cfpub.epa.gov/si/si_public_record_report.cfm?dirEntryId=349513&Lab=CEMM&simplesearch=0&showcriteria=2&sortby=pubDate&timstype=&datebeginpublishedpresented=08/25/2018
def to_us_epa(pm25_cf1, humidity):
return 0.534 * pm25_cf1 - 0.0844 * humidity + 5.604
def add_number_for_areas(number, areas):
for area in areas:
        if area not in AREAS:
            print("Bad area: {}".format(area))
            continue
        redis_client.sadd(area, number)
print("Added {} for area {}".format(number, area))
send_runair_sms(
number,
"Welcome to Runair 🟢🏃🏻♀️! You're all set to receive alerts the first time the AQI drops below {}💛 and {}💚 each 24-hour period (according to LRAPA conversion, powered by PurpleAir) for the following areas:\n{}".format(
ACCEPTABLE_AQI, GOOD_AQI, '\n'.join(areas)
)
)
def poll_air_and_notify():
for area_name, area in AREAS.items():
sensor_ids = area['sensors']
notify_numbers = redis_client.smembers(area_name)
area_aqis = {}
for sensor_id in sensor_ids:
# TODO 2020-09-17 Check timestamps for offline sensors!
url_to_poll = "https://www.purpleair.com/json?show={}".format(sensor_id)
resp = requests.get(url_to_poll)
if resp.status_code != 200:
print("Couldn't get AQI info from Purple for sensor {}".format(sensor_id))
continue
result_json = resp.json()
results = result_json['results']
if not results:
print("No results for sensor {}".format(sensor_id))
continue
result = results[0]
try:
humidity = float(result['humidity'])
            except (KeyError, ValueError):
print("Couldn't get humidity for sensor {}".format(sensor_id))
continue
try:
location_label = result['Label']
            except KeyError as e:
print(e)
location_label = "Sensor {}".format(sensor_id)
# TODO 2020-10-07: Double-check this?
# Slides say PA_cf1(avgAB)] = PurpleAir higher correction factor data averaged from the A and B channels
pm25s = []
for r in results:
try:
pm25 = float(r['pm2_5_cf_1'])
                except (KeyError, ValueError):
print("Couldn't get PM2.5 CF=1 for sensor {}".format(sensor_id))
continue
pm25s.append(pm25)
print("PM 2.5 CF=1: {:2f}, sensor {}".format(pm25, r.get('Label', 'Unknown channel')))
            if not pm25s:
                print("No usable PM2.5 readings for sensor {}".format(sensor_id))
                continue
            pm25 = sum(pm25s) / len(pm25s)
print("PM2.5 CF=1 of {:2f}, humidity = {}".format(pm25, humidity))
aqi = int((calculate_aqi(to_us_epa(pm25, humidity))))
print("US-EPA from {}: {}".format(location_label, aqi))
area_aqis[location_label] = aqi
        area_aqis_vals = area_aqis.values()
        if not area_aqis_vals:
            print("No AQI readings for {}".format(area_name))
            continue
        avg_aqi = int(sum(area_aqis_vals) / len(area_aqis_vals))
print("Average AQI for {}: {}".format(area_name, avg_aqi))
for alert in ALERTS:
if avg_aqi < alert['threshold']:
now_timestamp = int(time.time())
try:
last_notified = int(redis_client.get('{}:{}'.format(area_name, alert['key'])))
except (TypeError, ValueError):
last_notified = None
if not last_notified or last_notified < now_timestamp - NOTIFICATION_INTERVAL_S:
purple_link = area['link'].format(sensor_id)
success_str = alert['message'].format(
area_name, avg_aqi, '\n'.join(['{}: {}'.format(name, val) for name, val in area_aqis.items()]),
purple_link)
print(success_str)
last_notified_dt = datetime.datetime.fromtimestamp(now_timestamp)
redis_client.set('{}:{}'.format(area_name, alert['key']), now_timestamp)
print("Updated last notified to {}".format(last_notified_dt.isoformat(sep=' ')))
for number in notify_numbers:
print("Sending text to {}".format(number))
send_runair_sms(number, body=success_str)
else:
last_notified_dt = datetime.datetime.fromtimestamp(last_notified)
print("Not notifiying for {} because we last notified at {}".format(
area_name, last_notified_dt.isoformat(sep=' ')))
break
print("\n----\n")
|
# Generated by Django 3.2.6 on 2021-08-08 10:11
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
('api', '0003_auto_20210808_1033'),
]
operations = [
migrations.CreateModel(
name='FavouriteDirections',
fields=[
('directions_id', models.AutoField(primary_key=True, serialize=False)),
('origin', models.TextField(default='Missing')),
('destination', models.TextField(default='Missing')),
('url', models.TextField(default='Missing')),
('user', models.ForeignKey(default='Missing', on_delete=django.db.models.deletion.CASCADE, related_name='favdirections', to=settings.AUTH_USER_MODEL)),
],
),
migrations.AddConstraint(
model_name='favouritedirections',
constraint=models.UniqueConstraint(fields=('user', 'directions_id'), name='unique_favourite_directions'),
),
]
|
from starlette.testclient import TestClient
from app.main import app
def test_home():
client = TestClient(app)
r = client.get('/')
assert r.status_code == 200
|
import timeit
import numpy as np
from psyneulink import *
from gym_forager.envs.forager_env import ForagerEnv
# Runtime Switches:
RENDER = False
PNL_COMPILE = False
PERCEPTUAL_DISTORT = False
RUN = False
SHOW_GRAPH = True
# *********************************************************************************************************************
# *********************************************** CONSTANTS ***********************************************************
# *********************************************************************************************************************
# These should probably be replaced by reference to ForagerEnv constants:
obs_len = 3
obs_coords = 2
action_len = 2
player_idx = 0
player_obs_start_idx = player_idx * obs_len
player_value_idx = player_idx * obs_len + obs_coords
player_coord_slice = slice(player_obs_start_idx,player_value_idx)
predator_idx = 1
predator_obs_start_idx = predator_idx * obs_len
predator_value_idx = predator_idx * obs_len + obs_coords
predator_coord_slice = slice(predator_obs_start_idx,predator_value_idx)
prey_idx = 2
prey_obs_start_idx = prey_idx * obs_len
prey_value_idx = prey_idx * obs_len + obs_coords
prey_coord_slice = slice(prey_obs_start_idx,prey_value_idx)
player_len = prey_len = predator_len = obs_coords
# *********************************************************************************************************************
# ************************************** MECHANISMS AND COMPOSITION *************************************************
# *********************************************************************************************************************
# Input Mechanisms
player_input = ProcessingMechanism(size=prey_len, name="PLAYER INPUT")
prey_input = ProcessingMechanism(size=prey_len, name="PREY INPUT")
predator_input = TransferMechanism(size=predator_len, name="PREDATOR INPUT")
# Perceptual Mechanisms
if PERCEPTUAL_DISTORT:
player_obs = ProcessingMechanism(size=prey_len, function=GaussianDistort, name="PLAYER OBS")
prey_obs = ProcessingMechanism(size=prey_len, function=GaussianDistort, name="PREY OBS")
predator_obs = TransferMechanism(size=predator_len, function=GaussianDistort, name="PREDATOR OBS")
else:
player_obs = ProcessingMechanism(size=prey_len, name="PLAYER OBS")
prey_obs = ProcessingMechanism(size=prey_len, name="PREY OBS")
predator_obs = TransferMechanism(size=predator_len, name="PREDATOR OBS")
# Value and Reward Mechanisms (not yet used; for future use)
values = TransferMechanism(size=3, name="AGENT VALUES")
reward = TransferMechanism(name="REWARD")
# Action Mechanism
# Use ComparatorMechanism to compute direction of action as difference of coordinates between player and prey:
# note: unitization is done in the main loop, to allow compilation of the LinearCombination function (TBI)
greedy_action_mech = ComparatorMechanism(name='ACTION',sample=player_obs,target=prey_obs)
# Create Composition
agent_comp = Composition(name='PREDATOR-PREY COMPOSITION')
agent_comp.add_node(player_obs)
agent_comp.add_node(predator_obs)
agent_comp.add_node(prey_obs)
# agent_comp.add_linear_processing_pathway([player_input, player_obs])
# agent_comp.add_linear_processing_pathway([predator_input, predator_obs])
# agent_comp.add_linear_processing_pathway([prey_input, prey_obs])
agent_comp.add_node(greedy_action_mech)
# ControlMechanism
# function for ObjectiveMechanism
def diff_fct(variable):
# Get difference in distance of player to predator vs. prey
if variable is None:
return 0
player_coord = variable[0]
player_percept = variable[1]
predator_coord = variable[2]
predator_percept = variable[3]
prey_coord = variable[4]
prey_percept = variable[5]
player_diff = np.sum(np.abs(player_percept - player_coord))
predator_diff = np.sum(np.abs(predator_percept - predator_coord))
prey_diff = np.sum(np.abs(prey_percept - prey_coord))
# return - (np.sum(player_diff) + np.sum(predator_diff))
return -(np.sum(player_diff))
def test_fct(variable):
if variable is None:
return 0
return variable[1] - variable[0]
if PERCEPTUAL_DISTORT:
CTL_PARAM = VARIANCE
else:
CTL_PARAM = SLOPE
# agent_comp._analyze_graph()
ocm = OptimizationControlMechanism(features=[player_obs.input_port, predator_obs.input_port, prey_obs.input_port],
agent_rep=agent_comp,
function=GridSearch(direction=MAXIMIZE,
save_values=True),
objective_mechanism=ObjectiveMechanism(
# function=diff_fct,
function=test_fct,
monitor=[player_obs,
player_obs.input_port,
predator_obs,
predator_obs.input_port,
prey_obs,
prey_obs.input_port
]
# monitored_output_ports=[player_input, player_obs,
# predator_input, predator_obs,
# prey_input, prey_obs
# ]
# monitored_output_ports=[agent_comp.input_CIM_ports[
# player_obs.input_port][1],
# player_obs,
# agent_comp.input_CIM_ports[
# predator_obs.input_port][1],
# predator_obs,
# agent_comp.input_CIM_ports[
# prey_obs.input_port][1],
# prey_obs
# ]
),
control_signals=[ControlSignal(modulates=(CTL_PARAM, player_obs),
# allocation_samples=[0, 1, 10, 100]),
# allocation_samples=[0, 10, 100]),
# allocation_samples=[10, 1]),
allocation_samples=[0, 100],
intensity_cost_function=Exponential(rate=-.1,
bias=5),
),
ControlSignal(modulates=(CTL_PARAM, predator_obs),
# allocation_samples=[0, 1, 10, 100]),
# allocation_samples=[0, 10, 100]),
# allocation_samples=[10, 1]),
allocation_samples=[0, 100],
intensity_cost_function=Exponential(rate=-.1,
bias=5),
),
ControlSignal(modulates=(CTL_PARAM, prey_obs),
# allocation_samples=[0, 1, 10, 100]),
# allocation_samples=[0, 10, 100]),
# allocation_samples=[10, 1]),
allocation_samples=[0, 100],
intensity_cost_function=Exponential(rate=-.1,
bias=5),
),
],
)
agent_comp.add_model_based_optimizer(ocm)
agent_comp.enable_model_based_optimizer = True
full_comp = Composition(name='FULL_COMPOSITION')
full_comp.add_node(agent_comp)
full_comp.add_node(player_input)
full_comp.add_node(predator_input)
full_comp.add_node(prey_input)
# full_comp.add_projection(sender=player_input, receiver=player_obs)
# full_comp.add_projection(sender=predator_input, receiver=predator_obs)
# full_comp.add_projection(sender=prey_input, receiver=prey_obs)
full_comp.add_linear_processing_pathway([player_input,player_obs])
full_comp.add_linear_processing_pathway([predator_input,predator_obs])
full_comp.add_linear_processing_pathway([prey_input,prey_obs])
if SHOW_GRAPH:
# agent_comp.show_graph(show_mechanism_structure='ALL')
# agent_comp.show_graph(show_controller=True)
full_comp.show_graph(show_controller=True)
# *********************************************************************************************************************
# ****************************************** RUN SIMULATION ********************************************************
# *********************************************************************************************************************
num_trials = 1
def main():
env = ForagerEnv()
reward = 0
done = False
if RENDER:
env.render() # If visualization is desired
else:
print("Running simulation...")
steps = 0
start_time = timeit.default_timer()
for _ in range(num_trials):
observation = env.reset()
while True:
if PNL_COMPILE:
BIN_EXECUTE = 'LLVM'
else:
BIN_EXECUTE = 'Python'
run_results = full_comp.run(inputs={player_input:[observation[player_coord_slice]],
predator_input:[observation[predator_coord_slice]],
prey_input:[observation[prey_coord_slice]],
},
bin_execute=BIN_EXECUTE
)
action = np.where(run_results[0] == 0, 0, run_results[0] / np.abs(run_results[0]))
# action = np.squeeze(np.where(greedy_action_mech.value==0,0,
# greedy_action_mech.value[0]/np.abs(greedy_action_mech.value[0])))
observation, reward, done, _ = env.step(action)
print('\nStep: ', steps)
print('Outcome: {}'.format(ocm.objective_mechanism.value))
print('OCM ControlSignals:')
print('\tPlayer OBS: {}\n\tPredator OBS: {}\n\tPrey OBS: {}'.
format(ocm.control_signals[0].value,
ocm.control_signals[1].value,
ocm.control_signals[2].value))
print('OCM ControlSignal Costs:')
print('\tPlayer OBS: {}\n\tPredator OBS: {}\n\tPrey OBS: {}'.
format(ocm.control_signals[0].cost,
ocm.control_signals[1].cost,
ocm.control_signals[2].cost))
print('SIMULATION (PREP FOR NEXT TRIAL):')
for sample, value in zip(ocm.saved_samples, ocm.saved_values):
print('\t\tSample: {} Value: {}'.format(sample, value))
steps += 1
if done:
break
stop_time = timeit.default_timer()
print(f'{steps / (stop_time - start_time):.1f} steps/second, {steps} total steps in '
f'{stop_time - start_time:.2f} seconds')
if RENDER:
env.render() # If visualization is desired
if RUN:
if __name__ == "__main__":
main()
|
"""Package for image processing."""
from .segmentation import *
__author__ = 'Seu Sim'
__email__ = '[email protected]'
__version__ = '0.0.1'
|