ext (stringclasses, 9 values) | sha (stringlengths, 40) | content (stringlengths, 3 to 1.04M)
---|---|---
py
|
1a55c821abcafb1b59ee8697e61ecbf6b2fe8172
|
from .add import add
from .subtract import subtract
from .multiply import multiply
|
py
|
1a55c83049ee15de6151cdbe439cb057866f3946
|
from nintendo.nex import backend, authentication, friends, nintendo_notification
from nintendo import account
import rpc
import time
client_id = '472185292636291082'
rpc_obj = rpc.DiscordIpcClient.for_platform(client_id)
print("RPC connection successful.")
# Wii U Console Details
DEVICE_ID = 1111111111
SERIAL_NUMBER = "xxxxxxxxxxxx"
SYSTEM_VERSION = 0x230 # 5.5.2E
REGION = 4 # Europe (PAL)
COUNTRY = "GB" # United Kingdom (Great Britain)
# Wii U Secondary User/Account Details
USERNAME = "PutSecondaryNNIDUsernameHere"
PASSWORD = "PutSecondaryNNIDPasswordHere"
# Wii U Main User/Account NNID
MAINID = "PutMainNNIDUsernameHere"
class NotificationHandler(nintendo_notification.NintendoNotificationHandler):
def __init__(self):
self.name_cache = {}
def process_notification_event(self, event):
pid = event.pid
if pid not in self.name_cache:
self.name_cache[pid] = api.get_nnid(pid)
name = self.name_cache[pid]
if event.type == nintendo_notification.NotificationType.LOGOUT:
if name == MAINID:
print("Peace!")
activity = {
}
rpc_obj.set_activity(activity)
elif event.type == nintendo_notification.NotificationType.PRESENCE_CHANGE:
presence = event.data
if name == MAINID:
print("Gotcha!")
title_id = "%016X" %(event.data.game_key.title_id)
if title_id == "0000000000000000":
title_name = "Wii U Menu"
elif title_id == "000500001010ED00":
title_name = "MARIO KART 8"
elif title_id == "000500001010CD00":
title_name = "MARIO KART 8"
elif title_id == "0005000010176A00":
title_name = "Splatoon"
elif title_id == "00050000101C9500":
title_name = "Breath of the Wild"
elif title_id == "0005000010180700":
title_name = "Captain Toad: Treasure Tracker"
elif title_id == "0005000010199500":
title_name = "Super Mario 64"
elif title_id == "0005000010195B00":
title_name = "NEW SUPER MARIO BROS."
elif title_id == "0005000010172700":
title_name = "BAYONETTA 2"
elif title_id == "000500301001420A":
title_name = "Nintendo eShop"
elif title_id == "000500301001620A":
title_name = "Miiverse"
elif title_id == "000500301001220A":
title_name = "Internet Browser"
elif title_id == "000500101004A200":
title_name = "Mii Maker"
elif title_id == "000500101005A200":
title_name = "Wii U Chat"
elif title_id == "0005000010105A00":
title_name = "Netflix"
elif title_id == "0005000010105700":
title_name = "YouTube"
elif title_id == "0005000010102F00":
title_name = "Amazon / LOVEFiLM"
elif title_id == "0005000010101E00":
title_name = "New SUPER MARIO BROS. U"
elif title_id == "000500001014B800":
title_name = "New SUPER MARIO BROS. U + New SUPER LUIGI U"
elif title_id == "0005000010145D00":
title_name = "SUPER MARIO 3D WORLD"
elif title_id == "000500001018DD00":
title_name = "Super Mario Maker"
else:
title_name = title_id
#idDash = title_id[:8] + "-" + title_id[8:]
#print("idDash: " + idDash)
start_time = time.time()
print(title_id + " / " + title_name)
activity = {
"details": title_name,
"timestamps": {
"start": start_time
},
"assets": {
"small_text": MAINID,
"small_image": "nn",
"large_text": title_name,
"large_image": title_id.lower()
}
}
rpc_obj.set_activity(activity)
else:
print("Unknown notification type %i (from %s)" %(event.type, name))
api = account.AccountAPI()
api.set_device(DEVICE_ID, SERIAL_NUMBER, SYSTEM_VERSION, REGION, COUNTRY)
api.set_title(friends.FriendsTitle.TITLE_ID_EUR, friends.FriendsTitle.LATEST_VERSION)
api.login(USERNAME, PASSWORD)
nex_token = api.get_nex_token(friends.FriendsTitle.GAME_SERVER_ID)
backend = backend.BackEndClient(
friends.FriendsTitle.ACCESS_KEY,
friends.FriendsTitle.NEX_VERSION,
backend.Settings("friends.cfg")
)
backend.connect(nex_token.host, nex_token.port)
backend.login(
nex_token.username, nex_token.password, None,
authentication.NintendoLoginData(nex_token.token)
)
backend.nintendo_notification_server.handler = NotificationHandler()
input("Press enter to disconnect and exit\n")
backend.close()
|
py
|
1a55c940d814b845c4bce402cd217cb56f0def04
|
"""The Met Office integration."""
import asyncio
import logging
import datapoint
from homeassistant.config_entries import ConfigEntry
from homeassistant.const import CONF_API_KEY, CONF_LATITUDE, CONF_LONGITUDE, CONF_NAME
from homeassistant.core import HomeAssistant
from homeassistant.exceptions import ConfigEntryNotReady
from homeassistant.helpers.update_coordinator import DataUpdateCoordinator
from .const import (
DEFAULT_SCAN_INTERVAL,
DOMAIN,
METOFFICE_COORDINATES,
METOFFICE_DAILY_COORDINATOR,
METOFFICE_HOURLY_COORDINATOR,
METOFFICE_NAME,
MODE_3HOURLY,
MODE_DAILY,
)
from .data import MetOfficeData
from .helpers import fetch_data, fetch_site
_LOGGER = logging.getLogger(__name__)
PLATFORMS = ["sensor", "weather"]
async def async_setup_entry(hass: HomeAssistant, entry: ConfigEntry) -> bool:
"""Set up a Met Office entry."""
latitude = entry.data[CONF_LATITUDE]
longitude = entry.data[CONF_LONGITUDE]
api_key = entry.data[CONF_API_KEY]
site_name = entry.data[CONF_NAME]
connection = datapoint.connection(api_key=api_key)
site = await hass.async_add_executor_job(
fetch_site, connection, latitude, longitude
)
if site is None:
raise ConfigEntryNotReady()
async def async_update_3hourly() -> MetOfficeData:
return await hass.async_add_executor_job(
fetch_data, connection, site, MODE_3HOURLY
)
async def async_update_daily() -> MetOfficeData:
return await hass.async_add_executor_job(
fetch_data, connection, site, MODE_DAILY
)
metoffice_hourly_coordinator = DataUpdateCoordinator(
hass,
_LOGGER,
name=f"MetOffice Hourly Coordinator for {site_name}",
update_method=async_update_3hourly,
update_interval=DEFAULT_SCAN_INTERVAL,
)
metoffice_daily_coordinator = DataUpdateCoordinator(
hass,
_LOGGER,
name=f"MetOffice Daily Coordinator for {site_name}",
update_method=async_update_daily,
update_interval=DEFAULT_SCAN_INTERVAL,
)
metoffice_hass_data = hass.data.setdefault(DOMAIN, {})
metoffice_hass_data[entry.entry_id] = {
METOFFICE_HOURLY_COORDINATOR: metoffice_hourly_coordinator,
METOFFICE_DAILY_COORDINATOR: metoffice_daily_coordinator,
METOFFICE_NAME: site_name,
METOFFICE_COORDINATES: f"{latitude}_{longitude}",
}
# Fetch initial data so we have data when entities subscribe
await asyncio.gather(
metoffice_hourly_coordinator.async_config_entry_first_refresh(),
metoffice_daily_coordinator.async_config_entry_first_refresh(),
)
hass.config_entries.async_setup_platforms(entry, PLATFORMS)
return True
async def async_unload_entry(hass: HomeAssistant, entry: ConfigEntry):
"""Unload a config entry."""
unload_ok = await hass.config_entries.async_unload_platforms(entry, PLATFORMS)
if unload_ok:
hass.data[DOMAIN].pop(entry.entry_id)
if not hass.data[DOMAIN]:
hass.data.pop(DOMAIN)
return unload_ok
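# Illustrative note (added, not part of the original integration): async_setup_entry
# above reads exactly these keys from the config entry, so entry.data is expected to
# look roughly like the placeholder below (values are hypothetical).
# entry.data == {
#     CONF_API_KEY: "<datapoint-api-key>",
#     CONF_LATITUDE: 51.5,
#     CONF_LONGITUDE: -0.1,
#     CONF_NAME: "Home",
# }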
|
py
|
1a55c944f6990945f1202908f898951f9c17128d
|
# Copyright 2016-2017 Workonline Communications (Pty) Ltd. All rights reserved.
#
# The contents of this file are licensed under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with the
# License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations under
# the License.
"""Views module for prngmgr API."""
from django.db.models import F, Q
from django_peeringdb.models import concrete as pdb_models
from prngmgr import models as prngmgr_models
from prngmgr.api import datatables, serializers
from rest_framework import permissions, viewsets
from rest_framework.decorators import list_route
from rest_framework.response import Response
class OrganizationViewSet(viewsets.ReadOnlyModelViewSet):
"""Organization view set."""
queryset = pdb_models.Organization.objects.all()
serializer_class = serializers.OrganizationSerializer
permission_classes = (permissions.IsAuthenticated,)
class FacilityViewSet(viewsets.ReadOnlyModelViewSet):
"""Facility view set."""
queryset = pdb_models.Facility.objects.all()
serializer_class = serializers.FacilitySerializer
permission_classes = (permissions.IsAuthenticated,)
class NetworkProxyViewSet(viewsets.ReadOnlyModelViewSet):
"""Network proxy view set."""
queryset = prngmgr_models.NetworkProxy.objects.all()
serializer_class = serializers.NetworkSerializer
permission_classes = (permissions.IsAuthenticated,)
@list_route()
def datatable(self, request, *args, **kwargs):
"""Render datatable query response."""
query_params = datatables.QueryParams(request)
query = datatables.QueryView(
query_set=self.queryset,
serializer_class=self.serializer_class,
query_params=query_params
)
return query.response
@list_route()
def tabledef(self, *args, **kwargs):
"""Render datatable table definition."""
columns = [
{'title': 'Network Name',
'data': 'name',
'name': 'name'},
{'title': 'Primary ASN',
'data': 'asn',
'name': 'asn'},
{'title': 'IRR Record',
'data': 'irr_as_set',
'name': 'irr_as_set'},
{'title': 'Looking Glass',
'data': 'looking_glass',
'name': 'looking_glass'},
{'title': 'Peering Policy',
'data': 'policy_general',
'name': 'policy_general'},
{'title': 'Possible Sessions',
'data': 'possible_sessions',
'name': 'possible_sessions',
'orderable': False,
'searchable': False},
{'title': 'Provisioned Sessions',
'data': 'provisioned_sessions',
'name': 'provisioned_sessions',
'orderable': False,
'searchable': False},
{'title': 'Established Sessions',
'data': 'established_sessions',
'name': 'established_sessions',
'orderable': False,
'searchable': False},
]
definition = datatables.TableDefView(columns=columns)
return definition.response
class InternetExchangeProxyViewSet(viewsets.ReadOnlyModelViewSet):
"""IXP proxy view set."""
queryset = prngmgr_models.InternetExchangeProxy.objects.all()
serializer_class = serializers.InternetExchangeSerializer
permission_classes = (permissions.IsAuthenticated,)
@list_route()
def datatable(self, request, *args, **kwargs):
"""Render datatable query response."""
query_params = datatables.QueryParams(request)
query = datatables.QueryView(
query_set=self.queryset,
serializer_class=self.serializer_class,
query_params=query_params
)
return query.response
@list_route()
def tabledef(self, *args, **kwargs):
"""Render datatable table definition."""
columns = [
{'title': 'IXP Name',
'data': 'name',
'name': 'name',
'path': 'value'},
{'title': 'Country',
'data': 'country',
'name': 'country'},
{'title': 'Region',
'data': 'region_continent',
'name': 'region_continent'},
{'title': 'Participants',
'data': 'participants',
'name': 'participants',
'orderable': True,
'searchable': False},
{'title': 'Possible Sessions',
'data': 'possible_sessions',
'name': 'possible_sessions',
'orderable': False,
'searchable': False},
{'title': 'Provisioned Sessions',
'data': 'provisioned_sessions',
'name': 'provisioned_sessions',
'orderable': False,
'searchable': False},
{'title': 'Established Sessions',
'data': 'established_sessions',
'name': 'established_sessions',
'orderable': False,
'searchable': False},
]
definition = datatables.TableDefView(columns=columns)
return definition.response
class InternetExchangeFacilityViewSet(viewsets.ReadOnlyModelViewSet):
"""IXP Facility proxy view set."""
queryset = pdb_models.InternetExchangeFacility.objects.all()
serializer_class = serializers.InternetExchangeFacilitySerializer
permission_classes = (permissions.IsAuthenticated,)
class IXLanViewSet(viewsets.ReadOnlyModelViewSet):
"""IXP LAN view set."""
queryset = pdb_models.IXLan.objects.all()
serializer_class = serializers.IXLanSerializer
permission_classes = (permissions.IsAuthenticated,)
class IXLanPrefixViewSet(viewsets.ReadOnlyModelViewSet):
"""IXP LAN prefix view set."""
queryset = pdb_models.IXLanPrefix.objects.all()
serializer_class = serializers.IXLanPrefixSerializer
permission_classes = (permissions.IsAuthenticated,)
class NetworkContactViewSet(viewsets.ReadOnlyModelViewSet):
"""Network contact view set."""
queryset = pdb_models.NetworkContact.objects.all()
serializer_class = serializers.NetworkContactSerializer
permission_classes = (permissions.IsAuthenticated,)
class NetworkFacilityViewSet(viewsets.ReadOnlyModelViewSet):
"""Network facility view set."""
queryset = pdb_models.NetworkFacility.objects.all()
serializer_class = serializers.NetworkFacilitySerializer
permission_classes = (permissions.IsAuthenticated,)
class NetworkIXLanViewSet(viewsets.ReadOnlyModelViewSet):
"""Network IX LAN view set."""
queryset = pdb_models.NetworkIXLan.objects.all()
serializer_class = serializers.NetworkIXLanSerializer
permission_classes = (permissions.IsAuthenticated,)
class PeeringRouterViewSet(viewsets.ModelViewSet):
"""Peering router view set."""
queryset = prngmgr_models.PeeringRouter.objects.all()
serializer_class = serializers.PeeringRouterSerializer
permission_classes = (permissions.IsAuthenticated,)
@list_route()
def datatable(self, request, *args, **kwargs):
"""Render datatable query response."""
query_params = datatables.QueryParams(request)
query = datatables.QueryView(
query_set=self.queryset,
serializer_class=self.serializer_class,
query_params=query_params
)
return query.response
@list_route()
def tabledef(self, *args, **kwargs):
"""Render datatable table definition."""
columns = [
{'title': 'Hostname',
'data': 'hostname',
'name': 'hostname'},
{'title': 'Peering Interfaces',
'data': 'peering_interfaces',
'name': 'peering_interfaces'},
{'title': 'Possible Sessions',
'data': 'possible_sessions',
'name': 'possible_sessions',
'orderable': False,
'searchable': False},
{'title': 'Provisioned Sessions',
'data': 'provisioned_sessions',
'name': 'provisioned_sessions',
'orderable': False,
'searchable': False},
{'title': 'Established Sessions',
'data': 'established_sessions',
'name': 'established_sessions',
'orderable': False,
'searchable': False},
]
definition = datatables.TableDefView(columns=columns)
return definition.response
class PeeringRouterIXInterfaceViewSet(viewsets.ModelViewSet):
"""Peering router IX interface view set."""
queryset = prngmgr_models.PeeringRouterIXInterface.objects.all()
serializer_class = serializers.PeeringRouterIXInterfaceSerializer
permission_classes = (permissions.IsAuthenticated,)
class PeeringSessionViewSet(viewsets.ModelViewSet):
"""Peering session view set."""
model_manager = prngmgr_models.PeeringSession.objects
queryset = model_manager.all()
serializer_class = serializers.PeeringSessionSerializer
permission_classes = (permissions.IsAuthenticated,)
@list_route()
def status_summary(self, *args, **kwargs):
"""Render status summary response."""
summary = self.model_manager.status_summary()
return Response(summary)
@list_route()
def state_changes(self, request, *args, **kwargs):
"""Render state changes query response."""
query_params = datatables.QueryParams(request)
query = datatables.QueryView(
query_set=self.queryset,
serializer_class=self.serializer_class,
query_params=query_params,
static_exclude=Q(**{"session_state": F("previous_state")}),
static_order='-state_changed',
)
return query.response
@list_route()
def datatable(self, request, *args, **kwargs):
"""Render datatable query response."""
query_params = datatables.QueryParams(request)
query = datatables.QueryView(
query_set=self.queryset,
serializer_class=self.serializer_class,
query_params=query_params
)
return query.response
@list_route()
def tabledef(self, *args, **kwargs):
"""Render datatable table definition."""
columns = [
{'title': 'IXP',
'data': 'ixp_name',
'name': 'ixp_name',
'responsivePriority': 5},
{'title': 'Peer Name',
'data': 'remote_network_name',
'name': 'remote_network_name',
'responsivePriority': 1},
{'title': 'Peer AS',
'data': 'remote_network_asn',
'name': 'remote_network_asn',
'responsivePriority': 2},
{'title': 'Address Family',
'data': 'address_family',
'name': 'address_family',
'responsivePriority': 3},
{'title': 'Peer Address',
'data': 'remote_address',
'name': 'remote_address'},
{'title': 'Router',
'data': 'router_hostname',
'name': 'router_hostname'},
{'title': 'State',
'data': 'session_state',
'name': 'session_state',
'responsivePriority': 4},
{'title': 'Accepted Prefixes',
'data': 'accepted_prefixes',
'name': 'accepted_prefixes',
'responsivePriority': 6}
]
definition = datatables.TableDefView(columns=columns)
return definition.response
|
py
|
1a55c9a4d0ab076081aa3c631ce6f7525cfe9eec
|
import os
from fabric.api import run, sudo, env, cd, local, prefix, put, lcd, settings
from fabric.contrib.project import rsync_project
from fabric.contrib.files import exists, sed
from fabric.utils import puts
server_dir = '/root/wxBot'
tmp_dir = '/tmp/wxBot' + str(os.getpid())
def _set_user_dir():
global server_dir
with settings(warn_only=True):
issue = run('id root').lower()
def _prepare_local_website():
local('mkdir -p %s' % tmp_dir)
local('cp *.py *.jpg %s' % tmp_dir)
def prepare_remote_dirs():
_set_user_dir()
if not exists(server_dir):
sudo('mkdir -p %s' % server_dir)
sudo('chmod -R 755 %s' % server_dir)
sudo('chown %s %s' % ('root', server_dir))
def _clean_local_dir():
local('rm -r %s' % tmp_dir)
def host_type():
run('uname -s')
def deploy():
_prepare_local_website()
prepare_remote_dirs()
rsync_project(local_dir=tmp_dir + '/', remote_dir=server_dir, delete=True)
_clean_local_dir()
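# Illustrative invocation (assuming this module is used as a Fabric 1.x fabfile):
#   fab -H root@your.server.example deploy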
|
py
|
1a55ca65fd688653194f87a7281e511b36f3439a
|
import dbProvider as db
import json
from operator import itemgetter
from flask import Flask, url_for, render_template, abort, make_response, redirect
app = Flask(__name__)
serverName = '146.185.179.193:5000'
# serverName = 'otkachkaseptika.ru'
def getStaticPath(relativePath):
# return '/static/' + relativePath
return url_for('static', filename=relativePath)
app.config['SERVER_NAME'] = serverName
# Helpers
@app.context_processor
def utility_processor():
def getLinkForRegionService(regionId = None, serviceId = None, order = False, subdomain = None):
if regionId == None:
if serviceId == None:
return url_for("RegionNoService", subdomain = 'www' if subdomain == None else subdomain)
else:
service = db.getServiceById(serviceId)
return url_for("RegionService", routeString = service['nameTranslit'], subdomain = 'www' if subdomain == None else subdomain)
else:
region = db.getRegionById(regionId)
subdomain = db.getSubdomainByMainRegionId(regionId)
if subdomain == None:
isMainRegion = False
subdomain = db.getSubdomainByRegionId(regionId)
else:
isMainRegion = True
if serviceId == None:
if isMainRegion:
return url_for("RegionNoService", subdomain = subdomain)
else:
return url_for("RegionService", routeString = getPathForRegionId(regionId), subdomain = subdomain)
else:
service = db.getServiceById(serviceId)
if isMainRegion:
return url_for("RegionService", routeString = service['nameTranslit'], subdomain = subdomain)
else:
if not (region['hasChildren']):
order = True
if order:
routeString = service['nameTranslit'] + "-v-" + region['dativeTranslit']
return url_for("RegionService", routeString = routeString, subdomain = subdomain)
else:
routeString = service['nameTranslit'] + getPathForRegionId(regionId)
return url_for("RegionService", routeString = routeString, subdomain = subdomain)
def getLen(array):
return len(array)
return dict(getLinkForRegionService=getLinkForRegionService, getLen=getLen, getServiceImgUrl=getServiceImgUrl)
def getPathForRegionId(regionId):
path = ""
parents = db.getRegionParentsSorted(regionId)
for parent in parents:
path += "/" + parent["nameTranslit"]
region = db.getRegionById(regionId)
path += "/" + region["nameTranslit"]
return path
def getRegionByPathAndParentId(path, parentId):
regions = path.split('/')
for regionName in regions:
region = db.getRegionByNameTranslitAndParentId(regionName, parentId)
if region == None:
return None
parentId = region['id']
return region
def getServiceImgUrl(service, region, size = None):
imgNumber = db.getServiceRandomImgNumber(service, region['id'])
if imgNumber == None:
service = db.getServiceById(5)
imgNumber = db.getServiceRandomImgNumber(service, region['id'])
sizeStr = ''
if size != None:
sizeStr = '-' + size
return getStaticPath('img/' + service['nameTranslit'] + '/' + service['nameTranslit'] + '-' + str(imgNumber) + sizeStr + '.jpg')
def replaceDataInContent(content, region, service):
result = []
for block in content:
imgUrl = getServiceImgUrl(service, region)
replaced = block.replace('{N}', region['dativeCaseName']).replace('{imgSrc}', imgUrl).replace('{imgAlt}', service['name'] + ' в ' + region['dativeCaseName'])
result.append(replaced)
return result
# Redirect requests without a subdomain to www
# @app.route('/')
# def Redirect():
# return redirect("http://www." + serverName + "/", code=301)
# @app.route('/<path:routeString>')
# def RedirectWithPath(routeString):
# return redirect("http://www." + serverName + "/" + routeString, code=301)
# With subdomain
@app.route('/')
@app.route('/', subdomain="<subdomain>")
def RegionNoService(subdomain = ''):
print('Subdomain ' + subdomain)
region = db.getRegionBySubdomain(subdomain)
if region == None:
region = 0
return render_template('selectServiceForRegion.html',
siteName = db.getText("header", "siteName"),
motto = db.getText("header", "motto").format(region['dativeCaseName']),
mainPhone = db.getPhoneByRegionId(region['id'])['phoneString'],
mainPhoneMeta = db.getText("phoneDescription", "other"),
mainPhoneLink = db.getPhoneByRegionId(region['id'])['phoneNormal'],
subdomain = subdomain,
title = db.getText("regionNoService", "title").format(region['dativeCaseName']),
description = db.getText("regionNoService", "description").format(region['dativeCaseName']),
keywords = db.getText("regionNoService", "keywords").format(region['dativeCaseName']),
h1 = db.getText("regionNoService", "h1").format(region['dativeCaseName']),
copyright = db.getText("footer", "copyright"),
services = db.getServices(),
region = region,
parentRegions = db.getRegionParentsSorted(region['id']),
regions = db.getRegionsTree(parentIds=[region['id']])
)
@app.route('/<path:routeString>', subdomain = '')
@app.route('/<path:routeString>', subdomain="<subdomain>")
def RegionService(routeString, subdomain):
print('Subdomain ' + subdomain)
mainRegion = db.getRegionBySubdomain(subdomain)
if mainRegion == None:
mainRegion = 0
serviceAndRegion = routeString.split("/")
service = db.getServiceByNameTranslit(serviceAndRegion[0])
if service != None:
regionPath = routeString.replace(service['nameTranslit'] + "/", "")
region = getRegionByPathAndParentId(path=regionPath, parentId=mainRegion['id'])
dativeRegionName = mainRegion['dativeCaseName']
parentIds=[mainRegion['id']]
parentRegions = db.getRegionParentsSorted(mainRegion['id'])
regionOrMainRegion = mainRegion
if region != None:
dativeRegionName = region['dativeCaseName']
parentIds=[region['id']]
parentRegions = db.getRegionParentsSorted(region['id'])
regionOrMainRegion = region
return render_template('selectRegionForService.html',
siteName = db.getText("header", "siteName"),
motto = db.getText("header", "motto").format(mainRegion['dativeCaseName']),
mainPhone = db.getPhoneByRegionId(mainRegion['id'])['phoneString'],
mainPhoneMeta = db.getText("phoneDescription", "other"),
mainPhoneLink = db.getPhoneByRegionId(mainRegion['id'])['phoneNormal'],
subdomain = subdomain,
title = db.getText("mainRegionService", "title").format(service['name'], dativeRegionName),
description = db.getText("mainRegionService", "description").format(service['name'], dativeRegionName, service['description']),
keywords = db.getText("mainRegionService", "keywords").format(service['name'], dativeRegionName),
h1 = db.getText("mainRegionService", "h1").format(service['name'], dativeRegionName),
service = service,
parentRegions = parentRegions,
copyright = db.getText("footer", "copyright"),
regions = db.getRegionsTree(parentIds, 2),
region = regionOrMainRegion
)
else:
serviceAndRegion = routeString.split("-v-")
service = db.getServiceByNameTranslit(serviceAndRegion[0])
if service == None:
region = getRegionByPathAndParentId(serviceAndRegion[0], mainRegion['id'])
if region == None:
region = 0
return render_template('selectServiceForRegion.html',
siteName = db.getText("header", "siteName"),
motto = db.getText("header", "motto").format(mainRegion['dativeCaseName']),
mainPhone = db.getPhoneByRegionId(mainRegion['id'])['phoneString'],
mainPhoneMeta = db.getText("phoneDescription", "other"),
mainPhoneLink = db.getPhoneByRegionId(mainRegion['id'])['phoneNormal'],
subdomain = subdomain,
title = db.getText("regionNoService", "title").format(region['dativeCaseName']),
description = db.getText("regionNoService", "description").format(region['dativeCaseName']),
keywords = db.getText("regionNoService", "keywords").format(region['dativeCaseName']),
h1 = db.getText("regionNoService", "h1").format(region['dativeCaseName']),
copyright = db.getText("footer", "copyright"),
services = db.getServices(),
region = region,
parentRegions = db.getRegionParentsSorted(region['id']),
regions = db.getRegionsTree(parentIds = [mainRegion['id']])
)
if len(serviceAndRegion) > 1:
region = db.getRegionByDativeTranslitAndMainRegion(serviceAndRegion[1], mainRegion['id'])
if region == None:
region = 0
services = db.getServices()[:]
services.remove(service)
content = db.getRandomizedTexts("orderService", subdomain, str(service['id']), region['id'])
content = replaceDataInContent(content, region, service)
return render_template('orderService.html',
siteName = db.getText("header", "siteName"),
motto = db.getText("header", "motto").format(mainRegion['dativeCaseName']),
mainPhone = db.getPhoneByRegionId(mainRegion['id'])['phoneString'],
mainPhoneLink = db.getPhoneByRegionId(mainRegion['id'])['phoneNormal'],
mainPhoneMeta = db.getText("phoneDescription", "other"),
subdomain = subdomain,
title = db.getText("orderService", "title").format(service['name'], region['dativeCaseName']),
description = db.getText("orderService", "description").format(service['name'], region['dativeCaseName']),
keywords = db.getText("orderService", "keywords").format(service['name'], region['dativeCaseName']),
h1 = db.getText("orderService", "h1").format(service['name'], region['dativeCaseName']),
copyright = db.getText("footer", "copyright"),
services = services,
region = region,
service = service,
parentRegions = db.getRegionParentsSorted(region['id']),
regions = db.getRegionsTree(parentIds = [mainRegion['id']]),
otherServicesHeader = "Другие услуги в {}".format(region['dativeCaseName']),
contentBlocks = content,
imgUrl = getServiceImgUrl(service, region)
)
#robots.txt
@app.route('/robots.txt', subdomain="<subdomain>")
def Robots(subdomain):
mainRegion = db.getRegionBySubdomain(subdomain)
if mainRegion == None:
mainRegion = 0
robots = 'User-agent: *\nAllow: /\nHost:' + subdomain + '.' + serverName + '\nsitemap: http://' + subdomain + '.' + serverName + '/sitemap.xml'
response= make_response(robots)
response.headers["Content-Type"] = "text/plain"
return response
#sitemap.xml
sitemapCount = 50
lastMod = '2017-07-16'
@app.route('/sitemap.xml', subdomain="<subdomain>")
def SitemapIndex(subdomain):
mainRegion = db.getRegionBySubdomain(subdomain)
if mainRegion == None:
mainRegion = 0
sitemapIndex = render_template('sitemapindex.xml',
urlRoot='http://' + subdomain + '.' + serverName,
sitemapCount = sitemapCount,
lastMod = lastMod)
response= make_response(sitemapIndex)
response.headers["Content-Type"] = "application/xml"
return response
@app.route('/sitemap<index>.xml', subdomain="<subdomain>")
def Sitemap(index, subdomain):
mainRegion = db.getRegionBySubdomain(subdomain)
if mainRegion == None:
mainRegion = 0
index = int(index)
if index > sitemapCount:
abort(404)
services = db.getServices()
regions = db.getAllChildrenRegionIds(mainRegion['id'])
start = (index - 1) * len(regions)/sitemapCount
if start < 0:
abort(404)
if start > len(regions):
start = len(regions)
end = index * len(regions)/sitemapCount
if end > len(regions):
end = len(regions)
start = int(start)
end = int(end)
sitemapTemplate = 'sitemap1.xml'
if index == 1:
sitemapTemplate = 'sitemap.xml'
sitemapXml = render_template(sitemapTemplate,
urlRoot='http://' + subdomain + '.' + serverName,
services = services,
regions = regions[start:end],
lastMod = lastMod,
subdomain = subdomain)
response= make_response(sitemapXml)
response.headers["Content-Type"] = "application/xml"
return response
#verification
@app.route('/google450d69197dedc081.html', subdomain="<subdomain>")
def GoogleVerification(subdomain):
mainRegion = db.getRegionBySubdomain(subdomain)
if mainRegion == None:
mainRegion = 0
return 'google-site-verification: google450d69197dedc081.html'
@app.route('/df439bf5423b.html', subdomain="<subdomain>")
def YandexVerificationMsk(subdomain):
mainRegion = db.getRegionBySubdomain(subdomain)
if mainRegion == None:
mainRegion = 0
return 'd085889e17e4'
@app.route('/yandex_d6b8a19aaea0ecfe.html', subdomain="<subdomain>")
def YandexVerificationSpb(subdomain):
mainRegion = db.getRegionBySubdomain(subdomain)
if mainRegion == None:
mainRegion = 0
return render_template('yandex_d6b8a19aaea0ecfe.html')
@app.route('/wmail_557011f651d368ddfb70a33d8e147a72.html', subdomain="<subdomain>")
def MailVerificationSpb(subdomain):
mainRegion = db.getRegionBySubdomain(subdomain)
if mainRegion == None:
mainRegion = 0
return render_template('wmail_557011f651d368ddfb70a33d8e147a72.html')
@app.route('/yandex_fb5d169c5c36f5d3.html', subdomain="<subdomain>")
def YandexVerificationKrasnodar(subdomain):
mainRegion = db.getRegionBySubdomain(subdomain)
if mainRegion == None:
mainRegion = 0
return render_template('yandex_fb5d169c5c36f5d3.html')
@app.route('/wmail_076dfddb21e2e2bdee0afae71729a13a.html', subdomain="<subdomain>")
def MailVerificationKrasnodar(subdomain):
mainRegion = db.getRegionBySubdomain(subdomain)
if mainRegion == None:
mainRegion = 0
return render_template('wmail_076dfddb21e2e2bdee0afae71729a13a.html')
# Error handling
@app.errorhandler(404)
def page_not_found(error):
region = db.getRegionById(0)
return render_template('404.html',
mainPhone = db.getPhoneByRegionId(region['id'])['phoneString'],
mainPhoneMeta = db.getText("phoneDescription", "other"),
mainPhoneLink = db.getPhoneByRegionId(region['id'])['phoneNormal'],
siteName = db.getText("header", "siteName"),
motto = db.getText("header", "motto").format(region['dativeCaseName']),
subdomain = db.getSubdomainByMainRegionId(region['id']),
title = "Страница не найдена",
copyright = db.getText("footer", "copyright")),404
if __name__ == '__main__':
app.run(host='0.0.0.0', port=5000, debug=True)
|
py
|
1a55cac3973009cc95fc291d9168bbd0587f4fd5
|
from django.conf.urls import url
from sensor import views
urlpatterns = [
url(r'^$', views.sensors, name='sensor.sensors'),
url(r'^sensors_api', views.sensors_api.as_view(), name='sensor.sensors_api'),
url(r'^get_sensors', views.get_sensors, name='sensor.get_sensors'),
]
|
py
|
1a55cb94c53ca897a7e16bbe13b53dac0a210d61
|
from typing import Callable, Union
class Rate(object):
def __init__(self, lambda_t: Union[str, Callable[[float], float]]):
if isinstance(lambda_t, str):
if hasattr(self, lambda_t):
self._lambda_t: Union[str, Callable[[float], float]] = \
getattr(self, lambda_t)()
else:
raise ValueError(f'Rate class has no method {lambda_t}')
else:
self._lambda_t: Callable[[float], float] = lambda_t
def __call__(self, t: float) -> float:
return self._lambda_t(t)
@classmethod
def linear(cls) -> 'Rate':
return Rate(lambda t: t)
@classmethod
def quadratic(cls) -> 'Rate':
return Rate(lambda t: t ** 2)
@classmethod
def cubic(cls) -> 'Rate':
return Rate(lambda t: t ** 3)
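# Usage sketch (illustrative addition, not part of the original module): a Rate can
# be built from a factory-method name or from any Callable[[float], float].
if __name__ == '__main__':
    linear_rate = Rate('linear')           # resolved via getattr to Rate.linear()
    custom_rate = Rate(lambda t: 2.0 * t)  # explicit callable
    print(linear_rate(3.0))  # 3.0
    print(custom_rate(3.0))  # 6.0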
|
py
|
1a55cbac5106076bea76e63057a320f9252b702f
|
from torchvision import transforms
class PytorchTransforms(object):
def gettraintransforms(self, mean, std, size=224):
# Train Phase transformations
return transforms.Compose([
transforms.Resize(size),
# transforms.Pad(padding=1, padding_mode="edge"),
# transforms.RandomHorizontalFlip(p=1), # randomly flip and rotate
# transforms.RandomRotation(20),
transforms.ColorJitter(saturation=0.2, hue=0.2),
# transforms.RandomCrop(size=(64, 64), padding=4),
transforms.ToTensor(),
# transforms.Normalize(mean, std),
# transforms.RandomErasing(scale=(0.10, 0.10), ratio=(1, 1), p=1),
])
def gettesttransforms(self, mean, std):
# Test Phase transformations
return transforms.Compose([
transforms.ToTensor(),
# transforms.Normalize(mean, std)
])
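# Illustrative usage (added for demonstration; mean/std are placeholders since
# Normalize is commented out above): apply the train transforms to one PIL image.
if __name__ == '__main__':
    from PIL import Image
    img = Image.new("RGB", (256, 256))
    train_tfms = PytorchTransforms().gettraintransforms(mean=(0.5, 0.5, 0.5), std=(0.5, 0.5, 0.5))
    print(train_tfms(img).shape)  # torch.Size([3, 224, 224])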
|
py
|
1a55ccd1fc71662a8d0a18d009cbc327133b18cd
|
#!/usr/bin/env python3
# Copyright (c) 2017-2018 The Bitcoin Core developers
# Copyright (c) 2019 Chaintope Inc.
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Tests NODE_NETWORK_LIMITED.
Tests that a node configured with -prune=550 signals NODE_NETWORK_LIMITED correctly
and that it responds to getdata requests for blocks correctly:
- send a block within 288 + 2 of the tip
- disconnect peers who request blocks older than that."""
from test_framework.messages import CInv, msg_getdata, msg_verack, NODE_BLOOM, NODE_NETWORK_LIMITED, NODE_WITNESS
from test_framework.mininode import P2PInterface, mininode_lock
from test_framework.test_framework import BitcoinTestFramework
from test_framework.util import assert_equal, disconnect_nodes, connect_nodes_bi, sync_blocks, wait_until
class P2PIgnoreInv(P2PInterface):
firstAddrnServices = 0
def on_inv(self, message):
# The node will send us invs for other blocks. Ignore them.
pass
def on_addr(self, message):
self.firstAddrnServices = message.addrs[0].nServices
def wait_for_addr(self, timeout=5):
test_function = lambda: self.last_message.get("addr")
wait_until(test_function, timeout=timeout, lock=mininode_lock)
def send_getdata_for_block(self, blockhash):
getdata_request = msg_getdata()
getdata_request.inv.append(CInv(2, int(blockhash, 16)))
self.send_message(getdata_request)
class NodeNetworkLimitedTest(BitcoinTestFramework):
def set_test_params(self):
self.setup_clean_chain = True
self.num_nodes = 3
self.extra_args = [['-prune=550', '-addrmantest'], [], []]
def disconnect_all(self):
disconnect_nodes(self.nodes[0], 1)
disconnect_nodes(self.nodes[1], 0)
disconnect_nodes(self.nodes[2], 1)
disconnect_nodes(self.nodes[2], 0)
disconnect_nodes(self.nodes[0], 2)
disconnect_nodes(self.nodes[1], 2)
def setup_network(self):
super(NodeNetworkLimitedTest, self).setup_network()
self.disconnect_all()
def run_test(self):
node = self.nodes[0].add_p2p_connection(P2PIgnoreInv())
expected_services = NODE_BLOOM | NODE_NETWORK_LIMITED
self.log.info("Check that node has signalled expected services.")
assert_equal(node.nServices, expected_services)
self.log.info("Check that the localservices is as expected.")
assert_equal(int(self.nodes[0].getnetworkinfo()['localservices'], 16), expected_services)
self.log.info("Mine enough blocks to reach the NODE_NETWORK_LIMITED range.")
connect_nodes_bi(self.nodes, 0, 1)
blocks = self.nodes[1].generate(292, self.signblockprivkey)
sync_blocks([self.nodes[0], self.nodes[1]])
self.log.info("Make sure we can max retrieve block at tip-288.")
node.send_getdata_for_block(blocks[1]) # last block in valid range
node.wait_for_block(int(blocks[1], 16), timeout=3)
self.log.info("Requesting block at height 2 (tip-289) must fail (ignored).")
node.send_getdata_for_block(blocks[0]) # first block outside of the 288+2 limit
node.wait_for_disconnect(5)
self.log.info("Check local address relay, do a fresh connection.")
self.nodes[0].disconnect_p2ps()
node1 = self.nodes[0].add_p2p_connection(P2PIgnoreInv())
node1.send_message(msg_verack())
node1.wait_for_addr()
#must relay address with NODE_NETWORK_LIMITED
assert_equal(node1.firstAddrnServices, 1028)
self.nodes[0].disconnect_p2ps()
node1.wait_for_disconnect()
# connect unsynced node 2 with pruned NODE_NETWORK_LIMITED peer
# because node 2 is in IBD and node 0 is a NODE_NETWORK_LIMITED peer, sync must not be possible
connect_nodes_bi(self.nodes, 0, 2)
try:
sync_blocks([self.nodes[0], self.nodes[2]], timeout=5)
except:
pass
        # node2 must remain at height 0
assert_equal(self.nodes[2].getblockheader(self.nodes[2].getbestblockhash())['height'], 0)
# now connect also to node 1 (non pruned)
connect_nodes_bi(self.nodes, 1, 2)
# sync must be possible
sync_blocks(self.nodes)
# disconnect all peers
self.disconnect_all()
# mine 10 blocks on node 0 (pruned node)
self.nodes[0].generate(10, self.signblockprivkey)
        # connect node1 (non-pruned) with node0 (pruned) and check whether they can sync
connect_nodes_bi(self.nodes, 0, 1)
# sync must be possible, node 1 is no longer in IBD and should therefore connect to node 0 (NODE_NETWORK_LIMITED)
sync_blocks([self.nodes[0], self.nodes[1]])
if __name__ == '__main__':
NodeNetworkLimitedTest().main()
|
py
|
1a55cd0d75d43823b1df6063f9b196e67833c168
|
import os
import argparse
import numpy as np
from algos import construct_classifier, classifier_types
from utils.data_utils import get_dataset, dataset_names
from utils.misc import increment_path
from utils.tf_utils import launch_tensorboard
from utils.vis_utils import plot_embeddings
masterdir = "/tmp/fairml-farm/"
base_datadir = masterdir + "data/"
base_logdir = masterdir + "logs/"
parser = argparse.ArgumentParser(description="Evaluate an individual fairness "
"algorithm.\nNOTE: classifier-specific "
"arguments should be specified in area "
"in the script itself.")
parser.add_argument("--experiment-name", default="default",
help="Name for the experiment base directory, "
"used as the extension relative to {}".format(base_logdir))
parser.add_argument("--load-dir",
help="Path to a previous experiment subdirectory, used to "
"load model weights, relative to {}.".format(base_logdir))
parser.add_argument("--train", action="store_true",
help="train the classifier")
parser.add_argument("-epochs", type=int, default=20)
parser.add_argument("--visualize", action="store_true", help="visualize "
"learned latent space")
parser.add_argument("-classifier", choices=[c.name for c in classifier_types],
default="simplenn",
help="Name of the type of fairness algorithm to use.")
parser.add_argument("-dataset", choices=dataset_names,
default="adult",
help="Name of dataset to train on.")
args = parser.parse_args()
loaddir = None
if args.load_dir is not None:
loaddir = os.path.join(base_logdir, args.load_dir)
logdir = increment_path(os.path.join(base_logdir, args.experiment_name, "run"))
os.makedirs(logdir, exist_ok=True)
print("Logging data to {}".format(logdir))
print("Loading {} dataset...".format(args.dataset))
train_dataset, validation_dataset = get_dataset(args.dataset,
base_datadir=base_datadir)
print("Launching Tensorboard.\nTo visualize, navigate to "
"http://0.0.0.0:6006/\nTo close Tensorboard,"
" press ctrl+C")
tensorboard_process = launch_tensorboard(logdir)
# ===== SPECIFY HYPERPARAMETERS (INCLUDING CLASSIFIER-TYPE) =====
inputsize = train_dataset["data"].shape[1]
layersizes = [100]
classifier_type = "paritynn"
hparams = {
"classifier_type": classifier_type,
"layersizes": layersizes,
"inputsize": inputsize,
}
# ===============================================================
print("Initializing classifier...")
classifier = construct_classifier(hparams, loaddir=loaddir)
if args.train:
print("Training network...")
classifier.train(train_dataset, logdir, epochs=args.epochs,
validation_dataset=validation_dataset)
savepath = classifier.save_model(logdir)
if args.visualize: # Plot out the learned embedding space
n = validation_dataset["label"].shape[0]
# get an equal number of male and female points
n_males = sum(validation_dataset["label"])
limiting_gender = n_males > n - n_males # 1 if men, 0 if women
n_limiting_gender = sum(validation_dataset["label"] == limiting_gender)
max_points_per_gender = 500
n_per_gender = min(max_points_per_gender, n_limiting_gender)
inds = np.concatenate([
np.where(validation_dataset["label"] == limiting_gender)[0][:n_per_gender],
np.where(validation_dataset["label"] != limiting_gender)[0][:n_per_gender]],
axis=0)
vis_dataset = {k:v[inds, ...] for k, v in validation_dataset.items()}
val_embeddings = classifier.compute_embedding(vis_dataset["data"])
plot_embeddings(val_embeddings,
vis_dataset["label"],
vis_dataset["protected"],
plot3d=True,
subsample=False,
label_names=["income<=50k", "income>50k"],
protected_names=["female", "male"])
tensorboard_process.join()
|
py
|
1a55cd6cc1ad1a84c6bea93adc95049968d8caac
|
from plotly.basedatatypes import BaseTraceHierarchyType as _BaseTraceHierarchyType
import copy as _copy
class Title(_BaseTraceHierarchyType):
# font
# ----
@property
def font(self):
"""
Sets this axis' title font. Note that the title's font used to
be set by the now deprecated `titlefont` attribute.
The 'font' property is an instance of Font
that may be specified as:
- An instance of plotly.graph_objs.carpet.baxis.title.Font
- A dict of string/value properties that will be passed
to the Font constructor
Supported dict properties:
color
family
HTML font family - the typeface that will be
applied by the web browser. The web browser
will only be able to apply a font if it is
available on the system which it operates.
Provide multiple font families, separated by
commas, to indicate the preference in which to
apply fonts if they aren't available on the
system. The plotly service (at https://plot.ly
or on-premise) generates images on a server,
where only a select number of fonts are
installed and supported. These include "Arial",
"Balto", "Courier New", "Droid Sans",, "Droid
Serif", "Droid Sans Mono", "Gravitas One", "Old
Standard TT", "Open Sans", "Overpass", "PT Sans
Narrow", "Raleway", "Times New Roman".
size
Returns
-------
plotly.graph_objs.carpet.baxis.title.Font
"""
return self['font']
@font.setter
def font(self, val):
self['font'] = val
# offset
# ------
@property
def offset(self):
"""
An additional amount by which to offset the title from the tick
labels, given in pixels. Note that this used to be set by the
now deprecated `titleoffset` attribute.
The 'offset' property is a number and may be specified as:
- An int or float
Returns
-------
int|float
"""
return self['offset']
@offset.setter
def offset(self, val):
self['offset'] = val
# text
# ----
@property
def text(self):
"""
Sets the title of this axis. Note that before the existence of
`title.text`, the title's contents used to be defined as the
`title` attribute itself. This behavior has been deprecated.
The 'text' property is a string and must be specified as:
- A string
- A number that will be converted to a string
Returns
-------
str
"""
return self['text']
@text.setter
def text(self, val):
self['text'] = val
# property parent name
# --------------------
@property
def _parent_path_str(self):
return 'carpet.baxis'
# Self properties description
# ---------------------------
@property
def _prop_descriptions(self):
return """\
font
Sets this axis' title font. Note that the title's font
used to be set by the now deprecated `titlefont`
attribute.
offset
An additional amount by which to offset the title from
the tick labels, given in pixels. Note that this used
to be set by the now deprecated `titleoffset`
attribute.
text
Sets the title of this axis. Note that before the
existence of `title.text`, the title's contents used to
be defined as the `title` attribute itself. This
behavior has been deprecated.
"""
def __init__(self, arg=None, font=None, offset=None, text=None, **kwargs):
"""
Construct a new Title object
Parameters
----------
arg
dict of properties compatible with this constructor or
an instance of plotly.graph_objs.carpet.baxis.Title
font
Sets this axis' title font. Note that the title's font
used to be set by the now deprecated `titlefont`
attribute.
offset
An additional amount by which to offset the title from
the tick labels, given in pixels. Note that this used
to be set by the now deprecated `titleoffset`
attribute.
text
Sets the title of this axis. Note that before the
existence of `title.text`, the title's contents used to
be defined as the `title` attribute itself. This
behavior has been deprecated.
Returns
-------
Title
"""
super(Title, self).__init__('title')
# Validate arg
# ------------
if arg is None:
arg = {}
elif isinstance(arg, self.__class__):
arg = arg.to_plotly_json()
elif isinstance(arg, dict):
arg = _copy.copy(arg)
else:
raise ValueError(
"""\
The first argument to the plotly.graph_objs.carpet.baxis.Title
constructor must be a dict or
an instance of plotly.graph_objs.carpet.baxis.Title"""
)
# Handle skip_invalid
# -------------------
self._skip_invalid = kwargs.pop('skip_invalid', False)
# Import validators
# -----------------
from plotly.validators.carpet.baxis import (title as v_title)
# Initialize validators
# ---------------------
self._validators['font'] = v_title.FontValidator()
self._validators['offset'] = v_title.OffsetValidator()
self._validators['text'] = v_title.TextValidator()
# Populate data dict with properties
# ----------------------------------
_v = arg.pop('font', None)
self['font'] = font if font is not None else _v
_v = arg.pop('offset', None)
self['offset'] = offset if offset is not None else _v
_v = arg.pop('text', None)
self['text'] = text if text is not None else _v
# Process unknown kwargs
# ----------------------
self._process_kwargs(**dict(arg, **kwargs))
# Reset skip_invalid
# ------------------
self._skip_invalid = False
from plotly.basedatatypes import BaseTraceHierarchyType as _BaseTraceHierarchyType
import copy as _copy
class Tickformatstop(_BaseTraceHierarchyType):
# dtickrange
# ----------
@property
def dtickrange(self):
"""
range [*min*, *max*], where "min", "max" - dtick values which
describe some zoom level, it is possible to omit "min" or "max"
value by passing "null"
The 'dtickrange' property is an info array that may be specified as:
* a list or tuple of 2 elements where:
(0) The 'dtickrange[0]' property accepts values of any type
(1) The 'dtickrange[1]' property accepts values of any type
Returns
-------
list
"""
return self['dtickrange']
@dtickrange.setter
def dtickrange(self, val):
self['dtickrange'] = val
# enabled
# -------
@property
def enabled(self):
"""
Determines whether or not this stop is used. If `false`, this
stop is ignored even within its `dtickrange`.
The 'enabled' property must be specified as a bool
(either True, or False)
Returns
-------
bool
"""
return self['enabled']
@enabled.setter
def enabled(self, val):
self['enabled'] = val
# name
# ----
@property
def name(self):
"""
When used in a template, named items are created in the output
figure in addition to any items the figure already has in this
array. You can modify these items in the output figure by
making your own item with `templateitemname` matching this
`name` alongside your modifications (including `visible: false`
or `enabled: false` to hide it). Has no effect outside of a
template.
The 'name' property is a string and must be specified as:
- A string
- A number that will be converted to a string
Returns
-------
str
"""
return self['name']
@name.setter
def name(self, val):
self['name'] = val
# templateitemname
# ----------------
@property
def templateitemname(self):
"""
Used to refer to a named item in this array in the template.
Named items from the template will be created even without a
matching item in the input figure, but you can modify one by
making an item with `templateitemname` matching its `name`,
alongside your modifications (including `visible: false` or
`enabled: false` to hide it). If there is no template or no
matching item, this item will be hidden unless you explicitly
show it with `visible: true`.
The 'templateitemname' property is a string and must be specified as:
- A string
- A number that will be converted to a string
Returns
-------
str
"""
return self['templateitemname']
@templateitemname.setter
def templateitemname(self, val):
self['templateitemname'] = val
# value
# -----
@property
def value(self):
"""
string - dtickformat for described zoom level, the same as
"tickformat"
The 'value' property is a string and must be specified as:
- A string
- A number that will be converted to a string
Returns
-------
str
"""
return self['value']
@value.setter
def value(self, val):
self['value'] = val
# property parent name
# --------------------
@property
def _parent_path_str(self):
return 'carpet.baxis'
# Self properties description
# ---------------------------
@property
def _prop_descriptions(self):
return """\
dtickrange
range [*min*, *max*], where "min", "max" - dtick values
which describe some zoom level, it is possible to omit
"min" or "max" value by passing "null"
enabled
Determines whether or not this stop is used. If
`false`, this stop is ignored even within its
`dtickrange`.
name
When used in a template, named items are created in the
output figure in addition to any items the figure
already has in this array. You can modify these items
in the output figure by making your own item with
`templateitemname` matching this `name` alongside your
modifications (including `visible: false` or `enabled:
false` to hide it). Has no effect outside of a
template.
templateitemname
Used to refer to a named item in this array in the
template. Named items from the template will be created
even without a matching item in the input figure, but
you can modify one by making an item with
`templateitemname` matching its `name`, alongside your
modifications (including `visible: false` or `enabled:
false` to hide it). If there is no template or no
matching item, this item will be hidden unless you
explicitly show it with `visible: true`.
value
string - dtickformat for described zoom level, the same
as "tickformat"
"""
def __init__(
self,
arg=None,
dtickrange=None,
enabled=None,
name=None,
templateitemname=None,
value=None,
**kwargs
):
"""
Construct a new Tickformatstop object
Parameters
----------
arg
dict of properties compatible with this constructor or
an instance of
plotly.graph_objs.carpet.baxis.Tickformatstop
dtickrange
range [*min*, *max*], where "min", "max" - dtick values
which describe some zoom level, it is possible to omit
"min" or "max" value by passing "null"
enabled
Determines whether or not this stop is used. If
`false`, this stop is ignored even within its
`dtickrange`.
name
When used in a template, named items are created in the
output figure in addition to any items the figure
already has in this array. You can modify these items
in the output figure by making your own item with
`templateitemname` matching this `name` alongside your
modifications (including `visible: false` or `enabled:
false` to hide it). Has no effect outside of a
template.
templateitemname
Used to refer to a named item in this array in the
template. Named items from the template will be created
even without a matching item in the input figure, but
you can modify one by making an item with
`templateitemname` matching its `name`, alongside your
modifications (including `visible: false` or `enabled:
false` to hide it). If there is no template or no
matching item, this item will be hidden unless you
explicitly show it with `visible: true`.
value
string - dtickformat for described zoom level, the same
as "tickformat"
Returns
-------
Tickformatstop
"""
super(Tickformatstop, self).__init__('tickformatstops')
# Validate arg
# ------------
if arg is None:
arg = {}
elif isinstance(arg, self.__class__):
arg = arg.to_plotly_json()
elif isinstance(arg, dict):
arg = _copy.copy(arg)
else:
raise ValueError(
"""\
The first argument to the plotly.graph_objs.carpet.baxis.Tickformatstop
constructor must be a dict or
an instance of plotly.graph_objs.carpet.baxis.Tickformatstop"""
)
# Handle skip_invalid
# -------------------
self._skip_invalid = kwargs.pop('skip_invalid', False)
# Import validators
# -----------------
from plotly.validators.carpet.baxis import (
tickformatstop as v_tickformatstop
)
# Initialize validators
# ---------------------
self._validators['dtickrange'] = v_tickformatstop.DtickrangeValidator()
self._validators['enabled'] = v_tickformatstop.EnabledValidator()
self._validators['name'] = v_tickformatstop.NameValidator()
self._validators['templateitemname'
] = v_tickformatstop.TemplateitemnameValidator()
self._validators['value'] = v_tickformatstop.ValueValidator()
# Populate data dict with properties
# ----------------------------------
_v = arg.pop('dtickrange', None)
self['dtickrange'] = dtickrange if dtickrange is not None else _v
_v = arg.pop('enabled', None)
self['enabled'] = enabled if enabled is not None else _v
_v = arg.pop('name', None)
self['name'] = name if name is not None else _v
_v = arg.pop('templateitemname', None)
self['templateitemname'
] = templateitemname if templateitemname is not None else _v
_v = arg.pop('value', None)
self['value'] = value if value is not None else _v
# Process unknown kwargs
# ----------------------
self._process_kwargs(**dict(arg, **kwargs))
# Reset skip_invalid
# ------------------
self._skip_invalid = False
from plotly.basedatatypes import BaseTraceHierarchyType as _BaseTraceHierarchyType
import copy as _copy
class Tickfont(_BaseTraceHierarchyType):
# color
# -----
@property
def color(self):
"""
The 'color' property is a color and may be specified as:
- A hex string (e.g. '#ff0000')
- An rgb/rgba string (e.g. 'rgb(255,0,0)')
- An hsl/hsla string (e.g. 'hsl(0,100%,50%)')
- An hsv/hsva string (e.g. 'hsv(0,100%,100%)')
- A named CSS color:
aliceblue, antiquewhite, aqua, aquamarine, azure,
beige, bisque, black, blanchedalmond, blue,
blueviolet, brown, burlywood, cadetblue,
chartreuse, chocolate, coral, cornflowerblue,
cornsilk, crimson, cyan, darkblue, darkcyan,
darkgoldenrod, darkgray, darkgrey, darkgreen,
darkkhaki, darkmagenta, darkolivegreen, darkorange,
darkorchid, darkred, darksalmon, darkseagreen,
darkslateblue, darkslategray, darkslategrey,
darkturquoise, darkviolet, deeppink, deepskyblue,
dimgray, dimgrey, dodgerblue, firebrick,
floralwhite, forestgreen, fuchsia, gainsboro,
ghostwhite, gold, goldenrod, gray, grey, green,
greenyellow, honeydew, hotpink, indianred, indigo,
ivory, khaki, lavender, lavenderblush, lawngreen,
lemonchiffon, lightblue, lightcoral, lightcyan,
lightgoldenrodyellow, lightgray, lightgrey,
lightgreen, lightpink, lightsalmon, lightseagreen,
lightskyblue, lightslategray, lightslategrey,
lightsteelblue, lightyellow, lime, limegreen,
linen, magenta, maroon, mediumaquamarine,
mediumblue, mediumorchid, mediumpurple,
mediumseagreen, mediumslateblue, mediumspringgreen,
mediumturquoise, mediumvioletred, midnightblue,
mintcream, mistyrose, moccasin, navajowhite, navy,
oldlace, olive, olivedrab, orange, orangered,
orchid, palegoldenrod, palegreen, paleturquoise,
palevioletred, papayawhip, peachpuff, peru, pink,
plum, powderblue, purple, red, rosybrown,
royalblue, saddlebrown, salmon, sandybrown,
seagreen, seashell, sienna, silver, skyblue,
slateblue, slategray, slategrey, snow, springgreen,
steelblue, tan, teal, thistle, tomato, turquoise,
violet, wheat, white, whitesmoke, yellow,
yellowgreen
Returns
-------
str
"""
return self['color']
@color.setter
def color(self, val):
self['color'] = val
# family
# ------
@property
def family(self):
"""
HTML font family - the typeface that will be applied by the web
browser. The web browser will only be able to apply a font if
it is available on the system which it operates. Provide
multiple font families, separated by commas, to indicate the
preference in which to apply fonts if they aren't available on
the system. The plotly service (at https://plot.ly or on-
premise) generates images on a server, where only a select
number of fonts are installed and supported. These include
"Arial", "Balto", "Courier New", "Droid Sans",, "Droid Serif",
"Droid Sans Mono", "Gravitas One", "Old Standard TT", "Open
Sans", "Overpass", "PT Sans Narrow", "Raleway", "Times New
Roman".
The 'family' property is a string and must be specified as:
- A non-empty string
Returns
-------
str
"""
return self['family']
@family.setter
def family(self, val):
self['family'] = val
# size
# ----
@property
def size(self):
"""
The 'size' property is a number and may be specified as:
- An int or float in the interval [1, inf]
Returns
-------
int|float
"""
return self['size']
@size.setter
def size(self, val):
self['size'] = val
# property parent name
# --------------------
@property
def _parent_path_str(self):
return 'carpet.baxis'
# Self properties description
# ---------------------------
@property
def _prop_descriptions(self):
return """\
color
family
HTML font family - the typeface that will be applied by
the web browser. The web browser will only be able to
apply a font if it is available on the system which it
operates. Provide multiple font families, separated by
commas, to indicate the preference in which to apply
fonts if they aren't available on the system. The
plotly service (at https://plot.ly or on-premise)
generates images on a server, where only a select
number of fonts are installed and supported. These
include "Arial", "Balto", "Courier New", "Droid Sans",,
"Droid Serif", "Droid Sans Mono", "Gravitas One", "Old
Standard TT", "Open Sans", "Overpass", "PT Sans
Narrow", "Raleway", "Times New Roman".
size
"""
def __init__(self, arg=None, color=None, family=None, size=None, **kwargs):
"""
Construct a new Tickfont object
Sets the tick font.
Parameters
----------
arg
dict of properties compatible with this constructor or
an instance of plotly.graph_objs.carpet.baxis.Tickfont
color
family
HTML font family - the typeface that will be applied by
the web browser. The web browser will only be able to
apply a font if it is available on the system which it
operates. Provide multiple font families, separated by
commas, to indicate the preference in which to apply
fonts if they aren't available on the system. The
plotly service (at https://plot.ly or on-premise)
generates images on a server, where only a select
number of fonts are installed and supported. These
include "Arial", "Balto", "Courier New", "Droid Sans",,
"Droid Serif", "Droid Sans Mono", "Gravitas One", "Old
Standard TT", "Open Sans", "Overpass", "PT Sans
Narrow", "Raleway", "Times New Roman".
size
Returns
-------
Tickfont
"""
super(Tickfont, self).__init__('tickfont')
# Validate arg
# ------------
if arg is None:
arg = {}
elif isinstance(arg, self.__class__):
arg = arg.to_plotly_json()
elif isinstance(arg, dict):
arg = _copy.copy(arg)
else:
raise ValueError(
"""\
The first argument to the plotly.graph_objs.carpet.baxis.Tickfont
constructor must be a dict or
an instance of plotly.graph_objs.carpet.baxis.Tickfont"""
)
# Handle skip_invalid
# -------------------
self._skip_invalid = kwargs.pop('skip_invalid', False)
# Import validators
# -----------------
from plotly.validators.carpet.baxis import (tickfont as v_tickfont)
# Initialize validators
# ---------------------
self._validators['color'] = v_tickfont.ColorValidator()
self._validators['family'] = v_tickfont.FamilyValidator()
self._validators['size'] = v_tickfont.SizeValidator()
# Populate data dict with properties
# ----------------------------------
_v = arg.pop('color', None)
self['color'] = color if color is not None else _v
_v = arg.pop('family', None)
self['family'] = family if family is not None else _v
_v = arg.pop('size', None)
self['size'] = size if size is not None else _v
# Process unknown kwargs
# ----------------------
self._process_kwargs(**dict(arg, **kwargs))
# Reset skip_invalid
# ------------------
self._skip_invalid = False
from plotly.graph_objs.carpet.baxis import title
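# Hedged usage sketch (editorial, not part of the generated module): the
# Tickfont class defined above is normally reached through a carpet trace, e.g.
#
#     import plotly.graph_objs as go
#     tickfont = go.carpet.baxis.Tickfont(color='darkblue', family='Arial', size=12)
#     fig = go.Figure(go.Carpet(a=[4, 5, 6], b=[1, 2, 3],
#                               baxis={'tickfont': tickfont}))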
|
py
|
1a55ce88943063c726dcfad654315657b9b208a1
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
' a test module'
__author__ = 'DANTE FUNG'
import sys
def test():
args = sys.argv
if len(args) == 1:
print('Hello world!')
elif len(args) == 2:
print('Hello, %s!' % args[1])
else:
print('Too many arguments!')
if __name__ == '__main__':
test()
|
py
|
1a55cea41e297d2463239699924b1e951d206a7f
|
from typing import List, Any
def quick_sort(array: List[Any], arr_length: int) -> List[Any]:
def __quick_sort(start: int, end: int) -> None:
if start >= end:
return
pivot = array[(start + end) // 2]
left, right = start, end
while left <= right:
while array[left] < pivot:
left += 1
while array[right] > pivot:
right -= 1
if left <= right:
array[left], array[right] = array[right], array[left]
left += 1
right -= 1
__quick_sort(start, right)
__quick_sort(left, end)
__quick_sort(0, arr_length - 1)
return array
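if __name__ == "__main__":
    # Minimal demonstration added by the editor; the original snippet only
    # defined the function. Sorts a small list in place and prints it.
    data = [5, 2, 9, 1, 5, 6]
    print(quick_sort(data, len(data)))  # expected: [1, 2, 5, 5, 6, 9]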
|
py
|
1a55cf8a9bcf496063a239d7b29aad52ae511312
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import datetime
import json
import os
import random as _random
import sys
import traceback
from getopt import getopt, GetoptError
from multiprocessing import Process
from os import environ
from wsgiref.simple_server import make_server
import requests as _requests
from jsonrpcbase import JSONRPCService, InvalidParamsError, KeywordError, \
JSONRPCError, InvalidRequestError
from jsonrpcbase import ServerError as JSONServerError
from biokbase import log
from abbyjergerContigFilter.authclient import KBaseAuth as _KBaseAuth
try:
from ConfigParser import ConfigParser
except ImportError:
from configparser import ConfigParser
DEPLOY = 'KB_DEPLOYMENT_CONFIG'
SERVICE = 'KB_SERVICE_NAME'
AUTH = 'auth-service-url'
# Note that the error fields do not match the 2.0 JSONRPC spec
def get_config_file():
return environ.get(DEPLOY, None)
def get_service_name():
return environ.get(SERVICE, None)
def get_config():
if not get_config_file():
return None
retconfig = {}
config = ConfigParser()
config.read(get_config_file())
for nameval in config.items(get_service_name() or 'abbyjergerContigFilter'):
retconfig[nameval[0]] = nameval[1]
return retconfig
config = get_config()
from abbyjergerContigFilter.abbyjergerContigFilterImpl import abbyjergerContigFilter # noqa @IgnorePep8
impl_abbyjergerContigFilter = abbyjergerContigFilter(config)
class JSONObjectEncoder(json.JSONEncoder):
def default(self, obj):
if isinstance(obj, set):
return list(obj)
if isinstance(obj, frozenset):
return list(obj)
if hasattr(obj, 'toJSONable'):
return obj.toJSONable()
return json.JSONEncoder.default(self, obj)
class JSONRPCServiceCustom(JSONRPCService):
def call(self, ctx, jsondata):
"""
Calls jsonrpc service's method and returns its return value in a JSON
string or None if there is none.
Arguments:
jsondata -- remote method call in jsonrpc format
"""
result = self.call_py(ctx, jsondata)
if result is not None:
return json.dumps(result, cls=JSONObjectEncoder)
return None
def _call_method(self, ctx, request):
"""Calls given method with given params and returns it value."""
method = self.method_data[request['method']]['method']
params = request['params']
result = None
try:
if isinstance(params, list):
# Does it have enough arguments?
if len(params) < self._man_args(method) - 1:
raise InvalidParamsError('not enough arguments')
# Does it have too many arguments?
if(not self._vargs(method) and len(params) >
self._max_args(method) - 1):
raise InvalidParamsError('too many arguments')
result = method(ctx, *params)
elif isinstance(params, dict):
# Do not accept keyword arguments if the jsonrpc version is
# not >=1.1.
if request['jsonrpc'] < 11:
raise KeywordError
result = method(ctx, **params)
else: # No params
result = method(ctx)
except JSONRPCError:
raise
except Exception as e:
# log.exception('method %s threw an exception' % request['method'])
# Exception was raised inside the method.
newerr = JSONServerError()
newerr.trace = traceback.format_exc()
if len(e.args) == 1:
newerr.data = repr(e.args[0])
else:
newerr.data = repr(e.args)
raise newerr
return result
def call_py(self, ctx, jsondata):
"""
Calls jsonrpc service's method and returns its return value in python
object format or None if there is none.
This method is same as call() except the return value is a python
object instead of JSON string. This method is mainly only useful for
debugging purposes.
"""
rdata = jsondata
# we already deserialize the json string earlier in the server code, no
# need to do it again
# try:
# rdata = json.loads(jsondata)
# except ValueError:
# raise ParseError
# set some default values for error handling
request = self._get_default_vals()
if isinstance(rdata, dict) and rdata:
# It's a single request.
self._fill_request(request, rdata)
respond = self._handle_request(ctx, request)
# Don't respond to notifications
if respond is None:
return None
return respond
elif isinstance(rdata, list) and rdata:
# It's a batch.
requests = []
responds = []
for rdata_ in rdata:
# set some default values for error handling
request_ = self._get_default_vals()
self._fill_request(request_, rdata_)
requests.append(request_)
for request_ in requests:
respond = self._handle_request(ctx, request_)
# Don't respond to notifications
if respond is not None:
responds.append(respond)
if responds:
return responds
# Nothing to respond.
return None
else:
# empty dict, list or wrong type
raise InvalidRequestError
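# Editorial illustration (hedged, names are placeholders): call_py expects an
# already-deserialized JSON-RPC request such as
#     {'version': '1.1', 'id': '1', 'method': 'Module.some_method', 'params': [params_dict]}
# and returns the response as a Python dict rather than a JSON string, which is
# why it is mainly useful for debugging.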
def _handle_request(self, ctx, request):
"""Handles given request and returns its response."""
if 'types' in self.method_data[request['method']]:
self._validate_params_types(request['method'], request['params'])
result = self._call_method(ctx, request)
# Do not respond to notifications.
if request['id'] is None:
return None
respond = {}
self._fill_ver(request['jsonrpc'], respond)
respond['result'] = result
respond['id'] = request['id']
return respond
class MethodContext(dict):
def __init__(self, logger):
self['client_ip'] = None
self['user_id'] = None
self['authenticated'] = None
self['token'] = None
self['module'] = None
self['method'] = None
self['call_id'] = None
self['rpc_context'] = None
self['provenance'] = None
self._debug_levels = set([7, 8, 9, 'DEBUG', 'DEBUG2', 'DEBUG3'])
self._logger = logger
def log_err(self, message):
self._log(log.ERR, message)
def log_info(self, message):
self._log(log.INFO, message)
def log_debug(self, message, level=1):
if level in self._debug_levels:
pass
else:
level = int(level)
if level < 1 or level > 3:
raise ValueError("Illegal log level: " + str(level))
level = level + 6
self._log(level, message)
def set_log_level(self, level):
self._logger.set_log_level(level)
def get_log_level(self):
return self._logger.get_log_level()
def clear_log_level(self):
self._logger.clear_user_log_level()
def _log(self, level, message):
self._logger.log_message(level, message, self['client_ip'],
self['user_id'], self['module'],
self['method'], self['call_id'])
def provenance(self):
callbackURL = os.environ.get('SDK_CALLBACK_URL')
if callbackURL:
# OK, there's a callback server from which we can get provenance
arg_hash = {'method': 'CallbackServer.get_provenance',
'params': [],
'version': '1.1',
'id': str(_random.random())[2:]
}
body = json.dumps(arg_hash)
response = _requests.post(callbackURL, data=body,
timeout=60)
response.encoding = 'utf-8'
if response.status_code == 500:
if ('content-type' in response.headers and
response.headers['content-type'] ==
'application/json'):
err = response.json()
if 'error' in err:
raise ServerError(**err['error'])
else:
raise ServerError('Unknown', 0, response.text)
else:
raise ServerError('Unknown', 0, response.text)
if not response.ok:
response.raise_for_status()
resp = response.json()
if 'result' not in resp:
raise ServerError('Unknown', 0,
'An unknown server error occurred')
return resp['result'][0]
else:
return self.get('provenance')
class ServerError(Exception):
'''
The call returned an error. Fields:
name - the name of the error.
code - the error code.
message - a human readable error message.
data - the server side stacktrace.
'''
def __init__(self, name, code, message, data=None, error=None):
super(Exception, self).__init__(message)
self.name = name
self.code = code
self.message = message if message else ''
self.data = data or error or ''
# data = JSON RPC 2.0, error = 1.1
def __str__(self):
return self.name + ': ' + str(self.code) + '. ' + self.message + \
'\n' + self.data
def getIPAddress(environ):
xFF = environ.get('HTTP_X_FORWARDED_FOR')
realIP = environ.get('HTTP_X_REAL_IP')
trustXHeaders = config is None or \
config.get('dont_trust_x_ip_headers') != 'true'
if (trustXHeaders):
if (xFF):
return xFF.split(',')[0].strip()
if (realIP):
return realIP.strip()
return environ.get('REMOTE_ADDR')
class Application(object):
# Wrap the wsgi handler in a class definition so that we can
# do some initialization and avoid regenerating stuff over
# and over
def logcallback(self):
self.serverlog.set_log_file(self.userlog.get_log_file())
def log(self, level, context, message):
self.serverlog.log_message(level, message, context['client_ip'],
context['user_id'], context['module'],
context['method'], context['call_id'])
def __init__(self):
submod = get_service_name() or 'abbyjergerContigFilter'
self.userlog = log.log(
submod, ip_address=True, authuser=True, module=True, method=True,
call_id=True, changecallback=self.logcallback,
config=get_config_file())
self.serverlog = log.log(
submod, ip_address=True, authuser=True, module=True, method=True,
call_id=True, logfile=self.userlog.get_log_file())
self.serverlog.set_log_level(6)
self.rpc_service = JSONRPCServiceCustom()
self.method_authentication = dict()
self.rpc_service.add(impl_abbyjergerContigFilter.run_abbyjergerContigFilter,
name='abbyjergerContigFilter.run_abbyjergerContigFilter',
types=[dict])
self.method_authentication['abbyjergerContigFilter.run_abbyjergerContigFilter'] = 'required' # noqa
self.rpc_service.add(impl_abbyjergerContigFilter.run_abbyjergerContigFilter_max,
name='abbyjergerContigFilter.run_abbyjergerContigFilter_max',
types=[dict])
self.method_authentication['abbyjergerContigFilter.run_abbyjergerContigFilter_max'] = 'required' # noqa
self.rpc_service.add(impl_abbyjergerContigFilter.status,
name='abbyjergerContigFilter.status',
types=[dict])
authurl = config.get(AUTH) if config else None
self.auth_client = _KBaseAuth(authurl)
def __call__(self, environ, start_response):
# Context object, equivalent to the perl impl CallContext
ctx = MethodContext(self.userlog)
ctx['client_ip'] = getIPAddress(environ)
status = '500 Internal Server Error'
try:
body_size = int(environ.get('CONTENT_LENGTH', 0))
except (ValueError):
body_size = 0
if environ['REQUEST_METHOD'] == 'OPTIONS':
# we basically do nothing and just return headers
status = '200 OK'
rpc_result = ""
else:
request_body = environ['wsgi.input'].read(body_size)
try:
req = json.loads(request_body)
except ValueError as ve:
err = {'error': {'code': -32700,
'name': "Parse error",
'message': str(ve),
}
}
rpc_result = self.process_error(err, ctx, {'version': '1.1'})
else:
ctx['module'], ctx['method'] = req['method'].split('.')
ctx['call_id'] = req['id']
ctx['rpc_context'] = {
'call_stack': [{'time': self.now_in_utc(),
'method': req['method']}
]
}
prov_action = {'service': ctx['module'],
'method': ctx['method'],
'method_params': req['params']
}
ctx['provenance'] = [prov_action]
try:
token = environ.get('HTTP_AUTHORIZATION')
# parse out the method being requested and check if it
# has an authentication requirement
method_name = req['method']
auth_req = self.method_authentication.get(
method_name, 'none')
if auth_req != 'none':
if token is None and auth_req == 'required':
err = JSONServerError()
err.data = (
'Authentication required for ' +
'abbyjergerContigFilter ' +
'but no authentication header was passed')
raise err
elif token is None and auth_req == 'optional':
pass
else:
try:
user = self.auth_client.get_user(token)
ctx['user_id'] = user
ctx['authenticated'] = 1
ctx['token'] = token
except Exception as e:
if auth_req == 'required':
err = JSONServerError()
err.data = \
"Token validation failed: %s" % e
raise err
if (environ.get('HTTP_X_FORWARDED_FOR')):
self.log(log.INFO, ctx, 'X-Forwarded-For: ' +
environ.get('HTTP_X_FORWARDED_FOR'))
self.log(log.INFO, ctx, 'start method')
rpc_result = self.rpc_service.call(ctx, req)
self.log(log.INFO, ctx, 'end method')
status = '200 OK'
except JSONRPCError as jre:
err = {'error': {'code': jre.code,
'name': jre.message,
'message': jre.data
}
}
trace = jre.trace if hasattr(jre, 'trace') else None
rpc_result = self.process_error(err, ctx, req, trace)
except Exception:
err = {'error': {'code': 0,
'name': 'Unexpected Server Error',
'message': 'An unexpected server error ' +
'occurred',
}
}
rpc_result = self.process_error(err, ctx, req,
traceback.format_exc())
# print('Request method was %s\n' % environ['REQUEST_METHOD'])
# print('Environment dictionary is:\n%s\n' % pprint.pformat(environ))
# print('Request body was: %s' % request_body)
# print('Result from the method call is:\n%s\n' % \
# pprint.pformat(rpc_result))
if rpc_result:
response_body = rpc_result
else:
response_body = ''
response_headers = [
('Access-Control-Allow-Origin', '*'),
('Access-Control-Allow-Headers', environ.get(
'HTTP_ACCESS_CONTROL_REQUEST_HEADERS', 'authorization')),
('content-type', 'application/json'),
('content-length', str(len(response_body)))]
start_response(status, response_headers)
return [response_body.encode('utf8')]
def process_error(self, error, context, request, trace=None):
if trace:
self.log(log.ERR, context, trace.split('\n')[0:-1])
if 'id' in request:
error['id'] = request['id']
if 'version' in request:
error['version'] = request['version']
e = error['error'].get('error')
if not e:
error['error']['error'] = trace
elif 'jsonrpc' in request:
error['jsonrpc'] = request['jsonrpc']
error['error']['data'] = trace
else:
error['version'] = '1.0'
error['error']['error'] = trace
return json.dumps(error)
def now_in_utc(self):
# noqa Taken from http://stackoverflow.com/questions/3401428/how-to-get-an-isoformat-datetime-string-including-the-default-timezone @IgnorePep8
dtnow = datetime.datetime.now()
dtutcnow = datetime.datetime.utcnow()
delta = dtnow - dtutcnow
hh, mm = divmod((delta.days * 24 * 60 * 60 + delta.seconds + 30) // 60,
60)
return "%s%+02d:%02d" % (dtnow.isoformat(), hh, mm)
application = Application()
# This is the uwsgi application dictionary. On startup uwsgi will look
# for this dict and pull its configuration from here.
# This simply lists where to "mount" the application in the URL path
#
# This uwsgi module "magically" appears when running the app within
# uwsgi and is not available otherwise, so wrap an exception handler
# around it
#
# To run this server in uwsgi with 4 workers listening on port 9999 use:
# uwsgi -M -p 4 --http :9999 --wsgi-file _this_file_
# To run a using the single threaded python BaseHTTP service
# listening on port 9999 by default execute this file
#
try:
import uwsgi
# Before we do anything with the application, see if the
# configs specify patching all std routines to be asynch
# *ONLY* use this if you are going to wrap the service in
# a wsgi container that has enabled gevent, such as
# uwsgi with the --gevent option
if config is not None and config.get('gevent_monkeypatch_all', False):
print("Monkeypatching std libraries for async")
from gevent import monkey
monkey.patch_all()
uwsgi.applications = {'': application}
except ImportError:
# Not available outside of wsgi, ignore
pass
_proc = None
def start_server(host='localhost', port=0, newprocess=False):
'''
By default, will start the server on localhost on a system assigned port
in the main thread. Execution of the main thread will stay in the server
main loop until interrupted. To run the server in a separate process, and
thus allow the stop_server method to be called, set newprocess = True. This
also allows the port number to be returned.'''
global _proc
if _proc:
raise RuntimeError('server is already running')
httpd = make_server(host, port, application)
port = httpd.server_address[1]
print("Listening on port %s" % port)
if newprocess:
_proc = Process(target=httpd.serve_forever)
_proc.daemon = True
_proc.start()
else:
httpd.serve_forever()
return port
def stop_server():
global _proc
_proc.terminate()
_proc = None
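# Hedged usage sketch (editorial): running the service in-process for testing.
#
#     port = start_server(newprocess=True)   # child process, returns the bound port
#     ...                                    # send JSON-RPC calls to http://localhost:<port>/
#     stop_server()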
def process_async_cli(input_file_path, output_file_path, token):
exit_code = 0
with open(input_file_path) as data_file:
req = json.load(data_file)
if 'version' not in req:
req['version'] = '1.1'
if 'id' not in req:
req['id'] = str(_random.random())[2:]
ctx = MethodContext(application.userlog)
if token:
user = application.auth_client.get_user(token)
ctx['user_id'] = user
ctx['authenticated'] = 1
ctx['token'] = token
if 'context' in req:
ctx['rpc_context'] = req['context']
ctx['CLI'] = 1
ctx['module'], ctx['method'] = req['method'].split('.')
prov_action = {'service': ctx['module'], 'method': ctx['method'],
'method_params': req['params']}
ctx['provenance'] = [prov_action]
resp = None
try:
resp = application.rpc_service.call_py(ctx, req)
except JSONRPCError as jre:
trace = jre.trace if hasattr(jre, 'trace') else None
resp = {'id': req['id'],
'version': req['version'],
'error': {'code': jre.code,
'name': jre.message,
'message': jre.data,
'error': trace}
}
except Exception:
trace = traceback.format_exc()
resp = {'id': req['id'],
'version': req['version'],
'error': {'code': 0,
'name': 'Unexpected Server Error',
'message': 'An unexpected server error occurred',
'error': trace}
}
if 'error' in resp:
exit_code = 500
with open(output_file_path, "w") as f:
f.write(json.dumps(resp, cls=JSONObjectEncoder))
return exit_code
if __name__ == "__main__":
if (len(sys.argv) >= 3 and len(sys.argv) <= 4 and
os.path.isfile(sys.argv[1])):
token = None
if len(sys.argv) == 4:
if os.path.isfile(sys.argv[3]):
with open(sys.argv[3]) as token_file:
token = token_file.read()
else:
token = sys.argv[3]
sys.exit(process_async_cli(sys.argv[1], sys.argv[2], token))
try:
opts, args = getopt(sys.argv[1:], "", ["port=", "host="])
except GetoptError as err:
# print help information and exit:
print(str(err)) # will print something like "option -a not recognized"
sys.exit(2)
port = 9999
host = 'localhost'
for o, a in opts:
if o == '--port':
port = int(a)
elif o == '--host':
host = a
print("Host set to %s" % host)
else:
assert False, "unhandled option"
start_server(host=host, port=port)
# print("Listening on port %s" % port)
# httpd = make_server( host, port, application)
#
# httpd.serve_forever()
|
py
|
1a55cfd36c55e6138c8d87f68f35ad1a562c9570
|
import gc
gc.threshold((gc.mem_free() + gc.mem_alloc()) // 4)
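# Editorial note: on MicroPython, gc.mem_free() + gc.mem_alloc() is the total
# heap size, so this asks the collector to run automatically roughly every time
# a quarter of the heap has been newly allocated.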
|
py
|
1a55d0322f0ec50710d5b7168715de8efac78be9
|
# -*- coding: utf-8 -*-
import datetime
from south.db import db
from south.v2 import DataMigration
from django.db import models
class Migration(DataMigration):
def forwards(self, orm):
"Write your forwards methods here."
dupe_emails = set()
for email_data in orm["auth.User"].objects.values("email").distinct().annotate(
num=models.Count("id")):
if email_data["num"] > 1:
dupe_emails.add(email_data["email"])
for email in dupe_emails:
i = 0
for user in orm["auth.User"].objects.filter(email=email).order_by("id"):
if i:
user.email = email + u"-dupe-" + unicode(i)
user.save()
i += 1
def backwards(self, orm):
"Write your backwards methods here."
models = {
'auth.group': {
'Meta': {'object_name': 'Group'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
},
'auth.permission': {
'Meta': {'ordering': "('content_type__app_label', 'content_type__model', 'codename')", 'unique_together': "(('content_type', 'codename'),)", 'object_name': 'Permission'},
'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
'auth.user': {
'Meta': {'object_name': 'User'},
'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'email': ('django.db.models.fields.EmailField', [], {'unique': 'True', 'max_length': '75', 'blank': 'True'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Group']", 'symmetrical': 'False', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}),
'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
},
'contenttypes.contenttype': {
'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
'core.product': {
'Meta': {'ordering': "['name']", 'object_name': 'Product'},
'cc_version': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'created_by': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'+'", 'null': 'True', 'to': "orm['auth.User']"}),
'created_on': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime(2012, 3, 28, 0, 0)'}),
'deleted_by': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'+'", 'null': 'True', 'to': "orm['auth.User']"}),
'deleted_on': ('django.db.models.fields.DateTimeField', [], {'db_index': 'True', 'null': 'True', 'blank': 'True'}),
'description': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'has_team': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'modified_by': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'+'", 'null': 'True', 'to': "orm['auth.User']"}),
'modified_on': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime(2012, 3, 28, 0, 0)'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'own_team': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.User']", 'symmetrical': 'False', 'blank': 'True'})
},
'core.productversion': {
'Meta': {'ordering': "['product', 'order']", 'object_name': 'ProductVersion'},
'cc_version': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'codename': ('django.db.models.fields.CharField', [], {'max_length': '100', 'blank': 'True'}),
'created_by': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'+'", 'null': 'True', 'to': "orm['auth.User']"}),
'created_on': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime(2012, 3, 28, 0, 0)'}),
'deleted_by': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'+'", 'null': 'True', 'to': "orm['auth.User']"}),
'deleted_on': ('django.db.models.fields.DateTimeField', [], {'db_index': 'True', 'null': 'True', 'blank': 'True'}),
'environments': ('django.db.models.fields.related.ManyToManyField', [], {'related_name': "'productversion'", 'symmetrical': 'False', 'to': "orm['environments.Environment']"}),
'has_team': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'latest': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'modified_by': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'+'", 'null': 'True', 'to': "orm['auth.User']"}),
'modified_on': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime(2012, 3, 28, 0, 0)'}),
'order': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'own_team': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.User']", 'symmetrical': 'False', 'blank': 'True'}),
'product': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'versions'", 'to': "orm['core.Product']"}),
'version': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
'environments.category': {
'Meta': {'ordering': "['name']", 'object_name': 'Category'},
'cc_version': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'created_by': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'+'", 'null': 'True', 'to': "orm['auth.User']"}),
'created_on': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime(2012, 3, 28, 0, 0)'}),
'deleted_by': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'+'", 'null': 'True', 'to': "orm['auth.User']"}),
'deleted_on': ('django.db.models.fields.DateTimeField', [], {'db_index': 'True', 'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'modified_by': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'+'", 'null': 'True', 'to': "orm['auth.User']"}),
'modified_on': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime(2012, 3, 28, 0, 0)'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '200'})
},
'environments.element': {
'Meta': {'ordering': "['name']", 'object_name': 'Element'},
'category': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'elements'", 'to': "orm['environments.Category']"}),
'cc_version': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'created_by': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'+'", 'null': 'True', 'to': "orm['auth.User']"}),
'created_on': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime(2012, 3, 28, 0, 0)'}),
'deleted_by': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'+'", 'null': 'True', 'to': "orm['auth.User']"}),
'deleted_on': ('django.db.models.fields.DateTimeField', [], {'db_index': 'True', 'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'modified_by': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'+'", 'null': 'True', 'to': "orm['auth.User']"}),
'modified_on': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime(2012, 3, 28, 0, 0)'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '200'})
},
'environments.environment': {
'Meta': {'object_name': 'Environment'},
'cc_version': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'created_by': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'+'", 'null': 'True', 'to': "orm['auth.User']"}),
'created_on': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime(2012, 3, 28, 0, 0)'}),
'deleted_by': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'+'", 'null': 'True', 'to': "orm['auth.User']"}),
'deleted_on': ('django.db.models.fields.DateTimeField', [], {'db_index': 'True', 'null': 'True', 'blank': 'True'}),
'elements': ('django.db.models.fields.related.ManyToManyField', [], {'related_name': "'environments'", 'symmetrical': 'False', 'to': "orm['environments.Element']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'modified_by': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'+'", 'null': 'True', 'to': "orm['auth.User']"}),
'modified_on': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime(2012, 3, 28, 0, 0)'}),
'profile': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'environments'", 'null': 'True', 'to': "orm['environments.Profile']"})
},
'environments.profile': {
'Meta': {'object_name': 'Profile'},
'cc_version': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'created_by': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'+'", 'null': 'True', 'to': "orm['auth.User']"}),
'created_on': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime(2012, 3, 28, 0, 0)'}),
'deleted_by': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'+'", 'null': 'True', 'to': "orm['auth.User']"}),
'deleted_on': ('django.db.models.fields.DateTimeField', [], {'db_index': 'True', 'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'modified_by': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'+'", 'null': 'True', 'to': "orm['auth.User']"}),
'modified_on': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime(2012, 3, 28, 0, 0)'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '200'})
}
}
complete_apps = ['core']
symmetrical = True
|
py
|
1a55d0b5e9963afdf2bcdbab13416e1e3e06a7fd
|
# Copyright (c) University of Utah
from IPython.display import display
from traitlets import Bool, Dict, HasTraits, Instance, Int, List, Tuple, Unicode, observe, Set, link
from ipywidgets import HBox, VBox, IntRangeSlider, FloatRangeSlider
import ipywidgets as widgets
from . import BaseTreeView
from .filters import AttrFilter, Trigger, GroupUIFilter
class TreeView(BaseTreeView):
options = List(Unicode(), ['span',
'fitness', 'parent_fitness', 'child_fitness', 'shared_fitness',
'coef_change', 'coef_similarity',
'inv_fitness',
'min', 'max', 'unique_max', 'unique_min',
'dim_parent', 'dim_child',
'dim_min', 'dim_max',
'q_fitness',
])
x_value = Tuple(default_value=(0, 1))
y_value = Tuple(default_value=(0, 1))
def __init__(self, src=None, auto=True, x=None, y=None, **kwargs):
super().__init__(**kwargs)
self._filters = {}
self.box = VBox()
self._links = []
self._group_filter = GroupUIFilter()
self._trigger = Trigger(self._group_filter, func=self._apply_filter)
self._filters = {}
self._auto = auto
self._auto_filter = None
self.show_measure = False
# setup controls
self._ctrls = HBox()
self._menu = widgets.Dropdown(
options=self.options,
description='Attribute:',
value=self.attr,
disabled=False,
)
self.x_slider = IntRangeSlider(min=0, max=1, value=(0, 1), description='Points:')
self.y_slider = FloatRangeSlider(min=0, max=1, value=(0,1), description='Persistence:', step=0.001)
self._ctrls = HBox([self._menu, self.y_slider, self.x_slider])
link((self, 'x_value'), (self.x_slider, 'value'))
link((self, 'y_value'), (self.y_slider, 'value'))
self._auto_filter = AttrFilter(attr=self._menu.value)
if self._auto:
self._group_filter.add(self._auto_filter, name='auto')
widgets.link((self, 'attr'), (self._menu, 'value'))
self.observe(self._auto_update, names=['attr'])
# setup view
self._links = [
widgets.link((self, 'x'), (self.x_slider, 'value')),
widgets.link((self, 'y'), (self.y_slider, 'value')),
]
self._update_children()
if src is not None:
self.src = src
if x is not None:
self.x = x
if y is not None:
self.y = y
def _apply_filter(self):
if self.tree is not None:
self.show = self.tree.filter(self._group_filter)
def _auto_update(self, change):
self._auto_filter.attr = self.attr
self._auto_filter.update_range(self.tree)
@observe('tree')
def tree_view_tree_changed(self, change):
if self.tree is None:
self.x_slider.value = (self.x_slider.min, self.x_slider.max)
else:
reset = self.x_slider.value[1] == self.x_slider.max
self.x_slider.max = self.tree.regulus.pts.size()
if reset:
self.x_slider.value = self.x_slider.value[0], self.x_slider.max
@property
def filters(self):
return self._group_filter
@filters.setter
def filters(self, f):
if f == self._group_filter:
return
self._trigger.remove(self._group_filter)
self._group_filter = f
if self._auto:
self._group_filter.insert(0, self._auto_filter, name='auto')
self._trigger.add(self._group_filter)
self._update_children()
@property
def opts(self):
return self._menu.options
@opts.setter
def opts(self, opts):
self._menu.options = opts
def add_option(self, attr):
if attr not in self._menu.options:
self._menu.options = list(self.options) + [attr]
self.attr = attr
def remove_option(self, attr):
if attr in self._menu.options:
opts = list(self._menu.options)
opts.remove(attr)
self._menu.options = opts
if self.attr == attr:
self.attr = opts[0] if len(opts) > 0 else None
def _update_children(self):
children = [self._ctrls, self, self._group_filter]
self.box.children = children
def find_filter(self, name):
return self._group_filter.find(name)
def add_filter(self, *args, **kwargs):
f = self._group_filter.add(*args, **kwargs)
if self.tree and hasattr(f, 'update_range'):
f.update_range(self.tree)
return f
def insert_filter(self, idx, *args, **kwargs):
f = self._group_filter.insert(idx, *args, **kwargs)
if self.tree and hasattr(f, 'update_range'):
f.update_range(self.tree)
def remove_filter(self, item):
self._group_filter.remove(item)
@property
def auto(self):
return self._auto
@auto.setter
def auto(self, value):
if value != self._auto:
self._auto = value
if self._auto:
self._group_filter.insert(0, self._auto_filter)
else:
self._group_filter.remove(self._auto_filter)
def _ipython_display_(self, **kwargs):
display(self.box)
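# Hedged usage sketch (editorial; `regulus_tree` is a placeholder for a loaded
# regulus topology object, which is not defined in this file):
#
#     view = TreeView(src=regulus_tree)
#     view.add_filter(AttrFilter(attr='fitness'))
#     view   # displaying the widget shows the attribute dropdown, sliders and tree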
|
py
|
1a55d2168a2c5b13aa2ea9a47b780a993154c9f4
|
import FWCore.ParameterSet.Config as cms
RU_ME1A = cms.PSet(
doCollisions = cms.bool(True),
enlarge = cms.bool(False),
chi2Norm_2D_ = cms.double(35),
chi2_str = cms.double(50.0),
chi2Max = cms.double(100.0),
dPhiIntMax = cms.double(0.005),
dPhiMax = cms.double(0.006),
wideSeg = cms.double(3.0),
minLayersApart = cms.int32(1),
dRIntMax = cms.double(2.0),
dRMax = cms.double(1.5)
)
RU_ME1B = cms.PSet(
doCollisions = cms.bool(True),
enlarge = cms.bool(False),
chi2Norm_2D_ = cms.double(35),
chi2_str = cms.double(50.0),
chi2Max = cms.double(100.0),
dPhiIntMax = cms.double(0.004),
dPhiMax = cms.double(0.005),
wideSeg = cms.double(3.0),
minLayersApart = cms.int32(1),
dRIntMax = cms.double(2.0),
dRMax = cms.double(1.5)
)
RU_ME12 = cms.PSet(
doCollisions = cms.bool(True),
enlarge = cms.bool(False),
chi2Norm_2D_ = cms.double(35),
chi2_str = cms.double(50.0),
chi2Max = cms.double(100.0),
dPhiIntMax = cms.double(0.003),
dPhiMax = cms.double(0.004),
wideSeg = cms.double(3.0),
minLayersApart = cms.int32(1),
dRIntMax = cms.double(2.0),
dRMax = cms.double(1.5)
)
RU_ME13 = cms.PSet(
doCollisions = cms.bool(True),
enlarge = cms.bool(False),
chi2Norm_2D_ = cms.double(20),
chi2_str = cms.double(30.0),
chi2Max = cms.double(60.0),
dPhiIntMax = cms.double(0.002),
dPhiMax = cms.double(0.003),
wideSeg = cms.double(3.0),
minLayersApart = cms.int32(1),
dRIntMax = cms.double(2.0),
dRMax = cms.double(1.5)
)
RU_MEX1 = cms.PSet(
doCollisions = cms.bool(True),
enlarge = cms.bool(False),
chi2Norm_2D_ = cms.double(60),
chi2_str = cms.double(80.0),
chi2Max = cms.double(180.0),
dPhiIntMax = cms.double(0.005),
dPhiMax = cms.double(0.007),
wideSeg = cms.double(3.0),
minLayersApart = cms.int32(1),
dRIntMax = cms.double(2.0),
dRMax = cms.double(1.5)
)
RU_MEX2 = cms.PSet(
doCollisions = cms.bool(True),
enlarge = cms.bool(False),
chi2Norm_2D_ = cms.double(35),
chi2_str = cms.double(50.0),
chi2Max = cms.double(100.0),
dPhiIntMax = cms.double(0.004),
dPhiMax = cms.double(0.006),
wideSeg = cms.double(3.0),
minLayersApart = cms.int32(1),
dRIntMax = cms.double(2.0),
dRMax = cms.double(1.5)
)
CSCSegAlgoRU = cms.PSet(
chamber_types = cms.vstring('ME1/a',
'ME1/b',
'ME1/2',
'ME1/3',
'ME2/1',
'ME2/2',
'ME3/1',
'ME3/2',
'ME4/1',
'ME4/2'),
algo_name = cms.string('CSCSegAlgoRU'),
algo_psets = cms.VPSet(
cms.PSet(
RU_ME1A
),
cms.PSet(
RU_ME1B
),
cms.PSet(
RU_ME12
),
cms.PSet(
RU_ME13
),
cms.PSet(
RU_MEX1
),
cms.PSet(
RU_MEX2
)),
parameters_per_chamber_type = cms.vint32(1, 2, 3, 4, 5,
6, 5, 6, 5, 6)
)
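# Editorial note: parameters_per_chamber_type maps each entry of chamber_types
# (ME1/a ... ME4/2, in order) to the 1-based index of the PSet in algo_psets to
# use for that chamber type, so e.g. ME1/a uses RU_ME1A while ME2/2, ME3/2 and
# ME4/2 all share RU_MEX2.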
|
py
|
1a55d218eaf99279a9c506213cd7030427bed4e8
|
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from opteryx.storage import BasePartitionScheme
def _safe_get_next_element(lst, item):
"""get the element from a list which follows a given element"""
try:
index = lst.index(item)
return lst[index + 1]
except (ValueError, IndexError):  # item missing from the list, or it was the last element
return None
def _extract_as_at(path):
for part in path.split("/"):
if part.startswith("as_at_"):
return part
return ""
def _extract_by(path):
for part in path.split("/"):
if part.startswith("by_"):
return part
_is_complete = lambda blobs, as_at: any(
blob for blob in blobs if as_at + "/frame.complete" in blob
)
_is_invalid = lambda blobs, as_at: any(
blob for blob in blobs if (as_at + "/frame.ignore" in blob)
)
class MabelPartitionScheme(BasePartitionScheme):
"""
Handle reading data using the Mabel partition scheme.
"""
def partition_format(self):
return "year_{yyyy}/month_{mm}/day_{dd}"
def _inner_filter_blobs(self, list_of_blobs, statistics):
# The segments are stored in folders with the prefix 'by_', as in,
# segments **by** field name
list_of_segments = {
_extract_by(blob) for blob in list_of_blobs if "/by_" in blob
}
chosen_segment = ""
# If we have multiple 'by_' segments, pick one - pick the first one until
# we start making cost-based decisions
if list_of_segments:
list_of_segments = sorted(list_of_segments)
chosen_segment = list_of_segments.pop()
# Do the pruning
list_of_blobs = [
blob for blob in list_of_blobs if f"/{chosen_segment}/" in blob
]
# build a list of the segments we're going to read, for example, if we have
# data which are segmented by hour, this will be the hour=00 part
if chosen_segment == "":
segmented_folders = {""}
else:
segmented_folders = {
_safe_get_next_element(blob.split("/"), chosen_segment)
for blob in list_of_blobs
}
# count the segments we're planning to read
statistics.segments_scanned += len(segmented_folders)
# go through the list of segments, getting the active frame for each
for segment_folder in segmented_folders:
# we get the blobs for this segment by looking for the path to contain
# a combination of the segment and the segmented folder
segment_blobs = [
blob
for blob in list_of_blobs
if f"{chosen_segment}/{segment_folder}" in blob
]
# work out if there's an as_at part
as_ats = {
_extract_as_at(blob) for blob in segment_blobs if "as_at_" in blob
}
if as_ats:
as_ats = sorted(as_ats)
as_at = as_ats.pop()
while not _is_complete(segment_blobs, as_at) or _is_invalid(
segment_blobs, as_at
):
if len(as_ats) > 0:
as_at = as_ats.pop()
else:
return []
# get_logger().debug(f"Reading Frame `{as_at}`")
yield from (blob for blob in segment_blobs if (as_at in blob))
else:
yield from list_of_blobs
def filter_blobs(self, list_of_blobs, statistics):
return list(self._inner_filter_blobs(list_of_blobs, statistics))
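# Editorial illustration (hedged; the path components are invented for the
# example): a blob path this scheme is designed to prune looks roughly like
#     .../year_2022/month_05/day_01/by_hour/hour=09/as_at_1651395600/frame.complete
# where the 'by_' folder names the segmentation, the 'as_at_' folder names a
# frame, and the frame.complete / frame.ignore markers decide which frame is read.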
|
py
|
1a55d2236f8fc03ef426f57e17d7b51f66d0cdd8
|
# -*- coding: utf-8 -*-
# Generated by Django 1.10.2 on 2018-04-14 12:51
from __future__ import unicode_literals
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
initial = True
dependencies = [
('shortener', '0001_initial'),
]
operations = [
migrations.CreateModel(
name='ClickEvent',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('count', models.IntegerField(default=0)),
('updated', models.DateTimeField(auto_now=True)),
('timestamp', models.DateTimeField(auto_now_add=True)),
('short_url', models.OneToOneField(on_delete=django.db.models.deletion.CASCADE, to='shortener.ShortURL')),
],
),
]
|
py
|
1a55d3a34e1e65548ff39cb885271cde36e4434e
|
from typing import TypeVar, Optional
T = TypeVar("T")
def ensure(value: Optional[T]) -> T:
if value is None:
raise RuntimeError("Expected a non-None value to be present.")
return value
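# Hedged usage sketch (editorial; the environment lookup is just an example):
#
#     import os
#     maybe_user: Optional[str] = os.environ.get("USER")
#     user: str = ensure(maybe_user)   # raises RuntimeError if the value is None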
|
py
|
1a55d3d4178975f466c7d8c56f6542159a3b88c2
|
import collections.abc
import copy
import datetime
import decimal
import operator
import uuid
import warnings
from base64 import b64decode, b64encode
from functools import partialmethod, total_ordering
from django import forms
from django.apps import apps
from django.conf import settings
from django.core import checks, exceptions, validators
from django.db import connection, connections, router
from django.db.models.constants import LOOKUP_SEP
from django.db.models.query_utils import DeferredAttribute, RegisterLookupMixin
from django.utils import timezone
from django.utils.datastructures import DictWrapper
from django.utils.dateparse import (
parse_date, parse_datetime, parse_duration, parse_time,
)
from django.utils.duration import duration_microseconds, duration_string
from django.utils.functional import Promise, cached_property
from django.utils.ipv6 import clean_ipv6_address
from django.utils.itercompat import is_iterable
from django.utils.text import capfirst
from django.utils.translation import gettext_lazy as _
__all__ = [
'AutoField', 'BLANK_CHOICE_DASH', 'BigAutoField', 'BigIntegerField',
'BinaryField', 'BooleanField', 'CharField', 'CommaSeparatedIntegerField',
'DateField', 'DateTimeField', 'DecimalField', 'DurationField',
'EmailField', 'Empty', 'Field', 'FilePathField', 'FloatField',
'GenericIPAddressField', 'IPAddressField', 'IntegerField', 'NOT_PROVIDED',
'NullBooleanField', 'PositiveBigIntegerField', 'PositiveIntegerField',
'PositiveSmallIntegerField', 'SlugField', 'SmallAutoField',
'SmallIntegerField', 'TextField', 'TimeField', 'URLField', 'UUIDField',
]
class Empty:
pass
class NOT_PROVIDED:
pass
# The values to use for "blank" in SelectFields. Will be appended to the start
# of most "choices" lists.
BLANK_CHOICE_DASH = [("", "---------")]
def _load_field(app_label, model_name, field_name):
return apps.get_model(app_label, model_name)._meta.get_field(field_name)
# A guide to Field parameters:
#
# * name: The name of the field specified in the model.
# * attname: The attribute to use on the model object. This is the same as
# "name", except in the case of ForeignKeys, where "_id" is
# appended.
# * db_column: The db_column specified in the model (or None).
# * column: The database column for this field. This is the same as
# "attname", except if db_column is specified.
#
# Code that introspects values, or does other dynamic things, should use
# attname. For example, this gets the primary key value of object "obj":
#
# getattr(obj, opts.pk.attname)
def _empty(of_cls):
new = Empty()
new.__class__ = of_cls
return new
def return_None():
return None
@total_ordering
class Field(RegisterLookupMixin):
"""Base class for all field types"""
# Designates whether empty strings fundamentally are allowed at the
# database level.
empty_strings_allowed = True
empty_values = list(validators.EMPTY_VALUES)
# These track each time a Field instance is created. Used to retain order.
# The auto_creation_counter is used for fields that Django implicitly
# creates, creation_counter is used for all user-specified fields.
creation_counter = 0
auto_creation_counter = -1
default_validators = [] # Default set of validators
default_error_messages = {
'invalid_choice': _('Value %(value)r is not a valid choice.'),
'null': _('This field cannot be null.'),
'blank': _('This field cannot be blank.'),
'unique': _('%(model_name)s with this %(field_label)s '
'already exists.'),
# Translators: The 'lookup_type' is one of 'date', 'year' or 'month'.
# Eg: "Title must be unique for pub_date year"
'unique_for_date': _("%(field_label)s must be unique for "
"%(date_field_label)s %(lookup_type)s."),
}
system_check_deprecated_details = None
system_check_removed_details = None
# Field flags
hidden = False
many_to_many = None
many_to_one = None
one_to_many = None
one_to_one = None
related_model = None
descriptor_class = DeferredAttribute
# Generic field type description, usually overridden by subclasses
def _description(self):
return _('Field of type: %(field_type)s') % {
'field_type': self.__class__.__name__
}
description = property(_description)
def __init__(self, verbose_name=None, name=None, primary_key=False,
max_length=None, unique=False, blank=False, null=False,
db_index=False, rel=None, default=NOT_PROVIDED, editable=True,
serialize=True, unique_for_date=None, unique_for_month=None,
unique_for_year=None, choices=None, help_text='', db_column=None,
db_tablespace=None, auto_created=False, validators=(),
error_messages=None):
self.name = name
self.verbose_name = verbose_name # May be set by set_attributes_from_name
self._verbose_name = verbose_name # Store original for deconstruction
self.primary_key = primary_key
self.max_length, self._unique = max_length, unique
self.blank, self.null = blank, null
self.remote_field = rel
self.is_relation = self.remote_field is not None
self.default = default
self.editable = editable
self.serialize = serialize
self.unique_for_date = unique_for_date
self.unique_for_month = unique_for_month
self.unique_for_year = unique_for_year
if isinstance(choices, collections.abc.Iterator):
choices = list(choices)
self.choices = choices
self.help_text = help_text
self.db_index = db_index
self.db_column = db_column
self._db_tablespace = db_tablespace
self.auto_created = auto_created
# Adjust the appropriate creation counter, and save our local copy.
if auto_created:
self.creation_counter = Field.auto_creation_counter
Field.auto_creation_counter -= 1
else:
self.creation_counter = Field.creation_counter
Field.creation_counter += 1
self._validators = list(validators) # Store for deconstruction later
messages = {}
for c in reversed(self.__class__.__mro__):
messages.update(getattr(c, 'default_error_messages', {}))
messages.update(error_messages or {})
self._error_messages = error_messages # Store for deconstruction later
self.error_messages = messages
def __str__(self):
"""
Return "app_label.model_label.field_name" for fields attached to
models.
"""
if not hasattr(self, 'model'):
return super().__str__()
model = self.model
app = model._meta.app_label
return '%s.%s.%s' % (app, model._meta.object_name, self.name)
def __repr__(self):
"""Display the module, class, and name of the field."""
path = '%s.%s' % (self.__class__.__module__, self.__class__.__qualname__)
name = getattr(self, 'name', None)
if name is not None:
return '<%s: %s>' % (path, name)
return '<%s>' % path
def check(self, **kwargs):
return [
*self._check_field_name(),
*self._check_choices(),
*self._check_db_index(),
*self._check_null_allowed_for_primary_keys(),
*self._check_backend_specific_checks(**kwargs),
*self._check_validators(),
*self._check_deprecation_details(),
]
def _check_field_name(self):
"""
Check if field name is valid, i.e. 1) does not end with an
underscore, 2) does not contain "__" and 3) is not "pk".
"""
if self.name.endswith('_'):
return [
checks.Error(
'Field names must not end with an underscore.',
obj=self,
id='fields.E001',
)
]
elif LOOKUP_SEP in self.name:
return [
checks.Error(
'Field names must not contain "%s".' % (LOOKUP_SEP,),
obj=self,
id='fields.E002',
)
]
elif self.name == 'pk':
return [
checks.Error(
"'pk' is a reserved word that cannot be used as a field name.",
obj=self,
id='fields.E003',
)
]
else:
return []
@classmethod
def _choices_is_value(cls, value):
return isinstance(value, (str, Promise)) or not is_iterable(value)
def _check_choices(self):
if not self.choices:
return []
if not is_iterable(self.choices) or isinstance(self.choices, str):
return [
checks.Error(
"'choices' must be an iterable (e.g., a list or tuple).",
obj=self,
id='fields.E004',
)
]
choice_max_length = 0
# Expect [group_name, [value, display]]
for choices_group in self.choices:
try:
group_name, group_choices = choices_group
except (TypeError, ValueError):
# Containing non-pairs
break
try:
if not all(
self._choices_is_value(value) and self._choices_is_value(human_name)
for value, human_name in group_choices
):
break
if self.max_length is not None and group_choices:
choice_max_length = max([
choice_max_length,
*(len(value) for value, _ in group_choices if isinstance(value, str)),
])
except (TypeError, ValueError):
# No groups, choices in the form [value, display]
value, human_name = group_name, group_choices
if not self._choices_is_value(value) or not self._choices_is_value(human_name):
break
if self.max_length is not None and isinstance(value, str):
choice_max_length = max(choice_max_length, len(value))
# Special case: choices=['ab']
if isinstance(choices_group, str):
break
else:
if self.max_length is not None and choice_max_length > self.max_length:
return [
checks.Error(
"'max_length' is too small to fit the longest value "
"in 'choices' (%d characters)." % choice_max_length,
obj=self,
id='fields.E009',
),
]
return []
return [
checks.Error(
"'choices' must be an iterable containing "
"(actual value, human readable name) tuples.",
obj=self,
id='fields.E005',
)
]
def _check_db_index(self):
if self.db_index not in (None, True, False):
return [
checks.Error(
"'db_index' must be None, True or False.",
obj=self,
id='fields.E006',
)
]
else:
return []
def _check_null_allowed_for_primary_keys(self):
if (self.primary_key and self.null and
not connection.features.interprets_empty_strings_as_nulls):
# We cannot reliably check this for backends like Oracle which
# consider NULL and '' to be equal (and thus set up
# character-based fields a little differently).
return [
checks.Error(
'Primary keys must not have null=True.',
hint=('Set null=False on the field, or '
'remove primary_key=True argument.'),
obj=self,
id='fields.E007',
)
]
else:
return []
def _check_backend_specific_checks(self, **kwargs):
app_label = self.model._meta.app_label
for db in connections:
if router.allow_migrate(db, app_label, model_name=self.model._meta.model_name):
return connections[db].validation.check_field(self, **kwargs)
return []
def _check_validators(self):
errors = []
for i, validator in enumerate(self.validators):
if not callable(validator):
errors.append(
checks.Error(
"All 'validators' must be callable.",
hint=(
"validators[{i}] ({repr}) isn't a function or "
"instance of a validator class.".format(
i=i, repr=repr(validator),
)
),
obj=self,
id='fields.E008',
)
)
return errors
def _check_deprecation_details(self):
if self.system_check_removed_details is not None:
return [
checks.Error(
self.system_check_removed_details.get(
'msg',
'%s has been removed except for support in historical '
'migrations.' % self.__class__.__name__
),
hint=self.system_check_removed_details.get('hint'),
obj=self,
id=self.system_check_removed_details.get('id', 'fields.EXXX'),
)
]
elif self.system_check_deprecated_details is not None:
return [
checks.Warning(
self.system_check_deprecated_details.get(
'msg',
'%s has been deprecated.' % self.__class__.__name__
),
hint=self.system_check_deprecated_details.get('hint'),
obj=self,
id=self.system_check_deprecated_details.get('id', 'fields.WXXX'),
)
]
return []
def get_col(self, alias, output_field=None):
if output_field is None:
output_field = self
if alias != self.model._meta.db_table or output_field != self:
from django.db.models.expressions import Col
return Col(alias, self, output_field)
else:
return self.cached_col
@cached_property
def cached_col(self):
from django.db.models.expressions import Col
return Col(self.model._meta.db_table, self)
def select_format(self, compiler, sql, params):
"""
Custom format for select clauses. For example, GIS columns need to be
selected as AsText(table.col) on MySQL as the table.col data can't be
used by Django.
"""
return sql, params
def deconstruct(self):
"""
Return enough information to recreate the field as a 4-tuple:
* The name of the field on the model, if contribute_to_class() has
been run.
* The import path of the field, including the class, e.g.
django.db.models.IntegerField. This should be the most portable
version, so less specific may be better.
* A list of positional arguments.
* A dict of keyword arguments.
Note that the positional or keyword arguments must contain values of
the following types (including inner values of collection types):
* None, bool, str, int, float, complex, set, frozenset, list, tuple,
dict
* UUID
* datetime.datetime (naive), datetime.date
* top-level classes, top-level functions - will be referenced by their
full import path
* Storage instances - these have their own deconstruct() method
This is because the values here must be serialized into a text format
(possibly new Python code, possibly JSON) and these are the only types
with encoding handlers defined.
There's no need to return the exact way the field was instantiated this
time, just ensure that the resulting field is the same - prefer keyword
arguments over positional ones, and omit parameters with their default
values.
"""
# Short-form way of fetching all the default parameters
keywords = {}
possibles = {
"verbose_name": None,
"primary_key": False,
"max_length": None,
"unique": False,
"blank": False,
"null": False,
"db_index": False,
"default": NOT_PROVIDED,
"editable": True,
"serialize": True,
"unique_for_date": None,
"unique_for_month": None,
"unique_for_year": None,
"choices": None,
"help_text": '',
"db_column": None,
"db_tablespace": None,
"auto_created": False,
"validators": [],
"error_messages": None,
}
attr_overrides = {
"unique": "_unique",
"error_messages": "_error_messages",
"validators": "_validators",
"verbose_name": "_verbose_name",
"db_tablespace": "_db_tablespace",
}
equals_comparison = {"choices", "validators"}
for name, default in possibles.items():
value = getattr(self, attr_overrides.get(name, name))
# Unroll anything iterable for choices into a concrete list
if name == "choices" and isinstance(value, collections.abc.Iterable):
value = list(value)
# Do correct kind of comparison
if name in equals_comparison:
if value != default:
keywords[name] = value
else:
if value is not default:
keywords[name] = value
# Work out path - we shorten it for known Django core fields
path = "%s.%s" % (self.__class__.__module__, self.__class__.__qualname__)
if path.startswith("django.db.models.fields.related"):
path = path.replace("django.db.models.fields.related", "django.db.models")
elif path.startswith("django.db.models.fields.files"):
path = path.replace("django.db.models.fields.files", "django.db.models")
elif path.startswith("django.db.models.fields.proxy"):
path = path.replace("django.db.models.fields.proxy", "django.db.models")
elif path.startswith("django.db.models.fields"):
path = path.replace("django.db.models.fields", "django.db.models")
# Return basic info - other fields should override this.
return (self.name, path, [], keywords)
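    # A minimal illustration (hypothetical field, not part of this module): for a
    # bound field declared as  title = models.CharField(max_length=100, null=True),
    # deconstruct() returns roughly
    #     ('title', 'django.db.models.CharField', [], {'max_length': 100, 'null': True})
    # which is the form the migrations framework serializes.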
def clone(self):
"""
Uses deconstruct() to clone a new copy of this Field.
Will not preserve any class attachments/attribute names.
"""
name, path, args, kwargs = self.deconstruct()
return self.__class__(*args, **kwargs)
def __eq__(self, other):
# Needed for @total_ordering
if isinstance(other, Field):
return self.creation_counter == other.creation_counter
return NotImplemented
def __lt__(self, other):
# This is needed because bisect does not take a comparison function.
if isinstance(other, Field):
return self.creation_counter < other.creation_counter
return NotImplemented
def __hash__(self):
return hash(self.creation_counter)
def __deepcopy__(self, memodict):
# We don't have to deepcopy very much here, since most things are not
# intended to be altered after initial creation.
obj = copy.copy(self)
if self.remote_field:
obj.remote_field = copy.copy(self.remote_field)
if hasattr(self.remote_field, 'field') and self.remote_field.field is self:
obj.remote_field.field = obj
memodict[id(self)] = obj
return obj
def __copy__(self):
# We need to avoid hitting __reduce__, so define this
# slightly weird copy construct.
obj = Empty()
obj.__class__ = self.__class__
obj.__dict__ = self.__dict__.copy()
return obj
def __reduce__(self):
"""
Pickling should return the model._meta.fields instance of the field,
not a new copy of that field. So, use the app registry to load the
model and then the field back.
"""
if not hasattr(self, 'model'):
# Fields are sometimes used without attaching them to models (for
# example in aggregation). In this case give back a plain field
# instance. The code below will create a new empty instance of
# class self.__class__, then update its dict with self.__dict__
# values - so, this is very close to normal pickle.
state = self.__dict__.copy()
# The _get_default cached_property can't be pickled due to lambda
# usage.
state.pop('_get_default', None)
return _empty, (self.__class__,), state
return _load_field, (self.model._meta.app_label, self.model._meta.object_name,
self.name)
def get_pk_value_on_save(self, instance):
"""
Hook to generate new PK values on save. This method is called when
        saving instances with no primary key value set. If this method returns
        something other than None, the returned value is used when saving
        the new instance.
"""
if self.default:
return self.get_default()
return None
def to_python(self, value):
"""
Convert the input value into the expected Python data type, raising
django.core.exceptions.ValidationError if the data can't be converted.
Return the converted value. Subclasses should override this.
"""
return value
@cached_property
def validators(self):
"""
Some validators can't be created at field initialization time.
This method provides a way to delay their creation until required.
"""
return [*self.default_validators, *self._validators]
def run_validators(self, value):
if value in self.empty_values:
return
errors = []
for v in self.validators:
try:
v(value)
except exceptions.ValidationError as e:
if hasattr(e, 'code') and e.code in self.error_messages:
e.message = self.error_messages[e.code]
errors.extend(e.error_list)
if errors:
raise exceptions.ValidationError(errors)
def validate(self, value, model_instance):
"""
Validate value and raise ValidationError if necessary. Subclasses
should override this to provide validation logic.
"""
if not self.editable:
# Skip validation for non-editable fields.
return
if self.choices is not None and value not in self.empty_values:
for option_key, option_value in self.choices:
if isinstance(option_value, (list, tuple)):
# This is an optgroup, so look inside the group for
# options.
for optgroup_key, optgroup_value in option_value:
if value == optgroup_key:
return
elif value == option_key:
return
raise exceptions.ValidationError(
self.error_messages['invalid_choice'],
code='invalid_choice',
params={'value': value},
)
if value is None and not self.null:
raise exceptions.ValidationError(self.error_messages['null'], code='null')
if not self.blank and value in self.empty_values:
raise exceptions.ValidationError(self.error_messages['blank'], code='blank')
def clean(self, value, model_instance):
"""
Convert the value's type and run validation. Validation errors
from to_python() and validate() are propagated. Return the correct
value if no error is raised.
"""
value = self.to_python(value)
self.validate(value, model_instance)
self.run_validators(value)
return value
def db_type_parameters(self, connection):
return DictWrapper(self.__dict__, connection.ops.quote_name, 'qn_')
def db_check(self, connection):
"""
Return the database column check constraint for this field, for the
provided connection. Works the same way as db_type() for the case that
get_internal_type() does not map to a preexisting model field.
"""
data = self.db_type_parameters(connection)
try:
return connection.data_type_check_constraints[self.get_internal_type()] % data
except KeyError:
return None
def db_type(self, connection):
"""
Return the database column data type for this field, for the provided
connection.
"""
# The default implementation of this method looks at the
# backend-specific data_types dictionary, looking up the field by its
# "internal type".
#
# A Field class can implement the get_internal_type() method to specify
# which *preexisting* Django Field class it's most similar to -- i.e.,
# a custom field might be represented by a TEXT column type, which is
# the same as the TextField Django field type, which means the custom
# field's get_internal_type() returns 'TextField'.
#
# But the limitation of the get_internal_type() / data_types approach
# is that it cannot handle database column types that aren't already
# mapped to one of the built-in Django field types. In this case, you
# can implement db_type() instead of get_internal_type() to specify
# exactly which wacky database column type you want to use.
data = self.db_type_parameters(connection)
try:
return connection.data_types[self.get_internal_type()] % data
except KeyError:
return None
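    # A minimal sketch of the two approaches described above (hypothetical custom
    # fields, not part of this module):
    #
    #     class HandField(Field):
    #         def db_type(self, connection):
    #             return 'char(104)'      # exact column type, bypasses data_types
    #
    #     class LongText(Field):
    #         def get_internal_type(self):
    #             return 'TextField'      # reuse the built-in TextField mapping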
def rel_db_type(self, connection):
"""
Return the data type that a related field pointing to this field should
use. For example, this method is called by ForeignKey and OneToOneField
to determine its data type.
"""
return self.db_type(connection)
def cast_db_type(self, connection):
"""Return the data type to use in the Cast() function."""
db_type = connection.ops.cast_data_types.get(self.get_internal_type())
if db_type:
return db_type % self.db_type_parameters(connection)
return self.db_type(connection)
def db_parameters(self, connection):
"""
Extension of db_type(), providing a range of different return values
(type, checks). This will look at db_type(), allowing custom model
fields to override it.
"""
type_string = self.db_type(connection)
check_string = self.db_check(connection)
return {
"type": type_string,
"check": check_string,
}
def db_type_suffix(self, connection):
return connection.data_types_suffix.get(self.get_internal_type())
def get_db_converters(self, connection):
if hasattr(self, 'from_db_value'):
return [self.from_db_value]
return []
@property
def unique(self):
return self._unique or self.primary_key
@property
def db_tablespace(self):
return self._db_tablespace or settings.DEFAULT_INDEX_TABLESPACE
@property
def db_returning(self):
"""
Private API intended only to be used by Django itself. Currently only
the PostgreSQL backend supports returning multiple fields on a model.
"""
return False
def set_attributes_from_name(self, name):
self.name = self.name or name
self.attname, self.column = self.get_attname_column()
self.concrete = self.column is not None
if self.verbose_name is None and self.name:
self.verbose_name = self.name.replace('_', ' ')
def contribute_to_class(self, cls, name, private_only=False):
"""
Register the field with the model class it belongs to.
If private_only is True, create a separate instance of this field
for every subclass of cls, even if cls is not an abstract model.
"""
self.set_attributes_from_name(name)
self.model = cls
cls._meta.add_field(self, private=private_only)
if self.column:
# Don't override classmethods with the descriptor. This means that
# if you have a classmethod and a field with the same name, then
# such fields can't be deferred (we don't have a check for this).
if not getattr(cls, self.attname, None):
setattr(cls, self.attname, self.descriptor_class(self))
if self.choices is not None:
if not hasattr(cls, 'get_%s_display' % self.name):
setattr(
cls,
'get_%s_display' % self.name,
partialmethod(cls._get_FIELD_display, field=self),
)
def get_filter_kwargs_for_object(self, obj):
"""
Return a dict that when passed as kwargs to self.model.filter(), would
yield all instances having the same value for this field as obj has.
"""
return {self.name: getattr(obj, self.attname)}
def get_attname(self):
return self.name
def get_attname_column(self):
attname = self.get_attname()
column = self.db_column or attname
return attname, column
def get_internal_type(self):
return self.__class__.__name__
def pre_save(self, model_instance, add):
"""Return field's value just before saving."""
return getattr(model_instance, self.attname)
def get_prep_value(self, value):
"""Perform preliminary non-db specific value checks and conversions."""
if isinstance(value, Promise):
value = value._proxy____cast()
return value
def get_db_prep_value(self, value, connection, prepared=False):
"""
Return field's value prepared for interacting with the database backend.
Used by the default implementations of get_db_prep_save().
"""
if not prepared:
value = self.get_prep_value(value)
return value
def get_db_prep_save(self, value, connection):
"""Return field's value prepared for saving into a database."""
return self.get_db_prep_value(value, connection=connection, prepared=False)
def has_default(self):
"""Return a boolean of whether this field has a default value."""
return self.default is not NOT_PROVIDED
def get_default(self):
"""Return the default value for this field."""
return self._get_default()
@cached_property
def _get_default(self):
if self.has_default():
if callable(self.default):
return self.default
return lambda: self.default
if not self.empty_strings_allowed or self.null and not connection.features.interprets_empty_strings_as_nulls:
return return_None
return str # return empty string
def get_choices(self, include_blank=True, blank_choice=BLANK_CHOICE_DASH, limit_choices_to=None, ordering=()):
"""
        Return choices with a default blank choice included, for use
as <select> choices for this field.
"""
if self.choices is not None:
choices = list(self.choices)
if include_blank:
blank_defined = any(choice in ('', None) for choice, _ in self.flatchoices)
if not blank_defined:
choices = blank_choice + choices
return choices
rel_model = self.remote_field.model
limit_choices_to = limit_choices_to or self.get_limit_choices_to()
choice_func = operator.attrgetter(
self.remote_field.get_related_field().attname
if hasattr(self.remote_field, 'get_related_field')
else 'pk'
)
qs = rel_model._default_manager.complex_filter(limit_choices_to)
if ordering:
qs = qs.order_by(*ordering)
return (blank_choice if include_blank else []) + [
(choice_func(x), str(x)) for x in qs
]
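    # Illustration (hypothetical declaration, not from this module): for a field
    # declared with choices=[('S', 'Small'), ('L', 'Large')], get_choices() returns
    # [('', '---------'), ('S', 'Small'), ('L', 'Large')] by default, and passing
    # include_blank=False drops the leading blank option.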
def value_to_string(self, obj):
"""
Return a string value of this field from the passed obj.
This is used by the serialization framework.
"""
return str(self.value_from_object(obj))
def _get_flatchoices(self):
"""Flattened version of choices tuple."""
if self.choices is None:
return []
flat = []
for choice, value in self.choices:
if isinstance(value, (list, tuple)):
flat.extend(value)
else:
flat.append((choice, value))
return flat
flatchoices = property(_get_flatchoices)
def save_form_data(self, instance, data):
setattr(instance, self.name, data)
def formfield(self, form_class=None, choices_form_class=None, **kwargs):
"""Return a django.forms.Field instance for this field."""
defaults = {
'required': not self.blank,
'label': capfirst(self.verbose_name),
'help_text': self.help_text,
}
if self.has_default():
if callable(self.default):
defaults['initial'] = self.default
defaults['show_hidden_initial'] = True
else:
defaults['initial'] = self.get_default()
if self.choices is not None:
# Fields with choices get special treatment.
include_blank = (self.blank or
not (self.has_default() or 'initial' in kwargs))
defaults['choices'] = self.get_choices(include_blank=include_blank)
defaults['coerce'] = self.to_python
if self.null:
defaults['empty_value'] = None
if choices_form_class is not None:
form_class = choices_form_class
else:
form_class = forms.TypedChoiceField
# Many of the subclass-specific formfield arguments (min_value,
# max_value) don't apply for choice fields, so be sure to only pass
# the values that TypedChoiceField will understand.
for k in list(kwargs):
if k not in ('coerce', 'empty_value', 'choices', 'required',
'widget', 'label', 'initial', 'help_text',
'error_messages', 'show_hidden_initial', 'disabled'):
del kwargs[k]
defaults.update(kwargs)
if form_class is None:
form_class = forms.CharField
return form_class(**defaults)
def value_from_object(self, obj):
"""Return the value of this field in the given model instance."""
return getattr(obj, self.attname)
class BooleanField(Field):
empty_strings_allowed = False
default_error_messages = {
'invalid': _('“%(value)s” value must be either True or False.'),
'invalid_nullable': _('“%(value)s” value must be either True, False, or None.'),
}
description = _("Boolean (Either True or False)")
def get_internal_type(self):
return "BooleanField"
def to_python(self, value):
if self.null and value in self.empty_values:
return None
if value in (True, False):
# 1/0 are equal to True/False. bool() converts former to latter.
return bool(value)
if value in ('t', 'True', '1'):
return True
if value in ('f', 'False', '0'):
return False
raise exceptions.ValidationError(
self.error_messages['invalid_nullable' if self.null else 'invalid'],
code='invalid',
params={'value': value},
)
def get_prep_value(self, value):
value = super().get_prep_value(value)
if value is None:
return None
return self.to_python(value)
def formfield(self, **kwargs):
if self.choices is not None:
include_blank = not (self.has_default() or 'initial' in kwargs)
defaults = {'choices': self.get_choices(include_blank=include_blank)}
else:
form_class = forms.NullBooleanField if self.null else forms.BooleanField
# In HTML checkboxes, 'required' means "must be checked" which is
# different from the choices case ("must select some value").
# required=False allows unchecked checkboxes.
defaults = {'form_class': form_class, 'required': False}
return super().formfield(**{**defaults, **kwargs})
class CharField(Field):
description = _("String (up to %(max_length)s)")
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self.validators.append(validators.MaxLengthValidator(self.max_length))
def check(self, **kwargs):
return [
*super().check(**kwargs),
*self._check_max_length_attribute(**kwargs),
]
def _check_max_length_attribute(self, **kwargs):
if self.max_length is None:
return [
checks.Error(
"CharFields must define a 'max_length' attribute.",
obj=self,
id='fields.E120',
)
]
elif (not isinstance(self.max_length, int) or isinstance(self.max_length, bool) or
self.max_length <= 0):
return [
checks.Error(
"'max_length' must be a positive integer.",
obj=self,
id='fields.E121',
)
]
else:
return []
def cast_db_type(self, connection):
if self.max_length is None:
return connection.ops.cast_char_field_without_max_length
return super().cast_db_type(connection)
def get_internal_type(self):
return "CharField"
def to_python(self, value):
if isinstance(value, str) or value is None:
return value
return str(value)
def get_prep_value(self, value):
value = super().get_prep_value(value)
return self.to_python(value)
def formfield(self, **kwargs):
# Passing max_length to forms.CharField means that the value's length
# will be validated twice. This is considered acceptable since we want
# the value in the form field (to pass into widget for example).
defaults = {'max_length': self.max_length}
# TODO: Handle multiple backends with different feature flags.
if self.null and not connection.features.interprets_empty_strings_as_nulls:
defaults['empty_value'] = None
defaults.update(kwargs)
return super().formfield(**defaults)
class CommaSeparatedIntegerField(CharField):
default_validators = [validators.validate_comma_separated_integer_list]
description = _("Comma-separated integers")
system_check_removed_details = {
'msg': (
'CommaSeparatedIntegerField is removed except for support in '
'historical migrations.'
),
'hint': (
'Use CharField(validators=[validate_comma_separated_integer_list]) '
'instead.'
),
'id': 'fields.E901',
}
class DateTimeCheckMixin:
def check(self, **kwargs):
return [
*super().check(**kwargs),
*self._check_mutually_exclusive_options(),
*self._check_fix_default_value(),
]
def _check_mutually_exclusive_options(self):
# auto_now, auto_now_add, and default are mutually exclusive
# options. The use of more than one of these options together
# will trigger an Error
mutually_exclusive_options = [self.auto_now_add, self.auto_now, self.has_default()]
enabled_options = [option not in (None, False) for option in mutually_exclusive_options].count(True)
if enabled_options > 1:
return [
checks.Error(
"The options auto_now, auto_now_add, and default "
"are mutually exclusive. Only one of these options "
"may be present.",
obj=self,
id='fields.E160',
)
]
else:
return []
def _check_fix_default_value(self):
return []
class DateField(DateTimeCheckMixin, Field):
empty_strings_allowed = False
default_error_messages = {
'invalid': _('“%(value)s” value has an invalid date format. It must be '
'in YYYY-MM-DD format.'),
'invalid_date': _('“%(value)s” value has the correct format (YYYY-MM-DD) '
'but it is an invalid date.'),
}
description = _("Date (without time)")
def __init__(self, verbose_name=None, name=None, auto_now=False,
auto_now_add=False, **kwargs):
self.auto_now, self.auto_now_add = auto_now, auto_now_add
if auto_now or auto_now_add:
kwargs['editable'] = False
kwargs['blank'] = True
super().__init__(verbose_name, name, **kwargs)
def _check_fix_default_value(self):
"""
Warn that using an actual date or datetime value is probably wrong;
it's only evaluated on server startup.
"""
if not self.has_default():
return []
now = timezone.now()
if not timezone.is_naive(now):
now = timezone.make_naive(now, timezone.utc)
value = self.default
if isinstance(value, datetime.datetime):
if not timezone.is_naive(value):
value = timezone.make_naive(value, timezone.utc)
value = value.date()
elif isinstance(value, datetime.date):
# Nothing to do, as dates don't have tz information
pass
else:
# No explicit date / datetime value -- no checks necessary
return []
offset = datetime.timedelta(days=1)
lower = (now - offset).date()
upper = (now + offset).date()
if lower <= value <= upper:
return [
checks.Warning(
'Fixed default value provided.',
hint='It seems you set a fixed date / time / datetime '
'value as default for this field. This may not be '
'what you want. If you want to have the current date '
'as default, use `django.utils.timezone.now`',
obj=self,
id='fields.W161',
)
]
return []
def deconstruct(self):
name, path, args, kwargs = super().deconstruct()
if self.auto_now:
kwargs['auto_now'] = True
if self.auto_now_add:
kwargs['auto_now_add'] = True
if self.auto_now or self.auto_now_add:
del kwargs['editable']
del kwargs['blank']
return name, path, args, kwargs
def get_internal_type(self):
return "DateField"
def to_python(self, value):
if value is None:
return value
if isinstance(value, datetime.datetime):
if settings.USE_TZ and timezone.is_aware(value):
# Convert aware datetimes to the default time zone
# before casting them to dates (#17742).
default_timezone = timezone.get_default_timezone()
value = timezone.make_naive(value, default_timezone)
return value.date()
if isinstance(value, datetime.date):
return value
try:
parsed = parse_date(value)
if parsed is not None:
return parsed
except ValueError:
raise exceptions.ValidationError(
self.error_messages['invalid_date'],
code='invalid_date',
params={'value': value},
)
raise exceptions.ValidationError(
self.error_messages['invalid'],
code='invalid',
params={'value': value},
)
def pre_save(self, model_instance, add):
if self.auto_now or (self.auto_now_add and add):
value = datetime.date.today()
setattr(model_instance, self.attname, value)
return value
else:
return super().pre_save(model_instance, add)
def contribute_to_class(self, cls, name, **kwargs):
super().contribute_to_class(cls, name, **kwargs)
if not self.null:
setattr(
cls, 'get_next_by_%s' % self.name,
partialmethod(cls._get_next_or_previous_by_FIELD, field=self, is_next=True)
)
setattr(
cls, 'get_previous_by_%s' % self.name,
partialmethod(cls._get_next_or_previous_by_FIELD, field=self, is_next=False)
)
def get_prep_value(self, value):
value = super().get_prep_value(value)
return self.to_python(value)
def get_db_prep_value(self, value, connection, prepared=False):
# Casts dates into the format expected by the backend
if not prepared:
value = self.get_prep_value(value)
return connection.ops.adapt_datefield_value(value)
def value_to_string(self, obj):
val = self.value_from_object(obj)
return '' if val is None else val.isoformat()
def formfield(self, **kwargs):
return super().formfield(**{
'form_class': forms.DateField,
**kwargs,
})
class DateTimeField(DateField):
empty_strings_allowed = False
default_error_messages = {
'invalid': _('“%(value)s” value has an invalid format. It must be in '
'YYYY-MM-DD HH:MM[:ss[.uuuuuu]][TZ] format.'),
'invalid_date': _("“%(value)s” value has the correct format "
"(YYYY-MM-DD) but it is an invalid date."),
'invalid_datetime': _('“%(value)s” value has the correct format '
'(YYYY-MM-DD HH:MM[:ss[.uuuuuu]][TZ]) '
'but it is an invalid date/time.'),
}
description = _("Date (with time)")
# __init__ is inherited from DateField
def _check_fix_default_value(self):
"""
Warn that using an actual date or datetime value is probably wrong;
it's only evaluated on server startup.
"""
if not self.has_default():
return []
now = timezone.now()
if not timezone.is_naive(now):
now = timezone.make_naive(now, timezone.utc)
value = self.default
if isinstance(value, datetime.datetime):
second_offset = datetime.timedelta(seconds=10)
lower = now - second_offset
upper = now + second_offset
if timezone.is_aware(value):
value = timezone.make_naive(value, timezone.utc)
elif isinstance(value, datetime.date):
second_offset = datetime.timedelta(seconds=10)
lower = now - second_offset
lower = datetime.datetime(lower.year, lower.month, lower.day)
upper = now + second_offset
upper = datetime.datetime(upper.year, upper.month, upper.day)
value = datetime.datetime(value.year, value.month, value.day)
else:
# No explicit date / datetime value -- no checks necessary
return []
if lower <= value <= upper:
return [
checks.Warning(
'Fixed default value provided.',
hint='It seems you set a fixed date / time / datetime '
'value as default for this field. This may not be '
'what you want. If you want to have the current date '
'as default, use `django.utils.timezone.now`',
obj=self,
id='fields.W161',
)
]
return []
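    # Illustration (hypothetical model, not from this module):
    #     created = models.DateTimeField(default=timezone.now())  # evaluated once, at import
    # triggers fields.W161 because the default is a fixed datetime close to now,
    # while passing the callable itself,
    #     created = models.DateTimeField(default=timezone.now)
    # is re-evaluated on every save and raises no warning.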
def get_internal_type(self):
return "DateTimeField"
def to_python(self, value):
if value is None:
return value
if isinstance(value, datetime.datetime):
return value
if isinstance(value, datetime.date):
value = datetime.datetime(value.year, value.month, value.day)
if settings.USE_TZ:
# For backwards compatibility, interpret naive datetimes in
# local time. This won't work during DST change, but we can't
# do much about it, so we let the exceptions percolate up the
# call stack.
warnings.warn("DateTimeField %s.%s received a naive datetime "
"(%s) while time zone support is active." %
(self.model.__name__, self.name, value),
RuntimeWarning)
default_timezone = timezone.get_default_timezone()
value = timezone.make_aware(value, default_timezone)
return value
try:
parsed = parse_datetime(value)
if parsed is not None:
return parsed
except ValueError:
raise exceptions.ValidationError(
self.error_messages['invalid_datetime'],
code='invalid_datetime',
params={'value': value},
)
try:
parsed = parse_date(value)
if parsed is not None:
return datetime.datetime(parsed.year, parsed.month, parsed.day)
except ValueError:
raise exceptions.ValidationError(
self.error_messages['invalid_date'],
code='invalid_date',
params={'value': value},
)
raise exceptions.ValidationError(
self.error_messages['invalid'],
code='invalid',
params={'value': value},
)
def pre_save(self, model_instance, add):
if self.auto_now or (self.auto_now_add and add):
value = timezone.now()
setattr(model_instance, self.attname, value)
return value
else:
return super().pre_save(model_instance, add)
# contribute_to_class is inherited from DateField, it registers
# get_next_by_FOO and get_prev_by_FOO
def get_prep_value(self, value):
value = super().get_prep_value(value)
value = self.to_python(value)
if value is not None and settings.USE_TZ and timezone.is_naive(value):
# For backwards compatibility, interpret naive datetimes in local
# time. This won't work during DST change, but we can't do much
# about it, so we let the exceptions percolate up the call stack.
try:
name = '%s.%s' % (self.model.__name__, self.name)
except AttributeError:
name = '(unbound)'
warnings.warn("DateTimeField %s received a naive datetime (%s)"
" while time zone support is active." %
(name, value),
RuntimeWarning)
default_timezone = timezone.get_default_timezone()
value = timezone.make_aware(value, default_timezone)
return value
def get_db_prep_value(self, value, connection, prepared=False):
# Casts datetimes into the format expected by the backend
if not prepared:
value = self.get_prep_value(value)
return connection.ops.adapt_datetimefield_value(value)
def value_to_string(self, obj):
val = self.value_from_object(obj)
return '' if val is None else val.isoformat()
def formfield(self, **kwargs):
return super().formfield(**{
'form_class': forms.DateTimeField,
**kwargs,
})
class DecimalField(Field):
empty_strings_allowed = False
default_error_messages = {
'invalid': _('“%(value)s” value must be a decimal number.'),
}
description = _("Decimal number")
def __init__(self, verbose_name=None, name=None, max_digits=None,
decimal_places=None, **kwargs):
self.max_digits, self.decimal_places = max_digits, decimal_places
super().__init__(verbose_name, name, **kwargs)
def check(self, **kwargs):
errors = super().check(**kwargs)
digits_errors = [
*self._check_decimal_places(),
*self._check_max_digits(),
]
if not digits_errors:
errors.extend(self._check_decimal_places_and_max_digits(**kwargs))
else:
errors.extend(digits_errors)
return errors
def _check_decimal_places(self):
try:
decimal_places = int(self.decimal_places)
if decimal_places < 0:
raise ValueError()
except TypeError:
return [
checks.Error(
"DecimalFields must define a 'decimal_places' attribute.",
obj=self,
id='fields.E130',
)
]
except ValueError:
return [
checks.Error(
"'decimal_places' must be a non-negative integer.",
obj=self,
id='fields.E131',
)
]
else:
return []
def _check_max_digits(self):
try:
max_digits = int(self.max_digits)
if max_digits <= 0:
raise ValueError()
except TypeError:
return [
checks.Error(
"DecimalFields must define a 'max_digits' attribute.",
obj=self,
id='fields.E132',
)
]
except ValueError:
return [
checks.Error(
"'max_digits' must be a positive integer.",
obj=self,
id='fields.E133',
)
]
else:
return []
def _check_decimal_places_and_max_digits(self, **kwargs):
if int(self.decimal_places) > int(self.max_digits):
return [
checks.Error(
"'max_digits' must be greater or equal to 'decimal_places'.",
obj=self,
id='fields.E134',
)
]
return []
@cached_property
def validators(self):
return super().validators + [
validators.DecimalValidator(self.max_digits, self.decimal_places)
]
@cached_property
def context(self):
return decimal.Context(prec=self.max_digits)
def deconstruct(self):
name, path, args, kwargs = super().deconstruct()
if self.max_digits is not None:
kwargs['max_digits'] = self.max_digits
if self.decimal_places is not None:
kwargs['decimal_places'] = self.decimal_places
return name, path, args, kwargs
def get_internal_type(self):
return "DecimalField"
def to_python(self, value):
if value is None:
return value
if isinstance(value, float):
return self.context.create_decimal_from_float(value)
try:
return decimal.Decimal(value)
except decimal.InvalidOperation:
raise exceptions.ValidationError(
self.error_messages['invalid'],
code='invalid',
params={'value': value},
)
def get_db_prep_save(self, value, connection):
return connection.ops.adapt_decimalfield_value(self.to_python(value), self.max_digits, self.decimal_places)
def get_prep_value(self, value):
value = super().get_prep_value(value)
return self.to_python(value)
def formfield(self, **kwargs):
return super().formfield(**{
'max_digits': self.max_digits,
'decimal_places': self.decimal_places,
'form_class': forms.DecimalField,
**kwargs,
})
class DurationField(Field):
"""
Store timedelta objects.
Use interval on PostgreSQL, INTERVAL DAY TO SECOND on Oracle, and bigint
of microseconds on other databases.
"""
empty_strings_allowed = False
default_error_messages = {
'invalid': _('“%(value)s” value has an invalid format. It must be in '
'[DD] [[HH:]MM:]ss[.uuuuuu] format.')
}
description = _("Duration")
def get_internal_type(self):
return "DurationField"
def to_python(self, value):
if value is None:
return value
if isinstance(value, datetime.timedelta):
return value
try:
parsed = parse_duration(value)
except ValueError:
pass
else:
if parsed is not None:
return parsed
raise exceptions.ValidationError(
self.error_messages['invalid'],
code='invalid',
params={'value': value},
)
def get_db_prep_value(self, value, connection, prepared=False):
if connection.features.has_native_duration_field:
return value
if value is None:
return None
return duration_microseconds(value)
def get_db_converters(self, connection):
converters = []
if not connection.features.has_native_duration_field:
converters.append(connection.ops.convert_durationfield_value)
return converters + super().get_db_converters(connection)
def value_to_string(self, obj):
val = self.value_from_object(obj)
return '' if val is None else duration_string(val)
def formfield(self, **kwargs):
return super().formfield(**{
'form_class': forms.DurationField,
**kwargs,
})
class EmailField(CharField):
default_validators = [validators.validate_email]
description = _("Email address")
def __init__(self, *args, **kwargs):
# max_length=254 to be compliant with RFCs 3696 and 5321
kwargs.setdefault('max_length', 254)
super().__init__(*args, **kwargs)
def deconstruct(self):
name, path, args, kwargs = super().deconstruct()
# We do not exclude max_length if it matches default as we want to change
# the default in future.
return name, path, args, kwargs
def formfield(self, **kwargs):
# As with CharField, this will cause email validation to be performed
# twice.
return super().formfield(**{
'form_class': forms.EmailField,
**kwargs,
})
class FilePathField(Field):
description = _("File path")
def __init__(self, verbose_name=None, name=None, path='', match=None,
recursive=False, allow_files=True, allow_folders=False, **kwargs):
self.path, self.match, self.recursive = path, match, recursive
self.allow_files, self.allow_folders = allow_files, allow_folders
kwargs.setdefault('max_length', 100)
super().__init__(verbose_name, name, **kwargs)
def check(self, **kwargs):
return [
*super().check(**kwargs),
*self._check_allowing_files_or_folders(**kwargs),
]
def _check_allowing_files_or_folders(self, **kwargs):
if not self.allow_files and not self.allow_folders:
return [
checks.Error(
"FilePathFields must have either 'allow_files' or 'allow_folders' set to True.",
obj=self,
id='fields.E140',
)
]
return []
def deconstruct(self):
name, path, args, kwargs = super().deconstruct()
if self.path != '':
kwargs['path'] = self.path
if self.match is not None:
kwargs['match'] = self.match
if self.recursive is not False:
kwargs['recursive'] = self.recursive
if self.allow_files is not True:
kwargs['allow_files'] = self.allow_files
if self.allow_folders is not False:
kwargs['allow_folders'] = self.allow_folders
if kwargs.get("max_length") == 100:
del kwargs["max_length"]
return name, path, args, kwargs
def get_prep_value(self, value):
value = super().get_prep_value(value)
if value is None:
return None
return str(value)
def formfield(self, **kwargs):
return super().formfield(**{
'path': self.path() if callable(self.path) else self.path,
'match': self.match,
'recursive': self.recursive,
'form_class': forms.FilePathField,
'allow_files': self.allow_files,
'allow_folders': self.allow_folders,
**kwargs,
})
def get_internal_type(self):
return "FilePathField"
class FloatField(Field):
empty_strings_allowed = False
default_error_messages = {
'invalid': _('“%(value)s” value must be a float.'),
}
description = _("Floating point number")
def get_prep_value(self, value):
value = super().get_prep_value(value)
if value is None:
return None
try:
return float(value)
except (TypeError, ValueError) as e:
raise e.__class__(
"Field '%s' expected a number but got %r." % (self.name, value),
) from e
def get_internal_type(self):
return "FloatField"
def to_python(self, value):
if value is None:
return value
try:
return float(value)
except (TypeError, ValueError):
raise exceptions.ValidationError(
self.error_messages['invalid'],
code='invalid',
params={'value': value},
)
def formfield(self, **kwargs):
return super().formfield(**{
'form_class': forms.FloatField,
**kwargs,
})
class IntegerField(Field):
empty_strings_allowed = False
default_error_messages = {
'invalid': _('“%(value)s” value must be an integer.'),
}
description = _("Integer")
def check(self, **kwargs):
return [
*super().check(**kwargs),
*self._check_max_length_warning(),
]
def _check_max_length_warning(self):
if self.max_length is not None:
return [
checks.Warning(
"'max_length' is ignored when used with %s." % self.__class__.__name__,
hint="Remove 'max_length' from field",
obj=self,
id='fields.W122',
)
]
return []
@cached_property
def validators(self):
# These validators can't be added at field initialization time since
# they're based on values retrieved from `connection`.
validators_ = super().validators
internal_type = self.get_internal_type()
min_value, max_value = connection.ops.integer_field_range(internal_type)
if min_value is not None and not any(
(
isinstance(validator, validators.MinValueValidator) and (
validator.limit_value()
if callable(validator.limit_value)
else validator.limit_value
) >= min_value
) for validator in validators_
):
validators_.append(validators.MinValueValidator(min_value))
if max_value is not None and not any(
(
isinstance(validator, validators.MaxValueValidator) and (
validator.limit_value()
if callable(validator.limit_value)
else validator.limit_value
) <= max_value
) for validator in validators_
):
validators_.append(validators.MaxValueValidator(max_value))
return validators_
def get_prep_value(self, value):
value = super().get_prep_value(value)
if value is None:
return None
try:
return int(value)
except (TypeError, ValueError) as e:
raise e.__class__(
"Field '%s' expected a number but got %r." % (self.name, value),
) from e
def get_internal_type(self):
return "IntegerField"
def to_python(self, value):
if value is None:
return value
try:
return int(value)
except (TypeError, ValueError):
raise exceptions.ValidationError(
self.error_messages['invalid'],
code='invalid',
params={'value': value},
)
def formfield(self, **kwargs):
return super().formfield(**{
'form_class': forms.IntegerField,
**kwargs,
})
class BigIntegerField(IntegerField):
description = _("Big (8 byte) integer")
MAX_BIGINT = 9223372036854775807
def get_internal_type(self):
return "BigIntegerField"
def formfield(self, **kwargs):
return super().formfield(**{
'min_value': -BigIntegerField.MAX_BIGINT - 1,
'max_value': BigIntegerField.MAX_BIGINT,
**kwargs,
})
class IPAddressField(Field):
empty_strings_allowed = False
description = _("IPv4 address")
system_check_removed_details = {
'msg': (
'IPAddressField has been removed except for support in '
'historical migrations.'
),
'hint': 'Use GenericIPAddressField instead.',
'id': 'fields.E900',
}
def __init__(self, *args, **kwargs):
kwargs['max_length'] = 15
super().__init__(*args, **kwargs)
def deconstruct(self):
name, path, args, kwargs = super().deconstruct()
del kwargs['max_length']
return name, path, args, kwargs
def get_prep_value(self, value):
value = super().get_prep_value(value)
if value is None:
return None
return str(value)
def get_internal_type(self):
return "IPAddressField"
class GenericIPAddressField(Field):
empty_strings_allowed = False
description = _("IP address")
default_error_messages = {}
def __init__(self, verbose_name=None, name=None, protocol='both',
unpack_ipv4=False, *args, **kwargs):
self.unpack_ipv4 = unpack_ipv4
self.protocol = protocol
self.default_validators, invalid_error_message = \
validators.ip_address_validators(protocol, unpack_ipv4)
self.default_error_messages['invalid'] = invalid_error_message
kwargs['max_length'] = 39
super().__init__(verbose_name, name, *args, **kwargs)
def check(self, **kwargs):
return [
*super().check(**kwargs),
*self._check_blank_and_null_values(**kwargs),
]
def _check_blank_and_null_values(self, **kwargs):
if not getattr(self, 'null', False) and getattr(self, 'blank', False):
return [
checks.Error(
'GenericIPAddressFields cannot have blank=True if null=False, '
'as blank values are stored as nulls.',
obj=self,
id='fields.E150',
)
]
return []
def deconstruct(self):
name, path, args, kwargs = super().deconstruct()
if self.unpack_ipv4 is not False:
kwargs['unpack_ipv4'] = self.unpack_ipv4
if self.protocol != "both":
kwargs['protocol'] = self.protocol
if kwargs.get("max_length") == 39:
del kwargs['max_length']
return name, path, args, kwargs
def get_internal_type(self):
return "GenericIPAddressField"
def to_python(self, value):
if value is None:
return None
if not isinstance(value, str):
value = str(value)
value = value.strip()
if ':' in value:
return clean_ipv6_address(value, self.unpack_ipv4, self.error_messages['invalid'])
return value
def get_db_prep_value(self, value, connection, prepared=False):
if not prepared:
value = self.get_prep_value(value)
return connection.ops.adapt_ipaddressfield_value(value)
def get_prep_value(self, value):
value = super().get_prep_value(value)
if value is None:
return None
if value and ':' in value:
try:
return clean_ipv6_address(value, self.unpack_ipv4)
except exceptions.ValidationError:
pass
return str(value)
def formfield(self, **kwargs):
return super().formfield(**{
'protocol': self.protocol,
'form_class': forms.GenericIPAddressField,
**kwargs,
})
class NullBooleanField(BooleanField):
default_error_messages = {
'invalid': _('“%(value)s” value must be either None, True or False.'),
'invalid_nullable': _('“%(value)s” value must be either None, True or False.'),
}
description = _("Boolean (Either True, False or None)")
def __init__(self, *args, **kwargs):
kwargs['null'] = True
kwargs['blank'] = True
super().__init__(*args, **kwargs)
def deconstruct(self):
name, path, args, kwargs = super().deconstruct()
del kwargs['null']
del kwargs['blank']
return name, path, args, kwargs
def get_internal_type(self):
return "NullBooleanField"
class PositiveIntegerRelDbTypeMixin:
def rel_db_type(self, connection):
"""
Return the data type that a related field pointing to this field should
use. In most cases, a foreign key pointing to a positive integer
primary key will have an integer column data type but some databases
(e.g. MySQL) have an unsigned integer type. In that case
(related_fields_match_type=True), the primary key should return its
db_type.
"""
if connection.features.related_fields_match_type:
return self.db_type(connection)
else:
return IntegerField().db_type(connection=connection)
class PositiveBigIntegerField(PositiveIntegerRelDbTypeMixin, IntegerField):
description = _('Positive big integer')
def get_internal_type(self):
return 'PositiveBigIntegerField'
def formfield(self, **kwargs):
return super().formfield(**{
'min_value': 0,
**kwargs,
})
class PositiveIntegerField(PositiveIntegerRelDbTypeMixin, IntegerField):
description = _("Positive integer")
def get_internal_type(self):
return "PositiveIntegerField"
def formfield(self, **kwargs):
return super().formfield(**{
'min_value': 0,
**kwargs,
})
class PositiveSmallIntegerField(PositiveIntegerRelDbTypeMixin, IntegerField):
description = _("Positive small integer")
def get_internal_type(self):
return "PositiveSmallIntegerField"
def formfield(self, **kwargs):
return super().formfield(**{
'min_value': 0,
**kwargs,
})
class SlugField(CharField):
default_validators = [validators.validate_slug]
description = _("Slug (up to %(max_length)s)")
def __init__(self, *args, max_length=50, db_index=True, allow_unicode=False, **kwargs):
self.allow_unicode = allow_unicode
if self.allow_unicode:
self.default_validators = [validators.validate_unicode_slug]
super().__init__(*args, max_length=max_length, db_index=db_index, **kwargs)
def deconstruct(self):
name, path, args, kwargs = super().deconstruct()
if kwargs.get("max_length") == 50:
del kwargs['max_length']
if self.db_index is False:
kwargs['db_index'] = False
else:
del kwargs['db_index']
if self.allow_unicode is not False:
kwargs['allow_unicode'] = self.allow_unicode
return name, path, args, kwargs
def get_internal_type(self):
return "SlugField"
def formfield(self, **kwargs):
return super().formfield(**{
'form_class': forms.SlugField,
'allow_unicode': self.allow_unicode,
**kwargs,
})
class SmallIntegerField(IntegerField):
description = _("Small integer")
def get_internal_type(self):
return "SmallIntegerField"
class TextField(Field):
description = _("Text")
def get_internal_type(self):
return "TextField"
def to_python(self, value):
if isinstance(value, str) or value is None:
return value
return str(value)
def get_prep_value(self, value):
value = super().get_prep_value(value)
return self.to_python(value)
def formfield(self, **kwargs):
# Passing max_length to forms.CharField means that the value's length
# will be validated twice. This is considered acceptable since we want
# the value in the form field (to pass into widget for example).
return super().formfield(**{
'max_length': self.max_length,
**({} if self.choices is not None else {'widget': forms.Textarea}),
**kwargs,
})
class TimeField(DateTimeCheckMixin, Field):
empty_strings_allowed = False
default_error_messages = {
'invalid': _('“%(value)s” value has an invalid format. It must be in '
'HH:MM[:ss[.uuuuuu]] format.'),
'invalid_time': _('“%(value)s” value has the correct format '
'(HH:MM[:ss[.uuuuuu]]) but it is an invalid time.'),
}
description = _("Time")
def __init__(self, verbose_name=None, name=None, auto_now=False,
auto_now_add=False, **kwargs):
self.auto_now, self.auto_now_add = auto_now, auto_now_add
if auto_now or auto_now_add:
kwargs['editable'] = False
kwargs['blank'] = True
super().__init__(verbose_name, name, **kwargs)
def _check_fix_default_value(self):
"""
Warn that using an actual date or datetime value is probably wrong;
it's only evaluated on server startup.
"""
if not self.has_default():
return []
now = timezone.now()
if not timezone.is_naive(now):
now = timezone.make_naive(now, timezone.utc)
value = self.default
if isinstance(value, datetime.datetime):
second_offset = datetime.timedelta(seconds=10)
lower = now - second_offset
upper = now + second_offset
if timezone.is_aware(value):
value = timezone.make_naive(value, timezone.utc)
elif isinstance(value, datetime.time):
second_offset = datetime.timedelta(seconds=10)
lower = now - second_offset
upper = now + second_offset
value = datetime.datetime.combine(now.date(), value)
if timezone.is_aware(value):
value = timezone.make_naive(value, timezone.utc).time()
else:
# No explicit time / datetime value -- no checks necessary
return []
if lower <= value <= upper:
return [
checks.Warning(
'Fixed default value provided.',
hint='It seems you set a fixed date / time / datetime '
'value as default for this field. This may not be '
'what you want. If you want to have the current date '
'as default, use `django.utils.timezone.now`',
obj=self,
id='fields.W161',
)
]
return []
def deconstruct(self):
name, path, args, kwargs = super().deconstruct()
if self.auto_now is not False:
kwargs["auto_now"] = self.auto_now
if self.auto_now_add is not False:
kwargs["auto_now_add"] = self.auto_now_add
if self.auto_now or self.auto_now_add:
del kwargs['blank']
del kwargs['editable']
return name, path, args, kwargs
def get_internal_type(self):
return "TimeField"
def to_python(self, value):
if value is None:
return None
if isinstance(value, datetime.time):
return value
if isinstance(value, datetime.datetime):
# Not usually a good idea to pass in a datetime here (it loses
# information), but this can be a side-effect of interacting with a
# database backend (e.g. Oracle), so we'll be accommodating.
return value.time()
try:
parsed = parse_time(value)
if parsed is not None:
return parsed
except ValueError:
raise exceptions.ValidationError(
self.error_messages['invalid_time'],
code='invalid_time',
params={'value': value},
)
raise exceptions.ValidationError(
self.error_messages['invalid'],
code='invalid',
params={'value': value},
)
def pre_save(self, model_instance, add):
if self.auto_now or (self.auto_now_add and add):
value = datetime.datetime.now().time()
setattr(model_instance, self.attname, value)
return value
else:
return super().pre_save(model_instance, add)
def get_prep_value(self, value):
value = super().get_prep_value(value)
return self.to_python(value)
def get_db_prep_value(self, value, connection, prepared=False):
# Casts times into the format expected by the backend
if not prepared:
value = self.get_prep_value(value)
return connection.ops.adapt_timefield_value(value)
def value_to_string(self, obj):
val = self.value_from_object(obj)
return '' if val is None else val.isoformat()
def formfield(self, **kwargs):
return super().formfield(**{
'form_class': forms.TimeField,
**kwargs,
})
class URLField(CharField):
default_validators = [validators.URLValidator()]
description = _("URL")
def __init__(self, verbose_name=None, name=None, **kwargs):
kwargs.setdefault('max_length', 200)
super().__init__(verbose_name, name, **kwargs)
def deconstruct(self):
name, path, args, kwargs = super().deconstruct()
if kwargs.get("max_length") == 200:
del kwargs['max_length']
return name, path, args, kwargs
def formfield(self, **kwargs):
# As with CharField, this will cause URL validation to be performed
# twice.
return super().formfield(**{
'form_class': forms.URLField,
**kwargs,
})
class BinaryField(Field):
description = _("Raw binary data")
empty_values = [None, b'']
def __init__(self, *args, **kwargs):
kwargs.setdefault('editable', False)
super().__init__(*args, **kwargs)
if self.max_length is not None:
self.validators.append(validators.MaxLengthValidator(self.max_length))
def check(self, **kwargs):
return [*super().check(**kwargs), *self._check_str_default_value()]
def _check_str_default_value(self):
if self.has_default() and isinstance(self.default, str):
return [
checks.Error(
"BinaryField's default cannot be a string. Use bytes "
"content instead.",
obj=self,
id='fields.E170',
)
]
return []
def deconstruct(self):
name, path, args, kwargs = super().deconstruct()
if self.editable:
kwargs['editable'] = True
else:
del kwargs['editable']
return name, path, args, kwargs
def get_internal_type(self):
return "BinaryField"
def get_placeholder(self, value, compiler, connection):
return connection.ops.binary_placeholder_sql(value)
def get_default(self):
if self.has_default() and not callable(self.default):
return self.default
default = super().get_default()
if default == '':
return b''
return default
def get_db_prep_value(self, value, connection, prepared=False):
value = super().get_db_prep_value(value, connection, prepared)
if value is not None:
return connection.Database.Binary(value)
return value
def value_to_string(self, obj):
"""Binary data is serialized as base64"""
return b64encode(self.value_from_object(obj)).decode('ascii')
def to_python(self, value):
# If it's a string, it should be base64-encoded data
if isinstance(value, str):
return memoryview(b64decode(value.encode('ascii')))
return value
class UUIDField(Field):
default_error_messages = {
'invalid': _('“%(value)s” is not a valid UUID.'),
}
description = _('Universally unique identifier')
empty_strings_allowed = False
def __init__(self, verbose_name=None, **kwargs):
kwargs['max_length'] = 32
super().__init__(verbose_name, **kwargs)
def deconstruct(self):
name, path, args, kwargs = super().deconstruct()
del kwargs['max_length']
return name, path, args, kwargs
def get_internal_type(self):
return "UUIDField"
def get_prep_value(self, value):
value = super().get_prep_value(value)
return self.to_python(value)
def get_db_prep_value(self, value, connection, prepared=False):
if value is None:
return None
if not isinstance(value, uuid.UUID):
value = self.to_python(value)
if connection.features.has_native_uuid_field:
return value
return value.hex
def to_python(self, value):
if value is not None and not isinstance(value, uuid.UUID):
input_form = 'int' if isinstance(value, int) else 'hex'
try:
return uuid.UUID(**{input_form: value})
except (AttributeError, ValueError):
raise exceptions.ValidationError(
self.error_messages['invalid'],
code='invalid',
params={'value': value},
)
return value
def formfield(self, **kwargs):
return super().formfield(**{
'form_class': forms.UUIDField,
**kwargs,
})
class AutoFieldMixin:
db_returning = True
def __init__(self, *args, **kwargs):
kwargs['blank'] = True
super().__init__(*args, **kwargs)
def check(self, **kwargs):
return [
*super().check(**kwargs),
*self._check_primary_key(),
]
def _check_primary_key(self):
if not self.primary_key:
return [
checks.Error(
'AutoFields must set primary_key=True.',
obj=self,
id='fields.E100',
),
]
else:
return []
def deconstruct(self):
name, path, args, kwargs = super().deconstruct()
del kwargs['blank']
kwargs['primary_key'] = True
return name, path, args, kwargs
def validate(self, value, model_instance):
pass
def get_db_prep_value(self, value, connection, prepared=False):
if not prepared:
value = self.get_prep_value(value)
value = connection.ops.validate_autopk_value(value)
return value
def contribute_to_class(self, cls, name, **kwargs):
assert not cls._meta.auto_field, (
"Model %s can't have more than one auto-generated field."
% cls._meta.label
)
super().contribute_to_class(cls, name, **kwargs)
cls._meta.auto_field = self
def formfield(self, **kwargs):
return None
class AutoFieldMeta(type):
"""
Metaclass to maintain backward inheritance compatibility for AutoField.
It is intended that AutoFieldMixin become public API when it is possible to
create a non-integer automatically-generated field using column defaults
stored in the database.
In many areas Django also relies on using isinstance() to check for an
automatically-generated field as a subclass of AutoField. A new flag needs
to be implemented on Field to be used instead.
When these issues have been addressed, this metaclass could be used to
deprecate inheritance from AutoField and use of isinstance() with AutoField
for detecting automatically-generated fields.
"""
@property
def _subclasses(self):
return (BigAutoField, SmallAutoField)
def __instancecheck__(self, instance):
return isinstance(instance, self._subclasses) or super().__instancecheck__(instance)
def __subclasscheck__(self, subclass):
return subclass in self._subclasses or super().__subclasscheck__(subclass)
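# A minimal illustration of the compatibility shim described above (not part of
# this module); BigAutoField and SmallAutoField are defined just below:
#     issubclass(BigAutoField, AutoField)                      # True, via __subclasscheck__
#     isinstance(SmallAutoField(primary_key=True), AutoField)  # True, via __instancecheck__
# even though neither class inherits from AutoField directly.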
class AutoField(AutoFieldMixin, IntegerField, metaclass=AutoFieldMeta):
def get_internal_type(self):
return 'AutoField'
def rel_db_type(self, connection):
return IntegerField().db_type(connection=connection)
class BigAutoField(AutoFieldMixin, BigIntegerField):
def get_internal_type(self):
return 'BigAutoField'
def rel_db_type(self, connection):
return BigIntegerField().db_type(connection=connection)
class SmallAutoField(AutoFieldMixin, SmallIntegerField):
def get_internal_type(self):
return 'SmallAutoField'
def rel_db_type(self, connection):
return SmallIntegerField().db_type(connection=connection)
|
py
|
1a55d44db89b9871362eb635e3f20f146c282c79
|
data = (
'jjwaels', # 0x00
'jjwaelt', # 0x01
'jjwaelp', # 0x02
'jjwaelh', # 0x03
'jjwaem', # 0x04
'jjwaeb', # 0x05
'jjwaebs', # 0x06
'jjwaes', # 0x07
'jjwaess', # 0x08
'jjwaeng', # 0x09
'jjwaej', # 0x0a
'jjwaec', # 0x0b
'jjwaek', # 0x0c
'jjwaet', # 0x0d
'jjwaep', # 0x0e
'jjwaeh', # 0x0f
'jjoe', # 0x10
'jjoeg', # 0x11
'jjoegg', # 0x12
'jjoegs', # 0x13
'jjoen', # 0x14
'jjoenj', # 0x15
'jjoenh', # 0x16
'jjoed', # 0x17
'jjoel', # 0x18
'jjoelg', # 0x19
'jjoelm', # 0x1a
'jjoelb', # 0x1b
'jjoels', # 0x1c
'jjoelt', # 0x1d
'jjoelp', # 0x1e
'jjoelh', # 0x1f
'jjoem', # 0x20
'jjoeb', # 0x21
'jjoebs', # 0x22
'jjoes', # 0x23
'jjoess', # 0x24
'jjoeng', # 0x25
'jjoej', # 0x26
'jjoec', # 0x27
'jjoek', # 0x28
'jjoet', # 0x29
'jjoep', # 0x2a
'jjoeh', # 0x2b
'jjyo', # 0x2c
'jjyog', # 0x2d
'jjyogg', # 0x2e
'jjyogs', # 0x2f
'jjyon', # 0x30
'jjyonj', # 0x31
'jjyonh', # 0x32
'jjyod', # 0x33
'jjyol', # 0x34
'jjyolg', # 0x35
'jjyolm', # 0x36
'jjyolb', # 0x37
'jjyols', # 0x38
'jjyolt', # 0x39
'jjyolp', # 0x3a
'jjyolh', # 0x3b
'jjyom', # 0x3c
'jjyob', # 0x3d
'jjyobs', # 0x3e
'jjyos', # 0x3f
'jjyoss', # 0x40
'jjyong', # 0x41
'jjyoj', # 0x42
'jjyoc', # 0x43
'jjyok', # 0x44
'jjyot', # 0x45
'jjyop', # 0x46
'jjyoh', # 0x47
'jju', # 0x48
'jjug', # 0x49
'jjugg', # 0x4a
'jjugs', # 0x4b
'jjun', # 0x4c
'jjunj', # 0x4d
'jjunh', # 0x4e
'jjud', # 0x4f
'jjul', # 0x50
'jjulg', # 0x51
'jjulm', # 0x52
'jjulb', # 0x53
'jjuls', # 0x54
'jjult', # 0x55
'jjulp', # 0x56
'jjulh', # 0x57
'jjum', # 0x58
'jjub', # 0x59
'jjubs', # 0x5a
'jjus', # 0x5b
'jjuss', # 0x5c
'jjung', # 0x5d
'jjuj', # 0x5e
'jjuc', # 0x5f
'jjuk', # 0x60
'jjut', # 0x61
'jjup', # 0x62
'jjuh', # 0x63
'jjweo', # 0x64
'jjweog', # 0x65
'jjweogg', # 0x66
'jjweogs', # 0x67
'jjweon', # 0x68
'jjweonj', # 0x69
'jjweonh', # 0x6a
'jjweod', # 0x6b
'jjweol', # 0x6c
'jjweolg', # 0x6d
'jjweolm', # 0x6e
'jjweolb', # 0x6f
'jjweols', # 0x70
'jjweolt', # 0x71
'jjweolp', # 0x72
'jjweolh', # 0x73
'jjweom', # 0x74
'jjweob', # 0x75
'jjweobs', # 0x76
'jjweos', # 0x77
'jjweoss', # 0x78
'jjweong', # 0x79
'jjweoj', # 0x7a
'jjweoc', # 0x7b
'jjweok', # 0x7c
'jjweot', # 0x7d
'jjweop', # 0x7e
'jjweoh', # 0x7f
'jjwe', # 0x80
'jjweg', # 0x81
'jjwegg', # 0x82
'jjwegs', # 0x83
'jjwen', # 0x84
'jjwenj', # 0x85
'jjwenh', # 0x86
'jjwed', # 0x87
'jjwel', # 0x88
'jjwelg', # 0x89
'jjwelm', # 0x8a
'jjwelb', # 0x8b
'jjwels', # 0x8c
'jjwelt', # 0x8d
'jjwelp', # 0x8e
'jjwelh', # 0x8f
'jjwem', # 0x90
'jjweb', # 0x91
'jjwebs', # 0x92
'jjwes', # 0x93
'jjwess', # 0x94
'jjweng', # 0x95
'jjwej', # 0x96
'jjwec', # 0x97
'jjwek', # 0x98
'jjwet', # 0x99
'jjwep', # 0x9a
'jjweh', # 0x9b
'jjwi', # 0x9c
'jjwig', # 0x9d
'jjwigg', # 0x9e
'jjwigs', # 0x9f
'jjwin', # 0xa0
'jjwinj', # 0xa1
'jjwinh', # 0xa2
'jjwid', # 0xa3
'jjwil', # 0xa4
'jjwilg', # 0xa5
'jjwilm', # 0xa6
'jjwilb', # 0xa7
'jjwils', # 0xa8
'jjwilt', # 0xa9
'jjwilp', # 0xaa
'jjwilh', # 0xab
'jjwim', # 0xac
'jjwib', # 0xad
'jjwibs', # 0xae
'jjwis', # 0xaf
'jjwiss', # 0xb0
'jjwing', # 0xb1
'jjwij', # 0xb2
'jjwic', # 0xb3
'jjwik', # 0xb4
'jjwit', # 0xb5
'jjwip', # 0xb6
'jjwih', # 0xb7
'jjyu', # 0xb8
'jjyug', # 0xb9
'jjyugg', # 0xba
'jjyugs', # 0xbb
'jjyun', # 0xbc
'jjyunj', # 0xbd
'jjyunh', # 0xbe
'jjyud', # 0xbf
'jjyul', # 0xc0
'jjyulg', # 0xc1
'jjyulm', # 0xc2
'jjyulb', # 0xc3
'jjyuls', # 0xc4
'jjyult', # 0xc5
'jjyulp', # 0xc6
'jjyulh', # 0xc7
'jjyum', # 0xc8
'jjyub', # 0xc9
'jjyubs', # 0xca
'jjyus', # 0xcb
'jjyuss', # 0xcc
'jjyung', # 0xcd
'jjyuj', # 0xce
'jjyuc', # 0xcf
'jjyuk', # 0xd0
'jjyut', # 0xd1
'jjyup', # 0xd2
'jjyuh', # 0xd3
'jjeu', # 0xd4
'jjeug', # 0xd5
'jjeugg', # 0xd6
'jjeugs', # 0xd7
'jjeun', # 0xd8
'jjeunj', # 0xd9
'jjeunh', # 0xda
'jjeud', # 0xdb
'jjeul', # 0xdc
'jjeulg', # 0xdd
'jjeulm', # 0xde
'jjeulb', # 0xdf
'jjeuls', # 0xe0
'jjeult', # 0xe1
'jjeulp', # 0xe2
'jjeulh', # 0xe3
'jjeum', # 0xe4
'jjeub', # 0xe5
'jjeubs', # 0xe6
'jjeus', # 0xe7
'jjeuss', # 0xe8
'jjeung', # 0xe9
'jjeuj', # 0xea
'jjeuc', # 0xeb
'jjeuk', # 0xec
'jjeut', # 0xed
'jjeup', # 0xee
'jjeuh', # 0xef
'jjyi', # 0xf0
'jjyig', # 0xf1
'jjyigg', # 0xf2
'jjyigs', # 0xf3
'jjyin', # 0xf4
'jjyinj', # 0xf5
'jjyinh', # 0xf6
'jjyid', # 0xf7
'jjyil', # 0xf8
'jjyilg', # 0xf9
'jjyilm', # 0xfa
'jjyilb', # 0xfb
'jjyils', # 0xfc
'jjyilt', # 0xfd
'jjyilp', # 0xfe
'jjyilh', # 0xff
)
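# Added sketch (assumption): tables like this one are typically used,
# Unidecode-style, as one 256-entry "page" of ASCII transliterations indexed by
# the low byte of a character's code point. A minimal lookup could look like:
def romanize(ch, page=data):
    # the low 8 bits of the code point select the entry within this page
    return page[ord(ch) & 0xff]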
|
py
|
1a55d593277c80cd1650a55861cba80c66467b65
|
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
from matplotlib.ticker import MultipleLocator
from mpl_toolkits.mplot3d import Axes3D
import hanshu
proapp=pd.read_excel("C:\\Users\\eric\\Desktop\\月报数据\\月报数据.xlsx",'省公司每月检测缺陷密度') # workbook "monthly report data", sheet "monthly detected defect density per provincial company"
proapp0=hanshu.zyzh(proapp)
print(proapp0)
fig = plt.figure(5)
ax=fig.add_subplot(1,1,1,projection='3d') # create a 3D axes
x=proapp['org_id']
y=proapp['yue'] # x-axis and y-axis data
z=proapp['rat'] # z-axis data
#
ax.plot_surface(x,y,z,rstride=2,cstride=1,cmap=plt.cm.coolwarm,alpha=0.8) # draw the 3D surface
ax.set_xlabel('x-name') # x-axis label
ax.set_ylabel('y-name') # y-axis label
ax.set_zlabel('z-name') # z-axis label
plt.savefig('12.png',dpi=400,bbox_inches='tight')
plt.show()
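# Added sketch (assumption): ax.plot_surface expects 2-D grid arrays, while the
# columns read from the long-format sheet above are 1-D Series. A common fix is
# to pivot the table into a grid and mesh the axis values; the helper below is
# only an illustration using the column names from this script.
def surface_from_long_table(df, x_col='org_id', y_col='yue', z_col='rat'):
    # pivot long data (one row per x/y pair) into a 2-D grid of z values
    grid = df.pivot_table(index=y_col, columns=x_col, values=z_col)
    X, Y = np.meshgrid(grid.columns.values, grid.index.values)
    return X, Y, grid.values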
|
py
|
1a55d5f29913f75b66d7b9308a40efead3e40b8f
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.4'
# jupytext_version: 1.1.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # s_dyn_principal_component_var [<img src="https://www.arpm.co/lab/icons/icon_permalink.png" width=30 height=30 style="display: inline;">](https://www.arpm.co/lab/redirect.php?code=s_dyn_principal_component_var&codeLang=Python)
# For details, see [here](https://www.arpm.co/lab/redirect.php?permalink=eb-dyn-pc-var).
import numpy as np
import matplotlib.pyplot as plt
from matplotlib import gridspec
from arpym.statistics import simulate_var1, simulate_normal, multi_r2
from arpym.tools import transpose_square_root, add_logo
# ## [Input parameters](https://www.arpm.co/lab/redirect.php?permalink=s_dyn_principal_component_var-parameters)
n_ = 2 # number of target variables
k_ = 1 # number of factors
t_ = int(1e4) # length of VAR(1) process
j_ = int(1e2) # number of scenarios
delta_omega = 1e-3
sigma2 = np.eye(n_) # scale matrix
# ## [Step 0](https://www.arpm.co/lab/redirect.php?permalink=s_dyn_principal_component_var-implementation-step00): Setup parameters
# +
t_vec = np.arange(t_)
tau_vec = np.arange(-j_, j_+1)
omega_vec = np.arange(-np.pi, np.pi, delta_omega)
m_ = len(omega_vec)
gamma = (2 * np.random.rand(4) - 1) * 0.99
theta = gamma * np.pi / 2
b = np.array([[np.sin(theta[0]), 0],
[np.sin(theta[3])*np.sin(theta[2]),
np.sin(theta[3])*np.cos(theta[2])]])
mu_epsi = np.zeros(n_)
s_1 = np.cos(theta[0])
s_2 = np.cos(theta[3])
rho = np.sin(theta[1])
sigma2_epsi = np.array([[s_1**2, rho*s_1*s_2],
[rho*s_1*s_2, s_2**2]])
# -
# ## [Step 1](https://www.arpm.co/lab/redirect.php?permalink=s_dyn_principal_component_var-implementation-step01): Simulate VAR(1) process
# +
mu_inf = np.linalg.solve(np.eye(n_) - b, mu_epsi)
sigma2_inf = np.linalg.solve(np.eye(n_**2) - np.kron(b, b),
sigma2.reshape(n_**2, 1)).reshape(n_, n_)
x_tnow = simulate_normal(mu_inf, sigma2_inf, 1).reshape(n_)
x = simulate_var1(x_tnow, b, mu_epsi, sigma2_epsi, t_, j_=1).squeeze()
mu_x = np.linalg.solve((np.eye(n_) - b), mu_epsi)
sigma2_x = np.linalg.solve(np.eye(n_ ** 2) - np.kron(b, b),
sigma2_epsi.reshape(n_ ** 2, 1)).reshape(n_, n_)
# -
# ## [Step 2](https://www.arpm.co/lab/redirect.php?permalink=s_dyn_principal_component_var-implementation-step02): Compute spectral density
# +
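# The loop below evaluates the VAR(1) spectral density on the omega grid:
# ktilde_x(omega) = (I - e^{-i*omega} b)^{-1} sigma2_epsi (I - e^{+i*omega} b')^{-1}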
ktilde_x = np.zeros((m_, n_, n_), dtype=complex)
sigma_epsi = transpose_square_root(sigma2_epsi)
for m in range(m_):
ktilde_x_temp = np.linalg.solve(np.eye(n_, dtype=complex) -
np.exp(-omega_vec[m]*1j) * b, sigma_epsi)
ktilde_x[m, :, :] = ktilde_x_temp @ ktilde_x_temp.conj().T
# -
# ## [Step 3](https://www.arpm.co/lab/redirect.php?permalink=s_dyn_principal_component_var-implementation-step03): Principal components decomposition
# +
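# Per-frequency PCA: np.linalg.eigh returns eigenvalues in ascending order, so
# the top k_ principal components are the last k_ columns, reversed below.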
lam, e = np.linalg.eigh(ktilde_x)
lam_k = lam[:, -k_:][:, ::-1]
e_k = e[:, :, -k_:][:, :, ::-1]
sigma = transpose_square_root(sigma2)
beta_tilde_f = np.einsum('ij,ljk->lik', sigma, e_k)
gamma_tilde_f = np.einsum('ijk,kl->ijl',
e_k.conj().transpose((0, 2, 1)),
np.linalg.inv(sigma))
# -
# ## [Step 4](https://www.arpm.co/lab/redirect.php?permalink=s_dyn_principal_component_var-implementation-step04): Computation of the filter h
# +
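# Inverse Fourier transform back to the lag (tau) domain: the frequency-domain
# filters are integrated against e^{i*tau*omega} via a Riemann sum over the
# omega grid (hence the delta_omega / (2 * pi) factor).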
h_tilde_f = np.einsum('ijk,ikl->ijl', beta_tilde_f, gamma_tilde_f)
coef = np.exp(1j * np.outer(tau_vec, omega_vec))
h_f = np.real(np.tensordot(coef, h_tilde_f, axes=(1, 0)) *
delta_omega / (2 * np.pi))
gamma_f = np.tensordot(coef, gamma_tilde_f, axes=(1, 0)) * \
delta_omega / (2 * np.pi)
alpha_f = (np.eye(n_) - np.sum(h_f, axis=0)) @ mu_x
# -
# ## [Step 5](https://www.arpm.co/lab/redirect.php?permalink=s_dyn_principal_component_var-implementation-step05): Compute the spectral density of predicted process
ktilde_x_pc_bar = np.einsum('ijk,ilk->ijl',
np.einsum('ijk,ikl->ijl', h_tilde_f, ktilde_x), h_tilde_f.conj())
# ## [Step 6](https://www.arpm.co/lab/redirect.php?permalink=s_dyn_principal_component_var-implementation-step06): Compute the principal components predicted process
# +
t_vec_pc = t_vec[tau_vec[-1]:-tau_vec[-1]]
t_pc = t_vec_pc.shape[0]
x_pc_bar = np.zeros((t_pc, n_), dtype=complex)
z_pc = np.zeros((t_pc, k_), dtype=complex)
for t in range(t_pc):
x_tau = x[t_vec_pc[t] + tau_vec, :][::-1, :]
x_pc_bar[t, :] = np.einsum('ijk,ik->j', h_f, x_tau) + alpha_f
z_pc[t, :] = np.einsum('ijk,ik->j', gamma_f, x_tau)
x_pc_bar = np.real(x_pc_bar)
z_pc = np.real(z_pc)
# -
# ## [Step 7](https://www.arpm.co/lab/redirect.php?permalink=s_dyn_principal_component_var-implementation-step07): update times of original process x
x = x[t_vec_pc, :]
# ## [Step 8](https://www.arpm.co/lab/redirect.php?permalink=s_dyn_principal_component_var-implementation-step08): Compute r-squared
u = x - x_pc_bar
sigma2_u = np.einsum('ijk,ilk->ijl',
np.einsum('ijk,ikl->ijl', np.eye(n_) - h_tilde_f, ktilde_x),
(np.eye(n_) - h_tilde_f).conj())
sigma2_u = np.sum(np.real(sigma2_u), axis=0) * delta_omega / (2 * np.pi)
r_2 = multi_r2(sigma2_u, sigma2_x, sigma2)
# ## Plots
# +
plt.style.use('arpm')
t_plot = t_vec_pc[1:150]
xlim = [t_plot[0], t_plot[-1]]
ylim = [-4, 4]
fig1, axes = plt.subplots(1, 2)
axes[0].plot(t_plot, x[t_plot, 0], 'b')
axes[0].plot(t_plot, x_pc_bar[t_plot, 0], 'r--')
axes[0].set_xlabel('$t$')
axes[0].set_ylabel('$x_1$')
axes[0].set_xlim(xlim)
axes[0].set_ylim(ylim)
axes[0].legend(['Process', 'Predicted process'])
axes[1].plot(t_plot, x[t_plot, 1], 'b')
axes[1].plot(t_plot, x_pc_bar[t_plot, 1], 'r--')
axes[1].set_xlabel('$t$')
axes[1].set_ylabel('$x_2$')
axes[1].set_xlim(xlim)
axes[1].set_ylim(ylim)
axes[1].legend(['Process', 'Predicted process'])
add_logo(fig1, size_frac_x=1/8)
plt.tight_layout()
fig2 = plt.figure()
gs = gridspec.GridSpec(1, 3, width_ratios=[1, 3, 1])
ax0 = plt.subplot(gs[0])
ax0.plot(ylim, ylim, 'k')
ax0.plot(x[t_plot, 0], x_pc_bar[t_plot, 0], 'r.')
ax0.set_xlabel('$x_1$')
ax0.set_ylabel(r'$\overline{x}_{1}^{pc}$')
ax0.set_xlim(ylim)
ax0.set_ylim(ylim)
ax1 = plt.subplot(gs[1])
ax1.plot(t_plot, z_pc[t_plot, 0], 'b')
ax1.set_xlabel('$t$')
ax1.set_ylabel('$Z^{pc}$')
ax1.set_xlim(xlim)
ax2 = plt.subplot(gs[2])
ax2.plot(ylim, ylim, 'k')
ax2.plot(x[t_plot, 1], x_pc_bar[t_plot, 1], 'r.')
ax2.set_xlabel('$x_2$')
ax2.set_ylabel(r'$\overline{x}_{2}^{pc}$')
ax2.set_xlim(ylim)
ax2.set_ylim(ylim)
add_logo(fig2, size_frac_x=1/4)
plt.tight_layout()
fig3, axes = plt.subplots(2, 4)
for i in range(2):
for j in range(2):
axes[i, j].plot(omega_vec, np.real(ktilde_x[:, i, j]), 'b')
axes[i, j].plot(omega_vec, np.imag(ktilde_x[:, i, j]), 'r')
axes[i, j].set_xticks([-np.pi, -np.pi/2, 0, np.pi/2, np.pi])
axes[i, j].set_xticklabels([r'$-\pi$', r'$-\pi/2$',
r'$0$', r'$\pi/2$', r'$\pi$'])
axes[i, j].set_ylabel(r'$[\tilde{k}_x(\omega)]_{'+str(i+1)+str(j+1)+'}$')
for j in range(2):
axes[i, j+2].plot(omega_vec, np.real(ktilde_x_pc_bar[:, i, j]), 'b')
axes[i, j+2].plot(omega_vec, np.imag(ktilde_x_pc_bar[:, i, j]), 'r')
axes[i, j+2].set_xticks([-np.pi, -np.pi/2, 0, np.pi/2, np.pi])
axes[i, j+2].set_xticklabels([r'$-\pi$', r'$-\pi/2$',
r'$0$', r'$\pi/2$', r'$\pi$'])
axes[i, j+2].set_ylabel(r'$[\tilde{k}_{\bar{x}}(\omega)]^{pc}_{'+str(i+1)+str(j+1)+'}$')
add_logo(fig3, size_frac_x=1/4, location=1)
plt.tight_layout()
|
py
|
1a55d621ab449ad87c0fe12fe2babb2a00d0c676
|
# ================
# SOFTWARE LICENSE
# ================
# The MIT License (MIT)
# Copyright (c) 2018 Yutaka Sawai (Varipon)
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
# ==============================================================
# LICENSE FOR CONTENT PROCEDURALLY GENERATED USING THIS SOFTWARE
# ==============================================================
# All content procedurally generated by this software and its permutations
# are licensed under Creative Commons Attribution By 3.0:
# https://creativecommons.org/licenses/by/3.0/
#!/usr/bin/python
import bpy
from bpy import *
import mathutils
import math
from mathutils import *
from math import *
class Formula:
J = 18 #joint number
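# Formula builds one linkage chain as a Blender armature: joint coordinates are
# computed in configMovement, bones and IK constraints in constructMovement,
# the driving rotation is keyframed in configRotation, and the visible link
# meshes are created and constrained in configLink / constructLink.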
def __init__(self, P, A, move, part, helicity, start, end):
global interval
global frame_start
global frame_end
self.interval = interval
self.frame_start = frame_start
self.frame_end = frame_end
# pivot factor
self.P = P
# scale factor
self.A = A
# name
self.move = move
# element
self.part = part
# element helicity
self.helicity = helicity
self.start = start
self.end = end
bpy.ops.object.mode_set(mode='OBJECT')
# Create armature and object
self.amt = bpy.data.armatures.new(move + '.' + part + '.' + helicity + '.data')
self.rig = bpy.data.objects.new(move + '.' + part + '.' + helicity, self.amt)
# Joints α(n) -> a[n], β(n) -> b[n], γ(n) -> y[n], δ(n) -> o[n]
self.a = [0 for i in range(4)] # Joint α
self.b = [0 for i in range(self.J)] # Joint β
self.y = [0 for i in range(self.J)] # Joint γ
self.o = [0 for i in range(self.J)] # Joint δ
# Configuration Movement
self.configMovement(self.P, self.A, self.J, self.a, self.b, self.y, self.o)
# Construction Movement
self.constructMovement(self.J, self.helicity, self.amt, self.rig, self.a, self.b, self.y, self.o)
# Construction Rotation
self.configRotation(self.rig, self.interval, self.frame_start, self.frame_end, self.start, self.end)
# Configuration Linkage
self.configLink(self.A, self.J, self.helicity, self.rig, self.move, self.part)
# Construction Linkage
self.constructLink(self.A, self.J, self.helicity, self.rig, self.move, self.part)
def configMovement(self, P, A, J, a, b, y, o):
mat_a = [0 for i in range(4)] # Joint α matrix
mat_b = [0 for i in range(self.J)] # Joint β matrix
mat_y = [0 for i in range(self.J)] # Joint γ matrix
mat_o = [0 for i in range(self.J)] # Joint δ matrix
a[1] = mathutils.Euler((P, A, 0), 'XYZ')
print ("a1 =", a[1])
a[2] = mathutils.Euler((A, -A, 0), 'XYZ')
print ("a2 =", a[2])
b[1] = mathutils.Euler((-A, A, 0), 'XYZ')
print ("b1 =", b[1])
o[1] = mathutils.Euler((A, A, 0), 'XYZ')
print ("o1 =", o[1])
B = A * 2 * sqrt (2)
C = B + (B * sqrt (2))
D = C * sqrt (2)
E = C + D
a[0] = mathutils.Euler((-A - E + (D * 0.5), -A - (D * 0.5), 0), 'XYZ')
print ("a0 =", a[0])
mat_a[0] = Matrix.Translation(a[0])
a[3] = mathutils.Euler((0-a[0].x, 0-a[0].y, 0-a[0].z), 'XYZ')
print ("a3 =", a[3])
mat_a[3] = Matrix.Translation(a[3])
y[1] = mathutils.Euler((-A, -A, 0), 'XYZ')
print ("y1 =", y[1])
mat_y[1] = Matrix.Translation(y[1])
### pattern A
b[2] = mathutils.Euler((a[0].x + E + (A * 2), a[0].y + (A * 2), 0), 'XYZ')
print ("b2 =", b[2])
mat_b[2] = Matrix.Translation(b[2])
b[3] = mathutils.Euler((a[0].x + E - (D * 0.5), a[0].y - (A * 2), 0), 'XYZ')
print ("b3 =", b[3])
mat_b[3] = Matrix.Translation(b[3])
y[2] = mathutils.Euler((a[0].x + E, a[0].y, 0), 'XYZ')
print ("y2 =", y[2])
mat_y[2] = Matrix.Translation(y[2])
y[3] = mathutils.Euler((a[0].x + E - (D * 0.5), a[0].y - (D * 0.5), 0), 'XYZ')
print ("y3 =", y[3])
mat_y[3] = Matrix.Translation(y[3])
o[2] = mathutils.Euler((a[0].x + E + (A * 2), a[0].y - (A * 2), 0), 'XYZ')
print ("o2 =", o[2])
mat_o[2] = Matrix.Translation(o[2])
o[3] = mathutils.Euler((a[0].x + E - (D * 0.5) - (A * 2), a[0].y - (D * 0.5) - (A * 2), 0), 'XYZ')
print ("o3 =", o[3])
mat_o[3] = Matrix.Translation(o[3])
### pattern A end
org_rot_mat = Matrix.Rotation(math.radians(0), 4, 'Z')
# define the rotation
rot_mat = Matrix.Rotation(math.radians(-45), 4, 'Z')
for j in range(2, J - 2):
mat_y[j + 2] = mat_a[0] * org_rot_mat * rot_mat * mat_a[3] * mat_y[j]
# obj.matrix_world = mat_y[j + 2]
# extract components back out of the matrix
loc, rot, sca = mat_y[j + 2].decompose()
y[j + 2] = mathutils.Euler(loc, 'XYZ')
print("y"+str(j + 2)+" = ", y[j + 2], rot, sca)
mat_b[j + 2] = mat_a[0] * org_rot_mat * rot_mat * mat_a[3] * mat_b[j]
# obj.matrix_world = mat_b[j + 2]
# extract components back out of the matrix
loc, rot, sca = mat_b[j + 2].decompose()
b[j + 2] = mathutils.Euler(loc, 'XYZ')
print("b"+str(j + 2)+" = ", b[j + 2], rot, sca)
mat_o[j + 2] = mat_a[0] * org_rot_mat * rot_mat * mat_a[3] * mat_o[j]
# obj.matrix_world = mat_o[j + 2]
# extract components back out of the matrix
loc, rot, sca = mat_o[j + 2].decompose()
o[j + 2] = mathutils.Euler(loc, 'XYZ')
print("o"+str(j + 2)+" = ", o[j + 2], rot, sca)
def constructMovement(self, J, helicity, amt, rig, a, b, y, o):
# Linkages
aa = [[0 for i in range(4)] for j in range(4)] # Link α(i) - α(j)
ab = [[0 for i in range(4)] for j in range(4)] # Link α(i) - β(j)
ya = [[0 for i in range(4)] for j in range(4)] # Link γ(i) - α(j)
ao = [[0 for i in range(4)] for j in range(4)] # Link α(i) - δ(j)
ob = [[0 for i in range(self.J)] for j in range(self.J)] # Link δ(i) - β(j)
yy = [[0 for i in range(self.J)] for j in range(self.J)] # Link γ(i) - γ(j)
by = [[0 for i in range(self.J)] for j in range(self.J)] # Link β(i) - γ(j)
yo = [[0 for i in range(self.J)] for j in range(self.J)] # Link γ(i) - δ(j)
rig.location = mathutils.Euler((0.0, 0.0, 0.0), 'XYZ')
rig.show_x_ray = True
amt.show_names = True
amt.draw_type = 'STICK'
# amt.draw_type = 'BBONE'
# Link object to scene
scn = bpy.context.scene
scn.objects.link(rig)
scn.objects.active = rig
scn.update()
# Edit
bpy.ops.object.editmode_toggle()
# Construction Linkage
aa[2][1] = amt.edit_bones.new('a2a1')
aa[2][1].head = a[2]
aa[2][1].tail = a[1]
ab[1][1] = amt.edit_bones.new('a1b1')
ab[1][1].head = a[1]
ab[1][1].tail = b[1]
ab[1][1].parent = aa[2][1]
by[1][1] = amt.edit_bones.new('b1y1')
by[1][1].head = b[1]
by[1][1].tail = y[1]
by[1][1].parent = ab[1][1]
by[1][1].use_inherit_rotation = False
ya[1][2] = amt.edit_bones.new('y1a2')
ya[1][2].head = y[1]
ya[1][2].tail = a[2]
ya[1][2].parent = by[1][1]
ao[2][1] = amt.edit_bones.new('a2o1')
ao[2][1].head = a[2]
ao[2][1].tail = o[1]
ao[2][1].parent = ya[1][2]
ob[1][2] = amt.edit_bones.new('o1b2')
ob[1][2].head = o[1]
ob[1][2].tail = b[2]
ob[1][2].parent = ao[2][1]
yy[1][2] = amt.edit_bones.new('y1y2')
yy[1][2].head = y[1]
yy[1][2].tail = y[2]
yy[1][2].parent = by[1][1]
for j in range(2, J - 1):
by[j][j] = amt.edit_bones.new('b'+ str(j) + 'y'+ str(j))
by[j][j].head = b[j]
by[j][j].tail = y[j]
by[j][j].parent = ob[j-1][j]
yo[j][j] = amt.edit_bones.new('y'+ str(j) + 'o'+ str(j))
yo[j][j].head = y[j]
yo[j][j].tail = o[j]
yo[j][j].parent = yy[j-1][j]
yy[j][j+1] = amt.edit_bones.new('y'+ str(j) + 'y'+ str(j+1))
yy[j][j+1].head = y[j]
yy[j][j+1].tail = y[j+1]
yy[j][j+1].parent = by[j][j]
if j < (J-2):
ob[j][j+1] = amt.edit_bones.new('o'+ str(j) + 'b'+ str(j+1))
ob[j][j+1].head = o[j]
ob[j][j+1].tail = b[j+1]
ob[j][j+1].parent = yo[j][j]
# all bones select
#bpy.ops.pose.select_all(action="SELECT")
for b in amt.edit_bones:
b.select = True
if helicity == 'right':
bpy.ops.armature.calculate_roll(type='GLOBAL_POS_Z')
else:
bpy.ops.armature.calculate_roll(type='GLOBAL_NEG_Z')
# Bone constraints. Armature must be in pose mode.
bpy.ops.object.mode_set(mode='POSE')
# IK constraint
cns = rig.pose.bones['y1a2'].constraints.new('IK')
cns.name = 'Ik'
cns.target = rig
cns.subtarget = 'a2a1'
cns.chain_count = 2
cns.use_stretch = False
for j in range(2, J - 1):
cns = rig.pose.bones['b'+str(j) +'y'+str(j)].constraints.new('IK')
cns.name = 'Ik'
cns.target = rig
cns.subtarget = 'y'+str(j)+'o'+str(j)
cns.iterations = 500
cns.chain_count = 2
cns.use_stretch = False
bpy.ops.object.mode_set(mode='OBJECT')
def configRotation(self, rig, interval, frame_start, frame_end, start, end):
# Bone constraints. Armature must be in pose mode.
bpy.ops.object.mode_set(mode='POSE')
# key insert
keyframe_insert_interval = interval
rig.pose.bones["a1b1"].rotation_mode = 'XYZ'
rig.pose.bones["a1b1"].rotation_euler.z = math.radians(start)
rig.pose.bones["a1b1"].keyframe_insert(data_path="rotation_euler",frame=frame_start)
rig.pose.bones["a1b1"].rotation_mode = 'XYZ'
rig.pose.bones["a1b1"].rotation_euler.z = math.radians(end)
rig.pose.bones["a1b1"].keyframe_insert(data_path="rotation_euler",frame=frame_end)
for curve in bpy.context.active_object.animation_data.action.fcurves:
cycles = curve.modifiers.new(type='CYCLES')
cycles.mode_before = 'REPEAT_OFFSET'
cycles.mode_after = 'REPEAT_OFFSET'
for keyframe in curve.keyframe_points:
keyframe.interpolation = 'LINEAR'
bpy.ops.object.mode_set(mode='OBJECT')
def configLink(self, A, J, helicity, rig, move, part):
bpy.ops.object.mode_set(mode='OBJECT')
Q = (0.18648+0.146446)*A
# Z = -Q*2
Z = 0.0
obj_joint = bpy.data.objects["joint.gold.000"].copy()
obj_joint.location = (0.0, 0.0, -Q*3+Z)
obj_joint.scale = (A, A, A)
obj_joint.name = "a2a1.mesh." + move + '.' + part +'.' + helicity
bpy.context.scene.objects.link(obj_joint)
obj_joint = bpy.data.objects["joint.silver.001"].copy()
obj_joint.location = (0.0, 0.0, +Q+Z)
obj_joint.scale = (A, A, A)
obj_joint.name = "y1a2.mesh." + move + '.' + part +'.' + helicity
bpy.context.scene.objects.link(obj_joint)
obj_joint = bpy.data.objects["joint.copper.001"].copy()
obj_joint.location = (0.0, 0.0, +Q*3+Z)
obj_joint.scale = (A, A, A)
obj_joint.name = "a2o1.mesh." + move + '.' + part +'.' + helicity
bpy.context.scene.objects.link(obj_joint)
obj_joint = bpy.data.objects["joint.blue.001"].copy()
obj_joint.location = (0.0, 0.0, -Q*2+Z)
obj_joint.scale = (A, A, A)
obj_joint.name = "a1b1.mesh." + move + '.' + part +'.' + helicity
bpy.context.scene.objects.link(obj_joint)
for n in range(1, J - 1):
if n <= (J-2):
# Pattern 2 of by
obj_joint = bpy.data.objects["joint.green.001"].copy()
obj_joint.location = (0.0, 0.0, -Q + Q*((n+1) % 2)*4 +Z)
obj_joint.scale = (A, A, A)
obj_joint.name = "b"+str(n)+"y"+str(n)+".mesh." + move + '.' + part +'.' + helicity
bpy.context.scene.objects.link(obj_joint)
# Pattern 2 of yy
obj_joint = bpy.data.objects["joint.gold.00"+str(1 + (n+1) % 2)].copy()
obj_joint.location = (0.0, 0.0, +Q*(1 - (n % 2))*2+Z)
obj_joint.scale = (A, A, A)
obj_joint.name = "y"+str(n)+"y"+str(n+1)+".mesh." + move + '.' + part +'.' + helicity
bpy.context.scene.objects.link(obj_joint)
if n <= (J-3):
# Pattern 1 of ob
obj_joint = bpy.data.objects["joint.blue.001"].copy()
obj_joint.location = (0.0, 0.0, -Q*2 + Q*(n % 2)*6 +Z)
obj_joint.scale = (A, A, A)
obj_joint.name = "o"+str(n)+"b"+str(n+1)+".mesh." + move + '.' + part +'.' + helicity
bpy.context.scene.objects.link(obj_joint)
# Pattern 2 of yo
obj_joint = bpy.data.objects["joint.copper.001"].copy()
obj_joint.location = (0.0, 0.0, -Q + Q*((n+1) % 2)*4 +Z)
obj_joint.scale = (A, A, A)
obj_joint.name = "y"+str(n+1)+"o"+str(n+1)+".mesh." + move + '.' + part +'.' + helicity
bpy.context.scene.objects.link(obj_joint)
for ob in context.scene.objects:
if "mesh" in ob.name:
ob.select = True
bpy.ops.object.make_single_user(type='SELECTED_OBJECTS', object=True, obdata=True, material=True, texture=True, animation=True)
bpy.context.scene.cursor_location = (0.0, 0.0, 0.0)
bpy.ops.object.origin_set(type='ORIGIN_CURSOR')
def constructLink(self, A, J, helicity, rig, move, part):
# Move and rotate the tip bone in pose mode
bpy.context.scene.objects.active = rig
Y = 1.1838*A
for n in rig.pose.bones:
if n.name != "o" + str(J-2) + "b" + str(J-1):
# we can get the object from the pose bone
obj = n.id_data
matrix_final = obj.matrix_world * n.matrix
# Create armature and object
lnk = bpy.data.armatures.new(n.name[:len(n.name)]+'.data.' + helicity)
lnk_rig = bpy.data.objects.new(n.name[:len(n.name)]+'.link.' + helicity, lnk)
lnk_rig.location = mathutils.Euler((0.0, 0.0, 0.0), 'XYZ')
# rig.show_x_ray = True
lnk.show_names = True
lnk.draw_type = 'STICK'
# Link object to scene
scn = bpy.context.scene
scn.objects.link(lnk_rig)
scn.objects.active = lnk_rig
scn.update()
# Create bones
# mode='EDIT'
bpy.ops.object.editmode_toggle()
link = lnk.edit_bones.new(n.name[:len(n.name)])
link.head = (0, 0, 0)
link.tail = (0, Y, 0)
link_head = lnk.edit_bones.new('head')
link_head.head = (0, 0, 0.1)
link_head.tail = (0, 0, 0)
link_head.parent = link
link_head.use_inherit_scale = False
link_tail = lnk.edit_bones.new('tail')
link_tail.head = (0, Y, 0)
link_tail.tail = (0, Y, -0.1)
link_tail.parent = link
link_tail.use_inherit_scale = False
bpy.ops.object.mode_set(mode='OBJECT')
ob = bpy.data.objects[n.name[:len(n.name)]+'.mesh.' + move + '.' + part +'.' + helicity]
ob.location = mathutils.Euler((0, 0, 0), 'XYZ')
# Give mesh object an armature modifier, using vertex groups but
# not envelopes
mod = ob.modifiers.new('MyRigModif', 'ARMATURE')
mod.object = lnk_rig
mod.use_bone_envelopes = False
mod.use_vertex_groups = True
# Bone constraints. Armature must be in pose mode.
bpy.ops.object.mode_set(mode='POSE')
# Copy rotation constraints Base -> Tip
pBase = lnk_rig.pose.bones[n.name[:len(n.name)]]
cns = pBase.constraints.new('COPY_LOCATION')
cns.name = 'Copy_Location'
cns.target = rig
cns.subtarget = n.name[:len(n.name)]
cns.owner_space = 'WORLD'
cns.target_space = 'WORLD'
# Copy rotation constraints Base -> Tip
pBase = lnk_rig.pose.bones[n.name[:len(n.name)]]
cns = pBase.constraints.new('COPY_ROTATION')
cns.name = 'Copy_Rotation'
cns.target = rig
cns.subtarget = n.name[:len(n.name)]
cns.owner_space = 'WORLD'
cns.target_space = 'WORLD'
# StretchTo constraint Mid -> Tip with influence 0.5
cns1 = pBase.constraints.new('STRETCH_TO')
cns1.name = 'Stretch'
cns1.target = rig
cns1.subtarget = n.name[:len(n.name)]
cns1.head_tail = 1
cns1.rest_length = Y
cns1.influence = 1
cns1.keep_axis = 'PLANE_Z'
cns1.volume = 'NO_VOLUME'
bpy.ops.object.mode_set(mode='OBJECT')
class Pitch(Formula):
J = 2 #joint number
# Overriding
def __init__(self, P, A, move, part, helicity, start, end,
body_loc, body_rot, body):
global interval
global frame_start
global frame_end
self.interval = interval
self.frame_start = frame_start
self.frame_end = frame_end
# pivot factor
self.P = P
# scale factor
self.A = A
# name
self.move = move
# element
self.part = part
# element helicity
self.helicity = helicity
self.start = start
self.end = end
# body
self.body_loc = body_loc
self.body_rot = body_rot
self.body = body
bpy.ops.object.mode_set(mode='OBJECT')
# Create armature and object
self.amt = bpy.data.armatures.new(move + '.' + part + '.' + helicity + '.data')
self.rig = bpy.data.objects.new(move + '.' + part + '.' + helicity, self.amt)
# Joints α(n) -> a[n], β(n) -> b[n], γ(n) -> y[n]
self.a = [0 for i in range(4)] # Joint α
self.b = [0 for i in range(self.J)] # Joint β
self.y = [0 for i in range(self.J)] # Joint γ
# Configuration Movement
self.configMovement(self.P, self.A, self.J, self.a, self.b, self.y)
# Construction Movement
self.constructMovement(self.J, self.helicity, self.amt, self.rig, self.a, self.b, self.y)
# Parent body to pitch
self.setParent(self.helicity, self.move, self.rig, self.body_loc, self.body_rot, self.body)
# Construction Rotation
self.configRotation(self.rig, self.interval, self.frame_start, self.frame_end, self.start, self.end)
# Configuration Linkage
self.configLink(self.A*0.3, self.J, self.helicity, self.rig, self.move, self.part)
# Construction Linkage
self.constructLink(self.A*0.3, self.J, self.helicity, self.rig, self.move, self.part)
def configMovement(self, P, A, J, a, b, y):
mat_a = [0 for i in range(4)] # Joint α matrix
mat_b = [0 for i in range(self.J)] # Joint β matrix
mat_y = [0 for i in range(self.J)] # Joint γ matrix
a[1] = mathutils.Euler((P, A, 0), 'XYZ')
print ("a1 =", a[1])
a[2] = mathutils.Euler((A, -A, 0), 'XYZ')
print ("a2 =", a[2])
b[1] = mathutils.Euler((-A, A, 0), 'XYZ')
print ("b1 =", b[1])
y[1] = mathutils.Euler((-A, -A, 0), 'XYZ')
print ("y1 =", y[1])
mat_y[1] = Matrix.Translation(y[1])
def constructMovement(self, J, helicity, amt, rig, a, b, y):
# Linkages
aa = [[0 for i in range(4)] for j in range(4)] # Link α(i) - α(j)
ab = [[0 for i in range(4)] for j in range(4)] # Link α(i) - β(j)
ya = [[0 for i in range(4)] for j in range(4)] # Link γ(i) - α(j)
by = [[0 for i in range(self.J)] for j in range(self.J)] # Link β(i) - γ(j)
rig.location = mathutils.Euler((0.0, 0.0, 0.0), 'XYZ')
rig.show_x_ray = True
amt.show_names = True
amt.draw_type = 'STICK'
# amt.draw_type = 'BBONE'
# Link object to scene
scn = bpy.context.scene
scn.objects.link(rig)
scn.objects.active = rig
scn.update()
# Edit
bpy.ops.object.editmode_toggle()
# Construction Linkage
aa[2][1] = amt.edit_bones.new('a2a1')
aa[2][1].head = a[2]
aa[2][1].tail = a[1]
ab[1][1] = amt.edit_bones.new('a1b1')
ab[1][1].head = a[1]
ab[1][1].tail = b[1]
ab[1][1].parent = aa[2][1]
by[1][1] = amt.edit_bones.new('b1y1')
by[1][1].head = b[1]
by[1][1].tail = y[1]
by[1][1].parent = ab[1][1]
by[1][1].use_inherit_rotation = False
ya[1][2] = amt.edit_bones.new('y1a2')
ya[1][2].head = y[1]
ya[1][2].tail = a[2]
ya[1][2].parent = by[1][1]
# all bones select
#bpy.ops.pose.select_all(action="SELECT")
for b in amt.edit_bones:
b.select = True
if helicity == 'right':
bpy.ops.armature.calculate_roll(type='GLOBAL_POS_Z')
else:
bpy.ops.armature.calculate_roll(type='GLOBAL_NEG_Z')
# Bone constraints. Armature must be in pose mode.
bpy.ops.object.mode_set(mode='POSE')
# IK constraint
cns = rig.pose.bones['y1a2'].constraints.new('IK')
cns.name = 'Ik'
cns.target = rig
cns.subtarget = 'a2a1'
cns.chain_count = 2
cns.use_stretch = False
bpy.ops.object.mode_set(mode='OBJECT')
def configLink(self, A, J, helicity, rig, move, part):
bpy.ops.object.mode_set(mode='OBJECT')
Q = (0.18648+0.146446)*A
# Z = -Q*2
Z = 0.0
obj_joint = bpy.data.objects["joint.gold.000"].copy()
obj_joint.location = (0.0, 0.0, -Q*3+Z)
obj_joint.scale = (A, A, A)
obj_joint.name = "a2a1.mesh." + move + '.' + part +'.' + helicity
bpy.context.scene.objects.link(obj_joint)
obj_joint = bpy.data.objects["joint.silver.A"].copy()
obj_joint.location = (0.0, 0.0, +Q+Z)
obj_joint.scale = (A, A, A)
obj_joint.name = "y1a2.mesh." + move + '.' + part +'.' + helicity
bpy.context.scene.objects.link(obj_joint)
obj_joint = bpy.data.objects["joint.blue.001"].copy()
obj_joint.location = (0.0, 0.0, -Q*2+Z)
obj_joint.scale = (A, A, A)
obj_joint.name = "a1b1.mesh." + move + '.' + part +'.' + helicity
bpy.context.scene.objects.link(obj_joint)
n = 1
obj_joint = bpy.data.objects["joint.green.001"].copy()
obj_joint.location = (0.0, 0.0, -Q + Q*((n+1) % 2)*4 +Z)
obj_joint.scale = (A, A, A)
obj_joint.name = "b"+str(n)+"y"+str(n)+".mesh." + move + '.' + part +'.' + helicity
bpy.context.scene.objects.link(obj_joint)
for ob in context.scene.objects:
if "mesh" in ob.name:
ob.select = True
bpy.ops.object.make_single_user(type='SELECTED_OBJECTS', object=True, obdata=True, material=True, texture=True, animation=True)
bpy.context.scene.cursor_location = (0.0, 0.0, 0.0)
bpy.ops.object.origin_set(type='ORIGIN_CURSOR')
# Parent set fingers to arm
def setParent(self, helicity, move, rig,
body_loc, body_rot, body):
# body position
body.rig.location = body_loc
body.rig.rotation_euler = body_rot
# body to pitch
bpy.ops.object.mode_set(mode='OBJECT')
bpy.context.scene.frame_current = 0
bpy.ops.object.select_all(action='DESELECT')
rig.select = True
bpy.context.scene.objects.active = rig
bpy.ops.object.editmode_toggle()
parent_bone = 'b1y1' # choose the bone name which you want to be the parent
rig.data.edit_bones.active = rig.data.edit_bones[parent_bone]
bpy.ops.object.mode_set(mode='OBJECT')
bpy.ops.object.select_all(action='DESELECT') #deselect all objects
body.rig.select = True
rig.select = True
bpy.context.scene.objects.active = rig #the active object will be the parent of all selected object
bpy.ops.object.parent_set(type='BONE', keep_transform=True)
bpy.ops.object.select_all(action='DESELECT') #deselect all objects
# end
class Body(Formula):
J = 7 #joint number
# Overriding
def __init__(self, P, A, move, part, helicity, start, end,
arm_left_loc, arm_left_rot, arm_left,
scale_frame_start, scale_frame_mid, scale_frame_end, start_value, mid_value, end_value):
global interval
global frame_start
global frame_end
self.interval = interval
self.frame_start = frame_start
self.frame_end = frame_end
self.scale_frame_start = scale_frame_start
self.scale_frame_mid = scale_frame_mid
self.scale_frame_end = scale_frame_end
self.start_value = start_value
self.mid_value = mid_value
self.end_value = end_value
# pivot factor
self.P = P
# scale factor
self.A = A
# name
self.move = move
# element
self.part = part
# element helicity
self.helicity = helicity
self.start = start
self.end = end
bpy.ops.object.mode_set(mode='OBJECT')
# Create armature and object
self.amt = bpy.data.armatures.new(move + '.' + part + '.' + helicity + '.data')
self.rig = bpy.data.objects.new(move + '.' + part + '.' + helicity, self.amt)
self.arm_left_loc = arm_left_loc
self.arm_left_rot = arm_left_rot
self.arm_left = arm_left
# Centroid
# Joints α(n) -> a[n], β(n) -> b[n], γ(n) -> y[n], δ(n) -> o[n]
self.a = [0 for i in range(3)] # Joint α
self.b = [0 for i in range(2)] # Joint β
self.y = [0 for i in range(2)] # Joint γ
self.o = [0 for i in range(2)] # Joint δ
# Upper body
# Joints α(n) -> a[n], β(n) -> b[n], γ(n) -> y[n], δ(n) -> o[n]
self.upper_b = [0 for i in range(self.J)] # Joint β
self.upper_y = [0 for i in range(self.J)] # Joint γ
self.upper_o = [0 for i in range(self.J)] # Joint δ
# Joints ω(n) -> w[n]
self.upper_w = [0 for i in range(self.J)] # Joint ω
# Left shoulder
# Joints α(n) -> a[n], β(n) -> b[n], γ(n) -> y[n], δ(n) -> o[n]
self.upper_left_b = [0 for i in range(self.J)] # Joint β
self.upper_left_y = [0 for i in range(self.J)] # Joint γ
self.upper_left_o = [0 for i in range(self.J)] # Joint δ
self.upper_left_w = [0 for i in range(self.J)] # Joint ω matrix
# Right shoulder
# Joints α(n) -> a[n], β(n) -> b[n], γ(n) -> y[n], δ(n) -> o[n]
self.upper_right_b = [0 for i in range(self.J)] # Joint β
self.upper_right_y = [0 for i in range(self.J+1)] # Joint γ
self.upper_right_o = [0 for i in range(self.J)] # Joint δ
# Right arm
self.upper_right_w = [0 for i in range(self.J+2)] # Joint ω matrix
# y = 8
self.gimbal_upper_right_y = [0 for i in range(self.J+2)] # Joint γ
# w = 8
self.gimbal_upper_right_w = [0 for i in range(self.J-5)] # Joint ω matrix
# Lower body
# Joints α(n) -> a[n], β(n) -> b[n], γ(n) -> y[n], δ(n) -> o[n]
self.lower_b = [0 for i in range(self.J)] # Joint β
self.lower_y = [0 for i in range(self.J)] # Joint γ
self.lower_o = [0 for i in range(self.J)] # Joint δ
# Joints ω(n) -> w[n]
self.lower_w = [0 for i in range(self.J)] # Joint ω
# Left leg
# Joints α(n) -> a[n], β(n) -> b[n], γ(n) -> y[n], δ(n) -> o[n]
self.lower_left_b = [0 for i in range(self.J)] # Joint β
self.lower_left_y = [0 for i in range(self.J)] # Joint γ
self.lower_left_o = [0 for i in range(self.J)] # Joint δ
# Right leg
# Joints α(n) -> a[n], β(n) -> b[n], γ(n) -> y[n], δ(n) -> o[n]
self.lower_right_b = [0 for i in range(self.J)] # Joint β
self.lower_right_y = [0 for i in range(self.J)] # Joint γ
self.lower_right_o = [0 for i in range(self.J)] # Joint δ
# gimbal
self.gimbal_lower_left_o = [0 for i in range(self.J)] # Joint δ
self.gimbal_lower_left_b = [0 for i in range(self.J)] # Joint β
self.gimbal_lower_left_y = [0 for i in range(self.J)] # Joint γ
self.gimbal_lower_right_o = [0 for i in range(self.J)] # Joint δ
self.gimbal_lower_right_b = [0 for i in range(self.J)] # Joint β
self.gimbal_lower_right_y = [0 for i in range(self.J)] # Joint γ
# Configuration Movement
self.configMovement(self.P, self.A, self.J, self.a, self.b, self.y, self.o,
self.upper_b, self.upper_y, self.upper_o, self.upper_w,
self.upper_left_b, self.upper_left_y, self.upper_left_o,
self.upper_right_b, self.upper_right_y, self.upper_right_o,
self.upper_right_w, self.gimbal_upper_right_y, self.gimbal_upper_right_w,
self.lower_b, self.lower_y, self.lower_o, self.lower_w,
self.lower_left_b, self.lower_left_y, self.lower_left_o,
self.lower_right_b, self.lower_right_y, self.lower_right_o,
self.gimbal_lower_left_o, self.gimbal_lower_left_b, self.gimbal_lower_left_y,
self.gimbal_lower_right_o, self.gimbal_lower_right_b, self.gimbal_lower_right_y)
# Construction Movement
self.constructMovement(self.J, self.amt, self.rig,
self.a, self.b, self.y, self.o,
self.upper_b, self.upper_y, self.upper_o, self.upper_w,
self.upper_left_b, self.upper_left_y, self.upper_left_o,
self.upper_right_b, self.upper_right_y, self.upper_right_o,
self.upper_right_w, self.gimbal_upper_right_y, self.gimbal_upper_right_w,
self.lower_b, self.lower_y, self.lower_o, self.lower_w,
self.lower_left_b, self.lower_left_y, self.lower_left_o,
self.lower_right_b, self.lower_right_y, self.lower_right_o,
self.gimbal_lower_left_o, self.gimbal_lower_left_b, self.gimbal_lower_left_y,
self.gimbal_lower_right_o, self.gimbal_lower_right_b, self.gimbal_lower_right_y)
# Parent set arms to body
self.setParent(self.helicity, self.move, self.rig,
self.arm_left_loc, self.arm_left_rot, self.arm_left)
# Configuration Rotation
self.configRotation(self.rig, self.interval, self.frame_start, self.frame_end, self.start, self.end)
self.configLink(self.A*0.7, self.J, self.helicity, self.rig, self.move, self.part)
# Construction Linkage
self.constructLink(self.A*0.7, self.J, self.helicity, self.rig, self.move, self.part)
# Configuration Scale
# w3w4.upper.right scale setting
self.configScale(self.rig, self.interval, self.scale_frame_start, self.scale_frame_mid, self.scale_frame_end, self.start_value, self.mid_value, self.end_value)
# Overriding Configuration Movement
def configMovement(self, P, A, J, a, b, y, o,
upper_b, upper_y, upper_o, upper_w,
upper_left_b, upper_left_y, upper_left_o,
upper_right_b, upper_right_y, upper_right_o,
upper_right_w, gimbal_upper_right_y, gimbal_upper_right_w,
lower_b, lower_y, lower_o, lower_w,
lower_left_b, lower_left_y, lower_left_o,
lower_right_b, lower_right_y, lower_right_o,
gimbal_lower_left_o, gimbal_lower_left_b, gimbal_lower_left_y,
gimbal_lower_right_o, gimbal_lower_right_b, gimbal_lower_right_y):
a[1] = mathutils.Euler((P, A, 0), 'XYZ')
print ("a1 =", a[1])
a[2] = mathutils.Euler((A, -A, 0), 'XYZ')
print ("a2 =", a[2])
b[1] = mathutils.Euler((-A, A, 0), 'XYZ')
print ("b1 =", b[1])
o[1] = mathutils.Euler((A, A, 0), 'XYZ')
print ("o1 =", o[1])
y[1] = mathutils.Euler((-A, -A, 0), 'XYZ')
print ("y1 =", y[1])
lower_b[2] = mathutils.Euler((1.35031, -1.93408, 0), 'XYZ')
print ("b2.lower =", lower_b[2])
lower_o[2] = mathutils.Euler((-1.18173, -3.18999, 0), 'XYZ')
print ("o2.lower =", lower_o[2])
lower_y[2] = mathutils.Euler((-0.761987, -3.11885, 0), 'XYZ')
print ("y2.lower =", lower_y[2])
lower_y[3] = mathutils.Euler((-0.425565, -8.51839, 0), 'XYZ')
print ("y3.lower =", lower_y[3])
lower_w[1] = mathutils.Euler((-0.425565, -8.51839, 2.50277), 'XYZ')
print ("w1.lower =", lower_w[1])
lower_left_o[3] = mathutils.Euler((1.76787, -8.43042, 1.81914), 'XYZ')
print ("o3.lower.left =", lower_left_o[3])
lower_left_b[4] = mathutils.Euler((1.76787, -19.2299, 5.0545), 'XYZ')
print ("b4.lower.left =", lower_left_b[4])
lower_left_y[4] = mathutils.Euler((1.76787, -27.4568, 2.13126), 'XYZ')
print ("y4.lower.left =", lower_left_y[4])
lower_left_y[5] = mathutils.Euler((1.76787, -29.0398, 4.39707), 'XYZ')
print ("y5.lower.left =", lower_left_y[5])
gimbal_lower_left_o[3] = mathutils.Euler((1.87361, -8.51839, 2.50277), 'XYZ')
print ("o3.gimbal.lower.left =", gimbal_lower_left_o[3])
gimbal_lower_left_b[4] = mathutils.Euler((1.87361, -19.7847, 2.50277), 'XYZ')
print ("b4.gimbal.lower.left =", gimbal_lower_left_b[4])
gimbal_lower_left_o[4] = mathutils.Euler((1.77335, -8.4289, 1.81649), 'XYZ')
print ("o4.gimbal.lower.left =", gimbal_lower_left_o[4])
gimbal_lower_left_b[5] = mathutils.Euler((5.14961, -19.1839, 1.81649), 'XYZ')
print ("b5.gimbal.lower.left =", gimbal_lower_left_b[5])
gimbal_lower_left_y[5] = mathutils.Euler((2.33473, -27.4476, 1.81649), 'XYZ')
print ("y5.gimbal.lower.left =", gimbal_lower_left_y[5])
gimbal_lower_left_y[6] = mathutils.Euler((2.91482, -27.429, 1.81649), 'XYZ')
print ("y6.gimbal.lower.left =", gimbal_lower_left_y[6])
lower_right_o[3] = mathutils.Euler((-2.89871, -8.60219, 1.75624), 'XYZ')
print ("o3.lower.right =", lower_right_o[3])
lower_right_b[4] = mathutils.Euler((-2.89871, -19.3735, 5.04104), 'XYZ')
print ("b4.lower.right =", lower_right_b[4])
lower_right_y[4] = mathutils.Euler((-2.89871, -27.5528, 1.98242), 'XYZ')
print ("y4.lower.right =", lower_right_y[4])
lower_right_y[5] = mathutils.Euler((-2.89871, -29.1751, 4.22026), 'XYZ')
print ("y5.lower.right =", lower_right_y[5])
gimbal_lower_right_o[3] = mathutils.Euler((-3.01028, -8.51839, 2.50277), 'XYZ')
print ("o3.gimbal.lower.right =", gimbal_lower_right_o[3])
gimbal_lower_right_b[4] = mathutils.Euler((-3.01028, -19.7726, 2.50277), 'XYZ')
print ("b4.gimbal.lower.right =", gimbal_lower_right_b[4])
gimbal_lower_right_o[4] = mathutils.Euler((-2.89871, -8.60219, 1.75624), 'XYZ')
print ("o4.gimbal.lower.right =", gimbal_lower_right_o[4])
gimbal_lower_right_b[5] = mathutils.Euler((-6.82237, -19.1528, 1.75624), 'XYZ')
print ("b5.gimbal.lower.right =", gimbal_lower_right_b[5])
gimbal_lower_right_y[5] = mathutils.Euler((-4.35508, -27.5285, 1.75624), 'XYZ')
print ("y5.gimbal.lower.right =", gimbal_lower_right_y[5])
gimbal_lower_right_y[6] = mathutils.Euler((-4.88072, -27.5137, 1.75624), 'XYZ')
print ("y6.gimbal.lower.right =", gimbal_lower_right_y[6])
upper_b[2] = mathutils.Euler((0.510293, 5.22315, 0), 'XYZ')
print ("b2.upper =", upper_b[2])
upper_o[2] = mathutils.Euler((-1.65578, 4.62023, 0), 'XYZ')
print ("o2.upper =", upper_o[2])
upper_y[2] = mathutils.Euler((-1.56747, 4.00093, 0), 'XYZ')
print ("y2.upper =", upper_y[2])
upper_w[3] = mathutils.Euler((-1.56747, 4.00093, 9.05079), 'XYZ')
print ("w3.upper =", upper_w[3])
upper_w[4] = mathutils.Euler((-1.65459, 3.99465, 9.05079), 'XYZ')
print ("w4.upper =", upper_w[4])
upper_w[5] = mathutils.Euler((-1.65459, 3.99465, 1.61675), 'XYZ')
print ("w5.upper =", upper_w[5])
upper_y[3] = mathutils.Euler((-1.65459, 4.6204, 0), 'XYZ')
print ("y3.upper.left =", upper_y[3])
upper_w[2] = mathutils.Euler((-1.65459, 4.6204, 9.05079), 'XYZ')
print ("w2.upper =", upper_w[2])
upper_o[3] = mathutils.Euler((-2.07892, 9.71201, 0), 'XYZ')
print ("o3.upper =", upper_o[3])
upper_w[1] = mathutils.Euler((-2.07852, 9.71278, 0.712845), 'XYZ')
print ("w1.upper =", upper_w[1])
upper_b[4] = mathutils.Euler((-2.07852, 10.4327, 0.669667), 'XYZ')
print ("o3.upper =", upper_o[3])
upper_left_y[3] = mathutils.Euler((-1.65578, 4.62023, 0), 'XYZ')
print ("y3.upper.left =", upper_left_y[3])
upper_left_b[3] = mathutils.Euler((0.349662, 4.8056, 0), 'XYZ')
print ("b3.upper.left =", upper_left_b[3])
upper_left_y[2] = mathutils.Euler((-1.56747, 3.99717, 0), 'XYZ')
print ("y2.upper.left =", upper_left_y[2])
upper_left_o[2] = mathutils.Euler((0.589122, 4.23164, 0), 'XYZ')
print ("o2.upper.left =", upper_left_o[2])
upper_right_y[3] = mathutils.Euler((-1.65578, 4.62023, 0), 'XYZ')
print ("y3.upper.right =", upper_right_y[3])
upper_right_b[3] = mathutils.Euler((-5.25711, 3.84599, 0), 'XYZ')
print ("b3.upper.right =", upper_right_b[3])
upper_right_y[2] = mathutils.Euler((-1.56747, 3.99717, 0), 'XYZ')
print ("y2.upper.right =", upper_right_y[2])
upper_right_o[2] = mathutils.Euler((-5.17039, 3.22555, 0), 'XYZ')
print ("o2.upper.right =", upper_right_o[2])
# Right arm
upper_right_y[4] = mathutils.Euler((-6.02955, 2.93315, 0.64604), 'XYZ')
print ("y4.upper.right =", upper_right_y[4])
upper_right_y[5] = mathutils.Euler((-6.02955, -3.75035, 0.64604), 'XYZ')
print ("y5.upper.right =", upper_right_y[5])
upper_right_y[6] = mathutils.Euler((-6.02956, -8.9087, 2.52353), 'XYZ')
print ("y6.upper.right =", upper_right_y[6])
upper_right_y[7] = mathutils.Euler((-6.02956, -8.9087, 4.09698), 'XYZ')
print ("y7.upper.right =", upper_right_y[7])
upper_right_w[1] = mathutils.Euler((-6.02955, -9.75705, 4.09698), 'XYZ')
print ("w1.upper.right =", upper_right_w[1])
upper_right_w[2] = mathutils.Euler((-6.02955, -9.75705, 4.0775), 'XYZ')
print ("w2.upper.right =", upper_right_w[2])
gimbal_upper_right_y[8] = mathutils.Euler((-7.603, -8.9087, 2.52353), 'XYZ')
print ("y8.upper.gimbal.right =", gimbal_upper_right_y[8])
gimbal_upper_right_w[1] = mathutils.Euler((-7.603, -9.75705, 2.52353), 'XYZ')
print ("w1.gimbal.upper.right =", gimbal_upper_right_w[1])
upper_right_w[3] = mathutils.Euler((7.39752, -3.75035, 2.21949), 'XYZ')
print ("w3.upper.right =", upper_right_w[3])
upper_right_w[4] = mathutils.Euler((-4.03109, -3.75035, 2.21949), 'XYZ')
print ("w4.upper.right =", upper_right_w[4])
upper_right_w[5] = mathutils.Euler((-4.03109, -3.75035, 0.64604), 'XYZ')
print ("w5.upper.right =", upper_right_w[5])
upper_right_w[6] = mathutils.Euler((-4.03109, -4.22665, 0.64604), 'XYZ')
print ("w6.upper.right =", upper_right_w[6])
gimbal_upper_right_y[4] = mathutils.Euler((-6.61626, 3.84599, 0), 'XYZ')
print ("y4.gimbal.upper.right =", gimbal_upper_right_y[4])
gimbal_upper_right_y[5] = mathutils.Euler((-6.61626, -2.8406, 0), 'XYZ')
print ("y5.gimbal.upper.right =", gimbal_upper_right_y[5])
gimbal_upper_right_y[6] = mathutils.Euler((-11.5189, -3.75035, 0.64604), 'XYZ')
print ("y6.gimbal.upper.right =", gimbal_upper_right_y[6])
gimbal_upper_right_y[7] = mathutils.Euler((-11.5189, -5.3238, 0.64604), 'XYZ')
print ("y7.gimbal.upper.right =", gimbal_upper_right_y[7])
upper_right_w[7] = mathutils.Euler((-8.0889, -2.17378, -4.08346), 'XYZ')
print ("w7.upper.right =", upper_right_w[7])
upper_right_w[8] = mathutils.Euler((-10.7061, -2.17378, -4.08346), 'XYZ')
print ("w8.upper.right =", upper_right_w[8])
def constructMovement(self, J, amt, rig, a, b, y, o,
upper_b, upper_y, upper_o, upper_w,
upper_left_b, upper_left_y, upper_left_o,
upper_right_b, upper_right_y, upper_right_o,
upper_right_w, gimbal_upper_right_y, gimbal_upper_right_w,
lower_b, lower_y, lower_o, lower_w,
lower_left_b, lower_left_y, lower_left_o,
lower_right_b, lower_right_y, lower_right_o,
gimbal_lower_left_o, gimbal_lower_left_b, gimbal_lower_left_y,
gimbal_lower_right_o, gimbal_lower_right_b, gimbal_lower_right_y):
# Linkages
aa = [[0 for i in range(3)] for j in range(3)] # Link α(i) - α(j)
ab = [[0 for i in range(3)] for j in range(3)] # Link α(i) - β(j)
ya = [[0 for i in range(3)] for j in range(3)] # Link γ(i) - α(j)
ao = [[0 for i in range(3)] for j in range(3)] # Link α(i) - δ(j)
by = [[0 for i in range(2)] for j in range(2)] # Link β(i) - γ(j)
upper_ob = [[0 for i in range(self.J)] for j in range(self.J)] # Link δ(i) - β(j)
upper_yy = [[0 for i in range(self.J)] for j in range(self.J)] # Link γ(i) - γ(j)
upper_by = [[0 for i in range(self.J)] for j in range(self.J)] # Link β(i) - γ(j)
upper_yo = [[0 for i in range(self.J)] for j in range(self.J)] # Link γ(i) - δ(j)
upper_ow = [[0 for i in range(self.J)] for j in range(self.J)] # Link γ(i) - δ(j)
upper_ww = [[0 for i in range(self.J)] for j in range(self.J)] # Link γ(i) - δ(j)
upper_left_ob = [[0 for i in range(self.J)] for j in range(self.J)] # Link δ(i) - β(j)
upper_left_yy = [[0 for i in range(self.J)] for j in range(self.J)] # Link γ(i) - γ(j)
upper_left_by = [[0 for i in range(self.J)] for j in range(self.J)] # Link β(i) - γ(j)
upper_left_yo = [[0 for i in range(self.J)] for j in range(self.J)] # Link γ(i) - δ(j)
upper_right_ob = [[0 for i in range(self.J)] for j in range(self.J)] # Link δ(i) - β(j)
upper_right_yy = [[0 for i in range(self.J+1)] for j in range(self.J)] # Link γ(i) - γ(j)
upper_right_by = [[0 for i in range(self.J)] for j in range(self.J)] # Link β(i) - γ(j)
upper_right_yo = [[0 for i in range(self.J)] for j in range(self.J)] # Link γ(i) - δ(j)
# Right arm
upper_right_yw = [[0 for i in range(self.J)] for j in range(self.J+1)] # Link γ(i) - ω(j)
upper_right_ww = [[0 for i in range(self.J+2)] for j in range(self.J+1)] # Link ω(i) - ω(j)
gimbal_upper_right_by = [[0 for i in range(self.J)] for j in range(self.J)] # Link β(i) - γ(j)
gimbal_upper_right_yy = [[0 for i in range(self.J+2)] for j in range(self.J)] # Link γ(i) - γ(j)
gimbal_upper_right_yw = [[0 for i in range(self.J)] for j in range(self.J+2)] # Link γ(i) - ω(j)
lower_ob = [[0 for i in range(self.J)] for j in range(self.J)] # Link δ(i) - β(j)
lower_yy = [[0 for i in range(self.J)] for j in range(self.J)] # Link γ(i) - γ(j)
lower_by = [[0 for i in range(self.J)] for j in range(self.J)] # Link β(i) - γ(j)
lower_yo = [[0 for i in range(self.J)] for j in range(self.J)] # Link γ(i) - δ(j)
lower_yw = [[0 for i in range(2)] for j in range(self.J)] # Link γ(i) - ω(j)
lower_left_ob = [[0 for i in range(self.J)] for j in range(self.J)] # Link δ(i) - β(j)
lower_left_yy = [[0 for i in range(self.J)] for j in range(self.J)] # Link γ(i) - δ(j)
lower_left_by = [[0 for i in range(self.J)] for j in range(self.J)] # Link β(i) - γ(j)
lower_left_yo = [[0 for i in range(self.J)] for j in range(self.J)] # Link γ(i) - γ(j)
lower_right_ob = [[0 for i in range(self.J)] for j in range(self.J)] # Link δ(i) - β(j)
lower_right_yy = [[0 for i in range(self.J)] for j in range(self.J)] # Link γ(i) - δ(j)
lower_right_by = [[0 for i in range(self.J)] for j in range(self.J)] # Link β(i) - γ(j)
lower_right_yo = [[0 for i in range(self.J)] for j in range(self.J)] # Link γ(i) - γ(j)
gimbal_lower_left_yo = [[0 for i in range(self.J)] for j in range(self.J)] # Link δ(i) - β(j)
gimbal_lower_left_ob = [[0 for i in range(self.J)] for j in range(self.J)] # Link δ(i) - β(j)
gimbal_lower_left_by = [[0 for i in range(self.J)] for j in range(self.J)] # Link δ(i) - β(j)
gimbal_lower_left_yy = [[0 for i in range(self.J)] for j in range(self.J)] # Link δ(i) - β(j)
gimbal_lower_right_yo = [[0 for i in range(self.J)] for j in range(self.J)] # Link δ(i) - β(j)
gimbal_lower_right_ob = [[0 for i in range(self.J)] for j in range(self.J)] # Link δ(i) - β(j)
gimbal_lower_right_by = [[0 for i in range(self.J)] for j in range(self.J)] # Link δ(i) - β(j)
gimbal_lower_right_yy = [[0 for i in range(self.J)] for j in range(self.J)] # Link δ(i) - β(j)
rig.location = mathutils.Euler((0.0, 0.0, 0.0), 'XYZ')
rig.show_x_ray = True
amt.show_names = True
amt.draw_type = 'STICK'
# amt.draw_type = 'BBONE'
# Link object to scene
scn = bpy.context.scene
scn.objects.link(rig)
scn.objects.active = rig
scn.update()
# Edit
bpy.ops.object.editmode_toggle()
j = 1
# Construction Linkage
aa[j+1][j] = amt.edit_bones.new('a'+ str(j+1)+'a'+ str(j))
aa[j+1][j].head = a[j+1]
aa[j+1][j].tail = a[j]
# aa[j+1][j].parent = by[j][j]_body
ab[j][j] = amt.edit_bones.new('a'+ str(j)+'b'+ str(j))
ab[j][j].head = a[j]
ab[j][j].tail = b[j]
ab[j][j].parent = aa[j+1][j]
by[j][j] = amt.edit_bones.new('b'+ str(j)+'y'+ str(j))
by[j][j].head = b[j]
by[j][j].tail = y[j]
by[j][j].parent = ab[j][j]
by[j][j].use_inherit_rotation = False
ya[j][j+1] = amt.edit_bones.new('y'+ str(j)+'a'+ str(j+1))
ya[j][j+1].head = y[j]
ya[j][j+1].tail = a[j+1]
ya[j][j+1].parent = by[j][j]
ao[j+1][j] = amt.edit_bones.new('a'+ str(j+1)+'o'+str(j))
ao[j+1][j].head = a[j+1]
ao[j+1][j].tail = o[j]
ao[j+1][j].parent = ya[j][j+1]
lower_ob[j][j+1] = amt.edit_bones.new('o'+ str(j)+'b'+ str(j+1)+'.lower')
lower_ob[j][j+1].head = o[j]
lower_ob[j][j+1].tail = lower_b[j+1]
lower_ob[j][j+1].parent = ao[j+1][j]
lower_yy[j][j+1] = amt.edit_bones.new('y'+ str(j)+'y'+ str(j+1)+'.lower')
lower_yy[j][j+1].head = y[j]
lower_yy[j][j+1].tail = lower_y[j+1]
lower_yy[j][j+1].parent = by[j][j]
upper_ob[j][j+1] = amt.edit_bones.new('o'+ str(j)+'b'+ str(j+1)+'.upper')
upper_ob[j][j+1].head = o[j]
upper_ob[j][j+1].tail = upper_b[j+1]
upper_ob[j][j+1].parent = ao[j+1][j]
upper_yy[j][j+1] = amt.edit_bones.new('y'+ str(j)+'y'+ str(j+1)+'.upper')
upper_yy[j][j+1].head = y[j]
upper_yy[j][j+1].tail = upper_y[j+1]
upper_yy[j][j+1].parent = by[j][j]
j = 2
lower_by[j][j] = amt.edit_bones.new('b'+ str(j) + 'y'+ str(j) + '.lower')
lower_by[j][j].head = lower_b[j]
lower_by[j][j].tail = lower_y[j]
lower_by[j][j].parent = lower_ob[j-1][j]
lower_yy[j][j+1] = amt.edit_bones.new('y'+ str(j) + 'y'+ str(j+1) + '.lower')
lower_yy[j][j+1].head = lower_y[j]
lower_yy[j][j+1].tail = lower_y[j+1]
lower_yy[j][j+1].parent = lower_by[j][j]
lower_yo[j][j] = amt.edit_bones.new('y'+ str(j) + 'o'+ str(j) + '.lower')
lower_yo[j][j].head = lower_y[j]
lower_yo[j][j].tail = lower_o[j]
lower_yo[j][j].parent = lower_yy[j-1][j]
upper_by[j][j] = amt.edit_bones.new('b'+ str(j) + 'y'+ str(j) + '.upper')
upper_by[j][j].head = upper_b[j]
upper_by[j][j].tail = upper_y[j]
upper_by[j][j].parent = upper_ob[j-1][j]
upper_yy[j][j+1] = amt.edit_bones.new('y'+ str(j) + 'y'+ str(j+1) + '.upper')
upper_yy[j][j+1].head = upper_y[j]
upper_yy[j][j+1].tail = upper_y[j+1]
upper_yy[j][j+1].parent = upper_by[j][j]
# left shoulder gimbal
upper_left_yo[j][j] = amt.edit_bones.new('y'+ str(j) + 'o'+ str(j) + '.upper.left')
upper_left_yo[j][j].head = upper_y[j]
upper_left_yo[j][j].tail = upper_left_o[j]
upper_left_yo[j][j].parent = upper_yy[j-1][j]
upper_left_ob[j][j+1] = amt.edit_bones.new('o' + str(j) + 'b'+ str(j+1) + '.upper.left')
upper_left_ob[j][j+1].head = upper_left_o[j]
upper_left_ob[j][j+1].tail = upper_left_b[j+1]
upper_left_ob[j][j+1].parent = upper_left_yo[j][j]
# right shoulder gimbal
upper_right_yo[j][j] = amt.edit_bones.new('y'+ str(j) + 'o'+ str(j) + '.upper.right')
upper_right_yo[j][j].head = upper_y[j]
upper_right_yo[j][j].tail = upper_right_o[j]
upper_right_yo[j][j].parent = upper_yy[j-1][j]
upper_right_ob[j][j+1] = amt.edit_bones.new('o' + str(j) + 'b'+ str(j+1) + '.upper.right')
upper_right_ob[j][j+1].head = upper_right_o[j]
upper_right_ob[j][j+1].tail = upper_right_b[j+1]
upper_right_ob[j][j+1].parent = upper_right_yo[j][j]
j = 3
lower_yw[j][1] = amt.edit_bones.new('y'+ str(j) + 'w'+ str(1) + '.lower')
lower_yw[j][1].head = lower_y[j]
lower_yw[j][1].tail = lower_w[1]
lower_yw[j][1].parent = lower_yy[2][j]
lower_left_yo[j][j] = amt.edit_bones.new('y'+ str(j) + 'o'+ str(j) + '.lower.left')
lower_left_yo[j][j].head = lower_w[1]
lower_left_yo[j][j].tail = lower_left_o[j]
lower_left_yo[j][j].parent = lower_yw[j][1]
lower_left_ob[j][j+1] = amt.edit_bones.new('o' + str(j) + 'b'+ str(j+1) + '.lower.left')
lower_left_ob[j][j+1].head = lower_left_o[j]
lower_left_ob[j][j+1].tail = lower_left_b[j+1]
lower_left_ob[j][j+1].parent = lower_left_yo[j][j]
lower_right_yo[j][j] = amt.edit_bones.new('y' + str(j) + 'o'+ str(j) +'.lower.right')
lower_right_yo[j][j].head = lower_w[1]
lower_right_yo[j][j].tail = lower_right_o[j]
lower_right_yo[j][j].parent = lower_yw[j][1]
lower_right_ob[j][j+1] = amt.edit_bones.new('o' + str(j) + 'b'+ str(j+1) +'.lower.right')
lower_right_ob[j][j+1].head = lower_right_o[j]
lower_right_ob[j][j+1].tail = lower_right_b[j+1]
lower_right_ob[j][j+1].parent = lower_right_yo[j][j]
# gimbal
gimbal_lower_left_yo[j][j] = amt.edit_bones.new('y'+ str(j) + 'o'+ str(j) + '.gimbal.lower.left')
gimbal_lower_left_yo[j][j].head = lower_w[1]
gimbal_lower_left_yo[j][j].tail = gimbal_lower_left_o[j]
gimbal_lower_left_yo[j][j].parent = lower_yw[j][1]
gimbal_lower_left_ob[j][j+1] = amt.edit_bones.new('o' + str(j) + 'b'+ str(j+1) + '.gimbal.lower.left')
gimbal_lower_left_ob[j][j+1].head = gimbal_lower_left_o[j]
gimbal_lower_left_ob[j][j+1].tail = gimbal_lower_left_b[j+1]
gimbal_lower_left_ob[j][j+1].parent = gimbal_lower_left_yo[j][j]
gimbal_lower_right_yo[j][j] = amt.edit_bones.new('y' + str(j) + 'o'+ str(j) +'.gimbal.lower.right')
gimbal_lower_right_yo[j][j].head = lower_w[1]
gimbal_lower_right_yo[j][j].tail = gimbal_lower_right_o[j]
gimbal_lower_right_yo[j][j].parent = lower_yw[j][1]
gimbal_lower_right_ob[j][j+1] = amt.edit_bones.new('o' + str(j) + 'b'+ str(j+1) +'.gimbal.lower.right')
gimbal_lower_right_ob[j][j+1].head = gimbal_lower_right_o[j]
gimbal_lower_right_ob[j][j+1].tail = gimbal_lower_right_b[j+1]
gimbal_lower_right_ob[j][j+1].parent = gimbal_lower_right_yo[j][j]
# end
upper_left_by[j][j] = amt.edit_bones.new('b'+ str(j) + 'y'+ str(j) + '.upper.left')
upper_left_by[j][j].head = upper_left_b[j]
upper_left_by[j][j].tail = upper_left_y[j]
upper_left_by[j][j].parent = upper_left_ob[j-1][j]
upper_right_by[j][j] = amt.edit_bones.new('b'+ str(j) + 'y'+ str(j) + '.upper.right')
upper_right_by[j][j].head = upper_right_b[j]
upper_right_by[j][j].tail = upper_right_y[j]
upper_right_by[j][j].parent = upper_right_ob[j-1][j]
upper_yo[j][j] = amt.edit_bones.new('y'+ str(j) + 'o'+ str(j) + '.upper')
upper_yo[j][j].head = upper_y[j]
upper_yo[j][j].tail = upper_o[j]
upper_yo[j][j].parent = upper_yy[j-1][j]
upper_ob[j][j+1] = amt.edit_bones.new('o' + str(j) + 'b'+ str(j+1) + '.upper')
upper_ob[j][j+1].head = upper_w[1]
upper_ob[j][j+1].tail = upper_b[j+1]
upper_ob[j][j+1].parent = upper_yo[j][j]
# Right arm
upper_right_by[j][j+1] = amt.edit_bones.new('b'+ str(j) + 'y'+ str(j+1) + '.upper.right')
upper_right_by[j][j+1].head = upper_right_b[j]
upper_right_by[j][j+1].tail = upper_right_y[j+1]
upper_right_by[j][j+1].parent = upper_right_ob[j-1][j]
gimbal_upper_right_by[j][j+1] = amt.edit_bones.new('b'+ str(j) + 'y'+ str(j+1) + '.gimbal.upper.right')
gimbal_upper_right_by[j][j+1].head = upper_right_b[j]
gimbal_upper_right_by[j][j+1].tail = gimbal_upper_right_y[j+1]
gimbal_upper_right_by[j][j+1].parent = upper_right_ob[j-1][j]
upper_right_ww[j][j+1] = amt.edit_bones.new('w' + str(j) + 'w'+ str(j+1) + '.upper.right')
upper_right_ww[j][j+1].head = upper_right_w[j]
upper_right_ww[j][j+1].tail = upper_right_w[j+1]
upper_right_ww[j][j+1].parent = aa[j-1][j-2]
upper_right_ww[j][j+1].use_inherit_rotation = False
upper_right_ww[j][j+1].use_local_location = False
j = 4
lower_left_by[j][j] = amt.edit_bones.new('b' + str(j) + 'y' + str(j) +'.lower.left')
lower_left_by[j][j].head = lower_left_b[j]
lower_left_by[j][j].tail = lower_left_y[j]
lower_left_by[j][j].parent = lower_left_ob[j-1][j]
lower_left_yy[j][j+1] = amt.edit_bones.new('y' + str(j) + 'y' + str(j+1) +'.lower.left')
lower_left_yy[j][j+1].head = lower_left_y[j]
lower_left_yy[j][j+1].tail = lower_left_y[j+1]
lower_left_yy[j][j+1].parent = lower_left_by[j][j]
# gimbal o4b5
gimbal_lower_left_ob[j][j+1] = amt.edit_bones.new('o' + str(j) + 'b'+ str(j+1) + '.gimbal.lower.left')
gimbal_lower_left_ob[j][j+1].head = gimbal_lower_left_o[j]
gimbal_lower_left_ob[j][j+1].tail = gimbal_lower_left_b[j+1]
gimbal_lower_left_ob[j][j+1].parent = lower_left_yo[j-1][j-1]
lower_right_by[j][j] = amt.edit_bones.new('b' + str(j) + 'y' + str(j) +'.lower.right')
lower_right_by[j][j].head = lower_right_b[j]
lower_right_by[j][j].tail = lower_right_y[j]
lower_right_by[j][j].parent = lower_right_ob[j-1][j]
lower_right_yy[j][j+1] = amt.edit_bones.new('y' + str(j) + 'y' + str(j+1) +'.lower.right')
lower_right_yy[j][j+1].head = lower_right_y[j]
lower_right_yy[j][j+1].tail = lower_right_y[j+1]
lower_right_yy[j][j+1].parent = lower_right_by[j][j]
# gimbal o4b5
gimbal_lower_right_ob[j][j+1] = amt.edit_bones.new('o' + str(j) + 'b'+ str(j+1) + '.gimbal.lower.right')
gimbal_lower_right_ob[j][j+1].head = gimbal_lower_right_o[j]
gimbal_lower_right_ob[j][j+1].tail = gimbal_lower_right_b[j+1]
gimbal_lower_right_ob[j][j+1].parent = lower_right_yo[j-1][j-1]
# Right arm
upper_right_yy[j][j+1] = amt.edit_bones.new('y'+ str(j) + 'y'+ str(j+1) + '.upper.right')
upper_right_yy[j][j+1].head = upper_right_y[j]
upper_right_yy[j][j+1].tail = upper_right_y[j+1]
upper_right_yy[j][j+1].parent = upper_right_by[j-1][j]
upper_right_yy[j][j+1].use_connect = True
gimbal_upper_right_yy[j][j+1] = amt.edit_bones.new('y'+ str(j) + 'y'+ str(j+1) + '.gimbal.upper.right')
gimbal_upper_right_yy[j][j+1].head = gimbal_upper_right_y[j]
gimbal_upper_right_yy[j][j+1].tail = gimbal_upper_right_y[j+1]
gimbal_upper_right_yy[j][j+1].parent = gimbal_upper_right_by[j-1][j]
upper_right_ww[j][j+1] = amt.edit_bones.new('w' + str(j) + 'w'+ str(j+1) + '.upper.right')
upper_right_ww[j][j+1].head = upper_right_w[j]
upper_right_ww[j][j+1].tail = upper_right_w[j+1]
upper_right_ww[j][j+1].parent = upper_right_ww[j-1][j]
j = 5
# gimbal b5y5
gimbal_lower_right_by[j][j] = amt.edit_bones.new('b' + str(j) + 'y'+ str(j) + '.gimbal.lower.right')
gimbal_lower_right_by[j][j].head = gimbal_lower_right_b[j]
gimbal_lower_right_by[j][j].tail = gimbal_lower_right_y[j]
gimbal_lower_right_by[j][j].parent = gimbal_lower_right_ob[j-1][j]
# gimbal y5y6
gimbal_lower_right_yy[j][j+1] = amt.edit_bones.new('y' + str(j) + 'y'+ str(j+1) + '.gimbal.lower.right')
gimbal_lower_right_yy[j][j+1].head = gimbal_lower_right_y[j]
gimbal_lower_right_yy[j][j+1].tail = gimbal_lower_right_y[j+1]
gimbal_lower_right_yy[j][j+1].parent = gimbal_lower_right_by[j][j]
# gimbal b5y5
gimbal_lower_left_by[j][j] = amt.edit_bones.new('b' + str(j) + 'y'+ str(j) + '.gimbal.lower.left')
gimbal_lower_left_by[j][j].head = gimbal_lower_left_b[j]
gimbal_lower_left_by[j][j].tail = gimbal_lower_left_y[j]
gimbal_lower_left_by[j][j].parent = gimbal_lower_left_ob[j-1][j]
# gimbal y5y6
gimbal_lower_left_yy[j][j+1] = amt.edit_bones.new('y' + str(j) + 'y'+ str(j+1) + '.gimbal.lower.left')
gimbal_lower_left_yy[j][j+1].head = gimbal_lower_left_y[j]
gimbal_lower_left_yy[j][j+1].tail = gimbal_lower_left_y[j+1]
gimbal_lower_left_yy[j][j+1].parent = gimbal_lower_left_by[j][j]
# Right arm
upper_right_yy[j][j+1] = amt.edit_bones.new('y'+ str(j) + 'y'+ str(j+1) + '.upper.right')
upper_right_yy[j][j+1].head = upper_right_y[j]
upper_right_yy[j][j+1].tail = upper_right_y[j+1]
upper_right_yy[j][j+1].parent = upper_right_yy[j-1][j]
gimbal_upper_right_yy[j][j+1] = amt.edit_bones.new('y'+ str(j) + 'y'+ str(j+1) + '.gimbal.upper.right')
gimbal_upper_right_yy[j][j+1].head = upper_right_y[j]
gimbal_upper_right_yy[j][j+1].tail = gimbal_upper_right_y[j+1]
gimbal_upper_right_yy[j][j+1].parent = upper_right_yy[j-1][j]
upper_right_ww[j][j+1] = amt.edit_bones.new('w' + str(j) + 'w'+ str(j+1) + '.upper.right')
upper_right_ww[j][j+1].head = upper_right_w[j]
upper_right_ww[j][j+1].tail = upper_right_w[j+1]
upper_right_ww[j][j+1].parent = upper_right_ww[j-1][j]
j = 6
# Right arm
upper_right_yy[j][j+1] = amt.edit_bones.new('y'+ str(j) + 'y'+ str(j+1) + '.upper.right')
upper_right_yy[j][j+1].head = upper_right_y[j]
upper_right_yy[j][j+1].tail = upper_right_y[j+1]
upper_right_yy[j][j+1].parent = upper_right_yy[j-1][j]
upper_right_yy[j][j+1].use_inherit_rotation = False
gimbal_upper_right_yy[j][j+1] = amt.edit_bones.new('y'+ str(j) + 'y'+ str(j+1) + '.gimbal.upper.right')
gimbal_upper_right_yy[j][j+1].head = gimbal_upper_right_y[j]
gimbal_upper_right_yy[j][j+1].tail = gimbal_upper_right_y[j+1]
gimbal_upper_right_yy[j][j+1].parent = gimbal_upper_right_yy[j-1][j]
gimbal_upper_right_yy[j][j+2] = amt.edit_bones.new('y'+ str(j) + 'y'+ str(j+2) + '.gimbal.upper.right')
gimbal_upper_right_yy[j][j+2].head = upper_right_y[j]
gimbal_upper_right_yy[j][j+2].tail = gimbal_upper_right_y[j+2]
gimbal_upper_right_yy[j][j+2].parent = upper_right_yy[j-1][j]
j = 7
# Right arm
upper_right_yw[j][j-6] = amt.edit_bones.new('y'+ str(j) + 'w'+ str(j-6) + '.upper.right')
upper_right_yw[j][j-6].head = upper_right_y[j]
upper_right_yw[j][j-6].tail = upper_right_w[j-6]
upper_right_yw[j][j-6].parent = upper_right_yy[j-1][j]
gimbal_upper_right_yw[j+1][j-6] = amt.edit_bones.new('y'+ str(j+1) + 'w'+ str(j-6) + '.gimbal.upper.right')
gimbal_upper_right_yw[j+1][j-6].head = gimbal_upper_right_y[j+1]
gimbal_upper_right_yw[j+1][j-6].tail = gimbal_upper_right_w[j-6]
gimbal_upper_right_yw[j+1][j-6].parent = gimbal_upper_right_yy[j-1][j+1]
upper_right_ww[j-6][j-5] = amt.edit_bones.new('w'+ str(j-6) + 'w'+ str(j-5) + '.upper.right')
upper_right_ww[j-6][j-5].head = upper_right_w[j-6]
upper_right_ww[j-6][j-5].tail = upper_right_w[j-5]
upper_right_ww[j-6][j-5].parent = upper_right_yw[j][j-6]
upper_right_ww[j][j+1] = amt.edit_bones.new('w'+ str(j) + 'w'+ str(j+1) + '.upper.right')
upper_right_ww[j][j+1].head = upper_right_w[j]
upper_right_ww[j][j+1].tail = upper_right_w[j+1]
upper_right_ww[j][j+1].parent = aa[j-5][j-6]
upper_right_ww[j][j+1].use_inherit_rotation = False
upper_right_ww[j][j+1].use_local_location = False
# select all bones
#bpy.ops.pose.select_all(action="SELECT")
for b in amt.edit_bones:
b.select = True
bpy.ops.armature.calculate_roll(type='GLOBAL_NEG_Z')
for b in amt.edit_bones:
b.select = False
amt.edit_bones["o3b4.lower.left"].select = True
amt.edit_bones["b4y4.lower.left"].select = True
amt.edit_bones["y4y5.lower.left"].select = True
bpy.ops.armature.calculate_roll(type='GLOBAL_NEG_X')
for b in amt.edit_bones:
b.select = False
amt.edit_bones["o3b4.lower.right"].select = True
amt.edit_bones["b4y4.lower.right"].select = True
amt.edit_bones["y4y5.lower.right"].select = True
bpy.ops.armature.calculate_roll(type='GLOBAL_POS_X')
# Bone constraints. Armature must be in pose mode.
bpy.ops.object.mode_set(mode='POSE')
# IK constraint
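# Each IK constraint below drives a two-bone chain (chain_count = 2, stretching
# disabled) toward the tail of its subtarget bone on the same armature; several
# of the chains also use a pole target to fix the plane in which they bend.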
j = 1
cns = rig.pose.bones['y' +str(j) +'a' +str(j+1)].constraints.new('IK')
cns.name = 'Ik'
cns.target = rig
cns.subtarget = 'a'+str(j+1)+'a'+str(j)
cns.chain_count = 2
cns.use_stretch = False
j = 2
cns = rig.pose.bones['b'+str(j) +'y'+str(j)+'.lower'].constraints.new('IK')
cns.name = 'Ik'
cns.target = rig
cns.subtarget = 'y'+str(j)+'o'+str(j)+'.lower'
cns.iterations = 500
cns.chain_count = 2
cns.use_stretch = False
cns = rig.pose.bones['b'+str(j) +'y'+str(j)+'.upper'].constraints.new('IK')
cns.name = 'Ik'
cns.target = rig
cns.subtarget = 'y'+str(j)+'o'+str(j)+'.upper.left'
cns.iterations = 500
cns.chain_count = 2
cns.use_stretch = False
j = 3
cns = rig.pose.bones['b'+str(j) +'y'+str(j)+'.upper.left'].constraints.new('IK')
cns.name = 'Ik'
cns.target = rig
cns.subtarget = 'y'+str(j)+'o'+str(j)+'.upper'
cns.chain_count = 2
cns.use_stretch = False
cns = rig.pose.bones['b'+str(j) +'y'+str(j)+'.upper.right'].constraints.new('IK')
cns.name = 'Ik'
cns.target = rig
cns.subtarget = 'y'+str(j)+'o'+str(j)+'.upper'
cns.iterations = 500
cns.chain_count = 2
cns.use_stretch = False
cns = rig.pose.bones['o'+str(j) +'b'+str(j+1)+'.gimbal.lower.left'].constraints.new('IK')
cns.name = 'Ik'
cns.target = rig
cns.subtarget = 'b'+str(j+1)+'y'+str(j+1)+'.lower.left'
cns.pole_target = rig
cns.pole_subtarget = 'o'+str(j)+'b'+str(j+1)+'.lower.left'
cns.pole_angle = 0
cns.iterations = 500
cns.chain_count = 2
cns.use_stretch = False
cns = rig.pose.bones['o'+str(j) +'b'+str(j+1)+'.gimbal.lower.right'].constraints.new('IK')
cns.name = 'Ik'
cns.target = rig
cns.subtarget = 'b'+str(j+1)+'y'+str(j+1)+'.lower.right'
cns.pole_target = rig
cns.pole_subtarget = 'o'+str(j)+'b'+str(j+1)+'.lower.right'
cns.pole_angle = math.radians(180)
cns.iterations = 500
cns.chain_count = 2
cns.use_stretch = False
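# The mirrored gimbal chains use opposite pole angles (0 and 180 degrees) so the
# left and right sides bend in opposite directions.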
j = 4
cns = rig.pose.bones['b'+str(j) +'y'+str(j)+'.lower.right'].constraints.new('IK')
cns.name = 'Ik'
cns.target = rig
cns.subtarget = 'y'+str(j+1)+'y'+str(j+2)+'.gimbal.lower.right'
cns.pole_target = rig
cns.pole_subtarget = 'b'+str(j+1)+'y'+str(j+1)+'.gimbal.lower.right'
cns.pole_angle = math.radians(0)
cns.iterations = 500
cns.chain_count = 2
cns.use_stretch = False
cns = rig.pose.bones['b'+str(j) +'y'+str(j)+'.lower.left'].constraints.new('IK')
cns.name = 'Ik'
cns.target = rig
cns.subtarget = 'y'+str(j+1)+'y'+str(j+2)+'.gimbal.lower.left'
cns.pole_target = rig
cns.pole_subtarget = 'b'+str(j+1)+'y'+str(j+1)+'.gimbal.lower.left'
cns.pole_angle = math.radians(180)
cns.iterations = 500
cns.chain_count = 2
cns.use_stretch = False
cns = rig.pose.bones['y'+str(j) +'y'+str(j+1)+'.gimbal.upper.right'].constraints.new('IK')
cns.name = 'Ik'
cns.target = rig
cns.subtarget = 'y'+str(j+1)+'y'+str(j+2)+'.upper.right'
cns.pole_target = rig
cns.pole_subtarget = 'y'+str(j)+'y'+str(j+1)+'.upper.right'
cns.pole_angle = math.radians(180)
cns.iterations = 500
cns.chain_count = 2
cns.use_stretch = False
j = 5
cns = rig.pose.bones['y'+str(j) +'y'+str(j+1)+'.upper.right'].constraints.new('IK')
cns.name = 'Ik'
cns.target = rig
cns.subtarget = 'w'+str(j)+'w'+str(j+1)+'.upper.right'
cns.pole_target = rig
cns.pole_subtarget = 'w'+str(j+2)+'w'+str(j+3)+'.upper.right'
cns.pole_angle = math.radians(90)
cns.iterations = 500
cns.chain_count = 2
cns.use_stretch = False
j = 6
cns = rig.pose.bones['y'+str(j) +'y'+str(j+1)+'.gimbal.upper.right'].constraints.new('IK')
cns.name = 'Ik'
cns.target = rig
cns.subtarget = 'y'+str(j+1)+'w'+str(j-5)+'.upper.right'
cns.pole_target = rig
cns.pole_subtarget = 'y'+str(j)+'y'+str(j+1)+'.upper.right'
cns.pole_angle = math.radians(180)
cns.iterations = 500
cns.chain_count = 2
cns.use_stretch = False
j = 8
cns = rig.pose.bones['y'+str(j) +'w'+str(j-7)+'.gimbal.upper.right'].constraints.new('IK')
cns.name = 'Ik'
cns.target = rig
cns.subtarget = 'w'+str(j-7)+'w'+str(j-6)+'.upper.right'
cns.pole_target = rig
cns.pole_subtarget = 'y'+str(j-1)+'w'+str(j-7)+'.upper.right'
cns.pole_angle = math.radians(180)
cns.iterations = 500
cns.chain_count = 2
cns.use_stretch = False
bpy.ops.object.mode_set(mode='OBJECT')
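# configLink duplicates template joint meshes that are expected to already exist
# in the .blend file (e.g. "joint.gold.body.E", "joint.blue.001", "cursor.001"),
# scales each copy by A, offsets it vertically in steps of Q, and names it after
# the bone it belongs to plus a ".mesh." suffix so constructLink can later
# associate the copies with their matching bones.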
def configLink(self, A, J, helicity, rig, move, part):
bpy.ops.object.mode_set(mode='OBJECT')
Q = (0.18648+0.146446)*A
# Z = -Q*2
Z = 0.0
n = 1
obj_joint = bpy.data.objects["joint.gold.body.E"].copy()
obj_joint.location = (0.0, 0.0, -Q*3+Z)
obj_joint.scale = (A, A, A)
obj_joint.name = 'a'+ str(n+1)+'a'+ str(n)+ ".mesh." + move + '.' + part +'.' + helicity
bpy.context.scene.objects.link(obj_joint)
obj_joint = bpy.data.objects["joint.blue.001"].copy()
obj_joint.location = (0.0, 0.0, -Q*2+Z)
obj_joint.scale = (A, A, A)
obj_joint.name = 'a'+ str(n)+'b'+ str(n)+ ".mesh." + move + '.' + part +'.' + helicity
bpy.context.scene.objects.link(obj_joint)
obj_joint = bpy.data.objects["joint.green.001"].copy()
obj_joint.location = (0.0, 0.0, -Q + Q*((n+1) % 2)*4 +Z)
obj_joint.scale = (A, A, A)
obj_joint.name = "b"+str(n)+"y"+str(n)+".mesh." + move + '.' + part +'.' + helicity
bpy.context.scene.objects.link(obj_joint)
obj_joint = bpy.data.objects["joint.silver.001"].copy()
obj_joint.location = (0.0, 0.0, +Q+Z)
obj_joint.scale = (A, A, A)
obj_joint.name = 'y'+ str(n)+'a'+ str(n+1)+".mesh." + move + '.' + part +'.' + helicity
bpy.context.scene.objects.link(obj_joint)
obj_joint = bpy.data.objects["joint.copper.001"].copy()
obj_joint.location = (0.0, 0.0, +Q*3+Z)
obj_joint.scale = (A, A, A)
obj_joint.name = 'a'+ str(n+1)+'o'+ str(n)+".mesh." + move + '.' + part +'.' + helicity
bpy.context.scene.objects.link(obj_joint)
obj_joint = bpy.data.objects["joint.blue.001"].copy()
obj_joint.location = (0.0, 0.0, -Q*2 + Q*(n % 2)*6 +Z)
obj_joint.scale = (A, A, A)
obj_joint.name = "o"+str(n)+"b"+str(n+1)+".lower.mesh." + move + '.' + part +'.' + helicity
bpy.context.scene.objects.link(obj_joint)
obj_joint = bpy.data.objects["joint.gold.00"+str(1 + (n+1) % 2)].copy()
obj_joint.location = (0.0, 0.0, +Q*(1 - (n % 2))*2+Z)
obj_joint.scale = (A, A, A)
obj_joint.name = "y"+str(n)+"y"+str(n+1)+".lower.mesh." + move + '.' + part +'.' + helicity
bpy.context.scene.objects.link(obj_joint)
obj_joint = bpy.data.objects["joint.blue.001"].copy()
obj_joint.location = (0.0, 0.0, -Q*2 + Q*(n % 2)*4 +Z)
obj_joint.scale = (A, A, A)
obj_joint.name = "o"+str(n)+"b"+str(n+1)+".upper.mesh." + move + '.' + part +'.' + helicity
bpy.context.scene.objects.link(obj_joint)
obj_joint = bpy.data.objects["joint.gold.00"+str(1 + (n+1) % 2)].copy()
obj_joint.location = (0.0, 0.0, +Q*(1 - (n % 2))*2+Z)
obj_joint.scale = (A, A, A)
obj_joint.name = "y"+str(n)+"y"+str(n+1)+".upper.mesh." + move + '.' + part +'.' + helicity
bpy.context.scene.objects.link(obj_joint)
# Right arm
obj_joint = bpy.data.objects["cursor.001"].copy()
obj_joint.location = (0.0, 0.0, 0.0)
obj_joint.scale = (A, A, A)
obj_joint.name = "w"+str(n)+"w"+str(n+1)+".upper.right.mesh." + move + '.' + part +'.' + helicity
bpy.context.scene.objects.link(obj_joint)
n = 2
obj_joint = bpy.data.objects["joint.green.001"].copy()
obj_joint.location = (0.0, 0.0, -Q + Q*((n+1) % 2)*4 +Z)
obj_joint.scale = (A, A, A)
obj_joint.name = "b"+str(n)+"y"+str(n)+".lower.mesh." + move + '.' + part +'.' + helicity
bpy.context.scene.objects.link(obj_joint)
obj_joint = bpy.data.objects["joint.gold.00"+str(1 + (n+1) % 2)].copy()
obj_joint.location = (0.0, 0.0, +Q*(1 - (n % 2))*2+Z)
obj_joint.scale = (A, A, A)
obj_joint.name = "y"+str(n)+"y"+str(n+1)+".lower.mesh." + move + '.' + part +'.' + helicity
bpy.context.scene.objects.link(obj_joint)
obj_joint = bpy.data.objects["joint.copper.001"].copy()
obj_joint.location = (0.0, 0.0, -Q + Q*(n % 2)*4 +Z)
obj_joint.scale = (A, A, A)
obj_joint.name = "y"+str(n)+"o"+str(n)+".lower.mesh." + move + '.' + part +'.' + helicity
bpy.context.scene.objects.link(obj_joint)
obj_joint = bpy.data.objects["joint.green.001"].copy()
obj_joint.location = (0.0, 0.0, -Q + Q*((n+1) % 2)*4 +Z)
obj_joint.scale = (A, A, A)
obj_joint.name = "b"+str(n)+"y"+str(n)+".upper.mesh." + move + '.' + part +'.' + helicity
bpy.context.scene.objects.link(obj_joint)
obj_joint = bpy.data.objects["joint.gold.00"+str(1 + (n+1) % 2)].copy()
obj_joint.location = (0.0, 0.0, +Q*(1 - (n % 2))*2+Z)
obj_joint.scale = (A, A, A)
obj_joint.name = "y"+str(n)+"y"+str(n+1)+".upper.mesh." + move + '.' + part +'.' + helicity
bpy.context.scene.objects.link(obj_joint)
obj_joint = bpy.data.objects["joint.copper.001"].copy()
obj_joint.location = (0.0, 0.0, -Q + Q*(n % 2)*4 +Z)
obj_joint.scale = (A, A, A)
obj_joint.name = "y"+str(n)+"o"+str(n)+".upper.left.mesh." + move + '.' + part +'.' + helicity
bpy.context.scene.objects.link(obj_joint)
obj_joint = bpy.data.objects["joint.blue.001"].copy()
obj_joint.location = (0.0, 0.0, 0.0)
obj_joint.scale = (A, A, A)
obj_joint.name = "o"+str(n)+"b"+str(n+1)+".upper.left.mesh." + move + '.' + part +'.' + helicity
bpy.context.scene.objects.link(obj_joint)
obj_joint = bpy.data.objects["joint.copper.001"].copy()
obj_joint.location = (0.0, 0.0, -Q + Q*(n % 2)*4 +Z)
obj_joint.scale = (A, A, A)
obj_joint.name = "y"+str(n)+"o"+str(n)+".upper.right.mesh." + move + '.' + part +'.' + helicity
bpy.context.scene.objects.link(obj_joint)
obj_joint = bpy.data.objects["joint.blue.001"].copy()
obj_joint.location = (0.0, 0.0, 0.0)
obj_joint.scale = (A, A, A)
obj_joint.name = "o"+str(n)+"b"+str(n+1)+".upper.right.mesh." + move + '.' + part +'.' + helicity
bpy.context.scene.objects.link(obj_joint)
n = 3
obj_joint = bpy.data.objects["joint.gold.A"].copy()
obj_joint.location = (0.0, 0.0, 0.0)
obj_joint.scale = (A, A, A)
obj_joint.name = "y"+str(n)+"w"+str(1)+".lower.mesh." + move + '.' + part +'.' + helicity
bpy.context.scene.objects.link(obj_joint)
obj_joint = bpy.data.objects["joint.copper.A"].copy()
obj_joint.location = (0.0, 0.0, 0.0)
obj_joint.scale = (A, A, A)
obj_joint.name = "y"+str(n)+"o"+str(n)+".lower.left.mesh." + move + '.' + part +'.' + helicity
bpy.context.scene.objects.link(obj_joint)
obj_joint = bpy.data.objects["joint.blue.B"].copy()
# obj_joint.location = (0.0, 0.0, -Q*2 + Q*(n % 2)*6 +Z)
obj_joint.location = (0.0, 0.0, 0.0)
obj_joint.scale = (A, A, A)
obj_joint.name = "o"+str(n)+"b"+str(n+1)+".lower.left.mesh." + move + '.' + part +'.' + helicity
bpy.context.scene.objects.link(obj_joint)
obj_joint = bpy.data.objects["joint.copper.A"].copy()
obj_joint.location = (0.0, 0.0, 0.0)
obj_joint.scale = (A, A, A)
obj_joint.name = "y"+str(n)+"o"+str(n)+".lower.right.mesh." + move + '.' + part +'.' + helicity
bpy.context.scene.objects.link(obj_joint)
obj_joint = bpy.data.objects["joint.blue.B"].copy()
# obj_joint.location = (0.0, 0.0, -Q*2 + Q*(n % 2)*6 +Z)
obj_joint.location = (0.0, 0.0, 0.0)
obj_joint.scale = (A, A, A)
obj_joint.name = "o"+str(n)+"b"+str(n+1)+".lower.right.mesh." + move + '.' + part +'.' + helicity
bpy.context.scene.objects.link(obj_joint)
obj_joint = bpy.data.objects["joint.copper.g1.y.B"].copy()
obj_joint.location = (0.0, 0.0, 0.0)
obj_joint.scale = (A, A, A)
obj_joint.name = "y"+str(n)+"o"+str(n)+".gimbal.lower.left.mesh." + move + '.' + part +'.' + helicity
bpy.context.scene.objects.link(obj_joint)
obj_joint = bpy.data.objects["joint.blue.g1.z.B"].copy()
obj_joint.location = (0.0, 0.0, 0.0)
obj_joint.scale = (A, A, A)
obj_joint.name = "o"+str(n)+"b"+str(n+1)+".gimbal.lower.left.mesh." + move + '.' + part +'.' + helicity
bpy.context.scene.objects.link(obj_joint)
obj_joint = bpy.data.objects["joint.copper.g1.y.B"].copy()
obj_joint.location = (0.0, 0.0, 0.0)
obj_joint.scale = (A, A, A)
obj_joint.name = "y"+str(n)+"o"+str(n)+".gimbal.lower.right.mesh." + move + '.' + part +'.' + helicity
bpy.context.scene.objects.link(obj_joint)
obj_joint = bpy.data.objects["joint.blue.g1.z.B"].copy()
obj_joint.location = (0.0, 0.0, 0.0)
obj_joint.scale = (A, A, A)
obj_joint.name = "o"+str(n)+"b"+str(n+1)+".gimbal.lower.right.mesh." + move + '.' + part +'.' + helicity
bpy.context.scene.objects.link(obj_joint)
obj_joint = bpy.data.objects["joint.green.001"].copy()
obj_joint.location = (0.0, 0.0, -Q + Q*((n+1) % 2)*4 +Z)
obj_joint.scale = (A, A, A)
obj_joint.name = "b"+str(n)+"y"+str(n)+".upper.left.mesh." + move + '.' + part +'.' + helicity
bpy.context.scene.objects.link(obj_joint)
obj_joint = bpy.data.objects["joint.green.001"].copy()
obj_joint.location = (0.0, 0.0, -Q + Q*((n+1) % 2)*4 +Z)
obj_joint.scale = (A, A, A)
obj_joint.name = "b"+str(n)+"y"+str(n)+".upper.right.mesh." + move + '.' + part +'.' + helicity
bpy.context.scene.objects.link(obj_joint)
obj_joint = bpy.data.objects["joint.copper.001"].copy()
obj_joint.location = (0.0, 0.0, -Q + Q*(n % 2)*4 +Z)
obj_joint.scale = (A, A, A)
obj_joint.name = "y"+str(n)+"o"+str(n)+".upper.mesh." + move + '.' + part +'.' + helicity
bpy.context.scene.objects.link(obj_joint)
obj_joint = bpy.data.objects["joint.blue.001"].copy()
obj_joint.location = (0.0, 0.0, -Q*2 + Q*(n % 2)*6 +Z)
obj_joint.scale = (A, A, A)
obj_joint.name = "o"+str(n)+"b"+str(n+1)+".upper.mesh." + move + '.' + part +'.' + helicity
bpy.context.scene.objects.link(obj_joint)
# Right arm
obj_joint = bpy.data.objects["joint.gold.B3"].copy()
obj_joint.location = (0.0, 0.0, 0.0)
obj_joint.scale = (A, A, A)
obj_joint.name = "b"+str(n)+"y"+str(n+1)+".upper.right.mesh." + move + '.' + part +'.' + helicity
bpy.context.scene.objects.link(obj_joint)
obj_joint = bpy.data.objects["joint.gold.g1.y.C.R.001"].copy()
obj_joint.location = (0.0, 0.0, 0.0)
obj_joint.scale = (A, A, A)
obj_joint.name = "b"+str(n)+"y"+str(n+1)+".gimbal.upper.right.mesh." + move + '.' + part +'.' + helicity
bpy.context.scene.objects.link(obj_joint)
# Right arm
obj_joint = bpy.data.objects["cursor.001"].copy()
obj_joint.location = (0.0, 0.0, 0.0)
obj_joint.scale = (A, A, A)
obj_joint.name = "w"+str(n)+"w"+str(n+1)+".upper.right.mesh." + move + '.' + part +'.' + helicity
bpy.context.scene.objects.link(obj_joint)
n = 4
obj_joint = bpy.data.objects["joint.green.leg-left.A"].copy()
# obj_joint.location = (0.0, 0.0, Q +Z)
obj_joint.location = (0.0, 0.0, 0.0)
obj_joint.scale = (A, A, A)
obj_joint.name = "b"+str(n)+"y"+str(n)+".lower.left.mesh." + move + '.' + part +'.' + helicity
bpy.context.scene.objects.link(obj_joint)
obj_joint = bpy.data.objects["joint.cursor"].copy()
obj_joint.location = (0.0, 0.0, Q/2 +Z)
obj_joint.scale = (A, A, A)
obj_joint.name = "y"+str(n)+"y"+str(n+1)+".lower.left.mesh." + move + '.' + part +'.' + helicity
bpy.context.scene.objects.link(obj_joint)
obj_joint = bpy.data.objects["joint.green.leg-right.A"].copy()
# obj_joint.location = (0.0, 0.0, Q +Z)
obj_joint.location = (0.0, 0.0, 0.0)
obj_joint.scale = (A, A, A)
obj_joint.name = "b"+str(n)+"y"+str(n)+".lower.right.mesh." + move + '.' + part +'.' + helicity
bpy.context.scene.objects.link(obj_joint)
obj_joint = bpy.data.objects["joint.cursor"].copy()
obj_joint.location = (0.0, 0.0, Q/2 +Z)
obj_joint.scale = (A, A, A)
obj_joint.name = "y"+str(n)+"y"+str(n+1)+".lower.right.mesh." + move + '.' + part +'.' + helicity
bpy.context.scene.objects.link(obj_joint)
obj_joint = bpy.data.objects["joint.cursor"].copy()
obj_joint.location = (0.0, 0.0, 0.0)
obj_joint.scale = (A, A, A)
obj_joint.name = "o"+str(n)+"b"+str(n+1)+".gimbal.lower.left.mesh." + move + '.' + part +'.' + helicity
bpy.context.scene.objects.link(obj_joint)
obj_joint = bpy.data.objects["joint.cursor"].copy()
obj_joint.location = (0.0, 0.0, 0.0)
obj_joint.scale = (A, A, A)
obj_joint.name = "o"+str(n)+"b"+str(n+1)+".gimbal.lower.right.mesh." + move + '.' + part +'.' + helicity
bpy.context.scene.objects.link(obj_joint)
# Right arm
obj_joint = bpy.data.objects["joint.silver.g1.z.C.R.001"].copy()
obj_joint.location = (0.0, 0.0, 0.0)
obj_joint.scale = (A, A, A)
obj_joint.name = "y"+str(n)+"y"+str(n+1)+".gimbal.upper.right.mesh." + move + '.' + part +'.' + helicity
bpy.context.scene.objects.link(obj_joint)
obj_joint = bpy.data.objects["joint.blue.C.001"].copy()
obj_joint.location = (0.0, 0.0, 0.0)
obj_joint.scale = (A, A, A)
obj_joint.name = "y"+str(n)+"y"+str(n+1)+".upper.right.mesh." + move + '.' + part +'.' + helicity
bpy.context.scene.objects.link(obj_joint)
obj_joint = bpy.data.objects["cursor.001"].copy()
obj_joint.location = (0.0, 0.0, 0.0)
obj_joint.scale = (A, A, A)
obj_joint.name = "w"+str(n)+"w"+str(n+1)+".upper.right.mesh." + move + '.' + part +'.' + helicity
bpy.context.scene.objects.link(obj_joint)
n = 5
obj_joint = bpy.data.objects["joint.cursor"].copy()
obj_joint.location = (0.0, 0.0, 0.0)
obj_joint.scale = (A, A, A)
obj_joint.name = "b"+str(n)+"y"+str(n)+".gimbal.lower.left.mesh." + move + '.' + part +'.' + helicity
bpy.context.scene.objects.link(obj_joint)
obj_joint = bpy.data.objects["joint.cursor"].copy()
obj_joint.location = (0.0, 0.0, 0.0)
obj_joint.scale = (A, A, A)
obj_joint.name = "b"+str(n)+"y"+str(n)+".gimbal.lower.right.mesh." + move + '.' + part +'.' + helicity
bpy.context.scene.objects.link(obj_joint)
obj_joint = bpy.data.objects["joint.cursor"].copy()
obj_joint.location = (0.0, 0.0, 0.0)
obj_joint.scale = (A, A, A)
obj_joint.name = "y"+str(n)+"y"+str(n+1)+".gimbal.lower.left.mesh." + move + '.' + part +'.' + helicity
bpy.context.scene.objects.link(obj_joint)
obj_joint = bpy.data.objects["joint.cursor"].copy()
obj_joint.location = (0.0, 0.0, 0.0)
obj_joint.scale = (A, A, A)
obj_joint.name = "y"+str(n)+"y"+str(n+1)+".gimbal.lower.right.mesh." + move + '.' + part +'.' + helicity
bpy.context.scene.objects.link(obj_joint)
# Right arm
obj_joint = bpy.data.objects["joint.gold.g1.y.C.R.001"].copy()
obj_joint.location = (0.0, 0.0, 0.0)
obj_joint.scale = (A, A, A)
obj_joint.name = "y"+str(n)+"y"+str(n+1)+".gimbal.upper.right.mesh." + move + '.' + part +'.' + helicity
bpy.context.scene.objects.link(obj_joint)
obj_joint = bpy.data.objects["joint.green.arm-left.A"].copy()
obj_joint.location = (0.0, 0.0, 0.0)
obj_joint.scale = (A, A, A)
obj_joint.name = "y"+str(n)+"y"+str(n+1)+".upper.right.mesh." + move + '.' + part +'.' + helicity
bpy.context.scene.objects.link(obj_joint)
obj_joint = bpy.data.objects["cursor.001"].copy()
obj_joint.location = (0.0, 0.0, 0.0)
obj_joint.scale = (A, A, A)
obj_joint.name = "w"+str(n)+"w"+str(n+1)+".upper.right.mesh." + move + '.' + part +'.' + helicity
bpy.context.scene.objects.link(obj_joint)
n = 6
# Right arm
obj_joint = bpy.data.objects["joint.silver.g1.z.C.R.001"].copy()
obj_joint.location = (0.0, 0.0, 0.0)
obj_joint.scale = (A, A, A)
obj_joint.name = "y"+str(n)+"y"+str(n+1)+".gimbal.upper.right.mesh." + move + '.' + part +'.' + helicity
bpy.context.scene.objects.link(obj_joint)
obj_joint = bpy.data.objects["joint.gold.y6y7.001"].copy()
obj_joint.location = (0.0, 0.0, 0.0)
obj_joint.scale = (A, A, A)
obj_joint.name = "y"+str(n)+"y"+str(n+1)+".upper.right.mesh." + move + '.' + part +'.' + helicity
bpy.context.scene.objects.link(obj_joint)
obj_joint = bpy.data.objects["joint.green.y6y8.gimbal.001"].copy()
obj_joint.location = (0.0, 0.0, 0.0)
obj_joint.scale = (A, A, A)
obj_joint.name = "y"+str(n)+"y"+str(n+2)+".gimbal.upper.right.mesh." + move + '.' + part +'.' + helicity
bpy.context.scene.objects.link(obj_joint)
n = 7
# Right arm
obj_joint = bpy.data.objects["cursor.001"].copy()
obj_joint.location = (0.0, 0.0, 0.0)
obj_joint.scale = (A, A, A)
obj_joint.name = "y"+str(n)+"w"+str(n-6)+".upper.right.mesh." + move + '.' + part +'.' + helicity
bpy.context.scene.objects.link(obj_joint)
obj_joint = bpy.data.objects["cursor.001"].copy()
obj_joint.location = (0.0, 0.0, 0.0)
obj_joint.scale = (A, A, A)
obj_joint.name = "w"+str(n)+"w"+str(n+1)+".upper.right.mesh." + move + '.' + part +'.' + helicity
bpy.context.scene.objects.link(obj_joint)
n = 8
# Right arm
obj_joint = bpy.data.objects["joint.blue.y8w1.gimbal.001"].copy()
obj_joint.location = (0.0, 0.0, 0.0)
obj_joint.scale = (A, A, A)
obj_joint.name = "y"+str(n)+"w"+str(n-7)+".gimbal.upper.right.mesh." + move + '.' + part +'.' + helicity
bpy.context.scene.objects.link(obj_joint)
for ob in context.scene.objects:
if "mesh" in ob.name:
ob.select = True
bpy.ops.object.make_single_user(type='SELECTED_OBJECTS', object=True, obdata=True, material=True, texture=True, animation=True)
bpy.context.scene.cursor_location = (0.0, 0.0, 0.0)
bpy.ops.object.origin_set(type='ORIGIN_CURSOR')
# Parent set arms to body
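# setParent bone-parents the left arm rig to a chosen bone of the body rig
# (parent_bone below), positions the arm, and then adds mutual tracking
# constraints so the body's right-hand bones and the left arm's free end follow
# each other.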
def setParent(self, helicity, move, rig,
arm_left_loc, arm_left_rot, arm_left):
# arm_left_loc, arm_left_rot, arm_left,
# arm_right_loc, arm_right_rot, arm_right):
bpy.ops.object.mode_set(mode='OBJECT')
bpy.context.scene.frame_current = 0
# arm left
bpy.ops.object.select_all(action='DESELECT')
rig.select = True
bpy.context.scene.objects.active = rig
bpy.ops.object.editmode_toggle()
# parent_bone = 'o2w5.gimbal.upper.left'  # alternative parent bone for the attached arm
parent_bone = 'o2b3.upper.left' # choose the bone name which you want to be the parent
rig.data.edit_bones.active = rig.data.edit_bones[parent_bone]
bpy.ops.object.mode_set(mode='OBJECT')
bpy.ops.object.select_all(action='DESELECT') #deselect all objects
arm_left.rig.select = True
rig.select = True
bpy.context.scene.objects.active = rig #the active object will be the parent of all selected object
bpy.ops.object.parent_set(type='BONE', keep_transform=True)
# arm left end
bpy.ops.object.select_all(action='DESELECT') #deselect all objects
# position the left arm
arm_left.rig.location = arm_left_loc
arm_left.rig.rotation_euler = arm_left_rot
### grab the long pole with the right hand
arm_left.rig.select = True
# Bone constraints. Armature must be in pose mode.
bpy.ops.object.mode_set(mode='POSE')
# Damped Track constraint: the left arm's y5w1 tracks the tail of y6y7.upper.right (right hand)
cns = arm_left.rig.pose.bones['y5w1'].constraints.new('DAMPED_TRACK')
cns.name = 'Damped Track'
cns.target = rig
cns.subtarget = 'y6y7.upper.right'
cns.head_tail = 1
bpy.ops.object.mode_set(mode='OBJECT')
bpy.ops.object.select_all(action='DESELECT') #deselect all objects
rig.select = True
# Bone constraints. Armature must be in pose mode.
bpy.ops.object.mode_set(mode='POSE')
# Damped Track constraint: y7w1.upper.right tracks the tail of y4y5 on the left arm (left hand)
cns = rig.pose.bones['y7w1.upper.right'].constraints.new('DAMPED_TRACK')
cns.name = 'Damped Track'
cns.target = arm_left.rig
cns.subtarget = 'y4y5'
cns.head_tail = 1
# Copy Location constraint: w3w4.upper.right follows the tail of y4y5 on the left arm (left hand)
cns = rig.pose.bones['w3w4.upper.right'].constraints.new('COPY_LOCATION')
cns.name = 'Copy Location'
cns.target = arm_left.rig
cns.subtarget = 'y4y5'
cns.head_tail = 1
bpy.ops.object.mode_set(mode='OBJECT')
bpy.ops.object.select_all(action='DESELECT') #deselect all objects
# w3w4.upper.right scale setting
def configScale(self, rig, interval, scale_frame_start, scale_frame_mid, scale_frame_end, start_value, mid_value, end_value):
# Bone constraints. Armature must be in pose mode.
bpy.ops.object.mode_set(mode='POSE')
# insert scale keyframes at the start, mid, and end frames
keyframe_insert_interval = interval
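# Note: each keyed scale component is set to math.radians(value), so a value of
# v keys a scale factor of v * pi / 180 on every axis.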
rig.pose.bones["w3w4.upper.right"].scale.x = math.radians(start_value)
rig.pose.bones["w3w4.upper.right"].scale.y = math.radians(start_value)
rig.pose.bones["w3w4.upper.right"].scale.z = math.radians(start_value)
rig.pose.bones["w3w4.upper.right"].keyframe_insert(data_path="scale",frame=scale_frame_start)
rig.pose.bones["w3w4.upper.right"].scale.x = math.radians(mid_value)
rig.pose.bones["w3w4.upper.right"].scale.y = math.radians(mid_value)
rig.pose.bones["w3w4.upper.right"].scale.z = math.radians(mid_value)
rig.pose.bones["w3w4.upper.right"].keyframe_insert(data_path="scale",frame=scale_frame_mid)
rig.pose.bones["w3w4.upper.right"].scale.x = math.radians(end_value)
rig.pose.bones["w3w4.upper.right"].scale.y = math.radians(end_value)
rig.pose.bones["w3w4.upper.right"].scale.z = math.radians(end_value)
rig.pose.bones["w3w4.upper.right"].keyframe_insert(data_path="scale",frame=scale_frame_end)
# for curve in bpy.context.active_object.animation_data.action.fcurves:
# cycles = curve.modifiers.new(type='CYCLES')
# cycles.mode_before = 'REPEAT_OFFSET'
# cycles.mode_after = 'REPEAT_OFFSET'
# for keyframe in curve.keyframe_points:
# keyframe.interpolation = 'LINEAR'
bpy.ops.object.mode_set(mode='OBJECT')
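# LeftArm is a 6-joint arm linkage derived from Formula. It overrides
# configMovement with hand-placed joint coordinates and constructMovement with a
# bone chain that ends in the y5w1/w1w2 hand bones plus the y4y6/y6w3 gimbal
# bones. A hypothetical instantiation (the actual call site is elsewhere in the
# script; the template joint meshes and the module-level interval/frame_start/
# frame_end globals must already exist):
#   arm_left = LeftArm(P, A, 'walk', 'arm', 'left', start, end)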
class LeftArm(Formula):
J = 6 #joint number
# Overriding
def __init__(self, P, A, move, part, helicity, start, end):
global interval
global frame_start
global frame_end
self.interval = interval
self.frame_start = frame_start
self.frame_end = frame_end
# pivot factor
self.P = P
# scale factor
self.A = A
# name
self.move = move
# element
self.part = part
# element helicity
self.helicity = helicity
self.start = start
self.end = end
bpy.ops.object.mode_set(mode='OBJECT')
# Create armature and object
self.amt = bpy.data.armatures.new(move + '.' + part + '.' + helicity + '.data')
self.rig = bpy.data.objects.new(move + '.' + part + '.' + helicity, self.amt)
# Joints
self.a = [0 for i in range(4)] # Joint α
self.b = [0 for i in range(self.J)] # Joint β
self.y = [0 for i in range(self.J+1)] # Joint γ
self.o = [0 for i in range(self.J)] # Joint δ
self.w = [0 for i in range(self.J)] # Joint ω
# Configuration Movement
self.configMovement(self.P, self.A, self.J, self.a, self.b, self.y, self.o, self.w)
# Construction Movement
self.constructMovement(self.J, self.helicity, self.amt, self.rig, self.a, self.b, self.y, self.o, self.w)
# Construction Rotation
self.configRotation(self.rig, self.interval, self.frame_start, self.frame_end, self.start, self.end)
# Configuration Linkage
self.configLink(self.A, self.J, self.helicity, self.rig, self.move, self.part)
# Construction Linkage
self.constructLink(self.A, self.J, self.helicity, self.rig, self.move, self.part)
# Overriding Configuration Movement
def configMovement(self, P, A, J, a, b, y, o, w):
a[1] = mathutils.Euler((P, A, 0), 'XYZ')
print ("a1 =", a[1])
a[2] = mathutils.Euler((A, -A, 0), 'XYZ')
print ("a2 =", a[2])
b[1] = mathutils.Euler((-A, A, 0), 'XYZ')
print ("b1 =", b[1])
o[1] = mathutils.Euler((A, A, 0), 'XYZ')
print ("o1 =", o[1])
B = A * 2 * sqrt (2)
C = B + (B * sqrt (2))
D = C * sqrt (2)
E = C + D
y[1] = mathutils.Euler((-A, -A, 0), 'XYZ')
print ("y1 =", y[1])
y[2] = mathutils.Euler((0.405231, -0.871367, 0), 'XYZ')
print ("y2 =", y[2])
b[2] = mathutils.Euler((1.10081, -0.097826, 0), 'XYZ')
print ("b2 =", b[2])
o[2] = mathutils.Euler((-1.11656, -0.810155, 0), 'XYZ')
print ("o2 =", o[2])
y[3] = mathutils.Euler((6.58612, 1.68254, 0), 'XYZ')
print ("y3 =", y[3])
b[3] = mathutils.Euler((6.19272, 1.02113, 0), 'XYZ')
print ("b3 =", b[3])
o[3] = mathutils.Euler((6.13951, 1.51912, 0), 'XYZ')
print ("o3 =", o[3])
y[4] = mathutils.Euler((10.6357, 5.38492, 0), 'XYZ')
print ("y4 =", y[4])
b[4] = mathutils.Euler((10.1215, 5.14466, 0), 'XYZ')
print ("b4 =", b[4])
o[4] = mathutils.Euler((10.3915, 5.5772, 0), 'XYZ')
print ("o4 =", o[4])
y[5] = mathutils.Euler((11.9006, 6.53608, 0), 'XYZ')
print ("y5 =", y[5])
w[1] = mathutils.Euler((11.9006, 6.53608, 68.3593), 'XYZ')
print ("w1 =", w[1])
w[2] = mathutils.Euler((12.0181, 6.40694, 68.3593), 'XYZ')
print ("w2 =", w[2])
y[6] = mathutils.Euler((12.346, 5.38258, 0), 'XYZ')
print ("y6 =", y[6])
w[3] = mathutils.Euler((12.346, 5.38258, 68.3593), 'XYZ')
print ("w3 =", w[3])
def constructMovement(self, J, helicity, amt, rig, a, b, y, o, w):
# Linkages
aa = [[0 for i in range(4)] for j in range(4)] # Link α(i) - α(j)
ab = [[0 for i in range(4)] for j in range(4)] # Link α(i) - β(j)
ya = [[0 for i in range(4)] for j in range(4)] # Link γ(i) - α(j)
ao = [[0 for i in range(4)] for j in range(4)] # Link α(i) - δ(j)
ob = [[0 for i in range(self.J)] for j in range(self.J)] # Link δ(i) - β(j)
yy = [[0 for i in range(self.J+1)] for j in range(self.J)] # Link γ(i) - γ(j)
by = [[0 for i in range(self.J)] for j in range(self.J)] # Link β(i) - γ(j)
yo = [[0 for i in range(self.J)] for j in range(self.J)] # Link γ(i) - δ(j)
yw = [[0 for i in range(self.J)] for j in range(self.J+1)] # Link γ(i) - ω(j)
ww = [[0 for i in range(self.J)] for j in range(self.J)] # Link ω(i) - ω(j)
rig.location = mathutils.Euler((0.0, 0.0, 0.0), 'XYZ')
rig.show_x_ray = True
amt.show_names = True
amt.draw_type = 'STICK'
# amt.draw_type = 'BBONE'
# Link object to scene
scn = bpy.context.scene
scn.objects.link(rig)
scn.objects.active = rig
scn.update()
# Edit
bpy.ops.object.editmode_toggle()
# Construction Linkage
aa[2][1] = amt.edit_bones.new('a2a1')
aa[2][1].head = a[2]
aa[2][1].tail = a[1]
ab[1][1] = amt.edit_bones.new('a1b1')
ab[1][1].head = a[1]
ab[1][1].tail = b[1]
ab[1][1].parent = aa[2][1]
by[1][1] = amt.edit_bones.new('b1y1')
by[1][1].head = b[1]
by[1][1].tail = y[1]
by[1][1].parent = ab[1][1]
by[1][1].use_inherit_rotation = False
ya[1][2] = amt.edit_bones.new('y1a2')
ya[1][2].head = y[1]
ya[1][2].tail = a[2]
ya[1][2].parent = by[1][1]
ao[2][1] = amt.edit_bones.new('a2o1')
ao[2][1].head = a[2]
ao[2][1].tail = o[1]
ao[2][1].parent = ya[1][2]
ob[1][2] = amt.edit_bones.new('o1b2')
ob[1][2].head = o[1]
ob[1][2].tail = b[2]
ob[1][2].parent = ao[2][1]
yy[1][2] = amt.edit_bones.new('y1y2')
yy[1][2].head = y[1]
yy[1][2].tail = y[2]
yy[1][2].parent = by[1][1]
for j in range(2, J - 1):
by[j][j] = amt.edit_bones.new('b'+ str(j) + 'y'+ str(j))
by[j][j].head = b[j]
by[j][j].tail = y[j]
by[j][j].parent = ob[j-1][j]
yo[j][j] = amt.edit_bones.new('y'+ str(j) + 'o'+ str(j))
yo[j][j].head = y[j]
yo[j][j].tail = o[j]
yo[j][j].parent = yy[j-1][j]
yy[j][j+1] = amt.edit_bones.new('y'+ str(j) + 'y'+ str(j+1))
yy[j][j+1].head = y[j]
yy[j][j+1].tail = y[j+1]
yy[j][j+1].parent = by[j][j]
if j < (J-2):
ob[j][j+1] = amt.edit_bones.new('o'+ str(j) + 'b'+ str(j+1))
ob[j][j+1].head = o[j]
ob[j][j+1].tail = b[j+1]
ob[j][j+1].parent = yo[j][j]
yw[5][1] = amt.edit_bones.new('y5w1')
yw[5][1].head = y[5]
yw[5][1].tail = w[1]
yw[5][1].parent = yy[4][5]
ww[1][2] = amt.edit_bones.new('w1w2')
ww[1][2].head = w[1]
ww[1][2].tail = w[2]
ww[1][2].parent = yw[5][1]
yy[4][6] = amt.edit_bones.new('y4y6.gimbal')
yy[4][6].head = y[4]
yy[4][6].tail = y[6]
yy[4][6].parent = by[4][4]
yw[6][3] = amt.edit_bones.new('y6w3.gimbal')
yw[6][3].head = y[6]
yw[6][3].tail = w[3]
yw[6][3].parent = yy[4][6]
# select all bones
#bpy.ops.pose.select_all(action="SELECT")
for b in amt.edit_bones:
b.select = True
if helicity == 'right':
bpy.ops.armature.calculate_roll(type='GLOBAL_POS_Z')
else:
bpy.ops.armature.calculate_roll(type='GLOBAL_NEG_Z')
# Bone constraints. Armature must be in pose mode.
bpy.ops.object.mode_set(mode='POSE')
# IK constraint
cns = rig.pose.bones['y1a2'].constraints.new('IK')
cns.name = 'Ik'
cns.target = rig
cns.subtarget = 'a2a1'
cns.chain_count = 2
cns.use_stretch = False
for j in range(2, J - 1):
cns = rig.pose.bones['b'+str(j) +'y'+str(j)].constraints.new('IK')
cns.name = 'Ik'
cns.target = rig
cns.subtarget = 'y'+str(j)+'o'+str(j)
cns.iterations = 500
cns.chain_count = 2
cns.use_stretch = False
cns = rig.pose.bones['y6w3.gimbal'].constraints.new('IK')
cns.name = 'Ik'
cns.target = rig
cns.subtarget = 'w1w2'
cns.pole_target = rig
cns.pole_subtarget = 'y5w1'
cns.pole_angle = math.radians(90)
cns.iterations = 500
cns.chain_count = 2
cns.use_stretch = False
bpy.ops.object.mode_set(mode='OBJECT')
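# configLink (left arm): same mesh-duplication scheme as the body, but using the
# arm-specific templates such as "joint.gold.arm-left.000" and the
# y4y5 / y4y6.gimbal / y6w3.gimbal hand meshes.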
def configLink(self, A, J, helicity, rig, move, part):
bpy.ops.object.mode_set(mode='OBJECT')
Q = (0.18648+0.146446)*A
# Z = -Q*2
Z = 0.0
obj_joint = bpy.data.objects["joint.gold.arm-left.000"].copy()
obj_joint.location = (0.0, 0.0, -Q*3+Z)
obj_joint.scale = (A, A, A)
obj_joint.name = "a2a1.mesh." + move + '.' + part +'.' + helicity
bpy.context.scene.objects.link(obj_joint)
obj_joint = bpy.data.objects["joint.silver.002"].copy()
obj_joint.location = (0.0, 0.0, +Q*(3+4)+Z)
obj_joint.scale = (A, A, A)
obj_joint.name = "y1a2.mesh." + move + '.' + part +'.' + helicity
bpy.context.scene.objects.link(obj_joint)
obj_joint = bpy.data.objects["joint.copper.001"].copy()
obj_joint.location = (0.0, 0.0, +Q*(1+4)+Z)
obj_joint.scale = (A, A, A)
obj_joint.name = "a2o1.mesh." + move + '.' + part +'.' + helicity
bpy.context.scene.objects.link(obj_joint)
obj_joint = bpy.data.objects["joint.blue.001"].copy()
obj_joint.location = (0.0, 0.0, -Q*2+Z)
obj_joint.scale = (A, A, A)
obj_joint.name = "a1b1.mesh." + move + '.' + part +'.' + helicity
bpy.context.scene.objects.link(obj_joint)
for n in range(1, J - 1):
if n <= (J-2):
# Pattern 2 of by
obj_joint = bpy.data.objects["joint.green.001"].copy()
obj_joint.location = (0.0, 0.0, -Q + Q*((n+1) % 2)*4 +Z)
obj_joint.scale = (A, A, A)
obj_joint.name = "b"+str(n)+"y"+str(n)+".mesh." + move + '.' + part +'.' + helicity
bpy.context.scene.objects.link(obj_joint)
if n <= (J-3):
# Pattern 2 of yy
obj_joint = bpy.data.objects["joint.gold.00"+str(1 + (n+1) % 2)].copy()
obj_joint.location = (0.0, 0.0, +Q*(1 - (n % 2))*2+Z)
obj_joint.scale = (A, A, A)
obj_joint.name = "y"+str(n)+"y"+str(n+1)+".mesh." + move + '.' + part +'.' + helicity
bpy.context.scene.objects.link(obj_joint)
if n <= (J-3):
# Pattern 1 of ob
obj_joint = bpy.data.objects["joint.blue.00"+str(1 + (n+1) % 2)].copy()
obj_joint.location = (0.0, 0.0, -Q*4 + Q*(n % 2)*8 +Z)
obj_joint.scale = (A, A, A)
obj_joint.name = "o"+str(n)+"b"+str(n+1)+".mesh." + move + '.' + part +'.' + helicity
bpy.context.scene.objects.link(obj_joint)
# Pattern 2 of yo
obj_joint = bpy.data.objects["joint.copper.001"].copy()
obj_joint.location = (0.0, 0.0, -Q + Q*((n+1) % 2)*4 +Z)
obj_joint.scale = (A, A, A)
obj_joint.name = "y"+str(n+1)+"o"+str(n+1)+".mesh." + move + '.' + part +'.' + helicity
bpy.context.scene.objects.link(obj_joint)
obj_joint = bpy.data.objects["joint.gold.y4y5.002"].copy()
obj_joint.location = (0.0, 0.0, +Z)
obj_joint.scale = (A, A, A)
obj_joint.name = "y4y5.mesh." + move + '.' + part +'.' + helicity
bpy.context.scene.objects.link(obj_joint)
obj_joint = bpy.data.objects["cursor.001"].copy()
obj_joint.location = (0.0, 0.0, 0.0)
obj_joint.scale = (A, A, A)
obj_joint.name = "y5w1.mesh." + move + '.' + part +'.' + helicity
bpy.context.scene.objects.link(obj_joint)
obj_joint = bpy.data.objects["cursor.001"].copy()
obj_joint.location = (0.0, 0.0, 0.0)
obj_joint.scale = (A, A, A)
obj_joint.name = "w1w2.mesh." + move + '.' + part +'.' + helicity
bpy.context.scene.objects.link(obj_joint)
obj_joint = bpy.data.objects["joint.green.y4y6.gimbal.001"].copy()
obj_joint.location = (0.0, 0.0, +Z)
obj_joint.scale = (A, A, A)
obj_joint.name = "y4y6.gimbal.mesh." + move + '.' + part +'.' + helicity
bpy.context.scene.objects.link(obj_joint)
obj_joint = bpy.data.objects["joint.gold.y6w3.gimbal.001"].copy()
obj_joint.location = (0.0, 0.0, +Z)
obj_joint.scale = (A, A, A)
obj_joint.name = "y6w3.gimbal.mesh." + move + '.' + part +'.' + helicity
bpy.context.scene.objects.link(obj_joint)
for ob in context.scene.objects:
if "mesh" in ob.name:
ob.select = True
bpy.ops.object.make_single_user(type='SELECTED_OBJECTS', object=True, obdata=True, material=True, texture=True, animation=True)
bpy.context.scene.cursor_location = (0.0, 0.0, 0.0)
bpy.ops.object.origin_set(type='ORIGIN_CURSOR')
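# RightArm mirrors LeftArm with 5 joints; its hand chain ends at y4w1/w1w2 and
# its gimbal pair is y3y5/y5w3.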
class RightArm(Formula):
J = 5 #joint number
# Overriding
def __init__(self, P, A, move, part, helicity, start, end):
global interval
global frame_start
global frame_end
self.interval = interval
self.frame_start = frame_start
self.frame_end = frame_end
# pivot factor
self.P = P
# scale factor
self.A = A
# name
self.move = move
# element
self.part = part
# element helicity
self.helicity = helicity
self.start = start
self.end = end
bpy.ops.object.mode_set(mode='OBJECT')
# Create armature and object
self.amt = bpy.data.armatures.new(move + '.' + part + '.' + helicity + '.data')
self.rig = bpy.data.objects.new(move + '.' + part + '.' + helicity, self.amt)
# Joints
self.a = [0 for i in range(4)] # Joint α
self.b = [0 for i in range(self.J)] # Joint β
self.y = [0 for i in range(self.J+1)] # Joint γ
self.o = [0 for i in range(self.J)] # Joint δ
self.w = [0 for i in range(self.J)] # Joint ω
# Configuration Movement
self.configMovement(self.P, self.A, self.J, self.a, self.b, self.y, self.o, self.w)
# Construction Movement
self.constructMovement(self.J, self.helicity, self.amt, self.rig, self.a, self.b, self.y, self.o, self.w)
# Construction Rotation
self.configRotation(self.rig, self.interval, self.frame_start, self.frame_end, self.start, self.end)
# Configuration Linkage
self.configLink(self.A, self.J, self.helicity, self.rig, self.move, self.part)
# Construction Linkage
self.constructLink(self.A, self.J, self.helicity, self.rig, self.move, self.part)
# Overriding Configuration Movement
def configMovement(self, P, A, J, a, b, y, o, w):
a[1] = mathutils.Euler((P, A, 0), 'XYZ')
print ("a1 =", a[1])
a[2] = mathutils.Euler((A, -A, 0), 'XYZ')
print ("a2 =", a[2])
b[1] = mathutils.Euler((-A, A, 0), 'XYZ')
print ("b1 =", b[1])
o[1] = mathutils.Euler((A, A, 0), 'XYZ')
print ("o1 =", o[1])
B = A * 2 * sqrt (2)
C = B + (B * sqrt (2))
D = C * sqrt (2)
E = C + D
y[1] = mathutils.Euler((-A, -A, 0), 'XYZ')
print ("y1 =", y[1])
y[2] = mathutils.Euler((5.23572, -4.00436, 0), 'XYZ')
print ("y2 =", y[2])
b[2] = mathutils.Euler((5.80363, -3.26182, 0), 'XYZ')
print ("b2 =", b[2])
o[2] = mathutils.Euler((4.64316, -3.47289, 0), 'XYZ')
print ("o2 =", o[2])
y[3] = mathutils.Euler((10.4745, -5.64019, 0), 'XYZ')
print ("y3 =", y[3])
b[3] = mathutils.Euler((9.87603, -5.84587, 0), 'XYZ')
print ("b3 =", b[3])
o[3] = mathutils.Euler((10.042, -5.44267, 0), 'XYZ')
print ("o3 =", o[3])
y[4] = mathutils.Euler((12.3316, -5.71339, 0), 'XYZ')
print ("y4 =", y[4])
w[1] = mathutils.Euler((12.3316, -5.71339, -1.0), 'XYZ')
print ("w1 =", w[1])
w[2] = mathutils.Euler((12.3316, -5.61339, -1.0), 'XYZ')
print ("w2 =", w[2])
y[5] = mathutils.Euler((12.333, -5.6402, 0), 'XYZ')
print ("y5 =", y[5])
w[3] = mathutils.Euler((12.333, -5.6402, -1.0), 'XYZ')
print ("w3 =", w[3])
def constructMovement(self, J, helicity, amt, rig, a, b, y, o, w):
# Linkages
aa = [[0 for i in range(4)] for j in range(4)] # Link α(i) - α(j)
ab = [[0 for i in range(4)] for j in range(4)] # Link α(i) - β(j)
ya = [[0 for i in range(4)] for j in range(4)] # Link γ(i) - α(j)
ao = [[0 for i in range(4)] for j in range(4)] # Link α(i) - δ(j)
ob = [[0 for i in range(self.J)] for j in range(self.J)] # Link δ(i) - β(j)
yy = [[0 for i in range(self.J+1)] for j in range(self.J)] # Link γ(i) - γ(j)
by = [[0 for i in range(self.J)] for j in range(self.J)] # Link β(i) - γ(j)
yo = [[0 for i in range(self.J)] for j in range(self.J)] # Link γ(i) - δ(j)
yw = [[0 for i in range(self.J)] for j in range(self.J+1)] # Link γ(i) - ω(j)
ww = [[0 for i in range(self.J)] for j in range(self.J)] # Link ω(i) - ω(j)
rig.location = mathutils.Euler((0.0, 0.0, 0.0), 'XYZ')
rig.show_x_ray = True
amt.show_names = True
amt.draw_type = 'STICK'
# amt.draw_type = 'BBONE'
# Link object to scene
scn = bpy.context.scene
scn.objects.link(rig)
scn.objects.active = rig
scn.update()
# Edit
bpy.ops.object.editmode_toggle()
# Construction Linkage
aa[2][1] = amt.edit_bones.new('a2a1')
aa[2][1].head = a[2]
aa[2][1].tail = a[1]
ab[1][1] = amt.edit_bones.new('a1b1')
ab[1][1].head = a[1]
ab[1][1].tail = b[1]
ab[1][1].parent = aa[2][1]
by[1][1] = amt.edit_bones.new('b1y1')
by[1][1].head = b[1]
by[1][1].tail = y[1]
by[1][1].parent = ab[1][1]
by[1][1].use_inherit_rotation = False
ya[1][2] = amt.edit_bones.new('y1a2')
ya[1][2].head = y[1]
ya[1][2].tail = a[2]
ya[1][2].parent = by[1][1]
ao[2][1] = amt.edit_bones.new('a2o1')
ao[2][1].head = a[2]
ao[2][1].tail = o[1]
ao[2][1].parent = ya[1][2]
ob[1][2] = amt.edit_bones.new('o1b2')
ob[1][2].head = o[1]
ob[1][2].tail = b[2]
ob[1][2].parent = ao[2][1]
yy[1][2] = amt.edit_bones.new('y1y2')
yy[1][2].head = y[1]
yy[1][2].tail = y[2]
yy[1][2].parent = by[1][1]
for j in range(2, J - 1):
by[j][j] = amt.edit_bones.new('b'+ str(j) + 'y'+ str(j))
by[j][j].head = b[j]
by[j][j].tail = y[j]
by[j][j].parent = ob[j-1][j]
yo[j][j] = amt.edit_bones.new('y'+ str(j) + 'o'+ str(j))
yo[j][j].head = y[j]
yo[j][j].tail = o[j]
yo[j][j].parent = yy[j-1][j]
yy[j][j+1] = amt.edit_bones.new('y'+ str(j) + 'y'+ str(j+1))
yy[j][j+1].head = y[j]
yy[j][j+1].tail = y[j+1]
yy[j][j+1].parent = by[j][j]
if j < (J-2):
ob[j][j+1] = amt.edit_bones.new('o'+ str(j) + 'b'+ str(j+1))
ob[j][j+1].head = o[j]
ob[j][j+1].tail = b[j+1]
ob[j][j+1].parent = yo[j][j]
yw[4][1] = amt.edit_bones.new('y4w1')
yw[4][1].head = y[4]
yw[4][1].tail = w[1]
yw[4][1].parent = yy[3][4]
ww[1][2] = amt.edit_bones.new('w1w2')
ww[1][2].head = w[1]
ww[1][2].tail = w[2]
ww[1][2].parent = yw[4][1]
yy[3][5] = amt.edit_bones.new('y3y5.gimbal')
yy[3][5].head = y[3]
yy[3][5].tail = y[5]
yy[3][5].parent = by[3][3]
yw[5][3] = amt.edit_bones.new('y5w3.gimbal')
yw[5][3].head = y[5]
yw[5][3].tail = w[3]
yw[5][3].parent = yy[3][5]
# select all bones
#bpy.ops.pose.select_all(action="SELECT")
for b in amt.edit_bones:
b.select = True
if helicity == 'right':
bpy.ops.armature.calculate_roll(type='GLOBAL_POS_Z')
else:
bpy.ops.armature.calculate_roll(type='GLOBAL_NEG_Z')
# Bone constraints. Armature must be in pose mode.
bpy.ops.object.mode_set(mode='POSE')
# IK constraint
cns = rig.pose.bones['y1a2'].constraints.new('IK')
cns.name = 'Ik'
cns.target = rig
cns.subtarget = 'a2a1'
cns.chain_count = 2
cns.use_stretch = False
for j in range(2, J - 1):
cns = rig.pose.bones['b'+str(j) +'y'+str(j)].constraints.new('IK')
cns.name = 'Ik'
cns.target = rig
cns.subtarget = 'y'+str(j)+'o'+str(j)
cns.iterations = 500
cns.chain_count = 2
cns.use_stretch = False
cns = rig.pose.bones['y5w3.gimbal'].constraints.new('IK')
cns.name = 'Ik'
cns.target = rig
cns.subtarget = 'w1w2'
cns.pole_target = rig
cns.pole_subtarget = 'y4w1'
cns.pole_angle = math.radians(90)
cns.iterations = 500
cns.chain_count = 2
cns.use_stretch = False
bpy.ops.object.mode_set(mode='OBJECT')
def configLink(self, A, J, helicity, rig, move, part):
bpy.ops.object.mode_set(mode='OBJECT')
Q = (0.18648+0.146446)*A
# Z = -Q*2
Z = 0.0
obj_joint = bpy.data.objects["joint.gold.arm-right.006"].copy()
obj_joint.location = (0.0, 0.0, -Q*3+Z)
obj_joint.scale = (A, A, A)
obj_joint.name = "a2a1.mesh." + move + '.' + part +'.' + helicity
bpy.context.scene.objects.link(obj_joint)
obj_joint = bpy.data.objects["joint.silver.002"].copy()
obj_joint.location = (0.0, 0.0, +Q*(3+4)+Z)
obj_joint.scale = (A, A, A)
obj_joint.name = "y1a2.mesh." + move + '.' + part +'.' + helicity
bpy.context.scene.objects.link(obj_joint)
obj_joint = bpy.data.objects["joint.copper.001"].copy()
obj_joint.location = (0.0, 0.0, +Q*(1+4)+Z)
obj_joint.scale = (A, A, A)
obj_joint.name = "a2o1.mesh." + move + '.' + part +'.' + helicity
bpy.context.scene.objects.link(obj_joint)
obj_joint = bpy.data.objects["joint.blue.001"].copy()
obj_joint.location = (0.0, 0.0, -Q*2+Z)
obj_joint.scale = (A, A, A)
obj_joint.name = "a1b1.mesh." + move + '.' + part +'.' + helicity
bpy.context.scene.objects.link(obj_joint)
for n in range(1, J - 1):
if n <= (J-2):
# Pattern 2 of by
obj_joint = bpy.data.objects["joint.green.001"].copy()
obj_joint.location = (0.0, 0.0, -Q + Q*((n+1) % 2)*4 +Z)
obj_joint.scale = (A, A, A)
obj_joint.name = "b"+str(n)+"y"+str(n)+".mesh." + move + '.' + part +'.' + helicity
bpy.context.scene.objects.link(obj_joint)
if n <= (J-3):
# Pattern 2 of yy
obj_joint = bpy.data.objects["joint.gold.00"+str(1 + (n+1) % 2)].copy()
obj_joint.location = (0.0, 0.0, +Q*(1 - (n % 2))*2+Z)
obj_joint.scale = (A, A, A)
obj_joint.name = "y"+str(n)+"y"+str(n+1)+".mesh." + move + '.' + part +'.' + helicity
bpy.context.scene.objects.link(obj_joint)
if n <= (J-3):
# Pattern 1 of ob
obj_joint = bpy.data.objects["joint.blue.00"+str(1 + (n+1) % 2)].copy()
obj_joint.location = (0.0, 0.0, -Q*4 + Q*(n % 2)*8 +Z)
obj_joint.scale = (A, A, A)
obj_joint.name = "o"+str(n)+"b"+str(n+1)+".mesh." + move + '.' + part +'.' + helicity
bpy.context.scene.objects.link(obj_joint)
# Pattern 2 of yo
obj_joint = bpy.data.objects["joint.copper.001"].copy()
obj_joint.location = (0.0, 0.0, -Q + Q*((n+1) % 2)*4 +Z)
obj_joint.scale = (A, A, A)
obj_joint.name = "y"+str(n+1)+"o"+str(n+1)+".mesh." + move + '.' + part +'.' + helicity
bpy.context.scene.objects.link(obj_joint)
obj_joint = bpy.data.objects["joint.gold.y3y4.001"].copy()
obj_joint.location = (0.0, 0.0, +Z)
obj_joint.scale = (A, A, A)
obj_joint.name = "y3y4.mesh." + move + '.' + part +'.' + helicity
bpy.context.scene.objects.link(obj_joint)
obj_joint = bpy.data.objects["cursor.001"].copy()
obj_joint.location = (0.0, 0.0, 0.0)
obj_joint.scale = (A, A, A)
obj_joint.name = "y4w1.mesh." + move + '.' + part +'.' + helicity
bpy.context.scene.objects.link(obj_joint)
obj_joint = bpy.data.objects["cursor.001"].copy()
obj_joint.location = (0.0, 0.0, 0.0)
obj_joint.scale = (A, A, A)
obj_joint.name = "w1w2.mesh." + move + '.' + part +'.' + helicity
bpy.context.scene.objects.link(obj_joint)
obj_joint = bpy.data.objects["joint.gold.y3y5.gimbal.000"].copy()
obj_joint.location = (0.0, 0.0, +Z)
obj_joint.scale = (A, A, A)
obj_joint.name = "y3y5.gimbal.mesh." + move + '.' + part +'.' + helicity
bpy.context.scene.objects.link(obj_joint)
obj_joint = bpy.data.objects["joint.gold.y5w3.gimbal.001"].copy()
obj_joint.location = (0.0, 0.0, +Z)
obj_joint.scale = (A, A, A)
obj_joint.name = "y5w3.gimbal.mesh." + move + '.' + part +'.' + helicity
bpy.context.scene.objects.link(obj_joint)
for ob in context.scene.objects:
if "mesh" in ob.name:
ob.select = True
bpy.ops.object.make_single_user(type='SELECTED_OBJECTS', object=True, obdata=True, material=True, texture=True, animation=True)
bpy.context.scene.cursor_location = (0.0, 0.0, 0.0)
bpy.ops.object.origin_set(type='ORIGIN_CURSOR')
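# LeftLeg is a 7-joint leg linkage. Besides the usual chain it builds extra bone
# chains toward the w joints (y2b3/b3w1/w1w2 and y1b5/b5w4/w4w5) together with
# their gimbal counterparts, and its setParent also positions the pitch rig and
# the right leg relative to this rig.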
class LeftLeg(Formula):
J = 7 #joint number
# Overriding
def __init__(self, P, A, move, part, helicity, start, end,
leg_left_loc, leg_left_rot,
leg_right_loc, leg_right_rot, leg_right,
pitch_loc, pitch_rot, pitch):
global interval
global frame_start
global frame_end
self.interval = interval
self.frame_start = frame_start
self.frame_end = frame_end
# pivot factor
self.P = P
# scale factor
self.A = A
# name
self.move = move
# element
self.part = part
# element helicity
self.helicity = helicity
self.start = start
self.end = end
# leg_left
self.leg_left_loc = leg_left_loc
self.leg_left_rot = leg_left_rot
# leg_right
self.leg_right_loc = leg_right_loc
self.leg_right_rot = leg_right_rot
self.leg_right = leg_right
# pitch
self.pitch_loc = pitch_loc
self.pitch_rot = pitch_rot
self.pitch = pitch
# body
self.body = body
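# note: `body` is not an __init__ parameter; it is resolved from module scope
# when LeftLeg is instantiated, so the body rig must already have been created.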
bpy.ops.object.mode_set(mode='OBJECT')
# Create armature and object
self.amt = bpy.data.armatures.new(move + '.' + part + '.' + helicity + '.data')
self.rig = bpy.data.objects.new(move + '.' + part + '.' + helicity, self.amt)
# Joints
self.a = [0 for i in range(4)] # Joint α
self.b = [0 for i in range(self.J)] # Joint β
self.y = [0 for i in range(self.J)] # Joint γ
self.o = [0 for i in range(self.J)] # Joint δ
self.w = [0 for i in range(self.J)] # Joint ω
# Configuration Movement
self.configMovement(self.P, self.A, self.J, self.a, self.b, self.y, self.o, self.w)
# Construction Movement
self.constructMovement(self.J, self.helicity, self.amt, self.rig, self.a, self.b, self.y, self.o, self.w)
# Parent set pitch and right leg to left leg
self.setParent(self.helicity, self.move, self.rig,
self.leg_left_loc, self.leg_left_rot,
self.leg_right_loc, self.leg_right_rot, self.leg_right,
self.pitch_loc, self.pitch_rot, self.pitch)
# Construction Rotation
self.configRotation(self.rig, self.interval, self.frame_start, self.frame_end, self.start, self.end)
# Configuration Linkage
self.configLink(self.A, self.J, self.helicity, self.rig, self.move, self.part)
# Construction Linkage
self.constructLink(self.A, self.J, self.helicity, self.rig, self.move, self.part)
# Overriding Configuration Movement
def configMovement(self, P, A, J, a, b, y, o, w):
a[1] = mathutils.Euler((P, A, 0), 'XYZ')
print ("a1 =", a[1])
a[2] = mathutils.Euler((A, -A, 0), 'XYZ')
print ("a2 =", a[2])
b[1] = mathutils.Euler((-A, A, 0), 'XYZ')
print ("b1 =", b[1])
o[1] = mathutils.Euler((A, A, 0), 'XYZ')
print ("o1 =", o[1])
B = A * 2 * sqrt (2)
C = B + (B * sqrt (2))
D = C * sqrt (2)
E = C + D
y[1] = mathutils.Euler((-A, -A, 0), 'XYZ')
print ("y1 =", y[1])
y[2] = mathutils.Euler((5.64394, -7.71944, 0), 'XYZ')
print ("y2 =", y[2])
b[2] = mathutils.Euler((6.96819, -6.39353, 0), 'XYZ')
print ("b2 =", b[2])
o[2] = mathutils.Euler((7.57679, -7.71912, 0), 'XYZ')
print ("o2 =", o[2])
y[3] = mathutils.Euler((5.64394, -17.9947, 0), 'XYZ')
print ("y3 =", y[3])
o[4] = mathutils.Euler((3.89305, -26.8512, 0), 'XYZ')
print ("o4 =", o[4])
o[3] = mathutils.Euler((-1.02857, -20.6204, 0), 'XYZ')
print ("o3 =", o[3])
b[4] = mathutils.Euler((12.94517, -7.71944, 0), 'XYZ')
print ("b4 =", b[4])
b[3] = mathutils.Euler((5.64394, -7.71944, -7.30119), 'XYZ')
print ("b3 =", b[3])
w[1] = mathutils.Euler((5.64394, -5.67138, -7.30119), 'XYZ')
print ("w1 =", w[1])
w[2] = mathutils.Euler((5.64394, -5.47138, -7.30119), 'XYZ')
print ("w2 =", w[2])
w[3] = mathutils.Euler((12.94517, -5.67138, 0), 'XYZ')
print ("w3 =", w[3])
b[6] = mathutils.Euler((1.7675, -A, 0), 'XYZ')
print ("b6 =", b[6])
b[5] = mathutils.Euler((-A, -A, -2.43247), 'XYZ')
print ("b5 =", b[5])
w[4] = mathutils.Euler((-A, A, -2.43247), 'XYZ')
print ("w4 =", w[4])
w[5] = mathutils.Euler((-A, 0.835105, -2.43247), 'XYZ')
print ("w5 =", w[5])
w[6] = mathutils.Euler((1.7675, A, 0), 'XYZ')
print ("w6 =", w[6])
def constructMovement(self, J, helicity, amt, rig, a, b, y, o, w):
# Linkages
aa = [[0 for i in range(4)] for j in range(4)] # Link α(i) - α(j)
ab = [[0 for i in range(4)] for j in range(4)] # Link α(i) - β(j)
ya = [[0 for i in range(4)] for j in range(4)] # Link γ(i) - α(j)
ao = [[0 for i in range(4)] for j in range(4)] # Link α(i) - δ(j)
ob = [[0 for i in range(self.J)] for j in range(self.J)] # Link δ(i) - β(j)
yy = [[0 for i in range(self.J)] for j in range(self.J)] # Link γ(i) - γ(j)
by = [[0 for i in range(self.J)] for j in range(self.J)] # Link β(i) - γ(j)
yo = [[0 for i in range(self.J)] for j in range(self.J)] # Link γ(i) - δ(j)
yb = [[0 for i in range(self.J)] for j in range(self.J)] # Link γ(i) - β(j)
bw = [[0 for i in range(self.J)] for j in range(self.J)] # Link γ(i) - ω(j)
ww = [[0 for i in range(self.J)] for j in range(self.J)] # Link ω(i) - ω(j)
rig.location = mathutils.Euler((0.0, 0.0, 0.0), 'XYZ')
rig.show_x_ray = True
amt.show_names = True
amt.draw_type = 'STICK'
# amt.draw_type = 'BBONE'
# Link object to scene
scn = bpy.context.scene
scn.objects.link(rig)
scn.objects.active = rig
scn.update()
# Edit
bpy.ops.object.editmode_toggle()
# Construction Linkage
aa[2][1] = amt.edit_bones.new('a2a1')
aa[2][1].head = a[2]
aa[2][1].tail = a[1]
ab[1][1] = amt.edit_bones.new('a1b1')
ab[1][1].head = a[1]
ab[1][1].tail = b[1]
ab[1][1].parent = aa[2][1]
by[1][1] = amt.edit_bones.new('b1y1')
by[1][1].head = b[1]
by[1][1].tail = y[1]
by[1][1].parent = ab[1][1]
by[1][1].use_inherit_rotation = False
ya[1][2] = amt.edit_bones.new('y1a2')
ya[1][2].head = y[1]
ya[1][2].tail = a[2]
ya[1][2].parent = by[1][1]
ao[2][1] = amt.edit_bones.new('a2o1')
ao[2][1].head = a[2]
ao[2][1].tail = o[1]
ao[2][1].parent = ya[1][2]
ob[1][2] = amt.edit_bones.new('o1b2')
ob[1][2].head = o[1]
ob[1][2].tail = b[2]
ob[1][2].parent = ao[2][1]
yy[1][2] = amt.edit_bones.new('y1y2')
yy[1][2].head = y[1]
yy[1][2].tail = y[2]
yy[1][2].parent = by[1][1]
for j in range(2, J - 4):
by[j][j] = amt.edit_bones.new('b'+ str(j) + 'y'+ str(j))
by[j][j].head = b[j]
by[j][j].tail = y[j]
by[j][j].parent = ob[j-1][j]
yo[j][j] = amt.edit_bones.new('y'+ str(j) + 'o'+ str(j))
yo[j][j].head = y[j]
yo[j][j].tail = o[j]
yo[j][j].parent = yy[j-1][j]
yy[j][j+1] = amt.edit_bones.new('y'+ str(j) + 'y'+ str(j+1))
yy[j][j+1].head = y[j]
yy[j][j+1].tail = y[j+1]
yy[j][j+1].parent = by[j][j]
if j < (J-5):
ob[j][j+1] = amt.edit_bones.new('o'+ str(j) + 'b'+ str(j+1))
ob[j][j+1].head = o[j]
ob[j][j+1].tail = b[j+1]
ob[j][j+1].parent = yo[j][j]
yo[3][3] = amt.edit_bones.new('y3o3')
yo[3][3].head = y[3]
yo[3][3].tail = o[3]
yo[3][3].parent = yy[2][3]
yo[3][3].use_inherit_rotation = False
yo[3][4] = amt.edit_bones.new('y3o4')
yo[3][4].head = y[3]
yo[3][4].tail = o[4]
yo[3][4].parent = yy[2][3]
yo[3][4].use_inherit_rotation = False
yb[2][3] = amt.edit_bones.new('y2b3')
yb[2][3].head = y[2]
yb[2][3].tail = b[3]
yb[2][3].parent = yy[1][2]
bw[3][1] = amt.edit_bones.new('b3w1')
bw[3][1].head = b[3]
bw[3][1].tail = w[1]
bw[3][1].parent = yb[2][3]
ww[1][2] = amt.edit_bones.new('w1w2')
ww[1][2].head = w[1]
ww[1][2].tail = w[2]
ww[1][2].parent = bw[3][1]
yb[2][4] = amt.edit_bones.new('y2b4.gimbal')
yb[2][4].head = y[2]
yb[2][4].tail = b[4]
yb[2][4].parent = yy[1][2]
bw[4][3] = amt.edit_bones.new('b4w3.gimbal')
bw[4][3].head = b[4]
bw[4][3].tail = w[3]
bw[4][3].parent = yb[2][4]
yb[1][5] = amt.edit_bones.new('y1b5')
yb[1][5].head = y[1]
yb[1][5].tail = b[5]
yb[1][5].parent = by[1][1]
bw[5][4] = amt.edit_bones.new('b5w4')
bw[5][4].head = b[5]
bw[5][4].tail = w[4]
bw[5][4].parent = yb[1][5]
ww[4][5] = amt.edit_bones.new('w4w5')
ww[4][5].head = w[4]
ww[4][5].tail = w[5]
ww[4][5].parent = bw[5][4]
yb[1][6] = amt.edit_bones.new('y1b6.gimbal')
yb[1][6].head = y[1]
yb[1][6].tail = b[6]
yb[1][6].parent = by[1][1]
bw[6][6] = amt.edit_bones.new('b6w6.gimbal')
bw[6][6].head = b[6]
bw[6][6].tail = w[6]
bw[6][6].parent = yb[1][6]
# all bones select
#bpy.ops.pose.select_all(action="SELECT")
for b in amt.edit_bones:
b.select = True
if helicity == 'right':
bpy.ops.armature.calculate_roll(type='GLOBAL_POS_Z')
else:
bpy.ops.armature.calculate_roll(type='GLOBAL_NEG_Z')
# Bone constraints. Armature must be in pose mode.
bpy.ops.object.mode_set(mode='POSE')
# IK constraint
cns = rig.pose.bones['y1a2'].constraints.new('IK')
cns.name = 'Ik'
cns.target = rig
cns.subtarget = 'a2a1'
cns.chain_count = 2
cns.use_stretch = False
for j in range(2, J - 4):
cns = rig.pose.bones['b'+str(j) +'y'+str(j)].constraints.new('IK')
cns.name = 'Ik'
cns.target = rig
cns.subtarget = 'y'+str(j)+'o'+str(j)
cns.iterations = 500
cns.chain_count = 2
cns.use_stretch = False
cns = rig.pose.bones['b4w3.gimbal'].constraints.new('IK')
cns.name = 'Ik'
cns.target = rig
cns.subtarget = 'w1w2'
cns.pole_target = rig
cns.pole_subtarget = 'b3w1'
cns.pole_angle = math.radians(0)
cns.iterations = 500
cns.chain_count = 2
cns.use_stretch = False
cns = rig.pose.bones['b6w6.gimbal'].constraints.new('IK')
cns.name = 'Ik'
cns.target = rig
cns.subtarget = 'w4w5'
cns.pole_target = rig
cns.pole_subtarget = 'b5w4'
cns.pole_angle = math.radians(0)
cns.iterations = 500
cns.chain_count = 2
cns.use_stretch = False
bpy.ops.object.mode_set(mode='OBJECT')
# Parent the pitch and right-leg rigs to the left-leg rig
def setParent(self, helicity, move, rig,
leg_left_loc, leg_left_rot,
leg_right_loc, leg_right_rot, leg_right,
pitch_loc, pitch_rot, pitch):
# leg left position
rig.location = leg_left_loc
rig.rotation_euler = leg_left_rot
# leg right position
leg_right.rig.location = leg_right_loc
leg_right.rig.rotation_euler = leg_right_rot
# pitch position
pitch.rig.location = pitch_loc
pitch.rig.rotation_euler = pitch_rot
# pitch to left leg
bpy.ops.object.mode_set(mode='OBJECT')
bpy.context.scene.frame_current = 0
bpy.ops.object.select_all(action='DESELECT')
rig.select = True
bpy.context.scene.objects.active = rig
bpy.ops.object.editmode_toggle()
parent_bone = 'y3o4' # the bone that will serve as the parent
rig.data.edit_bones.active = rig.data.edit_bones[parent_bone]
bpy.ops.object.mode_set(mode='OBJECT')
bpy.ops.object.select_all(action='DESELECT') #deselect all objects
pitch.rig.select = True
rig.select = True
bpy.context.scene.objects.active = rig # the active object will be the parent of all selected objects
bpy.ops.object.parent_set(type='BONE', keep_transform=True)
bpy.ops.object.select_all(action='DESELECT') #deselect all objects
# end
rig.select = True
bpy.context.scene.objects.active = rig
bpy.ops.object.editmode_toggle()
parent_bone = 'y3o3' # the bone that will serve as the parent
rig.data.edit_bones.active = rig.data.edit_bones[parent_bone]
bpy.ops.object.mode_set(mode='OBJECT')
bpy.ops.object.select_all(action='DESELECT') #deselect all objects
leg_right.rig.select = True
rig.select = True
bpy.context.scene.objects.active = rig # the active object will be the parent of all selected objects
bpy.ops.object.parent_set(type='BONE', keep_transform=True)
bpy.ops.object.select_all(action='DESELECT') #deselect all objects
def configLink(self, A, J, helicity, rig, move, part):
bpy.ops.object.mode_set(mode='OBJECT')
Q = (0.18648+0.146446)*A
# Z = -Q*2
Z = 0.0
obj_joint = bpy.data.objects["joint.gold.000"].copy()
obj_joint.location = (0.0, 0.0, -Q*3+Z)
obj_joint.scale = (A, A, A)
obj_joint.name = "a2a1.mesh." + move + '.' + part +'.' + helicity
bpy.context.scene.objects.link(obj_joint)
obj_joint = bpy.data.objects["joint.silver.001"].copy()
obj_joint.location = (0.0, 0.0, +Q+Z)
obj_joint.scale = (A, A, A)
obj_joint.name = "y1a2.mesh." + move + '.' + part +'.' + helicity
bpy.context.scene.objects.link(obj_joint)
obj_joint = bpy.data.objects["joint.copper.001"].copy()
obj_joint.location = (0.0, 0.0, +Q*3+Z)
obj_joint.scale = (A, A, A)
obj_joint.name = "a2o1.mesh." + move + '.' + part +'.' + helicity
bpy.context.scene.objects.link(obj_joint)
obj_joint = bpy.data.objects["joint.blue.001"].copy()
obj_joint.location = (0.0, 0.0, -Q*2+Z)
obj_joint.scale = (A, A, A)
obj_joint.name = "a1b1.mesh." + move + '.' + part +'.' + helicity
bpy.context.scene.objects.link(obj_joint)
for n in range(1, J - 3):
if n <= (J-5):
# Pattern 2 of by
obj_joint = bpy.data.objects["joint.green.001"].copy()
obj_joint.location = (0.0, 0.0, -Q + Q*((n+1) % 2)*4 +Z)
obj_joint.scale = (A, A, A)
obj_joint.name = "b"+str(n)+"y"+str(n)+".mesh." + move + '.' + part +'.' + helicity
bpy.context.scene.objects.link(obj_joint)
# Pattern 2 of yy
obj_joint = bpy.data.objects["joint.gold.00"+str(1 + (n+1) % 2)].copy()
obj_joint.location = (0.0, 0.0, +Q*(1 - (n % 2))*2+Z)
obj_joint.scale = (A, A, A)
obj_joint.name = "y"+str(n)+"y"+str(n+1)+".mesh." + move + '.' + part +'.' + helicity
bpy.context.scene.objects.link(obj_joint)
if n <= (J-6):
# Pattern 1 of ob
obj_joint = bpy.data.objects["joint.blue.00"+str(1 + (n+1) % 2)].copy()
obj_joint.location = (0.0, 0.0, -Q*4 + Q*(n % 2)*8 +Z)
obj_joint.scale = (A, A, A)
obj_joint.name = "o"+str(n)+"b"+str(n+1)+".mesh." + move + '.' + part +'.' + helicity
bpy.context.scene.objects.link(obj_joint)
if n <= (J-6):
# Pattern 2 of yo
obj_joint = bpy.data.objects["joint.copper.001"].copy()
obj_joint.location = (0.0, 0.0, -Q + Q*((n+1) % 2)*4 +Z)
obj_joint.scale = (A, A, A)
obj_joint.name = "y"+str(n+1)+"o"+str(n+1)+".mesh." + move + '.' + part +'.' + helicity
bpy.context.scene.objects.link(obj_joint)
obj_joint = bpy.data.objects["joint.copper.001"].copy()
obj_joint.location = (0.0, 0.0, 0.0)
obj_joint.scale = (A, A, A)
obj_joint.name = "y3o3.mesh." + move + '.' + part +'.' + helicity
bpy.context.scene.objects.link(obj_joint)
obj_joint = bpy.data.objects["joint.copper.y3o4.leg-left.001"].copy()
obj_joint.location = (0.0, 0.0, 0.0)
obj_joint.scale = (A, A, A)
obj_joint.name = "y3o4.mesh." + move + '.' + part +'.' + helicity
bpy.context.scene.objects.link(obj_joint)
obj_joint = bpy.data.objects["joint.copper.y2b3.leg-left.001"].copy()
obj_joint.location = (0.0, 0.0, +Z)
obj_joint.scale = (A, A, A)
obj_joint.name = "y2b3.mesh." + move + '.' + part +'.' + helicity
bpy.context.scene.objects.link(obj_joint)
obj_joint = bpy.data.objects["cursor.001"].copy()
obj_joint.location = (0.0, 0.0, 0.0)
obj_joint.scale = (A, A, A)
obj_joint.name = "b3w1.mesh." + move + '.' + part +'.' + helicity
bpy.context.scene.objects.link(obj_joint)
obj_joint = bpy.data.objects["cursor.001"].copy()
obj_joint.location = (0.0, 0.0, 0.0)
obj_joint.scale = (A, A, A)
obj_joint.name = "w1w2.mesh." + move + '.' + part +'.' + helicity
bpy.context.scene.objects.link(obj_joint)
obj_joint = bpy.data.objects["joint.gold.y3y5.gimbal.001"].copy()
obj_joint.location = (0.0, 0.0, +Z)
obj_joint.scale = (A, A, A)
obj_joint.name = "y2b4.gimbal.mesh." + move + '.' + part +'.' + helicity
bpy.context.scene.objects.link(obj_joint)
obj_joint = bpy.data.objects["joint.blue.b4w3.gimbal.leg-left.001"].copy()
obj_joint.location = (0.0, 0.0, +Z)
obj_joint.scale = (A, A, A)
obj_joint.name = "b4w3.gimbal.mesh." + move + '.' + part +'.' + helicity
bpy.context.scene.objects.link(obj_joint)
obj_joint = bpy.data.objects["joint.gold.y1b5.leg-left.001"].copy()
obj_joint.location = (0.0, 0.0, +Z)
obj_joint.scale = (A, A, A)
obj_joint.name = "y1b5.mesh." + move + '.' + part +'.' + helicity
bpy.context.scene.objects.link(obj_joint)
obj_joint = bpy.data.objects["cursor.001"].copy()
obj_joint.location = (0.0, 0.0, 0.0)
obj_joint.scale = (A, A, A)
obj_joint.name = "b5w4.mesh." + move + '.' + part +'.' + helicity
bpy.context.scene.objects.link(obj_joint)
obj_joint = bpy.data.objects["cursor.001"].copy()
obj_joint.location = (0.0, 0.0, 0.0)
obj_joint.scale = (A, A, A)
obj_joint.name = "w4w5.mesh." + move + '.' + part +'.' + helicity
bpy.context.scene.objects.link(obj_joint)
obj_joint = bpy.data.objects["joint.gold.y3y5.gimbal.001"].copy()
obj_joint.location = (0.0, 0.0, +Z)
obj_joint.scale = (A, A, A)
obj_joint.name = "y1b6.gimbal.mesh." + move + '.' + part +'.' + helicity
bpy.context.scene.objects.link(obj_joint)
obj_joint = bpy.data.objects["joint.blue.b6w6.gimbal.001"].copy()
obj_joint.location = (0.0, 0.0, +Z)
obj_joint.scale = (A, A, A)
obj_joint.name = "b6w6.gimbal.mesh." + move + '.' + part +'.' + helicity
bpy.context.scene.objects.link(obj_joint)
for ob in context.scene.objects:
if "mesh" in ob.name:
ob.select = True
bpy.ops.object.make_single_user(type='SELECTED_OBJECTS', object=True, obdata=True, material=True, texture=True, animation=True)
bpy.context.scene.cursor_location = (0.0, 0.0, 0.0)
bpy.ops.object.origin_set(type='ORIGIN_CURSOR')
class RightLeg(Formula):
J = 7 #joint number
# Overriding
def __init__(self, P, A, move, part, helicity, start, end):
global interval
global frame_start
global frame_end
self.interval = interval
self.frame_start = frame_start
self.frame_end = frame_end
# pivot factor
self.P = P
# scale factor
self.A = A
# name
self.move = move
# element
self.part = part
# element helicity
self.helicity = helicity
self.start = start
self.end = end
bpy.ops.object.mode_set(mode='OBJECT')
# Create armature and object
self.amt = bpy.data.armatures.new(move + '.' + part + '.' + helicity + '.data')
self.rig = bpy.data.objects.new(move + '.' + part + '.' + helicity, self.amt)
# Joints
self.a = [0 for i in range(4)] # Joint α
self.b = [0 for i in range(self.J)] # Joint β
self.y = [0 for i in range(self.J)] # Joint γ
self.o = [0 for i in range(self.J)] # Joint δ
self.w = [0 for i in range(self.J)] # Joint ω
# Configuration Movement
self.configMovement(self.P, self.A, self.J, self.a, self.b, self.y, self.o, self.w)
# Construction Movement
self.constructMovement(self.J, self.helicity, self.amt, self.rig, self.a, self.b, self.y, self.o, self.w)
# Construction Rotation
self.configRotation(self.rig, self.interval, self.frame_start, self.frame_end, self.start, self.end)
# Configuration Linkage
self.configLink(self.A, self.J, self.helicity, self.rig, self.move, self.part)
# Construction Linkage
self.constructLink(self.A, self.J, self.helicity, self.rig, self.move, self.part)
# Overriding Configuration Movement
def configMovement(self, P, A, J, a, b, y, o, w):
a[1] = mathutils.Euler((P, A, 0), 'XYZ')
print ("a1 =", a[1])
a[2] = mathutils.Euler((A, -A, 0), 'XYZ')
print ("a2 =", a[2])
b[1] = mathutils.Euler((-A, A, 0), 'XYZ')
print ("b1 =", b[1])
o[1] = mathutils.Euler((A, A, 0), 'XYZ')
print ("o1 =", o[1])
B = A * 2 * sqrt (2)
C = B + (B * sqrt (2))
D = C * sqrt (2)
E = C + D
y[1] = mathutils.Euler((-A, -A, 0), 'XYZ')
print ("y1 =", y[1])
y[2] = mathutils.Euler((5.68545, -7.44271, 0), 'XYZ')
print ("y2 =", y[2])
b[2] = mathutils.Euler((6.95751, -6.17062, 0), 'XYZ')
print ("b2 =", b[2])
o[2] = mathutils.Euler((7.53988, -7.37879, 0), 'XYZ')
print ("o2 =", o[2])
y[3] = mathutils.Euler((7.73628, -17.1943, 0), 'XYZ')
print ("y3 =", y[3])
b[4] = mathutils.Euler((11.3434, -7.44271, 0), 'XYZ')
print ("b4 =", b[4])
b[3] = mathutils.Euler((5.68546, -7.44271, -5.65991), 'XYZ')
print ("b3 =", b[3])
w[1] = mathutils.Euler((5.68546, -5.67138, -8.65991), 'XYZ')
print ("w1 =", w[1])
w[2] = mathutils.Euler((5.68546, -5.47138, -8.65991), 'XYZ')
print ("w2 =", w[2])
w[3] = mathutils.Euler((14.3434, -5.67138, 0), 'XYZ')
print ("w3 =", w[3])
b[6] = mathutils.Euler((10.1687, -17.1943, 0), 'XYZ')
print ("b6 =", b[6])
b[5] = mathutils.Euler((7.73628, -17.1943, -2.43205), 'XYZ')
print ("b5 =", b[5])
w[4] = mathutils.Euler((7.73628, -15.42297, -5.43205), 'XYZ')
print ("w4 =", w[4])
w[5] = mathutils.Euler((7.73628, -15.22297, -5.43205), 'XYZ')
print ("w5 =", w[5])
w[6] = mathutils.Euler((13.1687, -15.42297, 0), 'XYZ')
print ("w6 =", w[6])
def constructMovement(self, J, helicity, amt, rig, a, b, y, o, w):
# Linkages
aa = [[0 for i in range(4)] for j in range(4)] # Link α(i) - α(j)
ab = [[0 for i in range(4)] for j in range(4)] # Link α(i) - β(j)
ya = [[0 for i in range(4)] for j in range(4)] # Link γ(i) - α(j)
ao = [[0 for i in range(4)] for j in range(4)] # Link α(i) - δ(j)
ob = [[0 for i in range(self.J)] for j in range(self.J)] # Link δ(i) - β(j)
yy = [[0 for i in range(self.J)] for j in range(self.J)] # Link γ(i) - γ(j)
by = [[0 for i in range(self.J)] for j in range(self.J)] # Link β(i) - γ(j)
yo = [[0 for i in range(self.J)] for j in range(self.J)] # Link γ(i) - δ(j)
yb = [[0 for i in range(self.J)] for j in range(self.J)] # Link γ(i) - β(j)
bw = [[0 for i in range(self.J)] for j in range(self.J)] # Link β(i) - ω(j)
ww = [[0 for i in range(self.J)] for j in range(self.J)] # Link ω(i) - ω(j)
rig.location = mathutils.Euler((0.0, 0.0, 0.0), 'XYZ')
rig.show_x_ray = True
amt.show_names = True
amt.draw_type = 'STICK'
# amt.draw_type = 'BBONE'
# Link object to scene
scn = bpy.context.scene
scn.objects.link(rig)
scn.objects.active = rig
scn.update()
# Edit
bpy.ops.object.editmode_toggle()
# Construction Linkage
aa[2][1] = amt.edit_bones.new('a2a1')
aa[2][1].head = a[2]
aa[2][1].tail = a[1]
ab[1][1] = amt.edit_bones.new('a1b1')
ab[1][1].head = a[1]
ab[1][1].tail = b[1]
ab[1][1].parent = aa[2][1]
by[1][1] = amt.edit_bones.new('b1y1')
by[1][1].head = b[1]
by[1][1].tail = y[1]
by[1][1].parent = ab[1][1]
by[1][1].use_inherit_rotation = False
ya[1][2] = amt.edit_bones.new('y1a2')
ya[1][2].head = y[1]
ya[1][2].tail = a[2]
ya[1][2].parent = by[1][1]
ao[2][1] = amt.edit_bones.new('a2o1')
ao[2][1].head = a[2]
ao[2][1].tail = o[1]
ao[2][1].parent = ya[1][2]
ob[1][2] = amt.edit_bones.new('o1b2')
ob[1][2].head = o[1]
ob[1][2].tail = b[2]
ob[1][2].parent = ao[2][1]
yy[1][2] = amt.edit_bones.new('y1y2')
yy[1][2].head = y[1]
yy[1][2].tail = y[2]
yy[1][2].parent = by[1][1]
for j in range(2, J - 4):
by[j][j] = amt.edit_bones.new('b'+ str(j) + 'y'+ str(j))
by[j][j].head = b[j]
by[j][j].tail = y[j]
by[j][j].parent = ob[j-1][j]
yo[j][j] = amt.edit_bones.new('y'+ str(j) + 'o'+ str(j))
yo[j][j].head = y[j]
yo[j][j].tail = o[j]
yo[j][j].parent = yy[j-1][j]
yy[j][j+1] = amt.edit_bones.new('y'+ str(j) + 'y'+ str(j+1))
yy[j][j+1].head = y[j]
yy[j][j+1].tail = y[j+1]
yy[j][j+1].parent = by[j][j]
if j < (J - 5):
ob[j][j+1] = amt.edit_bones.new('o'+ str(j) + 'b'+ str(j+1))
ob[j][j+1].head = o[j]
ob[j][j+1].tail = b[j+1]
ob[j][j+1].parent = yo[j][j]
yb[2][3] = amt.edit_bones.new('y2b3')
yb[2][3].head = y[2]
yb[2][3].tail = b[3]
yb[2][3].parent = yy[1][2]
bw[3][1] = amt.edit_bones.new('b3w1')
bw[3][1].head = b[3]
bw[3][1].tail = w[1]
bw[3][1].parent = yb[2][3]
ww[1][2] = amt.edit_bones.new('w1w2')
ww[1][2].head = w[1]
ww[1][2].tail = w[2]
ww[1][2].parent = bw[3][1]
yb[2][4] = amt.edit_bones.new('y2b4.gimbal')
yb[2][4].head = y[2]
yb[2][4].tail = b[4]
yb[2][4].parent = yy[1][2]
bw[4][3] = amt.edit_bones.new('b4w3.gimbal')
bw[4][3].head = b[4]
bw[4][3].tail = w[3]
bw[4][3].parent = yb[2][4]
yb[3][5] = amt.edit_bones.new('y3b5')
yb[3][5].head = y[3]
yb[3][5].tail = b[5]
yb[3][5].parent = yy[2][3]
bw[5][4] = amt.edit_bones.new('b5w4')
bw[5][4].head = b[5]
bw[5][4].tail = w[4]
bw[5][4].parent = yb[3][5]
ww[4][5] = amt.edit_bones.new('w4w5')
ww[4][5].head = w[4]
ww[4][5].tail = w[5]
ww[4][5].parent = bw[5][4]
yb[3][6] = amt.edit_bones.new('y3b6.gimbal')
yb[3][6].head = y[3]
yb[3][6].tail = b[6]
yb[3][6].parent = yy[2][3]
bw[6][6] = amt.edit_bones.new('b6w6.gimbal')
bw[6][6].head = b[6]
bw[6][6].tail = w[6]
bw[6][6].parent = yb[3][6]
# all bones select
#bpy.ops.pose.select_all(action="SELECT")
for b in amt.edit_bones:
b.select = True
if helicity == 'right':
bpy.ops.armature.calculate_roll(type='GLOBAL_POS_Z')
else:
bpy.ops.armature.calculate_roll(type='GLOBAL_NEG_Z')
# Bone constraints. Armature must be in pose mode.
bpy.ops.object.mode_set(mode='POSE')
# IK constraint
cns = rig.pose.bones['y1a2'].constraints.new('IK')
cns.name = 'Ik'
cns.target = rig
cns.subtarget = 'a2a1'
cns.chain_count = 2
cns.use_stretch = False
for j in range(2, J - 4):
cns = rig.pose.bones['b'+str(j) +'y'+str(j)].constraints.new('IK')
cns.name = 'Ik'
cns.target = rig
cns.subtarget = 'y'+str(j)+'o'+str(j)
cns.iterations = 500
cns.chain_count = 2
cns.use_stretch = False
cns = rig.pose.bones['b4w3.gimbal'].constraints.new('IK')
cns.name = 'Ik'
cns.target = rig
cns.subtarget = 'w1w2'
cns.pole_target = rig
cns.pole_subtarget = 'b3w1'
cns.pole_angle = math.radians(0)
cns.iterations = 500
cns.chain_count = 2
cns.use_stretch = False
cns = rig.pose.bones['b6w6.gimbal'].constraints.new('IK')
cns.name = 'Ik'
cns.target = rig
cns.subtarget = 'w4w5'
cns.pole_target = rig
cns.pole_subtarget = 'b5w4'
cns.pole_angle = math.radians(0)
cns.iterations = 500
cns.chain_count = 2
cns.use_stretch = False
bpy.ops.object.mode_set(mode='OBJECT')
def configLink(self, A, J, helicity, rig, move, part):
bpy.ops.object.mode_set(mode='OBJECT')
Q = (0.18648+0.146446)*A
# Z = -Q*2
Z = 0.0
obj_joint = bpy.data.objects["joint.gold.leg-right.001"].copy()
obj_joint.location = (0.0, 0.0, -Q*3+Z)
obj_joint.scale = (A, A, A)
obj_joint.name = "a2a1.mesh." + move + '.' + part +'.' + helicity
bpy.context.scene.objects.link(obj_joint)
obj_joint = bpy.data.objects["joint.silver.001"].copy()
obj_joint.location = (0.0, 0.0, +Q+Z)
obj_joint.scale = (A, A, A)
obj_joint.name = "y1a2.mesh." + move + '.' + part +'.' + helicity
bpy.context.scene.objects.link(obj_joint)
obj_joint = bpy.data.objects["joint.copper.001"].copy()
obj_joint.location = (0.0, 0.0, +Q*3+Z)
obj_joint.scale = (A, A, A)
obj_joint.name = "a2o1.mesh." + move + '.' + part +'.' + helicity
bpy.context.scene.objects.link(obj_joint)
obj_joint = bpy.data.objects["joint.blue.001"].copy()
obj_joint.location = (0.0, 0.0, -Q*2+Z)
obj_joint.scale = (A, A, A)
obj_joint.name = "a1b1.mesh." + move + '.' + part +'.' + helicity
bpy.context.scene.objects.link(obj_joint)
for n in range(1, J - 3):
if n <= (J-5):
# Pattern 2 of by
obj_joint = bpy.data.objects["joint.green.001"].copy()
obj_joint.location = (0.0, 0.0, -Q + Q*((n+1) % 2)*4 +Z)
obj_joint.scale = (A, A, A)
obj_joint.name = "b"+str(n)+"y"+str(n)+".mesh." + move + '.' + part +'.' + helicity
bpy.context.scene.objects.link(obj_joint)
if n <= (J-6):
# Pattern 2 of yy
obj_joint = bpy.data.objects["joint.gold.00"+str(1 + (n+1) % 2)].copy()
obj_joint.location = (0.0, 0.0, +Q*(1 - (n % 2))*2+Z)
obj_joint.scale = (A, A, A)
obj_joint.name = "y"+str(n)+"y"+str(n+1)+".mesh." + move + '.' + part +'.' + helicity
bpy.context.scene.objects.link(obj_joint)
if n <= (J-6):
# Pattern 1 of ob
obj_joint = bpy.data.objects["joint.blue.00"+str(1 + (n+1) % 2)].copy()
obj_joint.location = (0.0, 0.0, -Q*4 + Q*(n % 2)*8 +Z)
obj_joint.scale = (A, A, A)
obj_joint.name = "o"+str(n)+"b"+str(n+1)+".mesh." + move + '.' + part +'.' + helicity
bpy.context.scene.objects.link(obj_joint)
if n <= (J-6):
# Pattern 2 of yo
obj_joint = bpy.data.objects["joint.copper.001"].copy()
obj_joint.location = (0.0, 0.0, -Q + Q*((n+1) % 2)*4 +Z)
obj_joint.scale = (A, A, A)
obj_joint.name = "y"+str(n+1)+"o"+str(n+1)+".mesh." + move + '.' + part +'.' + helicity
bpy.context.scene.objects.link(obj_joint)
obj_joint = bpy.data.objects["joint.gold.y2y3.leg-right.001"].copy()
obj_joint.location = (0.0, 0.0, +Q*2+Z)
obj_joint.scale = (A, A, A)
obj_joint.name = "y2y3.mesh." + move + '.' + part +'.' + helicity
bpy.context.scene.objects.link(obj_joint)
obj_joint = bpy.data.objects["joint.copper.y2b3.leg-right.001"].copy()
obj_joint.location = (0.0, 0.0, +Z)
obj_joint.scale = (A, A, A)
obj_joint.name = "y2b3.mesh." + move + '.' + part +'.' + helicity
bpy.context.scene.objects.link(obj_joint)
obj_joint = bpy.data.objects["cursor.001"].copy()
obj_joint.location = (0.0, 0.0, 0.0)
obj_joint.scale = (A, A, A)
obj_joint.name = "b3w1.mesh." + move + '.' + part +'.' + helicity
bpy.context.scene.objects.link(obj_joint)
obj_joint = bpy.data.objects["cursor.001"].copy()
obj_joint.location = (0.0, 0.0, 0.0)
obj_joint.scale = (A, A, A)
obj_joint.name = "w1w2.mesh." + move + '.' + part +'.' + helicity
bpy.context.scene.objects.link(obj_joint)
obj_joint = bpy.data.objects["joint.gold.y3y5.gimbal.001"].copy()
obj_joint.location = (0.0, 0.0, +Z)
obj_joint.scale = (A, A, A)
obj_joint.name = "y2b4.gimbal.mesh." + move + '.' + part +'.' + helicity
bpy.context.scene.objects.link(obj_joint)
obj_joint = bpy.data.objects["joint.gold.b4w3.gimbal.leg-right.001"].copy()
obj_joint.location = (0.0, 0.0, +Z)
obj_joint.scale = (A, A, A)
obj_joint.name = "b4w3.gimbal.mesh." + move + '.' + part +'.' + helicity
bpy.context.scene.objects.link(obj_joint)
obj_joint = bpy.data.objects["joint.gold.y3b5.leg-right.001"].copy()
obj_joint.location = (0.0, 0.0, +Z)
obj_joint.scale = (A, A, A)
obj_joint.name = "y3b5.mesh." + move + '.' + part +'.' + helicity
bpy.context.scene.objects.link(obj_joint)
obj_joint = bpy.data.objects["cursor.001"].copy()
obj_joint.location = (0.0, 0.0, 0.0)
obj_joint.scale = (A, A, A)
obj_joint.name = "b5w4.mesh." + move + '.' + part +'.' + helicity
bpy.context.scene.objects.link(obj_joint)
obj_joint = bpy.data.objects["cursor.001"].copy()
obj_joint.location = (0.0, 0.0, 0.0)
obj_joint.scale = (A, A, A)
obj_joint.name = "w4w5.mesh." + move + '.' + part +'.' + helicity
bpy.context.scene.objects.link(obj_joint)
obj_joint = bpy.data.objects["joint.gold.y3y5.gimbal.001"].copy()
obj_joint.location = (0.0, 0.0, +Z)
obj_joint.scale = (A, A, A)
obj_joint.name = "y3b6.gimbal.mesh." + move + '.' + part +'.' + helicity
bpy.context.scene.objects.link(obj_joint)
obj_joint = bpy.data.objects["joint.blue.b6w6.gimbal.001"].copy()
obj_joint.location = (0.0, 0.0, +Z)
obj_joint.scale = (A, A, A)
obj_joint.name = "b6w6.gimbal.mesh." + move + '.' + part +'.' + helicity
bpy.context.scene.objects.link(obj_joint)
for ob in context.scene.objects:
if "mesh" in ob.name:
ob.select = True
bpy.ops.object.make_single_user(type='SELECTED_OBJECTS', object=True, obdata=True, material=True, texture=True, animation=True)
bpy.context.scene.cursor_location = (0.0, 0.0, 0.0)
bpy.ops.object.origin_set(type='ORIGIN_CURSOR')
def formula():
# pivot factor
P = 0
# scale factor
A = 1
# name
move = 'formula'
# element
part = 'universe'
# left or right
helicity = 'left'
start = 0
end = start+360
formula = Formula(P, A, move, part, helicity, start, end)
def arms():
# scale factor
A = 0.380
# pivot factor
# P = 0.010708
P = 0
# name
move = 'kungfu'
# arm element
part = 'arm-left'
# left arm element
helicity = 'left'
start = 77.9422
end = start+360
global arm_left
arm_left = LeftArm(P, A, move, part, helicity, start, end)
def legs():
# name
move = 'kungfu'
# leg element
part = 'leg-right'
# scale factor
A = 0.638694
# pivot factor
P = -0.02853
# P = 0
# right leg element
helicity = 'right'
start = -180.491
end = start-360
global leg_right
leg_right = RightLeg(P, A, move, part, helicity, start, end)
leg_right_loc = (8.88082, 1.22518, 22.398)
leg_right_rot = mathutils.Euler((math.radians(-90.0), math.radians(173.618), math.radians(0.0)), 'XYZ')
# leg element
part = 'leg-left'
# scale factor
A = 0.664895
# pivot factor
P = -0.030131
# P = 0
# left leg element
helicity = 'right'
start = -0.491119
end = start+360
global pitch
pitch_loc = (11.7981, 1.49764, 26.9466)
pitch_rot = mathutils.Euler((math.radians(90.0), math.radians(34.4707), math.radians(-90)), 'XYZ')
global leg_left
leg_left_loc = (14.053, 5.91232, 1.8578)
leg_left_rot = mathutils.Euler((math.radians(-90.0), math.radians(-13.2686), math.radians(0.0)), 'XYZ')
leg_left = LeftLeg(P, A, move, part, helicity, start, end,
leg_left_loc, leg_left_rot,
leg_right_loc, leg_right_rot, leg_right,
pitch_loc, pitch_rot, pitch)
global body
bpy.ops.object.mode_set(mode='OBJECT')
body.rig.select = True
# Bone constraints. Armature must be in pose mode.
bpy.ops.object.mode_set(mode='POSE')
# Damped Track constraints
cns = body.rig.pose.bones['o4b5.gimbal.lower.right'].constraints.new('DAMPED_TRACK')
cns.name = 'Damped Track'
cns.target = leg_right.rig
cns.subtarget = 'y2b3'
cns.head_tail = 1
cns = body.rig.pose.bones['b5y5.gimbal.lower.right'].constraints.new('DAMPED_TRACK')
cns.name = 'Damped Track'
cns.target = leg_right.rig
cns.subtarget = 'y3b5'
cns.head_tail = 1
cns = body.rig.pose.bones['o4b5.gimbal.lower.left'].constraints.new('DAMPED_TRACK')
cns.name = 'Damped Track'
cns.target = leg_left.rig
cns.subtarget = 'y2b3'
cns.head_tail = 1
cns = body.rig.pose.bones['b5y5.gimbal.lower.left'].constraints.new('DAMPED_TRACK')
cns.name = 'Damped Track'
cns.target = leg_left.rig
cns.subtarget = 'y1b5'
cns.head_tail = 1
bpy.ops.object.mode_set(mode='OBJECT')
leg_right.rig.select = True
# Bone constraints. Armature must be in pose mode.
bpy.ops.object.mode_set(mode='POSE')
# Damped Track constraints
cns = leg_right.rig.pose.bones['b3w1'].constraints.new('DAMPED_TRACK')
cns.name = 'Damped Track'
cns.target = body.rig
cns.subtarget = 'y3o3.lower.right'
cns.head_tail = 1
cns = leg_right.rig.pose.bones['b5w4'].constraints.new('DAMPED_TRACK')
cns.name = 'Damped Track'
cns.target = body.rig
cns.subtarget = 'b5y5.gimbal.lower.right'
cns.head_tail = 0
bpy.ops.object.mode_set(mode='OBJECT')
leg_left.rig.select = True
# Bone constraints. Armature must be in pose mode.
bpy.ops.object.mode_set(mode='POSE')
cns = leg_left.rig.pose.bones['b3w1'].constraints.new('DAMPED_TRACK')
cns.name = 'Damped Track'
cns.target = body.rig
cns.subtarget = 'y3o3.lower.left'
cns.head_tail = 1
cns = leg_left.rig.pose.bones['b5w4'].constraints.new('DAMPED_TRACK')
cns.name = 'Damped Track'
cns.target = body.rig
cns.subtarget = 'b5y5.gimbal.lower.left'
cns.head_tail = 0
bpy.ops.object.mode_set(mode='OBJECT')
def body():
# scale factor
A = 1
# pivot factor
P = -(A * 0.862422)
# name
move = 'kungfu'
# body element
part = 'body'
# helicity of element
helicity = 'right'
start = 10.0003
end = start - 360
# start = 295
# end = 632.6244
global arm_left
# global arm_right
arm_left_loc = (1.70332, 4.20638, 0.555307)
arm_left_rot = mathutils.Euler((math.radians(-22.535), math.radians(-149.379), math.radians(129.414)), 'XYZ')
# arm_right_loc = (-6.09865, 2.74983, 0.039255)
# arm_right_rot = mathutils.Euler((math.radians(-157.882), math.radians(-78.9597), math.radians(148.427)), 'XYZ')
global interval
factor = 57.296949
# scale start
scale_frame_start = 0.5*interval
start_value = 1.11391*factor
# lx = 0.459332*interval
# ly = value
# rx = 0.540668*interval
# ry = value
# scale mid
scale_frame_mid = 0.60417*interval
mid_value = 0.684769*factor
# lx = 0.563498*interval
# ly = value
# rx = 0.758704*interval
# ry = value
# scale end
scale_frame_end = 1*interval
end_value = 1.11391*factor
# lx = 0.84546*interval
# ly = value
# rx = 1.154542*interval
# ry = value
global body
body = Body(P, A, move, part, helicity, start, end,
arm_left_loc, arm_left_rot, arm_left,
scale_frame_start, scale_frame_mid, scale_frame_end, start_value, mid_value, end_value)
def pitch():
# scale factor
A = 2.0
# pivot factor
P = -1.37
# name
move = 'kungfu'
# pitch element
part = 'pitch'
# helicity of element
helicity = 'left'
start = 180.008
end = 180.008
global body
body_loc = (-0.823266, 2.38084, -2.56963)
body_rot = mathutils.Euler((math.radians(-246.029), math.radians(90.3186), math.radians(-214.068)), 'XYZ')
global pitch
pitch = Pitch(P, A, move, part, helicity, start, end,
body_loc, body_rot, body)
def main(origin):
global interval
global frame_start
global frame_end
frame_start = 0
frame_end = 96
interval = frame_end - frame_start
# formula()
arms()
body() #roll
pitch()
legs()
if __name__ == "__main__":
# renaming of collada-imported objects (restore '.' in names)
# for ob in context.scene.objects:
# if "joint_" in ob.name:
# ob.name = ob.name.replace("_", ".")
main((0,0,0))
|
py
|
1a55d63759851d007bfbdb6995131842d67d8ea6
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# This file is part of the
# Pyedra Project (https://github.com/milicolazo/Pyedra/).
# Copyright (c) 2020, Milagros Colazo
# License: MIT
# Full Text: https://github.com/milicolazo/Pyedra/blob/master/LICENSE
# ======================================================================
# IMPORTS
# ======================================================================
import numpy as np
import pandas as pd
import pyedra.datasets
# =============================================================================
# TESTS
# =============================================================================
def test_load_carbognani2019():
result = pyedra.datasets.load_carbognani2019()
assert isinstance(result, pd.DataFrame)
np.testing.assert_almost_equal(result["id"].mean(), 283.042553, 6)
np.testing.assert_almost_equal(result["alpha"].mean(), 9.228085, 6)
np.testing.assert_almost_equal(result["v"].mean(), 9.114468, 6)
def test_load_penttila2016():
result = pyedra.datasets.load_penttila2016()
assert isinstance(result, pd.DataFrame)
np.testing.assert_almost_equal(result["alpha"].mean(), 40.951960, 6)
np.testing.assert_almost_equal(result["phi1"].mean(), 0.491135, 6)
np.testing.assert_almost_equal(result["phi2"].mean(), 0.610840, 6)
np.testing.assert_almost_equal(result["phi3"].mean(), 0.213223, 6)
def test_load_gaia():
result = pyedra.datasets.load_gaia()
assert isinstance(result, pd.DataFrame)
np.testing.assert_almost_equal(result["id"].mean(), 13374.904873, 6)
np.testing.assert_almost_equal(result["epoch_utc"].mean(), 2024.415650, 6)
np.testing.assert_almost_equal(result["jd"].mean(), 2457221.915650, 6)
np.testing.assert_almost_equal(result["r"].mean(), 2.834362, 6)
np.testing.assert_almost_equal(result["delta"].mean(), 2.609104, 6)
np.testing.assert_almost_equal(result["alpha"].mean(), 18.736902, 6)
np.testing.assert_almost_equal(result["g_mag"].mean(), 17.645928, 6)
np.testing.assert_almost_equal(result["g_red"].mean(), 13.454926, 6)
np.testing.assert_almost_equal(result["v"].mean(), 13.639460, 6)
np.testing.assert_almost_equal(result["v-r"].mean(), 0.428789, 6)
assert list(result.tax.unique()) == [
"",
"C",
"O",
"S",
"L",
"X",
"D",
"B",
"P",
"K",
"A",
"F",
"V",
"M",
"T",
"E",
"G",
"R",
"Q",
]
|
py
|
1a55d673af0cbd15bc694f14e830fb85686af97c
|
import logging
import sys
from pathlib import Path
sys.path.append(str(Path(__file__).resolve().parents[2]))
from nni.retiarii import Mutator
from base_mnasnet import RegularConv, DepthwiseConv, MobileConv
_logger = logging.getLogger(__name__)
class BlockMutator(Mutator):
def __init__(self, target: str):
super(BlockMutator, self).__init__()
self.target = target
def mutate(self, model):
nodes = model.get_nodes_by_label(self.target)
assert len(nodes) == 1
node = nodes[0]
graph = node.graph
related_info = node.operation.parameters
kernel_size = self.choice(related_info['kernel_size_options'])
op_type = self.choice(related_info['op_type_options'])
#self.choice(related_info['se_ratio_options'])
skip = self.choice(related_info['skip_options'])
n_filter = self.choice(related_info['n_filter_options'])
if related_info['in_ch'] is not None:
in_ch = related_info['in_ch']
else:
assert len(node.predecessors) == 1
the_node = node.predecessors[0]
_logger.debug(repr(the_node.operation.parameters))
_logger.debug(repr(the_node))
in_ch = the_node.operation.parameters['out_ch']
# update the placeholder to be a new operation
node.update_operation(op_type, {
'kernel_size': kernel_size,
'in_ch': in_ch,
'out_ch': n_filter,
'skip': 'no',
'exp_ratio': related_info['exp_ratio'],
'stride': related_info['stride']
})
# insert new nodes after the placeholder
n_layer = self.choice(related_info['n_layer_options'])
for i in range(1, n_layer):
node = graph.insert_node_on_edge(node.outgoing_edges[0],
'{}_{}'.format(self.target, i),
op_type,
{'kernel_size': kernel_size,
'in_ch': n_filter,
'out_ch': n_filter,
'skip': skip,
'exp_ratio': related_info['exp_ratio'],
'stride': 1})
# fix possible shape mismatch
# TODO: use formal method function to update parameters
if len(node.successors) == 1 and 'in_channels' in node.successors[0].operation.parameters:
node.successors[0].operation.parameters['in_channels'] = n_filter
|
py
|
1a55d6d9d3c240d13febb955ca3737053f772be8
|
from wrast.tools import wrastf, wrasts
|
py
|
1a55d6e5b663de5690e84c6bb1f325e4908f8533
|
# Collect words from well-formed sentences that contain a given letter exactly N times
import re
pattern_word = re.compile(r'\b\w+\b')
pattern_sentence = re.compile(r'^[A-Z].*[\.\!\?]$')
criteria = list(input())
letter, times = criteria[0], int(criteria[1])
result = []
while True:
user_input = input()
if user_input == 'end':
break
sentence = re.search(pattern_sentence, user_input)
if sentence is not None:
words = re.finditer(pattern_word, sentence.group())
for word in words:
cur_word = word.group()
letter_count = cur_word.count(letter)
if letter_count == times:
result.append(cur_word)
print(*result, sep = ', ')
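# Editor's note (assumed input format, inferred from the indexing above): the first line
# supplies the letter and a single-digit count with no separator, e.g. "a3".
# Example session:
#   a3
#   Bananas are nice.
#   end
# prints: Bananas    ("Bananas" contains the letter 'a' exactly 3 times)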
|
py
|
1a55d859472a9f7b5c2c4a80db80875a48645b61
|
# ----------------------------------------------------------------------------
# CLASSES: nightly
#
# Test Case: queriesOverTime.py #
# Tests: queries - Database
#
# Defect ID: none
#
# Programmer: Kathleen Bonnell
# Date: March 31, 2004
#
# Modifications:
#
# Hank Childs, Tue Apr 13 13:00:15 PDT 2004
# Rename surface area query.
#
# Kathleen Bonnell, Tue Apr 20 09:42:30 PDT 2004
# Added TestFilledBoundary.
#
# Kathleen Bonnell, Tue Apr 27 12:10:44 PDT 2004
# Added TestExpressions, TestOperators.
#
# Kathleen Bonnell, Thu Jun 24 09:49:35 PDT 2004
# Added TestTransientVariable.
#
# Kathleen Bonnell, Wed Jul 21 16:51:31 PDT 2004
# Added TestSpecifyTimeQueryWindow.
#
# Kathleen Bonnell, Wed Sep 8 10:53:58 PDT 2004
# Renamed 'WorldPick' as 'Pick'.
#
# Kathleen Bonnell, Mon Dec 20 15:54:04 PST 2004
# Changed 'Variable by Node' to 'PickByNode'.
#
# Kathleen Bonnell, Thu Jan 6 11:06:29 PST 2005
# Added TestTimeVaryingSIL.
#
# Kathleen Bonnell, Wed Mar 16 11:13:40 PST 2005
# Added TestQueryAfterQueryOverTime.
#
# Kathleen Bonnell, Wed Jul 6 16:21:34 PDT 2005
# Added TestMili.
#
# Kathleen Bonnell, Thu Nov 10 08:21:54 PST 2005
# Added TrajectoryByZone to TestMili.
#
# Mark C. Miller, Wed Jan 20 07:37:11 PST 2010
# Added ability to switch between Silo's HDF5 and PDB data.
#
# Cyrus Harrison, Fri Feb 5 09:27:37 PST 2010
# Turn off color cycling to avoid possible propagation of error from
# one failed test to several.
#
# Kathleen Bonnell, Thu Mar 3 11:47:09 PST 2011
# Added MultiVarTimePick tests.
#
# Kathleen Biagas, Thu Jul 14 10:44:55 PDT 2011
# Use named arguments.
#
# Alister Maguire, Tue Oct 17 16:54:48 PDT 2017
# Added TestPickRangeTimeQuery
#
# Alister Maguire, Wed May 9 10:13:26 PDT 2018
# Added TestReturnValue.
#
# Alister Maguire, Wed May 30 14:16:28 PDT 2018
# Added tests for performing pick ranges over time with and
# without plotting and returning the curves.
#
# Alister Maguire, Wed May 22 08:49:30 PDT 2019
# Updated mili tests to reflect new plugin changes.
#
# Alister Maguire, Tue Oct 1 11:48:15 MST 2019
# Make sure to set use_actual_data to true when we want
# to use data from the pipeline output.
#
# Alister Maguire, Fri Oct 11 13:12:36 PDT 2019
# Added TestDirectDatabaseRoute. I also updated several tests to
# use actual data so that they continue to test the old QOT route.
#
# Kathleen Biagas, Thu Jan 30 13:37:50 MST 2020
# Added TestOperatorCreatedVar. (github bugs #2842, #3489).
#
# Alister Maguire, Tue Feb 25 13:46:24 PST 2020
# Added tests for handling vectors in the direct database route.
#
# Alister Maguire, Mon Mar 9 15:16:36 PDT 2020
# I've removed the use_actual_data flag for Pick queries as this
# is now handled internally.
#
# ----------------------------------------------------------------------------
RequiredDatabasePlugin(("PDB", "Mili", "SAMRAI"))
def InitAnnotation():
# Turn off most annotations
a = AnnotationAttributes()
a.axes2D.visible = 1
a.axes2D.xAxis.label.visible = 1
a.axes2D.yAxis.label.visible = 1
a.axes2D.xAxis.title.visible = 1
a.axes2D.yAxis.title.visible = 1
a.axes3D.triadFlag = 0
a.axes3D.bboxFlag = 0
a.userInfoFlag = 0
a.databaseInfoFlag = 0
a.legendInfoFlag = 0
a.backgroundMode = a.Solid
a.foregroundColor = (0, 0, 0, 255)
a.backgroundColor = (255, 255, 255, 255)
SetAnnotationAttributes(a)
def SetCurvePlotDefaults():
# Disable Color Cycling, default to a blue curve.
catts = CurveAttributes()
catts.lineWidth = 0
catts.color = (0, 0, 255, 255)
catts.showLabels = 1
catts.designator = ""
catts.showPoints = 0
catts.showLegend = 1
catts.cycleColors = 0
catts.renderMode = catts.RenderAsLines
SetDefaultPlotOptions(catts)
def TestAllTimeQueries():
OpenDatabase(silo_data_path("wave.visit"))
AddPlot("Pseudocolor", "pressure")
DrawPlots()
# Do some database queries.
QueryOverTime("3D surface area")
SetActiveWindow(2)
InitAnnotation()
Test("AllTimeQueries_01")
DeleteAllPlots()
SetActiveWindow(1)
QueryOverTime("Volume")
SetActiveWindow(2);
Test("AllTimeQueries_02")
DeleteAllPlots()
SetActiveWindow(1)
QueryOverTime("Min")
SetActiveWindow(2);
Test("AllTimeQueries_03")
DeleteAllPlots()
SetActiveWindow(1)
QueryOverTime("Max")
SetActiveWindow(2);
Test("AllTimeQueries_04")
DeleteAllPlots()
SetActiveWindow(1)
QueryOverTime("Variable Sum")
SetActiveWindow(2);
Test("AllTimeQueries_05")
DeleteAllPlots()
SetActiveWindow(1)
QueryOverTime("Weighted Variable Sum")
SetActiveWindow(2);
Test("AllTimeQueries_06")
DeleteAllPlots()
SetActiveWindow(1)
pa = GetPickAttributes()
pa.doTimeCurve = 1
pa.timePreserveCoord = 0
SetPickAttributes(pa)
PickByNode(15947)
# reset some defaults
pa.doTimeCurve = 0
pa.timePreserveCoord = 1
SetPickAttributes(pa)
SetActiveWindow(2);
Test("AllTimeQueries_07")
# delete window 2
DeleteWindow()
# remove plots from window 1
DeleteAllPlots()
def TestFilledBoundary():
# bug '4708
OpenDatabase(silo_data_path("wave.visit"))
AddPlot("FilledBoundary", "Material")
DrawPlots()
TurnMaterialsOff(("1 barrier", "2 water"))
SetActiveWindow(1)
QueryOverTime("3D surface area")
SetActiveWindow(2)
InitAnnotation()
Test("FBTimeQuery_01")
DeleteAllPlots()
SetActiveWindow(1)
DeleteAllPlots()
AddPlot("Pseudocolor", "pressure")
DrawPlots()
TurnMaterialsOff(("1 barrier", "2 water"))
QueryOverTime("3D surface area")
SetActiveWindow(2)
Test("FBTimeQuery_02")
# delete window 2
DeleteWindow()
# remove plots from window 1
TurnMaterialsOn()
DeleteAllPlots()
def TestOperators():
# bug '4818
OpenDatabase(silo_data_path("wave*.silo database"))
AddPlot("Pseudocolor", "pressure")
AddOperator("Isovolume")
iso = IsovolumeAttributes()
iso.lbound = 0.1
iso.ubound = 1.0
SetOperatorOptions(iso)
DrawPlots()
SetActiveWindow(1)
QueryOverTime("Volume", stride=10, use_actual_data=1)
SetActiveWindow(2)
InitAnnotation()
Test("TimeQuery_ops_01")
DeleteAllPlots()
SetActiveWindow(1)
DeleteAllPlots()
AddPlot("Pseudocolor", "mesh_quality/jacobian")
AddOperator("Slice")
slice = SliceAttributes()
slice.axisType = slice.Arbitrary
slice.normal = (-0.689, -0.0416, 0.7233)
slice.originType = slice.Point
slice.originPoint = (2.0011, -0.4084, -1.1279)
slice.upAxis = (-0.08584, 0.996007, -0.0245)
slice.project2d = 1
SetOperatorOptions(slice)
DrawPlots()
QueryOverTime("2D area", stride=10, use_actual_data=1)
SetActiveWindow(2)
InitAnnotation()
Test("TimeQuery_ops_02")
# prepare for next test-set
# delete plots from window 2 & l
DeleteAllPlots()
SetActiveWindow(1)
DeleteAllPlots()
def TestExpressions():
#bug '4784
OpenDatabase(data_path("pdb_test_data/dbA00.pdb"))
AddPlot("Pseudocolor", "mesh/ireg")
pa = PseudocolorAttributes()
pa.minFlag = 1
pa.maxFlag = 1
pa.min = 1
pa.max = 4
SetPlotOptions(pa)
DrawPlots()
pt = (4., 3., 0.)
pick = GetPickAttributes()
pick.doTimeCurve = 1
SetPickAttributes(pick)
Pick(pt)
SetActiveWindow(2)
InitAnnotation()
Test("TimeQuery_expr_01")
DeleteAllPlots()
SetActiveWindow(1)
DeleteAllPlots()
# test a scalar expression
OpenDatabase(silo_data_path("wave*.silo database"))
DefineScalarExpression("p2", "pressure*pressure")
AddPlot("Pseudocolor", "p2")
DrawPlots()
QueryOverTime("Variable Sum", stride=10)
SetActiveWindow(2)
Test("TimeQuery_expr_02")
# prepare for next test-set
# delete plots from window 2 & l
DeleteAllPlots()
SetActiveWindow(1)
DeleteAllPlots()
OpenDatabase(data_path("pdb_test_data/dbA00.pdb"))
DefineScalarExpression("m", "matvf(material, 1)")
AddPlot("Pseudocolor", "m")
DrawPlots()
QueryOverTime("Variable Sum")
SetActiveWindow(2)
Test("TimeQuery_expr_03")
# prepare for next test-set
# delete plots from window 2 & l
DeleteAllPlots()
SetActiveWindow(1)
DeleteAllPlots()
def TestTransientVariable():
#bug '4906
# Do what is necessary to get access to the transient variable,
# because QueryOverTime requires an active drawn plot.
db = silo_data_path("wave_tv*.silo database")
OpenDatabase(db)
SetTimeSliderState(17)
ReOpenDatabase(db)
AddPlot("Pseudocolor", "transient")
DrawPlots()
qt = GetQueryOverTimeAttributes()
qt.timeType = qt.Timestep
SetQueryOverTimeAttributes(qt)
QueryOverTime("Variable Sum")
SetActiveWindow(2)
InitAnnotation()
Test("TimeQuery_trans_01")
DeleteAllPlots()
SetActiveWindow(1)
pick = GetPickAttributes()
pick.doTimeCurve = 1
pick.timePreserveCoord = 0
SetPickAttributes(pick)
PickByNode(327)
pick.doTimeCurve = 0
pick.timePreserveCoord = 1
SetPickAttributes(pick)
SetActiveWindow(2)
InitAnnotation()
Test("TimeQuery_trans_02")
# Prepare for next test
DeleteAllPlots()
SetActiveWindow(1)
DeleteAllPlots()
def TestSpecifyTimeQueryWindow():
# bug '5163
OpenDatabase(silo_data_path("wave.visit"))
AddPlot("Pseudocolor", "pressure")
DrawPlots()
qt = GetQueryOverTimeAttributes()
qt.timeType = qt.Timestep
SetQueryOverTimeAttributes(qt)
QueryOverTime("3D surface area")
SetActiveWindow(2)
InitAnnotation()
Test("SpecifyTimeQueryWindow_01")
DeleteAllPlots()
SetActiveWindow(1)
TurnMaterialsOff(("1 barrier"))
DrawPlots()
qot = GetQueryOverTimeAttributes()
qot.createWindow = 0
qot.windowId = 3
SetQueryOverTimeAttributes(qot)
QueryOverTime("3D surface area")
SetActiveWindow(3)
InitAnnotation()
Test("SpecifyTimeQueryWindow_02")
DeleteAllPlots()
SetActiveWindow(1)
TurnMaterialsOff(("2 water"))
DrawPlots()
qot.windowId = 2
SetQueryOverTimeAttributes(qot)
QueryOverTime("3D surface area")
SetActiveWindow(2)
InitAnnotation()
Test("SpecifyTimeQueryWindow_03")
# Prepare for next test
DeleteAllPlots()
DeleteWindow()
SetActiveWindow(3)
DeleteWindow()
SetActiveWindow(1)
DeleteAllPlots()
def TestTimeVaryingSIL():
#bug '5473
OpenDatabase(data_path("samrai_test_data/sil_changes/dumps.visit"))
cfileName = "./temp.curve"
curveFile = open(cfileName, "wt")
curveFile.write("#3D surface area\n")
nframes = TimeSliderGetNStates()
for i in range(nframes):
Query("3D surface area")
val = GetQueryOutputValue()
curveFile.write("%g %g\n" % (i, val))
TimeSliderNextState()
curveFile.close()
AddWindow()
SetActiveWindow(2)
DeleteAllPlots()
OpenDatabase(cfileName)
AddPlot("Curve", "3D surface area")
DrawPlots()
SetActiveWindow(1)
# Go ahead and use default plot for now.
qt = GetQueryOverTimeAttributes()
qt.timeType = qt.Timestep
qt.createWindow = 0
qt.windowId = 2
SetQueryOverTimeAttributes(qt)
QueryOverTime("3D surface area")
SetActiveWindow(2)
InitAnnotation()
cv = GetViewCurve();
cv.domainCoords = (-0.534115, 10.5341)
cv.rangeCoords = (4029.87, 5856.13)
SetViewCurve(cv)
SetActivePlots((0, 1))
c = CurveAttributes()
c.showPoints = 1
SetPlotOptions(c)
Query("Area Between Curves")
s = GetQueryOutputString()
text = CreateAnnotationObject("Text2D")
text.text = s
text.height = 0.02
text.position = (0.55, 0.4)
Test("TimeQuery_sil_01")
text.Delete()
os.unlink(cfileName)
# Prepare for next test
DeleteAllPlots()
SetActiveWindow(2)
DeleteWindow()
SetActiveWindow(1)
DeleteAllPlots()
def TestQueryAfterQueryOverTime():
# bug '5823
OpenDatabase(silo_data_path("wave_tv.visit"))
SetTimeSliderState(17)
ReOpenDatabase(silo_data_path("wave_tv.visit"))
AddPlot("Pseudocolor", "transient")
DrawPlots()
QueryOverTime("Volume")
Query("Volume")
s = GetQueryOutputString()
QueryOverTime("Max")
Query("Max")
s = s + GetQueryOutputString()
SetActiveWindow(2)
DeleteAllPlots()
SetActiveWindow(1)
DeleteAllPlots()
# bug '6042
OpenDatabase(silo_data_path("wave.visit"))
AddPlot("Pseudocolor", "pressure")
DrawPlots()
TurnMaterialsOn()
QueryOverTime("3D surface area", stride=10)
SetActiveWindow(2)
DeleteAllPlots()
SetActiveWindow(1)
Query("3D surface area")
s = s + GetQueryOutputString()
TestText("QueryAfterQueryOverTime", s)
SetActiveWindow(2)
DeleteAllPlots()
SetActiveWindow(1)
DeleteAllPlots()
def TestMili():
# bug '6430
OpenDatabase(data_path("mili_test_data/single_proc/m_plot.mili"))
AddPlot("Pseudocolor", "Primal/node/nodvel/vz")
DrawPlots()
ResetQueryOverTimeAttributes()
QueryOverTime("Volume")
SetActiveWindow(2)
ResetView()
InitAnnotation()
Test("TimeQuery_mili_01")
DeleteAllPlots()
SetActiveWindow(1)
QueryOverTime("Max")
SetActiveWindow(2)
InitAnnotation()
Test("TimeQuery_mili_02")
DeleteAllPlots()
SetActiveWindow(1)
p = GetPickAttributes()
p.doTimeCurve = 1
p.timePreserveCoord = 0
SetPickAttributes(p)
NodePick(122, 161)
p.doTimeCurve = 0
SetPickAttributes(p)
SetActiveWindow(2)
InitAnnotation()
Test("TimeQuery_mili_03")
DeleteAllPlots()
SetActiveWindow(1)
qvars = ("Primal/shell/edv1", "Primal/shell/edv2")
QueryOverTime("TrajectoryByZone", element=242, vars=qvars)
SetActiveWindow(2)
ResetView()
InitAnnotation()
Test("TimeQuery_mili_04")
DeleteAllPlots()
SetActiveWindow(1)
DeleteAllPlots()
def MultiVarTimePick():
OpenDatabase(silo_data_path("wave.visit"))
AddPlot("Pseudocolor", "pressure")
DrawPlots()
pa = GetPickAttributes()
pa.doTimeCurve = 1
pa.timeCurveType = pa.Single_Y_Axis
SetPickAttributes(pa)
vars =("pressure", "v", "direction_magnitude")
PickByNode(8837, vars)
SetActiveWindow(2);
InitAnnotation()
Test("TimePick_MultiVar_01")
DeleteAllPlots()
SetActiveWindow(1)
pa.timeCurveType = pa.Multiple_Y_Axes
SetPickAttributes(pa)
PickByNode(8837, vars)
SetActiveWindow(2);
Test("TimePick_MultiVar_02")
DeleteAllPlots()
# remove plots from window 1
SetActiveWindow(1)
DeleteAllPlots()
OpenDatabase(data_path("mili_test_data/single_proc/m_plot.mili"))
AddPlot("Pseudocolor", "Primal/shell/inteng")
DrawPlots()
pa.timePreserveCoord = 0
pa.timeCurveType = pa.Single_Y_Axis
SetPickAttributes(pa)
vars = ("default", "Primal/shell/normal_magnitude")
PickByZone(233, vars)
SetActiveWindow(2);
Test("TimePick_MultiVar_03")
DeleteAllPlots()
SetActiveWindow(1)
pa.timeCurveType = pa.Multiple_Y_Axes
SetPickAttributes(pa)
PickByZone(233, vars)
SetActiveWindow(2);
Test("TimePick_MultiVar_04")
DeleteAllPlots()
SetActiveWindow(1)
DeleteAllPlots()
def TestPickRangeTimeQuery():
OpenDatabase(silo_data_path("wave_tv.visit"))
SetTimeSliderState(17)
AddPlot("Pseudocolor", "v")
DrawPlots()
pickAtts = GetPickAttributes()
pickAtts.doTimeCurve = 0
pickAtts.variables = ("default", "v")
pickAtts.timeCurveType = pickAtts.Single_Y_Axis
SetPickAttributes(pickAtts)
#
# Return the curves without plotting, and show
# highlights.
#
pickAtts.showPickHighlight = 1
SetPickAttributes(pickAtts)
options = {}
options["pick_range"] = "100-105, 100, 1"
options["do_time"] = 0
options["return_curves"] = 1
output_dict = PickByZone(options)
s = str(output_dict)
Test("TimePickRange_00")
TestText("TimePickRangeDict_00",s)
ClearPickPoints()
#
# Plot the curves, but don't return them.
#
pickAtts.showPickHighlight = 0
SetPickAttributes(pickAtts)
options = {}
options["pick_range"] = "100-105, 100, 1"
options["do_time"] = 1
options["return_curves"] = 0
options["start_time"] = 10
options["end_time"] = 14
options["stride"] = 2
output_dict = PickByNode(options)
s = str(output_dict)
SetActiveWindow(2)
Test("TimePickRange_01")
TestText("TimePickRangeDict_01",s)
ClearPickPoints()
SetActiveWindow(1)
#
# Plot the curves, and return them.
#
pickAtts.showPickHighlight = 0
SetPickAttributes(pickAtts)
options = {}
options["pick_range"] = "100-105"
options["do_time"] = 1
options["return_curves"] = 1
options["start_time"] = 20
options["end_time"] = 60
options["stride"] = 2
output_dict = PickByNode(options)
s = str(output_dict)
SetActiveWindow(2)
Test("TimePickRange_02")
TestText("TimePickRangeDict_02",s)
SetActiveWindow(1)
ClearPickPoints()
DeleteAllPlots()
ResetPickLetter()
SetActiveWindow(1)
ClearPickPoints()
DeleteAllPlots()
ResetPickLetter()
def TestReturnValue():
#
# There used to be a bug where the return value
# from previous picks would propagate to the following
# time query. Let's make sure this isn't re-introduced.
#
OpenDatabase(silo_data_path("wave.visit"))
AddPlot("Pseudocolor", "v")
DrawPlots()
pickAtts = GetPickAttributes()
pickAtts.doTimeCurve = 0
pickAtts.variables = ("default", "v")
pickAtts.timeCurveType = pickAtts.Single_Y_Axis
SetPickAttributes(pickAtts)
time1 = NodePick(coord=(3, .5, 3), do_time=1, start_time=0, end_time=70)
no_time = NodePick(coord=(2, .2, 2), do_time=0)
time2 = NodePick(coord=(3, .5, 3), do_time=1, start_time=0, end_time=70)
AssertEqual("Pick Updated", type(time1), type(time2))
ClearPickPoints()
DeleteAllPlots()
ResetPickLetter()
def TestDirectDatabaseRoute():
#
# Cleanup any plots that haven't been deleted yet.
#
SetActiveWindow(2)
DeleteAllPlots()
SetActiveWindow(1)
DeleteAllPlots()
OpenDatabase(data_path("mili_test_data/single_proc/d3samp6_10_longrun.plt.mili"))
AddPlot("Pseudocolor", "Primal/Shared/edrate")
DrawPlots()
element = 116
domain = 0
preserve = 0
start = 0
stride = 1
stop = 10000
vars = ("default")
#
# First, let's time the query. This is hard to predict because it depends on the
# machine's architecture, but we can make an educated guess. The direct
# route should take under a second, and the old route should take at least
# 30 seconds. We'll give ourselves a threshold of 10 seconds to be safe.
#
import time
thresh = 10
timer_start = time.time()
PickByZone(curve_plot_type=0, vars=vars, do_time=1, domain=domain, element=element,
preserve_coord=preserve, end_time=stop, start_time=start, stride=stride)
timer_stop = time.time()
res = timer_stop - timer_start
AssertLTE("Timing Direct Database Query", res, thresh)
SetActiveWindow(2)
Test("Direct_Database_Route_00")
DeleteAllPlots()
SetActiveWindow(1)
#
# Like the original QOT, the direct route creates a clone, but this clone
# differs in that its resulting dataset will NOT match the original dataset.
# Let's make sure the active dataset is being updated to the old plot by
# performing a new pick (not through time).
#
PickByZone(do_time=0, domain=domain, element=element)
Test("Direct_Database_Route_01")
#
# Test basic range settings.
#
start = 100
stop = 900
stride = 10
PickByZone(curve_plot_type=0, vars=vars, do_time=1, domain=domain, element=element,
preserve_coord=preserve, end_time=stop, start_time=start, stride=stride)
stride = 1
start = 0
stop = 10000
SetActiveWindow(2)
Test("Direct_Database_Route_02")
DeleteAllPlots()
SetActiveWindow(1)
DeleteAllPlots()
AddPlot("Pseudocolor", "Primal/node/nodacc/ax")
DrawPlots()
# This tests two things:
# 1. Plotting a node pick curve.
# 2. Using a direct route query on magnitude expression.
#
vars=("Primal/node/nodacc_magnitude")
PickByNode(curve_plot_type=0, vars=vars, do_time=1, domain=domain, element=element,
preserve_coord=preserve, end_time=stop, start_time=start, stride=stride)
SetActiveWindow(2)
Test("Direct_Database_Route_03")
DeleteAllPlots()
SetActiveWindow(1)
DeleteAllPlots()
OpenDatabase(data_path("mili_test_data/single_proc/m_plot.mili"))
AddPlot("Pseudocolor", "Primal/brick/stress/sx")
DrawPlots()
#
# Test plotting multiple variables at once.
#
element = 489
vars=("Primal/brick/stress/sz", "Primal/brick/stress/sx")
PickByZone(curve_plot_type=0, vars=vars, do_time=1, domain=domain, element=element,
preserve_coord=preserve, end_time=stop, start_time=start, stride=stride)
SetActiveWindow(2)
Test("Direct_Database_Route_04")
DeleteAllPlots()
SetActiveWindow(1)
#
# Testing the multi curve plot.
#
PickByZone(curve_plot_type=1, vars=vars, do_time=1, domain=domain, element=element,
preserve_coord=preserve, end_time=stop, start_time=start, stride=stride)
SetActiveWindow(2)
Test("Direct_Database_Route_05")
DeleteAllPlots()
SetActiveWindow(1)
#
# Test multi-domain data.
#
DeleteAllPlots()
OpenDatabase(data_path("mili_test_data/multi_proc/d3samp6.plt.mili"))
AddPlot("Pseudocolor", "Primal/Shared/edrate")
DrawPlots()
domain = 1
element = 11
vars = ("default")
PickByZone(curve_plot_type=0, vars=vars, do_time=1, domain=domain, element=element,
preserve_coord=preserve, end_time=stop, start_time=start, stride=stride)
SetActiveWindow(2)
Test("Direct_Database_Route_06")
DeleteAllPlots()
SetActiveWindow(1)
DeleteAllPlots()
#
# Now let's test a variable that is not defined on all
# timesteps.
#
db = silo_data_path("wave_tv*.silo database")
OpenDatabase(db)
SetTimeSliderState(17)
ReOpenDatabase(db)
AddPlot("Pseudocolor", "transient")
DrawPlots()
pick = GetPickAttributes()
pick.doTimeCurve = 1
pick.timePreserveCoord = 0
SetPickAttributes(pick)
PickByNode(element=327)
pick.doTimeCurve = 0
pick.timePreserveCoord = 1
SetPickAttributes(pick)
SetActiveWindow(2)
InitAnnotation()
Test("Direct_Database_Route_07")
DeleteAllPlots()
SetActiveWindow(1)
DeleteAllPlots()
#
# Next, let's test a vector plot. The vectors should be reduced
# to their magnitudes.
#
AddPlot("Vector", "direction")
DrawPlots()
pick = GetPickAttributes()
pick.doTimeCurve = 1
pick.timePreserveCoord = 0
SetPickAttributes(pick)
PickByNode(element=10)
SetActiveWindow(2)
InitAnnotation()
Test("Direct_Database_Route_08")
DeleteAllPlots()
SetActiveWindow(1)
DeleteAllPlots()
def TestOperatorCreatedVar():
OpenDatabase(silo_data_path("wave.visit"))
DefineVectorExpression("normals", "cell_surface_normal(quadmesh)")
AddPlot("Pseudocolor", "operators/Flux/quadmesh")
fluxAtts = FluxAttributes()
fluxAtts.flowField = "direction"
SetOperatorOptions(fluxAtts)
AddOperator("Slice")
sliceAtts = SliceAttributes()
sliceAtts.axisType = sliceAtts.Arbitrary
sliceAtts.normal = (0, 1, 0)
sliceAtts.originType = sliceAtts.Percent
sliceAtts.originPercent = 50
sliceAtts.project2d = 0
SetOperatorOptions(sliceAtts)
AddOperator("DeferExpression")
deferAtts = DeferExpressionAttributes()
deferAtts.exprs = ("normals")
SetOperatorOptions(deferAtts)
# we want slice before flux, so demote it
DemoteOperator(1)
DrawPlots()
qt = GetQueryOverTimeAttributes()
qt.timeType = qt.Cycle
SetQueryOverTimeAttributes(qt)
QueryOverTime("Weighted Variable Sum")
SetActiveWindow(2)
InitAnnotation()
Test("OperatorCreatedVar_01")
DeleteAllPlots()
SetActiveWindow(1)
DeleteAllPlots()
DeleteExpression("normals")
CloseDatabase(silo_data_path("wave.visit"))
def TimeQueryMain():
TestAllTimeQueries()
TestFilledBoundary()
TestOperators()
TestExpressions()
TestTransientVariable()
TestSpecifyTimeQueryWindow()
TestTimeVaryingSIL()
TestQueryAfterQueryOverTime()
TestMili()
MultiVarTimePick()
TestPickRangeTimeQuery()
TestReturnValue()
TestDirectDatabaseRoute()
TestOperatorCreatedVar()
# main
InitAnnotation()
SetCurvePlotDefaults()
TimeQueryMain()
Exit()
|
py
|
1a55d8e56ee191f0fafbe8e602798de989b8aaa6
|
import numpy as np
"""
t: 0...1
x: -1...1
dz/dt = x
dz/dx = (1-t)*x
z|t>1 = 1.0
"""
def generator(mbsize):
delta = 0.2
while True:
s0 = np.random.random((mbsize,2))
s0[:,0] = s0[:,0]*2-1
x0 = s0[:,0]
t0 = s0[:,1]
direction = np.random.randint(0, 4, mbsize)
s1 = s0.copy()
s1[direction==0, 0] += delta # x+
s1[direction==1, 0] -= delta # x-
s1[direction==2, 1] += delta # t+
s1[direction==3, 1] -= delta # t-
x1 = s1[:,0]
t1 = s1[:,1]
dz = np.empty_like(x0)
dz[direction==0] = ((1-t0)*x0*delta)[direction==0]
dz[direction==1] = (-(1-t0)*x0*delta)[direction==1]
dz[direction==2] = (x0*delta)[direction==2]
dz[direction==3] = (-x0*delta)[direction==3]
boundary = (t1 > 1.0) # * (np.abs(x1) < 0.1)
dz[boundary] = 1.0
yield [s0, s1, np.asarray(boundary, dtype=np.float32)], dz
def sample(nx, nt, model):
x = np.arange(-1.0, 1.0, 2.0/nx)
t = np.arange(-0.1, 1.1, 1.2/nt)
xg, tg = np.meshgrid(x, t)
s = np.array((xg.reshape((-1,)),tg.reshape((-1,)))).T
print "s:",s.shape,s
z = model.predict_on_batch(s)
return xg, tg, z[:,0].reshape(xg.shape)
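# Editor's usage sketch (an assumption, not in the original file): draw one minibatch
# from generator() and inspect shapes; wiring up the Keras model expected by sample()
# is omitted here.
if __name__ == "__main__":
    gen = generator(8)
    (s0, s1, boundary), dz = next(gen)
    print("s0:", s0.shape, "s1:", s1.shape, "boundary:", boundary.shape, "dz:", dz.shape)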
|
py
|
1a55da32a6ddeb546a84ada4134fa554acd5e731
|
from pydub import AudioSegment
import requests
import easygui
# get the stuff for making the mp3
text = easygui.enterbox(msg='Enter the text for the spooky man to say.', title='Damon, I love you!', default='', strip=True)
headers = {
'Connection': 'keep-alive',
'Accept': '*/*',
'Origin': 'https://fasthub.net',
'X-Requested-With': 'XMLHttpRequest',
'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/79.0.3945.117 Safari/537.36',
'Content-Type': 'application/x-www-form-urlencoded',
'Sec-Fetch-Site': 'same-origin',
'Sec-Fetch-Mode': 'cors',
'Referer': 'https://fasthub.net/',
'Accept-Encoding': 'gzip, deflate, br',
'Accept-Language': 'en-US,en;q=0.9',
}
# fasthub.net macintalk voice whisper
data = {
'text': text,
'lang': 'en-us en en-US',
'langTrans': 'en-us en en-US',
'voiceType': 'whisper',
'amplitude': '109',
'pitch': '51',
'speed': '80',
'repeat': '0'
}
response = requests.post('https://fasthub.net/plauder', headers=headers, data=data)
mp3stop = response.text.split('#')
mp3url = 'https://fasthub.net/speak/' + mp3stop[0] + '.mp3'
mp3 = requests.get(mp3url, allow_redirects=True)
with open('mp3ofVoice.mp3', 'wb') as f: f.write(mp3.content)
#Put it together
voice = easygui.fileopenbox(title='Choose speech audio')
mp3fromweb = AudioSegment.from_mp3("mp3ofVoice.mp3")
mp3voice = AudioSegment.from_mp3(voice)
mp3guitar = AudioSegment.from_mp3("guitarwail.mp3")
length=len(mp3voice)
combined = mp3guitar.overlay(mp3voice, gain_during_overlay=-12)
final = mp3fromweb + combined
gaming = final.export(text+".mp3", format="mp3")
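# Net effect (per pydub's gain_during_overlay semantics): the guitar track is ducked by 12 dB
# wherever the chosen speech clip plays, and the fasthub TTS clip is prepended to that mix
# before exporting everything as "<text>.mp3".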
|
py
|
1a55da3a2024b2571e5347a6468ab60ad8caffd5
|
#!/usr/bin/env python
# encoding: utf-8
# Modifications copyright Amazon.com, Inc. or its affiliates.
# Carlos Rafael Giani, 2006 (dv)
# Tamas Pal, 2007 (folti)
# Nicolas Mercier, 2009
# Matt Clarkson, 2012
import os, sys, re, tempfile
from waflib import Utils, Task, Logs, Options, Errors
from waflib.Logs import debug, warn
from waflib.Tools import c_preproc, ccroot, c, cxx, ar
from waflib.Configure import conf
from waflib.TaskGen import feature, after, after_method, before_method
import waflib.Node
# The compiler will issue a warning if some flags are specified more than once.
# The command is constructed from subsets that may contain conflicting flags.
# This list of lists contains all the sets of flags that are made unique.
UNIQUE_FLAGS_LIST = [
["/arch:IA32", "/arch:SSE", "/arch:SSE2", "/arch:AVX", "/arch:AVX2"], # code gen for arch
["/clr", "/clr:initialAppDomain", "/clr:noAssembly", "/clr:nostdlib", "/clr:pure", "/clr:safe"], # common language runtime
["/EHs", "/EHa", "/EHac", "/EHsc"], # exception handling
["/errorReport:none", "/errorReport:prompt", "/errorReport:queue", "/errorReport:send"], # report internal compiler errors
["/favor:blend", "/favor:ATOM", "/favor:AMD64", "/favor:INTEL64"], # optimize for arch
["/fp:precise", "/fp:except", "/fp:except-", "/fp:fast", "/fp:strict"], # floating point behavior
["/Gd", "/Gr", "/Gv", "/Gz"], # calling convention
["/GL", "/GL-"], # whole program optimization
["/GR", "/GR-"], # runtime type information
["/GS", "/GS-"], # buffer security checks
["/Gs", "/Gs0", "/Gs4096"], # control stack checking calls
["/Gw", "/Gw-"], # global data optimization
["/Gy", "/Gy-"], # enable function level linking
["/O1", "/O2", "/Od", "/Ox"], # optimization level
["/Ob0", "/Ob1", "/Ob2"], # inline expansion
["/Oi", "/Oi-"], # intrinsics
["/Os", "/Ot"], # favor small code/ fast code
["/Oy", "/Oy-"], # frame pointer omission
["/MD", "/MT", "/LD", "/MDd", "/MTd", "/LDd"], # runtime library
["/RTC1","/RTCc","/RTCs","/RTCu"], # runtime error checks
["/volatile","/volatile:iso", "/volatile:ms"], # volatile keyword handling
["/vd0", "/vd1", "/vd2"], # disable construction displacements
["/ZW", "/ZW:nostdlib"], # windows runtime compilation
["/sdl", "/sdl-"], # enable additional security checks
["/vmb", "/vmg"], # always declare a class before using a pointer to it
["/vmm", "/vms", "/vmv"], # inheritance of yet-to-be-defined classes
["/W0", "/W1", "/W2", "/W3", "/W4"], # error level
["/WX", "/WX-", "/WX:NO"], # treat warnings as errors
["/Z7", "/Zi", "/ZI"], # debug information format
["/Za", "/Ze"], # disable language extensions
["/Zc:forScope", "/Zc:forScope-"], # for loop scope conformance
["/Zc:wchar_t", "/Zc:wchar_t-"], # wchar_t maps to __wchar_t
["/Zc:auto", "/Zc:auto-"], # deduce variable type
["/Zc:trigraphs", "/Zc:trigraphs-"], # character substitutions if character isn't in charpage
["/Zc:rvalueCast", "/Zc:rvalueCast-"], # enforce type conversion rules
["/Zc:strictStrings", "/Zc:strictStrings-"], # disable string literal type conversion
["/Zc:inline", "/Zc:inline-"], # remove unreferenced comdat sections
["/Zp", "/Zp:1", "/Zp:2", "/Zp:4", "/Zp:8", "/Zp:16"] # struct member alignment
]
# convert list of flags that must be unique to dictionary
UNIQUE_FLAGS_DICT = {}
for idx, flags in enumerate(UNIQUE_FLAGS_LIST):
assert(isinstance(flags,list))
for flag in flags:
UNIQUE_FLAGS_DICT[flag] = idx # all flags from the list have the same value, just using index as a dummy unique val
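# For example, '/W3' and '/W4' map to the same group index, while '/W3' and '/GL' do not:
# UNIQUE_FLAGS_DICT['/W3'] == UNIQUE_FLAGS_DICT['/W4'] and UNIQUE_FLAGS_DICT['/W3'] != UNIQUE_FLAGS_DICT['/GL']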
def exec_mf(self):
"""
Create the manifest file
"""
env = self.env
mtool = env['MT']
if not mtool:
return 0
self.do_manifest = False
outfile = self.outputs[0].abspath()
manifest = None
for out_node in self.outputs:
if out_node.name.endswith('.manifest'):
manifest = out_node.abspath()
break
if manifest is None:
# Should never get here. If we do, it means the manifest file was
# never added to the outputs list, thus we don't have a manifest file
# to embed, so we just return.
return 0
# embedding mode. Different for EXE's and DLL's.
# see: http://msdn2.microsoft.com/en-us/library/ms235591(VS.80).aspx
mode = ''
if 'cprogram' in self.generator.features or 'cxxprogram' in self.generator.features:
mode = '1'
elif 'cshlib' in self.generator.features or 'cxxshlib' in self.generator.features:
mode = '2'
debug('msvc: embedding manifest in mode %r' % mode)
lst = []
lst.append(env['MT'])
lst.extend(Utils.to_list(env['MTFLAGS']))
lst.extend(['-manifest',manifest])
if hasattr(self.generator, 'additional_manifests'):
if not isinstance(self.generator.additional_manifests, list): # the additional manifests could be a string
self.generator.additional_manifests = [self.generator.additional_manifests]
for element in self.generator.additional_manifests: # add each one with its own path
lst.append( self.generator.path.abspath() + '/' + element)
lst.append('-outputresource:%s;%s' % (outfile, mode))
# note that because we call exec_command and give it a list of params, these become the subprocess argv*
# and thus it is not necessary for us to escape them with quotes or anything like that.
lst = [lst]
return self.exec_command(*lst)
def quote_response_command(self, flag):
flag = flag.replace('\\', '\\\\') # escape any backslashes
flag = flag.replace('"', '\\"') # escape any quotes
if flag.find(' ') > -1:
for x in ('/LIBPATH:', '/IMPLIB:', '/OUT:', '/I'):
if flag.startswith(x):
flag = '%s"%s"' % (x, flag[len(x):])
break
else:
flag = '"%s"' % flag
return flag
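# Example: '/LIBPATH:C:\Program Files\Lib' becomes '/LIBPATH:"C:\\Program Files\\Lib"'
# (backslashes doubled, and the space-containing value wrapped in quotes after the prefix).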
def exec_response_command(self, cmd, **kw):
# not public yet
try:
tmp = None
if sys.platform.startswith('win') and isinstance(cmd, list) and len(' '.join(cmd)) >= 16384:
tmp_files_folder = self.generator.bld.get_bintemp_folder_node().make_node('TempFiles')
program = cmd[0] #unquoted program name, otherwise exec_command will fail
cmd = [self.quote_response_command(x) for x in cmd]
# Determine an appropriate filename for the output file (displayed by Incredibuild)
if self.outputs and len(self.outputs[0].abspath()):
tmp_file_name = os.path.basename(self.outputs[0].abspath())
else:
# strip surrounding quotes in case it's a string like '"something="somethingelse"' which would cause issues
out_file = os.path.split(cmd[-1].strip('"'))
tmp_file_name = out_file[1]
(fd, tmp) = tempfile.mkstemp(prefix=tmp_file_name, dir=tmp_files_folder.abspath())
os.write(fd, '\r\n'.join(cmd[1:]).encode())
os.close(fd)
cmd = [program, '@' + tmp]
# no return here, that's on purpose
ret = self.generator.bld.exec_command(cmd, **kw)
finally:
if tmp:
try:
os.remove(tmp)
except OSError:
pass # anti-virus and indexers can keep the files open -_-
return ret
########## stupid evil command modification: concatenate the tokens /Fx, /doc, and /x: with the next token
def exec_command_msvc(self, *k, **kw):
"""
Change the command-line execution for msvc programs.
Instead of quoting all the paths and keep using the shell, we can just join the options msvc is interested in
"""
# If bullseye coverage tool is in the environment, and we are executing the CXX or C compiler, then
# we prefix with the bullseye coverage tool. Otherwise, just run the regular tools.
# Ideally we need a way to do this cleanly on other platforms, but this implies a post hook of some kind to change
# the CXX task run_str, and I could not immediately see a clean way to do that, especially conditionally as
# we need to do below.
if 'BULL_COVC' in self.env:
excluded_modules = getattr(self.generator.bld.options,'bullseye_excluded_modules',"").replace(' ', '').split(',')
if "" in excluded_modules:
excluded_modules.remove("")
# Figure out which package we are building, and check if it is in the list of packages we want coverage for
# If the package list is empty, then we do coverage building for the whole project.
# This applies to the CC/CXX steps. We must always link with coverage if we are going to get measurements.
included_modules = getattr(self.generator.bld.options,'bullseye_included_modules',"").replace(' ', '').split(',')
if "" in included_modules:
included_modules.remove("")
if self.generator.name not in excluded_modules and (not included_modules or self.generator.name in included_modules):
if k[0][0] == self.env['CXX'] or k[0][0] == self.env['CC']:
k = ([self.env['BULL_COVC'], '--file', self.env['BULL_COV_FILE']] + k[0],)
# We must link with bullseye regardless of which way the project is set (with or without coverage) to avoid link errors with included libraries.
if 'BULL_COVLINK' in self.env and (k[0][0] == self.env['LINK'] or k[0][0] == self.env['LINK_CXX'] or k[0][0] == self.env['LINK_CC']):
k = ([self.env['BULL_COVLINK']] + k[0],)
# 1) Options whose argument follows without a space are joined, e.g. /Fo FilePath -> /FoFilePath
# 2) Options whose last character is ':' are joined with the next token, e.g. /OUT: FilePath -> /OUT:FilePath
if isinstance(k[0], list):
lst = []
carry = ''
join_with_next_list_item = ['/Fo', '/doc', '/Fi', '/Fa']
for a in k[0]:
if a in join_with_next_list_item or a[-1] == ':':
carry = a
else:
lst.append(carry + a)
carry = ''
k = [lst]
bld = self.generator.bld
try:
if not kw.get('cwd', None):
kw['cwd'] = bld.cwd
except AttributeError:
bld.cwd = kw['cwd'] = bld.variant_dir
ret = self.exec_response_command(k[0], **kw)
if not ret and getattr(self, 'do_manifest', None):
ret = self.exec_mf()
return ret
def wrap_class(class_name):
"""
Manifest file processing and @response file workaround for command-line length limits on Windows systems
The indicated task class is replaced by a subclass to prevent conflicts in case the class is wrapped more than once
"""
cls = Task.classes.get(class_name, None)
if not cls:
return
derived_class = type(class_name, (cls,), {})
def exec_command(self, *k, **kw):
if self.env['CC_NAME'] == 'msvc':
return self.exec_command_msvc(*k, **kw)
else:
return super(derived_class, self).exec_command(*k, **kw)
# Chain-up monkeypatch needed since exec_command() is in base class API
derived_class.exec_command = exec_command
# No chain-up behavior needed since the following methods aren't in
# base class API
derived_class.exec_response_command = exec_response_command
derived_class.quote_response_command = quote_response_command
derived_class.exec_command_msvc = exec_command_msvc
derived_class.exec_mf = exec_mf
return derived_class
for k in 'c cxx cprogram cxxprogram cshlib cxxshlib cstlib cxxstlib'.split():
wrap_class(k)
@feature('cxxprogram', 'cxxshlib', 'cprogram', 'cshlib', 'cxx', 'c')
@after_method('apply_incpaths')
@after_method('add_pch_to_dependencies')
def set_pdb_flags(self):
if not 'msvc' in (self.env.CC_NAME, self.env.CXX_NAME):
return
if not self.bld.is_option_true('generate_debug_info'):
return
# find the last debug symbol type of [/Z7, /Zi, /ZI] applied in cxxflags.
last_debug_option = ''
for opt in reversed(self.env['CXXFLAGS']):
if opt in ['/Z7', '/Zi', '/ZI']:
last_debug_option = opt
break
if last_debug_option in ['/Zi', '/ZI']:
# Compute PDB file path
pdb_folder = self.path.get_bld().make_node(str(self.idx))
pdb_cxxflag = '/Fd{}'.format(pdb_folder.abspath())
# Make sure the PDB folder exists
pdb_folder.mkdir()
# Add CXX and C Flags
for t in getattr(self, 'compiled_tasks', []):
t.env.append_value('CXXFLAGS', pdb_cxxflag)
t.env.append_value('CFLAGS', pdb_cxxflag)
# Add PDB also to Precompiled header. pch_task is not in compiled_tasks
if getattr(self, 'pch_task', None):
self.pch_task.env.append_value('CXXFLAGS', pdb_cxxflag)
self.pch_task.env.append_value('CFLAGS', pdb_cxxflag)
def is_node_qt_rc_generated(self,node):
if node.is_child_of(self.bld.bldnode):
raw_name = os.path.splitext(os.path.basename(node.abspath()))[0]
if raw_name.endswith('_rc'):
return True
return False
@feature('cxx')
@before_method('process_source')
def add_pch_msvc(self):
if not 'msvc' in (self.env.CC_NAME, self.env.CXX_NAME):
return
if Utils.unversioned_sys_platform() != 'win32':
return
# Create Task to compile PCH
if not getattr(self, 'pch', ''):
return
if not self.bld.is_option_true('use_precompiled_header'):
return
# Always assume only one PCH File
pch_source = self.to_nodes(self.pch)[0]
self.pch_header = pch_source.change_ext('.h')
self.pch_header_name = os.path.split(self.pch_header.abspath())[1]
# Generate PCH per target project idx
# Avoids the case where two projects have the same PCH output path but compile the PCH with different compiler options, i.e. defines, includes, ...
self.pch_file = pch_source.change_ext('.%d.pch' % self.idx)
self.pch_object = pch_source.change_ext('.%d.obj' % self.idx)
# Create PCH Task
self.pch_task = pch_task = self.create_task('pch_msvc', pch_source, [self.pch_object, self.pch_file])
pch_task.env.append_value('PCH_NAME', self.pch_header_name)
pch_task.env.append_value('PCH_FILE', '/Fp' + self.pch_file.abspath())
pch_task.env.append_value('PCH_OBJ', self.pch_object.abspath())
@feature('cxx')
@after_method('apply_incpaths')
def add_pch_to_dependencies(self):
if not 'msvc' in (self.env.CC_NAME, self.env.CXX_NAME):
return
# Create Task to compile PCH
if not getattr(self, 'pch_object', ''):
return
pch_abs_path = self.pch_file.abspath()
pch_flag = '/Fp' + pch_abs_path
pch_header = '/Yu' + self.pch_header_name
# Append PCH File to each compile task
for t in getattr(self, 'compiled_tasks', []):
input_file = t.inputs[0].abspath()
file_specific_settings = self.file_specifc_settings.get(input_file, None)
if file_specific_settings and 'disable_pch' in file_specific_settings and file_specific_settings['disable_pch'] == True:
continue # Don't append PCH to files for which we don't use them
if getattr(t, 'disable_pch', False) == True:
continue # Don't append PCH to files for which we don't use them
if t.__class__.__name__ in ['cxx','qxx']: #Is there a better way to ensure cpp only?
if is_node_qt_rc_generated(self,t.inputs[0]):
t.env.append_value('CXXFLAGS', '/Y-')
else:
t.env.append_value('CXXFLAGS', pch_header)
t.env.append_value('CXXFLAGS', pch_flag)
# Append PCH to task input to ensure correct ordering
t.dep_nodes.append(self.pch_object)
# Append the pch object to the link task
if getattr(self, 'link_task', None):
self.link_task.inputs.append(self.pch_object)
class pch_msvc(waflib.Task.Task):
run_str = '${CXX} ${PCH_CREATE_ST:PCH_NAME} ${CXXFLAGS} ${CPPPATH_ST:INCPATHS} ${DEFINES_ST:DEFINES} ${SRC} ${CXX_TGT_F}${PCH_OBJ} ${PCH_FILE}'
scan = c_preproc.scan
color = 'BLUE'
def exec_command(self, *k, **kw):
return exec_command_msvc(self, *k, **kw)
def exec_response_command(self, *k, **kw):
return exec_response_command(self, *k, **kw)
def quote_response_command(self, *k, **kw):
return quote_response_command(self, *k, **kw)
def exec_mf(self, *k, **kw):
return exec_mf(self, *k, **kw)
def strip_all_but_last_dependent_options(flags):
seen = set()
delete = []
for idx, flag in enumerate(reversed(flags)):
try:
val = UNIQUE_FLAGS_DICT[flag]
if val not in seen:
seen.add(val)
continue
# mark for delete
delete.append(len(flags) -1 -idx)
except:
pass
for idx in reversed(delete):
del flags[idx]
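# Illustration: given flags = ['/W3', '/O2', '/W4'], only the last flag from each
# mutually-exclusive group survives, so the list is mutated in place to ['/O2', '/W4'].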
def verify_options_common(env):
strip_all_but_last_dependent_options(env.CFLAGS)
strip_all_but_last_dependent_options(env.CXXFLAGS)
@feature('c', 'cxx')
@after_method('apply_link')
@after_method('add_pch_to_dependencies')
def verify_compiler_options_msvc(self):
if not 'msvc' in (self.env.CC_NAME, self.env.CXX_NAME):
return
if Utils.unversioned_sys_platform() != 'win32':
return
# Verify compiler option (strip all but last for dependant options)
for t in getattr(self, 'compiled_tasks', []):
verify_options_common(t.env)
# Verify pch_task options (strip all but last for dependant options)
if hasattr(self, 'pch_task'):
verify_options_common(self.pch_task.env)
# Strip unsupported ARCH linker option
if hasattr(self, 'link_task'):
del self.link_task.env['ARCH']
#############################################################################
# Code for auto-recognition of Visual Studio Compiler and Windows SDK Path
# Taken from the original WAF code
#############################################################################
all_msvc_platforms = [ ('x64', 'amd64')]
"""List of msvc platforms"""
@conf
def auto_detect_msvc_compiler(conf, version, target, windows_kit):
conf.env['MSVC_VERSIONS'] = [version]
conf.env['MSVC_TARGETS'] = [target]
conf.autodetect(windows_kit, True)
conf.find_msvc()
@conf
def autodetect(conf, windows_kit, arch = False):
v = conf.env
if arch:
compiler, version, path, includes, libdirs, arch = conf.detect_msvc(windows_kit, True)
v['DEST_CPU'] = arch
else:
compiler, version, path, includes, libdirs = conf.detect_msvc(windows_kit)
v['PATH'] = path
v['INCLUDES'] = includes
v['LIBPATH'] = libdirs
v['MSVC_COMPILER'] = compiler
try:
v['MSVC_VERSION'] = float(version)
except Exception:
v['MSVC_VERSION'] = float(version[:-3])
@conf
def detect_msvc(conf, windows_kit, arch = False):
versions = get_msvc_versions(conf, windows_kit)
return setup_msvc(conf, versions, arch)
def setup_msvc(conf, versions, arch = False):
platforms = getattr(Options.options, 'msvc_targets', '').split(',')
if platforms == ['']:
platforms=Utils.to_list(conf.env['MSVC_TARGETS']) or [i for i,j in all_msvc_platforms]
desired_versions = getattr(Options.options, 'msvc_version', '').split(',')
if desired_versions == ['']:
desired_versions = conf.env['MSVC_VERSIONS'] or [v for v,_ in versions][::-1]
versiondict = dict(versions)
for version in desired_versions:
try:
targets = dict(versiondict [version])
for target in platforms:
try:
arch,(p1,p2,p3) = targets[target]
compiler,revision = version.rsplit(' ', 1)
if arch:
return compiler,revision,p1,p2,p3,arch
else:
return compiler,revision,p1,p2,p3
except KeyError: continue
except KeyError: continue
conf.fatal('msvc: Impossible to find a valid architecture for building (in setup_msvc)')
MSVC_INSTALLED_VERSIONS = {}
@conf
def get_msvc_versions(conf, windows_kit):
"""
:return: list of compilers installed
:rtype: list of string
"""
global MSVC_INSTALLED_VERSIONS
if not windows_kit in MSVC_INSTALLED_VERSIONS:
MSVC_INSTALLED_VERSIONS[windows_kit] = ''
if len(MSVC_INSTALLED_VERSIONS[windows_kit]) == 0:
lst = []
conf.gather_wsdk_versions(windows_kit, lst)
conf.gather_msvc_versions(windows_kit, lst)
MSVC_INSTALLED_VERSIONS[windows_kit] = lst
return MSVC_INSTALLED_VERSIONS[windows_kit]
def gather_msvc_detected_versions():
#Detected MSVC versions!
version_pattern = re.compile(r'^(\d\d?\.\d\d?)(Exp)?$')
detected_versions = []
for vcver,vcvar in [('VCExpress','Exp'), ('VisualStudio','')]:
try:
prefix = 'SOFTWARE\\Wow6432node\\Microsoft\\'+vcver
all_versions = Utils.winreg.OpenKey(Utils.winreg.HKEY_LOCAL_MACHINE, prefix)
except WindowsError:
try:
prefix = 'SOFTWARE\\Microsoft\\'+vcver
all_versions = Utils.winreg.OpenKey(Utils.winreg.HKEY_LOCAL_MACHINE, prefix)
except WindowsError:
continue
index = 0
while 1:
try:
version = Utils.winreg.EnumKey(all_versions, index)
except WindowsError:
break
index = index + 1
match = version_pattern.match(version)
if not match:
continue
else:
versionnumber = float(match.group(1))
detected_versions.append((versionnumber, version+vcvar, prefix+"\\"+version))
def fun(tup):
return tup[0]
detected_versions.sort(key = fun)
return detected_versions
@conf
def gather_msvc_versions(conf, windows_kit, versions):
vc_paths = []
for (v,version,reg) in gather_msvc_detected_versions():
try:
try:
msvc_version = Utils.winreg.OpenKey(Utils.winreg.HKEY_LOCAL_MACHINE, reg + "\\Setup\\VC")
except WindowsError:
msvc_version = Utils.winreg.OpenKey(Utils.winreg.HKEY_LOCAL_MACHINE, reg + "\\Setup\\Microsoft Visual C++")
path,type = Utils.winreg.QueryValueEx(msvc_version, 'ProductDir')
vc_paths.append((version, os.path.abspath(str(path))))
except WindowsError:
continue
for version,vc_path in vc_paths:
vs_path = os.path.dirname(vc_path)
conf.gather_msvc_targets(versions, version, windows_kit, vc_path)
pass
@conf
def gather_msvc_targets(conf, versions, version, windows_kit, vc_path):
#Looking for normal MSVC compilers!
targets = []
if os.path.isfile(os.path.join(vc_path, 'vcvarsall.bat')):
for target,realtarget in all_msvc_platforms[::-1]:
try:
targets.append((target, (realtarget, conf.get_msvc_version('msvc', version, target, windows_kit, os.path.join(vc_path, 'vcvarsall.bat')))))
except conf.errors.ConfigurationError:
pass
elif os.path.isfile(os.path.join(vc_path, 'Common7', 'Tools', 'vsvars32.bat')):
try:
targets.append(('x86', ('x86', conf.get_msvc_version('msvc', version, 'x86', windows_kit, os.path.join(vc_path, 'Common7', 'Tools', 'vsvars32.bat')))))
except conf.errors.ConfigurationError:
pass
elif os.path.isfile(os.path.join(vc_path, 'Bin', 'vcvars32.bat')):
try:
targets.append(('x86', ('x86', conf.get_msvc_version('msvc', version, '', windows_kit, os.path.join(vc_path, 'Bin', 'vcvars32.bat')))))
except conf.errors.ConfigurationError:
pass
if targets:
versions.append(('msvc '+ version, targets))
def _get_prog_names(conf, compiler):
if compiler=='intel':
compiler_name = 'ICL'
linker_name = 'XILINK'
lib_name = 'XILIB'
else:
# assumes CL.exe
compiler_name = 'CL'
linker_name = 'LINK'
lib_name = 'LIB'
return compiler_name, linker_name, lib_name
@conf
def get_msvc_version(conf, compiler, version, target, windows_kit, vcvars):
"""
Create a bat file to obtain the location of the libraries
:param compiler: ?
:param version: ?
:target: ?
:vcvars: ?
:return: the location of msvc, the location of include dirs, and the library paths
:rtype: tuple of strings
"""
debug('msvc: get_msvc_version: %r %r %r %r', compiler, version, target, windows_kit)
batfile = conf.bldnode.make_node('waf-print-msvc.bat')
batfile.write("""@echo off
set INCLUDE=
set LIB=
call "%s" %s %s
echo PATH=%%PATH%%
echo INCLUDE=%%INCLUDE%%
echo LIB=%%LIB%%;%%LIBPATH%%
""" % (vcvars,target,windows_kit))
sout = conf.cmd_and_log(['cmd', '/E:on', '/V:on', '/C', batfile.abspath()])
lines = sout.splitlines()
if not lines[0]:
lines.pop(0)
MSVC_PATH = MSVC_INCDIR = MSVC_LIBDIR = None
for line in lines:
if line.startswith('PATH='):
path = line[5:]
MSVC_PATH = path.split(';')
elif line.startswith('INCLUDE='):
MSVC_INCDIR = [i for i in line[8:].split(';') if i]
elif line.startswith('LIB='):
MSVC_LIBDIR = [i for i in line[4:].split(';') if i]
if not MSVC_PATH or not MSVC_INCDIR or not MSVC_LIBDIR:
conf.fatal('msvc: Could not find a valid architecture for building (get_msvc_version_3)')
# Check if the compiler is usable at all.
# The detection may return 64-bit versions even on 32-bit systems, and these would fail to run.
env = dict(os.environ)
env.update(PATH = path)
compiler_name, linker_name, lib_name = _get_prog_names(conf, compiler)
cxx = conf.find_program(compiler_name, path_list=MSVC_PATH, silent_output=True)
cxx = conf.cmd_to_list(cxx)
# delete CL if it exists, because it could contain parameters which can change cl's behaviour rather catastrophically.
if 'CL' in env:
del(env['CL'])
try:
try:
conf.cmd_and_log(cxx + ['/help'], env=env)
except Exception as e:
debug('msvc: get_msvc_version: %r %r %r %r -> failure' % (compiler, version, target, windows_kit))
debug(str(e))
conf.fatal('msvc: cannot run the compiler (in get_msvc_version)')
else:
debug('msvc: get_msvc_version: %r %r %r %r -> OK', compiler, version, target, windows_kit)
finally:
conf.env[compiler_name] = ''
# vcvarsall does not always resolve the windows sdk path with VS2015 + Win10, but we know where it is
winsdk_path = _get_win_sdk_path(windows_kit, target)
if winsdk_path:
MSVC_PATH.append(winsdk_path)
return (MSVC_PATH, MSVC_INCDIR, MSVC_LIBDIR)
def _get_win_sdk_path(windows_kit, arch):
path = _find_win_sdk_root(windows_kit)
if path:
is_valid, version, bin_path = _is_valid_win_sdk(path, windows_kit.startswith('10'), windows_kit)
if is_valid:
if version == windows_kit:
return str(os.path.join(bin_path, arch))
else:
Logs.debug('winsdk: Found a working windows SDK (%s), but it does not match the requested version (%s)' % (version, windows_kit))
return ''
def _is_valid_win_sdk(path, is_universal_versioning, desired_version=''):
# Successfully installed windows kits have rc.exe. This file is a downstream dependency of vcvarsall.bat.
def _check_for_rc_file(path):
rc_x64 = os.path.join(path, 'x64\\rc.exe')
rc_x86 = os.path.join(path, 'x86\\rc.exe')
return os.path.isfile(rc_x64) or os.path.isfile(rc_x86)
bin_dir = os.path.join(path, 'bin')
include_dir = os.path.join(path, 'include')
if is_universal_versioning:
potential_sdks = [desired_version] if desired_version else []
if os.path.isdir(include_dir):
# lexically sort the 10.xxx versions in reverse so that latest/highest is first
potential_sdks += sorted(os.listdir(include_dir), reverse=True)
sdk10_versions = [entry for entry in potential_sdks if entry.startswith('10.')]
for sub_version in sdk10_versions:
sub_version_folder = os.path.join(include_dir, sub_version)
if os.path.isdir(os.path.join(sub_version_folder, 'um')):
# check for rc.exe in the sub_version folder's bin, or in the root 10 bin, we just need at least one
for bin_path in (os.path.join(os.path.join(path, 'bin'), sub_version), bin_dir):
if _check_for_rc_file(bin_path):
return True, sub_version, bin_path
else:
if _check_for_rc_file(bin_dir):
version = path.split('\\')[-2]
return True, version, bin_dir
return False, '', ''
def _find_win_sdk_root(winsdk_hint):
"""
Use winreg to find a valid installed windows kit.
Returns empty string if no valid version was found.
See visual studio compatibility charts here:
https://www.visualstudio.com/en-us/productinfo/vs2015-compatibility-vs
"""
try:
installed_roots = Utils.winreg.OpenKey(Utils.winreg.HKEY_LOCAL_MACHINE, 'SOFTWARE\\Wow6432node\\Microsoft\\Windows Kits\\Installed Roots')
except WindowsError:
try:
installed_roots = Utils.winreg.OpenKey(Utils.winreg.HKEY_LOCAL_MACHINE, 'SOFTWARE\\Microsoft\\Windows Kits\\Installed Roots')
except WindowsError:
return ''
if winsdk_hint.startswith('10'):
try:
path, type = Utils.winreg.QueryValueEx(installed_roots, 'KitsRoot10')
return path
except WindowsError:
pass
elif winsdk_hint.startswith('8'):
try:
path, type = Utils.winreg.QueryValueEx(installed_roots, 'KitsRoot81')
return path
except WindowsError:
pass
return ''
@conf
def find_valid_wsdk_version(conf):
path = _find_win_sdk_root("10")
if path:
is_valid, version, bin_path = _is_valid_win_sdk(path, True)
if is_valid:
return version
# No root for sdk 10 found, try 8.1
path = _find_win_sdk_root("8.1")
if path:
is_valid, version, bin_path = _is_valid_win_sdk(path, False)
if is_valid:
return version
return ''
@conf
def gather_wsdk_versions(conf, windows_kit, versions):
"""
Use winreg to add the msvc versions to the input list
:param versions: list to modify
:type versions: list
"""
version_pattern = re.compile(r'^v..?.?\...?.?')
try:
all_versions = Utils.winreg.OpenKey(Utils.winreg.HKEY_LOCAL_MACHINE, 'SOFTWARE\\Wow6432node\\Microsoft\\Microsoft SDKs\\Windows')
except WindowsError:
try:
all_versions = Utils.winreg.OpenKey(Utils.winreg.HKEY_LOCAL_MACHINE, 'SOFTWARE\\Microsoft\\Microsoft SDKs\\Windows')
except WindowsError:
return
index = 0
while 1:
try:
version = Utils.winreg.EnumKey(all_versions, index)
except WindowsError:
break
index = index + 1
if not version_pattern.match(version):
continue
try:
msvc_version = Utils.winreg.OpenKey(all_versions, version)
path,type = Utils.winreg.QueryValueEx(msvc_version,'InstallationFolder')
except WindowsError:
continue
if os.path.isfile(os.path.join(path, 'bin', 'SetEnv.cmd')):
targets = []
for target,arch in all_msvc_platforms:
try:
targets.append((target, (arch, conf.get_msvc_version('wsdk', version, '/'+target, windows_kit, os.path.join(path, 'bin', 'SetEnv.cmd')))))
except conf.errors.ConfigurationError:
pass
versions.append(('wsdk ' + version[1:], targets))
pass
@conf
def find_msvc(conf):
"""Due to path format limitations, limit operation only to native Win32. Yeah it sucks."""
if sys.platform == 'cygwin':
conf.fatal('MSVC module does not work under cygwin Python!')
# the autodetection is supposed to be performed before entering in this method
v = conf.env
path = v['PATH']
compiler = v['MSVC_COMPILER']
version = v['MSVC_VERSION']
compiler_name, linker_name, lib_name = _get_prog_names(conf, compiler)
v.MSVC_MANIFEST = (compiler == 'msvc' and version >= 8) or (compiler == 'wsdk' and version >= 6) or (compiler == 'intel' and version >= 11)
# compiler
cxx = None
if v['CXX']: cxx = v['CXX']
elif 'CXX' in conf.environ: cxx = conf.environ['CXX']
cxx = conf.find_program(compiler_name, var='CXX', path_list=path, silent_output=True)
cxx = conf.cmd_to_list(cxx)
# before setting anything, check if the compiler is really msvc
env = dict(conf.environ)
if path: env.update(PATH = ';'.join(path))
if not conf.cmd_and_log(cxx + ['/nologo', '/help'], env=env):
conf.fatal('the msvc compiler could not be identified')
# c/c++ compiler
v['CC'] = v['CXX'] = cxx[0]
v['CC_NAME'] = v['CXX_NAME'] = 'msvc'
# Bullseye code coverage
if conf.is_option_true('use_bullseye_coverage'):
# TODO: Error handling for this is opaque. This will fail the MSVS 2015 tool check,
# and not say anything about bullseye being missing.
try:
covc = conf.find_program('covc',var='BULL_COVC',path_list = path, silent_output=True)
covlink = conf.find_program('covlink',var='BULL_COVLINK',path_list = path, silent_output=True)
covselect = conf.find_program('covselect',var='BULL_COVSELECT',path_list = path, silent_output=True)
v['BULL_COVC'] = covc
v['BULL_COVLINK'] = covlink
v['BULL_COV_FILE'] = conf.CreateRootRelativePath(conf.options.bullseye_cov_file)
# Update the coverage file with the region selections detailed in the settings regions parameters
# NOTE: should we clear other settings at this point, or allow them to accumulate?
# Maybe we need a flag for that in the setup?
regions = conf.options.bullseye_coverage_regions.replace(' ','').split(',')
conf.cmd_and_log(([covselect] + ['--file', v['BULL_COV_FILE'], '-a'] + regions))
except:
Logs.error('Could not find the Bullseye Coverage tools on the path, or coverage tools are not correctly installed. Coverage build disabled.')
# linker
if not v['LINK_CXX']:
link = conf.find_program(linker_name, path_list=path, silent_output=True)
if link: v['LINK_CXX'] = link
else: conf.fatal('%s was not found (linker)' % linker_name)
v['LINK'] = link
if not v['LINK_CC']:
v['LINK_CC'] = v['LINK_CXX']
# staticlib linker
if not v['AR']:
stliblink = conf.find_program(lib_name, path_list=path, var='AR', silent_output=True)
if not stliblink: return
v['ARFLAGS'] = ['/NOLOGO']
# manifest tool. Not required for VS 2003 and below. Must have for VS 2005 and later
if v.MSVC_MANIFEST:
conf.find_program('MT', path_list=path, var='MT', silent_output=True)
v['MTFLAGS'] = ['/NOLOGO']
# call configure on the waflib winres module to setup the environment for configure
# conf.load('winres') caches the environment as part of the module load key, and we just modified
# the environment, causing the cache to miss, and extra calls import/load the module
# winres is loaded
try:
module = sys.modules['waflib.Tools.winres']
func = getattr(module,'configure',None)
if func:
func(conf)
except Exception as e:
warn('Resource compiler not found. Compiling resource file is disabled')
|
py
|
1a55da770f7d7895895e60d4b2ee010dceb37374
|
import logging
import numpy as np
import torch
import torch.utils.data as data
import torchvision.transforms as transforms
from .datasets import CIFAR10_truncated
logging.basicConfig()
logger = logging.getLogger()
logger.setLevel(logging.INFO)
# generate the non-IID distribution for all methods
def read_data_distribution(filename='./data_preprocessing/non-iid-distribution/CIFAR10/distribution.txt'):
distribution = {}
with open(filename, 'r') as data:
for x in data.readlines():
if '{' != x[0] and '}' != x[0]:
tmp = x.split(':')
if '{' == tmp[1].strip():
first_level_key = int(tmp[0])
distribution[first_level_key] = {}
else:
second_level_key = int(tmp[0])
distribution[first_level_key][second_level_key] = int(tmp[1].strip().replace(',', ''))
return distribution
def read_net_dataidx_map(filename='./data_preprocessing/non-iid-distribution/CIFAR10/net_dataidx_map.txt'):
net_dataidx_map = {}
with open(filename, 'r') as data:
for x in data.readlines():
if '{' != x[0] and '}' != x[0] and ']' != x[0]:
tmp = x.split(':')
if '[' == tmp[-1].strip():
key = int(tmp[0])
net_dataidx_map[key] = []
else:
tmp_array = x.split(',')
net_dataidx_map[key] = [int(i.strip()) for i in tmp_array]
return net_dataidx_map
def record_net_data_stats(y_train, net_dataidx_map):
net_cls_counts = {}
for net_i, dataidx in net_dataidx_map.items():
unq, unq_cnt = np.unique(y_train[dataidx], return_counts=True)
tmp = {unq[i]: unq_cnt[i] for i in range(len(unq))}
net_cls_counts[net_i] = tmp
logging.debug('Data statistics: %s' % str(net_cls_counts))
return net_cls_counts
class Cutout(object):
def __init__(self, length):
self.length = length
def __call__(self, img):
h, w = img.size(1), img.size(2)
mask = np.ones((h, w), np.float32)
y = np.random.randint(h)
x = np.random.randint(w)
y1 = np.clip(y - self.length // 2, 0, h)
y2 = np.clip(y + self.length // 2, 0, h)
x1 = np.clip(x - self.length // 2, 0, w)
x2 = np.clip(x + self.length // 2, 0, w)
mask[y1: y2, x1: x2] = 0.
mask = torch.from_numpy(mask)
mask = mask.expand_as(img)
img *= mask
return img
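# Illustration (CIFAR-10 shaped tensor, values are arbitrary):
# img = Cutout(16)(torch.rand(3, 32, 32))  # zeroes one randomly placed, border-clipped 16x16 square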
def _data_transforms_cifar10():
CIFAR_MEAN = [0.49139968, 0.48215827, 0.44653124]
CIFAR_STD = [0.24703233, 0.24348505, 0.26158768]
train_transform = transforms.Compose([
transforms.ToPILImage(),
transforms.RandomCrop(32, padding=4),
transforms.RandomHorizontalFlip(),
transforms.ToTensor(),
transforms.Normalize(CIFAR_MEAN, CIFAR_STD),
])
train_transform.transforms.append(Cutout(16))
valid_transform = transforms.Compose([
transforms.ToTensor(),
transforms.Normalize(CIFAR_MEAN, CIFAR_STD),
])
return train_transform, valid_transform
def load_cifar10_data(datadir):
train_transform, test_transform = _data_transforms_cifar10()
cifar10_train_ds = CIFAR10_truncated(datadir, train=True, download=True, transform=train_transform)
cifar10_test_ds = CIFAR10_truncated(datadir, train=False, download=True, transform=test_transform)
X_train, y_train = cifar10_train_ds.data, cifar10_train_ds.target
X_test, y_test = cifar10_test_ds.data, cifar10_test_ds.target
return (X_train, y_train, X_test, y_test)
def partition_data(dataset, datadir, partition, n_nets, alpha):
logging.info("*********partition data***************")
X_train, y_train, X_test, y_test = load_cifar10_data(datadir)
n_train = X_train.shape[0]
# n_test = X_test.shape[0]
if partition == "homo":
total_num = n_train
idxs = np.random.permutation(total_num)
batch_idxs = np.array_split(idxs, n_nets)
net_dataidx_map = {i: batch_idxs[i] for i in range(n_nets)}
elif partition == "hetero":
min_size = 0
K = 10
N = y_train.shape[0]
logging.info("N = " + str(N))
net_dataidx_map = {}
while min_size < 10:
idx_batch = [[] for _ in range(n_nets)]
# for each class in the dataset
for k in range(K):
idx_k = np.where(y_train == k)[0]
np.random.shuffle(idx_k)
proportions = np.random.dirichlet(np.repeat(alpha, n_nets))
## Balance
proportions = np.array([p * (len(idx_j) < N / n_nets) for p, idx_j in zip(proportions, idx_batch)])
proportions = proportions / proportions.sum()
proportions = (np.cumsum(proportions) * len(idx_k)).astype(int)[:-1]
idx_batch = [idx_j + idx.tolist() for idx_j, idx in zip(idx_batch, np.split(idx_k, proportions))]
min_size = min([len(idx_j) for idx_j in idx_batch])
for j in range(n_nets):
np.random.shuffle(idx_batch[j])
net_dataidx_map[j] = idx_batch[j]
elif partition == "hetero-fix":
dataidx_map_file_path = './data_preprocessing/non-iid-distribution/CIFAR10/net_dataidx_map.txt'
net_dataidx_map = read_net_dataidx_map(dataidx_map_file_path)
if partition == "hetero-fix":
distribution_file_path = './data_preprocessing/non-iid-distribution/CIFAR10/distribution.txt'
traindata_cls_counts = read_data_distribution(distribution_file_path)
else:
traindata_cls_counts = record_net_data_stats(y_train, net_dataidx_map)
return X_train, y_train, X_test, y_test, net_dataidx_map, traindata_cls_counts
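# Illustrative call (assumes CIFAR-10 can be downloaded into ./data; the numbers are arbitrary):
# X_tr, y_tr, X_te, y_te, net_dataidx_map, cls_counts = partition_data(
#     "cifar10", "./data", "hetero", n_nets=4, alpha=0.5)
# net_dataidx_map[0] then holds the training indices assigned to client 0.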
# for centralized training
def get_dataloader(dataset, datadir, train_bs, test_bs, dataidxs=None):
return get_dataloader_CIFAR10(datadir, train_bs, test_bs, dataidxs)
# for local devices
def get_dataloader_test(dataset, datadir, train_bs, test_bs, dataidxs_train, dataidxs_test):
return get_dataloader_test_CIFAR10(datadir, train_bs, test_bs, dataidxs_train, dataidxs_test)
def get_dataloader_CIFAR10(datadir, train_bs, test_bs, dataidxs=None):
dl_obj = CIFAR10_truncated
transform_train, transform_test = _data_transforms_cifar10()
train_ds = dl_obj(datadir, dataidxs=dataidxs, train=True, transform=transform_train, download=True)
test_ds = dl_obj(datadir, train=False, transform=transform_test, download=True)
train_dl = data.DataLoader(dataset=train_ds, batch_size=train_bs, shuffle=True, drop_last=True)
test_dl = data.DataLoader(dataset=test_ds, batch_size=test_bs, shuffle=False, drop_last=True)
return train_dl, test_dl
def get_dataloader_test_CIFAR10(datadir, train_bs, test_bs, dataidxs_train=None, dataidxs_test=None):
dl_obj = CIFAR10_truncated
transform_train, transform_test = _data_transforms_cifar10()
train_ds = dl_obj(datadir, dataidxs=dataidxs_train, train=True, transform=transform_train, download=True)
test_ds = dl_obj(datadir, dataidxs=dataidxs_test, train=False, transform=transform_test, download=True)
train_dl = data.DataLoader(dataset=train_ds, batch_size=train_bs, shuffle=True, drop_last=True)
test_dl = data.DataLoader(dataset=test_ds, batch_size=test_bs, shuffle=False, drop_last=True)
return train_dl, test_dl
def load_partition_data_distributed_cifar10(process_id, dataset, data_dir, partition_method, partition_alpha,
client_number, batch_size):
X_train, y_train, X_test, y_test, net_dataidx_map, traindata_cls_counts = partition_data(dataset,
data_dir,
partition_method,
client_number,
partition_alpha)
class_num = len(np.unique(y_train))
logging.info("traindata_cls_counts = " + str(traindata_cls_counts))
train_data_num = sum([len(net_dataidx_map[r]) for r in range(client_number)])
# get global test data
if process_id == 0:
train_data_global, test_data_global = get_dataloader(dataset, data_dir, batch_size, batch_size)
logging.info("train_dl_global number = " + str(len(train_data_global)))
logging.info("test_dl_global number = " + str(len(test_data_global)))
train_data_local = None
test_data_local = None
local_data_num = 0
else:
# get local dataset
dataidxs = net_dataidx_map[process_id - 1]
local_data_num = len(dataidxs)
logging.info("rank = %d, local_sample_number = %d" % (process_id, local_data_num))
# training batch size = 64; algorithms batch size = 32
train_data_local, test_data_local = get_dataloader(dataset, data_dir, batch_size, batch_size,
dataidxs)
logging.info("process_id = %d, batch_num_train_local = %d, batch_num_test_local = %d" % (
process_id, len(train_data_local), len(test_data_local)))
train_data_global = None
test_data_global = None
return train_data_num, train_data_global, test_data_global, local_data_num, train_data_local, test_data_local, class_num
def load_partition_data_cifar10(dataset, data_dir, partition_method, partition_alpha, client_number, batch_size):
X_train, y_train, X_test, y_test, net_dataidx_map, traindata_cls_counts = partition_data(dataset,
data_dir,
partition_method,
client_number,
partition_alpha)
class_num = len(np.unique(y_train))
logging.info("traindata_cls_counts = " + str(traindata_cls_counts))
train_data_num = sum([len(net_dataidx_map[r]) for r in range(client_number)])
train_data_global, test_data_global = get_dataloader(dataset, data_dir, batch_size, batch_size)
logging.info("train_dl_global number = " + str(len(train_data_global)))
logging.info("test_dl_global number = " + str(len(test_data_global)))
test_data_num = len(test_data_global)
# get local dataset
data_local_num_dict = dict()
train_data_local_dict = dict()
test_data_local_dict = dict()
for client_idx in range(client_number):
dataidxs = net_dataidx_map[client_idx]
local_data_num = len(dataidxs)
data_local_num_dict[client_idx] = local_data_num
logging.info("client_idx = %d, local_sample_number = %d" % (client_idx, local_data_num))
# training batch size = 64; algorithms batch size = 32
train_data_local, test_data_local = get_dataloader(dataset, data_dir, batch_size, batch_size,
dataidxs)
logging.info("client_idx = %d, batch_num_train_local = %d, batch_num_test_local = %d" % (
client_idx, len(train_data_local), len(test_data_local)))
train_data_local_dict[client_idx] = train_data_local
test_data_local_dict[client_idx] = test_data_local
return train_data_num, test_data_num, train_data_global, test_data_global, \
data_local_num_dict, train_data_local_dict, test_data_local_dict, class_num, traindata_cls_counts
|
py
|
1a55db0a8eabb9fceb3ac09fa296b0ddc790172c
|
from typing import Dict, Type
from abc import abstractmethod
from functools import lru_cache
import pafy
from .media_tag import MediaTag
from .pack import Packing, Unpacking, p_, u_
from .encoding_hierarchy import Encoded
class Media(Encoded['Media']):
@abstractmethod
def tags(self):
pass
@abstractmethod
def mrl(self):
pass
class StandardMedia(Media):
def __init__(self, tags, title):
self._tags = tags
self.title = title
super().__init__()
@classmethod
@abstractmethod
def decode_to_dict(cls, kwargs, unpacking: Unpacking, format_prefix):
part = unpacking.pop()
tags_raw, title = u_(part)
kwargs['tags'] = set(MediaTag(t) for t in u_(tags_raw))
kwargs['title'] = title
super().decode_to_dict(kwargs, unpacking, format_prefix)
def tags(self):
return self._tags
def encode(self, packing: Packing):
part = p_(p_(*self._tags), self.title)
packing.append(part)
super().encode(packing)
def __str__(self):
return self.title
class YoutubeMedia(StandardMedia):
def __init__(self, url, **kwargs):
super().__init__(**kwargs)
self.url = url
if not self.title:
self.title = self._pafy().title
@classmethod
def decode_to_dict(cls, kwargs, unpacking: Unpacking, format_prefix):
assert format_prefix in ['y0']
part = unpacking.pop()
kwargs['url'] = part
super().decode_to_dict(kwargs, unpacking, format_prefix)
def encode(self, packing: Packing):
packing.append(self.url)
super().encode(packing)
@classmethod
def format_prefix(cls):
return 'y0'
@lru_cache(maxsize=None)
def _pafy(self):
return pafy.new(self.url)
def mrl(self):
best = self._pafy().getbest()
play_url = best.url
return play_url
def __str__(self):
return super().__str__() + ' (source: youtube)'
def __hash__(self):
return hash(self.url)
def __eq__(self, other):
return self.url == other.url
class FileMedia(StandardMedia):
def __init__(self, path, **kwargs):
super().__init__(**kwargs)
self.path = path
@classmethod
def decode_to_dict(cls, kwargs, unpacking: Unpacking, format_prefix):
assert format_prefix in ['f0']
part = unpacking.pop()
kwargs['path'] = part
super().decode_to_dict(kwargs, unpacking, format_prefix)
def encode(self, packing: Packing):
packing.append(self.path)
super().encode(packing)
@classmethod
def format_prefix(cls):
return 'f0'
def mrl(self):
return self.path
def __str__(self):
return super().__str__() + ' (source: file)'
Media.prefix_dict: Dict[str, Type[Media]] = {'y0': YoutubeMedia, 'f0': FileMedia}
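# Hedged sketch (Packing/Unpacking live in .pack and their constructors are not shown here):
# media = YoutubeMedia(url="https://youtu.be/<video-id>", tags=set(), title="demo")
# media.encode(packing) appends the url, then the packed tags/title, then whatever Encoded adds;
# Media.prefix_dict maps the 'y0'/'f0' format prefixes back to the right subclass when decoding.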
|
py
|
1a55dc007989360af1bef5295e0432769448e4ac
|
# pylint: disable=protected-access, unused-argument
import os
import glob
import radical.utils as ru
from .test_common import setUp
from radical.pilot.agent.launch_method.jsrun import JSRUN
try:
import mock
except ImportError:
from unittest import mock
# ------------------------------------------------------------------------------
#
def tearDown():
rs = glob.glob('%s/*.rs' % os.getcwd())
for fold in rs:
os.remove(fold)
# ------------------------------------------------------------------------------
#
@mock.patch.object(JSRUN, '__init__', return_value=None)
@mock.patch.object(JSRUN, '_configure',return_value='jsrun')
@mock.patch('radical.utils.raise_on')
def test_create_resource_set_file(mocked_init, mocked_configure, mocked_raise_on):
test_cases = setUp('lm', 'jsrun')
component = JSRUN(name=None, cfg=None, session=None)
for unit, _, resource_file, _ in test_cases:
slot = unit['slots']
uid = unit['uid']
component._create_resource_set_file(slots=slot, uid=uid, sandbox='.')
print(uid)
with open('%s.rs' % uid) as rs_layout:
assert rs_layout.readlines() == resource_file
tearDown()
# ------------------------------------------------------------------------------
#
@mock.patch.object(JSRUN, '__init__', return_value=None)
@mock.patch.object(JSRUN, '_configure', return_value='jsrun')
@mock.patch('radical.utils.raise_on')
def test_construct_command(mocked_init, mocked_configure, mocked_raise_on):
test_cases = setUp('lm', 'jsrun')
component = JSRUN(name=None, cfg=None, session=None)
component._create_resource_set_file = mock.Mock()
component._log = ru.Logger('dummy')
component.launch_command = 'jsrun'
for unit, result, _ , resource_filename in test_cases:
component._create_resource_set_file.return_value = resource_filename
command, hop = component.construct_command(unit, None)
assert([command, hop] == result)
# ------------------------------------------------------------------------------
|
py
|
1a55dc048ad30625d11dcd46a16649e7c24befc8
|
from setuptools import setup
with open("README.md", "r", encoding="utf-8") as f:
long_description = f.read()
## edit below variables as per your requirements -
REPO_NAME = "Movie-Recommender-System"
AUTHOR_USER_NAME = "Nitin"
SRC_REPO = "src"
LIST_OF_REQUIREMENTS = ['streamlit']
setup(
name=SRC_REPO,
version="0.0.1",
author=AUTHOR_USER_NAME,
description="A small package for Movie Recommender System",
long_description=long_description,
long_description_content_type="text/markdown",
url=f"https://github.com/{AUTHOR_USER_NAME}/{REPO_NAME}",
author_email="[email protected]",
packages=[SRC_REPO],
license="MIT",
python_requires=">=3.7",
install_requires=LIST_OF_REQUIREMENTS
)
|
py
|
1a55dd1504728fa603e49e50f84d6699608f6061
|
# Copyright (C) 2019 Intel Corporation
# SPDX-License-Identifier: BSD-3-Clause
import unittest
from codebasin import util
class TestValidPath(unittest.TestCase):
"""
Test that valid_path correctly identifies null-byte, carriage return
and line feed characters.
"""
def test_valid(self):
"""Check that a valid path is accepted"""
self.assertTrue(util.valid_path("/valid/path/"))
def test_null_byte(self):
"""Check that a null-byte character is rejected"""
self.assertFalse(util.valid_path("/invalid/\x00/path/"))
def test_carriage_return(self):
"""Check that a carriage return character is rejected"""
self.assertFalse(util.valid_path("/invalid/\r/path/"))
def test_line_feed(self):
"""Check that a line feed character is rejected"""
self.assertFalse(util.valid_path("/invalid/\n/path/"))
if __name__ == '__main__':
unittest.main()
|
py
|
1a55dd516e459dd8ed0d597e8bab7d72aae2e033
|
import os
import pathlib
import subprocess
from sphinx.ext.doctest import (Any, Dict, DocTestBuilder, TestcodeDirective,
TestoutputDirective, doctest, sphinx)
from sphinx.locale import __
class JavaDocTestBuilder(DocTestBuilder):
"""
Runs java test snippets in the documentation.
"""
name = "javadoctest"
epilog = __(
"Java testing of doctests in the sources finished, look at the "
"results in %(outdir)s/output.txt."
)
def compile(
self, code: str, name: str, type: str, flags: Any, dont_inherit: bool
) -> Any:
# go to project that contains all your arrow maven dependencies
path_arrow_project = pathlib.Path(__file__).parent.parent / "source" / "demo"
# create list of all arrow jar dependencies
subprocess.check_call(
[
"mvn",
"-q",
"dependency:build-classpath",
"-DincludeTypes=jar",
"-Dmdep.outputFile=.cp.tmp",
],
cwd=path_arrow_project,
text=True,
)
if not (path_arrow_project / ".cp.tmp").exists():
raise RuntimeError(
__("invalid process to create jshell dependencies library")
)
# get list of all arrow jar dependencies
with open(path_arrow_project / ".cp.tmp") as f:
stdout_dependency = f.read()
if not stdout_dependency:
raise RuntimeError(
__("invalid process to list jshell dependencies library")
)
# execute java testing code thru jshell and read output
# JDK 11 supports '-' for reading the script from stdin, which lets the pipe work without a shell.
# Using /dev/stdin instead keeps compatibility back to JDK 9.
proc_jshell_process = subprocess.Popen(
["jshell", "--class-path", stdout_dependency, "-s", "/dev/stdin"],
stdin=subprocess.PIPE,
stdout=subprocess.PIPE,
text=True,
)
out_java_arrow, err_java_arrow = proc_jshell_process.communicate(code)
if err_java_arrow:
raise RuntimeError(__("invalid process to run jshell"))
# continue with python logic code to do java output validation
output = f"print('''{self.clean_output(out_java_arrow)}''')"
# continue with sphinx default logic
return compile(output, name, self.type, flags, dont_inherit)
def clean_output(self, output: str):
if output[-3:] == '-> ':
output = output[:-3]
if output[-1:] == '\n':
output = output[:-1]
output = (4*' ').join(output.split('\t'))
return output
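# Example: clean_output("a\tb-> ") returns "a    b"
# (the trailing jshell prompt is dropped and tabs become four spaces).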
def setup(app) -> Dict[str, Any]:
app.add_directive("testcode", TestcodeDirective)
app.add_directive("testoutput", TestoutputDirective)
app.add_builder(JavaDocTestBuilder)
# this config value adds to sys.path
app.add_config_value("doctest_path", [], False)
app.add_config_value("doctest_test_doctest_blocks", "default", False)
app.add_config_value("doctest_global_setup", "", False)
app.add_config_value("doctest_global_cleanup", "", False)
app.add_config_value(
"doctest_default_flags",
doctest.DONT_ACCEPT_TRUE_FOR_1
| doctest.ELLIPSIS
| doctest.IGNORE_EXCEPTION_DETAIL,
False,
)
return {"version": sphinx.__display_version__, "parallel_read_safe": True}
|
py
|
1a55de24a301623b45501b915a2587e96de5b4a3
|
import numpy as np
k = lambda x:3*np.sin(x)*np.exp(np.sqrt(x))/(2*x)
# Constant-slope ("parallel chords") iteration: x_{n+1} = x_n - f(x_n)/gamma.
# Returns (last iterate, True if the final update was smaller than epsilon).
def cordes_para(f,x0,epsilon,gamma,maxiter=50):
xn = x0
for i in range(maxiter):
xn_ = xn
xn = xn-f(xn)/gamma
print(xn, f(xn))
return (xn, np.abs(xn_-xn)<epsilon)
print(cordes_para(lambda x:k(x)-0.25, 3.5, 1, 5))
|
py
|
1a55dee6dece5483a8e4c0e1ed7185e9c20d3d64
|
from django.contrib.auth.models import AbstractUser
from django.db import models
class User(AbstractUser):
email = models.EmailField(max_length=255, unique=True)
name = models.CharField(max_length=255, blank=True, null=True)
def save(self, *args, **kwargs):
self.is_active = True
super().save(*args, **kwargs)
|
py
|
1a55df4f025b2c2d53fd53c375d4e566f7785601
|
"""
Copyright 2015 Brocade Communications Systems, Inc.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from pynos.versions.base.ras import RAS as RASBase
class RAS(RASBase):
"""RAS class containing all system level methods and attributes.
"""
pass
|
py
|
1a55df87be9b53985a5f4b42e90b0fbd31b38aa7
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# hydroengine documentation build configuration file, created by
# sphinx-quickstart on Fri Jun 9 13:47:02 2017.
#
# This file is execfile()d with the current directory set to its
# containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
# If extensions (or modules to document with autodoc) are in another
# directory, add these directories to sys.path here. If the directory is
# relative to the documentation root, use os.path.abspath to make it
# absolute, like shown here.
#
import os
import sys
sys.path.insert(0, os.path.abspath('..'))
import hydroengine
# -- General configuration ---------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#
# needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
extensions = ['sphinx.ext.autodoc', 'sphinx.ext.viewcode']
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix(es) of source filenames.
# You can specify multiple suffix as a list of string:
#
# source_suffix = ['.rst', '.md']
source_suffix = '.rst'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'hydro-engine'
copyright = u"2018, Gennadii Donchyts"
author = u"Gennadii Donchyts"
# The version info for the project you're documenting, acts as replacement
# for |version| and |release|, also used in various other places throughout
# the built documents.
#
# The short X.Y version.
version = hydroengine.__version__
# The full version, including alpha/beta/rc tags.
release = hydroengine.__version__
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#
# This is also used if you do content translation via gettext catalogs.
# Usually you set "language" from the command line for these cases.
language = None
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# This patterns also effect to html_static_path and html_extra_path
exclude_patterns = ['_build', 'Thumbs.db', '.DS_Store']
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# If true, `todo` and `todoList` produce output, else they produce nothing.
todo_include_todos = False
# -- Options for HTML output -------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
#
html_theme = 'alabaster'
# Theme options are theme-specific and customize the look and feel of a
# theme further. For a list of options available for each theme, see the
# documentation.
#
# html_theme_options = {}
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# -- Options for HTMLHelp output ---------------------------------------
# Output file base name for HTML help builder.
htmlhelp_basename = 'hydroenginedoc'
# -- Options for LaTeX output ------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#
# 'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#
# 'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#
# 'preamble': '',
# Latex figure (float) alignment
#
# 'figure_align': 'htbp',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, documentclass
# [howto, manual, or own class]).
latex_documents = [
(master_doc, 'hydroengine.tex',
u'hydro-engine Documentation',
u'Gennadii Donchyts', 'manual'),
]
# -- Options for manual page output ------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
(master_doc, 'hydroengine',
u'hydro-engine Documentation',
[author], 1)
]
# -- Options for Texinfo output ----------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
(master_doc, 'hydroengine',
u'hydro-engine Documentation',
author,
'hydroengine',
'One line description of project.',
'Miscellaneous'),
]
|
py
|
1a55e02099b18746ab7895dc4d379b5166035978
|
import sys
import torch
from setuptools import setup, find_packages
from torch.utils.cpp_extension import BuildExtension, CUDAExtension, CppExtension
def trt_inc_dir():
return "/usr/include/aarch64-linux-gnu"
def trt_lib_dir():
return "/usr/lib/aarch64-linux-gnu"
ext_modules = []
exclude_dir = ["torch2trt/contrib","torch2trt/contrib.*"]
plugins_ext_module = CUDAExtension(
name='plugins',
sources=[
'torch2trt/plugins/plugins.cpp'
],
include_dirs=[
trt_inc_dir()
],
library_dirs=[
trt_lib_dir()
],
libraries=[
'nvinfer'
],
extra_compile_args={
'cxx': ['-DUSE_DEPRECATED_INTLIST'] if torch.__version__ < "1.5" else [],
'nvcc': []
}
)
if '--plugins' in sys.argv:
ext_modules.append(plugins_ext_module)
sys.argv.remove('--plugins')
if '--contrib' in sys.argv:
exclude_dir=[]
sys.argv.remove('--contrib')
setup(
name='torch2trt',
version='0.3.0',
description='An easy to use PyTorch to TensorRT converter',
packages=find_packages(exclude=exclude_dir),
ext_package='torch2trt',
ext_modules=ext_modules,
cmdclass={'build_ext': BuildExtension}
)
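# Usage sketch (assumed invocations; the custom flags are stripped from sys.argv
# above before setuptools parses it):
#   python setup.py install --plugins            # also build the TensorRT plugin extension
#   python setup.py install --plugins --contrib  # additionally package torch2trt.contrib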
|
py
|
1a55e0b5e59d109a6aefddcdae7718b5b0656b43
|
from train import CoordParser
def cluster(file_list, output, n_clusters=None, max_files=None):
import warnings
warnings.filterwarnings("ignore", category=DeprecationWarning)
from mpl_toolkits.basemap import Basemap
import numpy as np
if n_clusters is None: n_clusters = 100
# Parse the coordinates
parser = CoordParser()
c = np.array([parser(l) for l in open(file_list,'r')])
# Create the basemap parameters
bnd = 0
basemap_params = dict(projection='merc',llcrnrlat=np.min(c[:,0])-bnd,urcrnrlat=np.max(c[:,0])+bnd, llcrnrlon=np.min(c[:,1])-bnd,urcrnrlon=np.max(c[:,1])+bnd)
# Select a subset of the coordinates to cluster
if max_files is None:
max_files = 100000
np.random.shuffle(c)
c = c[:max_files]
# Project the coordinates into x, y coordinates
m = Basemap(**basemap_params)
x,y = m(c[:,1],c[:,0])
from sklearn import cluster
km = cluster.MiniBatchKMeans(n_clusters=n_clusters).fit(np.concatenate((x[:,None],y[:,None]),axis=1))
np.save(output,(basemap_params,km.cluster_centers_))
def main():
from argparse import ArgumentParser
from time import time
parser = ArgumentParser()
parser.add_argument('--file-list', type=str, default='/fastdata/finder/streetview_train.txt', help='path to the streetview training file')
    parser.add_argument('-n', '--n-clusters', type=int, default=100, help='number of clusters')
parser.add_argument('--max-files', type=int, help='maximum number of files to cluster')
parser.add_argument('output', type=str, help='output file (e.g. clusters.npy)')
args = parser.parse_args()
cluster(args.file_list, args.output, args.n_clusters, args.max_files)
if __name__ == "__main__":
main()
|
py
|
1a55e174afec8deba452a112ac043e6d0ca19b79
|
from django.test import TestCase
from django.shortcuts import resolve_url as r
class PeditoApiTest(TestCase):
def setUp(self):
self.resp = self.client.get(r('api_name:pedidos'))
def test_get_status(self):
self.assertEqual(200, self.resp.status_code)
|
py
|
1a55e1e7e252265df73c2f2c2ae3038ff1bb21e0
|
#
# Licensed to Dagda under one or more contributor
# license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright
# ownership. Dagda licenses this file to you under
# the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
#
import datetime
import requests
import json
import traceback
from threading import Thread
from analysis.static.os import os_info_extractor
from analysis.static.dependencies import dep_info_extractor
from analysis.static.av import malware_extractor
from api.internal.internal_server import InternalServer
from log.dagda_logger import DagdaLogger
from analysis.static.util.utils import extract_filesystem_bundle
from analysis.static.util.utils import clean_up
# Analyzer class
class Analyzer:
# -- Public methods
# Analyzer Constructor
def __init__(self, dagda_server_url=None):
super(Analyzer, self).__init__()
self.is_remote = False
if dagda_server_url is not None:
self.dagda_server_url = dagda_server_url
self.is_remote = True
else:
self.mongoDbDriver = InternalServer.get_mongodb_driver()
self.dockerDriver = InternalServer.get_docker_driver()
# Evaluate image from image name or container id
def evaluate_image(self, image_name, container_id, file_path):
if InternalServer.is_debug_logging_enabled():
DagdaLogger.get_logger().debug(
"ENTRY to the method for analyzing a docker image"
)
# Init
data = {}
# -- Static analysis
        if not file_path:
            image_name = (
                self.dockerDriver.get_docker_image_name_by_container_id(container_id)
                if container_id
                else image_name
            )
os_packages = []
malware_binaries = []
dependencies = []
temp_dir = None
try:
# Get OS packages
if InternalServer.is_debug_logging_enabled():
DagdaLogger.get_logger().debug(
"Retrieving OS packages from the docker image ..."
)
if file_path:
# no OS packages to scan because not contained in a docker image
temp_dir = extract_filesystem_bundle(
image_name=image_name,
image_path=file_path,
)
elif container_id is None: # Scans the docker image
os_packages = os_info_extractor.get_soft_from_docker_image(
docker_driver=self.dockerDriver, image_name=image_name
)
temp_dir = extract_filesystem_bundle(
docker_driver=self.dockerDriver, image_name=image_name
)
else: # Scans the docker container
os_packages = os_info_extractor.get_soft_from_docker_container_id(
docker_driver=self.dockerDriver, container_id=container_id
)
temp_dir = extract_filesystem_bundle(
docker_driver=self.dockerDriver, container_id=container_id
)
if InternalServer.is_debug_logging_enabled():
DagdaLogger.get_logger().debug(
"OS packages from the docker image retrieved"
)
# Get malware binaries in a parallel way
malware_thread = Thread(
target=Analyzer._threaded_malware,
args=(self.dockerDriver, temp_dir, malware_binaries),
)
malware_thread.start()
# Get programming language dependencies in a parallel way
dependencies_thread = Thread(
target=Analyzer._threaded_dependencies,
args=(self.dockerDriver, image_name, temp_dir, dependencies),
)
dependencies_thread.start()
# Waiting for the threads
malware_thread.join()
dependencies_thread.join()
except Exception as ex:
message = "Unexpected exception of type {0} occurred: {1!r}".format(
type(ex).__name__,
ex.get_message() if type(ex).__name__ == "DagdaError" else ex.args,
)
DagdaLogger.get_logger().error(message)
if InternalServer.is_debug_logging_enabled():
traceback.print_exc()
data["status"] = message
# -- Cleanup
if temp_dir is not None:
clean_up(temporary_dir=temp_dir)
# -- Prepare output
if InternalServer.is_debug_logging_enabled():
DagdaLogger.get_logger().debug("Preparing analysis output ...")
if "status" not in data or data["status"] is None:
data["status"] = "Completed"
data["image_name"] = image_name
data["timestamp"] = datetime.datetime.now().timestamp()
data["static_analysis"] = self.generate_static_analysis(
image_name, os_packages, dependencies, malware_binaries
)
if InternalServer.is_debug_logging_enabled():
DagdaLogger.get_logger().debug("Analysis output completed")
# -- Return
if InternalServer.is_debug_logging_enabled():
DagdaLogger.get_logger().debug(
"EXIT from the method for analyzing a docker image"
)
return data
# Generates the result of the static analysis
def generate_static_analysis(
self, image_name, os_packages, dependencies, malware_binaries
):
data = {}
data["os_packages"] = self.generate_os_report(image_name, os_packages)
data["prog_lang_dependencies"] = self.generate_dependencies_report(
image_name, dependencies
)
data["malware_binaries"] = malware_binaries
return data
# Generates dependencies report
def generate_dependencies_report(self, image_name, dependencies):
data = {}
dep_details = {}
dep_details["java"] = []
dep_details["python"] = []
dep_details["nodejs"] = []
dep_details["js"] = []
dep_details["ruby"] = []
dep_details["php"] = []
fp_count = 0
for dependency in dependencies:
d = {}
splitted_dep = dependency.split("#")
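            # Each entry is assumed to be a '#'-separated string of the form
            # "<language>#<product>#<version>#<file_path>#<cve_id>", e.g. (hypothetical values)
            # "python#requests#2.19.1#/usr/lib/python3/dist-packages#CVE-2018-18074".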
d["product"] = splitted_dep[1]
d["version"] = splitted_dep[2]
d["product_file_path"] = splitted_dep[3]
d["vulnerabilities"] = self.get_vulnerabilities(d["product"], d["version"])
# purpose of this code is to not throw away the cve_id reported by 3grander/4depcheck container process
dep_check_cve_id = splitted_dep[4]
# DagdaLogger.get_logger().debug(f"dep_check_cve_id: {dep_check_cve_id}")
included_vuln_ids = []
for vuln in d["vulnerabilities"]:
                included_vuln_ids.extend(vuln.keys())  # collect the CVE ids already reported
# DagdaLogger.get_logger().debug(
# f"included_vuln_ids: {json.dumps(included_vuln_ids)}"
# )
if not dep_check_cve_id in included_vuln_ids:
info = {}
cve_info = {}
cve_data = self.mongoDbDriver.db.cve_info.find_one(
{"cveid": dep_check_cve_id}
)
# DagdaLogger.get_logger().debug(f"cve_data: {cve_data}")
if cve_data is not None:
cve_info = cve_data.copy()
cve_info["mod_date"] = cve_data["mod_date"].strftime("%d-%m-%Y")
cve_info["pub_date"] = cve_data["pub_date"].strftime("%d-%m-%Y")
del cve_info["_id"]
info[dep_check_cve_id] = cve_info
# DagdaLogger.get_logger().debug(f"info: {json.dumps(info)}")
d["vulnerabilities"].append(info)
# DagdaLogger.get_logger().debug(
# f"d['vulnerabilities']: {json.dumps(d['vulnerabilities'])}"
# )
d["is_vulnerable"] = True
d["is_false_positive"] = self.is_fp(image_name, d["product"], d["version"])
if d["is_false_positive"]:
fp_count += 1
dep_details[splitted_dep[0]].append(d)
# Prepare output
data["vuln_dependencies"] = (
len(dep_details["java"])
+ len(dep_details["python"])
+ len(dep_details["nodejs"])
+ len(dep_details["js"])
+ len(dep_details["ruby"])
+ len(dep_details["php"])
- fp_count
)
data["dependencies_details"] = dep_details
# Return
return data
# Generates os report
def generate_os_report(self, image_name, os_packages):
data = {}
products_status = []
vuln_products = 0
fp_count = 0
for package in os_packages:
p = {}
p["product"] = package["product"]
p["version"] = package["version"]
p["vulnerabilities"] = self.get_vulnerabilities(
package["product"], package["version"]
)
if len(p["vulnerabilities"]) > 0:
p["is_vulnerable"] = True
vuln_products += 1
else:
p["is_vulnerable"] = False
p["is_false_positive"] = self.is_fp(
image_name, package["product"], package["version"]
)
if p["is_false_positive"]:
fp_count += 1
products_status.append(p)
# Prepare output
vuln_products -= fp_count
data["total_os_packages"] = len(products_status)
data["vuln_os_packages"] = vuln_products
data["ok_os_packages"] = data["total_os_packages"] - vuln_products
data["os_packages_details"] = products_status
# Return
return data
# Gets vulnerabilities by product and version
def get_vulnerabilities(self, product, version):
if not self.is_remote:
return self.mongoDbDriver.get_vulnerabilities(product, version)
else:
if product is not None:
product += "/" + version
r = requests.get(self.dagda_server_url + "/vuln/products/" + product)
if r.status_code == 200:
return json.loads(r.content.decode("utf-8"))
return []
# Check if it is a false positive
def is_fp(self, image_name, product, version):
if not self.is_remote:
return self.mongoDbDriver.is_fp(image_name, product, version)
else:
if product is not None:
product += "/" + version
r = requests.get(
self.dagda_server_url + "/history/" + image_name + "/fp/" + product
)
return r.status_code == 204
# Get malware binaries thread
@staticmethod
def _threaded_malware(dockerDriver, temp_dir, malware_binaries):
# Get malware binaries
if InternalServer.is_debug_logging_enabled():
DagdaLogger.get_logger().debug(
f'Retrieving malware files from the docker image in "{temp_dir}"...'
)
malware_binaries.extend(
malware_extractor.get_malware_included_in_docker_image(
docker_driver=dockerDriver, temp_dir=temp_dir
)
)
if InternalServer.is_debug_logging_enabled():
DagdaLogger.get_logger().debug(
"Malware files from the docker image retrieved"
)
# Get programming language dependencies thread
@staticmethod
def _threaded_dependencies(dockerDriver, image_name, temp_dir, dependencies):
# Get programming language dependencies
if InternalServer.is_debug_logging_enabled():
DagdaLogger.get_logger().debug(
                f'Retrieving dependencies from the docker image in "{temp_dir}" ...'
)
dependencies.extend(
dep_info_extractor.get_dependencies_from_docker_image(
docker_driver=dockerDriver, image_name=image_name, temp_dir=temp_dir
)
)
if InternalServer.is_debug_logging_enabled():
DagdaLogger.get_logger().debug(
"Dependencies from the docker image retrieved"
)
# DagdaLogger.get_logger().debug(f"dependencies: {json.dumps(dependencies)}")
|
py
|
1a55e277819fb60a4ae85d6d90470affe61a2af0
|
from ohapi import api
from django.conf import settings
import arrow
from datetime import timedelta
def get_rescuetime_file(oh_member):
try:
oh_access_token = oh_member.get_access_token(
client_id=settings.OPENHUMANS_CLIENT_ID,
client_secret=settings.OPENHUMANS_CLIENT_SECRET)
user_object = api.exchange_oauth2_member(oh_access_token)
for dfile in user_object['data']:
if 'Rescuetime' in dfile['metadata']['tags']:
return dfile['download_url']
return ''
except:
return 'error'
def check_update(rescuetime_member):
if rescuetime_member.last_submitted < (arrow.now() - timedelta(hours=1)):
return True
return False
|
py
|
1a55e46781301f5f2fb1a2aad6a82fbe20f65661
|
#!/usr/bin/env python
"""Set version.
This script updates the version number information
in the Properties.py files, as well as *.html and *.txt.
This should be used only by Webware developers whenever
a new Webware version is being cut. Please be very
careful and read also the ReleaseProcedures.html.
If setVersion is True, then the version information is updated
in various files as follows:
Properties.py files version information is set, replacing the
version setting and releaseDate setting.
*.html files version information is set by searching for a
comment tag surrounding both version and release date and replacing
the version and release date information respectively.
*.txt files version is set by matching
:Version:
:Released:
tags at the beginning of the line. This is designed for the
reStructured text documents. Note that reStructured text
HTML files will need to be re-generated after processing.
The version in ReleaseNotes-X.Y.phtml is not set (this will be
done by the installer), but they are renamed to the current version.
If possible, this is done with "git mv". Exception: If no new notes
have been written (i.e. ReleaseNotes-X.Y same as ReleaseNotesTemplate),
they will not be saved, but deleted, if possible, using "git rm".
If newRelease is True, then a new release is prepared as follows:
The version in ReleaseNotes-X.Y.phtml files is set, and they
are renamed to the current version if they are not empty.
New ReleaseNotes-X.y.phtml files are created from the
ReleaseNotesTemplates.phtml files instead.
If possible, "git mv" and "git add" will be used.
Note that this script will not automatically perform a "git commit"
so you can always revert when something goes wrong.
You should not use "setVersion" on the master branch, but after creating
a tag for the desired version which must not be a final release.
You should use "newRelease" on the trunk for the final version
when you want to freeze the release notes and switch to empty
release notes for the next release.
Written by Stuart Donaldson - stu at asyn.com.
Improved by Christoph Zwerschke - cito at online.de.
"""
# Version format is (Major, Minor, Sub, Alpha/Beta/etc)
# The Sub is optional, and if 0 is not returned.
# Examples: (0, 8, 1, 'b1'), (0, 8, 2) or (0, 9, 0, 'rc1')
# releaseDate format should be 'MM/DD/YY'.
# Update this to change the current version and release date:
version = ('X', 'Y', 0)
releaseDate = '@@/@@/@@'
# Set Version info in files (should not be done on the trunk):
setVersion = True
# Prepare a new release (this should be done on the trunk):
newRelease = False
# Verbose output (output unchanged files also):
verbose = False
import os
import sys
import re
from glob import glob
# We assume that this script is located in Webware/bin:
progPath = os.path.abspath(sys.argv[0])
webwarePath = os.path.dirname(os.path.dirname(progPath))
sys.path.append(webwarePath)
os.chdir(webwarePath)
from MiscUtils.PropertiesObject import PropertiesObject
class Replacer(object):
"""Class to handle substitutions in a file."""
def __init__(self, *args):
self._subs = list(args)
def add(self, search, replace):
self._subs.append((re.compile(search, re.M), replace))
def replaceInStr(self, data):
for search, replace in self._subs:
data = re.sub(search, replace, data)
return data
def replaceInFile(self, filename):
data = open(filename, 'rb').read()
newData = self.replaceInStr(data)
if data == newData:
if verbose:
print 'Unchanged ' + filename
else:
print 'Updating ' + filename
open(filename, 'wb').write(newData)
if os.system('git add ' + filename):
print "git add not possible."
def replaceGlob(self, pattern):
for filename in glob(pattern):
if os.path.exists(filename):
self.replaceInFile(filename)
def main():
po = PropertiesObject()
po.loadValues(version=version, releaseDate=releaseDate)
po.createVersionString()
if po['versionString'] == 'X.Y':
print "Please set the version."
sys.exit(1)
elif po['releaseDate'] == '@@/@@/@@':
print "Please set the release Date."
sys.exit(1)
propReplace = Replacer()
propReplace.add(r"(version\s*=)\s*.*", r"\g<1> %s" % repr(version))
propReplace.add(r"(releaseDate\s*=)\s*.*", r"\g<1> %s" % repr(releaseDate))
htmlReplace = Replacer()
htmlReplace.add(r"<!--\s*version\s*-->[^<]*<!--\s*/version\s*-->",
r"<!-- version --> %s <!-- /version -->" % po['versionString'])
htmlReplace.add(r"<!--\s*relDate\s*-->[^<]*<!--\s*/relDate\s*-->",
r"<!-- relDate --> %s <!-- /relDate -->" % po['releaseDate'])
rstReplace = Replacer()
rstReplace.add(r"^:Version:.*$", ":Version: %s" % po['versionString'])
rstReplace.add(r"^:Released:.*$", ":Released: %s" % po['releaseDate'])
phtmlReplace = Replacer()
phtmlReplace.add(r"(<%.*)' \+ versionString \+ '(.*%>)",
r"\g<1>%s\g<2>" % po['versionString'])
phtmlReplace.add(r"<% versionString %>", po['versionString'])
phtmlReplace.add(r"<% releaseDate %>", po['releaseDate'])
twillReplace = Replacer()
twillReplace.add(r"^setglobal version .*$",
r"setglobal version %s" % po['versionString'])
twillReplace.add(r"^setglobal date .*$",
r"setglobal date %s" % po['releaseDate'])
twillReplace.add(r"^# if release ", '')
if setVersion:
# Replace in Properties files:
propReplace.replaceGlob('Properties.py')
propReplace.replaceGlob('*/Properties.py')
# Replace in existing HTML:
htmlReplace.replaceGlob('*/Docs/*.html')
htmlReplace.replaceGlob('Docs/*.html')
# Replace in reStructuredText files:
rstReplace.replaceGlob('*/Docs/*.txt')
rstReplace.replaceGlob('Docs/*.txt')
# Replace in global README file:
rstReplace.replaceGlob('_README')
# Replace in twill test scripts:
twillReplace.replaceGlob('WebKit/Tests/twill/*.twill')
# Process release notes:
if setVersion or newRelease:
template = open('DocSupport/RelNotesTemplate.phtml', 'rb').read()
infile = 'RelNotes-X.Y.phtml'
outfile = infile.replace('X.Y', po['versionString'])
for filename in ['Docs/' + infile] + glob('*/Docs/' + infile):
if verbose:
print "Processing " + filename
current = open(filename, 'rb').read()
if current == template:
if newRelease:
print "Kept empty " + filename
continue
else:
print "Removing empty " + filename
if os.system('git rm ' + filename):
print "git rm not possible."
os.remove(filename)
else:
if newRelease:
phtmlReplace.replaceInFile(filename)
newName = os.path.join(os.path.split(filename)[0], outfile)
print "Renaming %s to %s" % (filename, outfile)
if os.system('git mv -f %s %s' % (filename, newName)):
print "git mv not possible."
os.rename(filename, newName)
if newRelease:
print "Creating empty " + filename
open(filename, 'wb').write(template)
if os.system('git add ' + filename):
print "git add not possible."
if __name__ == '__main__':
main()
|
py
|
1a55e468ab91160ab7957182ed0f2c74bb8e18fe
|
# Assignment 2
|
py
|
1a55e577d335d9f9515ee5e52c8dad094799d6fa
|
import os
import sys
module_path = os.path.abspath(os.path.join('..'))
if module_path not in sys.path:
sys.path.append(module_path)
from src.PanelMethod import *
import numpy as np
from scipy.special import exp1
def wave_source(x,y,xs,ys,K):
"Source plus generated free surface waves"
r2 = (x-xs)**2+(y-ys)**2 # source square-distance
m2 = (x-xs)**2+(y+ys)**2 # mirror sink square-distance
Z = K*(y+ys+1j*abs(x-xs)) # wave number scaled complex vector
eZ = np.exp(Z) # propagating wave potential
fZ = np.real(eZ*exp1(Z)) # standing wave potential
return 0.5*np.log(r2/m2)-2j*np.pi*eZ-2*fZ
import matplotlib.pyplot as plt
from matplotlib.animation import FuncAnimation
def wave_video(x,y,q,XY,G=wave_source,args=(4,),size=(16,6)):
"Animate the induced flow over a cycle of motion"
# Get complex velocity
def uv(i): return q[i]*velocity(*XY, x[i], y[i], x[i+1], y[i+1], G, args)
UV = sum(uv(i) for i in range(len(x)-1))
# Plot flow and segments
fig, ax = plt.subplots(1,1,figsize=size)
Q = ax.quiver(*XY, *UV)#, pivot='mid')
ax.plot(x,y,c='b')
ax.set_ylim(None,0.5)
ax.set_aspect('equal', adjustable='box')
plt.close()
# run through a wave period
def update_quiver(num, Q):
Q.set_UVC(*np.real(UV*np.exp(-2j*np.pi*num/101)))
return Q,
# create the animation
return FuncAnimation(fig, update_quiver, fargs=(Q,), interval=50)
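# Usage sketch (hypothetical panel layout; velocity() is assumed to be provided by
# the star import from src.PanelMethod above):
#   N = 32
#   xp = np.linspace(-1, 1, N + 1)      # panel end points along a flat segment
#   yp = np.full(N + 1, -0.5)           # submerged at depth 0.5
#   q = np.ones(N)                      # unit source strength per panel
#   XY = np.meshgrid(np.linspace(-3, 3, 50), np.linspace(-2, 0.5, 30))
#   anim = wave_video(xp, yp, q, XY, args=(4,))   # FuncAnimation over one wave period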
|
py
|
1a55e67ac43de771b79600482f8e2a484552b0a7
|
"""Support for Tellstick sensors."""
from collections import namedtuple
import logging
from tellcore import telldus
import tellcore.constants as tellcore_constants
import voluptuous as vol
from homeassistant.components.sensor import PLATFORM_SCHEMA
from homeassistant.const import CONF_ID, CONF_NAME, CONF_PROTOCOL, TEMP_CELSIUS
import homeassistant.helpers.config_validation as cv
from homeassistant.helpers.entity import Entity
_LOGGER = logging.getLogger(__name__)
DatatypeDescription = namedtuple("DatatypeDescription", ["name", "unit"])
CONF_DATATYPE_MASK = "datatype_mask"
CONF_ONLY_NAMED = "only_named"
CONF_TEMPERATURE_SCALE = "temperature_scale"
CONF_MODEL = "model"
DEFAULT_DATATYPE_MASK = 127
DEFAULT_TEMPERATURE_SCALE = TEMP_CELSIUS
PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend(
{
vol.Optional(
CONF_TEMPERATURE_SCALE, default=DEFAULT_TEMPERATURE_SCALE
): cv.string,
vol.Optional(
CONF_DATATYPE_MASK, default=DEFAULT_DATATYPE_MASK
): cv.positive_int,
vol.Optional(CONF_ONLY_NAMED, default=[]): vol.All(
cv.ensure_list,
[
vol.Schema(
{
vol.Required(CONF_ID): cv.positive_int,
vol.Required(CONF_NAME): cv.string,
vol.Optional(CONF_PROTOCOL): cv.string,
vol.Optional(CONF_MODEL): cv.string,
}
)
],
),
}
)
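# Example configuration.yaml entry (illustrative only, values are assumptions):
#   sensor:
#     - platform: tellstick
#       temperature_scale: "°C"
#       datatype_mask: 1          # TELLSTICK_TEMPERATURE only
#       only_named:
#         - id: 135
#           name: Outdoor
#           protocol: fineoffset
#           model: temperaturehumidity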
def setup_platform(hass, config, add_entities, discovery_info=None):
"""Set up the Tellstick sensors."""
sensor_value_descriptions = {
tellcore_constants.TELLSTICK_TEMPERATURE: DatatypeDescription(
"temperature", config.get(CONF_TEMPERATURE_SCALE)
),
tellcore_constants.TELLSTICK_HUMIDITY: DatatypeDescription("humidity", "%"),
tellcore_constants.TELLSTICK_RAINRATE: DatatypeDescription("rain rate", ""),
tellcore_constants.TELLSTICK_RAINTOTAL: DatatypeDescription("rain total", ""),
tellcore_constants.TELLSTICK_WINDDIRECTION: DatatypeDescription(
"wind direction", ""
),
tellcore_constants.TELLSTICK_WINDAVERAGE: DatatypeDescription(
"wind average", ""
),
tellcore_constants.TELLSTICK_WINDGUST: DatatypeDescription("wind gust", ""),
}
try:
tellcore_lib = telldus.TelldusCore()
except OSError:
_LOGGER.exception("Could not initialize Tellstick")
return
sensors = []
datatype_mask = config.get(CONF_DATATYPE_MASK)
if config[CONF_ONLY_NAMED]:
named_sensors = {}
for named_sensor in config[CONF_ONLY_NAMED]:
name = named_sensor[CONF_NAME]
proto = named_sensor.get(CONF_PROTOCOL)
model = named_sensor.get(CONF_MODEL)
id_ = named_sensor[CONF_ID]
if proto is not None:
if model is not None:
named_sensors["{}{}{}".format(proto, model, id_)] = name
else:
named_sensors["{}{}".format(proto, id_)] = name
else:
named_sensors[id_] = name
for tellcore_sensor in tellcore_lib.sensors():
if not config[CONF_ONLY_NAMED]:
sensor_name = str(tellcore_sensor.id)
else:
proto_id = "{}{}".format(tellcore_sensor.protocol, tellcore_sensor.id)
proto_model_id = "{}{}{}".format(
tellcore_sensor.protocol, tellcore_sensor.model, tellcore_sensor.id
)
if tellcore_sensor.id in named_sensors:
sensor_name = named_sensors[tellcore_sensor.id]
elif proto_id in named_sensors:
sensor_name = named_sensors[proto_id]
elif proto_model_id in named_sensors:
sensor_name = named_sensors[proto_model_id]
else:
continue
for datatype in sensor_value_descriptions:
if datatype & datatype_mask and tellcore_sensor.has_value(datatype):
sensor_info = sensor_value_descriptions[datatype]
sensors.append(
TellstickSensor(sensor_name, tellcore_sensor, datatype, sensor_info)
)
add_entities(sensors)
class TellstickSensor(Entity):
"""Representation of a Tellstick sensor."""
def __init__(self, name, tellcore_sensor, datatype, sensor_info):
"""Initialize the sensor."""
self._datatype = datatype
self._tellcore_sensor = tellcore_sensor
self._unit_of_measurement = sensor_info.unit or None
self._value = None
self._name = f"{name} {sensor_info.name}"
@property
def name(self):
"""Return the name of the sensor."""
return self._name
@property
def state(self):
"""Return the state of the sensor."""
return self._value
@property
def unit_of_measurement(self):
"""Return the unit of measurement of this entity, if any."""
return self._unit_of_measurement
def update(self):
"""Update tellstick sensor."""
self._value = self._tellcore_sensor.value(self._datatype).value
|
py
|
1a55e8092a11da802b3c8a719a2759b90d195f14
|
"""
Author: Alex Kiernan
Desc: Fact model
"""
from app import db
class Fact(db.Model):
__tablename__ = 'prediction_facts'
pf_date = db.Column('pf_date', db.Date, primary_key=True)
pf_time_of_day = db.Column('pf_time_of_day', db.Integer, primary_key=True)
user_id = db.Column('user_id', db.Integer, primary_key=True)
bg_value = db.Column('bg_value', db.Float)
ins_value = db.Column('ins_value', db.Float)
food_value = db.Column('food_value', db.Integer)
exercise_value = db.Column('exercise_value', db.Integer)
def serialize(self):
return {
'pf_date' : self.pf_date.strftime("%A, %d %b %Y"),
'pf_time_of_day' : self.pf_time_of_day,
'user_id' : self.user_id,
'bg_value' : self.bg_value,
'ins_value': self.ins_value,
'food_value': self.food_value,
'exercise_value': self.exercise_value
}
def fact_serialize(self):
return {
'timestamp': self.pf_time_of_day,
'bg_value': self.bg_value,
'carbs': self.food_value,
'exercise': self.exercise_value,
'insulin_dosage': self.ins_value
}
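# Example (illustrative values):
#   Fact(pf_date=date(2021, 3, 1), pf_time_of_day=2, user_id=7,
#        bg_value=5.8, ins_value=1.5, food_value=45, exercise_value=20).fact_serialize()
#   -> {'timestamp': 2, 'bg_value': 5.8, 'carbs': 45, 'exercise': 20, 'insulin_dosage': 1.5}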
|
py
|
1a55e840ffefb3dc2cf25637ee56ff1c20820011
|
# -*- coding: utf-8 -*-
#
# Copyright (C) 2005-2020 Edgewall Software
# Copyright (C) 2005-2006 Christian Boos <[email protected]>
# All rights reserved.
#
# This software is licensed as described in the file COPYING, which
# you should have received as part of this distribution. The terms
# are also available at https://trac.edgewall.org/wiki/TracLicense.
#
# This software consists of voluntary contributions made by many
# individuals. For the exact contribution history, see the revision
# history and logs, available at https://trac.edgewall.org/log/.
#
# Author: Christian Boos <[email protected]>
import re
from trac.config import ConfigSection
from trac.core import *
from trac.util.html import Element, Fragment, find_element, tag
from trac.util.translation import N_, _, tag_
from trac.web.api import IRequestHandler
from trac.wiki.api import IWikiMacroProvider
from trac.wiki.formatter import extract_link
class InterTracDispatcher(Component):
"""InterTrac dispatcher."""
implements(IRequestHandler, IWikiMacroProvider)
is_valid_default_handler = False
intertrac_section = ConfigSection('intertrac',
"""This section configures InterTrac prefixes. Option names in
this section that contain a `.` are of the format
`<name>.<attribute>`. Option names that don't contain a `.` define
an alias.
The `.url` attribute is mandatory and is used for locating the
other Trac. This can be a relative path when the other Trac
environment is located on the same server.
The `.title` attribute is used for generating a tooltip when the
cursor is hovered over an InterTrac link.
Example configuration:
{{{#!ini
[intertrac]
# -- Example of setting up an alias:
t = trac
# -- Link to an external Trac:
genshi.title = Edgewall's Trac for Genshi
genshi.url = https://genshi.edgewall.org
}}}
""")
# IRequestHandler methods
def match_request(self, req):
match = re.match(r'^/intertrac/(.*)', req.path_info)
if match:
if match.group(1):
req.args['link'] = match.group(1)
return True
def process_request(self, req):
link = req.args.get('link', '')
parts = link.split(':', 1)
if len(parts) > 1:
resolver, target = parts
if target[:1] + target[-1:] not in ('""', "''"):
link = '%s:"%s"' % (resolver, target)
from trac.web.chrome import web_context
link_frag = extract_link(self.env, web_context(req), link)
if isinstance(link_frag, (Element, Fragment)):
elt = find_element(link_frag, 'href')
if elt is None:
raise TracError(
_("Can't view %(link)s. Resource doesn't exist or "
"you don't have the required permission.", link=link))
href = elt.attrib.get('href')
else:
href = req.href(link.rstrip(':'))
req.redirect(href)
# IWikiMacroProvider methods
def get_macros(self):
yield 'InterTrac'
def get_macro_description(self, name):
return 'messages', N_("Provide a list of known InterTrac prefixes.")
def expand_macro(self, formatter, name, content):
intertracs = {}
for key, value in self.intertrac_section.options():
idx = key.rfind('.')
if idx > 0: # 0 itself doesn't help much: .xxx = ...
prefix, attribute = key[:idx], key[idx+1:]
intertrac = intertracs.setdefault(prefix, {})
try:
intertrac[attribute] = value
except TypeError: # alias
pass
else:
intertracs[key] = value # alias
intertracs.setdefault('trac', {'title': _('The Trac Project'),
'url': 'https://trac.edgewall.org'})
def generate_prefix(prefix):
intertrac = intertracs[prefix]
if isinstance(intertrac, basestring):
yield tag.tr(tag.td(tag.strong(prefix)),
tag.td(tag_("Alias for %(name)s",
name=tag.strong(intertrac))))
else:
url = intertrac.get('url')
if url:
title = intertrac.get('title', url)
yield tag.tr(tag.td(tag.a(tag.strong(prefix),
href=url + '/timeline')),
tag.td(tag.a(title, href=url)))
return tag.table(class_="wiki intertrac")(
tag.tr(tag.th(tag.em(_("Prefix"))),
tag.th(tag.em(_("Trac Site")))),
[generate_prefix(p) for p in sorted(intertracs)])
|
py
|
1a55e8818120935dcd5e7b771d5a59fdabead51e
|
import unittest
from conans.test.utils.tools import TestClient
class ConanfileErrorsTest(unittest.TestCase):
def copy_error_test(self):
client = TestClient()
conanfile = '''
from conans import ConanFile
class HelloConan(ConanFile):
name = "Hello"
version = "0.1"
exports = "*"
def package(self):
self.copy2("*.h", dst="include", src=["include","platform"])
'''
files = {"conanfile.py": conanfile, "test.txt": "Hello world"}
client.save(files)
client.run("export . lasote/stable")
client.run("install Hello/0.1@lasote/stable --build", assert_error=True)
self.assertIn("Hello/0.1@lasote/stable: Error in package() method, line 9",
client.out)
self.assertIn('self.copy2("*.h", dst="include", src=["include","platform"]',
client.out)
self.assertIn("'HelloConan' object has no attribute 'copy2'",
client.out)
def copy_error2_test(self):
client = TestClient()
conanfile = '''
from conans import ConanFile
class HelloConan(ConanFile):
name = "Hello"
version = "0.1"
exports = "*"
def package(self):
self.copy("*.h", dst="include", src=["include","platform"])
'''
files = {"conanfile.py": conanfile, "test.txt": "Hello world"}
client.save(files)
client.run("export . lasote/stable")
client.run("install Hello/0.1@lasote/stable --build", assert_error=True)
self.assertIn("Hello/0.1@lasote/stable: Error in package() method, line 9",
client.out)
self.assertIn('self.copy("*.h", dst="include", src=["include","platform"]',
client.out)
# It results that the error is different in different Python2/3 and OSs
# self.assertIn("'list' object has no attribute 'replace'", client.out)
def package_info_error_test(self):
client = TestClient()
conanfile = '''
from conans import ConanFile
class HelloConan(ConanFile):
name = "Hello"
version = "0.1"
exports = "*"
def package_info(self):
self.copy2()
'''
files = {"conanfile.py": conanfile, "test.txt": "Hello world"}
client.save(files)
client.run("export . lasote/stable")
client.run("install Hello/0.1@lasote/stable --build", assert_error=True)
self.assertIn("Hello/0.1@lasote/stable: Error in package_info() method, line 9",
client.out)
self.assertIn('self.copy2()',
client.out)
self.assertIn("'HelloConan' object has no attribute 'copy2'",
client.out)
def config_error_test(self):
client = TestClient()
conanfile = '''
from conans import ConanFile
class HelloConan(ConanFile):
name = "Hello"
version = "0.1"
exports = "*"
def configure(self):
self.copy2()
'''
files = {"conanfile.py": conanfile, "test.txt": "Hello world"}
client.save(files)
client.run("export . lasote/stable")
client.run("install Hello/0.1@lasote/stable --build", assert_error=True)
self.assertIn("""ERROR: Hello/0.1@lasote/stable: Error in configure() method, line 9
self.copy2()
AttributeError: 'HelloConan' object has no attribute 'copy2'""", client.out)
def source_error_test(self):
client = TestClient()
conanfile = '''
from conans import ConanFile
class HelloConan(ConanFile):
name = "Hello"
version = "0.1"
exports = "*"
def source(self):
self.copy2()
'''
files = {"conanfile.py": conanfile, "test.txt": "Hello world"}
client.save(files)
client.run("export . lasote/stable")
client.run("install Hello/0.1@lasote/stable --build", assert_error=True)
self.assertIn("Hello/0.1@lasote/stable: Error in source() method, line 9",
client.out)
self.assertIn('self.copy2()',
client.out)
self.assertIn("'HelloConan' object has no attribute 'copy2'",
client.out)
def duplicate_requires_test(self):
client = TestClient()
conanfile = '''
[requires]
foo/0.1@user/testing
foo/0.2@user/testing
'''
files = {"conanfile.txt": conanfile}
client.save(files)
client.run("install . --build", assert_error=True)
self.assertIn("ERROR: Duplicated requirement", client.out)
def duplicate_requires_py_test(self):
client = TestClient()
conanfile = '''
from conans import ConanFile
class HelloConan(ConanFile):
name = "Hello"
version = "0.1"
requires = "foo/0.1@user/testing", "foo/0.2@user/testing"
'''
files = {"conanfile.py": conanfile}
client.save(files)
client.run("install . --build", assert_error=True)
self.assertIn("Error while initializing requirements. Duplicated requirement",
client.out)
|
py
|
1a55e97c43e9d8c5726b86415f92b58b7e9b04b9
|
from tridesclous import get_dataset
from tridesclous.peakdetector import get_peak_detector_class
import time
import itertools
import scipy.signal
import numpy as np
import sklearn.metrics.pairwise
from matplotlib import pyplot
from tridesclous.tests.test_signalpreprocessor import offline_signal_preprocessor
from tridesclous.peakdetector import make_sum_rectified, detect_peaks_in_rectified, get_mask_spatiotemporal_peaks
from tridesclous.peakdetector import HAVE_PYOPENCL
import matplotlib.pyplot as plt
def get_normed_sigs(chunksize=None):
# get sigs
sigs, sample_rate = get_dataset(name='olfactory_bulb')
#~ sigs = np.tile(sigs, (1, 20)) #for testing large channels num
if sigs.shape[0] % chunksize >0:
sigs = sigs[:-(sigs.shape[0] % chunksize), :]
nb_channel = sigs.shape[1]
#~ print('nb_channel', nb_channel)
geometry = np.zeros((nb_channel, 2))
geometry[:, 0] = np.arange(nb_channel) * 50 # um spacing
# normalize sigs
highpass_freq = 300.
preprocess_params = dict(
highpass_freq=highpass_freq,
common_ref_removal=True,
backward_chunksize=chunksize+chunksize//4,
output_dtype='float32')
normed_sigs = offline_signal_preprocessor(sigs, sample_rate, **preprocess_params)
return sigs, sample_rate, normed_sigs, geometry
def offline_peak_detect_global(normed_sigs, sample_rate, geometry,
peak_sign='-',relative_threshold = 5, peak_span_ms=0.5, smooth_radius_um=None):
n_span = int(sample_rate * peak_span_ms / 1000.)//2
if smooth_radius_um is None:
spatial_matrix = None
else:
d = sklearn.metrics.pairwise.euclidean_distances(geometry)
spatial_matrix = np.exp(-d/smooth_radius_um)
spatial_matrix[spatial_matrix<0.01] = 0.
sum_rectified = make_sum_rectified(normed_sigs, relative_threshold, peak_sign, spatial_matrix)
mask_peaks = detect_peaks_in_rectified(sum_rectified, n_span, relative_threshold, peak_sign)
ind_peaks, = np.nonzero(mask_peaks)
ind_peaks += n_span
return ind_peaks, sum_rectified
def offline_peak_detect_geometrical(normed_sigs, sample_rate, geometry,
peak_sign='-',relative_threshold = 5, peak_span_ms=0.5,
adjacency_radius_um=None, smooth_radius_um=None):
assert smooth_radius_um is None
assert adjacency_radius_um is not None
nb_channel = normed_sigs.shape[1]
n_span = int(sample_rate * peak_span_ms / 1000.)//2
d = sklearn.metrics.pairwise.euclidean_distances(geometry)
neighbour_mask = d<=adjacency_radius_um
nb_neighbour_per_channel = np.sum(neighbour_mask, axis=0)
nb_max_neighbour = np.max(nb_neighbour_per_channel)
nb_max_neighbour = nb_max_neighbour
neighbours = np.zeros((nb_channel, nb_max_neighbour), dtype='int32')
neighbours[:] = -1
for c in range(nb_channel):
neighb, = np.nonzero(neighbour_mask[c, :])
neighbours[c, :neighb.size] = neighb
peak_mask = get_mask_spatiotemporal_peaks(normed_sigs, n_span, relative_threshold, peak_sign, neighbours)
peaks, chan_inds = np.nonzero(peak_mask)
return peaks
def test_compare_offline_online_engines():
#~ HAVE_PYOPENCL = True
engine_names = [
('global', 'numpy'),
('geometrical', 'numpy'),
('geometrical', 'numba'),
]
if HAVE_PYOPENCL:
#~ engine_names += [('global', 'opencl'),
#~ ('geometrical', 'opencl')]
engine_names += [('geometrical', 'opencl')]
chunksize=1024
sigs, sample_rate, normed_sigs, geometry = get_normed_sigs(chunksize=chunksize)
#params
peak_sign = '-'
relative_threshold = 8
peak_span_ms = 0.9
smooth_radius_um = None
adjacency_radius_um = 200.
nb_channel = sigs.shape[1]
#~ print('n_span', n_span)
nloop = sigs.shape[0]//chunksize
print('sig duration', sigs.shape[0]/sample_rate)
offline_peaks = {}
t1 = time.perf_counter()
peaks, rectified_sum = offline_peak_detect_global(sigs, sample_rate, geometry,
peak_sign=peak_sign, relative_threshold=relative_threshold, peak_span_ms=peak_span_ms,
smooth_radius_um=smooth_radius_um)
t2 = time.perf_counter()
print('offline global', 'process time', t2-t1)
offline_peaks['global', 'numpy'] = peaks
offline_peaks['global', 'opencl'] = peaks
t1 = time.perf_counter()
peaks = offline_peak_detect_geometrical(sigs, sample_rate, geometry,
peak_sign=peak_sign, relative_threshold=relative_threshold, peak_span_ms=peak_span_ms,
smooth_radius_um=smooth_radius_um, adjacency_radius_um=adjacency_radius_um)
t2 = time.perf_counter()
print('offline geometrical', 'process time', t2-t1)
offline_peaks['geometrical', 'numpy'] = peaks
offline_peaks['geometrical', 'numba'] = peaks
offline_peaks['geometrical', 'opencl'] = peaks
online_peaks = {}
for method, engine in engine_names:
print(engine)
EngineClass = get_peak_detector_class(method, engine)
#~ buffer_size = chunksize*4
peakdetector = EngineClass(sample_rate, nb_channel, chunksize, 'float32', geometry)
peakdetector.change_params(peak_sign=peak_sign, relative_threshold=relative_threshold,
peak_span_ms=peak_span_ms, smooth_radius_um=smooth_radius_um,
adjacency_radius_um=adjacency_radius_um)
all_online_peaks = []
t1 = time.perf_counter()
for i in range(nloop):
#~ print(i)
pos = (i+1)*chunksize
chunk = sigs[pos-chunksize:pos,:]
time_ind_peaks, chan_peak_index, peak_val_peaks = peakdetector.process_buffer_stream(pos, chunk)
#~ print(n_peaks)
if time_ind_peaks is not None:
#~ all_online_peaks.append(chunk_peaks['index'])
all_online_peaks.append(time_ind_peaks)
online_peaks[method, engine] = np.concatenate(all_online_peaks)
t2 = time.perf_counter()
print(engine, 'process time', t2-t1, 'size', online_peaks[method, engine].size)
# remove peaks on border for comparison
for method, engine in engine_names:
peaks = online_peaks[method, engine]
peaks = peaks[(peaks>chunksize) & (peaks<sigs.shape[0]-chunksize)]
online_peaks[method, engine] = peaks
peaks = offline_peaks[method, engine]
peaks = peaks[(peaks>chunksize) & (peaks<sigs.shape[0]-chunksize)]
offline_peaks[method, engine] = peaks
# compare
for method, engine in engine_names:
print('compare', method, engine)
onlinepeaks = online_peaks[method, engine]
offlinepeaks = offline_peaks[method, engine]
print(onlinepeaks.size, offlinepeaks.size)
# TODO
#~ assert offlinepeaks.size==onlinepeaks.size, '{} nb_peak {} instead {}'.format(engine, offlinepeaks.size, onlinepeaks.size)
#~ assert np.array_equal(offlinepeaks, onlinepeaks)
def test_detect_geometrical_peaks():
chunksize=1024
sigs, sample_rate, normed_sigs, geometry = get_normed_sigs(chunksize=chunksize)
nb_channel = sigs.shape[1]
n_span = 4
thresh = 5
peak_sign = '-'
d = sklearn.metrics.pairwise.euclidean_distances(geometry)
nb_neighbour = 4
neighbours = np.zeros((nb_channel, nb_neighbour+1), dtype='int64')
for c in range(nb_channel):
nearest = np.argsort(d[c, :])
#~ print(c, nearest)
neighbours[c, :] = nearest[:nb_neighbour+1] # include itself
#~ print(neighbours)
mask = get_mask_spatiotemporal_peaks(normed_sigs, n_span, thresh, peak_sign, neighbours)
peak_inds, chan_inds = np.nonzero(mask)
peak_inds += n_span
print(peak_inds.size)
#~ fig, ax = plt.subplots()
#~ plot_sigs = normed_sigs.copy()
#~ for c in range(nb_channel):
#~ plot_sigs[:, c] += c*30
#~ ax.plot(plot_sigs, color='k')
#~ ampl = plot_sigs[peak_inds, chan_inds]
#~ ax.scatter(peak_inds, ampl, color='r')
#~ plt.show()
# test two way
mask_neg = get_mask_spatiotemporal_peaks(normed_sigs, n_span, thresh, '-', neighbours)
mask_pos = get_mask_spatiotemporal_peaks(-normed_sigs, n_span, thresh, '+', neighbours)
assert np.array_equal(mask_neg, mask_pos)
#~ print(peak_inds)
#~ print(chan_inds)
def benchmark_speed():
chunksize=1024
#~ chunksize=1025
#~ chunksize= 1024 + 256
#~ chunksize=2048
#~ chunksize = 1024 * 10
#~ chunksize=950
sigs, sample_rate, normed_sigs, geometry = get_normed_sigs(chunksize=chunksize)
#~ sigs = np
#***for testing large channels num***
sigs = np.tile(sigs, (1, 20))
normed_sigs = np.tile(normed_sigs, (1, 20))
geometry = np.zeros((sigs.shape[1], 2), dtype='float64')
geometry[:, 0] = np.arange(sigs.shape[1]) * 50.
#***
nb_channel = sigs.shape[1]
print('nb_channel', nb_channel)
engine_names = [
#~ ('global', 'numpy'),
#~ ('geometrical', 'numpy'),
('geometrical', 'numba'),
]
if HAVE_PYOPENCL:
engine_names += [
#~ ('global', 'opencl'),
('geometrical', 'opencl'),
]
args = (sample_rate, nb_channel, chunksize, 'float32', geometry)
params = dict(peak_span_ms = 0.9,
relative_threshold = 5,
peak_sign = '-')
online_peaks = {}
for method, engine in engine_names:
peakdetector = get_peak_detector_class(method, engine)(*args)
peakdetector.change_params(**params)
#~ print(peakdetector.n_span, peakdetector.dtype)
nloop = normed_sigs.shape[0]//chunksize
peak_inds = []
peak_chans = []
t1 = time.perf_counter()
for i in range(nloop):
pos = (i+1)*chunksize
chunk = normed_sigs[pos-chunksize:pos,:]
time_ind_peaks, chan_peak_index, peak_val_peaks = peakdetector.process_buffer_stream(pos, chunk)
if time_ind_peaks is not None:
peak_inds.append(time_ind_peaks)
if chan_peak_index is not None:
peak_chans.append(chan_peak_index)
t2 = time.perf_counter()
peak_inds = np.concatenate(peak_inds)
if len(peak_chans) > 0:
peak_chans = np.concatenate(peak_chans)
else:
peak_chans = np.argmin(normed_sigs[peak_inds, :], axis=1)
online_peaks[method, engine] = peak_inds
print(method, engine, ':' , peak_inds.size)
print(method, engine, 'process time', t2-t1)
#~ fig, ax = plt.subplots()
#~ plot_sigs = normed_sigs.copy()
#~ for c in range(nb_channel):
#~ plot_sigs[:, c] += c*30
#~ ax.plot(plot_sigs, color='k')
#~ ampl = plot_sigs[peak_inds, peak_chans]
#~ ax.scatter(peak_inds, ampl, color='r')
#~ plt.show()
def test_peak_sign_symetry():
chunksize=1024
raw_sigs, sample_rate, normed_sigs, geometry = get_normed_sigs(chunksize=chunksize)
nb_channel = normed_sigs.shape[1]
#~ print('nb_channel', nb_channel)
args = (sample_rate, nb_channel, chunksize, 'float32', geometry)
params = dict(peak_span_ms = 0.9,
relative_threshold = 5)
engine_names = [
('global', 'numpy'),
('geometrical', 'numpy'),
('geometrical', 'numba'),
]
if HAVE_PYOPENCL:
engine_names += [
('global', 'opencl'),
('geometrical', 'opencl'),
]
online_peaks = {}
for method, engine in engine_names:
peakdetector = get_peak_detector_class(method, engine)(*args)
for peak_sign in ['-', '+']:
if peak_sign=='-':
sigs = normed_sigs
elif peak_sign=='+':
sigs = -normed_sigs
peakdetector.change_params(peak_sign=peak_sign, **params)
nloop = normed_sigs.shape[0]//chunksize
peaks = []
t1 = time.perf_counter()
for i in range(nloop):
#~ print(i)
pos = (i+1)*chunksize
chunk = sigs[pos-chunksize:pos,:]
#~ print(chunk.shape)
time_ind_peaks, chan_peak_index, peak_val_peaks = peakdetector.process_buffer_stream(pos, chunk)
#~ print(n_peaks)
#~ print(chunk_peaks)
if time_ind_peaks is not None:
#~ all_online_peaks.append(chunk_peaks['index'])
peaks.append(time_ind_peaks)
peak_inds = np.concatenate(peaks)
online_peaks[method, engine, peak_sign] = peak_inds
t2 = time.perf_counter()
print(method, engine, 'peak_sign', peak_sign,':' , peak_inds.size, 'unique peak size', np.unique(peak_inds).size)
#~ print(name, 'process time', t2-t1)
assert np.array_equal(online_peaks[method, engine, '-'], online_peaks[method, engine, '+'])
if HAVE_PYOPENCL:
assert np.array_equal(online_peaks['global', 'numpy', '-'], online_peaks['global', 'opencl', '-'])
assert np.array_equal(online_peaks['geometrical', 'numpy', '-'], online_peaks['geometrical', 'numba', '-'])
# TODO this should be totally equal
assert np.array_equal(online_peaks['geometrical', 'numpy', '-'], online_peaks['geometrical', 'opencl', '-'])
assert np.array_equal(online_peaks['geometrical', 'numba', '-'], online_peaks['geometrical', 'opencl', '-'])
if __name__ == '__main__':
test_compare_offline_online_engines()
#~ test_detect_geometrical_peaks()
#~ benchmark_speed()
#~ test_peak_sign_symetry()
|
py
|
1a55e988fffe9fadc8032b13229b5853a84e62cd
|
from urllib.request import urlopen, Request
import os
import src.util as util
def save_image_tag(bs_object, conf):
    # Headers used to pass the site's header check
if conf['site_name'] == conf['comic_sites'][0]:
headers = conf['headers']['m']
elif conf['site_name'] == conf['comic_sites'][1]:
headers = conf['headers']['w']
    # Select the image source
targetString = 'https://'
targetlen = len(targetString)
    # Skip thumbnail images
thumbnailString = conf['thumbnail_link']
thumbnaillen = len(thumbnailString)
    # Skip other unnecessary image files
otherString = conf['unnecessary_link']
otherStringlen = len(otherString)
    # Put every img tag found by the soup's find_all into img_data
img_data = bs_object.find_all("img")
num_comic_img = 2
img_idx = 1
'''
structure of img tag(prop list)
1. src
2. data-....
3. style
'''
for img_tag in img_data:
# print(list(img_tag.attrs.keys()))
attr_list = list(img_tag.attrs.keys())
        # if the attribute list is too short,
        # it isn't a comic image
if len(attr_list) < 2:
continue
# print(attr_list)
isComicImg = False
        # if it is a comic image,
        # the attribute list must contain a 'data-*' attribute
for attr in attr_list:
if attr[:4] == 'data':
isComicImg = True
data_tag = attr
        # some image tags contain an 'itemprop' attribute
if conf['site_name'] == conf['comic_sites'][0]:
if 'itemprop' in attr_list:
isComicImg = True
data_tag = 'content'
elif conf['site_name'] == conf['comic_sites'][1]:
if 'alt' in attr_list:
isComicImg = True
data_tag = 'src'
if not isComicImg:
continue
print(img_idx, img_tag.attrs[data_tag])
srcString = img_tag.attrs[data_tag]
        # Skip thumbnails
if srcString[:thumbnaillen] == thumbnailString:
print("pass thumbnail")
continue
if 'assets' in srcString:
print("pass img of assets")
continue
        # Stop saving when the image is hosted on the site's own server
        # (all comic images live on an external server)
print("img index=", img_idx)
if (srcString[:otherStringlen] == otherString):
print("break othrestring")
continue
        # Handle comic image files stored on Google Drive or another external server
if srcString[0:targetlen] == targetString:
            # Pass the header dictionary along with the request
imgReq = Request(url=img_tag.attrs[data_tag], headers=headers)
try:
imageDownload = urlopen(imgReq).read()
except:
continue
            # Build the file name
filename = "image"+str(img_idx).zfill(2)+'.jpg'
folder_path = os.path.join(conf['comic_name'], str(conf['number']))
            # Create the folder
path = util.create_dir(conf['comic_path'], folder_path)
            # Path where the file will be written
filepath = os.path.join(path, filename)
            # Write the file
with open(filepath,"wb") as f:
f.write(imageDownload)
print('save => "' + path + "'/" + str(conf['number']) + '"')
img_idx += 1
|
py
|
1a55e9fccbf4b434e4b506f58aa558e98f88f2fb
|
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from functools import partial
import itertools
import logging
import os
import re
import threading
import time
from typing import Callable, Optional, Sequence
from unittest import skip, SkipTest
from absl.testing import absltest
from absl.testing import parameterized
import jax
from jax import core
from jax._src import api
from jax.config import config
from jax import dtypes
from jax.experimental import host_callback as hcb
from jax.experimental import PartitionSpec as P
from jax.experimental import maps
from jax.experimental import pjit
from jax import lax
from jax import numpy as jnp
from jax import test_util as jtu
from jax import tree_util
from jax.lib import xla_bridge
import numpy as np
config.parse_flags_with_absl()
FLAGS = config.FLAGS
class _TestingOutputStream(object):
"""Use as `output_stream` for tests."""
def __init__(self):
self._output = []
self._test_method_name = None
def write(self, what: str) -> None:
print(f"output_stream[{self._test_method_name}]: {what}", end="")
self._output.append(what)
@property
def output(self):
return "".join(self._output)
@property
def output_sorted_by_device(self):
# Assume that the output is a sequence of strings including metadata
# and data, with metadata containing `device: xxx`
by_device = [] # each element is a pair (device, str_list)
for s in self._output:
m = re.match(r".*device: (\S+)", s)
if m:
by_device.append((m.group(1), []))
assert by_device, f"output does not include 'device:': {self._output}"
by_device[-1][1].append(s)
sorted_by_device = sorted(by_device, key=lambda x: x[0])
return "\n".join(itertools.chain(*[s[1] for s in sorted_by_device]))
def __str__(self):
return "TestingOutputStream"
def reset(self):
self._output = []
testing_stream = _TestingOutputStream()
def fun1(a):
"""Function used for several `id_tap` tests."""
y = hcb.id_print(a * 2., what="a * 2", output_stream=testing_stream)
y = hcb.id_print(y * 3., what="y * 3", output_stream=testing_stream, result=y)
return y ** 2 # Some computation to make the gradient interesting
def fun1_equiv(a): # Numerical equivalent of fun1
return (a * 2.) ** 2
def maybe_print(do_print: bool, arg, what: str, tap_with_device: Optional[bool] = False):
"""Conditionally print on testing_string"""
if do_print:
return hcb.id_print(arg, what=what,
output_stream=testing_stream, tap_with_device=tap_with_device)
else:
return arg
def local_devices():
# Tests require using not more than 2 devices.
return api.local_devices()[:2]
ignore_jit_of_pmap_warning = partial(
jtu.ignore_warning, message=".*jit-of-pmap.*")
def assertMultiLineStrippedEqual(tst: jtu.JaxTestCase,
expected: str, what: str):
"""A variant that preprocesses the string to eliminate non-determinism in
floating point values, and several uninteresting id_tap primitive params.
"""
# Sometimes we get floating points in the output; we round them
def repl_floats(match_group):
matched = match_group.group(0)
if matched == ".": return matched
x = np.around(float(matched), decimals=2)
return f"{x:.2f}"
what = re.sub(r"\-?\d*\.[\-\def]*", repl_floats, what)
what = re.sub(r"output_stream=[^\]\n,]*,?", "", what)
what = re.sub(r"threshold=[^\]\n,]*,?", "", what)
what = re.sub(r"bwd=[^\]\n]*", "", what)
what = re.sub(r"out_trees=[^\]\n]*", "", what)
what = re.sub(r"fwd_jaxpr_thunk=[^\]\n]*", "", what)
what = re.sub(r"jvp_jaxpr_thunk=[^\]\n]*", "", what)
# Empty lines
what = re.sub(r"^\s*\n", "", what, flags=re.MULTILINE)
def repl_func(match_group):
matched = match_group.group(3)
if "function _print_consumer" in matched:
return match_group.group(1) + "=_print"
else:
return match_group.group(1) + "=..."
what = re.sub(r"((tap_func_)|(callback))=([^\]\n,]*),?", repl_func, what)
tst.assertMultiLineStrippedEqual(expected, what)
def helper_set_hlo_dump():
flags_str = os.getenv("XLA_FLAGS", "")
import shutil
dump_dir = "/tmp/xla_dump"
os.environ["XLA_FLAGS"] = f"{flags_str} --xla_dump_to={dump_dir}"
if os.path.isdir(dump_dir):
logging.warning(f"Deleting old XLA dump directory {dump_dir}")
shutil.rmtree(dump_dir)
logging.warning(f"Setting XLA dump directory {dump_dir}")
# Clear any cached backends so new CPU backend will pick up the env var.
xla_bridge.get_backend.cache_clear()
def helper_print_optimized_hlo(fun, *args):
backend = api.lib.xla_bridge.get_backend()
c = api.xla_computation(fun)(*args)
print(re.sub(r", metadata.*", "",
backend.compile(c).hlo_modules()[0].to_string()))
def helper_log_ir(name,
f_jax,
*args,
num_partitions=None,
strip_metadata=False):
print(f"Jaxpr[{name}]: {jax.make_jaxpr(f_jax)(*args)}")
jax_comp = jax.xla_computation(f_jax)(*args)
print(f"HLO[{name}]: {jax_comp.as_hlo_text()}")
backend = jax.lib.xla_bridge.get_backend()
if num_partitions is not None:
num_replicas = 1
device_assignment = np.arange(num_partitions * num_replicas)
device_assignment = np.reshape(device_assignment, (-1, num_partitions))
use_spmd_partitioning = num_partitions > 1
compile_options = jax.lib.xla_bridge.get_compile_options(
num_replicas=num_replicas,
num_partitions=num_partitions,
device_assignment=device_assignment,
use_spmd_partitioning=use_spmd_partitioning,
)
else:
compile_options = None
jax_optimized_hlo = backend.compile(
jax_comp, compile_options).hlo_modules()[0].to_string()
if strip_metadata:
jax_optimized_hlo = re.sub(r", metadata.*", "", jax_optimized_hlo)
print(f"Optimized HLO[{name}] for "
f"platform {backend.platform}: {jax_optimized_hlo}")
prev_xla_flags = None
def setUpModule():
global prev_xla_flags
# This will control the CPU devices. On TPU we always have 2 devices
prev_xla_flags = jtu.set_host_platform_device_count(2)
# Reset to previous configuration in case other test modules will be run.
def tearDownModule():
prev_xla_flags()
def assertMultiDeviceOutputEqual(tst: jtu.JaxTestCase,
expected_2CPUs: str):
"""Check that the multi-device output is equal to the expected.
The tests run with 2 devices if available, otherwise 1 device.
We adjust the expected output here for 1 device.
Args:
expected_2CPUs: the expected output for 2 CPUs. If there is only
one device, this is trimmed to the first device. If the current
device_under_test is not a CPU, then we change the names
"""
expected = expected_2CPUs
if len(local_devices()) == 1:
start_device_1 = expected.find('device: cpu:1')
if start_device_1 >= 0:
expected = expected[0:start_device_1]
def replace_device_name(m) -> str:
return str(local_devices()[int(m.group(1))])
expected = re.sub(r'cpu:(\d+)', replace_device_name, expected)
what = testing_stream.output_sorted_by_device
return assertMultiLineStrippedEqual(tst, expected, what)
class HostCallbackTapTest(jtu.JaxTestCase):
def setUp(self):
super().setUp()
if jtu.device_under_test() == "gpu" and jax.device_count() > 1:
raise SkipTest("host_callback broken on multi-GPU platforms (#6447)")
testing_stream.reset()
testing_stream._test_method_name = self._testMethodName
self.old_flags = os.getenv("XLA_FLAGS", "")
def tearDown(self) -> None:
if os.getenv("XLA_FLAGS") != self.old_flags:
os.environ["XLA_FLAGS"] = self.old_flags
xla_bridge.get_backend.cache_clear()
hcb.barrier_wait("HostCallbackTapTest.tearDown")
super().tearDown()
def test_tap_eval(self):
self.assertAllClose((5. * 2.) ** 2, fun1(5.))
hcb.barrier_wait()
assertMultiLineStrippedEqual(self, """
what: a * 2
10.00
what: y * 3
30.00""", testing_stream.output)
def test_tap_with_tuple_results(self):
def func2(x):
x1, y1 = hcb.id_print((x * 2., x * 3.), output_stream=testing_stream)
return x1 + y1
self.assertEqual(3. * (2. + 3.), func2(3.))
hcb.barrier_wait()
assertMultiLineStrippedEqual(self, """
( 6.00
9.00 )""", testing_stream.output)
def test_tap_with_dict_results(self):
def func2(x):
res = hcb.id_print(dict(a=x * 2., b=x * 3.), output_stream=testing_stream)
return res["a"] + res["b"]
self.assertEqual(3. * (2. + 3.), func2(3.))
hcb.barrier_wait()
assertMultiLineStrippedEqual(self, """
{ a=6.00
b=9.00 }""", testing_stream.output)
def test_tap_with_result(self):
def func2(x):
x1 = hcb.id_print((x * 2., x * 3.), result=x * 4.,
output_stream=testing_stream)
return x1
self.assertEqual(3. * 4., func2(3.))
hcb.barrier_wait()
assertMultiLineStrippedEqual(self, """
( 6.00
9.00 )""", testing_stream.output)
def test_tap_with_result_no_arg(self):
def tap_func(arg, transforms):
testing_stream.write(f"called tap_func with {arg}")
def func2(x):
x1 = hcb.id_tap(tap_func, None, result=x)
return x1
self.assertEqual(3., func2(3.))
hcb.barrier_wait()
assertMultiLineStrippedEqual(self, "called tap_func with None",
testing_stream.output)
def test_tap_result_unused(self):
def tap_func(arg, transforms):
testing_stream.write(f"called tap_func with {arg}")
def func2(x):
hcb.id_tap(tap_func, None)
return x
self.assertEqual(3., func2(3.))
hcb.barrier_wait()
assertMultiLineStrippedEqual(self, "called tap_func with None",
testing_stream.output)
def test_tap_with_device(self):
def func2(x):
x1 = hcb.id_print((x * 2., x * 3.), result=x * 4.,
output_stream=testing_stream,
tap_with_device=True)
return x1
self.assertEqual(3. * 4., func2(3.))
hcb.barrier_wait()
assertMultiDeviceOutputEqual(self, """
device: cpu:0
( 6.00
9.00 )""")
def test_tap_eval_exception(self):
if not FLAGS.jax_host_callback_outfeed:
raise SkipTest("TODO: implement error handling for customcall")
# Simulate a tap error
def tap_err(*args, **kwargs):
raise ValueError("Some user message")
def func(x):
x1 = hcb.id_print(x + 1, what="x1", output_stream=testing_stream)
x2 = hcb.id_tap(tap_err, x1 + 1)
x3 = hcb.id_print(x2 + 1, what="x3", output_stream=testing_stream)
return x3
with self.assertRaisesRegex(
hcb.CallbackException,
re.compile("There were exceptions during callback processing. Last one was:.*"
"ValueError: Some user message", re.DOTALL)):
func(0)
hcb.barrier_wait()
# We should have received everything before the error
assertMultiLineStrippedEqual(self, """
what: x1
1
what: x3
3""", testing_stream.output)
def test_tap_empty(self):
"""Tap empty arrays."""
hcb.id_print((), output_stream=testing_stream)
hcb.id_print((1., np.ones((2, 0))), what="second", output_stream=testing_stream)
hcb.barrier_wait()
assertMultiLineStrippedEqual(self, """
( )
what: second
( 1.00
[] )""", testing_stream.output)
def test_tap_jit_simple(self):
jit_fun1 = api.jit(lambda x: 3. * hcb.id_print(
2. * x, what="here", output_stream=testing_stream))
self.assertAllClose(6. * 5., jit_fun1(5.))
hcb.barrier_wait()
assertMultiLineStrippedEqual(self, """
what: here
10.00""", testing_stream.output)
def test_tap_jit_no_invars(self):
def func(): # jitted function does not take arguments
return hcb.id_print(42, output_stream=testing_stream)
self.assertAllClose(42, api.jit(func)())
hcb.barrier_wait()
assertMultiLineStrippedEqual(self, """
42""", testing_stream.output)
def test_tap_jit_multiple_invars(self):
def func(x1, x2):
return hcb.id_print(x1 + x2, output_stream=testing_stream)
self.assertAllClose(42, api.jit(func)(40, 2))
hcb.barrier_wait()
assertMultiLineStrippedEqual(self, """
42""", testing_stream.output)
def test_tap_jit_constant(self):
def func(x):
return hcb.id_print(42, result=x, output_stream=testing_stream)
self.assertAllClose(5, api.jit(func)(5))
hcb.barrier_wait()
assertMultiLineStrippedEqual(self, """
42""", testing_stream.output)
def test_tap_jit_sequence1(self):
def func(x):
x1 = hcb.id_print(x, where="1", output_stream=testing_stream)
return hcb.id_print(x1 + 1, where="2", output_stream=testing_stream)
logging.info("%s: %s", self._testMethodName,
api.make_jaxpr(func)(1))
logging.info("%s: %s", self._testMethodName,
api.xla_computation(func)(1).as_hlo_text())
self.assertEqual(2, api.jit(func)(1))
hcb.barrier_wait()
assertMultiLineStrippedEqual(self, """
where: 1
1
where: 2
2""", testing_stream.output)
def test_tap_jit2(self):
"""A sequence of JIT."""
def func(x):
x1 = hcb.id_print(x, where="1", output_stream=testing_stream)
x2 = hcb.id_print(x1 + 1, where="2", output_stream=testing_stream)
return x2
self.assertEqual(2, api.jit(func)(1))
self.assertEqual(11, api.jit(func)(10))
hcb.barrier_wait()
assertMultiLineStrippedEqual(self, """
where: 1
1
where: 2
2
where: 1
10
where: 2
11""", testing_stream.output)
def test_tap_jit_result_unused(self):
"""We can id_print even if we don't use the result."""
def func(x):
hcb.id_print(x, where="1", output_stream=testing_stream)
hcb.id_print(x + 1, where="2", output_stream=testing_stream)
return x + 1
self.assertEqual(2, api.jit(func)(1))
self.assertEqual(11, api.jit(func)(10))
hcb.barrier_wait()
assertMultiLineStrippedEqual(self, """
where: 1
1
where: 2
2
where: 1
10
where: 2
11""", testing_stream.output)
def test_tap_jit_nested(self):
def func(x):
x1 = hcb.id_print(x, where="1", output_stream=testing_stream)
def func_nested(x):
x2 = hcb.id_print(x + 1, where="nested", output_stream=testing_stream)
return x2
x3 = api.jit(func_nested)(x1)
return hcb.id_print(x3 + 1, where="3", output_stream=testing_stream)
self.assertEqual(3, api.jit(func)(1))
hcb.barrier_wait()
assertMultiLineStrippedEqual(self, """
where: 1
1
where: nested
2
where: 3
3""", testing_stream.output)
def test_tap_jit_devices(self):
"""Running on multiple devices."""
logging.info(f"{self._testMethodName}: has devices {local_devices()}")
def func(x, device_id):
x1 = hcb.id_print(x, dev=str(device_id), output_stream=testing_stream)
x2 = hcb.id_print(x1 + 1, dev=str(device_id), output_stream=testing_stream)
return x2
for d in local_devices():
self.assertEqual(112, api.jit(func, device=d, static_argnums=1)(111, d.id))
hcb.barrier_wait()
logging.info(f"{self._testMethodName}: found output {testing_stream.output}")
self.assertEqual(
len(local_devices()), len(re.findall(r"111", testing_stream.output)))
self.assertEqual(
len(local_devices()), len(re.findall(r"112", testing_stream.output)))
@parameterized.named_parameters(
jtu.cases_from_list(
dict(
testcase_name=f"_with_jit_{with_jit}",
with_jit=with_jit)
for with_jit in [True, False]))
def test_tap_pytree(self, with_jit=False):
def func(x, what=""):
"""Returns some pytrees depending on x"""
if what == "pair_1_x":
return (1, x)
elif what == "pair_x_2x":
return (x, 2 * x)
elif what == "dict":
return dict(a=2 * x, b=3 * x)
else:
assert False
tap_count = 0
def tap_func(a, _, *, what=""):
nonlocal tap_count
tap_count += 1
self.assertEqual(func(5, what), a)
transform = api.jit if with_jit else lambda f: f
for what in ("pair_1_x", "pair_x_2x", "dict"):
transformed = transform(
lambda x: hcb.id_tap(
partial(tap_func, what=what),
func(x, what),
result=func(x * 2, what))
)(5)
self.assertEqual(func(10, what), transformed)
hcb.barrier_wait() # Wait for receivers to be done
self.assertEqual(3, tap_count)
@parameterized.named_parameters(
jtu.cases_from_list(
dict(
testcase_name=f"_concurrent_{concurrent}",
concurrent=concurrent)
for concurrent in [True, False]))
def test_tap_multiple(self, concurrent=False):
"""Call id_tap multiple times, concurrently or in sequence. """
if concurrent and jtu.device_under_test() in ["cpu", "gpu"]:
# TODO(necula): if there is device side concurrency, outfeeds from
# different computations can be interleaved. For example, it seems that
# on GPU if multiple host threads run a jit computation, the multiple
# computations are interleaved on the GPU. This can result in the outfeed
# trains being interleaved, which will trigger an error.
# The solution is to fix on GPU the receiving logic so that we can outfeed
# the train as one tuple, and receive it one piece as a time. Then the
# trains should be atomic.
# See also b/160692602.
raise SkipTest("concurrent id_tap not supported on CPU, GPU")
received = set()
count = 5
def pause_tap(idx, _):
received.add(int(idx))
logging.info(f"Starting do_tap {idx}. Sleeping 1sec ...")
time.sleep(0.3)
logging.info(f"Finish do_tap {idx}")
def do_tap(idx):
api.jit(lambda idx: hcb.id_tap(pause_tap, idx))(idx)
if concurrent:
threads = [
threading.Thread(
name=f"enqueue_tap_{idx}", target=do_tap, args=(idx,))
for idx in range(count)
]
[t.start() for t in threads]
[t.join() for t in threads]
else:
for idx in range(count):
do_tap(idx)
hcb.barrier_wait()
self.assertEqual(received, set(range(count)))
# TODO(necula): see comment for test_multiple_tap. Here we disable also
# on TPU, because the barrier_wait runs on all devices, including on the CPU
# where it would run into concurrency problems.
@skip("Concurrency not supported")
def test_tap_multiple_barriers(self):
"""Call barrier_wait concurrently."""
def pause_tap(*args, **kwargs):
logging.info("pause_tap waiting")
time.sleep(0.3)
logging.info("pause_tap done")
def long_run(x):
return hcb.id_tap(pause_tap, x)
api.jit(long_run)(5.)
def try_barrier(idx):
logging.info(f"Starting test barrier {idx}")
hcb.barrier_wait()
logging.info(f"Finished test barrier {idx}")
threads = [
threading.Thread(
name=f"barrier_{idx}", target=try_barrier, args=(idx,))
for idx in range(3)
]
[t.start() for t in threads]
[t.join() for t in threads]
@parameterized.named_parameters(
jtu.cases_from_list(
dict(
testcase_name=f"_with_jit_{with_jit}",
with_jit=with_jit)
for with_jit in [True, False]))
def test_tap_cond(self, with_jit=False):
"""A conditional"""
def func(x):
x1 = hcb.id_print(x, where="1", output_stream=testing_stream)
x2 = hcb.id_print(x1 + 1, where="2", output_stream=testing_stream)
x4 = lax.cond(x % 2 == 0,
lambda x: hcb.id_print(x, where="cond_t",
output_stream=testing_stream),
lambda x: hcb.id_print(-1, where="cond_f", result=x,
output_stream=testing_stream),
x2 + 1)
x5 = hcb.id_print(x4 + 1, where="end", output_stream=testing_stream)
return x5
transform = api.jit if with_jit else lambda f: f
self.assertEqual(4, transform(func)(1))
hcb.barrier_wait()
assertMultiLineStrippedEqual(self, """
where: 1
1
where: 2
2
where: cond_f
-1
where: end
4""", testing_stream.output)
@parameterized.named_parameters(
jtu.cases_from_list(
dict(testcase_name=f"_with_jit_{with_jit}",
with_jit=with_jit)
for with_jit in [True, False]))
def test_tap_while_cond(self, with_jit=False):
def func(x):
x1 = hcb.id_print(x, where="1", output_stream=testing_stream)
x2 = hcb.id_print(x1 + 1, where="2", output_stream=testing_stream)
def body(x):
x3 = hcb.id_print(x, where="w_b_1", output_stream=testing_stream)
x4 = lax.cond(x % 2 == 0,
lambda x: hcb.id_print(x, where="w_b_t",
output_stream=testing_stream),
lambda x: hcb.id_print(-1, where="w_b_f",
result=x, output_stream=testing_stream),
x3 + 1)
return hcb.id_print(x4, where="w_b_2", output_stream=testing_stream)
x10 = lax.while_loop(lambda x: x <= 3, body, x2)
res = hcb.id_print(x10, where="end", output_stream=testing_stream)
return res
transform = api.jit if with_jit else lambda f: f
self.assertEqual(4, transform(func)(1))
hcb.barrier_wait()
assertMultiLineStrippedEqual(self, """
where: 1
1
where: 2
2
where: w_b_1
2
where: w_b_t
3
where: w_b_2
3
where: w_b_1
3
where: w_b_f
-1
where: w_b_2
4
where: end
4""", testing_stream.output)
def test_tap_jit_while_pred_tap(self):
"""While with printing in the conditional."""
def func(x):
x1 = hcb.id_print(x, where="1")
x10 = lax.while_loop(lambda x: hcb.id_print(x < 3,
where="w_p",
output_stream=testing_stream),
lambda x: hcb.id_print(x + 1, where="w_b",
output_stream=testing_stream),
x1)
res = hcb.id_print(x10, where="3", output_stream=testing_stream)
return res
self.assertEqual(3, api.jit(func)(1))
hcb.barrier_wait()
assertMultiLineStrippedEqual(self,
"""
where: w_p
True
where: w_b
2
where: w_p
True
where: w_b
3
where: w_p
False
where: 3
3""", testing_stream.output)
@parameterized.named_parameters(
jtu.cases_from_list(
dict(
testcase_name=f"_with_jit_{with_jit}",
with_jit=with_jit)
for with_jit in [True, False]))
def test_tap_scan_cond(self, with_jit=True):
def func(x):
x1 = hcb.id_print(x, where="1", output_stream=testing_stream)
x2 = hcb.id_print(x1 + 1, where="2", output_stream=testing_stream)
def body(c, x):
x3 = hcb.id_print(x, where="s_1", output_stream=testing_stream)
x4 = lax.cond(x % 2 == 0,
lambda x: hcb.id_print(x, where="s_t", output_stream=testing_stream),
lambda x: hcb.id_print(-1, where="s_f", result=x, output_stream=testing_stream),
x3 + 1)
return (c, hcb.id_print(x4, where="s_2", output_stream=testing_stream))
_, x10 = lax.scan(body, x2, jnp.arange(3))
res = hcb.id_print(x10, where="10", output_stream=testing_stream)
return res
if with_jit:
func = api.jit(func)
res = func(1)
self.assertAllClose(jnp.array([1, 2, 3]), res)
hcb.barrier_wait()
assertMultiLineStrippedEqual(self, """
where: 1
1
where: 2
2
where: s_1
0
where: s_t
1
where: s_2
1
where: s_1
1
where: s_f
-1
where: s_2
2
where: s_1
2
where: s_t
3
where: s_2
3
where: 10
[1 2 3]""", testing_stream.output)
testing_stream.reset()
@parameterized.named_parameters(
jtu.cases_from_list(
dict(
testcase_name=f"_shape_{shape}_dtype_{np.dtype(dtype).name}_nr_args={nr_args}",
shape=shape,
dtype=dtype,
nr_args=nr_args) for nr_args in [1, 2]
for shape in [(), (2,), (2, 3), (2, 3, 4)]
for dtype in jtu.dtypes.all))
def test_tap_jit_dtypes(self, nr_args=2, dtype=jnp.int16, shape=(2,)):
if dtype in (jnp.complex64, jnp.complex128, jnp.bool_):
raise SkipTest(f"host_callback not implemented for {dtype}.")
if dtype == np.bool_:
args = [np.random.choice(a=[True, False], size=shape)]
else:
args = [jnp.arange(np.prod(shape), dtype=dtype).reshape(shape)]
if nr_args > 1:
args = args * nr_args
jit_fun1 = api.jit(lambda xs: hcb.id_print(
xs,
a_new_test="************",
testcase_name=f"shape_{shape}_dtype_{dtype}_nr_args={nr_args}"))
res = jit_fun1(args)
self.assertAllClose(args, res, check_dtypes=True)
def test_tap_jit_large(self):
arg = jnp.arange(10000, dtype=jnp.int32).reshape((10, 10, 5, -1))
api.jit(hcb.id_print)(arg)
def test_tap_jit_several_together(self):
arg = jnp.arange(50, dtype=jnp.int32).reshape((10, 5))
api.jit(lambda x, y: hcb.id_print((x, y, x * 2.)))(arg, jnp.ones(100, dtype=jnp.int32))
def test_tap_jit_interleaving(self):
# Several jit's without data dependencies; they may interfere
count = 0 # Count tap invocations
nr_arrays = 5
def tap_func(arg, _):
nonlocal count
assert len(arg) == nr_arrays
count += 1
# This is the function that we'll run multiple times
def func(x, count):
for i in range(count):
x = hcb.id_tap(tap_func, [x + i for i in range(nr_arrays)])[-1]
return x
x = jnp.array(1, dtype=np.int32)
res = 0
for _ in range(10):
# No dependencies between the jit invocations
res += api.jit(lambda x: func(x, 10))(x)
hcb.barrier_wait()
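    # 10 jit invocations x 10 taps per invocation = 100 tap calls in total.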
self.assertEqual(100, count)
def test_tap_jit_tap_exception(self):
if not FLAGS.jax_host_callback_outfeed:
raise SkipTest("TODO: implement error handling for customcall")
# Simulate a tap error
def tap_err(*args, **kwargs):
raise NotImplementedError
def func(x):
x1 = hcb.id_print(x + 1, what="x1", output_stream=testing_stream)
x2 = hcb.id_tap(tap_err, x1 + 1)
x3 = hcb.id_print(x2 + 1, what="x3", output_stream=testing_stream)
return x3
res = api.jit(func)(0) # No error yet
with self.assertRaises(hcb.CallbackException):
hcb.barrier_wait()
# Even though the receiver thread raised, the main thread should still
# return 3.
self.assertEqual(3, res)
# We should have received all others
assertMultiLineStrippedEqual(self, """
what: x1
1
what: x3
3""", testing_stream.output)
def test_tap_while(self):
"""Executing while, even without JIT uses compiled code"""
y = jnp.ones(5) # captured const
def func(x):
return lax.while_loop(
lambda c: c[1] < 5,
lambda c: (y, hcb.id_print(c[1], output_stream=testing_stream) + 1),
(x, 1))
func(y)
hcb.barrier_wait()
assertMultiLineStrippedEqual(self, """
1
2
3
4""", testing_stream.output)
def test_tap_jvp(self):
jvp_fun1 = lambda x, xt: api.jvp(fun1, (x,), (xt,))
res_primals, res_tangents = jvp_fun1(jnp.float32(5.), jnp.float32(0.1))
self.assertAllClose(100., res_primals, check_dtypes=False)
self.assertAllClose(4., res_tangents, check_dtypes=False)
hcb.barrier_wait()
assertMultiLineStrippedEqual(self, """
transforms: ['jvp'] what: a * 2
( 10.00
0.20 )
transforms: ['jvp'] what: y * 3
( 30.00
0.60 )""", testing_stream.output)
def test_tap_grad_primal_unused(self):
# The output of id_print is not needed for backwards pass
def func(x):
return 2. * hcb.id_print(x * 3., what="x * 3",
output_stream=testing_stream)
grad_func = api.grad(func)
arg = jnp.float32(5.)
jaxpr = str(api.make_jaxpr(grad_func)(arg))
# making the Jaxpr does not print anything
hcb.barrier_wait()
treedef = tree_util.tree_structure(arg)
assertMultiLineStrippedEqual(self, f"""
{{ lambda ; a.
let b = mul a 3.00
c = outside_call[ arg_treedef={treedef}
callback=...
identity=True
transforms=( ) ] b
_ = mul c 2.00
d = mul 1.00 2.00
_ = broadcast_in_dim[ broadcast_dimensions=( )
shape=( ) ] 0.00
e = outside_call[ arg_treedef={treedef}
callback=...
identity=True
transforms=(('jvp',), ('transpose',)) ] d
f = mul e 3.00
in (f,) }}""", jaxpr)
assertMultiLineStrippedEqual(self, "", testing_stream.output)
testing_stream.reset()
res_grad = grad_func(arg)
hcb.barrier_wait()
self.assertAllClose(6., res_grad, check_dtypes=False)
assertMultiLineStrippedEqual(self, """
what: x * 3
15.00
transforms: ['jvp', 'transpose'] what: x * 3
2.00""", testing_stream.output)
def test_tap_grad_simple(self):
def func(x):
y = hcb.id_print(x * 2., what="x * 2", output_stream=testing_stream)
return x * hcb.id_print(y * 3., what="y * 3",
output_stream=testing_stream)
grad_func = api.grad(func)
res_grad = grad_func(jnp.float32(5.))
self.assertAllClose(2. * 5. * 6., res_grad, check_dtypes=False)
hcb.barrier_wait()
assertMultiLineStrippedEqual(self, """
what: x * 2
10.00
what: y * 3
30.00
transforms: ['jvp', 'transpose'] what: y * 3
5.00
transforms: ['jvp', 'transpose'] what: x * 2
15.00""", testing_stream.output)
def test_tap_grad_grad(self):
def func(x):
y = hcb.id_print(x * 2., what="x * 2", output_stream=testing_stream)
return x * (y * 3.)
grad_func = api.grad(api.grad(func))
# making the Jaxpr does not print anything
_ = api.make_jaxpr(grad_func)(5.)
hcb.barrier_wait()
assertMultiLineStrippedEqual(self, "", testing_stream.output)
res_grad = grad_func(jnp.float32(5.))
self.assertAllClose(12., res_grad, check_dtypes=False)
hcb.barrier_wait()
assertMultiLineStrippedEqual(self, """
what: x * 2
10.00
transforms: ['jvp', 'transpose'] what: x * 2
15.00
transforms: ['jvp', 'transpose', 'jvp', 'transpose'] what: x * 2
2.00
transforms: ['jvp', 'transpose'] what: x * 2
3.00""", testing_stream.output)
def test_tap_grad_pytree(self):
def func(x):
x4, x5 = hcb.id_print((x * 2., x * 3.), what="pair",
result=(x * 4., x * 5.),
output_stream=testing_stream)
return x4 + 2. * x5
x = jnp.float32(5.)
grad_func = api.grad(func)
print(api.make_jaxpr(grad_func)(x))
res_grad = grad_func(x)
self.assertAllClose(14., res_grad, check_dtypes=False)
hcb.barrier_wait()
assertMultiLineStrippedEqual(self, """
what: pair
( 10.00
15.00 )
transforms: ['jvp', 'transpose'] what: pair
( 0.00
0.00 )""", testing_stream.output)
def test_tap_jvp_float0(self):
def f(x, yint):
x, yint = hcb.id_tap(lambda arg, _: arg, (x, yint))
return x * yint
res = api.jvp(f, (2., 3), (0.2, np.zeros((), dtypes.float0)))
self.assertAllClose((6., 0.6), res)
def test_tap_grad_float0(self):
def func(x, yint):
x, yint = hcb.id_print((x, yint), what="pair", output_stream=testing_stream)
return x * yint
grad_func = api.grad(func)
res_grad = grad_func(jnp.float32(5.), jnp.int32(2))
self.assertAllClose(2., res_grad, check_dtypes=False)
hcb.barrier_wait()
assertMultiLineStrippedEqual(self, """
what: pair
( 5.00
2 )
transforms: ['jvp', 'transpose'] what: pair
( 2.00
False )""", testing_stream.output)
def test_tap_grad_float0_result(self):
# https://github.com/google/jax/issues/7340
# x is a Tuple[f32[2], s32[3]]
x = (np.array([.7, .8], dtype=np.float32),
np.array([11, 12, 13], dtype=np.int32))
def f_jax(x):
x = hcb.id_print(x, result=x, output_stream=testing_stream) # result= is important
return (3. * x[0], x[1])
def f_jax_vjp(x):
res, pullback = jax.vjp(f_jax, x)
g, = pullback((np.ones(x[0].shape, dtype=x[0].dtype),
np.zeros(x[1].shape, dtype=dtypes.float0)))
return g
g = f_jax_vjp(x)
self.assertAllClose(np.array([3., 3.], dtype=np.float32), g[0])
self.assertEqual(dtypes.float0, g[1].dtype)
hcb.barrier_wait()
assertMultiLineStrippedEqual(self, """
( [0.70 0.80]
[11 12 13] )
transforms: ['jvp', 'transpose']
( [0.00 0.00]
[False False False] )""", testing_stream.output)
def test_tap_higher_order_grad_float0_result(self):
# https://github.com/google/jax/issues/7340
# x is a Tuple[f32[2], s32[3]]
x = (np.array([.7, .8], dtype=np.float32),
np.array([11, 12, 13], dtype=np.int32))
def f_jax(x):
x = hcb.id_print(x, result=x, output_stream=testing_stream) # result= is important
return (jnp.sin(x[0]), x[1])
def wrap_vjp(f, args, res_f_of_args):
# Given a function "f" and "args" return the f_vjp and args_vjp
def make_ct(res):
res_dtype = np.result_type(res)
if res_dtype == dtypes.float0:
return res
ct_dtype = core.primal_dtype_to_tangent_dtype(res_dtype)
return np.ones(np.shape(res), dtype=ct_dtype)
cts = tree_util.tree_map(make_ct, res_f_of_args)
def f_vjp(args, cts):
res, pullback = jax.vjp(f, *args)
return pullback(cts)
return (f_vjp, (args, cts))
res = f_jax(x)
hcb.barrier_wait()
assertMultiLineStrippedEqual(self, """
( [0.70 0.80]
[11 12 13] )""", testing_stream.output)
testing_stream.reset()
# 1st order
f_jax_vjp1, args_vjp1 = wrap_vjp(f_jax, (x,), res)
res_vjp1 = f_jax_vjp1(*args_vjp1)
hcb.barrier_wait()
assertMultiLineStrippedEqual(self, """
( [0.70 0.80]
[11 12 13] )
transforms: ['jvp', 'transpose']
( [0.00 0.00]
[False False False] )""", testing_stream.output)
testing_stream.reset()
# 2nd order
f_jax_vjp2, args_vjp2 = wrap_vjp(f_jax_vjp1, args_vjp1, res_vjp1)
res_vjp2 = f_jax_vjp2(*args_vjp2)
# 3rd order
f_jax_vjp3, args_vjp3 = wrap_vjp(f_jax_vjp2, args_vjp2, res_vjp2)
_ = f_jax_vjp3(*args_vjp3)
def test_tap_vmap(self):
vmap_fun1 = api.vmap(fun1)
vargs = jnp.array([jnp.float32(4.), jnp.float32(5.)])
vmap_fun1(vargs)
hcb.barrier_wait()
assertMultiLineStrippedEqual(self, """
transforms: [('batch', {'batch_dims': (0,)})] what: a * 2
[ 8.00 10.00]
transforms: [('batch', {'batch_dims': (0,)})] what: y * 3
[24.00 30.00]""", testing_stream.output)
def test_tap_vmap_not_batched(self):
x = 3.
def func(y):
# x is not mapped, y is mapped
_, y = hcb.id_print((x, y), output_stream=testing_stream)
return x + y
vmap_func = api.vmap(func)
vargs = jnp.array([jnp.float32(4.), jnp.float32(5.)])
_ = vmap_func(vargs)
hcb.barrier_wait()
assertMultiLineStrippedEqual(self, """
transforms: [('batch', {'batch_dims': (None, 0)})]
( 3.00
[4.00 5.00] )""", testing_stream.output)
def test_tap_vmap_vmap(self):
    # Build a 2D tensor with x[i, j] = i + j using two nested vmaps.
def sum(x, y):
return hcb.id_print(x + y, output_stream=testing_stream)
def sum_rows(xv, y):
return api.vmap(sum, in_axes=(0, None))(xv, y)
def sum_all(xv, yv):
return api.vmap(sum_rows, in_axes=(None, 0))(xv, yv)
xv = jnp.arange(5, dtype=np.int32)
yv = jnp.arange(3, dtype=np.int32)
# assertMultiLineStrippedEqual(self, "", str(api.make_jaxpr(sum_all)(xv, yv)))
_ = sum_all(xv, yv)
hcb.barrier_wait()
assertMultiLineStrippedEqual(self, """
transforms: [('batch', {'batch_dims': (0,)}), ('batch', {'batch_dims': (0,)})]
[[0 1 2 3 4]
[1 2 3 4 5]
[2 3 4 5 6]]""", testing_stream.output)
def test_tap_vmap_while(self):
"""Vmap of while."""
def func(x):
# like max(x, 2)
x1 = hcb.id_print(x, where="before:x", output_stream=testing_stream)
x2 = lax.while_loop(
lambda x: x < 2, lambda x: hcb.id_print(
x + 1, where="body:x+1", output_stream=testing_stream), x1)
res = hcb.id_print(x2, where="after:x", output_stream=testing_stream)
return res
inputs = np.arange(5, dtype=np.int32)
self.assertAllClose(
np.array([2, 2, 2, 3, 4]),
api.jit(api.vmap(func))(inputs),
check_dtypes=False)
hcb.barrier_wait()
assertMultiLineStrippedEqual(
self, """
transforms: [('batch', {'batch_dims': (0,)})] where: before:x
[0 1 2 3 4]
transforms: [('batch', {'batch_dims': (0,)})] where: body:x+1
[1 2 3 4 5]
transforms: [('batch', {'batch_dims': (0,)})] where: body:x+1
[2 3 3 4 5]
transforms: [('batch', {'batch_dims': (0,)})] where: after:x
[2 2 2 3 4]""", testing_stream.output)
def test_tap_vmap_while_tap_cond(self):
"""Vmap of while, with a tap in the conditional."""
def func(x):
# like max(x, 2)
x1 = hcb.id_print(x, where="1", output_stream=testing_stream)
x2 = lax.while_loop(lambda x: hcb.id_print(x < 2, where="w_c",
output_stream=testing_stream),
lambda x: hcb.id_print(x + 1, where="w_b",
output_stream=testing_stream),
x1)
res = hcb.id_print(x2, where="3", output_stream=testing_stream)
return res
inputs = np.arange(5, dtype=np.int32)
res = api.jit(api.vmap(func))(inputs)
hcb.barrier_wait()
self.assertAllClose(np.array([2, 2, 2, 3, 4]), res, check_dtypes=False)
assertMultiLineStrippedEqual(self, """
transforms: [('batch', {'batch_dims': (0,)})] where: 1
[0 1 2 3 4]
transforms: [('batch', {'batch_dims': (0,)})] where: w_c
[ True True False False False]
transforms: [('batch', {'batch_dims': (0,)})] where: w_b
[1 2 3 4 5]
transforms: [('batch', {'batch_dims': (0,)})] where: w_c
[ True False False False False]
transforms: [('batch', {'batch_dims': (0,)})] where: w_b
[2 3 3 4 5]
transforms: [('batch', {'batch_dims': (0,)})] where: w_c
[False False False False False]
transforms: [('batch', {'batch_dims': (0,)})] where: 3
[2 2 2 3 4]""", testing_stream.output)
def test_tap_transforms(self):
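    # Check the tap output under the basic transforms: eval, vmap, jvp, grad,
    # and vmap of grad.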
def power3(x):
y = x * x
# Print both 'x' and 'x^2'. Must pack as a tuple.
_, y = hcb.id_print((x, y), what="x,x^2", output_stream=testing_stream)
return y * x
print(f"impl = {power3(3.)}")
hcb.barrier_wait()
expected = """
what: x,x^2
( 3.
9. )"""
self.assertMultiLineStrippedEqual(expected, testing_stream.output)
testing_stream.reset()
print(f"vmap = {jax.vmap(power3)(np.arange(3.))}")
hcb.barrier_wait()
expected = """
transforms: [('batch', {'batch_dims': (0, 0)})] what: x,x^2
( [0. 1. 2.]
[0. 1. 4.] )"""
self.assertMultiLineStrippedEqual(expected, testing_stream.output)
testing_stream.reset()
print(f"jvp = {jax.jvp(power3, (3.,), (0.1,))}")
hcb.barrier_wait()
expected = """
transforms: ['jvp'] what: x,x^2
( ( 3.
9. )
( 0.1
0.6 ) )"""
self.assertMultiLineStrippedEqual(expected, testing_stream.output)
testing_stream.reset()
print(f"grad = {jax.grad(power3)(3.)}")
hcb.barrier_wait()
expected = """
what: x,x^2
( 3.
9. )
transforms: ['jvp', 'transpose'] what: x,x^2
( 0.
3. )"""
self.assertMultiLineStrippedEqual(expected, testing_stream.output)
testing_stream.reset()
print(f"vmap o grad {jax.vmap(jax.grad(power3))(np.array([2., 3.]))}")
hcb.barrier_wait()
expected = """
transforms: [('batch', {'batch_dims': (0, 0)})] what: x,x^2
( [2. 3.]
[4. 9.] )
transforms: ['jvp', 'transpose', ('batch', {'batch_dims': (None, 0)})] what: x,x^2
( 0.
[2. 3.] )"""
self.assertMultiLineStrippedEqual(expected, testing_stream.output)
def test_tap_pmap(self):
if len(local_devices()) < 2:
raise SkipTest("test requires at least 2 devices")
def power3(x):
y = x * x
# Print both 'x' and 'x^2'. Must pack as a tuple.
_, y = hcb.id_print((x, y),
what="x,x^2",
output_stream=testing_stream,
tap_with_device=True)
return y * x
pmap_power3 = api.pmap(power3, devices=local_devices())
xv = np.array([3, 4], dtype=np.int32)
res = pmap_power3(xv)
hcb.barrier_wait()
self.assertAllClose(xv * xv * xv, res, check_dtypes=False)
# Assertion text is for 2 devices (also works for 1 device)
assertMultiDeviceOutputEqual(
self, """
device: cpu:0 what: x,x^2
( 3
9 )
device: cpu:1 what: x,x^2
( 4
16 )""")
def test_tap_pmap_vmap(self):
# A matrix M[ij] = i * 10 + j
nr_devices = len(local_devices())
shape = (nr_devices, 3)
matrix = np.fromfunction(lambda i, j: 10. * i + j, shape,
dtype=np.int32)
def fun1(x, do_print=False): # x: i32
return maybe_print(do_print, x * 2, "x * 2", tap_with_device=True)
pmap_vmap_fun1 = api.pmap(
api.vmap(partial(fun1, do_print=True)), devices=local_devices())
res = pmap_vmap_fun1(matrix)
hcb.barrier_wait()
expected_res = api.pmap(
api.vmap(partial(fun1, do_print=False)), devices=local_devices())(
matrix)
self.assertAllClose(expected_res, res, check_dtypes=False)
# Assertion text is for 2 devices (also works for 1 device)
assertMultiDeviceOutputEqual(self, """
device: cpu:0 transforms: [('batch', {'batch_dims': (0,)})] what: x * 2
[0.00 2.00 4.00]
device: cpu:1 transforms: [('batch', {'batch_dims': (0,)})] what: x * 2
[20.00 22.00 24.00]""")
def test_tap_pmap_pmap_vmap(self):
# A matrix M[ijk] = i * 100 + j * 10 + k
nr_devices = len(local_devices())
if nr_devices % 2 != 0:
raise SkipTest("test works only on even number of devices")
shape = (2, nr_devices // 2, 3)
matrix = np.fromfunction(lambda i, j, k: 100. * i + 10. * j + k, shape,
dtype=np.float32)
def fun1(x, do_print=False): # x: f32
y = maybe_print(do_print, x * 2., "x * 2", tap_with_device=True)
return y ** 2
pmap_fun1 = api.pmap(
api.pmap(api.vmap(partial(fun1, do_print=True))),
devices=local_devices())
res = pmap_fun1(matrix)
hcb.barrier_wait()
expected_res = api.pmap(
api.pmap(api.vmap(partial(fun1, do_print=False))),
devices=local_devices())(
matrix)
self.assertAllClose(expected_res, res, check_dtypes=False)
# Assertion text is for 2 devices (also works for 1 device)
assertMultiDeviceOutputEqual(self, """
device: cpu:0 transforms: [('batch', {'batch_dims': (0,)})] what: x * 2
[0.00 2.00 4.00]
device: cpu:1 transforms: [('batch', {'batch_dims': (0,)})] what: x * 2
[200.00 202.00 204.00]""")
@ignore_jit_of_pmap_warning()
def test_tap_pmap_pmap_extra(self):
"""pmap of a pmap surrounded by extra code."""
    # A matrix M[ijk] = i * 100 + j * 10 + k
nr_devices = len(local_devices())
if nr_devices != 2:
raise SkipTest("test works only on 2 devices")
shape = (2, 1, 3)
matrix = np.fromfunction(lambda i, j, k: 100. * i + 10. * j + k, shape,
dtype=np.float32)
def fun(xv, do_print=False):
# This will be printed on all devices, with shape [1, 3]
xv = maybe_print(do_print, xv + 1., "before", tap_with_device=True)
res = api.pmap(lambda x: maybe_print(do_print, x * 2., "inside", tap_with_device=True))(xv)
# This will be printed on all devices, with shape [1, 3]
return maybe_print(do_print, res + 1., "after", tap_with_device=True)
res = api.pmap(partial(fun, do_print=True))(matrix)
self.assertAllClose(fun(matrix, do_print=False), res, check_dtypes=False)
hcb.barrier_wait()
# Assertion text is for 2 devices (also works for 1 device)
assertMultiDeviceOutputEqual(self, """
device: cpu:0 what: before
[[1.00 2.00 3.00]]
device: cpu:0 what: inside
[2.00 4.00 6.00]
device: cpu:0 what: after
[[3.00 5.00 7.00]]
device: cpu:1 what: before
[[101.00 102.00 103.00]]
device: cpu:1 what: inside
[202.00 204.00 206.00]
device: cpu:1 what: after
[[203.00 205.00 207.00]]""")
def test_tap_jvp_pmap_vmap(self):
    # A matrix M[ijk] = i * 100 + j * 10 + k
nr_devices = len(local_devices())
shape = (nr_devices, 2, 3)
matrix = np.fromfunction(lambda i, j, k: 100. * i + 10. * j + k, shape,
dtype=np.float32)
def fun(xv, do_print=False):
# x: f32[3]
return api.jvp(api.pmap(api.vmap(lambda x: maybe_print(do_print, x * 2., "x * 2", tap_with_device=True))),
(xv,), (.1 * jnp.ones_like(xv),))
res = fun(matrix, do_print=True)
hcb.barrier_wait()
expected_res = fun(matrix, do_print=False)
self.assertAllClose(expected_res, res, check_dtypes=False)
# Assertion text is for 2 devices (also works for 1 device)
# Device 0 will get to execute api.jvp(api.vmap(...)) for matrix[0, :, :]
assertMultiDeviceOutputEqual(self, """
device: cpu:0 transforms: [('batch', {'batch_dims': (0,)}), 'jvp'] what: x * 2
( [[ 0.00 2.00 4.00]
[20.00 22.00 24.00]]
[[0.20 0.20 0.20]
[0.20 0.20 0.20]] )
device: cpu:1 transforms: [('batch', {'batch_dims': (0,)}), 'jvp'] what: x * 2
( [[200.00 202.00 204.00]
[220.00 222.00 224.00]]
[[0.20 0.20 0.20]
[0.20 0.20 0.20]] )""")
def test_tap_vmap_pmap(self):
    # A matrix M[ijk] = i * 100 + j * 10 + k
nr_devices = len(local_devices())
shape = (2, nr_devices, 3)
matrix = np.fromfunction(lambda i, j, k: 100. * i + 10. * j + k, shape,
dtype=np.float32)
def fun(xv, do_print=False):
# x: f32[3]
return api.vmap(api.pmap(lambda x: maybe_print(do_print, x * 2., "x * 2", tap_with_device=True)))(xv)
res = fun(matrix, do_print=True)
hcb.barrier_wait()
expected_res = fun(matrix, do_print=False)
self.assertAllClose(expected_res, res, check_dtypes=False)
# Assertion text is for 2 devices (also works for 1 device)
    # Device 0 will get to execute the batched (vmapped) function for matrix[:, 0, :]
assertMultiDeviceOutputEqual(self, """
device: cpu:0 transforms: [('batch', {'batch_dims': (0,)})] what: x * 2
[[ 0.00 2.00 4.00]
[200.00 202.00 204.00]]
device: cpu:1 transforms: [('batch', {'batch_dims': (0,)})] what: x * 2
[[ 20.00 22.00 24.00]
[220.00 222.00 224.00]]""")
@ignore_jit_of_pmap_warning()
def test_tap_jit_pmap_extra(self):
"""jit of a pmap surrounded by extra code."""
# A matrix M[ij] = i * 10 + j
nr_devices = len(local_devices())
assert nr_devices in (1, 2)
shape = (nr_devices, 3)
matrix = np.fromfunction(lambda i, j: 10. * i + j, shape,
dtype=np.float32)
def fun(xv, do_print=False):
# This will be printed on all devices with shape (nr_devices, 3)
xv = maybe_print(do_print, xv + 1., "before", tap_with_device=True)
res = api.pmap(lambda x: maybe_print(do_print, x * 2., "inside", tap_with_device=True))(xv)
# This will be printed on all devices with shape (nr_devices, 3)
return maybe_print(do_print, res + 1., "after", tap_with_device=True)
res = api.jit(partial(fun, do_print=True))(matrix)
self.assertAllClose(fun(matrix, do_print=False), res, check_dtypes=False)
hcb.barrier_wait()
if len(local_devices()) == 2:
assertMultiDeviceOutputEqual(self, """
device: cpu:0 what: before
[[ 1.00 2.00 3.00]
[11.00 12.00 13.00]]
device: cpu:0 what: inside
[2.00 4.00 6.00]
device: cpu:0 what: after
[[ 3.00 5.00 7.00]
[23.00 25.00 27.00]]
device: cpu:1 what: before
[[ 1.00 2.00 3.00]
[11.00 12.00 13.00]]
device: cpu:1 what: inside
[22.00 24.00 26.00]
device: cpu:1 what: after
[[ 3.00 5.00 7.00]
[23.00 25.00 27.00]]""")
else:
assert len(local_devices()) == 1
assertMultiDeviceOutputEqual(self, """
device: cpu:0 what: before
[[1.00 2.00 3.00]]
device: cpu:0 what: inside
[2.00 4.00 6.00]
device: cpu:0 what: after
[[3.00 5.00 7.00]]""")
def test_tap_cond_pmap(self):
raise SkipTest("cond of pmap does not work in JAX. Issue #5178.")
# A matrix M[ij] = i * 10 + j
nr_devices = len(local_devices())
shape = (nr_devices, 3)
matrix = np.fromfunction(lambda i, j: 10. * i + j, shape,
dtype=np.float32)
def fun1(x, do_print=False):
return maybe_print(do_print, x * 2., "x * 2")
def fun2(cond, xv, do_print=False):
return lax.cond(cond, api.pmap(partial(fun1, do_print=do_print)),
lambda xv: xv, xv)
res = fun2(True, matrix)
self.assertAllClose(fun2(True, matrix, do_print=False), res, check_dtypes=False)
hcb.barrier_wait()
assertMultiLineStrippedEqual(self, """
TBD""", testing_stream.output)
@jtu.skip_on_devices("cpu", "gpu")
# TODO(necula): file XLA:GPU bug for the 'Sharding' CustomCall
def test_tap_pjit(self):
devices = np.array(local_devices())
nr_devices = len(devices)
if nr_devices < 2:
raise SkipTest("test requires at least 2 devices")
print(f"test_tap_pjit is running on devices {devices}.")
# x: i32[D, 3] = [[0, 1, 2], [10, 11, 12], ...]
# y: i32[3, 4]
x = jnp.arange(100, dtype=jnp.int32).reshape((10, 10))[:nr_devices, :3]
y = jnp.ones((3, 4), np.int32)
@partial(jax.named_call, name="fun1") # for xprof debugging
def fun1(x, do_print=False):
z = jnp.dot(x, y)
return maybe_print(do_print, z, "z", tap_with_device=True)
res0 = fun1(x, do_print=False)
pjit_fun1 = pjit.pjit(
partial(fun1, do_print=True),
in_axis_resources=(P("d"),),
out_axis_resources=P("d"))
with maps.mesh(devices, ["d"]):
# Print the internal IR
helper_log_ir(
f"{self._testMethodName}.pjit",
pjit_fun1,
x,
num_partitions=nr_devices)
res = pjit_fun1(x)
self.assertAllClose(res0, res)
hcb.barrier_wait("before check")
# Assertion text is for 2 devices (also works for 1 device)
# Note that a single call is made.
assertMultiDeviceOutputEqual(
self, """
device: cpu:0 what: z
[[ 3 3 3 3]
[33 33 33 33]]""")
  def test_tap_scan_custom_jvp(self):
"""custom JVP, inside scan.
This exercises the custom_jvp_call_jaxpr primitives."""
@api.custom_jvp
def f(x):
return x * hcb.id_print(x, output_stream=testing_stream, what="x")
@f.defjvp
def f_jvp(primals, tangents):
x, = primals
x_dot, = tangents
primal_out = f(x)
tangent_out = 3. * x * hcb.id_print(x_dot, output_stream=testing_stream, what="x_dot")
return primal_out, tangent_out
def g(x):
# Sum f(x_i)
return lax.scan(lambda carry, inp: (carry + f(inp), 0.),
np.full(x.shape[1:], 0.), # Like x w/o leading dim
x)[0]
arg = np.full((2,), 0.7)
self.assertAllClose(0.7 * 0.7 * 2, g(arg))
hcb.barrier_wait()
self.assertMultiLineStrippedEqual("""
what: x
0.7
what: x
0.7""", testing_stream.output)
testing_stream.reset()
self.assertAllClose(np.array([2.1, 2.1]), api.grad(g)(arg), check_dtypes=False)
hcb.barrier_wait()
self.assertMultiLineStrippedEqual("""
what: x
0.7
what: x
0.7
transforms: ['transpose'] what: x_dot
2.1
transforms: ['transpose'] what: x_dot
2.1""", testing_stream.output)
def test_tap_scan_custom_vjp(self):
"""custom VJP, inside scan.
This exercises the custom_vjp_call_jaxpr primitives."""
@api.custom_vjp
def f(x):
return x * hcb.id_print(x, output_stream=testing_stream, what="x")
# f_fwd: a -> (b, residual)
def f_fwd(x):
return f(x), 3. * x
# f_bwd: (residual, CT b) -> [CT a]
def f_bwd(residual, ct_b):
return residual * hcb.id_print(ct_b, output_stream=testing_stream, what="ct_b"),
f.defvjp(f_fwd, f_bwd)
def g(x):
# Sum f(x_i)
return lax.scan(lambda carry, inp: (carry + f(inp), 0.),
np.full(x.shape[1:], 0.), # Like x w/o leading dim
x)[0]
arg = np.full((2,), 0.7)
self.assertAllClose(0.7 * 0.7 * 2, g(arg))
hcb.barrier_wait()
self.assertMultiLineStrippedEqual("""
what: x
0.7
what: x
0.7""", testing_stream.output)
testing_stream.reset()
self.assertAllClose(np.array([2.1, 2.1]), api.grad(g)(arg), check_dtypes=False)
hcb.barrier_wait()
self.assertMultiLineStrippedEqual("""
what: x
0.7
what: x
0.7
what: ct_b
1.
what: ct_b
1.""", testing_stream.output)
def test_tap_mask(self):
@partial(api.mask, in_shapes=['n'], out_shape='')
def padded_sum(x):
three_x = hcb.id_print((x, 2 * x), result=3 * x, what="x",
output_stream=testing_stream)
return jnp.sum(three_x)
x = np.arange(5.)
self.assertAllClose(9., padded_sum([x], dict(n=3)))
hcb.barrier_wait()
self.assertMultiLineStrippedEqual("""
transforms: [('mask', {'logical_shapes': 5})] what: x
( ( [0. 1. 2. 3. 4.]
[0. 2. 4. 6. 8.] )
( ( 3 )
( 3 ) ) )""", testing_stream.output)
testing_stream.reset()
# With VMAP
xv = np.arange(10.).reshape((2, 5)) # logical_shape = 5
self.assertAllClose(
np.array([9., 78.]),
# batch_size = 2, n=3 and 4 for the two elements
api.vmap(padded_sum)([xv],
dict(n=np.array([3., 4.]))))
hcb.barrier_wait()
self.assertMultiLineStrippedEqual("""
transforms: [('mask', {'logical_shapes': 5}), ('batch', {'batch_dims': (0, 0, 0, 0)})] what: x
( ( [[0. 1. 2. 3. 4.]
[5. 6. 7. 8. 9.]]
[[ 0. 2. 4. 6. 8.]
[10. 12. 14. 16. 18.]] )
( ( [3. 4.] )
( [3. 4.] ) ) )""", testing_stream.output)
testing_stream.reset()
# With JVP
self.assertAllClose((9., 0.9),
api.jvp(lambda arg: padded_sum([arg], dict(n=3)),
(x,), (x * 0.1,)))
hcb.barrier_wait()
self.assertMultiLineStrippedEqual("""
transforms: [('mask', {'logical_shapes': 5}), 'jvp'] what: x
( ( ( [0. 1. 2. 3. 4.]
[0. 2. 4. 6. 8.] )
( ( 3 )
( 3 ) ) )
( ( [0. 0.1 0.2 0.3 0.4]
[0. 0.2 0.4 0.6 0.8] )
( ( False )
( False ) ) ) )""", testing_stream.output)
testing_stream.reset()
# Now with JIT
self.assertAllClose(9., api.jit(padded_sum)([x], dict(n=3)))
hcb.barrier_wait()
self.assertMultiLineStrippedEqual("""
transforms: [('mask', {'logical_shapes': 5})] what: x
( ( [0. 1. 2. 3. 4.]
[0. 2. 4. 6. 8.] )
( ( 3 )
( 3 ) ) )""", testing_stream.output)
def test_tap_callback_delay(self):
hcb.callback_extra = lambda dev: time.sleep(1)
def func(x):
for i in range(5):
x = hcb.id_print(x * i, what="x times i")
return x
api.jit(func)(np.arange(6, dtype=np.float32).reshape((2, 3)))
def test_tap_callback_delay_barrier(self):
hcb.callback_extra = lambda dev: time.sleep(2)
def func(x):
for i in range(1, 4):
x = hcb.id_print(x * i, what=f"x times {i}", output_stream=testing_stream)
return x
api.jit(func)(np.arange(6, dtype=np.float32).reshape((2, 3)))
# Wait for the results
hcb.barrier_wait("first")
expected = """
what: x times 1
[[0. 1. 2.]
[3. 4. 5.]]
what: x times 2
[[ 0. 2. 4.]
[ 6. 8. 10.]]
what: x times 3
[[ 0. 6. 12.]
[18. 24. 30.]]"""
self.assertMultiLineStrippedEqual(expected, testing_stream.output)
testing_stream.reset()
# Call again
api.jit(func)(np.arange(6, dtype=np.float32).reshape((2, 3)))
hcb.barrier_wait("second")
self.assertMultiLineStrippedEqual(expected, testing_stream.output)
def test_tap_error_bad_consumer_id(self):
"""Try to use reserved consumer ID 0.
Check that we get the proper error from the runtime."""
if not hcb._use_outfeed(jtu.device_under_test()):
raise SkipTest("test works only for outfeed")
comp = xla_bridge.make_computation_builder(self._testMethodName)
token = hcb.xops.CreateToken(comp)
hcb._initialize_outfeed_receiver() # Needed if this is the sole test
with self.assertRaisesRegex(RuntimeError,
"Consumer ID cannot be a reserved value: 0"):
hcb._callback_handler_data.receiver.add_outfeed(
comp, token, 0,
[xla_bridge.constant(comp, np.zeros((2, 3), dtype=np.float32))])
def test_tap_error_different_shapes(self):
"""Try to register different shapes for the same consumer ID."""
if not hcb._use_outfeed(jtu.device_under_test()):
raise SkipTest("test works only for outfeed")
comp = xla_bridge.make_computation_builder(self._testMethodName)
token = hcb.xops.CreateToken(comp)
hcb._initialize_outfeed_receiver() # Needed if this is the sole test
hcb._callback_handler_data.receiver.add_outfeed(
comp, token, 123,
[xla_bridge.constant(comp, np.zeros((2, 3), dtype=np.float32))])
with self.assertRaisesRegex(
RuntimeError, ".*does not match previous shape element_type.*"):
hcb._callback_handler_data.receiver.add_outfeed(
comp, token, 123,
[xla_bridge.constant(comp, np.zeros((2, 3), dtype=np.int32))])
with self.assertRaisesRegex(
RuntimeError, ".*does not match previous shape element_type.*"):
hcb._callback_handler_data.receiver.add_outfeed(
comp, token, 123,
[xla_bridge.constant(comp, np.zeros((2,), dtype=np.float32))])
def test_tap_id_tap_removed_kwargs(self):
def func(x, transforms, y):
pass
with self.assertRaisesRegex(TypeError, r"Support for \*\*kwargs in ``id_tap``"):
hcb.id_tap(func, 1, y=2)
def test_tap_odeint(self):
# TODO: find a smaller repro for bug #4015
# Seems to be xla_call(scan(xla_call)), all under grad.
from jax.experimental.ode import odeint
def f(x, t, k):
x = hcb.id_print(x)
return -k * x
def loss(k=1.0):
t = jnp.linspace(0, 0.001, num=2)
xs = odeint(f, 1.0, t, k)
return xs[-1]
api.grad(loss)(1.0) # should not fail
def test_tap_remat(self):
def f(i, k):
x = hcb.id_print(k + i, output_stream=testing_stream)
return k * x
def loss(k):
return lax.fori_loop(0, 2, api.remat(f), k)
print(loss(3))
hcb.barrier_wait()
expected = """
3
10"""
self.assertMultiLineStrippedEqual(expected, testing_stream.output)
def test_tap_named_call(self):
def tap_scalar(init, do_print=False):
@partial(api.named_call, name="step")
def step(acc, step_nr):
acc = acc + step_nr
maybe_print(do_print, step_nr, what="step_nr")
return acc, None
return lax.scan(step, init, np.arange(2))
self.assertAllClose(tap_scalar(3., do_print=False), tap_scalar(3., do_print=True))
hcb.barrier_wait()
expected = """
what: step_nr
0
what: step_nr
1"""
self.assertMultiLineStrippedEqual(expected, testing_stream.output)
class HostCallbackCallTest(jtu.JaxTestCase):
"""Tests for hcb.call"""
def setUp(self):
super().setUp()
if jtu.device_under_test() == "gpu" and jax.device_count() > 1:
raise SkipTest("host_callback broken on multi-GPU platforms (#6447)")
testing_stream.reset()
testing_stream._test_method_name = self._testMethodName
def tearDown(self) -> None:
hcb.barrier_wait("HostCallbackCallTest.tearDown")
super().tearDown()
def call_log_testing_stream(self, func, arg, *, result_shape, name=""):
"""Call `func` and log inputs and outputs to the testing stream"""
def call_log(arg):
def val2str(v):
        return np.array2string(np.array(v))
testing_stream.write(f"Call {name}({val2str(arg)})\n")
res = func(arg)
testing_stream.write(f" = {val2str(res)}\n")
return res
return hcb.call(call_log, arg, result_shape=result_shape)
def test_call_simple(self):
def f_outside(x):
return 2 * x
def fun(x):
y = hcb.call(f_outside, x + 1, result_shape=x)
return 3 * (1 + y)
arg = np.arange(24, dtype=np.int32).reshape((2, 3, 4))
self.assertAllClose(3 * (1 + 2 * (arg + 1)), fun(arg))
@parameterized.named_parameters(
jtu.cases_from_list(
dict(testcase_name=f"_{np.dtype(dtype).name}", dtype=dtype)
for dtype in jtu.dtypes.all
if dtype != np.bool_))
def test_call_types(self, dtype=np.float64):
def f_outside(x):
      # Use x + x so that the result has the same dtype as the input.
return x + x
def fun(x):
return hcb.call(f_outside, x + x, result_shape=x)
arg = np.arange(24, dtype=dtype).reshape((2, 3, 4))
self.assertAllClose(arg + arg + arg + arg, fun(arg), check_dtypes=True)
def test_call_types_bool(self, dtype=np.float64):
def f_outside(x):
return np.invert(x)
def fun(x):
return hcb.call(f_outside, x, result_shape=x)
arg = np.random.choice(a=[True, False], size=(2, 3, 4))
self.assertAllClose(np.invert(arg), fun(arg))
def test_call_tuples(self):
def f_outside(args):
x, y = args
return y, x # Swap the tuple
def fun(x):
xy = hcb.call(f_outside, (x, x + 1), result_shape=(x, x))
return 2 * xy[0] + 3 * xy[1]
arg = np.arange(24, dtype=np.int32).reshape((2, 3, 4))
self.assertAllClose(2 * (arg + 1) + 3 * arg, fun(arg))
def test_call_empty_arg(self):
"""Call with empty array."""
result = np.ones((2,), dtype=np.float32)
def f_outside(_):
return result
def fun(x):
return x + hcb.call(f_outside, (),
result_shape=api.ShapeDtypeStruct(result.shape, result.dtype))
self.assertAllClose(2. + result, fun(2.))
def test_call_empty_result(self):
"""Call returning empty array."""
result_shape = (2, 0)
def f_outside(_):
return np.ones(result_shape, dtype=np.float32)
def fun(x):
return x + hcb.call(f_outside, 1.,
result_shape=api.ShapeDtypeStruct(result_shape, np.float32))
self.assertAllClose(f_outside(0.), fun(2.))
def test_call_empty_result_inside_pytree(self):
"""Call returning a tuple with an empty array and a non-empty one."""
result_shape_0 = (2, 0)
result_shape_2 = (0,)
def f_outside(_):
return (np.ones(result_shape_0, dtype=np.float32),
np.ones((1,), dtype=np.float32),
np.ones(result_shape_2, dtype=np.float32))
def fun(x):
res = hcb.call(f_outside, 1.,
result_shape=(api.ShapeDtypeStruct(result_shape_0, np.float32),
api.ShapeDtypeStruct((1,), np.float32),
api.ShapeDtypeStruct(result_shape_2, np.float32)))
self.assertEqual(result_shape_0, res[0].shape)
self.assertEqual(result_shape_2, res[2].shape)
return x + res[1]
self.assertAllClose(2 + np.ones((1,), dtype=np.float32), fun(2.))
def test_call_empty_result_all_pytree(self):
"""Call returning a tuple of empty arrays."""
result_shape = (2, 0)
def f_outside(_):
return (np.ones(result_shape, dtype=np.float32),
np.ones(result_shape, dtype=np.float32))
def fun(x):
res = hcb.call(f_outside, 1.,
result_shape=(api.ShapeDtypeStruct(result_shape, np.float32),
api.ShapeDtypeStruct(result_shape, np.float32)))
return x + res[0] + res[1]
self.assertAllClose(np.ones(result_shape, dtype=np.float32),
fun(2.))
def test_call_no_result(self):
def f_outside(arg):
self.call_log_testing_stream(lambda x: None, arg,
result_shape=None,
name="outside")
return arg
self.assertAllClose((3., 4.), f_outside((3., 4.)))
hcb.barrier_wait()
expected = """
Call outside([3. 4.])
= [3. 4.]"""
self.assertMultiLineStrippedEqual(expected, testing_stream.output)
def test_call_cond(self):
def f_outside(args):
x, y = args
return x * y
def loop(x, use_outside=True):
def body(i, acc):
return lax.cond(i % 2 == 1,
lambda _: (hcb.call(f_outside, (acc, i),
result_shape=acc)
if use_outside else f_outside((acc, i))),
lambda _: acc,
None)
return lax.fori_loop(0, 18, body, x)
res_inside = loop(1.2, use_outside=False)
self.assertAllClose(res_inside, api.jit(loop)(1.2))
def test_call_jit_scan_call(self):
def f_outside(x):
return x
def loop(x, use_outside=True):
def body(carry, i):
if use_outside:
return carry + hcb.call(f_outside, i,
result_shape=i), None
else:
return carry + i, None
return lax.scan(body, 0, x)
x = np.arange(5, dtype=np.int32)
res_outside = api.jit(partial(loop, use_outside=True))(x)
self.assertAllClose(res_outside, loop(x, use_outside=False))
def test_call_doc_example1(self):
"""Examples from the documentation: simplest, call a function"""
def host_eig(x):
return np.linalg.eigvals(x)
shape = (2, 5, 4, 4)
m = np.ones(shape, dtype=np.float32)
def fun(m):
eig_m = hcb.call(host_eig, m,
result_shape=api.ShapeDtypeStruct(m.shape[:-1], m.dtype))
return eig_m
expected_res = np.linalg.eigvals(m)
self.assertAllClose(expected_res, fun(m))
def test_call_doc_example_hlo(self):
"""Examples from the documentation: simplest, call a function."""
def fun1(m):
      return jnp.sin(hcb.call(lambda x: np.cos(x),
jnp.cos(m),
result_shape=m))
m = np.ones((2,), np.float32)
helper_print_optimized_hlo(fun1, m)
def fun2(m):
x = hcb.call(lambda x: None, 2, result_shape=())
return x
m = np.ones((2,), np.float32)
helper_print_optimized_hlo(fun2, m)
def test_call_with_device(self):
def callback_func(x, device=None):
testing_stream.write(f"device: {device}\n Called with {x}")
return x
def func(x):
return hcb.call(callback_func, x,
result_shape=x,
call_with_device=True)
self.assertEqual(3., func(3.))
assertMultiDeviceOutputEqual(self, """
device: cpu:0
Called with 3.00""")
def test_call_pmap(self):
# Works for 1 or 2 devices
def callback_func(x, device=None):
testing_stream.write(f"device: {device}\n Called with {x}")
return x * np.array(3, np.int32)
def fun(x): # x: i32
return hcb.call(callback_func, x * 2,
result_shape=x,
call_with_device=True)
xv = jnp.arange(len(local_devices()), dtype=jnp.int32)
res = api.pmap(fun)(xv)
self.assertAllClose(api.pmap(lambda x: x * 6)(xv), res)
# Assertion text is for 2 devices (also works for 1 device)
assertMultiDeviceOutputEqual(self, """
device: cpu:0
Called with 0
device: cpu:1
Called with 2""")
def test_call_vmap(self):
def f_outside(x): return x
def fun(x):
return hcb.call(f_outside, x, result_shape=x)
with self.assertRaisesRegex(NotImplementedError,
"batching rules are implemented only for id_tap, not for call"):
api.vmap(fun)(np.ones((2, 3)))
@jtu.skip_on_devices("cpu", "gpu")
# TODO(necula): file XLA:GPU bug for the 'Sharding' CustomCall
def test_call_pjit(self):
devices = np.array(local_devices())
nr_devices = len(devices)
if nr_devices < 2:
raise SkipTest("test requires at least 2 devices")
print(f"test_call_pjit is running on devices {devices}.")
# x: i32[D, 3] = [[0, 1, 2], [10, 11, 12], ...]
# y: i32[3, 4]
x = jnp.arange(100, dtype=jnp.int32).reshape((10, 10))[:nr_devices, :3]
y = jnp.ones((3, 4), np.int32)
def callback_x5_func(x, device=None):
testing_stream.write(f"device: {device}\n Called with {x}")
return x * np.array(5, np.int32)
def fun(x):
xy = jnp.dot(x, y)
return hcb.call(
callback_x5_func, xy, result_shape=xy, call_with_device=True)
pjit_fun = pjit.pjit(
fun, in_axis_resources=(P("d"),), out_axis_resources=P("d"))
with maps.mesh(devices, ["d"]):
# Print the internal IR
helper_log_ir(
f"{self._testMethodName}.pjit",
pjit_fun,
x,
num_partitions=nr_devices)
res = pjit_fun(x)
expected_res = jnp.dot(x, y) * np.array(5, np.int32)
self.assertAllClose(expected_res, res, check_dtypes=False)
hcb.barrier_wait("before assertion")
# Assertion text is for 2 devices (also works for 1 device)
assertMultiDeviceOutputEqual(
self, """
device: cpu:0
Called with [[ 3 3 3 3]
[33 33 33 33]]""")
def test_call_error_bad_result_shape(self):
with self.assertRaisesRegex(
ValueError,
"The values must be either numeric scalars, or must have 'shape' and 'dtype' attributes"):
hcb.call(lambda x: x, 3., result_shape="string")
with self.assertRaisesRegex(
ValueError,
"The values must be either numeric scalars, or must have 'shape' and 'dtype' attributes"):
hcb.call(lambda x: x, 3., result_shape=lambda x: x)
hcb.barrier_wait("wait for error")
def helper_check_callback_errors(self, thunk: Callable,
expected_exc_txt: str):
"""Calls thunk() and checks for expected exceptions.
"""
if jtu.device_under_test() == "cpu":
# On CPU the runtime crashes, and the tests are all aborted
raise SkipTest("TODO: CPU runtime crashes on unexpected infeed")
elif jtu.device_under_test() == "gpu":
# On GPU we get a nice error back to Python
with self.assertRaisesRegex(
RuntimeError,
"RET_CHECK failure .* Mismatch between infeed source buffer shape s8.12345."):
thunk()
elif jtu.device_under_test() == "tpu":
# On TPU we get no error!!!
raise SkipTest("TODO: TPU runtime does not check infeed, and just computes with garbage")
# Both on GPU and TPU we also get an error during the barrier_wait at the
# end of the test. Run a barrier_wait now, to consume that error.
with self.assertRaisesRegex(
hcb.CallbackException,
re.compile(
"There were exceptions during callback processing.*Last one was:.*" +
expected_exc_txt,
re.DOTALL)):
hcb.barrier_wait("Waiting for error")
def test_call_error_callback_throws_exception(self):
def f_outside(x):
raise ValueError("user exception")
def fun(x):
return hcb.call(f_outside, x, result_shape=x)
self.helper_check_callback_errors(lambda: fun(3.),
"ValueError: user exception")
def test_call_error_callback_returns_unexpected_shape(self):
def fun(x):
return hcb.call(lambda x: (x, x), x, result_shape=x)
self.helper_check_callback_errors(lambda: fun(3.),
"Callback func .* should have returned a result with pytree")
def test_call_error_then_compute(self):
# Continue computation on device after error
def f_outside(x):
raise ValueError("user exception")
def fun(x):
x1 = hcb.call(f_outside, x, result_shape=x)
return x1
arg = np.arange(3, dtype=np.int32)
self.helper_check_callback_errors(lambda: self.assertAllClose(arg, fun(arg)),
"ValueError: user exception")
def call_jax_other_device(jax_outside_fun, arg, *, device):
"""Calls a JAX function on a specific device with simple support for reverse AD.
Functions whose name starts with "jax_outside" are called on another device,
by way of hcb.call.
"""
def run_jax_outside_fun(arg):
return api.jit(jax_outside_fun)(api.device_put(arg, device))
@api.custom_vjp
def make_call(arg):
return hcb.call(run_jax_outside_fun, arg,
result_shape=api.eval_shape(jax_outside_fun, arg))
# Define the fwd and bwd custom_vjp functions
def make_call_vjp_fwd(arg):
# Return the primal argument as the residual. Use `make_call` for the
# primal computation to enable higher-order AD.
return make_call(arg), arg # Return the primal argument as the residual
def make_call_vjp_bwd(res, ct_res):
arg = res # residual is the primal argument
def jax_outside_vjp_fun(arg_and_ct):
arg, ct = arg_and_ct
_, f_vjp = api.vjp(jax_outside_fun, arg)
ct_in, = f_vjp(ct)
return ct_in
return (call_jax_other_device(jax_outside_vjp_fun, (arg, ct_res), device=device),)
make_call.defvjp(make_call_vjp_fwd, make_call_vjp_bwd)
return make_call(arg)
class CallJaxTest(jtu.JaxTestCase):
"""Tests using `call_jax_other_device`."""
def setUp(self):
if jtu.device_under_test() == "gpu" and jax.device_count() > 1:
raise SkipTest("host_callback broken on multi-GPU platforms (#6447)")
if jtu.device_under_test() != "cpu":
assert api.devices("cpu")
self.outside_device = api.devices("cpu")[0]
else:
if len(api.devices("cpu")) == 1:
raise SkipTest("Test needs at least two devices. On CPU use XLA_FLAGS=--xla_force_host_platform_device_count=2")
self.outside_device = api.devices("cpu")[1]
super().setUp()
def test_jax_impl(self):
def f_jax(x):
return jnp.sin(x)
def f_outside(x):
return call_jax_other_device(f_jax, x, device=self.outside_device)
self.assertAllClose(f_jax(3.), f_outside(3.))
self.assertAllClose(f_jax(3.), api.jit(f_outside)(3.))
def test_jax_impl_pytree(self):
def f_jax(x):
# x : dict(a=..., b=...) and output is a list of two elements
return [jnp.sin(x["a"]), jnp.sin(x["b"])]
def f_outside(x):
return call_jax_other_device(f_jax, x, device=self.outside_device)
x = dict(a=3., b=4.)
res_jax = f_jax(x)
# print(f"outside_jaxpr = {api.make_jaxpr(f_outside)(x)}")
res_outside = f_outside(x)
self.assertAllClose(res_jax, res_outside)
def test_jax_grad(self):
def f_jax(x):
return 2. * jnp.sin(x)
def f_outside(x):
return 2. * call_jax_other_device(jnp.sin, x, device=self.outside_device)
res_jax = api.grad(f_jax)(3.)
self.assertAllClose(res_jax, api.grad(f_outside)(3.))
def test_jax_grad_pytree(self):
def f_jax(x):
# x : dict(a=..., b=...) and output is a float
return 3. * jnp.sin(x["a"]) + jnp.sin(x["b"])
def f_outside(x):
return call_jax_other_device(f_jax, x, device=self.outside_device)
x = dict(a=3., b=4.)
res_jax = api.grad(f_jax)(x)
self.assertAllClose(res_jax, api.grad(f_outside)(x))
def test_jax_grad_of_grad(self):
def f_jax(x):
return 2. * x * x * x
def f_outside(x):
return 2. * call_jax_other_device(lambda x: x * x * x, x, device=self.outside_device)
res_jax = api.grad(api.grad(f_jax))(5.)
res_outside = api.grad(api.grad(f_outside))(5.)
self.assertAllClose(res_jax, res_outside)
class OutfeedRewriterTest(jtu.JaxTestCase):
def setUp(self):
if jtu.device_under_test() == "gpu" and jax.device_count() > 1:
raise SkipTest("host_callback broken on multi-GPU platforms (#6447)")
super().setUp()
def assertRewrite(self, expected: str, func: Callable, args: Sequence,
has_input_token=True, has_output_token=True):
"""Check that the rewrite of func(*args) matches expected."""
jaxpr = api.make_jaxpr(func)(*args)
rewritten = hcb._rewrite_closed_jaxpr(jaxpr, # noqa: F841
has_input_token, has_output_token)
    # Since it is somewhat annoying to update the Jaxpr assertions when we change
    # the Jaxpr printing, we do not check these by default. Before making changes
    # to code generation or Jaxpr rewriting, it is recommended to turn on the
    # checking, update the expected Jaxprs, and then make the changes.
# assertMultiLineStrippedEqual(self, expected, str(rewritten))
del rewritten
def test_no_outfeed(self):
self.assertRewrite("""
{ lambda ; a.
let b = mul a a
c = add a b
in (c,) }""", lambda x: x + x * x, [0], has_input_token=False,
has_output_token=False)
self.assertRewrite("""
{ lambda ; a d e.
let b = mul a a
c = add a b
in (c,) }""", lambda x: x + x * x, [0], has_output_token=False)
self.assertRewrite("""
{ lambda ; a d e.
let b = mul a a
c = add a b
in (c, d, e) }""", lambda x: x + x * x, [0])
def test_simple_outfeed(self):
self.assertRewrite("""
{ lambda ; a d e.
let b = add a a
c f g = outside_call[ arg_treedef=*
callback=...
has_token=True
identity=True ] b d e
in (c, f, g) }""", lambda x: hcb.id_print(x + x), [0])
def test_simple_outfeed_without_input_token(self):
self.assertRewrite("""
{ lambda ; a b.
let e = create_token a b
f = create_token a b
c = add a b
d g h = outside_call[ arg_treedef=*
callback=...
has_token=True
identity=True ] c e f
in (d,) }""", lambda x1, x2: hcb.id_print(x1 + x2), [1, 2],
has_input_token=False, has_output_token=False)
def test_simple_outfeed_without_input_token_nor_invars(self):
self.assertRewrite("""
{ lambda ; .
let b = create_token
c = create_token
a d e = outside_call[ arg_treedef=*
callback=...
has_token=True
identity=True ] 42 b c
in (a,) }""", lambda: hcb.id_print(42), [],
has_input_token=False, has_output_token=False)
def test_multiple_tap_without_dependencies(self):
def f(x):
hcb.id_print(x, what="x")
hcb.id_print(x + 1, what="x + 1")
return 2
self.assertRewrite("""
{ lambda ; a c d.
let _ e f = outside_call[ arg_treedef=*
callback=...
has_token=True
identity=True ] a c d
b = add a 1
_ g h = outside_call[ arg_treedef=*
callback=...
has_token=True
identity=True ] b e f
in (2, g, h) }""", f, [1])
def test_cond(self):
y = jnp.ones(5) # captured const
def func(x, z):
return lax.cond(z > 0, (1, 2), lambda a: (a[0], jnp.zeros(5)),
z, lambda a: (hcb.id_print(a), y))
self.assertRewrite("""
{ lambda a ; b c h i.
let d = gt c 0
e = convert_element_type[ new_dtype=int32 ] d
f g j k =
cond[ branches=( { lambda ; a b c d f g.
let e h i = outside_call[ arg_treedef=*
callback=...
has_token=True
identity=True ] d f g
in (e, a, h, i) }
{ lambda ; f_ a b c g h.
let d = broadcast_in_dim[ broadcast_dimensions=( )
shape=(5,) ] 0.00
in (a, d, g, h) } )
linear=(False, False, False, False, False, False) ] e a 1 2 c h i
in (f, g, j, k) }""", func, [y, 5])
def test_while(self):
ct_body = jnp.ones(5, np.float32) # captured const for the body
ct_cond = jnp.ones(5, np.float32) # captured const for the conditional
def func(x):
# x: f32[5]
# c: (f32[5], f32)
return lax.while_loop(lambda c: c[1] < jnp.sum(c[0] + ct_cond),
lambda c: (ct_body, hcb.id_print(c[1]) + 1.),
(x, np.float32(1.)))
self.assertRewrite("""
{ lambda a b ; c f g.
let d e h i =
while[ body_jaxpr={ lambda ; a b c f g.
let d h i = outside_call[ arg_treedef=*
callback=...
has_token=True
identity=True ] c f g
e = add d 1.00
in (a, e, h, i) }
body_nconsts=1
cond_jaxpr={ lambda ; a b c g h.
let d = add b a
e = reduce_sum[ axes=(0,) ] d
f = lt c e
in (f,) }
cond_nconsts=1 ] a b c 1.00 f g
in (d, e, h, i) }""", func, [ct_body])
def test_while_pred_outfeed(self):
"""A while with outfeed in the pred."""
ct_body = jnp.ones(5) # captured const for the body
ct_cond = jnp.ones(2) # captured const for the conditional
def func(x):
return lax.while_loop(lambda c: hcb.id_print(ct_cond, result=c[1]) < 5,
lambda c: (ct_body, hcb.id_print(c[1]) + 1),
(x, 1))
self.assertRewrite("""
{ lambda a b ; c f g.
let j k l = xla_call[ call_jaxpr={ lambda ; a b c g h.
let d i j = outside_call[ arg_treedef=*
callback=...
has_token=True
identity=True ] a g h
e = id_tap_dep c d
f = lt e 5
in (f, i, j) }
donated_invars=(False, False, False, False, False)
name=cond_before ] a c 1 f g
bf d e h i =
while[ body_jaxpr={ lambda ; r s t u v w x.
let y z ba bb =
xla_call[ call_jaxpr={ lambda ; a b c f g.
let d h i = outside_call[ arg_treedef=*
callback=...
has_token=True
identity=True ] c f g
e = add d 1
in (a, e, h, i) }
donated_invars=(False, False, False, False, False)
name=body ] s u v w x
bc bd be =
xla_call[ call_jaxpr={ lambda ; a b c g h.
let d i j = outside_call[ arg_treedef=*
callback=...
has_token=True
identity=True ] a g h
e = id_tap_dep c d
f = lt e 5
in (f, i, j) }
donated_invars=(False, False, False, False, False)
name=cond_body ] r y z ba bb
in (bc, y, z, bd, be) }
body_nconsts=2
cond_jaxpr={ lambda ; m n o p q.
let
in (m,) }
cond_nconsts=0 ] a b j c 1 k l
in (d, e, h, i) }""", func, [ct_body])
def test_scan(self):
y = jnp.ones(5) # captured const
def func(x):
return lax.scan(lambda c, a: (hcb.id_print(c), y), (1, 2), x)
self.assertRewrite("""
{ lambda a ; b f g.
let c d h i e =
scan[ jaxpr={ lambda ; a b c g h d.
let e f i j =
outside_call[ arg_treedef=PyTreeDef(tuple, [*,*])
callback=...
has_token=True
identity=True ] b c g h
in (e, f, i, j, a) }
length=5
linear=(False, False, False, False, False, False)
num_carry=4
num_consts=1
reverse=False
unroll=1 ] a 1 2 f g b
in (c, d, e, h, i) }""", func, [y])
def test_scan_custom_jvp(self):
"""custom JVP, inside scan.
This exercises the custom_jvp_call_jaxpr primitives."""
@api.custom_jvp
def f(x):
return x * hcb.id_print(x)
@f.defjvp
def f_jvp(primals, tangents):
x, = primals
x_dot, = tangents
primal_out = f(x)
tangent_out = 3. * x * hcb.id_print(x_dot)
return primal_out, tangent_out
def g(x):
# Sum f(x_i)
return lax.scan(lambda carry, inp: (carry + f(inp), 0.),
np.full(x.shape[1:], 0.), # Like x w/o leading dim
x)[0]
arg = np.full((5,), 0.7)
self.assertRewrite("""
{ lambda ; a c d.
let b e f _ =
scan[ jaxpr={ lambda ; a e f b.
let c g h = custom_jvp_call_jaxpr[ fun_jaxpr={ lambda ; a d e.
let b f g = outside_call[ arg_treedef=*
callback=...
has_token=True
identity=True ] a d e
c = mul a b
in (c, f, g) }
num_consts=0 ] b e f
d = add a c
in (d, g, h, 0.00) }
length=5
linear=(False, False, False, False)
num_carry=3
num_consts=0
reverse=False
unroll=1 ] 0.00 c d a
in (b, e, f) }""", g, [arg])
self.assertRewrite("""
{ lambda ; a d e.
let _ _ f g _ b =
scan[ jaxpr={ lambda ; a b h i c d.
let e j k = custom_jvp_call_jaxpr[ fun_jaxpr={ lambda ; a d e.
let b f g = outside_call[ arg_treedef=*
callback=...
has_token=True
identity=True ] a d e
c = mul a b
in (c, f, g) }
num_consts=0 ] c h i
f = add a e
g = mul c 3.00
in (f, *, j, k, 0.00, g) }
length=5
linear=(False, True, False, False, False, True)
num_carry=4
num_consts=0
reverse=False
unroll=1 ] 0.00 * d e a *
_ _ h i _ c =
scan[ jaxpr={ lambda ; a b g h c d.
let e = mul b d
f i j = outside_call[ arg_treedef=*
callback=...
has_token=True
identity=True
transforms=(('transpose',),) ] e g h
in (*, b, i, j, *, f) }
length=5
linear=(True, True, False, False, True, False)
num_carry=4
num_consts=0
reverse=True
unroll=1 ] * 1.00 f g * b
in (c, h, i) }""", api.grad(g), [arg])
def test_scan_custom_vjp(self):
"""custom VJP, inside scan.
This exercises the custom_vjp_call_jaxpr primitives."""
@api.custom_vjp
def f(x):
return x * hcb.id_print(x)
# f_fwd: a -> (b, residual)
def f_fwd(x):
return f(x), 3. * x
# f_bwd: (residual, CT b) -> [CT a]
def f_bwd(residual, ct_b):
return residual * hcb.id_print(ct_b),
f.defvjp(f_fwd, f_bwd)
def g(x):
# Sum f(x_i)
return lax.scan(lambda carry, inp: (carry + f(inp), 0.),
np.full(x.shape[1:], 0.), # Like x w/o leading dim
x)[0]
arg = np.full((2,), 0.7)
self.assertRewrite("""
{ lambda ; a c d.
let b e f _ =
scan[ jaxpr={ lambda ; a e f b.
let c g h = custom_vjp_call_jaxpr[
fun_jaxpr={ lambda ; a d e.
let b f g = outside_call[ arg_treedef=*
callback=...
has_token=True
identity=True ] a d e
c = mul a b
in (c, f, g) }
num_consts=0
] b e f
d = add a c
in (d, g, h, 0.00) }
length=2
linear=(False, False, False, False)
num_carry=3
num_consts=0
reverse=False
unroll=1 ] 0.00 c d a
in (b, e, f) }""", g, [arg])
self.assertRewrite("""
{ lambda ; a d e.
let _ _ f g _ b =
scan[ jaxpr={ lambda ; a b h i c d.
let e j k = custom_vjp_call_jaxpr[
fun_jaxpr={ lambda ; a d e.
let b f g = outside_call[ arg_treedef=*
callback=...
has_token=True
identity=True ] a d e
c = mul a b
in (c, f, g) }
num_consts=0
] c h i
f = add a e
g = mul c 3.00
in (f, *, j, k, 0.00, g) }
length=2
linear=(False, True, False, False, False, True)
num_carry=4
num_consts=0
reverse=False
unroll=1 ] 0.00 * d e a *
_ _ h i _ c =
scan[ jaxpr={ lambda ; a b g h c d.
let e i j = outside_call[ arg_treedef=*
callback=...
has_token=True
identity=True ] b g h
f = mul d e
in (*, b, i, j, *, f) }
length=2
linear=(True, True, False, False, True, False)
num_carry=4
num_consts=0
reverse=True
unroll=1 ] * 1.00 f g * b
in (c, h, i) }""", api.grad(g), [arg])
def test_remat_loop(self):
def f(k, x):
x = hcb.id_print(k + x)
return -k * x
def loss(k):
return lax.fori_loop(0, 1, api.remat(f), k)
self.assertRewrite("""
{ lambda ; a c d.
let _ _ b e f =
while[ body_jaxpr={ lambda ; a b c f g.
let d = add a 1
e h i = remat_call[ call_jaxpr={ lambda ; a b g h.
let c = add a b
d i j = outside_call[ arg_treedef=*
callback=...
has_token=True
identity=True ] c g h
e = neg a
f = mul e d
in (f, i, j) }
concrete=False
name=f ] a c f g
in (d, b, e, h, i) }
body_nconsts=0
cond_jaxpr={ lambda ; a b c e f.
let d = lt a b
in (d,) }
cond_nconsts=0 ] 0 1 a c d
in (b, e, f) }""", loss, [2])
def test_named_call(self):
def tap_scalar(init, do_print=False):
@partial(api.named_call, name="step")
def step(acc, step_nr):
acc = acc + step_nr
maybe_print(do_print, step_nr, what="step_nr")
return acc, None
return lax.scan(step, init, np.arange(2, dtype=np.int32))
self.assertRewrite("""
{ lambda a ; b d e.
let c = scan[ jaxpr={ lambda ; a b.
let c = named_call[ call_jaxpr={ lambda ; a b.
let c = add a b
in (c,) }
name=step ] a b
in (c,) }
length=2
linear=(False, False)
num_carry=1
num_consts=0
reverse=False
unroll=1 ] b a
in (c, d, e) }""", tap_scalar, [np.int32(3)])
def test_pmap(self):
def f(xv):
api.pmap(lambda x: jnp.sin(hcb.id_print(x, tap_with_device=True)),
axis_name="i")(xv)
self.assertRewrite("""
{ lambda ; a b c.
let _ d e = xla_pmap[ axis_name=i
axis_size=1
backend=None
call_jaxpr={ lambda ; a d e.
let b f g = outside_call[ arg_treedef=*
callback=...
has_token=True
identity=True ] a d e
c = sin b
in (c, f, g) }
devices=None
donated_invars=(False, False, False)
global_arg_shapes=(None,)
global_axis_size=None
in_axes=(0, 0, 0)
name=<lambda>
out_axes=(0, 0, 0) ] a b c
in (d, e) }""", f, [np.array([2.], dtype=np.float32)])
if __name__ == "__main__":
absltest.main(testLoader=jtu.JaxTestLoader())
|
py
|
1a55ea413f7ed33e14c00dd4354d7ee5bc6b8d5a
|
"""Define a class for managing a thread pool with delayed execution.
Attributes:
log (logging.Logger): Logger for current module.
"""
import time
import logging
from concurrent import futures
from threading import Lock
from threading import Thread
from .singleton import singleton
log = logging.getLogger("ECC")
@singleton
class ThreadPool:
"""Thread pool that makes sure we don't get recurring jobs.
Whenever a job is submitted we check if there is already a job like this
running. If it is, we try to cancel the previous job. We are only able to
cancel this job if it has not started yet.
Example:
active: ['update', 'info'] and 'update' is running.
incoming: 'update' and then another 'update'.
We will try to cancel the first 'update' and will fail as it is running. We
still cancel the 'info' job as it has less priority (no need to get info if
the translation unit is not up to date). We add a new 'update' to the list.
Now there are two 'update' jobs, one running, one pending. Adding another
'update' job will replace the pending update job.
"""
def __init__(self, max_workers=1):
"""Create a thread pool.
Args:
max_workers (int): Maximum number of parallel workers.
"""
self.__thread_pool = futures.ThreadPoolExecutor(
max_workers=max_workers)
self.__lock = Lock()
self.__progress_lock = Lock()
self.__show_animation = False
self.__progress_update_delay = 0.1
self.__progress_idle_delay = 0.3
# All the jobs that are currently active are stored here.
self.__active_jobs = []
        # Start the animation thread. Keep a reference to the Thread object and
        # start it separately; Thread.start() returns None.
        self.__progress_status = None
        self.__progress_thread = Thread(target=self.__animate_progress,
                                        daemon=True)
        self.__progress_thread.start()
@property
def progress_status(self):
"""Return current progress status."""
return self.__progress_status
@progress_status.setter
def progress_status(self, val):
"""Set progress status instance."""
with self.__progress_lock:
self.__progress_status = val
def new_job(self, job):
"""Add a new job to be submitted to a thread pool.
Args:
job (ThreadJob): A job to be run asynchronously.
"""
# Cancel all the jobs with the same name that are already running.
# Iterating over a list is atomic in python, so we should be safe.
for active_job in self.__active_jobs:
if job.overrides(active_job):
if active_job.future.cancel():
log.debug("Canceled job: '%s'", job)
else:
log.debug("Cannot cancel job: '%s'", active_job)
# Submit a new job to the pool.
future = self.__thread_pool.submit(job.function, *job.args)
future.add_done_callback(job.callback)
future.add_done_callback(self.__on_job_done)
job.future = future # Set the future for this job.
with self.__lock:
self.__active_jobs.append(job)
self.__show_animation = True
def __on_job_done(self, future):
"""Call this when the job is done or cancelled."""
# We want to clear the old list and alter the positions of elements.
# This is a potentially dangerous operation, so protect it by a mutex.
with self.__lock:
self.__active_jobs[:] = [
job for job in self.__active_jobs if not job.future.done()]
if len(self.__active_jobs) < 1:
self.__show_animation = False
def __animate_progress(self):
"""Change the status message, mostly used to animate progress."""
while True:
sleep_time = self.__progress_idle_delay
with self.__progress_lock:
if not self.__progress_status:
sleep_time = self.__progress_idle_delay
elif self.__show_animation:
self.__progress_status.show_next_message()
sleep_time = self.__progress_update_delay
else:
self.__progress_status.show_ready_message()
sleep_time = self.__progress_idle_delay
# Allow some time for progress status to be updated.
time.sleep(sleep_time)
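def _example_usage():
    """A minimal sketch of driving the pool (illustrative only; the real plugin
    uses its own job class exposing the same `function`, `args`, `callback`,
    `overrides` and `future` interface assumed here)."""
    class _Job:
        def __init__(self, name, function, *args):
            self.name = name
            self.function = function
            self.args = args
            self.future = None
        def callback(self, future):
            log.debug("job '%s' done", self.name)
        def overrides(self, other):
            return self.name == other.name
    pool = ThreadPool(max_workers=1)
    pool.new_job(_Job("update", time.sleep, 0.1))
    # A second 'update' replaces the pending one if it has not started yet.
    pool.new_job(_Job("update", time.sleep, 0.1))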
|
py
|
1a55ea67bb9ea37057e9e5c112efd6db98713a8e
|
import StringIO
import os
import unittest
from rnn_prof import simple_rnn
from rnn_prof.data.wrapper import load_data
from rnn_prof.data.rnn import build_nn_data
TESTDATA_FILENAME = os.path.join(os.path.dirname(__file__), 'data', 'test_assist_data.csv.gz')
class TestRnn(unittest.TestCase):
def test_initialization(self):
""" Just make sure initialize doesn't cause the interpreter to crash """
data, _, item_ids, _, _ = load_data(TESTDATA_FILENAME, 'assistments')
num_questions = len(item_ids)
nn_data = build_nn_data(data, num_questions)
pivot = len(nn_data) // 2
train_data = nn_data[:pivot]
test_data = nn_data[pivot:]
opts = simple_rnn.RnnOpts(hidden_dim=20)
simple_rnn.SimpleRnn(train_data, opts, test_data=test_data)
def test_dump_and_load(self):
"""
        Test dumping and loading the SimpleRnn and make sure that all of its
        properties remain intact.
"""
data, _, item_ids, _, _ = load_data(TESTDATA_FILENAME, 'assistments')
num_questions = len(item_ids)
nn_data = build_nn_data(data, num_questions)
pivot = len(nn_data) // 2
train_data = nn_data[:pivot]
max_compress_dim = 10
hidden_dim = 20
recurrent = False
grad_norm_limit = 1.0
first_learning_rate = 20.0
decay_rate = 0.5
largest_grad = 4.0
batch_threshold = 0.8
opts = simple_rnn.RnnOpts(max_compress_dim=max_compress_dim, hidden_dim=hidden_dim,
recurrent=recurrent, grad_norm_limit=grad_norm_limit,
largest_grad=largest_grad, batch_threshold=batch_threshold,
first_learning_rate=first_learning_rate, decay_rate=decay_rate)
original = simple_rnn.SimpleRnn(train_data, opts)
dumped = StringIO.StringIO()
original.dump(dumped)
dumped_str = dumped.getvalue()
dumped_reader = StringIO.StringIO(dumped_str)
recalled = simple_rnn.SimpleRnn.load(dumped_reader)
for attr in ('max_compress_dim', 'recurrent', 'grad_norm_limit',
'first_learning_rate', 'decay_rate', 'largest_grad', 'batch_threshold'):
self.assertEqual(getattr(original.opts, attr), getattr(recalled.opts, attr),
"%s was changed" % attr)
|
py
|
1a55ea8fad76205a63af781b79e22f182b9adca0
|
RAWDATA_DIR = '/staging/as/skchoudh/re-ribo-datasets/gallus_gallus/SRP016501'
OUT_DIR = '/staging/as/skchoudh/rna-seq-output/gallus_gallus/SRP016501'
CDNA_FA_GZ = '/home/cmb-panasas2/skchoudh/genomes/gallus_gallus/cdna/Gallus_gallus.Gallus_gallus-5.0.cdna.all.fa.gz'
CDNA_IDX = '/home/cmb-panasas2/skchoudh/genomes/gallus_gallus/cdna/Gallus_gallus.Gallus_gallus-5.0.cdna.all.kallisto.index'
|
py
|
1a55eabf1e571e5e553e716c23cabed05d780ad4
|
# -*- coding: utf-8 -*-
import MeCab
from .tokenizer_none import NoneTokenizer
class TokenizerJaMecab(NoneTokenizer):
def __init__(self):
self.tagger = MeCab.Tagger("-Owakati")
# make sure the dictionary is IPA
# sacreBLEU is only compatible with 0.996.5 for now
# Please see: https://github.com/mjpost/sacrebleu/issues/94
d = self.tagger.dictionary_info()
assert d.size == 392126, \
"Please make sure to use IPA dictionary for MeCab"
assert d.next is None
def __call__(self, line):
"""
        Tokenizes a Japanese input line using the MeCab morphological analyzer.
:param line: a segment to tokenize
:return: the tokenized line
"""
line = line.strip()
sentence = self.tagger.parse(line).strip()
return sentence
def signature(self):
"""
Returns the MeCab parameters.
:return: signature string
"""
signature = self.tagger.version() + "-IPA"
return 'ja-mecab-' + signature
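# Example usage (a small sketch; requires MeCab with the IPA dictionary checked above):
#   tokenizer = TokenizerJaMecab()
#   tokenizer("これはペンです。")  # -> space-separated tokens, e.g. "これ は ペン です 。"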
|
py
|
1a55eb200e1dccd9f0d2db96e7c8ee0b84e6453a
|
# Linear autoencoder (i.e., PCA) applied to a 3D dataset, projecting to 2D
#https://github.com/ageron/handson-ml2/blob/master/17_autoencoders_and_gans.ipynb
import numpy as np
import matplotlib.pyplot as plt
import os
figdir = "../figures"
def save_fig(fname): plt.savefig(os.path.join(figdir, fname))
import tensorflow as tf
from tensorflow import keras
from sklearn.decomposition import PCA
from mpl_toolkits import mplot3d
from mpl_toolkits.mplot3d import Axes3D
np.random.seed(4)
def generate_3d_data(m, w1=0.1, w2=0.3, noise=0.1):
angles = np.random.rand(m) * 3 * np.pi / 2 - 0.5
data = np.empty((m, 3))
data[:, 0] = np.cos(angles) + np.sin(angles)/2 + noise * np.random.randn(m) / 2
data[:, 1] = np.sin(angles) * 0.7 + noise * np.random.randn(m) / 2
data[:, 2] = data[:, 0] * w1 + data[:, 1] * w2 + noise * np.random.randn(m)
return data
X_train = generate_3d_data(60)
X_train = X_train - X_train.mean(axis=0, keepdims=0)
np.random.seed(42)
tf.random.set_seed(42)
encoder = keras.models.Sequential([keras.layers.Dense(2, input_shape=[3])])
decoder = keras.models.Sequential([keras.layers.Dense(3, input_shape=[2])])
autoencoder = keras.models.Sequential([encoder, decoder])
autoencoder.compile(loss="mse", optimizer=keras.optimizers.SGD(lr=1.5))
history = autoencoder.fit(X_train, X_train, epochs=20)
codings = encoder.predict(X_train)
X = X_train
fig = plt.figure().gca(projection='3d')
fig.scatter(X[:,0], X[:,1], X[:,2], s=50, marker='o')
save_fig("linear-autoecoder-data3d.pdf")
plt.show()
fig = plt.figure(figsize=(4,3))
plt.plot(codings[:,0], codings[:, 1], "b.")
plt.xlabel("$z_1$", fontsize=18)
plt.ylabel("$z_2$", fontsize=18, rotation=0)
plt.grid(True)
save_fig("linear-autoencoder-embedding.pdf")
plt.show()
# PCA version
pca = PCA(n_components=2)
mu = np.mean(X_train, axis=0)
Xc = X_train - mu # center the data
pca.fit(Xc)
W = pca.components_.T # D*K
Z = np.dot(Xc, W) # N * K latent scores
Xrecon = np.dot(Z, W.T) + mu # N*D
fig = plt.figure(figsize=(4,3))
plt.plot(Z[:,0], Z[:, 1], "b.")
plt.xlabel("$z_1$", fontsize=18)
plt.ylabel("$z_2$", fontsize=18, rotation=0)
plt.grid(True)
save_fig("linear-autoencoder-pca.pdf")
plt.show()
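# Sanity check (a sketch): the trained linear autoencoder and PCA span the same
# 2-d subspace, so their reconstructions should agree up to optimization error.
ae_recon = autoencoder.predict(X_train)
print("max |AE recon - PCA recon|:", np.max(np.abs(ae_recon - Xrecon)))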
|
py
|
1a55ec5e348c6e1f74bdd0c091c38bb19d616deb
|
"""
Defines the NotebookCell class
"""
#***************************************************************************************************
# Copyright 2015, 2019 National Technology & Engineering Solutions of Sandia, LLC (NTESS).
# Under the terms of Contract DE-NA0003525 with NTESS, the U.S. Government retains certain rights
# in this software.
# Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except
# in compliance with the License. You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0 or in the LICENSE file in the root pyGSTi directory.
#***************************************************************************************************
import json as _json
import os as _os
class NotebookCell(object):
"""
Struct representing either a code or markdown cell
Parameters
----------
cell_type : str, optional
Tag for the cell: either 'code' or 'markdown'
source : list, optional
A list of strings that are the lines of code/markdown in the cell.
"""
def __init__(self, cell_type='code', source=None):
'''
Build a notebook cell
Parameters
----------
cell_type : str, optional
tag for the cell: either 'code' or 'markdown'
source : list(str), optional
lines of code/markdown in the cell
'''
if source is None:
source = []
self.cellType = cell_type
self.source = source
def to_json_dict(self):
"""
Convert this cell to a json representation of a cell, using a default template
Returns
-------
dict
"""
if self.cellType == 'markdown':
templateFilename = 'MDcell.json'
elif self.cellType == 'code':
templateFilename = 'CodeCell.json'
templateFilename = _os.path.join(_os.path.dirname(_os.path.abspath(__file__)),
'templates', templateFilename)
with open(templateFilename, 'r') as infile:
cellDict = _json.load(infile)
cellDict['source'].extend(self.source)
return cellDict
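# Example (a minimal sketch): build a markdown cell and render it as a JSON dict.
#   cell = NotebookCell(cell_type='markdown', source=['# Results\n', 'Some text.\n'])
#   cell_dict = cell.to_json_dict()  # template dict with 'source' extended by the lines above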
|
py
|
1a55ecab8d8b82eeb2dcad08ca933498cf77850d
|
# date:2018/8/27 11:35
# -*- coding: utf-8 -*-
# author: cwd
"""
function: get the current coordinates of the mouse pointer on the image
"""
from PIL import Image
from pylab import *
im = array(Image.open('./images/set.png'))
imshow(im)
# print('Please click 3 points')
# x = ginput(3)
# print('you clicked:', x)
|
py
|
1a55ecac2e13865fa9e9f085fe9b97b122aae442
|
import os
from flask import g, current_app
from .utils import Singleton
class MediaFinder(metaclass=Singleton):
def __init__(self, path):
self.path = path
self._collection = []
for root, dirs, files in os.walk(path):
for name in files:
fname = os.path.abspath(os.path.join(root, name))
size = os.stat(fname).st_size
self._collection.append({'name': fname, 'size': size})
self._collection.sort(key=lambda x: x['name'])
self._collection = {x['name']: x['size'] for x in self._collection}
@property
def collection(self):
return self._collection
def get_media_finder():
if 'media_finder' not in g:
g.media_finder = MediaFinder(current_app.config['MEDIA_DIR'])
return g.media_finder
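# Example (a sketch, inside a Flask application/request context with MEDIA_DIR configured):
#   finder = get_media_finder()
#   finder.collection  # -> {absolute_file_path: size_in_bytes, ...}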
|
py
|
1a55ef9224c0ee58db6a102b3e3085f7960e9381
|
__copyright__ = """
Copyright (C) 2020 George N Wong
Copyright (C) 2020 Zachary J Weiner
"""
__license__ = """
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.
"""
import numpy as np
__doc__ = """
.. currentmodule:: pydemic.distributions
.. autoclass:: DistributionBase
.. currentmodule:: pydemic
.. autoclass:: GammaDistribution
"""
class DistributionBase:
def pdf(self, t):
raise NotImplementedError
def cdf(self, t):
raise NotImplementedError
def convolve_pdf(self, t, influx, prefactor=1, method='fft'):
pdf = self.pdf(t[:] - t[0])
prefactor = prefactor * np.ones_like(influx[0, ...])
end = t.shape[0]
if method == 'fft':
kernel = np.outer(pdf, prefactor)
from scipy.signal import fftconvolve
result = fftconvolve(kernel, influx, mode='full', axes=0)[:end]
elif method == 'direct':
result = np.zeros_like(influx)
for i in range(1, end):
result[i, ...] = prefactor * np.dot(influx[i-1::-1].T, pdf[:i])
return result
def convolve_survival(self, t, influx, prefactor=1, method='fft'):
survival = 1 - self.cdf(t - t[0])
prefactor = prefactor * np.ones_like(influx[0, ...])
kernel = np.outer(survival, prefactor)
end = t.shape[0]
from scipy.signal import fftconvolve
result = fftconvolve(kernel, influx, mode='full', axes=0)[:end]
return result
class GammaDistribution(DistributionBase):
def __init__(self, mean=None, std=None, shape=None, scale=None):
if shape is None:
self.shape = mean**2 / std**2
else:
self.shape = shape
if scale is None:
self.scale = std**2 / mean
else:
self.scale = scale
def pdf(self, t, method='diff'):
if method == 'diff':
cdf = self.cdf(t)
# FIXME: prepend or append?
return np.diff(cdf, prepend=0)
else:
from scipy.stats import gamma
return gamma.pdf(t, self.shape, scale=self.scale)
def cdf(self, t):
from scipy.stats import gamma
return gamma.cdf(t, self.shape, scale=self.scale)
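def _example_gamma_delay():
    """A minimal sketch with illustrative numbers: a gamma kernel with mean 5 and
    std 2 has shape = mean**2 / std**2 = 6.25 and scale = std**2 / mean = 0.8."""
    t = np.linspace(0., 20., 201)
    influx = np.ones((t.shape[0], 1))
    dist = GammaDistribution(mean=5., std=2.)
    delayed = dist.convolve_pdf(t, influx)             # influx smeared forward by the pdf
    still_pending = dist.convolve_survival(t, influx)  # influx weighted by 1 - cdf
    return delayed, still_pending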
|
py
|
1a55efb5ed916709d75fbdfcd6104bff0b6677eb
|
#!/usr/bin/env python
'''
Licensed to Elasticsearch B.V under one or more agreements.
Elasticsearch B.V licenses this file to you under the Apache 2.0 License.
See the LICENSE file in the project root for more information
'''
from elasticsearch import Elasticsearch
es = Elasticsearch()
print("c5e5873783246c7b1c01d8464fed72c4 - L:9")
# tag::c5e5873783246c7b1c01d8464fed72c4[]
response = es.delete(
index='twitter',
id=1,
)
# end::c5e5873783246c7b1c01d8464fed72c4[]
print("---------------------------------------")
print(response)
print("---------------------------------------")
print("47b5ff897f26e9c943cee5c06034181d - L:84")
# tag::47b5ff897f26e9c943cee5c06034181d[]
response = es.delete(
index='twitter',
id=1,
routing='kimchy',
)
# end::47b5ff897f26e9c943cee5c06034181d[]
print("---------------------------------------")
print(response)
print("---------------------------------------")
print("d90a84a24a407731dfc1929ac8327746 - L:147")
# tag::d90a84a24a407731dfc1929ac8327746[]
response = es.delete(
index='twitter',
id=1,
timeout='5m',
)
# end::d90a84a24a407731dfc1929ac8327746[]
print("---------------------------------------")
print(response)
print("---------------------------------------")
|
py
|
1a55efeddc9dde05f8d001945eec305c3c24a401
|
# Copyright 2018, Kay Hayen, mailto:[email protected]
#
# Part of "Nuitka", an optimizing Python compiler that is compatible and
# integrates with CPython, but also works on its own.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
""" Common test infrastructure functions. To be used by test runners. """
from __future__ import print_function
import ast
import atexit
import os
import re
import shutil
import subprocess
import sys
import tempfile
from contextlib import contextmanager
from nuitka.Tracing import my_print
from nuitka.utils.AppDirs import getAppDir, getCacheDir
from nuitka.utils.Execution import check_output
from nuitka.utils.FileOperations import makePath, removeDirectory
from .SearchModes import (
SearchModeBase,
SearchModeByPattern,
SearchModeCoverage,
SearchModeResume
)
def check_result(*popenargs, **kwargs):
if "stdout" in kwargs:
raise ValueError("stdout argument not allowed, it will be overridden.")
process = subprocess.Popen(
stdout = subprocess.PIPE,
*popenargs,
**kwargs
)
_unused_output, _unused_err = process.communicate()
retcode = process.poll()
if retcode:
return False
else:
return True
def goMainDir():
# Go its own directory, to have it easy with path knowledge.
os.chdir(
os.path.dirname(
os.path.abspath(sys.modules[ "__main__" ].__file__)
)
)
_python_version = None
_python_arch = None
_python_executable = None
def setup(suite = "", needs_io_encoding = False, silent = False, go_main = True):
if go_main:
goMainDir()
if "PYTHON" not in os.environ:
os.environ["PYTHON"] = sys.executable
# Allow test code to use this to make caching specific.
os.environ["NUITKA_TEST_SUITE"] = suite
# Allow providing 33, 27, and expand that to python2.7
if len(os.environ["PYTHON"]) == 2 and \
os.environ["PYTHON"].isdigit() and \
os.name != "nt":
os.environ["PYTHON"] = "python%s.%s" % (
os.environ["PYTHON"][0],
os.environ["PYTHON"][1]
)
if needs_io_encoding and "PYTHONIOENCODING" not in os.environ:
os.environ["PYTHONIOENCODING"] = "utf-8"
version_output = check_output(
(
os.environ["PYTHON"],
"-c",
"""\
import sys, os;\
print(".".join(str(s) for s in list(sys.version_info)[:3]));\
print(("x86_64" if "AMD64" in sys.version else "x86") if os.name == "nt" else os.uname()[4]);\
print(sys.executable);\
""",
),
stderr = subprocess.STDOUT
)
global _python_version, _python_arch, _python_executable # singleton, pylint: disable=global-statement
_python_version = version_output.split(b"\n")[0].strip()
_python_arch = version_output.split(b"\n")[1].strip()
_python_executable = version_output.split(b"\n")[2].strip()
if sys.version.startswith('3'):
_python_arch = _python_arch.decode("utf-8")
_python_version = _python_version.decode("utf-8")
_python_executable = _python_executable.decode("utf-8")
if not silent:
my_print("Using concrete python", _python_version, "on", _python_arch)
assert type(_python_version) is str, repr(_python_version)
assert type(_python_arch) is str, repr(_python_arch)
assert type(_python_executable) is str, repr(_python_executable)
if "COVERAGE_FILE" not in os.environ:
os.environ["COVERAGE_FILE"] = os.path.join(
os.path.dirname(__file__),
"..",
"..",
"..",
".coverage"
)
return _python_version
tmp_dir = None
def getTempDir():
# Create a temporary directory to work in, automatically remove it in case
# it is empty in the end.
global tmp_dir # singleton, pylint: disable=global-statement
if tmp_dir is None:
tmp_dir = tempfile.mkdtemp(
prefix = os.path.basename(
os.path.dirname(
os.path.abspath(sys.modules[ "__main__" ].__file__)
)
) + '-',
dir = tempfile.gettempdir() if
not os.path.exists("/var/tmp") else
"/var/tmp"
)
def removeTempDir():
removeDirectory(
path = tmp_dir,
ignore_errors = True
)
atexit.register(removeTempDir)
return tmp_dir
def convertUsing2to3(path, force = False):
command = [
os.environ["PYTHON"],
"-m",
"py_compile",
path
]
if not force:
with open(path) as source_file:
if "xrange" not in source_file.read():
with open(os.devnull, 'w') as stderr:
if check_result(command, stderr = stderr):
return path, False
filename = os.path.basename(path)
new_path = os.path.join(getTempDir(), filename)
# This may already be a temp file, e.g. because of construct creation.
try:
shutil.copy(path, new_path)
except shutil.Error:
pass
# For Python2.6 and 3.2 the -m lib2to3 was not yet supported.
use_binary = sys.version_info[:2] in ((2,6), (3,2))
if use_binary:
# On Windows, we cannot rely on 2to3 to be in the path.
if os.name == "nt":
command = [
sys.executable,
os.path.join(
os.path.dirname(sys.executable),
"Tools/Scripts/2to3.py"
)
]
else:
command = [
"2to3"
]
else:
command = [
sys.executable,
"-m",
"lib2to3",
]
command += [
"-w",
"-n",
"--no-diffs",
new_path
]
with open(os.devnull, 'w') as devnull:
check_output(
command,
stderr = devnull
)
with open(new_path) as result_file:
data = result_file.read()
with open(new_path, 'w') as result_file:
result_file.write("__file__ = %r\n" % os.path.abspath(path))
result_file.write(data)
return new_path, True
def decideFilenameVersionSkip(filename):
""" Make decision whether to skip based on filename and Python version.
This codifies certain rules that files can have as suffixes or prefixes
to make them be part of the set of tests executed for a version or not.
        Generally, an ending of "<major><minor>.py" indicates that it must be that
        Python version or higher. There is no need for an ending of "26.py" as this
        is the minimum version anyway.
        The "_2.py" ending indicates a maximum version of 2.7, i.e. not Python 3.x,
        for language syntax no longer supported.
"""
# This will make many decisions with immediate returns.
# pylint: disable=too-many-return-statements
assert type(filename) is str
assert type(_python_version) is str
# Skip runner scripts by default.
if filename.startswith("run_"):
return False
# Skip tests that require Python 2.7 at least.
if filename.endswith("27.py") and _python_version.startswith("2.6"):
return False
if filename.endswith("_2.py") and _python_version.startswith('3'):
return False
# Skip tests that require Python 3.2 at least.
if filename.endswith("32.py") and _python_version < "3.2":
return False
# Skip tests that require Python 3.3 at least.
if filename.endswith("33.py") and _python_version < "3.3":
return False
# Skip tests that require Python 3.4 at least.
if filename.endswith("34.py") and _python_version < "3.4":
return False
# Skip tests that require Python 3.5 at least.
if filename.endswith("35.py") and _python_version < "3.5":
return False
# Skip tests that require Python 3.6 at least.
if filename.endswith("36.py") and _python_version < "3.6":
return False
return True
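# For example (illustrative filenames): "generators33.py" is skipped when running
# under Python 3.2, and "print_statement_2.py" is skipped under any Python 3.x.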
def _removeCPythonTestSuiteDir():
# Cleanup, some tests apparently forget that.
try:
if os.path.isdir("@test"):
removeDirectory("@test", ignore_errors = False)
elif os.path.isfile("@test"):
os.unlink("@test")
except OSError:
# TODO: Move this into removeDirectory maybe. Doing an external
# call as last resort could be a good idea.
# This seems to work for broken "lnk" files.
if os.name == "nt":
os.system("rmdir /S /Q @test")
if os.path.exists("@test"):
raise
def compareWithCPython(dirname, filename, extra_flags, search_mode, needs_2to3):
""" Call the comparison tool. For a given directory filename.
The search mode decides if the test case aborts on error or gets extra
flags that are exceptions.
"""
if dirname is None:
path = filename
else:
path = os.path.join(dirname, filename)
# Apply 2to3 conversion if necessary.
if needs_2to3:
path, converted = convertUsing2to3(path)
else:
converted = False
command = [
sys.executable,
os.path.join("..", "..", "bin", "compare_with_cpython"),
path,
"silent"
]
if extra_flags is not None:
command += extra_flags
command += search_mode.getExtraFlags(dirname, filename)
# Cleanup before and after test stage directory.
_removeCPythonTestSuiteDir()
try:
result = subprocess.call(
command
)
except KeyboardInterrupt:
result = 2
# Cleanup before and after test stage directory.
_removeCPythonTestSuiteDir()
if result != 0 and \
result != 2 and \
search_mode.abortOnFinding(dirname, filename):
my_print("Error exit!", result)
sys.exit(result)
if converted:
os.unlink(path)
if result == 2:
sys.stderr.write("Interrupted, with CTRL-C\n")
sys.exit(2)
def checkCompilesNotWithCPython(dirname, filename, search_mode):
if dirname is None:
path = filename
else:
path = os.path.join(dirname, filename)
command = [
_python_executable,
"-mcompileall",
path
]
try:
result = subprocess.call(
command
)
except KeyboardInterrupt:
result = 2
if result != 1 and \
result != 2 and \
search_mode.abortOnFinding(dirname, filename):
my_print("Error exit!", result)
sys.exit(result)
def checkSucceedsWithCPython(filename):
command = [
_python_executable,
filename
]
result = subprocess.call(
command,
stdout = open(os.devnull,'w'),
stderr = subprocess.STDOUT
)
return result == 0
def hasDebugPython():
# On Debian systems, these work.
debug_python = os.path.join("/usr/bin/", os.environ["PYTHON"] + "-dbg")
if os.path.exists(debug_python):
return True
# On Windows systems, these work.
debug_python = os.environ["PYTHON"]
if debug_python.lower().endswith(".exe"):
debug_python = debug_python[:-4]
debug_python = debug_python + "_d.exe"
if os.path.exists(debug_python):
return True
# For other Python, if it's the one also executing the runner, which is
# very probably the case, we check that. We don't check the provided
# binary here, this could be done as well.
if sys.executable == os.environ["PYTHON"] and \
hasattr(sys, "gettotalrefcount"):
return True
# Otherwise no.
return False
def getArchitecture():
if os.name == "nt":
if "AMD64" in sys.version:
return "x86_64"
else:
return "x86"
else:
return os.uname()[4] # @UndefinedVariable
def getDependsExePath():
if "APPDATA" not in os.environ:
sys.exit("Error, standalone mode cannot find 'APPDATA' environment.")
nuitka_app_dir = getAppDir()
depends_dir = os.path.join(
nuitka_app_dir,
_python_arch,
)
depends_exe = os.path.join(
depends_dir,
"depends.exe"
)
assert os.path.exists(depends_exe), depends_exe
return depends_exe
def isExecutableCommand(command):
path = os.environ["PATH"]
suffixes = (".exe",) if os.name == "nt" else ("",)
for part in path.split(os.pathsep):
if not part:
continue
for suffix in suffixes:
if os.path.isfile(os.path.join(part, command + suffix)):
return True
return False
def getRuntimeTraceOfLoadedFiles(path, trace_error = True):
""" Returns the files loaded when executing a binary. """
# This will make a crazy amount of work, pylint: disable=too-many-branches,too-many-statements
result = []
if os.name == "posix":
if sys.platform == "darwin" or \
sys.platform.startswith("freebsd"):
if not isExecutableCommand("dtruss"):
sys.exit(
"""\
Error, needs 'dtruss' on your system to scan used libraries."""
)
if not isExecutableCommand("sudo"):
sys.exit(
"""\
Error, needs 'sudo' on your system to scan used libraries."""
)
args = (
"sudo",
"dtruss",
"-t",
"open",
path
)
else:
if not isExecutableCommand("strace"):
sys.exit(
"""\
Error, needs 'strace' on your system to scan used libraries."""
)
args = (
"strace",
"-e", "file",
"-s4096", # Some paths are truncated otherwise.
path
)
process = subprocess.Popen(
args = args,
stdout = subprocess.PIPE,
stderr = subprocess.PIPE
)
_stdout_strace, stderr_strace = process.communicate()
exit_strace = process.returncode
if exit_strace != 0:
if str is not bytes:
stderr_strace = stderr_strace.decode("utf8")
my_print(stderr_strace, file = sys.stderr)
sys.exit("Failed to run strace.")
open(path+".strace","wb").write(stderr_strace)
for line in stderr_strace.split(b"\n"):
if process.returncode != 0 and trace_error:
my_print(line)
if not line:
continue
# Don't consider files not found. The "site" module checks lots
# of things.
if b"ENOENT" in line:
continue
if line.startswith(b"stat(") and b"S_IFDIR" in line:
continue
# Allow stats on the python binary, and stuff pointing to the
# standard library, just not uses of it. It will search there
# for stuff.
if line.startswith(b"lstat(") or \
line.startswith(b"stat(") or \
line.startswith(b"readlink("):
filename = line[line.find(b"(")+2:line.find(b", ")-1]
# At least Python3.7 considers the default Python3 path.
if filename == b"/usr/bin/python3":
continue
if filename in (b"/usr/bin/python3." + version for version in (b"5", b"6", b"7")):
continue
binary_path = _python_executable
if str is not bytes:
binary_path = binary_path.encode("utf-8")
found = False
while binary_path:
if filename == binary_path:
found = True
break
if binary_path == os.path.dirname(binary_path):
break
binary_path = os.path.dirname(binary_path)
if filename == os.path.join(binary_path, b"python" + _python_version[:3].encode("utf8")):
found = True
continue
if found:
continue
result.extend(
os.path.abspath(match)
for match in
re.findall(b'"(.*?)(?:\\\\0)?"', line)
)
if sys.version.startswith('3'):
result = [s.decode("utf-8") for s in result]
elif os.name == "nt":
subprocess.call(
(
getDependsExePath(),
"-c",
"-ot%s" % path + ".depends",
"-f1",
"-pa1",
"-ps1",
"-pp0",
"-pl1",
path
)
)
inside = False
for line in open(path + ".depends"):
if "| Module Dependency Tree |" in line:
inside = True
continue
if not inside:
continue
if "| Module List |" in line:
break
if ']' not in line:
continue
# Skip missing DLLs, apparently not needed anyway.
if '?' in line[:line.find(']')]:
continue
dll_filename = line[line.find(']')+2:-1]
assert os.path.isfile(dll_filename), dll_filename
# The executable itself is of course exempted.
if os.path.normcase(dll_filename) == \
os.path.normcase(os.path.abspath(path)):
continue
dll_filename = os.path.normcase(dll_filename)
result.append(dll_filename)
os.unlink(path + ".depends")
result = list(sorted(set(result)))
return result
def checkRuntimeLoadedFilesForOutsideAccesses(loaded_filenames, white_list):
# A lot of special white listing is required.
# pylint: disable=too-many-branches,too-many-statements
result = []
for loaded_filename in loaded_filenames:
loaded_filename = os.path.normpath(loaded_filename)
loaded_filename = os.path.normcase(loaded_filename)
loaded_basename = os.path.basename(loaded_filename)
ok = False
for entry in white_list:
if loaded_filename.startswith(entry):
ok = True
while entry:
old_entry = entry
entry = os.path.dirname(entry)
if old_entry == entry:
break
if loaded_filename == entry:
ok = True
break
if ok:
continue
if loaded_filename.startswith("/etc/"):
continue
if loaded_filename.startswith("/proc/") or loaded_filename == "/proc":
continue
if loaded_filename.startswith("/dev/"):
continue
if loaded_filename.startswith("/tmp/"):
continue
if loaded_filename.startswith("/run/"):
continue
if loaded_filename.startswith("/sys/"):
continue
if loaded_filename.startswith("/usr/lib/locale/"):
continue
if loaded_filename.startswith("/usr/share/locale/"):
continue
if loaded_filename.startswith("/usr/share/X11/locale/"):
continue
# Themes may of course be loaded.
if loaded_filename.startswith("/usr/share/themes"):
continue
if "gtk" in loaded_filename and "/engines/" in loaded_filename:
continue
# Terminal info files are OK too.
if loaded_filename.startswith("/lib/terminfo/"):
continue
# System C libraries are to be expected.
if loaded_basename.startswith((
"libc.so.",
"libpthread.so.",
"libdl.so.",
"libm.so.",
)):
continue
# Taking these from system is harmless and desirable
if loaded_basename.startswith((
"libz.so",
"libutil.so",
"libgcc_s.so",
)):
continue
# TODO: Unclear, loading gconv from filesystem of installed system
# may be OK or not. I think it should be.
if loaded_basename == "gconv-modules.cache":
continue
if "/gconv/" in loaded_filename:
continue
if loaded_basename.startswith("libicu"):
continue
# GTK may access X files.
if loaded_basename == ".Xauthority":
continue
result.append(loaded_filename)
return result
def hasModule(module_name):
result = subprocess.call(
(
os.environ["PYTHON"],
"-c"
"import %s" % module_name
),
stdout = open(os.devnull,'w'),
stderr = subprocess.STDOUT
)
return result == 0
m1 = {}
m2 = {}
def snapObjRefCntMap(before):
import gc
if before:
m = m1
else:
m = m2
for x in gc.get_objects():
if x is m1:
continue
if x is m2:
continue
m[ str(x) ] = sys.getrefcount(x)
def checkReferenceCount(checked_function, max_rounds = 10):
assert sys.exc_info() == (None, None, None), sys.exc_info()
print(checked_function.__name__ + ": ", end = "")
sys.stdout.flush()
ref_count1 = 17
ref_count2 = 17
explain = False
import gc
assert max_rounds > 0
for count in range(max_rounds):
gc.collect()
ref_count1 = sys.gettotalrefcount() # @UndefinedVariable
if explain and count == max_rounds - 1:
snapObjRefCntMap(True)
checked_function()
# Not allowed, but happens when bugs occur.
assert sys.exc_info() == (None, None, None), sys.exc_info()
gc.collect()
if explain and count == max_rounds - 1:
snapObjRefCntMap(False)
ref_count2 = sys.gettotalrefcount() # @UndefinedVariable
if ref_count1 == ref_count2:
result = True
print("PASSED")
break
# print count, ref_count1, ref_count2
else:
result = False
print("FAILED", ref_count1, ref_count2, "leaked", ref_count2 - ref_count1)
if explain:
assert m1
assert m2
for key in m1:
if key not in m2:
print('*' * 80)
print("extra", key)
elif m1[key] != m2[key]:
print('*' * 80)
print(m1[key], "->", m2[key], key)
else:
pass
# print m1[key]
assert sys.exc_info() == (None, None, None), sys.exc_info()
gc.collect()
sys.stdout.flush()
return result
def createSearchMode():
search_mode = len(sys.argv) > 1 and sys.argv[1] == "search"
resume_mode = len(sys.argv) > 1 and sys.argv[1] == "resume"
start_at = sys.argv[2] if len(sys.argv) > 2 else None
coverage_mode = len(sys.argv) > 1 and sys.argv[1] == "coverage"
if coverage_mode:
return SearchModeCoverage()
elif resume_mode:
return SearchModeResume(
sys.modules["__main__"].__file__
)
elif search_mode and start_at:
start_at = start_at.replace('/', os.path.sep)
return SearchModeByPattern(start_at)
else:
class SearchModeImmediate(SearchModeBase):
def abortOnFinding(self, dirname, filename):
return search_mode and \
SearchModeBase.abortOnFinding(self, dirname, filename)
return SearchModeImmediate()
def reportSkip(reason, dirname, filename):
case = os.path.join(dirname, filename)
case = os.path.normpath(case)
my_print("Skipped, %s (%s)." % (case, reason))
def executeReferenceChecked(prefix, names, tests_skipped, tests_stderr):
import gc
gc.disable()
extract_number = lambda name: int(name.replace(prefix, ""))
# Find the function names.
matching_names = tuple(
name
for name in names
if name.startswith(prefix) and name[-1].isdigit()
)
old_stderr = sys.stderr
# Everything passed
result = True
for name in sorted(matching_names, key = extract_number):
number = extract_number(name)
# print(tests_skipped)
if number in tests_skipped:
my_print(name + ": SKIPPED (%s)" % tests_skipped[number])
continue
# Avoid unraisable output.
try:
if number in tests_stderr:
sys.stderr = open(os.devnull, "wb")
except OSError: # Windows
if not checkReferenceCount(names[name]):
result = False
else:
if not checkReferenceCount(names[name]):
result = False
if number in tests_stderr:
new_stderr = sys.stderr
sys.stderr = old_stderr
new_stderr.close()
gc.enable()
return result
def checkDebugPython():
if not hasattr(sys, "gettotalrefcount"):
my_print("Warning, using non-debug Python makes this test ineffective.")
sys.gettotalrefcount = lambda : 0
elif sys.version_info >= (3,7,0) and sys.version_info < (3,7,2):
my_print("Warning, bug of CPython 3.7.0/1 breaks reference counting and makes this test ineffective.")
sys.gettotalrefcount = lambda : 0
def addToPythonPath(python_path):
if type(python_path) in (tuple, list):
python_path = os.pathsep.join(python_path)
if python_path:
if "PYTHONPATH" in os.environ:
os.environ["PYTHONPATH"] += os.pathsep + python_path
else:
os.environ["PYTHONPATH"] = python_path
@contextmanager
def withPythonPathChange(python_path):
if python_path:
if type(python_path) not in (tuple, list):
python_path = python_path.split(os.pathsep)
python_path = [
os.path.normpath(os.path.abspath(element))
for element in
python_path
]
python_path = os.pathsep.join(python_path)
if "PYTHONPATH" in os.environ:
old_path = os.environ["PYTHONPATH"]
os.environ["PYTHONPATH"] += os.pathsep + python_path
else:
old_path = None
os.environ["PYTHONPATH"] = python_path
# print(
# "Effective PYTHONPATH in %s is %r" % (
# sys.modules["__main__"],
# os.environ.get("PYTHONPATH", "")
# )
# )
yield
if python_path:
if old_path is None:
del os.environ["PYTHONPATH"]
else:
os.environ["PYTHONPATH"] = old_path
@contextmanager
def withExtendedExtraOptions(*args):
assert args
old_value = os.environ.get("NUITKA_EXTRA_OPTIONS", None)
value = old_value
for arg in args:
if value is None:
value = arg
else:
value += ' ' + arg
os.environ[ "NUITKA_EXTRA_OPTIONS" ] = value
yield
if old_value is None:
del os.environ[ "NUITKA_EXTRA_OPTIONS" ]
else:
os.environ[ "NUITKA_EXTRA_OPTIONS" ] = old_value
def indentedCode(codes, count):
""" Indent code, used for generating test codes.
"""
return '\n'.join( ' ' * count + line if line else "" for line in codes )
def convertToPython(doctests, line_filter = None):
""" Convert give doctest string to static Python code.
"""
# This is convoluted, but it just needs to work, pylint: disable=too-many-branches
import doctest
code = doctest.script_from_examples(doctests)
if code.endswith('\n'):
code += "#\n"
else:
assert False
output = []
inside = False
def getPrintPrefixed(evaluated, line_number):
try:
node = ast.parse(evaluated.lstrip(), "eval")
except SyntaxError:
return evaluated
if node.body[0].__class__.__name__ == "Expr":
count = 0
while evaluated.startswith(' ' * count):
count += 1
if sys.version_info < (3,):
modified = (count-1) * ' ' + "print " + evaluated
return (count-1) * ' ' + ("print 'Line %d'" % line_number) + '\n' + modified
else:
modified = (count-1) * ' ' + "print(" + evaluated + "\n)\n"
return (count-1) * ' ' + ("print('Line %d'" % line_number) + ")\n" + modified
else:
return evaluated
def getTried(evaluated, line_number):
if sys.version_info < (3,):
return """
try:
%(evaluated)s
except Exception as __e:
print "Occurred", type(__e), __e
""" % { "evaluated" : indentedCode(getPrintPrefixed(evaluated, line_number).split('\n'), 4) }
else:
return """
try:
%(evaluated)s
except Exception as __e:
print("Occurred", type(__e), __e)
""" % { "evaluated" : indentedCode(getPrintPrefixed(evaluated, line_number).split('\n'), 4) }
def isOpener(evaluated):
evaluated = evaluated.lstrip()
if evaluated == "":
return False
return evaluated.split()[0] in (
"def", "class", "for", "while", "try:", "except", "except:",
"finally:", "else:"
)
chunk = None
for line_number, line in enumerate(code.split('\n')):
# print "->", inside, line
if line_filter is not None and line_filter(line):
continue
if inside and line and line[0].isalnum() and not isOpener(line):
output.append(getTried('\n'.join(chunk), line_number)) # @UndefinedVariable
chunk = []
inside = False
if inside and not (line.startswith('#') and line.find("SyntaxError:") != -1):
chunk.append(line)
elif line.startswith('#'):
if line.find("SyntaxError:") != -1:
# print "Syntax error detected"
if inside:
# print "Dropping chunk", chunk
chunk = []
inside = False
else:
del output[-1]
elif isOpener(line):
inside = True
chunk = [line]
elif line.strip() == "":
output.append(line)
else:
output.append(getTried(line, line_number))
return '\n'.join(output).rstrip() + '\n'
def compileLibraryPath(search_mode, path, stage_dir, decide, action):
my_print("Checking standard library path:", path)
for root, dirnames, filenames in os.walk(path):
dirnames_to_remove = [
dirname
for dirname in dirnames
if '-' in dirname
]
for dirname in dirnames_to_remove:
dirnames.remove(dirname)
dirnames.sort()
filenames = [
filename
for filename in filenames
if decide(root, filename)
]
for filename in sorted(filenames):
if not search_mode.consider(root, filename):
continue
full_path = os.path.join(root, filename)
my_print(full_path, ':', end = ' ')
sys.stdout.flush()
action(stage_dir, path, full_path)
def compileLibraryTest(search_mode, stage_dir, decide, action):
if not os.path.exists(stage_dir):
os.makedirs(stage_dir)
my_dirname = os.path.join(os.path.dirname(__file__), "../../..")
my_dirname = os.path.normpath(my_dirname)
paths = [
path
for path in
sys.path
if not path.startswith(my_dirname)
]
my_print("Using standard library paths:")
for path in paths:
my_print(path)
for path in paths:
print("Checking path:", path)
compileLibraryPath(
search_mode = search_mode,
path = path,
stage_dir = stage_dir,
decide = decide,
action = action
)
search_mode.finish()
def run_async(coro):
""" Execute a coroutine until it's done. """
values = []
result = None
while True:
try:
values.append(coro.send(None))
except StopIteration as ex:
result = ex.args[0] if ex.args else None
break
return values, result
def async_iterate(g):
""" Execute async generator until it's done. """
# Test code for Python3, catches all kinds of exceptions.
# pylint: disable=broad-except
# Also Python3 only, pylint: disable=I0021,undefined-variable
res = []
while True:
try:
g.__anext__().__next__()
except StopAsyncIteration: # @UndefinedVariable
res.append("STOP")
break
except StopIteration as ex:
if ex.args:
res.append("ex arg %s" % ex.args[0])
else:
res.append("EMPTY StopIteration")
break
except Exception as ex:
res.append(str(type(ex)))
return res
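# Example (a sketch, Python3.5+ only since it uses "async def"):
#   async def coro():
#       return 42
#   run_async(coro())  # -> ([], 42)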
def getTestingCacheDir():
cache_dir = getCacheDir()
result = os.path.join(cache_dir, "tests_state")
makePath(result)
return result
def getTestingCPythonOutputsCacheDir():
cache_dir = getCacheDir()
result = os.path.join(cache_dir, "cpython_outputs", os.environ.get("NUITKA_TEST_SUITE", ""))
makePath(result)
return result
@contextmanager
def withDirectoryChange(path, allow_none = False):
if path is not None or not allow_none:
old_cwd = os.getcwd()
os.chdir(path)
yield
if path is not None or not allow_none:
os.chdir(old_cwd)
def someGenerator():
yield 1
yield 2
yield 3
def someGeneratorRaising():
yield 1
raise TypeError(2)
|
py
|
1a55f0d91ccb600d1b3d67f387f11ec4ece4c786
|
from pazusoba import adventureEx, Profile, ProfileName, Orb
import time
import random
def random_board() -> str:
return "".join(random.choice(["L", "R", "G", "B", "D", "H"]) for _ in range(30))
def amen_benchmark():
print("Running amen benchmark...")
COUNT = 10
goal_counter = 0
steps = 0
start = time.time()
for i in range(COUNT):
print("Test {}".format(i + 1))
board = random_board()
print("Board: {}".format(board))
result = adventureEx(board, 3, 150, 10000, [
Profile(name=ProfileName.COMBO, target=7),
Profile(name=ProfileName.ORB_REMAINING, target=3),
])
if result.goal:
goal_counter += 1
steps += result.step
time_taken = time.time() - start
print("It took {} seconds, {} seconds on average".format(
time_taken, time_taken / COUNT))
print("Goal rate: {}/{}".format(goal_counter, COUNT))
print("Average steps: {}".format(steps / COUNT))
def combo_benchmark():
print("Running combo benchmark...")
COUNT = 100
goal_counter = 0
steps = 0
start = time.time()
for i in range(COUNT):
board = random_board()
print("{} - {}".format(i + 1, board))
result = adventureEx(board, 3, 150, 1000, [
Profile(name=ProfileName.COMBO, threshold=50)
])
if result.goal:
goal_counter += 1
steps += result.step
time_taken = time.time() - start
print("It took {} seconds, {} seconds on average".format(
time_taken, time_taken / COUNT))
print("Goal rate: {}/{}".format(goal_counter, COUNT))
print("Average steps: {}".format(steps / COUNT))
def find_best_small_size_combo():
COUNT = 20
# generate same board
boards = [random_board() for _ in range(COUNT)]
for x in range(1, 11):
size = x * 100
goal_counter = 0
steps = 0
start = time.time()
for i in range(COUNT):
            result = adventureEx(boards[i], 3, 150, size, [  # use the beam size under test
Profile(name=ProfileName.COMBO, threshold=20)
])
if result.goal:
goal_counter += 1
steps += result.step
time_taken = time.time() - start
print("Size {} - avg {} s, {}/{}, avg {} steps".format(
size, time_taken / COUNT, goal_counter, COUNT, steps / COUNT))
if __name__ == '__main__':
print("Running benchmark")
# amen_benchmark()
combo_benchmark()
# find_best_small_size_combo()
|
py
|
1a55f104752f257e3462ac8338301d58407d8c96
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for XLA JIT compiler."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import unittest
import numpy as np
from six.moves import xrange # pylint: disable=redefined-builtin
from tensorflow.compiler.tests.xla_test import XLATestCase
from tensorflow.python.framework import dtypes
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import bitwise_ops
from tensorflow.python.ops import gen_nn_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import nn_ops
from tensorflow.python.platform import googletest
def nhwc_to_format(x, data_format):
"""Converts a numpy array from NHWC format to `data_format`."""
rank = len(x.shape)
if data_format == "NCHW":
return np.transpose(x, [0, rank - 1] + list(range(1, rank - 1)))
elif data_format == "NHWC":
return x
else:
raise ValueError("Unknown format {}".format(data_format))
class UnaryOpsTest(XLATestCase):
"""Test cases for unary operators."""
def _assertOpOutputMatchesExpected(self, op, inp, expected,
equality_test=None, rtol=1e-3, atol=1e-5):
"""Verifies that 'op' produces 'expected' when fed input 'inp' .
Args:
op: operator to test
inp: numpy input array to use as input to 'op'.
expected: numpy array representing the expected output of 'op'.
equality_test: either None, or a function that tests two numpy arrays for
equality. If None, self.assertAllClose is used.
rtol: relative tolerance for equality test.
atol: absolute tolerance for equality test.
"""
with self.test_session() as session:
with self.test_scope():
pinp = array_ops.placeholder(
dtypes.as_dtype(inp.dtype), inp.shape, name="a")
output = op(pinp)
result = session.run(output, {pinp: inp})
if equality_test is None:
self.assertAllCloseAccordingToType(
result, expected, rtol=rtol, atol=atol, bfloat16_rtol=0.03)
else:
equality_test(result, expected, rtol=rtol, atol=atol)
def ListsAreClose(self, result, expected, rtol, atol):
"""Tests closeness of two lists of floats."""
self.assertEqual(len(result), len(expected))
for i in xrange(len(result)):
self.assertAllClose(result[i], expected[i], rtol, atol)
def testAllTypeOps(self):
for dtype in self.numeric_types:
self._assertOpOutputMatchesExpected(
array_ops.diag,
np.array([1, 2, 3, 4], dtype=dtype),
np.array([[1, 0, 0, 0], [0, 2, 0, 0], [0, 0, 3, 0], [0, 0, 0, 4]],
dtype=dtype))
self._assertOpOutputMatchesExpected(
array_ops.diag_part,
np.arange(36).reshape([2, 3, 2, 3]).astype(dtype),
np.array([[0, 7, 14], [21, 28, 35]], dtype=dtype))
self._assertOpOutputMatchesExpected(
array_ops.diag, np.array([[1, 2], [3, 4]], dtype=dtype),
np.array(
[[[[1, 0], [0, 0]], [[0, 2], [0, 0]]], [[[0, 0], [3, 0]],
[[0, 0], [0, 4]]]],
dtype=dtype))
self._assertOpOutputMatchesExpected(
array_ops.identity,
np.array([[-1, 1]], dtype=dtype),
expected=np.array([[-1, 1]], dtype=dtype))
self._assertOpOutputMatchesExpected(
array_ops.matrix_diag,
np.array([[1, 2], [3, 4]], dtype=dtype),
np.array([[[1, 0], [0, 2]], [[3, 0], [0, 4]]], dtype=dtype))
self._assertOpOutputMatchesExpected(
array_ops.matrix_diag, np.array([1, 2, 3, 4], dtype=dtype),
np.array(
[[1, 0, 0, 0], [0, 2, 0, 0], [0, 0, 3, 0], [0, 0, 0, 4]],
dtype=dtype))
self._assertOpOutputMatchesExpected(
array_ops.matrix_diag,
np.array(
[[[1, 2, 3], [4, 5, 6]], [[7, 8, 9], [10, 11, 12]]], dtype=dtype),
np.array(
[[[[1, 0, 0], [0, 2, 0], [0, 0, 3]],
[[4, 0, 0], [0, 5, 0], [0, 0, 6]]],
[[[7, 0, 0], [0, 8, 0], [0, 0, 9]],
[[10, 0, 0], [0, 11, 0], [0, 0, 12]]]],
dtype=dtype))
self._assertOpOutputMatchesExpected(
array_ops.matrix_diag_part,
np.arange(3 * 2 * 4).reshape([3, 2, 4]).astype(dtype),
np.array([[0, 5], [8, 13], [16, 21]], dtype=dtype))
self._assertOpOutputMatchesExpected(
array_ops.prevent_gradient,
np.array([[-1, 1]], dtype=dtype),
expected=np.array([[-1, 1]], dtype=dtype))
self._assertOpOutputMatchesExpected(
array_ops.squeeze,
np.array([[[[[]]]]], dtype=dtype),
expected=np.array([], dtype=dtype))
self._assertOpOutputMatchesExpected(
array_ops.squeeze,
np.array([[[1], [2]]], dtype=dtype),
expected=np.array([1, 2], dtype=dtype))
self._assertOpOutputMatchesExpected(
array_ops.squeeze,
np.array([[[1]], [[2]]], dtype=dtype),
expected=np.array([1, 2], dtype=dtype))
self._assertOpOutputMatchesExpected(
array_ops.squeeze,
np.array([[[1, 2], [3, 4]]], dtype=dtype),
expected=np.array([[1, 2], [3, 4]], dtype=dtype))
self._assertOpOutputMatchesExpected(
array_ops.stop_gradient,
np.array([[-1, 1]], dtype=dtype),
expected=np.array([[-1, 1]], dtype=dtype))
def testFloatOps(self):
for dtype in self.float_types:
# TODO (b/77694432): Half test failed on CPU, last ran on 04-06-2018. id:177
# https://github.com/imdone/tensorflow/issues/178
if dtype == np.float16 and self.device == "XLA_CPU":
continue
x = np.arange(-0.90, 0.90, 0.25)
self._assertOpOutputMatchesExpected(
math_ops.acos,
x.astype(dtype),
expected=np.arccos(x).astype(dtype))
self._assertOpOutputMatchesExpected(
math_ops.asin,
x.astype(dtype),
expected=np.arcsin(x).astype(dtype))
x = np.arange(-3, 3).reshape(1, 3, 2)
self._assertOpOutputMatchesExpected(
math_ops.atan,
x.astype(dtype),
expected=np.arctan(x).astype(dtype))
self._assertOpOutputMatchesExpected(
math_ops.acosh,
np.array([1, 2, 3, 4], dtype=dtype),
expected=np.array([0, 1.3169579, 1.76274717, 2.06343707],
dtype=dtype))
self._assertOpOutputMatchesExpected(
math_ops.asinh,
np.array([1, 2, 3, 4], dtype=dtype),
expected=np.array([0.88137359, 1.44363548, 1.81844646, 2.09471255],
dtype=dtype))
self._assertOpOutputMatchesExpected(
math_ops.atanh,
np.array([0.1, 0.2, 0.3, 0.4], dtype=dtype),
expected=np.array([0.10033535, 0.20273255, 0.3095196, 0.42364893],
dtype=dtype))
self._assertOpOutputMatchesExpected(
math_ops.ceil,
np.array([[-1.7, 1.2]], dtype=dtype),
expected=np.array([[-1, 2]], dtype=dtype))
self._assertOpOutputMatchesExpected(
math_ops.cosh,
np.array([1, 2, 3, 4], dtype=dtype),
expected=np.array([1.54308063, 3.76219569, 10.067662, 27.30823284],
dtype=dtype))
self._assertOpOutputMatchesExpected(
math_ops.exp,
np.array([[-1, 1]], dtype=dtype),
expected=np.array([[0.36787945, 2.7182817]], dtype=dtype))
self._assertOpOutputMatchesExpected(
math_ops.expm1,
np.array([[-1, 1]], dtype=dtype),
expected=np.array([[-0.63212056, 1.71828183]], dtype=dtype))
self._assertOpOutputMatchesExpected(
math_ops.floor,
np.array([[-1.7, 1.2]], dtype=dtype),
expected=np.array([[-2, 1]], dtype=dtype))
self._assertOpOutputMatchesExpected(
math_ops.is_finite,
np.array([[np.NINF, -2, -1, 0, 0.5, 1, 2, np.inf, np.nan]],
dtype=dtype),
expected=np.array([[0, 1, 1, 1, 1, 1, 1, 0, 0]], dtype=np.bool))
# Tests for tf.nn ops.
self._assertOpOutputMatchesExpected(
nn_ops.l2_loss, np.array([[[]]], dtype=dtype), expected=dtype(0))
self._assertOpOutputMatchesExpected(nn_ops.l2_loss, dtype(4), dtype(8))
self._assertOpOutputMatchesExpected(
nn_ops.l2_loss, np.array([[-2, 4]], dtype=dtype), expected=dtype(10))
self._assertOpOutputMatchesExpected(
math_ops.reciprocal,
np.array([[1, 2]], dtype=dtype),
expected=np.array([[1, 0.5]], dtype=dtype))
self._assertOpOutputMatchesExpected(
math_ops.log,
np.array([[1, 2]], dtype=dtype),
expected=np.array([[0, 0.69314718]], dtype=dtype))
self._assertOpOutputMatchesExpected(
math_ops.sin,
np.array([[1, 2]], dtype=dtype),
expected=np.array([[0.841478, 0.909302]], dtype=dtype))
self._assertOpOutputMatchesExpected(
math_ops.cos,
np.array([[1, 2]], dtype=dtype),
expected=np.array([[0.540297, -0.41614]], dtype=dtype))
# TODO (b/34703906): improve log1p implementation and make tolerance id:332
# https://github.com/imdone/tensorflow/issues/333
# tighter.
self._assertOpOutputMatchesExpected(
math_ops.log1p,
np.array([[1e-14, 1e-15, 0.6]], dtype=dtype),
expected=np.log1p(np.array([[1e-14, 1e-15, 0.6]], dtype=dtype)))
self._assertOpOutputMatchesExpected(
math_ops.rint,
np.array([[-1.7, 1.2, 4.0, 0.0], [-3.5, -2.5, -1.5, -0.5],
[0.5, 1.5, 2.5, 3.5]], dtype=dtype),
expected=np.array([[-2, 1, 4, 0], [-4, -2, -2, 0], [0, 2, 2, 4]],
dtype=dtype))
self._assertOpOutputMatchesExpected(
math_ops.round,
np.array([[-1.7, 1.2, 4.0, 0.0], [-3.5, -2.5, -1.5, -0.5],
[0.5, 1.5, 2.5, 3.5]], dtype=dtype),
expected=np.array([[-2, 1, 4, 0], [-4, -2, -2, 0], [0, 2, 2, 4]],
dtype=dtype))
self._assertOpOutputMatchesExpected(
math_ops.rsqrt,
np.array([[4, 16]], dtype=dtype),
expected=np.array([[0.5, 0.25]], dtype=dtype))
self._assertOpOutputMatchesExpected(
math_ops.sigmoid,
np.array(
[[1, 1, 1, 1],
[1, 2, 3, 4]],
dtype=dtype),
expected=np.array(
[[0.7310586, 0.7310586, 0.7310586, 0.7310586],
[0.7310586, 0.880797, 0.95257413, 0.98201376]],
dtype=dtype))
self._assertOpOutputMatchesExpected(
math_ops.sigmoid,
np.array([-300, -150, 0, 150, 300], dtype=dtype),
expected=np.array([0, 0, 0.5, 1, 1], dtype=dtype))
self._assertOpOutputMatchesExpected(
math_ops.sinh,
np.array([1, 2, 3, 4], dtype=dtype),
expected=np.array([1.17520119, 3.62686041, 10.01787493, 27.2899172],
dtype=dtype))
self._assertOpOutputMatchesExpected(
math_ops.sqrt,
np.array([[4, 9]], dtype=dtype),
expected=np.array([[2, 3]], dtype=dtype))
self._assertOpOutputMatchesExpected(
math_ops.tan,
np.array([1, 2, 3, 4], dtype=dtype),
expected=np.array([1.55740772, -2.18503986, -0.14254654, 1.15782128],
dtype=dtype))
self._assertOpOutputMatchesExpected(
math_ops.tanh,
np.array(
[[1, 1, 1, 1],
[1, 2, 3, 4]],
dtype=dtype),
expected=np.array(
[[0.76159418, 0.76159418, 0.76159418, 0.76159418],
[0.76159418, 0.96402758, 0.99505478, 0.99932933]],
dtype=dtype))
self._assertOpOutputMatchesExpected(
nn_ops.log_softmax,
np.array(
[[1, 1, 1, 1],
[1, 2, 3, 4]],
dtype=dtype),
expected=np.array(
[[-1.3862944, -1.3862944, -1.3862944, -1.3862944],
[-3.4401896, -2.4401896, -1.4401897, -0.44018969]],
dtype=dtype))
self._assertOpOutputMatchesExpected(
nn_ops.elu,
np.array([[-1, 0, 1]], dtype=dtype),
expected=np.array([[-0.63212056, 0, 1]], dtype=dtype))
self._assertOpOutputMatchesExpected(
nn_ops.selu,
np.array([[-1, 0, 1]], dtype=dtype),
expected=np.array([[-1.11133074, 0., 1.05070099]], dtype=dtype))
self._assertOpOutputMatchesExpected(
nn_ops.relu,
np.array([[-1, 1]], dtype=dtype),
expected=np.array([[0, 1]], dtype=dtype))
self._assertOpOutputMatchesExpected(
nn_ops.relu6,
np.array([[-0.05, 6.05, 5]], dtype=dtype),
expected=np.array([[0, 6, 5]], dtype=dtype))
self._assertOpOutputMatchesExpected(
nn_ops.softmax,
np.array(
[[1, 1, 1, 1],
[1, 2, 3, 4]],
dtype=dtype),
expected=np.array(
[[0.25, 0.25, 0.25, 0.25],
[0.032058604, 0.087144323, 0.23688284, 0.64391428]],
dtype=dtype))
self._assertOpOutputMatchesExpected(
nn_ops.softsign,
np.array([[-2, -1, 0, 1, 2]], dtype=dtype),
expected=np.array([[-0.66666669, -0.5, 0, 0.5, 0.66666669]],
dtype=dtype))
self._assertOpOutputMatchesExpected(
math_ops.is_finite,
np.array(
[[42, float("inf"), -123], [float("nan"), 0, -0.0]], dtype=dtype),
expected=np.array(
[[True, False, True], [False, True, True]], dtype=np.bool))
self._assertOpOutputMatchesExpected(
lambda x: array_ops.quantize_and_dequantize_v2(x, -127, 127, True, 8),
np.array([-1, -0.5, 0, 0.3], dtype=dtype),
expected=np.array([-1, -64.0 / 127, 0, 38.0 / 127], dtype=dtype))
def testComplexOps(self):
for dtype in self.complex_types:
self._assertOpOutputMatchesExpected(
math_ops.acosh,
np.array([0.1, 0.2j, 0.3 - 0.1j, 0.4 + 0.5j], dtype=dtype),
expected=np.arccosh(
np.array([0.1, 0.2j, 0.3 - 0.1j, 0.4 + 0.5j], dtype=dtype)))
self._assertOpOutputMatchesExpected(
math_ops.asinh,
np.array([0.1, 0.2j, 0.3 - 0.1j, 0.4 + 0.5j], dtype=dtype),
expected=np.arcsinh(
np.array([0.1, 0.2j, 0.3 - 0.1j, 0.4 + 0.5j], dtype=dtype)))
self._assertOpOutputMatchesExpected(
math_ops.atanh,
np.array([0.1, 0.2j, 0.3 - 0.1j, 0.4 + 0.5j], dtype=dtype),
expected=np.arctanh(
np.array([0.1, 0.2j, 0.3 - 0.1j, 0.4 + 0.5j], dtype=dtype)))
self._assertOpOutputMatchesExpected(
math_ops.cosh,
np.array([1j, 2 - 3j, 3, 4 + 2j], dtype=dtype),
expected=np.cosh(np.array([1j, 2 - 3j, 3, 4 + 2j], dtype=dtype)))
self._assertOpOutputMatchesExpected(
math_ops.sinh,
np.array([1, 2j, 2 - 3j, 4 + 5j], dtype=dtype),
expected=np.sinh(np.array([1, 2j, 2 - 3j, 4 + 5j], dtype=dtype)))
self._assertOpOutputMatchesExpected(
math_ops.exp,
np.array([[-1 + 2j, 3j, 2 - 3j]], dtype=dtype),
expected=np.exp(np.array([[-1 + 2j, 3j, 2 - 3j]], dtype=dtype)))
self._assertOpOutputMatchesExpected(
math_ops.expm1,
np.array([[-1 + 2j, 3j, 2 - 3j]], dtype=dtype),
expected=np.expm1(np.array([[-1 + 2j, 3j, 2 - 3j]], dtype=dtype)))
self._assertOpOutputMatchesExpected(
math_ops.reciprocal,
np.array([[1, 2j, 2 + 3j]], dtype=dtype),
expected=1.0 / np.array([[1, 2j, 2 + 3j]], dtype=dtype))
self._assertOpOutputMatchesExpected(
math_ops.log,
np.array([[5j, 3 - 2j]], dtype=dtype),
expected=np.log(np.array([[5j, 3 - 2j]], dtype=dtype)))
self._assertOpOutputMatchesExpected(
math_ops.sin,
np.array([[5j, 3 - 2j]], dtype=dtype),
expected=np.sin(np.array([[5j, 3 - 2j]], dtype=dtype)))
self._assertOpOutputMatchesExpected(
math_ops.cos,
np.array([[5j, 3 - 2j]], dtype=dtype),
expected=np.cos(np.array([[5j, 3 - 2j]], dtype=dtype)))
# TODO (b/34703906): improve log1p implementation and make tolerance id:256
# https://github.com/imdone/tensorflow/issues/257
# tighter.
self._assertOpOutputMatchesExpected(
math_ops.log1p,
np.array([[1e-14, 1e-15j, 0.6 - 0.3j]], dtype=dtype),
expected=np.log1p(
np.array([[1e-14, 1e-15j, 0.6 - 0.3j]], dtype=dtype)))
val = np.array([1, 2j, 2 - 3j, 4 + 5j], dtype=dtype)
self._assertOpOutputMatchesExpected(
math_ops.rsqrt, val, expected=1 / np.sqrt(val))
self._assertOpOutputMatchesExpected(
math_ops.sigmoid, val, expected=1 / (1 + np.exp(-val)))
self._assertOpOutputMatchesExpected(
math_ops.sqrt, val, expected=np.sqrt(val))
self._assertOpOutputMatchesExpected(
math_ops.tanh,
np.array([1, 2j, 2 - 3j, 4 + 5j], dtype=dtype),
expected=np.tanh(np.array([1, 2j, 2 - 3j, 4 + 5j], dtype=dtype)))
self._assertOpOutputMatchesExpected(
math_ops.tan,
np.array([1, 2j, 2 - 3j, 4 + 5j], dtype=dtype),
expected=np.tan(np.array([1, 2j, 2 - 3j, 4 + 5j], dtype=dtype)))
ctypes = {np.complex64: np.float32}
self._assertOpOutputMatchesExpected(
math_ops.abs,
np.array([[3 - 4j, -1j, np.inf]], dtype=dtype),
expected=np.array([[5, 1, np.inf]], dtype=ctypes[dtype]))
self._assertOpOutputMatchesExpected(
math_ops.negative,
np.array([[-1 + 2j, -3j]], dtype=dtype),
expected=np.array([[1 - 2j, 3j]], dtype=dtype))
self._assertOpOutputMatchesExpected(
math_ops.square,
np.array([[-2 - 3j, 3 + 4j, 5j]], dtype=dtype),
expected=np.array([[-2 - 3j, 3 + 4j, 5j]], dtype=dtype)**2)
self._assertOpOutputMatchesExpected(
array_ops.zeros_like,
np.array([[4j, 3 - 2j], [2, -1j]], dtype=dtype),
expected=np.array([[0, 0], [0, 0]], dtype=dtype))
self._assertOpOutputMatchesExpected(
array_ops.ones_like,
np.array([[-4j, 3 + 2j], [2, -1j]], dtype=dtype),
expected=np.array([[1, 1], [1, 1]], dtype=dtype))
self._assertOpOutputMatchesExpected(
math_ops.angle,
np.array([1 + 3j, -4 + 7j, 2.7, -3j], dtype=dtype),
expected=np.angle(np.array([1 + 3j, -4 + 7j, 2.7, -3j], dtype=dtype)))
self._assertOpOutputMatchesExpected(
math_ops.conj,
np.array([1 + 3j, -4 + 7j, 2.7, -3j], dtype=dtype),
expected=np.array([1 - 3j, -4 - 7j, 2.7, 3j], dtype=dtype))
self._assertOpOutputMatchesExpected(
math_ops.imag,
np.array([1 + 3j, -4 + 7j, 2.7, -3j], dtype=dtype),
expected=np.array([3, 7, 0, -3], dtype=ctypes[dtype]))
self._assertOpOutputMatchesExpected(
math_ops.real,
np.array([1 + 3j, -4 + 7j, 2.7, -3j], dtype=dtype),
expected=np.array([1, -4, 2.7, 0], dtype=ctypes[dtype]))
def testIntOps(self):
for dtype in self.int_types:
self._assertOpOutputMatchesExpected(
bitwise_ops.invert,
np.array([0, -1, 1, 16, 42], dtype=dtype),
expected=np.array([-1, 0, -2, -17, -43], dtype=dtype))
def testNumericOps(self):
for dtype in self.numeric_types:
self._assertOpOutputMatchesExpected(
math_ops.abs,
np.array([[2, -1]], dtype=dtype),
expected=np.array([[2, 1]], dtype=dtype))
self._assertOpOutputMatchesExpected(
math_ops.negative,
np.array([[-1, 1]], dtype=dtype),
expected=np.array([[1, -1]], dtype=dtype))
self._assertOpOutputMatchesExpected(
math_ops.square,
np.array([[-2, 3]], dtype=dtype),
expected=np.array([[4, 9]], dtype=dtype))
self._assertOpOutputMatchesExpected(
array_ops.zeros_like,
np.array([[4, 3], [2, 1]], dtype=dtype),
expected=np.array([[0, 0], [0, 0]], dtype=dtype))
self._assertOpOutputMatchesExpected(
array_ops.ones_like,
np.array([[4, 3], [2, 1]], dtype=dtype),
expected=np.array([[1, 1], [1, 1]], dtype=dtype))
# TODO (phawkins): these tests fail unless fastmath optimizations id:202
# https://github.com/imdone/tensorflow/issues/203
# are disabled. Use more robust IsInf/IsNaN detection and enable these
# tests.
@unittest.skip("test case fails in fast-math mode")
def testIsInfAndIsNan(self):
for dtype in self.float_types:
self._assertOpOutputMatchesExpected(
math_ops.is_inf,
np.array([[np.NINF, -2, -1, 0, 0.5, 1, 2, np.inf, np.nan]],
dtype=dtype),
expected=np.array([[1, 0, 0, 0, 0, 0, 0, 1, 0]], dtype=np.bool))
self._assertOpOutputMatchesExpected(
math_ops.is_nan,
np.array([[np.NINF, -2, -1, 0, 0.5, 1, 2, np.inf, np.nan]],
dtype=dtype),
expected=np.array([[0, 0, 0, 0, 0, 0, 0, 0, 1]], dtype=np.bool))
def testLogicalOps(self):
self._assertOpOutputMatchesExpected(
math_ops.logical_not,
np.array([[True, False], [False, True]], dtype=np.bool),
expected=np.array([[False, True], [True, False]], dtype=np.bool))
def testBiasAddGrad(self):
self._assertOpOutputMatchesExpected(
gen_nn_ops.bias_add_grad,
np.array([[1., 2.], [3., 4.]], dtype=np.float32),
expected=np.array([4., 6.], dtype=np.float32))
self._assertOpOutputMatchesExpected(
lambda x: gen_nn_ops.bias_add_grad(x, data_format="NCHW"),
np.array([[[1., 2.], [3., 4.]], [[5., 6.], [7., 8.]]],
dtype=np.float32),
expected=np.array([10., 26.], dtype=np.float32))
def testCast(self):
shapes = [[], [4], [2, 3], [2, 0, 4]]
types = (set([dtypes.bool, dtypes.int32, dtypes.float32]) |
self.complex_tf_types)
for shape in shapes:
for src_type in types:
for dst_type in types:
src = np.arange(np.prod(shape)).astype(src_type.as_numpy_dtype)
if src_type in self.complex_tf_types:
src += (np.arange(np.prod(shape)) * 2j).astype(
src_type.as_numpy_dtype)
src = src.reshape(shape)
dst = src.astype(dst_type.as_numpy_dtype)
self._assertOpOutputMatchesExpected(
lambda x, dst_type=dst_type: math_ops.cast(x, dst_type),
src,
expected=dst)
def testBitcast(self):
self._assertOpOutputMatchesExpected(
lambda x: array_ops.bitcast(x, dtypes.int32),
np.array([1, 0x3f800000], np.int32),
expected=np.array([1, 0x3f800000], np.int32))
self._assertOpOutputMatchesExpected(
lambda x: array_ops.bitcast(x, dtypes.float32),
np.array([1, 0x3f800000], np.int32),
expected=np.array([1e-45, 1.0], np.float32))
self._assertOpOutputMatchesExpected(
lambda x: array_ops.bitcast(x, dtypes.int32),
np.array([1e-45, 1.0], np.float32),
expected=np.array([1, 0x3f800000], np.int32))
def testInvertPermutation(self):
self._assertOpOutputMatchesExpected(
array_ops.invert_permutation,
np.array([1, 2, 0], np.int32),
expected=np.array([2, 0, 1], dtype=np.int32))
def testRank(self):
rank_op = lambda x: array_ops.rank_internal(x, optimize=False)
for dtype in self.numeric_types:
self._assertOpOutputMatchesExpected(
rank_op, dtype(7), expected=np.int32(0))
self._assertOpOutputMatchesExpected(
rank_op, np.array(
[[], []], dtype=dtype), expected=np.int32(2))
self._assertOpOutputMatchesExpected(
rank_op, np.array(
[-1, 1], dtype=dtype), expected=np.int32(1))
self._assertOpOutputMatchesExpected(
rank_op, np.array(
[[-1, 1]], dtype=dtype), expected=np.int32(2))
self._assertOpOutputMatchesExpected(
rank_op,
np.array([[-1], [1], [4]], dtype=dtype),
expected=np.int32(2))
def testShape(self):
shape_op = lambda x: array_ops.shape_internal(x, optimize=False)
for dtype in self.numeric_types:
self._assertOpOutputMatchesExpected(
shape_op, dtype(7), expected=np.array([], dtype=np.int32))
self._assertOpOutputMatchesExpected(
shape_op,
np.array([[], []], dtype=dtype),
expected=np.array([2, 0], dtype=np.int32))
self._assertOpOutputMatchesExpected(
shape_op,
np.array([-1, 1], dtype=dtype),
expected=np.array([2], dtype=np.int32))
self._assertOpOutputMatchesExpected(
shape_op,
np.array([[-1, 1]], dtype=dtype),
expected=np.array([1, 2], dtype=np.int32))
self._assertOpOutputMatchesExpected(
shape_op,
np.array([[-1], [1], [4]], dtype=dtype),
expected=np.array([3, 1], dtype=np.int32))
def testSize(self):
size_op = lambda x: array_ops.size_internal(x, optimize=False)
for dtype in self.numeric_types:
self._assertOpOutputMatchesExpected(
size_op, dtype(7), expected=np.int32(1))
self._assertOpOutputMatchesExpected(
size_op, np.array([[], []], dtype=dtype), expected=np.int32(0))
self._assertOpOutputMatchesExpected(
size_op, np.array([-1, 1], dtype=dtype), expected=np.int32(2))
self._assertOpOutputMatchesExpected(
size_op, np.array([[-1, 1]], dtype=dtype), expected=np.int32(2))
self._assertOpOutputMatchesExpected(
size_op,
np.array([[-1], [1], [4]], dtype=dtype),
expected=np.int32(3))
def testUnpack(self):
self._assertOpOutputMatchesExpected(
array_ops.unstack,
np.array([[1., 2.], [3., 4.], [5., 6.]], dtype=np.float32),
expected=[
np.array([1., 2.], dtype=np.float32),
np.array([3., 4.], dtype=np.float32),
np.array([5., 6.], dtype=np.float32),
],
equality_test=self.ListsAreClose)
self._assertOpOutputMatchesExpected(
lambda x: array_ops.unstack(x, axis=1),
np.array([[1., 2.], [3., 4.], [5., 6.]], dtype=np.float32),
expected=[
np.array([1., 3., 5.], dtype=np.float32),
np.array([2., 4., 6.], dtype=np.float32),
],
equality_test=self.ListsAreClose)
def testDepthToSpace(self):
def make_op(data_format):
def op(x):
return array_ops.depth_to_space(x, block_size=2,
data_format=data_format)
return op
for dtype in self.numeric_types:
for data_format in ["NCHW", "NHWC"]:
self._assertOpOutputMatchesExpected(
make_op(data_format),
nhwc_to_format(np.array([[[[1, 2, 3, 4]]]], dtype=dtype),
data_format),
expected=nhwc_to_format(np.array([[[[1], [2]],
[[3], [4]]]], dtype=dtype),
data_format))
self._assertOpOutputMatchesExpected(
make_op(data_format),
nhwc_to_format(
np.array([[[[1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12]]]],
dtype=dtype),
data_format),
expected=nhwc_to_format(
np.array([[[[1, 2, 3], [4, 5, 6]], [[7, 8, 9], [10, 11, 12]]]],
dtype=dtype),
data_format))
self._assertOpOutputMatchesExpected(
make_op(data_format),
nhwc_to_format(
np.array([[[[1, 2, 3, 4],
[5, 6, 7, 8]],
[[9, 10, 11, 12],
[13, 14, 15, 16]]]], dtype=dtype),
data_format),
expected=nhwc_to_format(
np.array([[[[1], [2], [5], [6]],
[[3], [4], [7], [8]],
[[9], [10], [13], [14]],
[[11], [12], [15], [16]]]], dtype=dtype),
data_format))
def testSpaceToDepth(self):
def make_op(data_format):
def op(x):
return array_ops.space_to_depth(x, block_size=2,
data_format=data_format)
return op
for dtype in self.numeric_types:
for data_format in ["NCHW", "NHWC"]:
self._assertOpOutputMatchesExpected(
make_op(data_format),
nhwc_to_format(np.array([[[[1], [2]],
[[3], [4]]]], dtype=dtype),
data_format),
expected=nhwc_to_format(np.array([[[[1, 2, 3, 4]]]], dtype=dtype),
data_format))
self._assertOpOutputMatchesExpected(
make_op(data_format),
nhwc_to_format(np.array([[[[1, 2, 3], [4, 5, 6]],
[[7, 8, 9], [10, 11, 12]]]], dtype=dtype),
data_format),
expected=nhwc_to_format(
np.array([[[[1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12]]]],
dtype=dtype),
data_format))
self._assertOpOutputMatchesExpected(
make_op(data_format),
nhwc_to_format(np.array([[[[1], [2], [5], [6]],
[[3], [4], [7], [8]],
[[9], [10], [13], [14]],
[[11], [12], [15], [16]]]], dtype=dtype),
data_format),
expected=nhwc_to_format(
np.array([[[[1, 2, 3, 4],
[5, 6, 7, 8]],
[[9, 10, 11, 12],
[13, 14, 15, 16]]]], dtype=dtype),
data_format))
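  # softplus(x) = log(1 + exp(x)); np.logaddexp(0, x) computes this reference value.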
def _assertSoftplusMatchesExpected(self, features, dtype):
features = np.array(features, dtype=dtype)
zero = np.asarray(0).astype(dtype)
expected = np.logaddexp(zero, features)
self._assertOpOutputMatchesExpected(
nn_ops.softplus, features, expected=expected)
def testSoftplus(self):
for dtype in self.float_types:
self._assertSoftplusMatchesExpected([[-2, 0, 8]], dtype)
self._assertSoftplusMatchesExpected(
[[-9, 7, -5, 3, -1], [1, -3, 5, -7, 9]], dtype)
if dtype == dtypes.bfloat16.as_numpy_dtype:
log_eps = np.log(np.finfo(np.float32).eps)
else:
log_eps = np.log(np.finfo(dtype).eps)
one = dtype(1)
ten = dtype(10)
self._assertSoftplusMatchesExpected([
log_eps, log_eps - one, log_eps + one, log_eps - ten,
log_eps + ten, -log_eps, -log_eps - one, -log_eps + one,
-log_eps - ten, -log_eps + ten], dtype)
if __name__ == "__main__":
googletest.main()
|
py
|
1a55f19ed14304cec7e5fc14d53880f21c6759c3
|
from classes import List
def write_data_into_file(f, password, count_of_lists, list_of_task_lists):
"""
Writes information from variables into file
:param f: file
:param password: user's hashed password
:param count_of_lists: number of user's task lists
:param list_of_task_lists: all user's task lists are included in this list
:return: file with updated data
"""
f.write(password + '\n')
f.write(str(count_of_lists) + '\n')
for i in range(count_of_lists):
temp_line = str(list_of_task_lists[i].name) + ' ' + str(len(list_of_task_lists[i].tasks))
f.write(temp_line + '\n')
for j in range(len(list_of_task_lists[i].tasks)):
temp_task = list_of_task_lists[i].tasks[j].name + ', ' + \
list_of_task_lists[i].tasks[j].expiration_date + ', ' + \
list_of_task_lists[i].tasks[j].reminder + ', ' + \
str(list_of_task_lists[i].tasks[j].priority) + ', ' + \
list_of_task_lists[i].tasks[j].description
f.write(temp_task + '\n')
temp_line = str(len(list_of_task_lists[i].done_tasks))
f.write(temp_line + '\n')
for j in range(len(list_of_task_lists[i].done_tasks)):
temp_done_task = list_of_task_lists[i].done_tasks[j].name + ', ' + \
list_of_task_lists[i].done_tasks[j].expiration_date + ', ' + \
list_of_task_lists[i].done_tasks[j].reminder + ', ' + \
str(list_of_task_lists[i].done_tasks[j].priority) + ', ' + \
list_of_task_lists[i].done_tasks[j].description
f.write(temp_done_task + '\n')
return f
def read_data_from_file(f):
"""
Takes data from files and writes it in count_of_lists, list_of_task_lists
:param f: file
:return: number of user's task lists and information about all user's task lists
"""
    count_of_lists = int(f.readline().strip())  # number of task lists
list_of_task_lists = []
for i in range(count_of_lists):
list_of_task_lists.append(List('', [], []))
task_list, done_task_list = [], []
list_of_task_lists[i].name, count_of_tasks = f.readline().strip().split()
count_of_tasks = int(count_of_tasks)
for j in range(count_of_tasks):
task_list.append(f.readline().strip())
list_of_task_lists[i].read_tasks(task_list)
count_of_done_tasks = int(f.readline().strip())
for j in range(count_of_done_tasks):
done_task_list.append(f.readline().strip())
list_of_task_lists[i].read_done_tasks(done_task_list)
return count_of_lists, list_of_task_lists
|
py
|
1a55f2c411aafabf63f2f51336ad9cc7e9d4399a
|
# coding: utf-8
import re
import six
from huaweicloudsdkcore.sdk_response import SdkResponse
from huaweicloudsdkcore.utils.http_utils import sanitize_for_serialization
class ShowServerTagsResponse(SdkResponse):
"""
Attributes:
openapi_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
sensitive_list = []
openapi_types = {
'tags': 'list[ServerTag]'
}
attribute_map = {
'tags': 'tags'
}
def __init__(self, tags=None):
"""ShowServerTagsResponse - a model defined in huaweicloud sdk"""
super(ShowServerTagsResponse, self).__init__()
self._tags = None
self.discriminator = None
if tags is not None:
self.tags = tags
@property
def tags(self):
"""Gets the tags of this ShowServerTagsResponse.
        Tag list
:return: The tags of this ShowServerTagsResponse.
:rtype: list[ServerTag]
"""
return self._tags
@tags.setter
def tags(self, tags):
"""Sets the tags of this ShowServerTagsResponse.
        Tag list
:param tags: The tags of this ShowServerTagsResponse.
:type: list[ServerTag]
"""
self._tags = tags
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
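        # Serialize each declared attribute, recursing into nested models and
        # lists of models; attributes in sensitive_list are masked.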
for attr, _ in six.iteritems(self.openapi_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
if attr in self.sensitive_list:
result[attr] = "****"
else:
result[attr] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
import simplejson as json
if six.PY2:
import sys
reload(sys)
sys.setdefaultencoding("utf-8")
return json.dumps(sanitize_for_serialization(self), ensure_ascii=False)
def __repr__(self):
"""For `print`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, ShowServerTagsResponse):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""Returns true if both objects are not equal"""
return not self == other
|
py
|
1a55f410223c4fdd289ad3100e7352ae082f0fee
|
from random import randint
palpite = []
jog = []
jogcop = []
qtd = int(input('Enter how many games you want to generate: '))
i = 0
c = 0
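# Generate each game: draw 6 unique numbers between 1 and 60, sort them, and store the game.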
while i < qtd:
while c < 6:
n1 = randint(1, 60)
if n1 not in jog:
jog.append(n1)
c += 1
jog.sort()
palpite.append(jog[:])
jog.clear()
c = 0
    print(f'Game {i+1}: {palpite[i]}.')
i += 1
|
py
|
1a55f506f88716d4f0199bb245dc93a2b86c16ae
|
""" Sample Data"""
# published as /alice/index.schema
alice_index_schema = ''.join(("doc:/alice/movies/[^/]+$\n"
" -> wrapper:/irtf/icnrg/flic\n"
" -> wrapper:/alice/homebrewed/ac\n"
" mode='CBC'\n"
" padding='PKCS5'\n"
" => type:/mime/video/mp4\n"
"\n"
"doc:/alice/public/docs/.*[.]pdf$\n"
" -> wrapper:/simple/chunking\n"
" => type:/mime/application/pdf\n"
"\n"
"doc:/alice/public/img/basel.jpg$\n"
" -> wrapper:/simple/chunking\n"
" => type:/mime/image/jpeg\n"))
# published as /alice/homebrewed/ac
ac_wrapper_desc = ''.join(("def decap:\n"
" $secDek = call:/alice/homebrewed/fetchDEK(#, @id.pub)\n"
" $dek = call:/crypto/lib/rsa/decrypt($secDek, @id.priv)\n"
" return call:/nist/aes/decrypt(#, $dek, %mode, %padding)\n"
"\n"
"\n"
"def encap:\n"
" $secDek = call:/alice/homebrewed/fetchDEK(#, @id.pub)\n",
" $dek = call:/crypto/lib/rsa/decrypt($secDek, @id.priv\n",
" sreturn call:/nist/aes/encrypt(#, $dek, %mode, %padding)\n"))
|
py
|
1a55f57c13ee96107dafc097d50c50d6ef0c2376
|
"""
This is an end to end release test automation script used to kick off periodic
release tests, running on Anyscale.
The tool leverages app configs and compute templates.
Calling this script will run a single release test.
Example:
python e2e.py --test-config ~/ray/release/xgboost_tests/xgboost_tests.yaml --test-name tune_small
The following steps are then performed:
1. It will look up the test tune_small in the file xgboost_tests.yaml
2. It will fetch the specified app config and compute template and register
those with anyscale (if they don’t exist yet)
3. It waits until the app config is built
4. It then kicks off the script defined in the run block
5. When the script is finished, it will fetch the latest logs, the full log
output, and any artifacts specified in the artifacts block.
6. The full logs and artifacts will be stored in an S3 bucket
7. It will also fetch the json file specified in the run block as results.
This is the file where you should write your metrics to.
8. All results are then stored in a database.
Specifically it will store the following fields:
- Timestamp
- Test name
- Status (finished, error, timeout, invalid)
- Last logs (50 lines)
- results (see above)
- artifacts (links to s3 files)
Then the script exits. If an error occurs at any time, a fail result is
written to the database.
Writing a new release test
--------------------------
Each release test requires the following:
1. It has to be added in a release test yaml file, describing meta information
about the test (e.g. name, command to run, timeout)
2. You need an app config yaml
3. You need a compute template yaml
4. You need to define a command to run. This is usually a python script.
The command should accept (or ignore) a single optional
`--smoke-test` argument.
Usually the command should write its result metrics to a json file.
The json filename is available in the TEST_OUTPUT_JSON env variable.
5. Add your test in release/.buildkite/build_pipeline.py.
The script will have access to these environment variables:
"RAY_ADDRESS": os.environ.get("RAY_ADDRESS", "auto")
"TEST_OUTPUT_JSON": results_json_filename
"IS_SMOKE_TEST": "1" if smoke_test else "0"
For an example, take a look at the XGBoost test suite:
https://github.com/ray-project/ray/blob/master/release/xgboost_tests/xgboost_tests.yaml
These all use the same app configs and similar compute templates. This means
that app configs can be re-used across runs and only have to be built once.
App configs and compute templates can interpret environment variables.
A notable one is the `RAY_WHEELS` variable which points to the wheels that
should be tested (e.g. latest master wheels). You might want to include
something like this in your `post_build_cmds`:
- pip3 install -U {{ env["RAY_WHEELS"] | default("ray") }}
If you want to force rebuilds, consider using something like
- echo {{ env["TIMESTAMP"] }}
so that your app configs changes each time the script is executed. If you
only want to trigger rebuilds once per day, use `DATESTAMP` instead:
- echo {{ env["DATESTAMP"] }}
Local testing
-------------
For local testing, make sure to authenticate with the ray-ossci AWS user
(e.g. by setting the respective environment variables obtained from go/aws),
or use the `--no-report` command line argument.
Also make sure to set these environment variables:
- ANYSCALE_CLI_TOKEN (should contain your anyscale credential token)
- ANYSCALE_PROJECT (should point to a project ID you have access to)
A test can then be run like this:
python e2e.py --no-report --test-config ~/ray/release/xgboost_tests/xgboost_tests.yaml --test-name tune_small
The `--no-report` option disables storing the results in the DB and
artifacts on S3. If you set this option, you do not need access to the
ray-ossci AWS user.
Using Compilation on Product + App Config Override
--------------------------------------------------
For quick iteration when debugging a release test, go/compile-on-product allows
you to easily modify and recompile Ray, such that the recompilation happens
within an app build step and can benefit from a warm Bazel cache. See
go/compile-on-product for more information.
After kicking off the app build, you can give the app config ID to this script
as an app config override, where the indicated app config will be used instead
of the app config given in the test config. E.g., running
python e2e.py --no-report --test-config ~/ray/benchmarks/benchmark_tests.yaml --test-name=single_node --app-config-id-override=apt_TBngEXXXrhipMXgexVcrpC9i
would run the single_node benchmark test with the apt_TBngEXXXrhipMXgexVcrpC9i
app config instead of the app config given in
~/ray/benchmarks/benchmark_tests.yaml. If the build for the app config is still
in progress, the script will wait until it completes, same as for a locally
defined app config.
Running on Head Node vs Running with Anyscale Connect
-----------------------------------------------------
By default release tests run their drivers on the head node. Support is being
added to run release tests that execute the driver as a subprocess and run
the workload on Anyscale product via Anyscale connect.
Note that when the driver in the test is a subprocess of releaser, releaser
cannot be terminated before the test finishes.
Other known feature gaps when running with Anyscale connect:
- Kicking off a test or checking progress is not supported.
- Downloading / uploading logs and artifacts are unsupported.
- Logs from remote may not have finished streaming before the driver exits.
Long running tests
------------------
Long running tests can be kicked off with by adding the --kick-off-only
parameters to the e2e script. The status can then be checked with the
--check command.
Long running test sessions will be terminated after `timeout` seconds, after
which the latest result in the TEST_OUTPUT_JSON will be reported. Thus,
long running release tests should update this file periodically.
There are also two config options to configure behavior. The `time_key` is
needed to track the latest update of the TEST_OUTPUT_JSON and should contain
a floating point number (usually `time.time()`). The `max_update_delay` then
specifies the maximum time in seconds that can pass without an update
to the results json. If the output file hasn't been updated in e.g. 60 seconds,
this could indicate that the command is stale/frozen, and thus should fail.
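A sketch of such a periodic update (here `finished` and `results` come from the
workload itself, and the `last_update` key must match the configured `time_key`):
    while not finished:
        results["last_update"] = time.time()
        with open(os.environ["TEST_OUTPUT_JSON"], "wt") as f:
            json.dump(results, f)
        time.sleep(10)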
Release test yaml example
-------------------------
- name: example
owner:
mail: "[email protected]" # Currently not used
slack: "@tune-team" # Currentl not used
cluster:
app_config: app_config.yaml # Relative to the release test yaml
compute_template: tpl_cpu.yaml
run:
timeout: 600 # in seconds
prepare: python wait_cluster.py 4 600 # prepare cmd to run before test
script: python workloads/train.py # actual release test command
# Only needed for long running test
time_key: last_update # Key in the results json indicating current time
max_update_delay: 30 # If state hasn't been updated in 30s, terminate
# This block is optional
artifacts:
# Artifact name: location on head node
- detailed_output: detailed_output.csv
# This block is optional. If present, the contents will be
# deep updated for smoke testing
smoke_test:
cluster:
compute_template: tpl_cpu_smoketest.yaml
""" # noqa: E501
import argparse
import boto3
import collections
import copy
import datetime
import hashlib
import jinja2
import json
import logging
import multiprocessing
import os
import requests
import shutil
import subprocess
import sys
import tempfile
import time
from queue import Empty
from typing import Any, Dict, Optional, Tuple, List
import yaml
import anyscale
import anyscale.conf
from anyscale.api import instantiate_api_client
from anyscale.controllers.session_controller import SessionController
from anyscale.sdk.anyscale_client.sdk import AnyscaleSDK
logger = logging.getLogger()
logger.setLevel(logging.INFO)
handler = logging.StreamHandler(stream=sys.stdout)
formatter = logging.Formatter(fmt="[%(levelname)s %(asctime)s] "
"%(filename)s: %(lineno)d "
"%(message)s")
handler.setFormatter(formatter)
logger.addHandler(handler)
def getenv_default(key: str, default: Optional[str] = None):
"""Return environment variable with default value"""
# If the environment variable is set but "", still return default
return os.environ.get(key, None) or default
GLOBAL_CONFIG = {
"ANYSCALE_USER": getenv_default("ANYSCALE_USER",
"[email protected]"),
"ANYSCALE_HOST": getenv_default("ANYSCALE_HOST",
"https://beta.anyscale.com"),
"ANYSCALE_CLI_TOKEN": getenv_default("ANYSCALE_CLI_TOKEN"),
"ANYSCALE_CLOUD_ID": getenv_default(
"ANYSCALE_CLOUD_ID",
"cld_4F7k8814aZzGG8TNUGPKnc"), # cld_4F7k8814aZzGG8TNUGPKnc
"ANYSCALE_PROJECT": getenv_default("ANYSCALE_PROJECT", ""),
"RAY_VERSION": getenv_default("RAY_VERSION", "2.0.0.dev0"),
"RAY_REPO": getenv_default("RAY_REPO",
"https://github.com/ray-project/ray.git"),
"RAY_BRANCH": getenv_default("RAY_BRANCH", "master"),
"RELEASE_AWS_BUCKET": getenv_default("RELEASE_AWS_BUCKET",
"ray-release-automation-results"),
"RELEASE_AWS_LOCATION": getenv_default("RELEASE_AWS_LOCATION", "dev"),
"RELEASE_AWS_DB_NAME": getenv_default("RELEASE_AWS_DB_NAME", "ray_ci"),
"RELEASE_AWS_DB_TABLE": getenv_default("RELEASE_AWS_DB_TABLE",
"release_test_result"),
"RELEASE_AWS_DB_SECRET_ARN": getenv_default(
"RELEASE_AWS_DB_SECRET_ARN",
"arn:aws:secretsmanager:us-west-2:029272617770:secret:"
"rds-db-credentials/cluster-7RB7EYTTBK2EUC3MMTONYRBJLE/ray_ci-MQN2hh",
),
"RELEASE_AWS_DB_RESOURCE_ARN": getenv_default(
"RELEASE_AWS_DB_RESOURCE_ARN",
"arn:aws:rds:us-west-2:029272617770:cluster:ci-reporting",
),
"RELEASE_RESULTS_DIR": getenv_default("RELEASE_RESULTS_DIR",
"/tmp/ray_release_test_artifacts"),
"DATESTAMP": str(datetime.datetime.now().strftime("%Y%m%d")),
"TIMESTAMP": str(int(datetime.datetime.now().timestamp())),
"EXPIRATION_1D": str((datetime.datetime.now() +
datetime.timedelta(days=1)).strftime("%Y-%m-%d")),
"EXPIRATION_2D": str((datetime.datetime.now() +
datetime.timedelta(days=2)).strftime("%Y-%m-%d")),
"EXPIRATION_3D": str((datetime.datetime.now() +
datetime.timedelta(days=3)).strftime("%Y-%m-%d")),
}
REPORT_S = 30
RETRY_MULTIPLIER = 2
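# Call f(), retrying on the given exceptions with exponentially growing delays
# (the delay doubles after every failed attempt, up to max_retries attempts).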
def exponential_backoff_retry(f, retry_exceptions, initial_retry_delay_s,
max_retries):
retry_cnt = 0
retry_delay_s = initial_retry_delay_s
while True:
try:
return f()
except retry_exceptions as e:
retry_cnt += 1
if retry_cnt > max_retries:
raise
logger.info(f"Retry function call failed due to {e} "
f"in {retry_delay_s} seconds...")
time.sleep(retry_delay_s)
retry_delay_s *= RETRY_MULTIPLIER
def maybe_fetch_api_token():
if GLOBAL_CONFIG["ANYSCALE_CLI_TOKEN"] is None:
logger.info(
"Missing ANYSCALE_CLI_TOKEN, retrieving from AWS secrets store")
# NOTE(simon) This should automatically retrieve
# [email protected]'s anyscale token
GLOBAL_CONFIG["ANYSCALE_CLI_TOKEN"] = boto3.client(
"secretsmanager", region_name="us-west-2"
).get_secret_value(
SecretId="arn:aws:secretsmanager:us-west-2:029272617770:secret:"
"release-automation/"
"anyscale-token20210505220406333800000001-BcUuKB")["SecretString"]
class PrepareCommandRuntimeError(RuntimeError):
pass
class ReleaseTestTimeoutError(RuntimeError):
pass
class SessionTimeoutError(ReleaseTestTimeoutError):
pass
class FileSyncTimeoutError(ReleaseTestTimeoutError):
pass
class CommandTimeoutError(ReleaseTestTimeoutError):
pass
class PrepareCommandTimeoutError(ReleaseTestTimeoutError):
pass
# e.g., App config failure.
class AppConfigBuildFailure(RuntimeError):
pass
class State:
def __init__(self, state: str, timestamp: float, data: Any):
self.state = state
self.timestamp = timestamp
self.data = data
sys.path.insert(0, anyscale.ANYSCALE_RAY_DIR)
def anyscale_project_url(project_id: str):
return f"{GLOBAL_CONFIG['ANYSCALE_HOST']}" \
f"/o/anyscale-internal/projects/{project_id}" \
f"/?tab=session-list"
def anyscale_session_url(project_id: str, session_id: str):
return f"{GLOBAL_CONFIG['ANYSCALE_HOST']}" \
f"/o/anyscale-internal/projects/{project_id}" \
f"/clusters/{session_id}"
def anyscale_compute_tpl_url(compute_tpl_id: str):
return f"{GLOBAL_CONFIG['ANYSCALE_HOST']}" \
f"/o/anyscale-internal/configurations/cluster-computes" \
f"/{compute_tpl_id}"
def anyscale_app_config_build_url(build_id: str):
return f"{GLOBAL_CONFIG['ANYSCALE_HOST']}" \
f"/o/anyscale-internal/configurations/app-config-details" \
f"/{build_id}"
def wheel_url(ray_version, git_branch, git_commit):
return f"https://s3-us-west-2.amazonaws.com/ray-wheels/" \
f"{git_branch}/{git_commit}/" \
f"ray-{ray_version}-cp37-cp37m-manylinux2014_x86_64.whl"
def wheel_exists(ray_version, git_branch, git_commit):
url = wheel_url(ray_version, git_branch, git_commit)
return requests.head(url).status_code == 200
def commit_or_url(commit_or_url: str) -> str:
if commit_or_url.startswith("http"):
# Assume URL
return commit_or_url
# Else, assume commit
os.environ["RAY_COMMIT"] = commit_or_url
return wheel_url(GLOBAL_CONFIG["RAY_VERSION"], GLOBAL_CONFIG["RAY_BRANCH"],
commit_or_url)
def get_latest_commits(repo: str, branch: str = "master") -> List[str]:
cur = os.getcwd()
with tempfile.TemporaryDirectory() as tmpdir:
os.chdir(tmpdir)
clone_cmd = [
"git",
"clone",
"--filter=tree:0",
"--no-checkout",
# "--single-branch",
# "--depth=10",
f"--branch={branch}",
repo,
tmpdir,
]
log_cmd = [
"git",
"log",
"-n",
"10",
"--pretty=format:%H",
]
subprocess.check_output(clone_cmd)
commits = subprocess.check_output(log_cmd).decode(
sys.stdout.encoding).split("\n")
os.chdir(cur)
return commits
def find_ray_wheels(repo: str, branch: str, version: str):
url = None
commits = get_latest_commits(repo, branch)
logger.info(f"Latest 10 commits for branch {branch}: {commits}")
for commit in commits:
if wheel_exists(version, branch, commit):
url = wheel_url(version, branch, commit)
os.environ["RAY_WHEELS"] = url
os.environ["RAY_COMMIT"] = commit
logger.info(
f"Found wheels URL for Ray {version}, branch {branch}: "
f"{url}")
break
return url
def populate_wheels_sanity_check(commit: Optional[str] = None):
if not commit:
cmd = ("python -c 'import ray; print("
"\"No commit sanity check available, but this is the "
"Ray wheel commit:\", ray.__commit__)'")
else:
cmd = (f"python -c 'import ray; "
f"assert ray.__commit__ == \"{commit}\", ray.__commit__'")
os.environ["RAY_WHEELS_SANITY_CHECK"] = cmd
def _check_stop(stop_event: multiprocessing.Event, timeout_type: str):
if stop_event.is_set():
if timeout_type == "prepare_command":
raise PrepareCommandTimeoutError(
"Process timed out in the prepare command stage.")
if timeout_type == "command":
raise CommandTimeoutError(
"Process timed out while running a command.")
elif timeout_type == "file_sync":
raise FileSyncTimeoutError(
"Process timed out while syncing files.")
elif timeout_type == "session":
raise SessionTimeoutError(
"Process timed out while starting a session.")
else:
assert False, "Unexpected timeout type."
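# Recursively merge mapping u into d: nested mappings are merged in place,
# all other values in u overwrite those in d.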
def _deep_update(d, u):
for k, v in u.items():
if isinstance(v, collections.abc.Mapping):
d[k] = _deep_update(d.get(k, {}), v)
else:
d[k] = v
return d
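# Stable SHA-256 hex digest of a dict, used to derive deterministic names for
# compute templates and app configs.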
def _dict_hash(dt: Dict[Any, Any]) -> str:
json_str = json.dumps(dt, sort_keys=True, ensure_ascii=True)
sha = hashlib.sha256()
sha.update(json_str.encode())
return sha.hexdigest()
def _load_config(local_dir: str, config_file: Optional[str]) -> Optional[Dict]:
if not config_file:
return None
config_path = os.path.join(local_dir, config_file)
with open(config_path, "rt") as f:
# Todo: jinja2 render
content = f.read()
env = copy.deepcopy(os.environ)
env.update(GLOBAL_CONFIG)
content = jinja2.Template(content).render(env=env)
return yaml.safe_load(content)
def has_errored(result: Dict[Any, Any]) -> bool:
return result.get("status", "invalid") != "finished"
def report_result(test_suite: str, test_name: str, status: str, last_logs: str,
results: Dict[Any, Any], artifacts: Dict[Any, Any],
category: str):
now = datetime.datetime.utcnow()
rds_data_client = boto3.client("rds-data", region_name="us-west-2")
schema = GLOBAL_CONFIG["RELEASE_AWS_DB_TABLE"]
sql = (
f"INSERT INTO {schema} "
f"(created_on, test_suite, test_name, status, last_logs, "
f"results, artifacts, category) "
f"VALUES (:created_on, :test_suite, :test_name, :status, :last_logs, "
f":results, :artifacts, :category)")
parameters = [{
"name": "created_on",
"typeHint": "TIMESTAMP",
"value": {
"stringValue": now.strftime("%Y-%m-%d %H:%M:%S")
},
}, {
"name": "test_suite",
"value": {
"stringValue": test_suite
}
}, {
"name": "test_name",
"value": {
"stringValue": test_name
}
}, {
"name": "status",
"value": {
"stringValue": status
}
}, {
"name": "last_logs",
"value": {
"stringValue": last_logs
}
}, {
"name": "results",
"typeHint": "JSON",
"value": {
"stringValue": json.dumps(results)
},
}, {
"name": "artifacts",
"typeHint": "JSON",
"value": {
"stringValue": json.dumps(artifacts)
},
}, {
"name": "category",
"value": {
"stringValue": category
}
}]
# Default boto3 call timeout is 45 seconds.
retry_delay_s = 64
MAX_RDS_RETRY = 3
exponential_backoff_retry(
lambda: rds_data_client.execute_statement(
database=GLOBAL_CONFIG["RELEASE_AWS_DB_NAME"],
parameters=parameters,
secretArn=GLOBAL_CONFIG["RELEASE_AWS_DB_SECRET_ARN"],
resourceArn=GLOBAL_CONFIG["RELEASE_AWS_DB_RESOURCE_ARN"],
schema=schema,
sql=sql),
retry_exceptions=rds_data_client.exceptions.StatementTimeoutException,
initial_retry_delay_s=retry_delay_s,
max_retries=MAX_RDS_RETRY)
logger.info("Result has been persisted to the databse")
def log_results_and_artifacts(result: Dict):
results = result.get("results", {})
if results:
msg = "Observed the following results:\n\n"
for key, val in results.items():
msg += f" {key} = {val}\n"
else:
msg = "Did not find any results."
logger.info(msg)
artifacts = result.get("artifacts", {})
if artifacts:
msg = "Saved the following artifacts:\n\n"
for key, val in artifacts.items():
msg += f" {key} = {val}\n"
else:
msg = "Did not find any artifacts."
logger.info(msg)
def _cleanup_session(sdk: AnyscaleSDK, session_id: str):
if session_id:
# Just trigger a request. No need to wait until session shutdown.
sdk.terminate_session(
session_id=session_id, terminate_session_options={})
def search_running_session(sdk: AnyscaleSDK, project_id: str,
session_name: str) -> Optional[str]:
session_id = None
logger.info(f"Looking for existing session with name {session_name}")
result = sdk.search_sessions(
project_id=project_id,
sessions_query=dict(name=dict(equals=session_name)))
if len(result.results) > 0 and result.results[0].state == "Running":
logger.info("Found existing session.")
session_id = result.results[0].id
return session_id
def create_or_find_compute_template(
sdk: AnyscaleSDK,
project_id: str,
compute_tpl: Dict[Any, Any],
_repeat: bool = True) -> Tuple[Optional[str], Optional[str]]:
compute_tpl_id = None
compute_tpl_name = None
if compute_tpl:
# As of Anyscale 0.4.1, it is an error to use the same compute template
# name within the same organization, between different projects.
compute_tpl_name = f"{project_id}/compute/{_dict_hash(compute_tpl)}"
logger.info(f"Tests uses compute template "
f"with name {compute_tpl_name}. Looking up existing "
f"templates.")
paging_token = None
while not compute_tpl_id:
result = sdk.search_compute_templates(
dict(
project_id=project_id,
name=dict(equals=compute_tpl_name),
include_anonymous=True),
paging_token=paging_token)
paging_token = result.metadata.next_paging_token
for res in result.results:
if res.name == compute_tpl_name:
compute_tpl_id = res.id
logger.info(
f"Template already exists with ID {compute_tpl_id}")
break
if not paging_token:
break
if not compute_tpl_id:
logger.info(f"Compute template not found. "
f"Creating with name {compute_tpl_name}.")
try:
result = sdk.create_compute_template(
dict(
name=compute_tpl_name,
project_id=project_id,
config=compute_tpl))
compute_tpl_id = result.result.id
except Exception as e:
if _repeat:
logger.warning(
f"Got exception when trying to create compute "
f"template: {e}. Sleeping for 10 seconds and then "
f"try again once...")
time.sleep(10)
return create_or_find_compute_template(
sdk=sdk,
project_id=project_id,
compute_tpl=compute_tpl,
_repeat=False)
raise e
logger.info(f"Compute template created with ID {compute_tpl_id}")
return compute_tpl_id, compute_tpl_name
def create_or_find_app_config(
sdk: AnyscaleSDK,
project_id: str,
app_config: Dict[Any, Any],
_repeat: bool = True) -> Tuple[Optional[str], Optional[str]]:
app_config_id = None
app_config_name = None
if app_config:
app_config_name = f"{project_id}-{_dict_hash(app_config)}"
logger.info(f"Test uses an app config with hash {app_config_name}. "
f"Looking up existing app configs with this name.")
paging_token = None
while not app_config_id:
result = sdk.list_app_configs(
project_id=project_id, count=50, paging_token=paging_token)
paging_token = result.metadata.next_paging_token
for res in result.results:
if res.name == app_config_name:
app_config_id = res.id
logger.info(
f"App config already exists with ID {app_config_id}")
break
if not paging_token or app_config_id:
break
if not app_config_id:
logger.info("App config not found. Creating new one.")
try:
result = sdk.create_app_config(
dict(
name=app_config_name,
project_id=project_id,
config_json=app_config))
app_config_id = result.result.id
except Exception as e:
if _repeat:
logger.warning(
f"Got exception when trying to create app "
f"config: {e}. Sleeping for 10 seconds and then "
f"try again once...")
time.sleep(10)
return create_or_find_app_config(
sdk=sdk,
project_id=project_id,
app_config=app_config,
_repeat=False)
raise e
logger.info(f"App config created with ID {app_config_id}")
return app_config_id, app_config_name
def install_app_config_packages(app_config: Dict[Any, Any]):
os.environ.update(app_config.get("env_vars", {}))
packages = app_config["python"]["pip_packages"]
for package in packages:
subprocess.check_output(["pip", "install", "-U", package], text=True)
def install_matching_ray():
wheel = os.environ.get("RAY_WHEELS", None)
if not wheel:
return
assert "manylinux2014_x86_64" in wheel, wheel
if sys.platform == "darwin":
platform = "macosx_10_15_intel"
elif sys.platform == "win32":
platform = "win_amd64"
else:
platform = "manylinux2014_x86_64"
wheel = wheel.replace("manylinux2014_x86_64", platform)
subprocess.check_output(["pip", "uninstall", "-y", "ray"], text=True)
subprocess.check_output(["pip", "install", "-U", wheel], text=True)
def wait_for_build_or_raise(sdk: AnyscaleSDK,
app_config_id: Optional[str]) -> Optional[str]:
if not app_config_id:
return None
# Fetch build
build_id = None
last_status = None
result = sdk.list_builds(app_config_id)
for build in sorted(result.results, key=lambda b: b.created_at):
build_id = build.id
last_status = build.status
if build.status == "failed":
continue
if build.status == "succeeded":
logger.info(f"Link to app config build: "
f"{anyscale_app_config_build_url(build_id)}")
return build_id
if last_status == "failed":
raise AppConfigBuildFailure("App config build failed.")
if not build_id:
raise AppConfigBuildFailure("No build found for app config.")
# Build found but not failed/finished yet
completed = False
start_wait = time.time()
next_report = start_wait + REPORT_S
logger.info(f"Waiting for build {build_id} to finish...")
logger.info(f"Track progress here: "
f"{anyscale_app_config_build_url(build_id)}")
while not completed:
now = time.time()
if now > next_report:
logger.info(f"... still waiting for build {build_id} to finish "
f"({int(now - start_wait)} seconds) ...")
next_report = next_report + REPORT_S
result = sdk.get_build(build_id)
build = result.result
if build.status == "failed":
raise AppConfigBuildFailure(
f"App config build failed. Please see "
f"{anyscale_app_config_build_url(build_id)} for details")
if build.status == "succeeded":
logger.info("Build succeeded.")
return build_id
completed = build.status not in ["in_progress", "pending"]
if completed:
raise AppConfigBuildFailure(
f"Unknown build status: {build.status}. Please see "
f"{anyscale_app_config_build_url(build_id)} for details")
time.sleep(1)
return build_id
def run_job(cluster_name: str, compute_tpl_name: str, cluster_env_name: str,
job_name: str, min_workers: str, script: str,
script_args: List[str], env_vars: Dict[str, str],
autosuspend: int) -> Tuple[int, str]:
# Start cluster and job
address = f"anyscale://{cluster_name}?autosuspend={autosuspend}"
logger.info(f"Starting job {job_name} with Ray address: {address}")
env = copy.deepcopy(os.environ)
env.update(GLOBAL_CONFIG)
env.update(env_vars)
env["RAY_ADDRESS"] = address
env["RAY_JOB_NAME"] = job_name
env["RAY_RELEASE_MIN_WORKERS"] = str(min_workers)
proc = subprocess.Popen(
script.split(" ") + script_args,
env=env,
stdout=subprocess.PIPE,
stderr=subprocess.STDOUT,
text=True)
proc.stdout.reconfigure(line_buffering=True)
logs = ""
for line in proc.stdout:
logs += line
sys.stdout.write(line)
proc.wait()
return proc.returncode, logs
def create_and_wait_for_session(
sdk: AnyscaleSDK,
stop_event: multiprocessing.Event,
session_name: str,
session_options: Dict[Any, Any],
) -> str:
# Create session
logger.info(f"Creating session {session_name}")
result = sdk.create_session(session_options)
session_id = result.result.id
# Trigger session start
logger.info(f"Starting session {session_name} ({session_id})")
session_url = anyscale_session_url(
project_id=GLOBAL_CONFIG["ANYSCALE_PROJECT"], session_id=session_id)
logger.info(f"Link to session: {session_url}")
result = sdk.start_session(session_id, start_session_options={})
sop_id = result.result.id
completed = result.result.completed
# Wait for session
logger.info(f"Waiting for session {session_name}...")
start_wait = time.time()
next_report = start_wait + REPORT_S
while not completed:
# Sleep 1 sec before next check.
time.sleep(1)
session_operation_response = sdk.get_session_operation(
sop_id, _request_timeout=30)
session_operation = session_operation_response.result
completed = session_operation.completed
_check_stop(stop_event, "session")
now = time.time()
if now > next_report:
logger.info(f"... still waiting for session {session_name} "
f"({int(now - start_wait)} seconds) ...")
next_report = next_report + REPORT_S
return session_id
def run_session_command(sdk: AnyscaleSDK,
session_id: str,
cmd_to_run: str,
result_queue: multiprocessing.Queue,
env_vars: Dict[str, str],
                        state_str: str = "CMD_RUN") -> Tuple[str, Any]:
full_cmd = " ".join(f"{k}={v}"
for k, v in env_vars.items()) + " " + cmd_to_run
logger.info(f"Running command in session {session_id}: \n" f"{full_cmd}")
session_url = anyscale_session_url(
project_id=GLOBAL_CONFIG["ANYSCALE_PROJECT"], session_id=session_id)
logger.info(f"Link to session: {session_url}")
result_queue.put(State(state_str, time.time(), None))
result = sdk.create_session_command(
dict(session_id=session_id, shell_command=full_cmd))
scd_id = result.result.id
return scd_id, result
def wait_for_session_command_to_complete(create_session_command_result,
sdk: AnyscaleSDK,
scd_id: str,
stop_event: multiprocessing.Event,
state_str: str = "CMD_RUN"):
result = create_session_command_result
completed = result.result.finished_at is not None
start_wait = time.time()
next_report = start_wait + REPORT_S
while not completed:
# Sleep 1 sec before next check.
time.sleep(1)
result = exponential_backoff_retry(
lambda: sdk.get_session_command(session_command_id=scd_id),
retry_exceptions=Exception,
initial_retry_delay_s=10,
max_retries=3)
completed = result.result.finished_at
if state_str == "CMD_RUN":
_check_stop(stop_event, "command")
elif state_str == "CMD_PREPARE":
_check_stop(stop_event, "prepare_command")
now = time.time()
if now > next_report:
logger.info(f"... still waiting for command to finish "
f"({int(now - start_wait)} seconds) ...")
next_report = next_report + REPORT_S
status_code = result.result.status_code
runtime = time.time() - start_wait
if status_code != 0:
if state_str == "CMD_RUN":
raise RuntimeError(
f"Command returned non-success status: {status_code}")
elif state_str == "CMD_PREPARE":
raise PrepareCommandRuntimeError(
f"Prepare command returned non-success status: {status_code}")
return status_code, runtime
def get_command_logs(session_controller: SessionController,
scd_id: str,
lines: int = 50):
result = exponential_backoff_retry(
lambda: session_controller.api_client.get_execution_logs_api_v2_session_commands_session_command_id_execution_logs_get( # noqa: E501
session_command_id=scd_id,
start_line=-1 * lines,
end_line=0),
retry_exceptions=Exception,
initial_retry_delay_s=10,
max_retries=3)
return result.result.lines
def get_remote_json_content(
temp_dir: str,
session_name: str,
remote_file: Optional[str],
session_controller: SessionController,
):
if not remote_file:
logger.warning("No remote file specified, returning empty dict")
return {}
local_target_file = os.path.join(temp_dir, ".tmp.json")
session_controller.pull(
session_name=session_name,
source=remote_file,
target=local_target_file)
with open(local_target_file, "rt") as f:
return json.load(f)
def get_local_json_content(local_file: Optional[str], ):
if not local_file:
logger.warning("No local file specified, returning empty dict")
return {}
with open(local_file, "rt") as f:
return json.load(f)
def pull_artifacts_and_store_in_cloud(
temp_dir: str,
logs: str,
session_name: str,
test_name: str,
artifacts: Optional[Dict[Any, Any]],
session_controller: SessionController,
):
output_log_file = os.path.join(temp_dir, "output.log")
with open(output_log_file, "wt") as f:
f.write(logs)
bucket = GLOBAL_CONFIG["RELEASE_AWS_BUCKET"]
location = f"{GLOBAL_CONFIG['RELEASE_AWS_LOCATION']}" \
f"/{session_name}/{test_name}"
saved_artifacts = {}
s3_client = boto3.client("s3")
s3_client.upload_file(output_log_file, bucket, f"{location}/output.log")
saved_artifacts["output.log"] = f"s3://{bucket}/{location}/output.log"
# Download artifacts
if artifacts:
for name, remote_file in artifacts.items():
logger.info(f"Downloading artifact `{name}` from "
f"{remote_file}")
local_target_file = os.path.join(temp_dir, name)
session_controller.pull(
session_name=session_name,
source=remote_file,
target=local_target_file)
# Upload artifacts to s3
s3_client.upload_file(local_target_file, bucket,
f"{location}/{name}")
saved_artifacts[name] = f"s3://{bucket}/{location}/{name}"
return saved_artifacts
def find_session_by_test_name(
sdk: AnyscaleSDK,
session_controller: SessionController,
temp_dir: str,
state_json: str,
project_id: str,
test_name: str,
) -> Optional[Tuple[str, str, Dict[Any, Any]]]:
paging_token = None
while True: # Will break if paging_token is None after first search
result = sdk.search_sessions(
project_id=project_id,
sessions_query=dict(
name=dict(contains=test_name),
state_filter=["Running"],
paging=dict(count=20, paging_token=paging_token)))
for session in result.results:
logger.info(f"Found sessions {session.name}")
if not session.name.startswith(test_name):
continue
try:
session_state = get_remote_json_content(
temp_dir=temp_dir,
session_name=session.name,
remote_file=state_json,
session_controller=session_controller)
except Exception as exc:
raise RuntimeError(f"Could not get remote json content "
f"for session {session.name}") from exc
if session_state.get("test_name") == test_name:
return session.id, session.name, session_state
session_token = result.metadata.next_paging_token
if not session_token:
return None
def get_latest_running_command_id(sdk: AnyscaleSDK, session_id: str
) -> Tuple[Optional[str], Optional[bool]]:
scd_id = None
paging_token = None
success = None
while not scd_id:
result = sdk.list_session_commands(
session_id=session_id, paging_token=paging_token)
paging_token = result.metadata.next_paging_token
for cmd in result.results:
if not scd_id:
scd_id = cmd.id
completed = cmd.finished_at is not None
if completed:
if success is None:
success = True
success = success and cmd.status_code == 0
if not completed:
return cmd.id, None
return scd_id, success or False
def run_test_config(
local_dir: str,
project_id: str,
test_name: str,
test_config: Dict[Any, Any],
commit_url: str,
session_name: str = None,
smoke_test: bool = False,
no_terminate: bool = False,
kick_off_only: bool = False,
check_progress: bool = False,
upload_artifacts: bool = True,
keep_results_dir: bool = False,
app_config_id_override: Optional[str] = None,
) -> Dict[Any, Any]:
"""
Returns:
Dict with the following entries:
status (str): One of [finished, error, timeout]
command_link (str): Link to command (Anyscale web UI)
last_logs (str): Last logs (excerpt) to send to owner
artifacts (dict): Dict of artifacts
Key: Name
Value: S3 URL
"""
# Todo (mid-term): Support other cluster definitions
# (not only cluster configs)
cluster_config_rel_path = test_config["cluster"].get(
"cluster_config", None)
cluster_config = _load_config(local_dir, cluster_config_rel_path)
app_config_rel_path = test_config["cluster"].get("app_config", None)
app_config = _load_config(local_dir, app_config_rel_path)
compute_tpl_rel_path = test_config["cluster"].get("compute_template", None)
compute_tpl = _load_config(local_dir, compute_tpl_rel_path)
stop_event = multiprocessing.Event()
result_queue = multiprocessing.Queue()
if not session_name:
session_name = f"{test_name}_{int(time.time())}"
temp_dir = tempfile.mkdtemp()
# Result and state files
results_json = test_config["run"].get("results", None)
if results_json is None:
results_json = "/tmp/release_test_out.json"
state_json = test_config["run"].get("state", None)
if state_json is None:
state_json = "/tmp/release_test_state.json"
env_vars = {
"RAY_ADDRESS": os.environ.get("RAY_ADDRESS", "auto"),
"TEST_OUTPUT_JSON": results_json,
"TEST_STATE_JSON": state_json,
"IS_SMOKE_TEST": "1" if smoke_test else "0",
}
with open(os.path.join(local_dir, ".anyscale.yaml"), "wt") as f:
f.write(f"project_id: {project_id}")
os.chdir(local_dir)
# Setup interface
# Unfortunately, there currently seems to be no great way to
# transfer files with the Anyscale SDK.
# So we use the session controller instead.
sdk = AnyscaleSDK(auth_token=GLOBAL_CONFIG["ANYSCALE_CLI_TOKEN"])
session_controller = SessionController(
api_client=instantiate_api_client(
cli_token=GLOBAL_CONFIG["ANYSCALE_CLI_TOKEN"],
host=GLOBAL_CONFIG["ANYSCALE_HOST"],
),
anyscale_api_client=sdk.api_client,
)
timeout = test_config["run"].get("timeout", 1800)
if "RELEASE_OVERRIDE_TIMEOUT" in os.environ:
previous_timeout = timeout
timeout = int(os.environ.get("RELEASE_OVERRIDE_TIMEOUT", str(timeout)))
logger.warning(f"Release test timeout override: {timeout} "
f"(would have been {previous_timeout})")
# If a test is long running, timeout does not mean it failed
is_long_running = test_config["run"].get("long_running", False)
build_id_override = None
if test_config["run"].get("use_connect"):
autosuspend_mins = test_config["run"].get("autosuspend_mins", 5)
assert not kick_off_only, \
"Unsupported for running with Anyscale connect."
if app_config_id_override is not None:
logger.info(
"Using connect and an app config override, waiting until "
"build finishes so we can fetch the app config in order to "
"install its pip packages locally.")
build_id_override = wait_for_build_or_raise(
sdk, app_config_id_override)
response = sdk.get_cluster_environment_build(build_id_override)
app_config = response.result.config_json
install_app_config_packages(app_config)
install_matching_ray()
elif "autosuspend_mins" in test_config["run"]:
raise ValueError(
"'autosuspend_mins' is only supported if 'use_connect' is True.")
# Add information to results dict
def _update_results(results: Dict):
if "last_update" in results:
results["last_update_diff"] = time.time() - results["last_update"]
if smoke_test:
results["smoke_test"] = True
def _process_finished_command(session_controller: SessionController,
scd_id: str,
results: Optional[Dict] = None,
runtime: int = None,
commit_url: str = None,
session_url: str = None):
logger.info("Command finished successfully.")
if results_json:
results = results or get_remote_json_content(
temp_dir=temp_dir,
session_name=session_name,
remote_file=results_json,
session_controller=session_controller,
)
else:
results = {"passed": 1}
_update_results(results)
if scd_id:
logs = get_command_logs(session_controller, scd_id,
test_config.get("log_lines", 50))
else:
logs = "No command found to fetch logs for"
if upload_artifacts:
saved_artifacts = pull_artifacts_and_store_in_cloud(
temp_dir=temp_dir,
logs=logs, # Also save logs in cloud
session_name=session_name,
test_name=test_name,
artifacts=test_config.get("artifacts", {}),
session_controller=session_controller,
)
logger.info("Fetched results and stored on the cloud. Returning.")
else:
saved_artifacts = {}
logger.info("Usually I would have fetched the results and "
"artifacts and stored them on S3.")
# Add these metadata here to avoid changing SQL schema.
results["_runtime"] = runtime
results["_session_url"] = session_url
results["_commit_url"] = commit_url
results["_stable"] = test_config.get("stable", True)
result_queue.put(
State(
"END",
time.time(),
{
"status": "finished",
"last_logs": logs,
"results": results,
"artifacts": saved_artifacts,
},
))
# When running the test script in client mode, the finish command is a
# completed local process.
def _process_finished_client_command(returncode: int, logs: str):
if upload_artifacts:
saved_artifacts = pull_artifacts_and_store_in_cloud(
temp_dir=temp_dir,
logs=logs, # Also save logs in cloud
session_name=session_name,
test_name=test_name,
artifacts=None,
session_controller=None,
)
logger.info("Stored results on the cloud. Returning.")
else:
saved_artifacts = {}
logger.info("Usually I would have fetched the results and "
"artifacts and stored them on S3.")
if results_json:
results = get_local_json_content(local_file=results_json, )
else:
results = {
"passed": int(returncode == 0),
}
results["returncode"] = returncode
_update_results(results)
result_queue.put(
State(
"END",
time.time(),
{
"status": "finished",
"last_logs": logs,
"results": results,
"artifacts": saved_artifacts,
},
))
def _run(logger):
# These values will be set as the test runs.
session_url = None
runtime = None
anyscale.conf.CLI_TOKEN = GLOBAL_CONFIG["ANYSCALE_CLI_TOKEN"]
test_uses_ray_connect = test_config["run"].get("use_connect")
session_id = None
scd_id = None
try:
# First, look for running sessions
session_id = search_running_session(sdk, project_id, session_name)
compute_tpl_name = None
app_config_id = app_config_id_override
app_config_name = None
build_id = build_id_override
if not session_id:
logger.info("No session found.")
# Start session
session_options = dict(
name=session_name, project_id=project_id)
if cluster_config is not None:
logging.info("Starting session with cluster config")
cluster_config_str = json.dumps(cluster_config)
session_options["cluster_config"] = cluster_config_str
session_options["cloud_id"] = (
GLOBAL_CONFIG["ANYSCALE_CLOUD_ID"], )
session_options["uses_app_config"] = False
else:
logging.info("Starting session with app/compute config")
# Find/create compute template
compute_tpl_id, compute_tpl_name = \
create_or_find_compute_template(
sdk, project_id, compute_tpl)
logger.info(f"Link to compute template: "
f"{anyscale_compute_tpl_url(compute_tpl_id)}")
# Find/create app config
if app_config_id is None:
(
app_config_id,
app_config_name,
) = create_or_find_app_config(sdk, project_id,
app_config)
else:
logger.info(
f"Using override app config {app_config_id}")
app_config_name = sdk.get_app_config(
app_config_id).result.name
if build_id is None:
# We might have already retrieved the build ID when
# installing app config packages locally if using
# connect, so only get the build ID if it's not set.
build_id = wait_for_build_or_raise(sdk, app_config_id)
session_options["compute_template_id"] = compute_tpl_id
session_options["build_id"] = build_id
session_options["uses_app_config"] = True
# Start session
session_id = create_and_wait_for_session(
sdk=sdk,
stop_event=stop_event,
session_name=session_name,
session_options=session_options,
)
prepare_command = test_config["run"].get("prepare")
# Write test state json
test_state_file = os.path.join(local_dir, "test_state.json")
with open(test_state_file, "wt") as f:
json.dump({
"start_time": time.time(),
"test_name": test_name
}, f)
if prepare_command or not test_uses_ray_connect:
if test_uses_ray_connect:
logger.info("Found a prepare command, so pushing it "
"to the session.")
# Rsync up
logger.info("Syncing files to session...")
session_controller.push(
session_name=session_name,
source=None,
target=None,
config=None,
all_nodes=False,
)
logger.info("Syncing test state to session...")
session_controller.push(
session_name=session_name,
source=test_state_file,
target=state_json,
config=None,
all_nodes=False,
)
session_url = anyscale_session_url(
project_id=GLOBAL_CONFIG["ANYSCALE_PROJECT"],
session_id=session_id)
_check_stop(stop_event, "file_sync")
# Optionally run preparation command
if prepare_command:
logger.info(
f"Running preparation command: {prepare_command}")
scd_id, result = run_session_command(
sdk=sdk,
session_id=session_id,
cmd_to_run=prepare_command,
result_queue=result_queue,
env_vars=env_vars,
state_str="CMD_PREPARE")
_, _ = wait_for_session_command_to_complete(
result,
sdk=sdk,
scd_id=scd_id,
stop_event=stop_event,
state_str="CMD_PREPARE")
if test_uses_ray_connect:
script_args = test_config["run"].get("args", [])
if smoke_test:
script_args += ["--smoke-test"]
min_workers = 0
for node_type in compute_tpl["worker_node_types"]:
min_workers += node_type["min_workers"]
# Build completed, use job timeout
result_queue.put(State("CMD_RUN", time.time(), None))
returncode, logs = run_job(
cluster_name=session_name,
compute_tpl_name=compute_tpl_name,
cluster_env_name=app_config_name,
job_name=session_name,
min_workers=min_workers,
script=test_config["run"]["script"],
script_args=script_args,
env_vars=env_vars,
autosuspend=autosuspend_mins)
_process_finished_client_command(returncode, logs)
return
# Run release test command
cmd_to_run = test_config["run"]["script"] + " "
args = test_config["run"].get("args", [])
if args:
cmd_to_run += " ".join(args) + " "
if smoke_test:
cmd_to_run += " --smoke-test"
scd_id, result = run_session_command(
sdk=sdk,
session_id=session_id,
cmd_to_run=cmd_to_run,
result_queue=result_queue,
env_vars=env_vars,
state_str="CMD_RUN")
if not kick_off_only:
_, runtime = wait_for_session_command_to_complete(
result,
sdk=sdk,
scd_id=scd_id,
stop_event=stop_event,
state_str="CMD_RUN")
_process_finished_command(
session_controller=session_controller,
scd_id=scd_id,
runtime=runtime,
session_url=session_url,
commit_url=commit_url)
else:
result_queue.put(
State("END", time.time(), {
"status": "kickoff",
"last_logs": ""
}))
except (ReleaseTestTimeoutError, Exception) as e:
logger.error(e, exc_info=True)
logs = str(e)
if scd_id is not None:
try:
logs = logs + "; Command logs:" + get_command_logs(
session_controller, scd_id,
test_config.get("log_lines", 50))
except Exception as e2:
logger.error(e2, exc_info=True)
# Long running tests are "finished" successfully when
# timed out
if isinstance(e, ReleaseTestTimeoutError) and is_long_running:
_process_finished_command(
session_controller=session_controller, scd_id=scd_id)
else:
timeout_type = ""
runtime = None
if isinstance(e, CommandTimeoutError):
timeout_type = "timeout"
runtime = 0
elif (isinstance(e, PrepareCommandTimeoutError)
or isinstance(e, FileSyncTimeoutError)
or isinstance(e, SessionTimeoutError)
or isinstance(e, PrepareCommandRuntimeError)
or isinstance(e, AppConfigBuildFailure)):
timeout_type = "infra_timeout"
runtime = None
elif isinstance(e, RuntimeError):
timeout_type = "runtime_error"
runtime = 0
else:
timeout_type = "unknown timeout"
runtime = None
# Add these metadata here to avoid changing SQL schema.
results = {}
results["_runtime"] = runtime
results["_session_url"] = session_url
results["_commit_url"] = commit_url
results["_stable"] = test_config.get("stable", True)
result_queue.put(
State(
"END", time.time(), {
"status": timeout_type,
"last_logs": logs,
"results": results
}))
finally:
if no_terminate:
logger.warning(
"`no_terminate` is set to True, so the session will "
"*not* be terminated!")
else:
_cleanup_session(sdk, session_id)
def _check_progress(logger):
anyscale.conf.CLI_TOKEN = GLOBAL_CONFIG["ANYSCALE_CLI_TOKEN"]
should_terminate = False
session_id = None
scd_id = None
try:
existing_session = find_session_by_test_name(
sdk=sdk,
session_controller=session_controller,
temp_dir=temp_dir,
state_json=state_json,
project_id=project_id,
test_name=test_name)
if existing_session is None:
logger.info(f"Found no existing session for {test_name}")
result_queue.put(
State("END", time.time(), {
"status": "nosession",
"last_logs": ""
}))
return
session_id, session_name, session_state = existing_session
logger.info(f"Found existing session for {test_name}: "
f"{session_name}")
scd_id, success = get_latest_running_command_id(
sdk=sdk, session_id=session_id)
latest_result = get_remote_json_content(
temp_dir=temp_dir,
session_name=session_name,
remote_file=results_json,
session_controller=session_controller,
)
# Fetch result json and check if it has been updated recently
result_time_key = test_config["run"].get("time_key", None)
maximum_update_delay = test_config["run"].get(
"max_update_delay", None)
if result_time_key and maximum_update_delay:
last_update = latest_result.get(result_time_key, None)
if not last_update:
result_queue.put(
State(
"END", time.time(), {
"status": "error",
"last_logs": f"Test did not store "
f"{result_time_key} in the "
f"results json."
}))
return
delay = time.time() - last_update
logger.info(f"Last update was at {last_update:.2f}. "
f"This was {delay:.2f} seconds ago "
f"(maximum allowed: {maximum_update_delay})")
if delay > maximum_update_delay:
raise RuntimeError(
f"Test did not update the results json within "
f"the last {maximum_update_delay} seconds.")
if time.time() - session_state["start_time"] > timeout:
# Long running test reached timeout
logger.info(
f"Test command reached timeout after {timeout} seconds")
_process_finished_command(
session_controller=session_controller,
scd_id=scd_id,
results=latest_result)
should_terminate = True
elif success:
logger.info("All commands finished.")
_process_finished_command(
session_controller=session_controller,
scd_id=scd_id,
results=latest_result)
should_terminate = True
else:
rest_time = timeout - time.time() + session_state["start_time"]
logger.info(f"Test command should continue running "
f"for {rest_time} seconds")
result_queue.put(
State("END", time.time(), {
"status": "kickoff",
"last_logs": "Test is still running"
}))
except Exception as e:
logger.error(e, exc_info=True)
logs = str(e)
if scd_id is not None:
try:
logs = get_command_logs(session_controller, scd_id,
test_config.get("log_lines", 50))
logs += f"\n{str(e)}"
except Exception as e2:
logger.error(e2, exc_info=True)
result_queue.put(
State("END", time.time(), {
"status": "error",
"last_logs": logs
}))
should_terminate = True
finally:
if should_terminate:
logger.warning("Terminating session")
_cleanup_session(sdk, session_id)
if not check_progress:
process = multiprocessing.Process(target=_run, args=(logger, ))
else:
process = multiprocessing.Process(
target=_check_progress, args=(logger, ))
build_timeout = test_config["run"].get("build_timeout", 1800)
prepare_timeout = test_config["run"].get("prepare_timeout", timeout)
project_url = anyscale_project_url(
project_id=GLOBAL_CONFIG["ANYSCALE_PROJECT"])
logger.info(f"Link to project: {project_url}")
msg = f"This will now run test {test_name}."
if smoke_test:
msg += " This is a smoke test."
if is_long_running:
msg += " This is a long running test."
logger.info(msg)
logger.info(f"Starting process with timeout {timeout} "
f"(prepare timeout {prepare_timeout}, "
f"build timeout {build_timeout})")
process.start()
# The timeout time will be updated after the build finished
# Build = App config + compute template build and session start
timeout_time = time.time() + build_timeout
result = {}
while process.is_alive():
try:
state: State = result_queue.get(timeout=1)
except (Empty, TimeoutError):
if time.time() > timeout_time:
stop_event.set()
logger.warning("Process timed out.")
if not is_long_running:
logger.warning("Terminating process in 10 seconds.")
time.sleep(10)
logger.warning("Terminating process now.")
process.terminate()
else:
logger.info("Process is long running. Give 2 minutes to "
"fetch result and terminate.")
start_terminate = time.time()
while time.time(
) < start_terminate + 120 and process.is_alive():
time.sleep(1)
if process.is_alive():
logger.warning("Terminating forcefully now.")
process.terminate()
else:
logger.info("Long running results collected.")
break
continue
if not isinstance(state, State):
raise RuntimeError(f"Expected `State` object, got {result}")
if state.state == "CMD_PREPARE":
# Reset timeout after build finished
timeout_time = state.timestamp + prepare_timeout
if state.state == "CMD_RUN":
# Reset timeout after prepare command or build finished
timeout_time = state.timestamp + timeout
elif state.state == "END":
result = state.data
break
while not result_queue.empty():
state = result_queue.get_nowait()
result = state.data
logger.info("Final check if everything worked.")
try:
result.setdefault("status", "error (status not found)")
except (TimeoutError, Empty):
result = {"status": "timeout", "last_logs": "Test timed out."}
logger.info(f"Final results: {result}")
log_results_and_artifacts(result)
if not keep_results_dir:
logger.info(f"Removing results dir {temp_dir}")
shutil.rmtree(temp_dir)
else:
# Write results.json
with open(os.path.join(temp_dir, "results.json"), "wt") as fp:
json.dump(result, fp)
out_dir = os.path.expanduser(GLOBAL_CONFIG["RELEASE_RESULTS_DIR"])
logger.info(f"Moving results dir {temp_dir} to persistent location "
f"{out_dir}")
shutil.rmtree(out_dir, ignore_errors=True)
shutil.copytree(temp_dir, out_dir)
logger.info(f"Dir contents: {os.listdir(out_dir)}")
return result
def run_test(test_config_file: str,
test_name: str,
project_id: str,
commit_url: str,
category: str = "unspecified",
smoke_test: bool = False,
no_terminate: bool = False,
kick_off_only: bool = False,
check_progress: bool = False,
report: bool = True,
keep_results_dir: bool = False,
session_name: Optional[str] = None,
app_config_id_override=None) -> Dict[str, Any]:
with open(test_config_file, "rt") as f:
test_configs = yaml.safe_load(f)
test_config_dict = {}
for test_config in test_configs:
name = test_config.pop("name")
test_config_dict[name] = test_config
if test_name not in test_config_dict:
raise ValueError(
f"Test with name `{test_name}` not found in test config file "
f"at `{test_config_file}`.")
test_config = test_config_dict[test_name]
if smoke_test and "smoke_test" in test_config:
smoke_test_config = test_config.pop("smoke_test")
test_config = _deep_update(test_config, smoke_test_config)
local_dir = os.path.dirname(test_config_file)
if "local_dir" in test_config:
# local_dir is relative to test_config_file
local_dir = os.path.join(local_dir, test_config["local_dir"])
if test_config["run"].get("use_connect"):
assert not kick_off_only, \
"--kick-off-only is unsupported when running with " \
"Anyscale connect."
assert not check_progress, \
"--check is unsupported when running with Anyscale connect."
if test_config.get("artifacts", {}):
logger.error(
"Saving artifacts are not yet supported when running with "
"Anyscale connect.")
result = run_test_config(
local_dir,
project_id,
test_name,
test_config,
commit_url,
session_name=session_name,
smoke_test=smoke_test,
no_terminate=no_terminate,
kick_off_only=kick_off_only,
check_progress=check_progress,
upload_artifacts=report,
keep_results_dir=keep_results_dir,
app_config_id_override=app_config_id_override)
status = result.get("status", "invalid")
if kick_off_only:
if status != "kickoff":
raise RuntimeError("Error kicking off test.")
logger.info("Kicked off test. It's now up to the `--check` "
"part of the script to track its process.")
return {}
else:
# `--check` or no kick off only
if status == "nosession":
logger.info(f"No running session found for test {test_name}, so "
f"assuming everything is fine.")
return {}
if status == "kickoff":
logger.info(f"Test {test_name} is still running.")
return {}
last_logs = result.get("last_logs", "No logs.")
test_suite = os.path.basename(test_config_file).replace(".yaml", "")
report_kwargs = dict(
test_suite=test_suite,
test_name=test_name,
status=status,
last_logs=last_logs,
results=result.get("results", {}),
artifacts=result.get("artifacts", {}),
category=category,
)
if report:
report_result(**report_kwargs)
else:
logger.info(f"Usually I would now report the following results:\n"
f"{report_kwargs}")
if has_errored(result):
raise RuntimeError(last_logs)
return report_kwargs
if __name__ == "__main__":
parser = argparse.ArgumentParser(
description=__doc__,
formatter_class=argparse.RawDescriptionHelpFormatter)
parser.add_argument(
"--test-config", type=str, required=True, help="Test config file")
parser.add_argument("--test-name", type=str, help="Test name in config")
parser.add_argument(
"--ray-wheels", required=False, type=str, help="URL to ray wheels")
parser.add_argument(
"--no-terminate",
action="store_true",
default=False,
help="Don't terminate session after failure")
parser.add_argument(
"--no-report",
action="store_true",
default=False,
help="Do not report any results or upload to S3")
parser.add_argument(
"--kick-off-only",
action="store_true",
default=False,
help="Kick off only (don't wait for command to finish)")
parser.add_argument(
"--check",
action="store_true",
default=False,
help="Check (long running) status")
parser.add_argument(
"--keep-results-dir",
action="store_true",
default=False,
help="Keep results in directory (named RELEASE_RESULTS_DIR), e.g. "
"for Buildkite artifact upload.")
parser.add_argument(
"--category",
type=str,
default="unspecified",
help="Category name, e.g. `release-1.3.0` (will be saved in database)")
parser.add_argument(
"--smoke-test", action="store_true", help="Finish quickly for testing")
parser.add_argument(
"--session-name",
required=False,
type=str,
help="Name of the session to run this test.")
parser.add_argument(
"--app-config-id-override",
required=False,
type=str,
help=("An app config ID, which will override the test config app "
"config."))
args, _ = parser.parse_known_args()
if not GLOBAL_CONFIG["ANYSCALE_PROJECT"]:
raise RuntimeError(
"You have to set the ANYSCALE_PROJECT environment variable!")
    ray_wheels = args.ray_wheels or os.environ.get("RAY_WHEELS", "")
    maybe_fetch_api_token()
    # Make sure `url` is always defined, even when neither branch below sets it
    # (e.g. when only running `--check`).
    url = None
if ray_wheels:
logger.info(f"Using Ray wheels provided from URL/commit: "
f"{ray_wheels}")
url = commit_or_url(str(ray_wheels))
# Overwrite with actual URL
os.environ["RAY_WHEELS"] = url
elif not args.check:
url = find_ray_wheels(
GLOBAL_CONFIG["RAY_REPO"],
GLOBAL_CONFIG["RAY_BRANCH"],
GLOBAL_CONFIG["RAY_VERSION"],
)
if not url:
raise RuntimeError(f"Could not find wheels for "
f"Ray {GLOBAL_CONFIG['RAY_VERSION']}, "
f"branch {GLOBAL_CONFIG['RAY_BRANCH']}")
# RAY_COMMIT is set by commit_or_url and find_ray_wheels
populate_wheels_sanity_check(os.environ.get("RAY_COMMIT", ""))
test_config_file = os.path.abspath(os.path.expanduser(args.test_config))
result_dict = run_test(
test_config_file=test_config_file,
test_name=args.test_name,
project_id=GLOBAL_CONFIG["ANYSCALE_PROJECT"],
commit_url=url,
category=args.category,
smoke_test=args.smoke_test,
no_terminate=args.no_terminate or args.kick_off_only,
kick_off_only=args.kick_off_only,
check_progress=args.check,
report=not args.no_report,
session_name=args.session_name,
keep_results_dir=args.keep_results_dir,
app_config_id_override=args.app_config_id_override,
)
if result_dict:
# If we get a result dict, check if any alerts should be raised
from alert import SUITE_TO_FN, default_handle_result
logger.info("Checking if results are valid...")
handle_result_kwargs = result_dict.copy()
handle_result_kwargs["created_on"] = None
test_suite = handle_result_kwargs.get("test_suite", None)
test_name = handle_result_kwargs.get("test_name", None)
category = handle_result_kwargs.get("category", None)
handle_fn = SUITE_TO_FN.get(test_suite, None)
if not handle_fn:
logger.warning(f"No handle for suite {test_suite}")
alert = default_handle_result(**handle_result_kwargs)
else:
alert = handle_fn(**handle_result_kwargs)
if alert:
# If we get an alert, the test failed.
raise RuntimeError(alert)
else:
logger.info(f"No alert raised for test {test_suite}/{test_name} "
f"({category}) - the test successfully passed!")
|
py
|
1a55f580c13ca5cf9be25cdf1210ca376a577eb9
|
#Kunal Gautam
#Codewars : @Kunalpod
#Problem name: Complete The Pattern #3 (Horizontal Image of #2)
#Problem level: 7 kyu
def pattern(n):
return '\n'.join(''.join(str(i) for i in reversed(list(range(x, n+1)))) for x in list(range(1, n+1))[::-1])
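# For illustration (not part of the kata submission): x runs from n down to 1 and
# each row is range(x, n+1) reversed, so pattern(4) returns "4\n43\n432\n4321":
# 4
# 43
# 432
# 4321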
|
py
|
1a55f5814d06c54cb360436d3f1240de1a21d455
|
# DO NOT EDIT, FILE BEING WRITTEN BY FIRMWARE
import original.main
original.main.run()
|
py
|
1a55f870d30ccb996f23cb8edd7c97bf55c44df5
|
# ----------------------------------------------------------------------------------------------------------------------
# -----------------------------------------Copy and Paste Comment Bars From Here----------------------------------------
# ----------------------------------------------------------------------------------------------------------------------
# --------------------------------Multiplication code in 6 lines--------------------------------------------------------
# numbers_to_multiply = [2,4,6,8]
#
# def multiplier_by_two (number1):
# result = number1*2
# print(result)
#
# for i in numbers_to_multiply:
# multiplier_by_two(i)
# ----------------------------------Use list comprehension to create a list of the results in 2 lines-------------------
# same_as_above = [i * 2 for i in range(1, 9) if i % 2 == 0]
#
# print(f'The results are {", ".join(str(i) for i in same_as_above)}')
# ------------------------------------------------F string play---------------------------------------------------------
#
# name = input('What is your name? ')
#
# age = input('How old are you? ')
#
# print(f'Hi {name}! You are {100 - int(age)} years away from being 100!')
# -----------------------------------------------Map and lists and such-------------------------------------------------
# myList = [1, 3, 5, 7, 9]
#
# print(f'My list is {len(myList)} item(s) long')
# print(myList)
# ---------------------------------------------Trying to use OOP--------------------------------------------------------
class Human:
def __init__(self, name, age, country):
self.name = name
self.age = age
self.country = country
def introduction(self, language):
if language in ["English", "english", "e", "eng", "ENG", "American", "american"]:
return print(f"Hi! My name is {self.name}, I am {self.age} years old and I am from {self.country}")
elif language in ["Japanese", "japanese", "jp", "JP"]:
return print(f"はじめまして!{self.name}と申します。{self.age}歳です。{self.country}から来ました。宜しくお願いします!")
seth = Human("Seth", 29, "Japan")
loser = Human("Loser", 30, "Britain")
loser.introduction("JP")
seth.introduction("ENG")
|
py
|
1a55f8b015a20e6949b2804078a228e3800d003d
|
# Copyright (C) 2019 The Raphielscape Company LLC.
#
# Licensed under the Raphielscape Public License, Version 1.d (the "License");
# you may not use this file except in compliance with the License.
# All Credits to https://t.me/azrim89 for timestamp.
# All Credits to https://t.me/Devp73 for Offline stamps..
#
""" Userbot module which contains afk-related commands """
from datetime import datetime, timedelta
import time
from random import randint
from telethon.events import StopPropagation
from telethon.tl.functions.account import UpdateProfileRequest
from userbot import (AFKREASON, CMD_HELP, BOTLOG, BOTLOG_CHATID, PM_AUTO_BAN,
bot)
from userbot.events import register
# =================================================================
global USER_AFK # pylint:disable=E0602
global afk_time # pylint:disable=E0602
global afk_start
global afk_end
USER_AFK = {}
afk_time = None
afk_start = {}
ISAFK = False
USERS = {}
COUNT_MSG = 0
# =================================================================
@register(outgoing=True, pattern="^.afk(?: |$)(.*)", disable_errors=True)
async def set_afk(afk_e):
""" For .afk command, allows you to inform people that you are afk when they message you """
afk_e.text
string = afk_e.pattern_match.group(1)
global ISAFK
global AFKREASON
global USER_AFK # pylint:disable=E0602
global afk_time # pylint:disable=E0602
global afk_start
global afk_end
user = await bot.get_me()
global reason
USER_AFK = {}
afk_time = None
afk_end = {}
start_1 = datetime.now()
afk_start = start_1.replace(microsecond=0)
if string:
AFKREASON = string
await afk_e.edit(f"**Gonna go AFK. I'll be right back.**")
else:
await afk_e.edit("**Gonna go AFK. I'll be right back.**")
if user.last_name:
await afk_e.client(UpdateProfileRequest(first_name=user.first_name, last_name=user.last_name + " [OFF]"))
else:
await afk_e.client(UpdateProfileRequest(first_name=user.first_name, last_name=" [OFF]"))
if BOTLOG:
await afk_e.client.send_message(BOTLOG_CHATID, "#AFK\nYou went AFK!")
ISAFK = True
afk_time = datetime.now() # pylint:disable=E0602
raise StopPropagation
@register(outgoing=True, pattern="^.unafk(?: |$)(.*)", disable_errors=True)
async def type_afk_is_not_true(notafk):
""" This sets your status as not afk automatically when you write something while being afk """
global ISAFK
global COUNT_MSG
global USERS
global AFKREASON
global USER_AFK # pylint:disable=E0602
global afk_time # pylint:disable=E0602
global afk_start
global afk_end
user = await bot.get_me()
last = user.last_name
if last and last.endswith(" [OFF]"):
raw_last = f"{last}"
last1 = raw_last.replace(" [OFF]", "")
else:
last1 = ""
back_alive = datetime.now()
afk_end = back_alive.replace(microsecond=0)
if ISAFK:
ISAFK = False
msg = await notafk.edit("**I'm back! Did you guys miss me?**")
time.sleep(3)
await msg.delete()
await notafk.client(UpdateProfileRequest(first_name=user.first_name, last_name=last1))
if BOTLOG:
await notafk.client.send_message(
BOTLOG_CHATID,
"You've recieved " + str(COUNT_MSG) + " messages from " +
str(len(USERS)) + " chats while you were away",
)
for i in USERS:
name = await notafk.client.get_entity(i)
name0 = str(name.first_name)
await notafk.client.send_message(
BOTLOG_CHATID,
"[" + name0 + "](tg://user?id=" + str(i) + ")" +
" sent you " + "`" + str(USERS[i]) + " messages`",
)
COUNT_MSG = 0
USERS = {}
AFKREASON = None
@register(incoming=True, disable_edited=True)
async def mention_afk(mention):
""" This function takes care of notifying the people who mention you that you are AFK."""
global COUNT_MSG
global USERS
global ISAFK
global USER_AFK # pylint:disable=E0602
global afk_time # pylint:disable=E0602
global afk_start
global afk_end
await bot.get_me()
back_alivee = datetime.now()
afk_end = back_alivee.replace(microsecond=0)
afk_since = "**a while ago**"
if mention.message.mentioned and not (await mention.get_sender()).bot:
if ISAFK:
now = datetime.now()
datime_since_afk = now - afk_time # pylint:disable=E0602
time = float(datime_since_afk.seconds)
days = time // (24 * 3600)
time = time % (24 * 3600)
hours = time // 3600
time %= 3600
minutes = time // 60
time %= 60
seconds = time
if days == 1:
afk_since = "**Yesterday**"
elif days > 1:
if days > 6:
                    date = now + timedelta(
                        days=-days, hours=-hours, minutes=-minutes)
                    afk_since = date.strftime("%A, %Y %B %d, %H:%M")
else:
                    wday = now + timedelta(days=-days)
afk_since = wday.strftime('%A')
elif hours > 1:
afk_since = f"`{int(hours)}h {int(minutes)}m`"
elif minutes > 0:
afk_since = f"`{int(minutes)}m {int(seconds)}s`"
else:
afk_since = f"`{int(seconds)}s`"
if mention.sender_id not in USERS:
if AFKREASON:
await mention.reply(f"**I've been AFK.** (Since {afk_since} ago.)\
\n**Reason:** `{AFKREASON}`")
else:
await mention.reply(f"**I've been AFK.** (Since {afk_since} ago.)")
USERS.update({mention.sender_id: 1})
COUNT_MSG = COUNT_MSG + 1
elif mention.sender_id in USERS:
if USERS[mention.sender_id] % randint(2, 4) == 0:
if AFKREASON:
await mention.reply(f"**I'm still AFK.** (Since {afk_since} ago.)\
\n**Reason:** `{AFKREASON}`")
else:
await mention.reply(f"**I'm still AFK.** (Since {afk_since} ago.)")
USERS[mention.sender_id] = USERS[mention.sender_id] + 1
COUNT_MSG = COUNT_MSG + 1
else:
USERS[mention.sender_id] = USERS[mention.sender_id] + 1
COUNT_MSG = COUNT_MSG + 1
@register(incoming=True, disable_errors=True)
async def afk_on_pm(sender):
""" Function which informs people that you are AFK in PM """
global ISAFK
global USERS
global COUNT_MSG
global USER_AFK # pylint:disable=E0602
global afk_time # pylint:disable=E0602
global afk_start
global afk_end
await bot.get_me()
back_alivee = datetime.now()
afk_end = back_alivee.replace(microsecond=0)
afk_since = "**a while ago**"
if sender.is_private and sender.sender_id != 777000 and not (
await sender.get_sender()).bot:
if PM_AUTO_BAN:
try:
from userbot.modules.sql_helper.pm_permit_sql import is_approved
apprv = is_approved(sender.sender_id)
except AttributeError:
apprv = True
else:
apprv = True
if apprv and ISAFK:
now = datetime.now()
datime_since_afk = now - afk_time # pylint:disable=E0602
time = float(datime_since_afk.seconds)
days = time // (24 * 3600)
time = time % (24 * 3600)
hours = time // 3600
time %= 3600
minutes = time // 60
time %= 60
seconds = time
if days == 1:
afk_since = "**yesterday**"
elif days > 1:
if days > 6:
                    date = now + timedelta(
                        days=-days, hours=-hours, minutes=-minutes)
                    afk_since = date.strftime("%A, %Y %B %d, %H:%M")
else:
                    wday = now + timedelta(days=-days)
afk_since = wday.strftime('%A')
elif hours > 1:
afk_since = f"`{int(hours)}h {int(minutes)}m`"
elif minutes > 0:
afk_since = f"`{int(minutes)}m {int(seconds)}s`"
else:
afk_since = f"`{int(seconds)}s`"
if sender.sender_id not in USERS:
if AFKREASON:
await sender.reply(f"**I've been AFK.** (Since {afk_since} ago.)\
\n**Reason:** `{AFKREASON}`")
else:
await sender.reply(f"**I've been AFK.** (Since {afk_since} ago.)")
USERS.update({sender.sender_id: 1})
COUNT_MSG = COUNT_MSG + 1
elif apprv and sender.sender_id in USERS:
if USERS[sender.sender_id] % randint(2, 4) == 0:
if AFKREASON:
await sender.reply(f"**I'm still AFK.** (Since {afk_since} ago.)\
\n**Reason:** `{AFKREASON}`")
else:
await sender.reply(f"**I'm still AFK.** (Since {afk_since} ago.)")
USERS[sender.sender_id] = USERS[sender.sender_id] + 1
COUNT_MSG = COUNT_MSG + 1
else:
USERS[sender.sender_id] = USERS[sender.sender_id] + 1
COUNT_MSG = COUNT_MSG + 1
CMD_HELP.update({
"afk":
"`.afk` [Optional Reason]\
\nUsage: Sets you as afk.\nReplies to anyone who tags/PM's \
you telling them that you are AFK(reason).\
\n\n`.unafk`\
\nBack from AFK state, anywhere.\
"
})
|
py
|
1a55fa37bbdc5941f299ced5b0c0b1b41ae54361
|
from ..base.Dat import Dat
class VerbWord():
def __init__(self, filename1, filename2):
self.__vmDat = Dat(filename=filename1)
self.__vdDat = Dat(filename=filename2)
self.__tagV = 'v'
def adjustTag(self, sentence):
if not self.__vmDat or not self.__vdDat:
return
for i in range(len(sentence) - 1):
if sentence[i].tag == self.__tagV and sentence[i + 1].tag == self.__tagV:
if self.__vmDat.match(sentence[i].word) != -1:
sentence[i].tag = 'vm'
elif self.__vdDat.match(sentence[i + 1].word) != -1:
sentence[i + 1].tag = 'vd'
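# A rough usage sketch (illustrative only; the "vm.dat"/"vd.dat" paths and the
# TaggedWord holder are assumptions, not part of this module):
#
#   class TaggedWord:
#       def __init__(self, word, tag):
#           self.word = word
#           self.tag = tag
#
#   vw = VerbWord("vm.dat", "vd.dat")
#   sentence = [TaggedWord("打算", "v"), TaggedWord("离开", "v")]
#   vw.adjustTag(sentence)  # the first of two adjacent verbs may be relabelled
#                           # 'vm', or the second 'vd', depending on the Dat lookups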
|
py
|
1a55fb0af5e631cc4faa07514e68dd576e3ca937
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from paper import Paper
class Page(Paper):
"""This class represents a single document page"""
__name__ = "Page"
def __init__(self):
Paper.__init__(self)
self.children = list()
def draw(self, context, hints):
Paper.draw(self, context)
for child in sorted(self.children, key=lambda child: child.z):
child.hints = hints # TODO Not here
child.draw(context)
def serialize(self):
text = "<object type=\"%s\">" % self.__name__
text += "<children>"
for child in self.children:
text += child.serialize()
text += "</children>"
text += "</object>"
return text
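# Illustrative only: a Page with no children serializes to
#   <object type="Page"><children></children></object>
# (Paper.__init__ is called without extra arguments above, so Page() suffices).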
|
py
|
1a55fc461d2d3f2b292a8afecd2df78884249b96
|
import collections
import json
import os
import shutil
import tempfile
import traceback
from functools import partial
from ice import steam_model
from ice import steam_paths
from ice import shortcuts
from ice import configuration
from ice.cli import CommandLineRunner
from ice.filesystem import FakeFilesystem
from ice.persistence.config_file_backing_store import ConfigFileBackingStore
from fixtures import SteamFixture, UserFixture
def json_to_shortcut(json):
for field in ["name", "exe", "startdir", "icon", "shortcut_path", "launch_options", "hidden", "allow_desktop_config", "open_vr", "last_play_time", "tags"]:
assert field in json
  return steam_model.Shortcut(
name = json.get("name").encode("UTF8"),
exe = json.get("exe").encode("UTF8"),
startdir = json.get("startdir").encode("UTF8"),
icon = json.get("icon").encode("UTF8"),
shortcut_path = json.get("shortcut_path").encode("UTF8"),
launch_options = json.get("launch_options").encode("UTF8"),
hidden = json.get("hidden"),
allow_desktop_config = json.get("allow_desktop_config"),
open_vr = json.get("open_vr"),
last_play_time = json.get("last_play_time"),
tags = json.get("tags")
)
class FakeEnvironment(object):
def __init__(self, file_path):
"""
Generates a new environment in which to run Integration tests.
`testdata_dir` refers to the base testdata directory, which individual tests
will then use to load their test-specific configurations.
"""
# We also need a sandbox to play in.
self.sandbox = FakeFilesystem(root = tempfile.mkdtemp())
# Need a dummy Steam installation for Ice to work with
# We'll put it in the `Steam` directory of our sandbox
self.steam_fixture = SteamFixture(os.path.join(self.sandbox.root, "Steam"))
# Create a list of user fixtures that consumers can populate
self.user_fixtures = []
# The testdata directory should be in the same directory as the tests
# themselves.
self.testdata_dir = os.path.join(os.path.dirname(file_path), "testdata")
assert os.path.exists(self.testdata_dir)
self.loaded_data = None
self.extra_args = []
def clean(self):
for user_fixture in self.user_fixtures:
user_fixture.tearDown()
self.steam_fixture.tearDown()
shutil.rmtree(self.sandbox.root)
def _use_config_file(self, file, location):
assert os.path.exists(location)
self.extra_args.append('--%s' % file)
self.extra_args.append(location)
def _test_config_path(self, directory, file):
return os.path.join(directory, '%s.txt' % file)
def _load_config_file_overrides(self, directory):
file_basenames = ['config', 'consoles', 'emulators']
filenames = map(lambda f: '%s.txt' % f, file_basenames)
for f in file_basenames:
self._use_config_file(f, self._test_config_path(directory, f))
def _load_roms_for_test(self, directory):
"""Takes the ROMs located in `directory/ROMs` and moves them into the
ROMs directory specified in the provided config.txt file."""
config_path = self._test_config_path(directory, 'config')
c = configuration.from_store(ConfigFileBackingStore(config_path))
target_roms_directory = self.sandbox.adjusted_path(c.roms_directory)
source_roms_directory = os.path.join(directory, 'ROMs')
shutil.copytree(source_roms_directory, target_roms_directory)
def _override_backups_directory(self, data_directory):
# TODO: Figure out a way to actually override this, so I can test that
# backups get made correctly.
pass
def _adjust_json_path(self, path):
return path.replace("%sb", self.sandbox.root)
def _adjust_shortcut_exe(self, shortcut):
    return steam_model.Shortcut(
name = shortcut.name,
exe = self._adjust_json_path(shortcut.exe),
startdir = shortcut.startdir,
icon = shortcut.icon,
shortcut_path ='',
launch_options = self._adjust_json_path(shortcut.launch_options.replace("/", os.sep)),
hidden = False,
allow_desktop_config = False,
open_vr = False,
last_play_time = 0,
tags = shortcut.tags,
)
def load_test_data(self, testdata):
"""
Reads the config.txt, consoles.txt, emulators.txt, shortcuts.vdf, and ROMs
folder from the provided testdata subdirectory and places it in the sandbox
such that it will be used by Ice the next time its run.
"""
assert self.loaded_data is None, "Can't load test data twice in a single test"
self.loaded_data = testdata
data_directory = os.path.join(self.testdata_dir, testdata)
assert os.path.exists(data_directory), "Can't load test data from a missing directory"
self._load_config_file_overrides(data_directory)
self._load_roms_for_test(data_directory)
self._override_backups_directory(data_directory)
def create_fake_user(self, uid=None):
fixture = UserFixture(self.steam_fixture, uid)
self.user_fixtures.append(fixture)
return fixture.uid
def load_shortcuts_from_json(self, filename):
expectations_path = os.path.join(self.testdata_dir, self.loaded_data, filename)
with open(expectations_path) as f:
expected_shortcuts_json = json.load(f)
return map(json_to_shortcut, expected_shortcuts_json)
def expected_shortcuts(self, filename="shortcuts-expected.json"):
"""Returns the shortcuts which the test expects will exist after executing"""
expected_shortcuts = self.load_shortcuts_from_json(filename)
return map(self._adjust_shortcut_exe, expected_shortcuts)
def set_user_shortcuts(self, uid, new_shortcuts):
    context = steam_model.LocalUserContext(self.steam_fixture.get_steam(), uid)
return shortcuts.set_shortcuts(context, new_shortcuts)
def user_shortcuts(self, uid):
    context = steam_model.LocalUserContext(self.steam_fixture.get_steam(), uid)
return shortcuts.get_shortcuts(context)
def run_command(self, *args):
""""
Runs the command specified by `args`, where doing
run_command("list", "consoles", "--json")
is equivalent to running the command
./ice.py list consoles --json
"""
# Turn `args` into a list, like `sys.argv` represents them
args = list(args)
# Add the 'ice.py' at the beginning of the list, which `run` will then
# promptly ignore
args.insert(0, "ice.py")
args.extend(self.extra_args)
# Run the command
try:
runner = CommandLineRunner(self.steam_fixture.get_steam(), self.sandbox)
runner.run(args)
success = True
except Exception as e:
success = False
      print(e)
|
py
|
1a55fc7a869f78e3a86112f0e80d25915b7f640d
|
##############################################################################
# Copyright 2016-2019 Rigetti Computing
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
##############################################################################
r"""
Standard gate set, as detailed in the Quil whitepaper (arXiv:1608.03355v2)
Currently includes:
I - identity :math:`\begin{pmatrix} 1 & 0 \\ 0 & 1 \end{pmatrix}`
X - Pauli-X :math:`\begin{pmatrix} 0 & 1 \\ 1 & 0 \end{pmatrix}`
Y - Pauli-Y :math:`\begin{pmatrix} 0 & -i \\ i & 0 \end{pmatrix}`
Z - Pauli-Z :math:`\begin{pmatrix} 1 & 0 \\ 0 & -1 \end{pmatrix}`
H - Hadamard
:math:`\frac{1}{\sqrt{2}} \begin{pmatrix} 1 & 1 \\ 1 & -1 \end{pmatrix}`
S - PHASE(pi/2)
:math:`\begin{pmatrix} 1 & 0 \\ 0 & i \end{pmatrix}`
T - PHASE(pi/4)
:math:`\begin{pmatrix} 1 & 0 \\ 0 & e^{i \pi / 4} \end{pmatrix}`
PHASE(:math:`\phi`) - PHASE
:math:`\begin{pmatrix} 1 & 0 \\ 0 & e^{i \phi} \end{pmatrix}`
RX(:math:`\phi`) - RX
:math:`\begin{pmatrix} \cos(\phi / 2) & -i \sin(\phi/2) \\
-i \sin(\phi/2) & \cos(\phi/2) \end{pmatrix}`
RY(:math:`\phi`) - RY
:math:`\begin{pmatrix} \cos(\phi / 2) & -\sin(\phi / 2) \\
\sin(\phi/2) & \cos(\phi/2) \end{pmatrix}`
RZ(:math:`\phi`) - RZ
:math:`\begin{pmatrix} \cos(\phi/2) - i \sin(\phi/2) & 0 \\
0 & \cos(\phi/2) + i \sin(\phi/2) \end{pmatrix}`
CZ - controlled-Z
:math:`P_0 \otimes I + P_1 \otimes Z = \begin{pmatrix} 1&0&0&0 \\ 0&1&0&0 \\
0&0&1&0 \\ 0&0&0&-1 \end{pmatrix}`
CNOT - controlled-X / controlled-NOT
:math:`P_0 \otimes I + P_1 \otimes X = \begin{pmatrix} 1&0&0&0 \\ 0&1&0&0 \\
0&0&0&1 \\ 0&0&1&0 \end{pmatrix}`
CCNOT - double-controlled-X
:math:`P_0 \otimes P_0 \otimes I + P_0 \otimes P_1 \otimes I + P_1 \otimes P_0 \otimes I
+ P_1 \otimes P_1 \otimes X`
CPHASE00(:math:`\phi`) - controlled-phase-on-|00>
:math:`\text{diag}(e^{i \phi}, 1, 1, 1,)`
CPHASE01(:math:`\phi`) - controlled-phase-on-|01>
:math:`\text{diag}(1, e^{i \phi}, 1, 1,)`
CPHASE10(:math:`\phi`) - controlled-phase-on-|10>
:math:`\text{diag}(1, 1, e^{i \phi}, 1)`
CPHASE(:math:`\phi`) - controlled-phase-on-|11>
:math:`\text{diag}(1, 1, 1, e^{i \phi})`
SWAP - swap
:math:`\begin{pmatrix} 1&0&0&0 \\ 0&0&1&0 \\ 0&1&0&0 \\ 0&0&0&1 \end{pmatrix}`
CSWAP - controlled-swap
:math:`P_0 \otimes I_2 + P_1 \otimes \text{SWAP}`
ISWAP - i-phase-swap
:math:`\begin{pmatrix} 1&0&0&0 \\ 0&0&i&0 \\ 0&i&0&0 \\ 0&0&0&1 \end{pmatrix}`
PSWAP(:math:`\phi`) - phi-phase-swap
:math:`\begin{pmatrix} 1&0&0&0 \\ 0&0&e^{i\phi}&0 \\ 0&e^{i\phi}&0&0 \\ 0&0&0&1 \end{pmatrix}`
XY(:math:`\phi`) - XY-interaction
:math:`\begin{pmatrix} 1&0&0&0 \\
0&\cos(\phi/2)&i\sin(\phi/2)&0 \\
0&i\sin(\phi/2)&\cos(\phi/2)&0 \\
0&0&0&1 \end{pmatrix}`
Specialized gates / internal utility gates:
BARENCO(:math:`\alpha, \phi, \theta`) - Barenco gate
:math:`\begin{pmatrix} 1&0&0&0 \\ 0&1&0&0 \\ 0&0&e^{i\phi} \cos\theta & -i e^{i(\alpha-\phi)}
\sin\theta \\ 0&0&-i e^{i(\alpha+\phi)} \sin\theta & e^{i\alpha} \cos\theta \end{pmatrix}`
P0 - project-onto-zero
:math:`\begin{pmatrix} 1 & 0 \\ 0 & 0 \end{pmatrix}`
P1 - project-onto-one
:math:`\begin{pmatrix} 0 & 0 \\ 0 & 1 \end{pmatrix}`
"""
import cmath
from typing import Tuple
import numpy as np
I = np.array([[1.0, 0.0], [0.0, 1.0]])
X = np.array([[0.0, 1.0], [1.0, 0.0]])
Y = np.array([[0.0, 0.0 - 1.0j], [0.0 + 1.0j, 0.0]])
Z = np.array([[1.0, 0.0], [0.0, -1.0]])
H = (1.0 / np.sqrt(2.0)) * np.array([[1.0, 1.0], [1.0, -1.0]])
S = np.array([[1.0, 0.0], [0.0, 1.0j]])
T = np.array([[1.0, 0.0], [0.0, cmath.exp(1.0j * np.pi / 4.0)]])
def PHASE(phi: float) -> np.ndarray:
return np.array([[1.0, 0.0], [0.0, np.exp(1j * phi)]])
def RX(phi: float) -> np.ndarray:
return np.array(
[[np.cos(phi / 2.0), -1j * np.sin(phi / 2.0)], [-1j * np.sin(phi / 2.0), np.cos(phi / 2.0)]]
)
def RY(phi: float) -> np.ndarray:
return np.array(
[[np.cos(phi / 2.0), -np.sin(phi / 2.0)], [np.sin(phi / 2.0), np.cos(phi / 2.0)]]
)
def RZ(phi: float) -> np.ndarray:
return np.array(
[
[np.cos(phi / 2.0) - 1j * np.sin(phi / 2.0), 0],
[0, np.cos(phi / 2.0) + 1j * np.sin(phi / 2.0)],
]
)
CZ = np.array([[1, 0, 0, 0], [0, 1, 0, 0], [0, 0, 1, 0], [0, 0, 0, -1]])
CNOT = np.array([[1, 0, 0, 0], [0, 1, 0, 0], [0, 0, 0, 1], [0, 0, 1, 0]])
CCNOT = np.array(
[
[1, 0, 0, 0, 0, 0, 0, 0],
[0, 1, 0, 0, 0, 0, 0, 0],
[0, 0, 1, 0, 0, 0, 0, 0],
[0, 0, 0, 1, 0, 0, 0, 0],
[0, 0, 0, 0, 1, 0, 0, 0],
[0, 0, 0, 0, 0, 1, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 1],
[0, 0, 0, 0, 0, 0, 1, 0],
]
)
def CPHASE00(phi: float) -> np.ndarray:
return np.diag([np.exp(1j * phi), 1.0, 1.0, 1.0])
def CPHASE01(phi: float) -> np.ndarray:
return np.diag([1.0, np.exp(1j * phi), 1.0, 1.0])
def CPHASE10(phi: float) -> np.ndarray:
return np.diag([1.0, 1.0, np.exp(1j * phi), 1.0])
def CPHASE(phi: float) -> np.ndarray:
return np.diag([1.0, 1.0, 1.0, np.exp(1j * phi)])
SWAP = np.array([[1, 0, 0, 0], [0, 0, 1, 0], [0, 1, 0, 0], [0, 0, 0, 1]])
CSWAP = np.array(
[
[1, 0, 0, 0, 0, 0, 0, 0],
[0, 1, 0, 0, 0, 0, 0, 0],
[0, 0, 1, 0, 0, 0, 0, 0],
[0, 0, 0, 1, 0, 0, 0, 0],
[0, 0, 0, 0, 1, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 1, 0],
[0, 0, 0, 0, 0, 1, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 1],
]
)
ISWAP = np.array([[1, 0, 0, 0], [0, 0, 1j, 0], [0, 1j, 0, 0], [0, 0, 0, 1]])
def PSWAP(phi: float) -> np.ndarray:
return np.array(
[[1, 0, 0, 0], [0, 0, np.exp(1j * phi), 0], [0, np.exp(1j * phi), 0, 0], [0, 0, 0, 1]]
)
def XY(phi: float) -> np.ndarray:
return np.array(
[
[1, 0, 0, 0],
[0, np.cos(phi / 2), 1j * np.sin(phi / 2), 0],
[0, 1j * np.sin(phi / 2), np.cos(phi / 2), 0],
[0, 0, 0, 1],
]
)
# Utility gates for internal QVM use
P0 = np.array([[1, 0], [0, 0]])
P1 = np.array([[0, 0], [0, 1]])
# Specialized useful gates; not officially in standard gate set
def BARENCO(alpha: float, phi: float, theta: float) -> np.ndarray:
lower_unitary = np.array(
[
[np.exp(1j * phi) * np.cos(theta), -1j * np.exp(1j * (alpha - phi)) * np.sin(theta)],
[-1j * np.exp(1j * (alpha + phi)) * np.sin(theta), np.exp(1j * alpha) * np.cos(theta)],
]
)
return np.kron(P0, np.eye(2)) + np.kron(P1, lower_unitary)
QUANTUM_GATES = {
"I": I,
"X": X,
"Y": Y,
"Z": Z,
"H": H,
"S": S,
"T": T,
"PHASE": PHASE,
"RX": RX,
"RY": RY,
"RZ": RZ,
"CNOT": CNOT,
"CCNOT": CCNOT,
"CPHASE00": CPHASE00,
"CPHASE01": CPHASE01,
"CPHASE10": CPHASE10,
"CPHASE": CPHASE,
"SWAP": SWAP,
"CSWAP": CSWAP,
"ISWAP": ISWAP,
"PSWAP": PSWAP,
"BARENCO": BARENCO,
"CZ": CZ,
"XY": XY,
}
def relaxation_operators(p: float) -> Tuple[np.ndarray, np.ndarray]:
"""
Return the amplitude damping Kraus operators
"""
k0 = np.array([[1.0, 0.0], [0.0, np.sqrt(1 - p)]])
k1 = np.array([[0.0, np.sqrt(p)], [0.0, 0.0]])
return k0, k1
def dephasing_operators(p: float) -> Tuple[np.ndarray, np.ndarray]:
"""
Return the phase damping Kraus operators
"""
k0 = np.eye(2) * np.sqrt(1 - p / 2)
k1 = np.sqrt(p / 2) * Z
return k0, k1
def depolarizing_operators(p: float) -> Tuple[np.ndarray, np.ndarray, np.ndarray, np.ndarray]:
"""
    Return the depolarizing Kraus operators
"""
k0 = np.sqrt(1.0 - p) * I
k1 = np.sqrt(p / 3.0) * X
k2 = np.sqrt(p / 3.0) * Y
k3 = np.sqrt(p / 3.0) * Z
return k0, k1, k2, k3
def phase_flip_operators(p: float) -> Tuple[np.ndarray, np.ndarray]:
"""
Return the phase flip kraus operators
"""
k0 = np.sqrt(1 - p) * I
k1 = np.sqrt(p) * Z
return k0, k1
def bit_flip_operators(p: float) -> Tuple[np.ndarray, np.ndarray]:
"""
    Return the bit flip kraus operators
"""
k0 = np.sqrt(1 - p) * I
k1 = np.sqrt(p) * X
return k0, k1
def bitphase_flip_operators(p: float) -> Tuple[np.ndarray, np.ndarray]:
"""
Return the bitphase flip kraus operators
"""
k0 = np.sqrt(1 - p) * I
k1 = np.sqrt(p) * Y
return k0, k1
KRAUS_OPS = {
"relaxation": relaxation_operators,
"dephasing": dephasing_operators,
"depolarizing": depolarizing_operators,
"phase_flip": phase_flip_operators,
"bit_flip": bit_flip_operators,
"bitphase_flip": bitphase_flip_operators,
}
SIC0 = np.array([1, 0])
SIC1 = np.array([1, np.sqrt(2)]) / np.sqrt(3)
SIC2 = np.array([1, np.exp(-np.pi * 2j / 3) * np.sqrt(2)]) / np.sqrt(3)
SIC3 = np.array([1, np.exp(np.pi * 2j / 3) * np.sqrt(2)]) / np.sqrt(3)
"""
The symmetric informationally complete POVMs for a qubit.
These can reduce the number of experiments to perform quantum process tomography.
For more information, please see http://info.phys.unm.edu/~caves/reports/infopovm.pdf
"""
STATES = {
"X": [np.array([1, 1]) / np.sqrt(2), np.array([1, -1]) / np.sqrt(2)],
"Y": [np.array([1, 1j]) / np.sqrt(2), np.array([1, -1j]) / np.sqrt(2)],
"Z": [np.array([1, 0]), np.array([0, 1])],
"SIC": [SIC0, SIC1, SIC2, SIC3],
}
__all__ = list(QUANTUM_GATES.keys()) + [
"relaxation_operators",
"dephasing_operators",
"depolarizing_operators",
"phase_flip_operators",
"bit_flip_operators",
"bitphase_flip_operators",
"STATES",
"SIC0",
"SIC1",
"SIC2",
"SIC3",
]
|
py
|
1a55fceb55d92d9c35b3c934aac745f6c3a580b3
|
"""
######################################################################################################
# Class of scripts for testing the Feudal Network policy                                           #
######################################################################################################
"""
import numpy as np
import unittest
from scripts.training.feudal_networks.policies.feudal_policy import FeudalPolicy
import tensorflow as tf
np.set_printoptions(suppress=True, precision=6)
class TestFeudalPolicy(unittest.TestCase):
"""
Class for testing feudal network policy
"""
def setUp(self):
"""
Function for initializing the test
"""
# reset graph before each test case
tf.reset_default_graph()
def test_init(self):
"""
        Test that a FeudalPolicy instance can be constructed.
"""
global_step = tf.get_variable("global_step", [], tf.int32,\
initializer=tf.constant_initializer(0, dtype=tf.int32),
trainable=False)
feudal = FeudalPolicy((80,80,3), 4, global_step)
def test_fit_simple_dataset(self):
"""
        Test that the policy can fit a tiny hand-constructed dataset.
"""
with tf.Session() as session:
global_step = tf.get_variable("global_step", [], tf.int32,\
initializer=tf.constant_initializer(0, dtype=tf.int32),
trainable=False)
obs_space = (80,80,3)
act_space = 2
lr = 1e-5
g_dim = 256
worker_hid_dim = 32
manager_hid_dim = 256
pi = FeudalPolicy(obs_space, act_space, global_step)
grads = tf.gradients(pi.loss, pi.var_list)
prints = []
for g in grads:
prints.append(g.op.name)
prints.append(g)
# grads[0] = tf.Print(grads[0],prints)
grads, _ = tf.clip_by_global_norm(grads, 40)
grads_and_vars = list(zip(grads, pi.var_list))
opt = tf.train.AdamOptimizer(lr)
train_op = opt.apply_gradients(grads_and_vars)
# train_op = tf.train.AdamOptimizer(lr).minimize(pi.loss,var_list=pi.var_list)
session.run(tf.global_variables_initializer())
obs = [np.zeros(obs_space), np.zeros(obs_space)]
a = [[1,0], [0,1]]
returns = [0, 1]
s_diff = [np.ones(g_dim), np.ones(g_dim)]
gsum = [np.zeros((1,g_dim)), np.ones((1,g_dim))]
ri = [0, 0]
_,features = pi.get_initial_features()
worker_features = features[0:2]
manager_features = features[2:]
feed_dict = {
pi.obs: obs,
pi.ac: a,
pi.r: returns,
pi.s_diff: s_diff,
pi.prev_g: gsum,
pi.ri: ri,
pi.state_in[0]: worker_features[0],
pi.state_in[1]: worker_features[1],
pi.state_in[2]: manager_features[0],
pi.state_in[3]: manager_features[1]
}
n_updates = 1000
verbose = True
for i in range(n_updates):
loss, vf, policy, _ = session.run([pi.loss,pi.manager_vf,pi.pi, train_op], feed_dict=feed_dict)
if verbose:
print('loss: {}\npolicy: {}\nvalue: {}\n-------'.format(
loss, policy, vf))
def test_simple_manager_behavior(self):
"""
        Test the manager's behaviour in isolation by zeroing the worker weights after each update.
"""
with tf.Session() as session:
global_step = tf.get_variable("global_step", [], tf.int32,\
initializer=tf.constant_initializer(0, dtype=tf.int32),
trainable=False)
obs_space = (80,80,3)
act_space = 2
lr = 5e-4
g_dim = 256
worker_hid_dim = 32
manager_hid_dim = 256
pi = FeudalPolicy(obs_space, act_space, global_step)
train_op = tf.train.AdamOptimizer(lr).minimize(pi.loss)
worker_vars = tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES)
worker_vars = [v for v in worker_vars if 'worker' in v.name]
worker_assign = tf.group(*[tf.assign(v, tf.zeros_like(v))
for v in worker_vars])
session.run(tf.global_variables_initializer())
obs = [np.zeros(obs_space), np.zeros(obs_space)]
a = [[1,0], [0,1]]
returns = [0, 1]
s_diff = [np.ones(g_dim), np.ones(g_dim)]
gsum = [np.zeros((1,g_dim)), np.ones((1,g_dim))]
ri = [0, 0]
_, features = pi.get_initial_features()
worker_features = features[0:2]
manager_features = features[2:]
feed_dict = {
pi.obs: obs,
pi.ac: a,
pi.r: returns,
pi.s_diff: s_diff,
pi.prev_g: gsum,
pi.ri: ri,
pi.state_in[0]: worker_features[0],
pi.state_in[1]: worker_features[1],
pi.state_in[2]: manager_features[0],
pi.state_in[3]: manager_features[1]
}
n_updates = 1000
verbose = True
for i in range(n_updates):
loss, vf, policy, _, _ = session.run(
[pi.loss, pi.manager_vf, pi.pi, train_op, worker_assign],
feed_dict=feed_dict)
if verbose:
print('loss: {}\npolicy: {}\nvalue: {}\n-------'.format(
loss, policy, vf))
worker_var_values = session.run(worker_vars)
print(worker_var_values)
U = session.run(pi.U, feed_dict=feed_dict)
print(U)
input()
if __name__ == '__main__':
unittest.main()
|
py
|
1a55fd7636d713267632d40fed2c74926426d25a
|
#!/usr/bin/env python3
# Copyright 2015-2021 Scott Bezek and the splitflap contributors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import json
import logging
import os
import subprocess
import sys
from svg_processor import SvgProcessor
from projection_renderer import Renderer
script_dir = os.path.dirname(os.path.abspath(__file__))
source_parts_dir = os.path.dirname(script_dir)
repo_root = os.path.dirname(source_parts_dir)
sys.path.append(repo_root)
from util import rev_info
KERF_PRESETS = {
'ponoko-3mm-mdf': 0.18,
'ponoko-3mm-acrylic': 0.1,
}
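# The presets above map a material choice to the kerf compensation width
# (presumably in mm) used to override the OpenSCAD kerf_width variable.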
if __name__ == '__main__':
logging.basicConfig(level=logging.DEBUG)
parser = argparse.ArgumentParser()
parser.add_argument('--panelize', type=int, default=1, help='Quantity to panelize - must be 1 or an even number')
parser.add_argument('--skip-optimize', action='store_true', help='Don\'t remove redundant/overlapping cut lines')
kerf_group = parser.add_mutually_exclusive_group()
kerf_group.add_argument('--kerf', type=float, help='Override kerf_width value')
kerf_group.add_argument('--kerf-preset', choices=KERF_PRESETS, help='Override kerf_width using a defined preset')
parser.add_argument('--render-raster', action='store_true', help='Render raster PNG from the output SVG (requires '
'Inkscape)')
parser.add_argument('--thickness', type=float, help='Override panel thickness value')
parser.add_argument('--no-etch', action='store_true', help='Do not render laser-etched features')
parser.add_argument('--mirror', action='store_true', help='Mirror the assembly so the outside faces are facing up. '
'Note that this will remove all etched features.')
args = parser.parse_args()
laser_parts_directory = os.path.join(source_parts_dir, 'build', 'laser_parts')
extra_variables = {
'render_revision': rev_info.git_short_rev(),
'render_date': rev_info.current_date(),
'render_etch': not args.no_etch,
'render_2d_mirror': args.mirror,
}
if args.kerf is not None:
extra_variables['kerf_width'] = args.kerf
elif args.kerf_preset is not None:
extra_variables['kerf_width'] = KERF_PRESETS[args.kerf_preset]
if args.thickness is not None:
extra_variables['thickness'] = args.thickness
print('Variables:\n' + json.dumps(extra_variables, indent=4))
renderer = Renderer(os.path.join(source_parts_dir, 'splitflap.scad'), laser_parts_directory, extra_variables)
renderer.clean()
svg_output = renderer.render_svgs(panelize_quantity=args.panelize)
logging.info('Removing redundant lines')
processor = SvgProcessor(svg_output)
redundant_lines, merged_lines = None, None
if not args.skip_optimize:
redundant_lines, merged_lines = processor.remove_redundant_lines()
processor.write(svg_output)
logging.info('\n\n\nDone rendering to SVG: ' + svg_output)
if args.render_raster:
# Export to png
logging.info('Generating raster preview')
raster_svg = os.path.join(laser_parts_directory, 'raster.svg')
raster_png = os.path.join(laser_parts_directory, 'raster.png')
processor.apply_raster_render_style()
if not args.skip_optimize:
# Show which redundant lines were removed and lines merged
processor.add_highlight_lines(redundant_lines, '#ff0000')
processor.add_highlight_lines(merged_lines, '#0000ff')
processor.write(raster_svg)
logging.info('Resize SVG canvas')
subprocess.check_call([
'inkscape',
'--verb=FitCanvasToDrawing',
'--verb=FileSave',
'--verb=FileClose',
'--verb=FileQuit',
raster_svg,
])
logging.info('Export PNG')
subprocess.check_call([
'inkscape',
'--export-width=320',
'--export-png', raster_png,
raster_svg,
])
|
py
|
1a55fda1529209c9e33b00282a607e41601e292f
|
# coding=utf-8
# Copyright 2020 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import unittest
from copy import deepcopy
from transformers import RobertaConfig, is_torch_available
from transformers.testing_utils import TestCasePlus, require_torch, slow, torch_device
from .test_configuration_common import ConfigTester
from .test_generation_utils import GenerationTesterMixin
from .test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
if is_torch_available():
import torch
from transformers import (
RobertaForCausalLM,
RobertaForMaskedLM,
RobertaForMultipleChoice,
RobertaForQuestionAnswering,
RobertaForSequenceClassification,
RobertaForTokenClassification,
RobertaModel,
)
from transformers.models.roberta.modeling_roberta import (
ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST,
RobertaEmbeddings,
create_position_ids_from_input_ids,
)
ROBERTA_TINY = "sshleifer/tiny-distilroberta-base"
class RobertaModelTester:
def __init__(
self,
parent,
):
self.parent = parent
self.batch_size = 13
self.seq_length = 7
self.is_training = True
self.use_input_mask = True
self.use_token_type_ids = True
self.use_labels = True
self.vocab_size = 99
self.hidden_size = 32
self.num_hidden_layers = 5
self.num_attention_heads = 4
self.intermediate_size = 37
self.hidden_act = "gelu"
self.hidden_dropout_prob = 0.1
self.attention_probs_dropout_prob = 0.1
self.max_position_embeddings = 512
self.type_vocab_size = 16
self.type_sequence_label_size = 2
self.initializer_range = 0.02
self.num_labels = 3
self.num_choices = 4
self.scope = None
def prepare_config_and_inputs(self):
input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)
input_mask = None
if self.use_input_mask:
input_mask = random_attention_mask([self.batch_size, self.seq_length])
token_type_ids = None
if self.use_token_type_ids:
token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)
sequence_labels = None
token_labels = None
choice_labels = None
if self.use_labels:
sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
choice_labels = ids_tensor([self.batch_size], self.num_choices)
config = self.get_config()
return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
def get_config(self):
return RobertaConfig(
vocab_size=self.vocab_size,
hidden_size=self.hidden_size,
num_hidden_layers=self.num_hidden_layers,
num_attention_heads=self.num_attention_heads,
intermediate_size=self.intermediate_size,
hidden_act=self.hidden_act,
hidden_dropout_prob=self.hidden_dropout_prob,
attention_probs_dropout_prob=self.attention_probs_dropout_prob,
max_position_embeddings=self.max_position_embeddings,
type_vocab_size=self.type_vocab_size,
initializer_range=self.initializer_range,
)
def prepare_config_and_inputs_for_decoder(self):
(
config,
input_ids,
token_type_ids,
input_mask,
sequence_labels,
token_labels,
choice_labels,
) = self.prepare_config_and_inputs()
config.is_decoder = True
encoder_hidden_states = floats_tensor([self.batch_size, self.seq_length, self.hidden_size])
encoder_attention_mask = ids_tensor([self.batch_size, self.seq_length], vocab_size=2)
return (
config,
input_ids,
token_type_ids,
input_mask,
sequence_labels,
token_labels,
choice_labels,
encoder_hidden_states,
encoder_attention_mask,
)
def create_and_check_model(
self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
):
model = RobertaModel(config=config)
model.to(torch_device)
model.eval()
result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids)
result = model(input_ids, token_type_ids=token_type_ids)
result = model(input_ids)
self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
self.parent.assertEqual(result.pooler_output.shape, (self.batch_size, self.hidden_size))
def create_and_check_model_as_decoder(
self,
config,
input_ids,
token_type_ids,
input_mask,
sequence_labels,
token_labels,
choice_labels,
encoder_hidden_states,
encoder_attention_mask,
):
config.add_cross_attention = True
model = RobertaModel(config)
model.to(torch_device)
model.eval()
result = model(
input_ids,
attention_mask=input_mask,
token_type_ids=token_type_ids,
encoder_hidden_states=encoder_hidden_states,
encoder_attention_mask=encoder_attention_mask,
)
result = model(
input_ids,
attention_mask=input_mask,
token_type_ids=token_type_ids,
encoder_hidden_states=encoder_hidden_states,
)
result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids)
self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
self.parent.assertEqual(result.pooler_output.shape, (self.batch_size, self.hidden_size))
def create_and_check_for_causal_lm(
self,
config,
input_ids,
token_type_ids,
input_mask,
sequence_labels,
token_labels,
choice_labels,
encoder_hidden_states,
encoder_attention_mask,
):
model = RobertaForCausalLM(config=config)
model.to(torch_device)
model.eval()
result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels)
self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))
def create_and_check_decoder_model_past_large_inputs(
self,
config,
input_ids,
token_type_ids,
input_mask,
sequence_labels,
token_labels,
choice_labels,
encoder_hidden_states,
encoder_attention_mask,
):
config.is_decoder = True
config.add_cross_attention = True
model = RobertaForCausalLM(config=config).to(torch_device).eval()
# make sure that ids don't start with pad token
mask = input_ids.ne(config.pad_token_id).long()
input_ids = input_ids * mask
# first forward pass
outputs = model(
input_ids,
attention_mask=input_mask,
encoder_hidden_states=encoder_hidden_states,
encoder_attention_mask=encoder_attention_mask,
use_cache=True,
)
past_key_values = outputs.past_key_values
        # create hypothetical multiple next tokens and extend to next_input_ids
next_tokens = ids_tensor((self.batch_size, 3), config.vocab_size)
# make sure that ids don't start with pad token
mask = next_tokens.ne(config.pad_token_id).long()
next_tokens = next_tokens * mask
next_mask = ids_tensor((self.batch_size, 3), vocab_size=2)
        # append to next input_ids and attention mask
next_input_ids = torch.cat([input_ids, next_tokens], dim=-1)
next_attention_mask = torch.cat([input_mask, next_mask], dim=-1)
output_from_no_past = model(
next_input_ids,
attention_mask=next_attention_mask,
encoder_hidden_states=encoder_hidden_states,
encoder_attention_mask=encoder_attention_mask,
output_hidden_states=True,
)["hidden_states"][0]
output_from_past = model(
next_tokens,
attention_mask=next_attention_mask,
encoder_hidden_states=encoder_hidden_states,
encoder_attention_mask=encoder_attention_mask,
past_key_values=past_key_values,
output_hidden_states=True,
)["hidden_states"][0]
# select random slice
random_slice_idx = ids_tensor((1,), output_from_past.shape[-1]).item()
output_from_no_past_slice = output_from_no_past[:, -3:, random_slice_idx].detach()
output_from_past_slice = output_from_past[:, :, random_slice_idx].detach()
self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1])
# test that outputs are equal for slice
self.parent.assertTrue(torch.allclose(output_from_past_slice, output_from_no_past_slice, atol=1e-3))
def create_and_check_for_masked_lm(
self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
):
model = RobertaForMaskedLM(config=config)
model.to(torch_device)
model.eval()
result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels)
self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))
def create_and_check_for_token_classification(
self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
):
config.num_labels = self.num_labels
model = RobertaForTokenClassification(config=config)
model.to(torch_device)
model.eval()
result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels)
self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))
def create_and_check_for_multiple_choice(
self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
):
config.num_choices = self.num_choices
model = RobertaForMultipleChoice(config=config)
model.to(torch_device)
model.eval()
multiple_choice_inputs_ids = input_ids.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
multiple_choice_token_type_ids = token_type_ids.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
multiple_choice_input_mask = input_mask.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
result = model(
multiple_choice_inputs_ids,
attention_mask=multiple_choice_input_mask,
token_type_ids=multiple_choice_token_type_ids,
labels=choice_labels,
)
self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_choices))
def create_and_check_for_question_answering(
self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
):
model = RobertaForQuestionAnswering(config=config)
model.to(torch_device)
model.eval()
result = model(
input_ids,
attention_mask=input_mask,
token_type_ids=token_type_ids,
start_positions=sequence_labels,
end_positions=sequence_labels,
)
self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))
def prepare_config_and_inputs_for_common(self):
config_and_inputs = self.prepare_config_and_inputs()
(
config,
input_ids,
token_type_ids,
input_mask,
sequence_labels,
token_labels,
choice_labels,
) = config_and_inputs
inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": input_mask}
return config, inputs_dict
@require_torch
class RobertaModelTest(ModelTesterMixin, GenerationTesterMixin, unittest.TestCase):
all_model_classes = (
(
RobertaForCausalLM,
RobertaForMaskedLM,
RobertaModel,
RobertaForSequenceClassification,
RobertaForTokenClassification,
RobertaForMultipleChoice,
RobertaForQuestionAnswering,
)
if is_torch_available()
else ()
)
all_generative_model_classes = (RobertaForCausalLM,) if is_torch_available() else ()
fx_compatible = True
def setUp(self):
self.model_tester = RobertaModelTester(self)
self.config_tester = ConfigTester(self, config_class=RobertaConfig, hidden_size=37)
def test_config(self):
self.config_tester.run_common_tests()
def test_model(self):
config_and_inputs = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*config_and_inputs)
def test_model_various_embeddings(self):
config_and_inputs = self.model_tester.prepare_config_and_inputs()
for type in ["absolute", "relative_key", "relative_key_query"]:
config_and_inputs[0].position_embedding_type = type
self.model_tester.create_and_check_model(*config_and_inputs)
def test_model_as_decoder(self):
config_and_inputs = self.model_tester.prepare_config_and_inputs_for_decoder()
self.model_tester.create_and_check_model_as_decoder(*config_and_inputs)
def test_model_as_decoder_with_default_input_mask(self):
# This regression test was failing with PyTorch < 1.3
(
config,
input_ids,
token_type_ids,
input_mask,
sequence_labels,
token_labels,
choice_labels,
encoder_hidden_states,
encoder_attention_mask,
) = self.model_tester.prepare_config_and_inputs_for_decoder()
input_mask = None
self.model_tester.create_and_check_model_as_decoder(
config,
input_ids,
token_type_ids,
input_mask,
sequence_labels,
token_labels,
choice_labels,
encoder_hidden_states,
encoder_attention_mask,
)
def test_for_causal_lm(self):
config_and_inputs = self.model_tester.prepare_config_and_inputs_for_decoder()
self.model_tester.create_and_check_for_causal_lm(*config_and_inputs)
def test_decoder_model_past_with_large_inputs(self):
config_and_inputs = self.model_tester.prepare_config_and_inputs_for_decoder()
self.model_tester.create_and_check_decoder_model_past_large_inputs(*config_and_inputs)
def test_for_masked_lm(self):
config_and_inputs = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_lm(*config_and_inputs)
def test_for_token_classification(self):
config_and_inputs = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_token_classification(*config_and_inputs)
def test_for_multiple_choice(self):
config_and_inputs = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_multiple_choice(*config_and_inputs)
def test_for_question_answering(self):
config_and_inputs = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_question_answering(*config_and_inputs)
@slow
def test_model_from_pretrained(self):
for model_name in ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
model = RobertaModel.from_pretrained(model_name)
self.assertIsNotNone(model)
def test_create_position_ids_respects_padding_index(self):
"""Ensure that the default position ids only assign a sequential . This is a regression
test for https://github.com/huggingface/transformers/issues/1761
The position ids should be masked with the embedding object's padding index. Therefore, the
first available non-padding position index is RobertaEmbeddings.padding_idx + 1
"""
config = self.model_tester.prepare_config_and_inputs()[0]
model = RobertaEmbeddings(config=config)
input_ids = torch.as_tensor([[12, 31, 13, model.padding_idx]])
expected_positions = torch.as_tensor(
[[0 + model.padding_idx + 1, 1 + model.padding_idx + 1, 2 + model.padding_idx + 1, model.padding_idx]]
)
position_ids = create_position_ids_from_input_ids(input_ids, model.padding_idx)
self.assertEqual(position_ids.shape, expected_positions.shape)
self.assertTrue(torch.all(torch.eq(position_ids, expected_positions)))
def test_create_position_ids_from_inputs_embeds(self):
"""Ensure that the default position ids only assign a sequential . This is a regression
test for https://github.com/huggingface/transformers/issues/1761
The position ids should be masked with the embedding object's padding index. Therefore, the
first available non-padding position index is RobertaEmbeddings.padding_idx + 1
"""
config = self.model_tester.prepare_config_and_inputs()[0]
embeddings = RobertaEmbeddings(config=config)
inputs_embeds = torch.empty(2, 4, 30)
expected_single_positions = [
0 + embeddings.padding_idx + 1,
1 + embeddings.padding_idx + 1,
2 + embeddings.padding_idx + 1,
3 + embeddings.padding_idx + 1,
]
expected_positions = torch.as_tensor([expected_single_positions, expected_single_positions])
position_ids = embeddings.create_position_ids_from_inputs_embeds(inputs_embeds)
self.assertEqual(position_ids.shape, expected_positions.shape)
self.assertTrue(torch.all(torch.eq(position_ids, expected_positions)))
@require_torch
class RobertaModelIntegrationTest(TestCasePlus):
@slow
def test_inference_masked_lm(self):
model = RobertaForMaskedLM.from_pretrained("roberta-base")
input_ids = torch.tensor([[0, 31414, 232, 328, 740, 1140, 12695, 69, 46078, 1588, 2]])
with torch.no_grad():
output = model(input_ids)[0]
expected_shape = torch.Size((1, 11, 50265))
self.assertEqual(output.shape, expected_shape)
# compare the actual values for a slice.
expected_slice = torch.tensor(
[[[33.8802, -4.3103, 22.7761], [4.6539, -2.8098, 13.6253], [1.8228, -3.6898, 8.8600]]]
)
# roberta = torch.hub.load('pytorch/fairseq', 'roberta.base')
# roberta.eval()
# expected_slice = roberta.model.forward(input_ids)[0][:, :3, :3].detach()
self.assertTrue(torch.allclose(output[:, :3, :3], expected_slice, atol=1e-4))
@slow
def test_inference_no_head(self):
model = RobertaModel.from_pretrained("roberta-base")
input_ids = torch.tensor([[0, 31414, 232, 328, 740, 1140, 12695, 69, 46078, 1588, 2]])
with torch.no_grad():
output = model(input_ids)[0]
# compare the actual values for a slice.
expected_slice = torch.tensor(
[[[-0.0231, 0.0782, 0.0074], [-0.1854, 0.0540, -0.0175], [0.0548, 0.0799, 0.1687]]]
)
# roberta = torch.hub.load('pytorch/fairseq', 'roberta.base')
# roberta.eval()
# expected_slice = roberta.extract_features(input_ids)[:, :3, :3].detach()
self.assertTrue(torch.allclose(output[:, :3, :3], expected_slice, atol=1e-4))
@slow
def test_inference_classification_head(self):
model = RobertaForSequenceClassification.from_pretrained("roberta-large-mnli")
input_ids = torch.tensor([[0, 31414, 232, 328, 740, 1140, 12695, 69, 46078, 1588, 2]])
with torch.no_grad():
output = model(input_ids)[0]
expected_shape = torch.Size((1, 3))
self.assertEqual(output.shape, expected_shape)
expected_tensor = torch.tensor([[-0.9469, 0.3913, 0.5118]])
# roberta = torch.hub.load('pytorch/fairseq', 'roberta.large.mnli')
# roberta.eval()
# expected_tensor = roberta.predict("mnli", input_ids, return_logits=True).detach()
self.assertTrue(torch.allclose(output, expected_tensor, atol=1e-4))
# XXX: this might be a candidate for common tests if we have many of those
def test_lm_head_ignore_keys(self):
keys_to_ignore_on_save_tied = [r"lm_head.decoder.weight", r"lm_head.decoder.bias"]
keys_to_ignore_on_save_untied = [r"lm_head.decoder.bias"]
config = RobertaConfig.from_pretrained(ROBERTA_TINY)
config_tied = deepcopy(config)
config_tied.tie_word_embeddings = True
config_untied = deepcopy(config)
config_untied.tie_word_embeddings = False
for cls in [RobertaForMaskedLM, RobertaForCausalLM]:
model = cls(config_tied)
self.assertEqual(model._keys_to_ignore_on_save, keys_to_ignore_on_save_tied, cls)
# the keys should be different when embeddings aren't tied
model = cls(config_untied)
self.assertEqual(model._keys_to_ignore_on_save, keys_to_ignore_on_save_untied, cls)
# test that saving works with updated ignore keys - just testing that it doesn't fail
model.save_pretrained(self.get_auto_remove_tmp_dir())
|
py
|
1a55ff1226791265e9839633702c788bceb5cedb
|
"""Binary tree
=== CSC148 Winter 2018 ===
University of Toronto,
Department of Computer Science
__author__ = 'Eric K'
=== Module Description ===
This module contains a binary tree implementation
"""
from typing import Union, Optional
class BinaryTree:
"""
A Binary Tree, i.e. arity 2.
"""
value: object
left: Optional['BinaryTree']
right: Optional['BinaryTree']
def __init__(self, value: object, left: Optional['BinaryTree'] = None,
right: Optional['BinaryTree'] = None) -> None:
"""
Create BinaryTree self with value and children left and right.
"""
self.value, self.left, self.right = value, left, right
def __eq__(self, other: Union['BinaryTree', object]) -> bool:
"""
Return whether BinaryTree self is equivalent to other.
>>> BinaryTree(7).__eq__("seven")
False
>>> b1 = BinaryTree(7, BinaryTree(5))
>>> b1.__eq__(BinaryTree(7, BinaryTree(5), None))
True
>>> b1.__eq__(BinaryTree(7, BinaryTree(5,BinaryTree(2)), None))
False
"""
return (type(self) is type(other) and
self.value == other.value and
self.left == other.left and
self.right == other.right)
def __repr__(self) -> str:
"""
Represent BinaryTree (self) as a string that can be evaluated to
produce an equivalent BinaryTree.
>>> BinaryTree(1, BinaryTree(2), BinaryTree(3))
BinaryTree(1, BinaryTree(2), BinaryTree(3))
"""
if self.value is None:
return ''
elif self.left is None and self.right is None:
return f'BinaryTree({self.value})'
else:
return "BinaryTree({}, {}, {})".format(repr(self.value),
repr(self.left),
repr(self.right))
def __str__(self, level: str = '') -> str:
"""
        Return a user-friendly string representing BinaryTree (self)
        inorder, indenting each level of the tree by `level`.
>>> b = BinaryTree(1, BinaryTree(2, BinaryTree(3)), BinaryTree(4))
>>> print(b)
4
1
2
3
<BLANKLINE>
"""
if self.value is None:
return ''
else:
right = self.right.__str__(level + ' ') if self.right else ''
left = self.left.__str__(level + ' ') if self.left else ''
s = right + "{}{}\n".format(level, str(self.value)) + left
return s
def __contains__(self, value: object) -> bool:
"""
Return whether tree rooted at self contains value.
>>> t = BinaryTree(5, BinaryTree(7), BinaryTree(9))
>>> 7 in t
True
>>> t = BinaryTree(5, BinaryTree(7), None)
>>> 3 in t
False
"""
# Can also use: self.left.__contains__(value) rather than in
# Version 1
if self.value is None:
return False
elif value == self.value:
return True
else:
return any([self.left is not None and value in self.left,
self.right is not None and value in self.right])
# Version 2
# if self.value is None:
# return False
# else:
# return any([self.value == value,
# self.left is not None and value in self.left,
# self.right is not None and value in self.right])
# Version 3
# if self.value is None:
# return False
# elif value == self.value:
# return True
# else:
# return any([value in self.left if self.left else False,
# value in self.right if self.right else False])
if __name__ == '__main__':
import doctest
doctest.testmod()
|
py
|
1a55ff7fa520084010447498500ab37973886012
|
import os
import glob
import sys
import functools
import jsonpickle
from collections import OrderedDict
from Orange.widgets import widget, gui, settings
import Orange.data
from Orange.data.io import FileFormat
from DockerClient import DockerClient
from BwBase import OWBwBWidget, ConnectionDict, BwbGuiElements, getIconName, getJsonName
from PyQt5 import QtWidgets, QtGui
class OWPerl(OWBwBWidget):
name = "Perl"
description = "Minimum perl container"
priority = 20
icon = getIconName(__file__, "perl.png")
want_main_area = False
docker_image_name = "biodepot/perl"
docker_image_tag = "5.26.2-r1__alpine-3.7__081418"
inputs = [
("inputFile", str, "handleInputsinputFile"),
("Trigger", str, "handleInputsTrigger"),
]
outputs = [("OutputDir", str)]
pset = functools.partial(settings.Setting, schema_only=True)
runMode = pset(0)
exportGraphics = pset(False)
runTriggers = pset([])
triggerReady = pset({})
inputConnectionsStore = pset({})
optionsChecked = pset({})
InputFile = pset(None)
def __init__(self):
super().__init__(self.docker_image_name, self.docker_image_tag)
with open(getJsonName(__file__, "Perl")) as f:
self.data = jsonpickle.decode(f.read())
f.close()
self.initVolumes()
self.inputConnections = ConnectionDict(self.inputConnectionsStore)
self.drawGUI()
def handleInputsinputFile(self, value, *args):
if args and len(args) > 0:
self.handleInputs("inputFile", value, args[0][0], test=args[0][3])
else:
self.handleInputs("inputFile", value, None)
def handleInputsTrigger(self, value, *args):
if args and len(args) > 0:
self.handleInputs("Trigger", value, args[0][0], test=args[0][3])
else:
self.handleInputs("inputFile", value, None)
def handleOutputs(self):
outputValue = None
if hasattr(self, "OutputDir"):
outputValue = getattr(self, "OutputDir")
self.send("OutputDir", outputValue)
|
py
|
1a55ffd4b029f2cc8684f21bf780b292f2ab3c52
|
#!/usr/bin/env python3
"""compressed_stream
-----------------
A simple python module to handle streams of compressed files.
"""
from setuptools import setup, find_packages
setup(
name='compressed_stream',
version='0.0.2',
author='Cristian Consonni',
author_email='crist' 'ian' '<dot>' 'conson' 'ni' '<a' 't>' 'uni' 'tn' '<d' 'ot>i' 't',
license='MIT',
description='Handle streams of compressed files.',
long_description=__doc__,
url='https://github.com/CristianCantoro/compressed_stream',
packages=find_packages(),
entry_points={
'console_scripts': [
'decompressgrep=compressed_stream.__main__:decompressgrep',
'decompressless=compressed_stream.__main__:decompressless',
'decompressmore=compressed_stream.__main__:decompressmore',
'decompressdiff=compressed_stream.__main__:decompressdiff',
'decompresscmp=compressed_stream.__main__:decompresscmp',
'decompresscat=compressed_stream.__main__:decompresscat',
],
},
options={
'build_scripts': {
'executable': 'python3',
},
},
install_requires=[],
zip_safe=False,
)
|
py
|
1a55ffe095f70e0e410f20db7fd063ce014c346f
|
# Copyright 2013-2022 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
from spack.package import *
class W3nco(CMakePackage):
"""This library contains Fortran 90 decoder/encoder routines for GRIB
edition 1 with NCO changes. This library is deprecated; all
functionality has been moved to the w3emc library.
This is part of the NCEPLIBS project."""
homepage = "https://noaa-emc.github.io/NCEPLIBS/NCEPLIBS-w3nco/"
url = "https://github.com/NOAA-EMC/NCEPLIBS-w3nco/archive/refs/tags/v2.4.1.tar.gz"
maintainers = ['t-brown', 'kgerheiser', 'Hang-Lei-NOAA', 'edwardhartnett']
version('2.4.1', sha256='48b06e0ea21d3d0fd5d5c4e7eb50b081402567c1bff6c4abf4fd4f3669070139')
|
py
|
1a56006c34032556059e97ac1683195d9eead7e1
|
#!/usr/local/bin/python3
# -*- coding: utf-8 -*-
"""
(C) Copyright [2015] InfoSec Consulting, Inc.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
...
.:::|#:#|::::.
.:::::|##|##|::::::.
.::::|##|:|##|:::::.
::::|#|:::|#|:::::
::::|#|:::|#|:::::
::::|##|:|##|:::::
::::.|#|:|#|.:::::
::|####|::|####|::
:|###|:|##|:|###|:
|###|::|##|::|###|
|#|::|##||##|::|#|
|#|:|##|::|##|:|#|
|#|##|::::::|##|#|
|#|::::::::::|#|
::::::::::::::
::::::::::
::::::::
::::::
::
"""
__author__ = 'Avery Rozar'
import os
import sys
import classes.db_tables
try:
import yaml
except ImportError:
    print('Installing PyYAML..')
os.system('pip3 install PyYAML')
import yaml
try:
import psycopg2
except ImportError:
print('Installing Psycopg2..')
os.system('pip3 install psycopg2')
import psycopg2
try:
import sqlalchemy
from sqlalchemy import Column, String, Text, Integer, ForeignKey, Sequence, create_engine, MetaData
from sqlalchemy.orm import relationship, sessionmaker
from sqlalchemy.ext.declarative import declarative_base
from sqlalchemy.engine.url import URL
except ImportError:
print('Installing SQLAlchemy..')
os.system('pip3 install SQLAlchemy')
import sqlalchemy
from sqlalchemy import Column, String, Text, Integer, ForeignKey, Sequence, create_engine, MetaData
from sqlalchemy.orm import relationship, sessionmaker
from sqlalchemy.ext.declarative import declarative_base
from sqlalchemy.engine.url import URL
Session = sessionmaker()
def connect():
db_yml = open('config/database.yml', 'r')
db_info = yaml.safe_load(db_yml)
cursor = None
try:
Session = sessionmaker()
engine = create_engine(URL(**db_info), pool_size=20)
Session.configure(bind=engine)
return Session
except sqlalchemy.exc.OperationalError as e:
print(e)
sys.exit(1)
finally:
if cursor:
cursor.close()
def connect_and_create_db():
db_yml = open('config/database.yml', 'r')
db_info = yaml.safe_load(db_yml)
cursor = None
try:
engine = create_engine(URL(**db_info))
Session.configure(bind=engine)
classes.db_tables.Base.metadata.create_all(engine)
return Session
except sqlalchemy.exc.OperationalError as e:
print(e)
sys.exit(1)
finally:
if cursor:
cursor.close()
def connect_and_drop_all():
db_yml = open('config/database.yml', 'r')
db_info = yaml.safe_load(db_yml)
cursor = None
try:
engine = create_engine(URL(**db_info))
Session.configure(bind=engine)
classes.db_tables.Base.metadata.drop_all(engine)
return Session
except sqlalchemy.exc.OperationalError as e:
print(e)
sys.exit(1)
finally:
if cursor:
cursor.close()
|
py
|
1a560088279ac945cce14d02454e50b8483771e4
|
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import unittest
from apache.aurora.client import base
from gen.apache.aurora.api.ttypes import (
PopulateJobResult,
Response,
ResponseCode,
ResponseDetail,
Result,
TaskConfig
)
class TestBase(unittest.TestCase):
def test_format_response_with_message(self):
resp = Response(responseCode=ResponseCode.ERROR, details=[ResponseDetail(message='Error')])
formatted = base.format_response(resp)
assert formatted == 'Response from scheduler: ERROR (message: Error)'
def test_format_response_with_details(self):
resp = Response(responseCode=ResponseCode.ERROR, details=[ResponseDetail(message='Error')])
formatted = base.format_response(resp)
assert formatted == 'Response from scheduler: ERROR (message: Error)'
def test_combine_messages(self):
resp = Response(responseCode=ResponseCode.ERROR)
assert base.combine_messages(resp) == ''
resp = Response(responseCode=ResponseCode.ERROR, details=[])
assert base.combine_messages(resp) == ''
resp = Response(responseCode=ResponseCode.ERROR, details=[ResponseDetail(message='Error')])
assert base.combine_messages(resp) == 'Error'
resp = Response(responseCode=ResponseCode.ERROR, details=[ResponseDetail()])
assert base.combine_messages(resp) == 'Unknown error'
resp = Response(
responseCode=ResponseCode.ERROR,
details=[ResponseDetail(message='Error1'), ResponseDetail(message='Error2')])
assert base.combine_messages(resp) == 'Error1, Error2'
def test_get_populated_task_config_set(self):
config = TaskConfig()
resp = Response(responseCode=ResponseCode.OK, result=Result(populateJobResult=PopulateJobResult(
taskConfig=config)))
assert config == resp.result.populateJobResult.taskConfig
|
py
|
1a560131a27e82eae69a9cf89789ad79575b143f
|
#!/usr/bin/env python
# Licensed to Cloudera, Inc. under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. Cloudera, Inc. licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Interfaces and abstractions for filesystem access.
We should be agnostic whether we're using a "temporary" file
system, rooted in a local tmp dir, or whether we're using
a true HDFS. This file defines the interface.
Note that PEP 355 (Path - object oriented filesystem paths) did
not pass. Many file system methods are in __builtin__, os, or
os.path, and take strings representing filenames as arguments.
We maintain this usage of paths as arguments.
When possible, the interfaces here have fidelity to the
native python interfaces.
"""
from __future__ import division
from future import standard_library
from functools import reduce
standard_library.install_aliases()
from builtins import map
from builtins import range
from builtins import object
import errno
import grp
import logging
import math
import os
import posixpath
import pwd
import re
import shutil
import stat
import sys
if sys.version_info[0] > 2:
from builtins import open as builtins_open
else:
from __builtin__ import open as builtins_open
SEEK_SET, SEEK_CUR, SEEK_END = os.SEEK_SET, os.SEEK_CUR, os.SEEK_END
# The web (and POSIX) always uses forward slash as a separator
LEADING_DOUBLE_SEPARATORS = re.compile("^" + posixpath.sep*2)
def normpath(path):
"""
Eliminates double-slashes.
Oddly, posixpath.normpath doesn't eliminate leading double slashes,
but it does clean-up triple-slashes.
"""
p = posixpath.normpath(path)
return LEADING_DOUBLE_SEPARATORS.sub(posixpath.sep, p)
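# Illustrative example (not part of the original module): posixpath.normpath keeps
# a leading double slash, which the wrapper above collapses.
def _normpath_example() -> bool:
    return normpath("//user//hue///docs") == "/user/hue/docs"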
class IllegalPathException(Exception):
pass
class LocalSubFileSystem(object):
"""
Facade around normal python filesystem calls, for a temporary/local
file system rooted in a root directory. This is intended for testing,
and is not a secure chroot alternative.
So far, this doesn't have a notion of current working dir, so all
paths are "absolute". I dislike the state that having cwd's implies,
but it may be convenient.
TODO(philip):
* chown: want to implement with names, not uids.
* chmod
* stat: perhaps implement "stats" which returns a dictionary;
Hadoop and posix have different stats
* set_replication: no equivalent
* file-system level stats
I think this covers all the functionality in "src/contrib/thriftfs/if/hadoopfs.thrift",
but there may be some bits missing. The implementation of the file-like object
for HDFS will be a bit tricky: open(f, "w") is generally the equivalent
of createFile, but it has to handle the case where f already
exists (in which case the best we can do is append, if that).
"""
def __init__(self, root):
"""
A file system rooted in root.
"""
self.root = root
self.name = "file://%s" % self.root
if not os.path.isdir(root):
logging.fatal("Root(%s) not found." % root +
" Perhaps you need to run manage.py create_test_fs")
def _resolve_path(self, path):
"""
Returns path to use in native file system.
"""
# Strip leading "/"
if not path.startswith("/"):
raise IllegalPathException("Path %s must start with leading /." % path)
path = path.lstrip("/")
joined = os.path.join(self.root, path)
absolute = os.path.abspath(joined)
normalized = os.path.normpath(absolute)
prefix = os.path.commonprefix([self.root, normalized])
if prefix != self.root:
raise IllegalPathException("Path %s is not valid." % path)
return joined
def _unresolve_path(self, path):
"""
Given an absolute path within the wrapped filesystem,
return the path that the user of this class sees.
"""
        # Resolve it to make it really absolute
assert path.startswith(self.root)
return path[len(self.root):]
def _wrap(f, paths=None, users=None, groups=None):
"""
Wraps an existing function f, and transforms
path arguments to "resolved paths" and
user arguments to uids.
By default transforms the first (zeroth) argument as
a path, but can be customized.
This lets us write:
def open(self, name, mode="r"):
return open(self._resolve_path(name), mode)
as
open = _wrap(__builtin__.open)
NOTE: No transformation is done on the keyword args;
they are not accepted. (The alternative would be to
require the names of the keyword transformations.)
"""
if users is None:
users = []
if groups is None:
groups = []
if paths is None and 0 not in users and 0 not in groups:
paths = [0]
# complicated way of taking the intersection of three lists.
assert not reduce(set.intersection, list(map(set, [paths, users, groups])))
def wrapped(*args):
self = args[0]
newargs = list(args[1:])
for i in paths:
newargs[i] = self._resolve_path(newargs[i])
for i in users:
newargs[i] = pwd.getpwnam(newargs[i]).pw_uid
for i in groups:
newargs[i] = grp.getgrnam(newargs[i]).gr_gid
if f == builtins_open and sys.version_info[0] > 2:
return f(*newargs, encoding='utf-8')
return f(*newargs)
return wrapped
# These follow their namesakes.
open = _wrap(builtins_open)
remove = _wrap(os.remove)
mkdir = _wrap(os.mkdir)
rmdir = _wrap(os.rmdir)
listdir = _wrap(os.listdir)
rename = _wrap(os.rename, paths=[0,1])
exists = _wrap(os.path.exists)
isfile = _wrap(os.path.isfile)
isdir = _wrap(os.path.isdir)
chmod = _wrap(os.chmod)
join = _wrap(os.path.join)
# This could be provided with an error_handler
rmtree = _wrap(shutil.rmtree)
chown = _wrap(os.chown, paths=[0], users=[1], groups=[2])
@property
def uri(self):
return self.name
def stats(self, path, raise_on_fnf=True):
path = self._resolve_path(path)
try:
statobj = os.stat(path)
except OSError as ose:
if ose.errno == errno.ENOENT and not raise_on_fnf:
return None
raise
ret = dict()
ret["path"] = self._unresolve_path(path)
ret["size"] = statobj[stat.ST_SIZE]
ret["mtime"] = statobj[stat.ST_MTIME]
ret["mode"] = statobj[stat.ST_MODE]
ret["user"] = pwd.getpwuid(statobj[stat.ST_UID]).pw_name
ret["group"] = grp.getgrgid(statobj[stat.ST_GID]).gr_name
return ret
def setuser(self, user, groups=None):
pass
def status(self):
return FakeStatus()
def listdir_stats(self, path):
"""
This is an equivalent of listdir that, instead of returning file names,
returns a list of stats instead.
"""
listdir_files = self.listdir(path)
paths = [posixpath.join(path, f) for f in listdir_files]
return [self.stats(path) for path in paths]
def __repr__(self):
return "LocalFileSystem(%s)" % repr(self.root)
class FakeStatus(object):
"""
A fake implementation of HDFS health RPCs.
These follow the thrift naming conventions,
but return dicts or arrays of dicts,
because they will be encoded as JSON.
"""
def get_messages(self):
"""Warnings/lint checks."""
return [
dict(type="WARNING",message="All your base belong to us."),
dict(type="INFO", message="Hamster Dance!")
]
def get_health(self):
o = dict()
GB = 1024*1024*1024
o["bytesTotal"] = 5*GB
o["bytesUsed"] = math.floor(5*GB / 2)
o["bytesRemaining"] = 2*GB
o["bytesNonDfs"] = math.floor(GB / 2)
o["liveDataNodes"] = 13
o["deadDataNodes"] = 2
o["upgradeStatus"] = dict(version=13, percentComplete=100, finalized=True)
return o
def get_datanode_report(self):
r = []
for i in range(0, 13):
dinfo = dict()
dinfo["name"] = "fake-%d" % i
dinfo["storageID"] = "fake-id-%d" % i
dinfo["host"] = "fake-host-%d" % i
dinfo["capacity"] = 123456789
dinfo["dfsUsed"] = 23456779
dinfo["remaining"] = 100000010
dinfo["xceiverCount"] = 3
dinfo["state"] = "NORMAL_STATE"
r.append(dinfo)
for i in range(0, 2):
dinfo = dict()
dinfo["name"] = "fake-dead-%d" % i
dinfo["storageID"] = "fake-dead-id-%d" % i
dinfo["host"] = "fake-dead-host-%d" % i
dinfo["capacity"] = 523456789
dinfo["dfsUsed"] = 23456779
dinfo["remaining"] = 500000010
dinfo["xceiverCount"] = 3
dinfo["state"] = "DECOMISSION_INPROGRESS"
r.append(dinfo)
return r
|
py
|
1a560169e5ffaa15bf7771c99187dc1770159c28
|
import typing
def main() -> typing.NoReturn:
s = input()
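    # Each removable pair uses one '0' and one '1', so the best we can do is twice
    # the smaller count (comment added for clarity; a guess at the underlying task).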
print(2 * min(s.count('0'), s.count('1')))
main()
|
py
|
1a560250a939b3e8d062a7cf5397c3cd7f14f175
|
import anndata as ad
import episcanpy as epi
import scanpy as sc
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import plotly.graph_objs as go
from plotly.offline import download_plotlyjs, init_notebook_mode, plot, iplot
import plotly.io as pio
from scanpy.plotting._tools.scatterplots import _get_palette
def river_plot_2_omics(adata,
source,
target,
omic,
cell_number=False,
title='River plot (Sankey Diagram)',
save=None,
scale=4):
"""
cell number count doesn't work in this function yet.
"""
omics = list(set(adata.obs[omic]))
# print(omics[1], ' cells on the left')
adata_rna = adata[adata.obs[omic] == omics[1], :].copy()
# print(omics[0], ' cells on the right')
adata_atac = adata[adata.obs[omic] == omics[0], :].copy()
df_nodes_rna, df_links_rna = __tool_sankey(adata_rna,
source=source,
target=target,
cell_number=False)
key_infos = pd.crosstab(adata_atac.obs[target], adata_atac.obs[source])
# key_infos
label_list = key_infos.columns.tolist()
id_list = list(range(df_nodes_rna.shape[0], df_nodes_rna.shape[0] + len(label_list), 1))
nodes = [['ID', 'Label', 'Color']]
if source + '_colors' not in adata_atac.uns.keys():
adata_atac.uns[source + '_colors'] = list(_get_palette(adata_atac, source).values())
if type(adata_atac.uns[source + '_colors']) == np.ndarray:
adata_atac.uns[source + '_colors'] = adata_atac.uns[source + '_colors'].tolist()
colors = adata.uns[source + '_colors'] + adata.uns[target + '_colors']
index = 0
for number in id_list:
tmp_list = [number, label_list[index], colors[index]]
nodes.append(tmp_list)
index += 1
### merge atac nodes to rna nodes
nodes_headers = nodes.pop(0)
df_nodes = pd.DataFrame(nodes, columns=nodes_headers)
df_nodes = df_nodes_rna.append(df_nodes)
index_target = df_nodes_rna.shape[0] - len(nodes)
# print(index_target)
del nodes
### add cell number
if cell_number:
df_nodes.index = df_nodes['ID']
key_infos = pd.crosstab(adata_atac.obs[target], adata_atac.obs[source], margins=True)
atac_cell_numbers_source = ['(n=' + str(x) + ')' for x in key_infos.loc['All'].tolist()[:-1]]
atac_cell_numbers_target = key_infos['All'].tolist()[:-1]
key_infos = pd.crosstab(adata_rna.obs[target], adata_rna.obs[source], margins=True)
rna_cell_numbers_source = ['(n=' + str(x) + ')' for x in key_infos.loc['All'].tolist()[:-1]]
rna_cell_numbers_target = key_infos['All'].tolist()[:-1]
rna_cell_numbers_target = [": ".join([str(omics[1]), str(x)]) for x in rna_cell_numbers_target]
atac_cell_numbers_target = [": ".join([str(omics[0]), str(x)]) for x in atac_cell_numbers_target]
target_cell_numbers = []
index = 0
for rna_count in rna_cell_numbers_target:
target_cell_numbers.append('(' + ' & '.join([str(rna_count), str(atac_cell_numbers_target[index])]) + ')')
index += 1
total_count = rna_cell_numbers_source + target_cell_numbers + atac_cell_numbers_source
new_label = []
index = 0
for n_cells in total_count:
new_label.append(' '.join([str(df_nodes['Label'][index]), str(n_cells)]))
index += 1
df_nodes['Label'] = new_label
###### LINKS ######
key_infos_values = key_infos.values.tolist()
key_infos_index = key_infos.index.tolist()
# make the link df
links = [['Source', 'Target', 'Value', 'Link Color']]
# index_target = len(label_list)-len(key_infos.index.tolist())
# print(key_infos)
for index_value in key_infos_values:
index_source = df_nodes_rna.shape[0]
index_color = 0
for count in index_value:
tmp_list = [index_source, index_target, count, colors[index_color]]
index_source += 1
index_color += 1
links.append(tmp_list)
index_target += 1
### merge atac links to rna links
links_headers = links.pop(0)
df_links = pd.DataFrame(links, columns=links_headers)
tmp_var = df_links['Source'].tolist()
tmp_var2 = df_links['Target'].tolist()
df_links['Source'] = tmp_var2
df_links['Target'] = tmp_var
del tmp_var, tmp_var2
df_links = df_links_rna.append(df_links)
new_title = title + '\n (' + omics[1] + ' cells on the left & ' + omics[0] + ' cells on the right)'
__plot_sankey(df_nodes, df_links,
title=new_title,
save=save,
                  scale=scale)
def __tool_sankey(adata, source, target, cell_number=True):
# extract key_infos in adata
key_infos = pd.crosstab(adata.obs[target], adata.obs[source])
###### NODES ######
# transform key_infos into the nodes df
nodes = [['ID', 'Label', 'Color']]
if not cell_number:
label_list = key_infos.columns.tolist() + key_infos.index.tolist()
else:
target_cell_nb = pd.crosstab(adata.obs[target], adata.obs[target], margins=True)
source_cell_nb = pd.crosstab(adata.obs[source], adata.obs[source], margins=True)
source_names = []
for n in range(0, len(key_infos.columns.tolist())):
source_names.append(" n=".join([str(key_infos.columns.tolist()[n]), str(source_cell_nb['All'][n])]))
target_names = []
index = 0
for target_name in key_infos.index.tolist():
# print(target_name, target_cell_nb['All'][index])
target_names.append(" n=".join([target_name, str(target_cell_nb['All'][index])]))
index += 1
label_list = source_names + target_names
# print(label_list)
id_list = list(range(0, len(label_list), 1))
# Pay attention if clusters_colors or 'orig.ident_colors' missing
if source + '_colors' not in adata.uns.keys():
adata.uns[source + '_colors'] = list(_get_palette(adata, source).values())
if target + '_colors' not in adata.uns.keys():
adata.uns[target + '_colors'] = list(_get_palette(adata, target).values())
if type(adata.uns[source + '_colors']) == np.ndarray:
adata.uns[source + '_colors'] = adata.uns[source + '_colors'].tolist()
if type(adata.uns[target + '_colors']) == np.ndarray:
adata.uns[target + '_colors'] = adata.uns[target + '_colors'].tolist()
colors = adata.uns[source + '_colors'] + adata.uns[target + '_colors']
for number in id_list:
tmp_list = [number, label_list[number], colors[number]]
nodes.append(tmp_list)
###### LINKS ######
key_infos_values = key_infos.values.tolist()
key_infos_index = key_infos.index.tolist()
# make the link df
links = [['Source', 'Target', 'Value', 'Link Color']]
index_target = len(label_list) - len(key_infos.index.tolist())
for index_value in key_infos_values:
index_source = 0
for count in index_value:
tmp_list = [index_source, index_target, count, colors[index_source]]
index_source += 1
links.append(tmp_list)
index_target += 1
# Retrieve headers and build dataframes
nodes_headers = nodes.pop(0)
links_headers = links.pop(0)
df_nodes = pd.DataFrame(nodes, columns=nodes_headers)
df_links = pd.DataFrame(links, columns=links_headers)
return df_nodes, df_links
def __plot_sankey(df_nodes,
df_links,
title="Draw Sankey Diagram from dataframes",
save=None, scale=1):
"""
"""
# Sankey plot setup
data_trace = dict(
type='sankey',
domain=dict(
x=[0, 1],
y=[0, 1]
),
orientation="h",
valueformat=".0f",
node=dict(
pad=10,
# thickness = 30,
line=dict(
color="black",
width=0
),
label=df_nodes['Label'].dropna(axis=0, how='any'),
color=df_nodes['Color']
),
link=dict(
source=df_links['Source'].dropna(axis=0, how='any'),
target=df_links['Target'].dropna(axis=0, how='any'),
value=df_links['Value'].dropna(axis=0, how='any'),
color=df_links['Link Color'].dropna(axis=0, how='any'),
)
)
layout = dict(
title=title,
height=772,
font=dict(
size=10), )
fig = dict(data=[data_trace], layout=layout)
# fig.savefig('test.png')
iplot(fig, validate=False)
if save:
pio.write_image(fig, save, width=700, height=775, scale=scale)
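# Illustrative usage (an assumption, not from the original module): given an AnnData
# object whose .obs carries 'cluster', 'celltype' and 'omic' columns, something like
#     river_plot_2_omics(adata, source='cluster', target='celltype', omic='omic')
# draws one modality's cells on the left and the other's on the right.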
|
py
|
1a5602752e8a7ad4a4c6f51137506b7e55e330ac
|
import argparse
import math
from urllib.request import urlopen
import sys
import os
import subprocess
import glob
from braceexpand import braceexpand
from types import SimpleNamespace
# pip install taming-transformers works with Gumbel, but does not work with coco etc
# appending the path works with Gumbel, but gives ModuleNotFoundError: No module named 'transformers' for coco etc
sys.path.append("taming-transformers")
import os.path
from omegaconf import OmegaConf
from taming.models import cond_transformer, vqgan
import torch
from torch import nn, optim
from torch.nn import functional as F
from torchvision import transforms
from torchvision.transforms import functional as TF
torch.backends.cudnn.benchmark = (
False # NR: True is a bit faster, but can lead to OOM. False is more deterministic.
)
# torch.use_deterministic_algorithms(True) # NR: grid_sampler_2d_backward_cuda does not have a deterministic implementation
from torch_optimizer import DiffGrad, AdamP, RAdam
from perlin_numpy import generate_fractal_noise_2d
from CLIP import clip
import kornia
import kornia.augmentation as K
import numpy as np
import imageio
from PIL import ImageFile, Image, PngImagePlugin
ImageFile.LOAD_TRUNCATED_IMAGES = True
# or 'border'
global_padding_mode = "reflection"
global_aspect_width = 1
vqgan_config_table = {
"imagenet_f16_1024": "http://mirror.io.community/blob/vqgan/vqgan_imagenet_f16_1024.yaml",
"imagenet_f16_16384": "http://mirror.io.community/blob/vqgan/vqgan_imagenet_f16_16384.yaml",
"openimages_f16_8192": "https://heibox.uni-heidelberg.de/d/2e5662443a6b4307b470/files/?p=%2Fconfigs%2Fmodel.yaml&dl=1",
"coco": "https://dl.nmkd.de/ai/clip/coco/coco.yaml",
"faceshq": "https://drive.google.com/uc?export=download&id=1fHwGx_hnBtC8nsq7hesJvs-Klv-P0gzT",
"wikiart_1024": "http://mirror.io.community/blob/vqgan/wikiart.yaml",
"wikiart_16384": "http://mirror.io.community/blob/vqgan/wikiart_16384.yaml",
"sflckr": "https://heibox.uni-heidelberg.de/d/73487ab6e5314cb5adba/files/?p=%2Fconfigs%2F2020-11-09T13-31-51-project.yaml&dl=1",
}
vqgan_checkpoint_table = {
"imagenet_f16_1024": "http://mirror.io.community/blob/vqgan/vqgan_imagenet_f16_1024.ckpt",
"imagenet_f16_16384": "http://mirror.io.community/blob/vqgan/vqgan_imagenet_f16_16384.ckpt",
"openimages_f16_8192": "https://heibox.uni-heidelberg.de/d/2e5662443a6b4307b470/files/?p=%2Fckpts%2Flast.ckpt&dl=1",
"coco": "https://dl.nmkd.de/ai/clip/coco/coco.ckpt",
"faceshq": "https://app.koofr.net/content/links/a04deec9-0c59-4673-8b37-3d696fe63a5d/files/get/last.ckpt?path=%2F2020-11-13T21-41-45_faceshq_transformer%2Fcheckpoints%2Flast.ckpt",
"wikiart_1024": "http://mirror.io.community/blob/vqgan/wikiart.ckpt",
"wikiart_16384": "http://mirror.io.community/blob/vqgan/wikiart_16384.ckpt",
"sflckr": "https://heibox.uni-heidelberg.de/d/73487ab6e5314cb5adba/files/?p=%2Fcheckpoints%2Flast.ckpt&dl=1",
}
# https://stackoverflow.com/a/39662359
def isnotebook():
try:
shell = get_ipython().__class__.__name__
if shell == "ZMQInteractiveShell":
return True # Jupyter notebook or qtconsole
elif shell == "Shell":
return True # Seems to be what co-lab does
elif shell == "TerminalInteractiveShell":
return False # Terminal running IPython
else:
return False # Other type (?)
except NameError:
return False # Probably standard Python interpreter
IS_NOTEBOOK = isnotebook()
if IS_NOTEBOOK:
from IPython import display
from tqdm.notebook import tqdm
else:
from tqdm import tqdm
# file helpers
def real_glob(rglob):
glob_list = braceexpand(rglob)
files = []
for g in glob_list:
files = files + glob.glob(g)
return sorted(files)
# Functions and classes
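# sinc/lanczos/ramp build the 1-D Lanczos kernel used by resample() further down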
def sinc(x):
return torch.where(x != 0, torch.sin(math.pi * x) / (math.pi * x), x.new_ones([]))
def lanczos(x, a):
cond = torch.logical_and(-a < x, x < a)
out = torch.where(cond, sinc(x) * sinc(x / a), x.new_zeros([]))
return out / out.sum()
def ramp(ratio, width):
n = math.ceil(width / ratio + 1)
out = torch.empty([n])
cur = 0
for i in range(out.shape[0]):
out[i] = cur
cur += ratio
return torch.cat([-out[1:].flip([0]), out])[1:-1]
# NR: Testing with different initial images
def old_random_noise_image(w, h):
random_image = Image.fromarray(
np.random.randint(0, 255, (w, h, 3), dtype=np.dtype("uint8"))
)
return random_image
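# min-max scale an array into the [0, 1] range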
def NormalizeData(data):
return (data - np.min(data)) / (np.max(data) - np.min(data))
def random_noise_image(w, h):
# scale up roughly as power of 2
if w > 1024 or h > 1024:
side, octp = 2048, 7
elif w > 512 or h > 512:
side, octp = 1024, 6
elif w > 256 or h > 256:
side, octp = 512, 5
else:
side, octp = 256, 4
nr = NormalizeData(generate_fractal_noise_2d((side, side), (32, 32), octp))
ng = NormalizeData(generate_fractal_noise_2d((side, side), (32, 32), octp))
nb = NormalizeData(generate_fractal_noise_2d((side, side), (32, 32), octp))
stack = np.dstack((nr, ng, nb))
substack = stack[:h, :w, :]
    im = Image.fromarray((255.9 * substack).astype("uint8"))
return im
# testing
def gradient_2d(start, stop, width, height, is_horizontal):
if is_horizontal:
return np.tile(np.linspace(start, stop, width), (height, 1))
else:
return np.tile(np.linspace(start, stop, height), (width, 1)).T
def gradient_3d(width, height, start_list, stop_list, is_horizontal_list):
result = np.zeros((height, width, len(start_list)), dtype=float)
for i, (start, stop, is_horizontal) in enumerate(
zip(start_list, stop_list, is_horizontal_list)
):
result[:, :, i] = gradient_2d(start, stop, width, height, is_horizontal)
return result
def random_gradient_image(w, h):
array = gradient_3d(
w,
h,
(0, 0, np.random.randint(0, 255)),
(
np.random.randint(1, 255),
np.random.randint(2, 255),
np.random.randint(3, 128),
),
(True, False, False),
)
random_image = Image.fromarray(np.uint8(array))
return random_image
# Not used?
def resample(input, size, align_corners=True):
n, c, h, w = input.shape
dh, dw = size
input = input.view([n * c, 1, h, w])
if dh < h:
kernel_h = lanczos(ramp(dh / h, 2), 2).to(input.device, input.dtype)
pad_h = (kernel_h.shape[0] - 1) // 2
input = F.pad(input, (0, 0, pad_h, pad_h), "reflect")
input = F.conv2d(input, kernel_h[None, None, :, None])
if dw < w:
kernel_w = lanczos(ramp(dw / w, 2), 2).to(input.device, input.dtype)
pad_w = (kernel_w.shape[0] - 1) // 2
input = F.pad(input, (pad_w, pad_w, 0, 0), "reflect")
input = F.conv2d(input, kernel_w[None, None, None, :])
input = input.view([n, c, h, w])
return F.interpolate(input, size, mode="bicubic", align_corners=align_corners)
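# straight-through helper: the forward pass returns x_forward, while gradients
# flow back to x_backward (summed to its original shape)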
class ReplaceGrad(torch.autograd.Function):
@staticmethod
def forward(ctx, x_forward, x_backward):
ctx.shape = x_backward.shape
return x_forward
@staticmethod
def backward(ctx, grad_in):
return None, grad_in.sum_to_size(ctx.shape)
replace_grad = ReplaceGrad.apply
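# clamp in the forward pass, but zero out gradient components that would push the
# input further outside the [min, max] range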
class ClampWithGrad(torch.autograd.Function):
@staticmethod
def forward(ctx, input, min, max):
ctx.min = min
ctx.max = max
ctx.save_for_backward(input)
return input.clamp(min, max)
@staticmethod
def backward(ctx, grad_in):
(input,) = ctx.saved_tensors
return (
grad_in * (grad_in * (input - input.clamp(ctx.min, ctx.max)) >= 0),
None,
None,
)
clamp_with_grad = ClampWithGrad.apply
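# snap each latent vector to its nearest codebook entry (by L2 distance), while
# letting gradients pass straight through to the un-quantized values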
def vector_quantize(x, codebook):
d = (
x.pow(2).sum(dim=-1, keepdim=True)
+ codebook.pow(2).sum(dim=1)
- 2 * x @ codebook.T
)
indices = d.argmin(-1)
x_q = F.one_hot(indices, codebook.shape[0]).to(d.dtype) @ codebook
return replace_grad(x_q, x)
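# squared angular (great-circle) distance, up to a constant, between L2-normalised embeddings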
def spherical_dist_loss(x, y):
x = F.normalize(x, dim=-1)
y = F.normalize(y, dim=-1)
return (x - y).norm(dim=-1).div(2).arcsin().pow(2).mul(2)
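# a Prompt holds a target CLIP embedding plus a weight and a stop value; its forward
# pass scores image embeddings against that target using the spherical distance above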
class Prompt(nn.Module):
def __init__(self, embed, weight=1.0, stop=float("-inf")):
super().__init__()
self.register_buffer("embed", embed)
self.register_buffer("weight", torch.as_tensor(weight))
self.register_buffer("stop", torch.as_tensor(stop))
def forward(self, input):
input_normed = F.normalize(input.unsqueeze(1), dim=2)
embed_normed = F.normalize(self.embed.unsqueeze(0), dim=2)
dists = input_normed.sub(embed_normed).norm(dim=2).div(2).arcsin().pow(2).mul(2)
dists = dists * self.weight.sign()
return (
self.weight.abs()
* replace_grad(dists, torch.maximum(dists, self.stop)).mean()
)
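# prompts use a "text:weight:stop" syntax, e.g. "a watercolor fox:1.5:-0.5";
# missing fields default to weight 1 and stop -inf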
def parse_prompt(prompt):
vals = prompt.rsplit(":", 2)
vals = vals + ["", "1", "-inf"][len(vals) :]
# print(f"parsed vals is {vals}")
return vals[0], float(vals[1]), float(vals[2])
from typing import cast, Dict, List, Optional, Tuple, Union
# override class to get padding_mode
class MyRandomPerspective(K.RandomPerspective):
def apply_transform(
self,
input: torch.Tensor,
params: Dict[str, torch.Tensor],
transform: Optional[torch.Tensor] = None,
) -> torch.Tensor:
_, _, height, width = input.shape
transform = cast(torch.Tensor, transform)
return kornia.geometry.warp_perspective(
input,
transform,
(height, width),
mode=self.resample.name.lower(),
align_corners=self.align_corners,
padding_mode=global_padding_mode,
)
cached_spot_indexes = {}
def fetch_spot_indexes(sideX, sideY):
# make sure image is loaded if we need it
cache_key = (sideX, sideY)
if cache_key not in cached_spot_indexes:
if global_aspect_width != 1:
mask_image = Image.open("inputs/spot_wide.png")
else:
mask_image = Image.open("inputs/spot_square.png")
# this is a one channel mask
mask_image = mask_image.convert("RGB")
mask_image = mask_image.resize((sideX, sideY), Image.LANCZOS)
mask_image_tensor = TF.to_tensor(mask_image)
# print("ONE CHANNEL ", mask_image_tensor.shape)
mask_indexes = mask_image_tensor.ge(0.5).to(device)
# print("GE ", mask_indexes.shape)
# sys.exit(0)
mask_indexes_off = mask_image_tensor.lt(0.5).to(device)
cached_spot_indexes[cache_key] = [mask_indexes, mask_indexes_off]
return cached_spot_indexes[cache_key]
# n = torch.ones((3,5,5))
# f = generate.fetch_spot_indexes(5, 5)
# f[0].shape = [60,3]
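# MakeCutouts builds a batch of pooled, augmented crops of the image for CLIP scoring;
# the transforms returned by the kornia augmentations are cached in self.transforms so
# image-prompt cutouts can later be warped the same way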
class MakeCutouts(nn.Module):
def __init__(self, cut_size, cutn, cut_pow=1.0):
global global_aspect_width
super().__init__()
self.cut_size = cut_size
self.cutn = cutn
self.cut_pow = cut_pow
self.transforms = None
augmentations = []
if global_aspect_width != 1:
augmentations.append(
K.RandomCrop(
size=(self.cut_size, self.cut_size), p=1.0, return_transform=True
)
)
augmentations.append(
MyRandomPerspective(distortion_scale=0.40, p=0.7, return_transform=True)
)
augmentations.append(
K.RandomResizedCrop(
size=(self.cut_size, self.cut_size),
scale=(0.15, 0.80),
ratio=(0.75, 1.333),
cropping_mode="resample",
p=0.7,
return_transform=True,
)
)
augmentations.append(
K.ColorJitter(hue=0.1, saturation=0.1, p=0.8, return_transform=True)
)
self.augs = nn.Sequential(*augmentations)
# self.augs = nn.Sequential(
# # K.RandomHorizontalFlip(p=0.5), # NR: add augmentation options
# # K.RandomVerticalFlip(p=0.5),
# # K.RandomSolarize(0.01, 0.01, p=0.7),
# # K.RandomSharpness(0.3,p=0.4),
# # K.RandomResizedCrop(size=(self.cut_size,self.cut_size), scale=(0.1,1), ratio=(0.75,1.333), cropping_mode='resample', p=0.5, return_transform=True),
# K.RandomCrop(size=(self.cut_size,self.cut_size), p=1.0),
# # K.RandomAffine(degrees=15, translate=0.1, p=0.7, padding_mode='border', return_transform=True),
# # MyRandomPerspective(distortion_scale=0.40, p=0.7, return_transform=True),
# # K.RandomResizedCrop(size=(self.cut_size,self.cut_size), scale=(0.15,0.80), ratio=(0.75,1.333), cropping_mode='resample', p=0.7, return_transform=True),
# K.ColorJitter(hue=0.1, saturation=0.1, p=0.8, return_transform=True),
# # K.RandomErasing((.1, .4), (.3, 1/.3), same_on_batch=True, p=0.7, return_transform=True),
# )
self.noise_fac = 0.1
# Pooling
self.av_pool = nn.AdaptiveAvgPool2d((self.cut_size, self.cut_size))
self.max_pool = nn.AdaptiveMaxPool2d((self.cut_size, self.cut_size))
def forward(self, input, spot=None):
global i, global_aspect_width
sideY, sideX = input.shape[2:4]
max_size = min(sideX, sideY)
min_size = min(sideX, sideY, self.cut_size)
cutouts = []
mask_indexes = None
if spot is not None:
spot_indexes = fetch_spot_indexes(self.cut_size, self.cut_size)
if spot == 0:
mask_indexes = spot_indexes[1]
else:
mask_indexes = spot_indexes[0]
# print("Mask indexes ", mask_indexes)
for _ in range(self.cutn):
# size = int(torch.rand([])**self.cut_pow * (max_size - min_size) + min_size)
# offsetx = torch.randint(0, sideX - size + 1, ())
# offsety = torch.randint(0, sideY - size + 1, ())
# cutout = input[:, :, offsety:offsety + size, offsetx:offsetx + size]
# cutouts.append(resample(cutout, (self.cut_size, self.cut_size)))
# cutout = transforms.Resize(size=(self.cut_size, self.cut_size))(input)
# Pooling
cutout = (self.av_pool(input) + self.max_pool(input)) / 2
if mask_indexes is not None:
cutout[0][mask_indexes] = 0.5
if global_aspect_width != 1:
cutout = kornia.geometry.transform.rescale(cutout, (1, 16 / 9))
# if i % 50 == 0 and _ == 0:
# print(cutout.shape)
# TF.to_pil_image(cutout[0].cpu()).save(f"cutout_im_{i:02d}_{spot}.png")
cutouts.append(cutout)
if self.transforms is not None:
# print("Cached transforms available, but I'm not smart enough to use them")
# print(cutouts.shape)
# print(torch.cat(cutouts, dim=0).shape)
# print(self.transforms.shape)
# batch = kornia.geometry.transform.warp_affine(torch.cat(cutouts, dim=0), self.transforms, (sideY, sideX))
# batch = self.transforms @ torch.cat(cutouts, dim=0)
batch = kornia.geometry.transform.warp_perspective(
torch.cat(cutouts, dim=0),
self.transforms,
(self.cut_size, self.cut_size),
padding_mode=global_padding_mode,
)
# if i < 4:
# for j in range(4):
# TF.to_pil_image(batch[j].cpu()).save(f"cached_im_{i:02d}_{j:02d}_{spot}.png")
else:
batch, self.transforms = self.augs(torch.cat(cutouts, dim=0))
# if i < 4:
# for j in range(4):
# TF.to_pil_image(batch[j].cpu()).save(f"live_im_{i:02d}_{j:02d}_{spot}.png")
# print(batch.shape, self.transforms.shape)
if self.noise_fac:
facs = batch.new_empty([self.cutn, 1, 1, 1]).uniform_(0, self.noise_fac)
batch = batch + facs * torch.randn_like(batch)
return batch
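# instantiate the right VQGAN variant (VQModel, GumbelVQ, or the first stage of a
# Net2NetTransformer) from an OmegaConf config and load its checkpoint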
def load_vqgan_model(config_path, checkpoint_path):
global gumbel
gumbel = False
config = OmegaConf.load(config_path)
if config.model.target == "taming.models.vqgan.VQModel":
model = vqgan.VQModel(**config.model.params)
model.eval().requires_grad_(False)
model.init_from_ckpt(checkpoint_path)
elif config.model.target == "taming.models.vqgan.GumbelVQ":
model = vqgan.GumbelVQ(**config.model.params)
model.eval().requires_grad_(False)
model.init_from_ckpt(checkpoint_path)
gumbel = True
elif config.model.target == "taming.models.cond_transformer.Net2NetTransformer":
parent_model = cond_transformer.Net2NetTransformer(**config.model.params)
parent_model.eval().requires_grad_(False)
parent_model.init_from_ckpt(checkpoint_path)
model = parent_model.first_stage_model
else:
raise ValueError(f"unknown model type: {config.model.target}")
del model.loss
return model
def resize_image(image, out_size):
ratio = image.size[0] / image.size[1]
area = min(image.size[0] * image.size[1], out_size[0] * out_size[1])
size = round((area * ratio) ** 0.5), round((area / ratio) ** 0.5)
return image.resize(size, Image.LANCZOS)
def wget_file(url, out):
try:
output = subprocess.check_output(["wget", "-O", out, url])
except subprocess.CalledProcessError as cpe:
        output = cpe.output
print("Ignoring non-zero exit: ", output)
def do_init(args):
global model, opt, perceptors, normalize, cutoutsTable, cutoutSizeTable
global z, z_orig, z_targets, z_labels, z_min, z_max, init_image_tensor
global gside_X, gside_Y, overlay_image_rgba
global pmsTable, pImages, device, spotPmsTable, spotOffPmsTable
# Do it (init that is)
device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
if args.vqgan_config is not None:
vqgan_config = args.vqgan_config
vqgan_checkpoint = args.vqgan_checkpoint
else:
# the "vqgan_model" option also downloads if necessary
vqgan_config = f"models/vqgan_{args.vqgan_model}.yaml"
vqgan_checkpoint = f"models/vqgan_{args.vqgan_model}.ckpt"
if not os.path.exists(vqgan_config):
wget_file(vqgan_config_table[args.vqgan_model], vqgan_config)
if not os.path.exists(vqgan_checkpoint):
wget_file(vqgan_checkpoint_table[args.vqgan_model], vqgan_checkpoint)
model = load_vqgan_model(vqgan_config, vqgan_checkpoint).to(device)
    # parse (major, minor) as ints: the old float(torch.__version__[:3]) check
    # misreads versions like "1.10"; jitted CLIP is only wanted on torch < 1.8
    jit = tuple(int(p) for p in torch.__version__.split(".")[:2]) < (1, 8)
f = 2 ** (model.decoder.num_resolutions - 1)
for clip_model in args.clip_models:
perceptor = (
clip.load(clip_model, jit=jit)[0].eval().requires_grad_(False).to(device)
)
perceptors[clip_model] = perceptor
        # TODO: is one cut_size enough? I hope so.
cut_size = perceptor.visual.input_resolution
cutoutSizeTable[clip_model] = cut_size
if not cut_size in cutoutsTable:
make_cutouts = MakeCutouts(cut_size, args.num_cuts, cut_pow=args.cut_pow)
cutoutsTable[cut_size] = make_cutouts
toksX, toksY = args.size[0] // f, args.size[1] // f
sideX, sideY = toksX * f, toksY * f
if gumbel:
e_dim = 256
n_toks = model.quantize.n_embed
z_min = model.quantize.embed.weight.min(dim=0).values[None, :, None, None]
z_max = model.quantize.embed.weight.max(dim=0).values[None, :, None, None]
else:
e_dim = model.quantize.e_dim
n_toks = model.quantize.n_e
z_min = model.quantize.embedding.weight.min(dim=0).values[None, :, None, None]
z_max = model.quantize.embedding.weight.max(dim=0).values[None, :, None, None]
# z_min = model.quantize.embedding.weight.min(dim=0).values[None, :, None, None]
# z_max = model.quantize.embedding.weight.max(dim=0).values[None, :, None, None]
# normalize_imagenet = transforms.Normalize(mean=[0.485, 0.456, 0.406],
# std=[0.229, 0.224, 0.225])
# save sideX, sideY in globals (need if using overlay)
gside_X = sideX
gside_Y = sideY
init_image_tensor = None
# Image initialisation
if args.init_image or args.init_noise:
        # setup init image with pil
# first - always start with noise or blank
if args.init_noise == "pixels":
img = random_noise_image(args.size[0], args.size[1])
elif args.init_noise == "gradient":
img = random_gradient_image(args.size[0], args.size[1])
else:
img = Image.new(
mode="RGB", size=(args.size[0], args.size[1]), color=(255, 255, 255)
)
starting_image = img.convert("RGB")
starting_image = starting_image.resize((sideX, sideY), Image.LANCZOS)
if args.init_image:
# now we might overlay an init image (init_image also can be recycled as overlay)
if "http" in args.init_image:
init_image = Image.open(urlopen(args.init_image))
else:
init_image = Image.open(args.init_image)
# this version is needed potentially for the loss function
init_image_rgb = init_image.convert("RGB")
init_image_rgb = init_image_rgb.resize((sideX, sideY), Image.LANCZOS)
init_image_tensor = TF.to_tensor(init_image_rgb)
init_image_tensor = init_image_tensor.to(device).unsqueeze(0)
# this version gets overlaid on the background (noise)
init_image_rgba = init_image.convert("RGBA")
init_image_rgba = init_image_rgba.resize((sideX, sideY), Image.LANCZOS)
top_image = init_image_rgba.copy()
if args.init_image_alpha and args.init_image_alpha >= 0:
top_image.putalpha(args.init_image_alpha)
starting_image.paste(top_image, (0, 0), top_image)
starting_image.save("starting_image.png")
starting_tensor = TF.to_tensor(starting_image)
z, *_ = model.encode(starting_tensor.to(device).unsqueeze(0) * 2 - 1)
else:
# legacy init
one_hot = F.one_hot(
torch.randint(n_toks, [toksY * toksX], device=device), n_toks
).float()
# z = one_hot @ model.quantize.embedding.weight
if gumbel:
z = one_hot @ model.quantize.embed.weight
else:
z = one_hot @ model.quantize.embedding.weight
z = z.view([-1, toksY, toksX, e_dim]).permute(0, 3, 1, 2)
if args.overlay_every:
if args.overlay_image:
if "http" in args.overlay_image:
overlay_image = Image.open(urlopen(args.overlay_image))
else:
overlay_image = Image.open(args.overlay_image)
overlay_image_rgba = overlay_image.convert("RGBA")
overlay_image_rgba = overlay_image_rgba.resize(
(sideX, sideY), Image.LANCZOS
)
else:
overlay_image_rgba = init_image_rgba
if args.overlay_alpha:
overlay_image_rgba.putalpha(args.overlay_alpha)
overlay_image_rgba.save("overlay_image.png")
if args.target_images is not None:
z_targets = []
filelist = real_glob(args.target_images)
for target_image in filelist:
target_image = Image.open(target_image)
target_image_rgb = target_image.convert("RGB")
target_image_rgb = target_image_rgb.resize((sideX, sideY), Image.LANCZOS)
target_image_tensor = TF.to_tensor(target_image_rgb)
target_image_tensor = target_image_tensor.to(device).unsqueeze(0) * 2 - 1
z_target, *_ = model.encode(target_image_tensor)
z_targets.append(z_target)
if args.image_labels is not None:
z_labels = []
filelist = real_glob(args.image_labels)
cur_labels = []
for image_label in filelist:
image_label = Image.open(image_label)
image_label_rgb = image_label.convert("RGB")
image_label_rgb = image_label_rgb.resize((sideX, sideY), Image.LANCZOS)
image_label_rgb_tensor = TF.to_tensor(image_label_rgb)
image_label_rgb_tensor = (
image_label_rgb_tensor.to(device).unsqueeze(0) * 2 - 1
)
z_label, *_ = model.encode(image_label_rgb_tensor)
cur_labels.append(z_label)
image_embeddings = torch.stack(cur_labels)
print("Processing labels: ", image_embeddings.shape)
image_embeddings /= image_embeddings.norm(dim=-1, keepdim=True)
image_embeddings = image_embeddings.mean(dim=0)
image_embeddings /= image_embeddings.norm()
z_labels.append(image_embeddings.unsqueeze(0))
z_orig = z.clone()
z.requires_grad_(True)
pmsTable = {}
spotPmsTable = {}
spotOffPmsTable = {}
for clip_model in args.clip_models:
pmsTable[clip_model] = []
spotPmsTable[clip_model] = []
spotOffPmsTable[clip_model] = []
pImages = []
normalize = transforms.Normalize(
mean=[0.48145466, 0.4578275, 0.40821073],
std=[0.26862954, 0.26130258, 0.27577711],
)
# CLIP tokenize/encode
# NR: Weights / blending
for prompt in args.prompts:
for clip_model in args.clip_models:
pMs = pmsTable[clip_model]
perceptor = perceptors[clip_model]
txt, weight, stop = parse_prompt(prompt)
embed = perceptor.encode_text(clip.tokenize(txt).to(device)).float()
pMs.append(Prompt(embed, weight, stop).to(device))
for prompt in args.spot_prompts:
for clip_model in args.clip_models:
pMs = spotPmsTable[clip_model]
perceptor = perceptors[clip_model]
txt, weight, stop = parse_prompt(prompt)
embed = perceptor.encode_text(clip.tokenize(txt).to(device)).float()
pMs.append(Prompt(embed, weight, stop).to(device))
for prompt in args.spot_prompts_off:
for clip_model in args.clip_models:
pMs = spotOffPmsTable[clip_model]
perceptor = perceptors[clip_model]
txt, weight, stop = parse_prompt(prompt)
embed = perceptor.encode_text(clip.tokenize(txt).to(device)).float()
pMs.append(Prompt(embed, weight, stop).to(device))
for label in args.labels:
for clip_model in args.clip_models:
pMs = pmsTable[clip_model]
perceptor = perceptors[clip_model]
txt, weight, stop = parse_prompt(label)
texts = [
template.format(txt) for template in imagenet_templates
] # format with class
print(f"Tokenizing all of {texts}")
texts = clip.tokenize(texts).to(device) # tokenize
class_embeddings = perceptor.encode_text(texts) # embed with text encoder
class_embeddings /= class_embeddings.norm(dim=-1, keepdim=True)
class_embedding = class_embeddings.mean(dim=0)
class_embedding /= class_embedding.norm()
pMs.append(Prompt(class_embedding.unsqueeze(0), weight, stop).to(device))
for prompt in args.image_prompts:
path, weight, stop = parse_prompt(prompt)
img = Image.open(path)
pil_image = img.convert("RGB")
img = resize_image(pil_image, (sideX, sideY))
pImages.append(TF.to_tensor(img).unsqueeze(0).to(device))
# batch = make_cutouts(TF.to_tensor(img).unsqueeze(0).to(device))
# embed = perceptor.encode_image(normalize(batch)).float()
# pMs.append(Prompt(embed, weight, stop).to(device))
for seed, weight in zip(args.noise_prompt_seeds, args.noise_prompt_weights):
gen = torch.Generator().manual_seed(seed)
embed = torch.empty([1, perceptor.visual.output_dim]).normal_(generator=gen)
pMs.append(Prompt(embed, weight).to(device))
# Set the optimiser
if args.optimiser == "Adam":
opt = optim.Adam([z], lr=args.step_size) # LR=0.1
elif args.optimiser == "AdamW":
opt = optim.AdamW([z], lr=args.step_size) # LR=0.2
elif args.optimiser == "Adagrad":
opt = optim.Adagrad([z], lr=args.step_size) # LR=0.5+
elif args.optimiser == "Adamax":
opt = optim.Adamax([z], lr=args.step_size) # LR=0.5+?
elif args.optimiser == "DiffGrad":
opt = DiffGrad([z], lr=args.step_size) # LR=2+?
elif args.optimiser == "AdamP":
opt = AdamP([z], lr=args.step_size) # LR=2+?
elif args.optimiser == "RAdam":
opt = RAdam([z], lr=args.step_size) # LR=2+?
# Output for the user
print("Using device:", device)
print("Optimising using:", args.optimiser)
if args.prompts:
print("Using text prompts:", args.prompts)
if args.spot_prompts:
print("Using spot prompts:", args.spot_prompts)
if args.spot_prompts_off:
print("Using spot off prompts:", args.spot_prompts_off)
if args.image_prompts:
print("Using image prompts:", args.image_prompts)
if args.init_image:
print("Using initial image:", args.init_image)
if args.noise_prompt_weights:
print("Noise prompt weights:", args.noise_prompt_weights)
if args.seed is None:
seed = torch.seed()
else:
seed = args.seed
torch.manual_seed(seed)
print("Using seed:", seed)
def synth(z):
if gumbel:
z_q = vector_quantize(z.movedim(1, 3), model.quantize.embed.weight).movedim(
3, 1
) # Vector quantize
else:
z_q = vector_quantize(z.movedim(1, 3), model.quantize.embedding.weight).movedim(
3, 1
)
return clamp_with_grad(model.decode(z_q).add(1).div(2), 0, 1)
# dreaded globals (for now)
z = None
z_orig = None
z_targets = None
z_labels = None
z_min = None
z_max = None
opt = None
model = None
perceptors = {}
normalize = None
cutoutsTable = {}
cutoutSizeTable = {}
init_image_tensor = None
pmsTable = None
spotPmsTable = None
spotOffPmsTable = None
pImages = None
gside_X = None
gside_Y = None
overlay_image_rgba = None
device = None
# OK, THIS ONE IS AWFUL
i = None
@torch.no_grad()
def z_to_pil():
global z
out = synth(z)
return TF.to_pil_image(out[0].cpu())
@torch.no_grad()
def checkin(args, i, losses):
losses_str = ", ".join(f"{loss.item():g}" for loss in losses)
tqdm.write(f"i: {i}, loss: {sum(losses).item():g}, losses: {losses_str}")
info = PngImagePlugin.PngInfo()
info.add_text("comment", f"{args.prompts}")
img = z_to_pil()
img.save(args.output, pnginfo=info)
if IS_NOTEBOOK:
display.display(display.Image(args.output))
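# ascend_txt assembles the full loss list for the current iteration: CLIP prompt
# losses over augmented cutouts plus the optional init/target/label regularisers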
def ascend_txt(args):
global i, perceptors, normalize, cutoutsTable, cutoutSizeTable
global z, z_orig, z_targets, z_labels, init_image_tensor
global pmsTable, spotPmsTable, spotOffPmsTable, global_padding_mode
out = synth(z)
result = []
if i % 2 == 0:
global_padding_mode = "reflection"
else:
global_padding_mode = "border"
cur_cutouts = {}
cur_spot_cutouts = {}
cur_spot_off_cutouts = {}
for cutoutSize in cutoutsTable:
make_cutouts = cutoutsTable[cutoutSize]
cur_cutouts[cutoutSize] = make_cutouts(out)
if args.spot_prompts:
for cutoutSize in cutoutsTable:
cur_spot_cutouts[cutoutSize] = make_cutouts(out, spot=1)
if args.spot_prompts_off:
for cutoutSize in cutoutsTable:
cur_spot_off_cutouts[cutoutSize] = make_cutouts(out, spot=0)
for clip_model in args.clip_models:
perceptor = perceptors[clip_model]
cutoutSize = cutoutSizeTable[clip_model]
transient_pMs = []
if args.spot_prompts:
iii_s = perceptor.encode_image(
normalize(cur_spot_cutouts[cutoutSize])
).float()
spotPms = spotPmsTable[clip_model]
for prompt in spotPms:
result.append(prompt(iii_s))
if args.spot_prompts_off:
iii_so = perceptor.encode_image(
normalize(cur_spot_off_cutouts[cutoutSize])
).float()
spotOffPms = spotOffPmsTable[clip_model]
for prompt in spotOffPms:
result.append(prompt(iii_so))
pMs = pmsTable[clip_model]
iii = perceptor.encode_image(normalize(cur_cutouts[cutoutSize])).float()
for prompt in pMs:
result.append(prompt(iii))
# If there are image prompts we make cutouts for those each time
# so that they line up with the current cutouts from augmentation
make_cutouts = cutoutsTable[cutoutSize]
for timg in pImages:
# note: this caches and reuses the transforms - a bit of a hack but it works
if args.image_prompt_shuffle:
# print("Disabling cached transforms")
make_cutouts.transforms = None
# new way builds throwaway Prompts
batch = make_cutouts(timg)
embed = perceptor.encode_image(normalize(batch)).float()
if args.image_prompt_weight is not None:
transient_pMs.append(Prompt(embed, args.image_prompt_weight).to(device))
else:
transient_pMs.append(Prompt(embed).to(device))
for prompt in transient_pMs:
result.append(prompt(iii))
for cutoutSize in cutoutsTable:
# clear the transform "cache"
make_cutouts = cutoutsTable[cutoutSize]
make_cutouts.transforms = None
# main init_weight uses spherical loss
if args.target_images is not None:
for z_target in z_targets:
f = z.reshape(1, -1)
f2 = z_target.reshape(1, -1)
cur_loss = spherical_dist_loss(f, f2) * args.target_image_weight
result.append(cur_loss)
if args.image_labels is not None:
for z_label in z_labels:
f = z.reshape(1, -1)
f2 = z_label.reshape(1, -1)
cur_loss = spherical_dist_loss(f, f2) * args.image_label_weight
result.append(cur_loss)
# main init_weight uses spherical loss
if args.init_weight:
f = z.reshape(1, -1)
f2 = z_orig.reshape(1, -1)
cur_loss = spherical_dist_loss(f, f2) * args.init_weight
result.append(cur_loss)
# these three init_weight variants offer mse_loss, mse_loss in pixel space, and cos loss
if args.init_weight_dist:
cur_loss = F.mse_loss(z, z_orig) * args.init_weight_dist / 2
result.append(cur_loss)
if args.init_weight_pix:
if init_image_tensor is None:
print("OOPS IIT is 0")
else:
# TF.to_pil_image(out[0].cpu()).save(f"out_1.png")
# TF.to_pil_image(init_image_tensor[0].cpu()).save(f"init_1.png")
# print(out.shape)
# print(init_image_tensor.shape)
# print(out[0][0])
# print(init_image_tensor[0][0])
cur_loss = F.l1_loss(out, init_image_tensor) * args.init_weight_pix / 2
result.append(cur_loss)
if args.init_weight_cos:
f = z.reshape(1, -1)
f2 = z_orig.reshape(1, -1)
y = torch.ones_like(f[0])
cur_loss = F.cosine_embedding_loss(f, f2, y) * args.init_weight_cos
result.append(cur_loss)
if args.make_video:
img = np.array(
out.mul(255).clamp(0, 255)[0].cpu().detach().numpy().astype(np.uint8)
)[:, :, :]
img = np.transpose(img, (1, 2, 0))
imageio.imwrite(f"./steps/frame_{i:04d}.png", np.array(img))
return result
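# re-encode the current output image (with the overlay image pasted on top, if set)
# back into the latent z; called every overlay_every iterations from train()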
def re_average_z(args):
global z, gside_X, gside_Y
global model, device
# old_z = z.clone()
cur_z_image = z_to_pil()
cur_z_image = cur_z_image.convert("RGB")
if overlay_image_rgba:
# print("applying overlay image")
cur_z_image.paste(overlay_image_rgba, (0, 0), overlay_image_rgba)
cur_z_image.save("overlaid.png")
cur_z_image = cur_z_image.resize((gside_X, gside_Y), Image.LANCZOS)
new_z, *_ = model.encode(TF.to_tensor(cur_z_image).to(device).unsqueeze(0) * 2 - 1)
# t_dist = F.pairwise_distance(new_z, old_z)
with torch.no_grad():
z.copy_(new_z)
# with torch.no_grad():
# z.copy_(z.maximum(z_min).minimum(z_max))
# torch.autograd.set_detect_anomaly(True)
def train(args, i):
global z, z_min, z_max
opt.zero_grad(set_to_none=True)
lossAll = ascend_txt(args)
if i % args.display_freq == 0:
checkin(args, i, lossAll)
loss = sum(lossAll)
loss.backward()
opt.step()
if (
args.overlay_every
and i != 0
and (i % (args.overlay_every + args.overlay_offset)) == 0
):
re_average_z(args)
with torch.no_grad():
z.copy_(z.maximum(z_min).minimum(z_max))
imagenet_templates = [
"itap of a {}.",
"a bad photo of the {}.",
"a origami {}.",
"a photo of the large {}.",
"a {} in a video game.",
"art of the {}.",
"a photo of the small {}.",
]
def do_run(args):
global i
i = 0
try:
with tqdm() as pbar:
while True:
try:
train(args, i)
if i == args.iterations:
break
i += 1
pbar.update()
except RuntimeError as e:
print("Oops: runtime error: ", e)
print("Try reducing --num-cuts to save memory")
raise e
except KeyboardInterrupt:
pass
if args.make_video:
        do_video(args)
def do_video(args):
global i
# Video generation
init_frame = 1 # This is the frame where the video will start
last_frame = i # You can change i to the number of the last frame you want to generate. It will raise an error if that number of frames does not exist.
min_fps = 10
max_fps = 60
total_frames = last_frame - init_frame
length = 15 # Desired time of the video in seconds
frames = []
tqdm.write("Generating video...")
for i in range(init_frame, last_frame): #
frames.append(Image.open(f"./steps/frame_{i:04d}.png"))
# fps = last_frame/10
fps = np.clip(total_frames / length, min_fps, max_fps)
from subprocess import Popen, PIPE
import re
    output_file = re.compile(r"\.png$").sub(".mp4", args.output)
p = Popen(
[
"ffmpeg",
"-y",
"-f",
"image2pipe",
"-vcodec",
"png",
"-r",
str(fps),
"-i",
"-",
"-vcodec",
"libx264",
"-r",
str(fps),
"-pix_fmt",
"yuv420p",
"-crf",
"17",
"-preset",
"veryslow",
"-metadata",
f"comment={args.prompts}",
output_file,
],
stdin=PIPE,
)
for im in tqdm(frames):
im.save(p.stdin, "PNG")
p.stdin.close()
p.wait()
# this dictionary is used for settings in the notebook
global_clipit_settings = {}
def setup_parser():
# Create the parser
vq_parser = argparse.ArgumentParser(description="Image generation using VQGAN+CLIP")
# Add the arguments
vq_parser.add_argument(
"-p", "--prompts", type=str, help="Text prompts", default=[], dest="prompts"
)
vq_parser.add_argument(
"-sp",
"--spot",
type=str,
help="Spot Text prompts",
default=[],
dest="spot_prompts",
)
vq_parser.add_argument(
"-spo",
"--spot_off",
type=str,
help="Spot off Text prompts",
default=[],
dest="spot_prompts_off",
)
vq_parser.add_argument(
"-l", "--labels", type=str, help="ImageNet labels", default=[], dest="labels"
)
vq_parser.add_argument(
"-ip",
"--image_prompts",
type=str,
help="Image prompts",
default=[],
dest="image_prompts",
)
vq_parser.add_argument(
"-ipw",
"--image_prompt_weight",
type=float,
help="Weight for image prompt",
default=None,
dest="image_prompt_weight",
)
vq_parser.add_argument(
"-ips",
"--image_prompt_shuffle",
type=bool,
help="Shuffle image prompts",
default=False,
dest="image_prompt_shuffle",
)
vq_parser.add_argument(
"-il",
"--image_labels",
type=str,
help="Image prompts",
default=None,
dest="image_labels",
)
vq_parser.add_argument(
"-ilw",
"--image_label_weight",
type=float,
help="Weight for image prompt",
default=1.0,
dest="image_label_weight",
)
vq_parser.add_argument(
"-i",
"--iterations",
type=int,
help="Number of iterations",
default=None,
dest="iterations",
)
vq_parser.add_argument(
"-se",
"--save_every",
type=int,
help="Save image iterations",
default=50,
dest="display_freq",
)
vq_parser.add_argument(
"-ove",
"--overlay_every",
type=int,
help="Overlay image iterations",
default=None,
dest="overlay_every",
)
vq_parser.add_argument(
"-ovo",
"--overlay_offset",
type=int,
help="Overlay image iteration offset",
default=0,
dest="overlay_offset",
)
vq_parser.add_argument(
"-ovi",
"--overlay_image",
type=str,
help="Overlay image (if not init)",
default=None,
dest="overlay_image",
)
vq_parser.add_argument(
"-qua",
"--quality",
type=str,
help="draft, normal, best",
default="normal",
dest="quality",
)
vq_parser.add_argument(
"-asp",
"--aspect",
type=str,
help="widescreen, square",
default="widescreen",
dest="aspect",
)
vq_parser.add_argument(
"-ezs",
"--ezsize",
type=str,
help="small, medium, large",
default=None,
dest="ezsize",
)
vq_parser.add_argument(
"-sca",
"--scale",
type=float,
help="scale (instead of ezsize)",
default=None,
dest="scale",
)
vq_parser.add_argument(
"-ova",
"--overlay_alpha",
type=int,
help="Overlay alpha (0-255)",
default=None,
dest="overlay_alpha",
)
vq_parser.add_argument(
"-s",
"--size",
nargs=2,
type=int,
help="Image size (width height)",
default=None,
dest="size",
)
vq_parser.add_argument(
"-ii",
"--init_image",
type=str,
help="Initial image",
default=None,
dest="init_image",
)
vq_parser.add_argument(
"-iia",
"--init_image_alpha",
type=int,
help="Init image alpha (0-255)",
default=200,
dest="init_image_alpha",
)
vq_parser.add_argument(
"-in",
"--init_noise",
type=str,
help="Initial noise image (pixels or gradient)",
default="pixels",
dest="init_noise",
)
vq_parser.add_argument(
"-ti",
"--target_images",
type=str,
help="Target images",
default=None,
dest="target_images",
)
vq_parser.add_argument(
"-tiw",
"--target_image_weight",
type=float,
help="Target images weight",
default=1.0,
dest="target_image_weight",
)
vq_parser.add_argument(
"-iw",
"--init_weight",
type=float,
help="Initial weight (main=spherical)",
default=None,
dest="init_weight",
)
vq_parser.add_argument(
"-iwd",
"--init_weight_dist",
type=float,
help="Initial weight dist loss",
default=0.0,
dest="init_weight_dist",
)
vq_parser.add_argument(
"-iwc",
"--init_weight_cos",
type=float,
help="Initial weight cos loss",
default=0.0,
dest="init_weight_cos",
)
vq_parser.add_argument(
"-iwp",
"--init_weight_pix",
type=float,
help="Initial weight pix loss",
default=0.0,
dest="init_weight_pix",
)
vq_parser.add_argument(
"-m",
"--clip_models",
type=str,
help="CLIP model",
default=None,
dest="clip_models",
)
vq_parser.add_argument(
"-vqgan",
"--vqgan_model",
type=str,
help="VQGAN model",
default="imagenet_f16_16384",
dest="vqgan_model",
)
vq_parser.add_argument(
"-conf",
"--vqgan_config",
type=str,
help="VQGAN config",
default=None,
dest="vqgan_config",
)
vq_parser.add_argument(
"-ckpt",
"--vqgan_checkpoint",
type=str,
help="VQGAN checkpoint",
default=None,
dest="vqgan_checkpoint",
)
vq_parser.add_argument(
"-nps",
"--noise_prompt_seeds",
nargs="*",
type=int,
help="Noise prompt seeds",
default=[],
dest="noise_prompt_seeds",
)
vq_parser.add_argument(
"-npw",
"--noise_prompt_weights",
nargs="*",
type=float,
help="Noise prompt weights",
default=[],
dest="noise_prompt_weights",
)
vq_parser.add_argument(
"-lr",
"--learning_rate",
type=float,
help="Learning rate",
default=0.2,
dest="step_size",
)
vq_parser.add_argument(
"-cuts",
"--num_cuts",
type=int,
help="Number of cuts",
default=None,
dest="num_cuts",
)
vq_parser.add_argument(
"-cutp",
"--cut_power",
type=float,
help="Cut power",
default=1.0,
dest="cut_pow",
)
vq_parser.add_argument(
"-sd", "--seed", type=int, help="Seed", default=None, dest="seed"
)
vq_parser.add_argument(
"-opt",
"--optimiser",
type=str,
help="Optimiser (Adam, AdamW, Adagrad, Adamax, DiffGrad, AdamP or RAdam)",
default="Adam",
dest="optimiser",
)
vq_parser.add_argument(
"-o",
"--output",
type=str,
help="Output file",
default="output.png",
dest="output",
)
vq_parser.add_argument(
"-vid",
"--video",
type=bool,
help="Create video frames?",
default=False,
dest="make_video",
)
vq_parser.add_argument(
"-d",
"--deterministic",
type=bool,
help="Enable cudnn.deterministic?",
default=False,
dest="cudnn_determinism",
)
return vq_parser
square_size = [144, 144]
widescreen_size = [200, 112] # at the small size this becomes 192,112
def process_args(vq_parser, namespace=None):
global global_aspect_width
    if namespace is None:
# command line: use ARGV to get args
args = vq_parser.parse_args()
else:
# notebook, ignore ARGV and use dictionary instead
args = vq_parser.parse_args(args=[], namespace=namespace)
if args.cudnn_determinism:
torch.backends.cudnn.deterministic = True
quality_to_clip_models_table = {
"draft": "ViT-B/32",
"normal": "ViT-B/32,ViT-B/16",
"better": "RN50,ViT-B/32,ViT-B/16",
"best": "RN50x4,ViT-B/32,ViT-B/16",
}
quality_to_iterations_table = {
"draft": 200,
"normal": 350,
"better": 500,
"best": 500,
}
quality_to_scale_table = {"draft": 1, "normal": 2, "better": 3, "best": 4}
    # this should be replaced with logic that does something
# smart based on available memory (eg: size, num_models, etc)
quality_to_num_cuts_table = {"draft": 40, "normal": 40, "better": 40, "best": 40}
if args.quality not in quality_to_clip_models_table:
print("Qualitfy setting not understood, aborting -> ", argz.quality)
exit(1)
if args.clip_models is None:
args.clip_models = quality_to_clip_models_table[args.quality]
if args.iterations is None:
args.iterations = quality_to_iterations_table[args.quality]
if args.num_cuts is None:
args.num_cuts = quality_to_num_cuts_table[args.quality]
if args.ezsize is None and args.scale is None:
args.scale = quality_to_scale_table[args.quality]
size_to_scale_table = {"small": 1, "medium": 2, "large": 4}
aspect_to_size_table = {"square": [150, 150], "widescreen": [200, 112]}
# determine size if not set
if args.size is None:
size_scale = args.scale
if size_scale is None:
if args.ezsize in size_to_scale_table:
size_scale = size_to_scale_table[args.ezsize]
else:
print("EZ Size not understood, aborting -> ", argz.ezsize)
exit(1)
if args.aspect in aspect_to_size_table:
base_size = aspect_to_size_table[args.aspect]
base_width = int(size_scale * base_size[0])
base_height = int(size_scale * base_size[1])
args.size = [base_width, base_height]
else:
print("aspect not understood, aborting -> ", argz.aspect)
exit(1)
if args.aspect == "widescreen":
global_aspect_width = 16 / 9
if args.init_noise.lower() == "none":
args.init_noise = None
# Split text prompts using the pipe character
if args.prompts:
args.prompts = [phrase.strip() for phrase in args.prompts.split("|")]
# Split text prompts using the pipe character
if args.spot_prompts:
args.spot_prompts = [phrase.strip() for phrase in args.spot_prompts.split("|")]
# Split text prompts using the pipe character
if args.spot_prompts_off:
args.spot_prompts_off = [
phrase.strip() for phrase in args.spot_prompts_off.split("|")
]
# Split text labels using the pipe character
if args.labels:
args.labels = [phrase.strip() for phrase in args.labels.split("|")]
# Split target images using the pipe character
if args.image_prompts:
args.image_prompts = args.image_prompts.split("|")
args.image_prompts = [image.strip() for image in args.image_prompts]
# legacy "spread mode" removed
# if args.init_weight is not None:
# args.init_weight_pix = args.init_weight
# args.init_weight_cos = args.init_weight
# args.init_weight_dist = args.init_weight
if args.overlay_every is not None and args.overlay_every <= 0:
args.overlay_every = None
clip_models = args.clip_models.split(",")
args.clip_models = [model.strip() for model in clip_models]
# Make video steps directory
if args.make_video:
if not os.path.exists("steps"):
os.mkdir("steps")
return args
def reset_settings():
global global_clipit_settings
global_clipit_settings = {}
def add_settings(**kwargs):
global global_clipit_settings
for k, v in kwargs.items():
if v is None:
# just remove the key if it is there
global_clipit_settings.pop(k, None)
else:
global_clipit_settings[k] = v
def apply_settings():
global global_clipit_settings
settingsDict = None
vq_parser = setup_parser()
if len(global_clipit_settings) > 0:
# check for any bogus entries in the settings
dests = [d.dest for d in vq_parser._actions]
for k in global_clipit_settings:
if not k in dests:
raise ValueError(
f"Requested setting not found, aborting: {k}={global_clipit_settings[k]}"
)
# convert dictionary to easyDict
# which can be used as an argparse namespace instead
# settingsDict = easydict.EasyDict(global_clipit_settings)
settingsDict = SimpleNamespace(**global_clipit_settings)
settings = process_args(vq_parser, settingsDict)
return settings
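# Illustrative notebook-style use of the settings helpers above (the prompt text
# and quality value here are only examples):
#   reset_settings()
#   add_settings(prompts="a watercolor fox", quality="draft")
#   settings = apply_settings()
#   do_init(settings)
#   do_run(settings)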
def main():
settings = apply_settings()
do_init(settings)
do_run(settings)
if __name__ == "__main__":
main()
|
py
|
1a56038c8136ec0da18ae5b097290b08dfaa59c0
|
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2012, Red Hat, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Unit Tests for nova.cert.rpcapi
"""
from oslo.config import cfg
from nova.cert import rpcapi as cert_rpcapi
from nova import context
from nova.openstack.common import rpc
from nova import test
CONF = cfg.CONF
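# Each test stubs out rpc.call, invokes the CertAPI method under test, and then checks
# that the recorded context, topic and message match what the rpcapi client should build.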
class CertRpcAPITestCase(test.NoDBTestCase):
def _test_cert_api(self, method, **kwargs):
ctxt = context.RequestContext('fake_user', 'fake_project')
rpcapi = cert_rpcapi.CertAPI()
expected_retval = 'foo'
expected_version = kwargs.pop('version', rpcapi.BASE_RPC_API_VERSION)
expected_msg = rpcapi.make_msg(method, **kwargs)
expected_msg['version'] = expected_version
self.call_ctxt = None
self.call_topic = None
self.call_msg = None
self.call_timeout = None
def _fake_call(_ctxt, _topic, _msg, _timeout):
self.call_ctxt = _ctxt
self.call_topic = _topic
self.call_msg = _msg
self.call_timeout = _timeout
return expected_retval
self.stubs.Set(rpc, 'call', _fake_call)
retval = getattr(rpcapi, method)(ctxt, **kwargs)
self.assertEqual(retval, expected_retval)
self.assertEqual(self.call_ctxt, ctxt)
self.assertEqual(self.call_topic, CONF.cert_topic)
self.assertEqual(self.call_msg, expected_msg)
self.assertIsNone(self.call_timeout)
def test_revoke_certs_by_user(self):
self._test_cert_api('revoke_certs_by_user', user_id='fake_user_id')
# NOTE(russellb) Havana compat
self.flags(cert='havana', group='upgrade_levels')
self._test_cert_api('revoke_certs_by_user', user_id='fake_user_id',
version='1.0')
def test_revoke_certs_by_project(self):
self._test_cert_api('revoke_certs_by_project',
project_id='fake_project_id')
# NOTE(russellb) Havana compat
self.flags(cert='havana', group='upgrade_levels')
self._test_cert_api('revoke_certs_by_project',
project_id='fake_project_id', version='1.0')
def test_revoke_certs_by_user_and_project(self):
self._test_cert_api('revoke_certs_by_user_and_project',
user_id='fake_user_id',
project_id='fake_project_id')
# NOTE(russellb) Havana compat
self.flags(cert='havana', group='upgrade_levels')
self._test_cert_api('revoke_certs_by_user_and_project',
user_id='fake_user_id',
project_id='fake_project_id', version='1.0')
def test_generate_x509_cert(self):
self._test_cert_api('generate_x509_cert',
user_id='fake_user_id',
project_id='fake_project_id')
# NOTE(russellb) Havana compat
self.flags(cert='havana', group='upgrade_levels')
self._test_cert_api('generate_x509_cert',
user_id='fake_user_id',
project_id='fake_project_id', version='1.0')
def test_fetch_ca(self):
self._test_cert_api('fetch_ca', project_id='fake_project_id')
# NOTE(russellb) Havana compat
self.flags(cert='havana', group='upgrade_levels')
self._test_cert_api('fetch_ca', project_id='fake_project_id',
version='1.0')
def test_fetch_crl(self):
self._test_cert_api('fetch_crl', project_id='fake_project_id')
# NOTE(russellb) Havana compat
self.flags(cert='havana', group='upgrade_levels')
self._test_cert_api('fetch_crl', project_id='fake_project_id',
version='1.0')
def test_decrypt_text(self):
self._test_cert_api('decrypt_text',
project_id='fake_project_id', text='blah')
# NOTE(russellb) Havana compat
self.flags(cert='havana', group='upgrade_levels')
self._test_cert_api('decrypt_text',
project_id='fake_project_id', text='blah',
version='1.0')
|