filename | text
---|---
the-stack_0_21123 | '''Views which are mapped from the URL objects in urls.py
.. moduleauthor:: Aaron France <[email protected]>
:platform: All
:synopsis: Module which contains view functions that are mapped from urls
'''
import datetime
from django.http import HttpResponse, Http404, HttpResponseRedirect
from django.shortcuts import render_to_response
from django.template import RequestContext
from django.core.mail import send_mail
from django.template.loader import get_template
from django.template import Context
from django.conf import settings
from django.views.decorators.csrf import csrf_protect
from django.core.urlresolvers import reverse
from timetracker.tracker.models import Tbluser, UserForm, TrackingEntry
from timetracker.tracker.models import Tblauthorization as tblauth
from timetracker.tracker.forms import EntryForm, AddForm, Login
from timetracker.utils.calendar_utils import (gen_calendar, gen_holiday_list,
ajax_add_entry,
ajax_change_entry,
ajax_delete_entry, ajax_error,
get_user_data, delete_user,
useredit, mass_holidays,
profile_edit, gen_datetime_cal,
get_comments, add_comment,
remove_comment,
get_tracking_entry_data)
from timetracker.utils.datemaps import (generate_select,
generate_employee_box,
generate_year_box)
from timetracker.utils.decorators import admin_check, loggedin
from timetracker.loggers import suspicious_log, email_log, error_log, debug_log
def user_context_manager(request):
    '''Context processor which always puts certain variables into the
    template context. This is because all pages require certain
    pieces of data, so it's easier to push this work down to one shared context processor.
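
    A minimal sketch of how such a processor would typically be wired up in
    the Django settings (the dotted path below is an assumption about where
    this module lives):

    .. code-block:: python

        TEMPLATE_CONTEXT_PROCESSORS = (
            "django.contrib.auth.context_processors.auth",
            "timetracker.views.user_context_manager",
        )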
'''
try:
user = Tbluser.objects.get(id=request.session.get("user_id"))
except Tbluser.DoesNotExist:
return {}
return {
"user": user,
"logged_in_user": user,
"welcome_name": user.firstname,
"is_admin": user.super_or_admin(),
"is_team_leader": user.is_tl(),
"can_view_jobcodes": user.can_view_jobcodes(),
"is_super": user.is_super(),
"balance": user.get_normalized_balance(),
"doculink": settings.DOCUMENTATION_BASE_URL,
"approval_notifications": user.approval_notifications()
}
def index(request):
"""This function serves the base login page. This view detects if the
user is logged in. If so, redirects, else, serves them the login
page.
    This function shouldn't be called directly; its invocation is automatic.
:param request: Automatically passed. Contains a map of the httprequest
:return: A HttpResponse object which is then passed to the browser
"""
try:
user = Tbluser.objects.get(id=request.session.get("user_id"))
except Tbluser.DoesNotExist:
return render_to_response('index.html',
{'login': Login()},
RequestContext(request))
if request.session.get("user_id"):
user = Tbluser.objects.get(id=request.session.get("user_id"))
if user.sup_tl_or_admin():
return HttpResponseRedirect("/overtime/")
if user.is_indeng():
return HttpResponseRedirect(reverse("timetracker.industrial_engineering.views.costbuckets"))
return HttpResponseRedirect("/calendar/")
def login(request):
"""This function logs the user in, directly adding the session id to a
database entry. This function is invoked from the url mapped in
urls.py. The url is POSTed to, and should contain two fields, the
use_name and the pass word field. This is then pulled from the
database and matched against, what the user supplied. If they
match, the user is then checked to see what *kind* of user their
are, if they are ADMIN or TEAML they will be sent to the
administrator view. Else they will be sent to the user page.
This function shouldn't be directly called, it's invocation is
automatic from the url mappings.
:param request: Automatically passed. Contains a map of the httprequest
:return: A HttpResponse object which is then passed to the browser
"""
user_id = request.POST.get('user_name')
if not user_id:
return HttpResponseRedirect("/") # pragma: no cover
try:
# pull out the user from the POST and
# match it against our db
user = Tbluser.objects.get(user_id=user_id)
# if the user doesn't match anything, notify
except Tbluser.DoesNotExist: # pragma: no cover
return render_to_response(
"fail.html",
{
"fail": "Login failure",
"reason":"Non existent user.",
"helpfultext":"If you expect your account to be " + \
"active please contact your manager " + \
"or a site administrator."
},
RequestContext(request))
if user.isdisabled():
return render_to_response(
"fail.html",
{
"fail": "Login failure",
"reason":"Your account is disabled.",
"helpfultext":"You will need to request " + \
"re-activation from your manager."
},
RequestContext(request))
if user.validate_password(request.POST['password']):
# if all goes well, send to the tracker
request.session['user_id'] = user.id
if user.sup_tl_or_admin():
return HttpResponseRedirect("/overtime/")
else:
return HttpResponseRedirect("/calendar/")
else:
return render_to_response(
"fail.html",
{
"fail": "Login failure",
"reason":"Incorrect password",
"helpfultext":"You can receive a reminder <a href=\"/" + \
"forgot_my_password/\">here</a>"
},
RequestContext(request))
def logout(request):
"""Simple logout function
    This function will delete the user's id from the session dictionary
    so that the user will need to log back in to access the same
    pages.
:param request: Automatically passed contains a map of the httprequest
:return: A HttpResponse object which is passed to the browser.
"""
try:
del request.session['user_id']
except KeyError:
pass
return HttpResponseRedirect("/")
@loggedin
def user_view(request, year=None, month=None, day=None):
"""Generates a calendar based on the URL it receives. For example:
domain.com/calendar/{year}/{month}/{day}, also takes a day just in
case you want to add a particular view for a day, for
    example. Currently a day-level view is not in use.
:note: The generated HTML should be pretty printed
:param request: Automatically passed contains a map of the httprequest
:param year: The year that the view will be rendered with, default is
the current year.
:param month: The month that the view will be rendered with, default is
the current month.
:param day: The day that the view will be rendered with, default is
the current day
:returns: A HttpResponse object which is passed to the browser.
"""
year = datetime.datetime.today().year if year is None else year
month = datetime.datetime.today().month if month is None else month
day = datetime.datetime.today().day if day is None else day
user_id = request.session['user_id']
calendar_table = gen_calendar(year, month, day,
user=user_id)
user_object = Tbluser.objects.get(id=user_id)
return render_to_response(
'calendar.html',
{
'calendar': calendar_table,
'changeform': EntryForm(),
'addform': AddForm(),
},
RequestContext(request)
)
@csrf_protect
def ajax(request):
"""Ajax request handler, dispatches to specific ajax functions
depending on what json gets sent.
Any additional ajax views should be added to the ajax_funcs map,
this will allow the dispatch function to be used. Future revisions
could have a kind of decorator which could be applied to functions
to mutate some global map of ajax dispatch functions. For now,
however, just add them into the map.
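
    One possible (untested) sketch of such a decorator, assuming a
    module-level registry dict:

    .. code-block:: python

        AJAX_FUNCS = {}

        def register_ajax(func):
            # register the handler under its function name
            AJAX_FUNCS[func.__name__] = func
            return func

        @register_ajax
        def some_handler(request):
            ...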
The idea for this is that on the client-side call you would
construct your javascript call with something like the below
(using jQuery):
.. code-block:: javascript
$.ajaxSetup({
type: 'POST',
url: '/ajax/',
dataType: 'json'
});\n
$.ajax({
data: {
form: 'functionName',
data: 'data'
}
});
    Using this method allows us to construct a single view url
    and have all ajax requests come through here. This is highly
    advantageous because then we don't have to create a url map and
construct views to handle that specific call. We just have some
server-side map and route through there.
The lookup and dispatch works like this:
1) Request comes through.
2) Request gets sent to the ajax view due to the client-side call making a
request to the url mapped to this view.
3) The form type is detected in the json data sent along with the call.
    4) This string is then pulled out of the dict, the matching function is
       executed and its response is sent back to the browser.
:param request: Automatically passed contains a map of the httprequest
:return: HttpResponse object back to the browser.
"""
# see which form we're dealing with and if it's in the POST
form_type = request.POST.get('form_type', None)
# if not, try the GET
if not form_type:
form_type = request.GET.get('form_type', None)
#if there isn't one, we'll send an error back
if not form_type:
return ajax_error("Missing Form")
# this could be mutated with a @register_ajax
# decorator or something
ajax_funcs = {
'add': ajax_add_entry,
'change': ajax_change_entry,
'delete': ajax_delete_entry,
'admin_get': gen_calendar,
'get_user_data': get_user_data,
'useredit': useredit,
'delete_user': delete_user,
'mass_holidays': mass_holidays,
'profileedit': profile_edit,
'get_comments': get_comments,
'add_comment': add_comment,
'remove_comment': remove_comment,
'tracking_data': get_tracking_entry_data,
'password_reminder': forgot_pass,
}
try:
return ajax_funcs.get(
form_type,ajax_error
)(request)
except Exception as e: # pragma: no cover
error_log.error(str(e))
raise
@admin_check
def view_with_employee_list(request, template=None, get_all=False): # pragma: no cover
    '''Some pages are generic HTML pages with only a small number of
    differences between them. We use this to generate the employee select
box and assign the regularly used template variables for these
templates.
'''
user = Tbluser.objects.get(
id=request.session.get("user_id", None)
)
try:
ees = user.get_subordinates(get_all=get_all)
employees_select = generate_employee_box(user, get_all=get_all)
except tblauth.DoesNotExist:
ees = []
employees_select = """<select id=user_select>
<option id="null">----------</option>
</select>"""
return render_to_response(
template,
{
"employees": ees,
"user_form": UserForm(),
"employee_option_list": employees_select
},
RequestContext(request)
)
@loggedin
def view_with_holiday_list(request,
year=None,
month=None,
process=None,
template=None,
admin_required=False):
"""Generates the full holiday table for all employees under a manager
or a user's teammates if they are a regular user.
:param request: Automatically passed contains a map of the httprequest
:return: HttpResponse object back to the browser.
"""
user = Tbluser.objects.get(id=request.session.get('user_id'))
if admin_required and not user.sup_tl_or_admin():
raise Http404
year = datetime.datetime.today().year if year is None else year
month = datetime.datetime.today().month if month is None else month
# django urls parse to unicode objects
year, month = int(year), int(month)
holiday_table, comments_list, js_calendar = gen_holiday_list(user,
year,
month,
process)
# calculate the days in the month, this is inefficient.
# It creates a list of datetime objects and gets the len
# of that. Being lazy.
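    # A lighter-weight alternative (untested sketch) would be the stdlib
    # calendar module, e.g.:
    #   import calendar
    #   days_this_month = range(1, calendar.monthrange(year, month)[1] + 1)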
days_this_month = range(1, len(gen_datetime_cal(year, month))+1)
return render_to_response(
template,
{
'holiday_table': holiday_table,
'comments_list': comments_list,
'days_this_month': days_this_month,
'employee_select': generate_employee_box(user),
'js_calendar': js_calendar,
},
RequestContext(request))
@admin_check
def yearview(request, who=None, year=None): # pragma: no cover
'''Yearview generates the 'year at a glance' for both Administrators
and regular users.
    :param who: This will be the ID of the employee which the yearview
        will be generated for. If the employee is not within the span
        of control then a 404 will be raised.'''
auth_user = Tbluser.objects.get(
id=request.session.get('user_id')
)
if not year:
year = str(datetime.datetime.now().year)
if not who:
try:
userid = auth_user.get_subordinates()[0].id
return HttpResponseRedirect("/yearview/%s/%s/" % (userid, year))
except (tblauth.DoesNotExist, IndexError):
return HttpResponseRedirect("/user_edit/")
# stop people from editing the URL to access agents outside their
# span of control.
try:
target_user = auth_user.get_subordinates().get(id=who)
except Tbluser.DoesNotExist:
raise Http404
# generate our year table.
yeartable = target_user.yearview(year)
# interpolate our values into it.
yeartable = yeartable.format(employees_select=generate_employee_box(auth_user),
c="EMPTY",
function="")
return render_to_response("yearview.html",
{"yearview_table": yeartable,
"year": year,
"eeid": who,
}, RequestContext(request))
@admin_check
def overtime(request, who=None, year=None):
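    '''Generates the overtime table for a single employee.

    :param who: The ID of the employee whose overtime view is generated; if
        the employee is not within the span of control a 404 is raised.
    :param year: The year to render; defaults to the current year.'''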
auth_user = Tbluser.objects.get(
id=request.session.get('user_id')
)
if not year:
year = str(datetime.datetime.now().year)
if not who:
try:
userid = auth_user.get_subordinates()[0].id
return HttpResponseRedirect("/overtime/%s/%s/" % (userid, year))
except (tblauth.DoesNotExist, IndexError):
return HttpResponseRedirect("/user_edit/")
# stop people from editing the URL to access agents outside their
# span of control.
try:
target_user = auth_user.get_subordinates().get(id=who)
except Tbluser.DoesNotExist:
raise Http404
# generate our year table.
ot_table = target_user.overtime_view(year)
# interpolate our values into it.
ot_table = ot_table.format(employees_select=generate_employee_box(auth_user),
yearbox=generate_year_box(int(year), id="cmb_yearbox"),
c="EMPTY",
function="")
return render_to_response("overtime.html",
{"ot_table": ot_table,
"year": year,
"eeid": who,
}, RequestContext(request))
@loggedin
def edit_profile(request):
"""View for sending the user to the edit profile page
This view is a simple set of fields which allow all kinds of users
to edit pieces of information about their profile, currently it
    allows users to edit their name and their password.
:param request: Automatically passed contains a map of the httprequest
:return: HttpResponse object back to the browser.
"""
user = Tbluser.objects.get(id=request.session.get("user_id"))
return render_to_response("editprofile.html",
{'firstname': user.firstname,
'lastname': user.lastname,
},
RequestContext(request))
@loggedin
def explain(request):
"""Renders the Balance explanation page.
This page renders a simple template to show the users how their
balance is calculated. This view takes the user object, retrieves
a couple of fields, which are user.shiftlength and the associated
values with that datetime object, constructs a string with them
    and passes it to the template as the user's 'shiftlength'
attribute. It then takes the count of working days in the database
so that the user has an idea of how many days they have tracked
altogether. Then it calculates their total balance and pushes all
these strings into the template.
:param request: Automatically passed contains a map of the httprequest
:return: HttpResponse object back to the browser.
"""
user = Tbluser.objects.get(id=request.session.get("user_id"))
return render_to_response(
"balance.html",
{'firstname': user.firstname,
'lastname': user.lastname,
'shiftlength': "%s:%s" % (user.shiftlength.hour,
user.shiftlength.minute),
'working_days': TrackingEntry.objects.filter(user=user.id).count(),
'balances': user.balance_breakdown(),
'holiday_balances': user.get_balances(datetime.datetime.now().year),
},
RequestContext(request))
def forgot_pass(request):
"""Simple view for resetting a user's password
This view has a dual function. The first function is to simply
render the initial page which has a field and the themed
markup. On this page a user can enter their e-mail address and
then click submit to have their password sent to them.
The second function of this page is to respond to the change
password request. In the html markup of the 'forgotpass.html' page
you will see that the intention is to have the page post to the
same URL which this page was rendered from. If the request
contains POST information then we retrieve that user from the
database, construct an e-mail based on that and send their
password to them. Finally, we redirect to the login page.
:param request: Automatically passed contains a map of the httprequest
:return: HttpResponse object back to the browser.
"""
# if the email recipient isn't in the POST dict,
# then we've got a non-post request
email_recipient = request.POST.get("email_input", None)
if not email_recipient:
return render_to_response("forgotpass.html",
{},
RequestContext(request))
try:
try:
user = Tbluser.objects.get(id=email_recipient)
except ValueError:
user = Tbluser.objects.get(user_id=email_recipient)
except Tbluser.DoesNotExist:
return render_to_response(
"fail.html",
{
"fail": "Login failure",
"reason":"Non existent user.",
"helpfultext":"If you expect your account to be " + \
"active please contact your manager " + \
"or a site administrator."
},
RequestContext(request))
user.set_random_password()
user.send_password_reminder()
return HttpResponseRedirect("/")
|
the-stack_0_21124 | import requests
from bs4 import BeautifulSoup as bs
from random import choice
# from selenium import webdriver
# PATH = 'C:\Program Files (x86)/chromedriver.exe'
# driver = webdriver.Chrome(PATH)
# driver.get('https://auth.nfx.com/login?state=hKFo2SBxeGxWUjlDSUpyV29kSGhyc3lMM3d1T0h1UnZ2R1AtYaFupWxvZ2luo3RpZNkgTnlrRV9LRXJOWm92SjVGRGI3RUZuOFVERk9KOXZqMDajY2lk2SBWaTJFd28wblc2ZmxLUXpPME5CYzhFMFl2ZUJqaktsVQ&client=Vi2Ewo0nW6flKQzO0NBc8E0YveBjjKlU&protocol=oauth2&audience=https%3A%2F%2Fnfxsignal-production.auth0.com%2Fapi%2Fv2%2F&scope=openid%20email%20profile&response_type=token%20id_token&redirect_uri=https%3A%2F%2Fsignal.nfx.com%2Flogin&connection=username-password&login_hint=&connection_scope=https%3A%2F%2Fwww.googleapis.com%2Fauth%2Fgmail.metadata&access_type=offline&nonce=QQC.uNugsqtA7A37PESQd0~QT35rY.a.&auth0Client=eyJuYW1lIjoiYXV0aDAuanMiLCJ2ZXJzaW9uIjoiOS4xMC4xIn0%3D')
def proxy_scraper():
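    """Scrape http://sslproxies.org/ and return a list of 'ip:port' strings.

    Note: the `proxies` dict built near the end is illustrative only; the
    function currently returns the full list of scraped addresses.
    """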
response = requests.get("http://sslproxies.org/")
soup = bs(response.content, "html.parser")
table = soup.find('tbody')
ips = table.select('tr > td')[::8]
ports = table.select('tr > td')[1::8]
complete_ip = []
for index in range(len(ips)):
complete_ip.append(ips[index].contents[0] + ':' + ports[index].contents[0])
proxies = {
'http': 'http://' + choice(complete_ip),
'https': 'https://' + choice(complete_ip)
}
return complete_ip
# print(proxies)
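# A minimal (untested) sketch of using the scraped addresses with requests,
# assuming the site returned at least one proxy:
#
#   ips = proxy_scraper()
#   proxy = {"http": "http://" + choice(ips), "https": "https://" + choice(ips)}
#   requests.get("https://httpbin.org/ip", proxies=proxy, timeout=10)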
|
the-stack_0_21125 | """
Simple returner for CouchDB. Optional configuration
settings are listed below, along with sane defaults:
.. code-block:: yaml
couchdb.db: 'salt'
couchdb.url: 'http://salt:5984/'
Alternative configuration values can be used by prefacing the configuration.
Any values not found in the alternative configuration will be pulled from
the default location:
.. code-block:: yaml
alternative.couchdb.db: 'salt'
alternative.couchdb.url: 'http://salt:5984/'
To use the couchdb returner, append ``--return couchdb`` to the salt command. Example:
.. code-block:: bash
salt '*' test.ping --return couchdb
To use the alternative configuration, append ``--return_config alternative`` to the salt command.
.. versionadded:: 2015.5.0
.. code-block:: bash
salt '*' test.ping --return couchdb --return_config alternative
To override individual configuration items, append --return_kwargs '{"key:": "value"}' to the salt command.
.. versionadded:: 2016.3.0
.. code-block:: bash
salt '*' test.ping --return couchdb --return_kwargs '{"db": "another-salt"}'
On concurrent database access
==============================
As this returner creates a couchdb document with the salt job id as document id
and as only one document with a given id can exist in a given couchdb database,
it is advised for most setups that every minion be configured to write to it own
database (the value of ``couchdb.db`` may be suffixed with the minion id),
otherwise multi-minion targeting can lead to losing output:
* the first returning minion is able to create a document in the database
* other minions fail with ``{'error': 'HTTP Error 409: Conflict'}``
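
A per-minion database name is just a plain string in the minion config, for
example (the exact name below is illustrative only):

.. code-block:: yaml

    couchdb.db: 'salt_minion01'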
"""
import logging
import time
from urllib.error import HTTPError
from urllib.request import HTTPHandler as _HTTPHandler
from urllib.request import Request as _Request
from urllib.request import build_opener as _build_opener
import salt.returners
import salt.utils.jid
import salt.utils.json
log = logging.getLogger(__name__)
# Define the module's virtual name
__virtualname__ = "couchdb"
def __virtual__():
return __virtualname__
def _get_options(ret=None):
"""
Get the couchdb options from salt.
"""
attrs = {"url": "url", "db": "db"}
_options = salt.returners.get_returner_options(
__virtualname__, ret, attrs, __salt__=__salt__, __opts__=__opts__
)
if "url" not in _options:
log.debug("Using default url.")
_options["url"] = "http://salt:5984/"
if "db" not in _options:
log.debug("Using default database.")
_options["db"] = "salt"
return _options
def _generate_doc(ret):
"""
    Create an object that will be saved into the database based on
options.
"""
# Create a copy of the object that we will return.
retc = ret.copy()
# Set the ID of the document to be the JID.
retc["_id"] = ret["jid"]
# Add a timestamp field to the document
retc["timestamp"] = time.time()
return retc
def _request(method, url, content_type=None, _data=None):
"""
    Makes an HTTP request. Returns the parsed JSON, or an object with an error.
"""
opener = _build_opener(_HTTPHandler)
request = _Request(url, data=_data)
if content_type:
request.add_header("Content-Type", content_type)
request.get_method = lambda: method
try:
handler = opener.open(request)
except HTTPError as exc:
return {"error": "{}".format(exc)}
return salt.utils.json.loads(handler.read())
def returner(ret):
"""
Take in the return and shove it into the couchdb database.
"""
options = _get_options(ret)
# Check to see if the database exists.
_response = _request("GET", options["url"] + "_all_dbs")
if options["db"] not in _response:
# Make a PUT request to create the database.
_response = _request("PUT", options["url"] + options["db"])
# Confirm that the response back was simple 'ok': true.
if "ok" not in _response or _response["ok"] is not True:
log.error("Unable to create database '%s'", options["db"])
log.error("Nothing logged! Lost data.")
return
log.info("Created database '%s'", options["db"])
# Call _generate_doc to get a dict object of the document we're going to
# shove into the database.
doc = _generate_doc(ret)
# Make the actual HTTP PUT request to create the doc.
_response = _request(
"PUT",
options["url"] + options["db"] + "/" + doc["_id"],
"application/json",
salt.utils.json.dumps(doc),
)
# Sanity check regarding the response..
if "ok" not in _response or _response["ok"] is not True:
log.error("Unable to create document: '%s'", _response)
log.error("Nothing logged! Lost data.")
def get_jid(jid):
"""
Get the document with a given JID.
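
    A minimal usage sketch, assuming this returner is configured as the
    master job cache:

    .. code-block:: bash

        salt-run jobs.lookup_jid <jid>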
"""
options = _get_options(ret=None)
_response = _request("GET", options["url"] + options["db"] + "/" + jid)
if "error" in _response:
log.error("Unable to get JID '%s' : '%s'", jid, _response)
return {}
return {_response["id"]: _response}
def get_jids():
"""
List all the jobs that we have..
"""
options = _get_options(ret=None)
_response = _request(
"GET", options["url"] + options["db"] + "/_all_docs?include_docs=true"
)
# Make sure the 'total_rows' is returned.. if not error out.
if "total_rows" not in _response:
log.error("Didn't get valid response from requesting all docs: %s", _response)
return {}
# Return the rows.
ret = {}
for row in _response["rows"]:
# Because this shows all the documents in the database, including the
# design documents, verify the id is salt jid
jid = row["id"]
if not salt.utils.jid.is_jid(jid):
continue
ret[jid] = salt.utils.jid.format_jid_instance(jid, row["doc"])
return ret
def get_fun(fun):
"""
Return a dict with key being minion and value
being the job details of the last run of function 'fun'.
"""
# Get the options..
options = _get_options(ret=None)
# Define a simple return object.
_ret = {}
# get_minions takes care of calling ensure_views for us.
# For each minion we know about
for minion in get_minions():
# Make a query of the by-minion-and-timestamp view and limit the count
# to 1.
_response = _request(
"GET",
options["url"] + options["db"] + "/_design/salt/_view/by-minion-fun-times"
'tamp?descending=true&endkey=["{0}","{1}'
'",0]&startkey=["{0}","{1}",9999999999]&'
"limit=1".format(minion, fun),
)
# Skip the minion if we got an error..
if "error" in _response:
log.warning(
"Got an error when querying for last command by a minion: %s",
_response["error"],
)
continue
# Skip the minion if we didn't get any rows back. ( IE function that
# they're looking for has a typo in it or some such ).
if len(_response["rows"]) < 1:
continue
        # Set the response ..
_ret[minion] = _response["rows"][0]["value"]
return _ret
def get_minions():
"""
Return a list of minion identifiers from a request of the view.
"""
options = _get_options(ret=None)
# Make sure the views are valid, which includes the minions..
if not ensure_views():
return []
# Make the request for the view..
_response = _request(
"GET", options["url"] + options["db"] + "/_design/salt/_view/minions?group=true"
)
# Verify that we got a response back.
if "rows" not in _response:
log.error("Unable to get available minions: %s", _response)
return []
# Iterate over the rows to build up a list return it.
_ret = []
for row in _response["rows"]:
_ret.append(row["key"])
return _ret
def ensure_views():
"""
This function makes sure that all the views that should
exist in the design document do exist.
"""
# Get the options so we have the URL and DB..
options = _get_options(ret=None)
# Make a request to check if the design document exists.
_response = _request("GET", options["url"] + options["db"] + "/_design/salt")
# If the document doesn't exist, or for some reason there are not views.
if "error" in _response:
return set_salt_view()
# Determine if any views are missing from the design doc stored on the
# server.. If we come across one, simply set the salt view and return out.
    # set_salt_view will set all the views, so we don't need to continue to
    # check.
for view in get_valid_salt_views():
if view not in _response["views"]:
return set_salt_view()
# Valid views, return true.
return True
def get_valid_salt_views():
"""
Returns a dict object of views that should be
part of the salt design document.
"""
ret = {}
ret["minions"] = {}
ret["minions"]["map"] = "function( doc ){ emit( doc.id, null ); }"
ret["minions"]["reduce"] = "function( keys,values,rereduce ){ return key[0]; }"
ret["by-minion-fun-timestamp"] = {}
ret["by-minion-fun-timestamp"][
"map"
] = "function( doc ){ emit( [doc.id,doc.fun,doc.timestamp], doc ); }"
return ret
def set_salt_view():
"""
Helper function that sets the salt design
document. Uses get_valid_salt_views and some hardcoded values.
"""
options = _get_options(ret=None)
# Create the new object that we will shove in as the design doc.
new_doc = {}
new_doc["views"] = get_valid_salt_views()
new_doc["language"] = "javascript"
# Make the request to update the design doc.
_response = _request(
"PUT",
options["url"] + options["db"] + "/_design/salt",
"application/json",
salt.utils.json.dumps(new_doc),
)
if "error" in _response:
log.warning("Unable to set the salt design document: %s", _response["error"])
return False
return True
def prep_jid(nocache=False, passed_jid=None): # pylint: disable=unused-argument
"""
Do any work necessary to prepare a JID, including sending a custom id
"""
return passed_jid if passed_jid is not None else salt.utils.jid.gen_jid(__opts__)
def save_minions(jid, minions, syndic_id=None): # pylint: disable=unused-argument
"""
Included for API consistency
"""
|
the-stack_0_21126 | from typing import Union
from molten import Route, Include, HTTP_201, HTTPError, HTTP_404, HTTP_409
from sqlalchemy.exc import IntegrityError
from runcible import APIResponse
from runcible.error import EntityNotFound
from .model import User
from .manager import UserManager
def create_user(user: User, user_manager: UserManager) -> User:
    try:
        _user = user_manager.create_user(user)
except IntegrityError as err: # noqa: F841
raise HTTPError(
HTTP_409,
APIResponse(
status=409,
message=f"User email {user.email} or {user.display_name} already in use.",
),
)
headers = {"Location": _user.href}
return HTTP_201, _user, headers
def get_user_by_display_name(
display_name: str, user_manager: UserManager
) -> Union[User, APIResponse]:
try:
user = user_manager.get_user_by_display_name(display_name)
except EntityNotFound as err:
raise HTTPError(HTTP_404, APIResponse(status=404, message=err.message))
return user
user_routes = Include(
"/users",
[
Route("", create_user, method="POST"),
Route("/{display_name}", get_user_by_display_name, method="GET"),
],
)
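# A minimal sketch of mounting these routes into an application (the
# component wiring for UserManager is assumed to exist elsewhere):
#
#   from molten import App
#
#   app = App(routes=[user_routes], components=[...])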
|
the-stack_0_21129 | '''
scans
=====
The following methods allow for interaction with the Nessus Professional scan API.
Methods available on ``nessus.scans``:
.. rst-class:: hide-signature
.. autoclass:: ScanAPI
.. automethod:: list
.. automethod:: export_scan
'''
from .base import NessusEndpoint
import time
from io import BytesIO
class ScanAPI(NessusEndpoint):
def list(self):
'''
Retrieves the list of scans.
Args:
Returns:
:obj:`list`:
A list of scan resources.
Examples:
>>> for scan in nessus.scans.list():
... pprint(scan)
'''
return self._api.get('scans').json()['scans']
def export_scan(self, id: int, fobj=None, export_format="nessus"):
'''
Downloads scan results to file
Args:
:id:`int`:
ID of the scan result
:fobj:`BytesIO`:
File-like object to store results.
:export_format:`string`:
Format of the scan result.
Returns:
:obj:`BytesIO`:
Scan results in a file-like object.
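        Examples:
            A minimal sketch, writing the export to a local file:

            >>> with open('example.nessus', 'wb') as report:
            ...     nessus.scans.export_scan(1, report)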
'''
# Request download for scan id
resp = self._api.post(f'scans/{id}/export',
json={'format': export_format}).json()
token = resp['token']
file_id = resp['file']
# Check status of requested download
while True:
status = self._api.get('scans/{}/export/{}/status'.format(
self._check('id', id, int), file_id)).json()['status']
if status == 'ready':
break
time.sleep(1)
# if no file-like object was passed, then we will instantiate a BytesIO
# object to push the file into.
if not fobj:
fobj = BytesIO()
# Download scan
resp = self._api.get('scans/{}/export/{}/download'.format(
self._check('id', id, int), file_id), stream=True)
# Lets stream the file into the file-like object...
for chunk in resp.iter_content(chunk_size=1024):
if chunk:
fobj.write(chunk)
fobj.seek(0)
resp.close()
return fobj |
the-stack_0_21134 | import flask
from flask_praetorian.exceptions import PraetorianError
def current_guard():
"""
Fetches the current instance of flask-praetorian that is attached to the
current flask app
"""
guard = flask.current_app.extensions.get('praetorian', None)
PraetorianError.require_condition(
guard is not None,
"No current guard found; Praetorian must be initialized first",
)
return guard
def add_jwt_data_to_app_context(jwt_data):
"""
Adds a dictionary of jwt data (presumably unpacked from a token) to the
top of the flask app's context
"""
ctx = flask._app_ctx_stack.top
ctx.jwt_data = jwt_data
def get_jwt_data_from_app_context():
"""
Fetches a dict of jwt token data from the top of the flask app's context
"""
ctx = flask._app_ctx_stack.top
jwt_data = getattr(ctx, 'jwt_data', None)
PraetorianError.require_condition(
jwt_data is not None,
"""
No jwt_data found in app context.
Make sure @auth_required decorator is specified *first* for route
""",
)
return jwt_data
def remove_jwt_data_from_app_context():
"""
Removes the dict of jwt token data from the top of the flask app's context
"""
ctx = flask._app_ctx_stack.top
del ctx.jwt_data
def current_user_id():
"""
This method returns the user id retrieved from jwt token data attached to
the current flask app's context
"""
jwt_data = get_jwt_data_from_app_context()
user_id = jwt_data.get('id')
PraetorianError.require_condition(
user_id is not None,
"Could not fetch an id for the current user",
)
return user_id
def current_user():
"""
This method returns a user instance for jwt token data attached to the
current flask app's context
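
    A minimal usage sketch inside a protected endpoint (guard setup and the
    ``auth_required`` decorator are assumed to be configured elsewhere)::

        @app.route('/whoami')
        @flask_praetorian.auth_required
        def whoami():
            return flask.jsonify(id=current_user().id)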
"""
user_id = current_user_id()
guard = current_guard()
user = guard.user_class.identify(user_id)
PraetorianError.require_condition(
user is not None,
"Could not identify the current user from the current id",
)
return user
def current_rolenames():
"""
This method returns the names of all roles associated with the current user
"""
jwt_data = get_jwt_data_from_app_context()
if 'rls' not in jwt_data:
# This is necessary so our set arithmetic works correctly
return set(['non-empty-but-definitely-not-matching-subset'])
else:
return set(r.strip() for r in jwt_data['rls'].split(','))
|
the-stack_0_21135 | import json
from dbt.adapters.factory import get_adapter
from dbt.compat import basestring
import dbt.clients.jinja
import dbt.context.common
import dbt.exceptions
import dbt.flags
import dbt.parser
import dbt.utils
from dbt.logger import GLOBAL_LOGGER as logger # noqa
execute = True
def ref(model, project, profile, flat_graph):
current_project = project.get('name')
def do_ref(*args):
target_model_name = None
target_model_package = None
if len(args) == 1:
target_model_name = args[0]
elif len(args) == 2:
target_model_package, target_model_name = args
else:
dbt.exceptions.ref_invalid_args(model, args)
target_model = dbt.parser.resolve_ref(
flat_graph,
target_model_name,
target_model_package,
current_project,
model.get('package_name'))
if target_model is None:
dbt.exceptions.ref_target_not_found(
model,
target_model_name,
target_model_package)
target_model_id = target_model.get('unique_id')
if target_model_id not in model.get('depends_on', {}).get('nodes'):
dbt.exceptions.ref_bad_context(model,
target_model_name,
target_model_package)
if dbt.utils.get_materialization(target_model) == 'ephemeral':
model['extra_ctes'][target_model_id] = None
adapter = get_adapter(profile)
return dbt.utils.Relation(profile, adapter, target_model)
return do_ref
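# Inside a compiled model, the closure returned above backs template calls such
# as {{ ref('my_model') }} or {{ ref('some_package', 'my_model') }} (the model
# and package names here are illustrative only).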
class Config:
def __init__(self, model):
self.model = model
def __call__(*args, **kwargs):
return ''
def set(self, name, value):
return self.__call__({name: value})
def _validate(self, validator, value):
validator(value)
def require(self, name, validator=None):
if name not in self.model['config']:
dbt.exceptions.missing_config(self.model, name)
to_return = self.model['config'][name]
if validator is not None:
self._validate(validator, to_return)
return to_return
def get(self, name, validator=None, default=None):
to_return = self.model['config'].get(name, default)
if validator is not None and default is not None:
self._validate(validator, to_return)
return to_return
def generate(model, project, flat_graph):
return dbt.context.common.generate(
model, project, flat_graph, dbt.context.runtime)
|
the-stack_0_21136 | from KratosMultiphysics import Parameters
from KratosMultiphysics import Vector
from KratosMultiphysics import Matrix
from KratosMultiphysics import FileSerializer, StreamSerializer, SerializerTraceType
import KratosMultiphysics.KratosUnittest as KratosUnittest
import KratosMultiphysics.kratos_utilities as kratos_utils
# Use cPickle on Python 2.7 (Note that only the cPickle module is supported on Python 2.7)
# Source: https://pybind11.readthedocs.io/en/stable/advanced/classes.html
pickle_message = ""
try:
import cPickle as pickle
have_pickle_module = True
except ImportError:
try:
import pickle
have_pickle_module = True
except ImportError:
have_pickle_module = False
pickle_message = "No pickle module found"
# Import copy
import copy
# input string with ugly formatting
json_string = """
{
"bool_value" : true, "double_value": 2.0, "int_value" : 10,
"level1":
{
"list_value":[ 3, "hi", false],
"tmp" : 5.0
},
"string_value" : "hello"
}
"""
pretty_out = """{
"bool_value": true,
"double_value": 2.0,
"int_value": 10,
"level1": {
"list_value": [
3,
"hi",
false
],
"tmp": 5.0
},
"string_value": "hello"
}"""
pretty_out_after_change = """{
"bool_value": true,
"double_value": 2.0,
"int_value": 10,
"level1": {
"list_value": [
"changed",
"hi",
false
],
"tmp": 5.0
},
"string_value": "hello"
}"""
# here the level1 var is set to a double so that a validation error should be thrown
wrong_type = """{
"bool_value": true,
"double_value": 2.0,
"int_value": 10,
"level1": 0.0,
"string_value": "hello"
}"""
# int value is badly spelt
wrong_spelling = """{
"bool_value": true,
"double_value": 2.0,
"int_values": 10,
"level1": 0.0,
"string_value": "hello"
}"""
# wrong on the first level
# error shall only be detected by recursive validation
wrong_lev2 = """{
"bool_value": true,
"double_value": 2.0,
"int_value": 10,
"level1": { "a":0.0 },
"string_value": "hello"
}"""
defaults = """
{
"bool_value": false,
"double_value": 2.0,
"int_value": 10,
"level1": {
"list_value": [
3,
"hi",
false
],
"tmp": "here we expect a string"
},
"new_default_obj": {
"aaa": "string",
"bbb": false,
"ccc": 22
},
"new_default_value": -123.0,
"string_value": "hello"
}
"""
incomplete = """
{
"level1": {
},
"new_default_obj": {
"aaa": "string",
"bbb": false,
"ccc": 22
},
"new_default_value": -123.0,
"string_value": "hello"
}"""
incomplete_with_extra_parameter = """
{
"level1": {
"new_sublevel": "this should only be assigned in recursive"
},
"new_default_obj": {
"aaa": "string",
"bbb": false,
"ccc": 22
},
"new_default_value": -123.0,
"string_value": "hello"
}"""
expected_validation_output = """{
"bool_value": true,
"double_value": 2.0,
"int_value": 10,
"level1": {
"list_value": [
3,
"hi",
false
],
"tmp": 5.0
},
"new_default_obj": {
"aaa": "string",
"bbb": false,
"ccc": 22
},
"new_default_value": -123.0,
"string_value": "hello"
}"""
four_levels = """{
"bool_value": true,
"double_value": 2.0,
"int_value": 10,
"level1": {
"level2": {
"level3": {
"level4": {
}
}
}
},
"string_value": "hello"
}"""
four_levels_variation = """{
"bool_value": true,
"double_value": 2.0,
"int_value": 10,
"level1": {
"a":11.0,
"level2": {
"level3": {
"level4": {
}
}
}
},
"string_value": "hello"
}"""
four_levels_wrong_variation = """{
"int_value": 10,
"double_value": "hi",
"bool_value": true,
"string_value": "hello",
"level1": {
"a":11.0,
"level2": {
"level3": {
"level4": {
}
}
}
}
}"""
four_levels_defaults = """{
"bool_value": true,
"double_value": 2.0,
"int_value": 10,
"level1": {
"a":1.0,
"level2": {
"b":2.0,
"level3": {
"c":3.0,
"level4": {
"d":4.0
}
}
}
},
"string_value": "hello"
}"""
class TestParameters(KratosUnittest.TestCase):
def setUp(self):
self.kp = Parameters(json_string)
self.compact_expected_output = """{"bool_value":true,"double_value":2.0,"int_value":10,"level1":{"list_value":[3,"hi",false],"tmp":5.0},"string_value":"hello"}"""
def test_kratos_parameters(self):
self.assertEqual(
self.kp.WriteJsonString(),
self.compact_expected_output
)
self.assertTrue(self.kp.Has("int_value"))
self.assertFalse(self.kp.Has("unextisting_value"))
self.assertEqual(self.kp["int_value"].GetInt(), 10)
self.assertEqual(self.kp["double_value"].GetDouble(), 2.0)
self.assertEqual(self.kp["bool_value"].GetBool(), True)
self.assertEqual(self.kp["string_value"].GetString(), "hello")
self.assertEqual(self.kp.PrettyPrintJsonString(), pretty_out)
def test_kratos_change_parameters(self):
# now change one item in the sublist
subparams = self.kp["level1"]
my_list = subparams["list_value"]
for i in range(my_list.size()):
if my_list[i].IsBool():
self.assertEqual(my_list[i].GetBool(), False)
# my_list = subparams["list_value"]
subparams["list_value"][0].SetString("changed")
self.assertEqual(
self.kp.PrettyPrintJsonString(),
pretty_out_after_change
)
def test_kratos_copy_parameters(self):
# try to make a copy
original_out = self.kp.PrettyPrintJsonString()
other_copy = self.kp.Clone()
self.assertEqual(
other_copy.PrettyPrintJsonString(),
original_out
)
other_copy["int_value"].SetInt(-1)
self.assertEqual(self.kp["int_value"].GetInt(), 10)
# self.assertEqual(other_copy["int_value").GetString(),-1)
def test_set_value(self):
kp = Parameters(json_string)
kp1 = Parameters(pretty_out_after_change)
kp["bool_value"] = kp1["level1"]
kp["bool_value"].PrettyPrintJsonString()
self.assertEqual(kp["bool_value"].PrettyPrintJsonString(), kp1["level1"].PrettyPrintJsonString())
def test_kratos_wrong_parameters(self):
# should check which errors are thrown!!
with self.assertRaisesRegex(RuntimeError, "no_value"):
self.kp["no_value"].GetInt()
def test_validation_fails_due_to_wrong_type(self):
kp = Parameters(wrong_type)
defaults_params = Parameters(defaults)
# should check which errors are thrown!!
with self.assertRaises(RuntimeError):
kp.ValidateAndAssignDefaults(defaults_params)
def test_validation_fails_due_to_wrong_spelling(self):
kp = Parameters(wrong_spelling)
defaults_params = Parameters(defaults)
# should check which errors are thrown!!
with self.assertRaises(RuntimeError):
kp.ValidateAndAssignDefaults(defaults_params)
def test_recursive_validation_fails_error_on_first_level(self):
kp = Parameters(wrong_lev2)
defaults_params = Parameters(defaults)
# should check which errors are thrown!!
with self.assertRaises(RuntimeError):
kp.RecursivelyValidateAndAssignDefaults(defaults_params)
def test_recursive_validation_4_levels(self):
kp = Parameters(four_levels)
kp_variation = Parameters(four_levels_variation)
        kp_wrong_variation = Parameters(four_levels_wrong_variation)
defaults_params = Parameters(four_levels_defaults)
kp.RecursivelyValidateAndAssignDefaults(defaults_params)
kp_variation.RecursivelyValidateAndAssignDefaults(defaults_params)
self.assertTrue( kp.IsEquivalentTo(defaults_params) )
self.assertFalse( kp_variation.IsEquivalentTo(defaults_params) )
self.assertTrue( kp.HasSameKeysAndTypeOfValuesAs(defaults_params) )
self.assertTrue( kp_variation.HasSameKeysAndTypeOfValuesAs(defaults_params) )
        self.assertFalse( kp_wrong_variation.HasSameKeysAndTypeOfValuesAs(defaults_params) )
def test_validation_succeds_error_on_first_level(self):
kp = Parameters(wrong_lev2)
defaults_params = Parameters(defaults)
# here no error shall be thrown since validation is only done on level0
kp.ValidateAndAssignDefaults(defaults_params)
def test_validation_succeeds(self):
kp = Parameters(json_string)
defaults_params = Parameters(defaults)
defaults_params["level1"]["tmp"].SetDouble(2.0) # this does not coincide with the value in kp, but is of the same type
kp.ValidateAndAssignDefaults(defaults_params)
self.assertEqual(kp.PrettyPrintJsonString(), expected_validation_output)
self.assertEqual(kp["level1"]["tmp"].GetDouble(), 5.0) # not 2, since kp overwrites the defaults
def test_add_missing_parameters(self):
# only missing parameters are added, no complaints if there already exist more than in the defaults
kp = Parameters(json_string)
tmp = Parameters(incomplete_with_extra_parameter)
kp.AddMissingParameters(tmp)
self.assertEqual(kp["new_default_obj"]["aaa"].GetString(), "string")
self.assertEqual(kp["string_value"].GetString(), "hello")
self.assertFalse(kp["level1"].Has("new_sublevel"))
def test_recursively_add_missing_parameters(self):
# only missing parameters are added, no complaints if there already exist more than in the defaults
kp = Parameters(json_string)
tmp = Parameters(incomplete_with_extra_parameter)
kp.RecursivelyAddMissingParameters(tmp)
self.assertTrue(kp["level1"].Has("new_sublevel"))
self.assertEqual(kp["level1"]["new_sublevel"].GetString(), "this should only be assigned in recursive")
def test_validate_defaults(self):
# only parameters from defaults are validated, no new values are added
kp = Parameters(incomplete_with_extra_parameter)
tmp = Parameters(defaults)
kp.ValidateDefaults(tmp)
self.assertFalse(kp.Has("bool_value"))
self.assertFalse(kp.Has("double_value"))
self.assertTrue(kp.Has("level1"))
def test_recursively_validate_defaults(self):
# only parameters from defaults are validated, no new values are added
kp = Parameters(incomplete)
tmp = Parameters(defaults)
kp.RecursivelyValidateDefaults(tmp)
self.assertFalse(kp.Has("bool_value"))
self.assertFalse(kp.Has("double_value"))
self.assertTrue(kp.Has("level1"))
def test_recursively_validate_defaults_fails(self):
# only parameters from defaults are validated, no new values are added
kp = Parameters(incomplete_with_extra_parameter)
tmp = Parameters(defaults)
with self.assertRaises(RuntimeError):
kp.RecursivelyValidateDefaults(tmp)
# sub_level
self.assertFalse(kp["level1"].Has("tmp"))
def test_add_value(self):
kp = Parameters("{}")
kp.AddEmptyValue("new_double").SetDouble(1.0)
self.assertTrue(kp.Has("new_double"))
self.assertEqual(kp["new_double"].GetDouble(), 1.0)
def test_add_empty_array(self):
kp = Parameters("{}")
kp.AddEmptyArray("new_array")
self.assertTrue(kp.Has("new_array"))
self.assertEqual(kp["new_array"].size(), 0)
def test_iterators(self):
kp = Parameters(json_string)
#iteration by range
nitems = 0
for iterator in kp:
nitems = nitems + 1
self.assertEqual(nitems, 5)
#iteration by items
for key,value in kp.items():
#print(value.PrettyPrintJsonString())
self.assertEqual(kp[key].PrettyPrintJsonString(), value.PrettyPrintJsonString())
#print(key,value)
#testing values
expected_values = ['true', '2.0', '10', '{"list_value":[3,"hi",false],"tmp":5.0}','"hello"']
counter = 0
for value in kp.values():
self.assertEqual(value.WriteJsonString(), expected_values[counter])
counter += 1
#testing values
expected_keys = ['bool_value', 'double_value', 'int_value', 'level1', 'string_value']
counter = 0
for key in kp.keys():
self.assertEqual(key, expected_keys[counter])
counter += 1
def test_remove_value(self):
kp = Parameters(json_string)
self.assertTrue(kp.Has("int_value"))
self.assertTrue(kp.Has("level1"))
kp.RemoveValue("int_value")
kp.RemoveValue("level1")
self.assertFalse(kp.Has("int_value"))
self.assertFalse(kp.Has("level1"))
def test_copy_deepcopy(self):
kp = Parameters(json_string)
# Copy values
kp_copy1 = kp.__copy__()
kp_copy2 = copy.copy(kp)
kp_deepcopy1 = kp.__deepcopy__()
kp_deepcopy2 = copy.deepcopy(kp)
# Check is the same
self.assertTrue(kp.Has("int_value"))
self.assertTrue(kp.Has("level1"))
self.assertTrue(kp_copy1.Has("int_value"))
self.assertTrue(kp_copy1.Has("level1"))
self.assertTrue(kp_copy2.Has("int_value"))
self.assertTrue(kp_copy2.Has("level1"))
self.assertTrue(kp_deepcopy1.Has("int_value"))
self.assertTrue(kp_deepcopy1.Has("level1"))
self.assertTrue(kp_deepcopy2.Has("int_value"))
self.assertTrue(kp_deepcopy2.Has("level1"))
# Remove values
kp.RemoveValue("int_value")
kp.RemoveValue("level1")
# Check the deep copies is the same
self.assertFalse(kp.Has("int_value"))
self.assertFalse(kp.Has("level1"))
self.assertFalse(kp_copy1.Has("int_value"))
self.assertFalse(kp_copy1.Has("level1"))
self.assertFalse(kp_copy2.Has("int_value"))
self.assertFalse(kp_copy2.Has("level1"))
self.assertTrue(kp_deepcopy1.Has("int_value"))
self.assertTrue(kp_deepcopy1.Has("level1"))
self.assertTrue(kp_deepcopy2.Has("int_value"))
self.assertTrue(kp_deepcopy2.Has("level1"))
def test_is_methods(self):
# This method checks all the "IsXXX" Methods
tmp = Parameters("""{
"int_value" : 10, /* This is comment to check that comments work */
"double_value": 2.0, // This is comment too, but using another comment
"bool_value" : true, // This is another comment being meta as realizing that all the possibilities are already check
"string_value" : "hello",/* This is a nihilist comment about the futile existence of the previous comment as a metacomment */
"vector_value" : [5,3,4],
"matrix_value" : [[1,2],[3,6]]
}""") # if you add more values to this, make sure to add the corresponding in the loop
for key in tmp.keys():
val_type = key[:-6] # removing "_value"
if val_type == "int":
self.assertTrue(tmp[key].IsInt())
else:
self.assertFalse(tmp[key].IsInt())
if val_type == "double":
self.assertTrue(tmp[key].IsDouble())
else:
self.assertFalse(tmp[key].IsDouble())
if val_type == "bool":
self.assertTrue(tmp[key].IsBool())
else:
self.assertFalse(tmp[key].IsBool())
if val_type == "string":
self.assertTrue(tmp[key].IsString())
else:
self.assertFalse(tmp[key].IsString())
if val_type == "vector":
self.assertTrue(tmp[key].IsVector())
else:
self.assertFalse(tmp[key].IsVector())
if val_type == "matrix":
self.assertTrue(tmp[key].IsMatrix())
else:
self.assertFalse(tmp[key].IsMatrix())
def test_get_methods(self):
# This method checks all the "GetXXX" Methods if they throw an error
tmp = Parameters("""{
"int_value" : 10,
"double_value": 2.0,
"bool_value" : true,
"string_value" : "hello",
"vector_value" : [5.2,-3.1,4.33],
"matrix_value" : [[1,2],[3,4],[5,6]]
}""") # if you add more values to this, make sure to add the corresponding in the loop
for key in tmp.keys():
val_type = key[:-6] # removing "_value"
            # Int and Double are checked together because both internally call "IsNumber"
if val_type == "int" or val_type == "double":
if val_type == "int":
self.assertEqual(tmp[key].GetInt(),10)
else:
with self.assertRaises(RuntimeError):
tmp[key].GetInt()
if val_type == "double" or val_type == "int":
if val_type == "double":
self.assertEqual(tmp[key].GetDouble(),2.0)
else:
with self.assertRaises(RuntimeError):
tmp[key].GetDouble()
if val_type == "bool":
self.assertEqual(tmp[key].GetBool(),True)
else:
with self.assertRaises(RuntimeError):
tmp[key].GetBool()
if val_type == "string":
self.assertEqual(tmp[key].GetString(),"hello")
else:
with self.assertRaises(RuntimeError):
tmp[key].GetString()
if val_type == "vector":
V = tmp[key].GetVector()
self.assertEqual(V[0],5.2)
self.assertEqual(V[1],-3.1)
self.assertEqual(V[2],4.33)
else:
with self.assertRaises(RuntimeError):
tmp[key].GetVector()
if val_type == "matrix":
A = tmp[key].GetMatrix()
self.assertEqual(A[0,0],1.0)
self.assertEqual(A[0,1],2.0)
self.assertEqual(A[1,0],3.0)
self.assertEqual(A[1,1],4.0)
self.assertEqual(A[2,0],5.0)
self.assertEqual(A[2,1],6.0)
else:
with self.assertRaises(RuntimeError):
tmp[key].GetMatrix()
def test_set_methods(self):
# This method checks all the "GetXXX" Methods if they throw an error
tmp = Parameters("""{
"int_value" : 0,
"double_value": 0.0,
"bool_value" : false,
"string_value" : "",
"vector_value" : [],
"matrix_value" : [[0]]
}""") # if you add more values to this, make sure to add the corresponding in the loop
for key in tmp.keys():
val_type = key[:-6] # removing "_value"
            # Int and Double are checked together because both internally call "IsNumber"
if val_type == "int" or val_type == "double":
if val_type == "int":
tmp[key].SetInt(10)
self.assertEqual(tmp[key].GetInt(),10)
else:
with self.assertRaises(RuntimeError):
tmp[key].GetInt()
if val_type == "double" or val_type == "int":
if val_type == "double":
tmp[key].SetDouble(2.0)
self.assertEqual(tmp[key].GetDouble(),2.0)
else:
with self.assertRaises(RuntimeError):
tmp[key].GetDouble()
if val_type == "bool":
tmp[key].SetBool(True)
self.assertEqual(tmp[key].GetBool(),True)
else:
with self.assertRaises(RuntimeError):
tmp[key].GetBool()
if val_type == "string":
tmp[key].SetString("hello")
self.assertEqual(tmp[key].GetString(),"hello")
else:
with self.assertRaises(RuntimeError):
tmp[key].GetString()
if val_type == "vector":
vector = Vector(3)
vector[0] = 5.2
vector[1] = -3.1
vector[2] = 4.33
tmp[key].SetVector(vector)
V = tmp[key].GetVector()
self.assertEqual(V[0],5.2)
self.assertEqual(V[1],-3.1)
self.assertEqual(V[2],4.33)
else:
with self.assertRaises(RuntimeError):
tmp[key].GetVector()
if val_type == "matrix":
matrix = Matrix(3,2)
matrix[0,0] = 1.0
matrix[0,1] = 2.0
matrix[1,0] = 3.0
matrix[1,1] = 4.0
matrix[2,0] = 5.0
matrix[2,1] = 6.0
tmp[key].SetMatrix(matrix)
A = tmp[key].GetMatrix()
self.assertEqual(A[0,0],1.0)
self.assertEqual(A[0,1],2.0)
self.assertEqual(A[1,0],3.0)
self.assertEqual(A[1,1],4.0)
self.assertEqual(A[2,0],5.0)
self.assertEqual(A[2,1],6.0)
else:
with self.assertRaises(RuntimeError):
tmp[key].GetMatrix()
def test_add_methods(self):
# This method checks all the "GetXXX" Methods if they throw an error
tmp = Parameters("""{}""")
key = "int"
tmp.AddInt(key, 10)
self.assertEqual(tmp[key].GetInt(),10)
key = "double"
tmp.AddDouble(key, 2.0)
self.assertEqual(tmp[key].GetDouble(),2.0)
key = "bool"
tmp.AddBool(key, True)
self.assertEqual(tmp[key].GetBool(),True)
key = "string"
tmp.AddString(key, "hello")
self.assertEqual(tmp[key].GetString(),"hello")
key = "vector"
vector = Vector(3)
vector[0] = 5.2
vector[1] = -3.1
vector[2] = 4.33
tmp.AddVector(key, vector)
V = tmp[key].GetVector()
self.assertEqual(V[0],5.2)
self.assertEqual(V[1],-3.1)
self.assertEqual(V[2],4.33)
key = "matrix"
matrix = Matrix(3,2)
matrix[0,0] = 1.0
matrix[0,1] = 2.0
matrix[1,0] = 3.0
matrix[1,1] = 4.0
matrix[2,0] = 5.0
matrix[2,1] = 6.0
tmp.AddMatrix(key, matrix)
A = tmp[key].GetMatrix()
self.assertEqual(A[0,0],1.0)
self.assertEqual(A[0,1],2.0)
self.assertEqual(A[1,0],3.0)
self.assertEqual(A[1,1],4.0)
self.assertEqual(A[2,0],5.0)
self.assertEqual(A[2,1],6.0)
def test_vector_interface(self):
# Read and check Vectors from a Parameters-Object
tmp = Parameters("""{
"valid_vectors" : [ []
],
"false_vectors" : [ [[]],
[[2,3],2],
[2,3,[2]],
[2,3,[]],
[{"key":3},2],
[2,3,{"key":3}],
[true,2],
[2,3,true],
[5,"string",2]
]
}""")
# Check the IsVector Method
for i in range(tmp["valid_vectors"].size()):
valid_vector = tmp["valid_vectors"][i]
self.assertTrue(valid_vector.IsVector())
for i in range(tmp["false_vectors"].size()):
false_vector = tmp["false_vectors"][i]
self.assertFalse(false_vector.IsVector())
# Check the GetVector Method also on the valid Matrices
for i in range(tmp["valid_vectors"].size()):
valid_vector = tmp["valid_vectors"][i]
valid_vector.GetVector()
# Check that the errors of the GetVector method are thrown correctly
for i in range(tmp["false_vectors"].size()):
false_vector = tmp["false_vectors"][i]
with self.assertRaises(RuntimeError):
false_vector.GetVector()
# Manually assign and check a Vector
vec = Vector(3)
vec[0] = 1.32
vec[1] = -2.22
vec[2] = 5.5
tmp.AddEmptyValue("vector_value")
tmp["vector_value"].SetVector(vec)
self.assertTrue(tmp["vector_value"].IsVector())
V2 = tmp["vector_value"].GetVector()
self.assertEqual(V2[0],1.32)
self.assertEqual(V2[1],-2.22)
self.assertEqual(V2[2],5.5)
def test_matrix_interface(self):
# Read and check Matrices from a Parameters-Object
tmp = Parameters("""{
"valid_matrices" : [ [[]],
[[],[]],
[[-9.81,8, 5.47]]
],
"false_matrices" : [ [],
[[[]]],
[[3.3] , [1,2]],
[[2,1.5,3.3] , [3,{"key":3},2]],
[[2,1.5,3.3] , [5,false,2]],
[[2,1.5,3.3] , [[2,3],1,2]],
[[2,1.5,3.3] , ["string",2,9]]
]
}""")
# Check the IsMatrix Method
for i in range(tmp["valid_matrices"].size()):
valid_matrix = tmp["valid_matrices"][i]
self.assertTrue(valid_matrix.IsMatrix())
for i in range(tmp["false_matrices"].size()):
false_matrix = tmp["false_matrices"][i]
self.assertFalse(false_matrix.IsMatrix())
# Check the GetMatrix Method also on the valid Matrices
for i in range(tmp["valid_matrices"].size()):
valid_matrix = tmp["valid_matrices"][i]
valid_matrix.GetMatrix()
# Check that the errors of the GetMatrix method are thrown correctly
for i in range(tmp["false_matrices"].size()):
false_matrix = tmp["false_matrices"][i]
with self.assertRaises(RuntimeError):
false_matrix.GetMatrix()
# Manually assign and check a Matrix
mat = Matrix(3,2)
mat[0,0] = 1.0
mat[0,1] = 2.0
mat[1,0] = 3.0
mat[1,1] = 4.0
mat[2,0] = 5.0
mat[2,1] = 6.0
tmp.AddEmptyValue("matrix_value")
tmp["matrix_value"].SetMatrix(mat)
self.assertTrue(tmp["matrix_value"].IsMatrix())
A2 = tmp["matrix_value"].GetMatrix()
self.assertEqual(A2[0,0],1.0)
self.assertEqual(A2[0,1],2.0)
self.assertEqual(A2[1,0],3.0)
self.assertEqual(A2[1,1],4.0)
self.assertEqual(A2[2,0],5.0)
self.assertEqual(A2[2,1],6.0)
def test_null_vs_null_validation(self):
# supplied settings
null_custom = Parameters("""{
"parameter": null
}""")
# default settings
null_default = Parameters("""{
"parameter": null
}""")
        # this should NOT raise, hence making the test pass
null_custom.ValidateAndAssignDefaults(null_default)
def test_double_vs_null_validation(self):
# supplied settings
double_custom = Parameters("""{
"parameter": 0.0
}""")
# default settings
null_default = Parameters("""{
"parameter": null
}""")
with self.assertRaises(RuntimeError):
double_custom.ValidateAndAssignDefaults(null_default)
def test_file_serialization(self):
tmp = Parameters(defaults)
check = tmp.WriteJsonString()
file_name = "parameter_serialization"
serializer = FileSerializer(file_name, SerializerTraceType.SERIALIZER_NO_TRACE)
serializer.Save("ParametersSerialization",tmp)
del(tmp)
del(serializer)
#unpickle data - note that here i override "serialized_data"
serializer = FileSerializer(file_name,SerializerTraceType.SERIALIZER_NO_TRACE)
loaded_parameters = Parameters()
serializer.Load("ParametersSerialization",loaded_parameters)
self.assertEqual(check, loaded_parameters.WriteJsonString())
kratos_utils.DeleteFileIfExisting(file_name + ".rest")
def test_get_string_array_valid(self):
tmp = Parameters("""{
"parameter": ["foo", "bar"]
} """)
v = tmp["parameter"].GetStringArray()
self.assertEqual(len(v), 2)
self.assertEqual(v[0], "foo")
self.assertEqual(v[1], "bar")
def test_get_string_array_invalid(self):
tmp = Parameters("""{
"parameter": ["foo", true]
} """)
with self.assertRaisesRegex(RuntimeError, r'Error: Argument must be a string'):
tmp["parameter"].GetStringArray()
def test_set_string_array_valid(self):
initial = Parameters("""{
"parameter": ["foo", "bar"]
} """)
string_array = initial["parameter"].GetStringArray()
new_param = Parameters()
new_param.AddEmptyValue("new_parameter")
new_param["new_parameter"].SetStringArray(string_array)
new_string_array = initial["parameter"].GetStringArray()
self.assertListEqual(new_string_array, string_array)
    @KratosUnittest.skipUnless(have_pickle_module, "Pickle module error: " + pickle_message)
def test_stream_serialization(self):
tmp = Parameters(defaults)
check = tmp.WriteJsonString()
serializer = StreamSerializer(SerializerTraceType.SERIALIZER_NO_TRACE)
serializer.Save("ParametersSerialization",tmp)
del(tmp)
        # pickle the serialized data
pickled_data = pickle.dumps(serializer, protocol=2) # Second argument is the protocol and is NECESSARY (according to pybind11 docs)
del(serializer)
        # unpickle the data - note that this overrides the "serializer" object
serializer = pickle.loads(pickled_data)
loaded_parameters = Parameters()
serializer.Load("ParametersSerialization",loaded_parameters)
self.assertEqual(check, loaded_parameters.WriteJsonString())
if __name__ == '__main__':
KratosUnittest.main()
|
the-stack_0_21137 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""The setup script."""
import os
import sys
from shutil import rmtree
from subprocess import Popen
from setuptools import Command, find_packages, setup
here = os.path.abspath(os.path.dirname(__file__))
# Package meta-data.
AUTHOR = "Mpho Mphego"
DESCRIPTION = "Some useful Pandas utility functions"
EMAIL = "[email protected]"
NAME = "pandas_utility"
REQUIRED = ["pandas", "numpy"]
DEV_REQUIRED = [
# put all required packages here
"black",
"coverage",
"loguru",
"pytest",
"twine",
"wheel",
]
REQUIRES_PYTHON = ">=3.6.0"
URL = "https://github.com/mmphego/pandas_utility"
VERSION = None
try:
with open(os.path.join(here, "README.md"), encoding="utf-8") as f:
LONG_DESCRIPTION = "\n" + f.read()
except FileNotFoundError:
LONG_DESCRIPTION = DESCRIPTION
# Load the package's __version__.py module as a dictionary.
about = {}
if not VERSION:
project_slug = NAME.lower().replace("-", "_").replace(" ", "_")
with open(os.path.join(here, project_slug, "__version__.py")) as f:
_version_info = f.readline().strip()
        _version_info = _version_info.replace(" ", "").replace('"', "").replace("'", "")
about = dict([_version_info.split("=")])
else:
about["__version__"] = VERSION
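# Illustrative note (not part of the original setup.py): the parsing above assumes
# pandas_utility/__version__.py contains a single line such as
#     __version__ = "0.1.0"
# After stripping spaces and quotes and splitting on "=", `about` becomes
# {"__version__": "0.1.0"}. The version string here is a hypothetical example.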
SCRIPTS = []
for dirname, dirnames, filenames in os.walk("scripts"):
for filename in filenames:
SCRIPTS.append(os.path.join(dirname, filename))
class UploadCommand(Command):
"""Support setup.py upload."""
description = "Build and publish the package."
user_options = []
@staticmethod
def status(s):
"""Prints things in bold."""
print(f"\033[1m{s}\033[0m")
@staticmethod
def executer(cmd):
p = Popen(cmd, bufsize=-1)
p.communicate()
return p.returncode
def initialize_options(self):
pass
def finalize_options(self):
pass
def run(self):
try:
self.status("Removing previous builds…")
rmtree(os.path.join(here, "dist"))
except OSError:
pass
try:
import twine # noqa:401
except ImportError:
errmsg = "\n'Twine' is not installed.\n\nRun: \n\tpip install twine"
self.status(errmsg)
raise SystemExit(1)
self.status("Building Source and Wheel (universal) distribution...")
        cmd = f"{sys.executable} setup.py sdist bdist_wheel --universal".split(" ")
        self.executer(cmd)  # run the build so dist/ exists for the twine check below
try:
cmd = "twine check dist/*".split(" ")
assert self.executer(cmd) == 0
except AssertionError:
self.status("Failed Twine Test.")
raise
try:
self.status("Uploading the package to PyPI via Twine...")
cmd = "twine upload dist/*".split()
            assert self.executer(cmd) == 0
except AssertionError:
self.status("Failed to upload to PyPi.")
raise
else:
self.status("Pushing git tags...")
cmd = f"git tag v{about.get('__version__')}".split(" ")
self.executer(cmd)
            cmd = "git push --tags".split(" ")
self.executer(cmd)
response = input("Do you want to generate a CHANGELOG.md? (y/n) ")
if response.lower() == "y":
self.status("Generating the CHANGELOG.md.")
                cmd = "make changelog".split(" ")
self.executer(cmd)
setup(
name=NAME,
version=about["__version__"],
description=DESCRIPTION,
long_description=LONG_DESCRIPTION,
long_description_content_type="text/markdown",
    author=AUTHOR,
author_email=EMAIL,
python_requires=REQUIRES_PYTHON,
url=URL,
packages=find_packages(
include=["pandas_utility"], exclude=["tests", "*.tests", "*.tests.*", "tests.*"]
),
install_requires=REQUIRED,
include_package_data=True,
scripts=SCRIPTS,
license="MIT license",
classifiers=[
"Intended Audience :: Developers",
"License :: OSI Approved :: MIT License",
"Natural Language :: English",
"Programming Language :: Python :: 3",
"Programming Language :: Python :: 3.6",
"Programming Language :: Python :: 3.7",
],
keywords="pandas_utility",
test_suite="tests",
tests_require=["pytest", "unittest"],
project_urls={
"Bug Reports": f"{URL}/issues",
"Source": URL,
"Say Thanks!": f"https://saythanks.io/to/mmphego",
"Donate!": f"https://paypal.me/mmphego",
},
zip_safe=False,
# $ setup.py publish support.
cmdclass={"upload": UploadCommand},
)
|
the-stack_0_21140 | # MIT License
#
# Copyright (c) 2015-2020 Iakiv Kramarenko
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
from __future__ import annotations
import time
from abc import abstractmethod, ABC
from typing import Generic, Callable, TypeVar, Optional
from selene.core.exceptions import TimeoutException
from selene.common.fp import identity
T = TypeVar('T')
R = TypeVar('R')
E = TypeVar('E')
# todo: not sure, if we need all these Lambda, Proc, Query, Command in python
# todo: they was added just to quickly port selenidejs waiting mechanism
# todo: let's consider removing them... or moving them e.g. to fp
Lambda = Callable[[T], R]
Proc = Callable[[T], None]
Predicate = Callable[[T], bool]
Fn = Callable[[T], R]
# todo: consider moving outside of "wait" module... because there is no direct cohesion with it
class Query(Generic[T, R]):
def __init__(self, description: str, fn: Callable[[T], R]):
self._description = description
self._fn = fn
def __call__(self, entity: T) -> R:
return self._fn(entity)
def __str__(self):
return self._description
class Command(Query[T, None]):
pass
# todo: provide sexy fluent implementation via builder, i.e. Wait.the(element).atMost(3).orFailWith(hook)
class Wait(Generic[E]):
# todo: provide the smallest possible timeout default, something like 1ms
def __init__(self, entity: E, at_most: int, or_fail_with: Optional[Callable[[TimeoutException], Exception]] = None):
self._entity = entity
self._timeout = at_most
self._hook_failure = or_fail_with or identity
def at_most(self, timeout: int) -> Wait[E]:
return Wait(self._entity, timeout, self._hook_failure)
def or_fail_with(self,
hook_failure: Optional[Callable[
[TimeoutException],
Exception]]
) -> Wait[E]:
return Wait(self._entity, self._timeout, hook_failure)
@property
def hook_failure(self) -> Optional[Callable[[TimeoutException], Exception]]:
# todo: hook_failure or failure_hook?
return self._hook_failure
# todo: consider renaming to `def to(...)`, though will sound awkward when wait.to(condition)
def for_(self, fn: Callable[[E], R]) -> R:
finish_time = time.time() + self._timeout
while True:
try:
return fn(self._entity)
except Exception as reason:
if time.time() > finish_time:
reason_message = str(reason)
reason_string = '{name}: {message}'.format(name=reason.__class__.__name__, message=reason_message)
# todo: think on how can we improve logging failures in selene, e.g. reverse msg and stacktrace
# stacktrace = getattr(reason, 'stacktrace', None)
timeout = self._timeout
entity = self._entity
failure = TimeoutException(f'''
Timed out after {timeout}s, while waiting for:
{entity}.{fn}
Reason: {reason_string}''')
raise self._hook_failure(failure)
def until(self, fn: Callable[[E], R]) -> bool:
try:
self.for_(fn)
return True
except TimeoutException:
return False
# todo: do we really need these aliases?
def command(self, description: str, fn: Callable[[E], None]) -> None:
self.for_(Command(description, fn))
def query(self, description: str, fn: Callable[[E], R]) -> R:
return self.for_(Query(description, fn)) |
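# --- Illustrative usage sketch for Wait/Query/Command defined above (not part of the
# original module; the `page` entity and its methods are hypothetical stand-ins) ---
#
#     wait = Wait(page, at_most=4)
#     # for_ keeps retrying fn(entity) while it raises, then fails with TimeoutException:
#     wait.for_(Command('open login form', lambda p: p.open_login_form()))
#     # until swallows the TimeoutException and returns True/False instead:
#     ok = wait.until(Query('read title', lambda p: p.title()))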
the-stack_0_21145 | import argparse
import importlib
import os
import sys
import numpy as np
import torch as th
import yaml
from stable_baselines3.common.utils import set_random_seed
import utils.import_envs # noqa: F401 pylint: disable=unused-import
from utils import ALGOS, create_test_env, get_latest_run_id, get_saved_hyperparams
from utils.exp_manager import ExperimentManager
from utils.utils import StoreDict
def main(): # noqa: C901
parser = argparse.ArgumentParser()
parser.add_argument("--env", help="environment ID", type=str, default="CartPole-v1")
parser.add_argument("-f", "--folder", help="Log folder", type=str, default="rl-trained-agents")
parser.add_argument("--algo", help="RL Algorithm", default="ppo", type=str, required=False, choices=list(ALGOS.keys()))
parser.add_argument("-n", "--n-timesteps", help="number of timesteps", default=1000, type=int)
parser.add_argument("--num-threads", help="Number of threads for PyTorch (-1 to use default)", default=-1, type=int)
parser.add_argument("--n-envs", help="number of environments", default=1, type=int)
parser.add_argument("--exp-id", help="Experiment ID (default: 0: latest, -1: no exp folder)", default=0, type=int)
parser.add_argument("--verbose", help="Verbose mode (0: no output, 1: INFO)", default=1, type=int)
parser.add_argument(
"--no-render", action="store_true", default=False, help="Do not render the environment (useful for tests)"
)
parser.add_argument("--deterministic", action="store_true", default=False, help="Use deterministic actions")
parser.add_argument(
"--load-best", action="store_true", default=False, help="Load best model instead of last model if available"
)
parser.add_argument(
"--load-checkpoint",
type=int,
help="Load checkpoint instead of last model if available, "
"you must pass the number of timesteps corresponding to it",
)
parser.add_argument("--stochastic", action="store_true", default=False, help="Use stochastic actions")
parser.add_argument(
"--norm-reward", action="store_true", default=False, help="Normalize reward if applicable (trained with VecNormalize)"
)
parser.add_argument("--seed", help="Random generator seed", type=int, default=0)
parser.add_argument("--reward-log", help="Where to log reward", default="", type=str)
parser.add_argument(
"--gym-packages",
type=str,
nargs="+",
default=[],
help="Additional external Gym environment package modules to import (e.g. gym_minigrid)",
)
parser.add_argument(
"--env-kwargs", type=str, nargs="+", action=StoreDict, help="Optional keyword argument to pass to the env constructor"
)
args = parser.parse_args()
    # Going through custom gym packages to let them register in the global registry
for env_module in args.gym_packages:
importlib.import_module(env_module)
env_id = args.env
algo = args.algo
folder = args.folder
if args.exp_id == 0:
args.exp_id = get_latest_run_id(os.path.join(folder, algo), env_id)
print(f"Loading latest experiment, id={args.exp_id}")
# Sanity checks
if args.exp_id > 0:
log_path = os.path.join(folder, algo, f"{env_id}_{args.exp_id}")
else:
log_path = os.path.join(folder, algo)
assert os.path.isdir(log_path), f"The {log_path} folder was not found"
found = False
for ext in ["zip"]:
model_path = os.path.join(log_path, f"{env_id}.{ext}")
found = os.path.isfile(model_path)
if found:
break
if args.load_best:
model_path = os.path.join(log_path, "best_model.zip")
found = os.path.isfile(model_path)
if args.load_checkpoint is not None:
model_path = os.path.join(log_path, f"rl_model_{args.load_checkpoint}_steps.zip")
found = os.path.isfile(model_path)
if not found:
raise ValueError(f"No model found for {algo} on {env_id}, path: {model_path}")
off_policy_algos = ["qrdqn", "dqn", "ddpg", "sac", "her", "td3", "tqc"]
if algo in off_policy_algos:
args.n_envs = 1
set_random_seed(args.seed)
if args.num_threads > 0:
if args.verbose > 1:
print(f"Setting torch.num_threads to {args.num_threads}")
th.set_num_threads(args.num_threads)
is_atari = ExperimentManager.is_atari(env_id)
stats_path = os.path.join(log_path, env_id)
hyperparams, stats_path = get_saved_hyperparams(stats_path, norm_reward=args.norm_reward, test_mode=True)
# load env_kwargs if existing
env_kwargs = {}
args_path = os.path.join(log_path, env_id, "args.yml")
if os.path.isfile(args_path):
with open(args_path, "r") as f:
loaded_args = yaml.load(f, Loader=yaml.UnsafeLoader) # pytype: disable=module-attr
if loaded_args["env_kwargs"] is not None:
env_kwargs = loaded_args["env_kwargs"]
# overwrite with command line arguments
if args.env_kwargs is not None:
env_kwargs.update(args.env_kwargs)
log_dir = args.reward_log if args.reward_log != "" else None
env = create_test_env(
env_id,
n_envs=args.n_envs,
stats_path=stats_path,
seed=args.seed,
log_dir=log_dir,
should_render=not args.no_render,
hyperparams=hyperparams,
env_kwargs=env_kwargs,
)
kwargs = dict(seed=args.seed)
if algo in off_policy_algos:
# Dummy buffer size as we don't need memory to enjoy the trained agent
kwargs.update(dict(buffer_size=1))
# Check if we are running python 3.8+
# we need to patch saved model under python 3.6/3.7 to load them
newer_python_version = sys.version_info.major == 3 and sys.version_info.minor >= 8
custom_objects = {}
if newer_python_version:
custom_objects = {
"learning_rate": 0.0,
"lr_schedule": lambda _: 0.0,
"clip_range": lambda _: 0.0,
}
model = ALGOS[algo].load(model_path, env=env, custom_objects=custom_objects, **kwargs)
obs = env.reset()
# Deterministic by default except for atari games
stochastic = args.stochastic or is_atari and not args.deterministic
deterministic = not stochastic
state = None
episode_reward = 0.0
episode_rewards, episode_lengths = [], []
ep_len = 0
# For HER, monitor success rate
successes = []
try:
for _ in range(args.n_timesteps):
action, state = model.predict(obs, state=state, deterministic=deterministic)
obs, reward, done, infos = env.step(action)
if not args.no_render:
env.render("human")
episode_reward += reward[0]
ep_len += 1
if args.n_envs == 1:
# For atari the return reward is not the atari score
# so we have to get it from the infos dict
if is_atari and infos is not None and args.verbose >= 1:
episode_infos = infos[0].get("episode")
if episode_infos is not None:
print(f"Atari Episode Score: {episode_infos['r']:.2f}")
print("Atari Episode Length", episode_infos["l"])
if done and not is_atari and args.verbose > 0:
# NOTE: for env using VecNormalize, the mean reward
# is a normalized reward when `--norm_reward` flag is passed
print(f"Episode Reward: {episode_reward:.2f}")
print("Episode Length", ep_len)
episode_rewards.append(episode_reward)
episode_lengths.append(ep_len)
episode_reward = 0.0
ep_len = 0
state = None
# Reset also when the goal is achieved when using HER
if done and infos[0].get("is_success") is not None:
if args.verbose > 1:
print("Success?", infos[0].get("is_success", False))
if infos[0].get("is_success") is not None:
successes.append(infos[0].get("is_success", False))
episode_reward, ep_len = 0.0, 0
except KeyboardInterrupt:
pass
if args.verbose > 0 and len(successes) > 0:
print(f"Success rate: {100 * np.mean(successes):.2f}%")
if args.verbose > 0 and len(episode_rewards) > 0:
print(f"{len(episode_rewards)} Episodes")
print(f"Mean reward: {np.mean(episode_rewards):.2f} +/- {np.std(episode_rewards):.2f}")
if args.verbose > 0 and len(episode_lengths) > 0:
print(f"Mean episode length: {np.mean(episode_lengths):.2f} +/- {np.std(episode_lengths):.2f}")
env.close()
if __name__ == "__main__":
main()
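# Illustrative invocation (not part of the original script; the file name "enjoy.py"
# and the log folder are assumptions). All flags shown are defined in main() above:
#
#     python enjoy.py --algo ppo --env CartPole-v1 --folder rl-trained-agents -n 1000 --no-render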
|
the-stack_0_21148 | import pickle
import time
from tqdm import tqdm
class ProbaseConcept(object):
def __init__(self, data_concept_path=None):
self.concept2idx = dict()
self.idx2concept = dict()
self.concept_inverted_list = dict()
self.instance2idx = dict()
self.idx2instance = dict()
self.instance_inverted_list = dict()
if data_concept_path:
self._load_raw_data(data_concept_path)
def _load_raw_data(self, data_concept_path):
st = time.time()
print("[probase-concept] Loading Probase files...")
with open(data_concept_path) as f:
triple_lines = [line.strip() for line in f]
print("[probase-concept] Building index...")
for line in tqdm(triple_lines):
concept, instance, freq = line.split('\t')
if concept not in self.concept2idx:
self.concept2idx[concept] = len(self.concept2idx)
concept_idx = self.concept2idx[concept]
if instance not in self.instance2idx:
self.instance2idx[instance] = len(self.instance2idx)
instance_idx = self.instance2idx[instance]
if concept_idx not in self.concept_inverted_list:
self.concept_inverted_list[concept_idx] = list()
self.concept_inverted_list[concept_idx].append((instance_idx, int(freq)))
if instance_idx not in self.instance_inverted_list:
self.instance_inverted_list[instance_idx] = list()
self.instance_inverted_list[instance_idx].append((concept_idx, int(freq)))
self.idx2concept = {val: key for key, val in self.concept2idx.items()}
self.idx2instance = {val: key for key, val in self.instance2idx.items()}
print("[probase-concept] Loading data finished in {:.2f} s".format(time.time() - st))
def conceptualize(self, instance, score_method="likelihood"):
""" Conceptualize given instance
:type instance: str
:type score_method: str
:param instance: input instance such as "microsoft"
:param score_method: "likelihood" or "pmi"
:return:
"""
if instance not in self.instance2idx:
return []
instance_idx = self.instance2idx[instance]
instance_freq = self.get_instance_freq(instance_idx)
concept_list = self.instance_inverted_list[instance_idx]
rst_list = list()
for concept_idx, co_occurrence in concept_list:
if score_method == "pmi":
score = co_occurrence / \
self.get_concept_freq(concept_idx) / \
instance_freq
elif score_method == "likelihood":
score = co_occurrence / instance_freq
else:
raise NotImplementedError
rst_list.append((self.idx2concept[concept_idx], score))
rst_list.sort(key=lambda x: x[1], reverse=True)
return rst_list
def instantiate(self, concept):
""" Retrieve all instances of a concept
:type concept: str
:param concept: input concept such as "company"
:return:
"""
if concept not in self.concept2idx:
return []
concept_idx = self.concept2idx[concept]
rst_list = [(self.idx2instance[idx], freq) for idx, freq
in self.concept_inverted_list[concept_idx]]
rst_list.sort(key=lambda x: x[1], reverse=True)
return rst_list
def get_concept_chain(self, instance, max_chain_length=5):
if instance in self.concept2idx:
chain = [instance]
else:
chain = list()
tmp_instance = instance
while True:
concepts = self.conceptualize(tmp_instance, score_method="likelihood")
if concepts:
chain.append(concepts[0][0])
else:
break
if len(chain) >= max_chain_length:
break
tmp_instance = chain[-1]
if chain and chain[0] != instance:
return [instance] + chain
else:
return chain
def get_concept_freq(self, concept):
if isinstance(concept, str):
if concept not in self.concept2idx:
return 0
concept = self.concept2idx[concept]
elif isinstance(concept, int):
if concept not in self.idx2concept:
return 0
return sum([t[1] for t in self.concept_inverted_list[concept]])
def get_instance_freq(self, instance):
if isinstance(instance, str):
if instance not in self.instance2idx:
return 0
instance = self.instance2idx[instance]
elif isinstance(instance, int):
if instance not in self.idx2instance:
return 0
return sum([t[1] for t in self.instance_inverted_list[instance]])
def save(self, saved_path):
st = time.time()
        print("[probase-concept] Saving data to {}".format(saved_path))
with open(saved_path, "wb") as f:
pickle.dump(self.__dict__, f)
print("[probase-concept] Saving data finished in {:.2f} s".format(time.time() - st))
def load(self, load_path):
st = time.time()
print("[probase-concept] Loading data from {}".format(load_path))
with open(load_path, "rb") as f:
tmp_dict = pickle.load(f)
for key, val in tmp_dict.items():
self.__setattr__(key, val)
print("[probase-concept] Loading data finished in {:.2f} s".format(time.time() - st))
@property
def concept_size(self):
return len(self.concept2idx)
@property
def instance_size(self):
return len(self.instance2idx) |
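# --- Illustrative usage sketch for the ProbaseConcept class above (not part of the
# original file; the data path is a hypothetical tab-separated
# "concept<TAB>instance<TAB>frequency" file, as expected by _load_raw_data) ---
#
#     probase = ProbaseConcept("data-concept-instance-relations.txt")
#     probase.conceptualize("microsoft", score_method="likelihood")  # [(concept, score), ...]
#     probase.instantiate("company")                                 # [(instance, freq), ...]
#     probase.get_concept_chain("microsoft")
#     probase.save("probase.pkl")                                    # reload later with .load()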
the-stack_0_21150 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on 28 June 2017
@author: Maurizio Ferrari Dacrema
"""
import sys
import time
import numpy as np
from scipy.special import expit
from course_lib.Base.BaseRecommender import BaseRecommender
class SLIM_BPR(BaseRecommender):
"""
This class is a python porting of the BPRSLIM algorithm in MyMediaLite written in C#
The code is identical with no optimizations
"""
def __init__(self, URM_train, lambda_i = 0.0025, lambda_j = 0.00025, learning_rate = 0.05):
super(SLIM_BPR, self).__init__()
self.URM_train = URM_train
self.n_users = URM_train.shape[0]
self.n_items = URM_train.shape[1]
self.lambda_i = lambda_i
self.lambda_j = lambda_j
self.learning_rate = learning_rate
self.normalize = False
self.sparse_weights = False
def updateFactors(self, user_id, pos_item_id, neg_item_id):
# Calculate current predicted score
userSeenItems = self.URM_train[user_id].indices
prediction = 0
for userSeenItem in userSeenItems:
prediction += self.S[pos_item_id, userSeenItem] - self.S[neg_item_id, userSeenItem]
x_uij = prediction
logisticFunction = expit(-x_uij)
# Update similarities for all items except those sampled
for userSeenItem in userSeenItems:
# For positive item is PLUS logistic minus lambda*S
if(pos_item_id != userSeenItem):
update = logisticFunction - self.lambda_i*self.S[pos_item_id, userSeenItem]
self.S[pos_item_id, userSeenItem] += self.learning_rate*update
# For positive item is MINUS logistic minus lambda*S
if (neg_item_id != userSeenItem):
update = - logisticFunction - self.lambda_j*self.S[neg_item_id, userSeenItem]
self.S[neg_item_id, userSeenItem] += self.learning_rate*update
def fit(self, epochs=15):
"""
        Train SLIM with BPR. If the model was already trained, this overwrites matrix S.
:param epochs:
:return: -
"""
# Initialize similarity with random values and zero-out diagonal
self.S = np.random.random((self.n_items, self.n_items)).astype('float32')
self.S[np.arange(self.n_items),np.arange(self.n_items)] = 0
start_time_train = time.time()
for currentEpoch in range(epochs):
start_time_epoch = time.time()
self.epochIteration()
print("Epoch {} of {} complete in {:.2f} minutes".format(currentEpoch+1, epochs, float(time.time()-start_time_epoch)/60))
print("Train completed in {:.2f} minutes".format(float(time.time()-start_time_train)/60))
# The similarity matrix is learnt row-wise
# To be used in the product URM*S must be transposed to be column-wise
self.W = self.S.T
del self.S
def epochIteration(self):
# Get number of available interactions
numPositiveIteractions = self.URM_train.nnz
start_time = time.time()
# Uniform user sampling without replacement
for numSample in range(numPositiveIteractions):
user_id, pos_item_id, neg_item_id = self.sampleTriple()
self.updateFactors(user_id, pos_item_id, neg_item_id)
if(numSample % 5000 == 0):
print("Processed {} ( {:.2f}% ) in {:.4f} seconds".format(numSample,
100.0* float(numSample)/numPositiveIteractions,
time.time()-start_time))
sys.stderr.flush()
start_time = time.time()
def sampleUser(self):
"""
Sample a user that has viewed at least one and not all items
:return: user_id
"""
while(True):
user_id = np.random.randint(0, self.n_users)
numSeenItems = self.URM_train[user_id].nnz
if(numSeenItems >0 and numSeenItems<self.n_items):
return user_id
def sampleItemPair(self, user_id):
"""
Returns for the given user a random seen item and a random not seen item
:param user_id:
:return: pos_item_id, neg_item_id
"""
userSeenItems = self.URM_train[user_id].indices
pos_item_id = userSeenItems[np.random.randint(0,len(userSeenItems))]
while(True):
neg_item_id = np.random.randint(0, self.n_items)
if(neg_item_id not in userSeenItems):
return pos_item_id, neg_item_id
def sampleTriple(self):
"""
Randomly samples a user and then samples randomly a seen and not seen item
:return: user_id, pos_item_id, neg_item_id
"""
user_id = self.sampleUser()
pos_item_id, neg_item_id = self.sampleItemPair(user_id)
return user_id, pos_item_id, neg_item_id
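# Illustrative usage sketch (not part of the original module). It builds a small random
# implicit-feedback URM with scipy (an extra import), trains for one epoch, and scores
# items for a user via URM * W, mirroring the comment in fit(). It assumes the
# course_lib BaseRecommender import above resolves in your environment.
if __name__ == "__main__":
    import scipy.sparse as sps

    n_users, n_items = 50, 30
    URM = sps.random(n_users, n_items, density=0.2, format='csr')
    URM.data[:] = 1.0  # implicit feedback: every stored interaction counts as 1

    recommender = SLIM_BPR(URM, learning_rate=0.05)
    recommender.fit(epochs=1)

    # Score all items for one user; a higher score means a stronger recommendation
    user_scores = URM[0].dot(recommender.W)
    print(user_scores)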
|
the-stack_0_21151 | import unittest
from deepcomparer import deep_compare
class Cat:
def __init__(self, a):
self.a = a
class Dog:
def __init__(self, a):
self.a = a
class TestCompareDict(unittest.TestCase):
def test_equal_classes(self):
"""
Test equal classes
"""
data1 = Cat(1)
data2 = Cat(1)
result = deep_compare(data1, data2)
self.assertTrue(result)
def test_same_classes_different_values(self):
"""
Test equal classes with different values
"""
data1 = Cat(1)
data2 = Cat(2)
result = deep_compare(data1, data2)
self.assertFalse(result)
def test_different_classes_same_values(self):
"""
Test different classes with same values
"""
data1 = Cat(1)
data2 = Dog(1)
result = deep_compare(data1, data2)
self.assertFalse(result)
def test_equals_embeded_classes(self):
data1 = Cat(Dog(2))
data2 = Cat(Dog(2))
result = deep_compare(data1, data2)
self.assertTrue(result)
def test_different_embeded_classes(self):
data1 = Cat(Dog(2))
data2 = Cat(Dog(1))
result = deep_compare(data1, data2)
self.assertFalse(result)
def test_different_typed_embeded_classes(self):
data1 = Dog(Cat(1))
data2 = Cat(Dog(1))
result = deep_compare(data1, data2)
self.assertFalse(result)
if __name__ == '__main__':
unittest.main()
|
the-stack_0_21153 | import logging
import math
import os
import numpy as np
from numpy import index_exp
from westpa.core.data_manager import seg_id_dtype, weight_dtype
from westpa.core.binning import index_dtype, assign_and_label, accumulate_labeled_populations
from westpa.tools import (WESTParallelTool, WESTDataReader, WESTDSSynthesizer, BinMappingComponent,
ProgressIndicatorComponent)
import westpa
from westpa.core import h5io
from westpa.core.h5io import WESTPAH5File
from westpa.core.extloader import get_object
log = logging.getLogger('w_assign')
# Changes to keep it alive...
def parse_pcoord_value(pc_str):
namespace = {'math': math,
'numpy': np,
'np': np,
'inf': float('inf')}
arr = np.array(eval(pc_str,namespace))
if arr.ndim == 0:
arr.shape = (1,1)
elif arr.ndim == 1:
arr.shape = (1,) + arr.shape
else:
raise ValueError('too many dimensions')
return arr
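# Illustrative note (not part of the original tool): parse_pcoord_value normalizes the
# command-line coordinate strings into 2-D arrays, e.g.
#     parse_pcoord_value('1.0')       -> array([[1.0]])       shape (1, 1)
#     parse_pcoord_value('[2.5,2.5]') -> array([[2.5, 2.5]])  shape (1, 2)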
def _assign_label_pop(n_iter, lb, ub, mapper, nstates, state_map, last_labels, parent_id_dsspec, weight_dsspec, pcoord_dsspec, subsample):
nbins = len(state_map)-1
parent_ids = parent_id_dsspec.get_iter_data(n_iter,index_exp[lb:ub])
weights = weight_dsspec.get_iter_data(n_iter,index_exp[lb:ub])
pcoords = pcoord_dsspec.get_iter_data(n_iter,index_exp[lb:ub])
assignments, trajlabels, statelabels = assign_and_label(lb, ub, parent_ids,
mapper.assign, nstates, state_map, last_labels, pcoords, subsample)
pops = np.zeros((nstates+1,nbins+1), weight_dtype)
accumulate_labeled_populations(weights, assignments, trajlabels, pops)
return (assignments, trajlabels, pops, lb, ub, statelabels)
class WAssign(WESTParallelTool):
prog='w_assign'
description = '''\
Assign walkers to bins, producing a file (by default named "assign.h5")
which can be used in subsequent analysis.
For consistency in subsequent analysis operations, the entire dataset
must be assigned, even if only a subset of the data will be used. This
ensures that analyses that rely on tracing trajectories always know the
originating bin of each trajectory.
-----------------------------------------------------------------------------
Source data
-----------------------------------------------------------------------------
Source data is provided either by a user-specified function
(--construct-dataset) or a list of "data set specifications" (--dsspecs).
If neither is provided, the progress coordinate dataset ''pcoord'' is used.
To use a custom function to extract or calculate data whose probability
distribution will be calculated, specify the function in standard Python
MODULE.FUNCTION syntax as the argument to --construct-dataset. This function
will be called as function(n_iter,iter_group), where n_iter is the iteration
whose data are being considered and iter_group is the corresponding group
in the main WEST HDF5 file (west.h5). The function must return data which can
be indexed as [segment][timepoint][dimension].
To use a list of data set specifications, specify --dsspecs and then list the
desired datasets one-by-one (space-separated in most shells). These data set
specifications are formatted as NAME[,file=FILENAME,slice=SLICE], which will
use the dataset called NAME in the HDF5 file FILENAME (defaulting to the main
WEST HDF5 file west.h5), and slice it with the Python slice expression SLICE
(as in [0:2] to select the first two elements of the first axis of the
dataset). The ``slice`` option is most useful for selecting one column (or
more) from a multi-column dataset, such as arises when using a progress
coordinate of multiple dimensions.
-----------------------------------------------------------------------------
Specifying macrostates
-----------------------------------------------------------------------------
Optionally, kinetic macrostates may be defined in terms of sets of bins.
Each trajectory will be labeled with the kinetic macrostate it was most
recently in at each timepoint, for use in subsequent kinetic analysis.
This is required for all kinetics analysis (w_kintrace and w_kinmat).
There are three ways to specify macrostates:
1. States corresponding to single bins may be identified on the command
line using the --states option, which takes multiple arguments, one for
each state (separated by spaces in most shells). Each state is specified
as a coordinate tuple, with an optional label prepended, as in
``bound:1.0`` or ``unbound:(2.5,2.5)``. Unlabeled states are named
``stateN``, where N is the (zero-based) position in the list of states
supplied to --states.
2. States corresponding to multiple bins may use a YAML input file specified
with --states-from-file. This file defines a list of states, each with a
name and a list of coordinate tuples; bins containing these coordinates
will be mapped to the containing state. For instance, the following
file::
---
states:
- label: unbound
coords:
- [9.0, 1.0]
- [9.0, 2.0]
- label: bound
coords:
- [0.1, 0.0]
produces two macrostates: the first state is called "unbound" and
consists of bins containing the (2-dimensional) progress coordinate
values (9.0, 1.0) and (9.0, 2.0); the second state is called "bound"
and consists of the single bin containing the point (0.1, 0.0).
3. Arbitrary state definitions may be supplied by a user-defined function,
specified as --states-from-function=MODULE.FUNCTION. This function is
called with the bin mapper as an argument (``function(mapper)``) and must
return a list of dictionaries, one per state. Each dictionary must contain
a vector of coordinate tuples with key "coords"; the bins into which each
of these tuples falls define the state. An optional name for the state
(with key "label") may also be provided.
-----------------------------------------------------------------------------
Output format
-----------------------------------------------------------------------------
The output file (-o/--output, by default "assign.h5") contains the following
attributes datasets:
``nbins`` attribute
*(Integer)* Number of valid bins. Bin assignments range from 0 to
*nbins*-1, inclusive.
``nstates`` attribute
*(Integer)* Number of valid macrostates (may be zero if no such states are
specified). Trajectory ensemble assignments range from 0 to *nstates*-1,
inclusive, when states are defined.
``/assignments`` [iteration][segment][timepoint]
*(Integer)* Per-segment and -timepoint assignments (bin indices).
``/npts`` [iteration]
*(Integer)* Number of timepoints in each iteration.
``/nsegs`` [iteration]
*(Integer)* Number of segments in each iteration.
``/labeled_populations`` [iterations][state][bin]
*(Floating-point)* Per-iteration and -timepoint bin populations, labeled
by most recently visited macrostate. The last state entry (*nstates-1*)
corresponds to trajectories initiated outside of a defined macrostate.
``/bin_labels`` [bin]
*(String)* Text labels of bins.
When macrostate assignments are given, the following additional datasets are
present:
``/trajlabels`` [iteration][segment][timepoint]
*(Integer)* Per-segment and -timepoint trajectory labels, indicating the
macrostate which each trajectory last visited.
``/state_labels`` [state]
*(String)* Labels of states.
``/state_map`` [bin]
*(Integer)* Mapping of bin index to the macrostate containing that bin.
An entry will contain *nbins+1* if that bin does not fall into a
macrostate.
Datasets indexed by state and bin contain one more entry than the number of
valid states or bins. For *N* bins, axes indexed by bin are of size *N+1*, and
entry *N* (0-based indexing) corresponds to a walker outside of the defined bin
space (which will cause most mappers to raise an error). More importantly, for
*M* states (including the case *M=0* where no states are specified), axes
indexed by state are of size *M+1* and entry *M* refers to trajectories
initiated in a region not corresponding to a defined macrostate.
Thus, ``labeled_populations[:,:,:].sum(axis=1)[:,:-1]`` gives overall per-bin
populations, for all defined bins and
``labeled_populations[:,:,:].sum(axis=2)[:,:-1]`` gives overall
per-trajectory-ensemble populations for all defined states.
-----------------------------------------------------------------------------
Parallelization
-----------------------------------------------------------------------------
This tool supports parallelized binning, including reading/calculating input
data.
-----------------------------------------------------------------------------
Command-line options
-----------------------------------------------------------------------------
'''
def __init__(self):
super(WAssign,self).__init__()
# Parallel processing by default (this is not actually necessary, but it is
# informative!)
self.wm_env.default_work_manager = self.wm_env.default_parallel_work_manager
self.data_reader = WESTDataReader()
self.dssynth = WESTDSSynthesizer(default_dsname='pcoord')
self.binning = BinMappingComponent()
self.progress = ProgressIndicatorComponent()
self.output_file = None
self.output_filename = None
self.states = []
self.subsample = False
def add_args(self, parser):
self.data_reader.add_args(parser)
self.binning.add_args(parser)
self.dssynth.add_args(parser)
sgroup = parser.add_argument_group('macrostate definitions').add_mutually_exclusive_group()
sgroup.add_argument('--states', nargs='+', metavar='STATEDEF',
help='''Single-bin kinetic macrostate, specified by a coordinate tuple (e.g. '1.0' or '[1.0,1.0]'),
optionally labeled (e.g. 'bound:[1.0,1.0]'). States corresponding to multiple bins
must be specified with --states-from-file.''')
sgroup.add_argument('--states-from-file', metavar='STATEFILE',
help='''Load kinetic macrostates from the YAML file STATEFILE. See description
above for the appropriate structure.''')
sgroup.add_argument('--states-from-function', metavar='STATEFUNC',
help='''Load kinetic macrostates from the function STATEFUNC, specified as
module_name.func_name. This function is called with the bin mapper as an argument,
and must return a list of dictionaries {'label': state_label, 'coords': 2d_array_like}
one for each macrostate; the 'coords' entry must contain enough rows to identify all bins
in the macrostate.''')
agroup = parser.add_argument_group('other options')
agroup.add_argument('-o', '--output', dest='output', default='assign.h5',
help='''Store results in OUTPUT (default: %(default)s).''')
agroup.add_argument('--subsample', dest='subsample', action='store_const', const=True,
help='''Determines whether or not the data should be subsampled.
This is rather useful for analysing steady state simulations.''')
agroup.add_argument('--config-from-file', dest='config_from_file', action='store_true',
help='''Load bins/macrostates from a scheme specified in west.cfg.''')
agroup.add_argument('--scheme-name', dest='scheme',
help='''Name of scheme specified in west.cfg.''')
def process_args(self, args):
self.progress.process_args(args)
self.data_reader.process_args(args)
# Necessary to open the file to get the current iteration
# if we want to use the mapper in the file
self.data_reader.open(mode='r+')
self.n_iter = self.data_reader.current_iteration
# If we decide to use this option for iteration selection:
# getattr(args,'bins_from_h5file',None) or self.data_reader.current_iteration
with self.data_reader:
self.dssynth.h5filename = self.data_reader.we_h5filename
self.dssynth.process_args(args)
if args.config_from_file == False:
self.binning.set_we_h5file_info(self.n_iter,self.data_reader)
self.binning.process_args(args)
self.output_filename = args.output
if args.config_from_file:
if not args.scheme:
raise ValueError('A scheme must be specified.')
else:
self.load_config_from_west(args.scheme)
elif args.states:
self.parse_cmdline_states(args.states)
elif args.states_from_file:
self.load_state_file(args.states_from_file)
elif args.states_from_function:
self.load_states_from_function(get_object(args.states_from_function,path=['.']))
if self.states and len(self.states) < 2:
raise ValueError('zero, two, or more macrostates are required')
#self.output_file = WESTPAH5File(args.output, 'w', creating_program=True)
log.debug('state list: {!r}'.format(self.states))
self.subsample = args.subsample if args.subsample is not None else False
def parse_cmdline_states(self, state_strings):
states = []
for istring, state_string in enumerate(state_strings):
try:
(label, coord_str) = state_string.split(':')
except ValueError:
label = 'state{}'.format(istring)
coord_str = state_string
coord = parse_pcoord_value(coord_str)
states.append({'label': label, 'coords': coord})
self.states = states
def load_config_from_west(self, scheme):
try:
config = westpa.rc.config['west']['analysis']
except:
raise ValueError('There is no configuration file specified.')
ystates = config['analysis_schemes'][scheme]['states']
self.states_from_dict(ystates)
try:
self.subsample = config['subsample']
except:
pass
from westpa.core._rc import bins_from_yaml_dict
self.binning.mapper = bins_from_yaml_dict(config['analysis_schemes'][scheme]['bins'][0])
path = os.path.join(os.getcwd(), config['directory'], scheme)
try:
os.mkdir(config['directory'])
os.mkdir(path)
except:
pass
self.output_filename = os.path.join(path, 'assign.h5')
def load_state_file(self, state_filename):
import yaml
        with open(state_filename, 'rt') as state_file:
            ydict = yaml.safe_load(state_file)
ystates = ydict['states']
self.states_from_dict(ystates)
def states_from_dict(self, ystates):
states = []
for istate, ystate in enumerate(ystates):
state = {}
state['label'] = ystate.get('label', 'state{}'.format(istate))
# coords can be:
# - a scalar, in which case it is one bin, 1-D
# - a single list, which is rejected as ambiguous
# - a list of lists, which is a list of coordinate tuples
coords = np.array(ystate['coords'])
if coords.ndim == 0:
coords.shape = (1,1)
elif coords.ndim == 1:
raise ValueError('list {!r} is ambiguous (list of 1-d coordinates, or single multi-d coordinate?)'
.format(ystate['coords']))
elif coords.ndim > 2:
raise ValueError('coordinates must be 2-D')
state['coords'] = coords
states.append(state)
self.states = states
def load_states_from_function(self, statefunc):
states = statefunc(self.binning.mapper)
for istate, state in enumerate(states):
state.setdefault('label','state{}'.format(istate))
try:
state['coords'] = np.array(state['coords'])
except KeyError:
raise ValueError('state function {!r} returned a state {!r} without coordinates'.format(statefunc,state))
self.states = states
log.debug('loaded states: {!r}'.format(self.states))
def assign_iteration(self, n_iter, nstates, nbins, state_map, last_labels):
''' Method to encapsulate the segment slicing (into n_worker slices) and parallel job submission
Submits job(s), waits on completion, splices them back together
Returns: assignments, trajlabels, pops for this iteration'''
futures = []
iter_group = self.data_reader.get_iter_group(n_iter)
nsegs, npts = iter_group['pcoord'].shape[:2]
n_workers = self.work_manager.n_workers or 1
assignments = np.empty((nsegs, npts), dtype=index_dtype)
trajlabels = np.empty((nsegs, npts), dtype=index_dtype)
statelabels = np.empty((nsegs, npts), dtype=index_dtype)
pops = np.zeros((nstates+1,nbins+1), dtype=weight_dtype)
#Submit jobs to work manager
blocksize = nsegs // n_workers
if nsegs % n_workers > 0:
blocksize += 1
def task_gen():
if __debug__:
checkset = set()
for lb in range(0, nsegs, blocksize):
ub = min(nsegs, lb+blocksize)
if __debug__:
checkset.update(set(range(lb,ub)))
args = ()
kwargs = dict(n_iter=n_iter,
lb=lb, ub=ub, mapper=self.binning.mapper, nstates=nstates, state_map=state_map,
last_labels=last_labels,
parent_id_dsspec=self.data_reader.parent_id_dsspec,
weight_dsspec=self.data_reader.weight_dsspec,
pcoord_dsspec=self.dssynth.dsspec,
subsample=self.subsample)
yield (_assign_label_pop, args, kwargs)
#futures.append(self.work_manager.submit(_assign_label_pop,
#kwargs=)
if __debug__:
assert checkset == set(range(nsegs)), 'segments missing: {}'.format(set(range(nsegs)) - checkset)
#for future in self.work_manager.as_completed(futures):
for future in self.work_manager.submit_as_completed(task_gen(), queue_size=self.max_queue_len):
assign_slice, traj_slice, slice_pops, lb, ub, state_slice = future.get_result(discard=True)
assignments[lb:ub, :] = assign_slice
trajlabels[lb:ub, :] = traj_slice
statelabels[lb:ub, :] = state_slice
pops += slice_pops
del assign_slice, traj_slice, slice_pops, state_slice
del futures
return (assignments, trajlabels, pops, statelabels)
def go(self):
assert self.data_reader.parent_id_dsspec._h5file is None
assert self.data_reader.weight_dsspec._h5file is None
if hasattr(self.dssynth.dsspec, '_h5file'):
assert self.dssynth.dsspec._h5file is None
pi = self.progress.indicator
pi.operation = 'Initializing'
with pi, self.data_reader, WESTPAH5File(self.output_filename, 'w', creating_program=True) as self.output_file:
assign = self.binning.mapper.assign
# We always assign the entire simulation, so that no trajectory appears to start
# in a transition region that doesn't get initialized in one.
iter_start = 1
iter_stop = self.data_reader.current_iteration
h5io.stamp_iter_range(self.output_file, iter_start, iter_stop)
nbins = self.binning.mapper.nbins
self.output_file.attrs['nbins'] = nbins
state_map = np.empty((self.binning.mapper.nbins+1,), index_dtype)
state_map[:] = 0 # state_id == nstates => unknown state
# Recursive mappers produce a generator rather than a list of labels
# so consume the entire generator into a list
labels = [np.string_(label) for label in self.binning.mapper.labels]
self.output_file.create_dataset('bin_labels', data=labels, compression=9)
if self.states:
nstates = len(self.states)
state_map[:] = nstates # state_id == nstates => unknown state
state_labels = [np.string_(state['label']) for state in self.states]
for istate, sdict in enumerate(self.states):
assert state_labels[istate] == np.string_(sdict['label']) #sanity check
state_assignments = assign(sdict['coords'])
for assignment in state_assignments:
state_map[assignment] = istate
self.output_file.create_dataset('state_map', data=state_map, compression=9, shuffle=True)
self.output_file['state_labels'] = state_labels #+ ['(unknown)']
else:
nstates = 0
self.output_file.attrs['nstates'] = nstates
# Stamp if this has been subsampled.
self.output_file.attrs['subsampled'] = self.subsample
iter_count = iter_stop - iter_start
nsegs = np.empty((iter_count,), seg_id_dtype)
npts = np.empty((iter_count,), seg_id_dtype)
# scan for largest number of segments and largest number of points
pi.new_operation ('Scanning for segment and point counts', iter_stop-iter_start)
for iiter, n_iter in enumerate(range(iter_start,iter_stop)):
iter_group = self.data_reader.get_iter_group(n_iter)
nsegs[iiter], npts[iiter] = iter_group['pcoord'].shape[0:2]
pi.progress += 1
del iter_group
pi.new_operation('Preparing output')
# create datasets
self.output_file.create_dataset('nsegs', data=nsegs, shuffle=True, compression=9)
self.output_file.create_dataset('npts', data=npts, shuffle=True, compression=9)
max_nsegs = nsegs.max()
max_npts = npts.max()
assignments_shape = (iter_count,max_nsegs,max_npts)
assignments_dtype = np.min_scalar_type(nbins)
assignments_ds = self.output_file.create_dataset('assignments', dtype=assignments_dtype, shape=assignments_shape,
compression=4, shuffle=True,
chunks=h5io.calc_chunksize(assignments_shape, assignments_dtype),
fillvalue=nbins)
if self.states:
trajlabel_dtype = np.min_scalar_type(nstates)
trajlabels_ds = self.output_file.create_dataset('trajlabels', dtype=trajlabel_dtype, shape=assignments_shape,
compression=4, shuffle=True,
chunks=h5io.calc_chunksize(assignments_shape, trajlabel_dtype),
fillvalue=nstates)
statelabels_ds = self.output_file.create_dataset('statelabels', dtype=trajlabel_dtype, shape=assignments_shape,
compression=4, shuffle=True,
chunks=h5io.calc_chunksize(assignments_shape, trajlabel_dtype),
fillvalue=nstates)
pops_shape = (iter_count,nstates+1,nbins+1)
pops_ds = self.output_file.create_dataset('labeled_populations', dtype=weight_dtype, shape=pops_shape,
compression=4, shuffle=True,
chunks=h5io.calc_chunksize(pops_shape, weight_dtype))
h5io.label_axes(pops_ds, [np.string_(i) for i in ['iteration', 'state', 'bin']])
pi.new_operation('Assigning to bins', iter_stop-iter_start)
last_labels = None # mapping of seg_id to last macrostate inhabited
for iiter, n_iter in enumerate(range(iter_start,iter_stop)):
#get iteration info in this block
if iiter == 0:
last_labels = np.empty((nsegs[iiter],), index_dtype)
last_labels[:] = nstates #unknown state
#Slices this iteration into n_workers groups of segments, submits them to wm, splices results back together
assignments, trajlabels, pops, statelabels = self.assign_iteration(n_iter, nstates, nbins, state_map, last_labels)
##Do stuff with this iteration's results
last_labels = trajlabels[:,-1].copy()
assignments_ds[iiter, 0:nsegs[iiter], 0:npts[iiter]] = assignments
pops_ds[iiter] = pops
if self.states:
trajlabels_ds[iiter, 0:nsegs[iiter], 0:npts[iiter]] = trajlabels
statelabels_ds[iiter, 0:nsegs[iiter], 0:npts[iiter]] = statelabels
pi.progress += 1
del assignments, trajlabels, pops, statelabels
for dsname in 'assignments', 'npts', 'nsegs', 'labeled_populations', 'statelabels':
h5io.stamp_iter_range(self.output_file[dsname], iter_start, iter_stop)
def entry_point():
WAssign().main()
if __name__ == '__main__':
entry_point()
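# Illustrative invocations (not part of the original tool; file names are assumptions).
# Only options defined in this module are shown; the bin-mapper and data-reader options
# come from the components wired up in add_args():
#
#     w_assign --states 'bound:[0.1,0.0]' 'unbound:[9.0,1.0]' -o assign.h5
#     w_assign --states-from-file states.yaml -o assign.h5 --subsample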
|
the-stack_0_21156 | import argparse
import ibm_boto3
import requests
from ibm_botocore.client import Config
from pyspark.sql import SparkSession
def get_secret(path):
with open(path, 'r') as f:
cred = f.readline().strip('\'')
f.close()
return cred
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument('--bucket_name', type=str, help='Object storage bucket name', default="dummy-bucket-name")
parser.add_argument('--data_url', type=str, help='URL of the data source', default="https://raw.githubusercontent.com/emartensibm/german-credit/binary/credit_risk_training.csv")
args = parser.parse_args()
cos_bucket_name = args.bucket_name
data_url = args.data_url
cos_url = get_secret("/app/secrets/cos_url")
    cos_apikey = get_secret("/app/secrets/cos_apikey")
cos_resource_instance_id = get_secret("/app/secrets/cos_resource_id")
''' Download data from data source '''
filename = data_url
response = requests.get(data_url, allow_redirects=True)
if data_url.find('/'):
filename = data_url.rsplit('/', 1)[1]
open(filename, 'wb').write(response.content)
''' Read data with Spark SQL '''
spark = SparkSession.builder.getOrCreate()
df_data = spark.read.csv(path=filename, sep=",", header=True, inferSchema=True)
df_data.head()
''' Upload data to IBM Cloud object storage '''
cos = ibm_boto3.resource('s3',
ibm_api_key_id=cos_apikey,
ibm_service_instance_id=cos_resource_instance_id,
ibm_auth_endpoint='https://iam.bluemix.net/oidc/token',
config=Config(signature_version='oauth'),
endpoint_url=cos_url)
buckets = []
for bucket in cos.buckets.all():
buckets.append(bucket.name)
if cos_bucket_name not in buckets:
cos.create_bucket(Bucket=cos_bucket_name)
cos.Bucket(cos_bucket_name).upload_file(filename, filename)
print('Data ' + filename + ' is uploaded to bucket at ' + cos_bucket_name)
with open("/tmp/filename.txt", "w") as report:
report.write(filename)
df_data.printSchema()
print("Number of records: " + str(df_data.count()))
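    # Illustrative invocation (not part of the original script; the script file name is
    # an assumption, and the secrets are expected under /app/secrets/):
    #
    #     python load_data.py --bucket_name my-training-bucket \
    #         --data_url https://raw.githubusercontent.com/emartensibm/german-credit/binary/credit_risk_training.csv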
|
the-stack_0_21157 | """TorchScript
This module contains functionality to support the JIT's scripting frontend, notably:
- torch.jit.script
This is not intended to be imported directly; please use the exposed
functionalities in `torch.jit`.
"""
import functools
import collections
import enum
import inspect
import copy
import pickle
import warnings
from typing import Any, Dict, List, Tuple, Union, Callable
import torch
import torch._jit_internal as _jit_internal
from torch.utils import set_module
from torch.jit._recursive import ScriptMethodStub, wrap_cpp_module, infer_methods_to_compile
from torch.nn import Module
from torch.jit._state import _enabled
from torch.jit._builtins import _register_builtin
from torch._six import with_metaclass
from torch.jit.frontend import get_jit_def, get_default_args, get_jit_class_def
from torch._jit_internal import _qualified_name
from torch.jit._fuser import _graph_for
from torch.jit._state import (
_try_get_jit_cached_function,
_try_get_jit_cached_overloads,
_set_jit_function_cache,
_set_jit_overload_cache,
)
from torch.overrides import (
has_torch_function, has_torch_function_unary, has_torch_function_variadic)
from torch.package import PackageExporter, PackageImporter
from ._serialization import validate_map_location
from torch.jit._monkeytype_config import (
monkeytype_trace,
    JitTypeTraceConfig,
JitTypeTraceStore
)
from torch._classes import classes
type_trace_db = JitTypeTraceStore() # DB to hold all call traces from MonkeyType
torch._C.ScriptMethod.graph_for = _graph_for # type: ignore[attr-defined]
torch._C.ScriptFunction.graph_for = _graph_for # type: ignore[attr-defined]
ScriptFunction = torch._C.ScriptFunction
ScriptFunction.__doc__ = """
Functionally equivalent to a :class:`ScriptModule`, but represents a single
function and does not have any attributes or Parameters.
"""
set_module(ScriptFunction, "torch.jit")
if _enabled:
Attribute = collections.namedtuple("Attribute", ["value", "type"])
else:
def Attribute(value, type): # type: ignore[no-redef]
return value
Attribute.__doc__ = """
This method is a pass-through function that returns `value`, mostly
used to indicate to the TorchScript compiler that the left-hand side
expression is a class instance attribute with type of `type`. Note that
`torch.jit.Attribute` should only be used in `__init__` method of `nn.Module`
subclasses.
Though TorchScript can infer correct type for most Python expressions, there are some cases where
type inference can be wrong, including:
- Empty containers like `[]` and `{}`, which TorchScript assumes to be container of `Tensor`
- Optional types like `Optional[T]` but assigned a valid value of type `T`, TorchScript would assume
it is type `T` rather than `Optional[T]`
In eager mode, it is simply a pass-through function that returns `value`
without other implications.
Example:
.. testcode::
import torch
from typing import Dict
class AttributeModule(torch.nn.Module):
def __init__(self):
                super(AttributeModule, self).__init__()
self.foo = torch.jit.Attribute(0.1, float)
# we should be able to use self.foo as a float here
assert 0.0 < self.foo
self.names_ages = torch.jit.Attribute({}, Dict[str, int])
self.names_ages["someone"] = 20
assert isinstance(self.names_ages["someone"], int)
m = AttributeModule()
# m will contain two attributes
# 1. foo of type float
# 2. names_ages of type Dict[str, int]
.. testcleanup::
del AttributeModule
del m
Args:
value: An initial value to be assigned to attribute.
type: A Python type
Returns:
Returns `value`
"""
def _get_type_trace_db():
# This is a private API. Use of this for external purposes is discouraged.
return type_trace_db
# Gets a function from the name of a method on a type
def _get_function_from_type(cls, name):
return getattr(cls, name, None)
# ScriptClasses must be new-style classes because we construct them using their
# __new__ method.
def _is_new_style_class(cls):
if hasattr(cls, "__class__"):
return "__dict__" in dir(cls) or hasattr(cls, "__slots__")
def _compile_and_register_class(obj, rcb, qualified_name):
ast = get_jit_class_def(obj, obj.__name__)
defaults = torch.jit.frontend.get_default_args_for_class(obj)
script_class = torch._C._jit_script_class_compile(qualified_name, ast, defaults, rcb)
torch.jit._state._add_script_class(obj, script_class)
return script_class
# These OrderedDictWrapper classes replace the actual OrderedDicts in
# module with versions that get/set properties inside of Module.
# This allows us to reuse most of nn.Module while still storing the
# data in C++.
# Each OrderedDict needs to support:
# x not in view
# x in view
# view[name] = ...
# view.values()
# del view[name]
# view.items()
# view.keys()
# len(view)
class OrderedDictWrapper(object):
def __init__(self, _c):
self._c = _c
def keys(self):
return [k for k, v in self.items()]
def values(self):
return [v for k, v in self.items()]
def __len__(self):
return len(self.values())
def __delitem__(self, k):
raise RuntimeError("cannot delete methods or parameters of a script module")
def items(self):
return self._c.items()
def __setitem__(self, k, v):
if k not in self:
raise RuntimeError(
"Can't add a new parameter after ScriptModule construction."
" Tried to add '{}".format(k)
)
self._c.setattr(k, v)
def __contains__(self, k):
return self._c.contains(k)
def __getitem__(self, k):
if k not in self:
raise KeyError(k)
return self._c.getattr(k)
class OrderedModuleDict(OrderedDictWrapper):
def __init__(self, module, python_dict):
super(OrderedModuleDict, self).__init__(torch._C.ModuleDict(module))
# contains _both_ script modules and non-script python-only modules
# because script modules are subclassed in python and the
# C++ Module class will not hold references to them,
# to ensure that you always get the same python value here
# we store it in the python dict as well
self._python_modules = python_dict
def items(self):
r = self._python_modules.items()
return r
def __contains__(self, k):
return k in self._python_modules
def __setitem__(self, k, v):
# Cases where sub-module can be re-assigned after ScriptModule construction
# 1. If the attr is an module interface type, it's guaranteed that the module is
# not inlined in the graph, so it's safe to swap a new ScriptModule in.
# 2. if the new value if a ScriptModule with the same JIT type, IR won't change
# and it's legit to swap a new module in.
# In these two cases we allow swapping a new scripted module and update the
# corresponding python module dict to keep sync.
# Note: the value to be swapped in has to be ScriptModule instead of nn.Module,
# otherwise it's illegal and we throw error.
if isinstance(v, ScriptModule):
self._c.setattr(k, v)
self._python_modules[k] = v
else:
raise RuntimeError(
"Cannot re-assign modules in a ScriptModule with non-scripted "
"module, tried to replace existing module '{}': {}".format(k, v)
)
def __getitem__(self, k):
return self._python_modules[k]
# For each user-defined class that subclasses ScriptModule, this meta-class:
# (1) finds all the methods annotated with @script_method in a ScriptModule and
# removes them from the class attributes
# (2) puts a wrapper around the class's __init__ method to recursively compile
# all of the script_methods with the module after the original __init__ has
# run. This has to occur after the user-defined __init__ so that submodules and
# parameters are initialized _before_ the script compiler resolve references to
# `self.param` or `self.module`.
class ScriptMeta(type):
def __init__(cls, name, bases, attrs): # noqa: B902
# Aggregate all the ScriptMethods and constants from superclasses
cls._methods: Dict[str, Any] = {}
cls._constants_set = set(getattr(cls, "__constants__", ()))
for base in reversed(bases):
for k, v in getattr(base, "_methods", {}).items():
cls._methods[k] = v
base_constants = getattr(base, "_constants_set", set())
cls._constants_set = cls._constants_set.union(base_constants)
# find all the script methods of the current class
for k, v in sorted(attrs.items()):
if isinstance(v, ScriptMethodStub):
delattr(cls, k)
cls._methods[v.original_method.__name__] = v
if getattr(cls, "_disable_script_meta", False):
# We leave built-in ScriptModule types alone, since this metaclass
# is only for compiling user classes that inherit from
# ScriptModule.
return super(ScriptMeta, cls).__init__(name, bases, attrs)
original_init = getattr(cls, "__init__", lambda self: None)
@functools.wraps(original_init)
def init_then_script(self, *args, **kwargs):
num_methods = len(cls._methods)
original_init(self, *args, **kwargs)
added_methods_in_init = len(cls._methods) > num_methods
if type(self) == cls:
def make_stubs(module):
cls = type(module)
if hasattr(cls, "_methods"):
return [v for k, v in sorted(cls._methods.items())]
else:
return infer_methods_to_compile(module)
self.__dict__[
"_actual_script_module"
] = torch.jit._recursive.create_script_module(self, make_stubs, share_types=not added_methods_in_init)
# Delete the Python attributes that now shadow the ScriptModule
# ones, so that __getattr__ and __setattr__ will properly find
# the scripted versions.
concrete_type = self._actual_script_module._concrete_type
for name in concrete_type.get_attributes():
delattr(self, name)
for name, _ in concrete_type.get_modules():
delattr(self, name)
for name in ("_parameters", "_buffers", "_modules"):
delattr(self, name)
cls.__init__ = init_then_script # type: ignore[misc]
super(ScriptMeta, cls).__init__(name, bases, attrs)
class _CachedForward(object):
def __get__(self, obj, cls):
return self.__getattr__("forward") # type: ignore[attr-defined]
class ScriptWarning(Warning):
pass
def script_method(fn):
if not _enabled:
return fn
# NOTE: we need to traverse two frames here because the meta-class frame
    # for ScriptModule will be present, as opposed to invoking @script on a
    # function or invoking define() on a CompilationUnit.
# The stack will look like:
#
# 0. createResolutionCallback()
# 1. script_method()
# 2. ScriptModule metaclass frame
# 3. Surrounding scope
#
# createResolutionCallback internally adds 1 to get us to the scope of this
# function (the calling function). Adding 2 gets us to the proper surrounding scope.
_rcb = _jit_internal.createResolutionCallbackFromFrame(frames_up=2)
ast = get_jit_def(fn, fn.__name__, self_name="ScriptModule")
return ScriptMethodStub(_rcb, ast, fn)
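# Sketch of the (older) @script_method API that produces these stubs; the metaclass
# below collects them and compiles them after the user-defined __init__ has run:
#
#     class MyModule(torch.jit.ScriptModule):
#         @torch.jit.script_method
#         def forward(self, x):
#             return x + 1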
class ConstMap:
def __init__(self, const_mapping):
self.const_mapping = const_mapping
def __getattr__(self, attr):
return self.const_mapping[attr]
def unpackage_script_module(importer: PackageImporter, script_module_id: str) -> torch.nn.Module:
"""
Called by ``torch.package.PackageImporter``'s Pickler's ``persistent_load`` function.
Performs work of loading and returning a ScriptModule from a ``torch.package`` archive.
"""
if not isinstance(importer.zip_reader, torch._C.PyTorchFileReader):
raise RuntimeError(
"Loading ScriptObjects from a PackageImporter created from a "
"directory is not supported. Use a package archive file instead."
)
cu = torch._C.CompilationUnit()
cpp_module = torch._C._import_ir_module_from_package(
cu,
importer.zip_reader,
importer.storage_context,
validate_map_location(importer.last_map_location),
script_module_id,
)
return wrap_cpp_module(cpp_module)
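# Rough usage sketch for the torch.package path handled here (the archive and resource
# names are made up). save_pickle triggers __reduce_package__ on the ScriptModule, and
# load_pickle calls back into unpackage_script_module via persistent_load:
#
#     from torch.package import PackageExporter, PackageImporter
#     with PackageExporter("model_package.pt") as exporter:
#         exporter.save_pickle("model", "model.pkl", scripted_model)
#     loaded = PackageImporter("model_package.pt").load_pickle("model", "model.pkl")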
if _enabled:
# this is a Python 'non-data descriptor' that causes the first access
# to ScriptModule's forward to lookup the forward method and stash
# it in the objects dict. Due to the standard rules for attribute lookup,
# subsequent lookups will just directly return the previously looked up method.
# This is necessary because nn.Module defines forward as a method. If we
# did nothing, __getattr__ would not be called. Instead we'd get nn.Module.forward
# which always throws an exception.
class ScriptModule(with_metaclass(ScriptMeta, Module)): # type: ignore[misc]
r"""
A wrapper around C++ ``torch::jit::Module``. ``ScriptModule``\s
contain methods, attributes, parameters, and
constants. These can be accessed the same way as on a normal ``nn.Module``.
"""
__jit_unused_properties__ = ['code', 'code_with_constants', 'graph', 'inlined_graph', 'original_name']
def __init__(self):
super(ScriptModule, self).__init__()
forward = _CachedForward()
def __getattr__(self, attr):
if "_actual_script_module" not in self.__dict__:
return super(ScriptModule, self).__getattr__(attr)
return getattr(self._actual_script_module, attr)
def __setattr__(self, attr, value):
if "_actual_script_module" not in self.__dict__:
# Unwrap torch.jit.Attribute into a regular setattr + record
# the provided type in __annotations__.
#
# This ensures that if we use the attr again in `__init__`, it
# will look like the actual value, not an instance of Attribute.
if isinstance(value, Attribute):
# NB: Ensure that we set __annotations__ on the specific
# class in question, and not on a superclass (which would
# be wrong wrong wrong!).
# See also https://github.com/pytorch/pytorch/issues/39463
if "__annotations__" not in self.__class__.__dict__:
self.__class__.__annotations__ = {}
self.__annotations__[attr] = value.type
value = value.value
return super(ScriptModule, self).__setattr__(attr, value)
setattr(self._actual_script_module, attr, value)
def define(self, src):
if "_actual_script_module" in self.__dict__:
# If we have completed initialization, just defer to the
# backing RecursiveScriptModule to eagerly compile the provided
# source.
return self._actual_script_module.define(src)
# Otherwise, we are still in the object's __init__.
# In that case, add `src` as a stub to be compiled.
#
# We use frames_up=1 to get to the proper surrounding scope. The stack
# will look like:
# 0. createResolutionCallback
# 1. define()
# 2. surrounding scope.
#
# createResolutionCallback internally adds 1 to get us to our frame, then
# we add 1 to get to the proper surrounding scope.
rcb = _jit_internal.createResolutionCallbackFromFrame(frames_up=1)
ast = torch._C._parse_source_def(src)
self._methods[ast.name().name] = ScriptMethodStub(rcb, ast, None)
def _replicate_for_data_parallel(self):
return self._actual_script_module._replicate_for_data_parallel()
def __reduce_package__(self, exporter: PackageExporter):
"""
Called by ``torch.package.PackageExporter``'s Pickler's ``persistent_id`` when
saving TorchScript objects. Performs act of saving a ScriptModule inside of
a ``torch.package`` archive.
Returns method to load the ScriptModule from a ``torch.package.PackageImporter``'s
Pickler's ``persistent_load`` function.
"""
script_module_id = exporter.get_unique_id()
exporter.script_module_serializer.serialize(self._c, int(script_module_id))
return (unpackage_script_module, (script_module_id,))
class RecursiveScriptModule(ScriptModule):
# XXX: RecursiveScriptModule inherits from ScriptModule for the sole
# reason that it retains the existing isinstance(ScriptModule)
# behavior.
r"""
The core data structure in TorchScript is the ``ScriptModule``. It is an
analogue of torch's ``nn.Module`` and represents an entire model as a tree of
submodules. Like normal modules, each individual module in a ``ScriptModule`` can
have submodules, parameters, and methods. In ``nn.Module``\s methods are implemented
as Python functions, but in ``ScriptModule``\s methods are implemented as
TorchScript functions, a statically-typed subset of Python that contains all
of PyTorch's built-in Tensor operations. This difference allows your
``ScriptModule``\s code to run without the need for a Python interpreter.
``ScriptModule``\s should not be created manually, instead use
either :func:`tracing <torch.jit.trace>` or :func:`scripting <torch.jit.script>`.
Tracing and scripting can be applied incrementally and :ref:`composed as necessary <Types>`.
* Tracing records the tensor operations as executed with a set of example inputs and uses these
operations to construct a computation graph. You can use the full dynamic behavior of Python with tracing,
but values other than Tensors and control flow aren't captured in the graph.
* Scripting inspects the Python code of the model
and compiles it to TorchScript. Scripting allows the use of many `types`_ of values and supports dynamic control flow.
Many, but not all features of Python are supported by the compiler, so changes to the source code may be necessary.
"""
_disable_script_meta = True
def __init__(self, cpp_module):
self.__dict__["_initializing"] = True
self._c = cpp_module
super(RecursiveScriptModule, self).__init__()
# Delete the 'training' attribute set up by `Module.__init__`. It
# will get set on the underlying cpp module, so we delete it here
# to avoid this version shadowing the cpp module version.
delattr(self, "training")
@staticmethod
def _construct(cpp_module, init_fn):
"""
Construct a RecursiveScriptModule that's ready for use. PyTorch
            code should use this to construct a RecursiveScriptModule instead
            of calling `__init__` directly, as it makes sure the
object is properly finalized (and in the future, we may take
control of how the RecursiveScriptModule instance is created).
Args:
cpp_module: The C++ Module that will hold the actual state of
this RecursiveScriptModule instance.
init_fn: Lambda that initializes the RecursiveScriptModule passed to it.
"""
script_module = RecursiveScriptModule(cpp_module)
init_fn(script_module)
# Finalize the ScriptModule: replace the nn.Module state with our
# custom implementations and flip the _initializing bit.
RecursiveScriptModule._finalize_scriptmodule(script_module)
return script_module
@staticmethod
def _finalize_scriptmodule(script_module):
script_module._parameters = OrderedDictWrapper(
torch._C.ParameterDict(script_module._c)
)
script_module._buffers = OrderedDictWrapper(
torch._C.BufferDict(script_module._c)
)
script_module._modules = OrderedModuleDict(
script_module._c, script_module._modules
)
script_module._initializing = False
def _reconstruct(self, cpp_module):
"""
Re-construct an instance of RecursiveScriptModule using an instance of a C++ module.
Args:
cpp_module: The C++ module that this RecursiveScriptModule will be rebuilt around.
"""
self.__init__(cpp_module) # type: ignore[misc]
# Copy the concrete type from the C++ module to this ScriptModule.
self._concrete_type = torch._C.ConcreteModuleType.from_jit_type(
self._c._type()
)
# Copy submodules from the C++ module to this ScriptModule.
modules = {}
for name, cpp_module in torch._C.ModuleDict(self._c).items():
modules[name] = wrap_cpp_module(cpp_module)
self._modules = OrderedModuleDict(self._c, modules)
# Copy parameters and buffers.
self._parameters = OrderedDictWrapper(torch._C.ParameterDict(self._c))
self._buffers = OrderedDictWrapper(torch._C.BufferDict(self._c))
# Get rid of the functions from the old C++ module.
self.__dict__ = {
k: v
for k, v in self.__dict__.items()
if not isinstance(v, torch._C.ScriptMethod)
}
self.__dict__["_initializing"] = False
@property
def graph(self):
r"""
Returns a string representation of the internal graph for the
``forward`` method. See :ref:`interpreting-graphs` for details.
"""
return self._c._get_method("forward").graph
@property
def inlined_graph(self):
r"""
Returns a string representation of the internal graph for the
``forward`` method. This graph will be preprocessed to inline all function and method calls.
See :ref:`interpreting-graphs` for details.
"""
return self.forward.inlined_graph
@property
def code(self):
r"""
Returns a pretty-printed representation (as valid Python syntax) of
the internal graph for the ``forward`` method. See
:ref:`inspecting-code` for details.
"""
return self.forward.code
@property
def code_with_constants(self):
r"""
Returns a tuple of:
[0] a pretty-printed representation (as valid Python syntax) of
the internal graph for the ``forward`` method. See `code`.
[1] a ConstMap following the CONSTANT.cN format of the output in [0].
The indices in the [0] output are keys to the underlying constant's values.
See :ref:`inspecting-code` for details.
"""
r = self.forward.code_with_constants
return (r[0], ConstMap(r[1]))
def save(self, f, **kwargs):
r"""
save(f, _extra_files={})
See :func:`torch.jit.save <torch.jit.save>` for details.
"""
return self._c.save(str(f), **kwargs)
def _save_for_lite_interpreter(self, *args, **kwargs):
r"""
_save_for_lite_interpreter(f)
Add (or update) the bytecode session to the script model. The updated model is used
in lite interpreter for mobile applications.
Args:
f: a string containing a file name.
_extra_files: Map from filename to contents which will be stored as part of 'f'.
"""
return self._c._save_for_mobile(*args, **kwargs)
def _save_to_buffer_for_lite_interpreter(self, *args, **kwargs):
return self._c._save_to_buffer_for_mobile(*args, **kwargs)
def save_to_buffer(self, *args, **kwargs):
return self._c.save_to_buffer(*args, **kwargs)
def get_debug_state(self, *args, **kwargs):
return self._c.get_debug_state()
def extra_repr(self):
return "original_name={}".format(self.original_name)
def graph_for(self, *args, **kwargs):
return self.forward.graph_for(*args, **kwargs)
@property
def original_name(self):
if type(self) == str(self._c._type().name()):
return ""
return str(self._c._type().name())
def define(self, src):
# We use frames_up=1 to get to the proper surrounding scope. The stack
# will look like:
# 0. createResolutionCallback
# 1. define()
# 2. surrounding scope.
#
# createResolutionCallback internally adds 1 to get us to our frame, then
# we add 1 to get to the proper surrounding scope.
rcb = _jit_internal.createResolutionCallbackFromFrame(frames_up=1)
self._c._define(self._concrete_type, src, rcb)
def __getattr__(self, attr):
if "_initializing" not in self.__dict__:
raise RuntimeError(
"ScriptModule has not been initialized, did you forget to call super's init?"
)
if self._initializing:
return super(RecursiveScriptModule, self).__getattr__(attr)
# _modules check is before hasattr since modules are included as attributes in _c,
# but we want to get the python wrapper from _modules instead of the raw _c object.
if attr in self._modules:
return self._modules[attr]
elif self._c.hasattr(attr):
return self._c.getattr(attr)
elif self._c._has_method(attr):
script_method = self._c._get_method(attr)
# cache method so future calls do not go through __getattr__
# to improve invocation performance
self.__dict__[attr] = script_method
return script_method
return super(RecursiveScriptModule, self).__getattr__(attr)
def __setattr__(self, attr, value):
if self._initializing:
return super(RecursiveScriptModule, self).__setattr__(attr, value)
if attr in self._modules:
self._modules[attr] = value
elif self._c.hasattr(attr):
self._c.setattr(attr, value)
elif (
hasattr(self, "_concrete_type")
and attr in self._concrete_type.get_constants().keys()
):
# TODO: we don't have _concrete_type set after load(), and in general we lose constant information.
# We should encode constants as class type attributes (or something) so it persists across save/load.
raise AttributeError(
"Cannot mutate TorchScript constant value: '{}'. Value: '{}'".format(
attr, value
)
)
else:
# We allow setting Python attributes on the ScriptModule, for
# when people want to stash some convenience info on it.
# TODO: it's possible that the following is confusing:
# s = torch.jit.script(...)
# s.python_attr = ...
# s.save() <--- this doesn't have `python_attr`
# It's fairly trivial to save enough info to warn in this case.
return super(RecursiveScriptModule, self).__setattr__(attr, value)
def __getstate__(self):
raise pickle.PickleError(
"ScriptModules cannot be deepcopied using copy.deepcopy or saved using torch.save. "
+ "Mixed serialization of script and non-script modules is not supported. "
+ "For purely script modules use my_script_module.save(<filename>) instead."
)
def __copy__(self):
return torch.jit._recursive.wrap_cpp_module(copy.copy(self._c))
def __deepcopy__(self, memo):
return torch.jit._recursive.wrap_cpp_module(copy.deepcopy(self._c, memo))
# Python magic methods do method lookups on an object's class type, instead of looking up
# the method defines on the class instance. In order to continue to expose the magic methods
# of builtin-containers (ModuleList, Sequential, ModuleDict) to Python, we
# define magic methods here as a shim to the correct attribute.
def forward_magic_method(self, method_name, *args, **kwargs):
self_method = getattr(self, method_name)
if getattr(self_method, "__func__", None) == getattr(
RecursiveScriptModule, method_name
):
raise NotImplementedError()
return self_method(*args, **kwargs)
def __iter__(self):
return self.forward_magic_method("__iter__")
def __getitem__(self, idx):
return self.forward_magic_method("__getitem__", idx)
def __len__(self):
return self.forward_magic_method("__len__")
def __contains__(self, key):
return self.forward_magic_method("__contains__", key)
# dir is defined by the base nn.Module, so instead of throwing if
# it is not overridden, we call into the nn.Module __dir__ method
def __dir__(self):
self_method = self.__dir__
if self_method.__func__ == _get_function_from_type( # type: ignore[attr-defined]
RecursiveScriptModule, "__dir__"
):
return super(RecursiveScriptModule, self).__dir__()
return self_method()
# to resolve bool(value), Python looks if __bool__ is defined then __iter__
# is defined then returns true for classes. Since __iter__() on this
# class throws if it isn't overridden, we define __bool__ to preserve default behavior
def __bool__(self):
self_method = self.__bool__
if self_method.__func__ == _get_function_from_type( # type: ignore[attr-defined]
RecursiveScriptModule, "__bool__"
):
return True
return self_method()
def _replicate_for_data_parallel(self):
# we have to initialize ScriptModule properly so that
# it works with pybind11
def init_fn(script_module):
# Don't do anything here, we'll initialize the ScriptModule below
return
return RecursiveScriptModule._construct(
self._c._replicate_for_data_parallel(), init_fn
)
# Need to copy all RecursiveScriptModule methods to ScriptModule.
#
# This is because `super(MyScriptModule, self).foo()` does not use
# `__getattr__` to look up `foo`. So we need to make each method available on
# the ScriptModule manually.
for name, item in RecursiveScriptModule.__dict__.items():
if not callable(item) and not isinstance(item, property):
continue
if name.startswith("__") or hasattr(ScriptModule, name):
continue
# We can copy over the implementation wholesale because besides the
# `super()` thing above, ScriptModule behaves exactly like
# RecursiveScriptModule
setattr(ScriptModule, name, item)
def _get_methods(cls):
import inspect
# In Python 3 unbound methods are functions, but in Python 2 they are methods
return inspect.getmembers(
cls, predicate=lambda x: inspect.isfunction(x) or inspect.ismethod(x)
)
_compiled_methods_allowlist = {
"forward",
"register_buffer",
"register_parameter",
"add_module",
"_apply",
"apply",
"cuda",
"cpu",
"to",
"type",
"float",
"double",
"half",
"state_dict",
"_save_to_state_dict",
"load_state_dict",
"_load_from_state_dict",
"_named_members",
"parameters",
"named_parameters",
"buffers",
"named_buffers",
"children",
"named_children",
"modules",
"named_modules",
"zero_grad",
"share_memory",
"_get_name",
"extra_repr",
"_slow_forward",
"_tracing_name",
"eval",
"train",
}
def _make_fail(name):
def fail(self, *args, **kwargs):
raise RuntimeError(name + " is not supported on ScriptModules")
return fail
for name, method in _get_methods(torch.nn.Module):
if name.startswith("__"):
continue
if (
name not in RecursiveScriptModule.__dict__
and name not in _compiled_methods_allowlist
):
setattr(RecursiveScriptModule, method.__name__, _make_fail(name))
else:
# TODO MAKE SURE THAT DISABLING WORKS
class ScriptModule(torch.nn.Module): # type: ignore[no-redef]
def __init__(self, arg=None):
super().__init__()
class RecursiveScriptModule(ScriptModule): # type: ignore[no-redef]
def __init__(self, arg=None):
super().__init__()
def call_prepare_scriptable_func_impl(obj, memo):
if not isinstance(obj, torch.nn.Module):
return obj
obj_id = id(obj)
# If obj_id is in memo, obj has already been prepared or is being
# prepared in another call up the stack.
if obj_id in memo:
return memo[id(obj)]
obj = obj.__prepare_scriptable__() if hasattr(obj, '__prepare_scriptable__') else obj # type: ignore[operator]
# Record obj in memo to avoid infinite recursion in the case of cycles in the module
# hierarchy when recursing below.
memo[obj_id] = obj
new_obj_dict = {}
for name, sub_module in obj.__dict__.items():
if name == '_modules':
for k, v in sub_module.items():
sub_module[k] = call_prepare_scriptable_func_impl(v, memo)
new_obj_dict[name] = sub_module
elif isinstance(sub_module, torch.nn.Module) and not isinstance(sub_module, ScriptModule):
new_obj_dict[name] = call_prepare_scriptable_func_impl(sub_module, memo)
else:
new_obj_dict[name] = sub_module
for k, v in new_obj_dict.items():
        obj.__dict__[k] = v
return obj
def call_prepare_scriptable_func(obj):
memo: Dict[int, torch.nn.Module] = {}
return call_prepare_scriptable_func_impl(obj, memo)
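# Sketch of the hook honored above (class names are hypothetical): a module may hand
# back a script-friendly stand-in for itself right before compilation.
#
#     class FastButUnscriptable(torch.nn.Module):
#         def __prepare_scriptable__(self):
#             return ScriptFriendlyEquivalent()   # compiled in place of `self`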
def _script_pdt(obj, optimize=None, _frames_up=0, _rcb=None,
example_inputs: Union[List[Tuple], Dict[Callable, List[Tuple]], None] = None):
    # This is a private API, intended for internal use only. Usage of this API is for
    # experimental purposes only and is highly discouraged.
global type_trace_db
if not _enabled:
return obj
if optimize is not None:
warnings.warn(
"`optimize` is deprecated and has no effect. Use `with torch.jit.optimized_execution() instead"
)
# No-op for modules and functions that are already scripted
if isinstance(obj, ScriptModule):
return obj
if isinstance(obj, ScriptFunction):
return obj
if example_inputs:
# If MonkeyType is installed, enable profile directed type annotation
        # Check if example_inputs are defined and generate call traces
        # for the method by running the eager-mode version of the method with
        # the provided example inputs. This logs all the traces in type_trace_db
type_trace_db = JitTypeTraceStore()
if monkeytype_trace:
monkeytype_config = JitTypeTraceConfig(type_trace_db)
with monkeytype_trace(monkeytype_config):
if isinstance(example_inputs, Dict):
# If the obj is an nn.Module or a class, then each method is
# executed with the arguments provided in the example inputs.
# example inputs here will be of type Dict(class.method, (arguments))
# This is used to infer type annotations for those methods
# which are not called directly under the hood of monkeytype.
for module, example_input in example_inputs.items():
for example in example_input:
module(*example)
elif isinstance(example_inputs, List):
for examples in example_inputs:
obj(*examples)
else:
warnings.warn("Error: Unable to infer types. Please format the inputs to type `List[Tuple]`"
" or `Dict[Callable, List[Tuple]]` to be run with MonkeyType.")
else:
warnings.warn("Warning: monkeytype is not installed. Please install https://github.com/Instagram/MonkeyType "
"to enable Profile-Directed Typing in TorchScript. Refer to "
"https://github.com/Instagram/MonkeyType/blob/master/README.rst to install MonkeyType. ")
return script(obj, optimize, _frames_up, _rcb)
def create_script_dict(obj):
"""
Create a ``torch._C.ScriptDict`` instance with the data from ``obj``.
Args:
obj (dict): The Python dictionary that is used to initialize the ``ScriptDict``
returned by this function.
Returns:
An instance of ``torch._C.ScriptDict`` that has the same data as ``obj``
and can be passed between Python and TorchScript with reference semantics and
zero copy overhead.
"""
return torch._C.ScriptDict(obj) # type: ignore[attr-defined]
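# Minimal sketch: passing a plain dict to torch.jit.script routes through here and
# yields a ScriptDict with reference semantics (mutations are visible on both sides):
#
#     d = torch.jit.script({"a": torch.zeros(2)})
#     d["b"] = torch.ones(2)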
def script(obj, optimize=None, _frames_up=0, _rcb=None):
r"""
Scripting a function or ``nn.Module`` will inspect the source code, compile
it as TorchScript code using the TorchScript compiler, and return a :class:`ScriptModule` or
:class:`ScriptFunction`. TorchScript itself is a subset of the Python language, so not all
features in Python work, but we provide enough functionality to compute on
tensors and do control-dependent operations. For a complete guide, see the
:ref:`language-reference`.
    Scripting a dictionary copies the data inside it into a TorchScript instance that can be
subsequently passed by reference between Python and TorchScript with zero copy overhead.
``torch.jit.script`` can be used as a function for modules, functions, and dictionaries
and as a decorator ``@torch.jit.script`` for :ref:`torchscript-classes` and functions.
Args:
obj (callable, class, or ``nn.Module``): The ``nn.Module``, function, class type, or
dictionary to compile.
Returns:
If ``obj`` is ``nn.Module``, ``script`` returns
a :class:`ScriptModule` object. The returned :class:`ScriptModule` will
have the same set of sub-modules and parameters as the
original ``nn.Module``. If ``obj`` is a standalone function,
a :class:`ScriptFunction` will be returned. If ``obj`` is a ``dict``, then
``script`` returns an instance of `torch._C.ScriptDict`.
**Scripting a function**
The ``@torch.jit.script`` decorator will construct a :class:`ScriptFunction`
by compiling the body of the function.
Example (scripting a function):
.. testcode::
import torch
@torch.jit.script
def foo(x, y):
if x.max() > y.max():
r = x
else:
r = y
return r
print(type(foo)) # torch.jit.ScriptFunction
# See the compiled graph as Python code
print(foo.code)
# Call the function using the TorchScript interpreter
foo(torch.ones(2, 2), torch.ones(2, 2))
.. testoutput::
:hide:
...
**Scripting an nn.Module**
Scripting an ``nn.Module`` by default will compile the ``forward`` method and recursively
compile any methods, submodules, and functions called by ``forward``. If a ``nn.Module`` only uses
features supported in TorchScript, no changes to the original module code should be necessary. ``script``
will construct :class:`ScriptModule` that has copies of the attributes, parameters, and methods of
the original module.
Example (scripting a simple module with a Parameter):
.. testcode::
import torch
class MyModule(torch.nn.Module):
def __init__(self, N, M):
super(MyModule, self).__init__()
# This parameter will be copied to the new ScriptModule
self.weight = torch.nn.Parameter(torch.rand(N, M))
# When this submodule is used, it will be compiled
self.linear = torch.nn.Linear(N, M)
def forward(self, input):
output = self.weight.mv(input)
# This calls the `forward` method of the `nn.Linear` module, which will
# cause the `self.linear` submodule to be compiled to a `ScriptModule` here
output = self.linear(output)
return output
scripted_module = torch.jit.script(MyModule(2, 3))
Example (scripting a module with traced submodules):
.. testcode::
import torch
import torch.nn as nn
import torch.nn.functional as F
class MyModule(nn.Module):
def __init__(self):
super(MyModule, self).__init__()
# torch.jit.trace produces a ScriptModule's conv1 and conv2
self.conv1 = torch.jit.trace(nn.Conv2d(1, 20, 5), torch.rand(1, 1, 16, 16))
self.conv2 = torch.jit.trace(nn.Conv2d(20, 20, 5), torch.rand(1, 20, 16, 16))
def forward(self, input):
input = F.relu(self.conv1(input))
input = F.relu(self.conv2(input))
return input
scripted_module = torch.jit.script(MyModule())
To compile a method other than ``forward`` (and recursively compile anything it calls), add
the :func:`@torch.jit.export <torch.jit.export>` decorator to the method. To opt out of compilation
use :func:`@torch.jit.ignore <torch.jit.ignore>` or :func:`@torch.jit.unused <torch.jit.unused>`.
Example (an exported and ignored method in a module)::
import torch
import torch.nn as nn
class MyModule(nn.Module):
def __init__(self):
super(MyModule, self).__init__()
@torch.jit.export
def some_entry_point(self, input):
return input + 10
@torch.jit.ignore
def python_only_fn(self, input):
# This function won't be compiled, so any
# Python APIs can be used
import pdb
pdb.set_trace()
def forward(self, input):
if self.training:
self.python_only_fn(input)
return input * 99
scripted_module = torch.jit.script(MyModule())
print(scripted_module.some_entry_point(torch.randn(2, 2)))
print(scripted_module(torch.randn(2, 2)))
"""
if not _enabled:
return obj
if optimize is not None:
warnings.warn(
"`optimize` is deprecated and has no effect. Use `with torch.jit.optimized_execution() instead"
)
# No-op for modules and functions that are already scripted
if isinstance(obj, ScriptModule):
return obj
if isinstance(obj, ScriptFunction):
return obj
if isinstance(obj, torch.nn.Module):
obj = call_prepare_scriptable_func(obj)
return torch.jit._recursive.create_script_module(
obj, torch.jit._recursive.infer_methods_to_compile
)
if isinstance(obj, dict):
return create_script_dict(obj)
qualified_name = _qualified_name(obj)
if inspect.isclass(obj):
# If this type is a `nn.Module` subclass, they probably meant to pass
# an instance instead of a Module
if issubclass(obj, torch.nn.Module):
raise RuntimeError(
"Type '{}' cannot be compiled since it inherits"
" from nn.Module,"
" pass an instance instead".format(obj)
)
# Enums are automatically usable in TorchScript, explicitly scripting
# is not necessary, but not harmful either.
if issubclass(obj, enum.Enum):
return obj
if not _is_new_style_class(obj):
raise RuntimeError(
"TorchScript classes must be new-style classes. "
"Please inherit from 'object'."
)
if len(obj.mro()) > 2:
raise RuntimeError(
"TorchScript classes does not support inheritance yet. "
"Please directly inherit from 'object'."
)
if _rcb is None:
_rcb = _jit_internal.createResolutionCallbackFromFrame(_frames_up + 1)
_compile_and_register_class(obj, _rcb, qualified_name)
return obj
else:
        # this is a decorated fn, and we need to get the underlying fn and its rcb
if hasattr(obj, "__script_if_tracing_wrapper"):
obj = obj.__original_fn
_rcb = _jit_internal.createResolutionCallbackFromClosure(obj)
_check_directly_compile_overloaded(obj)
maybe_already_compiled_fn = _try_get_jit_cached_function(obj)
if maybe_already_compiled_fn:
return maybe_already_compiled_fn
ast = get_jit_def(obj, obj.__name__)
if _rcb is None:
_rcb = _jit_internal.createResolutionCallbackFromClosure(obj)
fn = torch._C._jit_script_compile(
qualified_name, ast, _rcb, get_default_args(obj)
)
# Forward docstrings
fn.__doc__ = obj.__doc__
_set_jit_function_cache(obj, fn)
return fn
# overloads are registered in _jit_internal and compiled here so that _overload
# can be used in nn/functional.py without an import cycle
def _check_overload_defaults(impl_defaults, overload_defaults, loc):
for name, overload_value in overload_defaults.items():
if name not in impl_defaults or impl_defaults[name] != overload_value:
raise torch.jit.frontend.FrontendError(
loc,
"Default parameters on overloads do not affect the runtime so they "
"must equal to the default parameter on the implementation function. Found on "
"parameter {name}".format(name=name),
)
def _compile_function_with_overload(overload_fn, qual_name, impl_fn):
overload_decl = get_jit_def(overload_fn, overload_fn.__name__).decl()
overload_signature = torch.jit.annotations.get_signature(
overload_fn, None, None, inspect.ismethod(overload_fn)
)
impl_ast = get_jit_def(impl_fn, impl_fn.__name__)
overload_defaults = get_default_args(overload_fn)
implementation_defaults = get_default_args(impl_fn)
_rcb = _jit_internal.createResolutionCallbackFromClosure(impl_fn)
_check_overload_defaults(
implementation_defaults, overload_defaults, overload_decl.range()
)
fn = torch._C._jit_script_compile_overload(
qual_name,
overload_decl,
impl_ast,
_rcb,
implementation_defaults,
overload_signature,
)
return fn
def _get_overloads(obj):
# check for cached compiled fns
existing_compiled_fns = _try_get_jit_cached_overloads(obj)
qual_name = _qualified_name(obj)
uncompiled_overloads = _jit_internal._get_fn_overloads(qual_name)
if uncompiled_overloads is None:
return existing_compiled_fns
compiled_fns = []
for overload_fn in uncompiled_overloads:
compiled_fns.append(
_compile_function_with_overload(overload_fn, qual_name, obj)
)
if existing_compiled_fns:
compiled_fns = existing_compiled_fns + compiled_fns
# cache compilation, remove information stored to do compilation
_set_jit_overload_cache(obj, compiled_fns)
_jit_internal._clear_fn_overloads(qual_name)
return compiled_fns
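# Sketch of the _overload pattern these helpers compile (signatures are illustrative);
# the decorated stubs only declare types, the final undecorated def is the implementation:
#
#     @torch.jit._overload
#     def emb(x: int) -> Tensor: ...
#     @torch.jit._overload
#     def emb(x: List[int]) -> Tensor: ...
#     def emb(x):
#         return torch.tensor(x)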
def _check_directly_compile_overloaded(obj):
qual_name = _qualified_name(obj)
if _jit_internal._get_fn_overloads(qual_name) or _try_get_jit_cached_overloads(obj):
raise RuntimeError(
"Function {} cannot be directly compiled because it"
" is overloaded. It must be used in a context of a function"
" where its inputs can determine which overload to call.".format(qual_name)
)
def interface(obj):
if not inspect.isclass(obj):
raise RuntimeError("interface must be applied to a class")
if not _is_new_style_class(obj):
raise RuntimeError("TorchScript interfaces must inherit from 'object'")
# Expected MRO is:
# User module
# torch.nn.modules.module.Module
# object
is_module_interface = issubclass(obj, torch.nn.Module) and len(obj.mro()) == 3
if not is_module_interface and len(obj.mro()) > 2:
raise RuntimeError(
"TorchScript interface does not support inheritance yet. "
"Please directly inherit from 'object' or 'nn.Module'."
)
qualified_name = _qualified_name(obj)
rcb = _jit_internal.createResolutionCallbackFromFrame(1)
# if this type is a `nn.Module` subclass, generate a module interface type
# instead of a class interface type; a module interface type only compiles
# the user provided methods as part of the interface
ast = get_jit_class_def(obj, obj.__name__)
mangled_classname = torch._C._jit_script_interface_compile(
qualified_name, ast, rcb, is_module_interface
)
obj.__torch_script_interface__ = mangled_classname
return obj
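# Sketch of a module interface declaration compiled by the function above (the name
# Preprocessor is illustrative); any ScriptModule whose forward matches this signature
# can be assigned to an attribute annotated with the interface type:
#
#     @torch.jit.interface
#     class Preprocessor(torch.nn.Module):
#         def forward(self, x: torch.Tensor) -> torch.Tensor:
#             pass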
def _recursive_compile_class(obj, loc):
_qual_name = _qualified_name(obj)
# We're starting a new compilation, so update the error call stack in
# case it fails
error_stack = torch._C.CallStack(_qual_name, loc)
rcb = _jit_internal.createResolutionCallbackForClassMethods(obj)
return _compile_and_register_class(obj, rcb, _qual_name)
CompilationUnit = torch._C.CompilationUnit
set_module(CompilationUnit, "torch.jit")
def pad(s: str, padding: int, offset: int = 0, char: str = ' '):
if padding >= len(s):
padding -= len(s)
return ''.join([char for _ in range(padding + offset)]) + s
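# Worked examples for pad() (not part of the API):
#     pad("42", padding=8)            -> "      42"   (6 pad chars, 8 chars total)
#     pad("42", padding=8, offset=1)  -> "       42"  (one extra pad char)
#     pad("long header", padding=4)   -> "    long header"  (padding < len(s), so
#                                        padding + offset chars are simply prepended)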
class _ScriptProfileColumn:
def __init__(self, header: str, alignment: int = 4, offset: int = 0):
self.header = header
self.alignment = alignment
self.offset = offset
self.rows: Dict[int, Any] = {}
def add_row(self, lineno: int, value: Any):
self.rows[lineno] = value
def materialize(self):
max_length = len(self.header)
rows: List[Tuple[int, str]] = []
for (key, value) in self.rows.items():
cell = str(value)
rows.append((key, cell))
max_length = max(len(cell), max_length)
if self.alignment > 0:
padding = max_length + self.alignment
padding -= padding % self.alignment
else:
padding = 0
rows = [(key, pad(cell, padding, self.offset)) for key, cell in rows]
return pad(self.header, padding, self.offset), rows
class _ScriptProfileTable:
def __init__(self, cols: List[_ScriptProfileColumn], source_range: List[int]):
self.cols = cols
self.source_range = source_range
def dump_string(self):
outputs: List[str] = []
cells: List[Tuple[str, Dict[int, str]]] = []
header_buffer = ''
for col in self.cols:
header, rows = col.materialize()
header_buffer += header
cells.append((header, dict(rows)))
outputs.append(header_buffer)
outputs.append(pad('', len(header_buffer), 0, '='))
for line in self.source_range:
row_buffer = ''
for header, rows in cells:
cell = rows.get(line)
if cell is None:
row_buffer += pad('', len(header))
else:
row_buffer += cell
outputs.append(row_buffer)
return '\n'.join(outputs)
class _ScriptProfile:
def __init__(self):
self.profile = classes.profiling._ScriptProfile()
def enable(self):
self.profile.enable()
def disable(self):
self.profile.disable()
def dump_string(self) -> str:
outputs: List[str] = []
for source_stats in self.profile._dump_stats():
source_ref = source_stats.source()
source_lines = source_ref.text().splitlines()
dedent = min([len(line) - len(line.lstrip(' ')) for line in source_lines])
source_lines = [line[dedent:] for line in source_lines]
start_line = source_ref.starting_lineno()
end_line = start_line + len(source_lines)
source_range = range(start_line, end_line)
lineno = _ScriptProfileColumn("Line #")
hits = _ScriptProfileColumn("Hits")
time_ns = _ScriptProfileColumn("Time (ns)")
line_contents = _ScriptProfileColumn("Line Contents", 0, 1)
stats = source_stats.line_map()
for line in source_range:
lineno.add_row(line, line)
line_contents.add_row(line, source_lines[line - start_line])
stat = stats.get(line)
if stat is not None:
hits.add_row(line, stat.count())
time_ns.add_row(line, stat.duration_ns())
table = _ScriptProfileTable([lineno, hits, time_ns, line_contents], list(source_range))
outputs.append(table.dump_string())
return '\n\n'.join(outputs)
def dump(self):
print(self.dump_string())
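# Hedged usage sketch for this private line profiler (the API may change between
# releases); `scripted_fn` stands for any compiled TorchScript function:
#
#     profile = _ScriptProfile()
#     profile.enable()
#     scripted_fn(example_input)
#     profile.disable()
#     print(profile.dump_string())    # per-line Hits / Time (ns) table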
def _unwrap_optional(x):
assert x is not None, "Unwrapping null optional"
return x
_register_builtin(_unwrap_optional, "aten::_unwrap_optional")
_register_builtin(_jit_internal.is_scripting, "aten::is_scripting")
_register_builtin(has_torch_function, "aten::has_torch_function")
_register_builtin(has_torch_function_unary, "aten::has_torch_function")
_register_builtin(has_torch_function_variadic, "aten::has_torch_function")
|
the-stack_0_21158 | import demistomock as demisto
import requests_mock
def test_upload_sample_command(mocker):
"""
Given:
A file that has already been analyzed
When:
upload_sample_command is running
Then:
Make sure the error includes "Please try using the command with reanalyzed=true".
"""
expected_output = str("Error in API call to VMRay [200] - [{u'error_msg': u'Submission not stored because no jobs "
"were created \\nThere is a possibility this file has been analyzed before. Please try using "
"the command with the argument: reanalyze=true.', u'submission_filename': u'README.md'}]")
mocker.patch.object(demisto, 'params', return_value={"api_key": "123456", "server": "https://cloud.vmray.com/",
'shareable': False, 'reanalyze': False})
mocker.patch.object(demisto, 'command', return_value='vmray-upload-sample')
mocker.patch.object(demisto, 'getFilePath', return_value={'id': 'id', 'path': 'README.md', 'name': 'README.md'})
mocker_output = mocker.patch('VMRay.return_error')
with requests_mock.Mocker() as m:
m.request('POST',
'https://cloud.vmray.com/rest/sample/submit',
json={'data': {'errors': [{'error_msg': 'Submission not stored because no jobs were created',
'submission_filename': 'README.md'}]}},
status_code=200)
from VMRay import main
main()
assert mocker_output.call_args.args[0] == expected_output
|
the-stack_0_21160 | import zmq
import zmq.asyncio
import asyncio
from . import STOPSTREAM, FRAMEMISS, TRACKMISS
from . import REQ_HWM, REQ_TIMESTEP
from .device import Device
"""Stop on STOPSTREAM, or TRACKMISS
Continue on FRAMEMISS
Wait at most TIMEOUT for receiving a response?
have to use async poll for this behavior
"""
async def aioreq(context, source, track, drain, lock):
socket = context.socket(zmq.REQ)
socket.connect(source)
track_bytes = bytes(track, "utf-8")
while True:
await asyncio.sleep(REQ_TIMESTEP)
await socket.send(track_bytes)
buf = await socket.recv()
meta = await socket.recv_pyobj()
ftime = await socket.recv_pyobj()
fno = await socket.recv_pyobj()
if fno == STOPSTREAM:
raise StopAsyncIteration("Stop stream signal received. Exiting.")
if fno == FRAMEMISS:
continue # throw away if no frame available
if fno == TRACKMISS:
raise StopAsyncIteration(f'Track "{track}" was not recognized. Exiting.')
try:
async with lock:
await drain.send(buf, copy=False, flags=zmq.SNDMORE | zmq.NOBLOCK)
await drain.send_pyobj(meta, flags=zmq.SNDMORE | zmq.NOBLOCK)
await drain.send_pyobj(ftime, flags=zmq.SNDMORE | zmq.NOBLOCK)
await drain.send_pyobj(fno, flags=zmq.NOBLOCK)
except zmq.error.Again:
pass
async def stop(shutdown):
while True:
await asyncio.sleep(0.1)
if shutdown.is_set():
raise StopAsyncIteration()
def aiomain(*, shutdown, barrier, source, outfd, track, nthread):
context = zmq.asyncio.Context()
drain = context.socket(zmq.PUSH)
drain.setsockopt(zmq.SNDHWM, REQ_HWM)
drain.bind(outfd)
lock = asyncio.Lock()
args = [aioreq(context, source, track, drain, lock) for _ in range(nthread)]
args.append(stop(shutdown))
loop = asyncio.get_event_loop()
barrier.wait()
try:
loop.run_until_complete(asyncio.gather(*args))
except StopAsyncIteration:
loop.stop()
context.destroy(linger=0)
loop.close()
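# Sketch of the consumer side of the PUSH "drain" socket above; the endpoint is assumed
# to match RequesterDevice.outfd, and each frame arrives as a four-part message that
# mirrors the sends in aioreq():
#
#     sock = zmq.Context().socket(zmq.PULL)
#     sock.connect("ipc:///tmp/decin" + seed)
#     buf = sock.recv()            # raw frame bytes
#     meta = sock.recv_pyobj()     # frame metadata
#     ftime = sock.recv_pyobj()    # capture timestamp
#     fno = sock.recv_pyobj()      # frame number or status code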
class RequesterDevice(Device):
def __init__(self, source, track, nthread, seed):
"""Create a asyncio frame requester device.
Args:
source (str): Descriptor of stream endpoint.
track (str): Video stream track name.
nthread (int): Number of requester threads.
seed (str): File descriptor seed (to prevent ipc collisions).
"""
self.outfd = "ipc:///tmp/decin" + seed
self.source, self.nthread, self.track = source, nthread, track
dkwargs = {
"source": self.source,
"outfd": self.outfd,
"track": self.track,
"nthread": self.nthread,
}
super().__init__(aiomain, dkwargs, 1)
def __repr__(self):
rpr = "-----RequesterDevice-----\n"
rpr += f"{'THDS': <8}{self.nthread}\n"
rpr += f"{'TRACK': <8}{self.track}\n"
rpr += f"{'IN': <8}{self.source}\n"
rpr += f"{'OUT': <8}{self.outfd}\n"
rpr += f"{'HWM': <8}> XX)({REQ_HWM} >"
return rpr
|
the-stack_0_21161 | # Copyright 2013-2022 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
from spack import *
class Precice(CMakePackage):
"""preCICE (Precise Code Interaction Coupling Environment) is a
coupling library for partitioned multi-physics simulations.
Partitioned means that preCICE couples existing programs (solvers)
capable of simulating a subpart of the complete physics involved in
a simulation."""
homepage = 'https://precice.org/'
git = 'https://github.com/precice/precice.git'
url = 'https://github.com/precice/precice/archive/v1.2.0.tar.gz'
maintainers = ['fsimonis', 'MakisH']
tags = ['e4s']
version('develop', branch='develop')
version('2.3.0', sha256='57bab08e8b986f5faa364689d470940dbd9c138e5cfa7b861793e7db56b89da3')
version('2.2.1', sha256='bca8cedfb5c86656e4fdfaca5cb982b861f9aba926538fa4411bc0d015e09c1f')
version('2.2.0', sha256='f8c4e0810dcaeb6a40a0fcab64b95c899f0121c968e0730416d4d2a97d39d0c4')
version('2.1.1', sha256='729b7c24a7a61b3953bb70d96a954ad3a85729a29a35a288b59ba25661117064')
version('2.1.0', sha256='1e6432724f70d0c6c05fdd645e0026754edbc547719a35bf1d3c12a779b1d00e')
version('2.0.2', sha256='72864480f32696e7b6da94fd404ef5cd6586e2e1640613e46b75f1afac8569ed')
version('2.0.1', sha256='e4fe2d2063042761ab325f8c802f88ae088c90862af288ad1a642967d074bd50')
version('2.0.0', sha256='c8979d366f06e35626a8da08a1c589df77ec13972eb524a1ba99a011e245701f')
version('1.6.1', sha256='7d0c54faa2c69e52304f36608d93c408629868f16f3201f663a0f9b2008f0763')
version('1.6.0', sha256='c3b16376fda9eb3449adb6cc3c1e267c3dc792a5d118e37d93a32a59b5a4bc6f')
version('1.5.2', sha256='051e0d7655a91f8681901e5c92812e48f33a5779309e2f104c99f5a687e1a418')
version('1.5.1', sha256='fbe151f1a9accf9154362c70d15254935d4f594d189982c3a99fdb3dd9d9e665')
version('1.5.0', sha256='a2a794becd08717e3049252134ae35692fed71966ed32e22cca796a169c16c3e')
version('1.4.1', sha256='dde4882edde17882340f9f601941d110d5976340bd71af54c6e6ea22ae56f1a5')
version('1.4.0', sha256='3499bfc0941fb9f004d5e32eb63d64f93e17b4057fab3ada1cde40c8311bd466')
version('1.3.0', sha256='610322ba1b03df8e8f7d060d57a6a5afeabd5db4e8c4a638d04ba4060a3aec96')
version('1.2.0', sha256='0784ecd002092949835151b90393beb6e9e7a3e9bd78ffd40d18302d6da4b05b')
# Skip version 1.1.1 entirely, the cmake was lacking install.
variant('mpi', default=True, description='Enable MPI support')
variant('petsc', default=True, description='Enable PETSc support')
variant('python', default=False, description='Enable Python support')
variant('shared', default=True, description='Build shared libraries')
depends_on('[email protected]:', type='build')
depends_on('[email protected]:', type='build', when='@1.4:')
depends_on('pkgconfig', type='build', when='@2.2:')
depends_on('[email protected]:')
depends_on('[email protected]:', when='@1.4:')
depends_on('boost@:1.72', when='@:2.0.2')
depends_on('boost@:1.74', when='@:2.1.1')
depends_on('[email protected]:')
depends_on('eigen@:3.3.7', type='build', when='@:1.5') # bug in prettyprint
depends_on('libxml2')
depends_on('mpi', when='+mpi')
depends_on('[email protected]:', when='+petsc')
depends_on('[email protected]:', when='[email protected]:')
# Python 3 support was added in version 2.0
depends_on('[email protected]:2.8', when='@:1.9+python', type=('build', 'run'))
depends_on('python@3:', when='@2:+python', type=('build', 'run'))
# numpy 1.17+ requires Python 3
depends_on('py-numpy@:1.16', when='@:1.9+python', type=('build', 'run'))
depends_on('[email protected]:', when='@2:+python', type=('build', 'run'))
# We require C++14 compiler support
conflicts('%gcc@:4')
conflicts('%apple-clang@:5')
conflicts('%clang@:3.7')
conflicts('%intel@:16')
conflicts('%pgi@:17.3')
def cmake_args(self):
"""Populate cmake arguments for precice."""
spec = self.spec
# The xSDK installation policies were implemented after 1.5.2
xsdk_mode = spec.satisfies("@1.6:")
# Select the correct CMake variables by version
mpi_option = "MPI"
if spec.satisfies("@2:"):
mpi_option = "PRECICE_MPICommunication"
petsc_option = "PETSC"
if spec.satisfies("@2:"):
petsc_option = "PRECICE_PETScMapping"
python_option = "PYTHON"
if spec.satisfies("@2:"):
python_option = "PRECICE_PythonActions"
def variant_bool(feature, on='ON', off='OFF'):
"""Ternary for spec variant to ON/OFF string"""
if feature in spec:
return on
return off
cmake_args = [
'-DBUILD_SHARED_LIBS:BOOL=%s' % variant_bool('+shared'),
]
cmake_args.append('-D%s:BOOL=%s' % (mpi_option, variant_bool('+mpi')))
# Boost
if xsdk_mode:
cmake_args.append('-DTPL_ENABLE_BOOST=ON')
cmake_args.append('-DBOOST_ROOT=%s' % spec['boost'].prefix)
# Eigen3
if xsdk_mode:
cmake_args.append('-DTPL_ENABLE_EIGEN3=ON')
cmake_args.append(
'-DEIGEN3_INCLUDE_DIR=%s' % spec['eigen'].headers.directories[0])
# LibXML2
if xsdk_mode:
cmake_args.append('-DTPL_ENABLE_LIBXML2=ON')
libxml2_includes = spec['libxml2'].headers.directories[0]
cmake_args.extend([
'-DLIBXML2_INCLUDE_DIRS=%s' % libxml2_includes,
'-DLIBXML2_LIBRARIES=%s' % spec['libxml2'].libs[0],
])
# PETSc
if '+petsc' in spec:
if xsdk_mode:
cmake_args.append('-DTPL_ENABLE_PETSC:BOOL=ON')
else:
cmake_args.append('-D%s:BOOL=ON' % petsc_option)
cmake_args.extend([
'-DPETSC_DIR=%s' % spec['petsc'].prefix,
'-DPETSC_ARCH=.'
])
else:
cmake_args.append('-D%s:BOOL=OFF' % petsc_option)
# Python
if '+python' in spec:
python_library = spec['python'].libs[0]
python_include = spec['python'].headers.directories[0]
numpy_include = join_path(
spec['py-numpy'].prefix,
spec['python'].package.platlib,
'numpy', 'core', 'include')
if xsdk_mode:
cmake_args.append('-DTPL_ENABLE_PYTHON:BOOL=ON')
else:
cmake_args.append('-D%s:BOOL=ON' % python_option)
cmake_args.extend([
'-DPYTHON_INCLUDE_DIR=%s' % python_include,
'-DNumPy_INCLUDE_DIR=%s' % numpy_include,
'-DPYTHON_LIBRARY=%s' % python_library
])
else:
cmake_args.append('-D%s:BOOL=OFF' % python_option)
return cmake_args
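    # Illustrative spec for this recipe (adjust versions/variants to your toolchain):
    #     spack install precice@2.3.0 +mpi +petsc ^[email protected]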
|
the-stack_0_21162 | # Copyright 2014 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import re
from telemetry.page import page as page_module
from telemetry.page import shared_page_state
from telemetry import story
def _CreateXpathFunction(xpath):
return ('document.evaluate("%s",'
'document,'
'null,'
'XPathResult.FIRST_ORDERED_NODE_TYPE,'
'null)'
'.singleNodeValue' % re.escape(xpath))
class GmailComposeDiscardPage(page_module.Page):
""" Why: Compose and discard a new email """
def __init__(self, page_set):
super(GmailComposeDiscardPage, self).__init__(
url='https://mail.google.com/mail/',
page_set=page_set,
shared_page_state_class=shared_page_state.SharedDesktopPageState,
credentials_path = 'data/credentials.json')
self.credentials = 'google'
def RunNavigateSteps(self, action_runner):
super(GmailComposeDiscardPage, self).RunNavigateSteps(action_runner)
action_runner.WaitForJavaScriptCondition(
'window.gmonkey !== undefined &&'
'document.getElementById("gb") !== null')
def ComposeClick(self, action_runner):
action_runner.ExecuteJavaScript('''
var button=document.evaluate('//div[text()="COMPOSE"]',
document,null,XPathResult.FIRST_ORDERED_NODE_TYPE,null)
.singleNodeValue;
var mousedownevent=new MouseEvent('mousedown',true,true,window,0,0,0,0,0,
false,false,false,false,0,null);
var mouseupevent=new MouseEvent('mouseup',true,true,window,0,0,0,0,0,
false,false,false,false,0,null);
button.dispatchEvent(mousedownevent);
button.dispatchEvent(mouseupevent);''')
def RunEndure(self, action_runner):
action_runner.WaitForElement(
element_function=_CreateXpathFunction('//div[text()="COMPOSE"]'))
self.ComposeClick(action_runner)
action_runner.Wait(1)
action_runner.WaitForElement(
'div[class~="oh"][data-tooltip="Discard draft"]')
action_runner.ClickElement('div[class~="oh"][data-tooltip="Discard draft"]')
action_runner.Wait(1)
class GmailComposeDiscardPageSet(story.StorySet):
"""
Description: Gmail endure test: compose and discard an email.
"""
def __init__(self):
super(GmailComposeDiscardPageSet, self).__init__()
self.AddStory(GmailComposeDiscardPage(self))
|
the-stack_0_21166 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# @Time : 2019-06-09 14:28
# @Author : Vassago
# @File : test_instance.py
# @Software: PyCharm
import logging
from unit_tests.common import BaseTestCase
LOG = logging.getLogger(__name__)
class TestInstance(BaseTestCase):
def setUp(self):
self.short_id = None
self.create_test_app()
def test_create_instance(self):
data = {
"image": "mysql:5.7.19",
"ports": "",
"type": "mysql",
"volumes": "",
"environment": ""
}
status, testcreateresponse = self.post('/v1/instances', data=data)
self.assertEqual(status, 200)
self.short_id = testcreateresponse.get("short_id")
#self.assertxDictContainsEqual(test_user_openapi_key, 'name', "test_user_apikey_test1")
def test_get_instance_list(self):
status, testgetinstance = self.get('/v1/instances')
self.assertEqual(status, 200)
def test_restart_instance(self):
status, testrestartinstance = self.post('/v1/instances/{}/restart'.format(self.short_id))
self.assertEqual(status, 200)
def test_stop_instance(self):
status, teststopinstnace = self.delete('/v1/instances/{}/stop'.format(self.short_id))
self.assertEqual(status, 200)
|
the-stack_0_21168 | import sqlite3
from datetime import datetime
class DB(object):
def __init__(self):
self.DATA_BASE_NAME = 'aspyts.db3'
self.STATUS_EXECUTING = "executing"
self.STATUS_ERROR = "error"
self.STATUS_FINISHED = "success"
self.cursor = None
self.conn = None
def __enter__(self):
self.conn = sqlite3.connect(self.DATA_BASE_NAME)
self.cursor = self.conn.cursor()
return self
def __exit__(self, type, value, tb):
self.conn.commit()
self.conn.close()
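    # Hedged usage sketch (task values are illustrative): the class is meant to be used
    # as a context manager so the connection is committed and closed on exit.
    #
    #     with DB() as db:
    #         db.create()
    #         tid = db.insertTask("backup", 3600, "tar czf /tmp/b.tgz /data")
    #         eid = db.insertExec(tid)
    #         db.updateExec(eid, db.STATUS_FINISHED, "ok", db.now(), 0)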
def execute(self, sql, args=()):
self.cursor.execute(sql, args)
return self.cursor
def now(self):
return datetime.now().isoformat(' ')
def getIdStatus(self, name):
self.execute("""
INSERT OR IGNORE INTO status (name, dt_insert) VALUES (?,?)
""", [
name,
self.now()
])
cursor = self.execute("SELECT id FROM status WHERE name = ?;" ,[
name
])
return cursor.fetchone()[0]
def checkActive(self, id_task):
cursor = self.execute("""
select active from task where id = ?
""", [
id_task
])
return bool(cursor.fetchone()[0])
def create(self):
self.execute(
'''CREATE TABLE IF NOT EXISTS task (
id INTEGER PRIMARY KEY AUTOINCREMENT,
name text NOT NULL,
interval REAL NOT NULL,
cmd text NOT NULL,
dt_last_exec text,
dt_insert text NOT NULL,
active INTEGER,
UNIQUE(name, interval, cmd)
)''')
self.execute(
'''CREATE TABLE IF NOT EXISTS status (
id INTEGER PRIMARY KEY AUTOINCREMENT,
name text NOT NULL,
dt_insert TEXT NOT NULL,
UNIQUE(name)
)'''
)
self.execute(
'''CREATE TABLE IF NOT EXISTS exec (
id INTEGER PRIMARY KEY AUTOINCREMENT,
id_task INTEGER NOT NULL,
dt_start text NOT NULL,
dt_finish text,
id_status INTEGER NOT NULL,
output TEXT,
result_code INTEGER,
dt_insert text NOT NULL
)'''
)
def listTask(self):
cursor = self.execute("""
SELECT
id, name, interval, cmd, dt_last_exec, dt_insert, active
FROM task
order by name, interval
""")
return cursor
def nextTask(self):
cursor = self.execute("""
SELECT
id, name, interval, cmd, dt_last_exec, dt_insert
FROM task
where
active = 1 AND (
(strftime('%s','now') - strftime('%s',dt_last_exec)) >= interval OR
(dt_last_exec IS NULL)
)
order by dt_last_exec, id
limit 1
""")
return cursor
def insertTask(self, name, interval, cmd, active=True):
cursor = self.execute('''
insert OR IGNORE into task (
name,
interval,
cmd,
dt_insert,
active) values (
?,
?,
?,
?,
?
)
''', [
name, interval, cmd, self.now(), int(active)
])
return cursor.lastrowid
def updateTask(self, id, dt_last_exec=None, active=1):
sql = 'update task set '
args = []
if(not dt_last_exec is None):
sql = sql + "dt_last_exec = ?, "
args.append(dt_last_exec)
args.append(active)
args.append(id)
self.execute(sql + '''
active = ?
where id = ?
''', args)
def insertExec(self, id_task):
now = self.now()
cursor = self.execute('''
INSERT INTO exec (id_task, dt_start, id_status, dt_insert)
values (?,?,?,?)
''', [
id_task,
now,
self.getIdStatus(self.STATUS_EXECUTING),
now,
])
return cursor.lastrowid
def updateExec(self, id, status, output, dt_finish, result_code):
self.execute('''
UPDATE exec SET
dt_finish = ?,
id_status = ?,
output=?,
result_code=?
where id = ?
''', [
dt_finish,
self.getIdStatus(status),
output,
result_code,
id
]) |
the-stack_0_21170 | # Parser for games - arff
import numpy as np
from dataset import Dataset
def parse_arff(folder, filename):
assert('games' in folder)
assert('.arff' in filename)
data = False
X = []
Y = []
Xnames = []
Xdomains = []
Ynames = {}
for line in open('%s/%s' % (folder, filename), 'r'):
if data:
sample = [float(e) if e.isdigit() else e for e in line.split(',')]
cl = sample[-1].strip()
del sample[-1]
if cl not in Ynames:
pos = len(Ynames)
Ynames[cl] = pos
Y.append(Ynames[cl])
X.append(sample)
else:
if line.startswith('@DATA'):
data = True
elif line.startswith('@ATTRIBUTE'):
label = line.split('\"')[1]
if label != 'class':
Xnames.append(label)
Xdomains.append(set({0, 1}))
return Dataset(np.array(X), np.array(Y), np.array(Xnames),
Xdomains, Ynames)
|
the-stack_0_21171 | #!/usr/bin/env python
# -*- coding:utf-8 -*-
from models import *
from server import app
from params import HTTP_OK, HTTP_UNKNOWN, HTTP_BADREQ, HTTP_UNAUTH
import json
class TestModels(object):
def setup_class(self):
# Init
self.app = app
self.app.testing = True
self.client = self.app.test_client()
# Add an user for testing
new_user = User(username='__test__user', password='xxx')
self.app.db.session.add(new_user)
self.app.db.session.commit()
def teardown_class(self):
# Deletes(cascade)
self.app.db.session.query(User).filter(User.username == '__test__user').delete()
self.app.db.session.commit()
# Close connections
self.app.db.session.remove()
print('\n/sys/delete ut complete!')
def test_systems(self):
# Add a system(column 'createtime' is automated)
new_system = System(systemname='__test__system', username='__test__user',
method='__test__method', results=json.dumps([{'attr': 'val'}]),
description='test')
self.app.db.session.add(new_system)
self.app.db.session.commit()
res = self.app.db.session.query(System.results).filter(System.systemname == '__test__system').first()
assert json.loads(res[0]) == [{'attr': 'val'}]
# Test for wrong cookies
def test_401(self):
res = self.client.delete('/sys/delete?systemname=__test__system2')
assert res.status_code == HTTP_UNAUTH
# Test for bad requests
def test_400(self):
with self.client as c:
with c.session_transaction() as sess:
sess['username'] = '__test__user'
res = self.client.delete('/sys/delete?username=__test__user')
assert res.status_code == HTTP_BADREQ
def test_exist(self):
# Invalid
res = self.client.delete('/sys/delete?systemname=__test__system2')
assert res.status_code == HTTP_OK
assert res.is_json is True
data = res.get_json()
assert data['status'] == 1
        assert data['error'] == '系统不存在'  # "系统不存在" means "system does not exist"
# Valid
with self.client as c:
res = self.client.delete('/sys/delete?systemname=__test__system')
assert res.status_code == HTTP_OK
assert res.is_json is True
data = res.get_json()
assert data['status'] == 0
|
the-stack_0_21172 | """3D mesh manipulation utilities."""
from builtins import str
from collections import OrderedDict
import numpy as np
def frustum(left, right, bottom, top, znear, zfar):
"""Create view frustum matrix."""
assert right != left
assert bottom != top
assert znear != zfar
M = np.zeros((4, 4), dtype=np.float32)
M[0, 0] = +2.0 * znear / (right - left)
M[2, 0] = (right + left) / (right - left)
M[1, 1] = +2.0 * znear / (top - bottom)
M[3, 1] = (top + bottom) / (top - bottom)
M[2, 2] = -(zfar + znear) / (zfar - znear)
M[3, 2] = -2.0 * znear * zfar / (zfar - znear)
M[2, 3] = -1.0
return M
def perspective(fovy, aspect, znear, zfar):
"""Create perspective projection matrix."""
assert znear != zfar
h = np.tan(fovy / 360.0 * np.pi) * znear
w = h * aspect
return frustum(-w, w, -h, h, znear, zfar)
def anorm(x, axis=None, keepdims=False):
"""Compute L2 norms alogn specified axes."""
return np.sqrt((x*x).sum(axis=axis, keepdims=keepdims))
def normalize(v, axis=None, eps=1e-10):
"""L2 Normalize along specified axes."""
return v / max(anorm(v, axis=axis, keepdims=True), eps)
def lookat(eye, target=[0, 0, 0], up=[0, 1, 0]):
"""Generate LookAt modelview matrix."""
eye = np.float32(eye)
forward = normalize(target - eye)
side = normalize(np.cross(forward, up))
up = np.cross(side, forward)
M = np.eye(4, dtype=np.float32)
R = M[:3, :3]
R[:] = [side, up, -forward]
M[:3, 3] = -R.dot(eye)
return M
def homotrans(M, p):
p = np.asarray(p)
if p.shape[-1] == M.shape[1]-1:
p = np.append(p, np.ones_like(p[...,:1]), -1)
p = np.dot(p, M.T)
return p[...,:-1] / p[...,-1:]
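def example_camera_transform():
  """Illustrative sketch (not part of the original module): compose `lookat` and
  `perspective`, then project a point with `homotrans`. The camera position and
  frustum parameters below are arbitrary example values."""
  view = lookat(eye=[0.0, 0.0, 3.0])  # camera 3 units along +Z, looking at the origin
  proj = perspective(fovy=45.0, aspect=1.0, znear=0.1, zfar=10.0)
  # homotrans() appends the homogeneous coordinate and divides by w after projecting,
  # so apply the view matrix first and the projection matrix second.
  point = np.float32([0.5, 0.5, 0.0])
  return homotrans(proj, homotrans(view, point))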
def _parse_vertex_tuple(s):
"""Parse vertex indices in '/' separated form (like 'i/j/k', 'i//k' ...)."""
vt = [0, 0, 0]
for i, c in enumerate(s.split('/')):
if c:
vt[i] = int(c)
return tuple(vt)
def _unify_rows(a):
"""Unify lengths of each row of a."""
lens = np.fromiter(map(len, a), np.int32)
if not (lens[0] == lens).all():
out = np.zeros((len(a), lens.max()), np.float32)
for i, row in enumerate(a):
out[i, :lens[i]] = row
else:
out = np.float32(a)
return out
def load_obj(fn):
"""Load 3d mesh form .obj' file.
Args:
fn: Input file name or file-like object.
Returns:
dictionary with the following keys (some of which may be missing):
position: np.float32, (n, 3) array, vertex positions
uv: np.float32, (n, 2) array, vertex uv coordinates
      normal: np.float32, (n, 3) array, vertex normals
      face: np.int32, (k*3,) triangular face indices
"""
position = [np.zeros(3, dtype=np.float32)]
normal = [np.zeros(3, dtype=np.float32)]
uv = [np.zeros(2, dtype=np.float32)]
tuple2idx = OrderedDict()
  triangle_indices = []
input_file = open(fn) if isinstance(fn, str) else fn
for line in input_file:
line = line.strip()
if not line or line[0] == '#':
continue
tag, line = line.split(' ', 1)
if tag == 'v':
position.append(np.fromstring(line, sep=' '))
elif tag == 'vt':
uv.append(np.fromstring(line, sep=' '))
elif tag == 'vn':
normal.append(np.fromstring(line, sep=' '))
elif tag == 'f':
output_face_indices = []
for chunk in line.split():
# tuple order: pos_idx, uv_idx, normal_idx
vt = _parse_vertex_tuple(chunk)
if vt not in tuple2idx: # create a new output vertex?
tuple2idx[vt] = len(tuple2idx)
output_face_indices.append(tuple2idx[vt])
# generate face triangles
for i in range(1, len(output_face_indices)-1):
for vi in [0, i, i+1]:
          triangle_indices.append(output_face_indices[vi])
outputs = {}
  outputs['face'] = np.int32(triangle_indices)
pos_idx, uv_idx, normal_idx = np.int32(list(tuple2idx)).T
if np.any(pos_idx):
outputs['position'] = _unify_rows(position)[pos_idx]
if np.any(uv_idx):
outputs['uv'] = _unify_rows(uv)[uv_idx]
if np.any(normal_idx):
outputs['normal'] = _unify_rows(normal)[normal_idx]
return outputs
|
the-stack_0_21173 | # -*- coding: utf-8 -*-
"""
@Remark: Custom serializer
"""
from rest_framework import serializers
from rest_framework.fields import empty
from rest_framework.request import Request
from rest_framework.serializers import ModelSerializer
from django.utils.functional import cached_property
from rest_framework.utils.serializer_helpers import BindingDict
from mysystem.models import Users
class CustomModelSerializer(ModelSerializer):
"""
    Enhanced DRF ModelSerializer that automatically maintains the model's audit fields.
    (1) self.request gives access to the rest_framework.request.Request object
"""
    # Audit field name of the last modifier; defaults to 'modifier', can be overridden by subclasses
modifier_field_id = 'modifier'
modifier_name = serializers.SerializerMethodField(read_only=True)
def get_modifier_name(self, instance):
if not hasattr(instance,'modifier'):
return None
queryset = Users.objects.filter(id=instance.modifier).values_list('name', flat=True).first()
if queryset:
return queryset
return None
    # Audit field name of the creator; defaults to 'creator', can be overridden by subclasses
creator_field_id = 'creator'
creator_name = serializers.SlugRelatedField(slug_field="name", source="creator", read_only=True)
    # Field recording the department the data belongs to
dept_belong_id_field_name = 'dept_belong_id'
    # Default datetime output format
create_datetime = serializers.DateTimeField(format="%Y-%m-%d %H:%M:%S", required=False, read_only=True)
update_datetime = serializers.DateTimeField(format="%Y-%m-%d %H:%M:%S", required=False)
def __init__(self, instance=None, data=empty, request=None, **kwargs):
super().__init__(instance, data, **kwargs)
self.request: Request = request or self.context.get('request', None)
def save(self, **kwargs):
return super().save(**kwargs)
def create(self, validated_data):
if self.request:
if self.modifier_field_id in self.fields.fields:
validated_data[self.modifier_field_id] = self.get_request_user_id()
if self.creator_field_id in self.fields.fields:
validated_data[self.creator_field_id] = self.request.user
            # Only auto-fill the owning department when it was not supplied explicitly.
            if self.dept_belong_id_field_name in self.fields.fields and not validated_data.get(self.dept_belong_id_field_name, None):
validated_data[self.dept_belong_id_field_name] = getattr(self.request.user, 'dept_id', None)
return super().create(validated_data)
def update(self, instance, validated_data):
if self.request:
if hasattr(self.instance, self.modifier_field_id):
                # Store the user id, consistent with create() and get_modifier_name().
                self.instance.modifier = self.get_request_user_id()
return super().update(instance, validated_data)
def get_request_username(self):
if getattr(self.request, 'user', None):
return getattr(self.request.user, 'username', None)
return None
def get_request_name(self):
if getattr(self.request, 'user', None):
return getattr(self.request.user, 'name', None)
return None
def get_request_user_id(self):
if getattr(self.request, 'user', None):
return getattr(self.request.user, 'id', None)
return None
@cached_property
def fields(self):
fields = BindingDict(self)
for key, value in self.get_fields().items():
fields[key] = value
if not hasattr(self, '_context'):
return fields
is_root = self.root == self
parent_is_list_root = self.parent == self.root and getattr(self.parent, 'many', False)
if not (is_root or parent_is_list_root):
return fields
try:
request = self.request or self.context['request']
except KeyError:
return fields
params = getattr(
request, 'query_params', getattr(request, 'GET', None)
)
if params is None:
pass
try:
filter_fields = params.get('_fields', None).split(',')
except AttributeError:
filter_fields = None
try:
omit_fields = params.get('_exclude', None).split(',')
except AttributeError:
omit_fields = []
existing = set(fields.keys())
if filter_fields is None:
allowed = existing
else:
allowed = set(filter(None, filter_fields))
omitted = set(filter(None, omit_fields))
for field in existing:
if field not in allowed:
fields.pop(field, None)
if field in omitted:
fields.pop(field, None)
return fields
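# Illustrative sketch only (not part of the original module): a concrete serializer
# built on CustomModelSerializer. `Users` is the model imported above; exposing all
# fields is just an example choice.
class UsersAuditSerializer(CustomModelSerializer):
    class Meta:
        model = Users
        fields = "__all__"
# A view would pass the request so the audit fields get populated on save:
#   serializer = UsersAuditSerializer(data=request.data, request=request)
#   serializer.is_valid(raise_exception=True)
#   serializer.save()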
|
the-stack_0_21174 | # -*- coding: utf-8 -*-
"""
pyboleto.data
~~~~~~~~~~~~~
Base para criação dos módulos dos bancos. Comtém funções genéricas
relacionadas a geração dos dados necessários para o boleto bancário.
:copyright: © 2011 - 2012 by Eduardo Cereto Carvalho
:license: BSD, see LICENSE for more details.
"""
import datetime
from decimal import Decimal
from erpbrasil.febraban.boleto.base import modulo10, modulo11
from erpbrasil.febraban.boleto.custom_property import CustomProperty
from erpbrasil.febraban.boleto.exceptions import BoletoException
_EPOCH = datetime.date(1997, 10, 7)
class Boleto(object):
"""Interface para implementações específicas de bancos
Esta classe geralmente nunca será usada diretamente. Geralmente o usuário
irá usar uma das subclasses com a implementação específica de cada banco.
As classes dentro do pacote :mod:`pyboleto.bank` extendem essa classe
para implementar as especificações de cada banco.
Portanto as especificações dentro desta classe são genéricas seguindo as
normas da FEBRABAN.
Todos os parâmetros devem ser passados como ``**kwargs`` para o construtor
ou então devem ser passados depois, porém antes de imprimir o boleto.
eg::
bData = Boleto(agencia='123', valor='650')
bData.cedente = u'João Ninguém'
bData.cedente_cidade = u'Rio de Janeiro'
bData.cedente_uf = u'RJ'
# Assim por diante até preencher todos os campos obrigatórios.
**Parâmetros obrigatórios**:
:param aceite: 'N' para o caixa não acetitar o boleto após a
validade ou 'A' para aceitar. *(default: 'N')*
:param agencia_cedente: Tamanho pode variar com o banco.
:param carteira: Depende do Banco.
:param cedente: Nome do Cedente
:param cedente_cidade:
:param cedente_uf:
:param cedente_logradouro: Endereço do Cedente
:param cedente_bairro:
:param cedente_cep:
:param cedente_documento: CPF ou CNPJ do Cedente.
:param conta_cedente: Conta do Cedente sem o dígito verificador.
:param data_documento:
:type data_documento: `datetime.date`
:param data_processamento:
:type data_processamento: `datetime.date`
:param data_vencimento:
:type data_vencimento: `datetime.date`
:param numero_documento: Número Customizado para controle. Pode ter até 13
caracteres dependendo do banco.
:param sacado_nome: Nome do Sacado
:param sacado_documento: CPF ou CNPJ do Sacado
:param sacado_cidade:
:param sacado_uf:
:param sacado_endereco: Endereco do Sacado
:param sacado_bairro:
:param sacado_cep:
**Parâmetros não obrigatórios**:
:param quantidade:
:param especie: Nunca precisa mudar essa opção *(default: 'R$')*
:param especie_documento:
:param local_pagamento: *(default: 'Pagável em qualquer banco
até o vencimento')*
:param moeda: Nunca precisa mudar essa opção *(default: '9')*
"""
nosso_numero = CustomProperty('nosso_numero', 13)
"""Nosso Número geralmente tem 13 posições
Algumas subclasses podem alterar isso dependendo das normas do banco
"""
agencia_cedente = CustomProperty('agencia_cedente', 4)
"""Agência do Cedente geralmente tem 4 posições
Algumas subclasses podem alterar isso dependendo das normas do banco
"""
conta_cedente = CustomProperty('conta_cedente', 7)
"""Conta do Cedente geralmente tem 7 posições
Algumas subclasses podem alterar isso dependendo das normas do banco
"""
def __init__(self, **kwargs):
        # otherwise the printed value might differ from the value in
        # the barcode.
self.aceite = kwargs.pop('aceite', "N")
self.agencia_cedente = kwargs.pop('agencia_cedente', "")
self.carteira = kwargs.pop('carteira', "")
self.cedente = kwargs.pop('cedente', "")
self.cedente_cidade = kwargs.pop('cedente_cidade', "")
self.cedente_uf = kwargs.pop('cedente_uf', "")
self.cedente_logradouro = kwargs.pop('cedente_logradouro', "")
self.cedente_bairro = kwargs.pop('cedente_bairro', "")
self.cedente_cep = kwargs.pop('cedente_cep', "")
self.cedente_documento = kwargs.pop('cedente_documento', "")
self.codigo_banco = kwargs.pop('codigo_banco', "")
self.conta_cedente = kwargs.pop('conta_cedente', "")
self.data_documento = kwargs.pop('data_documento', "")
self.data_processamento = kwargs.pop('data_processamento',
datetime.date.today())
self.data_vencimento = kwargs.pop('data_vencimento', "")
self.especie = kwargs.pop('especie', "R$")
self.especie_documento = kwargs.pop('especie_documento', "")
self.local_pagamento = kwargs.pop(
'local_pagamento', "Pagável em qualquer banco até o vencimento")
self.logo_image = kwargs.pop('logo_image', "")
self.moeda = kwargs.pop('moeda', "9")
self.numero_documento = kwargs.pop('numero_do_documento', "")
self.quantidade = kwargs.pop('quantidade', "")
self.sacado_nome = kwargs.pop('sacado_nome', "")
self.sacado_documento = kwargs.pop('sacado_documento', "")
self.sacado_cidade = kwargs.pop('sacado_cidade', "")
self.sacado_uf = kwargs.pop('sacado_uf', "")
self.sacado_endereco = kwargs.pop('sacado_endereco', "")
self.sacado_bairro = kwargs.pop('sacado_bairro', "")
self.sacado_cep = kwargs.pop('sacado_cep', 0)
if kwargs:
raise TypeError("Paramêtro(s) desconhecido: %r" % (kwargs,))
self._cedente_endereco = None
self._demonstrativo = []
self._instrucoes = []
self._sacado = None
self._valor = None
self._valor_documento = None
self._gera_pdf = True
@property
def barcode(self):
"""Essa função sempre é a mesma para todos os bancos. Então basta
        implementar o método :func:`campo_livre` para o pyboleto calcular a linha
digitável.
Posição # Conteúdo
01 a 03 03 Número do banco
04 01 Código da Moeda - 9 para Real
05 01 Digito verificador do Código de Barras
        06 a 09      04  Data de vencimento em dias a partir de 07/10/1997
10 a 19 10 Valor do boleto (8 inteiros e 2 decimais)
20 a 44 25 Campo Livre definido por cada banco
Total 44
"""
for attr, length, data_type in [
('codigo_banco', 3, str),
('moeda', 1, str),
('data_vencimento', None, datetime.date),
('valor_documento', -1, str),
('campo_livre', 25, str),
]:
value = getattr(self, attr)
if not isinstance(value, data_type):
raise TypeError("%s.%s must be a %s, got %r (type %s)" % (
self.__class__.__name__, attr, data_type.__name__, value,
type(value).__name__))
if (data_type == str and
length != -1 and
len(value) != length):
raise ValueError(
"%s.%s must have a length of %d, not %r (len: %d)" %
(self.__class__.__name__,
attr,
length,
value,
len(value)))
due_date_days = (self.data_vencimento - _EPOCH).days
if not (9999 >= due_date_days >= 0):
raise TypeError(
"Invalid date, must be between 1997/07/01 and "
"2024/11/15")
num = "%s%1s%04d%010d%24s" % (self.codigo_banco,
self.moeda,
due_date_days,
Decimal(self.valor_documento) * 100,
self.campo_livre)
dv = self.calculate_dv_barcode(num)
barcode = num[:4] + str(dv) + num[4:]
if len(barcode) != 44:
raise BoletoException(
'The barcode must have 44 characteres, found %d' %
len(barcode))
return barcode
@property
def campo_livre(self):
"""Must be overriden by child class property
:exception NotImplementedError: Needs to be implemented by derived
class
"""
raise NotImplementedError(
'This method has not been implemented by this class'
)
def calculate_dv_barcode(self, line):
"""Calcula DV para código de barras
        Esta é uma implementação genérica, mas pode ser reimplementada pela
        classe derivada dependendo das definições de cada banco.
"""
resto2 = self.modulo11(line, 9, 1)
if resto2 in [0, 1, 10]:
dv = 1
else:
dv = 11 - resto2
return dv
def format_nosso_numero(self):
"""
Geralmente é implementado pela classe derivada. Usada para formatar
        como o nosso número será impresso no boleto. Às vezes é o mesmo
do `numero_documento` e às vezes contém outros campos
juntos.
"""
return self.nosso_numero
def _cedente_endereco_get(self):
if self._cedente_endereco is None:
self._cedente_endereco = '%s - %s - %s - %s - %s' % (
self.cedente_logradouro,
self.cedente_bairro,
self.cedente_cidade,
self.cedente_uf,
self.cedente_cep
)
return self._cedente_endereco
def _cedente_endereco_set(self, endereco):
if len(endereco) > 80:
raise BoletoException(
u'Linha de endereço possui mais que 80 caracteres')
self._cedente_endereco = endereco
cedente_endereco = property(_cedente_endereco_get, _cedente_endereco_set)
"""Endereço do Cedente com no máximo 80 caracteres"""
def _get_valor(self):
if self._valor is not None:
return "%.2f" % self._valor
def _set_valor(self, val):
if type(val) is Decimal:
self._valor = val
else:
self._valor = Decimal(str(val))
valor = property(_get_valor, _set_valor)
"""Valor convertido para :class:`Decimal`.
Geralmente valor e valor_documento são o mesmo número.
:type: Decimal
"""
def _get_valor_documento(self):
if self._valor_documento is not None:
return "%.2f" % self._valor_documento
def _set_valor_documento(self, val):
if type(val) is Decimal:
self._valor_documento = val
else:
self._valor_documento = Decimal(str(val))
valor_documento = property(_get_valor_documento, _set_valor_documento)
"""Valor do Documento convertido para :class:`Decimal`.
De preferência para passar um valor em :class:`Decimal`, se não for passado
outro tipo será feito um cast para :class:`Decimal`.
"""
def _instrucoes_get(self):
return self._instrucoes
def _instrucoes_set(self, list_inst):
if isinstance(list_inst, str):
list_inst = list_inst.splitlines()
if len(list_inst) > 7:
raise BoletoException(
u'Número de linhas de instruções maior que 7')
for line in list_inst:
if len(line) > 90:
raise BoletoException(
u'Linha de instruções possui mais que 90 caracteres')
self._instrucoes = list_inst
instrucoes = property(_instrucoes_get, _instrucoes_set)
"""Instruções para o caixa do banco que recebe o bilhete
Máximo de 7 linhas com 90 caracteres cada.
Geralmente contém instruções para aplicar multa ou não aceitar caso tenha
passado a data de validade.
"""
def _demonstrativo_get(self):
return self._demonstrativo
def _demonstrativo_set(self, list_dem):
if isinstance(list_dem, str):
list_dem = list_dem.splitlines()
if len(list_dem) > 12:
raise BoletoException(
u'Número de linhas de demonstrativo maior que 12')
for line in list_dem:
if len(line) > 90:
raise BoletoException(
u'Linha de demonstrativo possui mais que 90 caracteres')
self._demonstrativo = list_dem
demonstrativo = property(_demonstrativo_get, _demonstrativo_set)
"""Texto que vai impresso no corpo do Recibo do Sacado
Máximo de 12 linhas com 90 caracteres cada.
"""
def _sacado_get(self):
"""Tenta usar o sacado que foi setado ou constroi um
Se você não especificar um sacado o boleto tentará construir um sacado
a partir de outras proriedades setadas.
Para facilitar você deve sempre setar essa propriedade.
"""
if self._sacado is None:
self.sacado = [
'%s - CPF/CNPJ: %s' % (self.sacado_nome,
self.sacado_documento),
self.sacado_endereco,
'%s - %s - %s - %s' % (
self.sacado_bairro,
self.sacado_cidade,
self.sacado_uf,
self.sacado_cep
)
]
return self._sacado
def _sacado_set(self, list_sacado):
if len(list_sacado) > 3:
raise BoletoException(u'Número de linhas do sacado maior que 3')
self._sacado = list_sacado
sacado = property(_sacado_get, _sacado_set)
"""Campo sacado composto por até 3 linhas.
A primeira linha precisa ser o nome do sacado.
As outras duas linhas devem ser usadas para o endereço do sacado.
"""
@property
def agencia_conta_cedente(self):
return "%s/%s" % (self.agencia_cedente, self.conta_cedente)
@property
def codigo_dv_banco(self):
cod = "%s-%s" % (self.codigo_banco, self.modulo11(self.codigo_banco))
return cod
@property
def linha_digitavel(self):
"""Monta a linha digitável a partir do barcode
Esta é a linha que o cliente pode utilizar para digitar se o código
de barras não estiver legível.
"""
linha = self.barcode
if not linha:
raise BoletoException("Boleto doesn't have a barcode")
def monta_campo(campo):
campo_dv = "%s%s" % (campo, self.modulo10(campo))
return "%s.%s" % (campo_dv[0:5], campo_dv[5:])
return ' '.join([monta_campo(linha[0:4] + linha[19:24]),
monta_campo(linha[24:34]),
monta_campo(linha[34:44]),
linha[4],
linha[5:19]])
@staticmethod
def modulo11(num, base=9, r=0):
return modulo11(num, base, r)
@staticmethod
def modulo10(num):
return modulo10(num)
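# Illustrative sketch only (NOT part of pyboleto and not a real bank layout): a minimal
# concrete subclass showing how `campo_livre` might be provided so that `barcode` and
# `linha_digitavel` can be computed. Real implementations live in `pyboleto.bank` and
# follow each bank's own specification.
class _BoletoExemplo(Boleto):
    @property
    def campo_livre(self):
        # Hypothetical 25-digit layout: carteira (2) + nosso número (13) + conta (10).
        conteudo = "%2s%13s%10s" % (self.carteira,
                                    self.nosso_numero,
                                    self.conta_cedente)
        return conteudo.replace(' ', '0')[:25]
# Usage sketch (all values are made up):
#   b = _BoletoExemplo(codigo_banco='001', carteira='18',
#                      data_vencimento=datetime.date(2020, 1, 31))
#   b.nosso_numero = '1234567890123'
#   b.valor_documento = '150.00'
#   print(b.linha_digitavel)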
|
the-stack_0_21175 | import sys
fields = [int(x) - 1 for x in sys.argv[1].split(',')]
for line in sys.stdin:
parts = line.split(',')
val = []
for field in fields:
val.append(parts[field])
sys.stdout.write(','.join(val) + '\n')
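# Usage sketch (the file name is hypothetical): keep columns 1 and 3 of a CSV stream:
#   cat data.csv | python cut.py 1,3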
|
the-stack_0_21178 | # coding: utf-8
"""
Copyright 2016 SmartBear Software
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
Ref: https://github.com/swagger-api/swagger-codegen
"""
from pprint import pformat
from six import iteritems
import re
import json
from ..utils import sanitize_for_serialization
class TokenInfo(object):
"""
NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
"""
def __init__(self):
"""
TokenInfo - a model defined in Swagger
:param dict swaggerTypes: The key is attribute name
and the value is attribute type.
:param dict attributeMap: The key is attribute name
and the value is json key in definition.
"""
self.swagger_types = {
'organization': 'NamedEntity',
'home_organization': 'NamedEntity',
'authorized_scope': 'list[str]',
'cloned_user': 'TokenInfoClonedUser',
'o_auth_client': 'OrgOAuthClient'
}
self.attribute_map = {
'organization': 'organization',
'home_organization': 'homeOrganization',
'authorized_scope': 'authorizedScope',
'cloned_user': 'clonedUser',
'o_auth_client': 'OAuthClient'
}
self._organization = None
self._home_organization = None
self._authorized_scope = None
self._cloned_user = None
self._o_auth_client = None
@property
def organization(self):
"""
Gets the organization of this TokenInfo.
The current organization
:return: The organization of this TokenInfo.
:rtype: NamedEntity
"""
return self._organization
@organization.setter
def organization(self, organization):
"""
Sets the organization of this TokenInfo.
The current organization
:param organization: The organization of this TokenInfo.
:type: NamedEntity
"""
self._organization = organization
@property
def home_organization(self):
"""
Gets the home_organization of this TokenInfo.
The token's home organization
:return: The home_organization of this TokenInfo.
:rtype: NamedEntity
"""
return self._home_organization
@home_organization.setter
def home_organization(self, home_organization):
"""
Sets the home_organization of this TokenInfo.
The token's home organization
:param home_organization: The home_organization of this TokenInfo.
:type: NamedEntity
"""
self._home_organization = home_organization
@property
def authorized_scope(self):
"""
Gets the authorized_scope of this TokenInfo.
The list of scopes authorized for the OAuth client
:return: The authorized_scope of this TokenInfo.
:rtype: list[str]
"""
return self._authorized_scope
@authorized_scope.setter
def authorized_scope(self, authorized_scope):
"""
Sets the authorized_scope of this TokenInfo.
The list of scopes authorized for the OAuth client
:param authorized_scope: The authorized_scope of this TokenInfo.
:type: list[str]
"""
self._authorized_scope = authorized_scope
@property
def cloned_user(self):
"""
Gets the cloned_user of this TokenInfo.
Only present when a user is a clone of trustee user in the trustor org.
:return: The cloned_user of this TokenInfo.
:rtype: TokenInfoClonedUser
"""
return self._cloned_user
@cloned_user.setter
def cloned_user(self, cloned_user):
"""
Sets the cloned_user of this TokenInfo.
Only present when a user is a clone of trustee user in the trustor org.
:param cloned_user: The cloned_user of this TokenInfo.
:type: TokenInfoClonedUser
"""
self._cloned_user = cloned_user
@property
def o_auth_client(self):
"""
Gets the o_auth_client of this TokenInfo.
:return: The o_auth_client of this TokenInfo.
:rtype: OrgOAuthClient
"""
return self._o_auth_client
@o_auth_client.setter
def o_auth_client(self, o_auth_client):
"""
Sets the o_auth_client of this TokenInfo.
:param o_auth_client: The o_auth_client of this TokenInfo.
:type: OrgOAuthClient
"""
self._o_auth_client = o_auth_client
def to_dict(self):
"""
Returns the model properties as a dict
"""
result = {}
for attr, _ in iteritems(self.swagger_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
return result
def to_json(self):
"""
Returns the model as raw JSON
"""
return json.dumps(sanitize_for_serialization(self.to_dict()))
def to_str(self):
"""
Returns the string representation of the model
"""
return pformat(self.to_dict())
def __repr__(self):
"""
For `print` and `pprint`
"""
return self.to_str()
def __eq__(self, other):
"""
Returns true if both objects are equal
"""
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""
Returns true if both objects are not equal
"""
return not self == other
|
the-stack_0_21179 | # Copyright (c) 2021, NECSTLab, Politecnico di Milano. All rights reserved.
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# * Neither the name of NECSTLab nor the names of its
# contributors may be used to endorse or promote products derived
# from this software without specific prior written permission.
# * Neither the name of Politecnico di Milano nor the names of its
# contributors may be used to endorse or promote products derived
# from this software without specific prior written permission.
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ``AS IS'' AND ANY
# EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
# PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
# OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
# -*- coding: utf-8 -*-
"""
Created on Tue Jul 20 08:39:27 2021
Implement the image processing pipeline using Python and OpenCV,
and Python implementations of the CUDA kernels.
Used for debugging, and to visualize intermediate results.
@author: albyr
"""
from skimage.io import imread, imsave
from skimage.filters import gaussian, sobel, unsharp_mask
from skimage.color import rgb2gray
from skimage import data, img_as_float
import matplotlib.pyplot as plt
import numpy as np
from typing import Callable
import time
BW = False
KERNEL_SMALL = 0.1
KERNEL_LARGE = 2
KERNEL_UNSHARPEN = 0.7
KERNEL_SMALL_DIAMETER = 3
KERNEL_SMALL_VARIANCE = 0.1
KERNEL_LARGE_DIAMETER = 5
KERNEL_LARGE_VARIANCE = 10
KERNEL_UNSHARPEN_DIAMETER = 3
KERNEL_UNSHARPEN_VARIANCE = 5
SOBEL_FILTER_DIAMETER = 3
SOBEL_FILTER_X = np.array([[-1, -2, -1], [0, 0, 0], [1, 2, 1]])
SOBEL_FILTER_Y = np.array([[-1, 0, 1], [-2, 0, 2], [-1, 0, 1]])
def time_function(name: str=None) -> Callable:
"""
Decorator that simplifies timing a function call;
:param name: name of the function or computation to measure
    :return: a decorator that times calls to the wrapped function
"""
def inner_func(func) -> Callable:
def func_call(self, *args, **kwargs) -> object:
start = time.time()
result = func(self, *args, **kwargs)
end = time.time()
print(f"{name if name is not None else func.__name__} took {end - start} sec")
return result
return func_call
return inner_func
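# Minimal usage sketch for time_function (illustrative only). The wrapper forwards its
# first positional argument explicitly, so it works on plain functions that take at
# least one argument as well as on methods.
@time_function("toy workload")
def _timed_toy_sum(n):
    return sum(range(n))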
def gaussian_kernel(diameter, sigma):
kernel = np.zeros((diameter, diameter))
mean = diameter / 2
sum_tmp = 0
for x in range(diameter):
for y in range(diameter):
kernel[x, y] = np.exp(-0.5 * ((x - mean) ** 2 + (y - mean) ** 2) / sigma ** 2)
sum_tmp += kernel[x, y]
for x in range(diameter):
for y in range(diameter):
kernel[x, y] /= sum_tmp
return kernel
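# Quick sanity check of gaussian_kernel (illustrative only): thanks to the explicit
# normalization above, the kernel weights should sum to ~1 for any diameter/sigma.
def _check_gaussian_kernel(diameter=5, sigma=1.0):
    kernel = gaussian_kernel(diameter, sigma)
    assert abs(kernel.sum() - 1.0) < 1e-6
    return kernel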
def gaussian_blur_py(image, kernel):
out = np.zeros(image.shape)
rows, cols = image.shape
# Blur radius;
diameter = kernel.shape[0]
radius = diameter // 2
# Flatten image and kernel;
image_1d = image.reshape(-1)
kernel_1d = kernel.reshape(-1)
for i in range(rows):
for j in range(cols):
sum_tmp = 0
for x in range(-radius, radius + 1):
for y in range(-radius, radius + 1):
nx = x + i
ny = y + j
if (nx >= 0 and ny >= 0 and nx < rows and ny < cols):
sum_tmp += kernel_1d[(x + radius) * diameter + (y + radius)] * image_1d[nx * cols + ny]
out[i, j] = sum_tmp
return out
def sobel_filter_py(image):
out = np.zeros(image.shape)
rows, cols = image.shape
radius = SOBEL_FILTER_DIAMETER // 2
for i in range(rows):
for j in range(cols):
sum_gradient_x = 0
sum_gradient_y = 0
for x in range(-radius, radius + 1):
for y in range(-radius, radius + 1):
nx = x + i
ny = y + j
if (nx >= 0 and ny >= 0 and nx < rows and ny < cols):
gray_value_neigh = image[nx, ny]
gradient_x = SOBEL_FILTER_X[x + radius][y + radius]
gradient_y = SOBEL_FILTER_Y[x + radius][y + radius]
sum_gradient_x += gray_value_neigh * gradient_x
sum_gradient_y += gray_value_neigh * gradient_y
out[i, j] = np.sqrt(sum_gradient_x ** 2 + sum_gradient_y ** 2)
return out
def normalize(image):
return (image - np.min(image)) / (np.max(image) - np.min(image))
def truncate(image, minimum=0, maximum=1):
out = image.copy()
out[out < minimum] = minimum
out[out > maximum] = maximum
return out
def scurve(img):
img_out = img.copy()
lut_b = lambda x: 0.7 * (1 / (1 + np.exp((-x + 0.5) * 10))) + 0.3 if x < 0.5 else 1 / (1 + np.exp((-x + 0.5) * 10))
lut_r = lambda x: 0.8 * (1 / (1 + np.exp((-x + 0.5) * 7))) + 0.2 if x < 0.5 else (1 / (1 + np.exp((-x + 0.5) * 7)))
lut_g = lambda x: 0.8 * (1 / (1 + np.exp((-x + 0.5) * 10))) + 0.2 if x < 0.5 else (1 / (1 + np.exp((-x + 0.5) * 9)))
lut_g2 = lambda x: x**1.4
lut_b2 = lambda x: x**1.6
img_out[:, :, 0] = np.vectorize(lut_b)(img[:, :, 0])
img_out[:, :, 1] = np.vectorize(lut_g)(img[:, :, 1])
img_out[:, :, 2] = np.vectorize(lut_r)(img[:, :, 2])
img_out[:, :, 1] = np.vectorize(lut_g2)(img_out[:, :, 1])
img_out[:, :, 0] = np.vectorize(lut_b2)(img_out[:, :, 0])
return img_out
# plt.plot(np.linspace(0,1,255), scurve(np.linspace(0,1,255)))
#%%
@time_function()
def pipeline_golden(img):
multichannel = not BW
# Part 1: Small blur on medium frequencies;
blurred_small = gaussian(img, sigma=(KERNEL_SMALL, KERNEL_SMALL), multichannel=multichannel)
edges_small = normalize(sobel(blurred_small))
# Part 2: High blur on low frequencies;
blurred_large = gaussian(img, sigma=(KERNEL_LARGE, KERNEL_LARGE), multichannel=multichannel)
edges_large = sobel(blurred_large)
# Extend mask to cover a larger area;
edges_large = truncate(normalize(edges_large) * 5)
# Part 3: Sharpen image;
amount = 10
sharpened = unsharp_mask(img, radius=KERNEL_UNSHARPEN, amount=amount, multichannel=multichannel)
# Part 4: Merge sharpened image and low frequencies;
image2 = normalize(sharpened * edges_large + blurred_large * (1 - edges_large))
# Part 5: Merge image and medium frequencies;
result = image2 * edges_small + blurred_small * (1 - edges_small)
# Part 6: Apply LUT;
result_lut = scurve(result)
return result_lut, [blurred_small, edges_small, blurred_large, edges_large, sharpened, image2, result]
@time_function()
def pipeline_bw(img):
# Create kernels for blur;
kernel_small_cpu = gaussian_kernel(KERNEL_SMALL_DIAMETER, KERNEL_SMALL_VARIANCE)
kernel_large_cpu = gaussian_kernel(KERNEL_LARGE_DIAMETER, KERNEL_LARGE_VARIANCE)
kernel_unsharpen_cpu = gaussian_kernel(KERNEL_UNSHARPEN_DIAMETER, KERNEL_UNSHARPEN_VARIANCE)
# Part 1: Small blur on medium frequencies;
blurred_small = gaussian_blur_py(img, kernel_small_cpu)
edges_small = normalize(sobel_filter_py(blurred_small))
# Part 2: High blur on low frequencies;
blurred_large = gaussian_blur_py(img, kernel_large_cpu)
edges_large = sobel_filter_py(blurred_large)
# Extend mask to cover a larger area;
edges_large = truncate(normalize(edges_large) * 5)
# Part 3: Sharpen image;
unsharpen = gaussian_blur_py(img, kernel_unsharpen_cpu)
amount = 8
sharpened = truncate(img * (1 + amount) - unsharpen * amount)
# Part 4: Merge sharpened image and low frequencies;
image2 = normalize(sharpened * edges_large + blurred_large * (1 - edges_large))
# Part 5: Merge image and medium frequencies;
result = image2 * edges_small + blurred_small * (1 - edges_small)
return result, [blurred_small, edges_small, blurred_large, edges_large, sharpened, image2]
if __name__ == "__main__":
# img = imread("puppy.jpg")
img = img_as_float(data.astronaut())
if BW:
img = rgb2gray(img) # Output is a [0,1] matrix;
# Golden pipeline;
result, other = pipeline_golden(img)
fig, axes = plt.subplots(4, 2, figsize=(6, 6))
ax = axes.ravel()
cmap = plt.cm.gray if BW else None
ax[0].imshow(img, cmap=cmap)
ax[1].imshow(other[0], cmap=cmap)
ax[2].imshow(np.dot(other[1][...,:3], [0.33, 0.33, 0.33]), cmap='gray')
ax[3].imshow(other[2], cmap=cmap)
ax[4].imshow(np.dot(other[3][...,:3], [0.33, 0.33, 0.33]), cmap='gray')
ax[5].imshow(other[4], cmap=cmap)
ax[6].imshow(other[5], cmap=cmap)
ax[7].imshow(result, cmap=cmap)
for i in ax:
i.axis("off")
fig.tight_layout()
plt.show()
fig.savefig("astronaut_g.jpg")
# Custom BW pipeline;
result2 = np.zeros(img.shape)
other2 = [np.zeros(img.shape) for i in range(len(other))]
for i in range(img.shape[-1]):
result2[:, :, i], tmp = pipeline_bw(img[:, :, i])
for j, x in enumerate(tmp):
other2[j][:, :, i] = x
fig, axes = plt.subplots(2, 2, figsize=(6, 6))
ax = axes.ravel()
cmap = plt.cm.gray if BW else None
ax[0].imshow(img, cmap=cmap)
ax[1].imshow(other2[2], cmap=cmap)
ax[2].imshow(other2[3], cmap=cmap)
ax[3].imshow(result2, cmap=cmap)
for i in ax:
i.axis("off")
fig.tight_layout()
plt.show()
fig.savefig("astronaut_py.jpg")
|
the-stack_0_21183 | import sys
import traceback
from peopleBook import PeopleBook
from peopleBook import Person
def print_help():
"""
prints the help message for this application
:return: None
"""
print("python contact.py add|update|get|find|remove operations")
print(
'add operations: [firstname=first name | middlename=middle name | lastname=last name | addresses=address1,address2,...,addressn | phone_numbers=phone1,phone2,...,phonen | emails=email1,email2,...,emailn | note=note]')
print(
'update operations: entry_number [firstname="first name" | middlename="middle name" | lastname="last name" | addresses="address1","address2",...,"addressn" | phone_numbers="phone1","phone2",...,"phonen" | emails="email1","email2",...,"emailn" | note="note"]')
print("get operations: entry_number1 entry_number2 ...")
print('find operations: keyword name "part of name" "part of name 2" ...')
print("remove operation: entry_number1 entry_number2 ...")
def get_equivalence_expressions(strings):
"""
    Regroup a list of tokens into strings of the form "name=expression".
    The tokens are expected to come from splitting a whole command string on spaces.
    :param strings: the input token list, assumed to come from a space-delimited command string
    :return: a list of strings in the format "name=expression"
"""
result = []
equal_indexes = []
# identify the index of all segments with equal sign
for counter, value in enumerate(strings):
if '=' in value:
equal_indexes.append(counter)
if len(equal_indexes) == 0:
return result
# regroup the original lists into segments seperated by the equal sign indexes
equal_indexes.append(len(strings))
last_index = 0
for i in equal_indexes:
result.append(" ".join(strings[last_index:i]))
last_index = i
result.pop(0)
return result
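# Illustrative example of the regrouping performed above (not in the original code):
#   get_equivalence_expressions("firstname=John Ronald lastname=Doe".split())
#   returns ["firstname=John Ronald", "lastname=Doe"]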
def equiv2handler_mapper(person):
"""
creates the handler table for handling add and update operations
:param person: the person record that is being operated on
:return: the handler table
"""
def set_first(name):
person.firstName = name
def set_middle(name):
person.middleName = name
def set_last(name):
person.lastName = name
def set_addresses(addresses):
person.addresses = [i.strip() for i in addresses.split(',')]
def set_phones(phone_numbers):
person.phoneNumbers = [i.strip() for i in phone_numbers.split(',')]
def set_emails(emails):
person.emailAddresses = [i.strip() for i in emails.split(',')]
def set_note(note):
person.note = note
value_mapper = {"firstname": set_first,
"middlename": set_middle,
"lastname": set_last,
"addresses": set_addresses,
"phone_numbers": set_phones,
"emails": set_emails,
"note": set_note}
return value_mapper
def handle_equivalence_expression(expression, person = Person()):
"""
    Translate the name=expression tokens into a person record. It makes use of the get_equivalence_expressions function and the handler table generated by equiv2handler_mapper.
:param expression: the expression to evaluate
:param person: the record to update information in
:return: the record
"""
equivalence_expressions = get_equivalence_expressions(expression)
value_mapper = equiv2handler_mapper(person)
for exp in equivalence_expressions:
parts = exp.split('=')
handler = value_mapper.get(parts[0], None)
if handler is None:
print("%s is not a valid field" % parts[0])
return None
handler(parts[1])
return person
def handle_get(contact, entryNumbers):
"""
handles the get operation.
:param contact: the contact book to get from
:param entryNumbers: a list of entry numbers to look up
:return: None
"""
for entryNumber in entryNumbers:
person = contact.get(entryNumber)
if person is None:
print("The address with entry number: %s does not exists" % entryNumber)
continue
print("-" * 80)
print(person)
def handle_find(contact, keywords):
"""
handles the find operation
:param contact: the contact book to search
:param keywords: the name keywords tokenized by space to search
:return: None
"""
keyword = " ".join(keywords)
for i in contact.find(keyword):
print("-" * 80)
print(i)
def handle_add(contact, addStrings):
"""
handles the add operation
:param contact: the contact book to add records to
:param addStrings: the add expression string tokenized by space
:return: None
"""
person = handle_equivalence_expression(addStrings)
if person is None:
return
contact.save(person)
print("Entry %s saved" % person.entryNumber)
def handle_update(contact, updateStrings):
"""
handles the update operation
:param contact: the contact book to update records in
:param updateStrings: the update operation expression tokenized by space in the format of entry_number name=value name2=value2 etc...
:return: None
"""
if len(updateStrings) < 1:
print_help()
return
entry_number = updateStrings[0]
expression_strs = updateStrings[1:]
person = contact.get(entry_number)
if person is None:
print("Entry %s does not exists" % entry_number)
person = handle_equivalence_expression(expression_strs, person)
person.setEntryNumber(int(entry_number))
contact.save(person)
print("Entry %s updated" % person.entryNumber)
def handle_remove(contact, entries):
"""
handles the remove operation
:param contact: the contact book to remove from
:param entries: the entry numbers to remove
:return: None
"""
for i in entries:
contact.delete(i)
print("Entry %s removed" % i)
def construct_handler_chain():
"""
constructs the handler chain which maps user inputs to handler functions
:return: the handler chain
"""
handlers = dict()
handlers["add"] = handle_add
handlers["update"] = handle_update
handlers["get"] = handle_get
handlers["find"] = dict()
handlers["find"]["keyword"] = dict()
handlers["find"]["keyword"]["name"] = handle_find
handlers["remove"] = handle_remove
return handlers
def main(argv):
"""
the main starting function
:param argv: the command line arguments
:return: None
"""
contact_book = PeopleBook()
try:
contact_book.open()
handlers = construct_handler_chain()
current_handler = handlers
# trace to the handler function based on user input
while len(argv) != 0:
to_process = argv[:1]
argv = argv[1:]
current_handler = current_handler.get(to_process[0], None)
if current_handler is None:
print("Unsupported Operation: %s" % to_process[0])
return
if type(current_handler) == dict:
continue
# handles the user input with the handler mapped
current_handler(contact_book, argv)
return
print_help()
except:
print("Error Occurred")
traceback.print_exc()
finally:
contact_book.close()
if __name__ == "__main__":
arguments = sys.argv
# remove the python file argument if launched with python
if len(sys.argv) != 0 and sys.argv[0].endswith(".py"):
arguments = sys.argv[1:]
if len(arguments) == 0:
print_help()
else:
main(arguments)
|
the-stack_0_21184 | """Test cases for equity shares."""
from quantfinpy.instrument.equity.share import EquityShare
def test_equity_share_ctor():
# Building equity share for Google.
company: str = "Google"
share = EquityShare(company)
# Checking the built equity share.
assert isinstance(share, EquityShare)
assert share.company == company
|
the-stack_0_21185 | """Code and data structures for managing source directives."""
import bisect
import collections
import io
import keyword
import logging
import re
import sys
import tokenize
from typing import Collection
from pytype import blocks
from pytype import utils
log = logging.getLogger(__name__)
_DIRECTIVE_RE = re.compile(r"#\s*(pytype|type)\s*:\s?([^#]*)")
_IGNORE_RE = re.compile(r"^ignore(\[.+\])?$")
_CLASS_OR_FUNC_RE = re.compile(r"^(def|class)\s")
_DOCSTRING_RE = re.compile(r"^\s*(\"\"\"|''')")
_DECORATOR_RE = re.compile(r"^\s*@(\w+)([(]|\s*$)")
_ALL_ERRORS = "*" # Wildcard for disabling all errors.
_FUNCTION_CALL_ERRORS = (
"duplicate-keyword",
"missing-parameter",
"wrong-arg-count",
"wrong-arg-types",
"wrong-keyword-args",
)
class _DirectiveError(Exception):
pass
class SkipFileError(Exception):
"""Exception thrown if we encounter "pytype: skip-file" in the source code."""
class _LineSet:
"""A set of line numbers.
The data structure is optimized to represent the union of a sparse set
of integers and ranges of non-negative integers. This supports the two styles
of directives: those after a statement apply only to that line and those on
their own line apply until countered by the opposing directive.
"""
def __init__(self):
# Map of line->bool for specific lines, takes precedence over _transitions.
self._lines = {}
# A sorted list of the lines at which the range state changes
# polarity. It is assumed to initially be false (not in a range).
# Even positions represent the start of a range, odd positions represent
# the end of a range. Thus [2, 5, 10, 12] would include lines 2, 3, 4, 10,
# and 11. If the length is odd, then an end of maxint is implied, thus
# [2, 5, 10] would disable lines 2, 3, 4, 10, 11, 12, ...
self._transitions = []
@property
def lines(self):
return self._lines
def set_line(self, line, membership):
"""Set whether a given line is a member of the set."""
self._lines[line] = membership
def start_range(self, line, membership):
"""Start a range of lines that are either included/excluded from the set.
Args:
line: A line number.
membership: If True, lines >= line are included in the set (starting
a range), otherwise they are excluded (ending a range).
Raises:
ValueError: if line is less than that of a previous call to start_range().
"""
last = self._transitions[-1] if self._transitions else -1
# Assert that lines are monotonically increasing. This simplifies the
# logic of adding new lines and ensures that _ranges is sorted.
if line < last:
raise ValueError("Line number less than previous start_range() call.")
# Determine previous membership state (True if the last range has an
# indefinite end).
previous = (len(self._transitions) % 2) == 1
if membership == previous:
return # Redundant with previous state, do nothing.
elif line == last:
# We have either enable/disable or disable/enable on the same line,
# cancel them out by popping the previous transition.
self._transitions.pop()
else:
# Normal case - add a transition at this line.
self._transitions.append(line)
def __contains__(self, line):
"""Return if a line is a member of the set."""
# First check for an entry in _lines.
specific = self._lines.get(line)
if specific is not None:
return specific
# Find the position in _ranges for line. The polarity of this position
# determines whether we are inside a range (odd) or outside (even).
pos = bisect.bisect(self._transitions, line)
return (pos % 2) == 1
def get_disable_after(self, lineno):
"""Get an unclosed disable, if any, that starts after lineno."""
if len(self._transitions) % 2 == 1 and self._transitions[-1] >= lineno:
return self._transitions[-1]
return None
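# Illustrative example of how _LineSet combines the two directive styles (not part of
# the original module):
#   s = _LineSet()
#   s.set_line(3, True)       # trailing "# pytype: disable=..." on line 3 only
#   s.start_range(10, True)   # block "# pytype: disable=..." on its own line 10
#   s.start_range(20, False)  # matching "# pytype: enable=..." on line 20
#   assert 3 in s and 15 in s and 5 not in s and 20 not in s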
class _TypeCommentSet:
"""A set of type comments in a single logical line."""
@classmethod
def start(cls, lineno):
return cls(lineno)
def __init__(self, start_lineno):
self.start_line = start_lineno
self.end_line = None
self.type_comments = {}
class _FunctionDefinition:
"""Tracks the line numbers of function definitions."""
@classmethod
def start(cls, lineno):
return cls(lineno)
def __init__(self, start_lineno):
self._paren_count = 0
self._start_line = start_lineno
self._end_line = None
def add_lpar(self, lineno):
assert lineno >= self._start_line
self._paren_count += 1
def add_rpar(self, lineno):
if self._end_line is not None:
return
self._paren_count -= 1
if self._paren_count == 0:
self._end_line = lineno
def contains(self, lineno):
if lineno < self._start_line:
return False
return self._end_line is None or lineno <= self._end_line
class _VariableAnnotation:
"""Processes a single logical line, looking for a variable annotation."""
@classmethod
def start(cls, lineno, token):
self = cls()
self.add_token(lineno, token)
return self
def __init__(self):
self._tokens = []
self.annotation = ""
# Set to True when the full annotation has been found, or if we determine
# that the line does not contain an annotation.
self.closed = False
# Set to the line on which the colon is found. We do not use the line at
# which start() is called because the latter may be a blank line.
self.start_lineno = None
# Used to consume a 'self.' or 'cls.' prefix so we can detect variable
# annotations on attributes.
self._attr_prefix = []
def _add_to_attr_prefix(self, token):
if self._tokens:
return False
if (not self._attr_prefix and token.string in ("self", "cls") or
len(self._attr_prefix) == 1 and token.exact_type == tokenize.DOT):
self._attr_prefix.append(token)
return True
return False
def _accept(self, token):
if self.closed:
return False
# Allow comments and whitespace before the NAME token signifying the start
# of the annotation.
return token.exact_type != tokenize.COMMENT and token.string.strip()
def add_token(self, lineno, token):
"""Process a token."""
if not self._accept(token):
return
if self._add_to_attr_prefix(token):
return
# Match NAME COLON [annotation] EQUAL. We assume the annotation starts at
# the beginning of the line, which greatly simplifies matching at the cost
# of failing to find annotations in lines like `if __random__: v: int = 0`.
if not self._tokens:
# Filter out false positives like `else: x = 0`.
if token.exact_type != tokenize.NAME or keyword.iskeyword(token.string):
self.closed = True
elif len(self._tokens) == 1:
if token.exact_type == tokenize.COLON:
self.start_lineno = lineno
else:
self.closed = True
elif token.exact_type == tokenize.EQUAL:
self.closed = True
else:
if self.annotation and self._tokens[-1].end[0] == token.start[0]:
# Preserve whitespace.
self.annotation += token.line[self._tokens[-1].end[1]:token.start[1]]
self.annotation += token.string
self._tokens.append(token)
def _collect_bytecode(ordered_code):
bytecode_blocks = []
stack = [ordered_code]
while stack:
code = stack.pop()
bytecode_blocks.append(code.original_co_code)
for const in code.co_consts:
if isinstance(const, blocks.OrderedCode):
stack.append(const)
return bytecode_blocks
def _adjust_line_number(line, allowed_lines, min_line=1):
adjusted_line = line
while adjusted_line not in allowed_lines and adjusted_line >= min_line:
adjusted_line -= 1
return adjusted_line if adjusted_line >= min_line else None
def _is_function_call(opcode_name):
return opcode_name.startswith("CALL_") or opcode_name in {
"BINARY_SUBSCR",
"COMPARE_OP",
"FOR_ITER",
}
def _is_funcdef_op(opcode_name):
"""Checks whether the opcode may appear in a function signature."""
return opcode_name.startswith("LOAD_") or opcode_name in {
"BINARY_SUBSCR",
"BUILD_TUPLE",
}
def _is_load_attribute_op(opcode_name):
"""Checks whether the opcode loads an attribute."""
return (opcode_name.startswith("GET_") or
opcode_name.startswith("UNPACK_") or
opcode_name in {
"LOAD_ATTR",
"LOAD_METHOD",
"SETUP_WITH",
})
class _OpcodeLines:
"""Stores opcode line numbers for Director.adjust_line_numbers()."""
class Call:
def __init__(self, line):
self.line = line
self.children = set()
def __repr__(self):
return f"call @ line {self.line} ({len(self.children)} nested calls)"
def __init__(
self,
store_lines: Collection[int],
make_function_lines: Collection[int],
non_funcdef_lines: Collection[int],
load_attr_lines: Collection[int],
call_lines: Collection[Call]):
self.store_lines = store_lines
self.make_function_lines = make_function_lines
self.non_funcdef_lines = non_funcdef_lines
self.load_attr_lines = load_attr_lines
# We transform call_lines into a line->Call mapping so that
# _adjust_line_number can treat it as a Collection[int] like the other
# *_lines attributes.
self.call_lines = {call.line: call for call in call_lines}
@classmethod
def from_code(cls, code):
"""Builds an _OpcodeLines from a code object."""
store_lines = set()
make_function_lines = set()
non_funcdef_lines = set()
load_attr_lines = set()
all_call_lines = []
for block in _collect_bytecode(code):
call_lines = []
for opcode in block:
if opcode.name.startswith("STORE_"):
store_lines.add(opcode.line)
elif opcode.name == "MAKE_FUNCTION":
make_function_lines.add(opcode.line)
elif _is_load_attribute_op(opcode.name):
load_attr_lines.add(opcode.line)
elif _is_function_call(opcode.name):
# Function calls can be nested, so we represent them as a sequence of
# call trees. As opcodes are typically ordered by increasing line
# number, we detect nested calls via decreasing line numbers. For
# example, for:
# top_level1() # line 1
# top_level2( # line 2
# nested1( # line 3
# more_nested()), # line 4
# nested2()) # line 5
# the sequence of line numbers is [1, 4, 3, 5, 2].
call = _OpcodeLines.Call(opcode.line)
while call_lines and opcode.line < call_lines[-1].line:
call.children.add(call_lines.pop())
call_lines.append(call)
if not _is_funcdef_op(opcode.name):
non_funcdef_lines.add(opcode.line)
all_call_lines.extend(call_lines)
return cls(store_lines, make_function_lines, non_funcdef_lines,
load_attr_lines, all_call_lines)
class Director:
"""Holds all of the directive information for a source file."""
def __init__(self, src, errorlog, filename, disable, python_version):
"""Create a Director for a source file.
Args:
src: The source text as a string.
errorlog: An ErrorLog object. Directive errors will be logged to the
errorlog.
filename: The name of the source file.
disable: List of error messages to always ignore.
python_version: The target python version.
"""
self._filename = filename
self._errorlog = errorlog
self._type_comments = [] # _TypeCommentSet objects.
self._variable_annotations = {} # Map from line number to annotation.
self._docstrings = set() # Start lines of docstrings.
# Lines that have "type: ignore". These will disable all errors, and in
# the future may have other impact (such as not attempting an import).
self._ignore = _LineSet()
# Map from error name to lines for which that error is disabled. Note
# that _ALL_ERRORS is essentially a wildcard name (it matches all names).
self._disables = collections.defaultdict(_LineSet)
# Line numbers of decorators. Since this is used to mark a class or function
# as decorated, stacked decorators will record the one closest to the
# definition (i.e. the last one). The python bytecode uses this line number
# for all the stacked decorator invocations.
self._decorators = set()
# Apply global disable, from the command line arguments:
for error_name in disable:
self._disables[error_name].start_range(0, True)
# Parse the source code for directives.
self._parse_source(src, python_version)
@property
def type_comments(self):
return collections.ChainMap(*(s.type_comments for s in self._type_comments))
@property
def annotations(self):
# It's okay to overwrite type comments with variable annotations here
# because _FindIgnoredTypeComments in vm.py will flag ignored comments.
return {**self.type_comments, **self._variable_annotations}
@property
def docstrings(self):
return sorted(self._docstrings)
@property
def ignore(self):
return self._ignore
@property
def decorators(self):
return self._decorators
def _parse_source(self, src, python_version):
"""Parse a source file, extracting directives from comments."""
f = io.StringIO(src)
defs_start = None
open_type_comment_set = _TypeCommentSet.start(1)
open_decorator = False
last_function_definition = None
open_variable_annotation = None
for token in tokenize.generate_tokens(f.readline):
tok = token.exact_type
line = token.line
lineno, col = token.start
# Check for the first line with a top-level class or function definition.
if defs_start is None and _CLASS_OR_FUNC_RE.match(line):
defs_start = lineno
# Process the token for decorators, function definitions, and comments.
if tok == tokenize.AT:
if _DECORATOR_RE.match(line):
open_decorator = True
elif tok == tokenize.NAME:
if open_decorator and token.string in ("class", "def"):
self.decorators.add(lineno)
open_decorator = False
if token.string == "def":
last_function_definition = _FunctionDefinition.start(lineno)
elif tok == tokenize.COMMENT:
self._process_comment(line, lineno, col, open_type_comment_set)
elif tok == tokenize.LPAR:
if last_function_definition:
last_function_definition.add_lpar(lineno)
elif tok == tokenize.RPAR:
if last_function_definition:
last_function_definition.add_rpar(lineno)
elif tok in (tokenize.NEWLINE, tokenize.ENDMARKER):
if open_type_comment_set.type_comments:
open_type_comment_set.end_line = lineno
self._type_comments.append(open_type_comment_set)
open_type_comment_set = _TypeCommentSet.start(lineno + 1)
# Process the token for variable annotations.
if last_function_definition and last_function_definition.contains(lineno):
pass # ignore function annotations
elif not open_variable_annotation:
open_variable_annotation = _VariableAnnotation.start(lineno, token)
elif tok in (tokenize.NEWLINE, tokenize.SEMI):
# NEWLINE indicates the end of a *logical* line of Python code, allowing
# us to handle annotations split over multiple lines.
annotation = open_variable_annotation.annotation
if annotation and open_variable_annotation.closed:
# In 3.8+, the STORE_* opcode for a multiline variable assignment is
# at the first line in the assignment; before that, it is at the last.
if python_version >= (3, 8):
assert open_variable_annotation.start_lineno
annotation_lineno = open_variable_annotation.start_lineno
else:
annotation_lineno = lineno
self._variable_annotations[annotation_lineno] = annotation
open_variable_annotation = None
else:
open_variable_annotation.add_token(lineno, token)
# Record docstrings.
if _DOCSTRING_RE.match(line):
self._docstrings.add(lineno)
if defs_start is not None:
disables = list(self._disables.items())
# Add "# type: ignore" to the list of disables that we check.
disables.append(("Type checking", self._ignore))
for name, lineset in disables:
lineno = lineset.get_disable_after(defs_start)
if lineno is not None:
self._errorlog.late_directive(self._filename, lineno, name)
def _process_comment(self, line, lineno, col, type_comment_set):
"""Process a single comment."""
matches = list(_DIRECTIVE_RE.finditer(line[col:]))
is_nested = bool(matches) and matches[0].start(0) > 0
for m in matches:
code = line[:col].strip()
open_ended = not code
tool, data = m.groups()
assert data is not None
data = data.strip()
if tool == "type":
self._process_type(lineno, code, data, is_nested, type_comment_set)
elif tool == "pytype":
try:
self._process_pytype(lineno, data, open_ended)
except _DirectiveError as e:
self._errorlog.invalid_directive(
self._filename, lineno, utils.message(e))
else:
pass # ignore comments for other tools
def _process_type(self, lineno, code, data, is_nested, type_comment_set):
"""Process a type: comment."""
# Discard type comments embedded in larger whole-line comments.
if not code and is_nested:
return
if lineno in type_comment_set.type_comments:
# If we have multiple type comments on the same line, take the last one,
# but add an error to the log.
self._errorlog.invalid_directive(
self._filename, lineno,
"Multiple type comments on the same line.")
# Also supports mypy-style ignore[code, ...] syntax, treated as regular
# ignores.
if _IGNORE_RE.match(data):
if not code:
self._ignore.start_range(lineno, True)
else:
self._ignore.set_line(lineno, True)
else:
type_comment_set.type_comments[lineno] = data
def _process_pytype(self, lineno, data, open_ended):
"""Process a pytype: comment."""
if not data:
raise _DirectiveError("Invalid directive syntax.")
for option in data.split():
# Parse the command.
if option == "skip-file":
raise SkipFileError()
try:
command, values = option.split("=", 1)
values = values.split(",")
except ValueError as e:
raise _DirectiveError("Invalid directive syntax.") from e
# Additional commands may be added in the future. For now, only
# "disable" and "enable" are supported.
if command == "disable":
disable = True
elif command == "enable":
disable = False
else:
raise _DirectiveError("Unknown pytype directive: '%s'" % command)
if not values:
raise _DirectiveError(
"Disable/enable must specify one or more error names.")
for error_name in values:
if (error_name == _ALL_ERRORS or
self._errorlog.is_valid_error_name(error_name)):
lines = self._disables[error_name]
if open_ended:
lines.start_range(lineno, disable)
else:
lines.set_line(lineno, disable)
else:
self._errorlog.invalid_directive(
self._filename, lineno, "Invalid error name: '%s'" % error_name)
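  # Illustrative examples (assumed, not taken from any particular project) of
  # the directive forms this parser handles:
  #
  #     # pytype: skip-file
  #     x = obj.maybe_missing  # pytype: disable=attribute-error
  #     # pytype: disable=attribute-error,name-error
  #     # pytype: enable=attribute-error
  #
  # A directive on a line of its own ("open ended") starts a range; one that
  # shares a line with code applies to that line only.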
def should_report_error(self, error):
"""Return whether the error should be logged.
This method is suitable for use as an error filter.
Args:
error: An error._Error object.
Returns:
True iff the error should be included in the log.
"""
# Always report errors that aren't for this file or do not have a line
# number.
if error.filename != self._filename or error.lineno is None:
return True
# Treat lineno=0 as below the file, so we can filter it.
lineno = error.lineno or sys.maxsize
# Report the error if it isn't subject to any ignore or disable.
return (lineno not in self._ignore and
lineno not in self._disables[_ALL_ERRORS] and
lineno not in self._disables[error.name])
def _adjust_line_numbers_for_type_comments(self, opcode_lines):
"""Adjusts line numbers for `# type:` comments."""
for type_comment_set in self._type_comments:
for line, comment in sorted(type_comment_set.type_comments.items()):
adjusted_line = _adjust_line_number(
line, opcode_lines.store_lines, type_comment_set.start_line)
if not adjusted_line:
# vm._FindIgnoredTypeComments will take care of error reporting.
continue
if line != type_comment_set.end_line:
self._errorlog.ignored_type_comment(self._filename, line, comment)
del type_comment_set.type_comments[line]
elif adjusted_line != line:
type_comment_set.type_comments[adjusted_line] = comment
del type_comment_set.type_comments[line]
def _adjust_line_numbers_for_decorators(self, opcode_lines):
for line in sorted(self._decorators):
adjusted_line = _adjust_line_number(
line, opcode_lines.make_function_lines)
if not adjusted_line:
log.error(
"No MAKE_FUNCTION opcode found for decorator on line %d", line)
elif adjusted_line != line:
self._decorators.add(adjusted_line)
self._decorators.remove(line)
def _adjust_line_numbers_for_variable_annotations(self, opcode_lines):
for line, annot in sorted(self._variable_annotations.items()):
adjusted_line = _adjust_line_number(line, opcode_lines.store_lines)
if not adjusted_line:
log.error(
"No STORE_* opcode found for annotation %r on line %d", annot, line)
del self._variable_annotations[line]
elif adjusted_line != line:
self._variable_annotations[adjusted_line] = annot
del self._variable_annotations[line]
def _adjust_line_numbers_for_error_directives(self, opcode_lines):
"""Adjusts line numbers for error directives."""
for error_class in _FUNCTION_CALL_ERRORS + (
"annotation-type-mismatch", "attribute-error"):
if error_class not in self._disables:
continue
lines = self._disables[error_class].lines
for line, membership in sorted(lines.items()):
if error_class == "annotation-type-mismatch":
min_line = line
# In Python 3.8+, the MAKE_FUNCTION opcode's line number is the first
# line of the function signature, so we need to skip any opcodes
# associated with the signature in between.
while min_line not in opcode_lines.non_funcdef_lines and min_line > 1:
min_line -= 1
allowed_lines = (
opcode_lines.store_lines | opcode_lines.make_function_lines)
elif error_class == "attribute-error":
min_line = 1
allowed_lines = opcode_lines.load_attr_lines
else:
min_line = 1
allowed_lines = opcode_lines.call_lines
adjusted_line = _adjust_line_number(line, allowed_lines, min_line)
if adjusted_line and adjusted_line != line:
if error_class in ("annotation-type-mismatch", "attribute-error"):
lines[adjusted_line] = membership
del lines[line]
else:
# 'adjusted_line' points to the top-level function call that
# contains 'line'. We apply the directive on 'line' to the top-level
# call as well as all nested calls between 'adjusted_line' and
# 'line'. For example, in:
# top_level_call(
# nested1(),
# nested2(), # directive
# nested3())
# the disable directive applies to top_level_call, nested1, and
            # nested2, but not nested3.
stack = [opcode_lines.call_lines[adjusted_line]]
found = False
while stack:
call = stack.pop()
if call.line > line:
continue
if call.line == line:
found = True
else:
lines[call.line] = membership
stack.extend(call.children)
# If 'line' does not itself contain a function call, then we delete
# the directive on 'line' after applying it to all the relevant
# lines that do contain function calls.
if not found:
del lines[line]
def adjust_line_numbers(self, code):
"""Uses the bytecode to adjust line numbers."""
opcode_lines = _OpcodeLines.from_code(code)
self._adjust_line_numbers_for_type_comments(opcode_lines)
self._adjust_line_numbers_for_decorators(opcode_lines)
self._adjust_line_numbers_for_variable_annotations(opcode_lines)
self._adjust_line_numbers_for_error_directives(opcode_lines)
|
the-stack_0_21186 | from flask import jsonify, request, g, abort, url_for, current_app
from .. import db
from ..models import Post, Permission
from . import api
from .decorators import permission_required
from .errors import forbidden
@api.route('/posts/')
def get_posts():
page = request.args.get('page', 1, type=int)
pagination = Post.query.paginate(
page, per_page=current_app.config['POSTS_PER_PAGE'],
error_out=False
)
    posts = pagination.items
    prev = None
if pagination.has_prev:
prev = url_for('api.get_posts', page=page-1, _external=True)
    next = None
if pagination.has_next:
next = url_for('api.get_posts', page=page+1, _external=True)
return jsonify({
'posts': [post.to_json() for post in posts],
'prev': prev,
'next': next,
'count': pagination.total
})
@api.route('/posts/<int:id>')
def get_post(id):
post = Post.query.get_or_404(id)
return jsonify(post.to_json())
@api.route('/posts/', methods=['POST'])
@permission_required(Permission.WRITE_ARTICLES)
def new_post():
    post = Post.from_json(request.json)
    post.author = g.current_user
db.session.add(post)
db.session.commit()
return jsonify(post.to_json()), 201, \
{'Location': url_for('api.get_post', id=post.id, _external=True)}
@api.route('/posts/<int:id>', methods=['PUT'])
@permission_required(Permission.WRITE_ARTICLES)
def edit_post(id):
    post = Post.query.get_or_404(id)
if g.current_user != post.author and \
not g.current_user.can(Permission.ADMINISTER):
return forbidden('Insufficient permissions')
    post.body = request.json.get('body', post.body)
    db.session.add(post)
    db.session.commit()
    return jsonify(post.to_json()) |
the-stack_0_21188 | from io import BytesIO
from pathlib import Path
from typing import Iterable, NamedTuple, Union, List, Optional
from fs.copy import copy_fs
from fs.info import Info
from typing_extensions import Literal
from typhoon.contrib.hooks.filesystem_hooks import FileSystemHookInterface
class ReadDataResponse(NamedTuple):
data: bytes
info: Info
class WriteDataResponse(NamedTuple):
metadata: dict
path: str
def read_data(hook: FileSystemHookInterface, path: Union[Path, str]) -> ReadDataResponse:
"""
    Reads the data from a file given its relative path and returns a named tuple with the shape (data: bytes, info: Info)
:param hook: FileSystem Hook
:param path: File path relative to base directory
"""
with hook as conn:
print(f'Reading from {path}')
return ReadDataResponse(data=conn.readbytes(str(path)), info=conn.getinfo(path))
def write_data(
data: Union[str, bytes, BytesIO],
hook: FileSystemHookInterface,
path: Union[Path, str],
create_intermediate_dirs: bool = False,
metadata: Optional[dict] = None,
return_path_format: Literal['relative', 'absolute', 'url'] = 'relative',
) -> Iterable[WriteDataResponse]:
"""
Write the given data to the path specified.
:param metadata: optional dict
:param data: Bytes buffer
:param hook: A FileSystemHookInterface hook instance
:param path: Path where the data should be written
:param create_intermediate_dirs: Create intermediate directories if necessary
:param return_path_format: relative, absolute or url
"""
if isinstance(data, BytesIO):
data = data.getvalue()
elif isinstance(data, str):
data = data.encode()
path = str(path)
with hook as conn:
if create_intermediate_dirs:
print('Creating intermediate directories')
conn.makedirs(str(Path(path).parent), recreate=True)
print(f'Writing to {path}')
conn.writebytes(path, data)
if return_path_format == 'relative':
return_path = path
elif return_path_format == 'absolute':
            return_path = conn.getsyspath(path)  # system path of the file on the underlying filesystem
elif return_path_format == 'url':
return_path = conn.geturl(path)
else:
raise ValueError(f'return_path_format should be "relative", "absolute" or "url". Found "{return_path_format}"')
yield WriteDataResponse(metadata=metadata, path=return_path)
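# Minimal usage sketch (illustrative): `my_hook` stands in for any concrete
# FileSystemHookInterface implementation configured elsewhere, and the payload
# and path are made up.
#
#     responses = write_data(
#         data=b"hello world",
#         hook=my_hook,
#         path="exports/hello.txt",
#         create_intermediate_dirs=True,
#     )
#     for response in responses:  # write_data is a generator; iterate to run it
#         print(response.path, response.metadata)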
def list_directory(hook: FileSystemHookInterface, path: Union[Path, str] = '/') -> Iterable[str]:
"""
List all the files in a given directory relative to base path
:param hook: FileSystem Hook
:param path: Directory relative path
"""
with hook as conn:
print(f'Listing directory {path}')
yield from [str(Path(path) / f) for f in conn.listdir(str(path))]
def copy(source_hook: FileSystemHookInterface, source_path: str, destination_hook: FileSystemHookInterface, destination_path: str):
"""
Copy a file from a source filesystem to a destination filesystem
"""
with source_hook as source_conn, destination_hook as dest_conn:
print(f'Copy {source_path} to {destination_path}')
copy_fs(source_conn.opendir(source_path), dest_conn.opendir(destination_path))
def glob(hook: FileSystemHookInterface, pattern: str, path: Union[Path, str] = '/', exclude_dirs: Optional[List[str]] = None) -> Iterable[Info]:
"""
List all the files in a given directory matching the glob pattern
:param hook: Filesystem hook
:param pattern: Glob pattern e.g. '*.csv' or '"**/*.py"'
:param path: Optional directory path
:param exclude_dirs: An optional list of patterns to exclude when searching e.g. ["*.git"]
:return fs.info.Info
path: str - A string with the matched file
raw_info (dict) – A dict containing resource info.
accessed: datetime. The resource last access time, or None.
copy(to_datetime=None)[source]: Create a copy of this resource info object.
created: datetime. The resource creation time, or None.
get(namespace, key, default=None)[source]: Get a raw info value.
Example
>>> info.get('access', 'permissions')
['u_r', 'u_w', '_wx']
gid: int. The group id of the resource, or None.
group: str. The group of the resource owner, or None.
has_namespace(namespace)[source]: Check if the resource info contains a given namespace.
is_dir: bool. True if the resource references a directory.
is_link: bool. True if the resource is a symlink.
is_writeable(namespace, key)[source]: Check if a given key in a namespace is writable.
make_path(dir_path)[source]: Make a path by joining dir_path with the resource name.
modified: datetime. The resource last modification time, or None.
name: str. The resource name.
permissions: Permissions. The permissions of the resource, or None.
size: int. The size of the resource, in bytes.
stem: str. The name minus any suffixes.
Example
>>> info
<info 'foo.tar.gz'>
>>> info.stem
'foo'
suffix: str. The last component of the name (including dot), or an empty string if there is no suffix.
suffixes: List. A list of any suffixes in the name.
target: str. The link target (if resource is a symlink), or None.
type: ResourceType. The type of the resource.
uid: int. The user id of the resource, or None.
user: str. The owner of the resource, or None.
"""
with hook as conn:
for match in conn.glob(pattern, path=path, exclude_dirs=exclude_dirs, namespaces=['details', 'access']):
yield match
|
the-stack_0_21189 | import os
import numpy as np
from sklearn.metrics import mean_absolute_error
from sklearn.model_selection import train_test_split
from definitions import ROOT_DIR
from parser import Parser
from recommendation.models.gmf_model import GMFModel
from recommendation.models.mf_model import MFModel
from recommendation.models.neumf_model import NeuMFModel
from recommendation.models.nnmf_model import NNMFModel
class ModelReco:
def __init__(self, method, batch_size=256, num_negatives=4, num_factors_user=8, num_factors_item=8, regs=None,
nb_epochs=1):
if regs is None:
regs = [0, 0]
self.method = method
self.batch_size = batch_size
self.num_negatives = num_negatives
self.num_factors_user = num_factors_user
self.num_factors_item = num_factors_item
self.nb_epochs = nb_epochs
self.regs = regs
self.train_corpus = None
self.test_corpus = None
self.num_users = None
self.num_tweets = None
self.model = None
def load_corpus(self, corpus_path=os.path.join(ROOT_DIR, 'corpus/iot-tweets-vector-v31.tsv'),
like_rt_graph=os.path.join(ROOT_DIR, 'corpus/like_rt_graph.adj')):
"""
Load the corpus and the Favorite/RT adjancy matrix
:param corpus_path: absolute path
:param like_rt_graph: absolute path
:return: pd.DataFrame object
"""
original_corpus = Parser.parsing_base_corpus_pandas(corpus_path, categorize=True)
self.num_users = len(original_corpus.User_Name.unique())
self.num_tweets = len(original_corpus.TweetID.unique())
corpus = original_corpus[['User_ID_u', 'TweetID_u']]
like_rt_file = open(like_rt_graph, 'r')
for line in like_rt_file:
parts = line[:-1].split(' ')
user_name = parts[0]
user_id = original_corpus[original_corpus.User_Name == user_name].User_ID_u.iloc[0]
tweets = parts[1:] # like or RT tweets
for tweet in tweets:
if len(original_corpus[original_corpus.TweetID == int(tweet)]) > 0:
tweet_id = original_corpus[original_corpus.TweetID == int(tweet)].TweetID_u.iloc[0]
corpus = corpus.append({'User_ID_u': user_id, 'TweetID_u': tweet_id}, ignore_index=True)
like_rt_file.close()
corpus['Rating'] = 1
for index, line in corpus.iterrows():
u = line.User_ID_u
# negative instances
for t in range(self.num_negatives):
j = np.random.randint(self.num_tweets)
                while ((corpus.User_ID_u == u) & (corpus.TweetID_u == j)).any():
j = np.random.randint(self.num_tweets)
corpus = corpus.append({'User_ID_u': u, 'TweetID_u': j, 'Rating': 0}, ignore_index=True)
self.train_corpus, self.test_corpus = train_test_split(corpus, test_size=0.2)
return self.train_corpus, self.test_corpus
def create_model(self):
"""
Build and compile a MasterModel depending on the method asked
:return:
"""
if self.method == "gmf":
self.model = GMFModel(self.num_users, self.num_tweets, self.num_factors_user, self.num_factors_item,
self.regs).get_model()
elif self.method == "mf":
self.model = MFModel(self.num_users, self.num_tweets, self.num_factors_user, self.num_factors_item,
self.regs).get_model()
elif self.method == "neumf":
self.model = NeuMFModel(self.num_users, self.num_tweets, self.num_factors_user, self.num_factors_item,
self.regs).get_model()
elif self.method == "nnmf":
self.model = NNMFModel(self.num_users, self.num_tweets, self.num_factors_user, self.num_factors_item,
self.regs).get_model()
else:
self.model = None
            raise Exception('Wrong argument! Must be among "gmf", "mf", "neumf", "nnmf"')
self.model.compile('adam', 'mean_squared_error')
def train(self, save=False):
"""
Train the model
:param save: boolean : save the model into a file
:return:
"""
assert self.train_corpus is not None
for e in range(self.nb_epochs):
self.model.fit([self.train_corpus.User_ID_u, self.train_corpus.TweetID_u], # input
self.train_corpus.Rating, # labels
batch_size=self.batch_size, epochs=1, verbose=0, shuffle=True)
if save:
                self.save(epoch=e)
def save(self, model_out_file=None, out_dir=os.path.join(ROOT_DIR, 'saved_models/reco_models'), epoch=0):
"""
Save the model into file
:param model_out_file: name of the file
:param out_dir: absolute path of the directory to save the model
:param epoch: num of epoch
:return: None
"""
if model_out_file is None:
model_out_file = self.method
if not os.path.exists(out_dir):
os.mkdir(out_dir)
# Saving weights
self.model.save_weights(os.path.join(out_dir, model_out_file + str(epoch) + '.model'), overwrite=True)
# Saving configuration
f = open(os.path.join(out_dir, model_out_file + str(epoch)) + '.yaml', 'w')
yaml_string = self.model.to_yaml()
f.write(yaml_string)
f.close()
def predict(self):
"""
Predict values based on test corpus
:return: predictions, 1-d array
"""
return self.model.predict([self.test_corpus.User_ID_u, self.test_corpus.TweetID_u])
def mae_metric(self, predictions):
"""
Return the MAE metrics based on predictions
:param predictions:
:return:
"""
y_true = self.test_corpus.Rating
y_hat = np.round(predictions, 0)
mae = mean_absolute_error(y_true, y_hat)
return mae
if __name__ == '__main__':
for method in ["gmf", "mf", "neumf", "nnmf"]:
m = ModelReco(method)
m.load_corpus(corpus_path=os.path.join(ROOT_DIR, 'corpus/iot-tweets-2009-2016-completv3.tsv'),
like_rt_graph=os.path.join(ROOT_DIR, 'corpus/likes_matrix.tsv'))
m.create_model()
m.train()
m.save()
pred = m.predict()
print('MAE ', method, m.mae_metric(pred))
# users = np.full(len(negatives), main_user, dtype='int32')
# predictions = model.predict([users, np.array(negatives)], batch_size=100, verbose=0)
#
# predictions_map = []
# predictions = predictions.flatten()
# for i in range(len(predictions)):
# predictions_map.append({'tweet': negatives[i], 'predict': predictions[i]})
#
# predictions_map = sorted(predictions_map, key=lambda k: k['predict'], reverse=True)
# k_first = 5
# recommended_tweets = [t['tweet'] for t in predictions_map[:k_first]]
# print(recommended_tweets)
|
the-stack_0_21190 | """
Author:
Haris Hasic, PhD Student
Institution:
Ishida Laboratory, Department of Computer Science, School of Computing, Tokyo Institute of Technology
Updated on:
May, 2021
"""
import re
from typing import List, Set, Tuple, Union
from rdkit.Chem.AllChem import Atom, Bond, FragmentOnBonds, Kekulize, Mol, RWMol, SanitizeMol
from .general_utils import CompoundRepresentationUtils
# noinspection PyArgumentList
class CompoundEditingUtils:
"""
Description:
Group of methods for handling chemical compound structure editing.
"""
@staticmethod
def __remove_floating_atoms(editable_compound: RWMol) -> None:
"""
Description:
Remove all wildcard atoms '*' that are disconnected from the rest of the atoms in the editable
chemical compound structure.
Input:
editable_compound (RWMol): The chemical compound structure as an Editable Mol object.
Output:
(None)
"""
floating_atoms = sorted([atom.GetIdx() for atom in editable_compound.GetAtoms()
if atom.GetSymbol() == "*" and atom.GetDegree() == 0], reverse=True)
for atom in floating_atoms:
editable_compound.RemoveAtom(atom)
@staticmethod
def __remove_marked_atoms(editable_compound: RWMol) -> None:
"""
Description:
Remove all atoms marked with the wildcard symbol '*' in the editable chemical compound structure.
Input:
editable_compound (RWMol): The chemical compound structure as an Editable Mol object.
Output:
(None)
"""
atoms_to_remove = []
for atom in editable_compound.GetAtoms():
if atom.GetSymbol() == "*":
atoms_to_remove.append(atom.GetIdx())
for atom in sorted(list(set(atoms_to_remove)), reverse=True):
editable_compound.RemoveAtom(atom)
@staticmethod
def __remove_marked_bonds(editable_compound: RWMol) -> None:
"""
Description:
Remove all bonds between atoms marked with the wildcard symbol '*' in the editable chemical compound
structure.
Input:
            editable_compound (RWMol): The chemical compound structure as an Editable Mol object.
Output:
(None)
"""
bonds_to_remove = []
for bond in editable_compound.GetBonds():
if editable_compound.GetAtomWithIdx(bond.GetBeginAtomIdx()).GetSymbol() == "*" and \
editable_compound.GetAtomWithIdx(bond.GetEndAtomIdx()).GetSymbol() == "*":
bonds_to_remove.append((bond.GetBeginAtomIdx(), bond.GetEndAtomIdx()))
for bond in sorted(list(set(bonds_to_remove)), reverse=True):
editable_compound.RemoveBond(bond[0], bond[1])
CompoundEditingUtils.__remove_floating_atoms(editable_compound)
@staticmethod
def __remove_bonds_manual(compound: Union[str, Mol],
bonds_to_remove: Union[List[Bond], Tuple[Bond], List[int], Set[int], Tuple[int]],
sanitize_result: bool = False, verbose: bool = False) -> Union[RWMol, None]:
"""
Description:
Manually remove the specified set of bonds from a chemical compound structure.
Input:
compound (str or Mol): The chemical compound structure as a SMILES string or a Mol object.
atoms_to_remove (List, Set, Tuple): The specified collection of Mol bonds or Mol bond indices to be removed.
sanitize_result (bool): The flag signalizing whether the fragmented compound should be sanitized after
fragmentation.
verbose (bool): The flag signalizing whether feedback messages should be displayed in the console.
Output:
(RWMol or None): The fragmented RWMol object or None if the fragmentation fails.
"""
if isinstance(compound, str):
compound = CompoundRepresentationUtils.string_to_mol(compound)
if compound is None:
if verbose:
print("Unable to construct a Mol object from the given SMILES string '{}'. Please make sure that "
"the SMILES string represents a valid chemical compound.".format(compound))
return None
editable_compound = RWMol(compound)
all_atom_indices = set()
for bond in bonds_to_remove:
if isinstance(bond, Bond):
all_atom_indices.add(bond.GetBeginAtomIdx())
all_atom_indices.add(bond.GetEndAtomIdx())
else:
all_atom_indices.add(compound.GetBonds()[bond].GetBeginAtomIdx())
all_atom_indices.add(compound.GetBonds()[bond].GetEndAtomIdx())
for atom_ind in sorted(list(all_atom_indices), reverse=True):
editable_compound.ReplaceAtom(atom_ind, Atom("*"))
try:
CompoundEditingUtils.__remove_marked_bonds(editable_compound)
if sanitize_result:
SanitizeMol(editable_compound)
return editable_compound
except Exception as exc_msg:
if verbose:
print("Exception occurred during the fragmentation process. Detailed message: \n{}".format(exc_msg))
return None
@staticmethod
def __remove_bonds_rdkit(compound: Union[str, Mol, RWMol],
bonds_to_remove: Union[List[Bond], Tuple[Bond], List[int], Set[int], Tuple[int]],
sanitize_result: bool = False, verbose: bool = False) -> Union[Mol, None]:
"""
Description:
Remove the specified set of bonds from a chemical compound structure using the built-in RDKit function.
Input:
compound (str or Mol): The chemical compound structure as a SMILES string or a Mol object.
bonds_to_remove (List, Set, Tuple): The specified collection of Mol bonds or Mol bond indices to be removed.
sanitize_result (bool): The flag signalizing whether the fragmented compound should be sanitized after
fragmentation.
verbose (bool): The flag signalizing whether feedback messages should be displayed in the console.
Output:
(Mol or None): The fragmented Mol object or None if the fragmentation fails.
"""
if isinstance(compound, str):
compound = CompoundRepresentationUtils.string_to_mol(compound)
if compound is None:
if verbose:
print("Unable to construct a Mol object from the given SMILES string '{}'. Please make sure that "
"the SMILES string represents a valid chemical compound.".format(compound))
return None
all_bond_indices = set()
for bond in bonds_to_remove:
if isinstance(bond, Bond):
all_bond_indices.add(bond.GetIdx())
else:
all_bond_indices.add(bond)
try:
fragmented_compound = FragmentOnBonds(compound, tuple(sorted(list(all_bond_indices))))
if sanitize_result:
SanitizeMol(fragmented_compound)
return fragmented_compound
except Exception as exc_msg:
if verbose:
print("Exception occurred during the fragmentation process. Detailed message: \n{}".format(exc_msg))
return None
@staticmethod
def fragment_compound_on_bonds(compound: Union[str, Mol],
bonds_to_remove: Union[List[Bond], Tuple[Bond], List[int], Set[int], Tuple[int]],
sanitize_result: bool = False, verbose: bool = False) -> Union[List[str], None]:
"""
Description:
Remove the specified set of bonds from a chemical compound structure and check the validity of the
generated compound fragments.
Input:
compound (str or Mol): The chemical compound structure as a SMILES string or a Mol object.
bonds_to_remove (List, Set, Tuple): The specified collection of Mol bonds or Mol bond indices to be removed.
out_str_format (str): The indicator of the given output fragment string representation format.
sanitize_result (bool): The flag signalizing whether the fragmented compound should be sanitized after
fragmentation.
verbose (bool): The flag signalizing whether feedback messages should be displayed in the console.
Output:
(List or None): The valid fragment SMILES or SMARTS strings or None if the fragmentation fails.
"""
if isinstance(compound, str):
compound = CompoundRepresentationUtils.string_to_mol(compound)
if compound is None:
if verbose:
print("Unable to construct a Mol object from the given SMILES string '{}'. Please make sure that "
"the SMILES string represents a valid chemical compound.".format(compound))
return None
all_bond_indices = set()
for bond in bonds_to_remove:
if isinstance(bond, Bond):
all_bond_indices.add(bond.GetIdx())
else:
all_bond_indices.add(bond)
editable_compound = RWMol(compound)
if sanitize_result:
Kekulize(editable_compound, clearAromaticFlags=True)
# noinspection PyTypeChecker
fragmented_compound = CompoundEditingUtils.__remove_bonds_rdkit(
compound=editable_compound,
bonds_to_remove=all_bond_indices,
sanitize_result=sanitize_result,
verbose=verbose
)
if fragmented_compound is None:
fragmented_compound = CompoundEditingUtils.__remove_bonds_manual(
compound=editable_compound,
bonds_to_remove=all_bond_indices,
sanitize_result=sanitize_result,
verbose=verbose
)
if fragmented_compound is None:
return None
else:
compound_fragments = CompoundRepresentationUtils.mol_to_string(
compound=fragmented_compound,
verbose=verbose
)
compound_fragments = re.sub(r"\*[-]?\d+", "*", re.sub(r"[-]?\d+\*", "*", compound_fragments))
if "." in compound_fragments:
return sorted(compound_fragments.split("."), key=len, reverse=True)
else:
return [compound_fragments]
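# Illustrative sketch (assumes RDKit is installed; the molecule and bond index
# below are made up): fragment n-butane on its central carbon-carbon bond.
#
#     fragments = CompoundEditingUtils.fragment_compound_on_bonds(
#         compound="CCCC",      # n-butane as a SMILES string
#         bonds_to_remove=[1],  # bond index 1 joins the two middle carbons
#         sanitize_result=True,
#         verbose=True,
#     )
#     # Expected result: two two-carbon fragments capped with wildcard '*' atoms.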
|
the-stack_0_21192 | # This file is dual licensed under the terms of the Apache License, Version
# 2.0, and the BSD License. See the LICENSE file in the root of this repository
# for complete details.
import struct
import typing
from cryptography import utils
from cryptography.exceptions import (
AlreadyFinalized,
InvalidKey,
UnsupportedAlgorithm,
_Reasons,
)
from cryptography.hazmat.backends import _get_backend
from cryptography.hazmat.backends.interfaces import HashBackend
from cryptography.hazmat.primitives import constant_time, hashes
from cryptography.hazmat.primitives.kdf import KeyDerivationFunction
def _int_to_u32be(n):
return struct.pack(">I", n)
class X963KDF(KeyDerivationFunction):
def __init__(
self,
algorithm: hashes.HashAlgorithm,
length: int,
sharedinfo: typing.Optional[bytes],
backend=None,
):
backend = _get_backend(backend)
max_len = algorithm.digest_size * (2 ** 32 - 1)
if length > max_len:
raise ValueError(
"Cannot derive keys larger than {} bits.".format(max_len)
)
if sharedinfo is not None:
utils._check_bytes("sharedinfo", sharedinfo)
self._algorithm = algorithm
self._length = length
self._sharedinfo = sharedinfo
if not isinstance(backend, HashBackend):
raise UnsupportedAlgorithm(
"Backend object does not implement HashBackend.",
_Reasons.BACKEND_MISSING_INTERFACE,
)
self._backend = backend
self._used = False
def derive(self, key_material: bytes) -> bytes:
if self._used:
raise AlreadyFinalized
self._used = True
utils._check_byteslike("key_material", key_material)
output = [b""]
outlen = 0
counter = 1
while self._length > outlen:
h = hashes.Hash(self._algorithm, self._backend)
h.update(key_material)
h.update(_int_to_u32be(counter))
if self._sharedinfo is not None:
h.update(self._sharedinfo)
output.append(h.finalize())
outlen += len(output[-1])
counter += 1
return b"".join(output)[: self._length]
def verify(self, key_material: bytes, expected_key: bytes) -> None:
if not constant_time.bytes_eq(self.derive(key_material), expected_key):
raise InvalidKey
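# Minimal usage sketch following the single-use KDF pattern enforced above
# (the key material and sharedinfo bytes are placeholders):
#
#     from cryptography.hazmat.primitives import hashes
#
#     xkdf = X963KDF(algorithm=hashes.SHA256(), length=32, sharedinfo=b"example-info")
#     key = xkdf.derive(b"input key material")
#
#     # derive()/verify() may only be called once per instance, so build a
#     # fresh KDF to check a derived key.
#     X963KDF(algorithm=hashes.SHA256(), length=32,
#             sharedinfo=b"example-info").verify(b"input key material", key)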
|
the-stack_0_21196 | import os, sys
import tensorflow as tf
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '2'
# change this as you see fit
image_path = sys.argv[1]
# Read in the image_data
image_data = tf.gfile.FastGFile(image_path, 'rb').read()
# Loads label file, strips off carriage return
label_lines = [line.rstrip() for line
in tf.gfile.GFile("retrained_labels.txt")]
# Unpersists graph from file
with tf.gfile.FastGFile("retrained_graph.pb", 'rb') as f:
graph_def = tf.GraphDef()
graph_def.ParseFromString(f.read())
tf.import_graph_def(graph_def, name='')
with tf.Session() as sess:
# Feed the image_data as input to the graph and get first prediction
softmax_tensor = sess.graph.get_tensor_by_name('final_result:0')
predictions = sess.run(softmax_tensor, \
{'DecodeJpeg/contents:0': image_data})
# Sort to show labels of first prediction in order of confidence
top_k = predictions[0].argsort()[-len(predictions[0]):][::-1]
for node_id in top_k:
human_string = label_lines[node_id]
score = predictions[0][node_id]
print('%s (score = %.5f)' % (human_string, score)) |
the-stack_0_21200 | """
Copyright (c) 2017 Red Hat, Inc
All rights reserved.
This software may be modified and distributed under the terms
of the BSD license. See the LICENSE file for details.
"""
import os
import sys
from pathlib import Path
from typing import List, Union
from atomic_reactor.plugins.check_and_set_platforms import CheckAndSetPlatformsPlugin
import pytest
import yaml
from flexmock import flexmock
from atomic_reactor.constants import (
PLUGIN_CHECK_AND_SET_PLATFORMS_KEY,
REPO_CONTAINER_CONFIG,
DOCKERFILE_FILENAME,
)
import atomic_reactor.utils.koji as koji_util
from atomic_reactor.source import SourceConfig
from tests.mock_env import MockEnv
KOJI_TARGET = "target"
# ClientSession is xmlrpc instance, we need to mock it explicitly
def mock_session(platforms):
arches = None
if platforms:
arches = ' '.join(sorted(platforms.keys()))
last_event_id = 456
build_target = {
'build_tag': 'build-tag',
'name': 'target-name',
'dest_tag_name': 'dest-tag'
}
session = flexmock()
(session
.should_receive('getLastEvent')
.and_return({'id': last_event_id}))
(session
.should_receive('getBuildTarget')
.with_args('target', event=last_event_id)
.and_return(build_target))
(session
.should_receive('getBuildConfig')
.with_args('build-tag', event=last_event_id)
.and_return({'arches': arches}))
return session
class MockSource(object):
def __init__(self, tmpdir):
self.path = str(tmpdir)
self.dockerfile_path = str(tmpdir)
self._config = None
def get_build_file_path(self):
return self.path, self.path
@property
def config(self):
self._config = self._config or SourceConfig(self.path)
return self._config
def make_reactor_config_map(platforms):
remote_hosts = {'slots_dir': '/some/slots', 'pools': {}}
if platforms:
for platform, enabled in platforms.items():
remote_hosts['pools'][platform] = {
'some-hostname': {
'enabled': enabled,
'username': 'somebody',
'auth': '/some/ssh/key',
'slots': 10,
'socket_path': '/some/socket',
},
}
return {'version': 1, 'koji': {'auth': {}, 'hub_url': 'test'}, 'remote_hosts': remote_hosts}
else:
return {'version': 1, 'koji': {'auth': {}, 'hub_url': 'test'}}
def write_container_yaml(source_dir: Path,
platform_exclude: Union[str, List[str]] = '',
platform_only: Union[str, List[str]] = ''):
platforms_dict = {}
if platform_exclude:
platforms_dict['platforms'] = {}
platforms_dict['platforms']['not'] = platform_exclude
if platform_only:
if 'platforms' not in platforms_dict:
platforms_dict['platforms'] = {}
platforms_dict['platforms']['only'] = platform_only
container_path = os.path.join(source_dir, REPO_CONTAINER_CONFIG)
with open(container_path, 'w') as f:
f.write(yaml.safe_dump(platforms_dict))
f.flush()
def mock_env(workflow, source_dir: Path, labels=None):
labels = labels or {}
env = (
MockEnv(workflow)
.for_plugin(CheckAndSetPlatformsPlugin.key)
.set_scratch(labels.get('scratch', False))
.set_isolated(labels.get('isolated', False))
)
env.workflow.source = MockSource(source_dir)
return env
def teardown_function(function):
sys.modules.pop('check_and_set_platforms', None)
@pytest.mark.parametrize(('platforms', 'platform_exclude', 'platform_only', 'result'), [
(None, '', 'ppc64le', None),
({'x86_64': True, 'ppc64le': True},
'', 'ppc64le', ['ppc64le']),
({'x86_64': True, 'spam': True, 'bacon': True, 'toast': True, 'ppc64le': True},
['spam', 'bacon', 'eggs', 'toast'], '',
['x86_64', 'ppc64le']),
({'ppc64le': True, 'spam': True, 'bacon': True, 'toast': True},
['spam', 'bacon', 'eggs', 'toast'], 'ppc64le',
['ppc64le']),
({'x86_64': True, 'bacon': True, 'toast': True},
'toast', ['x86_64', 'ppc64le'], ['x86_64']),
({'x86_64': True, 'toast': True},
'toast', 'x86_64', ['x86_64']),
({'x86_64': True, 'spam': True, 'bacon': True, 'toast': True},
['spam', 'bacon', 'eggs', 'toast'], ['x86_64', 'ppc64le'], ['x86_64']),
({'x86_64': True, 'ppc64le': True},
'', '', ['x86_64', 'ppc64le'])
])
def test_check_and_set_platforms(workflow, source_dir, caplog,
platforms, platform_exclude, platform_only, result):
write_container_yaml(source_dir, platform_exclude, platform_only)
env = mock_env(workflow, source_dir)
session = mock_session(platforms)
flexmock(koji_util).should_receive('create_koji_session').and_return(session)
env.set_reactor_config(make_reactor_config_map(platforms))
env.set_plugin_args({'koji_target': KOJI_TARGET})
runner = env.create_runner()
plugin_result = runner.run()
if platforms:
koji_msg = "Koji platforms are {0}".format(sorted(platforms.keys()))
assert koji_msg in caplog.text
assert plugin_result[PLUGIN_CHECK_AND_SET_PLATFORMS_KEY]
assert sorted(plugin_result[PLUGIN_CHECK_AND_SET_PLATFORMS_KEY]) == sorted(result)
else:
assert plugin_result[PLUGIN_CHECK_AND_SET_PLATFORMS_KEY] is None
assert "No platforms found in koji target" in caplog.text
@pytest.mark.parametrize(('labels', 'platforms', 'user_platforms', 'platform_only',
'result'), [
({}, None,
None, '', None),
({}, {'x86_64': True, 'arm64': True},
['spam', 'bacon'], '', ['arm64', 'x86_64']),
({'isolated': True}, {'spam': True, 'bacon': True},
['x86_64', 'arm64'], '', ['arm64', 'x86_64']),
({'isolated': True}, {'x86_64': True, 'arm64': True},
None, '', ['arm64', 'x86_64']),
({'isolated': True}, None,
['x86_64', 'arm64'], '', None),
({'scratch': True}, {'spam': True, 'bacon': True},
['x86_64', 'arm64'], '', ['arm64', 'x86_64']),
({'scratch': True}, {'x86_64': True, 'arm64': True},
None, '', ['arm64', 'x86_64']),
({'scratch': True}, None,
['x86_64', 'arm64'], '', None),
({'scratch': True}, {'x86_64': True, 'arm64': True},
['x86_64', 'arm64'], 'x86_64', ['x86_64']),
({'scratch': True}, {'x86_64': True, 'arm64': True, 's390x': True},
['x86_64', 'arm64'], 'x86_64', ['x86_64', 'arm64']),
])
def test_check_isolated_or_scratch(workflow, source_dir, caplog,
labels, platforms, user_platforms, platform_only,
result):
write_container_yaml(source_dir, platform_only=platform_only)
env = mock_env(workflow, source_dir, labels=labels)
if user_platforms:
env.set_user_params(platforms=user_platforms)
session = mock_session(platforms)
flexmock(koji_util).should_receive('create_koji_session').and_return(session)
env.set_reactor_config(make_reactor_config_map(platforms))
env.set_plugin_args({'koji_target': KOJI_TARGET})
runner = env.create_runner()
plugin_result = runner.run()
if platforms:
koji_msg = "Koji platforms are {0}".format(sorted(platforms.keys()))
assert koji_msg in caplog.text
diffplat = user_platforms and set(platforms.keys()) != set(user_platforms)
if labels and diffplat:
sort_platforms = sorted(user_platforms)
user_msg = "Received user specified platforms {0}".format(sort_platforms)
assert user_msg in caplog.text
else:
assert "No platforms found in koji target" in caplog.text
if result:
assert plugin_result[PLUGIN_CHECK_AND_SET_PLATFORMS_KEY]
assert sorted(plugin_result[PLUGIN_CHECK_AND_SET_PLATFORMS_KEY]) == sorted(result)
else:
assert plugin_result[PLUGIN_CHECK_AND_SET_PLATFORMS_KEY] is None
@pytest.mark.parametrize(('platforms', 'platform_only', 'result'), [
(None, 'ppc64le', None),
({'x86_64': True, 'ppc64le': True}, '', ['x86_64', 'ppc64le']),
({'x86_64': True, 'ppc64le': True}, 'ppc64le', ['ppc64le']),
])
def test_check_and_set_platforms_no_koji(workflow, source_dir, caplog,
platforms, platform_only, result):
write_container_yaml(source_dir, platform_only=platform_only)
env = mock_env(workflow, source_dir)
if platforms:
env.set_user_params(platforms=list(platforms))
env.set_reactor_config(make_reactor_config_map(platforms))
runner = env.create_runner()
if platforms:
plugin_result = runner.run()
# Build up the message to avoid wrapping
no_koji_msg = "No koji platforms. "
platform_msg = "User specified platforms are {0}".format(sorted(platforms.keys()))
user_msg = no_koji_msg + platform_msg
assert user_msg in caplog.text
assert plugin_result[PLUGIN_CHECK_AND_SET_PLATFORMS_KEY]
assert sorted(plugin_result[PLUGIN_CHECK_AND_SET_PLATFORMS_KEY]) == sorted(result)
else:
with pytest.raises(Exception) as e:
runner.run()
assert "no koji target or platform list" in str(e.value)
@pytest.mark.parametrize(('platforms', 'platform_only'), [
({'x86_64': True}, 'ppc64le'),
({'x86_64': True, 'ppc64le': True}, 's390x'),
({'s390x': True, 'ppc64le': True}, 'x86_64'),
])
def test_check_and_set_platforms_no_platforms_in_limits(
workflow, source_dir, caplog, platforms, platform_only
):
write_container_yaml(source_dir, platform_only=platform_only)
env = mock_env(workflow, source_dir)
if platforms:
env.set_user_params(platforms=list(platforms))
env.set_reactor_config(make_reactor_config_map(platforms))
runner = env.create_runner()
with pytest.raises(Exception) as e:
runner.run()
assert f"platforms in limits : {[]}" in caplog.text
assert "platforms in limits are empty" in caplog.text
assert "No platforms to build for" in str(e.value)
@pytest.mark.parametrize(('platforms', 'platform_only', 'cluster_platforms', 'result'), [
('x86_64 ppc64le', '', {'x86_64': True}, ['x86_64']),
('x86_64 ppc64le arm64', ['x86_64', 'arm64'], {'x86_64': True}, ['x86_64']),
])
def test_platforms_from_cluster_config(workflow, source_dir,
platforms, platform_only, cluster_platforms, result):
write_container_yaml(source_dir, platform_only=platform_only)
env = mock_env(workflow, source_dir)
if platforms:
env.set_user_params(platforms=platforms.split())
env.set_reactor_config(make_reactor_config_map(cluster_platforms))
runner = env.create_runner()
plugin_result = runner.run()
if platforms:
assert plugin_result[PLUGIN_CHECK_AND_SET_PLATFORMS_KEY]
assert sorted(plugin_result[PLUGIN_CHECK_AND_SET_PLATFORMS_KEY]) == sorted(result)
else:
assert plugin_result[PLUGIN_CHECK_AND_SET_PLATFORMS_KEY] is None
@pytest.mark.parametrize(('koji_platforms', 'cluster_platforms', 'result', 'skips', 'fails'), [
(None, None, None, None, None),
(['x86_64'], None, None, None, 'no_platforms'),
(['x86_64'], {'ppc64le': True}, None, None, 'no_platforms'),
(['x86_64', 'ppc64le'], {'x86_64': True, 'ppc64le': True}, ['x86_64', 'ppc64le'], None, None),
(['x86_64', 'ppc64le'], {'x86_64': False, 'ppc64le': True}, None, None, 'disabled'),
(['x86_64', 'ppc64le'], {'x86_64': False, 'ppc64le': False}, None, None, 'disabled'),
(['x86_64', 'ppc64le'], {'x86_64': True}, ['x86_64'], ['ppc64le'], None),
(['x86_64', 'ppc64le', 's390x'], {'x86_64': True}, ['x86_64'], ['ppc64le', 's390x'], None),
])
def test_disabled_clusters(workflow, source_dir, caplog, koji_platforms,
cluster_platforms, result, skips, fails):
write_container_yaml(source_dir)
env = mock_env(workflow, source_dir)
new_koji_platforms = None
if koji_platforms:
new_koji_platforms = {k: True for k in koji_platforms}
session = mock_session(new_koji_platforms)
flexmock(koji_util).should_receive('create_koji_session').and_return(session)
env.set_reactor_config(make_reactor_config_map(cluster_platforms))
env.set_plugin_args({'koji_target': KOJI_TARGET})
runner = env.create_runner()
if fails:
with pytest.raises(Exception) as e:
runner.run()
if fails == 'no_platforms':
msg = 'No platforms to build for'
elif fails == 'disabled':
msg = 'Platforms specified in config map, but have all remote hosts disabled'
assert msg in str(e.value)
else:
plugin_result = runner.run()
if koji_platforms:
koji_msg = "Koji platforms are {0}".format(sorted(koji_platforms))
assert koji_msg in caplog.text
assert plugin_result[PLUGIN_CHECK_AND_SET_PLATFORMS_KEY]
assert sorted(plugin_result[PLUGIN_CHECK_AND_SET_PLATFORMS_KEY]) == sorted(result)
if skips:
for skip in skips:
msg = "No remote hosts found for platform '{}' in reactor config map, " \
"skipping".format(skip)
assert msg in caplog.text
else:
assert plugin_result[PLUGIN_CHECK_AND_SET_PLATFORMS_KEY] is None
assert "No platforms found in koji target" in caplog.text
def test_init_root_build_dir(workflow, source_dir):
# platform -> enabled
platforms = {"x86_64": True, "ppc64le": True}
env = mock_env(workflow, source_dir)
env.set_user_params(platforms=list(platforms))
env.set_reactor_config(make_reactor_config_map(platforms))
runner = env.create_runner()
# Prepare content of the source directory. All of them must be available in
# the build directories.
source_dir.joinpath(DOCKERFILE_FILENAME).write_text("FROM fedora:35", "utf-8")
write_container_yaml(source_dir)
runner.run()
assert workflow.build_dir.has_sources
for platform, file_name in zip(platforms.keys(), [DOCKERFILE_FILENAME, REPO_CONTAINER_CONFIG]):
copied_file = workflow.build_dir.path / platform / file_name
assert copied_file.exists()
original_content = source_dir.joinpath(file_name).read_text("utf-8")
assert original_content == copied_file.read_text("utf-8")
@pytest.mark.parametrize('input_platforms,excludes,only,expected', [
(['x86_64', 'ppc64le'], [], ['ppc64le'], ['ppc64le']),
(
['x86_64', 'spam', 'bacon', 'toast', 'ppc64le'],
['spam', 'bacon', 'eggs', 'toast'],
[],
['x86_64', 'ppc64le'],
),
(
['ppc64le', 'spam', 'bacon', 'toast'],
['spam', 'bacon', 'eggs', 'toast'],
['ppc64le'],
['ppc64le'],
),
# only takes the priority
(
['ppc64le', 'spam', 'bacon', 'toast'],
['bacon', 'eggs', 'toast'],
['ppc64le'],
['ppc64le'], # spam is not excluded, but only include ppc64le
),
(
['x86_64', 'bacon', 'toast'],
['toast'],
['x86_64', 'ppc64le'],
['x86_64']
),
(
['x86_64', 'spam', 'bacon', 'toast'],
['spam', 'bacon', 'eggs', 'toast'],
['x86_64', 'ppc64le'],
['x86_64'],
),
(['x86_64', 'ppc64le'], [], [], ['x86_64', 'ppc64le']),
(['x86_64', 'ppc64le'], ["x86_64"], ["x86_64"], []),
])
def test_limit_the_platforms(input_platforms, excludes, only, expected, workflow, caplog):
write_container_yaml(workflow.source.path,
platform_exclude=excludes,
platform_only=only)
plugin = CheckAndSetPlatformsPlugin(workflow)
limited_platforms = plugin._limit_platforms(input_platforms)
assert sorted(expected) == sorted(limited_platforms)
if only and sorted(only) == sorted(excludes):
assert "only and not platforms are the same" in caplog.text
|
the-stack_0_21201 | """
SONiC ConfigDB connection module
Example:
# Write to config DB
config_db = ConfigDBConnector()
config_db.connect()
config_db.mod_entry('BGP_NEIGHBOR', '10.0.0.1', {
'admin_status': state
})
# Daemon to watch config change in certain table:
config_db = ConfigDBConnector()
handler = lambda table, key, data: print (key, data)
config_db.subscribe('BGP_NEIGHBOR', handler)
config_db.connect()
config_db.listen()
"""
import sys
import time
from .dbconnector import SonicV2Connector
PY3K = sys.version_info >= (3, 0)
class ConfigDBConnector(SonicV2Connector):
INIT_INDICATOR = 'CONFIG_DB_INITIALIZED'
TABLE_NAME_SEPARATOR = '|'
KEY_SEPARATOR = '|'
def __init__(self, decode_responses=True, **kwargs):
# By default, connect to Redis through TCP, which does not requires root.
if len(kwargs) == 0:
kwargs['host'] = '127.0.0.1'
if PY3K:
if not decode_responses:
raise NotImplementedError('ConfigDBConnector with decode_responses=False is not supported in python3')
kwargs['decode_responses'] = True
"""The ConfigDBConnector class will accept the parameter 'namespace' which is used to
load the database_config and connect to the redis DB instances in that namespace.
By default namespace is set to None, which means it connects to local redis DB instances.
When connecting to a different namespace set the use_unix_socket_path flag to true.
Eg. ConfigDBConnector(use_unix_socket_path=True, namespace=namespace)
'namespace' is implicitly passed to the parent SonicV2Connector class.
"""
super(ConfigDBConnector, self).__init__(**kwargs)
self.handlers = {}
def __wait_for_db_init(self):
client = self.get_redis_client(self.db_name)
pubsub = client.pubsub()
initialized = client.get(self.INIT_INDICATOR)
if not initialized:
pattern = "__keyspace@{}__:{}".format(self.get_dbid(self.db_name), self.INIT_INDICATOR)
pubsub.psubscribe(pattern)
for item in pubsub.listen():
if item['type'] == 'pmessage':
key = item['channel'].split(':', 1)[1]
if key == self.INIT_INDICATOR:
initialized = client.get(self.INIT_INDICATOR)
if initialized:
break
pubsub.punsubscribe(pattern)
def db_connect(self, dbname, wait_for_init=False, retry_on=False):
self.db_name = dbname
self.KEY_SEPARATOR = self.TABLE_NAME_SEPARATOR = self.get_db_separator(self.db_name)
SonicV2Connector.connect(self, self.db_name, retry_on)
if wait_for_init:
self.__wait_for_db_init()
def connect(self, wait_for_init=True, retry_on=False):
self.db_connect('CONFIG_DB', wait_for_init, retry_on)
def subscribe(self, table, handler):
"""Set a handler to handle config change in certain table.
Note that a single handler can be registered to different tables by
        calling this function multiple times.
Args:
table: Table name.
handler: a handler function that has signature of handler(table_name, key, data)
"""
self.handlers[table] = handler
def unsubscribe(self, table):
"""Remove registered handler from a certain table.
Args:
table: Table name.
"""
if table in self.handlers:
self.handlers.pop(table)
def __fire(self, table, key, data):
if table in self.handlers:
handler = self.handlers[table]
handler(table, key, data)
def listen(self):
"""Start listen Redis keyspace events and will trigger corresponding handlers when content of a table changes.
"""
self.pubsub = self.get_redis_client(self.db_name).pubsub()
self.pubsub.psubscribe("__keyspace@{}__:*".format(self.get_dbid(self.db_name)))
for item in self.pubsub.listen():
if item['type'] == 'pmessage':
key = item['channel'].split(':', 1)[1]
try:
(table, row) = key.split(self.TABLE_NAME_SEPARATOR, 1)
if table in self.handlers:
client = self.get_redis_client(self.db_name)
data = self.raw_to_typed(client.hgetall(key))
self.__fire(table, row, data)
except ValueError:
                    pass  # Ignore non table-formatted redis entries
def raw_to_typed(self, raw_data):
if raw_data is None:
return None
typed_data = {}
for raw_key in raw_data:
key = raw_key
# "NULL:NULL" is used as a placeholder for objects with no attributes
if key == "NULL":
pass
# A column key with ending '@' is used to mark list-typed table items
# TODO: Replace this with a schema-based typing mechanism.
elif key.endswith("@"):
value = raw_data[raw_key].split(',')
typed_data[key[:-1]] = value
else:
typed_data[key] = raw_data[raw_key]
return typed_data
def typed_to_raw(self, typed_data):
if typed_data is None:
return None
elif typed_data == {}:
return { "NULL": "NULL" }
raw_data = {}
for key in typed_data:
value = typed_data[key]
if type(value) is list:
raw_data[key+'@'] = ','.join(value)
else:
raw_data[key] = str(value)
return raw_data
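    # Illustrative round trip of the conventions above (field names and values
    # are made up; the methods are called on a ConfigDBConnector instance):
    # a trailing '@' in a raw key marks a list-typed field, and
    # {"NULL": "NULL"} is the placeholder for an entry with no attributes.
    #
    #     config_db.raw_to_typed({"members@": "Ethernet0,Ethernet4", "mtu": "9100"})
    #     #   -> {"members": ["Ethernet0", "Ethernet4"], "mtu": "9100"}
    #     config_db.typed_to_raw({"members": ["Ethernet0", "Ethernet4"], "mtu": "9100"})
    #     #   -> {"members@": "Ethernet0,Ethernet4", "mtu": "9100"}
    #     config_db.typed_to_raw({})
    #     #   -> {"NULL": "NULL"}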
@staticmethod
def serialize_key(key):
if type(key) is tuple:
return ConfigDBConnector.KEY_SEPARATOR.join(key)
else:
return str(key)
@staticmethod
def deserialize_key(key):
tokens = key.split(ConfigDBConnector.KEY_SEPARATOR)
if len(tokens) > 1:
return tuple(tokens)
else:
return key
def set_entry(self, table, key, data):
"""Write a table entry to config db.
Remove extra fields in the db which are not in the data.
Args:
table: Table name.
key: Key of table entry, or a tuple of keys if it is a multi-key table.
data: Table row data in a form of dictionary {'column_key': 'value', ...}.
Pass {} as data will create an entry with no column if not already existed.
Pass None as data will delete the entry.
"""
key = self.serialize_key(key)
client = self.get_redis_client(self.db_name)
_hash = '{}{}{}'.format(table.upper(), self.TABLE_NAME_SEPARATOR, key)
if data is None:
client.delete(_hash)
else:
original = self.get_entry(table, key)
client.hmset(_hash, self.typed_to_raw(data))
for k in [ k for k in original if k not in data ]:
if type(original[k]) == list:
k = k + '@'
client.hdel(_hash, self.serialize_key(k))
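    # Illustrative sketch of the data-argument semantics documented above
    # (table, key and field names are made up; config_db is a connected
    # ConfigDBConnector instance):
    #
    #     config_db.set_entry('VLAN', 'Vlan100', {'vlanid': '100'})  # set fields, drop extras
    #     config_db.set_entry('VLAN', 'Vlan100', {})                 # entry with no columns
    #     config_db.set_entry('VLAN', 'Vlan100', None)               # delete the entry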
def mod_entry(self, table, key, data):
"""Modify a table entry to config db.
Args:
table: Table name.
key: Key of table entry, or a tuple of keys if it is a multi-key table.
data: Table row data in a form of dictionary {'column_key': 'value', ...}.
Pass {} as data will create an entry with no column if not already existed.
Pass None as data will delete the entry.
"""
key = self.serialize_key(key)
client = self.get_redis_client(self.db_name)
_hash = '{}{}{}'.format(table.upper(), self.TABLE_NAME_SEPARATOR, key)
if data is None:
client.delete(_hash)
else:
client.hmset(_hash, self.typed_to_raw(data))
def get_entry(self, table, key):
"""Read a table entry from config db.
Args:
table: Table name.
key: Key of table entry, or a tuple of keys if it is a multi-key table.
Returns:
Table row data in a form of dictionary {'column_key': 'value', ...}
Empty dictionary if table does not exist or entry does not exist.
"""
key = self.serialize_key(key)
client = self.get_redis_client(self.db_name)
_hash = '{}{}{}'.format(table.upper(), self.TABLE_NAME_SEPARATOR, key)
return self.raw_to_typed(client.hgetall(_hash))
def get_keys(self, table, split=True):
"""Read all keys of a table from config db.
Args:
table: Table name.
split: split the first part and return second.
Useful for keys with two parts <tablename>:<key>
Returns:
List of keys.
"""
client = self.get_redis_client(self.db_name)
pattern = '{}{}*'.format(table.upper(), self.TABLE_NAME_SEPARATOR)
keys = client.keys(pattern)
data = []
for key in keys:
try:
if split:
(_, row) = key.split(self.TABLE_NAME_SEPARATOR, 1)
data.append(self.deserialize_key(row))
else:
data.append(self.deserialize_key(key))
except ValueError:
                pass  # Ignore non table-formatted redis entries
return data
def get_table(self, table):
"""Read an entire table from config db.
Args:
table: Table name.
Returns:
Table data in a dictionary form of
{ 'row_key': {'column_key': value, ...}, ...}
or { ('l1_key', 'l2_key', ...): {'column_key': value, ...}, ...} for a multi-key table.
Empty dictionary if table does not exist.
"""
client = self.get_redis_client(self.db_name)
pattern = '{}{}*'.format(table.upper(), self.TABLE_NAME_SEPARATOR)
keys = client.keys(pattern)
data = {}
for key in keys:
try:
entry = self.raw_to_typed(client.hgetall(key))
if entry is not None:
(_, row) = key.split(self.TABLE_NAME_SEPARATOR, 1)
data[self.deserialize_key(row)] = entry
except ValueError:
                pass  # Ignore non table-formatted redis entries
return data
def delete_table(self, table):
"""Delete an entire table from config db.
Args:
table: Table name.
"""
client = self.get_redis_client(self.db_name)
pattern = '{}{}*'.format(table.upper(), self.TABLE_NAME_SEPARATOR)
keys = client.keys(pattern)
data = {}
for key in keys:
client.delete(key)
def mod_config(self, data):
"""Write multiple tables into config db.
Extra entries/fields in the db which are not in the data are kept.
Args:
data: config data in a dictionary form
{
'TABLE_NAME': { 'row_key': {'column_key': 'value', ...}, ...},
'MULTI_KEY_TABLE_NAME': { ('l1_key', 'l2_key', ...) : {'column_key': 'value', ...}, ...},
...
}
"""
for table_name in data:
table_data = data[table_name]
            if table_data is None:
self.delete_table(table_name)
continue
for key in table_data:
self.mod_entry(table_name, key, table_data[key])
def get_config(self):
"""Read all config data.
Returns:
Config data in a dictionary form of
{
'TABLE_NAME': { 'row_key': {'column_key': 'value', ...}, ...},
'MULTI_KEY_TABLE_NAME': { ('l1_key', 'l2_key', ...) : {'column_key': 'value', ...}, ...},
...
}
"""
client = self.get_redis_client(self.db_name)
keys = client.keys('*')
data = {}
for key in keys:
try:
(table_name, row) = key.split(self.TABLE_NAME_SEPARATOR, 1)
entry = self.raw_to_typed(client.hgetall(key))
                if entry is not None:
data.setdefault(table_name, {})[self.deserialize_key(row)] = entry
except ValueError:
                pass  # Ignore non table-formatted redis entries
return data
class ConfigDBPipeConnector(ConfigDBConnector):
REDIS_SCAN_BATCH_SIZE = 30
def __init__(self, **kwargs):
super(ConfigDBPipeConnector, self).__init__(**kwargs)
def __delete_entries(self, client, pipe, pattern, cursor):
"""Helper method to delete table entries from config db using Redis pipeline
with batch size of REDIS_SCAN_BATCH_SIZE.
The caller should call pipeline execute once ready
Args:
client: Redis client
pipe: Redis DB pipe
pattern: key pattern
cursor: position to start scanning from
Returns:
            cur: position of next item to scan
"""
cur, keys = client.scan(cursor=cursor, match=pattern, count=self.REDIS_SCAN_BATCH_SIZE)
for key in keys:
pipe.delete(key)
return cur
def __delete_table(self, client, pipe, table):
"""Helper method to delete table entries from config db using Redis pipeline.
The caller should call pipeline execute once ready
Args:
client: Redis client
pipe: Redis DB pipe
table: Table name.
"""
pattern = '{}{}*'.format(table.upper(), self.TABLE_NAME_SEPARATOR)
cur = self.__delete_entries(client, pipe, pattern, 0)
while cur != 0:
cur = self.__delete_entries(client, pipe, pattern, cur)
def __mod_entry(self, pipe, table, key, data):
"""Modify a table entry to config db.
Args:
            pipe: Redis DB pipe
            table: Table name.
key: Key of table entry, or a tuple of keys if it is a multi-key table.
data: Table row data in a form of dictionary {'column_key': 'value', ...}.
Pass {} as data will create an entry with no column if not already existed.
Pass None as data will delete the entry.
"""
key = self.serialize_key(key)
_hash = '{}{}{}'.format(table.upper(), self.TABLE_NAME_SEPARATOR, key)
if data is None:
pipe.delete(_hash)
else:
pipe.hmset(_hash, self.typed_to_raw(data))
def mod_config(self, data):
"""Write multiple tables into config db.
Extra entries/fields in the db which are not in the data are kept.
Args:
data: config data in a dictionary form
{
'TABLE_NAME': { 'row_key': {'column_key': 'value', ...}, ...},
'MULTI_KEY_TABLE_NAME': { ('l1_key', 'l2_key', ...) : {'column_key': 'value', ...}, ...},
...
}
"""
client = self.get_redis_client(self.db_name)
pipe = client.pipeline()
for table_name in data:
table_data = data[table_name]
if table_data is None:
self.__delete_table(client, pipe, table_name)
continue
for key in table_data:
self.__mod_entry(pipe, table_name, key, table_data[key])
pipe.execute()
def __get_config(self, client, pipe, data, cursor):
"""Read config data in batches of size REDIS_SCAN_BATCH_SIZE using Redis pipelines
Args:
client: Redis client
pipe: Redis DB pipe
data: config dictionary
cursor: position to start scanning from
Returns:
            cur: position of next item to scan
"""
cur, keys = client.scan(cursor=cursor, match='*', count=self.REDIS_SCAN_BATCH_SIZE)
keys = [key for key in keys if key != self.INIT_INDICATOR]
for key in keys:
pipe.hgetall(key)
records = pipe.execute()
for index, key in enumerate(keys):
(table_name, row) = key.split(self.TABLE_NAME_SEPARATOR, 1)
entry = self.raw_to_typed(records[index])
if entry is not None:
data.setdefault(table_name, {})[self.deserialize_key(row)] = entry
return cur
def get_config(self):
"""Read all config data.
Returns:
Config data in a dictionary form of
{
'TABLE_NAME': { 'row_key': {'column_key': 'value', ...}, ...},
'MULTI_KEY_TABLE_NAME': { ('l1_key', 'l2_key', ...) : {'column_key': 'value', ...}, ...},
...
}
"""
client = self.get_redis_client(self.db_name)
pipe = client.pipeline()
data = {}
cur = self.__get_config(client, pipe, data, 0)
while cur != 0:
cur = self.__get_config(client, pipe, data, cur)
return data
|
the-stack_0_21204 | # ----------------------------------------------------------------------------
# - Open3D: www.open3d.org -
# ----------------------------------------------------------------------------
# The MIT License (MIT)
#
# Copyright (c) 2018-2021 www.open3d.org
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.
# ----------------------------------------------------------------------------
"""Online 3D depth video processing pipeline.
- Connects to a RGBD camera or RGBD video file (currently
RealSense camera and bag file format are supported).
- Captures / reads color and depth frames. Allow recording from camera.
- Convert frames to point cloud, optionally with normals.
- Visualize point cloud video and results.
- Save point clouds and RGBD images for selected frames.
"""
import os
import json
import time
import logging as log
import argparse
import threading
from datetime import datetime
from concurrent.futures import ThreadPoolExecutor
import numpy as np
import open3d as o3d
import open3d.visualization.gui as gui
import open3d.visualization.rendering as rendering
# Camera and processing
class PipelineModel:
"""Controls IO (camera, video file, recording, saving frames). Methods run
in worker threads."""
def __init__(self,
update_view,
camera_config_file=None,
rgbd_video=None,
device=None):
"""Initialize.
Args:
update_view (callback): Callback to update display elements for a
frame.
camera_config_file (str): Camera configuration json file.
rgbd_video (str): RS bag file containing the RGBD video. If this is
provided, connected cameras are ignored.
device (str): Compute device (e.g.: 'cpu:0' or 'cuda:0').
"""
self.update_view = update_view
if device:
self.device = device.lower()
else:
self.device = 'cuda:0' if o3d.core.cuda.is_available() else 'cpu:0'
self.o3d_device = o3d.core.Device(self.device)
self.video = None
self.camera = None
self.flag_capture = False
self.cv_capture = threading.Condition() # condition variable
self.recording = False # Are we currently recording
self.flag_record = False # Request to start/stop recording
if rgbd_video: # Video file
self.video = o3d.t.io.RGBDVideoReader.create(rgbd_video)
self.rgbd_metadata = self.video.metadata
self.status_message = f"Video {rgbd_video} opened."
else: # RGBD camera
now = datetime.now().strftime('%Y-%m-%d_%H-%M-%S')
filename = f"{now}.bag"
self.camera = o3d.t.io.RealSenseSensor()
if camera_config_file:
with open(camera_config_file) as ccf:
self.camera.init_sensor(o3d.t.io.RealSenseSensorConfig(
json.load(ccf)),
filename=filename)
else:
self.camera.init_sensor(filename=filename)
self.camera.start_capture(start_record=False)
self.rgbd_metadata = self.camera.get_metadata()
self.status_message = f"Camera {self.rgbd_metadata.serial_number} opened."
log.info(self.rgbd_metadata)
# RGBD -> PCD
self.extrinsics = o3d.core.Tensor.eye(4,
dtype=o3d.core.Dtype.Float32,
device=self.o3d_device)
self.intrinsic_matrix = o3d.core.Tensor(
self.rgbd_metadata.intrinsics.intrinsic_matrix,
dtype=o3d.core.Dtype.Float32,
device=self.o3d_device)
self.depth_max = 3.0 # m
self.pcd_stride = 2 # downsample point cloud, may increase frame rate
self.flag_normals = False
self.flag_save_rgbd = False
self.flag_save_pcd = False
self.pcd_frame = None
self.rgbd_frame = None
self.executor = ThreadPoolExecutor(max_workers=3,
thread_name_prefix='Capture-Save')
self.flag_exit = False
@property
def max_points(self):
"""Max points in one frame for the camera or RGBD video resolution."""
return self.rgbd_metadata.width * self.rgbd_metadata.height
@property
def vfov(self):
"""Camera or RGBD video vertical field of view."""
return np.rad2deg(2 * np.arctan(self.intrinsic_matrix[1, 2].item() /
self.intrinsic_matrix[1, 1].item()))
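    # Hedged worked example of the vfov formula above (numbers are made up, not from
    # a real sensor): with fy = intrinsic_matrix[1, 1] = 600 px and
    # cy = intrinsic_matrix[1, 2] = 360 px, vfov = degrees(2 * atan(360 / 600))
    # which is approximately 61.9 degrees.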
def run(self):
"""Run pipeline."""
n_pts = 0
frame_id = 0
t1 = time.perf_counter()
if self.video:
self.rgbd_frame = self.video.next_frame()
else:
self.rgbd_frame = self.camera.capture_frame(
wait=True, align_depth_to_color=True)
pcd_errors = 0
while (not self.flag_exit and
(self.video is None or # Camera
(self.video and not self.video.is_eof()))): # Video
if self.video:
future_rgbd_frame = self.executor.submit(self.video.next_frame)
else:
future_rgbd_frame = self.executor.submit(
self.camera.capture_frame,
wait=True,
align_depth_to_color=True)
if self.flag_save_pcd:
self.save_pcd()
self.flag_save_pcd = False
try:
self.rgbd_frame = self.rgbd_frame.to(self.o3d_device)
self.pcd_frame = o3d.t.geometry.PointCloud.create_from_rgbd_image(
self.rgbd_frame, self.intrinsic_matrix, self.extrinsics,
self.rgbd_metadata.depth_scale, self.depth_max,
self.pcd_stride, self.flag_normals)
depth_in_color = self.rgbd_frame.depth.colorize_depth(
self.rgbd_metadata.depth_scale, 0, self.depth_max)
except RuntimeError:
pcd_errors += 1
if self.pcd_frame.is_empty():
log.warning(f"No valid depth data in frame {frame_id})")
continue
n_pts += self.pcd_frame.point['positions'].shape[0]
if frame_id % 60 == 0 and frame_id > 0:
t0, t1 = t1, time.perf_counter()
log.debug(f"\nframe_id = {frame_id}, \t {(t1-t0)*1000./60:0.2f}"
f"ms/frame \t {(t1-t0)*1e9/n_pts} ms/Mp\t")
n_pts = 0
frame_elements = {
'color': self.rgbd_frame.color.cpu(),
'depth': depth_in_color.cpu(),
'pcd': self.pcd_frame.cpu(),
'status_message': self.status_message
}
self.update_view(frame_elements)
if self.flag_save_rgbd:
self.save_rgbd()
self.flag_save_rgbd = False
self.rgbd_frame = future_rgbd_frame.result()
with self.cv_capture: # Wait for capture to be enabled
self.cv_capture.wait_for(
predicate=lambda: self.flag_capture or self.flag_exit)
self.toggle_record()
frame_id += 1
if self.camera:
self.camera.stop_capture()
else:
self.video.close()
self.executor.shutdown()
log.debug(f"create_from_depth_image() errors = {pcd_errors}")
def toggle_record(self):
if self.camera is not None:
if self.flag_record and not self.recording:
self.camera.resume_record()
self.recording = True
elif not self.flag_record and self.recording:
self.camera.pause_record()
self.recording = False
def save_pcd(self):
"""Save current point cloud."""
now = datetime.now().strftime('%Y-%m-%d_%H-%M-%S')
filename = f"{self.rgbd_metadata.serial_number}_pcd_{now}.ply"
# Convert colors to uint8 for compatibility
self.pcd_frame.point['colors'] = (self.pcd_frame.point['colors'] *
255).to(o3d.core.Dtype.UInt8)
self.executor.submit(o3d.t.io.write_point_cloud,
filename,
self.pcd_frame,
write_ascii=False,
compressed=True,
print_progress=False)
self.status_message = f"Saving point cloud to {filename}."
def save_rgbd(self):
"""Save current RGBD image pair."""
now = datetime.now().strftime('%Y-%m-%d_%H-%M-%S')
filename = f"{self.rgbd_metadata.serial_number}_color_{now}.jpg"
self.executor.submit(o3d.t.io.write_image, filename,
self.rgbd_frame.color)
filename = f"{self.rgbd_metadata.serial_number}_depth_{now}.png"
self.executor.submit(o3d.t.io.write_image, filename,
self.rgbd_frame.depth)
self.status_message = (
f"Saving RGBD images to {filename[:-3]}.{{jpg,png}}.")
class PipelineView:
"""Controls display and user interface. All methods must run in the main thread."""
def __init__(self, vfov=60, max_pcd_vertices=1 << 20, **callbacks):
"""Initialize.
Args:
vfov (float): Vertical field of view for the 3D scene.
            max_pcd_vertices (int): Maximum point cloud vertices for which memory
is allocated.
callbacks (dict of kwargs): Callbacks provided by the controller
for various operations.
"""
self.vfov = vfov
self.max_pcd_vertices = max_pcd_vertices
gui.Application.instance.initialize()
self.window = gui.Application.instance.create_window(
"Open3D || Online RGBD Video Processing", 1280, 960)
# Called on window layout (eg: resize)
self.window.set_on_layout(self.on_layout)
self.window.set_on_close(callbacks['on_window_close'])
self.pcd_material = o3d.visualization.rendering.MaterialRecord()
self.pcd_material.shader = "defaultLit"
# Set n_pixels displayed for each 3D point, accounting for HiDPI scaling
self.pcd_material.point_size = int(4 * self.window.scaling)
# 3D scene
self.pcdview = gui.SceneWidget()
self.window.add_child(self.pcdview)
self.pcdview.enable_scene_caching(
True) # makes UI _much_ more responsive
self.pcdview.scene = rendering.Open3DScene(self.window.renderer)
self.pcdview.scene.set_background([1, 1, 1, 1]) # White background
self.pcdview.scene.set_lighting(
rendering.Open3DScene.LightingProfile.SOFT_SHADOWS, [0, -6, 0])
# Point cloud bounds, depends on the sensor range
self.pcd_bounds = o3d.geometry.AxisAlignedBoundingBox([-3, -3, 0],
[3, 3, 6])
self.camera_view() # Initially look from the camera
em = self.window.theme.font_size
# Options panel
self.panel = gui.Vert(em, gui.Margins(em, em, em, em))
self.panel.preferred_width = int(360 * self.window.scaling)
self.window.add_child(self.panel)
toggles = gui.Horiz(em)
self.panel.add_child(toggles)
toggle_capture = gui.ToggleSwitch("Capture / Play")
toggle_capture.is_on = False
toggle_capture.set_on_clicked(
callbacks['on_toggle_capture']) # callback
toggles.add_child(toggle_capture)
self.flag_normals = False
self.toggle_normals = gui.ToggleSwitch("Colors / Normals")
self.toggle_normals.is_on = False
self.toggle_normals.set_on_clicked(
callbacks['on_toggle_normals']) # callback
toggles.add_child(self.toggle_normals)
view_buttons = gui.Horiz(em)
self.panel.add_child(view_buttons)
view_buttons.add_stretch() # for centering
camera_view = gui.Button("Camera view")
camera_view.set_on_clicked(self.camera_view) # callback
view_buttons.add_child(camera_view)
birds_eye_view = gui.Button("Bird's eye view")
birds_eye_view.set_on_clicked(self.birds_eye_view) # callback
view_buttons.add_child(birds_eye_view)
view_buttons.add_stretch() # for centering
save_toggle = gui.Horiz(em)
self.panel.add_child(save_toggle)
save_toggle.add_child(gui.Label("Record / Save"))
self.toggle_record = None
if callbacks['on_toggle_record'] is not None:
save_toggle.add_fixed(1.5 * em)
self.toggle_record = gui.ToggleSwitch("Video")
self.toggle_record.is_on = False
self.toggle_record.set_on_clicked(callbacks['on_toggle_record'])
save_toggle.add_child(self.toggle_record)
save_buttons = gui.Horiz(em)
self.panel.add_child(save_buttons)
save_buttons.add_stretch() # for centering
save_pcd = gui.Button("Save Point cloud")
save_pcd.set_on_clicked(callbacks['on_save_pcd'])
save_buttons.add_child(save_pcd)
save_rgbd = gui.Button("Save RGBD frame")
save_rgbd.set_on_clicked(callbacks['on_save_rgbd'])
save_buttons.add_child(save_rgbd)
save_buttons.add_stretch() # for centering
self.video_size = (int(240 * self.window.scaling),
int(320 * self.window.scaling), 3)
self.show_color = gui.CollapsableVert("Color image")
self.show_color.set_is_open(False)
self.panel.add_child(self.show_color)
self.color_video = gui.ImageWidget(
o3d.geometry.Image(np.zeros(self.video_size, dtype=np.uint8)))
self.show_color.add_child(self.color_video)
self.show_depth = gui.CollapsableVert("Depth image")
self.show_depth.set_is_open(False)
self.panel.add_child(self.show_depth)
self.depth_video = gui.ImageWidget(
o3d.geometry.Image(np.zeros(self.video_size, dtype=np.uint8)))
self.show_depth.add_child(self.depth_video)
self.status_message = gui.Label("")
self.panel.add_child(self.status_message)
self.flag_exit = False
self.flag_gui_init = False
def update(self, frame_elements):
"""Update visualization with point cloud and images. Must run in main
thread since this makes GUI calls.
Args:
frame_elements: dict {element_type: geometry element}.
Dictionary of element types to geometry elements to be updated
in the GUI:
'pcd': point cloud,
'color': rgb image (3 channel, uint8),
'depth': depth image (uint8),
'status_message': message
"""
if not self.flag_gui_init:
# Set dummy point cloud to allocate graphics memory
dummy_pcd = o3d.t.geometry.PointCloud({
'positions':
o3d.core.Tensor.zeros((self.max_pcd_vertices, 3),
o3d.core.Dtype.Float32),
'colors':
o3d.core.Tensor.zeros((self.max_pcd_vertices, 3),
o3d.core.Dtype.Float32),
'normals':
o3d.core.Tensor.zeros((self.max_pcd_vertices, 3),
o3d.core.Dtype.Float32)
})
if self.pcdview.scene.has_geometry('pcd'):
self.pcdview.scene.remove_geometry('pcd')
self.pcd_material.shader = "normals" if self.flag_normals else "defaultLit"
self.pcdview.scene.add_geometry('pcd', dummy_pcd, self.pcd_material)
self.flag_gui_init = True
# TODO(ssheorey) Switch to update_geometry() after #3452 is fixed
if os.name == 'nt':
self.pcdview.scene.remove_geometry('pcd')
self.pcdview.scene.add_geometry('pcd', frame_elements['pcd'],
self.pcd_material)
else:
update_flags = (rendering.Scene.UPDATE_POINTS_FLAG |
rendering.Scene.UPDATE_COLORS_FLAG |
(rendering.Scene.UPDATE_NORMALS_FLAG
if self.flag_normals else 0))
self.pcdview.scene.scene.update_geometry('pcd',
frame_elements['pcd'],
update_flags)
# Update color and depth images
# TODO(ssheorey) Remove CPU transfer after we have CUDA -> OpenGL bridge
if self.show_color.get_is_open() and 'color' in frame_elements:
sampling_ratio = self.video_size[1] / frame_elements['color'].columns
self.color_video.update_image(
frame_elements['color'].resize(sampling_ratio).cpu())
if self.show_depth.get_is_open() and 'depth' in frame_elements:
sampling_ratio = self.video_size[1] / frame_elements['depth'].columns
self.depth_video.update_image(
frame_elements['depth'].resize(sampling_ratio).cpu())
if 'status_message' in frame_elements:
self.status_message.text = frame_elements["status_message"]
self.pcdview.force_redraw()
def camera_view(self):
"""Callback to reset point cloud view to the camera"""
self.pcdview.setup_camera(self.vfov, self.pcd_bounds, [0, 0, 0])
# Look at [0, 0, 1] from camera placed at [0, 0, 0] with Y axis
# pointing at [0, -1, 0]
self.pcdview.scene.camera.look_at([0, 0, 1], [0, 0, 0], [0, -1, 0])
def birds_eye_view(self):
"""Callback to reset point cloud view to birds eye (overhead) view"""
self.pcdview.setup_camera(self.vfov, self.pcd_bounds, [0, 0, 0])
self.pcdview.scene.camera.look_at([0, 0, 1.5], [0, 3, 1.5], [0, -1, 0])
def on_layout(self, layout_context):
# The on_layout callback should set the frame (position + size) of every
# child correctly. After the callback is done the window will layout
# the grandchildren.
"""Callback on window initialize / resize"""
frame = self.window.content_rect
self.pcdview.frame = frame
panel_size = self.panel.calc_preferred_size(layout_context,
self.panel.Constraints())
self.panel.frame = gui.Rect(frame.get_right() - panel_size.width,
frame.y, panel_size.width,
panel_size.height)
class PipelineController:
"""Entry point for the app. Controls the PipelineModel object for IO and
processing and the PipelineView object for display and UI. All methods
operate on the main thread.
"""
def __init__(self, camera_config_file=None, rgbd_video=None, device=None):
"""Initialize.
Args:
camera_config_file (str): Camera configuration json file.
rgbd_video (str): RS bag file containing the RGBD video. If this is
provided, connected cameras are ignored.
device (str): Compute device (e.g.: 'cpu:0' or 'cuda:0').
"""
self.pipeline_model = PipelineModel(self.update_view,
camera_config_file, rgbd_video,
device)
self.pipeline_view = PipelineView(
1.25 * self.pipeline_model.vfov,
self.pipeline_model.max_points,
on_window_close=self.on_window_close,
on_toggle_capture=self.on_toggle_capture,
on_save_pcd=self.on_save_pcd,
on_save_rgbd=self.on_save_rgbd,
on_toggle_record=self.on_toggle_record
if rgbd_video is None else None,
on_toggle_normals=self.on_toggle_normals)
threading.Thread(name='PipelineModel',
target=self.pipeline_model.run).start()
gui.Application.instance.run()
def update_view(self, frame_elements):
"""Updates view with new data. May be called from any thread.
Args:
frame_elements (dict): Display elements (point cloud and images)
from the new frame to be shown.
"""
gui.Application.instance.post_to_main_thread(
self.pipeline_view.window,
lambda: self.pipeline_view.update(frame_elements))
def on_toggle_capture(self, is_enabled):
"""Callback to toggle capture."""
self.pipeline_model.flag_capture = is_enabled
if not is_enabled:
self.on_toggle_record(False)
if self.pipeline_view.toggle_record is not None:
self.pipeline_view.toggle_record.is_on = False
else:
with self.pipeline_model.cv_capture:
self.pipeline_model.cv_capture.notify()
def on_toggle_record(self, is_enabled):
"""Callback to toggle recording RGBD video."""
self.pipeline_model.flag_record = is_enabled
def on_toggle_normals(self, is_enabled):
"""Callback to toggle display of normals"""
self.pipeline_model.flag_normals = is_enabled
self.pipeline_view.flag_normals = is_enabled
self.pipeline_view.flag_gui_init = False
def on_window_close(self):
"""Callback when the user closes the application window."""
self.pipeline_model.flag_exit = True
with self.pipeline_model.cv_capture:
self.pipeline_model.cv_capture.notify_all()
return True # OK to close window
def on_save_pcd(self):
"""Callback to save current point cloud."""
self.pipeline_model.flag_save_pcd = True
def on_save_rgbd(self):
"""Callback to save current RGBD image pair."""
self.pipeline_model.flag_save_rgbd = True
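# Hedged usage note (the file name below is a placeholder, not a shipped asset):
# besides the CLI entry point under __main__, the controller can be started
# programmatically, e.g.
#     PipelineController(rgbd_video="scene.bag")   # play back a recorded bag file
#     PipelineController(device="cpu:0")           # live camera, force CPU processing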
if __name__ == "__main__":
log.basicConfig(level=log.INFO)
parser = argparse.ArgumentParser(
description=__doc__,
formatter_class=argparse.RawDescriptionHelpFormatter)
parser.add_argument('--camera-config',
help='RGBD camera configuration JSON file')
parser.add_argument('--rgbd-video', help='RGBD video file (RealSense bag)')
parser.add_argument('--device',
help='Device to run computations. e.g. cpu:0 or cuda:0 '
'Default is CUDA GPU if available, else CPU.')
args = parser.parse_args()
if args.camera_config and args.rgbd_video:
log.critical(
"Please provide only one of --camera-config and --rgbd-video arguments"
)
else:
PipelineController(args.camera_config, args.rgbd_video, args.device)
|
the-stack_0_21205 | """
Author: Remi Lafage <<[email protected]>>
This package is distributed under New BSD license.
"""
import unittest
import numpy as np
from smt.surrogate_models import KRG
from smt.problems import Sphere
from smt.sampling_methods import FullFactorial
class TestKRG(unittest.TestCase):
def test_predict_output_shape(self):
x = np.random.random((10, 3))
y = np.random.random((10, 2))
kriging = KRG()
kriging.set_training_values(x, y)
kriging.train()
val = kriging.predict_values(x)
self.assertEqual(y.shape, val.shape)
var = kriging.predict_variances(x)
self.assertEqual(y.shape, var.shape)
def test_derivatives(self):
# Construction of the DOE
ndim = 4
fun = Sphere(ndim=ndim)
sampling = FullFactorial(xlimits=fun.xlimits)
xt = sampling(100)
yt = fun(xt)
# Compute the training derivatives
for i in range(ndim):
yd = fun(xt, kx=i)
yt = np.concatenate((yt, yd), axis=1)
# check KRG models
sm_krg_c = KRG(poly="constant", print_global=False)
sm_krg_c.set_training_values(xt, yt[:, 0])
sm_krg_c.train()
TestKRG._check_derivatives(sm_krg_c, xt, yt, ndim)
sm_krg_l = KRG(poly="linear", print_global=False)
sm_krg_l.set_training_values(xt, yt[:, 0])
sm_krg_l.train()
TestKRG._check_derivatives(sm_krg_l, xt, yt, ndim)
@staticmethod
def _check_derivatives(sm, xt, yt, ndim, i=10):
        # Compares the predicted derivatives with the training derivatives at the i-th training point
        # 1. Training derivative: "exact" value
        # 2. Predicted derivative: obtained by sm.predict_derivatives()
# testing point
x_test = xt[i].reshape((1, ndim))
# 2. derivatives prediction by surrogate
dydx_predict = np.zeros(ndim)
for j in range(ndim):
            dydx_predict[j] = sm.predict_derivatives(x_test, kx=j)[0]
print(dydx_predict)
print(yt[i, 1:])
# compare results
np.testing.assert_allclose(yt[i, 1:], dydx_predict, atol=2e-3, rtol=1e-3)
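# Hedged, self-contained usage sketch of the KRG API exercised by the tests above
# (toy 1-D data; not part of the original test suite):
def _krg_usage_sketch():
    x = np.linspace(0.0, 1.0, 20).reshape(-1, 1)
    y = (x ** 2).ravel()
    sm = KRG(poly="constant", print_global=False)
    sm.set_training_values(x, y)
    sm.train()
    x_new = np.array([[0.25], [0.75]])
    # values, variances and the derivative w.r.t. the single input dimension
    return (sm.predict_values(x_new),
            sm.predict_variances(x_new),
            sm.predict_derivatives(x_new, kx=0))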
if __name__ == "__main__":
unittest.main()
|
the-stack_0_21206 | #!/usr/bin/env python
__author__ = "XXX"
__email__ = "XXX"
from abc import abstractmethod, ABC
import numpy as np
from recommender_interface import Recommender, Recommender
import pandas as pd
class RepresentationsBasedRecommender(Recommender, ABC):
"""Representation based algorithm interface
    Interface for recommendation system algorithms which learn user and item embeddings to retrieve recommendations.
    We use `pandas` DataFrames to store the representations for both users and items; the DataFrames have to be
    indexed by the user and item idxs.
Attributes:
train_data (pd.DataFrame): dataframe containing user-item interactions
"""
def __init__(self, train_data):
"""Representation based algorithm interface
Interface for recommendation system algorithms which learn users and items embeddings to retrieve recommendation
We use `pandas` dataframe to store the representations for both user and item, the dataframes have to be indexed by
the user and item idxs
Args:
train_data (pd.DataFrame): dataframe containing user-item interactions
"""
super().__init__(train_data=train_data)
@abstractmethod
def compute_representations(self, user_data):
"""Compute users and items representations
Args:
            user_data (dict): dictionary whose `interactions` key holds, as a pd.DataFrame, the interactions
                of the users for which predictions are to be retrieved
Returns:
pd.DataFrame, pd.DataFrame: user representations, item representations
"""
pass
def compute_items_scores(self, user_data):
"""Compute items scores as dot product between users and items representations
Args:
            user_data (dict): dictionary whose `interactions` key holds, as a pd.DataFrame, the interactions
                of the users for which predictions are to be retrieved
Returns:
pd.DataFrame: items scores for each user
"""
users_repr_df, items_repr_df = self.compute_representations(user_data)
assert isinstance(users_repr_df, pd.DataFrame) and isinstance(
items_repr_df, pd.DataFrame
), "Representations have to be stored inside pd.DataFrane objects!\n user: {}, item: {}".format(
type(users_repr_df), type(items_repr_df)
)
assert (
users_repr_df.shape[1] == items_repr_df.shape[1]
), "Users and Items representations have not the same shape!\n user: {}, item: {}".format(
users_repr_df.shape[1], items_repr_df.shape[1]
)
# sort items representations
items_repr_df.sort_index(inplace=True)
# compute the scores as dot product between users and items representations
arr_scores = users_repr_df.to_numpy().dot(items_repr_df.to_numpy().T)
scores = pd.DataFrame(arr_scores, index=users_repr_df.index)
return scores
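# --- Hedged, self-contained sketch (illustration only, not part of the original
# module): reproduces the dot-product scoring performed in compute_items_scores()
# with toy random user and item representations.
def _dot_product_scores_sketch():
    users_repr_df = pd.DataFrame(np.random.rand(3, 8), index=["u1", "u2", "u3"])
    items_repr_df = pd.DataFrame(np.random.rand(5, 8), index=[4, 2, 0, 1, 3])
    items_repr_df.sort_index(inplace=True)   # align item rows by item idx
    arr_scores = users_repr_df.to_numpy().dot(items_repr_df.to_numpy().T)
    return pd.DataFrame(arr_scores, index=users_repr_df.index, columns=items_repr_df.index)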
|
the-stack_0_21207 | #!/usr/bin/env python
"""
Licensed to the Apache Software Foundation (ASF) under one
or more contributor license agreements. See the NOTICE file
distributed with this work for additional information
regarding copyright ownership. The ASF licenses this file
to you under the Apache License, Version 2.0 (the
"License"); you may not use this file except in compliance
with the License. You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from resource_management import *
config = Script.get_config()
mapred_user = config['configurations']['global']['mapred_user']
yarn_user = config['configurations']['global']['yarn_user']
yarn_pid_dir_prefix = config['configurations']['global']['yarn_pid_dir_prefix']
mapred_pid_dir_prefix = config['configurations']['global']['mapred_pid_dir_prefix']
yarn_pid_dir = format("{yarn_pid_dir_prefix}/{yarn_user}")
mapred_pid_dir = format("{mapred_pid_dir_prefix}/{mapred_user}")
resourcemanager_pid_file = format("{yarn_pid_dir}/yarn-{yarn_user}-resourcemanager.pid")
nodemanager_pid_file = format("{yarn_pid_dir}/yarn-{yarn_user}-nodemanager.pid")
yarn_historyserver_pid_file = format("{yarn_pid_dir}/yarn-{yarn_user}-historyserver.pid")
mapred_historyserver_pid_file = format("{mapred_pid_dir}/mapred-{mapred_user}-historyserver.pid") |
the-stack_0_21209 | import glob
import os
import re
import shutil
import time
import fitz
from docx import Document
from googletrans import Translator
from tqdm import tqdm
input_path = os.path.join(os.path.dirname(__file__), "../anderson/")
output_path = os.path.join(os.path.dirname(__file__), "../old/anderson-JA_old/")
temp = os.path.join(os.path.dirname(__file__), "../old/temp_old/")
def remove_temp(temp, debug=False):
if debug:
return
shutil.rmtree(temp)
def get_filename(pdf_path):
return os.path.splitext(os.path.basename(pdf_path))[0]
def get_text_from_pdf(pdf_path):
text = ""
with fitz.open(pdf_path) as pdf:
for page in pdf:
text += page.getText()
return text
def write_text(text_path, text):
with open(text_path, "w", encoding="utf-8") as txt:
txt.write(text)
def convert_text(text_path, convert_path):
    # Header
def is_header(line):
line = line.replace("\n", "")
if line == "Preface to the Third Edition":
return True
if re.fullmatch("\d{1,2}\.\d+\.(\s|[A-Z]|\?|-)+", line) is not None:
return True
return False
    # Footer
def is_footer(line):
line = line.replace("\n", "")
for footer in ['Security Engineering', 'Ross Anderson']:
if line == footer:
return True
if line.isdecimal():
return True
return False
    # Section number
def is_title(line):
line = line.replace("\n", "")
if line == "Preface to the Third Edition":
return True
if re.fullmatch("\d{1,2}(\.\d)+", line) is not None:
return True
return False
def convert_title(title_number, title_name):
text = "\n" + title_number.replace("\n", " ") + title_name
return text
    # Chapter
def is_chapter(line):
line = line.replace("\n", "")
if re.fullmatch("^Chapter\s\d{1,2}", line) is not None:
return True
return False
def convert_chapter(chapter_number, chapter_name):
text = chapter_number.replace("\n", " ") + chapter_name
return text
    # Epigraph (quotation from a famous person)
def is_wise_saying(line):
line = line.replace("\n", "")
if re.fullmatch("^–\s(\s|[A-Z]|\[|\]|[0-9])+", line) is not None:
return True
return False
def convert_line(line):
        # Handle ligatures
def remove_ligature(line):
text = line.replace(u"\ufffd", u"\u0066\u0066\u0069").replace(u"\u21B5", u"\u0066\u0066")
return text
line = remove_ligature(line)
text = line.replace("\n", " ")
text = text.replace("fig.", "fig").replace("Fig.", "Fig")
text = text.replace(". ", ". \n").replace(")", ")\n").replace("?", "?\n")
for i in range(1, 7):
text = text.replace(" " * i, " ")
return text
title_next = False
chapter_next = False
with open(text_path, "r", encoding="utf-8") as txt:
with open(convert_path, "w", encoding="utf-8") as convert_txt:
for line in txt:
                # Remove headers and footers
if is_header(line) or is_footer(line):
continue
                # Chapter
if is_chapter(line):
chapter_next = True
chapter_number = line
continue
                # Title
if is_title(line):
title_next = True
title_number = line
continue
                # Join the element following a title with its section number
if title_next:
text = convert_title(title_number, line)
title_next = False
elif chapter_next:
text = convert_chapter(chapter_number, line)
chapter_next = False
                # Epigraph (quotation)
elif is_wise_saying(line):
text = line + "\n"
                # Body text
else:
text = convert_line(line)
convert_txt.write(text)
def translate_google(text_path):
trans_text = ""
translated_text = ""
with open(text_path, "r", encoding="utf-8") as convert_text:
for line in convert_text:
trans_text += convert_text.readline()
if len(trans_text) >= 4000:
translated_text += Translator().translate(trans_text, dest="ja").text
trans_text = ""
time.sleep(3)
translated_text += Translator().translate(trans_text, dest="ja").text
return translated_text
def save_word(translated_text, filename):
doc = Document()
doc.add_paragraph(translated_text)
doc.save(output_path + filename + "_ja.docx")
if __name__ == '__main__':
if not os.path.isdir(output_path):
os.mkdir(output_path)
if not os.path.isdir(temp):
os.mkdir(temp)
pdf_paths = glob.glob(input_path + "*.pdf")
pbar = tqdm(pdf_paths)
for pdf_path in pbar:
filename = get_filename(pdf_path)
text_path = temp + filename + "_pdf_text.txt"
convert_path = temp + filename + "_convert_text.txt"
pbar.set_description("Translating {} ".format(filename))
        # Extract the text from the PDF file
text = get_text_from_pdf(pdf_path)
        # Write the extracted text to a txt file
write_text(text_path, text)
        # Clean up the text
convert_text(text_path, convert_path)
        # Translate (using Google Translate)
translated_text = translate_google(convert_path)
        # Export to Word
save_word(translated_text, filename)
    # Finally, remove temp (kept when debugging)
remove_temp(temp, debug=True)
|
the-stack_0_21210 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import fractions
from mako.template import Template
file_template = Template(
"""
#ifndef CHANDRA_${system.upper()}_UNITS_H
#define CHANDRA_${system.upper()}_UNITS_H
#include <ratio>
#include "dimensions.h"
#include "quantity.h"
namespace chandra
{
namespace units
{
namespace ${system}
{
${units_overview}
${units}
} /*namespace ${system}*/
} /*namespace units*/
} /*namespace chandra*/
#endif /*CHANDRA_${system.upper()}_UNITS_H*/
""")
unit_template = Template(
"""
//
// Unit "${symbol}"
//
struct ${symbol}
{
using factor_t = std::ratio<${num}, ${den}>; // ${factor}
using dimensions_t = ${dimensions};
using offset_t = std::ratio<${offset_num}, ${offset_den}>;
using relative_t = ${rel_symbol};
static constexpr bool absolute = ${'true' if absolute else 'false'};
template<typename Stream>
static void streamname(Stream& _stream) {
_stream << "${symbol}";
}
};
template<typename Value = long double>
using Q_${symbol} = units::Quantity<Value, ${symbol}>;
namespace literals
{
static inline Q_${symbol}<long double> operator "" _${symbol}_ (long double _v) {
return Q_${symbol}<long double>(_v); }
static inline Q_${symbol}<long double> operator "" _${symbol}_ (unsigned long long int _v) {
return Q_${symbol}<long double>(static_cast<long double>(_v)); }
}
""")
class UnitsContext:
def __init__(self):
self.units = []
self.names = {}
self.system = None
self.let = {}
self.default_tolerance = 1e-6
self.param_limit = int(1e12)
self.ops = {
'exact': self.op_exact,
'offset_decade': self.op_offset_decade,
'frac': self.op_frac
}
@property
def process_globals(self):
gl = {k: op for k, op in self.ops.items()}
gl.update({k: v for (_, k), v in self.let.items()})
return gl
def op_frac(self, num, den):
return fractions.Fraction(num, den)
def op_offset_decade(self, value):
try:
exp = int(value)
if exp > 0:
return fractions.Fraction(10**exp, 1)
elif exp < 0:
return fractions.Fraction(1, 10**-exp)
return fractions.Fraction(1, 1)
except:
            assert False, 'Error: offset_decade() requires integral parameter, received {}'.format(value)
def op_exact(self, value):
if isinstance(value, float):
return fractions.Fraction(value)
elif isinstance(value, fractions.Fraction):
return value
elif isinstance(value, str) or isinstance(value, bytes):
text = value.strip()
integral = None
fractional = ''
rem = ''
exp = ''
if text.find('.') >= 0:
integral, _, rem = text.partition('.')
integral = integral.strip()
fractional, _, exp = rem.partition('e')
fractional = fractional.strip()
else:
integral, _, exp = text.partition('e')
integral = integral.strip()
fractional = fractional.strip()
exp = exp.strip()
integral_val = 0 if len(integral) == 0 else int(integral)
fractional_val = 0 if len(fractional) == 0 else int(fractional)
base_exp_val = 0 if len(exp) == 0 else int(exp)
exp_frac = len(fractional)
integral_val *= 10**exp_frac
fractional_val *= 1 if integral_val >= 0 else -1
base = fractions.Fraction((integral_val + fractional_val), 1)
mult = 1
exp_val = base_exp_val - exp_frac
if exp_val > 0:
mult = fractions.Fraction(10**exp_val, 1)
else:
mult = fractions.Fraction(1, 10**-exp_val)
return base * mult
else:
assert False, 'Error: function exact() undefined for values of type {}'.format(type(value))
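    # Hedged worked example for op_exact() above: exact("2.54e-2") parses the string
    # into Fraction(254, 10000) == Fraction(127, 5000), i.e. exactly 0.0254, without
    # going through binary floating point.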
def process_field(self, field):
if len(field) > 0:
gl = self.process_globals
exec('__result__ = {}'.format(field), gl)
return gl['__result__']
return None
class ContextCommand:
def __init__(self, name, value):
self.__name = name
self.__value = value
def __call__(self, ctx):
name = self.__name
value = self.__value
if name == 'system':
assert ctx.system is None, 'Error: Only a single units system supported now!'
ctx.system = value
if ctx.system not in ctx.names:
ctx.names[ctx.system] = set()
if name == 'tolerance':
ctx.default_tolerance = float(ctx.process_field(value))
class LetDeclaration:
def __init__(self, system, name, value):
self.__system = system
self.__name = name
self.__value = value
def __str__(self):
return 'let {}::{} = {}'.format(self.system, self.name, self.value)
@property
def system(self): return self.__system
@property
def name(self): return self.__name
@property
def value(self): return self.__value
class UnitCommand:
def __init__(self, dimensions, symbol, factor, offset, message, absolute=False):
self.__dimensions = dimensions
self.__symbol = symbol
self.__factor = factor
self.__offset = offset
self.__absolute = absolute
self.__message = message
def __call__(self, ctx):
factor = ctx.process_field(self.__factor)
offset = ctx.process_field(self.__offset)
prefixed, symbol = self.__parse_symbol(self.__symbol, ctx)
if prefixed:
self.__prefixed_units(symbol, self.__dimensions, factor, offset, ctx)
else:
self.__unit(symbol, self.__dimensions, factor, offset, ctx)
def __prefixed_units(self, symbol, dimensions, factor, offset, ctx):
prefixes = [
('E', fractions.Fraction(1000000000000000000)),
('P', fractions.Fraction(1000000000000000)),
('T', fractions.Fraction(1000000000000)),
('G', fractions.Fraction(1000000000)),
('M', fractions.Fraction(1000000)),
('k', fractions.Fraction(1000)),
('h', fractions.Fraction(100)),
('da', fractions.Fraction(10)),
('', fractions.Fraction(1)),
('d', fractions.Fraction(1, 10)),
('c', fractions.Fraction(1, 100)),
('m', fractions.Fraction(1, 1000)),
('u', fractions.Fraction(1, 1000000)),
('n', fractions.Fraction(1, 1000000000)),
('p', fractions.Fraction(1, 1000000000000)),
('f', fractions.Fraction(1, 1000000000000000)),
('a', fractions.Fraction(1, 1000000000000000000))
]
for prefix, mult in prefixes:
prefixed_symbol = prefix + symbol
self.__unit(prefixed_symbol, dimensions, factor, offset, ctx, mult=mult)
def __unit(self, symbol, dimensions, factor, offset, ctx, mult=None):
if symbol not in ctx.names[ctx.system]:
factor, factor_tol = self.__fractionize(factor, ctx, mult=mult)
offset, offset_tol = self.__fractionize(offset, ctx)
added = False
if (factor is not None):
offset = fractions.Fraction(0, 1) if offset is None else offset
max_param = max(factor.numerator, factor.denominator)
if max_param <= ctx.param_limit and factor != 0:
added = True
self.__add_unit(ctx, symbol, factor, offset, dimensions)
if self.__absolute:
self.__add_unit(ctx, symbol, factor, offset, dimensions, absolute=True)
if not added:
print("Unable to create {} (factor = {})".format(symbol, factor))
else:
assert False, "Attempting to read unit {}".format(symbol)
def __add_unit(self, ctx, symbol, factor, offset, dimensions, absolute=False):
main_symbol = symbol + ('_abs' if absolute else '')
unit_text = unit_template.render(
symbol=main_symbol,
rel_symbol=symbol,
absolute=absolute,
num=factor.numerator,
den=factor.denominator,
factor='approx. {:g}'.format(float(factor)),
dimensions="dimensions::{}".format(dimensions),
offset_num=offset.numerator,
offset_den=offset.denominator
)
data = (dimensions, factor, offset)
ctx.units.append((ctx.system, main_symbol, unit_text, data)) # DO SOMETHING TO ARRANGE THESE....
ctx.names[ctx.system].add(symbol)
def __parse_symbol(self, symbol, ctx): # TODO: VALIDATE THE REDUCED SYMBOL AS AN IDENTIFIER
symbol = symbol.strip()
if symbol[0] == '*':
return True, symbol[1:]
return False, symbol
def __fractionize(self, value, ctx, mult=None):
mult = fractions.Fraction(1, 1) if mult is None else mult
if value is None:
return None, ctx.default_tolerance
elif isinstance(value, fractions.Fraction):
if value.denominator > ctx.param_limit:
return None, ctx.default_tolerance
return (mult * value).limit_denominator(ctx.param_limit), 0
elif isinstance(value, int):
value = fractions.Fraction(value, 1)
elif isinstance(value, float):
value = fractions.Fraction(value)
else:
assert False, 'Error: Fractionalize is not defined for values of type = {}'.type(value)
denom_limit = 10
done = False
base_value = fractions.Fraction(value)
if base_value.numerator == 0:
return fractions.Fraction(0, 1), 0
reference_value = mult * base_value
while not done:
frac = reference_value.limit_denominator(denom_limit)
est_err = abs(float(reference_value - frac)) / reference_value
# print('frac = {}, err_est = {}, tol = {}'.format(frac, est_err, ctx.default_tolerance))
if est_err <= ctx.default_tolerance and (frac.numerator <= ctx.param_limit):
done = True
value = frac
else:
denom_limit *= 10
if denom_limit > ctx.param_limit:
done = True
value = None
return value, ctx.default_tolerance
def parseSystemLine(line): # TODO: THIS SHOULD RETURN NONE IF THE STRIPPED LINE IS NOT A VALID IDENTIFIER
line = line.strip()
return ContextCommand('system', line), line
def parseTolerenceLine(line):
try:
return ContextCommand('tolerance', line)
except:
pass
return None
def parseLetLine(line, system):
name, _, value = line.partition("=")
name = name.strip()
value = value.strip()
return LetDeclaration(system, name, value)
def parseUnitLine(dimensions, line, message): # TODO: VALIDATE THE DIMENSIONS
symbol, _, line = line.partition('=')
symbol = symbol.strip()
factor, absolute, line = line.partition('@')
factor = factor.strip()
offset = line
return UnitCommand(dimensions, symbol, factor, offset, message, absolute=absolute=='@')
def parseUnitDefinitionFile(filename):
commands = []
declarations = []
current_system = None
with open(filename) as definition:
for line in definition.readlines():
line = line.strip()
line, _, comment = line.partition('#')
comment = comment.strip()
comment = None if len(comment) == 0 else comment
if len(line) > 0:
(name, line) = line.split(None, 1)
new_definition = None
if name == 'tolerance':
new_definition = parseTolerenceLine(line)
elif name == 'system':
new_definition, system = parseSystemLine(line)
if new_definition is not None:
current_system = system
elif name == 'let':
new_declaration = parseLetLine(line, current_system)
if new_declaration is not None:
declarations.append(new_declaration)
else:
new_definition = parseUnitLine(name, line, comment)
if new_definition is not None:
commands.append(new_definition)
return commands, declarations
def processUnitDefinitions(commands, declarations, ctx):
ctx.let = {}
for declaration in declarations:
ctx.let[(declaration.system, declaration.name)] = ctx.process_field(declaration.value)
num_definitions = sum([1 if isinstance(command, UnitCommand) else 0 for command in commands])
print('{} unit definitions'.format(num_definitions))
for command in commands:
command(ctx)
units_snippets = [text for system, symbol, text, _ in ctx.units]
units_data = [(symbol, data) for _, symbol, _, data in ctx.units]
units_text = '\n\n'.join(units_snippets)
print('{} units generated'.format(len(units_snippets)))
units_overview = '//\n// Overview of Units in {} System\n//\n'.format(ctx.system)
for symbol, (dimensions, factor, offset) in units_data:
offset_render = '' if offset.numerator == 0 else '@ {:0.2f}'.format(float(offset))
units_render = '//\t{} {} = {:0.4g} {}\n'.format(dimensions, symbol, float(factor), offset_render)
units_overview += units_render
return ctx.system, file_template.render(units=units_text, units_overview=units_overview, system=ctx.system)
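# --- Hedged, self-contained demo of the parser API above (illustration only; the
# dimension name "length" and symbol "demo_unit" are made-up placeholders, not
# entries from units.def).
def _demo_parse_single_unit():
    ctx = UnitsContext()
    ContextCommand('system', 'demo')(ctx)                      # select a unit system first
    cmd = parseUnitLine("length", "demo_unit = frac(1, 1)", None)
    cmd(ctx)                                                   # renders unit_template for 'demo_unit'
    return [symbol for _, symbol, _, _ in ctx.units]           # -> ['demo_unit']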
if __name__ == '__main__':
ctx = UnitsContext()
commands, declarations = parseUnitDefinitionFile("units.def")
system, file_text = processUnitDefinitions(commands, declarations, ctx)
# for key, value in ctx.process_globals.items():
# print(f'{key}: {value}')
filename = '../Chandra HAL/units_{}.h'.format(system)
with open(filename, 'w') as file:
file.write(file_text)
|
the-stack_0_21213 | """
Basic tests to make sure the client passes `visibility` without errors.
"""
import pytest
import requests
from verta._protos.public.common import CommonService_pb2 as _CommonCommonService
from verta._protos.public.modeldb import DatasetService_pb2 as _DatasetService
from verta._protos.public.modeldb import ProjectService_pb2 as _ProjectService
from verta._protos.public.modeldb.versioning import VersioningService_pb2 as _VersioningService
from verta._internal_utils import _utils
from verta.visibility import (
OrgCustom,
Private,
)
pytestmark = pytest.mark.not_oss
def assert_visibility(entity, visibility, entity_name):
if not entity._msg.HasField('custom_permission'):
pytest.skip("backend does not support new visibility")
assert entity._msg.custom_permission == visibility._custom_permission
if entity_name == "registered_model":
assert entity._msg.resource_visibility == visibility._visibility
else:
assert entity._msg.visibility == visibility._visibility
@pytest.mark.deployment
def assert_endpoint_visibility(endpoint, visibility):
endpoint_json = endpoint._get_json_by_id(endpoint._conn, endpoint.workspace, endpoint.id)
if 'custom_permission' not in endpoint_json['creator_request']:
pytest.skip("backend does not support new visibility")
assert endpoint_json['creator_request']['custom_permission']['collaborator_type'] == visibility._collaborator_type_str
assert endpoint_json['creator_request']['resource_visibility'] == visibility._visibility_str
class TestCreate:
@pytest.mark.parametrize(
("entity_name", "visibility"),
[
("dataset", OrgCustom(write=True)),
("project", OrgCustom(write=True, deploy=True)),
("registered_model", OrgCustom(write=True, deploy=True)),
]
)
def test_mdb_entity(self, client, organization, entity_name, visibility):
create_entity = getattr(client, "create_{}".format(entity_name))
entity = create_entity(workspace=organization.name, visibility=visibility)
try:
assert_visibility(entity, visibility, entity_name)
finally:
entity.delete()
client._ctx.proj = None # otherwise client teardown tries to delete
def test_endpoint(self, client, organization, created_entities):
visibility = OrgCustom(write=True)
endpoint = client.create_endpoint(
path=_utils.generate_default_name(),
workspace=organization.name, visibility=visibility,
)
created_entities.append(endpoint)
assert_endpoint_visibility(endpoint, visibility)
class TestSet:
@pytest.mark.parametrize(
("entity_name", "visibility"),
[
("dataset", OrgCustom(write=True)),
("project", OrgCustom(write=True, deploy=True)),
("registered_model", OrgCustom(write=True, deploy=True)),
]
)
def test_mdb_entity(self, client, organization, entity_name, visibility):
set_entity = getattr(client, "set_{}".format(entity_name))
entity = set_entity(workspace=organization.name, visibility=visibility)
try:
assert_visibility(entity, visibility, entity_name)
# second set ignores visibility
with pytest.warns(UserWarning, match="cannot set"):
entity = set_entity(entity.name, workspace=organization.name, visibility=Private())
assert_visibility(entity, visibility, entity_name)
finally:
entity.delete()
client._ctx.proj = None # otherwise client teardown tries to delete
@pytest.mark.deployment
def test_endpoint(self, client, organization, created_entities):
visibility = OrgCustom(write=True)
endpoint = client.set_endpoint(
path=_utils.generate_default_name(),
workspace=organization.name, visibility=visibility,
)
created_entities.append(endpoint)
assert_endpoint_visibility(endpoint, visibility)
# second set ignores visibility
with pytest.warns(UserWarning, match="cannot set"):
endpoint = client.set_endpoint(path=endpoint.path, workspace=organization.name, visibility=Private())
assert_endpoint_visibility(endpoint, visibility)
class TestPublicWithinOrg:
"""
`visibility` gets translated to an equivalent `public_within_org` value for
compatibility with older backends.
"""
def test_dataset(self, client, organization, created_entities):
visibility = OrgCustom(write=True)
dataset = client.set_dataset(workspace=organization.name, visibility=visibility)
created_entities.append(dataset)
if visibility._to_public_within_org():
assert dataset._msg.dataset_visibility == _DatasetService.DatasetVisibilityEnum.ORG_SCOPED_PUBLIC
else:
assert dataset._msg.dataset_visibility == _DatasetService.DatasetVisibilityEnum.PRIVATE
@pytest.mark.deployment
def test_endpoint(self, client, organization, created_entities):
visibility = OrgCustom(write=True)
endpoint = client.set_endpoint(
path=_utils.generate_default_name(),
workspace=organization.name, visibility=visibility,
)
created_entities.append(endpoint)
endpoint_json = endpoint._get_json_by_id(endpoint._conn, endpoint.workspace, endpoint.id)
if visibility._to_public_within_org():
assert endpoint_json['creator_request']['visibility'] == "ORG_SCOPED_PUBLIC"
else:
assert endpoint_json['creator_request']['visibility'] == "PRIVATE"
def test_project(self, client, organization):
visibility = OrgCustom(write=True)
entity = client.set_project(workspace=organization.name, visibility=visibility)
try:
if visibility._to_public_within_org():
assert entity._msg.project_visibility == _ProjectService.ORG_SCOPED_PUBLIC
else:
assert entity._msg.project_visibility == _ProjectService.PRIVATE
finally:
entity.delete()
client._ctx.proj = None # otherwise client teardown tries to delete
def test_registered_model(self, client, organization, created_entities):
visibility = OrgCustom(write=True)
entity = client.set_registered_model(workspace=organization.name, visibility=visibility)
created_entities.append(entity)
if visibility._to_public_within_org():
assert entity._msg.visibility == _CommonCommonService.VisibilityEnum.ORG_SCOPED_PUBLIC
else:
assert entity._msg.visibility == _CommonCommonService.VisibilityEnum.PRIVATE
|
the-stack_0_21214 | #
# Advent of code 2021
# Day 2 pt 2
# 12/2/2021
#
import re
with open('./input.txt') as f:
content = f.readlines()
# Remove \n in each item
content = [x.strip() for x in content]
hor_coord = 0
ver_coord = 0
aim = 0
for i in range(len(content)):
direction = re.search('^\w+(?=\s)', content[i])
direction = direction.group(0)
number = re.search('\d+$', content[i])
number = int(number.group(0))
if direction == 'forward':
hor_coord += number
ver_coord += aim * number
elif direction == 'up':
aim -= number
elif direction == 'down':
aim += number
print(f'The product of the coordinates is: {hor_coord * ver_coord}') |
the-stack_0_21216 | # This code is part of the Biopython distribution and governed by its
# license. Please see the LICENSE file that should have been included
# as part of this package.
"""Codon tables based on those from the NCBI.
These tables are based on parsing the NCBI file:
ftp://ftp.ncbi.nih.gov/entrez/misc/data/gc.prt
Last updated at Version 4.0
"""
from __future__ import print_function
from anarci.Bio import Alphabet
from anarci.Bio.Alphabet import IUPAC
from anarci.Bio.Data import IUPACData
unambiguous_dna_by_name = {}
unambiguous_dna_by_id = {}
unambiguous_rna_by_name = {}
unambiguous_rna_by_id = {}
generic_by_name = {} # unambiguous DNA or RNA
generic_by_id = {} # unambiguous DNA or RNA
ambiguous_dna_by_name = {}
ambiguous_dna_by_id = {}
ambiguous_rna_by_name = {}
ambiguous_rna_by_id = {}
ambiguous_generic_by_name = {} # ambiguous DNA or RNA
ambiguous_generic_by_id = {} # ambiguous DNA or RNA
# standard IUPAC unambiguous codons
standard_dna_table = None
standard_rna_table = None
# In the future, the back_table could return a statistically
# appropriate distribution of codons, so do not cache the results of
# back_table lookups!
class TranslationError(Exception):
pass
class CodonTable(object):
"""A codon-table, or genetic code."""
nucleotide_alphabet = Alphabet.generic_nucleotide
protein_alphabet = Alphabet.generic_protein
forward_table = {} # only includes codons which actually code
back_table = {} # for back translations
start_codons = []
stop_codons = []
# Not always called from derived classes!
def __init__(self, nucleotide_alphabet=nucleotide_alphabet,
protein_alphabet=protein_alphabet,
forward_table=forward_table, back_table=back_table,
start_codons=start_codons, stop_codons=stop_codons):
self.nucleotide_alphabet = nucleotide_alphabet
self.protein_alphabet = protein_alphabet
self.forward_table = forward_table
self.back_table = back_table
self.start_codons = start_codons
self.stop_codons = stop_codons
def __str__(self):
"""Returns a simple text representation of the codon table.
e.g.
>>> import Bio.Data.CodonTable
>>> print(Bio.Data.CodonTable.standard_dna_table)
>>> print(Bio.Data.CodonTable.generic_by_id[1])
"""
if self.id:
answer = "Table %i" % self.id
else:
answer = "Table ID unknown"
if self.names:
answer += " " + ", ".join([x for x in self.names if x])
# Use the main four letters (and the conventional ordering)
# even for ambiguous tables
letters = self.nucleotide_alphabet.letters
if isinstance(self.nucleotide_alphabet, Alphabet.DNAAlphabet) \
or (letters is not None and "T" in letters):
letters = "TCAG"
else:
# Should be either RNA or generic nucleotides,
# e.g. Bio.Data.CodonTable.generic_by_id[1]
letters = "UCAG"
# Build the table...
answer += "\n\n |" + "|".join(" %s " % c2 for c2 in letters) + "|"
answer += "\n--+" + "+".join("---------" for c2 in letters) + "+--"
for c1 in letters:
for c3 in letters:
line = c1 + " |"
for c2 in letters:
codon = c1 + c2 + c3
line += " %s" % codon
if codon in self.stop_codons:
line += " Stop|"
else:
try:
amino = self.forward_table[codon]
except KeyError:
amino = "?"
except TranslationError:
amino = "?"
if codon in self.start_codons:
line += " %s(s)|" % amino
else:
line += " %s |" % amino
line += " " + c3
answer += "\n" + line
answer += "\n--+" + "+".join("---------" for c2 in letters) + "+--"
return answer
def make_back_table(table, default_stop_codon):
"""Back a back-table (naive single codon mapping).
ONLY RETURNS A SINGLE CODON, chosen from the possible alternatives
based on their sort order.
"""
# Do the sort so changes in the hash implementation won't affect
# the result when one amino acid is coded by more than one codon.
back_table = {}
for key in sorted(table):
back_table[table[key]] = key
back_table[None] = default_stop_codon
return back_table
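# Hedged mini-example (illustrative only, not NCBI data): with a toy forward table,
# the codon that sorts last wins for each amino acid, and None maps to the default
# stop codon:
#     make_back_table({"TTT": "F", "TTC": "F", "ATG": "M"}, "TAA")
#     == {"F": "TTT", "M": "ATG", None: "TAA"}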
class NCBICodonTable(CodonTable):
nucleotide_alphabet = Alphabet.generic_nucleotide
protein_alphabet = IUPAC.protein
def __init__(self, id, names, table, start_codons, stop_codons):
self.id = id
self.names = names
self.forward_table = table
self.back_table = make_back_table(table, stop_codons[0])
self.start_codons = start_codons
self.stop_codons = stop_codons
class NCBICodonTableDNA(NCBICodonTable):
nucleotide_alphabet = IUPAC.unambiguous_dna
class NCBICodonTableRNA(NCBICodonTable):
nucleotide_alphabet = IUPAC.unambiguous_rna
# ######## Deal with ambiguous forward translations
class AmbiguousCodonTable(CodonTable):
def __init__(self, codon_table,
ambiguous_nucleotide_alphabet,
ambiguous_nucleotide_values,
ambiguous_protein_alphabet,
ambiguous_protein_values):
CodonTable.__init__(self,
ambiguous_nucleotide_alphabet,
ambiguous_protein_alphabet,
AmbiguousForwardTable(codon_table.forward_table,
ambiguous_nucleotide_values,
ambiguous_protein_values),
codon_table.back_table,
# These two are WRONG! I need to get the
# list of ambiguous codons which code for
# the stop codons XXX
list_ambiguous_codons(codon_table.start_codons, ambiguous_nucleotide_values),
list_ambiguous_codons(codon_table.stop_codons, ambiguous_nucleotide_values)
)
self._codon_table = codon_table
# Be sneaky and forward attribute lookups to the original table.
# This lets us get the names, if the original table is an NCBI
# table.
def __getattr__(self, name):
return getattr(self._codon_table, name)
def list_possible_proteins(codon, forward_table, ambiguous_nucleotide_values):
c1, c2, c3 = codon
x1 = ambiguous_nucleotide_values[c1]
x2 = ambiguous_nucleotide_values[c2]
x3 = ambiguous_nucleotide_values[c3]
possible = {}
stops = []
for y1 in x1:
for y2 in x2:
for y3 in x3:
try:
possible[forward_table[y1 + y2 + y3]] = 1
except KeyError:
# If tripping over a stop codon
stops.append(y1 + y2 + y3)
if stops:
if possible:
raise TranslationError("ambiguous codon %r codes for both"
" proteins and stop codons" % codon)
# This is a true stop codon - tell the caller about it
raise KeyError(codon)
return list(possible)
def list_ambiguous_codons(codons, ambiguous_nucleotide_values):
"""Extends a codon list to include all possible ambigous codons.
e.g.::
['TAG', 'TAA'] -> ['TAG', 'TAA', 'TAR']
['UAG', 'UGA'] -> ['UAG', 'UGA', 'URA']
Note that ['TAG', 'TGA'] -> ['TAG', 'TGA'], this does not add 'TRR'.
Thus only two more codons are added in the following:
e.g.::
['TGA', 'TAA', 'TAG'] -> ['TGA', 'TAA', 'TAG', 'TRA', 'TAR']
Returns a new (longer) list of codon strings.
"""
# Note ambiguous_nucleotide_values['R'] = 'AG' (etc)
# This will generate things like 'TRR' from ['TAG', 'TGA'], which
# we don't want to include:
c1_list = sorted(letter for (letter, meanings)
in ambiguous_nucleotide_values.items()
if set(codon[0] for codon in codons).issuperset(set(meanings)))
c2_list = sorted(letter for (letter, meanings)
in ambiguous_nucleotide_values.items()
if set(codon[1] for codon in codons).issuperset(set(meanings)))
c3_list = sorted(letter for (letter, meanings)
in ambiguous_nucleotide_values.items()
if set(codon[2] for codon in codons).issuperset(set(meanings)))
# candidates is a list (not a set) to preserve the iteration order
candidates = []
for c1 in c1_list:
for c2 in c2_list:
for c3 in c3_list:
codon = c1 + c2 + c3
if codon not in candidates and codon not in codons:
candidates.append(codon)
answer = codons[:] # copy
# print "Have %i new candidates" % len(candidates)
for ambig_codon in candidates:
wanted = True
# e.g. 'TRR' -> 'TAA', 'TAG', 'TGA', 'TGG'
for codon in [c1 + c2 + c3
for c1 in ambiguous_nucleotide_values[ambig_codon[0]]
for c2 in ambiguous_nucleotide_values[ambig_codon[1]]
for c3 in ambiguous_nucleotide_values[ambig_codon[2]]]:
if codon not in codons:
# This ambiguous codon can code for a non-stop, exclude it!
wanted = False
# print "Rejecting %s" % ambig_codon
continue
if wanted:
answer.append(ambig_codon)
return answer
assert list_ambiguous_codons(['TGA', 'TAA'], IUPACData.ambiguous_dna_values) == ['TGA', 'TAA', 'TRA']
assert list_ambiguous_codons(['TAG', 'TGA'], IUPACData.ambiguous_dna_values) == ['TAG', 'TGA']
assert list_ambiguous_codons(['TAG', 'TAA'], IUPACData.ambiguous_dna_values) == ['TAG', 'TAA', 'TAR']
assert list_ambiguous_codons(['UAG', 'UAA'], IUPACData.ambiguous_rna_values) == ['UAG', 'UAA', 'UAR']
assert list_ambiguous_codons(['TGA', 'TAA', 'TAG'],
IUPACData.ambiguous_dna_values) == ['TGA', 'TAA', 'TAG', 'TAR', 'TRA']
# Forward translation is "onto", that is, any given codon always maps
# to the same protein, or it doesn't map at all. Thus, I can build
# off of an existing table to produce the ambiguous mappings.
#
# This handles the general case. Perhaps it's overkill?
# >>> t = CodonTable.ambiguous_dna_by_id[1]
# >>> t.forward_table["AAT"]
# 'N'
# >>> t.forward_table["GAT"]
# 'D'
# >>> t.forward_table["RAT"]
# 'B'
# >>> t.forward_table["YTA"]
# 'L'
class AmbiguousForwardTable(object):
def __init__(self, forward_table, ambiguous_nucleotide, ambiguous_protein):
self.forward_table = forward_table
self.ambiguous_nucleotide = ambiguous_nucleotide
self.ambiguous_protein = ambiguous_protein
inverted = {}
for name, val in ambiguous_protein.items():
for c in val:
x = inverted.get(c, {})
x[name] = 1
inverted[c] = x
for name, val in inverted.items():
inverted[name] = list(val)
self._inverted = inverted
self._cache = {}
def get(self, codon, failobj=None):
try:
return self.__getitem__(codon)
except KeyError:
return failobj
def __getitem__(self, codon):
try:
x = self._cache[codon]
except KeyError:
pass
else:
if x is TranslationError:
raise TranslationError(codon) # no unique translation
if x is KeyError:
raise KeyError(codon) # it's a stop codon
return x
try:
x = self.forward_table[codon]
self._cache[codon] = x
return x
except KeyError:
pass
# XXX Need to make part of this into a method which returns
# a list of all possible encodings for a codon!
try:
possible = list_possible_proteins(codon,
self.forward_table,
self.ambiguous_nucleotide)
except KeyError:
self._cache[codon] = KeyError
raise KeyError(codon) # stop codon
except TranslationError:
self._cache[codon] = TranslationError
raise TranslationError(codon) # does not code
assert len(possible) > 0, "unambiguous codons must code"
# Hah! Only one possible protein, so use it
if len(possible) == 1:
self._cache[codon] = possible[0]
return possible[0]
# See if there's an ambiguous protein encoding for the multiples.
# Find residues which exist in every coding set.
ambiguous_possible = {}
for amino in possible:
for term in self._inverted[amino]:
ambiguous_possible[term] = ambiguous_possible.get(term, 0) + 1
n = len(possible)
possible = []
for amino, val in ambiguous_possible.items():
if val == n:
possible.append(amino)
# No amino acid encoding for the results
if len(possible) == 0:
self._cache[codon] = TranslationError
raise TranslationError(codon) # no valid translation
# All of these are valid, so choose one
# To be unique, sort by smallet ambiguity then alphabetically
# Can get this if "X" encodes for everything.
# def _sort(x, y, table = self.ambiguous_protein):
# a = cmp(len(table[x]), len(table[y]))
# if a == 0:
# return cmp(x, y)
# return a
# Sort by key is 2.x and 3.x compatible
possible.sort(key=lambda x: (len(self.ambiguous_protein[x]), x))
x = possible[0]
self._cache[codon] = x
return x
def register_ncbi_table(name, alt_name, id,
table, start_codons, stop_codons):
"""Turns codon table data into objects, and stores them in the dictionaries (PRIVATE)."""
# In most cases names are divided by "; ", however there is also
# 'Bacterial and Plant Plastid' (which used to be just 'Bacterial')
names = [x.strip() for x in name.replace(" and ", "; ").split("; ")]
dna = NCBICodonTableDNA(id, names + [alt_name], table, start_codons,
stop_codons)
ambig_dna = AmbiguousCodonTable(dna,
IUPAC.ambiguous_dna,
IUPACData.ambiguous_dna_values,
IUPAC.extended_protein,
IUPACData.extended_protein_values)
# replace all T's with U's for the RNA tables
rna_table = {}
generic_table = {}
for codon, val in table.items():
generic_table[codon] = val
codon = codon.replace("T", "U")
generic_table[codon] = val
rna_table[codon] = val
rna_start_codons = []
generic_start_codons = []
for codon in start_codons:
generic_start_codons.append(codon)
codon = codon.replace("T", "U")
generic_start_codons.append(codon)
rna_start_codons.append(codon)
rna_stop_codons = []
generic_stop_codons = []
for codon in stop_codons:
generic_stop_codons.append(codon)
codon = codon.replace("T", "U")
generic_stop_codons.append(codon)
rna_stop_codons.append(codon)
generic = NCBICodonTable(id, names + [alt_name], generic_table,
generic_start_codons, generic_stop_codons)
# The following isn't very elegant, but seems to work nicely.
_merged_values = dict(IUPACData.ambiguous_rna_values.items())
_merged_values["T"] = "U"
ambig_generic = AmbiguousCodonTable(generic,
Alphabet.NucleotideAlphabet(),
_merged_values,
IUPAC.extended_protein,
IUPACData.extended_protein_values)
rna = NCBICodonTableRNA(id, names + [alt_name], rna_table,
rna_start_codons, rna_stop_codons)
ambig_rna = AmbiguousCodonTable(rna,
IUPAC.ambiguous_rna,
IUPACData.ambiguous_rna_values,
IUPAC.extended_protein,
IUPACData.extended_protein_values)
if id == 1:
global standard_dna_table, standard_rna_table
standard_dna_table = dna
standard_rna_table = rna
unambiguous_dna_by_id[id] = dna
unambiguous_rna_by_id[id] = rna
generic_by_id[id] = generic
ambiguous_dna_by_id[id] = ambig_dna
ambiguous_rna_by_id[id] = ambig_rna
ambiguous_generic_by_id[id] = ambig_generic
if alt_name is not None:
names.append(alt_name)
for name in names:
unambiguous_dna_by_name[name] = dna
unambiguous_rna_by_name[name] = rna
generic_by_name[name] = generic
ambiguous_dna_by_name[name] = ambig_dna
ambiguous_rna_by_name[name] = ambig_rna
ambiguous_generic_by_name[name] = ambig_generic
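# Quick lookup sketch (illustrative; relies only on the dictionaries populated
# above and on the Standard table registered below):
#
#     standard = unambiguous_dna_by_id[1]
#     standard.forward_table["ATG"]                               # 'M'
#     "TAA" in standard.stop_codons                               # True
#     unambiguous_rna_by_name["Standard"].forward_table["AUG"]    # 'M'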
# These tables were created from the data file
# ftp://ftp.ncbi.nih.gov/entrez/misc/data/gc.prt
# using the following:
# import re
# for line in open("gc.prt").readlines():
# if line[:2] == " {":
# names = []
# id = None
# aa = None
# start = None
# bases = []
# elif line[:6] == " name":
# names.append(re.search('"([^"]*)"', line).group(1))
# elif line[:8] == " name":
# names.append(re.search('"(.*)$', line).group(1))
# elif line == ' Mitochondrial; Mycoplasma; Spiroplasma" ,\n':
# names[-1] = names[-1] + " Mitochondrial; Mycoplasma; Spiroplasma"
# elif line[:4] == " id":
# id = int(re.search('(\d+)', line).group(1))
# elif line[:10] == " ncbieaa ":
# aa = line[12:12+64]
# elif line[:10] == " sncbieaa":
# start = line[12:12+64]
# elif line[:9] == " -- Base":
# bases.append(line[12:12+64])
# elif line[:2] == " }":
# assert names != [] and id is not None and aa is not None
# assert start is not None and bases != []
# if len(names) == 1:
# names.append(None)
# print("register_ncbi_table(name=%s," % repr(names[0]))
# print(" alt_name=%s, id=%d," % \
# (repr(names[1]), id))
# print(" table={")
# s = " "
# for i in range(64):
# if aa[i] != "*":
# t = " '%s%s%s': '%s'," % (bases[0][i], bases[1][i],
# bases[2][i], aa[i])
# if len(s) + len(t) > 75:
# print(s)
# s = " " + t
# else:
# s = s + t
# print("%s }," % s)
# s = " stop_codons=["
# for i in range(64):
# if aa[i] == "*":
# t = "'%s%s%s'," % (bases[0][i], bases[1][i], bases[2][i])
# if len(s) + len(t) > 75:
# s_with_spaces = s.replace("','", "', '")
# print(s_with_spaces)
# s = " " + t
# else:
# s = s + t
# s_with_spaces = s.replace("','", "', '")
# print("%s ]," % s_with_spaces)
# s = " start_codons=["
# for i in range(64):
# if start[i] == "M":
# t = "'%s%s%s'," % (bases[0][i], bases[1][i], bases[2][i])
# if len(s) + len(t) > 75:
# s_with_spaces = s.replace("','", "', '")
# print(s_with_spaces)
# s = " " + t
# else:
# s = s + t
# s_with_spaces = s.replace("','", "', '")
# print("%s ]" % s_with_spaces)
# print(" )")
# elif line[:2] == "--" or line == "\n" or line == "}\n" or \
# line == 'Genetic-code-table ::= {\n':
# pass
# else:
# raise Exception("Unparsed: " + repr(line))
register_ncbi_table(name='Standard',
alt_name='SGC0', id=1,
table={
'TTT': 'F', 'TTC': 'F', 'TTA': 'L', 'TTG': 'L', 'TCT': 'S',
'TCC': 'S', 'TCA': 'S', 'TCG': 'S', 'TAT': 'Y', 'TAC': 'Y',
'TGT': 'C', 'TGC': 'C', 'TGG': 'W', 'CTT': 'L', 'CTC': 'L',
'CTA': 'L', 'CTG': 'L', 'CCT': 'P', 'CCC': 'P', 'CCA': 'P',
'CCG': 'P', 'CAT': 'H', 'CAC': 'H', 'CAA': 'Q', 'CAG': 'Q',
'CGT': 'R', 'CGC': 'R', 'CGA': 'R', 'CGG': 'R', 'ATT': 'I',
'ATC': 'I', 'ATA': 'I', 'ATG': 'M', 'ACT': 'T', 'ACC': 'T',
'ACA': 'T', 'ACG': 'T', 'AAT': 'N', 'AAC': 'N', 'AAA': 'K',
'AAG': 'K', 'AGT': 'S', 'AGC': 'S', 'AGA': 'R', 'AGG': 'R',
'GTT': 'V', 'GTC': 'V', 'GTA': 'V', 'GTG': 'V', 'GCT': 'A',
'GCC': 'A', 'GCA': 'A', 'GCG': 'A', 'GAT': 'D', 'GAC': 'D',
'GAA': 'E', 'GAG': 'E', 'GGT': 'G', 'GGC': 'G', 'GGA': 'G',
'GGG': 'G', },
stop_codons=['TAA', 'TAG', 'TGA', ],
start_codons=['TTG', 'CTG', 'ATG', ]
)
register_ncbi_table(name='Vertebrate Mitochondrial',
alt_name='SGC1', id=2,
table={
'TTT': 'F', 'TTC': 'F', 'TTA': 'L', 'TTG': 'L', 'TCT': 'S',
'TCC': 'S', 'TCA': 'S', 'TCG': 'S', 'TAT': 'Y', 'TAC': 'Y',
'TGT': 'C', 'TGC': 'C', 'TGA': 'W', 'TGG': 'W', 'CTT': 'L',
'CTC': 'L', 'CTA': 'L', 'CTG': 'L', 'CCT': 'P', 'CCC': 'P',
'CCA': 'P', 'CCG': 'P', 'CAT': 'H', 'CAC': 'H', 'CAA': 'Q',
'CAG': 'Q', 'CGT': 'R', 'CGC': 'R', 'CGA': 'R', 'CGG': 'R',
'ATT': 'I', 'ATC': 'I', 'ATA': 'M', 'ATG': 'M', 'ACT': 'T',
'ACC': 'T', 'ACA': 'T', 'ACG': 'T', 'AAT': 'N', 'AAC': 'N',
'AAA': 'K', 'AAG': 'K', 'AGT': 'S', 'AGC': 'S', 'GTT': 'V',
'GTC': 'V', 'GTA': 'V', 'GTG': 'V', 'GCT': 'A', 'GCC': 'A',
'GCA': 'A', 'GCG': 'A', 'GAT': 'D', 'GAC': 'D', 'GAA': 'E',
'GAG': 'E', 'GGT': 'G', 'GGC': 'G', 'GGA': 'G', 'GGG': 'G', },
stop_codons=['TAA', 'TAG', 'AGA', 'AGG', ],
start_codons=['ATT', 'ATC', 'ATA', 'ATG', 'GTG', ]
)
register_ncbi_table(name='Yeast Mitochondrial',
alt_name='SGC2', id=3,
table={
'TTT': 'F', 'TTC': 'F', 'TTA': 'L', 'TTG': 'L', 'TCT': 'S',
'TCC': 'S', 'TCA': 'S', 'TCG': 'S', 'TAT': 'Y', 'TAC': 'Y',
'TGT': 'C', 'TGC': 'C', 'TGA': 'W', 'TGG': 'W', 'CTT': 'T',
'CTC': 'T', 'CTA': 'T', 'CTG': 'T', 'CCT': 'P', 'CCC': 'P',
'CCA': 'P', 'CCG': 'P', 'CAT': 'H', 'CAC': 'H', 'CAA': 'Q',
'CAG': 'Q', 'CGT': 'R', 'CGC': 'R', 'CGA': 'R', 'CGG': 'R',
'ATT': 'I', 'ATC': 'I', 'ATA': 'M', 'ATG': 'M', 'ACT': 'T',
'ACC': 'T', 'ACA': 'T', 'ACG': 'T', 'AAT': 'N', 'AAC': 'N',
'AAA': 'K', 'AAG': 'K', 'AGT': 'S', 'AGC': 'S', 'AGA': 'R',
'AGG': 'R', 'GTT': 'V', 'GTC': 'V', 'GTA': 'V', 'GTG': 'V',
'GCT': 'A', 'GCC': 'A', 'GCA': 'A', 'GCG': 'A', 'GAT': 'D',
'GAC': 'D', 'GAA': 'E', 'GAG': 'E', 'GGT': 'G', 'GGC': 'G',
'GGA': 'G', 'GGG': 'G', },
stop_codons=['TAA', 'TAG', ],
start_codons=['ATA', 'ATG', ]
)
register_ncbi_table(name='Mold Mitochondrial; Protozoan Mitochondrial; Coelenterate Mitochondrial; Mycoplasma; Spiroplasma',
alt_name='SGC3', id=4,
table={
'TTT': 'F', 'TTC': 'F', 'TTA': 'L', 'TTG': 'L', 'TCT': 'S',
'TCC': 'S', 'TCA': 'S', 'TCG': 'S', 'TAT': 'Y', 'TAC': 'Y',
'TGT': 'C', 'TGC': 'C', 'TGA': 'W', 'TGG': 'W', 'CTT': 'L',
'CTC': 'L', 'CTA': 'L', 'CTG': 'L', 'CCT': 'P', 'CCC': 'P',
'CCA': 'P', 'CCG': 'P', 'CAT': 'H', 'CAC': 'H', 'CAA': 'Q',
'CAG': 'Q', 'CGT': 'R', 'CGC': 'R', 'CGA': 'R', 'CGG': 'R',
'ATT': 'I', 'ATC': 'I', 'ATA': 'I', 'ATG': 'M', 'ACT': 'T',
'ACC': 'T', 'ACA': 'T', 'ACG': 'T', 'AAT': 'N', 'AAC': 'N',
'AAA': 'K', 'AAG': 'K', 'AGT': 'S', 'AGC': 'S', 'AGA': 'R',
'AGG': 'R', 'GTT': 'V', 'GTC': 'V', 'GTA': 'V', 'GTG': 'V',
'GCT': 'A', 'GCC': 'A', 'GCA': 'A', 'GCG': 'A', 'GAT': 'D',
'GAC': 'D', 'GAA': 'E', 'GAG': 'E', 'GGT': 'G', 'GGC': 'G',
'GGA': 'G', 'GGG': 'G', },
stop_codons=['TAA', 'TAG', ],
start_codons=['TTA', 'TTG', 'CTG', 'ATT', 'ATC',
'ATA', 'ATG', 'GTG', ]
)
register_ncbi_table(name='Invertebrate Mitochondrial',
alt_name='SGC4', id=5,
table={
'TTT': 'F', 'TTC': 'F', 'TTA': 'L', 'TTG': 'L', 'TCT': 'S',
'TCC': 'S', 'TCA': 'S', 'TCG': 'S', 'TAT': 'Y', 'TAC': 'Y',
'TGT': 'C', 'TGC': 'C', 'TGA': 'W', 'TGG': 'W', 'CTT': 'L',
'CTC': 'L', 'CTA': 'L', 'CTG': 'L', 'CCT': 'P', 'CCC': 'P',
'CCA': 'P', 'CCG': 'P', 'CAT': 'H', 'CAC': 'H', 'CAA': 'Q',
'CAG': 'Q', 'CGT': 'R', 'CGC': 'R', 'CGA': 'R', 'CGG': 'R',
'ATT': 'I', 'ATC': 'I', 'ATA': 'M', 'ATG': 'M', 'ACT': 'T',
'ACC': 'T', 'ACA': 'T', 'ACG': 'T', 'AAT': 'N', 'AAC': 'N',
'AAA': 'K', 'AAG': 'K', 'AGT': 'S', 'AGC': 'S', 'AGA': 'S',
'AGG': 'S', 'GTT': 'V', 'GTC': 'V', 'GTA': 'V', 'GTG': 'V',
'GCT': 'A', 'GCC': 'A', 'GCA': 'A', 'GCG': 'A', 'GAT': 'D',
'GAC': 'D', 'GAA': 'E', 'GAG': 'E', 'GGT': 'G', 'GGC': 'G',
'GGA': 'G', 'GGG': 'G', },
stop_codons=['TAA', 'TAG', ],
start_codons=['TTG', 'ATT', 'ATC', 'ATA', 'ATG',
'GTG', ]
)
register_ncbi_table(name='Ciliate Nuclear; Dasycladacean Nuclear; Hexamita Nuclear',
alt_name='SGC5', id=6,
table={
'TTT': 'F', 'TTC': 'F', 'TTA': 'L', 'TTG': 'L', 'TCT': 'S',
'TCC': 'S', 'TCA': 'S', 'TCG': 'S', 'TAT': 'Y', 'TAC': 'Y',
'TAA': 'Q', 'TAG': 'Q', 'TGT': 'C', 'TGC': 'C', 'TGG': 'W',
'CTT': 'L', 'CTC': 'L', 'CTA': 'L', 'CTG': 'L', 'CCT': 'P',
'CCC': 'P', 'CCA': 'P', 'CCG': 'P', 'CAT': 'H', 'CAC': 'H',
'CAA': 'Q', 'CAG': 'Q', 'CGT': 'R', 'CGC': 'R', 'CGA': 'R',
'CGG': 'R', 'ATT': 'I', 'ATC': 'I', 'ATA': 'I', 'ATG': 'M',
'ACT': 'T', 'ACC': 'T', 'ACA': 'T', 'ACG': 'T', 'AAT': 'N',
'AAC': 'N', 'AAA': 'K', 'AAG': 'K', 'AGT': 'S', 'AGC': 'S',
'AGA': 'R', 'AGG': 'R', 'GTT': 'V', 'GTC': 'V', 'GTA': 'V',
'GTG': 'V', 'GCT': 'A', 'GCC': 'A', 'GCA': 'A', 'GCG': 'A',
'GAT': 'D', 'GAC': 'D', 'GAA': 'E', 'GAG': 'E', 'GGT': 'G',
'GGC': 'G', 'GGA': 'G', 'GGG': 'G', },
stop_codons=['TGA', ],
start_codons=['ATG', ]
)
register_ncbi_table(name='Echinoderm Mitochondrial; Flatworm Mitochondrial',
alt_name='SGC8', id=9,
table={
'TTT': 'F', 'TTC': 'F', 'TTA': 'L', 'TTG': 'L', 'TCT': 'S',
'TCC': 'S', 'TCA': 'S', 'TCG': 'S', 'TAT': 'Y', 'TAC': 'Y',
'TGT': 'C', 'TGC': 'C', 'TGA': 'W', 'TGG': 'W', 'CTT': 'L',
'CTC': 'L', 'CTA': 'L', 'CTG': 'L', 'CCT': 'P', 'CCC': 'P',
'CCA': 'P', 'CCG': 'P', 'CAT': 'H', 'CAC': 'H', 'CAA': 'Q',
'CAG': 'Q', 'CGT': 'R', 'CGC': 'R', 'CGA': 'R', 'CGG': 'R',
'ATT': 'I', 'ATC': 'I', 'ATA': 'I', 'ATG': 'M', 'ACT': 'T',
'ACC': 'T', 'ACA': 'T', 'ACG': 'T', 'AAT': 'N', 'AAC': 'N',
'AAA': 'N', 'AAG': 'K', 'AGT': 'S', 'AGC': 'S', 'AGA': 'S',
'AGG': 'S', 'GTT': 'V', 'GTC': 'V', 'GTA': 'V', 'GTG': 'V',
'GCT': 'A', 'GCC': 'A', 'GCA': 'A', 'GCG': 'A', 'GAT': 'D',
'GAC': 'D', 'GAA': 'E', 'GAG': 'E', 'GGT': 'G', 'GGC': 'G',
'GGA': 'G', 'GGG': 'G', },
stop_codons=['TAA', 'TAG', ],
start_codons=['ATG', 'GTG', ]
)
register_ncbi_table(name='Euplotid Nuclear',
alt_name='SGC9', id=10,
table={
'TTT': 'F', 'TTC': 'F', 'TTA': 'L', 'TTG': 'L', 'TCT': 'S',
'TCC': 'S', 'TCA': 'S', 'TCG': 'S', 'TAT': 'Y', 'TAC': 'Y',
'TGT': 'C', 'TGC': 'C', 'TGA': 'C', 'TGG': 'W', 'CTT': 'L',
'CTC': 'L', 'CTA': 'L', 'CTG': 'L', 'CCT': 'P', 'CCC': 'P',
'CCA': 'P', 'CCG': 'P', 'CAT': 'H', 'CAC': 'H', 'CAA': 'Q',
'CAG': 'Q', 'CGT': 'R', 'CGC': 'R', 'CGA': 'R', 'CGG': 'R',
'ATT': 'I', 'ATC': 'I', 'ATA': 'I', 'ATG': 'M', 'ACT': 'T',
'ACC': 'T', 'ACA': 'T', 'ACG': 'T', 'AAT': 'N', 'AAC': 'N',
'AAA': 'K', 'AAG': 'K', 'AGT': 'S', 'AGC': 'S', 'AGA': 'R',
'AGG': 'R', 'GTT': 'V', 'GTC': 'V', 'GTA': 'V', 'GTG': 'V',
'GCT': 'A', 'GCC': 'A', 'GCA': 'A', 'GCG': 'A', 'GAT': 'D',
'GAC': 'D', 'GAA': 'E', 'GAG': 'E', 'GGT': 'G', 'GGC': 'G',
'GGA': 'G', 'GGG': 'G', },
stop_codons=['TAA', 'TAG', ],
start_codons=['ATG', ]
)
register_ncbi_table(name='Bacterial and Plant Plastid',
alt_name=None, id=11,
table={
'TTT': 'F', 'TTC': 'F', 'TTA': 'L', 'TTG': 'L', 'TCT': 'S',
'TCC': 'S', 'TCA': 'S', 'TCG': 'S', 'TAT': 'Y', 'TAC': 'Y',
'TGT': 'C', 'TGC': 'C', 'TGG': 'W', 'CTT': 'L', 'CTC': 'L',
'CTA': 'L', 'CTG': 'L', 'CCT': 'P', 'CCC': 'P', 'CCA': 'P',
'CCG': 'P', 'CAT': 'H', 'CAC': 'H', 'CAA': 'Q', 'CAG': 'Q',
'CGT': 'R', 'CGC': 'R', 'CGA': 'R', 'CGG': 'R', 'ATT': 'I',
'ATC': 'I', 'ATA': 'I', 'ATG': 'M', 'ACT': 'T', 'ACC': 'T',
'ACA': 'T', 'ACG': 'T', 'AAT': 'N', 'AAC': 'N', 'AAA': 'K',
'AAG': 'K', 'AGT': 'S', 'AGC': 'S', 'AGA': 'R', 'AGG': 'R',
'GTT': 'V', 'GTC': 'V', 'GTA': 'V', 'GTG': 'V', 'GCT': 'A',
'GCC': 'A', 'GCA': 'A', 'GCG': 'A', 'GAT': 'D', 'GAC': 'D',
'GAA': 'E', 'GAG': 'E', 'GGT': 'G', 'GGC': 'G', 'GGA': 'G',
'GGG': 'G', },
stop_codons=['TAA', 'TAG', 'TGA', ],
start_codons=['TTG', 'CTG', 'ATT', 'ATC', 'ATA',
'ATG', 'GTG', ]
)
register_ncbi_table(name='Alternative Yeast Nuclear',
alt_name=None, id=12,
table={
'TTT': 'F', 'TTC': 'F', 'TTA': 'L', 'TTG': 'L', 'TCT': 'S',
'TCC': 'S', 'TCA': 'S', 'TCG': 'S', 'TAT': 'Y', 'TAC': 'Y',
'TGT': 'C', 'TGC': 'C', 'TGG': 'W', 'CTT': 'L', 'CTC': 'L',
'CTA': 'L', 'CTG': 'S', 'CCT': 'P', 'CCC': 'P', 'CCA': 'P',
'CCG': 'P', 'CAT': 'H', 'CAC': 'H', 'CAA': 'Q', 'CAG': 'Q',
'CGT': 'R', 'CGC': 'R', 'CGA': 'R', 'CGG': 'R', 'ATT': 'I',
'ATC': 'I', 'ATA': 'I', 'ATG': 'M', 'ACT': 'T', 'ACC': 'T',
'ACA': 'T', 'ACG': 'T', 'AAT': 'N', 'AAC': 'N', 'AAA': 'K',
'AAG': 'K', 'AGT': 'S', 'AGC': 'S', 'AGA': 'R', 'AGG': 'R',
'GTT': 'V', 'GTC': 'V', 'GTA': 'V', 'GTG': 'V', 'GCT': 'A',
'GCC': 'A', 'GCA': 'A', 'GCG': 'A', 'GAT': 'D', 'GAC': 'D',
'GAA': 'E', 'GAG': 'E', 'GGT': 'G', 'GGC': 'G', 'GGA': 'G',
'GGG': 'G', },
stop_codons=['TAA', 'TAG', 'TGA', ],
start_codons=['CTG', 'ATG', ]
)
register_ncbi_table(name='Ascidian Mitochondrial',
alt_name=None, id=13,
table={
'TTT': 'F', 'TTC': 'F', 'TTA': 'L', 'TTG': 'L', 'TCT': 'S',
'TCC': 'S', 'TCA': 'S', 'TCG': 'S', 'TAT': 'Y', 'TAC': 'Y',
'TGT': 'C', 'TGC': 'C', 'TGA': 'W', 'TGG': 'W', 'CTT': 'L',
'CTC': 'L', 'CTA': 'L', 'CTG': 'L', 'CCT': 'P', 'CCC': 'P',
'CCA': 'P', 'CCG': 'P', 'CAT': 'H', 'CAC': 'H', 'CAA': 'Q',
'CAG': 'Q', 'CGT': 'R', 'CGC': 'R', 'CGA': 'R', 'CGG': 'R',
'ATT': 'I', 'ATC': 'I', 'ATA': 'M', 'ATG': 'M', 'ACT': 'T',
'ACC': 'T', 'ACA': 'T', 'ACG': 'T', 'AAT': 'N', 'AAC': 'N',
'AAA': 'K', 'AAG': 'K', 'AGT': 'S', 'AGC': 'S', 'AGA': 'G',
'AGG': 'G', 'GTT': 'V', 'GTC': 'V', 'GTA': 'V', 'GTG': 'V',
'GCT': 'A', 'GCC': 'A', 'GCA': 'A', 'GCG': 'A', 'GAT': 'D',
'GAC': 'D', 'GAA': 'E', 'GAG': 'E', 'GGT': 'G', 'GGC': 'G',
'GGA': 'G', 'GGG': 'G', },
stop_codons=['TAA', 'TAG', ],
start_codons=['TTG', 'ATA', 'ATG', 'GTG', ]
)
register_ncbi_table(name='Alternative Flatworm Mitochondrial',
alt_name=None, id=14,
table={
'TTT': 'F', 'TTC': 'F', 'TTA': 'L', 'TTG': 'L', 'TCT': 'S',
'TCC': 'S', 'TCA': 'S', 'TCG': 'S', 'TAT': 'Y', 'TAC': 'Y',
'TAA': 'Y', 'TGT': 'C', 'TGC': 'C', 'TGA': 'W', 'TGG': 'W',
'CTT': 'L', 'CTC': 'L', 'CTA': 'L', 'CTG': 'L', 'CCT': 'P',
'CCC': 'P', 'CCA': 'P', 'CCG': 'P', 'CAT': 'H', 'CAC': 'H',
'CAA': 'Q', 'CAG': 'Q', 'CGT': 'R', 'CGC': 'R', 'CGA': 'R',
'CGG': 'R', 'ATT': 'I', 'ATC': 'I', 'ATA': 'I', 'ATG': 'M',
'ACT': 'T', 'ACC': 'T', 'ACA': 'T', 'ACG': 'T', 'AAT': 'N',
'AAC': 'N', 'AAA': 'N', 'AAG': 'K', 'AGT': 'S', 'AGC': 'S',
'AGA': 'S', 'AGG': 'S', 'GTT': 'V', 'GTC': 'V', 'GTA': 'V',
'GTG': 'V', 'GCT': 'A', 'GCC': 'A', 'GCA': 'A', 'GCG': 'A',
'GAT': 'D', 'GAC': 'D', 'GAA': 'E', 'GAG': 'E', 'GGT': 'G',
'GGC': 'G', 'GGA': 'G', 'GGG': 'G', },
stop_codons=['TAG', ],
start_codons=['ATG', ]
)
register_ncbi_table(name='Blepharisma Macronuclear',
alt_name=None, id=15,
table={
'TTT': 'F', 'TTC': 'F', 'TTA': 'L', 'TTG': 'L', 'TCT': 'S',
'TCC': 'S', 'TCA': 'S', 'TCG': 'S', 'TAT': 'Y', 'TAC': 'Y',
'TAG': 'Q', 'TGT': 'C', 'TGC': 'C', 'TGG': 'W', 'CTT': 'L',
'CTC': 'L', 'CTA': 'L', 'CTG': 'L', 'CCT': 'P', 'CCC': 'P',
'CCA': 'P', 'CCG': 'P', 'CAT': 'H', 'CAC': 'H', 'CAA': 'Q',
'CAG': 'Q', 'CGT': 'R', 'CGC': 'R', 'CGA': 'R', 'CGG': 'R',
'ATT': 'I', 'ATC': 'I', 'ATA': 'I', 'ATG': 'M', 'ACT': 'T',
'ACC': 'T', 'ACA': 'T', 'ACG': 'T', 'AAT': 'N', 'AAC': 'N',
'AAA': 'K', 'AAG': 'K', 'AGT': 'S', 'AGC': 'S', 'AGA': 'R',
'AGG': 'R', 'GTT': 'V', 'GTC': 'V', 'GTA': 'V', 'GTG': 'V',
'GCT': 'A', 'GCC': 'A', 'GCA': 'A', 'GCG': 'A', 'GAT': 'D',
'GAC': 'D', 'GAA': 'E', 'GAG': 'E', 'GGT': 'G', 'GGC': 'G',
'GGA': 'G', 'GGG': 'G', },
stop_codons=['TAA', 'TGA', ],
start_codons=['ATG', ]
)
register_ncbi_table(name='Chlorophycean Mitochondrial',
alt_name=None, id=16,
table={
'TTT': 'F', 'TTC': 'F', 'TTA': 'L', 'TTG': 'L', 'TCT': 'S',
'TCC': 'S', 'TCA': 'S', 'TCG': 'S', 'TAT': 'Y', 'TAC': 'Y',
'TAG': 'L', 'TGT': 'C', 'TGC': 'C', 'TGG': 'W', 'CTT': 'L',
'CTC': 'L', 'CTA': 'L', 'CTG': 'L', 'CCT': 'P', 'CCC': 'P',
'CCA': 'P', 'CCG': 'P', 'CAT': 'H', 'CAC': 'H', 'CAA': 'Q',
'CAG': 'Q', 'CGT': 'R', 'CGC': 'R', 'CGA': 'R', 'CGG': 'R',
'ATT': 'I', 'ATC': 'I', 'ATA': 'I', 'ATG': 'M', 'ACT': 'T',
'ACC': 'T', 'ACA': 'T', 'ACG': 'T', 'AAT': 'N', 'AAC': 'N',
'AAA': 'K', 'AAG': 'K', 'AGT': 'S', 'AGC': 'S', 'AGA': 'R',
'AGG': 'R', 'GTT': 'V', 'GTC': 'V', 'GTA': 'V', 'GTG': 'V',
'GCT': 'A', 'GCC': 'A', 'GCA': 'A', 'GCG': 'A', 'GAT': 'D',
'GAC': 'D', 'GAA': 'E', 'GAG': 'E', 'GGT': 'G', 'GGC': 'G',
'GGA': 'G', 'GGG': 'G', },
stop_codons=['TAA', 'TGA', ],
start_codons=['ATG', ]
)
register_ncbi_table(name='Trematode Mitochondrial',
alt_name=None, id=21,
table={
'TTT': 'F', 'TTC': 'F', 'TTA': 'L', 'TTG': 'L', 'TCT': 'S',
'TCC': 'S', 'TCA': 'S', 'TCG': 'S', 'TAT': 'Y', 'TAC': 'Y',
'TGT': 'C', 'TGC': 'C', 'TGA': 'W', 'TGG': 'W', 'CTT': 'L',
'CTC': 'L', 'CTA': 'L', 'CTG': 'L', 'CCT': 'P', 'CCC': 'P',
'CCA': 'P', 'CCG': 'P', 'CAT': 'H', 'CAC': 'H', 'CAA': 'Q',
'CAG': 'Q', 'CGT': 'R', 'CGC': 'R', 'CGA': 'R', 'CGG': 'R',
'ATT': 'I', 'ATC': 'I', 'ATA': 'M', 'ATG': 'M', 'ACT': 'T',
'ACC': 'T', 'ACA': 'T', 'ACG': 'T', 'AAT': 'N', 'AAC': 'N',
'AAA': 'N', 'AAG': 'K', 'AGT': 'S', 'AGC': 'S', 'AGA': 'S',
'AGG': 'S', 'GTT': 'V', 'GTC': 'V', 'GTA': 'V', 'GTG': 'V',
'GCT': 'A', 'GCC': 'A', 'GCA': 'A', 'GCG': 'A', 'GAT': 'D',
'GAC': 'D', 'GAA': 'E', 'GAG': 'E', 'GGT': 'G', 'GGC': 'G',
'GGA': 'G', 'GGG': 'G', },
stop_codons=['TAA', 'TAG', ],
start_codons=['ATG', 'GTG', ]
)
register_ncbi_table(name='Scenedesmus obliquus Mitochondrial',
alt_name=None, id=22,
table={
'TTT': 'F', 'TTC': 'F', 'TTA': 'L', 'TTG': 'L', 'TCT': 'S',
'TCC': 'S', 'TCG': 'S', 'TAT': 'Y', 'TAC': 'Y', 'TAG': 'L',
'TGT': 'C', 'TGC': 'C', 'TGG': 'W', 'CTT': 'L', 'CTC': 'L',
'CTA': 'L', 'CTG': 'L', 'CCT': 'P', 'CCC': 'P', 'CCA': 'P',
'CCG': 'P', 'CAT': 'H', 'CAC': 'H', 'CAA': 'Q', 'CAG': 'Q',
'CGT': 'R', 'CGC': 'R', 'CGA': 'R', 'CGG': 'R', 'ATT': 'I',
'ATC': 'I', 'ATA': 'I', 'ATG': 'M', 'ACT': 'T', 'ACC': 'T',
'ACA': 'T', 'ACG': 'T', 'AAT': 'N', 'AAC': 'N', 'AAA': 'K',
'AAG': 'K', 'AGT': 'S', 'AGC': 'S', 'AGA': 'R', 'AGG': 'R',
'GTT': 'V', 'GTC': 'V', 'GTA': 'V', 'GTG': 'V', 'GCT': 'A',
'GCC': 'A', 'GCA': 'A', 'GCG': 'A', 'GAT': 'D', 'GAC': 'D',
'GAA': 'E', 'GAG': 'E', 'GGT': 'G', 'GGC': 'G', 'GGA': 'G',
'GGG': 'G', },
stop_codons=['TCA', 'TAA', 'TGA', ],
start_codons=['ATG', ]
)
register_ncbi_table(name='Thraustochytrium Mitochondrial',
alt_name=None, id=23,
table={
'TTT': 'F', 'TTC': 'F', 'TTG': 'L', 'TCT': 'S', 'TCC': 'S',
'TCA': 'S', 'TCG': 'S', 'TAT': 'Y', 'TAC': 'Y', 'TGT': 'C',
'TGC': 'C', 'TGG': 'W', 'CTT': 'L', 'CTC': 'L', 'CTA': 'L',
'CTG': 'L', 'CCT': 'P', 'CCC': 'P', 'CCA': 'P', 'CCG': 'P',
'CAT': 'H', 'CAC': 'H', 'CAA': 'Q', 'CAG': 'Q', 'CGT': 'R',
'CGC': 'R', 'CGA': 'R', 'CGG': 'R', 'ATT': 'I', 'ATC': 'I',
'ATA': 'I', 'ATG': 'M', 'ACT': 'T', 'ACC': 'T', 'ACA': 'T',
'ACG': 'T', 'AAT': 'N', 'AAC': 'N', 'AAA': 'K', 'AAG': 'K',
'AGT': 'S', 'AGC': 'S', 'AGA': 'R', 'AGG': 'R', 'GTT': 'V',
'GTC': 'V', 'GTA': 'V', 'GTG': 'V', 'GCT': 'A', 'GCC': 'A',
'GCA': 'A', 'GCG': 'A', 'GAT': 'D', 'GAC': 'D', 'GAA': 'E',
'GAG': 'E', 'GGT': 'G', 'GGC': 'G', 'GGA': 'G', 'GGG': 'G', },
stop_codons=['TTA', 'TAA', 'TAG', 'TGA', ],
start_codons=['ATT', 'ATG', 'GTG', ]
)
register_ncbi_table(name='Pterobranchia Mitochondrial',
alt_name=None, id=24,
table={
'TTT': 'F', 'TTC': 'F', 'TTA': 'L', 'TTG': 'L', 'TCT': 'S',
'TCC': 'S', 'TCA': 'S', 'TCG': 'S', 'TAT': 'Y', 'TAC': 'Y',
'TGT': 'C', 'TGC': 'C', 'TGA': 'W', 'TGG': 'W', 'CTT': 'L',
'CTC': 'L', 'CTA': 'L', 'CTG': 'L', 'CCT': 'P', 'CCC': 'P',
'CCA': 'P', 'CCG': 'P', 'CAT': 'H', 'CAC': 'H', 'CAA': 'Q',
'CAG': 'Q', 'CGT': 'R', 'CGC': 'R', 'CGA': 'R', 'CGG': 'R',
'ATT': 'I', 'ATC': 'I', 'ATA': 'I', 'ATG': 'M', 'ACT': 'T',
'ACC': 'T', 'ACA': 'T', 'ACG': 'T', 'AAT': 'N', 'AAC': 'N',
'AAA': 'K', 'AAG': 'K', 'AGT': 'S', 'AGC': 'S', 'AGA': 'S',
'AGG': 'K', 'GTT': 'V', 'GTC': 'V', 'GTA': 'V', 'GTG': 'V',
'GCT': 'A', 'GCC': 'A', 'GCA': 'A', 'GCG': 'A', 'GAT': 'D',
'GAC': 'D', 'GAA': 'E', 'GAG': 'E', 'GGT': 'G', 'GGC': 'G',
'GGA': 'G', 'GGG': 'G', },
stop_codons=['TAA', 'TAG', ],
start_codons=['TTG', 'CTG', 'ATG', 'GTG', ],
)
register_ncbi_table(name='Candidate Division SR1 and Gracilibacteria',
alt_name=None, id=25,
table={
'TTT': 'F', 'TTC': 'F', 'TTA': 'L', 'TTG': 'L', 'TCT': 'S',
'TCC': 'S', 'TCA': 'S', 'TCG': 'S', 'TAT': 'Y', 'TAC': 'Y',
'TGT': 'C', 'TGC': 'C', 'TGG': 'W', 'CTT': 'L', 'CTC': 'L',
'CTA': 'L', 'CTG': 'L', 'CCT': 'P', 'CCC': 'P', 'CCA': 'P',
'CCG': 'P', 'CAT': 'H', 'CAC': 'H', 'CAA': 'Q', 'CAG': 'Q',
'CGT': 'R', 'CGC': 'R', 'CGA': 'R', 'CGG': 'R', 'ATT': 'I',
'ATC': 'I', 'ATA': 'I', 'ATG': 'M', 'ACT': 'T', 'ACC': 'T',
'ACA': 'T', 'ACG': 'T', 'AAT': 'N', 'AAC': 'N', 'AAA': 'K',
'AAG': 'K', 'AGT': 'S', 'AGC': 'S', 'AGA': 'R', 'AGG': 'R',
'GTT': 'V', 'GTC': 'V', 'GTA': 'V', 'GTG': 'V', 'GCT': 'A',
'GCC': 'A', 'GCA': 'A', 'GCG': 'A', 'GAT': 'D', 'GAC': 'D',
'GAA': 'E', 'GAG': 'E', 'GGT': 'G', 'GGC': 'G', 'GGA': 'G',
'GGG': 'G', 'TGA': 'G', },
stop_codons=['TAA', 'TAG', ],
start_codons=['TTG', 'CTG', 'ATG', ]
)
# Basic sanity tests on the tables registered above.
for key, val in generic_by_name.items():
assert key in ambiguous_generic_by_name[key].names
for key, val in generic_by_id.items():
assert ambiguous_generic_by_id[key].id == key
del key, val
for n in ambiguous_generic_by_id:
assert ambiguous_rna_by_id[n].forward_table["GUU"] == "V"
assert ambiguous_rna_by_id[n].forward_table["GUN"] == "V"
if n != 23:
# For table 23, UUN = F, L or stop.
assert ambiguous_rna_by_id[n].forward_table["UUN"] == "X" # F or L
# R = A or G, so URR = UAA or UGA / TRA = TAA or TGA = stop codons
if "UAA" in unambiguous_rna_by_id[n].stop_codons \
and "UGA" in unambiguous_rna_by_id[n].stop_codons:
try:
print(ambiguous_dna_by_id[n].forward_table["TRA"])
assert False, "Should be a stop only"
except KeyError:
pass
assert "URA" in ambiguous_generic_by_id[n].stop_codons
assert "URA" in ambiguous_rna_by_id[n].stop_codons
assert "TRA" in ambiguous_generic_by_id[n].stop_codons
assert "TRA" in ambiguous_dna_by_id[n].stop_codons
del n
assert ambiguous_generic_by_id[1] == ambiguous_generic_by_name["Standard"]
assert ambiguous_generic_by_id[4] == ambiguous_generic_by_name["SGC3"]
assert ambiguous_generic_by_id[11] == ambiguous_generic_by_name["Bacterial"]
assert ambiguous_generic_by_id[11] == ambiguous_generic_by_name["Plant Plastid"]
assert ambiguous_generic_by_id[15] == ambiguous_generic_by_name['Blepharisma Macronuclear']
assert ambiguous_generic_by_id[24] == ambiguous_generic_by_name["Pterobranchia Mitochondrial"]
assert generic_by_id[1] == generic_by_name["Standard"]
assert generic_by_id[4] == generic_by_name["SGC3"]
assert generic_by_id[11] == generic_by_name["Bacterial"]
assert generic_by_id[11] == generic_by_name["Plant Plastid"]
assert generic_by_id[15] == generic_by_name['Blepharisma Macronuclear']
assert generic_by_id[24] == generic_by_name["Pterobranchia Mitochondrial"]
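# A minimal codon-by-codon translation sketch using the tables built above (the
# helper name is made up for illustration; real Biopython translation is far more
# careful about frames, ambiguity and validation):
#
#     def _translate_dna(seq, table=standard_dna_table):
#         protein = []
#         for i in range(0, len(seq) - len(seq) % 3, 3):
#             codon = seq[i:i + 3]
#             if codon in table.stop_codons:
#                 break
#             protein.append(table.forward_table[codon])
#         return "".join(protein)
#
#     _translate_dna("ATGGCCTAA")    # 'MA' (Met, Ala, then TAA terminates)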
|
the-stack_0_21217 | # Copyright 2020 Division of Medical Image Computing, German Cancer Research Center (DKFZ), Heidelberg, Germany
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import numpy as np
import torch
from nnunet.network_architecture.custom_modules.conv_blocks import BasicResidualBlock, ResidualLayer
from nnunet.network_architecture.generic_UNet import Upsample
from nnunet.network_architecture.generic_modular_UNet import PlainConvUNetDecoder, get_default_network_config
from nnunet.network_architecture.neural_network import SegmentationNetwork
from nnunet.training.loss_functions.dice_loss import DC_and_CE_loss
from torch import nn
from torch.optim import SGD
from torch.backends import cudnn
class ResidualUNetEncoder(nn.Module):
def __init__(self, input_channels, base_num_features, num_blocks_per_stage, feat_map_mul_on_downscale,
pool_op_kernel_sizes, conv_kernel_sizes, props, default_return_skips=True,
max_num_features=480, block=BasicResidualBlock):
"""
UNet building blocks that follow this one can be added by utilizing the properties this class exposes (TODO);
this one includes the bottleneck layer!
:param input_channels:
:param base_num_features:
:param num_blocks_per_stage:
:param feat_map_mul_on_downscale:
:param pool_op_kernel_sizes:
:param conv_kernel_sizes:
:param props:
"""
super(ResidualUNetEncoder, self).__init__()
self.default_return_skips = default_return_skips
self.props = props
self.stages = []
self.stage_output_features = []
self.stage_pool_kernel_size = []
self.stage_conv_op_kernel_size = []
assert len(pool_op_kernel_sizes) == len(conv_kernel_sizes)
num_stages = len(conv_kernel_sizes)
if not isinstance(num_blocks_per_stage, (list, tuple)):
num_blocks_per_stage = [num_blocks_per_stage] * num_stages
else:
assert len(num_blocks_per_stage) == num_stages
self.num_blocks_per_stage = num_blocks_per_stage # decoder may need this
self.initial_conv = props['conv_op'](input_channels, base_num_features, 3, padding=1, **props['conv_op_kwargs'])
self.initial_norm = props['norm_op'](base_num_features, **props['norm_op_kwargs'])
self.initial_nonlin = props['nonlin'](**props['nonlin_kwargs'])
current_input_features = base_num_features
for stage in range(num_stages):
current_output_features = min(base_num_features * feat_map_mul_on_downscale ** stage, max_num_features)
current_kernel_size = conv_kernel_sizes[stage]
current_pool_kernel_size = pool_op_kernel_sizes[stage]
current_stage = ResidualLayer(current_input_features, current_output_features, current_kernel_size, props,
self.num_blocks_per_stage[stage], current_pool_kernel_size, block)
self.stages.append(current_stage)
self.stage_output_features.append(current_output_features)
self.stage_conv_op_kernel_size.append(current_kernel_size)
self.stage_pool_kernel_size.append(current_pool_kernel_size)
# update current_input_features
current_input_features = current_output_features
self.stages = nn.ModuleList(self.stages)
def forward(self, x, return_skips=None):
"""
:param x:
:param return_skips: if none then self.default_return_skips is used
:return:
"""
skips = []
x = self.initial_nonlin(self.initial_norm(self.initial_conv(x)))
for s in self.stages:
x = s(x)
if self.default_return_skips:
skips.append(x)
if return_skips is None:
return_skips = self.default_return_skips
if return_skips:
return skips
else:
return x
@staticmethod
def compute_approx_vram_consumption(patch_size, base_num_features, max_num_features,
num_modalities, pool_op_kernel_sizes, num_conv_per_stage_encoder,
feat_map_mul_on_downscale, batch_size):
npool = len(pool_op_kernel_sizes) - 1
current_shape = np.array(patch_size)
tmp = (num_conv_per_stage_encoder[0] * 2 + 1) * np.prod(current_shape) * base_num_features \
+ num_modalities * np.prod(current_shape)
num_feat = base_num_features
for p in range(1, npool + 1):
current_shape = current_shape / np.array(pool_op_kernel_sizes[p])
num_feat = min(num_feat * feat_map_mul_on_downscale, max_num_features)
num_convs = num_conv_per_stage_encoder[p] * 2 + 1 # + 1 for conv in skip in first block
print(p, num_feat, num_convs, current_shape)
tmp += num_convs * np.prod(current_shape) * num_feat
return tmp * batch_size
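# Rough usage sketch (the numbers below are placeholders, not a recommended
# configuration): the static method above yields a unit-less proxy for activation
# memory that can be compared across candidate encoder configurations, e.g.
#
#     ResidualUNetEncoder.compute_approx_vram_consumption(
#         patch_size=(128, 128, 128), base_num_features=32, max_num_features=320,
#         num_modalities=1, pool_op_kernel_sizes=[[1, 1, 1]] + [[2, 2, 2]] * 5,
#         num_conv_per_stage_encoder=(2, 2, 2, 2, 2, 2),
#         feat_map_mul_on_downscale=2, batch_size=2)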
class ResidualUNetDecoder(nn.Module):
def __init__(self, previous, num_classes, num_blocks_per_stage=None, network_props=None, deep_supervision=False,
upscale_logits=False, block=BasicResidualBlock):
super(ResidualUNetDecoder, self).__init__()
self.num_classes = num_classes
self.deep_supervision = deep_supervision
"""
We assume the bottleneck is part of the encoder, so we can start with upsample -> concat here
"""
previous_stages = previous.stages
previous_stage_output_features = previous.stage_output_features
previous_stage_pool_kernel_size = previous.stage_pool_kernel_size
previous_stage_conv_op_kernel_size = previous.stage_conv_op_kernel_size
if network_props is None:
self.props = previous.props
else:
self.props = network_props
if self.props['conv_op'] == nn.Conv2d:
transpconv = nn.ConvTranspose2d
upsample_mode = "bilinear"
elif self.props['conv_op'] == nn.Conv3d:
transpconv = nn.ConvTranspose3d
upsample_mode = "trilinear"
else:
raise ValueError("unknown convolution dimensionality, conv op: %s" % str(self.props['conv_op']))
if num_blocks_per_stage is None:
num_blocks_per_stage = previous.num_blocks_per_stage[:-1][::-1]
assert len(num_blocks_per_stage) == len(previous.num_blocks_per_stage) - 1
self.stage_pool_kernel_size = previous_stage_pool_kernel_size
self.stage_output_features = previous_stage_output_features
self.stage_conv_op_kernel_size = previous_stage_conv_op_kernel_size
num_stages = len(previous_stages) - 1 # we have one less as the first stage here is what comes after the
# bottleneck
self.tus = []
self.stages = []
self.deep_supervision_outputs = []
# only used for upscale_logits
cum_upsample = np.cumprod(np.vstack(self.stage_pool_kernel_size), axis=0).astype(int)
for i, s in enumerate(np.arange(num_stages)[::-1]):
features_below = previous_stage_output_features[s + 1]
features_skip = previous_stage_output_features[s]
self.tus.append(transpconv(features_below, features_skip, previous_stage_pool_kernel_size[s + 1],
previous_stage_pool_kernel_size[s + 1], bias=False))
# after the transpconv (tu) we concatenate the skip features, so we now have 2 * features_skip channels
self.stages.append(ResidualLayer(2 * features_skip, features_skip, previous_stage_conv_op_kernel_size[s],
self.props, num_blocks_per_stage[i], None, block))
if deep_supervision and s != 0:
seg_layer = self.props['conv_op'](features_skip, num_classes, 1, 1, 0, 1, 1, False)
if upscale_logits:
upsample = Upsample(scale_factor=cum_upsample[s], mode=upsample_mode)
self.deep_supervision_outputs.append(nn.Sequential(seg_layer, upsample))
else:
self.deep_supervision_outputs.append(seg_layer)
self.segmentation_output = self.props['conv_op'](features_skip, num_classes, 1, 1, 0, 1, 1, False)
self.tus = nn.ModuleList(self.tus)
self.stages = nn.ModuleList(self.stages)
self.deep_supervision_outputs = nn.ModuleList(self.deep_supervision_outputs)
def forward(self, skips):
# skips come from the encoder. They are sorted so that the bottleneck is last in the list
# what is maybe not perfect is that the TUs and stages here are sorted the other way around
# so let's just reverse the order of skips
skips = skips[::-1]
seg_outputs = []
x = skips[0] # this is the bottleneck
for i in range(len(self.tus)):
x = self.tus[i](x)
x = torch.cat((x, skips[i + 1]), dim=1)
x = self.stages[i](x)
if self.deep_supervision and (i != len(self.tus) - 1):
seg_outputs.append(self.deep_supervision_outputs[i](x))
segmentation = self.segmentation_output(x)
if self.deep_supervision:
seg_outputs.append(segmentation)
return seg_outputs[
::-1] # seg_outputs are ordered so that the seg from the highest layer is first, the seg from
# the bottleneck of the UNet last
else:
return segmentation
@staticmethod
def compute_approx_vram_consumption(patch_size, base_num_features, max_num_features,
num_classes, pool_op_kernel_sizes, num_blocks_per_stage_decoder,
feat_map_mul_on_downscale, batch_size):
"""
This only applies for num_conv_per_stage and convolutional_upsampling=True
not real vram consumption. just a constant term to which the vram consumption will be approx proportional
(+ offset for parameter storage)
:param patch_size:
:param pool_op_kernel_sizes:
:param base_num_features:
:param max_num_features:
:return:
"""
npool = len(pool_op_kernel_sizes) - 1
current_shape = np.array(patch_size)
tmp = (num_blocks_per_stage_decoder[-1] * 2 + 1) * np.prod(
current_shape) * base_num_features + num_classes * np.prod(current_shape)
num_feat = base_num_features
for p in range(1, npool):
current_shape = current_shape / np.array(pool_op_kernel_sizes[p])
num_feat = min(num_feat * feat_map_mul_on_downscale, max_num_features)
num_convs = num_blocks_per_stage_decoder[-(p + 1)] * 2 + 1 + 1 # +1 for transpconv and +1 for conv in skip
print(p, num_feat, num_convs, current_shape)
tmp += num_convs * np.prod(current_shape) * num_feat
return tmp * batch_size
class ResidualUNet(SegmentationNetwork):
use_this_for_batch_size_computation_2D = 858931200.0 # 1167982592.0
use_this_for_batch_size_computation_3D = 727842816.0 # 1152286720.0
default_base_num_features = 24
default_conv_per_stage = (2, 2, 2, 2, 2, 2, 2, 2)
def __init__(self, input_channels, base_num_features, num_blocks_per_stage_encoder, feat_map_mul_on_downscale,
pool_op_kernel_sizes, conv_kernel_sizes, props, num_classes, num_blocks_per_stage_decoder,
deep_supervision=False, upscale_logits=False, max_features=512, initializer=None,
block=BasicResidualBlock):
super(ResidualUNet, self).__init__()
self.conv_op = props['conv_op']
self.num_classes = num_classes
self.encoder = ResidualUNetEncoder(input_channels, base_num_features, num_blocks_per_stage_encoder,
feat_map_mul_on_downscale, pool_op_kernel_sizes, conv_kernel_sizes,
props, default_return_skips=True, max_num_features=max_features, block=block)
self.decoder = ResidualUNetDecoder(self.encoder, num_classes, num_blocks_per_stage_decoder, props,
deep_supervision, upscale_logits, block=block)
if initializer is not None:
self.apply(initializer)
def forward(self, x):
skips = self.encoder(x)
return self.decoder(skips)
@staticmethod
def compute_approx_vram_consumption(patch_size, base_num_features, max_num_features,
num_modalities, num_classes, pool_op_kernel_sizes, num_conv_per_stage_encoder,
num_conv_per_stage_decoder, feat_map_mul_on_downscale, batch_size):
enc = ResidualUNetEncoder.compute_approx_vram_consumption(patch_size, base_num_features, max_num_features,
num_modalities, pool_op_kernel_sizes,
num_conv_per_stage_encoder,
feat_map_mul_on_downscale, batch_size)
dec = ResidualUNetDecoder.compute_approx_vram_consumption(patch_size, base_num_features, max_num_features,
num_classes, pool_op_kernel_sizes,
num_conv_per_stage_decoder,
feat_map_mul_on_downscale, batch_size)
return enc + dec
class FabiansUNet(SegmentationNetwork):
"""
Residual Encoder, Plain conv decoder
"""
use_this_for_2D_configuration = 1244233721.0 # 1167982592.0
use_this_for_3D_configuration = 1230348801.0
default_blocks_per_stage_encoder = (1, 2, 3, 4, 4, 4, 4, 4, 4, 4, 4)
default_blocks_per_stage_decoder = (1, 1, 1, 1, 1, 1, 1, 1, 1, 1)
default_min_batch_size = 2 # this is what works with the numbers above
def __init__(self, input_channels, base_num_features, num_blocks_per_stage_encoder, feat_map_mul_on_downscale,
pool_op_kernel_sizes, conv_kernel_sizes, props, num_classes, num_blocks_per_stage_decoder,
deep_supervision=False, upscale_logits=False, max_features=512, initializer=None,
block=BasicResidualBlock,
props_decoder=None):
super().__init__()
self.conv_op = props['conv_op']
self.num_classes = num_classes
self.encoder = ResidualUNetEncoder(input_channels, base_num_features, num_blocks_per_stage_encoder,
feat_map_mul_on_downscale, pool_op_kernel_sizes, conv_kernel_sizes,
props, default_return_skips=True, max_num_features=max_features, block=block)
props['dropout_op_kwargs']['p'] = 0
if props_decoder is None:
props_decoder = props
self.decoder = PlainConvUNetDecoder(self.encoder, num_classes, num_blocks_per_stage_decoder, props_decoder,
deep_supervision, upscale_logits)
if initializer is not None:
self.apply(initializer)
def forward(self, x):
skips = self.encoder(x)
return self.decoder(skips)
@staticmethod
def compute_approx_vram_consumption(patch_size, base_num_features, max_num_features,
num_modalities, num_classes, pool_op_kernel_sizes, num_conv_per_stage_encoder,
num_conv_per_stage_decoder, feat_map_mul_on_downscale, batch_size):
enc = ResidualUNetEncoder.compute_approx_vram_consumption(patch_size, base_num_features, max_num_features,
num_modalities, pool_op_kernel_sizes,
num_conv_per_stage_encoder,
feat_map_mul_on_downscale, batch_size)
dec = PlainConvUNetDecoder.compute_approx_vram_consumption(patch_size, base_num_features, max_num_features,
num_classes, pool_op_kernel_sizes,
num_conv_per_stage_decoder,
feat_map_mul_on_downscale, batch_size)
return enc + dec
def find_3d_configuration():
# let's compute a reference for 3D
# we select hyperparameters here so that we get approximately the same patch size as we would get with the
# regular unet. This is just my choice. You can do whatever you want
# These default hyperparameters will then be used by the experiment planner.
# Since this is more parameter-intensive than the UNet, we will test a configuration that has a lot of parameters;
# therefore we copy the UNet configuration for Task005_Prostate
cudnn.deterministic = False
cudnn.benchmark = True
patch_size = (20, 320, 256)
max_num_features = 320
num_modalities = 2
num_classes = 3
batch_size = 2
# now we fiddle with the network specific hyperparameters until everything just barely fits into a titanx
blocks_per_stage_encoder = FabiansUNet.default_blocks_per_stage_encoder
blocks_per_stage_decoder = FabiansUNet.default_blocks_per_stage_decoder
initial_num_features = 32
# we need to add a [1, 1, 1] for the res unet because in this implementation all stages of the encoder can have a stride
pool_op_kernel_sizes = [[1, 1, 1],
[1, 2, 2],
[1, 2, 2],
[2, 2, 2],
[2, 2, 2],
[1, 2, 2],
[1, 2, 2]]
conv_op_kernel_sizes = [[1, 3, 3],
[1, 3, 3],
[3, 3, 3],
[3, 3, 3],
[3, 3, 3],
[3, 3, 3],
[3, 3, 3]]
unet = FabiansUNet(num_modalities, initial_num_features, blocks_per_stage_encoder[:len(conv_op_kernel_sizes)], 2,
pool_op_kernel_sizes, conv_op_kernel_sizes,
get_default_network_config(3, dropout_p=None), num_classes,
blocks_per_stage_decoder[:len(conv_op_kernel_sizes)-1], False, False,
max_features=max_num_features).cuda()
optimizer = SGD(unet.parameters(), lr=0.1, momentum=0.95)
loss = DC_and_CE_loss({'batch_dice': True, 'smooth': 1e-5, 'do_bg': False}, {})
dummy_input = torch.rand((batch_size, num_modalities, *patch_size)).cuda()
dummy_gt = (torch.rand((batch_size, 1, *patch_size)) * num_classes).round().clamp_(0, 2).cuda().long()
for _ in range(20):
optimizer.zero_grad()
skips = unet.encoder(dummy_input)
print([i.shape for i in skips])
output = unet.decoder(skips)
l = loss(output, dummy_gt)
l.backward()
optimizer.step()
if _ == 0:
torch.cuda.empty_cache()
# that should do. Now take the network hyperparameters and insert them in FabiansUNet.compute_approx_vram_consumption
# whatever number this spits out, save it to FabiansUNet.use_this_for_batch_size_computation_3D
print(FabiansUNet.compute_approx_vram_consumption(patch_size, initial_num_features, max_num_features, num_modalities,
num_classes, pool_op_kernel_sizes,
blocks_per_stage_encoder[:len(conv_op_kernel_sizes)],
blocks_per_stage_decoder[:len(conv_op_kernel_sizes)-1], 2, batch_size))
# the output is 1230348800.0 for me
# I increment that number by 1 to allow this configuration to be chosen
def find_2d_configuration():
# let's compute a reference for 2D
# we select hyperparameters here so that we get approximately the same patch size as we would get with the
# regular unet. This is just my choice. You can do whatever you want
# These default hyperparameters will then be used by the experiment planner.
# Since this is more parameter-intensive than the UNet, we will test a configuration that has a lot of parameters;
# therefore we copy the UNet configuration for Task003_Liver
cudnn.deterministic = False
cudnn.benchmark = True
patch_size = (512, 512)
max_num_features = 512
num_modalities = 1
num_classes = 3
batch_size = 12
# now we fiddle with the network specific hyperparameters until everything just barely fits into a titanx
blocks_per_stage_encoder = FabiansUNet.default_blocks_per_stage_encoder
blocks_per_stage_decoder = FabiansUNet.default_blocks_per_stage_decoder
initial_num_features = 30
# we need to add a [1, 1, 1] for the res unet because in this implementation all stages of the encoder can have a stride
pool_op_kernel_sizes = [[1, 1],
[2, 2],
[2, 2],
[2, 2],
[2, 2],
[2, 2],
[2, 2],
[2, 2]]
conv_op_kernel_sizes = [[3, 3],
[3, 3],
[3, 3],
[3, 3],
[3, 3],
[3, 3],
[3, 3],
[3, 3]]
unet = FabiansUNet(num_modalities, initial_num_features, blocks_per_stage_encoder[:len(conv_op_kernel_sizes)], 2,
pool_op_kernel_sizes, conv_op_kernel_sizes,
get_default_network_config(2, dropout_p=None), num_classes,
blocks_per_stage_decoder[:len(conv_op_kernel_sizes)-1], False, False,
max_features=max_num_features).cuda()
optimizer = SGD(unet.parameters(), lr=0.1, momentum=0.95)
loss = DC_and_CE_loss({'batch_dice': True, 'smooth': 1e-5, 'do_bg': False}, {})
dummy_input = torch.rand((batch_size, num_modalities, *patch_size)).cuda()
dummy_gt = (torch.rand((batch_size, 1, *patch_size)) * num_classes).round().clamp_(0, 2).cuda().long()
for _ in range(20):
optimizer.zero_grad()
skips = unet.encoder(dummy_input)
print([i.shape for i in skips])
output = unet.decoder(skips)
l = loss(output, dummy_gt)
l.backward()
optimizer.step()
if _ == 0:
torch.cuda.empty_cache()
# that should do. Now take the network hyperparameters and insert them in FabiansUNet.compute_approx_vram_consumption
# whatever number this spits out, save it to FabiansUNet.use_this_for_batch_size_computation_2D
print(FabiansUNet.compute_approx_vram_consumption(patch_size, initial_num_features, max_num_features, num_modalities,
num_classes, pool_op_kernel_sizes,
blocks_per_stage_encoder[:len(conv_op_kernel_sizes)],
blocks_per_stage_decoder[:len(conv_op_kernel_sizes)-1], 2, batch_size))
# the output is 1244233728.0 for me
# I increment that number by 1 to allow this configuration to be chosen
# This will not fit with 32 filters, but neither will the regular U-net. We still use 32 filters in training.
# This does not matter because we are using mixed precision training now, so a rough memory approximation is OK
if __name__ == "__main__":
pass
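# Typical manual invocation (sketch): both probes above require a CUDA device
# and simply print the reference numbers discussed in their trailing comments.
#
#     find_2d_configuration()
#     find_3d_configuration()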
|
the-stack_0_21218 | from ..remote import RemoteModel
from infoblox_netmri.utils.utils import check_api_availability
class DeviceRouteRemote(RemoteModel):
"""
The routing table entries for each device.
| ``DataSourceID:`` The internal NetMRI identifier for the collector NetMRI that collected this data record.
| ``attribute type:`` number
| ``DeviceID:`` The internal NetMRI identifier for the device from which this routing table entry was collected.
| ``attribute type:`` number
| ``DeviceRouteID:`` The internal NetMRI identifier for this routing table entry on this device.
| ``attribute type:`` number
| ``InterfaceID:`` The internal NetMRI identifier of the outgoing interface for this route.
| ``attribute type:`` number
| ``RouteAdminDistance:`` The administrative distance of the protocol through which this route was learned, as specified by default Cisco conventions.
| ``attribute type:`` number
| ``RouteChangedCols:`` The fields that changed between this revision of the record and the previous revision.
| ``attribute type:`` string
| ``RouteCIDR:`` The route destination network in CIDR format.
| ``attribute type:`` string
| ``RouteEndTime:`` The ending effective time of this revision of this record, or empty if still in effect.
| ``attribute type:`` datetime
| ``RouteIfIndex:`` The SNMP interface index of the outgoing interface for this route.
| ``attribute type:`` number
| ``RouteMetric1:`` The first route metric value.
| ``attribute type:`` number
| ``RouteMetric2:`` The second route metric value.
| ``attribute type:`` number
| ``RouteNetMaskDotted:`` The network mask of the route destination network in dotted (or colon-delimited for IPv6) format.
| ``attribute type:`` string
| ``RouteNetMaskNumeric:`` The numerical value of the network mask.
| ``attribute type:`` number
| ``RouteNextHopIPDotted:`` The next hop IP address for this route, in dotted (or colon-delimited for IPv6) format.
| ``attribute type:`` string
| ``RouteNextHopIPNumeric:`` The numerical value of the next hop IP address.
| ``attribute type:`` number
| ``RouteProto:`` The routing protocol through which this route was learned.
| ``attribute type:`` string
| ``RouteStartTime:`` The starting effective time of this revision of the record.
| ``attribute type:`` datetime
| ``RouteSubnetIPDotted:`` The route destination network address in dotted (or colon-delimited for IPv6) format.
| ``attribute type:`` string
| ``RouteSubnetIPNumeric:`` The numerical value of the route destination network address.
| ``attribute type:`` number
| ``RouteTimestamp:`` The date and time this record was collected or calculated.
| ``attribute type:`` datetime
| ``RouteType:`` The type of the route.
| ``attribute type:`` string
| ``VirtualNetworkMemberID:`` The internal NetMRI identifier for the VRF-based VPN related to this record.
| ``attribute type:`` number
"""
properties = ("DataSourceID",
"DeviceID",
"DeviceRouteID",
"InterfaceID",
"RouteAdminDistance",
"RouteChangedCols",
"RouteCIDR",
"RouteEndTime",
"RouteIfIndex",
"RouteMetric1",
"RouteMetric2",
"RouteNetMaskDotted",
"RouteNetMaskNumeric",
"RouteNextHopIPDotted",
"RouteNextHopIPNumeric",
"RouteProto",
"RouteStartTime",
"RouteSubnetIPDotted",
"RouteSubnetIPNumeric",
"RouteTimestamp",
"RouteType",
"VirtualNetworkMemberID",
)
@property
@check_api_availability
def data_source(self):
"""
The NetMRI device that collected this record.
``attribute type:`` model
"""
return self.broker.data_source(**{"DeviceRouteID": self.DeviceRouteID})
@property
@check_api_availability
def device(self):
"""
The device from which this routing table entry was collected.
``attribute type:`` model
"""
return self.broker.device(**{"DeviceRouteID": self.DeviceRouteID})
@property
@check_api_availability
def interface(self):
"""
The outgoing interface for this route.
``attribute type:`` model
"""
return self.broker.interface(**{"DeviceRouteID": self.DeviceRouteID})
@property
@check_api_availability
def infradevice(self):
"""
The device from which this routing table entry was collected.
``attribute type:`` model
"""
return self.broker.infradevice(**{"DeviceRouteID": self.DeviceRouteID})
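# Usage sketch (hypothetical; obtaining a DeviceRouteRemote instance through the
# broker/client machinery is outside this module). The scalar properties and the
# lazy relations defined above are read as plain attributes:
#
#     route.RouteCIDR       # e.g. "10.0.0.0/24" (destination network in CIDR form)
#     route.RouteProto      # protocol through which the route was learned
#     route.device          # lazily resolves the device that owns this entry
#     route.interface       # lazily resolves the outgoing interface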
|
the-stack_0_21220 | #%%
import numpy as np
import pytest
from natural_bm.preprocessing import make_PCA_matrix, make_ZCA_matrix
import natural_bm.backend.theano_backend as BTH
import natural_bm.backend.numpy_backend as BNP
#%% Test prep for tests
def _diag_non_diag(x):
diag = np.copy(np.diag(x))
index = np.where(~np.eye(x.shape[0], dtype=bool))
non_diag = x[index]
return diag, non_diag
def setup_data():
n = 10
data = np.random.normal(size=(n, n))
cov = np.cov(data.T)
return data, cov
def setup_datatype(B, data, cov):
data = B.variable(data)
cov = B.variable(cov)
return data, cov
def setup_white(whitetype, cov, eps):
if whitetype == 'PCA':
white = make_PCA_matrix(cov, eps)
elif whitetype == 'ZCA':
white = make_ZCA_matrix(cov, eps)
else:
raise NotImplementedError
return white
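# For reference (standard textbook definitions, assumed to match the imported
# make_PCA_matrix / make_ZCA_matrix helpers): with the covariance
# eigendecomposition C = E diag(lam) E^T,
#
#     W_pca = E diag(1 / sqrt(lam + eps))         # data.dot(W_pca) lives in the
#                                                 # eigenbasis -> ~diagonal covariance
#     W_zca = E diag(1 / sqrt(lam + eps)) E^T     # symmetric -> whitened data stays
#                                                 # close to the original axes
#
# which is what verify() below checks statistically for the two whitetypes.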
def verify(whitetype, eps, cov, new_cov):
# break into diag and non-diagonal
diag, non_diag = _diag_non_diag(new_cov)
if whitetype == 'PCA':
atol = 2e-2
# Non-diagonal elements should all be zero
assert np.allclose(non_diag, 0.0, atol=atol)
if eps == 1e-2:
# first element is one
assert np.isclose(diag[0], 1.0, atol=atol)
# other elements, besides last, should be greater than zero
assert np.all(diag[1:-1] > 0.0)
elif eps == 1e-5:
# last element is zero, but everyone else should be one
assert np.allclose(diag[:-1], 1.0, atol=atol)
else:
raise NotImplementedError
elif whitetype == 'ZCA':
# break old cov into diag and non-diagonal
diag_old, non_diag_old = _diag_non_diag(cov)
# checks on diagonal
assert np.max(diag) <= 1.0
assert np.min(diag) >= 0.0
# checks on non-diagonal, just a statistical argument
assert np.std(non_diag) < 0.75*np.std(non_diag_old)
else:
raise NotImplementedError
#%%
@pytest.mark.parametrize('whitetype', ['PCA', 'ZCA'], ids=['PCA', 'ZCA'])
@pytest.mark.parametrize('B', [BTH, BNP], ids=["BTH", "BNP"])
@pytest.mark.parametrize('eps', [1e-2, 1e-5], ids=['1e-2', '1e-5'])
def test_white(whitetype, B, eps):
data, cov = setup_data()
data, cov = setup_datatype(B, data, cov)
white = setup_white(whitetype, cov, eps)
new_data = data.dot(white)
if B == BTH:
cov = B.get_value(cov)
new_data = B.eval(new_data)
new_cov = np.cov(new_data.T)
verify(whitetype, eps, cov, new_cov)
#%% Main
if __name__ == '__main__':
pytest.main([__file__])
|
the-stack_0_21221 | feature_names = struct(
# Virtualize means that swift and clang read from LLVM's in-memory file system
virtualize_frameworks = "apple.virtualize_frameworks",
# Some of the rules need to work slightly differently under pure Xcode mode
compile_with_xcode = "xcode.compile_with_xcode",
# Use the ARM deps for the simulator - see rules/import_middleman.bzl
arm64_simulator_use_device_deps = "apple.arm64_simulator_use_device_deps",
bazel4_override_simulator_cpu_arm64 = "bazel4.override_simulator_cpu_arm64",
)
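# Hypothetical usage sketch: a rule implementation could check whether one of the
# flags above was requested for a target via Bazel's standard features mechanism:
#
#     virtualize = feature_names.virtualize_frameworks in ctx.features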
|
the-stack_0_21223 | """
Utilities used to convert a field to a classic human-readable representation of data.
"""
from lib.hachoir_core.tools import (
humanDuration, humanFilesize, alignValue,
durationWin64 as doDurationWin64,
deprecated)
from types import FunctionType, MethodType
from lib.hachoir_core.field import Field
def textHandler(field, handler):
assert isinstance(handler, (FunctionType, MethodType))
assert issubclass(field.__class__, Field)
field.createDisplay = lambda: handler(field)
return field
def displayHandler(field, handler):
assert isinstance(handler, (FunctionType, MethodType))
assert issubclass(field.__class__, Field)
field.createDisplay = lambda: handler(field.value)
return field
@deprecated("Use TimedeltaWin64 field type")
def durationWin64(field):
"""
Convert Windows 64-bit duration to string. The timestamp format is
a 64-bit number: number of 100ns. See also timestampWin64().
>>> durationWin64(type("", (), dict(value=2146280000, size=64)))
u'3 min 34 sec 628 ms'
>>> durationWin64(type("", (), dict(value=(1 << 64)-1, size=64)))
u'58494 years 88 days 5 hours'
"""
assert hasattr(field, "value") and hasattr(field, "size")
assert field.size == 64
delta = doDurationWin64(field.value)
return humanDuration(delta)
def filesizeHandler(field):
"""
Format field value using humanFilesize()
"""
return displayHandler(field, humanFilesize)
def hexadecimal(field):
"""
Convert an integer to hexadecimal in lower case. Returns unicode string.
>>> hexadecimal(type("", (), dict(value=412, size=16)))
u'0x019c'
>>> hexadecimal(type("", (), dict(value=0, size=32)))
u'0x00000000'
"""
assert hasattr(field, "value") and hasattr(field, "size")
size = field.size
padding = alignValue(size, 4) // 4
pattern = u"0x%%0%ux" % padding
return pattern % field.value
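# Typical wiring sketch (illustrative; UInt32 and the surrounding parser context
# are assumptions, not defined in this module): a parser wraps a freshly created
# field so that its display string is produced by one of the handlers above.
#
#     yield textHandler(UInt32(self, "crc32"), hexadecimal)
#     yield filesizeHandler(UInt32(self, "compressed_size"))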
|
the-stack_0_21226 | # coding: utf-8
"""
RADON CTT Server API
This is the API of the RADON Continuous Testing Tool (CTT) Server: <a href=\"https://github.com/radon-h2020/radon-ctt\">https://github.com/radon-h2020/radon-ctt</a> # noqa: E501
OpenAPI spec version: 1.0.0-oas3
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
import pprint
import re # noqa: F401
import six
class Execution(object):
"""NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
"""
"""
Attributes:
swagger_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
swagger_types = {
'deployment_uuid': 'str',
'uuid': 'str'
}
attribute_map = {
'deployment_uuid': 'deployment_uuid',
'uuid': 'uuid'
}
def __init__(self, deployment_uuid=None, uuid=None): # noqa: E501
"""Execution - a model defined in Swagger""" # noqa: E501
self._deployment_uuid = None
self._uuid = None
self.discriminator = None
if deployment_uuid is not None:
self.deployment_uuid = deployment_uuid
if uuid is not None:
self.uuid = uuid
@property
def deployment_uuid(self):
"""Gets the deployment_uuid of this Execution. # noqa: E501
:return: The deployment_uuid of this Execution. # noqa: E501
:rtype: str
"""
return self._deployment_uuid
@deployment_uuid.setter
def deployment_uuid(self, deployment_uuid):
"""Sets the deployment_uuid of this Execution.
:param deployment_uuid: The deployment_uuid of this Execution. # noqa: E501
:type: str
"""
self._deployment_uuid = deployment_uuid
@property
def uuid(self):
"""Gets the uuid of this Execution. # noqa: E501
:return: The uuid of this Execution. # noqa: E501
:rtype: str
"""
return self._uuid
@uuid.setter
def uuid(self, uuid):
"""Sets the uuid of this Execution.
:param uuid: The uuid of this Execution. # noqa: E501
:type: str
"""
self._uuid = uuid
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.swagger_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
if issubclass(Execution, dict):
for key, value in self.items():
result[key] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, Execution):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""Returns true if both objects are not equal"""
return not self == other
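# Minimal usage sketch (the UUID values are placeholders): the generated model is
# a plain data holder whose dict round-tripping is used by the swagger client code.
#
#     execution = Execution(deployment_uuid="deployment-uuid-1", uuid="execution-uuid-1")
#     execution.to_dict()   # {'deployment_uuid': 'deployment-uuid-1', 'uuid': 'execution-uuid-1'}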
|
the-stack_0_21227 | # Copyright 2018 The TensorFlow Probability Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""Tests for linear algebra."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
# Dependency imports
import numpy as np
import tensorflow as tf
import tensorflow_probability as tfp
tfe = tf.contrib.eager
class _PinvTest(object):
def expected_pinv(self, a, rcond):
"""Calls `np.linalg.pinv` but corrects its broken batch semantics."""
if a.ndim < 3:
return np.linalg.pinv(a, rcond)
if rcond is None:
rcond = 10. * max(a.shape[-2], a.shape[-1]) * np.finfo(a.dtype).eps
s = np.concatenate([a.shape[:-2], [a.shape[-1], a.shape[-2]]])
a_pinv = np.zeros(s, dtype=a.dtype)
for i in np.ndindex(a.shape[:(a.ndim - 2)]):
a_pinv[i] = np.linalg.pinv(
a[i],
rcond=rcond if isinstance(rcond, float) else rcond[i])
return a_pinv
def test_symmetric(self):
a_ = self.dtype([[1., .4, .5],
[.4, .2, .25],
[.5, .25, .35]])
a_ = np.stack([a_ + 1., a_], axis=0) # Batch of matrices.
a = tf.placeholder_with_default(
input=a_,
shape=a_.shape if self.use_static_shape else None)
if self.use_default_rcond:
rcond = None
else:
rcond = self.dtype([0., 0.01]) # Smallest 1 component is forced to zero.
expected_a_pinv_ = self.expected_pinv(a_, rcond)
a_pinv = tfp.math.pinv(a, rcond, validate_args=True)
a_pinv_ = self.evaluate(a_pinv)
self.assertAllClose(expected_a_pinv_, a_pinv_,
atol=1e-5, rtol=1e-5)
if not self.use_static_shape:
return
self.assertAllEqual(expected_a_pinv_.shape, a_pinv.shape)
def test_nonsquare(self):
a_ = self.dtype([[1., .4, .5, 1.],
[.4, .2, .25, 2.],
[.5, .25, .35, 3.]])
a_ = np.stack([a_ + 0.5, a_], axis=0) # Batch of matrices.
a = tf.placeholder_with_default(
input=a_,
shape=a_.shape if self.use_static_shape else None)
if self.use_default_rcond:
rcond = None
else:
# Smallest 2 components are forced to zero.
rcond = self.dtype([0., 0.25])
expected_a_pinv_ = self.expected_pinv(a_, rcond)
a_pinv = tfp.math.pinv(a, rcond, validate_args=True)
a_pinv_ = self.evaluate(a_pinv)
self.assertAllClose(expected_a_pinv_, a_pinv_,
atol=1e-5, rtol=1e-4)
if not self.use_static_shape:
return
self.assertAllEqual(expected_a_pinv_.shape, a_pinv.shape)
@tfe.run_all_tests_in_graph_and_eager_modes
class PinvTestDynamic32DefaultRcond(tf.test.TestCase, _PinvTest):
dtype = np.float32
use_static_shape = False
use_default_rcond = True
@tfe.run_all_tests_in_graph_and_eager_modes
class PinvTestStatic64DefaultRcond(tf.test.TestCase, _PinvTest):
dtype = np.float64
use_static_shape = True
use_default_rcond = True
@tfe.run_all_tests_in_graph_and_eager_modes
class PinvTestDynamic32CustomRcond(tf.test.TestCase, _PinvTest):
dtype = np.float32
use_static_shape = False
use_default_rcond = False
@tfe.run_all_tests_in_graph_and_eager_modes
class PinvTestStatic64CustomRcond(tf.test.TestCase, _PinvTest):
dtype = np.float64
use_static_shape = True
use_default_rcond = False
def make_tensor_hiding_attributes(value, hide_shape, hide_value=True):
if not hide_value:
return tf.convert_to_tensor(value)
shape = None if hide_shape else getattr(value, 'shape', None)
return tf.placeholder_with_default(input=value, shape=shape)
class _LUInverse(object):
dtype = np.float32
use_static_shape = True
def test_non_batch(self):
x_ = np.array(
[[3, 4], [1, 2]],
dtype=self.dtype)
x = tf.placeholder_with_default(
x_, shape=x_.shape if self.use_static_shape else None)
y = tfp.math.lu_reconstruct(*tf.linalg.lu(x), validate_args=True)
y_ = self.evaluate(y)
if self.use_static_shape:
self.assertAllEqual(x_.shape, y.shape)
self.assertAllClose(x_, y_, atol=0., rtol=1e-3)
def test_batch(self):
x_ = np.array(
[
[[3, 4], [1, 2]],
[[7, 8], [3, 4]],
],
dtype=self.dtype)
x = tf.placeholder_with_default(
x_, shape=x_.shape if self.use_static_shape else None)
y = tfp.math.lu_reconstruct(*tf.linalg.lu(x), validate_args=True)
y_ = self.evaluate(y)
if self.use_static_shape:
self.assertAllEqual(x_.shape, y.shape)
self.assertAllClose(x_, y_, atol=0., rtol=1e-3)
@tfe.run_all_tests_in_graph_and_eager_modes
class LUInverseStatic(tf.test.TestCase, _LUInverse):
use_static_shape = True
@tfe.run_all_tests_in_graph_and_eager_modes
class LUInverseDynamic(tf.test.TestCase, _LUInverse):
use_static_shape = False
class _MatrixInverseLU(object):
dtype = np.float32
use_static_shape = True
def test_non_batch(self):
x_ = np.array(
[[3, 4], [1, 2]],
dtype=self.dtype)
x = tf.placeholder_with_default(
x_, shape=x_.shape if self.use_static_shape else None)
y = tfp.math.lu_matrix_inverse(*tf.linalg.lu(x), validate_args=True)
y_ = self.evaluate(y)
if self.use_static_shape:
self.assertAllEqual(x_.shape, y.shape)
self.assertAllClose(np.linalg.inv(x_), y_, atol=0., rtol=1e-3)
def test_batch(self):
x_ = np.array(
[
[[3, 4], [1, 2]],
[[7, 8], [3, 4]],
],
dtype=self.dtype)
x = tf.placeholder_with_default(
x_, shape=x_.shape if self.use_static_shape else None)
y = tfp.math.lu_matrix_inverse(*tf.linalg.lu(x), validate_args=True)
y_ = self.evaluate(y)
if self.use_static_shape:
self.assertAllEqual(x_.shape, y.shape)
self.assertAllClose(np.linalg.inv(x_), y_, atol=0., rtol=1e-3)
@tfe.run_all_tests_in_graph_and_eager_modes
class MatrixInverseLUStatic(tf.test.TestCase, _MatrixInverseLU):
use_static_shape = True
@tfe.run_all_tests_in_graph_and_eager_modes
class MatrixInverseLUDynamic(tf.test.TestCase, _MatrixInverseLU):
use_static_shape = False
if __name__ == '__main__':
tf.test.main()
|
the-stack_0_21228 | import pytest
from .sample_plugins import PluginInactive, PluginSample
@pytest.mark.parametrize(
"plugin_id, plugin_path, status_code",
[
(PluginSample.PLUGIN_ID, "/webhook/paid", 200),
(PluginInactive.PLUGIN_ID, "/webhook/paid", 404),
("wrong.id", "/webhook/", 404),
],
)
def test_plugin_webhook_view(
plugin_id, plugin_path, status_code, client, settings, monkeypatch
):
settings.PLUGINS = [
"saleor.plugins.tests.sample_plugins.PluginSample",
"saleor.plugins.tests.sample_plugins.PluginInactive",
]
response = client.post(f"/plugins/{plugin_id}{plugin_path}")
assert response.status_code == status_code
|
the-stack_0_21229 | #!/usr/bin/env python3
# Copyright (c) Meta Platforms, Inc. and affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
r"""
Abstract base module for all botorch acquisition functions.
"""
from __future__ import annotations
import warnings
from abc import ABC, abstractmethod
from typing import Optional, Callable
from botorch.exceptions import BotorchWarning, UnsupportedError
from botorch.models.model import Model
from botorch.posteriors.posterior import Posterior
from torch import Tensor
from torch.nn import Module
class AcquisitionFunction(Module, ABC):
r"""Abstract base class for acquisition functions."""
def __init__(self, model: Model) -> None:
r"""Constructor for the AcquisitionFunction base class.
Args:
model: A fitted model.
"""
super().__init__()
self.add_module("model", model)
@classmethod
def _deprecate_acqf_objective(
cls,
posterior_transform: Optional[Callable[[Posterior], Posterior]],
objective: Optional[Module],
) -> Optional[Callable[[Posterior], Posterior]]:
from botorch.acquisition.objective import (
ScalarizedObjective,
ScalarizedPosteriorTransform,
)
if objective is None:
return posterior_transform
warnings.warn(
f"{cls.__name__} got a non-MC `objective`. The non-MC "
"AcquisitionObjectives and the `objective` argument to"
"AnalyticAcquisitionFunctions are DEPRECATED and will be removed in the"
"next version. Use `posterior_transform` instead.",
DeprecationWarning,
)
if not isinstance(objective, ScalarizedObjective):
raise UnsupportedError(
f"{cls.__name__} only supports ScalarizedObjective "
"(DEPRECATED) type objectives."
)
return ScalarizedPosteriorTransform(
weights=objective.weights, offset=objective.offset
)
def set_X_pending(self, X_pending: Optional[Tensor] = None) -> None:
r"""Informs the acquisition function about pending design points.
Args:
X_pending: `n x d` Tensor with `n` `d`-dim design points that have
been submitted for evaluation but have not yet been evaluated.
"""
if X_pending is not None:
if X_pending.requires_grad:
warnings.warn(
"Pending points require a gradient but the acquisition function"
" will not provide a gradient to these points.",
BotorchWarning,
)
self.X_pending = X_pending.detach().clone()
else:
self.X_pending = X_pending
@abstractmethod
def forward(self, X: Tensor) -> Tensor:
r"""Evaluate the acquisition function on the candidate set X.
Args:
X: A `(b) x q x d`-dim Tensor of `(b)` t-batches with `q` `d`-dim
design points each.
Returns:
A `(b)`-dim Tensor of acquisition function values at the given
design points `X`.
"""
pass # pragma: no cover
class OneShotAcquisitionFunction(AcquisitionFunction, ABC):
r"""Abstract base class for acquisition functions using one-shot optimization"""
@abstractmethod
def get_augmented_q_batch_size(self, q: int) -> int:
r"""Get augmented q batch size for one-shot optimziation.
Args:
q: The number of candidates to consider jointly.
Returns:
The augmented size for one-shot optimization (including variables
parameterizing the fantasy solutions).
"""
pass # pragma: no cover
@abstractmethod
def extract_candidates(self, X_full: Tensor) -> Tensor:
r"""Extract the candidates from a full "one-shot" parameterization.
Args:
X_full: A `b x q_aug x d`-dim Tensor with `b` t-batches of `q_aug`
design points each.
Returns:
A `b x q x d`-dim Tensor with `b` t-batches of `q` design points each.
"""
pass # pragma: no cover
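# --- Illustrative sketch (added example, not part of the original botorch module) ---
# A minimal concrete AcquisitionFunction, assuming `self.model.posterior(X)` returns a
# Posterior whose `.mean` has shape `(b) x q x m`; the class name is hypothetical.
class _PosteriorMeanSketch(AcquisitionFunction):
    r"""Toy acquisition function that scores candidates by their posterior mean."""
    def forward(self, X: Tensor) -> Tensor:
        # X is `(b) x q x d`; average over the q-batch and output dimensions so a
        # single scalar is returned per t-batch, as the base class requires.
        posterior = self.model.posterior(X)
        return posterior.mean.mean(dim=-1).mean(dim=-1)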
|
the-stack_0_21231 | import numpy as np
from autotune import TuningProblem
from autotune.space import *
import os, sys, time, json, math
import ConfigSpace as CS
import ConfigSpace.hyperparameters as CSH
from skopt.space import Real, Integer, Categorical
HERE = os.path.dirname(os.path.abspath(__file__))
sys.path.insert(1, os.path.dirname(HERE)+ '/plopper')
from plopper import Plopper
# create an object of ConfigSpace
cs = CS.ConfigurationSpace(seed=1234)
# number of threads
# p0= CSH.OrdinalHyperparameter(name='p0', sequence=['4','5','6','7','8'], default_value='8')
p0= CSH.UniformIntegerHyperparameter(name='p0', lower=4, upper=8, default_value=8)
#block size for openmp dynamic schedule
p1= CSH.OrdinalHyperparameter(name='p1', sequence=['10','20','40','64','80','100','128','160','200'], default_value='100')
#omp parallel
p2= CSH.CategoricalHyperparameter(name='p2', choices=["#pragma omp parallel for", " "], default_value=' ')
cs.add_hyperparameters([p0, p1, p2])
# problem space
task_space = None
input_space = cs
output_space = Space([
Real(0.0, inf, name="time")
])
dir_path = os.path.dirname(os.path.realpath(__file__))
kernel_idx = dir_path.rfind('/')
kernel = dir_path[kernel_idx+1:]
obj = Plopper(dir_path+'/mmp.c',dir_path)
x1=['p0','p1','p2']
def myobj(point: dict):
def plopper_func(x):
x = np.asarray_chkfinite(x) # ValueError if any NaN or Inf
value = [point[x1[0]],point[x1[1]],point[x1[2]]]
print('CONFIG:',point)
params = ["P0","P1","P2"]
result = obj.findRuntime(value, params)
return result
x = np.array([point[f'p{i}'] for i in range(len(point))])
results = plopper_func(x)
    print('OUTPUT: %f' % results)
return results
Problem = TuningProblem(
task_space=None,
input_space=input_space,
output_space=output_space,
objective=myobj,
constraints=None,
model=None
)
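# --- Illustrative sketch (added example, not part of the original file) ---
# Scores one hypothetical configuration drawn from the ConfigSpace above; running it
# requires the Plopper benchmark sources (mmp.c) referenced in this directory.
if __name__ == '__main__':
    sample_point = {'p0': 8, 'p1': '100', 'p2': '#pragma omp parallel for'}
    print('sample runtime:', myobj(sample_point))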
|
the-stack_0_21233 | import cv2
import numpy as np
import time
CONFIDENCE = 0.5
SCORE_THRESHOLD = 0.5
IOU_THRESHOLD = 0.5
config_path = "cfg/yolov3.cfg"
weights_path = "weights/yolov3.weights"
font_scale = 1
thickness = 1
LABELS = open("data/coco.names").read().strip().split("\n")
COLORS = np.random.randint(0, 255, size=(len(LABELS), 3), dtype="uint8")
net = cv2.dnn.readNetFromDarknet(config_path, weights_path)
ln = net.getLayerNames()
ln = [ln[i[0] - 1] for i in net.getUnconnectedOutLayers()]
cap = cv2.VideoCapture(0)
while True:
_, image = cap.read()
h, w = image.shape[:2]
blob = cv2.dnn.blobFromImage(image, 1/255.0, (416, 416), swapRB=True, crop=False)
net.setInput(blob)
start = time.perf_counter()
layer_outputs = net.forward(ln)
time_took = time.perf_counter() - start
print("Time took:", time_took)
boxes, confidences, class_ids = [], [], []
# loop over each of the layer outputs
for output in layer_outputs:
# loop over each of the object detections
for detection in output:
# extract the class id (label) and confidence (as a probability) of
# the current object detection
scores = detection[5:]
class_id = np.argmax(scores)
confidence = scores[class_id]
# discard weak predictions by ensuring the detected
# probability is greater than the minimum probability
if confidence > CONFIDENCE:
# scale the bounding box coordinates back relative to the
# size of the image, keeping in mind that YOLO actually
# returns the center (x, y)-coordinates of the bounding
# box followed by the boxes' width and height
box = detection[:4] * np.array([w, h, w, h])
(centerX, centerY, width, height) = box.astype("int")
# use the center (x, y)-coordinates to derive the top and
# and left corner of the bounding box
x = int(centerX - (width / 2))
y = int(centerY - (height / 2))
# update our list of bounding box coordinates, confidences,
# and class IDs
boxes.append([x, y, int(width), int(height)])
confidences.append(float(confidence))
class_ids.append(class_id)
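    # --- Worked example (added comment, not part of the original script) ---
    # For a hypothetical 640x480 frame and a detection with normalized
    # (centerX, centerY, width, height) = (0.5, 0.5, 0.25, 0.5), scaling gives
    # (320, 240, 160, 240), so the top-left corner is x = 320 - 160/2 = 240 and
    # y = 240 - 240/2 = 120.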
# perform the non maximum suppression given the scores defined before
idxs = cv2.dnn.NMSBoxes(boxes, confidences, SCORE_THRESHOLD, IOU_THRESHOLD)
font_scale = 1
thickness = 1
# ensure at least one detection exists
if len(idxs) > 0:
# loop over the indexes we are keeping
for i in idxs.flatten():
# extract the bounding box coordinates
x, y = boxes[i][0], boxes[i][1]
w, h = boxes[i][2], boxes[i][3]
# draw a bounding box rectangle and label on the image
            color = [int(c) for c in COLORS[class_ids[i]]]
cv2.rectangle(image, (x, y), (x + w, y + h), color=color, thickness=thickness)
text = f"{labels[class_ids[i]]}: {confidences[i]:.2f}"
# calculate text width & height to draw the transparent boxes as background of the text
(text_width, text_height) = cv2.getTextSize(text, cv2.FONT_HERSHEY_SIMPLEX, fontScale=font_scale, thickness=thickness)[0]
text_offset_x = x
text_offset_y = y - 5
box_coords = ((text_offset_x, text_offset_y), (text_offset_x + text_width + 2, text_offset_y - text_height))
overlay = image.copy()
cv2.rectangle(overlay, box_coords[0], box_coords[1], color=color, thickness=cv2.FILLED)
# add opacity (transparency to the box)
image = cv2.addWeighted(overlay, 0.6, image, 0.4, 0)
# now put the text (label: confidence %)
cv2.putText(image, text, (x, y - 5), cv2.FONT_HERSHEY_SIMPLEX,
fontScale=font_scale, color=(0, 0, 0), thickness=thickness)
cv2.imshow("image", image)
if ord("q") == cv2.waitKey(1):
break
cap.release()
cv2.destroyAllWindows() |
the-stack_0_21234 | # -*- coding: utf-8 -*-
"""
Created on Fri Dec 10 22:58:59 2021
@author: wapisani
"""
import os
import numpy as np
directory = r'F:\Documents\Programming\AoC\2021'
# directory = r'/Users/wapisani/Documents/Programming/AoC/2021'
os.chdir(directory)
with open('input_day11.txt','r') as handle:
data = [line.strip() for line in handle.readlines()]
# with open('sample_day11.txt','r') as handle:
# data = [line.strip() for line in handle.readlines()]
def getAdjacentCells(cell,nrows,ncols):
"""
Credit to https://github.com/statneutrino/AdventOfCode/blob/main/2021/python/day11.py
for the logic of this function.
Parameters
----------
    cell : tuple
        (x,y)-coordinates of the cell to get the adjacent cells of.
    nrows : int
        Number of rows in the grid.
    ncols : int
        Number of columns in the grid.
Returns
-------
Set of (x,y)-coordinates of adjacent cells to cell
"""
x,y = cell
adjacentCells = set()
if x != 0:
adjacentCells.add((x-1,y)) # cell above
if x != nrows - 1:
adjacentCells.add((x+1,y)) # cell below
if y != 0:
adjacentCells.add((x,y-1)) # cell left
if y != ncols - 1:
adjacentCells.add((x,y+1)) # cell right
if x != 0 and y != 0:
adjacentCells.add((x-1,y-1)) # cell upper-left
if x != 0 and y != ncols - 1:
adjacentCells.add((x-1,y+1)) # cell upper-right
if x != nrows - 1 and y != 0:
adjacentCells.add((x+1,y-1)) # cell below-left
if x != nrows - 1 and y != ncols - 1:
adjacentCells.add((x+1,y+1)) # cell below-right
return adjacentCells
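# --- Illustrative check (added example, not part of the original solution) ---
# In a 10x10 grid a corner cell has 3 neighbours and an interior cell has 8:
assert getAdjacentCells((0, 0), 10, 10) == {(0, 1), (1, 0), (1, 1)}
assert len(getAdjacentCells((5, 5), 10, 10)) == 8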
nrows = len(data)
ncols = len(data[0])
energy_grid = np.zeros((nrows,ncols))
for i,line in enumerate(data):
for j,char in enumerate(line):
energy_grid[i,j] = int(char)
flashes_list = [] # count of flashes per step
for step in range(100): # part 1 is 100 steps
n_flashes = 0
energy_grid += 1 # increase energy of all octopi 1
flash_locs = np.where(energy_grid == 10)
# Now get the flash locations into a usable format
x = [value for value in flash_locs[0]]
y = [value for value in flash_locs[1]]
xy = []
for i,value in enumerate(x):
xy.append((value,y[i]))
n_flashes += len(xy) # Get number of flashes for those that have flashed
for flashed in xy:
adjacentCells = getAdjacentCells(flashed, nrows, ncols)
for cell in adjacentCells:
i,j = cell
energy_grid[i,j] += 1
e = energy_grid[i,j]
if e == 10:
n_flashes += 1 # This should only count the number of adjacent cells that flash
xy.append((i,j))
for flashed in xy:
i,j = flashed
energy_grid[i,j] = 0 # reset
flashes_list.append(n_flashes)
print(f'In 100 steps, there were {sum(flashes_list)} flashes.')
energy_grid = np.zeros((nrows,ncols))
for i,line in enumerate(data):
for j,char in enumerate(line):
energy_grid[i,j] = int(char)
flashes_list = [] # count of flashes per step
simultaneous_flag = True
step = 0
while simultaneous_flag:
n_flashes = 0
energy_grid += 1 # increase energy of all octopi 1
flash_locs = np.where(energy_grid == 10)
# Now get the flash locations into a usable format
x = [value for value in flash_locs[0]]
y = [value for value in flash_locs[1]]
xy = []
for i,value in enumerate(x):
xy.append((value,y[i]))
n_flashes += len(xy) # Get number of flashes for those that have flashed
for flashed in xy:
adjacentCells = getAdjacentCells(flashed, nrows, ncols)
for cell in adjacentCells:
i,j = cell
energy_grid[i,j] += 1
e = energy_grid[i,j]
if e == 10:
n_flashes += 1 # This should only count the number of adjacent cells that flash
xy.append((i,j))
for flashed in xy:
i,j = flashed
energy_grid[i,j] = 0 # reset
# Check if all of the octopuses flashed simultaneously
# There are nrows * ncols number of octopuses so if the number of flashes
# is greater than nrows * ncols - 1 then they all flashed.
if n_flashes > nrows*ncols -1:
simultaneous_flag = False
flashes_list.append(n_flashes)
step += 1
print(f'In {step} steps, all of the octopuses flashed simultaneously.') |
the-stack_0_21236 | import numpy as np
import pandas as pd
import operator
from random import randrange
from sklearn import preprocessing
from sklearn.metrics import classification_report
from sklearn.metrics import confusion_matrix
from simple_kNN.distanceMetrics import distanceMetrics
from simple_kNN.kFoldCV import kFoldCV
from simple_kNN.kNNClassifier import kNNClassifier
from simple_kNN.datasets import load_iris
def readData(fileName):
'''
Description:
        Reads comma-separated samples from the given file; each returned data row keeps the class label as its last field, and the labels are also returned as a separate list.
'''
data = []
labels = []
with open(fileName, "r") as file:
lines = file.readlines()
for line in lines:
splitline = line.strip().split(',')
data.append(splitline)
labels.append(splitline[-1])
return data, labels
def readDatawithoutkfcv(fileName):
'''
Description:
        Reads comma-separated samples from the given file; the class label (last field) is stripped from each data row and returned as a separate list.
'''
data = []
labels = []
with open(fileName, "r") as file:
lines = file.readlines()
for line in lines:
splitline = line.strip().split(',')
data.append(splitline[:-1])
labels.append(splitline[-1])
return data, labels
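# --- Illustrative sketch (added example, not part of the original script) ---
# Both readers expect comma-separated lines whose last field is the class label.
# readData keeps the label inside each data row (as kFoldCV below expects), while
# readDatawithoutkfcv strips it; the tiny file written here is hypothetical.
def _reader_example():
    import tempfile
    with tempfile.NamedTemporaryFile('w', suffix='.data', delete=False) as f:
        f.write('1,2,1,1\n2,1,2,2\n')
        path = f.name
    data, labels = readData(path)              # data[0] == ['1', '2', '1', '1']
    feats, _ = readDatawithoutkfcv(path)       # feats[0] == ['1', '2', '1']
    return data, labels, feats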
# ### Hayes-Roth Data
print('***** Without KFold Cross Validation *****')
trainFile = 'Datasets/HayesRoth/hayes-roth.data'
trainData, trainLabel = readDatawithoutkfcv(trainFile)
trainFeatures = []
for row in trainData:
index = row[0:]
temp = [int(item) for item in index]
trainFeatures.append(temp)
trainLabels = [int(label) for label in trainLabel]
knn=kNNClassifier()
knn.fit(trainFeatures, trainLabels)
testFile = 'Datasets/HayesRoth/hayes-roth.test'
testData, testLabel = readData(testFile)
testFeatures = []
for row in testData:
index = row[0:]
temp = [int(item) for item in index]
testFeatures.append(temp)
testLabels = [int(label) for label in testLabel]
eucPredictions = knn.predict(testFeatures, 3, 'euclidean')
print('***** Confusion Matrix *****')
print(confusion_matrix(testLabels, eucPredictions))
# **Create an object for k-Fold cross validation class**
print('***** With KFold Cross Validation *****')
trainData, trainLabel = readData(trainFile)
trainFeatures = []
for row in trainData:
index = row[0:]
temp = [int(item) for item in index]
trainFeatures.append(temp)
trainLabels = [int(label) for label in trainLabel]
kfcv = kFoldCV()
# **Call the Evaluation function of kFCV class**
#
# *kfcv.kFCVEvaluate(data, foldCount, neighborCount, distanceMetric)*
print('*'*20)
print('Hayes Roth Data')
kfcv.kFCVEvaluate(trainFeatures, 10, 3, 'euclidean')
kfcv.kFCVEvaluate(trainFeatures, 10, 3, 'manhattan')
kfcv.kFCVEvaluate(trainFeatures, 10, 3, 'hamming')
# ### Car Evaluation Data
carFile = 'Datasets/CarEvaluation/car.data'
carData, carLabel = readData(carFile)
df = pd.DataFrame(carData)
df = df.apply(preprocessing.LabelEncoder().fit_transform)
carFeatures = df.values.tolist()
carLabels = [car[-1] for car in carFeatures]
print('*'*20)
print('Car Evaluation Data')
kfcv.kFCVEvaluate(carFeatures, 10, 3, 'euclidean')
kfcv.kFCVEvaluate(carFeatures, 10, 3, 'manhattan')
kfcv.kFCVEvaluate(carFeatures, 10, 3, 'hamming')
# ### Breast Cancer Data
print('*'*20)
print('Breast Cancer Data')
cancerFile = 'Datasets/BreastCancer/breast-cancer.data'
cancerData, cancerLabel = readData(cancerFile)
cdf = pd.DataFrame(cancerData)
cdf = cdf.apply(preprocessing.LabelEncoder().fit_transform)
cancerFeatures = cdf.values.tolist()
cancerLabels = [cancer[-1] for cancer in cancerFeatures]
kfcv.kFCVEvaluate(cancerFeatures, 10, 3, 'euclidean')
kfcv.kFCVEvaluate(cancerFeatures, 10, 3, 'manhattan')
kfcv.kFCVEvaluate(cancerFeatures, 10, 3, 'hamming')
|
the-stack_0_21238 | # coding=utf8
# Copyright 2018 JDCLOUD.COM
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# NOTE: This class is auto generated by the jdcloud code generator program.
from jdcloud_sdk.core.jdcloudrequest import JDCloudRequest
class DescribeDeployRequest(JDCloudRequest):
"""
    Query the details of a single deployment task
"""
def __init__(self, parameters, header=None, version="v1"):
super(DescribeDeployRequest, self).__init__(
'/regions/{regionId}/deploy/{deployId}', 'GET', header, version)
self.parameters = parameters
class DescribeDeployParameters(object):
def __init__(self, regionId, deployId, ):
"""
        :param regionId: Region ID
:param deployId: Deploy Id
"""
self.regionId = regionId
self.deployId = deployId
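# --- Illustrative sketch (added example, not part of the generated SDK file) ---
# Typical construction of the request object; both identifiers below are hypothetical
# and actually sending the request requires a configured JDCloud client.
def _describe_deploy_example():
    parameters = DescribeDeployParameters(regionId='cn-north-1', deployId='deploy-123')
    return DescribeDeployRequest(parameters)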
|
the-stack_0_21241 | # -*- coding: utf-8 -*-
# Copyright (c) 2021, libracore and contributors
# For license information, please see license.txt
from __future__ import unicode_literals
import frappe
from frappe.model.document import Document
from PyPDF2 import PdfFileWriter
from frappe.utils.data import now
class ArbeitsBacklog(Document):
pass
def create_abl(typ, mitgliedschaft):
new_abl = frappe.get_doc({
"doctype": "Arbeits Backlog",
"typ": typ,
"mv_mitgliedschaft": mitgliedschaft.name,
"sektion_id": mitgliedschaft.sektion_id
})
new_abl.insert(ignore_permissions=True)
frappe.db.commit()
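# --- Illustrative sketch (added example, not part of the original app code) ---
# create_abl expects a loaded Mitgliedschaft document; the record name below is
# hypothetical and the call requires a running Frappe site with these doctypes.
def _create_abl_example():
    mitgliedschaft = frappe.get_doc("Mitgliedschaft", "MV-00001")
    create_abl("Kündigung verarbeiten", mitgliedschaft)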
@frappe.whitelist()
def kuendigungs_massendruck():
ausstehende_drucke = frappe.db.get_list('Arbeits Backlog',
filters={
'status': 'Open',
'typ': 'Kündigung verarbeiten'
},
fields=['name', 'mv_mitgliedschaft']
)
if len(ausstehende_drucke) > 0:
exist_kuendigungen_folder()
output = PdfFileWriter()
for ausstehender_druck in ausstehende_drucke:
mitgliedschaft = frappe.get_doc("Mitgliedschaft", ausstehender_druck.mv_mitgliedschaft)
abl = frappe.get_doc("Arbeits Backlog", ausstehender_druck.name)
output = frappe.get_print("Mitgliedschaft", mitgliedschaft.name, 'Kündigungsbestätigung', as_pdf = True, output = output)
mitgliedschaft.kuendigung_verarbeiten = 0
mitgliedschaft.save()
abl.status = 'Completed'
abl.save(ignore_permissions=True)
pdf = frappe.utils.pdf.get_file_data_from_writer(output)
_file = frappe.get_doc({
"doctype": "File",
"file_name": "Kündigungen_Sammeldruck_{datetime}.pdf".format(datetime=now().replace(" ", "_")),
"folder": "Home/Kündigungen",
"is_private": 1,
"content": pdf
})
_file.save(ignore_permissions=True)
return 'done'
else:
return 'keine daten'
def exist_kuendigungen_folder():
exist = frappe.db.sql("""SELECT COUNT(`name`) AS `qty` FROM `tabFile` WHERE `name` = 'Home/Kündigungen' AND `is_folder` = 1""", as_dict=True)
if exist[0].qty != 1:
new_folder = frappe.get_doc({
"doctype": "File",
"file_name": "Kündigungen",
"folder": "Home",
"is_folder": 1
})
new_folder.insert(ignore_permissions=True)
frappe.db.commit()
return True
|
the-stack_0_21242 | """
Copyright (c) Microsoft Corporation.
Licensed under the MIT license.
Pytorch modules
some classes are modified from HuggingFace
(https://github.com/huggingface/transformers)
"""
import copy
import json
import logging
from io import open
from ntpath import join
import ipdb
import torch
from torch import nn
from apex.normalization.fused_layer_norm import FusedLayerNorm
from torch.nn.modules import dropout
import math
from .transformer import OPTEncoder
from .timesformer import TimesFormerEncoder
import random
import numpy as np
from .videoswin import SwinTransformer3D
import time
from utils.logger import LOGGER
import torch.nn.functional as F
#logger = logging.getLogger(__name__)
# class OPTConfig(object):
# """Configuration class to store the configuration of a `OPTModel`.
# """
# def __init__(self,
# vocab_size_or_config_json_file,
# hidden_size=768,
# num_hidden_layers=12,
# num_attention_heads=12,
# intermediate_size=3072,
# hidden_act="gelu",
# hidden_dropout=0.1,
# attention_dropout=0.1,
# max_position_embeddings=512,
# type_vocab_size=2,
# initializer_range=0.02):
# """Constructs OPTConfig.
# Args:
# vocab_size_or_config_json_file: Vocabulary size of `inputs_ids` in
# `OPTModel`.
# hidden_size: Size of the encoder layers and the pooler layer.
# num_hidden_layers: Number of hidden layers in the Transformer
# encoder.
# num_attention_heads: Number of attention heads for each attention
# layer in the Transformer encoder.
# intermediate_size: The size of the "intermediate" (i.e.
# feed-forward) layer in the Transformer encoder.
# hidden_act: The non-linear activation function (function or string)
# in the encoder and pooler. If string, "gelu", "relu" and
# "swish" are supported.
# hidden_dropout: The dropout probabilitiy for all fully
# connected layers in the embeddings, encoder, and pooler.
# attention_dropout: The dropout ratio for the attention
# probabilities.
# max_position_embeddings: The maximum sequence length that this
# model might ever be used with. Typically set this to something
# large just in case (e.g., 512 or 1024 or 2048).
# type_vocab_size: The vocabulary size of the `token_type_ids` passed
# into `OPTModel`.
# initializer_range: The sttdev of the truncated_normal_initializer
# for initializing all weight matrices.
# """
# if isinstance(vocab_size_or_config_json_file, str):
# with open(vocab_size_or_config_json_file,
# "r", encoding='utf-8') as reader:
# json_config = json.loads(reader.read())
# for key, value in json_config.items():
# self.__dict__[key] = value
# elif isinstance(vocab_size_or_config_json_file, int):
# self.vocab_size = vocab_size_or_config_json_file
# self.hidden_size = hidden_size
# self.num_hidden_layers = num_hidden_layers
# self.num_attention_heads = num_attention_heads
# self.hidden_act = hidden_act
# self.intermediate_size = intermediate_size
# self.hidden_dropout = hidden_dropout
# self.attention_dropout = attention_dropout
# self.max_position_embeddings = max_position_embeddings
# self.type_vocab_size = type_vocab_size
# self.initializer_range = initializer_range
# else:
# raise ValueError("First argument must be either a vocabulary size "
# "(int) or the path to a pretrained model config "
# "file (str)")
class OPTConfig(object):
def __init__(self,
config):
if isinstance(config, dict):
for key, value in config.items():
self.__dict__[key] = value
else:
raise ValueError("First argument must be either a vocabulary size "
"(int) or the path to a pretrained model config "
"file (str)")
@classmethod
def from_dict(cls, json_object):
"""Constructs a `OPTConfig` from a
Python dictionary of parameters."""
        config = OPTConfig({})
for key, value in json_object.items():
config.__dict__[key] = value
return config
@classmethod
def from_json_file(cls, json_file):
"""Constructs a `OPTConfig` from a json file of parameters."""
with open(json_file, "r", encoding='utf-8') as reader:
text = reader.read()
return cls.from_dict(json.loads(text))
def __repr__(self):
return str(self.to_json_string())
def to_dict(self):
"""Serializes this instance to a Python dictionary."""
output = copy.deepcopy(self.__dict__)
return output
def to_json_string(self):
"""Serializes this instance to a JSON string."""
return json.dumps(self.to_dict(), indent=2, sort_keys=True) + "\n"
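# --- Illustrative sketch (added example, not part of the original module) ---
# OPTConfig simply copies a plain dict into attributes; the keys below mirror fields
# read elsewhere in this file and the values are assumptions.
def _opt_config_example():
    cfg = OPTConfig({'hidden_size': 768, 'txt_layer_num': 12, 'vocab_size': 30522,
                     'max_position_embeddings': 512})
    assert cfg.hidden_size == 768
    return cfg.to_json_string()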
class OPTPreTrainedModel(nn.Module):
""" An abstract class to handle weights initialization and
a simple interface for dowloading and loading pretrained models.
"""
def __init__(self, config, *inputs, **kwargs):
super().__init__()
if not isinstance(config, OPTConfig):
raise ValueError(
"Parameter config in `{}(config)` should be an instance of "
"class `OPTConfig`. To create a model from a Google "
"pretrained model use "
"`model = {}.from_pretrained(PRETRAINED_MODEL_NAME)`".format(
self.__class__.__name__, self.__class__.__name__
))
self.config = config
def init_weights(self, module):
""" Initialize the weights.
"""
if isinstance(module, (nn.Linear, nn.Embedding)):
# Slightly different from the TF version which uses
# truncated_normal for initialization
# cf https://github.com/pytorch/pytorch/pull/5617
module.weight.data.normal_(mean=0.0,
std=self.config.initializer_range)
elif isinstance(module, FusedLayerNorm):
module.bias.data.zero_()
module.weight.data.fill_(1.0)
if isinstance(module, nn.Linear) and module.bias is not None:
module.bias.data.zero_()
@classmethod
def from_pretrained(cls, config, state_dict, *inputs, **kwargs):
"""
Instantiate a OPTPreTrainedModel from a pre-trained model file or a
pytorch state dict.
Params:
config_file: config json file
state_dict: an state dictionnary
*inputs, **kwargs: additional input for the specific OPT class
"""
# Load config
#config = OPTConfig.from_json_file(config_file)
# config = OPTConfig(vocab_size_or_config_json_file = config_file)
config = OPTConfig(config)
LOGGER.info("Model config {}".format(config))
# Instantiate model.
model = cls(config, *inputs, **kwargs)
# Load from a PyTorch state_dict
old_keys = []
new_keys = []
for key in state_dict.keys():
new_key = None
if 'gamma' in key:
new_key = key.replace('gamma', 'weight')
if 'beta' in key:
new_key = key.replace('beta', 'bias')
if new_key:
old_keys.append(key)
new_keys.append(new_key)
for old_key, new_key in zip(old_keys, new_keys):
state_dict[new_key] = state_dict.pop(old_key)
missing_keys = []
unexpected_keys = []
error_msgs = []
# copy state_dict so _load_from_state_dict can modify it
metadata = getattr(state_dict, '_metadata', None)
state_dict = state_dict.copy()
if metadata is not None:
state_dict._metadata = metadata
def load(module, prefix=''):
local_metadata = ({} if metadata is None
else metadata.get(prefix[:-1], {}))
module._load_from_state_dict(
state_dict, prefix, local_metadata, True, missing_keys,
unexpected_keys, error_msgs)
for name, child in module._modules.items():
if child is not None:
load(child, prefix + name + '.')
start_prefix = ''
if not hasattr(model, 'bert') and any(s.startswith('bert.')
for s in state_dict.keys()):
start_prefix = 'bert.'
load(model, prefix=start_prefix)
if len(missing_keys) > 0:
LOGGER.info("Weights of {} not initialized from "
"pretrained model: {}".format(
model.__class__.__name__, str(missing_keys)))
if len(unexpected_keys) > 0:
LOGGER.info("Weights from pretrained model not used in "
"{}: {}".format(
model.__class__.__name__, str(unexpected_keys)))
if len(error_msgs) > 0:
raise RuntimeError('Error(s) in loading state_dict for '
'{}:\n\t{}'.format(
model.__class__.__name__,
"\n\t".join(error_msgs)))
return model
class OPTTextEmbeddings(nn.Module):
def __init__(self, config):
super().__init__()
self.word_embeddings = nn.Embedding(config.vocab_size, config.hidden_size, padding_idx=0)
self.position_embeddings = nn.Embedding(config.max_position_embeddings, config.hidden_size)
self.layernorm = FusedLayerNorm(config.hidden_size, eps=1e-12)
self.dropout = nn.Dropout(config.hidden_dropout)
self.mask_prob = getattr(config,'txt_mask_prob', 0.15)
self.mask_token = 103
self.range = [106, config.vocab_size]
def forward(self, txt_tokens, perform_mask = False):
txt_labels = None
if perform_mask:
            txt_tokens = txt_tokens.clone() ### important: clone so masking does not modify the caller's tensor in place
txt_tokens, txt_labels = self.perform_mask(txt_tokens)
words_embeddings = self.word_embeddings(txt_tokens)
position_ids = torch.arange(words_embeddings.shape[1], dtype=torch.long, device= words_embeddings.device).unsqueeze(0)
position_embeddings = self.position_embeddings(position_ids)
embeddings = words_embeddings + position_embeddings
embeddings = self.layernorm(embeddings)
embeddings = self.dropout(embeddings)
return embeddings, position_embeddings, txt_labels
def perform_mask(self, txt_tokens):
labels = torch.zeros_like(txt_tokens).fill_(-1)
for i in range(txt_tokens.shape[0]):
for j in range(1, txt_tokens.shape[1]):
prob = random.random()
if txt_tokens[i][j]!=0 and prob < self.mask_prob:
src_token = txt_tokens[i][j].item()
prob /= self.mask_prob
if prob < 0.8:
txt_tokens[i][j] = self.mask_token
elif prob < 0.9:
txt_tokens[i][j] = random.choice(list(range(*self.range)))
labels[i][j] = src_token
#### at least mask one token
if all(labels[i] == -1):
src_token = txt_tokens[i][1].item()
prob = random.random()
if prob < 0.8:
txt_tokens[i][1] = self.mask_token
elif prob < 0.9:
txt_tokens[i][1] = random.choice(list(range(*self.range)))
labels[i][1] = src_token
return txt_tokens, labels
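# --- Illustrative note (added comments, not part of the original module) ---
# perform_mask above follows the BERT recipe: each non-pad token after the first
# position is selected with probability `txt_mask_prob`; of the selected tokens
# roughly 80% are replaced by the [MASK] id (103), 10% by a random vocabulary id,
# and 10% are left unchanged, while `labels` records the original id
# (-1 marks unselected positions).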
class TimesformerVideoEmbeddings(nn.Module):
def __init__(self, config, video_cfg):
super().__init__()
self.sample_num = video_cfg['sample_num']
self.patch_size = video_cfg['patch_size']
self.token_length_per_frame = (video_cfg['resolution'] // self.patch_size) **2
self.first_conv = nn.Conv2d(3, config.hidden_size, kernel_size = self.patch_size,
stride = self.patch_size, padding=0)
self.position_embeddings = nn.Embedding(self.token_length_per_frame + 1, config.hidden_size)
self.frame_embedding = nn.Embedding(10,config.hidden_size) ###assert max 10 frames
# self.layernorm = FusedLayerNorm(config.hidden_size, eps=1e-12)
self.dropout = nn.Dropout(config.hidden_dropout)
self.cls_token = nn.Parameter(0.02 * torch.randn(1, 1, config.hidden_size))
self.token_drop_ratio = video_cfg.get('token_drop_ratio', 0)
self.mask_prob = getattr(config,'video_mask_prob', 0.15)
self.mask_token = nn.Parameter(0.02 * torch.randn(config.hidden_size))
self.drop_masktoken = getattr(config,'drop_masktoken', False)
def forward(self, video_pixels, perform_mask = False): ### shape Bxnx3xHxW
b, n, c, h, w = video_pixels.shape
video_pixels = video_pixels.reshape(b*n, c,h,w)
# video_pixels_raw = None
video_mask_indicator = None
video_labels = None
if perform_mask:
video_pixels_raw = video_pixels.reshape(b*n , c, h//self.patch_size, self.patch_size, w//self.patch_size, self.patch_size)
video_pixels_raw = video_pixels_raw.permute(0, 2, 4, 3, 5, 1).reshape(b,-1, c*self.patch_size*self.patch_size)
video_pixels = self.first_conv(video_pixels) ### B*n, 768,h,w
video_pixels = video_pixels.permute(0,2,3,1) ### B*n, h,w,768
video_pixels = video_pixels.reshape(b,-1,video_pixels.shape[-1])
if perform_mask:
video_mask_indicator = torch.zeros(video_pixels.shape[:2]).long().cuda()
# for i in range(video_pixels.shape[0]):
# for j in range(video_pixels.shape[1]):
# if random.random() < self.mask_prob:
# video_mask_indicator[i][j] = 1
### make sure every frame mask same number of tokens
video_mask_indicator = video_mask_indicator.reshape(b*self.sample_num, -1)
mask_num = int(self.mask_prob*video_mask_indicator.shape[1])
video_mask_indicator[:,:mask_num] = 1
for i in range(video_mask_indicator.shape[0]):
shuffle_idx = torch.randperm(video_mask_indicator.shape[1])
video_mask_indicator[i] = video_mask_indicator[i][shuffle_idx]
video_mask_indicator = video_mask_indicator.reshape(b,-1)
video_pixels[video_mask_indicator.bool()] = self.mask_token
video_labels = video_pixels_raw[video_mask_indicator.bool()]
assert self.token_drop_ratio == 0 , 'mask conflicts with drop token.'
batch_size = video_pixels.shape[0]
cls_token = self.cls_token.expand(batch_size,-1,-1)
video_tokens = torch.cat((cls_token,video_pixels),dim=1)
video_pos_ids = [0] + list(range(1, self.token_length_per_frame + 1)) * self.sample_num
video_pos_ids = torch.tensor(video_pos_ids, dtype=torch.long, device=video_pixels.device).unsqueeze(0)
position_embeddings = self.position_embeddings(video_pos_ids)
frame_ids = [i for i in range(self.sample_num) for j in range(self.token_length_per_frame)]
frame_ids = torch.tensor(frame_ids, dtype=torch.long, device=video_pixels.device).unsqueeze(0)
position_embeddings[:,1:] += self.frame_embedding(frame_ids)
embeddings = video_tokens + position_embeddings
#embeddings = self.layernorm(embeddings)
embeddings = self.dropout(embeddings)
if self.drop_masktoken:
cls_embedding = embeddings[:,0:1]
res_embedding = embeddings[:,1:]
dim = res_embedding.shape[-1]
unmasked_idx = ~video_mask_indicator.bool()
res_embedding = res_embedding[unmasked_idx].reshape(b,-1,dim)
embeddings = torch.cat((cls_embedding, res_embedding),dim=1)
elif self.training and self.token_drop_ratio > 0:
position_embeddings = position_embeddings.expand(embeddings.shape[0],-1,-1)
embeddings_p1 = embeddings[:,:1] ### cls token do not drop
position_embeddings_p1 = position_embeddings[:,:1]
embeddings_p2 = embeddings[:,1:]
position_embeddings_p2 = position_embeddings[:,1:]
b, n, c = embeddings_p2.shape
embeddings_p2 = embeddings_p2.reshape(b*self.sample_num, self.token_length_per_frame, c)
position_embeddings_p2 = position_embeddings_p2.reshape(b*self.sample_num, self.token_length_per_frame, c)
src_len = embeddings_p2.shape[1]
tgt_len = int((1 - self.token_drop_ratio) * src_len)
tmp = list(range(src_len))
gather_idx = [ random.sample(tmp,tgt_len) for i in range(embeddings_p2.shape[0]) ]
for i in gather_idx:
i.sort()
gather_idx = torch.tensor(gather_idx).to(video_pos_ids).unsqueeze(-1).expand(-1,-1,embeddings_p2.shape[-1])
embeddings_p2 = torch.gather(embeddings_p2, 1 , gather_idx)
position_embeddings_p2 = torch.gather(position_embeddings_p2, 1 , gather_idx)
embeddings_p2 = embeddings_p2.reshape(b,-1,c)
position_embeddings_p2 = position_embeddings_p2.reshape(b,-1,c)
embeddings = torch.cat((embeddings_p1, embeddings_p2),dim=1)
position_embeddings = torch.cat((position_embeddings_p1, position_embeddings_p2),dim=1)
return embeddings, position_embeddings, video_mask_indicator, video_labels
class VitVideoEmbeddings(nn.Module):
def __init__(self, config, video_cfg):
super().__init__()
self.sample_num = video_cfg['sample_num']
self.token_length_per_frame = (video_cfg['resolution'] // video_cfg['patch_size']) **2
self.first_conv = nn.Conv2d(3, config.hidden_size, kernel_size = video_cfg['patch_size'],
stride = video_cfg['patch_size'], padding=0)
self.position_embeddings = nn.Embedding(self.token_length_per_frame + 1, config.hidden_size)
self.frame_embedding = nn.Embedding(10,config.hidden_size) ###assert max 10 frames
# self.layernorm = FusedLayerNorm(config.hidden_size, eps=1e-12)
self.dropout = nn.Dropout(config.hidden_dropout)
self.cls_token = nn.Parameter(0.02 * torch.randn(1, 1, config.hidden_size))
self.drop_ratio = video_cfg.get('drop_ratio',0)
self.video_encoder_type = config.video_encoder_type
def forward(self, video_pixels): ### shape Bxnx3xHxW
b, n, c, h, w = video_pixels.shape
video_pixels = video_pixels.reshape(b*n, c,h,w)
video_pixels = self.first_conv(video_pixels) ### B*n, 768,h,w
video_pixels = video_pixels.permute(0,2,3,1) ### B*n, h,w,768
video_pixels = video_pixels.reshape(video_pixels.shape[0],-1,video_pixels.shape[-1])
batch_size = video_pixels.shape[0]
cls_token = self.cls_token.expand(batch_size,-1,-1)
video_tokens = torch.cat((cls_token,video_pixels),dim=1)
video_pos_ids = list(range(self.token_length_per_frame + 1))
video_pos_ids = torch.tensor(video_pos_ids, dtype=torch.long, device=video_pixels.device).unsqueeze(0)
position_embeddings = self.position_embeddings(video_pos_ids)
if self.video_encoder_type == 'vit_local':
frame_ids = [i for i in range(self.sample_num) for j in range(self.token_length_per_frame + 1)]
else:
frame_ids = [i for i in range(self.sample_num)]
frame_ids = torch.tensor(frame_ids, dtype=torch.long, device=video_pixels.device).unsqueeze(0)
frame_embeddings = self.frame_embedding(frame_ids)
embeddings = video_tokens + position_embeddings
#embeddings = self.layernorm(embeddings)
embeddings = self.dropout(embeddings)
if self.training and self.drop_ratio > 0:
embeddings_p1 = embeddings[:,0] ### cls token do not drop
embeddings_p2 = embeddings[:,1:]
src_len = embeddings_p2.shape[1]
tgt_len = int((1 - self.drop_ratio) * src_len)
tmp = list(range(src_len))
gather_idx = [ random.sample(tmp,tgt_len) for i in range(embeddings_p2.shape[0]) ]
for i in gather_idx:
i.sort()
gather_idx = torch.tensor(gather_idx).to(video_pos_ids).unsqueeze(-1).expand(-1,-1,embeddings_p2.shape[-1])
embeddings_p2 = torch.gather(embeddings_p2, 1 , gather_idx)
embeddings = torch.cat((embeddings_p1.unsqueeze(1), embeddings_p2),dim=1)
return embeddings, position_embeddings, frame_embeddings
class OPTAudioEmbeddings(nn.Module):
def __init__(self, config, audio_cfg):
super().__init__()
self.patch_size = audio_cfg['patch_size']
self.token_length_per_frame = (audio_cfg['melbins'] // self.patch_size) * (audio_cfg['target_length'] // self.patch_size)
self.first_conv = nn.Conv2d(1, config.hidden_size, kernel_size = self.patch_size,
stride = self.patch_size, padding=0)
self.position_embeddings = nn.Embedding(self.token_length_per_frame + 1, config.hidden_size)
self.dropout = nn.Dropout(config.hidden_dropout)
self.cls_token = nn.Parameter(0.02 * torch.randn(1, 1, config.hidden_size))
def forward(self, audio_spectrograms): ### shape Bx128x1024
audio_spectrograms = self.first_conv(audio_spectrograms.unsqueeze(1))
b,c,_,_=audio_spectrograms.shape
audio_tokens = audio_spectrograms.permute(0,2,3,1).reshape(b,-1,c)
cls_token = self.cls_token.expand(b,-1,-1)
audio_tokens = torch.cat((cls_token,audio_tokens),dim=1)
audio_pos_ids = list(range(self.token_length_per_frame + 1))
audio_pos_ids = torch.tensor(audio_pos_ids, dtype=torch.long, device=audio_spectrograms.device).unsqueeze(0)
position_embeddings = self.position_embeddings(audio_pos_ids)
embeddings = audio_tokens + position_embeddings
#embeddings = self.layernorm(embeddings)
embeddings = self.dropout(embeddings)
return embeddings, position_embeddings
class Config():
def __init__(self):
self.void = 'void'
class OPTModel(nn.Module):
def __init__(self, config, video_cfg, audio_cfg):
super().__init__()
base_cfg = Config()
base_cfg.attention_dropout = 0.1
base_cfg.hidden_act= "gelu"
base_cfg.hidden_dropout= 0.1
base_cfg.hidden_size= 768
base_cfg.initializer_range = 0.02
base_cfg.intermediate_size = 3072
base_cfg.num_attention_heads = 12
### txt embedding and txt encoder
model_cfg_txt = copy.copy(base_cfg)
model_cfg_txt.num_hidden_layers = config.txt_layer_num
model_cfg_txt.vocab_size = config.vocab_size
model_cfg_txt.max_position_embeddings = config.max_position_embeddings
self.txt_embeddings = OPTTextEmbeddings(model_cfg_txt)
self.txt_encoder = OPTEncoder(model_cfg_txt, mode='postnorm')
### video embedding and video encoder
self.video_encoder_type = config.video_encoder_type
self.frame_num = video_cfg['sample_num']
self.patch_size = video_cfg['patch_size']
self.token_length_per_frame = (video_cfg['resolution'] // self.patch_size) **2
model_cfg_video = copy.copy(base_cfg)
model_cfg_video.num_hidden_layers = config.video_layer_num
model_cfg_video.video_encoder_type = config.video_encoder_type
if self.video_encoder_type.startswith('vit'):
self.video_embeddings = VitVideoEmbeddings(model_cfg_video, video_cfg)
self.video_encoder = OPTEncoder(config, mode='prenorm')
elif self.video_encoder_type.startswith('timesformer'):
self.video_embeddings = TimesformerVideoEmbeddings(model_cfg_video, video_cfg)
self.video_encoder = TimesFormerEncoder(model_cfg_video,video_cfg)
elif self.video_encoder_type == 'videoswin':
self.time_stride = config.videoswin_timestride
self.video_encoder = SwinTransformer3D(time_stride = self.time_stride)
self.sample_num = video_cfg['sample_num']
self.token_length_per_frame = (video_cfg['resolution'] // video_cfg['patch_size']) **2
self.position_embeddings = nn.Embedding(self.token_length_per_frame, config.hidden_size)
self.frame_embedding = nn.Embedding(10,config.hidden_size) ###assert max 10 frames
### audio embedding and audio encoder
model_cfg_audio = copy.copy(base_cfg)
model_cfg_audio.num_hidden_layers = config.audio_layer_num
self.audio_embeddings = OPTAudioEmbeddings(model_cfg_audio, audio_cfg)
self.audio_encoder = OPTEncoder(model_cfg_audio, mode='prenorm')
### multimodal encoder
model_cfg_multimodal = copy.deepcopy(base_cfg)
model_cfg_multimodal.num_hidden_layers = config.multimodal_layer_num
multimodal_norm_mode = config.multimodal_norm_mode
self.multimodal_encoder = OPTEncoder(model_cfg_multimodal, mode= multimodal_norm_mode)
#self.caption_encoder = self.multimodal_encoder
self.cls_token_TV = nn.Parameter(0.02 * torch.randn(1, 1, base_cfg.hidden_size))
self.txt_type_embeddings = nn.Parameter(0.02 * torch.randn(1, 1, base_cfg.hidden_size))
self.video_type_embeddings = nn.Parameter(0.02 * torch.randn(1, 1, base_cfg.hidden_size))
self.audio_type_embeddings = nn.Parameter(0.02 * torch.randn(1, 1, base_cfg.hidden_size))
self.reuse_embedding = config.reuse_embedding
self.average_video = config.average_video
self.average_video_mode = getattr(config,'average_video_mode','space')
self.average_audio_mode = getattr(config,'average_audio_mode','space')
#### single modality encoder weights
self.txt_encoder_weights = config.txt_encoder_weights
self.video_encoder_weights = config.video_encoder_weights
self.audio_encoder_weights = config.audio_encoder_weights
self.audio_cfg = audio_cfg
def forward_txt_encoder(self, txt_tokens, perform_mask=False):
attn_mask_txt = (txt_tokens != 0).long()
txt_embeddings, txt_position_embeddings, txt_labels = self.txt_embeddings(txt_tokens, perform_mask=perform_mask)
attn_masks = attn_mask_txt.unsqueeze(1).expand(-1, txt_tokens.shape[-1], -1).clone()
# if multimodal_uniattn:
# attn_masks = torch.tril(attn_masks)
attn_masks = attn_masks.unsqueeze(1)
attn_masks = attn_masks.to(dtype=next(self.parameters()).dtype) # fp16 compatibility
        #### Note: if the following line is omitted, MLM accuracy improves noticeably, which deserves further research:
        #### i.e. a bidirectional text encoder combined with a uni-directional multimodal encoder can improve MLM.
attn_masks = (1.0 - attn_masks) * -10000.0
txt_output = self.txt_encoder(txt_embeddings, attn_masks)
return txt_output, txt_position_embeddings, attn_mask_txt, txt_labels
def forward_video_encoder(self, video_pixels, perform_mask=False):
### b,n,c,H,W
if self.video_encoder_type.startswith('vit'):
video_embeddings, position_embeddings, frame_embeddings = self.video_embeddings(video_pixels) #[b, (n*f+1), c]
video_output = self.video_encoder(video_embeddings)
video_output = video_output.reshape(-1, self.frame_num, *video_output.shape[-2:])
batch_size, hidden_size = video_output.shape[0], video_output.shape[-1]
if self.video_encoder_type == 'vit_global':
video_output = video_output[:,:,0]
position_embeddings = frame_embeddings
elif self.video_encoder_type == 'vit_local':
video_output = video_output.reshape(batch_size, -1, hidden_size)
position_embeddings = position_embeddings.repeat(1,self.frame_num,1)
position_embeddings = position_embeddings + frame_embeddings
return video_output, position_embeddings
elif self.video_encoder_type.startswith('timesformer'):
video_embeddings, position_embeddings, video_mask_indicator, video_labels = self.video_embeddings(video_pixels, perform_mask = perform_mask) #[b, (n*f+1), c]
video_output = self.video_encoder(video_embeddings)
elif self.video_encoder_type == 'videoswin':
video_output = self.video_encoder(video_pixels.transpose(1,2))
## b,c,n,h,w
video_output = video_output.permute(0, 2, 3, 4, 1)
### b,n,h,w,c
video_output = video_output.reshape(video_output.shape[0],-1,video_output.shape[-1])
### b, n*h*w, c
#video_output = torch.cat((self.video_cls_token.expand(video_output.shape[0],-1,-1),video_output),dim=1)
sample_num = self.sample_num // self.time_stride
video_pos_ids = list(range(self.token_length_per_frame)) * sample_num
video_pos_ids = torch.tensor(video_pos_ids, dtype=torch.long, device=video_pixels.device).unsqueeze(0)
position_embeddings = self.position_embeddings(video_pos_ids)
frame_ids = [i for i in range(sample_num) for j in range(self.token_length_per_frame)]
frame_ids = torch.tensor(frame_ids, dtype=torch.long, device=video_pixels.device).unsqueeze(0)
position_embeddings += self.frame_embedding(frame_ids)
return video_output, position_embeddings, video_mask_indicator, video_labels
def forward_audio_encoder(self, audio_spectrograms):
audio_embeddings, position_embeddings = self.audio_embeddings(audio_spectrograms)
audio_output = self.audio_encoder(audio_embeddings)
return audio_output, position_embeddings
def get_multimodal_forward_input_txt(self, txt_output, txt_position_embedding):
if self.reuse_embedding:
txt_output = txt_output + self.txt_type_embeddings + txt_position_embedding
return txt_output
def get_multimodal_forward_input_video(self, video_output, video_position_embedding, video_mask_indicator):
if self.video_embeddings.drop_masktoken: #### refill in the mask_token
cls_embedding = video_output[:,0:1]
res_embedding = video_output[:,1:]
b,_,c = video_output.shape
n = self.frame_num * self.token_length_per_frame
fillin_embedding = torch.zeros((b,n,c)).to(video_output)
fillin_embedding[:,:] = self.video_embeddings.mask_token
unmasked_idx = ~video_mask_indicator.bool()
fillin_embedding[unmasked_idx] = res_embedding.reshape(-1,c)
video_output = torch.cat((cls_embedding, fillin_embedding),dim=1)
if self.reuse_embedding:
video_output = video_output + self.video_type_embeddings + video_position_embedding
if self.average_video:
batch_size,_,hidden_size = video_output.shape
average_video = video_output[:,1:].reshape(batch_size,self.frame_num, self.token_length_per_frame,hidden_size)
if self.average_video_mode == 'time':
average_video = average_video.mean(dim=1)
elif self.average_video_mode == 'space':
average_video = average_video.mean(dim=2)
else:
raise NotImplementedError
video_output = torch.cat((video_output[:,0:1], average_video),dim=1)
attn_masks_video = torch.ones(*video_output.shape[:2]).long().cuda()
return video_output, attn_masks_video
def get_multimodal_forward_input_audio(self, audio_output, audio_position_embedding):
if self.reuse_embedding:
audio_output = audio_output + self.audio_type_embeddings + audio_position_embedding
if self.average_audio_mode == 'space':
average_audio = audio_output[:,1:]
average_audio = average_audio.mean(dim=1,keepdim=True)
audio_output = torch.cat((audio_output[:,0:1], average_audio),dim=1)
elif self.average_audio_mode == 'none':
pass
else:
raise NotImplementedError
attn_masks_audio = torch.ones(*audio_output.shape[:2]).long().cuda()
return audio_output, attn_masks_audio
def forward_multimodal_encoder(self, txt_output, attn_masks_txt, video_output, attn_masks_video, audio_output=None, attn_masks_audio=None):
if audio_output is None and attn_masks_audio is None: #### m2
attn_masks_multimodal_clstoken = torch.ones(attn_masks_txt.shape[0]).to(attn_masks_txt).unsqueeze(1)
attn_masks = torch.cat((attn_masks_multimodal_clstoken, attn_masks_txt, attn_masks_video),dim=1)
attn_masks = attn_masks.unsqueeze(1).unsqueeze(2)
attn_masks = attn_masks.to(dtype=next(self.parameters()).dtype) # fp16 compatibility
attn_masks = (1.0 - attn_masks) * -10000.0
multimodal_input = torch.cat((self.cls_token_TV.expand(txt_output.shape[0],-1,-1), txt_output, video_output),dim=1)
multimodal_output = self.multimodal_encoder(multimodal_input, attn_masks)
else: #### m3
attn_masks_multimodal_clstoken = torch.ones(attn_masks_txt.shape[0]).to(attn_masks_txt).unsqueeze(1)
attn_masks = torch.cat((attn_masks_multimodal_clstoken, attn_masks_txt, attn_masks_video,attn_masks_audio),dim=1)
attn_masks = attn_masks.unsqueeze(1).unsqueeze(2)
attn_masks = attn_masks.to(dtype=next(self.parameters()).dtype) # fp16 compatibility
attn_masks = (1.0 - attn_masks) * -10000.0
multimodal_input = torch.cat((self.cls_token_TV.expand(txt_output.shape[0],-1,-1), \
txt_output, video_output, audio_output),dim=1)
multimodal_output = self.multimodal_encoder(multimodal_input, attn_masks)
return multimodal_output
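    # --- Illustrative note (added comments, not part of the original module) ---
    # Mask convention used throughout this file: `attn_masks` starts as 1 for valid
    # and 0 for padded positions; after `(1.0 - attn_masks) * -10000.0` valid
    # positions become 0 and padded positions a large negative bias, which is added
    # to the attention logits before softmax so padded tokens get ~zero weight.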
def forward_caption_encoder(self, txt_tokens, attn_mask_txt, video_input, attn_masks_video, \
audio_input=None, attn_masks_audio=None, perform_mask=True):
txt_embeddings, _ , txt_labels = self.txt_embeddings(txt_tokens, perform_mask = perform_mask)
if self.reuse_embedding:
txt_embeddings = txt_embeddings + self.txt_type_embeddings
### m2
if audio_input is None and attn_masks_audio is None:
attn_masks = torch.cat((attn_mask_txt, attn_masks_video),dim=1)
total_len = attn_masks.shape[1]
txt_len = txt_tokens.shape[1]
attn_masks = attn_masks.unsqueeze(1).expand(-1, total_len, -1).clone()
attn_masks[:, : txt_len, : txt_len] = torch.tril(attn_masks[:, : txt_len, : txt_len])
attn_masks[:, txt_len:, : txt_len] = 0
attn_masks = attn_masks.unsqueeze(1)
attn_masks = attn_masks.to(dtype=next(self.parameters()).dtype) # fp16 compatibility
attn_masks = (1.0 - attn_masks) * -10000.0
caption_input = torch.cat((txt_embeddings, video_input),dim=1)
### m3
else:
attn_masks = torch.cat((attn_mask_txt,attn_masks_video, attn_masks_audio),dim=1)
total_len = attn_masks.shape[1]
txt_len = txt_tokens.shape[1]
attn_masks = attn_masks.unsqueeze(1).expand(-1, total_len, -1).clone()
attn_masks[:, : txt_len, : txt_len] = torch.tril(attn_masks[:, : txt_len, : txt_len])
attn_masks[:, txt_len:, : txt_len] = 0
attn_masks = attn_masks.unsqueeze(1)
attn_masks = attn_masks.to(dtype=next(self.parameters()).dtype) # fp16 compatibility
attn_masks = (1.0 - attn_masks) * -10000.0
caption_input = torch.cat((txt_embeddings, video_input, audio_input),dim=1)
caption_output = self.multimodal_encoder(caption_input, attn_masks)
return caption_output, txt_labels
def forward_retrieval_encoder(self, txt_input=None, attn_masks_txt=None, video_input=None,
attn_masks_video=None, audio_input=None, attn_masks_audio=None):
if txt_input is not None and video_input is None and audio_input is None:
attn_masks = attn_masks_txt.unsqueeze(1).expand(-1, attn_masks_txt.shape[-1], -1)
attn_masks = attn_masks.unsqueeze(1)
attn_masks = attn_masks.to(dtype=next(self.parameters()).dtype)
attn_masks = (1.0 - attn_masks) * -10000.0
multimodal_output = self.multimodal_encoder(txt_input, attn_masks)
elif video_input is not None and txt_input is None and audio_input is None: ###assert no mask
multimodal_output = self.multimodal_encoder(video_input)
elif audio_input is not None and txt_input is None and video_input is None: #### assert no mask
multimodal_output = self.multimodal_encoder(audio_input)
elif video_input is not None and audio_input is not None and txt_input is None:
attn_masks_multimodal_clstoken = torch.ones(attn_masks_video.shape[0]).to(attn_masks_video).unsqueeze(1)
attn_masks = torch.cat((attn_masks_multimodal_clstoken, attn_masks_video,attn_masks_audio),dim=1)
attn_masks = attn_masks.unsqueeze(1).unsqueeze(2)
attn_masks = attn_masks.to(dtype=next(self.parameters()).dtype) # fp16 compatibility
attn_masks = (1.0 - attn_masks) * -10000.0
multimodal_input = torch.cat((self.cls_token_TV.expand(video_input.shape[0],-1,-1), \
video_input, audio_input),dim=1)
multimodal_output = self.multimodal_encoder(multimodal_input, attn_masks)
else:
raise NotImplementedError
return multimodal_output
def initialize_weights(self):
if self.txt_encoder_weights :
self.initialize_txt_weights()
if self.video_encoder_weights:
self.initialize_video_weights()
if self.audio_encoder_weights:
self.initialize_audio_weights()
def initialize_txt_weights(self):
bert_weight = torch.load(self.txt_encoder_weights)
txt_weight = {}
### word_embedding_weights:
txt_weight['txt_embeddings.word_embeddings.weight'] = bert_weight['bert.embeddings.word_embeddings.weight']
### position_embedding weights:
txt_weight['txt_embeddings.position_embeddings.weight'] = bert_weight['bert.embeddings.position_embeddings.weight']
txt_weight['txt_embeddings.layernorm.weight'] = bert_weight['bert.embeddings.LayerNorm.gamma']
txt_weight['txt_embeddings.layernorm.bias'] = bert_weight['bert.embeddings.LayerNorm.beta']
for i in range(12):
txt_weight['txt_encoder.layer.'+str(i)+'.attention.linears.0.weight'] = bert_weight['bert.encoder.layer.'+str(i)+'.attention.self.query.weight']
txt_weight['txt_encoder.layer.'+str(i)+'.attention.linears.0.bias'] = bert_weight['bert.encoder.layer.'+str(i)+'.attention.self.query.bias']
txt_weight['txt_encoder.layer.'+str(i)+'.attention.linears.1.weight'] = bert_weight['bert.encoder.layer.'+str(i)+'.attention.self.key.weight']
txt_weight['txt_encoder.layer.'+str(i)+'.attention.linears.1.bias'] = bert_weight['bert.encoder.layer.'+str(i)+'.attention.self.key.bias']
txt_weight['txt_encoder.layer.'+str(i)+'.attention.linears.2.weight'] = bert_weight['bert.encoder.layer.'+str(i)+'.attention.self.value.weight']
txt_weight['txt_encoder.layer.'+str(i)+'.attention.linears.2.bias'] = bert_weight['bert.encoder.layer.'+str(i)+'.attention.self.value.bias']
txt_weight['txt_encoder.layer.'+str(i)+'.attention.linears.3.weight'] = bert_weight['bert.encoder.layer.'+str(i)+'.attention.output.dense.weight']
txt_weight['txt_encoder.layer.'+str(i)+'.attention.linears.3.bias'] = bert_weight['bert.encoder.layer.'+str(i)+'.attention.output.dense.bias']
txt_weight['txt_encoder.layer.'+str(i)+'.ff_layer.linear1.weight'] = bert_weight['bert.encoder.layer.'+str(i)+'.intermediate.dense.weight']
txt_weight['txt_encoder.layer.'+str(i)+'.ff_layer.linear1.bias'] = bert_weight['bert.encoder.layer.'+str(i)+'.intermediate.dense.bias']
txt_weight['txt_encoder.layer.'+str(i)+'.ff_layer.linear2.weight'] = bert_weight['bert.encoder.layer.'+str(i)+'.output.dense.weight']
txt_weight['txt_encoder.layer.'+str(i)+'.ff_layer.linear2.bias'] = bert_weight['bert.encoder.layer.'+str(i)+'.output.dense.bias']
txt_weight['txt_encoder.layer.'+str(i)+'.layernorm1.weight'] = bert_weight['bert.encoder.layer.'+str(i)+'.attention.output.LayerNorm.gamma']
txt_weight['txt_encoder.layer.'+str(i)+'.layernorm1.bias'] = bert_weight['bert.encoder.layer.'+str(i)+'.attention.output.LayerNorm.beta']
txt_weight['txt_encoder.layer.'+str(i)+'.layernorm2.weight'] = bert_weight['bert.encoder.layer.'+str(i)+'.output.LayerNorm.gamma']
txt_weight['txt_encoder.layer.'+str(i)+'.layernorm2.bias'] = bert_weight['bert.encoder.layer.'+str(i)+'.output.LayerNorm.beta']
missing_keys, unexpected_keys = self.load_state_dict(txt_weight, strict=False)
#LOGGER.info(f'missing_keys in txt encoder: {missing_keys}')
LOGGER.info(f'unexpected_keys in txt encoder: {unexpected_keys}')
del(bert_weight)
del(txt_weight)
def initialize_video_weights(self):
if self.video_encoder_type.startswith('timesformer'):
video_weight={}
vit_weight = np.load(self.video_encoder_weights)
video_weight['video_embeddings.cls_token'] = trans(vit_weight['cls'])
            video_weight['video_embeddings.first_conv.weight'] = trans(vit_weight['embedding/kernel']).permute(3,2,0,1)  # JAX kernel is (H, W, in, out); permute to PyTorch (out, in, H, W)
video_weight['video_embeddings.first_conv.bias'] = trans(vit_weight['embedding/bias'])
video_weight['video_embeddings.position_embeddings.weight'] = trans(vit_weight['Transformer/posembed_input/pos_embedding']).squeeze()
#'video_embeddings.mask_embedding.weight',
#'video_embeddings.layernorm.weight',
#'video_embeddings.layernorm.bias'
for i in range(12):
video_weight['video_encoder.layer.'+str(i)+'.attention_space.linears.0.weight'] = trans(vit_weight['Transformer/encoderblock_'+str(i)+'/MultiHeadDotProductAttention_1/query/kernel']).reshape(768,-1).permute(1,0)
video_weight['video_encoder.layer.'+str(i)+'.attention_space.linears.0.bias'] = trans(vit_weight['Transformer/encoderblock_'+str(i)+'/MultiHeadDotProductAttention_1/query/bias']).reshape(768)
video_weight['video_encoder.layer.'+str(i)+'.attention_space.linears.1.weight'] = trans(vit_weight['Transformer/encoderblock_'+str(i)+'/MultiHeadDotProductAttention_1/key/kernel']).reshape(768,-1).permute(1,0)
video_weight['video_encoder.layer.'+str(i)+'.attention_space.linears.1.bias'] = trans(vit_weight['Transformer/encoderblock_'+str(i)+'/MultiHeadDotProductAttention_1/key/bias']).reshape(768)
video_weight['video_encoder.layer.'+str(i)+'.attention_space.linears.2.weight'] = trans(vit_weight['Transformer/encoderblock_'+str(i)+'/MultiHeadDotProductAttention_1/value/kernel']).reshape(768,-1).permute(1,0)
video_weight['video_encoder.layer.'+str(i)+'.attention_space.linears.2.bias'] = trans(vit_weight['Transformer/encoderblock_'+str(i)+'/MultiHeadDotProductAttention_1/value/bias']).reshape(768)
video_weight['video_encoder.layer.'+str(i)+'.attention_space.linears.3.weight'] = trans(vit_weight['Transformer/encoderblock_'+str(i)+'/MultiHeadDotProductAttention_1/out/kernel']).reshape(-1,768).permute(1,0)
video_weight['video_encoder.layer.'+str(i)+'.attention_space.linears.3.bias'] = trans(vit_weight['Transformer/encoderblock_'+str(i)+'/MultiHeadDotProductAttention_1/out/bias'])
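                # The temporal-attention weights below are deliberately zeroed (fill_(0)) so the time
                # attention outputs zeros at initialization and each block starts from the pretrained
                # spatial ViT behaviour (TimeSformer-style zero init).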
video_weight['video_encoder.layer.'+str(i)+'.attention_time.linears.0.weight'] = trans(vit_weight['Transformer/encoderblock_'+str(i)+'/MultiHeadDotProductAttention_1/query/kernel']).reshape(768,-1).permute(1,0).fill_(0)
video_weight['video_encoder.layer.'+str(i)+'.attention_time.linears.0.bias'] = trans(vit_weight['Transformer/encoderblock_'+str(i)+'/MultiHeadDotProductAttention_1/query/bias']).reshape(768).fill_(0)
video_weight['video_encoder.layer.'+str(i)+'.attention_time.linears.1.weight'] = trans(vit_weight['Transformer/encoderblock_'+str(i)+'/MultiHeadDotProductAttention_1/key/kernel']).reshape(768,-1).permute(1,0).fill_(0)
video_weight['video_encoder.layer.'+str(i)+'.attention_time.linears.1.bias'] = trans(vit_weight['Transformer/encoderblock_'+str(i)+'/MultiHeadDotProductAttention_1/key/bias']).reshape(768).fill_(0)
video_weight['video_encoder.layer.'+str(i)+'.attention_time.linears.2.weight'] = trans(vit_weight['Transformer/encoderblock_'+str(i)+'/MultiHeadDotProductAttention_1/value/kernel']).reshape(768,-1).permute(1,0).fill_(0)
video_weight['video_encoder.layer.'+str(i)+'.attention_time.linears.2.bias'] = trans(vit_weight['Transformer/encoderblock_'+str(i)+'/MultiHeadDotProductAttention_1/value/bias']).reshape(768).fill_(0)
video_weight['video_encoder.layer.'+str(i)+'.attention_time.linears.3.weight'] = trans(vit_weight['Transformer/encoderblock_'+str(i)+'/MultiHeadDotProductAttention_1/out/kernel']).reshape(-1,768).permute(1,0).fill_(0)
video_weight['video_encoder.layer.'+str(i)+'.attention_time.linears.3.bias'] = trans(vit_weight['Transformer/encoderblock_'+str(i)+'/MultiHeadDotProductAttention_1/out/bias']).fill_(0)
video_weight['video_encoder.layer.'+str(i)+'.ff_layer.linear1.weight'] = trans(vit_weight['Transformer/encoderblock_'+str(i)+'/MlpBlock_3/Dense_0/kernel']).permute(1,0)
video_weight['video_encoder.layer.'+str(i)+'.ff_layer.linear1.bias'] = trans(vit_weight['Transformer/encoderblock_'+str(i)+'/MlpBlock_3/Dense_0/bias'])
video_weight['video_encoder.layer.'+str(i)+'.ff_layer.linear2.weight'] = trans(vit_weight['Transformer/encoderblock_'+str(i)+'/MlpBlock_3/Dense_1/kernel']).permute(1,0)
video_weight['video_encoder.layer.'+str(i)+'.ff_layer.linear2.bias'] = trans(vit_weight['Transformer/encoderblock_'+str(i)+'/MlpBlock_3/Dense_1/bias'])
video_weight['video_encoder.layer.'+str(i)+'.layernorm2.weight'] = trans(vit_weight['Transformer/encoderblock_'+str(i)+'/LayerNorm_0/scale'])
video_weight['video_encoder.layer.'+str(i)+'.layernorm2.bias'] = trans(vit_weight['Transformer/encoderblock_'+str(i)+'/LayerNorm_0/bias'])
video_weight['video_encoder.layer.'+str(i)+'.layernorm3.weight'] = trans(vit_weight['Transformer/encoderblock_'+str(i)+'/LayerNorm_2/scale'])
video_weight['video_encoder.layer.'+str(i)+'.layernorm3.bias'] = trans(vit_weight['Transformer/encoderblock_'+str(i)+'/LayerNorm_2/bias'])
video_weight['video_encoder.last_layernorm.weight'] = trans(vit_weight['Transformer/encoder_norm/scale'])
video_weight['video_encoder.last_layernorm.bias'] = trans(vit_weight['Transformer/encoder_norm/bias'])
else:
raise NotImplementedError
missing_keys, unexpected_keys = self.load_state_dict(video_weight, strict=False)
#LOGGER.info(f'missing_keys in video encoder: {missing_keys}')
LOGGER.info(f'unexpected_keys in video encoder: {unexpected_keys}')
del(vit_weight)
del(video_weight)
def initialize_audio_weights(self):
vit_weight = np.load(self.audio_encoder_weights)
audio_weight = {}
audio_weight['audio_embeddings.cls_token'] = trans(vit_weight['cls'])
first_conv_weight = trans(vit_weight['embedding/kernel']).permute(3,2,0,1)
first_conv_weight = torch.mean(first_conv_weight,dim=1,keepdim=True)
        audio_weight['audio_embeddings.first_conv.weight'] = first_conv_weight  # channel-averaged and already permuted to PyTorch (out, in, H, W)
audio_weight['audio_embeddings.first_conv.bias'] = trans(vit_weight['embedding/bias'])
pos_weight = trans(vit_weight['Transformer/posembed_input/pos_embedding']).squeeze()
pos_weight_cls = pos_weight[0:1]
pos_weight_oth = pos_weight[1:]
        if self.audio_cfg['patch_size'] == 32:
            src_patch_num = 7
        elif self.audio_cfg['patch_size'] == 16:
            src_patch_num = 14
        else:
            raise NotImplementedError(f"unsupported audio patch_size: {self.audio_cfg['patch_size']}")
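        # Resize the square ViT position-embedding grid (src_patch_num x src_patch_num, plus the CLS slot)
        # to the audio-spectrogram patch grid (melbins/patch_size x target_length/patch_size) with bilinear
        # interpolation, mirroring how AST-style audio transformers reuse image-pretrained position embeddings.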
pos_weight_oth = pos_weight_oth.reshape(src_patch_num,src_patch_num,-1).permute(2,0,1).unsqueeze(0)
tar_patch_num_height = self.audio_cfg['melbins'] // self.audio_cfg['patch_size']
tar_patch_num_width = self.audio_cfg['target_length'] // self.audio_cfg['patch_size']
pos_weight_oth = F.interpolate(pos_weight_oth, size = (tar_patch_num_height,tar_patch_num_width),mode='bilinear').squeeze().permute(1,2,0).reshape(-1,768)
pos_weight_oth = torch.cat((pos_weight_cls,pos_weight_oth),dim=0)
audio_weight['audio_embeddings.position_embeddings.weight'] = pos_weight_oth
for i in range(12):
audio_weight['audio_encoder.layer.'+str(i)+'.attention.linears.0.weight'] = trans(vit_weight['Transformer/encoderblock_'+str(i)+'/MultiHeadDotProductAttention_1/query/kernel']).reshape(768,-1).permute(1,0)
audio_weight['audio_encoder.layer.'+str(i)+'.attention.linears.0.bias'] = trans(vit_weight['Transformer/encoderblock_'+str(i)+'/MultiHeadDotProductAttention_1/query/bias']).reshape(768)
audio_weight['audio_encoder.layer.'+str(i)+'.attention.linears.1.weight'] = trans(vit_weight['Transformer/encoderblock_'+str(i)+'/MultiHeadDotProductAttention_1/key/kernel']).reshape(768,-1).permute(1,0)
audio_weight['audio_encoder.layer.'+str(i)+'.attention.linears.1.bias'] = trans(vit_weight['Transformer/encoderblock_'+str(i)+'/MultiHeadDotProductAttention_1/key/bias']).reshape(768)
audio_weight['audio_encoder.layer.'+str(i)+'.attention.linears.2.weight'] = trans(vit_weight['Transformer/encoderblock_'+str(i)+'/MultiHeadDotProductAttention_1/value/kernel']).reshape(768,-1).permute(1,0)
audio_weight['audio_encoder.layer.'+str(i)+'.attention.linears.2.bias'] = trans(vit_weight['Transformer/encoderblock_'+str(i)+'/MultiHeadDotProductAttention_1/value/bias']).reshape(768)
audio_weight['audio_encoder.layer.'+str(i)+'.attention.linears.3.weight'] = trans(vit_weight['Transformer/encoderblock_'+str(i)+'/MultiHeadDotProductAttention_1/out/kernel']).reshape(-1,768).permute(1,0)
audio_weight['audio_encoder.layer.'+str(i)+'.attention.linears.3.bias'] = trans(vit_weight['Transformer/encoderblock_'+str(i)+'/MultiHeadDotProductAttention_1/out/bias'])
audio_weight['audio_encoder.layer.'+str(i)+'.ff_layer.linear1.weight'] = trans(vit_weight['Transformer/encoderblock_'+str(i)+'/MlpBlock_3/Dense_0/kernel']).permute(1,0)
audio_weight['audio_encoder.layer.'+str(i)+'.ff_layer.linear1.bias'] = trans(vit_weight['Transformer/encoderblock_'+str(i)+'/MlpBlock_3/Dense_0/bias'])
audio_weight['audio_encoder.layer.'+str(i)+'.ff_layer.linear2.weight'] = trans(vit_weight['Transformer/encoderblock_'+str(i)+'/MlpBlock_3/Dense_1/kernel']).permute(1,0)
audio_weight['audio_encoder.layer.'+str(i)+'.ff_layer.linear2.bias'] = trans(vit_weight['Transformer/encoderblock_'+str(i)+'/MlpBlock_3/Dense_1/bias'])
audio_weight['audio_encoder.layer.'+str(i)+'.layernorm1.weight'] = trans(vit_weight['Transformer/encoderblock_'+str(i)+'/LayerNorm_0/scale'])
audio_weight['audio_encoder.layer.'+str(i)+'.layernorm1.bias'] = trans(vit_weight['Transformer/encoderblock_'+str(i)+'/LayerNorm_0/bias'])
audio_weight['audio_encoder.layer.'+str(i)+'.layernorm2.weight'] = trans(vit_weight['Transformer/encoderblock_'+str(i)+'/LayerNorm_2/scale'])
audio_weight['audio_encoder.layer.'+str(i)+'.layernorm2.bias'] = trans(vit_weight['Transformer/encoderblock_'+str(i)+'/LayerNorm_2/bias'])
audio_weight['audio_encoder.last_layernorm.weight'] = trans(vit_weight['Transformer/encoder_norm/scale'])
audio_weight['audio_encoder.last_layernorm.bias'] = trans(vit_weight['Transformer/encoder_norm/bias'])
missing_keys, unexpected_keys = self.load_state_dict(audio_weight, strict=False)
#LOGGER.info(f'missing_keys in audio encoder: {missing_keys}')
LOGGER.info(f'unexpected_keys in audio encoder: {unexpected_keys}')
del(vit_weight)
del(audio_weight)
def trans(x):
return torch.from_numpy(x) |
the-stack_0_21243 | import traceback
from django.http import Http404, HttpResponse, HttpResponseRedirect, JsonResponse
from ledger.payments.utils import oracle_parser
from django.conf import settings
from django.db import transaction
from wsgiref.util import FileWrapper
from rest_framework import viewsets, serializers, status, generics, views
from rest_framework.decorators import detail_route, list_route, renderer_classes, parser_classes
from rest_framework.response import Response
from rest_framework.renderers import JSONRenderer
from rest_framework.permissions import IsAuthenticated, AllowAny, IsAdminUser, BasePermission
from rest_framework.pagination import PageNumberPagination
from django.urls import reverse
from mooringlicensing.components.main.models import (#Region, District, Tenure,
#ApplicationType, #ActivityMatrix, AccessType, Park, Trail, ActivityCategory, Activity,
#RequiredDocument,
Question,
GlobalSettings,
TemporaryDocumentCollection,
)
from mooringlicensing.components.main.serializers import ( # RegionSerializer, DistrictSerializer, TenureSerializer,
# ApplicationTypeSerializer, #ActivityMatrixSerializer, AccessTypeSerializer, ParkSerializer, ParkFilterSerializer, TrailSerializer, ActivitySerializer, ActivityCategorySerializer,
# RequiredDocumentSerializer,
QuestionSerializer,
GlobalSettingsSerializer,
OracleSerializer,
TemporaryDocumentCollectionSerializer,
BookingSettlementReportSerializer, # BookingSettlementReportSerializer, LandActivityTabSerializer, MarineActivityTabSerializer, EventsParkSerializer, TrailTabSerializer, FilmingParkSerializer
)
from mooringlicensing.components.main.process_document import save_document, cancel_document, delete_document
from mooringlicensing.components.main.utils import add_cache_control
from django.core.exceptions import ValidationError
from django.db.models import Q
from mooringlicensing.components.payments_ml import reports
from mooringlicensing.components.proposals.models import Proposal
from mooringlicensing.components.proposals.serializers import ProposalSerializer
#from mooringlicensing.components.bookings.utils import oracle_integration
#from mooringlicensing.components.bookings import reports
from ledger.checkout.utils import create_basket_session, create_checkout_session, place_order_submission, get_cookie_basket
from collections import namedtuple
import json
from decimal import Decimal
import logging
from mooringlicensing.settings import PAYMENT_SYSTEM_PREFIX, SYSTEM_NAME
logger = logging.getLogger('mooringlicensing')
#class ApplicationTypeViewSet(viewsets.ReadOnlyModelViewSet):
# #queryset = ApplicationType.objects.all().order_by('order')
# queryset = ApplicationType.objects.none()
# serializer_class = ApplicationTypeSerializer
#
# def get_queryset(self):
# return ApplicationType.objects.order_by('order').filter(visible=True)
class GlobalSettingsViewSet(viewsets.ReadOnlyModelViewSet):
queryset = GlobalSettings.objects.all().order_by('id')
serializer_class = GlobalSettingsSerializer
#class RequiredDocumentViewSet(viewsets.ReadOnlyModelViewSet):
# queryset = RequiredDocument.objects.all()
# serializer_class = RequiredDocumentSerializer
class QuestionViewSet(viewsets.ReadOnlyModelViewSet):
queryset = Question.objects.all()
serializer_class = QuestionSerializer
class PaymentViewSet(viewsets.ModelViewSet):
#queryset = Proposal.objects.all()
queryset = Proposal.objects.none()
#serializer_class = ProposalSerializer
serializer_class = ProposalSerializer
lookup_field = 'id'
def create(self, request, *args, **kwargs):
response = super(PaymentViewSet, self).create(request, *args, **kwargs)
        # Additional post-create work (e.g. extracting the new object's id and using reverse())
        # could be placed here before redirecting.
        fallback_url = request.build_absolute_uri('/')
        return add_cache_control(HttpResponseRedirect(redirect_to=fallback_url + 'success/'))
class BookingSettlementReportView(views.APIView):
renderer_classes = (JSONRenderer,)
def get(self,request,format=None):
try:
http_status = status.HTTP_200_OK
#parse and validate data
report = None
data = {
"date":request.GET.get('date'),
}
serializer = BookingSettlementReportSerializer(data=data)
serializer.is_valid(raise_exception=True)
filename = 'Booking Settlement Report-{}'.format(str(serializer.validated_data['date']))
# Generate Report
report = reports.booking_bpoint_settlement_report(serializer.validated_data['date'])
if report:
response = HttpResponse(FileWrapper(report), content_type='text/csv')
response['Content-Disposition'] = 'attachment; filename="{}.csv"'.format(filename)
return response
else:
raise serializers.ValidationError('No report was generated.')
except serializers.ValidationError:
raise
        except Exception as e:
            traceback.print_exc()
            raise serializers.ValidationError(str(e))
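    # Illustrative request (the URL path is an assumption; the 'date' query parameter is real):
    #   GET .../booking-settlement-report?date=2021-07-01
    #   -> CSV attachment named "Booking Settlement Report-2021-07-01.csv"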
def oracle_integration(date, override):
system = PAYMENT_SYSTEM_PREFIX
#oracle_codes = oracle_parser(date, system, 'Commercial Operator Licensing', override=override)
# oracle_codes = oracle_parser(date, system, 'WildlifeCompliance', override=override)
    oracle_codes = oracle_parser(date, system, SYSTEM_NAME, override=override)
    return oracle_codes
class OracleJob(views.APIView):
renderer_classes = [JSONRenderer,]
def get(self, request, format=None):
try:
data = {
"date":request.GET.get("date"),
"override": request.GET.get("override")
}
serializer = OracleSerializer(data=data)
serializer.is_valid(raise_exception=True)
oracle_integration(serializer.validated_data['date'].strftime('%Y-%m-%d'),serializer.validated_data['override'])
data = {'successful':True}
return add_cache_control(Response(data))
except serializers.ValidationError:
print(traceback.print_exc())
raise
except ValidationError as e:
raise serializers.ValidationError(repr(e.error_dict)) if hasattr(e, 'error_dict') else serializers.ValidationError(e)
except Exception as e:
print(traceback.print_exc())
            raise serializers.ValidationError(str(e))
class TemporaryDocumentCollectionViewSet(viewsets.ModelViewSet):
queryset = TemporaryDocumentCollection.objects.all()
serializer_class = TemporaryDocumentCollectionSerializer
#def get_queryset(self):
# # import ipdb; ipdb.set_trace()
# #user = self.request.user
# if is_internal(self.request):
# return TemporaryDocumentCollection.objects.all()
# return TemporaryDocumentCollection.objects.none()
def create(self, request, *args, **kwargs):
print("create temp doc coll")
print(request.data)
try:
with transaction.atomic():
serializer = TemporaryDocumentCollectionSerializer(
data=request.data,
)
                serializer.is_valid(raise_exception=True)
                instance = serializer.save()
                save_document(request, instance, comms_instance=None, document_type=None)
return Response(serializer.data)
except serializers.ValidationError:
print(traceback.print_exc())
raise
except ValidationError as e:
if hasattr(e, 'error_dict'):
raise serializers.ValidationError(repr(e.error_dict))
else:
# raise serializers.ValidationError(repr(e[0].encode('utf-8')))
                raise serializers.ValidationError(repr(e))
except Exception as e:
print(traceback.print_exc())
raise serializers.ValidationError(str(e))
@detail_route(methods=['POST'])
@renderer_classes((JSONRenderer,))
def process_temp_document(self, request, *args, **kwargs):
print("process_temp_document")
print(request.data)
try:
instance = self.get_object()
action = request.data.get('action')
#comms_instance = None
if action == 'list':
pass
elif action == 'delete':
delete_document(request, instance, comms_instance=None, document_type='temp_document')
elif action == 'cancel':
cancel_document(request, instance, comms_instance=None, document_type='temp_document')
elif action == 'save':
save_document(request, instance, comms_instance=None, document_type='temp_document')
returned_file_data = [dict(
file=d._file.url,
id=d.id,
name=d.name,
) for d in instance.documents.all() if d._file]
return Response({'filedata': returned_file_data})
except Exception as e:
print(traceback.print_exc())
raise e
|
the-stack_0_21244 | #!/bin/env python2.7
# -*- coding: utf-8 -*-
from flask import *
from Crypto.Cipher import AES
from Crypto.PublicKey import RSA
from Crypto.Hash import SHA256
from random import randint
import binascii, os, json
import yaml, requests
from redis_session import RedisSessionInterface
# Load and parse config file
config = yaml.load(file('config.yaml', 'r'))
encrypt = config['encrypt']
app = Flask(__name__, static_url_path='/static')
app.config['recaptcha'] = config['recaptcha']
app.debug = config['debug']
app.session_interface = RedisSessionInterface(config['redis'])
def aesEncrypt(text, secKey):
pad = 16 - len(text) % 16
text = text + pad * chr(pad)
    encryptor = AES.new(secKey, AES.MODE_ECB)
cipherText = encryptor.encrypt(text)
cipherText = binascii.b2a_hex(cipherText).upper()
return cipherText
def encrypted_request(jsonDict):
jsonStr = json.dumps(jsonDict, separators = (",", ":"))
encText = aesEncrypt(jsonStr, secretKey)
data = {
'eparams': encText,
}
return data
nonce = encrypt['nonce']
n, e = int(encrypt["n"], 16), int(encrypt["e"], 16)
def req_netease(url, payload):
data = encrypted_request(payload)
r = requests.post(url, data = data, headers = headers)
result = json.loads(r.text)
if result['code'] != 200:
return None
return result
def req_netease_detail(songId):
payload = {"method": "POST", "params": {"c": "[{id:%d}]" % songId}, "url": "http://music.163.com/api/v3/song/detail"}
data = req_netease('http://music.163.com/api/linux/forward', payload)
if data is None or data['songs'] is None or len(data['songs']) != 1:
return None
song = data['songs'][0]
return song
def req_netease_url(songId, rate):
payload = {"method": "POST", "params": {"ids": [songId],"br": rate}, "url": "http://music.163.com/api/song/enhance/player/url"}
data = req_netease('http://music.163.com/api/linux/forward', payload)
if data is None or data['data'] is None or len(data['data']) != 1:
return None
song = data['data'][0]
if song['code'] != 200 or song['url'] is None:
return None
# song['url'] = song['url'].replace('http:', '')
return song
def req_recaptcha(response, remote_ip):
r = requests.post('https://www.google.com/recaptcha/api/siteverify', data = {
'secret': config['recaptcha']['secret'],
'response': response,
'remoteip': remote_ip
    })
    result = json.loads(r.text)
print("req_recaptcha from %s, result: %s" % (remote_ip, r.text))
return result['success']
print("Generating secretKey for current session...")
secretKey = binascii.a2b_hex(encrypt['secret'])
headers = {
'Referer': 'http://music.163.com',
'X-Real-IP': '118.88.88.88',
'Cookie': 'os=linux; appver=1.0.0.1026; osver=Ubuntu%2016.10',
'User-Agent': 'Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/56.0.2924.87 Safari/537.36',
}
def sign_request(songId, rate):
h = SHA256.new()
h.update(str(songId))
h.update(str(rate))
h.update(config["sign_salt"])
return h.hexdigest()
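# Note: sign_request() ties the two public routes together. /sign/<songId>/<rate> hands the client this
# salted SHA-256 digest, and /<songId>/<rate>/<sign> recomputes it before redirecting to the stream,
# so both must share the same sign_salt from config.yaml.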
def is_verified(session):
if not config['recaptcha']:
return True
return 'verified' in session and session['verified'] > 0
def set_verified(session):
if config['recaptcha']:
session['verified'] = randint(10, 20)
def decrease_verified(session):
if config['recaptcha']:
        session['verified'] -= 1
@app.route("/")
def index():
verified = is_verified(session)
return render_template('index.j2', verified = verified)
@app.route("/backdoor")
def backdoor():
if app.debug:
set_verified(session)
return 'ok!'
@app.route('/s/<path:path>')
def static_route(path):
return app.send_static_file(path)
@app.route("/sign/<int:songId>/<int:rate>", methods=['POST'])
def generate_sign(songId, rate):
if not is_verified(session):
        # First, verify the Google reCAPTCHA response
if 'g-recaptcha-response' not in request.form \
or not req_recaptcha(
request.form['g-recaptcha-response'],
request.headers[config['ip_header']] if config['ip_header'] else request.remote_addr
):
#
return jsonify({"verified": is_verified(session), "errno": 2})
set_verified(session)
    # Fetch the song info, then sign the request
decrease_verified(session)
song = req_netease_detail(songId)
if song is None:
return jsonify({"verified": is_verified(session), "errno": 1})
return jsonify({
"verified": True,
"sign": sign_request(songId, rate),
"song": {
"id": song['id'],
"name": song['name'],
"artist": [{"id": a['id'], "name": a['name']} for a in song['ar']]
}
})
@app.route("/<int:songId>/<int:rate>/<sign>")
def get_song_url(songId, rate, sign):
if sign_request(songId, rate) != sign:
return abort(403)
song = req_netease_url(songId, rate)
if song is None:
return abort(404)
response = redirect(song['url'], code=302)
response.headers["max-age"] = song['expi']
return response
if __name__ == "__main__":
print("Running...")
app.run()
|
the-stack_0_21245 | """Main helper"""
__docformat__ = "numpy"
import argparse
import json
from datetime import datetime, timedelta
from typing import List, Union, Tuple, Optional
import matplotlib.pyplot as plt
import mplfinance as mpf
import numpy as np
import pandas as pd
import pandas_market_calendars as mcal
import plotly.graph_objects as go
import pyEX
import pytz
import requests
import yfinance as yf
from alpha_vantage.timeseries import TimeSeries
from numpy.core.fromnumeric import transpose
from plotly.subplots import make_subplots
from scipy import stats
from gamestonk_terminal.config_terminal import theme
from gamestonk_terminal import config_terminal as cfg
from gamestonk_terminal.helper_funcs import (
parse_known_args_and_warn,
plot_autoscale,
get_user_timezone_or_invalid,
print_rich_table,
)
from gamestonk_terminal.rich_config import console
# pylint: disable=no-member,too-many-branches,C0302
INTERVALS = [1, 5, 15, 30, 60]
SOURCES = ["yf", "av", "iex"]
def search(
query: str,
amount: int,
) -> None:
"""Search selected query for tickers.
Parameters
----------
query : str
The search term used to find company tickers.
amount : int
The amount of companies shown.
"""
equities_list = (
"https://raw.githubusercontent.com/JerBouma/FinanceDatabase/master/"
"Database/Equities/Equities List.json"
)
request = requests.get(equities_list)
equities = json.loads(request.text)
equities_query = {
key: value
for key, value in equities.items()
if (query in key.lower()) or (query in value.lower())
}
equities_dataframe = pd.DataFrame(
equities_query.items(),
index=equities_query.values(),
columns=["Company", "Ticker"],
)
if equities_dataframe.empty:
raise ValueError("No companies found. \n")
print_rich_table(
equities_dataframe.iloc[:amount],
show_index=False,
headers=["Company", "Ticker"],
title="Search Results",
)
console.print("")
def load(
ticker: str,
start: datetime = (datetime.now() - timedelta(days=1100)),
interval: int = 1440,
end: datetime = datetime.now(),
prepost: bool = False,
source: str = "yf",
iexrange: str = "ytd",
):
"""
Load a symbol to perform analysis using the string above as a template. Optional arguments and their
descriptions are listed above. The default source is, yFinance (https://pypi.org/project/yfinance/).
Alternatively, one may select either AlphaVantage (https://www.alphavantage.co/documentation/)
or IEX Cloud (https://iexcloud.io/docs/api/) as the data source for the analysis.
Please note that certain analytical features are exclusive to the source.
To load a symbol from an exchange outside of the NYSE/NASDAQ default, use yFinance as the source and
add the corresponding exchange to the end of the symbol. i.e. ‘BNS.TO’.
    BNS is a dual-listed stock; there are separate options chains and order books for each listing.
Opportunities for arbitrage may arise from momentary pricing discrepancies between listings
with a dynamic exchange rate as a second order opportunity in ForEx spreads.
Find the full list of supported exchanges here:
https://help.yahoo.com/kb/exchanges-data-providers-yahoo-finance-sln2310.html
Certain analytical features, such as VWAP, require the ticker to be loaded as intraday
using the ‘-i x’ argument. When encountering this error, simply reload the symbol using
the interval argument. i.e. ‘load -t BNS -s YYYY-MM-DD -i 1 -p’ loads one-minute intervals,
including Pre/After Market data, using the default source, yFinance.
Certain features, such as the Prediction menu, require the symbol to be loaded as daily and not intraday.
Parameters
----------
ticker: str
Ticker to get data
start: datetime
Start date to get data from with
interval: int
Interval (in minutes) to get data 1, 5, 15, 30, 60 or 1440
end: datetime
End date to get data from with
prepost: bool
Pre and After hours data
source: str
Source of data extracted
iexrange: str
Timeframe to get IEX data.
Returns
-------
df_stock_candidate: pd.DataFrame
Dataframe of data
"""
# Daily
if interval == 1440:
# Alpha Vantage Source
if source == "av":
ts = TimeSeries(key=cfg.API_KEY_ALPHAVANTAGE, output_format="pandas")
# pylint: disable=unbalanced-tuple-unpacking
df_stock_candidate, _ = ts.get_daily_adjusted(
symbol=ticker, outputsize="full"
)
df_stock_candidate.columns = [
val.split(". ")[1].capitalize() for val in df_stock_candidate.columns
]
df_stock_candidate = df_stock_candidate.rename(
columns={
"Adjusted close": "Adj Close",
}
)
# Check that loading a stock was not successful
# pylint: disable=no-member
if df_stock_candidate.empty:
console.print("")
return pd.DataFrame()
df_stock_candidate.index = df_stock_candidate.index.tz_localize(None)
# pylint: disable=no-member
df_stock_candidate.sort_index(ascending=True, inplace=True)
# Slice dataframe from the starting date YYYY-MM-DD selected
df_stock_candidate = df_stock_candidate[
(df_stock_candidate.index >= start.strftime("%Y-%m-%d"))
& (df_stock_candidate.index <= end.strftime("%Y-%m-%d"))
]
# Yahoo Finance Source
elif source == "yf":
df_stock_candidate = yf.download(
ticker,
start=start,
end=end,
progress=False,
)
# Check that loading a stock was not successful
if df_stock_candidate.empty:
console.print("")
return pd.DataFrame()
df_stock_candidate.index.name = "date"
# IEX Cloud Source
elif source == "iex":
client = pyEX.Client(api_token=cfg.API_IEX_TOKEN, version="v1")
df_stock_candidate = client.chartDF(ticker, timeframe=iexrange)
# Check that loading a stock was not successful
if df_stock_candidate.empty:
console.print("")
return pd.DataFrame()
df_stock_candidate = df_stock_candidate[
["close", "fHigh", "fLow", "fOpen", "fClose", "volume"]
]
df_stock_candidate = df_stock_candidate.rename(
columns={
"close": "Close",
"fHigh": "High",
"fLow": "Low",
"fOpen": "Open",
"fClose": "Adj Close",
"volume": "Volume",
}
)
df_stock_candidate.sort_index(ascending=True, inplace=True)
s_start = df_stock_candidate.index[0]
s_interval = f"{interval}min"
else:
s_int = str(interval) + "m"
s_interval = s_int + "in"
d_granularity = {"1m": 6, "5m": 59, "15m": 59, "30m": 59, "60m": 729}
s_start_dt = datetime.utcnow() - timedelta(days=d_granularity[s_int])
s_date_start = s_start_dt.strftime("%Y-%m-%d")
df_stock_candidate = yf.download(
ticker,
start=s_date_start if s_start_dt > start else start.strftime("%Y-%m-%d"),
progress=False,
interval=s_int,
prepost=prepost,
)
# Check that loading a stock was not successful
if df_stock_candidate.empty:
console.print("")
return pd.DataFrame()
df_stock_candidate.index = df_stock_candidate.index.tz_localize(None)
if s_start_dt > start:
s_start = pytz.utc.localize(s_start_dt)
else:
s_start = start
df_stock_candidate.index.name = "date"
s_intraday = (f"Intraday {s_interval}", "Daily")[interval == 1440]
console.print(
f"\nLoading {s_intraday} {ticker.upper()} stock "
f"with starting period {s_start.strftime('%Y-%m-%d')} for analysis.",
)
return df_stock_candidate
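# Illustrative usage of load() above (network access is an environment assumption):
#   df_daily = load("AAPL", source="yf")                  # roughly three years of daily bars
#   df_15min = load("AAPL", interval=15, prepost=True)    # intraday bars incl. pre/post market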
def display_candle(
s_ticker: str,
df_stock: pd.DataFrame,
use_matplotlib: bool,
intraday: bool = False,
add_trend: bool = False,
ma: Optional[Tuple[int, ...]] = None,
asset_type: str = "Stock",
external_axes: Optional[List[plt.Axes]] = None,
):
"""Shows candle plot of loaded ticker. [Source: Yahoo Finance, IEX Cloud or Alpha Vantage]
Parameters
----------
df_stock: pd.DataFrame
Stock dataframe
s_ticker: str
Ticker name
use_matplotlib: bool
Flag to use matplotlib instead of interactive plotly chart
intraday: bool
Flag for intraday data for plotly range breaks
add_trend: bool
Flag to add high and low trends to chart
    ma: Optional[Tuple[int, ...]]
        Moving averages to add to the candle
    asset_type: str
        String to include in title
    external_axes : Optional[List[plt.Axes]], optional
        External axes (1 axis is expected in the list), by default None
    """
if add_trend:
if (df_stock.index[1] - df_stock.index[0]).total_seconds() >= 86400:
df_stock = find_trendline(df_stock, "OC_High", "high")
df_stock = find_trendline(df_stock, "OC_Low", "low")
if use_matplotlib:
ap0 = []
if add_trend:
if "OC_High_trend" in df_stock.columns:
ap0.append(
mpf.make_addplot(df_stock["OC_High_trend"], color=theme.up_color),
)
if "OC_Low_trend" in df_stock.columns:
ap0.append(
mpf.make_addplot(df_stock["OC_Low_trend"], color=theme.down_color),
)
candle_chart_kwargs = {
"type": "candle",
"style": theme.mpf_style,
"volume": True,
"addplot": ap0,
"xrotation": theme.xticks_rotation,
"scale_padding": {"left": 0.3, "right": 1, "top": 0.8, "bottom": 0.8},
"update_width_config": {
"candle_linewidth": 0.6,
"candle_width": 0.8,
"volume_linewidth": 0.8,
"volume_width": 0.8,
},
"warn_too_much_data": 10000,
}
kwargs = {"mav": ma} if ma else {}
if external_axes is None:
candle_chart_kwargs["returnfig"] = True
candle_chart_kwargs["figratio"] = (10, 7)
candle_chart_kwargs["figscale"] = 1.10
candle_chart_kwargs["figsize"] = plot_autoscale()
fig, _ = mpf.plot(df_stock, **candle_chart_kwargs, **kwargs)
fig.suptitle(
f"{asset_type} {s_ticker}",
x=0.055,
y=0.965,
horizontalalignment="left",
)
theme.visualize_output(force_tight_layout=False)
else:
if len(external_axes) != 1:
console.print("[red]Expected list of 1 axis items./n[/red]")
return
(ax1,) = external_axes
candle_chart_kwargs["ax"] = ax1
mpf.plot(df_stock, **candle_chart_kwargs)
else:
fig = make_subplots(
rows=2,
cols=1,
shared_xaxes=True,
vertical_spacing=0.06,
subplot_titles=(f"{s_ticker}", "Volume"),
row_width=[0.2, 0.7],
)
fig.add_trace(
go.Candlestick(
x=df_stock.index,
open=df_stock.Open,
high=df_stock.High,
low=df_stock.Low,
close=df_stock.Close,
name="OHLC",
),
row=1,
col=1,
)
if ma:
plotly_colors = [
"black",
"teal",
"blue",
"purple",
"orange",
"gray",
"deepskyblue",
]
for idx, ma_val in enumerate(ma):
temp = df_stock["Adj Close"].copy()
temp[f"ma{ma_val}"] = df_stock["Adj Close"].rolling(ma_val).mean()
temp = temp.dropna()
fig.add_trace(
go.Scatter(
x=temp.index,
y=temp[f"ma{ma_val}"],
name=f"MA{ma_val}",
mode="lines",
line=go.scatter.Line(
color=plotly_colors[np.mod(idx, len(plotly_colors))]
),
),
row=1,
col=1,
)
if add_trend:
if "OC_High_trend" in df_stock.columns:
fig.add_trace(
go.Scatter(
x=df_stock.index,
y=df_stock["OC_High_trend"],
name="High Trend",
mode="lines",
line=go.scatter.Line(color="green"),
),
row=1,
col=1,
)
if "OC_Low_trend" in df_stock.columns:
fig.add_trace(
go.Scatter(
x=df_stock.index,
y=df_stock["OC_Low_trend"],
name="Low Trend",
mode="lines",
line=go.scatter.Line(color="red"),
),
row=1,
col=1,
)
colors = [
"red" if row.Open < row["Adj Close"] else "green"
for _, row in df_stock.iterrows()
]
fig.add_trace(
go.Bar(
x=df_stock.index,
y=df_stock.Volume,
name="Volume",
marker_color=colors,
),
row=2,
col=1,
)
fig.update_layout(
yaxis_title="Stock Price ($)",
xaxis=dict(
rangeselector=dict(
buttons=list(
[
dict(
count=1,
label="1m",
step="month",
stepmode="backward",
),
dict(
count=3,
label="3m",
step="month",
stepmode="backward",
),
dict(count=1, label="YTD", step="year", stepmode="todate"),
dict(
count=1,
label="1y",
step="year",
stepmode="backward",
),
dict(step="all"),
]
)
),
rangeslider=dict(visible=False),
type="date",
),
)
fig.update_layout(
updatemenus=[
dict(
buttons=[
dict(
label="linear",
method="relayout",
args=[{"yaxis.type": "linear"}],
),
dict(
label="log", method="relayout", args=[{"yaxis.type": "log"}]
),
]
)
]
)
if intraday:
fig.update_xaxes(
rangebreaks=[
dict(bounds=["sat", "mon"]),
dict(bounds=[20, 9], pattern="hour"),
]
)
fig.show(config=dict({"scrollZoom": True}))
console.print("")
def quote(other_args: List[str], s_ticker: str):
"""Ticker quote
Parameters
----------
other_args : List[str]
Argparse arguments
s_ticker : str
Ticker
"""
parser = argparse.ArgumentParser(
add_help=False,
formatter_class=argparse.ArgumentDefaultsHelpFormatter,
prog="quote",
description="Current quote for stock ticker",
)
if s_ticker:
parser.add_argument(
"-t",
"--ticker",
action="store",
dest="s_ticker",
default=s_ticker,
help="Stock ticker",
)
else:
parser.add_argument(
"-t",
"--ticker",
action="store",
dest="s_ticker",
required="-h" not in other_args,
help="Stock ticker",
)
# Price only option.
parser.add_argument(
"-p",
"--price",
action="store_true",
dest="price_only",
default=False,
help="Price only",
)
try:
# For the case where a user uses: 'quote BB'
if other_args and "-" not in other_args[0][0]:
other_args.insert(0, "-t")
ns_parser = parse_known_args_and_warn(parser, other_args)
if not ns_parser:
return
except SystemExit:
console.print("")
return
ticker = yf.Ticker(ns_parser.s_ticker)
# If price only option, return immediate market price for ticker.
if ns_parser.price_only:
console.print(
f"Price of {ns_parser.s_ticker} {ticker.info['regularMarketPrice']} \n"
)
return
try:
quote_df = pd.DataFrame(
[
{
"Symbol": ticker.info["symbol"],
"Name": ticker.info["shortName"],
"Price": ticker.info["regularMarketPrice"],
"Open": ticker.info["regularMarketOpen"],
"High": ticker.info["dayHigh"],
"Low": ticker.info["dayLow"],
"Previous Close": ticker.info["previousClose"],
"Volume": ticker.info["volume"],
"52 Week High": ticker.info["fiftyTwoWeekHigh"],
"52 Week Low": ticker.info["fiftyTwoWeekLow"],
}
]
)
quote_df["Change"] = quote_df["Price"] - quote_df["Previous Close"]
quote_df["Change %"] = quote_df.apply(
lambda x: f'{((x["Change"] / x["Previous Close"]) * 100):.2f}%',
axis="columns",
)
for c in [
"Price",
"Open",
"High",
"Low",
"Previous Close",
"52 Week High",
"52 Week Low",
"Change",
]:
quote_df[c] = quote_df[c].apply(lambda x: f"{x:.2f}")
quote_df["Volume"] = quote_df["Volume"].apply(lambda x: f"{x:,}")
quote_df = quote_df.set_index("Symbol")
quote_data = transpose(quote_df)
print_rich_table(quote_data, title="Ticker Quote", show_index=True)
except KeyError:
console.print(f"Invalid stock ticker: {ns_parser.s_ticker}")
console.print("")
return
def load_ticker(
ticker: str, start_date: Union[str, datetime], end_date: Union[str, datetime] = ""
) -> pd.DataFrame:
"""Loads a ticker data from Yahoo Finance, adds a data index column data_id and Open-Close High/Low columns.
Parameters
----------
ticker : str
The stock ticker.
start_date : Union[str,datetime]
Start date to load stock ticker data formatted YYYY-MM-DD.
end_date : Union[str,datetime]
End date to load stock ticker data formatted YYYY-MM-DD.
Returns
-------
DataFrame
A Panda's data frame with columns Open, High, Low, Close, Adj Close, Volume, date_id, OC-High, OC-Low.
"""
if end_date:
df_data = yf.download(ticker, start=start_date, end=end_date, progress=False)
else:
df_data = yf.download(ticker, start=start_date, progress=False)
df_data.index = pd.to_datetime(df_data.index)
df_data["date_id"] = (df_data.index.date - df_data.index.date.min()).astype(
"timedelta64[D]"
)
df_data["date_id"] = df_data["date_id"].dt.days + 1
df_data["OC_High"] = df_data[["Open", "Close"]].max(axis=1)
df_data["OC_Low"] = df_data[["Open", "Close"]].min(axis=1)
return df_data
def process_candle(df_data: pd.DataFrame) -> pd.DataFrame:
"""Process DataFrame into candle style plot
Parameters
----------
df_data : DataFrame
Stock dataframe.
Returns
-------
DataFrame
A Panda's data frame with columns Open, High, Low, Close, Adj Close, Volume, date_id, OC-High, OC-Low.
"""
df_data["date_id"] = (df_data.index.date - df_data.index.date.min()).astype(
"timedelta64[D]"
)
df_data["date_id"] = df_data["date_id"].dt.days + 1
df_data["OC_High"] = df_data[["Open", "Close"]].max(axis=1)
df_data["OC_Low"] = df_data[["Open", "Close"]].min(axis=1)
df_data["ma20"] = df_data["Close"].rolling(20).mean().fillna(method="bfill")
df_data["ma50"] = df_data["Close"].rolling(50).mean().fillna(method="bfill")
return df_data
def find_trendline(
df_data: pd.DataFrame, y_key: str, high_low: str = "high"
) -> pd.DataFrame:
"""Attempts to find a trend line based on y_key column from a given stock ticker data frame.
Parameters
----------
df_data : DataFrame
The stock ticker data frame with at least date_id, y_key columns.
y_key : str
Column name to base the trend line on.
high_low: str, optional
Either "high" or "low". High is the default.
Returns
-------
DataFrame
If a trend is successfully found,
An updated Panda's data frame with a trend data {y_key}_trend column.
If no trend was found,
An original Panda's data frame
"""
for iteration in [3, 4, 5, 6, 7]:
df_temp = df_data.copy()
while len(df_temp) > iteration:
reg = stats.linregress(
x=df_temp["date_id"],
y=df_temp[y_key],
)
if high_low == "high":
df_temp = df_temp.loc[
df_temp[y_key] > reg[0] * df_temp["date_id"] + reg[1]
]
else:
df_temp = df_temp.loc[
df_temp[y_key] < reg[0] * df_temp["date_id"] + reg[1]
]
if len(df_temp) > 1:
break
if len(df_temp) == 1:
return df_data
reg = stats.linregress(
x=df_temp["date_id"],
y=df_temp[y_key],
)
df_data[f"{y_key}_trend"] = reg[0] * df_data["date_id"] + reg[1]
return df_data
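# Illustrative usage of find_trendline() above:
#   df = load_ticker("MSFT", "2021-01-01")
#   df = find_trendline(df, "OC_High", "high")   # adds an "OC_High_trend" column when a fit is found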
def additional_info_about_ticker(ticker: str) -> str:
"""Additional information about trading the ticker such as exchange, currency, timezone and market status
Parameters
----------
ticker : str
The stock ticker to extract if stock market is open or not
Returns
-------
str
Additional information about trading the ticker
"""
extra_info = ""
if ticker:
# outside US exchange
if "." in ticker:
ticker_info = yf.Ticker(ticker).info
extra_info += "\n[param]Datetime: [/param]"
if (
"exchangeTimezoneName" in ticker_info
and ticker_info["exchangeTimezoneName"]
):
dtime = datetime.now(
pytz.timezone(ticker_info["exchangeTimezoneName"])
).strftime("%Y %b %d %H:%M")
extra_info += dtime
extra_info += "\n[param]Timezone: [/param]"
extra_info += ticker_info["exchangeTimezoneName"]
else:
extra_info += "\n[param]Datetime: [/param]"
extra_info += "\n[param]Timezone: [/param]"
extra_info += "\n[param]Exchange: [/param]"
if "exchange" in ticker_info and ticker_info["exchange"]:
exchange_name = ticker_info["exchange"]
extra_info += exchange_name
extra_info += "\n[param]Currency: [/param]"
if "currency" in ticker_info and ticker_info["currency"]:
extra_info += ticker_info["currency"]
extra_info += "\n[param]Market: [/param]"
if "exchange" in ticker_info and ticker_info["exchange"]:
if exchange_name in mcal.get_calendar_names():
calendar = mcal.get_calendar(exchange_name)
sch = calendar.schedule(
start_date=(datetime.now() - timedelta(days=3)).strftime(
"%Y-%m-%d"
),
end_date=(datetime.now() + timedelta(days=3)).strftime(
"%Y-%m-%d"
),
)
user_tz = get_user_timezone_or_invalid()
if user_tz != "INVALID":
is_market_open = calendar.open_at_time(
sch,
pd.Timestamp(
datetime.now().strftime("%Y-%m-%d %H:%M"), tz=user_tz
),
)
if is_market_open:
extra_info += "OPEN"
else:
extra_info += "CLOSED"
else:
extra_info += "\n[param]Datetime: [/param]"
dtime = datetime.now(pytz.timezone("America/New_York")).strftime(
"%Y %b %d %H:%M"
)
extra_info += dtime
extra_info += "\n[param]Timezone: [/param]America/New_York"
extra_info += "\n[param]Currency: [/param]USD"
extra_info += "\n[param]Market: [/param]"
calendar = mcal.get_calendar("NYSE")
sch = calendar.schedule(
start_date=(datetime.now() - timedelta(days=3)).strftime("%Y-%m-%d"),
end_date=(datetime.now() + timedelta(days=3)).strftime("%Y-%m-%d"),
)
user_tz = get_user_timezone_or_invalid()
if user_tz != "INVALID":
is_market_open = calendar.open_at_time(
sch,
pd.Timestamp(datetime.now().strftime("%Y-%m-%d %H:%M"), tz=user_tz),
)
if is_market_open:
extra_info += "OPEN"
else:
extra_info += "CLOSED"
else:
extra_info += "\n[param]Datetime: [/param]"
extra_info += "\n[param]Timezone: [/param]"
extra_info += "\n[param]Exchange: [/param]"
extra_info += "\n[param]Market: [/param]"
extra_info += "\n[param]Currency: [/param]"
return extra_info + "\n"
|
the-stack_0_21248 | from abc import ABC
import torch
class Loss(ABC):
def compute(self, *args, **kwargs):
pass
class SpERTLoss(Loss):
def __init__(self, rel_criterion, entity_criterion, model, optimizer, scheduler, max_grad_norm):
self._rel_criterion = rel_criterion
self._entity_criterion = entity_criterion
self._model = model
self._optimizer = optimizer
self._scheduler = scheduler
self._max_grad_norm = max_grad_norm
def compute(self, entity_logits, rel_logits, entity_types, rel_types, entity_sample_masks, rel_sample_masks):
# entity loss
entity_logits = entity_logits.view(-1, entity_logits.shape[-1])
entity_types = entity_types.view(-1)
entity_sample_masks = entity_sample_masks.view(-1).float()
entity_loss = self._entity_criterion(entity_logits, entity_types)
entity_loss = (entity_loss * entity_sample_masks).sum() / entity_sample_masks.sum()
# relation loss
rel_sample_masks = rel_sample_masks.view(-1).float()
rel_count = rel_sample_masks.sum()
if rel_count.item() != 0:
rel_logits = rel_logits.view(-1, rel_logits.shape[-1])
rel_types = rel_types.view(-1, rel_types.shape[-1])
rel_loss = self._rel_criterion(rel_logits, rel_types)
rel_loss = rel_loss.sum(-1) / rel_loss.shape[-1]
rel_loss = (rel_loss * rel_sample_masks).sum() / rel_count
# joint loss
train_loss = entity_loss + rel_loss
else:
# corner case: no positive/negative relation samples
train_loss = entity_loss
# print(train_loss,entity_loss)
train_loss.backward()
torch.nn.utils.clip_grad_norm_(self._model.parameters(), self._max_grad_norm)
self._optimizer.step()
self._scheduler.step()
self._model.zero_grad()
return train_loss.item()
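# Illustrative wiring (criterion choices are assumptions implied by the element-wise masking above,
# which requires reduction='none'):
#   loss_fn = SpERTLoss(rel_criterion=torch.nn.BCEWithLogitsLoss(reduction='none'),
#                       entity_criterion=torch.nn.CrossEntropyLoss(reduction='none'),
#                       model=model, optimizer=optimizer, scheduler=scheduler, max_grad_norm=1.0)
#   batch_loss = loss_fn.compute(entity_logits, rel_logits, entity_types, rel_types,
#                                entity_sample_masks, rel_sample_masks)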
|
the-stack_0_21249 | # ----------------------------------------------------------------------
#
# Brad T. Aagaard, U.S. Geological Survey
# Charles A. Williams, GNS Science
# Matthew G. Knepley, University at Buffalo
#
# This code was developed as part of the Computational Infrastructure
# for Geodynamics (http://geodynamics.org).
#
# Copyright (c) 2010-2021 University of California, Davis
#
# See LICENSE.md for license information.
#
# ----------------------------------------------------------------------
#
# @file tests/fullscale/linearelasticity/nofaults-3d/sheartraction_soln.py
#
# @brief Analytical solution to shear displacement/traction problem.
#
# 3-D uniform shear test.
#
# --->
# ----------
# | |
# | | | ^
# v | | |
# | |
# ----------
# <--
#
# Dirichlet boundary conditions
# boundary_xneg: Ux(-6000,y,z) = a*y, Uy(-6000,y,z) = a*x, Uz=0
# boundary_yneg: Ux(x,-6000,z) = a*y, Uy(x,-6000,z) = a*y, Uz=0
# boundary_zneg: Uz=0
# Neumann boundary conditions
# \tau_shear_horiz(x,0,z) = -2*mu*a
# \tau_shear_horiz(+6000,y,z) = +2*mu*a
import numpy
# Physical properties
p_density = 2500.0
p_vs = 3000.0
p_vp = 5291.502622129181
p_mu = p_density * p_vs**2
p_lambda = p_density * p_vp**2 - 2 * p_mu
# Uniform stress field (plane strain)
sxx = 0.0
syy = 0.0
szz = 0.0
sxy = 5.0e+6
syz = 0.0
sxz = 0.0
# Uniform strain field
exx = 1.0 / (2 * p_mu) * (sxx - p_lambda / (3 * p_lambda + 2 * p_mu) * (sxx + syy + szz))
eyy = 1.0 / (2 * p_mu) * (syy - p_lambda / (3 * p_lambda + 2 * p_mu) * (sxx + syy + szz))
ezz = 1.0 / (2 * p_mu) * (szz - p_lambda / (3 * p_lambda + 2 * p_mu) * (sxx + syy + szz))
exy = 1.0 / (2 * p_mu) * (sxy)
eyz = 1.0 / (2 * p_mu) * (syz)
exz = 1.0 / (2 * p_mu) * (sxz)
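# With sxx = syy = szz = syz = sxz = 0 above, the only nonzero strain component is
# exy = sxy / (2 * p_mu), which works out to about 1.11e-4.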
# ----------------------------------------------------------------------
class AnalyticalSoln(object):
"""Analytical solution to shear problem.
"""
SPACE_DIM = 3
TENSOR_SIZE = 6
def __init__(self):
self.fields = {
"displacement": self.displacement,
"density": self.density,
"shear_modulus": self.shear_modulus,
"bulk_modulus": self.bulk_modulus,
"cauchy_strain": self.strain,
"cauchy_stress": self.stress,
"initial_amplitude": {
"bc_yneg": self.displacement,
"bc_xneg": self.displacement,
"bc_xpos": self.bc_xpos_traction,
"bc_ypos": self.bc_ypos_traction,
"bc_zneg": self.displacement,
}
}
return
def getField(self, name, mesh_entity, pts):
if name == "initial_amplitude":
field = self.fields[name][mesh_entity](pts)
else:
field = self.fields[name](pts)
return field
def displacement(self, locs):
"""Compute displacement field at locations.
"""
(npts, dim) = locs.shape
disp = numpy.zeros((1, npts, self.SPACE_DIM), dtype=numpy.float64)
disp[0,:, 0] = exx * locs[:, 0] + exy * locs[:, 1] + exz * locs[:, 2]
disp[0,:, 1] = exy * locs[:, 0] + eyy * locs[:, 1] + eyz * locs[:, 2]
disp[0,:, 2] = exz * locs[:, 0] + eyz * locs[:, 1] + ezz * locs[:, 2]
return disp
def density(self, locs):
"""Compute density field at locations.
"""
(npts, dim) = locs.shape
density = p_density * numpy.ones((1, npts, 1), dtype=numpy.float64)
return density
def shear_modulus(self, locs):
"""Compute shear modulus field at locations.
"""
(npts, dim) = locs.shape
shear_modulus = p_mu * numpy.ones((1, npts, 1), dtype=numpy.float64)
return shear_modulus
def bulk_modulus(self, locs):
"""Compute bulk modulus field at locations.
"""
(npts, dim) = locs.shape
bulk_modulus = (p_lambda + 2.0 / 3.0 * p_mu) * numpy.ones((1, npts, 1), dtype=numpy.float64)
return bulk_modulus
def strain(self, locs):
"""Compute strain field at locations.
"""
(npts, dim) = locs.shape
strain = numpy.zeros((1, npts, self.TENSOR_SIZE), dtype=numpy.float64)
strain[0,:, 0] = exx
strain[0,:, 1] = eyy
strain[0,:, 2] = ezz
strain[0,:, 3] = exy
strain[0,:, 4] = eyz
strain[0,:, 5] = exz
return strain
def stress(self, locs):
"""Compute stress field at locations.
"""
(npts, dim) = locs.shape
stress = numpy.zeros((1, npts, self.TENSOR_SIZE), dtype=numpy.float64)
stress[0,:, 0] = sxx
stress[0,:, 1] = syy
stress[0,:, 2] = szz
stress[0,:, 3] = sxy
stress[0,:, 4] = syz
stress[0,:, 5] = sxz
return stress
def bc_xpos_traction(self, locs):
"""Compute initial traction at locations.
"""
(npts, dim) = locs.shape
traction = numpy.zeros((1, npts, self.SPACE_DIM), dtype=numpy.float64)
traction[0,:, 0] = sxy
traction[0,:, 1] = 0.0
traction[0,:, 2] = 0.0
return traction
def bc_ypos_traction(self, locs):
"""Compute initial traction at locations.
"""
(npts, dim) = locs.shape
traction = numpy.zeros((1, npts, self.SPACE_DIM), dtype=numpy.float64)
traction[0,:, 0] = -sxy
traction[0,:, 1] = 0.0
traction[0,:, 2] = 0.0
return traction
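# Illustrative spot check (not part of the test harness; the mesh_entity argument is ignored for
# non-BC fields):
#   soln = AnalyticalSoln()
#   pts = numpy.array([[0.0, 0.0, 0.0]])
#   disp = soln.getField("displacement", None, pts)   # array of shape (1, 1, 3)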
# End of file
|
the-stack_0_21250 | """
This file offers the methods to automatically retrieve the graph Pelagibacter ubique HTCC7217.
The graph is automatically retrieved from the STRING repository.
References
---------------------
Please cite the following if you use the data:
```bib
@article{szklarczyk2019string,
title={STRING v11: protein--protein association networks with increased coverage, supporting functional discovery in genome-wide experimental datasets},
author={Szklarczyk, Damian and Gable, Annika L and Lyon, David and Junge, Alexander and Wyder, Stefan and Huerta-Cepas, Jaime and Simonovic, Milan and Doncheva, Nadezhda T and Morris, John H and Bork, Peer and others},
journal={Nucleic acids research},
volume={47},
number={D1},
pages={D607--D613},
year={2019},
publisher={Oxford University Press}
}
```
"""
from typing import Dict
from ..automatic_graph_retrieval import AutomaticallyRetrievedGraph
from ...ensmallen import Graph # pylint: disable=import-error
def PelagibacterUbiqueHtcc7217(
directed: bool = False,
preprocess: bool = True,
load_nodes: bool = True,
verbose: int = 2,
cache: bool = True,
cache_path: str = "graphs/string",
version: str = "links.v11.5",
**additional_graph_kwargs: Dict
) -> Graph:
"""Return new instance of the Pelagibacter ubique HTCC7217 graph.
The graph is automatically retrieved from the STRING repository.
Parameters
-------------------
directed: bool = False
        Whether to load the graph as directed or undirected.
By default false.
preprocess: bool = True
Whether to preprocess the graph to be loaded in
optimal time and memory.
load_nodes: bool = True,
Whether to load the nodes vocabulary or treat the nodes
simply as a numeric range.
verbose: int = 2,
        Whether to show loading bars during the retrieval and building
of the graph.
cache: bool = True
Whether to use cache, i.e. download files only once
and preprocess them only once.
cache_path: str = "graphs"
Where to store the downloaded graphs.
version: str = "links.v11.5"
The version of the graph to retrieve.
The available versions are:
- homology.v11.0
- homology.v11.5
- physical.links.v11.0
- physical.links.v11.5
- links.v11.0
- links.v11.5
additional_graph_kwargs: Dict
Additional graph kwargs.
Returns
-----------------------
    Instance of the Pelagibacter ubique HTCC7217 graph.
References
---------------------
Please cite the following if you use the data:
```bib
@article{szklarczyk2019string,
title={STRING v11: protein--protein association networks with increased coverage, supporting functional discovery in genome-wide experimental datasets},
author={Szklarczyk, Damian and Gable, Annika L and Lyon, David and Junge, Alexander and Wyder, Stefan and Huerta-Cepas, Jaime and Simonovic, Milan and Doncheva, Nadezhda T and Morris, John H and Bork, Peer and others},
journal={Nucleic acids research},
volume={47},
number={D1},
pages={D607--D613},
year={2019},
publisher={Oxford University Press}
}
```
"""
return AutomaticallyRetrievedGraph(
graph_name="PelagibacterUbiqueHtcc7217",
repository="string",
version=version,
directed=directed,
preprocess=preprocess,
load_nodes=load_nodes,
verbose=verbose,
cache=cache,
cache_path=cache_path,
additional_graph_kwargs=additional_graph_kwargs
)()
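# Illustrative usage (downloading from the STRING repository requires network access; any further
# method calls on the returned ensmallen Graph are assumptions about that API):
#   graph = PelagibacterUbiqueHtcc7217(version="links.v11.5")
#   print(graph)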
|
the-stack_0_21252 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# Copyright (c) 2019 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""utils for server"""
from __future__ import division
from __future__ import absolute_import
from __future__ import print_function
from __future__ import unicode_literals
import numpy as np
import struct
from ernie_gen.propeller.service import interface_pb2
def slot_to_numpy(slot):
"""doc"""
if slot.type == interface_pb2.Slot.FP32:
dtype = np.float32
type_str = 'f'
elif slot.type == interface_pb2.Slot.INT32:
type_str = 'i'
dtype = np.int32
elif slot.type == interface_pb2.Slot.INT64:
dtype = np.int64
type_str = 'q'
else:
raise RuntimeError('know type %s' % slot.type)
num = len(slot.data) // struct.calcsize(type_str)
arr = struct.unpack('%d%s' % (num, type_str), slot.data)
shape = slot.dims
ret = np.array(arr, dtype=dtype).reshape(shape)
return ret
def numpy_to_slot(arr):
"""doc"""
if arr.dtype == np.float32:
dtype = interface_pb2.Slot.FP32
elif arr.dtype == np.int32:
dtype = interface_pb2.Slot.INT32
elif arr.dtype == np.int64:
dtype = interface_pb2.Slot.INT64
else:
raise RuntimeError('know type %s' % arr.dtype)
pb = interface_pb2.Slot(
type=dtype, dims=list(arr.shape), data=arr.tobytes())
return pb
def slot_to_paddlearray(slot):
"""doc"""
import paddle.fluid.core as core
if slot.type == interface_pb2.Slot.FP32:
dtype = np.float32
type_str = 'f'
elif slot.type == interface_pb2.Slot.INT32:
dtype = np.int32
type_str = 'i'
elif slot.type == interface_pb2.Slot.INT64:
dtype = np.int64
type_str = 'q'
else:
raise RuntimeError('know type %s' % slot.type)
num = len(slot.data) // struct.calcsize(type_str)
arr = struct.unpack('%d%s' % (num, type_str), slot.data)
ret = core.PaddleTensor(data=np.array(arr, dtype=dtype).reshape(slot.dims))
return ret
def paddlearray_to_slot(arr):
"""doc"""
import paddle.fluid.core as core
if arr.dtype == core.PaddleDType.FLOAT32:
dtype = interface_pb2.Slot.FP32
type_str = 'f'
arr_data = arr.data.float_data()
elif arr.dtype == core.PaddleDType.INT32:
dtype = interface_pb2.Slot.INT32
type_str = 'i'
arr_data = arr.data.int32_data()
elif arr.dtype == core.PaddleDType.INT64:
dtype = interface_pb2.Slot.INT64
type_str = 'q'
arr_data = arr.data.int64_data()
else:
raise RuntimeError('know type %s' % arr.dtype)
data = struct.pack('%d%s' % (len(arr_data), type_str), *arr_data)
pb = interface_pb2.Slot(type=dtype, dims=list(arr.shape), data=data)
return pb
def nparray_list_serialize(arr_list):
"""doc"""
slot_list = [numpy_to_slot(arr) for arr in arr_list]
slots = interface_pb2.Slots(slots=slot_list)
return slots.SerializeToString()
def nparray_list_deserialize(string):
"""doc"""
slots = interface_pb2.Slots()
slots.ParseFromString(string)
return [slot_to_numpy(slot) for slot in slots.slots]
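# Illustrative local round trip (no server needed):
#   arrs = [np.arange(6, dtype=np.int64).reshape(2, 3), np.ones([2], dtype=np.float32)]
#   back = nparray_list_deserialize(nparray_list_serialize(arrs))
#   assert all((a == b).all() for a, b in zip(arrs, back))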
|
the-stack_0_21254 | #!/usr/bin/env python
# coding: utf-8
# 24Oct2018 JBH
# given a uce data file in format shown below and a gff file, show gff lines matching uce pos for each uce
# also shows a summary line per uce including intergenic
# presumes uce data has mapped uces to a single point represented by its start pos:
# uce-4323 NC_006088.4 2744945
import sys, re, time, os.path
from pprint import pprint
class Debug():
debugging = False
@staticmethod
def print_scaf_counts(scaf_map, scaf_list):
if not Debug.debugging:
return
for s in range(len(scaf_list)):
scaf = scaf_list[s]
sys.stderr.write("{} {}\n".format(scaf, len(scaf_map[scaf])))
@staticmethod
def pprint_scaf_uces(scaf_map, scaf):
if not Debug.debugging:
return
for u in scaf_map[scaf]:
pprint(vars(u))
def remove_version_suffix(name): # e.g. NC_006088.4 to NC_006088
    return re.sub(r"\.[0-9]*$", "", name)
def to_int(digits_str, errval=0): # do not throw a ValueError exception if input happens to not be a string of just digits
return int(digits_str) if digits_str.isdigit() else errval
def get_subfld(fld, sep=";", which=0): # returns first semi-colon delimited field by default
return fld.split(sep)[which]
def sort_scafmap_by_uce_pos(scaf_map):
for scaf in scaf_map:
scaf_map[scaf].sort(key=lambda uceinfo: uceinfo.pos)
class TypeUtil: # container for the routines that map uce type string types (exon, intron, etc) to other strings for display
type_totals = {}
@classmethod
def inc_totals_from_types(cls, uce_typs): # e.g.: gene(ID=gene126) mRNA intron mRNA intron mRNA intron mRNA intron
shorthand = cls.shorthand_str(uce_typs)
cls.inc_totals(shorthand)
@classmethod
def display_str(cls): # type_totals map has shorthand str for key, "EI", "E", "I", "N", and count for value
tss = cls.type_totals.copy() # so we can delete items as we convert them
cls.display = ""
cls.total = 0
def add_count(shrt, msg): # e.g., tss["E"] counts uces with only exon overlaps
if shrt in tss:
cls.total += tss[shrt]
cls.display += msg.format(tss[shrt])
del tss[shrt]
add_count("E", "{} exon-only ")
add_count("I", "{} intron-only ")
add_count("EI","{} exon-and-intron ")
add_count("N", "{} intergenic ")
# now handle any that we didn't expect to occur
for rare in tss:
nm = rare if rare != "" else "unclassified"
cls.display += "{} '{}' ".format(tss[rare], nm)
cls.total += tss[rare]
cls.display = "{} total: {}".format(cls.total, cls.display.rstrip(" "))
return cls.display
@classmethod
def clear_totals(cls):
        cls.type_totals.clear()
# helper methods
@classmethod
def inc_totals(cls, shorthand): # "E", "I", "EI" is shorthand
cls.type_totals[shorthand] = (cls.type_totals[shorthand] + 1) if shorthand in cls.type_totals else 1
@staticmethod
    def shorthand_str(uce_typs): # build a shorthand string with 'E', 'I', 'N' for the line types present
uce_totals_counts = TypeUtil.uce_type_counts(uce_typs) # counts for the current uce
typ_str = "" # will have an E for exon, I for Intron and N for iNtergenic. so one with exon and intron would be "EI"
if uce_totals_counts["exon"] > 0:
typ_str += "E"
if uce_totals_counts["intron"] > 0:
typ_str += "I"
if uce_totals_counts["intergenic"] > 0:
typ_str += "N"
return typ_str
@staticmethod
def uce_type_counts(uce_typs): # given the string with the uce gff line types, return a map of their counts
uce_totals = {} # total for the current uce
uce_totals["exon"]=0; uce_totals["intron"]=0; uce_totals["intergenic"]=0
for w in uce_typs.split(" "):
if w in ["exon", "intron", "intergenic"]:
uce_totals[w] += 1
return uce_totals
# end class TypeUtil
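# Added note (not in the original script): for a uce whose overlapping gff line
# types read e.g. "gene(ID=gene126) mRNA exon mRNA intron", shorthand_str()
# returns "EI", inc_totals_from_types() bumps the "EI" bucket, and display_str()
# later reports that uce under the exon-and-intron count.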
def create_uce_scaf_map(ucefile, remove_version = True):
class Uce_info(object): pass
# create dict for each scaffold (name in fld 1) and store uce info
# (uce name, start pos, empty to start list of matching gff lines) for each uce in scaffold
scaf_map = {} # holds info about each scaf's uces
scaf_list = [] # scaf names in order of occurrence so we can loop in same order later
for ln in ucefile:
ln = ln.rstrip("\n")
uceflds = ln.split("\t")
uce_nm = uceflds[0]
uce_pos = int(uceflds[2])
uce_scaf = uceflds[1]
if remove_version:
uce_scaf = remove_version_suffix(uce_scaf) # get rid of version info in scaffold name
if not uce_scaf in scaf_map:
scaf_map[uce_scaf] = []
scaf_list.append(uce_scaf)
        # store info about the uce in the relevant scaffold map
uce_info = Uce_info()
uce_info.uce = uce_nm;
uce_info.pos = uce_pos
uce_info.gff_lns = []
scaf_map[uce_scaf].append(uce_info)
# in case the ucefile was not in uce position sorted order, this will take care of that
sort_scafmap_by_uce_pos(scaf_map)
return scaf_map, scaf_list
def map_uces_to_gff_lines(uce_file, gff_file, begpos_ix = 3, endpos_ix = 4, exclude_list = ["region"], remove_version = True):
ucefile = open(uce_file, "r")
gff = open(gff_file, "r")
uces_by_scaf_map, scaf_list = create_uce_scaf_map(ucefile, remove_version)
Debug.print_scaf_counts(uces_by_scaf_map, scaf_list)
for gff_ln in gff:
# validate line
if len(gff_ln) < 3 or gff_ln[0] == "#": # ignore empty or comment lines
continue
cur_flds = gff_ln.split("\t")
if len(cur_flds) <= endpos_ix: # line too short
continue
ln_type = cur_flds[2]
if ln_type in exclude_list:
continue
begpos = to_int(cur_flds[begpos_ix])
endpos = to_int(cur_flds[endpos_ix])
if begpos < 1 or endpos < 1:
continue
if begpos > endpos: # swap them so we always have smaller position followed by larger
begpos, endpos = endpos, begpos
# line looks good, see if its extent has any of its scaffold's uce start positions in it
scaf = cur_flds[0]
if remove_version:
scaf = remove_version_suffix(scaf)
if not scaf in uces_by_scaf_map:
continue
# there are uces on this scaffold: see if this gff line is in one of the uce's scopes.
# given certain assumptions about the order of the gff lines this could be done more
# efficiently, however since isoforms make the order a little more complicated we will
# do a search through all the specific scaffold's uce space for each line.
for u in uces_by_scaf_map[scaf]:
if begpos <= u.pos <= endpos: # this uce is the scope of this line
u.gff_lns.append(gff_ln.rstrip("\n"))
return uces_by_scaf_map, scaf_list
def display_uce_gff_info(uces_by_scaf_map, scaf_list, show_summary = True, show_lines = False):
show_summary = show_summary or not show_lines # if both are False we still want to show the summaries
    type_totals = {} # totals for exon_only, intron_only, exon_and_intron, or intergenic UCEs in the gff
last_scaf = ""; last_pos = 0 # so we can output distance from last UCE as the 5th field
num_uces = 0; num_gfflns = 0
for scaf in scaf_list:
for u in uces_by_scaf_map[scaf]:
num_uces += 1; num_gfflns += len(u.gff_lns)
distance = "" if scaf != last_scaf else u.pos-last_pos
last_scaf = scaf; last_pos = u.pos
uce_info = "{}\t{}\t{}\t".format(u.uce, scaf, u.pos)
if len(u.gff_lns) > 0:
if show_summary: # uce name, scaf, begpos, all the gff line types associated with this uce
typ = ""
for l in u.gff_lns:
flds = l.split("\t")
typ += flds[2]
if flds[2] == "gene":
gid = get_subfld(flds[8])
if gid != "":
typ += "(" + gid + ")"
typ += " "
sys.stdout.write("{}{}\t{}\t{}\n".format(uce_info, TypeUtil.shorthand_str(typ), distance, typ))
if show_lines: # show uce name then gff line
for ln in u.gff_lns:
sys.stdout.write("{}\t{}\n".format(u.uce, ln))
else:
typ = "intergenic"
sys.stdout.write("{}N\t{}\t{}\n".format(uce_info, distance, typ))
TypeUtil.inc_totals_from_types(typ)
return num_uces, num_gfflns
def map_and_display(uce_filename, gff_filename, exclude_list, show_summary, show_lines):
start = time.time()
# gather up a map per scaffold of the uces in that scaffold and the gff lines overlapping each such uce
uces_by_scaf_map, scaf_list = map_uces_to_gff_lines(uce_filename, gff_filename, exclude_list = exclude_list)
# display the info based on defaults or user preferences that override them
num_uces, num_gfflns = display_uce_gff_info(uces_by_scaf_map, scaf_list, show_summary, show_lines)
# show what was done and how long it took
    duration = time.time() - start  # elapsed seconds
m, s = divmod(duration, 60); h, mrem = divmod(m, 60)
sys.stderr.write("\r{} uces {} gff lines ({}m{:.3f}s)\n".format(num_uces, num_gfflns, int(m), s))
sys.stderr.write("{}\n".format(TypeUtil.display_str()))
def usage(exit_code = 1):
msg = """
usage: uce_gff_lines.py <uce_name_pos_file> <gff_file> [-lines [-nosummary]] [<gff_line_type_to_exclude> ...]
Input is a file with UCE info lines and a gff file, preferably with introns added
(for this use you can use add_intron_to_gff.py or other programs).
Each UCE info line should have 3 tabbed fields e.g: uce-4323 NC_006088.4 2744945
where the 3rd field is the start position of the uce (end position is optional).
Default output shows a summary for each UCE info line showing it, the number of gff lines
and the type of each gff line overlapping the uce start position.
If you use the -lines option, it also outputs each gff line that refers to the UCE
outputting the UCE name prefixed as the first tabbed field of the line. When using
the -lines option you can suppress summaries with -nosummary.
Non-hyphen command line terms are considered types of gff lines to exclude from consideration.
This might be CDS or mRNA. Comparisons are case sensitive. Term "region" excluded by default.
The intergenic UCEs are shown in both cases. You can screen out any summary lines, including
intergenic, and just retain the gff lines by piping output to: awk '! ($3~/^[0-9]+$/)' or you
can remove gff_lines retaining only the summary by piping to: awk '($3~/^[0-9]+$/)'
"""
sys.stderr.write(msg)
sys.exit(exit_code)
def getoptions(argv, min_args, exit_code = 1):
if len(argv) < min_args:
usage(exit_code)
def checkfile(fname):
if not os.path.isfile(fname):
sys.stderr.write("File not found: {}\n".format(fname))
sys.exit(exit_code)
class options: pass
options.uce_filename = argv[1]; checkfile(options.uce_filename)
options.gff_filename = argv[2]; checkfile(options.gff_filename)
options.show_summary = True
options.show_lines = False
options.excludes = ["region", "Region", "REGION"]
# handle the options after the 2 file names. file names must be in those positions.
for op in range(3,len(argv)):
arg = argv[op]
if arg[:3] == "-li":
options.show_lines = True
elif arg[:5] == "-nosu":
options.show_summary = False
elif arg[0] != '-':
options.excludes.append(arg)
elif arg == "-debug":
Debug.debugging = True
else:
sys.stderr.write("invalid option: {}\n".format(arg))
return options
def main(argv): # pass in argv so we can load this file and call this as uce_gff_lines.main(sys.argv) from another python file
ops = getoptions(argv, 3) # need at least 3 args: prog_name, uce_filename, gff_filename
map_and_display(ops.uce_filename, ops.gff_filename, ops.excludes, ops.show_summary, ops.show_lines)
if __name__ == '__main__':
from signal import signal, SIGPIPE, SIG_DFL
signal(SIGPIPE,SIG_DFL) # don't complain if pipe closes output (head or less commands will cause this)
try:
main(sys.argv)
except (KeyboardInterrupt, SystemExit): # don't do stack trace for Ctrl+C
sys.exit(0)
# end of program
|
the-stack_0_21255 | """
TODO: save/load the stream?
TODO: make keyframe timestamps non-inclusive
random notes:
when running the program, a selection box appears?
load
record
logic of keyframes:
* next keyframe can't have the same timestamp as prev keyframe.
* last packet in prev keyframe can have the same timestamp as first packet in the new keyframe.
You'll also have to use two world instances - one for playback/rewind/forward, and the other for keyframe
generation. The other world instance is never visible and can skip generating animation objects and so on.
# NB! this system DROPS packets that arrive later than the sync_window_seconds.
NB! this system OVERWRITES packet timestamps for packets that arrive later than the sync_window_seconds.
"""
import logging
llog = logging.getLogger(__name__) # the name 'log' is taken in sdl2
import time
class KeyframeSlot:
def __init__(self, timestamp, keyframe, packets=None):
self.timestamp = timestamp
self.keyframe = keyframe
# [(timestamp, packet), ..]
self.packets = [] if packets == None else packets
class SyncBuffer:
""" timesynchronize objects from in-order streams. add timestamp/stream_id/object triples, get timesorted objects back. """
def __init__(self, sync_window_seconds=5.):
""" sync_window_seconds - will only return entries that are older than this.
if None, then return entries as soon as they arrive; no sorting. """
self.sync_window_seconds = sync_window_seconds
self.streams = {} # stream_id: packets_list
self.sorted_packets = [] # [(timestamp, packet), ..]
self.last_sorted_time = None
def tick(self):
""" Run the sorting algorithm on the received packets given to put_packet() """
# get all older than sync_window_seconds packets and append them in order to the last keyframeslot packets-list.
if self.streams:
t = time.time()
streams = self.streams.values()
while 1:
popstream = None
poptime = None
# find the oldest packet of any stream. stream is just a packet list.
for stream in streams:
if stream and (popstream == None or stream[0][0] < poptime):
popstream = stream
poptime = stream[0][0]
# found the stream with the youngest packet.
# remove the packet from sync-stream and append it to "keyframe" if the packet is sufficiently old.
if popstream and (self.sync_window_seconds == None or t - poptime >= self.sync_window_seconds):
self.sorted_packets.append( popstream.pop(0) )
self.last_sorted_time = poptime
else:
break
def get_sorted_packets(self):
""" Return [(timestamp, packet), ..], clear local buf. """
l = self.sorted_packets
self.sorted_packets = []
return l
def put_packet(self, timestamp, packet, stream_id):
""" Add a packet. Will decide if the packet is too old and disdcard it, or how to order it if not.
This is not a general solution to the syncing problem - assumes that packets with the same stream_id are ordered. """
stream = self.streams.get(stream_id, None)
if not stream:
stream = self.streams[stream_id] = []
if stream:
if stream[-1][0] > timestamp:
llog.warning("overwriting timestamp (%.2f s) for packet: %s", stream[-1][0] - timestamp, packet)
timestamp = stream[-1][0]
#assert stream[-1][0] <= timestamp, "\nnew packet %s: %s\nold packet %s: %s\n" % (timestamp, packet, stream[-1][0], stream[-1][1])
# this is a bit complicated...
# replace timestamp with last sorted packet timestamp if the given timestamp is smaller.
# this assures that all keyframes contain data with timestamps inside the keyframe period
# and also assures that output of this SyncBuffer is always time-ordered.
# the other possibility would be to just drop the packet. don't know which is better.
if self.last_sorted_time != None and timestamp < self.last_sorted_time:
timestamp = self.last_sorted_time
stream.append( (timestamp, packet) )
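# Added usage sketch (not part of the original module). With
# sync_window_seconds=None, tick() releases everything currently buffered,
# oldest timestamp first; the packet payloads below are arbitrary placeholders:
#
#     buf = SyncBuffer(sync_window_seconds=None)
#     buf.put_packet(1.0, "gps fix", stream_id="gps")
#     buf.put_packet(0.5, "imu sample", stream_id="imu")
#     buf.tick()
#     buf.get_sorted_packets()  # -> [(0.5, "imu sample"), (1.0, "gps fix")]
#
# With a real window (e.g. 5 s) a packet is only released once it is at least
# that old, which gives slower streams a chance to be merged in timestamp order.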
class WorldStreamer:
MAX_PACKETS_PER_KEYFRAME_HINT = 500
def __init__(self, sync_window_seconds=5.):
""" sync_window_seconds - will only return entries that are older than this.
if None, then return entries as soon as they arrive; no sorting. """
self.sync_window_seconds = sync_window_seconds
self.keyframeslots = [] # KeyframeSlot objects
self.streams = {} # stream_id: packets_list
self.num_packets_sorted = 0 # statistics
self.syncbuffer = SyncBuffer(sync_window_seconds)
# timepoints of sorted data. timestamps are read from the packets.
self.start_time = None
self.end_time = None # timestamp of the last sorted packet.
self.current_time = None # will never go beyond the limits of start_time and end_time
self.wanted_time = None # used by get_delta_packets(). like current_time, but can go beyond end_time and won't stop when there's no new packets for a while.
def tick(self):
""" Also returns a list of fresly sorted packets to be used on world creation """
self.syncbuffer.tick()
sorted_packets = self.syncbuffer.get_sorted_packets()
if sorted_packets:
if not self.keyframeslots:
self.put_keyframe({}, sorted_packets[0][0])
self.end_time = sorted_packets[-1][0]
self.num_packets_sorted += len(sorted_packets)
#for packet in sorted_packets:
# self.keyframeslots[-1].packets.append( packet )
self.keyframeslots[-1].packets.extend( sorted_packets )
return sorted_packets
def need_keyframe(self):
""" Add a new keyframe if this returns True. """
# makes sure that EVERY sorted packet has been handled. otherwise the world state gets out of sync.
if self.keyframeslots and len(self.keyframeslots[-1].packets) >= self.MAX_PACKETS_PER_KEYFRAME_HINT:
if self.keyframeslots[-1].packets[-1][0] > self.keyframeslots[-1].packets[0][0]:
return True
return False
def put_keyframe(self, keyframe, timestamp=None):
""" Sets the packet stream starting point and maybe also starts the recording process. Call periodically. """
assert keyframe != None
if self.start_time == None:
assert timestamp != None
self.start_time = timestamp
self.end_time = timestamp
            # A little hack so the first packet is not dropped before seek is used. The problem is that only the first time
            # get_delta_packets is called after the first packet(s), the first parameter to get_packets would have to be
            # inclusive. Rather than special-casing that, we use this small offset hack.
self.current_time = timestamp - 0.00001
self.wanted_time = timestamp
if timestamp == None:
timestamp = self.end_time
if self.keyframeslots: # ensure timestamp is newer than previous
assert self.keyframeslots[-1].timestamp < timestamp
self.keyframeslots.append( KeyframeSlot(timestamp, keyframe) )
def put_packet(self, timestamp, packet, stream_id):
self.syncbuffer.put_packet(timestamp, packet, stream_id)
def seek(self, timestamp):
""" Set playback time (self.current_time) to timestamp. Clip time between available data (self.start_time and self.end_time).
Return (keyframe, packet_list) that represent complete state of the system at the given time. Return (None, None) if no keyframe exists yet. """
if self.start_time == None:
return None, None
else:
timestamp = min(timestamp, self.end_time)
timestamp = max(timestamp, self.start_time)
self.current_time = timestamp
self.wanted_time = timestamp
keyframe, packet_list = self.get_seek_state(self.current_time)
assert keyframe != None
assert packet_list != None
return keyframe, packet_list
def get_delta_packets(self, dt):
""" Move self.current_time forward by dt (if possible) and return packet_list [(timestamp, packet), ..] for that dt.
self.current_time will be clipped by self.end_time. return empty list if no data yet.
"""
if self.start_time == None or dt == 0.:
return []
else:
assert self.current_time != None
self.wanted_time += dt
packet_list = self.get_packets(self.current_time, self.wanted_time)
#llog.info("get from delta current_time %.3f wanted_time %.3f len %i", self.current_time, self.wanted_time, len(packet_list))
self.current_time = min(self.wanted_time, self.end_time)
return packet_list
def get_current_time(self):
""" Everything prior to this is set in stone in this SyncBuf. Changes only with self.seek and
self.get_delta_packets and is always at least self.sync_window_seconds in the past.
Returns None if no keyframe received yet with put_keyframe().
Use this, or self.wanted_time as the current simulation time. self.wanted_time is smooth, but
new packets can appear before it, and never before self.current_time. """
return self.current_time
#
# lower-level functions
#
def get_prev_keyframe(self, timestamp):
""" return the first (timestamp, keyframe) pair that came before the timestamp or at the exact timestamp.
return (None, None) if timestamp is earlier than the first keyframe. """
kfs, i = self._get_prev_keyframeslot(timestamp)
if kfs:
return kfs.timestamp, kfs.keyframe
else:
return None, None
def _get_prev_keyframeslot(self, timestamp):
""" Return (keyframeslot, index) - the first keyframeslot and its index in
self.keyframeslots that came before the timestamp or at the exact timestamp.
Return (None, None) if timestamp is earlier than the first keyframe or no keyframe exists yet. """
for i, kf in enumerate(reversed(self.keyframeslots)):
if kf.timestamp <= timestamp:
return kf, len(self.keyframeslots) - i - 1
# wanted timestamp is earlier than the first keyframe
return None, None
def get_packets(self, start_time, end_time, end_is_inclusive=True):
""" return list of packets [(timestamp, packet), ..] between these timestamp.
if end_is_inclusive is False, then start_time is inclusive. """
assert end_time >= start_time
# first, get keyframes that contain the start/end times.
keyframeslot1, i1 = self._get_prev_keyframeslot(start_time)
keyframeslot2, i2 = self._get_prev_keyframeslot(end_time)
if keyframeslot1 == keyframeslot2:
keyframeslot2 = None
# for cache..
#if self._prev_get_packets_end_time == start_time:
# pass
#self._prev_get_packets_end_time = end_time
result1 = result2 = []
if end_is_inclusive:
if keyframeslot1:
# p[0] is timestamp, p[1] is packet. keyframeslot1 is a KeyframeSlot object.
result1 = [p for p in keyframeslot1.packets if start_time < p[0] <= end_time]
if keyframeslot2:
result2 = [p for p in keyframeslot2.packets if start_time < p[0] <= end_time]
else:
if keyframeslot1:
result1 = [p for p in keyframeslot1.packets if start_time <= p[0] < end_time]
if keyframeslot2:
result2 = [p for p in keyframeslot2.packets if start_time <= p[0] < end_time]
result = result1
# get full packets lists of keyframeslots that are between the edge slots.
if i1 != None and i2 != None and i2 - i1 > 1:
for kfs in self.keyframeslots[i1+1:i2]:
result.extend(kfs.packets)
result.extend(result2)
return result
def get_seek_state(self, timestamp):
""" Return (keyframe, packet_list)
Get keyframe and packets starting from that keyframe. The pair represents complete state of the system at the given time.
Return (None, None) if timestamp is earlier than the first keyframe or no keyframe exists yet.
timestamp is inclusive. """
keyframeslot, i = self._get_prev_keyframeslot(timestamp)
if keyframeslot == None:
return None, None
else:
return keyframeslot.keyframe, [p for p in keyframeslot.packets if p[0] <= timestamp]
def load_file(self, filename):
""" load the whole file to ram """
pass
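# Added usage sketch (not part of the original module): drives a WorldStreamer
# without a sync window so packets become visible on the first tick. Packet
# payloads and the stream id below are arbitrary placeholders.
if __name__ == "__main__":
    ws = WorldStreamer(sync_window_seconds=None)
    ws.put_packet(0.0, "spawn player", stream_id=1)
    ws.put_packet(0.5, "move player", stream_id=1)
    fresh = ws.tick()  # first tick also creates the initial keyframe
    print("sorted on first tick:", fresh)
    print("packets over the next second:", ws.get_delta_packets(1.0))
    print("full state at t=0.25:", ws.seek(0.25))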
|
the-stack_0_21257 | """
first_order_lagrangian_algorithm.py
Returns the minimizer of the constrained problem (first-order Lagrangian method).
X0 - initial guess for X;
Lambda0 - initial guess for Lambda (equality multipliers);
Mu0 - initial guess for Mu (inequality multipliers);
func - anonymous function (objective function)
Dy - anonymous function gradient (objective function)
Dh - anonymous function gradient (equality constraints function)
Dg - anonymous function gradient (inequality constraints function)
h - anonymous function (equality constraints function)
g - anonymous function (inequality constraints function)
options - dict with N_iter_max, tolerance_x, tolerance_y, x_lower, x_upper
"""
import numpy as np
def first_order_lagrangian_algorithm(X0, Lambda0, Mu0, func, Dy, Dh, Dg, h, g, options):
epsilon = 10e-3;
alpha = 0.01; # step size for X
beta = 0.01; # step size for Lambda
gamma = 0.01 # step size for Mu
report = {};
N_iter_max = options['N_iter_max'];
tolerance_x = options['tolerance_x'];
tolerance_y = options['tolerance_y'];
X_lower = options['x_lower'];
X_upper = options['x_upper'];
progress_x = np.zeros((X0.size, N_iter_max + 1));
progress_y = np.zeros((1, N_iter_max + 1));
progress_x[:, [0]] = X0;
progress_y[0, [0]] = func(X0);
m = Lambda0.shape[0];
n = X0.shape[0];
X_old = X0;
Lambda_old = Lambda0;
Mu_old = Mu0;
W_old = np.concatenate((X0, Lambda0, Mu0), axis=0);
for iter_no in range(1, N_iter_max + 1):
dX = -(Dy(X_old) + Dh(X_old) @ Lambda_old + Dg(X_old) @ Mu_old); # direction for X
dLambda = h(X_old); # direction for Lambda
dMu = g(X_old); # direction for Mu
X = X_old + alpha * dX;
Lambda = Lambda_old + beta * dLambda;
Mu = Mu_old + gamma * dMu;
indx_limits = (Mu < 0);
Mu[indx_limits] = 0;
# # # Projection onto the box constraints of X (causes numerical instability):
# indx_limits = (X < X_lower);
# X[indx_limits] = X_lower[indx_limits];
# indx_limits = (X > X_upper);
# X[indx_limits] = X_upper[indx_limits];
W = np.concatenate((X, Lambda, Mu), axis=0);
progress_x[:, [iter_no]] = X;
progress_y[0, [iter_no]] = func(X);
if (np.linalg.norm(h(X)) < epsilon and np.linalg.norm(g(X)) < epsilon):
print('Tolerance in h(X) and g(X) is reached in %d iterations, exit..' % (iter_no));
break;
# if (np.linalg.norm(W - W_old) < tolerance_x * np.linalg.norm(W_old)):
# print('Tolerance in X is reached in %d iterations, exit..' % (iter_no));
# break;
# if (np.abs(progress_y[0, [iter_no]] - progress_y[0, [iter_no - 1]]) < tolerance_y * np.abs(progress_y[0, [iter_no - 1]])):
# print('Tolerance in Y is reached in %d iterations, exit..' % (iter_no));
# break;
X_old = X;
Lambda_old = Lambda;
Mu_old = Mu;
W_old = W;
report = {'N_iter_max' : N_iter_max, 'iter_no' : iter_no, 'X0' : X0, 'X' : X, 'progress_x' : progress_x, 'progress_y' : progress_y};
return (X, report);
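# Added usage sketch (not part of the original file): minimizes
# f(X) = x1^2 + x2^2 subject to x1 + x2 = 1 and -x1 <= 0, whose minimizer is
# approximately X = [0.5, 0.5]. The constraint functions, step counts and
# tolerances below are illustrative assumptions, not values from the original.
if __name__ == "__main__":
    options = {'N_iter_max': 5000,
               'tolerance_x': 1e-6,
               'tolerance_y': 1e-6,
               'x_lower': np.array([[-10.0], [-10.0]]),
               'x_upper': np.array([[10.0], [10.0]])}
    func = lambda X: float(X[0, 0]**2 + X[1, 0]**2)
    Dy = lambda X: 2.0 * X                               # gradient of f, shape (2, 1)
    h = lambda X: np.array([[X[0, 0] + X[1, 0] - 1.0]])  # equality constraint, shape (1, 1)
    Dh = lambda X: np.array([[1.0], [1.0]])              # gradient of h, shape (2, 1)
    g = lambda X: np.array([[-X[0, 0]]])                 # inequality constraint g(X) <= 0
    Dg = lambda X: np.array([[-1.0], [0.0]])             # gradient of g, shape (2, 1)
    X0 = np.array([[2.0], [-1.0]])
    Lambda0 = np.zeros((1, 1))
    Mu0 = np.zeros((1, 1))
    X, report = first_order_lagrangian_algorithm(X0, Lambda0, Mu0, func, Dy, Dh, Dg, h, g, options)
    print('approximate minimizer:', X.ravel())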
|
the-stack_0_21258 | from gcn.inits import *
import tensorflow as tf
flags = tf.app.flags
FLAGS = flags.FLAGS
# global unique layer ID dictionary for layer name assignment
_LAYER_UIDS = {}
def get_layer_uid(layer_name=''):
"""Helper function, assigns unique layer IDs."""
if layer_name not in _LAYER_UIDS:
_LAYER_UIDS[layer_name] = 1
return 1
else:
_LAYER_UIDS[layer_name] += 1
return _LAYER_UIDS[layer_name]
def sparse_dropout(x, keep_prob, noise_shape):
"""Dropout for sparse tensors."""
random_tensor = keep_prob
random_tensor += tf.random_uniform(noise_shape)
dropout_mask = tf.cast(tf.floor(random_tensor), dtype=tf.bool)
pre_out = tf.sparse_retain(x, dropout_mask)
return pre_out * (1./keep_prob)
def dot(x, y, sparse=False):
"""Wrapper for tf.matmul (sparse vs dense)."""
if sparse:
res = tf.sparse_tensor_dense_matmul(x, y)
else:
res = tf.matmul(x, y)
return res
class Layer(object):
"""Base layer class. Defines basic API for all layer objects.
Implementation inspired by keras (http://keras.io).
# Properties
name: String, defines the variable scope of the layer.
logging: Boolean, switches Tensorflow histogram logging on/off
# Methods
_call(inputs): Defines computation graph of layer
(i.e. takes input, returns output)
__call__(inputs): Wrapper for _call()
_log_vars(): Log all variables
"""
def __init__(self, **kwargs):
allowed_kwargs = {'name', 'logging'}
for kwarg in kwargs.keys():
assert kwarg in allowed_kwargs, 'Invalid keyword argument: ' + kwarg
name = kwargs.get('name')
if not name:
layer = self.__class__.__name__.lower()
name = layer + '_' + str(get_layer_uid(layer))
self.name = name
self.vars = {}
logging = kwargs.get('logging', False)
self.logging = logging
self.sparse_inputs = False
def _call(self, inputs):
return inputs
def __call__(self, inputs):
with tf.name_scope(self.name):
if self.logging and not self.sparse_inputs:
tf.summary.histogram(self.name + '/inputs', inputs)
outputs = self._call(inputs)
if self.logging:
tf.summary.histogram(self.name + '/outputs', outputs)
return outputs
def _log_vars(self):
for var in self.vars:
tf.summary.histogram(self.name + '/vars/' + var, self.vars[var])
class Dense(Layer):
"""Dense layer."""
def __init__(self, input_dim, output_dim, placeholders, dropout=0., sparse_inputs=False,
act=tf.nn.relu, bias=False, featureless=False, **kwargs):
super(Dense, self).__init__(**kwargs)
if dropout:
self.dropout = placeholders['dropout']
else:
self.dropout = 0.
self.act = act
self.sparse_inputs = sparse_inputs
self.featureless = featureless
self.bias = bias
# helper variable for sparse dropout
self.num_features_nonzero = placeholders['num_features_nonzero']
with tf.variable_scope(self.name + '_vars'):
self.vars['weights'] = glorot([input_dim, output_dim],
name='weights')
if self.bias:
self.vars['bias'] = zeros([output_dim], name='bias')
if self.logging:
self._log_vars()
def _call(self, inputs):
x = inputs
# dropout
if self.sparse_inputs:
x = sparse_dropout(x, 1-self.dropout, self.num_features_nonzero)
else:
x = tf.nn.dropout(x, 1-self.dropout)
# transform
output = dot(x, self.vars['weights'], sparse=self.sparse_inputs)
# bias
if self.bias:
output += self.vars['bias']
return self.act(output)
class GraphConvolution(Layer):
"""Graph convolution layer."""
def __init__(self, input_dim, output_dim, placeholders, dropout=0.,
sparse_inputs=False, act=tf.nn.relu, bias=False,
featureless=False, **kwargs):
super(GraphConvolution, self).__init__(**kwargs)
if dropout:
self.dropout = placeholders['dropout']
else:
self.dropout = 0.
self.act = act
self.support = placeholders['support']
self.sparse_inputs = sparse_inputs
self.featureless = featureless
self.bias = bias
self.output_dim = output_dim
self.input_dim = input_dim
# helper variable for sparse dropout
self.num_features_nonzero = placeholders['num_features_nonzero']
with tf.variable_scope(self.name + '_vars'):
for i in range(len(self.support)):
self.vars['weights_' + str(i)] = glorot([input_dim, output_dim],
name='weights_' + str(i))
if self.bias:
self.vars['bias'] = zeros([output_dim], name='bias')
if self.logging:
self._log_vars()
def _call(self, inputs):
self.inputs = inputs
x = inputs
# dropout
if self.sparse_inputs:
x = sparse_dropout(x, 1-self.dropout, self.num_features_nonzero)
else:
x = tf.nn.dropout(x, 1-self.dropout)
# convolve
supports = list()
assert len(self.support) == 1
for i in range(len(self.support)):
if not self.featureless:
pre_sup = dot(x, self.vars['weights_' + str(i)],
sparse=self.sparse_inputs)
else:
pre_sup = self.vars['weights_' + str(i)]
support = dot(self.support[i], pre_sup, sparse=True)
supports.append(support)
assert len(supports) == 1
output = tf.add_n(supports)
# bias
if self.bias:
output += self.vars['bias']
return self.act(output)
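    # Added note (not in the original layer code): relprop() below runs a
    # layer-wise relevance propagation pass over this graph convolution. It
    # reads self.input_ and self.weight_ as plain numpy values, which this
    # class never sets itself, so presumably they are attached externally after
    # evaluating the graph (e.g. from a session run); support2 looks like a
    # sparse support matrix addressable by (i, k) pairs, e.g. a scipy dok_matrix.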
def relprop(self, relinp, support2):
z = {}
z_1 = {}
z_2 = {}
if "array" not in str(type(self.input_)):
vals = {}
for a, b in zip(self.input_[0], self.input_[1]):
if a[0] not in vals: vals[(a[0])]=[]
vals[(a[0])].append((a[1], b))
for i, k in support2.keys():
aik = support2[(i, k)]
for j in range(self.output_dim):
if "array" not in str(type(self.input_)):
if i in vals:
for l, val in vals[i]:
temp2 = val
z[(i, j, k, l)] = aik * temp2 * self.weight_[l, j]
if (i, j) not in z_1:
z_1[(i, j)] = []
z_1[(i, j)].append(z[(i, j, k, l)])
if (k, l) not in z_2:
z_2[(k ,l)] = []
z_2[(k, l)].append((z[(i, j, k, l)], (i, j)))
else:
for l in range(self.input_dim):
temp2 = self.input_[k, l]
z[(i, j, k, l)] = aik * temp2 * self.weight_[l, j]
if (i, j) not in z_1:
z_1[(i, j)] = []
z_1[(i, j)].append(z[(i, j, k, l)])
if (k, l) not in z_2:
z_2[(k ,l)] = []
z_2[(k, l)].append((z[(i, j, k, l)], (i, j)))
for i in range(support2.shape[0]):
for j in range(self.output_dim):
temp = 0.0
if (i,j) not in z_1:
continue
for k in z_1[(i, j)]:
temp += k
z_1[(i, j)] = float(relinp[i, j]) / temp
relout = np.zeros((support2.shape[0], self.input_dim))
for k, l in z_2:
ans = 0.0
for val, k in z_2[(k, l)]:
ans += float(val) * z_1[k]
relout[k, l] = ans
return relout
|
the-stack_0_21259 | """ Utilities for working with Images and common neuroimaging spaces
Images are very general things, and don't know anything about the kinds of
spaces they refer to, via their coordinate map.
There are a set of common neuroimaging spaces. When we create neuroimaging
Images, we want to place them in neuroimaging spaces, and return information
about common neuroimaging spaces.
We do this by putting information about neuroimaging spaces in functions and
variables in the ``nipy.core.reference.spaces`` module, and in this module.
This keeps the specific neuroimaging spaces out of our Image object.
>>> from nipy.core.api import Image, vox2mni, rollimg, xyz_affine, as_xyz_image
Make a standard 4D xyzt image in MNI space.
First the data and affine:
>>> data = np.arange(24).reshape((1,2,3,4))
>>> affine = np.diag([2,3,4,1]).astype(float)
We can add the TR (==2.0) to make the full 5x5 affine we need
>>> img = Image(data, vox2mni(affine, 2.0))
>>> img.affine
array([[ 2., 0., 0., 0., 0.],
[ 0., 3., 0., 0., 0.],
[ 0., 0., 4., 0., 0.],
[ 0., 0., 0., 2., 0.],
[ 0., 0., 0., 0., 1.]])
In this case the neuroimaging 'xyz_affine' is just the 4x4 from the 5x5 in the image
>>> xyz_affine(img)
array([[ 2., 0., 0., 0.],
[ 0., 3., 0., 0.],
[ 0., 0., 4., 0.],
[ 0., 0., 0., 1.]])
However, if we roll time first in the image array, we can't any longer get an
xyz_affine that makes sense in relationship to the voxel data:
>>> img_t0 = rollimg(img, 't')
>>> xyz_affine(img_t0) #doctest: +IGNORE_EXCEPTION_DETAIL
Traceback (most recent call last):
...
AxesError: First 3 input axes must correspond to X, Y, Z
But we can fix this:
>>> img_t0_affable = as_xyz_image(img_t0)
>>> xyz_affine(img_t0_affable)
array([[ 2., 0., 0., 0.],
[ 0., 3., 0., 0.],
[ 0., 0., 4., 0.],
[ 0., 0., 0., 1.]])
It also works with nibabel images, which can only have xyz_affines:
>>> import nibabel as nib
>>> nimg = nib.Nifti1Image(data, affine)
>>> xyz_affine(nimg)
array([[ 2., 0., 0., 0.],
[ 0., 3., 0., 0.],
[ 0., 0., 4., 0.],
[ 0., 0., 0., 1.]])
"""
import sys
import numpy as np
from ...fixes.nibabel import io_orientation
from ..image.image import Image
from ..reference import spaces as rsp
from ..reference.coordinate_map import AffineTransform
def xyz_affine(img, name2xyz=None):
""" Return xyz affine from image `img` if possible, or raise error
Parameters
----------
img : ``Image`` instance or nibabel image
It has a ``coordmap`` or method ``get_affine``
name2xyz : None or mapping
Object such that ``name2xyz[ax_name]`` returns 'x', or 'y' or 'z' or
raises a KeyError for a str ``ax_name``. None means use module default.
Not used for nibabel `img` input.
Returns
-------
xyz_aff : (4,4) array
voxel to X, Y, Z affine mapping
Raises
------
SpaceTypeError : if `img` does not have an affine coordinate map
AxesError : if not all of x, y, z recognized in `img` ``coordmap`` range
AffineError : if axes dropped from the affine contribute to x, y, z
coordinates
Examples
--------
>>> from nipy.core.api import vox2mni, Image
>>> arr = np.arange(24).reshape((2,3,4,1)).astype(float)
>>> img = Image(arr, vox2mni(np.diag([2,3,4,5,1])))
>>> img.coordmap
AffineTransform(
function_domain=CoordinateSystem(coord_names=('i', 'j', 'k', 'l'), name='voxels', coord_dtype=float64),
function_range=CoordinateSystem(coord_names=('mni-x=L->R', 'mni-y=P->A', 'mni-z=I->S', 't'), name='mni', coord_dtype=float64),
affine=array([[ 2., 0., 0., 0., 0.],
[ 0., 3., 0., 0., 0.],
[ 0., 0., 4., 0., 0.],
[ 0., 0., 0., 5., 0.],
[ 0., 0., 0., 0., 1.]])
)
>>> xyz_affine(img)
array([[ 2., 0., 0., 0.],
[ 0., 3., 0., 0.],
[ 0., 0., 4., 0.],
[ 0., 0., 0., 1.]])
Nibabel images always have xyz affines
>>> import nibabel as nib
>>> nimg = nib.Nifti1Image(arr, np.diag([2,3,4,1]))
>>> xyz_affine(nimg)
array([[ 2., 0., 0., 0.],
[ 0., 3., 0., 0.],
[ 0., 0., 4., 0.],
[ 0., 0., 0., 1.]])
"""
try:
return img.get_affine()
except AttributeError:
return rsp.xyz_affine(img.coordmap, name2xyz)
def is_xyz_affable(img, name2xyz=None):
""" Return True if the image `img` has an xyz affine
Parameters
----------
img : ``Image`` or nibabel ``SpatialImage``
If ``Image`` test ``img.coordmap``. If a nibabel image, return True
name2xyz : None or mapping
Object such that ``name2xyz[ax_name]`` returns 'x', or 'y' or 'z' or
raises a KeyError for a str ``ax_name``. None means use module default.
Not used for nibabel `img` input.
Returns
-------
tf : bool
True if `img` has an xyz affine, False otherwise
Examples
--------
>>> from nipy.core.api import vox2mni, Image, rollimg
>>> arr = np.arange(24).reshape((2,3,4,1))
>>> img = Image(arr, vox2mni(np.diag([2,3,4,5,1])))
>>> img.coordmap
AffineTransform(
function_domain=CoordinateSystem(coord_names=('i', 'j', 'k', 'l'), name='voxels', coord_dtype=float64),
function_range=CoordinateSystem(coord_names=('mni-x=L->R', 'mni-y=P->A', 'mni-z=I->S', 't'), name='mni', coord_dtype=float64),
affine=array([[ 2., 0., 0., 0., 0.],
[ 0., 3., 0., 0., 0.],
[ 0., 0., 4., 0., 0.],
[ 0., 0., 0., 5., 0.],
[ 0., 0., 0., 0., 1.]])
)
>>> is_xyz_affable(img)
True
>>> time0_img = rollimg(img, 't')
>>> time0_img.coordmap
AffineTransform(
function_domain=CoordinateSystem(coord_names=('l', 'i', 'j', 'k'), name='voxels', coord_dtype=float64),
function_range=CoordinateSystem(coord_names=('mni-x=L->R', 'mni-y=P->A', 'mni-z=I->S', 't'), name='mni', coord_dtype=float64),
affine=array([[ 0., 2., 0., 0., 0.],
[ 0., 0., 3., 0., 0.],
[ 0., 0., 0., 4., 0.],
[ 5., 0., 0., 0., 0.],
[ 0., 0., 0., 0., 1.]])
)
>>> is_xyz_affable(time0_img)
False
Nibabel images always have xyz affines
>>> import nibabel as nib
>>> nimg = nib.Nifti1Image(arr, np.diag([2,3,4,1]))
>>> is_xyz_affable(nimg)
True
"""
try:
xyz_affine(img, name2xyz)
except rsp.SpaceError:
return False
return True
def as_xyz_image(img, name2xyz=None):
""" Return version of `img` that has a valid xyz affine, or raise error
Parameters
----------
img : ``Image`` instance or nibabel image
It has a ``coordmap`` attribute (``Image``) or a ``get_affine`` method
(nibabel image object)
name2xyz : None or mapping
Object such that ``name2xyz[ax_name]`` returns 'x', or 'y' or 'z' or
raises a KeyError for a str ``ax_name``. None means use module default.
Not used for nibabel `img` input.
Returns
-------
reo_img : ``Image`` instance or nibabel image
Returns image of same type as `img` input. If necessary, `reo_img` has
its data and coordmap changed to allow it to return an xyz affine. If
`img` is already xyz affable we return the input unchanged (``img is
reo_img``).
Raises
------
SpaceTypeError : if `img` does not have an affine coordinate map
AxesError : if not all of x, y, z recognized in `img` ``coordmap`` range
AffineError : if axes dropped from the affine contribute to x, y, z
coordinates
"""
try:
aff = xyz_affine(img, name2xyz)
except (rsp.AxesError, rsp.AffineError):
pass
else:
return img
cmap = img.coordmap
order = rsp.xyz_order(cmap.function_range, name2xyz)
# Reorder reference to canonical order
reo_img = img.reordered_reference(order)
# Which input axes correspond?
ornt = io_orientation(reo_img.coordmap.affine)
current_in_order = ornt[:,0]
# Set nan to inf to make np.argsort work for old numpy versions
current_in_order[np.isnan(current_in_order)] = np.inf
# Do we have the first three axes somewhere?
if not set((0,1,2)).issubset(current_in_order):
raise rsp.AxesError("One of x, y or z outputs missing a "
"corresponding input axis")
desired_input_order = np.argsort(current_in_order)
reo_img = reo_img.reordered_axes(list(desired_input_order))
try:
aff = xyz_affine(reo_img, name2xyz)
except rsp.SpaceError:
# Python 2.5 / 3 compatibility
e = sys.exc_info()[1]
raise e.__class__("Could not reorder so xyz coordinates did not "
"depend on the other axis coordinates: " +
str(e))
return reo_img
def make_xyz_image(data, xyz_affine, world, metadata=None):
""" Create 3D+ image embedded in space named in `world`
Parameters
----------
data : object
Object returning array from ``np.asarray(obj)``, and having ``shape``
attribute. Should have at least 3 dimensions (``len(shape) >= 3``), and
these three first 3 dimensions should be spatial
xyz_affine : (4, 4) array-like or tuple
if (4, 4) array-like (the usual case), then an affine relating spatial
dimensions in data (dimensions 0:3) to mm in XYZ space given in `world`.
If a tuple, then contains two values: the (4, 4) array-like, and a
sequence of scalings for the dimensions greater than 3. See examples.
world : str or XYZSpace or CoordSysMaker or CoordinateSystem
World 3D space to which affine refers. See ``spaces.get_world_cs()``
metadata : None or mapping, optional
metadata for created image. Defaults to None, giving empty metadata.
Returns
-------
img : Image
image containing `data`, with coordmap constructed from `affine` and
`world`, and with default voxel input coordinates. If the data has more
than 3 dimensions, and you didn't specify the added zooms with a tuple
`xyz_affine` parameter, the coordmap affine gets filled out with extra
ones on the diagonal to give an (N+1, N+1) affine, with ``N =
len(data.shape)``
Examples
--------
>>> data = np.arange(24).reshape((2, 3, 4))
>>> aff = np.diag([4, 5, 6, 1])
>>> img = make_xyz_image(data, aff, 'mni')
>>> img
Image(
data=array([[[ 0, 1, 2, 3],
[ 4, 5, 6, 7],
[ 8, 9, 10, 11]],
<BLANKLINE>
[[12, 13, 14, 15],
[16, 17, 18, 19],
[20, 21, 22, 23]]]),
coordmap=AffineTransform(
function_domain=CoordinateSystem(coord_names=('i', 'j', 'k'), name='voxels', coord_dtype=float64),
function_range=CoordinateSystem(coord_names=('mni-x=L->R', 'mni-y=P->A', 'mni-z=I->S'), name='mni', coord_dtype=float64),
affine=array([[ 4., 0., 0., 0.],
[ 0., 5., 0., 0.],
[ 0., 0., 6., 0.],
[ 0., 0., 0., 1.]])
))
Now make data 4D; we just add 1. to the diagonal for the new dimension
>>> data4 = data[..., None]
>>> img = make_xyz_image(data4, aff, 'mni')
>>> img.coordmap
AffineTransform(
function_domain=CoordinateSystem(coord_names=('i', 'j', 'k', 'l'), name='voxels', coord_dtype=float64),
function_range=CoordinateSystem(coord_names=('mni-x=L->R', 'mni-y=P->A', 'mni-z=I->S', 't'), name='mni', coord_dtype=float64),
affine=array([[ 4., 0., 0., 0., 0.],
[ 0., 5., 0., 0., 0.],
[ 0., 0., 6., 0., 0.],
[ 0., 0., 0., 1., 0.],
[ 0., 0., 0., 0., 1.]])
)
We can pass in a scalar or tuple to specify scaling for the extra dimension
>>> img = make_xyz_image(data4, (aff, 2.0), 'mni')
>>> img.coordmap.affine
array([[ 4., 0., 0., 0., 0.],
[ 0., 5., 0., 0., 0.],
[ 0., 0., 6., 0., 0.],
[ 0., 0., 0., 2., 0.],
[ 0., 0., 0., 0., 1.]])
>>> data5 = data4[..., None]
>>> img = make_xyz_image(data5, (aff, (2.0, 3.0)), 'mni')
>>> img.coordmap.affine
array([[ 4., 0., 0., 0., 0., 0.],
[ 0., 5., 0., 0., 0., 0.],
[ 0., 0., 6., 0., 0., 0.],
[ 0., 0., 0., 2., 0., 0.],
[ 0., 0., 0., 0., 3., 0.],
[ 0., 0., 0., 0., 0., 1.]])
"""
N = len(data.shape)
if N < 3:
raise ValueError('Need data with at least 3 dimensions')
if type(xyz_affine) is tuple:
xyz_affine, added_zooms = xyz_affine
# Could be scalar added zooms
try:
len(added_zooms)
except TypeError:
added_zooms = (added_zooms,)
if len(added_zooms) != (N - 3):
raise ValueError('Wrong number of added zooms')
else:
added_zooms = (1,) * (N - 3)
xyz_affine = np.asarray(xyz_affine)
if not xyz_affine.shape == (4, 4):
raise ValueError("Expecting 4 x 4 affine")
# Make coordinate map
world_cm = rsp.get_world_cs(world, N)
voxel_cm = rsp.voxel_csm(N)
if N > 3:
affine = np.diag((1., 1, 1) + added_zooms + (1,))
affine[:3, :3] = xyz_affine[:3, :3]
affine[:3, -1] = xyz_affine[:3, 3]
else:
affine = xyz_affine
cmap = AffineTransform(voxel_cm, world_cm, affine)
return Image(data, cmap, metadata)
|
the-stack_0_21260 | # -*- coding: utf-8 -*-
from __future__ import division
import argparse
import bz2 # Compression library
from datetime import datetime
import os
import pickle
import sys
import numpy as np
import torch
from tqdm import trange
from pathlib import Path
# These 2 lines must go before the import from src/
base_dir = Path(__file__).resolve().parent.parent.parent
sys.path.append(str(base_dir))
from flatland.envs.rail_env import RailEnv
from flatland.envs.rail_generators import sparse_rail_generator
from flatland.envs.schedule_generators import sparse_schedule_generator
from flatland.envs.malfunction_generators import malfunction_from_params, MalfunctionParameters
#from flatland.utils.rendertools import RenderTool, AgentRenderVariant
from src.graph_observations import GraphObsForRailEnv
from src.predictions import ShortestPathPredictorForRailEnv
from src.rainbow.agent import RainbowAgent
from src.rainbow.memory import ReplayMemory
from src.rainbow.test import test
def main(args):
# Show options and values
print(' ' * 26 + 'Options')
for k, v in vars(args).items():
print(' ' * 26 + k + ': ' + str(v))
# Where to save models
results_dir = os.path.join('results', args.id)
if not os.path.exists(results_dir):
os.makedirs(results_dir)
# These are saved in a .pth
metrics = {'episodes': [], # originally 'steps'
'rewards': [],
'Qs': [],
'best_avg_done_agents': -float('inf'),
'best_avg_reward': -float('inf')}
np.random.seed(args.seed)
torch.manual_seed(np.random.randint(1, 10000))
# Set cpu or gpu
if torch.cuda.is_available() and not args.disable_cuda:
args.device = torch.device('cuda')
torch.cuda.manual_seed(np.random.randint(1, 10000))
torch.backends.cudnn.enabled = args.enable_cudnn
else:
args.device = torch.device('cpu')
# Simple ISO 8601 timestamped logger
def log(s):
print('[' + str(datetime.now().strftime('%Y-%m-%dT%H:%M:%S')) + '] ' + s)
def load_memory(memory_path, disable_bzip):
if disable_bzip:
with open(memory_path, 'rb') as pickle_file:
return pickle.load(pickle_file)
else:
with bz2.open(memory_path, 'rb') as zipped_pickle_file:
return pickle.load(zipped_pickle_file)
def save_memory(memory, memory_path, disable_bzip):
if disable_bzip:
with open(memory_path, 'wb') as pickle_file:
pickle.dump(memory, pickle_file)
else:
with bz2.open(memory_path, 'wb') as zipped_pickle_file:
pickle.dump(memory, zipped_pickle_file)
rail_generator = sparse_rail_generator(max_num_cities=args.max_num_cities,
seed=args.seed,
grid_mode=args.grid_mode,
max_rails_between_cities=args.max_rails_between_cities,
max_rails_in_city=args.max_rails_in_city,
)
# Maps speeds to % of appearance in the env
speed_ration_map = {1.: 0.25, # Fast passenger train
1. / 2.: 0.25, # Fast freight train
1. / 3.: 0.25, # Slow commuter train
1. / 4.: 0.25} # Slow freight train
schedule_generator = sparse_schedule_generator(speed_ration_map)
stochastic_data = MalfunctionParameters(
malfunction_rate=args.malfunction_rate, # Rate of malfunctions
min_duration=args.min_duration, # Minimal duration
max_duration=args.max_duration # Max duration
)
observation_builder = GraphObsForRailEnv(predictor=ShortestPathPredictorForRailEnv(max_depth=args.prediction_depth))
# Construct the environment with the given observation, generators, predictors, and stochastic data
env = RailEnv(width=args.width,
height=args.height,
rail_generator=rail_generator,
schedule_generator=schedule_generator,
number_of_agents=args.num_agents,
obs_builder_object=observation_builder,
malfunction_generator_and_process_data=malfunction_from_params(stochastic_data)
)
env.reset()
state_size = args.prediction_depth * 4 + 4 # TODO
# action_space = args.network_action_space
network_action_dict = {}
railenv_action_dict = {}
qvalues = {} # Map handle: q value for this step
# Init agent
dqn = RainbowAgent(args, state_size, env)
# If a model is provided, and evaluate is false, presumably we want to resume, so try to load memory
if args.model is not None and not args.evaluate:
if not args.memory:
raise ValueError('Cannot resume training without memory save path. Aborting...')
elif not os.path.exists(args.memory):
raise ValueError('Could not find memory file at {path}. Aborting...'.format(path=args.memory))
mem = load_memory(args.memory, args.disable_bzip_memory)
else:
        # Init one replay buffer for each agent (TODO) Must be updated when the number of agents changes
mems = [ReplayMemory(args, int(args.memory_capacity/args.num_agents)) for a in range(args.num_agents)]
# mem = ReplayMemory(args, args.memory_capacity) # Init empty replay buffer
priority_weight_increase = (1 - args.priority_weight) / (args.T_max - args.learn_start)
# Construct validation memory
val_mem = ReplayMemory(args, args.evaluation_size)
T = 0
all_done = True
update_values = [False] * env.get_num_agents() # Used to update agent if action was performed in this step
# Number of transitions to do for validating Q
print("Validating Q...")
while T < args.evaluation_size:
for a in range(env.get_num_agents()):
if all_done:
state, info = env.reset()
all_done = False
for a in range(env.get_num_agents()):
action = np.random.choice(np.arange(5))
railenv_action_dict.update({a: action})
next_state, reward, done, info = env.step(railenv_action_dict)
val_mem.append(state[0], None, None, all_done) # TODO Using only state from agent 0 for now
all_done = done['__all__']
state = next_state
T += 1
if args.evaluate:
dqn.eval() # Set DQN (online network) to evaluation mode
avg_done_agents, avg_reward, avg_norm_reward = test(args, 0, 0, dqn, val_mem, metrics, results_dir, evaluate=True) # Test
#print('Avg. reward: ' + str(avg_reward) + ' | Avg. Q: ' + str(avg_Q))
print('Avg. done agents: ' + str(avg_done_agents) + ' | Avg. cumulative reward: ' + str(avg_reward) +
' | Avg. normalized reward: ' + str(avg_norm_reward))
else:
# Training loop
print("Training started...")
dqn.train()
################## Episodes loop #######################
for ep in trange(1, args.num_episodes + 1):
# Reset env at the beginning of one episode
state, info = env.reset()
# Pick first action - entering of agents is now random
for a in range(env.get_num_agents()):
action = np.random.choice((0,2))
railenv_action_dict.update({a: action})
next_state, reward, done, info = env.step(railenv_action_dict) # Env first step
############## Steps loop ##########################
for T in range(1, args.T_max + 1):
if T % args.replay_frequency == 0:
dqn.reset_noise() # Draw a new set of noisy weights
for a in range(env.get_num_agents()):
if info['action_required'][a]:
network_action = dqn.act(state[a]) # Choose an action greedily (with noisy weights)
railenv_action = observation_builder.choose_railenv_action(a, network_action)
update_values[a] = True
qvalues.update({a: dqn.get_q_values(state[a])})
else:
network_action = 0
railenv_action = 0
update_values[a] = False
qvalues.update({a: [0, 0]}) # '0' if wasn't updated
# Update action dicts
railenv_action_dict.update({a: railenv_action})
network_action_dict.update({a: network_action})
next_state, reward, done, info = env.step(railenv_action_dict) # Env step
'''
if T == 100: # Print only at 100th steps of each episode
if args.debug:
for a in range(env.get_num_agents()):
print('#########################################')
print('Info for agent {}'.format(a))
print('Occupancy, first layer: {}'.format(state[a][:args.prediction_depth]))
print('Occupancy, second layer: {}'.format(
state[a][args.prediction_depth:args.prediction_depth * 2]))
print('Forks: {}'.format(state[a][args.prediction_depth * 2:args.prediction_depth * 3]))
print('Target: {}'.format(state[a][args.prediction_depth * 3:args.prediction_depth * 4]))
print('Priority: {}'.format(state[a][args.prediction_depth * 4]))
print('Max priority encountered: {}'.format(state[a][args.prediction_depth * 4 + 1]))
print('Num malfunctoning agents (globally): {}'.format(state[a][args.prediction_depth * 4 + 2]))
print(
'Num agents ready to depart (globally): {}'.format(state[a][args.prediction_depth * 4 + 3]))
print('Status: {}'.format(info['status'][a]))
print('Position: {}'.format(env.agents[a].position))
print('Moving? {} at speed: {}'.format(env.agents[a].moving, info['speed'][a]))
print('Action required? {}'.format(info['action_required'][a]))
print('Network action: {}'.format(network_action_dict[a]))
print('Railenv action: {}'.format(railenv_action_dict[a]))
print('Q values: {}'.format(qvalues[a]))
print('Rewards: {}'.format(reward))
'''
# Clip reward and update replay buffer
for a in range(env.get_num_agents()):
'''
* Reward is always in [-1, 1], so we shouldn't need clipping
if args.reward_clip > 0:
reward[a] = max(min(reward[a], args.reward_clip), -args.reward_clip)
'''
if update_values[a]: # Store transition only if this agent performed action in this time step
mems[a].append(state[a], network_action_dict[a], reward[a], done[a]) # Append to own buffer
#mem.append(state[a], network_action_dict[a], reward[a], done[a]) # Append transition to memory
# print('Clipped rewards: {}'.format(reward))
state = next_state.copy()
# Train and test
if ep >= args.learn_start: # Give time to accumulate experiences
# Anneal importance sampling weight β to 1
#mem.priority_weight = min(mem.priority_weight + priority_weight_increase, 1)
for a in range(args.num_agents):
mems[a].priority_weight = min(mems[a].priority_weight + priority_weight_increase, 1)
if T % args.replay_frequency == 0:
a = np.random.choice(np.arange(args.num_agents))
dqn.learn(mems[a]) # Learn randomly from one of the available replay buffer
# dqn.learn(mem) # Train with n-step distributional double-Q learning
# Update target network
if T % args.target_update == 0:
dqn.update_target_net()
if done['__all__']:
break
##### EPISODE END ##############
if (ep % args.evaluation_interval) == 0: # Evaluate only at the end of the episodes
dqn.eval() # Set DQN (online network) to evaluation mode
avg_done_agents, avg_reward, avg_norm_reward = test(args, T, ep, dqn, val_mem, metrics, results_dir) # Test
log(
'T = ' + str(T) + ' / ' + str(args.T_max) + ' | Avg. done agents: ' + str(avg_done_agents) +
' | Avg. reward: ' + str(avg_reward) + ' | Avg. normalized reward: ' + str(avg_norm_reward))
dqn.train() # Set DQN (online network) back to training mode
# If memory path provided, save it
if args.memory is not None:
save_memory(mems[0], args.memory, args.disable_bzip_memory) # Save only first replay buffer (?)
# save_memory(mem, args.memory, args.disable_bzip_memory)
# Checkpoint the network every 'checkpoint_interval' episodes
if (args.checkpoint_interval != 0) and (ep % args.checkpoint_interval == 0):
dqn.save(results_dir, 'checkpoint.pth')
if __name__ == '__main__':
# Hyperparameters
parser = argparse.ArgumentParser(description='Rainbow')
parser.add_argument('--id', type=str, default='default', help='Experiment ID')
parser.add_argument('--seed', type=int, default=123, help='Random seed')
parser.add_argument('--disable-cuda', action='store_true', help='Disable CUDA')
parser.add_argument('--T-max', type=int, default=int(50e6), metavar='STEPS', help='Number of training steps (4x number of frames)')
parser.add_argument('--max-episode-length', type=int, default=int(108e3), metavar='LENGTH', help='Max episode length in game frames (0 to disable)')
parser.add_argument('--history-length', type=int, default=4, metavar='T', help='Number of consecutive states processed')
parser.add_argument('--hidden-size', type=int, default=512, metavar='SIZE', help='Network hidden size')
parser.add_argument('--noisy-std', type=float, default=0.1, metavar='σ', help='Initial standard deviation of noisy linear layers')
parser.add_argument('--atoms', type=int, default=51, metavar='C', help='Discretised size of value distribution')
parser.add_argument('--V-min', type=float, default=-10, metavar='V', help='Minimum of value distribution support')
parser.add_argument('--V-max', type=float, default=10, metavar='V', help='Maximum of value distribution support')
parser.add_argument('--model', type=str, metavar='PARAMS', help='Pretrained model (state dict)')
parser.add_argument('--memory-capacity', type=int, default=int(1e6), metavar='CAPACITY', help='Experience replay memory capacity')
parser.add_argument('--replay-frequency', type=int, default=4, metavar='k', help='Frequency of sampling from memory')
parser.add_argument('--priority-exponent', type=float, default=0.5, metavar='ω', help='Prioritised experience replay exponent (originally denoted α)')
parser.add_argument('--priority-weight', type=float, default=0.4, metavar='β', help='Initial prioritised experience replay importance sampling weight')
parser.add_argument('--multi-step', type=int, default=3, metavar='n', help='Number of steps for multi-step return')
parser.add_argument('--discount', type=float, default=0.95, metavar='γ', help='Discount factor')
parser.add_argument('--target-update', type=int, default=int(8e3), metavar='τ', help='Number of steps after which to update target network')
parser.add_argument('--reward-clip', type=int, default=1, metavar='VALUE', help='Reward clipping (0 to disable)')
parser.add_argument('--learning-rate', type=float, default=0.0000625, metavar='η', help='Learning rate')
parser.add_argument('--adam-eps', type=float, default=1.5e-4, metavar='ε', help='Adam epsilon')
parser.add_argument('--batch-size', type=int, default=32, metavar='SIZE', help='Batch size')
parser.add_argument('--learn-start', type=int, default=int(20e3), metavar='EPISODES', help='Number of episodes before starting training')
parser.add_argument('--evaluate', action='store_true', help='Evaluate only')
parser.add_argument('--evaluation-interval', type=int, default=100000, metavar='EPISODES', help='Number of episodes between evaluations')
parser.add_argument('--evaluation-episodes', type=int, default=10, metavar='N', help='Number of evaluation episodes to average over')
# TODO: Note that DeepMind's evaluation method is running the latest agent for 500K frames ever every 1M steps
parser.add_argument('--evaluation-size', type=int, default=500, metavar='N', help='Number of transitions to use for validating Q')
parser.add_argument('--render', action='store_true', help='Display screen (testing only)')
parser.add_argument('--enable-cudnn', action='store_true', help='Enable cuDNN (faster but nondeterministic)')
parser.add_argument('--checkpoint-interval', type=int, default=0, help='How often to checkpoint the model, defaults to 0 (never checkpoint)')
parser.add_argument('--memory', help='Path to save/load the memory from')
parser.add_argument('--disable-bzip-memory', action='store_true', help='Don\'t zip the memory file. Not recommended (zipping is a bit slower and much, much smaller)')
parser.add_argument('--debug', action='store_true', help='Print more info during execution')
# Env parameters
# parser.add_argument('--state_size', type=int, help='Size of state to feed to the neural network') # Depends on prediction_depth
parser.add_argument('--network-action-space', type=int, default=2, help='Number of actions allowed in the environment')
parser.add_argument('--width', type=int, default=100, help='Environment width')
parser.add_argument('--height', type=int, default=100, help='Environment height')
parser.add_argument('--num-agents', type=int, default=50, help='Number of agents in the environment')
parser.add_argument('--max-num-cities', type=int, default=6, help='Maximum number of cities where agents can start or end')
# parser.add_argument('--seed', type=int, default=1, help='Seed used to generate grid environment randomly')
parser.add_argument('--grid-mode', type=bool, default=False, help='Type of city distribution, if False cities are randomly placed')
parser.add_argument('--max-rails-between-cities', type=int, default=4, help='Max number of tracks allowed between cities, these count as entry points to a city')
parser.add_argument('--max-rails-in-city', type=int, default=6, help='Max number of parallel tracks within a city allowed')
parser.add_argument('--malfunction_rate', type=int, default=1000, help='Rate of malfunction occurrence of single agent')
parser.add_argument('--min-duration', type=int, default=20, help='Min duration of malfunction')
parser.add_argument('--max-duration', type=int, default=50, help='Max duration of malfunction')
parser.add_argument('--observation-builder', type=str, default='GraphObsForRailEnv', help='Class to use to build observation for agent')
parser.add_argument('--predictor', type=str, default='ShortestPathPredictorForRailEnv', help='Class used to predict agent paths and help observation building')
# parser.add_argument('--bfs-depth', type=int, default=4, help='BFS depth of the graph observation')
parser.add_argument('--prediction-depth', type=int, default=108, help='Prediction depth for shortest path strategy, i.e. length of a path')
# parser.add_argument('--view-semiwidth', type=int, default=7, help='Semiwidth of field view for agent in local obs')
# parser.add_argument('--view-height', type=int, default=30, help='Height of the field view for agent in local obs')
# parser.add_argument('--offset', type=int, default=10, help='Offset of agent in local obs')
# Training parameters
parser.add_argument('--num-episodes', type=int, default=1000, help='Number of episodes on which to train the agents')
# Setup
args = parser.parse_args()
# Check arguments
'''
if args.offset > args.height:
raise ValueError("Agent offset can't be greater than view height in local obs")
if args.offset < 0:
raise ValueError("Agent offset must be a positive integer")
'''
main(args) |
the-stack_0_21261 | from __future__ import unicode_literals
import asyncio
import os
import traceback
import pytgcalls
from pyrogram import filters, idle
from pyrogram.errors.exceptions.bad_request_400 import \
ChatAdminRequired
from pyrogram.raw.functions.phone import CreateGroupCall
from pyrogram.raw.types import InputPeerChannel
from pyrogram.types import Message
# Initialize db
import db
db.init()
from db import db
from functions import (CHAT_ID, app, get_default_service, play_song,
session, telegram, BITRATE)
from misc import HELP_TEXT, REPO_TEXT
running = False # Tells if the queue is running or not
CLIENT_TYPE = pytgcalls.GroupCallFactory.MTPROTO_CLIENT_TYPE.PYROGRAM
PLAYOUT_FILE = "input.raw"
PLAY_LOCK = asyncio.Lock()
OUTGOING_AUDIO_BITRATE_KBIT = BITRATE
@app.on_message(
filters.command("dj_vlados_help")
& ~filters.private
& filters.chat(CHAT_ID)
)
async def help(_, message):
await message.reply_text(HELP_TEXT, quote=False)
@app.on_message(
filters.command("dj_vlados_join")
& ~filters.private
& filters.chat(CHAT_ID)
)
async def joinvc(_, message, manual=False):
if "call" in db:
return await message.reply_text(
"__**Я уже за вертушками, долбоеб**__"
)
os.popen(f"cp etc/sample_input.raw {PLAYOUT_FILE}")
vc = pytgcalls.GroupCallFactory(
app, CLIENT_TYPE, OUTGOING_AUDIO_BITRATE_KBIT
).get_file_group_call(PLAYOUT_FILE)
db["call"] = vc
try:
await vc.start(CHAT_ID)
except Exception:
peer = await app.resolve_peer(CHAT_ID)
startVC = CreateGroupCall(
peer=InputPeerChannel(
channel_id=peer.channel_id,
access_hash=peer.access_hash,
),
random_id=app.rnd_id() // 9000000000,
)
try:
await app.send(startVC)
await vc.start(CHAT_ID)
except ChatAdminRequired:
del db["call"]
return await message.reply_text(
"Сделай меня админом со всеми правами и поиграю тебе пластинки"
)
await message.reply_text(
"__**DJ Vlados ворвался за пульт и готов крутить музло.**__ \n\n**Псссс:** __Если нихуя не слышно,"
+ " напиши /dj_vlados_leave а потом /dj_vlados_join еще раз.__"
)
await message.delete()
@app.on_message(
filters.command("dj_vlados_leave")
& ~filters.private
& filters.chat(CHAT_ID)
)
async def leavevc(_, message):
if "call" in db:
await db["call"].leave_current_group_call()
await db["call"].stop()
del db["call"]
await message.reply_text("__**Пока уебки, для вас свой сет играл DJ Vlados**__")
await message.delete()
@app.on_message(
filters.command("volume")
& ~filters.private
& filters.chat(CHAT_ID)
)
async def volume_bot(_, message):
usage = "**Настройки громкости:**\n/volume [1-200]"
if len(message.command) != 2:
return await message.reply_text(usage)
if "call" not in db:
return await message.reply_text("Я не у пульта")
vc = db["call"]
volume = int(message.text.split(None, 1)[1])
if (volume < 1) or (volume > 200):
return await message.reply_text(usage)
try:
await vc.set_my_volume(volume=volume)
except ValueError:
return await message.reply_text(usage)
await message.reply_text(f"**Громкость выставил на {volume}**")
@app.on_message(
filters.command("pause")
& ~filters.private
& filters.chat(CHAT_ID)
)
async def pause_song_func(_, message):
if "call" not in db:
return await message.reply_text("**Я не у пульта**")
if "paused" in db:
if db.get("paused"):
return await message.reply_text("**Уже на паузе, чел**")
db["paused"] = True
db["call"].pause_playout()
await message.reply_text(
"**Воу воу воу, поставил на паузу. Давай скорее продолжим `/resume` **"
)
@app.on_message(
filters.command("resume")
& ~filters.private
& filters.chat(CHAT_ID)
)
async def resume_song(_, message):
if "call" not in db:
return await message.reply_text("**Я не у пульта**")
if "paused" in db:
if not db.get("paused"):
return await message.reply_text("**Да уже играем музяку**")
db["paused"] = False
db["call"].resume_playout()
await message.reply_text(
"**Погнали дальше! Но если опять нужна пауза, ты знаешь — `/pause` **"
)
@app.on_message(
filters.command("skip") & ~filters.private & filters.chat(CHAT_ID)
)
async def skip_func(_, message):
if "queue" not in db:
await message.reply_text("**Я не у пульта**")
return await message.delete()
queue = db["queue"]
if queue.empty() and ("playlist" not in db or not db["playlist"]):
await message.reply_text(
"__**Заявки на музыку пусты. Как и смысл твоей жизни**__"
)
return await message.delete()
db["skipped"] = True
await message.reply_text("__**Правильно, нахуй эту песенку! Следующая!**__")
await message.delete()
@app.on_message(
filters.command("play") & ~filters.private & filters.chat(CHAT_ID)
)
async def queuer(_, message):
global running
try:
usage = """
**Как заказать музыку:**
__/play Название_песни__
__/play Ответом_на_файл__"""
async with PLAY_LOCK:
if (
len(message.command) < 2
and not message.reply_to_message
):
return await message.reply_text(usage)
if "call" not in db:
return await message.reply_text(
"**Ты приходишь сюда, просишь что-то поставить... Но ты делаешь это неуважительно. Ты не предлагаешь мне сперва встать за пульт — /dj_vlados_join **"
)
if message.reply_to_message:
if message.reply_to_message.audio:
service = "telegram"
song_name = message.reply_to_message.audio.title
else:
return await message.reply_text(
"**Не, ответом на сообщение с файлом (mp3, wav, mp4 и тд)**"
)
else:
text = message.text.split("\n")[0]
text = text.split(None, 2)[1:]
service = text[0].lower()
services = ["youtube", "saavn"]
if service in services:
song_name = text[1]
else:
service = get_default_service()
song_name = " ".join(text)
if "http" in song_name or ".com" in song_name:
return await message.reply("Никаких ссылок")
requested_by = message.from_user.first_name
if "queue" not in db:
db["queue"] = asyncio.Queue()
if not db["queue"].empty() or db.get("running"):
await message.reply_text("__**Добавил в очередь__**")
await db["queue"].put(
{
"service": service or telegram,
"requested_by": requested_by,
"query": song_name,
"message": message,
}
)
if not db.get("running"):
db["running"] = True
await start_queue()
except Exception as e:
await message.reply_text(str(e))
e = traceback.format_exc()
print(e)
@app.on_message(
filters.command("queue")
& ~filters.private
& filters.chat(CHAT_ID)
)
async def queue_list(_, message):
if "queue" not in db:
db["queue"] = asyncio.Queue()
queue = db["queue"]
if queue.empty():
return await message.reply_text(
"__**Заявки на музыку пусты. Как и смысл твоей жизни**__"
)
if (
len(message.text.split()) > 1
and message.text.split()[1].lower() == "plformat"
):
pl_format = True
else:
pl_format = False
text = ""
for count, song in enumerate(queue._queue, 1):
if not pl_format:
text += (
f"**{count}. {song['service']}** "
+ f"| __{song['query']}__ | {song['requested_by']}\n"
)
else:
text += song["query"] + "\n"
if len(text) > 4090:
return await message.reply_text(
f"**У меня тут треков в количестве {queue.qsize()} шт. в заявках...**"
)
await message.reply_text(text)
# Queue handler
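# Descriptive note (added comment): start_queue drains db["queue"] one entry at
# a time; a positive db["queue_breaker"] lets /playlist interrupt a running
# loop, and when the queue empties while a playlist is active the playlist
# handler is re-entered to refill it before the next get().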
async def start_queue(message=None):
while db:
if "queue_breaker" in db and db.get("queue_breaker") != 0:
db["queue_breaker"] -= 1
if db["queue_breaker"] == 0:
del db["queue_breaker"]
break
if db["queue"].empty():
if "playlist" not in db or not db["playlist"]:
db["running"] = False
break
else:
await playlist(app, message, redirected=True)
data = await db["queue"].get()
service = data["service"]
if service == "telegram":
await telegram(data["message"])
else:
await play_song(
data["requested_by"],
data["query"],
data["message"],
service,
)
@app.on_message(
filters.command("delqueue")
& ~filters.private
& filters.chat(CHAT_ID)
)
async def clear_queue(_, message):
global db
if "call" not in db:
return await message.reply_text("**Я не у пульта**")
if ("queue" not in db or db["queue"].empty()) and (
"playlist" not in db or not db["playlist"]
):
return await message.reply_text("**Заявки уже и так пусты...**")
db["playlist"] = False
db["queue"] = asyncio.Queue()
await message.reply_text("**Нахуй всё это, начинаем с чистого листа заявок**")
@app.on_message(
filters.command("playlist")
& ~filters.private
& filters.chat(CHAT_ID)
)
async def playlist(_, message: Message, redirected=False):
if message.reply_to_message:
raw_playlist = message.reply_to_message.text
elif len(message.text) > 9:
raw_playlist = message.text[10:]
else:
usage = """
**Как юзать плейлист: да так же как и /play
Например:
__**/playlist название_песни1
название_песни2
название_песни3**__"""
return await message.reply_text(usage)
if "call" not in db:
return await message.reply_text("**Ты приходишь сюда, просишь что-то поставить... Но ты делаешь это неуважительно. Ты не предлагаешь мне сперва встать за пульт — /dj_vlados_join **")
if "playlist" not in db:
db["playlist"] = False
if "running" in db and db.get("running"):
db["queue_breaker"] = 1
db["playlist"] = True
db["queue"] = asyncio.Queue()
for line in raw_playlist.split("\n"):
services = ["youtube", "saavn"]
if line.split()[0].lower() in services:
service = line.split()[0].lower()
song_name = " ".join(line.split()[1:])
else:
service = "youtube"
song_name = line
requested_by = message.from_user.first_name
await db["queue"].put(
{
"service": service or telegram,
"requested_by": requested_by,
"query": song_name,
"message": message,
}
)
if not redirected:
db["running"] = True
await message.reply_text("**Начинаем играть плейлист**")
await start_queue(message)
async def main():
await app.start()
print("Bot started!")
await idle()
await session.close()
loop = asyncio.get_event_loop()
loop.run_until_complete(main())
|
the-stack_0_21262 | """
Utility functions for
- building and importing modules on test time, using a temporary location
- detecting if compilers are present
"""
import os
import sys
import subprocess
import tempfile
import shutil
import atexit
import textwrap
import re
import pytest
from numpy.compat import asbytes, asstr
from numpy.testing import temppath
from importlib import import_module
#
# Maintaining a temporary module directory
#
_module_dir = None
_module_num = 5403
def _cleanup():
global _module_dir
if _module_dir is not None:
try:
sys.path.remove(_module_dir)
except ValueError:
pass
try:
shutil.rmtree(_module_dir)
except OSError:
pass
_module_dir = None
def get_module_dir():
global _module_dir
if _module_dir is None:
_module_dir = tempfile.mkdtemp()
atexit.register(_cleanup)
if _module_dir not in sys.path:
sys.path.insert(0, _module_dir)
return _module_dir
def get_temp_module_name():
# Assume single-threaded, and the module dir usable only by this thread
global _module_num
d = get_module_dir()
name = "_test_ext_module_%d" % _module_num
_module_num += 1
if name in sys.modules:
# this should not be possible, but check anyway
raise RuntimeError("Temporary module name already in use.")
return name
def _memoize(func):
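    # Added note: results are cached keyed on the repr of the call arguments;
    # exceptions are cached as well and re-raised on subsequent calls, so a
    # failed build is not silently retried.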
memo = {}
def wrapper(*a, **kw):
key = repr((a, kw))
if key not in memo:
try:
memo[key] = func(*a, **kw)
except Exception as e:
memo[key] = e
raise
ret = memo[key]
if isinstance(ret, Exception):
raise ret
return ret
wrapper.__name__ = func.__name__
return wrapper
#
# Building modules
#
@_memoize
def build_module(source_files, options=[], skip=[], only=[], module_name=None):
"""
Compile and import a f2py module, built from the given files.
"""
code = ("import sys; sys.path = %s; import numpy.f2py as f2py2e; "
"f2py2e.main()" % repr(sys.path))
d = get_module_dir()
# Copy files
dst_sources = []
f2py_sources = []
for fn in source_files:
if not os.path.isfile(fn):
raise RuntimeError("%s is not a file" % fn)
dst = os.path.join(d, os.path.basename(fn))
shutil.copyfile(fn, dst)
dst_sources.append(dst)
base, ext = os.path.splitext(dst)
if ext in ('.f90', '.f', '.c', '.pyf'):
f2py_sources.append(dst)
# Prepare options
if module_name is None:
module_name = get_temp_module_name()
f2py_opts = ['-c', '-m', module_name] + options + f2py_sources
if skip:
f2py_opts += ['skip:'] + skip
if only:
f2py_opts += ['only:'] + only
# Build
cwd = os.getcwd()
try:
os.chdir(d)
cmd = [sys.executable, '-c', code] + f2py_opts
p = subprocess.Popen(cmd, stdout=subprocess.PIPE,
stderr=subprocess.STDOUT)
out, err = p.communicate()
if p.returncode != 0:
raise RuntimeError("Running f2py failed: %s\n%s"
% (cmd[4:], asstr(out)))
finally:
os.chdir(cwd)
# Partial cleanup
for fn in dst_sources:
os.unlink(fn)
# Import
return import_module(module_name)
@_memoize
def build_code(source_code, options=[], skip=[], only=[], suffix=None,
module_name=None):
"""
Compile and import Fortran code using f2py.
"""
if suffix is None:
suffix = '.f'
with temppath(suffix=suffix) as path:
with open(path, 'w') as f:
f.write(source_code)
return build_module([path], options=options, skip=skip, only=only,
module_name=module_name)
#
# Check if compilers are available at all...
#
_compiler_status = None
def _get_compiler_status():
global _compiler_status
if _compiler_status is not None:
return _compiler_status
_compiler_status = (False, False, False)
# XXX: this is really ugly. But I don't know how to invoke Distutils
# in a safer way...
code = textwrap.dedent("""\
import os
import sys
sys.path = %(syspath)s
def configuration(parent_name='',top_path=None):
global config
from numpy.distutils.misc_util import Configuration
config = Configuration('', parent_name, top_path)
return config
from numpy.distutils.core import setup
setup(configuration=configuration)
config_cmd = config.get_config_cmd()
have_c = config_cmd.try_compile('void foo() {}')
print('COMPILERS:%%d,%%d,%%d' %% (have_c,
config.have_f77c(),
config.have_f90c()))
sys.exit(99)
""")
code = code % dict(syspath=repr(sys.path))
tmpdir = tempfile.mkdtemp()
try:
script = os.path.join(tmpdir, 'setup.py')
with open(script, 'w') as f:
f.write(code)
cmd = [sys.executable, 'setup.py', 'config']
p = subprocess.Popen(cmd, stdout=subprocess.PIPE,
stderr=subprocess.STDOUT,
cwd=tmpdir)
out, err = p.communicate()
finally:
shutil.rmtree(tmpdir)
m = re.search(br'COMPILERS:(\d+),(\d+),(\d+)', out)
if m:
_compiler_status = (bool(int(m.group(1))), bool(int(m.group(2))),
bool(int(m.group(3))))
# Finished
return _compiler_status
def has_c_compiler():
return _get_compiler_status()[0]
def has_f77_compiler():
return _get_compiler_status()[1]
def has_f90_compiler():
return _get_compiler_status()[2]
#
# Building with distutils
#
@_memoize
def build_module_distutils(source_files, config_code, module_name, **kw):
"""
Build a module via distutils and import it.
"""
d = get_module_dir()
# Copy files
dst_sources = []
for fn in source_files:
if not os.path.isfile(fn):
raise RuntimeError("%s is not a file" % fn)
dst = os.path.join(d, os.path.basename(fn))
shutil.copyfile(fn, dst)
dst_sources.append(dst)
# Build script
config_code = textwrap.dedent(config_code).replace("\n", "\n ")
code = textwrap.dedent("""\
import os
import sys
sys.path = %(syspath)s
def configuration(parent_name='',top_path=None):
from numpy.distutils.misc_util import Configuration
config = Configuration('', parent_name, top_path)
%(config_code)s
return config
if __name__ == "__main__":
from numpy.distutils.core import setup
setup(configuration=configuration)
""") % dict(config_code=config_code, syspath=repr(sys.path))
script = os.path.join(d, get_temp_module_name() + '.py')
dst_sources.append(script)
with open(script, 'wb') as f:
f.write(asbytes(code))
# Build
cwd = os.getcwd()
try:
os.chdir(d)
cmd = [sys.executable, script, 'build_ext', '-i']
p = subprocess.Popen(cmd, stdout=subprocess.PIPE,
stderr=subprocess.STDOUT)
out, err = p.communicate()
if p.returncode != 0:
raise RuntimeError("Running distutils build failed: %s\n%s"
% (cmd[4:], asstr(out)))
finally:
os.chdir(cwd)
# Partial cleanup
for fn in dst_sources:
os.unlink(fn)
# Import
__import__(module_name)
return sys.modules[module_name]
#
# Unittest convenience
#
class F2PyTest:
code = None
sources = None
options = []
skip = []
only = []
suffix = '.f'
module = None
module_name = None
def setup(self):
if sys.platform == 'win32':
pytest.skip('Fails with MinGW64 Gfortran (Issue #9673)')
if self.module is not None:
return
# Check compiler availability first
if not has_c_compiler():
pytest.skip("No C compiler available")
codes = []
if self.sources:
codes.extend(self.sources)
if self.code is not None:
codes.append(self.suffix)
needs_f77 = False
needs_f90 = False
for fn in codes:
if fn.endswith('.f'):
needs_f77 = True
elif fn.endswith('.f90'):
needs_f90 = True
if needs_f77 and not has_f77_compiler():
pytest.skip("No Fortran 77 compiler available")
if needs_f90 and not has_f90_compiler():
pytest.skip("No Fortran 90 compiler available")
# Build the module
if self.code is not None:
self.module = build_code(self.code, options=self.options,
skip=self.skip, only=self.only,
suffix=self.suffix,
module_name=self.module_name)
if self.sources is not None:
self.module = build_module(self.sources, options=self.options,
skip=self.skip, only=self.only,
module_name=self.module_name)
|
the-stack_0_21263 | import schematics_to_swagger
from tests import models
WEATHER_REPORT_DEFINITION = {
'title': 'WeatherReport',
'type': 'object',
'description': 'Some sample class for Weather report',
'properties': {
'city': {
'type': 'string',
'maxLength': 50,
'readOnly': True
},
'temperature': {
'type': 'number',
'format': 'double',
'required': True
},
'author': {
'type': 'string',
'format': 'email',
},
'some_url': {
'type': 'string',
'format': 'uri',
},
'taken_at': {
'type': 'string',
'format': 'date-time'
}
}
}
WEATHER_STATS_DEF = {
'title': 'WeatherStats',
'type': 'object',
'description': None,
'properties': {
'last_report': {'$ref': '#/definitions/WeatherReport'},
'prev_reports': {
'type': 'array',
'items': {
'$ref': '#/definitions/WeatherReport'
}
},
'date_list': {
'type': 'array',
'items': {
'type': 'string',
'format': 'date-time'
}
}
},
}
def test_model_to_definition():
expected = WEATHER_REPORT_DEFINITION
definition = schematics_to_swagger.model_to_definition(models.WeatherReport)
assert expected == definition
def test_read_models_from_module():
expected = {
'WeatherReport': WEATHER_REPORT_DEFINITION,
'WeatherStats': WEATHER_STATS_DEF
}
data = schematics_to_swagger.read_models_from_module(models)
assert expected == data
def test_compound_type():
expected = WEATHER_STATS_DEF
data = schematics_to_swagger.model_to_definition(models.WeatherStats)
assert expected == data
|
the-stack_0_21268 | # Copyright 2014: Mirantis Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from docutils import frontend
from docutils import nodes
from docutils.parsers import rst
from docutils import utils
from rally.common.plugin import plugin
from rally.common import validation
from rally import plugins
from rally.task import scenario
from tests.unit import test
def _parse_rst(text):
parser = rst.Parser()
settings = frontend.OptionParser(
components=(rst.Parser,)).get_default_values()
document = utils.new_document(text, settings)
parser.parse(text, document)
return document.children
class DocstringsTestCase(test.TestCase):
def setUp(self):
super(DocstringsTestCase, self).setUp()
plugins.load()
def _validate_code_block(self, plg_cls, code_block):
ignored_params = ["self", "scenario_obj"]
params_count = code_block.co_argcount
params = code_block.co_varnames[:params_count]
param_data = plg_cls.get_info()["parameters"]
documented_params = [p["name"] for p in param_data]
result = []
for param in params:
if param not in ignored_params:
if param not in documented_params:
msg = ("Class: %(class)s Docstring for "
"%(scenario)s should"
" describe the '%(param)s' parameter"
" in the :param <name>: clause."
% {"class": plg_cls.__name__,
"scenario": plg_cls.get_name(),
"param": param})
result.append(msg)
return result
# the list of plugin names which use rst definitions in their docstrings
_HAS_VALID_DEFINITIONS = []
def _validate_rst(self, plugin_name, text, msg_buffer):
parsed_docstring = _parse_rst(text)
for item in parsed_docstring:
if (isinstance(item, nodes.definition_list)
and plugin_name not in self._HAS_VALID_DEFINITIONS):
msg_buffer.append("Plugin %s has a docstring with invalid "
"format. Re-check intend and required empty "
"lines between the list title and list "
"items." % plugin_name)
elif isinstance(item, nodes.system_message):
msg_buffer.append(
"A warning is caught while parsing docstring of '%s' "
"plugin: %s" % (plugin_name, item.astext()))
def _check_docstrings(self, msg_buffer):
for plg_cls in plugin.Plugin.get_all():
if not plg_cls.__module__.startswith("rally_openstack."):
continue
name = "%s (%s.%s)" % (plg_cls.get_name(),
plg_cls.__module__,
plg_cls.__name__)
doc_info = plg_cls.get_info()
if not doc_info["title"]:
msg_buffer.append("Plugin '%s' should have a docstring."
% name)
if doc_info["title"].startswith("Test"):
msg_buffer.append("One-line description for %s"
" should be declarative and not"
" start with 'Test(s) ...'"
% name)
# NOTE(andreykurilin): I never saw any real usage of
# reStructuredText definitions in our docstrings. In most cases,
# "definitions" means that there is an issue with intends or
# missed empty line before the list title and list items.
if doc_info["description"]:
self._validate_rst(plg_cls.get_name(),
doc_info["description"],
msg_buffer)
def _check_described_params(self, msg_buffer):
for plg_cls in plugin.Plugin.get_all():
msg = []
if hasattr(plg_cls, "run") and issubclass(
plg_cls, scenario.Scenario):
msg = self._validate_code_block(plg_cls,
plg_cls.run.__code__)
elif hasattr(plg_cls, "validate") and issubclass(
plg_cls, validation.Validator):
msg = self._validate_code_block(plg_cls,
plg_cls.__init__.__code__)
            if msg:
                msg_buffer.extend(msg)
def test_all_plugins_have_docstrings(self):
msg_buffer = []
self._check_docstrings(msg_buffer)
self._check_described_params(msg_buffer)
if msg_buffer:
self.fail("\n%s" % "\n===============\n".join(msg_buffer))
|
the-stack_0_21269 | """Support for ASUSWRT devices."""
import voluptuous as vol
from homeassistant.config_entries import SOURCE_IMPORT, ConfigEntry
from homeassistant.const import (
CONF_HOST,
CONF_MODE,
CONF_PASSWORD,
CONF_PORT,
CONF_PROTOCOL,
CONF_SENSORS,
CONF_USERNAME,
EVENT_HOMEASSISTANT_STOP,
Platform,
)
from homeassistant.core import HomeAssistant
from homeassistant.helpers import config_validation as cv
from homeassistant.helpers.typing import ConfigType
from .const import (
CONF_DNSMASQ,
CONF_INTERFACE,
CONF_REQUIRE_IP,
CONF_SSH_KEY,
DATA_ASUSWRT,
DEFAULT_DNSMASQ,
DEFAULT_INTERFACE,
DEFAULT_SSH_PORT,
DOMAIN,
MODE_AP,
MODE_ROUTER,
PROTOCOL_SSH,
PROTOCOL_TELNET,
)
from .router import AsusWrtRouter
PLATFORMS = [Platform.DEVICE_TRACKER, Platform.SENSOR]
CONF_PUB_KEY = "pub_key"
SECRET_GROUP = "Password or SSH Key"
SENSOR_TYPES = ["devices", "upload_speed", "download_speed", "download", "upload"]
CONFIG_SCHEMA = vol.Schema(
vol.All(
cv.deprecated(DOMAIN),
{
DOMAIN: vol.Schema(
{
vol.Required(CONF_HOST): cv.string,
vol.Required(CONF_USERNAME): cv.string,
vol.Optional(CONF_PROTOCOL, default=PROTOCOL_SSH): vol.In(
[PROTOCOL_SSH, PROTOCOL_TELNET]
),
vol.Optional(CONF_MODE, default=MODE_ROUTER): vol.In(
[MODE_ROUTER, MODE_AP]
),
vol.Optional(CONF_PORT, default=DEFAULT_SSH_PORT): cv.port,
vol.Optional(CONF_REQUIRE_IP, default=True): cv.boolean,
vol.Exclusive(CONF_PASSWORD, SECRET_GROUP): cv.string,
vol.Exclusive(CONF_SSH_KEY, SECRET_GROUP): cv.isfile,
vol.Exclusive(CONF_PUB_KEY, SECRET_GROUP): cv.isfile,
vol.Optional(CONF_SENSORS): vol.All(
cv.ensure_list, [vol.In(SENSOR_TYPES)]
),
vol.Optional(CONF_INTERFACE, default=DEFAULT_INTERFACE): cv.string,
vol.Optional(CONF_DNSMASQ, default=DEFAULT_DNSMASQ): cv.string,
}
)
},
),
extra=vol.ALLOW_EXTRA,
)
async def async_setup(hass: HomeAssistant, config: ConfigType) -> bool:
"""Set up the AsusWrt integration."""
if (conf := config.get(DOMAIN)) is None:
return True
# save the options from config yaml
options = {}
mode = conf.get(CONF_MODE, MODE_ROUTER)
for name, value in conf.items():
if name in [CONF_DNSMASQ, CONF_INTERFACE, CONF_REQUIRE_IP]:
if name == CONF_REQUIRE_IP and mode != MODE_AP:
continue
options[name] = value
hass.data[DOMAIN] = {"yaml_options": options}
# check if already configured
domains_list = hass.config_entries.async_domains()
if DOMAIN in domains_list:
return True
# remove not required config keys
if pub_key := conf.pop(CONF_PUB_KEY, ""):
conf[CONF_SSH_KEY] = pub_key
conf.pop(CONF_REQUIRE_IP, True)
conf.pop(CONF_SENSORS, {})
conf.pop(CONF_INTERFACE, "")
conf.pop(CONF_DNSMASQ, "")
hass.async_create_task(
hass.config_entries.flow.async_init(
DOMAIN, context={"source": SOURCE_IMPORT}, data=conf
)
)
return True
async def async_setup_entry(hass: HomeAssistant, entry: ConfigEntry) -> bool:
"""Set up AsusWrt platform."""
# import options from yaml if empty
yaml_options = hass.data.get(DOMAIN, {}).pop("yaml_options", {})
if not entry.options and yaml_options:
hass.config_entries.async_update_entry(entry, options=yaml_options)
router = AsusWrtRouter(hass, entry)
await router.setup()
router.async_on_close(entry.add_update_listener(update_listener))
async def async_close_connection(event):
"""Close AsusWrt connection on HA Stop."""
await router.close()
entry.async_on_unload(
hass.bus.async_listen_once(EVENT_HOMEASSISTANT_STOP, async_close_connection)
)
hass.data.setdefault(DOMAIN, {})[entry.entry_id] = {DATA_ASUSWRT: router}
hass.config_entries.async_setup_platforms(entry, PLATFORMS)
return True
async def async_unload_entry(hass: HomeAssistant, entry: ConfigEntry) -> bool:
"""Unload a config entry."""
if unload_ok := await hass.config_entries.async_unload_platforms(entry, PLATFORMS):
router = hass.data[DOMAIN][entry.entry_id][DATA_ASUSWRT]
await router.close()
hass.data[DOMAIN].pop(entry.entry_id)
return unload_ok
async def update_listener(hass: HomeAssistant, entry: ConfigEntry) -> None:
"""Update when config_entry options update."""
router = hass.data[DOMAIN][entry.entry_id][DATA_ASUSWRT]
if router.update_options(entry.options):
await hass.config_entries.async_reload(entry.entry_id)
|
the-stack_0_21272 | import logging
from itertools import product
import numpy as np
from prune import nan_cnt, row_sum, prune
MISSING_CHAR = b'\xc2\xb7'.decode('utf8')
class Domain(object):
def __init__(self, length):
self.grid = -np.ones((length, length))
self.grid[0, 0] = 0
self.length = length
def __repr__(self):
return "\n".join(["".join(str(int(it)) if it > -1 else MISSING_CHAR
for it in row) for row in self.grid])
def __str__(self):
return "".join([str(np.where(col == 1)[0][0]) if sum(col == 1)
else MISSING_CHAR for col in self.grid.T])
def __getitem__(self, number):
return [row[number] for row in self.grid]
def __setitem__(self, position, number):
self.grid = np.array([[int(i == number) if index == position else it
for index, it in enumerate(row)]
for i, row in enumerate(self.grid)])
def __len__(self):
return self.grid.shape[0]
def __iter__(self):
for neg_pos, col in enumerate(self.grid.T[::-1]):
if sum(col) < 1:
break
position = self.length - (neg_pos + 1)
empty_positions = np.where(col == -1)[0][::-1]
for value in empty_positions:
yield value, position
def eval(self, value, position, how='min'):
""" Evaluate max or min possible solution
if `value` placed in `position` """
eval_numbers = list()
for n, col in enumerate(self.grid.T):
if 1 in col and n != position:
eval_numbers.append(np.where(col == 1)[0][0])
elif n == position:
eval_numbers.append(value)
else:
missing = np.where(col == -1)[0]
# "max": -1 -> last item
eval_numbers.append(missing[{'min': 0, 'max': -1}[how]])
return eval_numbers
def copy(self):
""" Make copy of Domain """
new_domain = Domain(len(self))
new_domain.grid = np.copy(self.grid)
return new_domain
def to_digits(self):
""" Convert domain into digits """
return [np.where(col == 1)[0][0] if sum(col) == 1 else -1
for col in self.grid.T]
def row_sum(self, row):
""" Sum of elements in row `row` """
return row_sum(self.grid[row])
def nan_cnt(self, row):
""" Count unresolved items in row """
return nan_cnt(self.grid[row])
def feasibility_test(self):
""" Test if domain is feasible """
rng = range(len(self))
for value, position in product(rng, rng):
it = self.grid[value, position]
if it > -1:
rowSum = self.row_sum(position)
nanCnt = self.nan_cnt(position)
available = range(rowSum, rowSum + nanCnt + 1)
left_values = [x for x in available if x != value]
try:
assert (it == 1 and value in list(available)) or \
(it == 0 and len(left_values))
except AssertionError as e:
return False
return True
def prune(self):
return prune(self)
if __name__ == "__main__":
pass
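    # Illustrative sketch (added example, not in the original file): build a
    # small Domain, pin position 0 to the value 2 and inspect the partial
    # assignment. Which concrete puzzle this grid models is an assumption
    # left open here.
    demo = Domain(5)
    demo[0] = 2
    print(repr(demo))        # grid view: 1 marks the chosen value, '·' unknown
    print(demo.to_digits())  # first digit fixed to 2, the rest unresolved (-1)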
|
the-stack_0_21273 | """Mock classes used in tests."""
from homeassistant.const import CONF_HOST, CONF_PORT
from homeassistant.components.plex.const import CONF_SERVER, CONF_SERVER_IDENTIFIER
MOCK_SERVERS = [
{
CONF_HOST: "1.2.3.4",
CONF_PORT: 32400,
CONF_SERVER: "Plex Server 1",
CONF_SERVER_IDENTIFIER: "unique_id_123",
},
{
CONF_HOST: "4.3.2.1",
CONF_PORT: 32400,
CONF_SERVER: "Plex Server 2",
CONF_SERVER_IDENTIFIER: "unique_id_456",
},
]
class MockResource:
"""Mock a PlexAccount resource."""
def __init__(self, index):
"""Initialize the object."""
self.name = MOCK_SERVERS[index][CONF_SERVER]
self.clientIdentifier = MOCK_SERVERS[index][ # pylint: disable=invalid-name
CONF_SERVER_IDENTIFIER
]
self.provides = ["server"]
self._mock_plex_server = MockPlexServer(index)
def connect(self, timeout):
"""Mock the resource connect method."""
return self._mock_plex_server
class MockPlexAccount:
"""Mock a PlexAccount instance."""
def __init__(self, servers=1):
"""Initialize the object."""
self._resources = []
for index in range(servers):
self._resources.append(MockResource(index))
def resource(self, name):
"""Mock the PlexAccount resource lookup method."""
return [x for x in self._resources if x.name == name][0]
def resources(self):
"""Mock the PlexAccount resources listing method."""
return self._resources
class MockPlexServer:
"""Mock a PlexServer instance."""
def __init__(self, index=0, ssl=True):
"""Initialize the object."""
host = MOCK_SERVERS[index][CONF_HOST]
port = MOCK_SERVERS[index][CONF_PORT]
self.friendlyName = MOCK_SERVERS[index][ # pylint: disable=invalid-name
CONF_SERVER
]
self.machineIdentifier = MOCK_SERVERS[index][ # pylint: disable=invalid-name
CONF_SERVER_IDENTIFIER
]
prefix = "https" if ssl else "http"
self._baseurl = f"{prefix}://{host}:{port}"
@property
def url_in_use(self):
"""Return URL used by PlexServer."""
return self._baseurl
|
the-stack_0_21274 | import os
n_threads = 1
os.environ["NUMBA_NUM_THREADS"] = f"{n_threads}"
os.environ["MKL_NUM_THREADS"] = f"{n_threads}"
os.environ["OMP_NUM_THREADS"] = f"{n_threads}"
os.environ["NUMEXPR_NUM_THREADS"] = f"{n_threads}"
import respy as rp
from estimagic.differentiation.differentiation import jacobian
from estimagic.inference.likelihood_covs import cov_jacobian
from pathlib import Path
import pandas as pd
import numpy as np
from morris import elementary_effects
from time import time
from joblib import wrap_non_picklable_objects
from joblib.externals.loky import set_loky_pickler
start_params, options, data = rp.get_example_model("kw_94_one", with_data=True)
start_params = pd.read_csv("params.csv").set_index(["category", "name"])
options["simulation_agents"] = 4000
to_drop = [
('lagged_choice_1_edu', 'probability'),
('initial_exp_edu_10', 'probability'),
('maximum_exp', 'edu')
]
cov_path = Path("bld/cov.pickle")
if cov_path.exists():
cov = pd.read_pickle(cov_path)
else:
loglikeobs = rp.get_crit_func(
start_params, options, data, return_scalar=False)
jac = jacobian(loglikeobs, start_params, extrapolation=False)
reduced_jac = jac.drop(columns=to_drop)
cov = cov_jacobian(reduced_jac)
pd.to_pickle(cov, cov_path)
se = np.sqrt(np.diagonal(cov))
start_params["se"] = se.tolist() + [np.nan] * 3
cov_df = pd.DataFrame(cov, columns=start_params.index[:-3], index=start_params.index[:-3])
print("Jacobian done")
simfunc = rp.get_simulate_func(start_params, options)
to_append = start_params.loc[to_drop]
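# Descriptive note (added comment): qoi simulates the model twice, once with
# the candidate parameters and once with the constant of the non-pecuniary
# return to education raised by 500, and returns the resulting change in
# average education experience, which is the quantity whose sensitivity the
# Morris elementary effects below measure.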
def qoi(params):
p1 = pd.concat([params, to_append])
p2 = p1.copy(deep=True)
p2.loc[("nonpec_edu", "constant"), "value"] += 500
df1 = simfunc(p1)
df2 = simfunc(p2)
return df2["Experience_Edu"].mean() - df1["Experience_Edu"].mean()
np.random.seed(5471)
start_params_short = start_params.drop(to_drop)
res = elementary_effects(qoi, start_params_short, cov_df, n_draws=10000, sampling_scheme="random", n_cores=30)
# res = pd.DataFrame(res)
pd.to_pickle(res, "bld/indices/kw_indices.pickle")
|
the-stack_0_21278 | """Customised test runner to output in JUnit-style XML."""
import os
import sys
# This will get templated in by the build rules.
TEST_NAMES = '__TEST_NAMES__'.split(',')
def initialise_coverage():
"""Imports & initialises the coverage module."""
import coverage
from coverage import control as coverage_control
_original_xml_file = coverage_control.XmlReporter.xml_file
# Fix up paths in coverage output which are absolute; we want paths relative to
# the repository root. Also skip empty __init__.py files.
def _xml_file(self, fr, analysis):
if '.pex' in fr.filename:
fr.filename = fr.filename[fr.filename.index('.pex') + 5:] # +5 to take off .pex/
if fr.filename == '__main__.py':
return # Don't calculate coverage for the synthetic entrypoint.
if not (fr.filename.endswith('__init__.py') and len(analysis.statements) <= 1):
analysis.filename = fr.filename
fr.relname = fr.filename
_original_xml_file(self, fr, analysis)
coverage_control.XmlReporter.xml_file = _xml_file
return coverage
def main():
"""Runs the tests. Returns an appropriate exit code."""
args = [arg for arg in sys.argv[1:]]
# Add .bootstrap dir to path, after the initial pex entry
sys.path = sys.path[:1] + [os.path.join(sys.path[0], '.bootstrap')] + sys.path[1:]
if os.getenv('COVERAGE'):
# It's important that we run coverage while we load the tests otherwise
# we get no coverage for import statements etc.
cov = initialise_coverage().coverage()
cov.start()
result = run_tests(args)
cov.stop()
omissions = ['*/third_party/*', '*/.bootstrap/*', '*/test_main.py']
# Exclude test code from coverage itself.
omissions.extend('*/%s.py' % module.replace('.', '/') for module in args)
import coverage
try:
cov.xml_report(outfile=os.getenv('COVERAGE_FILE'), omit=omissions, ignore_errors=True)
except coverage.CoverageException as err:
# This isn't fatal; the main time we've seen it is raised for "No data to report" which
# isn't an exception as far as we're concerned.
sys.stderr.write('Failed to calculate coverage: %s' % err)
return result
else:
return run_tests(args)
|
the-stack_0_21281 | """
Port of Manuel Guizar's code from:
http://www.mathworks.com/matlabcentral/fileexchange/18401-efficient-subpixel-image-registration-by-cross-correlation
"""
import cupy as cp
import sigpy as sp
import numpy as np
import numpy.fft as fft
def _upsampled_dft(data, upsampled_region_size,
upsample_factor=1, axis_offsets=None):
"""
Upsampled DFT by matrix multiplication.
This code is intended to provide the same result as if the following
operations were performed:
- Embed the array "data" in an array that is ``upsample_factor`` times
larger in each dimension. ifftshift to bring the center of the
image to (1,1).
- Take the FFT of the larger array.
- Extract an ``[upsampled_region_size]`` region of the result, starting
with the ``[axis_offsets+1]`` element.
It achieves this result by computing the DFT in the output array without
the need to zeropad. Much faster and memory efficient than the zero-padded
FFT approach if ``upsampled_region_size`` is much smaller than
``data.size * upsample_factor``.
Parameters
----------
data : array
The input data array (DFT of original data) to upsample.
upsampled_region_size : integer or tuple of integers, optional
The size of the region to be sampled. If one integer is provided, it
is duplicated up to the dimensionality of ``data``.
upsample_factor : integer, optional
The upsampling factor. Defaults to 1.
axis_offsets : tuple of integers, optional
The offsets of the region to be sampled. Defaults to None (uses
image center)
Returns
-------
output : ndarray
The upsampled DFT of the specified region.
"""
# if people pass in an integer, expand it to a list of equal-sized sections
if not hasattr(upsampled_region_size, "__iter__"):
upsampled_region_size = [upsampled_region_size, ] * data.ndim
else:
if len(upsampled_region_size) != data.ndim:
raise ValueError("shape of upsampled region sizes must be equal "
"to input data's number of dimensions.")
if axis_offsets is None:
axis_offsets = [0, ] * data.ndim
else:
if len(axis_offsets) != data.ndim:
raise ValueError("number of axis offsets must be equal to input "
"data's number of dimensions.")
im2pi = 1j * 2 * np.pi
dim_properties = list(zip(data.shape, upsampled_region_size, axis_offsets))
for (n_items, ups_size, ax_offset) in dim_properties[::-1]:
kernel = ((np.arange(ups_size) - ax_offset)[:, None]
* fft.fftfreq(n_items, upsample_factor))
kernel = np.exp(-im2pi * kernel)
# Equivalent to:
# data[i, j, k] = kernel[i, :] @ data[j, k].T
data = np.tensordot(kernel, data, axes=(1, -1))
return data
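# Illustrative sketch (added example, not part of the original module): for an
# image correlated with itself the cross-correlation peaks at zero shift, so a
# 15x15 window of the 10x-upsampled DFT centred on zero shift (axis offsets of
# 7) should place its maximum at the centre pixel (7, 7). The array size,
# window size and upsample factor are arbitrary choices; the conj()/conj()
# pattern mirrors the call made in register_translation below.
def _example_upsampled_dft():
    rng = np.random.default_rng(0)
    img = rng.standard_normal((32, 32))
    product = fft.fft2(img) * np.conj(fft.fft2(img))
    patch = _upsampled_dft(product.conj(), 15, upsample_factor=10,
                           axis_offsets=(7, 7)).conj()
    return np.unravel_index(np.argmax(np.abs(patch)), patch.shape)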
def _compute_phasediff(cross_correlation_max):
"""
Compute global phase difference between the two images (should be
zero if images are non-negative).
Parameters
----------
cross_correlation_max : complex
The complex value of the cross correlation at its maximum point.
"""
return np.arctan2(cross_correlation_max.imag, cross_correlation_max.real)
def _compute_error(cross_correlation_max, src_amp, target_amp):
"""
Compute RMS error metric between ``src_image`` and ``target_image``.
Parameters
----------
cross_correlation_max : complex
The complex value of the cross correlation at its maximum point.
src_amp : float
The normalized average image intensity of the source image
target_amp : float
The normalized average image intensity of the target image
"""
error = 1.0 - cross_correlation_max * cross_correlation_max.conj() / \
(src_amp * target_amp)
return np.sqrt(np.abs(error))
def register_translation(src_image, target_image, upsample_factor=1,
space="real", return_error=True):
"""
Efficient subpixel image translation registration by cross-correlation.
This code gives the same precision as the FFT upsampled cross-correlation
in a fraction of the computation time and with reduced memory requirements.
It obtains an initial estimate of the cross-correlation peak by an FFT and
then refines the shift estimation by upsampling the DFT only in a small
neighborhood of that estimate by means of a matrix-multiply DFT.
Parameters
----------
src_image : array
Reference image.
target_image : array
Image to register. Must be same dimensionality as ``src_image``.
upsample_factor : int, optional
Upsampling factor. Images will be registered to within
``1 / upsample_factor`` of a pixel. For example
``upsample_factor == 20`` means the images will be registered
within 1/20th of a pixel. Default is 1 (no upsampling)
space : string, one of "real" or "fourier", optional
Defines how the algorithm interprets input data. "real" means data
will be FFT'd to compute the correlation, while "fourier" data will
bypass FFT of input data. Case insensitive.
return_error : bool, optional
Returns error and phase difference if on,
otherwise only shifts are returned
Returns
-------
shifts : ndarray
Shift vector (in pixels) required to register ``target_image`` with
``src_image``. Axis ordering is consistent with numpy (e.g. Z, Y, X)
error : float
Translation invariant normalized RMS error between ``src_image`` and
``target_image``.
phasediff : float
Global phase difference between the two images (should be
zero if images are non-negative).
References
----------
.. [1] Manuel Guizar-Sicairos, Samuel T. Thurman, and James R. Fienup,
"Efficient subpixel image registration algorithms,"
Optics Letters 33, 156-158 (2008). :DOI:`10.1364/OL.33.000156`
    .. [2] James R. Fienup, "Invariant error metrics for image reconstruction,"
       Applied Optics 36, 8352-8357 (1997). :DOI:`10.1364/AO.36.008352`
"""
# images must be the same shape
if src_image.shape != target_image.shape:
raise ValueError("Error: images must be same size for "
"register_translation")
# assume complex data is already in Fourier space
if space.lower() == 'fourier':
src_freq = src_image
target_freq = target_image
# real data needs to be fft'd.
elif space.lower() == 'real':
src_freq = sp.fft(src_image)
target_freq = sp.fft(target_image)
else:
raise ValueError("Error: register_translation only knows the \"real\" "
"and \"fourier\" values for the ``space`` argument.")
# Whole-pixel shift - Compute cross-correlation by an IFFT
shape = src_freq.shape
image_product = src_freq * target_freq.conj()
cross_correlation = sp.ifft(image_product)
# Locate maximum
maxima = np.unravel_index(np.argmax(np.abs(cross_correlation)),
cross_correlation.shape)
midpoints = np.array([np.fix(axis_size / 2) for axis_size in shape])
shifts = cp.array(maxima, dtype=np.float64)
shifts[shifts > midpoints] -= cp.array(shape)[shifts > midpoints]
if upsample_factor == 1:
if return_error:
src_amp = np.sum(np.abs(src_freq) ** 2) / src_freq.size
target_amp = np.sum(np.abs(target_freq) ** 2) / target_freq.size
CCmax = cross_correlation[maxima]
# If upsampling > 1, then refine estimate with matrix multiply DFT
else:
# Initial shift estimate in upsampled grid
shifts = np.round(shifts * upsample_factor) / upsample_factor
upsampled_region_size = np.ceil(upsample_factor * 1.5)
# Center of output array at dftshift + 1
dftshift = np.fix(upsampled_region_size / 2.0)
upsample_factor = np.array(upsample_factor, dtype=np.float64)
normalization = (src_freq.size * upsample_factor ** 2)
# Matrix multiply DFT around the current shift estimate
sample_region_offset = dftshift - shifts * upsample_factor
cross_correlation = _upsampled_dft(image_product.conj(),
upsampled_region_size,
upsample_factor,
sample_region_offset).conj()
cross_correlation /= normalization
# Locate maximum and map back to original pixel grid
maxima = np.unravel_index(np.argmax(np.abs(cross_correlation)),
cross_correlation.shape)
CCmax = cross_correlation[maxima]
maxima = np.array(maxima, dtype=np.float64) - dftshift
shifts = shifts + maxima / upsample_factor
if return_error:
src_amp = _upsampled_dft(src_freq * src_freq.conj(),
1, upsample_factor)[0, 0]
src_amp /= normalization
target_amp = _upsampled_dft(target_freq * target_freq.conj(),
1, upsample_factor)[0, 0]
target_amp /= normalization
# If its only one row or column the shift along that dimension has no
# effect. We set to zero.
for dim in range(src_freq.ndim):
if shape[dim] == 1:
shifts[dim] = 0
if return_error:
return shifts, _compute_error(CCmax, src_amp, target_amp), \
_compute_phasediff(CCmax)
else:
return shifts
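# Minimal usage sketch (added example; assumes a CUDA-capable device, since the
# inputs are cupy arrays and sigpy dispatches the FFTs accordingly). The helper
# is defined here but never called. Note that sigpy's fft/ifft are centered
# transforms, so the returned shift follows this module's conventions rather
# than those of other register_translation implementations.
def _example_register_translation():
    img = cp.asarray(np.random.default_rng(0).standard_normal((64, 64)))
    target = cp.roll(img, (3, -5), axis=(0, 1))
    return register_translation(img, target, upsample_factor=50, space="real")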
|
the-stack_0_21285 | import os
from setuptools import setup, find_packages
import re
def get_version():
VERSIONFILE = "p2d/_version.py"
dictionary_for_exec = {}
exec(open(VERSIONFILE).read(), dictionary_for_exec)
return dictionary_for_exec['__version__']
setup(
name='pol2dom',
version=get_version(),
description='Transform a set of Polygon problems into a DOMjudge contest. Downloading from polygon, converting to the DOMjudge package format, and importing into a DOMjudge server.',
author='dario2994',
author_email='[email protected]',
url='https://github.com/dario2994/pol2dom',
license='MIT',
packages=find_packages(),
package_data={'': ['resources/*.tex']},
include_package_data=True,
entry_points={
'console_scripts': [
'p2d=p2d.p2d:main'
]
},
python_requires='>=3.7',
platforms='any',
install_requires=[
'pyyaml >= 5.3',
'requests >= 2.26',
'webcolors >= 1.0'
]
)
|
the-stack_0_21286 | ##############################################################################
#
# Copyright (c) 2001 Zope Foundation and Contributors
#
# This software is subject to the provisions of the Zope Public License,
# Version 2.1 (ZPL). A copy of the ZPL should accompany this
# distribution.
# THIS SOFTWARE IS PROVIDED "AS IS" AND ANY AND ALL EXPRESS OR IMPLIED
# WARRANTIES ARE DISCLAIMED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF TITLE, MERCHANTABILITY, AGAINST INFRINGEMENT, AND FITNESS
# FOR A PARTICULAR PURPOSE.
#
##############################################################################
""" Classes: RecursiveGroupsPlugin
"""
from AccessControl import ClassSecurityInfo
from AccessControl.class_init import InitializeClass
from Acquisition import aq_parent
from zope.interface import Interface
from Products.PageTemplates.PageTemplateFile import PageTemplateFile
from Products.PluggableAuthService.interfaces.plugins \
import IGroupsPlugin
from Products.PluggableAuthService.PropertiedUser import PropertiedUser
from Products.PluggableAuthService.plugins.BasePlugin import BasePlugin
from Products.PluggableAuthService.utils import classImplements
class IRecursiveGroupsPlugin(Interface):
""" Marker interface.
"""
manage_addRecursiveGroupsPluginForm = PageTemplateFile(
'www/rgpAdd', globals(), __name__='manage_addRecursiveGroupsPluginForm')
def addRecursiveGroupsPlugin(dispatcher, id, title=None, REQUEST=None):
""" Add a RecursiveGroupsPlugin to a Pluggable Auth Service. """
rgp = RecursiveGroupsPlugin(id, title)
dispatcher._setObject(rgp.getId(), rgp)
if REQUEST is not None:
REQUEST['RESPONSE'].redirect('%s/manage_workspace'
'?manage_tabs_message='
'RecursiveGroupsPlugin+added.' %
dispatcher.absolute_url())
class SimpleGroup:
def __init__(self, id):
self._id = id
def getId(self):
return self._id
def getGroups(self):
return ()
def _addGroups(self, groups):
pass
class RecursiveGroupsPlugin(BasePlugin):
""" PAS plugin for recursively flattening a collection of groups
"""
meta_type = 'Recursive Groups Plugin'
security = ClassSecurityInfo()
def __init__(self, id, title=None):
self._id = self.id = id
self.title = title
#
# IGroupsPlugin implementation
#
@security.private
def getGroupsForPrincipal(self, user, request=None):
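        # Added note: breadth-first expansion of the principal's groups. Each
        # group found is itself looked up as a principal (with this plugin
        # excluded so it does not recurse into itself) until no new groups
        # appear, yielding the transitive closure of group membership.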
set = list(user.getGroups())
seen = []
parent = aq_parent(self)
while set:
test = set.pop(0)
if test in seen:
continue
seen.append(test)
new_groups = parent._getGroupsForPrincipal(
PropertiedUser(test).__of__(parent),
ignore_plugins=(self.getId(),))
if new_groups:
set.extend(new_groups)
return tuple(seen)
classImplements(RecursiveGroupsPlugin, IRecursiveGroupsPlugin, IGroupsPlugin)
InitializeClass(RecursiveGroupsPlugin)
|
the-stack_0_21287 | import cv2
import pyttsx3
import requests
import speech_recognition as sr
import datetime
import os
import random
import wikipedia
import webbrowser
import sys
import pyjokes
import pyautogui
import time
import instaloader
import pywhatkit
import PyPDF2
from tkinter.filedialog import *
# speech recognition is imported and renamed as sr for easy use
engine = pyttsx3.init('sapi5')
voices = engine.getProperty('voices')
engine.setProperty('voice', voices[2].id)
# defining a function to convert text to speech
def speak(audio):
engine.say(audio)
print(audio)
engine.runAndWait()
# To convert voice into text
def takecommand():
r = sr.Recognizer()
with sr.Microphone() as source:
print('listening...')
r.pause_threshold = 1
audio = r.listen(source, timeout=5, phrase_time_limit=5)
try:
print('Recognizing...')
query = r.recognize_google(audio, language='en-in')
print(f'user said: {query}')
except Exception as e:
speak("Say that again please...")
return "none"
return query
# To wish
def wish():
hour = int(datetime.datetime.now().hour)
if hour > 0 and hour <= 12:
speak('good morning')
elif hour > 12 and hour < 18:
speak('good afternoon')
else:
speak('good evening')
speak('i am friday. sir please tell me how can i help you')
# to get current time
def xtime():
Time = datetime.datetime.now().strftime("%I:%M:%S")
speak(Time)
# to get news
def news():
main_url = "https://newsapi.org/v2/top-headlines?country=in&apiKey=35ab651459ea426aab0664002b5b6250"
main_page = requests.get(main_url).json()
# print main page
articles = main_page["articles"]
# print articles
head = []
day = ["first", "second", "third", 'fourth', 'fifth', 'sixth', "seventh", "eighth", 'ninth', "tenth"]
for ar in articles:
head.append(ar["title"])
for i in range(len(day)):
# print(f"today's (day[i) news is: ", head[i
speak(f"today's {day[i]} news is: , {head[i]}")
# TO READ PDF
def pdf_reader():
speak("sir,please select the book")
book = askopenfile()
pdf_Reader = PyPDF2.PdfFileReader(book)
pages = pdf_Reader.numPages
speak(f"Total numbers of pages in this book {pages} ")
speak("sir please enter the page number i have to read")
pg = int(input("please enter the page number :"))
page = pdf_Reader.getPage(pg)
text = page.extractText()
speak(text)
def quiz(self):
quistions = ['1. Who is known as the father of Free Software Foundation?',
'2. In which year was the "C" programming language developed?',
'3.Who is known as the father of Internet?',
'4.Who used the binary system of numeration for the first time?',
'5.Who is the first computer programmer?', '6.In which year was computer graphics oringinated?',
'7.Who is the inventor of Supercomputer?', '8.What is the name of Worlds first digital Computer?',
'9.What is the name of Indian first indigenous supercomputer developed by CDAC named?',
'10.Which was the first Pocket Computer?',
'11.What is the name of the tablet introduced by Amazon?', '12.Who invented Computer Mouse?',
'13.Speak to Tweet is a service given by which giant?',
'14.Xoom is the name of tablet developed by which company?',
'15. WIT is the NASDAW code of which Indian IT company?', '16.Expand HDMI',
'17.What was developed by PYRA Labs and later sold to google?',
'18.Who is known as the father of World Wide Web?',
'19.Which mehod is used to connect to a remote computer?',
'20.What do you call a single point on a computer screen?',
'21.The CPU Chip used in a computer is made of which element?',
'22.Which computer was designed to be compact as possible?',
'23.What is the name of the device which produce hard copy graphics?',
'24.When is world computer literacy day celebrated?',
'25.The processing power of a CPU is measured in?', '26.Windows 10 was released on which day?',
'27.Which command is used to view the sub directory structure of a drive?',
'28.Round robin scheduling is he premptive version of?',
'29.When did IBM released its first version of DOS OS 1.0?',
'30.Start button was introduced in which operating system?',
'31.Groupon was rebranded in India to what name?',
'32.Which system software does the job of merging the records from two files to one?',
'33.While running DOS on a computer, which command is used to duplicate the entire diskette?',
'34.What is the name given to the organized collection of software that control the entire operation of c computer?',
'35.What is the name of the OS for the laptop MACLITE?',
'36.What is the memory range from 1k-640k called?',
'37.IBM released its first PC in 1981, what was the name of OS that was popular at that time?',
'38.How long is an IPV6 Address?', '39.Which protocol does DHCP uses at the transport layer?',
'40.Which protocol is used to send a destination network unknown messge back to originating host?',
'41.Which WLAN IEEE Specification allows upto 54 mbps at 2.4 Ghz?',
'42.Which protocol does PPP uses to identify the network layer protocol?', '43.HBA Stands for?',
'44.What is the second generation of the Web called?',
'45. If you have a CISCO Mesh network , what protocl allows multiple AP’s to connect with many redundant connection between nodes?',
'46.Which protocol is used to identify the hardware address of a local device?',
'47. PAT Address translation is also termed as what?',
'48. When was the term Social Networking first used?', '49.Who founded ‘myspace’?',
'50.Which social media allows a user to create map of exactly where a photo was taken?',
'51.What type of audience are primarily in a social network?',
'52.Twitter is an example of what service?',
'53.What is the character limit for a tweet in Twitter?',
'54.What was the largest social network prior to facebook?',
'55. Which social network does not have followers?',
'56.When was the social networking first become popular online?',
'57. Which is used to search and browse for information online?',
'58.What is Youtubes Slogan?', '59.P2P,B2B, and B2C are part of?',
'60. Winchester drive is also called as what?',
'61.What kind of connectors are used to connect a PC power supply to a hardware?',
'62.What is the term Wave Table Synthesis related to?',
'63.What type of memory is Pendrive?', '64.Which IRQ does the hard disk drive use?',
'65.Who invented Compact disc?', '66.What is Arrandale code name for?',
'67.What hardware was used by the initial generation of computers?',
'68. Which was the first computer made available for commercial use?',
'69.Name the first mechanical computer designed by Charles Babbage called',
'70.The concentric circles on the platter of hard disk is known as?',
'71.IRQ6 is commenly assigned to?',
'72.Which component in a PC regulated the color depth and screen resolution of a monitor?',
'73. A Computer programming language for simulating models of business activity is?',
'74.The words that are set aside by the programming language for its own use is called as what?',
'75.Which programming language is used for scientific calculations?',
'76.Which computer language is used on the Internet?',
'77.Which language is used for the developedment of various games?',
'78.Which language was devised by Dr, Seymour Papart?',
'79. Which computer language is used for Artificial Intelligence?',
'80.Who is the creator of PASCAL language?',
'81.A systems programming language for micro computers in the Intel family is called?',
'82. Which language is more suited to a structured program?',
'83.MS/DOS is written in which language?',
'84. A program element that allows structuring of a program in a different way is called?',
'85. A name given by Intel to high speed MOS technology is called?',
'86. Who is the new CEO of Flipkart.com ?']
answers = ['Richard Mathew Stallman', '1972', 'Vinton Cerf', 'Thomas Harriot', 'Ada Lovelace', '1940',
'Seymour Cray',
'Mark 1', 'Param', 'SHARP PC1211', 'Kindle Fire', 'Doughles Engelbert', 'Google', 'Motorola',
'Wipro',
'High Defenition Multimedia Interface', 'Blogger', 'Tim Berners Lee', 'Dialup', 'Pixel', 'Silica',
'Micro Computer', 'Plotter', 'December 2', 'MIPS', 'July 29', 'TREE', 'FIFO', '1981', 'Windows 95',
'Nearbuy', 'Utility Program', 'DISKCOPY', 'Operating System', 'OZ', 'Conventional memory', 'CP/M',
'128 bit (16 byte)', 'UDP', 'ICMP', 'IEEE 802.11G', 'NCP', 'Host Bus Adapter', 'Web 2.0', 'AWPP',
'Address Resolution Protocol', 'NAT Overload', '1954', 'Tom Anderson', 'Flickr', 'Joiners', '140',
'myspace',
'Google Plus', '2003', 'Netscape', 'Broadcast Yourself', 'Share Economy', 'Hard disk drive', 'Molex',
'Sound', 'Flash Memory', '14', 'James Russel', 'Intel Processor', 'Valves', 'UNIVAC',
'Analytical Engine',
'Tracks', 'Floppy Drive Controller', 'VRAM', 'GPSS', 'Control Structures', 'FORTRAN', 'Java', 'C++',
'LOGO',
'Prolog', 'Niclaus Wirth', 'PC/M', 'PASCAL', 'C++', 'Co-Routine', 'HMOS']
n = 0
i = 0
score = 0
life = 4
try:
while True:
print(quistions[n])
if life == 0:
speak('You lose')
break
ans = input('<^> :')
if ans.lower() == answers[n].lower():
print('correct')
life = life + 1
print('life: ', life)
score = score + 1
else:
score = score - 1
life = life - 1
print("Ans: ", answers[n])
print('wrong')
print('life: ', life)
n = n + 1
i = i + 1
except IndexError:
# announce the final score once the question list is exhausted
speak(f'your score is {score}')
# control speed of voice
def speed():
speak("sir ,should i increase or decrease the speed")
sp = takecommand().lower()
try:
if sp == 'increase':
engine.setProperty('rate', 150)
engine.runAndWait()
elif sp == 'decrease':
engine.setProperty('rate', 70)
engine.runAndWait()
except:
pass
# logic building for tasks
if __name__ == "__main__": # speak('hello sir')
wish()
while True:
query = takecommand().lower()
# logic building for tasks
if 'open notepad' in query:
npath = "C:\\Windows\\notepad.exe"
os.startfile(npath)
elif "close notepad" in query:
speak("okay sir ,closing the application")
os.system("taskkill /f /im notepad.exe")
elif "open command prompt" in query:
os.system("start cmd")
elif "close command prompt" in query:
speak("okay sir ,closing the application")
os.system("taskkill /f /im cmd.exe")
elif "open cmd" in query:
os.system("start cmd")
elif "close cmd" in query:
speak("okay sir ,closing the application")
os.system("taskkill /f /im cmd.exe")
elif "open whatsapp" in query:
wpath = "C:\\Users\\Lenovo\\AppData\\Local\\WhatsApp\\WhatsApp.exe"
os.system(wpath)
elif "close whatsapp" in query:
speak("okay sir ,closing the application")
os.system("taskkill /f /im whatsapp.exe")
elif "open chrome" in query:
cpath = "C:\\Program Files\\Google\\Chrome\\Application\\chrome.exe"
os.system(cpath)
elif "close chrome" in query:
speak("okay sir ,closing the application")
os.system("taskkill /f /im chrome.exe")
elif "open opera" in query:
opath = "C:\\Users\\Lenovo\\AppData\\Local\\Programs\\Opera\\launcher.exe"
os.system(opath)
elif "close opera" in query:
speak("okay sir ,closing the application")
os.system("taskkill /f /im launcher.exe")
elif "open telegram" in query:
tpath = "C:\\Users\\Lenovo\\AppData\\Roaming\\Telegram Desktop\\Telegram.exe"
os.system(tpath)
elif "close telegram" in query:
speak("okay sir ,closing the application")
os.system("taskkill /f /im Telegram.exe")
elif "open vs code" in query:
vpath = "C:\\Users\\Lenovo\\AppData\\Local\\Programs\\Microsoft VS Code\\Code.exe"
os.system(vpath)
elif "close vs code" in query:
speak("okay sir ,closing the application")
os.system("taskkill /f /im code.exe")
elif "open camera" in query:
cap = cv2.VideoCapture(0)
while True:
ret, img = cap.read()
cv2.imshow('webcam', img)
k = cv2.waitKey(50)
if k == 27:
break
cap.release()
cv2.destroyAllWindows()
elif "play music" in query:
music_dir = "D:\\jarvis_project\\music"
songs = os.listdir(music_dir)
rd = random.choice(songs)
os.startfile(os.path.join(music_dir, rd))
elif "wikipedia" in query:
speak("searching wikipedia......")
query = query.replace("wikipedia", "")
results = wikipedia.summary(query, sentences=2)
speak("according to wikipedia")
speak(results)
elif "search on youtube" in query:
speak("sir,what should i search")
search = takecommand().lower()
pywhatkit.playonyt(f"{search}")
elif "open stackoverflow" in query:
webbrowser.open('www.stackoverflow.com')
elif "what time is it" in query:
xtime()
elif "search on google" in query:
speak("sir, what should i search on google")
cm = takecommand().lower()
pywhatkit.search(f"{cm}")
elif "you can sleep" in query:
speak("thanks for using me sir, have a good day")
sys.exit()
elif "tell me a joke" in query:
joke = pyjokes.get_joke()
speak(joke)
elif "shutdown the system" in query:
os.system("shutdown /s /t 5")
speak("shut downing system..")
elif "restart the system" in query:
os.system("shutdown /r /t 5")
speak("restarting system")
elif "sleep this system" in query:
os.system("rundll32.exe powrproof.dll,setsuspendstate 0,1,0")
elif "switch window" in query:
pyautogui.keyDown("alt")
pyautogui.press("tab")
time.sleep(1)
pyautogui.keyUp("alt")
elif "tell me the news" in query:
speak("please wait sir, fetching the news")
news()
elif "where i am" in query or "where we are" in query:
speak('wait sir,let me check')
try:
ipADD = requests.get('https://api.ipify.org').text
url = 'https://get.geojs.io/v1/ip/geo/' + ipADD + '.json'
geo_requests = requests.get(url)
geo_data = geo_requests.json()
# print(geo data)
city = geo_data['city']
state = geo_data['state']
country = geo_data['country']
speak(f"sir iam not sure, i think we are in {city} city of {state} state of {country} country")
except Exception as e:
speak("sorry sir,due to network issue iam not able to find where we are")
pass
elif "instagram profile" in query or "profile on instagram" in query:
speak("sir please enter the user name correctly.")
name = input('Enter username here: ')
webbrowser.open(f"www.instagram.com/{name}")
speak(f"sir here is the profile of the user {name}")
time.sleep(1)
speak("sir would you like to download profile picture of this account.")
condition = takecommand().lower()
if "yes" in condition:
mod = instaloader.Instaloader() # instadownloader library
mod.download_profile(name, profile_pic_only=True)
speak("i am done sir, profile picture is saved in our main folder")
elif "take screenshot" in query or "take a screenshot" in query:
speak("sir, tell me the name the name for the screenshot")
name = takecommand().lower()
time.sleep(3)
img = pyautogui.screenshot()
img.save(f"{name}.png")
speak(" i am done sir,the screenshot is saved in our main folder")
# elif "read pdf" in query: not working need to fix the function
# pdf_reader()
elif "change speed of voice" in query or "change voice speed" in query or "voice speed" in query: # woking,need little more work
speed()
elif "thank you" in query:
speak("it's my pleasure sir")
elif "do calculations" in query or "can you calculate":
pass
elif "play a game" in query:
speak("ok sir, starting quiz")
quiz()
speak("sir, do you want to try again")
inp = input(' ').lower()
if inp == 'yes':
quiz()
|
the-stack_0_21290 | #!/usr/bin/env python2
# Copyright (c) 2014 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
# Exercise the listreceivedbyaddress API
from test_framework import BitcoinTestFramework
from bitcoinrpc.authproxy import AuthServiceProxy, JSONRPCException
from util import *
def get_sub_array_from_array(object_array, to_match):
'''
Finds and returns a sub array from an array of arrays.
to_match should be a unique identifier of a sub array
'''
num_matched = 0
for item in object_array:
all_match = True
for key,value in to_match.items():
if item[key] != value:
all_match = False
if not all_match:
continue
return item
return []
def check_array_result(object_array, to_match, expected, should_not_find = False):
"""
Pass in array of JSON objects, a dictionary with key/value pairs
to match against, and another dictionary with expected key/value
pairs.
If the should_not_find flag is true, to_match should not be found in object_array
"""
if should_not_find == True:
expected = { }
num_matched = 0
for item in object_array:
all_match = True
for key,value in to_match.items():
if item[key] != value:
all_match = False
if not all_match:
continue
for key,value in expected.items():
if item[key] != value:
raise AssertionError("%s : expected %s=%s"%(str(item), str(key), str(value)))
num_matched = num_matched+1
if num_matched == 0 and should_not_find != True:
raise AssertionError("No objects matched %s"%(str(to_match)))
if num_matched > 0 and should_not_find == True:
raise AssertionError("Objects was matched %s"%(str(to_match)))
class ReceivedByTest(BitcoinTestFramework):
def run_test(self):
'''
listreceivedbyaddress Test
'''
# Send from node 0 to 1
addr = self.nodes[1].getnewaddress()
txid = self.nodes[0].sendtoaddress(addr, 0.1)
self.sync_all()
#Check not listed in listreceivedbyaddress because has 0 confirmations
check_array_result(self.nodes[1].listreceivedbyaddress(),
{"address":addr},
{ },
True)
#Bury Tx under 10 block so it will be returned by listreceivedbyaddress
self.nodes[1].generate(10)
self.sync_all()
check_array_result(self.nodes[1].listreceivedbyaddress(),
{"address":addr},
{"address":addr, "account":"", "amount":Decimal("0.1"), "confirmations":10, "txids":[txid,]})
#With min confidence < 10
check_array_result(self.nodes[1].listreceivedbyaddress(5),
{"address":addr},
{"address":addr, "account":"", "amount":Decimal("0.1"), "confirmations":10, "txids":[txid,]})
#With min confidence > 10, should not find Tx
check_array_result(self.nodes[1].listreceivedbyaddress(11),{"address":addr},{ },True)
#Empty Tx
addr = self.nodes[1].getnewaddress()
check_array_result(self.nodes[1].listreceivedbyaddress(0,True),
{"address":addr},
{"address":addr, "account":"", "amount":0, "confirmations":0, "txids":[]})
'''
getreceivedbyaddress Test
'''
# Send from node 0 to 1
addr = self.nodes[1].getnewaddress()
txid = self.nodes[0].sendtoaddress(addr, 0.1)
self.sync_all()
#Check balance is 0 because of 0 confirmations
balance = self.nodes[1].getreceivedbyaddress(addr)
if balance != Decimal("0.0"):
raise AssertionError("Wrong balance returned by getreceivedbyaddress, %0.2f"%(balance))
#Check balance is 0.1
balance = self.nodes[1].getreceivedbyaddress(addr,0)
if balance != Decimal("0.1"):
raise AssertionError("Wrong balance returned by getreceivedbyaddress, %0.2f"%(balance))
#Bury Tx under 10 block so it will be returned by the default getreceivedbyaddress
self.nodes[1].generate(10)
self.sync_all()
balance = self.nodes[1].getreceivedbyaddress(addr)
if balance != Decimal("0.1"):
raise AssertionError("Wrong balance returned by getreceivedbyaddress, %0.2f"%(balance))
'''
listreceivedbyaccount + getreceivedbyaccount Test
'''
#set pre-state
addrArr = self.nodes[1].getnewaddress()
account = self.nodes[1].getaccount(addrArr)
received_by_account_json = get_sub_array_from_array(self.nodes[1].listreceivedbyaccount(),{"account":account})
if len(received_by_account_json) == 0:
raise AssertionError("No accounts found in node")
balance_by_account = rec_by_accountArr = self.nodes[1].getreceivedbyaccount(account)
txid = self.nodes[0].sendtoaddress(addr, 0.1)
self.sync_all()
# listreceivedbyaccount should return received_by_account_json because of 0 confirmations
check_array_result(self.nodes[1].listreceivedbyaccount(),
{"account":account},
received_by_account_json)
# getreceivedbyaccount should return the same balance because of 0 confirmations
balance = self.nodes[1].getreceivedbyaccount(account)
if balance != balance_by_account:
raise AssertionError("Wrong balance returned by getreceivedbyaccount, %0.2f"%(balance))
self.nodes[1].generate(10)
self.sync_all()
# listreceivedbyaccount should return updated account balance
check_array_result(self.nodes[1].listreceivedbyaccount(),
{"account":account},
{"account":received_by_account_json["account"], "amount":(received_by_account_json["amount"] + Decimal("0.1"))})
# getreceivedbyaccount should return the updated balance
balance = self.nodes[1].getreceivedbyaccount(account)
if balance != balance_by_account + Decimal("0.1"):
raise AssertionError("Wrong balance returned by getreceivedbyaccount, %0.2f"%(balance))
#Create a new account named "mynewaccount" that has a 0 balance
self.nodes[1].getaccountaddress("mynewaccount")
received_by_account_json = get_sub_array_from_array(self.nodes[1].listreceivedbyaccount(0,True),{"account":"mynewaccount"})
if len(received_by_account_json) == 0:
raise AssertionError("No accounts found in node")
# Test includeempty of listreceivedbyaccount
if received_by_account_json["amount"] != Decimal("0.0"):
raise AssertionError("Wrong balance returned by listreceivedbyaccount, %0.2f"%(received_by_account_json["amount"]))
# Test getreceivedbyaccount for 0 amount accounts
balance = self.nodes[1].getreceivedbyaccount("mynewaccount")
if balance != Decimal("0.0"):
raise AssertionError("Wrong balance returned by getreceivedbyaccount, %0.2f"%(balance))
if __name__ == '__main__':
ReceivedByTest().main()
|
the-stack_0_21292 | import socketserver
import http.server
import sys
import cgi
import os
import subprocess
from export_events import updateEvents
from rewind7am import rewindTime
# Port settings
IP = ""
if len(sys.argv) > 1:
PORT = int(sys.argv[1])
else:
PORT = 8124
# serve render/ folder, not current folder
rootdir = os.getcwd()
os.chdir('render')
# Custom handler
class CustomHandler(http.server.SimpleHTTPRequestHandler):
def do_GET(self):
# default behavior
http.server.SimpleHTTPRequestHandler.do_GET(self)
def do_POST(self):
form = cgi.FieldStorage(
fp = self.rfile,
headers = self.headers,
environ = {'REQUEST_METHOD':'POST', 'CONTENT_TYPE':self.headers['Content-Type']})
result = 'NOT_UNDERSTOOD'
if self.path == '/refresh':
# recompute jsons. We have to pop out to root from render directory
# temporarily. It's a little ugly
refresh_time = form.getvalue('time')
os.chdir(rootdir) # pop out
updateEvents() # defined in export_events.py
os.chdir('render') # pop back to render directory
result = 'OK'
if self.path == '/addnote':
# add note at specified time and refresh
note = form.getvalue('note')
note_time = form.getvalue('time')
os.chdir(rootdir) # pop out
subprocess.check_output(['./note.sh', str(note_time)], input=note.encode())
updateEvents() # defined in export_events.py
os.chdir('render') # go back to render
result = 'OK'
if self.path == '/blog':
# add note at specified time and refresh
post = form.getvalue('post')
if post is None: post = ''
post_time = int(form.getvalue('time'))
os.chdir(rootdir) # pop out
trev = rewindTime(post_time)
with open(f'logs/blog_{post_time:d}.txt', 'w') as f:
f.write(post)
updateEvents() # defined in export_events.py
os.chdir('render') # go back to render
result = 'OK'
self.send_response(200)
self.send_header('Content-type','text/html')
self.end_headers()
self.wfile.write(result.encode())
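# Hedged usage sketch (not part of the original ulogme server): the CustomHandler
# above accepts form-encoded POSTs, so a client could exercise the /addnote
# endpoint roughly like this, assuming the `requests` package is installed and
# the server is running on the default port 8124. The note text and timestamp
# are placeholder values.
#
#   import requests
#   requests.post('http://localhost:8124/addnote',
#                 data={'note': 'coffee break', 'time': 1400000000})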
class BetterThreadingTCPServer(socketserver.ThreadingTCPServer):
allow_reuse_address = True
httpd = BetterThreadingTCPServer((IP, PORT), CustomHandler)
print(f'Serving ulogme, see it on http://localhost:{PORT}')
httpd.serve_forever()
|
the-stack_0_21294 | from pyramid.httpexceptions import HTTPBadRequest, HTTPFound, HTTPNotFound
from pyramid.view import (
forbidden_view_config,
notfound_view_config,
view_config,
view_defaults,
)
from lms.security import Permissions
from lms.services import ConsumerKeyError
@forbidden_view_config(path_info="/admin/*")
def logged_out(request):
return HTTPFound(location=request.route_url("pyramid_googleauth.login"))
@notfound_view_config(path_info="/admin/*", append_slash=True)
def notfound(_request):
return HTTPNotFound()
@view_defaults(request_method="GET", permission=Permissions.ADMIN)
class AdminViews:
def __init__(self, request):
self.request = request
self.application_instance_service = request.find_service(
name="application_instance"
)
@view_config(route_name="admin.index")
def index(self):
return HTTPFound(location=self.request.route_url("admin.instances"))
@view_config(
route_name="admin.instances",
renderer="lms:templates/admin/instances.html.jinja2",
) # pylint: disable=no-self-use
def instances(self):
return {}
@view_config(
route_name="admin.instances",
request_method="POST",
require_csrf=True,
)
def find_instance(self):
try:
consumer_key = self.request.params["query"]
except KeyError as err:
raise HTTPBadRequest() from err
try:
ai = self.application_instance_service.get(consumer_key)
except ConsumerKeyError:
self.request.session.flash(
f'No application instance found for {self.request.params["query"]}',
"errors",
)
return HTTPFound(location=self.request.route_url("admin.instances"))
return HTTPFound(
location=self.request.route_url(
"admin.instance", consumer_key=ai.consumer_key
),
)
@view_config(
route_name="admin.instance",
renderer="lms:templates/admin/instance.html.jinja2",
)
def show_instance(self):
ai = self._get_ai_or_404(self.request.matchdict["consumer_key"])
return {"instance": ai}
@view_config(
route_name="admin.instance",
request_method="POST",
require_csrf=True,
)
def update_instance(self):
ai = self._get_ai_or_404(self.request.matchdict["consumer_key"])
for setting, sub_setting in (
("canvas", "sections_enabled"),
("canvas", "groups_enabled"),
("blackboard", "files_enabled"),
("microsoft_onedrive", "files_enabled"),
):
enabled = self.request.params.get(f"{setting}.{sub_setting}") == "on"
ai.settings.set(setting, sub_setting, enabled)
self.request.session.flash(
f"Updated application instance {ai.consumer_key}", "messages"
)
return HTTPFound(
location=self.request.route_url(
"admin.instance", consumer_key=ai.consumer_key
)
)
def _get_ai_or_404(self, consumer_key):
try:
return self.application_instance_service.get(consumer_key)
except ConsumerKeyError as err:
raise HTTPNotFound() from err
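# Hedged configuration sketch (not part of this module): the route names used in
# the view_config decorators above would need to be registered in the Pyramid
# application's configurator. The URL patterns below are assumptions for
# illustration; only the route names themselves come from this file.
#
#   config.add_route("admin.index", "/admin/")
#   config.add_route("admin.instances", "/admin/instances/")
#   config.add_route("admin.instance", "/admin/instances/{consumer_key}/")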
|
the-stack_0_21295 | #!/usr/bin/env python3
import argparse
import os
import sys
from prettytable import PrettyTable
import puremagic as magic
from colorama import Fore, Style, init
import yara
from androguard.core.bytecodes.apk import APK
def deleteFiles():
if(os.path.exists("hash.txt")):
output = os.system("rm hash.txt")
if(os.path.exists("string.txt")):
output = os.system("rm string.txt")
def hashFile(file_to_analyze):
data_table = PrettyTable()
data_table.field_names = [Fore.YELLOW + "Hash type" + Fore.WHITE, Fore.BLUE + "Value" + Fore.WHITE]
hash_type = ["sha1", "sha256","sha512","md5"]
os.system("sha1sum "+ file_to_analyze + ">> hash.txt")
os.system("sha256sum "+ file_to_analyze + ">> hash.txt")
os.system("sha512sum "+ file_to_analyze + ">> hash.txt")
os.system("md5sum "+ file_to_analyze + ">> hash.txt")
with open("hash.txt") as f:
content = f.readlines()
for pos in range(0,4):
hash_value = content[pos].split(" ")
data_table.add_row([hash_value[0],hash_type[pos]])
print(data_table)
def yaraAnalysis(file_to_analyze):
data_table = PrettyTable()
data_table.field_names = [Fore.YELLOW + "Path of the rule" + Fore.WHITE, Fore.BLUE + "Rule" + Fore.WHITE]
onlyfiles = []
for dirpath, dirs, files in os.walk("YaraRules/"):
for filename in files:
if(".yar" in filename or ".rule" in filename):
onlyfiles.append(os.path.join(dirpath,filename))
match = ""
for i in onlyfiles:
rules = yara.compile(i)
temp_match = rules.match(file_to_analyze)
if temp_match != []:
for j in temp_match:
data_table.add_row([i,str(j)])
print(data_table)
def setupParser():
args = []
parser = argparse.ArgumentParser()
parser.add_argument("-y","--yara", required=False, help="Checks if some yara rule matches the file pass by argument.", type=str)
parser.add_argument("-s","--strings", required=False, help="Get the strings from the file.", type=str)
parser.add_argument("-a","--analyze", required=False, help="Analyze the file.", type=str)
parser.add_argument("-mf","--multifile", required=False, nargs='+', help="Analyze multiple files.")
parser.add_argument("-d","--docs", required=False, help="Analyze document files.",type=str)
parser.add_argument("-H","--hash",action='store_true', help="Scan the hash file.")
parser.add_argument("-mh","--multihash", required=False, nargs='+', help="Scan multiple hashes.", type=list)
parser.add_argument("-m","--metadata", required=False, help="Get metadata information.", type=str)
parser.add_argument("-dm","--domain", required=False, help="Extract URLs and IPs.", type=str)
parser.add_argument('-v', '--verbose',action='store_true',help='Verbose Output')
args = parser.parse_args()
return args
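# Hedged usage sketch (not in the original script): typical invocations of the
# argument parser defined above, assuming this file is run as zeus.py. The file
# names are placeholders.
#
#   python3 zeus.py --analyze sample.exe --hash
#   python3 zeus.py --yara suspicious.bin
#   python3 zeus.py --strings suspicious.bin --verbose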
def analyze(file_to_analyze):
type_of_the_file = str(magic.magic_file(file_to_analyze))
# Windows Analysis
if "Windows Executable" in type_of_the_file or ".msi" in type_of_the_file or ".dll" in type_of_the_file or ".exe" in type_of_the_file or ".drv" in type_of_the_file or ".ocx " in type_of_the_file or ".sys" in type_of_the_file or ".cpl " in type_of_the_file or ".scr" in type_of_the_file:
print(Fore.GREEN + '--- Analyzing Windows executable ---'+Fore.WHITE)
#yaraAnalysis(file_to_analyze)
command = "python3 win_analysis.py " + file_to_analyze
os.system(command)
elif ".xltx" in type_of_the_file or ".xlam" in type_of_the_file or ".docm" in type_of_the_file or ".dotx" in type_of_the_file or ".pptm" in type_of_the_file or ".xlsm" in type_of_the_file or ".ppt" in type_of_the_file or ".doc" in type_of_the_file or ".xla" in type_of_the_file:
print(Fore.GREEN + 'Analyzing Windows document...'+Fore.WHITE)
def zeus(args):
if(args.strings):
output = os.system("strings --all "+ args.strings + "> string.txt")
if(args.verbose):
command = "python3 strings.py v"
os.system(command)
if(args.hash):
hashFile(args.strings)
elif (args.hash):
command = "python3 strings.py s"
os.system(command)
hashFile(args.strings)
else:
command = "python3 strings.py s"
os.system(command)
elif(args.analyze):
analyze(args.analyze)
if(args.hash):
hashFile(args.analyze)
elif(args.yara):
yaraAnalysis(args.yara)
if(args.hash):
hashFile(args.yara)
elif(args.multifile):
print("multifile")
elif(args.docs):
print("docs")
elif(args.multihash):
print("multihash")
elif(args.metadata):
print("metadata")
elif(args.domain):
print("domain")
else:
print("Error running the script")
sys.exit(1)
if __name__ == "__main__":
#os.system("python3 dependencies/ascii.py")
args = setupParser()
zeus(args)
deleteFiles()
|
the-stack_0_21296 | ###############################################################################
#
# Tests for XlsxWriter.
#
# Copyright (c), 2013-2021, John McNamara, [email protected]
#
from ..excel_comparison_test import ExcelComparisonTest
from ...workbook import Workbook
class TestCompareXLSXFiles(ExcelComparisonTest):
"""
Test file created by XlsxWriter against a file created by Excel.
"""
def setUp(self):
self.set_filename('chart_legend05.xlsx')
def test_create_file(self):
"""Test the creation of an XlsxWriter file with legend options."""
workbook = Workbook(self.got_filename)
worksheet = workbook.add_worksheet()
chart = workbook.add_chart({'type': 'line'})
chart.axis_ids = [79973376, 84140800]
data = [
[1, 2, 3, 4, 5],
[2, 4, 6, 8, 10],
[3, 6, 9, 12, 15],
]
worksheet.write_column('A1', data[0])
worksheet.write_column('B1', data[1])
worksheet.write_column('C1', data[2])
chart.add_series({'values': '=Sheet1!$A$1:$A$5'})
chart.add_series({'values': '=Sheet1!$B$1:$B$5'})
chart.add_series({'values': '=Sheet1!$C$1:$C$5'})
chart.set_legend({'border': {'color': '#4F81BD'}})
worksheet.insert_chart('E9', chart)
workbook.close()
self.assertExcelEqual()
|
the-stack_0_21300 | """
# Sample code to perform I/O:
name = input() # Reading input from STDIN
print('Hi, %s.' % name) # Writing output to STDOUT
# Warning: Printing unwanted or ill-formatted data to output will cause the test cases to fail
"""
# Write your code here
n = int(input())
hunger_values = sorted(map(int, input().strip().split()))
ans = 0
for i in range(n - 2):
ans = max(ans, hunger_values[i + 2] - hunger_values[i])
print(ans)
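# Hedged worked example (not part of the original submission): for sorted
# hunger values [2, 5, 7, 9] the windows of three consecutive values give
# differences 7 - 2 = 5 and 9 - 5 = 4, so the loop above would print 5.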
|
the-stack_0_21302 | from base import BaseDataSet, BaseDataLoader
from utils import palette
import numpy as np
import os
import torch
import cv2
from PIL import Image
from glob import glob
from torch.utils.data import Dataset
from torchvision import transforms
class ADE20KDataset(BaseDataSet):
"""
ADE20K dataset
http://groups.csail.mit.edu/vision/datasets/ADE20K/
"""
def __init__(self, **kwargs):
self.num_classes = 150
self.palette = palette.ADE20K_palette
super(ADE20KDataset, self).__init__(**kwargs)
def _set_files(self):
if self.split in ["training", "validation"]:
self.image_dir = os.path.join(self.root, 'images', self.split)
self.label_dir = os.path.join(self.root, 'annotations', self.split)
self.files = [os.path.basename(path).split('.')[0] for path in glob(self.image_dir + '/*.jpg')]
else:
raise ValueError(f"Invalid split name {self.split}")
def _load_data(self, index):
image_id = self.files[index]
image_path = os.path.join(self.image_dir, image_id + '.jpg')
image = np.asarray(Image.open(image_path).convert('RGB'), dtype=np.float32)
label_path = os.path.join(self.label_dir, image_id + '.png')
label = np.asarray(Image.open(label_path), dtype=np.int32) - 1 # from -1 to 149
return image, label, image_id
class ADE20K(BaseDataLoader):
def __init__(self, data_dir, batch_size, split, crop_size=None, base_size=None, scale=True, num_workers=1,
val=False,
shuffle=False, flip=False, rotate=False, blur=False, augment=False, val_split=None, return_id=False):
self.MEAN = [0.48897059, 0.46548275, 0.4294]
self.STD = [0.22861765, 0.22948039, 0.24054667]
kwargs = {
'root': data_dir,
'split': split,
'mean': self.MEAN,
'std': self.STD,
'augment': augment,
'crop_size': crop_size,
'base_size': base_size,
'scale': scale,
'flip': flip,
'blur': blur,
'rotate': rotate,
'return_id': return_id,
'val': val
}
self.dataset = ADE20KDataset(**kwargs)
super(ADE20K, self).__init__(self.dataset, batch_size, shuffle, num_workers, val_split)
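# Hedged usage sketch (not part of the original file): how the ADE20K loader
# above might be constructed for training. The data directory and augmentation
# settings are illustrative assumptions, not values taken from this repository.
#
#   train_loader = ADE20K(data_dir='data/ADEChallengeData2016', batch_size=8,
#                         split='training', crop_size=380, base_size=400,
#                         augment=True, scale=True, flip=True, shuffle=True,
#                         num_workers=4)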
|