metadata (dict) | text (string, lengths 60–3.49M)
---|---
{
"source": "38elements/django-simple-jsonschema",
"score": 2
} |
#### File: django-simple-jsonschema/django_simple_jsonschema/__init__.py
```python
from django.conf import settings
from django.http import HttpResponse
from jsonschema import Draft4Validator
import json
class SimpleJsonschemaMiddleware(object):
def __init__(self):
self.set_schemas(settings.SIMPLE_JSONSCHEMA)
self.default_encoding = 'utf-8'
def get_encoding(self, request):
return request.encoding if request.encoding else self.default_encoding
def set_schemas(self, simple_jsonschema):
self._schemas = {}
for key, schema in simple_jsonschema.items():
methods, view_name = key
if isinstance(methods, tuple):
for method in methods:
schema_id = method.upper() + ':' + view_name
self._schemas[schema_id] = Draft4Validator(schema)
elif isinstance(methods, str):
schema_id = methods.upper() + ':' + view_name
self._schemas[schema_id] = Draft4Validator(schema)
def get_schema(self, request):
view_name = request.resolver_match.view_name
method = request.method
key = method + ':' + view_name
return self._schemas[key]
def process_view(self, request, view_func, view_args, view_kwargs):
try:
schema = self.get_schema(request)
except KeyError:
return None
encoding = self.get_encoding(request)
        json_data = json.loads(request.body.decode(encoding))
errors = list(schema.iter_errors(json_data))
if len(errors):
errors_data = {}
errors_data['url'] = request.path
errors_data['method'] = request.method
errors = [
{'message': e.message, 'path': list(e.path), 'schema_path': list(e.schema_path)}
for e in errors
]
errors_data['errors'] = errors
rv = json.dumps(errors_data)
return HttpResponse(rv, content_type='application/json')
setattr(request, 'json_data', json_data)
return None
```
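The middleware above looks up validators by `(HTTP method, view name)` pairs, so the shape of the `SIMPLE_JSONSCHEMA` setting matters. Below is a minimal sketch of such a setting as consumed by `set_schemas()`; the view names and schema bodies are illustrative placeholders, not part of the original project.
```python
# settings.py -- illustrative SIMPLE_JSONSCHEMA consumed by set_schemas() above
SIMPLE_JSONSCHEMA = {
    # a single HTTP method given as a string
    ('post', 'api:create_user'): {
        'type': 'object',
        'properties': {
            'name': {'type': 'string'},
            'age': {'type': 'integer', 'minimum': 0},
        },
        'required': ['name'],
    },
    # several HTTP methods given as a tuple share one schema
    (('put', 'patch'), 'api:update_user'): {
        'type': 'object',
        'properties': {'name': {'type': 'string'}},
    },
}
```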
#### File: management/commands/check_schema.py
```python
from django.core.management.base import BaseCommand
from django.utils import termcolors
from jsonschema import Draft4Validator
from jsonschema.exceptions import SchemaError
import json
class Command(BaseCommand):
can_import_settings = True
@property
def _jsonschema_exist(self):
from django.conf import settings
if not hasattr(settings, 'SIMPLE_JSONSCHEMA'):
return False
return True
@property
def _jsonschema_errors(self):
from django.conf import settings
errors = []
schemas = settings.SIMPLE_JSONSCHEMA
for url, schema in schemas.items():
try:
Draft4Validator.check_schema(schema)
except SchemaError as e:
errors.append({
'url': url,
'error': e,
'schema': json.dumps(schema, indent=4, sort_keys=True)
})
return errors
def handle(self, *args, **options):
success = termcolors.make_style(fg='green')
error = termcolors.make_style(fg='red')
if not self._jsonschema_exist:
            not_exist = '[' + error('ERROR') + '] SIMPLE_JSONSCHEMA does not exist in settings.'
self.stdout.write(not_exist)
return
errors = self._jsonschema_errors
if len(errors):
for e in errors:
title = '\n[' + error('ERROR') + '] schema of ' + str(e['url']) + ' is invalid.'
self.stdout.write(title)
self.stdout.write('path: ' + str(list(e['error'].path)))
self.stdout.write('message: ' + e['error'].message)
self.stdout.write('schema:\n' + e['schema'] + '\n')
else:
self.stdout.write('[' + success('SUCCESS') + '] All jsonschemas are OK.')
``` |
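For reference, `Draft4Validator.check_schema` raises `jsonschema.exceptions.SchemaError` when a schema does not conform to the Draft 4 metaschema, which is what the command above reports on. A small self-contained sketch with a deliberately broken schema:
```python
from jsonschema import Draft4Validator
from jsonschema.exceptions import SchemaError

bad_schema = {'type': 'strng'}  # typo: not a valid JSON Schema type name
try:
    Draft4Validator.check_schema(bad_schema)
except SchemaError as e:
    print(e.message)       # human-readable description of the problem
    print(list(e.path))    # location of the problem inside the schema
```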
{
"source": "38elements/feedhoos",
"score": 2
} |
#### File: folder/models/folder.py
```python
import json
from django.db import models
from feedhoos.reader.models.bookmark import BookmarkModel
from feedhoos.reader.models.bookmark import DEFAULT_FOLDER_ID
class FolderModel(models.Model):
title = models.CharField(max_length=32)
rating = models.PositiveSmallIntegerField(default=0)
@staticmethod
def get_json(folder_models):
dicts = map(lambda f: f.dict, folder_models)
folder_json = json.dumps(
dicts,
ensure_ascii=False, skipkeys=True
)
return folder_json
@staticmethod
def delete_and_change_bookmark(folder_id):
"""フォルダーを消す 。ブックマークのフォルダーidをデフォルトidにする。"""
folder_model = FolderModel.objects.get(pk=folder_id)
folder_model.delete()
BookmarkModel.objects.filter(folder_id=folder_id).update(folder_id=DEFAULT_FOLDER_ID)
@property
def dict(self):
d = {
"id": self.id,
"title": self.title.encode("utf-8"),
"rating": self.rating,
"type": "folder"
}
return d
class Meta:
app_label = 'folder'
```
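A minimal sketch of how `FolderModel.get_json` might be called from a view; the view function is hypothetical. Note that the original code targets Python 2, where `map()` returns a list that `json.dumps` can serialize directly; under Python 3 the `map()` call inside `get_json` would need a `list(...)` wrapper.
```python
from django.http import HttpResponse
from feedhoos.folder.models.folder import FolderModel

def list_folders(request):
    # Serialize every folder as the JSON array produced by get_json()
    folder_json = FolderModel.get_json(FolderModel.objects.all())
    return HttpResponse(folder_json, content_type='application/json')
```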
#### File: folder/views/delete.py
```python
import json
from django.http import HttpResponse
from feedhoos.folder.models.folder import FolderModel
def execute(request):
folder_id = request.POST["id"]
FolderModel.delete_and_change_bookmark(folder_id)
response_json = json.dumps({"msg": "OK"}, ensure_ascii=False, skipkeys=True)
return HttpResponse(response_json, content_type='application/json')
```
#### File: reader/models/bookmark.py
```python
import json
from django.db import models
DEFAULT_FOLDER_ID = 0
class BookmarkModel(models.Model):
""" しおり """
feed_id = models.IntegerField(unique=True)
    # FeedModel's last_updated value at the time this feed was displayed;
    # records that entries up to last_updated have been read
last_updated = models.IntegerField(default=0)
rating = models.PositiveSmallIntegerField(default=0)
folder_id = models.IntegerField(default=DEFAULT_FOLDER_ID, db_index=True)
@staticmethod
def get_json(bookmark_models):
bookmarks_dict = {
"0": {
"rating": 6,
"folder_id": DEFAULT_FOLDER_ID
}
}
for b in bookmark_models:
bookmarks_dict[str(b.feed_id)] = {
"folder_id": b.folder_id,
"rating": b.rating,
}
bookmark_json = json.dumps(
bookmarks_dict,
ensure_ascii=False, skipkeys=True
)
return bookmark_json
class Meta:
app_label = 'reader'
```
#### File: reader/views/delete.py
```python
import json
from django.http import HttpResponse
from feedhoos.worker.models.entry import EntryModel
from feedhoos.finder.models.feed import FeedModel
from feedhoos.reader.models.bookmark import BookmarkModel
from django.db import transaction
@transaction.commit_on_success
def execute(request):
feed_id = request.POST["id"]
feed_id = int(feed_id)
FeedModel.objects.get(pk=feed_id).delete()
BookmarkModel.objects.filter(feed_id__exact=feed_id).delete()
EntryModel.objects.filter(
feed_id__exact=feed_id
).delete()
response_json = json.dumps({"msg": "OK"}, ensure_ascii=False, skipkeys=True)
return HttpResponse(response_json, content_type='application/json')
```
#### File: reader/views/timeline.py
```python
import json
from django.http import HttpResponse
from feedhoos.worker.models.entry import EntryModel
from feedhoos.finder.models.feed import FeedModel
def execute(request, feed_id="0", page="1"):
feed_id = int(feed_id)
feed_dict = {"id": feed_id, "title": "登録されているすべてのFeed"}
if feed_id:
        # FIXME: handle the case where no matching record exists
feed_model = FeedModel.objects.get(pk=feed_id)
feed_dict = feed_model.dict
entry_models = EntryModel.get_timeline(feed_id, page)
entry_dicts = map(lambda e: e.dict, entry_models)
timeline_json = json.dumps(
{"entries": entry_dicts, "feed": feed_dict},
ensure_ascii=False, skipkeys=True
)
return HttpResponse(timeline_json, content_type='application/json')
```
#### File: worker/models/entry.py
```python
from django.db import models
import time
import datetime
MAX_CONTENT_LENGTH = 10240
PER_PAGE = 200
class EntryModel(models.Model):
feed_id = models.IntegerField()
url = models.URLField(max_length=256)
title = models.CharField(max_length=64)
updated = models.IntegerField()
content = models.TextField()
@property
def dict(self):
d = {
"id": self.id,
"url": self.url.encode("utf-8"),
"feed_id": self.feed_id,
"title": self.title.encode("utf-8"),
"updated": self.updated_stftime,
"content": self.content.encode("utf-8"),
}
return d
@property
def updated_stftime(self):
datetime_obj = datetime.datetime.fromtimestamp(self.updated)
return datetime_obj.strftime('%Y-%m-%d %H:%M')
@staticmethod
def count(feed_id, min_updated=0):
feed_id = int(feed_id)
count = EntryModel.objects.all().filter(
feed_id=feed_id
).filter(
updated__gt=min_updated
).order_by("-updated").count()
return count
@staticmethod
def get_entries(feed_id, page, min_updated=None):
"""reading"""
feed_id = int(feed_id)
page = int(page)
start_index = (page - 1) * PER_PAGE
end_index = (page) * PER_PAGE
try:
query = EntryModel.objects.all().filter(
feed_id=feed_id
)
if min_updated:
query = query.filter(
updated__gt=min_updated
)
entries = query.order_by("-updated")[start_index:end_index]
except EntryModel.DoesNotExist:
entries = []
return entries
@staticmethod
def get_timeline(feed_id, page):
feed_id = int(feed_id)
page = int(page)
start_index = (page - 1) * PER_PAGE
end_index = (page) * PER_PAGE
try:
query = EntryModel.objects.all()
if feed_id:
query = query.filter(
feed_id=feed_id
)
entries = query.order_by("-updated")[start_index:end_index]
except EntryModel.DoesNotExist:
entries = []
return entries
@staticmethod
def get_folder(feed_ids, page):
page = int(page)
start_index = (page - 1) * PER_PAGE
end_index = (page) * PER_PAGE
try:
query = EntryModel.objects.filter(
feed_id__in=feed_ids
)
entries = query.order_by("-updated")[start_index:end_index]
except EntryModel.DoesNotExist:
entries = []
return entries
@staticmethod
def get_content(entry):
if ("content" in entry and entry.content and
len(entry.content) >= 1 and len(entry.content[0]["value"]) <= MAX_CONTENT_LENGTH):
return entry.content[0]["value"]
elif "summary" in entry and entry.summary:
return entry.summary if len(entry.summary) <= MAX_CONTENT_LENGTH else ""
else:
return ""
@staticmethod
def add(feed_id, entry):
entry_model = EntryModel(
feed_id=feed_id,
url=entry.link,
title=entry.title,
updated=int(time.mktime(entry.updated_parsed)),
content=EntryModel.get_content(entry)
)
entry_model.save()
return entry_model
class Meta:
app_label = 'worker'
index_together = (
("feed_id", "updated"),
)
unique_together = (
("url", "updated", "feed_id"),
)
``` |
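`EntryModel.add` and `get_content` are shaped around feedparser-style entries (`link`, `title`, `updated_parsed`, `content[0]["value"]`, `summary`). Below is a hedged sketch of a worker loop that feeds them, assuming the feedparser package; the function and feed URL are placeholders, and a real worker would also need to catch the `IntegrityError` raised when a duplicate entry violates `unique_together`.
```python
import feedparser
from feedhoos.worker.models.entry import EntryModel

def fetch_feed(feed_id, feed_url):
    parsed = feedparser.parse(feed_url)
    for entry in parsed.entries:
        # add() derives url, title, updated and content from the parsed entry
        EntryModel.add(feed_id, entry)
```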
{
"source": "38/plumber",
"score": 2
} |
#### File: examples/pyecho/echo.py
```python
import pservlet
def init(args):
return (pservlet.pipe_define("in", pservlet.PIPE_INPUT),
pservlet.pipe_define("out", pservlet.PIPE_OUTPUT))
def execute(s):
while True:
tmp = pservlet.pipe_read(s[0])
if not tmp:
if not pservlet.pipe_eof(s[0]):
pservlet.pipe_set_flag(s[0], pservlet.PIPE_PERSIST)
else:
pservlet.pipe_clr_flag(s[0], pservlet.PIPE_PERSIST)
return 0
else:
pservlet.pipe_write(s[1], tmp)
def cleanup(s):
return 0
```
#### File: pyservlet/PyServlet/Log.py
```python
from pservlet import LOG_FATAL, LOG_ERROR, LOG_NOTICE, LOG_WARNING, LOG_INFO, LOG_TRACE, LOG_DEBUG, log
def error(message):
log(LOG_ERROR, message)
def warning(message):
log(LOG_WARNING, message)
def notice(message):
log(LOG_NOTICE, message)
def info(message):
log(LOG_INFO, message)
def trace(message):
log(LOG_TRACE, message)
def debug(message):
log(LOG_DEBUG, message)
```
#### File: pyservlet/PyServlet/RLS.py
```python
import PyServlet.Type
import pservlet
class String(PyServlet.Type.ModelBase):
token = PyServlet.Type.ScopeToken()
def read(self):
rls_obj = pservlet.RLS_Object(pservlet.SCOPE_TYPE_STRING, self.token)
return pservlet.RLS_String.get_value(rls_obj)
def write(self, val):
rls_obj = pservlet.RLS_Object(pservlet.SCOPE_TYPE_STRING, -1, val)
self.token = rls_obj.get_token()
``` |
{
"source": "394781865/keras-yolo3",
"score": 3
} |
#### File: keras-yolo3/new_file/yolo_image.py
```python
import sys,os
import argparse
from yolo import YOLO, detect_video
from PIL import Image
import cv2
# Build an upper-body detection dataset suitable for MTCNN
def detect_img(yolo):
image_path = '/home/lichen/keras-yolo3/images/'
txt_data = open("/home/lichen/keras-yolo3/person.txt","w")
for img in os.listdir(image_path):
img_path = image_path + img
image = Image.open(img_path)
print(img_path)
try:
top, left, bottom, right = yolo.detect_image(image,img_path)
        except Exception:
            continue
        else:
txt_data.write(str(img_path)+" ")
txt_data.write(str(left)+" ")
txt_data.write(str(top)+" ")
txt_data.write(str(abs(right-left))+" ")
txt_data.write(str(abs(bottom-top)))
txt_data.write("\n")
txt_data.close()
if __name__ == '__main__':
parser = argparse.ArgumentParser(argument_default=argparse.SUPPRESS)
parser.add_argument(
'--model', type=str,default="/home/lichen/keras-yolo3/model_data/yolo.h5",
help='path to model weight file, default ' + YOLO.get_defaults("model_path")
)
parser.add_argument(
'--anchors', type=str,default="/home/lichen/keras-yolo3/model_data/yolo_anchors.txt",
help='path to anchor definitions, default ' + YOLO.get_defaults("anchors_path")
)
parser.add_argument(
'--classes', type=str,default="person",
help='path to class definitions, default ' + YOLO.get_defaults("classes_path")
)
parser.add_argument(
'--gpu_num', type=int,default=0,
help='Number of GPU to use, default ' + str(YOLO.get_defaults("gpu_num"))
)
parser.add_argument(
"--input", nargs='?', type=str,required=False,default='/home/lichen/keras-yolo3/images/',
help = "Video input path"
)
parser.add_argument(
"--output", nargs='?', type=str, default="",
help = "[Optional] Video output path"
)
FLAGS = parser.parse_args()
detect_img(YOLO(**vars(FLAGS)))
``` |
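Each line written by `detect_img` above has the form `<image_path> <left> <top> <width> <height>`. A small sketch of reading that annotation file back, e.g. for feeding an MTCNN training pipeline; the default path is the same placeholder used above.
```python
def load_annotations(txt_path="/home/lichen/keras-yolo3/person.txt"):
    """Parse 'path left top width height' lines into a dict keyed by image path."""
    boxes = {}
    with open(txt_path) as f:
        for line in f:
            parts = line.split()
            if len(parts) != 5:
                continue  # skip malformed or truncated lines
            left, top, width, height = (float(v) for v in parts[1:])
            boxes[parts[0]] = (left, top, width, height)
    return boxes
```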
{
"source": "395299296/liaotian-robot",
"score": 2
} |
#### File: app/main/forms.py
```python
from flask_mongoengine.wtf import model_form
from flask_wtf import FlaskForm
from wtforms import StringField, PasswordField, BooleanField, TextAreaField, HiddenField, RadioField, FileField, IntegerField
from wtforms import widgets, ValidationError
from wtforms.validators import Required, Length, Email, Regexp, EqualTo, URL, Optional
from . import models
class PostForm(FlaskForm):
title = StringField('Title', validators=[Required()])
slug = StringField('Slug', validators=[Required()])
raw = TextAreaField('Content')
abstract = TextAreaField('Abstract')
category = StringField('Category')
tags_str = StringField('Tags')
post_id = HiddenField('post_id')
post_type = HiddenField('post_type')
from_draft = HiddenField('from_draft')
def validate_slug(self, field):
if self.from_draft.data and self.from_draft.data == 'true':
posts = models.Draft.objects.filter(slug=field.data)
else:
posts = models.Post.objects.filter(slug=field.data)
if posts.count() > 0:
if not self.post_id.data or str(posts[0].id) != self.post_id.data:
raise ValidationError('slug already in use')
SuPostForm = model_form(models.Post, exclude=['pub_time', 'update_time', 'content_html', 'category', 'tags', 'post_type'])
class WidgetForm(FlaskForm):
title = StringField('Title', validators=[Required()])
content = TextAreaField('Content', validators=[Required()])
content_type = RadioField('Content Type', choices=[('markdown', 'markdown'), ('html', 'html')], default='html')
priority = IntegerField(default=1000000)
class CommentForm(FlaskForm):
email = StringField('* Email', validators=[Required(), Length(1,128), Email()])
author = StringField('* Name', validators=[Required(), Length(1,128)])
homepage = StringField('Homepage', validators=[URL(), Optional()])
content = TextAreaField('* Comment <small><span class="label label-info">markdown</span></small>', validators=[Required()])
comment_id = HiddenField('comment_id')
class SessionCommentForm(FlaskForm):
email = HiddenField('* Email')
author = HiddenField('* Name')
homepage = HiddenField('Homepage')
content = TextAreaField('* Comment', validators=[Required()])
comment_id = HiddenField('comment_id')
class ImportCommentForm(FlaskForm):
content = TextAreaField('Content')
json_file = FileField('Json File')
import_format = RadioField('Import Format', choices=[('text', 'text'), ('file', 'file')], default='text')
```
#### File: app/main/views.py
```python
try:
from urlparse import urljoin
except ImportError:
from urllib.parse import urljoin
from datetime import datetime, timedelta
from flask import request, redirect, render_template, url_for, abort, flash, g, session
from flask import current_app, make_response, send_from_directory
from flask.views import MethodView
# from flask.ext.login import login_required, current_user
from flask_login import login_required, current_user
from werkzeug.contrib.atom import AtomFeed
from mongoengine.queryset.visitor import Q
from . import models, signals, forms
from accounts.models import User
from accounts.permissions import admin_permission, editor_permission, writer_permission, reader_permission
from blog.config import RobotBlogSettings
from . import config, reply, receive, chatter
from random import randint
from imp import reload
from urllib.parse import quote, unquote
from collections import deque
from collections import defaultdict
import hashlib
import os
PER_PAGE = RobotBlogSettings['pagination'].get('per_page', 10)
ARCHIVE_PER_PAGE = RobotBlogSettings['pagination'].get('archive_per_page', 10)
BACKGROUND = RobotBlogSettings['background_image']
CACHE_CONTENT = defaultdict(deque)
def get_base_data():
pages = models.Post.objects.filter(post_type='page', is_draft=False)
blog_meta = RobotBlogSettings['blog_meta']
data = {
'blog_meta': blog_meta,
'pages': pages,
'bg_home': BACKGROUND['home'],
'bg_post': BACKGROUND['post'],
'bg_about': BACKGROUND['about'],
}
return data
def get_keywords():
reload(config)
keywords = config.Config.keys()
return '、'.join(keywords)
def get_content(content):
reload(config)
contentlist = deque()
for x in config.Config:
if x in content:
p = __import__("main.%s"%config.Config[x][0]) # import module
m = getattr(p,config.Config[x][0])
reload(m)
c = getattr(m,config.Config[x][1])
try:
contentlist = CACHE_CONTENT[x]
if not contentlist:
contentlist = deque(c().getContent(content))
if contentlist:
CACHE_CONTENT[x] = contentlist
if contentlist:
return contentlist.popleft()
except Exception as e:
print(str(e))
raise e
else:
content = ''
# contentlist = CACHE_CONTENT[content]
# if not contentlist:
# reload(chatter)
# contentlist = deque([chatter.Chatter().getContent(content)])
# if contentlist:
# CACHE_CONTENT[content] = contentlist
# if contentlist:
# return contentlist.popleft()
return {'type':'text', 'content':content}
def index():
return 'Hello'
def blog_index():
posts = models.Post.objects.filter(post_type='post', is_draft=False).order_by('-pub_time')
tags = posts.distinct('tags')
try:
cur_page = int(request.args.get('page', 1))
except ValueError:
cur_page = 1
cur_category = request.args.get('category')
cur_tag = request.args.get('tag')
keywords = request.args.get('keywords')
if keywords:
# posts = posts.filter(raw__contains=keywords )
posts = posts.filter(Q(raw__contains=keywords) | Q(title__contains=keywords))
if cur_category:
posts = posts.filter(category=cur_category)
if cur_tag:
posts = posts.filter(tags=cur_tag)
#group by aggregate
category_cursor = models.Post._get_collection().aggregate([
{ '$group' :
{ '_id' : {'category' : '$category' },
'name' : { '$first' : '$category' },
'count' : { '$sum' : 1 },
}
}
])
widgets = models.Widget.objects(allow_post_types='post')
posts = posts.paginate(page=cur_page, per_page=PER_PAGE)
data = get_base_data()
data['posts'] = posts
data['cur_category'] = cur_category
data['category_cursor'] = category_cursor
data['cur_tag'] = cur_tag
data['tags'] = tags
data['keywords'] = keywords
data['widgets'] = widgets
return render_template('main/index.html', **data)
def wechat_auth():
data = request.args
test = data.get('test','')
if test != '':
content = get_content(test)
return content['content']
signature = data.get('signature','')
if signature == '':
return 'error'
timestamp = data.get('timestamp','')
nonce = data.get('nonce','')
echostr = data.get('echostr','')
s = [timestamp,nonce,config.Token]
s.sort()
s = ''.join(s).encode('utf8')
if (hashlib.sha1(s).hexdigest() != signature):
return 'failed'
return make_response(echostr)
def wechat_inter():
xml_str = request.stream.read()
# print('Coming Post', xml_str)
recMsg = receive.parse_xml(xml_str)
toUser = recMsg.FromUserName
fromUser = recMsg.ToUserName
replyMsg = reply.Msg(toUser, fromUser)
if isinstance(recMsg, receive.TextMsg):
content = recMsg.Content
response = get_content(content)
msgType = response['type']
content = response['content']
if content != None:
if msgType == 'text':
replyMsg = reply.TextMsg(toUser, fromUser, content)
elif msgType == 'news':
replyMsg = reply.NewsMsg(toUser, fromUser, response['title'], response['content'], response['pic_url'], response['url'])
elif isinstance(recMsg, receive.ImageMsg):
pass
elif isinstance(recMsg, receive.EventMsg):
if recMsg.Event == 'subscribe':
content = config.Welcome.format(key=get_keywords())
replyMsg = reply.TextMsg(toUser, fromUser, content)
return replyMsg.send()
def girl_download(filename):
filename = unquote(filename)
path = os.path.abspath('girl')
if os.path.isfile(os.path.join(path, filename)):
return send_from_directory(path, filename)
abort(404)
def girl_download2(dirname, filename):
filename = unquote(filename)
dirname = unquote(dirname)
path = os.path.abspath('girl')
path = os.path.join(path, dirname)
if os.path.isfile(os.path.join(path, filename)):
return send_from_directory(path, filename)
abort(404)
def list_posts():
if request.method == 'GET':
data = request.args
print('Coming Get', data)
if not data:
return blog_index()
else:
return wechat_auth()
if request.method == 'POST':
return wechat_inter()
def list_wechats():
posts = models.Post.objects.filter(post_type='wechat', is_draft=False).order_by('-pub_time')
tags = posts.distinct('tags')
try:
cur_page = int(request.args.get('page', 1))
except ValueError:
cur_page = 1
cur_tag = request.args.get('tag')
keywords = request.args.get('keywords')
if keywords:
# posts = posts.filter(raw__contains=keywords )
posts = posts.filter(Q(raw__contains=keywords) | Q(title__contains=keywords))
if cur_tag:
posts = posts.filter(tags=cur_tag)
posts = posts.paginate(page=cur_page, per_page=PER_PAGE)
widgets = models.Widget.objects(allow_post_types='wechat')
data = get_base_data()
data['posts'] = posts
data['cur_tag'] = cur_tag
data['tags'] = tags
data['keywords'] = keywords
data['widgets'] = widgets
return render_template('main/wechat_list.html', **data)
def post_detail(slug, post_type='post', fix=False, is_preview=False):
if is_preview:
if not g.identity.can(reader_permission):
abort(401)
post = models.Draft.objects.get_or_404(slug=slug, post_type=post_type)
else:
post = models.Post.objects.get_or_404(slug=slug, post_type=post_type) if not fix else models.Post.objects.get_or_404(fix_slug=slug, post_type=post_type)
# this block is abandoned
if post.is_draft and current_user.is_anonymous:
abort(404)
data = get_base_data()
data['post'] = post
data['post_type'] = post_type
if request.form:
form = forms.CommentForm(obj=request.form)
else:
obj = {'author': session.get('author'), 'email': session.get('email'),'homepage': session.get('homepage'),}
form = forms.CommentForm(**obj)
# print session.get('email')
if request.form.get('robot-comment') and form.validate_on_submit():
robotblog_create_comment(form, post)
url = '{0}#comment'.format(url_for('main.post_detail', slug=slug))
msg = 'Succeed to comment, and it will be displayed when the administrator reviews it.'
flash(msg, 'success')
return redirect(url)
data['allow_donate'] = RobotBlogSettings['donation']['allow_donate']
data['donation_msg'] = RobotBlogSettings['donation']['donation_msg']
data['donation_img_url'] = RobotBlogSettings['donation']['donation_img_url']
data['display_wechat'] = RobotBlogSettings['wechat']['display_wechat']
data['wechat_msg'] = RobotBlogSettings['wechat']['wechat_msg']
data['wechat_image_url'] = RobotBlogSettings['wechat']['wechat_image_url']
data['wechat_title'] = RobotBlogSettings['wechat']['wechat_title']
data['display_copyright'] = RobotBlogSettings['copyright']['display_copyright']
data['copyright_msg'] = RobotBlogSettings['copyright']['copyright_msg']
data['allow_comment'] = RobotBlogSettings['blog_comment']['allow_comment']
if data['allow_comment']:
comment_type = RobotBlogSettings['blog_comment']['comment_type']
comment_shortname = RobotBlogSettings['blog_comment']['comment_opt'][comment_type]
comment_func = get_comment_func(comment_type)
data['comment_html'] = comment_func(slug, post.title, request.base_url, comment_shortname, form=form) if comment_func else ''
data['allow_share_article'] = RobotBlogSettings['allow_share_article']
# if data['allow_share_article']:
# data['share_html'] = jiathis_share()
# send signal
if not is_preview:
signals.post_visited.send(current_app._get_current_object(), post=post)
templates = {
'post': 'main/post.html',
'page': 'main/post.html',
'wechat': 'main/wechat_detail.html',
}
return render_template(templates[post_type], **data)
def post_preview(slug, post_type='post'):
return post_detail(slug=slug, post_type=post_type, is_preview=True)
def post_detail_general(slug, post_type):
is_preview = request.args.get('is_preview', 'false')
is_preview = True if is_preview.lower()=='true' else False
return post_detail(slug=slug, post_type=post_type, is_preview=is_preview)
def author_detail(username):
author = User.objects.get_or_404(username=username)
posts = models.Post.objects.filter(post_type='post', is_draft=False, author=author).order_by('-pub_time')
cur_page = request.args.get('page', 1)
posts = posts.paginate(page=int(cur_page), per_page=ARCHIVE_PER_PAGE)
data = get_base_data()
data['user'] = author
data['posts'] = posts
# data['category_cursor'] = category_cursor
# data['cur_tag'] = cur_tag
# data['tags'] = tags
# data['keywords'] = keywords
return render_template('main/author.html', **data)
def get_comment_func(comment_type):
# if comment_type == 'duoshuo':
# return duoshuo_comment
# else:
# return None
comment_func = {
'robotblog': robotblog_comment,
'duoshuo': duoshuo_comment,
}
return comment_func.get(comment_type)
def robotblog_comment(post_id, post_title, post_url, comment_shortname, form=None, *args, **kwargs):
template_name = 'main/comments.html'
comments = models.Comment.objects(post_slug=post_id, status='approved').order_by('pub_time')
# print comments[0].get_gavatar_url()
# if not form:
# if session.get('author'):
# print 'session'
# return 'session'
# data = {'author': session['author'], 'email': session['email'],'homepage': session['homepage'],}
# form = forms.SessionCommentForm(obj=data)
# else:
# print 'no session'
# return 'no session'
# form = forms.CommentForm(obj=request.form)
data = {
'form': form,
'comments': comments,
'slug': post_id,
}
return render_template(template_name, **data)
def robotblog_create_comment(form, post):
comment = models.Comment()
comment.author = form.author.data.strip()
comment.email = form.email.data.strip()
comment.homepage = form.homepage.data.strip() or None
comment.post_slug = post.slug
comment.post_title = post.title
comment.md_content = form.content.data.strip()
comment.save()
session['author'] = form.author.data.strip()
session['email'] = form.email.data.strip()
session['homepage'] = form.homepage.data.strip()
def duoshuo_comment(post_id, post_title, post_url, duoshuo_shortname, *args, **kwargs):
'''
Create duoshuo script by params
'''
template_name = 'main/misc/duoshuo.html'
data = {
'duoshuo_shortname': duoshuo_shortname,
'post_id': post_id,
'post_title': post_title,
'post_url': post_url,
}
return render_template(template_name, **data)
# def jiathis_share():
# '''
# Create duoshuo script by params
# '''
# template_name = 'main/misc/jiathis_share.html'
# return render_template(template_name)
def archive():
posts = models.Post.objects.filter(post_type='post', is_draft=False).order_by('-pub_time')
cur_category = request.args.get('category')
cur_tag = request.args.get('tag')
cur_page = request.args.get('page', 1)
if cur_category:
posts = posts.filter(category=cur_category)
if cur_tag:
posts = posts.filter(tags=cur_tag)
posts = posts.paginate(page=int(cur_page), per_page=ARCHIVE_PER_PAGE)
data = get_base_data()
data['posts'] = posts
return render_template('main/archive.html', **data)
def make_external(url):
return urljoin(request.url_root, url)
def get_post_footer(allow_donate=False, donation_msg=None,
display_wechat=False, wechat_msg=None,
display_copyright=False, copyright_msg=None, *args, **kwargs):
template_name = 'main/misc/post_footer.html'
data = {}
data['allow_donate'] = allow_donate
data['donation_msg'] = donation_msg
data['display_wechat'] = display_wechat
data['wechat_msg'] = wechat_msg
data['display_copyright'] = display_copyright
data['copyright_msg'] = copyright_msg
return render_template(template_name, **data)
def recent_feed():
feed_title = RobotBlogSettings['blog_meta']['name']
feed = AtomFeed(feed_title, feed_url=request.url, url=request.url_root)
# data = {}
# data['allow_donate'] = RobotBlogSettings['donation']['allow_donate']
# data['donation_msg'] = RobotBlogSettings['donation']['donation_msg']
# data['display_wechat'] = RobotBlogSettings['wechat']['display_wechat']
# data['wechat_msg'] = RobotBlogSettings['wechat']['wechat_msg']
# data['display_copyright'] = RobotBlogSettings['copyright']['display_copyright']
# data['copyright_msg'] = RobotBlogSettings['copyright']['copyright_msg']
# post_footer = get_post_footer(**data)
posts = models.Post.objects.filter(post_type='post', is_draft=False)[:15]
only_abstract_in_feed = RobotBlogSettings['only_abstract_in_feed']
content = 'abstract' if only_abstract_in_feed else 'content_html'
for post in posts:
# return post.get_absolute_url()
feed.add(post.title,
# unicode(post.content_html),
# post.abstract,
getattr(post, content),
content_type='html',
author=post.author.username,
url=post.get_absolute_url(),
updated=post.update_time,
published=post.pub_time)
return feed.get_response()
def sitemap():
"""Generate sitemap.xml. Makes a list of urls and date modified."""
pages=[]
#########################
# static pages
#########################
# ten_days_ago=(datetime.now() - timedelta(days=10)).date().isoformat()
# for rule in current_app.url_map.iter_rules():
# if "GET" in rule.methods and len(rule.arguments)==0:
# pages.append(
# [rule.rule,ten_days_ago]
# )
## user model pages
# users=User.query.order_by(User.modified_time).all()
# for user in users:
# url=url_for('user.pub',name=user.name)
# modified_time=user.modified_time.date().isoformat()
# pages.append([url,modified_time])
######################
# Post Pages
######################
posts = models.Post.objects.filter(is_draft=False, post_type='post')
for post in posts:
pages.append((post.get_absolute_url(), post.update_time.date().isoformat(), 'weekly', '0.8'))
######################
# Blog-Page Pages
######################
blog_pages = models.Post.objects.filter(is_draft=False, post_type='page')
for page in blog_pages:
pages.append((page.get_absolute_url(), page.update_time.date().isoformat(), 'monthly', '0.7'))
######################
# Wechat Pages
######################
posts = models.Post.objects.filter(is_draft=False, post_type='wechat')
for post in posts:
pages.append((post.get_absolute_url(), post.update_time.date().isoformat(), 'weekly', '0.6'))
sitemap_xml = render_template('main/sitemap.xml', pages=pages)
response= make_response(sitemap_xml)
response.headers["Content-Type"] = "application/xml"
return response
```
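`get_content` above dispatches on a `config.Config` mapping: each keyword points to a `(module_name, class_name)` pair under the `main` package, and the named class exposes a `getContent(text)` method returning reply dicts. A hedged sketch of what that `config.py` might look like; the keywords, module names, and class names are illustrative placeholders, not the project's real plugins.
```python
# config.py -- illustrative shape only
Token = 'your-wechat-token'   # checked by wechat_auth() when verifying signatures
Welcome = 'Welcome! Try one of these keywords: {key}'

Config = {
    # keyword in the incoming message -> (module under "main", class with getContent())
    'weather': ('weather', 'Weather'),
    'joke': ('joke', 'Joke'),
}
```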
#### File: liaotian-robot/app/manage.py
```python
import os, sys
sys.path.append(os.path.abspath(os.path.dirname(__file__)))
# from flask.ext.script import Manager, Server
from flask_script import Manager, Server
# from blog import create_app
# app = create_app(os.getenv('config') or 'default')
from blog import app
manager = Manager(app)
# Turn on debugger by default and reloader
manager.add_command("runserver", Server(
use_debugger = True,
use_reloader = True,
host = '0.0.0.0',
port = 8080)
)
@manager.command
def test():
import unittest
tests = unittest.TestLoader().discover('tests')
unittest.TextTestRunner(verbosity=2).run(tests)
if __name__ == "__main__":
manager.run()
``` |
{
"source": "398786172/keithley",
"score": 3
} |
#### File: Application_Specific/Increase_4-Wire_Resistance_Measurement_Capability_with_Common-side_Ohms/KEI3706A_CommonSideOhms_with_Scripting.py
```python
import socket
import struct
import math
import time
echoCmd = 0
functions_path = "commonside_funcs.tsp" # This file holds the set of TSP (Lua-
# based) functions that are called by
# the Python script to help minimize the
                                         # amount of bytes needed to set up and,
                                         # more importantly, extract readings from
                                         # the instrument. The file is opened and
                                         # its contents are loaded by the
                                         # Load_Functions() call below.
def instrConnect(mySocket, myAddress, myPort, timeOut, doReset, doIdQuery):
mySocket.connect((myAddress, myPort)) # input to connect must be a tuple
mySocket.settimeout(timeOut)
if doReset == 1:
instrSend(mySocket, "reset()")
if doIdQuery == 1:
tmpId = instrQuery(mySocket, "*IDN?", 100)
print(tmpId)
return mySocket
def instrDisconnect(mySocket):
mySocket.close()
return
def instrSend(mySocket, cmd):
if echoCmd == 1:
print(cmd)
cmd = "{0}\n".format(cmd)
mySocket.send(cmd.encode())
return
def instrQuery(mySocket, cmd, rcvSize):
instrSend(mySocket, cmd)
time.sleep(0.1)
return mySocket.recv(rcvSize).decode()
def Load_Functions(s):
# This function opens the functions.lua file in the same directory as
    # the Python script and transfers its contents to the instrument's internal
# memory. All the functions defined in the file are callable by the
# controlling program.
func_file = open(functions_path, "r")
contents = func_file.read()
func_file.close()
cmd = "loadandrunscript loadfuncs\n{0}\nendscript".format(contents)
instrSend(s, cmd)
print(s.recv(100).decode())
return
ip_address = "192.168.1.37" # Place your instrument's IP address here.
# Instrument ID String examples...
# LAN -> TCPIP0::134.63.71.209::inst0::INSTR
# USB -> USB0::0x05E6::0x2450::01419962::INSTR
# GPIB -> GPIB0::16::INSTR
# Serial -> ASRL4::INSTR
my_port = 5025
myRange = 10.0
myNplc = 1.0
filterCnt = 10
channelStr = "1001:1005"
bufferSize = 500
scanCount = 10
s = socket.socket() # Establish a TCP/IP socket object
# Open the socket connection
instrConnect(s, ip_address, my_port, 20000, 1, 1)
Load_Functions(s)
t1 = time.time() # Start the timer...
instrSend(s, "do_beep(0.5, 4000)")
# Configure channels for commonside-ohms...
instrSend(s, "csohmsSetup({}, {}, {}, \"{}\")".format(myRange, myNplc, filterCnt, channelStr))
# Define a custom buffer
instrSend(s, "setupBuffer({})".format(bufferSize))
# Configure the scan attributes
instrSend(s, "scanSetup({}, \"{}\")".format(scanCount, channelStr))
# Start the scan
instrSend(s, "initScan()")
# The following loop determines if a scan iteration has completed
# then outputs the readings and channel numbers.
rdgsCnt = 0
extractSize = 5
startIndex = 1
endIndex = 5
while endIndex <= 50:
rdgsCnt = int(float(instrQuery(s, "print(mybuffer.n)", 32)))
if (rdgsCnt >= endIndex):
print(instrQuery(s, "printbuffer({}, {}, mybuffer, mybuffer.channels)".format(startIndex, endIndex), 512)[:-1])
startIndex += extractSize
endIndex += extractSize
else:
time.sleep(0.5)
t2 = time.time() # Stop the timer...
instrSend(s, "computeStats()")
print("\n")
print("CH1001 MEAN, PK2PK, MAX, MIN = {}".format(instrQuery(s, "getChanStats(1001)", 128))[:-1])
print("CH1002 MEAN, PK2PK, MAX, MIN = {}".format(instrQuery(s, "getChanStats(1002)", 128))[:-1])
print("CH1003 MEAN, PK2PK, MAX, MIN = {}".format(instrQuery(s, "getChanStats(1003)", 128))[:-1])
print("CH1004 MEAN, PK2PK, MAX, MIN = {}".format(instrQuery(s, "getChanStats(1004)", 128))[:-1])
print("CH1005 MEAN, PK2PK, MAX, MIN = {}".format(instrQuery(s, "getChanStats(1005)", 128))[:-1])
print("\n")
# Close the socket connection
instrDisconnect(s)
# Notify the user of completion and the data streaming rate achieved.
print("done")
print("Time Elapsed: {0:.3f} s".format(t2-t1))
input("Press Enter to continue...")
exit()
```
#### File: DMM6500_DAQ6510/DMM6500_Python_Sockets_Driver/DAQ6510_04_4WResistance_Scan_w_InitialStateWrite.py
```python
import socket
import struct
import math
import time
import Keithley_DMM6500_Sockets_Driver as kei
from ISStreamer.Streamer import Streamer
def writeToInitialState(ch107, ch108, ch109):
streamer.log("CH107", ch107)
streamer.log("CH108", ch108)
streamer.log("CH109", ch109)
return
#===== MAIN PROGRAM STARTS HERE =====
ipAddress1 = "192.168.1.165"
port = 5025
timeout = 20.0
myFile = "dmm_functions.tsp"
bucketName = time.strftime("DAQ6510_Data_%Y-%m-%d_%H-%M-%S")
myAccessKey = "YOUR_ACCESSS_KEY_GOES_HERE"
streamer = Streamer(bucket_name=bucketName,
access_key=myAccessKey)
DAQ6510 = kei.DMM6500()
myID = DAQ6510.Connect(ipAddress1, 5025, 20000, 1, 1)
DAQ6510.echoCmd = 1
scanCount = 10
scanInterval = 1.0 # for this setup, limit to no less than 5s intervals
print(myID)
t1 = time.time()
DAQ6510.LoadScriptFile(myFile)
DAQ6510.SendCmd("do_beep(1.0, 3500)")
DAQ6510.Reset()
DAQ6510.SetFunction_4W_Resistance("107:109", DAQ6510.OCOMP.ON, DAQ6510.OLeadDetect.ON)
DAQ6510.SetMeasure_Range("107:109", DAQ6510.AutoRange.ON)
DAQ6510.SetMeasure_NPLC("107:109", 1.0)
DAQ6510.SetMeasure_AutoDelay("107:109", DAQ6510.DmmState.ON)
DAQ6510.SetMeasure_AutoZero("107:109", DAQ6510.DmmState.ON)
DAQ6510.SetMeasure_Count("107:109", 1)
DAQ6510.SetScan_BasicAttributes("107:109", scanCount, scanInterval)
DAQ6510.Init()
startIndex = 1
endIndex = 3
chanCnt = 3
targetCnt = scanCount * chanCnt
loopCnt = 1
accumCnt = DAQ6510.QueryCmd("print(defbuffer1.n)", 8)
while(endIndex < (targetCnt+1)):
myData = DAQ6510.GetScan_Data(chanCnt, startIndex, endIndex)
print("Scan {}: {}".format(loopCnt, myData))
myDataList = myData.split(",")
writeToInitialState(float(myDataList[0]), float(myDataList[1]), float(myDataList[2]))
startIndex += chanCnt
endIndex += chanCnt
loopCnt += 1
DAQ6510.Disconnect()
t2 = time.time()
# Notify the user of completion and the test time achieved.
print("done")
print("{0:.6f} s".format(t2-t1))
input("Press Enter to continue...")
exit()
```
#### File: DMM6500_DAQ6510/DMM6500_Python_VISA_Driver_Linux/DMM6500_02_PyVISA_Linux.py
```python
import visa
import time
rm = 0
myDmm = 0
def KEIDMM6500_Connect(rsrcString, getIdStr, timeout, doRst):
    myDmm = rm.open_resource(rsrcString)  # open the desired resource and assign it to myDmm
myDmm.write_termination = '\n'
myDmm.read_termination = '\n'
myDmm.send_end = True
if getIdStr == 1:
print(myDmm.query("*IDN?"))
myDmm.timeout = timeout
if doRst == 1:
myDmm.write('*RST')
#time.sleep(0.1)
return myDmm
def KEIDMM6500_Disconnect():
    myDmm.close()
return
def KEIDMM6500_Send(sndBuffer):
myDmm.write(sndBuffer)
return
def KEIDMM6500_Query(sndBuffer):
return myDmm.query(sndBuffer)
#================================================================================
# MAIN CODE GOES HERE
#================================================================================
t1 = time.time() # Capture start time....
#rm = visa.ResourceManager() # Opens the resource manager and sets it to variable rm
rm = visa.ResourceManager('@py')
myDmm = KEIDMM6500_Connect("TCPIP0::192.168.1.165::inst0::INSTR", 1, 20000, 1)
# Instrument ID String examples...
# LAN -> TCPIP0::172.16.58.3::inst0::INSTR
# GPIB -> GPIB0::16::INSTR
# Serial -> ASRL4::INSTR
KEIDMM6500_Send("*RST")
KEIDMM6500_Send(":SENS:FUNC \"FRES\"")
KEIDMM6500_Send(":SENS:FRES:RANG: AUTO ON")
KEIDMM6500_Send(":SENS:FRES:OCOM ON")
KEIDMM6500_Send(":SENS:FRES:AZER ON")
KEIDMM6500_Send(":SENS:FRES:NPLC 1")
print(KEIDMM6500_Query("READ?"))
KEIDMM6500_Disconnect()
rm.close()
t2 = time.time() # Capture stop time...
print("{0:.3f} s".format((t2-t1)))
```
#### File: DMM6500_DAQ6510/DMM6500_Python_VISA_Driver_Linux/DMM6500_VISA_Driver_Linux.py
```python
import visa
import struct
import math
import time
from enum import Enum
# ======================================================================
# DEFINE THE DMM CLASS INSTANCE HERE
# ======================================================================
class DMM6500:
def __init__(self):
self.echoCmd = 1
self.myInstr = 0
# ======================================================================
# DEFINE INSTRUMENT CONNECTION AND COMMUNICATIONS FUNCTIONS HERE
# ======================================================================
#def Connect(self, myAddress, myPort, timeOut, doReset, doIdQuery):
# self.mySocket.connect((myAddress, myPort)) # input to connect must be a tuple
# self.mySocket.settimeout(timeOut)
# if doReset == 1:
# self.Reset()
# self.SendCmd("waitcomplete()")
# if doIdQuery == 1:
# tmpId = self.IDQuery()
# if doIdQuery == 1:
# return tmpId
# else:
# return
def Connect(self, rsrcMgr, rsrcString, timeout, doIdQuery, doReset, doClear):
self.myInstr = rsrcMgr.open_resource(rsrcString)
if doIdQuery == 1:
print(self.Query("*IDN?"))
if doReset == 1:
self.Write("reset()")
if doClear == 1:
self.myInstr.clear()
self.myInstr.timeout = timeout
return
#def Disconnect():
# self.myInstr.close()
# return
def Disconnect(self):
self.myInstr.close()
return
#def SendCmd(self, cmd):
# if self.echoCmd == 1:
# print(cmd)
# cmd = "{0}\n".format(cmd)
# self.mySocket.send(cmd.encode())
# return
def Write(self, cmd):
if self.echoCmd == 1:
print(cmd)
self.myInstr.write(cmd)
return
#def QueryCmd(self, cmd, rcvSize):
# self.SendCmd(cmd)
# time.sleep(0.1)
# return self.mySocket.recv(rcvSize).decode()
def Query(self, cmd):
if self.echoCmd == 1:
print(cmd)
return self.myInstr.query(cmd)
# ======================================================================
# DEFINE BASIC FUNCTIONS HERE
# ======================================================================
def Reset(self):
sndBuffer = "reset()"
self.Write(sndBuffer)
def IDQuery(self):
sndBuffer = "*IDN?"
return self.Query(sndBuffer)
def LoadScriptFile(self, filePathAndName):
# This function opens the functions.lua file in the same directory as
        # the Python script and transfers its contents to the DMM6500's internal
# memory. All the functions defined in the file are callable by the
# controlling program.
func_file = open(filePathAndName, "r")
contents = func_file.read()
func_file.close()
cmd = "if loadfuncs ~= nil then script.delete('loadfuncs') end"
self.Write(cmd)
cmd = "loadscript loadfuncs\n{0}\nendscript".format(contents)
self.Write(cmd)
print(self.Query("loadfuncs()"))
return
# ======================================================================
# DEFINE MEASUREMENT FUNCTIONS HERE
# ======================================================================
def SetMeasure_Function(self, myFunc):
if myFunc == self.MeasFunc.DCV:
funcStr = "dmm.FUNC_DC_VOLTAGE"
elif myFunc == self.MeasFunc.DCI:
funcStr = "dmm.FUNC_DC_CURRENT"
sndBuffer = "dmm.measure.func = {}".format(funcStr)
self.Write(sndBuffer)
return
def SetMeasure_Range(self, rng):
sndBuffer = "dmm.measure.range = {}".format(rng)
self.Write(sndBuffer)
return
def SetMeasure_NPLC(self, nplc):
sndBuffer = "dmm.measure.nplc = {}".format(nplc)
self.Write(sndBuffer)
return
def SetMeasure_InputImpedance(self, myZ):
if myZ == self.InputZ.Z_AUTO:
funcStr = "dmm.IMPEDANCE_AUTO"
elif myZ == self.InputZ.Z_10M:
funcStr = "dmm.IMPEDANCE_10M"
sndBuffer = "dmm.measure.inputimpedance = {}".format(funcStr)
self.Write(sndBuffer)
return
def SetMeasure_AutoZero(self, myState):
if myState == self.DmmState.OFF:
funcStr = "dmm.OFF"
elif myState == self.DmmState.ON:
funcStr = "dmm.ON"
sndBuffer = "dmm.measure.autozero.enable = {}".format(funcStr)
self.Write(sndBuffer)
return
def SetMeasure_FilterType(self, myFilter):
if myFilter == self.FilterType.REP:
funcStr = "dmm.FILTER_REPEAT_AVG"
elif myFilter == self.FilterType.MOV:
funcStr = "dmm.FILTER_MOVING_AVG"
sndBuffer = "dmm.measure.filter.type = {}".format(funcStr)
self.Write(sndBuffer)
return
def SetMeasure_FilterCount(self, count):
sndBuffer = "dmm.measure.filter.count = {}".format(count)
self.Write(sndBuffer)
return
def SetMeasure_FilterState(self, myState):
if myState == self.DmmState.OFF:
funcStr = "dmm.OFF"
elif myState == self.DmmState.ON:
funcStr = "dmm.ON"
sndBuffer = "dmm.measure.filter.enable = {}".format(funcStr)
self.Write(sndBuffer)
return
def Measure(self, count):
sndBuffer = "print(dmm.measure.read())"
return self.Query(sndBuffer)
def SetFunction_Temperature(self, *args):
# This function can be used to set up to three different measurement
# function attributes, but they are expected to be in a certain
# order....
# For simple front/rear terminal measurements:
# 1. Transducer (TC/RTD/Thermistor)
# 2. Transducer type
# For channel scan measurements:
# 1. Channel string
# 2. Transducer
# 3. Transducer type
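        # Illustrative calls (hedged examples; the channel string and enum
        # choices are placeholders, not tied to a specific setup):
        #   dmm.SetFunction_Temperature(dmm.Transducer.TC, dmm.TCType.K)
        #   dmm.SetFunction_Temperature("101:110", dmm.Transducer.RTD4, dmm.RTDType.PT100)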
if (len(args) == 0):
self.Write("dmm.measure.func = dmm.FUNC_TEMPERATURE")
else:
if (type(args[0]) != str):
self.Write("dmm.measure.func = dmm.FUNC_TEMPERATURE")
if(len(args) > 0):
xStr = "dmm.measure.transducer"
if(args[0] == self.Transducer.TC):
xStr2 = "dmm.TRANS_THERMOCOUPLE"
elif(args[0] == self.Transducer.RTD4):
xStr2 = "dmm.TRANS_FOURRTD"
elif(args[0] == self.Transducer.RTD3):
xStr2 = "dmm.TRANS_THREERTD"
elif(args[0] == self.Transducer.THERM):
xStr2 = "dmm.TRANS_THERMISTOR"
sndBuffer = "{} = {}".format(xStr, xStr2)
self.Write(sndBuffer)
if(len(args) > 1):
if(args[0] == self.Transducer.TC):
xStr = "dmm.measure.thermocouple"
if(args[1] == self.TCType.K):
xType = "dmm.THERMOCOUPLE_K"
elif(args[1] == self.TCType.J):
xType = "dmm.THERMOCOUPLE_J"
elif(args[1] == self.TCType.N):
xType = "dmm.THERMOCOUPLE_N"
sndBuffer = "{} = {}".format(xStr, xType)
self.Write(sndBuffer)
                elif((args[0] == self.Transducer.RTD4) or (args[0] == self.Transducer.RTD3)):
if(args[0] == self.Transducer.RTD4):
xStr = "dmm.measure.fourrtd"
if(args[0] == self.Transducer.RTD3):
xStr = "dmm.measure.threertd"
if(args[1] == self.RTDType.PT100):
rtdType = "dmm.RTD_PT100"
elif(args[1] == self.RTDType.PT385):
rtdType = "dmm.RTD_PT385"
elif(args[1] == self.RTDType.PT3916):
rtdType = "dmm.RTD_PT3916"
elif(args[1] == self.RTDType.D100):
rtdType = "dmm.RTD_D100"
elif(args[1] == self.RTDType.F100):
rtdType = "dmm.RTD_F100"
elif(args[1] == self.RTDType.USER):
rtdType = "dmm.RTD_USER"
sndBuffer = "{} = {}".format(xStr, rtdType)
self.Write(sndBuffer)
elif(args[0] == self.Transducer.THERM):
xStr = "dmm.measure.thermistor"
if(args[1] == self.ThermType.TH2252):
thrmType = "dmm.THERM_2252"
elif(args[1] == self.ThermType.TH5K):
thrmType = "dmm.THERM_5000"
elif(args[1] == self.ThermType.TH10K):
thrmType = "dmm.THERM_10000"
sndBuffer = "{} = {}".format(xStr, thrmType)
self.Write(sndBuffer)
else:
setStr = "channel.setdmm(\"{}\", ".format(args[0])
self.Write("{}dmm.ATTR_MEAS_FUNCTION, dmm.FUNC_TEMPERATURE)".format(setStr))
if(len(args) > 1):
if(args[1] == self.Transducer.TC):
xStr = "dmm.TRANS_THERMOCOUPLE"
xStr2 = "dmm.ATTR_MEAS_THERMOCOUPLE"
elif(args[1] == self.Transducer.RTD4):
xStr = "dmm.TRANS_FOURRTD"
xStr2 = "dmm.ATTR_MEAS_FOUR_RTD"
elif(args[1] == self.Transducer.RTD3):
xStr = "dmm.TRANS_THREERTD"
xStr2 = "dmm.ATTR_MEAS_THREE_RTD"
elif(args[1] == self.Transducer.THERM):
xStr = "dmm.TRANS_THERMISTOR"
xStr2 = "dmm.ATTR_MEAS_THERMISTOR"
sndBuffer = "{}dmm.ATTR_MEAS_TRANSDUCER, {})".format(setStr, xStr)
self.Write(sndBuffer)
if(len(args) > 2):
if(args[1] == self.Transducer.TC):
if(args[2] == self.TCType.K):
xType = "dmm.THERMOCOUPLE_K"
elif(args[2] == self.TCType.J):
xType = "dmm.THERMOCOUPLE_J"
elif(args[2] == self.TCType.N):
xType = "dmm.THERMOCOUPLE_N"
#print("{}dmm.ATTR_MEAS_THERMOCOUPLE, {})".format(setStr, xType))
sndBuffer = "{}dmm.ATTR_MEAS_THERMOCOUPLE, {})".format(setStr, xType)
self.Write(sndBuffer)
elif((args[1] == self.Transducer.RTD4) or (args[1] == self.Transducer.RTD3)):
if(args[2] == self.RTDType.PT100):
rtdType = "dmm.RTD_PT100"
elif(args[2] == self.RTDType.PT385):
rtdType = "dmm.RTD_PT385"
elif(args[2] == self.RTDType.PT3916):
rtdType = "dmm.RTD_PT3916"
                            elif(args[2] == self.RTDType.D100):
                                rtdType = "dmm.RTD_D100"
                            elif(args[2] == self.RTDType.F100):
                                rtdType = "dmm.RTD_F100"
elif(args[2] == self.RTDType.USER):
rtdType = "dmm.RTD_USER"
sndBuffer = "{}{}, {})".format(setStr, xStr2, rtdType)
self.Write(sndBuffer)
if(args[1] == self.Transducer.THERM):
if(args[2] == self.ThermType.TH2252):
thrmType = "dmm.THERM_2252"
elif(args[2] == self.ThermType.TH5K):
thrmType = "dmm.THERM_5000"
elif(args[2] == self.ThermType.TH10K):
thrmType = "dmm.THERM_10000"
sndBuffer = "{}{}, {})".format(setStr, xStr2, thrmType)
self.Write(sndBuffer)
return
class MeasFunc(Enum):
DCV = 0
DCI = 1
class InputZ(Enum):
Z_AUTO = 0
Z_10M = 1
class DmmState(Enum):
OFF = 0
ON = 1
class FilterType(Enum):
REP = 0
MOV = 1
class Transducer(Enum):
TC = 0
RTD4 = 1
RTD3 = 2
THERM = 3
class TCType(Enum):
K = 0
J = 1
N = 2
class RTDType(Enum):
PT100 = 0
PT385 = 1
PT3916 = 2
D100 = 3
F100 = 4
USER = 5
class ThermType(Enum):
TH2252 = 0
TH5K = 1
TH10K = 2
def SetScan_BasicAttributes(self, *args):
self.Write("scan.create(\"{}\")".format(args[0]))
# Set the scan count
if(len(args) > 1):
self.Write("scan.scancount = {}".format(args[1]))
# Set the time between scans
if(len(args) > 2):
self.Write("scan.scaninterval = {}".format(args[2]))
return
def Init(self):
self.Write("waitcomplete()")
self.Write("trigger.model.initiate()")
return
def GetScan_Status(self):
return self.Query("print(trigger.model.state())")
def GetScan_Data(self, dataCount, startIndex, endIndex):
#charCnt = 24 * dataCount
accumCnt = int(self.Query("print(defbuffer1.n)")[0:-1])
while(accumCnt < endIndex):
accumCnt = int(self.Query("print(defbuffer1.n)")[0:-1])
rcvBuffer = self.Query("printbuffer({}, {}, defbuffer1)".format(startIndex, endIndex))[0:-1]
return rcvBuffer
```
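A minimal usage sketch of the driver above, assuming PyVISA with the pyvisa-py backend; the resource string is a placeholder address.
```python
import visa

rm = visa.ResourceManager('@py')
dmm = DMM6500()
dmm.Connect(rm, "TCPIP0::192.168.1.165::inst0::INSTR", 20000, 1, 1, 1)
dmm.SetMeasure_Function(dmm.MeasFunc.DCV)
dmm.SetMeasure_NPLC(1.0)
dmm.SetMeasure_AutoZero(dmm.DmmState.ON)
print(dmm.Measure(1))      # single reading returned as a string
dmm.Disconnect()
rm.close()
```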
#### File: Drivers/Power_Analyzer/PA3000_Send_Configuration.py
```python
import visa
import struct
import math
import time
import PowerAnalyzer_VISA_Driver as pa
def send_configuration_file(my_file, my_pa):
with open(my_file) as fp:
line = fp.readline()
cnt = 1
        while line:
            print("Line {}: {}".format(cnt, line.strip()))
            my_pa.SendCmd(line)
            line = fp.readline()
            cnt += 1
return
#===== MAIN PROGRAM STARTS HERE =====
rm = visa.ResourceManager() # Opens the resource manager and sets it to variable rm
pa_instrument_string = "TCPIP0::192.168.1.122::5025::SOCKET"
# DAQ_Inst_1 = "TCPIP0::192.168.1.2::inst0::INSTR"
# Instrument ID String examples...
# LAN -> TCPIP0::172.16.58.3::inst0::INSTR or
# TCPIP0::192.168.1.122::5025::SOCKET
# USB -> USB0::0x05E6::0x2450::01419962::INSTR
# GPIB -> GPIB0::16::INSTR
# Serial -> ASRL4::INSTR
timeout = 20000
myFile = "CONFIG04EDIT.CFG"
PA3000 = pa.PowerAnalyzer()
myID = PA3000.Connect(rm, pa_instrument_string, timeout, 1, 1, 1)
t1 = time.time()
send_configuration_file(myFile, PA3000)
PA3000.Disconnect()
rm.close()
t2 = time.time()
# Notify the user of completion and the test time achieved.
print("done")
print("{0:.6f} s".format(t2-t1))
input("Press Enter to continue...")
exit()
```
#### File: PS-2231A/PyVISA/2231A_PyVISA_Examples.py
```python
import visa
import time
rm = 0
my_PS = 0
def KEI2231_Connect(rsrcString, getIdStr, timeout, doRst):
    my_PS = rm.open_resource(rsrcString, baud_rate = 9600, data_bits = 8)  # open the desired serial resource and assign it to my_PS
my_PS.write_termination = '\n'
my_PS.read_termination = '\n'
my_PS.send_end = True
    my_PS.StopBits = 1  # note: PyVISA's serial attribute is stop_bits; as written this line has no effect
# my_PS.flow_control = # only available in PyVisa 1.9
#my_PS.baud_rate = 9600
if getIdStr == 1:
print(my_PS.query("*IDN?"))
#time.sleep(0.1)
my_PS.write('SYST:REM')
#print(my_PS.timeout)
my_PS.timeout = timeout
#print(my_PS.timeout)
if doRst == 1:
my_PS.write('*RST')
#time.sleep(0.1)
return my_PS
def KEI2231A_Disconnect():
my_PS.write('SYST:LOC')
    my_PS.close()
return
def KEI2231A_SelectChannel(myChan):
my_PS.write("INST:NSEL %d" % myChan)
#time.sleep(0.25)
return
def KEI2231A_SetVoltage(myV):
my_PS.write("VOLT %f" % myV)
#time.sleep(0.24)
return
def KEI2231A_SetCurrent(myI):
my_PS.write("CURR %f" % myI)
#time.sleep(0.24)
return
def KEI2231A_OutputState(myState):
if myState == 0:
my_PS.write("OUTP 0")
#time.sleep(0.25)
#my_PS.write("OUT:ENAB 0")
else:
my_PS.write("OUTP 1")
#time.sleep(0.25)
#my_PS.write("OUT:ENAB 1")
#time.sleep(0.25)
return
def KEI2231_Send(sndBuffer):
my_PS.write(sndBuffer)
return
def KEI2231_Query(sndBuffer):
return my_PS.query(sndBuffer)
#================================================================================
# MAIN CODE GOES HERE
#================================================================================
t1 = time.time() # Capture start time....
rm = visa.ResourceManager() # Opens the resource manager and sets it to variable rm
my_PS = KEI2231_Connect("ASRL3::INSTR", 1, 20000, 1)
KEI2231A_SelectChannel(1)
KEI2231A_SetVoltage(1.0)
KEI2231A_SetCurrent(1.0)
KEI2231A_OutputState(1)
time.sleep(0.25)
KEI2231A_OutputState(0)
KEI2231A_Disconnect()
rm.close()
t2 = time.time() # Capture stop time...
print("{0:.3f} s".format((t2-t1)))
```
#### File: Series_3700A/Series_3700A_Python_Sockets_Driver/Series_3700A_Sockets_Driver.py
```python
import socket
import struct
import math
import time
from enum import Enum
# ======================================================================
# DEFINE THE DMM CLASS INSTANCE HERE
# ======================================================================
class KEI3706A:
def __init__(self):
self.echoCmd = 1
self.mySocket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
self.stubComms = 0
# ======================================================================
# DEFINE INSTRUMENT CONNECTION AND COMMUNICATIONS FUNCTIONS HERE
# ======================================================================
def Connect(self, myAddress, myPort, timeOut, doReset, doIdQuery):
if (self.stubComms == 0):
self.mySocket.connect((myAddress, myPort)) # input to connect must be a tuple
self.mySocket.settimeout(timeOut)
if doReset == 1:
self.Reset()
self.SendCmd("waitcomplete()")
if doIdQuery == 1:
tmpId = self.IDQuery()
if doIdQuery == 1:
return tmpId
else:
return
def Disconnect(self):
if (self.stubComms == 0):
self.mySocket.close()
return
def SendCmd(self, cmd):
if self.echoCmd == 1:
print(cmd)
cmd = "{0}\n".format(cmd)
if (self.stubComms == 0):
self.mySocket.send(cmd.encode())
return
def QueryCmd(self, cmd, rcvSize):
self.SendCmd(cmd)
time.sleep(0.1)
rcvString = ""
if (self.stubComms == 0):
rcvString = self.mySocket.recv(rcvSize).decode()
return rcvString
# ======================================================================
# DEFINE BASIC FUNCTIONS HERE
# ======================================================================
def Reset(self):
sndBuffer = "reset()"
self.SendCmd(sndBuffer)
def IDQuery(self):
sndBuffer = "*IDN?"
return self.QueryCmd(sndBuffer, 64)
def LoadScriptFile(self, filePathAndName):
# This function opens the functions.lua file in the same directory as
# the Python script and trasfers its contents to the DMM's internal
# memory. All the functions defined in the file are callable by the
# controlling program.
func_file = open(filePathAndName, "r")
contents = func_file.read()
func_file.close()
#cmd = "if loadfuncs ~= nil then script.delete('loadfuncs') end"
#self.SendCmd(cmd)
cmd = "loadscript loadfuncs\n{0}\nendscript".format(contents)
self.SendCmd(cmd)
cmd = "loadfuncs()"
print(self.QueryCmd(cmd, 32))
return
# ======================================================================
# DEFINE FUNCTIONS HERE
# ======================================================================
def Close(self, *args):
# first parameter is always a channel list string
self.SendCmd("channel.close(\"{}\")".format(args[0]))
return
def Open(self, *args):
# first parameter is always a channel list string
self.SendCmd("channel.open(\"{}\")".format(args[0]))
return
def Set_3761_Switch_Mode(self, mySlot, myMode):
sndBuffer = "slot[{}].voltsampsmode = {}".format(mySlot, myMode)
self.SendCmd(sndBuffer)
return
```
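A minimal usage sketch of the KEI3706A driver above, assuming an instrument at a placeholder address with a multiplexer card in slot 1; the channel numbers are illustrative.
```python
switch = KEI3706A()
switch.Connect("192.168.1.100", 5025, 20000, 1, 1)
switch.Close("1001")   # close channel 1 on the slot-1 card
switch.Open("1001")    # reopen it
switch.Disconnect()
```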
#### File: 2450-SMU/Leakage_Current_and_Insulation_Resistance/Leakage_Current_and_Insulation_Resistance_TSP.py
```python
import socket
import struct
import math
import time
import sys
echo_cmd = 1
"""*********************************************************************************
Function: instrument_connect(my_socket, ip_address string, my_port int)
Purpose: Open an instance of an instrument object for remote communication
over LAN/Ethernet.
Parameters:
my_socket - Instance of a socket object.
ip_address (string) - The TCP/IP address string associated with the
target instrument.
my_port (int) - The instrument connection port.
Returns:
my_socket - Updated instance of a socket object that includes
attributes of a valid connection.
Revisions:
2019-07-30 JJB Initial revision.
*********************************************************************************"""
def instrument_connect(my_socket, my_address, my_port, timeout, do_reset, do_id_query):
my_socket.connect((my_address, my_port)) # input to connect must be a tuple
my_socket.settimeout(timeout)
if do_reset == 1:
instrument_write(my_socket, "*RST")
if do_id_query == 1:
tmp_id = instrument_query(my_socket, "*IDN?", 100)
print(tmp_id)
return my_socket
"""*********************************************************************************
Function: instrument_disconnect(my_socket)
Purpose: Break the LAN/Ethernet connection between the controlling computer
and the target instrument.
Parameters:
my_socket - The TCP instrument connection object used for sending
and receiving data.
Returns:
None
Revisions:
2019-07-30 JJB Initial revision.
*********************************************************************************"""
def instrument_disconnect(my_socket):
my_socket.close()
return
"""*********************************************************************************
Function: instrument_write(my_socket, my_command)
Purpose: This function issues control commands to the target instrument.
Parameters:
my_socket - The TCP instrument connection object used for sending
and receiving data.
my_command (string) - The command issued to the instrument to make it
perform some action or service.
Returns:
None
Revisions:
2019-07-30 JJB Initial revision.
*********************************************************************************"""
def instrument_write(my_socket, my_command):
if echo_cmd == 1:
print(my_command)
cmd = "{0}\n".format(my_command)
my_socket.send(cmd.encode())
return
"""*********************************************************************************
Function: instrument_read(my_socket, receive_size)
Purpose: This function asks the connected instrument to reply with some
previously requested information, typically queued up from a call
to instrument_write().
Parameters:
my_socket - The TCP instrument connection object used for sending
and receiving data.
receive_size (int) - Size of the data/string to be returned to
the caller.
Returns:
reply_string (string) - The requested information returned from the
target instrument.
Revisions:
2019-07-30 JJB Initial revision.
*********************************************************************************"""
def instrument_read(my_socket, receive_size):
return my_socket.recv(receive_size).decode()
"""*********************************************************************************
Function: instrument_query(my_socket, my_command, receive_size)
Purpose: This function issues control commands to the target instrument with
the expectation that data will be returned. For this function
instance, the returned data is (typically) in string format.
Parameters:
my_socket - The TCP instrument connection object used for sending
and receiving data.
my_command (string) - The command issued to the instrument to make it
perform some action or service.
receive_size (int) - The approximate number of bytes of data the caller
expects to be returned in the response from the
instrument.
Returns:
reply_string (string) - The requested information returned from the
target instrument. Obtained by way of a caller
to instrument_read().
Revisions:
2019-07-30 JJB Initial revision.
*********************************************************************************"""
def instrument_query(my_socket, my_command, receive_size):
instrument_write(my_socket, my_command)
time.sleep(0.1)
return instrument_read(my_socket, receive_size)
"""*********************************************************************************
Function: instrument_query_binary(my_socket, my_command, expected_number_of_readings)
Purpose: This function issues control commands to the target instrument with
the expectation that data will be returned. For this function
instance, the returned data is specifically expected to be in
binary format. Note that this function does not handle directing
the target instrument to return its data in binary format - it is
the user's responsibility to understand/issue the commands that
manage the binary formats.
Binary formatting can promote either single or double floating point
format of IEEE Std 754. This function assumes the following defaults:
* Normal byte order (non-swapped)
* Single precision format
* Little endian byte order
Parameters:
my_socket - The TCP instrument connection object used for sending
and receiving data.
my_command (string) - The command issued to the instrument to make it
perform some action or service.
expected_number_of_readings (int) - This is the number of readings that
is being requested to be returned
from the instrument to the caller.
Returns:
reply_string (string) - The requested information returned from the
target instrument. Obtained by way of a caller
to instrument_read().
Revisions:
2019-08-29 JJB Initial revision.
*********************************************************************************"""
def instrument_query_binary(my_socket, my_command, expected_number_of_readings):
receive_size = expected_number_of_readings * sys.getsizeof(float())
instrument_write(my_socket, my_command)
response = my_socket.recv(receive_size)
fmtStr = '%df' % expected_number_of_readings
    altResp = struct.unpack(fmtStr, response[2:-1])  # Note the index offset applied
                                                     # to the response data. This is
                                                     # applicable to a majority of
                                                     # Keithley measurement tools. If
                                                     # you choose to repurpose this
                                                     # code for equipment from another
                                                     # vendor, the framing bytes
                                                     # stripped here may need to change.
return altResp
"""*********************************************************************************
To measure the leakage current or insulation resistance of a device, you need to apply a fixed voltage
to the device and measure the resulting current. Depending on the device under test, the measured
current is typically very small, usually less than 10 nA.
This application consists of two examples that demonstrate:
• How to use the Model 2450 to perform leakage current measurements on a capacitor
• How to use the Model 2450 to measure insulation resistance between the two conductors of a
coaxial cable
The only difference between these two application examples is that when you measure leakage
current, the results are returned in units of amperes. When you measure insulation resistance, the
results are returned in units of ohms.
The leakage current application applies the voltage for a specified period because the device needs
time to charge. In some cases, the resulting current is measured the entire time the device is biased.
In other cases, only one reading is made at the end of the soak period.
*********************************************************************************"""
ip_address = "192.168.1.25" # Place your instrument's IP address here.
my_port = 5025
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM) # Establish a TCP/IP socket object
instrument_connect(s, ip_address, my_port, 20000, 0, 1) # Open the socket connection
t1 = time.time() # Start the timer...
instrument_write(s, "reset()") # Reset the SMU
instrument_write(s, "smu.source.func = smu.FUNC_DC_VOLTAGE") # Set to source voltage.
instrument_write(s, "smu.source.level = 20") # Set the source voltage to 20 V.
instrument_write(s, "smu.source.ilimit.level = 10e-3") # Set the current limit to 10 mA.
instrument_write(s, "smu.measure.func = smu.FUNC_DC_CURRENT") # Set the instrument to measure current.
instrument_write(s, "smu.measure.terminals = smu.TERMINALS_REAR") # Set to use rear-panel terminals.
instrument_write(s, "smu.measure.autorange = smu.ON") # Set the current range to autorange.
instrument_write(s, "smu.measure.nplc = 1") # Set the number of power line cycles to 1.
instrument_write(s, "trigger.model.load(\"DurationLoop\", 60, 0.2)") # Load the Duration Loop trigger model to run
# for 60 s at 200 ms intervals.
instrument_write(s, "smu.source.output = smu.ON") # Turn on the output.
instrument_write(s, "trigger.model.initiate()") # Initiate readings
# instrument_write(s, "waitcomplete()") # Allow time for all measurements to complete.
# Calculate the number of readings that will be collected.
run_duration = 60        # Must match the 60 s duration passed to the DurationLoop trigger model above.
sample_interval = 0.2    # Must match the 0.2 s interval passed to the DurationLoop trigger model above.
expected_readings_count = int((run_duration / sample_interval) + 1)
j = 1
# Format a column header for the instrument feedback.
print("{0:10} | {1:<10} | {2:<10}".format("Rdg.Num.", "Time (s)", "Current (A)"))
while j <= expected_readings_count:
time.sleep(sample_interval)
# Check to make certain readings are in the buffer to extract.
    end_index = int(instrument_query(s, "print(defbuffer1.n)", 16).rstrip())
if end_index >= j:
# Get the reading + relative timestamp pairs out one at a time and print to the display.
temp_list = instrument_query(s, "printbuffer({0}, {1}, defbuffer1.relativetimestamps, defbuffer1.readings)".
format(j, j),(16*2)).rstrip().split(',')
print("{0:10} | {1:0.4f} | {2:0.4E}".format(counter, float(temp_list[0]), float(temp_list[1])))
j += 1
instrument_write(s, "smu.source.level = 0") # Discharge the capacitor to 0 V.
instrument_write(s, "smu.source.output = smu.OFF") # Turn off the output.
# Close the socket connection
instrument_disconnect(s)
t2 = time.time()
# Notify the user of completion and the data streaming rate achieved.
print("done")
print("Total Time Elapsed: {0:.3f} s".format(t2-t1))
input("Press Enter to continue...")
exit()
```
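The header comment above notes that the insulation-resistance variant of this test differs only in the reported units. A minimal sketch of that variation, assuming the 2450's `smu.measure.unit` attribute is used to report current readings as ohms (verify the exact constant names against your instrument's reference manual):
```python
# Sketch: report readings in ohms so defbuffer1 holds insulation resistance
# instead of leakage current. The rest of the setup (source level, current
# limit, trigger model, buffer extraction loop) is unchanged from the script
# above; only the column label in the print statements needs updating.
instrument_write(s, "smu.measure.func = smu.FUNC_DC_CURRENT")   # still a current measurement...
instrument_write(s, "smu.measure.unit = smu.UNIT_OHM")          # ...stored and displayed as V/I in ohms
```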
#### File: DAQ6510/Speed_Scanning_for_Increased_Test_Throughput/DAQ6510_Speed_Scanning_for_Increased_Test_Throughput_TSP.py
```python
import socket
import struct
import math
import time
import sys
echo_cmd = 0
"""*********************************************************************************
Function: instrument_connect(my_socket, ip_address string, my_port int)
Purpose: Open an instance of an instrument object for remote communication
over LAN/Ethernet.
Parameters:
my_socket - Instance of a socket object.
ip_address (string) - The TCP/IP address string associated with the
target instrument.
my_port (int) - The instrument connection port.
Returns:
my_socket - Updated instance of a socket object that includes
attributes of a valid connection.
Revisions:
2019-07-30 JJB Initial revision.
*********************************************************************************"""
def instrument_connect(my_socket, my_address, my_port, timeout, do_reset, do_id_query):
my_socket.connect((my_address, my_port)) # input to connect must be a tuple
my_socket.settimeout(timeout)
if do_reset == 1:
instrument_write(my_socket, "reset()")
if do_id_query == 1:
tmp_id = instrument_query(my_socket, "*IDN?", 100)
print(tmp_id)
return my_socket
"""*********************************************************************************
Function: instrument_disconnect(my_socket)
Purpose: Break the LAN/Ethernet connection between the controlling computer
and the target instrument.
Parameters:
my_socket - The TCP instrument connection object used for sending
and receiving data.
Returns:
None
Revisions:
2019-07-30 JJB Initial revision.
*********************************************************************************"""
def instrument_disconnect(my_socket):
my_socket.close()
return
"""*********************************************************************************
Function: instrument_write(my_socket, my_command)
Purpose: This function issues control commands to the target instrument.
Parameters:
my_socket - The TCP instrument connection object used for sending
and receiving data.
my_command (string) - The command issued to the instrument to make it
perform some action or service.
Returns:
None
Revisions:
2019-07-30 JJB Initial revision.
*********************************************************************************"""
def instrument_write(my_socket, my_command):
if echo_cmd == 1:
print(my_command)
cmd = "{0}\n".format(my_command)
my_socket.send(cmd.encode())
return
"""*********************************************************************************
Function: instrument_read(my_socket, receive_size)
Purpose: This function asks the connected instrument to reply with some
previously requested information, typically queued up from a call
to instrument_write().
Parameters:
my_socket - The TCP instrument connection object used for sending
and receiving data.
receive_size (int) - Size of the data/string to be returned to
the caller.
Returns:
reply_string (string) - The requested information returned from the
target instrument.
Revisions:
2019-07-30 JJB Initial revision.
*********************************************************************************"""
def instrument_read(my_socket, receive_size):
return my_socket.recv(receive_size).decode()
"""*********************************************************************************
Function: instrument_query(my_socket, my_command, receive_size)
Purpose: This function issues control commands to the target instrument with
the expectation that data will be returned. For this function
instance, the returned data is (typically) in string format.
Parameters:
my_socket - The TCP instrument connection object used for sending
and receiving data.
my_command (string) - The command issued to the instrument to make it
perform some action or service.
receive_size (int) - The approximate number of bytes of data the caller
expects to be returned in the response from the
instrument.
Returns:
reply_string (string) - The requested information returned from the
target instrument. Obtained by way of a caller
to instrument_read().
Revisions:
2019-07-30 JJB Initial revision.
*********************************************************************************"""
def instrument_query(my_socket, my_command, receive_size):
instrument_write(my_socket, my_command)
time.sleep(0.1)
return instrument_read(my_socket, receive_size)
"""*********************************************************************************
Function: instrument_query_binary(my_socket, my_command, expected_number_of_readings)
Purpose: This function issues control commands to the target instrument with
the expectation that data will be returned. For this function
instance, the returned data is specifically expected to be in
binary format. Note that this function does not handle directing
the target instrument to return its data in binary format - it is
the user's responsibility to understand/issue the commands that
manage the binary formats.
Binary formatting can promote either single or double floating point
format of IEEE Std 754. This function assumes the following defaults:
* Normal byte order (non-swapped)
* Single precision format
* Little endian byte order
Parameters:
my_socket - The TCP instrument connection object used for sending
and receiving data.
my_command (string) - The command issued to the instrument to make it
perform some action or service.
expected_number_of_readings (int) - This is the number of readings that
is being requested to be returned
from the instrument to the caller.
Returns:
reply_string (string) - The requested information returned from the
target instrument. Obtained by way of a caller
to instrument_read().
Revisions:
2019-08-29 JJB Initial revision.
*********************************************************************************"""
def instrument_query_binary(my_socket, my_command, expected_number_of_readings):
receive_size = expected_number_of_readings * sys.getsizeof(float())
instrument_write(my_socket, my_command)
response = my_socket.recv(receive_size)
fmtStr = '%df' % expected_number_of_readings
altResp = struct.unpack(fmtStr, response[2:-1])
return altResp
"""*********************************************************************************
There are three different multiplex modules available for use with the
DAQ6510. This application example demonstrates how each of the multiplexer
modules can impact productivity by changing test time. The multiplexer modules
all share the same basic code base for switching, scanning, and measuring.
Any limits on system speed are the result of the relays in the multiplexer
that switch the signals from the device under test (DUT) into the instrument.
The Model 7700 20-Channel Differential Multiplexer Module uses electromechanical
relays which have low contact resistance and contribute only a minor offset
potential (<1 Ω through the end of life and < 500 nV, respectively). This
results in the most accurate readings of the modules but with a 3 ms relay
closure time, the slowest scan time in comparison to other options.
The 7703 multiplexer module uses reed relays which have low contact resistance
(<1 Ω through the end of life), but a higher contact potential (6 μV max)
which contributes more signal offset and slightly less precise readings.
The benefit of this module is shorter relay close time (less than 1 ms) which
makes it approximately three times faster than the 7700.
The 7710 multiplexer module uses solid-state relays which have the highest
contact resistance and contact potential of the three options (<5 Ω and
<1 μV, respectively) and are therefore the least precise; however, the 7710
has the overall speed advantage with a relay close time of less than 0.5
ms, making it twice as fast as the 7703 and at least six times faster than
the 7700.
*********************************************************************************"""
ip_address = "192.168.1.65" # Place your instrument's IP address here.
my_port = 5025
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM) # Establish a TCP/IP socket object
# Open the socket connection
instrument_connect(s, ip_address, my_port, 20000, 0, 1)
t1 = time.time() # Start the timer...
instrument_write(s, "reset()") # Reset the DAQ6510
scan_count = 1000
instrument_write(s, "channel.setdmm(\"101:120\", dmm.ATTR_MEAS_FUNCTION, dmm.FUNC_DC_VOLTAGE)") # Set function to DCV
instrument_write(s, "channel.setdmm(\"101:120\", dmm.ATTR_MEAS_RANGE, 1)") # Set the fixed range at 1 V
instrument_write(s, "channel.setdmm(\"101:120\", dmm.ATTR_MEAS_RANGE_AUTO, dmm.OFF)") # Disable auto ranging
instrument_write(s, "channel.setdmm(\"101:120\", dmm.ATTR_MEAS_AUTO_ZERO, dmm.OFF)") # Turn off auto zero
instrument_write(s, "channel.setdmm(\"101:120\", dmm.ATTR_MEAS_DIGITS, dmm.DIGITS_4_5)") # Front panel shows only four significant digits (when in focus)
instrument_write(s, "channel.setdmm(\"101:120\", dmm.ATTR_MEAS_NPLC, 0.0005)") # Set fastest NPLC possible
instrument_write(s, "channel.setdmm(\"101:120\", dmm.ATTR_MEAS_APERTURE, 8.33333e-06)") # Set the aperture to the smallest possible value - redundant
# to setting the fastest NPLC, but provided here for review
instrument_write(s, "channel.setdmm(\"101:120\", dmm.ATTR_MEAS_LINE_SYNC, dmm.OFF)") # Turn off line sync
instrument_write(s, "channel.setdmm(\"101:120\", dmm.ATTR_MEAS_LIMIT_ENABLE_1, dmm.OFF)") # Turn off limit 1 & 2 tests
instrument_write(s, "channel.setdmm(\"101:120\", dmm.ATTR_MEAS_LIMIT_ENABLE_2, dmm.OFF)")
instrument_write(s, "format.data = format.REAL32") # Request that the buffer data be returned in
# IEEE Std. 754 single-precision format
instrument_write(s, "display.changescreen(display.SCREEN_PROCESSING)") # Switch to the "Processing" UI to ensure processing
# resources are focused on measuring, not displaying
# readings that are too fast to watch on the screen.
instrument_write(s, "scan.create(\"101:120\")") # Set up Scan
instrument_write(s, "scan.interval = 0") # Set trigger interval between scans to 0 s
channel_count = int(instrument_query(s, "print(scan.stepcount)", 16).rstrip()) # Get the number of channels configured in the scan
instrument_write(s, "scan.scancount = {0}".format(scan_count)) # Set the number of times the scan will be executed
sample_count = scan_count * channel_count
instrument_write(s, "defbuffer1.clear()") # Clear the reading buffer
instrument_write(s, "defbuffer1.capacity = {0}".format(sample_count)) # Set the number of points in the reading buffer
instrument_write(s, "waitcomplete()") # Allow time for all settings to take effect
instrument_write(s, "trigger.model.initiate()") # Initiate the scan
# Establish a loop which will iteratively extract the readings from the reading buffer
j = 1
chunk_size = 200 # This is the number of readings to extract from the reading buffer per each query
start_index = 1
end_index = chunk_size
accumulated_readings = 0
while accumulated_readings < sample_count:
readings_count = int(instrument_query(s, "print(defbuffer1.n)", 16).rstrip())
if readings_count >= end_index:
response = instrument_query_binary(s, "printbuffer({0}, {1}, defbuffer1.readings)".format(start_index, end_index), chunk_size)
print(response)
start_index += chunk_size
end_index += chunk_size
accumulated_readings += chunk_size
instrument_write(s, "display.changescreen(display.SCREEN_HOME)") # Switch to the HOME UI after scanning and processing
# is complete.
# Close the socket connection
instrument_disconnect(s)
t2 = time.time()
# Notify the user of completion and the data streaming rate achieved.
print("done")
print("Total Time Elapsed: {0:.3f} s".format(t2-t1))
input("Press Enter to continue...")
exit()
```
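Since the point of this example is throughput, it helps to convert the elapsed time into readings per second and an average per-channel time that can be compared against the relay-close specifications quoted in the header comment. A small standalone sketch (the elapsed time shown is a made-up example; use the `t2 - t1` value printed by the script):
```python
# Turn the script's totals into throughput figures.
scan_count = 1000
channel_count = 20                 # channels 101:120
sample_count = scan_count * channel_count
elapsed_s = 52.3                   # example value; substitute the printed t2 - t1

readings_per_second = sample_count / elapsed_s
ms_per_channel = (elapsed_s / sample_count) * 1e3
print("{0:.1f} readings/s, {1:.3f} ms per channel".format(readings_per_second, ms_per_channel))
```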
#### File: DAQ6510/Using the Model 7701 Multiplexer in a Switching-Only Application/DAQ6510_w_7701_Switch_Only_Example.py
```python
import socket
import struct
import math
import time
echo_cmd = 0
"""*********************************************************************************
Function: instrument_connect(my_socket, ip_address string, my_port int, timeout,
do_reset, do_clear, do_id_query)
Purpose: Open an instance of an instrument object for remote communication
over LAN/Ethernet.
Parameters:
my_socket - Instance of a socket object.
ip_address (string) - The TCP/IP address string associated with the
target instrument.
my_port (int) - The instrument connection port.
timeout (int) - The timeout limit for query/communication exchanges.
do_reset (int) - Determines whether the instrument is to be reset
upon connection to the instrument. Setting to 1
will perform the reset; setting to zero avoids it.
do_clear (int) - Determines whether the instrument is to be cleared
do_id_query (int) - Determines whether the instrument is to echo its
identification string after it is initialized.
Returns:
my_socket - Updated instance of a socket object that includes
attributes of a valid connection.
Revisions:
2019-07-30 JJB Initial revision.
*********************************************************************************"""
def instrument_connect(my_socket, my_address, my_port, timeout, do_reset, do_clear, do_id_query):
my_socket.connect((my_address, my_port)) # input to connect must be a tuple
my_socket.settimeout(timeout)
if do_reset == 1:
instrument_write(my_socket, "*RST")
if do_clear == 1:
instrument_write(my_socket, "*CLS")
if do_id_query == 1:
tmp_id = instrument_query(my_socket, "*IDN?", 100)
print(tmp_id)
return my_socket
"""*********************************************************************************
Function: instrument_disconnect(my_socket)
Purpose: Break the LAN/Ethernet connection between the controlling computer
and the target instrument.
Parameters:
my_socket - The TCP instrument connection object used for sending
and receiving data.
Returns:
None
Revisions:
2019-07-30 JJB Initial revision.
*********************************************************************************"""
def instrument_disconnect(my_socket):
my_socket.close()
return
"""*********************************************************************************
Function: instrument_write(my_socket, my_command)
Purpose: This function issues control commands to the target instrument.
Parameters:
my_socket - The TCP instrument connection object used for sending
and receiving data.
my_command (string) - The command issued to the instrument to make it
perform some action or service.
Returns:
None
Revisions:
2019-07-30 JJB Initial revision.
*********************************************************************************"""
def instrument_write(my_socket, my_command):
if echo_cmd == 1:
print(my_command)
cmd = "{0}\n".format(my_command)
my_socket.send(cmd.encode())
return
"""*********************************************************************************
Function: instrument_read(my_socket, receive_size)
Purpose: This function asks the connected instrument to reply with some
previously requested information, typically queued up from a call
to instrument_write().
Parameters:
my_socket - The TCP instrument connection object used for sending
and receiving data.
receive_size (int) - Size of the data/string to be returned to
the caller.
Returns:
reply_string (string) - The requested information returned from the
target instrument.
Revisions:
2019-07-30 JJB Initial revision.
*********************************************************************************"""
def instrument_read(my_socket, receive_size):
return my_socket.recv(receive_size).decode()
"""*********************************************************************************
Function: instrument_query(my_socket, my_command, receive_size)
Purpose: This function issues control commands to the target instrument with
the expectation that data will be returned. For this function
instance, the returned data is (typically) in string format.
Parameters:
my_socket - The TCP instrument connection object used for sending
and receiving data.
my_command (string) - The command issued to the instrument to make it
perform some action or service.
receive_size (int) - The approximate number of bytes of data the caller
expects to be returned in the response from the
instrument.
Returns:
reply_string (string) - The requested information returned from the
target instrument. Obtained by way of a caller
to instrument_read().
Revisions:
2019-07-30 JJB Initial revision.
*********************************************************************************"""
def instrument_query(my_socket, my_command, receive_size):
instrument_write(my_socket, my_command)
return instrument_read(my_socket, receive_size)
"""*********************************************************************************
This example application demonstrates the use of the DAQ6510 in a switching
only application. The channel control reflects the use of the Model 7701
multiplexer card. The setup assumes the following:
A. A power source is applied to MUX2 and routed to any of three devices
connected to channels 117, 118, and 119.
B. A measurement device is applied to MUX1 and routed to any of six possible
test points connected to channels 101, 102, 103, 104, 105, and 106.
C. The jumpers between MUX1 and the DMM Input terminals and those between
MUX2 and the DMM Sense terminals have been removed from the Model 7701.
However, relays 34 and 35 are forced to their open state to help ensure
isolation.
D. For each MUX2 source connection to the device, each of the six MUX1
channels will be closed to allow for a measurement to occur, then
opened to allow for subsequent channel measurements.
*********************************************************************************"""
my_ip_address = "192.168.1.67" # Define your instrument's IP address here.
my_port = 5025 # Define your instrument's port number here.
do_instr_reset = 1
do_instr_clear = 0
do_instr_id_query = 1
t1 = time.time()
# Open the socket connections...
my_daq = socket.socket(socket.AF_INET, socket.SOCK_STREAM) # Establish a TCP/IP socket object
my_daq.setsockopt(socket.IPPROTO_TCP, socket.TCP_NODELAY, 1)
instrument_connect(my_daq, my_ip_address, my_port, 20000, do_instr_reset, do_instr_clear, do_instr_id_query)
source_channel_list = (["117", "118", "119"])
measure_channel_list = (["101", "102", "103", "104", "105", "106"])
# open channels 134 and 135 to supplement isolation from the DAQ6510
# internal input and sense terminals
instrument_write(my_daq, "channel.multiple.open(\"134,135\")")
# close channel 133 to ensure the disconnect between the MUX1 and MUX2
# multiplexer banks
instrument_write(my_daq, "channel.multiple.close(\"133\")")
for s_chan in source_channel_list:
# close the source channel
instrument_write(my_daq, "channel.multiple.close(\"{0}\")".format(s_chan))
for m_chan in measure_channel_list:
# close the test point measurement channel
instrument_write(my_daq, "channel.multiple.close(\"{0}\")".format(m_chan))
# insert a delay representative of the measurement activity performed
# by the external instrument
time.sleep(0.5)
# open the test point measurement channel
instrument_write(my_daq, "channel.multiple.open(\"{0}\")".format(m_chan))
# open the source channel
instrument_write(my_daq, "channel.multiple.open(\"{0}\")".format(s_chan))
# add another delay representative of the external source management
#commands
time.sleep(0.5)
instrument_disconnect(my_daq)
t2 = time.time() # Stop the timer...
# Notify the user of completion and the data streaming rate achieved.
print("done")
print("Total Time Elapsed: {0:.3f} s".format(t2 - t1))
input("Press Enter to continue...")
exit()
```
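The header comment describes pairing each of the three source channels with each of the six measurement channels, which the nested loops above implement while keeping each source relay closed for its whole group of measurements. For logging or randomizing the 18 combinations it can be handy to enumerate them flat, for example with `itertools.product`; a small sketch:
```python
from itertools import product

source_channel_list = ["117", "118", "119"]
measure_channel_list = ["101", "102", "103", "104", "105", "106"]

# The same 3 x 6 = 18 (source, measure) combinations the nested loops walk through.
for s_chan, m_chan in product(source_channel_list, measure_channel_list):
    print("source channel {0} -> measure channel {1}".format(s_chan, m_chan))
```
For the actual switching, the nested structure in the script is still preferable because it closes each source relay once per group of six measurements rather than once per pair.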
#### File: DMM6500/Upload_and_Execute_a_Test_Sequence_to_the_Series_2260B_Power_Supply/Upload_and_Execute_Test_Sequence_File_to_2260B_Power_Supply_Rev_B.py
```python
import socket
import struct
import math
import time
import sys
echo_cmd = 0
"""*********************************************************************************
Function: instrument_connect(my_socket, ip_address string, my_port int, timeout,
do_reset, do_clear, do_id_query)
Purpose: Open an instance of an instrument object for remote communication
over LAN/Ethernet.
Parameters:
my_socket - Instance of a socket object.
ip_address (string) - The TCP/IP address string associated with the
target instrument.
my_port (int) - The instrument connection port.
timeout (int) - The timeout limit for query/communication exchanges.
do_reset (int) - Determines whether the instrument is to be reset
upon connection to the instrument. Setting to 1
will perform the reset; setting to zero avoids it.
do_clear (int) - Determines whether the instrument is to be cleared
do_id_query (int) - Determines whether the instrument is to echo its
identification string after it is initialized.
Returns:
my_socket - Updated instance of a socket object that includes
attributes of a valid connection.
Revisions:
2019-07-30 JJB Initial revision.
*********************************************************************************"""
def instrument_connect(my_socket, my_address, my_port, timeout, do_reset, do_clear, do_id_query):
my_socket.connect((my_address, my_port)) # input to connect must be a tuple
my_socket.settimeout(timeout)
my_socket.setsockopt(socket.IPPROTO_TCP, socket.TCP_NODELAY, 1)
if do_reset == 1:
instrument_write(my_socket, "*RST")
if do_clear == 1:
instrument_write(my_socket, "*CLS")
if do_id_query == 1:
tmp_id = instrument_query(my_socket, "*IDN?", 100)
print(tmp_id)
return my_socket
"""*********************************************************************************
Function: instrument_disconnect(my_socket)
Purpose: Break the LAN/Ethernet connection between the controlling computer
and the target instrument.
Parameters:
my_socket - The TCP instrument connection object used for sending
and receiving data.
Returns:
None
Revisions:
2019-07-30 JJB Initial revision.
*********************************************************************************"""
def instrument_disconnect(my_socket):
my_socket.close()
return
"""*********************************************************************************
Function: instrument_write(my_socket, my_command)
Purpose: This function issues control commands to the target instrument.
Parameters:
my_socket - The TCP instrument connection object used for sending
and receiving data.
my_command (string) - The command issued to the instrument to make it
perform some action or service.
Returns:
None
Revisions:
2019-07-30 JJB Initial revision.
*********************************************************************************"""
def instrument_write(my_socket, my_command):
if echo_cmd == 1:
print(my_command)
cmd = "{0}\n".format(my_command)
my_socket.send(cmd.encode())
return
"""*********************************************************************************
Function: instrument_read(my_socket, receive_size)
Purpose: This function asks the connected instrument to reply with some
previously requested information, typically queued up from a call
to instrument_write().
Parameters:
my_socket - The TCP instrument connection object used for sending
and receiving data.
receive_size (int) - Size of the data/string to be returned to
the caller.
Returns:
reply_string (string) - The requested information returned from the
target instrument.
Revisions:
2019-07-30 JJB Initial revision.
*********************************************************************************"""
def instrument_read(my_socket, receive_size):
return my_socket.recv(receive_size).decode()
"""*********************************************************************************
Function: instrument_query(my_socket, my_command, receive_size)
Purpose: This function issues control commands to the target instrument with
the expectation that data will be returned. For this function
instance, the returned data is (typically) in string format.
Parameters:
my_socket - The TCP instrument connection object used for sending
and receiving data.
my_command (string) - The command issued to the instrument to make it
perform some action or service.
receive_size (int) - The approximate number of bytes of data the caller
expects to be returned in the response from the
instrument.
Returns:
reply_string (string) - The requested information returned from the
target instrument. Obtained by way of a caller
to instrument_read().
Revisions:
2019-07-30 JJB Initial revision.
*********************************************************************************"""
def instrument_query(my_socket, my_command, receive_size):
instrument_write(my_socket, my_command)
return instrument_read(my_socket, receive_size)
"""*********************************************************************************
Function: write_data(output_data_path, data_str)
Purpose: This function issues control commands to the target instrument.
Parameters:
output_data_path (string) - The file name and path of the file to be written
to. Note that the file is opened in append mode
and no previously existing data will be over-
written.
data_str (string) - The data to be written to file. It is up to the
user to format this data external to this
function prior to using it.
Returns:
None
Revisions:
2020-01-03 JJB Initial revision.
*********************************************************************************"""
def write_data(output_data_path, data_str):
# This function writes the floating point data to the
# target file.
# for f in floats:
ofile = open(output_data_path, "a") # append the target data
dataStr = "{0}\n".format(data_str)
ofile.write(dataStr)
ofile.close() # Close the data file.
return
def upload_test_sequence(instrument_object, file_and_path):
with open(file_and_path) as file_in:
n = 1 # The first line in the sequence file is the header and not intended to be part of the test sequence
for line in file_in:
if n != 1:
instrument_write(instrument_object, "append_test_to_global_table(\"{0}\")".format(line.rstrip('\r\n')))
n += 1
return
"""*********************************************************************************
This example shows how the user of a Keithley DMM6500 can load a TSP script file
and execute embedded functions. This allow the user to customize test operations
at the instrument level. In particular, this example shows how a user might
create a direct socket connection to the Series 2260B power supply and execute
a supply output test sequence that defines voltage/current levels, durations for
each defined step, and slew control.
This program is dependent on two additional files:
A. The series_2260B_sequence_control.tsp script, which:
1. Promotes the transfer of the test sequence file to a Lua table
on the DMM.
2. Initiates the sockets connection to the 2260B
3. Executes the uploaded test sequence.
B. A 2260B test sequence in *.csv format.
*********************************************************************************"""
my_ip_address = "192.168.1.104" # Define your instrument's IP address here.
my_port = 5025 # Define your instrument's port number here.
do_instr_reset = 1
do_instr_clear = 1
do_instr_id_query = 1
t1 = time.time()
# Open the socket connections...
my_instr = socket.socket(socket.AF_INET, socket.SOCK_STREAM) # Establish a TCP/IP socket object
instrument_connect(my_instr, my_ip_address, my_port, 20000, do_instr_reset, do_instr_clear, do_instr_id_query)
# Ready the instrument to receive the target TSP file contents
file = "series_2260B_sequence_control.tsp"
func_file = open(file, "r")
contents = func_file.read()
func_file.close()
instrument_write(my_instr, "if loadfuncs ~= nil then script.delete('loadfuncs') end")
# Load the script file in one large chunk then close out the loadfuncs wrapper script.
instrument_write(my_instr, "loadscript loadfuncs\n{0}\nendscript\n".format(contents))
# Call loadfuncs to load the contents of the script file into active memory
print(instrument_query(my_instr, "loadfuncs()", 32)) # Note that we are echoing a queried function here.
# You will note that the final line in the functions.tsp
# script file is a print() command that will push its
# contents to the output data queue.
instrument_write(my_instr, "do_beep(0.250, 1000, 3)")
file = "Test_Sequence_06.csv"
upload_test_sequence(my_instr, file)
ip_address_2260B = "192.168.1.117"
instrument_write(my_instr, "connect_to_2260B(\"{0}\")".format(ip_address_2260B))
instrument_write(my_instr, "enable_2260B_output({0}, {1}, {2})".format(0.0, 1.0, "ON"))
instrument_write(my_instr, "ps2260_execute_test_sequence()")
instrument_write(my_instr, "enable_2260B_output({0}, {1}, {2})".format(0.0, 1.0, "OFF"))
instrument_write(my_instr, "disconnect_from_2260B()")
instrument_disconnect(my_instr)
t2 = time.time() # Stop the timer...
# Notify the user of completion and the data streaming rate achieved.
print("done")
print("Total Time Elapsed: {0:.3f} s".format(t2 - t1))
input("Press Enter to continue...")
exit()
``` |
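Note that `write_data()` is defined above but never used in the main flow. A sketch of how the queried `loadfuncs()` response (or any other instrument feedback) could be logged with it instead of only printed — the log file name here is arbitrary:
```python
# Sketch: log instrument feedback with the write_data() helper defined above.
reply = instrument_query(my_instr, "loadfuncs()", 32)   # replaces the direct print(...) query above
print(reply)
write_data("sequence_run_log.txt", reply.rstrip())      # arbitrary log file name
```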
{
"source": "39biradar/pyTenable",
"score": 3
} |
#### File: tests/cs/test_uploads.py
```python
import pytest
from ..checker import single
def test_uploads_docker_push_name_typeerror(api):
'''test to raise the exception when the parameter passed is not as the expected type'''
with pytest.raises(TypeError):
api.uploads.docker_push(1)
def test_uploads_docker_push_tag_typeerror(api):
'''test to raise the exception when the parameter passed is not as the expected type'''
with pytest.raises(TypeError):
api.uploads.docker_push('example', tag=1)
def test_uploads_docker_push_cs_name_typeerror(api):
'''test to raise the exception when the parameter passed is not as the expected type'''
with pytest.raises(TypeError):
api.uploads.docker_push('example', tag='latest', cs_name=1)
def test_uploads_docker_push_cs_tag_typeerror(api):
'''test to raise the exception when the parameter passed is not as the expected type'''
with pytest.raises(TypeError):
api.uploads.docker_push('example', tag='latest', cs_tag=1)
@pytest.mark.skip(reason="Can't VCR because of docker.")
def test_uploads_docker_push(image_id):
'''test to check the type of image_id'''
single(image_id, str)
```
#### File: tests/sc/test_queries.py
```python
import pytest
from ..checker import check
from tenable.errors import APIError, UnexpectedValueError
from tests.pytenable_log_handler import log_exception
def test_queries_constructor_sort_field_typeerror(sc):
with pytest.raises(TypeError):
sc.queries._constructor(sort_field=1, tool='1', type='1', filters=[('filtername', 'operator', 'value')])
def test_queries_constructor_description_typeerror(sc):
with pytest.raises(TypeError):
sc.queries._constructor(description=1, tool='1', type='1')
def test_queries_constructor_sort_direction_typeerror(sc):
with pytest.raises(TypeError):
sc.queries._constructor(sort_direction=1, tool='1', type='1')
def test_queries_constructor_sort_direction_unexpectedvalueerror(sc):
with pytest.raises(UnexpectedValueError):
sc.queries._constructor(sort_direction='nope', tool='1', type='1')
def test_queries_constructor_offset_typeerror(sc):
with pytest.raises(TypeError):
sc.queries._constructor(offset='one', tool='1', type='1')
def test_queries_constructor_limit_typeerror(sc):
with pytest.raises(TypeError):
sc.queries._constructor(limit='one', tool='1', type='1')
def test_queries_constructor_owner_id_typeerror(sc):
with pytest.raises(TypeError):
sc.queries._constructor(owner_id='one', tool='1', type='1')
def test_queries_constructor_context_typeerror(sc):
with pytest.raises(TypeError):
sc.queries._constructor(context=1, tool='1', type='1')
def test_queries_constructor_browse_cols_typeerror(sc):
with pytest.raises(TypeError):
sc.queries._constructor(browse_cols=1, tool='1', type='1')
def test_queries_constructor_browse_sort_col_typeerror(sc):
with pytest.raises(TypeError):
sc.queries._constructor(browse_sort_col=1, tool='1', type='1')
def test_queries_constructor_browse_sort_dir_typeerror(sc):
with pytest.raises(TypeError):
sc.queries._constructor(browse_sort_direction=1, tool='1', type='1')
def test_queries_constructor_browse_sort_dir_unexpectedvalueerror(sc):
with pytest.raises(UnexpectedValueError):
sc.queries._constructor(browse_sort_direction='nope', tool='1', type='1')
def test_queries_constructor_tags_typeerror(sc):
with pytest.raises(TypeError):
sc.queries._constructor(tags=1, tool='1', type='ticket', filters=[('filtername', 'operator', [1, 2])])
@pytest.mark.vcr()
def test_queries_constructor_success(sc):
query = sc.queries._constructor(
('filtername', 'operator', 'value'),
('asset', 'op', 2),
tool='vulndetails',
type='thistype',
tags='tag',
sort_field='field1',
sort_direction='asc',
offset=0,
limit=1000,
owner_id=1,
context='nothing',
browse_cols=['something'],
browse_sort_col='yes',
browse_sort_direction='asc',
query_id=1
)
assert isinstance(query, dict)
assert query == {
'tool': 'vulndetails',
'type': 'thistype',
'tags': 'tag',
'filters': [{
'filterName': 'filtername',
'operator': 'operator',
'value': 'value'
}, {
'filterName': 'asset',
'operator': 'op',
'value': {'id': '2'}
}],
'sortField': 'field1',
'sortDir': 'ASC',
'startOffset': 0,
'query_id': 1,
'endOffset': 1000,
'ownerID': '1',
'context': 'nothing',
'browseColumns': 'something',
'browseSortColumn': 'yes',
'browseSortDirection': 'ASC'
}
@pytest.fixture
def query(request, sc, vcr):
with vcr.use_cassette('test_queries_create_success'):
query = sc.queries.create('New Query', 'vulndetails', 'vuln',
('pluginID', '=', '19506'))
def teardown():
try:
with vcr.use_cassette('test_queries_delete_success'):
sc.queries.delete(int(query['id']))
except APIError as error:
log_exception(error)
request.addfinalizer(teardown)
return query
@pytest.mark.vcr()
def test_queries_create_success(sc, query):
assert isinstance(query, dict)
check(query, 'id', str)
check(query, 'name', str)
check(query, 'description', str)
check(query, 'tool', str)
check(query, 'type', str)
check(query, 'tags', str)
check(query, 'context', str)
check(query, 'browseColumns', str)
check(query, 'browseSortColumn', str)
check(query, 'browseSortDirection', str)
check(query, 'createdTime', str)
check(query, 'modifiedTime', str)
check(query, 'status', str)
check(query, 'filters', list)
for filter in query['filters']:
check(filter, 'filterName', str)
check(filter, 'operator', str)
check(filter, 'value', str)
check(query, 'canManage', str)
check(query, 'canUse', str)
check(query, 'creator', dict)
check(query['creator'], 'id', str)
check(query['creator'], 'username', str)
check(query['creator'], 'firstname', str)
check(query['creator'], 'lastname', str)
check(query, 'owner', dict)
check(query['owner'], 'id', str)
check(query['owner'], 'username', str)
check(query['owner'], 'firstname', str)
check(query['owner'], 'lastname', str)
check(query, 'ownerGroup', dict)
check(query['ownerGroup'], 'id', str)
check(query['ownerGroup'], 'name', str)
check(query['ownerGroup'], 'description', str)
check(query, 'targetGroup', dict)
check(query['targetGroup'], 'id', int)
check(query['targetGroup'], 'name', str)
check(query['targetGroup'], 'description', str)
@pytest.mark.vcr()
def test_queries_delete_success(sc, query):
sc.queries.delete(int(query['id']))
@pytest.mark.vcr()
def test_queries_details_success(sc, query):
query = sc.queries.details(int(query['id']))
assert isinstance(query, dict)
check(query, 'id', str)
check(query, 'name', str)
check(query, 'description', str)
check(query, 'tool', str)
check(query, 'type', str)
check(query, 'tags', str)
check(query, 'context', str)
check(query, 'browseColumns', str)
check(query, 'browseSortColumn', str)
check(query, 'browseSortDirection', str)
check(query, 'createdTime', str)
check(query, 'modifiedTime', str)
check(query, 'status', str)
check(query, 'filters', list)
for filter in query['filters']:
check(filter, 'filterName', str)
check(filter, 'operator', str)
check(filter, 'value', str)
check(query, 'canManage', str)
check(query, 'canUse', str)
check(query, 'creator', dict)
check(query['creator'], 'id', str)
check(query['creator'], 'username', str)
check(query['creator'], 'firstname', str)
check(query['creator'], 'lastname', str)
check(query, 'owner', dict)
check(query['owner'], 'id', str)
check(query['owner'], 'username', str)
check(query['owner'], 'firstname', str)
check(query['owner'], 'lastname', str)
check(query, 'ownerGroup', dict)
check(query['ownerGroup'], 'id', str)
check(query['ownerGroup'], 'name', str)
check(query['ownerGroup'], 'description', str)
check(query, 'targetGroup', dict)
check(query['targetGroup'], 'id', int)
check(query['targetGroup'], 'name', str)
check(query['targetGroup'], 'description', str)
@pytest.mark.vcr()
def test_queries_details_success_for_fields(sc, query):
q = sc.queries.details(int(query['id']), fields=["id", "name", "description"])
assert isinstance(q, dict)
check(q, 'id', str)
check(q, 'name', str)
check(q, 'description', str)
@pytest.mark.vcr()
def test_queries_edit_success(sc, query):
query = sc.queries.edit(int(query['id']), name='Updated Name')
assert isinstance(query, dict)
check(query, 'id', str)
check(query, 'name', str)
check(query, 'description', str)
check(query, 'tool', str)
check(query, 'type', str)
check(query, 'tags', str)
check(query, 'context', str)
check(query, 'browseColumns', str)
check(query, 'browseSortColumn', str)
check(query, 'browseSortDirection', str)
check(query, 'createdTime', str)
check(query, 'modifiedTime', str)
check(query, 'filters', list)
for filter in query['filters']:
check(filter, 'filterName', str)
check(filter, 'operator', str)
check(filter, 'value', str)
check(query, 'canManage', str)
check(query, 'canUse', str)
check(query, 'creator', dict)
check(query['creator'], 'id', str)
check(query['creator'], 'username', str)
check(query['creator'], 'firstname', str)
check(query['creator'], 'lastname', str)
check(query, 'owner', dict)
check(query['owner'], 'id', str)
check(query['owner'], 'username', str)
check(query['owner'], 'firstname', str)
check(query['owner'], 'lastname', str)
check(query, 'ownerGroup', dict)
check(query['ownerGroup'], 'id', str)
check(query['ownerGroup'], 'name', str)
check(query['ownerGroup'], 'description', str)
check(query, 'targetGroup', dict)
check(query['targetGroup'], 'id', int)
check(query['targetGroup'], 'name', str)
check(query['targetGroup'], 'description', str)
@pytest.mark.vcr()
def test_queries_list_success(sc, query):
queries = sc.queries.list()
assert isinstance(queries, dict)
for ltype in ['manageable', 'usable']:
for query in queries[ltype]:
assert isinstance(query, dict)
check(query, 'id', str)
check(query, 'name', str)
check(query, 'description', str)
@pytest.mark.vcr()
def test_queries_list_success_for_fields(sc):
queries = sc.queries.list(fields=["id", "name"])
assert isinstance(queries, dict)
for type in ['manageable', 'usable']:
for query in queries[type]:
assert isinstance(query, dict)
check(query, 'id', str)
check(query, 'name', str)
@pytest.mark.vcr()
def test_queries_tags_success(sc):
tags = sc.queries.tags()
assert isinstance(tags, list)
for tag in tags:
assert isinstance(tag, str)
@pytest.mark.vcr()
def test_queries_share_id_typeerror(sc):
with pytest.raises(TypeError):
sc.queries.share('one', 1)
@pytest.mark.vcr()
def test_queries_share_group_id_typeerror(sc):
with pytest.raises(TypeError):
sc.queries.share(1, 'one')
@pytest.mark.vcr()
def test_queries_share_success(sc, query, group):
query = sc.queries.share(int(query['id']), int(group['id']))
assert isinstance(query, dict)
check(query, 'id', str)
check(query, 'name', str)
check(query, 'description', str)
check(query, 'tool', str)
check(query, 'type', str)
check(query, 'tags', str)
check(query, 'context', str)
check(query, 'browseColumns', str)
check(query, 'browseSortColumn', str)
check(query, 'browseSortDirection', str)
check(query, 'createdTime', str)
check(query, 'modifiedTime', str)
check(query, 'status', str)
check(query, 'filters', list)
for filter in query['filters']:
check(filter, 'filterName', str)
check(filter, 'operator', str)
check(filter, 'value', str)
check(query, 'creator', dict)
check(query['creator'], 'id', str)
check(query['creator'], 'username', str)
check(query['creator'], 'firstname', str)
check(query['creator'], 'lastname', str)
check(query, 'owner', dict)
check(query['owner'], 'id', str)
check(query['owner'], 'username', str)
check(query['owner'], 'firstname', str)
check(query['owner'], 'lastname', str)
check(query, 'ownerGroup', dict)
check(query['ownerGroup'], 'id', str)
check(query['ownerGroup'], 'name', str)
check(query['ownerGroup'], 'description', str)
check(query, 'targetGroup', dict)
check(query['targetGroup'], 'id', int)
check(query['targetGroup'], 'name', str)
check(query['targetGroup'], 'description', str)
``` |
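These tests lean on the `check` helper (and the uploads tests on `single`) imported from the package's `tests/checker` module, which is not shown here. The sketch below is only an assumption about what such helpers typically do, not the actual pyTenable implementation:
```python
# Hypothetical sketch of the checker-style helpers these tests rely on; the
# real ones live in tests/checker.py and may allow None values, multiple
# accepted types, or richer error messages.
def single(item, expected_type):
    assert isinstance(item, expected_type)

def check(obj, key, expected_type):
    assert key in obj
    assert isinstance(obj[key], expected_type)
```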
{
"source": "39xdgy/stock_prodection",
"score": 3
} |
#### File: stock_prodection/class_file/day_info.py
```python
class day_info:
def __init__(self, high, low, open, close):
self.high = high
self.low = low
self.open = open
self.close = close
self.DI = (high + low + (2*close))/4
self.EMA12 = None
self.EMA26 = None
self.DIF = None
self.MACD = None
self.OSC = None
self.OSCS = None
self.RSI6 = None
self.RSI12 = None
self.RSI_diff = None
self.RSV = None
self.K = None
self.D = None
self.J = None
def set_EMA12(self, EMA12):
self.EMA12 = EMA12
def set_EMA26(self, EMA26):
self.EMA26 = EMA26
def set_DIF(self, DIF):
self.DIF = DIF
def set_MACD(self, MACD):
self.MACD = MACD
def set_OSC(self, OSC):
self.OSC = OSC
def set_OSCS(self, OSCS):
self.OSCS = OSCS
def set_RSI6(self, RSI6):
self.RSI6 = RSI6
def set_RSI12(self, RSI12):
self.RSI12 = RSI12
def set_RSI_diff(self, RSI_diff):
self.RSI_diff = RSI_diff
def set_RSV(self, RSV):
self.RSV = RSV
def set_K(self, K):
self.K = K
def set_D(self, D):
self.D = D
def set_J(self, J):
self.J = J
def get_all_flag(self):
return [self.EMA12, self.EMA26,
self.DIF, self.MACD,
self.OSC, self.OSCS,
self.RSI6, self.RSI12, self.RSI_diff,
self.RSV, self.K, self.D, self.J]
``` |
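`day_info` is a plain container whose only derived field is `DI`; every indicator starts as `None` until its setter is called. A short usage sketch with made-up prices:
```python
# Made-up OHLC values for illustration.
day = day_info(high=105.0, low=98.0, open=100.0, close=103.0)
print(day.DI)              # (105 + 98 + 2 * 103) / 4 = 102.25
day.set_EMA12(101.7)       # indicators get filled in later by the feature pipeline
print(day.get_all_flag())  # [101.7, None, None, ...] until the remaining setters run
```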
{
"source": "3a1b2c3/nerf_pl",
"score": 2
} |
#### File: 3a1b2c3/nerf_pl/extract_color_mesh.py
```python
import torch
import os
import numpy as np
import cv2
from PIL import Image
from collections import defaultdict
from tqdm import tqdm
import mcubes
import open3d as o3d
from plyfile import PlyData, PlyElement
from argparse import ArgumentParser
from models.rendering import *
from models.nerf import *
from utils import load_ckpt
from datasets import dataset_dict
torch.backends.cudnn.benchmark = True
def get_opts():
parser = ArgumentParser()
parser.add_argument('--root_dir', type=str,
default='/home/ubuntu/data/nerf_example_data/nerf_synthetic/lego',
help='root directory of dataset')
parser.add_argument('--dataset_name', type=str, default='blender',
choices=['blender', 'llff'],
help='which dataset to validate')
parser.add_argument('--scene_name', type=str, default='test',
help='scene name, used as output ply filename')
parser.add_argument('--img_wh', nargs="+", type=int, default=[800, 800],
help='resolution (img_w, img_h) of the image')
parser.add_argument('--N_samples', type=int, default=64,
                        help='number of samples to infer the accumulated opacity')
parser.add_argument('--chunk', type=int, default=32*1024,
help='chunk size to split the input to avoid OOM')
parser.add_argument('--ckpt_path', type=str, required=True,
help='pretrained checkpoint path to load')
parser.add_argument('--N_grid', type=int, default=256,
help='size of the grid on 1 side, larger=higher resolution')
parser.add_argument('--x_range', nargs="+", type=float, default=[-1.0, 1.0],
help='x range of the object')
    parser.add_argument('--y_range', nargs="+", type=float, default=[-1.0, 1.0],
                        help='y range of the object')
    parser.add_argument('--z_range', nargs="+", type=float, default=[-1.0, 1.0],
                        help='z range of the object')
parser.add_argument('--sigma_threshold', type=float, default=20.0,
help='threshold to consider a location is occupied')
parser.add_argument('--occ_threshold', type=float, default=0.2,
help='''threshold to consider a vertex is occluded.
larger=fewer occluded pixels''')
#### method using vertex normals ####
parser.add_argument('--use_vertex_normal', action="store_true",
help='use vertex normals to compute color')
parser.add_argument('--N_importance', type=int, default=64,
                        help='number of fine samples to infer the accumulated opacity')
parser.add_argument('--near_t', type=float, default=1.0,
help='the near bound factor to start the ray')
return parser.parse_args()
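# Example invocation (paths, image size, and checkpoint name below are
# placeholders; --ckpt_path is the only required argument):
#   python extract_color_mesh.py \
#       --root_dir /path/to/nerf_synthetic/lego --dataset_name blender \
#       --scene_name lego --img_wh 800 800 \
#       --ckpt_path ckpts/lego/epoch=29.ckpt \
#       --N_grid 256 --sigma_threshold 20.0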
@torch.no_grad()
def f(models, embeddings, rays, N_samples, N_importance, chunk, white_back):
"""Do batched inference on rays using chunk."""
B = rays.shape[0]
results = defaultdict(list)
for i in range(0, B, chunk):
rendered_ray_chunks = \
render_rays(models,
embeddings,
rays[i:i+chunk],
N_samples,
False,
0,
0,
N_importance,
chunk,
white_back,
test_time=True)
for k, v in rendered_ray_chunks.items():
results[k] += [v]
for k, v in results.items():
results[k] = torch.cat(v, 0)
return results
if __name__ == "__main__":
args = get_opts()
kwargs = {'root_dir': args.root_dir,
'img_wh': tuple(args.img_wh)}
if args.dataset_name == 'llff':
kwargs['spheric_poses'] = True
kwargs['split'] = 'test'
else:
kwargs['split'] = 'train'
dataset = dataset_dict[args.dataset_name](**kwargs)
embedding_xyz = Embedding(3, 10)
embedding_dir = Embedding(3, 4)
embeddings = {'xyz': embedding_xyz, 'dir': embedding_dir}
nerf_fine = NeRF()
load_ckpt(nerf_fine, args.ckpt_path, model_name='nerf_fine')
nerf_fine.cuda().eval()
# define the dense grid for query
N = args.N_grid
xmin, xmax = args.x_range
ymin, ymax = args.y_range
zmin, zmax = args.z_range
# assert xmax-xmin == ymax-ymin == zmax-zmin, 'the ranges must have the same length!'
x = np.linspace(xmin, xmax, N)
y = np.linspace(ymin, ymax, N)
z = np.linspace(zmin, zmax, N)
xyz_ = torch.FloatTensor(np.stack(np.meshgrid(x, y, z), -1).reshape(-1, 3)).cuda()
dir_ = torch.zeros_like(xyz_).cuda()
# sigma is independent of direction, so any value here will produce the same result
# predict sigma (occupancy) for each grid location
print('Predicting occupancy ...')
with torch.no_grad():
B = xyz_.shape[0]
out_chunks = []
for i in tqdm(range(0, B, args.chunk)):
xyz_embedded = embedding_xyz(xyz_[i:i+args.chunk]) # (N, embed_xyz_channels)
dir_embedded = embedding_dir(dir_[i:i+args.chunk]) # (N, embed_dir_channels)
xyzdir_embedded = torch.cat([xyz_embedded, dir_embedded], 1)
out_chunks += [nerf_fine(xyzdir_embedded)]
rgbsigma = torch.cat(out_chunks, 0)
sigma = rgbsigma[:, -1].cpu().numpy()
sigma = np.maximum(sigma, 0).reshape(N, N, N)
# perform marching cube algorithm to retrieve vertices and triangle mesh
print('Extracting mesh ...')
vertices, triangles = mcubes.marching_cubes(sigma, args.sigma_threshold)
##### Until mesh extraction here, it is the same as the original repo. ######
vertices_ = (vertices/N).astype(np.float32)
    ## swap x and y coordinates: np.meshgrid defaults to 'xy' indexing, so axis 0 of the
    ## sigma grid is y and axis 1 is x, and the marching-cubes vertex indices inherit that
    ## ordering (the mapping below also assumes x_range and y_range span equal lengths)
x_ = (ymax-ymin) * vertices_[:, 1] + ymin
y_ = (xmax-xmin) * vertices_[:, 0] + xmin
vertices_[:, 0] = x_
vertices_[:, 1] = y_
vertices_[:, 2] = (zmax-zmin) * vertices_[:, 2] + zmin
vertices_.dtype = [('x', 'f4'), ('y', 'f4'), ('z', 'f4')]
face = np.empty(len(triangles), dtype=[('vertex_indices', 'i4', (3,))])
face['vertex_indices'] = triangles
PlyData([PlyElement.describe(vertices_[:, 0], 'vertex'),
PlyElement.describe(face, 'face')]).write(f'{args.scene_name}.ply')
# remove noise in the mesh by keeping only the biggest cluster
print('Removing noise ...')
mesh = o3d.io.read_triangle_mesh(f"{args.scene_name}.ply")
idxs, count, _ = mesh.cluster_connected_triangles()
max_cluster_idx = np.argmax(count)
triangles_to_remove = [i for i in range(len(face)) if idxs[i] != max_cluster_idx]
mesh.remove_triangles_by_index(triangles_to_remove)
mesh.remove_unreferenced_vertices()
print(f'Mesh has {len(mesh.vertices)/1e6:.2f} M vertices and {len(mesh.triangles)/1e6:.2f} M faces.')
vertices_ = np.asarray(mesh.vertices).astype(np.float32)
triangles = np.asarray(mesh.triangles)
# perform color prediction
# Step 0. define constants (image width, height and intrinsics)
W, H = args.img_wh
K = np.array([[dataset.focal, 0, W/2],
[0, dataset.focal, H/2],
[0, 0, 1]]).astype(np.float32)
# Step 1. transform vertices into world coordinate
N_vertices = len(vertices_)
vertices_homo = np.concatenate([vertices_, np.ones((N_vertices, 1))], 1) # (N, 4)
if args.use_vertex_normal: ## use normal vector method as suggested by the author.
## see https://github.com/bmild/nerf/issues/44
mesh.compute_vertex_normals()
rays_d = torch.FloatTensor(np.asarray(mesh.vertex_normals))
near = dataset.bounds.min() * torch.ones_like(rays_d[:, :1])
far = dataset.bounds.max() * torch.ones_like(rays_d[:, :1])
rays_o = torch.FloatTensor(vertices_) - rays_d * near * args.near_t
nerf_coarse = NeRF()
load_ckpt(nerf_coarse, args.ckpt_path, model_name='nerf_coarse')
nerf_coarse.cuda().eval()
results = f({'coarse': nerf_coarse, 'fine': nerf_fine}, embeddings,
torch.cat([rays_o, rays_d, near, far], 1).cuda(),
args.N_samples,
args.N_importance,
args.chunk,
dataset.white_back)
else: ## use my color average method. see README_mesh.md
## buffers to store the final averaged color
non_occluded_sum = np.zeros((N_vertices, 1))
v_color_sum = np.zeros((N_vertices, 3))
# Step 2. project the vertices onto each training image to infer the color
print('Fusing colors ...')
for idx in tqdm(range(len(dataset.image_paths))):
## read image of this pose
image = Image.open(dataset.image_paths[idx]).convert('RGB')
image = image.resize(tuple(args.img_wh), Image.LANCZOS)
image = np.array(image)
## read the camera to world relative pose
P_c2w = np.concatenate([dataset.poses[idx], np.array([0, 0, 0, 1]).reshape(1, 4)], 0)
P_w2c = np.linalg.inv(P_c2w)[:3] # (3, 4)
## project vertices from world coordinate to camera coordinate
vertices_cam = (P_w2c @ vertices_homo.T) # (3, N) in "right up back"
vertices_cam[1:] *= -1 # (3, N) in "right down forward"
## project vertices from camera coordinate to pixel coordinate
vertices_image = (K @ vertices_cam).T # (N, 3)
depth = vertices_image[:, -1:]+1e-5 # the depth of the vertices, used as far plane
vertices_image = vertices_image[:, :2]/depth
vertices_image = vertices_image.astype(np.float32)
vertices_image[:, 0] = np.clip(vertices_image[:, 0], 0, W-1)
vertices_image[:, 1] = np.clip(vertices_image[:, 1], 0, H-1)
## compute the color on these projected pixel coordinates
## using bilinear interpolation.
## NOTE: opencv's implementation has a size limit of 32768 pixels per side,
## so we split the input into chunks.
colors = []
remap_chunk = int(3e4)
for i in range(0, N_vertices, remap_chunk):
colors += [cv2.remap(image,
vertices_image[i:i+remap_chunk, 0],
vertices_image[i:i+remap_chunk, 1],
interpolation=cv2.INTER_LINEAR)[:, 0]]
colors = np.vstack(colors) # (N_vertices, 3)
## predict occlusion of each vertex
## we leverage the concept of NeRF by constructing rays coming out from the camera
## and hitting each vertex; by computing the accumulated opacity along this path,
## we can know if the vertex is occluded or not.
## for vertices that appear to be occluded from every input view, we make the
## assumption that its color is the same as its neighbors that are facing our side.
## (think of a surface with one side facing us: we assume the other side has the same color)
## ray's origin is camera origin
rays_o = torch.FloatTensor(dataset.poses[idx][:, -1]).expand(N_vertices, 3)
## ray's direction is the vector pointing from camera origin to the vertices
rays_d = torch.FloatTensor(vertices_) - rays_o # (N_vertices, 3)
rays_d = rays_d / torch.norm(rays_d, dim=-1, keepdim=True)
near = dataset.bounds.min() * torch.ones_like(rays_o[:, :1])
## the far plane is the depth of the vertices, since what we want is the accumulated
## opacity along the path from camera origin to the vertices
far = torch.FloatTensor(depth) * torch.ones_like(rays_o[:, :1])
results = f({'coarse': nerf_fine}, embeddings,
torch.cat([rays_o, rays_d, near, far], 1).cuda(),
args.N_samples,
0,
args.chunk,
dataset.white_back)
opacity = results['opacity_coarse'].cpu().numpy()[:, np.newaxis] # (N_vertices, 1)
opacity = np.nan_to_num(opacity, 1)
non_occluded = np.ones_like(non_occluded_sum) * 0.1/depth # weight by inverse depth
# near=more confident in color
non_occluded += opacity < args.occ_threshold
v_color_sum += colors * non_occluded
non_occluded_sum += non_occluded
# Step 3. combine the output and write to file
if args.use_vertex_normal:
v_colors = results['rgb_fine'].cpu().numpy() * 255.0
else: ## the combined color is the average color among all views
v_colors = v_color_sum/non_occluded_sum
v_colors = v_colors.astype(np.uint8)
v_colors.dtype = [('red', 'u1'), ('green', 'u1'), ('blue', 'u1')]
vertices_.dtype = [('x', 'f4'), ('y', 'f4'), ('z', 'f4')]
vertex_all = np.empty(N_vertices, vertices_.dtype.descr+v_colors.dtype.descr)
for prop in vertices_.dtype.names:
vertex_all[prop] = vertices_[prop][:, 0]
for prop in v_colors.dtype.names:
vertex_all[prop] = v_colors[prop][:, 0]
face = np.empty(len(triangles), dtype=[('vertex_indices', 'i4', (3,))])
face['vertex_indices'] = triangles
PlyData([PlyElement.describe(vertex_all, 'vertex'),
PlyElement.describe(face, 'face')]).write(f'{args.scene_name}.ply')
print('Done!')
``` |
{
"source": "3aaaaaamer/traverse-curves",
"score": 3
} |
#### File: psapy/.ipynb_checkpoints/Vogel-checkpoint.py
```python
from __future__ import division
import math
import psapy.FluidProps as FluidProps
import matplotlib.pyplot as plt
Pressure=3000.0
Thickness=75.0
GasGrav=0.65
API= 28.0
GOR= 375.0
Temp=150.0
rw=0.328
re=1053.0
s=-1.5
Psat = FluidProps.Pbub(Temp,75,100,GasGrav, API, GOR)
def Darcy_IPR(k,h,visc, re,rw, s, P, OilFVF, nPoints):
"""Function to calculate IPR using Darcy's Equation. It returns a list with a pair of Pressure and rates"""
#Q= (k*h/visc)*(P-Pwf)/(141.2*OilFVF*visc*(math.log(re/rw)-0.75+s))
PwfList=[]
QList=[]
QList.append(0)
PwfList.append(P)
mStep=P/nPoints
i=1
while (i<=nPoints):
Pwf=PwfList[i-1]-mStep
Q= (k*h/visc)*(P-Pwf)/(141.2*OilFVF*visc*(math.log(re/rw)-0.75+s))
QList.append(Q)
PwfList.append(Pwf)
i=i+1
DarcyList=[QList,PwfList]
return DarcyList
def VogelIPR(P, Pb, Pwf, Qo, nPoints):
"""Function to calculate IPR using Vogel's Equation. It returns a list with a pair of Pressure and rates"""
PwfList=[]
QList=[]
QList.append(0)
PwfList.append(P)
VogelList=[]
mStep=P/nPoints
i=1
if Pwf>=Pb:
J=Qo/(P-Pwf)
else:
J=Qo/((P-Pb)+((Pb/1.8)*(1-0.2*(Pwf/Pb)-0.8*(Pwf/Pb)**2)))
while (i<=nPoints):
Pwfs=PwfList[i-1]-mStep
if Pwfs>=Pb:
Q=J*(P-Pwfs)
else:
Qb=J*(P-Pb)
Q=Qb+(J*Pb/1.8)*(1-0.2*(Pwfs/Pb)-0.8*(Pwfs/Pb)**2)
QList.append(Q)
PwfList.append(Pwfs)
i=i+1
VogelList=[QList,PwfList]
print(VogelList)
return VogelList
def Vogel_DarcyIPR(P, k,h,visc, re,rw, s, OilFVF,Temp, Pb, nPoints):
"""Function to calculate IPR using Vogel's Equation. It returns a list with a pair of Pressure and rates"""
PwfList=[]
QList=[]
QList.append(0)
PwfList.append(P)
VogelList=[]
mStep=P/nPoints
i=1
J= (k*h/visc)/(141.2*OilFVF*visc*(math.log(re/rw)-0.75+s))
while (i<=nPoints):
Pwfs=PwfList[i-1]-mStep
print(Pwfs)
if Pwfs>=Pb:
Q=J*(P-Pwfs)
else:
Qb=J*(P-Pb)
Q=Qb+(J*Pb/1.8)*(1-0.2*(Pwfs/Pb)-0.8*(Pwfs/Pb)**2)
QList.append(Q)
PwfList.append(Pwfs)
i=i+1
VogelList=[QList,PwfList]
return VogelList
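# Hedged usage sketch (added): the permeability (30 md), viscosity (1.2 cp), oil FVF (1.2)
# and test point (Pwf = 2500 psi, Qo = 500 STB/d) below are illustrative assumptions,
# not values taken from the original script.
if __name__ == "__main__":
    darcy_curve = Darcy_IPR(30.0, Thickness, 1.2, re, rw, s, Pressure, 1.2, 20)
    vogel_curve = VogelIPR(Pressure, Psat, 2500.0, 500.0, 20)
    plt.plot(darcy_curve[0], darcy_curve[1], label="Darcy")
    plt.plot(vogel_curve[0], vogel_curve[1], label="Vogel")
    plt.xlabel("Rate, STB/d")
    plt.ylabel("Pwf, psi")
    plt.legend()
    plt.show()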
``` |
{
"source": "3ach/pharmacap",
"score": 2
} |
#### File: pharmacap/models/oxycodone.py
```python
import math
# Come from literature
# mandema et al, j clin pharm
# characterization and validation of a pharmacokinetic model for controlled-release oxycodone
F_rel = 1.02
f_1 = 0.38
k_c1 = 1.11
k_c2 = 0.11
t_lag = 0.206
def plasma_concentration(time):
adjusted_time = time - t_lag
c1_component = f_1 * k_c1 * math.exp(-k_c1 * adjusted_time)
c2_component = (1 - f_1) * k_c2 * math.exp(-k_c2 * adjusted_time)
return F_rel * (c1_component + c2_component)
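# Worked check (added): at time == t_lag the adjusted time is zero, so the model reduces to
# F_rel * (f_1 * k_c1 + (1 - f_1) * k_c2) = 1.02 * (0.38 * 1.11 + 0.62 * 0.11) = 1.02 * 0.49 = 0.4998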
def convolve(x, y):
total_length = len(y) + len(x) - 1
x_flipped = x[::-1]
solution = []
for offset in range(-len(x) + 1, total_length):
instance = 0
for n in range(min(len(x), len(y))):
try:
if n + offset < 0:
instance += 0
else:
instance += y[n+offset] * x_flipped[n]
except:
instance += 0
solution.append(instance)
return solution
times = [x / 2 for x in range(20)]
concentrations = [plasma_concentration(time) for time in times]
doses = [math.exp(-x) + (math.exp(-(x - 5)) if x >=5 else 0) for x in range(11)]
print(doses)
c_concs = convolve(doses, concentrations)
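# Hedged cross-check (added, assumes numpy is available): for these input lengths
# (len(doses) <= len(concentrations)) the hand-rolled convolve reproduces numpy's
# "full" convolution, followed by len(doses) - 1 trailing zeros.
import numpy as np
assert np.allclose(c_concs[:len(doses) + len(concentrations) - 1],
                   np.convolve(doses, concentrations))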
max_concentration = max(c_concs)
norm_concentrations = [12 * (c_conc / max_concentration) for c_conc in c_concs]
for time, norm_concentration, concentration in zip(times, norm_concentrations, c_concs):
leds = "".join(['*' for _ in range(int(norm_concentration))])
print(f"{time}\t|{leds}\t\t\t\t|{concentration}")
``` |
{
"source": "3always8/Molugitg",
"score": 2
} |
#### File: Molugitg/crawling/post_lol_story.py
```python
import numpy as np
import pandas as pd
import io
import re
import gensim
from konlpy.tag import Hannanum, Kkma, Komoran, Mecab, Okt
from gensim.models import Word2Vec
from ckonlpy.tag import Twitter
import os.path
FILE_PATHS = ['lol_stories.csv', 'mbti_output.csv']
ADDITIONAL_WORDS_PATHS = ['lol_additional_words.csv', 'mbti_additional_words.csv']
OUTPUT_MODEL_PATH = './../models/lol_model.model'
TEXT_INDEXS = ['text', 'text']
EXCLUDE_WORDS = ['앤서니', '레이놀즈',
'스크롤', '그', '살펴보기', '스크롤하기',
'그녀', '루오', '마이클', '굴딩',
'데이나', '루어리', '그레이엄', '맥닐',
'제러드', '로슨', '로완', '윌리엄스',
                 '오브라이언', '마커스', '테럴', '스미스',
'이안', '마틴', '마이클', '하우겐', '위스크',
'매튜', '필립', '바르가스', '로라',
'미셋', '대니얼', '코츠', '아만다', '제프리',
'아비게일', '하비', '로완', '노엘', '윌리엄스',
'케이티', '치로니스', '이차오',
'데이비드', '슬래글', '오딘', '오스틴', '샤퍼']
WORD2VEC_WINDOW = 4
WORD2VEC_MIN_COUNT = 2
tokenized_text = pd.DataFrame([])
count = 0
for path in FILE_PATHS:
df = pd.read_csv(path, encoding='utf-8')
tokenized_text = pd.concat([tokenized_text, df[TEXT_INDEXS[count]]], axis=0, ignore_index=True)
count += 1
tokenized_text = tokenized_text[0]
# Tokenization functions
hannanum = Hannanum() # seems to throw an error if the target text is too long (roughly 12,800 Korean characters)
kkma = Kkma()
komoran = Komoran()
twitter = Twitter()
# Add custom words to the dictionary
for path in ADDITIONAL_WORDS_PATHS:
additional_words = pd.read_csv(path, encoding='cp949', header=None)
for index, row in additional_words.iterrows():
twitter.add_dictionary(row[0], row[1])
def tokenizer(row):
pos_list = twitter.pos(row)
r = []
for word in pos_list:
if not word[1] in ["Josa", "Punctuation", "Foreign",
"Suffix", "Eomi", "Determiner", "PreEomi", "Adverb",
"Conjunction", "Exclamation"]:
w = word[0]
if w not in EXCLUDE_WORDS:
r.append(w)
rl = (" ".join(r)).strip()
return twitter.morphs(rl, stem=True)
tokenized_text = tokenized_text.apply(tokenizer)
print(tokenized_text.head())
print("최대 토큰 개수 :", (max(len(i) for i in tokenized_text)))
print("최소 토큰 개수 :", (min(len(i) for i in tokenized_text)))
print("평균 토큰 개수 :", sum(len(i) for i in tokenized_text)/len(tokenized_text))
new_model = Word2Vec(size = 200, window = WORD2VEC_WINDOW, min_count = WORD2VEC_MIN_COUNT, workers = 4, sg = 1)
new_model.build_vocab(tokenized_text)
total_examples = len(tokenized_text)
new_model.train(tokenized_text, total_examples=total_examples, epochs=new_model.iter)
print(new_model.wv.vocab.keys())
print("new vocab count:", len(new_model.wv.vocab.keys()))
model_result = new_model.wv.most_similar('빌지워터')
print(model_result)
new_model.save(OUTPUT_MODEL_PATH)
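# Hedged usage sketch (added): the saved model can be reloaded elsewhere and queried the same
# way; '빌지워터' is reused here purely as an example query term.
reloaded_model = Word2Vec.load(OUTPUT_MODEL_PATH)
print(reloaded_model.wv.most_similar('빌지워터', topn=5))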
``` |
{
"source": "3amon/AnalysisListener",
"score": 2
} |
#### File: 3amon/AnalysisListener/main.py
```python
import os
import subprocess
import time
import shutil
WATCH_DIR = "./uploads"
PROCESSING_DIR = "./processing"
OUTPUT_DIR = "./output"
def moveDir(src, dest):
w = os.walk(src)
for root, dirs, files in os.walk(src):
for file in files:
shutil.move(os.path.join(src, file), os.path.join(dest, file))
while True:
time.sleep(10)
print "Looking for new files!"
moveDir(WATCH_DIR, PROCESSING_DIR);
# python ./pyAudioAnalysis/audioAnalysis.py featureExtractionDir -i ./output -mw 50.0 -ms 25.0
subprocess.Popen(
['python', './pyAudioAnalysis/audioAnalysis.py', 'featureExtractionDir', "-i", PROCESSING_DIR, '-mw', "50.0",
"-ms", "25.0"]).wait()
moveDir(PROCESSING_DIR, OUTPUT_DIR);
``` |
{
"source": "3amon/PiPsEyeListener",
"score": 3
} |
#### File: 3amon/PiPsEyeListener/main.py
```python
import pyaudio
import struct
import math
import wave
import datetime
import array
import sys
import subprocess
import os
INITIAL_THRESHOLD = 0.010
FORMAT = pyaudio.paInt16
FRAME_MAX_VALUE = 2 ** 15 - 1
NORMALIZE = (1.0 / FRAME_MAX_VALUE)
CHANNELS = 1
RATE = 16000
INPUT_BLOCK_TIME = 0.05
INPUT_FRAMES_PER_BLOCK = int(RATE*INPUT_BLOCK_TIME)
# if we get this many noisy blocks in a row, increase the threshold
OVERSENSITIVE = 10.0/INPUT_BLOCK_TIME
# if we get this many quiet blocks in a row, decrease the threshold
UNDERSENSITIVE = 10.0/INPUT_BLOCK_TIME
# 1 second of sound is necessary for us to care
SOUND_FILTER_LEN = 1.0/INPUT_BLOCK_TIME
NORMALIZE_MINUS_ONE_dB = 10 ** (-1.0 / 20)
# Our long moving average
LONG_AVG_LEN = 60.0 / INPUT_BLOCK_TIME
# Our short moving average
SHORT_AVG_LEN = 5.0 / INPUT_BLOCK_TIME
# Server scp key
SERVER_KEY = "server.pem"
SERVER_PATH = os.environ['AUDIO_LISTENER_PATH']
def get_rms( block ):
# iterate over the block.
sum_squares = 0.0
for sample in block:
# sample is a signed short in +/- 32768.
# normalize it to 1.0
n = sample * NORMALIZE
sum_squares += n*n
return math.sqrt( sum_squares / len(block) )
class ExpMovAvg(object):
def __init__(self, length):
self.length = length
self.count = 0
self.avg = 0.0
def average(self):
if(self.ready()):
return self.avg
else:
raise Exception("Moving average not ready!")
def ready(self):
return self.count > self.length
def add_value(self, point):
if(self.ready()):
self.avg = (self.avg * (self.length - 1) + point) / self.length
else:
self.count += 1
self.avg = (self.avg * (self.count - 1) + point) / self.count
class AudioLogger(object):
def __init__(self):
self.pa = pyaudio.PyAudio()
self.errorcount = 0
self.buffer = array.array('h')
self.short_avg = ExpMovAvg(SHORT_AVG_LEN)
self.long_avg = ExpMovAvg(LONG_AVG_LEN)
self.start()
def start(self):
self.stream = self.open_mic_stream()
self.recording = False
self.lookbackcache = []
def stop(self):
self.stream.close()
def find_input_device(self):
device_index = None
for i in range( self.pa.get_device_count() ):
devinfo = self.pa.get_device_info_by_index(i)
print( "Device %d: %s"%(i,devinfo["name"]) )
for keyword in ["usb"]:
if keyword in devinfo["name"].lower():
print( "Found an input: device %d - %s"%(i,devinfo["name"]) )
device_index = i
return device_index
if device_index == None:
print( "No preferred input found; using default input device." )
return device_index
def open_mic_stream( self ):
device_index = self.find_input_device()
stream = self.pa.open(format = FORMAT,
channels = CHANNELS,
rate = RATE,
input = True,
input_device_index = device_index,
frames_per_buffer = INPUT_FRAMES_PER_BLOCK)
return stream
def write_file(self, suffix, frames):
fmt = '{fname}_%Y-%m-%d-%H-%M-%S.wav'
fileName = datetime.datetime.now().strftime(fmt).format(fname=suffix)
waveFile = wave.open(fileName, 'wb')
waveFile.setnchannels(CHANNELS)
waveFile.setsampwidth(self.pa.get_sample_size(FORMAT))
waveFile.setframerate(RATE)
data = struct.pack('<' + ('h' * len(frames)), *frames)
waveFile.writeframes(data)
waveFile.close()
print "Wrote :", fileName
return fileName
def send_file(self, fileName):
subprocess.Popen(["scp", "-i", SERVER_KEY, fileName, SERVER_PATH]).wait()
def listen(self):
try:
data_chunk = array.array('h', self.stream.read(INPUT_FRAMES_PER_BLOCK))
if sys.byteorder == 'big':
data_chunk.byteswap()
except IOError, e:
# dammit.
self.errorcount += 1
print( "(%d) Error recording: %s"%(self.errorcount,e) )
self.noisycount = 1
self.stop()
self.start()
return False
amplitude = get_rms( data_chunk )
self.long_avg.add_value(amplitude)
self.short_avg.add_value(amplitude)
self.lookbackcache.append((data_chunk, amplitude))
while len(self.lookbackcache) > SHORT_AVG_LEN:
self.lookbackcache.pop(0)
if(self.long_avg.ready() and self.short_avg.ready() and len(self.lookbackcache) == SHORT_AVG_LEN):
if(self.short_avg.average() > self.long_avg.average() * 1.1):
if not self.recording:
print "Recording started!"
self.recording = True
self.buffer = array.array('h')
# We need to dump the samples that started getting loud
loud_chunk_found = False
for (data_chunk, amplitude) in self.lookbackcache:
if loud_chunk_found or amplitude > self.long_avg.average() * 1.1:
loud_chunk_found = True
self.buffer.extend(data_chunk)
else:
# keep adding sound data while we are still significantly louder than the long avg
self.buffer.extend(data_chunk)
elif self.recording:
# Recording stopped
self.stop()
self.recording = False
fileName = self.write_file("event", self.buffer)
self.send_file(fileName)
self.start()
os.remove(fileName)
self.lookbackcache = []
return True
if __name__ == "__main__":
tt = AudioLogger()
while(True):
tt.listen()
``` |
{
"source": "3amon/twitter-robofact",
"score": 3
} |
#### File: 3amon/twitter-robofact/scrape_wikipedia.py
```python
import json, requests, html2text, re, nltk.data, time
sent_detector = nltk.data.load('tokenizers/punkt/english.pickle')
def MakeTweet(text):
sents = sent_detector.tokenize(text.strip().replace('\n', ' '))
result = ''
for sent in sents:
newres = result + sent
if len(newres) > 140:
return result
result = newres
return result
def FilterTweet(text):
if 'may refer to' in text.lower():
return ''
if '##' in text:
return ''
if not text.endswith('.'):
return ''
if text.endswith('a.k.a.'):
return ''
if text.lower().startswith('this is a list'):
return ''
if len(text) < 75:
return ''
return text
with open('wik-scrape.txt', 'w') as fhandle:
while(True):
#'https://en.wikipedia.org/w/api.php?action=query&generator=random&grnnamespace=0&prop=extracts&exchars=500&format=json'
get_random_pages_query = 'https://en.wikipedia.org/w/api.php?action=query&generator=random&grnnamespace=0&prop=extracts&exchars=500&format=json'
r = requests.get(get_random_pages_query)
j = r.json()
pages = j["query"]["pages"]
for page in pages:
extract = pages[page]["extract"]
text = html2text.html2text(extract)
try:
res = FilterTweet(MakeTweet(text))
if len(res) > 0:
fhandle.write(res)
fhandle.write('\n')
print res
print ''
except UnicodeEncodeError:
pass
time.sleep(0)
``` |
{
"source": "3anga/brianbot",
"score": 2
} |
#### File: 3anga/brianbot/discord.py
```python
import discord, asyncio, logging, configuration
client = discord.Client()
@client.event
async def on_ready():
    print(f'{client.user} is connected to Discord')
@client.event
async def on_member_join(member):
await member.create_dm()
await member.dm_channel.send(
        f'Hi {member.name}. Check out #верификация!'
)
client.run(configuration.DISCORD_TOKEN)
``` |
{
"source": "3anga/jdbs2",
"score": 3
} |
#### File: 3anga/jdbs2/errors.py
```python
from json import dumps
from uuid import uuid4
def errorResponse(msg="Unknown error", code=500):
CONTENT = dumps({
'statusCode': code,
'message': msg,
'requestId': uuid4().hex
})
STATUS_CODE = code
HEADERS = {
'status': 'bad',
'Content-Type': 'application/json'
}
return CONTENT, STATUS_CODE, HEADERS
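# Hedged usage sketch (added): the tuple unpacks into a Flask-style (body, status, headers)
# response; the message and status code below are illustrative.
if __name__ == '__main__':
    body, status, headers = errorResponse('Resource not found', 404)
    print(status, headers, body)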
``` |
{
"source": "3ANov/salary_predict_trudohack",
"score": 3
} |
#### File: salary_predict_trudohack/neural_network_model/neural_predict.py
```python
import pickle
import re
from neural_network_model.neural_model import NeuralModel
TOKEN_RE = re.compile(r'[\w\d]+')
def tokenize_text_simple_regex(txt, min_token_size=4):
""" This func tokenize text with TOKEN_RE applied ealier """
txt = txt.lower()
all_tokens = TOKEN_RE.findall(txt)
return [token for token in all_tokens if len(token) >= min_token_size]
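# Worked example (added): with the default min_token_size of 4, tokens shorter than four
# characters are dropped, e.g. tokenize_text_simple_regex("Data science pays off")
# returns ['data', 'science', 'pays'].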
def print_prediction(input):
loaded_model = pickle.loads(NeuralModel.query.filter_by(file_name='model_one').first().file)
return loaded_model.predict([input])
```
#### File: 3ANov/salary_predict_trudohack/server.py
```python
from flask import Flask, render_template
from flask_restful import reqparse, Api, Resource
from flask_sqlalchemy import SQLAlchemy
from config import Config
app = Flask(__name__)
app.config.from_object(Config)
db = SQLAlchemy(app)
api = Api(app)
parser = reqparse.RequestParser()
parser.add_argument('input_vacancy_text', type=str, help='Vacancy text')
#from neural_predict import print_prediction
import re
import sklearn
import pickle
class NeuralModel(db.Model):
file_name = db.Column(db.String, primary_key=True)
file = db.Column(db.LargeBinary)
TOKEN_RE = re.compile(r'[\w\d]+')
def tokenize_text_simple_regex(txt, min_token_size=4):
""" This func tokenize text with TOKEN_RE applied ealier """
txt = txt.lower()
all_tokens = TOKEN_RE.findall(txt)
return [token for token in all_tokens if len(token) >= min_token_size]
loaded_model = pickle.loads(NeuralModel.query.filter_by(file_name='model_one').first().file)
@app.route('/', methods=['GET'])
def main():
return render_template('index.html')
class NeuralResult(Resource):
def post(self):
args = parser.parse_args()
print(args)
        # accept a string - return its length
        # how to deal with the encoding?
return {args['input_vacancy_text']: loaded_model.predict([args['input_vacancy_text']]).tolist()[0]}, 200
api.add_resource(NeuralResult, '/')
if __name__ == '__main__':
app.run(host='0.0.0.0')
``` |
{
"source": "3arii/UpSpectrum",
"score": 3
} |
#### File: UpSpectrum/Sound Analysis WeWalk/PyaudioRecorder.py
```python
def record_audio(file_src, duration):
"""
Returns an encoded WAV file. In case of an unicode
error give file_src as a linux type path file.
add 1 to the duration as it does - 1 for some reason.
"""
import pyaudio
import wave
chunk = 1024 # Record in chunks of 1024 Samples
sample_format = pyaudio.paInt16 # 16 bits per sample
channels = 2
    fs = 44100  # This will record at 44100 samples per second
seconds = duration
filesrc = file_src
p = pyaudio.PyAudio()
print("Recording...")
stream = p.open(format=sample_format,
channels=channels,
rate=fs,
frames_per_buffer=chunk,
input=True)
frames = []
    # Store data in chunks for the requested duration
for i in range(0, int(fs / chunk * seconds)):
data = stream.read(chunk)
frames.append(data)
# Stops and closes the Stream
    stream.stop_stream()
stream.close()
print("Finished Recording.")
# Save the file as a WAV file.
wf = wave.open(filesrc, "wb")
wf.setnchannels(channels)
wf.setsampwidth(p.get_sample_size(sample_format))
wf.setframerate(fs)
wf.writeframes(b"".join(frames))
wf.close()
# C:/Users/deniz/OneDrive/Documents/Deniz the Laps/Wewalk/Sound Analysis WeWalk/output.wav
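# Hedged usage sketch (added): "output.wav" and the 5-second duration are illustrative;
# PyAudio and a working input device are assumed.
if __name__ == "__main__":
    record_audio("output.wav", 5)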
``` |
{
"source": "3asm/PyDTMC",
"score": 2
} |
#### File: PyDTMC/pydtmc/computations.py
```python
__all__ = [
'calculate_periods',
'eigenvalues_sorted',
'find_cyclic_classes',
'find_lumping_partitions',
'gth_solve',
'rdl_decomposition',
'slem'
]
###########
# IMPORTS #
###########
# Standard
from itertools import (
chain
)
from math import (
gcd
)
# Libraries
import networkx as nx
import numpy as np
import numpy.linalg as npl
# Internal
from .custom_types import (
ofloat,
tarray,
tgraph,
tlist_int,
tparts,
trdl
)
#############
# FUNCTIONS #
#############
def _calculate_period(graph: tgraph) -> int:
g = 0
for scc in nx.strongly_connected_components(graph):
scc = list(scc)
levels = {scc: None for scc in scc}
vertices = levels.copy()
x = scc[0]
levels[x] = 0
current_level = [x]
previous_level = 1
while current_level:
next_level = []
for u in current_level:
for v in graph[u]:
if v not in vertices: # pragma: no cover
continue
level = levels[v]
if level is not None:
g = gcd(g, previous_level - level)
if g == 1:
return 1
else:
next_level.append(v)
levels[v] = previous_level
current_level = next_level
previous_level += 1
return g
def calculate_periods(graph: tgraph) -> tlist_int:
sccs = list(nx.strongly_connected_components(graph))
classes = [sorted(scc) for scc in sccs]
indices = sorted(classes, key=lambda x: (-len(x), x[0]))
periods = [0] * len(indices)
for scc in sccs:
scc_reachable = scc.copy()
for c in scc_reachable:
spl = nx.shortest_path_length(graph, c).keys()
scc_reachable = scc_reachable.union(spl)
index = indices.index(sorted(scc))
if (scc_reachable - scc) == set():
periods[index] = _calculate_period(graph.subgraph(scc))
else:
periods[index] = 1
return periods
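# Hedged usage sketch (added, not part of the original module): a deterministic two-state
# cycle has a single communicating class of period 2.
if __name__ == '__main__':  # pragma: no cover
    _example_graph = nx.DiGraph(np.array([[0.0, 1.0], [1.0, 0.0]]))
    print(calculate_periods(_example_graph))  # expected output: [2]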
def eigenvalues_sorted(m: tarray) -> tarray:
ev = npl.eigvals(m)
ev = np.sort(np.abs(ev))
return ev
def find_cyclic_classes(p: tarray) -> tarray:
size = p.shape[0]
v = np.zeros(size, dtype=int)
v[0] = 1
w = np.array([], dtype=int)
t = np.array([0], dtype=int)
d = 0
m = 1
while (m > 0) and (d != 1):
i = t[0]
j = 0
t = np.delete(t, 0)
w = np.append(w, i)
while j < size:
if p[i, j] > 0.0:
r = np.append(w, t)
k = np.sum(r == j)
if k > 0:
b = v[i] - v[j] + 1
d = gcd(d, b)
else:
t = np.append(t, j)
v[j] = v[i] + 1
j += 1
m = t.size
v = np.remainder(v, d)
indices = []
for u in np.unique(v):
indices.append(list(chain.from_iterable(np.argwhere(v == u))))
return indices
def find_lumping_partitions(p: tarray) -> tparts:
size = p.shape[0]
if size == 2:
return []
k = size - 1
indices = list(range(size))
possible_partitions = []
for i in range(2**k):
partition = []
subset = []
for position in range(size):
subset.append(indices[position])
if ((1 << position) & i) or position == k:
partition.append(subset)
subset = []
partition_length = len(partition)
if 2 <= partition_length < size:
possible_partitions.append(partition)
partitions = []
for partition in possible_partitions:
r = np.zeros((size, len(partition)), dtype=float)
for index, lumping in enumerate(partition):
for state in lumping:
r[state, index] = 1.0
# noinspection PyBroadException
try:
k = np.dot(np.linalg.inv(np.dot(np.transpose(r), r)), np.transpose(r))
except Exception: # pragma: no cover
continue
left = np.dot(np.dot(np.dot(r, k), p), r)
right = np.dot(p, r)
is_lumpable = np.array_equal(left, right)
if is_lumpable:
partitions.append(partition)
return partitions
def gth_solve(p: tarray) -> tarray:
a = np.copy(p)
n = a.shape[0]
for i in range(n - 1):
scale = np.sum(a[i, i + 1:n])
if scale <= 0.0: # pragma: no cover
n = i + 1
break
a[i + 1:n, i] /= scale
a[i + 1:n, i + 1:n] += np.dot(a[i + 1:n, i:i + 1], a[i:i + 1, i + 1:n])
x = np.zeros(n, dtype=float)
x[n - 1] = 1.0
for i in range(n - 2, -1, -1):
x[i] = np.dot(x[i + 1:n], a[i + 1:n, i])
x /= np.sum(x)
return x
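# Hedged usage sketch (added, not part of the original module): gth_solve computes the
# stationary distribution of an irreducible stochastic matrix via Grassmann-Taksar-Heyman
# elimination; for a two-state chain the result can be checked by hand (pi @ P == pi).
if __name__ == '__main__':  # pragma: no cover
    _p_example = np.array([[0.9, 0.1], [0.5, 0.5]])
    print(gth_solve(_p_example))  # expected output: [0.83333333 0.16666667]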
def rdl_decomposition(p: tarray) -> trdl:
values, vectors = npl.eig(p)
indices = np.argsort(np.abs(values))[::-1]
values = values[indices]
vectors = vectors[:, indices]
r = np.copy(vectors)
d = np.diag(values)
l = npl.solve(np.transpose(r), np.eye(p.shape[0], dtype=float)) # noqa
k = np.sum(l[:, 0])
if not np.isclose(k, 0.0):
r[:, 0] *= k
l[:, 0] /= k
r = np.real(r)
d = np.real(d)
l = np.transpose(np.real(l)) # noqa
return r, d, l
def slem(m: tarray) -> ofloat:
ev = eigenvalues_sorted(m)
value = ev[~np.isclose(ev, 1.0)][-1]
if np.isclose(value, 0.0):
return None
return value
```
#### File: PyDTMC/pydtmc/markov_chain.py
```python
__all__ = [
'MarkovChain'
]
###########
# IMPORTS #
###########
# Standard
from copy import (
deepcopy
)
from inspect import (
getmembers,
isfunction,
stack,
trace
)
from itertools import (
chain
)
from math import (
gcd
)
# Libraries
import networkx as nx
import numpy as np
import numpy.linalg as npl
# Internal
from .base_class import (
BaseClass
)
from .computations import (
calculate_periods,
eigenvalues_sorted,
find_cyclic_classes,
find_lumping_partitions,
gth_solve,
rdl_decomposition,
slem
)
from .custom_types import (
oarray,
ofloat,
oint,
ointerval,
olist_str,
onumeric,
ostate,
ostates,
ostatus,
otimes_out,
owalk,
tany,
tarray,
tbcond,
tcache,
tgraph,
tgraphs,
tlist_array,
tlist_int,
tlist_str,
tlists_int,
tlists_str,
tmc,
tmc_dict,
tmc_dict_flex,
tnumeric,
tpart,
tparts,
trdl,
tredists,
tstate,
tstates,
ttfunc,
ttimes_in,
twalk,
tweights
)
from .decorators import (
alias,
cachedproperty,
random_output,
aliased
)
from .exceptions import (
ValidationError
)
from .files_io import (
read_csv,
read_json,
read_txt,
read_xml,
write_csv,
write_json,
write_txt,
write_xml
)
from .fitting import (
fit_function,
fit_walk
)
from .generators import (
approximation,
birth_death,
bounded,
canonical,
closest_reversible,
gamblers_ruin,
lazy,
lump,
random,
sub,
urn_model
)
from .measures import (
absorption_probabilities,
committor_probabilities,
expected_rewards,
expected_transitions,
first_passage_reward,
first_passage_probabilities,
hitting_probabilities,
hitting_times,
mean_absorption_times,
mean_first_passage_times_between,
mean_first_passage_times_to,
mean_number_visits,
mean_recurrence_times,
mixing_time,
sensitivity,
time_correlations,
time_relaxations
)
from .simulations import (
predict,
redistribute,
simulate,
walk_probability
)
from .utilities import (
create_rng,
generate_validation_error,
get_file_extension
)
from .validation import (
validate_boolean,
validate_boundary_condition,
validate_dictionary,
validate_enumerator,
validate_file_path,
validate_float,
validate_graph,
validate_hyperparameter,
validate_integer,
validate_interval,
validate_mask,
validate_matrix,
validate_partitions,
validate_rewards,
validate_state,
validate_state_names,
validate_states,
validate_status,
validate_time_points,
validate_transition_function,
validate_transition_matrix,
validate_vector
)
###########
# CLASSES #
###########
@aliased
class MarkovChain(metaclass=BaseClass):
"""
Defines a Markov chain with given transition matrix and state names.
:param p: the transition matrix.
:param states: the name of each state (*if omitted, an increasing sequence of integers starting at 1*).
:raises ValidationError: if any input argument is not compliant.
"""
def __init__(self, p: tnumeric, states: olist_str = None):
caller = stack()[1][3]
sm = [x[1].__name__ for x in getmembers(MarkovChain, predicate=isfunction) if x[1].__name__[0] != '_' and isinstance(MarkovChain.__dict__.get(x[1].__name__), staticmethod)]
if caller not in sm:
try:
p = validate_transition_matrix(p)
states = [str(i) for i in range(1, p.shape[0] + 1)] if states is None else validate_state_names(states, p.shape[0])
except Exception as e: # pragma: no cover
raise generate_validation_error(e, trace()) from None
size = p.shape[0]
graph = nx.DiGraph(p)
graph = nx.relabel_nodes(graph, dict(zip(range(size), states)))
self.__cache: tcache = {}
self.__digraph: tgraph = graph
self.__p: tarray = p
self.__size: int = size
self.__states: tlist_str = states
def __eq__(self, other) -> bool:
if isinstance(other, MarkovChain):
return np.array_equal(self.p, other.p) and self.states == other.states
return False
def __hash__(self) -> int:
return hash((self.p.tobytes(), tuple(self.states)))
def __repr__(self) -> str:
return self.__class__.__name__
# noinspection PyListCreation
def __str__(self) -> str:
lines = ['']
lines.append('DISCRETE-TIME MARKOV CHAIN')
lines.append(f' SIZE: {self.size:d}')
lines.append(f' RANK: {self.rank:d}')
lines.append(f' CLASSES: {len(self.communicating_classes):d}')
lines.append(f' > RECURRENT: {len(self.recurrent_classes):d}')
lines.append(f' > TRANSIENT: {len(self.transient_classes):d}')
lines.append(f' ERGODIC: {("YES" if self.is_ergodic else "NO")}')
lines.append(f' > APERIODIC: {("YES" if self.is_aperiodic else "NO (" + str(self.period) + ")")}')
lines.append(f' > IRREDUCIBLE: {("YES" if self.is_irreducible else "NO")}')
lines.append(f' ABSORBING: {("YES" if self.is_absorbing else "NO")}')
lines.append(f' REGULAR: {("YES" if self.is_regular else "NO")}')
lines.append(f' REVERSIBLE: {("YES" if self.is_reversible else "NO")}')
lines.append(f' SYMMETRIC: {("YES" if self.is_symmetric else "NO")}')
lines.append('')
value = '\n'.join(lines)
return value
@cachedproperty
def __absorbing_states_indices(self) -> tlist_int:
indices = [index for index in range(self.__size) if np.isclose(self.__p[index, index], 1.0)]
return indices
@cachedproperty
def __classes_indices(self) -> tlists_int:
indices = [sorted([self.__states.index(c) for c in scc]) for scc in nx.strongly_connected_components(self.__digraph)]
return indices
@cachedproperty
def __communicating_classes_indices(self) -> tlists_int:
indices = sorted(self.__classes_indices, key=lambda x: (-len(x), x[0]))
return indices
@cachedproperty
def _cyclic_classes_indices(self) -> tlists_int:
if not self.is_irreducible:
return []
if self.is_aperiodic:
return self.__communicating_classes_indices.copy()
indices = find_cyclic_classes(self.__p)
indices = sorted(indices, key=lambda x: (-len(x), x[0]))
return indices
@cachedproperty
def __cyclic_states_indices(self) -> tlist_int:
indices = sorted(chain.from_iterable(self._cyclic_classes_indices))
return indices
@cachedproperty
def __eigenvalues_sorted(self) -> tarray:
ev = eigenvalues_sorted(self.__p)
return ev
@cachedproperty
def __rdl_decomposition(self) -> trdl:
r, d, l = rdl_decomposition(self.__p) # noqa
return r, d, l
@cachedproperty
def __recurrent_classes_indices(self) -> tlists_int:
indices = [index for index in self.__classes_indices if index not in self.__transient_classes_indices]
indices = sorted(indices, key=lambda x: (-len(x), x[0]))
return indices
@cachedproperty
def __recurrent_states_indices(self) -> tlist_int:
indices = sorted(chain.from_iterable(self.__recurrent_classes_indices))
return indices
@cachedproperty
def __slem(self) -> ofloat:
if not self.is_ergodic:
value = None
else:
value = slem(self.__p)
return value
@cachedproperty
def __states_indices(self) -> tlist_int:
indices = list(range(self.__size))
return indices
@cachedproperty
def __transient_classes_indices(self) -> tlists_int:
edges = {edge1 for (edge1, edge2) in nx.condensation(self.__digraph).edges}
indices = [self.__classes_indices[edge] for edge in edges]
indices = sorted(indices, key=lambda x: (-len(x), x[0]))
return indices
@cachedproperty
def __transient_states_indices(self) -> tlist_int:
indices = sorted(chain.from_iterable(self.__transient_classes_indices))
return indices
@cachedproperty
def absorbing_states(self) -> tlists_str:
"""
A property representing the absorbing states of the Markov chain.
"""
states = [*map(self.__states.__getitem__, self.__absorbing_states_indices)]
return states
@cachedproperty
def accessibility_matrix(self) -> tarray:
"""
A property representing the accessibility matrix of the Markov chain.
"""
a = self.adjacency_matrix
i = np.eye(self.__size, dtype=int)
am = (i + a)**(self.__size - 1)
am = (am > 0).astype(int)
return am
@cachedproperty
def adjacency_matrix(self) -> tarray:
"""
A property representing the adjacency matrix of the Markov chain.
"""
am = (self.__p > 0.0).astype(int)
return am
@cachedproperty
def communicating_classes(self) -> tlists_str:
"""
A property representing the communicating classes of the Markov chain.
"""
classes = [[*map(self.__states.__getitem__, i)] for i in self.__communicating_classes_indices]
return classes
@cachedproperty
def communication_matrix(self) -> tarray:
"""
A property representing the communication matrix of the Markov chain.
"""
cm = np.zeros((self.__size, self.__size), dtype=int)
for index in self.__communicating_classes_indices:
cm[np.ix_(index, index)] = 1
return cm
@cachedproperty
def cyclic_classes(self) -> tlists_str:
"""
A property representing the cyclic classes of the Markov chain.
"""
classes = [[*map(self.__states.__getitem__, i)] for i in self._cyclic_classes_indices]
return classes
@cachedproperty
def cyclic_states(self) -> tlists_str:
"""
A property representing the cyclic states of the Markov chain.
"""
states = [*map(self.__states.__getitem__, self.__cyclic_states_indices)]
return states
@cachedproperty
def determinant(self) -> float:
"""
A property representing the determinant of the transition matrix of the Markov chain.
"""
d = npl.det(self.__p)
return d
@cachedproperty
def entropy_rate(self) -> ofloat:
"""
| A property representing the entropy rate of the Markov chain.
| If the Markov chain has multiple stationary distributions, then :py:class:`None` is returned.
"""
if len(self.pi) > 1:
return None
pi = self.pi[0]
h = 0.0
for i in range(self.__size):
for j in range(self.__size):
if self.__p[i, j] > 0.0:
h += pi[i] * self.__p[i, j] * np.log(self.__p[i, j])
if np.isclose(h, 0.0):
return h
return -h
@cachedproperty
def entropy_rate_normalized(self) -> ofloat:
"""
| A property representing the entropy rate, normalized between 0 and 1, of the Markov chain.
| If the Markov chain has multiple stationary distributions, then :py:class:`None` is returned.
"""
h = self.entropy_rate
if h is None:
return None
if np.isclose(h, 0.0):
hn = 0.0
else:
ev = eigenvalues_sorted(self.adjacency_matrix)
hn = h / np.log(ev[-1])
hn = min(1.0, max(0.0, hn))
return hn
@cachedproperty
def fundamental_matrix(self) -> oarray:
"""
| A property representing the fundamental matrix of the Markov chain.
| If the Markov chain is not **absorbing** or has no transient states, then :py:class:`None` is returned.
"""
if not self.is_absorbing or len(self.transient_states) == 0:
return None
indices = self.__transient_states_indices
q = self.__p[np.ix_(indices, indices)]
i = np.eye(len(indices), dtype=float)
fm = npl.inv(i - q)
return fm
@cachedproperty
def implied_timescales(self) -> oarray:
"""
| A property representing the implied timescales of the Markov chain.
| If the Markov chain is not **ergodic**, then :py:class:`None` is returned.
"""
if not self.is_ergodic:
return None
ev = self.__eigenvalues_sorted[::-1]
it = np.append(np.inf, -1.0 / np.log(ev[1:]))
return it
@cachedproperty
def is_absorbing(self) -> bool:
"""
A property indicating whether the Markov chain is absorbing.
"""
if len(self.absorbing_states) == 0:
return False
indices = set(self.__states_indices)
absorbing_indices = set(self.__absorbing_states_indices)
transient_indices = set()
progress = True
unknown_states = None
while progress:
unknown_states = indices.copy() - absorbing_indices - transient_indices
known_states = absorbing_indices | transient_indices
progress = False
for i in unknown_states:
for j in known_states:
if self.__p[i, j] > 0.0:
transient_indices.add(i)
progress = True
break
result = len(unknown_states) == 0
return result
@cachedproperty
def is_aperiodic(self) -> bool:
"""
A property indicating whether the Markov chain is aperiodic.
"""
if self.is_irreducible:
result = set(self.periods).pop() == 1
elif all(period == 1 for period in self.periods):
result = True
else: # pragma: no cover
result = nx.is_aperiodic(self.__digraph)
return result
@cachedproperty
def is_canonical(self) -> bool:
"""
A property indicating whether the Markov chain has a canonical form.
"""
recurrent_indices = self.__recurrent_states_indices
transient_indices = self.__transient_states_indices
if len(recurrent_indices) == 0 or len(transient_indices) == 0:
return True
result = max(transient_indices) < min(recurrent_indices)
return result
@cachedproperty
def is_doubly_stochastic(self) -> bool:
"""
A property indicating whether the Markov chain is doubly stochastic.
"""
result = np.allclose(np.sum(self.__p, axis=0), 1.0)
return result
@cachedproperty
def is_ergodic(self) -> bool:
"""
A property indicating whether the Markov chain is ergodic or not.
"""
result = self.is_irreducible and self.is_aperiodic
return result
@cachedproperty
def is_irreducible(self) -> bool:
"""
A property indicating whether the Markov chain is irreducible.
"""
result = len(self.communicating_classes) == 1
return result
@cachedproperty
def is_regular(self) -> bool:
"""
A property indicating whether the Markov chain is regular.
"""
d = np.diagonal(self.__p)
nz = np.count_nonzero(d)
if nz > 0:
k = (2 * self.__size) - nz - 1
else:
k = self.__size ** self.__size - (2 * self.__size) + 2
result = np.all(self.__p ** k > 0.0)
return result
@cachedproperty
def is_reversible(self) -> bool:
"""
A property indicating whether the Markov chain is reversible.
"""
if len(self.pi) > 1:
return False
pi = self.pi[0]
x = pi[:, np.newaxis] * self.__p
result = np.allclose(x, np.transpose(x))
return result
@cachedproperty
def is_symmetric(self) -> bool:
"""
A property indicating whether the Markov chain is symmetric.
"""
result = np.allclose(self.__p, np.transpose(self.__p))
return result
@cachedproperty
def kemeny_constant(self) -> ofloat:
"""
| A property representing the Kemeny's constant of the fundamental matrix of the Markov chain.
| If the Markov chain is not **absorbing** or has no transient states, then :py:class:`None` is returned.
"""
fm = self.fundamental_matrix
if fm is None:
return None
if fm.size == 1:
kc = fm[0].item()
else:
kc = np.trace(fm).item()
return kc
@cachedproperty
def lumping_partitions(self) -> tparts:
"""
A property representing all the partitions of the Markov chain that satisfy the ordinary lumpability criterion.
"""
lp = find_lumping_partitions(self.__p)
return lp
@cachedproperty
def mixing_rate(self) -> ofloat:
"""
| A property representing the mixing rate of the Markov chain.
| If the Markov chain is not **ergodic** or the **SLEM** (second largest eigenvalue modulus) cannot be computed, then :py:class:`None` is returned.
"""
if self.__slem is None:
mr = None
else:
mr = -1.0 / np.log(self.__slem)
return mr
@property
def p(self) -> tarray:
"""
A property representing the transition matrix of the Markov chain.
"""
return self.__p
@cachedproperty
def period(self) -> int:
"""
A property representing the period of the Markov chain.
"""
if self.is_aperiodic:
period = 1
elif self.is_irreducible:
period = set(self.periods).pop()
else: # pragma: no cover
period = 1
for p in [self.periods[self.communicating_classes.index(recurrent_class)] for recurrent_class in self.recurrent_classes]:
period = (period * p) // gcd(period, p)
return period
@cachedproperty
def periods(self) -> tlist_int:
"""
A property representing the period of each communicating class defined by the Markov chain.
"""
periods = calculate_periods(self.__digraph)
return periods
@alias('stationary_distributions', 'steady_states')
@cachedproperty
def pi(self) -> tlist_array:
"""
| A property representing the stationary distributions of the Markov chain.
| **Aliases:** stationary_distributions, steady_states
"""
if self.is_irreducible:
s = np.reshape(gth_solve(self.__p), (1, self.__size))
else:
s = np.zeros((len(self.recurrent_classes), self.__size), dtype=float)
for index, indices in enumerate(self.__recurrent_classes_indices):
pr = self.__p[np.ix_(indices, indices)]
s[index, indices] = gth_solve(pr)
pi = []
for i in range(s.shape[0]):
pi.append(s[i, :])
return pi
@cachedproperty
def rank(self) -> int:
"""
A property representing the rank of the transition matrix of the Markov chain.
"""
r = npl.matrix_rank(self.__p)
return r
@cachedproperty
def recurrent_classes(self) -> tlists_str:
"""
A property representing the recurrent classes defined by the Markov chain.
"""
classes = [[*map(self.__states.__getitem__, i)] for i in self.__recurrent_classes_indices]
return classes
@cachedproperty
def recurrent_states(self) -> tlists_str:
"""
A property representing the recurrent states of the Markov chain.
"""
states = [*map(self.__states.__getitem__, self.__recurrent_states_indices)]
return states
@cachedproperty
def relaxation_rate(self) -> ofloat:
"""
| A property representing the relaxation rate of the Markov chain.
| If the Markov chain is not **ergodic** or the **SLEM** (second largest eigenvalue modulus) cannot be computed, then :py:class:`None` is returned.
"""
if self.__slem is None:
rr = None
else:
rr = 1.0 / self.spectral_gap
return rr
@property
def size(self) -> int:
"""
A property representing the size of the Markov chain.
"""
return self.__size
@cachedproperty
def spectral_gap(self) -> ofloat:
"""
| A property representing the spectral gap of the Markov chain.
| If the Markov chain is not **ergodic** or the **SLEM** (second largest eigenvalue modulus) cannot be computed, then :py:class:`None` is returned.
"""
if self.__slem is None:
sg = None
else:
sg = 1.0 - self.__slem
return sg
@property
def states(self) -> tlist_str:
"""
A property representing the states of the Markov chain.
"""
return self.__states
@cachedproperty
def topological_entropy(self) -> float:
"""
A property representing the topological entropy of the Markov chain.
"""
ev = eigenvalues_sorted(self.adjacency_matrix)
te = np.log(ev[-1])
return te
@cachedproperty
def transient_classes(self) -> tlists_str:
"""
A property representing the transient classes defined by the Markov chain.
"""
classes = [[*map(self.__states.__getitem__, i)] for i in self.__transient_classes_indices]
return classes
@cachedproperty
def transient_states(self) -> tlists_str:
"""
A property representing the transient states of the Markov chain.
"""
states = [*map(self.__states.__getitem__, self.__transient_states_indices)]
return states
def absorption_probabilities(self) -> oarray:
"""
The method computes the absorption probabilities of the Markov chain.
| **Notes:**
- If the Markov chain has no transient states, then :py:class:`None` is returned.
"""
if 'ap' not in self.__cache:
self.__cache['ap'] = absorption_probabilities(self)
return self.__cache['ap']
def are_communicating(self, state1: tstate, state2: tstate) -> bool:
"""
The method verifies whether the given states of the Markov chain are communicating.
:param state1: the first state.
:param state2: the second state.
:raises ValidationError: if any input argument is not compliant.
"""
try:
state1 = validate_state(state1, self.__states)
state2 = validate_state(state2, self.__states)
except Exception as e: # pragma: no cover
raise generate_validation_error(e, trace()) from None
a1 = self.accessibility_matrix[state1, state2] != 0
a2 = self.accessibility_matrix[state2, state1] != 0
result = a1 and a2
return result
def closest_reversible(self, distribution: onumeric = None, weighted: bool = False) -> tmc:
"""
The method computes the closest reversible of the Markov chain.
| **Notes:**
- The algorithm is described in `Computing the nearest reversible Markov chain (<NAME>, 2015) <http://doi.org/10.1002/nla.1967>`_.
:param distribution: the distribution of the states (*if omitted, the states are assumed to be uniformly distributed*).
:param weighted: a boolean indicating whether to use the weighted Frobenius norm.
:raises ValidationError: if any input argument is not compliant.
:raises ValueError: if the closest reversible could not be computed.
"""
try:
distribution = np.ones(self.__size, dtype=float) / self.__size if distribution is None else validate_vector(distribution, 'stochastic', False, self.__size)
weighted = validate_boolean(weighted)
except Exception as e: # pragma: no cover
raise generate_validation_error(e, trace()) from None
zeros = len(distribution) - np.count_nonzero(distribution)
if weighted and zeros > 0: # pragma: no cover
raise ValidationError('If the weighted Frobenius norm is used, the distribution must not contain zero-valued probabilities.')
if self.is_reversible:
p = np.copy(self.__p)
else:
p, error_message = closest_reversible(self.__p, distribution, weighted)
if error_message is not None: # pragma: no cover
raise ValueError(error_message)
mc = MarkovChain(p, self.__states)
if not mc.is_reversible: # pragma: no cover
raise ValueError('The closest reversible could not be computed.')
return mc
@alias('cp')
def committor_probabilities(self, committor_type: str, states1: tstates, states2: tstates) -> oarray:
"""
The method computes the committor probabilities between the given subsets of the state space defined by the Markov chain.
| **Notes:**
- If the Markov chain is not **ergodic**, then :py:class:`None` is returned.
- The method can be accessed through the following aliases: **cp**.
:param committor_type:
- **backward** for the backward committor;
- **forward** for the forward committor.
:param states1: the first subset of the state space.
:param states2: the second subset of the state space.
:raises ValidationError: if any input argument is not compliant.
"""
try:
committor_type = validate_enumerator(committor_type, ['backward', 'forward'])
states1 = validate_states(states1, self.__states, 'subset', True)
states2 = validate_states(states2, self.__states, 'subset', True)
except Exception as e: # pragma: no cover
raise generate_validation_error(e, trace()) from None
intersection = np.intersect1d(states1, states2)
if len(intersection) > 0: # pragma: no cover
raise ValidationError(f'The two sets of states must be disjoint. An intersection has been detected: {", ".join([str(i) for i in intersection])}.')
value = committor_probabilities(self, committor_type, states1, states2)
return value
@alias('conditional_distribution')
def conditional_probabilities(self, state: tstate) -> tarray:
"""
The method computes the probabilities, for all the states of the Markov chain, conditioned on the process being at the given state.
| **Notes:**
- The method can be accessed through the following aliases: **conditional_distribution**.
:param state: the current state.
:raises ValidationError: if any input argument is not compliant.
"""
try:
state = validate_state(state, self.__states)
except Exception as e: # pragma: no cover
raise generate_validation_error(e, trace()) from None
value = self.__p[state, :]
return value
@alias('er')
def expected_rewards(self, steps: int, rewards: tnumeric) -> tarray:
"""
The method computes the expected rewards of the Markov chain after **N** steps, given the reward value of each state.
| **Notes:**
- The method can be accessed through the following aliases: **er**.
:param steps: the number of steps.
:param rewards: the reward values.
:raises ValidationError: if any input argument is not compliant.
"""
try:
steps = validate_integer(steps, lower_limit=(0, True))
rewards = validate_rewards(rewards, self.__size)
except Exception as e: # pragma: no cover
raise generate_validation_error(e, trace()) from None
value = expected_rewards(self.__p, steps, rewards)
return value
@alias('et')
def expected_transitions(self, steps: int, initial_distribution: onumeric = None) -> tarray:
"""
The method computes the expected number of transitions performed by the Markov chain after *N* steps, given the initial distribution of the states.
| **Notes:**
- The method can be accessed through the following aliases: **et**.
:param steps: the number of steps.
:param initial_distribution: the initial distribution of the states (*if omitted, the states are assumed to be uniformly distributed*).
:raises ValidationError: if any input argument is not compliant.
"""
try:
steps = validate_integer(steps, lower_limit=(0, True))
initial_distribution = np.ones(self.__size, dtype=float) / self.__size if initial_distribution is None else validate_vector(initial_distribution, 'stochastic', False, self.__size)
except Exception as e: # pragma: no cover
raise generate_validation_error(e, trace()) from None
value = expected_transitions(self.__p, self.__rdl_decomposition, steps, initial_distribution)
return value
@alias('fpp')
def first_passage_probabilities(self, steps: int, initial_state: tstate, first_passage_states: ostates = None) -> tarray:
"""
The method computes the first passage probabilities of the Markov chain after *N* steps, given the initial state and, optionally, the first passage states.
| **Notes:**
- The method can be accessed through the following aliases: **fpp**.
:param steps: the number of steps.
:param initial_state: the initial state.
:param first_passage_states: the first passage states.
:raises ValidationError: if any input argument is not compliant.
"""
try:
steps = validate_integer(steps, lower_limit=(0, True))
initial_state = validate_state(initial_state, self.__states)
first_passage_states = None if first_passage_states is None else validate_states(first_passage_states, self.__states, 'regular', True)
except Exception as e: # pragma: no cover
raise generate_validation_error(e, trace()) from None
value = first_passage_probabilities(self, steps, initial_state, first_passage_states)
return value
@alias('fpr')
def first_passage_reward(self, steps: int, initial_state: tstate, first_passage_states: tstates, rewards: tnumeric) -> float:
"""
The method computes the first passage reward of the Markov chain after *N* steps, given the reward value of each state, the initial state and the first passage states.
| **Notes:**
- The method can be accessed through the following aliases: **fpr**.
:param steps: the number of steps.
:param initial_state: the initial state.
:param first_passage_states: the first passage states.
:param rewards: the reward values.
:raises ValidationError: if any input argument is not compliant.
:raises ValueError: if the Markov chain defines only two states.
"""
try:
initial_state = validate_state(initial_state, self.__states)
first_passage_states = validate_states(first_passage_states, self.__states, 'subset', True)
rewards = validate_rewards(rewards, self.__size)
steps = validate_integer(steps, lower_limit=(0, True))
except Exception as e: # pragma: no cover
raise generate_validation_error(e, trace()) from None
if self.__size == 2: # pragma: no cover
raise ValueError('The Markov chain defines only two states and the first passage rewards cannot be computed.')
if initial_state in first_passage_states: # pragma: no cover
raise ValidationError('The first passage states cannot include the initial state.')
if len(first_passage_states) == (self.__size - 1): # pragma: no cover
raise ValidationError('The first passage states cannot include all the states except the initial one.')
value = first_passage_reward(self, steps, initial_state, first_passage_states, rewards)
return value
@alias('hp')
def hitting_probabilities(self, targets: ostates = None) -> tarray:
"""
The method computes the hitting probability, for the states of the Markov chain, to the given set of states.
| **Notes:**
- The method can be accessed through the following aliases: **hp**.
:param targets: the target states (*if omitted, all the states are targeted*).
:raises ValidationError: if any input argument is not compliant.
"""
try:
targets = self.__states_indices.copy() if targets is None else validate_states(targets, self.__states, 'regular', True)
except Exception as e: # pragma: no cover
raise generate_validation_error(e, trace()) from None
value = hitting_probabilities(self, targets)
return value
@alias('ht')
def hitting_times(self, targets: ostates = None) -> tarray:
"""
The method computes the hitting times, for all the states of the Markov chain, to the given set of states.
| **Notes:**
- The method can be accessed through the following aliases: **ht**.
:param targets: the target states (*if omitted, all the states are targeted*).
:raises ValidationError: if any input argument is not compliant.
"""
try:
targets = self.__states_indices.copy() if targets is None else validate_states(targets, self.__states, 'regular', True)
except Exception as e: # pragma: no cover
raise generate_validation_error(e, trace()) from None
value = hitting_times(self, targets)
return value
def is_absorbing_state(self, state: tstate) -> bool:
"""
The method verifies whether the given state of the Markov chain is absorbing.
:param state: the target state.
:raises ValidationError: if any input argument is not compliant.
"""
try:
state = validate_state(state, self.__states)
except Exception as e: # pragma: no cover
raise generate_validation_error(e, trace()) from None
result = state in self.__absorbing_states_indices
return result
def is_accessible(self, state_target: tstate, state_origin: tstate) -> bool:
"""
The method verifies whether the given target state is reachable from the given origin state.
:param state_target: the target state.
:param state_origin: the origin state.
:raises ValidationError: if any input argument is not compliant.
"""
try:
state_target = validate_state(state_target, self.__states)
state_origin = validate_state(state_origin, self.__states)
except Exception as e: # pragma: no cover
raise generate_validation_error(e, trace()) from None
result = self.accessibility_matrix[state_origin, state_target] != 0
return result
def is_cyclic_state(self, state: tstate) -> bool:
"""
The method verifies whether the given state is cyclic.
:param state: the target state.
:raises ValidationError: if any input argument is not compliant.
"""
try:
state = validate_state(state, self.__states)
except Exception as e: # pragma: no cover
raise generate_validation_error(e, trace()) from None
result = state in self.__cyclic_states_indices
return result
def is_recurrent_state(self, state: tstate) -> bool:
"""
The method verifies whether the given state is recurrent.
:param state: the target state.
:raises ValidationError: if any input argument is not compliant.
"""
try:
state = validate_state(state, self.__states)
except Exception as e: # pragma: no cover
raise generate_validation_error(e, trace()) from None
result = state in self.__recurrent_states_indices
return result
def is_transient_state(self, state: tstate) -> bool:
"""
The method verifies whether the given state is transient.
:param state: the target state.
:raises ValidationError: if any input argument is not compliant.
"""
try:
state = validate_state(state, self.__states)
except Exception as e: # pragma: no cover
raise generate_validation_error(e, trace()) from None
result = state in self.__transient_states_indices
return result
def lump(self, partitions: tpart) -> tmc:
"""
The method attempts to reduce the state space of the Markov chain with respect to the given partitions following the ordinary lumpability criterion.
:param partitions: the partitions of the state space.
:raises ValidationError: if any input argument is not compliant.
:raises ValueError: if the Markov chain defines only two states or is not lumpable with respect to the given partitions.
"""
try:
partitions = validate_partitions(partitions, self.__states)
except Exception as e: # pragma: no cover
raise generate_validation_error(e, trace()) from None
if self.__size == 2: # pragma: no cover
raise ValueError('The Markov chain defines only two states and cannot be lumped.')
p, states, error_message = lump(self.p, self.states, partitions)
if error_message is not None: # pragma: no cover
raise ValueError(error_message)
mc = MarkovChain(p, states)
return mc
@alias('mat')
def mean_absorption_times(self) -> oarray:
"""
The method computes the mean absorption times of the Markov chain.
| **Notes:**
- If the Markov chain is not **absorbing** or has no transient states, then :py:class:`None` is returned.
- The method can be accessed through the following aliases: **mat**.
"""
if 'mat' not in self.__cache:
self.__cache['mat'] = mean_absorption_times(self)
return self.__cache['mat']
@alias('mfpt_between', 'mfptb')
def mean_first_passage_times_between(self, origins: tstates, targets: tstates) -> ofloat:
"""
The method computes the mean first passage times between the given subsets of the state space.
| **Notes:**
- If the Markov chain is not **ergodic**, then :py:class:`None` is returned.
- The method can be accessed through the following aliases: **mfpt_between**, **mfptb**.
:param origins: the origin states.
:param targets: the target states.
:raises ValidationError: if any input argument is not compliant.
"""
try:
origins = validate_states(origins, self.__states, 'subset', True)
targets = validate_states(targets, self.__states, 'subset', True)
except Exception as e: # pragma: no cover
raise generate_validation_error(e, trace()) from None
value = mean_first_passage_times_between(self, origins, targets)
return value
@alias('mfpt_to', 'mfptt')
def mean_first_passage_times_to(self, targets: ostates = None) -> oarray:
"""
The method computes the mean first passage times, for all the states, to the given set of states.
| **Notes:**
- If the Markov chain is not **ergodic**, then :py:class:`None` is returned.
- The method can be accessed through the following aliases: **mfpt_to**, **mfptt**.
:param targets: the target states (*if omitted, all the states are targeted*).
:raises ValidationError: if any input argument is not compliant.
"""
try:
targets = None if targets is None else validate_states(targets, self.__states, 'regular', True)
except Exception as e: # pragma: no cover
raise generate_validation_error(e, trace()) from None
value = mean_first_passage_times_to(self, targets)
return value
@alias('mnv')
def mean_number_visits(self) -> oarray:
"""
The method computes the mean number of visits of the Markov chain.
| **Notes:**
- The method can be accessed through the following aliases: **mnv**.
"""
if 'mnv' not in self.__cache:
self.__cache['mnv'] = mean_number_visits(self)
return self.__cache['mnv']
@alias('mrt')
def mean_recurrence_times(self) -> oarray:
"""
The method computes the mean recurrence times of the Markov chain.
| **Notes:**
- If the Markov chain is not **ergodic**, then :py:class:`None` is returned.
- The method can be accessed through the following aliases: **mrt**.
"""
if 'mrt' not in self.__cache:
self.__cache['mrt'] = mean_recurrence_times(self)
return self.__cache['mrt']
@alias('mt')
def mixing_time(self, initial_distribution: onumeric = None, jump: int = 1, cutoff_type: str = 'natural') -> oint:
"""
The method computes the mixing time of the Markov chain, given the initial distribution of the states.
| **Notes:**
- If the Markov chain is not **ergodic**, then :py:class:`None` is returned.
- The method can be accessed through the following aliases: **mt**.
:param initial_distribution: the initial distribution of the states (*if omitted, the states are assumed to be uniformly distributed*).
:param jump: the number of steps in each iteration.
:param cutoff_type:
- **natural** for the natural cutoff;
- **traditional** for the traditional cutoff.
:raises ValidationError: if any input argument is not compliant.
"""
try:
initial_distribution = np.ones(self.__size, dtype=float) / self.__size if initial_distribution is None else validate_vector(initial_distribution, 'stochastic', False, self.__size)
jump = validate_integer(jump, lower_limit=(0, True))
cutoff_type = validate_enumerator(cutoff_type, ['natural', 'traditional'])
except Exception as e: # pragma: no cover
raise generate_validation_error(e, trace()) from None
if cutoff_type == 'traditional':
cutoff = 0.25
else:
cutoff = 1.0 / (2.0 * np.exp(1.0))
value = mixing_time(self, initial_distribution, jump, cutoff)
return value
@alias('next')
@random_output()
def next_state(self, initial_state: tstate, output_index: bool = False, seed: oint = None) -> tstate:
"""
The method simulates a single random walk step.
| **Notes:**
- The method can be accessed through the following aliases: **next**.
:param initial_state: the initial state.
:param output_index: a boolean indicating whether to output the state index.
:param seed: a seed to be used as RNG initializer for reproducibility purposes.
:raises ValidationError: if any input argument is not compliant.
"""
try:
rng = create_rng(seed)
initial_state = validate_state(initial_state, self.__states)
output_index = validate_boolean(output_index)
except Exception as e: # pragma: no cover
raise generate_validation_error(e, trace()) from None
value = simulate(self, 1, initial_state, None, rng)[-1]
if not output_index:
value = self.__states[value]
return value
def predict(self, steps: int, initial_state: tstate, output_indices: bool = False) -> owalk:
"""
The method computes the most probable sequence of states produced by a random walk of *N* steps, given the initial state.
| **Notes:**
- In presence of probability ties :py:class:`None` is returned.
:param steps: the number of steps.
:param initial_state: the initial state.
:param output_indices: a boolean indicating whether to output the state indices.
:raises ValidationError: if any input argument is not compliant.
"""
try:
steps = validate_integer(steps, lower_limit=(0, True))
initial_state = validate_state(initial_state, self.__states)
output_indices = validate_boolean(output_indices)
except Exception as e: # pragma: no cover
raise generate_validation_error(e, trace()) from None
value = predict(self, steps, initial_state)
if value is not None and not output_indices:
value = [*map(self.__states.__getitem__, value)]
return value
def redistribute(self, steps: int, initial_status: ostatus = None, output_last: bool = True) -> tredists:
"""
The method simulates a redistribution of states of *N* steps.
:param steps: the number of steps.
:param initial_status: the initial state or the initial distribution of the states (*if omitted, the states are assumed to be uniformly distributed*).
:param output_last: a boolean indicating whether to output only the last distributions.
:raises ValidationError: if any input argument is not compliant.
"""
try:
steps = validate_integer(steps, lower_limit=(1, False))
initial_status = np.ones(self.__size, dtype=float) / self.__size if initial_status is None else validate_status(initial_status, self.__states)
output_last = validate_boolean(output_last)
except Exception as e: # pragma: no cover
raise generate_validation_error(e, trace()) from None
value = redistribute(self, steps, initial_status, output_last)
return value
def sensitivity(self, state: tstate) -> oarray:
"""
The method computes the sensitivity matrix of the stationary distribution with respect to a given state.
| **Notes:**
- If the Markov chain is not **irreducible**, then :py:class:`None` is returned.
:param state: the target state.
:raises ValidationError: if any input argument is not compliant.
"""
try:
state = validate_state(state, self.__states)
except Exception as e: # pragma: no cover
raise generate_validation_error(e, trace()) from None
value = sensitivity(self, state)
return value
@alias('tc')
def time_correlations(self, walk1: twalk, walk2: owalk = None, time_points: ttimes_in = 1) -> otimes_out:
"""
The method computes the time autocorrelations of a single observed sequence of states or the time cross-correlations of two observed sequences of states.
| **Notes:**
- If the Markov chain has multiple stationary distributions, then :py:class:`None` is returned.
- If a single time point is provided, then a :py:class:`float` is returned.
- The method can be accessed through the following aliases: **tc**.
:param walk1: the first observed sequence of states.
:param walk2: the second observed sequence of states.
:param time_points: the time point or a list of time points at which the computation is performed.
:raises ValidationError: if any input argument is not compliant.
"""
try:
walk1 = validate_states(walk1, self.__states, 'walk', False)
walk2 = None if walk2 is None else validate_states(walk2, self.__states, 'walk', False)
time_points = validate_time_points(time_points)
except Exception as e: # pragma: no cover
raise generate_validation_error(e, trace()) from None
value = time_correlations(self, self.__rdl_decomposition, walk1, walk2, time_points)
return value
@alias('tr')
def time_relaxations(self, walk: twalk, initial_distribution: onumeric = None, time_points: ttimes_in = 1) -> otimes_out:
"""
The method computes the time relaxations of an observed sequence of states with respect to the given initial distribution of the states.
| **Notes:**
- If the Markov chain has multiple stationary distributions, then :py:class:`None` is returned.
- If a single time point is provided, then a :py:class:`float` is returned.
- The method can be accessed through the following aliases: **tr**.
:param walk: the observed sequence of states.
:param initial_distribution: the initial distribution of the states (*if omitted, the states are assumed to be uniformly distributed*).
:param time_points: the time point or a list of time points at which the computation is performed.
:raises ValidationError: if any input argument is not compliant.
"""
try:
walk = validate_states(walk, self.__states, 'walk', False)
initial_distribution = np.ones(self.__size, dtype=float) / self.__size if initial_distribution is None else validate_vector(initial_distribution, 'stochastic', False, self.__size)
time_points = validate_time_points(time_points)
except Exception as e: # pragma: no cover
raise generate_validation_error(e, trace()) from None
value = time_relaxations(self, self.__rdl_decomposition, walk, initial_distribution, time_points)
return value
@alias('to_bounded')
def to_bounded_chain(self, boundary_condition: tbcond) -> tmc:
"""
The method returns a bounded Markov chain by adjusting the transition matrix of the original process using the specified boundary condition.
| **Notes:**
- The method can be accessed through the following aliases: **to_bounded**.
:param boundary_condition:
- a number representing the first probability of the semi-reflecting condition;
- a string representing the boundary condition type (either absorbing or reflecting).
:raises ValidationError: if any input argument is not compliant.
"""
try:
boundary_condition = validate_boundary_condition(boundary_condition)
except Exception as e: # pragma: no cover
raise generate_validation_error(e, trace()) from None
p, _ = bounded(self.__p, boundary_condition)
mc = MarkovChain(p, self.__states)
return mc
@alias('to_canonical')
def to_canonical_form(self) -> tmc:
"""
The method returns the canonical form of the Markov chain.
| **Notes:**
- The method can be accessed through the following aliases: **to_canonical**.
"""
p, _ = canonical(self.__p, self.__recurrent_states_indices, self.__transient_states_indices)
states = [*map(self.__states.__getitem__, self.__transient_states_indices + self.__recurrent_states_indices)]
mc = MarkovChain(p, states)
return mc
def to_dictionary(self) -> tmc_dict:
"""
The method returns a dictionary representing the Markov chain transitions.
"""
d = {}
for i in range(self.__size):
for j in range(self.__size):
d[(self.__states[i], self.__states[j])] = self.__p[i, j]
return d
def to_graph(self, multi: bool = False) -> tgraphs:
"""
The method returns a directed graph representing the Markov chain.
:param multi: a boolean indicating whether the graph is allowed to define multiple edges between two nodes.
:raises ValidationError: if any input argument is not compliant.
"""
try:
multi = validate_boolean(multi)
except Exception as e: # pragma: no cover
raise generate_validation_error(e, trace()) from None
if multi:
graph = nx.MultiDiGraph(self.__p)
graph = nx.relabel_nodes(graph, dict(zip(range(self.__size), self.__states)))
else:
graph = deepcopy(self.__digraph)
return graph
def to_file(self, file_path: str):
"""
The method writes a Markov chain to the given file.
| Only **csv**, **json**, **txt** and **xml** files are supported; data format is inferred from the file extension.
|
:param file_path: the location of the file in which the Markov chain must be written.
:raises OSError: if the file cannot be written.
:raises ValidationError: if any input argument is not compliant.
"""
try:
file_path = validate_file_path(file_path, True)
except Exception as e: # pragma: no cover
raise generate_validation_error(e, trace()) from None
file_extension = get_file_extension(file_path)
if file_extension not in ['.csv', '.json', '.txt', '.xml']: # pragma: no cover
raise ValidationError('Only csv, json, xml and plain text files are supported.')
d = self.to_dictionary()
if file_extension == '.csv':
write_csv(d, file_path)
elif file_extension == '.json':
write_json(d, file_path)
elif file_extension == '.txt':
write_txt(d, file_path)
else:
write_xml(d, file_path)
@alias('to_lazy')
def to_lazy_chain(self, inertial_weights: tweights = 0.5) -> tmc:
"""
The method returns a lazy Markov chain by adjusting the state inertia of the original process.
| **Notes:**
- The method can be accessed through the following aliases: **to_lazy**.
:param inertial_weights: the inertial weights to apply for the transformation.
:raises ValidationError: if any input argument is not compliant.
"""
try:
inertial_weights = validate_vector(inertial_weights, 'unconstrained', True, self.__size)
except Exception as e: # pragma: no cover
raise generate_validation_error(e, trace()) from None
p, _ = lazy(self.__p, inertial_weights)
mc = MarkovChain(p, self.__states)
return mc
def to_matrix(self) -> tarray:
"""
The method returns the transition matrix of the Markov chain.
"""
m = np.copy(self.__p)
return m
@alias('to_sub')
def to_sub_chain(self, states: tstates) -> tmc:
"""
The method returns a subchain containing all the given states plus all the states reachable from them.
| **Notes:**
- The method can be accessed through the following aliases: **to_sub**.
:param states: the states to include in the subchain.
:raises ValidationError: if any input argument is not compliant.
:raises ValueError: if the subchain is not a valid Markov chain.
"""
try:
states = validate_states(states, self.__states, 'subset', True)
except Exception as e: # pragma: no cover
raise generate_validation_error(e, trace()) from None
p, states, error_message = sub(self.__p, self.__states, self.adjacency_matrix, states)
if error_message is not None: # pragma: no cover
raise ValueError(error_message)
mc = MarkovChain(p, states)
return mc
def transition_probability(self, state_target: tstate, state_origin: tstate) -> float:
"""
The method computes the probability of the given target state, conditioned on the process being at the given origin state.
:param state_target: the target state.
:param state_origin: the origin state.
:raises ValidationError: if any input argument is not compliant.
"""
try:
state_target = validate_state(state_target, self.__states)
state_origin = validate_state(state_origin, self.__states)
except Exception as e: # pragma: no cover
raise generate_validation_error(e, trace()) from None
value = self.__p[state_origin, state_target]
return value
@random_output()
def walk(self, steps: int, initial_state: ostate = None, final_state: ostate = None, output_indices: bool = False, seed: oint = None) -> twalk:
"""
The method simulates a random walk of *N* steps.
:param steps: the number of steps.
:param initial_state: the initial state (*if omitted, it is chosen uniformly at random*).
:param final_state: the final state of the walk (*if specified, the simulation stops as soon as it is reached even if not all the steps have been performed*).
:param output_indices: a boolean indicating whether to output the state indices.
:param seed: a seed to be used as RNG initializer for reproducibility purposes.
:raises ValidationError: if any input argument is not compliant.
"""
try:
rng = create_rng(seed)
steps = validate_integer(steps, lower_limit=(1, False))
initial_state = rng.randint(0, self.__size) if initial_state is None else validate_state(initial_state, self.__states)
final_state = None if final_state is None else validate_state(final_state, self.__states)
output_indices = validate_boolean(output_indices)
except Exception as e: # pragma: no cover
raise generate_validation_error(e, trace()) from None
value = simulate(self, steps, initial_state, final_state, rng)
if not output_indices:
value = [*map(self.__states.__getitem__, value)]
return value
def walk_probability(self, walk: twalk) -> float:
"""
The method computes the probability of a given sequence of states.
:param walk: the observed sequence of states.
:raises ValidationError: if any input argument is not compliant.
"""
try:
walk = validate_states(walk, self.__states, 'walk', False)
except Exception as e: # pragma: no cover
raise generate_validation_error(e, trace()) from None
value = walk_probability(self, walk)
return value
@staticmethod
def approximation(size: int, approximation_type: str, alpha: float, sigma: float, rho: float, k: ofloat = None) -> tmc:
"""
| The method approximates the Markov chain associated with the discretized version of the following first-order autoregressive process:
|
| :math:`y_t = (1 - \\rho) \\alpha + \\rho y_{t-1} + \\varepsilon_t`
| with :math:`\\varepsilon_t \\overset{i.i.d}{\\sim} \\mathcal{N}(0, \\sigma_{\\varepsilon}^{2})`
:param size: the size of the Markov chain.
:param approximation_type:
- **adda-cooper** for the Adda-Cooper approximation;
- **rouwenhorst** for the Rouwenhorst approximation;
- **tauchen** for the Tauchen approximation;
- **tauchen-hussey** for the Tauchen-Hussey approximation.
:param alpha: the constant term :math:`\\alpha`, representing the unconditional mean of the process.
:param sigma: the standard deviation of the innovation term :math:`\\varepsilon`.
:param rho: the autocorrelation coefficient :math:`\\rho`, representing the persistence of the process across periods.
:param k:
- In the Tauchen approximation, the number of standard deviations to approximate out to (*if omitted, the value is set to 3*).
- In the Tauchen-Hussey approximation, the standard deviation used for the gaussian quadrature (*if omitted, the value is set to an optimal default*).
:raises ValidationError: if any input argument is not compliant.
:raises ValueError: if the gaussian quadrature fails to converge in the Tauchen-Hussey approximation.
"""
try:
size = validate_integer(size, lower_limit=(2, False))
approximation_type = validate_enumerator(approximation_type, ['adda-cooper', 'rouwenhorst', 'tauchen', 'tauchen-hussey'])
alpha = validate_float(alpha)
sigma = validate_float(sigma, lower_limit=(0.0, True))
rho = validate_float(rho, lower_limit=(-1.0, False), upper_limit=(1.0, False))
if approximation_type == 'tauchen':
k = 3.0 if k is None else validate_float(k, lower_limit=(1.0, False))
elif approximation_type == 'tauchen-hussey':
k = ((0.5 + (rho / 4.0)) * sigma) + ((1 - (0.5 + (rho / 4.0))) * (sigma / np.sqrt(1.0 - rho**2.0))) if k is None else validate_float(k, lower_limit=(0.0, True))
except Exception as e: # pragma: no cover
raise generate_validation_error(e, trace()) from None
p, states, error_message = approximation(size, approximation_type, alpha, sigma, rho, k)
if error_message is not None: # pragma: no cover
raise ValueError(error_message)
mc = MarkovChain(p, states)
return mc
@staticmethod
def birth_death(p: tarray, q: tarray, states: olist_str = None) -> tmc:
"""
The method generates a birth-death Markov chain of given size and from given probabilities.
:param p: the creation probabilities.
:param q: the annihilation probabilities.
:param states: the name of each state (*if omitted, an increasing sequence of integers starting at 1*).
:raises ValidationError: if any input argument is not compliant.
"""
try:
p = validate_vector(p, 'creation', False)
q = validate_vector(q, 'annihilation', False)
states = [str(i) for i in range(1, {p.shape[0], q.shape[0]}.pop() + 1)] if states is None else validate_state_names(states, {p.shape[0], q.shape[0]}.pop())
except Exception as e: # pragma: no cover
raise generate_validation_error(e, trace()) from None
if p.shape[0] != q.shape[0]: # pragma: no cover
raise ValidationError('The vector of annihilation probabilities and the vector of creation probabilities must have the same size.')
if not np.all(q + p <= 1.0): # pragma: no cover
raise ValidationError('The sums of annihilation and creation probabilities must be less than or equal to 1.')
p, _ = birth_death(p, q)
mc = MarkovChain(p, states)
return mc
@staticmethod
def fit_function(possible_states: tlist_str, f: ttfunc, quadrature_type: str, quadrature_interval: ointerval = None) -> tmc:
"""
The method fits a Markov chain using the given transition function and the given quadrature type for the computation of nodes and weights.
| **Notes:**
- The transition function takes the four input arguments below and returns a numeric value:
- **x_index** an integer value representing the index of the i-th quadrature node;
- **x_value** a float value representing the value of the i-th quadrature node;
- **y_index** an integer value representing the index of the j-th quadrature node;
- **y_value** a float value representing the value of the j-th quadrature node.
:param possible_states: the possible states of the process.
:param f: the transition function of the process.
:param quadrature_type:
- **gauss-chebyshev** for the Gauss-Chebyshev quadrature;
- **gauss-legendre** for the Gauss-Legendre quadrature;
- **niederreiter** for the Niederreiter equidistributed sequence;
- **newton-cotes** for the Newton-Cotes quadrature;
- **simpson-rule** for the Simpson rule;
- **trapezoid-rule** for the Trapezoid rule.
:param quadrature_interval: the quadrature interval to use for the computation of nodes and weights (*if omitted, the interval [0, 1] is used*).
:raises ValidationError: if any input argument is not compliant.
:raises ValueError: if the Gauss-Legendre quadrature fails to converge.
"""
try:
possible_states = validate_state_names(possible_states)
f = validate_transition_function(f)
quadrature_type = validate_enumerator(quadrature_type, ['gauss-chebyshev', 'gauss-legendre', 'niederreiter', 'newton-cotes', 'simpson-rule', 'trapezoid-rule'])
quadrature_interval = (0.0, 1.0) if quadrature_interval is None else validate_interval(quadrature_interval)
except Exception as e: # pragma: no cover
raise generate_validation_error(e, trace()) from None
if quadrature_type == 'simpson-rule' and (len(possible_states) % 2) == 0: # pragma: no cover
raise ValidationError('The quadrature based on the Simpson rule requires an odd number of possible states.')
p, error_message = fit_function(possible_states, f, quadrature_type, quadrature_interval)
if error_message is not None: # pragma: no cover
raise ValueError(error_message)
mc = MarkovChain(p, possible_states)
return mc
@staticmethod
def fit_walk(fitting_type: str, possible_states: tlist_str, walk: twalk, k: tany = None) -> tmc:
"""
The method fits a Markov chain from an observed sequence of states using the specified fitting approach.
:param fitting_type:
- **map** for the maximum a posteriori fitting;
- **mle** for the maximum likelihood fitting.
:param possible_states: the possible states of the process.
:param walk: the observed sequence of states.
:param k:
| - In the maximum a posteriori fitting, the matrix for the a priori distribution (*if omitted, a default value of 1 is assigned to each matrix element*).
| - In the maximum likelihood fitting, a boolean indicating whether to apply a Laplace smoothing to compensate for the unseen transition combinations (*if omitted, the value is set to False*).
:raises ValidationError: if any input argument is not compliant.
"""
try:
fitting_type = validate_enumerator(fitting_type, ['map', 'mle'])
possible_states = validate_state_names(possible_states)
walk = validate_states(walk, possible_states, 'walk', False)
if fitting_type == 'map':
k = np.ones((len(possible_states), len(possible_states)), dtype=float) if k is None else validate_hyperparameter(k, len(possible_states))
else:
k = False if k is None else validate_boolean(k)
except Exception as e: # pragma: no cover
raise generate_validation_error(e, trace()) from None
p, _ = fit_walk(fitting_type, possible_states, walk, k)
mc = MarkovChain(p, possible_states)
return mc
@staticmethod
def from_dictionary(d: tmc_dict_flex) -> tmc:
"""
The method generates a Markov chain from the given dictionary, whose keys represent state pairs and whose values represent transition probabilities.
:param d: the dictionary to transform into the transition matrix.
:raises ValueError: if the transition matrix defined by the dictionary is not valid.
:raises ValidationError: if any input argument is not compliant.
"""
try:
d = validate_dictionary(d)
except Exception as e: # pragma: no cover
raise generate_validation_error(e, trace()) from None
states = [key[0] for key in d.keys() if key[0] == key[1]]
size = len(states)
if size < 2: # pragma: no cover
raise ValueError('The size of the transition matrix defined by the dictionary must be greater than or equal to 2.')
p = np.zeros((size, size), dtype=float)
for it, ip in d.items():
p[states.index(it[0]), states.index(it[1])] = ip
if not np.allclose(np.sum(p, axis=1), np.ones(size, dtype=float)): # pragma: no cover
raise ValueError('The rows of the transition matrix defined by the dictionary must sum to 1.')
mc = MarkovChain(p, states)
return mc
@staticmethod
def from_graph(graph: tgraphs) -> tmc:
"""
The method generates a Markov chain from the given directed graph, whose transition matrix is obtained through the normalization of the graph weights.
:raises ValidationError: if any input argument is not compliant.
"""
try:
graph = validate_graph(graph)
except Exception as e: # pragma: no cover
raise generate_validation_error(e, trace()) from None
states = list(graph.nodes)
size = len(states)
p = np.zeros((size, size), dtype=float)
edges = list(graph.edges(data='weight', default=0.0))
for edge in edges:
i = states.index(edge[0])
j = states.index(edge[1])
p[i, j] = float(edge[2])
p_sums = np.sum(p, axis=1)
for i in range(size):
if np.isclose(p_sums[i], 0.0): # pragma: no cover
p[i, :] = np.ones(p.shape[0], dtype=float) / size
else:
p[i, :] /= p_sums[i]
mc = MarkovChain(p, states)
return mc
@staticmethod
def from_file(file_path: str) -> tmc:
"""
The method reads a Markov chain from the given file.
| Only **csv**, **json**, **txt** and **xml** files are supported; data format is inferred from the file extension.
| In **csv** files, the header must contain the state names and the following rows must contain the probabilities.
| The following format settings are required:
- *Delimiter:* **comma**
- *Quoting:* **minimal**
- *Quote Character:* **double quote**
| In **json** files, data must be structured as an array of objects with the following properties:
- **state_from** *(string)*
- **state_to** *(string)*
- **probability** *(float or int)*
| In **txt** files, every line of the file must have the following format:
- **<state_from> <state_to> <probability>**
| In **xml** files, the structure must be defined as follows:
- *Root Element:* **MarkovChain**
- *Child Elements:* **Transition**\ *, with attributes:*
- **state_from** *(string)*
- **state_to** *(string)*
- **probability** *(float or int)*
:param file_path: the location of the file that defines the Markov chain.
:raises FileNotFoundError: if the file does not exist.
:raises OSError: if the file cannot be read or is empty.
:raises ValidationError: if any input argument is not compliant.
:raises ValueError: if the file contains invalid data.
"""
try:
file_path = validate_file_path(file_path, False)
except Exception as e: # pragma: no cover
raise generate_validation_error(e, trace()) from None
file_extension = get_file_extension(file_path)
if file_extension not in ['.csv', '.json', '.xml', '.txt']: # pragma: no cover
raise ValidationError('Only csv, json, xml and plain text files are supported.')
if file_extension == '.csv':
d = read_csv(file_path)
elif file_extension == '.json':
d = read_json(file_path)
elif file_extension == '.txt':
d = read_txt(file_path)
else:
d = read_xml(file_path)
states = [key[0] for key in d if key[0] == key[1]]
size = len(states)
if size < 2: # pragma: no cover
raise ValueError('The size of the transition matrix defined by the file must be greater than or equal to 2.')
p = np.zeros((size, size), dtype=float)
for it, ip in d.items():
p[states.index(it[0]), states.index(it[1])] = ip
if not np.allclose(np.sum(p, axis=1), np.ones(size, dtype=float)): # pragma: no cover
raise ValueError('The rows of the transition matrix defined by the file must sum to 1.')
mc = MarkovChain(p, states)
return mc
@staticmethod
def from_matrix(m: tnumeric, states: olist_str = None) -> tmc:
"""
The method generates a Markov chain with the given state names, whose transition matrix is obtained through the normalization of the given matrix.
:param m: the matrix to transform into the transition matrix.
:param states: the name of each state (*if omitted, an increasing sequence of integers starting at 1*).
:raises ValidationError: if any input argument is not compliant.
"""
try:
m = validate_matrix(m)
states = [str(i) for i in range(1, m.shape[0] + 1)] if states is None else validate_state_names(states, m.shape[0])
except Exception as e: # pragma: no cover
raise generate_validation_error(e, trace()) from None
p = np.copy(m)
p_sums = np.sum(p, axis=1)
size = p.shape[0]
for i in range(size):
if np.isclose(p_sums[i], 0.0): # pragma: no cover
p[i, :] = np.ones(p.shape[0], dtype=float) / size
else:
p[i, :] /= p_sums[i]
mc = MarkovChain(p, states)
return mc
@staticmethod
def gamblers_ruin(size: int, w: float, states: olist_str = None) -> tmc:
"""
The method generates a gambler's ruin Markov chain of given size and win probability.
:param size: the size of the Markov chain.
:param w: the win probability.
:param states: the name of each state (*if omitted, an increasing sequence of integers starting at 1*).
:raises ValidationError: if any input argument is not compliant.
"""
try:
size = validate_integer(size, lower_limit=(3, False))
w = validate_float(w, lower_limit=(0.0, True), upper_limit=(1.0, True))
states = [str(i) for i in range(1, size + 1)] if states is None else validate_state_names(states, size)
except Exception as e: # pragma: no cover
raise generate_validation_error(e, trace()) from None
p, _ = gamblers_ruin(size, w)
mc = MarkovChain(p, states)
return mc
@staticmethod
def identity(size: int, states: olist_str = None) -> tmc:
"""
The method generates a Markov chain of given size based on an identity transition matrix.
:param size: the size of the Markov chain.
:param states: the name of each state (*if omitted, an increasing sequence of integers starting at 1*).
:raises ValidationError: if any input argument is not compliant.
"""
try:
size = validate_integer(size, lower_limit=(2, False))
states = [str(i) for i in range(1, size + 1)] if states is None else validate_state_names(states, size)
except Exception as e: # pragma: no cover
raise generate_validation_error(e, trace()) from None
p = np.eye(size, dtype=float)
mc = MarkovChain(p, states)
return mc
@staticmethod
def random(size: int, states: olist_str = None, zeros: int = 0, mask: onumeric = None, seed: oint = None) -> tmc:
"""
The method generates a Markov chain of given size with random transition probabilities.
| **Notes:**
- In the mask parameter, undefined transition probabilities are represented by **NaN** values.
:param size: the size of the Markov chain.
:param states: the name of each state (*if omitted, an increasing sequence of integers starting at 1*).
:param zeros: the number of zero-valued transition probabilities.
:param mask: a matrix representing locations and values of fixed transition probabilities.
:param seed: a seed to be used as RNG initializer for reproducibility purposes.
:raises ValidationError: if any input argument is not compliant.
"""
try:
rng = create_rng(seed)
size = validate_integer(size, lower_limit=(2, False))
states = [str(i) for i in range(1, size + 1)] if states is None else validate_state_names(states, size)
zeros = validate_integer(zeros, lower_limit=(0, False))
mask = np.full((size, size), np.nan, dtype=float) if mask is None else validate_mask(mask, size)
except Exception as e: # pragma: no cover
raise generate_validation_error(e, trace()) from None
p, error_message = random(rng, size, zeros, mask)
if error_message is not None: # pragma: no cover
raise ValidationError(error_message)
mc = MarkovChain(p, states)
return mc
@staticmethod
def urn_model(n: int, model: str) -> tmc:
"""
The method generates a Markov chain of size **2N + 1** based on the specified urn model.
:param n: the number of elements in each urn.
:param model:
- **bernoulli-laplace** for the Bernoulli-Laplace urn model;
- **ehrenfest** for the Ehrenfest urn model.
:raises ValidationError: if any input argument is not compliant.
"""
try:
n = validate_integer(n, lower_limit=(1, False))
model = validate_enumerator(model, ['bernoulli-laplace', 'ehrenfest'])
except Exception as e: # pragma: no cover
raise generate_validation_error(e, trace()) from None
p, states, _ = urn_model(n, model)
mc = MarkovChain(p, states)
return mc
```
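The test files further below construct chains via `from pydtmc import MarkovChain`, so the class above is exposed at the package root. A minimal usage sketch against the methods shown here (assuming PyDTMC is installed; the matrix and state names are illustrative):
```python
import numpy as np
from pydtmc import MarkovChain

# A 3-state row-stochastic transition matrix with named states.
p = np.array([
    [0.2, 0.7, 0.1],
    [0.0, 0.6, 0.4],
    [0.5, 0.0, 0.5]
])
mc = MarkovChain(p, ['A', 'B', 'C'])

print(mc.is_absorbing_state('A'))           # False: state A can be left
print(mc.transition_probability('B', 'A'))  # 0.7: probability of moving A -> B
print(mc.walk(10, initial_state='A', seed=42))  # reproducible 10-step random walk
print(mc.to_dictionary()[('A', 'B')])       # 0.7: same entry via the dictionary view
```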
#### File: PyDTMC/pydtmc/simulations.py
```python
__all__ = [
'predict',
'redistribute',
'simulate',
'walk_probability'
]
###########
# IMPORTS #
###########
# Libraries
import numpy as np
# Internal
from .custom_types import (
oint,
olist_int,
tarray,
tlist_int,
tmc,
trand,
tredists
)
#############
# FUNCTIONS #
#############
def predict(mc: tmc, steps: int, initial_state: int) -> olist_int:
current_state = initial_state
value = [initial_state]
for _ in range(steps):
d = mc.p[current_state, :]
d_max = np.argwhere(d == np.max(d))
if d_max.size > 1:
return None
current_state = d_max.item()
value.append(current_state)
return value
def redistribute(mc: tmc, steps: int, initial_status: tarray, output_last: bool) -> tredists:
value = np.zeros((steps + 1, mc.size), dtype=float)
value[0, :] = initial_status
for i in range(1, steps + 1):
value[i, :] = value[i - 1, :].dot(mc.p)
value[i, :] /= np.sum(value[i, :])
if output_last:
return value[-1]
value = [np.ravel(distribution) for distribution in np.split(value, value.shape[0])]
return value
def simulate(mc: tmc, steps: int, initial_state: int, final_state: oint, rng: trand) -> tlist_int:
current_state = initial_state
value = [initial_state]
for _ in range(steps):
w = mc.p[current_state, :]
current_state = rng.choice(mc.size, size=1, p=w).item()
value.append(current_state)
if final_state is not None and current_state == final_state:
break
return value
def walk_probability(mc: tmc, walk: tlist_int) -> float:
p = 0.0
for (i, j) in zip(walk[:-1], walk[1:]):
if mc.p[i, j] > 0.0:
p += np.log(mc.p[i, j])
else:
p = -np.inf
break
value = np.exp(p)
return value
```
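`walk_probability` above sums log-transition-probabilities along the observed index sequence and exponentiates the result; a small hand computation over a 2-state chain reproduces that logic (plain NumPy, no package import needed):
```python
import numpy as np

# Transition matrix of a 2-state chain; states are referred to by index (0 and 1),
# exactly as in the simulation helpers above.
p = np.array([[0.9, 0.1],
              [0.5, 0.5]])

# Probability of observing the index sequence 0 -> 0 -> 1 -> 1.
walk = [0, 0, 1, 1]
log_p = sum(np.log(p[i, j]) for i, j in zip(walk[:-1], walk[1:]))
print(np.exp(log_p))  # 0.9 * 0.1 * 0.5 = 0.045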
#### File: PyDTMC/pydtmc/utilities.py
```python
__all__ = [
'create_rng',
'generate_validation_error',
'get_file_extension'
]
###########
# IMPORTS #
###########
# Standard
import pathlib as pl
# Libraries
import numpy as np
import numpy.random as npr
import numpy.random.mtrand as nprm
# Internal
from .custom_types import (
oint,
tany,
texception,
titerable,
tmapping,
trand
)
from .exceptions import (
ValidationError
)
#############
# FUNCTIONS #
#############
# noinspection PyProtectedMember
def create_rng(seed: oint) -> trand:
if seed is None:
return nprm._rand
if isinstance(seed, (int, np.integer)):
return npr.RandomState(int(seed))
raise TypeError('The specified seed is not a valid RNG initializer.')
def generate_validation_error(e: texception, trace: tany) -> ValidationError:
arguments = ''.join(trace[0][4]).split('=', 1)[0].strip()
message = str(e).replace('@arg@', arguments)
return ValidationError(message)
def get_file_extension(file_path: str) -> str:
return ''.join(pl.Path(file_path).suffixes).lower()
def namedtuple_to_dictionary(obj: tany):
if isinstance(obj, tuple) and hasattr(obj, '_fields'):
# noinspection PyProtectedMember
return dict(zip(obj._fields, map(namedtuple_to_dictionary, obj)))
if isinstance(obj, titerable) and not isinstance(obj, str):
return type(obj)(map(namedtuple_to_dictionary, obj))
if isinstance(obj, tmapping):
return type(obj)(zip(obj.keys(), map(namedtuple_to_dictionary, obj.values())))
return obj
```
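A short sketch of the helpers above in use. The import path and the assumption that `titerable` covers standard containers such as lists follow the file layout and are not verified here:
```python
from collections import namedtuple
from pydtmc.utilities import create_rng, namedtuple_to_dictionary  # assumed import path

# An integer seed yields a dedicated RandomState, so draws are reproducible.
rng = create_rng(7)
print(rng.randint(0, 10, size=3))  # identical output on every run

# Named tuples are converted to dictionaries recursively; other containers are rebuilt as-is.
Point = namedtuple('Point', ['x', 'y'])
print(namedtuple_to_dictionary([Point(0, 1), Point(2, 3)]))
# -> [{'x': 0, 'y': 1}, {'x': 2, 'y': 3}]
```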
#### File: PyDTMC/tests/test_fitting.py
```python
import numpy as np
import numpy.testing as npt
# Internal
from pydtmc import (
MarkovChain
)
#########
# TESTS #
#########
def test_fit_function(possible_states, f, quadrature_type, quadrature_interval, value):
f = eval('lambda x_index, x_value, y_index, y_value: ' + f)
quadrature_interval = None if quadrature_interval is None else tuple(quadrature_interval)
mc = MarkovChain.fit_function(possible_states, f, quadrature_type, quadrature_interval)
actual = mc.p
expected = np.asarray(value)
npt.assert_allclose(actual, expected, rtol=1e-5, atol=1e-8)
def test_fit_walk(fitting_type, possible_states, walk, k, value):
mc = MarkovChain.fit_walk(fitting_type, possible_states, walk, k)
actual = mc.p
expected = np.asarray(value)
npt.assert_allclose(actual, expected, rtol=1e-5, atol=1e-8)
```
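The two tests above receive their arguments from pytest parametrization defined elsewhere in the repository. Calling the same API by hand, using the `fit_walk` signature shown earlier, looks like this (a sketch; the state names and observed sequence are made up):
```python
from pydtmc import MarkovChain

# Maximum-likelihood fit of a 2-state chain from a short observed sequence.
walk = ['sunny', 'sunny', 'rainy', 'sunny', 'rainy', 'rainy']
mc = MarkovChain.fit_walk('mle', ['sunny', 'rainy'], walk)
print(mc.p)  # row-stochastic 2x2 matrix estimated from the observed transition counts
```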
#### File: PyDTMC/tests/test_properties.py
```python
import numpy as np
import numpy.linalg as npl
import numpy.testing as npt
from pytest import (
skip
)
# Internal
from pydtmc import (
MarkovChain
)
#########
# TESTS #
#########
def test_attributes(p, is_absorbing, is_canonical, is_doubly_stochastic, is_ergodic, is_reversible, is_symmetric):
mc = MarkovChain(p)
actual = mc.is_absorbing
expected = is_absorbing
assert actual == expected
actual = mc.is_canonical
expected = is_canonical
assert actual == expected
actual = mc.is_doubly_stochastic
expected = is_doubly_stochastic
assert actual == expected
actual = mc.is_ergodic
expected = is_ergodic
assert actual == expected
actual = mc.is_reversible
expected = is_reversible
assert actual == expected
actual = mc.is_symmetric
expected = is_symmetric
assert actual == expected
def test_binary_matrices(p, accessibility_matrix, adjacency_matrix, communication_matrix):
mc = MarkovChain(p)
actual = mc.accessibility_matrix
expected = np.asarray(accessibility_matrix)
assert np.array_equal(actual, expected)
for i in range(mc.size):
for j in range(mc.size):
actual = mc.is_accessible(j, i)
expected = mc.accessibility_matrix[i, j] != 0
assert actual == expected
actual = mc.are_communicating(i, j)
expected = mc.accessibility_matrix[i, j] != 0 and mc.accessibility_matrix[j, i] != 0
assert actual == expected
actual = mc.adjacency_matrix
expected = np.asarray(adjacency_matrix)
assert np.array_equal(actual, expected)
actual = mc.communication_matrix
expected = np.asarray(communication_matrix)
assert np.array_equal(actual, expected)
def test_entropy(p, entropy_rate, entropy_rate_normalized, topological_entropy):
mc = MarkovChain(p)
actual = mc.entropy_rate
expected = entropy_rate
if actual is not None and expected is not None:
assert np.isclose(actual, expected)
else:
assert actual == expected
actual = mc.entropy_rate_normalized
expected = entropy_rate_normalized
if actual is not None and expected is not None:
assert np.isclose(actual, expected)
else:
assert actual == expected
actual = mc.topological_entropy
expected = topological_entropy
assert np.isclose(actual, expected)
def test_fundamental_matrix(p, fundamental_matrix, kemeny_constant):
mc = MarkovChain(p)
actual = mc.fundamental_matrix
expected = fundamental_matrix
if actual is not None and expected is not None:
expected = np.asarray(expected)
npt.assert_allclose(actual, expected, rtol=1e-5, atol=1e-8)
else:
assert actual == expected
actual = mc.kemeny_constant
expected = kemeny_constant
if actual is not None and expected is not None:
assert np.isclose(actual, expected)
else:
assert actual == expected
def test_irreducibility(p):
mc = MarkovChain(p)
if not mc.is_irreducible:
skip('Markov chain is not irreducible.')
else:
actual = mc.states
expected = mc.recurrent_states
assert actual == expected
actual = len(mc.communicating_classes)
expected = 1
assert actual == expected
cf = mc.to_canonical_form()
actual = cf.p
expected = mc.p
npt.assert_allclose(actual, expected, rtol=1e-5, atol=1e-8)
def test_lumping_partitions(p, lumping_partitions):
mc = MarkovChain(p)
actual = mc.lumping_partitions
expected = lumping_partitions
assert actual == expected
def test_matrix(p, determinant, rank):
mc = MarkovChain(p)
actual = mc.determinant
expected = determinant
assert np.isclose(actual, expected)
actual = mc.rank
expected = rank
assert actual == expected
def test_periodicity(p, period):
mc = MarkovChain(p)
actual = mc.period
expected = period
assert actual == expected
actual = mc.is_aperiodic
expected = period == 1
assert actual == expected
def test_regularity(p):
mc = MarkovChain(p)
if not mc.is_regular:
skip('Markov chain is not regular.')
else:
actual = mc.is_irreducible
expected = True
assert actual == expected
values = np.sort(np.abs(npl.eigvals(mc.p)))
actual = np.sum(np.logical_or(np.isclose(values, 1.0), values > 1.0))
expected = 1
assert actual == expected
def test_stationary_distributions(p, stationary_distributions):
mc = MarkovChain(p)
stationary_distributions = [np.array(stationary_distribution) for stationary_distribution in stationary_distributions]
actual = len(mc.pi)
expected = len(stationary_distributions)
assert actual == expected
actual = len(mc.pi)
expected = len(mc.recurrent_classes)
assert actual == expected
ss_matrix = np.vstack(mc.pi)
actual = npl.matrix_rank(ss_matrix)
expected = min(ss_matrix.shape)
assert actual == expected
for index, stationary_distribution in enumerate(stationary_distributions):
assert np.isclose(np.sum(mc.pi[index]), 1.0)
actual = mc.pi[index]
expected = stationary_distribution
npt.assert_allclose(actual, expected, rtol=1e-5, atol=1e-8)
def test_transitions(p):
mc = MarkovChain(p)
transition_matrix = mc.p
states = mc.states
for index, state in enumerate(states):
actual = mc.conditional_probabilities(state)
expected = transition_matrix[index, :]
npt.assert_allclose(actual, expected, rtol=1e-5, atol=1e-8)
for index1, state1 in enumerate(states):
for index2, state2 in enumerate(states):
actual = mc.transition_probability(state1, state2)
expected = transition_matrix[index2, index1]
assert np.isclose(actual, expected)
def test_times(p, mixing_rate, relaxation_rate, spectral_gap, implied_timescales):
mc = MarkovChain(p)
actual = mc.mixing_rate
expected = mixing_rate
if actual is not None and expected is not None:
assert np.isclose(actual, expected)
else:
assert actual == expected
actual = mc.relaxation_rate
expected = relaxation_rate
if actual is not None and expected is not None:
assert np.isclose(actual, expected)
else:
assert actual == expected
actual = mc.spectral_gap
expected = spectral_gap
if actual is not None and expected is not None:
assert np.isclose(actual, expected)
else:
assert actual == expected
actual = mc.implied_timescales
expected = implied_timescales
if actual is not None and expected is not None:
expected = np.asarray(expected)
npt.assert_allclose(actual, expected, rtol=1e-5, atol=1e-8)
else:
assert actual == expected
``` |
{
"source": "3asyPe/budget-manager",
"score": 2
} |
#### File: src/app/apis.py
```python
from rest_framework.decorators import api_view
from rest_framework.response import Response
@api_view(["POST"])
def test_api(request):
print(request.POST)
return Response({"test": "test"}, 200)
``` |
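The view above simply echoes a fixed payload. Inside a configured Django project it can be exercised without any URL wiring through DRF's request factory (a sketch; the import path `app.apis` is inferred from the file header above):
```python
from rest_framework.test import APIRequestFactory
from app.apis import test_api  # assumed import path

factory = APIRequestFactory()
request = factory.post('/test/', {'key': 'value'})  # the request path is arbitrary here
response = test_api(request)
print(response.status_code)  # 200
print(response.data)         # {'test': 'test'}
```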
{
"source": "3bhady/UnFlow",
"score": 2
} |
#### File: e2eflow/core/supervised.py
```python
import tensorflow as tf
import tensorflow.contrib.slim as slim
import numpy as np
from .augment import random_photometric
from .flow_util import flow_to_color
from .losses import charbonnier_loss
from .flownet import flownet
from .unsupervised import _track_image, _track_loss, FLOW_SCALE
def supervised_loss(batch, params, normalization=None):
channel_mean = tf.constant(normalization[0]) / 255.0
im1, im2, flow_gt, mask_gt = batch
im1 = im1 / 255.0
im2 = im2 / 255.0
im_shape = tf.shape(im1)[1:3]
# -------------------------------------------------------------------------
im1_photo, im2_photo = random_photometric(
[im1, im2],
noise_stddev=0.04, min_contrast=-0.3, max_contrast=0.3,
brightness_stddev=0.02, min_colour=0.9, max_colour=1.1,
min_gamma=0.7, max_gamma=1.5)
_track_image(im1_photo, 'im1_photo')
_track_image(im2_photo, 'im2_photo')
_track_image(flow_to_color(flow_gt), 'flow_gt')
_track_image(mask_gt, 'mask_gt')
# Images for neural network input with mean-zero values in [-1, 1]
im1_photo = im1_photo - channel_mean
im2_photo = im2_photo - channel_mean
flownet_spec = params.get('flownet', 'S')
full_resolution = params.get('full_res')
train_all = params.get('train_all')
# -------------------------------------------------------------------------
# FlowNet
flows_fw = flownet(im1_photo, im2_photo,
flownet_spec=flownet_spec,
full_resolution=full_resolution,
train_all=train_all)
if not train_all:
flows_fw = [flows_fw[-1]]
final_loss = 0.0
for i, net_flows in enumerate(reversed(flows_fw)):
flow_fw = net_flows[0]
if params.get('full_res'):
final_flow_fw = flow_fw * FLOW_SCALE * 4
else:
final_flow_fw = tf.image.resize_bilinear(flow_fw, im_shape) * FLOW_SCALE * 4
_track_image(flow_to_color(final_flow_fw), 'flow_pred_' + str(i))
net_loss = charbonnier_loss(final_flow_fw - flow_gt, mask_gt)
final_loss += net_loss / (2 ** i)
regularization_loss = tf.add_n(slim.losses.get_regularization_losses())
final_loss += regularization_loss
_track_loss(regularization_loss, 'loss/regularization')
_track_loss(final_loss, 'loss/combined')
return final_loss
```
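`charbonnier_loss` is imported from `.losses`, which is not included in this excerpt. The sketch below is a generic masked generalized Charbonnier penalty of the kind commonly used for flow regression, not necessarily this repository's exact implementation:
```python
import tensorflow.compat.v1 as tf  # the original file uses TF1-style `import tensorflow as tf`

def charbonnier_penalty_sketch(x, mask=None, alpha=0.45, beta=1.0, epsilon=0.001):
    """Masked generalized Charbonnier penalty: ((beta * x)^2 + eps^2)^alpha, summed and averaged."""
    error = tf.pow(tf.square(x * beta) + tf.square(epsilon), alpha)
    if mask is not None:
        error = tf.multiply(error, mask)  # a single-channel mask broadcasts over the flow channels
    return tf.reduce_sum(error) / tf.cast(tf.size(x), tf.float32)
```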
#### File: e2eflow/kitti/data.py
```python
import os
import sys
import numpy as np
import matplotlib.image as mpimg
from . import raw_records
from ..core.data import Data
from ..util import tryremove
from ..core.input import frame_name_to_num
def exclude_test_and_train_images(kitti_dir, exclude_lists_dir, exclude_target_dir,
remove=False):
to_move = []
def exclude_from_seq(day_name, seq_str, image, view, distance=10):
# image is the first frame of each frame pair to exclude
seq_dir_rel = os.path.join(day_name, seq_str, view, 'data')
seq_dir_abs = os.path.join(kitti_dir, seq_dir_rel)
target_dir_abs = os.path.join(exclude_target_dir, seq_dir_rel)
if not os.path.isdir(seq_dir_abs):
print("Not found: {}".format(seq_dir_abs))
return
try:
os.makedirs(target_dir_abs)
except:
pass
seq_files = sorted(os.listdir(seq_dir_abs))
image_num = frame_name_to_num(image)
try:
image_index = seq_files.index(image)
except ValueError:
return
# assume that some in-between files may be missing
start = max(0, image_index - distance)
stop = min(len(seq_files), image_index + distance + 2)
start_num = image_num - distance
stop_num = image_num + distance + 2
for i in range(start, stop):
filename = seq_files[i]
num = frame_name_to_num(filename)
if num < start_num or num >= stop_num:
continue
to_move.append((os.path.join(seq_dir_abs, filename),
os.path.join(target_dir_abs, filename)))
for filename in os.listdir(exclude_lists_dir):
exclude_list_path = os.path.join(exclude_lists_dir, filename)
with open(exclude_list_path) as f:
for line in f:
line = line.rstrip('\n')
if line.split(' ')[0].endswith('_10'):
splits = line.split(' ')[-1].split('\\')
image = splits[-1]
seq_str = splits[0]
day_name, seq_name = seq_str.split('_drive_')
seq_name = seq_name.split('_')[0] + '_extract'
seq_str = day_name + '_drive_' + seq_name
exclude_from_seq(day_name, seq_str, image, 'image_02')
exclude_from_seq(day_name, seq_str, image, 'image_03')
if remove:
print("Collected {} files. Deleting...".format(len(to_move)))
else:
print("Collected {} files. Moving...".format(len(to_move)))
for i, data in enumerate(to_move):
try:
src, dst = data
print("{} / {}: {}".format(i, len(to_move) - 1, src))
if remove:
os.remove(src)
else:
os.rename(src, dst)
except: # Some ranges may overlap
pass
return len(to_move)
class KITTIData(Data):
KITTI_RAW_URL = 'https://s3.eu-central-1.amazonaws.com/avg-kitti/raw_data/'
KITTI_2012_URL = 'https://s3.eu-central-1.amazonaws.com/avg-kitti/data_stereo_flow.zip'
KITTI_2015_URL = 'https://s3.eu-central-1.amazonaws.com/avg-kitti/data_scene_flow.zip'
dirs = ['data_stereo_flow', 'data_scene_flow', 'kitti_raw']
def __init__(self, data_dir, stat_log_dir=None,
development=True, fast_dir=None):
super().__init__(data_dir, stat_log_dir,
development=development,
fast_dir=fast_dir)
def _fetch_if_missing(self):
self._maybe_get_kitti_raw()
self._maybe_get_kitti_2012()
self._maybe_get_kitti_2015()
def get_raw_dirs(self):
top_dir = os.path.join(self.current_dir, 'kitti_raw')
dirs = []
dates = os.listdir(top_dir)
for date in dates:
date_path = os.path.join(top_dir, date)
extracts = os.listdir(date_path)
for extract in extracts:
extract_path = os.path.join(date_path, extract)
image_02_folder = os.path.join(extract_path, 'image_02/data')
image_03_folder = os.path.join(extract_path, 'image_03/data')
dirs.extend([image_02_folder, image_03_folder])
return dirs
def _maybe_get_kitti_2012(self):
local_path = os.path.join(self.data_dir, 'data_stereo_flow')
if not os.path.isdir(local_path):
self._download_and_extract(self.KITTI_2012_URL, local_path)
def _maybe_get_kitti_2015(self):
local_path = os.path.join(self.data_dir, 'data_scene_flow')
if not os.path.isdir(local_path):
self._download_and_extract(self.KITTI_2015_URL, local_path)
def _maybe_get_kitti_raw(self):
base_url = self.KITTI_RAW_URL
local_dir = os.path.join(self.data_dir, 'kitti_raw')
records = raw_records.get_kitti_records(self.development)
downloaded_records = False
for i, record in enumerate(records):
date_str = record.split("_drive_")[0]
foldername = record + "_extract"
date_folder = os.path.join(local_dir, date_str)
if not os.path.isdir(date_folder):
os.makedirs(date_folder)
local_path = os.path.join(date_folder, foldername)
if not os.path.isdir(local_path):
url = base_url + record + "/" + foldername + '.zip'
print(url)
self._download_and_extract(url, local_dir)
downloaded_records = True
# Remove unused directories
tryremove(os.path.join(local_path, 'velodyne_points'))
tryremove(os.path.join(local_path, 'oxts'))
tryremove(os.path.join(local_path, 'image_00'))
tryremove(os.path.join(local_path, 'image_01'))
if downloaded_records:
print("Downloaded all KITTI raw files.")
exclude_target_dir = os.path.join(self.data_dir, 'exclude_target_dir')
exclude_lists_dir = '../files/kitti_excludes'
excluded = exclude_test_and_train_images(local_dir, exclude_lists_dir, exclude_target_dir,
remove=True)
```
#### File: e2eflow/middlebury/data.py
```python
import os
import sys
import numpy as np
import matplotlib.image as mpimg
from ..core.data import Data
from ..util import tryremove
class MiddleburyData(Data):
MDB_FLOW_URL = 'http://vision.middlebury.edu/flow/data/comp/zip/other-gt-flow.zip'
MDB_COLOR_URL = 'http://vision.middlebury.edu/flow/data/comp/zip/other-color-twoframes.zip'
MDB_EVAL_URL = 'http://vision.middlebury.edu/flow/data/comp/zip/eval-color-twoframes.zip'
dirs = ['middlebury']
def __init__(self, data_dir, stat_log_dir=None,
development=True, fast_dir=None):
super().__init__(data_dir, stat_log_dir,
development=development,
fast_dir=fast_dir)
def _fetch_if_missing(self):
local_path = os.path.join(self.data_dir, 'middlebury')
if not os.path.isdir(local_path):
self._download_and_extract(self.MDB_FLOW_URL, local_path)
self._download_and_extract(self.MDB_COLOR_URL, local_path)
self._download_and_extract(self.MDB_EVAL_URL, local_path)
for name in ['Beanbags', 'DogDance', 'MiniCooper', 'Walking']:
tryremove(os.path.join(local_path, 'other-data', name))
def get_raw_dirs(self):
raise NotImplementedError("Can not train on middlebury")
```
#### File: e2eflow/synthia/data.py
```python
import os
import sys
import numpy as np
import matplotlib.image as mpimg
from ..core.data import Data
from ..util import tryremove
URL = 'http://synthia-dataset.cvc.uab.cat/SYNTHIA_SEQS/'
SEQS = [ # SUMMER and WINTER from sequences `1 - 6`
'SYNTHIA-SEQS-01-SUMMER',
'SYNTHIA-SEQS-01-WINTER',
'SYNTHIA-SEQS-02-SUMMER',
'SYNTHIA-SEQS-02-WINTER',
'SYNTHIA-SEQS-04-SUMMER',
'SYNTHIA-SEQS-04-WINTER',
'SYNTHIA-SEQS-05-SUMMER',
'SYNTHIA-SEQS-05-WINTER',
'SYNTHIA-SEQS-06-SUMMER',
'SYNTHIA-SEQS-06-WINTER'
]
DEV_SEQS = ['SYNTHIA-SEQS-01-SUMMER']
class SynthiaData(Data):
dirs = ['synthia']
def __init__(self, data_dir, stat_log_dir=None,
development=True, fast_dir=None):
super().__init__(data_dir, stat_log_dir,
development=development,
fast_dir=fast_dir)
def _fetch_if_missing(self):
self._maybe_get_synthia()
def get_raw_dirs(self):
root_dir = os.path.join(self.current_dir, 'synthia')
dirs = []
seqs = os.listdir(root_dir)
for seq in seqs:
seq_dir = os.path.join(root_dir, seq, seq, 'RGB', 'Stereo_Left')
views = os.listdir(seq_dir)
for view in views:
view_dir = os.path.join(seq_dir, view)
dirs.extend([view_dir])
return dirs
def _maybe_get_synthia(self):
seqs = DEV_SEQS if self.development else SEQS
for seq in seqs:
root_dir = os.path.join(self.data_dir, 'synthia')
url = URL + seq + '.rar'
url_dir = os.path.join(root_dir, seq)
if not os.path.isdir(url_dir):
self._download_and_extract(url, url_dir, 'rar')
# Remove unused directories
tryremove(os.path.join(url_dir, seq, 'GT'))
tryremove(os.path.join(url_dir, seq, 'Depth'))
tryremove(os.path.join(url_dir, seq, 'CameraParams'))
tryremove(os.path.join(url_dir, 'RGB', 'Stereo_Right'))
```
#### File: e2eflow/test/test_losses.py
```python
import numpy as np
import tensorflow as tf
from ..core.losses import _smoothness_deltas, create_outgoing_mask, \
gradient_loss, compute_losses, ternary_loss
from ..core.input import read_png_image
class LossesTest(tf.test.TestCase):
def test_smoothness_deltas(self):
flow = np.ones([1,3,3,2], np.float32)
flow[0, :, :, 0] = [[0,0,0],
[0,8,3],
[0,1,0]]
flow[0, :, :, 1] = [[0,0,0],
[0,8,3],
[0,1,0]]
delta_u_, delta_v_, mask_ = _smoothness_deltas(flow)
delta_u_ = tf.multiply(delta_u_, mask_)
delta_v_ = tf.multiply(delta_v_, mask_)
sess = tf.Session()
delta_u, delta_v, mask = sess.run([delta_u_, delta_v_, mask_])
self.assertAllEqual(mask[0,:,:,0], [[1,1,0],
[1,1,0],
[1,1,0]])
self.assertAllEqual(mask[0,:,:,1], [[1,1,1],
[1,1,1],
[0,0,0]])
self.assertAllEqual(delta_u[0,:,:,0], [[0,0,0],
[-8,5,0],
[-1,1,0]])
self.assertAllEqual(delta_u[0,:,:,1], [[0,-8,-3],
[0,7,3],
[0,0,0]])
self.assertAllEqual(delta_v[0,:,:,0], [[0,0,0],
[-8,5,0],
[-1,1,0]])
self.assertAllEqual(delta_v[0,:,:,1], [[0,-8,-3],
[0,7,3],
[0,0,0]])
def test_create_outgoing_mask_all_directions(self):
flow = np.ones([1,3,3,2], np.float32)
flow[0, :, :, 0] = [[0,0,1],
[-1,3,0],
[0,1,0]]
flow[0, :, :, 1] = [[-1,0,0],
[0,0,0],
[1,-1,0]]
sess = tf.Session()
mask = sess.run(create_outgoing_mask(flow))
self.assertAllEqual(mask[0,:,:,0], [[0,1,0],
[0,0,1],
[0,1,1]])
def test_create_outgoing_mask_large_movement(self):
flow = np.ones([1,3,3,2], np.float32)
flow[0, :, :, 0] = [[3,2,1],
[2,1,0],
[0,-2,-1]]
flow[0, :, :, 1] = [[0,0,0],
[0,0,0],
[0,0,0]]
sess = tf.Session()
mask = sess.run(create_outgoing_mask(flow))
self.assertAllEqual(mask[0,:,:,0], [[0,0,0],
[1,1,1],
[1,0,1]])
# def test_forward_backward_loss(self):
# im1 = np.ones([1,3,3,3], np.float32)
# im2 = np.ones([1,3,3,3], np.float32)
# mask = np.ones([1,3,3,1], np.float32)
# mask[0, :, :, 0] = [[1,1,0],
# [1,1,0],
# [0,0,0]]
#
# flow_fw = np.ones([1,3,3,2], np.float32)
# flow_fw[0, :, :, 0] = [[1,1,1],
# [1,1,1],
# [1,1,1]]
# flow_fw[0, :, :, 1] = [[1,1,1],
# [1,1,1],
# [1,1,1]]
# flow_bw = np.ones([1,3,3,2], np.float32)
# flow_bw[0, :, :, 0] = [[-1,-1,-1],
# [-1,-1,-1],
# [-1,-1,-1]]
# flow_bw[0, :, :, 1] = [[-1,-1,-1],
# [-1,-1,-1],
# [-1,-1,-1]]
#
# sess = tf.Session()
# losses = sess.run(compute_losses(im1, im2, flow_fw, flow_bw, mask))
# self.assertAllClose(losses['fb'], 0.0, atol=1e-2)
def test_gradient_loss(self):
im1 = np.ones([1,3,3,3], np.float32)
im2 = np.ones([1,3,3,3], np.float32)
mask = np.ones([1,3,3,1], np.float32)
im1[0, :, :, 0] = [[0,1,0],
[0,2,0],
[0,3,4]]
im1[0, :, :, 1] = [[0,1,0],
[0,2,0],
[0,3,4]]
im1[0, :, :, 2] = [[0,1,0],
[0,2,0],
[0,3,4]]
im2[0, :, :, 0] = [[1,2,1],
[1,3,1],
[1,4,5]]
im2[0, :, :, 1] = [[1,2,1],
[1,3,1],
[1,4,5]]
im2[0, :, :, 2] = [[1,2,1],
[1,3,1],
[1,4,5]]
sess = tf.Session()
loss = sess.run(gradient_loss(im1, im2, mask))
self.assertAllClose(loss, 0.0, atol=1e-2)
def test_ternary_reference(self):
def _ternary_reference_test(im1_name, im2_name, expected):
with self.test_session(use_gpu=True) as sess:
im1 = tf.expand_dims(read_png_image([im1_name]), 0)
im2 = tf.expand_dims(read_png_image([im2_name]), 0)
_, height, width, _ = tf.unstack(tf.shape(im1))
mask = tf.ones([1, height, width, 1])
sess.run(tf.global_variables_initializer())
coord = tf.train.Coordinator()
threads = tf.train.start_queue_runners(sess=sess, coord=coord)
scale = tf.cast(height * width, tf.float32)
loss_ = ternary_loss(im1, im2, mask, max_distance=3, truncate=22) * scale
loss = sess.run(loss_)
print(loss)
#self.assertAllClose(loss, expected)
_ternary_reference_test('../test_data/frame_0011.png',
'../test_data/frame_0012.png',
8.86846e+06)
_ternary_reference_test('../test_data/frame_0016.png',
'../test_data/frame_0017.png',
6.75537e+06)
_ternary_reference_test('../test_data/frame_0018.png',
'../test_data/frame_0019.png',
8.22283e+06)
_ternary_reference_test('../test_data/frame_0028.png',
'../test_data/frame_0029.png',
8.05619e+06)
``` |
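The assertions in the two `create_outgoing_mask` tests pin down its semantics: a pixel keeps mask 1 only if adding its flow vector leaves it inside the image. The real implementation lives in `..core.losses` and is not shown here; the NumPy sketch below only illustrates the behaviour the tests encode (shape handling and the boundary convention are assumptions, not the project's code).
```python
import numpy as np

def outgoing_mask_reference(flow):
    """Illustrative only: 1.0 where pixel + flow stays inside the image."""
    _, height, width, _ = flow.shape
    xs, ys = np.meshgrid(np.arange(width), np.arange(height))
    new_x = xs + flow[0, :, :, 0]   # channel 0: horizontal displacement
    new_y = ys + flow[0, :, :, 1]   # channel 1: vertical displacement
    inside = (new_x >= 0) & (new_x <= width - 1) & \
             (new_y >= 0) & (new_y <= height - 1)
    return inside.astype(np.float32)

flow = np.zeros([1, 3, 3, 2], np.float32)
flow[0, :, :, 0] = [[3, 2, 1], [2, 1, 0], [0, -2, -1]]
print(outgoing_mask_reference(flow))  # matches the "large movement" expectation above
```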
{
"source": "3bonbon/numbergame",
"score": 3
} |
#### File: numbergame/tests/test_is_valid_number.py
```python
import pytest
from numbergame import Player
from numbergame.model import is_valid_number
def test_valid_numbers():
assert is_valid_number('0123')
assert is_valid_number('9876')
assert is_valid_number('4739')
assert is_valid_number('1029')
def test_invalid_numbers():
assert not is_valid_number('0122')
assert not is_valid_number('0000')
assert not is_valid_number('123')
assert not is_valid_number('12345')
assert not is_valid_number('a')
assert not is_valid_number('123o')
assert not is_valid_number('atoz')
assert not is_valid_number('%^$@')
assert not is_valid_number('')
def test_valid_player_creation():
p = Player('0123')
def test_invalid_player_creation():
with pytest.raises(ValueError) as e:
p = Player('')
``` |
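The assertions above fully constrain `is_valid_number`: a guess is valid only if it is exactly four characters long, every character is a digit, and no digit repeats. A minimal sketch consistent with these tests (not necessarily the package's actual implementation):
```python
def is_valid_number_sketch(value: str) -> bool:
    """Exactly four digit characters, all distinct."""
    return len(value) == 4 and value.isdigit() and len(set(value)) == 4

assert is_valid_number_sketch('0123')
assert not is_valid_number_sketch('0122')
assert not is_valid_number_sketch('123o')
```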
{
"source": "3bst0r/magenta",
"score": 2
} |
#### File: models/my_rnn/my_rnn_simple_train.py
```python
import os
import argparse
# import pydevd
import tensorflow.compat.v1 as tf
from tensorflow.compat.v1 import keras
from magenta.models.my_rnn.my_simple_rnn_model import BASIC_EVENT_DIM
from magenta.models.my_rnn.my_simple_rnn_model import get_simple_rnn_model
from magenta.common import is_valid_file
from magenta.models.my_rnn.my_rnn_generate import melody_seq_to_midi
import uuid
BATCH_SIZE = 128
NUM_THREADS = 7
tmp_dir = os.environ["TMPDIR"]
# tf.disable_eager_execution()
print("executing eagerly:")
print(tf.executing_eagerly())
# tf.config.experimental_run_functions_eagerly(True)
tf.config.threading.set_inter_op_parallelism_threads(NUM_THREADS)
tf.config.threading.set_intra_op_parallelism_threads(NUM_THREADS)
def get_parse_function_shift(event_dim, label_shape=None):
sequence_features = {
'inputs': tf.io.FixedLenSequenceFeature(shape=[event_dim],
dtype=tf.float32),
'labels': tf.io.FixedLenSequenceFeature(shape=label_shape or [],
dtype=tf.int64)}
def shift_melody(example):
# one example is one melody
_, sequence = tf.parse_single_sequence_example(serialized=example,
sequence_features=sequence_features)
# return melody from first step as input and melody shifted by one step to the left as label
return sequence['inputs'][:-1], sequence['inputs'][1:]
return shift_melody
class CustomSaver(keras.callbacks.Callback):
def __init__(self, model_prefix):
self.model_prefix = model_prefix
def on_epoch_end(self, epoch, logs=None):
if logs is None:
logs = {}
# if (epoch % 2) == 0:
self.model.save(os.path.join(self.model_prefix, "model_{}.tf".format(epoch)), save_format='tf')
def main(sequence_example_file_path, model_prefix):
print(tf.__version__)
# read data
ds = tf.data.TFRecordDataset(sequence_example_file_path)
#ds = ds.map(get_parse_function_shift(BASIC_EVENT_DIM))
#ds = ds.shuffle(buffer_size=2048)
ds = ds.take(int(1E5))
ds = ds.padded_batch(batch_size=BATCH_SIZE, padded_shapes=([None, BASIC_EVENT_DIM], [None, BASIC_EVENT_DIM]))
# shape is now [2 : input and label sequences
# 128 : batch size
# ? : padded sequence length per batch
# 38] : event dimensionality
saver = CustomSaver(model_prefix)
model = get_simple_rnn_model(BASIC_EVENT_DIM, is_Training=True)
model.compile(loss='categorical_crossentropy', optimizer='adam', metrics=['accuracy'])
model.summary()
model.fit(ds, epochs=25, callbacks=[saver])
model.save(os.path.join(model_prefix, 'model_final.tf'), save_format='tf')
if __name__ == '__main__':
parser = argparse.ArgumentParser(description="Train RNN Model")
parser.add_argument('--sequence_example_file', dest="filename", required=True,
help="File containing sequence examples for training or evaluation",
type=lambda x: is_valid_file(parser, x))
parser.add_argument('--prefix', dest="prefix", required=True,
help="All model iterations will be saved in $TMP_DIR/prefix/")
args = parser.parse_args()
main(sequence_example_file_path=args.filename, model_prefix=os.path.join(tmp_dir, args.prefix))
``` |
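The `ds.map(...)` call is commented out above, so the script batches raw serialized records. When the TFRecords hold SequenceExamples matching the feature spec in `get_parse_function_shift`, the decode-and-shift step would usually sit between reading and batching. The wiring below is a sketch under that assumption, not the project's confirmed pipeline:
```python
# Sketch only: reuses get_parse_function_shift, BASIC_EVENT_DIM and BATCH_SIZE from this file.
def build_training_dataset(path, event_dim=BASIC_EVENT_DIM, batch_size=BATCH_SIZE):
    ds = tf.data.TFRecordDataset(path)
    ds = ds.map(get_parse_function_shift(event_dim))      # yields (inputs[:-1], inputs[1:])
    ds = ds.shuffle(buffer_size=2048)
    return ds.padded_batch(
        batch_size=batch_size,
        padded_shapes=([None, event_dim], [None, event_dim]))
```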
{
"source": "3c1u/HiFiSinger-1",
"score": 4
} |
#### File: 3c1u/HiFiSinger-1/Arg_Parser.py
```python
import argparse
def Recursive_Parse(args_Dict):
parsed_Dict = {}
for key, value in args_Dict.items():
if isinstance(value, dict):
value = Recursive_Parse(value)
        parsed_Dict[key] = value
args = argparse.Namespace()
args.__dict__ = parsed_Dict
return args
``` |
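A small usage sketch: `Recursive_Parse` turns a nested hyper-parameter dictionary (for example, one loaded from YAML) into nested `argparse.Namespace` objects so values can be read as attributes. The dictionary below is made up for illustration.
```python
hp_dict = {
    'Sound': {'Sample_Rate': 22050, 'Frame_Shift': 256},
    'Train': {'Batch_Size': 32, 'Learning_Rate': 1e-4},
}
hp = Recursive_Parse(hp_dict)
print(hp.Sound.Sample_Rate)    # 22050
print(hp.Train.Learning_Rate)  # 0.0001
```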
{
"source": "3c7/common-osint-model",
"score": 2
} |
#### File: common-osint-model/common_osint_model/certificate.py
```python
from cryptography.x509 import load_pem_x509_certificate
from cryptography.x509.extensions import SubjectAlternativeName
from cryptography.hazmat.backends import default_backend
from cryptography.hazmat.primitives.hashes import MD5, SHA1, SHA256
from cryptography.hazmat.backends.openssl.x509 import _Certificate as Certificate
from binascii import hexlify
from typing import Union
from datetime import timezone
from common_osint_model.utils import flatten, list_cleanup
def from_x509_pem(pem: Union[str, bytes]) -> dict:
"""
Converts a certificate in PEM format given as bytes or as string to a dictionary.
:param pem: PEM certificates as either string or bytes
:return: dictionary in common format
"""
g = {}
if isinstance(pem, str):
pem = pem.encode("ascii")
certificate = load_pem_x509_certificate(pem, default_backend())
g.update(certificate_dn_extraction(certificate))
g.update(certificate_fingerprint_extraction(certificate))
g.update(certificate_validity_extraction(certificate))
g.update(dict(serial_number=certificate.serial_number))
return list_cleanup(g)
def from_x509_pem_flattened(pem: Union[str, bytes]) -> dict:
"""
Wraps from_x509_pem and flattens the output dict
:param pem: PEM certificates as either string or bytes
:return: flattened dictionary in common format
"""
return flatten(from_x509_pem(pem))
def certificate_dn_extraction(certificate: Certificate) -> dict:
"""
Extracts distinguished names of the given certificate
:param certificate: object of type cryptography.hazmat.backends.openssl.x509._Certificate
:return: dictionary containing issuer and subject DN
"""
dns = dict(subject={}, issuer={}, subject_dn=None, issuer_dn=None)
terms = dict(
CN='common_name',
C='country',
L='locality',
ST='province',
O='organization',
OU='organizational_unit',
email='email_address'
)
dns["issuer_dn"] = certificate.issuer.rfc4514_string()
for term in certificate.issuer.rfc4514_string().split(','):
        k, v = term.split("=", 1)
key = terms[k.strip()]
if key in dns["issuer"].keys():
if isinstance(dns["issuer"][key], list):
dns["issuer"][key].append(v)
else:
dns["issuer"][key] = [dns["issuer"][key], v]
else:
dns["issuer"].update({
terms[k.strip()]: v
})
dns["subject_dn"] = certificate.subject.rfc4514_string()
for term in certificate.subject.rfc4514_string().split(','):
        k, v = term.split("=", 1)
key = terms[k.strip()]
if key in dns["subject"].keys():
if isinstance(dns["subject"][key], list):
dns["subject"][key].append(v)
else:
dns["subject"][key] = [dns["subject"][key], v]
else:
dns["subject"].update({
terms[k.strip()]: v
})
try:
subjectAltName = certificate.extensions.get_extension_for_oid(SubjectAlternativeName.oid)
    except Exception:
subjectAltName = None
if subjectAltName:
dns["subject"]["common_name"] = [dns["subject"]["common_name"]]
for v in subjectAltName.value:
if v.value not in dns["subject"]["common_name"]:
dns["subject"]["common_name"].append(v.value)
return dns
def certificate_fingerprint_extraction(certificate: Certificate) -> dict:
"""
Calculates certificate fingerprints as MD5, SHA1 and SHA256
:param certificate: object of type cryptography.hazmat.backends.openssl.x509._Certificate
:return: dictionary containing all fingerprints
"""
return {
"fingerprint": {
"md5": hexlify(certificate.fingerprint(MD5())).decode("ascii"),
"sha1": hexlify(certificate.fingerprint(SHA1())).decode("ascii"),
"sha256": hexlify(certificate.fingerprint(SHA256())).decode("ascii"),
}
}
def certificate_validity_extraction(certificate: Certificate) -> dict:
"""
Extracts validity information of given certificate
:param certificate: object of type cryptography.hazmat.backends.openssl.x509._Certificate
:return: dictionary containing the validity timestamps
"""
return {
"validity": {
"start": int(certificate.not_valid_before.timestamp()),
"start_readable": certificate.not_valid_before.replace(tzinfo=timezone.utc, microsecond=0).isoformat(),
"end": int(certificate.not_valid_after.timestamp()),
"end_readable": certificate.not_valid_after.replace(tzinfo=timezone.utc, microsecond=0).isoformat(),
"length": int((certificate.not_valid_after - certificate.not_valid_before).total_seconds())
}
}
```
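Usage sketch for `from_x509_pem`: generate a throwaway self-signed certificate with `cryptography` and feed its PEM bytes to the converter. All names and validity values below are invented, the import path of the module is assumed from this repository layout, and the snippet assumes a `cryptography` version compatible with the module above.
```python
from datetime import datetime, timedelta

from cryptography import x509
from cryptography.x509.oid import NameOID
from cryptography.hazmat.primitives import hashes, serialization
from cryptography.hazmat.primitives.asymmetric import rsa

from common_osint_model.certificate import from_x509_pem  # path assumed

key = rsa.generate_private_key(public_exponent=65537, key_size=2048)
name = x509.Name([x509.NameAttribute(NameOID.COMMON_NAME, u"example.local")])
cert = (
    x509.CertificateBuilder()
    .subject_name(name)
    .issuer_name(name)
    .public_key(key.public_key())
    .serial_number(x509.random_serial_number())
    .not_valid_before(datetime.utcnow())
    .not_valid_after(datetime.utcnow() + timedelta(days=1))
    .sign(key, hashes.SHA256())
)
pem = cert.public_bytes(serialization.Encoding.PEM)
parsed = from_x509_pem(pem)
print(parsed["fingerprint"]["sha256"])
print(parsed["subject_dn"])
```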
#### File: common_osint_model/models/http.py
```python
import base64
from typing import Dict, List, Optional, Union
import mmh3
from pydantic import BaseModel
from common_osint_model.models import ShodanDataHandler, CensysDataHandler, BinaryEdgeDataHandler, Logger
from common_osint_model.utils import hash_all
class HTTPComponentContentFavicon(BaseModel, ShodanDataHandler, CensysDataHandler, BinaryEdgeDataHandler, Logger):
"""Represents the favicon which might be included in HTTP components."""
raw: Optional[str]
md5: Optional[str]
sha1: Optional[str]
sha256: Optional[str]
murmur: Optional[str]
shodan_murmur: Optional[str]
@classmethod
def from_shodan(cls, d: Dict):
"""Creates an instance of this class based on Shodan data given as dictionary."""
if not isinstance(d, Dict):
raise TypeError(f"Method HTTPComponentContentFavicon.from_shodan expects parameter d to be a dictionary, "
f"but it was {type(d)}.")
raw = d["http"]["favicon"]["data"]
raw = base64.b64decode(raw)
md5, sha1, sha256, murmur = hash_all(raw)
shodan_murmur = mmh3.hash(d["http"]["favicon"]["data"])
cls.info("Shodan's favicon hash only hashes the base64 encoded favicon, not the data itself. The hash can be "
"found as \"shodan_murmur\" in this instance. \"murmur\" and the other hashes are calculated based on "
"the raw data of the favicon.")
return HTTPComponentContentFavicon(
raw=d["http"]["favicon"]["data"],
md5=md5,
sha1=sha1,
sha256=sha256,
murmur=murmur,
shodan_murmur=shodan_murmur
)
@classmethod
def from_censys(cls, d: Dict):
"""Not supported by Censys right now."""
return None
@classmethod
def from_binaryedge(cls, d: Union[Dict, List]):
favicon = d["result"]["data"]["response"]["favicon"]["content"]
favicon_bytes = base64.b64decode(favicon.encode("utf-8"))
md5, sha1, sha256, murmur = hash_all(favicon_bytes)
shodan_murmur = mmh3.hash(favicon.encode("utf-8"))
return HTTPComponentContentFavicon(
raw=favicon,
md5=md5,
sha1=sha1,
sha256=sha256,
murmur=murmur,
shodan_murmur=shodan_murmur
)
class HTTPComponentContentRobots(BaseModel, ShodanDataHandler, CensysDataHandler):
"""Represents the robots.txt file in webroots."""
raw: Optional[str]
md5: Optional[str]
sha1: Optional[str]
sha256: Optional[str]
murmur: Optional[str]
@classmethod
def from_shodan(cls, d: Dict):
"""Creates an instance of this class based on Shodan data given as dictionary."""
if not isinstance(d, Dict):
raise TypeError(
f"Method HTTPComponentContentRobots.from_shodan expects parameter d to be a dictionary, "
f"but it was {type(d)}.")
raw = d["http"]["robots"].encode("utf-8")
md5, sha1, sha256, murmur = hash_all(raw)
return HTTPComponentContentRobots(
raw=raw,
md5=md5,
sha1=sha1,
sha256=sha256,
murmur=murmur
)
@classmethod
def from_censys(cls, d: Dict):
"""Not supported by Censys right now."""
return None
class HTTPComponentContentSecurity(BaseModel, ShodanDataHandler, CensysDataHandler):
"""Represents the security.txt file in webroots."""
raw: Optional[str]
md5: Optional[str]
sha1: Optional[str]
sha256: Optional[str]
murmur: Optional[str]
@classmethod
def from_shodan(cls, d: Dict):
"""Creates an instance of this class based on Shodan data given as dictionary."""
if not isinstance(d, Dict):
raise TypeError(
f"Method HTTPComponentContentRobots.from_shodan expects parameter d to be a dictionary, "
f"but it was {type(d)}.")
raw = d["http"]["securitytxt"].encode("utf-8")
md5, sha1, sha256, murmur = hash_all(raw)
return HTTPComponentContentRobots(
raw=raw,
md5=md5,
sha1=sha1,
sha256=sha256,
murmur=murmur
)
@classmethod
def from_censys(cls, d: Dict):
"""Not supported by Censys right now."""
return None
class HTTPComponentContent(BaseModel, ShodanDataHandler, CensysDataHandler, BinaryEdgeDataHandler, Logger):
"""Represents the content (body) of HTTP responses."""
raw: Optional[str]
length: Optional[int]
md5: Optional[str]
sha1: Optional[str]
sha256: Optional[str]
murmur: Optional[str]
favicon: Optional[HTTPComponentContentFavicon]
robots_txt: Optional[HTTPComponentContentRobots]
security_txt: Optional[HTTPComponentContentSecurity]
@classmethod
def from_shodan(cls, d: Dict):
"""Creates an instance of this class based on Shodan data given as dictionary."""
if not isinstance(d, Dict):
raise TypeError(f"Method HTTPComponentContent.from_shodan expects parameter d to be a dictionary, "
f"but it was {type(d)}.")
favicon = None
if "favicon" in d["http"]:
cls.debug("Favicon key found in Shodan data.")
favicon = HTTPComponentContentFavicon.from_shodan(d)
security_txt = None
if d["http"]["securitytxt"]:
cls.debug("Security.txt key found in Shodan data.")
security_txt = HTTPComponentContentSecurity.from_shodan(d)
robots_txt = None
if d["http"]["robots"]:
cls.debug("Robots.txt key found in Shodan data.")
robots_txt = HTTPComponentContentRobots.from_shodan(d)
raw = d["http"]["html"].encode("utf-8")
md5, sha1, sha256, murmur = hash_all(raw)
return HTTPComponentContent(
raw=raw,
length=len(raw),
md5=md5,
sha1=sha1,
sha256=sha256,
murmur=murmur,
favicon=favicon,
robots_txt=robots_txt,
security_txt=security_txt
)
@classmethod
def from_censys(cls, d: Dict):
"""Creates an instance of this class based on Censys (2.0) data given as dictionary."""
http = d["http"]["response"]
raw = http["body"] if http["body_size"] > 0 else ""
md5, sha1, sha256, murmur = hash_all(raw.encode("utf-8"))
return HTTPComponentContent(
raw=raw,
length=len(raw),
md5=md5,
sha1=sha1,
sha256=sha256,
murmur=murmur,
favicon=HTTPComponentContentFavicon.from_censys(d),
robots_txt=HTTPComponentContentRobots.from_censys(d),
security_txt=HTTPComponentContentSecurity.from_censys(d)
)
@classmethod
def from_binaryedge(cls, d: Union[Dict, List]):
"""Creates an instance of this class based on BinaryEdge data given as dictionary. Robots and Security.txt are
not supported by BinaryEdge."""
http_response = d["result"]["data"]["response"]
raw = http_response["body"]["content"]
md5, sha1, sha256, murmur = hash_all(raw.encode("utf-8"))
return HTTPComponentContent(
raw=raw,
length=len(raw),
md5=md5,
sha1=sha1,
sha256=sha256,
murmur=murmur,
favicon=HTTPComponentContentFavicon.from_binaryedge(d)
)
class HTTPComponent(BaseModel, ShodanDataHandler, CensysDataHandler, BinaryEdgeDataHandler):
"""Represents the HTTP component of services."""
headers: Optional[Dict[str, str]]
content: Optional[HTTPComponentContent]
@classmethod
def from_shodan(cls, d: Dict):
"""Creates an instance of this class based on Shodan data given as dictionary."""
if not isinstance(d, Dict):
raise TypeError(f"Method HTTPComponent.from_shodan expects parameter d to be a dictionary, "
f"but it was {type(d)}.")
content = HTTPComponentContent.from_shodan(d)
banner = d["data"]
lines = banner.split("\r\n")
headers = {}
for line in lines:
if ":" in line:
key, value = line.split(":", maxsplit=1)
headers[key.strip()] = value.strip()
return HTTPComponent(
headers=headers,
content=content
)
@classmethod
def from_censys(cls, d: Dict):
http = d["http"]["response"]
headers = {}
for k, v in http["headers"].items():
if k[0] == "_":
continue
headers.update({
k.replace("_", "-"): " ".join(v)
})
return HTTPComponent(
headers=headers,
content=HTTPComponentContent.from_censys(d)
)
@classmethod
def from_binaryedge(cls, d: Union[Dict, List]):
http_response = d["result"]["data"]["response"]
headers = http_response["headers"]["headers"]
return HTTPComponent(
headers=headers,
content=HTTPComponentContent.from_binaryedge(d)
)
```
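A usage sketch for the Shodan path: `HTTPComponent.from_shodan` only touches a handful of keys (`data`, `http.html`, `http.robots`, `http.securitytxt`, optionally `http.favicon`), so a minimal made-up record is enough to exercise it. The import path is an assumption based on this repository layout.
```python
from common_osint_model.models.http import HTTPComponent  # path assumed

shodan_record = {
    "data": "HTTP/1.1 200 OK\r\nServer: nginx\r\nContent-Type: text/html\r\n\r\n",
    "http": {
        "html": "<html><body>hello</body></html>",
        "robots": None,
        "securitytxt": None,
    },
}
component = HTTPComponent.from_shodan(shodan_record)
print(component.headers["Server"])   # nginx
print(component.content.sha256)      # sha256 of the HTML body
```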
#### File: common_osint_model/models/tls.py
```python
import binascii
from datetime import datetime
from typing import Dict, List, Optional, Union
import pytz
from cryptography.hazmat.primitives.hashes import MD5, SHA1, SHA256
from cryptography.x509 import OID_COMMON_NAME, ExtensionOID, DNSName, ExtensionNotFound
from cryptography.x509 import load_pem_x509_certificate
from pydantic import BaseModel
from common_osint_model.models import ShodanDataHandler, CensysDataHandler, BinaryEdgeDataHandler, Logger
class TLSComponentCertificateEntity(BaseModel, ShodanDataHandler, CensysDataHandler, BinaryEdgeDataHandler):
"""Represents certificate entities, typically issuer and subject."""
dn: Optional[str]
country: Optional[str]
state: Optional[str]
locality: Optional[str]
organization: Optional[str]
organizational_unit: Optional[str]
common_name: Optional[str]
email_address: Optional[str]
@classmethod
def from_shodan(cls, d: Dict):
"""Creates an instance of this class using a given Shodan data dictionary."""
if all(key not in d for key in ["C", "L", "CN", "O", "ST"]):
raise KeyError("The dictionary given to TLSComponentCertificateEntity.from_shodan is missing the typical "
"shodan keys.")
c = d.get("C", None)
st = d.get("ST", None)
l = d.get("L", None)
o = d.get("O", None)
ou = d.get("OU", None)
cn = d.get("CN", None)
email = d.get("emailAddress", None)
dn = ""
if c:
dn += f"C={c}, "
if st:
dn += f"ST={st}, "
if l:
dn += f"L={l}, "
if o:
dn += f"O={o}, "
if ou:
dn += f"OU={ou}, "
if cn:
if not email:
dn += f"CN={cn}"
else:
dn += f"CN={cn}/Email={email}"
elif not cn and email:
dn += f"Email={email}"
if len(dn) > 0:
while dn[-1] in [",", " "]:
dn = dn[:-1]
return TLSComponentCertificateEntity(
dn=dn,
country=c,
state=st,
locality=l,
organization=o,
organizational_unit=ou,
common_name=cn,
            email_address=email
)
@classmethod
def from_censys(cls, d: Dict):
"""Creates an instance of this class based on Censys data given as dictionary."""
if all(key not in d for key in ["common_name", "locality", "organization", "organizational_unit", "province"]):
raise KeyError("The dictionary given to TLSComponentCertificateEntity.from_shodan is missing the typical "
"shodan keys.")
c = d.get("country", [])
st = d.get("province", [])
l = d.get("locality", [])
o = d.get("organization", [])
ou = d.get("organizational_unit", [])
cn = d.get("common_name", [])
email = d.get("email_address", [])
dn = ""
if c:
for item in c:
dn += f"C={item}, "
if st:
for item in st:
dn += f"ST={item}, "
if l:
for item in l:
dn += f"L={item}, "
if o:
for item in o:
dn += f"O={item}, "
if ou:
for item in ou:
dn += f"OU={item}, "
done = False
if email and cn:
if len(email) == 1 and len(cn) == 1:
dn += f"CN={cn[0]}/Email={email[0]}"
done = True
else:
for item in cn:
dn += f"CN={item}, "
for item in email:
dn += f"Email={item}, "
done = True
if cn and not done:
for item in cn:
dn += f"CN={item}, "
# This one is probably wrong.
if email and not done:
for item in email:
dn += f"Email={item}, "
while dn[-1] in [" ", ","]:
dn = dn[:-1]
return TLSComponentCertificateEntity(
dn=dn,
country=", ".join(c),
state=", ".join(st),
locality=", ".join(l),
organization=", ".join(o),
organizational_unit=", ".join(ou),
common_name=", ".join(cn),
            email_address=", ".join(email)
)
@classmethod
def from_binaryedge(cls, d: Union[Dict, List]):
c = d.get("country_name", None)
st = d.get("state_or_province_name", None)
l = d.get("locality_name", None)
o = d.get("organization_name", None)
ou = d.get("organizational_unit_name", None)
cn = d.get("common_name", None)
email = d.get("email_address", None) # Todo: Check if this key is actually correct
dn = ""
if c:
dn += f"C={c}, "
if st:
dn += f"ST={st}, "
if l:
dn += f"L={l}, "
if o:
dn += f"O={o}, "
if ou:
dn += f"OU={ou}, "
if cn:
if not email:
dn += f"CN={cn}"
else:
dn += f"CN={cn}/Email={email}"
elif not cn and email:
dn += f"Email={email}"
        if dn:
            while dn[-1] in [",", " "]:
                dn = dn[:-1]
return TLSComponentCertificateEntity(
dn=dn,
country=c,
state=st,
locality=l,
organization=o,
organizational_unit=ou,
common_name=cn,
            email_address=email
)
class TLSComponentCertificate(BaseModel, ShodanDataHandler, CensysDataHandler, BinaryEdgeDataHandler, Logger):
"""Represents certificates."""
issuer: Optional[TLSComponentCertificateEntity]
subject: Optional[TLSComponentCertificateEntity]
issued: Optional[datetime]
expires: Optional[datetime]
expired: Optional[bool]
# More specifically, this is a certificate extension, but we keep it here because it's easier this way.
alternative_names: Optional[List[str]]
# The certificate itself
pem: Optional[str]
md5: Optional[str]
sha1: Optional[str]
sha256: Optional[str]
murmur: Optional[str]
@property
def domains(self) -> List[str]:
domains = []
if self.subject.common_name:
domains.append(self.subject.common_name)
if self.alternative_names:
domains.extend(self.alternative_names)
return list(set(domains))
@classmethod
def from_shodan(cls, d: Dict):
"""Creates an instance of this class based on Shodan data given as dictionary."""
if not isinstance(d, Dict):
raise TypeError(f"Method TLSComponentCertificate.from_shodan expects parameter d to be a dictionary, "
f"but it was {type(d)}.")
issuer = TLSComponentCertificateEntity.from_shodan(d["ssl"]["cert"]["issuer"])
subject = TLSComponentCertificateEntity.from_shodan(d["ssl"]["cert"]["subject"])
issued = datetime.strptime(d["ssl"]["cert"]["issued"], "%Y%m%d%H%M%SZ")
expires = datetime.strptime(d["ssl"]["cert"]["expires"], "%Y%m%d%H%M%SZ")
expired = True if d["ssl"]["cert"]["expired"] in ["true", True] else False
altnames = []
pem = None
md5, sha1, sha256 = None, None, None
for cert_pem in d["ssl"]["chain"]:
cert = load_pem_x509_certificate(cert_pem.encode("utf-8"))
# Check if this certificate is the leaf certificate by comparing the common name
attributes = cert.subject.get_attributes_for_oid(OID_COMMON_NAME)
for attribute in attributes:
if attribute.value == subject.common_name:
pem = cert_pem
md5, sha1, sha256 = (
binascii.hexlify(cert.fingerprint(MD5())).decode("utf-8"),
binascii.hexlify(cert.fingerprint(SHA1())).decode("utf-8"),
binascii.hexlify(cert.fingerprint(SHA256())).decode("utf-8")
)
try:
ext = cert.extensions.get_extension_for_oid(ExtensionOID.SUBJECT_ALTERNATIVE_NAME)
except ExtensionNotFound:
continue
altnames.extend(ext.value.get_values_for_type(DNSName))
if len(altnames) == 0:
altnames = None
else:
# This removes duplicates
altnames = list(set(altnames))
return TLSComponentCertificate(
issuer=issuer,
subject=subject,
issued=issued,
expires=expires,
expired=expired,
alternative_names=altnames,
pem=pem,
md5=md5,
sha1=sha1,
sha256=sha256
)
@classmethod
def from_censys(cls, d: Dict):
"""Creates an instance of this class based on Censys data given as dictionary."""
cls.info("Censys does not provide raw certificate data, to hashes must be taken from the data and cannot be "
"calculated.")
return TLSComponentCertificate(
issuer=TLSComponentCertificateEntity.from_censys(d["issuer"]),
subject=TLSComponentCertificateEntity.from_censys(d["subject"]),
issued=None,
expires=None,
expired=None,
alternative_names=d.get("names", None),
sha256=d["fingerprint"]
)
@classmethod
def from_binaryedge(cls, d: Union[Dict, List]):
pem = d["as_pem"]
data = d["as_dict"]
cert = load_pem_x509_certificate(pem.encode("utf-8"))
md5, sha1, sha256 = (
binascii.hexlify(cert.fingerprint(MD5())).decode("utf-8"),
binascii.hexlify(cert.fingerprint(SHA1())).decode("utf-8"),
binascii.hexlify(cert.fingerprint(SHA256())).decode("utf-8")
)
issued = datetime.fromisoformat(data["validity"]["not_before"]).replace(tzinfo=pytz.utc)
expires = datetime.fromisoformat(data["validity"]["not_after"]).replace(tzinfo=pytz.utc)
expired = datetime.utcnow().replace(tzinfo=pytz.utc) < expires
return TLSComponentCertificate(
issuer=TLSComponentCertificateEntity.from_binaryedge(data["issuer"]),
subject=TLSComponentCertificateEntity.from_binaryedge(data["subject"]),
issued=issued,
expires=expires,
expired=expired,
alternative_names=data["extensions"]["subject_alt_name"],
pem=pem,
md5=md5,
sha1=sha1,
sha256=sha256
)
class TLSComponent(BaseModel, ShodanDataHandler, CensysDataHandler, BinaryEdgeDataHandler):
"""Represents the TLS component of services."""
certificate: TLSComponentCertificate
# Todo: Add other attributes relevant to TLS such as CipherSuits etc.
@classmethod
def from_shodan(cls, d: Dict):
"""Creates an instance of this class based on Shodan data given as dictionary."""
if not isinstance(d, Dict):
raise TypeError(f"Method TLSComponent.from_shodan expects parameter d to be a dictionary, "
f"but it was {type(d)}.")
return TLSComponent(
certificate=TLSComponentCertificate.from_shodan(d)
)
@classmethod
def from_censys(cls, d: Dict):
tls = d["tls"]
return TLSComponent(
certificate=TLSComponentCertificate.from_censys(tls["certificates"]["leaf_data"])
)
@classmethod
def from_binaryedge(cls, d: Union[Dict, List]):
"""Creates an instance of this class based on BinaryEdge data given as dictionary."""
certificate_chain = d["result"]["data"]["cert_info"]["certificate_chain"]
return TLSComponent(
certificate=TLSComponentCertificate.from_binaryedge(certificate_chain[0])
)
``` |
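A short usage sketch for the entity parser: `TLSComponentCertificateEntity.from_shodan` expects the subject or issuer dictionary as Shodan returns it inside `ssl.cert`. The values below are invented and the import path is assumed from this repository layout.
```python
from common_osint_model.models.tls import TLSComponentCertificateEntity  # path assumed

subject = {"C": "US", "ST": "CA", "O": "Example Corp", "CN": "www.example.com"}
entity = TLSComponentCertificateEntity.from_shodan(subject)
print(entity.dn)           # C=US, ST=CA, O=Example Corp, CN=www.example.com
print(entity.common_name)  # www.example.com
```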
{
"source": "3c7/larva",
"score": 2
} |
#### File: larva/commands/command.py
```python
class Command:
def __init__(self):
pass
def run(self, **kwargs):
pass
```
#### File: larva/commands/config.py
```python
from .command import Command
from ..configurator import Configurator
from click import echo
import json
class ConfigCommand(Command):
def run(self, **kwargs):
config_type = kwargs.get('type', None)
mode_set = kwargs.get('mode_set', False)
dump = kwargs.get('dump', False)
clear = kwargs.get('clear', False)
configurator = Configurator()
if dump:
echo(json.dumps(configurator.config, indent=4))
return
if clear:
configurator.config = {}
configurator.save()
echo('config cleared')
return
if not mode_set:
echo(configurator.get_value(config_type))
return
configurator.set_value(config_type, mode_set)
configurator.save()
echo('{} set'.format(mode_set))
```
#### File: larva/larva/exceptions.py
```python
class LarvaException(Exception):
def __repr__(self):
return self.message
class AuthenticationFailureError(LarvaException):
pass
class NoUsernameGivenError(LarvaException):
pass
class NoURLGivenError(LarvaException):
pass
class CommandNotAvailableError(LarvaException):
pass
```
#### File: larva/larva/larva.py
```python
import click
from .commander import Commander
commander = Commander()
# LARVA GROUP
@click.group()
def larva():
"""
The commandline client to TheHive.
If you find any bugs, please make sure to report them on https://github.com/3c7/larva/issues
"""
pass
@click.command(name='list')
def list_cases(**kwargs):
"""This lists all cases. Some options will get added, soon."""
commander.invoke('list', **kwargs)
# CONFIG GROUP
@click.group()
def config():
"""Manage larva configuration"""
pass
@click.command()
@click.option('--set', 'mode_set', type=str, help='Set an username', default=False)
def username(mode_set):
"""Get and set username"""
commander.invoke('config',
type='username',
mode_set=mode_set)
@click.command()
@click.option('--set', 'mode_set', type=str, help='Set an url', default=False)
def url(mode_set):
"""Get and set url"""
commander.invoke('config',
type='url',
mode_set=mode_set)
@click.command(name='get')
def get_config():
"""Dump the configuration file"""
commander.invoke('config',
dump=True)
@click.command(name='clear')
def clear_config():
"""Clears the configuration file"""
commander.invoke('config',
clear=True)
# END CONFIG GROUP
# END LARVA GROUP
larva.add_command(list_cases)
larva.add_command(config)
config.add_command(username)
config.add_command(url)
config.add_command(get_config)
config.add_command(clear_config)
if __name__ == '__main__':
larva()
```
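The group/command wiring above can be smoke-tested in-process with click's test runner. The sketch below assumes the package (and its `Configurator` backend) is importable; the module path follows the file layout shown here.
```python
from click.testing import CliRunner
from larva.larva import larva   # import path assumed

runner = CliRunner()
result = runner.invoke(larva, ['config', 'get'])
print(result.exit_code)   # 0 on success
print(result.output)      # JSON dump of the configuration
```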
#### File: larva/larva/writer.py
```python
from click import echo
from termcolor import colored
class Writer:
"""This class handles output"""
def __init__(self):
self.tlps = ['TLP:WHITE', 'TLP:GREEN', 'TLP:AMBER', 'TLP:RED\t']
self.severities = ['LOW', 'MID', 'HIGH']
self.colormap = [self.white, self.green, self.amber, self.red]
def write_cases(self, cases):
echo('####\tTLP\t\tSev.\tOwner\t\tTitle')
echo('----\t---\t\t----\t-----\t\t-----')
for case in cases:
echo('{}\t{}\t{}\t{}\t\t{}'.format(case.get('caseId'),
self.tlp(case.get('tlp')),
self.severity(case.get('severity')),
case.get('owner'),
case.get('title')))
def tlp(self, t_num):
return self.colormap[t_num](self.tlps[t_num])
def severity(self, s_num):
return self.colormap[s_num](self.severities[s_num-1])
@staticmethod
def red(string):
return colored(string, 'red')
@staticmethod
def amber(string):
return colored(string, 'yellow')
@staticmethod
def green(string):
return colored(string, 'green')
@staticmethod
def white(string):
return colored(string, 'white')
``` |
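`Writer.write_cases` expects TheHive-style case dictionaries with numeric `tlp` (0-3) and `severity` (1-3) fields. A quick rendering sketch with made-up cases; the import path is assumed from the file layout.
```python
from larva.writer import Writer  # path assumed

cases = [
    {'caseId': 1, 'tlp': 2, 'severity': 3, 'owner': 'alice', 'title': 'Phishing wave'},
    {'caseId': 2, 'tlp': 1, 'severity': 1, 'owner': 'bob', 'title': 'Malware triage'},
]
Writer().write_cases(cases)
```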
{
"source": "3c7/vt2m",
"score": 2
} |
#### File: vt2m/lib/lib.py
```python
import json
import re
from datetime import datetime
from typing import Generator, Union, List, Optional, Dict
from urllib.parse import quote_plus, urlparse
import requests
from pymisp import MISPEvent, MISPObject
from vt import Client as VTClient
from vt2m.lib.output import print, print_err
def vt_request(api_key: str, url: str):
"""Use this instead of the VT API client."""
headers = {
"Accept": "application/json",
"x-apikey": api_key
}
response = requests.get(url, headers=headers)
if response.status_code > 302:
print_err("[WARN] Status code received from VT API is > 302.")
return response.json()
def vt_query(api_key: str, query: str, limit: Optional[int]) -> List:
"""Queries VT API and yields a list of results."""
if not limit:
limit = 100
with VTClient(apikey=api_key) as vt_client:
response = vt_client.get(f"/intelligence/search?query={quote_plus(query)}&limit={limit}")
results = response.json()
return results.get("data", [])
def process_results(results: Union[Generator, List], event: MISPEvent, comment: Optional[str],
disable_output: bool = False, extract_domains: bool = False) -> List[MISPObject]:
"""Processes VT results using the specific methods per VT object type."""
created_objects = []
for result in results:
if result["type"] == "file":
created_objects.append(
process_file(
file=result["attributes"],
event=event,
comment=comment,
disable_output=disable_output
)
)
elif result["type"] == "url":
created_objects.append(
process_url(
url=result["attributes"],
event=event,
comment=comment,
disable_output=disable_output,
extract_domain=extract_domains
)
)
elif result["type"] == "domain":
created_objects.append(
process_domain(
domain=result,
event=event,
comment=comment,
disable_output=disable_output
)
)
elif result["type"] == "ip-address":
created_objects.append(
process_ip(
ip=result,
event=event,
comment=comment,
disable_output=disable_output
)
)
else:
print_err(f"[ERR] Unknown entity type: {result['type']}")
continue
return created_objects
def process_file(file: Dict, event: MISPEvent, comment: Optional[str] = None,
disable_output: bool = False) -> MISPObject:
"""Adds files to MISP event as MISP objects."""
sha256 = file.get("sha256", None)
if not sha256:
raise KeyError("VirusTotal file object misses sha256 hash. This should not happen.")
if not disable_output:
print(f"[FILE] Processing {sha256}...")
f_obj = get_object_if_available(event, "file", "sha256", sha256)
if f_obj:
return f_obj
f_obj = event.add_object(name="file", comment=comment if comment else "")
f_obj.add_attribute("md5", simple_value=file["md5"])
f_obj.add_attribute("sha1", simple_value=file["sha1"])
f_obj.add_attribute("sha256", simple_value=sha256)
names = file.get("names", [])
if len(names) > 0:
for name in names:
f_obj.add_attribute("filename", simple_value=name, to_ids=False)
imp = file.get("pe_info", {}).get("imphash", None)
if imp:
f_obj.add_attribute("imphash", simple_value=imp)
vhash = file.get("vhash", None)
if vhash:
f_obj.add_attribute("vhash", simple_value=vhash)
tlsh = file.get("tlsh", None)
if tlsh:
f_obj.add_attribute("tlsh", simple_value=tlsh)
telfhash = file.get("telfhash", None)
if telfhash:
f_obj.add_attribute("telfhash", simple_value=telfhash)
creation_date = file.get("creation_date", None)
if creation_date:
creation_date = datetime.fromtimestamp(creation_date)
f_obj.add_attribute("compilation-timestamp", type="datetime", value=creation_date)
return f_obj
def process_url(url: Dict, event: MISPEvent, comment: Optional[str] = None, disable_output: bool = False,
extract_domain: bool = False) -> MISPObject:
"""Adds URLs to MISP event as MISP objects."""
url_string = url.get("url", None)
if not url_string:
raise KeyError("VirusTotal URL object missing the actual URL.")
if not disable_output:
print(f"[URL] Processing {url_string.replace('http', 'hxxp').replace('.', '[.]')}")
_, domain, resource_path, _, query_string, _ = urlparse(url_string)
port = None
if domain.count(":") == 1:
ip, port = domain.split(":")
if 0 < int(port, base=10) < 65536:
domain = ip
u_obj = get_object_if_available(event, "url", "url", url_string)
if u_obj:
if extract_domain:
create_domain_from_url(event, domain, u_obj, disable_output)
return u_obj
u_obj = event.add_object(name="url", comment=comment if comment else "")
u_obj.add_attribute("url", simple_value=url_string)
u_obj.add_attribute("domain", simple_value=domain, to_ids=False)
if resource_path:
u_obj.add_attribute("resource_path", simple_value=resource_path)
if query_string:
u_obj.add_attribute("query_string", simple_value=query_string)
if port:
u_obj.add_attribute("port", simple_value=port)
u_obj.add_attribute("first-seen", type="datetime", value=datetime.fromtimestamp(url["first_submission_date"]))
u_obj.add_attribute("last-seen", type="datetime", value=datetime.fromtimestamp(url["last_submission_date"]))
if extract_domain:
create_domain_from_url(event, domain, u_obj, disable_output)
return u_obj
def process_domain(domain: Dict, event: MISPEvent, comment: Optional[str] = None,
disable_output: bool = False) -> MISPObject:
"""Adds a domain object to a MISP event. Instead of the attributes sub-dictionary, this function needs the complete
VT object, in order to use the VT id."""
domain_name = domain.get("id", None)
if not domain_name:
raise KeyError("VirusTotal Domain object missing the ID.")
if not disable_output:
print(f"[DOMAIN] Processing {domain_name.replace('.', '[.]')}")
d_obj = get_object_if_available(event, "domain-ip", "domain", domain_name)
if d_obj:
return d_obj
domain = domain["attributes"]
d_obj = event.add_object(name="domain-ip", comment=comment if comment else "")
d_obj.add_attribute("domain", simple_value=domain_name)
for record in domain.get("last_dns_records", []):
t = record.get("type", None)
if not t:
continue
if t == "NS":
d_obj.add_attribute("domain", simple_value=record["value"], comment="NS record", to_ids=False)
elif t == "A" or t == "AAAA":
d_obj.add_attribute("ip", type="ip-dst", value=record["value"])
elif t == "MX":
d_obj.add_attribute("domain", simple_value=record["value"], comment="MX record", to_ids=False)
return d_obj
def process_ip(ip: Dict, event: MISPEvent, comment: Optional[str] = None, disable_output: bool = False) -> MISPObject:
"""Adds a domain-ip object to the MISP event. Instead of the attributes sub-dictionary, this function needs the
complete VT object, in order to use the VT id. """
ip_str = ip.get("id", None)
if not ip_str:
raise KeyError("VirusTotal IP object missing the ID.")
if not disable_output:
print(f"[IP] Processing {ip_str.replace('.', '[.]')}.")
i_obj = get_object_if_available(event, "domain-ip", "ip", ip_str)
if i_obj:
return i_obj
ip = ip["attributes"]
i_obj = event.add_object(name="domain-ip", comment=comment if comment else "")
i_obj.add_attribute("ip", type="ip-dst", value=ip_str)
i_obj.add_attribute("text", simple_value=f"AS: {ip.get('as_owner', 'not available')}")
cert = ip.get("last_https_certificate", None)
if cert:
cn = cert.get("subject", {}).get("CN", None)
if cn:
i_obj.add_attribute("domain", type="domain", value=cn, comment="Certificate Subject Common Name")
for alt in cert.get("extensions", {}).get("subject_alternative_name", []):
i_obj.add_attribute("domain", type="domain", value=alt, comment="Certificate Subject Alternative Name")
return i_obj
def process_submission(submission: Dict, event: MISPEvent, comment: Optional[str] = None,
disable_output: bool = False) -> MISPObject:
"""Adds a virustotal-submission object to the given MISP event."""
s_id = submission.get("source_key")
if not disable_output:
print(f"[SUB] Processing submission from submitter {s_id}.")
s_obj = get_object_if_available(event, "virustotal-submission", "submitter-id", s_id)
if s_obj:
return s_obj
s_obj = event.add_object(name="virustotal-submission", comment=comment if comment else "")
s_obj.add_attribute("submitter-id", type="text", value=s_id)
country = submission.get("country", None)
if country:
s_obj.add_attribute("country", type="text", value=country)
city = submission.get("city", None)
if city:
s_obj.add_attribute("city", type="text", value=city)
interface = submission.get("interface", None)
if interface:
s_obj.add_attribute("interface", type="text", value=interface)
upload_date = submission.get("date", None)
if upload_date:
upload_date = datetime.fromtimestamp(upload_date)
s_obj.add_attribute("date", type="datetime", value=upload_date)
filename = submission.get("filename", None)
if filename:
s_obj.add_attribute("filename", type="filename", value=filename)
return s_obj
def get_object_if_available(event: MISPEvent, object_name: str, attribute_relation: str,
value: str) -> Union[MISPObject, None]:
"""Returns an object if it's already available in the MISP event."""
objects = event.get_objects_by_name(object_name)
for obj in objects:
attributes = obj.get_attributes_by_relation(attribute_relation)
for attribute in attributes:
if attribute.value == value:
value = value.replace("http", "hxxp").replace(".", "[.]")
print_err(f"[{object_name.upper().split('-')[0]}] Object with value {value} already available.")
return obj
return None
def process_relations(api_key: str, objects: List[MISPObject], event: MISPEvent, relations_string: Optional[str],
detections: Optional[int], disable_output: bool = False, extract_domains: bool = False,
filter=None):
"""Creates related objects based on given relation string."""
# Todo: Add additional relations
if not relations_string or len(relations_string) == 0:
return
if "," in relations_string:
relations = relations_string.split(",")
else:
relations = [relations_string]
file_relations = ["execution_parents", "compressed_parents", "bundled_files", "dropped_files"]
url_relations = ["contacted_urls", "embedded_urls", "itw_urls"]
domain_relations = ["contacted_domains", "embedded_domains", "itw_domains"]
ip_relations = ["contacted_ips", "embedded_ips", "itw_ips"]
user_account_relations = ["submissions"]
for rel in relations:
if rel not in file_relations and rel not in url_relations and rel not in domain_relations and \
rel not in ip_relations and rel not in user_account_relations:
print_err(f"[REL] Relation {rel} not implemented (yet).")
continue
for obj in objects:
r_objs = get_related_objects(api_key, obj, rel, disable_output)
filtered = False
for r_obj_dict in r_objs:
if filter:
filtered = False
json_string = json.dumps(r_obj_dict)
for f in filter:
if f in json_string:
if not disable_output:
print(f"[FILTER] Filter {f} matched object {r_obj_dict.get('id', '<ID not given>')}, "
f"skipping...")
filtered = True
break
if filtered:
continue
r_obj_id = r_obj_dict.get("id", "<NO ID GIVEN>").replace(".", "[.]")
# Check the detection
stats_malicious = r_obj_dict["attributes"].get("last_analysis_stats", {}).get("malicious", 0)
if detections and isinstance(detections, int):
if not isinstance(stats_malicious, int):
print_err("[REL] Detection stats for are not given as integer therefore skipping the "
"check.")
else:
if stats_malicious < detections:
if not disable_output:
print(f"[REL] Skipping {r_obj_id} because malicious detections are lower than "
f"{detections}.")
continue
if rel in file_relations:
try:
r_obj = process_file(
file=r_obj_dict["attributes"],
event=event,
comment=f"Added via {rel} relation.",
disable_output=True
)
except KeyError as e:
print_err(f"[ERR] File misses key {e}, skipping...")
continue
elif rel in url_relations:
try:
r_obj = process_url(
url=r_obj_dict["attributes"],
event=event,
comment=f"Added via {rel} relation.",
disable_output=True,
extract_domain=extract_domains
)
except KeyError as e:
print_err(f"[ERR] URL misses key {e}, skipping...")
continue
elif rel in domain_relations:
try:
r_obj = process_domain(
domain=r_obj_dict,
event=event,
comment=f"Added via {rel} relation.",
disable_output=True
)
except KeyError as e:
print_err(f"[ERR] Domain misses key {e}, skipping...")
continue
elif rel in ip_relations:
try:
r_obj = process_ip(
ip=r_obj_dict,
event=event,
comment=f"Added via {rel} relation.",
disable_output=True
)
except KeyError as e:
print_err(f"[ERR] IP misses key {e}, skipping...")
continue
elif rel in user_account_relations:
try:
r_obj = process_submission(
submission=r_obj_dict["attributes"],
event=event,
comment=f"Added via {rel} relation."
)
except KeyError as e:
print_err(f"[ERR] Submission misses key {e}, skipping...")
continue
else:
print_err(f"[ERR] Could not process returned object \"{r_obj_id}\".")
continue
try:
if rel == "execution_parents":
add_reference(r_obj, obj.uuid, "executes")
elif rel == "compressed_parents":
add_reference(r_obj, obj.uuid, "contains")
elif rel == "bundled_files":
add_reference(obj, r_obj.uuid, "contains")
elif rel == "dropped_files":
add_reference(obj, r_obj.uuid, "drops")
elif "embedded_" in rel:
add_reference(obj, r_obj.uuid, "contains")
elif "contacted_" in rel:
add_reference(obj, r_obj.uuid, "connects-to")
elif "itw_" in rel:
add_reference(obj, r_obj.uuid, "downloaded-from")
elif rel == "submissions":
add_reference(r_obj, obj.uuid, "submitted")
else:
print_err(f"[REL] Could not determine relationship between {obj.uuid} and {r_obj.uuid}. "
f"Adding as generic \"related-to\".")
r_obj.add_reference(obj.uuid, "related-to")
except AttributeError as ae:
print_err(f"[ERR] Related object {r_obj_id} missing an attribute: {ae}")
# If the related object is not none, let's dump it to see what's wrong
if r_obj:
print_err(f"[ERR] Remote object dump:\n{r_obj.to_json()}")
continue
def add_reference(obj: MISPObject, to_obj_uuid: str, relationship_type: str):
"""Adds a reference, if not already available."""
if not reference_available(obj, to_obj_uuid, relationship_type):
obj.add_reference(to_obj_uuid, relationship_type)
else:
print_err(f"[REL] {obj.uuid} --{relationship_type}-> {to_obj_uuid} already available and therefore skipped.")
def reference_available(obj: MISPObject, referenced_uuid: str, relationship_type: str) -> bool:
"""Loops over given relationships and returns true if any relationship references the given uuid and type."""
for ref in obj.references:
if ref.referenced_uuid == referenced_uuid and ref.relationship_type == relationship_type:
return True
return False
def get_related_objects(api_key: str, obj: MISPObject, rel: str, disable_output: bool = False) -> List[Dict]:
"""Gets related objects from VT."""
if obj.name == "file":
vt_id = obj.get_attributes_by_relation("sha256")[0].value
else:
print_err("[REL] Currently only file objects are supported.")
return []
if not disable_output:
print(f"[REL] Receiving {rel} for {vt_id}...")
with VTClient(api_key) as client:
res = client.get(f"/files/{vt_id}/{rel}?limit=40").json()
if "error" in res:
print_err(f"[REL] Error during receiving related objects: {res['error']}.")
return []
related_objects = []
for related_object in res.get("data", []):
if "error" in related_object:
print_err(f"[REL] File {related_object['id']} not available on VT.")
else:
related_objects.append(related_object)
if not disable_output:
print(f"[REL] Got {len(related_objects)} {rel} objects.")
return related_objects
def get_vt_notifications(
vt_key: str,
filter: Optional[str] = None,
limit: int = 10
) -> List:
"""Requests notifications from VT API. Applies an optional filter."""
url = f"https://www.virustotal.com/api/v3/intelligence/hunting_notification_files?limit={limit}"
if filter:
url += f"&filter={quote_plus(filter)}"
data = vt_request(api_key=vt_key, url=url)
if "error" in data:
print_err(f"[ERR] Error occured during receiving notifications: {data['error']}")
return []
return data["data"]
def create_domain_from_url(event: MISPEvent, domain: str, u_obj: MISPObject, disable_output: bool = False):
"""Creates domain object from url object and adds a relation."""
if domain and len(domain) > 0:
if re.fullmatch(r"[0-9]{1,3}\.[0-9]{1,3}\.[0-9]{1,3}\.[0-9]{1,3}", domain):
attribute_type = "ip"
elif ":" in domain:
attribute_type = "ip"
else:
attribute_type = "domain"
d_obj = get_object_if_available(event, "domain-ip", attribute_type, domain)
if not d_obj:
d_obj = event.add_object(name="domain-ip", comment=f"Extracted {attribute_type}")
d_obj.add_attribute(attribute_type, simple_value=domain)
add_reference(u_obj, d_obj.uuid, "contains")
if not disable_output:
print(f"[REL] Extracted {attribute_type} from {object_represent_string(u_obj)}.")
def object_represent_string(obj: MISPObject, include_uuid: bool = False) -> str:
"""Returns a string which represents the object."""
if obj.name == "file":
repr = obj.get_attributes_by_relation("sha256").pop()
elif obj.name == "domain-ip":
repr = obj.get_attributes_by_relation("domain").pop()
if not repr:
repr = obj.get_attributes_by_relation("ip").pop()
elif obj.name == "url":
repr = obj.get_attributes_by_relation("url").pop()
else:
s = f"[ERR] Given object name/type unknown: {obj.name}."
print_err(s)
raise TypeError(s)
if not repr:
s = f"[ERR] Given object does not include its representative attribute: {obj.to_json()}"
print_err(s)
raise KeyError(s)
defanged = repr.value.replace("http", "hxxp").replace(".", "[.]")
if include_uuid:
return defanged + "(" + obj.uuid + ")"
return defanged
def get_vt_retrohunts(vt_key: str, limit: Optional[int] = 40, filter: Optional[str] = "") -> List[Dict]:
"""Loads available retrohunts from the VT API."""
url = f"https://www.virustotal.com/api/v3/intelligence/retrohunt_jobs?limit={limit}"
if filter:
url += f"&filter={quote_plus(filter)}"
data = vt_request(api_key=vt_key, url=url)
if "error" in data:
print_err(f"[ERR] Error occured during receiving notifications: {data['error']}")
return []
return data["data"]
def get_retrohunt_rules(r: Dict) -> List[str]:
"""Extracts rules used within a retrohunt."""
rules = []
for line in r.get("attributes", {}).get("rules", "").splitlines():
line = line.strip()
if "rule" in line[:4]:
line = line.split("{")[0]
line = line.split(":")[0]
line = line[4:].strip()
rules.append(line)
return rules
def get_vt_retrohunt_files(vt_key: str, r_id: str, limit: Optional[int] = 100):
"""Retrieve file objects related to a retrohunt from VT."""
url = f"https://www.virustotal.com/api/v3/intelligence/retrohunt_jobs/{r_id}/matching_files?limit={limit}"
data = vt_request(api_key=vt_key, url=url)
if "error" in data:
print_err(f"[ERR] Error occured during receiving notifications: {data['error']}")
return []
return data["data"]
```
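Most helpers in this module need live VirusTotal or MISP connections, but `get_retrohunt_rules` is a pure string parser, so its behaviour is easy to show with a made-up retrohunt object. The import path is assumed from this repository layout.
```python
from vt2m.lib.lib import get_retrohunt_rules  # path assumed

retrohunt = {
    "attributes": {
        "rules": (
            "rule SusLoader : loader {\n"
            "    condition:\n"
            "        true\n"
            "}\n"
            "rule Dropper_Generic {\n"
            "    condition:\n"
            "        false\n"
            "}\n"
        )
    }
}
print(get_retrohunt_rules(retrohunt))   # ['SusLoader', 'Dropper_Generic']
```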
#### File: vt2m/lib/output.py
```python
from datetime import datetime
from typing import Dict
import typer
def print(*args, **kwargs) -> None:
"""Shadows python print method in order to use typer.echo instead."""
typer.echo(*args, **kwargs)
def print_err(s):
"""Wrapper for printing to stderr."""
if s[:5] == "[ERR]":
s = typer.style("[ERR]", fg="red") + s[5:]
elif s[:6] == "[WARN]":
s = typer.style("[WARN]", fg="yellow") + s[6:]
print(s, err=True)
def print_file_object(obj: Dict, *attributes: str) -> None:
"""Print file object with given attributes. Attributes are given in a list of strings which can end with a reserved
length passed to the format string, e.g., `attributes.sha256,40`."""
for idx, attrib in enumerate(attributes):
tmp = obj
keys = attrib.split(".")
if "," in keys[-1]:
keys[-1], length = keys[-1].split(",", maxsplit=1)
else:
length = None
try:
for idx2, key in enumerate(keys):
if idx2 == len(keys) - 1 and "date" in key:
try:
tmp = datetime.fromtimestamp(tmp[key]).isoformat()
                    except Exception:
print_err(f"[WARN] Tried to parse {keys} as date, but was not successful.")
tmp = [key]
else:
tmp = tmp[key]
except KeyError:
tmp = "<Not found>"
if idx + 1 < len(attributes) and len(attributes) > 1:
if length:
print(f"{tmp:<{length}}", nl=False)
else:
print(tmp + " ", nl=False)
else:
if length:
print(f"{tmp:<{length}}")
else:
print(tmp)
```
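`print_file_object` walks dotted key paths through a nested dictionary and honours an optional `,width` suffix per attribute, which is how the notification listing below formats its columns. A self-contained example with an invented notification; the import path is assumed from the file layout.
```python
from vt2m.lib.output import print_file_object  # path assumed

notification = {
    "attributes": {"sha256": "a" * 64, "first_submission_date": 1650000000},
    "context_attributes": {"rule_name": "SusLoader"},
}
print_file_object(
    notification,
    "context_attributes.rule_name,40",
    "attributes.first_submission_date,30",   # rendered as an ISO date
    "attributes.sha256",
)
```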
#### File: vt2m/subcommands/notifications.py
```python
import os
from typing import List
import typer
from pymisp import PyMISP
from vt2m.lib.lib import print, print_err, get_vt_notifications, process_results, process_relations
from vt2m.lib.output import print_file_object
app = typer.Typer(help="Query and process VT notifications")
@app.command("list")
def list_notifications(
vt_key: str = typer.Option(None, help="VT API Key - can also be set via VT_KEY env"),
filter: str = typer.Option("", help="Filter to be used for filtering notifications"),
limit: int = typer.Option(10, help="Amount of notifications to grab"),
sha256: bool = typer.Option(False, "-s", "--sha256", help="Only show sha256 hashes")
):
"""List currently available VirusTotal notifications"""
if not vt_key:
vt_key = os.getenv("VT_KEY")
if not all([vt_key]):
print_err("[ERR] Not all required parameters were given.")
raise typer.Abort()
notifications = get_vt_notifications(
vt_key=vt_key,
filter=filter,
limit=limit
)
if len(notifications) == 0:
print_err("[WARN] No notifications found.")
raise typer.Exit(1)
if not sha256:
print(f"{'Rule':<40}{'Submission Date':<30}SHA256 Hash")
for notification in notifications:
if sha256:
print_file_object(notification, "attributes.sha256")
else:
print_file_object(
notification,
"context_attributes.rule_name,40",
"attributes.first_submission_date,30",
"attributes.sha256"
)
@app.command("import")
def import_notifications(
vt_key: str = typer.Option(None, help="VT API Key - can also be set via VT_KEY env"),
filter: str = typer.Option("", help="Filter to be used for filtering notifications"),
limit: int = typer.Option(10, help="Amount of notifications to grab"),
uuid: str = typer.Option(..., "--uuid", "-u", help="MISP event UUID"),
url: str = typer.Option(None, "--url", "-U", help="MISP URL - can be passed via MISP_URL env"),
key: str = typer.Option(None, "--key", "-K", help="MISP API Key - can be passed via MISP_KEY env"),
comment: str = typer.Option("", "--comment", "-c", help="Comment for new MISP objects"),
relations: str = typer.Option("", "--relations", "-r", help="Relations to resolve via VirusTotal"),
detections: int = typer.Option(0, "--detections", "-d",
help="Amount of detections a related VirusTotal object must at least have"),
extract_domains: bool = typer.Option(False, "--extract-domains", "-D",
help="Extract domains from URL objects and add them as related object"),
relation_filter: List[str] = typer.Option([], "--filter", "-f",
help="Filtering related objects by matching this string(s) "
"against json dumps of the objects"),
quiet: bool = typer.Option(False, "--quiet", "-q", help="Disable output")
):
"""Import files related to notifications"""
if not url:
url = os.getenv("MISP_URL", None)
if not key:
key = os.getenv("MISP_KEY", None)
if not vt_key:
vt_key = os.getenv("VT_KEY", None)
if not url or not key or not vt_key:
print_err("[ERR] URL and key must be given either through param or env.")
raise typer.Exit(-1)
misp = PyMISP(url, key)
misp.global_pythonify = True
event = misp.get_event(uuid)
files = get_vt_notifications(
vt_key=vt_key,
filter=filter,
limit=limit
)
created_objects = process_results(
results=files,
event=event,
comment=comment,
disable_output=quiet,
extract_domains=extract_domains
)
process_relations(
api_key=vt_key,
objects=created_objects,
event=event,
relations_string=relations,
detections=detections,
disable_output=quiet,
extract_domains=extract_domains,
filter=relation_filter
)
event.published = False
misp.update_event(event)
``` |
{
"source": "3Cement/MoviesDatabase---Django-REST-Framework",
"score": 2
} |
#### File: MoviesDatabase---Django-REST-Framework/movieapp/views.py
```python
from django.shortcuts import render, redirect, get_object_or_404
from django.views import generic
from rest_framework import viewsets, status
from rest_framework.views import APIView
from rest_framework.response import Response
from .models import Movie, Comment
from .forms import CommentForm
from .serializers import MovieSerializer, CommentSerializer
import json
class MovieView(viewsets.ModelViewSet):
queryset = Movie.objects.all()
serializer_class = MovieSerializer
class CommentView(viewsets.ModelViewSet):
queryset = Comment.objects.all()
serializer_class = CommentSerializer
class MovieDetailView(generic.DetailView):
model = Movie
template_name = 'movie_detail.html'
def get_context_data(self, **kwargs):
context = super(MovieDetailView, self).get_context_data(**kwargs)
context['comments_list'] = Comment.objects.all()
return context
def homepage(request):
return render(request, 'home.html')
def allmovies(request):
movie_list = Movie.objects.all()
return render(request, 'movie_list.html', {'movie_list': movie_list})
def allcomments(request):
comment_list = Comment.objects.all()
return render(request, 'comment_list.html', {'comment_list': comment_list})
def comments_by_movie_id(request, pk):
movie=get_object_or_404(Movie, pk = pk)
comment_list = Comment.objects.filter(movie=movie).all()
return render(request, 'comment_list_by_id.html', {'comment_list': comment_list, 'movie': movie})
def add_comment(request):
if request.method == 'POST':
form = CommentForm(request.POST or None)
if form.is_valid():
form.save()
#messages.success(request, ('Comment has been added!'))
return redirect('home.html')
else:
form = CommentForm()
#messages.success(request, ('You CommentFOrm is invalid'))
return redirect('home.html')
else:
form = CommentForm()
return render(request, 'comment_form.html', { 'form': form, },)
def results(request):
print('Looking for results')
movie = Movie()
if request.GET.get('searchTitle') is not None:
searchname = request.GET.get('searchTitle')
movieData = movie.getOMDBdata(searchname)
return render(request, 'home.html', {'movieData': movieData, 'search': True})
else:
return render(request, 'home.html')
'''
def get_movie(request, title):
if request.method =='GET':
try:
movie = Movie.objects.get(title=title)
response = json.dumps([{ 'Movie': movie.title, 'MovieData': movie.movieData }])
except:
response = json.dumps([{ 'Error': 'No movie with that title' }])
return HttpResponse(response, content_type='text/json')
'''
``` |
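The two ViewSets above are typically exposed through a DRF router. The project's `urls.py` is not included here, so the following wiring is a hypothetical sketch of how they could be registered.
```python
from django.urls import path, include
from rest_framework import routers
from movieapp import views

router = routers.DefaultRouter()
router.register(r'movies', views.MovieView)
router.register(r'comments', views.CommentView)

urlpatterns = [
    path('api/', include(router.urls)),
    path('', views.homepage, name='home'),
]
```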
{
"source": "3ch01c/blinkpy",
"score": 3
} |
#### File: blinkpy/blinkpy/sync_module.py
```python
import logging
from requests.structures import CaseInsensitiveDict
from blinkpy import api
from blinkpy.camera import BlinkCamera
from blinkpy.helpers.constants import ONLINE
_LOGGER = logging.getLogger(__name__)
class BlinkSyncModule():
"""Class to initialize sync module."""
def __init__(self, blink, network_name, network_id, camera_list):
"""
Initialize Blink sync module.
:param blink: Blink class instantiation
"""
self.blink = blink
self._auth_header = blink.auth_header
self.network_id = network_id
self.region = blink.region
self.region_id = blink.region_id
self.name = network_name
self.serial = None
self.status = None
self.sync_id = None
self.host = None
self.summary = None
self.network_info = None
self.events = []
self.cameras = CaseInsensitiveDict({})
self.motion_interval = blink.motion_interval
self.motion = {}
self.last_record = {}
self.camera_list = camera_list
@property
def attributes(self):
"""Return sync attributes."""
attr = {
'name': self.name,
'id': self.sync_id,
'network_id': self.network_id,
'serial': self.serial,
'status': self.status,
'region': self.region,
'region_id': self.region_id,
}
return attr
@property
def urls(self):
"""Return device urls."""
return self.blink.urls
@property
def online(self):
"""Return boolean system online status."""
return ONLINE[self.status]
@property
def arm(self):
"""Return status of sync module: armed/disarmed."""
try:
return self.network_info['network']['armed']
except (KeyError, TypeError):
return None
@arm.setter
def arm(self, value):
"""Arm or disarm system."""
if value:
return api.request_system_arm(self.blink, self.network_id)
return api.request_system_disarm(self.blink, self.network_id)
def start(self):
"""Initialize the system."""
response = api.request_syncmodule(self.blink,
self.network_id)
try:
self.summary = response['syncmodule']
self.network_id = self.summary['network_id']
except (TypeError, KeyError):
_LOGGER.error(("Could not retrieve sync module information "
"with response: %s"), response, exc_info=True)
return False
try:
self.sync_id = self.summary['id']
self.serial = self.summary['serial']
self.status = self.summary['status']
except KeyError:
_LOGGER.error("Could not extract some sync module info: %s",
response,
exc_info=True)
self.network_info = api.request_network_status(self.blink,
self.network_id)
self.check_new_videos()
try:
for camera_config in self.camera_list:
if 'name' not in camera_config:
break
name = camera_config['name']
self.cameras[name] = BlinkCamera(self)
self.motion[name] = False
camera_info = self.get_camera_info(camera_config['id'])
self.cameras[name].update(camera_info,
force_cache=True,
force=True)
except KeyError:
_LOGGER.error("Could not create cameras instances for %s",
self.name,
exc_info=True)
return False
return True
def get_events(self, **kwargs):
"""Retrieve events from server."""
force = kwargs.pop('force', False)
response = api.request_sync_events(self.blink,
self.network_id,
force=force)
try:
return response['event']
except (TypeError, KeyError):
_LOGGER.error("Could not extract events: %s",
response,
exc_info=True)
return False
def get_camera_info(self, camera_id):
"""Retrieve camera information."""
response = api.request_camera_info(self.blink,
self.network_id,
camera_id)
try:
return response['camera'][0]
except (TypeError, KeyError):
_LOGGER.error("Could not extract camera info: %s",
response,
exc_info=True)
return []
def refresh(self, force_cache=False):
"""Get all blink cameras and pulls their most recent status."""
self.network_info = api.request_network_status(self.blink,
self.network_id)
self.check_new_videos()
for camera_name in self.cameras.keys():
camera_id = self.cameras[camera_name].camera_id
camera_info = self.get_camera_info(camera_id)
self.cameras[camera_name].update(camera_info,
force_cache=force_cache)
def check_new_videos(self):
"""Check if new videos since last refresh."""
try:
interval = self.blink.last_refresh - self.motion_interval*60
except TypeError:
# This is the first start, so refresh hasn't happened yet.
# No need to check for motion.
return False
resp = api.request_videos(self.blink,
time=interval,
page=1)
for camera in self.cameras.keys():
self.motion[camera] = False
try:
info = resp['media']
except (KeyError, TypeError):
_LOGGER.warning("Could not check for motion. Response: %s", resp)
return False
for entry in info:
try:
name = entry['device_name']
clip = entry['media']
timestamp = entry['created_at']
self.motion[name] = True
self.last_record[name] = {'clip': clip, 'time': timestamp}
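                # Example shape (assumption, illustrative values only):
                #   self.last_record['Front Door'] == {'clip': '/media/clip.mp4',
                #                                      'time': '2019-01-01T00:00:00+00:00'}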
except KeyError:
_LOGGER.debug("No new videos since last refresh.")
return True
```
#### File: blinkpy/tests/test_cameras.py
```python
import unittest
from unittest import mock
from blinkpy import blinkpy
from blinkpy.helpers.util import create_session, BlinkURLHandler
from blinkpy.sync_module import BlinkSyncModule
from blinkpy.camera import BlinkCamera
import tests.mock_responses as mresp
USERNAME = 'foobar'
PASSWORD = '<PASSWORD>'
CAMERA_CFG = {
'camera': [
{
'battery_voltage': 90,
'motion_alert': True,
'wifi_strength': -30,
'temperature': 68
}
]
}
@mock.patch('blinkpy.helpers.util.Session.send',
side_effect=mresp.mocked_session_send)
class TestBlinkCameraSetup(unittest.TestCase):
"""Test the Blink class in blinkpy."""
def setUp(self):
"""Set up Blink module."""
self.blink = blinkpy.Blink(username=USERNAME,
password=PASSWORD)
header = {
'Host': 'abc.zxc',
'TOKEN_AUTH': mresp.LOGIN_RESPONSE['authtoken']['authtoken']
}
# pylint: disable=protected-access
self.blink._auth_header = header
self.blink.session = create_session()
self.blink.urls = BlinkURLHandler('test')
self.blink.sync['test'] = BlinkSyncModule(self.blink, 'test', 1234, [])
self.camera = BlinkCamera(self.blink.sync['test'])
self.camera.name = 'foobar'
self.blink.sync['test'].cameras['foobar'] = self.camera
def tearDown(self):
"""Clean up after test."""
self.blink = None
def test_camera_update(self, mock_sess):
"""Test that we can properly update camera properties."""
config = {
'name': 'new',
'id': 1234,
'network_id': 5678,
'serial': '12345678',
'enabled': False,
'battery_voltage': 90,
'battery_state': 'ok',
'temperature': 68,
'wifi_strength': 4,
'thumbnail': '/thumb',
}
self.camera.last_record = ['1']
self.camera.sync.last_record = {
'new': {
'clip': '/test.mp4',
'time': '1970-01-01T00:00:00'
}
}
mock_sess.side_effect = [
mresp.MockResponse({'temp': 71}, 200),
'test',
'foobar'
]
self.camera.update(config)
self.assertEqual(self.camera.name, 'new')
self.assertEqual(self.camera.camera_id, '1234')
self.assertEqual(self.camera.network_id, '5678')
self.assertEqual(self.camera.serial, '12345678')
self.assertEqual(self.camera.motion_enabled, False)
self.assertEqual(self.camera.battery, 'ok')
self.assertEqual(self.camera.temperature, 68)
self.assertEqual(self.camera.temperature_c, 20)
self.assertEqual(self.camera.temperature_calibrated, 71)
self.assertEqual(self.camera.wifi_strength, 4)
self.assertEqual(self.camera.thumbnail,
'https://rest-test.immedia-semi.com/thumb.jpg')
self.assertEqual(self.camera.clip,
'https://rest-test.immedia-semi.com/test.mp4')
self.assertEqual(self.camera.image_from_cache, 'test')
self.assertEqual(self.camera.video_from_cache, 'foobar')
def test_no_thumbnails(self, mock_sess):
"""Tests that thumbnail is 'None' if none found."""
mock_sess.return_value = 'foobar'
self.camera.last_record = ['1']
config = {
'name': 'new',
'id': 1234,
'network_id': 5678,
'serial': '12345678',
'enabled': False,
'battery_voltage': 90,
'battery_state': 'ok',
'temperature': 68,
'wifi_strength': 4,
'thumbnail': '',
}
self.camera.sync.homescreen = {
'devices': []
}
self.assertEqual(self.camera.temperature_calibrated, None)
with self.assertLogs() as logrecord:
self.camera.update(config, force=True)
self.assertEqual(self.camera.thumbnail, None)
self.assertEqual(self.camera.last_record, ['1'])
self.assertEqual(self.camera.temperature_calibrated, 68)
self.assertEqual(
logrecord.output,
[("WARNING:blinkpy.camera:Could not retrieve calibrated "
"temperature."),
("WARNING:blinkpy.camera:Could not find thumbnail for camera new"
"\nNoneType: None")]
)
def test_no_video_clips(self, mock_sess):
"""Tests that we still proceed with camera setup with no videos."""
mock_sess.return_value = 'foobar'
config = {
'name': 'new',
'id': 1234,
'network_id': 5678,
'serial': '12345678',
'enabled': False,
'battery_voltage': 90,
'battery_state': 'ok',
'temperature': 68,
'wifi_strength': 4,
'thumbnail': '/foobar',
}
self.camera.sync.homescreen = {
'devices': []
}
self.camera.update(config, force_cache=True)
self.assertEqual(self.camera.clip, None)
self.assertEqual(self.camera.video_from_cache, None)
```
#### File: blinkpy/tests/test_util.py
```python
import unittest
from unittest import mock
import time
from blinkpy.helpers.util import Throttle, BlinkURLHandler
class TestUtil(unittest.TestCase):
"""Test the helpers/util module."""
def setUp(self):
"""Initialize the blink module."""
def tearDown(self):
"""Tear down blink module."""
def test_throttle(self):
"""Test the throttle decorator."""
calls = []
@Throttle(seconds=5)
def test_throttle():
calls.append(1)
now = int(time.time())
now_plus_four = now + 4
now_plus_six = now + 6
test_throttle()
self.assertEqual(1, len(calls))
# Call again, still shouldn't fire
test_throttle()
self.assertEqual(1, len(calls))
# Call with force
test_throttle(force=True)
self.assertEqual(2, len(calls))
        # Call again without force, shouldn't fire
test_throttle()
self.assertEqual(2, len(calls))
# Fake time as 4 seconds from now
with mock.patch('time.time', return_value=now_plus_four):
test_throttle()
self.assertEqual(2, len(calls))
# Fake time as 6 seconds from now
with mock.patch('time.time', return_value=now_plus_six):
test_throttle()
self.assertEqual(3, len(calls))
def test_throttle_per_instance(self):
"""Test that throttle is done once per instance of class."""
class Tester:
"""A tester class for throttling."""
def test(self):
"""Test the throttle."""
return True
tester = Tester()
throttled = Throttle(seconds=1)(tester.test)
self.assertEqual(throttled(), True)
self.assertEqual(throttled(), None)
def test_throttle_on_two_methods(self):
"""Test that throttle works for multiple methods."""
class Tester:
"""A tester class for throttling."""
@Throttle(seconds=3)
def test1(self):
"""Test function for throttle."""
return True
@Throttle(seconds=5)
def test2(self):
"""Test function for throttle."""
return True
tester = Tester()
now = time.time()
now_plus_4 = now + 4
now_plus_6 = now + 6
self.assertEqual(tester.test1(), True)
self.assertEqual(tester.test2(), True)
self.assertEqual(tester.test1(), None)
self.assertEqual(tester.test2(), None)
with mock.patch('time.time', return_value=now_plus_4):
self.assertEqual(tester.test1(), True)
self.assertEqual(tester.test2(), None)
with mock.patch('time.time', return_value=now_plus_6):
self.assertEqual(tester.test1(), None)
self.assertEqual(tester.test2(), True)
def test_legacy_subdomains(self):
"""Test that subdomain can be set to legacy mode."""
urls = BlinkURLHandler('test')
self.assertEqual(urls.subdomain, 'rest-test')
urls = BlinkURLHandler('test', legacy=True)
self.assertEqual(urls.subdomain, 'rest.test')
``` |
{
"source": "3cham/milvus",
"score": 2
} |
#### File: python_client/testcases/test_query_20.py
```python
import pytest
import random
import numpy as np
import pandas as pd
from pymilvus import DefaultConfig
from base.client_base import TestcaseBase
from common.code_mapping import ConnectionErrorMessage as cem
from common.code_mapping import CollectionErrorMessage as clem
from common import common_func as cf
from common import common_type as ct
from common.common_type import CaseLabel, CheckTasks
from utils.util_log import test_log as log
import utils.utils as ut
prefix = "query"
exp_res = "exp_res"
default_term_expr = f'{ct.default_int64_field_name} in [0, 1]'
default_index_params = {"index_type": "IVF_SQ8", "metric_type": "L2", "params": {"nlist": 64}}
binary_index_params = {"index_type": "BIN_IVF_FLAT", "metric_type": "JACCARD", "params": {"nlist": 64}}
default_entities = ut.gen_entities(ut.default_nb, is_normal=True)
default_pos = 5
default_int_field_name = "int64"
default_float_field_name = "float"
class TestQueryBase(TestcaseBase):
"""
test Query interface
query(collection_name, expr, output_fields=None, partition_names=None, timeout=None)
"""
@pytest.mark.tags(CaseLabel.L0)
def test_query(self):
"""
target: test query
method: query with term expr
expected: verify query result
"""
# create collection, insert default_nb, load collection
collection_w, vectors = self.init_collection_general(prefix, insert_data=True)[0:2]
int_values = vectors[0][ct.default_int64_field_name].values.tolist()
pos = 5
term_expr = f'{ct.default_int64_field_name} in {int_values[:pos]}'
res = vectors[0].iloc[0:pos, :1].to_dict('records')
collection_w.query(term_expr, check_task=CheckTasks.check_query_results, check_items={exp_res: res})
@pytest.mark.tags(CaseLabel.L1)
def test_query_empty_collection(self):
"""
target: test query empty collection
method: query on a empty collection
expected: empty result
"""
c_name = cf.gen_unique_str(prefix)
collection_w = self.init_collection_wrap(name=c_name)
collection_w.load()
res, _ = collection_w.query(default_term_expr)
assert len(res) == 0
@pytest.mark.tags(CaseLabel.L0)
def test_query_auto_id_collection(self):
"""
target: test query with auto_id=True collection
method: test query with auto id
expected: query result is correct
"""
self._connect()
df = cf.gen_default_dataframe_data(ct.default_nb)
df[ct.default_int64_field_name] = None
insert_res, _, = self.collection_wrap.construct_from_dataframe(cf.gen_unique_str(prefix), df,
primary_field=ct.default_int64_field_name,
auto_id=True)
assert self.collection_wrap.num_entities == ct.default_nb
ids = insert_res[1].primary_keys
pos = 5
res = df.iloc[:pos, :1].to_dict('records')
self.collection_wrap.load()
# query with all primary keys
term_expr_1 = f'{ct.default_int64_field_name} in {ids[:pos]}'
for i in range(5):
res[i][ct.default_int64_field_name] = ids[i]
self.collection_wrap.query(term_expr_1, check_task=CheckTasks.check_query_results, check_items={exp_res: res})
# query with part primary keys
term_expr_2 = f'{ct.default_int64_field_name} in {[ids[0], 0]}'
self.collection_wrap.query(term_expr_2, check_task=CheckTasks.check_query_results,
check_items={exp_res: res[:1]})
@pytest.mark.tags(CaseLabel.L1)
def test_query_auto_id_not_existed_primary_values(self):
"""
target: test query on auto_id true collection
method: 1.create auto_id true collection 2.query with not existed primary keys
expected: query result is empty
"""
schema = cf.gen_default_collection_schema(auto_id=True)
collection_w = self.init_collection_wrap(name=cf.gen_unique_str(prefix), schema=schema)
df = cf.gen_default_dataframe_data(ct.default_nb)
df.drop(ct.default_int64_field_name, axis=1, inplace=True)
mutation_res, _ = collection_w.insert(data=df)
assert collection_w.num_entities == ct.default_nb
collection_w.load()
term_expr = f'{ct.default_int64_field_name} in [0, 1, 2]'
res, _ = collection_w.query(term_expr)
assert len(res) == 0
@pytest.mark.tags(CaseLabel.L1)
def test_query_expr_none(self):
"""
target: test query with none expr
method: query with expr None
expected: raise exception
"""
collection_w, vectors = self.init_collection_general(prefix, insert_data=True)[0:2]
error = {ct.err_code: 0, ct.err_msg: "The type of expr must be string"}
collection_w.query(None, check_task=CheckTasks.err_res, check_items=error)
@pytest.mark.tags(CaseLabel.L1)
def test_query_non_string_expr(self):
"""
target: test query with non-string expr
method: query with non-string expr, eg 1, [] ..
expected: raise exception
"""
collection_w, vectors = self.init_collection_general(prefix, insert_data=True)[0:2]
exprs = [1, 2., [], {}, ()]
error = {ct.err_code: 0, ct.err_msg: "The type of expr must be string"}
for expr in exprs:
collection_w.query(expr, check_task=CheckTasks.err_res, check_items=error)
@pytest.mark.tags(CaseLabel.L1)
def test_query_expr_invalid_string(self):
"""
target: test query with invalid expr
method: query with invalid string expr
expected: raise exception
"""
collection_w, vectors = self.init_collection_general(prefix, insert_data=True)[0:2]
error = {ct.err_code: 1, ct.err_msg: "Invalid expression!"}
exprs = ["12-s", "中文", "a", " "]
for expr in exprs:
collection_w.query(expr, check_task=CheckTasks.err_res, check_items=error)
@pytest.mark.tags(CaseLabel.L1)
@pytest.mark.skip(reason="repeat with test_query, waiting for other expr")
def test_query_expr_term(self):
"""
target: test query with TermExpr
method: query with TermExpr
expected: query result is correct
"""
collection_w, vectors = self.init_collection_general(prefix, insert_data=True)[0:2]
res = vectors[0].iloc[:2, :1].to_dict('records')
collection_w.query(default_term_expr, check_task=CheckTasks.check_query_results, check_items={exp_res: res})
@pytest.mark.tags(CaseLabel.L1)
def test_query_expr_not_existed_field(self):
"""
target: test query with not existed field
method: query by term expr with fake field
expected: raise exception
"""
collection_w = self.init_collection_wrap(cf.gen_unique_str(prefix))
term_expr = 'field in [1, 2]'
error = {ct.err_code: 1, ct.err_msg: "fieldName(field) not found"}
collection_w.query(term_expr, check_task=CheckTasks.err_res, check_items=error)
@pytest.mark.tags(CaseLabel.L1)
def test_query_expr_non_primary_fields(self):
"""
target: test query on non-primary non-vector fields
method: query on non-primary non-vector fields
expected: verify query result
"""
self._connect()
# construct dataframe and inert data
df = pd.DataFrame({
ct.default_int64_field_name: pd.Series(data=[i for i in range(ct.default_nb)]),
ct.default_int32_field_name: pd.Series(data=[np.int32(i) for i in range(ct.default_nb)], dtype="int32"),
ct.default_int16_field_name: pd.Series(data=[np.int16(i) for i in range(ct.default_nb)], dtype="int16"),
ct.default_float_field_name: pd.Series(data=[float(i) for i in range(ct.default_nb)], dtype="float32"),
ct.default_double_field_name: pd.Series(data=[np.double(i) for i in range(ct.default_nb)], dtype="double"),
ct.default_float_vec_field_name: cf.gen_vectors(ct.default_nb, ct.default_dim)
})
self.collection_wrap.construct_from_dataframe(cf.gen_unique_str(prefix), df,
primary_field=ct.default_int64_field_name)
assert self.collection_wrap.num_entities == ct.default_nb
self.collection_wrap.load()
# query by non_primary non_vector scalar field
non_primary_field = [ct.default_int32_field_name, ct.default_int16_field_name,
ct.default_float_field_name, ct.default_double_field_name]
# exp res: first two rows and all fields expect last vec field
res = df.iloc[:2, :-1].to_dict('records')
for field in non_primary_field:
filter_values = df[field].tolist()[:2]
term_expr = f'{field} in {filter_values}'
self.collection_wrap.query(term_expr, output_fields=["*"],
check_task=CheckTasks.check_query_results, check_items={exp_res: res})
@pytest.mark.tags(CaseLabel.L2)
@pytest.mark.xfail(reason="issue #7521 #7522")
def test_query_expr_by_bool_field(self):
"""
target: test query by bool field and output binary field
method: 1.create and insert with [int64, float, bool, float_vec] fields
2.query by bool field, and output all int64, bool fields
expected: verify query result and output fields
"""
self._connect()
df = cf.gen_default_dataframe_data()
bool_values = pd.Series(data=[True if i % 2 == 0 else False for i in range(ct.default_nb)], dtype="bool")
df.insert(2, ct.default_bool_field_name, bool_values)
self.collection_wrap.construct_from_dataframe(cf.gen_unique_str(prefix), df,
primary_field=ct.default_int64_field_name)
assert self.collection_wrap.num_entities == ct.default_nb
self.collection_wrap.load()
term_expr = f'{ct.default_bool_field_name} in [True]'
res, _ = self.collection_wrap.query(term_expr, output_fields=[ct.default_bool_field_name])
assert len(res) == ct.default_nb / 2
        assert set(res[0].keys()) == {ct.default_int64_field_name, ct.default_bool_field_name}
@pytest.mark.tags(CaseLabel.L2)
def test_query_expr_by_int8_field(self):
"""
target: test query by int8 field
method: 1.create and insert with [int64, float, int8, float_vec] fields
2.query by int8 field, and output all scalar fields
expected: verify query result
"""
self._connect()
# construct collection from dataFrame according to [int64, float, int8, float_vec]
df = cf.gen_default_dataframe_data()
int8_values = pd.Series(data=[np.int8(i) for i in range(ct.default_nb)], dtype="int8")
df.insert(2, ct.default_int8_field_name, int8_values)
self.collection_wrap.construct_from_dataframe(cf.gen_unique_str(prefix), df,
primary_field=ct.default_int64_field_name)
assert self.collection_wrap.num_entities == ct.default_nb
# query expression
term_expr = f'{ct.default_int8_field_name} in {[0]}'
# expected query result
res = []
# int8 range [-128, 127] so when nb=1200, there are many repeated int8 values equal to 0
for i in range(0, ct.default_nb, 256):
res.extend(df.iloc[i:i + 1, :-1].to_dict('records'))
self.collection_wrap.load()
self.collection_wrap.query(term_expr, output_fields=["*"],
check_task=CheckTasks.check_query_results, check_items={exp_res: res})
@pytest.mark.tags(CaseLabel.L1)
def test_query_expr_wrong_term_keyword(self):
"""
target: test query with wrong term expr keyword
method: query with wrong keyword term expr
expected: raise exception
"""
collection_w, vectors = self.init_collection_general(prefix, insert_data=True)[0:2]
expr_1 = f'{ct.default_int64_field_name} inn [1, 2]'
error_1 = {ct.err_code: 1, ct.err_msg: f'unexpected token Identifier("inn")'}
collection_w.query(expr_1, check_task=CheckTasks.err_res, check_items=error_1)
expr_3 = f'{ct.default_int64_field_name} in not [1, 2]'
error_3 = {ct.err_code: 1, ct.err_msg: 'right operand of the InExpr must be array'}
collection_w.query(expr_3, check_task=CheckTasks.err_res, check_items=error_3)
@pytest.mark.tags(CaseLabel.L1)
@pytest.mark.parametrize("field", [ct.default_int64_field_name, ct.default_float_field_name])
def test_query_expr_not_in_term(self, field):
"""
target: test query with `not in` expr
method: query with not in expr
expected: verify query result
"""
self._connect()
df = cf.gen_default_dataframe_data()
self.collection_wrap.construct_from_dataframe(cf.gen_unique_str(prefix), df,
primary_field=ct.default_int64_field_name)
assert self.collection_wrap.num_entities == ct.default_nb
self.collection_wrap.load()
values = df[field].tolist()
pos = 100
term_expr = f'{field} not in {values[pos:]}'
res = df.iloc[:pos, :2].to_dict('records')
self.collection_wrap.query(term_expr, output_fields=["*"],
check_task=CheckTasks.check_query_results, check_items={exp_res: res})
@pytest.mark.tags(CaseLabel.L2)
@pytest.mark.parametrize("pos", [0, ct.default_nb])
def test_query_expr_not_in_empty_and_all(self, pos):
"""
target: test query with `not in` expr
method: query with `not in` expr for (non)empty collection
expected: verify query result
"""
self._connect()
df = cf.gen_default_dataframe_data()
self.collection_wrap.construct_from_dataframe(cf.gen_unique_str(prefix), df,
primary_field=ct.default_int64_field_name)
assert self.collection_wrap.num_entities == ct.default_nb
self.collection_wrap.load()
int64_values = df[ct.default_int64_field_name].tolist()
term_expr = f'{ct.default_int64_field_name} not in {int64_values[pos:]}'
res = df.iloc[:pos, :1].to_dict('records')
self.collection_wrap.query(term_expr, check_task=CheckTasks.check_query_results, check_items={exp_res: res})
    @pytest.mark.tags(CaseLabel.L1)
def test_query_expr_random_values(self):
"""
target: test query with random filter values
method: query with random filter values, like [0, 2, 4, 3]
expected: correct query result
"""
self._connect()
df = cf.gen_default_dataframe_data(nb=100)
log.debug(df.head(5))
self.collection_wrap.construct_from_dataframe(cf.gen_unique_str(prefix), df,
primary_field=ct.default_int64_field_name)
assert self.collection_wrap.num_entities == 100
self.collection_wrap.load()
# random_values = [random.randint(0, ct.default_nb) for _ in range(4)]
random_values = [0, 2, 4, 3]
term_expr = f'{ct.default_int64_field_name} in {random_values}'
res = df.iloc[random_values, :1].to_dict('records')
self.collection_wrap.query(term_expr, check_task=CheckTasks.check_query_results, check_items={exp_res: res})
    @pytest.mark.tags(CaseLabel.L1)
def test_query_expr_not_in_random(self):
"""
target: test query with fixed filter values
method: query with fixed filter values
expected: correct query result
"""
self._connect()
df = cf.gen_default_dataframe_data(nb=50)
log.debug(df.head(5))
self.collection_wrap.construct_from_dataframe(cf.gen_unique_str(prefix), df,
primary_field=ct.default_int64_field_name)
assert self.collection_wrap.num_entities == 50
self.collection_wrap.load()
random_values = [i for i in range(10, 50)]
log.debug(f'random values: {random_values}')
random.shuffle(random_values)
term_expr = f'{ct.default_int64_field_name} not in {random_values}'
res = df.iloc[:10, :1].to_dict('records')
self.collection_wrap.query(term_expr, check_task=CheckTasks.check_query_results, check_items={exp_res: res})
@pytest.mark.tags(CaseLabel.L1)
def test_query_expr_non_array_term(self):
"""
target: test query with non-array term expr
method: query with non-array term expr
expected: raise exception
"""
exprs = [f'{ct.default_int64_field_name} in 1',
f'{ct.default_int64_field_name} in "in"',
f'{ct.default_int64_field_name} in (mn)']
collection_w, vectors = self.init_collection_general(prefix, insert_data=True)[0:2]
error = {ct.err_code: 1, ct.err_msg: "right operand of the InExpr must be array"}
for expr in exprs:
collection_w.query(expr, check_task=CheckTasks.err_res, check_items=error)
@pytest.mark.tags(CaseLabel.L1)
def test_query_expr_empty_term_array(self):
"""
target: test query with empty array term expr
method: query with empty term expr
expected: empty result
"""
term_expr = f'{ct.default_int64_field_name} in []'
collection_w, vectors = self.init_collection_general(prefix, insert_data=True)[0:2]
res, _ = collection_w.query(term_expr)
assert len(res) == 0
@pytest.mark.tags(CaseLabel.L1)
def test_query_expr_inconsistent_mix_term_array(self):
"""
target: test query with term expr that field and array are inconsistent or mix type
method: 1.query with int field and float values
2.query with term expr that has int and float type value
expected: raise exception
"""
collection_w = self.init_collection_wrap(cf.gen_unique_str(prefix))
int_values = [[1., 2.], [1, 2.]]
error = {ct.err_code: 1, ct.err_msg: "type mismatch"}
for values in int_values:
term_expr = f'{ct.default_int64_field_name} in {values}'
collection_w.query(term_expr, check_task=CheckTasks.err_res, check_items=error)
@pytest.mark.tags(CaseLabel.L1)
def test_query_expr_non_constant_array_term(self):
"""
target: test query with non-constant array term expr
method: query with non-constant array expr
expected: raise exception
"""
collection_w, vectors = self.init_collection_general(prefix, insert_data=True)[0:2]
constants = [[1], (), {}]
error = {ct.err_code: 1, ct.err_msg: "unsupported leaf node"}
for constant in constants:
term_expr = f'{ct.default_int64_field_name} in [{constant}]'
collection_w.query(term_expr, check_task=CheckTasks.err_res, check_items=error)
@pytest.mark.tags(CaseLabel.L1)
def test_query_output_field_none_or_empty(self):
"""
target: test query with none and empty output field
method: query with output field=None, field=[]
expected: return primary field
"""
collection_w, vectors = self.init_collection_general(prefix, insert_data=True)[0:2]
for fields in [None, []]:
res, _ = collection_w.query(default_term_expr, output_fields=fields)
assert list(res[0].keys()) == [ct.default_int64_field_name]
@pytest.mark.tags(CaseLabel.L0)
def test_query_output_one_field(self):
"""
target: test query with output one field
method: query with output one field
expected: return one field
"""
collection_w, vectors = self.init_collection_general(prefix, insert_data=True)[0:2]
res, _ = collection_w.query(default_term_expr, output_fields=[ct.default_float_field_name])
assert set(res[0].keys()) == set([ct.default_int64_field_name, ct.default_float_field_name])
@pytest.mark.tags(CaseLabel.L1)
def test_query_output_all_fields(self):
"""
target: test query with none output field
method: query with output field=None
expected: return all fields
"""
collection_w = self.init_collection_wrap(name=cf.gen_unique_str(prefix))
df = cf.gen_default_dataframe_data()
collection_w.insert(df)
assert collection_w.num_entities == ct.default_nb
all_fields = [ct.default_int64_field_name, ct.default_float_field_name, ct.default_float_vec_field_name]
res = df.iloc[:2].to_dict('records')
collection_w.load()
actual_res, _ = collection_w.query(default_term_expr, output_fields=all_fields,
check_task=CheckTasks.check_query_results,
check_items={exp_res: res, "with_vec": True})
assert set(actual_res[0].keys()) == set(all_fields)
@pytest.mark.tags(CaseLabel.L1)
def test_query_output_float_vec_field(self):
"""
target: test query with vec output field
method: specify vec field as output field
expected: return primary field and vec field
"""
collection_w = self.init_collection_wrap(name=cf.gen_unique_str(prefix))
df = cf.gen_default_dataframe_data()
collection_w.insert(df)
assert collection_w.num_entities == ct.default_nb
fields = [[ct.default_float_vec_field_name], [ct.default_int64_field_name, ct.default_float_vec_field_name]]
res = df.loc[:1, [ct.default_int64_field_name, ct.default_float_vec_field_name]].to_dict('records')
collection_w.load()
for output_fields in fields:
collection_w.query(default_term_expr, output_fields=output_fields,
check_task=CheckTasks.check_query_results,
check_items={exp_res: res, "with_vec": True})
@pytest.mark.tags(CaseLabel.L1)
@pytest.mark.parametrize("vec_fields", [[cf.gen_float_vec_field(name="float_vector1")]])
def test_query_output_multi_float_vec_field(self, vec_fields):
"""
target: test query and output multi float vec fields
method: a.specify multi vec field as output
b.specify output_fields with wildcard %
expected: verify query result
"""
# init collection with two float vector fields
schema = cf.gen_schema_multi_vector_fields(vec_fields)
collection_w = self.init_collection_wrap(name=cf.gen_unique_str(prefix), schema=schema)
df = cf.gen_dataframe_multi_vec_fields(vec_fields=vec_fields)
collection_w.insert(df)
assert collection_w.num_entities == ct.default_nb
# query with two vec output_fields
output_fields = [ct.default_int64_field_name, ct.default_float_vec_field_name]
for vec_field in vec_fields:
output_fields.append(vec_field.name)
res = df.loc[:1, output_fields].to_dict('records')
collection_w.load()
collection_w.query(default_term_expr, output_fields=output_fields,
check_task=CheckTasks.check_query_results,
check_items={exp_res: res, "with_vec": True})
@pytest.mark.tags(CaseLabel.L1)
@pytest.mark.parametrize("vec_fields", [[cf.gen_binary_vec_field()],
[cf.gen_binary_vec_field(), cf.gen_binary_vec_field("binary_vec1")]])
def test_query_output_mix_float_binary_field(self, vec_fields):
"""
target: test query and output mix float and binary vec fields
method: a.specify mix vec field as output
b.specify output_fields with wildcard %
expected: output binary vector and float vec
"""
# init collection with two float vector fields
schema = cf.gen_schema_multi_vector_fields(vec_fields)
collection_w = self.init_collection_wrap(name=cf.gen_unique_str(prefix), schema=schema)
df = cf.gen_dataframe_multi_vec_fields(vec_fields=vec_fields)
collection_w.insert(df)
assert collection_w.num_entities == ct.default_nb
# query with two vec output_fields
output_fields = [ct.default_int64_field_name, ct.default_float_vec_field_name]
for vec_field in vec_fields:
output_fields.append(vec_field.name)
res = df.loc[:1, output_fields].to_dict('records')
collection_w.load()
collection_w.query(default_term_expr, output_fields=output_fields,
check_task=CheckTasks.check_query_results,
check_items={exp_res: res, "with_vec": True})
# query with wildcard %
collection_w.query(default_term_expr, output_fields=["%"],
check_task=CheckTasks.check_query_results,
check_items={exp_res: res, "with_vec": True})
@pytest.mark.tags(CaseLabel.L1)
def test_query_output_binary_vec_field(self):
"""
target: test query with binary vec output field
method: specify binary vec field as output field
expected: return primary field and binary vec field
"""
collection_w, vectors = self.init_collection_general(prefix, insert_data=True, is_binary=True)[0:2]
fields = [[ct.default_binary_vec_field_name], [ct.default_int64_field_name, ct.default_binary_vec_field_name]]
for output_fields in fields:
res, _ = collection_w.query(default_term_expr, output_fields=output_fields)
assert list(res[0].keys()) == fields[-1]
@pytest.mark.tags(CaseLabel.L1)
def test_query_output_primary_field(self):
"""
target: test query with output field only primary field
method: specify int64 primary field as output field
expected: return int64 field
"""
collection_w, vectors = self.init_collection_general(prefix, insert_data=True)[0:2]
res, _ = collection_w.query(default_term_expr, output_fields=[ct.default_int64_field_name])
assert list(res[0].keys()) == [ct.default_int64_field_name]
@pytest.mark.tags(CaseLabel.L1)
def test_query_output_not_existed_field(self):
"""
target: test query output not existed field
method: query with not existed output field
expected: raise exception
"""
collection_w, vectors = self.init_collection_general(prefix, insert_data=True)[0:2]
error = {ct.err_code: 1, ct.err_msg: 'Field int not exist'}
output_fields = [["int"], [ct.default_int64_field_name, "int"]]
for fields in output_fields:
collection_w.query(default_term_expr, output_fields=fields, check_task=CheckTasks.err_res,
check_items=error)
@pytest.mark.tags(CaseLabel.L2)
@pytest.mark.xfail(reason="exception not MilvusException")
def test_query_invalid_output_fields(self):
"""
target: test query with invalid output fields
method: query with invalid field fields
expected: raise exception
"""
collection_w, vectors = self.init_collection_general(prefix, insert_data=True)[0:2]
output_fields = ["12-s", 1, [1, "2", 3], (1,), {1: 1}]
error = {ct.err_code: 0, ct.err_msg: f'Invalid query format. \'output_fields\' must be a list'}
for fields in output_fields:
collection_w.query(default_term_expr, output_fields=fields, check_task=CheckTasks.err_res,
check_items=error)
@pytest.mark.tags(CaseLabel.L0)
def test_query_output_fields_simple_wildcard(self):
"""
target: test query output_fields with simple wildcard (* and %)
method: specify output_fields as "*" and "*", "%"
expected: output all scale field; output all fields
"""
# init collection with fields: int64, float, float_vec, float_vector1
collection_w, df = self.init_multi_fields_collection_wrap(cf.gen_unique_str(prefix))
collection_w.load()
# query with wildcard scale(*)
output_fields = [ct.default_int64_field_name, ct.default_float_field_name]
res = df.loc[:1, output_fields].to_dict('records')
collection_w.query(default_term_expr, output_fields=["*"],
check_task=CheckTasks.check_query_results,
check_items={exp_res: res})
# query with wildcard %
output_fields2 = [ct.default_int64_field_name, ct.default_float_vec_field_name, ct.another_float_vec_field_name]
res2 = df.loc[:1, output_fields2].to_dict('records')
collection_w.query(default_term_expr, output_fields=["%"],
check_task=CheckTasks.check_query_results,
check_items={exp_res: res2, "with_vec": True})
# query with wildcard all fields: vector(%) and scale(*)
res3 = df.iloc[:2].to_dict('records')
collection_w.query(default_term_expr, output_fields=["*", "%"],
check_task=CheckTasks.check_query_results,
check_items={exp_res: res3, "with_vec": True})
@pytest.mark.tags(CaseLabel.L1)
def test_query_output_fields_part_scale_wildcard(self):
"""
target: test query output_fields with part wildcard
method: specify output_fields as wildcard and part field
expected: verify query result
"""
# init collection with fields: int64, float, float_vec, float_vector1
collection_w, df = self.init_multi_fields_collection_wrap(cf.gen_unique_str(prefix))
# query with output_fields=["*", float_vector)
res = df.iloc[:2, :3].to_dict('records')
collection_w.load()
collection_w.query(default_term_expr, output_fields=["*", ct.default_float_vec_field_name],
check_task=CheckTasks.check_query_results,
check_items={exp_res: res, "with_vec": True})
# query with output_fields=["*", float)
res2 = df.iloc[:2, :2].to_dict('records')
collection_w.load()
collection_w.query(default_term_expr, output_fields=["*", ct.default_float_field_name],
check_task=CheckTasks.check_query_results,
check_items={exp_res: res2})
@pytest.mark.tags(CaseLabel.L1)
def test_query_output_fields_part_vector_wildcard(self):
"""
target: test query output_fields with part wildcard
method: specify output_fields as wildcard and part field
expected: verify query result
"""
# init collection with fields: int64, float, float_vec, float_vector1
collection_w, df = self.init_multi_fields_collection_wrap(cf.gen_unique_str(prefix))
collection_w.load()
# query with output_fields=["%", float), expected: all fields
res = df.iloc[:2].to_dict('records')
collection_w.query(default_term_expr, output_fields=["%", ct.default_float_field_name],
check_task=CheckTasks.check_query_results,
check_items={exp_res: res, "with_vec": True})
# query with output_fields=["%", float_vector), expected: int64, float_vector, float_vector1
output_fields = [ct.default_int64_field_name, ct.default_float_vec_field_name, ct.another_float_vec_field_name]
res2 = df.loc[:1, output_fields].to_dict('records')
collection_w.query(default_term_expr, output_fields=["%", ct.default_float_vec_field_name],
check_task=CheckTasks.check_query_results,
check_items={exp_res: res2, "with_vec": True})
@pytest.mark.tags(CaseLabel.L1)
@pytest.mark.parametrize("output_fields", [["*%"], ["**"], ["*", "@"]])
def test_query_invalid_wildcard(self, output_fields):
"""
target: test query with invalid output wildcard
method: output_fields is invalid output wildcard
expected: raise exception
"""
# init collection with fields: int64, float, float_vec, float_vector1
collection_w, df = self.init_multi_fields_collection_wrap(cf.gen_unique_str(prefix))
collection_w.load()
# query with invalid output_fields
error = {ct.err_code: 1, ct.err_msg: f"Field {output_fields[-1]} not exist"}
collection_w.query(default_term_expr, output_fields=output_fields,
check_task=CheckTasks.err_res, check_items=error)
@pytest.mark.tags(CaseLabel.L0)
def test_query_partition(self):
"""
target: test query on partition
method: create a partition and query
expected: verify query result
"""
collection_w = self.init_collection_wrap(name=cf.gen_unique_str(prefix))
partition_w = self.init_partition_wrap(collection_wrap=collection_w)
df = cf.gen_default_dataframe_data(ct.default_nb)
partition_w.insert(df)
assert collection_w.num_entities == ct.default_nb
partition_w.load()
res = df.iloc[:2, :1].to_dict('records')
collection_w.query(default_term_expr, partition_names=[partition_w.name],
check_task=CheckTasks.check_query_results, check_items={exp_res: res})
@pytest.mark.tags(CaseLabel.L1)
def test_query_partition_without_loading(self):
"""
target: test query on partition without loading
method: query on partition and no loading
expected: raise exception
"""
collection_w = self.init_collection_wrap(name=cf.gen_unique_str(prefix))
partition_w = self.init_partition_wrap(collection_wrap=collection_w)
df = cf.gen_default_dataframe_data(ct.default_nb)
partition_w.insert(df)
assert partition_w.num_entities == ct.default_nb
error = {ct.err_code: 1, ct.err_msg: f'collection {collection_w.name} was not loaded into memory'}
collection_w.query(default_term_expr, partition_names=[partition_w.name],
check_task=CheckTasks.err_res, check_items=error)
@pytest.mark.tags(CaseLabel.L1)
def test_query_default_partition(self):
"""
target: test query on default partition
method: query on default partition
expected: verify query result
"""
collection_w, vectors = self.init_collection_general(prefix, insert_data=True)[0:2]
res = vectors[0].iloc[:2, :1].to_dict('records')
collection_w.query(default_term_expr, partition_names=[ct.default_partition_name],
check_task=CheckTasks.check_query_results, check_items={exp_res: res})
@pytest.mark.tags(CaseLabel.L1)
def test_query_empty_partition(self):
"""
target: test query on empty partition
method: query on a empty collection
expected: empty query result
"""
collection_w = self.init_collection_wrap(name=cf.gen_unique_str(prefix))
partition_w = self.init_partition_wrap(collection_wrap=collection_w)
assert partition_w.is_empty
partition_w.load()
res, _ = collection_w.query(default_term_expr, partition_names=[partition_w.name])
assert len(res) == 0
@pytest.mark.tags(CaseLabel.L1)
def test_query_not_existed_partition(self):
"""
target: test query on a not existed partition
method: query on not existed partition
expected: raise exception
"""
collection_w = self.init_collection_wrap(cf.gen_unique_str(prefix))
collection_w.load()
partition_names = cf.gen_unique_str()
error = {ct.err_code: 1, ct.err_msg: f'PartitonName: {partition_names} not found'}
collection_w.query(default_term_expr, partition_names=[partition_names],
check_task=CheckTasks.err_res, check_items=error)
class TestQueryOperation(TestcaseBase):
"""
******************************************************************
The following cases are used to test query interface operations
******************************************************************
"""
@pytest.mark.tags(CaseLabel.L2)
# @pytest.mark.parametrize("collection_name", [cf.gen_unique_str(prefix)])
def test_query_without_connection(self):
"""
target: test query without connection
method: close connect and query
expected: raise exception
"""
# init a collection with default connection
collection_w = self.init_collection_wrap(name=cf.gen_unique_str(prefix))
# remove default connection
self.connection_wrap.remove_connection(alias=DefaultConfig.DEFAULT_USING)
# list connection to check
self.connection_wrap.list_connections(check_task=ct.CheckTasks.ccr, check_items={ct.list_content: []})
# query after remove default connection
collection_w.query(default_term_expr, check_task=CheckTasks.err_res,
check_items={ct.err_code: 0, ct.err_msg: cem.ConnectFirst})
@pytest.mark.tags(CaseLabel.L1)
# @pytest.mark.parametrize("collection_name, data",
# [(cf.gen_unique_str(prefix), cf.gen_default_list_data(ct.default_nb))])
def test_query_without_loading(self):
"""
target: test query without loading
method: no loading before query
expected: raise exception
"""
# init a collection with default connection
collection_name = cf.gen_unique_str(prefix)
collection_w = self.init_collection_wrap(name=collection_name)
# insert data to collection
collection_w.insert(data=cf.gen_default_list_data(ct.default_nb))
# check number of entities and that method calls the flush interface
assert collection_w.num_entities == ct.default_nb
# query without load
collection_w.query(default_term_expr, check_task=CheckTasks.err_res,
check_items={ct.err_code: 1, ct.err_msg: clem.CollNotLoaded % collection_name})
@pytest.mark.tags(CaseLabel.L1)
@pytest.mark.parametrize("term_expr", [f'{ct.default_int64_field_name} in [0]'])
def test_query_expr_single_term_array(self, term_expr):
"""
target: test query with single array term expr
method: query with single array value
expected: query result is one entity
"""
# init a collection and insert data
collection_w, vectors, binary_raw_vectors = self.init_collection_general(prefix, insert_data=True)[0:3]
# query the first row of data
check_vec = vectors[0].iloc[:, [0]][0:1].to_dict('records')
collection_w.query(term_expr, check_task=CheckTasks.check_query_results, check_items={exp_res: check_vec})
@pytest.mark.tags(CaseLabel.L1)
@pytest.mark.parametrize("term_expr", [f'{ct.default_int64_field_name} in [0]'])
    def test_query_binary_expr_single_term_array(self, term_expr):
"""
target: test query with single array term expr
method: query with single array value
expected: query result is one entity
"""
# init a collection and insert data
collection_w, vectors, binary_raw_vectors = self.init_collection_general(prefix, insert_data=True,
is_binary=True)[0:3]
# query the first row of data
check_vec = vectors[0].iloc[:, [0]][0:1].to_dict('records')
collection_w.query(term_expr, check_task=CheckTasks.check_query_results, check_items={exp_res: check_vec})
@pytest.mark.tags(CaseLabel.L2)
def test_query_expr_all_term_array(self):
"""
target: test query with all array term expr
method: query with all array value
expected: verify query result
"""
# init a collection and insert data
collection_w, vectors, binary_raw_vectors = self.init_collection_general(prefix, insert_data=True)[0:3]
# data preparation
int_values = vectors[0][ct.default_int64_field_name].values.tolist()
term_expr = f'{ct.default_int64_field_name} in {int_values}'
check_vec = vectors[0].iloc[:, [0]][0:len(int_values)].to_dict('records')
# query all array value
collection_w.query(term_expr, check_task=CheckTasks.check_query_results, check_items={exp_res: check_vec})
@pytest.mark.tags(CaseLabel.L1)
def test_query_expr_half_term_array(self):
"""
target: test query with half array term expr
method: query with half array value
expected: verify query result
"""
half = ct.default_nb // 2
collection_w, partition_w, df_partition, df_default = self.insert_entities_into_two_partitions_in_half(half)
int_values = df_default[ct.default_int64_field_name].values.tolist()
term_expr = f'{ct.default_int64_field_name} in {int_values}'
res, _ = collection_w.query(term_expr)
assert len(res) == len(int_values)
@pytest.mark.tags(CaseLabel.L2)
def test_query_expr_repeated_term_array(self):
"""
target: test query with repeated term array on primary field with unique value
method: query with repeated array value
expected: return hit entities, no repeated
"""
collection_w, vectors, binary_raw_vectors = self.init_collection_general(prefix, insert_data=True)[0:3]
int_values = [0, 0, 0, 0]
term_expr = f'{ct.default_int64_field_name} in {int_values}'
res, _ = collection_w.query(term_expr)
assert len(res) == 1
assert res[0][ct.default_int64_field_name] == int_values[0]
@pytest.mark.tags(CaseLabel.L1)
def test_query_dup_ids_dup_term_array(self):
"""
target: test query on duplicate primary keys with dup term array
method: 1.create collection and insert dup primary keys
2.query with dup term array
expected: todo
"""
collection_w = self.init_collection_wrap(name=cf.gen_unique_str(prefix))
df = cf.gen_default_dataframe_data(nb=100)
df[ct.default_int64_field_name] = 0
mutation_res, _ = collection_w.insert(df)
assert mutation_res.primary_keys == df[ct.default_int64_field_name].tolist()
collection_w.load()
term_expr = f'{ct.default_int64_field_name} in {[0, 0, 0]}'
res = df.iloc[:, :2].to_dict('records')
        collection_w.query(term_expr, output_fields=["*"], check_task=CheckTasks.check_query_results,
                           check_items={exp_res: res})
@pytest.mark.tags(CaseLabel.L0)
def test_query_after_index(self):
"""
target: test query after creating index
method: query after index
expected: query result is correct
"""
collection_w, vectors, binary_raw_vectors = self.init_collection_general(prefix, insert_data=True)[0:3]
default_field_name = ct.default_float_vec_field_name
collection_w.create_index(default_field_name, default_index_params)
collection_w.load()
int_values = [0]
term_expr = f'{ct.default_int64_field_name} in {int_values}'
check_vec = vectors[0].iloc[:, [0]][0:len(int_values)].to_dict('records')
collection_w.query(term_expr, check_task=CheckTasks.check_query_results, check_items={exp_res: check_vec})
@pytest.mark.tags(CaseLabel.L1)
def test_query_after_search(self):
"""
target: test query after search
method: query after search
expected: query result is correct
"""
limit = 1000
nb_old = 500
collection_w, vectors, binary_raw_vectors, insert_ids = \
self.init_collection_general(prefix, True, nb_old)
# 2. search for original data after load
vectors_s = [[random.random() for _ in range(ct.default_dim)] for _ in range(ct.default_nq)]
collection_w.search(vectors_s[:ct.default_nq], ct.default_float_vec_field_name,
ct.default_search_params, limit, "int64 >= 0",
check_task=CheckTasks.check_search_results,
check_items={"nq": ct.default_nq, "limit": nb_old, "ids": insert_ids})
# check number of entities and that method calls the flush interface
assert collection_w.num_entities == nb_old
term_expr = f'{ct.default_int64_field_name} in [0, 1]'
check_vec = vectors[0].iloc[:, [0]][0:2].to_dict('records')
collection_w.query(term_expr, check_task=CheckTasks.check_query_results, check_items={exp_res: check_vec})
@pytest.mark.tags(CaseLabel.L1)
def test_query_output_vec_field_after_index(self):
"""
target: test query output vec field after index
method: create index and specify vec field as output field
expected: return primary field and vec field
"""
collection_w = self.init_collection_wrap(name=cf.gen_unique_str(prefix))
df = cf.gen_default_dataframe_data(nb=5000)
collection_w.insert(df)
assert collection_w.num_entities == 5000
fields = [ct.default_int64_field_name, ct.default_float_vec_field_name]
collection_w.create_index(ct.default_float_vec_field_name, default_index_params)
assert collection_w.has_index()[0]
res = df.loc[:1, [ct.default_int64_field_name, ct.default_float_vec_field_name]].to_dict('records')
collection_w.load()
collection_w.query(default_term_expr, output_fields=fields,
check_task=CheckTasks.check_query_results,
check_items={exp_res: res, "with_vec": True})
@pytest.mark.tags(CaseLabel.L2)
def test_query_output_binary_vec_field_after_index(self):
"""
target: test query output vec field after index
method: create index and specify vec field as output field
expected: return primary field and vec field
"""
collection_w, vectors = self.init_collection_general(prefix, insert_data=True, is_binary=True)[0:2]
fields = [ct.default_int64_field_name, ct.default_binary_vec_field_name]
collection_w.create_index(ct.default_binary_vec_field_name, binary_index_params)
assert collection_w.has_index()[0]
res, _ = collection_w.query(default_term_expr, output_fields=[ct.default_binary_vec_field_name])
assert list(res[0].keys()) == fields
@pytest.mark.tags(CaseLabel.L2)
def test_query_partition_repeatedly(self):
"""
target: test query repeatedly on partition
method: query on partition twice
expected: verify query result
"""
# create connection
self._connect()
# init collection
collection_w = self.init_collection_wrap(name=cf.gen_unique_str(prefix))
# init partition
partition_w = self.init_partition_wrap(collection_wrap=collection_w)
# insert data to partition
df = cf.gen_default_dataframe_data(ct.default_nb)
partition_w.insert(df)
# check number of entities and that method calls the flush interface
assert collection_w.num_entities == ct.default_nb
# load partition
partition_w.load()
# query twice
res_one, _ = collection_w.query(default_term_expr, partition_names=[partition_w.name])
res_two, _ = collection_w.query(default_term_expr, partition_names=[partition_w.name])
assert res_one == res_two
@pytest.mark.tags(CaseLabel.L2)
def test_query_another_partition(self):
"""
target: test query another partition
method: 1. insert entities into two partitions
2.query on one partition and query result empty
expected: query result is empty
"""
half = ct.default_nb // 2
collection_w, partition_w, _, _ = self.insert_entities_into_two_partitions_in_half(half)
term_expr = f'{ct.default_int64_field_name} in [{half}]'
# half entity in _default partition rather than partition_w
collection_w.query(term_expr, partition_names=[partition_w.name], check_task=CheckTasks.check_query_results,
check_items={exp_res: []})
@pytest.mark.tags(CaseLabel.L2)
def test_query_multi_partitions_multi_results(self):
"""
target: test query on multi partitions and get multi results
method: 1.insert entities into two partitions
2.query on two partitions and query multi result
expected: query results from two partitions
"""
half = ct.default_nb // 2
collection_w, partition_w, _, _ = self.insert_entities_into_two_partitions_in_half(half)
term_expr = f'{ct.default_int64_field_name} in [{half - 1}, {half}]'
# half entity in _default, half-1 entity in partition_w
res, _ = collection_w.query(term_expr, partition_names=[ct.default_partition_name, partition_w.name])
assert len(res) == 2
@pytest.mark.tags(CaseLabel.L2)
def test_query_multi_partitions_single_result(self):
"""
target: test query on multi partitions and get single result
method: 1.insert into two partitions
2.query on two partitions and query single result
expected: query from two partitions and get single result
"""
half = ct.default_nb // 2
collection_w, partition_w, df_partition, df_default = self.insert_entities_into_two_partitions_in_half(half)
term_expr = f'{ct.default_int64_field_name} in [{half}]'
# half entity in _default
res, _ = collection_w.query(term_expr, partition_names=[ct.default_partition_name, partition_w.name])
assert len(res) == 1
assert res[0][ct.default_int64_field_name] == half
"""
******************************************************************
The following classes are copied from pymilvus test
******************************************************************
"""
def init_data(connect, collection, nb=ut.default_nb, partition_names=None, auto_id=True):
"""
Generate entities and add it in collection
"""
if nb == 3000:
insert_entities = default_entities
else:
insert_entities = ut.gen_entities(nb, is_normal=True)
if partition_names is None:
if auto_id:
res = connect.insert(collection, insert_entities)
else:
res = connect.insert(collection, insert_entities, ids=[i for i in range(nb)])
else:
if auto_id:
res = connect.insert(collection, insert_entities, partition_name=partition_names)
else:
res = connect.insert(collection, insert_entities, ids=[i for i in range(nb)],
partition_name=partition_names)
connect.flush([collection])
ids = res.primary_keys
return insert_entities, ids
class TestQueryBase:
"""
test Query interface
query(collection_name, expr, output_fields=None, partition_names=None, timeout=None)
"""
@pytest.fixture(
scope="function",
params=ut.gen_invalid_strs()
)
def get_collection_name(self, request):
yield request.param
@pytest.mark.tags(CaseLabel.L0)
def test_query_invalid(self, connect, collection):
"""
target: test query
method: query with term expr
expected: verify query result
"""
entities, ids = init_data(connect, collection)
assert len(ids) == ut.default_nb
connect.load_collection(collection)
term_expr = f'{default_int_field_name} in {entities[:default_pos]}'
with pytest.raises(Exception):
connect.query(collection, term_expr)
@pytest.mark.tags(CaseLabel.L0)
def test_query_valid(self, connect, collection):
"""
target: test query
method: query with term expr
expected: verify query result
"""
entities, ids = init_data(connect, collection)
assert len(ids) == ut.default_nb
connect.load_collection(collection)
term_expr = f'{default_int_field_name} in {ids[:default_pos]}'
res = connect.query(collection, term_expr, output_fields=["*", "%"])
assert len(res) == default_pos
for _id, index in enumerate(ids[:default_pos]):
if res[index][default_int_field_name] == entities[0]["values"][index]:
assert res[index][default_float_field_name] == entities[1]["values"][index]
res = connect.query(collection, term_expr, output_fields=[ut.default_float_vec_field_name])
assert len(res) == default_pos
for _id, index in enumerate(ids[:default_pos]):
if res[index][default_int_field_name] == entities[0]["values"][index]:
ut.assert_equal_vector(res[index][ut.default_float_vec_field_name], entities[2]["values"][index])
@pytest.mark.tags(CaseLabel.L0)
def test_query_collection_not_existed(self, connect):
"""
target: test query not existed collection
method: query not existed collection
expected: raise exception
"""
collection = "not_exist"
with pytest.raises(Exception):
connect.query(collection, default_term_expr)
@pytest.mark.tags(CaseLabel.L0)
def test_query_invalid_collection_name(self, connect, get_collection_name):
"""
target: test query with invalid collection name
method: query with invalid collection name
expected: raise exception
"""
collection_name = get_collection_name
with pytest.raises(Exception):
connect.query(collection_name, default_term_expr)
@pytest.mark.tags(CaseLabel.L0)
@pytest.mark.parametrize("expr", [1, "1", "12-s", "中文", [], {}, ()])
def test_query_expr_invalid_string(self, connect, collection, expr):
"""
target: test query with non-string expr
method: query with non-string expr, eg 1, [] ..
expected: raise exception
"""
connect.load_collection(collection)
with pytest.raises(Exception):
connect.query(collection, expr)
@pytest.mark.parametrize("fields", ut.gen_invalid_strs())
@pytest.mark.tags(CaseLabel.L0)
def test_query_invalid_output_fields(self, connect, collection, fields):
"""
target: test query with invalid output fields
method: query with invalid field fields
expected: raise exception
"""
init_data(connect, collection)
connect.load_collection(collection)
with pytest.raises(Exception):
connect.query(collection, default_term_expr, output_fields=[fields])
``` |
{
"source": "3ck0n/ifsort",
"score": 3
} |
#### File: 3ck0n/ifsort/analyse.py
```python
import os
import json
import pathlib
import datetime
"""
Analyse.py
Generate a proposal to refactor a recursive file structure.
Idea is to sort a chaotic file structure to a scheme like
- Archivename YYYY
- MM Month
- DD name of series
- Original files
"""
source = pathlib.Path.cwd() / 'sample' # source directory
target = pathlib.Path.cwd() / 'target' # target directory
filedict = [] # List with files
def get_modified_datetime(file: pathlib.Path):
"""
Returns a datetime with modified date of a given file.
"""
modified_timestamp = file.stat().st_mtime
modified = datetime.datetime.fromtimestamp(modified_timestamp)
return modified
def gen_folder_name(day, rpath: pathlib.Path):
    """
    Generates a target folder name for a relative path and a day.
Scheme is 'DD relative-path_separated_by_underscore'
"""
sday = str(day).zfill(2)
spath = str(rpath)
spath = spath.replace(' ', '-')
spath = spath.replace('\\', '_')
spath = spath.lower()
if spath == '.':
spath = 'Day'
return f"{sday} {spath}"
def gen_relativ_path(modified, folder, file):
    """Builds the relative target path 'Rohdaten YYYY\\MM Month\\DD name\\filename'."""
    rel_path = f"Rohdaten {modified.year}\\{modified.strftime('%m %B')}\\{folder}\\{file.name}"
    return rel_path
def analyse(source_directory, target_directory=pathlib.Path('.')):
    """
    Analyses a source_directory by creating a folder structure proposal for every file.
    Source file and proposed destination are recorded in the global filedict list so the
    proposal can be reviewed before any file is actually moved.
    """
for file in source_directory.rglob('*'):
filson = {}
print('.', end='')
if not file.is_dir():
rpath = file.parent.relative_to(source_directory)
modified = get_modified_datetime(file)
target_folder_name = gen_folder_name(modified.day, rpath)
filson['file'] = str(file)
filson['move'] = str(target_directory.joinpath(gen_relativ_path(modified, target_folder_name, file)))
filedict.append(filson)
print('!')
print(f'Analysing {source} recursively')
analyse(source, target)
print(f'Found {len(filedict)} files.')
with open("report.json", "w") as write_file:
json.dump(filedict, write_file, indent=4, sort_keys=True,)
print('Proposal saved in report.json.')
``` |
{
"source": "3cL1p5e7/ic",
"score": 2
} |
#### File: gitlab-ci/config/visualize.py
```python
import functools
import glob
import yaml
def process_config(filename):
with open(filename) as f:
config = yaml.load(f, Loader=yaml.SafeLoader)
nodes = []
edges = []
for name, pipeline in config.items():
if name in (
"include",
"stages",
"default",
"variables",
"before_script",
"after_script",
):
continue
attributes = {}
for attr in ("stage", "needs", "dependencies"):
if attr in pipeline and pipeline[attr]:
value = pipeline[attr]
if isinstance(value, list):
value = ",".join(value)
attributes[attr] = value
nodes.append((name, attributes))
extends = pipeline.get("extends", None)
if extends:
if not isinstance(extends, list):
extends = [extends]
for destination in extends:
edges.append((name, destination))
return nodes, edges
def process_all_configs(directory, out):
nodes = []
edges = []
for filename in glob.glob(f"{directory}/*.yml"):
n, e = process_config(filename)
nodes.extend(n)
edges.extend(e)
pr = functools.partial(print, end="", file=out)
pr('digraph G {\nsize="46,33!"\nratio="fill"\n')
for name, attributes in nodes:
pr(f'"{name}"')
if attributes:
pr(" [")
sep = ""
for attribute, value in attributes.items():
pr(f'{sep}{attribute}="{value}"')
sep = ", "
pr("]")
pr("\n")
for name, extends in edges:
pr(f'"{name}" -> "{extends}"\n')
pr("}\n")
if __name__ == "__main__":
import sys
directory = "."
if len(sys.argv) > 1:
directory = sys.argv[1]
process_all_configs(directory, sys.stdout)
```
#### File: gitlab-ci/src/ci.py
```python
import binascii
import hashlib
import logging
import os
import pathlib
import platform
import shlex
import subprocess
import time
from contextlib import contextmanager
from os import getenv
from os import path
from typing import Iterable
from typing import List
from typing import Literal
from typing import Optional
from typing import overload
from typing import TypeVar
import beeline
_here = path.realpath(__file__)
_top = path.realpath(path.join(_here, "../../.."))
@overload
def sh(*popenargs, pipe_to: Optional[str] = None, capture: Literal[True], **kwargs) -> str:
...
@overload
def sh(*popenargs, pipe_to: Optional[str] = None, capture: bool = False, **kwargs):
...
def sh(
*popenargs,
pipe_to: Optional[str] = None,
capture: bool = False,
**kwargs,
):
cmdline = list(popenargs)
cmdline_extra = ""
native_shell = kwargs.get("shell", False)
if capture:
assert pipe_to is None, "the `capture` and `pipe_to` arguments are mutually exclusive"
if native_shell:
assert pipe_to is None, "don't use `pipe_to` when shell=True, just use native pipe"
assert len(cmdline) == 1, "don't pass multiple arguments when shell=True, they will not be preserved"
if pipe_to is not None:
kwargs["stdout"] = open(pipe_to, "w")
cmdline_extra = f" > {pipe_to}"
if native_shell:
logging.info(f"$ {cmdline[0]}")
else:
logging.info(f"$ {shlex.join(cmdline)}{cmdline_extra}")
if capture:
return subprocess.run(cmdline, text=True, stdout=subprocess.PIPE, **kwargs).stdout.strip()
else:
subprocess.run(cmdline, **kwargs).check_returncode()
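# Editorial sketch (not part of the original source): typical calls to sh(), shown
# as comments so the module's behaviour is unchanged; the commands are illustrative.
#   sh("cargo", "build", "--release")                          # run, raise on non-zero exit
#   head = sh("git", "rev-parse", "HEAD", capture=True)        # capture stdout as a stripped str
#   sh("dmesg", pipe_to="dmesg.txt")                           # redirect stdout into a file
#   sh("ls rs | wc -l", shell=True)                            # one string, native shell pipe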
def mkdir_p(dir):
logging.info(f"$ mkdir -p {dir}")
pathlib.Path(dir).mkdir(parents=True, exist_ok=True)
T = TypeVar("T")
S = TypeVar("S")
def flatten(ls: Iterable[Iterable[T]]) -> List[T]:
return [item for sublist in ls for item in sublist]
# set up honeycomb API
key = getenv("BUILDEVENT_APIKEY", "none")
dataset = getenv("BUILDEVENT_DATASET", "local")
beeline.init(writekey=key, debug=key == "none", dataset=dataset)
def buildevent(name):
"""Return a beeline context manager which is prefilled with the trace_id and parent_id set by GitLab."""
root_pipeline_id = getenv("PARENT_PIPELINE_ID", "")
if root_pipeline_id == "":
root_pipeline_id = getenv("CI_PIPELINE_ID")
return beeline.tracer(name, trace_id=root_pipeline_id, parent_id=getenv("CI_JOB_ID"))
@contextmanager
def log_section(header: str, name: Optional[str] = None, collapsed: bool = True):
"""
Generate a collapsible GitLab CI log section. Only the section header, not the name,
is displayed to the user, so a random hex string will be generated and used as the
section name unless you want to specify a name yourself.
"""
ts = int(time.time())
name = binascii.b2a_hex(os.urandom(6)).decode("utf-8") if name is None else name
collapse = "[collapsed=true]" if collapsed else ""
print(f"\x1b[0Ksection_start:{ts}:{name}{collapse}\r\x1b[0K{header}", flush=True)
try:
yield
finally:
print(f"\x1b[0Ksection_end:{ts}:{name}\r\x1b[0K", flush=True)
def show_sccache_stats():
wrapper = getenv("RUSTC_WRAPPER")
if wrapper is not None:
with log_section("Click here to see the sccache stats"):
sh(wrapper, "--show-stats")
@contextmanager
def cwd(dir: str):
"""
Execute some code with the current working directory of `dir`.
* If you pass a relative path as `dir`, it will be interpreted relative to the top level of the source tree.
* If you pass an absolute path, it will be used verbatim.
Restores the previous working directory when the context ends.
"""
stored = os.getcwd()
logging.info(f"$ pushd {dir}")
newpath = dir if path.isabs(dir) else path.join(ENV.top, dir)
os.chdir(newpath)
try:
yield
finally:
logging.info("$ popd")
os.chdir(stored)
def sha256(string: str) -> str:
return hashlib.sha256(string.encode("utf-8")).hexdigest()
class Env:
"""Stores a bunch of useful globals."""
def __init__(self) -> None:
target = getenv("CARGO_BUILD_TARGET")
if target is None:
sys = platform.system()
if sys == "Linux":
target = "x86_64-unknown-linux-gnu"
elif sys == "Darwin":
target = "x86_64-apple-darwin"
else:
raise Exception("unable to guess rust host triple")
self._cargo_build_target = target
self._top = _top
self._cargo_target_dir = getenv("CARGO_TARGET_DIR", path.join(self._top, "rs/target"))
self._is_gitlab = getenv("CI_JOB_ID", "") != ""
self._build_id = None
@property
def build_target(self):
return self._cargo_build_target
@property
def target(self):
return self.build_target
@property
def cargo_target_dir(self):
return self._cargo_target_dir
@property
def target_dir(self):
return self._cargo_target_dir
@property
def platform_target_dir(self):
"""Equivalent to path.join(target_dir, build_target)."""
return path.join(self._cargo_target_dir, self._cargo_build_target)
@property
def top(self):
"""The top level directory (where .git is)."""
return self._top
@property
def is_linux(self):
return self.build_target == "x86_64-unknown-linux-gnu"
@property
def is_macos(self):
return self.build_target == "x86_64-apple-darwin"
@property
def is_gitlab(self):
return self._is_gitlab
@property
def is_local(self):
return not self._is_gitlab
@property
def build_id(self):
if self._build_id is None:
self._build_id = sh("gitlab-ci/src/artifacts/find-build-id.sh", cwd=self._top, capture=True)
return self._build_id
ENV = Env()
```
#### File: src/gitlab_runners_check/create-gitlab-runner-jobs.py
```python
import os
import git
from gitlab import Gitlab
from jinja2 import Template
""" Generate a gitlab child pipeline dynamically in order to run a test on all idx runners"""
def main():
git_repo = git.Repo(".", search_parent_directories=True)
git_root = git_repo.git.rev_parse("--show-toplevel")
location = os.path.join(git_root, "gitlab-ci/src/gitlab_runners_check/runners2.j2")
template_file = open(location)
job_template = template_file.read()
template_file.close()
if not os.getenv("GITLAB_TOKEN"):
print("GITLAB_TOKEN env var not set")
        sys.exit(1)
token = os.getenv("GITLAB_TOKEN")
gl = Gitlab("https://gitlab.com", private_token=token, per_page=100)
# Gather all our runners into a nice list
dfinity_runners = []
for page in gl.runners.list(per_page=100, as_list=False, retry_transient_errors=True, scope="active"):
dfinity_runners.append(page)
idx_runners_list = []
for runner in dfinity_runners:
myrunner = gl.runners.get(runner.id)
tag_list = myrunner.tag_list
# Do not include a non-idx gitlab runner
if "dfinity" in tag_list:
idx_runners_list.append({"id": runner.id, "description": runner.description, "tags": tag_list})
# Render the template
dynamic_template = Template(job_template)
x = dynamic_template.render(items=idx_runners_list)
# Write the templated yml to our file for use as an artifact
runners_file = open("runners.yml", "w")
runners_file.write(x)
runners_file.close()
if __name__ == "__main__":
main()
```
#### File: src/job_scripts/guest_os_diskimg.py
```python
import logging
from os import environ
from os import getenv
from ci import buildevent
from ci import cwd
from ci import ENV
from ci import sh
from ci import sha256
def run():
build_extra_args = getenv("BUILD_EXTRA_ARGS", "")
build_extra_suffix = getenv("BUILD_EXTRA_SUFFIX", "")
build_out = f"build-out/disk-img{build_extra_suffix}"
build_tmp = f"build-tmp{build_extra_suffix}"
upload_target = f"guest-os/disk-img{build_extra_suffix}"
version = ENV.build_id
logging.info(f"Build ID: {version}")
with cwd("ic-os/guestos"):
# lib/guest-os-diskimg.sh fails if these are not set, which they aren't when running locally
offline_defaults = {"BUILD_EXTRA_ARGS": "", "BUILD_EXTRA_SUFFIX": "", "CI_JOB_ID": ""}
# override those variables with the already-set ones if there are any
script_env = {**offline_defaults, **environ.copy()}
sh(
"capsule",
"--passive",
"-v",
"-t",
sha256(f"{build_extra_args}{build_extra_suffix}\n"),
"-t",
version,
"-o",
f"{build_out}/**/*",
"--",
f"{ENV.top}/gitlab-ci/src/job_scripts/lib/guest-os-diskimg.sh",
build_out,
build_tmp,
upload_target,
version,
environ.get("CDPRNET", ""),
env=script_env,
)
if ENV.is_gitlab:
with buildevent("rclone"):
sh(
f"{ENV.top}/gitlab-ci/src/artifacts/rclone_upload.py",
f"--version={version}",
"--verbose",
build_out,
upload_target,
)
logging.info(f"Build ID (real or fake git revision): {version}")
```
#### File: src/log_metrics/log_metrics.py
```python
import datetime
import http.client
import json
import os
import random
import sys
import time
import traceback
import urllib.request
from pathlib import Path
from pprint import pprint
from typing import Any
from typing import Dict
from notify_slack import send_message
ES_NODES = ["elasticsearch-node-%s.dfinity.systems" % i for i in range(3)]
def get_data_from_files(data_dir: str, default_index: str) -> Dict[str, Dict[str, Any]]:
"""
Load data from files.
If a loaded JSON object contains a key called `__index`, the value of that key is
used as the index name, and all the data is included directly as-is into the ES document.
If the index name does not exist, the data is added to `default_index`, under a key
with the same name as the file minus the `.json` extension.
"""
outdir = Path(data_dir)
if not outdir.is_dir():
print("The additional data directory does not exist, skipping...")
return {}
json_files = [x for x in outdir.iterdir() if x.is_file() and x.name.lower().endswith(".json")]
data = {default_index: {}}
for json_file in json_files:
with json_file.open() as j:
try:
# Name the key the same as the filename without the extension.
file_data = json.load(j)
# Just to be on the safe side, if someone tries to send non-dict data
if not isinstance(file_data, dict):
file_data = {"data": file_data}
except Exception as e:
print(f"Could not load contents of {json_file.name}, skipping. Reason:\n%s" % e)
if "__index" in file_data:
# We have an index name.
index_name = file_data["__index"]
del file_data["__index"]
data[index_name] = file_data
else:
# No index name, use the default.
data[default_index][json_file.name[:-5]] = file_data
return data
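# Editorial sketch (not part of the original source): given a data_to_upload/
# directory containing the hypothetical files
#   benchmark.json -> {"duration_s": 12.3}
#   custom.json    -> {"__index": "my-index", "result": "ok"}
# get_data_from_files("data_to_upload", "gitlab-ci-metrics-2022") would return
#   {"gitlab-ci-metrics-2022": {"benchmark": {"duration_s": 12.3}},
#    "my-index": {"result": "ok"}}
# i.e. files without "__index" are nested under their filename in the default
# index, while files carrying "__index" are stored as-is under that index.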
def get_env_data() -> Dict[str, str]:
"""Get relevant metrics data from the environment."""
data = {}
for varname in [
"CI",
"CI_COMMIT_BEFORE_SHA",
"CI_COMMIT_BRANCH",
"CI_COMMIT_REF_NAME",
"CI_COMMIT_REF_PROTECTED",
"CI_COMMIT_SHA",
"CI_COMMIT_TAG",
"CI_COMMIT_TIMESTAMP",
"CI_CONCURRENT_ID",
"CI_CONCURRENT_PROJECT_ID",
"CI_ENVIRONMENT_NAME",
"CI_ENVIRONMENT_SLUG",
"CI_EXTERNAL_PULL_REQUEST_IID",
"CI_EXTERNAL_PULL_REQUEST_SOURCE_BRANCH_NAME",
"CI_EXTERNAL_PULL_REQUEST_SOURCE_BRANCH_SHA",
"CI_JOB_ID",
"CI_JOB_IMAGE",
"CI_JOB_MANUAL",
"CI_JOB_NAME",
"CI_JOB_STAGE",
"CI_JOB_STATUS",
"CI_MERGE_REQUEST_ID",
"CI_NODE_INDEX",
"CI_NODE_TOTAL",
"CI_PIPELINE_ID",
"CI_PIPELINE_SOURCE",
"CI_RUNNER_ID",
"GITLAB_USER_ID",
"GITLAB_USER_LOGIN",
"DISKIMG_BRANCH",
]:
if os.environ.get(varname):
data[varname] = os.environ[varname]
return data
def post_data(index_name: str, data: Dict[str, str]) -> http.client:
"""Post `data` to our ElasticSearch instance at `index_name`."""
# Shuffle the list so our attempts are in random order, instead of hammering
# hosts in order.
random.shuffle(ES_NODES)
exc = None
body = None
    # 5 attempts, 2 seconds apart
for i in range(5):
node = ES_NODES[i % len(ES_NODES)]
req = urllib.request.Request(
f"http://{node}:9200/{index_name}/_doc/",
data=json.dumps(data).encode(),
headers={"content-type": "application/json"},
)
try:
response = urllib.request.urlopen(req, timeout=30)
break
except urllib.error.HTTPError as e:
body = e.read().decode()
exc = e
except Exception as e:
exc = e
print("Request failed, retry in 60 seconds")
time.sleep(2)
else:
job_id = os.environ.get("CI_JOB_ID")
job_url = os.environ.get("CI_JOB_URL")
print("Max retries exceeded")
error_message = (
"ERROR: log-metrics could not send data to ElasticSearch for "
f"{job_url}|job {job_id}>. "
"Exception details:\n```%s```" % "".join(traceback.format_exception(None, exc, exc.__traceback__))
)
error_message += f"\n\nResponse body:\n```\n{body}\n```"
send_message(message=error_message, channel="precious-bots")
sys.exit(0)
return response
def main():
"""Program entry main."""
timestamp = datetime.datetime.now().isoformat()
args = {"timestamp": timestamp}
for arg in sys.argv[1:]:
k, v = arg.split("=", 1)
k, v = k.strip(), v.strip()
try:
# If the argument can be converted to an int, do that.
v = int(v)
except ValueError:
try:
# If the argument can be converted to a float, do that.
v = float(v)
except ValueError:
pass
args[k] = v
default_index_name = "gitlab-ci-metrics-%s" % datetime.date.today().year
data = {default_index_name: args}
env_data = get_env_data()
data[default_index_name].update(env_data)
data_from_files = get_data_from_files("data_to_upload", default_index=default_index_name)
for index_name, value in data_from_files.items():
if index_name in data:
# This can really only be the default index, but it felt wrong
# to do `if index_name == default_index_name`.
data[index_name].update(value)
else:
data[index_name] = value
# We need the env data (and timestamp) to be added to all found indexes
# so that we can join this data with other jobs
data[index_name].update(env_data)
data[index_name]["timestamp"] = timestamp
errors = False
for index_name, value in data.items():
index_name = index_name.replace(":", "-")
print(f"Posting data to {index_name}:")
pprint(value, depth=2)
response = post_data(index_name, value)
if 200 <= response.status < 300:
print("Posted successfully.")
else:
errors = True
print(f"There was an error while posting to {index_name}: {response.read()}")
if errors:
exit("There were some errors.")
if __name__ == "__main__":
main()
```
#### File: src/notify_slack/notify_slack.py
```python
import argparse
import http.client
import json
import logging
import os
import urllib.request
def send_message(
message: str,
channel: str = "#precious-bots",
webhook_url: str = os.environ.get("SLACK_WEBHOOK_URL"),
dry_run: bool = None, # If dry_run is not provided, run based on env var CI
) -> http.client:
"""
Send the `message` to the provided Slack `channel`.
When not running on the CI, will instead print the message on the console.
"""
if not webhook_url:
raise ValueError("SLACK_WEBHOOK_URL env var not found")
if not channel.startswith("#") and not channel.startswith("@"):
channel = "#" + channel
if dry_run is None:
if os.environ.get("CI"):
dry_run = False
else:
dry_run = True
if dry_run:
logging.info("Mock Slack send_message to channel '%s': '%s' ", channel, message)
else:
logging.info("Slack send_message to channel '%s': '%s' ", channel, message)
data = {
"text": message,
"channel": channel,
}
req = urllib.request.Request(
webhook_url,
data=json.dumps(data).encode(),
headers={"content-type": "application/json"},
)
try:
response = urllib.request.urlopen(req, timeout=30)
return response
except urllib.error.HTTPError as e:
body = e.read().decode()
logging.error("Slack send_message failed with HTTP response body: %s", body)
except Exception:
logging.error("Slack send_message could not send the requested message.")
def non_empty_string(value: str) -> str:
"""Ensure that the `value` is not empty."""
if not value:
raise argparse.ArgumentTypeError("Cannot proceed with an empty value: '%s'" % value)
return value
def main():
parser = argparse.ArgumentParser()
parser.add_argument(
"--webhook-url",
help="The Slack Webhook URL \
(default: environment variable SLACK_WEBHOOK_URL)",
type=non_empty_string,
nargs="?",
const=os.environ.get("SLACK_WEBHOOK_URL", ""),
default=os.environ.get("SLACK_WEBHOOK_URL", ""),
)
parser.add_argument(
"--channel",
help="The Slack channel name to which to post the message to \
(default: environment variable SLACK_CHANNEL)",
type=non_empty_string,
nargs="?",
const=os.environ.get("SLACK_CHANNEL", "#precious-bots"),
default=os.environ.get("SLACK_CHANNEL", "#precious-bots"),
)
parser.add_argument(
"--dry-run",
help="Whether to mock (log) sending Slack messages",
action="store_true",
)
parser.add_argument("--verbose", "-v", action="store_true", help="Verbose mode")
parser.add_argument("message", help="The message to post to Slack")
args = parser.parse_args()
if args.verbose:
logging.basicConfig(level=logging.DEBUG)
else:
logging.basicConfig(level=logging.INFO)
send_message(
message=args.message,
channel=args.channel,
webhook_url=args.webhook_url,
dry_run=args.dry_run,
)
if __name__ == "__main__":
main()
```
#### File: guestos/scripts/docker_extract.py
```python
import io
import json
import os
import shutil
import sys
import tarfile
def read_tar_contents(buffer):
"""Read tar file as map from filename -> content."""
with tarfile.open(fileobj=buffer, mode="r|*") as tf:
filemap = {}
for member in tf:
buf = member.tobuf() # noqa - no idea why buf is here
if member.type == tarfile.REGTYPE:
filemap[member.name] = tf.extractfile(member).read()
elif (member.type == tarfile.LNKTYPE) or (member.type == tarfile.SYMTYPE):
filemap[member.name] = member.linkname[3:]
return filemap
def get_layer_data(filemap):
"""Get the docker layer data from the filemap in correct order."""
manifest = json.loads(filemap["manifest.json"])
layers = manifest[0]["Layers"]
out = []
for layer in layers:
if isinstance(filemap[layer], str):
out.append(filemap[filemap[layer]])
else:
out.append(filemap[layer])
return tuple(out)
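# Editorial note (not part of the original source): the intended invocation is to
# stream a `docker save` tarball on stdin and pass the extraction root as the
# first argument, e.g.
#   docker save my-image | python docker_extract.py /tmp/rootfs
# The layers are then applied in manifest order, honouring .wh.* whiteout entries.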
target_dir = sys.argv[1]
filemap = read_tar_contents(sys.stdin.buffer)
layers = get_layer_data(filemap)
for layer in layers:
tf = tarfile.open(fileobj=io.BytesIO(layer), mode="r")
# Process all members in the tarfile. They are either ordinary
# dirs/files/symlinks to be extracted, or they are "white-out" files:
    # These direct the deletion of certain underlying files from the previous layer.
for member in tf:
basename = os.path.basename(member.path)
dirname = os.path.dirname(member.path)
if basename.startswith(".wh."):
# This is a whiteout. Delete the file / directory.
basename_target = basename[4:]
target = os.path.join(target_dir, dirname, basename_target)
if os.path.isdir(target):
shutil.rmtree(target)
elif os.path.exists(target):
os.unlink(target)
else:
# Object to be created. Make sure that a previously existing
# object is removed. This is important because the python tarfile
# "extractall" method fails to overwrite symbolic links with
# new links.
target = os.path.join(target_dir, member.path)
if os.path.lexists(target):
if os.path.islink(target):
os.unlink(target)
else:
was_dir = os.path.isdir(target)
should_be_dir = member.isdir()
if was_dir:
if not should_be_dir:
shutil.rmtree(target)
else:
if should_be_dir:
os.unlink(target)
tf.extract(member, target_dir, numeric_owner=True)
os.utime(target, (0, 0), follow_symlinks=False)
```
#### File: guestos/tests/interactive.py
```python
import sys
import gflags
import ictools
import vmtools
FLAGS = gflags.FLAGS
gflags.DEFINE_string("disk_image", None, "Path to disk image to use for VMs")
gflags.MarkFlagAsRequired("disk_image")
def main(argv):
argv = FLAGS(argv)
machines = vmtools.pool().request_machines(
[
{"name": "node0", "ram": "6G", "disk": "100G", "cores": 1},
],
)
system_image = vmtools.SystemImage.open_local(FLAGS.disk_image)
ic_config = ictools.ic_prep(
subnets=[[machines[0].get_ipv6()]],
version=ictools.get_disk_image_version(system_image),
root_subnet=0,
)
config_image = ictools.build_ic_prep_inject_config(machines[0], ic_config, 0, ictools.build_ssh_extra_config())
vmtools.start_machine_local(machines[0], system_image, config_image, interactive=True)
machines[0].stop()
if __name__ == "__main__":
main(sys.argv)
```
#### File: ic_p8s_service_discovery/tests/e2e-test.py
```python
import json
import os
import shlex
import shutil
import subprocess
import tempfile
import time
import unittest
import urllib.request
from pathlib import Path
from shutil import which
from subprocess import Popen
from unittest import TestCase
IC_BINARY_NAME = "ic-p8s-sd"
# Don't start scraping mainnet
DEFAULT_ARGUMENTS = "--no-poll --listen-addr=[::]:11235"
# Seconds to wait for the daemon to start up
DAEMON_STARTUP_TIMEOUT_SECONDS = 270
# The following are the target addresses of the root subnet of mainnet at
# registry version 0x6dc1. As we provide --no-poll to the daemon, the registry
# is *not* updated during the test and thus the addresses returned by the daemon
# do not change.
TDB26_TARGET_ADDRS = [
"[fc00:db20:35b:7399::5]",
"[2604:7e00:50:0:5000:20ff:fea7:efee]",
"[2604:3fc0:3002:0:5000:acff:fe31:12e8]",
"[2401:3f00:1000:24:5000:deff:fed6:1d7]",
"[2604:3fc0:2001:0:5000:b0ff:fe7b:ff55]",
"[2001:920:401a:1708:5000:4fff:fe92:48f1]",
"[2001:920:401a:1710:5000:28ff:fe36:512b]",
"[2fd00:c2b6:b24b:be67:2827:688d:e6a1:6a3b]",
"[2a01:138:900a:0:5000:2aff:fef4:c47e]",
"[2a0f:cd00:2:1:5000:3fff:fe36:cab8]",
"[2a0f:cd00:2:1:5000:87ff:fe58:ceba]",
"[2001:920:401a:1710:5000:d7ff:fe6f:fde7]",
"[2001:920:401a:1706:5000:87ff:fe11:a9a0]",
"[2001:920:401a:1708:5000:5fff:fec1:9ddb]",
"[fd00:a516:7c1b:17cd:6d81:2137:bd2a:2c5b]",
"[fd00:a516:7c1b:17cd:6d81:2137:bd2a:2c5b]",
"[fc00:e968:6179::de52:7100]",
"[fc00:db20:35b:7399::5]",
"[2a00:fb01:400:100:5000:ceff:fea2:bb0]",
"[2607:f758:c300:0:5000:72ff:fe35:3797]",
"[2607:f758:c300:0:5000:8eff:fe8b:d68]",
"[2fc00:db20:35b:7399::5]",
"[2600:c02:b002:15:5000:ceff:fecc:d5cd]",
"[2fc00:e968:6179::de52:7100]",
"[2607:f758:1220:0:5000:3aff:fe16:7aec]",
"[2607:f758:c300:0:5000:3eff:fe6d:af08]",
"[2607:f758:1220:0:5000:bfff:feb9:6794]",
"[fd00:c2b6:b24b:be67:2827:688d:e6a1:6a3b]",
"[fc00:db20:35b:7399::5]",
"[2607:f758:1220:0:5000:12ff:fe0c:8a57]",
"[2600:3004:1200:1200:5000:59ff:fe54:4c4b]",
"[2600:3006:1400:1500:5000:95ff:fe94:c948]",
"[2600:3000:6100:200:5000:c4ff:fe43:3d8a]",
"[2607:f1d0:10:1:5000:a7ff:fe91:44e]",
"[2a01:138:900a:0:5000:5aff:fece:cf05]",
"[2401:3f00:1000:23:5000:80ff:fe84:91ad]",
"[2600:2c01:21:0:5000:27ff:fe23:4839]",
]
class IcP8sDaemonTest(TestCase):
"""Tests for ic-titanium-p8s-daemon."""
def setUp(self):
"""Set up tests."""
self.targets_dir = tempfile.mkdtemp()
self.file_sd_dir = tempfile.mkdtemp()
self.daemon = start_daemon(Path(self.targets_dir), Path(self.file_sd_dir))
retry_with_timeout(lambda: get_request("replica"))
def test_mainnet_targets_expose(self):
"""test_mainnet_targets_expose."""
        def get_tdb26_targets(content: bytes) -> set:
resp = json.loads(content)
return set(
item["targets"][0]
for item in filter(
lambda item: item["labels"].get("ic_subnet", "").startswith("tdb26"),
resp,
)
)
def assert_port_matches(targets, port):
expected_targets = set("{}:{}".format(item, port) for item in TDB26_TARGET_ADDRS)
self.assertEqual(targets, expected_targets)
jobs = [("replica", 9090), ("orchestrator", 9091), ("host_node_exporter", 9100), ("node_exporter", 9100)]
for src in [get_request, self.read_sd_file]:
for job in jobs:
assert_port_matches(get_tdb26_targets(src(job[0])), job[1])
def read_sd_file(self, job_name: str):
"""Read service discovery file."""
with open(os.path.join(self.file_sd_dir, job_name, "ic_p8s_sd.json")) as f:
return f.read()
def tearDown(self):
"""Tear down resources."""
self.daemon.kill()
self.daemon.wait()
shutil.rmtree(self.targets_dir)
shutil.rmtree(self.file_sd_dir)
def in_ci_env() -> bool:
"""Return true iff the process is running on CI (based on env variables)."""
return "CI_JOB_ID" in os.environ
def start_daemon(targets_dir: Path, file_sd_config_dir: Path) -> Popen:
"""Start the discovery daemon, either by invoking 'cargo run'."""
args = "{} --file-sd-base-path {} --targets-dir {}".format(DEFAULT_ARGUMENTS, file_sd_config_dir, targets_dir)
if in_ci_env():
# On CI, we assume that someone else cleanups after us.
tmpdir = tempfile.mkdtemp()
target_bin = f"{tmpdir}/{IC_BINARY_NAME}"
def sh(c):
subprocess.run(c, shell=True, check=True)
sh(f'gunzip -c -d "${{CI_PROJECT_DIR}}/artifacts/release/ic-p8s-sd.gz" >"{target_bin}"')
sh(f"chmod +x {target_bin}")
args = "{} {}".format(target_bin, args)
else:
bin_path = which("cargo")
args = "{} run --bin {} -- {}".format(bin_path, IC_BINARY_NAME, args)
p = Popen(shlex.split(args))
time.sleep(1)
r = p.poll()
if r is not None:
raise Exception("{} stopped. Return code: {}".format(IC_BINARY_NAME, r))
return p
def retry_with_timeout(f):
"""Retry f with timeout."""
start = time.time()
while True:
try:
res = get_request("replica")
print("Succeeded after {} seconds".format(time.time() - start))
return res
except Exception as e:
if time.time() - start > DAEMON_STARTUP_TIMEOUT_SECONDS:
raise Exception("Operation timed out") from e
def get_request(path: str) -> bytes:
"""Get request using given path."""
with urllib.request.urlopen("http://localhost:11235/{}".format(path)) as response:
return response.read()
if __name__ == "__main__":
unittest.main()
```
#### File: scalability/tests/mock.py
```python
import sys
import unittest
from unittest import TestCase
from unittest.mock import MagicMock
from unittest.mock import Mock
import experiment
from workload_experiment import WorkloadExperiment
class Test_Experiment(TestCase):
"""Implements a generic experiment with dependencies mocked away."""
def test_verify__mock(self):
"""Test passes when the experiment runs to end."""
sys.argv = ["mock.py", "--testnet", "abc", "--wg_testnet", "def", "--skip_generate_report", "True"]
experiment.parse_command_line_args()
exp = ExperimentMock()
# Mock functions that won't work without a proper IC deployment
exp.get_targets = Mock(return_value=["1.1.1.1", "2.2.2.2"])
exp.get_hostnames = Mock(return_value=["3.3.3.3", "4.4.4.4"])
exp.get_ic_version = MagicMock()
exp.get_subnet_for_target = MagicMock()
exp.get_subnet_info = Mock(return_value="{}")
exp.get_topology = Mock(return_value="{}")
exp.store_hardware_info = MagicMock()
exp.get_iter_logs_from_targets = MagicMock()
exp.install_canister = MagicMock()
exp.run_workload_generator = MagicMock()
exp.init_metrics = MagicMock()
exp.kill_workload_generator = MagicMock()
exp.turn_off_replica = MagicMock()
exp.check_workload_generator_installed = Mock(return_value=True)
exp.get_ic_version = MagicMock(return_value="deadbeef")
exp.wait_for_quiet = MagicMock(return_value=None)
exp.init()
exp.init_experiment()
exp.install_canister("some canister")
exp.start_experiment()
exp.run_experiment({})
exp.subnet_id = "abc"
exp.write_summary_file("test", {}, [], "some x value")
exp.end_experiment()
exp.install_canister.assert_called_once()
exp.run_workload_generator.assert_called_once()
exp.init_metrics.assert_called_once()
class ExperimentMock(WorkloadExperiment):
"""Logic for experiment 1."""
def __init__(self):
"""Construct experiment 1."""
super().__init__()
def run_experiment_internal(self, config):
"""Mock similar to experiment 1."""
return self.run_workload_generator(
self.machines,
self.target_nodes,
200,
outdir=self.iter_outdir,
duration=60,
)
if __name__ == "__main__":
unittest.main()
```
#### File: boundary_node/templates/ic_router_control_plane.py
```python
import argparse
import base64
import datetime
import glob
import json
import os
import re
import sys
from shutil import copyfile
BACKUPS_TO_KEEP = 20
def patch_subnet_export(out):
"""
Fix router table export names.
KLUDGE: the ic_router_table.js has export names derived
from network names. This export naming scheme runs into issues as
network names do not follow JS export naming conventions.
    The following function rewrites named exports of the form
    "export main-net", "export dash-board", etc.
    into a single
    "export subnet;" style export.
This temporary function can be nuked once all boundary nodes are upgraded.
Keyword Arguments:
-----------------
    out -- a list of strings representing the JS code that needs to be patched
"""
out[0] = "let subnet_table = {\n"
out[len(out) - 1] = "export default subnet_table;\n"
def replace_demarcated_section(lines, out, section, prefix):
"""
Replace section demarcated by magic start and end markers.
Keyword Arguments:
-----------------
lines -- List of strings that may contain start and end markers.
    section -- Section to insert between the markers.
    out -- Result of the replacement.
    prefix -- Custom prefix to be placed before the start and end markers.
Example Transformation:
lines = ["DontReplace",
"MAINTAINED BY ic_router_control_plane.py DO NOT EDIT BY HAND"
"{OldSection}"
"END MAINTAINED BY ic_router_control_plane.py DO NOT EDIT BY HAND"
"DontReplace"]
is converted to
out = ["DontReplace",
"MAINTAINED BY ic_router_control_plane.py DO NOT EDIT BY HAND"
"{NewSection}"
"END MAINTAINED BY ic_router_control_plane.py DO NOT EDIT BY HAND"
"DontReplace"]
NOTE: start and end are MAGIC strings for now. Changing them will
break boundary node upgrades.
"""
start = prefix + "MAINTAINED BY ic_router_control_plane.py DO NOT EDIT BY HAND"
end = prefix + "END MAINTAINED BY ic_router_control_plane.py DO NOT EDIT BY HAND"
section.insert(0, start + "\n")
section.append(end + "\n")
skip = False
for line in lines:
if line.startswith(start):
skip = True
out.extend(section)
elif line.startswith(end):
skip = False
continue
if not skip:
out.append(line)
def canister_id_to_hex(id):
id = id.replace("-", "") + "="
return base64.b32decode(id, casefold=True)[4:].hex()
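# Editorial note (not part of the original source): the textual canister id is
# dash-separated base32; canister_id_to_hex() strips the dashes, pads with "=",
# base32-decodes the string, drops the leading 4 checksum bytes and returns the
# remaining principal bytes as hex, which is the form used for the canister range
# comparisons further below.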
def str2bool(v):
if isinstance(v, bool):
return v
if v.lower() in ("yes", "true", "t", "y", "1"):
return True
elif v.lower() in ("no", "false", "f", "n", "0"):
return False
else:
raise argparse.ArgumentTypeError("Boolean value expected.")
argparser = argparse.ArgumentParser(description="Configure NGINX for IC routing.")
argparser.add_argument(
"routes_dir",
metavar="ROUTE_DIR",
type=str,
nargs=1,
help="a directory containing *.routes JSON files",
)
argparser.add_argument("nginx_file", metavar="NGINX_FILE", type=str, nargs=1, help="pathname of nginx.conf")
argparser.add_argument(
"njs_file",
metavar="NJS_FILE",
type=str,
nargs=1,
help="pathname of ic_router.js",
)
argparser.add_argument(
"trusted_certs_file",
metavar="CERTS_FILE",
type=str,
nargs=1,
help="pathname of trusted_certs.pem",
)
argparser.add_argument(
"--allow_node_socket_addrs",
metavar="NODE_ADDR,NODE_ADDR,...",
type=str,
nargs=1,
default=None,
help="a list of node socket addrs (e.g. [123:23]:8080) to allow, all others are not allowed",
)
argparser.add_argument(
"--deny_node_socket_addrs",
metavar="NODE_ADDR,NODE_ADDR,...",
type=str,
nargs=1,
default=None,
help="a list of socket addrs (e.g. [123:23]:8080) to deny, all others are allowed",
)
argparser.add_argument(
"--generate_upstream_declarations",
metavar="true/false",
type=str2bool,
nargs=1,
default=[True],
help="whether or not upstream declarations should be generated (false for rosetta front end)",
)
args = argparser.parse_args(sys.argv[1:])
routes_dir = args.routes_dir[0]
nginx_conf_file = args.nginx_file[0]
ic_router_file = args.njs_file[0]
trusted_certs_file = args.trusted_certs_file[0]
allow_node_socket_addrs = args.allow_node_socket_addrs
if allow_node_socket_addrs:
allow_node_socket_addrs = [x for y in allow_node_socket_addrs for x in y.split(",")]
deny_node_socket_addrs = args.deny_node_socket_addrs
if deny_node_socket_addrs:
deny_node_socket_addrs = [x for y in deny_node_socket_addrs for x in y.split(",")]
generate_upstream_declarations = args.generate_upstream_declarations[0]
def permit_node_addr(node_socket_addr):
if allow_node_socket_addrs is not None:
return node_socket_addr in allow_node_socket_addrs
else:
if deny_node_socket_addrs is not None:
return node_socket_addr not in deny_node_socket_addrs
else:
return True
# find the highest registry version *.routes file
routes_file = None
routes_files = sorted(os.listdir(routes_dir), reverse=True)
for f in routes_files:
if re.match("^\\d+.routes$", f):
routes_file = f
break
if not routes_file:
print("no *.routes file found")
sys.exit(1)
print("routes_file", routes_file)
with open(os.path.join(routes_dir, routes_file)) as f:
data = json.load(f)
class obj(object):
"""Class to Convert JSON to objects."""
def __init__(self, d):
"""Convert JSON to objects."""
for a, b in d.items():
if isinstance(b, (list, tuple)):
setattr(self, a, [obj(x) if isinstance(x, dict) else x for x in b])
else:
setattr(self, a, obj(b) if isinstance(b, dict) else b)
data = obj(data)
nginx_out = []
ic_router_out = []
trusted_certs_out = []
nns_node_ids = set([n.node_id for s in data.subnets if s.subnet_id == data.nns_subnet_id for n in s.nodes])
nodes = [n for s in data.subnets for n in s.nodes]
upstreams_section = []
for node in nodes:
def print_upstream(suffix, max_conns):
upstreams_section.append("upstream %s%s {\n" % (node.node_id, suffix))
sockaddrary = node.socket_addr.split(":")
port = sockaddrary.pop()
if node.socket_addr[0] != "[" and len(sockaddrary) > 1:
# add brackets for IPv6
socket_addr = "[%s]:%s" % (":".join(sockaddrary), port)
else:
socket_addr = node.socket_addr
upstreams_section.append(" server %s %s;\n" % (socket_addr, max_conns))
upstreams_section.append("}\n")
if generate_upstream_declarations:
# The default is rate limited in nginx conf, not by max_conns
print_upstream("", "")
# Query calls are rate limited by max_conns
max_conns = "max_conns=%d" % (50 if (node.node_id in nns_node_ids) else 100)
print_upstream("-query", max_conns)
trusted_certs_out.append(node.tls_certificate_pem)
nginx_lines = []
with open(nginx_conf_file, "r") as default_file:
nginx_lines = default_file.readlines()
replace_demarcated_section(nginx_lines, nginx_out, upstreams_section, "# ")
ic_router_section = []
canister_range_starts = []
canister_range_ends = []
canister_subnets = []
canister_subnet_ids = []
for canister_route in sorted(data.canister_routes, key=lambda r: canister_id_to_hex(r.start_canister_id)):
canister_range_starts.append(" '%s',\n" % canister_id_to_hex(canister_route.start_canister_id))
canister_range_ends.append(" '%s',\n" % canister_id_to_hex(canister_route.end_canister_id))
canister_subnets.append(" '%s',\n" % canister_route.subnet_id)
canister_subnet_ids.append(canister_route.subnet_id)
ic_router_section.append("canister_range_starts: [\n")
ic_router_section.extend(canister_range_starts)
ic_router_section.append("],\n")
ic_router_section.append("canister_range_ends: [\n")
ic_router_section.extend(canister_range_ends)
ic_router_section.append("],\n")
ic_router_section.append("canister_subnets: [\n")
ic_router_section.extend(canister_subnets)
ic_router_section.append("],\n")
nns_subnet_index = canister_subnet_ids.index(data.nns_subnet_id)
ic_router_section.append("nns_subnet_index: %s,\n" % nns_subnet_index)
subnet_node_ids = []
subnet_nodes = []
for subnet in sorted(data.subnets, key=lambda s: canister_subnet_ids.index(s.subnet_id)):
subnet_node_ids.append(" [\n")
subnet_nodes.append(" [\n")
for node in subnet.nodes:
if not permit_node_addr(node.socket_addr):
continue
subnet_node_ids.append(" '%s',\n" % node.node_id)
subnet_nodes.append(" '%s',\n" % node.socket_addr)
subnet_node_ids.append(" ],\n")
subnet_nodes.append(" ],\n")
ic_router_section.append("subnet_node_ids: [\n")
ic_router_section.extend(subnet_node_ids)
ic_router_section.append("],\n")
ic_router_section.append("subnet_nodes: [\n")
ic_router_section.extend(subnet_nodes)
ic_router_section.append("],\n")
ic_router_lines = []
with open(ic_router_file, "r") as default_file:
ic_router_lines = default_file.readlines()
replace_demarcated_section(ic_router_lines, ic_router_out, ic_router_section, "// ")
patch_subnet_export(ic_router_out)
backup_time = datetime.datetime.now().strftime("%Y_%m_%d-%H:%M:%S")
ic_router_file_backup = ic_router_file + "." + backup_time
print("backing up %s to %s" % (ic_router_file, ic_router_file_backup))
copyfile(ic_router_file, ic_router_file_backup)
with open(ic_router_file, "w") as f:
f.writelines(ic_router_out)
# do not create an empty file as this will cause NGINX to fail
if trusted_certs_out:
trusted_certs_file_backup = trusted_certs_file + "." + backup_time
print("backing up %s to %s" % (trusted_certs_file, trusted_certs_file_backup))
if os.path.exists(trusted_certs_file):
copyfile(trusted_certs_file, trusted_certs_file_backup)
with open(trusted_certs_file, "w") as f:
f.writelines(trusted_certs_out)
nginx_conf_file_backup = nginx_conf_file + "." + backup_time
print("backing up %s to %s" % (nginx_conf_file, nginx_conf_file_backup))
copyfile(nginx_conf_file, nginx_conf_file_backup)
with open(nginx_conf_file, "w") as f:
f.writelines(nginx_out)
# cleanup backups
def cleanup_backups(fn):
files = sorted(glob.glob(fn + ".*"))
# largest (newest) first
files.reverse()
if len(files) > BACKUPS_TO_KEEP:
files = files[BACKUPS_TO_KEEP:]
for f in files:
os.remove(f)
cleanup_backups(ic_router_file)
cleanup_backups(nginx_conf_file)
if trusted_certs_out:
cleanup_backups(trusted_certs_file)
``` |
{
"source": "3coins/jupyterlab-telemetry",
"score": 3
} |
#### File: jupyterlab-telemetry/jupyterlab_telemetry/handlers.py
```python
import json
from jupyter_server.base.handlers import APIHandler
from jupyter_server.utils import url_path_join
import tornado
class TelemetryHandler(APIHandler):
"""
A handler that receives and stores telemetry data from the client.
"""
@tornado.web.authenticated
def put(self, *args, **kwargs):
# Parse the data from the request body
raw = self.request.body.strip().decode(u'utf-8')
try:
decoder = json.JSONDecoder()
session_log = decoder.decode(raw)
except Exception as e:
raise tornado.web.HTTPError(400, str(e))
self.log.info(session_log)
self.set_status(204)
self.finish()
def setup_handlers(web_app):
host_pattern = ".*$"
base_url = web_app.settings["base_url"]
route_pattern = url_path_join(base_url, "telemetry")
handlers = [(route_pattern, TelemetryHandler)]
web_app.add_handlers(host_pattern, handlers)
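# Editorial note (not part of the original source): once the server extension is
# loaded, the handler above is reachable at <base_url>/telemetry and accepts an
# authenticated PUT whose body is a JSON document (its shape is decided by the
# client-side extension); the payload is logged server-side and the request is
# answered with 204 No Content.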
``` |
{
"source": "3con/cointrader",
"score": 2
} |
#### File: cointrader/cointrader/config.py
```python
import sys
import os
import logging
import logging.config
if (sys.version_info > (3, 0)):
# Python 3 code in this block
import configparser
else:
# Python 2 code in this block
import ConfigParser as configparser
DEFAULT_CONFIG = ".cointrader.ini"
def get_path_to_config():
env = os.getenv("HOME")
return os.path.join(env, DEFAULT_CONFIG)
class Config(object):
def __init__(self, configfile=None):
self.verbose = False
self.market = "poloniex"
self.api_key = None
self.api_secret = None
if configfile:
logging.config.fileConfig(configfile.name)
config = configparser.ConfigParser()
config.readfp(configfile)
exchange = config.get("DEFAULT", "exchange")
self.api_key = config.get(exchange, "api_key")
self.api_secret = config.get(exchange, "api_secret")
@property
def api(self):
if not self.api_key or not self.api_secret:
raise RuntimeError("API not configured")
return self.api_key, self.api_secret
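# Editorial sketch (not part of the original source): besides the [loggers]/
# [handlers]/[formatters] sections required by logging.config.fileConfig, a
# ~/.cointrader.ini read by this class would carry the exchange credentials like
#   [DEFAULT]
#   exchange = poloniex
#   [poloniex]
#   api_key = <your key>
#   api_secret = <your secret>
# and could be loaded with
#   with open(get_path_to_config()) as f:
#       key, secret = Config(f).api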
```
#### File: cointrader/cointrader/helpers.py
```python
import datetime
from termcolor import colored
from terminaltables import AsciiTable
def colorize_value(value):
if value < 0:
return colored(value, "red")
else:
return colored(value, "green")
# def render_signal_details(signals):
# out = [["Indicator", "Signal", "Details"]]
# for s in signals:
# out.append([s, signals[s].value, signals[s].details])
# table = AsciiTable(out).table
# return "\n".join(["\nSignal:", table])
def render_signal_detail(signal):
if signal.buy:
return colored("BUY", "green")
if signal.sell:
return colored("SELL", "red")
else:
return "WAIT"
def render_user_options(options):
out = []
for o in options:
out.append("{}) {}".format(o[0], o[1]))
return "\n".join(out)
def render_bot_title(bot, market, chart):
out = ["\n"]
data = chart._data
if len(data) > 1:
last = data[-2]
else:
last = data[-1]
current = data[-1]
values = {}
values["date"] = datetime.datetime.utcfromtimestamp(current["date"])
if current["close"] > last["close"]:
values["rate"] = colored(current["close"], "green")
else:
values["rate"] = colored(current["close"], "red")
change_percent = (current["close"] - last["close"]) / current["close"] * 100
values["change_percent"] = round(change_percent, 4)
values["url"] = market.url
values["btc"] = bot.btc
values["amount"] = bot.amount
values["currency"] = market.currency
t = u"{date} [{btc} BTC {amount} {currency}] | {rate} ({change_percent}%) | {url}".format(**values)
out.append("=" * len(t))
out.append(t)
out.append("=" * len(t))
return "\n".join(out)
def render_bot_statistic(stat):
out = [["", stat["start"], stat["end"], "CHANGE %"]]
out.append(["COINTRADER", stat["trader_start_value"], stat["trader_end_value"], "{}".format(colorize_value(round(stat["profit_cointrader"], 4)))])
out.append(["MARKET", stat["market_start_value"], stat["market_end_value"], "{}".format(colorize_value(round(stat["profit_chart"], 4)))])
table = AsciiTable(out).table
return "\n".join(["\nStatistic:", table])
def render_bot_tradelog(trades):
out = [["DATE", "TYPE", "RATE", "COINS", "COINS'", "BTC", "BTC'"]]
for trade in trades:
values = []
values.append(trade.date)
values.append(trade.order_type)
values.append(trade.rate)
if trade.order_type == "BUY":
values.append("--")
values.append(trade.amount_taxed)
values.append(trade.btc)
values.append("--")
elif trade.order_type == "SELL":
values.append(trade.amount)
values.append("--")
values.append("--")
values.append(trade.btc_taxed)
else:
values.append(trade.amount)
values.append("--")
values.append(trade.btc)
values.append("--")
out.append(values)
table = AsciiTable(out).table
return "\n".join(["\nTradelog:", table])
```
#### File: cointrader/cointrader/strategy.py
```python
import datetime
import logging
from cointrader.indicators import (
WAIT, BUY, SELL, Signal, macdh_momententum, macdh, double_cross
)
log = logging.getLogger(__name__)
class Strategy(object):
"""Docstring for Strategy. """
def __str__(self):
return "{}".format(self.__class__)
def __init__(self):
self.signals = {}
"""Dictionary with details on the signal(s)
{"indicator": {"signal": 1, "details": Foo}}"""
def signal(self, chart):
"""Will return either a BUY, SELL or WAIT signal for the given
market"""
raise NotImplementedError
class NullStrategy(Strategy):
"""The NullStrategy does nothing than WAIT. It will emit not BUY or
SELL signal and is therefor the default strategy when starting
cointrader to protect the user from loosing money by accident."""
def signal(self, chart):
"""Will return either a BUY, SELL or WAIT signal for the given
market"""
signal = Signal(WAIT, datetime.datetime.utcnow())
self.signals["WAIT"] = signal
return signal
class Klondike(Strategy):
def signal(self, chart):
signal = macdh_momententum(chart)
self.signals["MACDH_MOMEMENTUM"] = signal
if signal.buy:
return signal
elif signal.sell:
return signal
return Signal(WAIT, datetime.datetime.utcfromtimestamp(chart.date))
class Followtrend(Strategy):
"""Simple trend follow strategie."""
def __init__(self):
Strategy.__init__(self)
self._macd = WAIT
def signal(self, chart):
# Get current chart
closing = chart.values()
self._value = closing[-1][1]
self._date = datetime.datetime.utcfromtimestamp(closing[-1][0])
        # MACDH is an early indicator for trend changes. We are using the
        # MACDH as a precondition for trading signals here and require
        # the MACDH to signal a change into a bullish/bearish market. This
        # signal stays in place until the MACDH signals the opposite change.
macdh_signal = macdh(chart)
if macdh_signal.value == BUY:
self._macd = BUY
if macdh_signal.value == SELL:
self._macd = SELL
log.debug("macdh signal: {}".format(self._macd))
# Finally we are using the double_cross signal as confirmation
# of the former MACDH signal
dc_signal = double_cross(chart)
if self._macd == BUY and dc_signal.value == BUY:
signal = dc_signal
elif self._macd == SELL and dc_signal.value == SELL:
signal = dc_signal
else:
signal = Signal(WAIT, dc_signal.date)
log.debug("Final signal @{}: {}".format(signal.date, signal.value))
self.signals["DC"] = signal
return signal
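# Editorial sketch (not part of the original source): how a trading loop might
# consume a strategy; `chart` stands for the chart object the bot passes in.
#   strategy = Followtrend()
#   signal = strategy.signal(chart)
#   if signal.buy:
#       pass  # place a BUY order
#   elif signal.sell:
#       pass  # place a SELL order
#   else:
#       pass  # WAIT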
``` |
{
"source": "3cor/spw",
"score": 2
} |
#### File: 3cor/spw/app.py
```python
from PyQt5 import QtCore, QtGui, QtWidgets
import modules.passgen as pg
import modules.mongodb as mg
import modules.xcrypt as xc
import hashlib
import os
class Ui_MainWindow(object):
def setupUi(self, MainWindow):
MainWindow.setObjectName("MainWindow")
MainWindow.resize(800, 722)
self.centralwidget = QtWidgets.QWidget(MainWindow)
self.centralwidget.setObjectName("centralwidget")
self.gridLayout = QtWidgets.QGridLayout(self.centralwidget)
self.gridLayout.setObjectName("gridLayout")
self.stackedWidget = QtWidgets.QStackedWidget(self.centralwidget)
self.stackedWidget.setObjectName("stackedWidget")
self.pageWC = QtWidgets.QWidget()
self.pageWC.setObjectName("pageWC")
self.gridLayout_2 = QtWidgets.QGridLayout(self.pageWC)
self.gridLayout_2.setObjectName("gridLayout_2")
self.wcPushButtonSignin = QtWidgets.QPushButton(self.pageWC)
self.wcPushButtonSignin.setObjectName("wcPushButtonSignin")
self.gridLayout_2.addWidget(self.wcPushButtonSignin, 10, 0, 1, 1)
self.wcLineEditPass = QtWidgets.QLineEdit(self.pageWC)
self.wcLineEditPass.setObjectName("wcLineEditPass")
self.gridLayout_2.addWidget(self.wcLineEditPass, 9, 0, 1, 1)
self.wcLineEditUser = QtWidgets.QLineEdit(self.pageWC)
self.wcLineEditUser.setText("")
self.wcLineEditUser.setObjectName("wcLineEditUser")
self.gridLayout_2.addWidget(self.wcLineEditUser, 8, 0, 1, 1)
self.wcPushButtonSignup = QtWidgets.QPushButton(self.pageWC)
self.wcPushButtonSignup.setObjectName("wcPushButtonSignup")
self.gridLayout_2.addWidget(self.wcPushButtonSignup, 11, 0, 1, 1)
self.label_4 = QtWidgets.QLabel(self.pageWC)
self.label_4.setWordWrap(True)
self.label_4.setObjectName("label_4")
self.gridLayout_2.addWidget(self.label_4, 7, 0, 1, 1)
self.stackedWidget.addWidget(self.pageWC)
self.pageRG = QtWidgets.QWidget()
self.pageRG.setObjectName("pageRG")
self.verticalLayout = QtWidgets.QVBoxLayout(self.pageRG)
self.verticalLayout.setObjectName("verticalLayout")
self.label_3 = QtWidgets.QLabel(self.pageRG)
font = QtGui.QFont()
font.setPointSize(36)
self.label_3.setFont(font)
self.label_3.setAlignment(QtCore.Qt.AlignCenter)
self.label_3.setObjectName("label_3")
self.verticalLayout.addWidget(self.label_3)
self.rgLineEditUser = QtWidgets.QLineEdit(self.pageRG)
self.rgLineEditUser.setObjectName("rgLineEditUser")
self.verticalLayout.addWidget(self.rgLineEditUser)
self.rgLineEditPass = QtWidgets.QLineEdit(self.pageRG)
self.rgLineEditPass.setObjectName("rgLineEditPass")
self.verticalLayout.addWidget(self.rgLineEditPass)
self.rgLineEditConfirm = QtWidgets.QLineEdit(self.pageRG)
self.rgLineEditConfirm.setObjectName("rgLineEditConfirm")
self.verticalLayout.addWidget(self.rgLineEditConfirm)
self.rgPushButtonSubmit = QtWidgets.QPushButton(self.pageRG)
self.rgPushButtonSubmit.setObjectName("rgPushButtonSubmit")
self.verticalLayout.addWidget(self.rgPushButtonSubmit)
self.rgPushButtonBack = QtWidgets.QPushButton(self.pageRG)
self.rgPushButtonBack.setObjectName("rgPushButtonBack")
self.verticalLayout.addWidget(self.rgPushButtonBack)
self.stackedWidget.addWidget(self.pageRG)
self.pageGEN = QtWidgets.QWidget()
self.pageGEN.setObjectName("pageGEN")
self.gridLayout_5 = QtWidgets.QGridLayout(self.pageGEN)
self.gridLayout_5.setObjectName("gridLayout_5")
self.label = QtWidgets.QLabel(self.pageGEN)
self.label.setWordWrap(True)
self.label.setObjectName("label")
self.gridLayout_5.addWidget(self.label, 0, 0, 1, 2)
self.tabGenerate = QtWidgets.QTabWidget(self.pageGEN)
self.tabGenerate.setObjectName("tabGenerate")
self.tabPassword = QtWidgets.QWidget()
self.tabPassword.setObjectName("tabPassword")
self.verticalLayout_2 = QtWidgets.QVBoxLayout(self.tabPassword)
self.verticalLayout_2.setObjectName("verticalLayout_2")
self.formLayout_2 = QtWidgets.QFormLayout()
self.formLayout_2.setFieldGrowthPolicy(QtWidgets.QFormLayout.AllNonFixedFieldsGrow)
self.formLayout_2.setObjectName("formLayout_2")
self.label_5 = QtWidgets.QLabel(self.tabPassword)
self.label_5.setObjectName("label_5")
self.formLayout_2.setWidget(2, QtWidgets.QFormLayout.LabelRole, self.label_5)
self.label_6 = QtWidgets.QLabel(self.tabPassword)
self.label_6.setObjectName("label_6")
self.formLayout_2.setWidget(3, QtWidgets.QFormLayout.LabelRole, self.label_6)
self.lineEditDG = QtWidgets.QLineEdit(self.tabPassword)
self.lineEditDG.setObjectName("lineEditDG")
self.formLayout_2.setWidget(3, QtWidgets.QFormLayout.FieldRole, self.lineEditDG)
self.checkBoxSymbol = QtWidgets.QCheckBox(self.tabPassword)
self.checkBoxSymbol.setChecked(True)
self.checkBoxSymbol.setObjectName("checkBoxSymbol")
self.formLayout_2.setWidget(4, QtWidgets.QFormLayout.LabelRole, self.checkBoxSymbol)
self.lineEditSymbol = QtWidgets.QLineEdit(self.tabPassword)
self.lineEditSymbol.setObjectName("lineEditSymbol")
self.formLayout_2.setWidget(4, QtWidgets.QFormLayout.FieldRole, self.lineEditSymbol)
self.lineEditNC = QtWidgets.QLineEdit(self.tabPassword)
self.lineEditNC.setObjectName("lineEditNC")
self.formLayout_2.setWidget(2, QtWidgets.QFormLayout.FieldRole, self.lineEditNC)
self.verticalLayout_2.addLayout(self.formLayout_2)
self.pushButtonGenWord = QtWidgets.QPushButton(self.tabPassword)
self.pushButtonGenWord.setObjectName("pushButtonGenWord")
self.verticalLayout_2.addWidget(self.pushButtonGenWord)
self.textEditPassword = QtWidgets.QPlainTextEdit(self.tabPassword)
self.textEditPassword.setObjectName("textEditPassword")
self.verticalLayout_2.addWidget(self.textEditPassword)
self.tabGenerate.addTab(self.tabPassword, "")
self.tabPassphrase = QtWidgets.QWidget()
self.tabPassphrase.setObjectName("tabPassphrase")
self.verticalLayout_3 = QtWidgets.QVBoxLayout(self.tabPassphrase)
self.verticalLayout_3.setObjectName("verticalLayout_3")
self.formLayout_3 = QtWidgets.QFormLayout()
self.formLayout_3.setObjectName("formLayout_3")
self.lineEditNW = QtWidgets.QLineEdit(self.tabPassphrase)
self.lineEditNW.setObjectName("lineEditNW")
self.formLayout_3.setWidget(1, QtWidgets.QFormLayout.FieldRole, self.lineEditNW)
self.label_7 = QtWidgets.QLabel(self.tabPassphrase)
self.label_7.setObjectName("label_7")
self.formLayout_3.setWidget(1, QtWidgets.QFormLayout.LabelRole, self.label_7)
self.verticalLayout_3.addLayout(self.formLayout_3)
self.pushButtonGenPhrase = QtWidgets.QPushButton(self.tabPassphrase)
self.pushButtonGenPhrase.setObjectName("pushButtonGenPhrase")
self.verticalLayout_3.addWidget(self.pushButtonGenPhrase)
self.textEditPassphrase = QtWidgets.QPlainTextEdit(self.tabPassphrase)
self.textEditPassphrase.setObjectName("textEditPassphrase")
self.verticalLayout_3.addWidget(self.textEditPassphrase)
self.tabGenerate.addTab(self.tabPassphrase, "")
self.gridLayout_5.addWidget(self.tabGenerate, 1, 0, 1, 1)
self.pushButtonBackToManager = QtWidgets.QPushButton(self.pageGEN)
self.pushButtonBackToManager.setObjectName("pushButtonBackToManager")
self.gridLayout_5.addWidget(self.pushButtonBackToManager, 2, 0, 1, 1)
self.stackedWidget.addWidget(self.pageGEN)
self.pageEntry = QtWidgets.QWidget()
self.pageEntry.setObjectName("pageEntry")
self.gridLayout_4 = QtWidgets.QGridLayout(self.pageEntry)
self.gridLayout_4.setObjectName("gridLayout_4")
self.pushButtonGen = QtWidgets.QPushButton(self.pageEntry)
self.pushButtonGen.setObjectName("pushButtonGen")
self.gridLayout_4.addWidget(self.pushButtonGen, 3, 3, 1, 1)
self.pushButtonDelete = QtWidgets.QPushButton(self.pageEntry)
self.pushButtonDelete.setObjectName("pushButtonDelete")
self.gridLayout_4.addWidget(self.pushButtonDelete, 3, 2, 1, 1)
self.pushButtonEdit = QtWidgets.QPushButton(self.pageEntry)
self.pushButtonEdit.setObjectName("pushButtonEdit")
self.gridLayout_4.addWidget(self.pushButtonEdit, 3, 1, 1, 1)
self.pushButtonNew = QtWidgets.QPushButton(self.pageEntry)
self.pushButtonNew.setObjectName("pushButtonNew")
self.gridLayout_4.addWidget(self.pushButtonNew, 3, 0, 1, 1)
self.pushButtonLock = QtWidgets.QPushButton(self.pageEntry)
self.pushButtonLock.setObjectName("pushButtonLock")
self.gridLayout_4.addWidget(self.pushButtonLock, 3, 4, 1, 1)
self.tableEntry = QtWidgets.QTableWidget(self.pageEntry)
self.tableEntry.setRowCount(0)
self.tableEntry.setColumnCount(0)
self.tableEntry.setObjectName("tableEntry")
self.gridLayout_4.addWidget(self.tableEntry, 5, 0, 1, 5)
self.label_8 = QtWidgets.QLabel(self.pageEntry)
self.label_8.setObjectName("label_8")
self.gridLayout_4.addWidget(self.label_8, 1, 0, 1, 5)
self.stackedWidget.addWidget(self.pageEntry)
self.pageEntryCUD = QtWidgets.QWidget()
self.pageEntryCUD.setObjectName("pageEntryCUD")
self.gridLayout_6 = QtWidgets.QGridLayout(self.pageEntryCUD)
self.gridLayout_6.setObjectName("gridLayout_6")
self.detLineEditURL = QtWidgets.QLineEdit(self.pageEntryCUD)
self.detLineEditURL.setObjectName("detLineEditURL")
self.gridLayout_6.addWidget(self.detLineEditURL, 5, 0, 1, 1)
self.detOK = QtWidgets.QPushButton(self.pageEntryCUD)
self.detOK.setObjectName("detOK")
self.gridLayout_6.addWidget(self.detOK, 6, 0, 1, 1)
self.detLineEditName = QtWidgets.QLineEdit(self.pageEntryCUD)
self.detLineEditName.setObjectName("detLineEditName")
self.gridLayout_6.addWidget(self.detLineEditName, 1, 0, 1, 1)
self.detCancel = QtWidgets.QPushButton(self.pageEntryCUD)
self.detCancel.setObjectName("detCancel")
self.gridLayout_6.addWidget(self.detCancel, 7, 0, 1, 1)
self.detLineEditUser = QtWidgets.QLineEdit(self.pageEntryCUD)
self.detLineEditUser.setObjectName("detLineEditUser")
self.gridLayout_6.addWidget(self.detLineEditUser, 2, 0, 1, 1)
self.detLineEditConfirm = QtWidgets.QLineEdit(self.pageEntryCUD)
self.detLineEditConfirm.setObjectName("detLineEditConfirm")
self.gridLayout_6.addWidget(self.detLineEditConfirm, 4, 0, 1, 1)
self.detLineEditPass = QtWidgets.QLineEdit(self.pageEntryCUD)
self.detLineEditPass.setObjectName("detLineEditPass")
self.gridLayout_6.addWidget(self.detLineEditPass, 3, 0, 1, 1)
self.detLabelEntry = QtWidgets.QLabel(self.pageEntryCUD)
self.detLabelEntry.setObjectName("detLabelEntry")
self.gridLayout_6.addWidget(self.detLabelEntry, 0, 0, 1, 1)
self.stackedWidget.addWidget(self.pageEntryCUD)
self.gridLayout.addWidget(self.stackedWidget, 0, 1, 1, 1)
self.labelOutput = QtWidgets.QLabel(self.centralwidget)
self.labelOutput.setText("")
self.labelOutput.setObjectName("labelOutput")
self.gridLayout.addWidget(self.labelOutput, 1, 1, 1, 1)
MainWindow.setCentralWidget(self.centralwidget)
self.statusbar = QtWidgets.QStatusBar(MainWindow)
self.statusbar.setObjectName("statusbar")
MainWindow.setStatusBar(self.statusbar)
self.retranslateUi(MainWindow)
self.stackedWidget.setCurrentIndex(0)
self.tabGenerate.setCurrentIndex(1)
QtCore.QMetaObject.connectSlotsByName(MainWindow)
MainWindow.setTabOrder(self.wcLineEditUser, self.wcLineEditPass)
MainWindow.setTabOrder(self.wcLineEditPass, self.wcPushButtonSignin)
MainWindow.setTabOrder(self.wcPushButtonSignin, self.wcPushButtonSignup)
MainWindow.setTabOrder(self.wcPushButtonSignup, self.rgLineEditUser)
MainWindow.setTabOrder(self.rgLineEditUser, self.rgLineEditPass)
MainWindow.setTabOrder(self.rgLineEditPass, self.rgLineEditConfirm)
MainWindow.setTabOrder(self.rgLineEditConfirm, self.rgPushButtonSubmit)
MainWindow.setTabOrder(self.rgPushButtonSubmit, self.rgPushButtonBack)
MainWindow.setTabOrder(self.rgPushButtonBack, self.tabGenerate)
MainWindow.setTabOrder(self.tabGenerate, self.lineEditNC)
MainWindow.setTabOrder(self.lineEditNC, self.lineEditDG)
MainWindow.setTabOrder(self.lineEditDG, self.lineEditSymbol)
MainWindow.setTabOrder(self.lineEditSymbol, self.checkBoxSymbol)
MainWindow.setTabOrder(self.checkBoxSymbol, self.pushButtonGenWord)
MainWindow.setTabOrder(self.pushButtonGenWord, self.textEditPassword)
MainWindow.setTabOrder(self.textEditPassword, self.pushButtonBackToManager)
MainWindow.setTabOrder(self.pushButtonBackToManager, self.pushButtonNew)
MainWindow.setTabOrder(self.pushButtonNew, self.pushButtonEdit)
MainWindow.setTabOrder(self.pushButtonEdit, self.pushButtonDelete)
MainWindow.setTabOrder(self.pushButtonDelete, self.pushButtonGen)
MainWindow.setTabOrder(self.pushButtonGen, self.pushButtonLock)
MainWindow.setTabOrder(self.pushButtonLock, self.tableEntry)
MainWindow.setTabOrder(self.tableEntry, self.detLineEditName)
MainWindow.setTabOrder(self.detLineEditName, self.detLineEditUser)
MainWindow.setTabOrder(self.detLineEditUser, self.lineEditNW)
MainWindow.setTabOrder(self.lineEditNW, self.detLineEditPass)
MainWindow.setTabOrder(self.detLineEditPass, self.detLineEditConfirm)
MainWindow.setTabOrder(self.detLineEditConfirm, self.detLineEditURL)
MainWindow.setTabOrder(self.detLineEditURL, self.detOK)
MainWindow.setTabOrder(self.detOK, self.detCancel)
MainWindow.setTabOrder(self.detCancel, self.textEditPassphrase)
MainWindow.setTabOrder(self.textEditPassphrase, self.pushButtonGenPhrase)
def retranslateUi(self, MainWindow):
_translate = QtCore.QCoreApplication.translate
MainWindow.setWindowTitle(_translate("MainWindow", "MainWindow"))
self.wcPushButtonSignin.setText(_translate("MainWindow", "Sign in"))
self.wcLineEditPass.setPlaceholderText(_translate("MainWindow", "Password"))
self.wcLineEditUser.setPlaceholderText(_translate("MainWindow", "Username"))
self.wcPushButtonSignup.setText(_translate("MainWindow", "Sign up"))
        self.label_4.setText(_translate("MainWindow", "<html><head/><body><p align=\"center\"><span style=\" font-size:36pt;\">Simple Password Manager</span></p><p align=\"center\"><span style=\" font-size:8pt; font-style:italic;\">Developed by <NAME></span></p><p align=\"center\"><br/></p><p align=\"center\">Welcome to Simple Password Manager!</p><p align=\"center\">Simple Password Manager helps you create your own password database and store it on MongoDB Atlas. </p><p align=\"center\">Please sign in using your registered username or sign up for a new account.</p></body></html>"))
self.label_3.setText(_translate("MainWindow", "<html><head/><body><p>Create new account</p><p><span style=\" font-size:11pt;\">Your password will be a master key.</span></p><p><span style=\" font-size:11pt;\">Master key is used to encrypt and decrypt your password database.</span></p><p><span style=\" font-size:11pt;\">If you lose your password, you cannot unlock your database!</span></p></body></html>"))
self.rgLineEditUser.setPlaceholderText(_translate("MainWindow", "Username"))
self.rgLineEditPass.setPlaceholderText(_translate("MainWindow", "Password"))
self.rgLineEditConfirm.setPlaceholderText(_translate("MainWindow", "Confirm Password"))
self.rgPushButtonSubmit.setText(_translate("MainWindow", "Submit"))
self.rgPushButtonBack.setText(_translate("MainWindow", "Back"))
        self.label.setText(_translate("MainWindow", "<html><head/><body><p align=\"center\"><span style=\" font-size:36pt;\">Simple Password Generator</span></p><p align=\"center\"><br/></p><p align=\"center\">Simple password generator helps you generate cryptographically strong random passwords.</p><p align=\"center\"><br/></p><p>Here you have two options:</p><p>1. Generate random password from alphabet characters, numbers, or symbols.</p><p>2. Generate random passphrase from wordlists. </p><p>This option is easier to remember. However, it is more susceptible to a dictionary attack.</p></body></html>"))
self.label_5.setText(_translate("MainWindow", "Number of characters"))
self.label_6.setText(_translate("MainWindow", "Minimum digits"))
self.lineEditDG.setText(_translate("MainWindow", "3"))
self.checkBoxSymbol.setText(_translate("MainWindow", "Include symbols"))
self.lineEditSymbol.setText(_translate("MainWindow", "{}()[].,:;+-*/&|<>=~$"))
self.lineEditNC.setText(_translate("MainWindow", "8"))
self.pushButtonGenWord.setText(_translate("MainWindow", "Generate password"))
self.tabGenerate.setTabText(self.tabGenerate.indexOf(self.tabPassword), _translate("MainWindow", "Password"))
self.lineEditNW.setText(_translate("MainWindow", "8"))
self.label_7.setText(_translate("MainWindow", "Number of words"))
self.pushButtonGenPhrase.setText(_translate("MainWindow", "Generate passphrase"))
self.tabGenerate.setTabText(self.tabGenerate.indexOf(self.tabPassphrase), _translate("MainWindow", "Passphrase"))
self.pushButtonBackToManager.setText(_translate("MainWindow", "Back to manager"))
self.pushButtonGen.setText(_translate("MainWindow", "Generate Pass"))
self.pushButtonDelete.setText(_translate("MainWindow", "Delete entry"))
self.pushButtonEdit.setText(_translate("MainWindow", "Edit entry"))
self.pushButtonNew.setText(_translate("MainWindow", "New entry"))
self.pushButtonLock.setText(_translate("MainWindow", "Lock"))
self.label_8.setText(_translate("MainWindow", "<html><head/><body><p align=\"center\"><br/><span style=\" font-size:36pt;\">Password manager</span></p><p align=\"center\"><span style=\" font-size:14pt;\">Manage your password database</span></p><p align=\"center\">All data are encrypted with your masterkey when they are uploaded to the cloud</p><p align=\"center\"><br/></p></body></html>"))
self.detLineEditURL.setPlaceholderText(_translate("MainWindow", "URL"))
self.detOK.setText(_translate("MainWindow", "OK"))
self.detLineEditName.setPlaceholderText(_translate("MainWindow", "Name"))
self.detCancel.setText(_translate("MainWindow", "Cancel"))
self.detLineEditUser.setPlaceholderText(_translate("MainWindow", "Username"))
self.detLineEditConfirm.setPlaceholderText(_translate("MainWindow", "Confirm Password"))
self.detLineEditPass.setPlaceholderText(_translate("MainWindow", "Password"))
self.detLabelEntry.setText(_translate("MainWindow", "<html><head/><body><p align=\"center\"><span style=\" font-size:36pt;\">ENTRY</span></p></body></html>"))
# DO NOT MODIFY GENERATED UI CODE PAST HERE
self.config()
user = None
pwd = None
def config(self):
# Add signal to buttons
self.wcPushButtonSignin.clicked.connect(self.signIn)
self.wcPushButtonSignup.clicked.connect(self.signUp)
self.rgPushButtonSubmit.clicked.connect(self.rgSubmit)
self.rgPushButtonBack.clicked.connect(self.rgBack)
self.pushButtonNew.clicked.connect(self.etNew)
self.pushButtonEdit.clicked.connect(self.etEdit)
self.pushButtonDelete.clicked.connect(self.etDelete)
self.pushButtonGen.clicked.connect(self.etGenerate)
self.pushButtonLock.clicked.connect(self.etLockDB)
self.detOK.clicked.connect(self.detClickOK)
self.detCancel.clicked.connect(self.detClickCancel)
self.pushButtonGen.clicked.connect(self.goToGen)
self.pushButtonGenWord.clicked.connect(self.genPassword)
self.pushButtonGenPhrase.clicked.connect(self.genPassphrase)
self.pushButtonBackToManager.clicked.connect(self.backToManager)
# Config table
self.tableEntry.setRowCount(0)
self.tableEntry.setColumnCount(4)
self.tableEntry.setHorizontalHeaderLabels("Name;Username;Password;URL".split(";"))
self.tableEntry.setSelectionBehavior(QtWidgets.QTableView.SelectRows)
header = self.tableEntry.horizontalHeader()
header.setSectionResizeMode(0, QtWidgets.QHeaderView.Stretch)
header.setSectionResizeMode(1, QtWidgets.QHeaderView.Stretch)
header.setSectionResizeMode(2, QtWidgets.QHeaderView.Stretch)
header.setSectionResizeMode(3, QtWidgets.QHeaderView.Stretch)
def output(self, msg):
msg = '> ' + msg
print(msg)
self.labelOutput.setText(msg)
def display(self, i):
self.stackedWidget.setCurrentIndex(i)
def checkUser(self, usr, pwd):
pwd = hashlib.md5(pwd.encode('utf-8')).hexdigest()
db = mg.connect()
result = mg.get_user(db, usr, pwd)
print(result)
if result > 0:
return True
else:
return False
def encrypt(self, msg):
result = xc.password_encrypt(msg.encode(), self.pwd).decode('utf-8')
print(result)
return result
def decrypt(self, msg):
result = xc.password_decrypt(bytes(msg, 'utf-8'), self.pwd).decode()
print(result)
return result
# pageWC
def signIn(self):
usr = self.wcLineEditUser.text()
pwd = self.wcLineEditPass.text()
print('signIn()')
print('Username:' + usr)
print('Password:' + pwd)
if usr.strip() != '' and pwd.strip() != '':
if self.checkUser(usr, pwd):
# If user is found
self.user = usr
                self.pwd = pwd
self.syncTable()
self.display(3)
self.output("Sign in successfully!")
else:
self.output("Wrong Username/Password")
else:
self.output("Please enter Username/Password")
def signUp(self):
print('signUp()')
self.output("Signing you up...")
self.display(1)
# pageRG
def rgSubmit(self):
usr = self.rgLineEditUser.text()
pwd = self.rgLineEditPass.text()
cfp = self.rgLineEditConfirm.text()
print('rgSubmit()')
print('Username:' + usr)
print('Password:' + pwd)
print('Confirm Password:' + cfp)
if usr.strip() != '' and pwd.strip() != '':
if pwd == cfp:
if not self.checkUser(usr, pwd):
# If user is not found
self.user = usr
                    self.pwd = pwd
self.addUser(usr, pwd)
self.syncTable()
self.display(3)
                    self.output('Signed up successfully!')
self.rgLineEditUser.setText('')
self.rgLineEditPass.setText('')
self.rgLineEditConfirm.setText('')
else:
print("Username is already used!")
else:
self.output("Passwords are not matched!")
else:
self.output("Please enter Username/Password")
def rgBack(self):
print('rgBack()')
self.display(0)
def addUser(self, usr, pwd):
pwd = hashlib.md5(pwd.encode('utf-8')).hexdigest()
db = mg.connect()
mg.add_user(db, usr, pwd)
# pageEntry
def etNew(self):
self.etMode = 'new'
self.detLineEditName.setText('')
self.detLineEditUser.setText('')
self.detLineEditPass.setText('')
self.detLineEditConfirm.setText('')
self.detLineEditURL.setText('')
self.detLabelEntry.setText('Add new entry')
self.display(4)
self.output('Adding entry...')
def etEdit(self):
row = self.tableEntry.currentRow()
self.etMode = 'edit'
self.detLineEditName.setText(self.tableEntry.item(row, 0).text())
self.detLineEditUser.setText(self.tableEntry.item(row, 1).text())
self.detLineEditPass.setText(self.tableEntry.item(row, 2).text())
self.detLineEditConfirm.setText(self.tableEntry.item(row, 2).text())
self.detLineEditURL.setText(self.tableEntry.item(row, 3).text())
self.detLabelEntry.setText('Edit entry')
self.display(4)
self.output('Editing entry...')
def etDelete(self):
self.etMode = 'delete'
self.deleteEntry()
def etGenerate(self):
self.display(2)
def etLockDB(self):
self.user = None
self.pwd = None
self.wcLineEditUser.setText('')
self.wcLineEditPass.setText('')
self.display(0)
# pageEntryCUD
def syncTable(self):
self.tableEntry.setRowCount(0)
database = mg.connect()
result = mg.findall(database, self.user)
for r in result:
entry = [
r['name'],
self.decrypt(r['user']),
self.decrypt(r['pass']),
self.decrypt(r['url'])
]
self.table_appender(self.tableEntry, *entry)
# print(r)
def detClickOK(self):
name = self.detLineEditName.text()
usr = self.detLineEditUser.text()
pwd = self.detLineEditPass.text()
pcf = self.detLineEditConfirm.text()
url = self.detLineEditURL.text()
print('detOK()')
print('Name:' + name)
print('Username:' + usr)
print('Password:' + pwd)
print('Confirm Password:' + pcf)
print('URL:' + url)
if name.strip() != '' and usr.strip() != '' and pwd.strip() != '' and pcf.strip() != '' and url.strip() != '':
if pwd == pcf:
# Conditions passed!
if self.etMode == 'new':
self.addEntry(name, usr, pwd, url)
if self.etMode == 'edit':
self.editEntry(name, usr, pwd, url)
else:
self.output("Passwords are not matched!")
else:
self.output("All field must be filled!")
def detClickCancel(self):
self.display(3)
self.output('You can manage your password here')
def addEntry(self, name, usr, pwd, url):
entry = {
'owner': self.user,
'name': name,
'user': usr,
            'pass': pwd,
'url': url
}
# Encrypt every entry
for k, v in entry.items():
if k != 'owner' and k != 'name':
entry[k] = self.encrypt(v)
db = mg.connect()
ret = mg.add_entry(db, self.user, entry)
print(ret.inserted_id)
self.syncTable()
self.display(3)
self.output('Entry is added')
def editEntry(self, name, usr, pwd, url):
curr = self.getCurrentRow()
entry = {
'owner': self.user,
'name': name,
'user': usr,
            'pass': pwd,
'url': url
}
# Encrypt every entry
for k, v in entry.items():
if k != 'owner' and k != 'name':
entry[k] = self.encrypt(v)
db = mg.connect()
mg.edit_entry(db, self.user, curr, entry)
self.syncTable()
self.display(3)
self.output('Entry is edited')
def deleteEntry(self):
curr = self.getCurrentRow()
db = mg.connect()
mg.delete_entry(db, self.user, curr)
self.syncTable()
self.output('Entry is deleted')
def getCurrentRow(self):
# get selected row
row = self.tableEntry.currentRow()
selected = (self.tableEntry.item(row, 0).text())
return selected
def table_appender(self, widget, *args):
        # Append a new row and fill each column with the matching value from args.
        widget.insertRow(widget.rowCount())
        for pos in range(widget.columnCount()):
            widget.setItem(widget.rowCount() - 1, pos, QtWidgets.QTableWidgetItem(args[pos]))
def goToGen(self):
self.display(2)
self.output('You can generate secure password or passphrase here')
# pageGen
def genPassword(self):
n = int(self.lineEditNC.text())
digit = int(self.lineEditDG.text())
with_symbol = self.checkBoxSymbol.isChecked()
symbols = self.lineEditSymbol.text()
result = pg.secure(n, digit, with_symbol, symbols)
self.textEditPassword.setPlainText(result)
self.output('Password is generated')
def genPassphrase(self):
n = int(self.lineEditNW.text())
dir_path = os.path.dirname(os.path.realpath(__file__))
filepath = os.path.join(dir_path, 'data/words.txt')
print(filepath)
result = pg.passphrase(n, filepath)
self.textEditPassphrase.setPlainText(result)
        self.output('Passphrase is generated')
def backToManager(self):
self.display(3)
self.textEditPassword.setPlainText('')
self.textEditPassphrase.setPlainText('')
self.output('You can manage your password here')
if __name__ == "__main__":
import sys
app = QtWidgets.QApplication(sys.argv)
MainWindow = QtWidgets.QMainWindow()
ui = Ui_MainWindow()
ui.setupUi(MainWindow)
MainWindow.show()
sys.exit(app.exec_())
``` |
{
"source": "3CP-Studios/Space-Adventure",
"score": 3
} |
#### File: game/Game_lib/game.py
```python
import pygame
UP=1
DOWN=2
LEFT=3
RIGHT=4
YELLOW = (255, 255, 0)  # bullet colour used by Bullet.image.fill()
class Bullet(pygame.sprite.Sprite):
def __init__(self, x, y):
pygame.sprite.Sprite.__init__(self)
self.image = pygame.Surface((10, 20))
self.image.fill(YELLOW)
self.rect = self.image.get_rect()
self.rect.bottom = y
self.rect.centerx = x
self.speedy = -10
def update(self):
self.rect.y += self.speedy
# kill if it moves off the top of the screen
if self.rect.bottom < 0:
self.kill()
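# Illustrative usage sketch (assumption: pygame is initialised and a sprite group
# is managed in the game loop elsewhere; the coordinates are made up):
#
#   bullets = pygame.sprite.Group()
#   bullets.add(Bullet(x=320, y=240))
#   bullets.update()  # each Bullet moves up by speedy and kills itself off-screen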
class Game:
def __init__(self, screen, display):
print("Using Game-lib v1.0")
self.sc=screen
self.dis=display
def shoot(self, start_pos, speed, direction=UP):
pass
def game(screen, display): return Game(screen, display)
if __name__ == '__main__':
pass
``` |
{
"source": "3crabs/class-book",
"score": 2
} |
#### File: class-book/groups/views.py
```python
from django.core.mail import EmailMessage
from django.http import HttpResponse, HttpResponseRedirect
from django.shortcuts import render
from accounting.models import Attendance, Result
from accounting.templatetags import my_tags
from class_book import settings
from groups.models import Group, Student
from subjects.models import Subject
import xlwt
def groups(request):
if request.POST:
item = Group(name=request.POST['name'])
item.save()
object_list = Group.objects.all().order_by("name")
return render(request, 'groups/index.html', locals())
def group(request, pk):
group = Group.objects.get(id=pk)
if 'delete' in request.POST:
group.delete()
object_list = Group.objects.all().order_by("name")
return render(request, 'groups/index.html', locals())
group = Group.objects.get(id=pk)
subject_list = Subject.objects.all().order_by("name")
return render(request, 'groups/info.html', locals())
def group_students(request, pk):
if request.POST:
item = Student(
name=request.POST['name'],
email=request.POST['email'],
group_id=pk,
)
item.save()
group = Group.objects.get(id=pk)
subjects = group.subjects.all()
for subject in subjects:
for lesson in subject.lesson_set.all():
attendance = Attendance()
attendance.student = item
attendance.lesson = lesson
attendance.save()
for task in subject.task_set.all():
result = Result()
result.student = item
result.task = task
result.save()
group = Group.objects.get(id=pk)
return render(request, 'groups/info.html', locals())
def group_student(request, pk, id):
student = Student.objects.get(id=id)
if 'delete' in request.POST:
student.delete()
group = Group.objects.get(id=pk)
subject_list = Subject.objects.all().order_by("name")
return render(request, 'groups/info.html', locals())
def group_subjects(request, pk):
if request.POST:
group = Group.objects.get(id=pk)
subject = Subject.objects.get(id=request.POST['subject'])
group.subjects.add(subject)
group.save()
group = Group.objects.get(id=pk)
for student in group.student_set.all():
for lesson in subject.lesson_set.all():
attendance = Attendance()
attendance.student = student
attendance.lesson = lesson
attendance.save()
for task in subject.task_set.all():
result = Result()
result.student = student
result.task = task
result.save()
group = Group.objects.get(id=pk)
subject_list = Subject.objects.all().order_by("name")
return render(request, 'groups/info.html', locals())
def group_subject(request, pk, id):
subject = Subject.objects.get(id=id)
if 'delete' in request.POST:
group = Group.objects.get(id=pk)
group.subjects.remove(subject)
group.save()
group = Group.objects.get(id=pk)
subject_list = Subject.objects.all().order_by("name")
return render(request, 'groups/info.html', locals())
else:
group = Group.objects.get(id=pk)
itogs = {}
for student in group.student_set.all():
itogs[student.id] = student.id + 1
print(itogs)
return render(request, 'accouting/index.html', locals())
def create_xls_(group, subject):
book = xlwt.Workbook(encoding="utf-8")
sheet = book.add_sheet(group.name)
sheet.write(0, 0, "Успеваемость группы " + group.name + " по предмету " + subject.name)
row = 1
col = 0
sheet.write(row, col, "Посещаемость")
row += 1
sheet.write(row, col, "Студент")
col += 1
for lesson in subject.lesson_set.all():
sheet.write(row, col, lesson.name)
col += 1
sheet.write(row, col, "Посещаемость")
row += 1
col = 0
for student in group.student_set.all():
sheet.write(row, col, student.name)
col += 1
for attendance in student.attendance_set.filter(lesson__subject_id=subject.id):
sheet.write(row, col, attendance.visit)
col += 1
sheet.write(row, col, my_tags.lessons(student, subject))
row += 1
col = 0
sheet.write(row, col, "Результаты")
row += 1
sheet.write(row, col, "Студент")
col += 1
for task in subject.task_set.all():
sheet.write(row, col, task.name)
col += 1
sheet.write(row, col, "Успеваемость")
row += 1
col = 0
for student in group.student_set.all():
sheet.write(row, col, student.name)
col += 1
for result in student.result_set.filter(task__subject_id=subject.id):
sheet.write(row, col, result.rating)
col += 1
sheet.write(row, col, my_tags.tasks(student, subject))
row += 1
col = 0
path = "groups/static/docs/spreadsheet-" + str(group.id) + "-" + str(subject.id) + ".xlsx"
book.save(path)
return path
def create_xls(request, pk, id):
group = Group.objects.get(id=pk)
subject = group.subjects.get(id=id)
path = create_xls_(group, subject)
file = open(path, 'rb')
response = HttpResponse(file, content_type='application/vnd.openxmlformats-officedocument.spreadsheetml.sheet')
response['Content-Disposition'] = 'attachment; filename=table.xlsx'
return response
def sending(request, pk, id):
group = Group.objects.get(id=pk)
students = group.student_set.all()
emails = [student.email for student in students]
email = EmailMessage(
'Результаты',
'Здравствуй, вот ваша успеваемость',
settings.EMAIL_HOST_USER,
emails
)
path = create_xls_(group, Subject.objects.get(id=id))
email.attach_file(path)
email.send()
return HttpResponseRedirect(request.META.get('HTTP_REFERER'))
```
#### File: class-book/subjects/views.py
```python
from django.shortcuts import render
from accounting.models import Attendance, Result
from groups.models import Group
from subjects.models import Subject, Task, Lesson
def subjects(request):
if request.POST:
item = Subject(
name=request.POST['name'],
level_3=request.POST['level_3'],
level_4=request.POST['level_4'],
level_5=request.POST['level_5'],
)
item.save()
object_list = Subject.objects.all().order_by("name")
return render(request, 'subjects/index.html', locals())
def subject(request, pk):
subject = Subject.objects.get(id=pk)
if 'delete' in request.POST:
subject.delete()
object_list = Subject.objects.all().order_by("name")
return render(request, 'subjects/index.html', locals())
subject = Subject.objects.get(id=pk)
return render(request, 'subjects/info.html', locals())
def subject_tasks(request, pk):
if request.POST:
item = Task(
name=request.POST['name'],
deadline=request.POST['date'],
subject_id=pk,
)
item.save()
groups = Group.objects.all()
for group in groups:
if item.subject in group.subjects.all():
for student in group.student_set.all():
result = Result()
result.student = student
result.task = item
result.save()
subject = Subject.objects.get(id=pk)
return render(request, 'subjects/info.html', locals())
def subject_lessons(request, pk):
if request.POST:
item = Lesson(
name=request.POST['name'],
date=request.POST['date'],
subject_id=pk,
)
item.save()
groups = Group.objects.all()
for group in groups:
if item.subject in group.subjects.all():
for student in group.student_set.all():
attendance = Attendance()
attendance.student = student
attendance.lesson = item
attendance.save()
subject = Subject.objects.get(id=pk)
return render(request, 'subjects/info.html', locals())
def subject_task(request, pk, id):
task = Task.objects.get(id=id)
if 'delete' in request.POST:
task.delete()
subject = Subject.objects.get(id=pk)
return render(request, 'subjects/info.html', locals())
def subject_lesson(request, pk, id):
lesson = Lesson.objects.get(id=id)
if 'delete' in request.POST:
lesson.delete()
subject = Subject.objects.get(id=pk)
return render(request, 'subjects/info.html', locals())
``` |
{
"source": "3crabs/wallet",
"score": 3
} |
#### File: 3crabs/wallet/Database.py
```python
from sqlalchemy import create_engine
from sqlalchemy.orm import sessionmaker, Session
from models.Base import Base
class Database:
__instance = None
def __init__(self):
with open('database_name.txt') as f:
database_name = f.readline()
self.engine = create_engine(database_name, echo=True)
Base.metadata.create_all(self.engine)
@classmethod
def get_instance(cls):
if not cls.__instance:
cls.__instance = Database()
return cls.__instance
@classmethod
def new_base(cls):
cls.__instance = Database()
return cls.__instance
def session(self) -> Session:
return sessionmaker(bind=self.engine)()
```
#### File: wallet/models/Flow.py
```python
from sqlalchemy import Column, Integer, ForeignKey
from sqlalchemy.orm import relationship
from models.Base import Base
class Flow(Base):
__tablename__ = 'flows'
id = Column(Integer, primary_key=True)
money = Column(Integer)
category_id = Column(Integer, ForeignKey('categories.id'))
category = relationship("Category", backref='flows')
def __init__(self, money, category_id: int):
self.money = money
self.category_id = category_id
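# Illustrative usage sketch (assumption: the Database helper from this repo is
# importable and a Category row with id=1 already exists; the amount is made up):
#
#   from Database import Database
#   session = Database.get_instance().session()
#   session.add(Flow(money=250, category_id=1))
#   session.commit()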
``` |
{
"source": "3CV11-IS-Equipo-4/poda-api",
"score": 3
} |
#### File: src/usuarios/auth.py
```python
import jwt
import datetime
def encode_auth_token_usuario(usuario_email, SECRET_KEY):
"""
    Generates the authentication token for a user.
"""
try:
payload = {
'exp' : datetime.datetime.utcnow() + datetime.timedelta(days=1, seconds=0),
'iat' : datetime.datetime.utcnow(),
'sub' : usuario_email
}
return jwt.encode(payload, SECRET_KEY, algorithm='HS256')
except Exception as e:
return e
def decode_auth_token_usuario(auth_token, SECRET_KEY):
"""
    Decodes an authentication token for a user.
"""
try:
payload = jwt.decode(auth_token, SECRET_KEY, algorithms=['HS256'])
return payload['sub']
except jwt.ExpiredSignatureError:
return -1
except jwt.InvalidTokenError:
return -2
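# Illustrative usage sketch (assumption: SECRET_KEY comes from the app settings;
# the e-mail address is made up):
#
#   token = encode_auth_token_usuario('user@example.com', SECRET_KEY)
#   email = decode_auth_token_usuario(token, SECRET_KEY)
#   # email == 'user@example.com'; -1 means the token expired, -2 means it is invalid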
``` |
{
"source": "3d510/saleor-easyship",
"score": 2
} |
#### File: checkout/views/__init__.py
```python
from django.shortcuts import redirect
from django.template.response import TemplateResponse
from saleor.checkout.views.utils import shipping_info
from saleor.easyship.api import post
from saleor.product.models import Product
from saleor.shipping.models import ShippingMethod, Shipment
from .discount import add_voucher_form, validate_voucher
from .shipping import (anonymous_user_shipping_address_view,
user_shipping_address_view)
from .summary import (
summary_with_shipping_view, anonymous_summary_without_shipping,
summary_without_shipping)
from .validators import (
validate_cart, validate_shipping_address,
validate_shipping_method, validate_is_shipping_required)
from ..core import load_checkout
from ..forms import ShippingMethodForm
from ...account.forms import LoginForm
import json
@load_checkout
@validate_cart
@validate_is_shipping_required
def index_view(request, checkout):
"""Redirect to the initial step of checkout."""
return redirect('checkout:shipping-address')
@load_checkout
@validate_voucher
@validate_cart
@validate_is_shipping_required
@add_voucher_form
def shipping_address_view(request, checkout):
"""Display the correct shipping address step."""
if request.user.is_authenticated:
return user_shipping_address_view(request, checkout)
return anonymous_user_shipping_address_view(request, checkout)
@load_checkout
@validate_voucher
@validate_cart
@validate_is_shipping_required
@validate_shipping_address
@add_voucher_form
def shipping_method_view(request, checkout):
"""Display the shipping method selection step."""
# print(checkout.__dict__)
country_code = checkout.shipping_address.country.code
shipping_method_country_ids = checkout.storage['shipping_method_country_ids']
related_products = checkout.storage.get('related_products', [])
related_product_objs = [(item[0], Product.objects.get(pk=item[1])) for item in related_products]
print(related_product_objs)
shipping_method_form = ShippingMethodForm(
country_code, request.POST or None,
initial={'method': checkout.shipping_method}
, shipping_method_country_ids=shipping_method_country_ids
)
if shipping_method_form.is_valid():
checkout.shipping_method = shipping_method_form.cleaned_data['method']
selected_courier = shipping_method_form.cleaned_data['method']
# selected_courier_id = ShippingMethod.objects.get(name=selected_courier_name).courier_id
ship_info = shipping_info(checkout)
ship_info['selected_courier_id'] = selected_courier.shipping_method.courier_id
import json
print(json.dumps(ship_info, indent=True))
shipment = post("shipment/v1/shipments", ship_info)['shipment']
print(shipment, request.user)
# Shipment.objects.create(
# easyship_shipment_id=shipment['easyship_shipment_id'],
# platform_order_number=shipment['platform_order_number'],
# min_delivery_time=shipment['selected_courier']['min_delivery_time'],
# max_delivery_time=shipment['selected_courier']['max_delivery_time'],
# user=request.user
# )
checkout.storage['easyship_shipment_id'] = shipment['easyship_shipment_id']
checkout.storage['platform_order_number'] = shipment['platform_order_number']
checkout.storage['min_delivery_time'] = shipment['selected_courier']['min_delivery_time']
checkout.storage['max_delivery_time'] = shipment['selected_courier']['max_delivery_time']
return redirect('checkout:summary')
return TemplateResponse(
request, 'checkout/shipping_method.html',
context={
'shipping_method_form': shipping_method_form,
'checkout': checkout,
'related_product_objs': related_product_objs
})
@load_checkout
@validate_voucher
@validate_cart
@add_voucher_form
def summary_view(request, checkout):
"""Display the correct order summary."""
if checkout.is_shipping_required:
view = validate_shipping_address(summary_with_shipping_view)
view = validate_shipping_method(view)
return view(request, checkout)
if request.user.is_authenticated:
return summary_without_shipping(request, checkout)
return anonymous_summary_without_shipping(request, checkout)
@load_checkout
@validate_cart
def login(request, checkout):
"""Allow the user to log in prior to checkout."""
if request.user.is_authenticated:
return redirect('checkout:index')
form = LoginForm()
return TemplateResponse(request, 'checkout/login.html', {'form': form})
```
#### File: saleor/easyship/api.py
```python
import requests
import json
HOSTING_URL = "https://api.easyship.com/"
HEADER = {"Content-Type": "application/json", "Authorization": "Bearer <KEY>"}
# def get_token():
# return "<KEY>
def get(endpoint, payload={}):
r = requests.get(HOSTING_URL + endpoint, headers = HEADER, params=payload)
return json.loads(r.text)
def post(endpoint, data):
r = requests.post(HOSTING_URL + endpoint, data=json.dumps(data), headers=HEADER)
return json.loads(r.text)
## example
# print(get("reference/v1/categories"))
# print(post("rate/v1/rates", data={
# "origin_country_alpha2": "SG",
# "origin_postal_code": "059405",
# "destination_country_alpha2": "US",
# "destination_postal_code": 10030,
# "taxes_duties_paid_by": "Sender",
# "is_insured": False,
# "items": [
# {
# "actual_weight": 1.2,
# "height": 10,
# "width": 15,
# "length": 20,
# "category": "mobiles",
# "declared_currency": "SGD",
# "declared_customs_value": 100
# }
# ]
# })["rates"][0]["shipment_charge_total"])
``` |
{
"source": "3DAlgoLab/Mastering-Concurrency-in-Python",
"score": 3
} |
#### File: Mastering-Concurrency-in-Python/Chapter17/example2.py
```python
import sys; sys.setswitchinterval(.000001)
import threading
def foo():
global my_list
my_list.append(1)
my_list = []
threads = []
for i in range(1000):
thread = threading.Thread(target=foo)
threads.append(thread)
for thread in threads:
thread.start()
for thread in threads:
thread.join()
print(f'Final list length: {len(my_list)}.')
print('Finished.')
```
#### File: Mastering-Concurrency-in-Python/Chapter18/example4.py
```python
def read_data():
for i in range(5):
print('Inside the inner for loop...')
yield i * 2
result = read_data()
for i in range(6):
print('Inside the outer for loop...')
print(next(result))
print('Finished.')
```
#### File: Mastering-Concurrency-in-Python/Chapter18/example6.py
```python
import socket, select, types
from collections import namedtuple
from operator import mul
from functools import reduce
###########################################################################
# Reactor
Session = namedtuple('Session', ['address', 'file'])
sessions = {} # { csocket : Session(address, file)}
callback = {} # { csocket : callback(client, line) }
generators = {} # { csocket : inline callback generator }
# Main event loop
def reactor(host, port):
sock = socket.socket()
sock.bind((host, port))
sock.listen(5)
sock.setblocking(0) # Make asynchronous
sessions[sock] = None
print(f'Server up, running, and waiting for call on {host} {port}')
try:
while True:
# Serve existing clients only if they already have data ready
ready_to_read, _, _ = select.select(sessions, [], [], 0.1)
for conn in ready_to_read:
if conn is sock:
conn, cli_address = sock.accept()
connect(conn, cli_address)
continue
line = sessions[conn].file.readline()
if line:
callback[conn](conn, line.rstrip())
else:
disconnect(conn)
finally:
sock.close()
def connect(conn, cli_address):
sessions[conn] = Session(cli_address, conn.makefile())
gen = process_request(conn)
generators[conn] = gen
callback[conn] = gen.send(None) # Start the generator
def disconnect(conn):
gen = generators.pop(conn)
gen.close()
sessions[conn].file.close()
conn.close()
del sessions[conn]
del callback[conn]
@types.coroutine
def readline(conn):
def inner(conn, line):
gen = generators[conn]
try:
callback[conn] = gen.send(line) # Continue the generator
except StopIteration:
disconnect(conn)
line = yield inner
return line
###########################################################################
# User's Business Logic
async def process_request(conn):
print(f'Received connection from {sessions[conn].address}')
mode = 'sum'
try:
conn.sendall(b'<welcome: starting in sum mode>\n')
while True:
line = await readline(conn)
if line == 'quit':
conn.sendall(b'connection closed\r\n')
return
if line == 'sum':
conn.sendall(b'<switching to sum mode>\r\n')
mode = 'sum'
continue
if line == 'product':
conn.sendall(b'<switching to product mode>\r\n')
mode = 'product'
continue
print(f'{sessions[conn].address} --> {line}')
try:
nums = list(map(int, line.split(',')))
except ValueError:
conn.sendall(
b'ERROR. Enter only integers separated by commas\n')
continue
if mode == 'sum':
conn.sendall(b'Sum of input integers: %a\r\n'
% str(sum(nums)))
else:
conn.sendall(b'Product of input integers: %a\r\n'
% str(reduce(mul, nums, 1)))
finally:
print(f'{sessions[conn].address} quit')
if __name__ == '__main__':
reactor('localhost', 8080)
``` |
{
"source": "3DAlgoLab/pyqtgraph",
"score": 3
} |
#### File: pyqtgraph/graphicsItems/DateAxisItem.py
```python
import sys
import numpy as np
import time
from datetime import datetime, timedelta
from .AxisItem import AxisItem
from collections import OrderedDict
__all__ = ['DateAxisItem']
MS_SPACING = 1/1000.0
SECOND_SPACING = 1
MINUTE_SPACING = 60
HOUR_SPACING = 3600
DAY_SPACING = 24 * HOUR_SPACING
WEEK_SPACING = 7 * DAY_SPACING
MONTH_SPACING = 30 * DAY_SPACING
YEAR_SPACING = 365 * DAY_SPACING
if sys.platform == 'win32':
_epoch = datetime.utcfromtimestamp(0)
def utcfromtimestamp(timestamp):
return _epoch + timedelta(seconds=timestamp)
else:
utcfromtimestamp = datetime.utcfromtimestamp
MIN_REGULAR_TIMESTAMP = (datetime(1, 1, 1) - datetime(1970,1,1)).total_seconds()
MAX_REGULAR_TIMESTAMP = (datetime(9999, 1, 1) - datetime(1970,1,1)).total_seconds()
SEC_PER_YEAR = 365.25*24*3600
def makeMSStepper(stepSize):
def stepper(val, n):
if val < MIN_REGULAR_TIMESTAMP or val > MAX_REGULAR_TIMESTAMP:
return np.inf
val *= 1000
f = stepSize * 1000
return (val // (n*f) + 1) * (n*f) / 1000.0
return stepper
def makeSStepper(stepSize):
def stepper(val, n):
if val < MIN_REGULAR_TIMESTAMP or val > MAX_REGULAR_TIMESTAMP:
return np.inf
return (val // (n*stepSize) + 1) * (n*stepSize)
return stepper
def makeMStepper(stepSize):
def stepper(val, n):
if val < MIN_REGULAR_TIMESTAMP or val > MAX_REGULAR_TIMESTAMP:
return np.inf
d = utcfromtimestamp(val)
base0m = (d.month + n*stepSize - 1)
d = datetime(d.year + base0m // 12, base0m % 12 + 1, 1)
return (d - datetime(1970, 1, 1)).total_seconds()
return stepper
def makeYStepper(stepSize):
def stepper(val, n):
if val < MIN_REGULAR_TIMESTAMP or val > MAX_REGULAR_TIMESTAMP:
return np.inf
d = utcfromtimestamp(val)
next_year = (d.year // (n*stepSize) + 1) * (n*stepSize)
if next_year > 9999:
return np.inf
next_date = datetime(next_year, 1, 1)
return (next_date - datetime(1970, 1, 1)).total_seconds()
return stepper
class TickSpec:
""" Specifies the properties for a set of date ticks and computes ticks
within a given utc timestamp range """
def __init__(self, spacing, stepper, format, autoSkip=None):
"""
============= ==========================================================
Arguments
spacing approximate (average) tick spacing
        stepper       a stepper function that takes a utc time stamp and a
                      step number n to compute the start of the next unit.
                      You can use the make_X_stepper functions to create
                      common steppers.
format a strftime compatible format string which will be used to
convert tick locations to date/time strings
autoSkip list of step size multipliers to be applied when the tick
density becomes too high. The tick spec automatically
applies additional powers of 10 (10, 100, ...) to the list
if necessary. Set to None to switch autoSkip off
============= ==========================================================
"""
self.spacing = spacing
self.step = stepper
self.format = format
self.autoSkip = autoSkip
def makeTicks(self, minVal, maxVal, minSpc):
ticks = []
n = self.skipFactor(minSpc)
x = self.step(minVal, n)
while x <= maxVal:
ticks.append(x)
x = self.step(x, n)
return (np.array(ticks), n)
def skipFactor(self, minSpc):
if self.autoSkip is None or minSpc < self.spacing:
return 1
factors = np.array(self.autoSkip, dtype=np.float64)
while True:
for f in factors:
spc = self.spacing * f
if spc > minSpc:
return int(f)
factors *= 10
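# Illustrative sketch of how a TickSpec behaves (values chosen only for
# demonstration): a spec that places a tick every hour, labelled HH:MM, and
# skips to every 6 hours once the requested minimum spacing exceeds one hour:
#
#   spec = TickSpec(HOUR_SPACING, makeSStepper(HOUR_SPACING), '%H:%M', autoSkip=[1, 6])
#   ticks, skip = spec.makeTicks(minVal=0, maxVal=12 * 3600, minSpc=2 * 3600)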
class ZoomLevel:
""" Generates the ticks which appear in a specific zoom level """
def __init__(self, tickSpecs, exampleText):
"""
============= ==========================================================
tickSpecs a list of one or more TickSpec objects with decreasing
coarseness
============= ==========================================================
"""
self.tickSpecs = tickSpecs
self.utcOffset = 0
self.exampleText = exampleText
def tickValues(self, minVal, maxVal, minSpc):
# return tick values for this format in the range minVal, maxVal
# the return value is a list of tuples (<avg spacing>, [tick positions])
# minSpc indicates the minimum spacing (in seconds) between two ticks
        # to fulfill the maxTicksPerPt constraint of the DateAxisItem at the
# current zoom level. This is used for auto skipping ticks.
allTicks = []
valueSpecs = []
# back-project (minVal maxVal) to UTC, compute ticks then offset to
# back to local time again
utcMin = minVal - self.utcOffset
utcMax = maxVal - self.utcOffset
for spec in self.tickSpecs:
ticks, skipFactor = spec.makeTicks(utcMin, utcMax, minSpc)
# reposition tick labels to local time coordinates
ticks += self.utcOffset
# remove any ticks that were present in higher levels
tick_list = [x for x in ticks.tolist() if x not in allTicks]
allTicks.extend(tick_list)
valueSpecs.append((spec.spacing, tick_list))
# if we're skipping ticks on the current level there's no point in
# producing lower level ticks
if skipFactor > 1:
break
return valueSpecs
YEAR_MONTH_ZOOM_LEVEL = ZoomLevel([
TickSpec(YEAR_SPACING, makeYStepper(1), '%Y', autoSkip=[1, 5, 10, 25]),
TickSpec(MONTH_SPACING, makeMStepper(1), '%b')
], "YYYY")
MONTH_DAY_ZOOM_LEVEL = ZoomLevel([
TickSpec(MONTH_SPACING, makeMStepper(1), '%b'),
TickSpec(DAY_SPACING, makeSStepper(DAY_SPACING), '%d', autoSkip=[1, 5])
], "MMM")
DAY_HOUR_ZOOM_LEVEL = ZoomLevel([
TickSpec(DAY_SPACING, makeSStepper(DAY_SPACING), '%a %d'),
TickSpec(HOUR_SPACING, makeSStepper(HOUR_SPACING), '%H:%M', autoSkip=[1, 6])
], "MMM 00")
HOUR_MINUTE_ZOOM_LEVEL = ZoomLevel([
TickSpec(DAY_SPACING, makeSStepper(DAY_SPACING), '%a %d'),
TickSpec(MINUTE_SPACING, makeSStepper(MINUTE_SPACING), '%H:%M',
autoSkip=[1, 5, 15])
], "MMM 00")
HMS_ZOOM_LEVEL = ZoomLevel([
TickSpec(SECOND_SPACING, makeSStepper(SECOND_SPACING), '%H:%M:%S',
autoSkip=[1, 5, 15, 30])
], "99:99:99")
MS_ZOOM_LEVEL = ZoomLevel([
TickSpec(MINUTE_SPACING, makeSStepper(MINUTE_SPACING), '%H:%M:%S'),
TickSpec(MS_SPACING, makeMSStepper(MS_SPACING), '%S.%f',
autoSkip=[1, 5, 10, 25])
], "99:99:99")
def getOffsetFromUtc():
"""Retrieve the utc offset respecting the daylight saving time"""
ts = time.localtime()
if ts.tm_isdst:
utc_offset = time.altzone
else:
utc_offset = time.timezone
return utc_offset
class DateAxisItem(AxisItem):
"""
**Bases:** :class:`AxisItem <pyqtgraph.AxisItem>`
An AxisItem that displays dates from unix timestamps.
The display format is adjusted automatically depending on the current time
density (seconds/point) on the axis. For more details on changing this
behaviour, see :func:`setZoomLevelForDensity() <pyqtgraph.DateAxisItem.setZoomLevelForDensity>`.
Can be added to an existing plot e.g. via
:func:`setAxisItems({'bottom':axis}) <pyqtgraph.PlotItem.setAxisItems>`.
"""
def __init__(self, orientation='bottom', utcOffset=None, **kwargs):
"""
Create a new DateAxisItem.
For `orientation` and `**kwargs`, see
:func:`AxisItem.__init__ <pyqtgraph.AxisItem.__init__>`.
"""
super(DateAxisItem, self).__init__(orientation, **kwargs)
# Set the zoom level to use depending on the time density on the axis
if utcOffset is None:
utcOffset = getOffsetFromUtc()
self.utcOffset = utcOffset
self.zoomLevels = OrderedDict([
(np.inf, YEAR_MONTH_ZOOM_LEVEL),
(5 * 3600*24, MONTH_DAY_ZOOM_LEVEL),
(6 * 3600, DAY_HOUR_ZOOM_LEVEL),
(15 * 60, HOUR_MINUTE_ZOOM_LEVEL),
(30, HMS_ZOOM_LEVEL),
(1, MS_ZOOM_LEVEL),
])
self.autoSIPrefix = False
def tickStrings(self, values, scale, spacing):
tickSpecs = self.zoomLevel.tickSpecs
tickSpec = next((s for s in tickSpecs if s.spacing == spacing), None)
try:
dates = [utcfromtimestamp(v - self.utcOffset) for v in values]
except (OverflowError, ValueError, OSError):
# should not normally happen
return ['%g' % ((v-self.utcOffset)//SEC_PER_YEAR + 1970) for v in values]
formatStrings = []
for x in dates:
try:
s = x.strftime(tickSpec.format)
if '%f' in tickSpec.format:
# we only support ms precision
s = s[:-3]
elif '%Y' in tickSpec.format:
s = s.lstrip('0')
formatStrings.append(s)
except ValueError: # Windows can't handle dates before 1970
formatStrings.append('')
return formatStrings
def tickValues(self, minVal, maxVal, size):
density = (maxVal - minVal) / size
self.setZoomLevelForDensity(density)
values = self.zoomLevel.tickValues(minVal, maxVal, minSpc=self.minSpacing)
return values
def setZoomLevelForDensity(self, density):
"""
Setting `zoomLevel` and `minSpacing` based on given density of seconds per pixel
The display format is adjusted automatically depending on the current time
density (seconds/point) on the axis. You can customize the behaviour by
overriding this function or setting a different set of zoom levels
than the default one. The `zoomLevels` variable is a dictionary with the
maximal distance of ticks in seconds which are allowed for each zoom level
before the axis switches to the next coarser level. To customize the zoom level
selection, override this function.
"""
padding = 10
# Size in pixels a specific tick label will take
if self.orientation in ['bottom', 'top']:
def sizeOf(text):
return self.fontMetrics.boundingRect(text).width() + padding
else:
def sizeOf(text):
return self.fontMetrics.boundingRect(text).height() + padding
# Fallback zoom level: Years/Months
self.zoomLevel = YEAR_MONTH_ZOOM_LEVEL
for maximalSpacing, zoomLevel in self.zoomLevels.items():
size = sizeOf(zoomLevel.exampleText)
# Test if zoom level is too fine grained
if maximalSpacing/size < density:
break
self.zoomLevel = zoomLevel
# Set up zoomLevel
self.zoomLevel.utcOffset = self.utcOffset
# Calculate minimal spacing of items on the axis
size = sizeOf(self.zoomLevel.exampleText)
self.minSpacing = density*size
def linkToView(self, view):
super(DateAxisItem, self).linkToView(view)
# Set default limits
_min = MIN_REGULAR_TIMESTAMP
_max = MAX_REGULAR_TIMESTAMP
if self.orientation in ['right', 'left']:
view.setLimits(yMin=_min, yMax=_max)
else:
view.setLimits(xMin=_min, xMax=_max)
def generateDrawSpecs(self, p):
# Get font metrics from QPainter
# Not happening in "paint", as the QPainter p there is a different one from the one here,
# so changing that font could cause unwanted side effects
if self.style['tickFont'] is not None:
p.setFont(self.style['tickFont'])
self.fontMetrics = p.fontMetrics()
# Get font scale factor by current window resolution
return super(DateAxisItem, self).generateDrawSpecs(p)
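# Illustrative usage sketch (assumption: pyqtgraph is imported as pg and a Qt
# event loop is running; the sample data is made up). As the class docstring
# notes, the axis is attached via setAxisItems:
#
#   import time
#   axis = DateAxisItem(orientation='bottom')
#   plt = pg.plot([time.time() + i * 3600 for i in range(24)], list(range(24)))
#   plt.setAxisItems({'bottom': axis})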
```
#### File: pyqtgraph/graphicsItems/NonUniformImage.py
```python
from ..Qt import QtGui, QtCore
import math
import numpy as np
from ..colormap import ColorMap
from .GraphicsObject import GraphicsObject
from .. import mkBrush, mkPen
from .. import functions as fn
class NonUniformImage(GraphicsObject):
"""
**Bases:** :class:`GraphicsObject <pyqtgraph.GraphicsObject>`
GraphicsObject displaying an image with non-uniform sample points. It's
commonly used to display 2-d or slices of higher dimensional data that
have a regular but non-uniform grid e.g. measurements or simulation results.
"""
def __init__(self, x, y, z, border=None):
GraphicsObject.__init__(self)
# convert to numpy arrays
x = np.asarray(x, dtype=np.float64)
y = np.asarray(y, dtype=np.float64)
z = np.asarray(z, dtype=np.float64)
if x.ndim != 1 or y.ndim != 1:
raise Exception("x and y must be 1-d arrays.")
if np.any(np.diff(x) < 0) or np.any(np.diff(y) < 0):
raise Exception("The values in x and y must be monotonically increasing.")
if len(z.shape) != 2 or z.shape != (x.size, y.size):
raise Exception("The length of x and y must match the shape of z.")
# default colormap (black - white)
self.cmap = ColorMap(pos=[0.0, 1.0], color=[(0.0, 0.0, 0.0, 1.0), (1.0, 1.0, 1.0, 1.0)])
self.data = (x, y, z)
self.lut = None
self.border = border
self.generatePicture()
def setLookupTable(self, lut, autoLevel=False):
lut.sigLevelsChanged.connect(self.generatePicture)
lut.gradient.sigGradientChanged.connect(self.generatePicture)
self.lut = lut
if autoLevel:
_, _, z = self.data
f = z[np.isfinite(z)]
lut.setLevels(f.min(), f.max())
self.generatePicture()
def setColorMap(self, cmap):
self.cmap = cmap
self.generatePicture()
def getHistogram(self, **kwds):
"""Returns x and y arrays containing the histogram values for the current image.
For an explanation of the return format, see numpy.histogram().
"""
z = self.data[2]
z = z[np.isfinite(z)]
hist = np.histogram(z, **kwds)
return hist[1][:-1], hist[0]
def generatePicture(self):
x, y, z = self.data
self.picture = QtGui.QPicture()
p = QtGui.QPainter(self.picture)
p.setPen(mkPen(None))
# normalize
if self.lut is not None:
mn, mx = self.lut.getLevels()
else:
f = z[np.isfinite(z)]
mn = f.min()
mx = f.max()
# draw the tiles
for i in range(x.size):
for j in range(y.size):
value = z[i, j]
if np.isneginf(value):
value = 0.0
elif np.isposinf(value):
value = 1.0
elif math.isnan(value):
continue # ignore NaN
else:
value = (value - mn) / (mx - mn) # normalize
if self.lut:
color = self.lut.gradient.getColor(value)
else:
color = self.cmap.mapToQColor(value)
p.setBrush(mkBrush(color))
# left, right, bottom, top
l = x[0] if i == 0 else (x[i - 1] + x[i]) / 2
r = (x[i] + x[i + 1]) / 2 if i < x.size - 1 else x[-1]
b = y[0] if j == 0 else (y[j - 1] + y[j]) / 2
t = (y[j] + y[j + 1]) / 2 if j < y.size - 1 else y[-1]
p.drawRect(QtCore.QRectF(l, t, r - l, b - t))
if self.border is not None:
p.setPen(self.border)
p.setBrush(fn.mkBrush(None))
p.drawRect(self.boundingRect())
p.end()
self.update()
def paint(self, p, *args):
p.drawPicture(0, 0, self.picture)
def boundingRect(self):
return QtCore.QRectF(self.picture.boundingRect())
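# Illustrative usage sketch (assumption: pyqtgraph is imported as pg and a Qt
# event loop is running; the grid values are made up):
#
#   x = np.array([1.0, 2.0, 4.0, 8.0])          # non-uniform sample points
#   y = np.array([1.0, 3.0, 9.0])
#   z = np.random.normal(size=(x.size, y.size))
#   plt = pg.plot()
#   plt.addItem(NonUniformImage(x, y, z))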
```
#### File: parametertree/tests/test_Parameter.py
```python
import pytest
from pyqtgraph.parametertree import Parameter
def test_parameter_hasdefault():
opts = {'name': 'param', 'type': int, 'value': 1}
# default unspecified
p = Parameter(**opts)
assert p.hasDefault()
assert p.defaultValue() == opts["value"]
p.setDefault(2)
assert p.hasDefault()
assert p.defaultValue() == 2
# default specified
p = Parameter(default=0, **opts)
assert p.hasDefault()
assert p.defaultValue() == 0
# default specified as None
p = Parameter(default=None, **opts)
assert not p.hasDefault()
def test_unpack_parameter():
# test that **unpacking correctly returns child name/value maps
params = [
dict(name='a', type='int', value=1),
dict(name='b', type='str', value='2'),
dict(name='c', type='float', value=3.0),
]
p = Parameter.create(name='params', type='group', children=params)
result = dict(**p)
assert 'a' in result
assert result['a'] == 1
assert 'b' in result
assert result['b'] == '2'
assert 'c' in result
assert result['c'] == 3.0
```
#### File: pyqtgraph/widgets/DiffTreeWidget.py
```python
from ..Qt import QtGui, QtCore
from collections import OrderedDict
from .DataTreeWidget import DataTreeWidget
from .. import functions as fn
import types, traceback
import numpy as np
__all__ = ['DiffTreeWidget']
class DiffTreeWidget(QtGui.QWidget):
"""
Widget for displaying differences between hierarchical python data structures
(eg, nested dicts, lists, and arrays)
"""
def __init__(self, parent=None, a=None, b=None):
QtGui.QWidget.__init__(self, parent)
self.layout = QtGui.QHBoxLayout()
self.setLayout(self.layout)
self.trees = [DataTreeWidget(self), DataTreeWidget(self)]
for t in self.trees:
self.layout.addWidget(t)
if a is not None:
self.setData(a, b)
def setData(self, a, b):
"""
Set the data to be compared in this widget.
"""
self.data = (a, b)
self.trees[0].setData(a)
self.trees[1].setData(b)
return self.compare(a, b)
def compare(self, a, b, path=()):
"""
Compare data structure *a* to structure *b*.
Return True if the objects match completely.
Otherwise, return a structure that describes the differences:
{ 'type': bool
'len': bool,
'str': bool,
'shape': bool,
'dtype': bool,
'mask': array,
}
"""
bad = (255, 200, 200)
diff = []
# generate typestr, desc, childs for each object
typeA, descA, childsA, _ = self.trees[0].parse(a)
typeB, descB, childsB, _ = self.trees[1].parse(b)
if typeA != typeB:
self.setColor(path, 1, bad)
if descA != descB:
self.setColor(path, 2, bad)
if isinstance(a, dict) and isinstance(b, dict):
keysA = set(a.keys())
keysB = set(b.keys())
for key in keysA - keysB:
self.setColor(path+(key,), 0, bad, tree=0)
for key in keysB - keysA:
self.setColor(path+(key,), 0, bad, tree=1)
for key in keysA & keysB:
self.compare(a[key], b[key], path+(key,))
elif isinstance(a, (list, tuple)) and isinstance(b, (list, tuple)):
for i in range(max(len(a), len(b))):
if len(a) <= i:
self.setColor(path+(i,), 0, bad, tree=1)
elif len(b) <= i:
self.setColor(path+(i,), 0, bad, tree=0)
else:
self.compare(a[i], b[i], path+(i,))
elif isinstance(a, np.ndarray) and isinstance(b, np.ndarray) and a.shape == b.shape:
tableNodes = [tree.nodes[path].child(0) for tree in self.trees]
if a.dtype.fields is None and b.dtype.fields is None:
eq = self.compareArrays(a, b)
if not np.all(eq):
for n in tableNodes:
n.setBackground(0, fn.mkBrush(bad))
#for i in np.argwhere(~eq):
else:
if a.dtype == b.dtype:
for i,k in enumerate(a.dtype.fields.keys()):
eq = self.compareArrays(a[k], b[k])
if not np.all(eq):
for n in tableNodes:
n.setBackground(0, fn.mkBrush(bad))
#for j in np.argwhere(~eq):
# dict: compare keys, then values where keys match
# list:
# array: compare elementwise for same shape
def compareArrays(self, a, b):
intnan = -9223372036854775808 # happens when np.nan is cast to int
anans = np.isnan(a) | (a == intnan)
bnans = np.isnan(b) | (b == intnan)
eq = anans == bnans
mask = ~anans
eq[mask] = np.allclose(a[mask], b[mask])
return eq
def setColor(self, path, column, color, tree=None):
brush = fn.mkBrush(color)
# Color only one tree if specified.
if tree is None:
trees = self.trees
else:
trees = [self.trees[tree]]
for tree in trees:
item = tree.nodes[path]
item.setBackground(column, brush)
def _compare(self, a, b):
"""
Compare data structure *a* to structure *b*.
"""
# Check test structures are the same
        assert type(a) is type(b)
        if hasattr(a, '__len__'):
            assert len(a) == len(b)
        if isinstance(a, dict):
            for k in a:
                assert k in b
            for k in b:
                assert k in a
                self._compare(a[k], b[k])
        elif isinstance(a, list):
            for i in range(len(a)):
                self._compare(a[i], b[i])
        elif isinstance(a, np.ndarray):
            assert a.shape == b.shape
            assert a.dtype == b.dtype
            if a.dtype.fields is None:
                intnan = -9223372036854775808  # happens when np.nan is cast to int
                anans = np.isnan(a) | (a == intnan)
                bnans = np.isnan(b) | (b == intnan)
                assert np.all(anans == bnans)
                mask = ~anans
                assert np.allclose(a[mask], b[mask])
            else:
                for k in a.dtype.fields.keys():
                    self._compare(a[k], b[k])
        else:
            try:
                assert a == b
            except Exception:
                raise NotImplementedError("Cannot compare objects of type %s" % type(a))
```
#### File: pyqtgraph/widgets/HistogramLUTWidget.py
```python
from ..Qt import QtGui, QtCore
from .GraphicsView import GraphicsView
from ..graphicsItems.HistogramLUTItem import HistogramLUTItem
__all__ = ['HistogramLUTWidget']
class HistogramLUTWidget(GraphicsView):
"""QWidget wrapper for :class:`~pyqtgraph.HistogramLUTItem`.
All parameters are passed along in creating the HistogramLUTItem.
"""
def __init__(self, parent=None, *args, **kargs):
background = kargs.pop('background', 'default')
GraphicsView.__init__(self, parent, useOpenGL=False, background=background)
self.item = HistogramLUTItem(*args, **kargs)
self.setCentralItem(self.item)
self.orientation = kargs.get('orientation', 'vertical')
if self.orientation == 'vertical':
self.setSizePolicy(QtGui.QSizePolicy.Preferred, QtGui.QSizePolicy.Expanding)
self.setMinimumWidth(95)
else:
self.setSizePolicy(QtGui.QSizePolicy.Expanding, QtGui.QSizePolicy.Preferred)
self.setMinimumHeight(95)
def sizeHint(self):
if self.orientation == 'vertical':
return QtCore.QSize(115, 200)
else:
return QtCore.QSize(200, 115)
def __getattr__(self, attr):
return getattr(self.item, attr)
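# Illustrative usage sketch (assumption: `img` is an existing pyqtgraph ImageItem
# created elsewhere and a Qt event loop is running):
#
#   w = HistogramLUTWidget()
#   w.setImageItem(img)   # proxied to the wrapped HistogramLUTItem via __getattr__
#   w.show()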
``` |
{
"source": "3DAlgoLab/spinningup",
"score": 2
} |
#### File: spinningup/experiment1/my_pg1.py
```python
import torch
import torch.nn as nn
from torch.distributions.categorical import Categorical
from torch.optim import Adam
import gym
import numpy as np
def mlp(sizes, activation=nn.ReLU6, output_activation=nn.Identity):
layers = []
for j in range(len(sizes) - 1):
act = activation if j < len(sizes) - 2 else output_activation
        layers += [nn.Linear(sizes[j], sizes[j + 1]), act()]
return nn.Sequential(*layers)
def list_to_tensor(list_arr, dtype=torch.float32):
return torch.tensor(np.array(list_arr), dtype=dtype)
def train(env_name, hidden_sizes=[32], lr=1e-2, epochs=50,
batch_size=5000, render=False):
assert env_name
env = gym.make(env_name)
obs_dim = env.observation_space.shape[0]
n_acts = env.action_space.n
logits_net = mlp(sizes=[obs_dim] + hidden_sizes + [n_acts])
# action distribution
def get_policy(obs):
logits = logits_net(obs)
return Categorical(logits=logits)
def get_action(obs):
return get_policy(obs).sample().item()
def compute_loss(obs, act, weights):
logp = get_policy(obs).log_prob(act)
return -(logp * weights).mean()
optimizer = Adam(logits_net.parameters(), lr=lr)
def train_one_epoch():
batch_obs = []
batch_acts = []
batch_weights = []
batch_rets = []
batch_lens = []
# reset episode-specific variables
obs = env.reset()
done = False
ep_rews = [] # list for rewards accrued throughout ep.
        finished_rendering_this_epoch = False
while True:
            if not finished_rendering_this_epoch and render:
env.render()
# save obs
batch_obs.append(obs.copy())
# act
act = get_action(torch.as_tensor(obs, dtype=torch.float32))
obs, rew, done, _ = env.step(act)
# save action, reward
batch_acts.append(act)
ep_rews.append(rew)
if done:
ep_ret, ep_len = sum(ep_rews), len(ep_rews)
batch_rets.append(ep_ret)
batch_lens.append(ep_len)
# the weight for each logprob(a|s) is R(tau)
batch_weights += [ep_ret] * ep_len
# reset episode-specific variables
obs, done, ep_rews = env.reset(), False, []
                finished_rendering_this_epoch = True
if len(batch_obs) > batch_size:
break
optimizer.zero_grad()
batch_loss = compute_loss(obs=list_to_tensor(batch_obs),
act=list_to_tensor(batch_acts, dtype=torch.int32),
weights=list_to_tensor(batch_weights))
batch_loss.backward()
optimizer.step()
return batch_loss, batch_rets, batch_lens
# training loop
for i in range(epochs):
batch_loss, batch_rets, batch_lens = train_one_epoch()
print(f"epoch: {i:3d}\t loss:{batch_loss:.3f}\t"
f"return: {np.mean(batch_rets):.3f}\t ep_len: {np.mean(batch_lens):.3f}\t")
if __name__ == "__main__":
# Test
# m = Categorical(torch.tensor([1., 1, 1, 1, 1]))
# for i in range(10):
# r = m.sample()
# print(r)
import argparse
parser = argparse.ArgumentParser()
parser.add_argument('--env_name', '--env', type=str, default='CartPole-v0')
parser.add_argument('--render', action='store_true')
parser.add_argument('--lr', type=float, default=1e-2)
args = parser.parse_args()
print("\nSimplest PG")
train(env_name=args.env_name, render=args.render, lr=args.lr)
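# Example invocation (assumption: gym's classic-control environments are installed):
#   python my_pg1.py --env CartPole-v0 --lr 1e-2 --render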
``` |
{
"source": "3DAlgoLab/vispy",
"score": 2
} |
#### File: basics/visuals/line_prototype.py
```python
import sys
import numpy as np
from vispy import app, gloo, visuals
from vispy.visuals.filters import Clipper, ColorFilter
from vispy.visuals.shaders import MultiProgram
from vispy.visuals.collections import PointCollection
from vispy.visuals.transforms import STTransform
from vispy.scene import SceneCanvas
from vispy.scene.visuals import create_visual_node
class LineVisual(visuals.Visual):
"""Example of a very simple GL-line visual.
This shows the minimal set of methods that need to be reimplemented to
make a new visual class.
"""
def __init__(self, pos=None, color=(1, 1, 1, 1)):
vcode = """
attribute vec2 a_pos;
void main() {
gl_Position = $transform(vec4(a_pos, 0., 1.));
gl_PointSize = 10.;
}
"""
fcode = """
void main() {
gl_FragColor = $color;
}
"""
visuals.Visual.__init__(self, vcode=vcode, fcode=fcode)
self.pos_buf = gloo.VertexBuffer()
# The Visual superclass contains a MultiProgram, which is an object
# that behaves like a normal shader program (you can assign shader
# code, upload values, set template variables, etc.) but internally
# manages multiple ModularProgram instances, one per view.
# The MultiProgram is accessed via the `shared_program` property, so
# the following modifications to the program will be applied to all
# views:
self.shared_program['a_pos'] = self.pos_buf
self.shared_program.frag['color'] = color
self._need_upload = False
# Visual keeps track of draw mode, index buffer, and GL state. These
# are shared between all views.
self._draw_mode = 'line_strip'
self.set_gl_state('translucent', depth_test=False)
if pos is not None:
self.set_data(pos)
def set_data(self, pos):
self._pos = pos
self._need_upload = True
def _prepare_transforms(self, view=None):
view.view_program.vert['transform'] = view.transforms.get_transform()
def _prepare_draw(self, view=None):
"""This method is called immediately before each draw.
The *view* argument indicates which view is about to be drawn.
"""
if self._need_upload:
# Note that pos_buf is shared between all views, so we have no need
# to use the *view* argument in this example. This will be true
# for most visuals.
self.pos_buf.set_data(self._pos)
self._need_upload = False
class PointVisual(LineVisual):
"""Another simple visual class.
Due to the simplicity of these example classes, it was only necessary to
subclass from LineVisual and set the draw mode to 'points'. A more
fully-featured PointVisual class might not follow this approach.
"""
def __init__(self, pos=None, color=(1, 1, 1, 1)):
LineVisual.__init__(self, pos, color)
self._draw_mode = 'points'
class PlotLineVisual(visuals.CompoundVisual):
"""An example compound visual that draws lines and points.
To the user, the compound visual behaves exactly like a normal visual--it
has a transform system, draw() and bounds() methods, etc. Internally, the
compound visual automatically manages proxying these transforms and methods
to its sub-visuals.
"""
def __init__(self, pos=None, line_color=(1, 1, 1, 1),
point_color=(1, 1, 1, 1)):
self._line = LineVisual(pos, color=line_color)
self._point = PointVisual(pos, color=point_color)
visuals.CompoundVisual.__init__(self, [self._line, self._point])
class PointCollectionVisual(visuals.Visual):
"""Thin wrapper around a point collection.
Note: This is currently broken!
"""
def __init__(self):
prog = MultiProgram(vcode='', fcode='')
self.points = PointCollection("agg", color="shared", program=prog)
visuals.Visual.__init__(self, program=prog)
def _prepare_draw(self, view):
if self.points._need_update:
self.points._update()
self._draw_mode = self.points._mode
self._index_buffer = self.points._indices_buffer
def append(self, *args, **kwargs):
self.points.append(*args, **kwargs)
def _prepare_transforms(self, view=None):
pass
@property
def color(self):
return self.points['color']
@color.setter
def color(self, c):
self.points['color'] = c
class PanZoomTransform(STTransform):
def __init__(self, canvas=None, aspect=None, **kwargs):
self._aspect = aspect
self.attach(canvas)
STTransform.__init__(self, **kwargs)
def attach(self, canvas):
""" Attach this tranform to a canvas """
self._canvas = canvas
canvas.events.mouse_wheel.connect(self.on_mouse_wheel)
canvas.events.mouse_move.connect(self.on_mouse_move)
def on_mouse_move(self, event):
if event.is_dragging:
dxy = event.pos - event.last_event.pos
button = event.press_event.button
if button == 1:
self.move(dxy)
elif button == 2:
center = event.press_event.pos
if self._aspect is None:
self.zoom(np.exp(dxy * (0.01, -0.01)), center)
else:
s = dxy[1] * -0.01
self.zoom(np.exp(np.array([s, s])), center)
def on_mouse_wheel(self, event):
self.zoom(np.exp(event.delta * (0.01, -0.01)), event.pos)
canvas = app.Canvas(keys='interactive', size=(900, 600), show=True,
title="Visual Canvas")
pos = np.random.normal(size=(1000, 2), loc=0, scale=50).astype('float32')
pos[0] = [0, 0]
# Make a line visual
line = LineVisual(pos=pos)
line.transforms.canvas = canvas
line.transform = STTransform(scale=(2, 1), translate=(20, 20))
panzoom = PanZoomTransform(canvas)
line.transforms.scene_transform = panzoom
panzoom.changed.connect(lambda ev: canvas.update())
# Attach color filter to all views (current and future) of the visual
line.attach(ColorFilter((1, 1, 0.5, 0.7)))
# Attach a clipper just to this view. The Clipper filter requires a
# transform that maps from the framebuffer coordinate system to the
# clipping coordinates.
tr = line.transforms.get_transform('framebuffer', 'canvas')
line.attach(Clipper((20, 20, 260, 260), transform=tr), view=line)
# Make a view of the line that will draw its shadow
shadow = line.view()
shadow.transforms.canvas = canvas
shadow.transform = STTransform(scale=(2, 1), translate=(25, 25))
shadow.transforms.scene_transform = panzoom
shadow.attach(ColorFilter((0, 0, 0, 0.6)), view=shadow)
tr = shadow.transforms.get_transform('framebuffer', 'canvas')
shadow.attach(Clipper((20, 20, 260, 260), transform=tr), view=shadow)
# And make a second view of the line with different clipping bounds
view = line.view()
view.transforms.canvas = canvas
view.transform = STTransform(scale=(2, 0.5), translate=(450, 150))
tr = view.transforms.get_transform('framebuffer', 'canvas')
view.attach(Clipper((320, 20, 260, 260), transform=tr), view=view)
# Make a compound visual
plot = PlotLineVisual(pos, (0.5, 1, 0.5, 0.2), (0.5, 1, 1, 0.3))
plot.transforms.canvas = canvas
plot.transform = STTransform(translate=(80, 450), scale=(1.5, 1))
tr = plot.transforms.get_transform('framebuffer', 'canvas')
plot.attach(Clipper((20, 320, 260, 260), transform=tr), view=plot)
# And make a view on the compound
view2 = plot.view()
view2.transforms.canvas = canvas
view2.transform = STTransform(scale=(1.5, 1), translate=(450, 400))
tr = view2.transforms.get_transform('framebuffer', 'canvas')
view2.attach(Clipper((320, 320, 260, 260), transform=tr), view=view2)
# And a shadow for the view
shadow2 = plot.view()
shadow2.transforms.canvas = canvas
shadow2.transform = STTransform(scale=(1.5, 1), translate=(455, 405))
shadow2.attach(ColorFilter((0, 0, 0, 0.6)), view=shadow2)
tr = shadow2.transforms.get_transform('framebuffer', 'canvas')
shadow2.attach(Clipper((320, 320, 260, 260), transform=tr), view=shadow2)
# Example of a collection visual
collection = PointCollectionVisual()
collection.transforms.canvas = canvas
collection.transform = STTransform(translate=(750, 150))
collection.append(np.random.normal(loc=0, scale=20, size=(10000, 3)),
itemsize=5000)
collection.color = (1, 0.5, 0.5, 1), (0.5, 0.5, 1, 1)
shadow3 = collection.view()
shadow3.transforms.canvas = canvas
shadow3.transform = STTransform(scale=(1, 1), translate=(752, 152))
shadow3.attach(ColorFilter((0, 0, 0, 0.6)), view=shadow3)
# tr = shadow3.transforms.get_transform('framebuffer', 'canvas')
# shadow3.attach(Clipper((320, 320, 260, 260), transform=tr), view=shadow2)
order = [shadow, line, view, plot, shadow2, view2, shadow3, collection]
@canvas.connect
def on_draw(event):
canvas.context.clear((0.3, 0.3, 0.3, 1.0))
for v in order:
v.draw()
def on_resize(event):
# Set canvas viewport and reconfigure visual transforms to match.
vp = (0, 0, canvas.physical_size[0], canvas.physical_size[1])
canvas.context.set_viewport(*vp)
for v in order:
v.transforms.configure(canvas=canvas, viewport=vp)
canvas.events.resize.connect(on_resize)
on_resize(None)
Line = create_visual_node(LineVisual)
canvas2 = SceneCanvas(keys='interactive', title='Scene Canvas', show=True)
v = canvas2.central_widget.add_view(margin=10)
v.border_color = (1, 1, 1, 1)
v.bgcolor = (0.3, 0.3, 0.3, 1)
v.camera = 'panzoom'
line2 = Line(pos, parent=v.scene)
def mouse(ev):
print(ev)
v.events.mouse_press.connect(mouse)
if __name__ == '__main__':
if sys.flags.interactive != 1:
app.run()
```
#### File: basics/visuals/networkx_layout.py
```python
from vispy import scene
from vispy.visuals.graphs import layouts
import networkx as nx
g = nx.erdos_renyi_graph(100, .2)
layout = layouts.get_layout("networkx_layout", graph=g, layout="spring")
# create canvas
canvas = scene.SceneCanvas(title='Simple NetworkX Graph', size=(
600, 600), bgcolor='black', show=True)
view = canvas.central_widget.add_view('panzoom')
visual = scene.visuals.Graph(
layout.adj, layout=layout, line_color=(1, 1, 1, .5), arrow_type="stealth",
arrow_size=30, node_symbol="disc", node_size=20,
face_color=(1, 0, 0, 1), border_width=0.0, animate=False, directed=False,
parent=view.scene)
@canvas.events.draw.connect
def on_draw(event):
if not visual.animate_layout():
canvas.update()
canvas.show()
```
#### File: vispy/app/qt.py
```python
from .backends import qt_lib
if qt_lib is None:
raise RuntimeError("Module backends._qt should not be imported directly.")
elif qt_lib == 'pyqt4':
from PyQt4 import QtGui
QWidget, QGridLayout = QtGui.QWidget, QtGui.QGridLayout # Compat
elif qt_lib == 'pyside':
from PySide import QtGui
QWidget, QGridLayout = QtGui.QWidget, QtGui.QGridLayout # Compat
elif qt_lib == 'pyqt5':
from PyQt5 import QtWidgets
QWidget, QGridLayout = QtWidgets.QWidget, QtWidgets.QGridLayout # Compat
elif qt_lib == 'pyside2':
from PySide2 import QtWidgets
QWidget, QGridLayout = QtWidgets.QWidget, QtWidgets.QGridLayout # Compat
elif qt_lib == 'pyside6':
from PySide6 import QtWidgets
QWidget, QGridLayout = QtWidgets.QWidget, QtWidgets.QGridLayout # Compat
elif qt_lib:
raise RuntimeError("Invalid value for qt_lib %r." % qt_lib)
else:
raise RuntimeError("Module backends._qt should not be imported directly.")
class QtCanvas(QWidget):
"""Qt widget containing a vispy Canvas.
This is a convenience class that allows a vispy canvas to be embedded
directly into a Qt application.
All methods and properties of the Canvas are wrapped by this class.
Parameters
----------
parent : QWidget or None
The Qt parent to assign to this widget.
canvas : instance or subclass of Canvas
The vispy Canvas to display inside this widget, or a Canvas subclass
to instantiate using any remaining keyword arguments.
"""
def __init__(self, parent=None, canvas=None, **kwargs):
from .canvas import Canvas
if canvas is None:
canvas = Canvas
if issubclass(canvas, Canvas):
canvas = canvas(**kwargs)
        elif len(kwargs) > 0:
raise TypeError('Invalid keyword arguments: %s' %
list(kwargs.keys()))
if not isinstance(canvas, Canvas):
raise TypeError('canvas argument must be an instance or subclass '
'of Canvas.')
QWidget.__init__(self, parent)
self.layout = QGridLayout()
self.setLayout(self.layout)
self.layout.setContentsMargins(0, 0, 0, 0)
self._canvas = canvas
self.layout.addWidget(canvas.native)
self.setSizePolicy(canvas.native.sizePolicy())
def __getattr__(self, attr):
if hasattr(self._canvas, attr):
return getattr(self._canvas, attr)
else:
raise AttributeError(attr)
def update(self):
"""Call update() on both this widget and the internal canvas."""
QWidget.update(self)
self._canvas.update()
class QtSceneCanvas(QtCanvas):
"""Convenience class embedding a vispy SceneCanvas inside a QWidget.
See QtCanvas.
"""
def __init__(self, parent=None, **kwargs):
from ..scene.canvas import SceneCanvas
QtCanvas.__init__(self, parent, canvas=SceneCanvas, **kwargs)
```
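As a quick orientation for this wrapper, a hedged usage sketch embedding a `QtCanvas` in a plain Qt window; it assumes PyQt5 is installed and that vispy selects the matching backend (the `app='pyqt5'` argument is an explicit hint added here, not part of the module above).
```python
import sys
from PyQt5 import QtWidgets
from vispy.app.qt import QtCanvas

qt_app = QtWidgets.QApplication(sys.argv)

window = QtWidgets.QMainWindow()
# QtCanvas instantiates a default vispy Canvas and embeds its native widget.
widget = QtCanvas(app='pyqt5')
window.setCentralWidget(widget)
window.resize(800, 600)
window.show()

sys.exit(qt_app.exec_())
```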
#### File: vispy/geometry/parametric.py
```python
import numpy as np
from .normals import normals
def surface(func, umin=0, umax=2 * np.pi, ucount=64, urepeat=1.0,
vmin=0, vmax=2 * np.pi, vcount=64, vrepeat=1.0):
"""
Computes the parameterization of a parametric surface
func: function(u,v)
Parametric function used to build the surface
"""
vtype = [('position', np.float32, 3),
('texcoord', np.float32, 2),
('normal', np.float32, 3)]
itype = np.uint32
# umin, umax, ucount = 0, 2*np.pi, 64
# vmin, vmax, vcount = 0, 2*np.pi, 64
vcount += 1
ucount += 1
n = vcount * ucount
Un = np.repeat(np.linspace(0, 1, ucount, endpoint=True), vcount)
Vn = np.tile(np.linspace(0, 1, vcount, endpoint=True), ucount)
U = umin + Un * (umax - umin)
V = vmin + Vn * (vmax - vmin)
vertices = np.zeros(n, dtype=vtype)
for i, (u, v) in enumerate(zip(U, V)):
vertices["position"][i] = func(u, v)
vertices["texcoord"][:, 0] = Un * urepeat
vertices["texcoord"][:, 1] = Vn * vrepeat
indices = []
for i in range(ucount - 1):
for j in range(vcount - 1):
indices.append(i * (vcount) + j)
indices.append(i * (vcount) + j + 1)
indices.append(i * (vcount) + j + vcount + 1)
indices.append(i * (vcount) + j + vcount)
indices.append(i * (vcount) + j + vcount + 1)
indices.append(i * (vcount) + j)
indices = np.array(indices, dtype=itype)
vertices["normal"] = normals(vertices["position"],
indices.reshape(len(indices)//3, 3))
return vertices, indices
```
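`surface` accepts any `func(u, v)` returning an (x, y, z) point; purely as an illustration, a torus parameterization picked arbitrarily, assuming the module above is importable at its usual path.
```python
import numpy as np
from vispy.geometry.parametric import surface

def torus(u, v, R=1.0, r=0.35):
    # u runs around the tube cross-section, v around the main axis
    x = (R + r * np.cos(u)) * np.cos(v)
    y = (R + r * np.cos(u)) * np.sin(v)
    z = r * np.sin(u)
    return x, y, z

vertices, indices = surface(torus, ucount=32, vcount=64)
# structured vertex array with position/texcoord/normal, plus a flat index list
print(vertices['position'].shape, indices.shape)
```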
#### File: gloo/gl/dummy.py
```python
from . import BaseGLProxy, _copy_gl_functions
from ._constants import * # noqa
class DummyProxy(BaseGLProxy):
"""A dummy backend that can be activated when the GL is not
processed in this process. Each GL function call will raise an
error.
"""
def __call__(self, funcname, returns, *args):
raise RuntimeError('Cannot call %r (or any other GL function), '
'since GL is disabled.' % funcname)
# Instantiate proxy and inject functions
_proxy = DummyProxy()
_copy_gl_functions(_proxy, globals())
```
#### File: gloo/tests/test_buffer.py
```python
import unittest
import numpy as np
from vispy.testing import run_tests_if_main
from vispy.gloo.buffer import (Buffer, DataBuffer, DataBufferView,
VertexBuffer, IndexBuffer)
# -----------------------------------------------------------------------------
class BufferTest(unittest.TestCase):
# Default init
# ------------
def test_init_default(self):
"""Test buffer init"""
# No data
B = Buffer()
assert B.nbytes == 0
glir_cmd = B._glir.clear()[-1]
assert glir_cmd[0] == 'CREATE'
# With data
data = np.zeros(100)
B = Buffer(data=data)
assert B.nbytes == data.nbytes
glir_cmd = B._glir.clear()[-1]
assert glir_cmd[0] == 'DATA'
# With nbytes
B = Buffer(nbytes=100)
assert B.nbytes == 100
glir_cmd = B._glir.clear()[-1]
assert glir_cmd[0] == 'SIZE'
# Wrong data
self.assertRaises(ValueError, Buffer, data, 4)
self.assertRaises(ValueError, Buffer, data, data.nbytes)
# Check setting the whole buffer clear pending operations
# -------------------------------------------------------
def test_set_whole_data(self):
data = np.zeros(100)
B = Buffer(data=data)
B._glir.clear()
B.set_data(data=data)
glir_cmds = B._glir.clear()
assert len(glir_cmds) == 2
assert glir_cmds[0][0] == 'SIZE'
assert glir_cmds[1][0] == 'DATA'
# And sub data
B.set_subdata(data[:50], 20)
glir_cmds = B._glir.clear()
assert len(glir_cmds) == 1
assert glir_cmds[0][0] == 'DATA'
assert glir_cmds[0][2] == 20 # offset
# And sub data
B.set_subdata(data)
glir_cmds = B._glir.clear()
assert glir_cmds[-1][0] == 'DATA'
# Wrong ways to set subdata
self.assertRaises(ValueError, B.set_subdata, data[:50], -1) # neg
self.assertRaises(ValueError, B.set_subdata, data, 10) # no fit
# Check stored data is data
# -------------------------
def test_data_storage(self):
data = np.zeros(100)
B = Buffer(data=data)
B.set_data(data=data[:50], copy=False)
glir_cmd = B._glir.clear()[-1]
assert glir_cmd[-1].base is data
# Check setting oversized data
# ----------------------------
def test_oversized_data(self):
data = np.zeros(10)
B = Buffer(data=data)
# with self.assertRaises(ValueError):
# B.set_data(np.ones(20))
self.assertRaises(ValueError, B.set_subdata, np.ones(20), offset=0)
# Check negative offset
# ---------------------
def test_negative_offset(self):
data = np.zeros(10)
B = Buffer(data=data)
# with self.assertRaises(ValueError):
# B.set_data(np.ones(1), offset=-1)
self.assertRaises(ValueError, B.set_subdata, np.ones(1), offset=-1)
# Check offlimit offset
# ---------------------
def test_offlimit_offset(self):
data = np.zeros(10)
B = Buffer(data=data)
# with self.assertRaises(ValueError):
# B.set_data(np.ones(1), offset=10 * data.dtype.itemsize)
self.assertRaises(ValueError, B.set_subdata,
np.ones(1), offset=10 * data.dtype.itemsize)
# Buffer size
# -----------
def test_buffer_size(self):
data = np.zeros(10)
B = Buffer(data=data)
assert B.nbytes == data.nbytes
# Resize
# ------
def test_buffer_resize(self):
data = np.zeros(10)
B = Buffer(data=data)
data = np.zeros(20)
B.set_data(data)
assert B.nbytes == data.nbytes
# -----------------------------------------------------------------------------
class DataBufferTest(unittest.TestCase):
# Default init
# ------------
def test_default_init(self):
# Check default storage and copy flags
data = np.ones(100)
B = DataBuffer(data)
assert B.nbytes == data.nbytes
assert B.offset == 0
assert B.size == 100
assert B.itemsize == data.itemsize
assert B.stride == data.itemsize
assert B.dtype == data.dtype
# Given data must be actual numeric data
self.assertRaises(TypeError, DataBuffer, 'this is not nice data')
# Default init with structured data
# ---------------------------------
def test_structured_init(self):
# Check structured type
dtype = np.dtype([('position', np.float32, 3),
('texcoord', np.float32, 2),
('color', np.float32, 4)])
data = np.zeros(10, dtype=dtype)
B = DataBuffer(data)
assert B.nbytes == data.nbytes
assert B.offset == 0
assert B.size == 10
assert B.itemsize == data.itemsize
assert B.stride == data.itemsize
assert B.dtype == data.dtype
# No CPU storage
# --------------
def test_no_storage_copy(self):
data = np.ones(100, np.float32)
B = DataBuffer(data)
assert B.stride == 4
# Wrong storage
# -------------
def test_non_contiguous_storage(self):
# Ask to have CPU storage and to use data as storage
# Not possible since data[::2] is not contiguous
data = np.ones(100, np.float32)
data_given = data[::2]
B = DataBuffer(data_given)
assert B.stride == 4*2
# Get buffer field
# ----------------
def test_getitem_field(self):
dtype = np.dtype([('position', np.float32, 3),
('texcoord', np.float32, 2),
('color', np.float32, 4)])
data = np.zeros(10, dtype=dtype)
B = DataBuffer(data)
Z = B["position"]
assert Z.nbytes == 10 * 3 * np.dtype(np.float32).itemsize
assert Z.offset == 0
assert Z.size == 10
assert Z.itemsize == 3 * np.dtype(np.float32).itemsize
assert Z.stride == (3 + 2 + 4) * np.dtype(np.float32).itemsize
assert Z.dtype == (np.float32, 3)
Z = B["texcoord"]
assert Z.nbytes == 10 * 2 * np.dtype(np.float32).itemsize
assert Z.offset == 3 * np.dtype(np.float32).itemsize
assert Z.size == 10
assert Z.itemsize == 2 * np.dtype(np.float32).itemsize
assert Z.stride == (3 + 2 + 4) * np.dtype(np.float32).itemsize
assert Z.dtype == (np.float32, 2)
Z = B["color"]
assert Z.nbytes == 10 * 4 * np.dtype(np.float32).itemsize
assert Z.offset == (2 + 3) * np.dtype(np.float32).itemsize
assert Z.size == 10
assert Z.itemsize == 4 * np.dtype(np.float32).itemsize
assert Z.stride == (3 + 2 + 4) * np.dtype(np.float32).itemsize
assert Z.dtype == (np.float32, 4)
# Get view via index
# ------------------
def test_getitem_index(self):
dtype = np.dtype([('position', np.float32, 3),
('texcoord', np.float32, 2),
('color', np.float32, 4)])
data = np.zeros(10, dtype=dtype)
B = DataBuffer(data)
Z = B[0:1]
assert Z.base == B
assert Z.id == B.id
assert Z.nbytes == 1 * (3 + 2 + 4) * np.dtype(np.float32).itemsize
assert Z.offset == 0
assert Z.size == 1
assert Z.itemsize == (3 + 2 + 4) * np.dtype(np.float32).itemsize
assert Z.stride == (3 + 2 + 4) * np.dtype(np.float32).itemsize
assert Z.dtype == B.dtype
assert 'DataBufferView' in repr(Z)
# There's a few things we cannot do with a view
self.assertRaises(RuntimeError, Z.set_data, data)
self.assertRaises(RuntimeError, Z.set_subdata, data)
self.assertRaises(RuntimeError, Z.resize_bytes, 20)
self.assertRaises(RuntimeError, Z.__getitem__, 3)
self.assertRaises(RuntimeError, Z.__setitem__, 3, data)
# View get invalidated when base is resized
# -----------------------------------------
def test_invalid_view_after_resize(self):
dtype = np.dtype([('position', np.float32, 3),
('texcoord', np.float32, 2),
('color', np.float32, 4)])
data = np.zeros(10, dtype=dtype)
B = DataBuffer(data)
Y = B['position']
Z = B[5:]
B.resize_bytes(5)
assert Y._valid is False
assert Z._valid is False
# View get invalidated after setting oversized data
# -------------------------------------------------
def test_invalid_view_after_set_data(self):
dtype = np.dtype([('position', np.float32, 3),
('texcoord', np.float32, 2),
('color', np.float32, 4)])
data = np.zeros(10, dtype=dtype)
B = DataBuffer(data)
Z = B[5:]
B.set_data(np.zeros(15, dtype=dtype))
assert Z._valid is False
# Set data on base buffer : ok
# ----------------------------
def test_set_data_base(self):
dtype = np.dtype([('position', np.float32, 3),
('texcoord', np.float32, 2),
('color', np.float32, 4)])
data = np.zeros(10, dtype=dtype)
B = DataBuffer(data)
B.set_data(data)
last_cmd = B._glir.clear()[-1]
assert last_cmd[0] == 'DATA'
# Extra kwargs are caught
self.assertRaises(TypeError, B.set_data, data, foo=4)
# Check set_data using offset in data buffer
# ------------------------------------------
def test_set_data_offset(self):
data = np.zeros(100, np.float32)
subdata = data[:10]
B = DataBuffer(data)
B.set_subdata(subdata, offset=10)
last_cmd = B._glir.clear()[-1]
offset = last_cmd[2]
assert offset == 10*4
def test_getitem(self):
dtype = np.dtype([('position', np.float32, 3),
('texcoord', np.float32, 2),
('color', np.float32, 4)])
data = np.zeros(10, dtype=dtype)
B = DataBuffer(data)
assert B[1].dtype == dtype
assert B[1].size == 1
assert B[-1].dtype == dtype
assert B[-1].size == 1
self.assertRaises(IndexError, B.__getitem__, +999)
self.assertRaises(IndexError, B.__getitem__, -999)
def test_setitem(self):
dtype = np.dtype([('position', np.float32, 3),
('texcoord', np.float32, 2),
('color', np.float32, 4)])
data = np.zeros(10, dtype=dtype)
B = DataBuffer(data)
B[1] = data[0]
B[-1] = data[0]
B[:5] = data[:5]
B[5:0] = data[:5] # Weird, but we apparently support this
# the below is no longer supported starting with NumPy 1.14.0
# this used to be converted to a single element array of the above
# dtype filled with 0s. This is no longer supported.
# B[1] = b'' # Gets converted into array of dtype. Lists do not work
# the below doesn't work on all systems (I guess?)
# B[1] = 0
B[1] = ([0, 0, 0], [0, 0], [0, 0, 0, 0],)
self.assertRaises(IndexError, B.__setitem__, +999, data[0])
self.assertRaises(IndexError, B.__setitem__, -999, data[0])
self.assertRaises(TypeError, B.__setitem__, [], data[0])
# Setitem + broadcast
# ------------------------------------------------------
def test_setitem_broadcast(self):
dtype = np.dtype([('position', np.float32, 3),
('texcoord', np.float32, 2),
('color', np.float32, 4)])
data = np.zeros(10, dtype=dtype)
B = DataBuffer(data)
self.assertRaises(ValueError, B.__setitem__, 'position', (1, 2, 3))
# Set every 2 item
# ------------------------------------------------------
def test_setitem_strided(self):
dtype = np.dtype([('position', np.float32, 3),
('texcoord', np.float32, 2),
('color', np.float32, 4)])
data1 = np.zeros(10, dtype=dtype)
data2 = np.ones(10, dtype=dtype)
B = DataBuffer(data1)
s = slice(None, None, 2)
self.assertRaises(ValueError, B.__setitem__, s, data2[::2])
# Set half the array
# ------------------------------------------------------
def test_setitem_half(self):
dtype = np.dtype([('position', np.float32, 3),
('texcoord', np.float32, 2),
('color', np.float32, 4)])
data1 = np.zeros(10, dtype=dtype)
data2 = np.ones(10, dtype=dtype)
B = DataBuffer(data1)
B._glir.clear()
B[:5] = data2[:5]
glir_cmds = B._glir.clear()
assert len(glir_cmds) == 1
set_data = glir_cmds[0][-1]
assert np.allclose(set_data['position'], data2['position'][:5])
assert np.allclose(set_data['texcoord'][:5], data2['texcoord'][:5])
assert np.allclose(set_data['color'][:5], data2['color'][:5])
# Set field without storage: error
# --------------------------------
def test_setitem_field_no_storage(self):
dtype = np.dtype([('position', np.float32, 3),
('texcoord', np.float32, 2),
('color', np.float32, 4)])
data = np.zeros(10, dtype=dtype)
B = DataBuffer(data)
self.assertRaises(ValueError, B.__setitem__, 'position', (1, 2, 3))
# Set every 2 item without storage: error
# ----------------------------------------
def test_every_two_item_no_storage(self):
dtype = np.dtype([('position', np.float32, 3),
('texcoord', np.float32, 2),
('color', np.float32, 4)])
data = np.zeros(10, dtype=dtype)
B = DataBuffer(data)
# with self.assertRaises(ValueError):
# B[::2] = data[::2]
s = slice(None, None, 2)
self.assertRaises(ValueError, B.__setitem__, s, data[::2])
# Resize
# ------
def test_resize(self):
data = np.zeros(10)
B = DataBuffer(data=data)
data = np.zeros(20)
B.set_data(data)
assert B.nbytes == data.nbytes
# Resize now allowed using ellipsis
# -----------------------------
def test_no_resize_ellipsis(self):
data = np.zeros(10)
B = DataBuffer(data=data)
data = np.zeros(30)
self.assertRaises(ValueError, B.__setitem__, Ellipsis, data)
# Broadcast when using ellipses
def test_broadcast_ellipsis(self):
data = np.zeros(10)
B = DataBuffer(data=data)
data = np.zeros(5)
B[Ellipsis] = data
glir_cmd = B._glir.clear()[-1]
assert glir_cmd[-1].shape == (10,)
class DataBufferViewTest(unittest.TestCase):
def test_init_view(self):
data = np.zeros(10)
B = DataBuffer(data=data)
V = DataBufferView(B, 1)
assert V.size == 1
V = DataBufferView(B, slice(0, 5))
assert V.size == 5
V = DataBufferView(B, slice(5, 0))
assert V.size == 5
V = DataBufferView(B, Ellipsis)
assert V.size == 10
self.assertRaises(TypeError, DataBufferView, B, [])
self.assertRaises(ValueError, DataBufferView, B, slice(0, 10, 2))
# -----------------------------------------------------------------------------
class VertexBufferTest(unittest.TestCase):
# VertexBuffer allowed base types
# -------------------------------
def test_init_allowed_dtype(self):
for dtype in (np.uint8, np.int8, np.uint16, np.int16, np.float32):
V = VertexBuffer(np.zeros((10, 3), dtype=dtype))
names = V.dtype.names
assert V.dtype[names[0]].base == dtype
assert V.dtype[names[0]].shape == (3,)
for dtype in (np.float64, np.int64):
self.assertRaises(TypeError, VertexBuffer,
np.zeros((10, 3), dtype=dtype))
# Tuple/list is also allowed
V = VertexBuffer([1, 2, 3])
assert V.size == 3
assert V.itemsize == 4
#
V = VertexBuffer([[1, 2], [3, 4], [5, 6]])
assert V.size == 3
assert V.itemsize == 2 * 4
# Convert
data = np.zeros((10,), 'uint8')
B = VertexBuffer(data)
assert B.dtype[0].base == np.uint8
assert B.dtype[0].itemsize == 1
#
data = np.zeros((10, 2), 'uint8')
B = VertexBuffer(data)
assert B.dtype[0].base == np.uint8
assert B.dtype[0].itemsize == 2
B.set_data(data, convert=True)
assert B.dtype[0].base == np.float32
assert B.dtype[0].itemsize == 8
B = VertexBuffer(data[::2].copy())
# This is converted to 1D
B = VertexBuffer([[1, 2, 3, 4, 5], [1, 2, 3, 4, 5]])
assert B.size == 10
# Not allowed
self.assertRaises(TypeError, VertexBuffer, dtype=np.float64)
# self.assertRaises(TypeError, VertexBuffer, [[1,2,3,4,5],[1,2,3,4,5]])
# VertexBuffer not allowed base types
# -----------------------------------
def test_init_not_allowed_dtype(self):
for dtype in (np.uint32, np.int32, np.float64):
# with self.assertRaises(TypeError):
# V = VertexBuffer(dtype=dtype)
self.assertRaises(TypeError, VertexBuffer, dtype=dtype)
def test_glsl_type(self):
data = np.zeros((10,), np.float32)
B = VertexBuffer(data)
C = B[1:]
assert B.glsl_type == ('attribute', 'float')
assert C.glsl_type == ('attribute', 'float')
data = np.zeros((10, 2), np.float32)
B = VertexBuffer(data)
C = B[1:]
assert B.glsl_type == ('attribute', 'vec2')
assert C.glsl_type == ('attribute', 'vec2')
data = np.zeros((10, 4), np.float32)
B = VertexBuffer(data)
C = B[1:]
assert B.glsl_type == ('attribute', 'vec4')
assert C.glsl_type == ('attribute', 'vec4')
# -----------------------------------------------------------------------------
class IndexBufferTest(unittest.TestCase):
# IndexBuffer allowed base types
# ------------------------------
def test_init_allowed_dtype(self):
# allowed dtypes
for dtype in (np.uint8, np.uint16, np.uint32):
b = IndexBuffer(np.zeros(10, dtype=dtype))
            assert b.dtype == dtype
# no data => no dtype
V = IndexBuffer()
        assert V.dtype is None
# Not allowed dtypes
for dtype in (np.int8, np.int16, np.int32,
np.float16, np.float32, np.float64):
# with self.assertRaises(TypeError):
# V = IndexBuffer(dtype=dtype)
data = np.zeros(10, dtype=dtype)
self.assertRaises(TypeError, IndexBuffer, data)
# Prepare some data
dtype = np.dtype([('position', np.float32, 3),
('texcoord', np.float32, 2), ])
sdata = np.zeros(10, dtype=dtype)
# Normal data is
data = np.zeros([1, 2, 3], np.uint8)
B = IndexBuffer(data)
assert B.dtype == np.uint8
# We can also convert
B.set_data(data, convert=True)
assert B.dtype == np.uint32
# Structured data not allowed
self.assertRaises(TypeError, IndexBuffer, dtype=dtype)
self.assertRaises(TypeError, B.set_data, sdata)
run_tests_if_main()
```
#### File: gloo/tests/test_globject.py
```python
from vispy.testing import run_tests_if_main
from vispy.gloo.globject import GLObject
def test_globject():
"""Test gl object uinique id and GLIR CREATE command"""
objects = [GLObject() for i in range(10)]
ids = [ob.id for ob in objects]
# Verify that each id is unique (test should not care how)
assert len(set(ids)) == len(objects)
# Verify that glir commands have been created
commands = []
for ob in objects:
commands.extend(ob._glir.clear())
assert len(commands) == len(objects)
for cmd in commands:
assert cmd[0] == 'CREATE'
# Delete
ob = objects[-1]
q = ob._glir # get it now, because its gone after we delete it
ob.delete()
cmd = q.clear()[-1]
assert cmd[0] == 'DELETE'
run_tests_if_main()
```
#### File: widgets/tests/test_colorbar.py
```python
from vispy.scene.widgets import ColorBarWidget
from vispy.testing import (requires_application, TestingCanvas,
run_tests_if_main)
from vispy.testing.image_tester import assert_image_approved
def create_colorbar(pos, orientation, label='label string here'):
colorbar = ColorBarWidget(pos=pos,
orientation=orientation,
label=label,
cmap='autumn',
border_color='white',
border_width=2)
colorbar.label.color = 'white'
colorbar.label.font_size = 5
colorbar.ticks[0].color = 'white'
colorbar.ticks[0].font_size = 5
colorbar.ticks[1].color = 'white'
colorbar.ticks[1].font_size = 5
return colorbar
@requires_application()
def test_colorbar_widget():
with TestingCanvas() as c:
colorbar_top = create_colorbar(pos=(50, 50),
label="my label",
orientation='top')
c.draw_visual(colorbar_top)
assert_image_approved(c.render(), 'visuals/colorbar/top.png')
assert colorbar_top.label.text == "my label"
run_tests_if_main()
```
#### File: vispy/util/eq.py
```python
from numpy import ndarray, bool_
def eq(a, b):
"""The great missing equivalence function: Guaranteed evaluation
to a single bool value.
"""
if a is b:
return True
if a is None or b is None:
return True if a is None and b is None else False
try:
e = a == b
except ValueError:
return False
except AttributeError:
return False
except Exception:
print("a:", str(type(a)), str(a))
print("b:", str(type(b)), str(b))
raise
t = type(e)
if t is bool:
return e
elif t is bool_:
return bool(e)
elif isinstance(e, ndarray):
try:
# disaster: if a is empty and b is not, then e.all() is True
if a.shape != b.shape:
return False
except Exception:
return False
if (hasattr(e, 'implements') and e.implements('MetaArray')):
return e.asarray().all()
else:
return e.all()
else:
raise Exception("== operator returned type %s" % str(type(e)))
```
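A few examples of how `eq` differs from a bare `==`, using only the function defined above; the values are illustrative.
```python
import numpy as np
from vispy.util.eq import eq

a = np.array([1, 2, 3])

print(a == np.array([1, 2, 3]))       # element-wise: [ True  True  True]
print(eq(a, np.array([1, 2, 3])))     # single bool: True
print(eq(a, np.array([1, 2, 4])))     # False (via e.all())
print(eq(a, np.array([1, 2])))        # False (mismatched shapes)
print(eq(None, None))                 # True
print(eq(None, 0))                    # False
```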
#### File: vispy/util/logs.py
```python
import base64
import logging
import sys
import inspect
import re
import traceback
import json
from functools import partial
import numpy as np
###############################################################################
# LOGGING (some adapted from mne-python)
def _get_vispy_caller():
"""Helper to get vispy calling function from the stack"""
records = inspect.stack()
# first few records are vispy-based logging calls
for record in records[5:]:
module = record[0].f_globals['__name__']
if module.startswith('vispy'):
line = str(record[0].f_lineno)
func = record[3]
cls = record[0].f_locals.get('self', None)
clsname = "" if cls is None else cls.__class__.__name__ + '.'
caller = "{0}:{1}{2}({3}): ".format(module, clsname, func, line)
return caller
return 'unknown'
# class _WrapStdOut(object):
# """Class to work around how doctest captures stdout"""
# def __getattr__(self, name):
# # Even more ridiculous than this class, this must be sys.stdout (not
# # just stdout) in order for this to work (tested on OSX and Linux)
# return getattr(sys.stdout, name)
class _VispyFormatter(logging.Formatter):
"""Formatter that optionally prepends caller"""
def __init__(self):
logging.Formatter.__init__(self, '%(levelname)s: %(message)s')
self._vispy_prepend_caller = False
def _vispy_set_prepend(self, prepend):
self._vispy_prepend_caller = prepend
def format(self, record):
out = logging.Formatter.format(self, record)
if self._vispy_prepend_caller:
out = _get_vispy_caller() + out
return out
class _VispyStreamHandler(logging.StreamHandler):
"""Stream handler allowing matching and recording
This handler has two useful optional additions:
1. Recording emitted messages.
2. Performing regexp substring matching.
Prepending of traceback information is done in _VispyFormatter.
"""
def __init__(self):
logging.StreamHandler.__init__(self, sys.stderr)
self._vispy_formatter = _lf
self.setFormatter(self._vispy_formatter)
self._vispy_match = None
self._vispy_emit_list = list()
self._vispy_set_emit_record(False)
self._vispy_set_match(None)
self._vispy_print_msg = True
def _vispy_emit_match_andor_record(self, record):
"""Log message emitter that optionally matches and/or records"""
test = record.getMessage()
match = self._vispy_match
if (match is None or re.search(match, test) or
re.search(match, _get_vispy_caller())):
if self._vispy_emit_record:
fmt_rec = self._vispy_formatter.format(record)
self._vispy_emit_list.append(fmt_rec)
if self._vispy_print_msg:
return logging.StreamHandler.emit(self, record)
else:
return
def _vispy_set_match(self, match):
old_match = self._vispy_match
self._vispy_match = match
# Triage here to avoid a bunch of if's later (more efficient)
if match is not None or self._vispy_emit_record:
self.emit = self._vispy_emit_match_andor_record
else:
self.emit = partial(logging.StreamHandler.emit, self)
return old_match
def _vispy_set_emit_record(self, record):
self._vispy_emit_record = record
match = self._vispy_match
# Triage here to avoid a bunch of if's later (more efficient)
if match is not None or self._vispy_emit_record:
self.emit = self._vispy_emit_match_andor_record
else:
self.emit = partial(logging.StreamHandler.emit, self)
def _vispy_reset_list(self):
self._vispy_emit_list = list()
logger = logging.getLogger('vispy')
_lf = _VispyFormatter()
_lh = _VispyStreamHandler() # needs _lf to exist
logger.addHandler(_lh)
logging_types = dict(debug=logging.DEBUG, info=logging.INFO,
warning=logging.WARNING, error=logging.ERROR,
critical=logging.CRITICAL)
def set_log_level(verbose, match=None, return_old=False):
"""Convenience function for setting the logging level
Parameters
----------
verbose : bool, str, int, or None
The verbosity of messages to print. If a str, it can be either DEBUG,
INFO, WARNING, ERROR, or CRITICAL. Note that these are for
convenience and are equivalent to passing in logging.DEBUG, etc.
For bool, True is the same as 'INFO', False is the same as 'WARNING'.
match : str | None
String to match. Only those messages that both contain a substring
that regexp matches ``'match'`` (and the ``verbose`` level) will be
displayed.
return_old : bool
If True, return the old verbosity level and old match.
Notes
-----
If ``verbose=='debug'``, then the ``vispy`` method emitting the log
message will be prepended to each log message, which is useful for
debugging. If ``verbose=='debug'`` or ``match is not None``, then a
small performance overhead is added. Thus it is suggested to only use
these options when performance is not crucial.
See also
--------
vispy.util.use_log_level
"""
# This method is responsible for setting properties of the handler and
# formatter such that proper messages (possibly with the vispy caller
# prepended) are displayed. Storing log messages is only available
# via the context handler (use_log_level), so that configuration is
# done by the context handler itself.
if isinstance(verbose, bool):
verbose = 'info' if verbose else 'warning'
if isinstance(verbose, str):
verbose = verbose.lower()
if verbose not in logging_types:
raise ValueError('Invalid argument "%s"' % verbose)
verbose = logging_types[verbose]
else:
raise TypeError('verbose must be a bool or string')
logger = logging.getLogger('vispy')
old_verbose = logger.level
old_match = _lh._vispy_set_match(match)
logger.setLevel(verbose)
if verbose <= logging.DEBUG:
_lf._vispy_set_prepend(True)
else:
_lf._vispy_set_prepend(False)
out = None
if return_old:
out = (old_verbose, old_match)
return out
class use_log_level(object):
"""Context manager that temporarily sets logging level
Parameters
----------
level : str
See ``set_log_level`` for options.
match : str | None
The string to match.
record : bool
If True, the context manager will keep a record of the logging
messages generated by vispy. Otherwise, an empty list will
be returned.
print_msg : bool
If False, printing of (all) messages will be suppressed. This is
mainly useful in testing. False only works in `record=True` mode, if
not recording messages, consider setting `level` appropriately.
Returns
-------
records : list
As a context manager, an empty list or the list of logging messages
will be returned (depending on the input ``record``).
"""
# This method mostly wraps to set_log_level, but also takes
# care of enabling/disabling message recording in the formatter.
def __init__(self, level, match=None, record=False, print_msg=True):
self._new_level = level
self._new_match = match
self._print_msg = print_msg
self._record = record
if match is not None and not isinstance(match, str):
raise TypeError('match must be None or str')
def __enter__(self):
# set the log level
old_level, old_match = set_log_level(self._new_level,
self._new_match, return_old=True)
for key, value in logging_types.items():
if value == old_level:
old_level = key
self._old_level = old_level
self._old_match = old_match
if not self._print_msg:
_lh._vispy_print_msg = False
# set handler to record, if appropriate
_lh._vispy_reset_list()
if self._record:
_lh._vispy_set_emit_record(True)
return _lh._vispy_emit_list
else:
return list()
def __exit__(self, type, value, traceback):
# reset log level
set_log_level(self._old_level, self._old_match)
# reset handler
if self._record:
_lh._vispy_set_emit_record(False)
if not self._print_msg:
_lh._vispy_print_msg = True # set it back
def log_exception(level='warning', tb_skip=2):
"""
Send an exception and traceback to the logger.
This function is used in cases where an exception is handled safely but
nevertheless should generate a descriptive error message. An extra line
is inserted into the stack trace indicating where the exception was caught.
Parameters
----------
level : str
See ``set_log_level`` for options.
tb_skip : int
The number of traceback entries to ignore, prior to the point where
the exception was caught. The default is 2.
"""
stack = "".join(traceback.format_stack()[:-tb_skip])
tb = traceback.format_exception(*sys.exc_info())
msg = tb[0] # "Traceback (most recent call last):"
msg += stack
msg += " << caught exception here: >>\n"
msg += "".join(tb[1:]).rstrip()
logger.log(logging_types[level], msg)
logger.log_exception = log_exception # make this easier to reach
def _handle_exception(ignore_callback_errors, print_callback_errors, obj,
cb_event=None, node=None):
"""Helper for prining errors in callbacks
See EventEmitter._invoke_callback for a use example.
"""
if not hasattr(obj, '_vispy_err_registry'):
obj._vispy_err_registry = {}
registry = obj._vispy_err_registry
if cb_event is not None:
cb, event = cb_event
exp_type = 'callback'
else:
exp_type = 'node'
type_, value, tb = sys.exc_info()
tb = tb.tb_next # Skip *this* frame
sys.last_type = type_
sys.last_value = value
sys.last_traceback = tb
del tb # Get rid of it in this namespace
# Handle
if not ignore_callback_errors:
raise
if print_callback_errors != "never":
this_print = 'full'
if print_callback_errors in ('first', 'reminders'):
# need to check to see if we've hit this yet
if exp_type == 'callback':
key = repr(cb) + repr(event)
else:
key = repr(node)
if key in registry:
registry[key] += 1
if print_callback_errors == 'first':
this_print = None
else: # reminders
ii = registry[key]
# Use logarithmic selection
# (1, 2, ..., 10, 20, ..., 100, 200, ...)
if ii == (2 ** int(np.log2(ii))):
this_print = ii
else:
this_print = None
else:
registry[key] = 1
if this_print == 'full':
logger.log_exception()
if exp_type == 'callback':
logger.error("Invoking %s for %s" % (cb, event))
else: # == 'node':
logger.error("Drawing node %s" % node)
elif this_print is not None:
if exp_type == 'callback':
logger.error("Invoking %s repeat %s"
% (cb, this_print))
else: # == 'node':
logger.error("Drawing node %s repeat %s"
% (node, this_print))
def _serialize_buffer(buffer, array_serialization=None):
"""Serialize a NumPy array."""
if array_serialization == 'binary':
return buffer.ravel().tobytes()
elif array_serialization == 'base64':
return {'storage_type': 'base64',
'buffer': base64.b64encode(buffer).decode('ascii')
}
raise ValueError("The array serialization method should be 'binary' or "
"'base64'.")
class NumPyJSONEncoder(json.JSONEncoder):
def default(self, obj):
if isinstance(obj, np.ndarray):
return _serialize_buffer(obj, array_serialization='base64')
elif isinstance(obj, np.generic):
return obj.item()
return json.JSONEncoder.default(self, obj)
```
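A short usage sketch of the helpers above, importing them straight from this module (vispy also re-exports some of them elsewhere, which is not relied on here).
```python
from vispy.util.logs import logger, set_log_level, use_log_level

set_log_level('info')
logger.info('printed at INFO level')

# Temporarily capture DEBUG messages without printing them.
with use_log_level('debug', record=True, print_msg=False) as records:
    logger.debug('captured but not printed')

print(len(records), 'message(s) recorded')  # 1
```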
#### File: util/svg/transformable.py
```python
from . element import Element
from . transform import Transform
class Transformable(Element):
"""Transformable SVG element"""
def __init__(self, content=None, parent=None):
Element.__init__(self, content, parent)
if isinstance(content, str):
self._transform = Transform()
self._computed_transform = self._transform
else:
self._transform = Transform(content.get("transform", None))
self._computed_transform = self._transform
if parent:
self._computed_transform = self._transform + \
self.parent.transform
@property
def transform(self):
return self._computed_transform
```
#### File: graphs/layouts/circular.py
```python
import numpy as np
from ..util import _straight_line_vertices, issparse
def circular(adjacency_mat, directed=False):
"""Places all nodes on a single circle.
Parameters
----------
adjacency_mat : matrix or sparse
The graph adjacency matrix
directed : bool
        Whether the graph is directed. If this is True, it will also
generate the vertices for arrows, which can be passed to an
ArrowVisual.
Yields
------
(node_vertices, line_vertices, arrow_vertices) : tuple
Yields the node and line vertices in a tuple. This layout only yields a
single time, and has no builtin animation
"""
if issparse(adjacency_mat):
adjacency_mat = adjacency_mat.tocoo()
num_nodes = adjacency_mat.shape[0]
t = np.linspace(0, 2 * np.pi, num_nodes, endpoint=False)
# Visual coordinate system is between 0 and 1, so generate a circle with
# radius 0.5 and center it at the point (0.5, 0.5).
node_coords = (0.5 * np.array([np.cos(t), np.sin(t)]) + 0.5).T
node_coords = node_coords.astype(np.float32)
line_vertices, arrows = _straight_line_vertices(adjacency_mat,
node_coords, directed)
yield node_coords, line_vertices, arrows
```
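A usage sketch of this one-shot generator with a small dense adjacency matrix; it relies on the package-relative `_straight_line_vertices` helper, and the full import path (`vispy.visuals.graphs.layouts.circular`) is assumed from the file heading above.
```python
import numpy as np
from vispy.visuals.graphs.layouts.circular import circular

# 5-node ring as a dense adjacency matrix
adjacency = np.zeros((5, 5), dtype=np.float32)
for i in range(5):
    adjacency[i, (i + 1) % 5] = 1.0

node_coords, line_vertices, arrows = next(circular(adjacency))
print(node_coords)           # 5 points on a circle of radius 0.5 around (0.5, 0.5)
print(line_vertices.shape)   # straight segments between connected nodes
```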
#### File: graphs/layouts/force_directed.py
```python
import numpy as np
try:
from scipy.sparse import issparse
except ImportError:
def issparse(*args, **kwargs):
return False
from ..util import _straight_line_vertices, _rescale_layout
class fruchterman_reingold(object):
r"""Fruchterman-Reingold implementation adapted from NetworkX.
In the Fruchterman-Reingold algorithm, the whole graph is modelled as a
collection of particles, it runs a simplified particle simulation to
find a nice layout for the graph.
    Parameters
    ----------
optimal : number
Optimal distance between nodes. Defaults to :math:`1/\\sqrt{N}` where
N is the number of nodes.
iterations : int
Number of iterations to perform for layout calculation.
pos : array
Initial positions of the nodes
Notes
-----
The algorithm is explained in more detail in the original paper [1]_.
.. [1] Fruchterman, <NAME>, and <NAME>. "Graph drawing by
force-directed placement." Softw., Pract. Exper. 21.11 (1991),
1129-1164.
"""
def __init__(self, optimal=None, iterations=50, pos=None):
self.dim = 2
self.optimal = optimal
self.iterations = iterations
self.num_nodes = None
self.pos = pos
def __call__(self, adjacency_mat, directed=False):
"""
Starts the calculation of the graph layout.
This is a generator, and after each iteration it yields the new
positions for the nodes, together with the vertices for the edges
and the arrows.
There are two solvers here: one specially adapted for SciPy sparse
matrices, and the other for larger networks.
Parameters
----------
adjacency_mat : array
The graph adjacency matrix.
directed : bool
            Whether the graph is directed or not. If this is True,
it will draw arrows for directed edges.
Yields
------
layout : tuple
For each iteration of the layout calculation it yields a tuple
containing (node_vertices, line_vertices, arrow_vertices). These
vertices can be passed to the `MarkersVisual` and `ArrowVisual`.
"""
if adjacency_mat.shape[0] != adjacency_mat.shape[1]:
raise ValueError("Adjacency matrix should be square.")
self.num_nodes = adjacency_mat.shape[0]
if issparse(adjacency_mat):
# Use the sparse solver
solver = self._sparse_fruchterman_reingold
else:
solver = self._fruchterman_reingold
for result in solver(adjacency_mat, directed):
yield result
def _fruchterman_reingold(self, adjacency_mat, directed=False):
if self.optimal is None:
self.optimal = 1 / np.sqrt(self.num_nodes)
if self.pos is None:
# Random initial positions
pos = np.asarray(
np.random.random((self.num_nodes, self.dim)),
dtype=np.float32
)
else:
pos = self.pos.astype(np.float32)
# Yield initial positions
line_vertices, arrows = _straight_line_vertices(adjacency_mat, pos,
directed)
yield pos, line_vertices, arrows
# The initial "temperature" is about .1 of domain area (=1x1)
# this is the largest step allowed in the dynamics.
t = 0.1
# Simple cooling scheme.
# Linearly step down by dt on each iteration so last iteration is
# size dt.
dt = t / float(self.iterations+1)
# The inscrutable (but fast) version
# This is still O(V^2)
# Could use multilevel methods to speed this up significantly
for iteration in range(self.iterations):
delta_pos = _calculate_delta_pos(adjacency_mat, pos, t,
self.optimal)
pos += delta_pos
_rescale_layout(pos)
# cool temperature
t -= dt
# Calculate edge vertices and arrows
line_vertices, arrows = _straight_line_vertices(adjacency_mat,
pos, directed)
yield pos, line_vertices, arrows
def _sparse_fruchterman_reingold(self, adjacency_mat, directed=False):
# Optimal distance between nodes
if self.optimal is None:
self.optimal = 1 / np.sqrt(self.num_nodes)
# Change to list of list format
# Also construct the matrix in COO format for easy edge construction
adjacency_arr = adjacency_mat.toarray()
adjacency_coo = adjacency_mat.tocoo()
if self.pos is None:
# Random initial positions
pos = np.asarray(
np.random.random((self.num_nodes, self.dim)),
dtype=np.float32
)
else:
pos = self.pos.astype(np.float32)
# Yield initial positions
line_vertices, arrows = _straight_line_vertices(adjacency_coo, pos,
directed)
yield pos, line_vertices, arrows
# The initial "temperature" is about .1 of domain area (=1x1)
# This is the largest step allowed in the dynamics.
t = 0.1
# Simple cooling scheme.
# Linearly step down by dt on each iteration so last iteration is
# size dt.
dt = t / float(self.iterations+1)
for iteration in range(self.iterations):
delta_pos = _calculate_delta_pos(adjacency_arr, pos, t,
self.optimal)
pos += delta_pos
_rescale_layout(pos)
# Cool temperature
t -= dt
# Calculate line vertices
line_vertices, arrows = _straight_line_vertices(adjacency_coo,
pos, directed)
yield pos, line_vertices, arrows
def _calculate_delta_pos(adjacency_arr, pos, t, optimal):
"""Helper to calculate the delta position"""
# XXX eventually this should be refactored for the sparse case to only
# do the necessary pairwise distances
delta = pos[:, np.newaxis, :] - pos
# Distance between points
distance2 = (delta*delta).sum(axis=-1)
# Enforce minimum distance of 0.01
distance2 = np.where(distance2 < 0.0001, 0.0001, distance2)
distance = np.sqrt(distance2)
# Displacement "force"
displacement = np.zeros((len(delta), 2))
for ii in range(2):
displacement[:, ii] = (
delta[:, :, ii] *
((optimal * optimal) / (distance*distance) -
(adjacency_arr * distance) / optimal)).sum(axis=1)
length = np.sqrt((displacement**2).sum(axis=1))
length = np.where(length < 0.01, 0.1, length)
delta_pos = displacement * t / length[:, np.newaxis]
return delta_pos
```
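The class is used as a callable that yields successive layouts; a hedged sketch of driving it directly (normally vispy's graph visual consumes it), with the import path assumed to sit in the same package as the circular layout above.
```python
import numpy as np
from vispy.visuals.graphs.layouts.force_directed import fruchterman_reingold

rng = np.random.default_rng(0)
adjacency = (rng.random((30, 30)) < 0.1).astype(np.float32)
adjacency = np.maximum(adjacency, adjacency.T)   # undirected: symmetric matrix

layout = fruchterman_reingold(iterations=50)
for node_positions, line_vertices, arrows in layout(adjacency):
    pass  # each iteration yields progressively relaxed positions

print(node_positions.min(axis=0), node_positions.max(axis=0))
```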
#### File: vispy/visuals/gridmesh.py
```python
from .mesh import MeshVisual
from ..geometry import create_grid_mesh, MeshData
class GridMeshVisual(MeshVisual):
"""Displays a mesh in a Cartesian grid about x,y,z coordinates.
This makes it simple to generate a mesh from e.g. the output
of numpy.meshgrid.
All arguments are optional, though they can be changed
individually later with the set_data method.
Parameters
----------
xs : ndarray
A 2d array of x coordinates for the vertices of the mesh. Must
have the same dimensions as ys and zs.
ys : ndarray
A 2d array of y coordinates for the vertices of the mesh. Must
have the same dimensions as xs and zs.
zs : ndarray
A 2d array of z coordinates for the vertices of the mesh. Must
have the same dimensions as xs and ys.
colors : ndarray | None
The colors of the points of the mesh. Should be either a
(width, height, 4) array of rgba colors at each grid point or
a (width, height, 3) array of rgb colors at each grid point.
Defaults to None, in which case the default color of a
MeshVisual is used.
shading : str | None
Same as for the `MeshVisual` class. Defaults to 'smooth'.
**kwargs :
Other arguments are passed directly to MeshVisual.
"""
def __init__(self, xs, ys, zs, colors=None, shading='smooth',
**kwargs):
if xs is None or ys is None or zs is None:
raise ValueError('All of xs, ys and zs must be initialised '
'with arrays.')
self._xs = None
self._ys = None
self._zs = None
self.__vertices = None
self.__meshdata = MeshData()
MeshVisual.__init__(self, shading=shading, **kwargs)
self.set_data(xs, ys, zs, colors)
def set_data(self, xs=None, ys=None, zs=None, colors=None):
"""Update the mesh data.
Parameters
----------
xs : ndarray | None
A 2d array of x coordinates for the vertices of the mesh.
ys : ndarray | None
A 2d array of y coordinates for the vertices of the mesh.
zs : ndarray | None
A 2d array of z coordinates for the vertices of the mesh.
colors : ndarray | None
The color at each point of the mesh. Must have shape
(width, height, 4) or (width, height, 3) for rgba or rgb
color definitions respectively.
"""
if xs is None:
xs = self._xs
self.__vertices = None
if ys is None:
ys = self._ys
self.__vertices = None
if zs is None:
zs = self._zs
self.__vertices = None
if self.__vertices is None:
vertices, indices = create_grid_mesh(xs, ys, zs)
self._xs = xs
self._ys = ys
self._zs = zs
if self.__vertices is None:
vertices, indices = create_grid_mesh(self._xs, self._ys, self._zs)
self.__meshdata.set_vertices(vertices)
self.__meshdata.set_faces(indices)
if colors is not None:
self.__meshdata.set_vertex_colors(colors.reshape(
colors.shape[0] * colors.shape[1], colors.shape[2]))
MeshVisual.set_data(self, meshdata=self.__meshdata)
```
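As the docstring says, the visual is aimed at `numpy.meshgrid` output; a minimal scene-graph sketch, assuming the usual `scene.visuals.GridMesh` node wrapper that vispy generates for its visuals.
```python
import numpy as np
from vispy import scene

canvas = scene.SceneCanvas(keys='interactive', show=True)
view = canvas.central_widget.add_view()
view.camera = 'turntable'

# height field z = f(x, y) on a regular grid
xs, ys = np.meshgrid(np.linspace(-2, 2, 50), np.linspace(-2, 2, 50))
zs = np.sin(xs * ys)

mesh = scene.visuals.GridMesh(xs, ys, zs, parent=view.scene)

if __name__ == '__main__':
    canvas.app.run()
```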
#### File: vispy/visuals/rectangle.py
```python
from __future__ import division
import numpy as np
from ..color import Color
from .polygon import PolygonVisual
class RectangleVisual(PolygonVisual):
"""
Displays a 2D rectangle with optional rounded corners
Parameters
----------
center : array
Center of the rectangle
color : instance of Color
The fill color to use.
border_color : instance of Color
The border color to use.
border_width : int
Border width in pixels.
Line widths > 1px are only
guaranteed to work when using `border_method='agg'` method.
height : float
Length of the rectangle along y-axis
Defaults to 1.0
width : float
Length of the rectangle along x-axis
Defaults to 1.0
radius : float | array
Radii of curvatures of corners in clockwise order from top-left
Defaults to 0.
**kwargs : dict
Keyword arguments to pass to `PolygonVisual`.
"""
def __init__(self, center=None, color='black', border_color=None,
border_width=1, height=1.0, width=1.0,
radius=[0., 0., 0., 0.], **kwargs):
self._height = height
self._width = width
self._color = Color(color)
self._border_color = Color(border_color)
self._border_width = border_width
self._radius = radius
self._center = center
# triangulation can be very slow
kwargs.setdefault('triangulate', False)
PolygonVisual.__init__(self, pos=None, color=color,
border_color=border_color,
border_width=border_width, **kwargs)
self._mesh.mode = 'triangle_fan'
self._regen_pos()
self._update()
@staticmethod
def _generate_vertices(center, radius, height, width):
half_height = height / 2.
half_width = width / 2.
hw = min(half_height, half_width)
if isinstance(radius, (list, tuple)):
if len(radius) != 4:
raise ValueError("radius must be float or 4 value tuple/list"
" (got %s of length %d)" % (type(radius),
len(radius)))
if (radius > np.ones(4) * hw).all():
raise ValueError('Radius of curvature cannot be greater than\
half of min(width, height)')
radius = np.array(radius, dtype=np.float32)
else:
if radius > hw:
raise ValueError('Radius of curvature cannot be greater than\
half of min(width, height)')
radius = np.ones(4) * radius
num_segments = (radius / hw * 500.).astype(int)
bias1 = np.ones(4) * half_width - radius
bias2 = np.ones(4) * half_height - radius
corner1 = np.empty([num_segments[0]+1, 3], dtype=np.float32)
corner2 = np.empty([num_segments[1]+1, 3], dtype=np.float32)
corner3 = np.empty([num_segments[2]+1, 3], dtype=np.float32)
corner4 = np.empty([num_segments[3]+1, 3], dtype=np.float32)
start_angle = 0.
end_angle = np.pi / 2.
theta = np.linspace(end_angle, start_angle, num_segments[0]+1)
corner1[:, 0] = center[0] - bias1[0] - radius[0] * np.sin(theta)
corner1[:, 1] = center[1] - bias2[0] - radius[0] * np.cos(theta)
corner1[:, 2] = 0
theta = np.linspace(start_angle, end_angle, num_segments[1]+1)
corner2[:, 0] = center[0] + bias1[1] + radius[1] * np.sin(theta)
corner2[:, 1] = center[1] - bias2[1] - radius[1] * np.cos(theta)
corner2[:, 2] = 0
theta = np.linspace(end_angle, start_angle, num_segments[2]+1)
corner3[:, 0] = center[0] + bias1[2] + radius[2] * np.sin(theta)
corner3[:, 1] = center[1] + bias2[2] + radius[2] * np.cos(theta)
corner3[:, 2] = 0
theta = np.linspace(start_angle, end_angle, num_segments[3]+1)
corner4[:, 0] = center[0] - bias1[3] - radius[3] * np.sin(theta)
corner4[:, 1] = center[1] + bias2[3] + radius[3] * np.cos(theta)
corner4[:, 2] = 0
output = np.concatenate(([[center[0], center[1], 0.]],
[[center[0] - half_width, center[1], 0.]],
corner1,
[[center[0], center[1] - half_height, 0.]],
corner2,
[[center[0] + half_width, center[1], 0.]],
corner3,
[[center[0], center[1] + half_height, 0.]],
corner4,
[[center[0] - half_width, center[1], 0.]]))
vertices = np.array(output, dtype=np.float32)
return vertices
@property
def center(self):
"""The center of the ellipse"""
return self._center
@center.setter
def center(self, center):
"""The center of the ellipse"""
self._center = center
self._regen_pos()
self._update()
@property
def height(self):
"""The height of the rectangle."""
return self._height
@height.setter
def height(self, height):
if height <= 0.:
raise ValueError('Height must be positive')
self._height = height
self._regen_pos()
self._update()
@property
def width(self):
"""The width of the rectangle."""
return self._width
@width.setter
def width(self, width):
if width <= 0.:
raise ValueError('Width must be positive')
self._width = width
self._regen_pos()
self._update()
@property
def radius(self):
"""The radius of curvature of rounded corners."""
return self._radius
@radius.setter
def radius(self, radius):
self._radius = radius
self._regen_pos()
self._update()
def _regen_pos(self):
vertices = self._generate_vertices(center=self._center,
radius=self._radius,
height=self._height,
width=self._width)
# don't use the center point and only use X/Y coordinates
vertices = vertices[1:, ..., :2]
self._pos = vertices
``` |
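A small scene-graph sketch of the per-corner `radius` argument (clockwise from top-left), assuming the usual `scene.visuals.Rectangle` node wrapper.
```python
from vispy import scene

canvas = scene.SceneCanvas(keys='interactive', size=(400, 400), show=True)
view = canvas.central_widget.add_view(camera='panzoom')

# round only the top-left and bottom-right corners
rect = scene.visuals.Rectangle(center=(0.5, 0.5), width=0.6, height=0.3,
                               radius=[0.05, 0.0, 0.05, 0.0],
                               color='teal', border_color='white',
                               parent=view.scene)
view.camera.set_range(x=(0, 1), y=(0, 1))

if __name__ == '__main__':
    canvas.app.run()
```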
{
"source": "3dani33/ePaper_polaroid",
"score": 3
} |
#### File: ePaper_polaroid/src/display.py
```python
from waveshare_epd import epd1in54_V2
from PIL import Image
# two functions for cropping images to square
# from: https://note.nkmk.me/en/python-pillow-square-circle-thumbnail/
def crop_center(img, crop_width, crop_height):
img_width, img_height = img.size
return img.crop(((img_width - crop_width) // 2,
(img_height - crop_height) // 2,
(img_width + crop_width) // 2,
(img_height + crop_height) // 2))
def crop_max_square(img):
return crop_center(img, min(img.size), min(img.size))
def setup():
global epd
epd = epd1in54_V2.EPD()
epd.init(0)
epd.sleep()
def display_image(path):
im = Image.open(path)
# im.show() # for debugging
im_cropped = crop_max_square(im)
# im_cropped.show() # for debugging
im_resized = im_cropped.resize((200, 200))
# im_resized.show() # for debugging
im_bw = im_resized.convert('1')
# im_bw.show() for debugging
global epd
epd.init(0)
epd.display(epd.getbuffer(im_bw))
epd.sleep()
if __name__ == '__main__':
setup()
display_image('temp.jpg')
``` |
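To make the center-crop arithmetic concrete, a tiny example with a landscape image (no e-paper hardware involved):
```python
from PIL import Image
# crop_center / crop_max_square as defined in display.py above

img = Image.new('RGB', (640, 480))
square = crop_max_square(img)
print(square.size)  # (480, 480): the centered box (80, 0, 560, 480)
```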
{
"source": "3dani33/Kamerabasierte_Anwendungen",
"score": 3
} |
#### File: 3dani33/Kamerabasierte_Anwendungen/run_algorithm.py
```python
import plac
import numpy as np
import cv2
import os
from algorithm import motion_detection, get_algorithm_version
import file_handler
from evaluate_algorithm import update_frame_count
RED = (0, 0, 255)
WHITE = (255, 255, 255)
def load_video(file):
video = cv2.VideoCapture(file)
framerate = video.get(cv2.CAP_PROP_FPS)
frame_count = int(video.get(cv2.CAP_PROP_FRAME_COUNT))
print('framerate: {}, frame count: {}'.format(framerate, frame_count))
return (video, framerate, frame_count)
@plac.pos('file', 'The video file path to run the algorithm on.')
@plac.flg('show', 'Show the algorithm output live')
def run_algorithm(file: str, show: bool=False) -> None:
filename = os.path.basename(file)
print('Run algorithm on file: {}'.format(filename))
# load video
video, framerate, frame_count = load_video(file)
# try to find reference file
reference_available, reference = file_handler.load_reference_data(filename)
reference = update_frame_count(frame_count, reference)
# processing loop
    result = np.zeros(frame_count, dtype=bool)
intensity = np.zeros(frame_count)
for n in range(frame_count):
_, frame = video.read()
# error in frame count
if frame is None:
print('End of video! frame {} of {}'.format(n, frame_count))
print('Slicing result list to match new frame count: {}'.format(n))
result = result[:n]
frame_count = n
break
# pass image to run_algorithm and save the result
result[n], intensity[n], img_back_sub = motion_detection(frame)
if show:
# add indicator for reference
if reference_available:
col = RED if reference['reference'][n] else WHITE
cv2.circle(frame, center=(10, 10), radius=10, color=col, thickness=-1)
cv2.circle(img_back_sub, center=(10, 10), radius=10, color=col, thickness=-1)
# grab a few frames with motion
# if col == RED and n < 50:
# cv2.imwrite('img/filter_noise_{}.png'.format(n), img_back_sub)
# add indicator for algorithm
col = RED if result[n] else WHITE
cv2.circle(frame, center=(20, 10), radius=10, color=col, thickness=-1)
cv2.circle(img_back_sub, center=(20, 10), radius=10, color=col, thickness=-1)
# add frame number
font = cv2.FONT_HERSHEY_SIMPLEX
string = '{}/{}'.format(n,frame_count)
cv2.putText(frame, string, (100,15), font, 0.5,(255,255,255), 1, cv2.LINE_AA)
cv2.putText(img_back_sub, string, (100,15), font, 0.5,(255,255,255), 1, cv2.LINE_AA)
# show image
cv2.imshow('Doggy Cam: Standard View', frame)
cv2.imshow('Doggy Cam: Background Substraction', img_back_sub)
key = cv2.waitKey(1)
if key & 0xFF == ord('q'):
# allow quitting by pressing q
print('Quitting...')
exit()
    # Stack the annotated reference values and the measured intensities into a
    # 2-D array so they can be cleaned up later if the frame count was wrong
temp = np.vstack((reference['reference'],intensity))
temp = temp.T
# export temp to csv
file_handler.save_csv(filename, get_algorithm_version(), temp)
# save result with framerate and frame count in pickle file
file_handler.save_algorithm_result(filename, framerate, frame_count, result, get_algorithm_version())
if __name__ == '__main__':
plac.call(run_algorithm)
``` |
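The core of the processing loop is defensive frame reading: `CAP_PROP_FRAME_COUNT` can over-report, so the result array is sliced as soon as `read()` stops returning frames. A standalone sketch of just that pattern (the video path is hypothetical):

```python
import numpy as np
import cv2

def read_all_frames(path):
    """Read frames one by one, trusting the reported frame count only until read() fails."""
    video = cv2.VideoCapture(path)
    frame_count = max(int(video.get(cv2.CAP_PROP_FRAME_COUNT)), 0)
    result = np.zeros(frame_count, dtype=bool)
    for n in range(frame_count):
        ok, frame = video.read()
        if not ok or frame is None:
            # Container metadata over-reported; shrink the result array to the real length.
            result = result[:n]
            frame_count = n
            break
        # ... per-frame motion detection would go here ...
    video.release()
    return result, frame_count

if __name__ == '__main__':
    flags, count = read_all_frames('videos/example.mp4')  # hypothetical path
    print(count, flags.shape)
```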
{
"source": "3darkman/faction-builder-api",
"score": 3
} |
#### File: core/models/ability.py
```python
from django.db import models
class Ability(models.Model):
"""Ability to be choosed to creating a faction"""
name = models.CharField(max_length=255,)
description = models.TextField(blank=True)
def __str__(self):
return self.name
```
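In a Django shell the model behaves like any other; the `__str__` override is what makes admin listings readable. A small sketch, assuming the `core` app's migrations have been applied and that `core.models` re-exports `Ability` (as the imports in `faction_type.py` suggest):

```python
# e.g. inside `python manage.py shell`
from core.models import Ability

fireball = Ability.objects.create(
    name="Fireball",
    description="Deals area damage to adjacent units.",
)
print(str(fireball))            # Fireball
print(Ability.objects.count())  # 1 on a fresh database
```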
#### File: core/models/faction_type.py
```python
from django.contrib.contenttypes.fields import GenericRelation
from django.db import models
from core.models import Domain, CategorySlot, StartingProfile
class FactionType(models.Model):
name = models.CharField(max_length=250)
is_default = models.BooleanField()
domain = models.ForeignKey(Domain, on_delete=models.CASCADE)
age = models.SmallIntegerField()
slots = GenericRelation(CategorySlot)
starting_profile = GenericRelation(StartingProfile)
def __str__(self):
return self.name
``` |
{
"source": "3db/3db",
"score": 3
} |
#### File: controls/blender/background.py
```python
from typing import Any, Dict
from colorsys import hsv_to_rgb
import torch as ch
from ...try_bpy import bpy
from ..base_control import PostProcessControl
class BackgroundControl(PostProcessControl):
"""Control that replace the transparent background of a render (i.e., the
alpha channel) with a given color specified in HSV by the control parameters.
Continuous parameters:
- ``H``, ``S`` and ``V``: the hue, saturation, and value of the color to
fill the background with. (default range: ``[0, 1]``)
.. admonition:: Example images
.. thumbnail:: /_static/logs/background/images/image_1.png
:width: 100
:group: background
.. thumbnail:: /_static/logs/background/images/image_2.png
:width: 100
:group: background
.. thumbnail:: /_static/logs/background/images/image_3.png
:width: 100
:group: background
.. thumbnail:: /_static/logs/background/images/image_4.png
:width: 100
:group: background
.. thumbnail:: /_static/logs/background/images/image_5.png
:width: 100
:group: background
Varying all parameters across their ranges.
"""
def __init__(self, root_folder: str):
continuous_dims = {
'H': (0., 1.),
'S': (0., 1.),
'V': (0., 1.),
}
super().__init__(root_folder,
continuous_dims=continuous_dims)
def apply(self, render: ch.Tensor, control_args: Dict[str, Any]) -> ch.Tensor:
check_result = self.check_arguments(control_args)
assert check_result[0], check_result[1]
bpy.context.scene.render.film_transparent = True
rgb_color = hsv_to_rgb(control_args['H'], control_args['S'], control_args['V'])
rgb_color = ch.tensor(rgb_color)[:, None, None].float()
alpha = render[3:, :, :]
img = render[:3, :, :] * alpha + (1 - alpha) * rgb_color
return img
Control = BackgroundControl
```
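The compositing step in `apply` is ordinary alpha blending over a constant colour, and the tensor arithmetic can be checked without Blender. A small sketch with a fake 4-channel render:

```python
import torch as ch
from colorsys import hsv_to_rgb

# Fake RGBA render: 4 x H x W, alpha = 1 inside a centred square, 0 elsewhere
render = ch.zeros(4, 8, 8)
render[3, 2:6, 2:6] = 1.0

rgb_color = ch.tensor(hsv_to_rgb(0.6, 0.8, 0.9))[:, None, None].float()
alpha = render[3:, :, :]
composited = render[:3, :, :] * alpha + (1 - alpha) * rgb_color
print(composited.shape)  # torch.Size([3, 8, 8])
```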
#### File: threedb/policies/grid_search.py
```python
import numpy as np
from itertools import product
def chunks(lst, n):
"""Yield successive n-sized chunks from lst."""
for i in range(0, len(lst), n):
yield lst[i:i + n]
class GridSearchPolicy:
def __init__(self, continuous_dim, discrete_sizes, samples_per_dim):
self.continuous_dim = continuous_dim
self.discrete_sizes = discrete_sizes
self.samples_per_dim = samples_per_dim
def hint_scheduler(self):
total_queries = self.samples_per_dim ** self.continuous_dim
total_queries *= np.prod(self.discrete_sizes)
return 1, int(total_queries)
def run(self, render_and_send):
continuous_values = np.linspace(0, 1, self.samples_per_dim)
discrete_spaces = []
for n in self.discrete_sizes:
discrete_spaces.append(np.arange(n))
result = []
for continuous_instance in product(*([continuous_values] * self.continuous_dim)):
for discrete_instance in product(*discrete_spaces):
result.append((continuous_instance, discrete_instance))
for r in chunks(result, 1000):
render_and_send(r)
Policy = GridSearchPolicy
```
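A quick way to see what the policy enumerates, assuming the class above is importable: two continuous dimensions sampled at three points each, combined with one discrete dimension of size two, gives 3² × 2 = 18 configurations, delivered in chunks of at most 1000.

```python
policy = GridSearchPolicy(continuous_dim=2, discrete_sizes=[2], samples_per_dim=3)
print(policy.hint_scheduler())  # (1, 18)

batches = []
policy.run(render_and_send=batches.append)
print(len(batches), len(batches[0]))  # 1 18 -- a single chunk of 18 (continuous, discrete) pairs
```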
#### File: threedb/rendering/base_renderer.py
```python
import torch as ch
from abc import ABC, abstractmethod
from typing import List, Dict, Any, Optional, Tuple
from threedb.rendering.utils import ControlsApplier
class RenderEnv(ABC): pass
class RenderObject(ABC): pass
class BaseRenderer(ABC):
NAME: str = 'BaseRenderer'
KEYS: List[str] = ['rgb']
def __init__(self,
root_dir: str,
render_settings: Dict[str, Any],
env_extensions: List[str] = []) -> None:
self.root_dir = root_dir
self.env_extensions = env_extensions
self.args = render_settings
@staticmethod
@abstractmethod
def enumerate_models(search_dir: str) -> List[str]:
"""
Given a root folder, returns all valid .blend files (according to a
per-renderer convention).
"""
raise NotImplementedError
@staticmethod
@abstractmethod
def enumerate_environments(search_dir: str) -> List[str]:
"""
Given a root folder, returns all files in root/blender_environments/ which
have extensions in ENV_EXTENSIONS above.
"""
raise NotImplementedError
@abstractmethod
def declare_outputs(self) -> Dict[str, Tuple[List[int], str]]:
"""
This function declares what the output of render() will be, based on the
renderer settings. Returns a dictionary mapping keys to (dtype, size)
tuples---the output of render() is string-to-tensor dictionary whose
tensors will be checked against the return value of this function for
both size and type.
A basic implementation which suffices for most applications is provided
in the abstract class :class:`threedb.rendering.base_renderer.BaseRenderer`.
"""
raise NotImplementedError
@abstractmethod
def load_model(self, model: str) -> RenderObject:
"""
Given a root directory and a model id, loads the model into the renderer
and returns the corresponding object.
"""
raise NotImplementedError
@abstractmethod
def get_model_uid(self, model: RenderObject) -> str:
"""
Given an 3D model, return its UID as assigned by the renderer.
"""
@abstractmethod
def load_env(self, env: str) -> Optional[RenderEnv]:
"""
Given a root folder and environment ID, load the environment into
the renderer. If needed, returns an environment object, to be passed
back to the render() function.
"""
raise NotImplementedError
@abstractmethod
def setup_render(self,
model: Optional[RenderObject],
env: Optional[RenderEnv]) -> None:
"""
Perform setup operations for rendering. Called only when the model or
environment being rendered changes---otherwise, only render() will be
called. No return value.
"""
raise NotImplementedError
@abstractmethod
def get_context_dict(self, model_uid: str, object_class: int) -> Dict[str, Any]:
"""Returns, for a given model id, a "context" dictionary; this context
        will be passed to the controls (see :class:`threedb.controls.base_control.PreProcessControl` for more info).
Parameters
----------
model_uid : str
The ID of the model being rendered.
object_class : int
The class label for the model.
Returns
-------
Dict[str, Any]
A context dictionary
"""
@abstractmethod
def render(self,
model_uid: str,
loaded_model: RenderObject,
loaded_env: RenderEnv) -> Dict[str, ch.Tensor]:
"""[summary]
Parameters
----------
model_uid : str
Render a model and environment. You can assume that setup_render() has
been called with the relevant model and object in context. This function
should also handle applying the pre and post-processing controls.
loaded_model : RenderObject
The model that was most recently loaded and passed to setup_render.
loaded_env : RenderEnv
the environment that was most recently loaded and passed to setup_render.
Returns
-------
Dict[str, ch.Tensor]
A dictionary mapping result keys (e.g., 'rgb', 'segmentation', etc.)
to PyTorch tensor outputs.
"""
raise NotImplementedError
```
#### File: threedb/rendering/render_blender.py
```python
import re
from glob import glob
from multiprocessing import cpu_count
from os import path
from tempfile import TemporaryDirectory
from typing import Tuple, Dict, Optional, Iterable, List, Any
from ..try_bpy import bpy
import cv2
import numpy as np
import torch as ch
from .base_renderer import BaseRenderer, RenderEnv, RenderObject
from .utils import ControlsApplier
IMAGE_FORMAT = 'png'
ENV_EXTENSIONS = ['blend', 'exr', 'hdr']
"""
Utility functions
"""
def _get_model_path(root_folder: str, model: str) -> str:
return path.join(root_folder, 'blender_models', model)
def _get_env_path(root_folder: str, env: str) -> str:
return path.join(root_folder, 'blender_environments', env)
def _setup_nice_PNG(input_node: Any) -> None:
input_node.use_node_format = False
input_node.format.file_format = "PNG"
input_node.format.compression = 0
input_node.format.color_depth = "16"
class Blender(BaseRenderer):
NAME: str = 'Blender'
KEYS: List[str] = ['rgb', 'segmentation', 'uv', 'depth']
def __init__(self, root_dir: str, render_settings: Dict[str, Any], _ = None) -> None:
super().__init__(root_dir, render_settings, ENV_EXTENSIONS)
self.main_nodes = []
self.post_process_nodes = []
@staticmethod
def enumerate_models(search_dir: str) -> List[str]:
"""
Given a root folder, returns all .blend files in root/blender_models/
"""
return [path.basename(x) for x in glob(_get_model_path(search_dir, '*.blend'))]
@staticmethod
def enumerate_environments(search_dir: str) -> List[str]:
all_files = map(lambda x: path.basename(x), glob(_get_env_path(search_dir, '*.*')))
return list(filter(lambda x: x.split('.')[-1] in ENV_EXTENSIONS, all_files))
def declare_outputs(self) -> Dict[str, Tuple[List[int], str]]:
imsize = [self.args['resolution'], self.args['resolution']]
output_channels: Dict[str, Tuple[List[int], str]] = {'rgb': ([3, *imsize], 'float32')}
if self.args['with_uv']:
output_channels['uv'] = ([4, *imsize], 'float32')
if self.args['with_depth']:
output_channels['depth'] = ([4, *imsize], 'float32')
if self.args['with_segmentation']:
output_channels['segmentation'] = ([1, *imsize], 'int32')
return output_channels
def load_model(self, model: str) -> RenderObject:
basename, filename = path.split(_get_model_path(self.root_dir, model))
uid = filename.replace('.blend', '')
blendfile = path.join(basename, uid + '.blend')
section = "\\Object\\"
object = uid
filepath = uid + '.blend'
directory = blendfile + section
filename = object
bpy.ops.wm.append(
filepath=filepath,
filename=filename,
directory=directory)
return bpy.data.objects[uid]
def get_model_uid(self, loaded_model):
return loaded_model.name
def load_env(self, env: str) -> Optional[RenderEnv]:
full_env_path = _get_env_path(self.root_dir, env)
if env.endswith('.blend'): # full blender file
bpy.ops.wm.open_mainfile(filepath=full_env_path)
else: # HDRI env
bpy.ops.wm.read_homefile()
bpy.data.objects.remove(bpy.data.objects["Cube"], do_unlink=True)
bpy.data.objects.remove(bpy.data.objects["Light"], do_unlink=True)
bpy.context.scene.render.film_transparent = False
world = bpy.context.scene.world
node_tree = world.node_tree
output_node = world.node_tree.get_output_node('CYCLES')
[node_tree.links.remove(x) for x in output_node.inputs[0].links]
background_node = node_tree.nodes.new(type="ShaderNodeBackground")
node_tree.links.new(background_node.outputs[0], output_node.inputs[0])
img = bpy.data.images.load(full_env_path)
env_texture_node = node_tree.nodes.new(type="ShaderNodeTexEnvironment")
env_texture_node.image = img
node_tree.links.new(env_texture_node.outputs[0], background_node.inputs[0])
def _before_render(self) -> None:
"""
Private utility function to be called from render()
"""
# COLOR settings for render
bpy.context.scene.display_settings.display_device = 'None'
bpy.context.scene.sequencer_colorspace_settings.name = 'Raw'
bpy.context.view_layer.update()
bpy.context.scene.view_settings.view_transform = 'Standard'
bpy.context.scene.view_settings.look = 'None'
for node in self.main_nodes:
node.mute = False
for node in self.post_process_nodes:
node.mute = True
def _before_preprocess(self) -> None:
# COLOR SETTINGS for RGB output
bpy.context.scene.display_settings.display_device = 'sRGB'
bpy.context.scene.sequencer_colorspace_settings.name = 'sRGB'
bpy.context.view_layer.update()
bpy.context.scene.view_settings.view_transform = 'Filmic'
bpy.context.scene.view_settings.look = 'None'
for node in self.main_nodes:
node.mute = True
for node in self.post_process_nodes:
node.mute = False
def _setup_render_device(self, scene: Any, prefs: Any):
gpu_id: int = self.args['gpu_id']
cpu_cores: Optional[int] = self.args['cpu_cores']
cprefs = prefs.addons['cycles'].preferences
cprefs.get_devices() # important to update the list of devices
for device in cprefs.devices:
device.use = False
if cpu_cores:
scene.render.threads_mode = 'FIXED'
cores_available = cpu_count()
            assert cpu_cores <= cores_available, f'Your machine has only {cores_available} cores.'
scene.render.threads = max(1, cpu_cores)
if gpu_id == -1:
scene.cycles.device = 'CPU'
cpu_devices = [x for x in cprefs.devices if x.type == 'CPU']
cpu_devices[0].use = True
else:
scene.cycles.device = 'GPU'
gpu_devices = [x for x in cprefs.devices if x.type == 'CUDA']
if len(gpu_devices) != 0:
gpu_devices[gpu_id].use = True
else:
raise ValueError('No GPUs found.')
for d in cprefs.devices:
print(f'Device {d.name} ({d.type}) used? {d.use}')
def setup_render(self, model: Optional[RenderObject], env: Optional[RenderEnv]) -> None:
while self.main_nodes:
self.main_nodes.pop()
while self.post_process_nodes:
self.post_process_nodes.pop()
scene = bpy.context.scene
bpy.context.scene.render.engine = 'CYCLES'
prefs = bpy.context.preferences
self._setup_render_device(scene, prefs)
bpy.context.scene.cycles.samples = self.args['samples']
bpy.context.scene.render.tile_x = self.args['tile_size']
bpy.context.scene.render.tile_y = self.args['tile_size']
bpy.context.scene.render.resolution_x = self.args['resolution']
bpy.context.scene.render.resolution_y = self.args['resolution']
bpy.context.scene.render.use_persistent_data = True
scene.use_nodes = True
nodes = scene.node_tree.nodes
links = scene.node_tree.links
scene.view_layers["View Layer"].use_pass_uv = self.args['with_uv']
bpy.context.scene.view_layers["View Layer"].use_pass_z = self.args['with_depth']
bpy.context.scene.view_layers["View Layer"].use_pass_object_index = self.args['with_segmentation']
scene.use_nodes = True
scene.name = 'main_scene'
for node in list(nodes):
nodes.remove(node)
layers_node = nodes.new(type="CompositorNodeRLayers")
self.main_nodes.append(layers_node)
file_output_node = nodes.new(type="CompositorNodeOutputFile")
file_output_node.name = 'exr_output'
self.main_nodes.append(file_output_node)
file_output_node.format.file_format = "OPEN_EXR"
file_output_node.format.exr_codec = 'NONE'
output_slots = file_output_node.file_slots
output_slots.remove(file_output_node.inputs[0])
output_slots.new("render_exr")
links.new(layers_node.outputs[0], file_output_node.inputs[0])
if self.args['with_depth']:
output_slots.new("depth")
_setup_nice_PNG(file_output_node.file_slots["depth"])
math_node = nodes.new(type="CompositorNodeMath")
self.main_nodes.append(math_node)
links.new(layers_node.outputs["Depth"], math_node.inputs[0])
math_node.operation = "DIVIDE"
math_node.inputs[1].default_value = self.args['max_depth']
links.new(math_node.outputs[0], file_output_node.inputs["depth"])
if self.args['with_uv']:
output_slots.new("uv")
_setup_nice_PNG(file_output_node.file_slots["uv"])
links.new(layers_node.outputs["UV"], file_output_node.inputs["uv"])
if self.args['with_segmentation']:
output_slots.new("segmentation")
_setup_nice_PNG(file_output_node.file_slots["segmentation"])
file_output_node.file_slots["segmentation"].format.color_mode = "BW"
math_node = nodes.new(type="CompositorNodeMath")
self.main_nodes.append(math_node)
links.new(layers_node.outputs["IndexOB"], math_node.inputs[0])
math_node.operation = "DIVIDE"
math_node.inputs[1].default_value = 65535.0
links.new(math_node.outputs[0], file_output_node.inputs["segmentation"])
input_image = nodes.new(type="CompositorNodeImage")
self.post_process_nodes.append(input_image)
input_image.name = "input_image"
file_output_node = nodes.new(type="CompositorNodeOutputFile")
file_output_node.name = "rgb_output"
self.post_process_nodes.append(file_output_node)
output_slots = file_output_node.file_slots
output_slots.remove(file_output_node.inputs[0])
output_slots.new("rgb")
file_output_node.format.file_format = "PNG"
file_output_node.format.compression = 0
file_output_node.format.color_depth = "8"
links.new(input_image.outputs["Image"], file_output_node.inputs["rgb"])
def get_context_dict(self, model_uid: str, object_class: int) -> Dict[str, Any]:
obj = bpy.context.scene.objects[model_uid]
# 0 is background so we shift everything by 1
obj.pass_index = object_class + 1
return {'object': obj}
def render(self,
model_uid: str,
loaded_model: RenderObject,
loaded_env: RenderEnv) -> Dict[str, ch.Tensor]:
output = {}
with TemporaryDirectory() as temp_folder:
scene = bpy.context.scene
scene.node_tree.nodes['exr_output'].base_path = temp_folder
self._before_render()
bpy.ops.render.render(use_viewport=False, write_still=False)
self._before_preprocess()
written_file = glob(path.join(temp_folder, '*.exr'))
blender_loaded_image = bpy.data.images.load(written_file[0])
scene.node_tree.nodes["input_image"].image = blender_loaded_image
scene.node_tree.nodes["rgb_output"].format.file_format = IMAGE_FORMAT.upper()
scene.node_tree.nodes['rgb_output'].base_path = temp_folder
bpy.ops.render.render(use_viewport=False, write_still=False)
all_files = glob(path.join(temp_folder, "*.png"))
for full_filename in all_files:
name = re.sub(r'[0-9]+.png', '', path.basename(full_filename))
img = cv2.imread(full_filename, cv2.IMREAD_UNCHANGED)
if name == 'segmentation':
img = img[:, :, None] # Add extra dimension for the channel
img = img.astype('int32') - 1 # Go back from the 1 index to the 0 index
# We needed 1 index for the classes because we can only read images with
# positive integers
elif img.dtype is np.dtype(np.uint16):
img = cv2.cvtColor(img, cv2.COLOR_BGRA2RGBA)
img = img.astype('float32') / (2**16 - 1)
else:
img = cv2.cvtColor(img, cv2.COLOR_BGRA2RGBA)
img = img.astype('float32') / (2**8 - 1)
output[name] = ch.from_numpy(img).permute(2, 0, 1)
            # Avoid a memory leak: free the EXR image rather than keeping every render in memory
bpy.data.images.remove(blender_loaded_image)
return output
Renderer = Blender
```
#### File: threedb/result_logging/json_logger.py
```python
import copy
import importlib
import shutil
from os import path
from typing import Any, Dict, Iterable, Tuple
import numpy as np
import orjson as json
import torch as ch
from threedb.result_logging.base_logger import BaseLogger
from threedb.utils import CyclicBuffer
def clean_key(k: Iterable[str]) -> str:
"""
Utility function for formatting keys.
This is a no-op if the input is a string, otherwise expects an iterable
of strings, which it joins with a period.
"""
return k if isinstance(k, str) else '.'.join(k)
def clean_value(val: Any):
"""
Utility function for formatting tensors.
Converts torch tensors to numpy, is a no-op for all other types.
"""
return val.numpy() if ch.is_tensor(val) else val
def json_default(obj: np.ndarray) -> str:
"""
    Another utility function; turns numpy arrays into strings and raises a
    ``TypeError`` for any other type it is asked to serialize.
"""
if isinstance(obj, np.ndarray):
return str(obj)
raise TypeError
def clean_log(log_d: dict, key_blacklist: Tuple[str, str] = ('image', 'result_ix')) -> dict:
"""
Cleans a dictionary for log-writing. In particular, all keys (expected to
be either strings or iterables of strings) are converted to strings, and
all torch tensors are converted to numpy arrays.
"""
cleaned = {}
for k, val in log_d.items():
if k in key_blacklist:
continue
clean_v = clean_log(val) if isinstance(val, dict) else clean_value(val)
cleaned[clean_key(k)] = clean_v
return cleaned
class JSONLogger(BaseLogger):
def __init__(self,
root_dir: str,
result_buffer: CyclicBuffer,
config: Dict[str, Dict[str, Any]]) -> None:
"""
        A logger that writes all of an experiment's metadata and results into a JSON file.
"""
super().__init__(root_dir, result_buffer, config)
fname = path.join(root_dir, 'details.log')
self.handle = open(fname, 'ab+')
self.regid = self.buffer.register()
self.evaluator = importlib.import_module(self.config['evaluation']['module']).Evaluator
if 'label_map' in config['inference']:
classmap_fname = path.join(root_dir, 'class_maps.json')
print(f"==> [Saving class maps to {classmap_fname}]")
shutil.copyfile(config['inference']['label_map'], classmap_fname)
print(f'==> [Logging to the JSON file {fname} with regid {self.regid}]')
def log(self, item: Dict[str, Any]) -> None:
"""Concrete implementation of
:meth:`threedb.result_logging.base_logger.BaseLogger.log`.
Parameters
----------
item : Dict[str, Any]
The item to be logged.
"""
item = copy.deepcopy(item)
rix = item['result_ix']
buffer_data = self.buffer[rix]
result = {k: v for (k, v) in buffer_data.items() if k in self.evaluator.KEYS}
for k in ['id', 'environment', 'model', 'render_args']:
result[k] = item[k]
result['output_type'] = self.evaluator.output_type
cleaned = clean_log(result)
encoded = json.dumps(cleaned, default=json_default,
option=json.OPT_SERIALIZE_NUMPY | json.OPT_APPEND_NEWLINE)
self.buffer.free(rix, self.regid)
self.handle.write(encoded)
def end(self):
"""Concrete implementation of
:meth:`threedb.result_logging.base_logger.BaseLogger.end`.
Closes the necessary file handle.
"""
self.handle.close()
Logger = JSONLogger
``` |
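The helper functions are easy to sanity-check in isolation: `clean_log` drops blacklisted keys, joins tuple keys with a dot, and converts tensors to numpy before serialisation. A small sketch, assuming the module is importable under the path used in its own imports:

```python
import torch as ch
# from threedb.result_logging.json_logger import clean_log  # assumed import path

record = {
    ('output', 'logits'): ch.tensor([0.1, 0.9]),
    'environment': 'studio.blend',
    'image': 'dropped: on the key blacklist',
    'result_ix': 42,
}
print(clean_log(record))
# approximately: {'output.logits': array([0.1, 0.9], dtype=float32), 'environment': 'studio.blend'}
```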
{
"source": "3D-Beacons/3d-beacons-hub-api",
"score": 3
} |
#### File: 3d-beacons-hub-api/tests/utils.py
```python
import json
async def request_get_stub(url: str, stub_for: str, status_code: int = 200):
"""Returns an object with stub response.
Args:
url (str): A request URL.
        stub_for (str): Type of stub required.
        status_code (int, optional): HTTP status code for the stub response. Defaults to 200.
Returns:
StubResponse: A StubResponse object.
"""
return StubResponse(stub_for=stub_for, status_code=status_code)
class StubResponse:
"""A Stub response class to return a response from JSON."""
def __init__(self, status_code: int, stub_for: str) -> None:
self.status_code = status_code
self.prefix = "tests/stubs/"
self.data = {}
with open(f"{self.prefix}/{stub_for}.json") as f:
self.data = json.load(f)
def json(self):
return self.data
``` |
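In a test the stub usually replaces the real HTTP call (for example via pytest's `monkeypatch`), so the endpoint under test never leaves the process. A hedged sketch; the fixture name below is hypothetical and must exist under `tests/stubs/`:

```python
import asyncio

def test_stub_reads_fixture():
    response = asyncio.run(request_get_stub("http://example.org", stub_for="pdb_summary"))
    assert response.status_code == 200
    assert response.json() is not None
```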
{
"source": "3dcauldron/repo-to-rename",
"score": 3
} |
#### File: repo-to-rename/vendingmachine/database.py
```python
from pymongo import MongoClient
from bson.objectid import ObjectId
import gridfs
from datetime import datetime
import mimetypes
from pprint import pprint
class Database(object):
def __init__(self,config=None):
self.databaseUrl = ''
self.dbname = ''
self.dbuser = ''
self.dbpass = ''
#get database info from file or parameters
if config is None:
self.configFile = 'vendingmachine/config.txt'
self.loadConfig()
else:
self.databaseUrl = config['dburl']
self.dbname = config['dbname']
#initialize database client
uri = 'mongodb://%s:%s@%s/%s?authMechanism=SCRAM-SHA-1'%(self.dbuser,self.dbpass,self.databaseUrl,self.dbname)
self.dbclient = MongoClient(uri)
        #define the database name
        self.db = self.dbclient[self.dbname]
        #define the db file system name
self.fs = gridfs.GridFS(self.db)
#define the collection names
self.config = self.db.config
self.things = self.db.things
def loadConfig(self):
print("loading config")
with open(self.configFile) as config:
for line in config:
line = line.replace('\n','')
line = line.split('::')
if line[0].upper() == 'DBURL':
self.databaseUrl = line[1]
elif line[0].upper() == 'DBNAME':
self.dbname = line[1]
elif line[0].upper() == 'DBUSER':
self.dbuser = line[1]
elif line[0].upper() == 'DBPASS':
self.dbpass = line[1]
def cleanSku(self,sku):
sku = sku.upper()
sku = sku.replace(' ','')
sku = sku.replace('\n','')
sku = sku.replace('\t','')
return sku
def getSettings(self):
return self.config.find_one({"_id":"settings"})
def getLocationsData(self, ins):
return self.db['locations'].find(ins)
def insertLocation(self,location):
self.db.locations.insert_one(location.to_dict())
def deleteLocation(self,locationID):
locationID = ObjectId(locationID)
self.db.locations.delete_one({'_id':locationID})
def getLocationById(self,locationID):
locationID = ObjectId(locationID)
return self.db.locations.find_one({'_id':locationID})
def updateLocation(self,locationID,location):
locationID = ObjectId(locationID)
l = location.to_dict()
self.db.locations.find_one_and_update({'_id':locationID},
{"$set": {
"name": l['name'],
"address": l['address'],
"contact_number": l['contact_number'],
"GPS": l['GPS'],
"machine": l['machine'],
"history": l['history'],
"notes": l['notes'],
"contract": l['contract']
}})
if __name__ == "__main__":
db = Database()
print(db.getSettings())
input()
``` |
{
"source": "3DCdsc/Introduction_to_Programming",
"score": 3
} |
#### File: docs/Day_4/main.py
```python
from flask import Flask
import pyrebase
import json
app = Flask(__name__)
config = {
"apiKey": "API KEY",
"authDomain": "AUTH DOMAIN",
"databaseURL": "DATABASE_URL"
}
@app.route('/')
def hello_world():
return 'Hello, World!'
@app.route('/books', methods=["GET"])
def get_books():
"""
This endpoint returns all books from firebase
If specified, available filters:
- title
- category
"""
return 'These are all my books'
@app.route('/upload-books', methods=["POST"])
def upload_book():
"""
This endpoint takes in a book via FormData and writes it to firebase.
Returns the Book object
"""
return
if __name__ == "__main__":
app.run(debug=True)
``` |
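One way the `/books` stub might eventually be filled in, shown here as a separate sketch rather than the tutorial's actual solution: read the `books` node with pyrebase and apply the optional `title`/`category` filters from the query string. This assumes the `config` dict is completed with real project values (pyrebase also expects a `storageBucket` entry) and that books are stored under a top-level `books` node keyed by push ids.

```python
from flask import request, jsonify

firebase = pyrebase.initialize_app(config)
db = firebase.database()

def get_books_impl():
    title = request.args.get("title")
    category = request.args.get("category")
    # .val() returns a dict keyed by push id (or None when the node is empty)
    books = list((db.child("books").get().val() or {}).values())
    if title:
        books = [b for b in books if b.get("title") == title]
    if category:
        books = [b for b in books if b.get("category") == category]
    return jsonify(books)
```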
{
"source": "3ddelano/graph-visualizer-python",
"score": 3
} |
#### File: graph_gui/graph_data/edge.py
```python
class Edge:
_id = 0
def __init__(self, *args):
if len(args) <= 3:
# args = [nodeA, nodeB, weigth?]
# Give a unique id
self.id = Edge._id
Edge._id += 1
self.nodeA = args[0]
self.nodeB = args[1]
if len(args) >= 3:
self.weight = float(args[2])
else:
self.weight = 1.0
else:
# args = [id, nodeA, nodeB, weight]
self.id = args[0]
self.nodeA = args[1]
self.nodeB = args[2]
Edge._id = self.id + 1 # For the next edge
self.weight = args[3]
```
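The variadic constructor accepts either `(nodeA, nodeB[, weight])`, which auto-assigns an id, or `(id, nodeA, nodeB, weight)` as used when loading `edgelist.csv`. A quick sketch (node objects are placeholders; the printed id values assume a fresh interpreter):

```python
a, b = object(), object()

e1 = Edge(a, b)            # auto id, default weight 1.0
e2 = Edge(a, b, 2.5)       # auto id, explicit weight
e3 = Edge(10, a, b, 0.75)  # explicit id, e.g. when loading from file

print(e1.id, e1.weight)    # 0 1.0
print(e3.id, Edge._id)     # 10 11 -- the counter continues after the explicit id
```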
#### File: graph_gui/graph_data/graph.py
```python
import os
import heapq
from .interfaces.graph_interface import GraphInterface
from .node import Node
from .edge import Edge
class Graph(GraphInterface):
def __init__(self):
self.nodes = []
self.edges = []
# ----------
# Node Methods
# ----------
def add_node(self, node):
self.nodes.append(node)
def delete_node(self, node_or_id):
node = self.resolve_node(node_or_id)
if node:
self.nodes.remove(node)
def get_adjacent_nodes(self, node_or_id):
node = self.resolve_node(node_or_id)
if not node:
return []
connected_nodes = []
for edge in self.edges:
if edge.nodeA == node:
connected_nodes.append(edge.nodeB)
elif edge.nodeB == node:
connected_nodes.append(edge.nodeA)
return connected_nodes
def get_node_by_id(self, node_id):
for node in self.nodes:
if node.id == node_id:
return node
return None
# ----------
# Edge Methods
# ----------
def add_edge(self, edge):
self.edges.append(edge)
def update_edge_weight(self, edge_or_id, weight):
edge = self.resolve_edge(edge_or_id)
if edge:
edge.weight = weight
def delete_edge(self, edge_or_id):
edge = self.resolve_edge(edge_or_id)
self.edges.remove(edge)
def get_adjacent_edges(self, node_or_id):
node = self.resolve_node(node_or_id)
if not node:
return []
connected_edges = []
for edge in self.edges:
if edge.nodeA == node or edge.nodeB == node:
connected_edges.append(edge)
return connected_edges
def get_edge_by_id(self, edge_id):
for edge in self.edges:
if edge.id == edge_id:
return edge
return None
def get_edge_between_nodes(self, nodeA_or_id, nodeB_or_id):
nodeA = self.resolve_node(nodeA_or_id)
nodeB = self.resolve_node(nodeB_or_id)
if not nodeA or not nodeB:
return None
for edge in self.edges:
if (edge.nodeA == nodeA and edge.nodeB == nodeB) or (
edge.nodeA == nodeB and edge.nodeB == nodeA
):
return edge
return None
# ----------
# Pathfinding Methods
# ----------
def find_shortest_path(self, algorithm, start_node_or_id, end_node_or_id):
found_path = self._get_shortest_path(
algorithm, start_node_or_id, end_node_or_id
)
if found_path:
return found_path["final_path"]
def animate_shortest_path(self, algorithm, start_node_or_id, end_node_or_id):
return self._get_shortest_path(algorithm, start_node_or_id, end_node_or_id)
def _get_shortest_path(self, algorithm, start_node_or_id, end_node_or_id):
start_node = self.resolve_node(start_node_or_id)
end_node = self.resolve_node(end_node_or_id)
if not start_node or not end_node:
print("Invalid start or end node to find shortest path")
return None
if algorithm == "dijkstra":
return self.dijkstra(start_node, end_node)
elif algorithm == "astar":
return self.astar(start_node, end_node)
elif algorithm == "bfs":
return self.bfs(start_node, end_node)
elif algorithm == "dfs":
return self.dfs(start_node, end_node)
else:
print("Invalid algorithm to find shortest path")
return None
def get_euclidean_distance(self, nodeA, nodeB):
x1 = nodeA.pos[0]
y1 = nodeA.pos[1]
x2 = nodeB.pos[0]
y2 = nodeB.pos[1]
return ((x1 - x2) ** 2 + (y1 - y2) ** 2) ** 0.5
def get_manhattan_distance(self, nodeA, nodeB):
x1 = nodeA.pos[0]
y1 = nodeA.pos[1]
x2 = nodeB.pos[0]
y2 = nodeB.pos[1]
return abs(x1 - x2) + abs(y1 - y2)
def dijkstra(self, start_node, end_node):
dist = {}
        # Initialize distances to a large sentinel value (effectively infinity)
        for node in self.nodes:
            dist[node.id] = 1e7
        dist[start_node.id] = 0
        path = {}
        path[start_node.id] = None
        cur_node = start_node
visited_nodes = []
visited_edges = []
while cur_node:
visited_nodes.append(cur_node.id)
for adj_node in self.get_adjacent_nodes(cur_node):
if adj_node.id in visited_nodes:
continue
edge = self.get_edge_between_nodes(cur_node, adj_node)
visited_edges.append([cur_node.id, adj_node.id, edge])
edge_cost = edge.weight
# Relaxation
if dist[cur_node.id] + edge_cost < dist[adj_node.id]:
dist[adj_node.id] = dist[cur_node.id] + edge_cost
path[adj_node.id] = cur_node.id
                # If the tentative distance is not better, keep the existing predecessor
# Find the next node to visit (the one with least cost from the starting node)
dist_list = []
for node_id, d in dist.items():
if not node_id in visited_nodes:
dist_list.append([node_id, d])
dist_list = sorted(dist_list, key=lambda x: x[1])
if len(dist_list) == 0:
cur_node = None
else:
cur_node_id = dist_list[0][0]
cur_node = self.get_node_by_id(cur_node_id)
return {
"final_path": self.build_path(path, start_node, end_node),
"visited_nodes": visited_nodes,
"visited_edges": visited_edges,
}
def astar(self, start_node, end_node, heu_func="euclidean"):
path = {}
g_scores = {node.id: float("inf") for node in self.nodes} # g_score
g_scores[start_node.id] = 0
        # Heuristic distance (Euclidean or Manhattan) from each node to the end node
heu = {} # h_score
if heu_func == "euclidean":
heu = {
node.id: self.get_euclidean_distance(node, end_node)
for node in self.nodes
}
elif heu_func == "manhattan":
heu = {
node.id: self.get_manhattan_distance(node, end_node)
for node in self.nodes
}
else:
print("Invalid heuristic function in A*")
raise Exception("Invalid heuristic function in A*")
f_scores = {}
f_scores[start_node.id] = g_scores[start_node.id] + heu[start_node.id]
openset = [] # f_score stored in a min heap
heapq.heappush(openset, (heu[start_node.id], start_node))
visited_nodes = []
visited_edges = []
while len(openset) > 0:
_f_score, current_node = heapq.heappop(openset)
if current_node == end_node:
# Reached the goal
print("Reached the goal node in A*")
return {
"final_path": self.build_path(path, start_node, end_node),
"visited_nodes": visited_nodes,
"visited_edges": visited_edges,
}
# Visiting the node with id=current_node.id
visited_nodes.append(current_node.id)
for node in self.get_adjacent_nodes(current_node.id):
                # Get the edge between current_node and each neighbour
edge = self.get_edge_between_nodes(current_node.id, node.id)
temp_g_score = g_scores[current_node.id] + edge.weight
temp_f_score = temp_g_score + heu[node.id]
visited_edges.append([current_node.id, node.id, edge])
                if node.id in visited_nodes and temp_g_score >= g_scores[node.id]:
continue
if not node.id in visited_nodes or temp_f_score < f_scores[node.id]:
g_scores[node.id] = temp_g_score
f_scores[node.id] = temp_f_score
path[node.id] = current_node.id
if not (f_scores[node.id], node) in openset:
heapq.heappush(openset, (f_scores[node.id], node))
# No path found
print("No path found in A*")
return {
"final_path": [],
"visited_nodes": visited_nodes,
"visited_edges": visited_edges,
}
def bfs(self, start_node, end_node):
visited_nodes = []
visited_edges = []
queue = []
queue.append(start_node)
path = {}
while queue:
cur_node = queue.pop(0)
visited_nodes.append(cur_node.id)
if cur_node == end_node:
# Reached end node
break
for adj_node in self.get_adjacent_nodes(cur_node):
if adj_node.id in visited_nodes:
continue
edge = self.get_edge_between_nodes(cur_node, adj_node)
visited_edges.append([cur_node.id, adj_node.id, edge])
path[adj_node.id] = cur_node.id
if adj_node == end_node:
queue = []
break
queue.append(adj_node)
return {
"final_path": self.build_path(path, start_node, end_node),
"visited_nodes": visited_nodes,
"visited_edges": visited_edges,
}
def dfs(self, start_node, end_node):
visited_nodes = []
visited_edges = []
stack = []
stack.append(start_node)
path = {}
while stack:
cur_node = stack[-1]
stack.pop()
if not cur_node.id in visited_nodes:
for visited_node_id in visited_nodes:
edge = self.get_edge_between_nodes(
cur_node, self.get_node_by_id(visited_node_id)
)
if edge:
visited_edges.append([visited_node_id, cur_node.id, edge])
path[cur_node.id] = visited_node_id
visited_nodes.append(cur_node.id)
if cur_node.id == end_node.id:
stack = []
break
for adj_node in self.get_adjacent_nodes(cur_node):
if adj_node.id in visited_nodes:
continue
stack.append(adj_node)
print("Visited nodes: ", [str(node_id) for node_id in visited_nodes])
return {
"final_path": self.build_path(path, start_node, end_node),
"visited_nodes": visited_nodes,
"visited_edges": visited_edges,
}
def build_path(self, path, start_node, end_node):
current_node = end_node
path_list = []
current_node_id = current_node.id
start_node_id = start_node.id
while current_node_id and current_node_id != start_node_id:
path_list.append(current_node_id)
current_node_id = path[current_node_id]
path_list.append(start_node_id)
path_list.reverse()
edge_path = []
if len(path_list) < 2:
return None
# Find the edge between every 2 adjacent nodes in path_list
for i in range(len(path_list) - 1):
edge = self.get_edge_between_nodes(path_list[i], path_list[i + 1])
edge_path.append(edge)
ret = []
for i in range(len(path_list)):
if i == 0:
ret.append([path_list[i], None])
else:
ret.append([path_list[i], edge_path[i - 1]])
return ret
# ----------
# Misc Methods
# ----------
def save_to_files(self, nodelist_path, edgelist_path):
# Save the list of nodes to a csv file
print(f"Saving graph to {nodelist_path} and {edgelist_path}")
try:
with open(nodelist_path, "w") as file:
for i, node in enumerate(self.nodes):
# id,x,y
line = f"{node.id},{node.pos[0]},{node.pos[1]}"
if i != len(self.nodes) - 1:
line += "\n"
file.write(line)
with open(edgelist_path, "w") as file:
for i, edge in enumerate(self.edges):
# id,nodeA_id,nodeB_id,weight
line = f"{edge.id},{edge.nodeA.id},{edge.nodeB.id},{edge.weight}"
if i != len(self.edges) - 1:
line += "\n"
file.write(line)
return True
except Exception as e:
return False
def load_from_files(self, nodelist_path, edgelist_path):
# Load nodes and edges from nodelist_path and edgelist_path
if not os.path.exists(nodelist_path) or not os.path.exists(edgelist_path):
print(
f"Load Graph Error: No {nodelist_path} and {edgelist_path} files found"
)
return
print(f"Loading graph from {nodelist_path} and {edgelist_path}")
self.nodes = []
self.edges = []
with open(nodelist_path, "r") as file:
for line in file:
# id,x,y
line = line.strip().split(",")
node = Node(int(line[0]), float(line[1]), float(line[2]))
self.add_node(node)
with open(edgelist_path, "r") as file:
for line in file:
# id,nodeA_id,nodeB_id,weight?
line = line.strip().split(",")
nodeA = self.get_node_by_id(int(line[1]))
nodeB = self.get_node_by_id(int(line[2]))
weight = 1.0
if len(line) >= 4:
weight = float(line[3])
edge = Edge(int(line[0]), nodeA, nodeB, weight)
self.add_edge(edge)
def resolve_node(self, node_or_id):
node = None
if isinstance(node_or_id, int):
node = self.get_node_by_id(node_or_id)
if isinstance(node_or_id, float):
node = self.get_node_by_id(int(node_or_id))
elif isinstance(node_or_id, Node):
node = node_or_id
return node
def resolve_edge(self, edge_or_id):
edge = None
if isinstance(edge_or_id, int):
edge = self.get_edge_by_id(edge_or_id)
elif isinstance(edge_or_id, Edge):
edge = edge_or_id
return edge
```
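An end-to-end check of the pathfinding API: build a triangle where the direct edge is more expensive than the two-hop route and ask for the Dijkstra path. The import paths are hypothetical (adjust to the actual package layout), and `Node(id, x, y)` matches how `load_from_files` constructs nodes.

```python
from graph_gui.graph_data.graph import Graph  # hypothetical import paths
from graph_gui.graph_data.node import Node
from graph_gui.graph_data.edge import Edge

g = Graph()
a, b, c = Node(0, 0.0, 0.0), Node(1, 100.0, 0.0), Node(2, 50.0, 80.0)
for n in (a, b, c):
    g.add_node(n)
g.add_edge(Edge(a, b, 10.0))  # expensive direct edge
g.add_edge(Edge(a, c, 2.0))
g.add_edge(Edge(c, b, 2.0))

path = g.find_shortest_path("dijkstra", a, b)
print([node_id for node_id, _edge in path])  # [0, 2, 1]
```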
#### File: src/graph_gui/graphgui.py
```python
import os
from time import time, sleep
import tkinter.filedialog as filedialog
import tkinter as tk
from .graph_data.graph import Graph
from .constants import START_NODE_COLOR, END_NODE_COLOR, SCREEN_BG_COLOR
from .animations.path_animation import PathAnimation
from .animations.bfs_animation import BFSAnimation
from .animations.dfs_animation import DFSAnimation
from .interfaces.drawable import Drawable
from .edgegui import EdgeGUI
from .nodegui import NodeGUI
VERSION = "v.1.2"
DOUBLE_CLICK_TIME = 0.3
NODELIST_FILEPATH = "nodelist.csv"
EDGELIST_FILEPATH = "edgelist.csv"
ANIMATION_FRAME_DURATION = 9
class GraphGUI(Graph, Drawable):
def __init__(self, canvas):
# Selected node
self.selected_node = None
self.dragging_node = None
# Selected edge
self.selected_edge = None
self.dragging_edge = None
# [nodeA_pos_x, nodeA_pos_y, nodeB_pos_x, nodeB_pos_y, drag_start_x, drag_start_y]
self.dragging_edge_offsets = [0, 0, 0, 0, 0, 0]
# Double click time
self.last_leftclick_time = time()
self.last_rightclick_time = time()
self.animation = None
self.frames = 0
self.help_visible = False
self.node_id_visible = False
# Path finding UI
self.path_algorithm_name = tk.StringVar(value="dijkstra")
self.start_node = None
self.goal_node = None
self.path = []
super().__init__()
self.init_ui(canvas)
def deselect_nodes(self):
if self.selected_node:
self.selected_node.is_selected = False
self.selected_node = None
if self.dragging_node:
self.dragging_node.is_dragging = False
self.dragging_node = None
def deselect_edges(self):
if self.selected_edge:
self.selected_edge.is_selected = False
self.selected_edge = None
if self.dragging_edge:
self.dragging_edge.is_dragging = False
self.dragging_edge = None
def deselect_path(self):
for i in range(len(self.path)):
node = self.get_node_by_id(self.path[i][0])
edge = self.path[i][1]
node.is_selected = False
if edge:
edge.is_selected = False
edge.nodeA.is_selected = False
edge.nodeB.is_selected = False
if self.start_node and self.goal_node:
self.start_node = None
self.goal_node = None
self.path = []
def get_clicked_node(self, x, y):
for node in self.nodes:
if node.is_clicked(x, y):
return node
return None
def get_clicked_edge(self, x, y):
for edge in self.edges:
if edge.is_clicked(x, y):
return edge
return None
def on_right_click(self, x, y):
self.deselect_path()
clicked_edge = self.get_clicked_edge(x, y)
if not clicked_edge:
# Empty area was right clicked
self.deselect_edges()
self.last_rightclick_time = time()
return
# Got a right click on an edge
self.deselect_nodes()
is_double_click = False
time_since_last_rightclick = time() - self.last_rightclick_time
if time_since_last_rightclick <= DOUBLE_CLICK_TIME:
# Got a double click on an edge
is_double_click = True
if is_double_click:
# It was a double click
            # If an edge was previously selected, deselect it
if self.selected_edge:
self.selected_edge.is_selected = False
self.selected_edge = None
# Start dragging this edge
clicked_edge.is_dragging = True
self.dragging_edge = clicked_edge
self.dragging_edge_offsets = [
clicked_edge.nodeA.pos[0],
clicked_edge.nodeA.pos[1],
clicked_edge.nodeB.pos[0],
clicked_edge.nodeB.pos[1],
x,
y,
]
else:
# It was a single right click
if self.dragging_edge:
                # An edge was being dragged; stop the drag
self.dragging_edge.is_dragging = False
self.dragging_edge = None
elif self.selected_edge:
# There was already a selected edge
if clicked_edge == self.selected_edge:
# The same edge was selected again
# Deselect it
self.selected_edge.is_selected = False
self.selected_edge = None
else:
# A different edge was selected
# Deselect the selected edge and select the clicked edge
self.selected_edge.is_selected = False
self.selected_edge = clicked_edge
self.selected_edge.is_selected = True
else:
# There was no selected node
# Store the node as selected node
self.selected_edge = clicked_edge
clicked_edge.is_selected = True
self.last_rightclick_time = time()
def on_left_click(self, x, y):
self.deselect_path()
clicked_node = self.get_clicked_node(x, y)
self.deselect_edges()
if clicked_node:
# A node was left clicked
time_since_last_leftclick = time() - self.last_leftclick_time
if time_since_last_leftclick <= DOUBLE_CLICK_TIME:
# Got a double left click
# Start dragging this node
clicked_node.is_dragging = True
self.dragging_node = clicked_node
# If a node was selected deselect it
if self.selected_node:
self.selected_node.is_selected = False
self.selected_node = None
else:
# It was a single click
if self.dragging_node:
                    # A node was being dragged; stop the drag
self.dragging_node.is_dragging = False
self.dragging_node = None
elif self.selected_node:
# There was already a selected node
if clicked_node == self.selected_node:
# The same node was clicked again
# Deselect it
self.selected_node.is_selected = False
self.selected_node = None
else:
# A different node was clicked
                    # Create an edge between the two nodes if there isn't one
edge_exists = self.get_edge_between_nodes(
self.selected_node, clicked_node
)
if not edge_exists:
edge = EdgeGUI(self.selected_node, clicked_node)
self.add_edge(edge)
# Deselect the selected node and select the clicked node
self.selected_node.is_selected = False
self.selected_node = clicked_node
self.selected_node.is_selected = True
else:
# There was no selected node
# Store the node as selected node
self.selected_node = clicked_node
clicked_node.is_selected = True
else:
# Empty area was clicked
node = NodeGUI(x, y)
self.add_node(node)
if self.selected_node:
# A node is already selected
# Draw an edge from selected node to new clicked node
edge = EdgeGUI(self.selected_node, node)
self.add_edge(edge)
# Deselect the selected node
self.selected_node.is_selected = False
# Select the new node
self.selected_node = node
self.selected_node.is_selected = True
else:
# There was no selected node
# Mark the new node as the selected one
node.is_selected = True
self.selected_node = node
self.last_leftclick_time = time()
def ondrag(self, x, y):
if self.dragging_node:
# Node is being dragged
self.dragging_node.pos[0] = x
self.dragging_node.pos[1] = y
elif self.dragging_edge:
# Edge is being dragged
nodeA = self.dragging_edge.nodeA
nodeB = self.dragging_edge.nodeB
offsets = self.dragging_edge_offsets
nodeA.pos[0] = offsets[0] + x - offsets[4]
nodeA.pos[1] = offsets[1] + y - offsets[5]
nodeB.pos[0] = offsets[2] + x - offsets[4]
nodeB.pos[1] = offsets[3] + y - offsets[5]
def on_delete(self):
if self.selected_node:
# Delete the node
node = self.selected_node
node.is_selected = False
self.delete_node(node)
# Delete connected edges
connected_edges = self.get_adjacent_edges(node)
for edge in connected_edges:
self.delete_edge(edge)
self.selected_node = None
elif self.selected_edge:
# Delete the edge
edge = self.selected_edge
edge.is_selected = False
self.delete_edge(edge)
self.selected_edge = None
def draw(self, tur, canvas):
# Check if animation ended
if self.animation and self.animation.is_ended():
self.animation = None
self.deselect_path()
sleep(1)
# Animate the animation if any
animation_nodes = []
animation_edges = []
if self.animation:
if self.frames % ANIMATION_FRAME_DURATION == 0:
# Take a animation step
self.animation.one_step()
# Get the drawn nodes and edges from animation
animation_nodes = self.animation.get_drawn_nodes()
animation_edges = self.animation.get_drawn_edges()
# Draw all edges
for edge in self.edges:
if not isinstance(edge, EdgeGUI):
continue
animation_drew_edge = False
for edge_data in animation_edges:
if edge == edge_data["edge"]:
edge.draw(tur, color=edge_data["color"])
animation_drew_edge = True
break
if not animation_drew_edge:
edge.draw(tur)
edge.draw_weight(canvas)
# Draw all nodes
for node in self.nodes:
if not isinstance(node, NodeGUI):
continue
animation_drew_node = False
for node_data in animation_nodes:
if node == node_data["node"]:
node.draw(tur, color=node_data["color"])
animation_drew_node = True
break
if not animation_drew_node:
if node == self.start_node:
node.draw(tur, color=START_NODE_COLOR)
elif node == self.goal_node:
node.draw(tur, color=END_NODE_COLOR)
else:
node.draw(tur)
if self.node_id_visible:
node.draw_id(canvas)
self.frames += 1
# Show help text
self.draw_help(canvas)
def draw_help(self, canvas):
main_lines = ["H key - Toggle help text"]
lines = [
"Single Left Click - Add node / Select Node",
"Single Right Click - Select Edge",
"Double Left Click - Move Node",
"Double Right Click - Move Edge",
"",
"D key - Delete Node/Edge",
"W key - Update Edge Weight",
"S key - Save Data",
"L key - Load Data",
"B key - Start BFS at selected node",
"N key - Start DFS at selected node",
"F key - Toggle node Id visibility",
"",
"github.com/3ddelano/graph-visualizer-python",
]
font_size = 10
font = f"Arial {font_size} normal"
draw_y = 50
for line in main_lines:
# Draw the text
canvas.create_text(
0, draw_y, text=line, font=font, fill="white", anchor="w"
)
draw_y += font_size + 10
if not self.help_visible:
return
for line in lines:
# Draw the text
canvas.create_text(
0, draw_y, text=line, font=font, fill="white", anchor="w"
)
draw_y += font_size + 10
def on_bfs_start(self):
# Check if a node is selected
if not self.selected_node:
print("No node is selected for BFS")
tk.messagebox.showerror("Error", "No node is selected for BFS")
return
# Start bfs from the selected node
print("Starting BFS at node id=", self.selected_node.id)
self.animation = BFSAnimation(self)
self.animation.set_start_node(self.selected_node)
self.deselect_nodes()
tk.messagebox.showinfo("BFS result", self.animation.get_result_string())
def on_dfs_start(self):
# Check if a node is selected
if not self.selected_node:
print("No node is selected for DFS")
tk.messagebox.showerror("Error", "No node is selected for DFS")
return
# Start dfs from the selected node
print("Starting DFS at node id=", self.selected_node.id)
self.animation = DFSAnimation(self)
self.animation.set_start_node(self.selected_node)
self.deselect_nodes()
tk.messagebox.showinfo("DFS result", self.animation.get_result_string())
def on_help_toggle(self):
self.help_visible = not self.help_visible
def on_nodeid_toggle(self):
self.node_id_visible = not self.node_id_visible
def on_update_weight(self):
if not self.selected_edge:
print("No edge is selected to set weight")
tk.messagebox.showerror(
"Set Weight Error", "No edge is selected to set weight"
)
return
default_weight = round(
self.get_euclidean_distance(
self.selected_edge.nodeA, self.selected_edge.nodeB
),
2,
)
new_weight = tk.simpledialog.askstring(
title="Set Edge Weight",
prompt="Enter the new weight for the edge",
initialvalue=str(default_weight),
)
if new_weight is None:
return
try:
new_weight = float(new_weight)
self.update_edge_weight(self.selected_edge, new_weight)
except Exception as e:
print("Invalid weight provided to update edge weight")
tk.messagebox.showerror(
"Update Weight Error",
"Invalid weight. Weight should be a valid number.",
)
return
def on_save(self):
save_folder = filedialog.askdirectory(mustexist=True)
if save_folder == "":
# User cancelled the save
return
success = self.save_to_files(
os.path.join(save_folder, NODELIST_FILEPATH),
os.path.join(save_folder, EDGELIST_FILEPATH),
)
if success:
tk.messagebox.showinfo(
"Saving Graph", "Graph saved to nodelist.csv and edgelist.csv"
)
def on_load(self):
load_folder = filedialog.askdirectory(mustexist=True)
if load_folder == "":
            # User cancelled the load
return
node_path = os.path.join(load_folder, NODELIST_FILEPATH)
edge_path = os.path.join(load_folder, EDGELIST_FILEPATH)
if not os.path.exists(node_path):
tk.messagebox.showerror(
"Loading Graph Error", "nodelist.csv file not found"
)
return
if not os.path.exists(edge_path):
tk.messagebox.showerror(
"Loading Graph Error", "edgelist.csv file not found"
)
return
self.deselect_nodes()
self.deselect_edges()
self.deselect_path()
self.load_from_files(node_path, edge_path)
self.convert_graph_to_gui()
def on_set_start_node(
self,
):
if not self.selected_node:
print("No node is selected")
tk.messagebox.showerror("Set Start Node Error", "No node is selected")
return
self.start_node = self.selected_node
self.deselect_nodes()
tk.messagebox.showinfo("Set Start Node", "Start node set successfully")
def on_set_end_node(
self,
):
if not self.selected_node:
print("No node is selected")
tk.messagebox.showerror("Set Goal Node Error", "No node is selected")
return
self.goal_node = self.selected_node
self.deselect_nodes()
tk.messagebox.showinfo("Set Goal Node", "Goal node set successfully")
def on_find_path(self):
if self.animation:
self.animation = None
# Ensure that start and goal nodes are set
if not self.start_node:
tk.messagebox.showerror("Find Path Error", "Start node not set")
return
if not self.goal_node:
tk.messagebox.showerror("Find Path Error", "Goal node not set")
return
temp_start_node = self.start_node
temp_end_node = self.goal_node
self.deselect_path()
self.start_node = temp_start_node
self.goal_node = temp_end_node
# Array of node ids to be used for the path
self.path = []
self.path = self.find_shortest_path(
self.path_algorithm_name.get(), self.start_node, self.goal_node
)
if len(self.path) < 2:
tk.messagebox.showerror("Find Path Error", "No path found")
return
for i in range(len(self.path)):
node = self.get_node_by_id(self.path[i][0])
edge = self.path[i][1]
node.is_selected = True
if edge:
edge.is_selected = True
def on_anim_find_path(self):
# Ensure that start and goal nodes are set
if not self.start_node:
tk.messagebox.showerror("Find Path Error", "Start node not set")
return
if not self.goal_node:
tk.messagebox.showerror("Find Path Error", "Goal node not set")
return
temp_start_node = self.start_node
temp_end_node = self.goal_node
self.deselect_path()
self.start_node = temp_start_node
self.goal_node = temp_end_node
animate_data = self.animate_shortest_path(
self.path_algorithm_name.get(), self.start_node, self.goal_node
)
path = animate_data["final_path"]
if not path or (path and len(path) < 2):
tk.messagebox.showerror("Animate Path Error", "No path found")
return
edges = animate_data["visited_edges"]
print(f"Starting {self.path_algorithm_name.get()} path animation")
tk.messagebox.showinfo(
"Path Finding Statistics",
f"Number of nodes visited: {len(animate_data['visited_nodes'])}",
)
self.animation = PathAnimation(
self, self.start_node, self.goal_node, path, edges
)
def convert_graph_to_gui(self):
self.nodes = [NodeGUI(node.id, node.pos[0], node.pos[1]) for node in self.nodes]
self.edges = [
EdgeGUI(
edge.id,
self.get_node_by_id(edge.nodeA.id),
self.get_node_by_id(edge.nodeB.id),
edge.weight,
)
for edge in self.edges
]
def init_ui(self, canvas):
frame = tk.Frame(canvas.master.master)
frame.config(bg=SCREEN_BG_COLOR)
frame.place(x=10, y=10)
pad_x = 1
tk.Button(frame, text="Help Text", command=self.on_help_toggle).pack(
padx=pad_x, side=tk.LEFT
)
tk.Button(frame, text="Node Id", command=self.on_nodeid_toggle).pack(
padx=pad_x, side=tk.LEFT
)
tk.Button(frame, text="Load", command=self.on_load).pack(
padx=pad_x, side=tk.LEFT
)
tk.Button(frame, text="Save", command=self.on_save).pack(
padx=pad_x, side=tk.LEFT
)
tk.Button(frame, text="Delete", command=self.on_delete).pack(
padx=pad_x, side=tk.LEFT
)
tk.Button(frame, text="Set Weight", command=self.on_update_weight).pack(
padx=pad_x, side=tk.LEFT
)
tk.Button(frame, text="BFS Anim", command=self.on_bfs_start).pack(
padx=pad_x, side=tk.LEFT
)
tk.Button(frame, text="DFS Anim", command=self.on_dfs_start).pack(
padx=pad_x, side=tk.LEFT
)
tk.Button(frame, text="Set Start Node", command=self.on_set_start_node).pack(
padx=pad_x, side=tk.LEFT
)
tk.Button(frame, text="Set Goal Node", command=self.on_set_end_node).pack(
padx=pad_x, side=tk.LEFT
)
tk.Button(frame, text="Find Path", command=self.on_find_path).pack(
padx=pad_x, side=tk.LEFT
)
tk.Button(frame, text="Anim Find Path", command=self.on_anim_find_path).pack(
padx=pad_x, side=tk.LEFT
)
# Create radio buttons
tk.Radiobutton(
frame,
text="BFS",
variable=self.path_algorithm_name,
value="bfs",
).pack(side=tk.LEFT, padx=(5, 0))
tk.Radiobutton(
frame,
text="DFS",
variable=self.path_algorithm_name,
value="dfs",
).pack(side=tk.LEFT)
tk.Radiobutton(
frame,
text="Dijkstra",
variable=self.path_algorithm_name,
value="dijkstra",
).pack(side=tk.LEFT)
tk.Radiobutton(
frame,
text="A*",
variable=self.path_algorithm_name,
value="astar",
).pack(side=tk.LEFT)
```
#### File: graph_gui/interfaces/animation_interface.py
```python
import abc
class AnimationInterface(abc.ABC):
@abc.abstractmethod
def one_step(self):
pass
@abc.abstractmethod
def is_ended(self):
pass
@abc.abstractmethod
def get_drawn_nodes(self):
pass
@abc.abstractmethod
def get_drawn_edges(self):
pass
```
#### File: graph_gui/interfaces/drawable.py
```python
import abc
class Drawable(abc.ABC):
@abc.abstractmethod
def draw(self, tur, *args):
pass
``` |
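Anything that should appear on the canvas only needs to subclass `Drawable` and implement `draw`. A toy example, assuming `tur` behaves like a standard `turtle.Turtle` (penup/goto/pendown):

```python
class CrossMarker(Drawable):
    """Toy drawable that draws an X centred at (x, y)."""
    def __init__(self, x, y, size=10):
        self.x, self.y, self.size = x, y, size

    def draw(self, tur, *args):
        for dx, dy in ((-1, -1), (1, -1)):
            tur.penup()
            tur.goto(self.x + dx * self.size, self.y + dy * self.size)
            tur.pendown()
            tur.goto(self.x - dx * self.size, self.y - dy * self.size)
```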
{
"source": "3ddelano/mongo-driver-godot",
"score": 3
} |
#### File: src/gdscript_docs_maker/convert_to_markdown.py
```python
import re
import json
from argparse import Namespace
from typing import List
from gdscript_docs_maker.gdscript.enumeration import Enumeration
from .command_line import OutputFormats
from .config import LOGGER
from .gdscript_objects import GDScriptClasses
from .gdscript.member import Member
from .gdscript.function import Function
from .gdscript.gdscript_class import GDScriptClass, BUILTIN_CLASSES
from .gdscript.project_info import ProjectInfo
from .mkdocs import MkDocsFrontMatter
from .make_markdown import (
MarkdownDocument,
MarkdownSection,
make_bold,
make_code_block,
make_comment,
make_heading,
make_link,
make_list,
make_table_header,
make_table_row,
surround_with_html,
)
GODOT_DOCS_URL = "https://docs.godotengine.org/en/stable/classes/class_{}.html"
def convert_to_markdown(
classes: GDScriptClasses, arguments: Namespace, info: ProjectInfo
) -> List[MarkdownDocument]:
"""
Takes a list of dictionaries that each represent one GDScript class to
convert to markdown and returns a list of markdown documents.
"""
markdown: List[MarkdownDocument] = []
# if arguments.make_index:
# markdown.append(_write_index_page(classes, info))
for entry in classes:
markdown.append(_as_markdown(classes, entry, arguments))
return markdown
def _as_markdown(
classes: GDScriptClasses, gdscript: GDScriptClass, arguments: Namespace
) -> MarkdownDocument:
"""
Converts the data for a GDScript class to a markdown document,
using the command line options.
"""
content: List[str] = []
output_format: OutputFormats = arguments.format
name: str = gdscript.name
if "abstract" in gdscript.metadata:
name += " " + surround_with_html("(abstract)", "small")
if output_format == OutputFormats.MKDOCS:
front_matter: MkDocsFrontMatter = MkDocsFrontMatter.from_data(
gdscript, arguments
)
content += front_matter.as_string_list()
content += [
make_comment(
"Auto-generated from JSON by GDScript docs maker. "
"Do not edit this document directly."
)
+ "\n"
]
# -----
# ----- Title
content += [*make_heading(name, 1)]
if gdscript.extends:
extends_list: List[str] = gdscript.get_extends_tree(classes)
# Get the links to each extend in the extends_tree
extends_links = []
for entry in extends_list:
link = make_link(entry, "../" + entry)
if entry.lower() in BUILTIN_CLASSES:
# Built-in reference
link = make_link(entry, GODOT_DOCS_URL.format(entry.lower()))
extends_links.append(link)
content += [make_bold("Extends:") + " " + " < ".join(extends_links)]
# -----
# ----- Description
description = _replace_references(classes, gdscript, gdscript.description)
if description != "":
content += [*MarkdownSection("Description", 2, [description]).as_text()]
quick_links = []
if gdscript.signals:
quick_links.append(make_link("Signals", "#signals"))
if gdscript.enums:
quick_links.append(make_link("Enumerations", "#enumerations"))
if gdscript.constants:
quick_links.append(make_link("Constants", "#constants-descriptions"))
if gdscript.members:
quick_links.append(make_link("Properties", "#property-descriptions"))
if gdscript.functions:
quick_links.append(make_link("Methods", "#method-descriptions"))
if gdscript.sub_classes:
quick_links.append(make_link("Sub-classes", "#sub-classes"))
if len(quick_links) > 0:
content += [""] + make_list(quick_links)
if gdscript.signals:
content += MarkdownSection(
"Signals", 2, _write_signals(classes, gdscript, output_format)
).as_text()
content += _write_class(classes, gdscript, output_format)
sub_documents: List[MarkdownDocument] = []
if gdscript.sub_classes:
content += MarkdownSection(
"Sub-classes",
2,
make_list(
[
make_link(sub_class.name, "./sub_classes/" + sub_class.name)
for sub_class in gdscript.sub_classes
]
),
).as_text()
for cls in gdscript.sub_classes:
sub_documents.append(_as_markdown(classes, cls, arguments))
# content += make_heading("Sub-classes", 2)
# content.append("")
# for cls in gdscript.sub_classes:
# content += _write_class(classes, cls, output_format, 3, True)
return MarkdownDocument(gdscript.name, content, sub_documents)
def _write_class(
classes: GDScriptClasses,
gdscript: GDScriptClass,
output_format: OutputFormats,
heading_level=2,
) -> List[str]:
markdown: List[str] = []
# if is_inner_class:
# markdown += make_heading(gdscript.name, heading_level)
for attribute, title in [
("enums", "Enumerations"),
("constants", "Constants Descriptions"),
("members", "Property Descriptions"),
("functions", "Method Descriptions"),
]:
if not getattr(gdscript, attribute):
continue
markdown += MarkdownSection(
title,
heading_level,
_write(attribute, classes, gdscript, output_format),
).as_text()
return markdown
def _write_summary(gdscript: GDScriptClass, key: str) -> List[str]:
element_list = getattr(gdscript, key)
if not element_list:
return []
markdown: List[str] = make_table_header(["Type", "Name"])
return markdown + [make_table_row(item.summarize()) for item in element_list]
def _write(
attribute: str,
classes: GDScriptClasses,
gdscript: GDScriptClass,
output_format: OutputFormats,
heading_level: int = 3,
) -> List[str]:
assert hasattr(gdscript, attribute)
markdown: List[str] = []
for element in getattr(gdscript, attribute):
# assert element is Element
# -----
# ----- Heading
heading = element.get_heading_as_string()
if isinstance(element, Member):
if element.is_exported:
heading += " " + surround_with_html("(export)", "small")
markdown.extend(make_heading(heading, heading_level))
# -----
# ----- Signature
if isinstance(element, Enumeration):
markdown.extend(
[
make_code_block(
f"enum {element.name} {json.dumps(element.values, indent = 4)}\n"
),
"",
]
)
else:
markdown.extend(
[
make_code_block(element.signature),
"",
]
)
# -----
# ----- Description
description_first = False
if isinstance(element, Function):
description_first = True
unique_attributes = element.get_unique_attributes_as_markdown()
unique_attributes = [
_replace_references(classes, gdscript, x) for x in unique_attributes
]
description: str = _replace_references(classes, gdscript, element.description)
if description_first:
markdown.append(description)
markdown.append("")
markdown.extend(unique_attributes)
else:
markdown.extend(unique_attributes)
markdown.append("")
markdown.append(description)
return markdown
def _write_signals(
classes: GDScriptClasses, gdscript: GDScriptClass, output_format: OutputFormats
) -> List[str]:
ret_signals = []
for s in gdscript.signals:
signal = "{}\n{}\n{}".format(
"".join(make_heading(s.name, 3)),
make_code_block(s.signature),
_replace_references(classes, gdscript, s.description),
)
ret_signals.append(signal)
return ret_signals
# def _write_index_page(classes: GDScriptClasses, info: ProjectInfo) -> MarkdownDocument:
# title: str = "{} ({})".format(info.name, surround_with_html(info.version, "small"))
# content: List[str] = [
# *MarkdownSection(title, 1, info.description).as_text(),
# *MarkdownSection("Contents", 2, _write_table_of_contents(classes)).as_text(),
# ]
# return MarkdownDocument("index", content)
# def _write_table_of_contents(classes: GDScriptClasses) -> List[str]:
# toc: List[str] = []
# by_category = classes.get_grouped_by_category()
# for group in by_category:
# indent: str = ""
# first_class: GDScriptClass = group[0]
# category: str = first_class.category
# if category:
# toc.append("- {}".format(make_bold(category)))
# indent = " "
# for gdscript_class in group:
# link: str = (
# indent + "- " + make_link(gdscript_class.name, gdscript_class.name)
# )
# toc.append(link)
# return toc
def _replace_references(
classes: GDScriptClasses, gdscript: GDScriptClass, description: str
) -> str:
"""Finds and replaces references to other classes or methods in the
`description`."""
ERROR_MESSAGES = {
"class": "Class {} not found in the class index.",
"member": "Symbol {} not found in {}. The name might be incorrect.",
}
ERROR_TAIL = "The name might be incorrect."
references: list = re.findall(r"\[.+\]", description)
for reference in references:
# Matches [ClassName], [symbol], and [ClassName.symbol]
match: re.Match | None = re.match(
r"\[([A-Z][a-zA-Z0-9]*)?\.?([a-z0-9_]+)?\]", reference
)
if not match:
continue
class_name, member = match[1], match[2]
is_builtin_class = False
if class_name and class_name not in classes.class_index:
if class_name.lower() in BUILTIN_CLASSES:
is_builtin_class = True
else:
LOGGER.warning(ERROR_MESSAGES["class"].format(class_name) + ERROR_TAIL)
continue
if member and class_name:
if member not in classes.class_index[class_name]:
LOGGER.warning(
ERROR_MESSAGES["member"].format(member, class_name) + ERROR_TAIL
)
continue
elif member and member not in classes.class_index[gdscript.name]:
LOGGER.warning(
ERROR_MESSAGES["member"].format(member, gdscript.name) + ERROR_TAIL
)
continue
display_text, path = "", "../"
if class_name:
display_text, path = class_name, class_name
if class_name and member:
display_text += "."
path += "/"
if member:
display_text += member
path += "#" + member.replace("_", "-")
if is_builtin_class:
display_text = class_name
path = GODOT_DOCS_URL.format(class_name.lower())
link: str = make_link(display_text, path)
description = description.replace(reference, link, 1)
return description
```
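`_replace_references` is the piece that turns `[ClassName]`, `[member]` and `[ClassName.member]` tags inside doc comments into markdown links, falling back to the Godot online docs for built-in classes. A rough standalone illustration of the same regex-and-replace logic (it deliberately skips the validation against the class index that the real function performs):
```python
import re


def link_references(text: str) -> str:
    """Replace [Class], [symbol] and [Class.symbol] tags with markdown links."""
    for reference in re.findall(r"\[.+?\]", text):
        match = re.match(r"\[([A-Z][a-zA-Z0-9]*)?\.?([a-z0-9_]+)?\]", reference)
        if not match:
            continue
        class_name, member = match[1], match[2]
        display = ".".join(part for part in (class_name, member) if part)
        path = (class_name or "") + ("#" + member.replace("_", "-") if member else "")
        text = text.replace(reference, "[{}]({})".format(display, path), 1)
    return text


print(link_references("See [MongoCollection.insert_one] for details."))
# See [MongoCollection.insert_one](MongoCollection#insert-one) for details.
```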
#### File: src/gdscript_docs_maker/gdscript_objects.py
```python
import itertools
import operator
from typing import List
from .gdscript.gdscript_class import GDScriptClass
class GDScriptClasses(list):
"""
Container for a list of GDScriptClass objects
Provides methods for filtering and grouping GDScript classes
"""
def __init__(self, *args):
super(GDScriptClasses, self).__init__(args[0])
self.class_index = {
gdscript_class.name: gdscript_class.symbols for gdscript_class in self
}
def _get_grouped_by(self, attribute: str) -> List[List[GDScriptClass]]:
if not self or attribute not in self[0].__dict__:
return []
groups = []
get_attribute = operator.attrgetter(attribute)
data = sorted(self, key=get_attribute)
for key, group in itertools.groupby(data, get_attribute):
groups.append(list(group))
return groups
def get_grouped_by_category(self) -> List[List[GDScriptClass]]:
"""
Returns a list of lists of GDScriptClass objects, grouped by their `category`
attribute
"""
return self._get_grouped_by("category")
@staticmethod
def from_dict_list(data: List[dict]):
ret_gdscript_classes = []
for entry in data:
if "name" not in entry:
continue
ret_gdscript_class = GDScriptClass.from_dict(entry)
if ret_gdscript_class.hidden:
continue
ret_gdscript_classes.append(ret_gdscript_class)
return GDScriptClasses(ret_gdscript_classes)
```
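`_get_grouped_by` relies on the classic sort-then-`itertools.groupby` pattern; `groupby` only merges adjacent items, so the list must first be sorted on the same key. A standalone illustration of that pattern, with plain dicts standing in for `GDScriptClass` objects:
```python
import itertools
import operator

items = [
    {"name": "MongoClient", "category": "Core"},
    {"name": "SignatureHelper", "category": "Util"},
    {"name": "MongoDatabase", "category": "Core"},
]

get_category = operator.itemgetter("category")
grouped = [
    list(group)
    for _, group in itertools.groupby(sorted(items, key=get_category), get_category)
]
print([[item["name"] for item in group] for group in grouped])
# [['MongoClient', 'MongoDatabase'], ['SignatureHelper']]
```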
#### File: gdscript_docs_maker/gdscript/project_info.py
```python
from dataclasses import dataclass
@dataclass
class ProjectInfo:
name: str
description: str
version: str
@staticmethod
def from_dict(data: dict):
return ProjectInfo(data["name"], data["description"], data["version"])
```
#### File: tests/unit/test_enumeration.py
```python
from gdscript_docs_maker.gdscript.enumeration import Enumeration
enumeration_data = {
"name": "ReturnDocument",
"value": {"BEFORE": 0, "AFTER": 1},
"data_type": "Dictionary",
"signature": 'const ReturnDocument: Dictionary = {"AFTER":1,"BEFORE":0}',
"description": " Docs for ReturnDocument\n",
}
def test_from_dict():
enumeration_data["description"] = " Wrapper\n"
e: Enumeration = Enumeration.from_dict(enumeration_data)
assert e.name == "ReturnDocument"
assert e.values == {"AFTER": 1, "BEFORE": 0}
def test_from_dict_parses_hidden():
enumeration_data["description"] = " hey\n @hidden\n"
e: Enumeration = Enumeration.from_dict(enumeration_data)
assert e.hidden is True
``` |
{
"source": "3D-e-Chem/python-modified-tanimoto",
"score": 3
} |
#### File: python-modified-tanimoto/kripodb/canned.py
```python
from __future__ import absolute_import
import numpy as np
import pandas as pd
from requests import HTTPError
from .db import FragmentsDb
from .pairs import similar, open_similarity_matrix
from .pharmacophores import PharmacophoresDb, as_phar
from .webservice.client import WebserviceClient, IncompleteFragments, IncompletePharmacophores
class IncompleteHits(Exception):
def __init__(self, absent_identifiers, hits):
"""List of hits and list of identifiers for which no information could be found
Args:
absent_identifiers (List[str]): List of identifiers for which no information could be found
hits (pandas.DataFrame): Data frame with query_fragment_id, hit_frag_id and score columns
"""
message = 'Some query fragment identifiers could not be found'
super(IncompleteHits, self).__init__(message)
self.absent_identifiers = absent_identifiers
self.hits = hits
def similarities(queries, similarity_matrix_filename_or_url, cutoff, limit=1000):
"""Find similar fragments to queries based on similarity matrix.
Args:
queries (List[str]): Query fragment identifiers
similarity_matrix_filename_or_url (str): Filename of similarity matrix file or base url of kripodb webservice
cutoff (float): Cutoff, similarity scores below cutoff are discarded.
limit (int): Maximum number of hits for each query.
            Default is 1000. Use None for no limit.
Examples:
Fragments similar to '3j7u_NDP_frag24' fragment.
>>> import pandas as pd
>>> from kripodb.canned import similarities
>>> queries = pd.Series(['3j7u_NDP_frag24'])
        >>> hits = similarities(queries, 'data/similarities.h5', 0.55)
>>> len(hits)
11
Retrieved from web service instead of local similarity matrix file.
Make sure the web service is running,
for example by `kripodb serve data/similarities.h5 data/fragments.sqlite data/pharmacophores.h5`.
>>> hits = similarities(queries, 'http://localhost:8084/kripo', 0.55)
>>> len(hits)
11
Returns:
pandas.DataFrame: Data frame with query_fragment_id, hit_frag_id and score columns
Raises:
IncompleteHits: When one or more of the identifiers could not be found.
"""
hits = []
absent_identifiers = []
if similarity_matrix_filename_or_url.startswith('http'):
client = WebserviceClient(similarity_matrix_filename_or_url)
for query in queries:
try:
qhits = client.similar_fragments(query, cutoff, limit)
hits.extend(qhits)
except HTTPError as e:
if e.response.status_code == 404:
absent_identifiers.append(query)
else:
similarity_matrix = open_similarity_matrix(similarity_matrix_filename_or_url)
for query in queries:
try:
for query_id, hit_id, score in similar(query, similarity_matrix, cutoff, limit):
hit = {'query_frag_id': query_id,
'hit_frag_id': hit_id,
'score': score,
}
hits.append(hit)
except KeyError:
absent_identifiers.append(query)
similarity_matrix.close()
if absent_identifiers:
if len(hits) > 0:
df = pd.DataFrame(hits, columns=['query_frag_id', 'hit_frag_id', 'score'])
else:
# empty hits array will give dataframe without columns
df = pd.DataFrame({'query_frag_id': pd.Series(dtype=str),
'hit_frag_id': pd.Series(dtype=str),
'score': pd.Series(dtype=np.double)
}, columns=['query_frag_id', 'hit_frag_id', 'score'])
raise IncompleteHits(absent_identifiers, df)
return pd.DataFrame(hits, columns=['query_frag_id', 'hit_frag_id', 'score'])
def fragments_by_pdb_codes(pdb_codes, fragments_db_filename_or_url, prefix=''):
"""Retrieve fragments based on PDB codes.
See http://www.rcsb.org/pdb/ for PDB structures.
Args:
pdb_codes (List[str]): List of PDB codes
fragments_db_filename_or_url (str): Filename of fragments db or base url of kripodb webservice
prefix (str): Prefix for output columns
Examples:
Fetch fragments of '2n2k' PDB code
>>> from kripodb.canned import fragments_by_pdb_codes
>>> pdb_codes = pd.Series(['2n2k'])
>>> fragments = fragments_by_pdb_codes(pdb_codes, 'data/fragments.sqlite')
>>> len(fragments)
3
Retrieved from web service instead of local fragments db file.
Make sure the web service is running,
for example by `kripodb serve data/similarities.h5 data/fragments.sqlite data/pharmacophores.h5`.
>>> fragments = fragments_by_pdb_codes(pdb_codes, 'http://localhost:8084/kripo')
>>> len(fragments)
3
Returns:
pandas.DataFrame: Data frame with fragment information
Raises:
IncompleteFragments: When one or more of the identifiers could not be found.
"""
if fragments_db_filename_or_url.startswith('http'):
client = WebserviceClient(fragments_db_filename_or_url)
try:
fragments = client.fragments_by_pdb_codes(pdb_codes)
except IncompleteFragments as e:
df = pd.DataFrame(e.fragments)
df.rename(columns=lambda x: prefix + x, inplace=True)
raise IncompleteFragments(e.absent_identifiers, df)
else:
fragmentsdb = FragmentsDb(fragments_db_filename_or_url)
fragments = []
absent_identifiers = []
for pdb_code in pdb_codes:
try:
for fragment in fragmentsdb.by_pdb_code(pdb_code):
fragments.append(fragment)
except LookupError as e:
absent_identifiers.append(pdb_code)
if absent_identifiers:
df = pd.DataFrame(fragments)
df.rename(columns=lambda x: prefix + x, inplace=True)
raise IncompleteFragments(absent_identifiers, df)
df = pd.DataFrame(fragments)
df.rename(columns=lambda x: prefix + x, inplace=True)
return df
def fragments_by_id(fragment_ids, fragments_db_filename_or_url, prefix=''):
"""Retrieve fragments based on fragment identifier.
Args:
fragment_ids (List[str]): List of fragment identifiers
fragments_db_filename_or_url (str): Filename of fragments db or base url of kripodb webservice
prefix (str): Prefix for output columns
Examples:
Fetch fragments of '2n2k_MTN_frag1' fragment identifier
>>> from kripodb.canned import fragments_by_id
>>> fragment_ids = pd.Series(['2n2k_MTN_frag1'])
>>> fragments = fragments_by_id(fragment_ids, 'data/fragments.sqlite')
>>> len(fragments)
1
Retrieved from web service instead of local fragments db file.
Make sure the web service is running,
for example by `kripodb serve data/similarities.h5 data/fragments.sqlite data/pharmacophores.h5`.
        >>> fragments = fragments_by_id(fragment_ids, 'http://localhost:8084/kripo')
>>> len(fragments)
1
Returns:
pandas.DataFrame: Data frame with fragment information
Raises:
IncompleteFragments: When one or more of the identifiers could not be found.
"""
if fragments_db_filename_or_url.startswith('http'):
client = WebserviceClient(fragments_db_filename_or_url)
try:
fragments = client.fragments_by_id(fragment_ids)
except IncompleteFragments as e:
df = pd.DataFrame(e.fragments)
df.rename(columns=lambda x: prefix + x, inplace=True)
raise IncompleteFragments(e.absent_identifiers, df)
else:
fragmentsdb = FragmentsDb(fragments_db_filename_or_url)
fragments = []
absent_identifiers = []
for frag_id in fragment_ids:
try:
fragments.append(fragmentsdb[frag_id])
except KeyError:
absent_identifiers.append(frag_id)
if absent_identifiers:
df = pd.DataFrame(fragments)
df.rename(columns=lambda x: prefix + x, inplace=True)
raise IncompleteFragments(absent_identifiers, df)
df = pd.DataFrame(fragments)
df.rename(columns=lambda x: prefix + x, inplace=True)
return df
def pharmacophores_by_id(fragment_ids, pharmacophores_db_filename_or_url):
"""Fetch pharmacophore points by fragment identifiers
Args:
fragment_ids (pd.Series): List of fragment identifiers
pharmacophores_db_filename_or_url: Filename of pharmacophores db or base url of kripodb webservice
Returns:
pandas.Series: Pandas series with pharmacophores as string in phar format.
Fragment without pharmacophore will return None
Examples:
Fragments similar to '3j7u_NDP_frag24' fragment.
>>> from kripodb.canned import pharmacophores_by_id
>>> fragment_ids = pd.Series(['2n2k_MTN_frag1'], ['Row0'])
>>> pharmacophores = pharmacophores_by_id(fragment_ids, 'data/pharmacophores.h5')
>>> len(pharmacophores)
1
Retrieved from web service instead of local pharmacophores db file.
Make sure the web service is running,
for example by `kripodb serve data/similarities.h5 data/fragments.sqlite data/pharmacophores.h5`.
>>> pharmacophores = pharmacophores_by_id(fragment_ids, 'http://localhost:8084/kripo')
>>> len(pharmacophores)
1
"""
pphors = pd.Series([], dtype=str)
if pharmacophores_db_filename_or_url.startswith('http'):
client = WebserviceClient(pharmacophores_db_filename_or_url)
try:
pphorsarray = client.pharmacophores(fragment_ids)
pphors = pd.Series(pphorsarray, fragment_ids.index, dtype=str)
except IncompletePharmacophores as e:
pphors = pd.Series(e.pharmacophores, fragment_ids.index, dtype=str)
raise IncompletePharmacophores(e.absent_identifiers, pphors)
else:
with PharmacophoresDb(pharmacophores_db_filename_or_url) as pharmacophoresdb:
absent_identifiers = []
for row_id, frag_id in fragment_ids.iteritems():
try:
phar = as_phar(frag_id, pharmacophoresdb[frag_id])
pphors[row_id] = phar
except KeyError:
pphors[row_id] = None
absent_identifiers.append(frag_id)
if absent_identifiers:
raise IncompletePharmacophores(absent_identifiers, pphors)
return pphors
```
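All three canned helpers follow the same contract: they either return a complete `pandas.DataFrame`/`Series`, or raise an `Incomplete*` exception that still carries the partial results together with the identifiers that could not be resolved. A hedged sketch of how a caller can keep the partial data (file paths and the bogus identifier are placeholders):
```python
import pandas as pd

from kripodb.canned import IncompleteHits, similarities

queries = pd.Series(['3j7u_NDP_frag24', 'not_a_real_frag_id'])
try:
    hits = similarities(queries, 'data/similarities.h5', cutoff=0.55)
except IncompleteHits as e:
    # Partial results are still usable; absent identifiers are reported separately.
    hits = e.hits
    print('No matches for:', e.absent_identifiers)

print(hits.sort_values('score', ascending=False).head())
```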
#### File: kripodb/script/__init__.py
```python
from __future__ import absolute_import
import argparse
import sys
from .fingerprints import make_fingerprints_parser
from .fragments import make_fragments_parser
from .similarities import make_similarities_parser
from .dive import make_dive_parsers
from ..webservice.server import serve_app
from .pharmacophores import make_pharmacophores_parser
from ..version import __version__
def make_parser():
"""Creates a parser with sub commands
Returns:
argparse.ArgumentParser: parser with sub commands
"""
parser = argparse.ArgumentParser()
parser.add_argument('--version', action='version', version=__version__)
subparsers = parser.add_subparsers(dest='subcommand')
make_fingerprints_parser(subparsers)
make_fragments_parser(subparsers)
make_similarities_parser(subparsers)
make_dive_parsers(subparsers)
serve_sc(subparsers)
make_pharmacophores_parser(subparsers)
return parser
def serve_sc(subparsers):
sc = subparsers.add_parser('serve',
help='Serve similarity matrix, fragments db and pharmacophores db as webservice')
sc.add_argument('similarities', type=str, help='Filename of similarity matrix hdf5 file')
sc.add_argument('fragments', type=str, help='Filename of fragments sqlite database file')
sc.add_argument('pharmacophores', type=str, help='Filename of pharmacophores hdf5 file')
sc.add_argument('--internal_port',
type=int,
default=8084,
help='TCP port on which to listen (default: %(default)s)')
sc.add_argument('--external_url',
type=str,
default='http://localhost:8084/kripo',
help='URL which should be used in Swagger spec (default: %(default)s)')
sc.set_defaults(func=serve_app)
def main(argv=sys.argv[1:]):
"""Main script function.
Calls run method of selected sub commandos.
Args:
argv (list[str]): List of command line arguments
"""
parser = make_parser()
args = parser.parse_args(argv)
fargs = vars(args)
if 'func' in fargs:
func = args.func
del(fargs['subcommand'])
del(fargs['func'])
func(**fargs)
else:
if 'subcommand' in args:
parser.parse_args([args.subcommand, '--help'])
else:
parser.print_help()
```
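Because `main` accepts its argument list explicitly, the same dispatch can be exercised without a shell. The sketch below reuses the `serve` sub-command whose options are defined in `serve_sc` above; the data file names are placeholders:
```python
from kripodb.script import main

# Equivalent to: kripodb serve --internal_port 8084 \
#     data/similarities.h5 data/fragments.sqlite data/pharmacophores.h5
main([
    'serve',
    'data/similarities.h5',
    'data/fragments.sqlite',
    'data/pharmacophores.h5',
    '--internal_port', '8084',
])
```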
#### File: kripodb/script/pharmacophores.py
```python
import argparse
from ..db import FragmentsDb
from ..pharmacophores import PharmacophoresDb, read_pphore_sdfile, as_phar
def dir2db_run(startdir, pharmacophoresdb, nrrows):
with PharmacophoresDb(pharmacophoresdb, 'a', expectedrows=nrrows) as db:
db.add_dir(startdir)
def add_sc(sc):
parser = sc.add_parser('add', help='Add pharmacophores from directory to database')
parser.add_argument('startdir', help='Directory to start finding *.pphores.sd.gz and *.pphores.txt files in')
parser.add_argument('pharmacophoresdb', help='Name of pharmacophore db file')
parser.add_argument('--nrrows',
type=int,
default=2 ** 16,
help='''Number of expected pharmacophores,
only used when database is created
(default: %(default)s)''')
parser.set_defaults(func=dir2db_run)
def get_run(pharmacophoresdb, query, output):
with PharmacophoresDb(pharmacophoresdb) as db:
db.write_phar(output, query)
def get_sc(sc):
parser = sc.add_parser('get', help='Retrieve pharmacophore of a fragment')
parser.add_argument('pharmacophoresdb', help='Name of pharmacophore db file')
parser.add_argument('--query', type=str, help='Query fragment identifier', default=None)
parser.add_argument('--output', type=argparse.FileType('w'), default='-', help="Phar formatted text file")
parser.set_defaults(func=get_run)
def filter_run(inputfn, fragmentsdb, outputfn):
frags = FragmentsDb(fragmentsdb)
fragids2keep = set([f.encode() for f in frags.id2label().values()])
with PharmacophoresDb(inputfn) as dbin:
expectedrows = len(dbin.points)
with PharmacophoresDb(outputfn, 'w', expectedrows=expectedrows) as dbout:
col_names = [colName for colName in dbin.points.table.colpathnames]
rowout = dbout.points.table.row
for rowin in dbin.points.table.iterrows():
if rowin['frag_id'] in fragids2keep:
for col_name in col_names:
rowout[col_name] = rowin[col_name]
rowout.append()
dbout.points.table.flush()
def filter_sc(sc):
parser = sc.add_parser('filter', help='Filter pharmacophores')
parser.add_argument('inputfn', help='Name of input pharmacophore db file')
parser.add_argument('--fragmentsdb',
default='fragments.db',
help='Name of fragments db file, fragments present in db are passed '
'(default: %(default)s)')
parser.add_argument('outputfn', help='Name of output pharmacophore db file')
parser.set_defaults(func=filter_run)
def merge_sc(sc):
parser = sc.add_parser('merge', help='Merge pharmacophore database files into new one')
parser.add_argument('ins', nargs='+', help='Input pharmacophore database files')
parser.add_argument('out', help='Output pharmacophore database file')
parser.set_defaults(func=merge_pharmacophore_dbs)
def merge_pharmacophore_dbs(ins, out):
nr_rows = 0
for in_fn in ins:
with PharmacophoresDb(in_fn) as in_db:
nr_rows += len(in_db)
with PharmacophoresDb(out, 'a', expectedrows=nr_rows) as db:
for in_fn in ins:
with PharmacophoresDb(in_fn) as in_db:
db.append(in_db)
def phar2db_sc(sc):
parser = sc.add_parser('import', help='Convert phar formatted file to pharmacophore database file')
parser.add_argument('infile', type=argparse.FileType('r'), help='Input phar formatted file')
parser.add_argument('outfile', help='Output pharmacophore database file')
parser.add_argument('--nrrows',
type=int,
default=2 ** 16,
help='''Number of expected pharmacophores,
only used when database is created
(default: %(default)s)''')
parser.set_defaults(func=phar2db)
def phar2db(infile, outfile, nrrows):
with PharmacophoresDb(outfile, 'a', expectedrows=nrrows) as out_db:
out_db.read_phar(infile)
def sd2phar_sc(sc):
parser = sc.add_parser('sd2phar', help='Convert sd formatted pharmacophore file to phar formatted file')
parser.add_argument('infile', type=argparse.FileType('rb'), help='Input sd formatted file')
parser.add_argument('outfile', type=argparse.FileType('w'), help='Output phar formatted file')
parser.add_argument('--frag_id', type=str, help='Fragment identifier', default='frag')
parser.set_defaults(func=sd2phar)
def sd2phar(infile, outfile, frag_id):
points = read_pphore_sdfile(infile)
phar = as_phar(frag_id, points)
outfile.write(phar)
def make_pharmacophores_parser(subparsers):
"""Creates a parser for pharmacophores sub commands
Args:
subparsers (argparse.ArgumentParser): Parser to which sub commands are added
"""
sc = subparsers.add_parser('pharmacophores', help='Pharmacophores').add_subparsers()
add_sc(sc)
get_sc(sc)
filter_sc(sc)
merge_sc(sc)
phar2db_sc(sc)
sd2phar_sc(sc)
```
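Each `*_sc` helper simply wires one `pharmacophores` sub-command to a plain function, so the functions can also be called directly from Python. A hedged example using `sd2phar`; the file names are placeholders:
```python
from kripodb.script.pharmacophores import sd2phar

# Same effect as: kripodb pharmacophores sd2phar --frag_id 1muu_GDX_frag7 \
#     fragment.sdf fragment.phar
with open('fragment.sdf', 'rb') as infile, open('fragment.phar', 'w') as outfile:
    sd2phar(infile, outfile, frag_id='1muu_GDX_frag7')
```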
#### File: kripodb/webservice/client.py
```python
from __future__ import absolute_import
import requests
from rdkit.Chem.AllChem import MolFromMolBlock
from requests import HTTPError
class Incomplete(Exception):
def __init__(self, message, absent_identifiers):
super(Incomplete, self).__init__(message)
self.absent_identifiers = absent_identifiers
class IncompleteFragments(Incomplete):
def __init__(self, absent_identifiers, fragments):
"""List of fragments and list of identifiers for which no information could be found
Args:
absent_identifiers (List[str]): List of identifiers for which no information could be found
fragments (List[dict]): List of fragment information that could be retrieved
"""
message = 'Some identifiers could not be found'
super(IncompleteFragments, self).__init__(message, absent_identifiers)
self.fragments = fragments
class IncompletePharmacophores(Incomplete):
def __init__(self, absent_identifiers, pharmacophores):
"""List of fragments and list of identifiers for which no information could be found
Args:
absent_identifiers (List[str]): List of identifiers for which no information could be found
pharmacophores (List[dict]): List of pharmacophores that could be retrieved
"""
message = 'Some identifiers could not be found'
super(IncompletePharmacophores, self).__init__(message, absent_identifiers)
self.pharmacophores = pharmacophores
class WebserviceClient(object):
"""Client for kripo web service
Example:
>>> client = WebserviceClient('http://localhost:8084/kripo')
>>> client.similar_fragments('3j7u_NDP_frag24', 0.85)
[{'query_frag_id': '3j7u_NDP_frag24', 'hit_frag_id': '3j7u_NDP_frag23', 'score': 0.8991}]
Args:
base_url (str): Base url of web service. e.g. http://localhost:8084/kripo
"""
def __init__(self, base_url):
self.base_url = base_url
def similar_fragments(self, fragment_id, cutoff, limit=1000):
"""Find similar fragments to query.
Args:
fragment_id (str): Query fragment identifier
cutoff (float): Cutoff, similarity scores below cutoff are discarded.
            limit (int): Maximum number of hits. Default is 1000; use None for no limit.
Returns:
list[dict]: Query fragment identifier, hit fragment identifier and similarity score
Raises:
request.HTTPError: When fragment_id could not be found
"""
url = self.base_url + '/fragments/{fragment_id}/similar'.format(fragment_id=fragment_id)
params = {'cutoff': cutoff, 'limit': limit}
response = requests.get(url, params)
response.raise_for_status()
return response.json()
def fragments_by_pdb_codes(self, pdb_codes, chunk_size=450):
"""Retrieve fragments by their PDB code
Args:
pdb_codes (List[str]): List of PDB codes
chunk_size (int): Number of PDB codes to retrieve in a single http request
Returns:
list[dict]: List of fragment information
Raises:
requests.HTTPError: When one of the PDB codes could not be found.
"""
return self._fetch_chunked_fragments('pdb_codes', pdb_codes, chunk_size)
def fragments_by_id(self, fragment_ids, chunk_size=100):
"""Retrieve fragments by their identifier
Args:
fragment_ids (List[str]): List of fragment identifiers
chunk_size (int): Number of fragment to retrieve in a single http request
Returns:
list[dict]: List of fragment information
Raises:
IncompleteFragments: When one or more of the identifiers could not be found.
"""
return self._fetch_chunked_fragments('fragment_ids', fragment_ids, chunk_size)
def _fetch_chunked_fragments(self, idtype, ids, chunk_size):
fragments = []
absent_identifiers = []
for start in range(0, len(ids), chunk_size):
stop = chunk_size + start
(chunk_fragments, chunk_absent_identifiers) = self._fetch_fragments(idtype, ids[start:stop])
fragments += chunk_fragments
absent_identifiers += chunk_absent_identifiers
if chunk_absent_identifiers:
raise IncompleteFragments(absent_identifiers, fragments)
return fragments
def _fetch_fragments(self, idtype, ids):
url = self.base_url + '/fragments?{idtype}={ids}'.format(idtype=idtype, ids=','.join(ids))
absent_identifiers = []
try:
response = requests.get(url)
response.raise_for_status()
fragments = response.json()
except HTTPError as e:
if e.response.status_code == 404:
body = e.response.json()
fragments = body['fragments']
absent_identifiers = body['absent_identifiers']
else:
raise e
# Convert molblock string to RDKit Mol object
for fragment in fragments:
if fragment['mol'] is not None:
fragment['mol'] = MolFromMolBlock(fragment['mol'])
return fragments, absent_identifiers
def pharmacophores(self, fragment_ids):
absent_identifiers = []
pharmacophores = []
for fragment_id in fragment_ids:
url = self.base_url + '/fragments/{0}.phar'.format(fragment_id)
try:
response = requests.get(url)
response.raise_for_status()
pharmacophore = response.text
pharmacophores.append(pharmacophore)
except HTTPError as e:
if e.response.status_code == 404:
pharmacophores.append(None)
absent_identifiers.append(fragment_id)
else:
raise e
if absent_identifiers:
raise IncompletePharmacophores(absent_identifiers, pharmacophores)
return pharmacophores
```
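A short usage sketch of the client, mirroring the chunked-fetch behaviour above; the base URL and identifiers are the same placeholders used in the docstrings, and the `except` branch shows how partial results are recovered:
```python
from kripodb.webservice.client import IncompleteFragments, WebserviceClient

client = WebserviceClient('http://localhost:8084/kripo')

try:
    fragments = client.fragments_by_id(['2n2k_MTN_frag1', 'bogus_frag_id'])
except IncompleteFragments as e:
    fragments = e.fragments              # whatever could still be resolved
    print('Unknown identifiers:', e.absent_identifiers)

for fragment in fragments:
    # 'mol' has already been converted to an RDKit Mol (or None)
    print(fragment['frag_id'], fragment['mol'] is not None)
```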
#### File: tests/script/test_fingerprints.py
```python
from __future__ import absolute_import
from six import StringIO
import kripodb.script as script
import kripodb.script.fingerprints
def test_pairs_subcommand_defaults():
parser = script.make_parser()
args = parser.parse_args(['fingerprints', 'similarities', '--fragmentsdbfn', 'fragdb', 'fp1', 'fp2', 'outfn'])
assert args.func == kripodb.script.fingerprints.pairs_run
fargs = vars(args)
del(fargs['func'])
expected = {
'subcommand': 'fingerprints',
'out_format': 'hdf5',
'cutoff': 0.45,
'out_file': 'outfn',
'fragmentsdbfn': 'fragdb',
'mean_onbit_density': 0.01,
'nomemory': False,
'fingerprintsfn2': 'fp2',
'fingerprintsfn1': 'fp1',
'ignore_upper_triangle': False,
}
assert fargs == expected
def test_meanbitdensity():
out = StringIO()
kripodb.script.fingerprints.meanbitdensity_run('data/fingerprints.sqlite', out)
assert out.getvalue() == '0.0077683\n'
```
#### File: tests/script/test_pharmacophores.py
```python
from six import StringIO
from kripodb.script.pharmacophores import get_run, sd2phar
def test_get__onefrag():
out_file = StringIO()
get_run('data/pharmacophores.h5', '3wsj_MK1_frag1', out_file)
result = out_file.getvalue()
nr_lines = len(result.split('\n'))
assert '3wsj_MK1_frag1' in result and \
'LIPO' in result and \
'$$$$' in result and \
nr_lines == 50
def test_get__all():
out_file = StringIO()
get_run('data/pharmacophores.h5', None, out_file)
result = out_file.getvalue()
nr_lines = len(result.split('\n'))
assert '3wsj_MK1_frag1' in result and \
'LIPO' in result and \
'$$$$' in result and \
nr_lines == 17611
def test_sd2phar(example1_sdfile, example1_pharblock):
out_file = StringIO()
frag_id = 'some_frag_id'
sd2phar(example1_sdfile, out_file, frag_id)
assert out_file.getvalue() == example1_pharblock
```
#### File: python-modified-tanimoto/tests/test_db.py
```python
from __future__ import absolute_import
import sqlite3
from sys import version_info
from pyroaring import BitMap
import blosc
from mock import call, Mock
import pytest
from rdkit.Chem import MolFromSmiles, MolToSmiles
import six
import kripodb.db as db
@pytest.mark.skipif(version_info < (3,),
reason="requires python3")
def test_adapt_BitMap():
bs = BitMap([1, 3, 5, 8])
result = db.adapt_BitMap(bs)
expected = blosc.compress(bs.serialize(), cname='zstd')
assert result == expected
def test_convert_BitMap():
bs = BitMap([1, 3, 5, 8])
result = db.convert_BitMap(blosc.compress(bs.serialize(), cname='zstd'))
assert result == bs
class TestFastInserter(object):
@pytest.fixture
def cursor(self):
return Mock()
@pytest.fixture
def fast_inserter(self, cursor):
return db.FastInserter(cursor)
def test_with(self, cursor, fast_inserter):
with fast_inserter:
cursor.execute.assert_has_calls([call('PRAGMA journal_mode=WAL'),
call('PRAGMA synchronous=OFF')])
cursor.execute.assert_has_calls([call('PRAGMA journal_mode=DELETE'),
call('PRAGMA synchronous=FULL')])
@pytest.fixture
def fragmentsdb():
return db.FragmentsDb(':memory:')
class TestFragmentsDBEmpty(object):
def test_id2label(self, fragmentsdb):
assert fragmentsdb.id2label() == {}
def test_label2id(self, fragmentsdb):
assert fragmentsdb.label2id() == {}
def test_getitem_keyerror(self, fragmentsdb):
key = 'id1'
with pytest.raises(KeyError):
fragmentsdb[key]
def test_by_pdb_code(self, fragmentsdb):
pdb_code = '1kvm'
with pytest.raises(LookupError):
fragmentsdb.by_pdb_code(pdb_code)
def test_add_fragments_from_shelve_weirdid(self, fragmentsdb):
result = fragmentsdb.add_fragment_from_shelve('1muu-GDX', {})
assert result is None
def test_add_fragments_from_shelve_weirdid2(self, fragmentsdb):
result = fragmentsdb.add_fragment_from_shelve('1muu-GDX-B', {})
assert result is None
def test_len(self, fragmentsdb):
assert len(fragmentsdb) == 0
def test_add_fragment(self, fragmentsdb):
fragmentsdb.add_fragment(
nr_r_groups=1,
pdb_code='1muu',
atom_codes='C5D,O5D,PA,O1A,O2A,O3A,PB,O2B,O3B,O1B,C1*,O5*,C5*,C6*,O6A,O6B,C2*,O2*,C3*,O3*,C4*,O4*',
het_code='GDX',
hash_code='0d6ced7ce686f4da',
frag_nr=7,
frag_id='1muu_GDX_frag7',
het_seq_nr=1005,
het_chain='B',
prot_chain='A',
)
expected = {
'nr_r_groups': 1,
'smiles': None,
'mol': None,
'pdb_code': '1muu',
'atom_codes': 'C5D,O5D,PA,O1A,O2A,O3A,PB,O2B,O3B,O1B,C1*,O5*,C5*,C6*,O6A,O6B,C2*,O2*,C3*,O3*,C4*,O4*',
'het_code': 'GDX',
'hash_code': '0d6ced7ce686f4da',
'frag_nr': 7,
'frag_id': '1muu_GDX_frag7',
'rowid': 1,
'het_seq_nr': 1005,
'het_chain': 'B',
'prot_chain': 'A',
'pdb_title': None,
'prot_name': None,
'ec_number': None,
'uniprot_acc': None,
'uniprot_name': None,
}
frag = fragmentsdb['1muu_GDX_frag7']
assert frag == expected
@pytest.fixture
def myshelve():
return {
'1muu-GDX-frag7': {
'atomCodes': 'C5D,O5D,PA,O1A,O2A,O3A,PB,O2B,O3B,O1B,C1*,O5*,C5*,C6*,O6A,O6B,C2*,O2*,C3*,O3*,C4*,O4*',
'hashcode': '0d6ced7ce686f4da',
'ligID': '1muu-A-GDX-1005-B',
'numRgroups': '1'
}
}
@pytest.fixture
def filled_fragmentsdb(fragmentsdb, myshelve):
fragmentsdb.add_fragments_from_shelve(myshelve)
mol = MolFromSmiles('[*]COP(=O)([O-])OP(=O)([O-])OC1OC(C(=O)[O-])C(O)C(O)C1O')
mol.SetProp('_Name', '1muu_GDX_frag7')
fragmentsdb.add_molecule(mol)
pdbs = [{
'chainId': 'A',
'structureId': '1muu',
'structureTitle': '2.0 A crystal structure of GDP-mannose dehydrogenase',
'ecNo': '1.1.1.132',
'uniprotAcc': 'P11759',
'compound': 'GDP-mannose 6-dehydrogenase',
'uniprotRecommendedName': 'GDP-mannose 6-dehydrogenase',
}, {
# pdbs which has no fragment should be skipped
'chainId': 'A',
'structureId': '2n2k',
'structureTitle': 'Ensemble structure of the closed state of Lys63-linked diubiquitin in the absence of a ligand',
'ecNo': None,
'uniprotAcc': 'P0CG48',
'compound': 'ubiquitin',
'uniprotRecommendedName': 'Polyubiquitin-C',
}]
fragmentsdb.add_pdbs(pdbs)
return fragmentsdb
@pytest.fixture
def expected_fragment():
return {
'nr_r_groups': 1,
'smiles': '[*]COP(=O)([O-])OP(=O)([O-])OC1OC(C(=O)[O-])C(O)C(O)C1O',
'pdb_code': '1muu',
'atom_codes': 'C5D,O5D,PA,O1A,O2A,O3A,PB,O2B,O3B,O1B,C1*,O5*,C5*,C6*,O6A,O6B,C2*,O2*,C3*,O3*,C4*,O4*',
'het_code': 'GDX',
'hash_code': '0d6ced7ce686f4da',
'frag_nr': 7,
'frag_id': '1muu_GDX_frag7',
'rowid': 1,
'het_seq_nr': 1005,
'het_chain': 'B',
'prot_chain': 'A',
'pdb_title': '2.0 A crystal structure of GDP-mannose dehydrogenase',
'prot_name': 'GDP-mannose 6-dehydrogenase',
'ec_number': '1.1.1.132',
'uniprot_acc': 'P11759',
'uniprot_name': 'GDP-mannose 6-dehydrogenase',
}
class TestFragmentsDBFilled(object):
def test_getitem(self, filled_fragmentsdb, expected_fragment):
fragment = filled_fragmentsdb['1muu_GDX_frag7']
assert MolToSmiles(fragment['mol']) == '[*]COP(=O)([O-])OP(=O)([O-])OC1OC(C(=O)[O-])C(O)C(O)C1O'
del fragment['mol']
assert fragment == expected_fragment
def test_id2label(self, filled_fragmentsdb):
assert filled_fragmentsdb.id2label() == {1: '1muu_GDX_frag7'}
def test_label2id(self, filled_fragmentsdb):
assert filled_fragmentsdb.label2id() == {'1muu_GDX_frag7': 1}
def test_by_pdb_code(self, filled_fragmentsdb, expected_fragment):
pdb_code = '1muu'
fragments = filled_fragmentsdb.by_pdb_code(pdb_code)
del fragments[0]['mol']
assert fragments == [expected_fragment]
def test_by_uppercase_pdb_code(self, filled_fragmentsdb, expected_fragment):
pdb_code = '1MUU'
fragments = filled_fragmentsdb.by_pdb_code(pdb_code)
del fragments[0]['mol']
assert fragments == [expected_fragment]
def test_len(self, filled_fragmentsdb):
assert len(filled_fragmentsdb) == 1
def test_duplicate(self, filled_fragmentsdb, myshelve):
with pytest.raises(sqlite3.IntegrityError):
filled_fragmentsdb.add_fragments_from_shelve(myshelve)
def test_iterate(self, filled_fragmentsdb, expected_fragment):
fragments = [f for f in filled_fragmentsdb]
del fragments[0]['mol']
assert fragments == [expected_fragment]
def test_is_ligand_stored_exists_true(self, filled_fragmentsdb):
assert filled_fragmentsdb.is_ligand_stored('1muu', 'GDX')
def test_is_ligand_stored_absent_false(self, filled_fragmentsdb):
assert not filled_fragmentsdb.is_ligand_stored('1muu', '111')
class TestHetSeqNr(object):
def test_isnumber(self, filled_fragmentsdb):
fragment = filled_fragmentsdb['1muu_GDX_frag7']
assert fragment['het_seq_nr'] == 1005
def test_nan(self, fragmentsdb):
myshelve = {
'1hoo-GNP-frag1': {
'atomCodes': 'PG,O1G,O2G,O3G,N3B,PB,O1B,O2B,O3A,PA,O1A,O2A,O5*,C5*,C4*,O4*,C3*,O3*,C2*,O2*,C1*,N9,C8,N7,C5,C6,O6,N1,C2,N2,N3,C4',
'hashcode': 'be4ce041f2a35721',
'ligID': '1hoo-A-GNP-432B-A',
'numRgroups': '0'
}
}
fragmentsdb.add_fragments_from_shelve(myshelve)
fragmentsdb.add_pdbs([{
'chainId': 'A',
'structureId': '1hoo',
'structureTitle': 'STRUCTURE OF GUANINE NUCLEOTIDE (GPPCP) COMPLEX OF ADENYLOSUCCINATE SYNTHETASE FROM E. COLI AT PH 6.5 AND 25 DEGREES CELSIUS',
'ecNo': '6.3.4.4',
'uniprotAcc': 'P0A7D4',
'uniprotRecommendedName': 'Adenylosuccinate synthetase',
'compound': 'ADENYLOSUCCINATE SYNTHETAS',
}])
fragment = fragmentsdb['1hoo_GNP_frag1']
assert fragment['het_seq_nr'] == 432
@pytest.fixture
def fingerprintsdb():
fdb = db.FingerprintsDb(':memory:')
yield fdb
fdb.close()
@pytest.fixture
def bitsets(fingerprintsdb):
return fingerprintsdb.as_dict(100)
class TestBitMapDictEmpty(object):
def test_default_number_of_bits(self, fingerprintsdb):
bitsets = db.IntbitsetDict(fingerprintsdb)
assert bitsets.number_of_bits is None
def test_get_number_of_bits(self, bitsets):
assert bitsets.number_of_bits == 100
def test_set_number_of_bits(self, bitsets):
bitsets.number_of_bits = 200
assert bitsets.number_of_bits == 200
def test_delete_number_of_bits(self, bitsets):
del bitsets.number_of_bits
assert bitsets.number_of_bits is None
def test_len_empty(self, bitsets):
assert len(bitsets) == 0
def test_contains_false(self, bitsets):
assert 'id1' not in bitsets
def test_update(self, bitsets):
bs = BitMap([1, 3, 5, 8])
other = {'id1': bs}
bitsets.update(other)
result = {k: v for k, v in six.iteritems(bitsets)}
assert result == other
def test_getitem_keyerror(self, bitsets):
with pytest.raises(KeyError) as e:
bitsets['id1']
assert e.value.args == ('id1',)
@pytest.fixture
def sample_BitMap():
return BitMap([1, 3, 5, 8])
@pytest.fixture
def filled_bitsets(bitsets, sample_BitMap):
bid = 'id1'
bitsets[bid] = sample_BitMap
return bitsets
class TestBitMapDictFilled(object):
def test_getitem(self, filled_bitsets, sample_BitMap):
result = filled_bitsets['id1']
assert result == sample_BitMap
def test_len_filled(self, filled_bitsets):
assert len(filled_bitsets) == 1
def test_contains_true(self, filled_bitsets):
assert 'id1' in filled_bitsets
def test_del(self, filled_bitsets):
del filled_bitsets['id1']
assert len(filled_bitsets) == 0
def test_keys(self, filled_bitsets):
result = list(filled_bitsets.keys())
expected = ['id1']
assert result == expected
def test_iteritems(self, filled_bitsets, sample_BitMap):
result = {k: v for k, v in six.iteritems(filled_bitsets)}
expected = {'id1': sample_BitMap}
assert result == expected
def test_iteritems_startswith(self, filled_bitsets, sample_BitMap):
filled_bitsets['someid'] = sample_BitMap
result = {k: v for k, v in filled_bitsets.iteritems_startswith('id')}
expected = {'id1': sample_BitMap}
assert result == expected
assert 'someid' not in result
def test_itervalues(self, filled_bitsets, sample_BitMap):
result = [v for v in six.itervalues(filled_bitsets)]
expected = [sample_BitMap]
assert result == expected
def test_materialize(self, filled_bitsets, sample_BitMap):
result = filled_bitsets.materialize()
expected = {'id1': sample_BitMap}
assert result == expected
```
#### File: python-modified-tanimoto/tests/test_hdf5.py
```python
from __future__ import absolute_import
import pytest
from numpy.testing import assert_array_almost_equal, assert_almost_equal
from kripodb.hdf5 import SimilarityMatrix
from .utils import SimilarityMatrixInMemory
@pytest.fixture
def matrix():
sim_matrix = SimilarityMatrix('data/similarities.h5')
yield sim_matrix
    sim_matrix.close()
@pytest.fixture
def empty_matrix():
with SimilarityMatrixInMemory() as sim_matrix:
yield sim_matrix
@pytest.fixture
def example_matrix():
with SimilarityMatrixInMemory() as sim_matrix:
labels = {'a': 0, 'b': 1, 'c': 2, 'd': 3}
similarities = [
('a', 'b', 0.9),
('a', 'c', 0.6),
('b', 'c', 0.6),
('d', 'c', 0.7)
]
sim_matrix.update(similarities, labels)
yield sim_matrix
class TestSimilarityMatrix(object):
def test_find_1(self, matrix):
result = list(matrix.find('2n6i_4FU_frag1', 0.98))
expected = [('2n6i_4FU_frag2', 1.0), ('2n6i_4FU_frag6', 1.0)]
assert_array_almost_equal([r[1] for r in result], [r[1] for r in expected], 3)
        assert [r[0] for r in result] == [r[0] for r in expected]
def test_iter_first2(self, matrix):
myiter = iter(matrix)
result = [next(myiter), next(myiter)]
expected = [('2mlm_2W7_frag1', '2mlm_2W7_frag2', 0.5877), ('2mlm_2W7_frag2', '3wvm_STE_frag1', 0.4633)]
assert_array_almost_equal([r[2] for r in result], [r[2] for r in expected], 3)
        assert [(r[0], r[1],) for r in result] == [(r[0], r[1],) for r in expected]
def test_iter_last(self, matrix):
myiter = iter(matrix)
result = None
for row in myiter:
result = row
expected = ('3wyl_3KB_frag20', '3wyl_3KB_frag21', 0.999496452277409)
assert_almost_equal(result[2], expected[2], 5)
assert result[:2] == expected[:2]
def test_keep(self, example_matrix, empty_matrix):
in_matrix = example_matrix
out_matrix = empty_matrix
frags2keep = {'a', 'b'}
in_matrix.keep(out_matrix, frags2keep)
expected_labels = {'a', 'b', 'c'}
assert set(out_matrix.labels.label2ids().keys()) == expected_labels
expected_similarities = {
('a', 'b', 0.9),
('a', 'c', 0.6),
('b', 'c', 0.6)
}
assert set(out_matrix) == expected_similarities
def test_skip(self, example_matrix, empty_matrix):
in_matrix = example_matrix
out_matrix = empty_matrix
frags2skip = {'b'}
in_matrix.skip(out_matrix, frags2skip)
expected_labels = {'a', 'c', 'd'}
assert set(out_matrix.labels.label2ids().keys()) == expected_labels
expected_similarities = {
('a', 'c', 0.6),
('d', 'c', 0.7),
}
assert set(out_matrix) == expected_similarities
class TestPairsTable(object):
def test_count(self, example_matrix):
counts = list(example_matrix.count(100000))
expected = [(0.6, 2),
(0.7, 1),
(0.9, 1)]
assert_array_almost_equal(counts, expected, 6)
def test_count_rawscore(self, example_matrix):
counts = list(example_matrix.count(100000, True))
expected = [(39321, 2),
(45874, 1),
(58981, 1)]
assert_array_almost_equal(counts, expected, 6)
def test_count_multiframe(self, example_matrix):
counts = list(example_matrix.count(2))
expected = [(0.6, 2),
(0.7, 1),
(0.9, 1)]
assert_array_almost_equal(counts, expected, 6)
```
#### File: python-modified-tanimoto/tests/test_modifiedtanimoto.py
```python
from __future__ import absolute_import
import pytest
from pyroaring import BitMap
from kripodb import modifiedtanimoto
def assert_similarities(result, expected):
result = sorted(result)
expected = sorted(expected)
assert len(result) == len(expected)
for i, r in enumerate(result):
assert r[0] == expected[i][0]
assert r[1] == expected[i][1]
        assert r[2] == pytest.approx(expected[i][2])
class TestAlgorithm(object):
number_of_bits = None
corr_st = None
corr_sto = None
def setup(self):
self.number_of_bits = 100
self.corr_st = 0.663333333333
self.corr_sto = 0.336666666667
def test_calc_mean_onbit_density(self):
bitsets = {
'a': BitMap([1, 2, 3]),
'b': BitMap([1, 2, 4, 5, 8]),
'c': BitMap([1, 2, 4, 8])
}
result = modifiedtanimoto.calc_mean_onbit_density(bitsets.values(), self.number_of_bits)
expected = 0.04
assert result == expected
def test_corrections(self):
corr_st, corr_sto = modifiedtanimoto.corrections(0.01)
        assert corr_st == pytest.approx(0.663333333333)
        assert corr_sto == pytest.approx(0.336666666667)
def test_similarity(self):
bitset1 = BitMap([1, 2, 3])
bitset2 = BitMap([1, 2, 4, 8])
result = modifiedtanimoto.similarity(bitset1, bitset2,
self.number_of_bits,
self.corr_st, self.corr_sto)
expected = 0.5779523809525572
        assert result == pytest.approx(expected)
def test_similarities_ignore_upper_triangle(self):
bitsets = {
'a': BitMap([1, 2, 3]),
'b': BitMap([1, 2, 4, 5, 8]),
'c': BitMap([1, 2, 4, 8])
}
iterator = modifiedtanimoto.similarities(bitsets, bitsets,
self.number_of_bits,
self.corr_st, self.corr_sto,
0.55, True)
result = [r for r in iterator]
expected = [
('a', 'c', 0.5779523809525572),
('b', 'c', 0.8357708333333689)]
        # pair a-b is below cutoff with similarity of 0.53
assert_similarities(result, expected)
def test_similarities(self):
bitsets = {
'a': BitMap([1, 2, 3]),
'b': BitMap([1, 2, 4, 5, 8]),
'c': BitMap([1, 2, 4, 8])
}
iterator = modifiedtanimoto.similarities(bitsets, bitsets,
self.number_of_bits,
self.corr_st, self.corr_sto,
0.55, False)
result = [r for r in iterator]
expected = [
('a', 'c', 0.5779523809525572),
('c', 'a', 0.5779523809525572),
('c', 'b', 0.8357708333333689),
('b', 'c', 0.8357708333333689)]
        # pair a-b is below cutoff with similarity of 0.53
assert_similarities(result, expected)
@pytest.mark.parametrize("bitset2,expected_score", (
(BitMap((1, 2, 3, 4)), 1.0),
(BitMap((5, 6, 7, 8)), 0.33),
(BitMap((3, 4, 5, 6)), 0.55),
(BitMap((1, 2, 5, 6)), 0.55),
(BitMap((2, 3, 4, 5)), 0.73),
(BitMap((1, 5, 6, 7)), 0.424),
(BitMap((1, 2, 3, 4, 5)), 0.86),
(BitMap((1, 2, 3)), 0.83),
))
def test_similarity_numberofbits400(bitset2, expected_score):
number_of_bits = 400
corr_st, corr_sto = modifiedtanimoto.corrections(0.01)
bitset1 = BitMap([1, 2, 3, 4])
result = modifiedtanimoto.similarity(bitset1, bitset2,
number_of_bits,
corr_st, corr_sto)
assert result == pytest.approx(expected_score, rel=1e-2)
```
#### File: python-modified-tanimoto/tests/test_pdb.py
```python
from __future__ import absolute_import
from six import StringIO, BytesIO
from mock import patch
import pytest
from kripodb.pdb import PdbReport
@pytest.fixture
def mock_fetch_response():
mresponse = BytesIO()
mresponse.write(b'structureId,chainId,structureTitle,compound,ecNo,uniprotAcc,uniprotRecommendedName\n')
mresponse.write(
b'"104L","B","HOW AMINO-ACID INSERTIONS ARE ALLOWED IN AN ALPHA-HELIX OF T4 LYSOZYME","T4 LYSOZYME","3.2.1.17","P00720","Endolysin"\n')
mresponse.write(b'"12E8","H","2E8 FAB FRAGMENT","IGG1-KAPPA 2E8 FAB (HEAVY CHAIN)","","",""\n')
mresponse.seek(0)
return mresponse
class TestPdbReport(object):
def test_url_default(self):
pdb_report = PdbReport()
url = pdb_report.url
expected = 'http://www.rcsb.org/pdb/rest/customReport?' \
'pdbids=*&' \
'customReportColumns=structureTitle,compound,ecNo,uniprotAcc,uniprotRecommendedName&' \
'format=csv&service=wsfile'
assert url == expected
def test_url_custom(self):
pdbids = ['1kvm', '2mbs']
fields = ['resolution']
pdb_report = PdbReport(pdbids, fields)
url = pdb_report.url
expected = 'http://www.rcsb.org/pdb/rest/customReport?' \
'pdbids=1kvm,2mbs&' \
'customReportColumns=resolution&' \
'format=csv&service=wsfile'
assert url == expected
@patch('kripodb.pdb.urlopen')
def test_fetch(self, mocked_urlopen, mock_fetch_response):
mocked_urlopen.return_value = mock_fetch_response
pdb_report = PdbReport(['104L', '12E8'])
pdbs = list(pdb_report.fetch())
expected = [{
'chainId': 'B',
'structureId': '104L',
'structureTitle': 'HOW AMINO-ACID INSERTIONS ARE ALLOWED IN AN ALPHA-HELIX OF T4 LYSOZYME',
'ecNo': '3.2.1.17',
'uniprotAcc': 'P00720',
'compound': 'T4 LYSOZYME',
'uniprotRecommendedName': 'Endolysin'
}, {
'chainId': 'H',
'structureId': '12E8',
'structureTitle': '2E8 FAB FRAGMENT',
'ecNo': None,
'uniprotAcc': None,
'compound': 'IGG1-KAPPA 2E8 FAB (HEAVY CHAIN)',
'uniprotRecommendedName': None
}]
assert pdbs == expected
``` |
{
"source": "3demax/elfie_remote_control",
"score": 3
} |
#### File: elfie_remote_control/tests/test_drone.py
```python
import unittest
import nose2
from unittest import expectedFailure
from mock import Mock, MagicMock
import drone
class DroneProtocolMathTestCase(unittest.TestCase):
def test_denormalization(self):
denorm = drone._denormalize_
self.assertEqual(denorm(-1), 0)
self.assertEqual(denorm(-0.5), 64)
self.assertEqual(denorm(0), 128)
self.assertEqual(denorm(0.5), 191)
self.assertEqual(denorm(1), 255)
class AtGroundDroneProtocolTestCase(unittest.TestCase):
# @expectedFailure
def test_idle(self):
expected_command = '6680800080008099'.decode('hex')
command = drone.get_command_string(
roll=0.0, pitch=0.0, throttle=0.0, yaw=0.0, command=None,
altitude_hold=False
)
self.assertEqual(command, expected_command)
def test_idle_altitude_hold(self):
expected_command = '6680808080000099'.decode('hex')
command = drone.get_command_string(
roll=0.0, pitch=0.0, throttle=0.0, yaw=0.0, command=None,
)
self.assertEqual(command, expected_command)
def test_spin_up_altitude_hold(self):
expected_command = '6680808080010199'.decode('hex')
command = drone.get_command_string(
roll=0.0, pitch=0.0, throttle=0.0, yaw=0.0, command='spin_up',
)
self.assertEqual(command, expected_command)
def test_shut_engines_altitude_hold(self):
expected_command = '6680808080040499'.decode('hex')
command = drone.get_command_string(
roll=0.0, pitch=0.0, throttle=0.0, yaw=0.0, command='shut_off',
)
self.assertEqual(command, expected_command)
@expectedFailure
def test_land_altitude_hold(self):
expected_command = '00'.decode('hex')
command = drone.get_command_string(
roll=0.0, pitch=0.0, throttle=0.0, yaw=0.0, command='land',
)
self.assertEqual(command, expected_command)
class InFlightDroneProtocolTestCase(unittest.TestCase):
""" these values are from adria's post:
https://hackaday.io/project/19680-controlling-a-jjrc-h37-elfie-quad-from-a-pc/log/\
53557-a-basic-script-to-monitor-the-controller-input-sent-to-the-quadcopter-by-udp
"""
def test_flight(self):
# 66807f0180007e99 roll:128 pitch:127 throttle:1 yaw:128 commands:00000000 err:7e
expected_command = '66807f0180007e99'
command = drone.get_command_string(
roll=0.0, pitch=-0.004, throttle=0.004, yaw=0.0, command=None,
altitude_hold=False
).encode('hex')
self.assertEqual(command, expected_command)
# 666b840080006f99 roll:107 pitch:132 throttle:0 yaw:128 commands:00000000 err:6f
expected_command = '666b840080006f99'
command = drone.get_command_string(
roll=-0.16, pitch=0.035, throttle=0.0, yaw=0.0, command=None,
altitude_hold=False
).encode('hex')
self.assertEqual(command, expected_command)
``` |
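Read together, the expected byte strings document the control packet: a `0x66` header, four axis bytes produced by `_denormalize_`, a command/flag byte, an XOR checksum over those five payload bytes, and a `0x99` trailer. Below is a hedged reconstruction (in Python 3 syntax, unlike the Python 2 tests above) that reproduces those vectors; it is not the real `drone` module, and the altitude-hold throttle handling is simplified to what the test values suggest:
```python
def denormalize(value):
    # -1.0 -> 0, 0.0 -> 128, 1.0 -> 255 (same values as DroneProtocolMathTestCase)
    return int(128 + value * 127.5)


def build_command(roll, pitch, throttle, yaw, command_byte=0x00, altitude_hold=True):
    if altitude_hold:
        throttle_byte = denormalize(throttle)   # throttle treated as a centered rate
    else:
        throttle_byte = int(throttle * 255)     # absolute throttle in [0, 1]
    payload = [denormalize(roll), denormalize(pitch), throttle_byte,
               denormalize(yaw), command_byte]
    checksum = 0
    for byte in payload:
        checksum ^= byte
    return bytes([0x66] + payload + [checksum, 0x99])


assert build_command(0, 0, 0, 0, altitude_hold=False).hex() == '6680800080008099'
assert build_command(0, 0, 0, 0).hex() == '6680808080000099'
assert build_command(0, 0, 0, 0, command_byte=0x01).hex() == '6680808080010199'
```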
{
"source": "3DExtended/RaptorBot",
"score": 3
} |
#### File: core/data/TradeData.py
```python
from dataclasses import dataclass
@dataclass
class TradeData:
eventTime: int
symbol: str
tradeId: int
price: str
quantity: str
buyerOrderId: int
sellerOrderId: int
tradeTime: int
isBuyerMarketMaker: bool # If isBuyerMarketMaker is true then that means a seller fulfilled a buy order. The buyer created the order and waited for someone to fulfill it.
ignore: bool
def __init__(self, data: dict, sourceBinance: bool):
if (sourceBinance == True):
# If this assertion fails, this means that the binance api updated.
# Please update this mapping to the newest API version.
assert(len(data.keys()) - 1 == 10)
self.eventTime = data["E"]
self.symbol = data["s"]
self.tradeId = data["t"]
self.price = data["p"]
self.quantity = data["q"]
self.buyerOrderId = data["b"]
self.sellerOrderId = data["a"]
self.tradeTime = data["T"]
self.isBuyerMarketMaker = data["m"]
self.ignore = data["M"]
``` |
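The constructor simply renames Binance's single-letter stream keys, and the assertion guards against the upstream payload gaining or losing fields. A hedged example with a made-up trade-event payload; the `"e"` event-type key is assumed to be the one excluded by the `len(data.keys()) - 1` count, and the import path follows the file location above:
```python
from core.data.TradeData import TradeData

sample_event = {
    "e": "trade",            # event type (not mapped onto the dataclass)
    "E": 1672515782136,      # event time
    "s": "BTCUSDT",
    "t": 12345,              # trade id
    "p": "16500.10",
    "q": "0.002",
    "b": 88,                 # buyer order id
    "a": 50,                 # seller order id
    "T": 1672515782134,      # trade time
    "m": True,               # buyer is market maker
    "M": True,               # ignore
}

trade = TradeData(sample_event, sourceBinance=True)
print(trade.symbol, trade.price, trade.isBuyerMarketMaker)
```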
{
"source": "3D-Face/3DDFA_V2",
"score": 2
} |
#### File: 3DDFA_V2/utils/tddfa_util.py
```python
__author__ = 'cleardusk'
import sys
sys.path.append('..')
import argparse
import numpy as np
import torch
from bfm import bfm
# use global for accelerating
u_base, w_shp_base, w_exp_base = bfm.u_base, bfm.w_shp_base, bfm.w_exp_base
u, w_shp, w_exp = bfm.u, bfm.w_shp, bfm.w_exp
def str2bool(v):
if v.lower() in ('yes', 'true', 't', 'y', '1'):
return True
elif v.lower() in ('no', 'false', 'f', 'n', '0'):
return False
else:
raise argparse.ArgumentTypeError('Boolean value expected')
def load_model(model, checkpoint_fp):
checkpoint = torch.load(checkpoint_fp, map_location=lambda storage, loc: storage)['state_dict']
model_dict = model.state_dict()
# because the model is trained by multiple gpus, prefix module should be removed
for k in checkpoint.keys():
kc = k.replace('module.', '')
if kc in model_dict.keys():
model_dict[kc] = checkpoint[k]
if kc in ['fc_param.bias', 'fc_param.weight']:
model_dict[kc.replace('_param', '')] = checkpoint[k]
model.load_state_dict(model_dict)
return model
class ToTensorGjz(object):
def __call__(self, pic):
if isinstance(pic, np.ndarray):
img = torch.from_numpy(pic.transpose((2, 0, 1)))
return img.float()
def __repr__(self):
return self.__class__.__name__ + '()'
class NormalizeGjz(object):
def __init__(self, mean, std):
self.mean = mean
self.std = std
def __call__(self, tensor):
tensor.sub_(self.mean).div_(self.std)
return tensor
def similar_transform(pts3d, roi_box, size):
pts3d[0, :] -= 1 # for Python compatibility
pts3d[2, :] -= 1
pts3d[1, :] = size - pts3d[1, :]
sx, sy, ex, ey = roi_box
scale_x = (ex - sx) / size
scale_y = (ey - sy) / size
pts3d[0, :] = pts3d[0, :] * scale_x + sx
pts3d[1, :] = pts3d[1, :] * scale_y + sy
s = (scale_x + scale_y) / 2
pts3d[2, :] *= s
pts3d[2, :] -= np.min(pts3d[2, :])
return np.array(pts3d, dtype=np.float32)
def _parse_param(param):
"""matrix pose form
param: shape=(62,), 62 = 12 + 40 + 10
scale may lie in R or alpha_shp + alpha_exp?
"""
R_ = param[:12].reshape(3, -1)
R = R_[:, :3]
offset = R_[:, -1].reshape(3, 1)
alpha_shp = param[12:52].reshape(-1, 1)
alpha_exp = param[52:].reshape(-1, 1)
return R, offset, alpha_shp, alpha_exp
def recon_sparse(param, roi_box, size):
"""68 3d landmarks reconstruction from 62: matrix pose form"""
R, offset, alpha_shp, alpha_exp = _parse_param(param)
pts3d = R @ (u_base + w_shp_base @ alpha_shp + w_exp_base @ alpha_exp).reshape(3, -1, order='F') + offset
pts3d = similar_transform(pts3d, roi_box, size)
return pts3d
def recon_dense(param, roi_box, size):
"""Dense points reconstruction: 53215 points"""
R, offset, alpha_shp, alpha_exp = _parse_param(param)
pts3d = R @ (u + w_shp @ alpha_shp + w_exp @ alpha_exp).reshape(3, -1, order='F') + offset
pts3d = similar_transform(pts3d, roi_box, size)
return pts3d
``` |
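As the `_parse_param` docstring notes, the 62-dimensional regression output is just a concatenation: 12 values for the 3x4 pose matrix (rotation plus translation offset), 40 shape coefficients and 10 expression coefficients. A small self-contained restatement of that split with a dummy vector, for illustration only:
```python
import numpy as np

param = np.arange(62, dtype=np.float32)       # dummy values, not a real prediction

pose = param[:12].reshape(3, -1)              # 3x4: rotation R (3x3) + offset (3x1)
R, offset = pose[:, :3], pose[:, -1].reshape(3, 1)
alpha_shp = param[12:52].reshape(-1, 1)       # 40 shape coefficients
alpha_exp = param[52:].reshape(-1, 1)         # 10 expression coefficients

print(R.shape, offset.shape, alpha_shp.shape, alpha_exp.shape)
# (3, 3) (3, 1) (40, 1) (10, 1)
```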
{
"source": "3D-FRONT-FUTURE/ObjectDrawer-ToolBox",
"score": 2
} |
#### File: ObjectDrawer-ToolBox/utils/processer.py
```python
import os
# from tools.oss_util import upload_dir
import logging
from datetime import datetime
from collections import OrderedDict
import cv2
import numpy as np
import shutil
import zipfile
from .capture_frame import capture_frame
# from .segmentation_infer.infer import segmentation_infer
def process(video_path):
PIPELINE = OrderedDict([
("VIDEO", video_proc),
("SELECT", select_proc),
("ZIP", zip_proc),
])
video_dir = os.path.dirname(video_path)
case_id = os.path.basename(video_path).split('.')[0]
for _k, _func in PIPELINE.items():
# input config
_config_in = {}
_config_in['video_path'] = video_path
_config_in['root_dir'] = video_dir
_config_in['case_id'] = case_id
# case config setting
_config_in['images_ori_dir'] = os.path.join(
video_dir, case_id, "images_ori")
_config_in['ground_images_dir'] = os.path.join(
video_dir, case_id, "ground_images")
is_success = _func(_config_in)
if not is_success:
print(_k, " Failed.")
return True
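# Illustrative usage sketch (the path is hypothetical): process('/data/case_001.mp4')
# would produce /data/case_001/images_ori, /data/case_001/ground_images and
# /data/case_001.zip via the pipeline above.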
def video_proc(config):
capture_frame(config['video_path'],
config['images_ori_dir'],
sample_rate=4,
static_time=0,
scale='1/1',
img_max_size=1280)
return True
def select_proc(config):
filenames = os.listdir(config['images_ori_dir'])
filenames = [x for x in filenames if "png" in x]
filenames.sort()
# select 3 images to annotate ground
nums_ground = 3
if 'nums_ground' in config.keys():
nums_ground = config['nums_ground']
print("[INFO] Select 3 images.")
if not os.path.exists(config['ground_images_dir']):
os.mkdir(config['ground_images_dir'])
interval_ = len(filenames) / nums_ground
idx_list = [int(x) for x in list(np.arange(0, len(filenames), interval_))]
for i in idx_list:
image_name = filenames[i]
shutil.copy(os.path.join(config['images_ori_dir'], image_name), os.path.join(
config['ground_images_dir'], image_name))
    return True
def zipdir(path, ziph):
# ziph is zipfile handle
for root, dirs, files in os.walk(path):
for file in files:
ziph.write(os.path.join(root, file),
os.path.relpath(os.path.join(root, file),
os.path.join(path, '..')))
def zip_proc(config):
zipf = zipfile.ZipFile(os.path.join(
config['root_dir'], config['case_id'] + ".zip"), 'w', zipfile.ZIP_DEFLATED)
zipdir(config['ground_images_dir'], zipf)
zipf.close()
    return True
```
#### File: ObjectDrawer-ToolBox/utils/visualize.py
```python
import os
import cv2
import numpy as np
def vis_arrangeed_images(current_dir_path, selected_image_names, rows, cols, scale = 4):
h, w = 0, 0
scaled_h, scaled_w = 0, 0
padding = 10
images = []
for image_name in selected_image_names:
image = cv2.imread(os.path.join(current_dir_path, image_name), 1)
h, w, c = image.shape
scaled_h, scaled_w = h // scale, w // scale
image = cv2.resize(image, (scaled_w, scaled_h))
image = cv2.copyMakeBorder(
image, padding, padding, padding, padding, cv2.BORDER_CONSTANT, value=[255, 255, 255])
images.append(image)
scaled_h, scaled_w, c = images[0].shape
vis_image = np.zeros((scaled_h * rows, scaled_w * cols, 3), dtype=np.uint8)
for i in range(rows * cols):
row = i // cols
col = i % cols
vis_image[row * scaled_h: row * scaled_h + scaled_h,
col * scaled_w: col * scaled_w + scaled_w] = images[i]
return vis_image, scaled_h, scaled_w
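# Illustrative usage sketch (file names are hypothetical):
#   vis, h, w = vis_arrangeed_images('/data/case_001/ground_images',
#                                    ['0001.png', '0002.png', '0003.png'],
#                                    rows=1, cols=3)
#   cv2.imwrite('preview.png', vis)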
``` |
{
"source": "3dgiordano/taurus",
"score": 2
} |
#### File: taurus/bzt/cli.py
```python
import copy
import logging
import os
import platform
import shutil
import signal
import sys
import tempfile
import traceback
from logging import Formatter
from optparse import OptionParser, Option
import optparse
import textwrap
from tempfile import NamedTemporaryFile
import yaml
from colorlog import ColoredFormatter
import bzt
from bzt import ManualShutdown, NormalShutdown, RCProvider, AutomatedShutdown
from bzt import TaurusException, ToolError
from bzt import TaurusInternalException, TaurusConfigError, TaurusNetworkError
from bzt.engine import Engine, Configuration, ScenarioExecutor
from bzt.engine import SETTINGS
from bzt.commands import Commands
from bzt.linter import ConfigurationLinter
from bzt.six import HTTPError, string_types, get_stacktrace
from bzt.utils import run_once, is_int, BetterDict, get_full_path, is_url
class CLI(object):
"""
'cli' means 'tool' in hebrew, did you know that?
:param options: OptionParser parsed parameters
"""
console_handler = logging.StreamHandler(sys.stdout)
CLI_SETTINGS = "cli"
def __init__(self, options, from_command=False):
self.signal_count = 0
self.options = options
self.setup_logging(options, from_command=from_command)
self.log = logging.getLogger('')
if not from_command:
self.log.info("Taurus CLI Tool v%s", bzt.VERSION)
self.log.debug("Command-line options: %s", self.options)
self.log.debug("Python: %s %s", platform.python_implementation(), platform.python_version())
self.log.debug("OS: %s", platform.uname())
self.engine = Engine(self.log)
self.exit_code = 0
@staticmethod
@run_once
def setup_logging(options, from_command=False):
"""
Setting up console and file logging, colored if possible
:param options: OptionParser parsed options
:param from_command: When the invocation is from command
"""
colors = {
'WARNING': 'yellow',
'ERROR': 'red',
'CRITICAL': 'bold_red',
}
fmt_file = Formatter("[%(asctime)s %(levelname)s %(name)s] %(message)s")
if from_command:
fmt_verbose = Formatter("%(message)s")
fmt_regular = Formatter("%(message)s")
else:
if sys.stdout.isatty():
fmt_verbose = ColoredFormatter("%(log_color)s[%(asctime)s %(levelname)s %(name)s] %(message)s",
log_colors=colors)
fmt_regular = ColoredFormatter("%(log_color)s%(asctime)s %(levelname)s: %(message)s",
"%H:%M:%S", log_colors=colors)
else:
fmt_verbose = Formatter("[%(asctime)s %(levelname)s %(name)s] %(message)s")
fmt_regular = Formatter("%(asctime)s %(levelname)s: %(message)s", "%H:%M:%S")
logger = logging.getLogger('')
logger.setLevel(logging.DEBUG)
# log everything to file
if options.log is None:
tf = tempfile.NamedTemporaryFile(prefix="bzt_", suffix=".log", delete=False)
tf.close()
os.chmod(tf.name, 0o644)
options.log = tf.name
if options.log:
file_handler = logging.FileHandler(options.log)
file_handler.setLevel(logging.DEBUG)
file_handler.setFormatter(fmt_file)
logger.addHandler(file_handler)
# log something to console
if options.verbose:
CLI.console_handler.setLevel(logging.DEBUG)
CLI.console_handler.setFormatter(fmt_verbose)
elif options.quiet:
CLI.console_handler.setLevel(logging.WARNING)
CLI.console_handler.setFormatter(fmt_regular)
else:
CLI.console_handler.setLevel(logging.INFO)
CLI.console_handler.setFormatter(fmt_regular)
logger.addHandler(CLI.console_handler)
logging.getLogger("requests").setLevel(logging.WARNING) # misplaced?
def __close_log(self):
"""
Close log handlers
:return:
"""
if self.options.log:
# need to finalize the logger before finishing
for handler in self.log.handlers:
if issubclass(handler.__class__, logging.FileHandler):
self.log.debug("Closing log handler: %s", handler.baseFilename)
handler.close()
self.log.handlers.remove(handler)
def __move_log_to_artifacts(self):
"""
Close log handlers, copy log to artifacts dir, recreate file handlers
:return:
"""
if self.options.log:
for handler in self.log.handlers:
if issubclass(handler.__class__, logging.FileHandler):
self.log.debug("Closing log handler: %s", handler.baseFilename)
handler.close()
self.log.handlers.remove(handler)
if os.path.exists(self.options.log):
self.engine.existing_artifact(self.options.log, move=True, target_filename="bzt.log")
self.options.log = os.path.join(self.engine.artifacts_dir, "bzt.log")
file_handler = logging.FileHandler(self.options.log)
file_handler.setLevel(logging.DEBUG)
file_handler.setFormatter(Formatter("[%(asctime)s %(levelname)s %(name)s] %(message)s"))
self.log.addHandler(file_handler)
self.log.debug("Switched writing logs to %s", self.options.log)
def __configure(self, configs):
self.log.info("Starting with configs: %s", configs)
if self.options.no_system_configs is None:
self.options.no_system_configs = False
bzt_rc = os.path.expanduser(os.path.join('~', ".bzt-rc"))
if os.path.exists(bzt_rc):
self.log.debug("Using personal config: %s" % bzt_rc)
else:
self.log.debug("Adding personal config: %s", bzt_rc)
self.log.info("No personal config found, creating one at %s", bzt_rc)
shutil.copy(os.path.join(get_full_path(__file__, step_up=1), 'resources', 'base-bzt-rc.yml'), bzt_rc)
merged_config = self.engine.configure([bzt_rc] + configs, not self.options.no_system_configs)
# apply aliases
for alias in self.options.aliases:
cli_aliases = self.engine.config.get('cli-aliases')
keys = sorted(cli_aliases.keys())
err = TaurusConfigError("'%s' not found in aliases. Available aliases are: %s" % (alias, ", ".join(keys)))
self.engine.config.merge(cli_aliases.get(alias, err))
if self.options.option:
overrider = ConfigOverrider(self.log)
overrider.apply_overrides(self.options.option, self.engine.config)
settings = self.engine.config.get(SETTINGS)
settings.get('verbose', bool(self.options.verbose)) # respect value from config
if self.options.verbose: # force verbosity if cmdline asked for it
settings['verbose'] = True
if settings.get('verbose'):
CLI.console_handler.setLevel(logging.DEBUG)
self.engine.create_artifacts_dir(configs, merged_config)
self.engine.default_cwd = os.getcwd()
self.engine.eval_env() # yacky, I don't like having it here, but how to apply it after aliases and artif dir?
def __lint_config(self):
settings = self.engine.config.get(CLI.CLI_SETTINGS).get("linter")
self.log.debug("Linting config")
self.warn_on_unfamiliar_fields = settings.get("warn-on-unfamiliar-fields", True)
config_copy = copy.deepcopy(self.engine.config)
ignored_warnings = settings.get("ignored-warnings", [])
self.linter = ConfigurationLinter(config_copy, ignored_warnings, self.log)
self.linter.register_checkers()
self.linter.lint()
warnings = self.linter.get_warnings()
for warning in warnings:
self.log.warning(str(warning))
if settings.get("lint-and-exit", False):
if warnings:
raise TaurusConfigError("Errors were found in the configuration")
else:
raise NormalShutdown("Linting has finished, no errors were found")
def _level_down_logging(self):
self.log.debug("Leveling down log file verbosity, use -v option to have DEBUG messages enabled")
for handler in self.log.handlers:
if issubclass(handler.__class__, logging.FileHandler):
handler.setLevel(logging.INFO)
def _level_up_logging(self):
for handler in self.log.handlers:
if issubclass(handler.__class__, logging.FileHandler):
handler.setLevel(logging.DEBUG)
self.log.debug("Leveled up log file verbosity")
def perform(self, configs, sub_args=None, extra_args=None):
"""
Run the tool
:type configs: list
:return: integer exit code
"""
if isinstance(configs, SubCmdOptionParser):
self.evaluate_command(configs, sub_args, extra_args)
else:
url_shorthands = []
jmx_shorthands = []
try:
url_shorthands = self.__get_url_shorthands(configs)
configs.extend(url_shorthands)
jmx_shorthands = self.__get_jmx_shorthands(configs)
configs.extend(jmx_shorthands)
if not self.engine.config.get(SETTINGS).get('verbose', False):
self.engine.logging_level_down = self._level_down_logging
self.engine.logging_level_up = self._level_up_logging
self.__configure(configs)
self.__move_log_to_artifacts()
self.__lint_config()
self.engine.prepare()
self.engine.run()
except BaseException as exc:
self.handle_exception(exc)
finally:
try:
for fname in url_shorthands + jmx_shorthands:
os.remove(fname)
self.engine.post_process()
except BaseException as exc:
self.handle_exception(exc)
self.log.info("Artifacts dir: %s", self.engine.artifacts_dir)
if self.engine.artifacts_dir is None:
self.log.info("Log file: %s", self.options.log)
if self.exit_code:
self.log.warning("Done performing with code: %s", self.exit_code)
else:
self.log.info("Done performing with code: %s", self.exit_code)
self.__close_log()
return self.exit_code
def evaluate_command(self, configs, sub_args, extra_args):
commands = Commands(self.log)
if configs.name == "remote":
if isinstance(sub_args, SubCmdOptionParser):
if sub_args.name == "on":
commands.remote_on()
elif sub_args.name == "off":
commands.remote_off()
elif sub_args.name == "catalog":
commands.remote_catalog()
elif sub_args.name == "attach":
if len(extra_args) == 0:
self.log.error("Specify service_id argument, one or more separated by space")
self.exit_code = 1
else:
service_ids = extra_args
commands.remote_attach(service_ids)
elif sub_args.name == "detach":
if len(extra_args) == 0:
self.log.error("Specify service_id argument, one or more " +
"separated by space or use the keyword '*all'")
self.exit_code = 1
else:
attach_ids = extra_args
commands.remote_detach(attach_ids)
elif sub_args.name == "inspect":
if len(extra_args) == 0:
self.log.error("Specify service_id argument, one or more separated by space")
self.exit_code = 1
else:
attach_ids = extra_args
commands.remote_detach(attach_ids)
elif sub_args.name == "list":
commands.remote_list()
else:
self.log.info("Unparsed sub-command:%s" % sub_args.name)
else:
self.log.info("Unknown Sub Args type")
else:
self.log.info("Unparsed command:%s" % configs.name)
def handle_exception(self, exc):
log_level = {'info': logging.DEBUG, 'http': logging.DEBUG, 'default': logging.DEBUG}
        if not self.exit_code:  # only first exception goes to the screen
log_level['info'] = logging.WARNING
log_level['http'] = logging.ERROR
log_level['default'] = logging.ERROR
if isinstance(exc, RCProvider):
self.exit_code = exc.get_rc()
else:
self.exit_code = 1
if isinstance(exc, KeyboardInterrupt):
self.__handle_keyboard_interrupt(exc, log_level)
log_level['default'] = logging.DEBUG
elif isinstance(exc, TaurusException):
self.__handle_taurus_exception(exc, log_level['default'])
log_level['default'] = logging.DEBUG
elif isinstance(exc, HTTPError):
msg = "Response from %s: [%s] %s %s" % (exc.geturl(), exc.code, exc.reason, exc.read())
self.log.log(log_level['http'], msg)
log_level['default'] = logging.DEBUG
self.log.log(log_level['default'], "%s: %s\n%s", type(exc).__name__, exc, get_stacktrace(exc))
def __handle_keyboard_interrupt(self, exc, log_level):
if isinstance(exc, ManualShutdown):
self.log.log(log_level['info'], "Interrupted by user")
elif isinstance(exc, AutomatedShutdown):
self.log.log(log_level['info'], "Automated shutdown")
elif isinstance(exc, NormalShutdown):
self.log.log(logging.DEBUG, "Shutting down by request from code")
elif isinstance(exc, KeyboardInterrupt):
self.log.log(log_level['info'], "Keyboard interrupt")
else:
msg = "Non-KeyboardInterrupt exception %s: %s\n%s"
raise ValueError(msg % (type(exc), exc, get_stacktrace(exc)))
def __handle_taurus_exception(self, exc, log_level):
if isinstance(exc, TaurusConfigError):
self.log.log(log_level, "Config Error: %s", exc)
elif isinstance(exc, TaurusInternalException):
self.log.log(log_level, "Internal Error: %s", exc)
elif isinstance(exc, ToolError):
self.log.log(log_level, "Child Process Error: %s", exc)
if exc.diagnostics is not None:
for line in exc.diagnostics:
self.log.log(log_level, line)
elif isinstance(exc, TaurusNetworkError):
self.log.log(log_level, "Network Error: %s", exc)
else:
self.log.log(log_level, "Generic Taurus Error: %s", exc)
def __get_jmx_shorthands(self, configs):
"""
Generate json file with execution, executor and scenario settings
:type configs: list
:return: list
"""
jmxes = []
for filename in configs[:]:
if filename.lower().endswith(".jmx"):
jmxes.append(filename)
configs.remove(filename)
if jmxes:
self.log.debug("Adding JMX shorthand config for: %s", jmxes)
fds = NamedTemporaryFile(prefix="jmx_", suffix=".json")
fname = fds.name
fds.close()
config = Configuration()
for jmx_file in jmxes:
config.get(ScenarioExecutor.EXEC, []).append({"executor": "jmeter", "scenario": {"script": jmx_file}})
config.dump(fname, Configuration.JSON)
return [fname]
else:
return []
def __get_url_shorthands(self, configs):
"""
:type configs: list
:return: list
"""
urls = []
for candidate in configs[:]:
if is_url(candidate):
urls.append(candidate)
configs.remove(candidate)
if urls:
self.log.debug("Adding HTTP shorthand config for: %s", urls)
config_fds = NamedTemporaryFile(prefix="http_", suffix=".yml")
fname = config_fds.name
config_fds.close()
config = Configuration()
config.merge({
"execution": [{
"concurrency": "${__tstFeedback(Throughput_Limiter,1,${__P(concurrencyCap,1)},2)}",
"hold-for": "2m",
"throughput": "${__P(throughput,600)}",
"scenario": "linear-growth",
}],
"scenarios": {
"linear-growth": {
"retrieve-resources": False,
"timeout": "5s",
"keepalive": False,
"requests": [{
"action": "pause",
"pause-duration": 0,
"jsr223": [{
"language": "javascript",
"execute": "before",
"script-text": """
var startTime = parseInt(props.get("startTime"));
if (!startTime) {
startTime = Math.floor((new Date()).getTime() / 1000);
props.put("startTime", startTime);
} else {
var now = Math.floor((new Date()).getTime() / 1000);
var offset = now - startTime;
if (offset < 60) {
var targetOffset = Math.max(offset * 10, 10);
props.put("throughput", targetOffset.toString());
}
}"""
}]
}] + urls,
}
},
"modules": {
"jmeter": {
"properties": {
"throughput": 1,
"concurrencyCap": 500,
},
}
}
})
config.dump(fname, Configuration.JSON)
return [fname]
else:
return []
class ConfigOverrider(object):
def __init__(self, logger):
"""
:type logger: logging.Logger
"""
super(ConfigOverrider, self).__init__()
self.log = logger.getChild(self.__class__.__name__)
def apply_overrides(self, options, dest):
"""
Apply overrides
:type options: list[str]
:type dest: BetterDict
"""
for option in options:
name = option[:option.index('=')]
value = option[option.index('=') + 1:]
try:
self.__apply_single_override(dest, name, value)
except:
self.log.debug("Failed override: %s", traceback.format_exc())
self.log.error("Failed to apply override %s=%s", name, value)
raise
dest.dump()
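    # Illustrative sketch of the override syntax handled here (values are hypothetical):
    #   -o modules.jmeter.path=~/jmeter/bin/jmeter   sets a nested dict key
    #   -o execution.0.concurrency=20                indexes into a list
    #   -o scenarios.my-scenario.requests.^0=        deletes the first list item
    # Dotted parts that parse as integers act as list indexes and a leading '^'
    # on the last part marks a deletion, as implemented below.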
def __apply_single_override(self, dest, name, value):
"""
Apply single override
:type name: str
:type value: str
"""
self.log.debug("Applying %s=%s", name, value)
parts = [(int(x) if is_int(x) else x) for x in name.split(".")]
pointer = dest
for index, part in enumerate(parts[:-1]):
self.__ensure_list_capacity(pointer, part, parts[index + 1])
if isinstance(part, int):
if part < 0:
if isinstance(parts[index + 1], int):
pointer.append([])
else:
pointer.append(BetterDict())
pointer = pointer[-1]
else:
pointer = pointer[part]
elif isinstance(parts[index + 1], int) and isinstance(pointer, dict):
pointer = pointer.get(part, [])
else:
pointer = pointer.get(part)
self.__ensure_list_capacity(pointer, parts[-1])
self.log.debug("Applying: [%s]=%s", parts[-1], value)
if isinstance(parts[-1], string_types) and parts[-1][0] == '^':
item = parts[-1][1:]
if isinstance(pointer, list):
item = int(item)
if -len(pointer) <= item < len(pointer):
del pointer[item]
else:
self.log.debug("No value to delete: %s", item)
elif isinstance(pointer, dict):
if item in pointer:
del pointer[item]
else:
self.log.debug("No value to delete: %s", item)
else:
raise ValueError("Cannot handle override %s in non-iterable type %s" % (item, pointer))
else:
parsed_value = self.__parse_override_value(value)
self.log.debug("Parsed override value: %r -> %r (%s)", value, parsed_value, type(parsed_value))
if isinstance(parsed_value, dict):
dict_value = BetterDict()
dict_value.merge(parsed_value)
parsed_value = dict_value
if isinstance(pointer, list) and parts[-1] < 0:
pointer.append(parsed_value)
else:
pointer[parts[-1]] = parsed_value
@staticmethod
def __parse_override_value(override):
try:
return yaml.load(override)
except BaseException:
return override
def __ensure_list_capacity(self, pointer, part, next_part=None):
"""
Extend pointer list to hold additional item
:type pointer: list
:type part: int
"""
if isinstance(pointer, list) and isinstance(part, int):
while len(pointer) <= part:
self.log.debug("Len %s less than %s", len(pointer), part)
if isinstance(next_part, int):
pointer.append([])
else:
pointer.append(BetterDict())
class OptionParserWithAliases(OptionParser, object):
"""
Decorator that processes short opts as aliases
"""
def __init__(self,
usage=None,
option_list=None,
option_class=Option,
version=None,
conflict_handler="error",
description=None,
formatter=None,
add_help_option=True,
prog=None,
epilog=None):
super(OptionParserWithAliases, self).__init__(
usage=usage, option_list=option_list,
option_class=option_class, version=version,
conflict_handler=conflict_handler, description=description, formatter=formatter,
add_help_option=add_help_option, prog=prog, epilog=epilog)
self.aliases = []
def _process_short_opts(self, rargs, values):
if rargs[0].startswith('-') and len(rargs[0]) > 2:
self.aliases.append(rargs.pop(0)[1:])
else:
return OptionParser._process_short_opts(self, rargs, values)
def parse_args(self, args=None, values=None):
res = OptionParser.parse_args(self, args, values)
res[0].aliases = self.aliases
return res
class SubCmdOptionParser(object):
"""A subcommand of a root command-line application that may be
    invoked by a SubCmdsOptionParser.
"""
def __init__(self, name, parser=None, help="", aliases=()):
"""Creates a new subcommand. name is the primary way to invoke
the subcommand; aliases are alternate names. parser is an
OptionParser responsible for parsing the subcommand's options.
help is a short description of the command. If no parser is
given, it defaults to a new, empty OptionParser.
"""
super(SubCmdOptionParser, self).__init__()
self.name = name
self.parser = parser or OptionParserWithAliases()
self.aliases = aliases
self.help_hint = help
class SubCmdsOptionParser(OptionParserWithAliases):
"""A variant of OptionParser that parses subcommands and their
arguments.
"""
# A singleton command used to give help on other subcommands.
_HelpSubcommand = SubCmdOptionParser('help', optparse.OptionParser(),
help='Give detailed help on a specific command',
)
def __init__(self, *args, **kwargs):
"""Create a new subcommand-aware option parser. All of the
options to OptionParser.__init__ are supported in addition
to subcommands, a sequence of Subcommand objects.
"""
super(SubCmdsOptionParser, self).__init__()
# The sub_command array, with the help command included.
self.sub_commands = list(kwargs.pop('sub_commands', []))
self.sub_commands.append(self._HelpSubcommand)
# A more helpful default usage.
if 'usage' not in kwargs:
kwargs['usage'] = """
%prog COMMAND [ARGS...]
%prog help COMMAND"""
# Super constructor.
OptionParserWithAliases.__init__(self, *args, **kwargs)
# Adjust the help-visible name of each subcommand.
for sub_command in self.sub_commands:
sub_command.parser.prog = '%s %s' % \
(self.get_prog_name(), sub_command.name)
# Our root parser needs to stop on the first unrecognized argument.
self.disable_interspersed_args()
def add_sub_command(self, cmd):
"""Adds a Subcommand object to the parser's list of commands.
"""
self.sub_commands.append(cmd)
# Add the list of subcommands to the help message.
def format_help(self, formatter=None):
# Get the original help message, to which we will append.
out = optparse.OptionParser.format_help(self, formatter)
if formatter is None:
formatter = self.formatter
# Subcommands header.
result = []
if len(self.sub_commands) > 1:
result.append(formatter.format_heading('Commands'))
formatter.indent()
# Generate the display names (including aliases).
# Also determine the help position.
disp_names = []
help_position = 0
for sub_command in self.sub_commands:
if sub_command.name == "help" and len(self.sub_commands) == 1:
continue
name = sub_command.name
if sub_command.aliases:
name += ' (%s)' % ', '.join(sub_command.aliases)
disp_names.append(name)
# Set the help position based on the max width.
proposed_help_position = len(name) + formatter.current_indent + 2
if proposed_help_position <= formatter.max_help_position:
help_position = max(help_position, proposed_help_position)
# Add each subcommand to the output.
for sub_command, name in zip(self.sub_commands, disp_names):
# Lifted directly from optparse.py.
name_width = help_position - formatter.current_indent - 2
if len(name) > name_width:
name = "%*s%s\n" % (formatter.current_indent, "", name)
indent_first = help_position
else:
name = "%*s%-*s " % (formatter.current_indent, "",
name_width, name)
indent_first = 0
result.append(name)
help_width = formatter.width - help_position
help_lines = textwrap.wrap(sub_command.help_hint, help_width)
result.append("%*s%s\n" % (indent_first, "", help_lines[0]))
result.extend(["%*s%s\n" % (help_position, "", line)
for line in help_lines[1:]])
formatter.dedent()
# Concatenate the original help message with the subcommand
# list.
return out + "".join(result)
def _sub_command_for_name(self, name):
"""Return the subcommand in self.subcommands matching the
given name. The name may either be the name of a subcommand or
an alias. If no subcommand matches, returns None.
"""
for sub_command in self.sub_commands:
if name == sub_command.name or \
name in sub_command.aliases:
return sub_command
return None
def parse_args(self, args=None, values=None):
"""Like OptionParser.parse_args, but returns these four items:
- options: the options passed to the root parser
- subcommand: the Subcommand object that was invoked
- suboptions: the options passed to the subcommand parser
- subargs: the positional arguments passed to the subcommand
"""
options, args = super(SubCmdsOptionParser, self).parse_args(args, values)
sub_command = None
sub_options = None
sub_args = None
sub_sub_args = None
if not args:
return options, args, None, None, None
if args:
cmd_name = args[0]
sub_command = self._sub_command_for_name(cmd_name)
if not sub_command:
return options, args, None, None, None
if isinstance(sub_command.parser, SubCmdsOptionParser):
sub_options, sub_args, sub_sub_options, sub_sub_args, extra_sub_args = \
sub_command.parser.parse_args(args[1:])
else:
sub_options, sub_args = sub_command.parser.parse_args(args[1:])
sub_sub_options = None
sub_sub_args = None
extra_sub_args = None
            if extra_sub_args:  # Remove the warning from Codacy
pass
if sub_command is self._HelpSubcommand:
if sub_args:
cmd_name = sub_args[0]
help_command = self._sub_command_for_name(cmd_name)
if help_command:
help_command.parser.print_help()
self.exit()
else:
self.error('Unknown command ' + cmd_name)
else:
self.print_help()
self.exit()
else:
if len(sub_command.parser.sub_commands) > 1:
if not sub_sub_options:
sub_command.parser.print_help()
self.exit()
return options, sub_command, sub_options, sub_args, sub_sub_args
def get_option_parser():
usage = "Usage: bzt [options] [configs] [-aliases]"
dsc = "BlazeMeter Taurus Tool v%s, the configuration-driven test running engine" % bzt.VERSION
sub_commands = list()
# sub_commands.append(SubCmdOptionParser('on',
# SubCmdsOptionParser(
# usage="bzt remote on",
# description="Turn on the remote provisioning mode",
# add_help_option=False
# ),
# help='Turn ON the remote provisioning mode',
# )
# )
# sub_commands.append(SubCmdOptionParser('off',
# SubCmdsOptionParser(
# usage="bzt remote off",
# description="Turn off provisioning mode and release reserved resources",
# add_help_option=False
# ),
# help='Turn OFF the remote provisioning mode',
# )
# )
sub_commands.append(SubCmdOptionParser('catalog',
SubCmdsOptionParser(
usage="bzt remote catalog",
description="List the available services to be attached",
add_help_option=False
),
help='List the available services to be attached',
)
)
sub_commands.append(SubCmdOptionParser('attach',
SubCmdsOptionParser(
usage="bzt remote attach service_id | service_id1 service_id2 ...",
description="Attach a service to Taurus",
add_help_option=False
),
help='Attach a service to Taurus',
)
)
sub_commands.append(SubCmdOptionParser('detach',
SubCmdsOptionParser(
usage="bzt remote detach attach_id | attach_id1 attach_id2 ... | *all",
description="Detach an attached service",
add_help_option=False
),
help='Detach an attached service',
)
)
sub_commands.append(SubCmdOptionParser('list',
SubCmdsOptionParser(
usage="bzt remote list",
description="List services attached to Taurus",
add_help_option=False
),
help='List services attached to Taurus',
)
)
sub_commands.append(SubCmdOptionParser('inspect',
SubCmdsOptionParser(
usage="bzt remote inspect attach_id",
description="Inspect attached service, display detailed information",
add_help_option=False
),
help='Inspect attached service',
)
)
remote_opts = SubCmdsOptionParser(
usage="bzt remote [command] [options]",
description="Provisioning through Remote Services for Selenium and Appium",
sub_commands=sub_commands,
add_help_option=False
)
remote_cmd = SubCmdOptionParser('remote',
remote_opts,
help='Provisioning through Remote Services for Selenium and Appium',
)
parser = SubCmdsOptionParser(usage=usage, description=dsc, prog="bzt",
sub_commands=(remote_cmd,))
# parser = OptionParserWithAliases(usage=usage, description=dsc, prog="bzt")
parser.add_option('-l', '--log', action='store', default=None,
help="Log file location")
parser.add_option('-o', '--option', action='append',
help="Override option in config")
parser.add_option('-q', '--quiet', action='store_true',
help="Only errors and warnings printed to console")
parser.add_option('-v', '--verbose', action='store_true',
help="Prints all logging messages to console")
parser.add_option('-n', '--no-system-configs', action='store_true',
help="Skip system and user config files")
return parser
def signal_handler(sig, frame):
"""
required for non-tty python runs to interrupt
:param frame:
:param sig:
"""
del sig, frame
raise ManualShutdown()
def main():
"""
This function is used as entrypoint by setuptools
"""
parser = get_option_parser()
parsed_options, parsed_configs, parsed_suboptions, parsed_subargs, parsed_extra_args = parser.parse_args()
    if parsed_suboptions:  # Remove the warning from Codacy
pass
from_command = False
if isinstance(parsed_configs, SubCmdOptionParser):
from_command = True
executor = CLI(parsed_options, from_command=from_command)
try:
code = executor.perform(parsed_configs, parsed_subargs, parsed_extra_args)
except BaseException as exc_top:
logging.error("%s: %s", type(exc_top).__name__, exc_top)
logging.debug("Exception: %s", traceback.format_exc())
code = 1
exit(code)
if __name__ == "__main__":
signal.signal(signal.SIGINT, signal_handler)
signal.signal(signal.SIGTERM, signal_handler)
main()
```
#### File: bzt/jmx/tools.py
```python
import json
import os
import traceback
from distutils.version import LooseVersion
from bzt import TaurusInternalException, TaurusConfigError
from bzt.engine import Scenario
from bzt.jmx import JMX
from bzt.requests_model import RequestVisitor, has_variable_pattern
from bzt.six import etree, iteritems, numeric_types
from bzt.utils import BetterDict, dehumanize_time, ensure_is_dict, get_host_ips, get_full_path, guess_csv_dialect
class RequestCompiler(RequestVisitor):
def __init__(self, jmx_builder):
super(RequestCompiler, self).__init__()
self.jmx_builder = jmx_builder
def visit_hierarchichttprequest(self, request):
return self.jmx_builder.compile_http_request(request)
def visit_ifblock(self, block):
return self.jmx_builder.compile_if_block(block)
def visit_loopblock(self, block):
return self.jmx_builder.compile_loop_block(block)
def visit_whileblock(self, block):
return self.jmx_builder.compile_while_block(block)
def visit_foreachblock(self, block):
return self.jmx_builder.compile_foreach_block(block)
def visit_transactionblock(self, block):
return self.jmx_builder.compile_transaction_block(block)
def visit_includescenarioblock(self, block):
scenario_name = block.scenario_name
if scenario_name in self.path:
msg = "Mutual recursion detected in include-scenario blocks (scenario %s)"
raise TaurusConfigError(msg % scenario_name)
self.record_path(scenario_name)
return self.jmx_builder.compile_include_scenario_block(block)
def visit_actionblock(self, block):
return self.jmx_builder.compile_action_block(block)
def visit_setvariables(self, block):
return self.jmx_builder.compile_set_variables_block(block)
class AbstractThreadGroup(object):
XPATH = None
RAMP_UP_SEL = None
CONCURRENCY_SEL = None
def __init__(self, element, logger):
self.element = element
self.gtype = self.__class__.__name__
self.log = logger.getChild(self.gtype)
def get_testname(self):
return self.element.get('testname')
def set_concurrency(self, concurrency=None):
self.log.warning('Setting of concurrency for %s not implemented', self.gtype)
def set_ramp_up(self, ramp_up=None):
self.log.warning('Setting of ramp-up for %s not implemented', self.gtype)
def get_duration(self):
"""
task duration or None if getting isn't possible (skipped, timeless, jmeter variables, etc.)
"""
self.log.warning('Getting of duration for %s not implemented', self.gtype)
def get_rate(self, pure=False):
self.log.warning('Getting of rate for %s not implemented', self.gtype)
def get_iterations(self):
"""
iterations number or None if getting isn't possible (skipped, unsupported, jmeter variables, etc.)
Note: ConcurrencyThreadGroup and ArrivalsThreadGroup aren't stopped by iterations limit
"""
self.log.warning('Getting of iterations for %s not implemented', self.gtype)
def get_ramp_up(self, pure=False):
if not self.RAMP_UP_SEL:
self.log.warning('Getting of ramp-up for %s not implemented', self.gtype)
return 1
return self._get_val(self.RAMP_UP_SEL, name='ramp-up', default=0, pure=pure)
def get_concurrency(self, pure=False):
if not self.CONCURRENCY_SEL:
self.log.warning('Getting of concurrency for %s not implemented', self.gtype)
return 1
return self._get_val(self.CONCURRENCY_SEL, name='concurrency', default=1, pure=pure)
def _get_val(self, selector, name='', default=None, convertor=int, pure=False):
element = self.element.find(selector)
if element is None:
string_val = None
else:
string_val = element.text
if pure:
return string_val
try:
return convertor(string_val)
except (ValueError, TypeError):
if default:
msg = "Parsing {param} '{val}' in group '{gtype}' failed, choose {default}"
self.log.warning(msg.format(param=name, val=string_val, gtype=self.gtype, default=default))
return default
def get_on_error(self):
action = self.element.find(".//stringProp[@name='ThreadGroup.on_sample_error']")
if action is not None:
return action.text
class ThreadGroup(AbstractThreadGroup):
XPATH = 'jmeterTestPlan>hashTree>hashTree>ThreadGroup'
CONCURRENCY_SEL = ".//*[@name='ThreadGroup.num_threads']"
def get_duration(self):
sched_sel = ".//*[@name='ThreadGroup.scheduler']"
scheduler = self._get_val(sched_sel, "scheduler", pure=True)
if scheduler == 'true':
duration_sel = ".//*[@name='ThreadGroup.duration']"
return self._get_val(duration_sel, "duration")
elif scheduler == 'false':
ramp_sel = ".//*[@name='ThreadGroup.ramp_time']"
return self._get_val(ramp_sel, "ramp-up")
else:
            msg = 'Getting of duration for %s is impossible due to scheduler: %s'
            self.log.warning(msg, self.gtype, scheduler)
def get_iterations(self):
loop_control_sel = ".//*[@name='LoopController.continue_forever']"
loop_controller = self._get_val(loop_control_sel, name="loop controller", pure=True)
if loop_controller == "false":
loop_sel = ".//*[@name='LoopController.loops']"
return self._get_val(loop_sel, name="loops")
else:
            msg = 'Getting of iterations for %s is impossible due to loop_controller: %s'
            self.log.warning(msg, self.gtype, loop_controller)
class SteppingThreadGroup(AbstractThreadGroup):
XPATH = r'jmeterTestPlan>hashTree>hashTree>kg\.apc\.jmeter\.threads\.SteppingThreadGroup'
CONCURRENCY_SEL = ".//*[@name='ThreadGroup.num_threads']"
class UltimateThreadGroup(AbstractThreadGroup):
XPATH = r'jmeterTestPlan>hashTree>hashTree>kg\.apc\.jmeter\.threads\.UltimateThreadGroup'
# parent of ConcurrencyThreadGroup and ArrivalThreadGroup
class AbstractDynamicThreadGroup(AbstractThreadGroup):
RAMP_UP_SEL = ".//*[@name='RampUp']"
def _get_time_unit(self):
unit_sel = ".//*[@name='Unit']"
return self._get_val(unit_sel, name="unit", pure=True)
def set_ramp_up(self, ramp_up=None):
ramp_up_element = self.element.find(self.RAMP_UP_SEL)
ramp_up_element.text = str(ramp_up)
def get_duration(self):
hold_sel = ".//*[@name='Hold']"
hold = self._get_val(hold_sel, name="hold")
ramp_up = self.get_ramp_up()
# 'empty' means 0 sec, let's detect that
p_hold = self._get_val(hold_sel, name="hold", pure=True)
p_ramp_up = self.get_ramp_up(pure=True)
if hold is None and not p_hold:
hold = 0
if ramp_up is None and not p_ramp_up:
ramp_up = 0
if hold is not None and ramp_up is not None:
result = hold + ramp_up
if self._get_time_unit() == 'M':
result *= 60
return result
def get_iterations(self):
iter_sel = ".//*[@name='Iterations']"
return self._get_val(iter_sel, name="iterations")
class ConcurrencyThreadGroup(AbstractDynamicThreadGroup):
XPATH = r'jmeterTestPlan>hashTree>hashTree>com\.blazemeter\.jmeter\.threads\.concurrency\.ConcurrencyThreadGroup'
CONCURRENCY_SEL = ".//*[@name='TargetLevel']"
def set_concurrency(self, concurrency=None):
concurrency_prop = self.element.find(self.CONCURRENCY_SEL)
concurrency_prop.text = str(concurrency)
class ArrivalsThreadGroup(AbstractDynamicThreadGroup):
XPATH = r'jmeterTestPlan>hashTree>hashTree>com\.blazemeter\.jmeter\.threads\.arrivals\.ArrivalsThreadGroup'
RATE_SEL = ".//*[@name='TargetLevel']"
def get_rate(self, pure=False):
return self._get_val(self.RATE_SEL, name='rate', default=1, pure=pure)
def set_rate(self, rate=None):
rate_prop = self.element.find(self.RATE_SEL)
rate_prop.text = str(rate)
class ThreadGroupHandler(object):
CLASSES = [ThreadGroup, SteppingThreadGroup, UltimateThreadGroup, ConcurrencyThreadGroup, ArrivalsThreadGroup]
def __init__(self, logger):
self.log = logger.getChild(self.__class__.__name__)
def groups(self, jmx):
"""
Get wrappers for thread groups that are enabled
"""
for _class in self.CLASSES:
for group in jmx.get(_class.XPATH):
if group.get("enabled") != "false":
yield _class(group, self.log)
def convert(self, group, target, load, concurrency):
"""
Convert a thread group to ThreadGroup/ConcurrencyThreadGroup for applying of load
"""
msg = "Converting %s (%s) to %s and apply load parameters"
self.log.debug(msg, group.gtype, group.get_testname(), target)
on_error = group.get_on_error()
if target == ThreadGroup.__name__:
new_group_element = JMX.get_thread_group(
concurrency=concurrency,
rampup=load.ramp_up,
hold=load.hold,
iterations=load.iterations,
testname=group.get_testname(),
on_error=on_error)
elif target == ConcurrencyThreadGroup.__name__:
new_group_element = JMX.get_concurrency_thread_group(
concurrency=concurrency,
rampup=load.ramp_up,
hold=load.hold,
steps=load.steps,
testname=group.get_testname(),
on_error=on_error)
else:
self.log.warning('Unsupported preferred thread group: %s', target)
return
group.element.getparent().replace(group.element, new_group_element)
class LoadSettingsProcessor(object):
TG = ThreadGroup.__name__
CTG = ConcurrencyThreadGroup.__name__
def __init__(self, executor):
self.log = executor.log.getChild(self.__class__.__name__)
self.load = executor.get_specific_load()
self.tg = self._detect_thread_group(executor)
self.tg_handler = ThreadGroupHandler(self.log)
def _detect_thread_group(self, executor):
"""
Detect preferred thread group
:param executor:
:return:
"""
tg = self.TG
if not executor.settings.get('force-ctg', True):
return tg
msg = 'Thread group detection: %s, regular ThreadGroup will be used'
if not self.load.duration:
self.log.debug(msg, 'duration not found')
elif self.load.iterations:
self.log.debug(msg, 'iterations are found')
elif not executor.tool:
msg = 'You must set executor tool (%s) for choosing of ConcurrencyThreadGroup'
raise TaurusInternalException(msg % executor.tool_name)
elif not executor.tool.ctg_plugin_installed():
            self.log.warning(msg % 'plugin for ConcurrencyThreadGroup not found')
else:
tg = self.CTG
return tg
def modify(self, jmx):
if not (self.load.iterations or self.load.concurrency or self.load.duration):
self.log.debug('No iterations/concurrency/duration found, thread group modification is skipped')
return
# IMPORTANT: fix groups order as changing of element type changes order of getting of groups
groups = list(self.tg_handler.groups(jmx))
if self.load.concurrency and not isinstance(self.load.concurrency, numeric_types): # property found
for group in groups:
self.tg_handler.convert(group=group, target=self.tg, load=self.load, concurrency=self.load.concurrency)
else:
target_list = zip(groups, self._get_concurrencies(groups))
for group, concurrency in target_list:
self.tg_handler.convert(group=group, target=self.tg, load=self.load, concurrency=concurrency)
if self.load.throughput:
self._add_shaper(jmx)
if self.load.steps and self.tg == self.TG:
self.log.warning("Stepping ramp-up isn't supported for regular ThreadGroup")
def _get_concurrencies(self, groups):
"""
Collect concurrency values and
calculate target concurrency for every thread group
"""
concurrency_list = []
for group in groups:
concurrency_list.append(group.get_concurrency())
if concurrency_list and self.load.concurrency:
total_old_concurrency = sum(concurrency_list) # t_o_c != 0 because of logic of group.get_concurrency()
for idx, concurrency in enumerate(concurrency_list):
part_of_load = 1.0 * self.load.concurrency * concurrency / total_old_concurrency
if part_of_load < 1:
concurrency_list[idx] = 1
else:
concurrency_list[idx] = int(round(part_of_load))
total_new_concurrency = sum(concurrency_list)
leftover = self.load.concurrency - total_new_concurrency
if leftover < 0:
msg = "Had to add %s more threads to maintain thread group proportion"
self.log.warning(msg, -leftover)
elif leftover > 0:
msg = "%s threads left undistributed due to thread group proportion"
self.log.warning(msg, leftover)
return concurrency_list
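    # Worked example for _get_concurrencies above (numbers are illustrative): with
    # two groups of concurrency 2 and 3 and a target load concurrency of 10, the
    # proportions give 10*2/5=4 and 10*3/5=6, so the groups end up with 4 and 6
    # threads and no leftover warning is logged.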
def _add_shaper(self, jmx):
"""
Add shaper
:param jmx: JMX
:return:
"""
if not self.load.duration:
self.log.warning("You must set 'ramp-up' and/or 'hold-for' when using 'throughput' option")
return
etree_shaper = jmx.get_rps_shaper()
if self.load.ramp_up:
jmx.add_rps_shaper_schedule(etree_shaper, 1, self.load.throughput, self.load.ramp_up)
if self.load.hold:
jmx.add_rps_shaper_schedule(etree_shaper, self.load.throughput, self.load.throughput, self.load.hold)
jmx.append(JMeterScenarioBuilder.TEST_PLAN_SEL, etree_shaper)
jmx.append(JMeterScenarioBuilder.TEST_PLAN_SEL, etree.Element("hashTree"))
class JMeterScenarioBuilder(JMX):
"""
Helper to build JMeter test plan from Scenario
:param executor: ScenarioExecutor
:param original: inherited from JMX
"""
def __init__(self, executor, original=None):
super(JMeterScenarioBuilder, self).__init__(original)
self.executor = executor
self.scenario = executor.get_scenario()
self.engine = executor.engine
self.system_props = BetterDict()
self.request_compiler = None
def __gen_managers(self, scenario):
elements = []
if scenario.get("store-cache", True):
elements.append(self._get_cache_mgr())
elements.append(etree.Element("hashTree"))
if scenario.get("store-cookie", True):
elements.append(self._get_cookie_mgr(scenario))
elements.append(etree.Element("hashTree"))
if scenario.get("use-dns-cache-mgr", True):
elements.append(self.get_dns_cache_mgr())
elements.append(etree.Element("hashTree"))
self.system_props.merge({"system-properties": {"sun.net.inetaddr.ttl": 0}})
return elements
@staticmethod
def smart_time(any_time): # FIXME: bad name for the function, does not reflect what it does
try:
smart_time = int(1000 * dehumanize_time(any_time))
except TaurusInternalException:
smart_time = any_time
return smart_time
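    # Illustrative note for smart_time above: dehumanize_time('5s') yields 5.0, so
    # smart_time('5s') returns 5000 (milliseconds); a value dehumanize_time cannot
    # parse (e.g. a JMeter expression such as '${__P(my-timeout)}') is returned
    # unchanged.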
def __gen_defaults(self, scenario):
default_address = scenario.get("default-address", None)
retrieve_resources = scenario.get("retrieve-resources", True)
resources_regex = scenario.get("retrieve-resources-regex", None)
concurrent_pool_size = scenario.get("concurrent-pool-size", 4)
content_encoding = scenario.get("content-encoding", None)
timeout = scenario.get("timeout", None)
timeout = self.smart_time(timeout)
elements = [self._get_http_defaults(default_address, timeout, retrieve_resources,
concurrent_pool_size, content_encoding, resources_regex),
etree.Element("hashTree")]
return elements
def __add_think_time(self, children, req):
think_time = req.priority_option('think-time')
if think_time is not None:
children.append(JMX._get_constant_timer(self.smart_time(think_time)))
children.append(etree.Element("hashTree"))
def __add_extractors(self, children, req):
self.__add_regexp_ext(children, req)
self.__add_json_ext(children, req)
self.__add_jquery_ext(children, req)
self.__add_xpath_ext(children, req)
def __add_regexp_ext(self, children, req):
extractors = req.config.get("extract-regexp", BetterDict())
for varname in extractors:
cfg = ensure_is_dict(extractors, varname, "regexp")
extractor = JMX._get_extractor(varname, cfg.get('subject', 'body'), cfg['regexp'], cfg.get('template', 1),
cfg.get('match-no', 1), cfg.get('default', 'NOT_FOUND'))
children.append(extractor)
children.append(etree.Element("hashTree"))
def __add_json_ext(self, children, req):
jextractors = req.config.get("extract-jsonpath", BetterDict())
for varname in jextractors:
cfg = ensure_is_dict(jextractors, varname, "jsonpath")
if LooseVersion(str(self.executor.settings["version"])) < LooseVersion("3.0"):
extractor = JMX._get_json_extractor(varname,
cfg["jsonpath"],
cfg.get("default", "NOT_FOUND"),
cfg.get("from-variable", None))
else:
extractor = JMX._get_internal_json_extractor(varname,
cfg["jsonpath"],
cfg.get("default", "NOT_FOUND"),
cfg.get("scope", None),
cfg.get("from-variable", None),
cfg.get("match-no", "-1"),
cfg.get("concat", False))
children.append(extractor)
children.append(etree.Element("hashTree"))
def __add_jquery_ext(self, children, req):
css_jquery_extors = req.config.get("extract-css-jquery", BetterDict())
for varname in css_jquery_extors:
cfg = ensure_is_dict(css_jquery_extors, varname, "expression")
extractor = self._get_jquerycss_extractor(varname, cfg['expression'], cfg.get('attribute', ""),
cfg.get('match-no', 0), cfg.get('default', 'NOT_FOUND'))
children.append(extractor)
children.append(etree.Element("hashTree"))
def __add_xpath_ext(self, children, req):
xpath_extractors = req.config.get("extract-xpath", BetterDict())
for varname in xpath_extractors:
cfg = ensure_is_dict(xpath_extractors, varname, "xpath")
children.append(JMX._get_xpath_extractor(varname,
cfg['xpath'],
cfg.get('default', 'NOT_FOUND'),
cfg.get('validate-xml', False),
cfg.get('ignore-whitespace', True),
cfg.get('use-tolerant-parser', False)))
children.append(etree.Element("hashTree"))
@staticmethod
def __add_assertions(children, req):
assertions = req.config.get("assert", [])
for idx, assertion in enumerate(assertions):
assertion = ensure_is_dict(assertions, idx, "contains")
if not isinstance(assertion['contains'], list):
assertion['contains'] = [assertion['contains']]
children.append(JMX._get_resp_assertion(assertion.get("subject", Scenario.FIELD_BODY),
assertion['contains'],
assertion.get('regexp', True),
assertion.get('not', False),
assertion.get('assume-success', False)))
children.append(etree.Element("hashTree"))
jpath_assertions = req.config.get("assert-jsonpath", [])
for idx, assertion in enumerate(jpath_assertions):
assertion = ensure_is_dict(jpath_assertions, idx, "jsonpath")
exc = TaurusConfigError('JSON Path not found in assertion: %s' % assertion)
component = JMX._get_json_path_assertion(assertion.get('jsonpath', exc),
assertion.get('expected-value', ''),
assertion.get('validate', False),
assertion.get('expect-null', False),
assertion.get('invert', False),
assertion.get('regexp', True))
children.append(component)
children.append(etree.Element("hashTree"))
xpath_assertions = req.config.get("assert-xpath", [])
for idx, assertion in enumerate(xpath_assertions):
assertion = ensure_is_dict(xpath_assertions, idx, "xpath")
exc = TaurusConfigError('XPath not found in assertion: %s' % assertion)
component = JMX._get_xpath_assertion(assertion.get('xpath', exc),
assertion.get('validate-xml', False),
assertion.get('ignore-whitespace', True),
assertion.get('use-tolerant-parser', False),
assertion.get('invert', False))
children.append(component)
children.append(etree.Element("hashTree"))
@staticmethod
def __add_jsr_elements(children, req):
"""
:type children: etree.Element
:type req: Request
"""
jsrs = req.config.get("jsr223", [])
if not isinstance(jsrs, list):
jsrs = [jsrs]
for idx, _ in enumerate(jsrs):
jsr = ensure_is_dict(jsrs, idx, default_key='script-text')
lang = jsr.get("language", "groovy")
script_file = jsr.get("script-file", None)
script_text = jsr.get("script-text", None)
if not script_file and not script_text:
raise TaurusConfigError("jsr223 element must specify one of 'script-file' or 'script-text'")
parameters = jsr.get("parameters", "")
execute = jsr.get("execute", "after")
children.append(JMX._get_jsr223_element(lang, script_file, parameters, execute, script_text))
children.append(etree.Element("hashTree"))
def _get_merged_ci_headers(self, req, header):
def dic_lower(dic):
return {k.lower(): dic[k].lower() for k in dic}
ci_scenario_headers = dic_lower(self.scenario.get_headers())
ci_request_headers = dic_lower(req.headers)
headers = BetterDict()
headers.merge(ci_scenario_headers)
headers.merge(ci_request_headers)
if header.lower() in headers:
return headers[header]
else:
return None
def __gen_requests(self, scenario):
requests = scenario.get_requests()
elements = []
for compiled in self.compile_requests(requests):
elements.extend(compiled)
return elements
def compile_scenario(self, scenario):
elements = []
elements.extend(self.__gen_managers(scenario))
elements.extend(self.__gen_defaults(scenario))
elements.extend(self.__gen_datasources(scenario))
elements.extend(self.__gen_requests(scenario))
return elements
def compile_http_request(self, request):
"""
:type request: HierarchicHTTPRequest
:return:
"""
timeout = request.priority_option('timeout')
if timeout is not None:
timeout = self.smart_time(timeout)
content_type = self._get_merged_ci_headers(request, 'content-type')
if content_type == 'application/json' and isinstance(request.body, (dict, list)):
body = json.dumps(request.body)
else:
body = request.body
use_random_host_ip = request.priority_option('random-source-ip', default=False)
host_ips = get_host_ips(filter_loopbacks=True) if use_random_host_ip else []
http = JMX._get_http_request(request.url, request.label, request.method, timeout, body,
request.priority_option('keepalive', default=True),
request.upload_files, request.content_encoding,
request.priority_option('follow-redirects', default=True),
use_random_host_ip, host_ips)
children = etree.Element("hashTree")
if request.headers:
children.append(JMX._get_header_mgr(request.headers))
children.append(etree.Element("hashTree"))
self.__add_think_time(children, request)
self.__add_assertions(children, request)
if timeout is not None:
children.append(JMX._get_dur_assertion(timeout))
children.append(etree.Element("hashTree"))
self.__add_extractors(children, request)
self.__add_jsr_elements(children, request)
return [http, children]
def compile_if_block(self, block):
elements = []
# TODO: pass jmeter IfController options
if_controller = JMX._get_if_controller(block.condition)
then_children = etree.Element("hashTree")
for compiled in self.compile_requests(block.then_clause):
for element in compiled:
then_children.append(element)
elements.extend([if_controller, then_children])
if block.else_clause:
inverted_condition = "!(" + block.condition + ")"
else_controller = JMX._get_if_controller(inverted_condition)
else_children = etree.Element("hashTree")
for compiled in self.compile_requests(block.else_clause):
for element in compiled:
else_children.append(element)
elements.extend([else_controller, else_children])
return elements
def compile_loop_block(self, block):
elements = []
loop_controller = JMX._get_loop_controller(block.loops)
children = etree.Element("hashTree")
for compiled in self.compile_requests(block.requests):
for element in compiled:
children.append(element)
elements.extend([loop_controller, children])
return elements
def compile_while_block(self, block):
elements = []
controller = JMX._get_while_controller(block.condition)
children = etree.Element("hashTree")
for compiled in self.compile_requests(block.requests):
for element in compiled:
children.append(element)
elements.extend([controller, children])
return elements
def compile_foreach_block(self, block):
"""
:type block: ForEachBlock
"""
elements = []
controller = JMX._get_foreach_controller(block.input_var, block.loop_var)
children = etree.Element("hashTree")
for compiled in self.compile_requests(block.requests):
for element in compiled:
children.append(element)
elements.extend([controller, children])
return elements
def compile_transaction_block(self, block):
elements = []
controller = JMX._get_transaction_controller(block.name, block.priority_option('force-parent-sample', True))
children = etree.Element("hashTree")
for compiled in self.compile_requests(block.requests):
for element in compiled:
children.append(element)
elements.extend([controller, children])
return elements
def compile_include_scenario_block(self, block):
elements = []
controller = JMX._get_simple_controller(block.scenario_name)
children = etree.Element("hashTree")
scenario = self.executor.get_scenario(name=block.scenario_name)
for element in self.compile_scenario(scenario):
children.append(element)
elements.extend([controller, children])
return elements
def compile_action_block(self, block):
"""
:type block: ActionBlock
:return:
"""
actions = {
'stop': 0,
'pause': 1,
'stop-now': 2,
'continue': 3,
}
targets = {'current-thread': 0, 'all-threads': 2}
action = actions[block.action]
target = targets[block.target]
duration = 0
if block.duration is not None:
duration = int(block.duration * 1000)
test_action = JMX._get_action_block(action, target, duration)
children = etree.Element("hashTree")
self.__add_jsr_elements(children, block)
return [test_action, children]
def compile_set_variables_block(self, block):
# pause current thread for 0s
test_action = JMX._get_action_block(action_index=1, target_index=0, duration_ms=0)
children = etree.Element("hashTree")
fmt = "vars.put('%s', %r);"
block.config["jsr223"] = [{
"language": "groovy",
"execute": "before",
"script-text": "\n".join(fmt % (var, expr) for var, expr in iteritems(block.mapping))
}]
self.__add_jsr_elements(children, block)
return [test_action, children]
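    # Illustrative note for compile_set_variables_block above: a mapping such as
    # {'foo': 'bar'} produces the Groovy line "vars.put('foo', 'bar');", since %r
    # quotes the value in the generated script text.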
def compile_requests(self, requests):
if self.request_compiler is None:
self.request_compiler = RequestCompiler(self)
compiled = []
for request in requests:
compiled.append(self.request_compiler.visit(request))
self.request_compiler.clear_path_cache()
return compiled
def __generate(self):
"""
Generate the test plan
"""
thread_group = JMX.get_thread_group(testname=self.executor.label)
thread_group_ht = etree.Element("hashTree", type="tg")
# NOTE: set realistic dns-cache and JVM prop by default?
self.request_compiler = RequestCompiler(self)
for element in self.compile_scenario(self.scenario):
thread_group_ht.append(element)
results_tree = self._get_results_tree()
results_tree_ht = etree.Element("hashTree")
self.append(self.TEST_PLAN_SEL, thread_group)
self.append(self.TEST_PLAN_SEL, thread_group_ht)
self.append(self.TEST_PLAN_SEL, results_tree)
self.append(self.TEST_PLAN_SEL, results_tree_ht)
def save(self, filename):
"""
Generate test plan and save
:type filename: str
"""
# NOTE: bad design, as repetitive save will duplicate stuff
self.__generate()
super(JMeterScenarioBuilder, self).save(filename)
def __gen_datasources(self, scenario):
sources = scenario.get("data-sources", [])
if not sources:
return []
if not isinstance(sources, list):
raise TaurusConfigError("data-sources '%s' is not a list" % sources)
elements = []
for idx, source in enumerate(sources):
source = ensure_is_dict(sources, idx, "path")
source_path = source["path"]
delimiter = source.get('delimiter', None)
if has_variable_pattern(source_path):
msg = "Path to CSV contains JMeter variable/function, can't check for file existence: %s"
self.log.warning(msg, source_path)
if not delimiter:
delimiter = ','
self.log.warning("Can't detect CSV dialect, default delimiter will be '%s'", delimiter)
else:
modified_path = self.executor.engine.find_file(source_path)
if not os.path.isfile(modified_path):
raise TaurusConfigError("data-sources path not found: %s" % modified_path)
if not delimiter:
delimiter = self.__guess_delimiter(modified_path)
source_path = get_full_path(modified_path)
config = JMX._get_csv_config(source_path, delimiter, source.get("quoted", False), source.get("loop", True),
source.get("variable-names", ""))
elements.append(config)
elements.append(etree.Element("hashTree"))
return elements
def __guess_delimiter(self, path):
with open(path) as fhd:
header = fhd.read(4096) # 4KB is enough for header
try:
delimiter = guess_csv_dialect(header).delimiter
except BaseException as exc:
self.log.debug(traceback.format_exc())
self.log.warning('CSV dialect detection failed (%s), default delimiter selected (",")', exc)
delimiter = "," # default value
return delimiter
```
#### File: bzt/modules/java.py
```python
import json
import os
import shutil
import subprocess
import time
from os import listdir
from os.path import join
from bzt import ToolError, TaurusConfigError
from bzt.engine import HavingInstallableTools, Scenario
from bzt.modules import SubprocessedExecutor
from bzt.utils import get_full_path, shell_exec, TclLibrary, JavaVM, RequiredTool, MirrorsManager
SELENIUM_DOWNLOAD_LINK = "http://selenium-release.storage.googleapis.com/3.6/" \
"selenium-server-standalone-3.6.0.jar"
SELENIUM_VERSION = "3.6" # FIXME: unused, remove it
JUNIT_DOWNLOAD_LINK = "http://search.maven.org/remotecontent?filepath=junit/junit/" \
"{version}/junit-{version}.jar"
JUNIT_VERSION = "4.12"
JUNIT_MIRRORS_SOURCE = "http://search.maven.org/solrsearch/select?q=g%3A%22junit%22%20AND%20a%3A%22" \
"junit%22%20AND%20v%3A%22{version}%22&rows=20&wt=json".format(version=JUNIT_VERSION)
TESTNG_VERSION = "6.8.5"
TESTNG_DOWNLOAD_LINK = "http://search.maven.org/remotecontent?filepath=org/testng/testng/" \
"{version}/testng-{version}.jar".format(version=TESTNG_VERSION)
HAMCREST_DOWNLOAD_LINK = "http://search.maven.org/remotecontent?filepath=org/hamcrest/hamcrest-core" \
"/1.3/hamcrest-core-1.3.jar"
JSON_JAR_DOWNLOAD_LINK = "http://search.maven.org/remotecontent?filepath=org/json/json/20160810/json-20160810.jar"
class JavaTestRunner(SubprocessedExecutor, HavingInstallableTools):
"""
Allows testing of Java and JAR files
:type script: str
"""
def __init__(self):
super(JavaTestRunner, self).__init__()
self.working_dir = os.getcwd()
self.target_java = "1.8"
self.props_file = None
self.base_class_path = []
def path_lambda(self, x):
return os.path.abspath(self.engine.find_file(x))
def install_required_tools(self):
self.hamcrest_path = self.path_lambda(self.settings.get("hamcrest-core",
"~/.bzt/selenium-taurus/tools/junit/hamcrest-core.jar"))
self.json_jar_path = self.path_lambda(
self.settings.get("json-jar", "~/.bzt/selenium-taurus/tools/junit/json.jar"))
self.selenium_server_jar_path = self.path_lambda(self.settings.get("selenium-server",
"~/.bzt/selenium-taurus/selenium-server.jar"))
def prepare(self):
"""
make jar.
"""
self.script = self.get_scenario().get(Scenario.SCRIPT,
TaurusConfigError("Script not passed to runner %s" % self))
self.script = self.engine.find_file(self.script)
self.install_required_tools()
self.working_dir = self.engine.create_artifact(self.settings.get("working-dir", "classes"), "")
self.target_java = str(self.settings.get("compile-target-java", self.target_java))
self.base_class_path.extend(self.settings.get("additional-classpath", []))
self.base_class_path.extend(self.get_scenario().get("additional-classpath", []))
self.base_class_path.extend([self.hamcrest_path, self.json_jar_path, self.selenium_server_jar_path])
self.props_file = self.engine.create_artifact("runner", ".properties")
if not os.path.exists(self.working_dir):
os.makedirs(self.working_dir)
self.reporting_setup(suffix=".ldjson")
def resource_files(self):
resources = super(JavaTestRunner, self).resource_files()
resources.extend(self.get_scenario().get("additional-classpath", []))
global_additional_classpath = self.settings.get("additional-classpath", [])
execution_files = self.execution.get('files', []) # later we need to fix path for sending into cloud
execution_files.extend(global_additional_classpath)
return resources
def _collect_script_files(self, extensions):
file_list = []
if self.script is not None and os.path.isdir(self.script):
for root, _, files in os.walk(self.script):
for test_file in files:
if os.path.splitext(test_file)[1].lower() in extensions:
path = get_full_path(join(root, test_file))
file_list.append(path)
else:
if os.path.splitext(self.script)[1].lower() in extensions:
file_list.append(get_full_path(self.script))
return file_list
def compile_scripts(self):
"""
Compile .java files
"""
self.log.debug("Compiling .java files started")
jar_path = join(self.engine.artifacts_dir, self.working_dir, self.settings.get("jar-name", "compiled.jar"))
if os.path.exists(jar_path):
self.log.debug(".java files are already compiled, skipping")
return
compile_cl = ["javac",
"-source", self.target_java,
"-target", self.target_java,
"-d", self.working_dir,
]
compile_cl.extend(["-cp", os.pathsep.join(self.base_class_path)])
compile_cl.extend(self._collect_script_files({".java"}))
with open(self.engine.create_artifact("javac", ".out"), 'ab') as javac_out:
with open(self.engine.create_artifact("javac", ".err"), 'ab') as javac_err:
self.log.debug("running javac: %s", compile_cl)
self.process = shell_exec(compile_cl, stdout=javac_out, stderr=javac_err)
ret_code = self.process.poll()
while ret_code is None:
self.log.debug("Compiling .java files...")
time.sleep(1)
ret_code = self.process.poll()
if ret_code != 0:
self.log.debug("javac exit code: %s", ret_code)
with open(javac_err.name) as err_file:
out = err_file.read()
raise ToolError("Javac exited with code: %s\n %s" % (ret_code, out.strip()))
self.log.info("Compiling .java files completed")
self.make_jar()
def make_jar(self):
"""
Move all .class files into compiled.jar
"""
self.log.debug("Making .jar started")
with open(join(self.engine.artifacts_dir, "jar.out"), 'ab') as jar_out:
with open(join(self.engine.artifacts_dir, "jar.err"), 'ab') as jar_err:
class_files = [java_file for java_file in listdir(self.working_dir) if java_file.endswith(".class")]
jar_name = self.settings.get("jar-name", "compiled.jar")
if class_files:
compile_jar_cl = ["jar", "-cf", jar_name]
compile_jar_cl.extend(class_files)
else:
compile_jar_cl = ["jar", "-cf", jar_name, "."]
self.process = shell_exec(compile_jar_cl, cwd=self.working_dir, stdout=jar_out, stderr=jar_err)
ret_code = self.process.poll()
while ret_code is None:
self.log.debug("Making jar file...")
time.sleep(1)
ret_code = self.process.poll()
if ret_code != 0:
with open(jar_err.name) as err_file:
out = err_file.read()
raise ToolError("Jar exited with code %s\n%s" % (ret_code, out.strip()))
self.log.info("Making .jar file completed")
class JUnitTester(JavaTestRunner, HavingInstallableTools):
"""
Allows testing of Java and JAR files
"""
def __init__(self):
super(JUnitTester, self).__init__()
self.junit_path = None
self.junit_listener_path = None
def prepare(self):
super(JUnitTester, self).prepare()
self.install_required_tools()
self.base_class_path += [self.junit_path, self.junit_listener_path]
self.base_class_path = [self.path_lambda(x) for x in self.base_class_path]
if any(self._collect_script_files({'.java'})):
self.compile_scripts()
def install_required_tools(self):
super(JUnitTester, self).install_required_tools()
self.junit_path = self.path_lambda(self.settings.get("path", "~/.bzt/selenium-taurus/tools/junit/junit.jar"))
self.junit_listener_path = join(get_full_path(__file__, step_up=2), "resources", "taurus-junit-1.0.jar")
tools = []
# only check javac if we need to compile. if we have JAR as script - we don't need javac
if self.script and any(self._collect_script_files({'.java'})):
tools.append(JavaC(self.log))
tools.append(TclLibrary(self.log))
tools.append(JavaVM(self.log))
link = SELENIUM_DOWNLOAD_LINK.format(version=SELENIUM_VERSION)
tools.append(SeleniumServerJar(self.selenium_server_jar_path, link, self.log))
tools.append(JUnitJar(self.junit_path, self.log, JUNIT_VERSION))
tools.append(HamcrestJar(self.hamcrest_path, HAMCREST_DOWNLOAD_LINK))
tools.append(JsonJar(self.json_jar_path, JSON_JAR_DOWNLOAD_LINK))
tools.append(JUnitListenerJar(self.junit_listener_path, ""))
self._check_tools(tools)
def startup(self):
# java -cp junit.jar:selenium-test-small.jar:
# selenium-2.46.0/selenium-java-2.46.0.jar:./../selenium-server.jar
# taurusjunit.CustomRunner runner.properties
jar_list = [join(self.working_dir, jar) for jar in listdir(self.working_dir) if jar.endswith(".jar")]
jar_list.extend(self._collect_script_files({".jar"}))
self.base_class_path.extend(jar_list)
with open(self.props_file, 'wt') as props:
props.write("report_file=%s\n" % self.report_file)
load = self.get_load()
if load.iterations:
props.write("iterations=%s\n" % load.iterations)
if load.hold:
props.write("hold_for=%s\n" % load.hold)
for index, item in enumerate(jar_list):
props.write("target_%s=%s\n" % (index, item.replace(os.path.sep, '/')))
class_path = os.pathsep.join(self.base_class_path)
junit_cmd_line = ["java", "-cp", class_path, "-Djna.nosys=true", "taurusjunit.CustomRunner", self.props_file]
self._start_subprocess(junit_cmd_line)
class TestNGTester(JavaTestRunner, HavingInstallableTools):
"""
Allows testing of Java and JAR files with TestNG
"""
__test__ = False # Hello, nosetests discovery mechanism
def __init__(self):
super(TestNGTester, self).__init__()
self.testng_path = None
self.testng_plugin_path = None
def prepare(self):
super(TestNGTester, self).prepare()
self.install_required_tools()
self.base_class_path += [self.testng_path, self.testng_plugin_path]
if any(self._collect_script_files({'.java'})):
self.compile_scripts()
def detected_testng_xml(self):
script_path = self.get_script_path()
if script_path and self.settings.get("autodetect-xml", True):
script_dir = get_full_path(script_path, step_up=1)
testng_xml = os.path.join(script_dir, 'testng.xml')
if os.path.exists(testng_xml):
return testng_xml
return None
def resource_files(self):
resources = super(TestNGTester, self).resource_files()
testng_xml = self.execution.get('testng-xml', None)
if not testng_xml:
testng_xml = self.detected_testng_xml()
if testng_xml:
self.log.info("Detected testng.xml file at %s", testng_xml)
self.execution['testng-xml'] = testng_xml
if testng_xml:
resources.append(testng_xml)
return resources
def install_required_tools(self):
super(TestNGTester, self).install_required_tools()
self.testng_path = self.path_lambda(self.settings.get("path", "~/.bzt/selenium-taurus/tools/testng/testng.jar"))
self.testng_plugin_path = join(get_full_path(__file__, step_up=2), "resources", "taurus-testng-1.0.jar")
tools = []
if self.script and any(self._collect_script_files({'.java'})):
tools.append(JavaC(self.log))
tools.append(TclLibrary(self.log))
tools.append(JavaVM(self.log))
link = SELENIUM_DOWNLOAD_LINK.format(version=SELENIUM_VERSION)
tools.append(SeleniumServerJar(self.selenium_server_jar_path, link, self.log))
tools.append(TestNGJar(self.testng_path, TESTNG_DOWNLOAD_LINK))
tools.append(HamcrestJar(self.hamcrest_path, HAMCREST_DOWNLOAD_LINK))
tools.append(JsonJar(self.json_jar_path, JSON_JAR_DOWNLOAD_LINK))
tools.append(TestNGPluginJar(self.testng_plugin_path, ""))
self._check_tools(tools)
def startup(self):
# java -classpath
# testng.jar:selenium-server.jar:taurus-testng-1.0.jar:json.jar:compiled.jar
# taurustestng.TestNGRunner runner.properties
jar_list = [join(self.working_dir, jar) for jar in listdir(self.working_dir) if jar.endswith(".jar")]
jar_list.extend(self._collect_script_files({".jar"}))
self.base_class_path.extend(jar_list)
with open(self.props_file, 'wt') as props:
props.write("report_file=%s\n" % self.report_file)
load = self.get_load()
if load.iterations:
props.write("iterations=%s\n" % load.iterations)
if load.hold:
props.write("hold_for=%s\n" % load.hold)
for index, item in enumerate(jar_list):
props.write("target_%s=%s\n" % (index, item.replace(os.path.sep, '/')))
testng_xml = self.execution.get('testng-xml', None) or self.detected_testng_xml()
if testng_xml:
props.write('testng_config=%s\n' % testng_xml.replace(os.path.sep, '/'))
cmdline = ["java", "-cp", os.pathsep.join(self.base_class_path), "taurustestng.TestNGRunner", self.props_file]
self._start_subprocess(cmdline)
class TestNGJar(RequiredTool):
def __init__(self, tool_path, download_link):
super(TestNGJar, self).__init__("TestNG", tool_path, download_link)
class HamcrestJar(RequiredTool):
def __init__(self, tool_path, download_link):
super(HamcrestJar, self).__init__("HamcrestJar", tool_path, download_link)
class JsonJar(RequiredTool):
def __init__(self, tool_path, download_link):
super(JsonJar, self).__init__("JsonJar", tool_path, download_link)
class JavaC(RequiredTool):
def __init__(self, parent_logger, tool_path='javac', download_link=''):
super(JavaC, self).__init__("JavaC", tool_path, download_link)
self.log = parent_logger.getChild(self.__class__.__name__)
def check_if_installed(self):
try:
output = subprocess.check_output([self.tool_path, '-version'], stderr=subprocess.STDOUT)
self.log.debug("%s output: %s", self.tool_name, output)
return True
except (subprocess.CalledProcessError, OSError):
return False
def install(self):
raise ToolError("The %s is not operable or not available. Consider installing it" % self.tool_name)
class SeleniumServerJar(RequiredTool):
def __init__(self, tool_path, download_link, parent_logger):
super(SeleniumServerJar, self).__init__("Selenium server", tool_path, download_link)
self.log = parent_logger.getChild(self.__class__.__name__)
def check_if_installed(self):
self.log.debug("%s path: %s", self.tool_name, self.tool_path)
selenium_launch_command = ["java", "-jar", self.tool_path, "-help"]
selenium_subproc = shell_exec(selenium_launch_command, stderr=subprocess.STDOUT)
output = selenium_subproc.communicate()
self.log.debug("%s output: %s", self.tool_name, output)
if selenium_subproc.returncode == 0:
self.already_installed = True
return True
else:
return False
class JUnitJar(RequiredTool):
def __init__(self, tool_path, parent_logger, junit_version):
super(JUnitJar, self).__init__("JUnit", tool_path)
self.log = parent_logger.getChild(self.__class__.__name__)
self.version = junit_version
self.mirror_manager = JUnitMirrorsManager(self.log, self.version)
def install(self):
dest = get_full_path(self.tool_path, step_up=1)
self.log.info("Will install %s into %s", self.tool_name, dest)
junit_dist = self._download(suffix=".jar")
if not os.path.exists(dest):
os.makedirs(dest)
shutil.move(junit_dist, self.tool_path)
self.log.info("Installed JUnit successfully")
if not self.check_if_installed():
raise ToolError("Unable to run %s after installation!" % self.tool_name)
class JUnitListenerJar(RequiredTool):
def __init__(self, tool_path, download_link):
super(JUnitListenerJar, self).__init__("JUnitListener", tool_path, download_link)
def install(self):
raise ToolError("Automatic installation of JUnitListener isn't implemented")
class TestNGPluginJar(RequiredTool):
def __init__(self, tool_path, download_link):
super(TestNGPluginJar, self).__init__("TestNGPlugin", tool_path, download_link)
def install(self):
raise ToolError("TestNG plugin should be bundled with Taurus distribution")
class JUnitMirrorsManager(MirrorsManager):
def __init__(self, parent_logger, junit_version):
self.junit_version = junit_version
super(JUnitMirrorsManager, self).__init__(JUNIT_MIRRORS_SOURCE, parent_logger)
def _parse_mirrors(self):
links = []
if self.page_source is not None:
self.log.debug('Parsing mirrors...')
try:
resp = json.loads(self.page_source)
objects = resp.get("response", {}).get("docs", [])
if objects:
obj = objects[0]
group = obj.get("g")
artifact = obj.get("a")
version = obj.get("v")
ext = obj.get("p")
link_template = "http://search.maven.org/remotecontent?filepath={group}/{artifact}/" \
"{version}/{artifact}-{version}.{ext}"
link = link_template.format(group=group, artifact=artifact, version=version, ext=ext)
links.append(link)
except BaseException as exc:
self.log.error("Error while parsing mirrors %s", exc)
default_link = JUNIT_DOWNLOAD_LINK.format(version=self.junit_version)
if default_link not in links:
links.append(default_link)
self.log.debug('Total mirrors: %d', len(links))
return links
```
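For context, `JUnitMirrorsManager._parse_mirrors` above turns a Maven Central search response into candidate download links, always keeping the direct link as a fallback. A standalone sketch of that transformation, using a hard-coded response dict instead of a real HTTP call (the sample payload is an assumption shaped like the search.maven.org format):

```python
JUNIT_FALLBACK = ("http://search.maven.org/remotecontent?filepath=junit/junit/"
                  "{version}/junit-{version}.jar")

def mirrors_from_search_response(resp, version="4.12"):
    """Build candidate download links from a Maven search API payload,
    appending the direct fallback link last if it is not already present."""
    links = []
    docs = resp.get("response", {}).get("docs", [])
    if docs:
        doc = docs[0]
        template = ("http://search.maven.org/remotecontent?filepath="
                    "{g}/{a}/{v}/{a}-{v}.{p}")
        links.append(template.format(g=doc["g"], a=doc["a"], v=doc["v"], p=doc["p"]))
    fallback = JUNIT_FALLBACK.format(version=version)
    if fallback not in links:
        links.append(fallback)
    return links

# assumed example payload for illustration only
sample = {"response": {"docs": [{"g": "junit", "a": "junit", "v": "4.12", "p": "jar"}]}}
print(mirrors_from_search_response(sample))
```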
#### File: bzt/six/py2.py
```python
import cookielib
import types
import traceback
import urllib
import urllib2
import urlparse
import operator
import ConfigParser
import UserDict
import StringIO
import BaseHTTPServer
import SocketServer as socketserver
string_types = basestring,
integer_types = (int, long)
numeric_types = (int, long, float, complex)
class_types = (type, types.ClassType)
text_type = unicode
binary_type = str
file_type = file
configparser = ConfigParser
UserDict = UserDict.UserDict
BytesIO = StringIO.StringIO
StringIO = StringIO.StringIO
cookielib = cookielib
parse = urlparse
request = urllib
urlopen = urllib2.urlopen
urlencode = urllib.urlencode
build_opener = urllib2.build_opener
install_opener = urllib2.install_opener
ProxyHandler = urllib2.ProxyHandler
Request = urllib2.Request
HTTPError = urllib2.HTTPError
URLError = urllib2.URLError
BaseHTTPServer = BaseHTTPServer
socketserver = socketserver
SimpleHTTPRequestHandler = BaseHTTPServer.BaseHTTPRequestHandler
viewvalues = operator.methodcaller("viewvalues")
r_input = raw_input
def iteritems(dictionary, **kw):
return iter(dictionary.iteritems(**kw))
def b(string):
return string
def u(string):
if not isinstance(string, text_type):
return unicode(string.replace('\\', '\\\\'), "unicode_escape")
else:
return string
def get_stacktrace(exc):
return traceback.format_exc(exc).rstrip()
def reraise(exc_info):
raise exc_info[0], exc_info[1], exc_info[2]
def stream_decode(string):
return string
def unicode_decode(string):
if not isinstance(string, text_type):
return string.decode('utf-8')
else:
return string
def communicate(proc):
return proc.communicate()
```
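The `py2.py` module above is one half of bzt's internal `six`-style compatibility layer: callers import names such as `iteritems`, `string_types` or `urlopen` from `bzt.six` and receive the right object for the running interpreter. A minimal sketch of the same pattern, runnable on Python 3 (the layout is illustrative, not bzt's actual module structure):

```python
import sys

# a tiny compatibility facade in the spirit of bzt.six
if sys.version_info[0] == 2:
    string_types = basestring,  # noqa: F821 -- only evaluated on Python 2

    def iteritems(dictionary, **kw):
        return iter(dictionary.iteritems(**kw))
else:
    string_types = str,

    def iteritems(dictionary, **kw):
        return iter(dictionary.items(**kw))

# callers stay version-agnostic:
config = {"iterations": 3, "hold-for": "5s"}
for key, value in iteritems(config):
    assert isinstance(key, string_types)
    print(key, value)
```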
#### File: modules/jmeter/__init__.py
```python
import logging
from tests.mocks import EngineEmul
from bzt.modules.jmeter import JMeter, JMeterExecutor
from bzt.utils import get_full_path
class MockJMeter(JMeter):
def __init__(self, has_ctg=None, reaction=None):
jmeter_version = JMeterExecutor.JMETER_VER
jmeter_path = "~/.bzt/jmeter-taurus/{version}/"
jmeter_path = get_full_path(jmeter_path)
super(MockJMeter, self).__init__(tool_path=jmeter_path, parent_logger=logging.getLogger(''),
jmeter_version=jmeter_version, jmeter_download_link=None, plugins=[], proxy={})
self.has_ctg = has_ctg
self.reaction = reaction if reaction else []
def ctg_plugin_installed(self):
return self.has_ctg
def _pmgr_call(self, params):
# replaces real pmgr call
reaction = self.reaction.pop(0)
if 'raise' in reaction:
raise reaction['raise']
return reaction['output']
class MockJMeterExecutor(JMeterExecutor):
def __init__(self, load=None, settings=None, has_ctg=None):
super(MockJMeterExecutor, self).__init__()
self.mock_install = True
self.version = None
if load is None: load = {}
if settings is None: settings = {}
if has_ctg is None: has_ctg = True
self.engine = EngineEmul()
self.env = self.engine.env
self.execution.merge(load)
self.settings.merge({"detect-plugins": False})
self.settings.merge(settings)
self.tool = MockJMeter(has_ctg)
def install_required_tools(self):
if self.mock_install:
self.version = self.settings.get('version')
self.tool = MockJMeter()
else:
super(MockJMeterExecutor, self).install_required_tools()
```
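The `MockJMeter.reaction` list above scripts what each `_pmgr_call` invocation should do: pop the next reaction and either raise it or return its canned output. A standalone sketch of that scripted-reaction pattern, detached from bzt so it runs on its own (class and reaction contents are assumptions for illustration):

```python
class ScriptedTool(object):
    """Replays a scripted list of reactions, mirroring MockJMeter above:
    each call pops the next reaction and either raises or returns output."""

    def __init__(self, reaction=None):
        self.reaction = reaction if reaction else []

    def call(self, params):
        reaction = self.reaction.pop(0)
        if "raise" in reaction:
            raise reaction["raise"]
        return reaction["output"]


tool = ScriptedTool(reaction=[
    {"output": "plugin installed OK"},
    {"raise": RuntimeError("plugin manager crashed")},
])
print(tool.call(["install", "jpgc-casutg"]))   # first scripted reaction
try:
    tool.call(["install", "jpgc-casutg"])       # second scripted reaction raises
except RuntimeError as exc:
    print("caught:", exc)
```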
#### File: modules/selenium/test_python.py
```python
import json
import os
import time
from bzt import TaurusConfigError
from bzt.engine import ScenarioExecutor
from bzt.modules.functional import FuncSamplesReader, LoadSamplesReader, FunctionalAggregator
from bzt.modules.python import ApiritifNoseExecutor, PyTestExecutor, RobotExecutor, ApiritifLoadReader, \
ApiritifFuncReader
from tests import BZTestCase, RESOURCES_DIR
from tests.mocks import EngineEmul
from tests.modules.selenium import SeleniumTestCase
class TestSeleniumNoseRunner(SeleniumTestCase):
def test_selenium_prepare_python_single(self):
"""
Check if script exists in working dir
:return:
"""
self.obj.execution.merge({"scenario": {
"script": RESOURCES_DIR + "selenium/python/test_blazemeter_fail.py"
}})
self.obj.prepare()
def test_selenium_prepare_python_folder(self):
"""
Check if scripts exist in working dir
:return:
"""
self.obj.execution.merge({"scenario": {"script": RESOURCES_DIR + "selenium/python/"}})
self.obj.prepare()
def test_selenium_startup_shutdown_python_single(self):
"""
run tests from .py file
:return:
"""
self.configure({
'execution': {
"iterations": 1,
'scenario': {'script': RESOURCES_DIR + 'selenium/python/'},
'executor': 'selenium'
},
'reporting': [{'module': 'junit-xml'}]
})
self.obj.execution.merge({"scenario": {
"script": RESOURCES_DIR + "selenium/python/test_blazemeter_fail.py"
}})
self.obj.prepare()
self.obj.startup()
while not self.obj.check():
time.sleep(self.obj.engine.check_interval)
self.obj.shutdown()
self.assertTrue(os.path.exists(os.path.join(self.obj.engine.artifacts_dir, "apiritif-0.csv")))
def test_selenium_startup_shutdown_python_folder(self):
"""
run tests from .py files
:return:
"""
self.configure({
'execution': {
'iterations': 1,
'scenario': {'script': RESOURCES_DIR + 'selenium/python/'},
'executor': 'selenium'
},
'reporting': [{'module': 'junit-xml'}]
})
self.obj.prepare()
self.obj.startup()
while not self.obj.check():
time.sleep(self.obj.engine.check_interval)
self.obj.shutdown()
self.assertTrue(os.path.exists(os.path.join(self.obj.engine.artifacts_dir, "apiritif-0.csv")))
def test_runner_fail_no_test_found(self):
"""
Check that Python Nose runner fails if no tests were found
:return:
"""
self.configure({
ScenarioExecutor.EXEC: {
"iterations": 1,
"executor": "selenium",
"scenario": {"script": RESOURCES_DIR + "selenium/invalid/dummy.py"}
}
})
self.obj.prepare()
self.obj.startup()
while not self.obj.check():
time.sleep(self.obj.engine.check_interval)
self.obj.shutdown()
diagnostics = "\n".join(self.obj.get_error_diagnostics())
self.assertIn("Nothing to test.", diagnostics)
def test_resource_files_collection_remote_nose(self):
self.obj.execution.merge({"scenario": {"script": RESOURCES_DIR + "selenium/python/"}})
self.assertEqual(len(self.obj.resource_files()), 1)
def test_setup_exception(self):
"""
Do not crash when test's setUp/setUpClass fails
:return:
"""
self.obj.execution.merge({"scenario": {
"script": RESOURCES_DIR + "selenium/python/test_setup_exception.py"
}})
self.obj.engine.aggregator = FunctionalAggregator()
self.obj.prepare()
self.obj.startup()
while not self.obj.check():
time.sleep(self.obj.engine.check_interval)
diagnostics = "\n".join(self.obj.get_error_diagnostics())
self.assertIn("Nothing to test", diagnostics)
def test_long_iterations_value(self):
self.obj.execution.merge({
"iterations": 2 ** 64,
"scenario": {
"requests": [
"http://blazedemo.com/",
],
}
})
self.obj.prepare()
try:
self.obj.startup()
for _ in range(3):
self.assertFalse(self.obj.check())
time.sleep(self.obj.engine.check_interval)
finally:
self.obj.shutdown()
class TestNoseRunner(BZTestCase):
def setUp(self):
super(TestNoseRunner, self).setUp()
self.obj = ApiritifNoseExecutor()
self.obj.engine = EngineEmul()
self.obj.env = self.obj.engine.env
def configure(self, config):
self.obj.engine.config.merge(config)
self.obj.execution = self.obj.engine.config["execution"][0]
def test_full_single_script(self):
self.obj.engine.check_interval = 0.1
self.obj.execution.merge({
"iterations": 1,
"ramp-up": "10s",
"hold-for": "10s",
"steps": 5,
"scenario": {
"script": RESOURCES_DIR + "apiritif/test_codegen.py"}})
self.obj.prepare()
self.obj.get_widget()
try:
self.obj.startup()
while not self.obj.check():
time.sleep(self.obj.engine.check_interval)
finally:
self.obj.shutdown()
self.obj.post_process()
self.assertFalse(self.obj.has_results())
self.assertNotEquals(self.obj.process, None)
def test_apiritif_generated_requests(self):
self.configure({
"execution": [{
"test-mode": "apiritif",
"iterations": 1,
"scenario": {
"default-address": "http://blazedemo.com",
"requests": [
"/",
"/reserve.php"]}}]})
self.obj.prepare()
self.assertTrue(os.path.exists(os.path.join(self.obj.engine.artifacts_dir, "test_requests.py")))
try:
self.obj.startup()
while not self.obj.check():
time.sleep(self.obj.engine.check_interval)
finally:
self.obj.shutdown()
self.obj.post_process()
self.assertNotEquals(self.obj.process, None)
def test_apiritif_transactions(self):
self.configure({
"execution": [{
"test-mode": "apiritif",
"iterations": 1,
"scenario": {
"script": RESOURCES_DIR + "apiritif/test_transactions.py"
}
}]
})
self.obj.prepare()
try:
self.obj.startup()
while not self.obj.check():
time.sleep(self.obj.engine.check_interval)
finally:
self.obj.shutdown()
self.obj.post_process()
self.assertNotEquals(self.obj.process, None)
def test_report_reading(self):
reader = FuncSamplesReader(RESOURCES_DIR + "apiritif/transactions.ldjson", self.obj.engine, self.obj.log)
items = list(reader.read(last_pass=True))
self.assertEqual(len(items), 6)
self.assertEqual(items[0].test_case, "test_1_single_request")
self.assertEqual(items[1].test_case, "test_2_multiple_requests")
self.assertEqual(items[2].test_case, "Transaction")
self.assertEqual(items[3].test_case, "Transaction")
self.assertEqual(items[4].test_case, "Transaction 1")
self.assertEqual(items[5].test_case, "Transaction 2")
def test_report_transactions_as_failed(self):
self.configure({
"execution": [{
"test-mode": "apiritif",
"iterations": 1,
"scenario": {
"default-address": "http://httpbin.org",
"requests": [{
"label": "failure by 404",
"url": "/status/404",
}]
}
}]
})
self.obj.engine.aggregator = FunctionalAggregator()
self.obj.prepare()
try:
self.obj.startup()
while not self.obj.check():
time.sleep(self.obj.engine.check_interval)
finally:
self.obj.shutdown()
self.obj.post_process()
self.assertNotEquals(self.obj.process, None)
reader = LoadSamplesReader(os.path.join(self.obj.engine.artifacts_dir, "apiritif-0.ldjson"), self.obj.log)
samples = list(reader._read(last_pass=True))
self.assertEqual(len(samples), 1)
tstmp, label, concur, rtm, cnn, ltc, rcd, error, trname, byte_count = samples[0]
self.assertIsNotNone(error)
def test_status_skipped(self):
self.configure({
"execution": [{
"iterations": 1,
"scenario": {
"script": RESOURCES_DIR + "functional/test_all.py"
}
}]
})
self.obj.engine.aggregator = FunctionalAggregator()
self.obj.prepare()
try:
self.obj.startup()
while not self.obj.check():
time.sleep(self.obj.engine.check_interval)
finally:
self.obj.shutdown()
self.obj.post_process()
reader = FuncSamplesReader(os.path.join(self.obj.engine.artifacts_dir, "apiritif-0.ldjson"),
self.obj.engine, self.obj.log)
samples = list(reader.read(last_pass=True))
self.assertEqual(len(samples), 4)
self.assertIsNotNone(samples[-1].status)
class TestSeleniumScriptBuilder(SeleniumTestCase):
def test_build_script(self):
self.configure({
"execution": [{
"executor": "selenium",
"hold-for": "4m",
"ramp-up": "3m",
"scenario": "loc_sc"}],
"scenarios": {
"loc_sc": {
"default-address": "http://blazedemo.com",
"timeout": "3.5s",
"requests": [{
"url": "/",
"assert": [{
"contains": ['contained_text'],
"not": True
}],
"actions": [
{"waitByName('toPort')": "visible"},
{"keysByName(\"toPort\")": "B"},
"clickByXPath(//div[3]/form/select[1]//option[3])",
"clickByXPath(//div[3]/form/select[2]//option[6])",
"clickByXPath(//input[@type='submit'])",
"pauseFor(3s)",
"clearCookies()",
"clickByLinkText(destination of the week! The Beach!)"
],
}, {
"label": "empty"}]}}})
self.obj.prepare()
with open(self.obj.script) as generated:
gen_contents = generated.readlines()
with open(RESOURCES_DIR + "selenium/generated_from_requests.py") as sample:
sample_contents = sample.readlines()
# strip line terminator and exclude specific build path
gen_contents = [line.rstrip() for line in gen_contents if 'webdriver' not in line]
sample_contents = [line.rstrip() for line in sample_contents if 'webdriver' not in line]
self.assertEqual(gen_contents, sample_contents)
class TestApiritifScriptGenerator(BZTestCase):
def setUp(self):
super(TestApiritifScriptGenerator, self).setUp()
self.obj = ApiritifNoseExecutor()
self.obj.engine = EngineEmul()
def configure(self, config):
self.obj.engine.config.merge(config)
self.obj.execution = self.obj.engine.config["execution"][0]
def test_keepalive_default(self):
self.configure({
"execution": [{
"test-mode": "apiritif",
"scenario": {
"default-address": "http://blazedemo.com",
"requests": [
"/",
]
}
}]
})
self.obj.prepare()
with open(self.obj.script) as fds:
test_script = fds.read()
self.assertIn("target.keep_alive(True)", test_script)
def test_keepalive(self):
self.configure({
"execution": [{
"test-mode": "apiritif",
"scenario": {
"default-address": "http://blazedemo.com",
"keepalive": False,
"requests": [
"/",
]
}
}]
})
self.obj.prepare()
with open(self.obj.script) as fds:
test_script = fds.read()
self.assertIn("target.keep_alive(False)", test_script)
def test_timeout_default(self):
self.configure({
"execution": [{
"test-mode": "apiritif",
"scenario": {
"default-address": "http://blazedemo.com",
"requests": [
"/",
]
}
}]
})
self.obj.prepare()
with open(self.obj.script) as fds:
test_script = fds.read()
self.assertNotIn("timeout=30.0", test_script)
def test_timeout(self):
self.configure({
"execution": [{
"test-mode": "apiritif",
"scenario": {
"timeout": "10s",
"default-address": "http://blazedemo.com",
"requests": [
"/?tag=1",
{
"url": "/?tag=2",
"timeout": "2s",
}
]
}
}]
})
self.obj.prepare()
with open(self.obj.script) as fds:
test_script = fds.read()
self.assertIn("target.timeout(10.0)", test_script)
self.assertNotIn("get('/?tag=1', timeout=10.0", test_script)
self.assertIn("get('/?tag=2', timeout=2.0", test_script)
def test_think_time(self):
self.configure({
"execution": [{
"test-mode": "apiritif",
"scenario": {
"default-address": "http://blazedemo.com",
"requests": [
{
"url": "/?tag=2",
"think-time": "1s500ms",
}
]
}
}]
})
self.obj.prepare()
with open(self.obj.script) as fds:
test_script = fds.read()
self.assertIn("time.sleep(1.5)", test_script)
def test_methods(self):
self.configure({
"execution": [{
"test-mode": "apiritif",
"scenario": {
"default-address": "http://blazedemo.com",
"requests": [
{"url": "/?tag=get",
"method": "GET"},
{"url": "/?tag=post",
"method": "POST"},
{"url": "/?tag=put",
"method": "PUT"},
{"url": "/?tag=patch",
"method": "PATCH"},
{"url": "/?tag=head",
"method": "HEAD"},
{"url": "/?tag=delete",
"method": "DELETE"},
]
}
}]
})
self.obj.prepare()
with open(self.obj.script) as fds:
test_script = fds.read()
self.assertIn("get('/?tag=get'", test_script)
self.assertIn("post('/?tag=post'", test_script)
self.assertIn("put('/?tag=put'", test_script)
self.assertIn("patch('/?tag=patch'", test_script)
self.assertIn("head('/?tag=head'", test_script)
self.assertIn("delete('/?tag=delete'", test_script)
def test_default_address_path_prefix(self):
self.configure({
"execution": [{
"test-mode": "apiritif",
"scenario": {
"default-address": "https://a.blazemeter.com",
"base-path": "/api/latest",
"requests": [
"/user",
]
}
}]
})
self.obj.prepare()
with open(self.obj.script) as fds:
test_script = fds.read()
self.assertIn("target('https://a.blazemeter.com')", test_script)
self.assertIn("target.base_path('/api/latest')", test_script)
def test_headers(self):
self.configure({
"execution": [{
"test-mode": "apiritif",
"scenario": {
"default-address": "http://blazedemo.com",
"headers": {"X-Foo": "foo"},
"requests": [{
"url": "/",
"headers": {"X-Bar": "bar"}
}]
}
}]
})
self.obj.prepare()
with open(self.obj.script) as fds:
test_script = fds.read()
self.assertIn("'X-Foo': 'foo'", test_script)
self.assertIn("'X-Bar': 'bar'", test_script)
def test_follow_redirects_default(self):
self.configure({
"execution": [{
"test-mode": "apiritif",
"scenario": {
"default-address": "http://blazedemo.com",
"requests": [{
"url": "/",
}]
}
}]
})
self.obj.prepare()
with open(self.obj.script) as fds:
test_script = fds.read()
self.assertIn("target.allow_redirects(True)", test_script)
self.assertNotIn("allow_redirects=True", test_script)
def test_follow_redirects(self):
self.configure({
"execution": [{
"test-mode": "apiritif",
"scenario": {
"default-address": "http://blazedemo.com",
"requests": [{
"url": "/",
"follow-redirects": False,
}]
}
}]
})
self.obj.prepare()
with open(self.obj.script) as fds:
test_script = fds.read()
self.assertIn("allow_redirects=False", test_script)
def test_body_params(self):
self.configure({
"execution": [{
"test-mode": "apiritif",
"scenario": {
"default-address": "http://blazedemo.com",
"requests": [{
"url": "/",
"body": {
"foo": "bar",
},
}]
}
}]
})
self.obj.prepare()
with open(self.obj.script) as fds:
test_script = fds.read()
self.assertIn("params={'foo': 'bar'}", test_script)
def test_body_json(self):
self.configure({
"execution": [{
"test-mode": "apiritif",
"scenario": {
"default-address": "http://blazedemo.com",
"requests": [{
"url": "/",
"headers": {
"Content-Type": "application/json",
},
"body": {
"foo": "bar",
},
}]
}
}]
})
self.obj.prepare()
with open(self.obj.script) as fds:
test_script = fds.read()
self.assertIn("json={'foo': 'bar'}", test_script)
def test_body_string(self):
self.configure({
"execution": [{
"test-mode": "apiritif",
"scenario": {
"default-address": "http://blazedemo.com",
"requests": [{
"url": "/",
"body": "MY PERFECT BODY"
}]
}
}]
})
self.obj.prepare()
with open(self.obj.script) as fds:
test_script = fds.read()
self.assertIn("data='MY PERFECT BODY'", test_script)
def test_body_unknown(self):
self.configure({
"execution": [{
"test-mode": "apiritif",
"scenario": {
"default-address": "http://blazedemo.com",
"requests": [{
"url": "/",
"body": 123
}]
}
}]
})
self.assertRaises(TaurusConfigError, self.obj.prepare)
def test_plain_assertions(self):
self.configure({
"execution": [{
"test-mode": "apiritif",
"scenario": {
"default-address": "http://blazedemo.com",
"requests": [{
"url": "/",
"assert": [
"Welcome", "Simple Travel Agency"
]
}]
}
}]
})
self.obj.prepare()
with open(self.obj.script) as fds:
test_script = fds.read()
self.assertIn("response.assert_regex_in_body('Welcome')", test_script)
self.assertIn("response.assert_regex_in_body('Simple Travel Agency')", test_script)
def test_plain_assertion_kinds(self):
self.configure({
"execution": [{
"test-mode": "apiritif",
"scenario": {
"default-address": "http://blazedemo.com",
"requests": [{
"url": "/",
"assert": [
{"contains": ["1"], "regexp": False, "not": False},
{"contains": ["2"], "regexp": False, "not": True},
{"contains": ["3"], "regexp": True, "not": False},
{"contains": ["4"], "regexp": True, "not": True},
{"contains": ["5"], "regexp": False, "not": False, "subject": "headers"},
{"contains": ["6"], "regexp": False, "not": True, "subject": "headers"},
{"contains": ["7"], "regexp": True, "not": False, "subject": "headers"},
{"contains": ["8"], "regexp": True, "not": True, "subject": "headers"},
{"contains": ["8"], "regexp": True, "not": True, "subject": "headers"},
{"contains": ["9"], "not": False, "subject": "http-code"},
{"contains": ["10"], "not": True, "subject": "http-code"},
]
}]
}
}]
})
self.obj.prepare()
with open(self.obj.script) as fds:
test_script = fds.read()
self.assertIn("assert_in_body('1')", test_script)
self.assertIn("assert_not_in_body('2')", test_script)
self.assertIn("assert_regex_in_body('3')", test_script)
self.assertIn("assert_regex_not_in_body('4')", test_script)
self.assertIn("assert_in_headers('5')", test_script)
self.assertIn("assert_not_in_headers('6')", test_script)
self.assertIn("assert_regex_in_headers('7')", test_script)
self.assertIn("assert_regex_not_in_headers('8')", test_script)
self.assertIn("assert_status_code('9')", test_script)
self.assertIn("assert_not_status_code('10')", test_script)
def test_jsonpath_assertions(self):
self.configure({
"execution": [{
"test-mode": "apiritif",
"scenario": {
"default-address": "https://api.github.com",
"requests": [{
"url": "/",
"assert-jsonpath": [
"$.foo.bar"
]
}]
}
}]
})
self.obj.prepare()
with open(self.obj.script) as fds:
test_script = fds.read()
self.assertIn("assert_jsonpath('$.foo.bar', expected_value=None)", test_script)
def test_jsonpath_assertions_kinds(self):
self.configure({
"execution": [{
"test-mode": "apiritif",
"scenario": {
"default-address": "https://api.github.com",
"requests": [{
"url": "/",
"assert-jsonpath": [
{"jsonpath": "$.1", "invert": False},
{"jsonpath": "$.2", "invert": True},
{"jsonpath": "$.3", "expected-value": "value"},
]
}]
}
}]
})
self.obj.prepare()
with open(self.obj.script) as fds:
test_script = fds.read()
self.assertIn("assert_jsonpath('$.1', expected_value=None)", test_script)
self.assertIn("assert_not_jsonpath('$.2', expected_value=None)", test_script)
self.assertIn("assert_jsonpath('$.3', expected_value='value')", test_script)
def test_xpath_assertions(self):
self.configure({
"execution": [{
"test-mode": "apiritif",
"scenario": {
"default-address": "https://api.github.com",
"requests": [{
"url": "/",
"assert-xpath": [
"//head/title"
]
}]
}
}]
})
self.obj.prepare()
with open(self.obj.script) as fds:
test_script = fds.read()
self.assertIn("assert_xpath('//head/title', parser_type='html', validate=False)", test_script)
def test_xpath_assertions_kinds(self):
self.configure({
"execution": [{
"test-mode": "apiritif",
"scenario": {
"default-address": "https://api.github.com",
"requests": [{
"url": "/",
"assert-xpath": [
{"xpath": "//1", "invert": False},
{"xpath": "//2", "invert": True},
{"xpath": "//3", "validate-xml": True},
{"xpath": "//4", "validate-xml": False, "use-tolerant-parser": False},
]
}]
}
}]
})
self.obj.prepare()
with open(self.obj.script) as fds:
test_script = fds.read()
self.assertIn("assert_xpath('//1', parser_type='html', validate=False)", test_script)
self.assertIn("assert_not_xpath('//2', parser_type='html', validate=False)", test_script)
self.assertIn("assert_xpath('//3', parser_type='html', validate=True)", test_script)
self.assertIn("assert_xpath('//4', parser_type='xml', validate=False)", test_script)
def test_complex_codegen(self):
""" This test serves code review purposes, to make changes more visible """
self.obj.engine.config.load([RESOURCES_DIR + 'apiritif/test_codegen.yml'])
self.configure(self.obj.engine.config['execution'][0])
self.obj.settings['verbose'] = True
self.obj.prepare()
exp_file = RESOURCES_DIR + 'apiritif/test_codegen.py'
# import shutil; shutil.copy2(self.obj.script, exp_file) # keep this comment to ease updates
self.assertFilesEqual(exp_file, self.obj.script)
def test_jmeter_functions_time(self):
self.configure({
"execution": [{
"test-mode": "apiritif",
"scenario": {
"default-address": "http://blazedemo.com",
"requests": [
"/?time=${__time()}",
"/?time=${__time(MM/dd/yy)}",
]
}
}]
})
self.obj.prepare()
with open(self.obj.script) as fds:
test_script = fds.read()
self.obj.log.info(test_script)
self.assertIn("'/?time={}'.format(apiritif.format_date())", test_script)
self.assertIn("'/?time={}'.format(apiritif.format_date('MM/dd/yy'))", test_script)
def test_jmeter_functions_random(self):
self.configure({
"execution": [{
"test-mode": "apiritif",
"scenario": {
"default-address": "http://blazedemo.com",
"requests": [
"/?random=${__Random(1, 10)}",
]
}
}]
})
self.obj.prepare()
with open(self.obj.script) as fds:
test_script = fds.read()
self.obj.log.info(test_script)
self.assertIn("'/?random={}'.format(apiritif.random_uniform(1, 10))", test_script)
def test_jmeter_functions_random_string(self):
self.configure({
"execution": [{
"test-mode": "apiritif",
"scenario": {
"default-address": "http://blazedemo.com",
"requests": [
"/?rs=${__RandomString(3)}",
"/?rs=${__RandomString(4,abcdef)}",
]
}
}]
})
self.obj.prepare()
with open(self.obj.script) as fds:
test_script = fds.read()
self.obj.log.info(test_script)
self.assertIn("'/?rs={}'.format(apiritif.random_string(3))", test_script)
self.assertIn("'/?rs={}'.format(apiritif.random_string(4, 'abcdef'))", test_script)
def test_load_reader(self):
reader = ApiritifLoadReader(self.obj.log)
items = list(reader._read())
self.assertEqual(len(items), 0)
reader.register_file(RESOURCES_DIR + "jmeter/jtl/tranctl.jtl")
items = list(reader._read())
self.assertEqual(len(items), 2)
reader.register_file(RESOURCES_DIR + "jmeter/jtl/tranctl.jtl")
reader.register_file(RESOURCES_DIR + "jmeter/jtl/tranctl.jtl")
items = list(reader._read())
self.assertEqual(len(items), 4)
def test_func_reader(self):
reader = ApiritifFuncReader(self.obj.engine, self.obj.log)
items = list(reader.read())
self.assertEqual(len(items), 0)
reader.register_file(RESOURCES_DIR + "apiritif/transactions.ldjson")
reader.register_file(RESOURCES_DIR + "apiritif/transactions.ldjson")
items = list(reader.read())
self.assertEqual(len(items), 12)
class TestPyTestExecutor(BZTestCase):
def setUp(self):
super(TestPyTestExecutor, self).setUp()
self.obj = PyTestExecutor()
self.obj.engine = EngineEmul()
self.obj.env = self.obj.engine.env
def configure(self, config):
self.obj.engine.config.merge(config)
self.obj.execution = self.obj.engine.config["execution"][0]
def test_full_single_script(self):
self.obj.execution.merge({
"iterations": 1,
"scenario": {
"script": RESOURCES_DIR + "selenium/pytest/test_statuses.py"
}
})
self.obj.prepare()
try:
self.obj.startup()
while not self.obj.check():
time.sleep(self.obj.engine.check_interval)
finally:
self.obj.shutdown()
self.obj.post_process()
self.assertFalse(self.obj.has_results())
self.assertNotEquals(self.obj.process, None)
def test_statuses(self):
self.obj.execution.merge({
"scenario": {
"script": RESOURCES_DIR + "selenium/pytest/test_statuses.py"
}
})
self.obj.prepare()
try:
self.obj.startup()
while not self.obj.check():
time.sleep(self.obj.engine.check_interval)
finally:
self.obj.shutdown()
self.obj.post_process()
with open(self.obj.report_file) as fds:
report = [json.loads(line) for line in fds.readlines() if line]
self.assertEqual(4, len(report))
self.assertEqual(["PASSED", "FAILED", "FAILED", "SKIPPED"], [item["status"] for item in report])
def test_iterations(self):
self.obj.execution.merge({
"iterations": 10,
"scenario": {
"script": RESOURCES_DIR + "selenium/pytest/test_single.py"
}
})
self.obj.prepare()
try:
self.obj.startup()
while not self.obj.check():
time.sleep(self.obj.engine.check_interval)
finally:
self.obj.shutdown()
self.obj.post_process()
with open(self.obj.report_file) as fds:
report = [json.loads(line) for line in fds.readlines() if line]
self.assertEqual(10, len(report))
self.assertTrue(all(item["status"] == "PASSED" for item in report))
def test_hold(self):
self.obj.execution.merge({
"hold-for": "3s",
"scenario": {
"script": RESOURCES_DIR + "selenium/pytest/test_single.py"
}
})
self.obj.prepare()
try:
start_time = time.time()
self.obj.startup()
while not self.obj.check():
time.sleep(self.obj.engine.check_interval)
finally:
self.obj.shutdown()
end_time = time.time()
self.obj.post_process()
duration = end_time - start_time
self.assertGreaterEqual(duration, 3.0)
def test_blazedemo(self):
self.obj.engine.check_interval = 0.1
self.obj.execution.merge({
"scenario": {
"script": RESOURCES_DIR + "selenium/pytest/test_blazedemo.py"
}
})
self.obj.prepare()
try:
self.obj.startup()
while not self.obj.check():
time.sleep(self.obj.engine.check_interval)
finally:
self.obj.shutdown()
self.obj.post_process()
with open(self.obj.report_file) as fds:
report = [json.loads(line) for line in fds.readlines() if line]
self.assertEqual(2, len(report))
def test_package(self):
self.obj.engine.check_interval = 0.1
self.obj.execution.merge({
"scenario": {
"script": RESOURCES_DIR + "selenium/pytest/"
}
})
self.obj.prepare()
try:
self.obj.startup()
while not self.obj.check():
time.sleep(self.obj.engine.check_interval)
finally:
self.obj.shutdown()
self.obj.post_process()
with open(self.obj.report_file) as fds:
report = [json.loads(line) for line in fds.readlines() if line]
self.assertEqual(7, len(report))
def test_additional_args(self):
ADDITIONAL_ARGS = "--foo --bar"
self.obj.execution.merge({
"scenario": {
"additional-args": ADDITIONAL_ARGS,
"script": RESOURCES_DIR + "selenium/pytest/test_single.py"
}
})
self.obj.runner_path = RESOURCES_DIR + "selenium/pytest/bin/runner.py"
self.obj.prepare()
try:
self.obj.startup()
while not self.obj.check():
time.sleep(self.obj.engine.check_interval)
finally:
self.obj.shutdown()
with open(self.obj.stdout_file) as fds:
stdout = fds.read()
self.assertIn(ADDITIONAL_ARGS, stdout)
class TestRobotExecutor(BZTestCase):
def setUp(self):
super(TestRobotExecutor, self).setUp()
self.obj = RobotExecutor()
self.obj.engine = EngineEmul()
self.obj.env = self.obj.engine.env
def configure(self, config):
self.obj.engine.config.merge(config)
self.obj.execution = self.obj.engine.config["execution"][0]
def test_full_single_script(self):
self.configure({
"execution": [{
"scenario": {
"script": RESOURCES_DIR + "selenium/robot/simple/test.robot"
}
}]
})
self.obj.prepare()
try:
self.obj.startup()
while not self.obj.check():
time.sleep(self.obj.engine.check_interval)
finally:
self.obj.shutdown()
self.obj.post_process()
self.assertFalse(self.obj.has_results())
self.assertNotEquals(self.obj.process, None)
lines = open(self.obj.report_file).readlines()
self.assertEqual(5, len(lines))
def test_hold(self):
self.configure({
"execution": [{
"hold-for": "5s",
"scenario": {
"script": RESOURCES_DIR + "selenium/robot/simple/test.robot"
}
}]
})
self.obj.prepare()
try:
start_time = time.time()
self.obj.startup()
while not self.obj.check():
time.sleep(self.obj.engine.check_interval)
finally:
self.obj.shutdown()
self.obj.post_process()
self.assertTrue(os.path.exists(self.obj.report_file))
duration = time.time() - start_time
self.assertGreater(duration, 5)
def test_iterations(self):
self.configure({
"execution": [{
"iterations": 3,
"scenario": {
"script": RESOURCES_DIR + "selenium/robot/simple/test.robot"
}
}]
})
self.obj.prepare()
try:
self.obj.startup()
while not self.obj.check():
time.sleep(self.obj.engine.check_interval)
finally:
self.obj.shutdown()
self.obj.post_process()
self.assertFalse(self.obj.has_results())
self.assertNotEquals(self.obj.process, None)
lines = open(self.obj.report_file).readlines()
self.assertEqual(3 * 5, len(lines))
def test_variables(self):
self.configure({
"execution": [{
"iterations": 1,
"scenario": {
"variables": {
"USERNAME": "janedoe",
},
"script": RESOURCES_DIR + "selenium/robot/simple/test_novar.robot",
}
}]
})
self.obj.prepare()
try:
self.obj.startup()
while not self.obj.check():
time.sleep(self.obj.engine.check_interval)
finally:
self.obj.shutdown()
self.obj.post_process()
self.assertFalse(self.obj.has_results())
self.assertNotEquals(self.obj.process, None)
samples = [json.loads(line) for line in open(self.obj.report_file).readlines() if line]
self.obj.log.info(samples)
self.assertEqual(5, len(samples))
self.assertTrue(all(sample["status"] == "PASSED" for sample in samples))
def test_variables_file(self):
self.configure({
"execution": [{
"iterations": 1,
"scenario": {
"variables": RESOURCES_DIR + "selenium/robot/simple/vars.yaml",
"script": RESOURCES_DIR + "selenium/robot/simple/test_novar.robot",
}
}]
})
self.obj.prepare()
try:
self.obj.startup()
while not self.obj.check():
time.sleep(self.obj.engine.check_interval)
finally:
self.obj.shutdown()
self.obj.post_process()
self.assertFalse(self.obj.has_results())
self.assertNotEquals(self.obj.process, None)
samples = [json.loads(line) for line in open(self.obj.report_file).readlines() if line]
self.obj.log.info(samples)
self.assertEqual(5, len(samples))
self.assertTrue(all(sample["status"] == "PASSED" for sample in samples))
```
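Several tests above read `.ldjson` report files line by line (one JSON sample per line) and assert on the `status` field of each sample. A small standalone sketch of that reading pattern, with a synthetic report written first — the file name and sample contents are assumptions for illustration:

```python
import json

# write a tiny synthetic line-delimited JSON report
samples_in = [
    {"test_case": "test_login", "status": "PASSED"},
    {"test_case": "test_checkout", "status": "FAILED"},
]
with open("report.ldjson", "w") as out:
    for sample in samples_in:
        out.write(json.dumps(sample) + "\n")

# read it back the way the tests above do
with open("report.ldjson") as fds:
    report = [json.loads(line) for line in fds.readlines() if line.strip()]

print(len(report))                          # 2
print([item["status"] for item in report])  # ['PASSED', 'FAILED']
assert all("status" in item for item in report)
```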
#### File: tests/modules/test_blazeMeterUploader.py
```python
import json
import logging
import math
import os
import shutil
import time
from io import BytesIO
from bzt import TaurusException
from bzt.bza import Master, Session
from bzt.modules.aggregator import DataPoint, KPISet
from bzt.modules.blazemeter import BlazeMeterUploader, ResultsFromBZA
from bzt.modules.blazemeter import MonitoringBuffer
from bzt.six import HTTPError
from bzt.six import iteritems, viewvalues
from tests import BZTestCase, random_datapoint, RESOURCES_DIR
from tests.mocks import EngineEmul, BZMock
class TestBlazeMeterUploader(BZTestCase):
def test_some_errors(self):
mock = BZMock()
mock.mock_get.update({
'https://a.blazemeter.com/api/v4/tests?projectId=1&name=Taurus+Test': {"result": []},
'https://a.blazemeter.com/api/v4/sessions/1': {"result": {'id': 1, "note": "somenote"}},
'https://a.blazemeter.com/api/v4/masters/1': {"result": {'id': 1, "note": "somenote"}},
})
mock.mock_post.update({
'https://a.blazemeter.com/api/v4/projects': {"result": {'id': 1}},
'https://a.blazemeter.com/api/v4/tests': {"result": {'id': 1}},
'https://a.blazemeter.com/api/v4/tests/1/start-external': {"result": {
"session": {'id': 1, "testId": 1, "userId": 1},
"master": {'id': 1},
"signature": "sign"
}},
'https://data.blazemeter.com/api/v4/image/1/files?signature=sign': {"result": True},
'https://data.blazemeter.com/submit.php?session_id=1&signature=sign&test_id=1&user_id=1' +
'&pq=0&target=labels_bulk&update=1': {},
'https://a.blazemeter.com/api/v4/sessions/1/stop': {"result": True},
'https://data.blazemeter.com/submit.php?session_id=1&signature=sign&test_id=1&user_id=1&pq=0&target=engine_health&update=1': {'result': {'session': {}}}
})
mock.mock_patch.update({
'https://a.blazemeter.com/api/v4/sessions/1': {"result": {"id": 1, "note": "somenote"}},
'https://a.blazemeter.com/api/v4/masters/1': {"result": {"id": 1, "note": "somenote"}},
})
obj = BlazeMeterUploader()
mock.apply(obj._user)
obj.parameters['project'] = 'Proj name'
obj.settings['token'] = '<PASSWORD>'
obj.settings['browser-open'] = 'none'
obj.engine = EngineEmul()
obj.prepare()
obj.startup()
obj.engine.stopping_reason = ValueError('wrong value')
obj.aggregated_second(random_datapoint(10))
obj.kpi_buffer[-1][DataPoint.CUMULATIVE][''][KPISet.ERRORS] = [
{'msg': 'Forbidden', 'cnt': 10, 'type': KPISet.ERRTYPE_ASSERT, 'urls': [], KPISet.RESP_CODES: '111'},
{'msg': 'Allowed', 'cnt': 20, 'type': KPISet.ERRTYPE_ERROR, 'urls': [], KPISet.RESP_CODES: '222'}]
obj.post_process()
obj.log.info("Requests: %s", mock.requests)
# check for note appending in _postproc_phase3()
reqs = mock.requests[-4:]
self.assertIn('api/v4/sessions/1', reqs[0]['url'])
self.assertIn('api/v4/sessions/1', reqs[1]['url'])
self.assertIn('api/v4/masters/1', reqs[2]['url'])
self.assertIn('api/v4/masters/1', reqs[3]['url'])
self.assertIn('ValueError: wrong value', str(reqs[1]['data']))
self.assertIn('ValueError: wrong value', str(reqs[3]['data']))
labels = mock.requests[8]['data']
if not isinstance(labels, str):
labels = labels.decode("utf-8")
obj.log.info("Labels: %s", labels)
data = json.loads(str(labels))
self.assertEqual(1, len(data['labels']))
total_item = data['labels'][0]
self.assertEqual('ALL', total_item['name'])
self.assertEqual(total_item['assertions'],
[{'failureMessage': 'Forbidden', 'failures': 10, 'name': 'All Assertions'}])
self.assertEqual(total_item['errors'], [{'m': 'Allowed', 'count': 20, 'rc': '222'}])
def test_no_notes_for_public_reporting(self):
mock = BZMock()
mock.mock_post.update({
'https://a.blazemeter.com/api/v4/sessions/1/terminate-external': {},
'https://data.blazemeter.com/submit.php?session_id=1&signature=None&test_id=1&user_id=1&pq=0&target=labels_bulk&update=1': {},
})
obj = BlazeMeterUploader()
obj.parameters['project'] = 'Proj name'
obj.settings['token'] = '' # public reporting
obj.settings['browser-open'] = 'none'
obj.engine = EngineEmul()
mock.apply(obj._user)
obj.prepare()
obj._session = Session(obj._user, {'id': 1, 'testId': 1, 'userId': 1})
obj._master = Master(obj._user, {'id': 1})
obj.engine.stopping_reason = ValueError('wrong value')
obj.aggregated_second(random_datapoint(10))
obj.kpi_buffer[-1][DataPoint.CUMULATIVE][''][KPISet.ERRORS] = [
{'msg': 'Forbidden', 'cnt': 10, 'type': KPISet.ERRTYPE_ASSERT, 'urls': [], KPISet.RESP_CODES: '111'},
{'msg': 'Allowed', 'cnt': 20, 'type': KPISet.ERRTYPE_ERROR, 'urls': [], KPISet.RESP_CODES: '222'}]
obj.send_monitoring = False
obj.post_process()
# TODO: looks like this whole block of checks is useless
# check for note appending in _postproc_phase3()
reqs = [{'url': '', 'data': ''} for _ in range(4)] # add template for minimal size
reqs = (reqs + mock.requests)[-4:]
self.assertNotIn('api/v4/sessions/1', reqs[0]['url'])
self.assertNotIn('api/v4/sessions/1', reqs[1]['url'])
self.assertNotIn('api/v4/masters/1', reqs[2]['url'])
self.assertNotIn('api/v4/masters/1', reqs[3]['url'])
if reqs[1]['data']:
self.assertNotIn('ValueError: wrong value', reqs[1]['data'])
if reqs[3]['data']:
self.assertNotIn('ValueError: wrong value', reqs[3]['data'])
def test_check(self):
mock = BZMock()
mock.mock_get.update({
'https://a.blazemeter.com/api/v4/tests?workspaceId=1&name=Taurus+Test': {"result": []},
'https://a.blazemeter.com/api/v4/tests?projectId=1&name=Taurus+Test': {"result": []}
})
mock.mock_post.update({
'https://a.blazemeter.com/api/v4/projects': {"result": {
"id": 1,
"name": "boo",
"userId": 2,
"description": None,
"created": time.time(),
"updated": time.time(),
"organizationId": None
}},
'https://a.blazemeter.com/api/v4/tests': {"result": {'id': 1}},
'https://a.blazemeter.com/api/v4/tests/1/start-external': {"result": {
'session': {'id': 1, 'userId': 1, 'testId': 1},
'master': {'id': 1, 'userId': 1},
'signature': 'sign'}},
'https://data.blazemeter.com/submit.php?session_id=1&signature=sign&test_id=1&user_id=1&pq=0&target=labels_bulk&update=1': [
{},
{"result": {'session': {"statusCode": 140, 'status': 'ENDED'}}},
{},
],
'https://data.blazemeter.com/api/v4/image/1/files?signature=sign': [
IOError("monitoring push expected fail"),
{"result": True},
{"result": True},
{"result": True},
{"result": True},
{"result": True},
{"result": True},
{"result": True},
{"result": True},
],
'https://a.blazemeter.com/api/v4/sessions/1/stop': {},
'https://data.blazemeter.com/submit.php?session_id=1&signature=sign&test_id=1&user_id=1&pq=0&target=engine_health&update=1':
{"result": {'session': {}}}
})
obj = BlazeMeterUploader()
obj.parameters['project'] = 'Proj name'
obj.settings['token'] = '123'
obj.settings['browser-open'] = 'none'
obj.engine = EngineEmul()
shutil.copy(__file__, os.path.join(obj.engine.artifacts_dir, os.path.basename(__file__)))
mock.apply(obj._user)
obj._user.timeout = 0.1
obj.prepare()
obj.startup()
for x in range(0, 31):
obj.aggregated_second(random_datapoint(x))
mon = [{"ts": 1, "source": "local", "cpu": 1, "mem": 2, "bytes-recv": 100, "other": 0}]
obj.monitoring_data(mon)
obj.check()
for x in range(32, 65):
obj.aggregated_second(random_datapoint(x))
obj.last_dispatch = time.time() - 2 * obj.send_interval
self.assertRaises(KeyboardInterrupt, obj.check)
obj.aggregated_second(random_datapoint(10))
obj.shutdown()
log_file = obj.engine.create_artifact('log', '.tmp')
handler = logging.FileHandler(log_file)
obj.engine.log.parent.addHandler(handler)
obj.engine.config.get('modules').get('shellexec').get('env')['TAURUS_INDEX_ALL'] = 1
obj.post_process()
self.assertEqual(20, len(mock.requests))
obj.engine.log.parent.removeHandler(handler)
def test_monitoring_buffer_limit_option(self):
obj = BlazeMeterUploader()
obj.engine = EngineEmul()
mock = BZMock(obj._user)
obj.settings["monitoring-buffer-limit"] = 100
obj.prepare()
for i in range(1000):
mon = [{"ts": i, "source": "local", "cpu": float(i) / 1000 * 100, "mem": 2, "bytes-recv": 100, "other": 0}]
obj.monitoring_data(mon)
for source, buffer in iteritems(obj.monitoring_buffer.data):
self.assertLessEqual(len(buffer), 100)
self.assertEqual(1, len(mock.requests))
def test_direct_feeding(self):
obj = BlazeMeterUploader()
self.sniff_log(obj.log)
obj.engine = EngineEmul()
mock = BZMock(obj._user)
mock.mock_post.update({
'https://data.blazemeter.com/submit.php?session_id=direct&signature=sign&test_id=None&user_id=None&pq=0&target=labels_bulk&update=1': {},
'https://data.blazemeter.com/api/v4/image/direct/files?signature=sign': {"result": True},
'https://a.blazemeter.com/api/v4/sessions/direct/stop': {"result": True},
'https://data.blazemeter.com/submit.php?session_id=direct&signature=sign&test_id=None&user_id=None&pq=0&target=engine_health&update=1': {'result': {'session': {}}}
})
mock.mock_get.update({
'https://a.blazemeter.com/api/v4/sessions/direct': {"result": {}}
})
mock.mock_patch.update({
'https://a.blazemeter.com/api/v4/sessions/direct': {"result": {}}
})
obj.parameters['session-id'] = 'direct'
obj.parameters['signature'] = 'sign'
obj.settings['token'] = '<PASSWORD>'
obj.prepare()
obj.startup()
obj.check()
obj.shutdown()
obj.engine.stopping_reason = TaurusException("To cover")
obj.post_process()
self.assertNotIn("Failed to finish online", self.log_recorder.warn_buff.getvalue())
self.assertEquals('direct', obj._session['id'])
self.assertEqual(9, len(mock.requests), "Requests were: %s" % mock.requests)
def test_anonymous_feeding(self):
obj = BlazeMeterUploader()
obj.engine = EngineEmul()
obj.browser_open = False
mock = BZMock(obj._user)
mock.mock_post.update({
'https://a.blazemeter.com/api/v4/sessions': {"result": {
"signature": "sign",
"publicTokenUrl": "publicUrl",
"session": {"id": 1, "testId": 1, "userId": 1},
"master": {"id": 1},
}},
'https://data.blazemeter.com/submit.php?session_id=1&signature=sign&test_id=1&user_id=1&pq=0&target=labels_bulk&update=1': {},
'https://data.blazemeter.com/api/v4/image/1/files?signature=sign': {"result": True},
'https://data.blazemeter.com/submit.php?session_id=1&signature=sign&test_id=1&user_id=1&pq=0&target=engine_health&update=1': {'result': {'session': {}}},
})
obj.prepare()
obj.startup()
obj.check()
obj.shutdown()
obj.post_process()
self.assertEquals(1, obj._session['id'])
self.assertEqual(6, len(mock.requests), "Requests were: %s" % mock.requests)
def test_401(self):
obj = BlazeMeterUploader()
obj.engine = EngineEmul()
mock = BZMock(obj._user)
mock.mock_get.update({
'https://a.blazemeter.com/api/v4/web/version': HTTPError(None, None, None, None, None, ),
})
self.assertRaises(HTTPError, obj.prepare)
def test_multiple_reporters_one_monitoring(self):
obj1 = BlazeMeterUploader()
obj1.engine = EngineEmul()
BZMock(obj1._user)
obj2 = BlazeMeterUploader()
obj2.engine = EngineEmul()
BZMock(obj2._user)
obj1.prepare()
obj2.prepare()
for i in range(10):
mon = [{"ts": i, "source": "local", "cpu": float(i) / 1000 * 100, "mem": 2, "bytes-recv": 100, "other": 0}]
obj1.monitoring_data(mon)
obj2.monitoring_data(mon)
def test_public_report(self):
mock = BZMock()
mock.mock_get.update({
'https://a.blazemeter.com/api/v4/tests?workspaceId=1&name=Taurus+Test': {"result": []}
})
mock.mock_post.update({
'https://a.blazemeter.com/api/v4/projects': {"result": {'id': 1}},
'https://a.blazemeter.com/api/v4/tests': {'result': {'id': 'unittest1'}},
'https://a.blazemeter.com/api/v4/tests/unittest1/start-external': {"result": {
'session': {'id': 'sess1', 'userId': 1, 'testId': 1},
'master': {'id': 'master1', 'userId': 1},
'signature': ''
}},
'https://a.blazemeter.com/api/v4/masters/master1/public-token': {'result': {'publicToken': '<PASSWORD>Token'}},
'https://data.blazemeter.com/submit.php?session_id=sess1&signature=&test_id=1&user_id=1&pq=0&target=labels_bulk&update=1': {
"result": {'session': {}}},
'https://data.blazemeter.com/api/v4/image/sess1/files?signature=': {'result': True},
})
obj = BlazeMeterUploader()
obj.settings['token'] = '<PASSWORD>'
obj.settings['browser-open'] = 'none'
obj.settings['public-report'] = True
obj.settings['send-monitoring'] = False
obj.engine = EngineEmul()
mock.apply(obj._user)
self.sniff_log(obj.log)
obj.prepare()
obj.startup()
obj.aggregated_second(random_datapoint(10))
obj.check()
obj.shutdown()
obj.post_process()
log_buff = self.log_recorder.info_buff.getvalue()
log_line = "Public report link: https://a.blazemeter.com/app/?public-token=publicToken#/masters/master1/summary"
self.assertIn(log_line, log_buff)
logging.warning("\n".join([x['url'] for x in mock.requests]))
self.assertEqual(14, len(mock.requests))
def test_new_project_existing_test(self):
obj = BlazeMeterUploader()
mock = BZMock(obj._user)
mock.mock_get.update({
'https://a.blazemeter.com/api/v4/tests?workspaceId=1&name=Taurus+Test': {'result': [
{'id': 1, 'name': '<NAME>', 'configuration': {"type": 'external'}}
]},
'https://a.blazemeter.com/api/v4/tests?projectId=1&name=Taurus+Test': {'result': []}
})
mock.mock_post.update({
'https://a.blazemeter.com/api/v4/projects': {"result": {"id": 1}},
'https://a.blazemeter.com/api/v4/tests': {"result": {"id": 1}},
})
obj.parameters['project'] = 'Proj name'
obj.settings['token'] = '<PASSWORD>'
obj.settings['browser-open'] = 'none'
obj.engine = EngineEmul()
obj.prepare()
self.assertEquals('https://a.blazemeter.com/api/v4/projects', mock.requests[4]['url'])
self.assertEquals('POST', mock.requests[4]['method'])
self.assertEquals('https://a.blazemeter.com/api/v4/tests', mock.requests[6]['url'])
self.assertEquals('POST', mock.requests[6]['method'])
def test_new_project_new_test(self):
obj = BlazeMeterUploader()
mock = BZMock(obj._user)
mock.mock_get.update({
'https://a.blazemeter.com/api/v4/tests?workspaceId=1&name=Taurus+Test': {'result': []},
'https://a.blazemeter.com/api/v4/projects?workspaceId=1': {'result': []}
})
mock.mock_post.update({
'https://a.blazemeter.com/api/v4/projects': {"result": {"id": 1}},
'https://a.blazemeter.com/api/v4/tests': {"result": {"id": 1}},
})
obj.settings['token'] = '<PASSWORD>'
obj.settings['browser-open'] = 'none'
obj.engine = EngineEmul()
obj.prepare()
self.assertEquals('https://a.blazemeter.com/api/v4/projects', mock.requests[6]['url'])
self.assertEquals('POST', mock.requests[6]['method'])
self.assertEquals('https://a.blazemeter.com/api/v4/tests', mock.requests[7]['url'])
self.assertEquals('POST', mock.requests[7]['method'])
def test_existing_project_new_test(self):
obj = BlazeMeterUploader()
mock = BZMock(obj._user)
mock.mock_get.update({
'https://a.blazemeter.com/api/v4/tests?projectId=1&name=Taurus+Test': {'result': []},
'https://a.blazemeter.com/api/v4/projects?workspaceId=1': {'result': [
{'id': 1, 'name': 'Proj name'}
]}
})
mock.mock_post.update({
'https://a.blazemeter.com/api/v4/projects': {"result": {"id": 1}},
'https://a.blazemeter.com/api/v4/tests': {"result": {"id": 1}},
})
obj.parameters['project'] = 'Proj name'
obj.settings['token'] = '<PASSWORD>'
obj.settings['browser-open'] = 'none'
obj.engine = EngineEmul()
obj.prepare()
self.assertEquals('https://a.blazemeter.com/api/v4/tests', mock.requests[6]['url'])
self.assertEquals('POST', mock.requests[6]['method'])
class TestBlazeMeterClientUnicode(BZTestCase):
def test_unicode_request(self):
"""
test UnicodeDecodeError in BlazeMeterClient._request()
"""
session = Session(data={'id': 1})
mock = BZMock(session)
mock.mock_post['https://data.blazemeter.com/api/v4/image/1/files?signature=None'] = {"result": 1}
session.upload_file(RESOURCES_DIR + "jmeter/unicode_file")
def test_binary_unicode_error(self):
session = Session(data={'id': 1})
mock = BZMock(session)
mock.mock_post['https://data.blazemeter.com/api/v4/image/1/files?signature=None'] = {"result": 1}
with open(RESOURCES_DIR + "jmeter/jmeter-dist-2.13.zip", 'rb') as fds:
zip_content = fds.read()
session.upload_file("jtls_and_more.zip", zip_content)
class DummyHttpResponse(object):
def __init__(self):
self.fake_socket = BytesIO()
self.fake_socket.write(open(RESOURCES_DIR + "unicode_file", 'rb').read())
def read(self):
self.fake_socket.seek(0)
return self.fake_socket.read(1024)
def dummy_urlopen(*args, **kwargs):
del args, kwargs
return DummyHttpResponse()
class TestResultsFromBZA(BZTestCase):
@staticmethod
def convert_kpi_errors(errors):
result = {}
for error in errors:
result[error['msg']] = {'count': error['cnt'], 'rc': error['rc']}
return result
@staticmethod
def get_errors_mock(errors, assertions=None):
# return mock of server response for errors specified in internal format (see __get_errors_from_BZA())
result = []
if not assertions:
assertions = {}
for _id in list(set(list(errors.keys()) + list(assertions.keys()))): # unique keys from both dictionaries
errors_list = []
if errors.get(_id):
for msg in errors[_id]:
errors_list.append({
"m": msg,
"count": errors[_id][msg]["count"],
"rc": errors[_id][msg]["rc"]})
assertions_list = []
if assertions.get(_id):
for msg in assertions[_id]:
assertions_list.append({
"failureMessage": msg,
"failures": assertions[_id][msg]["count"],
"name": "All Assertions"})
result.append({
"_id": _id,
"name": _id,
"assertions": assertions_list,
"samplesNotCounted": 0,
"assertionsNotCounted": 0,
"otherErrorsCount": 0,
"errors": errors_list})
return {
"https://a.blazemeter.com/api/v4/masters/1/reports/errorsreport/data?noDataError=false": {
"api_version": 4,
"error": None,
"result": result}}
def test_get_errors(self):
mock = BZMock()
mock.mock_get.update({
'https://a.blazemeter.com/api/v4/data/labels?master_id=1': {
"api_version": 4,
"error": None,
"result": [{
"sessions": ["r-t-5746a8e38569a"],
"id": "ALL",
"name": "ALL"
}, {
"sessions": ["r-t-5746a8e38569a"],
"id": "e843ff89a5737891a10251cbb0db08e5",
"name": "http://blazedemo.com/"}]},
'https://a.blazemeter.com/api/v4/data/kpis?interval=1&from=0&master_ids%5B%5D=1&kpis%5B%5D=t&kpis%5B%5D=lt&kpis%5B%5D=by&kpis%5B%5D=n&kpis%5B%5D=ec&kpis%5B%5D=ts&kpis%5B%5D=na&labels%5B%5D=ALL&labels%5B%5D=e843ff89a5737891a10251cbb0db08e5': {
"api_version": 4,
"error": None,
"result": [{
"labelId": "ALL",
"labelName": "ALL",
"label": "ALL",
"kpis": [{
"n": 1, "na": 1, "ec": 0, "p90": 0, "t_avg": 817, "lt_avg": 82,
"by_avg": 0, "n_avg": 1, "ec_avg": 0, "ts": 1464248743
}, {"n": 1, "na": 1, "ec": 0, "p90": 0, "t_avg": 817, "lt_avg": 82,
"by_avg": 0, "n_avg": 1, "ec_avg": 0, "ts": 1464248744}]}]},
'https://a.blazemeter.com/api/v4/masters/1/reports/aggregatereport/data': {
"api_version": 4,
"error": None,
"result": [{
"labelName": "ALL", "99line": 1050, "90line": 836, "95line": 912}, {
"labelName": "http://blazedemo.com", "99line": 1050, "90line": 836, "95line": 912}]},
'https://a.blazemeter.com/api/v4/data/kpis?interval=1&from=1464248744&master_ids%5B%5D=1&kpis%5B%5D=t&kpis%5B%5D=lt&kpis%5B%5D=by&kpis%5B%5D=n&kpis%5B%5D=ec&kpis%5B%5D=ts&kpis%5B%5D=na&labels%5B%5D=ALL&labels%5B%5D=e843ff89a5737891a10251cbb0db08e5': {
"api_version": 4,
"error": None,
"result": [{
"labelId": "ALL",
"labelName": "ALL",
"label": "ALL",
"kpis": [{
"n": 1, "na": 1, "ec": 0, "p90": 0, "t_avg": 817, "lt_avg": 82,
"by_avg": 0, "n_avg": 1, "ec_avg": 0, "ts": 1464248744
}, {"n": 1, "na": 1, "ec": 0, "p90": 0, "t_avg": 817, "lt_avg": 82,
"by_avg": 0, "n_avg": 1, "ec_avg": 0, "ts": 1464248745}]}]},
'https://a.blazemeter.com/api/v4/data/kpis?interval=1&from=1464248745&master_ids%5B%5D=1&kpis%5B%5D=t&kpis%5B%5D=lt&kpis%5B%5D=by&kpis%5B%5D=n&kpis%5B%5D=ec&kpis%5B%5D=ts&kpis%5B%5D=na&labels%5B%5D=ALL&labels%5B%5D=e843ff89a5737891a10251cbb0db08e5': {
"api_version": 4,
"error": None,
"result": [{
"labelId": "ALL",
"labelName": "ALL",
"label": "ALL",
"kpis": [{
"n": 1, "na": 1, "ec": 0, "p90": 0, "t_avg": 817, "lt_avg": 82,
"by_avg": 0, "n_avg": 1, "ec_avg": 0, "ts": 1464248745}]}]}})
obj = ResultsFromBZA()
obj.master = Master(data={"id": 1})
mock.apply(obj.master)
# set cumulative errors from BM
mock.mock_get.update(self.get_errors_mock({'ALL': {"Not found": {"count": 10, "rc": "404"}}}))
# frame [0, 1464248744)
res1 = list(obj.datapoints(False))
self.assertEqual(1, len(res1))
cumul = res1[0][DataPoint.CUMULATIVE]
cur = res1[0][DataPoint.CURRENT]
self.assertEqual(1, len(cumul.keys()))
self.assertEqual(1, len(cur.keys()))
errors_1 = {'Not found': {'count': 10, 'rc': u'404'}}
self.assertEqual(self.convert_kpi_errors(cumul[""]["errors"]), errors_1) # all error data is written
self.assertEqual(self.convert_kpi_errors(cur[""]["errors"]), errors_1) # to 'current' and 'cumulative'
# frame [1464248744, 1464248745)
res2 = list(obj.datapoints(False))
self.assertEqual(1, len(res2))
cumul = res2[0][DataPoint.CUMULATIVE]
cur = res2[0][DataPoint.CURRENT]
self.assertEqual(1, len(cumul.keys()))
self.assertEqual(1, len(cur.keys()))
self.assertEqual(self.convert_kpi_errors(cumul[""]["errors"]), errors_1) # the same errors,
self.assertEqual(cur[""]["errors"], []) # new errors not found
mock.mock_get.update(self.get_errors_mock({
"ALL": {
"Not found": {
"count": 11, "rc": "404"}, # one more error
"Found": {
"count": 2, "rc": "200"}}, # new error message (error ID)
"label1": {
"Strange behaviour": {
"count": 666, "rc": "666"}}}, { # new error label
"ALL": {"assertion_example": {"count": 33}}}))
res3 = list(obj.datapoints(True)) # let's add the last timestamp [1464248745]
self.assertEqual(1, len(res3))
cumul = res3[0][DataPoint.CUMULATIVE]
cur = res3[0][DataPoint.CURRENT]
errors_all_full = {
'Not found': {'count': 11, 'rc': '404'},
'Found': {'count': 2, 'rc': '200'},
'assertion_example': {'count': 33, 'rc': 'All Assertions'}}
errors_all_update = {
'Not found': {'count': 1, 'rc': '404'},
'Found': {'count': 2, 'rc': '200'},
'assertion_example': {'count': 33, 'rc': 'All Assertions'}}
errors_label1 = {'Strange behaviour': {'count': 666, 'rc': '666'}}
self.assertEqual(errors_label1, self.convert_kpi_errors(cumul["label1"]["errors"]))
self.assertEqual(errors_all_full, self.convert_kpi_errors(cumul[""]["errors"]))
self.assertEqual(errors_label1, self.convert_kpi_errors(cur["label1"]["errors"]))
self.assertEqual(errors_all_update, self.convert_kpi_errors(cur[""]["errors"]))
def test_datapoint(self):
mock = BZMock()
mock.mock_get.update({
'https://a.blazemeter.com/api/v4/data/labels?master_id=1': {
"api_version": 2,
"error": None,
"result": [{
"sessions": ["r-t-5746a8e38569a"],
"id": "ALL",
"name": "ALL"
}, {
"sessions": ["r-t-5746a8e38569a"],
"id": "<KEY>",
"name": "http://blazedemo.com/"}]},
'https://a.blazemeter.com/api/v4/data/kpis?interval=1&from=0&master_ids%5B%5D=1&kpis%5B%5D=t&kpis%5B%5D=lt&kpis%5B%5D=by&kpis%5B%5D=n&kpis%5B%5D=ec&kpis%5B%5D=ts&kpis%5B%5D=na&labels%5B%5D=ALL&labels%5B%5D=<KEY>': {
"api_version": 2,
"error": None,
"result": [{
"labelId": "ALL",
"labelName": "ALL",
"label": "ALL",
"kpis": [{
"n": 1,
"na": 1,
"ec": 0,
"p90": 0,
"t_avg": 817,
"lt_avg": 82,
"by_avg": 0,
"n_avg": 1,
"ec_avg": 0,
"ts": 1464248743}]}]},
'https://a.blazemeter.com/api/v4/masters/1/reports/aggregatereport/data': {
"api_version": 2,
"error": None,
"result": [{
"labelId": "ALL",
"labelName": "ALL",
"samples": 152,
"avgResponseTime": 786,
"90line": 836,
"95line": 912,
"99line": 1050,
"minResponseTime": 531,
"maxResponseTime": 1148,
"avgLatency": 81,
"geoMeanResponseTime": None,
"stDev": 108,
"duration": 119,
"avgBytes": 0,
"avgThroughput": 1.2773109243697,
"medianResponseTime": 0,
"errorsCount": 0,
"errorsRate": 0,
"hasLabelPassedThresholds": None
}, {
"labelId": "e843ff89a5737891a10251cbb0db08e5",
"labelName": "http://blazedemo.com/",
"samples": 152,
"avgResponseTime": 786,
"90line": 836,
"95line": 912,
"99line": 1050,
"minResponseTime": 531,
"maxResponseTime": 1148,
"avgLatency": 81,
"geoMeanResponseTime": None,
"stDev": 108,
"duration": 119,
"avgBytes": 0,
"avgThroughput": 1.2773109243697,
"medianResponseTime": 0,
"errorsCount": 0,
"errorsRate": 0,
"hasLabelPassedThresholds": None}]}})
mock.mock_get.update(self.get_errors_mock({"ALL": {}}))
obj = ResultsFromBZA()
obj.master = Master(data={"id": 1})
mock.apply(obj.master)
res = list(obj.datapoints(True))
cumulative_ = res[0][DataPoint.CUMULATIVE]
total = cumulative_['']
percentiles_ = total[KPISet.PERCENTILES]
self.assertEquals(1.05, percentiles_['99.0'])
def test_no_kpis_on_cloud_crash(self):
mock = BZMock()
mock.mock_get.update({
'https://a.blazemeter.com/api/v4/data/labels?master_id=0': {
"api_version": 2,
"error": None,
"result": [
{
"sessions": [
"r-t-5746a8e38569a"
],
"id": "ALL",
"name": "ALL"
},
{
"sessions": [
"r-t-5746a8e38569a"
],
"id": "e843ff89a5737891a10251cbb0db08e5",
"name": "http://blazedemo.com/"
}
]
},
'https://a.blazemeter.com/api/v4/data/kpis?interval=1&from=0&master_ids%5B%5D=0&kpis%5B%5D=t&kpis%5B%5D=lt&kpis%5B%5D=by&kpis%5B%5D=n&kpis%5B%5D=ec&kpis%5B%5D=ts&kpis%5B%5D=na&labels%5B%5D=ALL&labels%5B%5D=e843ff89a5737891a10251cbb0db08e5': {
"api_version": 2,
"error": None,
"result": [
{
"labelId": "ALL",
"labelName": "ALL",
}
]
},
'https://a.blazemeter.com/api/v4/masters/0/reports/aggregatereport/data': {
"api_version": 2,
"error": None,
"result": [
{
"labelId": "ALL",
"labelName": "ALL",
"samples": 152,
"avgResponseTime": 786,
"90line": 836,
"95line": 912,
"99line": 1050,
"minResponseTime": 531,
"maxResponseTime": 1148,
"avgLatency": 81,
"geoMeanResponseTime": None,
"stDev": 108,
"duration": 119,
"avgBytes": 0,
"avgThroughput": 1.2773109243697,
"medianResponseTime": 0,
"errorsCount": 0,
"errorsRate": 0,
"hasLabelPassedThresholds": None
},
{
"labelId": "e843ff89a5737891a10251cbb0db08e5",
"labelName": "http://blazedemo.com/",
"samples": 152,
"avgResponseTime": 786,
"90line": 836,
"95line": 912,
"99line": 1050,
"minResponseTime": 531,
"maxResponseTime": 1148,
"avgLatency": 81,
"geoMeanResponseTime": None,
"stDev": 108,
"duration": 119,
"avgBytes": 0,
"avgThroughput": 1.2773109243697,
"medianResponseTime": 0,
"errorsCount": 0,
"errorsRate": 0,
"hasLabelPassedThresholds": None
}
]
}
})
obj = ResultsFromBZA(Master(data={'id': 0}))
mock.apply(obj.master)
res = list(obj.datapoints(True))
self.assertEqual(res, [])
class TestMonitoringBuffer(BZTestCase):
def to_rad(self, deg):
return deg * math.pi / 180
def test_harmonic(self):
iterations = 50
size_limit = 10
mon_buffer = MonitoringBuffer(size_limit, logging.getLogger(''))
for i in range(iterations):
cpu = math.sin(self.to_rad(float(i) / iterations * 180))
mon = [{"ts": i, "source": "local", "cpu": cpu}]
mon_buffer.record_data(mon)
self.assertLessEqual(len(mon_buffer.data['local']), size_limit)
def test_downsample_theorem(self):
# Theorem: average interval size in monitoring buffer will always
# be less or equal than ITERATIONS / BUFFER_LIMIT
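# For example: 5000 datapoints with a buffer limit of 100 should give an
# average interval of roughly 5000 / 100 = 50; the assertion below allows
# 20% slack, i.e. an average interval of at most 60.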
mon_buffer = MonitoringBuffer(100, logging.getLogger(''))
for i in range(5000):
mon = [{"ts": i, "source": "local", "cpu": 1, "mem": 2, "bytes-recv": 100, "other": 0}]
mon_buffer.record_data(mon)
for source, buffer in iteritems(mon_buffer.data):
self.assertLessEqual(len(buffer), 100)
sizes = [item['interval'] for item in viewvalues(buffer)]
avg_size = float(sum(sizes)) / len(sizes)
expected_size = 5000 / 100
self.assertLessEqual(avg_size, expected_size * 1.20)
def test_sources(self):
mon_buffer = MonitoringBuffer(10, logging.getLogger(''))
for i in range(100):
mon = [
{"ts": i, "source": "local", "cpu": 1, "mem": 2, "bytes-recv": 100},
{"ts": i, "source": "server-agent", "cpu": 10, "mem": 20},
]
mon_buffer.record_data(mon)
for source, buffer in iteritems(mon_buffer.data):
self.assertLessEqual(len(buffer), 10)
def test_unpack(self):
ITERATIONS = 200
SIZE_LIMIT = 10
mon_buffer = MonitoringBuffer(SIZE_LIMIT, logging.getLogger(''))
for i in range(ITERATIONS):
mon = [{"ts": i, "source": "local", "cpu": 1}]
mon_buffer.record_data(mon)
unpacked = sum(item['interval'] for item in viewvalues(mon_buffer.data['local']))
self.assertEqual(unpacked, ITERATIONS)
``` |
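The `MonitoringBuffer` tests above rely on downsampling: once a source exceeds the buffer limit, adjacent datapoints are merged and their `interval` counts accumulate, so the total number of recorded seconds is preserved. The sketch below is not the real `bzt` implementation, only a minimal illustration of that invariant (the class name and the merge rule are made up):

```python
from collections import OrderedDict


class TinyMonitoringBuffer:
    """Illustrative downsampling buffer (hypothetical, simplified).

    Keeps at most `size_limit` datapoints per source; when the limit is
    exceeded, the adjacent pair with the smallest combined interval is
    merged, so the sum of all `interval` fields always equals the number
    of datapoints recorded.
    """

    def __init__(self, size_limit):
        self.size_limit = size_limit
        self.data = {}  # source -> OrderedDict(ts -> datapoint)

    def record_data(self, data):
        for item in data:
            buff = self.data.setdefault(item["source"], OrderedDict())
            item.setdefault("interval", 1)
            buff[item["ts"]] = item
            while len(buff) > self.size_limit:
                self._downsample(buff)

    @staticmethod
    def _downsample(buff):
        keys = list(buff.keys())
        # merge the adjacent pair whose combined interval is smallest
        idx = min(range(len(keys) - 1),
                  key=lambda i: buff[keys[i]]["interval"] + buff[keys[i + 1]]["interval"])
        merged_away = buff.pop(keys[idx + 1])
        buff[keys[idx]]["interval"] += merged_away["interval"]


if __name__ == "__main__":
    buf = TinyMonitoringBuffer(10)
    for i in range(200):
        buf.record_data([{"ts": i, "source": "local", "cpu": 1}])
    local = buf.data["local"]
    assert len(local) <= 10
    assert sum(item["interval"] for item in local.values()) == 200
```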
{
"source": "3dgiordano/TrackingR",
"score": 3
} |
#### File: python/construct_dataset/construct_dataset.py
```python
import os
import sys
sys.path.append(os.path.abspath(os.path.dirname(__file__) + '/' + '../..'))
import numpy as np
import pandas as pd
from python.tools import (
clean_folder
)
def construct_dataset(file_name, var_name):
"""Convenience function for constructing
a clean Pandas dataframe from the CSV
files provided by JH CSSE on their Github
repo
Args:
file_name (str): File name / URL of CSV file
var_name (str): Variable name
Returns:
df: Dataframe
"""
df = pd.read_csv(file_name)
del df['Lat'], df['Long']
# Melt to long format
df = pd.melt(df,
id_vars = ['Province/State', 'Country/Region'],
value_vars = list(df.columns.values[2:]))
df.rename(columns = {'variable': 'Date',
'value': var_name},
inplace = True)
# For some countries, data are reported
# by regions / states; aggregate to country level
return df.groupby(['Country/Region', 'Date']).sum().reset_index()
################
## Parameters ##
################
input_folder = './construct_dataset/input'
output_folder = './construct_dataset/output'
min_cases = 100
days_infectious_list = [5, 6, 7, 8, 9, 10] # Values of (1 / gamma) used in constructing
# time series of infected individuals
end_date = '2020-05-06' # End of sample
restrict_end_sample = False
#######################
## Construct dataset ##
#######################
clean_folder(output_folder)
# Read in data on total cases
df = construct_dataset(file_name = '{}/time_series_covid19_confirmed_global.csv'.format(input_folder),
var_name = 'total_cases')
# Merge in recovered cases and deaths
for file_name, var_name in zip(['{}/time_series_covid19_recovered_global.csv'.format(input_folder),
'{}/time_series_covid19_deaths_global.csv'.format(input_folder)],
['total_recovered', 'total_deaths']):
df_temp = construct_dataset(file_name = file_name,
var_name = var_name)
df = pd.merge(df, df_temp,
on = ['Country/Region', 'Date'],
how = 'left')
# Only consider days after a minimum
# number of total cases has been reached
mask = (df['total_cases'] >= min_cases)
df = df.loc[mask, ]
# Calculate world aggregates
df_temp = df.groupby('Date').sum().reset_index()
df_temp['Country/Region'] = 'World'
df = pd.concat([df_temp, df])
# Clean up the dataframe
df['Date'] = pd.to_datetime(df['Date'])
df.reset_index(inplace = True)
del df['index']
# Remove "Diamond Princess" from the data
mask = df['Country/Region'] != 'Diamond Princess'
df = df.loc[mask, ]
# Fix a few country names
df['Country/Region'] = df['Country/Region'].apply(lambda x: 'Taiwan' if x == 'Taiwan*' else x)
df['Country/Region'] = df['Country/Region'].apply(lambda x: 'South Korea' if x == 'Korea, South' else x)
# Sort by date
df.sort_values(by = ['Country/Region', 'Date'], ascending = True,
inplace = True)
# Construct derived flow variables (new cases /
# recoveries / deaths)
for var_name in ['cases', 'recovered', 'deaths']:
df['new_{}'.format(var_name)] = (df['total_{}'.format(var_name)]
- df.groupby('Country/Region').shift()['total_{}'.format(var_name)])
# Replace flow variables with 7-day moving averages
#for var_name in ['cases', 'recovered', 'deaths']:
# df['new_{}'.format(var_name)] = df.groupby('Country/Region')['new_{}'.format(var_name)].transform(lambda x: x.rolling(7).mean())
# Construct number of infected
for days_infectious in days_infectious_list:
df['infected_{}'.format(days_infectious)] = np.nan
for country in df['Country/Region'].unique():
mask = df['Country/Region'] == country
df_country = df.loc[mask, ].copy().reset_index()
T = df_country.shape[0]
# Initialize number of infected
infected = np.zeros(T) * np.nan
infected[0] = df_country['total_cases'][0]
# Main loop
for tt in range(1, T):
gamma = 1 / float(days_infectious)
# Calculate number of infected recursively;
# In the JH CSSE dataset, there are some
# data problems whereby new cases are occasionally
# reported to be negative; in these case, take zero
# when constructing time series for # of infected,
# and then change values to NaN's later on
infected[tt] = ((1 - gamma) * infected[tt - 1]
+ np.maximum(df_country['new_cases'][tt], 0.0))
df.loc[mask, 'infected_{}'.format(days_infectious)] = infected
# In the original JH CSSE dataset, there are
# some inconsistencies in the data
# Replace with NaN's in these cases
mask = df['new_cases'] < 0
df.loc[mask, 'new_cases'] = np.nan
print(' Inconsistent observations in new_cases in JH CSSE dataset: {:}'.format(mask.sum()))
for days_infectious in days_infectious_list:
df.loc[mask, 'infected_{}'.format(days_infectious)] = np.nan
# Calculate growth rate of infected
for days_infectious in days_infectious_list:
df['gr_infected_{}'.format(days_infectious)] = ((df['infected_{}'.format(days_infectious)]
/ df.groupby('Country/Region').shift(1)['infected_{}'.format(days_infectious)]) - 1)
mask = df.groupby('Country/Region').shift(1)['infected_{}'.format(days_infectious)] == 0.0
df.loc[mask, 'gr_infected_{}'.format(days_infectious)] = np.nan
# Deal with potential consecutive zeros in the number of infected
for days_infectious in days_infectious_list:
mask = (df['infected_{}'.format(days_infectious)] == 0.0) & (df.groupby('Country/Region').shift(1)['infected_{}'.format(days_infectious)] == 0.0)
df.loc[mask, 'gr_infected_{}'.format(days_infectious)] = - (1 / days_infectious)
if mask.sum() > 0:
print(' Number of observations with zero infected (with {} infectious days) over two consecutive days: {:}'.format(days_infectious, mask.sum()))
# Set to NaN observations with very small
# number of cases but very high growth rates
# to avoid these observations acting as
# large outliers
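# (since gr_infected is approximately gamma * (R - 1), a cutoff of
# gamma * (5 - 1) corresponds to an implied R of about 5)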
for days_infectious in days_infectious_list:
gamma = 1 / float(days_infectious)
mask = (df['new_cases'] <= 25) & (df['gr_infected_{}'.format(days_infectious)] >= gamma * (5 - 1)) # Implicit upper bound on R
df.loc[mask, ['infected_{}'.format(days_infectious),
'gr_infected_{}'.format(days_infectious)]] = np.nan
# Set to NaN observations implausibly
# high growth rates that are likely
# due to data issues
for days_infectious in days_infectious_list:
gamma = 1 / float(days_infectious)
mask = (df['gr_infected_{}'.format(days_infectious)] >= gamma * (10 - 1)) # Implicit upper bound on R
df.loc[mask, ['infected_{}'.format(days_infectious),
'gr_infected_{}'.format(days_infectious)]] = np.nan
# Remove initial NaN values for growth rates
for country in df['Country/Region'].unique():
mask = df['Country/Region'] == country
T = df.loc[mask, ].shape[0]
df.loc[mask, 'days_since_min_cases'] = range(T)
mask = df['days_since_min_cases'] >= 1
df = df.loc[mask, ]
del df['days_since_min_cases']
# If requested, restrict sample period
if restrict_end_sample:
mask = df['Date'] <= end_date
df = df.loc[mask, ]
# Save final dataset
df.to_csv('{}/dataset.csv'.format(output_folder), index = False)
```
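The key step in the script above is the recursion for the stock of infected individuals, I_t = (1 - gamma) * I_{t-1} + max(new_cases_t, 0), and the growth rate gr_t = I_t / I_{t-1} - 1 computed from it. A self-contained illustration with made-up case counts (not JH CSSE data):

```python
import numpy as np

days_infectious = 7
gamma = 1 / float(days_infectious)

# hypothetical daily new cases after the 100-case threshold
new_cases = np.array([100, 120, 90, 150, 130], dtype=float)
total_cases = new_cases.cumsum()

infected = np.full_like(total_cases, np.nan)
infected[0] = total_cases[0]
for t in range(1, len(infected)):
    # same recursion as construct_dataset.py: yesterday's infected decay
    # at rate gamma, plus today's (non-negative) new cases
    infected[t] = (1 - gamma) * infected[t - 1] + max(new_cases[t], 0.0)

gr_infected = infected[1:] / infected[:-1] - 1
print(infected)
print(gr_infected)
```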
#### File: python/estimate_R/estimate_R_KF.py
```python
import os
import sys
sys.path.append(os.path.abspath(os.path.dirname(__file__) + '/' + '../..'))
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import statsmodels.api as sm
import statsmodels.formula.api as smf
import scipy
import warnings
from python.tools import (
clean_folder
)
def estimate_R(y, gamma, n_start_values_grid = 0, maxiter = 200):
"""Estimate basic reproduction number using
Kalman filtering techniques
Args:
y (np array): Time series of growth rate in infections
gamma (double): Rate of recoveries (gamma)
n_start_values_grid (int, optional): Number of starting values used in the optimization;
the effective number of starting values is (n_start_values_grid ** 2)
maxiter (int, optional): Maximum number of iterations
Returns:
dict: Dictionary containing the results
R (np array): Estimated series for R
se_R (np array): Estimated standard error for R
flag (int): Optimization flag (0 if successful)
sigma2_irregular (float): Estimated variance of the irregular component
sigma2_level (float): Estimated variance of the level component
gamma (float): Value of gamma used in the estimation
"""
assert isinstance(n_start_values_grid, int), \
"n_start_values_grid must be an integer"
assert isinstance(maxiter, int), \
"maxiter must be an integer"
assert n_start_values_grid >= 0 and maxiter > 0, \
"n_start_values_grid and max_iter must be positive"
assert isinstance(y, np.ndarray), \
"y must be a numpy array"
assert y.ndim == 1, \
"y must be a vector"
# Setup model instance
mod_ll = sm.tsa.UnobservedComponents(y, 'local level')
# Estimate model
if n_start_values_grid > 0:
# If requested, use multiple starting
# values for more robust optimization results
start_vals_grid = np.linspace(0.01, 2.0, n_start_values_grid) * pd.Series(y).var()
opt_res = []
for start_val_1 in start_vals_grid:
for start_val_2 in start_vals_grid:
res_ll = mod_ll.fit(start_params = np.array([start_val_1, start_val_2]),
disp = False, maxiter = maxiter)
opt_res.append({'obj_value': res_ll.mle_retvals['fopt'],
'start_val_1': start_val_1,
'start_val_2': start_val_2,
'flag': res_ll.mle_retvals['warnflag']})
# The optimizer minimizes the negative of
# the likelihood, so find the minimum value
opt_res = pd.DataFrame(opt_res)
opt_res.sort_values(by = 'obj_value', ascending = True, inplace = True)
res_ll = mod_ll.fit(start_params = np.array([opt_res['start_val_1'].iloc[0],
opt_res['start_val_2'].iloc[0]]),
maxiter = maxiter, disp = False)
else:
res_ll = mod_ll.fit(maxiter = maxiter, disp = False)
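# Map the smoothed growth rate of infected back to R: the lines below invert
# gr = gamma * (R - 1), i.e. R = 1 + gr / gamma, applied to the smoothed level
# and its standard error.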
R = 1 + 1 / (gamma) * res_ll.smoothed_state[0]
se_R = (1 / gamma * (res_ll.smoothed_state_cov[0] ** 0.5))[0]
return {'R': R,
'se_R': se_R,
'flag': res_ll.mle_retvals['warnflag'],
'sigma2_irregular': res_ll.params[0],
'sigma2_level': res_ll.params[1],
'signal_to_noise': res_ll.params[1] / res_ll.params[0],
'gamma': gamma}
################
## Parameters ##
################
output_folder = './estimate_R/output/estimate_R_KF/'
input_folder = './estimate_R/input/estimate_R_KF/'
min_T = 20
gamma = 1 / 7.0
min_signal_to_noise = 0.01
max_signal_to_noise = 0.25
days_infectious = 7 # Baseline for duration of infectiousness
###############
## Load data ##
###############
clean_folder(output_folder)
df = pd.read_csv('{}/dataset.csv'.format(input_folder))
df['Date'] = pd.to_datetime(df['Date'])
# Impose minimum time-series observations
df_temp = df.groupby('Country/Region').count()['gr_infected_{}'.format(days_infectious)].reset_index()
df_temp.rename(columns = {'gr_infected_{}'.format(days_infectious): 'no_obs'},
inplace = True)
df = pd.merge(df, df_temp, how = 'left')
mask = df['no_obs'] >= min_T
df = df.loc[mask, ]
################
## Estimate R ##
################
df['R'] = np.nan
df['se_R'] = np.nan
df_optim_res = []
with warnings.catch_warnings():
# Ignore warnings from statsmodels
# Instead, check later
warnings.filterwarnings("ignore", message = "Maximum Likelihood optimization failed to converge. Check mle_retvals")
for country in df['Country/Region'].unique():
mask = df['Country/Region'] == country
df_temp = df.loc[mask, ].copy()
y = df_temp['gr_infected_{}'.format(days_infectious)].values
res = estimate_R(y, gamma = gamma)
df.loc[mask, 'R'] = res['R']
df.loc[mask, 'se_R'] = res['se_R']
df_optim_res.append({'Country/Region': country,
'flag': res['flag'],
'sigma2_irregular': res['sigma2_irregular'],
'sigma2_level': res['sigma2_level'],
'signal_to_noise': res['signal_to_noise']})
df_optim_res = pd.DataFrame(df_optim_res)
# Merge in optimization results
df = pd.merge(df, df_optim_res, how = 'left')
###################################
## Filter out unreliable results ##
###################################
# Unsuccessful optimization
mask = df['flag'] != 0
df = df.loc[~mask, ]
# Filter out implausible signal-to-noise ratios
mask = (df['signal_to_noise'] <= min_signal_to_noise) | (df['signal_to_noise'] >= max_signal_to_noise)
df = df.loc[~mask, ]
# Collect optimization results
df_optim_res = df.groupby('Country/Region').first()[['flag', 'sigma2_irregular', 'sigma2_level', 'signal_to_noise']].reset_index()
df_optim_res.to_csv('{}/optim_res.csv'.format(output_folder), index = False)
####################
## Export results ##
####################
df = df[['Country/Region', 'Date', 'R', 'se_R']].copy()
df.reset_index(inplace = True)
del df['index']
df['days_infectious'] = 1 / gamma
# Calculate confidence intervals
alpha = [0.05, 0.35]
names = ['95', '65']
for aa, name in zip(alpha, names):
t_crit = scipy.stats.norm.ppf(1 - aa / 2)
df['ci_{}_u'.format(name)] = df['R'] + t_crit * df['se_R']
df['ci_{}_l'.format(name)] = df['R'] - t_crit * df['se_R']
# Save estimates
df.to_csv('{}/estimated_R.csv'.format(output_folder), index = False)
```
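A minimal usage sketch for the `estimate_R` function defined above (illustrative only: it assumes that function and its statsmodels imports are already in scope, and it uses synthetic growth-rate data rather than the project's dataset):

```python
import numpy as np

np.random.seed(0)
gamma = 1 / 7.0
true_R = 1.5
# With R = 1.5, the implied growth rate is gamma * (R - 1) ~= 0.071
y = gamma * (true_R - 1) + 0.02 * np.random.randn(60)

res = estimate_R(y, gamma=gamma)  # estimate_R as defined in the file above
print(res['R'][:5])     # smoothed estimates of R
print(res['se_R'][:5])  # pointwise standard errors
print(res['flag'])      # 0 if the optimizer reported convergence
```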
#### File: python/stargazer/stargazer.py
```python
from __future__ import print_function
from statsmodels.regression.linear_model import RegressionResultsWrapper
from numpy import round, sqrt
class Stargazer:
"""
Class that is constructed with one or more trained
OLS models from the statsmodels package.
The user then can change the rendering options by
chaining different methods to the Stargazer object
and then render the results in either HTML or LaTeX.
"""
def __init__(self, models):
self.models = models
self.num_models = len(models)
self.extract_data()
self.reset_params()
def validate_input(self):
"""
Check inputs to see if they are going to
cause any problems further down the line.
Any future checking will be added here.
"""
targets = []
for m in self.models:
if not isinstance(m, RegressionResultsWrapper):
raise ValueError('Please use trained OLS models as inputs')
targets.append(m.model.endog_names)
# if targets.count(targets[0]) != len(targets):
# raise ValueError('Please make sure OLS targets are identical')
self.dependent_variable = targets[0]
def reset_params(self):
"""
Set all of the rendering parameters to their default settings.
Run upon initialization but also allows the user to reset
if they have made several changes and want to start fresh.
Does not affect any of the underlying model data.
"""
self.title_text = None
# self.show_header = True
self.header = True
# self.dep_var_name = 'Dependent variable:'
self.dep_var_name = ''
self.column_labels = None
self.column_separators = None
self.show_model_nums = True
self.original_cov_names = None
self.cov_map = None
self.show_precision = True
self.show_sig = True
self.sig_levels = [0.1, 0.05, 0.01]
self.sig_digits = 3
self.confidence_intervals = False
self.show_footer = True
self.custom_footer_text = []
self.show_n = True
self.show_r2 = True
self.show_adj_r2 = True
self.show_residual_std_err = True
self.show_f_statistic = True
self.show_dof = True
self.show_notes = True
self.notes_label = 'Note:'
self.notes_append = True
self.custom_notes = []
self.checkmarks = []
def extract_data(self):
"""
Extract the values we need from the models and store
for use or modification. They should not be able to
be modified by any rendering parameters.
"""
self.validate_input()
self.model_data = []
for m in self.models:
self.model_data.append(self.extract_model_data(m))
covs = []
for md in self.model_data:
covs = covs + list(md['cov_names'])
self.cov_names = sorted(set(covs))
def extract_model_data(self, model):
data = {}
data['cov_names'] = model.params.index.values
data['cov_values'] = model.params
data['p_values'] = model.pvalues
data['cov_std_err'] = model.bse
data['conf_int_low_values'] = model.conf_int()[0]
data['conf_int_high_values'] = model.conf_int()[1]
data['r2'] = model.rsquared
data['r2_adj'] = model.rsquared_adj
data['resid_std_err'] = sqrt(model.scale)
# data['f_statistic'] = model.fvalue
# data['f_p_value'] = model.f_pvalue
data['degree_freedom'] = model.df_model
data['degree_freedom_resid'] = model.df_resid
data['n_obs'] = model.nobs
return data
# Begin render option functions
def title(self, title):
self.title_text = title
def show_header(self, show):
assert type(show) == bool, 'Please input True/False'
self.header = show
def show_model_numbers(self, show):
assert type(show) == bool, 'Please input True/False'
self.show_model_nums = show
def custom_columns(self, labels, separators=None):
if separators is not None:
assert type(labels) == list, 'Please input a list of labels or a single label string'
assert type(separators) == list, 'Please input a list of column separators'
assert len(labels) == len(separators), 'Number of labels must match number of columns'
assert sum([int(type(s) != int) for s in separators]) == 0, 'Columns must be ints'
assert sum(separators) == self.num_models, 'Please set number of columns to number of models'
else:
assert type(labels) == str, 'Please input a single string label if no columns specified'
self.column_labels = labels
self.column_separators = separators
def significance_levels(self, levels):
assert len(levels) == 3, 'Please input 3 significance levels'
assert sum([int(type(l) != float) for l in levels]) == 0, 'Please input floating point values as significance levels'
self.sig_levels = sorted(levels, reverse=True)
def significant_digits(self, digits):
assert type(digits) == int, 'The number of significant digits must be an int'
assert digits < 10, 'Whoa hold on there bud, maybe use fewer digits'
self.sig_digits = digits
def show_confidence_intervals(self, show):
assert type(show) == bool, 'Please input True/False'
self.confidence_intervals = show
def dependent_variable_name(self, name):
assert type(name) == str, 'Please input a string to use as the dependent variable name'
self.dep_var_name = name
def covariate_order(self, cov_names):
missing = set(cov_names).difference(set(self.cov_names))
assert not missing, ('Covariate order must contain subset of existing '
'covariates: {} are not.'.format(missing))
self.original_cov_names = self.cov_names
self.cov_names = cov_names
def rename_covariates(self, cov_map):
assert isinstance(cov_map, dict), 'Please input a dictionary with covariate names as keys'
self.cov_map = cov_map
def reset_covariate_order(self):
if self.original_cov_names is not None:
self.cov_names = self.original_cov_names
def show_degrees_of_freedom(self, show):
assert type(show) == bool, 'Please input True/False'
self.show_dof = show
def custom_note_label(self, notes_label):
assert type(notes_label) == str, 'Please input a string to use as the note label'
self.notes_label = notes_label
def add_custom_notes(self, notes):
assert sum([int(type(n) != str) for n in notes]) == 0, 'Notes must be strings'
self.custom_notes = notes
def append_notes(self, append):
assert type(append) == bool, 'Please input True/False'
self.notes_append = append
def add_checkmarks(self, checkmarks):
assert type(checkmarks) == list, 'Please input a list'
for item in checkmarks:
assert len(item['values']) == self.num_models, 'Please set number of checkmarks to number of models'
assert type(item) == dict, 'Please input a dictionary for checkmarks'
self.checkmarks = checkmarks
# Begin HTML render functions
def render_html(self):
html = ''
html += self.generate_header_html()
html += self.generate_body_html()
html += self.generate_footer_html()
return html
def generate_header_html(self):
header = ''
if not self.header:
return header
if self.title_text is not None:
header += self.title_text + '<br>'
header += '<table style="text-align:center"><tr><td colspan="'
header += str(self.num_models + 1) + '" style="border-bottom: 1px solid black"></td></tr>'
if self.dep_var_name is not None:
header += '<tr><td style="text-align:left"></td><td colspan="' + str(self.num_models)
header += '"><em>' + self.dep_var_name + '</em></td></tr>'
header += '<tr><td style="text-align:left"></td>'
if self.column_labels is not None:
if type(self.column_labels) == str:
header += '<td colspan="' + str(self.num_models) + '">'
header += self.column_labels + "</td></tr>"
else:
# The first table column holds the covariates names:
header += '<tr><td></td>'
for i, label in enumerate(self.column_labels):
sep = self.column_separators[i]
header += '<td colspan="{}">{}</td>'.format(sep, label)
header += '</tr>'
if self.show_model_nums:
header += '<tr><td style="text-align:left"></td>'
for num in range(1, self.num_models + 1):
header += '<td>(' + str(num) + ')</td>'
header += '</tr>'
header += '<tr><td colspan="' + str(self.num_models + 1)
header += '" style="border-bottom: 1px solid black"></td></tr>'
return header
def generate_body_html(self):
"""
Generate the body of the results where the
covariate reporting is.
"""
body = ''
for cov_name in self.cov_names:
body += self.generate_cov_rows_html(cov_name)
return body
def generate_cov_rows_html(self, cov_name):
cov_text = ''
cov_text += self.generate_cov_main_html(cov_name)
if self.show_precision:
cov_text += self.generate_cov_precision_html(cov_name)
else:
cov_text += '<tr></tr>'
return cov_text
def generate_cov_main_html(self, cov_name):
cov_print_name = cov_name
if self.cov_map is not None:
cov_print_name = self.cov_map.get(cov_print_name, cov_name)
cov_text = '<tr><td style="text-align:left">' + cov_print_name + '</td>'
for md in self.model_data:
if cov_name in md['cov_names']:
cov_text += '<td>'
cov_text += str(round(md['cov_values'][cov_name], self.sig_digits))
if self.show_sig:
cov_text += '<sup>' + str(self.get_sig_icon(md['p_values'][cov_name])) + '</sup>'
cov_text += '</td>'
else:
cov_text += '<td></td>'
cov_text += '</tr>'
return cov_text
def generate_cov_precision_html(self, cov_name):
cov_text = '<tr><td style="text-align:left"></td>'
for md in self.model_data:
if cov_name in md['cov_names']:
cov_text += '<td>('
if self.confidence_intervals:
cov_text += str(round(md['conf_int_low_values'][cov_name], self.sig_digits)) + ' , '
cov_text += str(round(md['conf_int_high_values'][cov_name], self.sig_digits))
else:
cov_text += str(round(md['cov_std_err'][cov_name], self.sig_digits))
cov_text += ')</td>'
else:
cov_text += '<td></td>'
cov_text += '</tr>'
return cov_text
def get_sig_icon(self, p_value, sig_char='*'):
if p_value >= self.sig_levels[0]:
return ''
elif p_value >= self.sig_levels[1]:
return sig_char
elif p_value >= self.sig_levels[2]:
return sig_char * 2
else:
return sig_char * 3
def generate_footer_html(self):
"""
Generate the footer of the table where
model summary section is.
"""
footer = '<td colspan="' + str(self.num_models + 1) + '" style="border-bottom: 1px solid black"></td></tr>'
if not self.show_footer:
return footer
footer += self.generate_observations_html()
footer += self.generate_r2_html()
footer += self.generate_r2_adj_html()
if self.show_residual_std_err:
footer += self.generate_resid_std_err_html()
if self.show_f_statistic:
footer += self.generate_f_statistic_html()
footer += '<tr><td colspan="' + str(self.num_models + 1) + '" style="border-bottom: 1px solid black"></td></tr>'
footer += self.generate_notes_html()
footer += '</table>'
return footer
def generate_observations_html(self):
obs_text = ''
if not self.show_n:
return obs_text
obs_text += '<tr><td style="text-align: left">Observations</td>'
for md in self.model_data:
obs_text += '<td>' + str(md['degree_freedom'] + md['degree_freedom_resid'] + 1) + '</td>'
obs_text += '</tr>'
return obs_text
def generate_r2_html(self):
r2_text = ''
if not self.show_r2:
return r2_text
r2_text += '<tr><td style="text-align: left">R<sup>2</sup></td>'
for md in self.model_data:
r2_text += '<td>' + str(round(md['r2'], self.sig_digits)) + '</td>'
r2_text += '</tr>'
return r2_text
def generate_r2_adj_html(self):
r2_text = ''
if not self.show_r2:
return r2_text
r2_text += '<tr><td style="text-align: left">Adjusted R<sup>2</sup></td>'
for md in self.model_data:
r2_text += '<td>' + str(round(md['r2_adj'], self.sig_digits)) + '</td>'
r2_text += '</tr>'
return r2_text
def generate_resid_std_err_html(self):
rse_text = ''
if not self.show_r2:
return rse_text
rse_text += '<tr><td style="text-align: left">Residual Std. Error</td>'
for md in self.model_data:
rse_text += '<td>' + str(round(md['resid_std_err'], self.sig_digits))
if self.show_dof:
rse_text += '(df = ' + str(round(md['degree_freedom_resid'])) + ')'
rse_text += '</td>'
rse_text += '</tr>'
return rse_text
def generate_f_statistic_html(self):
f_text = ''
if not self.show_r2:
return f_text
f_text += '<tr><td style="text-align: left">F Statistic</td>'
for md in self.model_data:
f_text += '<td>' + str(round(md['f_statistic'], self.sig_digits))
f_text += '<sup>' + self.get_sig_icon(md['f_p_value']) + '</sup>'
if self.show_dof:
f_text += '(df = ' + str(md['degree_freedom']) + '; ' + str(md['degree_freedom_resid']) + ')'
f_text += '</td>'
f_text += '</tr>'
return f_text
def generate_notes_html(self):
notes_text = ''
if not self.show_notes:
return notes_text
notes_text += '<tr><td style="text-align: left">' + self.notes_label + '</td>'
if self.notes_append:
notes_text += self.generate_p_value_section_html()
notes_text += '</tr>'
notes_text += self.generate_additional_notes_html()
return notes_text
def generate_p_value_section_html(self):
notes_text = """
<td colspan="{}" style="text-align: right">
<sup>*</sup>p<{};
<sup>**</sup>p<{};
<sup>***</sup>p<{}
</td>""".format(self.num_models, *self.sig_levels)
return notes_text
def generate_additional_notes_html(self):
notes_text = ''
if len(self.custom_notes) == 0:
return notes_text
i = 0
for i, note in enumerate(self.custom_notes):
if (i != 0) | (self.notes_append):
notes_text += '<tr>'
notes_text += '<td></td><td colspan="' + str(self.num_models) + '" style="text-align: right">' + note + '</td></tr>'
return notes_text
# Begin LaTeX render functions
def render_latex(self, only_tabular=False):
latex = ''
latex += self.generate_header_latex(only_tabular=only_tabular)
latex += self.generate_body_latex()
latex += self.generate_footer_latex(only_tabular=only_tabular)
return latex
# def generate_header_latex(self, only_tabular=False):
# header = ''
# if not only_tabular:
# header += '\\begin{table}[!htbp] \\centering\n'
# if not self.show_header:
# return header
# if self.title_text is not None:
# header += ' \\caption{' + self.title_text + '}\n'
# header += ' \\label{}\n'
# header += '\\begin{tabular}{@{\\extracolsep{5pt}}lcc}\n'
# header += '\\\\[-1.8ex]\\hline\n'
# header += '\\hline \\\\[-1.8ex]\n'
# if self.dep_var_name is not None:
# header += '& \\multicolumn{' + str(self.num_models) + '}{c}'
# header += '{\\textit{' + self.dep_var_name + '}} \\\n'
# header += '\\cr \\cline{' + str(self.num_models) + '-' + str(self.num_models + 1) + '}\n'
# if self.column_labels is not None:
# if type(self.column_labels) == str:
# header += '\\\\[-1.8ex] & \\multicolumn{' + str(self.num_models) + '}{c}{' + self.column_labels + '} \\\\'
# else:
# header += '\\\\[-1.8ex] '
# for i, label in enumerate(self.column_labels):
# header += '& \\multicolumn{' + str(self.column_separators[i])
# header += '}{c}{' + label + '} '
# header += ' \\\\\n'
# if self.show_model_nums:
# header += '\\\\[-1.8ex] '
# for num in range(1, self.num_models + 1):
# header += '& (' + str(num) + ') '
# header += '\\\\\n'
# header += '\\hline \\\\[-1.8ex]\n'
# return header
def generate_header_latex(self, only_tabular=False):
header = '\\begin{table}[!htbp] \\centering\n'
if self.title_text is not None:
header += ' \\caption{' + self.title_text + '}\n'
header += ' \\label{}\n\\begin{tabular}{@{\\extracolsep{5pt}}'
header += 'l{}}}\n'.format('c' * self.num_models)
header += '\\toprule'
# header += '\\\\[-1.8ex]\\hline\n'
# header += '\\hline '
if self.dep_var_name != '':
# header += '\\\\[-1.8ex]\n'
header += '& \\multicolumn{' + str(self.num_models) + '}{c}'
header += '{\\textit{' + self.dep_var_name + '}} \\\n'
header += '\\cr \\cmidrule{' + str(2) + '-' + str(self.num_models + 1) + '}\n'
else:
header += '\n'
if self.column_labels is not None:
if type(self.column_labels) == str:
# header += '\\\\[-1.8ex] & \\multicolumn{' + str(self.num_models) + '}{c}{' + self.column_labels + '} \\\\'
header += '\\\\ & \\multicolumn{' + str(self.num_models) + '}{c}{' + self.column_labels + '} \\\\'
else:
# header += '\\\\[-1.8ex] '
header += '\\\\ '
for i, label in enumerate(self.column_labels):
header += '& \\multicolumn{' + str(self.column_separators[i])
header += '}{c}{' + label + '} '
header += ' \\\\\n'
if self.show_model_nums:
# header += '\\\\[-1.8ex] '
for num in range(1, self.num_models + 1):
header += '& (' + str(num) + ') '
header += '\\\\\n'
# header += '\\hline \\\\[-1.8ex]\n'
header += '\\midrule \n'
return header
def generate_body_latex(self):
"""
Generate the body of the results where the
covariate reporting is.
"""
body = ''
for cov_name in self.cov_names:
body += self.generate_cov_rows_latex(cov_name)
body += ' '
for _ in range(self.num_models):
body += '& '
body += '\\\\\n'
return body
def generate_cov_rows_latex(self, cov_name):
cov_text = ''
cov_text += self.generate_cov_main_latex(cov_name)
if self.show_precision:
cov_text += self.generate_cov_precision_latex(cov_name)
else:
cov_text += '& '
return cov_text
def generate_cov_main_latex(self, cov_name):
cov_print_name = cov_name
if self.cov_map is not None:
if cov_name in self.cov_map:
cov_print_name = self.cov_map[cov_name]
cov_text = ' ' + cov_print_name + ' '
for md in self.model_data:
if cov_name in md['cov_names']:
cov_text += '& ' + str(round(md['cov_values'][cov_name], self.sig_digits))
if self.show_sig:
cov_text += '$^{' + str(self.get_sig_icon(md['p_values'][cov_name])) + '}$'
cov_text += ' '
else:
cov_text += '& '
cov_text += '\\\\\n'
return cov_text
def generate_cov_precision_latex(self, cov_name):
cov_text = ' '
for md in self.model_data:
if cov_name in md['cov_names']:
cov_text += '& ('
if self.confidence_intervals:
cov_text += str(round(md['conf_int_low_values'][cov_name], self.sig_digits)) + ' , '
cov_text += str(round(md['conf_int_high_values'][cov_name], self.sig_digits))
else:
cov_text += str(round(md['cov_std_err'][cov_name], self.sig_digits))
cov_text += ') '
else:
cov_text += '& '
cov_text += '\\\\\n'
return cov_text
# def generate_footer_latex(self, only_tabular=False):
# """
# Generate the footer of the table where
# model summary section is.
# """
# footer = '\\hline \\\\[-1.8ex]\n'
# if not self.show_footer:
# return footer
# footer += self.generate_observations_latex()
# footer += self.generate_r2_latex()
# footer += self.generate_r2_adj_latex()
# if self.show_residual_std_err:
# footer += self.generate_resid_std_err_latex()
# # if self.show_f_statistic:
# # footer += self.generate_f_statistic_latex()
# footer += '\\hline\n\\hline \\\\[-1.8ex]\n'
# footer += self.generate_notes_latex()
# footer += '\\end{tabular}'
# if not only_tabular:
# footer += '\n\\end{table}'
# return footer
def generate_footer_latex(self, only_tabular=False):
"""
Generate the footer of the table where
model summary section is.
"""
footer = '\\hline \\\\[-1.8ex]\n'
if not self.show_footer:
return footer
footer += self.generate_observations_latex()
footer += self.generate_r2_latex()
# footer += self.generate_r2_adj_latex()
footer += self.generate_checkmarks_latex()
# footer += self.generate_resid_std_err_latex()
# footer += self.generate_f_statistic_latex()
# footer += '\\hline\n\\hline \\\\[-1.8ex]\n'
footer += '\\bottomrule\n'
footer += '\\\\[-1.8ex]'
footer += self.generate_notes_latex()
footer += '\\end{tabular}\n\\end{table}'
return footer
# def generate_observations_latex(self):
# obs_text = ''
# if not self.show_n:
# return obs_text
# obs_text += ' Observations '
# for md in self.model_data:
# obs_text += '& ' + str(md['degree_freedom'] + md['degree_freedom_resid'] + 1) + ' '
# obs_text += '\\\\\n'
# return obs_text
def generate_observations_latex(self):
obs_text = ''
if not self.show_n:
return obs_text
obs_text += ' $N$ '
for md in self.model_data:
obs_text += '& ' + '{:,}'.format(int(md['n_obs'])) + ' '
obs_text += '\\\\\n'
return obs_text
def generate_r2_latex(self):
r2_text = ''
if not self.show_r2:
return r2_text
r2_text += ' $R^{2}$ '
for md in self.model_data:
r2_text += '& ' + str(round(md['r2'], self.sig_digits)) + ' '
r2_text += '\\\\\n'
return r2_text
def generate_r2_adj_latex(self):
r2_text = ''
if not self.show_r2:
return r2_text
r2_text += ' Adjusted R$^{2}$ '
for md in self.model_data:
r2_text += '& ' + str(round(md['r2_adj'], self.sig_digits)) + ' '
r2_text += '\\\\\n'
return r2_text
def generate_checkmarks_latex(self):
checkmarks_text = ''
if not self.checkmarks:
return checkmarks_text
for item in self.checkmarks:
checkmarks_text += ' {} '.format(item['label'])
for ii in range(self.num_models):
if item['values'][ii] == True:
mark = '\\checkmark'
else:
mark = ''
checkmarks_text += '& {} '.format(mark)
if ii == (self.num_models - 1):
checkmarks_text += '\\\\\n'
return checkmarks_text
def generate_resid_std_err_latex(self):
rse_text = ''
if not self.show_r2:
return rse_text
rse_text += ' Residual Std. Error '
for md in self.model_data:
rse_text += '& ' + str(round(md['resid_std_err'], self.sig_digits))
if self.show_dof:
rse_text += '(df = ' + str(round(md['degree_freedom_resid'])) + ')'
rse_text += ' '
rse_text += ' \\\\\n'
return rse_text
def generate_f_statistic_latex(self):
f_text = ''
if not self.show_r2:
return f_text
f_text += ' F Statistic '
for md in self.model_data:
f_text += '& ' + str(round(md['f_statistic'], self.sig_digits))
f_text += '$^{' + self.get_sig_icon(md['f_p_value']) + '}$ '
if self.show_dof:
f_text += '(df = ' + str(md['degree_freedom']) + '; ' + str(md['degree_freedom_resid']) + ')'
f_text += ' '
f_text += '\\\\\n'
return f_text
def generate_notes_latex(self):
notes_text = ''
if not self.show_notes:
return notes_text
notes_text += '\\textit{' + self.notes_label + '}'
if self.notes_append:
notes_text += self.generate_p_value_section_latex()
notes_text += self.generate_additional_notes_latex()
return notes_text
# def generate_p_value_section_latex(self):
# notes_text = ''
# notes_text += ' & \\multicolumn{' + str(self.num_models) + '}{r}{$^{' + self.get_sig_icon(self.sig_levels[0] - 0.001) + '}$p$<$' + str(self.sig_levels[0]) + '; '
# notes_text += '$^{' + self.get_sig_icon(self.sig_levels[1] - 0.001) + '}$p$<$' + str(self.sig_levels[1]) + '; '
# notes_text += '$^{' + self.get_sig_icon(self.sig_levels[2] - 0.001) + '}$p$<$' + str(self.sig_levels[2]) + '} \\\\\n'
# return notes_text
def generate_p_value_section_latex(self):
notes_text = ''
notes_text += ' & \\multicolumn{' + str(self.num_models) + '}{r}{$^{' + self.get_sig_icon(self.sig_levels[0] - 0.001) + '}p<$' + str(self.sig_levels[0]) + '; '
notes_text += '$^{' + self.get_sig_icon(self.sig_levels[1] - 0.001) + '}p<$' + str(self.sig_levels[1]) + '; '
notes_text += '$^{' + self.get_sig_icon(self.sig_levels[2] - 0.001) + '}p<$' + str(self.sig_levels[2]) + '} \\\\\n'
return notes_text
def generate_additional_notes_latex(self):
notes_text = ''
# if len(self.custom_notes) == 0:
# return notes_text
for note in self.custom_notes:
# if (i != 0) | (self.notes_append):
# notes_text += '\\multicolumn{' + str(self.num_models) + '}{r}\\textit{' + note + '} \\\\\n'
# else:
# notes_text += ' & \\multicolumn{' + str(self.num_models) + '}{r}\\textit{' + note + '} \\\\\n'
notes_text += ' & \\multicolumn{' + str(self.num_models) + '}{r}\\textit{' + note + '} \\\\\n'
return notes_text
# Begin Markdown render functions
# def render_markdown(self):
# print("sorry haven't made this yet :/")
# Begin ASCII render functions
# def render_ascii(self):
# print("sorry haven't made this yet :/")
```
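A minimal sketch of how this modified `Stargazer` class might be used (toy data; the covariate labels and checkmark values are made up):

```python
import numpy as np
import pandas as pd
import statsmodels.formula.api as smf

# Toy data and two OLS fits (statsmodels RegressionResultsWrapper objects)
rng = np.random.RandomState(0)
df = pd.DataFrame({'x1': rng.randn(100), 'x2': rng.randn(100)})
df['y'] = 1.0 + 0.5 * df['x1'] - 0.3 * df['x2'] + rng.randn(100)

m1 = smf.ols('y ~ x1', data=df).fit()
m2 = smf.ols('y ~ x1 + x2', data=df).fit()

table = Stargazer([m1, m2])            # class defined in the file above
table.title('Illustrative regressions')
table.rename_covariates({'x1': 'First regressor', 'x2': 'Second regressor'})
table.add_checkmarks([{'label': 'Controls', 'values': [False, True]}])
print(table.render_latex())
```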
#### File: code/python/tools.py
```python
import os
import errno
import shutil
import numpy as np
import pandas as pd
import scipy.integrate, scipy.stats, scipy.optimize, scipy.signal
from scipy.stats import mannwhitneyu
import statsmodels.formula.api as smf
import pystan
def clean_folder(folder):
"""Create a new folder, or if the folder already exists,
delete all containing files
Args:
folder (string): Path to folder
"""
if os.path.isdir(folder):
shutil.rmtree(folder)
try:
os.makedirs(folder)
except OSError as e:
if e.errno != errno.EEXIST:
raise
def get_data_for_stan(y):
"""Convenience function for
collecting data for STAN estimation
Args:
y (np vector): Data series for Bayesian filtering
Returns:
dict: Data for Stan estimation
"""
assert y.ndim == 1, \
"y must be a vector"
assert len(y) > 0, \
"y must have positive length"
assert isinstance(y, np.ndarray), \
"y must be a numpy array"
N_obs = len(pd.Series(y).dropna())
N_mis = np.sum(np.isnan(y))
ii_obs = list(range(1, N_obs + N_mis + 1))
ii_mis = []
if N_mis > 0:
for ii in np.argwhere(np.isnan(y)):
ii_mis.append(ii[0] + 1)
ii_obs.remove(ii[0] + 1)
return {'N_obs': N_obs,
'N_mis': N_mis,
'ii_obs': ii_obs,
'ii_mis': ii_mis,
'y_obs': pd.Series(y).dropna()}
def estimate_R(y, gamma, stm_missing, stm_no_missing, num_iter, num_chains, num_warmup, rng, sig_levels, full_output = False):
"""Estimate R using Bayesian Kalman
smoothing
Args:
y (np array): Data series for the growth rate of infected individuals
gamma (double): Inverse of average infectiousness duration
stm_missing (pickle): Stan model (for case with missing data)
stm_no_missing (pickle): Stan model (for case without missing data)
num_iter (int): Number of iterations
num_chains (int): Number of MCMC chains
num_warmup (int): Number of warmup periods
rng (obj): Numpy random state
sig_levels (list): List of significance levels for credible bounds
full_output (bool, optional): If True, return full output from Stan
Returns:
dict: Estimation results: R, credible bounds for each significance level, signal-to-noise ratio, irregular-component variance, and convergence diagnostics
"""
assert y.ndim == 1, \
"y must be a vector"
assert len(y) > 0, \
"y must have positive length"
assert isinstance(y, np.ndarray), \
"y must be a numpy array"
assert isinstance(num_chains, int) and isinstance(num_iter, int) and isinstance(num_warmup, int), \
"num_chains, num_iter, and num_warmup must be integers"
assert num_chains > 0 and num_iter > 0 and num_warmup > 0, \
"num_chains, num_iter, and num_warmup must be positive"
assert len(sig_levels) >= 1 and all(isinstance(x, int) for x in sig_levels), \
"sig_levels must be a list with only integers"
# Get data in Stan format
s_data = get_data_for_stan(y)
# Estimate model
if np.sum(np.isnan(y)) > 0:
fit = stm_missing.sampling(data = s_data,
iter = num_iter,
chains = num_chains,
warmup = num_warmup,
verbose = False,
seed = rng)
else:
fit = stm_no_missing.sampling(data = s_data,
iter = num_iter,
chains = num_chains,
warmup = num_warmup,
verbose = False,
seed = rng)
fit_res = fit.extract(permuted = True)
# Collect results
res = {}
res['R'] = 1 + 1 / gamma * fit_res['mu'].mean(axis = 0)
for aa in sig_levels:
ub = 1 + 1 / gamma * np.percentile(fit_res['mu'], axis = 0, q = 100 - aa / 2.0)
lb = np.maximum(1 + 1 / gamma * np.percentile(fit_res['mu'], axis = 0, q = aa / 2.0), 0.0)
res['ub_{}'.format(100 - aa)] = ub
res['lb_{}'.format(100 - aa)] = lb
res['signal_to_noise'] = fit_res['signal_to_noise'].mean()
res['var_irregular'] = (1 / fit_res['precision_irregular']).mean()
# Extract convergence statistics
fit_summary = fit.summary()
df_conv_stats = pd.DataFrame(fit_summary['summary'])
df_conv_stats.columns = fit_summary['summary_colnames']
df_conv_stats['var_name'] = fit_summary['summary_rownames']
mask = df_conv_stats['var_name'].apply(lambda x: 'mu' in x)
df_conv_stats = df_conv_stats.loc[mask, ]
res['n_eff_pct'] = df_conv_stats['n_eff'].min() / float(num_chains * (num_iter - num_warmup))
res['Rhat_diff'] = (df_conv_stats['Rhat'] - 1).abs().max()
# If requested, extract full Stan fit
if full_output:
res['stan_fit'] = fit
return res
def mean_se(x, robust = True):
"""Aggregation function for
pandas to calculate standard errors
for the mean
Args:
x (series): pandas Series
robust (bool, optional): if True, calculate
heteroskedasticity-robust standard errors
Returns:
float: standard error
"""
x = pd.DataFrame(x)
x.columns = ['x']
if robust:
mod = smf.ols('x ~ 1', data = x).fit(cov_type = 'HC2')
else:
mod = smf.ols('x ~ 1', data = x).fit()
return mod.bse['Intercept']
def simulate_AR1(rho, sigma, T, shocks = None):
"""Simulate a time series for
an AR(1) process with
x_{t + 1} = rho x_t + eps_{t+1}
where
eps_{t + 1} ~ N(0, sigma ^ 2).
Initial condition is
x_0 ~ N(0, sigma ^ 2 / (1 - rho ^ 2))
Persistence parameter must lie in (-1, 1)
for an AR(1) to be simulated.
Args:
rho (float): AR(1) persistence parameter
sigma (float): Standard deviation of shocks
T (int): Length of simulated time series
shocks (array, optional): If provided,
use the time series in shocks for the disturbances (eps)
Returns:
dict: Dictionary, contains:
shocks (float): Simulated shocks (eps)
x (float): Simulated time series
"""
assert rho > - 1 and rho < 1, \
'Persistence parameter should be in (-1, 1).'
if shocks is None:
shocks = np.random.randn(1, T).flatten() * sigma
        shocks[0] = np.random.randn() * sigma / np.sqrt(1 - rho ** 2)
return {'shocks': shocks,
            'x': scipy.signal.lfilter([1], [1, -rho], shocks)}
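# Example usage (illustrative sketch; assumes numpy and scipy are imported above):
#   sim = simulate_AR1(rho = 0.9, sigma = 1.0, T = 200)
#   x, eps = sim['x'], sim['shocks']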
``` |
{
"source": "3D-Hack/team10_backend",
"score": 3
} |
#### File: 3D-Hack/team10_backend/main.py
```python
import os
import requests
import datetime
import json
def get_average_kilometerage(vrm, average=10000):
headers = {
"x-api-key": os.getenv('MOT_API_KEY'),
"Content-Type": "application/json",
}
response = requests.get(
os.getenv('MOT_API_URL'), params={"registration": vrm}, headers=headers
)
if response.status_code != 200:
raise Exception("OMG. Not 200.")
response_json = response.json()[0]
if "motTests" not in response_json:
return average
year = datetime.datetime.today().year
lasttest = response_json["motTests"][0]
distance = int(lasttest["odometerValue"])
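    # The odometer reading may be recorded in miles; convert to km (1 km ≈ 0.62137 miles)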
if lasttest["odometerUnit"] == "mi":
distance = distance / 0.62137119
regyear = datetime.datetime.strptime(
response_json["registrationDate"], "%Y.%m.%d"
).year
apart = year - regyear
return distance / apart
# Press ⌃R to execute it or replace it with your code.
# Press Double ⇧ to search everywhere for classes, files, tool windows, actions, and settings.
def get_vehicle_info(vrm):
headers = {
"x-api-key": os.getenv('DVLA_API_KEY'),
"Content-Type": "application/json",
}
response = requests.post(
os.getenv('DVLA_API_URL'),
json={"registrationNumber": vrm},
headers=headers,
)
if response.status_code != 200:
raise Exception("OMG. Not 200.")
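    # Any recorded MOT status ("Valid" or "Not valid") implies MOT history exists, so the
    # yearly average can be estimated from it; otherwise keep the 10000 km default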
motStatus = response.json()["motStatus"] in ("Valid", "Not valid")
average = 10000
if motStatus:
average = get_average_kilometerage(vrm)
convert_co2_result = convert_co2(response.json()["co2Emissions"], average)
removed = convert_co2_result.popitem()
return {
"co2": response.json()["co2Emissions"],
"make": response.json()["make"],
"colour": response.json()["colour"],
"averageKilometersYear": round(average),
"convertCo2": convert_co2_result,
"text_string": removed[1]
}
def format_reponse_string(num_item, item):
"""
    Hackathon-level string formatting.
"""
return (
str(int(num_item)) + (" " + item + "s" if num_item > 1 else " " + item)
if num_item > 0
else ""
)
def convert_co2(co2, mileage):
"""
    Convert emissions into some relatable equivalent stats.
    co2 in g/km
    mileage in km per year
"""
co2_target = 90 # gCO2/km target from the eu.
total_co2 = (co2 - co2_target) * mileage
    # rough grams-of-CO2 equivalents assumed for each offset item
trees, steaks, cheese, avocado = 9999, 4000, 3000, 200
num_trees, rem = divmod(total_co2, trees)
num_steak, rem = divmod(rem, steaks)
num_cheese, rem = divmod(rem, cheese)
num_avocado, rem = divmod(rem, avocado)
# 100*long_haul_flight*passengers
rocket_equivalents = round(100 * 2000000 * 4 / total_co2)
# Dont judge me! i think it works
tree_string = format_reponse_string(num_trees, "tree")
steak_string = format_reponse_string(num_steak, "steak")
avo_string = format_reponse_string(num_avocado, "avo")
cheese_string = format_reponse_string(num_cheese, "cheesey kilo")
msg = (
f"Your car is producing {total_co2/1000:.1f} kg CO2 per year over the EU target (90 g/km). "
+ f"You can offset that by planting or saving {tree_string} and "
+ f"avoiding {steak_string}, {cheese_string}, {avo_string}."
+ f" Or we could forgo 1 of Branson/Bezos rocket trips and drive for {rocket_equivalents} years."
)
msg = msg.replace(" , ", " ").replace(", ,", ",").replace(", .", ".")
if msg.count(",") > 1:
msg = ".".join(msg.rsplit(",", 1))
if msg.count(",") > 0:
msg = " and".join(msg.rsplit(",", 1))
msg = msg.replace(" ", " ")
return {
"trees": num_trees,
"avocado": num_avocado,
"steaks": num_steak,
"cheese": num_cheese,
"text_string": msg,
}
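# Rough sanity check with hypothetical inputs: convert_co2(190, 9999) implies
# (190 - 90) * 9999 = 999,900 g of excess CO2 per year, i.e. 100 trees, no steaks/cheese/avos,
# and round(100 * 2000000 * 4 / 999900) = 800 rocket-trip years.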
def lambda_function(event, context):
vrm = event["queryStringParameters"]["vrm"]
print(event)
myvar = get_vehicle_info(vrm)
response = {
"statusCode": 200,
"body": json.dumps(myvar),
"headers": {"Access-Control-Allow-Origin": "*"},
}
return response
# Press the green button in the gutter to run the script.
if __name__ == "__main__":
print(get_vehicle_info(input('Enter VRM: ')))
``` |
{
"source": "3dh-de/pishower",
"score": 3
} |
#### File: 3dh-de/pishower/iocontrol.py
```python
import time
from pishowerutils import logger
try:
import RPi.GPIO as GPIO
except RuntimeError:
logger.error('Error importing RPi.GPIO! Superuser privileges needed!')
quit(-1)
class RelayControl:
""" Easy GPIO control to toggle relais """
invertedLevels = True
_state_on = GPIO.LOW
_state_off = GPIO.HIGH
_registered_outs = []
def __init__(self, outports=[], inverted=True):
""" Init GPIO interface """
self.invertedLevels = inverted
if not inverted:
self._state_on, self._state_off = self._state_off, self._state_on
self._states = {self._state_on: 'ON', self._state_off: 'OFF'}
GPIO.setmode(GPIO.BCM) # BCM-numbering (GPIO 17) instead of pin (11)
for port in outports:
GPIO.setup(port, GPIO.OUT, initial=self._state_off)
self._registered_outs.append(port)
logger.debug('init GPIO {} as output'.format(port))
time.sleep(0.5)
def __del__(self):
""" Clear GPIO interface """
GPIO.cleanup()
def toggle_output(self, port, value=-1):
if port not in self._registered_outs:
logger.warning('Cannot toggle unregistered GPIO {0}!'.format(port))
return
        if value == -1:
value = GPIO.input(port)
logger.debug('toggle GPIO {0} from {1} to {2}'.format(port, self._states[value], self._states[not value]))
value = not value
elif self.invertedLevels:
value = not value
state = GPIO.HIGH if value > 0 else GPIO.LOW
GPIO.output(port, state)
logger.debug('set GPIO {0} to {1}'.format(port, self._states[state]))
time.sleep(0.01)
# command line execution
if __name__ == '__main__':
    relay1 = 17  # set GPIO 17 / pin 11 as output
    relay2 = 27  # set GPIO 27 / pin 13 as output
try:
control = RelayControl([relay1, relay2])
time.sleep(2.0)
logger.debug('Testing invalid port...')
control.toggle_output(4, True)
logger.debug('Switching relais to ON...')
control.toggle_output(relay1, True)
control.toggle_output(relay2, True)
time.sleep(2.0)
logger.debug('Toggling relais...')
control.toggle_output(relay1)
time.sleep(2.0)
control.toggle_output(relay2)
time.sleep(2.0)
logger.debug('Switching relais2 to OFF...')
control.toggle_output(relay2, False)
time.sleep(2.0)
logger.debug('Switching relais2 to ON...')
control.toggle_output(relay2, True)
time.sleep(2.0)
except KeyboardInterrupt:
logger.debug('Key pressed - finishing now...')
    except Exception:
        logger.error('Unknown error received. Reading aborted!')
```
#### File: 3dh-de/pishower/ledthermometer.py
```python
import time
import Adafruit_DHT
import max7219.led as led
from max7219.font import proportional, SINCLAIR_FONT, TINY_FONT, CP437_FONT
# create matrix device
device = led.matrix(cascaded=4)
device.orientation(90)
# Supported DHT sensor types; select the sensor and its GPIO pin below.
sensor_args = {'11': Adafruit_DHT.DHT11,
'22': Adafruit_DHT.DHT22,
'2302': Adafruit_DHT.AM2302}
# sensor = sensor_args['11']
# sensor_gpio_pin = 27
sensor = sensor_args['2302']
sensor_gpio_pin = 17
sensor_correction = 0.0
def fadeMessage(msg, seconds=2.5, f=TINY_FONT):
""" display given message string by fading in and out the brightness """
device.brightness(0)
device.show_message(msg, font=f, always_scroll=False)
for intensity in range(8):
device.brightness(intensity)
time.sleep(0.050)
    time.sleep(seconds)
for intensity in range(8):
device.brightness(7 - intensity)
time.sleep(0.050)
return
try:
device.clear()
while True:
msg = u'{0}'.format(time.strftime('%H:%M'))
fadeMessage(msg, 2.5, TINY_FONT)
time.sleep(0.5)
humidity, temperature = Adafruit_DHT.read_retry(sensor, sensor_gpio_pin)
# temperature += sensor_correction
if humidity is not None and temperature is not None:
msg = u'{0:0.1f}c {1:0.0f}'.format(temperature, humidity)
fadeMessage(msg, 2.5, TINY_FONT)
time.sleep(0.5)
# Abort with the Ctrl-C key
except KeyboardInterrupt:
device.clear()
```
#### File: 3dh-de/pishower/pishowermain.py
```python
import time
from threading import Thread
from rfidreader import UsbRfidReader
from iocontroltimer import IOControlTimer
from iocontrol import RelayControl
from lcdcontrol import LcdControl
from pishowerutils import logger
class PiShowerMain(Thread):
"""
Main control of PiShower system
Controls the I/O devices and logic to switch the
magnet valves for the shower cabin water.
There are separate valves for cold (GPIO 17, blue cable)
and warm (GPIO 27, yellow cable) water, switched by relays
connected to GPIO 17+27.
An I2C LCD 16x2 display shows the current state and
messages for the user.
By an attached USB Mifare RFID/NFC reader the users
authenticate and start the shower for a predefined time.
After the time is expired, all valves are closed by an
timeout event.
"""
shower_time = 420
shower_id = 1
_gpio_cold_water = 17
_gpio_warm_water = 27
_cardReader = None
_showerTimer = None
_relays = None
_display = None
__running = False
def __init__(self, shower_id=1, shower_seconds=420):
""" Init card reader, display and I/O controls """
Thread.__init__(self)
try:
if shower_seconds < 30:
logger.error('invalid shower time given! time must be > 30s')
raise AttributeError()
self.shower_id = shower_id
self.shower_time = shower_seconds
self._cardReader = UsbRfidReader()
self._showerTimer = IOControlTimer(self.shower_time)
self._relays = RelayControl([self._gpio_cold_water, self._gpio_warm_water])
self._display = LcdControl()
self.__running = False
time.sleep(1.0)
logger.debug('shower control ready')
except Exception as error:
logger.error('Error during init of main control! {0}'.format(error))
def __del__(self):
""" Close all devices and stop timers """
try:
self.stop()
except Exception as error:
logger.error('Error in destructor of main control! {0}'.format(error))
def start(self):
""" Init timers, reader and display user message """
self.__running = True
Thread.start(self)
try:
logger.debug('starting shower control...')
time.sleep(1.0)
except Exception as error:
logger.error('Error during starting main control! {0}'.format(error))
def stop(self):
""" Close all valves (relays), stop timers and display warning """
try:
logger.warning('stopping and locking shower control...')
self.__running = False
if self._showerTimer:
self._showerTimer.stop()
if self._relays:
self._relays.toggle_output(self._gpio_cold_water, False)
self._relays.toggle_output(self._gpio_warm_water, False)
if self._display:
self.show_message_error()
except Exception as error:
logger.error('Error during stopping main control! {0}'.format(error))
def run(self):
""" Handle card reader events and logic for shower control """
if self._cardReader is None:
logger.error('No card reader available! Aborting!')
self.stop()
return
if self._relays is None:
logger.error('No I/O control for relays available! Aborting!')
self.stop()
return
try:
logger.debug('running main control loop...')
self.show_message_ready()
shower_active = False
start_time = None
while self.__running:
code = self._cardReader.readline() # codes must be read to be cleared from buffer
if shower_active:
# a shower process was started
code = None
minutes_left = 0 # TODO: add minutes left calc here
if self._showerTimer is None:
logger.warning('No timer found! Stopped shower process!')
shower_active = False
self.show_message_ready()
else:
if not self._showerTimer.is_finished():
self.show_message_processing(minutes_left)
else:
logger.debug('stopped shower process on timeout')
shower_active = False
self.show_message_stop()
if self._relays:
self._relays.toggle_output(self._gpio_cold_water, False)
self._relays.toggle_output(self._gpio_warm_water, False)
time.sleep(6.0)
self.show_message_ready()
elif code is not None:
# wait for request to start new shower process
logger.debug('found a code: \'{0}\''.format(code)) # TODO: add validator here
self.show_message_processing(self.shower_time//60)
shower_active = True
if self._relays:
self._relays.toggle_output(self._gpio_cold_water, True)
self._relays.toggle_output(self._gpio_warm_water, True)
if self._showerTimer:
self._showerTimer.start()
time.sleep(0.2)
logger.debug('stopping main control loop...')
except Exception as error:
logger.error('Error in main control loop! {0}'.format(error))
def show_message_ready(self):
if self._display:
self._display.show('Dusche {0} FREI'.format(self.shower_id), 1)
self._display.show('>>> Karte ?? <<<', 2)
def show_message_processing(self, shower_time):
if self._display:
self._display.show('Duschzeit {0:3d}min'.format(shower_time), 1)
self._display.show('>>> Karte OK <<<', 2)
def show_message_error(self):
if self._display:
self._display.show('Dusche {0} AUS'.format(self.shower_id), 1)
self._display.show('>>> STOERUNG <<<', 2)
def show_message_stop(self):
if self._display:
self._display.show('Dusche {0} BELEGT'.format(self.shower_id), 1)
self._display.show('>>> GESPERRT <<<', 2)
# exec tests
if __name__ == "__main__":
try:
main = PiShowerMain(1, 40)
main.start()
while main.is_alive():
time.sleep(0.2)
except KeyboardInterrupt:
logger.debug('Key pressed - finishing now...')
except Exception as error:
logger.error('Error in main control! {0}'.format(error))
finally:
main.stop()
``` |
{
"source": "3d-hub/3d-hub-connector",
"score": 2
} |
#### File: 3d-hub-connector/tests/test_announcements.py
```python
import datetime
import json
import unittest
import unittest.mock as mock
import pytest
from fdm_connector import FdmConnectorPlugin
from fdm_connector.constants import Errors, State
from tests.utils import mock_settings_get, mock_settings_global_get, mock_settings_custom, create_fake_at, \
mocked_host_intercepted
class TestPluginAnnouncing(unittest.TestCase):
@classmethod
def setUp(cls):
cls.settings = mock.MagicMock() # Replace or refine with set/get
cls.logger = mock.MagicMock()
cls.plugin = FdmConnectorPlugin()
cls.plugin._settings = cls.settings
cls.plugin._settings.get = mock_settings_get
cls.plugin._settings.get = mock_settings_global_get
cls.plugin._write_persisted_data = lambda *args: None
cls.plugin._logger = cls.logger
cls.plugin._logger.info = print
cls.plugin._logger.error = print
# Nice way to test persisted data
cls.plugin._data_folder = "test_data/announcements"
def assert_state(self, state):
assert self.plugin._state is state
def test_call_mocked_announcement_improperly(self):
"""Call the query announcement, make sure it validates 'access_token'"""
self.assert_state(State.BOOT)
with pytest.raises(Exception) as e:
self.plugin._query_announcement("asd", "asd")
assert e.value.args[0] == Errors.access_token_too_short
self.assert_state(State.CRASHED)
def test_announcement_without_baseurl(self):
"""Call the query announcement, make sure it doesnt crash"""
fake_token = create_fake_at()
self.assert_state(State.BOOT)
with pytest.raises(Exception) as e:
self.plugin._query_announcement(None, access_token=fake_token)
assert e.value.args[0] == Errors.base_url_not_provided
self.assert_state(State.CRASHED)
# This method will be used by the mock to replace requests.get
def mocked_requests_post(*args, **kwargs):
class MockResponse:
def __init__(self, status_code, text):
self.status_code = status_code
self.text = text
if f"{mocked_host_intercepted}:443" in args[0]:
fake_token = create_fake_at()
return MockResponse(200, json.dumps({"access_token": fake_token, "expires_in": 100}))
return MockResponse(404, "{}")
@mock.patch('requests.post', side_effect=mocked_requests_post)
def test_announcement_with_proper_data(self, mock_post):
"""Call the query announcement properly"""
fake_token = create_fake_at()
url = "testwrong_url"
self.assert_state(State.BOOT)
# TODO wrong url is not prevented
self.plugin._query_announcement(url, fake_token)
# assert e.value.args[0] == Errors.base_url_not_provided
self.assert_state(State.SLEEP)
def test_check_fdm(self):
with pytest.raises(Exception) as e:
self.plugin._check_fdmmonster()
assert e.value.args[0] == Errors.config_openid_missing
self.assert_state(State.CRASHED)
def test_check_fdmmonster_unreachable_settings(self):
self.plugin._settings.get = mock_settings_custom
self.assert_state(State.BOOT)
self.plugin._check_fdmmonster()
# TODO We are crashed with a connection error being caught. Save the reason
self.assert_state(State.CRASHED)
@mock.patch('requests.post', side_effect=mocked_requests_post)
def test_check_fdmmonster_reachable_settings(self, mock_request):
self.plugin._settings.get = mock_settings_custom
self.assert_state(State.BOOT)
self.plugin._check_fdmmonster()
self.assert_state(State.SLEEP)
@mock.patch('requests.post', side_effect=mocked_requests_post)
def test_check_fdmmonster_reachable_settings_expired(self, mock_request):
self.plugin._settings.get = mock_settings_custom
self.plugin._persisted_data["requested_at"] = datetime.datetime.utcnow().timestamp()
self.plugin._persisted_data["expires"] = -100
self.assert_state(State.BOOT)
self.plugin._check_fdmmonster()
self.assert_state(State.SLEEP)
@mock.patch('requests.post', side_effect=mocked_requests_post)
def test_check_fdmmonster_reachable_settings_unexpired(self, mock_request):
self.plugin._settings.get = mock_settings_custom
self.plugin._persisted_data["requested_at"] = datetime.datetime.utcnow().timestamp()
self.plugin._persisted_data["expires"] = 10000000
self.plugin._persisted_data["access_token"] = create_fake_at()
self.assert_state(State.BOOT)
self.plugin._check_fdmmonster() # We skip querying the access_token
self.assert_state(State.SLEEP)
``` |
{
"source": "3DHubs/localflavor",
"score": 3
} |
#### File: localflavor/ar/forms.py
```python
from __future__ import unicode_literals
from localflavor.exceptions import ValidationError
from localflavor.base import RegexValidator, Select
from localflavor.stub import _
from .ar_provinces import PROVINCE_CHOICES
class ARProvinceSelect(Select):
"""A Select widget that uses a list of Argentinean provinces/autonomous cities as its choices."""
def __init__(self, attrs=None):
super(ARProvinceSelect, self).__init__(attrs, choices=PROVINCE_CHOICES)
class ARPostalCodeField(RegexValidator):
"""
Accepts a 'classic' NNNN Postal Code or a CPA.
See:
* http://www.correoargentino.com.ar/cpa/que_es
* http://www.correoargentino.com.ar/cpa/como_escribirlo
"""
default_error_messages = {
'invalid': _('Enter a valid postal code in the format NNNN or ANNNNAAA.'),
}
def __init__(self, max_length=8, min_length=4, *args, **kwargs):
super(ARPostalCodeField, self).__init__(
r'^\d{4}$|^[A-HJ-NP-Za-hj-np-z]\d{4}\D{3}$',
max_length=max_length, min_length=min_length, *args, **kwargs
)
def clean(self, value):
value = super(ARPostalCodeField, self).clean(value)
if value in self.empty_values:
return self.empty_value
if len(value) not in (4, 8):
raise ValidationError(self.error_messages['invalid'])
if len(value) == 8:
return '%s%s%s' % (value[0].upper(), value[1:5], value[5:].upper())
return value
```
#### File: localflavor/localflavor/base.py
```python
import re
from .exceptions import ValidationError
class CharValidator(object):
"""Validates and cleans a given string value of given min/max length."""
default_error_messages = {
'invalid': 'Invalid value.'
}
empty_values = (None, '')
empty_value = None
validators = []
default_validators = []
choices = None
def __init__(self, min_length=None, max_length=None, choices=None, **kwargs):
self.min_length = min_length
self.max_length = max_length
self.choices = choices if choices else self.choices
self.choices = [str(_[0]) for _ in self.choices] if self.choices else self.choices
self.error_messages = self.default_error_messages
if 'message' in kwargs:
self.error_messages = {'invalid': kwargs.pop('message')}
self.error_messages = kwargs.pop('error_messages', self.error_messages)
def _is_valid(self, value):
if not isinstance(value, str):
return False
if self.min_length and len(value) < self.min_length:
return False
if self.max_length and len(value) > self.max_length:
return False
if self.choices and str(value) not in self.choices:
return False
for validator in self.validators + self.default_validators:
if not validator(value):
return False
return True
def __call__(self, value):
if not self._is_valid(value):
raise ValidationError(self.error_messages['invalid'])
def clean(self, value):
value = value.strip()
if value in self.empty_values:
return self.empty_value
if not self._is_valid(value):
raise ValidationError(self.error_messages['invalid'])
return value
class RegexValidator(CharValidator):
"""Validates and cleans a given value with a given regex."""
regex = None
def __init__(self, regex, message=None, *args, **kwargs):
if message:
kwargs['message'] = message
super(RegexValidator, self).__init__(*args, **kwargs)
self.regex = re.compile(regex)
def _is_valid(self, value):
if not super(RegexValidator, self)._is_valid(value):
return False
if not self.regex.search(value):
return False
return True
def clean(self, value):
value = super(RegexValidator, self).clean(value)
if not self._is_valid(value):
raise ValidationError(self.error_messages['invalid'])
return value
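    # Example usage (illustrative sketch):
    #   validator = RegexValidator(r'^\d{4}$', message='Enter four digits.')
    #   validator.clean(' 1234 ')   # -> '1234'; non-matching input raises ValidationError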
class ChoiceField(CharValidator):
"""A stub choice field."""
pass
class Select(object):
"""A stub select class."""
    def __init__(self, attrs=None, choices=None):
        self.attrs = attrs
        self.choices = choices
```
#### File: localflavor/cn/forms.py
```python
from __future__ import unicode_literals
from localflavor.base import RegexValidator, Select
from localflavor.stub import _
from .cn_provinces import PROVINCE_CHOICES
__all__ = (
'CNProvinceSelect',
'CNPostalCodeField',
)
ID_CARD_RE = r'^\d{15}(\d{2}[0-9xX])?$'
POST_CODE_RE = r'^\d{6}$'
# Valid location code used in id card checking algorithm
CN_LOCATION_CODES = (
11, # Beijing
12, # Tianjin
13, # Hebei
14, # Shanxi
15, # Nei Mongol
21, # Liaoning
22, # Jilin
23, # Heilongjiang
31, # Shanghai
32, # Jiangsu
33, # Zhejiang
34, # Anhui
35, # Fujian
36, # Jiangxi
37, # Shandong
41, # Henan
42, # Hubei
43, # Hunan
44, # Guangdong
45, # Guangxi
46, # Hainan
50, # Chongqing
51, # Sichuan
52, # Guizhou
53, # Yunnan
54, # Xizang
61, # Shaanxi
62, # Gansu
63, # Qinghai
64, # Ningxia
65, # Xinjiang
71, # Taiwan
81, # Hong Kong
91, # Macao
)
class CNProvinceSelect(Select):
"""A select widget providing the list of provinces and districts in People's Republic of China as choices."""
def __init__(self, attrs=None):
super(CNProvinceSelect, self).__init__(attrs, choices=PROVINCE_CHOICES)
class CNPostalCodeField(RegexValidator):
"""
A form field that validates input as postal codes in mainland China.
Valid codes are in the format of XXXXXX where X is a digit.
"""
default_error_messages = {
'invalid': _('Enter a valid postal code in the format XXXXXX.'),
}
def __init__(self, *args, **kwargs):
super(CNPostalCodeField, self).__init__(POST_CODE_RE, *args, **kwargs)
```
#### File: localflavor/cu/forms.py
```python
from __future__ import unicode_literals
from localflavor.exceptions import ValidationError
from localflavor.base import CharValidator, RegexValidator, Select
from localflavor.stub import _
from .choices import PROVINCE_CHOICES, PROVINCE_NORMALIZED, REGION_CHOICES, REGION_NORMALIZED
class CURegionField(CharValidator):
"""
A form field for a Cuban region.
The input is validated against a dictionary which includes names and abbreviations.
It normalizes the input to the standard abbreviation for the given region.
.. versionadded:: 1.6
"""
default_error_messages = {
'invalid': _('Enter a Cuban region.'),
}
description = _("Cuban regions (three uppercase letters)")
def __init__(self, *args, **kwargs):
kwargs['choices'] = REGION_CHOICES
kwargs['max_length'] = 3
super(CURegionField, self).__init__(*args, **kwargs)
def clean(self, value):
super(CURegionField, self).clean(value)
if value in self.empty_values:
return self.empty_value
try:
return REGION_NORMALIZED[value.strip().lower()]
except KeyError:
pass
raise ValidationError(self.error_messages['invalid'])
class CURegionSelect(Select):
"""
A Select widget that uses a list of Cuban regions as its choices.
.. versionadded:: 1.6
"""
def __init__(self, attrs=None):
super(CURegionSelect, self).__init__(attrs, choices=REGION_CHOICES)
class CUProvinceField(CharValidator):
"""
A form field for a Cuban province.
The input is validated against a dictionary which includes names and abbreviations.
It normalizes the input to the standard abbreviation for the given province.
.. versionadded:: 1.6
"""
default_error_messages = {
'invalid': _('Enter a Cuban province.'),
}
description = _("Cuban provinces (three uppercase letters)")
def __init__(self, *args, **kwargs):
kwargs['choices'] = PROVINCE_CHOICES
kwargs['max_length'] = 3
super(CUProvinceField, self).__init__(*args, **kwargs)
def clean(self, value):
super(CUProvinceField, self).clean(value)
if value in self.empty_values:
return self.empty_value
try:
return PROVINCE_NORMALIZED[value.strip().lower()]
except KeyError:
pass
raise ValidationError(self.error_messages['invalid'])
class CUProvinceSelect(Select):
"""
A Select widget that uses a list of Cuban provinces as its choices.
.. versionadded:: 1.6
"""
def __init__(self, attrs=None):
super(CUProvinceSelect, self).__init__(attrs, choices=PROVINCE_CHOICES)
class CUPostalCodeField(RegexValidator):
"""
A form field for a Cuban postal Code.
Taken from : http://mapanet.eu/Postal_Codes/?C=CU
    The Cuban postal code is a combination of five digits that does not begin with 0.
.. versionadded:: 1.6
"""
default_error_messages = {
'invalid': _('Enter a valid postal code in the format XXXXX.'),
}
description = _("Cuban postal code")
def __init__(self, *args, **kwargs):
kwargs['max_length'] = 5
super(CUPostalCodeField, self).__init__(r'^[1-9]\d{4}$', *args, **kwargs)
```
#### File: localflavor/generic/forms.py
```python
from __future__ import unicode_literals
from localflavor.base import CharValidator
from .validators import IBAN_COUNTRY_CODE_LENGTH, BICValidator, IBANValidator
DEFAULT_DATE_INPUT_FORMATS = (
'%Y-%m-%d', '%d/%m/%Y', '%d/%m/%y', # '2006-10-25', '25/10/2006', '25/10/06'
'%b %d %Y', '%b %d, %Y', # 'Oct 25 2006', 'Oct 25, 2006'
'%d %b %Y', '%d %b, %Y', # '25 Oct 2006', '25 Oct, 2006'
'%B %d %Y', '%B %d, %Y', # 'October 25 2006', 'October 25, 2006'
'%d %B %Y', '%d %B, %Y', # '25 October 2006', '25 October, 2006'
)
DEFAULT_DATETIME_INPUT_FORMATS = (
'%Y-%m-%d %H:%M:%S', # '2006-10-25 14:30:59'
'%Y-%m-%d %H:%M', # '2006-10-25 14:30'
'%Y-%m-%d', # '2006-10-25'
'%d/%m/%Y %H:%M:%S', # '25/10/2006 14:30:59'
'%d/%m/%Y %H:%M', # '25/10/2006 14:30'
'%d/%m/%Y', # '25/10/2006'
'%d/%m/%y %H:%M:%S', # '25/10/06 14:30:59'
'%d/%m/%y %H:%M', # '25/10/06 14:30'
'%d/%m/%y', # '25/10/06'
)
IBAN_MIN_LENGTH = min(IBAN_COUNTRY_CODE_LENGTH.values())
class IBANFormField(CharValidator):
"""
An IBAN consists of up to 34 alphanumeric characters.
To limit validation to specific countries, set the 'include_countries' argument with a tuple or list of ISO 3166-1
    alpha-2 codes. For example, `include_countries=('NL', 'BE', 'LU')`.
A list of countries that use IBANs as part of SEPA is included for convenience. To use this feature, set
`include_countries=IBAN_SEPA_COUNTRIES` as an argument to the field.
In addition to validating official IBANs, this field can optionally validate unofficial IBANs that have been
catalogued by Nordea by setting the `use_nordea_extensions` argument to True.
https://en.wikipedia.org/wiki/International_Bank_Account_Number
.. versionadded:: 1.1
"""
def __init__(self, use_nordea_extensions=False, include_countries=None, *args, **kwargs):
kwargs.setdefault('min_length', IBAN_MIN_LENGTH)
kwargs.setdefault('max_length', 34)
self.default_validators = [IBANValidator(use_nordea_extensions, include_countries)]
super(IBANFormField, self).__init__(*args, **kwargs)
class BICFormField(CharValidator):
"""
A BIC consists of 8 (BIC8) or 11 (BIC11) alphanumeric characters.
BICs are also known as SWIFT-BIC, BIC code, SWIFT ID, SWIFT code or ISO 9362.
https://en.wikipedia.org/wiki/ISO_9362
.. versionadded:: 1.1
"""
default_validators = [BICValidator()]
def __init__(self, *args, **kwargs):
kwargs.setdefault('max_length', 11)
super(BICFormField, self).__init__(*args, **kwargs)
def clean(self, value):
# BIC is always written in upper case.
# https://www2.swift.com/uhbonline/books/public/en_uk/bic_policy/bic_policy.pdf
value = super(BICFormField, self).clean(value)
if value in self.empty_values:
return self.empty_value
return value.upper()
```
#### File: localflavor/id_/forms.py
```python
from __future__ import unicode_literals
import re
from localflavor.stub import EMPTY_VALUES
from localflavor.exceptions import ValidationError
from localflavor.base import CharValidator, Select
from localflavor.stub import _
postcode_re = re.compile(r'^[1-9]\d{4}$')
class IDPostalCodeField(CharValidator):
"""
An Indonesian post code field.
http://id.wikipedia.org/wiki/Kode_pos
"""
default_error_messages = {
'invalid': _('Enter a valid 5 digit postal code.'),
}
def clean(self, value):
super(IDPostalCodeField, self).clean(value)
if value in EMPTY_VALUES:
return self.empty_value
value = value.strip()
if not postcode_re.search(value):
raise ValidationError(self.error_messages['invalid'])
if int(value) < 10110:
raise ValidationError(self.error_messages['invalid'])
        # codes starting with 1 must have the form 1xxx0
if value[0] == '1' and value[4] != '0':
raise ValidationError(self.error_messages['invalid'])
return '%s' % (value, )
class IDProvinceSelect(Select):
"""A Select widget that uses a list of provinces of Indonesia as its choices."""
def __init__(self, attrs=None):
# Load data in memory only when it is required, see also #17275
from .id_choices import PROVINCE_CHOICES
super(IDProvinceSelect, self).__init__(attrs, choices=PROVINCE_CHOICES)
```
#### File: localflavor/il/forms.py
```python
from __future__ import unicode_literals
from localflavor.base import RegexValidator
from localflavor.stub import _
class ILPostalCodeField(RegexValidator):
"""
A form field that validates its input as an Israeli postal code.
    Valid formats are XXXXX or XXXXXXX where X is a digit.
"""
default_error_messages = {
'invalid': _('Enter a valid postal code in the format XXXXXXX or XXXXX, digits only.'),
}
def __init__(self, *args, **kwargs):
super(ILPostalCodeField, self).__init__(r'^\d{5}$|^\d{7}$', *args, **kwargs)
def clean(self, value):
if value not in self.empty_values:
value = value.replace(' ', '')
return super(ILPostalCodeField, self).clean(value)
```
#### File: localflavor/si/forms.py
```python
from __future__ import unicode_literals
from localflavor.base import ChoiceField, Select
from localflavor.stub import _
from .si_postalcodes import SI_POSTALCODES_CHOICES
class SIPostalCodeField(ChoiceField):
"""Slovenian post codes field."""
default_error_messages = {
'invalid': _('Enter a valid postal code in the format XXXX.'),
}
def __init__(self, *args, **kwargs):
kwargs.setdefault('choices', SI_POSTALCODES_CHOICES)
super(SIPostalCodeField, self).__init__(*args, **kwargs)
class SIPostalCodeSelect(Select):
"""A Select widget that uses Slovenian postal codes as its choices."""
def __init__(self, attrs=None):
super(SIPostalCodeSelect, self).__init__(attrs,
choices=SI_POSTALCODES_CHOICES)
```
#### File: 3DHubs/localflavor/setup.py
```python
import os
import re
import codecs
from setuptools import setup, find_packages
def read(*parts):
filename = os.path.join(os.path.dirname(__file__), *parts)
with codecs.open(filename, encoding='utf-8') as fp:
return fp.read()
def find_version(*file_paths):
version_file = read(*file_paths)
version_match = re.search(r"^__version__ = ['\"]([^'\"]*)['\"]",
version_file, re.M)
if version_match:
return version_match.group(1)
raise RuntimeError("Unable to find version string.")
# Provided as an attribute, so you can append to these instead
# of replicating them:
standard_exclude = ('*.py', '*.pyc', '*$py.class', '*~', '.*', '*.bak')
standard_exclude_directories = ('.*', 'CVS', '_darcs', './build',
'./dist', 'EGG-INFO', '*.egg-info')
setup(
name="localflavor",
version=find_version("localflavor", "__init__.py"),
url='https://github.com/anka-sirota/localflavor',
license='BSD',
description="Country-specific validation helpers, ported from Django Localflavor",
long_description=read('README.rst'),
author='<NAME>',
author_email='<EMAIL>',
packages=find_packages(exclude=['tests', 'tests.*']),
install_requires=[
'six>=1.11.0',
],
classifiers=[
'Development Status :: 5 - Production/Stable',
'Environment :: Web Environment',
'Intended Audience :: Developers',
'License :: OSI Approved :: BSD License',
'Operating System :: OS Independent',
'Programming Language :: Python :: 3.6',
'Topic :: Internet :: WWW/HTTP',
],
)
``` |
{
"source": "3digitdev/theta",
"score": 3
} |
#### File: modules/Date/Date.py
```python
import datetime
from modules.Module import Module
class Date(Module):
def __init__(self, pipe):
super().__init__(self, pipe)
def run(self, command: str, regex) -> str:
try:
now = datetime.datetime.now()
day = self._parse_day(now)
self.say('It is {0:%A} {0:%B} {1}, {0:%Y}'.format(now, day))
except Exception as e:
self.log_exception(e)
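    # Builds an ordinal day string, e.g. 1 -> '1st', 2 -> '2nd', 11 -> '11th', 23 -> '23rd'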
    def _parse_day(self, now: datetime.date) -> str:
        day = f'{now:%d}'.lstrip('0')
        # 11, 12 and 13 take 'th' despite ending in 1, 2 and 3
        if day in ('11', '12', '13'):
            day = f'{day}th'
        elif day.endswith('1'):
            day = f'{day}st'
        elif day.endswith('2'):
            day = f'{day}nd'
        elif day.endswith('3'):
            day = f'{day}rd'
        else:
            day = f'{day}th'
        return day
```
#### File: modules/PhilipsHue/HueController.py
```python
import requests
import json
# REFERENCE: https://developers.meethue.com/develop/get-started-2/
class HueController(object):
def __init__(self):
self.user = '<KEY>'
self.api_base = f'http://192.168.1.4/api/{self.user}'
self.group_map = {}
self.light_map = {}
self.group_aliases = {
'Lamp': [
'living room',
'livingroom',
'lamp'
],
'Bedroom': [
'bed room',
'bedroom',
'master bedroom',
'master bed room'
],
'Craft Room': [
'office',
'craftroom',
'craft room'
]
}
self._init_states()
def _init_states(self):
groups = self.get_groups()
if groups.status_code != 200:
print(f'Cannot reach Hue bridge at {self._build_url(["groups"])}')
exit(1)
for id, group in groups.json().items():
self.group_map[group['name']] = id
lights = self.get_lights()
if lights.status_code != 200:
print(f'Cannot reach Hue bridge at {self._build_url(["lights"])}')
exit(1)
for id, light in lights.json().items():
self.light_map[light['name']] = id
def _build_url(self, parts: list) -> str:
return '/'.join([self.api_base, *parts])
def _clamp_brightness(self, bright: int) -> int:
return max(0, min(int(254 * (bright / 100)), 254))
def get_lights(self) -> requests.Response:
return requests.get(
url=self._build_url(['lights'])
)
def _get_light_id(self, name: str) -> str:
if name not in self.light_map.keys():
print(f'ERROR: Cannot find Light named {name}')
exit(1)
return str(self.light_map[name])
def get_light_by_name(self, name: str) -> requests.Response:
return requests.get(
url=self._build_url(['lights', self._get_light_id(name)])
)
def turn_on_light(self, id: str, bright: int = None) -> requests.Response:
body = {'on': True}
if bright is not None:
            body['bri'] = self._clamp_brightness(bright)
return requests.put(
url=self._build_url(['lights', id, 'state']),
data=json.dumps(body)
)
def turn_off_light(self, id: str) -> requests.Response:
return requests.put(
url=self._build_url(['lights', id, 'state']),
data=json.dumps({'on': False})
)
def _set_light_bright(self, id: str, bright: int) -> requests.Response:
return requests.put(
url=self._build_url(['lights', id, 'state']),
data=json.dumps({'bri': bright})
)
def get_groups(self) -> requests.Response:
return requests.get(
url=self._build_url(['groups'])
)
def get_group_names(self) -> list:
resp = self.get_groups()
if resp.status_code != 200:
print('Cannot reach Hue bridge to get Groups!')
exit(1)
return [group['name'] for group in resp.json().values()]
def _get_group_id(self, name: str) -> str:
group_name = self._group_name_from_alias(name)
if group_name == '':
print(f'ERROR: Cannot find Group named {name}')
exit(1)
return str(self.group_map[group_name])
def _group_name_from_alias(self, alias: str) -> str:
for group, aliases in self.group_aliases.items():
if alias == group.lower() or alias in aliases:
return group
return ''
def get_group_by_name(self, name: str) -> requests.Response:
return requests.get(
url=self._build_url(['groups', self._get_group_id(name)])
)
def turn_on_group(self, name: str, bright=None) -> requests.Response:
# If we are setting the brightness, we should set all the lights
# before turning them on, otherwise use previous brightness
if bright is not None:
bright = self._clamp_brightness(bright)
else:
bright = self._clamp_brightness(100)
group = self.get_group_by_name(name).json()
if not group['state']['all_on']:
body = {'on': True, 'bri': self._clamp_brightness(bright)}
requests.put(
url=self._build_url(
['groups', self._get_group_id(name), 'action']
),
data=json.dumps(body)
)
for light_id in group['lights']:
resp = self._set_light_bright(light_id, bright)
if resp.status_code != 200:
print(f'ERROR: Could not access Light {light_id}')
def turn_off_group(self, name: str) -> requests.Response:
return requests.put(
url=self._build_url(
['groups', self._get_group_id(name), 'action']
),
data=json.dumps({'on': False})
)
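# Example usage (illustrative sketch; assumes the bridge address and API user above are reachable):
#   hue = HueController()
#   hue.turn_on_group('living room', bright=60)
#   hue.turn_off_group('office')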
```
#### File: modules/UnitConversion/UnitConversion.py
```python
import re
from pint import UnitRegistry
from pint.errors import \
UndefinedUnitError, OffsetUnitCalculusError, DimensionalityError
from modules.Module import Module
from utils.mod_utils import get_params
class UnitConversion(Module):
def __init__(self, pipe):
super().__init__(self, pipe)
self.unit_reg = UnitRegistry()
def run(self, command: str, regex) -> str:
error = None
result = None
try:
params = get_params(command, regex, self.regexes.keys())
            if not params or \
                    any(p is None for p in params.values()):
return
# one useful thing for temperature
before_unit = self._strip_degrees(params['before_unit'])
after_unit = self._strip_degrees(params['after_unit'])
try:
before = self.unit_reg(before_unit)
after = self.unit_reg(after_unit)
if params['after_count'] != '':
result = self._convert(
after, before, params['after_count']
)
elif params['before_count'] != '':
result = self._convert(
before, after, params['before_count']
)
except OffsetUnitCalculusError as err:
error = f'Conversion Error: {err}'
except UndefinedUnitError as err:
error = f'Undefined unit: {err}'
except DimensionalityError as err:
error = f'Conversion Error: {err}'
if error is not None:
self.send_error(error)
elif result is not None:
self.say(result)
except Exception as e:
self.log_exception(e)
def _strip_degrees(self, units):
reg = re.compile(r'degree(?:s? |_)(?P<unit>.*)')
match = re.match(reg, units)
return units if match is None else match.group('unit')
def _split_degrees(self, units, plural):
if re.match(f'degree_.*', units):
parts = units.split('_')
if plural:
return f'{parts[0]}s {parts[1]}'
return ' '.join(parts)
return units
def _convert(self, first, second, value):
value = value.replace(',', '')
if re.match(r'^an?$', value):
value = 1
first._magnitude = float(value)
try:
result = first.to(second)
result._magnitude = self._string_to_num(result.magnitude)
return self._format_response(first, result, value)
except DimensionalityError:
raise
def _format_response(self, first, result, value):
value = self._string_to_num(value)
f_unit = self._split_degrees(str(first.units), value != 1)
if 'degrees' not in f_unit and value != 1:
f_unit += 's'
r_unit = self._split_degrees(str(result.units), result.magnitude != 1)
        if 'degrees' not in r_unit and result.magnitude != 1:
r_unit += 's'
return f'{value} {f_unit} is {result.magnitude} {r_unit}'
def _string_to_num(self, s):
chopped = float(f'{float(s):.4f}')
if chopped.is_integer():
return int(chopped)
return chopped
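# Illustrative behaviour (sketch; the exact trigger phrases depend on the regexes loaded by the
# Module base class): a command parsed as "5 kilometers in miles" would be answered with
# "5 kilometers is 3.1069 miles"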
```
#### File: theta/iota/Speech2Text.py
```python
import os
import snowboydecoder
try:
import azure.cognitiveservices.speech as speechsdk
except ImportError:
print(
"Importing the Speech SDK for Python failed."
"Refer to"
"https://docs.microsoft.com/azure/cognitive-services/speech-service/quickstart-python"
"for installation instructions."
)
import sys
sys.exit(1)
from modules.Module import Module
import utils.mod_utils as Utils
interrupted = False
def signal_handler(signal, frame):
global interrupted
interrupted = True
def interrupt_callback():
global interrupted
return interrupted
class PhraseResult(object):
__slots__ = ['succeeded', 'error_msg', 'phrase']
def __init__(self):
self.succeeded = False
self.error_msg = None
self.phrase = ''
def store_phrase(self, phrase):
self.succeeded = True
self.phrase = phrase
def error(self, error):
self.succeeded = False
self.error_msg = error
class Speech2Text(Module):
def __init__(self):
self.detector = snowboydecoder.HotwordDetector(
decoder_model=os.path.join('iota', 'resources', 'Iota.pmdl'),
sensitivity=0.3
)
self._setup_mq()
def listen(self):
print('Listening...')
# main loop
self.detector.start(
# Function to call when we detect the wake word
detected_callback=self.detected_wake_word,
# Function to call that interrupts the loop
interrupt_check=interrupt_callback,
# Time to sleep between loops in seconds
sleep_time=0.03,
# Amount of silence that's needed to stop recording phrase
silent_count_threshold=7,
# Maximum seconds it will listen after wake word is heard
recording_timeout=10
)
self.detector.terminate()
def detected_wake_word(self):
self.send_response(
type='WakeWord',
response=''
)
'''performs one-shot speech recognition from the default microphone'''
# Creates a speech recognizer using microphone as audio input.
# The default language is "en-us".
speech_recognizer = speechsdk.SpeechRecognizer(
speech_config=Utils.SPEECH_CONFIG
)
# Listen for a phrase from the microphone
result = speech_recognizer.recognize_once()
# Check the result
if result.reason == speechsdk.ResultReason.RecognizedSpeech:
print(f'I heard: "{result.text}"')
self.send_response(
type='VoiceCommand',
response=result.text
)
elif result.reason == speechsdk.ResultReason.NoMatch:
self.send_error('No speech could be recognized')
elif result.reason == speechsdk.ResultReason.Canceled:
error = result.cancellation_details
if error.reason == speechsdk.CancellationReason.Error:
self.send_error(f'Cancelled. Error: {error.error_details}')
else:
self.send_error(f'Cancelled: {error.reason}')
```
#### File: iota/utils/rpn.py
```python
import re
class RPNCalculator(object):
def __init__(self, operands_reg='\\d+'):
self.ors_reg = re.compile(r'(?:times|x|\*|divided by|\/|mod(?:ul(?:o|us))?|\%|plus|\+|minus|\-|to the power of|\^)')
self.ands_reg = re.compile(operands_reg)
def _get_operator(self, op: str):
op_map = {
'+': {
'others': ['plus'],
'fn': lambda x, y: x + y
},
'-': {
'others': ['minus'],
'fn': lambda x, y: x - y
},
'*': {
'others': ['times', 'x'],
'fn': lambda x, y: x * y
},
'/': {
'others': ['divided by'],
'fn': lambda x, y: x / y
},
'%': {
'others': ['mod', 'modulo'],
'fn': lambda x, y: x % y
},
'^': {
'others': ['**', 'to the power of'],
'fn': lambda x, y: x ** y
}
}
for k, v in op_map.items():
if op == k or op in v['others']:
return v['fn']
return None
def _pemdas(self, op: str) -> int:
add = ['plus', '+']
sub = ['minus', '-']
mult = ['times', 'x', '*']
div = ['divided by', '/']
mod = ['mod', 'modulo', '%']
exp = ['to the power of', '^']
if op in [*add, *sub]:
return 1
elif op in [*mult, *div, *mod]:
return 2
elif op in exp:
return 3
return 0
def _peek(self, stack: list) -> str:
return stack[-1] if stack else None
def _in_to_post(self, expression: str) -> list:
operators = re.findall(self.ors_reg, expression)
operands = re.findall(self.ands_reg, expression)
postfix = []
stack = ['#']
for i, operand in enumerate(operands):
postfix.append(operand)
if i == len(operators):
break
op = operators[i]
if self._pemdas(op) > self._pemdas(self._peek(stack)):
stack.append(op)
else:
while self._peek(stack) != '#' and \
(self._pemdas(op) <= self._pemdas(self._peek(stack))):
postfix.append(stack.pop())
stack.append(op)
while self._peek(stack) != '#':
postfix.append(stack.pop())
return postfix
def calculate(self, expression, fn=lambda n: n):
postfix = self._in_to_post(expression)
stack = []
for token in postfix:
if self.ors_reg.match(token):
# Pull out a function to be called based on operator
operator = self._get_operator(token)
if operator is None:
break
operand_2 = stack.pop()
operand_1 = stack.pop()
# execute the operator, but put the result back as a string
stack.append(
str(
operator(float(operand_1), float(operand_2))
)
)
else:
stack.append(fn(token))
return stack[0]
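# Example usage (illustrative sketch):
#   calc = RPNCalculator()
#   calc.calculate('2 plus 3 times 4')   # -> '14.0' (infix is converted to postfix first)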
```
#### File: theta/tools/voice_tests.py
```python
import os
try:
import azure.cognitiveservices.speech as speechsdk
except ImportError:
print('''
Importing the Speech SDK for Python failed.
Refer to
https://docs.microsoft.com/azure/cognitive-services/speech-service/quickstart-text-to-speech-python for
installation instructions.
''')
import sys
sys.exit(1)
# Set up the subscription info for the Speech Service:
# Replace with your own subscription key and service region (e.g., "westus").
speech_key = os.environ['AZURE_KEY']
service_region = os.environ['AZURE_REGION']
def speech_synthesis_to_speaker():
'''performs speech synthesis to the default speaker'''
# Creates an instance of a speech config with
# specified subscription key and service region.
speech_config = speechsdk.SpeechConfig(
subscription=speech_key, region=service_region
)
# speech_config.speech_synthesis_voice_name = \
# "Microsoft Server Speech Text to Speech Voice (en-US, AriaRUS)"
speech_config.speech_synthesis_voice_name = \
'Microsoft Server Speech Text to Speech Voice (en-US, AriaNeural)'
# Creates a speech synthesizer using the default speaker as audio output.
# The default spoken language is "en-us".
speech_synthesizer = speechsdk.SpeechSynthesizer(
speech_config=speech_config
)
# Receives a text from console input and synthesizes it to speaker.
while True:
print('Enter some text that you want to speak')
try:
text = input()
except EOFError:
break
result = speech_synthesizer.speak_text_async(text).get()
# Check result
if result.reason == speechsdk.ResultReason.SynthesizingAudioCompleted:
print('Speech synthesized to speaker for text [{}]'.format(text))
elif result.reason == speechsdk.ResultReason.Canceled:
cancellation_details = result.cancellation_details
print(
f'Speech synthesis canceled: {cancellation_details.reason}')
if cancellation_details.reason == speechsdk.CancellationReason.Error:
print(f'Error details: {cancellation_details.error_details}')
speech_synthesis_to_speaker()
``` |
{
"source": "3dik/kik-bot-api-unofficial",
"score": 2
} |
#### File: examples/legacy/cmdline_legacy.py
```python
import sys
import time
from argparse import ArgumentParser
from kik_unofficial.client_legacy import KikClient
def execute(cmd=sys.argv[1:]):
parser = ArgumentParser(description="Unofficial Kik api")
parser.add_argument('-u', '--username', help="Kik username", required=True)
parser.add_argument('-p', '--password', help="<PASSWORD>", required=True)
args = parser.parse_args(cmd)
kik = KikClient(args.username, args.password)
CmdLine(kik).run()
class CmdLine:
def __init__(self, kik):
self.kik = kik
self.partners = {}
def run(self):
self.partners = self.kik.get_chat_partners()
self.list_chats()
while True:
try:
info = self.kik.get_next_event()
if not info:
print("[-] failed to parse info")
elif 'type' not in info:
print("[-] type not in info")
print(info)
elif info["type"] == "message_read":
self.message_read(info)
elif info["type"] == "is_typing":
self.is_typing(info)
elif info["type"] == "message":
self.message(info)
elif info['type'] == 'group_message':
self.group_message(info)
elif info['type'] == 'group_typing':
self.group_typing(info)
elif info['type'] == 'group_content':
self.group_content(info)
elif info['type'] == 'group_sticker':
self.group_sticker(info)
elif info['type'] == 'group_gallery':
self.group_gallery(info)
elif info['type'] == 'group_camera':
self.group_camera(info)
elif info['type'] == 'group_gif':
self.group_gif(info)
elif info['type'] == 'group_card':
self.group_card(info)
elif info['type'] == 'message':
self.message(info)
elif info['type'] == 'content':
self.content(info)
elif info['type'] == 'sticker':
self.sticker(info)
elif info['type'] == 'gallery':
self.gallery(info)
elif info['type'] == 'camera':
self.camera(info)
elif info['type'] == 'gif':
self.gif(info)
elif info['type'] == 'card':
self.card(info)
elif info['type'] == 'qos' or info['type'] == 'acknowledgement':
pass
elif info["type"] == "end":
print("[!] Server ended communication.")
break
else:
print("[-] Unknown message: {}".format(info))
except TypeError as e:
print(e)
def content(self, info):
print("[+] Unknown content received of type {}".format(info["app_id"]))
def sticker(self, info):
print("[+] Sticker received in pack {}: {}".format(info["sticker_pack_id"], info["sticker_url"]))
def gallery(self, info):
print("[+] Gallery image received '{}': {}".format(info['file_name'], info['file_url']))
def camera(self, info):
print("[+] Camera image received '{}': {}".format(info['file_name'], info['file_url']))
def gif(self, info):
print("[+] Gif received: {}".format(info['uris']))
def card(self, info):
if 'jsonData' in info:
print("[+] Card received: {}: {}".format(info['app_name'], info['jsonData']))
elif info['app_name'] == 'ScribbleChat':
print("[+] Card received: {}: {}".format(info['app_name'], info['video_url']))
elif 'url' in info:
print("[+] Card received: '{}': {}".format(info['app_name'], info['url']))
else:
print("[-] Unknown card received: {}".format(info['app_name']))
def group_content(self, info):
print("[+] Unknown content received of type {}".format(info["app_id"]))
def group_sticker(self, info):
print("[+] Sticker received in pack {}: {}".format(info["sticker_pack_id"], info["sticker_url"]))
def group_gallery(self, info):
print("[+] Gallery image received '{}': {}".format(info['file_name'], info['file_url']))
def group_camera(self, info):
print("[+] Camera image received '{}': {}".format(info['file_name'], info['file_url']))
def group_gif(self, info):
print("[+] Gif received: {}".format(info['uris']))
def group_card(self, info):
if 'jsonData' in info:
print("[+] Card received: {}: {}".format(info['app_name'], info['jsonData']))
elif info['app_name'] == 'ScribbleChat':
print("[+] Card received: {}: {}".format(info['app_name'], info['video_url']))
elif 'url' in info:
print("[+] Card received: '{}': {}".format(info['app_name'], info['url']))
else:
print("[-] Unknown card received: {}".format(info['app_name']))
def list_chats(self):
print("[+] Chats\n{}".format("\n".join([self.full_name(peer['jid']) for peer in self.partners.values()])))
def display_name(self, name):
if name not in self.partners:
peer_info = self.kik.get_info_for_node(name)
print(peer_info)
self.partners[peer_info['jid']] = peer_info
peer = self.partners[name]
return peer['display_name'].strip()
def full_name(self, name):
peer = self.partners[name]
if peer['type'] == 'group':
if peer['public']:
return "{} ({})".format(peer['display_name'], peer['code'])
else:
return "{}".format(peer['display_name'])
else:
return "{} ({})".format(peer['display_name'], peer['username'])
def message_read(self, info):
print("[+] <{0}> message read: {1}".format(self.display_name(info['from']), info["message_id"]))
def is_typing(self, info):
if info["is_typing"]:
print("[+] <{}> typing...".format(self.display_name(info['from'])))
else:
print("[+] <{}> stopped typing...".format(self.display_name(info['from'])))
def message(self, info):
partner = info['from']
print("[+] <{0}> {1}".format(partner, info['body']))
self.kik.send_read_confirmation(partner, info["message_id"])
reply = "You said '{}'!".format(info['body'])
self.kik.send_is_typing(partner, "true")
time.sleep(0.2 * len(reply))
self.kik.send_is_typing(partner, "false")
self.kik.send_message(partner, reply)
def group_message(self, info):
group = info['group_id']
print("[+] <{0}> {1}: {2}".format(self.display_name(info['group_id']), self.display_name(info['from']),
info['body']))
if 'ping' in info['body'].lower():
self.kik.send_message(group, 'pong', groupchat=True)
return
def group_typing(self, info):
if info['is_typing']:
print("[+] <{0}> {1} is typing...".format(self.display_name(info['group_id']),
self.display_name(info['from'])))
else:
print("[+] <{0}> {1} stopped typing...".format(self.display_name(info['group_id']),
self.display_name(info['from'])))
```
#### File: examples/legacy/echo_bot_legacy.py
```python
import sys
import time
import logging
from kik_unofficial.client import KikClient
def main():
username, password = "sh<PASSWORD>", "<PASSWORD>"
kik_client = KikClient(username, password)
print("[+] Listening for incoming events.")
# main events loop
while True:
info = kik_client.get_next_event()
if "type" not in info:
continue
if info["type"] == "message_read":
print("[+] Human has read the message (user " + info["from"] + ", message id: " + info["message_id"] + ")")
elif info["type"] == "is_typing":
if info["is_typing"]:
print("[+] Human is typing (user " + info["from"] + ")")
else:
print("[+] Human is not typing (user " + info["from"] + ")")
elif info["type"] == "message":
partner = info["from"]
print("[+] Human says: \"" + info["body"] + "\" (user " + partner + ")")
kik_client.send_read_confirmation(partner, info["message_id"])
replay = "You said '" + info["body"] + "'!"
kik_client.send_is_typing(partner, "true")
time.sleep(0.2 * len(replay))
kik_client.send_is_typing(partner, "false")
kik_client.send_message(partner, replay)
elif info["type"] == "end":
print("[!] Server ended communication.")
break
print("[+] Done!")
kik_client.close()
if __name__ == '__main__':
if sys.version_info[0] < 3:
raise Exception("Must be using Python 3!!")
try:
main()
except KeyboardInterrupt:
print("[!] User stopped execution.")
```
#### File: kik-bot-api-unofficial/examples/register_client.py
```python
import logging
from kik_unofficial.client import KikClient
from kik_unofficial.callbacks import KikClientCallback
from kik_unofficial.datatypes.xmpp.errors import SignUpError
from kik_unofficial.datatypes.xmpp.roster import FetchRosterResponse
from kik_unofficial.datatypes.xmpp.login import LoginResponse
from kik_unofficial.datatypes.xmpp.sign_up import RegisterResponse
class RegisterClient(KikClientCallback):
def on_sign_up_ended(self, response: RegisterResponse):
print("Registered on node {}.".format(response.kik_node))
def on_authenticated(self):
print("Authorized connection initiated.")
client.request_roster()
def on_login_ended(self, response: LoginResponse):
print("Logged in as {}.".format(response.username))
def on_register_error(self, response: SignUpError):
if "captcha_url" in dir(response):
print(response.captcha_url)
result = input("Captcha result:")
client.register(email, username, password, first, last, birthday, result)
else:
print("Unable to register! error information:\r\n{}".format(response))
def on_roster_received(self, response: FetchRosterResponse):
print("Friends: {}".format(response.peers))
if __name__ == '__main__':
username = input('Username: ')
password = input('Password: ')
first = input('First name: ')
last = input('Last name: ')
email = input('Email: ')
birthday = input('Birthday: (like 01-01-1990): ')
client = KikClient(callback=RegisterClient(), log_level=logging.DEBUG)
client.register(email, username, password, first, last, birthday)
```
#### File: datatypes/xmpp/xiphias.py
```python
import base64
from builtins import NotImplementedError
from typing import List
from bs4 import BeautifulSoup
from google.protobuf import message
from kik_unofficial.datatypes.xmpp.base_elements import XMPPElement, XMPPResponse
from kik_unofficial.protobuf.entity.v1.entity_service_pb2 import GetUsersRequest, GetUsersResponse, GetUsersByAliasRequest, RequestedJid, \
GetUsersByAliasResponse
class XiphiasRequest(XMPPElement):
def __init__(self, method):
super().__init__()
self.method = method
def get_protobuf_payload(self) -> message.Message:
raise NotImplementedError
def serialize(self):
payload = self.get_protobuf_payload()
protobuf_data = base64.b64encode(payload.SerializeToString()).decode()
data = ('<iq type="set" id="{}">'
'<query xmlns="kik:iq:xiphias:bridge" service="mobile.entity.v1.Entity" method="{}">'
'<body>{}</body>'
'</query>'
'</iq>').format(self.message_id, self.method, protobuf_data)
return data.encode()
class UsersRequest(XiphiasRequest):
def __init__(self, peer_jids):
super().__init__('GetUsers')
if isinstance(peer_jids, List):
self.peer_jids = peer_jids
else:
self.peer_jids = [peer_jids]
def get_protobuf_payload(self):
request = GetUsersRequest()
for peer_jid in self.peer_jids:
jid = request.ids.add()
jid.local_part = peer_jid.split('@')[0]
return request
class UsersResponseUser:
"""
Normal jids (used with client.xiphias_get_users):
Includes user data such as profile creation date and background picture URL.
Alias jids provided in public groups (used with client.xiphias_get_users_by_alias):
Includes all the private profile data (username, display_name, etc) of a user
if you're chatting with them, else it'll get the local jid and the creation date.
"""
username = None
jid = None
alias_jid = None
display_name = None
creation_date_seconds = None
creation_date_nanos = None
bio = None
background_pic_full_sized = None
background_pic_thumbnail = None
background_pic_updated_seconds = None
interests = None
kin_user_id = None
def __init__(self, user):
if hasattr(user, 'private_profile'):
self.username = user.private_profile.username.username
if user.private_profile.id.local_part:
# The attribute seems to exist with an empty string
self.jid = user.private_profile.id.local_part + "@talk.kik.com"
if user.id:
if hasattr(user.id, 'local_part'):
if user.id.local_part:
self.jid = user.id.local_part + "@talk.kik.com"
if hasattr(user.id, 'alias_jid'):
if user.id.alias_jid.local_part:
self.alias_jid = user.id.alias_jid.local_part + "@talk.kik.com"
if hasattr(user, 'public_group_member_profile'):
# The attrs below are found in the member's profile
user = user.public_group_member_profile
if user.registration_element:
self.creation_date_seconds = user.registration_element.creation_date.seconds
self.creation_date_nanos = user.registration_element.creation_date.nanos
if hasattr(user, 'display_name'):
self.display_name = user.display_name.display_name
if hasattr(user, 'bio_element'):
self.bio = user.bio_element.bio
if hasattr(user, 'background_profile_pic_extension'):
pic = user.background_profile_pic_extension.extension_detail.pic
self.background_pic_full_sized = pic.full_sized_url
self.background_pic_thumbnail = pic.thumbnail_url
self.background_pic_updated_seconds = pic.last_updated_timestamp.seconds
if hasattr(user, 'interests_element'):
self.interests = [element.localized_verbiage for element in user.interests_element.interests_element]
if hasattr(user, 'kin_user_id_element'):
self.kin_user_id = user.kin_user_id_element.kin_user_id.id
class UsersResponse(XMPPResponse):
def __init__(self, data: BeautifulSoup):
super().__init__(data)
text = base64.urlsafe_b64decode(data.query.body.text.encode())
response = GetUsersResponse()
response.ParseFromString(text)
self.users = [UsersResponseUser(user) for user in response.users]
class UsersByAliasRequest(XiphiasRequest):
def __init__(self, alias_jids):
super().__init__('GetUsersByAlias')
if isinstance(alias_jids, List):
self.alias_jids = alias_jids
else:
self.alias_jids = [alias_jids]
def get_protobuf_payload(self):
request = GetUsersByAliasRequest()
for peer_jid in self.alias_jids:
jid = request.ids.add() # type: RequestedJid
jid.alias_jid.local_part = peer_jid.split('@')[0]
return request
class UsersByAliasResponse(XMPPResponse):
def __init__(self, data: BeautifulSoup):
super().__init__(data)
text = base64.urlsafe_b64decode(data.query.body.text.encode())
response = GetUsersByAliasResponse()
response.ParseFromString(text)
self.users = [UsersResponseUser(payload) for payload in response.payloads]
``` |
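A minimal usage sketch for the request side of this module (not part of the original file): it only shows how a `UsersRequest` is serialized into an `<iq>` stanza; the surrounding XMPP connection handling in `KikClient` is omitted and the peer JIDs are placeholders.
```python
# Hypothetical example: serialize a GetUsers request for two peer JIDs.
# The JIDs below are placeholders; actually sending the stanza is normally
# handled by KikClient, which is not shown here.
from kik_unofficial.datatypes.xmpp.xiphias import UsersRequest

request = UsersRequest(["alice_abc@talk.kik.com", "bob_xyz@talk.kik.com"])
stanza = request.serialize()  # bytes containing the base64-encoded protobuf body
print(stanza[:40])
```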
{
"source": "3dimaging/DeepLearningCamelyon_II",
"score": 2
} |
#### File: dldp/heatmap_pred/copy_makeup_files.py
```python
import numpy as np
import pandas as pd
import sys
import os
import os.path as osp
from shutil import copyfile
class copy_makeup_files(object):
"""
:note: the makeup_path and destination_path have the same directory
structure.
"""
def __init__(self, makeup_path, destination_path):
"""
for class initialization
:param makeup_path: the folder storing the makeup results
:type makeup_path: string
:param destination_path: the folder storing all the results
:type destination_path: string
"""
self.makeup_path = makeup_path
self.destination_path = destination_path
def copy_file(self):
dir = os.listdir(self.makeup_path)
# list taskid in each folder
makeup_file_list = []
# full_predicted_taskid = []
for folder in dir:
file_list = os.listdir(osp.join(self.makeup_path, folder))
for file in file_list:
full_path_file = osp.join(self.makeup_path, folder, file)
print(full_path_file)
copyfile(full_path_file, osp.join(self.destination_path,
folder, file))
makeup_file_list.append(full_path_file)
return makeup_file_list
if __name__ == "__main__":
makeup_path = '/scratch/weizhe.li/Pred_Storage/makeup_normal_Macenko'
destination_path = '/scratch/weizhe.li/Pred_Storage/Macenko/normal_Macenko'
makeup_copy = copy_makeup_files(makeup_path, destination_path)
makeup_copy.copy_file()
```
#### File: dldp/image_process/color_normalization.py
```python
import dldp.utils.fileman as fm
import numpy as np
from datetime import datetime
from tqdm import tqdm
from imageio import imwrite as saveim
import staintools
from skimage import io
import os
import os.path as osp
import stainNorm_Reinhard
import stainNorm_Macenko
import sys
sys.path.append('/home/wli/Stain_Normalization-master')
# from scipy.misc import imsave as saveim
# import the modules from dldp package
def color_normalization(template_image_path, color_norm_method):
"""
    The function puts all the color normalization methods together.
:param template_image_path: the template image for normalization
:type template_image_path: string
:param color_norm_method: the method for color normalization
:type color_norm_method: string
:return: color_normalizer. It is the initialized object for the
actual normalization.
:rtype: object
"""
template_image = staintools.read_image(template_image_path)
standardizer = staintools.LuminosityStandardizer.standardize(
template_image)
if color_norm_method == 'Reinhard':
color_normalizer = stainNorm_Reinhard.Normalizer()
color_normalizer.fit(standardizer)
elif color_norm_method == 'Macenko':
color_normalizer = stainNorm_Macenko.Normalizer()
color_normalizer.fit(standardizer)
elif color_norm_method == 'Vahadane':
color_normalizer = staintools.StainNormalizer(method='vahadane')
color_normalizer.fit(standardizer)
return color_normalizer
def color_norm(image_patch, fit, log_file=False):
"""
    To normalize images based on the color_normalizer object returned by the
    function color_normalization.
:param image_patch: the image to be normalized. It can be a path of the image or image itself
:type image_patch: array
:param fit: the object of color_normalizer.
:type fit: object
:param log_file: the file to record the failed tasks.
:type log_file: str
:return: img_norm, the normalized images.
:rtype: array
"""
if isinstance(image_patch, str) and osp.isfile(image_patch):
img = io.imread(image_patch)
elif isinstance(image_patch, np.ndarray):
img = image_patch
img = img[:, :, :3]
img_norm = []
try:
img_standard = staintools.LuminosityStandardizer.standardize(img)
img_norm = fit.transform(img_standard)
except Exception as e:
log_file.write(str(image_patch) + ';' + str(e))
# print(img_norm)
return img_norm
def save_color_norm_patches(dest_path, source_path, image_patch, img_norm,
color_norm_method):
"""
The normalized image patches will be saved in the same folder structure
as the original image patches.
:param dest_path: the place to store the normalized image patches.
:type dest_path:string
:param source_path: the folder to store the original image patches.
:type source_path: string
:param file: the full path of the original image patch.
:type file: string
:param img_norm: the normalized image patch.
:type img_norm: array
:param color_norm_method: the method used for color normalization
:type color_norm_method: string
:return: None
"""
file_full_path = osp.dirname(image_patch)
relative_path = osp.relpath(file_full_path, source_path)
path_to_create = osp.join(dest_path, relative_path)
try:
os.makedirs(path_to_create)
except Exception:
pass
# print(image_patch)
try:
saveim('%s/%s_%s.png' % (path_to_create,
osp.splitext(osp.basename(image_patch))[0],
color_norm_method),
img_norm)
except Exception:
pass
return None
if __name__ == "__main__":
template_image_path = '/raidb/wli/tumor_st.png'
# The template image is also included in the package
# template_image_path = '.../dldp/data/tumor_st.png'
source_path = '/home/wli/Documents/original_256_patches'
# source_path = '/raidb/wli/256_patches_to_be_normalized/original
# _256_patches'
dest_path_1 = '/raidb/wli/testing_1219/color_norm/Reinhard'
dest_path_2 = '/raidb/wli/testing_1219/color_norm/Macenko'
log_path = '/home/wli/log_files'
###########################################################################
patches_for_color_norm = fm.list_file_in_dir_II(
source_path, file_ext='png')
print(len(patches_for_color_norm))
for color_norm_method in ['Reinhard', 'Macenko']:
current_time = datetime.now().strftime("%d-%m-%Y_%I-%M-%S_%p")
log_file = open('%s/%s_%s.txt' %
(log_path, color_norm_method, current_time), 'w')
fit = color_normalization(template_image_path, color_norm_method)
if color_norm_method == 'Reinhard':
dest_path = dest_path_1
elif color_norm_method == 'Macenko':
dest_path = dest_path_2
for image_patch in tqdm(patches_for_color_norm):
img_norm = color_norm(image_patch, fit, log_file)
save_color_norm_patches(
dest_path, source_path, image_patch, img_norm,
color_norm_method)
```
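For clarity, a minimal single-patch sketch of the same API (the `__main__` block above does the batch version); the template and patch paths below are placeholders.
```python
# Hypothetical single-image usage of the functions above; paths are placeholders.
fit = color_normalization('/path/to/tumor_st.png', 'Macenko')   # build the normalizer once
log = open('/tmp/color_norm_failures.txt', 'w')                 # collects failed patches
normalized = color_norm('/path/to/patch_0001.png', fit, log)    # returns a normalized RGB array
log.close()
```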
#### File: dldp/image_process/patch_aug.py
```python
import tensorflow as tf
import numpy as np
import matplotlib.pyplot as plt
import os.path as osp
import glob
import cv2
from imageio import imwrite as saveim
# for 224 images
def random_crop2(image, mask, size_origin, crop_size):
"""
To get randomly cropped images from original image
:param image: original image
:type image: array
:param mask: the corresponding mask image from ground truth
:type mask: array
:param size_origin: the size of original image.
:param crop_size: the size of image to be cropped.
:return: cropped image, cropped mask, position information
:rtype: tuple
"""
# width, height = slide.level_dimensions[4]
dy, dx = crop_size
x = np.random.randint(0, size_origin - dx + 1)
y = np.random.randint(0, size_origin - dy + 1)
index = [x, y]
# cropped_img = (image[x:(x+dx), y:(y+dy),:], rgb_binary[x:(x+dx), y:(y+dy)], mask[x:(x+dx), y:(y+dy)])
cropped_img = image[x:(x+dx), y:(y+dy), :]
# cropped_binary = rgb_binary[x:(x+dx), y:(y+dy)]
cropped_mask = mask[x:(x+dx), y:(y+dy)]
return (cropped_img, cropped_mask, index)
def patch_aug_flip_rotate_crop(image, crop_size, image_name, folder_to_save=False):
"""
    the function generates randomly flipped/rotated 224x224 patches from 256x256 patches
    :param image: the original image patch
    :type image: array
    :param crop_size: the size of the new image
    :type crop_size: list
:param image_name: the name of the original image
:type image_name: string
:param folder_to_save: the folder to save the new images
:type folder_to_save: string
:return: cropped image
:rtype: array
"""
random_number = np.random.randint(0, 4)
if random_number == 0:
image_rotated = np.fliplr(image)
elif random_number == 1:
image_rotated = np.rot90(image, 1)
elif random_number == 2:
image_rotated = np.rot90(image, 2)
elif random_number == 3:
image_rotated = np.rot90(image, 3)
else:
image_rotated = image
# maskset = [imgmask, maskroted1, maskroted2, maskroted3, maskroted4]
# imageset = [img_norm, imageroted1, imageroted2, imageroted3, imageroted4]
dy, dx = crop_size
if image.shape[0] > dx:
x = np.random.randint(0, 256 - dx + 1)
y = np.random.randint(0, 256 - dy + 1)
# index = [x, y]
# cropped_img = (image[x:(x+dx),y:(y+dy),:],rgb_binary[x:(x+dx),y:(y+dy)],
# mask[x:(x+dx), y:(y+dy)])
cropped_img = image_rotated[x:(x + dx), y:(y + dy), :]
# cropped_binary = rgb_binary[x:(x+dx), y:(y+dy)]
# cropped_mask = mask[x:(x + dx), y:(y + dy)]
    else:
        # the patch already matches the crop size, so keep the rotated image as-is
        cropped_img = image_rotated
if folder_to_save:
saveim('%s/%s_aug.png' % (folder_to_save, image_name), cropped_img)
return cropped_img
def color_noise_hsv_to_blue(image, max=20):
"""
adding color noise to the direction of blue based on HSV color space
:param image: the image to be modified
:type image: array
:param max: the range of color noise
:type max: int
:return: img_noise_rgb, a RGB image with color noise
:rtype: array
"""
img_hsv = cv2.cvtColor(image, cv2.COLOR_BGR2HSV)
img_colornoise = img_hsv + np.random.uniform(0, max, size=(1, 3))
img_colornoise[img_colornoise > 255] = 255
img_colornoise[:, :, 0][img_colornoise[:, :, 0] > 179] = 179
img_noise = img_colornoise.astype('uint8')
img_noise_rgb = cv2.cvtColor(img_noise, cv2.COLOR_HSV2BGR)
return img_noise_rgb
def color_noise_hsv_to_red(image, max=20):
"""
This function is used to add color noise to the direction of red
based on HSV color space.
:param image: the original image
:type image: array
:param max: the range of color noise
:type max: int
:return: m_rgb, a RGB image with color noise
:rtype: array
"""
m_hsv = cv2.cvtColor(image, cv2.COLOR_RGB2HSV)
m_colornoise = m_hsv + np.random.uniform(0, max, size=(1, 3))
    # casting to 'uint8' right away is dangerous: many values go beyond 255
    # after adding the random noise and would wrap around. So we first clamp
    # S and V to 255 and H to 179 before converting.
m_colornoise[m_colornoise > 255] = 255
m_colornoise[:, :, 0][m_colornoise[:, :, 0] > 179] = 179
# now we can transform the value to unit8 safely
m_int = m_colornoise.astype('uint8')
m_rgb = cv2.cvtColor(m_int, cv2.COLOR_HSV2RGB)
return m_rgb
# this is based on the description of liu yun's paper,"Detecting Cancer Metastases on Gigapixel Pathology Images".
# use the random functions from tensorflow
def color_perturb(image):
"""
adding color noise and changing the constrast.
:param image: image to be modified
:type image: array
:returns: image with color perturbation
:rtype: array
"""
image = tf.image.random_brightness(image, max_delta=64. / 255.)
image = tf.image.random_saturation(image, lower=0.75, upper=1.25)
image = tf.image.random_hue(image, max_delta=0.04)
image = tf.image.random_contrast(image, lower=0.25, upper=1.75)
return image
```
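Since this module has no `__main__` block, here is a hedged sketch of how the augmentation helpers above might be combined on one 256×256 patch; the file path is a placeholder, and `color_perturb` returns a TensorFlow tensor, so it is evaluated in an eager/session context in practice.
```python
# Hypothetical usage of the augmentation helpers above; the patch path is a placeholder.
from skimage import io

patch = io.imread('/path/to/tumor_patch_256.png')[:, :, :3]
aug = patch_aug_flip_rotate_crop(patch, (224, 224), 'tumor_patch_256')  # random flip/rotate + crop
noisy = color_noise_hsv_to_red(aug, max=20)                             # HSV-space color jitter
perturbed = color_perturb(noisy)                                        # TF brightness/hue/contrast jitter
```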
#### File: dldp/utils/xml_to_mask.py
```python
import math
import glob
import pandas as pd
import xml.etree.ElementTree as et
from pandas import DataFrame
import openslide
import numpy as np
import cv2
import matplotlib.pyplot as plt
import logging
import os.path as osp
import sys
sys.path.append('/home/weizhe.li/dldp/utils/logman')
sys.path.append('/home/weizhe.li/dldp/utils')
# setup_logging
import fileman as fm
import logger_management
from logger_management import log_with_template
from logger_management import StreamToLogger
from logger_management import setup_logging
# import multiresolutionimageinterface as mir
# reader = mir.MultiResolutionImageReader()
# mr_image = reader.open('/home/wli/Downloads/tumor_036.tif')
# Ximageorg, Yimageorg = mr_image.getDimensions()
# dims = mr_image.getLevelDimensions(4)
# Ximage = (Ximage+240//2)//240
# Ximage = 4000
# Yimage = (Yimage+240//2)//240
# Yimage = 2000
class mask_generator(object):
"""
The class is used to generate a single mask file (not pyramid) based
on xml file.
"""
    def __init__(self, xml_file, level, dims, result_folder=None):
"""
variables initialization
:param xml_file:
:param level:
:param dims:
:param result_folder:
"""
self.xml_file = xml_file
self.level = level
self.dims = dims
self.result_folder = result_folder
def convert_xml_df(self):
"""
To convert a xml file to a series of dataframes in a tuple.
:return: df_xml: x, y coordinates
:rtype: dataframe
"""
down_sample = 2**self.level
parseXML = et.parse(self.xml_file)
root = parseXML.getroot()
dfcols = ['Name', 'Order', 'X', 'Y']
df_xml = pd.DataFrame(columns=dfcols)
for child in root.iter('Annotation'):
for coordinate in child.iter('Coordinate'):
Name = child.attrib.get('Name')
Order = coordinate.attrib.get('Order')
X_coord = float(coordinate.attrib.get('X'))
X_coord = X_coord//down_sample
#X_coord = X_coord/down_sample
Y_coord = float(coordinate.attrib.get('Y'))
Y_coord = Y_coord//down_sample
#Y_coord = Y_coord/down_sample
df_xml = df_xml.append(pd.Series(
[Name, Order, X_coord, Y_coord], index=dfcols),
ignore_index=True) # type: DataFrame
df_xml = pd.DataFrame(df_xml)
        print('redundant xml:', df_xml.shape)
return df_xml
# x_values = list(annotations['X'].get_values())
# y_values = list(annotations['Y'].get_values())
# xy = list(zip(x_values,y_values))
def points_collection(self, annotations):
"""
remove the duplicated coordinates due to the down_sampling
        :param annotations: dataframe of annotation coordinates from convert_xml_df
        :return: tuple of (coordinate arrays per annotation, annotation names)
        :rtype: tuple
"""
final_name_list = list(annotations['Name'].unique())
coxy = [[] for x in range(len(final_name_list))]
for index, n in enumerate(final_name_list):
newx = annotations[annotations['Name'] == n]['X']
newy = annotations[annotations['Name'] == n]['Y']
newxy = list(zip(newx, newy))
coxy[index] = np.array(newxy, dtype=np.int32)
return (coxy, final_name_list)
def mask_gen(self, coxy, result_folder):
"""
generate a binary mask file
        :param coxy: the down-sampled annotation coordinates
        :type coxy: list
:param result_folder:
:type result_folder:str
:return: mask file
:rtype: tif file
"""
# image = cv2.imread('/home/wli/Downloads/tumor_036.xml', -1)
canvas = np.zeros((int(self.dims[1]//2**self.level), int(self.dims[0]//2**self.level)), np.uint8)
# canvas = np.zeros((int(dims[1]/32), int(dims[0]/32)), np.uint8)
# tile =mr_image.getUCharPatch(0, 0, dims[0], dims[1], 4)
# canvas = np.zeros((Ximage, Yimage, 3), np.uint8) # fix the division
# coords = np.array([xy], dtype=np.int32)
# cv2.drawContours(canvas, [coords],-1, (0,255,0), -1)
# cv2.drawContours(canvas, coxy, -1, (255, 255, 255), 10)
# cv2.drawContours(canvas, coxy, -1, (255, 255, 255), CV_FILLED)
cv2.fillPoly(canvas, pts=coxy, color=(255, 255, 255))
# cv2.polylines(canvas, coxy, isClosed=True, color=(255,255,255),
# thickness=5)
cv2.imwrite('%s/%s.png' % (result_folder,
osp.splitext(osp.basename(self.xml_file))[0]), canvas)
# cv2.imshow("tile", tile);cv2.waitKey();cv2.destroyAllWindows()
# cv2.fillConvexPoly(mask, coords,1)
# mask = mask.astype(np.bool)
# output = np.zeros_like(image)
# output[mask] = image[mask]
# cv2.imshow('image',output)
# cv2.waitKey(0)
# cv2.destroyAllWindows()
class lesion_count(mask_generator):
    def __init__(self, xml_file, level, dims, result_folder=None):
        super().__init__(xml_file, level, dims, result_folder)
    def gen_lesion_table(self, coxy, final_name_list):
        lesion_total = []
        for coord, lesion_name in list(zip(coxy, final_name_list)):
M = cv2.moments(coord)
cX = int(M["m10"] / M["m00"])
cY = int(M["m01"] / M["m00"])
lesion_total.append([lesion_name, (cX, cY), cv2.contourArea(coord)])
return lesion_total
if __name__ == '__main__':
# setup_logging
module_name = sys.modules['__main__'].__file__
log_template_path = '/home/weizhe.li/dldp/utils/logman/logging_config.yaml'
# The log template is also inculded in this package.
# log_template_path = '.../dldp/utils/logman/logging_config.yaml'
logger = log_with_template(log_template_path, module_name)
# xml_folder = '/home/wzli/Downloads/CAMELYON16/testing/
# lesion_annotations/'
# xml_folder = '/home/wzli/Downloads/CAMELYON16/training/
# lesion_annotations_training/'
xml_folder = '/projects01/wxc4/wli/CAMELYON16/lesion_annotations'
xml_paths = fm.list_file_in_dir_II(xml_folder, 'xml')
logger.debug('the fist xml file is %s' % xml_paths[0])
# xml_paths = glob.glob(osp.join(xml_folder, '*.xml'))
# xml_paths.sort()
# slide_folder = '/home/wzli/Downloads/CAMELYON16/testing/images/'
slide_folder = '/projects01/wxc4/CAMELYON16-training/tumor'
result_folder = '/projects01/wxc4/wli/CAMELYON16/lesion_counts'
created_folder = fm.creat_folder('', result_folder)
# slide_paths = glob.glob(osp.join(slide_folder, '*.tif'))
level = 5
############################lesion count#######################################
lesion_total = []
col_names = ['slide_name', 'lesion_name', 'centroid', 'area']
for xml_file in xml_paths:
slide_name = osp.basename(xml_file.replace('.xml', '.tif'))
slide_path = osp.join(slide_folder, slide_name)
wsi_image = openslide.open_slide(slide_path)
dims = wsi_image.dimensions
mask_gen = mask_generator(xml_file, level, dims)
annotations = mask_gen.convert_xml_df()
final_annotations, _ = mask_gen.points_collection(annotations)
# mask_gen.mask_gen(final_annotations, reult_folder)
lesion_stat = lesion_count(xml_file, level, dims)
annotations = lesion_stat.convert_xml_df()
final_annotations, lesion_names = lesion_stat.points_collection(annotations)
slide_lesions = lesion_stat.gen_lesion_table(final_annotations, lesion_names)
        # prepend the slide name so each row matches
        # col_names = ['slide_name', 'lesion_name', 'centroid', 'area']
        lesion_total.extend([[slide_name] + lesion for lesion in slide_lesions])
    df_lesion_stat = pd.DataFrame(lesion_total, columns=col_names)
    df_lesion_stat.to_csv(osp.join(result_folder, 'lesion_counts.csv'))
```
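The `__main__` block above exercises the lesion-count path and leaves the actual mask writing commented out; a hedged sketch of generating a mask PNG for a single slide might look like this (all paths are placeholders).
```python
# Hypothetical single-slide mask generation using the classes above; paths are placeholders.
import openslide

xml_file = '/path/to/lesion_annotations/tumor_001.xml'
slide = openslide.open_slide('/path/to/tumor/tumor_001.tif')
gen = mask_generator(xml_file, level=5, dims=slide.dimensions)
annotations = gen.convert_xml_df()
coxy, names = gen.points_collection(annotations)
gen.mask_gen(coxy, '/path/to/output_masks')   # writes tumor_001.png at 1/32 scale (level 5)
```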
#### File: dldp/wsi_visu/WSI_viewer_new.py
```python
import os.path as osp
import openslide
from pathlib import Path
import numpy as np
import matplotlib.pyplot as plt
import cv2
from pandas import DataFrame
import xml.etree.ElementTree as et
import pandas as pd
from skimage.filters import threshold_otsu
import math
import glob
import re
import sys
class WSI_viewer_new(object):
'''
The wsi_viewer class is used to display the wsi image, ground truth, predicted heatmap and their overlays
Input: paths for WSI image, xml file for ground truth, heatmap
WSI_path : the path of a WSI image;
Xml_path : the path of a xml file, the ground truth (outline the tumor regions) of a WSI image;
Dimension_path : the path of npy file I generated to store the dimensions of WSI image and tissue region;
Mask_truth_path : the path of generated binary mask files from xml files;
Heatmap_path : the path of predicted heatmap showing the scores of possible tumor region.
Output: contours of tissue region, tumor region (from ground truth), and heatmap on WSI image
How to Use:
Step 1: create an instance of the object
for example, viewer = WSI_viewer()
Step 2: display the combined contours of tissue region and tumor region
viewer.combined_contour_on_wsi()
Step 3: display heatmap over contours of tumor region
viewer.display_heatmap()
Step 4: generate binary mask flies:
viewer.mask_generation()
Author: <NAME>
    Date: 05-08-19
'''
# Class Attribute
slide_level = 5
PPI = 150
threshold = 0.6
# Initializer / Instance Attributes
def __init__(self, WSI_path, Xml_path, Dimension_path, Mask_truth_path='', Heatmap_path=''):
self.WSI_path = WSI_path
self.Xml_path = Xml_path
self.Mask_truth_path = Mask_truth_path
self.Heatmap_path = Heatmap_path
self.Dimension_path = Dimension_path
# load in the files
self.wsi_image = openslide.open_slide(WSI_path)
self.dims = self.wsi_image.dimensions
# ground_truth = openslide.open_slide(ground_truth_dir)
if Mask_truth_path:
try:
self.mask_truth = cv2.imread(Mask_truth_path)
self.mask_truth = self.mask_truth[:, :, 0].astype('uint8')
except:
self.wsi_truth = openslide.open_slide(Mask_truth_path)
self.mask_truth_asap = self.wsi_truth.read_region((0, 0), self.slide_level, (
int(self.dims[0] / math.pow(2, self.slide_level)), int(self.dims[1] / math.pow(2, self.slide_level))))
if Heatmap_path:
self.heat_map = np.load(Heatmap_path)
self.bbox = np.load(Dimension_path)
# read in the wsi image at level 4, downsampled by 16
# dims = wsi_image.level_dimensions[4]
self.wsi_image_thumbnail = np.array(self.wsi_image.read_region((0, 0), self.slide_level, (
int(self.dims[0] / math.pow(2, self.slide_level)), int(self.dims[1] / math.pow(2, self.slide_level)))))
self.wsi_image_thumbnail = self.wsi_image_thumbnail[:, :, :3].astype(
'uint8')
self.wsi_image_thumbnail_copy = self.wsi_image_thumbnail.copy()
self.wsi_image_thumbnail_copy_2 = self.wsi_image_thumbnail.copy()
# read in the ground_truth
# ground_truth_image = np.array(ground_truth.get_thumbnail((dims[0]/16, dims[1]/16)))
def tissue_contour_on_wsi(self):
# read the WSI file, do not use get_thumbnail function. It has bug
# wsi_image = openslide.open_slide(WSI_path)
# dims = wsi_image.dimensions
# thumbnail = wsi_image.read_region((0,0), slide_level,(int(dims[0]/32), int(dims[1]/32)))
# thumbnail = np.array(thumbnail)
# thumbnail = thumbnail[:,:,:3]
# thumbnail = thumbnail.astype('uint8')
# drawcontours for tissue regions only
hsv_image = cv2.cvtColor(self.wsi_image_thumbnail, cv2.COLOR_RGB2HSV)
h, s, v = cv2.split(hsv_image)
hthresh = threshold_otsu(h)
sthresh = threshold_otsu(s)
# vthresh = threshold_otsu(v)
# be min value for v can be changed later
minhsv = np.array([hthresh, sthresh, 0], np.uint8)
maxhsv = np.array([180, 255, 255], np.uint8)
thresh = [minhsv, maxhsv]
# extraction the countor for tissue
rgbbinary = cv2.inRange(hsv_image, thresh[0], thresh[1])
# plt.imshow(rgbbinary)
rgbbinary = rgbbinary.astype("uint8")
kernel = cv2.getStructuringElement(cv2.MORPH_ELLIPSE, (20, 20))
rgbbinary_close = cv2.morphologyEx(rgbbinary, cv2.MORPH_CLOSE, kernel)
rgbbinary_open = cv2.morphologyEx(
rgbbinary_close, cv2.MORPH_OPEN, kernel)
_, contours, _ = cv2.findContours(
rgbbinary_open, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
contours_on_wsi = cv2.drawContours(
self.wsi_image_thumbnail_copy, contours, -1, (0, 255, 0), 20)
cv2.imwrite("tissue_contour_%s.png" % osp.splitext(
osp.basename(self.WSI_path))[0], contours_on_wsi)
return contours
# reader = mir.MultiResolutionImageReader()
# mr_image = reader.open('/home/wli/Downloads/tumor_036.tif')
# Ximageorg, Yimageorg = mr_image.getDimensions()
# dims = mr_image.getLevelDimensions(4)
# Ximage = (Ximage+240//2)//240
# Ximage = 4000
# Yimage = (Yimage+240//2)//240
# Yimage = 2000
# this is a private method used for mask generation
def _convert_xml_df(self):
parseXML = et.parse(self.Xml_path)
root = parseXML.getroot()
dfcols = ['Name', 'Order', 'X', 'Y']
df_xml = pd.DataFrame(columns=dfcols)
for child in root.iter('Annotation'):
for coordinate in child.iter('Coordinate'):
Name = child.attrib.get('Name')
Order = coordinate.attrib.get('Order')
X_coord = float(coordinate.attrib.get('X'))
# X_coord = X_coord - 30000
# X_coord = ((X_coord)*dims[0])/Ximageorg
X_coord = X_coord / math.pow(2, self.slide_level)
Y_coord = float(coordinate.attrib.get('Y'))
# Y_coord = Y_coord - 155000
# Y_coord = ((Y_coord)*dims[1])/Yimageorg
Y_coord = Y_coord / math.pow(2, self.slide_level)
df_xml = df_xml.append(pd.Series([Name, Order, X_coord, Y_coord], index=dfcols),
ignore_index=True) # type: DataFrame
df_xml = pd.DataFrame(df_xml)
return (df_xml)
# x_values = list(annotations['X'].get_values())
# y_values = list(annotations['Y'].get_values())
# xy = list(zip(x_values,y_values))
# this is a private method used for mask generation
def _Remove(self, duplicate):
final_list = []
for num in duplicate:
if num not in final_list:
final_list.append(num)
return final_list
def mask_generation(self):
# mask or ground truth generation
annotations = self._convert_xml_df()
final_list = self._Remove(annotations['Name'])
# the list coxy store the x,y coordinates
coxy = [[] for x in range(len(final_list))]
i = 0
for n in final_list:
newx = annotations[annotations['Name'] == n]['X']
newy = annotations[annotations['Name'] == n]['Y']
print(n)
print(newx, newy)
newxy = list(zip(newx, newy))
coxy[i] = np.array(newxy, dtype=np.int32)
# note: i is different from n.
i = i + 1
# image = cv2.imread('/home/wli/Downloads/tumor_036.xml', -1)
# int(self.dims[0]/math.pow(2, self.slide_level)), int(self.dims[1]/math.pow(2, self.slide_level)
canvas = np.zeros(
(int(self.dims[1] / math.pow(2, self.slide_level)),
int(self.dims[0] / math.pow(2, self.slide_level))),
np.uint8)
# tile =mr_image.getUCharPatch(0, 0, dims[0], dims[1], 4)
# canvas = np.zeros((Ximage, Yimage, 3), np.uint8) # fix the division
# coords = np.array([xy], dtype=np.int32)
# cv2.drawContours(canvas, [coords],-1, (0,255,0), -1)
# cv2.drawContours(canvas, coxy, -1, (255, 255, 255), 10)
# cv2.drawContours(canvas, coxy, -1, (255, 255, 255), CV_FILLED)
cv2.fillPoly(canvas, pts=coxy, color=(255, 255, 255))
# cv2.polylines(canvas, coxy, isClosed=True, color=(255,255,255), thickness=5)
cv2.imwrite('home_made_mask_%s.tif' % osp.splitext(
osp.basename(self.Xml_path))[0], canvas)
return canvas
def truth_contour_on_wsi(self):
# read mask file
if self.Mask_truth_path:
try:
mask = self.mask_truth
mask_binary = mask.clip(max=1)
except:
mask = self.mask_truth_asap
mask_binary = np.array(mask.convert('L'))
else:
mask = self.mask_generation()
mask_binary = mask.clip(max=1)
_, contours_mask, _ = cv2.findContours(
mask_binary, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
truth_contours_wsi = cv2.drawContours(
self.wsi_image_thumbnail, contours_mask, -1, (0, 0, 255), 20)
cv2.imwrite("truth_contours_wsi_%s.png" % osp.basename(
osp.splitext(self.WSI_path)[0]), truth_contours_wsi)
return truth_contours_wsi
def combined_contour_on_wsi(self):
# draw contours for tissue regions and mask at the same pic
wsi_image_thumbnail = self.wsi_image_thumbnail
contours_tissue = self.tissue_contour_on_wsi()
contours_mask = self.truth_contour_on_wsi().copy()
# cv2.drawContours(wsi_image_thumbnail, contours_mask, -1, (0, 255, 0), 20)
segmentation_mask = cv2.drawContours(
contours_mask, contours_tissue, -1, (0, 255, 0), 20)
plt.imshow(segmentation_mask)
cv2.imwrite("tissue_truth_contour_%s.png" % osp.basename(
osp.splitext(self.WSI_path)[0]), segmentation_mask)
def display_heatmap(self):
''' This function is used to display the heat_map over WSI image
        input: directory of WSI image, directory of heat_map, directory of dimensions, threshold
        output: original WSI image, heat_map, heat_map over WSI image,
'''
# thumbnail_test_002 = np.array(slide.get_thumbnail(dims))
# change to grayscale image to avoid the color confusion with heat_map.
# wsi_image_thumbnail_grayscale = cv2.cvtColor(wsi_image_thumbnail, code=cv2.COLOR_RGB2GRAY)
# make a heat_map with the same size as the downsampled wsi image
# heatmap_final_final = np.zeros((dimension_002[0]*14, dimension_002[1]*14), pred_test_002.dtype)
heatmap_final_final = np.zeros((self.wsi_image_thumbnail.shape[0], self.wsi_image_thumbnail.shape[1]),
self.heat_map.dtype)
heatmap_final_final[self.bbox[5] * 14:(self.bbox[6] + 1) * 14,
self.bbox[3] * 14:(self.bbox[4] + 1) * 14] = self.heat_map
        # select the heat_map pixels above the threshold
heatmap_final_final_bin = (
heatmap_final_final > self.threshold) * heatmap_final_final
heatmap_final_final_bin[heatmap_final_final_bin == 0] = np.nan
truth_contour_wsi = self.truth_contour_on_wsi()
# display the overlay image
plt.figure(figsize=(80, 40))
ax1 = plt.subplot(1, 4, 1)
ax1.imshow(self.wsi_image_thumbnail_copy_2)
ax2 = plt.subplot(1, 4, 2)
try:
ax2.imshow(self.mask_truth, cmap='gray')
except:
ax2.imshow(self.mask_truth_asap, cmap='gray')
ax3 = plt.subplot(1, 4, 3)
ax3.imshow(heatmap_final_final, cmap='jet')
ax4 = plt.subplot(1, 4, 4)
truth_contour_wsi = self.truth_contour_on_wsi()
ax4.imshow(truth_contour_wsi)
ax4.imshow(heatmap_final_final_bin, cmap='jet',
interpolation='none', alpha=1.0, vmin=0.0, vmax=1.0)
plt.show()
plt.savefig('overlay_%s.png' % osp.splitext(
osp.basename(self.WSI_path))[0], dpi=self.PPI)
# the function for heatmap construction
if __name__ == "__main__":
# Xml_dir = '/raida/wjc/CAMELYON16/testing/lesion_annotations'
#Xml_path = '/Users/liw17/Documents/WSI/lesion_annotations/tumor_026.xml'
# Xml_paths = glob.glob(osp.join(Xml_dir, '*.xml'))
# Xml_paths.sort()
# WSI_dir = '/raida/wjc/CAMELYON16/testing/images'
#WSI_path = osp.join(WSI_dir, 'tumor_026.tif')
# WSI_paths = glob.glob(osp.join(WSI_dir, '*.tif'))
# WSI_paths.sort()
# Mask_truth_dir = '/raidb/wli/Final_Results/Display/home_made_mask_files_32xdownsample'
# Mask_truth_path = osp.join(Mask_truth_dir, 'test_002_truth_16.tif')
#Mask_truth_path = '/Users/liw17/Documents/WSI/mask_asap/tumor_026_mask.tif'
# Mask_truth_paths = glob.glob(osp.join(Mask_truth_dir, '*.tif'))
# Mask_truth_paths.sort()
# Heatmap_dir = '/raidb/wli/Final_Results/Heat_map/Method_II/color_noise_color_normalization/Method_II_Model_I_norm/test_0506'
# Heatmap_path = osp.join(Heatmap_dir, 'test_002.npy')
#Heatmap_path = '/Volumes/ExFAT-wzl/heat_map/tumor/tumor_026.npy'
# Heatmap_paths = glob.glob(osp.join(Heatmap_dir, '*.npy'))
# Heatmap_paths.sort()
# Dimension_dir = '/raidb/wli/Final_Results/Display/pred_dim_0314/testing/'
#Dimension_path = osp.join(Dimension_dir, 'dimensions_test_002.npy')
#Dimension_path = '/Users/liw17/Documents/pred_dim_0314/training-updated/tumor/dimensions/tumor_026.npy'
# Dimension_paths = glob.glob(osp.join(Dimension_dir, '*.npy'))
# Dimension_paths.sort()
Xml_dir = '/raida/wjc/CAMELYON16/testing/lesion_annotations'
Xml_path = '/raida/wjc/CAMELYON16/testing/lesion_annotations/tumor_026.xml'
Xml_paths = glob.glob(osp.join(Xml_dir, '*.xml'))
WSI_dir = '/raida/wjc/CAMELYON16/training/tumor'
WSI_path = osp.join(WSI_dir, 'tumor_026.tif')
WSI_paths = glob.glob(osp.join(WSI_dir, '*.tif'))
#Mask_truth_dir = '/Users/liw17/Documents/WSI/'
#Mask_truth_path = osp.join(Mask_truth_dir, 'test_002_truth_16.tif')
Mask_truth_path = '/raida/wjc/CAMELYON16/training/masking/tumor_026_mask.tif'
#Mask_truth_paths = glob.glob(osp.join(Mask_truth_dir, '*.tif'))
#Heatmap_dir = '/Volumes/ExFAT-wzl/heat_map/test'
#Heatmap_path = osp.join(Heatmap_dir, 'test_002.npy')
Heatmap_path = '/raidb/wli/Final_Results/Heat_map/Method_II/color_noise_color_normalization/Method_II_Model_I_norm/tumor_0506/tumor_026.npy'
#Heatmap_paths = glob.glob(osp.join(Heatmap_dir, '*.npy'))
Dimension_dir = '/raidb/wli/Final_Results/Display/pred_dim_0314/testing/pred_dim_0314/testing/'
#Dimension_path = osp.join(Dimension_dir, 'dimensions_test_002.npy')
Dimension_path = '/raidb/wli/Final_Results/Display/pred_dim_0314/training-updated/tumor/dimensions/tumor_026.npy'
#Dimension_paths = glob.glob(osp.join(Dimension_dir, '*.npy'))
# for wsi_image_path in WSI_paths:
# print(wsi_image_path)
# Xml_path_new = [x for x in Xml_paths if re.search(
# osp.splitext(osp.basename(wsi_image_path))[0], x)]
# Xml_path = Xml_path_new[0]
# Dimension_path_new = [x for x in Dimension_paths if re.search(
# osp.splitext(osp.basename(wsi_image_path))[0], x)]
# Dimension_path = Dimension_path_new[0]
# Mask_truth_path_new = [x for x in Mask_truth_paths if
# re.search(osp.splitext(osp.basename(wsi_image_path))[0], x)]
# Mask_truth_path = Mask_truth_path_new[0]
# Heatmap_path_new = [x for x in Heatmap_paths if re.search(
# osp.splitext(osp.basename(wsi_image_path))[0], x)]
# Heatmap_path = Heatmap_path_new[0]
# viewer = WSI_viewer_new(wsi_image_path, Xml_path,
# Dimension_path, Mask_truth_path, Heatmap_path)
# viewer.slide_level = 5
# viewer.PPI = 150
# viewer.display_heatmap()
viewer = WSI_viewer_new(
WSI_path, Xml_path, Dimension_path, Mask_truth_path, Heatmap_path)
viewer.display_heatmap()
``` |
{
"source": "3dimaging/FLL2021",
"score": 4
} |
#### File: FLL2021/Week-23/ball_sprite.py
```python
import pygame, sys
pygame.init()
screen = pygame.display.set_mode([640,480])
screen.fill([255, 255, 255])
class Ball(pygame.sprite.Sprite):
def __init__(self, image, location, speed):
pygame.sprite.Sprite.__init__(self)
self.image = pygame.image.load(image)
self.rect = self.image.get_rect()
self.rect.left, self.rect.top = location
self.speed = speed
def move(self):
self.rect = self.rect.move(self.speed)
if self.rect.left < 0 or self.rect.right > 640:
self.speed[0] = -self.speed[0]
if self.rect.top < 0 or self.rect.bottom > 480:
self.speed[1] = -self.speed[1]
img_file = "wackyball.bmp"
ball = Ball(img_file, [200, 200], [3, 3])
# Create a single ball sprite from the image file
running = True
while running:
for event in pygame.event.get():
if event.type == pygame.QUIT:
running = False
pygame.time.delay(20)
# Redraws the screen
screen.fill([255, 255, 255])
ball.move()
screen.blit(ball.image, ball.rect)
pygame.display.flip()
pygame.quit()
``` |
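The original comment about adding balls to a list hints at the multi-ball variant of this exercise; a hedged sketch of that version, reusing the `Ball` class and the pygame setup from the script above, could look like the following.
```python
# Hypothetical multi-ball variant of the loop above: keep several Ball
# sprites in a plain list and move/draw each one every frame.
balls = []
for row in range(2):
    for col in range(3):
        balls.append(Ball(img_file, [col * 100 + 20, row * 100 + 20], [3, 3]))

running = True
while running:
    for event in pygame.event.get():
        if event.type == pygame.QUIT:
            running = False
    pygame.time.delay(20)
    screen.fill([255, 255, 255])
    for ball in balls:
        ball.move()
        screen.blit(ball.image, ball.rect)
    pygame.display.flip()
pygame.quit()
```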
{
"source": "3Dit-GmbH/Govie-Tools",
"score": 2
} |
#### File: 3Dit-GmbH/Govie-Tools/functions.py
```python
import bpy
import subprocess
import atexit
server_process = None
O = bpy.ops
def select_object(obj):
C = bpy.context
O.object.select_all(action='DESELECT')
C.view_layer.objects.active = obj
obj.select_set(True)
def start_server(server_path,file_path):
global server_process
if server_process is not None:
stop_server()
server_process = subprocess.Popen([bpy.app.binary_path_python, server_path, file_path])
def stop_server():
global server_process
if server_process is not None:
print("Closed process " + str(server_process.pid))
server_process.kill()
atexit.register(stop_server)
``` |
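A brief hedged sketch of how these helpers might be called from within Blender; the script and export paths are placeholders, and the object name "Cube" is only an example.
```python
# Hypothetical usage inside Blender; paths and object name are placeholders.
import bpy
from functions import select_object, start_server, stop_server

select_object(bpy.data.objects["Cube"])                        # make "Cube" the active, selected object
start_server("/path/to/preview_server.py", "/tmp/export.glb")  # spawn the preview server as a subprocess
# ... preview the exported file in a browser ...
stop_server()                                                  # kill the subprocess when done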
{
"source": "3djake/mirrorcast",
"score": 3
} |
#### File: opt/mirrorcast/hosts.py
```python
import logging, os, csv, logging.handlers
mirror_logger = logging.getLogger()
mirror_logger.setLevel(logging.DEBUG)
handler = logging.handlers.SysLogHandler(address = '/dev/log')
formatter = logging.Formatter(' mirrorcast - %(name)s - %(levelname)s - %(message)s')
handler.setFormatter(formatter)
mirror_logger.addHandler(handler)
#logging.basicConfig(filename='/opt/mirrorcast/mirrorcast.log',level=logging.DEBUG,format='%(asctime)s %(message)s', datefmt='%d/%m/%Y %I:%M:%S %p')
class Hosts():
def __init__(self):
self.receivers = []
self.receiver = "None"
#load list of receivers from file
try:
with open(os.path.dirname(os.path.abspath(__file__)) + "/receivers") as csvfile:
file = csv.DictReader(csvfile)
for line in file:
self.receivers.append(line)
except:
mirror_logger.error("Failed to load host names")
exit(0)
csvfile.close()
self.aspect = self.receivers[0]['aspect']
#set receiver to the one picked by the user
def set_receiver(self, but, name):
self.receiver = str(but.get_label())
for i in self.receivers:
if i['host'] == self.receiver and but.get_active():
self.aspect = i['aspect']
mirror_logger.info("Receiver set to: " + i['host'] + " Receivers aspect: " + self.aspect)
return
if but.get_active():
self.receiver = "None"
self.aspect = "16:9"
mirror_logger.info("Receiver set to: " + self.receiver)
```
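The class reads a `receivers` CSV with `host` and `aspect` columns, where rows whose `aspect` is `sub` become sub-menu headings in the client's "Cast To" menu. A hedged example of what that file might look like (host names are placeholders):
```python
# Hypothetical /opt/mirrorcast/receivers file consumed by Hosts() above.
# Rows with aspect 'sub' act as sub-menu headings in the client.
example_receivers_csv = """host,aspect
Science Block,sub
lab-projector-1,16:9
lab-projector-2,4:3
library-display,16:9
"""
with open('receivers', 'w') as f:
    f.write(example_receivers_csv)
```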
#### File: opt/mirrorcast/media.py
```python
from tkinter import *
import socket
class Media(object):
def __init__(self,master):
self.value=""
self.receiver="None"
top=self.top=master
topframe = Frame(top)
topframe.pack()
self.l=Label(topframe,text="These are controls for the Receiving Device")
self.l.pack()
self.l3=Label(topframe,text="__________________________________________")
self.l3.pack()
self.l2=Label(topframe,text="Note: It may take up to 15 seconds to start your media on the receiver")
self.l2.pack()
self.l4=Label(topframe,text="This feature is experimental, if you file does not play please close this window and try again.")
self.l4.pack()
controls = Frame(top)
controls.pack(side=BOTTOM)
self.state=Label(controls,textvariable="")
self.backb=Button(controls,text='Rewind',command=self.back)
self.playb=Button(controls,text='Play/Pause',command=self.play)
self.forwardb=Button(controls,text='Fast-Forward',command=self.forward)
self.stopb=Button(controls,text='Stop',command=self.stop)
self.volb = Scale(controls, from_=-2500, to=400, orient=HORIZONTAL, label="Volume", showvalue=0, command=self.vol)
self.state.pack(side=LEFT)
self.backb.pack(side=LEFT)
self.playb.pack(side=LEFT)
self.forwardb.pack(side=LEFT)
self.audiobackb=Button(controls,text='Audio Track Back',command=self.audioback)
self.audioforwb=Button(controls,text='Audio Track Up',command=self.audioforw)
self.audioforwb.pack(side=LEFT)
self.audiobackb.pack(side=LEFT)
self.volb.pack()
def vol(self, vol):
cmd = "tube-vol,"
self.send_cmd(cmd)
return
def audioforw(self):
cmd = "tube-track-up,"
self.send_cmd(cmd)
return
def audioback(self):
cmd = "tube-track-down,"
self.send_cmd(cmd)
return
def play(self):
cmd = "tube-pause,"
self.send_cmd(cmd)
return
def back(self):
cmd = "tube-back,"
self.send_cmd(cmd)
return
def forward(self):
cmd = "tube-forward,"
self.send_cmd(cmd)
return
def stop(self):
cmd = "tube-stop,"
self.send_cmd(cmd)
return
def send_cmd(self, cmd):
if cmd == "tube-vol,":
command = cmd + socket.gethostname() + "," + str(self.volb.get())
else:
command = cmd + socket.gethostname()
try:
print(cmd)
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
sock.settimeout(5)
sock.connect((self.receiver, 8092))
sock.settimeout(30)
sock.send(command.encode('ascii'))
sock.close()
except:
return False
def on_closing(self):
cmd = "tube-stop,"
self.send_cmd(cmd)
```
#### File: opt/mirrorcast/mirrorcast-client.py
```python
import socket, gi, subprocess, time, os, threading, logging, dbus,logging.handlers
from hosts import Hosts as hosts
from displays import Displays
from audio import Audio
from tube import Tube
from media import Media
from dvd import Dvd
from tkinter import *
from tkinter.filedialog import askopenfilename
gi.require_version('AppIndicator3', '0.1')
gi.require_version('Gtk', '3.0')
gi.require_version('Notify', '0.7')
from gi.repository import Gtk as gtk
from gi.repository import AppIndicator3 as appindicator
from gi.repository import Notify as notify
from gi.repository import GObject as gobject
from dbus.mainloop.glib import DBusGMainLoop
mirror_logger = logging.getLogger()
mirror_logger.setLevel(logging.DEBUG)
handler = logging.handlers.SysLogHandler(address = '/dev/log')
formatter = logging.Formatter(' mirrorcast - %(name)s - %(levelname)s - %(message)s')
handler.setFormatter(formatter)
mirror_logger.addHandler(handler)
class TrayMenu:
def __init__(self):
mirror_logger.info("Started Mirrorcast")
self.indicator = appindicator.Indicator.new("mirrorMenu", os.path.abspath('/opt/mirrorcast/mirrorcast_tray.png'), appindicator.IndicatorCategory.SYSTEM_SERVICES)
self.indicator.set_status(appindicator.IndicatorStatus.ACTIVE)
#A string so we know what the user is doing
self.state = "stopped"
self.menu = gtk.Menu()
#Set up menu
item_start= gtk.MenuItem('Start Mirroring')
item_start.connect('activate', self.start)
self.menu.append(item_start)
#Media Sub Menu
self.media_sub = gtk.Menu()
item_media= gtk.MenuItem('Play Media (Experimental)')
item_media.set_submenu(self.media_sub)
item_file = gtk.MenuItem('Media File')
item_file.connect('activate', self.file)
self.media_sub.append(item_file)
item_dvd = gtk.MenuItem('Play DVD')
item_dvd.connect('activate', self.dvd)
self.media_sub.append(item_dvd)
item_youtube = gtk.MenuItem('Youtube URL')
item_youtube.connect('activate', self.youtube)
self.media_sub.append(item_youtube)
self.menu.append(item_media)
item_freeze = gtk.MenuItem('Freeze')
item_freeze.connect('activate', self.freeze)
self.menu.append(item_freeze)
item_update = gtk.MenuItem('Update Mirrorcast')
item_update.connect('activate', self.update)
#Update feature is disabled for now
#self.menu.append(item_update)
item_quit = gtk.MenuItem('Quit')
item_quit.connect('activate', self.quit)
self.menu.append(item_quit)
sep = gtk.SeparatorMenuItem()
self.menu.append(sep)
self.outputSub = gtk.Menu()
output = gtk.MenuItem("Cast To")
output.set_submenu(self.outputSub)
        '''Receivers/Hosts Menu'''
        self.hosts = hosts()
        #Variables for sorting receivers into sub-menus
self.list_receivers = []
self.sortedMenu = []
sortSub = []
subitems = []
sortInd = 0
sortInd2 = 0
#Add receivers to menu
self.list_receivers.append(gtk.RadioMenuItem('None'))
for ind, i in enumerate(self.hosts.receivers):
            #Allow user to sort their receivers into organised sublists
if i['aspect'] == "sub":
self.sortedMenu.append(gtk.Menu())
sortSub.append(gtk.MenuItem(i['host']))
sortSub[sortInd].set_submenu(self.sortedMenu[sortInd])
self.outputSub.append(sortSub[sortInd])
sortInd = sortInd + 1
elif sortInd > 0:
try:
                    subitems.append(gtk.RadioMenuItem(str(i['host']),group=subitems[sortInd2-1]))
except:
subitems.append(gtk.RadioMenuItem(str(i['host']),group=self.list_receivers[0]))
subitems[sortInd2].connect('toggled', self.hosts.set_receiver, subitems[sortInd2].get_label())
self.sortedMenu[sortInd-1].append(subitems[sortInd2])
sortInd2 = sortInd2 + 1
else:
self.list_receivers.append(gtk.RadioMenuItem(str(i['host']),group=self.list_receivers[ind-1]))
for i in self.list_receivers:
self.outputSub.append(i)
i.connect('toggled', self.hosts.set_receiver, i.get_label())
self.list_receivers[0].set_active(True)
self.Display = Displays()
self.displaysSub = gtk.Menu()
displays = gtk.MenuItem("Select Display to Mirror")
displays.set_submenu(self.displaysSub)
self.list_displays = []
#Add displays/monitors to menu
for ind, i in enumerate(self.Display.monitors):
if ind != 0:
self.list_displays.append(gtk.RadioMenuItem(str(i[0]),group=self.list_displays[ind-1]))
else:
self.list_displays.append(gtk.RadioMenuItem(self.Display.monitors[0][0]))
for i in self.list_displays:
self.displaysSub.append(i)
i.connect('toggled', self.Display.set_display, i.get_label())
self.list_displays[0].set_active(True)
self.menu.append(output)
self.menu.append(displays)
self.menu.show_all()
self.indicator.set_menu(self.menu)
self.sound = Audio()
self.ffmpeg = None
self.srt = None
self.vlc = None
self.sleep = dbus_listen(item_start, self)
def close_stream(self):
if self.ffmpeg is not None:
if self.ffmpeg.poll() is None:
self.ffmpeg.terminate()
self.ffmpeg.wait()
if self.srt is not None:
if self.srt.poll() is None:
self.srt.terminate()
self.srt.wait()
#the following function is run when the user clicks "Start/Stop Mirroring"
def start(self, w):
notify.init("mirrorMenu")
mirror_logger.info("Detected Audio Device: " + str(self.sound.audioDev))
if w.get_label() == 'Start Mirroring':
#If the user did not select a receiver
if self.hosts.receiver == "None":
notify.init("mirrorMenu")
notify.Notification.new("Error", "You did not select a receiver", None).show()
return
notify.Notification.new("Connecting to Receiver", "Attempting to establish connection to " + self.hosts.receiver, None).show()
mirror_logger.info("User is trying to connect to " + self.hosts.receiver)
#If we cannot connect to the receiver
try:
if self.connect("play-srt,") == False:
notify.init("mirrorMenu")
notify.Notification.new("Connection Error", "Could not connect to" + self.hosts.receiver + ". please try again and if problem persists then please contact your system administrator.", None).show()
mirror_logger.warning("Failed to connect to " + self.hosts.receiver)
return
except:
notify.Notification.new("Connection Error", "Could not connect to" + self.hosts.receiver + ". please try again and if problem persists then please contact your system administrator.", None).show()
#Create and start loop that checks if receiver can still be reached
mirror_logger.info("User connected to " + self.hosts.receiver)
w.set_label("Stop Mirroring")
self.start_casting()
#Start a loop that will keep checking if the client can still reach the server
connection=threading.Thread(target=self.alive,args=[w])
connection.start()
elif w.get_label() == 'Stop Mirroring':
self.state = "stopped"
self.sound.audio(False)
self.Display.display(False, self.hosts.aspect)
self.close_stream()
w.set_label('Start Mirroring')
return
def start_casting(self):
res = self.Display.resolution
self.sleep.sleep = False
        #If receiver is set to display 4:3 and the client is 16:9 then change screen resolution to 1024x768
if (self.hosts.aspect == "wide" or self.hosts.aspect == "16:9" or self.hosts.aspect == "16:10") and self.Display.get_ratio(res) == "16:9":
self.hosts.aspect = "16:9"
else:
self.hosts.aspect = "4:3"
if self.Display.get_ratio(self.Display.resolution) != "4:3":
res = "1024x768"
try:
subprocess.call("xrandr --output " + self.Display.type + " --mode 1024x768", shell=True)
except:
mirror_logger.warning("Could now change screen resolution to 4:3")
notify.Notification.new("Resolution Error", "Failed to change screen resolution to match receiver.", None).show()
return
self.sound.audio(True)
#Start encoding and sending the stream to the receiver
self.state = "casting"
time.sleep(1)
display = os.environ['DISPLAY']#get display of current user
self.ffmpeg = subprocess.Popen(["ffmpeg", "-loglevel", "warning", "-f", "pulse", "-ac", "2", "-i", "default", "-async", "1", "-f", "x11grab", "-r", "25", "-s", str(res), "-i", str(display) + "+" + str(int(self.Display.xoffset)) + "," + str(self.Display.yoffset), "-aspect", self.hosts.aspect, "-vcodec", "libx264", "-pix_fmt", "yuv420p", "-tune", "zerolatency", "-preset", "ultrafast", "-vf", "scale=" + str(res).replace('x', ':'), "-x264opts", "vbv-maxrate=7700:vbv-bufsize=1000:intra-refresh=1:slice-max-size=500:keyint=10:ref=1", "-f", "mpegts", "-"], stdout=subprocess.PIPE)
self.srt = subprocess.Popen(["stransmit", "file://con", "srt://{}:8090?mode=client&pbkeylen=0".format(self.hosts.receiver)], stdin=self.ffmpeg.stdout, stdout=subprocess.PIPE)
self.sound.monitor_audio()
notify.Notification.new("Connection Established", "Connection to " + self.hosts.receiver + " established.", None).show()
def alive(self, w):
mirror_logger.info("Sending Alive Packets")
timestamp = time.localtime()
timeout = 10
retries = 1
i=0
command = "active," + socket.gethostname()
while True:
try:
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
sock.settimeout(5)
sock.connect((self.hosts.receiver, 8092))
sock.settimeout(None)
#If the user's computer is going to sleep
if self.state == "stopped" or self.sleep.sleep == True:
mirror_logger.info("User stopped casting")
command = "stop," + socket.gethostname()
sock.send(command.encode('ascii'))
sock.close()
return
if self.state == "freeze":
logging.info("User freezed their screen")
notify.Notification.new("Freezing", "Your screen will freeze in 10 seconds.", None).show()
command = "freezee," + socket.gethostname()
time.sleep(10)
sock.send(command.encode('ascii'))
status = sock.recv(1024)
if status.decode('ascii') == "paused":
self.close_stream()
w.set_label('Start Mirroring')
notify.init("mirrorMenu")
notify.Notification.new("Freezed", "You have frozen your current desktop, click Start Mirroring to resume", None).show()
self.state = "stopped"
time.sleep(1)
self.sound.audio(False)
self.Display.display(False, self.hosts.aspect)
sock.close()
return
sock.send(command.encode('ascii'))
status = sock.recv(1024)
if status.decode('ascii') == "ok":
timestamp = time.localtime()
sock.close()
except:
#time.sleep(1)
if (time.mktime(time.localtime()) - time.mktime(timestamp)) >= timeout and self.state != "stopped" and self.sleep.sleep != True:
i = i + 1
if i == 1:
mirror_logger.warning("Attempting to reconnect to " + self.hosts.receiver)
self.close_stream()
notify.Notification.new("Reconnecting", "Connection to " + self.hosts.receiver + " has been lost. Attempting to reconnect.", None).show()
time.sleep(2)
if self.connect("play,") == True:
mirror_logger.info("Reconnected to " + self.hosts.receiver)
self.start_casting()
i = 0
if i == retries:
self.state = "stopped"
w.set_label('Start Mirroring')
notify.init("mirrorMenu")
notify.Notification.new("Connection Lost", "Connection to " + self.hosts.receiver + " timed out.", None).show()
mirror_logger.warning("Connection Lost: Connection to " + self.hosts.receiver + " timed out.")
return
def quit(self, w):
self.sound.audio(False)
self.state = "stopped"
self.Display.display(False, self.hosts.aspect)
#kill ffmpeg incase user forgot to click "stop"
self.close_stream()
gtk.main_quit()
def freeze(self, w):
if self.state == "stopped":
notify.init("mirrorMenu")
notify.Notification.new("Not Mirroring", "To freeze your screen you need to Start Mirroring.", None).show()
return
self.state = "freeze"
return
def update(self, w):
if self.state == "casting":
notify.init("mirrorMenu")
notify.Notification.new("Cannot Update", "Please stop mirroring before you try to update", None).show()
return
subprocess.call("/opt/mirrorcast/mirrorcast-autoupdater.sh", shell=True)
gtk.main_quit()
return
def file(self, w):
if self.state == "casting":
notify.init("mirrorMenu")
notify.Notification.new("Error", "Please stop mirroring before you try to use this feature", None).show()
else:
if self.hosts.receiver == "None":
notify.init("mirrorMenu")
notify.Notification.new("Error", "Please select a receiving device", None).show()
return
if self.connect("media,") == False:
notify.init("mirrorMenu")
notify.Notification.new("Connection Error", "Could not connect to" + self.hosts.receiver + ". please try again and if problem persists then please contact your system administrator.", None).show()
mirror_logger.warning("Failed to connect to " + self.hosts.receiver)
return
mirror_logger.info("User connected to " + self.hosts.receiver + " to play media file")
select = Tk()
select.withdraw()
types= [("Video Files", ("*.mp4","*.avi","*.mov","*.mkv","*.flv","*.mpeg","*.mpg","*.wmv", "*.webm", "*.ogg", "*.ogv")), ("All files", "*.*")]
file = askopenfilename(filetypes=types)
select.destroy()
if file == () or file == None or file == "":
return
newpath = r'/tmp/media'
if not os.path.exists(newpath):
os.makedirs(newpath)
if os.path.isfile(file):
if not os.path.isfile(newpath + '/' + os.path.basename(file)):
os.symlink(str(file), "/tmp/media/" + os.path.basename(file))
self.ffmpeg = subprocess.Popen(["http-server", "/tmp/media", "-p", "8090"], stdout=subprocess.PIPE)
time.sleep(2)
self.send_cmd("media-start," + os.path.basename(file) + ",")
mediaui(self.hosts.receiver)
self.close_stream()
def dvd(self, w):
if self.state == "casting":
notify.init("mirrorMenu")
notify.Notification.new("Error", "Please stop mirroring before you try to use this feature", None).show()
else:
if self.hosts.receiver == "None":
notify.init("mirrorMenu")
notify.Notification.new("Error", "Please select a receiving device", None).show()
return
if self.connect("media,") == False:
notify.init("mirrorMenu")
notify.Notification.new("Connection Error", "Could not connect to" + self.hosts.receiver + ". please try again and if problem persists then please contact your system administrator.", None).show()
mirror_logger.warning("Failed to connect to " + self.hosts.receiver)
return
mirror_logger.info("User connected to " + self.hosts.receiver + " to stream DVD")
            #Use lsdvd to retrieve keys for encrypted dvd's, requires libdvd-pkg and lsdvd
try:
subprocess.check_output("lsdvd", shell=True)
except:
notify.init("mirrorMenu")
notify.Notification.new("Error", "Please insert a DVD first or wait for DVD to load.", None).show()
return
self.send_cmd("dvd-start,")
ui = dvdui(self.hosts.receiver)
def youtube(self, w):
if self.state == "casting":
notify.init("mirrorMenu")
notify.Notification.new("Error", "Please stop mirroring before you try to use this feature", None).show()
else:
if self.hosts.receiver == "None":
notify.init("mirrorMenu")
notify.Notification.new("Error", "Please select a receiving device", None).show()
if self.connect("tu-media,") == False:
notify.init("mirrorMenu")
notify.Notification.new("Connection Error", "Could not connect to" + self.hosts.receiver + ". please try again and if problem persists then please contact your system administrator.", None).show()
mirror_logger.warning("Failed to connect to " + self.hosts.receiver)
return
ui = tubeui(self.hosts.receiver)
def connect(self, cmd):
command = cmd + socket.gethostname()
try:
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
sock.settimeout(5)
sock.connect((self.hosts.receiver, 8092))
sock.settimeout(30)
sock.send(command.encode('ascii'))
while True:
status = sock.recv(8024)
#if server returns busy then some one else is already using this receiver
if status.decode('ascii') == "busy":
notify.init("mirrorMenu")
mirror_logger.info("User attempted to connect to " + self.hosts.receiver + " but receiver was busy")
notify.Notification.new("Error", "Sorry some one else is already connected to this receiver, please try again later.", None).show()
sock.close()
return False
#If client succeeds in connecting to receiver
elif status.decode('ascii') == "ready":
mirror_logger.info("Server is ready")
break
sock.close()
except:
return False
if cmd == "play,":
self.state = "casting"
return True
def send_cmd(self, cmd):
command = cmd + socket.gethostname()
try:
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
sock.settimeout(5)
sock.connect((self.hosts.receiver, 8092))
sock.settimeout(None)
sock.send(command.encode('ascii'))
sock.close()
except:
return False
class tubeui():
def __init__(self, receiver):
self.root=Tk()
self.m=Tube(self.root)
self.m.receiver=receiver
self.root.title("Play Youtube URL")
self.root.protocol("WM_DELETE_WINDOW", self.on_exit)
self.root.mainloop()
def on_exit(self):
self.m.on_closing()
self.root.destroy()
class mediaui():
def __init__(self, receiver):
self.root=Tk()
self.m=Media(self.root)
self.m.receiver=receiver
self.root.title("Receiver Controls")
self.root.protocol("WM_DELETE_WINDOW", self.on_exit)
self.root.mainloop()
def on_exit(self):
self.m.on_closing()
self.root.destroy()
#reactor.stop()
class dvdui():
def __init__(self, receiver):
self.root=Tk()
self.m=Dvd(self.root)
self.m.receiver=receiver
self.root.title("Receiver Controls")
self.root.protocol("WM_DELETE_WINDOW", self.on_exit)
self.root.mainloop()
def on_exit(self):
self.m.on_closing()
self.root.destroy()
#reactor.stop()
class dbus_listen():
def handle_sleep(self, *args):
self.w.set_label('Start Mirroring')
mirror_logger.info("User computer is going to sleep, killing ffmpeg")
self.tray.close_stream()
self.sleep = True
def __init__(self, w, tray):
self.sleep = False
self.w = w
self.tray = tray
DBusGMainLoop(set_as_default=True) # integrate into gobject main loop
bus = dbus.SystemBus() # connect to system wide dbus
bus.add_signal_receiver( # define the signal to listen to
self.handle_sleep, # callback function
'PrepareForSleep', # signal name
'org.freedesktop.login1.Manager', # interface
'org.freedesktop.login1', # bus name
)
def main():
gtk.main()
return 0
if __name__ == "__main__":
indicator = TrayMenu()
main()
```
#### File: opt/mirrorcast/tube.py
```python
from tkinter import *
import socket
class Tube(object):
def __init__(self,master):
self.receiver="None"
top=self.top=master
topframe = Frame(top)
topframe.pack()
self.l=Label(topframe,text="This will play videos from both youtube and dailymotion")
self.l.pack()
self.l2=Label(topframe,text="Copy(ctrl+c) and Paste(Ctrl+v) the Youtube URL into the text box and then click 'load'")
self.l2.pack()
self.l3=Label(topframe,text="While the video is loading, the 'load' button will remain greyed out, this may take a few seconds.")
self.l3.pack()
self.e=Entry(topframe, width=80)
self.e.pack(side=LEFT)
self.loadb=Button(topframe,text='load',command=self.load)
self.loadb.pack(side=LEFT)
controls = Frame(top)
controls.pack(side=BOTTOM)
self.state=Label(controls,textvariable="")
self.backb=Button(controls,text='Rewind',command=self.back)
self.playb=Button(controls,text='Play/Pause',command=self.play)
self.forwardb=Button(controls,text='Fast-Forward',command=self.forward)
self.stopb=Button(controls,text='Stop',command=self.stop)
self.volb = Scale(controls, from_=-2500, to=400, orient=HORIZONTAL, label="Volume", showvalue=0, command=self.vol)
self.state.pack(side=LEFT)
self.backb.pack(side=LEFT)
self.playb.pack(side=LEFT)
self.forwardb.pack(side=LEFT)
self.volb.set(0)
self.volb.pack(side=LEFT)
self.stopb.pack(side=LEFT)
def load(self):
cmd = "tube-load,"
self.value=self.e.get()
self.send_cmd(cmd, self.value)
return
def play(self):
cmd = "tube-pause,"
self.value=self.e.get()
self.send_cmd(cmd, self.value)
return
def back(self):
cmd = "tube-back,"
self.send_cmd(cmd, self.value)
return
def forward(self):
cmd = "tube-forward,"
self.send_cmd(cmd, self.value)
return
def stop(self):
cmd = "tube-stop,"
self.value=""
self.send_cmd(cmd, self.value)
self.set_state("")
return
def vol(self, vol):
cmd = "tube-vol,"
self.send_cmd(cmd, self.volb.get())
return
def send_cmd(self, cmd, url):
if cmd == "tube-vol,":
command = cmd + socket.gethostname() + "," + str(self.volb.get())
else:
command = cmd + socket.gethostname() + "," + url
try:
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
sock.settimeout(5)
sock.connect((self.receiver, 8092))
sock.settimeout(None)
sock.send(command.encode('ascii'))
if cmd == "tube-load,":
status = sock.recv(8024)
if status.decode('ascii') == "ready":
self.set_state("Playing")
elif status.decode('ascii') == "error":
self.set_state("Invalid URL: Please check URL is valid")
sock.close()
except:
self.set_state("Failed, Please make sure you copied the URL correctly")
return False
def set_state(self, state):
self.state.configure(text=state)
return
def on_closing(self):
cmd = "tube-stop,"
self.value=""
self.send_cmd(cmd, self.value)
self.set_state("")
``` |
{
"source": "3DLables/3DLabeler",
"score": 4
} |
#### File: 3DLabeler/ThreeDLabeler/images.py
```python
import numpy as np
from scipy import ndimage
import matplotlib.pyplot as plt
class Image:
"""
Processor class for annotating 3D scans.
Arguments:
voxels: a 3D numpy array
voxel_size: a tuple/list of three numbers indicating the voxel size in mm, cm, etc.
point_position: the position in 3D of each point of interest.
See tag_parser for more info.
"""
def __init__(self, voxels, point_position, voxel_size=(1, 1, 1)):
self.voxels = voxels
self.voxel_size = voxel_size
self.point_position = point_position / voxel_size
def cube(self):
"""Returns a cube image with all dimensions equal to the longest."""
dims = self.voxels.shape
max_dim = max(dims)
x_target = (max_dim - dims[0]) / 2
y_target = (max_dim - dims[1]) / 2
z_target = (max_dim - dims[2]) / 2
self.voxels = np.pad(self.voxels,
((int(np.ceil(x_target)),
int(np.floor(x_target))),
(int(np.ceil(y_target)),
int(np.floor(y_target))),
(int(np.ceil(z_target)),
int(np.floor(z_target)))),
'constant',
constant_values=(0))
self.point_position = self.point_position + [np.ceil(z_target),
np.ceil(y_target),
np.ceil(x_target)]
return(self)
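# Worked example of the padding above (numbers are illustrative): voxels of shape (100, 120, 80)
# have max_dim 120, so x is padded by (10, 10), y by (0, 0) and z by (20, 20), producing a
# (120, 120, 120) cube, and point_position is shifted by [20, 0, 10] (the leading pads as added above).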
def scale(self, size=128):
"""Scales a 3D Numpy array to the desired dimensions
Keyword Arguments:
size {int} -- The target size (in voxels) that the longest dimension should be scaled to.
It uses the scipy.ndimage.zoom function. (default: {128})
"""
scale_factor = size / max(self.voxels.shape)
self.voxels = ndimage.zoom(self.voxels, scale_factor)
self.point_position = self.point_position * scale_factor
self.voxel_size = False # To ignore this
return(self)
def plot(self, vcol=1):
"""
Graphs the image and its key points. vcol selects the axis (view) to slice and iterate over.
"""
img = self.voxels
points = self.point_position
ax = []
fig = plt.figure(figsize=(9, 8))
# TODO make this settable in the function call
columns = 3
rows = 2
for i in range(points.shape[0]):
im_slice = int(np.round(points[i, vcol]))
if vcol == 0:
im = img[im_slice, :, :]
elif vcol == 1:
im = img[:, im_slice, :]
else:
im = img[:, :, im_slice]
plot_cols = np.array([0, 1, 2])
plot_cols = plot_cols[plot_cols != vcol]
ax.append(fig.add_subplot(rows, columns, i+1)) # set title
ax[-1].set_title("Image depth: " +
str(im_slice) +
'\n x-axis' +
str(np.round(points[i, min(plot_cols)])) +
'\n y-axis' +
str(np.round(points[i, max(plot_cols)])))
plt.imshow(im)
plt.plot(points[i, min(plot_cols)],
points[i, max(plot_cols)],
'ro')
# TODO Fix bug where points don't plot properly
# BUG
plt.show()
def _cube_points(self):
"""Cubes two dimensional array of key points for rotation
Returns:
numpy array -- A 3D Numpy array with points 1-n shown in 3D space
"""
cubedims = self.voxels.shape
points = self.point_position
points = np.rint(points).astype('int') # sets to int for indexing
arr = np.zeros((cubedims), dtype=int) # creates empty array
for i in range(self.point_position.shape[0]):
arr[points[i, 0], points[i, 1], points[i, 2]] = i+1
# +1 to avoid zeros
return arr
def _square_points(self, arr):
Takes a cubed point array and returns a 2D version of it with key
points
Arguments:
arr {3D Numpy Array} -- The 3D array with key points in it
Returns:
2D Numpy Array -- The x, y, z coordinates of each key point
Yields:
numpy array -- when used as an iterator. This may be a bug
"""
flatpoints = np.zeros((self.point_position.shape), dtype=int)
# double (()) to make it a tuple
npoints = flatpoints.shape[0]
for i in range(npoints):
flatpoints[i, :] = np.where(arr == i+1)
return flatpoints
def img_transformer(self, number_rot=24):
"""Generates 24 projections of a 3D image along with the key points
Returns:
ThreeDLabeler.Image -- The voxels and key points for a projection
of the image
"""
voxels = []
points = []
if number_rot == 24:
rot_fun = rotations24
elif number_rot == 4:
rot_fun = rotations4
elif number_rot == 2:
rot_fun = rotations2
else:
raise ValueError("Incorrect number or rotations, try 4, 24")
for i in rot_fun(self.voxels):
voxels.append(i)
for j in rot_fun(self._cube_points()):
points.append(self._square_points(j))
imgs = []
for i in range(number_rot):
imgs.append(Image(voxels[i], points[i]))
return imgs
# TODO Add possibility to not just cube an image
# TODO Add Storeage/writing functionality
def rotations24(polycube):
"""https://stackoverflow.com/
questions/33190042/how-to-calculate-all-24-rotations-of-3d-array"""
# imagine shape is pointing in axis 0 (up)
# 4 rotations about axis 0
yield from rotations_flip4(polycube, 0)
# rotate 180 about axis 1, now shape is pointing down in axis 0
# 4 rotations about axis 0
yield from rotations_flip4(rot90(polycube, 2, axis=1), 0)
# rotate 90 or 270 about axis 1, now shape is pointing in axis 2
# 8 rotations about axis 2
yield from rotations_flip4(rot90(polycube, axis=1), 2)
yield from rotations_flip4(rot90(polycube, -1, axis=1), 2)
# rotate about axis 2, now shape is pointing in axis 1
# 8 rotations about axis 1
yield from rotations_flip4(rot90(polycube, axis=2), 1)
yield from rotations_flip4(rot90(polycube, -1, axis=2), 1)
def rotations4(polycube):
yield polycube # identity (unchanged cube)
yield polycube[::-1, :, :]
yield polycube[:, ::-1, :]
yield polycube[..., ::-1]
def rotations2(polycube):
yield polycube # identity (unchanged cube)
yield polycube[::-1, :, :]
# yield polycube[:, ::-1, :]
# yield polycube[..., ::-1]
def rotations_flip4(polycube, axis):
"""List the four rotations of the given cube about the given axis."""
for i in range(4):
yield rot90(polycube, i, axis)
def rot90(m, k=1, axis=2):
"""Rotate an array k*90 degrees in the counter-clockwise direction
around the given axis
This differs from np's rot90 because it's 3D
"""
m = np.swapaxes(m, 2, axis)
m = np.rot90(m, k)
m = np.swapaxes(m, 2, axis)
return m
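# Minimal usage sketch (synthetic data; the array sizes and point coordinates are illustrative only):
if __name__ == "__main__":
    vol = np.zeros((64, 80, 48), dtype=float)                  # fake scan volume
    pts = np.array([[10.0, 20.0, 30.0], [5.0, 40.0, 12.0]])    # fake key points
    img = Image(vol, pts).cube().scale(size=32)                # pad to a cube, then downsample
    views = img.img_transformer(number_rot=4)                  # 4 flipped copies with matching points
    print(len(views), views[0].voxels.shape)                   # -> 4 (32, 32, 32)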
```
#### File: 3DLabeler/ThreeDLabeler/plotting.py
```python
import matplotlib.pyplot as plt
import numpy as np
def mri_plot(img, points, vcol=1, figsize=(9, 8)):
"""Graphs an points. pt_cols is used to set the cols to iterate
over (different views)
"""
ax = []
fig = plt.figure(figsize=figsize)
columns = 3
rows = 2
for i in range(points.shape[0]):
im_slice = int(np.round(points[i, vcol]))
if vcol == 0:
im = img[:, :, im_slice]
elif vcol == 1:
im = img[:, im_slice, :]
else:
im = img[im_slice, :, :]
ax.append(fig.add_subplot(rows, columns, i+1))
ax[-1].set_title("Image depth: "+str(im_slice)) # set title
plt.imshow(im)
plot_cols = np.array([0, 1, 2])
plot_cols = plot_cols[plot_cols != vcol]
plt.plot(points[i, min(plot_cols)], points[i, max(plot_cols)], 'ro')
plt.show()
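# Hypothetical call (shapes are illustrative): given a (D, H, W) volume `vol` and an
# (n_points, 3) array `pts` of voxel coordinates,
#   mri_plot(vol, pts, vcol=1)
# draws one subplot per key point, slicing the volume along the chosen axis.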
``` |
{
"source": "3dlg-hcvc/ANCSH-pytorch",
"score": 2
} |
#### File: ANCSH_lib/data/dataset.py
```python
import h5py
import numpy as np
import torch
from torch.utils.data import Dataset
class ANCSHDataset(Dataset):
def __init__(self, data_path, num_points):
self.f_data = h5py.File(data_path)
self.instances = sorted(self.f_data)
self.num_points = num_points
def __len__(self):
return len(self.instances)
def __getitem__(self, index):
id = self.instances[index]
ins = self.f_data[id]
# Get the points index used to sample points
input_points = ins['camcs_per_point'][:]
input_points_num = input_points.shape[0]
perm = np.random.permutation(input_points_num)[:self.num_points]
if self.num_points > input_points_num:
additional = np.random.choice(input_points_num, self.num_points - input_points_num, replace=True)
perm = np.concatenate((perm, additional))
assert perm.shape[0] == self.num_points, f'{perm.shape[0]}, {self.num_points}, {input_points_num}'
# Get the camcs_per_point
camcs_per_point = torch.tensor(input_points, dtype=torch.float32)[perm]
# Get all other items
gt_dict = {}
for k, v in ins.items():
if k == "camcs_per_point":
continue
elif "per_point" in k:
gt_dict[k] = torch.tensor(v[:], dtype=torch.float32)[perm]
else:
gt_dict[k] = torch.tensor(v[:], dtype=torch.float32)
return camcs_per_point, gt_dict, id
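# Usage sketch (the HDF5 path and sizes are assumptions, not taken from the repo config):
#   from torch.utils.data import DataLoader
#   dataset = ANCSHDataset("data/train.h5", num_points=1024)
#   loader = DataLoader(dataset, batch_size=8, shuffle=True)
#   camcs_per_point, gt_dict, ids = next(iter(loader))   # camcs_per_point: (8, 1024, 3) if each point stores xyz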
```
#### File: 3dlg-hcvc/ANCSH-pytorch/evaluate.py
```python
import logging
from time import time
import hydra
from hydra.utils import get_original_cwd
from omegaconf import DictConfig, OmegaConf
from ANCSH_lib import ANCSHEvaluator, utils
from tools.utils import io
log = logging.getLogger('evaluate')
@hydra.main(config_path="configs", config_name="evaluate")
def main(cfg: DictConfig):
OmegaConf.update(cfg, "paths.result_dir", io.to_abs_path(cfg.paths.result_dir, get_original_cwd()))
combined_result_path = cfg.optimization_result_path
num_parts = cfg.num_parts
log.info(f'Instances in dataset have {num_parts} parts')
evaluator = ANCSHEvaluator(cfg, combined_result_path, num_parts=num_parts)
evaluator.process_ANCSH(do_eval=True)
if __name__ == "__main__":
start = time()
main()
stop = time()
print(str(stop - start) + " seconds")
```
#### File: ANCSH-pytorch/preprocess/proc_stage1.py
```python
import os
import h5py
import yaml
import logging
import numpy as np
from PIL import Image
from scipy.spatial.transform import Rotation as R
from progress.bar import Bar
from multiprocessing import Pool, cpu_count
from omegaconf import OmegaConf
from tools.utils import io
# from tools.visualization import Viewer
from utils import DataLoader, URDFReader, DatasetName
log = logging.getLogger('proc_stage1')
class ProcStage1Impl:
def __init__(self, cfg):
self.output_path = cfg.output_path
self.tmp_dir = cfg.tmp_dir
self.render_cfg = cfg.render_cfg
self.rest_state_data_filename = cfg.rest_state_data_filename
self.width = self.render_cfg.width
self.height = self.render_cfg.height
self.dataset_name = cfg.dataset_name
def get_metadata(self, metadata_path, frame_index, num_parts):
metadata = {}
if DatasetName[self.dataset_name] == DatasetName.SAPIEN or \
DatasetName[self.dataset_name] == DatasetName.SHAPE2MOTION:
with open(metadata_path, "r") as meta_file:
metadata_all = yaml.load(meta_file, Loader=yaml.Loader)
frame_metadata = metadata_all[f'frame_{frame_index}']
metadata = {
'projMat': np.reshape(frame_metadata['projMat'], (4, 4), order='F'),
'viewMat': np.reshape(frame_metadata['viewMat'], (4, 4), order='F'),
'linkAbsPoses': []
}
num_links = len(frame_metadata['obj'])
if num_links < num_parts:
metadata['linkAbsPoses'].append(np.eye(4))
for link_idx in range(num_links):
position = frame_metadata['obj'][link_idx][4]
# x,y,z,w
quaternion = frame_metadata['obj'][link_idx][5]
orientation = R.from_quat(quaternion).as_matrix()
pose = np.eye(4)
pose[:3, :3] = orientation
pose[:3, 3] = position
metadata['linkAbsPoses'].append(pose)
return metadata
def __call__(self, idx, input_data):
output_filepath = os.path.splitext(self.output_path)[0] + f'_{idx}' + os.path.splitext(self.output_path)[-1]
h5file = h5py.File(output_filepath, 'w')
bar = Bar(f'Stage1 Processing chunk {idx}', max=len(input_data))
for index, input_each in input_data.iterrows():
depth_frame_path = os.path.join(self.render_cfg.render_dir, input_each['objectCat'],
input_each['objectId'], input_each['articulationId'],
self.render_cfg.depth_folder, input_each['depthFrame'])
mask_frame_path = os.path.join(self.render_cfg.render_dir, input_each['objectCat'],
input_each['objectId'], input_each['articulationId'],
self.render_cfg.mask_folder, input_each['maskFrame'])
metadata_path = os.path.join(self.render_cfg.render_dir, input_each['objectCat'],
input_each['objectId'], input_each['articulationId'],
input_each['metadata'])
tmp_data_dir = os.path.join(self.tmp_dir, input_each['objectCat'], input_each['objectId'])
rest_state_data_path = os.path.join(tmp_data_dir, self.rest_state_data_filename)
frame_index = int(input_each['depthFrame'].split(self.render_cfg.depth_ext)[0])
# float32 depth buffer, range from 0 to 1
depth_data = np.array(h5py.File(depth_frame_path, "r")["data"]).flatten()
# uint8 mask, invalid value is 255
mask_frame = np.asarray(Image.open(mask_frame_path))
rest_data_data = io.read_json(rest_state_data_path)
num_parts = len([link for link in rest_data_data['links'] if link and not link['virtual']])
assert depth_data.size == mask_frame.size
metadata = self.get_metadata(metadata_path, frame_index, num_parts)
x_range = np.linspace(-1, 1, self.width)
y_range = np.linspace(1, -1, self.height)
x, y = np.meshgrid(x_range, y_range)
x = x.flatten()
y = y.flatten()
z = 2.0 * depth_data - 1.0
# shape nx4
points_tmp = np.column_stack((x, y, z, np.ones(self.height * self.width)))
mask_tmp = mask_frame.flatten()
# points in clip space
points_clip = points_tmp[mask_tmp < 255]
link_mask = mask_tmp[mask_tmp < 255]
# check if unique value in mask match num parts
assert points_clip.shape[0] == link_mask.shape[0]
proj_mat = metadata['projMat']
view_mat = metadata['viewMat']
# transform points from clip space to camera space
# shape 4xn
points_camera = np.dot(np.linalg.inv(proj_mat), points_clip.transpose())
# homogeneous normalization
points_camera = points_camera / points_camera[-1, :]
# shape 4xn
points_world = np.dot(np.linalg.inv(view_mat), points_camera)
# transform links to rest state
points_rest_state = np.empty_like(points_world)
parts_camera2rest_state = []
for link_idx, link in enumerate(rest_data_data['links']):
if link['virtual']:
continue
link_points_world = points_world[:, link_mask == link_idx]
# virtual link link_index is -1
current_part_pose = metadata['linkAbsPoses'][link['part_index']]
rest_state_pose = np.reshape(link['abs_pose'], (4, 4), order='F')
transform2rest_state = np.dot(rest_state_pose, np.linalg.inv(current_part_pose))
link_points_rest_state = np.dot(transform2rest_state, link_points_world)
points_rest_state[:, link_mask == link_idx] = link_points_rest_state
# points in camera space to rest state
camera2rest_state = np.dot(transform2rest_state, np.linalg.inv(view_mat))
# shape num parts x 16
parts_camera2rest_state.append(camera2rest_state.flatten('F'))
parts_camera2rest_state = np.asarray(parts_camera2rest_state)
# shape nx3
points_camera_p3 = points_camera.transpose()[:, :3]
points_world_p3 = points_world.transpose()[:, :3]
points_rest_state_p3 = points_rest_state.transpose()[:, :3]
camera2base_matrix = np.linalg.inv(view_mat).flatten('F')
instance_name = f'{input_each["objectCat"]}_{input_each["objectId"]}_{input_each["articulationId"]}_{str(frame_index)}'
h5frame = h5file.require_group(instance_name)
h5frame.create_dataset("mask", shape=link_mask.shape, data=link_mask, compression="gzip")
h5frame.create_dataset("points_camera", shape=points_camera_p3.shape, data=points_camera_p3,
compression="gzip")
h5frame.create_dataset("points_rest_state", shape=points_rest_state_p3.shape, data=points_rest_state_p3,
compression="gzip")
h5frame.create_dataset("parts_transformation", shape=parts_camera2rest_state.shape,
data=parts_camera2rest_state, compression="gzip")
h5frame.create_dataset("base_transformation", shape=camera2base_matrix.shape,
data=camera2base_matrix, compression="gzip")
bar.next()
bar.finish()
h5file.close()
return output_filepath
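# Summary of the unprojection chain used in __call__ above (per valid pixel):
#   clip   = [x_ndc, y_ndc, 2*depth - 1, 1]
#   camera = inv(projMat) @ clip; camera /= camera[3]
#   world  = inv(viewMat) @ camera
#   rest   = rest_state_pose @ inv(current_part_pose) @ world   (per articulated part)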
class ProcStage1:
def __init__(self, cfg):
self.cfg = cfg
self.data_loader = DataLoader(cfg)
self.data_loader.parse_input()
self.input_cfg = self.cfg.paths.preprocess.stage1.input
self.tmp_output = self.cfg.paths.preprocess.stage1.tmp_output
self.output_cfg = self.cfg.paths.preprocess.stage1.output
self.height = self.cfg.dataset.param.height
self.width = self.cfg.dataset.param.width
self.debug = self.cfg.debug
def preprocess_motion_data(self, motion_data_df):
bar = Bar('Stage1 Parse Motion Data', max=len(motion_data_df))
for index, motion_data in motion_data_df.iterrows():
motion_file_path = os.path.join(self.data_loader.motion_dir, motion_data['objectCat'],
motion_data['objectId'], motion_data['motion'])
assert io.file_exist(motion_file_path), f'Can not found Motion file {motion_file_path}!'
if DatasetName[self.cfg.dataset.name] == DatasetName.SAPIEN or \
DatasetName[self.cfg.dataset.name] == DatasetName.SHAPE2MOTION:
urdf_reader = URDFReader(motion_file_path)
tmp_data_dir = os.path.join(self.cfg.paths.preprocess.tmp_dir, self.tmp_output.folder_name,
motion_data['objectCat'], motion_data['objectId'])
urdf_reader.export(
result_data_path=tmp_data_dir,
rest_state_data_filename=self.tmp_output.rest_state_data,
rest_state_mesh_filename=self.tmp_output.rest_state_mesh
)
bar.next()
bar.finish()
def process(self):
input_data = self.data_loader.data_info
io.ensure_dir_exists(self.cfg.paths.preprocess.tmp_dir)
input_data.to_csv(os.path.join(self.cfg.paths.preprocess.tmp_dir, self.tmp_output.input_files))
motion_data_df = input_data.drop_duplicates(subset=['objectCat', 'objectId', 'motion'])
self.preprocess_motion_data(motion_data_df)
io.ensure_dir_exists(self.cfg.paths.preprocess.output_dir)
num_processes = min(cpu_count(), self.cfg.num_workers)
# calculate the chunk size
chunk_size = max(1, int(input_data.shape[0] / num_processes))
chunks = [input_data.iloc[input_data.index[i:i + chunk_size]] for i in
range(0, input_data.shape[0], chunk_size)]
log.info(f'Stage1 Processing Start with {num_processes} workers and {len(chunks)} chunks')
config = OmegaConf.create()
config.output_path = os.path.join(self.cfg.paths.preprocess.tmp_dir, self.tmp_output.folder_name,
self.output_cfg.pcd_data)
config.tmp_dir = os.path.join(self.cfg.paths.preprocess.tmp_dir, self.tmp_output.folder_name)
render_cfg = OmegaConf.create()
render_cfg.width = self.width
render_cfg.height = self.height
render_cfg.render_dir = self.data_loader.render_dir
render_cfg.depth_ext = self.input_cfg.render.depth_ext
render_cfg.mask_ext = self.input_cfg.render.mask_ext
render_cfg.depth_folder = self.input_cfg.render.depth_folder
render_cfg.mask_folder = self.input_cfg.render.mask_folder
config.render_cfg = render_cfg
config.rest_state_data_filename = self.tmp_output.rest_state_data
config.dataset_name = self.cfg.dataset.name
with Pool(processes=num_processes) as pool:
proc_impl = ProcStage1Impl(config)
output_filepath_list = pool.starmap(proc_impl, enumerate(chunks))
output_file_path = os.path.join(self.cfg.paths.preprocess.output_dir, self.output_cfg.pcd_data)
h5file = h5py.File(output_file_path, 'w')
for filepath in output_filepath_list:
with h5py.File(filepath, 'r') as h5f:
for key in h5f.keys():
h5f.copy(key, h5file)
h5file.close()
# if self.debug:
# tmp_dir = os.path.join(self.cfg.paths.preprocess.tmp_dir, self.tmp_output.folder_name)
# with h5py.File(output_file_path, 'r') as h5file:
# bar = Bar('Stage1 Visualization', max=len(h5file.keys()))
# for key in h5file.keys():
# h5group = h5file[key]
# folder_names = key.split('_')
# viz_output_dir = os.path.join(tmp_dir, folder_names[0], folder_names[1], folder_names[2])
# viz_output_filename = key
# viz_output_path = os.path.join(viz_output_dir, viz_output_filename)
# viewer = Viewer(h5group['points_camera'][:], mask=h5group['mask'][:])
# if self.cfg.show:
# viewer.show(window_name=viz_output_filename + '_points_camera')
# else:
# viewer.render(fig_path=viz_output_path + '_points_camera.jpg')
# if self.cfg.export:
# viewer.export(mesh_path=viz_output_path + '_points_camera.ply')
# viewer.reset()
# viewer.add_geometry(h5group['points_rest_state'][:], mask=h5group['mask'][:])
# if self.cfg.show:
# viewer.show(window_name=viz_output_filename + '_points_rest_state')
# else:
# viewer.render(fig_path=viz_output_path + '_points_rest_state.jpg')
# if self.cfg.export:
# viewer.export(mesh_path=viz_output_path + '_points_rest_state.ply')
# del viewer
# bar.next()
# bar.finish()
```
#### File: tools/visualization/ancsh_visualizer.py
```python
import os
import logging
import h5py
import numpy as np
import trimesh.base
from matplotlib import cm
from progress.bar import Bar
import hydra
from hydra.utils import get_original_cwd
from omegaconf import DictConfig, OmegaConf
from tools.utils import io
from tools.visualization.viewer import Viewer
from ANCSH_lib.utils import NetworkType, get_prediction_vertices
log = logging.getLogger('ANCSH visualizer')
class ANCSHVisualizer:
def __init__(self, data: h5py.File, network_type, gt=False, sampling=1):
self.data = data
self.gt = gt
self.prefix = 'gt_' if gt else 'pred_'
self.export_dir = None
self.items = []
self.show_flag = False
self.render_flag = False
self.export_flag = False
self.network_type = network_type
self.sampling = sampling
self.arrow_sampling = 4
self.width = 1024
self.height = 768
self.point_size = 10
self.fig_ext = '.jpg'
self.mesh_ext = '.ply'
self.draw_offset_arrows = True
self.additional_mesh = None
def parse_items(self):
visit_groups = lambda name, node: self.items.append(name) if isinstance(node, h5py.Group) else None
self.data.visititems(visit_groups)
def add_trimesh(self, mesh: trimesh.base.Trimesh):
self.additional_mesh = mesh
def render_options(self, viewer, name, suffix=''):
if self.additional_mesh is not None:
viewer.add_trimesh(self.additional_mesh)
filename = name + suffix + '_' + self.network_type.value
folder_names = name.split('_')
viz_output_dir = os.path.join(self.export_dir, folder_names[0], folder_names[1], folder_names[2])
viewer.point_size = self.point_size
if self.show_flag:
viewer.show(window_size=[self.width, self.height], window_name=name)
if self.render_flag and self.export_dir:
viewer.render(fig_path=os.path.join(viz_output_dir, filename + self.fig_ext),
fig_size=[self.width, self.height])
if self.export_flag and self.export_dir:
viewer.export(mesh_path=os.path.join(viz_output_dir, filename + self.mesh_ext))
def viz_segmentation(self, data_group, data_name):
suffix = '_segmentation'
segmentations = data_group[f'{self.prefix}seg_per_point'][:][::self.sampling]
points_camera = data_group['camcs_per_point'][:][::self.sampling]
viewer = Viewer(points_camera, mask=segmentations)
self.render_options(viewer, data_name, suffix)
del viewer
def viz_npcs(self, data_group, data_name):
suffix = '_npcs'
segmentations = data_group[f'{self.prefix}seg_per_point'][:][::self.sampling]
npcs_points = data_group[f'{self.prefix}npcs_per_point'][:][::self.sampling]
viewer = Viewer(npcs_points, mask=segmentations)
self.render_options(viewer, data_name, suffix)
del viewer
def viz_naocs(self, data_group, data_name):
suffix = '_naocs'
segmentations = data_group[f'{self.prefix}seg_per_point'][:][::self.sampling]
naocs_points = data_group[f'{self.prefix}naocs_per_point'][:][::self.sampling]
viewer = Viewer(naocs_points, mask=segmentations)
self.render_options(viewer, data_name, suffix)
del viewer
def viz_joint_association(self, data_group, data_name):
suffix = '_joint_association'
joint_associations = data_group[f'{self.prefix}joint_cls_per_point'][:][::self.sampling]
joint_axes = data_group[f'{self.prefix}axis_per_point'][:][::self.sampling]
naocs_points = data_group[f'{self.prefix}naocs_per_point'][:][::self.sampling]
colors = Viewer.colors_from_mask(joint_associations, empty_first=True)
viewer = Viewer(naocs_points, colors=colors)
arrow_sample_indices = joint_associations != 0
viewer.add_arrows(naocs_points[arrow_sample_indices][::self.arrow_sampling],
joint_axes[arrow_sample_indices][::self.arrow_sampling],
color=[0, 0, 0, 0.6], radius=0.002, length=0.04)
self.render_options(viewer, data_name, suffix)
del viewer
def viz_point2joint_offset(self, data_group, data_name):
suffix = '_point2joint_offset'
joint_associations = data_group[f'{self.prefix}joint_cls_per_point'][:][::self.sampling]
point_heatmaps = data_group[f'{self.prefix}heatmap_per_point'][:][::self.sampling]
unit_vectors = data_group[f'{self.prefix}unitvec_per_point'][:][::self.sampling]
naocs_points = data_group[f'{self.prefix}naocs_per_point'][:][::self.sampling]
invalid_heatmap_mask = joint_associations == 0
max_val = np.amax(point_heatmaps)
cmap = cm.get_cmap('jet')
colors = cmap(point_heatmaps / max_val)
colors[invalid_heatmap_mask] = np.array([0.5, 0.5, 0.5, 0.5])
viewer = Viewer(naocs_points, colors=colors)
arrow_sample_indices = ~invalid_heatmap_mask
arrow_length = (1 - point_heatmaps) * 0.2 + 10e-8
if self.draw_offset_arrows:
viewer.add_trimesh_arrows(naocs_points[arrow_sample_indices][::self.arrow_sampling],
unit_vectors[arrow_sample_indices][::self.arrow_sampling],
color=[0, 0, 0, 0.6], radius=0.002,
length=arrow_length[arrow_sample_indices][::self.arrow_sampling])
else:
viewer.add_arrows(naocs_points[arrow_sample_indices][::self.arrow_sampling],
unit_vectors[arrow_sample_indices][::self.arrow_sampling],
color=[0, 0, 0, 0.6], radius=0.002, length=0.04)
self.render_options(viewer, data_name, suffix)
del viewer
def render(self, show=False, export=None, export_mesh=False):
self.show_flag = show
if self.show_flag:
self.render_flag = False
else:
self.render_flag = True
self.export_flag = export_mesh
self.export_dir = export
self.parse_items()
bar = Bar(f'Rendering {len(self.items)} instances', max=len(self.items))
for i, item_name in enumerate(self.items):
data_group = self.data[item_name]
log.debug(f'Render {item_name}')
log.debug(data_group.keys())
log.debug(data_group.attrs)
self.viz_segmentation(data_group, item_name)
self.viz_npcs(data_group, item_name)
if self.network_type == NetworkType.ANCSH:
self.viz_naocs(data_group, item_name)
self.viz_joint_association(data_group, item_name)
self.viz_point2joint_offset(data_group, item_name)
bar.next()
bar.finish()
class OptimizerVisualizer:
def __init__(self, data: h5py.File):
self.data = data
self.export_dir = None
self.items = []
self.show_flag = False
self.render_flag = False
self.export_flag = False
self.width = 1024
self.height = 768
self.fig_ext = '.jpg'
self.mesh_ext = '.ply'
def parse_items(self):
visit_groups = lambda name, node: self.items.append(name) if isinstance(node, h5py.Group) else None
self.data.visititems(visit_groups)
def render_options(self, viewer, name, suffix):
filename = name + suffix + '_optimization'
folder_names = name.split('_')
viz_output_dir = os.path.join(self.export_dir, folder_names[0], folder_names[1], folder_names[2])
if self.show_flag:
viewer.show(window_size=[self.width, self.height], window_name=name)
if self.render_flag and self.export_dir:
viewer.render(fig_path=os.path.join(viz_output_dir, filename + self.fig_ext),
fig_size=[self.width, self.height])
if self.export_flag and self.export_dir:
viewer.export(mesh_path=os.path.join(viz_output_dir, filename + self.mesh_ext))
def viz_npcs2cam(self, data_group, data_name):
segmentations = data_group['pred_seg_per_point'][:]
npcs_points = data_group['pred_npcs_per_point'][:]
npcs2cam_rt = data_group['pred_npcs2cam_rt'][:]
npcs2cam_scale = data_group['pred_npcs2cam_scale'][:]
camera_points = data_group['camcs_per_point'][:]
npcs2cam_points = np.empty_like(npcs_points)
for k in range(npcs2cam_rt.shape[0]):
rt = npcs2cam_rt[k].reshape((4, 4), order='F')
scale = npcs2cam_scale[k]
npcs2cam_points_part = npcs_points[segmentations == k] * scale
npcs2cam_points_part_p4 = np.hstack((npcs2cam_points_part, np.ones((npcs2cam_points_part.shape[0], 1))))
npcs2cam_points_part = np.dot(npcs2cam_points_part_p4, rt.T)[:, :3]
npcs2cam_points[segmentations == k] = npcs2cam_points_part
distance = np.linalg.norm(npcs2cam_points - camera_points, axis=1)
max_val = np.amax(distance)
cmap = cm.get_cmap('jet')
colors = cmap(distance / max_val)
viewer = Viewer(npcs2cam_points, mask=segmentations)
self.render_options(viewer, data_name, '_npcs2cam')
viewer.reset()
viewer.add_geometry(npcs2cam_points, colors=colors)
self.render_options(viewer, data_name, '_difference')
del viewer
def render(self, show=False, export=None, export_mesh=False):
self.show_flag = show
if self.show_flag:
self.render_flag = False
else:
self.render_flag = True
self.export_flag = export_mesh
self.export_dir = export
self.parse_items()
bar = Bar(f'Rendering {len(self.items)} instances', max=len(self.items))
for i, item_name in enumerate(self.items):
data_group = self.data[item_name]
log.debug(f'Render {item_name}')
log.debug(data_group.keys())
log.debug(data_group.attrs)
self.viz_npcs2cam(data_group, item_name)
bar.next()
bar.finish()
@hydra.main(config_path="../../configs", config_name="preprocess")
def main(cfg: DictConfig):
prediction = True
ancsh_path = '/home/sam/Development/Research/ancsh/ANCSH-pytorch/results/network/test/ANCSH_2022-01-07_00-33-40/ANCSH_inference_result.h5'
npcs_path = '/home/sam/Development/Research/ancsh/ANCSH-pytorch/results/network/test/NPCS_2022-01-07_00-34-24/NPCS_inference_result.h5'
optimize_path = '/home/sam/Development/Research/ancsh/ANCSH-pytorch/results/optimization_result.h5'
if prediction:
export_dir = '/home/sam/Development/Research/ancsh/ANCSH-pytorch/results/visualization/pred'
else:
export_dir = '/home/sam/Development/Research/ancsh/ANCSH-pytorch/results/visualization/gt'
ancsh_input_h5 = h5py.File(ancsh_path, 'r')
npcs_input_h5 = h5py.File(npcs_path, 'r')
optimize_input_h5 = h5py.File(optimize_path, 'r')
visualizer = OptimizerVisualizer(optimize_input_h5)
visualizer.render(show=False, export=export_dir)
# visualizer = ANCSHVisualizer(npcs_input_h5, NetworkType.NPCS, gt=not prediction)
# visualizer.render(show=True, export=export_dir)
# visualizer = ANCSHVisualizer(ancsh_input_h5, NetworkType.ANCSH, gt=not prediction)
# visualizer.render(show=True, export=export_dir)
if __name__ == "__main__":
main()
```
#### File: 3dlg-hcvc/ANCSH-pytorch/train.py
```python
import os
import h5py
import logging
import torch
from time import time
import hydra
from hydra.utils import get_original_cwd
from omegaconf import DictConfig, OmegaConf
from ANCSH_lib import ANCSHTrainer, utils
from ANCSH_lib.utils import NetworkType
from tools.utils import io
log = logging.getLogger('train')
@hydra.main(config_path="configs", config_name="network")
def main(cfg: DictConfig):
OmegaConf.update(cfg, "paths.result_dir", io.to_abs_path(cfg.paths.result_dir, get_original_cwd()))
train_path = cfg.train.input_data if io.file_exist(cfg.train.input_data) else cfg.paths.preprocess.output.train
test_path = cfg.paths.preprocess.output.val if cfg.test.split == 'val' else cfg.paths.preprocess.output.test
test_path = cfg.test.input_data if io.file_exist(cfg.test.input_data) else test_path
data_path = {"train": train_path, "test": test_path}
num_parts = utils.get_num_parts(train_path)
# test_num_parts = utils.get_num_parts(test_path)
# assert num_parts == test_num_parts
log.info(f'Instances in dataset have {num_parts} parts')
network_type = NetworkType[cfg.network.network_type]
utils.set_random_seed(cfg.random_seed)
torch.set_deterministic(True)
torch.backends.cudnn.deterministic = True
os.environ["CUBLAS_WORKSPACE_CONFIG"] = ":4096:8"
trainer = ANCSHTrainer(
cfg=cfg,
data_path=data_path,
network_type=network_type,
num_parts=num_parts,
)
if not cfg.eval_only:
log.info(f'Train on {train_path}, validate on {test_path}')
if not cfg.train.continuous:
trainer.train()
else:
trainer.resume_train(cfg.train.input_model)
trainer.test()
else:
log.info(f'Test on {test_path} with inference model {cfg.test.inference_model}')
trainer.test(inference_model=cfg.test.inference_model)
if __name__ == "__main__":
start = time()
main()
stop = time()
duration_time = utils.duration_in_hours(stop - start)
log.info(f'Total time duration: {duration_time}')
``` |
{
"source": "3dlg-hcvc/multion-challenge",
"score": 3
} |
#### File: 3dlg-hcvc/multion-challenge/evaluate.py
```python
import sys
sys.path.insert(0, "")
import argparse
import os
import random
import numpy as np
import habitat
from habitat.core.challenge import Challenge
class RandomWalker(habitat.Agent):
def __init__(self):
self._POSSIBLE_ACTIONS = np.array([0,1,2,3])
def reset(self):
pass
def act(self, observations, not_done_masks):
return [np.random.choice(self._POSSIBLE_ACTIONS) for i in range(len(observations))]
def main():
agent = RandomWalker()
challenge = Challenge()
challenge.submit(agent)
if __name__ == "__main__":
main()
``` |
{
"source": "3dlg-hcvc/plan2scene-mask-extraction",
"score": 2
} |
#### File: code/src/module_loader.py
```python
import sys
def load_plane_predictor(planar_reconstruction_path):
sys.path.append(planar_reconstruction_path)
from surface_mask_extraction.plane_prediction import PlanePredictor
module = PlanePredictor
sys.path.remove(planar_reconstruction_path)
# Unload modules that conflict with semantic-segmentation-pytorch
del sys.modules["models"]
del sys.modules["utils"]
return module
def load_surface_segmenter(semantic_segmentation_project_path):
sys.path.append(semantic_segmentation_project_path)
from surface_mask_extraction.surface_segmentation import SurfaceSegmenter
module = SurfaceSegmenter
# Unload modules that conflict with PlanarReconstruction
del sys.modules["models"]
del sys.modules["utils"]
return module
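# Hypothetical usage (paths are illustrative): both loaders extend sys.path and then delete the
# conflicting top-level modules ("models", "utils") from sys.modules so the two projects can coexist:
#   PlanePredictor = load_plane_predictor("/opt/PlanarReconstruction")
#   SurfaceSegmenter = load_surface_segmenter("/opt/semantic-segmentation-pytorch")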
``` |
{
"source": "3dlg-hcvc/r2v-to-plan2scene",
"score": 3
} |
#### File: code/src/config_parser.py
```python
import json
import os.path as osp
import logging
class Config:
"""
Configuration item.
"""
def __init__(self, config_dict: dict):
"""
Initialize configuration item using a dictionary.
:param config_dict: dictionary of configuration values.
"""
self.config_dict = config_dict
for k, v in config_dict.items():
if isinstance(v, dict):
v = Config(v)
self.__dict__[k] = v
def as_dict(self):
return self.config_dict
def __getitem__(self, key):
return self.__dict__[key]
def parse_config(config_path: str):
"""
Parses a json config file into a Config object.
:param config_path: Path to the json config file.
"""
if not osp.exists(config_path):
logging.warning(f"Config file not found: {config_path}")
return None
with open(config_path, "r") as f:
config_dict = json.loads(f.read())
if isinstance(config_dict, dict):
return Config(config_dict)
else:
return config_dict
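# Hypothetical usage (file name is illustrative):
#   cfg = parse_config("configs/defaults.json")
#   if isinstance(cfg, Config):
#       print(cfg.as_dict())   # nested dicts become attribute-accessible Config objects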
```
#### File: src/r2vstk/json_util.py
```python
import json
import logging
import copy
from r2vstk.config_manager import ConfigManager
from r2vstk.floorplan import Wall
from r2vstk.id_gen import generate_wall_id
import math
log = logging.getLogger(__name__)
def generate_wall_json(conf: ConfigManager, wall: Wall, room_id: str, multiplication_factor: float, should_swap_wall_endpoints: bool,
is_short_walled: bool = False) -> dict:
"""
Generate a json describing a wall.
:param conf: ConfigManager
:param wall: Described wall
:param room_id: Room id
:param multiplication_factor: Scale factor to real world units
:param should_swap_wall_endpoints: Should the endpoints of the wall swap.
:param is_short_walled: Specify true to make the wall short.
:return: Wall json
"""
hole_jsons = []
old_wall_width = math.sqrt((wall.p2.pos[1] - wall.p1.pos[1]) ** 2 + (wall.p2.pos[0] - wall.p1.pos[0]) ** 2)
new_wall_width = math.sqrt(((wall.p2.pos[1] * multiplication_factor) - (wall.p1.pos[1] * multiplication_factor)) ** 2 +
((wall.p2.pos[0] * multiplication_factor) - (wall.p1.pos[0] * multiplication_factor)) ** 2)
wall_p1 = wall.p1
wall_p2 = wall.p2
if should_swap_wall_endpoints:
wall_p1, wall_p2 = wall_p2, wall_p1
for hole in wall.holes:
hole_minx = hole.min_x
hole_maxx = hole.max_x
if should_swap_wall_endpoints:
hole_minx, hole_maxx = old_wall_width - hole_maxx, old_wall_width - hole_minx
hole_type = None
# Load defaults since we do not have a model
min_y = conf.arch_defaults.door_min_y
max_y = conf.arch_defaults.door_max_y
if hole.type == "door":
hole_type = "Door"
min_y = conf.arch_defaults.door_min_y
max_y = conf.arch_defaults.door_max_y
elif hole.type == "window":
hole_type = "Window"
min_y = conf.arch_defaults.window_min_y
max_y = conf.arch_defaults.window_max_y
hole_json = {
"id": hole.id, # Node id of object creating hole in the wall
"type": hole_type, # hole type (`Door` or `Window`)
"box": { # cutout of hole as box on the wall
"min": [(hole_minx / old_wall_width * new_wall_width), min_y], # minimum point
# x is distance from points[0] (toward points[1])
# y is height from wall bottom (goes from 0 to wall height)
"max": [(hole_maxx / old_wall_width * new_wall_width), max_y] # maximum point
}
}
hole_jsons.append(hole_json)
wall_json = {
"roomId": [room_id],
"id": generate_wall_id(room_id, wall),
"type": "Wall",
"points": [[(wall_p1.pos[0] * multiplication_factor), 0.0, (wall_p1.pos[1] * multiplication_factor)],
[(wall_p2.pos[0] * multiplication_factor), 0.0, (wall_p2.pos[1] * multiplication_factor)]],
"holes": hole_jsons,
"height": conf.arch_defaults.wall_height if not is_short_walled else conf.arch_defaults.short_wall_height,
"materials": copy.deepcopy(conf.arch_defaults.wall_materials),
"depth": conf.arch_defaults.wall_depth,
"extra_height": conf.arch_defaults.wall_extra_height
}
return wall_json
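# Worked example of the hole box mapping above (numbers are illustrative): a door spanning
# 1.0..2.0 along a wall whose unscaled width is 5.0 and scaled width is 10.0 gets box
# min/max x of 2.0 and 4.0 (hole_x / old_wall_width * new_wall_width), measured from
# points[0] toward points[1]; y comes from the configured door/window defaults.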
def generate_ceiling_json(conf: ConfigManager, polyline, room_id: str, multiplication_factor: float) -> dict:
"""
Generate a json describing a ceiling.
:param conf: ConfigManager
:param polyline: Outline of the room
:param room_id: id of the room
:param multiplication_factor: Scale factor to real-world
:return: Json description of the ceiling
"""
r = {
"id": room_id + "_c",
"roomId": room_id,
"points": [[[(p[0] * multiplication_factor), 0.0, (p[1] * multiplication_factor)] for p in polyline]],
"type": "Ceiling",
"materials": conf.arch_defaults.ceiling_materials[:],
"offset": [0.0, conf.arch_defaults.wall_height, 0.0],
"depth": conf.arch_defaults.ceiling_depth
}
return r
def generate_floor_json(conf: ConfigManager, polyline, room_id:str, multiplication_factor:float):
"""
Generate a json describing the floor surface
:param conf: ConfigManager
:param polyline: Outline of the room
:param room_id: id of the room
:param multiplication_factor: Scale factor to real-world
:return: Json description of the floor.
"""
r = {
"id": room_id + "_f",
"roomId": room_id,
"points": [[[(p[0] * multiplication_factor), 0.0, (p[1] * multiplication_factor)] for p in polyline]],
"type": "Floor",
"materials": conf.arch_defaults.floor_materials[:],
"depth": conf.arch_defaults.floor_depth
}
return r
``` |
{
"source": "3dlg-hcvc/tricolo",
"score": 2
} |
#### File: 3dlg-hcvc/tricolo/run_retrieval_val.py
```python
import os
import sys
import yaml
import json
import random
import argparse
import numpy as np
import torch
from tricolo.trainers.SimCLR import SimCLR
from tricolo.dataloader.dataloader import ClrDataLoader
parser = argparse.ArgumentParser()
parser.add_argument("--exp", type=str, default="None", help="Exp to evaluate")
parser.add_argument("--split", type=str, help="Dataset split to evaluate on (valid or test)")
parser.add_argument('--clip', action='store_true', help='Use pretrained CLIP to evaluate')
args = parser.parse_args()
def main(load_dir):
if not args.clip:
with open(load_dir + '/checkpoints/config.json', 'r') as f:
config = json.load(f)
config['train'] = False
config['log_dir'] = load_dir
else:
"Dummy config file"
config = yaml.load(open('./tricolo/configs/clip.yaml', "r"), Loader=yaml.FullLoader)
config['train'] = False
config['log_dir'] = './logs/retrieval/clip'
dataset = ClrDataLoader(config['dset'], config['batch_size'], config['sparse_model'], **config['dataset'])
simclr = SimCLR(dataset, config)
pr_at_k = simclr.test(config['log_dir'], clip=args.clip, eval_loader=args.split)
precision = pr_at_k['precision']
recall = pr_at_k['recall']
recall_rate = pr_at_k['recall_rate']
ndcg = pr_at_k['ndcg']
# r_rank = pr_at_k['r_rank']
rr_1 = recall_rate[0]
rr_5 = recall_rate[4]
ndcg_5 = ndcg[4]
return rr_1, rr_5, ndcg_5
if __name__ == "__main__":
torch.multiprocessing.set_sharing_strategy('file_system')
path = './logs/retrieval/' + args.exp
load_dirs = [path]
rr_1 = []
rr_5 = []
ndcg_5 = []
print(load_dirs)
for load_dir in load_dirs:
_rr_1, _rr_5, _ndcg_5 = main(load_dir)
torch.cuda.empty_cache()
rr_1.append(_rr_1)
rr_5.append(_rr_5)
ndcg_5.append(_ndcg_5)
# Report back numbers as percentages
rr_1 = np.array(rr_1) * 100
rr_5 = np.array(rr_5) * 100
ndcg_5 = np.array(ndcg_5) * 100
print(np.mean(rr_1), np.mean(rr_5), np.mean(ndcg_5))
```
#### File: tricolo/dataloader/dataloader.py
```python
import torch
from torch.utils.data import DataLoader
from torch.utils.data._utils.collate import default_collate
from tricolo.dataloader.dataset import ClrDataset, ClrDatasetPrimitives
def collate_fn(batch):
default_collate_items = ['model_id', 'category', 'text', 'tokens', 'images']
locs = []
feats = []
data = []
for i, item in enumerate(batch):
_locs = batch[i]['voxels']['locs']
locs.append(torch.cat([_locs, torch.LongTensor(_locs.shape[0],1).fill_(i)],1))
feats.append(batch[i]['voxels']['feats'])
data.append({k:item[k] for k in default_collate_items})
locs = torch.cat(locs)
feats = torch.cat(feats)
data = default_collate(data)
data['voxels'] = {'locs': locs, 'feats': feats}
return data
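# Sketch of what collate_fn returns (assuming 3-D integer voxel coordinates): for a batch of two
# samples with N1 and N2 occupied voxels, data['voxels']['locs'] is (N1 + N2, 4), where the last
# column holds the sample index within the batch, and data['voxels']['feats'] stacks the per-voxel
# features in the same row order; the remaining keys (model_id, category, text, tokens, images)
# are default-collated.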
class ClrDataLoader(object):
def __init__(self, dset, batch_size, sparse_model, num_workers, train_json_file, val_json_file, test_json_file, image_size, voxel_size, root_npz_file='./datasets/all_npz/'):
self.batch_size = batch_size
self.num_workers = num_workers
self.train_json_file = train_json_file
self.val_json_file = val_json_file
self.test_json_file = test_json_file
self.image_size = image_size
self.voxel_size = voxel_size
self.sparse_model = sparse_model
self.root_npz_file = root_npz_file
self.dset = dset
def get_data_loaders(self):
if self.dset == 'shapenet':
print('Using Shapenet Dataset')
train_dataset = ClrDataset(json_file=self.train_json_file, sparse_model=self.sparse_model, image_size=self.image_size, voxel_size=self.voxel_size, root_npz_file=self.root_npz_file)
valid_dataset = ClrDataset(json_file=self.val_json_file, sparse_model=self.sparse_model, image_size=self.image_size, voxel_size=self.voxel_size, root_npz_file=self.root_npz_file)
test_dataset = ClrDataset(json_file=self.test_json_file, sparse_model=self.sparse_model, image_size=self.image_size, voxel_size=self.voxel_size, root_npz_file=self.root_npz_file)
elif self.dset == 'primitives':
print('Using Primitives Dataset')
train_dataset = ClrDatasetPrimitives(json_file=self.train_json_file, voxel_root_dir=self.root_npz_file)
valid_dataset = ClrDatasetPrimitives(json_file=self.val_json_file, voxel_root_dir=self.root_npz_file)
test_dataset = ClrDatasetPrimitives(json_file=self.test_json_file, voxel_root_dir=self.root_npz_file)
else:
raise NotImplementedError('Implement Other Dataset')
if self.sparse_model:
train_loader = DataLoader(train_dataset, collate_fn=collate_fn, batch_size=self.batch_size, num_workers=self.num_workers, drop_last=True, shuffle=True)
valid_loader = DataLoader(valid_dataset, collate_fn=collate_fn, batch_size=self.batch_size, num_workers=self.num_workers, drop_last=True, shuffle=True)
test_loader = DataLoader(test_dataset, collate_fn=collate_fn, batch_size=self.batch_size, num_workers=self.num_workers, drop_last=False, shuffle=True)
else:
train_loader = DataLoader(train_dataset, batch_size=self.batch_size, num_workers=self.num_workers, drop_last=True, shuffle=True)
valid_loader = DataLoader(valid_dataset, batch_size=self.batch_size, num_workers=self.num_workers, drop_last=True, shuffle=True)
test_loader = DataLoader(test_dataset, batch_size=self.batch_size, num_workers=self.num_workers, drop_last=False, shuffle=True)
print('Training file: {}, Size: {}'.format(self.train_json_file, len(train_loader.dataset)))
print('Val file: {}, Size: {}'.format(self.val_json_file, len(valid_loader.dataset)))
print('Test file: {}, Size: {}'.format(self.test_json_file, len(test_loader.dataset)))
return train_loader, valid_loader, test_loader
```
#### File: tricolo/models/retrieval_model.py
```python
import torch
import torch.nn as nn
import torch.nn.functional as F
from tricolo.models.models import cnn_encoder, cnn_encoder32, cnn_encoder_sparse, SVCNN, MVCNN
class ModelCLR(nn.Module):
def __init__(self, dset, voxel_size, sparse_model, out_dim, use_voxel, tri_modal, num_images, image_cnn, pretraining, vocab_size):
super(ModelCLR, self).__init__()
self.dset = dset
self.ef_dim = 32
self.z_dim = 512
self.out_dim = out_dim
self.cnn_name = image_cnn
self.use_voxel = use_voxel
self.tri_modal = tri_modal
self.voxel_size = voxel_size
self.num_images = num_images
self.pretraining = pretraining
self.sparse_model = sparse_model
self.text_model, self.text_fc = self._get_text_encoder()
self.embedding_layer = nn.Embedding(vocab_size, 256, padding_idx=0)
self.voxel_model, self.voxel_fc, self.image_model, self.image_fc = self._get_res_encoder()
def _get_text_encoder(self):
print("Text feature extractor: BiGRU")
text_model = nn.GRU(input_size=256, hidden_size=128, num_layers=1, bidirectional=True)
text_fc = nn.Linear(256, self.out_dim)
return text_model, text_fc
def _get_res_encoder(self):
voxel_model = None
voxel_fc = None
image_model = None
image_fc = None
if self.dset == 'shapenet':
if self.tri_modal:
print('Training Tri-Modal Model')
if self.sparse_model:
voxel_model = cnn_encoder_sparse(self.voxel_size, self.ef_dim, self.z_dim)
else:
voxel_model = cnn_encoder(self.voxel_size, self.ef_dim, self.z_dim)
voxel_fc = nn.Sequential(nn.Linear(self.z_dim,self.out_dim),nn.ReLU(),nn.Linear(self.out_dim,self.out_dim))
svcnn = SVCNN(self.z_dim, pretraining=self.pretraining, cnn_name=self.cnn_name)
image_model = MVCNN(self.z_dim, svcnn, cnn_name=self.cnn_name, num_views=self.num_images)
image_fc = nn.Sequential(nn.Linear(self.z_dim,self.out_dim),nn.ReLU(),nn.Linear(self.out_dim,self.out_dim))
elif self.use_voxel:
print('Training Bi-Modal Model')
if self.sparse_model:
voxel_model = cnn_encoder_sparse(self.voxel_size, self.ef_dim, self.z_dim)
else:
voxel_model = cnn_encoder(self.voxel_size, self.ef_dim, self.z_dim)
voxel_fc = nn.Sequential(nn.Linear(self.z_dim,self.out_dim),nn.ReLU(),nn.Linear(self.out_dim,self.out_dim))
else:
print('Training Bi-Modal Model')
svcnn = SVCNN(self.z_dim, pretraining=self.pretraining, cnn_name=self.cnn_name)
image_model = MVCNN(self.z_dim, svcnn, cnn_name=self.cnn_name, num_views=self.num_images)
image_fc = nn.Sequential(nn.Linear(self.z_dim,self.out_dim),nn.ReLU(),nn.Linear(self.out_dim,self.out_dim))
elif self.dset == 'primitives':
print('Training Primitives')
if self.tri_modal:
raise NotImplementedError('Implement Other Dataset')
elif self.use_voxel:
voxel_model = cnn_encoder32(self.ef_dim, self.z_dim)
voxel_fc = nn.Sequential(nn.Linear(self.z_dim,self.out_dim),nn.ReLU(),nn.Linear(self.out_dim,self.out_dim))
print('Bi-Modal Voxel, Text')
else:
raise NotImplementedError('Implement Other Dataset')
else:
raise NotImplementedError('Implement Other Dataset')
return voxel_model, voxel_fc, image_model, image_fc
def voxel_encoder(self, xis):
h = self.voxel_model(xis)
h.squeeze()
x = self.voxel_fc(h)
return x
def image_encoder(self, xis):
h = self.image_model(xis)
h.squeeze()
x = self.image_fc(h)
return x
def text_encoder(self, encoded_inputs):
embed_inputs = self.embedding_layer(encoded_inputs)
embed_inputs = torch.transpose(embed_inputs, 0, 1)
N = embed_inputs.shape[1]
h0 = torch.zeros(2, N, 128).cuda()
output, hidden = self.text_model(embed_inputs, h0)
out_emb = torch.tanh(self.text_fc(torch.cat((hidden[-2,:,:], hidden[-1,:,:]), dim = 1)))
return out_emb
def forward(self, voxels, images, encoded_inputs):
z_voxels = None
z_images = None
if self.tri_modal:
images = images.reshape(-1, images.shape[2], images.shape[3], images.shape[4])
z_voxels = self.voxel_encoder(voxels)
z_images = self.image_encoder(images)
elif self.use_voxel:
z_voxels = self.voxel_encoder(voxels)
else:
images = images.reshape(-1, images.shape[2], images.shape[3], images.shape[4])
z_images = self.image_encoder(images)
zls = self.text_encoder(encoded_inputs)
return z_voxels, z_images, zls
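# Minimal forward-pass sketch (constructor arguments below are assumptions for illustration only):
#   model = ModelCLR(dset='shapenet', voxel_size=32, sparse_model=False, out_dim=128,
#                    use_voxel=True, tri_modal=False, num_images=1, image_cnn='resnet18',
#                    pretraining=True, vocab_size=3000).cuda()   # text_encoder allocates its GRU state on CUDA
#   z_voxels, z_images, z_text = model(voxels, None, tokens)     # z_images is None in this bi-modal setup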
``` |
{
"source": "3dlook-me/cvat",
"score": 2
} |
#### File: tests/rest_api/test_0100_jobs.py
```python
from http import HTTPStatus
from deepdiff import DeepDiff
import pytest
from .utils.config import get_method
def get_job_staff(job, tasks, projects):
job_staff = []
job_staff.append(job['assignee'])
tid = job['task_id']
job_staff.append(tasks[tid]['owner'])
job_staff.append(tasks[tid]['assignee'])
pid = job['project_id']
if pid:
job_staff.append(projects[pid]['owner'])
job_staff.append(projects[pid]['assignee'])
job_staff = set(u['id'] for u in job_staff if u is not None)
return job_staff
def get_org_staff(org_id, memberships):
if org_id in ['', None]:
return set()
else:
return set(m['user']['id'] for m in memberships
if m['role'] in ['maintainer', 'owner'] and m['user'] != None
and m['organization'] == org_id)
def filter_jobs(jobs, tasks, org):
if org is None:
kwargs = {}
jobs = jobs.raw
elif org == '':
kwargs = {'org': ''}
jobs = [job for job in jobs
if tasks[job['task_id']]['organization'] is None]
else:
kwargs = {'org_id': org}
jobs = [job for job in jobs
if tasks[job['task_id']]['organization'] == org]
return jobs, kwargs
def is_org_member(memberships, user, org_id):
if org_id in ['', None]:
return True
else:
return user['id'] in set(m['user']['id'] for m in memberships
if m['user'] != None and m['organization'] == org_id)
class TestGetJobs:
def _test_get_job_200(self, user, jid, data, **kwargs):
response = get_method(user, f'jobs/{jid}', **kwargs)
assert response.status_code == HTTPStatus.OK
assert DeepDiff(data, response.json()) == {}
def _test_get_job_403(self, user, jid, **kwargs):
response = get_method(user, f'jobs/{jid}', **kwargs)
assert response.status_code == HTTPStatus.FORBIDDEN
@pytest.mark.parametrize('org', [None, '', 1, 2])
def test_admin_get_job(self, jobs, tasks, org):
jobs, kwargs = filter_jobs(jobs, tasks, org)
# keep only the reasonable amount of jobs
for job in jobs[:8]:
self._test_get_job_200('admin2', job['id'], job, **kwargs)
@pytest.mark.parametrize('org_id', ['', None, 1, 2])
@pytest.mark.parametrize('groups', [['business'], ['user'], ['worker'], []])
def test_non_admin_get_job(self, org_id, groups, users, jobs, tasks, projects,
memberships):
# keep the reasonable amount of users and jobs
users = [u for u in users if u['groups'] == groups][:4]
jobs, kwargs = filter_jobs(jobs, tasks, org_id)
org_staff = get_org_staff(org_id, memberships)
for job in jobs[:8]:
job_staff = get_job_staff(job, tasks, projects)
# check if the specific user in job_staff to see the job
for user in users:
if user['id'] in job_staff | org_staff:
self._test_get_job_200(user['username'], job['id'], job, **kwargs)
else:
self._test_get_job_403(user['username'], job['id'], **kwargs)
class TestListJobs:
def _test_list_jobs_200(self, user, data, **kwargs):
response = get_method(user, 'jobs', **kwargs, page_size=all)
assert response.status_code == HTTPStatus.OK
assert DeepDiff(data, response.json()['results']) == {}
def _test_list_jobs_403(self, user, **kwargs):
response = get_method(user, 'jobs', **kwargs)
assert response.status_code == HTTPStatus.FORBIDDEN
@pytest.mark.parametrize('org', [None, '', 1, 2])
def test_admin_list_jobs(self, jobs, tasks, org):
jobs, kwargs = filter_jobs(jobs, tasks, org)
self._test_list_jobs_200('admin1', jobs, **kwargs)
@pytest.mark.parametrize('org_id', ['', None, 1, 2])
@pytest.mark.parametrize('groups', [['business'], ['user'], ['worker'], []])
def test_non_admin_list_jobs(self, org_id, groups, users, jobs, tasks,
projects, memberships):
# keep the reasonable amount of users and jobs
users = [u for u in users if u['groups'] == groups][:2]
jobs, kwargs = filter_jobs(jobs, tasks, org_id)
org_staff = get_org_staff(org_id, memberships)
for user in users:
user_jobs = []
for job in jobs:
job_staff = get_job_staff(job, tasks, projects)
if user['id'] in job_staff | org_staff:
user_jobs.append(job)
if is_org_member(memberships, user, org_id):
self._test_list_jobs_200(user['username'], user_jobs, **kwargs)
else:
self._test_list_jobs_403(user['username'], **kwargs)
class TestGetAnnotations:
def _test_get_job_annotations_200(self, user, jid, data, **kwargs):
response = get_method(user, f'jobs/{jid}/annotations', **kwargs)
assert response.status_code == HTTPStatus.OK
assert DeepDiff(data, response.json()) == {}
def _test_get_job_annotations_403(self, user, jid, **kwargs):
response = get_method(user, f'jobs/{jid}/annotations', **kwargs)
assert response.status_code == HTTPStatus.FORBIDDEN
@pytest.mark.parametrize('org', [None, '', 1, 2])
def test_admin_get_job_annotations(self, jobs, tasks, annotations, org):
jobs, kwargs = filter_jobs(jobs, tasks, org)
# keep only the reasonable amount of jobs
for job in jobs[:8]:
jid = str(job['id'])
self._test_get_job_annotations_200('admin2', jid,
annotations['job'][jid], **kwargs)
@pytest.mark.parametrize('org_id', ['', None])
@pytest.mark.parametrize('groups', [['business'], ['user'], ['worker'], []])
def test_non_admin_get_job_annotations(self, org_id, groups, users, jobs, tasks,
projects, annotations, memberships):
users = [u for u in users if u['groups'] == groups][:4]
jobs, kwargs = filter_jobs(jobs, tasks, org_id)
org_staff = get_org_staff(org_id, memberships)
# keep only the reasonable amount of jobs
for job in jobs[:8]:
job_staff = get_job_staff(job, tasks, projects)
jid = str(job['id'])
for user in users:
if user['id'] in job_staff | org_staff:
self._test_get_job_annotations_200(user['username'],
jid, annotations['job'][jid], **kwargs)
else:
self._test_get_job_annotations_403(user['username'],
jid, **kwargs)
``` |
{
"source": "3Dmaps/mapcreator",
"score": 3
} |
#### File: mapcreator/mapcreator/cli_util.py
```python
import click
import sys, traceback
from mapcreator import building
from mapcreator import persistence
from mapcreator import echoes
from mapcreator.state import FileAddResult
"""
The x_or_error functions return a falsy value if an error was thrown.
If everything went OK, they return either True or resulting state, depending on the function.
"""
def init_or_error():
try:
state = persistence.init_state()
except Exception as e:
echoes.error('Unable to initialize project: {}'.format(e))
return None
else:
return state
def load_or_error():
try:
state = persistence.load_state()
except Exception as e:
echoes.error('Unable to load or initialize the project: {}'.format(e))
return None
else:
return state
def save_or_error(state):
try:
persistence.save_state(state)
except Exception as e:
echoes.error('Unable to save changes to the project! No changes done!')
echoes.error('What went wrong: {}'.format(e))
return None
else:
return state
def clear_or_error():
try:
persistence.clear_state()
except Exception as e:
echoes.error('Unable to reset project: {}'.format(e))
return False
else:
return True
def build_init_or_error():
try:
building.init_build()
except Exception as e:
echoes.error('Unable to initialize build: {}'.format(e))
return False
else:
return True
def build_clean_or_error():
try:
building.cleanup()
except Exception as e:
echoes.error('Unable to clean temporary build files: {}'.format(e))
return False
else:
return True
def add_files(files, add_method_name):
state = load_or_error()
if not state: return
echoes.info('Adding files to project...')
count = 0
for fpath in files:
result = getattr(state, add_method_name)(fpath) # Call method whose name is add_method_name
if result == FileAddResult.DOESNT_EXIST:
echoes.error('File "{}" doesn\'t exist!'.format(fpath))
elif result == FileAddResult.ALREADY_ADDED:
echoes.warn('{} has already been added to this project'.format(fpath))
elif result == FileAddResult.SUCCESS:
echoes.info('"{}" added'.format(fpath))
count += 1
else:
echoes.error('Unrecognized FileAddResult {} when trying to add {}!'.format(result, fpath))
if count > 0:
if not save_or_error(state): return
if count == len(files):
echoes.success("{} files added to the project successfully!".format(len(files)))
else:
echoes.warn("{} files (out of {}) added to the project successfully".format(count, len(files)))
else:
echoes.warn('No files were added.')
def clear_files(clear_method_name, files_type):
state = load_or_error()
if not state: return
echoes.info('Clearing {} files...'.format(files_type))
getattr(state, clear_method_name)() # Call method whose name is clear_method_name
if save_or_error(state):
echoes.success('All {} files cleared successfully!'.format(files_type))
def parse_color(line, debug = False):
PARSER = [str, int, int, int]
parts = line.split(' ')
if len(parts) != len(PARSER):
echoes.error("Invalid number of arguments on a line!")
echoes.error("(Should be {}, was {})".format(len(PARSER), len(parts)))
if debug: echoes.info("Line was: {}".format(line))
return None
try:
for i, func in enumerate(PARSER):
parts[i] = func(parts[i])
except ValueError as e:
echoes.error("Invalid color format!")
echoes.error("({})".format(e))
if debug: echoes.info("Line was: {}".format(line))
return None
return parts
def validate_color(r, g, b):
for value in (r, g, b):
if value < 0 or value > 255:
echoes.error("Invalid color value {}!".format(value))
echoes.info("(Should be between 0 and 255)")
return False
return True
def validate_resolution(reso, lower, upper):
if reso < lower or reso > upper:
echoes.error("Invalid resolution {}!".format(reso))
echoes.info("(Should be between {} and {})".format(lower, upper))
return False
return True
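# Expected behaviour of the parsing/validation helpers above (illustrative values):
#
#     parse_color("terrain 120 200 80")   # -> ["terrain", 120, 200, 80]
#     validate_color(120, 200, 80)        # -> True
#     validate_resolution(10, 0.5, 1000)  # -> True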
def do_build(files, statusclass, actions, state, debug = False):
has_errors = False
outfiles = []
echoes.info('Processing...')
buildstatus = statusclass(0, files, state)
errors = []
with click.progressbar(actions, bar_template=echoes.PROGRESS_BAR_TEMPLATE, show_eta=False) as bar:
for action in bar:
try:
action(buildstatus, debug)
except Exception as e:
errors.append(e)
if debug:
e_type, e_value, e_traceback = sys.exc_info()
errors.extend(traceback.format_tb(e_traceback))
has_errors = True
if errors:
echoes.error('Exceptions caught:')
for e in errors:
echoes.error(e)
for line in str(buildstatus).split('\n'):
echoes.info(line)
outfiles.extend(buildstatus.get_result_files())
return (outfiles, has_errors)
```
#### File: mapcreator/mapcreator/osm.py
```python
from xml.etree import ElementTree
class OSMData:
TAG_ROOT = 'osm'
TAG_NODE = 'node'
TAG_WAY = 'way'
TAG_WAY_NODE = 'nd'
TAG_TAG = 'tag'
ATTRIB_ID = 'id'
ATTRIB_REF = 'ref'
ATTRIB_KEY = 'k'
ATTRIB_VALUE = 'v'
ATTRIB_LAT = 'lat'
ATTRIB_LON = 'lon'
KEY_LANDUSE = 'landuse'
ACCEPTED_LANDUSES = ['meadow']
KEY_HIGHWAY = 'highway'
ACCEPTED_HIGHWAYS = ['footway', 'path']
def __init__(self):
self.node_filters = []
self.way_filters = []
@classmethod
def load(cls, path):
data = OSMData()
data.tree = ElementTree.parse(path)
data.preprocess()
return data
@classmethod
def get_elem_id(cls, elem):
return int(elem.get(OSMData.ATTRIB_ID))
@classmethod
def get_tag(cls, elem, key):
for tagElement in elem.findall(OSMData.TAG_TAG):
if tagElement.get(OSMData.ATTRIB_KEY) == key:
return tagElement.get(OSMData.ATTRIB_VALUE)
return None
def preprocess(self):
self.nodes = {}
self.included_nodes = set()
self.ways = {}
self.included_ways = set()
root = self.tree.getroot()
if root.tag != OSMData.TAG_ROOT:
raise ValueError('Invalid OSM XML Data - the root node\'s tag was not "osm"!')
for child in root:
if child.tag == OSMData.TAG_NODE:
nodeid = OSMData.get_elem_id(child)
self.nodes[nodeid] = child
self.included_nodes.add(nodeid)
elif child.tag == OSMData.TAG_WAY:
wayid = OSMData.get_elem_id(child)
self.ways[wayid] = child
self.included_ways.add(wayid)
def add_node_filter(self, *filters):
"""
Adds filters.
        Filters are functions that take an ElementTree element and this OSMData
        instance as arguments and return a truthy value if the element should be
        kept.
Filters given in the same method call are joined with "and",
while filters given in subsequent calls are joined with "or".
For example
data = OSMData()
data.add_node_filter(f1, f2, f3)
data.add_node_filter(f4)
will result in elements for which
(f1(element) and f2(element) and f3(element)) or f4(element)
is False getting filtered out and elements for which that is True being kept
"""
OSMData.add_filter(self.node_filters, *filters)
def add_way_filter(self, *filters):
"""
Adds filters.
        Filters are functions that take an ElementTree element and this OSMData
        instance as arguments and return a truthy value if the element should be
        kept.
Filters given in the same method call are joined with "and",
while filters given in subsequent calls are joined with "or".
For example
data = OSMData()
data.add_way_filter(f1, f2, f3)
data.add_way_filter(f4)
will result in elements for which
(f1(element) and f2(element) and f3(element)) or f4(element)
is False getting filtered out and elements for which that is True being kept
"""
OSMData.add_filter(self.way_filters, *filters)
@classmethod
def add_filter(cls, filterlist, *filters):
filterlist.append(filters)
def do_filter(self):
self.filter(self.included_nodes, self.nodes, self.node_filters)
self.filter(self.included_ways, self.ways, self.way_filters)
def filter(self, idset, elemdict, filterlist):
filterset = set()
for elemid in idset:
elem = elemdict[elemid]
ok = False
for filt in filterlist:
current_ok = True
for f in filt:
if not f(elem, self):
current_ok = False
break
if current_ok:
ok = True
break
if not ok:
filterset.add(elemid)
idset -= filterset
def prepare_for_save(self):
root = self.tree.getroot()
for way in self.ways.values():
wayid = OSMData.get_elem_id(way)
# Remove dropped ways
if wayid not in self.included_ways:
root.remove(way)
else:
for child in way:
# Add nodes that belong to the way even if they were dropped by some other filter
if child.tag == OSMData.TAG_WAY_NODE:
self.included_nodes.add(int(child.get(OSMData.ATTRIB_REF)))
for node in self.nodes.values():
nodeid = OSMData.get_elem_id(node)
# Remove dropped nodes
if nodeid not in self.included_nodes:
root.remove(node)
def save(self, path):
self.tree.write(path, encoding='utf-8', xml_declaration=True)
def areaFilter(elem, osmdata):
"""
Currently filters in only areas with "landuse" in the accepted landuses list.
"""
return OSMData.get_tag(elem, OSMData.KEY_LANDUSE) in OSMData.ACCEPTED_LANDUSES
def trailFilter(elem, osmdata):
"""
Currently filters in all trails with "highway" in the accepted highways list.
"""
return OSMData.get_tag(elem, OSMData.KEY_HIGHWAY) in OSMData.ACCEPTED_HIGHWAYS
class WayCoordinateFilter:
def __init__(self, minx, maxx, miny, maxy):
self.minx = minx
self.miny = miny
self.maxx = maxx
self.maxy = maxy
def filter(self, elem, osmdata):
for ref in elem.iter(OSMData.TAG_WAY_NODE):
try:
node = osmdata.nodes[int(ref.get(OSMData.ATTRIB_REF))]
x = float(node.get(OSMData.ATTRIB_LON))
y = float(node.get(OSMData.ATTRIB_LAT))
except ValueError:
continue # Just skip any dirty data
if self.minx <= x and x <= self.maxx and self.miny <= y and y <= self.maxy:
return True
return False
def merge(osm_datas):
"""
Merges several OSMdatas into one. Takes a list of OSMdatas as a parameter.
    Returns a combined OSMdata. Entity doubles are not checked or removed, so the
    combined XML may contain duplicates. The combined XML tree is the attribute tree
of the OSMdata that is returned.
"""
trees = []
for osm_data in osm_datas:
trees.append(osm_data.tree)
result_xml_element_tree = None
for osm_tree in trees:
if result_xml_element_tree is None:
result_xml_element_tree = osm_tree
else:
for child in osm_tree.getroot().iterfind('.'):
result_xml_element_tree.getroot().extend(child)
if result_xml_element_tree is not None:
resultOSMdata = OSMData()
resultOSMdata.tree = result_xml_element_tree
return resultOSMdata
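# Illustrative end-to-end use of OSMData (sketch only; 'region.osm' and the
# bounding-box values are hypothetical):
#
#     data = OSMData.load('region.osm')
#     data.add_way_filter(trailFilter)   # keep accepted highway types ...
#     data.add_way_filter(WayCoordinateFilter(24.0, 25.0, 60.0, 61.0).filter)
#     # ... or (separate call => joined with "or") ways inside the bounding box
#     data.do_filter()
#     data.prepare_for_save()
#     data.save('region_filtered.osm')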
```
#### File: mapcreator/tests/test_cli_util.py
```python
import mapcreator
from mock import patch
from imp import reload
from mapcreator import cli_util
from mapcreator.state import State, FileAddResult
@patch('mapcreator.cli_util.echoes')
class TestCliUtil:
def x_or_error_shows_error(self, function, mock_persistence_func, mock_echoes, call_contents):
assert not function()
mock_persistence_func.assert_called_once()
mock_error = mock_echoes.error
mock_error.assert_called()
total_output = ''
for call in mock_error.call_args_list:
args, kwargs = call
assert 0 == len(kwargs)
assert 1 == len(args)
total_output += ' ' + args[0]
for piece in call_contents:
assert piece in total_output
def x_or_error_no_error(self, function, mock_persistence_func, mock_echoes, return_value):
assert function() == return_value
mock_persistence_func.assert_called_once()
mock_error = mock_echoes.error
mock_error.assert_not_called()
@patch('mapcreator.persistence.init_state', side_effect=OSError('Whoops!'))
def test_init_or_error_shows_error_when_unsuccessful(self, mock_init, mock_echoes):
self.x_or_error_shows_error(
cli_util.init_or_error,
mock_init,
mock_echoes,
['Unable to initialize project', str(OSError('Whoops!'))]
)
@patch('mapcreator.persistence.init_state', return_value = 'Success :-)')
def test_init_or_error_doesnt_show_error_when_successful(self, mock_init, mock_echoes):
self.x_or_error_no_error(cli_util.init_or_error, mock_init, mock_echoes, 'Success :-)')
@patch('mapcreator.persistence.load_state', side_effect=OSError('Whoops!'))
def test_load_or_error_shows_error_when_unsuccessful(self, mock_load, mock_echoes):
self.x_or_error_shows_error(
cli_util.load_or_error,
mock_load,
mock_echoes,
['Unable to load or initialize the project', str(OSError('Whoops!'))]
)
@patch('mapcreator.persistence.load_state', return_value = 'Success :-)')
def test_load_or_error_doesnt_show_error_when_successful(self, mock_load, mock_echoes):
self.x_or_error_no_error(cli_util.load_or_error, mock_load, mock_echoes, 'Success :-)')
@patch('mapcreator.persistence.save_state', side_effect=OSError('Whoops!'))
def test_save_or_error_shows_error_when_unsuccessful(self, mock_save, mock_echoes):
self.x_or_error_shows_error(
lambda: cli_util.save_or_error('asd'),
mock_save,
mock_echoes,
['Unable to save changes', 'No changes done', 'What went wrong', str(OSError('Whoops!'))]
)
@patch('mapcreator.persistence.save_state', return_value = True)
def test_save_or_error_doesnt_show_error_when_successful(self, mock_save, mock_echoes):
self.x_or_error_no_error(lambda: cli_util.save_or_error('Success :-)'), mock_save, mock_echoes, 'Success :-)')
@patch('mapcreator.persistence.clear_state', side_effect=OSError('Whoops!'))
def test_clear_or_error_shows_error_when_unsuccessful(self, mock_clear, mock_echoes):
self.x_or_error_shows_error(
cli_util.clear_or_error,
mock_clear,
mock_echoes,
['Unable to reset project', str(OSError('Whoops!'))]
)
@patch('mapcreator.persistence.clear_state', return_value = True)
def test_clear_or_error_doesnt_show_error_when_successful(self, mock_clear, mock_echoes):
self.x_or_error_no_error(cli_util.clear_or_error, mock_clear, mock_echoes, True)
@patch('mapcreator.building.init_build', side_effect=OSError('Whoops!'))
def test_build_init_or_error_shows_error_when_unsuccessful(self, mock_init, mock_echoes):
self.x_or_error_shows_error(
cli_util.build_init_or_error,
mock_init,
mock_echoes,
['Unable to initialize build', str(OSError('Whoops!'))]
)
@patch('mapcreator.building.init_build')
def test_build_init_or_error_doesnt_show_error_when_successful(self, mock_init, mock_echoes):
self.x_or_error_no_error(cli_util.build_init_or_error, mock_init, mock_echoes, True)
@patch('mapcreator.building.cleanup', side_effect=OSError('Whoops!'))
def test_build_clean_or_error_shows_error_when_unsuccessful(self, mock_clean, mock_echoes):
self.x_or_error_shows_error(
cli_util.build_clean_or_error,
mock_clean,
mock_echoes,
['Unable to clean', str(OSError('Whoops!'))]
)
@patch('mapcreator.building.cleanup')
def test_build_clean_or_error_doesnt_show_error_when_successful(self, mock_clean, mock_echoes):
self.x_or_error_no_error(cli_util.build_clean_or_error, mock_clean, mock_echoes, True)
@patch('mapcreator.persistence.load_state', lambda: State())
@patch.object(mapcreator.state.State, 'add_height_file')
@patch('mapcreator.persistence.save_state', side_effect = lambda state: state) #Returns state, so save was successful
def test_add_files(self, mock_save, mock_state, mock_echoes):
mock_state.return_value = FileAddResult.SUCCESS
cli_util.add_files(('test1.txt', 'test2.txt'), 'add_height_file')
mock_state.assert_any_call('test1.txt')
mock_state.assert_any_call('test2.txt')
assert mock_state.call_count == 2
assert mock_save.call_count == 1
mock_echoes.success.assert_called()
@patch('mapcreator.persistence.load_state', lambda: State())
@patch.object(mapcreator.state.State, 'add_osm_file')
@patch('mapcreator.persistence.save_state')
def test_add_files_no_files_exist(self, mock_save, mock_state, mock_echoes):
mock_state.return_value = FileAddResult.DOESNT_EXIST
cli_util.add_files(('test1.txt','test2.txt','test3.txt'), 'add_osm_file')
mock_state.assert_any_call('test1.txt')
mock_state.assert_any_call('test2.txt')
mock_state.assert_any_call('test3.txt')
assert mock_state.call_count == 3
assert mock_save.call_count == 0 # Don't save if there's no changes
for i in range(1,3):
mock_echoes.error.assert_any_call('File "test{}.txt" doesn\'t exist!'.format(i))
@patch('mapcreator.persistence.load_state', lambda: State())
@patch.object(mapcreator.state.State, 'add_height_file')
@patch('mapcreator.persistence.save_state')
def test_add_files_all_already_added(self, mock_save, mock_state, mock_echoes):
mock_state.return_value = FileAddResult.ALREADY_ADDED
cli_util.add_files(('test3.txt',), 'add_height_file')
mock_state.assert_called_once_with('test3.txt')
assert mock_save.call_count == 0
mock_echoes.warn.assert_any_call('test3.txt has already been added to this project')
@patch('mapcreator.persistence.load_state', lambda: State())
@patch.object(mapcreator.state.State, 'add_osm_file')
@patch('mapcreator.persistence.save_state')
def test_add_files_some_files_ok(self, mock_save, mock_state, mock_echoes):
def add_side_effect(filename):
if filename in ('1', '11', '21'):
return FileAddResult.SUCCESS
else:
return (FileAddResult.ALREADY_ADDED, FileAddResult.DOESNT_EXIST)[int(filename) % 2]
mock_state.side_effect = add_side_effect
files = [str(i) for i in range(25)]
cli_util.add_files(files, 'add_osm_file')
assert mock_state.call_count == len(files)
assert mock_save.call_count == 1
mock_echoes.warn.assert_called_with('3 files (out of 25) added to the project successfully')
@patch('mapcreator.persistence.load_state', lambda: State())
@patch.object(mapcreator.state.State, 'clear_osm_files')
@patch('mapcreator.persistence.save_state')
def test_clear_files(self, mock_save, mock_state, mock_echoes):
cli_util.clear_files('clear_osm_files', 'open street map')
assert mock_state.call_count == 1
assert mock_save.call_count == 1
mock_echoes.success.assert_called_with('All open street map files cleared successfully!')
def test_parse_color_wrong_number_of_args(self, mock_echoes):
mock_error = mock_echoes.error
assert not cli_util.parse_color("tag 123 221 9 77")
mock_error.assert_called()
def test_parse_color_invalid_number_format(self, mock_echoes):
mock_error = mock_echoes.error
assert not cli_util.parse_color("terrain 7 8 cheese")
mock_error.assert_called()
def test_parse_color(self, mock_echoes):
mock_error = mock_echoes.error
assert ["hello", 7, 88, 3] == cli_util.parse_color("hello 7 88 3")
mock_error.assert_not_called()
def test_validate_color_inivalid_color(self, mock_echoes):
mock_error = mock_echoes.error
assert not cli_util.validate_color(-11, 255, 13)
mock_error.assert_called()
mock_error.reset_mock()
assert not cli_util.validate_color(0, 256, 13)
mock_error.assert_called()
mock_error.reset_mock()
assert not cli_util.validate_color(6, 7, 999)
mock_error.assert_called()
def test_validate_color_valid_color(self, mock_echoes):
mock_error = mock_echoes.error
assert cli_util.validate_color(0, 255, 127)
assert cli_util.validate_color(255, 44, 0)
assert cli_util.validate_color(31, 0, 255)
assert cli_util.validate_color(123, 221, 99)
mock_error.assert_not_called()
def test_validate_resolution_valid_reso(self, mock_echoes):
mock_error = mock_echoes.error
assert cli_util.validate_resolution(1, 0.5, 1000)
assert cli_util.validate_resolution(0.05, 0.01, 0.05)
assert cli_util.validate_resolution(700, 300, 800)
mock_error.assert_not_called()
def test_validate_resolution_invalid_reso(self, mock_echoes):
mock_error = mock_echoes.error
assert not cli_util.validate_resolution(1, 2, 1000)
mock_error.assert_called()
mock_error.reset_mock()
assert not cli_util.validate_resolution(1000, 50, 999)
mock_error.assert_called()
mock_error.reset_mock()
assert not cli_util.validate_resolution(0.05, 0.1, 0.5)
mock_error.assert_called()
``` |
{
"source": "3D-measurement/Hformer",
"score": 3
} |
#### File: models/layers/involution.py
```python
import paddle.nn as nn
class involution(nn.Layer):
def __init__(self,
channels,
kernel_size,
stride):
super(involution, self).__init__()
self.kernel_size = kernel_size
self.stride = stride
self.channels = channels
reduction_ratio = 4
self.group_channels = 16
self.groups = self.channels // self.group_channels
self.conv1 = nn.Sequential(
('conv', nn.Conv2D(
in_channels=channels,
out_channels=channels // reduction_ratio,
kernel_size=1,
bias_attr=False
)),
('bn', nn.BatchNorm2D(channels // reduction_ratio)),
('activate', nn.ReLU())
)
self.conv2 = nn.Sequential(
('conv', nn.Conv2D(
in_channels=channels // reduction_ratio,
out_channels=kernel_size**2 * self.groups,
kernel_size=1,
stride=1))
)
if stride > 1:
self.avgpool = nn.AvgPool2D(stride, stride)
def forward(self, x):
weight = self.conv2(self.conv1(
x if self.stride == 1 else self.avgpool(x)))
b, c, h, w = weight.shape
weight = weight.reshape((
b, self.groups, self.kernel_size**2, h, w)).unsqueeze(2)
out = nn.functional.unfold(
x, self.kernel_size, strides=self.stride, paddings=(self.kernel_size-1)//2, dilations=1)
out = out.reshape(
(b, self.groups, self.group_channels, self.kernel_size**2, h, w))
out = (weight * out).sum(axis=3).reshape((b, self.channels, h, w))
return out
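# Minimal shape check for the layer above (sketch; assumes paddlepaddle is
# installed and that channels is a multiple of group_channels=16):
if __name__ == '__main__':
    import paddle
    inv = involution(channels=64, kernel_size=7, stride=1)
    y = inv(paddle.randn([2, 64, 32, 32]))
    print(y.shape)  # expected: [2, 64, 32, 32]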
``` |
{
"source": "3DMiller/metashape-scripts",
"score": 2
} |
#### File: metashape-scripts/src/automatic_masking.py
```python
import pathlib
import Metashape
# Checking compatibility
compatible_major_version = "1.7"
found_major_version = ".".join(Metashape.app.version.split('.')[:2])
if found_major_version != compatible_major_version:
raise Exception("Incompatible Metashape version: {} != {}".format(found_major_version, compatible_major_version))
def generate_automatic_background_masks_with_rembg():
try:
import rembg
import rembg.bg
import scipy
import numpy as np
import io
from PIL import Image
except ImportError:
print("Please ensure that you installed torch and rembg - see instructions in the script")
raise
print("Script started...")
doc = Metashape.app.document
chunk = doc.chunk
cameras = chunk.cameras
nmasks_exists = 0
for c in cameras:
if c.mask is not None:
nmasks_exists += 1
print("Camera {} already has mask".format(c.label))
if nmasks_exists > 0:
raise Exception("There are already {} masks, please remove them and try again".format(nmasks_exists))
masks_dirs_created = set()
cameras_by_masks_dir = {}
for i, c in enumerate(cameras):
input_image_path = c.photo.path
print("{}/{} processing: {}".format(i + 1, len(cameras), input_image_path))
image_mask_dir = pathlib.Path(input_image_path).parent / 'masks'
if image_mask_dir.exists() and str(image_mask_dir) not in masks_dirs_created:
attempt = 2
image_mask_dir_attempt = pathlib.Path(str(image_mask_dir) + "_{}".format(attempt))
while image_mask_dir_attempt.exists() and str(image_mask_dir_attempt) not in masks_dirs_created:
attempt += 1
image_mask_dir_attempt = pathlib.Path(str(image_mask_dir) + "_{}".format(attempt))
image_mask_dir = image_mask_dir_attempt
if image_mask_dir.exists():
assert str(image_mask_dir) in masks_dirs_created
else:
image_mask_dir.mkdir(parents=False, exist_ok=False)
masks_dirs_created.add(str(image_mask_dir))
cameras_by_masks_dir[str(image_mask_dir)] = list()
image_mask_name = pathlib.Path(input_image_path).name.split(".")
if len(image_mask_name) > 1:
image_mask_name = image_mask_name[:-1]
image_mask_name = ".".join(image_mask_name)
image_mask_path = str(image_mask_dir / image_mask_name) + "_mask.png"
# image_bytes = np.fromfile(input_image_path)
# img_no_background_bytes = rembg.bg.remove(image_bytes)
# img = np.array(Image.open(io.BytesIO(image_bytes)).convert("RGBA"))
# img_no_background = np.array(Image.open(io.BytesIO(img_no_background_bytes)).convert("RGBA"))
# mask = (img_no_background[:, :, 3] > 0)
# mask = scipy.ndimage.morphology.binary_dilation(mask, iterations=8)
# mask = scipy.ndimage.morphology.binary_erosion(mask, iterations=12)
# mask = mask.astype(np.uint8) * 255
# mask = np.dstack([mask, mask, mask])
photo_image = c.photo.image()
img = np.frombuffer(photo_image.tostring(), dtype={'U8': np.uint8, 'U16': np.uint16}[photo_image.data_type]).reshape(photo_image.height, photo_image.width, photo_image.cn)[:, :, :3]
model_name = "u2net"
model = rembg.bg.get_model(model_name)
mask = rembg.u2net.detect.predict(model, img).convert("L")
mask = np.array(mask.resize((photo_image.width, photo_image.height)))
mask = (mask > 10)
mask = scipy.ndimage.morphology.binary_dilation(mask, iterations=3)
mask = scipy.ndimage.morphology.binary_erosion(mask, iterations=3)
mask = mask.astype(np.uint8) * 255
mask = np.dstack([mask, mask, mask])
# Image.fromarray(img).save(str(image_mask_dir / image_mask_name) + "_1.jpg")
# Image.fromarray(img_no_background).save(str(image_mask_dir / image_mask_name) + "_2.jpg")
Image.fromarray(mask).save(image_mask_path)
Metashape.app.update()
cameras_by_masks_dir[str(image_mask_dir)].append(c)
print("{} masks generated in {} directories:".format(len(cameras), len(masks_dirs_created)))
for mask_dir in sorted(masks_dirs_created):
print(mask_dir)
print("Importing masks into project...")
for masks_dir, cameras in cameras_by_masks_dir.items():
chunk.generateMasks(path=masks_dir + "/{filename}_mask.png", masking_mode=Metashape.MaskingMode.MaskingModeFile, cameras=cameras)
print("Script finished.")
label = "Custom Menu/Automatic background masking"
Metashape.app.addMenuItem(label, generate_automatic_background_masks_with_rembg)
print("To execute this script press {}".format(label))
``` |
{
"source": "3dnygm4/titanium",
"score": 2
} |
#### File: titanium/app/views.py
```python
from app import app, db
from flask import flash, redirect, render_template, request, \
session, url_for
from functools import wraps
#decorator function to control login
def login_required(test):
@wraps(test)
def wrap(*args, **kwargs):
if 'logged_in' in session:
return test(*args, **kwargs)
else:
flash('You need to login first.')
return redirect(url_for('users.login'))
return wrap
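# Example of protecting a view with the decorator above (illustrative route,
# not defined in this project):
#
#     @app.route('/dashboard/')
#     @login_required
#     def dashboard():
#         return render_template('dashboard.html')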
#display error messages on template
def flash_errors(form):
for field, errors in form.errors.items():
for error in errors:
flash (u"Error in the %s field - %s"%(
getattr(form,field).label.text, error),'error')
#errorhandler
@app.errorhandler(500)
def internal_error(error):
db.session.rollback()
return render_template('500.html'),500
@app.errorhandler(404)
def internal_error(error):
return render_template('404.html'),404
@app.route('/',defaults={'page':'index'})
def index(page):
return (redirect(url_for('tasks.tasks')))
``` |
{
"source": "3dperceptionlab/tactile-gcn",
"score": 2
} |
#### File: 3dperceptionlab/tactile-gcn/evaluate.py
```python
__author__ = "<NAME> and <NAME>"
__copyright__ = "Copyright 2018, 3D Perception Lab"
__credits__ = ["<NAME>",
"<NAME>"]
__license__ = "MIT"
__version__ = "1.0"
__maintainer__ = "<NAME>"
__email__ = "<EMAIL>"
__status__ = "Development"
import argparse
import datetime
import logging
import os
import sys
import time
from timeit import default_timer as timer
import torch
import torch.nn.functional as F
import torch.utils.data.dataloader
from torch.utils.data.sampler import SubsetRandomSampler
from torch_geometric.data import Data
from torch_geometric.data import DataLoader
import loader.biotacsp_loader
import dataset.biotacsp
import network.utils
import utils.evaluation
log = logging.getLogger(__name__)
def evaluate (args):
dataset_ = dataset.biotacsp.BioTacSp(root='data/biotacsp', k=args.graph_k, split=args.split, normalize=args.normalize)
log.info(dataset_)
log.info("Evaluating network over {0} split...".format(args.split))
eval_loader_ = DataLoader(dataset_, batch_size=args.batch_size, shuffle=False, num_workers=1)
## Select CUDA device
device_ = torch.device('cuda:0' if torch.cuda.is_available() else 'cpu')
log.info(device_)
log.info(torch.cuda.get_device_name(0))
## Build model
model_ = network.utils.get_network(args.network, dataset_.data.num_features, dataset_.data.num_classes).to(device_)
log.info(model_)
# Load checkpoint if specified
if args.checkpoint is not None:
if os.path.isfile(args.checkpoint):
log.info('Loading checkpoint {}'.format(args.checkpoint))
checkpoint_ = torch.load(args.checkpoint)
model_.load_state_dict(checkpoint_['model_state'])
log.info('Loaded network...')
else:
log.info('The checkpoint file at {} was not found'.format(args.checkpoint))
utils.evaluation.eval(model_, device_, eval_loader_)
if __name__ == "__main__":
parser_ = argparse.ArgumentParser(description="Parameters")
parser_.add_argument("--log_path", nargs="?", default="logs", help="Logging path")
parser_.add_argument("--split", nargs="?", default="test", help="Dataset split to evaluate")
parser_.add_argument("--checkpoint", nargs="?", required=True, help="Path to save checkpoints")
parser_.add_argument("--normalize", nargs="?", type=bool, default=True, help="Normalize dataset using feature scaling")
parser_.add_argument("--graph_k", nargs="?", type=int, default=0, help="K-Neighbours for graph connections, use 0 for manual connections")
parser_.add_argument("--batch_size", nargs="?", type=int, default=1, help="Batch Size")
parser_.add_argument("--network", nargs="?", default="GCN_test", help="The network model to train")
args_ = parser_.parse_args()
logging.basicConfig(stream=sys.stdout, level=logging.INFO)
# Experiment name (and log filename) follows the format network-normalization-graph_k-datetime
experiment_str_ = "eval-{0}-{1}-{2}-{3}-{4}".format(
args_.split,
args_.network,
args_.normalize,
args_.graph_k,
datetime.datetime.now().strftime('%b%d_%H-%M-%S'))
# Add file handler to logging system to simultaneously log information to console and file
log_formatter_ = logging.Formatter("%(asctime)s [%(threadName)-12.12s] [%(levelname)-5.5s] %(message)s")
file_handler_ = logging.FileHandler("{0}/{1}.log".format(args_.log_path, experiment_str_))
file_handler_.setFormatter(log_formatter_)
log.addHandler(file_handler_)
evaluate(args_)
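# Example invocation (illustrative paths; the checkpoint file is hypothetical):
#   python evaluate.py --checkpoint checkpoints/gcn.pth --split test --network GCN_test --batch_size 1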
```
#### File: tactile-gcn/transforms/tograph.py
```python
import logging
import torch
from torch_geometric.data import Data
import numpy as np
import scipy.spatial
log = logging.getLogger(__name__)
class ToGraph(object):
def __init__(self, k):
assert(k >= 0), 'graph_k must be equal or greater than 0'
# Actually, this would be X
self.m_taxels_y = [0.386434851,0.318945051,0.08737268,0.083895199,-0.018624877,-0.091886816,-0.1366595,-0.223451775,-0.320752549,-0.396931929,0.386434851,0.318945051,0.08737268,0.083895199,-0.018624877,-0.091886816,-0.1366595,-0.223451775,-0.320752549,-0.396931929,0.25875305,0.170153841,0.170153841,0.075325086]
# Actually, this would be Y
self.m_taxels_z = [-0.108966104,-0.205042252,-0.128562247,-0.235924865,-0.30011705,-0.12043608,-0.237549685,-0.270674659,-0.199498368,-0.100043884,-0.108966104,-0.205042252,-0.128562247,-0.235924865,-0.30011705,-0.12043608,-0.237549685,-0.270674659,-0.199498368,-0.100043884,-0.252337663,-0.274427927,-0.274427927,-0.298071391]
# Actually, this would be Z
self.m_taxels_x = [0.156871012,0.12070609,0.281981384,0.201566857,0.094918748,0.284956139,0.187122746,0.071536904,0.127771244,0.151565706,-0.156871012,-0.12070609,-0.281981384,-0.201566857,-0.094918748,-0.284956139,-0.187122746,-0.071536904,-0.127771244,-0.151565706,0,0.072909607,-0.072909607,0]
if k == 0: ## Use manual connections
self.m_edge_origins = [0, 1, 1, 2, 3, 3, 3, 3, 4, 4, 4, 4, 5, 6, 6, 6, 6, 7, 7, 7, 7, 8, 8, 8, 9, 10, 11, 11, 12, 13, 13, 13, 14, 14, 14, 15, 16, 16, 16, 16, 17, 17, 17, 17, 18, 18, 18, 19, 20, 20, 20, 20, 21, 21, 21, 21, 22, 22, 22, 22, 23, 23, 23, 23, 23, 23]
self.m_edge_ends = [1, 0, 20, 3, 2, 4, 21, 23, 3, 6, 7, 23, 6, 5, 4, 7, 8, 4, 6, 8, 17, 6, 7, 9, 8, 11, 10, 20, 13, 12, 23, 14, 13, 16, 17, 16, 15, 14, 17, 18, 14, 16, 18, 7, 17, 16, 19, 18, 1, 11, 21, 22, 3, 20, 22, 23, 13, 20, 21, 23, 21, 22, 3, 13, 4, 14]
else:
points_ = np.transpose(np.vstack((self.m_taxels_x, self.m_taxels_y)), (1, 0))
tree_ = scipy.spatial.KDTree(points_)
_, idxs_ = tree_.query(points_, k=k + 1) # Closest point will be the point itself, so k + 1
idxs_ = idxs_[:, 1:] # Remove closest point, which is the point itself
self.m_edge_origins = np.repeat(np.arange(len(points_)), k)
self.m_edge_ends = np.reshape(idxs_, (-1))
def __call__(self, sample):
# Index finger
graph_x_ = torch.tensor(np.vstack((sample['data_index'], sample['data_middle'], sample['data_thumb'])), dtype=torch.float).transpose(0, 1)
graph_edge_index_ = torch.tensor([self.m_edge_origins, self.m_edge_ends], dtype=torch.long)
graph_pos_ = torch.tensor(np.vstack((self.m_taxels_x, self.m_taxels_y, self.m_taxels_z)), dtype=torch.float).transpose(0, 1)
graph_y_ = torch.tensor([sample['slipped']], dtype=torch.long)
data_ = Data(x = graph_x_,
edge_index = graph_edge_index_,
pos = graph_pos_,
y = graph_y_)
return data_
def __repr__(self):
return "{}".format(self.__class__.__name__)
``` |
{
"source": "3d-pli/fastpli",
"score": 3
} |
#### File: model/sandbox/seeds.py
```python
import numpy as np
def triangular_grid(width,
height,
spacing,
center=False,
sort=True,
endpoint=True):
"""
    Generates a 2d triangular grid of seed points inside [0,width]x[0,height].
Parameters
----------
width, height : float
length and height of grid [0,width]x[0,height]
spacing : float
distance between seed points
center : bool, optional
If false, the seed points will be inside [0,width]x[0,height],
beginning at (0,0).
If true, the grid will be inside
        [-width/2,width/2]x[-height/2,height/2] with a seed point at (0,0).
    sort : bool, optional
        If true, the returned seed points are lexsorted along x,y.
endpoint : bool, optional
If false, [0,width)x[0,height) or
[-width/2,width/2)x[-height/2,height/2).
If true, [0,width]x[0,height] or
[-width/2,width/2]x[-height/2,height/2].
Returns
-------
res : (nx2)-array
seed points
"""
x0 = 0
y0 = 0
dx = spacing / 2 # np.sin(np.deg2rad(30))
dy = spacing * np.sqrt(3) / 2 # np.cos(np.deg2rad(30))
if center:
width = width / 2
height = height / 2
if endpoint:
if width % spacing == 0 or (width + dx) % spacing == 0:
width += dx / 2
if height % spacing == 0 or (height + dy) % spacing == 0:
height += dy / 2
grid_0 = np.mgrid[x0:width:spacing, y0:height:2 * dy].reshape(2, -1)
grid_1 = np.mgrid[x0 + dx:width:spacing,
y0 + dy:height:2 * dy].reshape(2, -1)
grid = np.concatenate((grid_0, grid_1), axis=1)
if center:
# mirror x axis
grid_mirror = grid[:, grid[1, :] != 0]
grid_mirror[1, :] *= -1
grid = np.concatenate((grid, grid_mirror), axis=1)
# mirror y axis
grid_mirror = grid[:, grid[0, :] != 0]
grid_mirror[0, :] *= -1
grid = np.concatenate((grid, grid_mirror), axis=1)
if sort:
idx = np.lexsort((grid[0, :], grid[1, :]))
grid = grid[:, idx]
return np.ascontiguousarray(grid.T)
def triangular_circle(radius, spacing, center=(0, 0), radii=0):
"""
    Generates a 2d triangular grid inside a circle.
Parameters
----------
radius : float
radius of circle
spacing : float
distance between seed points
center : (2,)-array_like
center of circle
radii : float or (n,)-array_like, optional
        seed points will be interpreted as circles with a global radius or
        individual radii
Returns
-------
res : (nx2)-array
seed points
"""
seeds = triangular_grid(2 * (radius + spacing), 2 *
(radius + spacing), spacing, True) + center
return crop_circle(radius, seeds, center, radii)
def crop_rectangle(a, b, seeds, radii=0):
"""
Crops a sequence of 2-dim points inside a rectangle.
Parameters
----------
a, b : float
cropping between [0,a]x[0,b]
a, b : (2,)-array_like
cropping between [a[0],b[0]]x[a[1],b[1]]
seeds : (n,2)-array_like
to be cropped seed points
radii : float or (n,)-array_like, optional
        seed points will be interpreted as circles with a global radius or
        individual radii
Returns
-------
res : (nx2)-array
cropped seed points
"""
seeds = np.array(seeds, ndmin=2, copy=False)
radii = np.array(radii, ndmin=2, copy=False)
if isinstance(a, (int, float)) and isinstance(b, (int, float)):
a, b = [0, 0], [a, b]
if seeds.ndim != 2 or seeds.shape[1] != 2:
raise TypeError('seeds : (nx2)-array')
return seeds[np.logical_and(np.all(seeds - radii.T >= a, 1),
np.all(seeds + radii.T <= b, 1))]
def crop_circle(radius, seeds, center=(0, 0), radii=0):
"""
Crops a sequence of 2-dim points inside a circle.
Parameters
----------
radius : float
radius of circle area
seeds : (n,2)-array_like
to be cropped seed points
center : (2,)-array_like
center of circle
radii : float or (n,)-array_like, optional
        seed points will be interpreted as circles with a global radius or
        individual radii
Returns
-------
res : (nx2)-array
cropped seed points
"""
seeds = np.array(seeds, ndmin=2, copy=False)
radii = np.array(radii, ndmin=1, copy=False)
if seeds.ndim != 2 or seeds.shape[1] != 2:
raise TypeError('seeds : (nx2)-array')
return seeds[(np.sum((seeds - center)**2, 1)) <= (radius - radii.T)**2]
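# Minimal usage sketch (arbitrary example values):
if __name__ == '__main__':
    seeds = triangular_grid(100, 100, 10, center=True)      # hexagonal lattice in [-50,50]^2
    seeds = crop_circle(40, seeds, center=(0, 0), radii=5)   # keep seeds whose circles fit inside r=40
    print(seeds.shape)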
```
#### File: fastpli/objects/fibers.py
```python
import copy
import numpy as np
import numba
from .layers import Layers
"""
--------------------------------------------------------------------------------
--------------------------------------FIBER-------------------------------------
--------------------------------------------------------------------------------
"""
def _convert_to_fiber(data, dtype):
""" converts data into (n,4)-np.array"""
if data is None:
return np.empty((), dtype)
if isinstance(data, Fiber):
return data
data = np.atleast_2d(np.array(data, dtype=dtype, copy=True))
if data.ndim != 2:
raise TypeError('Wrong shape: expected (n,4)')
if data.shape[1] != 4:
raise TypeError('Wrong shape: expected (n,4)')
if not np.issubdtype(data.dtype, np.floating):
raise TypeError('Wrong type: has to be floating')
return data
class Fiber:
__is_frozen = False
def __setattr__(self, key, value):
if self.__is_frozen and not hasattr(self, key):
raise TypeError('%r is a frozen class' % self)
object.__setattr__(self, key, value)
def __freeze(self):
self.__is_frozen = True
def __init__(self, data, dtype=float):
self._data = _convert_to_fiber(data, dtype)
self.__freeze()
def __getitem__(self, item):
return self._data[item]
def __setitem__(self, item, value):
self._data[item] = value
def __str__(self):
return self._data.__str__()
def __repr__(self):
return self._data.__repr__()
def copy(self):
""" deep copy of class """
return copy.deepcopy(self)
def __iter__(self):
return self._data.__iter__()
def __next__(self):
return self._data.__next__()
def __len__(self):
return self._data.shape[-1]
@property
def shape(self):
""" returns shape of data """
return self._data.shape
@property
def dtype(self):
""" returns np.dtype of data """
return self._data.dtype
def as_array(self):
"""
        Returns a copy of the data as a np.array
Returns
-------
res : (n,4)-array
fiber as np.array
"""
return self._data.copy()
def cast(self, dtype):
"""
Changes datatype to new type
Parameters
----------
dtype : type
numpy types are fully supported
Returns
-------
res : Fiber
casted fiber
"""
return Fiber(self, dtype)
@property
def points(self):
"""
Returns xyz data as np.array
Returns
-------
res : (n,3)-array
fiber points as numpy array
"""
return self._data[:, :-1]
@points.setter
def points(self, value):
self._data[:, :-1] = value
@property
def radii(self):
"""
Returns radii data as np.array
Returns
-------
res : (n,1)-array
fiber radii as numpy array
"""
return self._data[:, -1]
@radii.setter
def radii(self, value):
self._data[:, -1] = value
def scale(self, scale, mode='all'):
"""
Scales fiber
Parameters
----------
scale : float
scale factor
mode : str, optional
'all', 'points' or 'radii' will be scaled
Returns
-------
res : Fiber
scaled Fiber
"""
data = self._data.copy()
if mode == 'all':
data[:] *= scale
elif mode == 'points':
data[:, :3] *= scale
elif mode == 'radii':
data[:, -1] *= scale
else:
raise ValueError('mode = [all, points, radii]')
return Fiber(data)
def rotate(self, rot, offset=None):
"""
Rotates fiber around offset
Parameters
----------
rot : (3,3)-array_like
rotation matrix
offset : 3d-array-array_like, optional
offset for rotation center
Returns
-------
res : Fiber
rotated fiber
"""
data = self._data.copy()
rot = np.array(rot, copy=False)
if offset is None:
data[:, :3] = np.dot(rot, data[:, :3].T).T
else:
offset = np.array(offset, copy=False)
data[:, :3] = np.dot(rot, (data[:, :3] - offset).T).T + offset
return Fiber(data)
def translate(self, offset):
"""
Translates fiber
Parameters
----------
offset : 3d-array-array_like
offset to translate
Returns
-------
res : Fiber
translated fiber
"""
data = self._data.copy()
offset = np.array(offset, copy=False)
data[:, :3] += offset
return Fiber(data)
def apply(self, fun):
"""
Applies function to fiber
Parameters
----------
fun : function
Returns
-------
res : Fiber
fun(fiber)
"""
fiber = self.copy()
fiber[:] = fun(fiber[:])
return fiber
def apply_to_points(self, fun):
"""
Applies function to fiber positions
Parameters
----------
fun : function
Returns
-------
res : Fiber
fun(fiber)
"""
fiber = self.copy()
fiber[:, :-1] = fun(fiber[:, :-1])
return fiber
def apply_to_radii(self, fun):
"""
Applies function to fiber radii
Parameters
----------
fun : function
Returns
-------
res : Fiber
fun(fiber)
"""
fiber = self.copy()
fiber[:, -1] = fun(fiber[:, -1])
return fiber
def cut(self, voi):
"""
Cut fiber into voi. The cutting process can create multiple fibers.
        It checks every fiber_segment_aabb to see if it overlaps with the voi.
Parameters
----------
voi : [xmin, ymin, zmin],[xmax,ymax,zmax]
Volume of interest of which fibers to include. E.g. same as in
Simulation
Returns
-------
res : FiberBundle
            the cut fiber as a fiber_bundle
"""
fibers = []
start = 0
voi = np.array(voi)
for i in range(self._data.shape[0] - 1):
if not _fiber_segment_aabb_in_aabb(
self._data[i, :], self._data[i + 1, :], voi[0], voi[1]):
if start != i:
fibers.append(self._data[start:i + 1])
start = i + 1
if start != i + 1:
fibers.append(self._data[start:])
return FiberBundle(fibers)
def cut_sphere(self, radius, center=(0, 0, 0)):
"""
Cut fiber into sphere. The cutting process can create multiple fibers.
        It checks every fiber_segment_aabb to see if it overlaps with the sphere.
Parameters
----------
radius : float
radius of cutting sphere
center : 3d-array
center of cutting sphere
Returns
-------
res : FiberBundle
            the cut fiber_bundle
"""
center = np.array(center, copy=False)
fibers = []
start = 0
for i in range(self._data.shape[0] - 1):
if not _fiber_segment_aabb_in_sphere(
self._data[i, :], self._data[i + 1, :], radius, center):
if start != i:
fibers.append(self._data[start:i + 1])
start = i + 1
if start != i + 1:
fibers.append(self._data[start:])
return FiberBundle(fibers)
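# Illustrative use of the Fiber class above (arbitrary coordinates):
#
#     f = Fiber([[0, 0, 0, 1], [10, 0, 0, 1]])
#     f = f.rotate(np.eye(3)).translate([0, 0, 1])
#     fb = f.cut([[-5, -5, -5], [5, 5, 5]])   # FiberBundle with the parts whose
#                                             # segment AABBs overlap the box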
@numba.njit(cache=True)
def _fiber_segment_aabb_in_aabb(c0, c1, vmin, vmax):
c_min = np.array([
min(c0[0] - c0[-1], c1[0] - c1[-1]),
min(c0[1] - c0[-1], c1[1] - c1[-1]),
min(c0[2] - c0[-1], c1[2] - c1[-1])
])
c_max = np.array([
max(c0[0] + c0[-1], c1[0] + c1[-1]),
max(c0[1] + c0[-1], c1[1] + c1[-1]),
max(c0[2] + c0[-1], c1[2] + c1[-1])
])
for i in range(3):
if c_min[i] > vmax[i] or c_max[i] < vmin[i]:
return False
return True
@numba.njit(cache=True)
def _fiber_segment_aabb_in_sphere(c0, c1, r, center):
c_min = np.array([
min(c0[0] - c0[-1], c1[0] - c1[-1]),
min(c0[1] - c0[-1], c1[1] - c1[-1]),
min(c0[2] - c0[-1], c1[2] - c1[-1])
])
c_max = np.array([
max(c0[0] + c0[-1], c1[0] + c1[-1]),
max(c0[1] + c0[-1], c1[1] + c1[-1]),
max(c0[2] + c0[-1], c1[2] + c1[-1])
])
dmin = 0
for i in range(3):
if center[i] < c_min[i]:
dmin += (center[i] - c_min[i])**2
elif center[i] > c_max[i]:
dmin += (center[i] - c_max[i])**2
return dmin <= r**2
"""
--------------------------------------------------------------------------------
-----------------------------------FIBERBUNDLE----------------------------------
--------------------------------------------------------------------------------
"""
def _convert_to_fiber_bundle(data, layers, dtype):
""" converts data into FiberBundle"""
if data is None:
return [], None
if isinstance(data, Fiber):
return [data], Layers(layers)
if isinstance(data, FiberBundle):
return data[:], Layers(layers)
if not isinstance(data, (list, tuple)):
raise TypeError(f'data is not a list: {type(data)}')
fiber_bundle = []
for fiber in data:
fiber_bundle.append(Fiber(fiber))
return fiber_bundle, Layers(layers)
class FiberBundle:
__is_frozen = False
def __setattr__(self, key, value):
if self.__is_frozen and not hasattr(self, key):
raise TypeError('%r is a frozen class' % self)
object.__setattr__(self, key, value)
def __freeze(self):
self.__is_frozen = True
def __init__(self, data=None, layers=None, dtype=None):
self._data, self._layers = _convert_to_fiber_bundle(data, layers, dtype)
self.__freeze()
def __getitem__(self, item):
return self._data[item]
def __setitem__(self, item, value):
self._data[item] = Fiber(value)
def __delitem__(self, item):
del self._data[item]
def __str__(self):
return self._data.__str__()
def __repr__(self):
return self._data.__repr__()
def copy(self):
""" deep copy of class """
return copy.deepcopy(self)
def __iter__(self):
return iter(self._data)
def __next__(self):
return next(self._data)
def __len__(self):
return len(self._data)
@property
def dtype(self):
""" dtype of containing Fibers """
if len(self) > 1:
return self._data[0].dtype
else:
return None
def as_array(self):
"""
        Returns a copy of the data as list(np.array)
Returns
-------
res : [(n,4)-array]
fiber bundle as list(np.array)
"""
return [f.as_array() for f in self]
@property
def layers(self):
"""
Returns layer properties of fiber_bundle
Returns
-------
res : Layers
Layers class containing [Layer].
"""
return self._layers
@layers.setter
def layers(self, value):
self._layers = Layers(value)
def append(self, fiber):
""" appends Fiber to FiberBundle """
self._data.append(Fiber(fiber))
def extend(self, fibers):
""" extends Fiber to FiberBundle """
for fiber in fibers:
self._data.append(Fiber(fiber))
def cast(self, dtype):
"""
Cast objects into new type
Parameters
----------
dtype : type
Returns
-------
res : fiber_bundle
casted fiber_bundle
"""
fiber_bundle = FiberBundle()
for fiber in self:
fiber_bundle.append(fiber.cast(dtype))
return fiber_bundle
def scale(self, scale, mode='all'):
"""
Rescales fiber_bundle
Parameters
----------
scale : float
scale factor
mode : str, optional
'all', 'points' or 'radii' will be scaled
Returns
-------
res : FiberBundle
scaled fiber_bundle
"""
fiber_bundle = FiberBundle()
for fiber in self:
fiber_bundle.append(fiber.scale(scale, mode))
return fiber_bundle
def rotate(self, rot, offset=None):
"""
Rotates fiber_bundle around offset
Parameters
----------
rot : (3,3)-array_like
rotation matrix
offset : 3d-array-array_like, optional
offset for rotation center
Returns
-------
res : FiberBundle
rotated fiber_bundle
"""
rot = np.array(rot, copy=False)
if offset is not None:
offset = np.array(offset, copy=False)
fiber_bundle = FiberBundle()
for fiber in self:
fiber_bundle.append(fiber.rotate(rot, offset))
return fiber_bundle
def translate(self, offset):
"""
Translates fiber_bundle
Parameters
----------
offset : 3d-array-array_like
offset to translate
Returns
-------
res : FiberBundle
translated fiber_bundle
"""
offset = np.array(offset, copy=False)
fiber_bundle = FiberBundle()
for fiber in self:
fiber_bundle.append(fiber.translate(offset))
return fiber_bundle
def apply(self, fun):
"""
Applies function to fibers
Parameters
----------
fun : function
Returns
-------
res : FiberBundle
fun(fiber_bundle)
"""
fiber_bundle = FiberBundle()
for fiber in self:
fiber_bundle.append(fiber.apply(fun))
return fiber_bundle
def apply_to_points(self, fun):
"""
Applies function to fibers positions
Parameters
----------
fun : function
Returns
-------
res : FiberBundle
fun(fiber_bundle.points)
"""
fiber_bundle = FiberBundle()
for fiber in self:
fiber_bundle.append(fiber.apply_to_points(fun))
return fiber_bundle
def apply_to_radii(self, fun):
"""
Applies function to fibers radii
Parameters
----------
fun : function
Returns
-------
res : FiberBundle
fun(fiber_bundle.radii)
"""
fiber_bundle = FiberBundle()
for fiber in self:
fiber_bundle.append(fiber.apply_to_radii(fun))
return fiber_bundle
def cut(self, voi):
"""
Cut fiber into voi. The cutting process can create multiple fibers.
        It checks every fiber_segment_aabb to see if it overlaps with the voi.
Parameters
----------
voi : [xmin, ymin, zmin],[xmax,ymax,zmax]
Volume of interest of which fibers to include. E.g. same as in
Simulation
Returns
-------
res : FiberBundle
            the cut fiber_bundle
"""
fiber_bundle = FiberBundle()
for fiber in self:
fiber_bundle.extend(fiber.cut(voi))
return fiber_bundle
def cut_sphere(self, radius, center=(0, 0, 0)):
"""
Cut fiber into sphere. The cutting process can create multiple fibers.
        It checks every fiber_segment_aabb to see if it overlaps with the sphere.
Parameters
----------
radius : float
radius of cutting sphere
center : 3d-array
center of cutting sphere
Returns
-------
res : FiberBundle
            the cut fiber_bundle
"""
fiber_bundle = FiberBundle()
for fiber in self:
fiber_bundle.extend(fiber.cut_sphere(radius, center))
return fiber_bundle
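# Illustrative use of the FiberBundle class above (layer tuples follow the same
# format as in the unit tests; all values are examples only):
#
#     fb = FiberBundle([[[0, 0, 0, 1], [10, 0, 0, 1]]],
#                      layers=[(0.5, -0.004, 10, 'p'), (1.0, 0.004, 1, 'r')])
#     fb = fb.translate([0, 0, 1]).cut([[-5, -5, -5], [5, 5, 5]])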
"""
--------------------------------------------------------------------------------
----------------------------------FIBERBUNDLES----------------------------------
--------------------------------------------------------------------------------
"""
def _convert_to_fiber_bundles(data, layers, dtype):
""" converts data into FiberBundle"""
if data is None:
return []
if isinstance(data, Fiber):
return [FiberBundle(data, layers)]
if isinstance(data, FiberBundle):
return [data]
if isinstance(data, FiberBundles):
return data[:]
if not isinstance(data, (list, tuple)):
raise TypeError('data is not a list')
if layers is not None:
if len(data) != len(layers):
raise TypeError('[FiberBundle] and [Layers] differ in length')
else:
layers = [None] * len(data)
fiber_bundles = []
for fiber_bundle, lys in zip(data, layers):
fiber_bundles.append(FiberBundle(fiber_bundle, lys))
return fiber_bundles
class FiberBundles():
__is_frozen = False
def __setattr__(self, key, value):
if self.__is_frozen and not hasattr(self, key):
raise TypeError('%r is a frozen class' % self)
object.__setattr__(self, key, value)
def __freeze(self):
self.__is_frozen = True
def __init__(self, data=None, layers=None, dtype=None):
self._data = _convert_to_fiber_bundles(data, layers, dtype)
self.__freeze()
def __getitem__(self, item):
return self._data[item]
def __setitem__(self, item, value):
self._data[item] = FiberBundle(value)
def __delitem__(self, item):
del self._data[item]
def __str__(self):
return self._data.__str__()
def __repr__(self):
return self._data.__repr__()
def copy(self):
""" deep copy of class """
return copy.deepcopy(self)
def __iter__(self):
return iter(self._data)
def __next__(self):
return next(self._data)
def __len__(self):
return len(self._data)
@property
def dtype(self):
if len(self) > 1:
return self[0].dtype
else:
return None
def as_array(self):
"""
        Returns a copy of the data as list(list(np.array))
Returns
-------
res : [[(n,4)-array]]
fiber bundle as list(list(np.array))
"""
return [fb.as_array() for fb in self]
@property
def layers(self):
"""
Returns layer properties of fiber_bundles
Returns
-------
res : [Layers]
[Layers] class containing [Layer].
The element position corresponds to FiberBundle index
"""
return [fb.layers for fb in self]
@layers.setter
def layers(self, value):
if len(value) != len(self):
raise ValueError('Wrong number of [layers]')
for fb, lys in zip(self, value):
fb.layers = lys
def append(self, fiber_bundle):
""" Appends FiberBundle """
self._data.append(FiberBundle(fiber_bundle))
def extend(self, fiber_bundles):
""" Extends FiberBundle """
for fiber_bundle in fiber_bundles:
self._data.append(FiberBundle(fiber_bundle))
def cast(self, dtype):
"""
Cast objects into new type
Parameters
----------
dtype : type
Returns
-------
res : FiberBundles
fiber_bundles
"""
fiber_bundles = FiberBundles()
for fb in self:
fiber_bundles.append(fb.cast(dtype))
return fiber_bundles
def scale(self, scale, mode='all'):
"""
Rescales fiber_bundles
Parameters
----------
scale : float
scale factor
mode : str, optional
'all', 'points' or 'radii' will be scaled
Returns
-------
res : FiberBundles
scaled fiber_bundles
"""
fiber_bundles = FiberBundles()
for fb in self:
fiber_bundles.append(fb.scale(scale, mode))
return fiber_bundles
def rotate(self, rot, offset=None):
"""
Rotates fiber_bundles around offset
Parameters
----------
rot : (3,3)-array_like
rotation matrix
offset : 3d-array-array_like, optional
offset for rotation center
Returns
-------
res : FiberBundles
rotated fiber_bundles
"""
rot = np.array(rot, copy=False)
if offset is not None:
offset = np.array(offset, copy=False)
fiber_bundles = FiberBundles()
for fb in self:
fiber_bundles.append(fb.rotate(rot, offset))
return fiber_bundles
def translate(self, offset):
"""
Translates fiber_bundles
Parameters
----------
offset : 3d-array-array_like
offset to translate
Returns
-------
res : FiberBundles
translated fiber_bundles
"""
offset = np.array(offset, copy=False)
fiber_bundles = FiberBundles()
for fb in self:
fiber_bundles.append(fb.translate(offset))
return fiber_bundles
def apply(self, fun):
"""
Applies function to fibers
Parameters
----------
fun : function
Returns
-------
res : FiberBundles
fun(fiber_bundles)
"""
fiber_bundles = FiberBundles()
for fb in self:
fiber_bundles.append(fb.apply(fun))
return fiber_bundles
def apply_to_points(self, fun):
"""
Applies function to fibers positions
Parameters
----------
fun : function
Returns
-------
res : FiberBundles
fun(fiber_bundles[...].points)
"""
fiber_bundles = FiberBundles()
for fb in self:
fiber_bundles.append(fb.apply_to_points(fun))
return fiber_bundles
def apply_to_radii(self, fun):
"""
Applies function to fibers radii
Parameters
----------
fun : function
Returns
-------
res : FiberBundles
fun(fiber_bundles[...].radii)
"""
fiber_bundles = FiberBundles()
for fb in self:
fiber_bundles.append(fb.apply_to_radii(fun))
return fiber_bundles
def cut(self, voi):
"""
Cut fiber into voi. The cutting process can create multiple fibers.
        It checks every fiber_segment_aabb to see if it overlaps with the voi.
Parameters
----------
voi : [xmin, ymin, zmin],[xmax,ymax,zmax]
Volume of interest of which fibers to include. E.g. same as in
Simulation
Returns
-------
res : FiberBundles
            the cut fiber_bundles
"""
fiber_bundles = FiberBundles()
for fb in self:
fiber_bundles.append(fb.cut(voi))
return fiber_bundles
def cut_sphere(self, radius, center=(0, 0, 0)):
"""
        Cut fibers into a sphere. The cutting process can create multiple fibers.
        It checks every fiber_segment_aabb to see if it overlaps with the sphere.
Parameters
----------
radius : float
radius of cutting sphere
center : 3d-array
center of cutting sphere
Returns
-------
res : FiberBundles
            the cut fiber_bundles
"""
center = np.array(center, copy=False)
fiber_bundles = FiberBundles()
for fb in self:
fiber_bundles.append(fb.cut_sphere(radius, center))
return fiber_bundles
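# Illustrative use of the FiberBundles container above (one bundle holding one
# fiber; values are arbitrary):
#
#     fbs = FiberBundles([[[[0, 0, 0, 1], [10, 0, 0, 1]]]])
#     fbs = fbs.scale(2).rotate(np.eye(3)).translate([1, 1, 1]).cast(np.float32)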
```
#### File: tests/analysis/orientation_test.py
```python
import unittest
import numpy as np
import fastpli.io
import fastpli.analysis
np.random.seed(42)
class MainTest(unittest.TestCase):
def test_remap_direction(self):
phi = np.linspace(-42 * np.pi, 42 * np.pi, 1000)
phi = fastpli.analysis.orientation.remap_direction(phi)
self.assertTrue(np.all(phi >= 0) or np.all(phi < np.pi))
def test_remap_orientation(self):
phi, theta = np.mgrid[-42 * np.pi:42 * np.pi:100j,
-42 * np.pi:42 * np.pi:100j]
phi = phi.ravel()
theta = theta.ravel()
phi, theta = fastpli.analysis.orientation.remap_orientation(phi, theta)
x = np.multiply(np.cos(phi), np.sin(theta))
y = np.multiply(np.sin(phi), np.sin(theta))
z = np.cos(theta)
x[z < 0] = -x[z < 0]
y[z < 0] = -y[z < 0]
z[z < 0] = -z[z < 0]
phi_ = np.arctan2(y, x)
theta_ = np.arccos(z)
phi_[phi_ < 0] += 2 * np.pi
self.assertTrue(np.allclose(phi, phi_))
self.assertTrue(np.allclose(theta, theta_))
def test_remap_spherical(self):
phi, theta = np.mgrid[-42 * np.pi:42 * np.pi:100j,
-42 * np.pi:42 * np.pi:100j]
phi = phi.ravel()
theta = theta.ravel()
phi, theta = fastpli.analysis.orientation.remap_spherical(phi, theta)
x = np.multiply(np.cos(phi), np.sin(theta))
y = np.multiply(np.sin(phi), np.sin(theta))
z = np.cos(theta)
phi_ = np.arctan2(y, x)
theta_ = np.arccos(z)
phi_[phi_ < 0] += 2 * np.pi
self.assertTrue(np.allclose(phi, phi_))
self.assertTrue(np.allclose(theta, theta_))
def test_fiber_bundles(self):
fastpli.analysis.orientation.fiber_bundles(
fastpli.io.fiber_bundles.load('tests/cube.dat'))
def test_histogram(self):
phi = np.random.normal(np.pi / 3, 0.5, 1000)
theta = np.random.normal(np.deg2rad(45), 0.5, 1000)
fastpli.analysis.orientation.histogram(phi,
theta,
n_phi=60,
n_theta=30,
weight_area=True)
fastpli.analysis.orientation.histogram(phi,
theta,
n_phi=60,
n_theta=30,
weight_area=False)
```
#### File: tests/objects/fibers_test.py
```python
import unittest
import numpy as np
import fastpli.objects
import fastpli.tools
class MainTest(unittest.TestCase):
def setUp(self):
self.fiber = fastpli.objects.Fiber([[0, 0, 0, 1], [1, 1, 1, 2]])
self.fiber_bundle = fastpli.objects.FiberBundle(self.fiber.copy())
self.fiber_bundles = fastpli.objects.FiberBundles(self.fiber.copy())
def test_init(self):
fastpli.objects.FiberBundle()
fastpli.objects.FiberBundles()
a = np.array([0, 0, 0, 0])
_ = fastpli.objects.Fiber([[0, 0, 0, 1], [0, 0, 1, 2]])
f = fastpli.objects.Fiber(a)
self.assertTrue(isinstance(f, fastpli.objects.Fiber))
f = fastpli.objects.Fiber(f)
self.assertTrue(isinstance(f, fastpli.objects.Fiber))
fb = fastpli.objects.FiberBundle([a])
self.assertTrue(isinstance(fb, fastpli.objects.FiberBundle))
fb = fastpli.objects.FiberBundle(f)
self.assertTrue(isinstance(fb, fastpli.objects.FiberBundle))
fb = fastpli.objects.FiberBundle(fb)
self.assertTrue(isinstance(fb, fastpli.objects.FiberBundle))
fbs = fastpli.objects.FiberBundles([[a]])
self.assertTrue(isinstance(fbs, fastpli.objects.FiberBundles))
fbs = fastpli.objects.FiberBundles(f)
self.assertTrue(isinstance(fbs, fastpli.objects.FiberBundles))
fbs = fastpli.objects.FiberBundles([f, f])
self.assertTrue(isinstance(fbs, fastpli.objects.FiberBundles))
fbs = fastpli.objects.FiberBundles(fbs)
self.assertTrue(isinstance(fbs, fastpli.objects.FiberBundles))
fb = fastpli.objects.FiberBundle([[[0, 0, 0, 1], [1, 1, 1, 1],
[2, 2, 2, 1]],
[[1, 0, 0, 1], [1, 1, 1, 1],
[2, 2, 2, 1]]])
for f in fb:
self.assertTrue(isinstance(f, fastpli.objects.Fiber))
self.assertTrue(isinstance(f._data, np.ndarray))
fbs = fastpli.objects.FiberBundles([[[[0, 0, 0, 1], [1, 1, 1, 1],
[2, 2, 2, 1]],
[[1, 0, 0, 1], [1, 1, 1, 1],
[2, 2, 2, 1]]],
[[[0, 1, 2, 3], [1, 2, 3, 4],
[2, 4, 5, 5]],
[[1, 1, 2, 3], [1, 2, 3, 4],
[2, 4, 5, 5]],
[[1, 1, 2, 3], [1, 2, 3, 4],
[2, 4, 5, 5]]]])
for fb in fbs:
self.assertTrue(isinstance(fb, fastpli.objects.FiberBundle))
for f in fb:
self.assertTrue(isinstance(f, fastpli.objects.Fiber))
self.assertTrue(isinstance(f._data, np.ndarray))
def test_type(self):
self.assertTrue(isinstance(self.fiber[:], np.ndarray))
self.assertTrue(self.fiber[:].dtype == float)
self.assertTrue(
fastpli.objects.Fiber([[1, 1, 1, 1]], np.float32).dtype ==
np.float32)
def test_layers(self):
fastpli.objects.FiberBundle(self.fiber_bundle,
[(0.333, -0.004, 10, 'p'),
(0.666, 0, 5, 'b'), (1.0, 0.004, 1, 'r')])
fastpli.objects.FiberBundles(self.fiber_bundles,
[[(0.333, -0.004, 10, 'p'),
(0.666, 0, 5, 'b'),
(1.0, 0.004, 1, 'r')]])
fb = fastpli.objects.FiberBundle([[[0, 0, 0, 1], [1, 1, 1, 1],
[2, 2, 2, 1]],
[[1, 0, 0, 1], [1, 1, 1, 1],
[2, 2, 2, 1]]])
fb = fastpli.objects.FiberBundle(fb, [(0.333, -0.004, 10, 'p'),
(0.666, 0, 5, 'b'),
(1.0, 0.004, 1, 'r')])
fbs = [[[[0, 0, 0, 1], [1, 1, 1, 1], [2, 2, 2, 1]],
[[1, 0, 0, 1], [1, 1, 1, 1], [2, 2, 2, 1]]],
[[[0, 1, 2, 3], [1, 2, 3, 4], [2, 4, 5, 5]],
[[1, 1, 2, 3], [1, 2, 3, 4], [2, 4, 5, 5]],
[[1, 1, 2, 3], [1, 2, 3, 4], [2, 4, 5, 5]]]]
fbs = fastpli.objects.FiberBundles(fbs,
[[(0.333, -0.004, 10, 'p'),
(0.666, 0, 5, 'b'),
(1.0, 0.004, 1, 'r')]] * len(fbs))
def test_resize(self):
fiber = self.fiber.scale(10)
self.assertTrue(np.array_equal(fiber[:], self.fiber[:] * 10))
fb = self.fiber_bundle.scale(10)
for f in fb:
self.assertTrue(np.array_equal(f[:], self.fiber[:] * 10))
fbs = self.fiber_bundles.scale(10)
for fb in fbs:
for f in fb:
self.assertTrue(np.array_equal(f[:], self.fiber[:] * 10))
fiber = self.fiber.scale(10, mode='points')
self.assertTrue(np.array_equal(fiber[:, :-2], self.fiber[:, :-2] * 10))
self.assertTrue(np.array_equal(fiber[:, -1], self.fiber[:, -1]))
fiber = self.fiber.scale(10, mode='radii')
self.assertTrue(np.array_equal(fiber[:, :-2], self.fiber[:, :-2]))
self.assertTrue(np.array_equal(fiber[:, -1], self.fiber[:, -1] * 10))
def test_rotation(self):
fiber = self.fiber.rotate(fastpli.tools.rotation.x(0))
self.assertTrue(np.array_equal(self.fiber[:], fiber[:]))
fiber = self.fiber.rotate(fastpli.tools.rotation.x(np.deg2rad(90)))
self.assertTrue(
np.allclose(fiber[:], np.array([[0, 0, 0, 1], [1, -1, 1, 2]])))
fiber = self.fiber.rotate(fastpli.tools.rotation.x(np.deg2rad(90)),
[1, 1, 1])
self.assertTrue(
np.allclose(fiber[:], np.array([[0, 2, 0, 1], [1, 1, 1, 2]])))
fiber_bundle = self.fiber_bundle.rotate(
fastpli.tools.rotation.x(np.deg2rad(90)), [1, 1, 1])
self.assertTrue(len(fiber_bundle) == len(self.fiber_bundle))
for f in fiber_bundle:
self.assertTrue(
np.allclose(f[:], np.array([[0, 2, 0, 1], [1, 1, 1, 2]])))
for fb in self.fiber_bundles:
for f in fb:
fiber = f.rotate(fastpli.tools.rotation.x(np.deg2rad(90)),
[1, 1, 1])
self.assertTrue(
np.allclose(fiber[:], np.array([[0, 2, 0, 1], [1, 1, 1,
2]])))
def test_translate(self):
fiber = self.fiber.translate([1, 1, 1])
self.assertTrue(
np.array_equal(fiber[:, :3],
self.fiber[:, :3] + np.array([1, 1, 1])))
self.assertTrue(np.array_equal(fiber[:, -1], self.fiber[:, -1]))
fiber_bundle = self.fiber_bundle.translate([1, 1, 1])
self.assertTrue(len(fiber_bundle) == len(self.fiber_bundle))
for f in fiber_bundle:
self.assertTrue(
np.array_equal(fiber[:, :3],
self.fiber[:, :3] + np.array([1, 1, 1])))
self.assertTrue(np.array_equal(f[:, -1], self.fiber[:, -1]))
for fb in self.fiber_bundles:
for f in fb:
fiber = f.translate([1, 1, 1])
self.assertTrue(
np.array_equal(fiber[:, :3],
self.fiber[:, :3] + np.array([1, 1, 1])))
self.assertTrue(np.array_equal(f[:, -1], self.fiber[:, -1]))
def test_apply(self):
# Fiber
fiber = fastpli.objects.Fiber([[0, 0, 0, 1], [1, 1, 1, 2]], dtype=float)
fiber_ = fiber.apply(lambda x: x + 1)
self.assertTrue(isinstance(fiber_, fastpli.objects.Fiber))
self.assertTrue(np.array_equal(fiber[:] + 1, fiber_[:]))
fiber_ = fiber.apply_to_points(lambda x: x + 1)
self.assertTrue(isinstance(fiber_, fastpli.objects.Fiber))
self.assertTrue(np.array_equal(fiber[:, :-1] + 1, fiber_[:, :-1]))
self.assertTrue(np.array_equal(fiber[:, -1], fiber_[:, -1]))
fiber_ = fiber.apply_to_radii(lambda x: x + 1)
self.assertTrue(isinstance(fiber_, fastpli.objects.Fiber))
self.assertTrue(np.array_equal(fiber[:, :-1], fiber_[:, :-1]))
self.assertTrue(np.array_equal(fiber[:, -1] + 1, fiber_[:, -1]))
# FiberBundle
fb = fastpli.objects.FiberBundle([[0, 0, 0, 1], [1, 1, 1, 2]],
dtype=float)
fb_ = fb.apply(lambda x: x + 1)
self.assertTrue(isinstance(fb_, fastpli.objects.FiberBundle))
self.assertTrue(np.array_equal(fb[0][:] + 1, fb_[0][:]))
fb_ = fb.apply_to_points(lambda x: x + 1)
self.assertTrue(isinstance(fb_, fastpli.objects.FiberBundle))
self.assertTrue(np.array_equal(fb[0][:, :-1] + 1, fb_[0][:, :-1]))
self.assertTrue(np.array_equal(fb[0][:, -1], fb_[0][:, -1]))
fb_ = fb.apply_to_radii(lambda x: x + 1)
self.assertTrue(isinstance(fb_, fastpli.objects.FiberBundle))
self.assertTrue(np.array_equal(fb[0][:, :-1], fb_[0][:, :-1]))
self.assertTrue(np.array_equal(fb[0][:, -1] + 1, fb_[0][:, -1]))
# FiberBundles
fbs = fastpli.objects.FiberBundles([[[0, 0, 0, 1], [1, 1, 1, 2]]],
dtype=float)
fbs_ = fbs.apply(lambda x: x + 1)
self.assertTrue(isinstance(fbs_, fastpli.objects.FiberBundles))
self.assertTrue(np.array_equal(fbs[0][0][:] + 1, fbs_[0][0][:]))
fbs_ = fbs.apply_to_points(lambda x: x + 1)
self.assertTrue(isinstance(fbs_, fastpli.objects.FiberBundles))
self.assertTrue(
np.array_equal(fbs[0][0][::, :-1] + 1, fbs_[0][0][:, :-1]))
self.assertTrue(np.array_equal(fbs[0][0][:, -1], fbs_[0][0][:, -1]))
fbs_ = fbs.apply_to_radii(lambda x: x + 1)
self.assertTrue(isinstance(fbs_, fastpli.objects.FiberBundles))
self.assertTrue(np.array_equal(fbs[0][0][:, :-1], fbs_[0][0][:, :-1]))
self.assertTrue(np.array_equal(fbs[0][0][:, -1] + 1, fbs_[0][0][:, -1]))
def test_cut(self):
fiber = fastpli.objects.Fiber([[0, 0, 0, 1], [1, 1, 1, 2]], dtype=float)
fibers = fiber.cut([[-10] * 3, [10] * 3])
self.assertTrue(len(fibers) == 1)
self.assertTrue(np.array_equal(fibers[0][:], fiber[:]))
fiber = fastpli.objects.Fiber([[0, 0, 0, 1], [10, 10, 10, 2]])
fibers = fiber.cut([[-5] * 3, [5] * 3])
self.assertTrue(len(fibers) == 1)
self.assertTrue(np.array_equal(fibers[0][:], fiber[:]))
fiber = fastpli.objects.Fiber([[0, 0, 0, 1], [10, 10, 10, 2],
[100, 100, 100, 2]])
fibers = fiber.cut([[-5] * 3, [5] * 3])
self.assertTrue(len(fibers) == 1)
self.assertTrue(fibers[0].shape[0] == 2)
self.assertTrue(not np.array_equal(fibers[0][:], fiber[:]))
fiber = fastpli.objects.Fiber([[0, 0, 0, 1], [10, 10, 10, 2],
[100, 100, 100, 2], [10, 10, 10, 2],
[0, 0, 0, 1]])
fibers = fiber.cut([[-5] * 3, [5] * 3])
self.assertTrue(len(fibers) == 2)
self.assertTrue(fibers[0].shape[0] == 2)
self.assertTrue(fibers[1].shape[0] == 2)
self.assertTrue(not np.array_equal(fibers[0][:], fiber[:]))
self.assertTrue(not np.array_equal(fibers[1][:], fiber[:]))
fiber_bundle = fastpli.objects.FiberBundle(fiber)
cut_fb = fiber_bundle.cut([[-5] * 3, [5] * 3])
fibers = cut_fb
self.assertTrue(len(fibers) == 2)
self.assertTrue(fibers[0].shape[0] == 2)
self.assertTrue(fibers[1].shape[0] == 2)
self.assertTrue(not np.array_equal(fibers[0][:], fiber[:]))
self.assertTrue(not np.array_equal(fibers[1][:], fiber[:]))
fiber_bundles = fastpli.objects.FiberBundles(fiber)
cut_fbs = fiber_bundles.cut([[-5] * 3, [5] * 3])
fibers = cut_fbs[0]
self.assertTrue(len(cut_fbs) == 1)
self.assertTrue(len(fibers) == 2)
self.assertTrue(fibers[0].shape[0] == 2)
self.assertTrue(fibers[1].shape[0] == 2)
self.assertTrue(not np.array_equal(fibers[0][:], fiber[:]))
self.assertTrue(not np.array_equal(fibers[1][:], fiber[:]))
fiber = fastpli.objects.Fiber([[0, 0, 0, 1], [10, 10, 10, 2]])
fibers = fiber.cut([[5] * 3, [6] * 3])
self.assertTrue(np.array_equal(fibers[0][:], fiber[:]))
if __name__ == '__main__':
unittest.main()
``` |
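The test suite above exercises the public container API of `fastpli.objects`. Below is a minimal usage sketch based only on the calls shown in these tests (`Fiber`, `FiberBundle`, `scale`, `rotate`, `translate`, `cut`); the tests suggest these methods return new objects rather than mutating in place, and the concrete coordinates here are made up for illustration.

```python
import numpy as np
import fastpli.objects
import fastpli.tools

# A fiber is a list of (x, y, z, radius) points.
fiber = fastpli.objects.Fiber([[0, 0, 0, 1], [10, 10, 10, 2]])
bundle = fastpli.objects.FiberBundle(fiber)

# Affine transformations, as exercised in test_resize / test_rotation / test_translate.
scaled = bundle.scale(2)
rotated = bundle.rotate(fastpli.tools.rotation.x(np.deg2rad(90)), [1, 1, 1])
shifted = bundle.translate([1, 1, 1])

# Cutting restricts the geometry to an axis-aligned bounding box (test_cut).
clipped = bundle.cut([[-5] * 3, [5] * 3])
print(len(clipped), clipped[0][:])
```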
{
"source": "3d-pli/QtSLIX",
"score": 2
} |
#### File: QtSLIX/ThreadWorkers/ParameterGenerator.py
```python
import numpy
import os
from PyQt5.QtCore import QThread, QObject, pyqtSignal
import SLIX
if SLIX.toolbox.gpu_available:
import cupy
__all__ = ['ParameterGeneratorWorker']
class ParameterGeneratorWorker(QObject):
"""
Worker class for the parameter generator.
This class gets called from the ParameterGeneratorWidget when the user clicks the "Generate" button.
"""
# Signal to inform the ParameterGeneratorWidget that the worker has finished
finishedWork = pyqtSignal()
# Signal to inform the ParameterGeneratorWidget what step the worker is currently working on
currentStep = pyqtSignal(str)
# Error message
errorMessage = pyqtSignal(str)
def __init__(self, filename: str, image: numpy.array,
output_folder: str, filtering: str,
filtering_parm_1: float, filtering_parm_2: float,
use_gpu: bool, detailed: bool, min: bool, max: bool,
avg: bool, direction: bool, nc_direction: bool,
peaks: bool, peak_width: bool, peak_distance: bool,
peak_prominence: bool, dir_correction: float):
"""
Initialize the worker.
Args:
filename: Filename of the measurement image
image: NumPy array of the measurement image
output_folder: Folder to save the generated images
filtering: Filtering method to use
filtering_parm_1: Parameter 1 of the filtering method
filtering_parm_2: Parameter 2 of the filtering method
use_gpu: Use GPU for calculations
detailed: Use detailed mode
min: Generate minima image
max: Generate maxima image
avg: Generate average image
direction: Generate direction image
nc_direction: Generate non-crossing direction image
peaks: Generate peaks image
peak_width: Generate peak width image
peak_distance: Generate peak distance image
peak_prominence: Generate peak prominence image
dir_correction: Direction correction in degrees
"""
super().__init__()
self.filename = filename
self.image = image
self.output_folder = output_folder
self.gpu = use_gpu
self.detailed = detailed
self.min = min
self.max = max
self.avg = avg
self.direction = direction
self.nc_direction = nc_direction
self.peaks = peaks
self.peak_width = peak_width
self.peak_distance = peak_distance
self.peak_prominence = peak_prominence
self.filtering = filtering
self.filtering_parameter_1 = filtering_parm_1
self.filtering_parameter_2 = filtering_parm_2
self.dir_correction = dir_correction
self.output_path_name = ""
self.output_data_type = ".tiff"
def get_output_path_name(self) -> str:
# Get the filename without the extension to determine the output file names
if os.path.isdir(self.filename):
filename_without_extension = SLIX._cmd.ParameterGenerator.get_file_pattern(self.filename)
else:
filename_without_extension = \
os.path.splitext(os.path.basename(self.filename))[0]
output_path_name = f'{self.output_folder}/{filename_without_extension}'
# Create the output folder if it does not exist
return output_path_name
def apply_filtering(self) -> None:
# If the thread is stopped, return
if QThread.currentThread().isInterruptionRequested():
self.finishedWork.emit()
return
# Apply filtering
if self.filtering != "None":
self.currentStep.emit(f"Filtering: {self.filtering} "
f"{self.filtering_parameter_1} "
f"{self.filtering_parameter_2}")
if self.filtering == "Fourier":
self.image = SLIX.preparation.low_pass_fourier_smoothing(self.image,
self.filtering_parameter_1,
self.filtering_parameter_2)
elif self.filtering == "Savitzky-Golay":
self.image = SLIX.preparation.savitzky_golay_smoothing(self.image,
self.filtering_parameter_1,
self.filtering_parameter_2)
def generate_minima(self) -> None:
if QThread.currentThread().isInterruptionRequested():
self.finishedWork.emit()
return
# Generate minima image
if self.min:
self.currentStep.emit("Generating minima...")
min_img = numpy.min(self.image, axis=-1)
SLIX.io.imwrite(f'{self.output_path_name}_min'
f'{self.output_data_type}', min_img)
def generate_maxima(self) -> None:
if QThread.currentThread().isInterruptionRequested():
self.finishedWork.emit()
return
# Generate maxima image
if self.max:
self.currentStep.emit("Generating maxima...")
max_img = numpy.max(self.image, axis=-1)
SLIX.io.imwrite(f'{self.output_path_name}_max'
f'{self.output_data_type}', max_img)
def generate_average(self) -> None:
if QThread.currentThread().isInterruptionRequested():
self.finishedWork.emit()
return
# Generate average image
if self.avg:
self.currentStep.emit("Generating average...")
avg_img = numpy.mean(self.image, axis=-1)
SLIX.io.imwrite(f'{self.output_path_name}_avg'
f'{self.output_data_type}', avg_img)
def generate_peaks(self, peaks: numpy.ndarray, detailed: bool, gpu: bool) -> None:
if QThread.currentThread().isInterruptionRequested():
self.finishedWork.emit()
return
# Generate all peaks to write low and high prominence peaks
if self.peaks:
self.currentStep.emit("Generating all peaks...")
all_peaks = SLIX.toolbox.peaks(self.image, use_gpu=gpu, return_numpy=True)
if not detailed:
SLIX.io.imwrite(f'{self.output_path_name}_high_prominence_peaks'
f'{self.output_data_type}',
numpy.sum(peaks, axis=-1,
dtype=numpy.uint16))
SLIX.io.imwrite(f'{self.output_path_name}_low_prominence_peaks'
f'{self.output_data_type}',
numpy.sum(all_peaks, axis=-1, dtype=numpy.uint16) -
numpy.sum(peaks, axis=-1,
dtype=numpy.uint16))
else:
SLIX.io.imwrite(f'{self.output_path_name}_all_peaks_detailed'
f'{self.output_data_type}', all_peaks)
SLIX.io.imwrite(
f'{self.output_path_name}_high_prominence_peaks_detailed'
f'{self.output_data_type}',
peaks
)
def generate_direction(self, peaks: numpy.ndarray, centroids: numpy.ndarray, gpu: bool) -> None:
if QThread.currentThread().isInterruptionRequested():
self.finishedWork.emit()
return
# Generate the direction images
if self.direction:
self.currentStep.emit("Generating direction...")
direction = SLIX.toolbox.direction(peaks, centroids, use_gpu=gpu, number_of_directions=3,
correction_angle=self.dir_correction, return_numpy=True)
for dim in range(direction.shape[-1]):
SLIX.io.imwrite(f'{self.output_path_name}_dir_{dim + 1}'
f'{self.output_data_type}',
direction[:, :, dim])
del direction
def generate_non_crossing_direction(self, peaks: numpy.ndarray, centroids: numpy.ndarray, gpu: bool) -> None:
if QThread.currentThread().isInterruptionRequested():
self.finishedWork.emit()
return
# Generate the non-crossing direction images
if self.nc_direction:
self.currentStep.emit("Generating non crossing direction...")
nc_direction = SLIX.toolbox.direction(peaks, centroids, use_gpu=gpu,
number_of_directions=1, return_numpy=True)
SLIX.io.imwrite(f'{self.output_path_name}_dir'
f'{self.output_data_type}',
nc_direction[:, :])
del nc_direction
def generate_peak_distance(self, peaks: numpy.ndarray, centroids: numpy.ndarray, detailed: bool, gpu: bool) -> None:
detailed_str = "_detailed" if detailed else ""
if QThread.currentThread().isInterruptionRequested():
self.finishedWork.emit()
return
# Generate the peak distance
if self.peak_distance:
self.currentStep.emit("Generating peak distance...")
if detailed:
peak_distance = SLIX.toolbox.peak_distance(peaks, centroids, use_gpu=gpu, return_numpy=True)
else:
peak_distance = SLIX.toolbox.mean_peak_distance(peaks, centroids, use_gpu=gpu, return_numpy=True)
SLIX.io.imwrite(f'{self.output_path_name}_peakdistance{detailed_str}'
f'{self.output_data_type}', peak_distance)
del peak_distance
def generate_peak_width(self, peaks: numpy.ndarray, detailed: bool, gpu: bool) -> None:
detailed_str = "_detailed" if detailed else ""
if QThread.currentThread().isInterruptionRequested():
self.finishedWork.emit()
return
# Generate the peak width
if self.peak_width:
self.currentStep.emit("Generating peak width...")
if detailed:
peak_width = SLIX.toolbox.peak_width(self.image, peaks, use_gpu=gpu, return_numpy=True)
else:
peak_width = SLIX.toolbox.mean_peak_width(self.image, peaks, use_gpu=gpu)
SLIX.io.imwrite(f'{self.output_path_name}_peakwidth{detailed_str}'
f'{self.output_data_type}', peak_width)
del peak_width
def generate_peak_prominence(self, peaks: numpy.ndarray, detailed: bool, gpu: bool) -> None:
detailed_str = "_detailed" if detailed else ""
if QThread.currentThread().isInterruptionRequested():
self.finishedWork.emit()
return
# Generate the peak prominence
if self.peak_prominence:
self.currentStep.emit("Generating peak prominence...")
if detailed:
prominence = SLIX.toolbox.peak_prominence(self.image, peaks, use_gpu=gpu, return_numpy=True)
else:
prominence = SLIX.toolbox.mean_peak_prominence(self.image, peaks, use_gpu=gpu, return_numpy=True)
SLIX.io.imwrite(f'{self.output_path_name}_peakprominence{detailed_str}'
f'{self.output_data_type}', prominence)
del prominence
def process(self) -> None:
"""
Process the image. This method is called from the ParameterGeneratorWidget.
Returns:
None
"""
self.output_path_name = self.get_output_path_name()
if os.path.isdir(self.filename):
SLIX.io.imwrite(f'{self.output_path_name}_Stack{self.output_data_type}', self.image)
gpu = self.gpu
detailed = self.detailed
try:
self.apply_filtering()
self.generate_minima()
self.generate_maxima()
self.generate_average()
if QThread.currentThread().isInterruptionRequested():
self.finishedWork.emit()
return
# The following steps require the significant peaks of the measurement ...
self.currentStep.emit("Generating significant peaks...")
peaks = SLIX.toolbox.significant_peaks(self.image, use_gpu=gpu, return_numpy=True)
if QThread.currentThread().isInterruptionRequested():
self.finishedWork.emit()
return
# ... as well as the centroids
self.currentStep.emit("Generating centroids...")
centroids = SLIX.toolbox.centroid_correction(self.image, peaks, use_gpu=gpu, return_numpy=True)
self.generate_peaks(peaks, detailed, gpu)
self.generate_direction(peaks, centroids, gpu)
self.generate_non_crossing_direction(peaks, centroids, gpu)
self.generate_peak_distance(peaks, centroids, detailed, gpu)
self.generate_peak_width(peaks, detailed, gpu)
self.generate_peak_prominence(peaks, detailed, gpu)
except cupy.cuda.memory.OutOfMemoryError as e:
self.errorMessage.emit("cupy.cuda.memory.OutOfMemoryError: Ran out of memory during computation. "
"Please disable the GPU option.")
if self.gpu:
mempool = cupy.get_default_memory_pool()
mempool.free_all_blocks()
# Tell connected components that we are done
self.finishedWork.emit()
``` |
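The worker above only emits signals; it does not create its own thread. A typical way to drive it is the standard Qt worker/`QThread` pattern sketched below. The helper name and the exact wiring used by QtSLIX's `ParameterGeneratorWidget` are assumptions; only the slot and signal names (`process`, `finishedWork`, `currentStep`) come from the class above.

```python
from PyQt5.QtCore import QThread

def run_parameter_worker(worker):
    """Hypothetical helper: run an already constructed ParameterGeneratorWorker."""
    thread = QThread()
    worker.moveToThread(thread)
    # Start the heavy computation once the thread's event loop is running.
    thread.started.connect(worker.process)
    # Shut the thread down again when the worker signals completion.
    worker.finishedWork.connect(thread.quit)
    worker.finishedWork.connect(worker.deleteLater)
    thread.finished.connect(thread.deleteLater)
    # Progress messages can be routed to any UI element; printed here for brevity.
    worker.currentStep.connect(print)
    thread.start()
    return thread  # keep a reference alive, otherwise the thread is garbage collected
```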
{
"source": "3d-pli/SLIX",
"score": 3
} |
#### File: SLIX/CPU/toolbox.py
```python
import numpy
import SLIX
from SLIX.CPU._toolbox import _direction, _prominence, _peakwidth, \
_peakdistance, _centroid, _centroid_correction_bases, _peaks
__all__ = ['TARGET_PROMINENCE', 'peaks',
'peak_width', 'peak_prominence',
'peak_distance', 'mean_peak_distance',
'background_mask', 'mean_peak_width',
'direction', 'num_peaks', 'mean_peak_prominence',
'unit_vectors', 'centroid_correction', 'normalize']
TARGET_PROMINENCE = 0.08
def background_mask(image):
"""
Creates a background mask by setting all image pixels with low scattering
signals to zero. As all background pixels are near zero for all images in
the SLI image stack, this method should remove most of the background
allowing for better approximations using the available features.
It is advised to use this function.
Args:
image: Complete SLI measurement image stack as a 2D/3D Numpy array
Returns:
numpy.array: 1D/2D-image which masks the background as True and
foreground as False
"""
avg_image = numpy.average(image, axis=-1)
# Set histogram to a range of 0 to 1 ignoring any outliers.
hist_avg_image = avg_image / numpy.percentile(avg_image, 99)
# Generate histogram in range of 0 to 1 to ignore outliers again. We search for values at the beginning anyway.
avg_hist, avg_bins = numpy.histogram(hist_avg_image, bins=256, range=(0, 1))
# Use SLIX to search for significant peaks in the histogram
avg_hist = avg_hist[numpy.newaxis, numpy.newaxis, ...]
peaks = SLIX.toolbox.significant_peaks(image=avg_hist).flatten()
# Reverse the histogram to search for minimal values with SLIX (again)
avg_hist = -avg_hist
reversed_peaks = SLIX.toolbox.significant_peaks(image=avg_hist).flatten()
# We can now calculate the index of our background threshold using the reversed_peaks
index = numpy.argmax(peaks) + numpy.argmax(reversed_peaks[numpy.argmax(peaks):])
# Reverse from 0 to 1 to original image scale and calculate the threshold position
threshold = avg_bins[index] * numpy.percentile(avg_image, 99)
# Return a mask with the calculated background image
return avg_image < threshold
def peaks(image):
"""
Detect all peaks from a full SLI measurement. Peaks will not be filtered
in any way. To detect only significant peaks, filter the peaks by using
the prominence as a threshold.
Args:
image: Complete SLI measurement image stack as a 2D/3D Numpy array
Returns:
2D/3D boolean image containing masking the peaks with `True`
"""
image = numpy.array(image, dtype=numpy.float32)
reshape = False
if len(image.shape) == 3:
reshape = True
[image_x, image_y, image_z] = image.shape
image = image.reshape(image_x * image_y, image_z)
resulting_image = _peaks(image)
if reshape:
image = image.reshape(image_x, image_y, image_z)
resulting_image = resulting_image.reshape(image_x, image_y, image_z)
return resulting_image.astype('bool')
def num_peaks(image=None, peak_image=None):
"""
Calculate the number of peaks from each line profile in an SLI image series
by detecting all peaks and applying thresholds to remove unwanted peaks.
Args:
image: Full SLI measurement (series of images) which is prepared for the
pipeline using the SLIX toolbox methods.
peak_image: Boolean NumPy array specifying the peak positions in the full SLI stack
Returns:
Array where each entry corresponds to the number of detected peaks within
the first dimension of the SLI image series.
"""
if peak_image is None and image is not None:
peak_image = peaks(image)
elif peak_image is not None:
peak_image = numpy.array(peak_image)
else:
raise ValueError('Either image or peak_image has to be defined.')
return numpy.count_nonzero(peak_image, axis=-1).astype(numpy.uint16)
def normalize(image, kind_of_normalization=0):
"""
Normalize given line profile by using a normalization technique based on
the kind_of_normalization parameter.
0 : Scale line profile to be between 0 and 1
1 : Divide line profile through its mean value
Arguments:
image: Full SLI measurement (series of images) which is
prepared for the pipeline using the SLIX toolbox methods.
kind_of_normalization: Normalization technique which will be used for
the calculation
Returns:
numpy.array -- Image where each pixel is normalized by the last axis
of the image
"""
image = numpy.array(image, dtype=numpy.float32)
if kind_of_normalization == 0:
image = (image - image.min(axis=-1)[..., None]) \
/ numpy.maximum(1e-15, image.max(axis=-1)[..., None] -
image.min(axis=-1)[..., None])
else:
image = image / \
numpy.maximum(1e-15, numpy.mean(image, axis=-1)[..., None])
return image
def peak_prominence(image, peak_image=None, kind_of_normalization=0):
"""
Calculate the peak prominence of all given peak positions within a line
profile. The line profile will be normalized by dividing the line profile
through its mean value. Therefore, values above 1 are possible.
Args:
image: Original line profile used to detect all peaks. This array will be
further analyzed to better determine the peak positions.
peak_image: Boolean NumPy array specifying the peak positions in the full
SLI stack
kind_of_normalization: Normalize given line profile by using a
normalization technique based on the kind_of_normalization parameter.
0 : Scale line profile to be between 0 and 1
1 : Divide line profile through its mean value
Returns:
NumPy array where each entry corresponds to the prominence of the
respective peak position in the line profile.
"""
image = numpy.array(image, dtype=numpy.float32)
if peak_image is None:
peak_image = peaks(image).astype('uint8')
else:
peak_image = numpy.array(peak_image).astype('uint8')
image = normalize(image, kind_of_normalization)
[image_x, image_y, image_z] = image.shape
image = image.reshape(image_x * image_y, image_z)
peak_image = peak_image.reshape(image_x * image_y, image_z).astype('uint8')
result_img = _prominence(image, peak_image)
result_img = result_img.reshape((image_x, image_y, image_z))
return result_img
def mean_peak_prominence(image, peak_image=None, kind_of_normalization=0):
"""
Calculate the mean peak prominence of all given peak positions within a
line profile. The line profile will be normalized by dividing the line
profile through its mean value. Therefore, values above 1 are possible.
Args:
image: Original line profile used to detect all peaks. This array will be
further analyzed to better determine the peak positions.
peak_image: Boolean NumPy array specifying the peak positions in the full
SLI stack
kind_of_normalization: Normalize given line profile by using a
normalization technique based on the kind_of_normalization parameter.
0 : Scale line profile to be between 0 and 1
1 : Divide line profile through its mean value
Returns:
Floating point value containing the mean peak prominence of the line
profile in degrees.
"""
if peak_image is not None:
peak_image = numpy.array(peak_image).astype('uint8')
else:
peak_image = peaks(image).astype('uint8')
result_img = peak_prominence(image, peak_image, kind_of_normalization)
result_img = numpy.sum(result_img, axis=-1) / \
numpy.maximum(1, numpy.count_nonzero(peak_image, axis=-1))
return result_img.astype('float32')
def peak_width(image, peak_image=None, target_height=0.5):
"""
Calculate the peak width of all given peak positions within a line profile.
Args:
image: Original line profile used to detect all peaks. This array will be
further analyzed to better determine the peak positions.
peak_image: Boolean NumPy array specifying the peak positions in the full
SLI stack
target_height: Relative peak height in relation to the prominence of the
given peak.
Returns:
NumPy array where each entry corresponds to the peak width of the line
profile. The values are in degrees.
"""
image = numpy.array(image, dtype='float32')
if peak_image is not None:
peak_image = numpy.array(peak_image).astype('uint8')
else:
peak_image = peaks(image).astype('uint8')
[image_x, image_y, image_z] = image.shape
image = image.reshape(image_x * image_y, image_z)
peak_image = peak_image.reshape(image_x * image_y, image_z).astype('uint8')
prominence = _prominence(image, peak_image)
result_image = _peakwidth(image, peak_image, prominence, target_height)
result_image = result_image.reshape((image_x, image_y, image_z))
result_image = result_image * 360.0 / image_z
return result_image
def mean_peak_width(image, peak_image=None, target_height=0.5):
"""
Calculate the mean peak width of all given peak positions within a line
profile.
Args:
image: Original line profile used to detect all peaks. This array will be
further analyzed to better determine the peak positions.
peak_image: Boolean NumPy array specifying the peak positions in the full
SLI stack
target_height: Relative peak height in relation to the prominence of the
given peak.
Returns:
NumPy array where each entry corresponds to the mean peak width of the
line profile. The values are in degrees.
"""
if peak_image is not None:
peak_image = numpy.array(peak_image).astype('uint8')
else:
peak_image = peaks(image).astype('uint8')
result_img = peak_width(image, peak_image, target_height)
result_img = numpy.sum(result_img, axis=-1) / \
numpy.maximum(1, numpy.count_nonzero(peak_image, axis=-1))
return result_img
def peak_distance(peak_image, centroids):
"""
Calculate the peak distance in degrees between two corresponding peaks
for each line profile in an SLI image series.
Args:
peak_image: Boolean NumPy array specifying the peak positions in the full
SLI stack
centroids: Use centroid calculation to better determine the peak position
regardless of the number of
measurements / illumination angles used.
Returns:
NumPy array of floating point values containing the peak distance of the
line profiles in degrees in their respective peak position. The first peak
of each peak pair will show the distance between peak_1 and peak_2 while
the second peak will show 360 - (peak_2 - peak_1).
"""
peak_image = numpy.array(peak_image).astype('uint8')
[image_x, image_y, image_z] = peak_image.shape
peak_image = peak_image.reshape(image_x * image_y, image_z).astype('uint8')
number_of_peaks = numpy.count_nonzero(peak_image, axis=-1).astype('uint8')
centroids = centroids.reshape(image_x * image_y, image_z).astype('float32')
result_img = _peakdistance(peak_image, centroids, number_of_peaks)
result_img = result_img.reshape((image_x, image_y, image_z))
return result_img
def mean_peak_distance(peak_image, centroids):
"""
Calculate the mean peak distance in degrees between two corresponding peaks
for each line profile in an SLI image series.
Args:
peak_image: Boolean NumPy array specifying the peak positions in the full
SLI stack
centroids: Use centroid calculation to better determine the peak position
regardless of the number of
measurements / illumination angles used.
Returns:
NumPy array of floating point values containing the mean peak distance of
the line profiles in degrees.
"""
result_image = peak_distance(peak_image, centroids)
result_image[result_image > 180] = 0
result_image = numpy.sum(result_image, axis=-1) / \
numpy.maximum(1, numpy.count_nonzero(result_image, axis=-1))
return result_image
def direction(peak_image, centroids, correction_angle=0,
number_of_directions=3):
"""
Calculate up to `number_of_directions` direction angles based on the given
peak positions. If more than `number_of_directions*2` peaks are present, no
direction angle will be calculated to avoid errors. This will result in a
direction angle of BACKGROUND_COLOR. The peak positions are determined by
the position of the corresponding peak pairs (i.e. 6 peaks: 1+4, 2+5, 3+6).
If two peaks are too far away or too near (outside of 180°±35°), the
direction angle will be considered as invalid, resulting in a direction
angle of BACKGROUND_COLOR.
Args:
correction_angle: Correct the resulting direction angle by the value.
This is useful when the stack or camera was rotated.
peak_image: Boolean NumPy array specifying the peak positions in the full
SLI stack
centroids: Centroids resulting from `centroid_correction` for more accurate
results
number_of_directions: Number of directions which shall be generated.
Returns:
NumPy array with the shape (x, y, `number_of_directions`) containing up to
`number_of_directions` direction angles. x equals the number of pixels of
the SLI image series. If a direction angle is invalid or missing, the
array entry will be BACKGROUND_COLOR instead.
"""
peak_image = numpy.array(peak_image).astype('uint8')
[image_x, image_y, image_z] = peak_image.shape
peak_image = peak_image.reshape(image_x * image_y, image_z).astype('uint8')
centroids = centroids.reshape(image_x * image_y, image_z).astype('float32')
number_of_peaks = numpy.count_nonzero(peak_image, axis=-1).astype('uint8')
result_img = _direction(peak_image, centroids, number_of_peaks,
number_of_directions, correction_angle)
result_img = result_img.reshape((image_x, image_y, number_of_directions))
return result_img
def centroid_correction(image, peak_image, low_prominence=TARGET_PROMINENCE,
high_prominence=None):
"""
Correct peak positions from a line profile by looking at only the peak
with a given threshold using a centroid calculation. If a minimum is found
in the considered interval, this minimum will be used as the limit instead.
The range for the peak correction is limited by
MAX_DISTANCE_FOR_CENTROID_ESTIMATION.
Args:
image: Original line profile used to detect all peaks. This array will be
further analyzed to better determine the peak positions.
peak_image: Boolean NumPy array specifying the peak positions in the full
SLI stack
low_prominence: Lower prominence bound for detecting a peak.
high_prominence: Higher prominence bound for detecting a peak.
Returns:
NumPy array with the positions of all detected peak positions corrected
with the centroid calculation.
"""
if peak_image is None:
peak_image = peaks(image).astype('uint8')
if low_prominence is None:
low_prominence = -numpy.inf
if high_prominence is None:
high_prominence = -numpy.inf
[image_x, image_y, image_z] = image.shape
image = normalize(image)
image = image.reshape(image_x * image_y, image_z).astype('float32')
peak_image = peak_image.reshape(image_x * image_y, image_z).astype('uint8')
reverse_image = -1 * image
reverse_peaks = peaks(reverse_image.reshape((image_x, image_y, image_z))) \
.astype('uint8') \
.reshape(image_x * image_y, image_z)
reverse_prominence = _prominence(reverse_image, reverse_peaks)
reverse_peaks[reverse_prominence < low_prominence] = False
reverse_peaks[reverse_prominence > high_prominence] = False
left_bases, right_bases = _centroid_correction_bases(image, peak_image,
reverse_peaks)
# Centroid calculation based on left_bases and right_bases
centroid_peaks = _centroid(image, peak_image, left_bases, right_bases)
centroid_peaks = centroid_peaks.reshape((image_x, image_y, image_z))
return centroid_peaks
def unit_vectors(direction):
"""
Calculate the unit vectors (UnitX, UnitY) from a given direction angle.
Args:
direction: 3D NumPy array - direction angles in degrees
Returns:
UnitX, UnitY: 3D NumPy array, 3D NumPy array
x- and y-vector component in arrays
"""
directions_rad = numpy.deg2rad(direction)
UnitX = -numpy.sin(0.5 * numpy.pi) * numpy.cos(directions_rad)
UnitY = numpy.sin(0.5 * numpy.pi) * numpy.sin(directions_rad)
UnitX[numpy.isclose(direction, -1)] = 0
UnitY[numpy.isclose(direction, -1)] = 0
return UnitX, UnitY
```
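Putting the CPU functions above together yields the usual SLIX evaluation chain: detect peaks, filter them by prominence, refine the positions with centroids and derive direction angles. A minimal sketch with a synthetic random stack (assuming SLIX is installed; real data would come from `SLIX.io.imread`):

```python
import numpy
from SLIX.CPU import toolbox as cpu_toolbox

# Synthetic SLI stack: 4x4 pixels, 24 illumination angles.
image = numpy.random.rand(4, 4, 24).astype(numpy.float32)

peak_image = cpu_toolbox.peaks(image)
prominence = cpu_toolbox.peak_prominence(image, peak_image)
# Keep only significant peaks, mirroring SLIX.toolbox.significant_peaks.
peak_image[prominence < cpu_toolbox.TARGET_PROMINENCE] = False

centroids = cpu_toolbox.centroid_correction(image, peak_image)
directions = cpu_toolbox.direction(peak_image, centroids)
print(directions.shape)  # (4, 4, 3) with the default number_of_directions
```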
#### File: SLIX/SLIX/io.py
```python
from PIL import Image
import numpy
import tifffile
import nibabel
import h5py
import sys
import re
import os
import glob
import datetime
import SLIX
from .attributemanager import AttributeHandler
__all__ = ['H5FileReader', 'H5FileWriter', 'imread', 'imwrite', 'imwrite_rgb']
nibabel.openers.Opener.default_compresslevel = 9
_fileregex = r'.*_+p[0-9]+_?.*\.(tif{1,2}|jpe*g|nii|h5|png)'
class H5FileReader:
"""
This class allows to read HDF5 files from your file system.
It supports reading datasets but not reading attributes.
"""
def __init__(self):
self.path = None
self.file = None
self.content = None
def open(self, path):
"""
Args:
path: Path on the filesystem to the HDF5 file which will be read
Returns:
None
"""
if not path == self.path:
self.close()
self.path = path
self.file = h5py.File(path, 'r')
def close(self):
"""
Close the currently opened file, if any is open.
Returns:
None
"""
if self.file is not None:
self.file.close()
self.file = None
self.path = None
self.content = None
def read(self, dataset):
"""
Read a dataset from the currently opened file, if any is open.
The content of the dataset will be stored for future use.
Args:
dataset: Path to the dataset within the HDF5
Returns:
The opened dataset.
"""
if self.content is None:
self.content = {}
if dataset not in self.content.keys():
self.content[dataset] = numpy.squeeze(self.file[dataset][:])
return self.content[dataset]
class H5FileWriter:
"""
This class allows to write HDF5 files to your file system.
It supports writing datasets and writing attributes.
"""
def __init__(self):
self.path = None
self.file = None
def add_symlink(self, dataset, symlink_path):
"""
Adds a symbolic link from one dataset to another path.
Args:
dataset: Dataset path within the HDF5
symlink_path: Path to the symlink.
Returns:
None
"""
if self.file is None:
return
self.file[symlink_path] = self.file[dataset]
self.file.flush()
def add_plim_attributes(self, stack_path, dataset='/Image'):
"""
PLIM is a package used in the 3D-PLI group to read and write multiple
attributes from/to a HDF5 file. The basic functionality is added in
attributemanager.py. Calling this method will write many attributes to
the HDF5 file at the given dataset.
This includes: A unique ID, the creator, software parameters,
creation time, software_revision, image_modality and
all attributes from the original stack, if it was a
HDF5
Args:
stack_path: Path of the stack that was used to calculate the
content which will be written to the HDF5 file.
dataset: Dataset where the attributes shall be written to.
Returns:
None
"""
if self.path is None or self.file is None:
return
if dataset not in self.file:
self.file.create_dataset(dataset)
output_handler = AttributeHandler(self.file[dataset])
if stack_path[-3:] == ".h5":
original_file = h5py.File(stack_path, 'r')
original_dataset = original_file[dataset]
original_handler = AttributeHandler(original_dataset)
original_handler.copy_all_attributes_to(output_handler)
original_file.close()
output_handler.add_creator()
output_handler.set_attribute('software', sys.argv[0])
output_handler.set_attribute('software_revision',
SLIX.__version__)
output_handler.set_attribute('creation_time',
datetime.datetime.now()
.strftime('%Y-%m-%d %H:%M:%S'))
output_handler.set_attribute('software_parameters',
' '.join(sys.argv[1:]))
output_handler.set_attribute('image_modality', "Placeholder")
output_handler.add_id()
self.file.flush()
def write_attribute(self, dataset, attr_name, value):
"""
Write a single attribute to a dataset.
Args:
dataset: Path to the dataset within the HDF5
attr_name: Name of the attribute which shall be written.
value: Value of the attribute that shall be written.
Returns:
None
"""
if self.file is None:
return
if dataset not in self.file:
self.file.create_dataset(dataset)
output_handler = AttributeHandler(self.file[dataset])
output_handler.set_attribute(attr_name, value)
self.file.flush()
def write_dataset(self, dataset, content):
"""
Write a dataset to the currently opened HDF5 file, if any is open.
If no HDF5 file is open, this method will return without writing.
Args:
dataset: Path to the dataset within the HDF5 file.
content: Content which shall be written.
Returns:
None
"""
if self.file is None:
return
if dataset not in self.file:
# Change compression algorithm for large files as it can take
# very long for the compression to finish
if len(content.shape) == 3:
self.file.create_dataset(dataset, content.shape,
dtype=content.dtype, data=content,
compression='lzf', shuffle=True)
else:
self.file.create_dataset(dataset, content.shape,
dtype=content.dtype, data=content,
compression='gzip',
compression_opts=9,
shuffle=True)
else:
self.file[dataset] = content
self.file.flush()
def close(self):
"""
Close the currently opened file.
Returns:
None
"""
if self.file is None:
return
self.file.flush()
self.file.close()
self.path = None
self.file = None
def open(self, path):
"""
Open a new HDF5 file with the given path. If another file was opened,
it will be closed first.
Args:
path: Path to the HDF5 file.
Returns:
None
"""
if self.path != path:
self.close()
self.path = path
self.file = h5py.File(path, mode='w')
def read_folder(filepath):
"""
Reads multiple image files from a folder and returns the resulting stack.
To find the images in the right order, a regex is used which will search
for files with the following pattern:
[prefix]_p[Nr][suffix]. The start number doesn't need to be 0.
The files are sorted with a natural sort, meaning that files like
0002, 1, 004, 3 will be sorted as 1, 0002, 3, 004.
The following regex is used to find the measurements:
".*_+p[0-9]+_?.*\.(tif{1,2}|jpe*g|nii|h5|png)"
Supported file formats for the image file equal the supported formats of
SLIX.imread.
Args:
filepath: Path to folder
Returns:
numpy.array: Image with shape [x, y, z] where [x, y] is the size
of a single image and z specifies the number of measurements
"""
files_in_folder = glob.glob(filepath + '/*')
matching_files = []
for file in files_in_folder:
if re.match(_fileregex, file) is not None:
matching_files.append(file)
matching_files.sort(key=__natural_sort_filenames_key)
image = None
# Check if files contain the needed regex for our measurements
for file in matching_files:
measurement_image = imread(file)
if image is None:
image = measurement_image
elif len(image.shape) == 2:
image = numpy.stack((image, measurement_image), axis=-1)
else:
image = numpy.concatenate((image,
measurement_image
[:, :, numpy.newaxis]), axis=-1)
return image
def __natural_sort_filenames_key(string, regex=re.compile('([0-9]+)')):
return [int(text) if text.isdigit() else text.lower()
for text in regex.split(string)]
def imread(filepath, dataset="/Image"):
"""
Reads image file and returns it.
Supported file formats: HDF5, NIfTI, Tiff.
Args:
filepath: Path to image
dataset: When reading a HDF5 file, a dataset is required.
Default: '/Image'
Returns:
numpy.array: Image with shape [x, y, z] where [x, y] is the size
of a single image and z specifies the number of measurements
"""
if os.path.isdir(filepath):
data = read_folder(filepath)
# Load NIfTI dataset
elif filepath.endswith('.nii') or filepath.endswith('.nii.gz'):
data = nibabel.load(filepath).get_fdata()
data = numpy.squeeze(numpy.swapaxes(data, 0, 1))
elif filepath.endswith('.tiff') or filepath.endswith('.tif'):
data = tifffile.imread(filepath)
if len(data.shape) == 3:
data = numpy.squeeze(numpy.moveaxis(data, 0, -1))
elif filepath.endswith('.h5'):
reader = H5FileReader()
reader.open(filepath)
data = reader.read(dataset)
if len(data.shape) == 3:
data = numpy.squeeze(numpy.moveaxis(data, 0, -1))
reader.close()
return data
else:
data = numpy.array(Image.open(filepath))
return data
def imwrite(filepath, data, dataset='/Image', original_stack_path=""):
"""
Write generated image to given filepath.
Supported file formats: HDF5, NIfTI, Tiff.
Other file formats are only indirectly supported and might result in
errors.
Args:
filepath: Path to image
data: Data which will be written to the disk
dataset: When writing a HDF5 file, a dataset is required.
Default: '/Image'
original_stack_path: Path to the original image stack used to create
this content. Only required when a HDF5 file
is written.
Returns:
None
"""
save_data = data.copy()
if save_data.dtype == bool:
save_data = save_data.astype(numpy.uint8)
elif save_data.dtype == numpy.float64:
save_data = save_data.astype(numpy.float32)
elif save_data.dtype == numpy.int64:
save_data = save_data.astype(numpy.int32)
elif save_data.dtype == numpy.uint64:
save_data = save_data.astype(numpy.uint32)
if filepath.endswith('.nii') or filepath.endswith('.nii.gz'):
save_data = numpy.swapaxes(save_data, 0, 1)
nibabel.save(nibabel.Nifti1Image(save_data, numpy.eye(4)),
filepath)
elif filepath.endswith('.tiff') or filepath.endswith('.tif'):
if len(save_data.shape) == 3:
save_data = numpy.moveaxis(save_data, -1, 0)
tifffile_version_date = datetime.datetime.strptime(
tifffile.__version__, '%Y.%m.%d')
tifffile_comparison_date = datetime.datetime.strptime(
'2020.10.02', '%Y.%m.%d')
if tifffile_version_date > tifffile_comparison_date:
tifffile.imwrite(filepath, save_data, compression=8)
else:
tifffile.imwrite(filepath, save_data, compress=9)
elif filepath.endswith('.h5'):
if len(save_data.shape) == 3:
save_data = numpy.moveaxis(save_data, -1, 0)
writer = H5FileWriter()
writer.open(filepath)
writer.write_dataset(dataset, save_data)
writer.add_plim_attributes(original_stack_path, dataset)
writer.add_symlink(dataset, '/pyramid/00')
writer.close()
else:
Image.fromarray(save_data).save(filepath)
def imwrite_rgb(filepath, data, dataset='/Image', original_stack_path=""):
"""
Write generated RGB image to given filepath.
Supported file formats: HDF5, Tiff.
Other file formats are only indirectly supported and might result in
errors.
Args:
filepath: Path to image
data: Data which will be written to the disk
dataset: When writing a HDF5 file, a dataset is required.
Default: '/Image'
original_stack_path: Path to the original image stack used to
create this content. Only required when a
HDF5 file is written.
Returns:
None
"""
save_data = data.copy()
axis = numpy.argwhere(numpy.array(save_data.shape) == 3).flatten()
if len(axis) == 0:
print('Cannot create RGB image as no dimension has a depth of 3.')
return
if filepath.endswith('.tiff') or filepath.endswith('.tif'):
save_data = numpy.moveaxis(save_data, axis[0], 0)
tifffile.imwrite(filepath, save_data, photometric='rgb',
compression=8)
elif filepath.endswith('.h5'):
writer = H5FileWriter()
writer.open(filepath)
writer.write_dataset(dataset, save_data)
writer.add_plim_attributes(original_stack_path, dataset)
writer.add_symlink(dataset, '/pyramid/00')
writer.close()
else:
print("File type is not supported. "
"Supported file types are .h5, .tif(f)")
```
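A short, hedged example of the I/O round trip provided by this module; the file names are placeholders, and any of the supported formats (`.tif(f)`, `.nii(.gz)`, `.h5`, or a folder of single images) could be used instead:

```python
import numpy
import SLIX

stack = SLIX.io.imread('measurement_stack.tiff')   # shape (x, y, angles)
average = numpy.mean(stack, axis=-1)

# Plain image formats only need the target path.
SLIX.io.imwrite('measurement_avg.tiff', average)

# For HDF5 output, the original stack path lets the writer copy PLIM attributes.
SLIX.io.imwrite('measurement_avg.h5', average,
                dataset='/Image',
                original_stack_path='measurement_stack.h5')
```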
#### File: SLIX/SLIX/_preparation.py
```python
import numba
import numpy
_multiprocessing_worker_fourier_var_dict = {}
def _init_worker_fourier_smoothing(X, X_shape):
_multiprocessing_worker_fourier_var_dict['X'] = X
_multiprocessing_worker_fourier_var_dict['X_shape'] = X_shape
def _worker_function_fourier_smoothing(i, threshold, window):
x = i % _multiprocessing_worker_fourier_var_dict['X_shape'][0]
y = i // _multiprocessing_worker_fourier_var_dict['X_shape'][0]
image = numpy.frombuffer(_multiprocessing_worker_fourier_var_dict['X'])\
.reshape(_multiprocessing_worker_fourier_var_dict['X_shape'])
image[x, y, :] = _fourier_smoothing(image[x, y, :], threshold, window)
def _fourier_smoothing(image, threshold, window):
fft = numpy.fft.fft(image, axis=-1)
frequencies = numpy.fft.fftfreq(fft.shape[-1])
frequencies = frequencies / frequencies.max()
multiplier = 1 - (0.5 + 0.5 * numpy.tanh(
(numpy.abs(frequencies) - threshold) / window))
fft = numpy.multiply(fft, multiplier[numpy.newaxis, numpy.newaxis, ...])
return numpy.real(numpy.fft.ifft(fft)).astype(image.dtype)
@numba.jit(nopython=True)
def _thin_out_plain(image, factor):
return image[::factor, ::factor, :]
@numba.jit(nopython=True, parallel=True)
def _thin_out_average(image, factor):
nx = int(numpy.ceil(image.shape[0] / factor))
ny = int(numpy.ceil(image.shape[1] / factor))
result_image = numpy.empty((nx, ny, image.shape[2]), dtype=numpy.float64)
for i in numba.prange(0, nx):
for j in numba.prange(0, ny):
for k in numba.prange(0, image.shape[2]):
roi = image[i * factor:(i+1) * factor,
j * factor:(j+1) * factor,
k]
result_image[i, j, k] = numpy.mean(roi)
return result_image
@numba.jit(nopython=True, parallel=True)
def _thin_out_median(image, factor):
nx = int(numpy.ceil(image.shape[0] / factor))
ny = int(numpy.ceil(image.shape[1] / factor))
result_image = numpy.empty((nx, ny, image.shape[2]), dtype=numpy.float64)
for i in numba.prange(0, nx):
for j in numba.prange(0, ny):
for k in numba.prange(0, image.shape[2]):
roi = image[i * factor:(i+1) * factor,
j * factor:(j+1) * factor,
k]
result_image[i, j, k] = numpy.median(roi)
return result_image
```
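The `_fourier_smoothing` helper above implements a soft low-pass filter: frequencies (normalised to the maximum frequency) below `threshold` pass nearly unchanged, while frequencies above it are attenuated over a transition band whose steepness is set by `window`. A small stand-alone sketch that reproduces only the frequency response, with arbitrary example parameters:

```python
import numpy

def lowpass_multiplier(n_samples, threshold, window):
    """Frequency response used by _fourier_smoothing (reconstructed for illustration)."""
    frequencies = numpy.fft.fftfreq(n_samples)
    frequencies = frequencies / frequencies.max()
    return 1 - (0.5 + 0.5 * numpy.tanh((numpy.abs(frequencies) - threshold) / window))

m = lowpass_multiplier(24, threshold=0.2, window=0.025)
print(m.round(3))  # values near 1 for low frequencies, near 0 for high ones
```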
#### File: SLIX/SLIX/toolbox.py
```python
try:
try:
import cupy
from numba import cuda
cupy.empty((0), dtype=float)
from SLIX.GPU import toolbox as gpu_toolbox
gpu_available = True
except cupy.cuda.runtime.CUDARuntimeError:
print('[WARNING] CuPy is installed but an error was thrown by the '
'runtime. SLIX will fall back to the CPU variant.')
gpu_available = False
except (cuda.cudadrv.driver.CudaAPIError,
cuda.cudadrv.driver.LinkerError):
print("[WARNING] Numba CUDA couldn't be initialized. "
"Please check if there are problems with your CUDA / Numba "
"version. SLIX will fall back to the CPU variant.")
gpu_available = False
except (ModuleNotFoundError, NameError):
gpu_available = False
print('[WARNING] CuPy is not installed. The toolbox will use the CPU '
'variant instead. If you want to use the GPU variant, please run '
'`pip install cupy`.')
from SLIX.CPU import toolbox as cpu_toolbox
import numpy
__all__ = ['background_mask', 'centroid_correction',
'direction', 'unit_vectors', 'num_peaks',
'mean_peak_prominence', 'peaks',
'peak_prominence', 'peak_width',
'mean_peak_distance', 'peak_distance',
'mean_peak_width', 'significant_peaks']
def background_mask(image, use_gpu=gpu_available,
return_numpy=True):
"""
Creates a background mask by setting all image pixels with low scattering
signals to zero. As all background pixels are near zero for all images in
the SLI image stack, this method should remove most of the background
allowing for better approximations using the available features.
It is advised to use this function.
Args:
image: Complete SLI measurement image stack as a 2D/3D NumPy array
use_gpu: If available use the GPU for calculation
return_numpy: Necessary if using `use_gpu`. Specifies if a CuPy or
NumPy array will be returned.
Returns:
numpy.array: 1D/2D-image which masks the background as True and
foreground as False
"""
if use_gpu:
return gpu_toolbox.background_mask(image, return_numpy)
else:
return cpu_toolbox.background_mask(image)
def peaks(image, use_gpu=gpu_available, return_numpy=True):
"""
Detect all peaks from a full SLI measurement. Peaks will not be filtered
in any way. To detect only significant peaks, filter the peaks by using
the prominence as a threshold.
Args:
image: Complete SLI measurement image stack as a 2D/3D NumPy array
use_gpu: If available use the GPU for calculation
return_numpy: Necessary if using `use_gpu`. Specifies if a CuPy or
NumPy array will be returned.
Returns:
2D/3D boolean image containing masking the peaks with `True`
"""
if use_gpu:
return gpu_toolbox.peaks(image, return_numpy)
else:
return cpu_toolbox.peaks(image)
def significant_peaks(image, low_prominence=cpu_toolbox.TARGET_PROMINENCE,
high_prominence=numpy.inf,
use_gpu=gpu_available, return_numpy=True):
"""
Detect all peaks from a full SLI measurement and filter them by passing
thresholds.
Args:
image: Complete SLI measurement image stack as a 2D/3D NumPy array
low_prominence: Minimum prominence needed by peak to count as a peak.
Peaks below this threshold will not be considered as a
peak.
high_prominence: Maximum prominence allowed for a peak to count as a peak.
Peaks above this threshold will not be considered as a
peak.
use_gpu: If available use the GPU for calculation
return_numpy: Necessary if using `use_gpu`. Specifies if a CuPy or
NumPy array will be returned.
Returns:
2D/3D boolean image containing masking the peaks with `True`
"""
if use_gpu:
peaks = gpu_toolbox.peaks(image, return_numpy=return_numpy)
prominences = gpu_toolbox.peak_prominence(image, peaks,
return_numpy=return_numpy)
peaks[prominences < low_prominence] = False
peaks[prominences > high_prominence] = False
else:
peaks = cpu_toolbox.peaks(image)
prominences = cpu_toolbox.peak_prominence(image, peaks)
peaks[prominences < low_prominence] = False
peaks[prominences > high_prominence] = False
return peaks
def num_peaks(image, low_prominence=cpu_toolbox.TARGET_PROMINENCE,
high_prominence=numpy.inf,
use_gpu=gpu_available, return_numpy=True):
"""
Calculate the number of peaks from each line profile in an SLI image series
by detecting all peaks and applying thresholds to remove unwanted peaks.
Args:
image: Full SLI measurement (series of images) which is prepared for
the pipeline using the SLIX toolbox methods.
low_prominence: Lower prominence bound for detecting a peak.
high_prominence: Higher prominence bound for detecting a peak.
use_gpu: If available use the GPU for calculation
return_numpy: Necessary if using `use_gpu`. Specifies if a CuPy or
NumPy array will be returned.
Returns:
Array where each entry corresponds to the number of detected peaks within
the first dimension of the SLI image series.
"""
if use_gpu:
peaks = significant_peaks(image, low_prominence, high_prominence,
return_numpy=False)
return gpu_toolbox.num_peaks(peak_image=peaks,
return_numpy=return_numpy)
else:
peaks = significant_peaks(image, low_prominence, high_prominence,
use_gpu=False)
return cpu_toolbox.num_peaks(peak_image=peaks)
def direction(peak_image, centroids, correction_angle=0,
number_of_directions=3,
use_gpu=gpu_available, return_numpy=True):
"""
Calculate up to `number_of_directions` direction angles based on the given
peak positions. If more than `number_of_directions*2` peaks are present, no
direction angle will be calculated to avoid errors. This will result in a
direction angle of BACKGROUND_COLOR. The peak positions are determined by
the position of the corresponding peak pairs (i.e. 6 peaks: 1+4, 2+5, 3+6).
If two peaks are too far away or too near (outside of 180°±35°), the
direction angle will be considered as invalid, resulting in a direction
angle of BACKGROUND_COLOR.
Args:
correction_angle: Correct the resulting direction angle by the value.
This is useful when the stack or camera was rotated.
peak_image: Boolean NumPy array specifying the peak positions in the
full SLI stack
centroids: Centroids resulting from `centroid_correction` for more accurate
results
number_of_directions: Number of directions which shall be generated.
use_gpu: If available use the GPU for calculation
return_numpy: Necessary if using `use_gpu`. Specifies if a CuPy or
NumPy array will be returned.
Returns:
NumPy array with the shape (x, y, `number_of_directions`) containing
up to `number_of_directions` direction angles. x equals the number
of pixels of the SLI image series. If a direction angle is invalid
or missing, the array entry will be BACKGROUND_COLOR instead.
"""
if use_gpu:
return gpu_toolbox.direction(peak_image, centroids, correction_angle,
number_of_directions, return_numpy)
else:
return cpu_toolbox.direction(peak_image, centroids, correction_angle,
number_of_directions)
def peak_distance(peak_image, centroids, use_gpu=gpu_available,
return_numpy=True):
"""
Calculate the peak distance in degrees between two corresponding peaks
for each line profile in an SLI image series.
Args:
peak_image: Boolean NumPy array specifying the peak positions in the
full SLI stack
centroids: Use centroid calculation to better determine the peak
position regardless of the number of measurements / illumination
angles used.
use_gpu: If available use the GPU for calculation
return_numpy: Necessary if using `use_gpu`. Specifies if a CuPy or
NumPy array will be returned.
Returns:
NumPy array of floating point values containing the peak distance of
the line profiles in degrees in their respective peak position. The
first peak of each peak pair will show the distance between peak_1
and peak_2 while the second peak will show 360 - (peak_2 - peak_1).
"""
if use_gpu:
return gpu_toolbox.peak_distance(peak_image, centroids, return_numpy)
else:
return cpu_toolbox.peak_distance(peak_image, centroids)
def mean_peak_distance(peak_image, centroids, use_gpu=gpu_available,
return_numpy=True):
"""
Calculate the mean peak distance in degrees between two corresponding peaks
for each line profile in an SLI image series.
Args:
peak_image: Boolean NumPy array specifying the peak positions in the
full SLI stack
centroids: Use centroid calculation to better determine the peak position
regardless of the number of measurements / illumination angles used.
use_gpu: If available use the GPU for calculation
return_numpy: Necessary if using `use_gpu`. Specifies if a CuPy or
NumPy array will be returned.
Returns:
NumPy array of floating point values containing the mean peak distance
of the line profiles in degrees.
"""
if use_gpu:
return gpu_toolbox.mean_peak_distance(peak_image, centroids,
return_numpy)
else:
return cpu_toolbox.mean_peak_distance(peak_image, centroids)
def peak_prominence(image, peak_image=None, kind_of_normalization=1,
use_gpu=gpu_available, return_numpy=True):
"""
Calculate the peak prominence of all given peak positions within a line
profile. The line profile will be normalized by dividing the line profile
through its mean value. Therefore, values above 1 are possible.
Args:
image: Original line profile used to detect all peaks. This array will be
further analyzed to better determine the peak positions.
peak_image: Boolean NumPy array specifying the peak positions in the
full SLI stack
kind_of_normalization: Normalize given line profile by using a
normalization technique based on the kind_of_normalization parameter.
0 : Scale line profile to be between 0 and 1
1 : Divide line profile through its mean value
use_gpu: If available use the GPU for calculation
return_numpy: Necessary if using `use_gpu`. Specifies if a CuPy or
NumPy array will be returned.
Returns:
NumPy array where each entry corresponds to the prominence of the
respective peak position in the line profile.
"""
if use_gpu:
return gpu_toolbox.peak_prominence(image, peak_image,
kind_of_normalization, return_numpy)
else:
return cpu_toolbox.peak_prominence(image, peak_image,
kind_of_normalization)
def mean_peak_prominence(image, peak_image=None, kind_of_normalization=1,
use_gpu=gpu_available, return_numpy=True):
"""
Calculate the mean peak prominence of all given peak positions within a
line profile. The line profile will be normalized by dividing the line
profile through its mean value. Therefore, values above 1 are possible.
Args:
image: Original line profile used to detect all peaks. This array
will be further analyzed to better determine the peak positions.
peak_image: Boolean NumPy array specifying the peak positions in the
full SLI stack
kind_of_normalization: Normalize given line profile by using a
normalization technique based on the kind_of_normalization parameter.
0 : Scale line profile to be between 0 and 1
1 : Divide line profile through its mean value
use_gpu: If available use the GPU for calculation
return_numpy: Necessary if using `use_gpu`. Specifies if a CuPy or
NumPy array will be returned.
Returns:
Floating point value containing the mean peak prominence of the line
profile in degrees.
"""
if use_gpu:
return gpu_toolbox.mean_peak_prominence(image, peak_image,
kind_of_normalization,
return_numpy)
else:
return cpu_toolbox.mean_peak_prominence(image, peak_image,
kind_of_normalization)
def peak_width(image, peak_image=None, target_height=0.5,
use_gpu=gpu_available, return_numpy=True):
"""
Calculate the peak width of all given peak positions within a line profile.
Args:
image: Original line profile used to detect all peaks. This array will
be further analyzed to better determine the peak positions.
peak_image: Boolean NumPy array specifying the peak positions in the full
SLI stack
target_height: Relative peak height in relation to the prominence of the
given peak.
use_gpu: If available use the GPU for calculation
return_numpy: Necessary if using `use_gpu`. Specifies if a CuPy or
NumPy array will be returned.
Returns:
NumPy array where each entry corresponds to the peak width of the line
profile. The values are in degrees.
"""
if use_gpu:
return gpu_toolbox.peak_width(image, peak_image, target_height,
return_numpy=return_numpy)
else:
return cpu_toolbox.peak_width(image, peak_image, target_height)
def mean_peak_width(image, peak_image=None, target_height=0.5,
use_gpu=gpu_available, return_numpy=True):
"""
Calculate the mean peak width of all given peak positions within a line
profile.
Args:
image: Original line profile used to detect all peaks. This array will
be further analyzed to better determine the peak positions.
peak_image: Boolean NumPy array specifying the peak positions in the
full SLI stack
target_height: Relative peak height in relation to the prominence of
the given peak.
use_gpu: If available use the GPU for calculation
return_numpy: Necessary if using `use_gpu`. Specifies if a CuPy or
NumPy array will be returned.
Returns:
NumPy array where each entry corresponds to the mean peak width of the
line profile. The values are in degree.
"""
if use_gpu:
return gpu_toolbox.mean_peak_width(image, peak_image, target_height,
return_numpy=return_numpy)
else:
return cpu_toolbox.mean_peak_width(image, peak_image, target_height)
def centroid_correction(image, peak_image,
low_prominence=cpu_toolbox.TARGET_PROMINENCE,
high_prominence=numpy.inf,
use_gpu=gpu_available, return_numpy=True):
"""
    Correct peak positions from a line profile by looking only at the part of
    each peak above a given threshold and applying a centroid calculation. If a
    minimum is found in the considered interval, this minimum will be used as
    the limit instead.
The range for the peak correction is limited by
MAX_DISTANCE_FOR_CENTROID_ESTIMATION.
Args:
image: Original line profile used to detect all peaks. This array will
be further analyzed to better determine the peak positions.
peak_image: Boolean NumPy array specifying the peak positions in the
full SLI stack
low_prominence: Lower prominence bound for detecting a peak.
high_prominence: Higher prominence bound for detecting a peak.
use_gpu: If available use the GPU for calculation
return_numpy: Necessary if using `use_gpu`. Specifies if a CuPy or
NumPy array will be returned.
Returns:
        NumPy array with the positions of all detected peaks corrected
        with the centroid calculation.
"""
if use_gpu:
return gpu_toolbox.centroid_correction(image, peak_image,
low_prominence,
high_prominence, return_numpy)
else:
return cpu_toolbox.centroid_correction(image, peak_image,
low_prominence,
high_prominence)
def unit_vectors(direction, use_gpu=gpu_available, return_numpy=True):
"""
Calculate the unit vectors (UnitX, UnitY) from a given direction angle.
Args:
direction: 3D NumPy array - direction angles in degrees
use_gpu: If available use the GPU for calculation
return_numpy: Necessary if using `use_gpu`. Specifies if a CuPy or
NumPy array will be returned.
Returns:
UnitX, UnitY: 3D NumPy array, 3D NumPy array
x- and y-vector component in arrays
"""
if use_gpu:
return gpu_toolbox.unit_vectors(direction, return_numpy=return_numpy)
else:
return cpu_toolbox.unit_vectors(direction)
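# --- Editor's note: a hedged usage sketch, not part of the original module. ---
# The wrappers above share one pattern: dispatch to the CPU or GPU toolbox
# depending on `use_gpu`. A typical call chain (mirroring tests/test_visualization.py;
# the demo file path is an assumption) could look like:
#
#     from SLIX import io, toolbox
#     stack = io.imread('tests/files/demo.nii')               # SLI image stack
#     peaks = toolbox.significant_peaks(stack, use_gpu=False)
#     prominence = toolbox.mean_peak_prominence(stack, peaks, use_gpu=False)
#     widths = toolbox.mean_peak_width(stack, peaks, use_gpu=False)
#     centroids = toolbox.centroid_correction(stack, peaks, use_gpu=False)
#     direction = toolbox.direction(peaks, centroids, use_gpu=False)
#     unit_x, unit_y = toolbox.unit_vectors(direction, use_gpu=False)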
```
#### File: SLIX/SLIX/_visualization.py
```python
import numba
import numpy
from matplotlib.colors import hsv_to_rgb
@numba.njit()
def _count_nonzero(image):
iterator_image = image.flatten()
number_of_pixels = 0
for i in range(len(iterator_image)):
if iterator_image[i] != 0:
number_of_pixels += 1
return number_of_pixels
@numba.jit(parallel=True, nopython=True)
def _downsample_2d(image, kernel_size,
background_threshold, background_value):
nx = int(numpy.ceil(image.shape[0] / kernel_size))
ny = int(numpy.ceil(image.shape[1] / kernel_size))
output_image = numpy.empty((nx, ny))
output_image[:, :] = background_value
for i in numba.prange(0, nx):
for j in numba.prange(0, ny):
roi = image[kernel_size * i:kernel_size * i + kernel_size,
kernel_size * j:kernel_size * j + kernel_size]
roi = roi.flatten()
number_of_valid_vectors = _count_nonzero(roi != background_value)
if number_of_valid_vectors >= background_threshold * roi.size:
valid_vectors = 0
roi.sort()
for idx in range(roi.size):
if roi[idx] != background_value:
valid_vectors += 1
if valid_vectors == number_of_valid_vectors // 2:
if number_of_valid_vectors % 2 == 0:
output_image[i, j] = roi[idx]
else:
output_image[i, j] = (roi[idx+1] + roi[idx]) / 2
return output_image
def _downsample(image, kernel_size, background_threshold=0,
background_value=0):
nx = int(numpy.ceil(image.shape[0] / kernel_size))
ny = int(numpy.ceil(image.shape[1] / kernel_size))
if len(image.shape) < 3:
z = 1
else:
z = image.shape[2]
result_image = numpy.empty((nx, ny, z))
for sub_image in range(z):
result_image[:, :, sub_image] = \
_downsample_2d(image[:, :, sub_image], kernel_size,
background_threshold, background_value)
result_image = numpy.squeeze(result_image)
return result_image
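# Editor's note (hedged sketch): _downsample reduces an (x, y, n) map by taking
# the median of the valid (non-background) entries in each kernel_size x
# kernel_size block. For example, a 200 x 200 x 2 direction map downsampled
# with kernel_size=20 and background_value=-1 yields a 10 x 10 x 2 map; blocks
# with too few valid entries stay at background_value.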
def _visualize_one_direction(direction, rgb_stack):
output_image = rgb_stack
output_image[direction == -1] = 0
return output_image.astype('float32')
def _visualize_multiple_direction(direction, rgb_stack):
output_image = numpy.zeros((direction.shape[0] * 2,
direction.shape[1] * 2,
3))
# count valid directions
valid_directions = numpy.count_nonzero(direction > -1, axis=-1)
r = rgb_stack[..., 0]
g = rgb_stack[..., 1]
b = rgb_stack[..., 2]
# Now we need to place them in the right pixel on our output image
for x in range(direction.shape[0]):
for y in range(direction.shape[1]):
if valid_directions[x, y] == 0:
output_image[x * 2:x * 2 + 2, y * 2:y * 2 + 2] = 0
elif valid_directions[x, y] == 1:
output_image[x * 2:x * 2 + 2, y * 2:y * 2 + 2, 0] = r[x, y, 0]
output_image[x * 2:x * 2 + 2, y * 2:y * 2 + 2, 1] = g[x, y, 0]
output_image[x * 2:x * 2 + 2, y * 2:y * 2 + 2, 2] = b[x, y, 0]
else:
output_image[x * 2, y * 2, 0] = r[x, y, 0]
output_image[x * 2, y * 2, 1] = g[x, y, 0]
output_image[x * 2, y * 2, 2] = b[x, y, 0]
output_image[x * 2 + 1, y * 2, 0] = r[x, y, 1]
output_image[x * 2 + 1, y * 2, 1] = g[x, y, 1]
output_image[x * 2 + 1, y * 2, 2] = b[x, y, 1]
if valid_directions[x, y] == 2:
output_image[x * 2, y * 2 + 1, 0] = r[x, y, 1]
output_image[x * 2, y * 2 + 1, 1] = g[x, y, 1]
output_image[x * 2, y * 2 + 1, 2] = b[x, y, 1]
output_image[x * 2 + 1, y * 2 + 1, 0] = r[x, y, 0]
output_image[x * 2 + 1, y * 2 + 1, 1] = g[x, y, 0]
output_image[x * 2 + 1, y * 2 + 1, 2] = b[x, y, 0]
else:
output_image[x * 2, y * 2 + 1, 0] = r[x, y, 2]
output_image[x * 2, y * 2 + 1, 1] = g[x, y, 2]
output_image[x * 2, y * 2 + 1, 2] = b[x, y, 2]
if valid_directions[x, y] == 3:
output_image[x * 2 + 1, y * 2 + 1, 0] = 0
output_image[x * 2 + 1, y * 2 + 1, 1] = 0
output_image[x * 2 + 1, y * 2 + 1, 2] = 0
if valid_directions[x, y] == 4:
output_image[x * 2 + 1, y * 2 + 1, 0] = r[x, y, 3]
output_image[x * 2 + 1, y * 2 + 1, 1] = g[x, y, 3]
output_image[x * 2 + 1, y * 2 + 1, 2] = b[x, y, 3]
return output_image.astype('float32')
def _plot_axes_unit_vectors(ax, mesh_x, mesh_y, mesh_u, mesh_v,
scale, alpha, vector_width):
# Normalize the arrows:
mesh_u_normed = mesh_u / numpy.sqrt(numpy.maximum(1e-15,
mesh_u ** 2 +
mesh_v ** 2))
mesh_v_normed = mesh_v / numpy.sqrt(numpy.maximum(1e-15,
mesh_u ** 2 +
mesh_v ** 2))
# Convert to RGB colors
normed_angle = numpy.abs(numpy.arctan2(mesh_v_normed, -mesh_u_normed))
hsv_stack = numpy.stack((normed_angle / numpy.pi,
numpy.ones(normed_angle.shape),
numpy.ones(normed_angle.shape)))
hsv_stack = numpy.moveaxis(hsv_stack, 0, -1)
color_rgb = hsv_to_rgb(hsv_stack)
mesh_u_normed[numpy.isclose(mesh_u, 0) &
numpy.isclose(mesh_v, 0)] = numpy.nan
mesh_v_normed[numpy.isclose(mesh_u, 0) &
numpy.isclose(mesh_v, 0)] = numpy.nan
# 1/scale to increase vector length for scale > 1
ax.quiver(mesh_x, mesh_y, mesh_u_normed, mesh_v_normed,
color=color_rgb, angles='xy', scale_units='xy',
scale=1.0 / scale, headwidth=0, headlength=0, headaxislength=0,
minlength=0, pivot='mid', alpha=alpha,
width=vector_width, units='xy', edgecolors=color_rgb)
```
#### File: SLIX/SLIX/visualization.py
```python
import numpy
from matplotlib.colors import hsv_to_rgb
from matplotlib import pyplot as plt
from PIL import Image
import copy
import tqdm
from SLIX._visualization import _downsample, _plot_axes_unit_vectors, \
_visualize_multiple_direction, \
_visualize_one_direction
__all__ = ['parameter_map',
'unit_vectors',
'unit_vector_distribution',
'direction']
def parameter_map(parameter_map, fig=None, ax=None, alpha=1,
cmap='viridis', vmin=0, vmax=None, colorbar=True):
"""
This method will create a Matplotlib plot based on imshow to display the
given parameter map in different colors. The parameter map is plotted to
the current axis and figure. If neither is given, the method will
create a new subfigure. To show the results, please use pyplot.show().
Args:
parameter_map: 2D parameter map calculated with SLIX.toolbox.
fig: Matplotlib figure. If None, a new subfigure will be created for fig
and ax.
ax: Matplotlib axis. If None, a new subfigure will be created for fig
and ax.
alpha: Apply alpha to Matplotlib plots to overlay them with some other
image like the averaged transmitted light intensity.
cmap: Matplotlib color map which is used for displaying the image.
vmin: Minimum value in the resulting plot. If any value is below vmin,
it will be displayed in black.
vmax: Maximum value in the resulting plot. If any value is above vmax,
it will be displayed in white.
colorbar: Boolean value controlling if a color bar will be displayed in
the current subplot.
Returns:
The current Matplotlib figure and axis. The image can be shown with
pyplot.show().
"""
if fig is None or ax is None:
fig, ax = plt.subplots(1, 1)
cmap_mod = copy.copy(plt.get_cmap(cmap))
im = ax.imshow(parameter_map, interpolation='nearest', cmap=cmap_mod,
alpha=alpha)
im.cmap.set_under(color='k') # Color for values less than vmin
im.cmap.set_over(color='w') # Color for values more than vmax
im.set_clim(vmin, vmax)
ax.axis('off')
if colorbar:
fig.colorbar(im, ax=ax, fraction=0.046, pad=0.04)
return fig, ax
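# Editor's note: a minimal, hedged usage sketch (the file path is an assumption):
#
#     from SLIX import io, toolbox, visualization
#     stack = io.imread('tests/files/demo.nii')
#     prominence = toolbox.mean_peak_prominence(stack, use_gpu=False)
#     fig, ax = visualization.parameter_map(prominence, colorbar=True)
#     plt.show()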
def unit_vectors(UnitX, UnitY, ax=None, thinout=20,
scale=-1, vector_width=1,
alpha=0.8, background_threshold=0.5,
background_value=0):
"""
This method will create a Matplotlib plot based on quiver to represent the
given unit vectors as colored lines (vector map).
Parameters like thinout can be used to reduce the computing load. If
thinout = 1, the resulting vectors might not be visible
without zooming in significantly. Here, the vectors will only be plotted
to the current axis. To show the results, please use pyplot.show().
Args:
UnitX: Unit vector components along the x-axis (3D NumPy array).
UnitY: Unit vector components along the y-axis (3D NumPy array).
thinout: Downscaling parameter N (defines how many vectors N x N are
replaced by one vector).
Unit vectors will be thinned out using downscaling and thinning in
combination. This will increase the
vector size in the resulting image but will also reduce the information
density. Please use with caution.
scale: Increase the vector length by the given scale. Vectors will be
longer and might overlap if the scale is too high.
ax: Matplotlib axis. If None, the current context axis will be used.
vector_width: When choosing a high scale, the vectors might appear
quite thin which results in hard to read images. This option allows to
increase the vector thickness to improve visibility.
alpha: Apply alpha to Matplotlib plots to overlay them with some other
        image like the averaged transmitted light intensity.
        background_threshold: Minimum fraction of valid (non-background)
        vectors within an N x N area that is required to compute a median
        vector; otherwise the downscaled pixel is set to background_value and
        will not show a vector.
        background_value: Background value of the parameter map. This is
        generally 0 in both axes for unit vector maps
        but can differ if another threshold was set.
Returns:
The current Matplotlib axis. The image can be shown with pyplot.show().
"""
if ax is None:
ax = plt.gca()
while len(UnitX.shape) < 3:
UnitX = UnitX[..., numpy.newaxis]
while len(UnitY.shape) < 3:
UnitY = UnitY[..., numpy.newaxis]
# The default scale is below zero to allow the user to define his own scale
# A scale below zero isn't valid for visualization. If the user
# defines no scale, we suspect that the user wants an image
# where each vector has a scale of one. Therefore we set the scale to
# the same as our thinout when we draw the image.
if scale < 0:
scale = thinout
if thinout > 1:
downscaled_unit_x = _downsample(UnitX, thinout,
background_threshold, background_value)
downscaled_unit_y = _downsample(UnitY, thinout,
background_threshold, background_value)
while len(downscaled_unit_x.shape) < 3:
downscaled_unit_x = downscaled_unit_x[numpy.newaxis, ...]
while len(downscaled_unit_y.shape) < 3:
downscaled_unit_y = downscaled_unit_y[numpy.newaxis, ...]
# Rescale images to original dimensions
for i in range(UnitX.shape[2]):
UnitX[:, :, i] = numpy.array(
Image.fromarray(downscaled_unit_x[:, :, i])
.resize(UnitX.shape[:2][::-1], Image.NEAREST)
)
UnitY[:, :, i] = numpy.array(
Image.fromarray(downscaled_unit_y[:, :, i])
.resize(UnitY.shape[:2][::-1], Image.NEAREST)
)
del downscaled_unit_y
del downscaled_unit_x
for i in range(UnitX.shape[2]):
mesh_x, mesh_y = numpy.meshgrid(numpy.arange(thinout // 2, UnitX.shape[1],
thinout),
numpy.arange(thinout // 2, UnitX.shape[0],
thinout))
mesh_u = UnitX[thinout // 2::thinout, thinout // 2::thinout, i]
mesh_v = UnitY[thinout // 2::thinout, thinout // 2::thinout, i]
_plot_axes_unit_vectors(ax,
mesh_x.flatten(),
mesh_y.flatten(),
mesh_u.flatten(),
mesh_v.flatten(),
scale, alpha, vector_width)
return ax
def unit_vector_distribution(UnitX, UnitY, ax=None, thinout=20,
scale=-1, vector_width=1,
alpha=0.01):
"""
This method will create a Matplotlib plot based on quiver to represent the
given unit vectors as colored lines (vector map).
Instead of showing a single vector like in unit_vector, here each vector
will be shown in the resulting image. The thinout parameter will determine
how many vectors will be overlapping. It is recommended to use a very small
alpha value to see which directions in the resulting plot are dominant.
Here, the vectors will only be plotted
to the current axis. To show the results, please use pyplot.show(). The
result might need some time to show depending on the input image size.
Args:
UnitX: Unit vector components along the x-axis (3D NumPy array).
UnitY: Unit vector components along the y-axis (3D NumPy array).
thinout: Downscaling parameter N (defines how many vectors N x N are
replaced by one vector).
Unit vectors will be thinned out using downscaling and thinning in
combination. This will increase the
vector size in the resulting image but will also reduce the information
density. Please use with caution.
scale: Increase the vector length by the given scale. Vectors will be
longer and might overlap if the scale is too high.
ax: Matplotlib axis. If None, the current context axis will be used.
vector_width: When choosing a high scale, the vectors might appear
quite thin which results in hard to read images. This option allows to
increase the vector thickness to improve visibility.
alpha: Apply alpha to Matplotlib plots to overlay them with some other
        image like the averaged transmitted light intensity.
Returns:
The current Matplotlib axis. The image can be shown with pyplot.show().
"""
if ax is None:
ax = plt.gca()
while len(UnitX.shape) < 3:
UnitX = UnitX[..., numpy.newaxis]
while len(UnitY.shape) < 3:
UnitY = UnitY[..., numpy.newaxis]
# The default scale is below zero to allow the user to define his own scale
# A scale below zero isn't valid for visualization. If the user
# defines no scale, we suspect that the user wants an image
# where each vector has a scale of one. Therefore we set the scale to
# the same as our thinout when we draw the image.
if scale < 0:
scale = thinout
mesh_x = numpy.empty(UnitX.size)
mesh_y = numpy.empty(UnitX.size)
mesh_u = numpy.empty(UnitX.size)
mesh_v = numpy.empty(UnitX.size)
idx = 0
progress_bar = tqdm.tqdm(total=thinout*thinout,
desc='Creating unit vectors.')
for offset_x in range(thinout):
for offset_y in range(thinout):
progress_bar.update(1)
for i in range(UnitX.shape[2]):
mesh_x_it, mesh_y_it = numpy.meshgrid(
numpy.arange(0, UnitX.shape[1] - offset_x, thinout),
numpy.arange(0, UnitX.shape[0] - offset_y, thinout)
)
mesh_x_it = mesh_x_it.flatten()
mesh_y_it = mesh_y_it.flatten()
mesh_u_it = UnitX[offset_y::thinout, offset_x::thinout, i]\
.flatten()
mesh_v_it = UnitY[offset_y::thinout, offset_x::thinout, i]\
.flatten()
mesh_x[idx:idx + len(mesh_x_it)] = mesh_x_it
mesh_y[idx:idx + len(mesh_y_it)] = mesh_y_it
mesh_u[idx:idx + len(mesh_u_it)] = mesh_u_it
mesh_v[idx:idx + len(mesh_v_it)] = mesh_v_it
idx = idx + len(mesh_x_it)
progress_bar.set_description('Finished. Plotting unit vectors.')
_plot_axes_unit_vectors(ax, mesh_x, mesh_y, mesh_u, mesh_v,
scale, alpha, vector_width)
progress_bar.set_description('Done')
progress_bar.close()
return ax
def direction(direction, saturation=None, value=None):
"""
Generate a 2D colorized direction image in the HSV color space based on
    the original direction. The hue is determined by the direction angle;
    saturation and value default to one unless weighting images are given.
If the direction parameter is only a 2D numpy array, the result will be
a simple orientation map where each pixel contains the HSV value
corresponding to the direction angle.
    When a 3D stack with up to four directions is used, the result will be
    different. The resulting image will have two times the width and height.
    Each 2x2 square will show the direction angle of up to four directions.
Depending on the number of directions, the following pattern is used to
show the different direction angles.
1 direction:
1 1
1 1
2 directions:
1 2
2 1
    3 directions:
    1 2
    3 0
    4 directions:
    1 2
    3 4
Args:
direction: 2D or 3D Numpy array containing the direction of the image
stack
saturation: Weight image by using the saturation value. Use either a 2D image
or a 3D image with the same shape as the direction. If no image
is used, the saturation for all image pixels will be set to 1
value: Weight image by using the value. Use either a 2D image
or a 3D image with the same shape as the direction. If no image
is used, the value for all image pixels will be set to 1
Returns:
numpy.ndarray: 2D image containing the resulting HSV orientation map
"""
direction = numpy.array(direction)
direction_shape = direction.shape
hue = direction
# If no saturation is given, create an "empty" saturation image that will be used
if saturation is None:
saturation = numpy.ones(direction.shape)
# Normalize saturation image
saturation = saturation / saturation.max()
# If we have a saturation image, check if the shape matches (3D) and correct accordingly
while len(saturation.shape) < len(direction.shape):
saturation = saturation[..., numpy.newaxis]
if not saturation.shape[-1] == direction_shape[-1]:
saturation = numpy.repeat(saturation, direction_shape[-1], axis=-1)
# If no value is given, create an "empty" value image that will be used
if value is None:
value = numpy.ones(direction.shape)
# Normalize value image
value = value / value.max()
# If we have a value image, check if the shape matches (3D) and correct accordingly
while len(value.shape) < len(direction.shape):
value = value[..., numpy.newaxis]
if not value.shape[-1] == direction_shape[-1]:
value = numpy.repeat(value, direction_shape[-1], axis=-1)
hsv_stack = numpy.stack((hue / 180.0, saturation, value))
hsv_stack = numpy.moveaxis(hsv_stack, 0, -1)
rgb_stack = hsv_to_rgb(hsv_stack)
if len(direction_shape) > 2:
return _visualize_multiple_direction(direction, rgb_stack)
else:
return _visualize_one_direction(direction, rgb_stack)
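# Editor's note: hedged usage sketch. Given a direction map `dirs` and unit
# vectors `unit_x`, `unit_y` computed with SLIX.toolbox (see
# tests/test_visualization.py), the main entry points above can be combined as:
#
#     rgb = direction(dirs)                      # HSV-coded orientation map
#     unit_vectors(unit_x, unit_y, thinout=20)   # colored vector overlay
#     plt.show()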
```
#### File: SLIX/tests/test_preparation.py
```python
from SLIX import preparation
import numpy
import pytest
class TestPreparation:
def test_smoothing(self):
pass
def test_thinout_plain(self):
test_arr = numpy.array([1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1] * 11)
test_img = test_arr.reshape((11, 11, 1))
thinned = preparation.thin_out(test_img, factor=2, strategy='plain')
assert numpy.all(thinned == 1)
thinned = preparation.thin_out(test_img, factor=2, strategy='pLaIn')
assert numpy.all(thinned == 1)
def test_thinout_median(self):
test_arr = numpy.array([1, 1, 0] * 4 * 12)
test_img = test_arr.reshape((12, 12, 1))
thinned = preparation.thin_out(test_img, factor=3, strategy='median')
assert numpy.all(thinned == 1)
thinned = preparation.thin_out(test_img, factor=3, strategy='MeDiAn')
assert numpy.all(thinned == 1)
def test_thinout_average(self):
test_arr = numpy.array([1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0] * 12)
test_img = test_arr.reshape((12, 12, 1))
thinned = preparation.thin_out(test_img, factor=2, strategy='average')
assert numpy.all(thinned == 0.5)
thinned = preparation.thin_out(test_img, factor=2, strategy='AVERage')
assert numpy.all(thinned == 0.5)
def test_thinout_error(self):
test_arr = numpy.array([1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1] * 11)
test_img = test_arr.reshape((11, 11, 1))
with pytest.raises(ValueError):
preparation.thin_out(test_img, factor=2, strategy='error')
```
#### File: SLIX/tests/test_visualization.py
```python
import numpy
from SLIX import toolbox, io, visualization
import matplotlib
from matplotlib import pyplot as plt
import pytest
import shutil
import os
matplotlib.use('agg')
class TestVisualization:
def test_visualize_unit_vectors(self):
example = io.imread('tests/files/demo.nii')
peaks = toolbox.significant_peaks(example, use_gpu=False)
centroid = toolbox.centroid_correction(example, peaks, use_gpu=False)
direction = toolbox.direction(peaks, centroid, use_gpu=False)
unit_x, unit_y = toolbox.unit_vectors(direction, use_gpu=False)
visualization.unit_vectors(unit_x, unit_y, thinout=10)
plt.savefig('tests/output/vis/unit_vectors.tiff', dpi=100,
bbox_inches='tight')
orig = io.imread('tests/files/vis/unit_vectors.tiff')
to_compare = io.imread('tests/output/vis/unit_vectors.tiff')
if numpy.all(numpy.isclose(orig - to_compare, 0)):
assert True
else:
io.imwrite('tests/output/vis/unit_vectors-diff.tiff', orig - to_compare)
assert False
def test_visualize_unit_vector_distribution(self):
example = io.imread('tests/files/demo.nii')
peaks = toolbox.significant_peaks(example, use_gpu=False)
centroid = toolbox.centroid_correction(example, peaks, use_gpu=False)
direction = toolbox.direction(peaks, centroid, use_gpu=False)
unit_x, unit_y = toolbox.unit_vectors(direction, use_gpu=False)
visualization.unit_vector_distribution(unit_x, unit_y, thinout=15, vector_width=5, alpha=0.01)
plt.savefig('tests/output/vis/unit_vector_distribution.tiff', dpi=100,
bbox_inches='tight')
orig = io.imread('tests/files/vis/unit_vector_distribution.tiff')
to_compare = io.imread('tests/output/vis/unit_vector_distribution.tiff')
if numpy.all(numpy.isclose(orig - to_compare, 0)):
assert True
else:
io.imwrite('tests/output/vis/unit_vector_distribution-diff.tiff', orig - to_compare)
assert False
def test_visualize_parameter_map(self):
example = io.imread('tests/files/demo.nii')
prominence = toolbox.mean_peak_prominence(example, kind_of_normalization=1, use_gpu=False)
visualization.parameter_map(prominence, colorbar=False)
plt.savefig('tests/output/vis/parameter_map.tiff', dpi=100,
bbox_inches='tight')
orig = io.imread('tests/files/vis/parameter_map.tiff')
to_compare = io.imread('tests/output/vis/parameter_map.tiff')
assert numpy.all(numpy.isclose(orig - to_compare, 0))
def test_visualize_direction_one_dir(self):
image = numpy.arange(0, 180)
hsv_image = visualization.direction(image)
assert numpy.all(hsv_image[0, :] == [1, 0, 0])
assert numpy.all(hsv_image[30, :] == [1, 1, 0])
assert numpy.all(hsv_image[60, :] == [0, 1, 0])
assert numpy.all(hsv_image[90, :] == [0, 1, 1])
assert numpy.all(hsv_image[120, :] == [0, 0, 1])
assert numpy.all(hsv_image[150, :] == [1, 0, 1])
def test_visualize_direction_multiple_dir(self):
first_dir = numpy.arange(0, 180)[..., numpy.newaxis, numpy.newaxis]
second_dir = (first_dir + 30) % 180
second_dir[0:45] = -1
third_dir = (first_dir + 60) % 180
third_dir[0:90] = -1
fourth_dir = (first_dir + 90) % 180
fourth_dir[0:135] = -1
stack_direction = numpy.concatenate((first_dir,
second_dir,
third_dir,
fourth_dir),
axis=-1)
hsv_image = visualization.direction(stack_direction)
print(hsv_image)
# Check first direction
assert numpy.all(hsv_image[0, 0, :] == [1, 0, 0])
assert numpy.all(hsv_image[1, 1, :] == [1, 0, 0])
assert numpy.all(hsv_image[0, 1, :] == [1, 0, 0])
assert numpy.all(hsv_image[1, 0, :] == [1, 0, 0])
assert numpy.all(hsv_image[60, 0, :] == [1, 1, 0])
assert numpy.all(hsv_image[61, 1, :] == [1, 1, 0])
assert numpy.all(hsv_image[60, 1, :] == [1, 1, 0])
assert numpy.all(hsv_image[61, 0, :] == [1, 1, 0])
# Probe check second direction
assert numpy.all(hsv_image[120, 0, :] == [0, 1, 0])
assert numpy.all(hsv_image[121, 1, :] == [0, 1, 0])
assert numpy.all(hsv_image[120, 1, :] == [0, 1, 1])
assert numpy.all(hsv_image[121, 0, :] == [0, 1, 1])
# Probe check third direction
assert numpy.all(hsv_image[240, 0, :] == [0, 0, 1])
assert numpy.all(hsv_image[240, 1, :] == [1, 0, 0])
assert numpy.all(hsv_image[241, 0, :] == [1, 0, 1])
assert numpy.all(hsv_image[241, 1, :] == [0, 0, 0])
# Probe check fourth direction
assert numpy.all(hsv_image[300, 0, :] == [1, 0, 1])
assert numpy.all(hsv_image[300, 1, :] == [1, 1, 0])
assert numpy.all(hsv_image[301, 0, :] == [1, 0, 0])
assert numpy.all(hsv_image[301, 1, :] == [0, 1, 0])
@pytest.fixture(scope="session", autouse=True)
def run_around_tests(request):
if not os.path.isdir('tests/output/vis'):
os.makedirs('tests/output/vis')
# A test function will be run at this point
yield
def remove_test_dir():
if os.path.isdir('tests/output/vis'):
# shutil.rmtree('tests/output/vis')
pass
request.addfinalizer(remove_test_dir)
@pytest.fixture(scope="function", autouse=True)
def run_around_single_test(request):
plt.clf()
plt.cla()
plt.close()
plt.axis('off')
# A test function will be run at this point
yield
``` |
{
"source": "3dpose/3D-Multi-Person-Pose",
"score": 2
} |
#### File: 3D-Multi-Person-Pose/lib/inteutil.py
```python
import numpy as np
import pickle
import glob
import random
import os
class InteDataset():
def __init__(self, bu_path, bu_dep_path, td_path, td_dep_path):
self.vid_inst = []
pred_bu = []
for i in range(20):
data = pickle.load(open(os.path.join(bu_path, '%d.pkl'%(i+1)), 'rb'))
data = np.float32(data)
for j in range(data.shape[1]):
pred_bu.append(data[:,j])
self.vid_inst.append([i,j])
self.pred_bu = pred_bu
pred_bu_dep = []
for i in sorted(glob.glob(os.path.join(bu_dep_path,'*.pkl'))):
dep = pickle.load(open(i, 'rb'))
pred_bu_dep.append(dep)
self.pred_bu_dep = pred_bu_dep
pred_td = []
for i in range(20):
data = pickle.load(open(os.path.join(td_path,'%d.pkl'%(i+1)), 'rb'))
data = np.float32(data)
for j in range(data.shape[1]):
pred_td.append(data[:,j])
self.pred_td = pred_td
pred_td_dep = []
for i in sorted(glob.glob(os.path.join(td_dep_path,'*.pkl'))):
data = pickle.load(open(i, 'rb'))
pred_td_dep.append(data)
self.pred_td_dep = pred_td_dep
def __iter__(self):
self.pos = 0
return self
def __len__(self):
return len(self.pred_bu)
def __next__(self):
if self.pos>=len(self.pred_bu):
raise StopIteration
pred_bu = self.pred_bu[self.pos]
pred_bu_dep = self.pred_bu_dep[self.pos]
pred_td = self.pred_td[self.pos]
pred_td_dep = self.pred_td_dep[self.pos]
pred_bu = np.float32(pred_bu)
pred_bu_dep = np.float32(pred_bu_dep)
pred_td = np.float32(pred_td)
pred_td_dep = np.float32(pred_td_dep)
source_pts = np.stack([pred_td, pred_bu], axis=1)
source_dep = np.stack([pred_td_dep, pred_bu_dep], axis=1)
num_frames = source_pts.shape[0]
source_pts = source_pts.reshape([num_frames, -1])
source_dep = source_dep.reshape([num_frames, -1])
source_dep = source_dep / 1000
vid_inst = self.vid_inst[self.pos]
self.pos += 1
return source_pts, source_dep, vid_inst
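# Editor's note: hedged usage sketch. The class is a plain iterable that yields
# one (pts, depths, video/instance) triple per tracked person; the directory
# layout below is an assumption based on the constructor arguments:
#
#     dataset = InteDataset('bu/', 'bu_dep/', 'td/', 'td_dep/')
#     for source_pts, source_dep, (vid_idx, person_idx) in dataset:
#         # source_pts: flattened top-down + bottom-up pose estimates per frame
#         # source_dep: the two root-depth estimates, scaled by 1/1000
#         #             (presumably millimetres to metres)
#         ...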
```
#### File: lib/models/networkinte.py
```python
import torch
import numpy as np
import TorchSUL.Model as M
class IntegrationNet(M.Model):
def initialize(self):
self.fc1 = M.Dense(512, activation=M.PARAM_GELU)
self.fc2 = M.Dense(512, activation=M.PARAM_GELU)
self.fc3 = M.Dense(512, activation=M.PARAM_GELU)
self.fc4 = M.Dense(2)
def forward(self, pts, depths):
x = torch.cat([pts, depths], dim=1)
x = self.fc1(x)
x = self.fc2(x)
x = self.fc3(x)
x = self.fc4(x)
bsize = pts.shape[0]
pts = pts.reshape(bsize, 2, -1)
w_pt = x[:,0:1]
w_pt = torch.sigmoid(w_pt)
pts = w_pt * pts[:,0] + (1 - w_pt) * pts[:,1] # use a weighted-sum term to increase the robustness
pts = pts.reshape(bsize, 3, 17)
w_dep = x[:,1:2]
w_dep = torch.tanh(w_dep) * 2
depths = w_dep * depths[:,0:1] + (1 - w_dep) * depths[:,1:2]
return pts, depths
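# Editor's note: hedged shape sketch. For a batch of N frames the network takes
# the stacked top-down/bottom-up poses and root depths produced by
# inteutil.InteDataset and predicts two mixing weights per frame (TorchSUL
# models may need an initialization forward pass; details omitted here):
#
#     net = IntegrationNet()
#     N = 32
#     pts = torch.zeros(N, 2 * 3 * 17)       # two stacked 17-joint 3D poses
#     deps = torch.zeros(N, 2)               # two root-depth estimates
#     fused_pts, fused_dep = net(pts, deps)  # shapes [N, 3, 17] and [N, 1]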
```
#### File: lib/models/transnet.py
```python
from torch.nn.parameter import Parameter
import torch.nn.init as init
from TorchSUL import Model as M
import torch
class TransNet(M.Model):
def initialize(self):
self.f3 = M.Dense(3*17)
def forward(self, x):
x = self.f3(x)
return x
class LinearModel(M.Model):
def initialize(self):
self.weight = Parameter(torch.Tensor(3))
self.bias = Parameter(torch.Tensor(3))
init.normal_(self.weight, std=0.001)
init.zeros_(self.bias)
def forward(self, x):
x = self.weight * x
x = x + self.bias
return x
``` |
{
"source": "3D-Printing-for-Microfluidics/OpenGL-STL-slicer",
"score": 2
} |
#### File: OpenGL-STL-slicer/pyopengl/app_pyopengl.py
```python
import glfw
from OpenGL.GL import *
from OpenGL.arrays import vbo
import platform
import os
from stl import mesh
import numpy as np
from PIL import Image
from shader import OurShaderProgram
from printer import printer
import util
EPSILON = 0.0001
SCR_WIDTH = 640
SCR_HEIGHT = int(SCR_WIDTH * printer.height / printer.width)
class params:
VAO, vertVBO, maskVAO, maskVBO = 0, 0, 0, 0
num_of_verts = 0
bounds = dict()
total_thickness = 0.
class slice:
fbo, tex, buf = 0, 0, 0
def start_slicing_stl(stl_filename, layer_thickness, slice_save_path):
glfw.init()
glfw.window_hint(glfw.CONTEXT_VERSION_MAJOR, 3)
glfw.window_hint(glfw.CONTEXT_VERSION_MINOR, 3)
glfw.window_hint(glfw.OPENGL_PROFILE, glfw.OPENGL_CORE_PROFILE)
if platform.system() == 'Darwin': # for Mac OS
glfw.window_hint(glfw.OPENGL_FORWARD_COMPAT, GL_TRUE)
window = glfw.create_window(
SCR_WIDTH, SCR_HEIGHT, 'STL Slicer', None, None)
glfw.make_context_current(window)
glfw.set_framebuffer_size_callback(window, framebuffer_size_callback)
glfw.set_input_mode(window, glfw.CURSOR, glfw.CURSOR_NORMAL)
loadMesh(stl_filename)
glBindVertexArray(params.maskVAO)
sliceShader = OurShaderProgram('shaders/slice.vert', 'shaders/slice.frag')
prepareSlice()
i, height = 0, 0.
while not glfw.window_should_close(window):
processInput(window)
if height >= params.total_thickness - EPSILON:
break
else:
height += layer_thickness
i += 1
draw(sliceShader, height-EPSILON)
renderSlice(sliceShader, height-EPSILON,
os.path.join(slice_save_path, 'out{:04d}.png'.format(i-1)))
glfw.swap_buffers(window)
glfw.poll_events()
glfw.terminate()
def framebuffer_size_callback(window, width, height):
glViewport(0, 0, width, height)
def processInput(window):
if glfw.get_key(window, glfw.KEY_ESCAPE) == glfw.PRESS:
glfw.set_window_should_close(window, GL_TRUE)
def loadMesh(stl):
# Get information about our mesh
our_mesh = mesh.Mesh.from_file(stl)
params.num_of_verts = our_mesh.vectors.shape[0] * 3
params.bounds = {
'xmin': our_mesh.min_[0],
'xmax': our_mesh.max_[0],
'ymin': our_mesh.min_[1],
'ymax': our_mesh.max_[1],
'zmin': our_mesh.min_[2],
'zmax': our_mesh.max_[2]
}
params.total_thickness = params.bounds['zmax'] - params.bounds['zmin']
# make VAO for drawing our mesh
params.VAO = glGenVertexArrays(1)
glBindVertexArray(params.VAO)
vertVBO = vbo.VBO(data=our_mesh.vectors.astype(
GLfloat).tobytes(), usage='GL_STATIC_DRAW', target='GL_ARRAY_BUFFER')
vertVBO.bind()
vertVBO.copy_data()
glVertexAttribPointer(0, 3, GL_FLOAT, GL_FALSE,
3 * sizeof(GLfloat), vertVBO)
glEnableVertexAttribArray(0)
glBindVertexArray(0)
# a mask vertex array for stencil buffer to subtract
maskVert = np.array(
[[0, 0, 0],
[printer.width*printer.pixel, 0, 0],
[printer.width*printer.pixel, printer.height*printer.pixel, 0],
[0, 0, 0],
[printer.width*printer.pixel, printer.height*printer.pixel, 0],
[0, printer.height*printer.pixel, 0]], dtype=GLfloat
)
# make VAO for drawing mask
params.maskVAO = glGenVertexArrays(1)
glBindVertexArray(params.maskVAO)
maskVBO = vbo.VBO(data=maskVert.tobytes(),
usage='GL_STATIC_DRAW', target='GL_ARRAY_BUFFER')
maskVBO.bind()
maskVBO.copy_data()
glVertexAttribPointer(0, 3, GL_FLOAT, GL_FALSE,
3 * sizeof(GLfloat), maskVBO)
glEnableVertexAttribArray(0)
maskVBO.unbind()
glBindVertexArray(0)
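# Editor's note (added comment): draw() and renderSlice() below produce the
# cross-section with the classic stencil-buffer slicing trick. The model matrix
# shifts the mesh so the current slice height lines up with the boundary of the
# orthographic view volume; for the geometry that remains, back faces of the
# closed mesh increment the stencil while front faces decrement it, so the
# stencil ends up non-zero exactly where the slicing plane lies inside the
# solid. The screen-filling mask quad is then drawn only where the stencil is
# non-zero, which fills the slice interior.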
def draw(shader, height):
glEnable(GL_STENCIL_TEST)
glClearColor(0., 0., 0., 1.)
glClear(GL_COLOR_BUFFER_BIT | GL_STENCIL_BUFFER_BIT)
glBindVertexArray(params.VAO)
shader.use()
proj = util.ortho(0, printer.width*printer.pixel,
0, printer.height*printer.pixel,
-params.total_thickness, params.total_thickness, GLfloat)
shader.setMat4("proj", proj)
model = util.translation([0, 0, params.total_thickness-height], GLfloat)
shader.setMat4("model", model)
glEnable(GL_CULL_FACE)
glCullFace(GL_FRONT)
glStencilFunc(GL_ALWAYS, 0, 0xFF)
glStencilOp(GL_KEEP, GL_KEEP, GL_INCR)
glDrawArrays(GL_TRIANGLES, 0, params.num_of_verts)
glCullFace(GL_BACK)
glStencilOp(GL_KEEP, GL_KEEP, GL_DECR)
glDrawArrays(GL_TRIANGLES, 0, params.num_of_verts)
glDisable(GL_CULL_FACE)
glClear(GL_COLOR_BUFFER_BIT)
glBindVertexArray(params.maskVAO)
glStencilFunc(GL_NOTEQUAL, 0, 0xFF)
glStencilOp(GL_KEEP, GL_KEEP, GL_KEEP)
glDrawArrays(GL_TRIANGLES, 0, 6)
glDisable(GL_STENCIL_TEST)
def prepareSlice():
slice.fbo = glGenFramebuffers(1)
slice.tex = glGenTextures(1)
slice.buf = glGenRenderbuffers(1)
glBindTexture(GL_TEXTURE_2D, slice.tex)
glTexImage2D(GL_TEXTURE_2D, 0, GL_RGBA, printer.width,
printer.height, 0, GL_RGBA, GL_UNSIGNED_BYTE, None)
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_LINEAR)
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_LINEAR)
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_S, GL_CLAMP_TO_EDGE)
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_T, GL_CLAMP_TO_EDGE)
glBindTexture(GL_TEXTURE_2D, 0)
def renderSlice(shader, height, filename):
glEnable(GL_STENCIL_TEST)
glViewport(0, 0, printer.width, printer.height)
glBindFramebuffer(GL_FRAMEBUFFER, slice.fbo)
glFramebufferTexture2D(
GL_FRAMEBUFFER, GL_COLOR_ATTACHMENT0, GL_TEXTURE_2D, slice.tex, 0)
glBindRenderbuffer(GL_RENDERBUFFER, slice.buf)
glRenderbufferStorage(GL_RENDERBUFFER, GL_DEPTH_STENCIL,
printer.width, printer.height)
glFramebufferRenderbuffer(
GL_FRAMEBUFFER, GL_DEPTH_STENCIL_ATTACHMENT, GL_RENDERBUFFER, slice.buf)
glClearColor(0.0, 0.0, 0.0, 1.0)
glClear(GL_COLOR_BUFFER_BIT | GL_STENCIL_BUFFER_BIT)
glBindVertexArray(params.VAO)
shader.use()
proj = util.ortho(0, printer.width*printer.pixel,
0, printer.height*printer.pixel,
-params.total_thickness, params.total_thickness, GLfloat)
shader.setMat4("proj", proj)
model = util.translation([0, 0, params.total_thickness-height], GLfloat)
shader.setMat4("model", model)
glEnable(GL_CULL_FACE)
glCullFace(GL_FRONT)
glStencilFunc(GL_ALWAYS, 0, 0xFF)
glStencilOp(GL_KEEP, GL_KEEP, GL_INCR)
glDrawArrays(GL_TRIANGLES, 0, params.num_of_verts)
glCullFace(GL_BACK)
glStencilOp(GL_KEEP, GL_KEEP, GL_DECR)
glDrawArrays(GL_TRIANGLES, 0, params.num_of_verts)
glDisable(GL_CULL_FACE)
glClear(GL_COLOR_BUFFER_BIT)
glBindVertexArray(params.maskVAO)
glStencilFunc(GL_NOTEQUAL, 0, 0xFF)
glStencilOp(GL_KEEP, GL_KEEP, GL_KEEP)
glDrawArrays(GL_TRIANGLES, 0, 6)
glDisable(GL_STENCIL_TEST)
data = glReadPixels(0, 0, printer.width, printer.height,
GL_RED, GL_UNSIGNED_BYTE)
image = Image.frombytes('L', (printer.width, printer.height), data,
'raw', 'L', 0, -1)
image.save(filename)
glBindFramebuffer(GL_FRAMEBUFFER, 0)
glDisable(GL_STENCIL_TEST)
glViewport(0, 0, SCR_WIDTH, SCR_HEIGHT)
def main():
import sys
stl_filename = sys.argv[1]
layer_thickness = float(sys.argv[2])
temp = os.path.dirname(stl_filename)
slice_save_path = os.path.join(temp, 'slices')
if not os.path.exists(slice_save_path):
os.mkdir(slice_save_path)
start_slicing_stl(stl_filename, layer_thickness, slice_save_path)
if __name__ == '__main__':
main()
```
#### File: OpenGL-STL-slicer/pyopengl/util.py
```python
import numpy as np
def ortho(left, right, bottom, top, zNear, zFar, dtype):
'''
Return the following matrix
| 2 -(right+left) |
| ---------- 0 0 ------------- |
| right-left right-left |
| |
| 2 -(top+bottom) |
| 0 ---------- 0 ------------- |
| top-bottom top-bottom |
| |
| -2 -(zFar+zNear) |
| 0 0 ---------- ------------- |
| zFar-zNear zFar-zNear |
| |
| |
| 0 0 0 1 |
'''
M = np.identity(4, dtype=dtype)
M[0,0] = 2 / (right - left)
M[1,1] = 2 / (top - bottom)
M[2,2] = -2 / (zFar - zNear)
M[0,3] = -(right + left) / (right - left)
M[1,3] = -(top + bottom) / (top - bottom)
M[2,3] = -(zFar + zNear) / (zFar - zNear)
return M.T
def translation(direction, dtype):
"""Return matrix to translate by direction vector.
If direction is [x, y, z], return the following matrix
| 1 0 0 x |
| |
| 0 1 0 y |
| |
| 0 0 1 z |
| |
| 0 0 0 1 |
"""
M = np.identity(4, dtype=dtype)
M[:3, 3] = direction[:3]
return M.T
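# Editor's note: a minimal self-check of the two helpers above (not part of the
# original module). Values are arbitrary example numbers; note that both
# functions return the transpose of the documented matrix, presumably to match
# OpenGL's column-major convention.
if __name__ == '__main__':
    proj = ortho(0, 192.0, 0, 108.0, -10.0, 10.0, np.float32)
    model = translation([0.0, 0.0, 2.5], np.float32)
    print(proj)
    print(model)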
``` |
{
"source": "3dprintscanner/incubator-singa",
"score": 2
} |
#### File: test/python/test_operation.py
```python
import unittest
from builtins import str
from singa import tensor
from singa import singa_wrap as singa
from singa import device
from singa import autograd
import numpy as np
autograd.training = True
CTensor = singa.Tensor
gpu_dev = device.create_cuda_gpu()
cpu_dev = device.get_default_device()
dy = CTensor([2, 1, 2, 2])
singa.Gaussian(0.0, 1.0, dy)
def _tuple_to_string(t):
lt = [str(x) for x in t]
return '(' + ', '.join(lt) + ')'
def prepare_inputs_targets_for_rnn_test():
x_0 = np.random.random((2, 3)).astype(np.float32)
x_1 = np.random.random((2, 3)).astype(np.float32)
x_2 = np.random.random((2, 3)).astype(np.float32)
h_0 = np.zeros((2, 2)).astype(
np.float32)
t_0 = np.random.random((2, 2)).astype(np.float32)
t_1 = np.random.random((2, 2)).astype(np.float32)
t_2 = np.random.random((2, 2)).astype(np.float32)
x0 = tensor.Tensor(device=gpu_dev, data=x_0)
x1 = tensor.Tensor(device=gpu_dev, data=x_1)
x2 = tensor.Tensor(device=gpu_dev, data=x_2)
h0 = tensor.Tensor(device=gpu_dev, data=h_0)
t0 = tensor.Tensor(device=gpu_dev, data=t_0)
t1 = tensor.Tensor(device=gpu_dev, data=t_1)
t2 = tensor.Tensor(device=gpu_dev, data=t_2)
inputs = [x0, x1, x2]
targets = [t0, t1, t2]
return inputs, targets, h0
class TestPythonOperation(unittest.TestCase):
def check_shape(self, actual, expect):
self.assertEqual(actual, expect, 'shape mismatch, actual shape is %s'
' exepcted is %s' % (_tuple_to_string(actual),
_tuple_to_string(expect))
)
def test_conv2d_gpu(self):
# (in_channels, out_channels, kernel_size)
conv_0 = autograd.Conv2d(3, 1, 2)
conv_without_bias_0 = autograd.Conv2d(3, 1, 2, bias=False)
gpu_input_tensor = tensor.Tensor(shape=(2, 3, 3, 3), device=gpu_dev)
gpu_input_tensor.gaussian(0.0, 1.0)
y = conv_0(gpu_input_tensor) # PyTensor
dx, dW, db = y.creator.backward(dy) # CTensor
self.check_shape(y.shape, (2, 1, 2, 2))
self.check_shape(dx.shape(), (2, 3, 3, 3))
self.check_shape(dW.shape(), (1, 3, 2, 2))
self.check_shape(db.shape(), (1,))
# forward without bias
y_without_bias = conv_without_bias_0(gpu_input_tensor)
self.check_shape(y_without_bias.shape, (2, 1, 2, 2))
def test_conv2d_cpu(self):
# (in_channels, out_channels, kernel_size)
conv_1 = autograd.Conv2d(3, 1, 2)
conv_without_bias_1 = autograd.Conv2d(3, 1, 2, bias=False)
cpu_input_tensor = tensor.Tensor(shape=(2, 3, 3, 3), device=cpu_dev)
cpu_input_tensor.gaussian(0.0, 1.0)
y = conv_1(cpu_input_tensor) # PyTensor
dx, dW, db = y.creator.backward(dy) # CTensor
self.check_shape(y.shape, (2, 1, 2, 2))
self.check_shape(dx.shape(), (2, 3, 3, 3))
self.check_shape(dW.shape(), (1, 3, 2, 2))
self.check_shape(db.shape(), (1,))
# forward without bias
y_without_bias = conv_without_bias_1(cpu_input_tensor)
self.check_shape(y_without_bias.shape, (2, 1, 2, 2))
def test_SeparableConv2d_gpu(self):
separ_conv=autograd.SeparableConv2d(8, 16, 3, padding=1)
x=np.random.random((10,8,28,28)).astype(np.float32)
x=tensor.Tensor(device=gpu_dev, data=x)
#y = separ_conv(x)
y1 = separ_conv.spacial_conv(x)
y2 = separ_conv.depth_conv(y1)
dy1, dW_depth, _ = y2.creator.backward(y2.data)
dx, dW_spacial, _ = y1.creator.backward(dy1)
self.check_shape(y2.shape, (10, 16, 28, 28))
self.check_shape(dy1.shape(), (10, 8, 28, 28))
self.check_shape(dW_depth.shape(), (16, 8, 1, 1))
self.check_shape(dx.shape(), (10, 8, 28, 28))
self.check_shape(dW_spacial.shape(), (8, 1, 3, 3))
def test_batchnorm2d_gpu(self):
batchnorm_0 = autograd.BatchNorm2d(3)
gpu_input_tensor = tensor.Tensor(shape=(2, 3, 3, 3), device=gpu_dev)
gpu_input_tensor.gaussian(0.0, 1.0)
dy = CTensor([2, 3, 3, 3])
singa.Gaussian(0.0, 1.0, dy)
y = batchnorm_0(gpu_input_tensor)
dx, ds, db = y.creator.backward(dy)
self.check_shape(y.shape, (2, 3, 3, 3))
self.check_shape(dx.shape(), (2, 3, 3, 3))
self.check_shape(ds.shape(), (3,))
self.check_shape(db.shape(), (3,))
def test_vanillaRNN_gpu_tiny_ops_shape_check(self):
# gradients shape check.
inputs, target, h0 = prepare_inputs_targets_for_rnn_test()
rnn = autograd.RNN(3, 2)
hs, _ = rnn(inputs, h0)
loss = autograd.softmax_cross_entropy(hs[0], target[0])
for i in range(1, len(hs)):
l = autograd.softmax_cross_entropy(hs[i], target[i])
loss = autograd.add(loss, l)
# d=autograd.infer_dependency(loss.creator)
# print(d)
for t, dt in autograd.backward(loss):
self.check_shape(t.shape, dt.shape)
def test_LSTM_gpu_tiny_ops_shape_check(self):
# gradients shape check.
inputs, target, h0 = prepare_inputs_targets_for_rnn_test()
c_0 = np.random.random((2, 1)).astype(np.float32)
c0 = tensor.Tensor(device=gpu_dev, data=c_0)
rnn = autograd.LSTM(3, 2)
hs, _, _ = rnn(inputs, (h0, c0))
loss = autograd.softmax_cross_entropy(hs[0], target[0])
for i in range(1, len(hs)):
l = autograd.softmax_cross_entropy(hs[i], target[i])
loss = autograd.add(loss, l)
# d=autograd.infer_dependency(loss.creator)
# print(d)
for t, dt in autograd.backward(loss):
self.check_shape(t.shape, dt.shape)
def gradients_check(self, func, param, autograds, h=0.0005, df=1):
# param: PyTensor
# autograds: numpy_tensor
p = tensor.to_numpy(param)
it = np.nditer(p, flags=['multi_index'], op_flags=['readwrite'])
while not it.finished:
idx = it.multi_index
diff = np.zeros_like(p)
diff[idx] += h
diff = tensor.from_numpy(diff)
diff.to_device(gpu_dev)
param += diff
pos = func()
pos = tensor.to_numpy(pos)
param -= diff
param -= diff
neg = func()
neg = tensor.to_numpy(neg)
numerical_grad = np.sum((pos - neg) * df) / (2 * h)
#print((autograds[idx] - numerical_grad)/numerical_grad)
# threshold set as -5% to +5%
#self.assertAlmostEqual((autograds[idx] - numerical_grad)/(numerical_grad+0.0000001), 0., places=1)
self.assertAlmostEqual(
autograds[idx] - numerical_grad, 0., places=2)
it.iternext()
def test_numerical_gradients_check_for_vallina_rnn(self):
inputs, target, h0 = prepare_inputs_targets_for_rnn_test()
rnn = autograd.RNN(3, 2)
def valinna_rnn_forward():
hs, _ = rnn(inputs, h0)
loss = autograd.softmax_cross_entropy(hs[0], target[0])
for i in range(1, len(hs)):
l = autograd.softmax_cross_entropy(hs[i], target[i])
loss = autograd.add(loss, l)
#grads = autograd.gradients(loss)
return loss
loss1 = valinna_rnn_forward()
auto_grads = autograd.gradients(loss1)
for param in rnn.params:
auto_grad = tensor.to_numpy(auto_grads[param])
self.gradients_check(valinna_rnn_forward, param, auto_grad)
def test_numerical_gradients_check_for_lstm(self):
inputs, target, h0 = prepare_inputs_targets_for_rnn_test()
c_0 = np.zeros((2, 2)).astype(np.float32)
c0 = tensor.Tensor(device=gpu_dev, data=c_0)
rnn = autograd.LSTM(3, 2)
def lstm_forward():
hs, _, _ = rnn(inputs, (h0, c0))
loss = autograd.softmax_cross_entropy(hs[0], target[0])
for i in range(1, len(hs)):
l = autograd.softmax_cross_entropy(hs[i], target[i])
loss = autograd.add(loss, l)
return loss
loss1 = lstm_forward()
auto_grads = autograd.gradients(loss1)
for param in rnn.params:
auto_grad = tensor.to_numpy(auto_grads[param])
self.gradients_check(lstm_forward, param, auto_grad)
def test_MeanSquareError(self):
X=np.array([4.3,5.4,3.3,3.6,5.7,6.0]).reshape(3,2).astype(np.float32)
T=np.array([4.4,5.3,3.2,3.7,5.4,6.3]).reshape(3,2).astype(np.float32)
x=tensor.from_numpy(X)
t=tensor.from_numpy(T)
x.to_device(gpu_dev)
t.to_device(gpu_dev)
loss= autograd.mse_loss(x,t)
dx=loss.creator.backward()[0]
loss_np=tensor.to_numpy(loss)
self.assertAlmostEqual(loss_np, 0.0366666, places=4)
self.check_shape(dx.shape(), (3, 2))
def test_Abs(self):
X=np.array([0.8,-1.2,3.3,-3.6,-0.5,0.5]).reshape(3,2).astype(np.float32)
XT=np.array([0.8,1.2,3.3,3.6,0.5,0.5]).reshape(3,2).astype(np.float32)
x=tensor.from_numpy(X)
x.to_device(gpu_dev)
result=autograd.abs(x)
Err=XT-result
dx=result.creator.backward()[0]
for ii in Err.flatten():
            self.assertAlmostEqual(ii, 0., places=3)
self.check_shape(dx.shape(), (3, 2))
def test_Exp(self):
X=np.array([0.8,-1.2,3.3,-3.6,-0.5,0.5]).reshape(3,2).astype(np.float32)
XT=np.array([2.2255409,0.22313017,27.112638,0.02732372,0.60653067,1.6487212]).reshape(3,2).astype(np.float32)
x=tensor.from_numpy(X)
x.to_device(gpu_dev)
result=autograd.exp(x)
Err=XT-result
dx=result.creator.backward()[0]
for ii in Err.flatten():
            self.assertAlmostEqual(ii, 0., places=3)
self.check_shape(dx.shape(), (3, 2))
def test_LeakyRelu(self):
X=np.array([0.8,-1.2,3.3,-3.6,-0.5,0.5]).reshape(3,2).astype(np.float32)
XT=np.array([0.8,-0.012,3.3,-0.036,-0.005,0.5]).reshape(3,2).astype(np.float32)
x=tensor.from_numpy(X)
x.to_device(gpu_dev)
result=autograd.LeakyRelu(x)
Err=XT-result
dx=result.creator.backward()[0]
for ii in Err.flatten():
            self.assertAlmostEqual(ii, 0., places=3)
self.check_shape(dx.shape(), (3, 2))
if __name__ == '__main__':
unittest.main()
``` |
{
"source": "3dprogramin/thinkpad-p51-archbang",
"score": 3
} |
#### File: dock/multihead3/change-wallpaper.py
```python
import os
from random import choice
from time import sleep
import sys
# pictures root folder
PICTURES_FOLDER = '/home/icebox/Pictures/wallpapers'
ROTATE_EVERY = 30 # minutes
# valid extensions
EXTENSIONS = [
'PNG',
'JPG',
'BMP',
'JPEG'
]
# checks if file has a good extension
def good_extension(f):
f = f.lower()
for e in EXTENSIONS:
if f.endswith(e.lower()):
return True
return False
# list all files in dir and subdir
def files_in_dir(d):
l = []
for dirpath, dirnames, filenames in os.walk(d):
for filename in [f for f in filenames if good_extension(f)]:
f = os.path.join(dirpath, filename)
l.append(f)
return l
def set_random_wallpaper():
files = files_in_dir(PICTURES_FOLDER)
if not files:
        print('[!] no pictures in folder/s')
return # if no files, return
f = choice(files) # get random file
cmd = 'feh --bg-fill {}'.format(f)
os.system(cmd) # set wallpaper
# main method
def main():
while True:
set_random_wallpaper()
if sys.argv[-1] == 'oneshot': return
sleep(60 * ROTATE_EVERY)
main()
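# Editor's note: usage sketch. Run without arguments to rotate the wallpaper
# every ROTATE_EVERY minutes, or pass 'oneshot' to set a single random
# wallpaper and exit (requires feh to be installed):
#
#     python change-wallpaper.py
#     python change-wallpaper.py oneshot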
``` |
{
"source": "3d-py/data-science-learning",
"score": 2
} |
#### File: data-science-learning/cellular automata/automata_blender.py
```python
import bpy
import bmesh
from mathutils import Vector
import numpy as np
import math
import itertools
# Blender import system clutter
import sys
from pathlib import Path
UTILS_PATH = Path.home() / "Documents/python_workspace/data-science-learning"
SRC_PATH = UTILS_PATH / "cellular automata"
sys.path.append(str(UTILS_PATH))
sys.path.append(str(SRC_PATH))
import Automaton
import automata_blender_utils
import importlib
importlib.reload(Automaton)
importlib.reload(automata_blender_utils)
from Automaton import *
from ds_utils.blender_utils import init_grease_pencil, draw_square, draw_circle, draw_cube, delete_all, render
def random_camera_pos(radius, azimuth, inclination, center=(0, 0, 0)):
x = center[0] + radius * math.cos(azimuth) * math.sin(inclination)
y = center[1] + radius * math.sin(azimuth) * math.sin(inclination)
z = center[2] + radius * math.cos(inclination)
camera = bpy.context.scene.objects['Camera']
camera.location = (x, y, z)
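# Editor's note (hedged sketch): random_camera_pos places the scene camera on a
# sphere of the given radius around `center`, using standard spherical
# coordinates with angles in radians, e.g.:
#
#     random_camera_pos(radius=10, azimuth=math.pi / 4, inclination=math.pi / 3)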
def random_gp_material():
line_color = np.random.rand(3)
fill_color = np.random.rand(3)
line_alpha, fill_alpha = [(1, 1), (0, 1)][np.random.randint(2)] # random comb of alpha for line and fill
if fill_color.sum() > 1.5:
bpy.context.scene.world.color = (1, 1, 1)
else:
bpy.context.scene.world.color = (0, 0, 0)
material = bpy.context.object.active_material.grease_pencil
material.color = (line_color[0], line_color[1], line_color[2], line_alpha)
material.fill_color = (fill_color[0], fill_color[1], fill_color[2], fill_alpha)
##################
# 1-D Automata
##################
def animate_1d_automata(rule, nb_frames=10, scale=1., material_index=0):
# Init automata
automaton_size = nb_frames*2
automaton = Automaton1D(automaton_size, rule=rule)
# Set middle cell as the only active one
#automaton.grid = np.zeros(automaton_size, dtype=np.uint8)
#automaton.grid[automaton_size // 2] = 1
bpy.context.scene.frame_start = 0
bpy.context.scene.frame_end = nb_frames
gpencil_obj_name = "GPencil"
gp_layer = init_grease_pencil(clear_layer=True, gpencil_obj_name=gpencil_obj_name)
gp_frame = gp_layer.frames.new(0)
gpencil = bpy.context.view_layer.objects[gpencil_obj_name]
# center on middle cell
cell_size = 1
translate_vec = np.array([-(automaton_size/2), 0, 0])
for frame in range(nb_frames):
gp_frame = gp_layer.frames.copy(gp_frame)
for i, cell in enumerate(automaton.grid):
# maintain pyramid shape (render only if further from the center at least the current frame number)
if cell and ((automaton_size // 2 - frame) <= i <= (automaton_size // 2 + frame)):
# render cell
centers = [
(i, frame, 0), # normal center
#(i, frame - 1, automaton_size // 2 - frame), # center down
#(i, frame - 1, -(automaton_size // 2) + frame), # center up
]
for center in centers:
centers_shifted = np.array(center) + translate_vec
draw_square(gp_frame, centers_shifted, cell_size, material_index=material_index)
#draw_cube(gp_frame, centers_shifted, cell_size, material_index=material_index)
automaton.update()
# scale automaton size along the growth axis
if scale != 1.:
gpencil.scale[0] = scale
gpencil.select_set(True)
bpy.ops.object.transform_apply(location=False, rotation=False, scale=True)
rule_0 = {'111': 1, '110': 1, '101': 1, '100': 1, '011': 1, '010': 1, '001': 1, '000': 0}
rule_sierpinski = {'111': 0, '110': 1, '101': 0, '100': 1, '011': 1, '010': 0, '001': 1, '000': 0}
rule_x = {'111': 0, '110': 0, '101': 0, '100': 1, '011': 1, '010': 1, '001': 1, '000': 0}
rule_106 = {'111': 0, '110': 1, '101': 1, '100': 0, '011': 1, '010': 0, '001': 1, '000': 0}
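# Editor's note: reading the rule values for '111'..'000' as a binary number
# gives the Wolfram rule number, e.g. rule_sierpinski is 01011010b = rule 90
# and rule_x is 00011110b = rule 30 (rule_106 is, as its name says, rule 106;
# rule_0 corresponds to rule 254).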
#animate_1d_automata(rule_sierpinski, nb_frames=50)
def explore_1d_automata(nb_frames, nb_runs: int, render_dir: Path):
nb_simmetry = 4
angle = 360/nb_simmetry
scale = angle/90
all_rules_config = list(itertools.product([0, 1], repeat=8))
configs_idxs = np.random.choice(np.arange(len(all_rules_config)), nb_runs)
for idx in configs_idxs:
print(scale)
print("#####")
print(f"Rule {idx}")
config = all_rules_config[idx]
print(config)
rule = dict(zip(['111', '110', '101', '100', '011', '010', '001', '000'], config))
animate_1d_automata(rule, nb_frames=nb_frames, scale=scale, material_index=0)
bpy.context.scene.frame_set(nb_frames)
#random_camera_pos(np.random.randint(5, 200), np.random.randint(360), np.random.randint(360))
random_gp_material()
render(str(render_dir / f"rule_{idx}"), animation=False)
render(str(render_dir / f"rule_{idx}"), animation=True)
#explore_1d_automata(50, nb_runs=20
# render_dir = Path.home() / "Downloads/automaton_1d/symm_4_colors_02")
##################
# 2-D Automata
##################
rule_gol = {'neighbours_count_born': 3, # count required to make a cell alive
'neighbours_maxcount_survive': 3, # max number (inclusive) of neighbours that a cell can handle before dying
'neighbours_mincount_survive': 2, # min number (inclusive) of neighbours that a cell needs in order to stay alive
}
# render automata with Grease Pencil
def animate_2d_automata(rule, nb_frames: int, use_grease_pencil=True):
nb_rows = 10
nb_cols = 10
gol = Automaton2D(nb_rows, nb_cols, rule, seed=11)
FRAMES_SPACING = 1
bpy.context.scene.frame_start = 0
bpy.context.scene.frame_end = nb_frames * FRAMES_SPACING
if use_grease_pencil:
gp_layer = init_grease_pencil(clear_layer=True)
else:
obj_size = 0.7
subdivisions = 2
scale_factor = 0.2
init_mat_color = (0.7, 0.1, 0.1)
# obj_generator = lambda idx: automata_blender_utils.icosphere_generator(obj_size, subdivisions, idx[0], idx[1], 0)
obj_generator = lambda idx: automata_blender_utils.cube_generator(obj_size, idx[0], idx[1], 0)
obj_updater = lambda obj, grid, idx: automata_blender_utils.object_updater_hide(obj, grid[idx])
# obj_updater = lambda obj, grid, idx: automata_blender_utils.object_updater_scale(obj, grid[idx],
# scale_factor=scale_factor)
# obj_updater = lambda obj, grid, idx: automata_blender_utils.object_updater_color_vector(
# obj, grid[:, idx[0], idx[1]])
delete_all()
obj_grid = automata_blender_utils.create_grid(gol, obj_generator)
# automata_blender_utils.init_materials(obj_grid, init_mat_color)
gol.update()
for frame in range(nb_frames):
if use_grease_pencil:
gp_frame = gp_layer.frames.new(frame * FRAMES_SPACING)
for idx, val in np.ndenumerate(gol.grid):
if val:
draw_square(gp_frame, (idx[0], idx[1], 0), 1)
else:
bpy.context.scene.frame_set(frame)
automata_blender_utils.update_grid(obj_grid, gol, obj_updater)
gol.update()
#animate_2d_automata(rule_gol, nb_frames=10, use_grease_pencil=True)
def animate_hexagonal_automata(p_freeze, p_melt, nb_frames: int,
nb_rows: int, nb_cols: int,
material_index=0):
automaton = HexagonalAutomaton(nb_rows=nb_rows, nb_cols=nb_cols, p_melt=p_melt, p_freeze=p_freeze)
# Set middle cell as the only active one
automaton.grid = np.zeros((nb_rows, nb_cols), dtype=np.uint8)
automaton.grid[(nb_rows // 2, nb_cols//2)] = 1
FRAMES_SPACING = 1
bpy.context.scene.frame_start = 0
bpy.context.scene.frame_end = nb_frames * FRAMES_SPACING
gp_layer = init_grease_pencil(clear_layer=True)
#gp_frame = gp_layer.frames.new(0)
z = 0
delete_all()
for frame in range(0, nb_frames):
if frame % 10 == 0:
print("Frame {}".format(frame))
#gp_frame = gp_layer.frames.copy(gp_frame)
gp_frame = gp_layer.frames.new(frame * FRAMES_SPACING)
# reduce reference size at each new frame
size = 1/(frame+1)
# Hexagonal shape size for grid adjustment
hex_size = size*math.cos(math.pi/6)
short_size = size/2
#z += size/2
z = 0
for row in range(nb_rows):
for col in range(nb_cols):
if automaton.grid[row, col]:
# Calculate row and col position for the current cell
# taking into account hexagonal shape and shifting by growth
row_pos = (row - nb_rows//2) * (2*size - short_size)
col_pos = (col - nb_cols//2) * (2*hex_size) - hex_size
# shift even rows
if row % 2 == 0:
col_pos += hex_size
# Render cell
#automata_blender_utils.cube_generator(size, row_pos, col_pos, z)
#draw_cube(gp_frame, (row_pos, col_pos, z), size, material_index=material_index)
draw_circle(gp_frame, (row_pos, col_pos, z), size, 6, material_index=material_index)
automaton.update()
p_freeze = [0, 1, 0., 0., 0, 0., 0., 1., 0, 0., 0., 0., 0., 0]
p_melt = [0, 0, 0., 0., 0., 0, 1, 0, 0., 1., 0, 1., 0., 0]
#animate_hexagonal_automata(p_freeze, p_melt, 10, nb_rows=120, nb_cols=120)
def explore_hexagonal_automata(nb_frames: int, nb_runs: int, nb_rows: int, nb_cols: int):
render_dir = Path.home() / f"Downloads/automaton_hexagonal/flat_hexa_logo/{nb_frames}"
render_dir.mkdir(exist_ok=True)
with open(str(render_dir / "logs.txt"), 'w+') as f:
for run in range(nb_runs):
p_freeze = np.random.choice([1., 0.], 14)
p_melt = np.random.choice([1., 0.], 14)
print("#####")
print(f"Run {run}")
print(f"p_freeze {p_freeze}")
print(f"p_melt {p_melt}")
animate_hexagonal_automata(p_freeze, p_melt, nb_frames=nb_frames, nb_rows=nb_rows, nb_cols=nb_cols,
material_index=0)
bpy.context.scene.frame_set(nb_frames)
#random_camera_pos(np.random.randint(5, 200), np.random.randint(360), np.random.randint(360))
            #random_gp_material()
render(str(render_dir / f"run_{run}"), animation=False)
#render(str(render_dir / f"run_{run}"), animation=True)
f.write(f"p_freeze:{p_freeze}-")
f.write(f"p_melt:{p_melt}\n")
#for nb_frames in range(10, 20):
# explore_hexagonal_automata(nb_frames, nb_runs=30, nb_rows=120, nb_cols=120)
```
#### File: data-science-learning/ds_utils/plot_utils.py
```python
import numpy as np
import seaborn as sns
from matplotlib import pyplot as plt
from matplotlib import animation
def plot_sample_imgs(get_imgs_fun, img_shape, plot_side=5, savepath=None, cmap='gray'):
"""
Generate visual samples and plot on a grid
:param get_imgs_fun: function that given a int return a corresponding number of generated samples
:param img_shape: shape of image to plot
:param plot_side: samples per row (and column). Generated plot_side x plot_side samples
:param savepath: if given, save plot to such filepath, otherwise show plot
:param cmap: matplotlib specific cmap to use for the plot
"""
f, axarr = plt.subplots(plot_side, plot_side)
samples = get_imgs_fun(plot_side*plot_side)
for row in range(plot_side):
for col in range(plot_side):
axarr[row, col].imshow(samples[plot_side*row+col].reshape(img_shape), cmap=cmap)
axarr[row, col].set_title('')
axarr[row, col].axis('off')
if savepath:
f.savefig(savepath)
plt.close()
else:
plt.show()
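# Editor's note: hedged usage sketch, e.g. with a generator model `g` mapping
# latent vectors to 28x28 images (`g` and `latent_dim` are assumed names):
#
#     plot_sample_imgs(lambda n: g.predict(np.random.randn(n, latent_dim)),
#                      img_shape=(28, 28), plot_side=5)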
def plot_correlation(df):
# Correlation
corr = df.corr()
print(corr)
# Plot masking the upper triangle
    mask = np.zeros_like(corr, dtype=bool)
mask[np.triu_indices_from(mask)] = True
sns.heatmap(corr, mask=mask)
    plt.show()
"""
# Rotate tick labels
ax.set_xticklabels(ax.xaxis.get_majorticklabels(), rotation=25)
# Save plot
sns_plot.savefig("output.png")
fig = swarm_plot.get_figure()
fig.savefig(...)
# Matplotlib to Plotly
import plotly.tools as tls
plotly_fig = tls.mpl_to_plotly(mpl_fig)
py.iplot(plotly_fig)
"""
##############################
# Animation
##############################
#%matplotlib notebook # rely on notebook mode as the inline doesn't seem to work in Jupyter
from matplotlib import animation
plt.rcParams['animation.ffmpeg_path'] = '~/path/to/bin/ffmpeg'
def animated_plot(img_width: int, img_height: int, nb_frames: int, outpath: str = None):
# Setup plot
dpi = 100
if outpath:
fig, ax = plt.subplots(dpi=dpi, figsize=(img_width / dpi, img_height / dpi))
else:
fig, ax = plt.subplots(dpi=dpi, figsize=(5, 5))
plt.axis('off')
#line, = plt.plot([0, 1.0], [init_intercept, 1.0 * init_slope + init_intercept], 'k-')
#epoch_text = plt.text(0, 0, "Epoch 0")
#im = ax.imshow(np.zeros((28, 28)), cmap='gray')
def animate(i, ):
pass
#current_intercept, current_slope = res[i]
#line.set_ydata([current_intercept, 1.0 * current_slope + current_intercept])
#epoch_text.set_text("Epoch {}, cost {:.3f}".format(i, history[i][0]))
#return line,
# one other option is to set the data like
#im.set_data(np.zeros((28, 28))+1)
#ax.imshow(system.B, cmap='gray')
# Animate
ani = animation.FuncAnimation(fig, animate, frames=nb_frames, interval=100,
fargs=[]) # be sure to pass the additional args needed for the animation
if outpath:
ani.save(outpath, animation.FFMpegFileWriter(fps=30))
else:
return ani
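# Usage sketch (assumes a valid ffmpeg path was set in plt.rcParams above):
#   ani = animated_plot(img_width=500, img_height=500, nb_frames=60, outpath='out.mp4')
# or, in a notebook, keep a reference to the returned animation so it is not garbage-collected:
#   ani = animated_plot(500, 500, 60)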
"""
Writer = animation.writers['ffmpeg']
writer = Writer(fps=30)
animation.writers.list()
"""
##############################
# Drawing
##############################
def draw_template():
from PIL import Image, ImageDraw
img_size = 1000
img = Image.new('RGB', (img_size, img_size), (255, 255, 255))
draw = ImageDraw.Draw(img)
draw.ellipse((20, 20, 180, 180), fill='blue', outline='blue')
``` |
{
"source": "3drobotics/flight_review",
"score": 3
} |
#### File: flight_review/plot_app/db_entry.py
```python
from html import escape
import numpy as np
from pyulog import *
from pyulog.px4 import *
from helper import get_log_filename, load_ulog_file, get_total_flight_time
#pylint: disable=missing-docstring, too-few-public-methods
class DBData:
""" simple class that contains information from the DB entry of a single
log file """
def __init__(self):
self.description = ''
self.feedback = ''
self.type = 'personal'
self.wind_speed = -1
self.rating = ''
self.video_url = ''
self.error_labels = []
self.source = ''
super().__init__()
def wind_speed_str(self):
return self.wind_speed_str_static(self.wind_speed)
@staticmethod
def wind_speed_str_static(wind_speed):
return {0: 'Calm', 5: 'Breeze', 8: 'Gale', 10: 'Storm'}.get(wind_speed, '')
def rating_str(self):
return self.rating_str_static(self.rating)
@staticmethod
def rating_str_static(rating):
return {'crash_pilot': 'Crashed (Pilot error)',
'crash_sw_hw': 'Crashed (Software or Hardware issue)',
'unsatisfactory': 'Unsatisfactory',
'good': 'Good',
'great': 'Great!'}.get(rating, '')
def to_json_dict(self):
jsondict = dict()
jsondict['description'] = self.description
jsondict['feedback'] = self.feedback
jsondict['type'] = self.type
jsondict['wind_speed'] = self.wind_speed
jsondict['rating'] = self.rating
jsondict['video_url'] = self.video_url
jsondict['error_labels'] = self.error_labels
jsondict['source'] = self.source
return jsondict
class DBDataGenerated:
""" information from the generated DB entry """
def __init__(self):
self.start_time_utc = 0
self.duration_s = 0
self.mav_type = ''
self.estimator = ''
self.sys_autostart_id = 0
self.sys_hw = ''
self.ver_sw = ''
self.ver_sw_release = ''
self.num_logged_errors = 0
self.num_logged_warnings = 0
self.flight_modes = set()
self.vehicle_uuid = ''
self.flight_mode_durations = [] # list of tuples of (mode, duration sec)
self.vehicle_flight_time = None
super().__init__()
def flight_mode_durations_str(self):
ret = []
for duration in self.flight_mode_durations:
ret.append(str(duration[0])+':'+str(duration[1]))
return ','.join(ret)
@classmethod
def from_log_file(cls, log_id):
""" initialize from a log file """
obj = cls()
ulog_file_name = get_log_filename(log_id)
ulog = load_ulog_file(ulog_file_name)
px4_ulog = PX4ULog(ulog)
# extract information
obj.duration_s = int((ulog.last_timestamp - ulog.start_timestamp)/1e6)
obj.mav_type = px4_ulog.get_mav_type()
obj.estimator = px4_ulog.get_estimator()
obj.sys_autostart_id = ulog.initial_parameters.get('SYS_AUTOSTART', 0)
obj.sys_hw = escape(ulog.msg_info_dict.get('ver_hw', ''))
obj.ver_sw = escape(ulog.msg_info_dict.get('ver_sw', ''))
version_info = ulog.get_version_info()
if version_info is not None:
obj.ver_sw_release = 'v{}.{}.{} {}'.format(*version_info)
obj.num_logged_errors = 0
obj.num_logged_warnings = 0
if 'sys_uuid' in ulog.msg_info_dict:
obj.vehicle_uuid = escape(ulog.msg_info_dict['sys_uuid'])
for m in ulog.logged_messages:
if m.log_level <= ord('3'):
obj.num_logged_errors += 1
if m.log_level == ord('4'):
obj.num_logged_warnings += 1
try:
cur_dataset = ulog.get_dataset('vehicle_status')
flight_mode_changes = cur_dataset.list_value_changes('nav_state')
obj.flight_modes = {int(x[1]) for x in flight_mode_changes}
# get the durations
# make sure the first entry matches the start of the logging
if len(flight_mode_changes) > 0:
flight_mode_changes[0] = (ulog.start_timestamp, flight_mode_changes[0][1])
flight_mode_changes.append((ulog.last_timestamp, -1))
for i in range(len(flight_mode_changes)-1):
flight_mode = flight_mode_changes[i][1]
flight_mode_duration = int((flight_mode_changes[i+1][0] -
flight_mode_changes[i][0]) / 1e6)
obj.flight_mode_durations.append((flight_mode, flight_mode_duration))
except (KeyError, IndexError) as error:
obj.flight_modes = set()
# logging start time & date
try:
# get the first non-zero timestamp
gps_data = ulog.get_dataset('vehicle_gps_position')
indices = np.nonzero(gps_data.data['time_utc_usec'])
if len(indices[0]) > 0:
obj.start_time_utc = int(gps_data.data['time_utc_usec'][indices[0][0]] / 1000000)
except:
# Ignore. Eg. if topic not found
pass
obj.vehicle_flight_time = get_total_flight_time(ulog)
return obj
def to_json_dict(self):
jsondict = dict()
jsondict['duration_s'] = int(self.duration_s)
jsondict['mav_type'] = self.mav_type
jsondict['estimator'] = self.estimator
jsondict['sys_autostart_id'] = int(self.sys_autostart_id)
jsondict['sys_hw'] = self.sys_hw
jsondict['ver_sw'] = self.ver_sw
jsondict['ver_sw_release'] = self.ver_sw_release
jsondict['num_logged_errors'] = self.num_logged_errors
jsondict['num_logged_warnings'] = self.num_logged_warnings
jsondict['flight_modes'] = list(self.flight_modes)
jsondict['vehicle_uuid'] = self.vehicle_uuid
jsondict['flight_mode_durations'] = self.flight_mode_durations
return jsondict
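# Usage sketch (the log id is hypothetical): build the generated DB entry for a stored log
#   gen = DBDataGenerated.from_log_file('example-log-id')
#   row = gen.to_json_dict()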
class DBVehicleData:
""" simple class that contains information from the DB entry of a vehicle """
def __init__(self):
self.uuid = None
self.log_id = ''
self.name = ''
self.flight_time = 0
``` |
{
"source": "3DRPP/printer",
"score": 3
} |
#### File: printer/printer/gpio.py
```python
try:
import RPi.GPIO as GPIO
except RuntimeError:
print("Error importing RPi.GPIO! This is probably because you need "
"superuser privileges. You can achieve this by using 'sudo' to run "
"your script")
gpios = [7, 8, 10, 11, 12, 13, 15, 16, 18, 19, 21, 22, 23, 24, 26, 29,
31, 32, 33, 35, 36, 37, 38, 40]
class Pin:
def __init__(self, number, value):
self.number = number
self.value = value
self.mode = 'out'
def set_value(self, value):
try:
GPIO.output(self.number, GPIO.HIGH if value else GPIO.LOW)
except:
pass
self.value = value
def set_mode(self, mode):
if mode == 'in' or mode == 'out':
self.mode = mode
try:
if mode == 'in':
GPIO.setup(self.number, GPIO.IN)
self.value = bool(GPIO.input(self.number))
print("set mode to in (value=" + str(self.value) + ")")
return self.value
else:
GPIO.setup(self.number, GPIO.OUT)
self.value = bool(GPIO.input(self.number))
print("set mode to out (value=" + str(self.value) + ")")
return self.value
except:
return self.value
def switch_value(self):
try:
GPIO.output(self.number, GPIO.LOW if self.value else GPIO.HIGH)
except:
pass
self.value = not self.value
def switch_mode(self):
if self.mode == 'out':
return 'in', self.set_mode('in')
else:
return 'out', self.set_mode('out')
class Header:
def __init__(self):
self.left_pins = []
self.right_pins = []
for x in gpios:
if x % 2 == 1:
self.left_pins.append(Pin(x, False))
else:
self.right_pins.append(Pin(x, False))
def get_value(self, number):
for pin in self.left_pins + self.right_pins:
if pin.number == number:
return pin.value
def set_value(self, number, value):
for pin in self.left_pins + self.right_pins:
if pin.number == number:
pin.set_value(value)
break
def switch_value(self, number):
for pin in self.left_pins + self.right_pins:
if pin.number == number:
pin.switch_value()
break
def switch_mode(self, number):
for pin in self.left_pins + self.right_pins:
if pin.number == number:
return pin.switch_mode()
header = Header()
try:
GPIO.setmode(GPIO.BOARD)
for id in gpios:
print('Initializing gpio ' + str(id))
GPIO.setup(id, GPIO.OUT, initial=GPIO.LOW)
print('Initialized GPIOs')
except:
print('Could not set GPIO mode to BOARD.')
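# Usage sketch (physical pin 7 is an arbitrary example; BOARD numbering is assumed):
#   header.set_value(7, True)    # drive pin 7 high (the value is still tracked off the Pi)
#   header.switch_mode(7)        # toggle pin 7 between 'out' and 'in'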
```
#### File: printer/printer/models.py
```python
import uuid
import os
from datetime import datetime
import time
from django.contrib.auth.models import BaseUserManager, AbstractBaseUser
from django.db import models
import threading
class UserManager(BaseUserManager):
def create_user(self, name, email, password=None):
user = self.model(name=name, email=email)
user.set_password(password)
user.save(using=self.db)
return user
def create_superuser(self, name, email, password):
user = self.create_user(name=name, email=email)
user.set_password(password)
user.is_admin = True
user.save(using=self.db)
return user
class User(AbstractBaseUser):
objects = UserManager()
USERNAME_FIELD = 'name'
REQUIRED_FIELDS = []
first_name = models.CharField(max_length=254)
last_name = models.CharField(max_length=254)
name = models.CharField('user name', max_length=254, unique=True)
email = models.EmailField('email address', max_length=254, unique=True)
pushbullet_api_key = models.TextField(null=True)
send_emails = models.BooleanField(default=False)
send_pushbullets = models.BooleanField(default=False)
date_joined = models.DateTimeField(default=datetime.now)
is_admin = models.BooleanField(default=False)
def save(self, *args, **kwargs):
if not self.pk:
super().save(*args, **kwargs)
# Do more things here
else:
super().save(*args, **kwargs)
def switch_emails(self):
self.send_emails = not self.send_emails
self.save()
def switch_pushbullets(self):
if self.pushbullet_api_key:
self.send_pushbullets = not self.send_pushbullets
self.save()
def get_file_path(instance, filename):
ext = filename.split('.')[-1]
filename = "%s.%s" % (uuid.uuid4(), ext)
return os.path.join('printables', filename)
class Printable(models.Model):
name = models.CharField('Object name', max_length=256, unique=True)
filename = models.CharField(max_length=256)
file = models.FileField(upload_to=get_file_path)
date_added = models.DateTimeField(default=datetime.now)
date_start = models.DateTimeField(null=True)
date_end = models.DateTimeField(null=True)
owner = models.ForeignKey(User)
comment = models.TextField()
class Task(Printable, threading.Thread):
def run(self):
self.date_start = datetime.now()
self.save()
print("Printable started to be printed!")
time.sleep(100)
print("Printable has been printed!")
self.date_end = datetime.now()
self.save()
printer = Printer()
printer.state = 'PRTD'
printer.current_task = None
def is_waiting(self):
        return self.date_start is None
class Printer:
STATES = (
('IDLE', 'Inactive'),
('PRTG', 'Printing'),
('PAUS', 'Pause'),
('INIT', 'Initializing'),
('PRTD', 'Printed')
)
__shared_state = {}
current_task = None
state = 'IDLE'
def __init__(self):
if self.__shared_state == {}:
self.reset_position()
self.__dict__ = self.__shared_state
def start_task(self, task):
        if self.current_task is not None:
            print('Another task is already running.')
            return False
if not task.is_waiting():
print('This task is already launched or terminated.')
return False
self.current_task = task
self.current_task.setDaemon(True)
self.current_task.start()
self.state = 'WARM'
def reset_position(self):
self.state = 'INIT'
# Move axes to default position
# Check for each switch
# while (all switches are not closed):
# for stepper in steppers:
# if stepper.switch.is_open():
# rotate to start position
time.sleep(10)
self.state = 'IDLE'
def auto_warming(self):
# Launch a thread to handle temperature
pass
def stop_warming(self):
# Stop thread if currently running
pass
def get_nozzle_temperature(self):
return 250
def start_ventilation(self):
return
def stop_ventilation(self):
return
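# Usage sketch: Printer is intended to share state across instances via the class-level
# __shared_state dict (Borg-style), so different views/threads can do
#   printer = Printer()
#   printer.start_task(task)     # 'task' would be a saved Task instance
# and observe the same current_task/state.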
``` |
{
"source": "3D-semantic-Sgmentation/pointnet",
"score": 2
} |
#### File: pointnet/data/tum_mls_dataset.py
```python
import os
import open3d
import numpy as np
import utils.provider as provider
from utils.point_cloud_util import load_labels
from plyfile import PlyData, PlyElement
train_file_prefixes = [
"mls2016_8class_20cm_ascii_area1_1",
"mls2016_8class_20cm_ascii_area1_2",
]
validation_file_prefixes = [
"mls2016_8class_20cm_ascii_area3b",
]
test_file_prefixes = [
]
all_file_prefixes = train_file_prefixes + validation_file_prefixes + test_file_prefixes
map_name_to_file_prefixes = {
"train": train_file_prefixes,
"train_full": train_file_prefixes + validation_file_prefixes,
"validation": validation_file_prefixes,
"test": test_file_prefixes,
"all": all_file_prefixes,
}
class TUMMLSFileData:
def __init__(
self, file_path_without_ext, has_label, use_color, box_size_x, box_size_y
):
"""
Loads file data
"""
self.file_path_without_ext = file_path_without_ext
self.box_size_x = box_size_x
self.box_size_y = box_size_y
# Load points
pcd = open3d.io.read_point_cloud(file_path_without_ext + ".pcd")
self.points = np.asarray(pcd.points)
# Load label. In pure test set, fill with zeros.
if has_label:
self.labels = load_labels(file_path_without_ext + ".labels")
else:
self.labels = np.zeros(len(self.points)).astype(bool)
# Load colors. If not use_color, fill with zeros.
if use_color:
self.colors = np.asarray(pcd.colors)
else:
self.colors = np.zeros_like(self.points)
# Sort according to x to speed up computation of boxes and z-boxes
sort_idx = np.argsort(self.points[:, 0])
self.points = self.points[sort_idx]
self.labels = self.labels[sort_idx]
self.colors = self.colors[sort_idx]
def _get_fix_sized_sample_mask(self, points, num_points_per_sample):
"""
Get down-sample or up-sample mask to sample points to num_points_per_sample
"""
        # TODO: change this to numpy's built-in functions
# Shuffling or up-sampling if needed
if len(points) - num_points_per_sample > 0:
true_array = np.ones(num_points_per_sample, dtype=bool)
false_array = np.zeros(len(points) - num_points_per_sample, dtype=bool)
sample_mask = np.concatenate((true_array, false_array), axis=0)
np.random.shuffle(sample_mask)
else:
# Not enough points, recopy the data until there are enough points
sample_mask = np.arange(len(points))
while len(sample_mask) < num_points_per_sample:
sample_mask = np.concatenate((sample_mask, sample_mask), axis=0)
sample_mask = sample_mask[:num_points_per_sample]
return sample_mask
def _center_box(self, points):
# Shift the box so that z = 0 is the min and x = 0 and y = 0 is the box center
# E.g. if box_size_x == box_size_y == 10, then the new mins are (-5, -5, 0)
box_min = np.min(points, axis=0)
shift = np.array(
[
box_min[0] + self.box_size_x / 2,
box_min[1] + self.box_size_y / 2,
box_min[2],
]
)
points_centered = points - shift
return points_centered
def _extract_z_box(self, center_point):
"""
Crop along z axis (vertical) from the center_point.
Args:
center_point: only x and y coordinates will be used
points: points (n * 3)
scene_idx: scene index to get the min and max of the whole scene
"""
# TODO TAKES LOT OF TIME !! THINK OF AN ALTERNATIVE !
scene_z_size = np.max(self.points, axis=0)[2] - np.min(self.points, axis=0)[2]
box_min = center_point - [
self.box_size_x / 2,
self.box_size_y / 2,
scene_z_size,
]
box_max = center_point + [
self.box_size_x / 2,
self.box_size_y / 2,
scene_z_size,
]
i_min = np.searchsorted(self.points[:, 0], box_min[0])
i_max = np.searchsorted(self.points[:, 0], box_max[0])
mask = (
np.sum(
(self.points[i_min:i_max, :] >= box_min)
* (self.points[i_min:i_max, :] <= box_max),
axis=1,
)
== 3
)
mask = np.hstack( # Stack arrays in sequence horizontally (column wise)
(
np.zeros(i_min, dtype=bool),
mask,
np.zeros(len(self.points) - i_max, dtype=bool),
)
)
# mask = np.sum((points>=box_min)*(points<=box_max),axis=1) == 3
assert np.sum(mask) != 0
return mask
def sample(self, num_points_per_sample):
points = self.points
# Pick a point, and crop a z-box around
center_point = points[np.random.randint(0, len(points))]
scene_extract_mask = self._extract_z_box(center_point)
points = points[scene_extract_mask]
labels = self.labels[scene_extract_mask]
colors = self.colors[scene_extract_mask]
sample_mask = self._get_fix_sized_sample_mask(points, num_points_per_sample)
points = points[sample_mask]
labels = labels[sample_mask]
colors = colors[sample_mask]
# Shift the points, such that min(z) == 0, and x = 0 and y = 0 is the center
# This canonical column is used for both training and inference
points_centered = self._center_box(points)
return points_centered, points, labels, colors
def sample_batch(self, batch_size, num_points_per_sample):
"""
TODO: change this to stack instead of extend
"""
batch_points_centered = []
batch_points_raw = []
batch_labels = []
batch_colors = []
for _ in range(batch_size):
points_centered, points_raw, gt_labels, colors = self.sample(
num_points_per_sample
)
batch_points_centered.append(points_centered)
batch_points_raw.append(points_raw)
batch_labels.append(gt_labels)
batch_colors.append(colors)
return (
np.array(batch_points_centered),
np.array(batch_points_raw),
np.array(batch_labels),
np.array(batch_colors),
)
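# Usage sketch (the file path, flags and sizes below are illustrative only):
#   fd = TUMMLSFileData('path/to/mls2016_8class_20cm_ascii_area1_1', has_label=True,
#                       use_color=False, box_size_x=10, box_size_y=10)
#   centered, raw, labels, colors = fd.sample(num_points_per_sample=8192)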
class TUMMLSDataset:
def __init__(
self, num_points_per_sample, split, use_color, box_size_x, box_size_y, path
):
"""Create a dataset holder
        num_points_per_sample (int): Defaults to 8192. The number of points per sample
        split (str): Defaults to 'train'. The selected part of the data (train, test,
            reduced...)
        use_color (bool): Defaults to True. Whether to use colors or not
        box_size_x (int): Defaults to 10. The size of the extracted cube.
        box_size_y (int): Defaults to 10. The size of the extracted cube.
        path (str): Defaults to 'dataset/semantic_data/'.
"""
# Dataset parameters
self.num_points_per_sample = num_points_per_sample
self.split = split
self.use_color = use_color
self.box_size_x = box_size_x
self.box_size_y = box_size_y
self.num_classes = 9
self.path = path
self.labels_names = [
"unlabeled",
"man-made terrain",
"natural terrain",
"high vegetation",
"low vegetation",
"buildings",
"hard scape",
"scanning artifact",
"cars",
]
# Get file_prefixes
file_prefixes = map_name_to_file_prefixes[self.split]
print("Dataset split:", self.split)
print("Loading file_prefixes:", file_prefixes)
# Load files
self.list_file_data = []
for file_prefix in file_prefixes:
file_path_without_ext = os.path.join(self.path, file_prefix)
file_data = TUMMLSFileData(
file_path_without_ext=file_path_without_ext,
has_label=self.split != "test",
use_color=self.use_color,
box_size_x=self.box_size_x,
box_size_y=self.box_size_y,
)
self.list_file_data.append(file_data)
# Pre-compute the probability of picking a scene
self.num_scenes = len(self.list_file_data)
self.scene_probas = [
len(fd.points) / self.get_total_num_points() for fd in self.list_file_data
]
# Pre-compute the points weights if it is a training set
if self.split == "train" or self.split == "train_full":
# First, compute the histogram of each labels
label_weights = np.zeros(9)
for labels in [fd.labels for fd in self.list_file_data]:
tmp, _ = np.histogram(labels, range(10))
label_weights += tmp
# Then, a heuristic gives the weights
# 1 / log(1.2 + probability of occurrence)
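            # e.g. a label covering half of the points gets weight 1/log(1.2 + 0.5) ~= 1.9,
            # while a very rare label approaches the cap of 1/log(1.2) ~= 5.5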
label_weights = label_weights.astype(np.float32)
label_weights = label_weights / np.sum(label_weights)
self.label_weights = 1 / np.log(1.2 + label_weights)
else:
self.label_weights = np.zeros(9)
def sample_batch_in_all_files(self, batch_size, augment=True):
batch_data = []
batch_label = []
batch_weights = []
for _ in range(batch_size):
points, labels, colors, weights = self.sample_in_all_files(is_training=True)
if self.use_color:
batch_data.append(np.hstack((points, colors)))
else:
batch_data.append(points)
batch_label.append(labels)
batch_weights.append(weights)
batch_data = np.array(batch_data)
batch_label = np.array(batch_label)
batch_weights = np.array(batch_weights)
if augment:
if self.use_color:
batch_data = provider.rotate_feature_point_cloud(batch_data, 3)
else:
batch_data = provider.rotate_point_cloud(batch_data)
return batch_data, batch_label, batch_weights
def sample_in_all_files(self, is_training):
"""
Returns points and other info within a z - cropped box.
"""
# Pick a scene, scenes with more points are more likely to be chosen
scene_index = np.random.choice(
np.arange(0, len(self.list_file_data)), p=self.scene_probas
)
# Sample from the selected scene
points_centered, points_raw, labels, colors = self.list_file_data[
scene_index
].sample(num_points_per_sample=self.num_points_per_sample)
if is_training:
weights = self.label_weights[labels]
return points_centered, labels, colors, weights
else:
return scene_index, points_centered, points_raw, labels, colors
def get_total_num_points(self):
list_num_points = [len(fd.points) for fd in self.list_file_data]
return np.sum(list_num_points)
def get_num_batches(self, batch_size):
return int(
self.get_total_num_points() / (batch_size * self.num_points_per_sample)
)
def get_file_paths_without_ext(self):
return [file_data.file_path_without_ext for file_data in self.list_file_data]
```
#### File: 3D-semantic-Sgmentation/pointnet/newtrain.py
```python
import os
import sys
import json
import datetime
import numpy as np
import tensorflow.compat.v1 as tf
import multiprocessing as mp
import argparse
import time
from datetime import datetime
import utils.metric as metric
from data.tum_mls_dataset import TUMMLSDataset
import importlib
from models import pointnet_seg
# Two global arg collections
parser = argparse.ArgumentParser()
parser.add_argument("--train_set", default="train", help="train, train_full")
parser.add_argument("--config_file", default="semantic_no_color.json", help="config file path")
parser.add_argument('--model', default='pointnet_seg',
help='Model name: pointnet_cls or pointnet_cls_basic [default: pointnet_cls]')
FLAGS = parser.parse_args()
PARAMS = json.loads(open(FLAGS.config_file).read())
os.makedirs(PARAMS["logdir"], exist_ok=True)
# Import dataset
TRAIN_DATASET = TUMMLSDataset(
num_points_per_sample=PARAMS["num_point"],
split="train",
box_size_x=PARAMS["box_size_x"],
box_size_y=PARAMS["box_size_y"],
use_color=PARAMS["use_color"],
path=PARAMS["data_path"],
)
VALIDATION_DATASET = TUMMLSDataset(
num_points_per_sample=PARAMS["num_point"],
split="validation",
box_size_x=PARAMS["box_size_x"],
box_size_y=PARAMS["box_size_y"],
use_color=PARAMS["use_color"],
path=PARAMS["data_path"],
)
print(TRAIN_DATASET.get_total_num_points())
print(VALIDATION_DATASET.get_total_num_points())
NUM_CLASSES = TRAIN_DATASET.num_classes
# Start logging
LOG_FOUT = open(os.path.join(PARAMS["logdir"], "log_train.txt"), "w")
EPOCH_CNT = 0
MODEL = importlib.import_module(FLAGS.model) # import network module
def log_string(out_str):
LOG_FOUT.write(out_str + "\n")
LOG_FOUT.flush()
print(out_str)
def update_progress(progress):
"""
Displays or updates a console progress bar
Args:
progress: A float between 0 and 1. Any int will be converted to a float.
A value under 0 represents a 'halt'.
A value at 1 or bigger represents 100%
"""
barLength = 10 # Modify this to change the length of the progress bar
if isinstance(progress, int):
progress = round(float(progress), 2)
if not isinstance(progress, float):
progress = 0
if progress < 0:
progress = 0
if progress >= 1:
progress = 1
block = int(round(barLength * progress))
text = "\rProgress: [{}] {}%".format(
"#" * block + "-" * (barLength - block), progress * 100
)
sys.stdout.write(text)
sys.stdout.flush()
def get_learning_rate(batch):
"""Compute the learning rate for a given batch size and global parameters
Args:
batch (tf.Variable): the batch size
Returns:
scalar tf.Tensor: the decayed learning rate
"""
learning_rate = tf.train.exponential_decay(
PARAMS["learning_rate"], # Base learning rate.
batch * PARAMS["batch_size"], # Current index into the dataset.
PARAMS["decay_step"], # Decay step.
PARAMS["learning_rate_decay_rate"], # Decay rate.
staircase=True,
)
learning_rate = tf.maximum(learning_rate, 0.00001) # CLIP THE LEARNING RATE!
return learning_rate
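# Worked example (hypothetical config values): with learning_rate=1e-3, batch_size=32,
# decay_step=200000 and learning_rate_decay_rate=0.7, the staircase decay yields
# 1e-3 * 0.7**2 = 4.9e-4 once 400000 samples (12500 optimizer steps) have been processed.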
def get_bn_decay(batch):
"""Compute the batch normalisation exponential decay
Args:
        batch (tf.Variable): the global step, i.e. the number of batches processed so far
Returns:
scalar tf.Tensor: the batch norm decay
"""
bn_momentum = tf.train.exponential_decay(
PARAMS["bn_init_decay"],
batch * PARAMS["batch_size"],
float(PARAMS["decay_step"]),
PARAMS["bn_decay_decay_rate"],
staircase=True,
)
bn_decay = tf.minimum(PARAMS["bn_decay_clip"], 1 - bn_momentum)
return bn_decay
def get_batch(split):
np.random.seed()
if split == "train":
return TRAIN_DATASET.sample_batch_in_all_files(
PARAMS["batch_size"], augment=True
)
else:
return VALIDATION_DATASET.sample_batch_in_all_files(
PARAMS["batch_size"], augment=False
)
def fill_queues(
stack_train, stack_validation, num_train_batches, num_validation_batches
):
"""
Args:
stack_train: mp.Queue to be filled asynchronously
stack_validation: mp.Queue to be filled asynchronously
num_train_batches: total number of training batches
        num_validation_batches: total number of validation batches
"""
pool = mp.Pool(processes=mp.cpu_count())
launched_train = 0
launched_validation = 0
results_train = [] # Temp buffer before filling the stack_train
results_validation = [] # Temp buffer before filling the stack_validation
    # Keep the queues filled up to the requested number of batches
while True:
if stack_train.qsize() + launched_train < num_train_batches:
results_train.append(pool.apply_async(get_batch, args=("train",)))
launched_train += 1
elif stack_validation.qsize() + launched_validation < num_validation_batches:
results_validation.append(pool.apply_async(get_batch, args=("validation",)))
launched_validation += 1
for p in results_train:
if p.ready():
stack_train.put(p.get())
results_train.remove(p)
launched_train -= 1
for p in results_validation:
if p.ready():
stack_validation.put(p.get())
results_validation.remove(p)
launched_validation -= 1
# Stability
time.sleep(0.01)
def init_stacking():
"""
Returns:
stacker: mp.Process object
stack_validation: mp.Queue, use stack_validation.get() to read a batch
stack_train: mp.Queue, use stack_train.get() to read a batch
"""
with tf.device("/cpu:0"):
# Queues that contain several batches in advance
num_train_batches = TRAIN_DATASET.get_num_batches(PARAMS["batch_size"])
num_validation_batches = VALIDATION_DATASET.get_num_batches(
PARAMS["batch_size"]
)
stack_train = mp.Queue(num_train_batches)
stack_validation = mp.Queue(num_validation_batches)
stacker = mp.Process(
target=fill_queues,
args=(
stack_train,
stack_validation,
num_train_batches,
num_validation_batches,
),
)
stacker.start()
return stacker, stack_validation, stack_train
def train_one_epoch(sess, ops, train_writer, stack):
"""Train one epoch
Args:
sess (tf.Session): the session to evaluate Tensors and ops
        ops (dict of tf.Operation): maps names (strings) to the operations to run
        train_writer (tf.summary.FileWriter): used to log the training to TensorBoard
        stack (mp.Queue): queue of pre-fetched training batches
"""
is_training = True
num_batches = TRAIN_DATASET.get_num_batches(PARAMS["batch_size"])
log_string(str(datetime.now()))
update_progress(0)
# Reset metrics
loss_sum = 0
confusion_matrix = metric.ConfusionMatrix(NUM_CLASSES)
# Train over num_batches batches
for batch_idx in range(num_batches):
# Refill more batches if empty
progress = float(batch_idx) / float(num_batches)
update_progress(round(progress, 2))
batch_data, batch_label, batch_weights = stack.get()
# Get predicted labels
feed_dict = {
ops["pointclouds_pl"]: batch_data,
ops["labels_pl"]: batch_label,
# ops["smpws_pl"]: batch_weights,
ops["is_training_pl"]: is_training,
}
summary, step, _, loss_val, pred_val, _ = sess.run(
[
ops["merged"],
ops["step"],
ops["train_op"],
ops["loss"],
ops["pred"],
ops["update_iou"],
],
feed_dict=feed_dict,
)
train_writer.add_summary(summary, step)
pred_val = np.argmax(pred_val, 2)
# Update metrics
for i in range(len(pred_val)):
for j in range(len(pred_val[i])):
confusion_matrix.increment(batch_label[i][j], pred_val[i][j])
loss_sum += loss_val
update_progress(1)
log_string("mean loss: %f" % (loss_sum / float(num_batches)))
log_string("Overall accuracy : %f" % (confusion_matrix.get_accuracy()))
log_string("Average IoU : %f" % (confusion_matrix.get_mean_iou()))
iou_per_class = confusion_matrix.get_per_class_ious()
iou_per_class = [0] + iou_per_class # label 0 is ignored
for i in range(1, NUM_CLASSES):
log_string("IoU of %s : %f" % (TRAIN_DATASET.labels_names[i], iou_per_class[i]))
def eval_one_epoch(sess, ops, validation_writer, stack):
"""Evaluate one epoch
Args:
sess (tf.Session): the session to evaluate tensors and operations
        ops (dict of tf.Operation): maps names (strings) to the operations to run
        validation_writer (tf.summary.FileWriter): used to log the evaluation to TensorBoard
        stack (mp.Queue): queue of pre-fetched validation batches
    Returns:
        float: the overall accuracy computed on the validation set
"""
global EPOCH_CNT
is_training = False
num_batches = VALIDATION_DATASET.get_num_batches(PARAMS["batch_size"])
# Reset metrics
loss_sum = 0
confusion_matrix = metric.ConfusionMatrix(NUM_CLASSES)
log_string(str(datetime.now()))
log_string("---- EPOCH %03d EVALUATION ----" % (EPOCH_CNT))
update_progress(0)
for batch_idx in range(num_batches):
progress = float(batch_idx) / float(num_batches)
update_progress(round(progress, 2))
batch_data, batch_label, batch_weights = stack.get()
feed_dict = {
ops["pointclouds_pl"]: batch_data,
ops["labels_pl"]: batch_label,
# ops["smpws_pl"]: batch_weights,
ops["is_training_pl"]: is_training,
}
summary, step, loss_val, pred_val = sess.run(
[ops["merged"], ops["step"], ops["loss"], ops["pred"]], feed_dict=feed_dict
)
validation_writer.add_summary(summary, step)
pred_val = np.argmax(pred_val, 2) # BxN
# Update metrics
for i in range(len(pred_val)):
for j in range(len(pred_val[i])):
confusion_matrix.increment(batch_label[i][j], pred_val[i][j])
loss_sum += loss_val
update_progress(1)
iou_per_class = confusion_matrix.get_per_class_ious()
# Display metrics
log_string("mean loss: %f" % (loss_sum / float(num_batches)))
log_string("Overall accuracy : %f" % (confusion_matrix.get_accuracy()))
log_string("Average IoU : %f" % (confusion_matrix.get_mean_iou()))
iou_per_class = [0] + iou_per_class # label 0 is ignored
for i in range(1, NUM_CLASSES):
log_string(
"IoU of %s : %f" % (VALIDATION_DATASET.labels_names[i], iou_per_class[i])
)
EPOCH_CNT += 5
return confusion_matrix.get_accuracy()
def train():
"""Train the model on a single GPU
"""
with tf.Graph().as_default():
stacker, stack_validation, stack_train = init_stacking()
with tf.device("/gpu:" + str(PARAMS["gpu"])):
pointclouds_pl, labels_pl = MODEL.placeholder_inputs(PARAMS["batch_size"], PARAMS["num_point"])
is_training_pl = tf.placeholder(tf.bool, shape=())
# Note the global_step=batch parameter to minimize.
# That tells the optimizer to helpfully increment the 'batch' parameter for
# you every time it trains.
batch = tf.Variable(0)
bn_decay = get_bn_decay(batch)
tf.summary.scalar("bn_decay", bn_decay)
print("--- Get model and loss")
# Get model and loss
pred, end_points = MODEL.get_model(pointclouds_pl, is_training_pl, bn_decay=bn_decay)
loss = MODEL.get_loss(pred, labels_pl, end_points)
tf.summary.scalar('loss', loss)
# Compute accuracy
correct = tf.equal(tf.argmax(pred, 2), tf.to_int64(labels_pl))
accuracy = tf.reduce_sum(tf.cast(correct, tf.float32)) / float(PARAMS["batch_size"])
tf.summary.scalar('accuracy', accuracy)
            # Compute mean intersection over union
mean_intersection_over_union, update_iou_op = tf.metrics.mean_iou(
tf.to_int32(labels_pl), tf.to_int32(tf.argmax(pred, 2)), NUM_CLASSES
)
tf.summary.scalar("mIoU", tf.to_float(mean_intersection_over_union))
print("--- Get training operator")
# Get training operator
learning_rate = get_learning_rate(batch)
tf.summary.scalar("learning_rate", learning_rate)
if PARAMS["optimizer"] == "momentum":
optimizer = tf.train.MomentumOptimizer(
learning_rate, momentum=PARAMS["momentum"]
)
else:
assert PARAMS["optimizer"] == "adam"
optimizer = tf.train.AdamOptimizer(learning_rate)
train_op = optimizer.minimize(loss, global_step=batch)
# Add ops to save and restore all the variables.
saver = tf.train.Saver()
# Create a session
config = tf.ConfigProto()
config.gpu_options.allow_growth = True
config.allow_soft_placement = True
config.log_device_placement = False
sess = tf.Session(config=config)
# Add summary writers
merged = tf.summary.merge_all()
train_writer = tf.summary.FileWriter(
os.path.join(PARAMS["logdir"], "train"), sess.graph
)
validation_writer = tf.summary.FileWriter(
os.path.join(PARAMS["logdir"], "validation"), sess.graph
)
# Init variables
sess.run(tf.global_variables_initializer())
sess.run(tf.local_variables_initializer()) # important for mIoU
ops = {
"pointclouds_pl": pointclouds_pl,
"labels_pl": labels_pl,
# "smpws_pl": smpws_pl,
"is_training_pl": is_training_pl,
"pred": pred,
"loss": loss,
"train_op": train_op,
"merged": merged,
"step": batch,
"end_points": end_points,
"update_iou": update_iou_op,
}
# Train for hyper_params["max_epoch"] epochs
best_acc = 0
for epoch in range(PARAMS["max_epoch"]):
print("in epoch", epoch)
# print("max_epoch", PARAMS["max_epoch"])
log_string("**** EPOCH %03d ****" % (epoch))
sys.stdout.flush()
# Train one epoch
train_one_epoch(sess, ops, train_writer, stack_train)
# Evaluate, save, and compute the accuracy
if epoch % 5 == 0:
acc = eval_one_epoch(sess, ops, validation_writer, stack_validation)
if acc > best_acc:
best_acc = acc
save_path = saver.save(
sess,
os.path.join(
PARAMS["logdir"], "best_model_epoch_%03d.ckpt" % (epoch)
),
)
log_string("Model saved in file: %s" % save_path)
print("Model saved in file: %s" % save_path)
# Save the variables to disk.
if epoch % 10 == 0:
save_path = saver.save(
sess, os.path.join(PARAMS["logdir"], "model.ckpt")
)
log_string("Model saved in file: %s" % save_path)
print("Model saved in file: %s" % save_path)
# Kill the process, close the file and exit
stacker.terminate()
LOG_FOUT.close()
sys.exit()
if __name__ == "__main__":
train()
``` |
{
"source": "3dsf/librealsense",
"score": 3
} |
#### File: py/rspy/test.py
```python
import os, sys, subprocess, traceback, platform
from rspy import log
n_assertions = 0
n_failed_assertions = 0
n_tests = 0
n_failed_tests = 0
test_failed = False
test_in_progress = False
test_info = {} # Dictionary for holding additional information to print in case of a failed check.
def set_env_vars( env_vars ):
"""
We want certain environment variables set when we get here. We assume they're not set.
However, it is impossible to change the current running environment to see them. Instead, we rerun ourselves
in a child process that inherits the environment we set.
To do this, we depend on a specific argument in sys.argv that tells us this is the rerun (meaning child
process). When we see it, we assume the variables are set and don't do anything else.
For this to work well, the environment variable requirement (set_env_vars call) should appear as one of the
first lines of the test.
:param env_vars: A dictionary where the keys are the name of the environment variable and the values are the
wanted values in string form (environment variables must be strings)
"""
if sys.argv[-1] != 'rerun':
log.d( 'environment variables needed:', env_vars )
for env_var, val in env_vars.items():
os.environ[env_var] = val
cmd = [sys.executable]
if 'site' not in sys.modules:
# -S : don't imply 'import site' on initialization
cmd += ["-S"]
if sys.flags.verbose:
# -v : verbose (trace import statements)
cmd += ["-v"]
cmd += sys.argv # --debug, or any other args
cmd += ["rerun"]
log.d( 'running:', cmd )
p = subprocess.run( cmd, stderr=subprocess.PIPE, universal_newlines=True )
sys.exit( p.returncode )
log.d( 'rerun detected' )
sys.argv = sys.argv[:-1] # Remove the rerun
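# Usage sketch (the variable name and value below are illustrative, not a real setting):
#   from rspy import test
#   test.set_env_vars( { 'MY_TEST_VAR': '1' } )   # near the top of the test script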
def find_first_device_or_exit():
"""
:return: the first device that was found, if no device is found the test is skipped. That way we can still run
the unit-tests when no device is connected and not fail the tests that check a connected device
"""
import pyrealsense2 as rs
c = rs.context()
if not c.devices.size(): # if no device is connected we skip the test
print("No device found, skipping test")
sys.exit( 0 )
return c.devices[0]
def find_devices_by_product_line_or_exit(product_line):
"""
:param product_line: The product line of the wanted devices
:return: A list of devices of specific product line that was found, if no device is found the test is skipped.
That way we can still run the unit-tests when no device is connected
and not fail the tests that check a connected device
"""
import pyrealsense2 as rs
c = rs.context()
devices_list = c.query_devices(product_line)
if devices_list.size() == 0:
print( "No device of the", product_line, "product line was found; skipping test" )
sys.exit( 0 )
return devices_list
def print_stack():
"""
Function for printing the current call stack. Used when an assertion fails
"""
print( 'Traceback (most recent call last):' )
stack = traceback.format_stack()
# Avoid stack trace into format_stack():
# File "C:/work/git/lrs\unit-tests\py\rspy\test.py", line 124, in check
# print_stack()
# File "C:/work/git/lrs\unit-tests\py\rspy\test.py", line 87, in print_stack
# stack = traceback.format_stack()
stack = stack[:-2]
for line in reversed( stack ):
print( line, end = '' ) # format_stack() adds \n
"""
The following functions are for asserting test cases:
The check family of functions tests an expression and continues the test whether the assertion succeeded or failed.
The require family is equivalent, but execution is aborted if the assertion fails. In this module, the require
behaviour is obtained by passing abort_if_failed=True to the check functions
"""
def check_failed():
"""
Function for when a check fails
"""
global n_failed_assertions, test_failed
n_failed_assertions += 1
test_failed = True
print_info()
def abort():
log.e( "Aborting test" )
sys.exit( 1 )
def check(exp, abort_if_failed = False):
"""
Basic function for asserting expressions.
:param exp: An expression to be asserted, if false the assertion failed
:param abort_if_failed: If True and assertion failed the test will be aborted
:return: True if assertion passed, False otherwise
"""
global n_assertions
n_assertions += 1
if not exp:
print_stack()
print("Check failed, received", exp)
check_failed()
if abort_if_failed:
abort()
return False
reset_info()
return True
def check_equal(result, expected, abort_if_failed = False):
"""
Used for asserting a variable has the expected value
:param result: The actual value of a variable
:param expected: The expected value of the variable
:param abort_if_failed: If True and assertion failed the test will be aborted
:return: True if assertion passed, False otherwise
"""
if type(expected) == list:
print("check_equal should not be used for lists. Use check_equal_lists instead")
if abort_if_failed:
abort()
return False
global n_assertions
n_assertions += 1
if result != expected:
print_stack()
print( "Result was:", result )
print( " expected:", expected )
check_failed()
if abort_if_failed:
abort()
return False
reset_info()
return True
def unreachable( abort_if_failed = False ):
"""
    Used to assert that a certain section of code (e.g. a branch of an if statement) is not reached
:param abort_if_failed: If True and this function is reached the test will be aborted
"""
check(False, abort_if_failed)
def unexpected_exception():
"""
Used to assert that an except block is not reached. It's different from unreachable because it expects
to be in an except block and prints the stack of the error and not the call-stack for this function
"""
global n_assertions
n_assertions += 1
traceback.print_exc( file = sys.stdout )
check_failed()
def check_equal_lists(result, expected, abort_if_failed = False):
"""
    Used to assert that 2 lists are identical: same length and exactly the same elements in the same order.
    Unlike a bare == comparison, this reports which elements differ and at what index.
:param result: The actual list
:param expected: The expected list
:param abort_if_failed: If True and assertion failed the test will be aborted
:return: True if assertion passed, False otherwise
"""
global n_assertions
n_assertions += 1
failed = False
if len(result) != len(expected):
failed = True
print("Check equal lists failed due to lists of different sizes:")
print("The resulted list has", len(result), "elements, but the expected list has", len(expected), "elements")
i = 0
for res, exp in zip(result, expected):
if res != exp:
failed = True
print("Check equal lists failed due to unequal elements:")
print("The element of index", i, "in both lists was not equal")
i += 1
if failed:
print_stack()
print("Result list:", result)
print("Expected list:", expected)
check_failed()
if abort_if_failed:
abort()
return False
reset_info()
return True
def check_exception(exception, expected_type, expected_msg = None, abort_if_failed = False):
"""
Used to assert a certain type of exception was raised, placed in the except block
:param exception: The exception that was raised
:param expected_type: The expected type of exception
:param expected_msg: The expected message in the exception
:param abort_if_failed: If True and assertion failed the test will be aborted
:return: True if assertion passed, False otherwise
"""
failed = False
if type(exception) != expected_type:
failed = [ "Raised exception was of type", type(exception), "and not of type", expected_type, "as expected" ]
if expected_msg and str(exception) != expected_msg:
failed = [ "Exception had message:", str(exception), "\nBut we expected:", expected_msg ]
if failed:
print_stack()
print( *failed )
check_failed()
if abort_if_failed:
abort()
return False
reset_info()
return True
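# Usage sketch (from a test script that does 'from rspy import test'; the call under test is hypothetical):
#   try:
#       might_raise()
#       test.unreachable()
#   except RuntimeError as e:
#       test.check_exception( e, RuntimeError, 'expected message' )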
def check_frame_drops(frame, previous_frame_number, allowed_drops = 1):
"""
Used for checking frame drops while streaming
:param frame: Current frame being checked
:param previous_frame_number: Number of the previous frame
:param allowed_drops: Maximum number of frame drops we accept
:return: False if dropped too many frames or frames were out of order, True otherwise
"""
global test_in_progress
if not test_in_progress:
return True
frame_number = frame.get_frame_number()
failed = False
if previous_frame_number > 0:
dropped_frames = frame_number - (previous_frame_number + 1)
if dropped_frames > allowed_drops:
print( dropped_frames, "frame(s) starting from frame", previous_frame_number + 1, "were dropped" )
failed = True
elif dropped_frames < 0:
print( "Frames repeated or out of order. Got frame", frame_number, "after frame",
previous_frame_number)
failed = True
if failed:
fail()
return False
reset_info()
return True
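# Usage sketch (the frame object and surrounding loop are hypothetical):
#   test.check_frame_drops( frame, previous_frame_number )
#   previous_frame_number = frame.get_frame_number()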
class Information:
"""
Class representing the information stored in test_info dictionary
"""
def __init__(self, value, persistent = False):
self.value = value
self.persistent = persistent
def info( name, value, persistent = False ):
"""
This function is used to store additional information to print in case of a failed test. This information is
erased after the next check. The information is stored in the dictionary test_info, Keys are names (strings)
and the items are of Information class
If information with the given name is already stored it will be replaced
:param name: The name of the variable
:param value: The value this variable stores
:param persistent: If this parameter is True, the information stored will be kept after the following check
and will only be erased at the end of the test ( or when reset_info is called with True)
"""
global test_info
test_info[name] = Information(value, persistent)
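# Usage sketch (the frame object 'f' is hypothetical): stash context that is printed only if a check fails
#   test.info( 'frame-number', f.get_frame_number() )
#   test.check( f.get_data_size() > 0 )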
def reset_info(persistent = False):
"""
erases the stored information
:param persistent: If this parameter is True, even the persistent information will be erased
"""
global test_info
if persistent:
test_info.clear()
else:
        # collect the names first: popping while iterating over the dict raises a RuntimeError
        for name in [n for n, information in test_info.items() if not information.persistent]:
            test_info.pop(name)
def print_info():
global test_info
if not test_info: # No information is stored
return
print("Printing information")
for name, information in test_info.items():
print("Name:", name, " value:", information.value)
reset_info()
def fail():
"""
    Function for manually failing a test, for cases that do not fit any of the check functions above
"""
check_test_in_progress()
    global test_failed
    # only mark the test as failed here; finish() counts the failed test exactly once
    test_failed = True
def check_test_in_progress( in_progress = True ):
global test_in_progress
if test_in_progress != in_progress:
if test_in_progress:
raise RuntimeError( "test case is already running" )
else:
raise RuntimeError( "no test case is running" )
def start(*test_name):
"""
Used at the beginning of each test to reset the global variables
:param test_name: Any number of arguments that combined give the name of this test
"""
print_separator()
global n_tests, test_failed, test_in_progress
n_tests += 1
test_failed = False
test_in_progress = True
reset_info( persistent = True )
print( *test_name )
def finish():
"""
Used at the end of each test to check if it passed and print the answer
"""
check_test_in_progress()
global test_failed, n_failed_tests, test_in_progress
if test_failed:
n_failed_tests += 1
print("Test failed")
else:
print("Test passed")
test_in_progress = False
def print_separator():
"""
For use only in-between test-cases, this will separate them in some visual way so as
to be easier to differentiate.
"""
check_test_in_progress( False )
global n_tests
if n_tests:
print()
print( '___' )
def print_results_and_exit():
"""
Used to print the results of the tests in the file. The format has to agree with the expected format in check_log()
in run-unit-tests and with the C++ format using Catch
"""
print_separator()
global n_assertions, n_tests, n_failed_assertions, n_failed_tests
if n_failed_tests:
passed = n_assertions - n_failed_assertions
print("test cases:", n_tests, "|" , n_failed_tests, "failed")
print("assertions:", n_assertions, "|", passed, "passed |", n_failed_assertions, "failed")
sys.exit(1)
print("All tests passed (" + str(n_assertions) + " assertions in " + str(n_tests) + " test cases)")
sys.exit(0)
``` |
{
"source": "3DTech-Steven7/3DTech",
"score": 3
} |
#### File: 3DTech-Steven7/3DTech/maya_dev.py
```python
import numpy as np
from cython.parallel import prange
#import pandas as pd
import pymel.core as pm
from collections import defaultdict
import time
def count_time(func):
def int_time(*args, **kwargs):
start_time = time.time()
result = func(*args, **kwargs)
over_time = time.time()
total_time = over_time - start_time
        print('Total run time: %s seconds' % total_time)
return result
return int_time
@count_time
def target_mean_v0(data):
data_shape = len(data)
result = dict()
value_dict = defaultdict(int)
count_dict = defaultdict(int)
for i in range(data_shape):
data_loc_x = int(data[i][0])
data_loc_y = data[i][1]
value_dict[data_loc_x] += data_loc_y
count_dict[data_loc_x] += 1
for i in range(data_shape):
data_loc_x = int(data[i][0])
data_loc_y = data[i][1]
result[i] = (value_dict[data_loc_x] - data_loc_y) / count_dict[data_loc_x]
return result
@count_time
def target_mean_v1(data):
data_shape = data.shape[1]
result = np.zeros(data.shape[1])
value_dict = defaultdict(int)
count_dict = defaultdict(int)
for i in range(data_shape):
data_loc_x = int(data[0, i])
data_loc_y = data[1, i]
value_dict[data_loc_x] += data_loc_y
count_dict[data_loc_x] += 1
for i in range(data_shape):
data_loc_x = int(data[0, i])
data_loc_y = data[1, i]
result[i] = (value_dict[data_loc_x] - data_loc_y) / count_dict[data_loc_x]
return result
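# Both versions implement the same encoding: for each sample,
#   result = (sum of y over samples sharing its x  -  the sample's own y) / count of samples with that x
# e.g. for rows (x=1, y=2) and (x=1, y=4) the results are (6-2)/2 = 2.0 and (6-4)/2 = 1.0.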
if __name__ == '__main__':
joint_list = pm.ls(typ="joint")
joint_list = [(i.side.get(), i.radius.get()) for i in joint_list]
    joint_list *= 1000
    joint_array = np.array(joint_list).T
    m = dict()
    for i, j in enumerate(target_mean_v1(joint_array).tolist()):
        m[i] = j
    # verify that the numpy-array version matches the pure-python one
    print(m == target_mean_v0(joint_list))
``` |
{
"source": "3DTK/comma",
"score": 2
} |
#### File: csv/applications/csv_eval.py
```python
import sys
import os
import argparse
import numpy as np
import re
import itertools
import ast
import comma
description = """
evaluate numerical expressions and append computed values to csv stream
"""
notes_and_examples = """
input fields:
1) slashes are replaced by underscores if --full-xpath is given, otherwise basenames are used
2) for ascii streams, input fields are treated as floating point numbers, unless --format is given
output fields:
1) inferred from expressions (by default) or specified by --output-fields
2) appended to possibly modified input
3) treated as floating point numbers, unless --output-format is given
examples:
# basic
( echo 1; echo 2; echo 3 ) | %(prog)s --fields=x 'y = x**2'
# using an intermediate variable
( echo 1; echo 2; echo 3 ) | %(prog)s --fields=x 'n = 2; y = x**n' --output-fields=y
# ascii stream with non-default formats
( echo 0,1; echo 1,1 ) | %(prog)s --fields=x,y 'n = x<y' --output-format=ub
( echo 0,1; echo 1,1 ) | %(prog)s --fields=i,j --format=2ub 'n = i==j' --output-format=ub
# binary stream
( echo 0.1,2; echo 0.1,3 ) | csv-to-bin d,i | %(prog)s --binary=d,i --fields=x,n 'y = x**n' | csv-from-bin d,i,d
# evaluate one of two expressions based on condition
( echo 1,2; echo 2,1 ) | %(prog)s --fields=x,y 'a=where(x<y,x+y,x-y)'
# select output based on condition
( echo 1,2 ; echo 1,3; echo 1,4 ) | %(prog)s --fields=a,b --format=2i --select="(a < b - 1) & (b < 4)"
# pass through input until condition is met
( echo 1,2 ; echo 1,3; echo 1,4 ) | csv-eval --fields=a,b --format=2i --exit-if="(a < b - 1) & (b < 4)"
# update input stream values in place
( echo 1,2 ; echo 3,4 ) | %(prog)s --fields=x,y "x=x+y; y=y-1"
# using full xpaths
( echo 1,2 ; echo 3,4 ) | %(prog)s --fields=one/x,two/y "x+=1; y-=1"
( echo 1,2 ; echo 3,4 ) | %(prog)s --fields=one/x,two/y "one_x+=1; two_y-=1" --full-xpath
# using default values
( echo 1,2 ; echo 3,4 ) | %(prog)s --fields=,y "a=x+y" --default-values="x=0;y=0"
# using init values: calculate triangular numbers
seq 0 10 | %(prog)s --fields=v "sum=sum+v" --init-values="sum=0"
# using init values: calculate fibonacci numbers
seq 0 10 | %(prog)s --fields=v "c=b;b=a+b;a=c" --output-fields a,b --init-values="a=0;b=1"
# operating on time (internally represented in microseconds)
echo 20171112T224515.5 | %(prog)s --format=t --fields=t1 "t2=t1+1000000" --output-format t
echo 20171112T224515.5 | csv-to-bin t | %(prog)s --binary=t --fields=t1 "t2=t1+1000000" --output-format t | csv-from-bin 2t
# using numpy min and max (note axis=0 needed due to implementation details)
echo 0,1,2,3,4 | %(prog)s --fields=a,b,c,d,e --format=5ui "f=min((a,b,c,d,e),axis=0);g=max((a,b,c,d,e),axis=0)"
# format agreement (output format should be considered)
echo 5 | csv-to-bin ul | csv-eval --binary=ul --fields=a "b=a" --output-format=ul | csv-from-bin 2ul
"""
numpy_functions = """
functions:
any function documented at
http://docs.scipy.org/doc/numpy/reference/routines.html
can be used in expressions provided that it is compatible with streaming, that is:
- it performs element-wise operations only
- it returns an array of the same shape and size as the input
some examples are given below
math functions:
http://docs.scipy.org/doc/numpy/reference/routines.math.html
( echo 1,2; echo 3,4 ) | %(prog)s --fields=x,y --precision=2 'a = 2/(x+y); b = a*sin(x-y)'
( echo 1,2; echo 4,3 ) | %(prog)s --fields=x,y 'm = minimum(x,y)'
( echo 1; echo 2; echo 3; echo 4 ) | %(prog)s --format=ui --fields=id 'c = clip(id,3,inf)' --output-format=ui
math constants: pi, e
echo pi | %(prog)s --fields name --format=s[2] 'value=pi' --precision=16 --delimiter='='
echo e | %(prog)s --fields name --format=s[2] 'value=e' --precision=16 --delimiter='='
logical functions:
http://docs.scipy.org/doc/numpy/reference/routines.logic.html
( echo 0,1; echo 1,2; echo 4,3 ) | %(prog)s --fields=x,y 'flag=logical_and(x<y,y<2)' --output-format=b
( echo 0,1; echo 1,2; echo 4,3 ) | %(prog)s --fields=x,y 'flag=logical_or(x>y,y<2)' --output-format=b
( echo 0; echo 1 ) | %(prog)s --format=b --fields=flag 'a=logical_not(flag)' --output-format=b
bitwise functions:
http://docs.scipy.org/doc/numpy/reference/routines.bitwise.html
( echo 0; echo 1 ) | %(prog)s --fields i --format=ub 'n = ~i'
( echo 0; echo 1 ) | %(prog)s --fields i --format=ub 'n = ~i.astype(bool)'
( echo 0,0; echo 0,1; echo 1,1 ) | %(prog)s --fields i,j --format=2ub 'm = i & j'
( echo 0,0; echo 0,1; echo 1,1 ) | %(prog)s --fields i,j --format=2ub 'm = i | j'
string functions:
http://docs.scipy.org/doc/numpy/reference/routines.char.html
( echo 'a'; echo 'a/b' ) | %(prog)s --fields=path --format=s[36] 'n=char.count(path,"/")' --output-format=ui
( echo 'a'; echo 'a/b' ) | %(prog)s --fields=path --format=s[36] 'r=char.replace(path,"/","_")' --output-format=s[36]
time arithmetic:
http://docs.scipy.org/doc/numpy/reference/arrays.datetime.html#datetime-and-timedelta-arithmetic
echo 20150101T000000.000000 | %(prog)s --fields=t --format=t 't1=t+1;t2=t-1' --output-format=2t
echo 20151231T000000 | %(prog)s --fields=t --format=t "t += timedelta64(1,'D')"
echo 20151231T000000,20160515T120000 | %(prog)s --fields=t1,t2 --format=2t "dt = (t2-t1)/timedelta64(1,'D')"
"""
class csv_eval_error(Exception):
pass
def custom_formatwarning(msg, *args):
return __name__ + " warning: " + str(msg) + '\n'
def add_csv_options(parser):
comma.csv.add_options(parser) # comma.csv.add_options(parser, defaults={'fields': 'x,y,z'})
parser.add_argument(
'--format',
default='',
metavar='<format>',
help="for ascii stream, format of named input fields (by default, 'd' for each)")
parser.add_argument(
'--output-fields',
'-o',
default=None,
metavar='<names>',
help="do not infer output fields from expressions; use specified fields instead")
parser.add_argument(
'--output-format',
default='',
metavar='<format>',
help="format of output fields (default: 'd' for each)")
# the options defined below are left for compatibility
# use --output-fields and --output-format instead
parser.add_argument('--append-fields', '-F', help=argparse.SUPPRESS)
parser.add_argument('--append-binary', '-B', help=argparse.SUPPRESS)
def get_args():
parser = argparse.ArgumentParser(
description=description,
epilog=notes_and_examples,
formatter_class=comma.util.argparse_fmt,
add_help=False)
parser.add_argument(
'expressions',
help='numerical expressions to evaluate (see examples)',
nargs='?')
parser.add_argument(
'--help',
'-h',
action='store_true',
help='show this help message and exit')
parser.add_argument(
'--verbose',
'-v',
action='store_true',
help='more output to stderr')
parser.add_argument(
'--permissive',
action='store_true',
help='leave python builtins in the exec environment (use with care)')
add_csv_options(parser)
parser.add_argument(
'--full-xpath',
action='store_true',
help='use full xpaths as variable names with / replaced by _ (default: use basenames of xpaths)')
parser.add_argument(
'--default-values',
'--default',
default='',
metavar='<assignments>',
help='default values for variables in expressions but not in input stream')
parser.add_argument(
'--init-values',
'--init',
default='',
metavar='<assignments>',
help='init values, applied only once')
parser.add_argument(
'--with-error',
default='',
metavar='<message>',
help='if --exit-if, exit with error and a given error message')
parser.add_argument(
'--exit-if',
'--output-until',
'--until',
default='',
metavar='<condition>',
help='output all records and exit when the condition is satisfied')
parser.add_argument(
'--select',
'--output-if',
'--if',
default='',
metavar='<condition>',
help='select and output records of input stream that satisfy the condition')
args = parser.parse_args()
if args.help:
if args.verbose:
parser.epilog += numpy_functions
else:
parser.epilog += "\nfor more help run '%(prog)s -h -v'"
parser.print_help()
parser.exit(0)
if args.fields is None or args.fields == "": sys.exit( "csv-eval: please specify --fields" )
if args.init_values == '' and args.verbose: print("csv-eval: --init currently reads one record at a time, which may be slow", file=sys.stderr)
return args
def ingest_deprecated_options(args):
if args.append_binary:
args.output_format = args.append_binary
del args.append_binary
if args.verbose:
with comma.util.warning(custom_formatwarning) as warn:
msg = "--append-binary is deprecated, consider using --output-format"
warn(msg)
if args.append_fields:
args.output_fields = args.append_fields
del args.append_fields
if args.verbose:
with comma.util.warning(custom_formatwarning) as warn:
msg = "--append-fields is deprecated, consider using --output-fields"
warn(msg)
def check_options(args):
if not (args.expressions or args.select or args.exit_if):
raise csv_eval_error("no expressions are given")
if args.binary and args.format:
raise csv_eval_error("--binary and --format are mutually exclusive")
if args.select or args.exit_if:
if args.expressions:
msg = "--select <condition> and --exit-if <condition> cannot be used with expressions"
raise csv_eval_error(msg)
if args.output_fields or args.output_format:
msg = "--select and --exit-if cannot be used with --output-fields or --output-format"
raise csv_eval_error(msg)
if args.with_error:
if not args.exit_if:
msg = "--with-error is only used with --exit-if"
raise csv_eval_error(msg)
def format_without_blanks(format, fields=[], unnamed_fields=True):
"""
>>> from comma.csv.applications.csv_eval import format_without_blanks
>>> format_without_blanks('3ui', fields=['a', 'b', 'c'])
'ui,ui,ui'
>>> format_without_blanks('ui', fields=['a', 'b', 'c'])
'ui,d,d'
>>> format_without_blanks('ui', fields=['a', '', 'c'])
'ui,s[0],d'
>>> format_without_blanks('4ui', fields=['a', '', 'c'])
'ui,s[0],ui,s[0]'
>>> format_without_blanks('3ui')
's[0],s[0],s[0]'
>>> format_without_blanks('')
''
>>> format_without_blanks('ui,t', ['a', 'b'], unnamed_fields=False)
'ui,t'
>>> format_without_blanks('ui,t', ['a', 'b', 'c'], unnamed_fields=False)
'ui,t,d'
>>> format_without_blanks('ui,,t', ['a', 'b', 'c'], unnamed_fields=False)
'ui,d,t'
>>> format_without_blanks('ui,t', ['', 'b'], unnamed_fields=False)
Traceback (most recent call last):
...
ValueError: expected all fields to be named, got ',b'
>>> format_without_blanks('ui,t,d', ['a', 'b'], unnamed_fields=False)
Traceback (most recent call last):
...
ValueError: format 'ui,t,d' is longer than fields 'a,b'
"""
def comma_type(maybe_type, field, default_type='d', type_of_unnamed_field='s[0]'):
return type_of_unnamed_field if not field else maybe_type or default_type
if not format and not fields:
return ''
maybe_types = comma.csv.format.expand(format).split(',')
if not unnamed_fields:
if '' in fields:
msg = "expected all fields to be named, got '{}'".format(','.join(fields))
raise ValueError(msg)
if len(maybe_types) > len(fields):
msg = "format '{}' is longer than fields '{}'".format(format, ','.join(fields))
raise ValueError(msg)
maybe_typed_fields = itertools.zip_longest(maybe_types, fields)
types = [comma_type(maybe_type, field) for maybe_type, field in maybe_typed_fields]
return ','.join(types)
def assignment_variable_names(expressions):
"""
>>> from comma.csv.applications.csv_eval import assignment_variable_names
>>> assignment_variable_names("a = 1; b = x + y; c = 'x = 1; y = 2'; d = (b == z)")
['a', 'b', 'c', 'd']
>>> assignment_variable_names("a, b = 1, 2")
['a', 'b']
>>> assignment_variable_names("a = b = 1")
['a', 'b']
>>> assignment_variable_names("x = 'a = \\"y = 1;a = 2\\"';")
['x']
>>> assignment_variable_names("")
[]
>>> assignment_variable_names("x=1; x=2; y+=1; y+=2; z=1; z+=2")
['x', 'y', 'z']
"""
tree = ast.parse(expressions, '<string>', mode='exec')
fields = []
for child in ast.iter_child_nodes(tree):
if type(child) == ast.Assign:
for target in child.targets:
for node in ast.walk(target):
if type(node) == ast.Name:
if node.id not in fields:
fields.append(node.id)
elif type(child) == ast.AugAssign:
if child.target.id not in fields:
fields.append(child.target.id)
return fields
def split_fields(fields):
"""
>>> from comma.csv.applications.csv_eval import split_fields
>>> split_fields('')
[]
>>> split_fields('x')
['x']
>>> split_fields('x,y')
['x', 'y']
"""
return fields.split(',') if fields else []
def normalise_full_xpath(fields, full_xpath=True):
"""
>>> from comma.csv.applications.csv_eval import normalise_full_xpath
>>> normalise_full_xpath('')
[]
>>> normalise_full_xpath('a/b')
['a_b']
>>> normalise_full_xpath(',a/b,,c,d/e,')
['', 'a_b', '', 'c', 'd_e', '']
>>> normalise_full_xpath(',a/b,,c,d/e,', full_xpath=False)
['', 'b', '', 'c', 'e', '']
"""
full_xpath_fields = split_fields(fields)
if full_xpath:
return [f.replace('/', '_') for f in full_xpath_fields]
return [f.split('/')[-1] for f in full_xpath_fields]
def prepare_options(args):
ingest_deprecated_options(args)
check_options(args)
check_fields(assignment_variable_names(args.default_values))
args.fields = normalise_full_xpath(args.fields, args.full_xpath)
if args.binary:
args.first_line = ''
args.format = comma.csv.format.expand(args.binary)
args.binary = True
elif args.format:
args.first_line = ''
args.format = format_without_blanks(args.format, args.fields)
args.binary = False
else:
args.first_line = comma.io.readlines_unbuffered(1, sys.stdin)
if not args.first_line:
raise csv_eval_error("first record is empty - could not guess format")
args.format = comma.csv.format.guess_format(args.first_line)
args.binary = False
if args.verbose:
print("{}: guessed format: {}".format(__name__, args.format), file=sys.stderr)
if args.select or args.exit_if:
return
var_names = assignment_variable_names(args.expressions)
args.update_fields = [f for f in var_names if f in args.fields]
if args.output_fields is None:
args.output_fields = [f for f in var_names if f not in args.fields]
else:
args.output_fields = split_fields(args.output_fields)
args.output_format = format_without_blanks(args.output_format,
args.output_fields,
unnamed_fields=False)
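# The exec/eval environment used below is a copy of numpy's namespace with Python
# builtins removed (and 'sys' dropped), so user expressions can call numpy functions
# directly while ordinary builtins stay out of reach unless --permissive is given.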
def restricted_numpy_env():
d = np.__dict__.copy()
d.update(__builtins__={})
d.pop('sys', None)
return d
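# Write values computed for existing input fields back into the stream buffer:
# for binary streams the structured array is patched in place, for ascii streams
# each buffered line is re-split, updated and re-joined with the delimiter.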
def update_buffer(stream, update_array):
index = stream.fields.index
if stream.binary:
fields = stream._input_array.dtype.names
for f in update_array.dtype.names:
stream._input_array[fields[index(f)]] = update_array[f]
else:
def updated_lines():
for line, scalars in zip(stream._ascii_buffer, update_array):
values = line.split(stream.delimiter)
for f, s in zip(update_array.dtype.names, stream._strings(scalars)):
values[index(f)] = s
yield stream.delimiter.join(values)
stream._ascii_buffer = list(updated_lines())
class stream(object):
def __init__(self, args):
self.args = args
self.csv_options = dict(
full_xpath=False,
binary=self.args.binary,
flush=self.args.flush,
delimiter=self.args.delimiter,
precision=self.args.precision,
verbose=self.args.verbose)
self.initialize_input()
self.initialize_update_and_output()
if self.args.verbose: self.print_info()
def initialize_input(self):
self.nonblank_input_fields = [_f for _f in self.args.fields if _f]
if not self.nonblank_input_fields: raise csv_eval_error("please specify input stream fields, e.g. --fields=x,y")
check_fields(self.nonblank_input_fields)
types = comma.csv.format.to_numpy(self.args.format)
self.input_t = comma.csv.struct(','.join(self.args.fields), *types)
self.input = comma.csv.stream(self.input_t, **self.csv_options)
def initialize_update_and_output(self):
if self.args.select or self.args.exit_if: return
if self.args.update_fields:
all_types = comma.csv.format.to_numpy(self.args.format)
index = self.args.fields.index
update_types = [all_types[index(f)] for f in self.args.update_fields]
update_fields = ','.join(self.args.update_fields)
self.update_t = comma.csv.struct(update_fields, *update_types)
if self.args.output_fields:
check_output_fields(self.args.output_fields, self.nonblank_input_fields)
output_types = comma.csv.format.to_numpy(self.args.output_format)
output_fields = ','.join(self.args.output_fields)
self.output_t = comma.csv.struct(output_fields, *output_types)
self.output = comma.csv.stream(self.output_t, tied=self.input, **self.csv_options)
def print_info(self, file=sys.stderr):
fields = ','.join(self.input_t.nondefault_fields)
format = self.input_t.format
print("expressions: '{}'".format(self.args.expressions), file=file)
print("select: '{}'".format(self.args.select), file=file)
print("exit_if: '{}'".format(self.args.exit_if), file=file)
print("default values: '{}'".format(self.args.default_values), file=file)
print("input fields: '{}'".format(fields), file=file)
print("input format: '{}'".format(format), file=file)
if self.args.select or self.args.exit_if:
return
update_fields = ','.join(self.update_t.fields) if self.args.update_fields else ''
output_fields = ','.join(self.output_t.fields) if self.args.output_fields else ''
output_format = self.output_t.format if self.args.output_fields else ''
print("update fields: '{}'".format(update_fields), file=file)
print("output fields: '{}'".format(output_fields), file=file)
print("output format: '{}'".format(output_format), file=file)
def check_fields(fields, allow_numpy_names=True):
for field in fields:
if not re.match(r'^[a-z_]\w*$', field, re.I): raise csv_eval_error("'{}' is not a valid field name".format(field))
if field in ['_input', '_update', '_output']: raise csv_eval_error("'{}' is a reserved name".format(field))
if not allow_numpy_names and field in np.__dict__: raise csv_eval_error("'{}' is a reserved numpy name".format(field))
def check_output_fields(fields, input_fields):
check_fields(fields)
invalid_output_fields = set(fields).intersection(input_fields)
if invalid_output_fields:
msg = "output fields '{}' are present in input fields '{}'" \
.format(','.join(invalid_output_fields), ','.join(input_fields))
raise csv_eval_error(msg)
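# evaluate() works by generating Python source: disperse() emits assignments that
# unpack structured-array columns into plain variables, collect() emits the reverse
# direction, and the user expressions are compiled once and exec'ed per batch of records.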
def evaluate(stream):
def disperse(var, fields): return '\n'.join("{f} = {v}['{f}']".format(v=var, f=f) for f in fields)
def collect(var, fields): return '\n'.join("{v}['{f}'] = {f}".format(v=var, f=f) for f in fields)
if stream.args.init_values == '':
read_size = None
init_code_string = ''
else:
read_size = 1
init_code_string = '\n'.join([stream.args.default_values,
stream.args.init_values,
disperse('_input', stream.nonblank_input_fields),
collect('_update', stream.args.update_fields),
collect('_output', stream.args.output_fields)])
code_string = '\n'.join([stream.args.default_values,
disperse('_input', stream.nonblank_input_fields),
disperse('_output', stream.args.output_fields),
stream.args.expressions,
collect('_update', stream.args.update_fields),
collect('_output', stream.args.output_fields)])
init_code = compile(init_code_string, '<string>', 'exec')
code = compile(code_string, '<string>', 'exec')
env = np.__dict__ if stream.args.permissive else restricted_numpy_env()
size = None
update = None
output = None
input = None
is_shutdown = comma.signal.is_shutdown()
if stream.args.first_line: input = stream.input.read_from_line(stream.args.first_line)
while not is_shutdown:
if input is not None:
if size != input.size:
size = input.size
if stream.args.update_fields: update = stream.update_t(size)
if stream.args.output_fields: output = stream.output_t(size)
exec(init_code, env, {'_input': input, '_update': update, '_output': output})
exec(code, env, {'_input': input, '_update': update, '_output': output})
if stream.args.update_fields: update_buffer(stream.input, update)
if stream.args.output_fields: stream.output.write(output)
else: stream.input.dump()
input = stream.input.read( read_size )
if input is None: break
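# select() and exit_if() compile the user-supplied condition in eval mode and apply it
# to incoming records: select() uses the result as a mask for which records to output,
# exit_if() stops the stream (optionally with an error message) once the condition holds.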
def select(stream):
input = None
env = restricted_numpy_env()
exec(stream.args.default_values, env)
fields = stream.input.fields
code = compile(stream.args.select, '<string>', 'eval')
is_shutdown = comma.signal.is_shutdown()
if stream.args.first_line:
input = stream.input.read_from_line(stream.args.first_line)
while not is_shutdown:
if input is not None:
mask = eval(code, env, {f: input[f] for f in fields})
stream.input.dump(mask=mask)
input = stream.input.read()
if input is None: break
def exit_if(stream):
input = None
env = restricted_numpy_env()
exec(stream.args.default_values, env)
fields = stream.input.fields
code = compile(stream.args.exit_if, '<string>', 'eval')
is_shutdown = comma.signal.is_shutdown()
if stream.args.first_line: input = stream.input.read_from_line(stream.args.first_line)
while not is_shutdown:
if input is not None:
mask = eval(code, env, {f: input[f] for f in fields})
if mask:
if not stream.args.with_error: sys.exit()
name = os.path.basename(sys.argv[0])
print("{} error: {}".format(name, stream.args.with_error), file=sys.stderr)
sys.exit(1)
stream.input.dump()
input = stream.input.read()
if input is None:
break
def main():
try:
comma.csv.time.zone('UTC')
args = get_args()
prepare_options(args)
if args.select:
select(stream(args))
elif args.exit_if:
exit_if(stream(args))
else:
evaluate(stream(args))
except csv_eval_error as e:
name = os.path.basename(sys.argv[0])
print("{} error: {}".format(name, e), file=sys.stderr)
sys.exit(1)
except Exception as e:
import traceback
traceback.print_exc(file=sys.stderr)
sys.exit(1)
if __name__ == '__main__':
main()
``` |
{
"source": "3dvkr/chemiscope",
"score": 3
} |
#### File: python/chemiscope/adapters.py
```python
import numpy as np
import warnings
try:
import ase
HAVE_ASE = True
except ImportError:
HAVE_ASE = False
def frames_to_json(frames):
"""
Convert the given ``frames`` to the JSON structure used by chemiscope.
This function is a shim calling specialized implementations for all the
supported frame types. Currently only `ase.Atoms` frames are supported.
:param frames: iterable over structures (typically a list of frames)
"""
frames_list = list(frames)
if HAVE_ASE and isinstance(frames_list[0], ase.Atoms):
return [_ase_to_json(frame) for frame in frames_list]
elif HAVE_ASE and isinstance(frames_list[0], ase.Atom):
# deal with the user passing a single frame
return frames_to_json([frames])
else:
raise Exception(f"unknown frame type: '{frames_list[0].__class__.__name__}'")
def atom_properties(frames):
"""
Extract "atom" properties from the given ``frames``, and give them as a
dictionary compatible with :py:func:`create_input`.
This function is a shim calling specialized implementations for all the
supported frame types. Currently only `ase.Atoms` frames are supported.
:param frames: iterable over structures (typically a list of frames)
"""
frames_list = list(frames)
if HAVE_ASE and isinstance(frames_list[0], ase.Atoms):
return _ase_atom_properties(frames_list)
elif HAVE_ASE and isinstance(frames_list[0], ase.Atom):
# deal with the user passing a single frame
return atom_properties([frames])
else:
raise Exception(f"unknown frame type: '{frames_list[0].__class__.__name__}'")
def structure_properties(frames):
"""
Extract "structure" properties from the given ``frames``, and give them as a
dictionary compatible with :py:func:`create_input`.
This function is a shim calling specialized implementations for all the
supported frame types. Currently only `ase.Atoms` frames are supported.
:param frames: iterable over structures (typically a list of frames)
"""
frames_list = list(frames)
if HAVE_ASE and isinstance(frames_list[0], ase.Atoms):
return _ase_structure_properties(frames_list)
elif HAVE_ASE and isinstance(frames_list[0], ase.Atom):
# deal with the user passing a single frame
return structure_properties([frames])
else:
raise Exception(f"unknown frame type: '{frames_list[0].__class__.__name__}'")
def _ase_to_json(frame):
"""Implementation of frame_to_json for ase.Atoms"""
data = {}
data["size"] = len(frame)
data["names"] = list(frame.symbols)
data["x"] = [float(value) for value in frame.positions[:, 0]]
data["y"] = [float(value) for value in frame.positions[:, 1]]
data["z"] = [float(value) for value in frame.positions[:, 2]]
if (frame.cell.lengths() != [0.0, 0.0, 0.0]).all():
data["cell"] = list(np.concatenate(frame.cell))
return data
def _ase_atom_properties(frames):
"""Implementation of atom_properties for ase.Atoms"""
IGNORED_ASE_ARRAYS = ["positions", "numbers"]
# extract the set of common properties between all frames
all_names = set()
extra = set()
for name in frames[0].arrays.keys():
if name in IGNORED_ASE_ARRAYS:
continue
all_names.add(name)
for frame in frames[1:]:
for name in frame.arrays.keys():
if name in IGNORED_ASE_ARRAYS:
continue
if name not in all_names:
extra.add(name)
remove = []
for name in all_names:
if name not in frame.arrays.keys():
remove.append(name)
for name in remove:
all_names.remove(name)
extra.add(name)
if len(extra) != 0:
warnings.warn(
"the following atomic properties properties are only defined "
+ f"for a subset of frames: {list(sorted(extra))}; they will be ignored"
)
# create property in the format expected by create_input
properties = {
name: {"target": "atom", "values": value}
for name, value in frames[0].arrays.items()
if name in all_names
}
for frame in frames[1:]:
for name, value in frame.arrays.items():
if name not in all_names:
continue
properties[name]["values"] = np.concatenate(
[properties[name]["values"], value]
)
_remove_invalid_properties(properties, "ASE")
return properties
def _ase_structure_properties(frames):
"""Implementation of structure_properties for ase.Atoms"""
# extract the set of common properties between all frames
all_names = set()
extra = set()
for name in frames[0].info.keys():
all_names.add(name)
for frame in frames[1:]:
for name in frame.info.keys():
if name not in all_names:
extra.add(name)
remove = []
for name in all_names:
if name not in frame.info.keys():
remove.append(name)
for name in remove:
all_names.remove(name)
extra.add(name)
if len(extra) != 0:
warnings.warn(
"the following structure properties properties are only defined "
+ f"for a subset of frames: {list(sorted(extra))}; they will be ignored"
)
# create property in the format expected by create_input
properties = {name: {"target": "structure", "values": []} for name in all_names}
for frame in frames:
for name, value in frame.info.items():
if name in all_names:
properties[name]["values"].append(value)
_remove_invalid_properties(properties, "ASE")
return properties
def _remove_invalid_properties(properties, origin):
"""
Remove invalid properties from the ``properties`` dictionary. ``origin`` is
used in error messages as the property origin
"""
to_remove = []
for name, property in properties.items():
for value in property["values"]:
if not _is_convertible_to_property(value):
warnings.warn(
f"value '{value}' of type '{type(value)}' for the '{name}' "
+ f"property from {origin} is not convertible to float or "
+ "string, this property will be ignored."
)
to_remove.append(name)
break
for name in to_remove:
del properties[name]
def _is_convertible_to_property(value):
"""
Check whether a value is convertible to a chemiscope property, i.e. if it is
a string or something convertible to float.
"""
if isinstance(value, (bytes, str)):
# string types
return True
else:
# everything convertible to float
try:
float(value)
return True
except Exception:
return False
``` |
{
"source": "3e45/minpiler",
"score": 3
} |
#### File: samples/object/find_player.py
```python
from minpiler.std import M, L, use_object
def find_player_by_name(name: str):
M.unit.bind(M.at.UnitType.gamma)
if M.at.const.unit.name != name:
M.unit.bind(M.at.UnitType.beta)
if M.at.const.unit.name != name:
M.unit.bind(M.at.UnitType.alpha)
if M.at.const.unit.name != name:
return None
return M.at.const.unit
player = use_object()
if player is None:
player = find_player_by_name("your_name_here")
if player is not None:
M.unit.bind(M.at.UnitType.flare)
M.unit.move(player.x, player.y)
M.unit.target(player.x, player.y, 1)
```
#### File: samples/object/horizon_bomb_2.py
```python
from minpiler.std import M, inline, util
gamma = util.find_player_by_name("your_name")
i = 0
while i < 10:
x = gamma.shootX
y = gamma.shootY
@inline
def move():
M.unit.bind(M.at.UnitType.horizon)
M.unit.move(x, y)
M.unit.targetp(M.at.const.unit, True)
move()
move()
move()
move()
move()
move()
move()
move()
move()
move()
i += 1
```
#### File: samples/std/lock.py
```python
from minpiler.std import MObject, L, M, inline, Processors, sync
class DekkersAlgorithm:
"""
Seems to be slower than Peterson's algorithm.
https://en.wikipedia.org/wiki/Dekker%27s_algorithm
"""
@inline
@staticmethod
def reset(mem: MObject, addr_wants_to_enter_0: int, addr_wants_to_enter_1: int, addr_turn: int) -> None:
mem[addr_wants_to_enter_0] = 0
mem[addr_wants_to_enter_1] = 0
mem[addr_turn] = 0
@inline
@staticmethod
def lock_processor0(mem: MObject, addr_wants_to_enter_0: int, addr_wants_to_enter_1: int, addr_turn: int):
mem[addr_wants_to_enter_0] = 1
while mem[addr_wants_to_enter_1]:
if mem[addr_turn] != 0:
mem[addr_wants_to_enter_0] = 0
while mem[addr_turn] != 0:
pass
mem[addr_wants_to_enter_0] = 1
@inline
@staticmethod
def lock_processor1(mem: MObject, addr_wants_to_enter_0: int, addr_wants_to_enter_1: int, addr_turn: int):
mem[addr_wants_to_enter_1] = 1
while mem[addr_wants_to_enter_0]:
if mem[addr_turn] != 1:
mem[addr_wants_to_enter_1] = 0
while mem[addr_turn] != 1:
pass
mem[addr_wants_to_enter_1] = 1
@inline
@staticmethod
def unlock_processor0(mem: MObject, addr_wants_to_enter_0: int, addr_wants_to_enter_1: int, addr_turn: int):
mem[addr_turn] = 1
mem[addr_wants_to_enter_0] = 0
@inline
@staticmethod
def unlock_processor1(mem: MObject, addr_wants_to_enter_0: int, addr_wants_to_enter_1: int, addr_turn: int):
mem[addr_turn] = 0
mem[addr_wants_to_enter_1] = 0
class PetersonsAlgorithm:
"""
A mutual exclusion algorithm for two processors. Uses 3 memory addresses.
https://en.wikipedia.org/wiki/Peterson%27s_algorithm
"""
@inline
@staticmethod
def reset(mem: MObject, addr_wants_to_enter_0: int, addr_wants_to_enter_1: int, addr_turn: int) -> None:
mem[addr_wants_to_enter_0] = 0
mem[addr_wants_to_enter_1] = 0
mem[addr_turn] = 0
@inline
@staticmethod
def lock_processor0(mem: MObject, addr_wants_to_enter_0: int, addr_wants_to_enter_1: int, addr_turn: int):
mem[addr_wants_to_enter_0] = 1
mem[addr_turn] = 1
while mem[addr_wants_to_enter_1] and mem[addr_turn] != 0:
pass
@inline
@staticmethod
def lock_processor1(mem: MObject, addr_wants_to_enter_0: int, addr_wants_to_enter_1: int, addr_turn: int):
mem[addr_wants_to_enter_1] = 1
mem[addr_turn] = 0
while mem[addr_wants_to_enter_0] and mem[addr_turn] != 1:
pass
@inline
@staticmethod
def unlock_processor0(mem: MObject, addr_wants_to_enter_0: int, addr_wants_to_enter_1: int, addr_turn: int):
mem[addr_wants_to_enter_0] = 0
@inline
@staticmethod
def unlock_processor1(mem: MObject, addr_wants_to_enter_0: int, addr_wants_to_enter_1: int, addr_turn: int):
mem[addr_wants_to_enter_1] = 0
dekker: sync.SharedInt = 0
peterson: sync.SharedInt = 0
no_lock: sync.SharedInt = 0
@Processors.add
def dekker1():
global dekker
DekkersAlgorithm.lock_processor0(L.cell1, 10, 11, 12)
dekker += 1
DekkersAlgorithm.unlock_processor0(L.cell1, 10, 11, 12)
@Processors.add
def dekker2():
global dekker
DekkersAlgorithm.lock_processor1(L.cell1, 10, 11, 12)
dekker += 1
DekkersAlgorithm.unlock_processor1(L.cell1, 10, 11, 12)
@Processors.add
def peterson1():
global peterson
PetersonsAlgorithm.lock_processor0(L.cell1, 10, 11, 12)
peterson += 1
PetersonsAlgorithm.unlock_processor0(L.cell1, 10, 11, 12)
@Processors.add
def peterson2():
global peterson
PetersonsAlgorithm.lock_processor1(L.cell1, 10, 11, 12)
peterson += 1
PetersonsAlgorithm.unlock_processor1(L.cell1, 10, 11, 12)
@Processors.add
def processor3():
global no_lock
no_lock += 1
@Processors.add
def processor4():
M.print(f'dekker={dekker}, peterson={peterson}, no_lock={no_lock}')
M.print_flush(L.message1)
while True:
pass
```
#### File: cases/build/class_init5.py
```python
from minpiler.std import L, MObject, Processors, inline
class C:
@inline
def __init__(self, mem: MObject, addr: int) -> None:
self.mem = mem
self.addr = addr
def f():
x = C(L.cell1, 10)
x.mem[x.addr] = 100
@Processors.add
def g():
x = C(L.cell1, 10)
x.mem[x.addr] = 100
f()
# > # Processor "__main__"
# > write 100 cell1 10
# >
# > # Processor "g"
# > write 100 cell1 10
```
#### File: cases/build/const2.py
```python
from minpiler.std import Const, M, inline
class A(Const):
a = 1
b = 2
@inline
def f(a: int):
M.print(a)
f(A.a)
f(A.b)
# > print 1
# > print 2
```
#### File: cases/build/global.py
```python
from minpiler.std import M
x = 10
def f():
x = 20
def g():
def h():
global x
M.print(x)
h()
g()
f()
# > print 10
```
#### File: cases/build/name_propagation3.py
```python
from minpiler.std import L, M, inline
@inline
def f(x):
y = x
M.print_flush(y)
f(L.message1)
# > printflush message1
```
#### File: cases/run/function4.py
```python
from minpiler.std import M, emulator
def f(a):
return a
a = f(10)
M.print(a, ";")
b = f(11)
M.print(a, ";", b)
emulator.kill()
# > 10;10;11
```
#### File: cases/run/global1.py
```python
from minpiler.std import M, emulator
x = 20
def f():
global x
x = 10
f()
M.print(x)
emulator.kill()
# > 10
```
#### File: cases/run/global3.py
```python
from minpiler.std import M, emulator
a = 0
b = 0
def f(k: int, v: int):
global a, b
if k == 0:
a = v
else:
b = v
f(1, 10)
M.print(f'{a},{b}')
emulator.kill()
# > 0,10
```
#### File: cases/run/scope3.py
```python
from minpiler.std import M, emulator, inline
x = 20
@inline
def f():
x = 10
M.print(x)
f()
M.print(";", x)
emulator.kill()
# > 10;20
```
#### File: minpiler/tests/test_build.py
```python
from pathlib import Path
import pytest
from minpiler import compiler, optimizer
test_names: list[str] = []
cases: list[tuple[str, str, Path]] = []
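# Each case file mixes the program source with its expected compiler output:
# lines starting with "# >" carry the expected output, every other line is part
# of the input program handed to the compiler.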
for f in Path(__file__).parent.glob("cases/build/**/*.py"):
if f.name.startswith("_"):
continue
test_names.append(f.name)
input = ""
output = ""
for line in f.read_text(encoding='utf-8').splitlines():
if line.startswith("# >"):
output += line[len("# >"):].strip() + "\n"
else:
input += line + "\n"
cases.append((input, output, f))
@pytest.mark.parametrize('input, output, filepath', cases, ids=test_names)
def test_build(input: str, output: str, filepath: Path):
assert str(compiler.build(input, filepath, use_emulator_instructions=True, optimize=optimizer.optimize)).strip() == output.strip()
``` |
{
"source": "3err0/scuploader",
"score": 2
} |
#### File: 3err0/scuploader/main.py
```python
import ScreenCloud
import time, requests
from PythonQt.QtCore import QFile, QSettings, QUrl
from PythonQt.QtGui import QWidget, QDialog, QDesktopServices, QMessageBox
from PythonQt.QtUiTools import QUiLoader
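# ScreenCloud uploader plugin: the methods below appear to be the hooks ScreenCloud
# calls - a settings dialog persisted via QSettings, isConfigured()/getFilename(),
# and upload(), which saves the screenshot to a temporary file and POSTs it to the
# configured URL, expecting a JSON reply with an 'href' field.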
class SCUploader():
def __init__(self):
self.uil = QUiLoader()
self.loadSettings()
def showSettingsUI(self, parentWidget):
self.parentWidget = parentWidget
self.settingsDialog = self.uil.load(QFile(workingDir + "/settings.ui"), parentWidget)
self.settingsDialog.connect("accepted()", self.saveSettings)
self.loadSettings()
self.settingsDialog.group_url.input_address.text = self.url_address
self.settingsDialog.open()
def loadSettings(self):
settings = QSettings()
settings.beginGroup("uploaders")
settings.beginGroup("scuploader")
self.url_address = settings.value("url-address", "http://s.876974.ru/upload")
settings.endGroup()
settings.endGroup()
def saveSettings(self):
settings = QSettings()
settings.beginGroup("uploaders")
settings.beginGroup("scuploader")
settings.setValue("url-address", self.settingsDialog.group_url.input_address.text)
settings.endGroup()
settings.endGroup()
def isConfigured(self):
self.loadSettings()
return bool(self.url_address)
def getFilename(self):
return time.time()
def upload(self, screenshot, name):
self.loadSettings()
url = self.url_address
if not url.startswith('http'):
ScreenCloud.setError('Invalid url!')
return False
timestamp = time.time()
tmpFilename = QDesktopServices.storageLocation(QDesktopServices.TempLocation) + "/" + ScreenCloud.formatFilename(str(timestamp))
screenshot.save(QFile(tmpFilename), ScreenCloud.getScreenshotFormat())
reply = requests.post(url,
files={'image': open(tmpFilename, 'rb')}
).json()
try:
ScreenCloud.setUrl(reply['href'])
except Exception as e:
ScreenCloud.setError("Could not upload to: " + self.url_address + "\nError: " + e.message)
return False
return True
``` |