repo_name | path | copies | size | content | license | hash | line_mean | line_max | alpha_frac | autogenerated |
---|---|---|---|---|---|---|---|---|---|---|
braddockcg/internet-in-a-box | iiab/kiwix.py | 1 | 2600 | #!/usr/bin/env python
from xml.etree import ElementTree as etree
from base64 import b64decode
import iso639
import os
def clean_book(book):
    """Fixes up the book data"""
    cleaned = {}
    for k, v in book.items():
        if k == "favicon":
            v = b64decode(v)
        elif k == "mediaCount":
            v = int(v)
        elif k == "articleCount":
            # keep this numeric so books_by_language() can sum and sort on it
            v = int(v)
        elif k == "size":
            v = int(v)
        elif k == "language":
            if v == 'en':  # Mislabel, replace with 3-letter label
                v = 'eng'
            if v in iso639.iso6392:
                cleaned["languageEnglish"] = iso639.iso6392[v]['english']
            else:
                cleaned["languageEnglish"] = v
        cleaned[k] = v
    if 'language' not in cleaned:
        title = cleaned.get('title', '')
        if title.find(" ml ") != -1:
            lang = 'mal'
        elif title.find(" zh ") != -1:
            lang = 'zho'
        else:
            # Assume english
            lang = 'eng'
        cleaned['language'] = lang
        cleaned['languageEnglish'] = iso639.iso6392[lang]['english']
    return cleaned
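# Illustrative sketch of what clean_book() produces (the input values are
# assumptions, not data from a real library file):
#   clean_book({'language': 'en', 'size': '2600', 'mediaCount': '12'})
#   -> {'language': 'eng', 'languageEnglish': 'English',
#       'size': 2600, 'mediaCount': 12}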
class Library(object):
def __init__(self, xml_filename):
self.books = {}
self._parse_library(xml_filename)
def _parse_library(self, library_xml_filename):
"""Parse a kiwix library xml file"""
with open(library_xml_filename, "r") as f:
et = etree.parse(f)
root = et.getroot()
self.books = root.findall("book")
self.books = map(clean_book, self.books)
def find_by_uuid(self, uuid):
for book in self.books:
if book['id'] == uuid:
return book
return None
    def books_by_language(self):
        """Get a list of all unique languages found in the library,
        sorted in decreasing order of total number of articles in that language"""
        langs = dict()
        for book in self.books:
            lang = book['language']
            langEng = book['languageEnglish']
            articles = book['articleCount']
            books = []
            if lang in langs:
                articles += langs[lang].get('articleCount', 0)
                books = langs[lang].get('books', [])
            books.append(book)
            langs[lang] = {
                'language': lang,
                'languageEnglish': langEng,
                'articleCount': articles,
                'books': books
            }
        langs = langs.values()
        langs.sort(key=lambda x: -x['articleCount'])
        return langs
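# Minimal usage sketch (the XML path below is an assumption):
#   lib = Library('/var/lib/kiwix/library.xml')
#   book = lib.find_by_uuid('some-uuid-from-the-library')
#   for entry in lib.books_by_language():
#       print entry['languageEnglish'], entry['articleCount']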
| bsd-2-clause | -2,734,033,899,992,651,000 | 31.5 | 82 | 0.517308 | false |
cmheisel/kardboard | kardboard/tests/unit/test_utils.py | 1 | 2398 | import datetime
import unittest2
class UtilTests(unittest2.TestCase):
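    # note: the leading "z" in ztest_days_between below keeps unittest2 from
    # collecting it -- the test is effectively disabled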
def ztest_days_between(self):
from kardboard.util import days_between
wednesday = datetime.datetime(year=2011, month=6, day=1)
next_wednesday = datetime.datetime(year=2011, month=6, day=8)
result = days_between(wednesday, next_wednesday)
self.assertEqual(result, 7)
aday = datetime.datetime(year=2011, month=6, day=1)
manydayslater = datetime.datetime(year=2012, month=6, day=1)
result = days_between(aday, manydayslater)
self.assertEqual(result, 366)
def test_days_between_partial_days_over(self):
from kardboard.util import days_between
start = datetime.datetime(2013, 6, 20, 19, 40, 32, 60000)
end = datetime.datetime(2013, 6, 24, 8, 46, 35, 461000)
result = days_between(start, end)
self.assertEqual(result, 4)
def test_days_between_partial_days_under(self):
from kardboard.util import days_between
start = datetime.datetime(2013, 6, 20, 12, 59, 59)
end = datetime.datetime(2013, 6, 24, 0, 0, 0, 0)
result = days_between(start, end)
self.assertEqual(result, 3)
def test_month_range(self):
from kardboard.util import month_range
today = datetime.datetime(year=2011, month=6, day=12)
start, end = month_range(today)
self.assertEqual(6, start.month)
self.assertEqual(1, start.day)
self.assertEqual(2011, start.year)
self.assertEqual(6, end.month)
self.assertEqual(30, end.day)
self.assertEqual(2011, end.year)
def test_week_range(self):
from kardboard.util import week_range
today = datetime.datetime(year=2011, month=5, day=12)
start, end = week_range(today)
self.assertEqual(5, start.month)
self.assertEqual(8, start.day)
self.assertEqual(2011, start.year)
self.assertEqual(5, end.month)
self.assertEqual(14, end.day)
self.assertEqual(2011, end.year)
today = datetime.datetime(year=2011, month=6, day=5)
start, end = week_range(today)
self.assertEqual(6, start.month)
self.assertEqual(5, start.day)
self.assertEqual(2011, start.year)
self.assertEqual(6, end.month)
self.assertEqual(11, end.day)
self.assertEqual(2011, end.year)
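    # A days_between consistent with the assertions above rounds partial days
    # to the nearest whole day -- a sketch, not kardboard's actual code:
    #   def days_between(start, end):
    #       return int(round((end - start).total_seconds() / 86400.0))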
| mit | 1,564,835,025,171,704,300 | 33.257143 | 69 | 0.639283 | false |
LPgenerator/django-db-mailer | dbmail/backends/mail.py | 1 | 11834 | # -*- encoding: utf-8 -*-
import traceback
import pprint
import uuid
import time
import sys
from django.db.models.fields.related import ManyToManyField, ForeignKey
from django.core.mail import EmailMessage, EmailMultiAlternatives
try:
from django.core.urlresolvers import reverse, NoReverseMatch
except ImportError:
from django.urls import reverse, NoReverseMatch
from django.contrib.sites.models import Site
from django.template import Template, Context
from django.core.mail import get_connection
from django.utils import translation
from django.conf import settings
from django.core import signing
from dbmail.models import MailTemplate, MailLog, MailGroup, MailLogException
from dbmail.defaults import SHOW_CONTEXT, ENABLE_LOGGING, ADD_HEADER
from dbmail.exceptions import StopSendingException
from dbmail.utils import clean_html
from dbmail import import_module
from dbmail import get_version
from dbmail import defaults
class Sender(object):
provider = defaults.MAIL_PROVIDER
def __init__(self, slug, recipient, *args, **kwargs):
self._slug = slug
self._recipient_list = self._get_recipient_list(recipient)
self._cc = self._email_to_list(kwargs.pop('cc', None))
self._bcc = self._email_to_list(kwargs.pop('bcc', None))
self._user = kwargs.pop('user', None)
self._language = kwargs.pop('language', None)
self._backend = kwargs.pop('backend')
self._provider = kwargs.pop('provider', self.provider)
self._signals_kw = kwargs.pop('signals_kwargs', {})
self._template = self._get_template()
self._context = self._get_context(args)
self._subject = self._get_subject()
self._message = self._get_message()
self._files = kwargs.pop('files', [])
self._kwargs = kwargs
self._num = 1
self._err_msg = None
self._err_exc = None
self._log_id = self._get_log_id()
self._kwargs.pop('retry', None)
self._kwargs.pop('max_retries', None)
self._kwargs.pop('retry_delay', None)
self._from_email = self._get_from_email()
self._update_bcc_from_template_settings()
self._insert_mailer_identification_head()
@staticmethod
def _get_log_id():
return '%f-%s' % (time.time(), uuid.uuid4())
def _insert_mailer_identification_head(self):
if not ADD_HEADER:
return
headers = self._kwargs.pop('headers', {})
headers.update(
{'X-Mailer-Wrapper': 'django-db-mailer ver %s' % get_version()})
self._kwargs['headers'] = headers
def _get_connection(self):
if self._template.auth_credentials:
return self._kwargs.pop('connection', None) or get_connection(
**self._template.auth_credentials)
return self._kwargs.pop('connection', None)
def _get_template(self):
return MailTemplate.get_template(slug=self._slug)
def _get_context(self, context_list):
try:
data = self._model_to_dict(Site.objects.get_current())
except Site.DoesNotExist:
data = {}
for context in context_list:
if isinstance(context, dict):
data.update(context)
elif hasattr(context, '_meta'):
data.update(self._model_to_dict(context))
data.update({self._get_context_module_name(context): context})
if settings.DEBUG and SHOW_CONTEXT:
pprint.pprint(data)
return data
@staticmethod
def _get_context_module_name(context):
from distutils.version import StrictVersion
import django
current_version = django.get_version()
if StrictVersion(current_version) < StrictVersion('1.8'):
return context._meta.module_name
return context._meta.model_name
def _get_str_by_language(self, field, template=None):
obj = template if template else self._template
template = getattr(obj, field)
if self._language is not None:
field = '%s_%s' % (field, self._language)
if hasattr(obj, field):
if getattr(obj, field):
template = getattr(obj, field)
return template
def _get_subject(self):
return self._render_template(
self._get_str_by_language('subject'), self._context)
def _get_message_with_base(self):
self._context['content'] = self._render_template(
self._get_str_by_language('message'), self._context)
return self._render_template(
self._get_str_by_language('message', self._template.base),
self._context
)
def _get_standard_message(self):
return self._render_template(
self._get_str_by_language('message'), self._context)
def _get_message(self):
if self._template.base:
return self._get_message_with_base()
return self._get_standard_message()
def _get_msg_with_track(self):
message = self._message
if defaults.TRACK_ENABLE is False:
return message
if ENABLE_LOGGING and self._template.enable_log:
try:
domain = Site.objects.get_current().domain
encrypted = signing.dumps(self._log_id, compress=True)
path = reverse('db-mail-tracker', args=[encrypted])
message += defaults.TRACK_HTML % {
'url': 'http://%s%s' % (domain, path)}
except (Site.DoesNotExist, NoReverseMatch):
pass
return message
def _attach_files(self, mail):
for file_object in self._template.files_list:
mail.attach_file(file_object.filename.path)
for filename in self._files:
mail.attach_file(filename)
def _send_html_message(self):
msg = EmailMultiAlternatives(
self._subject, clean_html(self._message), cc=self._cc,
from_email=self._from_email, to=self._recipient_list,
bcc=self._bcc, connection=self._get_connection(), **self._kwargs
)
msg.attach_alternative(self._get_msg_with_track(), "text/html")
self._attach_files(msg)
msg.send()
def _send_plain_message(self):
msg = EmailMessage(
self._subject, self._message, from_email=self._from_email,
to=self._recipient_list, cc=self._cc, bcc=self._bcc,
connection=self._get_connection(), **self._kwargs
)
self._attach_files(msg)
msg.send()
def _get_recipient_list(self, recipient):
if not isinstance(recipient, list) and '@' not in recipient:
return self._group_emails(recipient)
return self._email_to_list(recipient)
def _update_bcc_from_template_settings(self):
if self._template.bcc_list:
if self._bcc:
self._bcc.extend(self._template.bcc_list)
else:
self._bcc = self._template.bcc_list
def _get_from_email(self):
if self._kwargs.get('from_email'):
return self._kwargs.pop('from_email', None)
elif not self._template.from_email:
return settings.DEFAULT_FROM_EMAIL
return self._template.from_email.get_mail_from
@staticmethod
def _group_emails(recipient):
email_list = []
for slug in recipient.split(','):
email_list.extend(MailGroup.get_emails(slug))
return list(set(email_list))
@staticmethod
def _email_to_list(recipient):
if recipient is None:
return None
elif not isinstance(recipient, list):
recipient = [d.strip() for d in recipient.split(',') if d.strip()]
return recipient
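    # e.g. _email_to_list("a@example.org, b@example.org") ->
    #   ["a@example.org", "b@example.org"]  (addresses are illustrative)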
def _render_template(self, template, context):
translation.activate(self._language or settings.LANGUAGE_CODE)
return Template(template).render(Context(context))
@staticmethod
def _model_to_dict(instance):
opts, data = getattr(instance, '_meta'), dict()
for f in opts.fields + opts.many_to_many:
if isinstance(f, ManyToManyField):
if instance.pk is None:
data[f.name] = []
else:
data[f.name] = [
item.pk for item in f.value_from_object(instance)]
elif isinstance(f, ForeignKey):
if getattr(instance, f.name):
data[f.name] = getattr(instance, f.name).__str__()
else:
data[f.name] = f.value_from_object(instance)
return data
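    # The resulting dict feeds the template context: plain fields keep their
    # values, FK fields are stringified, M2M fields become lists of pks.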
def _send_by_custom_provider(self):
module = import_module(self._provider)
module.send(self)
def _send_by_native_provider(self):
if self._template.is_html:
return self._send_html_message()
return self._send_plain_message()
def _send(self):
if self._provider is not None:
return self._send_by_custom_provider()
return self._send_by_native_provider()
def _store_log(self, is_sent):
if ENABLE_LOGGING is True:
if self._template.enable_log or not is_sent:
MailLog.store(
self._recipient_list, self._cc, self._bcc,
is_sent, self._template, self._user,
self._num, self._err_msg, self._err_exc,
self._log_id, self._backend, self._provider
)
def _try_to_send(self):
self._kwargs.pop('queue', None)
for self._num in range(1, self._template.num_of_retries + 1):
try:
self._send()
break
except Exception as exc:
print('[dbmail] %s' % exc)
if self._template.num_of_retries == self._num:
raise
time.sleep(defaults.SEND_RETRY_DELAY_DIRECT)
def _ignore_exception(self):
return self._err_exc in MailLogException.get_ignored_exceptions()
def send(self, is_celery=True):
from dbmail.signals import pre_send, post_send, post_exception
if self._template.is_active:
try:
pre_send.send(
self.__class__, instance=self, **self._signals_kw)
if is_celery is True:
self._send()
else:
self._try_to_send()
self._store_log(True)
post_send.send(
self.__class__, instance=self, **self._signals_kw)
return 'OK'
except StopSendingException:
return
except Exception as exc:
post_exception.send(
self.__class__,
instance=self,
exc_instance=exc,
**self._signals_kw
)
self._err_msg = traceback.format_exc()
self._err_exc = exc.__class__.__name__
self._store_log(False)
if self._ignore_exception():
return
raise
@staticmethod
def debug(key, value):
from django.utils.termcolors import colorize
if value:
sys.stdout.write(colorize(key, fg='green'))
sys.stdout.write(": ")
sys.stdout.write(colorize(repr(value), fg='white'))
sys.stdout.write("\n")
class SenderDebug(Sender):
def _send(self):
self.debug('Provider', self._provider or 'default')
self.debug('Message', self._message)
self.debug('From', self._from_email)
self.debug('Recipients', self._recipient_list)
self.debug('CC', self._cc)
self.debug('BCC', self._bcc)
self.debug('Additional kwargs', self._kwargs)
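# Hedged usage sketch -- slug, recipient and backend values are assumptions:
#   Sender('welcome-email', 'user@example.com', {'name': 'Bob'},
#          backend='dbmail.backends.mail').send(is_celery=False)
# SenderDebug can be substituted to print the message instead of sending it.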
| gpl-2.0 | 8,582,603,927,581,929,000 | 34.537538 | 78 | 0.577404 | false |
Arcanemagus/SickRage | sickbeard/providers/pretome.py | 1 | 7003 | # coding=utf-8
# Author: Nick Sologoub
#
# URL: https://sick-rage.github.io
#
# This file is part of SickRage.
#
# SickRage is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# SickRage is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with SickRage. If not, see <http://www.gnu.org/licenses/>.
from __future__ import print_function, unicode_literals
import re
import traceback
from requests.utils import dict_from_cookiejar
# noinspection PyUnresolvedReferences
from six.moves.urllib.parse import quote
from sickbeard import logger, tvcache
from sickbeard.bs4_parser import BS4Parser
from sickrage.helper.common import convert_size, try_int
from sickrage.providers.torrent.TorrentProvider import TorrentProvider
class PretomeProvider(TorrentProvider): # pylint: disable=too-many-instance-attributes
def __init__(self):
TorrentProvider.__init__(self, "Pretome")
self.username = None
self.password = None
self.pin = None
self.minseed = None
self.minleech = None
self.urls = {'base_url': 'https://pretome.info',
'login': 'https://pretome.info/takelogin.php',
'detail': 'https://pretome.info/details.php?id=%s',
'search': 'https://pretome.info/browse.php?search=%s%s',
'download': 'https://pretome.info/download.php/%s/%s.torrent'}
self.url = self.urls['base_url']
self.categories = "&st=1&cat%5B%5D=7"
self.proper_strings = ['PROPER', 'REPACK']
self.cache = tvcache.TVCache(self)
def _check_auth(self):
if not self.username or not self.password or not self.pin:
logger.log("Invalid username or password or pin. Check your settings", logger.WARNING)
return True
def login(self):
if any(dict_from_cookiejar(self.session.cookies).values()):
return True
login_params = {'username': self.username,
'password': self.password,
'login_pin': self.pin}
response = self.get_url(self.urls['login'], post_data=login_params, returns='text')
if not response:
logger.log("Unable to connect to provider", logger.WARNING)
return False
if re.search('Username or password incorrect', response):
logger.log("Invalid username or password. Check your settings", logger.WARNING)
return False
return True
def search(self, search_params, age=0, ep_obj=None): # pylint: disable=too-many-branches, too-many-statements, too-many-locals
results = []
if not self.login():
return results
for mode in search_params:
items = []
logger.log("Search Mode: {0}".format(mode), logger.DEBUG)
for search_string in search_params[mode]:
if mode != 'RSS':
logger.log("Search string: {0}".format
(search_string.decode("utf-8")), logger.DEBUG)
search_url = self.urls['search'] % (quote(search_string), self.categories)
data = self.get_url(search_url, returns='text')
if not data:
continue
try:
with BS4Parser(data, 'html5lib') as html:
# Continue only if one Release is found
empty = html.find('h2', text="No .torrents fit this filter criteria")
if empty:
logger.log("Data returned from provider does not contain any torrents", logger.DEBUG)
continue
torrent_table = html.find('table', style='border: none; width: 100%;')
if not torrent_table:
logger.log("Could not find table of torrents", logger.ERROR)
continue
torrent_rows = torrent_table('tr', class_='browse')
for result in torrent_rows:
cells = result('td')
size = None
link = cells[1].find('a', style='font-size: 1.25em; font-weight: bold;')
torrent_id = link['href'].replace('details.php?id=', '')
try:
if link.get('title', ''):
title = link['title']
else:
title = link.contents[0]
download_url = self.urls['download'] % (torrent_id, link.contents[0])
seeders = int(cells[9].contents[0])
leechers = int(cells[10].contents[0])
# Need size for failed downloads handling
if size is None:
torrent_size = cells[7].text
size = convert_size(torrent_size) or -1
except (AttributeError, TypeError):
continue
if not all([title, download_url]):
continue
# Filter unseeded torrent
if seeders < self.minseed or leechers < self.minleech:
if mode != 'RSS':
logger.log("Discarding torrent because it doesn't meet the minimum seeders or leechers: {0} (S:{1} L:{2})".format
(title, seeders, leechers), logger.DEBUG)
continue
item = {'title': title, 'link': download_url, 'size': size, 'seeders': seeders, 'leechers': leechers, 'hash': ''}
if mode != 'RSS':
logger.log("Found result: {0} with {1} seeders and {2} leechers".format(title, seeders, leechers), logger.DEBUG)
items.append(item)
except Exception:
logger.log("Failed parsing provider. Traceback: {0}".format(traceback.format_exc()), logger.ERROR)
# For each search mode sort all the items by seeders if available
items.sort(key=lambda d: try_int(d.get('seeders', 0)), reverse=True)
results += items
return results
provider = PretomeProvider()
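# search_params maps a search mode to candidate search strings; an
# illustrative call shape, inferred from the loop in search() above:
#   provider.search({'Episode': ['Show.Name.S01E01']})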
| gpl-3.0 | 3,621,022,508,012,013,600 | 39.017143 | 149 | 0.532914 | false |
jsalva/ndim | ndim/ndim/wsgi.py | 1 | 1166 | """
WSGI config for ndim project.
This module contains the WSGI application used by Django's development server
and any production WSGI deployments. It should expose a module-level variable
named ``application``. Django's ``runserver`` and ``runfcgi`` commands discover
this application via the ``WSGI_APPLICATION`` setting.
Usually you will have the standard Django WSGI application here, but it also
might make sense to replace the whole Django WSGI application with a custom one
that later delegates to the Django one. For example, you could introduce WSGI
middleware here, or combine a Django application with an application of another
framework.
"""
import os
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "ndim.settings")
# This application object is used by any WSGI server configured to use this
# file. This includes Django's development server, if the WSGI_APPLICATION
# setting points here.
from django.core.wsgi import get_wsgi_application
from dj_static import Cling
application = Cling(get_wsgi_application())
# Apply WSGI middleware here.
# from helloworld.wsgi import HelloWorldApplication
# application = HelloWorldApplication(application)
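# To serve this application locally one could run, for example (the command
# below is an assumption, not part of this repo):
#   gunicorn ndim.wsgi:application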
| mit | -2,149,861,468,380,887,800 | 37.866667 | 79 | 0.798456 | false |
sol-ansano-kim/minimap | minimap/ui.py | 1 | 8970 | from PySide import QtGui
from PySide import QtCore
from maya import OpenMayaUI
import shiboken
from . import mayaFunction as func
from . import model
import os
import re
RE_NUMBER = re.compile("^[0-9.]+$")
UI_TITLE = "MINIMAP"
UI_OBJECT_NAME = "minimap_maya_camera_navigator"
class DrawWidget(QtGui.QLabel):
backgroun_color = QtGui.QColor.fromHsv(0, 0, 20)
dot_line_pen = QtGui.QPen(QtGui.QColor.fromHsv(0, 0, 200))
def __init__(self):
super(DrawWidget, self).__init__()
self.setObjectName("minimap_draw_widget")
self.image = None
self.left_pushed = False
self.right_pushed = False
self.old_pos = None
self.rect_x = 0
self.rect_y = 0
self.rect_w = 10
self.rect_h = 10
self.frame_w = 100
self.frame_h = 100
self.image_w = 0
self.image_h = 0
self.zoom = 1
self.clear()
def paintEvent(self, evnt):
paint = QtGui.QPainter()
paint.begin(self)
### set black
paint.fillRect(0, 0, self.frame_w, self.frame_h, self.backgroun_color)
### set image
if self.image:
paint.drawImage(self.__imageGeometry(), self.image)
### draw rect
paint.setPen(self.dot_line_pen)
### draw rect
paint.drawRect(*self.__rectGeometry())
### draw line
paint.drawLine(self.rect_x - 5, self.rect_y,
self.rect_x + 5, self.rect_y)
paint.drawLine(self.rect_x, self.rect_y - 5,
self.rect_x, self.rect_y + 5)
model.UI2Pan((self.rect_x - self.rect_w / 2.0) / self.rect_w,
(self.rect_y - self.rect_h / 2.0) / self.rect_h,
self.zoom)
paint.end()
def __imageGeometry(self):
x = 0
y = 0
if self.frame_w != self.image_w:
x = int((self.frame_w - self.image_w) / 2.0)
if self.frame_h != self.image_h:
y = int((self.frame_h - self.image_h) / 2.0)
return QtCore.QRect(x, y, self.image_w, self.image_h)
def __rectGeometry(self):
### position
posx = self.rect_x - (self.rect_w / 2.0 * self.zoom)
posy = self.rect_y - (self.rect_h / 2.0 * self.zoom)
### rect size
w = self.rect_w * self.zoom
h = self.rect_h * self.zoom
### if frame size and rect size are same then minus 1 pixel
sizew = w if abs(w - self.frame_w) > 0.1 else w - 1
sizeh = h if abs(h - self.frame_h) > 0.1 else h - 1
return (posx, posy, sizew, sizeh)
def mouseMoveEvent(self, evnt):
if self.left_pushed:
self.__moveRect(evnt)
elif self.right_pushed:
if self.old_pos:
self.__scaleRect(evnt)
self.old_pos = evnt.pos()
def __scaleRect(self, evnt):
moved = evnt.pos() - self.old_pos
self.old_pos = evnt.pos()
zoom = self.zoom + (moved.x() + moved.y()) * 0.001
zoom = zoom if zoom > 0 else 0.001
model.modifyZoom(zoom)
self.setZoom(zoom)
def __moveRect(self, evnt):
pos = evnt.pos()
self.rect_x = pos.x()
self.rect_y = pos.y()
self.update()
def mousePressEvent(self, evnt):
if (evnt.button() == QtCore.Qt.MouseButton.LeftButton):
self.left_pushed = True
self.__moveRect(evnt)
if (evnt.button() == QtCore.Qt.MouseButton.RightButton):
self.right_pushed = True
def mouseReleaseEvent(self, evnt):
self.left_pushed = False
self.right_pushed = False
self.old_pos = None
def __setSize(self):
self.setFixedSize(self.frame_w, self.frame_h)
def clear(self):
self.setImage(None, 100, 100)
def reset(self):
path = model.CAMERA.image_path
img_obj = None
w = 100
h = 100
if path:
ori_img = QtGui.QImage(path)
size = ori_img.size()
## image size
self.image_w = int(size.width() * model.DRAW_SCALE)
self.image_h = int(size.height() * model.DRAW_SCALE)
img_obj = ori_img.scaled(self.image_w, self.image_h)
(w, h) = model.getScreenSize(self.image_w, self.image_h)
self.setImage(img_obj, w, h)
def setImage(self, image_obj, w, h):
self.image = image_obj
self.rect_x = w / 2.0
self.rect_y = h / 2.0
self.rect_w = w
self.rect_h = h
self.frame_w = w
self.frame_h = h
self.__setSize()
self.zoom = 1
self.update()
def getZoom(self):
return self.zoom
def setZoom(self, zoom):
self.zoom = zoom
self.update()
class MainWindow(QtGui.QMainWindow):
def __init__(self):
super(MainWindow, self).__init__(wrapQt())
self.setObjectName(UI_OBJECT_NAME)
self.setWindowTitle(UI_TITLE)
self.__makeWidgets()
def __makeWidgets(self):
### central widget, main layout
central_widget = QtGui.QWidget()
main_layout = QtGui.QVBoxLayout()
self.setCentralWidget(central_widget)
central_widget.setLayout(main_layout)
### draw widget
draw_layout = QtGui.QHBoxLayout()
draw_layout.setAlignment(QtCore.Qt.AlignCenter)
self.draw_widget = DrawWidget()
draw_layout.addWidget(self.draw_widget)
main_layout.addLayout(draw_layout)
### zoom widget
zoom_layout = QtGui.QHBoxLayout()
self.zoom_line = QtGui.QLineEdit("1.0")
self.zoom_line.setFixedWidth(40)
reset_button = QtGui.QPushButton("reset")
self.slider = QtGui.QSlider(QtCore.Qt.Horizontal)
self.slider.setMinimum(model.ZOOM_MIN)
self.slider.setMaximum(model.ZOOM_MAX)
self.slider.setValue(model.ZOOM_DEFAULT)
self.slider.setTracking(False)
zoom_layout.addWidget(reset_button)
zoom_layout.addWidget(self.zoom_line)
zoom_layout.addWidget(self.slider)
main_layout.addLayout(zoom_layout)
### button layout
button_layout = QtGui.QHBoxLayout()
self.size_1_2 = QtGui.QRadioButton("1/2")
self.size_1_4 = QtGui.QRadioButton("1/4")
self.size_1_8 = QtGui.QRadioButton("1/8")
self.size_1_16 = QtGui.QRadioButton("1/16")
self.size_1_4.setChecked(True)
button_layout.addWidget(self.size_1_2)
button_layout.addWidget(self.size_1_4)
button_layout.addWidget(self.size_1_8)
button_layout.addWidget(self.size_1_16)
main_layout.addLayout(button_layout)
### signal
self.zoom_line.editingFinished.connect(self.slotZoomChanged)
reset_button.clicked.connect(self.reset)
self.size_1_2.clicked.connect(self.slotImageSize)
self.size_1_4.clicked.connect(self.slotImageSize)
self.size_1_8.clicked.connect(self.slotImageSize)
self.size_1_16.clicked.connect(self.slotImageSize)
self.slider.valueChanged.connect(self.slotSliderChanged)
self.slider.sliderMoved.connect(self.slotSliderChanged)
def slotSliderChanged(self, value):
zoom_value = value * 0.01
self.zoom_line.setText(str(zoom_value))
self.setZoom(zoom_value)
def slotZoomChanged(self):
txt = self.zoom_line.text()
if RE_NUMBER.match(txt):
value = float(txt)
min_v = model.ZOOM_MIN * 0.01
max_v = model.ZOOM_MAX * 0.01
value = value if value > min_v else min_v
value = value if value < max_v else max_v
else:
value = 1.0
self.slider.setValue(int(value * 100))
def slotImageSize(self):
if self.size_1_2.isChecked():
model.DRAW_SCALE = 0.5
elif self.size_1_4.isChecked():
model.DRAW_SCALE = 0.25
elif self.size_1_8.isChecked():
model.DRAW_SCALE = 0.125
elif self.size_1_16.isChecked():
model.DRAW_SCALE = 0.0625
self.reset()
def reset(self):
self.zoom_line.setText("1.0")
self.slider.setValue(model.ZOOM_DEFAULT)
self.draw_widget.reset()
def setZoom(self, value):
self.draw_widget.setZoom(value)
def closeEvent(self, evnt):
model.UI2Pan(0, 0, 1)
super(QtGui.QMainWindow, self).closeEvent(evnt)
def wrapQt():
parent = None
try:
parent = shiboken.wrapInstance(long(OpenMayaUI.MQtUtil.mainWindow()),
QtGui.QWidget)
except:
pass
    ### i don't like this way..
    if parent is None:
RE_MAIN = re.compile("Autodesk Maya.*")
for wid in QtGui.QApplication.topLevelWidgets():
name = wid.windowTitle()
if RE_MAIN.match(name):
parent = wid
break
return parent
def Create():
func.killExistenceWindow(UI_OBJECT_NAME)
return MainWindow()
| gpl-2.0 | -8,338,255,359,178,029,000 | 32.099631 | 78 | 0.572352 | false |
oesteban/mriqc | mriqc/qc/__init__.py | 1 | 2424 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*-
# vi: set ft=python sts=4 ts=4 sw=4 et:
"""
Some no-reference :abbr:`IQMs (image quality metrics)` are extracted in the
final stage of all processing workflows run by MRIQC.
A no-reference :abbr:`IQM (image quality metric)` is a measurement of some aspect
of the actual image which cannot be compared to a reference value for the metric
since there is no ground-truth about what this number should be.
All the computed :abbr:`IQMs (image quality metrics)` corresponding to
an image are saved in a `JSON file <iqm_json>`_ under the ``<output-dir>/derivatives/``
folder.
The IQMs can be grouped in four broad categories, providing a vector of 56
features per anatomical image. Some measures characterize the impact of noise and/or
evaluate the fitness of a noise model. A second family of measures use information
theory and prescribed masks to evaluate the spatial distribution of information. A third
family of measures look for the presence and impact of particular artifacts. Specifically,
the INU artifact, and the signal leakage due to rapid motion (e.g. eyes motion or blood
vessel pulsation) are identified. Finally, some measures that do not fit within the
previous categories characterize the statistical properties of tissue distributions, volume
overlap of tissues with respect to the volumes projected from MNI space, the
sharpness/blurriness of the images, etc.
.. note ::
Most of the :abbr:`IQMs (image quality metrics)` in this module are adapted, derived or
reproduced from the :abbr:`QAP (quality assessment protocols)` project [QAP]_.
We particularly thank Steve Giavasis (`@sgiavasis <http://github.com/sgiavasis>`_) and
Krishna Somandepali for their original implementations of the code in this module that
we took from the [QAP]_.
The [QAP]_ has a very good description of the :abbr:`IQMs (image quality metrics)`
in [QAP-measures]_.
.. toctree::
:maxdepth: 3
iqms/t1w
iqms/bold
.. topic:: References
.. [QAP] `The QAP project
<https://github.com/oesteban/quality-assessment-protocol/blob/enh/SmartQCWorkflow/qap/temporal_qc.py#L16>`_.
.. [QAP-measures] `The Quality Assessment Protocols website: Taxonomy of QA Measures
<http://preprocessed-connectomes-project.github.io/quality-assessment-protocol/#taxonomy-of-qa-measures>`_.
"""
| bsd-3-clause | 4,142,940,389,628,371,500 | 43.888889 | 112 | 0.755363 | false |
pavpanchekha/oranj | oranj/core/cli.py | 1 | 5900 | from objects.orobject import OrObject
from collections import namedtuple
from libbuiltin import typeof
from objects.number import Number
import objects.constants as constants
import sys
overrides = {
"stdin": "", # oranj script -
"count": "q", # in `who`
"digits": "n", # csplit
"exit": "x",
"extract": "x",
"zip": "z",
"gzip": "z",
"compress": "z",
"literal": "N", # ls
}
class CLIArgs:
def __init__(self):
self.mandatory = []
self.short = {}
self.long = {}
self.dump = None
self.kwdump = None
def shortarg(arg):
    # first letter, its swapped-case twin, then any override characters
    return [arg[0], arg[0].swapcase()] + (list(overrides[arg]) if arg in overrides else [])
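# e.g. shortarg("extract") -> ['e', 'E', 'x']: first letter, swapped case,
# plus the override table entry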
def getargs(intp, arglist):
args = CLIArgs()
for i in arglist:
if i[0] == "ARG":
args.mandatory.append(i[1])
args.long[i[1]] = "str"
for short in shortarg(i[1]):
if short not in args.short:
args.short[short] = i[1]
break
elif i[0] == "DEFARG":
args.long[i[1]] = typeof(intp.run(i[2]))
for short in shortarg(i[1]):
if short not in args.short:
args.short[short] = i[1]
break
elif i[0] == "UNWRAPABLE":
args.mandatory.append(i[1])
if not args.dump:
args.dump = i[1]
elif i[0] == "UNWRAPABLEKW":
if not args.kwdump:
args.kwdump = i[1]
return args
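# arglist entries are tuples shaped like ("ARG", name), ("DEFARG", name, default),
# ("UNWRAPABLE", name) for *args and ("UNWRAPABLEKW", name) for **kwargs
# (shape inferred from the branches above)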
def evalval(type, val):
if type == "int":
return Number(val, intonlystr=True)
elif type == "num":
return Number(val)
elif type == "list":
return OrObject.from_py(val.split(","))
elif type == "bool":
return constants.true
elif type == "str":
return OrObject.from_py(val)
else:
raise TypeError, "`%s` type not supported for command-line \
arguments" % type
def parseargs(args, schema):
passargs = {}
takingopts = True
errors = False
setval = ""
if schema.dump:
passargs[schema.dump] = [] # Not OrObjected yet
if schema.kwdump:
passargs[schema.kwdump] = {} # Also not OO'd yet
for i in args:
if i.startswith("--") and len(i) > 2 and takingopts:
kw, t, val = i[2:].partition("=")
if kw not in schema.long and not schema.kwdump:
print "ERROR: Unknown option `%s`" % kw
errors = True
continue
            elif kw not in schema.long:
                # unknown long option falls through to the **kwargs catch-all
                passargs[schema.kwdump][kw] = evalval("str", val)
                continue
if kw in schema.mandatory:
schema.mandatory.remove(kw)
passargs[kw] = evalval(schema.long[kw], val)
elif i == "--" and takingopts:
takingopts = False
elif i.startswith("-") and takingopts:
key = i[1:2]
val = i[2:]
            if key not in schema.short and not schema.kwdump:
                print "ERROR: Unknown option `%s`" % key
                errors = True
                continue
            elif key not in schema.short:
                # unknown short option goes to the **kwargs catch-all
                if val:
                    passargs[schema.kwdump][key] = evalval("str", val)
                else:
                    setval = ":kwdump:"
                    kwdump_key = key
                continue
if schema.short[key] in schema.mandatory:
schema.mandatory.remove(schema.short[key])
if schema.long[schema.short[key]] == "bool":
passargs[schema.short[key]] = constants.true
elif val:
passargs[schema.short[key]] = evalval(schema.long[schema.short[key]], val)
else:
setval = schema.short[key]
        elif setval:
            if setval == ":kwdump:":
                passargs[schema.kwdump][kwdump_key] = evalval("str", i)
            else:
                passargs[setval] = evalval(schema.long[setval], i)
            setval = ""
else:
try:
kw = schema.mandatory[0]
except IndexError:
print "ERROR: Too many arguments"
errors = True
continue
if kw == schema.dump:
passargs[schema.dump].append(i)
takingopts = False
continue
passargs[kw] = evalval(schema.long[kw], i)
schema.mandatory.pop(0)
if schema.dump:
passargs[schema.dump] = OrObject.from_py(passargs[schema.dump])
if schema.kwdump:
passargs[schema.kwdump] = OrObject.from_py(passargs[schema.kwdump])
if len(schema.mandatory) and schema.mandatory[0] != schema.dump:
m = len(schema.mandatory) - (1 if schema.dump in schema.mandatory else 0)
print "Arguments Missing: " + ", ".join(map(lambda x: "`%s`"%x, schema.mandatory))
print "ERROR: Missing %d arguments; consult --help for command syntax" % m
errors = True
if setval:
print "ERROR: Expecting value for argument `%s`" % setval
errors = True
if errors:
sys.exit(1)
return passargs
def run_main(intp, args):
if "--help" in args or "-h" in args or "-?" in args \
and "$$help" in intp.curr:
__help = intp.curr["$$help"]
if typeof(__help) == "str":
print __help
else:
__help()
elif "--version" in args and "$$version" in intp.curr:
__version = intp.curr["$$version"]
if typeof(__version) == "str":
print __version
else:
__version()
else:
main = intp.curr["$$main"]
schema = getargs(intp, main.arglist)
kwargs = parseargs(args, schema)
main(**kwargs)
def run(intp, args, wrapfn=lambda fn, i, glob: fn()):
if "$$main" in intp.curr:
wrapfn(lambda: run_main(intp, args), intp, globals())
#run_main(intp, args)
return
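# Rough flow for `oranj script.or arg1 --opt=val` (a sketch of the pieces above):
#   schema = getargs(intp, main.arglist)    # build CLIArgs from $$main's arglist
#   kwargs = parseargs(args, schema)        # match CLI tokens against the schema
#   main(**kwargs)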
| gpl-3.0 | -8,221,908,617,069,433,000 | 30.382979 | 90 | 0.505932 | false |
acsone/acsone-addons | project_code/tests/test_project_code.py | 1 | 1632 | # -*- coding: utf-8 -*-
##############################################################################
#
# This file is part of project_code, an Odoo module.
#
# Copyright (c) 2015 ACSONE SA/NV (<http://acsone.eu>)
#
# project_code is free software: you can redistribute it and/or
# modify it under the terms of the GNU Affero General Public License
# as published by the Free Software Foundation, either version 3 of
# the License, or (at your option) any later version.
#
# project_code is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the
# GNU Affero General Public License
# along with project_code.
# If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import openerp.tests.common as common
class TestProjectCode(common.TransactionCase):
def test_0_sanity(self):
p = self.env.ref('project.all_projects_account')
self.assertEqual(p.name, "Projects", "Unexpected demo data")
self.assertEqual(p.code, "PP001", "Unexpected demo data")
def test_1_display_name(self):
p = self.env.ref('project.all_projects_account')
self.assertEqual(p.display_name, "PP001 - Projects")
def test_2_complete_name(self):
p = self.env.ref('project.all_projects_account')
self.assertEqual(p.complete_name, "PP001 - Projects")
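    # Pattern under test: display_name and complete_name render as
    # "<code> - <name>", e.g. "PP001 - Projects"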
| agpl-3.0 | 1,057,581,003,818,622,700 | 38.804878 | 78 | 0.615809 | false |
OpenSPA/dvbapp | lib/python/Plugins/Extensions/MediaPlayer/plugin.py | 1 | 40178 | import os
import time
from enigma import iPlayableService, eTimer, eServiceCenter, iServiceInformation, ePicLoad, eServiceReference
from ServiceReference import ServiceReference
from Screens.Screen import Screen
from Screens.HelpMenu import HelpableScreen
from Screens.MessageBox import MessageBox
from Screens.InputBox import InputBox
from Screens.ChoiceBox import ChoiceBox
from Screens.InfoBar import InfoBar
from Screens.InfoBarGenerics import InfoBarSeek, InfoBarScreenSaver, InfoBarAudioSelection, InfoBarAspectSelection, InfoBarCueSheetSupport, InfoBarNotifications, InfoBarSubtitleSupport, InfoBarResolutionSelection
from Components.ActionMap import NumberActionMap, HelpableActionMap
from Components.Label import Label
from Components.Pixmap import Pixmap,MultiPixmap
from Components.FileList import FileList
from Components.MediaPlayer import PlayList
from Components.MovieList import AUDIO_EXTENSIONS
from Components.ServicePosition import ServicePositionGauge
from Components.ServiceEventTracker import ServiceEventTracker, InfoBarBase
from Components.Playlist import PlaylistIOInternal, PlaylistIOM3U, PlaylistIOPLS
from Components.AVSwitch import AVSwitch
from Components.Harddisk import harddiskmanager
from Components.config import config
from Tools.Directories import fileExists, pathExists, resolveFilename, SCOPE_CONFIG, SCOPE_PLAYLIST, SCOPE_CURRENT_SKIN
from Tools.BoundFunction import boundFunction
from settings import MediaPlayerSettings
import random
class MyPlayList(PlayList):
def __init__(self):
PlayList.__init__(self)
def PlayListShuffle(self):
random.shuffle(self.list)
self.l.setList(self.list)
self.currPlaying = -1
self.oldCurrPlaying = -1
class MediaPixmap(Pixmap):
def __init__(self):
Pixmap.__init__(self)
self.coverArtFileName = ""
self.picload = ePicLoad()
self.picload.PictureData.get().append(self.paintCoverArtPixmapCB)
self.coverFileNames = ["folder.png", "folder.jpg"]
def applySkin(self, desktop, screen):
from Tools.LoadPixmap import LoadPixmap
noCoverFile = None
if self.skinAttributes is not None:
for (attrib, value) in self.skinAttributes:
if attrib == "pixmap":
noCoverFile = value
break
if noCoverFile is None:
noCoverFile = resolveFilename(SCOPE_CURRENT_SKIN, "skin_default/no_coverArt.png")
self.noCoverPixmap = LoadPixmap(noCoverFile)
return Pixmap.applySkin(self, desktop, screen)
def onShow(self):
Pixmap.onShow(self)
sc = AVSwitch().getFramebufferScale()
#0=Width 1=Height 2=Aspect 3=use_cache 4=resize_type 5=Background(#AARRGGBB)
self.picload.setPara((self.instance.size().width(), self.instance.size().height(), sc[0], sc[1], False, 1, "#00000000"))
def paintCoverArtPixmapCB(self, picInfo=None):
ptr = self.picload.getData()
if ptr != None:
self.instance.setPixmap(ptr.__deref__())
def updateCoverArt(self, path):
while not path.endswith("/"):
path = path[:-1]
new_coverArtFileName = None
for filename in self.coverFileNames:
if fileExists(path + filename):
new_coverArtFileName = path + filename
if self.coverArtFileName != new_coverArtFileName:
self.coverArtFileName = new_coverArtFileName
if new_coverArtFileName:
self.picload.startDecode(self.coverArtFileName)
else:
self.showDefaultCover()
def showDefaultCover(self):
self.instance.setPixmap(self.noCoverPixmap)
def embeddedCoverArt(self):
print "[embeddedCoverArt] found"
self.coverArtFileName = "/tmp/.id3coverart"
self.picload.startDecode(self.coverArtFileName)
class MediaPlayerInfoBar(Screen):
def __init__(self, session):
Screen.__init__(self, session)
self.skinName = "MoviePlayer"
class MediaPlayer(Screen, InfoBarBase, InfoBarScreenSaver, InfoBarSeek, InfoBarAudioSelection, InfoBarAspectSelection, InfoBarCueSheetSupport, InfoBarNotifications, InfoBarSubtitleSupport, HelpableScreen, InfoBarResolutionSelection):
ALLOW_SUSPEND = True
ENABLE_RESUME_SUPPORT = True
def __init__(self, session, args = None):
Screen.__init__(self, session)
InfoBarAudioSelection.__init__(self)
InfoBarAspectSelection.__init__(self)
InfoBarCueSheetSupport.__init__(self, actionmap = "MediaPlayerCueSheetActions")
InfoBarNotifications.__init__(self)
InfoBarBase.__init__(self)
InfoBarScreenSaver.__init__(self)
InfoBarSubtitleSupport.__init__(self)
HelpableScreen.__init__(self)
InfoBarResolutionSelection.__init__(self)
self.summary = None
self.oldService = self.session.nav.getCurrentlyPlayingServiceOrGroup()
self.session.nav.stopService()
self.playlistparsers = {}
self.addPlaylistParser(PlaylistIOM3U, "m3u")
self.addPlaylistParser(PlaylistIOPLS, "pls")
self.addPlaylistParser(PlaylistIOInternal, "e2pls")
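		# registry of playlist-file extension -> parser class, consumed by
		# load_playlist() / PlaylistSelected() when a playlist is opened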
# 'None' is magic to start at the list of mountpoints
defaultDir = config.mediaplayer.defaultDir.getValue()
self.filelist = FileList(defaultDir, matchingPattern = "(?i)^.*\.(mp2|mp3|ogg|ts|mts|m2ts|wav|wave|m3u|pls|e2pls|mpg|vob|avi|divx|m4v|mkv|mp4|m4a|dat|flac|flv|mov|dts|3gp|3g2|asf|wmv|wma|webm)", useServiceRef = True, additionalExtensions = "4098:m3u 4098:e2pls 4098:pls")
self["filelist"] = self.filelist
self.playlist = MyPlayList()
self.is_closing = False
self.delname = ""
self.playlistname = ""
self["playlist"] = self.playlist
self["PositionGauge"] = ServicePositionGauge(self.session.nav)
self["currenttext"] = Label("")
self["artisttext"] = Label(_("Artist")+':')
self["artist"] = Label("")
self["titletext"] = Label(_("Title")+':')
self["title"] = Label("")
self["albumtext"] = Label(_("Album")+':')
self["album"] = Label("")
self["yeartext"] = Label(_("Year")+':')
self["year"] = Label("")
self["genretext"] = Label(_("Genre")+':')
self["genre"] = Label("")
self["coverArt"] = MediaPixmap()
self["repeat"] = MultiPixmap()
self.seek_target = None
try:
from Plugins.SystemPlugins.Hotplug.plugin import hotplugNotifier
hotplugNotifier.append(self.hotplugCB)
except Exception, ex:
print "[MediaPlayer] No hotplug support", ex
class MoviePlayerActionMap(NumberActionMap):
def __init__(self, player, contexts = [ ], actions = { }, prio=0):
NumberActionMap.__init__(self, contexts, actions, prio)
self.player = player
def action(self, contexts, action):
self.player.show()
return NumberActionMap.action(self, contexts, action)
self["OkCancelActions"] = HelpableActionMap(self, "OkCancelActions",
{
"ok": (self.ok, _("Add file to playlist")),
"cancel": (self.exit, _("Exit mediaplayer")),
}, -2)
self["MediaPlayerActions"] = HelpableActionMap(self, "MediaPlayerActions",
{
"play": (self.xplayEntry, _("Play entry")),
"pause": (self.pauseEntry, _("Pause")),
"stop": (self.stopEntry, _("Stop entry")),
"previous": (self.previousMarkOrEntry, _("Play from previous mark or playlist entry")),
"next": (self.nextMarkOrEntry, _("Play from next mark or playlist entry")),
"menu": (self.showMenu, _("Menu")),
"skipListbegin": (self.skip_listbegin, _("Jump to beginning of list")),
"skipListend": (self.skip_listend, _("Jump to end of list")),
"prevBouquet": (self.switchLists, _("Switch between filelist/playlist")),
"nextBouquet": (self.switchLists, _("Switch between filelist/playlist")),
"delete": (self.deletePlaylistEntry, _("Delete playlist entry")),
"shift_stop": (self.clear_playlist, _("Clear playlist")),
"shift_record": (self.playlist.PlayListShuffle, _("Shuffle playlist")),
"subtitles": (self.subtitleSelection, _("Subtitle selection")),
}, -2)
self["InfobarEPGActions"] = HelpableActionMap(self, "InfobarEPGActions",
{
"showEventInfo": (self.showEventInformation, _("show event details")),
})
self["actions"] = MoviePlayerActionMap(self, ["DirectionActions"],
{
"right": self.rightDown,
"rightRepeated": self.doNothing,
"rightUp": self.rightUp,
"left": self.leftDown,
"leftRepeated": self.doNothing,
"leftUp": self.leftUp,
"up": self.up,
"upRepeated": self.up,
"upUp": self.doNothing,
"down": self.down,
"downRepeated": self.down,
"downUp": self.doNothing,
}, -2)
InfoBarSeek.__init__(self, actionmap = "MediaPlayerSeekActions")
self.mediaPlayerInfoBar = self.session.instantiateDialog(MediaPlayerInfoBar)
self.onClose.append(self.delMPTimer)
self.onClose.append(self.__onClose)
self.onShow.append(self.timerHideMediaPlayerInfoBar)
self.righttimer = False
self.rightKeyTimer = eTimer()
self.rightKeyTimer.callback.append(self.rightTimerFire)
self.lefttimer = False
self.leftKeyTimer = eTimer()
self.leftKeyTimer.callback.append(self.leftTimerFire)
self.hideMediaPlayerInfoBar = eTimer()
self.hideMediaPlayerInfoBar.callback.append(self.timerHideMediaPlayerInfoBar)
self.currList = "filelist"
self.isAudioCD = False
self.ext = None
self.AudioCD_albuminfo = {}
self.cdAudioTrackFiles = []
self.onShown.append(self.applySettings)
self.playlistIOInternal = PlaylistIOInternal()
list = self.playlistIOInternal.open(resolveFilename(SCOPE_CONFIG, "playlist.e2pls"))
if list:
for x in list:
self.playlist.addFile(x.ref)
self.playlist.updateList()
self.__event_tracker = ServiceEventTracker(screen=self, eventmap=
{
iPlayableService.evUpdatedInfo: self.__evUpdatedInfo,
iPlayableService.evUser+10: self.__evAudioDecodeError,
iPlayableService.evUser+11: self.__evVideoDecodeError,
iPlayableService.evUser+12: self.__evPluginError,
iPlayableService.evUser+13: self["coverArt"].embeddedCoverArt
})
def hideAndInfoBar(self):
self.hide()
self.mediaPlayerInfoBar.show()
if config.mediaplayer.alwaysHideInfoBar.value or self.ext not in AUDIO_EXTENSIONS and not self.isAudioCD:
self.hideMediaPlayerInfoBar.start(5000, True)
def timerHideMediaPlayerInfoBar(self):
self.hideMediaPlayerInfoBar.stop()
self.mediaPlayerInfoBar.hide()
def doNothing(self):
pass
def createSummary(self):
return MediaPlayerLCDScreen
def exit(self):
if self.mediaPlayerInfoBar.shown:
self.timerHideMediaPlayerInfoBar()
else:
self.session.openWithCallback(self.exitCallback, MessageBox, _("Exit media player?"), simple = not self.shown)
def exitCallback(self, answer):
if answer:
self.playlistIOInternal.clear()
for x in self.playlist.list:
self.playlistIOInternal.addService(ServiceReference(x[0]))
if self.savePlaylistOnExit:
try:
self.playlistIOInternal.save(resolveFilename(SCOPE_CONFIG, "playlist.e2pls"))
except IOError:
print "couldn't save playlist.e2pls"
if config.mediaplayer.saveDirOnExit.getValue():
config.mediaplayer.defaultDir.setValue(self.filelist.getCurrentDirectory())
config.mediaplayer.defaultDir.save()
try:
from Plugins.SystemPlugins.Hotplug.plugin import hotplugNotifier
hotplugNotifier.remove(self.hotplugCB)
except:
pass
del self["coverArt"].picload
self.close()
def checkSkipShowHideLock(self):
self.updatedSeekState()
def doEofInternal(self, playing):
if playing:
self.nextEntry()
else:
self.show()
def __onClose(self):
self.mediaPlayerInfoBar.doClose()
self.session.nav.playService(self.oldService)
def __evUpdatedInfo(self):
currPlay = self.session.nav.getCurrentService()
sTagTrackNumber = currPlay.info().getInfo(iServiceInformation.sTagTrackNumber)
sTagTrackCount = currPlay.info().getInfo(iServiceInformation.sTagTrackCount)
sTagTitle = currPlay.info().getInfoString(iServiceInformation.sTagTitle)
if sTagTrackNumber or sTagTrackCount or sTagTitle:
print "[__evUpdatedInfo] title %d of %d (%s)" % (sTagTrackNumber, sTagTrackCount, sTagTitle)
self.readTitleInformation()
def __evAudioDecodeError(self):
currPlay = self.session.nav.getCurrentService()
sTagAudioCodec = currPlay.info().getInfoString(iServiceInformation.sTagAudioCodec)
print "[__evAudioDecodeError] audio-codec %s can't be decoded by hardware" % (sTagAudioCodec)
self.session.open(MessageBox, _("This receiver cannot decode %s streams!") % sTagAudioCodec, type = MessageBox.TYPE_INFO,timeout = 20 )
def __evVideoDecodeError(self):
currPlay = self.session.nav.getCurrentService()
sTagVideoCodec = currPlay.info().getInfoString(iServiceInformation.sTagVideoCodec)
print "[__evVideoDecodeError] video-codec %s can't be decoded by hardware" % (sTagVideoCodec)
self.session.open(MessageBox, _("This receiver cannot decode %s streams!") % sTagVideoCodec, type = MessageBox.TYPE_INFO,timeout = 20 )
def __evPluginError(self):
currPlay = self.session.nav.getCurrentService()
message = currPlay.info().getInfoString(iServiceInformation.sUser+12)
print "[__evPluginError]" , message
self.session.open(MessageBox, message, type = MessageBox.TYPE_INFO,timeout = 20 )
def delMPTimer(self):
del self.rightKeyTimer
del self.leftKeyTimer
def readTitleInformation(self):
currPlay = self.session.nav.getCurrentService()
if currPlay is not None:
sTitle = currPlay.info().getInfoString(iServiceInformation.sTagTitle)
sAlbum = currPlay.info().getInfoString(iServiceInformation.sTagAlbum)
sGenre = currPlay.info().getInfoString(iServiceInformation.sTagGenre)
sArtist = currPlay.info().getInfoString(iServiceInformation.sTagArtist)
sYear = currPlay.info().getInfoString(iServiceInformation.sTagDate)
if sTitle == "":
if not self.isAudioCD:
sTitle = currPlay.info().getName().split('/')[-1]
else:
sTitle = self.playlist.getServiceRefList()[self.playlist.getCurrentIndex()].getName()
if self.AudioCD_albuminfo:
if sAlbum == "" and "title" in self.AudioCD_albuminfo:
sAlbum = self.AudioCD_albuminfo["title"]
if sGenre == "" and "genre" in self.AudioCD_albuminfo:
sGenre = self.AudioCD_albuminfo["genre"]
if sArtist == "" and "artist" in self.AudioCD_albuminfo:
sArtist = self.AudioCD_albuminfo["artist"]
if "year" in self.AudioCD_albuminfo:
sYear = self.AudioCD_albuminfo["year"]
self.updateMusicInformation( sArtist, sTitle, sAlbum, sYear, sGenre, clear = True )
else:
self.updateMusicInformation()
def updateMusicInformation(self, artist = "", title = "", album = "", year = "", genre = "", clear = False):
self.updateSingleMusicInformation("artist", artist, clear)
self.updateSingleMusicInformation("title", title, clear)
self.updateSingleMusicInformation("album", album, clear)
self.updateSingleMusicInformation("year", year, clear)
self.updateSingleMusicInformation("genre", genre, clear)
def updateSingleMusicInformation(self, name, info, clear):
if info != "" or clear:
if self[name].getText() != info:
self[name].setText(info)
if info != "":
if name == "artist":
self.summaries.setText(info,1)
elif name == "title":
idx = self.playlist.getCurrentIndex()
currref = self.playlist.getServiceRefList()[idx]
if info != self.getIdentifier(currref):
self.summaries.setText(info,3)
elif name == "album":
self.summaries.setText(info,4)
def leftDown(self):
self.lefttimer = True
self.leftKeyTimer.start(1000)
def rightDown(self):
self.righttimer = True
self.rightKeyTimer.start(1000)
def leftUp(self):
if self.lefttimer:
self.leftKeyTimer.stop()
self.lefttimer = False
self[self.currList].pageUp()
self.updateCurrentInfo()
def rightUp(self):
if self.righttimer:
self.rightKeyTimer.stop()
self.righttimer = False
self[self.currList].pageDown()
self.updateCurrentInfo()
def leftTimerFire(self):
self.leftKeyTimer.stop()
self.lefttimer = False
self.switchToFileList()
def rightTimerFire(self):
self.rightKeyTimer.stop()
self.righttimer = False
self.switchToPlayList()
def switchLists(self):
if self.currList == "filelist":
self.switchToPlayList()
return
self.switchToFileList()
def switchToFileList(self):
self.currList = "filelist"
self.filelist.selectionEnabled(1)
self.playlist.selectionEnabled(0)
self.updateCurrentInfo()
def switchToPlayList(self):
if len(self.playlist) != 0:
self.currList = "playlist"
self.filelist.selectionEnabled(0)
self.playlist.selectionEnabled(1)
self.updateCurrentInfo()
def up(self):
self[self.currList].up()
self.updateCurrentInfo()
def down(self):
self[self.currList].down()
self.updateCurrentInfo()
def showAfterSeek(self):
if not self.shown:
self.hideAndInfoBar()
def showAfterCuesheetOperation(self):
self.show()
def hideAfterResume(self):
self.hideAndInfoBar()
def getIdentifier(self, ref):
if self.isAudioCD:
return ref.getName()
else:
text = ref.getPath()
return text.split('/')[-1]
# FIXME: maybe this code can be optimized
def updateCurrentInfo(self):
text = ""
if self.currList == "filelist":
idx = self.filelist.getSelectionIndex()
r = self.filelist.list[idx]
text = r[1][7]
if r[0][1] == True:
if len(text) < 2:
text += " "
if text[:2] != "..":
text = "/" + text
self.summaries.setText(text,1)
idx += 1
if idx < len(self.filelist.list):
r = self.filelist.list[idx]
text = r[1][7]
if r[0][1] == True:
text = "/" + text
self.summaries.setText(text,3)
else:
self.summaries.setText(" ",3)
idx += 1
if idx < len(self.filelist.list):
r = self.filelist.list[idx]
text = r[1][7]
if r[0][1] == True:
text = "/" + text
self.summaries.setText(text,4)
else:
self.summaries.setText(" ",4)
text = ""
if not self.filelist.canDescent():
r = self.filelist.getServiceRef()
if r is None:
return
text = r.getPath()
self["currenttext"].setText(os.path.basename(text))
if self.currList == "playlist":
t = self.playlist.getSelection()
if t is None:
return
#display current selected entry on LCD
text = self.getIdentifier(t)
self.summaries.setText(text,1)
self["currenttext"].setText(text)
idx = self.playlist.getSelectionIndex()
idx += 1
if idx < len(self.playlist):
currref = self.playlist.getServiceRefList()[idx]
text = self.getIdentifier(currref)
self.summaries.setText(text,3)
else:
self.summaries.setText(" ",3)
idx += 1
if idx < len(self.playlist):
currref = self.playlist.getServiceRefList()[idx]
text = self.getIdentifier(currref)
self.summaries.setText(text,4)
else:
self.summaries.setText(" ",4)
def ok(self):
if self.currList == "filelist":
if self.filelist.canDescent():
self.filelist.descent()
self.updateCurrentInfo()
else:
self.copyFile()
if self.currList == "playlist":
if self.playlist.getCurrentIndex() == self.playlist.getSelectionIndex() and not self.playlist.isStopped():
if self.shown:
self.hideAndInfoBar()
elif self.mediaPlayerInfoBar.shown:
self.mediaPlayerInfoBar.hide()
self.hideMediaPlayerInfoBar.stop()
if self.ext in AUDIO_EXTENSIONS or self.isAudioCD:
self.show()
else:
self.mediaPlayerInfoBar.show()
else:
self.changeEntry(self.playlist.getSelectionIndex())
def showMenu(self):
menu = []
if len(self.cdAudioTrackFiles):
menu.insert(0,(_("Play audio-CD..."), "audiocd"))
if self.currList == "filelist":
if self.filelist.canDescent():
menu.append((_("Add directory to playlist"), "copydir"))
else:
menu.append((_("Add files to playlist"), "copyfiles"))
menu.append((_("Switch to playlist"), "playlist"))
if config.usage.setup_level.index >= 1: # intermediate+
menu.append((_("Delete file"), "deletefile"))
else:
menu.append((_("Switch to filelist"), "filelist"))
menu.append((_("Clear playlist"), "clear"))
menu.append((_("Delete entry"), "deleteentry"))
if config.usage.setup_level.index >= 1: # intermediate+
menu.append((_("Shuffle playlist"), "shuffle"))
menu.append((_("Hide player"), "hide"));
menu.append((_("Load playlist"), "loadplaylist"));
if config.usage.setup_level.index >= 1: # intermediate+
menu.append((_("Save playlist"), "saveplaylist"));
menu.append((_("Delete saved playlist"), "deleteplaylist"));
menu.append((_("Edit settings"), "settings"))
self.timerHideMediaPlayerInfoBar()
self.session.openWithCallback(self.menuCallback, ChoiceBox, title="", list=menu)
def menuCallback(self, choice):
self.show()
if choice is None:
return
if choice[1] == "copydir":
self.copyDirectory(self.filelist.getSelection()[0])
elif choice[1] == "copyfiles":
self.copyDirectory(os.path.dirname(self.filelist.getSelection()[0].getPath()) + "/", recursive = False)
elif choice[1] == "playlist":
self.switchToPlayList()
elif choice[1] == "filelist":
self.switchToFileList()
elif choice[1] == "deleteentry":
if self.playlist.getSelectionIndex() == self.playlist.getCurrentIndex():
self.stopEntry()
self.deleteEntry()
elif choice[1] == "clear":
self.clear_playlist()
elif choice[1] == "hide":
self.hideAndInfoBar()
elif choice[1] == "saveplaylist":
self.save_playlist()
elif choice[1] == "loadplaylist":
self.load_playlist()
elif choice[1] == "deleteplaylist":
self.delete_saved_playlist()
elif choice[1] == "shuffle":
self.playlist.PlayListShuffle()
elif choice[1] == "deletefile":
self.deleteFile()
elif choice[1] == "settings":
self.session.openWithCallback(self.applySettings, MediaPlayerSettings, self)
elif choice[1] == "audiocd":
self.playAudioCD()
def playAudioCD(self):
from enigma import eServiceReference
if len(self.cdAudioTrackFiles):
self.playlist.clear()
self.savePlaylistOnExit = False
self.isAudioCD = True
for x in self.cdAudioTrackFiles:
ref = eServiceReference(4097, 0, x)
self.playlist.addFile(ref)
try:
from Plugins.Extensions.CDInfo.plugin import Query
cdinfo = Query(self)
cdinfo.scan()
except ImportError:
pass # we can live without CDInfo
self.changeEntry(0)
self.switchToPlayList()
def applySettings(self):
self.savePlaylistOnExit = config.mediaplayer.savePlaylistOnExit.getValue()
if config.mediaplayer.repeat.getValue() == True:
self["repeat"].setPixmapNum(1)
else:
self["repeat"].setPixmapNum(0)
def showEventInformation(self):
from Screens.EventView import EventViewSimple
from ServiceReference import ServiceReference
evt = self[self.currList].getCurrentEvent()
if evt:
self.session.open(EventViewSimple, evt, ServiceReference(self.getCurrent()))
# also works on filelist (?)
def getCurrent(self):
return self["playlist"].getCurrent()
def deletePlaylistEntry(self):
if self.currList == "playlist":
if self.playlist.getSelectionIndex() == self.playlist.getCurrentIndex():
self.stopEntry()
self.deleteEntry()
def skip_listbegin(self):
if self.currList == "filelist":
self.filelist.moveToIndex(0)
else:
self.playlist.moveToIndex(0)
self.updateCurrentInfo()
def skip_listend(self):
if self.currList == "filelist":
idx = len(self.filelist.list)
self.filelist.moveToIndex(idx - 1)
else:
self.playlist.moveToIndex(len(self.playlist)-1)
self.updateCurrentInfo()
def save_playlist(self):
self.session.openWithCallback(self.save_playlist2,InputBox, title=_("Please enter filename (empty = use current date)"),windowTitle = _("Save playlist"), text=self.playlistname)
def save_playlist2(self, name):
if name is not None:
name = name.strip()
if name == "":
name = time.strftime("%y%m%d_%H%M%S")
self.playlistname = name
name += ".e2pls"
self.playlistIOInternal.clear()
for x in self.playlist.list:
self.playlistIOInternal.addService(ServiceReference(x[0]))
self.playlistIOInternal.save(resolveFilename(SCOPE_PLAYLIST) + name)
def load_playlist(self):
listpath = []
playlistdir = resolveFilename(SCOPE_PLAYLIST)
try:
for i in os.listdir(playlistdir):
listpath.append((i,playlistdir + i))
except IOError,e:
print "Error while scanning subdirs ",e
if config.mediaplayer.sortPlaylists.value:
listpath.sort()
self.session.openWithCallback(self.PlaylistSelected, ChoiceBox, title=_("Please select a playlist..."), list = listpath)
def PlaylistSelected(self,path):
if path is not None:
self.playlistname = path[0].rsplit('.',1)[-2]
self.clear_playlist()
extension = path[0].rsplit('.',1)[-1]
if self.playlistparsers.has_key(extension):
playlist = self.playlistparsers[extension]()
list = playlist.open(path[1])
for x in list:
self.playlist.addFile(x.ref)
self.playlist.updateList()
def delete_saved_playlist(self):
listpath = []
playlistdir = resolveFilename(SCOPE_PLAYLIST)
try:
for i in os.listdir(playlistdir):
listpath.append((i,playlistdir + i))
except IOError,e:
print "Error while scanning subdirs ",e
if config.mediaplayer.sortPlaylists.value:
listpath.sort()
self.session.openWithCallback(self.DeletePlaylistSelected, ChoiceBox, title=_("Please select a playlist to delete..."), list = listpath)
def DeletePlaylistSelected(self,path):
if path is not None:
self.delname = path[1]
self.session.openWithCallback(self.deleteConfirmed, MessageBox, _("Do you really want to delete %s?") % (path[1]))
def deleteConfirmed(self, confirmed):
if confirmed:
try:
os.remove(self.delname)
except OSError,e:
print "delete failed:", e
self.session.open(MessageBox, _("Delete failed!"), MessageBox.TYPE_ERROR)
def clear_playlist(self):
self.isAudioCD = False
self.stopEntry()
self.playlist.clear()
self.switchToFileList()
def copyDirectory(self, directory, recursive = True):
print "copyDirectory", directory
if directory == '/':
print "refusing to operate on /"
return
filelist = FileList(directory, useServiceRef = True, showMountpoints = False, isTop = True)
for x in filelist.getFileList():
if x[0][1] == True: #isDir
if recursive:
if x[0][0] != directory:
self.copyDirectory(x[0][0])
elif filelist.getServiceRef() and filelist.getServiceRef().type == 4097:
self.playlist.addFile(x[0][0])
self.playlist.updateList()
def deleteFile(self):
if self.currList == "filelist":
self.service = self.filelist.getServiceRef()
else:
self.service = self.playlist.getSelection()
if self.service is None:
return
if self.service.type != 4098 and self.session.nav.getCurrentlyPlayingServiceReference() is not None:
if self.service == self.session.nav.getCurrentlyPlayingServiceReference():
self.stopEntry()
serviceHandler = eServiceCenter.getInstance()
offline = serviceHandler.offlineOperations(self.service)
info = serviceHandler.info(self.service)
name = info and info.getName(self.service)
result = False
if offline is not None:
# simulate first
if not offline.deleteFromDisk(1):
result = True
if result == True:
self.session.openWithCallback(self.deleteConfirmed_offline, MessageBox, _("Do you really want to delete %s?") % (name))
else:
self.session.openWithCallback(self.close, MessageBox, _("You cannot delete this!"), MessageBox.TYPE_ERROR)
def deleteConfirmed_offline(self, confirmed):
if confirmed:
serviceHandler = eServiceCenter.getInstance()
offline = serviceHandler.offlineOperations(self.service)
result = False
if offline is not None:
# really delete!
if not offline.deleteFromDisk(0):
result = True
if result == False:
self.session.open(MessageBox, _("Delete failed!"), MessageBox.TYPE_ERROR)
else:
self.removeListEntry()
def removeListEntry(self):
currdir = self.filelist.getCurrentDirectory()
self.filelist.changeDir(currdir)
deleteend = False
while not deleteend:
index = 0
deleteend = True
if len(self.playlist) > 0:
for x in self.playlist.list:
if self.service == x[0]:
self.playlist.deleteFile(index)
deleteend = False
break
index += 1
self.playlist.updateList()
if self.currList == "playlist":
if len(self.playlist) == 0:
self.switchToFileList()
def copyFile(self):
if self.filelist.getServiceRef().type == 4098: # playlist
ServiceRef = self.filelist.getServiceRef()
extension = ServiceRef.getPath()[ServiceRef.getPath().rfind('.') + 1:]
if self.playlistparsers.has_key(extension):
playlist = self.playlistparsers[extension]()
list = playlist.open(ServiceRef.getPath())
for x in list:
self.playlist.addFile(x.ref)
self.playlist.updateList()
# check if MerlinMusicPlayer is installed and merlinmp3player.so is running
# so we need the right id to play now the mp3-file
elif self.filelist.getServiceRef().type == 4116:
if self.filelist.getSelection() is not None:
inst = self.filelist.getSelection()[0]
if isinstance(inst, eServiceReference):
path = inst.getPath()
service = eServiceReference(4097, 0, path)
self.playlist.addFile(service)
self.playlist.updateList()
if len(self.playlist) == 1:
self.changeEntry(0)
else:
self.playlist.addFile(self.filelist.getServiceRef())
self.playlist.updateList()
if len(self.playlist) == 1:
self.changeEntry(0)
def addPlaylistParser(self, parser, extension):
self.playlistparsers[extension] = parser
def nextEntry(self):
next = self.playlist.getCurrentIndex() + 1
if next < len(self.playlist):
self.changeEntry(next)
elif ( len(self.playlist) > 0 ) and ( config.mediaplayer.repeat.getValue() == True ):
self.stopEntry()
self.changeEntry(0)
elif ( len(self.playlist) > 0 ):
self.stopEntry()
def nextMarkOrEntry(self):
if not self.jumpPreviousNextMark(lambda x: x):
next = self.playlist.getCurrentIndex() + 1
if next < len(self.playlist):
self.changeEntry(next)
else:
self.doSeek(-1)
def previousMarkOrEntry(self):
if not self.jumpPreviousNextMark(lambda x: -x-5*90000, start=True):
next = self.playlist.getCurrentIndex() - 1
if next >= 0:
self.changeEntry(next)
def deleteEntry(self):
self.playlist.deleteFile(self.playlist.getSelectionIndex())
self.playlist.updateList()
if len(self.playlist) == 0:
self.switchToFileList()
def changeEntry(self, index):
self.playlist.setCurrentPlaying(index)
self.playEntry()
def playServiceRefEntry(self, serviceref):
serviceRefList = self.playlist.getServiceRefList()
for count in range(len(serviceRefList)):
if serviceRefList[count] == serviceref:
self.changeEntry(count)
break
def xplayEntry(self):
if self.currList == "playlist":
self.playEntry()
else:
self.stopEntry()
self.playlist.clear()
self.isAudioCD = False
sel = self.filelist.getSelection()
if sel:
if sel[1]: # can descent
# add directory to playlist
self.copyDirectory(sel[0])
else:
# add files to playlist
self.copyDirectory(os.path.dirname(sel[0].getPath()) + "/", recursive = False)
if len(self.playlist) > 0:
self.changeEntry(0)
def playEntry(self):
if len(self.playlist.getServiceRefList()):
needsInfoUpdate = False
currref = self.playlist.getServiceRefList()[self.playlist.getCurrentIndex()]
if self.session.nav.getCurrentlyPlayingServiceReference() is None or currref != self.session.nav.getCurrentlyPlayingServiceReference() or self.playlist.isStopped():
self.session.nav.playService(self.playlist.getServiceRefList()[self.playlist.getCurrentIndex()])
info = eServiceCenter.getInstance().info(currref)
description = info and info.getInfoString(currref, iServiceInformation.sDescription) or ""
self["title"].setText(description)
# display just playing musik on LCD
idx = self.playlist.getCurrentIndex()
currref = self.playlist.getServiceRefList()[idx]
text = self.getIdentifier(currref)
self.ext = os.path.splitext(text)[1].lower()
text = ">"+text
# FIXME: the information if the service contains video (and we should hide our window) should com from the service instead
if self.ext not in AUDIO_EXTENSIONS and not self.isAudioCD:
self.hideAndInfoBar()
else:
needsInfoUpdate = True
self.summaries.setText(text,1)
# get the next two entries
idx += 1
if idx < len(self.playlist):
currref = self.playlist.getServiceRefList()[idx]
text = self.getIdentifier(currref)
self.summaries.setText(text,3)
else:
self.summaries.setText(" ",3)
idx += 1
if idx < len(self.playlist):
currref = self.playlist.getServiceRefList()[idx]
text = self.getIdentifier(currref)
self.summaries.setText(text,4)
else:
self.summaries.setText(" ",4)
else:
idx = self.playlist.getCurrentIndex()
currref = self.playlist.getServiceRefList()[idx]
text = currref.getPath()
ext = os.path.splitext(text)[1].lower()
if self.ext not in AUDIO_EXTENSIONS and not self.isAudioCD:
self.hideAndInfoBar()
else:
needsInfoUpdate = True
self.unPauseService()
if needsInfoUpdate == True:
path = self.playlist.getServiceRefList()[self.playlist.getCurrentIndex()].getPath()
self["coverArt"].updateCoverArt(path)
else:
self["coverArt"].showDefaultCover()
self.readTitleInformation()
def updatedSeekState(self):
if self.seekstate == self.SEEK_STATE_PAUSE:
self.playlist.pauseFile()
elif self.seekstate == self.SEEK_STATE_PLAY:
self.playlist.playFile()
elif self.isStateForward(self.seekstate):
self.playlist.forwardFile()
elif self.isStateBackward(self.seekstate):
self.playlist.rewindFile()
def pauseEntry(self):
if self.currList == "playlist" and self.seekstate == self.SEEK_STATE_PAUSE:
self.playEntry()
elif self.isStateForward(self.seekstate) or self.isStateBackward(self.seekstate):
self.playEntry()
else:
self.pauseService()
if self.seekstate == self.SEEK_STATE_PAUSE:
self.show()
else:
self.hideAndInfoBar()
def stopEntry(self):
self.playlist.stopFile()
self.session.nav.playService(None)
self.updateMusicInformation(clear=True)
self.show()
def unPauseService(self):
self.setSeekState(self.SEEK_STATE_PLAY)
def subtitleSelection(self):
from Screens.AudioSelection import SubtitleSelection
self.session.open(SubtitleSelection, self)
def hotplugCB(self, dev, media_state):
if media_state == "audiocd" or media_state == "audiocdadd":
self.cdAudioTrackFiles = []
if os.path.isfile('/media/audiocd/cdplaylist.cdpls'):
list = open("/media/audiocd/cdplaylist.cdpls")
if list:
self.isAudioCD = True
for x in list:
xnon = x.replace("\n", "")
self.cdAudioTrackFiles.append(xnon)
self.playAudioCD()
else:
self.cdAudioTrackFiles = []
if self.isAudioCD:
self.clear_playlist()
else:
self.cdAudioTrackFiles = []
if self.isAudioCD:
self.clear_playlist()
class MediaPlayerLCDScreen(Screen):
skin = (
"""<screen name="MediaPlayerLCDScreen" position="0,0" size="132,64" id="1">
<widget name="text1" position="4,0" size="132,35" font="Regular;16"/>
<widget name="text3" position="4,36" size="132,14" font="Regular;10"/>
<widget name="text4" position="4,49" size="132,14" font="Regular;10"/>
</screen>""",
"""<screen name="MediaPlayerLCDScreen" position="0,0" size="96,64" id="2">
<widget name="text1" position="0,0" size="96,35" font="Regular;14"/>
<widget name="text3" position="0,36" size="96,14" font="Regular;10"/>
<widget name="text4" position="0,49" size="96,14" font="Regular;10"/>
</screen>""")
def __init__(self, session, parent):
Screen.__init__(self, session)
self["text1"] = Label("Media player")
self["text3"] = Label("")
self["text4"] = Label("")
def setText(self, text, line):
if len(text) > 10:
if text[-4:] == ".mp3":
text = text[:-4]
textleer = " "
text = text + textleer*10
if line == 1:
self["text1"].setText(text)
elif line == 3:
self["text3"].setText(text)
elif line == 4:
self["text4"].setText(text)
def mainCheckTimeshiftCallback(session, answer):
if answer:
session.open(MediaPlayer)
def main(session, **kwargs):
InfoBar.instance.checkTimeshiftRunning(boundFunction(mainCheckTimeshiftCallback, session))
def menu(menuid, **kwargs):
if menuid == "mainmenu" and config.mediaplayer.onMainMenu.getValue():
return [(_("Media player"), main, "media_player", 45)]
return []
def filescan_open(list, session, **kwargs):
from enigma import eServiceReference
mp = session.open(MediaPlayer)
mp.playlist.clear()
mp.savePlaylistOnExit = False
for file in list:
if file.mimetype == "video/MP2T":
stype = 1
else:
stype = 4097
ref = eServiceReference(stype, 0, file.path)
mp.playlist.addFile(ref)
mp.changeEntry(0)
mp.switchToPlayList()
def audioCD_open(list, session, **kwargs):
from enigma import eServiceReference
if os.path.isfile('/media/audiocd/cdplaylist.cdpls'):
list = open("/media/audiocd/cdplaylist.cdpls")
else:
# to do : adding msgbox to inform user about failure of opening audiocd.
return False
mp = session.open(MediaPlayer)
if list:
mp.isAudioCD = True
for x in list:
xnon = x.replace("\n", "")
mp.cdAudioTrackFiles.append(xnon)
mp.playAudioCD()
else:
# to do : adding msgbox to inform user about failure of opening audiocd.
return False
def audioCD_open_mn(session, **kwargs):
from enigma import eServiceReference
if os.path.isfile('/media/audiocd/cdplaylist.cdpls'):
list = open("/media/audiocd/cdplaylist.cdpls")
else:
# to do : adding msgbox to inform user about failure of opening audiocd.
return False
mp = session.open(MediaPlayer)
if list:
mp.isAudioCD = True
for x in list:
xnon = x.replace("\n", "")
mp.cdAudioTrackFiles.append(xnon)
mp.playAudioCD()
else:
# to do : adding msgbox to inform user about failure of opening audiocd.
return False
def movielist_open(list, session, **kwargs):
if not list:
# sanity
return
from enigma import eServiceReference
from Screens.InfoBar import InfoBar
f = list[0]
if f.mimetype == "video/MP2T":
stype = 1
else:
stype = 4097
if InfoBar.instance:
path = os.path.split(f.path)[0]
if not path.endswith('/'):
path += '/'
config.movielist.last_videodir.value = path
InfoBar.instance.showMovies(eServiceReference(stype, 0, f.path))
def audiocdscan(menuid, **kwargs):
try:
from Plugins.SystemPlugins.Hotplug.plugin import AudiocdAdded
except Exception, e:
print "[Mediaplayer.plugin] no hotplug support",e
return []
if menuid == "mainmenu" and AudiocdAdded() and os.path.isfile('/media/audiocd/cdplaylist.cdpls'):
return [(_("Play audio-CD..."), audioCD_open_mn, "play_cd", 45)]
else:
return []
def filescan(**kwargs):
from Components.Scanner import Scanner, ScanPath
return [
Scanner(mimetypes = ["video/mpeg", "video/MP2T", "video/x-msvideo", "video/mkv"],
paths_to_scan =
[
ScanPath(path = "", with_subdirs = False),
ScanPath(path = "PRIVATE/AVCHD/BDMV/STREAM", with_subdirs = False),
],
name = "Movie",
description = _("Watch movies..."),
openfnc = movielist_open,
),
Scanner(mimetypes = ["video/x-vcd"],
paths_to_scan =
[
ScanPath(path = "mpegav", with_subdirs = False),
ScanPath(path = "MPEGAV", with_subdirs = False),
],
name = "Video CD",
description = _("View video CD..."),
openfnc = filescan_open,
),
Scanner(mimetypes = ["audio/mpeg", "audio/x-wav", "application/ogg", "audio/x-flac"],
paths_to_scan =
[
ScanPath(path = "", with_subdirs = False),
],
name = "Music",
description = _("Play music..."),
openfnc = filescan_open,
),
Scanner(mimetypes = ["audio/x-cda"],
paths_to_scan =
[
ScanPath(path = "", with_subdirs = False),
],
name = "Audio-CD",
description = _("Play audio-CD..."),
openfnc = audioCD_open,
),
]
from Plugins.Plugin import PluginDescriptor
def Plugins(**kwargs):
return [
PluginDescriptor(name = _("Media player"), description = _("Play back media files"), where = PluginDescriptor.WHERE_PLUGINMENU, icon="MediaPlayer.png", needsRestart = False, fnc = main),
PluginDescriptor(name = _("Media player"), where = PluginDescriptor.WHERE_FILESCAN, needsRestart = False, fnc = filescan),
PluginDescriptor(name = _("Media player"), where = PluginDescriptor.WHERE_MENU, needsRestart = False, fnc = audiocdscan),
PluginDescriptor(name = _("Media player"), description = _("Play back media files"), where = PluginDescriptor.WHERE_MENU, needsRestart = False, fnc = menu)
]
| gpl-2.0 | -7,173,184,004,439,990,000 | 32.453789 | 273 | 0.707203 | false |
bright-sparks/wpull | wpull/proxy/client.py | 1 | 5588 | '''Proxy support for HTTP requests.'''
import base64
import io
import logging
from trollius import From, Return
import trollius
from wpull.connection import ConnectionPool
from wpull.errors import NetworkError
from wpull.http.request import RawRequest
from wpull.http.stream import Stream
import wpull.string
_logger = logging.getLogger(__name__)
class HTTPProxyConnectionPool(ConnectionPool):
'''Establish pooled connections to a HTTP proxy.
Args:
proxy_address (tuple): Tuple containing host and port of the proxy
server.
connection_pool (:class:`.connection.ConnectionPool`): Connection pool
proxy_ssl (bool): Whether to connect to the proxy using HTTPS.
authentication (tuple): Tuple containing username and password.
ssl_context: SSL context for SSL connections on TCP tunnels.
host_filter (:class:`.proxy.hostfilter.HostFilter`): Host filter which
for deciding whether a connection is routed through the proxy. A
test result that returns True is routed through the proxy.
'''
def __init__(self, proxy_address, *args,
proxy_ssl=False, authentication=None, ssl_context=True,
host_filter=None, **kwargs):
super().__init__(*args, **kwargs)
self._proxy_address = proxy_address
self._proxy_ssl = proxy_ssl
self._authentication = authentication
self._ssl_context = ssl_context
self._host_filter = host_filter
if authentication:
self._auth_header_value = 'Basic {}'.format(
base64.b64encode(
'{}:{}'.format(authentication[0], authentication[1])
.encode('ascii')
).decode('ascii')
)
else:
self._auth_header_value = None
self._connection_map = {}
@trollius.coroutine
def acquire(self, host, port, use_ssl=False, host_key=None):
yield From(self.acquire_proxy(host, port, use_ssl=use_ssl,
host_key=host_key))
@trollius.coroutine
def acquire_proxy(self, host, port, use_ssl=False, host_key=None,
tunnel=True):
'''Check out a connection.
This function is the same as acquire but with extra arguments
concerning proxies.
Coroutine.
'''
if self._host_filter and not self._host_filter.test(host):
connection = yield From(
super().acquire(host, port, use_ssl, host_key)
)
raise Return(connection)
host_key = host_key or (host, port, use_ssl)
proxy_host, proxy_port = self._proxy_address
connection = yield From(super().acquire(
proxy_host, proxy_port, self._proxy_ssl, host_key=host_key
))
connection.proxied = True
_logger.debug('Request for proxy connection.')
if connection.closed():
_logger.debug('Connecting to proxy.')
yield From(connection.connect())
if tunnel:
yield From(self._establish_tunnel(connection, (host, port)))
if use_ssl:
ssl_connection = yield From(connection.start_tls(self._ssl_context))
ssl_connection.proxied = True
ssl_connection.tunneled = True
self._connection_map[ssl_connection] = connection
connection.wrapped_connection = ssl_connection
raise Return(ssl_connection)
if connection.wrapped_connection:
ssl_connection = connection.wrapped_connection
self._connection_map[ssl_connection] = connection
raise Return(ssl_connection)
else:
raise Return(connection)
@trollius.coroutine
def release(self, proxy_connection):
connection = self._connection_map.pop(proxy_connection, proxy_connection)
yield From(super().release(connection))
def no_wait_release(self, proxy_connection):
connection = self._connection_map.pop(proxy_connection, proxy_connection)
super().no_wait_release(connection)
@trollius.coroutine
def _establish_tunnel(self, connection, address):
'''Establish a TCP tunnel.
Coroutine.
'''
host = '[{}]'.format(address[0]) if ':' in address[0] else address[0]
port = address[1]
request = RawRequest('CONNECT', '{0}:{1}'.format(host, port))
self.add_auth_header(request)
stream = Stream(connection, keep_alive=True)
_logger.debug('Sending Connect.')
yield From(stream.write_request(request))
_logger.debug('Read proxy response.')
response = yield From(stream.read_response())
if response.status_code != 200:
debug_file = io.BytesIO()
_logger.debug('Read proxy response body.')
yield From(stream.read_body(request, response, file=debug_file))
debug_file.seek(0)
_logger.debug(ascii(debug_file.read()))
if response.status_code == 200:
connection.tunneled = True
else:
raise NetworkError(
'Proxy does not support CONNECT: {} {}'
.format(response.status_code,
wpull.string.printable_str(response.reason))
)
def add_auth_header(self, request):
'''Add the username and password to the HTTP request.'''
if self._authentication:
request.fields['Proxy-Authorization'] = self._auth_header_value
| gpl-3.0 | 1,173,998,090,200,418,600 | 34.367089 | 84 | 0.603615 | false |
tristanbrown/whaler | whaler/analysis.py | 1 | 15090 | """
"""
import os
import numpy as np
import pandas as pd
from .dataprep import IO
from .dataprep import extract_floats as extr
from .dataprep import dict_values as dvals
class Analysis():
"""
"""
def __init__(self):
self.loc = os.getcwd()
self.structs = next(os.walk('.'))[1]
self.logfile = IO('whaler.log', self.loc)
self.states = ['S', 'T', 'P', 'D', 'Q']
self.spinflip = {
'S' : 'T',
'T' : 'S',
'D' : 'Q',
'Q' : 'D'
}
self.thermvals = [
'U', 'H', 'S*T (el)', 'S*T (vib)', 'S*T (trans)', 'qrot', 'rot #']
elnums = [1, 3, 5, 2, 4]
self.statekey = {
self.states[i]:elnums[i] for i in range(len(elnums))}
# Analysis output filenames.
self.gs_out = "groundstate_Es.csv"
self.crude_out = "cruderxn_Es.csv"
self.thermo_out = "thermo_Es.csv"
def write_data(self, type, custom_out=None,
custom_data=None, format=None):
# Choose the data type and output location.
if type == "gs":
out = self.gs_out
try:
os.remove(os.path.join(self.loc, out))
print("Overwriting %s." % out)
except:
pass
data = self.gEs
message = "optimization energies and ground states"
elif type == "thermo":
out = self.thermo_out
try:
os.remove(os.path.join(self.loc, out))
print("Overwriting %s." % out)
except:
pass
data = self.therm_Es
message = "thermodynamic values"
elif type == "bonds":
out = custom_out
data = custom_data
message = "bond lengths"
elif type == "cruderxn":
out = custom_out
data = custom_data
message = "crude reaction energies"
elif type == "N2act":
out = custom_out
data = custom_data
message = "reaction energies"
else:
raise
# Write the data.
data.to_csv(os.path.join(self.loc, out), float_format=format)
print("Wrote {0} to {1}.".format(message, out))
@property
def gEs(self):
"""Returns self.gEs, either from the existing assignment, from the
output file, or from a fresh calculation.
"""
try:
return self._gEs
except AttributeError:
try:
self._gEs = pd.read_csv(
os.path.join(self.loc, self.gs_out),
index_col=0)
print("Reading ground spin states from %s." % self.gs_out)
except OSError:
self._gEs = self.groundstates_all()
return self._gEs
@property
def therm_Es(self):
"""Returns self.therm_Es, either from the existing assignment, from the
output file, or from a fresh calculation.
"""
try:
return self._therm_Es
except AttributeError:
try:
self._therm_Es = pd.read_csv(
os.path.join(self.loc, self.thermo_out),
index_col=0)
print("Reading thermodynamic values from %s."
% self.thermo_out)
except OSError:
self._therm_Es = self.thermo_all()
return self._therm_Es
def groundstates_all(self):
"""Compares the energies of each calculated spin state for a structure
and writes the energy differences as a table."""
print("Calculating ground spin states.")
# Collect state energies from files.
results = [self.get_states(struct) for struct in self.structs]
# Construct dataframe.
headers = np.array(self.states)
gEs = (
pd.DataFrame(data=results, index=self.structs, columns=headers))
gEs['Ground State'] = gEs.idxmin(axis=1)
return gEs
def thermo_all(self):
"""Compares the energies of each calculated spin state for a structure
and writes the energy differences as a table."""
print("Calculating thermodynamic values.")
# Collect thermodynamic values from files.
results = dvals([self.get_thermo(struct) for struct in self.structs])
# Construct dataframe.
headers = np.array(self.thermvals)
thermoEs = (
pd.DataFrame(data=results, index=self.structs, columns=headers))
# thermoEs['Ground State'] = gEs.idxmin(axis=1)
# print(thermoEs)
return thermoEs
def get_states(self, structure):
"""Returns a dictionary of energies of the various spin states for a
structure, using all available distinct spin-state calculations.
"""
dir = IO(dir=os.path.join(self.loc, structure))
return dir.get_values(
structure, "geo.log", self.geovalid, self.finalE)
def get_thermo(self, structure):
"""Returns a dictionary of thermodynamic values for a structure, using all available distinct spin-state calculations.
"""
dir = IO(dir=os.path.join(self.loc, structure))
values = dir.get_values(
structure, "freq.log", self.freqvalid, self.thermo_vals)
return values
def write_inp_all(self, type, template):
"""Used for writing input files based on previous calculations that
generate .xyz and .gbw files.
"""
for struct in self.structs:
try:
state = self.gEs.loc[struct,'Ground State']
if state in self.states:
self.assemble_inp(struct, template, state, type)
except KeyError:
print("Ground state missing for %s. Rerun whaler gs." % struct)
def write_inp(self, struct, template, state, coords, filename, gbw=None):
"""
"""
path = os.path.join(self.loc, struct)
outfile = os.path.join(path, filename)
# Choose the state number.
statenum = self.statekey[state]
# Read the template. Plug values into the template.
if os.path.exists(outfile.split('.')[0] + ".gbw"):
message = ("Skipping %s"
" because it has already been used in a calculation."
% filename)
else:
reader = IO(template, self.loc)
replacekey = {
"[struct]":struct,
"[spin]":str(statenum),
"[coords]":"\n".join(coords)}
if gbw is None:
replacekey["MOREAD"] = "#MOREAD"
replacekey["%moinp"] = "# %moinp"
else:
replacekey["[gbw]"] = gbw
reader.replace_all_vals(replacekey, outfile)
message = "Wrote " + filename + "."
print(message)
self.logfile.appendline(message)
def assemble_inp(self, struct, template, state, type):
"""
"""
# Get the xyz coordinates for the input file.
xyzfile, coords = self.get_xyz(struct, state, "geo")
# Make the filename.
filename = xyzfile.split("geo")[0] + type + ".inp"
# Find the gbw file.
gbw = xyzfile.split(".")[0] + ".gbw"
# Write the .inp file.
if coords != []:
self.write_inp(struct, template, state, coords, filename, gbw)
def get_xyz(self, struct, state, type="start"):
"""
"""
path = os.path.join(self.loc, struct)
dir = IO(dir=path)
# Filter down to the appropriate .xyz file.
xyzfile = sorted(dir.files_end_with(state + type + ".xyz"))[-1]
# Check if the .xyz file has been aligned.
if self.xyz_aligned(xyzfile, path):
reader = IO(xyzfile, path)
return (xyzfile, reader.lines()[2:])
else:
return (xyzfile, [])
def xyz_aligned(self, filename, dir):
"""
"""
reader = IO(filename, dir)
xyzhead = reader.head(3)
if 'Coordinates' in xyzhead[1]:
message = filename + ' needs alignment.'
print(message)
self.logfile.appendline(message)
return False
elif len(xyzhead[2]) == 49:
return True
else:
message = filename + ' has unknown structure.'
print(message)
self.logfile.appendline(message)
return False
def xyz_to_coords(self, xyz):
"""Converts a list of .xyz file lines into a list of atom-labeled
coordinates.
"""
coords = []
for line in xyz:
rawcoord = line.split()
coord = [rawcoord[0]] + [float(n) for n in rawcoord[1:]]
coords.append(coord)
return coords
def bondlength(self, struct, state, elem1, elem2, axis='z', skip=0):
"""
"""
axiskey = {'x':1, 'y':2, 'z':3}
# Get the coordinates.
file, rawcoords = self.get_xyz(struct, state, "geo")
coords = self.xyz_to_coords(rawcoords)
if coords == []:
print("Can't get coordinates from %s." % file)
return None
else:
# Find the atoms of the right elements.
elems = [elem1, elem2]
for i in range(2):
if elems[i] == 'M':
elems[i] = coords[0][0]
atomlist = [atom for atom in coords if atom[0] in elems]
# Eliminate skipped atoms.
for x in range(skip):
axis_coord = list(zip(*atomlist))[axiskey[axis]]
maxindex = axis_coord.index(max(axis_coord))
del atomlist[maxindex]
# Choose the 2 atoms furthest along the given axis.
atoms = []
for elem in elems:
axis_max = -99999
maxindex = None
for i,atom in enumerate(atomlist):
if atom[0] == elem and atom[axiskey[axis]] > axis_max:
axis_max = atom[axiskey[axis]]
maxindex = i
atoms.append(np.array(atomlist.pop(maxindex)[1:]))
# Calculate the bond length.
length = np.sqrt(np.sum((atoms[0] - atoms[1])**2))
return length
def geovalid(self, file, path):
"""
"""
return self.isvalid(file, path) and self.geoconverged(file, path)
def freqvalid(self, file, path):
"""
"""
return self.isvalid(file, path) and self.freqconverged(file, path)
def isvalid(self, file, path):
"""
"""
reader = IO(file, path)
end = reader.tail(2)
if 'ABORTING THE RUN\n' in end:
message = file + ' aborted abnormally.'
self.logfile.appendline(message)
print(message)
return False
elif 'ORCA TERMINATED NORMALLY' in end[0]:
return True
else:
message = file + ' has unknown structure.'
self.logfile.appendline(message)
print(message)
return False
def geoconverged(self, file, path, chunk=100, maxsearch=1000):
"""
"""
reader = IO(file, path)
tail = reader.tail(chunk)
if chunk > maxsearch:
self.logfile.appendline(file + ' has unknown structure.')
return False
elif 'WARNING!!!!!!!\n' in tail:
self.logfile.appendline(file + ' has not converged.')
return False
elif '*** OPTIMIZATION RUN DONE ***' in ''.join(tail):
return True
else:
self.geoconverged(file, path, chunk*2)
def freqconverged(self, file, path):
"""
"""
reader = IO(file, path)
lines = reader.lines()
if ("ORCA_NUMFREQ: ORCA finished with an error in the energy"
" calculation") in lines:
print("SCF convergence error in %s." % file)
return False
else:
return True
def finalE(self, file, path, chunk=100):
"""Extracts the final Single Point Energy from a .log file.
"""
reader = IO(file, path)
tail = reader.tail(chunk)
marker = 'FINAL SINGLE POINT ENERGY'
energyline = [s for s in tail if marker in s]
if chunk > 1000:
self.logfile.appendline(file + ': cannot find final energy.')
return np.nan
elif energyline == []:
return self.finalE(file, path, chunk+100)
else:
return float(energyline[-1].split()[-1])
def thermo_vals(self, file, path, chunk=100):
"""Extracts the thermodynamic values from a .log file.
"""
reader = IO(file, path)
# print("Thermodynamic values can be extracted from %s." % file)
lines = reader.lines()
# Mark the data locations.
marker1 = 'VIBRATIONAL FREQUENCIES'
marker2 = 'NORMAL MODES'
marker3 = 'INNER ENERGY'
for i in range(len(lines)):
line = lines[i]
if line == marker1:
vib_start = i+3
elif line == marker2:
vib_end = i-3
elif line == marker3:
therm_start = i
# Extract the data values.
vib_lines = lines[vib_start:vib_end]
U = extr(lines[therm_start+19])[0]
H = extr(lines[therm_start+39])[0]
S_el = extr(lines[therm_start+54])[0]
S_vib = extr(lines[therm_start+55])[0]
S_trans = extr(lines[therm_start+57])[0]
linearity = lines[therm_start+65]
if ' linear' in linearity:
rot_num = 1
elif 'nonlinear' in linearity:
rot_num = 1.5
else:
raise
qrot = extr(lines[therm_start+68])[0]
vibs = [extr(line)[:2] for line in vib_lines]
img_modes = []
for vib in vibs:
if vib[1] < 0:
img_modes.append(vib)
if len(img_modes) > 0:
values = {}
print("ERROR: %s contains imaginary modes:" % file)
for mode in img_modes:
print("#{0}: {1} cm^-1".format(mode[0], mode[1]))
else:
values = {
'U':U, 'H':H, 'S*T (el)':S_el, 'S*T (vib)':S_vib,
'S*T (trans)':S_trans, 'qrot':qrot, 'rot #':rot_num}
return values
| mit | -1,162,687,698,635,704,000 | 33.065463 | 127 | 0.500398 | false |
twotwo/tools-python | log_to_graphs/daily_log_plot.py | 1 | 6050 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
daily_log_plot.py
parse day log to 3 figs: 1. requests count; 2. requests throughput; 3. responses time
Copyright (c) 2016年 li3huo.com All rights reserved.
"""
import argparse,logging
from subprocess import Popen, PIPE
import numpy as np
import matplotlib.pyplot as plt
# labels = (u'0点', u'1点', u'2点', u'3点', u'4点', u'5点', u'6点', u'7点', u'8点', u'9点', u'10点', u'11点', u'12点',
# u'13点', u'14点', u'15点', u'16点', u'17点', u'18点', u'19点', u'20点', u'21点', u'22点', u'23点'
# )
labels = []
N = 24
def sample_by_group(requests, n):
"""最大值/中位值/平均值/最小值 max/median/mean/min
根据n值取一个区间的四种数值
"""
max_count = []
median_count = []
mean_count = []
min_count = []
num = 0
group = []
for c in requests:
num += 1
group.append(c)
if num % n == 0:
max_count.append(np.max(group))
median_count.append(np.median(group))
mean_count.append(np.mean(group))
min_count.append(np.min(group))
group = []
return (max_count, median_count, mean_count, min_count)
def autolabel(bars, axes):
# attach some text labels
for bar in bars:
height = bar.get_height()
if '%f' % float(height) == 'nan': height = 0
axes.text(bar.get_x() + bar.get_width()/2., 1.05*height,
'%d' % int(height), ha='center', va='bottom')
def text(axes, xs, ys, values):
for x, y, value in zip(xs, ys, values):
axes.text(x, 1.05*y, '%d' % int(value), fontsize=10, fontweight=800, bbox=dict(facecolor='green', alpha=0.8),
ha='right', va='baseline')
def paint(data, picturename, title, show=True):
for i in range(N): # init x labels
labels.append( str(i)+':00' )
width = 0.35 # the width of the bars
day_requests = data['day_requests']
day_resp_time_by_hour = data['day_resp_time_by_hour']
resp_errs = data['day_resp_err_by_hour']
##################################################
# Plot to Chart
##################################################
group = 60
fig = plt.figure(figsize=(15, 15))
# plt.style.use('classic')
# 设置图的底边距
plt.subplots_adjust(bottom = 0.15)
#开启网格
plt.grid()
# picture title
fig.suptitle(title, fontsize=16, fontweight=900)
##################################################
# subplot 1:
##################################################
count_by_hours = []
for a in np.array(day_requests).reshape(24, 60*60):
count_by_hours.append(np.sum(a))
axes = fig.add_subplot(3,1,1)
bars1 = axes.bar(np.arange(24)+width, count_by_hours, width, label=u'All Requests', color='g')
autolabel(bars1, axes)
if resp_errs.any() != None:
bars2 = axes.bar(np.arange(24)+width*2, [len(err) for err in resp_errs], width, label=u'Errors', color='r')
autolabel(bars2, axes)
plt.ylabel('Daily Processes by Hours')
plt.xticks(np.arange(24), labels)
plt.legend( loc='best', fontsize='x-small' )
#####################################################
# subplot 2: plot throughput by day_requests
#####################################################
(max_count, median_count, mean_count, min_count) = sample_by_group(day_requests, 60)
fig.add_subplot(3, 1, 2)
plt.plot(np.arange(group * 24), max_count, label=u'Max Requests', color='r')
plt.plot(np.arange(group * 24), median_count, label=u'Median Requests', color='g')
plt.plot(np.arange(group * 24), mean_count, label=u'Mean Requests', color='y')
plt.plot(np.arange(group * 24), min_count, label=u'Min Requests', color='b')
plt.xlabel('time (h)')
plt.ylabel('Daily Throughput(requests/s)')
# the x locations for one day: 24 * group
ind = list(xrange(0, 24 * group, group))
plt.xticks(ind, labels)
plt.legend( loc='best', fontsize='x-small' )
#####################################################
# subplot 3: plot response time by helper.day_responses
#####################################################
if day_resp_time_by_hour.any() != None:
# Sorted by Response Time
resps_sorted = [np.sort(resp) for resp in day_resp_time_by_hour]
axes = fig.add_subplot(3, 1, 3)
bars1 = axes.bar(np.arange(24), [np.mean(resp[-1000:]) for resp in resps_sorted], width, label=u'Last 1000', color='g')
bars2 = axes.bar(np.arange(24)+width, [np.mean(resp[-100:]) for resp in resps_sorted], width, label=u'Last 100', color='b')
bars3 = axes.bar(np.arange(24)+width*2, [np.mean(resp[-10:]) for resp in resps_sorted], width, label=u'Last 10', color='r')
plt.ylabel('Average Response Time(ms)')
plt.xticks(np.arange(24), labels)
autolabel(bars1, axes)
autolabel(bars2, axes)
autolabel(bars3, axes)
plt.legend( loc='best', fontsize='x-small' )
#自动调整label显示方式,如果太挤则倾斜显示
fig.autofmt_xdate()
plt.savefig(picturename)
logging.info('save to %s' % picturename)
if show: plt.show()
def main():
parser = argparse.ArgumentParser(description='Create Bar Chart from log.')
parser.add_argument('-n', dest='npz', type=str, default='data.npz',
help='NumPy binary file')
parser.add_argument('-t', dest='title', type=str, default='Project xx on Date yy',
help='the image title')
parser.add_argument('-p', dest='picturename', type=str, default='request.png',
help='The name of the chart picture.')
parser.add_argument('--show', dest='show', action='store_true')
parser.add_argument('--not-show', dest='show', action='store_false')
parser.set_defaults(show=True)
args = parser.parse_args()
##################################################
# Load Response Data
##################################################
# requests = load_requests(file)
from numpy_helper import Helper
logging.basicConfig(filename='./l2g.log', level=logging.DEBUG, format='%(asctime)s %(levelname)s %(message)s')
paint(Helper.load(args.npz), picturename=args.picturename, title=args.title, show=args.show)
if __name__ == '__main__':
'''
python daily_log_plot.py -h
python daily_log_plot.py -n agent.npz -p agent.png --not-show -t "Project SDK-Agent on Date 2017-05-02"
'''
main()
| mit | -1,676,849,737,288,395,800 | 31.027174 | 125 | 0.601663 | false |
lem9/weblate | weblate/trans/tests/test_checks.py | 1 | 6105 | # -*- coding: utf-8 -*-
#
# Copyright © 2012 - 2017 Michal Čihař <[email protected]>
#
# This file is part of Weblate <https://weblate.org/>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <https://www.gnu.org/licenses/>.
#
"""Helper for quality checks tests."""
from __future__ import unicode_literals
import random
from django.test import TestCase
class MockLanguage(object):
"""Mock language object."""
def __init__(self, code='cs'):
self.code = code
class MockProject(object):
"""Mock project object."""
def __init__(self):
self.id = 1
self.source_language = MockLanguage('en')
class MockSubProject(object):
"""Mock subproject object."""
def __init__(self):
self.id = 1
self.project = MockProject()
class MockTranslation(object):
"""Mock translation object."""
def __init__(self, code='cs'):
self.language = MockLanguage(code)
self.subproject = MockSubProject()
self.template = False
def is_template(self):
return self.template
class MockUnit(object):
"""Mock unit object."""
def __init__(self, id_hash=None, flags='', code='cs', source='',
comment=''):
if id_hash is None:
id_hash = random.randint(0, 65536)
self.id_hash = id_hash
self.flags = flags
self.translation = MockTranslation(code)
self.source = source
self.fuzzy = False
self.translated = True
self.comment = comment
@property
def all_flags(self):
return self.flags.split(',')
def get_source_plurals(self):
return [self.source]
class CheckTestCase(TestCase):
"""Generic test, also serves for testing base class."""
check = None
def setUp(self):
self.test_empty = ('', '', '')
self.test_good_matching = ('string', 'string', '')
self.test_good_none = ('string', 'string', '')
self.test_good_ignore = ()
self.test_failure_1 = ()
self.test_failure_2 = ()
self.test_failure_3 = ()
self.test_ignore_check = (
'x', 'x', self.check.ignore_string if self.check else ''
)
self.test_highlight = ()
def do_test(self, expected, data, lang='cs'):
"""Perform single check if we have data to test."""
if not data or self.check is None:
return
result = self.check.check_single(
data[0],
data[1],
MockUnit(None, data[2], lang),
)
if expected:
self.assertTrue(
result,
'Check did not fire for "{0}"/"{1}" ({2})'.format(*data)
)
else:
self.assertFalse(
result,
'Check did fire for "{0}"/"{1}" ({2})'.format(*data)
)
def test_single_good_matching(self):
self.do_test(False, self.test_good_matching)
def test_single_good_none(self):
self.do_test(False, self.test_good_none)
def test_single_good_ignore(self):
self.do_test(False, self.test_good_ignore)
def test_single_empty(self):
self.do_test(False, self.test_empty)
def test_single_failure_1(self):
self.do_test(True, self.test_failure_1)
def test_single_failure_2(self):
self.do_test(True, self.test_failure_2)
def test_single_failure_3(self):
self.do_test(True, self.test_failure_3)
def test_check_good_matching_singular(self):
if self.check is None:
return
self.assertFalse(
self.check.check_target(
[self.test_good_matching[0]],
[self.test_good_matching[1]],
MockUnit(None, self.test_good_matching[2])
)
)
def test_check_good_matching_plural(self):
if self.check is None:
return
self.assertFalse(
self.check.check_target(
[self.test_good_matching[0]] * 2,
[self.test_good_matching[1]] * 3,
MockUnit(None, self.test_good_matching[2])
)
)
def test_check_failure_1_singular(self):
if not self.test_failure_1 or self.check is None:
return
self.assertTrue(
self.check.check_target(
[self.test_failure_1[0]],
[self.test_failure_1[1]],
MockUnit(None, self.test_failure_1[2])
)
)
def test_check_failure_1_plural(self):
if not self.test_failure_1 or self.check is None:
return
self.assertTrue(
self.check.check_target(
[self.test_failure_1[0]] * 2,
[self.test_failure_1[1]] * 3,
MockUnit(None, self.test_failure_1[2])
)
)
def test_check_ignore_check(self):
if self.check is None:
return
self.assertFalse(
self.check.check_target(
[self.test_ignore_check[0]] * 2,
[self.test_ignore_check[1]] * 3,
MockUnit(None, self.test_ignore_check[2])
)
)
def test_check_highlight(self):
if self.check is None or not self.test_highlight:
return
unit = MockUnit(
None,
self.test_highlight[0],
source=self.test_highlight[1]
)
self.assertEqual(
self.check.check_highlight(self.test_highlight[1], unit),
self.test_highlight[2]
)
| gpl-3.0 | -5,348,374,958,073,392,000 | 28.621359 | 72 | 0.562766 | false |
davy39/eric | DebugClients/Python3/eric6dbgstub.py | 1 | 2458 | # -*- coding: utf-8 -*-
# Copyright (c) 2009 - 2014 Detlev Offenbach <[email protected]>
#
"""
Module implementing a debugger stub for remote debugging.
"""
import os
import sys
import distutils.sysconfig
from eric6config import getConfig
debugger = None
__scriptname = None
modDir = distutils.sysconfig.get_python_lib(True)
ericpath = os.getenv('ERICDIR', getConfig('ericDir'))
if ericpath not in sys.path:
sys.path.insert(-1, ericpath)
def initDebugger(kind="standard"):
"""
Module function to initialize a debugger for remote debugging.
@param kind type of debugger ("standard" or "threads")
@return flag indicating success (boolean)
@exception ValueError raised to indicate a wrong debugger kind
"""
global debugger
res = True
try:
if kind == "standard":
import DebugClient
debugger = DebugClient.DebugClient()
elif kind == "threads":
import DebugClientThreads
debugger = DebugClientThreads.DebugClientThreads()
else:
raise ValueError
except ImportError:
debugger = None
res = False
return res
def runcall(func, *args):
"""
Module function mimicing the Pdb interface.
@param func function to be called (function object)
@param *args arguments being passed to func
@return the function result
"""
global debugger, __scriptname
return debugger.run_call(__scriptname, func, *args)
def setScriptname(name):
"""
Module function to set the scriptname to be reported back to the IDE.
@param name absolute pathname of the script (string)
"""
global __scriptname
__scriptname = name
def startDebugger(enableTrace=True, exceptions=True,
tracePython=False, redirect=True):
"""
Module function used to start the remote debugger.
@keyparam enableTrace flag to enable the tracing function (boolean)
@keyparam exceptions flag to enable exception reporting of the IDE
(boolean)
@keyparam tracePython flag to enable tracing into the Python library
(boolean)
@keyparam redirect flag indicating redirection of stdin, stdout and
stderr (boolean)
"""
global debugger
if debugger:
debugger.startDebugger(enableTrace=enableTrace, exceptions=exceptions,
tracePython=tracePython, redirect=redirect)
| gpl-3.0 | 7,533,310,348,466,007,000 | 26.311111 | 78 | 0.663954 | false |
bnjones/Mathics | mathics/builtin/system.py | 1 | 2219 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
System functions
"""
from __future__ import unicode_literals
from __future__ import absolute_import
from mathics.core.expression import Expression, String, strip_context
from mathics.builtin.base import Builtin, Predefined
from mathics import version_string
class Version(Predefined):
"""
<dl>
<dt>'$Version'
<dd>returns a string with the current Mathics version and the versions of relevant libraries.
</dl>
>> $Version
= Mathics ...
"""
name = '$Version'
def evaluate(self, evaluation):
return String(version_string.replace('\n', ' '))
class Names(Builtin):
"""
<dl>
<dt>'Names["$pattern$"]'
<dd>returns the list of names matching $pattern$.
</dl>
>> Names["List"]
= {List}
The wildcard '*' matches any character:
>> Names["List*"]
= {List, ListLinePlot, ListPlot, ListQ, Listable}
The wildcard '@' matches only lowercase characters:
>> Names["List@"]
= {Listable}
>> x = 5;
>> Names["Global`*"]
= {x}
The number of built-in symbols:
>> Length[Names["System`*"]]
= ...
#> Length[Names["System`*"]] > 350
= True
"""
def apply(self, pattern, evaluation):
'Names[pattern_]'
pattern = pattern.get_string_value()
if pattern is None:
return
names = set([])
for full_name in evaluation.definitions.get_matching_names(pattern):
short_name = strip_context(full_name)
names.add(short_name if short_name not in names else full_name)
# TODO: Mathematica ignores contexts when it sorts the list of
# names.
return Expression('List', *[String(name) for name in sorted(names)])
class Aborted(Predefined):
"""
<dl>
<dt>'$Aborted'
<dd>is returned by a calculation that has been aborted.
</dl>
"""
name = '$Aborted'
class Failed(Predefined):
"""
<dl>
<dt>'$Failed'
<dd>is returned by some functions in the event of an error.
</dl>
>> Get["nonexistent_file.m"]
: Cannot open nonexistent_file.m.
= $Failed
"""
name = '$Failed'
| gpl-3.0 | -5,421,035,499,512,175,000 | 20.543689 | 101 | 0.585399 | false |
TE-ToshiakiTanaka/atve | project/fleet/utility.py | 1 | 1286 | import os
import sys
import logging
from atve import log
WORK_DIR = os.path.normpath(os.path.dirname(__file__))
LIB_DIR = os.path.normpath(os.path.join(WORK_DIR, "lib"))
SCRIPT_DIR = os.path.normpath(os.path.join(WORK_DIR, "script"))
TMP_DIR = os.path.normpath(os.path.join(WORK_DIR, "tmp"))
LOG_DIR = os.path.normpath(os.path.join(WORK_DIR, "log"))
BIN_DIR = os.path.normpath(os.path.join(WORK_DIR, "bin"))
PROFILE_DIR = os.path.normpath(os.path.join(WORK_DIR, "conf", "profile"))
AURA_APK_DIR = os.path.normpath(os.path.join(BIN_DIR, "apk", "aura"))
AUBS_JAR_DIR = os.path.normpath(os.path.join(BIN_DIR, "jar", "aubs"))
LOG = log.Log("Project.ATVE")
if not os.path.exists(LOG_DIR):
os.mkdir(LOG_DIR)
logfile = os.path.join(LOG_DIR, "system.log")
if not os.path.exists(logfile):
with open(logfile, 'a') as f:
os.utime(logfile, None)
LOG.addHandler(log.Log.fileHandler(logfile, log.BASE_FORMAT, logging.DEBUG))
class POINT(object):
def __init__(self, x, y, width, height):
self.x = x
self.y = y
self.width = width
self.height = height
def __repr__(self):
return "POINT()"
def __str__(self):
return "(X, Y) = (%s, %s), Width = %s, Height = %s" \
% (self.x, self.y, self.width, self.height)
| mit | 5,832,410,568,297,995,000 | 30.365854 | 76 | 0.63297 | false |
rcbuild-info/scrape | rcbi/rcbi/spiders/Hoverthings.py | 1 | 2084 | import scrapy
from scrapy import log
from scrapy.spiders import CrawlSpider, Rule
from scrapy.linkextractors import LinkExtractor
from rcbi.items import Part
import urllib
import urlparse
MANUFACTURERS = ["Gemfan"]
CORRECT = {"HQ Prop": "HQProp"}
MANUFACTURERS.extend(CORRECT.keys())
QUANTITY = {}
STOCK_STATE_MAP = {"http://schema.org/InStock": "in_stock",
"http://schema.org/OutOfStock": "out_of_stock"}
class HoverthingsSpider(CrawlSpider):
name = "hoverthings"
allowed_domains = ["hoverthings.com"]
start_urls = ["http://hoverthings.com"]
rules = (
Rule(LinkExtractor(restrict_css=[".nav-primary", ".pages"])),
Rule(LinkExtractor(restrict_css=".product-name"), callback='parse_item'),
)
def parse_item(self, response):
item = Part()
item["site"] = self.name
product_name = response.css(".product-name span")
if not product_name:
return
item["name"] = product_name[0].css("::text").extract_first().strip()
variant = {}
item["variants"] = [variant]
variant["url"] = response.url
price = response.css("[itemprop=\"price\"]::text")
if price:
variant["price"] = price.extract_first().strip()
availability = response.css("[itemprop=\"availability\"]::attr(href)")
if availability:
text = availability.extract_first().strip()
if text in STOCK_STATE_MAP:
variant["stock_state"] = STOCK_STATE_MAP[text]
else:
print(text)
for quantity in QUANTITY:
if quantity in item["name"]:
variant["quantity"] = QUANTITY[quantity]
item["name"] = item["name"].replace(quantity, "")
for m in MANUFACTURERS:
if item["name"].startswith(m):
item["name"] = item["name"][len(m):].strip("- ")
item["manufacturer"] = m
break
elif item["name"].endswith(m):
item["name"] = item["name"][:-len(m)].strip("- ")
item["manufacturer"] = m
break
if "manufacturer" in item:
m = item["manufacturer"]
if m in CORRECT:
item["manufacturer"] = CORRECT[m]
return item
| apache-2.0 | 8,699,467,472,133,358,000 | 29.647059 | 77 | 0.621881 | false |
shivanipoddariiith/gnome-weather-tests-final | tests/steps/steps.py | 1 | 4737 | # -*- coding: UTF-8 -*-
from behave import step
from dogtail.tree import root
from behave_common_steps import *
from random import sample
from behave_common_steps import limit_execution_time_to
CITIES = [
{ 'partial': 'Brno',
'full': 'Brno, Czech Republic'},
{ 'partial': 'Minsk',
'full': 'Minsk (Loshitsa / Minsk International), Belarus'},
{ 'partial': 'New York',
'full': 'Albany, New York, United States'},
]
@step(u'Press new to add city')
def press_new_to_add_city(context):
context.app.instance.button("New").click()
@step(u'Add random city')
def add_random_city(context):
# Choose a random city out of the list
context.random_city = sample(CITIES, 1)[0]
# Remember a number of icons
icons_num = len(context.app.instance.child(roleName='layered pane').\
findChildren(lambda x: x.roleName == 'icon'))
context.app.instance.button("New").click()
dialog = context.app.instance.dialog("New Location")
# Probably a bug: textfield should be labelled
#dialog.childLabelled('Search for a city:').typeText(context.random_city)
textentry = dialog.textentry('')
textentry.grabFocus()
textentry.typeText(context.random_city['partial'])
# Wait for autocompletion
sleep(0.1)
textentry.keyCombo("<Down>")
textentry.keyCombo("<Enter>")
assert textentry.text == context.random_city['full'],\
"Incorrect full city name, expected '%s'" % context.random_city['full']
dialog.button('Add').click()
# A new icon should be added
new_icons_num = len(context.app.instance.child(roleName='layered pane').\
findChildren(lambda x: x.roleName == 'icon'))
assert new_icons_num == icons_num + 1,\
"Incorrect icon number, expected '%s' but was '%s'" % (icons_num+1, new_icons_num)
@step(u'Select added city')
def select_added_city(context):
# As gnome-weather is poorly introspected we should choose the last icon
pane = context.app.instance.child(roleName='layered pane')
pane.findChildren(lambda x: x.roleName == 'icon')[-1].click()
wait_for_loading_screen_to_disappear(context)
# Pane becomes hidden
assert context.app.instance.child('World Weather').showing, "World Weather button is hidden"
assert not context.app.instance.child('New').showing, "New button is not hidden"
@limit_execution_time_to(30)
def wait_for_loading_screen_to_disappear(context):
spinner = context.app.instance.child('Spinner')
while(spinner.showing):
sleep(0.5)
sleep(0.5)
@step(u'{action:w} forecast details')
def forecast_details(context, action):
if action not in ['Show', 'Hide']:
raise RuntimeError("Incorrect action: %s" % action)
context.app.instance.child(roleName='push button').click()
# FIXME: check that forecast is displayed/hidden
@then(u'forecast for today is {state:w}')
def forecast_for_today(context, state):
if state not in ['shown', 'hidden']:
raise RuntimeError("Incorrect state: %s" % state)
boolean_state = state == 'shown'
label = context.app.instance.child("Forecast for Today")
assert label.showing == boolean_state
@step(u'Refresh forecast for selected city')
def refresh_forecast(context):
context.app.instance.child(roleName='layered pane').button("Refresh").click()
@then(u'loading page is visible')
def loading_page_visible(context):
pane = context.app.instance.child(roleName='layered pane')
assert pane.label('Loading...')
@step(u'Remove last added city')
def remove_last_added_city(context):
pane = context.app.instance.child(roleName='layered pane')
# Right-click the icon
pane.findChildren(lambda x: x.roleName == 'icon')[-1].click(button=3)
context.app.instance.button("Delete").click()
context.app.instance.button("Done").click()
@step(u'Delete selected cities')
def delete_selected_cities(context):
context.app.instance.button("Delete").click()
context.app.instance.button("Done").click()
@then(u'no cities displayed')
def no_cities_displayed(context):
pane = context.app.instance.child(roleName='layered pane')
actual = len(pane.findChildren(lambda x: x.roleName == 'icon'))
assert actual == 0, "%s cities displayed, though none expected" % actual
@step(u'Return to World Weather')
def return_to_world_weather(context):
context.app.instance.button("World Weather").click()
# Give it some time to display the list
sleep(0.1)
@then(u'a list of cities is displayed')
def list_of_cities_is_displayed(context):
pane = context.app.instance.child(roleName='layered pane')
cities_container = pane.child(roleName='icon').parent
assert cities_container.showing, "Cities list is not visible"
| gpl-2.0 | -798,814,159,004,918,800 | 32.595745 | 96 | 0.687355 | false |
sb1992/NETL-Automatic-Topic-Labelling- | training/extract.py | 1 | 1255 | """
Author: Shraey Bhatia
Date: October 2016
File: extract.py
This file uses WikiExtractor tool to generate documents from wikipedia xml dump.
WikExtractor tool can be found at https://github.com/attardi/wikiextractor.
If you use a diffferent path than to one mentioned in readme update it in main_train.py
Arguments for this file are taken from there.
"""
import os
import argparse
import sys
# The arguments for WikiExtractor. These parameters have been explained in main_train.py
parser = argparse.ArgumentParser()
parser.add_argument("wiki_extractor_path")
parser.add_argument("input_dump") # The Xml dump
parser.add_argument("size")
parser.add_argument("template")
parser.add_argument("output_processed_dir") # the output directory
args = parser.parse_args()
# Checks if the output directory specified already exists. If it does removes it.
if os.path.isdir(args.output_processed_dir):
del_query = "rm -r "+args.output_processed_dir
os.system(del_query)
# Creates the output directory.
query1 = "mkdir "+args.output_processed_dir
os.system(query1)
query2 = "python "+args.wiki_extractor_path+" "+args.input_dump +" -o" +args.output_processed_dir +" -b " +args.size +" --"+args.template
os.system(query2)
| apache-2.0 | 3,511,821,827,934,689,000 | 32.918919 | 137 | 0.739442 | false |
watsonpy/watson-routing | setup.py | 1 | 1836 | # -*- coding: utf-8 -*-
import os
from setuptools import setup, find_packages
import watson.routing
name = 'watson-routing'
description = 'Process and route HTTP Request messages.'
version = watson.routing.__version__
def read(filename, as_list=False):
with open(os.path.join(os.path.dirname(__file__), filename)) as f:
contents = f.read()
if as_list:
return contents.splitlines()
return contents
setup(
name=name,
version=version,
url='http://github.com/watsonpy/' + name,
description=description,
long_description=read('README.md'),
long_description_content_type='text/markdown',
author='Simon Coulton',
author_email='[email protected]',
license='BSD',
classifiers=[
'Development Status :: 5 - Production/Stable',
'Environment :: Web Environment',
'Intended Audience :: Developers',
'License :: OSI Approved :: BSD License',
'Operating System :: OS Independent',
'Programming Language :: Python',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.6',
'Programming Language :: Python :: 3.7',
'Programming Language :: Python :: 3 :: Only',
'Topic :: Internet :: WWW/HTTP',
'Topic :: Internet :: WWW/HTTP :: Dynamic Content',
'Topic :: Internet :: WWW/HTTP :: WSGI',
'Topic :: Software Development :: Libraries :: Application Frameworks',
'Topic :: Software Development :: Libraries :: Python Modules',
],
packages=find_packages(
exclude=["*.tests", "*.tests.*", "tests.*", "tests"]),
include_package_data=True,
zip_safe=False,
install_requires=read('requirements.txt', as_list=True),
extras_require={
'test': read('requirements-test.txt', as_list=True)
},
)
| bsd-3-clause | 5,292,309,774,089,427,000 | 31.785714 | 79 | 0.615468 | false |
kdurril/internaljobmarket | internaljobmarket/models.py | 1 | 4988 | from sqlalchemy import Column, Integer, String
from internaljobmarket.database import Base
class StudentModel(Base):
__tablename__ = 'student'
student_id = Column(String(50), primary_key=True)
studentUid = Column(String(120))
nameLast = Column(String(120))
nameFirst = Column(String(120))
email = Column(String(120))
phone = Column(String(120))
major = Column(String(120))
programCode = Column(String(120))
semBegin = Column(String(120))
graduationExpected = Column(String(120))
creditFall = Column(Integer)
creditSpring = Column(Integer)
request201408 = Column(String(120))
request201501 = Column(String(120))
def __init__(self, student_id=None,
studentUid=None, nameLast=None,
nameFirst=None, email=None,
phone=None, major=None,
programCode=None, semBegin=None,
graduationExpected=None, creditFall=None,
creditSpring=None, request201408=None,
request201501=None):
self.student_id = student_id
self.studentUid = studentUid
        self.nameLast = nameLast
self.nameFirst = nameFirst
self.email = email
self.phone = phone
self.major = major
self.programCode = programCode
self.semBegin = semBegin
self.graduationExpected = graduationExpected
self.creditFall = creditFall
        self.creditSpring = creditSpring
self.request201408 = request201408
self.request201501 = request201501
class SupervisorModel(Base):
__tablename__ = 'supervisor'
supervisor_id = Column(String(50), primary_key=True)
nameLast = Column(String(120))
nameFirst = Column(String(120))
phone = Column(String(120))
email = Column(String(120))
room = Column(String(120))
center = Column(String(120))
def __init__(self, supervisor_id=None,
nameLast=None, nameFirst=None,
phone=None, email=None,
room=None, center=None):
self.supervisor_id = supervisor_id
self.nameLast = nameLast
self.nameFirst = nameFirst
self.phone = phone
self.email = email
self.room = room
self.center = center
class PositionModel(Base):
__tablename__ = 'position'
position_id = Column(Integer, primary_key=True)
title = Column(String(120))
workGroup = Column(String(120))
position_type = Column(String(120))
course = Column(String(120))
programMin = Column(String(120))
programStd = Column(String(120))
positionOverview = Column(String(120))
primaryDuties = Column(String(120))
necessarySkill = Column(String(120))
preferredSkill = Column(String(120))
dateOpen = Column(String(120))
dateClosed = Column(String(120))
available = Column(String(120))
supervisor_id = Column(String(120))
def __init__(self, position_id=None,
title=None, workGroup=None, position_type=None,
course=None, programMin=None, programStd=None,
positionOverview=None, primaryDuties=None,
necessarySkill=None, preferredSkill=None,
dateOpen=None, dateClosed=None,
available=None, supervisor_id=None):
        self.position_id = position_id
        self.title = title
        self.workGroup = workGroup
self.position_type = position_type
self.course = course
self.programMin = programMin
self.programStd = programStd
self.positionOverview = positionOverview
self.primaryDuties = primaryDuties
self.necessarySkill = necessarySkill
self.preferredSkill = preferredSkill
self.dateOpen = dateOpen
self.dateClosed = dateClosed
self.available = available
self.supervisor_id = supervisor_id
class ApplicationModel(Base):
__tablename__ = 'application'
app_id = Column(Integer, primary_key=True)
student_id = Column(String(120))
    position_id = Column(Integer)
def __init__(self, app_id=None,
student_id=None,
position_id=None):
self.app_id = app_id
self.student_id = student_id
self.position_id = position_id
class OfferModel(Base):
__tablename__ = 'offer'
    offer_id = Column(Integer, primary_key=True)
    app_id = Column(Integer)
offerMade = Column(String(120))
offer_date = Column(String(120))
response = Column(String(120))
response_date = Column(String(120))
available = Column(String(120))
def __init__(self, offer_id=None, app_id=None,
offerMade=None, offer_date=None,
response=None, response_date=None,
available=None):
self.offer_id = offer_id
self.app_id = app_id
self.offerMade = offerMade
self.offer_date = offer_date
self.response = response
self.response_date = response_date
self.available = available | gpl-2.0 | 6,918,131,131,799,236,000 | 34.382979 | 63 | 0.626303 | false |
rizar/attention-lvcsr | libs/blocks/blocks/bricks/attention.py | 1 | 30408 | """Attention mechanisms.
This module defines the interface of attention mechanisms and a few
concrete implementations. For a gentle introduction and usage examples see
the tutorial TODO.
An attention mechanism decides to what part of the input to pay attention.
It is typically used as a component of a recurrent network, though one can
imagine it used in other conditions as well. When the input is big and has
certain structure, for instance when it is a sequence or an image, an
attention mechanism can be applied to extract only information which is
relevant for the network in its current state.
For the purpose of documentation clarity, we fix the following terminology
in this file:
* *network* is the network, typically a recurrent one, which
uses the attention mechanism.
* The network has *states*. Using this word in plural might seem weird, but
some recurrent networks like :class:`~blocks.bricks.recurrent.LSTM` do
have several states.
* The big structured input, to which the attention mechanism is applied,
is called the *attended*. When it has variable structure, e.g. a sequence
of variable length, there might be a *mask* associated with it.
* The information extracted by the attention from the attended is called
*glimpse*, more specifically *glimpses* because there might be a few
pieces of this information.
Using this terminology, the attention mechanism computes glimpses
given the states of the network and the attended.
An example: in the machine translation network from [BCB]_ the attended is
a sequence of so-called annotations, that is, states of a bidirectional
network that was driven by word embeddings of the source sentence. The
attention mechanism assigns weights to the annotations. The weighted sum of
the annotations is further used by the translation network to predict the
next word of the generated translation. The weights and the weighted sum
are the glimpses. A generalized attention mechanism for this paper is
represented here as :class:`SequenceContentAttention`.
"""
from abc import ABCMeta, abstractmethod
from theano import tensor
from six import add_metaclass
from blocks.bricks import (Brick, Initializable, Sequence,
Feedforward, Linear, Tanh)
from blocks.bricks.base import lazy, application
from blocks.bricks.parallel import Parallel, Distribute
from blocks.bricks.recurrent import recurrent, BaseRecurrent
from blocks.utils import dict_union, dict_subset, pack
class AbstractAttention(Brick):
"""The common interface for attention bricks.
First, see the module-level docstring for terminology.
A generic attention mechanism functions as follows. Its inputs are the
states of the network and the attended. Given these two it produces
    so-called *glimpses*, that is, it extracts information from the attended
    which is necessary for the network in its current states.
For computational reasons we separate the process described above into
two stages:
    1. The preprocessing stage, :meth:`preprocess`, includes computations
    that do not involve the states. Those can often be performed in advance.
The outcome of this stage is called *preprocessed_attended*.
2. The main stage, :meth:`take_glimpses`, includes all the rest.
When an attention mechanism is applied sequentially, some glimpses from
the previous step might be necessary to compute the new ones. A
typical example for that is when the focus position from the previous
step is required. In such cases :meth:`take_glimpses` should specify
such need in its interface (its docstring explains how to do that). In
addition :meth:`initial_glimpses` should specify some sensible
initialization for the glimpses to be carried over.
.. todo::
Only single attended is currently allowed.
:meth:`preprocess` and :meth:`initial_glimpses` might end up
needing masks, which are currently not provided for them.
Parameters
----------
state_names : list
The names of the network states.
state_dims : list
The state dimensions corresponding to `state_names`.
attended_dim : int
The dimension of the attended.
Attributes
----------
state_names : list
state_dims : list
attended_dim : int
"""
@lazy(allocation=['state_names', 'state_dims', 'attended_dim'])
def __init__(self, state_names, state_dims, attended_dim, **kwargs):
self.state_names = state_names
self.state_dims = state_dims
self.attended_dim = attended_dim
super(AbstractAttention, self).__init__(**kwargs)
@application(inputs=['attended'], outputs=['preprocessed_attended'])
def preprocess(self, attended):
"""Perform the preprocessing of the attended.
Stage 1 of the attention mechanism, see :class:`AbstractAttention`
docstring for an explanation of stages. The default implementation
simply returns attended.
Parameters
----------
attended : :class:`~theano.Variable`
The attended.
Returns
-------
preprocessed_attended : :class:`~theano.Variable`
The preprocessed attended.
"""
return attended
@abstractmethod
def take_glimpses(self, attended, preprocessed_attended=None,
attended_mask=None, **kwargs):
r"""Extract glimpses from the attended given the current states.
Stage 2 of the attention mechanism, see :class:`AbstractAttention`
for an explanation of stages. If `preprocessed_attended` is not
        given, it should trigger stage 1.
This application method *must* declare its inputs and outputs.
The glimpses to be carried over are identified by their presence
in both inputs and outputs list. The attended *must* be the first
input, the preprocessed attended *must* be the second one.
Parameters
----------
attended : :class:`~theano.Variable`
The attended.
preprocessed_attended : :class:`~theano.Variable`, optional
The preprocessed attended computed by :meth:`preprocess`. When
not given, :meth:`preprocess` should be called.
attended_mask : :class:`~theano.Variable`, optional
The mask for the attended. This is required in the case of
            padded structured output, e.g. when a number of sequences are
            forced to be the same length. The mask identifies positions of
            the `attended` that actually contain information.
\*\*kwargs : dict
Includes the states and the glimpses to be carried over from
the previous step in the case when the attention mechanism is
applied sequentially.
"""
pass
@abstractmethod
def initial_glimpses(self, batch_size, attended):
"""Return sensible initial values for carried over glimpses.
Parameters
----------
batch_size : int or :class:`~theano.Variable`
The batch size.
attended : :class:`~theano.Variable`
The attended.
Returns
-------
initial_glimpses : list of :class:`~theano.Variable`
The initial values for the requested glimpses. These might
simply consist of zeros or be somehow extracted from
the attended.
"""
pass
def get_dim(self, name):
if name in ['attended', 'preprocessed_attended']:
return self.attended_dim
if name in ['attended_mask']:
return 0
return super(AbstractAttention, self).get_dim(name)
class GenericSequenceAttention(AbstractAttention):
"""Logic common for sequence attention mechanisms."""
@application
def compute_weights(self, energies, attended_mask):
"""Compute weights from energies in softmax-like fashion.
        .. todo::
Use :class:`~blocks.bricks.Softmax`.
Parameters
----------
energies : :class:`~theano.Variable`
The energies. Must be of the same shape as the mask.
attended_mask : :class:`~theano.Variable`
The mask for the attended. The index in the sequence must be
the first dimension.
Returns
-------
weights : :class:`~theano.Variable`
            Non-negative weights of the same shape as `energies`
            that sum to 1.
"""
# Stabilize energies first and then exponentiate
energies = energies - energies.max(axis=0)
unnormalized_weights = tensor.exp(energies)
if attended_mask:
unnormalized_weights *= attended_mask
# If mask consists of all zeros use 1 as the normalization coefficient
normalization = (unnormalized_weights.sum(axis=0) +
tensor.all(1 - attended_mask, axis=0))
return unnormalized_weights / normalization
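    # Numeric sketch of the normalization (hypothetical values, numpy-style):
    #     energies = np.array([[1.], [3.]])          # (time=2, batch=1)
    #     mask = np.array([[1.], [1.]])
    #     w = np.exp(energies - energies.max(0)) * mask
    #     w / w.sum(0)                               # -> [[0.119], [0.881]]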
@application
def compute_weighted_averages(self, weights, attended):
"""Compute weighted averages of the attended sequence vectors.
Parameters
----------
weights : :class:`~theano.Variable`
The weights. The shape must be equal to the attended shape
without the last dimension.
attended : :class:`~theano.Variable`
The attended. The index in the sequence must be the first
dimension.
Returns
-------
weighted_averages : :class:`~theano.Variable`
The weighted averages of the attended elements. The shape
is equal to the attended shape with the first dimension
dropped.
"""
return (tensor.shape_padright(weights) * attended).sum(axis=0)
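    # Shape sketch (hypothetical sizes): with ``weights`` of shape
    # (time=5, batch=3) and ``attended`` of shape (5, 3, dim=7), padding the
    # weights right gives (5, 3, 1); broadcasting the product and summing
    # over axis 0 yields glimpses of shape (3, 7).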
class SequenceContentAttention(GenericSequenceAttention, Initializable):
"""Attention mechanism that looks for relevant content in a sequence.
This is the attention mechanism used in [BCB]_. The idea in a nutshell:
1. The states and the sequence are transformed independently,
2. The transformed states are summed with every transformed sequence
element to obtain *match vectors*,
3. A match vector is transformed into a single number interpreted as
*energy*,
    4. Energies are normalized in softmax-like fashion. The resulting
    weights, which sum to one, are called *attention weights*,
5. Weighted average of the sequence elements with attention weights
is computed.
In terms of the :class:`AbstractAttention` documentation, the sequence
is the attended. The weighted averages from 5 and the attention
weights from 4 form the set of glimpses produced by this attention
mechanism.
Parameters
----------
state_names : list of str
The names of the network states.
attended_dim : int
The dimension of the sequence elements.
match_dim : int
The dimension of the match vector.
state_transformer : :class:`.Brick`
A prototype for state transformations. If ``None``,
a linear transformation is used.
attended_transformer : :class:`.Feedforward`
The transformation to be applied to the sequence. If ``None`` an
affine transformation is used.
energy_computer : :class:`.Feedforward`
        Computes energy from the match vector. If ``None``, an affine
        transformation preceded by :math:`tanh` is used.
Notes
-----
See :class:`.Initializable` for initialization parameters.
.. [BCB] Dzmitry Bahdanau, Kyunghyun Cho and Yoshua Bengio. Neural
Machine Translation by Jointly Learning to Align and Translate.
"""
@lazy(allocation=['match_dim'])
def __init__(self, match_dim, state_transformer=None,
attended_transformer=None, energy_computer=None, **kwargs):
super(SequenceContentAttention, self).__init__(**kwargs)
if not state_transformer:
state_transformer = Linear(use_bias=False)
self.match_dim = match_dim
self.state_transformer = state_transformer
self.state_transformers = Parallel(input_names=self.state_names,
prototype=state_transformer,
name="state_trans")
if not attended_transformer:
attended_transformer = Linear(name="preprocess")
if not energy_computer:
energy_computer = ShallowEnergyComputer(name="energy_comp")
self.attended_transformer = attended_transformer
self.energy_computer = energy_computer
self.children = [self.state_transformers, attended_transformer,
energy_computer]
def _push_allocation_config(self):
self.state_transformers.input_dims = self.state_dims
self.state_transformers.output_dims = [self.match_dim
for name in self.state_names]
self.attended_transformer.input_dim = self.attended_dim
self.attended_transformer.output_dim = self.match_dim
self.energy_computer.input_dim = self.match_dim
self.energy_computer.output_dim = 1
@application
def compute_energies(self, attended, preprocessed_attended, states):
if not preprocessed_attended:
preprocessed_attended = self.preprocess(attended)
transformed_states = self.state_transformers.apply(as_dict=True,
**states)
# Broadcasting of transformed states should be done automatically
match_vectors = sum(transformed_states.values(),
preprocessed_attended)
energies = self.energy_computer.apply(match_vectors).reshape(
match_vectors.shape[:-1], ndim=match_vectors.ndim - 1)
return energies
@application(outputs=['weighted_averages', 'weights'])
def take_glimpses(self, attended, preprocessed_attended=None,
attended_mask=None, **states):
r"""Compute attention weights and produce glimpses.
Parameters
----------
attended : :class:`~tensor.TensorVariable`
The sequence, time is the 1-st dimension.
preprocessed_attended : :class:`~tensor.TensorVariable`
The preprocessed sequence. If ``None``, is computed by calling
:meth:`preprocess`.
attended_mask : :class:`~tensor.TensorVariable`
A 0/1 mask specifying available data. 0 means that the
corresponding sequence element is fake.
\*\*states
The states of the network.
Returns
-------
weighted_averages : :class:`~theano.Variable`
Linear combinations of sequence elements with the attention
weights.
weights : :class:`~theano.Variable`
The attention weights. The first dimension is batch, the second
is time.
"""
energies = self.compute_energies(attended, preprocessed_attended,
states)
weights = self.compute_weights(energies, attended_mask)
weighted_averages = self.compute_weighted_averages(weights, attended)
return weighted_averages, weights.T
@take_glimpses.property('inputs')
def take_glimpses_inputs(self):
return (['attended', 'preprocessed_attended', 'attended_mask'] +
self.state_names)
@application(outputs=['weighted_averages', 'weights'])
def initial_glimpses(self, batch_size, attended):
return [tensor.zeros((batch_size, self.attended_dim)),
tensor.zeros((batch_size, attended.shape[0]))]
@application(inputs=['attended'], outputs=['preprocessed_attended'])
def preprocess(self, attended):
"""Preprocess the sequence for computing attention weights.
Parameters
----------
attended : :class:`~tensor.TensorVariable`
The attended sequence, time is the 1-st dimension.
"""
return self.attended_transformer.apply(attended)
def get_dim(self, name):
if name in ['weighted_averages']:
return self.attended_dim
if name in ['weights']:
return 0
return super(SequenceContentAttention, self).get_dim(name)
class ShallowEnergyComputer(Sequence, Initializable, Feedforward):
"""A simple energy computer: first tanh, then weighted sum.
Parameters
----------
use_bias : bool, optional
Whether a bias should be added to the energies. Does not change
anything if softmax normalization is used to produce the attention
weights, but might be useful when e.g. spherical softmax is used.
"""
@lazy()
def __init__(self, use_bias=False, **kwargs):
super(ShallowEnergyComputer, self).__init__(
[Tanh().apply, Linear(use_bias=use_bias).apply], **kwargs)
@property
def input_dim(self):
return self.children[1].input_dim
@input_dim.setter
def input_dim(self, value):
self.children[1].input_dim = value
@property
def output_dim(self):
return self.children[1].output_dim
@output_dim.setter
def output_dim(self, value):
self.children[1].output_dim = value
@add_metaclass(ABCMeta)
class AbstractAttentionRecurrent(BaseRecurrent):
"""The interface for attention-equipped recurrent transitions.
When a recurrent network is equipped with an attention mechanism its
transition typically consists of two steps: (1) the glimpses are taken
by the attention mechanism and (2) the next states are computed using
the current states and the glimpses. It is required for certain
usecases (such as sequence generator) that apart from a do-it-all
    recurrent application method, interfaces for the first and the
    second step of the transition are provided.
"""
@abstractmethod
def apply(self, **kwargs):
"""Compute next states taking glimpses on the way."""
pass
@abstractmethod
def take_glimpses(self, **kwargs):
"""Compute glimpses given the current states."""
pass
@abstractmethod
def compute_states(self, **kwargs):
"""Compute next states given current states and glimpses."""
pass
class AttentionRecurrent(AbstractAttentionRecurrent, Initializable):
"""Combines an attention mechanism and a recurrent transition.
This brick equips a recurrent transition with an attention mechanism.
In order to do this two more contexts are added: one to be attended and
a mask for it. It is also possible to use the contexts of the given
recurrent transition for these purposes and not add any new ones,
see `add_context` parameter.
At the beginning of each step attention mechanism produces glimpses;
these glimpses together with the current states are used to compute the
next state and finish the transition. In some cases glimpses from the
previous steps are also necessary for the attention mechanism, e.g.
in order to focus on an area close to the one from the previous step.
This is also supported: such glimpses become states of the new
transition.
To let the user control the way glimpses are used, this brick also
takes a "distribute" brick as parameter that distributes the
information from glimpses across the sequential inputs of the wrapped
recurrent transition.
Parameters
----------
transition : :class:`.BaseRecurrent`
The recurrent transition.
attention : :class:`.Brick`
The attention mechanism.
distribute : :class:`.Brick`, optional
Distributes the information from glimpses across the input
sequences of the transition. By default a :class:`.Distribute` is
used, and those inputs containing the "mask" substring in their
name are not affected.
add_contexts : bool, optional
If ``True``, new contexts for the attended and the attended mask
are added to this transition, otherwise existing contexts of the
wrapped transition are used. ``True`` by default.
attended_name : str
The name of the attended context. If ``None``, "attended"
or the first context of the recurrent transition is used
        depending on the value of the `add_contexts` flag.
attended_mask_name : str
The name of the mask for the attended context. If ``None``,
"attended_mask" or the second context of the recurrent transition
        is used depending on the value of the `add_contexts` flag.
Notes
-----
See :class:`.Initializable` for initialization parameters.
Wrapping your recurrent brick with this class makes all the
states mandatory. If you feel this is a limitation for you, try
to make it better! This restriction does not apply to sequences
and contexts: those keep being as optional as they were for
your brick.
Those coming to Blocks from Groundhog might recognize that this is
a `RecurrentLayerWithSearch`, but on steroids :)
"""
def __init__(self, transition, attention, distribute=None,
add_contexts=True,
attended_name=None, attended_mask_name=None,
**kwargs):
super(AttentionRecurrent, self).__init__(**kwargs)
self._sequence_names = list(transition.apply.sequences)
self._state_names = list(transition.apply.states)
self._context_names = list(transition.apply.contexts)
if add_contexts:
if not attended_name:
attended_name = 'attended'
if not attended_mask_name:
attended_mask_name = 'attended_mask'
self._context_names += [attended_name, attended_mask_name]
else:
attended_name = self._context_names[0]
attended_mask_name = self._context_names[1]
if not distribute:
normal_inputs = [name for name in self._sequence_names
if 'mask' not in name]
distribute = Distribute(normal_inputs,
attention.take_glimpses.outputs[0])
self.transition = transition
self.attention = attention
self.distribute = distribute
self.add_contexts = add_contexts
self.attended_name = attended_name
self.attended_mask_name = attended_mask_name
self.preprocessed_attended_name = "preprocessed_" + self.attended_name
self._glimpse_names = self.attention.take_glimpses.outputs
# We need to determine which glimpses are fed back.
# Currently we extract it from `take_glimpses` signature.
self.previous_glimpses_needed = [
name for name in self._glimpse_names
if name in self.attention.take_glimpses.inputs]
self.children = [self.transition, self.attention, self.distribute]
def _push_allocation_config(self):
self.attention.state_dims = self.transition.get_dims(
self.attention.state_names)
self.attention.attended_dim = self.get_dim(self.attended_name)
self.distribute.source_dim = self.attention.get_dim(
self.distribute.source_name)
self.distribute.target_dims = self.transition.get_dims(
self.distribute.target_names)
@application
def take_glimpses(self, **kwargs):
r"""Compute glimpses with the attention mechanism.
A thin wrapper over `self.attention.take_glimpses`: takes care
of choosing and renaming the necessary arguments.
Parameters
----------
\*\*kwargs
Must contain the attended, previous step states and glimpses.
            Can optionally contain the attended mask and the preprocessed
attended.
Returns
-------
glimpses : list of :class:`~tensor.TensorVariable`
Current step glimpses.
"""
states = dict_subset(kwargs, self._state_names, pop=True)
glimpses = dict_subset(kwargs, self._glimpse_names, pop=True)
glimpses_needed = dict_subset(glimpses, self.previous_glimpses_needed)
result = self.attention.take_glimpses(
kwargs.pop(self.attended_name),
kwargs.pop(self.preprocessed_attended_name, None),
kwargs.pop(self.attended_mask_name, None),
**dict_union(states, glimpses_needed))
# At this point kwargs may contain additional items.
# e.g. AttentionRecurrent.transition.apply.contexts
return result
@take_glimpses.property('outputs')
def take_glimpses_outputs(self):
return self._glimpse_names
@application
def compute_states(self, **kwargs):
r"""Compute current states when glimpses have already been computed.
        Combines an application of the `distribute` that alters the
sequential inputs of the wrapped transition and an application of
the wrapped transition. All unknown keyword arguments go to
the wrapped transition.
Parameters
----------
\*\*kwargs
Should contain everything what `self.transition` needs
and in addition the current glimpses.
Returns
-------
current_states : list of :class:`~tensor.TensorVariable`
Current states computed by `self.transition`.
"""
# make sure we are not popping the mask
normal_inputs = [name for name in self._sequence_names
if 'mask' not in name]
sequences = dict_subset(kwargs, normal_inputs, pop=True)
glimpses = dict_subset(kwargs, self._glimpse_names, pop=True)
if self.add_contexts:
kwargs.pop(self.attended_name)
# attended_mask_name can be optional
kwargs.pop(self.attended_mask_name, None)
sequences.update(self.distribute.apply(
as_dict=True, **dict_subset(dict_union(sequences, glimpses),
self.distribute.apply.inputs)))
current_states = self.transition.apply(
iterate=False, as_list=True,
**dict_union(sequences, kwargs))
return current_states
@compute_states.property('outputs')
def compute_states_outputs(self):
return self._state_names
@recurrent
def do_apply(self, **kwargs):
r"""Process a sequence attending the attended context every step.
In addition to the original sequence this method also requires
its preprocessed version, the one computed by the `preprocess`
method of the attention mechanism. Unknown keyword arguments
are passed to the wrapped transition.
Parameters
----------
\*\*kwargs
Should contain current inputs, previous step states, contexts,
the preprocessed attended context, previous step glimpses.
Returns
-------
outputs : list of :class:`~tensor.TensorVariable`
The current step states and glimpses.
"""
attended = kwargs[self.attended_name]
preprocessed_attended = kwargs.pop(self.preprocessed_attended_name)
attended_mask = kwargs.get(self.attended_mask_name)
sequences = dict_subset(kwargs, self._sequence_names, pop=True,
must_have=False)
states = dict_subset(kwargs, self._state_names, pop=True)
glimpses = dict_subset(kwargs, self._glimpse_names, pop=True)
current_glimpses = self.take_glimpses(
as_dict=True,
**dict_union(
states, glimpses,
{self.attended_name: attended,
self.attended_mask_name: attended_mask,
self.preprocessed_attended_name: preprocessed_attended}))
current_states = self.compute_states(
as_list=True,
**dict_union(sequences, states, current_glimpses, kwargs))
return current_states + list(current_glimpses.values())
@do_apply.property('sequences')
def do_apply_sequences(self):
return self._sequence_names
@do_apply.property('contexts')
def do_apply_contexts(self):
return self._context_names + [self.preprocessed_attended_name]
@do_apply.property('states')
def do_apply_states(self):
return self._state_names + self._glimpse_names
@do_apply.property('outputs')
def do_apply_outputs(self):
return self._state_names + self._glimpse_names
@application
def apply(self, **kwargs):
"""Preprocess a sequence attending the attended context at every step.
Preprocesses the attended context and runs :meth:`do_apply`. See
:meth:`do_apply` documentation for further information.
"""
preprocessed_attended = self.attention.preprocess(
kwargs[self.attended_name])
return self.do_apply(
**dict_union(kwargs,
{self.preprocessed_attended_name:
preprocessed_attended}))
@apply.delegate
def apply_delegate(self):
# TODO: Nice interface for this trick?
return self.do_apply.__get__(self, None)
@apply.property('contexts')
def apply_contexts(self):
return self._context_names
@application
def initial_states(self, batch_size, **kwargs):
return (pack(self.transition.initial_states(
batch_size, **kwargs)) +
pack(self.attention.initial_glimpses(
batch_size, kwargs[self.attended_name])))
@initial_states.property('outputs')
def initial_states_outputs(self):
return self.do_apply.states
def get_dim(self, name):
if name in self._glimpse_names:
return self.attention.get_dim(name)
if name == self.preprocessed_attended_name:
(original_name,) = self.attention.preprocess.outputs
return self.attention.get_dim(original_name)
if self.add_contexts:
if name == self.attended_name:
return self.attention.get_dim(
self.attention.take_glimpses.inputs[0])
if name == self.attended_mask_name:
return 0
return self.transition.get_dim(name)
| mit | -4,698,357,808,242,801,000 | 38.388601 | 78 | 0.650848 | false |
unrealcv/unrealcv | test/server/stereo_test.py | 1 | 2309 | # pytest -s stereo_test.py -k [name]
from unrealcv import client
import math, random
from conftest import checker, ver
import pytest
class Vec3:
def __init__(self, data):
if isinstance(data, str):
self.vec = [float(v) for v in data.split(' ')]
if isinstance(data, list):
self.vec = data
def __str__(self):
return ' '.join([str(v) for v in self.vec])
def l2norm(self):
return math.sqrt(sum([v*v for v in self.vec]))
def __sub__(self, v):
return Vec3([a-b for (a,b) in zip(self.vec, v.vec)])
def __add__(self, v):
return Vec3([a+b for (a,b) in zip(self.vec, v.vec)])
def random_vec3(min=-90, max=90):
return Vec3([random.randrange(min, max) for _ in range(3)])
def approx(a, b, tol = 0.01):
return abs(a - b) < tol
@pytest.mark.skipif(ver() < (0, 3, 2), reason = 'eyes_distance is implemented before v0.3.2')
def test_camera_distance():
client.connect()
for test_distance in [20, 40, 60]:
res = client.request('vset /action/eyes_distance %d' % test_distance)
assert checker.is_ok(res)
for _ in range(5):
client.request('vset /camera/0/rotation %s' % str(random_vec3()))
actor_loc = Vec3(client.request('vget /actor/location'))
loc1 = Vec3(client.request('vget /camera/0/location'))
loc2 = Vec3(client.request('vget /camera/1/location'))
print('%s %s %s' % (actor_loc, loc1, loc2))
actual_dist = (loc1 - loc2).l2norm()
expect_dist = test_distance
assert approx(actual_dist, expect_dist)
actor_cam0_distance = (actor_loc - loc1).l2norm()
assert approx(actor_cam0_distance, 0)
@pytest.mark.skipif(ver() < (0, 3, 2), reason = 'pause is implemented before v0.3.2')
def test_pause():
client.connect()
cmds = [
'vset /action/game/pause',
'vget /camera/0/lit',
'vget /camera/1/lit',
'vset /action/game/pause',
]
for cmd in cmds:
res = client.request(cmd)
assert checker.not_error(res)
if __name__ == '__main__':
def test_vec3():
a = Vec3([0, 0, 0])
b = Vec3([1, 1, 1])
assert(approx((a-b).l2norm(), math.sqrt(3)))
assert(approx((a+b).l2norm(), math.sqrt(3)))
| mit | -8,115,494,819,332,759,000 | 31.521127 | 93 | 0.565613 | false |
UFTS-Device/NanoCoin | contrib/devtools/fix-copyright-headers.py | 1 | 1353 | #!/usr/bin/env python
'''
Run this script to update all the copyright headers of files
that were changed this year.
For example:
// Copyright (c) 2009-2012 The NanoCoin Core developers
it will change it to
// Copyright (c) 2009-2015 The NanoCoin Core developers
'''
import os
import time
import re
year = time.gmtime()[0]
CMD_GIT_DATE = 'git log --format=@%%at -1 %s | date +"%%Y" -u -f -'
CMD_REGEX= "perl -pi -e 's/(20\d\d)(?:-20\d\d)? The NanoCoin/$1-%s The NanoCoin/' %s"
REGEX_CURRENT= re.compile("%s The NanoCoin" % year)
CMD_LIST_FILES= "find %s | grep %s"
FOLDERS = ["./qa", "./src"]
EXTENSIONS = [".cpp",".h", ".py"]
def get_git_date(file_path):
r = os.popen(CMD_GIT_DATE % file_path)
for l in r:
# Result is one line, so just return
return l.replace("\n","")
return ""
n = 1
for folder in FOLDERS:
for extension in EXTENSIONS:
for file_path in os.popen(CMD_LIST_FILES % (folder, extension)):
file_path = os.getcwd() + file_path[1:-1]
if file_path.endswith(extension):
git_date = get_git_date(file_path)
if str(year) == git_date:
# Only update if current year is not found
if REGEX_CURRENT.search(open(file_path, "r").read()) is None:
print n,"Last git edit", git_date, "-", file_path
os.popen(CMD_REGEX % (year,file_path))
n = n + 1
| mit | -4,160,406,511,019,043,000 | 28.413043 | 85 | 0.617147 | false |
esc/castra | castra/core.py | 1 | 12203 | from collections import Iterator
import os
from os.path import exists, isdir
try:
import cPickle as pickle
except ImportError:
import pickle
import shutil
import tempfile
from functools import partial
import blosc
import bloscpack
import numpy as np
import pandas as pd
from pandas import msgpack
def escape(text):
return str(text)
def mkdir(path):
if not exists(path):
os.makedirs(path)
class Castra(object):
meta_fields = ['columns', 'dtypes', 'index_dtype', 'axis_names']
def __init__(self, path=None, template=None, categories=None):
# check if we should create a random path
self._explicitly_given_path = path is not None
if not self._explicitly_given_path:
self.path = tempfile.mkdtemp(prefix='castra-')
else:
self.path = path
# check if the given path exists already and create it if it doesn't
mkdir(self.path)
# raise an Exception if it isn't a directory
if not isdir(self.path):
raise ValueError("'path': %s must be a directory")
# either we have a meta directory
if isdir(self.dirname('meta')):
if template is not None:
raise ValueError(
"'template' must be 'None' when opening a Castra")
self.load_meta()
self.load_partitions()
self.load_categories()
# or we don't, in which case we need a template
elif template is not None:
self.columns, self.dtypes, self.index_dtype = \
list(template.columns), template.dtypes, template.index.dtype
self.axis_names = [template.index.name, template.columns.name]
self.partitions = pd.Series([], dtype='O',
index=template.index.__class__([]))
self.minimum = None
if isinstance(categories, (list, tuple)):
self.categories = dict((col, []) for col in categories)
elif categories is True:
self.categories = dict((col, [])
for col in template.columns
if template.dtypes[col] == 'object')
else:
self.categories = dict()
if self.categories:
categories = set(self.categories)
template_categories = set(template.dtypes.index.values)
if categories.difference(template_categories):
raise ValueError('passed in categories %s are not all '
'contained in template dataframe columns '
'%s' % (categories, template_categories))
for c in self.categories:
self.dtypes[c] = pd.core.categorical.CategoricalDtype()
mkdir(self.dirname('meta', 'categories'))
self.flush_meta()
self.save_partitions()
else:
raise ValueError(
"must specify a 'template' when creating a new Castra")
def load_meta(self, loads=pickle.loads):
for name in self.meta_fields:
with open(self.dirname('meta', name), 'rb') as f:
setattr(self, name, loads(f.read()))
def flush_meta(self, dumps=partial(pickle.dumps, protocol=2)):
for name in self.meta_fields:
with open(self.dirname('meta', name), 'wb') as f:
f.write(dumps(getattr(self, name)))
def load_partitions(self, loads=pickle.loads):
with open(self.dirname('meta', 'plist'), 'rb') as f:
self.partitions = loads(f.read())
with open(self.dirname('meta', 'minimum'), 'rb') as f:
self.minimum = loads(f.read())
def save_partitions(self, dumps=partial(pickle.dumps, protocol=2)):
with open(self.dirname('meta', 'minimum'), 'wb') as f:
f.write(dumps(self.minimum))
with open(self.dirname('meta', 'plist'), 'wb') as f:
f.write(dumps(self.partitions))
def append_categories(self, new, dumps=partial(pickle.dumps, protocol=2)):
separator = b'-sep-'
for col, cat in new.items():
if cat:
with open(self.dirname('meta', 'categories', col), 'ab') as f:
f.write(separator.join(map(dumps, cat)))
f.write(separator)
def load_categories(self, loads=pickle.loads):
separator = b'-sep-'
self.categories = dict()
for col in self.columns:
fn = self.dirname('meta', 'categories', col)
if os.path.exists(fn):
with open(fn, 'rb') as f:
text = f.read()
self.categories[col] = [loads(x)
for x in text.split(separator)[:-1]]
def extend(self, df):
# TODO: Ensure that df is consistent with existing data
if not df.index.is_monotonic_increasing:
df = df.sort_index(inplace=False)
index = df.index.values
partition_name = '--'.join([escape(index.min()), escape(index.max())])
mkdir(self.dirname(partition_name))
new_categories, self.categories, df = _decategorize(self.categories, df)
self.append_categories(new_categories)
# Store columns
for col in df.columns:
pack_file(df[col].values, self.dirname(partition_name, col))
# Store index
fn = self.dirname(partition_name, '.index')
x = df.index.values
bloscpack.pack_ndarray_file(x, fn)
if not len(self.partitions):
self.minimum = index.min()
self.partitions[index.max()] = partition_name
self.flush()
def dirname(self, *args):
return os.path.join(self.path, *args)
def load_partition(self, name, columns, categorize=True):
if isinstance(columns, Iterator):
columns = list(columns)
if not isinstance(columns, list):
df = self.load_partition(name, [columns], categorize=categorize)
return df.iloc[:, 0]
arrays = [unpack_file(self.dirname(name, col)) for col in columns]
index = unpack_file(self.dirname(name, '.index'))
df = pd.DataFrame(dict(zip(columns, arrays)),
columns=pd.Index(columns, name=self.axis_names[1]),
index=pd.Index(index, dtype=self.index_dtype,
name=self.axis_names[0]))
if categorize:
df = _categorize(self.categories, df)
return df
def __getitem__(self, key):
if isinstance(key, tuple):
key, columns = key
else:
columns = self.columns
start, stop = key.start, key.stop
names = select_partitions(self.partitions, key)
data_frames = [self.load_partition(name, columns, categorize=False)
for name in names]
data_frames[0] = data_frames[0].loc[start:]
data_frames[-1] = data_frames[-1].loc[:stop]
df = pd.concat(data_frames)
df = _categorize(self.categories, df)
return df
def drop(self):
if os.path.exists(self.path):
shutil.rmtree(self.path)
def flush(self):
self.save_partitions()
def __enter__(self):
return self
def __exit__(self, *args):
if not self._explicitly_given_path:
self.drop()
else:
self.flush()
def __del__(self):
if not self._explicitly_given_path:
self.drop()
else:
self.flush()
def __getstate__(self):
self.flush()
return (self.path, self._explicitly_given_path)
def __setstate__(self, state):
self.path = state[0]
self._explicitly_given_path = state[1]
self.load_meta()
self.load_partitions()
self.load_categories()
def to_dask(self, columns=None):
if columns is None:
columns = self.columns
import dask.dataframe as dd
name = 'from-castra' + next(dd.core.tokens)
dsk = dict(((name, i), (Castra.load_partition, self, part, columns))
for i, part in enumerate(self.partitions.values))
divisions = [self.minimum] + list(self.partitions.index)
if isinstance(columns, list):
return dd.DataFrame(dsk, name, columns, divisions)
else:
return dd.Series(dsk, name, columns, divisions)
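# Typical round trip (a sketch; the path and frame are hypothetical):
#     df = pd.DataFrame({'x': [1.0, 2.0]}, index=[10, 20])
#     with Castra('/tmp/data.castra', template=df) as c:
#         c.extend(df)
#         part = c[10:20]           # slice the partitions back out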
def pack_file(x, fn, encoding='utf8'):
""" Pack numpy array into filename
Supports binary data with bloscpack and text data with msgpack+blosc
>>> pack_file(np.array([1, 2, 3]), 'foo.blp') # doctest: +SKIP
See also:
unpack_file
"""
if x.dtype != 'O':
bloscpack.pack_ndarray_file(x, fn)
else:
bytes = blosc.compress(msgpack.packb(x.tolist(), encoding=encoding), 1)
with open(fn, 'wb') as f:
f.write(bytes)
def unpack_file(fn, encoding='utf8'):
""" Unpack numpy array from filename
Supports binary data with bloscpack and text data with msgpack+blosc
>>> unpack_file('foo.blp') # doctest: +SKIP
array([1, 2, 3])
See also:
pack_file
"""
try:
return bloscpack.unpack_ndarray_file(fn)
except ValueError:
with open(fn, 'rb') as f:
return np.array(msgpack.unpackb(blosc.decompress(f.read()),
encoding=encoding))
def coerce_index(dt, o):
if np.issubdtype(dt, np.datetime64):
return pd.Timestamp(o)
return o
def select_partitions(partitions, key):
""" Select partitions from partition list given slice
>>> p = pd.Series(['a', 'b', 'c', 'd', 'e'], index=[0, 10, 20, 30, 40])
>>> select_partitions(p, slice(3, 25))
['b', 'c', 'd']
"""
assert key.step is None, 'step must be None but was %s' % key.step
start, stop = key.start, key.stop
names = list(partitions.loc[start:stop])
last = partitions.searchsorted(names[-1])[0]
stop2 = coerce_index(partitions.index.dtype, stop)
if (stop2 is not None and
partitions.index[last] < stop2 and
len(partitions) > last + 1):
names.append(partitions.iloc[last + 1])
return names
def _decategorize(categories, df):
""" Strip object dtypes from dataframe, update categories
Given a DataFrame
>>> df = pd.DataFrame({'x': [1, 2, 3], 'y': ['C', 'B', 'B']})
And a dict of known categories
>>> _ = categories = {'y': ['A', 'B']}
Update dict and dataframe in place
>>> extra, categories, df = _decategorize(categories, df)
>>> extra
{'y': ['C']}
>>> categories
{'y': ['A', 'B', 'C']}
>>> df
x y
0 1 2
1 2 1
2 3 1
"""
extra = dict()
new_categories = dict()
new_columns = dict((col, df[col]) for col in df.columns)
for col, cat in categories.items():
idx = pd.Index(df[col])
idx = getattr(idx, 'categories', idx)
extra[col] = idx[~idx.isin(cat)].unique().tolist()
new_categories[col] = cat + extra[col]
new_columns[col] = pd.Categorical(df[col], new_categories[col]).codes
new_df = pd.DataFrame(new_columns, columns=df.columns, index=df.index)
return extra, new_categories, new_df
def _categorize(categories, df):
""" Categorize columns in dataframe
>>> df = pd.DataFrame({'x': [1, 2, 3], 'y': [0, 2, 0]})
>>> categories = {'y': ['A', 'B', 'c']}
>>> _categorize(categories, df)
x y
0 1 A
1 2 c
2 3 A
"""
if isinstance(df, pd.Series):
if df.name in categories:
cat = pd.Categorical.from_codes(df.values, categories[df.name])
return pd.Series(cat, index=df.index)
else:
return df
else:
return pd.DataFrame(
dict((col, pd.Categorical.from_codes(df[col], categories[col])
if col in categories
else df[col])
for col in df.columns),
columns=df.columns,
index=df.index)
| bsd-3-clause | -5,507,845,283,135,704,000 | 31.541333 | 80 | 0.557158 | false |
simpeg/discretize | examples/plot_cahn_hilliard.py | 1 | 4099 | """
Operators: Cahn Hilliard
========================
This example is based on the example in the FiPy_ library.
Please see their documentation for more information about the
Cahn-Hilliard equation.
The "Cahn-Hilliard" equation separates a field \\\\( \\\\phi \\\\)
into 0 and 1 with smooth transitions.
.. math::
\\frac{\partial \phi}{\partial t} = \\nabla \cdot D \\nabla \left( \\frac{\partial f}{\partial \phi} - \epsilon^2 \\nabla^2 \phi \\right)
Where \\\\( f \\\\) is the energy function \\\\( f = ( a^2 / 2 )\\\\phi^2(1 - \\\\phi)^2 \\\\)
which drives \\\\( \\\\phi \\\\) towards either 0 or 1, this competes with the term
\\\\(\\\\epsilon^2 \\\\nabla^2 \\\\phi \\\\) which is a diffusion term that creates smooth changes in \\\\( \\\\phi \\\\).
The equation can be factored:
.. math::
\\frac{\partial \phi}{\partial t} = \\nabla \cdot D \\nabla \psi \\\\
\psi = \\frac{\partial^2 f}{\partial \phi^2} (\phi - \phi^{\\text{old}}) + \\frac{\partial f}{\partial \phi} - \epsilon^2 \\nabla^2 \phi
Here we will need the derivatives of \\\\( f \\\\):
.. math::
\\frac{\partial f}{\partial \phi} = (a^2/2)2\phi(1-\phi)(1-2\phi)
\\frac{\partial^2 f}{\partial \phi^2} = (a^2/2)2[1-6\phi(1-\phi)]
The implementation below uses backwards Euler in time with an
exponentially increasing time step. The initial \\\\( \\\\phi \\\\)
is a normally distributed field with a standard deviation of 0.01 and
mean of 0.5. The grid is 60x60 and takes a few seconds to solve ~130
times. The results are seen below, and you can see the field separating
as the time increases.
.. _FiPy: https://github.com/usnistgov/fipy
.. http://www.ctcms.nist.gov/fipy/examples/cahnHilliard/generated/examples.cahnHilliard.mesh2DCoupled.html
"""
import discretize
from pymatsolver import Solver
import numpy as np
import matplotlib.pyplot as plt
def run(plotIt=True, n=60):
np.random.seed(5)
# Here we are going to rearrange the equations:
# (phi_ - phi)/dt = A*(d2fdphi2*(phi_ - phi) + dfdphi - L*phi_)
# (phi_ - phi)/dt = A*(d2fdphi2*phi_ - d2fdphi2*phi + dfdphi - L*phi_)
# (phi_ - phi)/dt = A*d2fdphi2*phi_ + A*( - d2fdphi2*phi + dfdphi - L*phi_)
# phi_ - phi = dt*A*d2fdphi2*phi_ + dt*A*(- d2fdphi2*phi + dfdphi - L*phi_)
# phi_ - dt*A*d2fdphi2 * phi_ = dt*A*(- d2fdphi2*phi + dfdphi - L*phi_) + phi
# (I - dt*A*d2fdphi2) * phi_ = dt*A*(- d2fdphi2*phi + dfdphi - L*phi_) + phi
# (I - dt*A*d2fdphi2) * phi_ = dt*A*dfdphi - dt*A*d2fdphi2*phi - dt*A*L*phi_ + phi
# (dt*A*d2fdphi2 - I) * phi_ = dt*A*d2fdphi2*phi + dt*A*L*phi_ - phi - dt*A*dfdphi
# (dt*A*d2fdphi2 - I - dt*A*L) * phi_ = (dt*A*d2fdphi2 - I)*phi - dt*A*dfdphi
h = [(0.25, n)]
M = discretize.TensorMesh([h, h])
# Constants
D = a = epsilon = 1.0
I = discretize.utils.speye(M.nC)
# Operators
A = D * M.faceDiv * M.cellGrad
L = epsilon ** 2 * M.faceDiv * M.cellGrad
duration = 75
elapsed = 0.0
dexp = -5
phi = np.random.normal(loc=0.5, scale=0.01, size=M.nC)
ii, jj = 0, 0
PHIS = []
capture = np.logspace(-1, np.log10(duration), 8)
while elapsed < duration:
dt = min(100, np.exp(dexp))
elapsed += dt
dexp += 0.05
dfdphi = a ** 2 * 2 * phi * (1 - phi) * (1 - 2 * phi)
d2fdphi2 = discretize.utils.sdiag(a ** 2 * 2 * (1 - 6 * phi * (1 - phi)))
MAT = dt * A * d2fdphi2 - I - dt * A * L
rhs = (dt * A * d2fdphi2 - I) * phi - dt * A * dfdphi
phi = Solver(MAT) * rhs
if elapsed > capture[jj]:
PHIS += [(elapsed, phi.copy())]
jj += 1
if ii % 10 == 0:
print(ii, elapsed)
ii += 1
if plotIt:
fig, axes = plt.subplots(2, 4, figsize=(14, 6))
axes = np.array(axes).flatten().tolist()
for ii, ax in zip(np.linspace(0, len(PHIS) - 1, len(axes)), axes):
ii = int(ii)
M.plotImage(PHIS[ii][1], ax=ax)
ax.axis("off")
ax.set_title("Elapsed Time: {0:4.1f}".format(PHIS[ii][0]))
if __name__ == "__main__":
run()
plt.show()
| mit | -7,125,815,124,115,153,000 | 34.336207 | 141 | 0.567212 | false |
dr4ke616/pinky | pinky/test/test_output.py | 1 | 1154 | # Copyright (c) 2012 - Oscar Campos <[email protected]>
# See LICENSE for more details
"""
Tests for pinky.scripts.output
This code is taken straight from https://github.com/PyMamba/mamba-framework
"""
from twisted.trial import unittest
from pinky.scripts.output import codes, _styles, style_to_ansi_code
class TestOutput(unittest.TestCase):
def test_colors(self):
for color in codes.keys():
if color not in ['normal', 'reset', 'underline', 'overline']:
objs = [color]
module_color = __import__(
'pinky.scripts.output', globals(), locals(), objs
)
test_color = getattr(module_color, objs[0])
if color in codes:
the_color = codes[color] + 'Test' + codes['reset']
elif color in _styles:
the_color = (
style_to_ansi_code(color) + 'Test' + codes['reset']
)
else:
the_color = 'Test'
self.assertEqual(test_color('Test'), the_color)
self.flushLoggedErrors()
| mit | -8,895,483,601,745,495,000 | 31.055556 | 75 | 0.538995 | false |
tvotyakov/codeeval | easy/not-so-clever/code.py | 1 | 2945 | #!python3
def sort_by_stupid_algorithm(in_list, iter_count):
    ''' (list, int) -> list
    Applies iter_count iterations of the "stupid sort" algorithm to the given
    input list in place (this means that the source in_list will be mutated).
    The resulting list is also returned.
>>> _list = []; print(sort_by_stupid_algorithm(_list, 1), _list)
[] []
>>> _list = [1]; print(sort_by_stupid_algorithm(_list, 2), _list)
[1] [1]
>>> _list = [3, 2, 1]; print(sort_by_stupid_algorithm(_list, 1), _list)
[2, 3, 1] [2, 3, 1]
>>> _list = [1, 2, 3]; print(sort_by_stupid_algorithm(_list, 2), _list)
[1, 2, 3] [1, 2, 3]
>>> _list = [5, 4, 9, 10, 7, 3, 2, 1, 6]; print(sort_by_stupid_algorithm(_list, 1), _list)
[4, 5, 9, 10, 7, 3, 2, 1, 6] [4, 5, 9, 10, 7, 3, 2, 1, 6]
>>> _list = [5, 4, 9, 10, 7, 3, 2, 1, 6]; print(sort_by_stupid_algorithm(_list, 2), _list)
[4, 5, 9, 7, 10, 3, 2, 1, 6] [4, 5, 9, 7, 10, 3, 2, 1, 6]
>>> _list = [9, 8, 7, 6, 5, 4, 3, 2, 1]; print(sort_by_stupid_algorithm(_list, 3), _list)
[7, 8, 9, 6, 5, 4, 3, 2, 1] [7, 8, 9, 6, 5, 4, 3, 2, 1]
'''
if len(in_list) < 2: return in_list
for iter_num in range(iter_count):
changed = False
for i in range(len(in_list) - 1):
current_val, next_val = in_list[i:i+2]
if current_val > next_val:
in_list[i], in_list[i + 1] = next_val, current_val
changed = True
break
if not changed: break
return in_list
def parse_in_str(in_str):
''' (string) -> tuple(list, integer)
Expects string with a list of integers separated by space followed by
an integer separated by a pipeline '|'. Returns those list and the integer.
>>> parse_in_str('1 2 3 4 | 1')
([1, 2, 3, 4], 1)
>>> parse_in_str('1 | 2')
([1], 2)
>>> parse_in_str('9 8 7 6 5 | 9')
([9, 8, 7, 6, 5], 9)
'''
_list, num = in_str.split('|')
_list = list(map(int, (i for i in _list.split(' ') if i)))
num = int(num)
return _list, num
def serialize_str(in_list):
''' (list of integers) -> string
Returns string with integers from the given list separated by space
>>> serialize_str([1, 2, 3])
'1 2 3'
>>> serialize_str([])
''
>>> serialize_str([1])
'1'
>>> serialize_str([99, 56, 100, 20, 33])
'99 56 100 20 33'
'''
return ' '.join(map(str, in_list))
if __name__ == '__main__':
import sys
if (len(sys.argv) <= 1):
import doctest
doctest.testmod()
else:
test_cases = open(sys.argv[1], 'r')
for test in test_cases:
test = test.rstrip('\n')
if not test: continue # ignore an empty line
print(serialize_str(sort_by_stupid_algorithm(*parse_in_str(test))))
test_cases.close()
| gpl-2.0 | -5,107,519,359,634,921,000 | 28.677083 | 94 | 0.504584 | false |
gvalkov/git-link | tests/test_git.py | 1 | 3345 | #!/usr/bin/env python
# encoding: utf-8
import pytest
from util import *
from gitlink.git import *
@pytest.fixture
def repo(request):
repo = Repo('https://github.com/gvalkov/git-link.git', '%s/test-git' % test_repo_dir, 'HEAD')
repo.clone()
repo.chdir()
return repo
def test_get_config(repo):
repo.config('link.browser', '123')
repo.config('link.url', 'asdf')
repo.config('link.clipboard', 'true')
assert get_config('link') == {
'url': 'asdf',
'browser': '123',
'clipboard': True
}
assert get_config('link', False) == {
'link.url': 'asdf',
'link.browser': '123',
'link.clipboard': True,
}
def test_cat_commit(repo):
assert cat_commit('v0.1.0') == {
'author': 'Georgi Valkov <[email protected]> 1329252276 +0200',
'committer': 'Georgi Valkov <[email protected]> 1329252276 +0200',
'sha': '29faca327f595c01f795f9a2e9c27dca8aabcaee',
'tree': '80f10ec249b6916adcf6c95f575a0125b8541c05',
'parent': 'f5d981a75b18533c270d4aa4ffffa9fcf67f9a8b',
}
def test_commit(repo):
assert commit('v0.1.0^') == commit('v0.1.0~')
assert commit('v0.1.0~5')['sha'] == 'eb17e35ec82ab6ac947d73d4dc782d1f680d191d'
def test_tree(repo):
assert tree('v0.1.0^^{tree}') == tree('v0.1.0~^{tree}')
assert tree('v0.1.0~5^{tree}')['sha'] == 'cfa9b260c33b9535c945c14564dd50c8ffa3c89e'
def test_blob(repo):
assert blob('v0.1.0^:setup.py') == blob('v0.1.0~:setup.py')
assert blob('v0.1.0~2:setup.py') == blob('v0.1.0~2:setup.py')
assert blob('v0.1.0~2:setup.py') == blob('v0.1.0~2:setup.py')
assert blob('v0.1.0~5:gitlink/main.py') == {
'path': 'gitlink/main.py',
'sha': '489de118c078bd472073d2f20267e441a931b9d0',
'type': LT.blob,
'commit_sha': 'eb17e35ec82ab6ac947d73d4dc782d1f680d191d',
'tree_sha': '42706139a2f62814251c5b027f8e9d38239fbcee'
}
def test_branch(repo):
assert branch('master')['ref'] == 'refs/remotes/origin/master'
assert branch('master')['shortref'] == 'master'
assert branch('origin/master')['ref'] == 'refs/remotes/origin/master'
assert branch('origin/master')['shortref'] == 'master'
def test_tag(repo):
assert cat_tag('v0.1.0') == {
'sha': '29faca327f595c01f795f9a2e9c27dca8aabcaee',
'tagger': 'Georgi Valkov <[email protected]> 1329252311 +0200',
'object': 'f54a0b6ad8518babf440db870dc778acc84877a8',
'type': 'commit',
'tag': 'v0.1.0'
}
def test_path(repo):
assert path('gitlink/main.py', 'v0.1.0') == {
'tree_sha': 'b8ff9fc80e42bec20cfb1638f4efa0215fe4987a',
'commit_sha': 'f54a0b6ad8518babf440db870dc778acc84877a8',
'top_tree_sha': '80f10ec249b6916adcf6c95f575a0125b8541c05',
'sha': 'd930815a23f0cf53a471e2993bc42401926793fa',
'path': 'gitlink/main.py',
'type': LT.blob,
}
assert path('tests', 'v0.1.0') == {
'tree_sha': 'None',
'commit_sha': 'f54a0b6ad8518babf440db870dc778acc84877a8',
'top_tree_sha': '80f10ec249b6916adcf6c95f575a0125b8541c05',
'sha': '1a5bf01fcd47ff9936aac0344c587b616f081dfd',
'path': 'tests',
'type': LT.path,
}
assert path('non-existant') == {}
| bsd-3-clause | 4,998,531,882,876,660,000 | 33.84375 | 97 | 0.612556 | false |
joakim-hove/ert | python/python/ert_gui/shell/export.py | 1 | 2789 | from __future__ import print_function
from ecl.util.util import IntVector
from res.enkf.enums import ErtImplType
from res.enkf.data import EnkfNode
from ert_gui.shell import assertConfigLoaded, ErtShellCollection
from ert_gui.shell.libshell import autoCompleteList, splitArguments
class Export(ErtShellCollection):
DEFAULT_EXPORT_PATH = "export/%s/%s_%%d"
def __init__(self, parent):
super(Export, self).__init__("export", parent)
default_path = Export.DEFAULT_EXPORT_PATH % ("{KEY}", "{KEY}")
self.addShellFunction(name="FIELD",
function=Export.exportFIELD,
completer=Export.completeFIELD,
help_arguments="<keyword> [%s] [1,4,7-10]" % default_path,
help_message="Export parameters; path and realisations in [...] are optional.")
def supportedFIELDKeys(self):
ens_config = self.ert().ensembleConfig()
key_list = ens_config.getKeylistFromImplType(ErtImplType.FIELD)
return key_list
@assertConfigLoaded
def completeFIELD(self, text, line, begidx, endidx):
arguments = splitArguments(line)
        if len(arguments) > 2 or (len(arguments) == 2 and not text):
return []
return autoCompleteList(text, self.supportedFIELDKeys())
@assertConfigLoaded
def exportFIELD(self, line):
arguments = splitArguments(line)
if len(arguments) >= 1:
ens_config = self.ert().ensembleConfig()
key = arguments[0]
if key in self.supportedFIELDKeys():
config_node = ens_config[key]
if len(arguments) >= 2:
path_fmt = arguments[1]
else:
path_fmt = Export.DEFAULT_EXPORT_PATH % (key, key) + ".grdecl"
if len(arguments) >= 3:
range_string = "".join(arguments[2:])
iens_list = IntVector.active_list(range_string)
else:
ens_size = self.ert().getEnsembleSize()
iens_list = IntVector.createRange(0, ens_size, 1)
fs_manager = self.ert().getEnkfFsManager()
fs = fs_manager.getCurrentFileSystem()
mc = self.ert().getModelConfig()
init_file = config_node.getInitFile(mc.getRunpathFormat())
if init_file:
print('Using init file: %s' % init_file)
EnkfNode.exportMany(config_node, path_fmt, fs, iens_list, arg=init_file)
else:
self.lastCommandFailed("No such FIELD node: %s" % key)
else:
self.lastCommandFailed("Expected at least one argument: <keyword> received: '%s'" % line)
| gpl-3.0 | -3,140,374,453,435,462,000 | 40.014706 | 109 | 0.574399 | false |
sapcc/monasca-agent | monasca_setup/service/linux.py | 1 | 5985 | # (C) Copyright 2015 Hewlett Packard Enterprise Development Company LP
""" Systemd based service
"""
import glob
import logging
import os
import pwd
import subprocess
import service
log = logging.getLogger(__name__)
class LinuxInit(service.Service):
"""Parent class for all Linux based init systems.
"""
def enable(self):
"""Does user/group directory creation.
"""
# Create user/group if needed
try:
user = pwd.getpwnam(self.username)
except KeyError:
subprocess.check_call(['useradd', '-r', self.username])
user = pwd.getpwnam(self.username)
# Create dirs
# todo log dir is hardcoded
for path in (self.log_dir, self.config_dir, '%s/conf.d' % self.config_dir):
if not os.path.exists(path):
os.makedirs(path, 0o755)
os.chown(path, 0, user.pw_gid)
# the log dir needs to be writable by the user
os.chown(self.log_dir, user.pw_uid, user.pw_gid)
def start(self, restart=True):
if not self.is_enabled():
log.error('The service is not enabled')
return False
def stop(self):
if not self.is_enabled():
log.error('The service is not enabled')
return True
def is_enabled(self):
"""Returns True if monasca-agent is setup to start on boot, false otherwise.
"""
raise NotImplementedError
class Systemd(LinuxInit):
def enable(self):
"""Sets monasca-agent to start on boot.
Generally this requires running as super user
"""
LinuxInit.enable(self)
# Write the systemd script
init_path = '/etc/systemd/system/{0}.service'.format(self.name)
with open(os.path.join(self.template_dir, 'monasca-agent.service.template'), 'r') as template:
with open(init_path, 'w') as service_script:
service_script.write(template.read().format(prefix=self.prefix_dir, monasca_user=self.username,
config_dir=self.config_dir))
os.chown(init_path, 0, 0)
os.chmod(init_path, 0o644)
# Enable the service
subprocess.check_call(['systemctl', 'daemon-reload'])
subprocess.check_call(['systemctl', 'enable', '{0}.service'.format(self.name)])
log.info('Enabled {0} service via systemd'.format(self.name))
def start(self, restart=True):
"""Starts monasca-agent.
If the agent is running and restart is True, restart
"""
LinuxInit.start(self)
log.info('Starting {0} service via systemd'.format(self.name))
if restart:
subprocess.check_call(['systemctl', 'restart', '{0}.service'.format(self.name)])
else:
subprocess.check_call(['systemctl', 'start', '{0}.service'.format(self.name)])
return True
def stop(self):
"""Stops monasca-agent.
"""
LinuxInit.stop(self)
log.info('Stopping {0} service'.format(self.name))
subprocess.check_call(['systemctl', 'stop', '{0}.service'.format(self.name)])
return True
def is_enabled(self):
"""Returns True if monasca-agent is setup to start on boot, false otherwise.
"""
try:
subprocess.check_output(['systemctl', 'is-enabled', '{0}.service'.format(self.name)])
except subprocess.CalledProcessError:
return False
return True
class SysV(LinuxInit):
def __init__(self, prefix_dir, config_dir, log_dir, template_dir, username, name='monasca-agent'):
"""Setup this service with the given init template.
"""
service.Service.__init__(self, prefix_dir, config_dir, log_dir, template_dir, name, username)
self.init_script = '/etc/init.d/%s' % self.name
self.init_template = os.path.join(template_dir, 'monasca-agent.init.template')
def enable(self):
"""Sets monasca-agent to start on boot.
Generally this requires running as super user
"""
LinuxInit.enable(self)
# Write the init script and enable.
with open(self.init_template, 'r') as template:
with open(self.init_script, 'w') as conf:
conf.write(template.read().format(prefix=self.prefix_dir, monasca_user=self.username,
config_dir=self.config_dir))
os.chown(self.init_script, 0, 0)
os.chmod(self.init_script, 0o755)
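        # symlink into rc2-rc5 so the agent starts in all multi-user runlevels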
for runlevel in ['2', '3', '4', '5']:
link_path = '/etc/rc%s.d/S10monasca-agent' % runlevel
if not os.path.exists(link_path):
os.symlink(self.init_script, link_path)
log.info('Enabled {0} service via SysV init script'.format(self.name))
def start(self, restart=True):
"""Starts monasca-agent.
If the agent is running and restart is True, restart
"""
LinuxInit.start(self)
log.info('Starting {0} service via SysV init script'.format(self.name))
if restart:
subprocess.check_call([self.init_script, 'restart']) # Throws CalledProcessError on error
else:
subprocess.check_call([self.init_script, 'start']) # Throws CalledProcessError on error
return True
def stop(self):
"""Stops monasca-agent.
"""
LinuxInit.stop(self)
log.info('Stopping {0} service via SysV init script'.format(self.name))
subprocess.check_call([self.init_script, 'stop']) # Throws CalledProcessError on error
return True
def is_enabled(self):
"""Returns True if monasca-agent is setup to start on boot, false otherwise.
"""
if not os.path.exists(self.init_script):
return False
if len(glob.glob('/etc/rc?.d/S??monasca-agent')) > 0:
return True
else:
return False
| bsd-3-clause | -2,910,082,518,258,402,300 | 33.005682 | 111 | 0.590977 | false |
adamziel/python_translate | python_translate/extractors/python.py | 1 | 6128 | # -*- coding: utf-8 -*-
"""
This file is a part of python_translate package
(c) Adam Zieliński <[email protected]>
For the full copyright and license information, please view the LICENSE and LICENSE_SYMFONY_TRANSLATION
files that were distributed with this source code.
"""
import ast
import codegen
from python_translate.extractors.base import Translation, TransVar, ExtensionBasedExtractor
class PythonExtractor(ExtensionBasedExtractor):
def __init__(
self,
file_extensions=None,
tranz_functions=None,
tranzchoice_functions=None):
file_extensions = file_extensions if file_extensions is not None else (
"*.py",
)
self.tranz_functions = tranz_functions if tranz_functions is not None else (
'_',
'tranz')
self.tranzchoice_functions = tranzchoice_functions if tranzchoice_functions is not None else (
'tranzchoice',
)
super(PythonExtractor, self).__init__(file_extensions=file_extensions)
def extract_translations(self, string):
"""Extract messages from Python string."""
tree = ast.parse(string)
# ast_visit(tree)
visitor = TransVisitor(
self.tranz_functions,
self.tranzchoice_functions)
visitor.visit(tree)
return visitor.translations
class TransVisitor(ast.NodeVisitor):
def __init__(self, tranz_functions, tranzchoice_functions):
self.tranz_functions = tranz_functions
self.tranzchoice_functions = tranzchoice_functions
self.translations = []
super(TransVisitor, self).__init__()
def visit(self, node):
if isinstance(node, ast.Call):
self.process_node(node)
return self.generic_visit(node)
def process_node(self, node):
func_name = self.get_func_name(node.func)
if func_name not in self.tranz_functions + self.tranzchoice_functions:
return
kwargs = {}
# Arguments
        kwargs['id'] = (self.prepare_arg(node.args[0])
                        if len(node.args) > 0 else None)
        idx = 1
        if func_name in self.tranzchoice_functions:
            kwargs['number'] = (self.prepare_arg(node.args[1])
                                if len(node.args) > 1 else None)
            idx += 1
        kwargs['parameters'] = (self.parse_kwargs(node.args[idx])
                                if len(node.args) > idx else None)
        kwargs['domain'] = (self.prepare_arg(node.args[idx + 1])
                            if len(node.args) > idx + 1 else None)
        kwargs['locale'] = (self.prepare_arg(node.args[idx + 2])
                            if len(node.args) > idx + 2 else None)
# Keyword arguments
if node.keywords:
for keyword in node.keywords:
if keyword.arg == "id" and not kwargs['id']:
kwargs['id'] = self.prepare_arg(keyword.value)
if keyword.arg == "number" and not kwargs['number']:
kwargs['number'] = self.prepare_arg(keyword.value)
if keyword.arg == "domain" and not kwargs['domain']:
kwargs['domain'] = self.prepare_arg(keyword.value)
if keyword.arg == 'parameters':
kwargs['parameters'] = self.parse_kwargs(keyword.value)
if keyword.arg == 'locale':
kwargs['locale'] = self.parse_kwargs(keyword.value)
# Splats
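        # *args/**kwargs cannot be resolved statically, so record the splat
        # variable name for every field that is still missing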
if node.starargs or node.kwargs:
_id = "*" + node.starargs.id if node.starargs else "**" + \
node.kwargs.id
            if not kwargs.get('number'):
                kwargs['number'] = TransVar(_id, TransVar.VARNAME)
if not kwargs['id']:
kwargs['id'] = TransVar(_id, TransVar.VARNAME)
if not kwargs['domain']:
kwargs['domain'] = TransVar(_id, TransVar.VARNAME)
if not kwargs['locale']:
kwargs['locale'] = TransVar(_id, TransVar.VARNAME)
if not kwargs['parameters']:
kwargs['parameters'] = self.parse_kwargs(kwargs['parameters'])
fixed = ast.fix_missing_locations(node)
kwargs.update({
"is_transchoice": func_name in self.tranzchoice_functions,
"lineno": fixed.lineno,
"column": fixed.col_offset,
})
self.translations.append(Translation(**kwargs))
def parse_kwargs(self, Dict):
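        # only a dict literal with plain string keys can be extracted
        # statically; anything else is recorded as raw source text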
if not isinstance(Dict, ast.Dict):
return self.expr_to_source(Dict)
parameters = []
for k in Dict.keys:
if isinstance(k, ast.Str):
parameters.append(k.s)
else:
return self.expr_to_source(Dict)
return TransVar(parameters, TransVar.LITERAL)
def expr_to_source(self, expr):
try:
src = codegen.to_source(expr)
except Exception as e:
src = "-unknown-"
return TransVar(src, TransVar.UNKNOWN)
def prepare_arg(self, value):
if value is None:
return None
if isinstance(value, ast.Str):
return TransVar(value.s, TransVar.LITERAL)
if isinstance(value, ast.Num):
return TransVar(value.n, TransVar.LITERAL)
if isinstance(value, ast.Attribute):
            return TransVar(
                value.attr if isinstance(value.attr, str) else value.attr.id,
                TransVar.VARNAME)
if isinstance(value, ast.Call):
return TransVar(self.get_func_name(value), TransVar.VARNAME)
return TransVar(None, TransVar.UNKNOWN)
def get_func_name(self, func):
if isinstance(func, ast.Attribute):
return self.get_attr_name(func.attr)
elif isinstance(func, ast.Name):
return func.id
        else:
            # lambda or other callable that cannot be resolved to a name
            return None
def get_attr_name(self, attr):
return attr if isinstance(attr, str) else attr.id
| mit | 6,097,684,120,699,615,000 | 31.590426 | 103 | 0.567488 | false |
OpenMined/PySyft | packages/syft/tests/syft/lib/python/dict/dict_test.py | 1 | 51498 | # flake8: noqa
"""
Tests copied from cpython test suite:
https://github.com/python/cpython/blob/3.9/Lib/test/test_dict.py
"""
# stdlib
import collections
import collections.abc
import gc
import pickle
import random
import string
import sys
from test import support
import unittest
import weakref
# third party
import pytest
# syft absolute
from syft.lib.python.dict import Dict
from syft.lib.python.none import SyNone
from syft.lib.python.string import String
# import weakref
class DictTest(unittest.TestCase):
def test_invalid_keyword_arguments(self):
class Custom(dict):
pass
for invalid in {1: 2}, Custom({1: 2}):
with self.assertRaises(TypeError):
dict(**invalid)
with self.assertRaises(TypeError):
{}.update(**invalid)
def test_constructor(self):
# calling built-in types without argument must return empty
self.assertEqual(Dict(), {})
self.assertIsNot(Dict(), {})
@pytest.mark.slow
def test_literal_constructor(self):
# check literal constructor for different sized dicts
# (to exercise the BUILD_MAP oparg).
for n in (0, 1, 6, 256, 400):
items = [
("".join(random.sample(string.ascii_letters, 8)), i) for i in range(n)
]
random.shuffle(items)
formatted_items = (f"{k!r}: {v:d}" for k, v in items)
dictliteral = "{" + ", ".join(formatted_items) + "}"
self.assertEqual(eval(dictliteral), dict(items))
def test_merge_operator(self):
a = Dict({0: 0, 1: 1, 2: 1})
b = Dict({1: 1, 2: 2, 3: 3})
if sys.version_info >= (3, 9):
c = a.copy()
c |= b
self.assertEqual(a | b, Dict({0: 0, 1: 1, 2: 2, 3: 3}))
self.assertEqual(c, Dict({0: 0, 1: 1, 2: 2, 3: 3}))
c = b.copy()
c |= a
self.assertEqual(b | a, Dict({1: 1, 2: 1, 3: 3, 0: 0}))
self.assertEqual(c, Dict({1: 1, 2: 1, 3: 3, 0: 0}))
c = a.copy()
c |= [(1, 1), (2, 2), (3, 3)]
self.assertEqual(c, Dict({0: 0, 1: 1, 2: 2, 3: 3}))
self.assertIs(a.__or__(None), NotImplemented)
self.assertIs(a.__or__(()), NotImplemented)
self.assertIs(a.__or__("BAD"), NotImplemented)
self.assertIs(a.__or__(""), NotImplemented)
self.assertRaises(TypeError, a.__ior__, None)
self.assertEqual(a.__ior__(()), {0: 0, 1: 1, 2: 1})
self.assertRaises(ValueError, a.__ior__, "BAD")
self.assertEqual(a.__ior__(""), {0: 0, 1: 1, 2: 1})
def test_bool(self):
self.assertIs(not {}, True)
self.assertTrue(Dict({1: 2}))
self.assertIs(bool(Dict({})), False)
self.assertIs(bool(Dict({1: 2})), True)
def test_keys(self):
d = Dict()
self.assertEqual(set(d.keys()), set())
d = {"a": 1, "b": 2}
k = d.keys()
self.assertEqual(set(k), {"a", "b"})
self.assertIn("a", k)
self.assertIn("b", k)
self.assertIn("a", d)
self.assertIn("b", d)
self.assertRaises(TypeError, d.keys, None)
self.assertEqual(repr(dict(a=1).keys()), "dict_keys(['a'])")
def test_values(self):
d = Dict()
self.assertEqual(set(d.values()), set())
d = Dict({1: 2})
self.assertEqual(set(d.values()), {2})
self.assertRaises(TypeError, d.values, None)
self.assertEqual(repr(dict(a=1).values()), "dict_values([1])")
@pytest.mark.xfail
def test_items(self):
# TODO: support this when we have sets:
d = Dict()
self.assertEqual(set(d.items()), set())
d = Dict({1: 2})
self.assertEqual(set(d.items()), {(1, 2)})
self.assertRaises(TypeError, d.items, None)
self.assertEqual(repr(dict(a=1).items()), "dict_items([('a', 1)])")
def test_contains(self):
d = Dict()
self.assertNotIn("a", d)
self.assertFalse("a" in d)
self.assertTrue("a" not in d)
d = Dict({"a": 1, "b": 2})
self.assertIn("a", d)
self.assertIn("b", d)
self.assertNotIn("c", d)
self.assertRaises(TypeError, d.__contains__)
def test_len(self):
d = Dict()
self.assertEqual(len(d), 0)
d = Dict({"a": 1, "b": 2})
self.assertEqual(len(d), 2)
def test_getitem(self):
d = Dict({"a": 1, "b": 2})
self.assertEqual(d["a"], 1)
self.assertEqual(d["b"], 2)
d["c"] = 3
d["a"] = 4
self.assertEqual(d["c"], 3)
self.assertEqual(d["a"], 4)
del d["b"]
self.assertEqual(d, {"a": 4, "c": 3})
self.assertRaises(TypeError, d.__getitem__)
class BadEq(object):
def __eq__(self, other):
raise Exc()
def __hash__(self):
return 24
d = Dict()
d[BadEq()] = 42
self.assertRaises(KeyError, d.__getitem__, 23)
class Exc(Exception):
pass
class BadHash(object):
fail = False
def __hash__(self):
if self.fail:
raise Exc()
else:
return 42
x = BadHash()
d[x] = 42
x.fail = True
self.assertRaises(Exc, d.__getitem__, x)
def test_clear(self):
d = Dict({1: 1, 2: 2, 3: 3})
d.clear()
self.assertEqual(d, {})
self.assertRaises(TypeError, d.clear, None)
def test_update(self):
d = Dict()
d.update({1: 100})
d.update({2: 20})
d.update({1: 1, 2: 2, 3: 3})
self.assertEqual(d, {1: 1, 2: 2, 3: 3})
d.update()
self.assertEqual(d, {1: 1, 2: 2, 3: 3})
self.assertRaises((TypeError, AttributeError), d.update, None)
class SimpleUserDict:
def __init__(self):
self.d = {1: 1, 2: 2, 3: 3}
def keys(self):
return self.d.keys()
def __getitem__(self, i):
return self.d[i]
d.clear()
d.update(SimpleUserDict())
self.assertEqual(d, {1: 1, 2: 2, 3: 3})
class Exc(Exception):
pass
d.clear()
class FailingUserDict:
def keys(self):
raise Exc
self.assertRaises(Exc, d.update, FailingUserDict())
class FailingUserDict:
def keys(self):
class BogonIter:
def __init__(self):
self.i = 1
def __iter__(self):
return self
def __next__(self):
if self.i:
self.i = 0
return "a"
raise Exc
return BogonIter()
def __getitem__(self, key):
return key
self.assertRaises(Exc, d.update, FailingUserDict())
class FailingUserDict:
def keys(self):
class BogonIter:
def __init__(self):
self.i = ord("a")
def __iter__(self):
return self
def __next__(self):
if self.i <= ord("z"):
rtn = chr(self.i)
self.i += 1
return rtn
raise StopIteration
return BogonIter()
def __getitem__(self, key):
raise Exc
self.assertRaises(Exc, d.update, FailingUserDict())
class badseq(object):
def __iter__(self):
return self
def __next__(self):
raise Exc()
self.assertRaises(Exc, {}.update, badseq())
self.assertRaises(ValueError, {}.update, [(1, 2, 3)])
def test_fromkeys(self):
self.assertEqual(dict.fromkeys("abc"), {"a": None, "b": None, "c": None})
d = Dict()
self.assertIsNot(d.fromkeys("abc"), d)
self.assertEqual(d.fromkeys("abc"), {"a": None, "b": None, "c": None})
self.assertEqual(d.fromkeys((4, 5), 0), {4: 0, 5: 0})
self.assertEqual(d.fromkeys([]), {})
def g():
yield 1
self.assertEqual(d.fromkeys(g()), {1: None})
self.assertRaises(TypeError, {}.fromkeys, 3)
class dictlike(dict):
pass
self.assertEqual(dictlike.fromkeys("a"), {"a": None})
self.assertEqual(dictlike().fromkeys("a"), {"a": None})
self.assertIsInstance(dictlike.fromkeys("a"), dictlike)
self.assertIsInstance(dictlike().fromkeys("a"), dictlike)
class mydict(dict):
def __new__(cls):
return Dict()
ud = mydict.fromkeys("ab")
self.assertEqual(ud, {"a": None, "b": None})
self.assertIsInstance(ud, Dict)
self.assertRaises(TypeError, dict.fromkeys)
class Exc(Exception):
pass
class baddict1(dict):
def __init__(self):
raise Exc()
self.assertRaises(Exc, baddict1.fromkeys, [1])
class BadSeq(object):
def __iter__(self):
return self
def __next__(self):
raise Exc()
self.assertRaises(Exc, dict.fromkeys, BadSeq())
class baddict2(dict):
def __setitem__(self, key, value):
raise Exc()
self.assertRaises(Exc, baddict2.fromkeys, [1])
# test fast path for dictionary inputs
d = dict(zip(range(6), range(6)))
self.assertEqual(dict.fromkeys(d, 0), dict(zip(range(6), [0] * 6)))
class baddict3(dict):
def __new__(cls):
return d
d = {i: i for i in range(10)}
res = d.copy()
res.update(a=None, b=None, c=None)
self.assertEqual(baddict3.fromkeys({"a", "b", "c"}), res)
def test_copy(self):
d = Dict({1: 1, 2: 2, 3: 3})
self.assertIsNot(d.copy(), d)
self.assertEqual(d.copy(), d)
self.assertEqual(d.copy(), {1: 1, 2: 2, 3: 3})
copy = d.copy()
d[4] = 4
self.assertNotEqual(copy, d)
self.assertEqual({}.copy(), {})
self.assertRaises(TypeError, d.copy, None)
@pytest.mark.slow
def test_copy_fuzz(self):
for dict_size in [10, 100, 1000]: # TODO: 10000, 100000
dict_size = random.randrange(dict_size // 2, dict_size + dict_size // 2)
with self.subTest(dict_size=dict_size):
d = Dict()
for i in range(dict_size):
d[i] = i
d2 = d.copy()
self.assertIsNot(d2, d)
self.assertEqual(d, d2)
d2["key"] = "value"
self.assertNotEqual(d, d2)
self.assertEqual(len(d2), len(d) + 1)
def test_copy_maintains_tracking(self):
class A:
pass
key = A()
for d in (Dict(), Dict({"a": 1}), Dict({key: "val"})):
d2 = d.copy()
self.assertEqual(gc.is_tracked(d), gc.is_tracked(d2))
def test_copy_noncompact(self):
# Dicts don't compact themselves on del/pop operations.
# Copy will use a slow merging strategy that produces
# a compacted copy when roughly 33% of dict is a non-used
# keys-space (to optimize memory footprint).
# In this test we want to hit the slow/compacting
# branch of dict.copy() and make sure it works OK.
d = Dict({k: k for k in range(1000)})
for k in range(950):
del d[k]
d2 = d.copy()
self.assertEqual(d2, d)
def test_get(self):
# We call dict_get because of the conflict with our "get" method
# from pointers
d = Dict()
self.assertIs(d.dict_get("c"), SyNone)
self.assertEqual(d.dict_get("c", 3), 3)
d = Dict({"a": 1, "b": 2})
self.assertIs(d.dict_get("c"), SyNone)
self.assertEqual(d.dict_get("c", 3), 3)
self.assertEqual(d.dict_get("a"), 1)
self.assertEqual(d.dict_get("a", 3), 1)
self.assertRaises(TypeError, d.get)
self.assertRaises(TypeError, d.get, None, None, None)
def test_setdefault(self):
# dict.setdefault()
d = Dict()
self.assertIs(d.setdefault("key0"), SyNone)
d.setdefault("key0", [])
self.assertIs(d.setdefault("key0"), SyNone)
d.setdefault("key", []).append(3)
self.assertEqual(d["key"][0], 3)
d.setdefault("key", []).append(4)
self.assertEqual(len(d["key"]), 2)
self.assertRaises(TypeError, d.setdefault)
class Exc(Exception):
pass
class BadHash(object):
fail = False
def __hash__(self):
if self.fail:
raise Exc()
else:
return 42
x = BadHash()
d[x] = 42
x.fail = True
self.assertRaises(Exc, d.setdefault, x, [])
def test_setdefault_atomic(self):
# Issue #13521: setdefault() calls __hash__ and __eq__ only once.
class Hashed(object):
def __init__(self):
self.hash_count = 0
self.eq_count = 0
def __hash__(self):
self.hash_count += 1
return 42
def __eq__(self, other):
self.eq_count += 1
return id(self) == id(other)
hashed1 = Hashed()
y = {hashed1: 5}
hashed2 = Hashed()
y.setdefault(hashed2, [])
self.assertEqual(hashed1.hash_count, 1)
self.assertEqual(hashed2.hash_count, 1)
self.assertEqual(hashed1.eq_count + hashed2.eq_count, 1)
def test_setitem_atomic_at_resize(self):
class Hashed(object):
def __init__(self):
self.hash_count = 0
self.eq_count = 0
def __hash__(self):
self.hash_count += 1
return 42
def __eq__(self, other):
self.eq_count += 1
return id(self) == id(other)
hashed1 = Hashed()
# 5 items
y = Dict({hashed1: 5, 0: 0, 1: 1, 2: 2, 3: 3})
hashed2 = Hashed()
# 6th item forces a resize
y[hashed2] = []
        # UserDict hashes the key three times here where a plain dict hashes it
        # once; since Dict subclasses UserDict, matching UserDict is correct.
# self.assertEqual(hashed1.hash_count, 1)
self.assertEqual(hashed1.hash_count, 3)
self.assertEqual(hashed2.hash_count, 1)
self.assertEqual(hashed1.eq_count + hashed2.eq_count, 1)
@pytest.mark.slow
def test_popitem(self):
# dict.popitem()
for copymode in -1, +1:
# -1: b has same structure as a
# +1: b is a.copy()
for log2size in range(12):
size = 2 ** log2size
a = Dict()
b = Dict()
for i in range(size):
a[repr(i)] = i
if copymode < 0:
b[repr(i)] = i
if copymode > 0:
b = a.copy()
for i in range(size):
ka, va = ta = a.popitem()
self.assertEqual(va, ka.__int__())
kb, vb = tb = b.popitem()
self.assertEqual(vb, kb.__int__())
self.assertFalse(copymode < 0 and ta != tb)
self.assertFalse(a)
self.assertFalse(b)
d = {}
self.assertRaises(KeyError, d.popitem)
def test_pop(self):
# Tests for pop with specified key
d = Dict()
k, v = "abc", "def"
d[k] = v
self.assertRaises(KeyError, d.pop, "ghi")
self.assertEqual(d.pop(k), v)
self.assertEqual(len(d), 0)
self.assertRaises(KeyError, d.pop, k)
self.assertEqual(d.pop(k, v), v)
d[k] = v
self.assertEqual(d.pop(k, 1), v)
self.assertRaises(TypeError, d.pop)
class Exc(Exception):
pass
class BadHash(object):
fail = False
def __hash__(self):
if self.fail:
raise Exc()
else:
return 42
x = BadHash()
d[x] = 42
x.fail = True
self.assertRaises(Exc, d.pop, x)
def test_mutating_iteration(self):
# changing dict size during iteration
d = Dict()
d[1] = 1
with self.assertRaises(RuntimeError):
for i in d:
d[i + 1] = 1
def test_mutating_iteration_delete(self):
# change dict content during iteration
d = Dict()
d[0] = 0
        # python 3.8+ raises RuntimeError but older versions do not
if sys.version_info >= (3, 8):
with self.assertRaises(RuntimeError):
for i in d:
del d[0]
d[0] = 0
def test_mutating_iteration_delete_over_values(self):
# change dict content during iteration
d = Dict()
d[0] = 0
        # python 3.8+ raises RuntimeError but older versions do not
if sys.version_info >= (3, 8):
with self.assertRaises(RuntimeError):
for i in d.values():
del d[0]
d[0] = 0
@pytest.mark.xfail
def test_mutating_iteration_delete_over_items(self):
# TODO: proper iterators needed over the views, currently, we convert them to lists
# change dict content during iteration
d = Dict()
d[0] = 0
if sys.version_info >= (3, 8):
with self.assertRaises(RuntimeError):
for i in d.items():
del d[0]
d[0] = 0
@pytest.mark.xfail
def test_mutating_lookup(self):
# changing dict during a lookup (issue #14417)
# TODO: investigate this at some point
class NastyKey:
mutate_dict = None
def __init__(self, value):
self.value = value
def __hash__(self):
# hash collision!
return 1
def __eq__(self, other):
if NastyKey.mutate_dict:
mydict, key = NastyKey.mutate_dict
NastyKey.mutate_dict = None
del mydict[key]
return self.value == other.value
key1 = NastyKey(1)
key2 = NastyKey(2)
d = Dict({key1: 1})
NastyKey.mutate_dict = (d, key1)
d[key2] = 2
self.assertEqual(d, {key2: 2})
def test_repr(self):
d = Dict()
self.assertEqual(repr(d), "{}")
d[1] = 2
self.assertEqual(repr(d), "{1: 2}")
d = Dict()
d[1] = d
self.assertEqual(repr(d), "{1: {...}}")
class Exc(Exception):
pass
class BadRepr(object):
def __repr__(self):
raise Exc()
d = Dict({1: BadRepr()})
self.assertRaises(Exc, repr, d)
def test_repr_deep(self):
d = Dict()
for i in range(sys.getrecursionlimit() + 100):
d = Dict({1: d})
self.assertRaises(RecursionError, repr, d)
def test_eq(self):
self.assertEqual(Dict(), {})
self.assertEqual(Dict({1: 2}), {1: 2})
class Exc(Exception):
pass
class BadCmp(object):
def __eq__(self, other):
raise Exc()
def __hash__(self):
return 1
d1 = Dict({BadCmp(): 1})
d2 = Dict({1: 1})
with self.assertRaises(Exc):
d1 == d2
@pytest.mark.xfail
def test_keys_contained(self):
self.helper_keys_contained(lambda x: x.keys())
self.helper_keys_contained(lambda x: x.items())
@pytest.mark.xfail
def helper_keys_contained(self, fn):
# TODO add this when we have set support
# Test rich comparisons against dict key views, which should behave the
# same as sets.
empty = fn(Dict())
empty2 = fn(Dict())
smaller = fn(Dict({1: 1, 2: 2}))
larger = fn(Dict({1: 1, 2: 2, 3: 3}))
larger2 = fn(Dict({1: 1, 2: 2, 3: 3}))
larger3 = fn(Dict({4: 1, 2: 2, 3: 3}))
self.assertTrue(smaller < larger)
self.assertTrue(smaller <= larger)
self.assertTrue(larger > smaller)
self.assertTrue(larger >= smaller)
self.assertFalse(smaller >= larger)
self.assertFalse(smaller > larger)
self.assertFalse(larger <= smaller)
self.assertFalse(larger < smaller)
self.assertFalse(smaller < larger3)
self.assertFalse(smaller <= larger3)
self.assertFalse(larger3 > smaller)
self.assertFalse(larger3 >= smaller)
# Inequality strictness
self.assertTrue(larger2 >= larger)
self.assertTrue(larger2 <= larger)
self.assertFalse(larger2 > larger)
self.assertFalse(larger2 < larger)
self.assertTrue(larger == larger2)
self.assertTrue(smaller != larger)
# There is an optimization on the zero-element case.
self.assertTrue(empty == empty2)
self.assertFalse(empty != empty2)
self.assertFalse(empty == smaller)
self.assertTrue(empty != smaller)
# With the same size, an elementwise compare happens
self.assertTrue(larger != larger3)
self.assertFalse(larger == larger3)
@pytest.mark.xfail
def test_errors_in_view_containment_check(self):
# TODO: add support for custom objects
class C:
def __eq__(self, other):
raise RuntimeError
d1 = Dict({1: C()})
d2 = Dict({1: C()})
with self.assertRaises(RuntimeError):
d1.items() == d2.items()
with self.assertRaises(RuntimeError):
d1.items() != d2.items()
with self.assertRaises(RuntimeError):
d1.items() <= d2.items()
with self.assertRaises(RuntimeError):
d1.items() >= d2.items()
d3 = Dict({1: C(), 2: C()})
with self.assertRaises(RuntimeError):
d2.items() < d3.items()
with self.assertRaises(RuntimeError):
d3.items() > d2.items()
@pytest.mark.xfail
def test_dictview_set_operations_on_keys(self):
# TODO add support for sets
k1 = Dict({1: 1, 2: 2}).keys()
k2 = Dict({1: 1, 2: 2, 3: 3}).keys()
k3 = Dict({4: 4}).keys()
self.assertEqual(k1 - k2, set())
self.assertEqual(k1 - k3, {1, 2})
self.assertEqual(k2 - k1, {3})
self.assertEqual(k3 - k1, {4})
self.assertEqual(k1 & k2, {1, 2})
self.assertEqual(k1 & k3, set())
self.assertEqual(k1 | k2, {1, 2, 3})
self.assertEqual(k1 ^ k2, {3})
self.assertEqual(k1 ^ k3, {1, 2, 4})
@pytest.mark.xfail
def test_dictview_set_operations_on_items(self):
# TODO add support for sets
k1 = Dict({1: 1, 2: 2}).items()
k2 = Dict({1: 1, 2: 2, 3: 3}).items()
k3 = Dict({4: 4}).items()
self.assertEqual(k1 - k2, set())
self.assertEqual(k1 - k3, {(1, 1), (2, 2)})
self.assertEqual(k2 - k1, {(3, 3)})
self.assertEqual(k3 - k1, {(4, 4)})
self.assertEqual(k1 & k2, {(1, 1), (2, 2)})
self.assertEqual(k1 & k3, set())
self.assertEqual(k1 | k2, {(1, 1), (2, 2), (3, 3)})
self.assertEqual(k1 ^ k2, {(3, 3)})
self.assertEqual(k1 ^ k3, {(1, 1), (2, 2), (4, 4)})
@pytest.mark.xfail
def test_dictview_mixed_set_operations(self):
# TODO add support for sets
# Just a few for .keys()
self.assertTrue(Dict({1: 1}).keys() == {1})
self.assertEqual(Dict({1: 1}).keys() | {2}, {1, 2})
# And a few for .items()
self.assertTrue(Dict({1: 1}).items() == {(1, 1)})
# This test has been changed to reflect the behavior of UserDict
self.assertTrue(Dict({(1, 1)}) == {1: 1})
# UserDict does not support init with set items like:
# UserDict({2}) so neither do we with Dict
with pytest.raises(TypeError):
self.assertEqual(Dict({2}) | Dict({1: 1}).keys(), {1, 2})
self.assertTrue(Dict({1}) == {1: 1}.keys())
self.assertEqual(Dict({2}) | Dict({1: 1}).items(), {(1, 1), 2})
self.assertEqual(Dict({1: 1}).items() | Dict({2}), {(1, 1), 2})
def test_missing(self):
# Make sure dict doesn't have a __missing__ method
self.assertFalse(hasattr(Dict, "__missing__"))
self.assertFalse(hasattr(Dict(), "__missing__"))
# Test several cases:
# (D) subclass defines __missing__ method returning a value
# (E) subclass defines __missing__ method raising RuntimeError
# (F) subclass sets __missing__ instance variable (no effect)
# (G) subclass doesn't define __missing__ at all
class D(Dict):
def __missing__(self, key):
return 42
d = D({1: 2, 3: 4})
self.assertEqual(d[1], 2)
self.assertEqual(d[3], 4)
self.assertNotIn(2, d)
self.assertNotIn(2, d.keys())
self.assertEqual(d[2], 42)
class E(dict):
def __missing__(self, key):
raise RuntimeError(key)
e = E()
with self.assertRaises(RuntimeError) as c:
e[42]
self.assertEqual(c.exception.args, (42,))
class F(dict):
def __init__(self):
# An instance variable __missing__ should have no effect
self.__missing__ = lambda key: None
f = F()
with self.assertRaises(KeyError) as c:
f[42]
self.assertEqual(c.exception.args, (42,))
class G(dict):
pass
g = G()
with self.assertRaises(KeyError) as c:
g[42]
self.assertEqual(c.exception.args, (42,))
def test_tuple_keyerror(self):
# SF #1576657
d = Dict()
with self.assertRaises(KeyError) as c:
d[(1,)]
self.assertEqual(c.exception.args, ((1,),))
def test_bad_key(self):
# Dictionary lookups should fail if __eq__() raises an exception.
class CustomException(Exception):
pass
class BadDictKey:
def __hash__(self):
return hash(self.__class__)
def __eq__(self, other):
if isinstance(other, self.__class__):
raise CustomException
return other
d = Dict()
x1 = BadDictKey()
x2 = BadDictKey()
d[x1] = 1
for stmt in [
"d[x2] = 2",
"z = d[x2]",
"x2 in d",
"d.get(x2)",
"d.setdefault(x2, 42)",
"d.pop(x2)",
"d.update({x2: 2})",
]:
with self.assertRaises(CustomException):
exec(stmt, locals())
def test_resize1(self):
# Dict resizing bug, found by Jack Jansen in 2.2 CVS development.
# This version got an assert failure in debug build, infinite loop in
# release build. Unfortunately, provoking this kind of stuff requires
# a mix of inserts and deletes hitting exactly the right hash codes in
# exactly the right order, and I can't think of a randomized approach
# that would be *likely* to hit a failing case in reasonable time.
d = Dict()
for i in range(5):
d[i] = i
for i in range(5):
del d[i]
for i in range(5, 9): # i==8 was the problem
d[i] = i
def test_resize2(self):
# Another dict resizing bug (SF bug #1456209).
# This caused Segmentation faults or Illegal instructions.
class X(object):
def __hash__(self):
return 5
def __eq__(self, other):
if resizing:
d.clear()
return False
d = Dict()
resizing = False
d[X()] = 1
d[X()] = 2
d[X()] = 3
d[X()] = 4
d[X()] = 5
# now trigger a resize
resizing = True
d[9] = 6
def test_empty_presized_dict_in_freelist(self):
# Bug #3537: if an empty but presized dict with a size larger
# than 7 was in the freelist, it triggered an assertion failure
with self.assertRaises(ZeroDivisionError):
d = Dict(
{
"a": 1 // 0,
"b": None,
"c": None,
"d": None,
"e": None,
"f": None,
"g": None,
"h": None,
}
)
d.clear()
@pytest.mark.xfail
@pytest.mark.slow
def test_container_iterator(self):
# TODO: make this pass
# Bug #3680: tp_traverse was not implemented for dictiter and
# dictview objects.
class C(object):
pass
views = (Dict.items, Dict.values, Dict.keys)
for v in views:
obj = C()
ref = weakref.ref(obj)
container = {obj: 1}
obj.v = v(container)
obj.x = iter(obj.v)
del obj, container
gc.collect()
self.assertIs(ref(), None, "Cycle was not collected")
def _not_tracked(self, t):
# Nested containers can take several collections to untrack
gc.collect()
gc.collect()
# UserDict is tracked unlike normal dict so we have to change
# this test for our Dict
# self.assertFalse(gc.is_tracked(t), t)
self.assertTrue(gc.is_tracked(t), t)
def _tracked(self, t):
self.assertTrue(gc.is_tracked(t), t)
gc.collect()
gc.collect()
self.assertTrue(gc.is_tracked(t), t)
@pytest.mark.slow
@support.cpython_only
def test_track_literals(self):
# Test GC-optimization of dict literals
x, y, z, w = 1.5, "a", (1, None), []
self._not_tracked(Dict())
self._not_tracked(Dict({x: (), y: x, z: 1}))
self._not_tracked(Dict({1: "a", "b": 2}))
self._not_tracked(Dict({1: 2, (None, True, False, ()): int}))
self._not_tracked(Dict({1: object()}))
# Dicts with mutable elements are always tracked, even if those
# elements are not tracked right now.
self._tracked(Dict({1: []}))
self._tracked(Dict({1: ([],)}))
self._tracked(Dict({1: {}}))
self._tracked(Dict({1: set()}))
@pytest.mark.slow
@support.cpython_only
def test_track_dynamic(self):
# Test GC-optimization of dynamically-created dicts
class MyObject(object):
pass
x, y, z, w, o = 1.5, "a", (1, object()), [], MyObject()
d = Dict()
self._not_tracked(d)
d[1] = "a"
self._not_tracked(d)
d[y] = 2
self._not_tracked(d)
d[z] = 3
self._not_tracked(d)
self._not_tracked(d.copy())
d[4] = w
self._tracked(d)
self._tracked(d.copy())
d[4] = None
self._not_tracked(d)
self._not_tracked(d.copy())
# dd isn't tracked right now, but it may mutate and therefore d
# which contains it must be tracked.
d = Dict()
dd = Dict()
d[1] = dd
self._not_tracked(dd)
self._tracked(d)
dd[1] = d
self._tracked(dd)
d = Dict.fromkeys([x, y, z])
self._not_tracked(d)
dd = Dict()
dd.update(d)
self._not_tracked(dd)
d = Dict.fromkeys([x, y, z, o])
self._tracked(d)
dd = Dict()
dd.update(d)
self._tracked(dd)
d = Dict(x=x, y=y, z=z)
self._not_tracked(d)
d = Dict(x=x, y=y, z=z, w=w)
self._tracked(d)
d = Dict()
d.update(x=x, y=y, z=z)
self._not_tracked(d)
d.update(w=w)
self._tracked(d)
d = Dict([(x, y), (z, 1)])
self._not_tracked(d)
d = Dict([(x, y), (z, w)])
self._tracked(d)
d = Dict()
d.update([(x, y), (z, 1)])
self._not_tracked(d)
d.update([(x, y), (z, w)])
self._tracked(d)
@support.cpython_only
def test_track_subtypes(self):
# Dict subtypes are always tracked
class MyDict(Dict):
pass
self._tracked(MyDict())
def make_shared_key_dict(self, n):
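        # instances of the same class share one key table (PEP 412 key-sharing
        # dicts); the splittable tests below rely on that CPython detail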
class C:
pass
dicts = []
for i in range(n):
a = C()
a.x, a.y, a.z = 1, 2, 3
dicts.append(a.__dict__)
return dicts
@support.cpython_only
def test_splittable_setdefault(self):
"""split table must be combined when setdefault()
breaks insertion order"""
a, b = self.make_shared_key_dict(2)
a["a"] = 1
size_a = sys.getsizeof(a)
a["b"] = 2
b.setdefault("b", 2)
size_b = sys.getsizeof(b)
b["a"] = 1
self.assertGreater(size_b, size_a)
self.assertEqual(list(a), ["x", "y", "z", "a", "b"])
self.assertEqual(list(b), ["x", "y", "z", "b", "a"])
@support.cpython_only
def test_splittable_del(self):
"""split table must be combined when del d[k]"""
a, b = self.make_shared_key_dict(2)
orig_size = sys.getsizeof(a)
del a["y"] # split table is combined
with self.assertRaises(KeyError):
del a["y"]
self.assertGreater(sys.getsizeof(a), orig_size)
self.assertEqual(list(a), ["x", "z"])
self.assertEqual(list(b), ["x", "y", "z"])
# Two dicts have different insertion order.
a["y"] = 42
self.assertEqual(list(a), ["x", "z", "y"])
self.assertEqual(list(b), ["x", "y", "z"])
@support.cpython_only
def test_splittable_pop(self):
"""split table must be combined when d.pop(k)"""
a, b = self.make_shared_key_dict(2)
orig_size = sys.getsizeof(a)
a.pop("y") # split table is combined
with self.assertRaises(KeyError):
a.pop("y")
self.assertGreater(sys.getsizeof(a), orig_size)
self.assertEqual(list(a), ["x", "z"])
self.assertEqual(list(b), ["x", "y", "z"])
# Two dicts have different insertion order.
a["y"] = 42
self.assertEqual(list(a), ["x", "z", "y"])
self.assertEqual(list(b), ["x", "y", "z"])
@support.cpython_only
def test_splittable_pop_pending(self):
"""pop a pending key in a splitted table should not crash"""
a, b = self.make_shared_key_dict(2)
a["a"] = 4
with self.assertRaises(KeyError):
b.pop("a")
@support.cpython_only
def test_splittable_popitem(self):
"""split table must be combined when d.popitem()"""
a, b = self.make_shared_key_dict(2)
orig_size = sys.getsizeof(a)
item = a.popitem() # split table is combined
self.assertEqual(item, ("z", 3))
with self.assertRaises(KeyError):
del a["z"]
self.assertGreater(sys.getsizeof(a), orig_size)
self.assertEqual(list(a), ["x", "y"])
self.assertEqual(list(b), ["x", "y", "z"])
@support.cpython_only
def test_splittable_setattr_after_pop(self):
"""setattr() must not convert combined table into split table."""
# Issue 28147
# third party
import _testcapi
class C:
pass
a = C()
a.a = 1
self.assertTrue(_testcapi.dict_hassplittable(a.__dict__))
# dict.pop() convert it to combined table
a.__dict__.pop("a")
self.assertFalse(_testcapi.dict_hassplittable(a.__dict__))
# But C should not convert a.__dict__ to split table again.
a.a = 1
self.assertFalse(_testcapi.dict_hassplittable(a.__dict__))
# Same for popitem()
a = C()
a.a = 2
self.assertTrue(_testcapi.dict_hassplittable(a.__dict__))
a.__dict__.popitem()
self.assertFalse(_testcapi.dict_hassplittable(a.__dict__))
a.a = 3
self.assertFalse(_testcapi.dict_hassplittable(a.__dict__))
@pytest.mark.xfail
def test_iterator_pickling(self):
        # set to xfail because we don't really care about pickling
# see test_valuesiterator_pickling
for proto in range(pickle.HIGHEST_PROTOCOL + 1):
data = Dict({1: "a", 2: "b", 3: "c"})
it = iter(data)
d = pickle.dumps(it, proto)
it = pickle.loads(d)
self.assertEqual(list(it), list(data))
it = pickle.loads(d)
try:
drop = next(it)
except StopIteration:
continue
d = pickle.dumps(it, proto)
it = pickle.loads(d)
del data[drop]
self.assertEqual(list(it), list(data))
def test_itemiterator_pickling(self):
for proto in range(pickle.HIGHEST_PROTOCOL + 1):
# UserDict fails these tests so our Dict should fail as well
with pytest.raises(TypeError):
data = Dict({1: "a", 2: "b", 3: "c"})
# dictviews aren't picklable, only their iterators
itorg = iter(data.items())
d = pickle.dumps(itorg, proto)
it = pickle.loads(d)
# note that the type of the unpickled iterator
# is not necessarily the same as the original. It is
# merely an object supporting the iterator protocol, yielding
# the same objects as the original one.
# self.assertEqual(type(itorg), type(it))
self.assertIsInstance(it, collections.abc.Iterator)
self.assertEqual(Dict(it), data)
it = pickle.loads(d)
drop = next(it)
d = pickle.dumps(it, proto)
it = pickle.loads(d)
del data[drop[0]]
self.assertEqual(Dict(it), data)
def test_valuesiterator_pickling(self):
for proto in range(pickle.HIGHEST_PROTOCOL + 1):
# UserDict fails these tests so our Dict should fail as well
with pytest.raises(TypeError):
data = Dict({1: "a", 2: "b", 3: "c"})
# data.values() isn't picklable, only its iterator
it = iter(data.values())
d = pickle.dumps(it, proto)
it = pickle.loads(d)
self.assertEqual(list(it), list(data.values()))
it = pickle.loads(d)
drop = next(it)
d = pickle.dumps(it, proto)
it = pickle.loads(d)
values = list(it) + [drop]
self.assertEqual(sorted(values), sorted(list(data.values())))
def test_reverseiterator_pickling(self):
for proto in range(pickle.HIGHEST_PROTOCOL + 1):
# UserDict fails these tests so our Dict should fail as well
with pytest.raises(TypeError):
data = Dict({1: "a", 2: "b", 3: "c"})
it = reversed(data)
d = pickle.dumps(it, proto)
it = pickle.loads(d)
self.assertEqual(list(it), list(reversed(data)))
it = pickle.loads(d)
try:
drop = next(it)
except StopIteration:
continue
d = pickle.dumps(it, proto)
it = pickle.loads(d)
del data[drop]
self.assertEqual(list(it), list(reversed(data)))
def test_reverseitemiterator_pickling(self):
for proto in range(pickle.HIGHEST_PROTOCOL + 1):
# UserDict fails these tests so our Dict should fail as well
with pytest.raises(TypeError):
data = Dict({1: "a", 2: "b", 3: "c"})
# dictviews aren't picklable, only their iterators
itorg = reversed(data.items())
d = pickle.dumps(itorg, proto)
it = pickle.loads(d)
# note that the type of the unpickled iterator
# is not necessarily the same as the original. It is
# merely an object supporting the iterator protocol, yielding
# the same objects as the original one.
# self.assertEqual(type(itorg), type(it))
self.assertIsInstance(it, collections.abc.Iterator)
self.assertEqual(Dict(it), data)
it = pickle.loads(d)
drop = next(it)
d = pickle.dumps(it, proto)
it = pickle.loads(d)
del data[drop[0]]
self.assertEqual(Dict(it), data)
def test_reversevaluesiterator_pickling(self):
for proto in range(pickle.HIGHEST_PROTOCOL + 1):
# UserDict fails these tests so our Dict should fail as well
with pytest.raises(TypeError):
data = Dict({1: "a", 2: "b", 3: "c"})
# data.values() isn't picklable, only its iterator
it = reversed(data.values())
d = pickle.dumps(it, proto)
it = pickle.loads(d)
self.assertEqual(list(it), list(reversed(data.values())))
it = pickle.loads(d)
drop = next(it)
d = pickle.dumps(it, proto)
it = pickle.loads(d)
values = list(it) + [drop]
self.assertEqual(sorted(values), sorted(data.values()))
def test_instance_dict_getattr_str_subclass(self):
class Foo:
def __init__(self, msg):
self.msg = msg
f = Foo("123")
class _str(str):
pass
self.assertEqual(f.msg, getattr(f, _str("msg")))
self.assertEqual(f.msg, f.__dict__[_str("msg")])
def test_object_set_item_single_instance_non_str_key(self):
class Foo:
pass
f = Foo()
f.__dict__[1] = 1
f.a = "a"
self.assertEqual(f.__dict__, {1: 1, "a": "a"})
def check_reentrant_insertion(self, mutate):
# This object will trigger mutation of the dict when replaced
# by another value. Note this relies on refcounting: the test
# won't achieve its purpose on fully-GCed Python implementations.
class Mutating:
def __del__(self):
mutate(d)
d = Dict({k: Mutating() for k in "abcdefghijklmnopqr"})
for k in list(d):
d[k] = k
def test_reentrant_insertion(self):
# Reentrant insertion shouldn't crash (see issue #22653)
def mutate(d):
d["b"] = 5
self.check_reentrant_insertion(mutate)
def mutate(d):
d.update(self.__dict__)
d.clear()
self.check_reentrant_insertion(mutate)
def mutate(d):
while d:
d.popitem()
self.check_reentrant_insertion(mutate)
@pytest.mark.slow
def test_merge_and_mutate(self):
# this fails because it expects a RuntimeError when the keys change, however
# the test_dictitems_contains_use_after_free expects StopIteration when the
# keys change?
class X:
def __hash__(self):
return 0
def __eq__(self, o):
other.clear()
return False
l = [(i, 0) for i in range(1, 1337)]
other = Dict(l)
other[X()] = 0
d = Dict({X(): 0, 1: 1})
self.assertRaises(RuntimeError, d.update, other)
@pytest.mark.xfail
@pytest.mark.slow
def test_free_after_iterating(self):
# this seems like a bit of a puzzle
support.check_free_after_iterating(self, iter, Dict)
support.check_free_after_iterating(self, lambda d: iter(d.keys()), Dict)
support.check_free_after_iterating(self, lambda d: iter(d.values()), Dict)
support.check_free_after_iterating(self, lambda d: iter(d.items()), Dict)
def test_equal_operator_modifying_operand(self):
# test fix for seg fault reported in bpo-27945 part 3.
class X:
def __del__(self):
dict_b.clear()
def __eq__(self, other):
dict_a.clear()
return True
def __hash__(self):
return 13
dict_a = Dict({X(): 0})
dict_b = Dict({X(): X()})
self.assertTrue(dict_a == dict_b)
# test fix for seg fault reported in bpo-38588 part 1.
class Y:
def __eq__(self, other):
dict_d.clear()
return True
dict_c = Dict({0: Y()})
dict_d = Dict({0: set()})
self.assertTrue(dict_c == dict_d)
def test_fromkeys_operator_modifying_dict_operand(self):
# test fix for seg fault reported in issue 27945 part 4a.
class X(int):
def __hash__(self):
return 13
def __eq__(self, other):
if len(d) > 1:
d.clear()
return False
d = Dict() # this is required to exist so that d can be constructed!
d = Dict({X(1): 1, X(2): 2})
try:
dict.fromkeys(d) # shouldn't crash
except RuntimeError: # implementation defined
pass
def test_fromkeys_operator_modifying_set_operand(self):
# test fix for seg fault reported in issue 27945 part 4b.
class X(int):
def __hash__(self):
return 13
def __eq__(self, other):
if len(d) > 1:
d.clear()
return False
d = {} # this is required to exist so that d can be constructed!
d = {X(1), X(2)}
try:
Dict.fromkeys(d) # shouldn't crash
except RuntimeError: # implementation defined
pass
@pytest.mark.xfail
def test_dictitems_contains_use_after_free(self):
# this seems like a bit of a puzzle
# see iterator.py for more details
class X:
def __eq__(self, other):
d.clear()
return NotImplemented
d = Dict({0: set()}) # TODO: we should be able to support set
(0, X()) in d.items()
def test_dict_contain_use_after_free(self):
# bpo-40489
class S(str):
def __eq__(self, other):
d.clear()
return NotImplemented
def __hash__(self):
return hash("test")
d = Dict({S(): "value"})
self.assertFalse("test" in d)
def test_init_use_after_free(self):
class X:
def __hash__(self):
pair[:] = []
return 13
pair = [X(), 123]
Dict([pair])
@pytest.mark.xfail
def test_oob_indexing_dictiter_iternextitem(self):
class X(int):
def __del__(self):
d.clear()
d = Dict({i: X(i) for i in range(8)})
def iter_and_mutate():
for result in d.items():
if result[0] == 2:
d[2] = None # free d[2] --> X(2).__del__ was called
self.assertRaises(RuntimeError, iter_and_mutate)
def test_reversed(self):
d = Dict({"a": 1, "b": 2, "foo": 0, "c": 3, "d": 4})
del d["foo"]
# UserDict does not support reversed so we do not either
with pytest.raises(TypeError):
r = reversed(d)
self.assertEqual(list(r), list("dcba"))
self.assertRaises(StopIteration, next, r)
def test_reverse_iterator_for_empty_dict(self):
# bpo-38525: revered iterator should work properly
# empty dict is directly used for reference count test
# UserDict does not support reversed so we do not either
with pytest.raises(TypeError):
self.assertEqual(list(reversed(Dict())), [])
self.assertEqual(list(reversed(Dict().items())), [])
self.assertEqual(list(reversed(Dict().values())), [])
self.assertEqual(list(reversed(Dict().keys())), [])
# dict() and {} don't trigger the same code path
self.assertEqual(list(reversed(dict())), [])
self.assertEqual(list(reversed(dict().items())), [])
self.assertEqual(list(reversed(dict().values())), [])
self.assertEqual(list(reversed(dict().keys())), [])
@pytest.mark.xfail
def test_reverse_iterator_for_shared_shared_dicts(self):
        # UserDict doesn't support reversed and this causes infinite recursion
# we will just disable this test
class A:
def __init__(self, x, y):
if x:
self.x = x
if y:
self.y = y
self.assertEqual(list(reversed(A(1, 2).__dict__)), ["y", "x"])
self.assertEqual(list(reversed(A(1, 0).__dict__)), ["x"])
self.assertEqual(list(reversed(A(0, 1).__dict__)), ["y"])
@pytest.mark.xfail
def test_dict_copy_order(self):
# bpo-34320
od = collections.OrderedDict([("a", 1), ("b", 2)])
od.move_to_end("a")
expected = list(od.items())
copy = Dict(od)
self.assertEqual(list(copy.items()), expected)
# dict subclass doesn't override __iter__
class CustomDict(Dict):
pass
pairs = [("a", 1), ("b", 2), ("c", 3)]
d = CustomDict(pairs)
self.assertEqual(pairs, list(Dict(d).items()))
        # UserDict doesn't support reversed and this causes infinite recursion
# we will just disable this test
class CustomReversedDict(dict):
def keys(self):
return reversed(list(dict.keys(self)))
__iter__ = keys
def items(self):
return reversed(dict.items(self))
d = CustomReversedDict(pairs)
self.assertEqual(pairs[::-1], list(dict(d).items()))
@support.cpython_only
def test_dict_items_result_gc(self):
# bpo-42536: dict.items's tuple-reuse speed trick breaks the GC's
# assumptions about what can be untracked. Make sure we re-track result
# tuples whenever we reuse them.
it = iter(Dict({None: []}).items())
gc.collect()
# That GC collection probably untracked the recycled internal result
# tuple, which is initialized to (None, None). Make sure it's re-tracked
# when it's mutated and returned from __next__:
self.assertTrue(gc.is_tracked(next(it)))
@pytest.mark.xfail
@support.cpython_only
def test_dict_items_result_gc_reversed(self):
        # UserDict doesn't support reversed and this causes infinite recursion
# Same as test_dict_items_result_gc above, but reversed.
it = reversed(Dict({None: []}).items())
gc.collect()
self.assertTrue(gc.is_tracked(next(it)))
| apache-2.0 | 1,982,131,372,011,168,800 | 30.847866 | 91 | 0.507185 | false |
point86/Videocompress | worker.py | 1 | 8523 | from PyQt5.QtCore import *
import time
import sys
import subprocess
import re
import shutil
import pathlib
VIDEO_EXTENSIONS = [".mp4",".avi",".mkv",".3gp", ".mov"] #most used video extensions
INFO = 0 #loglevel
ERROR = 1 #loglevel
#string format: Duration: 00:02:00.92, start: 0.000000, bitrate: 10156 kb/s
durationRegex = re.compile("[ ]+Duration: (\d{2}):(\d{2}):(\d{2}.\d{2})")
#string format: frame= 361 fps= 51 q=32.0 size= 1792kB time=00:00:12.04 bitrate=1219.0kbits/s speed=1.71x
progressRegex = re.compile("frame=[ 0-9]+fps=[ 0-9\.]+q=[ 0-9\.\-]+L*size=[ 0-9]+[bBkKgGmM ]+time=(\d{2}):(\d{2}):(\d{2}.\d{2})")
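# the "L*" also matches ffmpeg's final status line, which prints "Lsize=" instead of "size="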
class Worker(QObject):
finished = pyqtSignal() #taskPerformer onThreadFinished() will be called
emitLog = pyqtSignal(int, str) #emit log to taskPerformer (displayLog(i))
emitProgress = pyqtSignal(int, int) #emit progress to taskPerformer
proc = None
continueWork = True
totSize = processedSize = 0 # tot files size
converted = copied = fails = 0
def __init__(self, inputPath, outputPath, ffmpeg_opt, parent=None):
super(Worker, self).__init__(parent)
self.inputPath = pathlib.Path(inputPath)
self.outputPath = pathlib.Path(outputPath)
self.ffmpeg_opt = ffmpeg_opt
@pyqtSlot()
def operationRunner(self):
#collecting and printing stats
time_start = time.time() #start time
t = time.localtime(time_start) #convert time_start in a tuple, for easily extracting hour, min, sec
self.totSize = self.getTotalSize(self.inputPath)
self.thick = 100/self.totSize
self.emitLog.emit(INFO, "Launched at %d:%02d:%02d\n" %(t.tm_hour, t.tm_min, t.tm_sec))
self.emitLog.emit(INFO, "Input path: %s\n" % str(self.inputPath))
self.emitLog.emit(INFO, "Total input size: %0.f MB\n" % round((self.totSize/(1024*1024.0)), 2))
self.emitLog.emit(INFO, "Output path: %s\n" % str(self.outputPath))
self.emitLog.emit(INFO, "ffmpeg options: %s\n" % str(self.ffmpeg_opt))
self.emitLog.emit(INFO, "-------------------------------------------------------------\n")
self.fileManager(self.inputPath, self.outputPath)
self.emitLog.emit(INFO, "-------------------------- Done --------------------------\n")
sec = time.time() - time_start
m, s = divmod(sec, 60)
h, m = divmod(m, 60)
self.emitLog.emit(INFO, "Total time: %d:%02d:%02d sec - It's now safe to close this window.\n" %(h,m,s))
self.emitLog.emit(INFO, "Processed: %d - copied files: %d - errors: %d" % (self.converted, self.copied, self.fails))
self.finished.emit()
#convert file only if it's a video, otherwise copy it
#input_file: type(input_file) = type(output_file) = pathlib.Path
def convert_or_copy(self, input_file, output_dir):
if self.continueWork == False:
return
output_name = output_dir / input_file.name
try:
if input_file.suffix in VIDEO_EXTENSIONS:
self.emitLog.emit(INFO, "Converson: %s " % str(input_file))
self.processedSize += self.convert_file(input_file, output_name, self.updProgress)
self.converted +=1
else:
self.emitLog.emit(INFO, "Copy: %s " % str(input_file))
self.processedSize += self.copy(input_file, output_name, self.updProgress)
self.copied +=1
except Exception as e:
self.emitLog.emit(INFO, "- Failed")
self.emitLog.emit(ERROR, "%s" % str(e))
self.fails += 1
else:
self.emitLog.emit(INFO, "- OK\n")
#rate: percentage of current file progress
#fSize: current file size in bytes
def updProgress(self, rate, fSize):
#total progress = self.processedSize + current file processed bytes
self.emitProgress.emit(round((100/self.totSize)*(self.processedSize+(fSize/100*rate))), rate)
#scan all inputPath and perform operations
def fileManager(self, inputPath, outputPath):
if self.continueWork == False:
return
if inputPath == outputPath:
self.emitLog.emit(ERROR, "ERROR!: input path is the same as output path\n")
return
if inputPath.is_file() and (inputPath.parent == outputPath):
self.emitLog.emit(ERROR, "ERROR!: input and output files must be in different folders.\n")
if not outputPath.exists():
self.emitLog.emit(INFO, "Creating dir: %s\n" % str(outputPath))
outputPath.mkdir()
#input is a file, need only to convert (or copy) to new location
if inputPath.is_file():
self.convert_or_copy(inputPath, outputPath)
#input is a directory
else:
for item in inputPath.iterdir():
if item.is_dir():
destin_dir = outputPath / item.name #path concatenation
self.fileManager(item, destin_dir)
else:
self.convert_or_copy(item, outputPath)
#TODO: preserve input file permissions? (output file permission are different)
#conversion of a read-only file will generate a non-readonly file.
def convert_file(self, input_name, output_name, updProgress):
fileSize = input_name.stat().st_size
progress=0
lastLine = slastLine = ""
DQ="\"" #double quote
#ffmpeg: sane values are between 18 and 28
#https://trac.ffmpeg.org/wiki/Encode/H.264
#ffmpeg -i input.mp4 -c:v libx264 -crf 26 output.mp4
self.proc = subprocess.Popen("ffmpeg -y -loglevel info -i " + DQ + str(input_name) + DQ + " " + self.ffmpeg_opt + " " + DQ+str(output_name)+DQ,stdout=subprocess.PIPE, stderr=subprocess.PIPE, bufsize=1, universal_newlines=True)
while True:
#another way is to use ffmpeg -y -progress filename ....and parse filename, but there are the same info ffmpeg print to stderr.
sys.stderr.flush()
#read STDERR output (only for ffmpeg, because it have the option to send video output to stdout stream, so it uses stderr for logs.)
line=self.proc.stderr.readline()
if line:
slastLine = lastLine
lastLine = line
p = re.match(progressRegex, line)
if p is not None:
#reading current time interval
hh=float(p.group(1)) #hours
mm=float(p.group(2)) #mins
ss=float(p.group(3)) #secs (floating point, ex: 21.95)
progress=hh*3600+mm*60+ss
updProgress(round(100/duration*progress), fileSize)
else:
#reading total video length
p=re.match(durationRegex,line)
if p is not None:
hh=float(p.group(1)) #hours
mm=float(p.group(2)) #mins
ss=float(p.group(3)) #secs (floating point, ex: 21.95)
duration = hh*3600+mm*60+ss
            rc = self.proc.poll()
            if rc == 0:
                break
            elif rc is not None:
                # any non-zero exit code means ffmpeg failed; report the last
                # two stderr lines instead of looping forever on a closed pipe
                raise Exception(" %s<br> %s" % (slastLine, lastLine))
self.proc=None
shutil.copymode(input_name, output_name, follow_symlinks=False)
return fileSize
#copy file inputPath to outputPath, calling callback every 250KB copied.
#(250=trigger value)
#https://hg.python.org/cpython/file/eb09f737120b/Lib/shutil.py#l215
def copy(self, inputPath, outputPath, updProgress):
length = 16*1024
trigger = 250*1024
fileSize = inputPath.stat().st_size
copied = count = 0
fsrc = open(inputPath, 'rb')
fdst = open(outputPath, 'wb')
while self.continueWork:
buf = fsrc.read(length)
if not buf:
break
fdst.write(buf)
copied += len(buf)
count += len(buf)
if count >= trigger:
count = 0
updProgress(round(100/fileSize*copied), fileSize)
shutil.copymode(inputPath, outputPath, follow_symlinks=False)
return fileSize
def getTotalSize(self, inputPath): #type (inputPath) = <class pathlib>
#inputPath is a file:
size = 0
if inputPath.is_file():
return inputPath.stat().st_size
#inputPath is a folder:
for item in inputPath.iterdir():
size += self.getTotalSize(item)
return size
| mit | -8,190,610,483,280,207,000 | 44.57754 | 234 | 0.587 | false |
overactor/OdeToJS | quiz/admin.py | 1 | 1080 | """
Admin settings for OdeToJS
Copyright (C) 2015 Rik de Graaff
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License along
with this program; if not, write to the Free Software Foundation, Inc.,
51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
"""
from django.contrib import admin
from .models import Expression
class ExpressionAdmin(admin.ModelAdmin):
    fieldsets = [
(None, {'fields': ['expression', 'number']}),
]
list_display = ('number', 'expression')
admin.site.register(Expression, ExpressionAdmin) | gpl-2.0 | -886,005,479,568,842,800 | 35.033333 | 75 | 0.725926 | false |
binyuanchen/arena-dev-cdh-hadoop | deploy/restutil.py | 1 | 3554 | #!/usr/bin/env python
import json
import base64
import urllib
import urllib2
class RestUtil(object):
def __init__(self, username=None, password=None):
if not username:
raise RuntimeError("missing rest api username")
self.username = username
if not password:
raise RuntimeError("missing rest api password")
self.password = password
def _populate_default_headers(self, headers):
if not headers:
headers = {}
if 'Content-Type' not in headers:
headers['Content-Type'] = 'application/json'
if 'Authorization' not in headers:
b64_auth = base64.b64encode('%s:%s' % (self.username, self.password))
headers['Authorization'] = 'Basic %s' % b64_auth
return headers
def put(self, url=None, headers=None, params=None, body=None):
headers = self._populate_default_headers(headers)
if params:
url += '?' + urllib.urlencode(params)
print 'REST[put] - url = %s' % url
print 'REST[put] - headers = %s' % headers
print 'REST[put] - body = %s' % body
response_body = None
try:
req = urllib2.Request(url, headers=headers)
resp = None
opener = urllib2.build_opener(urllib2.HTTPHandler)
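            # urllib2.Request has no method parameter in Python 2, so override
            # get_method to force a PUT instead of the default POST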
req.get_method = lambda: 'PUT'
resp = opener.open(req, json.dumps(body))
raw = resp.read()
if raw:
response_body = json.loads(raw)
print 'REST[put] - response_body = %s' % response_body
except urllib2.HTTPError, e:
raise RuntimeError('rest call failed for url %s, status=%s, reason=%s' % (url, e.code, e.reason))
return response_body
def post(self, url=None, headers=None, params=None, body=None):
headers = self._populate_default_headers(headers)
if params:
url += '?' + urllib.urlencode(params)
print 'REST[post] - url = %s' % url
print 'REST[post] - headers = %s' % headers
print 'REST[post] - body = %s' % body
response_body = None
try:
req = urllib2.Request(url, headers=headers)
resp = None
if body:
resp = urllib2.urlopen(req, json.dumps(body))
else:
                # also handles the cases where body is {} or None
resp = urllib2.urlopen(req, json.dumps({}))
raw = resp.read()
if raw:
response_body = json.loads(raw)
print 'REST[post] - response_body = %s' % response_body
except urllib2.HTTPError, e:
raise RuntimeError('rest call failed for url %s, status=%s, reason=%s' % (url, e.code, e.reason))
return response_body
def get(self, url=None, headers=None, params=None):
headers = self._populate_default_headers(headers)
if params:
url += '?' + urllib.urlencode(params)
print 'REST[get] - url = %s' % url
print 'REST[get] - headers = %s' % headers
response_body = None
try:
req = urllib2.Request(url, headers=headers)
resp = urllib2.urlopen(req)
raw = resp.read()
if raw:
response_body = json.loads(raw)
print 'REST[get] - response_body = %s' % response_body
except urllib2.HTTPError, e:
raise RuntimeError('REST[GET] failed, url=%s, status=%s, reason=%s' % (url, e.code, e.reason))
return response_body
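
if __name__ == '__main__':
    # Usage sketch: the URL, credentials and payload below are hypothetical
    # illustration values, not endpoints this module is known to target.
    util = RestUtil(username='admin', password='admin')
    clusters = util.get(url='http://localhost:7180/api/v1/clusters')
    print 'clusters: %s' % clusters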
| apache-2.0 | -4,008,567,834,921,852,000 | 36.808511 | 109 | 0.554305 | false |
spiceqa/tp-spice | spice/tests/rv_input.py | 1 | 7340 | #!/usr/bin/env python
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
#
# See LICENSE for more details.
"""
Test keyboard inputs through spice. Send keys through qemu monitor to client.
Requires
--------
- Deployed PyGTK on guest VM.
Presumes the numlock state at startup is 'OFF'.
"""
import logging
from spice.lib import act
from spice.lib import stest
from spice.lib import utils
logger = logging.getLogger(__name__)
#
#def test_leds_migration(client_vm, guest_vm, guest_session, params):
# """
# Check LEDS after migration.
# Function sets LEDS (caps, num) to ON and send scancodes of "a" and
# "1 (num)" and expected to get keycodes of "A" and "1" after migration.
#
# Parameters
# ----------
# client_vm :
# Vm object.
# guest_vm :
# Vm object.
# guest_session :
# Ssh session to guest VM.
# params : virttest.utils_params.Params
# Dictionary with the test parameters.
# """
# # Turn numlock on RHEL6 on before the test begins:
# grep_ver_cmd = "grep -o 'release [[:digit:]]' /etc/redhat-release"
# rhel_ver = guest_session.cmd(grep_ver_cmd).strip()
# logging.info("RHEL version: #{0}#".format(rhel_ver))
# if rhel_ver == "release 6":
# client_vm.send_key('num_lock')
# #Run PyGTK form catching KeyEvents on guest
# run_test_form(guest_session, params)
# utils_spice.wait_timeout(3)
# # Tested keys before migration
# test_keys = ['a', 'kp_1', 'caps_lock', 'num_lock', 'a', 'kp_1']
# logging.info("Sending leds keys to client machine before migration")
# for key in test_keys:
# client_vm.send_key(key)
# utils_spice.wait_timeout(0.3)
# guest_vm.migrate()
# utils_spice.wait_timeout(8)
# #Tested keys after migration
# test_keys = ['a', 'kp_1', 'caps_lock', 'num_lock']
# logging.info("Sending leds keys to client machine after migration")
# for key in test_keys:
# client_vm.send_key(key)
# utils_spice.wait_timeout(0.3)
# utils_spice.wait_timeout(30)
#expected_keysyms = [97, 65457, 65509, 65407, 65, 65436, 65, 65436,
# 65509, 65407]
#
def test_seq(test, send_keys, expected_keysyms):
ssn = act.klogger_start(test.vmi_g)
for i in send_keys:
test.vm_c.send_key(i)
logged_keys = act.klogger_stop(test.vmi_g, ssn)
    keysyms = [keysym for (_, keysym) in logged_keys]
assert keysyms == expected_keysyms
ssn.close()
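
# For example, a hypothetical call checking two plain letters would be:
#   test_seq(test, ['a', 'b'], [97, 98])
# i.e. the keysyms logged on the guest must match, in order, the keys sent
# from the qemu monitor on the client.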
def run(vt_test, test_params, env):
"""Test for testing keyboard inputs through spice.
Parameters
----------
vt_test : avocado.core.plugins.vt.VirtTest
QEMU test object.
test_params : virttest.utils_params.Params
Dictionary with the test parameters.
env : virttest.utils_env.Env
Dictionary with test environment.
"""
test = stest.ClientGuestTest(vt_test, test_params, env)
cfg = test.cfg
#test.cmd_g.install_rpm(cfg.xev)
act.x_active(test.vmi_c)
act.x_active(test.vmi_g)
ssn = act.new_ssn(test.vmi_c, dogtail_ssn=test.vmi_c.vm.is_rhel8())
act.rv_connect(test.vmi_c, ssn)
act.rv_chk_con(test.vmi_c)
if cfg.ttype == 'type_and_func_keys':
"""Test typewriter and functional keys."""
keycodes = range(1, 69)
# Skip Ctrl, RSH, LSH, PtScr, Alt, CpsLk
skip = [29, 42, 54, 55, 56, 58]
send_keys = [hex(k) for k in keycodes if k not in skip]
expected_keysyms = [65307, 49, 50, 51, 52, 53, 54, 55, 56, 57, 48, 45,
61, 65288, 65289, 113, 119, 101, 114, 116, 121,
117, 105, 111, 112, 91, 93, 65293, 97, 115, 100,
102, 103, 104, 106, 107, 108, 59, 39, 96, 92, 122,
120, 99, 118, 98, 110, 109, 44, 46, 47, 32, 65470,
65471, 65472, 65473, 65474, 65475, 65476, 65477,
65478, 65479]
test_seq(test, send_keys, expected_keysyms)
if cfg.ttype == 'leds_and_esc_keys':
escaped = ['insert', 'delete', 'home', 'end', 'pgup', 'pgdn', 'up',
'down', 'right', 'left']
expected_keysyms = [65379, 65535, 65360, 65367,
65365, 65366, 65362, 65364, 65363, 65361]
test_seq(test, escaped, expected_keysyms)
shortcuts = ['a', 'shift-a', 'shift_r-a', 'ctrl-a', 'ctrl-c', 'ctrl-v',
'alt-x']
expected_keysyms = [97, 65505, 65, 65506, 65, 65507, 97, 65507, 99,
65507, 118, 65513, 120]
test_seq(test, shortcuts, expected_keysyms)
leds = ['a', 'caps_lock', 'a', 'caps_lock', 'num_lock', 'kp_1',
'num_lock', 'kp_1']
expected_keysyms = [97, 65509, 65, 65509, 65407, 65457, 65407, 65436]
test_seq(test, leds, expected_keysyms)
if cfg.ttype == 'nonus_layout':
cmd = utils.Cmd("setxkbmap", "cz")
act.run(test.vmi_g, cmd)
keys = ['7', '8', '9', '0', 'alt_r-x', 'alt_r-c', 'alt_r-v']
expected_keysyms = [253, 225, 237, 233, 65027, 35, 65027, 38, 65027,
64]
test_seq(test, keys, expected_keysyms)
cmd = utils.Cmd("setxkbmap", "de")
act.run(test.vmi_g, cmd)
keys = ['minus', '0x1a', 'alt_r-q', 'alt_r-m']
expected_keysyms = [223, 252, 65027, 64, 65027, 181]
test_seq(test, keys, expected_keysyms)
cmd = utils.Cmd("setxkbmap", "us")
act.run(test.vmi_g, cmd)
if cfg.ttype == "leds_migration":
if test.vm_c.is_rhel6():
test.vm_c.send_key('num_lock')
keys1 = ['a', 'kp_1', 'caps_lock', 'num_lock', 'a', 'kp_1']
keys2 = ['a', 'kp_1', 'caps_lock', 'num_lock']
expected_keysyms = ['97', '65457', '65509', '65407', '65', '65436',
'65', '65436', '65509', '65407']
ssn = act.klogger_start(test.vmi_g)
for i in keys1:
test.vm_c.send_key(i)
test.vm_g.migrate()
for i in keys2:
test.vm_c.send_key(i)
logged_keys = act.klogger_stop(test.vmi_g, ssn)
ssn.close()
keysyms = [key[1] for key in logged_keys]
assert keysyms == expected_keysyms
""" Useful links
https://code.google.com/archive/p/key-mon/
http://www.shallowsky.com/software/crikey/pykey-0.1
https://www.berrange.com/tags/key-codes/
ftp://ftp.suse.com/pub/people/sbrabec/keyboards/
http://python-evdev.readthedocs.io/en/latest/index.html
http://python-xlib.sourceforge.net/doc/html/python-xlib_16.html#SEC15
https://en.wikipedia.org/wiki/Evdev
http://python-evdev.readthedocs.io/en/latest/apidoc.html#module-evdev.ecodes
https://www.vmware.com/support/ws4/doc/devices_linux_kb_ws.html
http://www.madore.org/~david/linux/linux-old.html
http://www.comptechdoc.org/os/linux/howlinuxworks/linux_hlkeycodes.html
https://wiki.ubuntu.com/Hotkeys/Architecture
http://www.tcl.tk/man/tcl8.4/TkCmd/keysyms.htm
"""
| gpl-2.0 | 8,741,865,384,683,159,000 | 36.258883 | 79 | 0.595232 | false |
astaninger/speakout | venv/lib/python3.6/site-packages/pymongo/cursor_manager.py | 1 | 2088 | # Copyright 2009-present MongoDB, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""DEPRECATED - A manager to handle when cursors are killed after they are
closed.
New cursor managers should be defined as subclasses of CursorManager and can be
installed on a client by calling
:meth:`~pymongo.mongo_client.MongoClient.set_cursor_manager`.
.. versionchanged:: 3.3
Deprecated, for real this time.
.. versionchanged:: 3.0
Undeprecated. :meth:`~pymongo.cursor_manager.CursorManager.close` now
requires an `address` argument. The ``BatchCursorManager`` class is removed.
"""
import warnings
import weakref
from bson.py3compat import integer_types
class CursorManager(object):
"""DEPRECATED - The cursor manager base class."""
def __init__(self, client):
"""Instantiate the manager.
:Parameters:
- `client`: a MongoClient
"""
warnings.warn(
"Cursor managers are deprecated.",
DeprecationWarning,
stacklevel=2)
self.__client = weakref.ref(client)
def close(self, cursor_id, address):
"""Kill a cursor.
Raises TypeError if cursor_id is not an instance of (int, long).
:Parameters:
- `cursor_id`: cursor id to close
- `address`: the cursor's server's (host, port) pair
.. versionchanged:: 3.0
Now requires an `address` argument.
"""
if not isinstance(cursor_id, integer_types):
raise TypeError("cursor_id must be an integer")
self.__client().kill_cursors([cursor_id], address)
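
# A minimal subclass sketch (assumes PyMongo 3.x, where the deprecated
# MongoClient.set_cursor_manager() accepts a manager class; the batch size
# below is an arbitrary example value):
#
#   class BatchingCursorManager(CursorManager):
#       """Kills cursors in batches of 10 instead of one by one."""
#       def __init__(self, client):
#           super(BatchingCursorManager, self).__init__(client)
#           self._batch = []
#
#       def close(self, cursor_id, address):
#           self._batch.append((cursor_id, address))
#           if len(self._batch) >= 10:
#               for cid, addr in self._batch:
#                   super(BatchingCursorManager, self).close(cid, addr)
#               self._batch = []
#
#   client = MongoClient()
#   client.set_cursor_manager(BatchingCursorManager)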
| mit | 6,227,628,158,408,121,000 | 31.123077 | 79 | 0.678161 | false |
ujiro99/auto_logger | logger/remote.py | 1 | 7680 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
import os
import re
import shutil
import time
import pexpect
from . import watch, log
class RemoteLogger:
PROMPT = "[#$%>]"
TIMEOUT_EXPECT = 20
TIMEOUT_LOGGING = 30
TIMEOUT_MOVE = 30
def __init__(self, params):
"""
constructor
:param logger.params.LogParam params: execution parameter
"""
        self.params = params  # type: logger.params.LogParam
self.p = None # type: pexpect.spawn
def get_log(self, to_usb=False):
"""
Get remote log using shell command.
:param bool to_usb: If true, log file is copied to USB.
:return: Log file name. If failed, returns None.
:rtype: list of str
"""
self.__connect()
log.d("- prepare logging")
# check current files.
before = self.__get_file_set()
log.d(before)
# execute log command
# self.params.log_cmd = "touch 1." + self.params.log_extension # for develop
self.__send(self.params.log_cmd)
log.i("- execute %s" % self.params.log_cmd)
# wait log file created
timeout = RemoteLogger.TIMEOUT_LOGGING
while timeout > 0:
time.sleep(0.1)
timeout -= 0.1
after = self.__get_file_set()
created = after - before
if len(created) != 0:
break
if timeout <= 0:
log.w("- time out to logging.")
self.__disconnect()
return None # Failed to logging
f = created.pop()
log.i("- created: " + f)
ls = self.__move_file([f], to_usb)
self.__disconnect()
return ls
def move_log(self, file_name, to_usb=False):
"""
Move specified file from remote_log_dir to remote_dist_dir .
:param str file_name: File name to be moved.
:param bool to_usb: If true, log file is copied to USB.
:return: Log file name. If failed, returns None.
:rtype: list of str
"""
self.__connect()
ls, err = self.__get_file_list(file_name)
if (not err is None) or (len(ls) <= 0):
log.w("- not found: %s" % file_name)
ls = None
else:
ls = self.__move_file(ls, to_usb)
self.__disconnect()
return ls
def list_log(self):
"""
List remote log files in remote_log_dir.
:return: List of files
:rtype list of str
"""
self.__connect()
ls, err = self.__get_file_list()
if not err is None: log.w(err)
self.__disconnect()
return ls
def clear_log(self, buffer=False):
"""
:param bool buffer: Also remove buffer.
Remove all remote log files in remote_log_dir.
"""
self.__connect()
self.__send("rm *.%s" % self.params.log_extension)
time.sleep(0.1)
if buffer:
self.__send("%s" % self.params.log_clear_cmd)
self.__disconnect()
def __connect(self):
"""
Connect to remote shell.
"""
# launch shell
log.i("- launch %s to %s" % (self.params.shell, self.params.host_name))
self.p = p = pexpect.spawn("%s %s" % (self.params.shell, self.params.host_name))
# self.p = p = pexpect.spawn("bash") # for develop
log.d("- check is required to add known hosts.")
p.expect([r"yes", r"[#$%>]"])
log.d(p.before)
log.d(p.after)
if p.after == b'yes':
log.d("-- required.")
self.__send('yes')
p.timeout = RemoteLogger.TIMEOUT_EXPECT
self.__send("PS1='#'")
self.__send("cd %s" % self.params.remote_log_dir)
def __disconnect(self):
"""
Disconnect from remote shell.
"""
self.p.terminate()
self.p.expect(pexpect.EOF)
def __send(self, cmd):
"""
Send command to shell.
:param str cmd: Command string to be send.
:return: Output of cmd.
:rtype str
"""
if cmd is None:
log.w("Error: cmd is None")
return None
log.d(" > $ %s" % cmd)
self.p.sendline(cmd + ";echo")
self.p.expect("echo\r\n(.*)%s" % RemoteLogger.PROMPT)
ret = self.p.match.groups()[0].decode("utf-8") # type: str
ret = ret.strip("\r\n")
log.d(" > %s" % ret)
return ret
def __get_file_set(self):
"""
Get current directory's file set.
:return: File set.
:rtype set of str
"""
return set(self.__get_file_list()[0])
def __get_file_list(self, pattern=None):
"""
Get current directory's file list
:return: File list and error message(if error occurred).
:rtype (list of str, str)
"""
if pattern is None: pattern = '*.' + self.params.log_extension
self.p.sendline("ls %s -1 --color=no" % pattern)
self.p.expect("no(.*)" + RemoteLogger.PROMPT)
ls = self.p.match.groups()[0].decode("utf-8") # type: str
        if ls.find("No such file or directory") != -1:
            return [], "File not found."
        ls = list(filter(lambda x: bool(re.match(r'\S+', x)), ls.splitlines()))
log.d(ls)
return ls, None
def __move_file(self, files, to_usb=False):
"""
Move files.
:param list of str files: target file names
:param bool to_usb: if true, move files to usb.
:return Moved file list. If failed, returns None.
:rtype list of str
"""
if to_usb:
return self.__move_file_to_usb(files)
else:
return self.__move_file_to_shared_dir(files)
def __move_file_to_usb(self, files):
"""
Move files to remote_dist_dir -> usb.
:param list of str files: target file names
:return Moved file list. If failed, returns None.
:rtype list of str
"""
# checks does the usb_dir exist.
        dir_exists = self.__send("test -d %s && echo $?" % self.params.usb_dir) == "0"
if not dir_exists:
log.e("%s not exists" % self.params.usb_dir)
return None
# mv log file to usb
ls = []
log.i("- move file to %s" % self.params.remote_dist_dir)
for f in files:
self.__send("mkdir -p %s" % self.params.remote_dist_dir)
self.__send("mv %s %s" % (f, self.params.remote_dist_dir))
self.__send("sync")
ls.append(os.path.join(self.params.remote_dist_dir, f))
return ls
def __move_file_to_shared_dir(self, files):
"""
Move files to remote_dist_dir -> local_dist_dir.
:param list of str files: target file names
:return Moved file list.
:rtype list of str
"""
ret = []
for f in files:
# mv log file - remote to local
log.i("- move file to %s" % self.params.remote_dist_dir)
self.__send("mv %s %s" % (f, self.params.remote_dist_dir))
is_created = watch.file(self.params.local_src_dir, f, RemoteLogger.TIMEOUT_MOVE)
if not is_created:
log.w("- move failed: %s" % f)
continue
# mv log file - local to local
sp = os.path.join(self.params.local_src_dir, f)
dp = os.path.join(self.params.local_dist_dir, f)
os.makedirs(self.params.local_dist_dir, exist_ok=True)
shutil.move(sp, self.params.local_dist_dir)
ret.append(dp)
log.i("- moved: %s" % dp)
return ret
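
# Usage sketch (every LogParam field below mirrors an attribute this class
# reads; all concrete values are hypothetical):
#
#   params = LogParam()
#   params.shell, params.host_name = 'ssh', 'target-device'
#   params.remote_log_dir, params.remote_dist_dir = '/var/log/app', '/mnt/usb'
#   params.local_src_dir, params.local_dist_dir = '/mnt/share', '/tmp/logs'
#   params.log_cmd, params.log_extension = 'dump_log', 'log'
#   rlogger = RemoteLogger(params)
#   files = rlogger.get_log()      # trigger logging, then collect the file
#   print(rlogger.list_log())      # list what is still on the target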
| mit | 5,026,616,677,315,661,000 | 29.59761 | 92 | 0.521094 | false |
SportySpice/Collections | src/videosource/youtube/Channel.py | 1 | 13138 | from src.videosource.VideoSource import VideoSource, SourceType
import Playlist
import service
from Thumb import Thumb
import settings as s
from src.paths.root import CHANNEL_CACHE_DIR
from src.file import File
from datetime import datetime
from Pages import Pages, ItemType, TimeUnit
from src.tools import DataDictionary
from src import router
USERNAME_DIC_FILE = '__usernames.dic'
CACHE_FILE_EXTENSION = '.cha'
MY_CHANNEL_FILE = '_mine.cha'
channelsLoaded = {}
mineLoaded = None
usernameDic = DataDictionary.load(USERNAME_DIC_FILE, CHANNEL_CACHE_DIR)
class Channel(VideoSource):
def __init__(self, channelId=None, username=None, mine=False):
self.username = username
self.channelId = channelId
self.mine = mine
self._uploadPlaylist = None
self.playlists = None
self._gotInfo = False
self.viewCount = None
self.commentCount = None
self.subscriberCount = None
self.videoCount = None
#must be called at least once before making any use of the object
    def updateInfo(self, snippet, channelId, contentDetails=None, statistics=None, videoCount=None, subNewItemCount=None, updatedInfoAt=None):
        # datetime.now() must not be a default argument value (it would be
        # evaluated only once, at import time), so it is resolved per call.
        if updatedInfoAt is None:
            updatedInfoAt = datetime.now()
        title = snippet['title']
studioTitle = title
tvShowTitle = '%s Uploads' %title
description = snippet['description']
youtubeThumb = Thumb(snippet['thumbnails'])
thumb = youtubeThumb.get(s.sourceThumbres)
#self.youtubeThumb = youtubeThumb
sourceType = SourceType.CHANNEL
sourceId = channelId
super(Channel, self).__init__(title, studioTitle, tvShowTitle, description, thumb, sourceType, sourceId)
self.channelId = channelId
cacheFileName = self.channelId + CACHE_FILE_EXTENSION
self.cacheFile = File.fromNameAndDir(cacheFileName, CHANNEL_CACHE_DIR)
if self.username:
usernameDic.setIfNonExistent(self.username, channelId)
if contentDetails:
relatedPlaylists = contentDetails['relatedPlaylists']
uploadsPID = relatedPlaylists['uploads']
if (self._uploadPlaylist is None) or (self._uploadPlaylist and self._uploadPlaylist.playlistId != uploadsPID):
self._uploadPlaylist = Playlist.Playlist(uploadsPID, None, channelSource=self)
self.videos = self._uploadPlaylist.videos
#self._uploadPlaylist.fetchInfo()
if self.mine:
likesPID = relatedPlaylists['likes']
favoritesPID = relatedPlaylists['favorites']
watchHistoryPID = relatedPlaylists['watchHistory']
watchLaterPID = relatedPlaylists['watchLater']
self.likesPL = Playlist.Playlist(likesPID, None, channelSource=self)
self.favoritesPL = Playlist.Playlist(favoritesPID, None, channelSource=self)
self.watchHistoryPL= Playlist.Playlist(watchHistoryPID,None, channelSource=self)
self.watchLaterPL = Playlist.Playlist(watchLaterPID, None, channelSource=self)
if statistics:
self.viewCount = int(statistics['viewCount'])
self.commentCount = int(statistics['commentCount'])
self.subscriberCount = int(statistics['subscriberCount'])
if statistics['videoCount'] != 0:
self.videoCount = int(statistics['videoCount'])
if videoCount and videoCount!=0:
self.videoCount = videoCount
if subNewItemCount:
self.subNewItemCount = subNewItemCount #this is for subscription channels
if self.playlists is None:
            self.playlists = Pages(self._playlistsRequest, self._playlistsResponseProcesse, ItemType.VSOURCE, 'channel playlists', s.channelPlaylistsCacheTime, TimeUnit.DAYS, self)
if self.mine:
global mineLoaded
mineLoaded = self
else:
global channelsLoaded
if self.channelId in channelsLoaded:
if channelsLoaded[self.channelId] != self:
raise ValueError('Channel is in memory but with a different instance. This should never happen.')
else:
channelsLoaded[self.channelId] = self
self._gotInfo = True
self._updatedInfoAt = updatedInfoAt
self.cache()
###################
## Public Methods##
###################
#override
def fetchInfo(self):
request = self._channelInfoRequest()
response = request.execute()
channelId, snippet, contentDetails, statistics = self._processChannelInfoResposne(response)
self.updateInfo(snippet, channelId, contentDetails, statistics)
#override
def fetchInfoBatchRequest(self):
request = self._channelInfoRequest()
def callback(request_id, response, exception):
channelId, snippet, contentDetails, statistics = self._processBatchChannelInfoResponse(request_id, response, exception)
self.updateInfo(snippet, channelId, contentDetails, statistics)
return (request, callback)
#override
def needsInfoUpdate(self, checkUploadPlaylist=False):
if checkUploadPlaylist and self.needPlaylistInfo():
return True
if not self._gotInfo:
return True
timePassed = datetime.now() - self._updatedInfoAt
        # timedelta.seconds is only the sub-day remainder; use total_seconds().
        if timePassed.total_seconds() > s.sourceInfoCacheTime()*86400:
return True
return False
def needPlaylistInfo(self):
if not self._uploadPlaylist:
return True
return False
def cache(self, fromPages=False):
if fromPages:
self.cacheFile.dumpObject(self)
return
self.playlists.enterCachedModeAndCacheSourceObject()
####################
## Private Methods##
####################
def _channelInfoRequest(self):
requestInfo = dict(part = "contentDetails,snippet,statistics")
if self.mine: requestInfo['mine'] = True
elif self.channelId: requestInfo['id'] = self.channelId
else: requestInfo['forUsername'] = self.username
request = service.service().channels().list(**requestInfo)
return request
def _processChannelInfoResposne(self, response):
items = response.get("items", [])
if len(items) != 1:
raise ValueError('Channel list request by username, id, or mine should return exactly 1 result. Returned: %s \nusername: %s. channelId:%s' % (len(items), self.username, self.channelId))
item = items[0]
return Channel._processChannelInfoItem(item)
@staticmethod
def _processChannelInfoItem(item):
channelId = item['id']
snippet = item['snippet']
contentDetails = item['contentDetails']
statistics = item['statistics']
return channelId, snippet, contentDetails, statistics
def _processBatchChannelInfoResponse(self, request_id, response, exception):
if exception:
raise ValueError('Exception thrown in channel info request from batch. \nRequest ID: {0}. Channel username: {1}. Channel ID: {2} \nException: {3}'.format(request_id, self.username, self.channelId, exception))
channelId, snippet, contentDetails, statistics = self._processChannelInfoResposne(response)
return channelId, snippet, contentDetails, statistics
def _playlistsRequest(self, pageToken):
return service.service().playlists().list(channelId=self.channelId, part='snippet,contentDetails', maxResults=50, pageToken=pageToken)
def _playlistsResponseProcesse(self, response):
playlists = []
for item in response['items']:
playlist = Playlist.fromPlaylistsRequest(item)
playlists.append(playlist)
return playlists
def browseUrl(self, pageNum=1):
return router.browseYoutubeChannelUrl(self.cacheFile, pageNum)
#
# def fromBatchInfoRequest(username=None, channelId=None, sourceCallback):
# request = Channel._channelInfoRequest(username, channelId)
#
# def callback(request_id, response, exception):
# channelId, snippet, contentDetails = Channel._processBatchChannelInfoResponse(request_id, response, exception, username, channelId)
# channel = Channel(snippet, channelId, username, contentDetails)
#
# sourceCallback(channel)
#
# return (request, callback)
def fromCacheFile(cacheFile, mine=False):
if mine:
global mineLoaded
if mineLoaded:
raise ValueError("Tried loading my channel from cache when it's already in memory")
channel = cacheFile.loadObject()
channel.playlists.loadFromCachedMode()
mineLoaded = channel
return channel
global channelsLoaded
channel = cacheFile.loadObject()
channel.playlists.loadFromCachedMode()
if channel.channelId in channelsLoaded:
raise ValueError("Tried loading channel from cache when it's already in memory")
channelsLoaded[channel.channelId] = channel
return channel
def _fromMemoryOrCache(channelId=None, username=None, mine=None):
if username is None and channelId is None and mine is None:
raise ValueError('Channel loader must get either username, channelId or mine. Got neither.')
if mine:
if mineLoaded:
return mineLoaded
cacheFile = File.fromNameAndDir(MY_CHANNEL_FILE, CHANNEL_CACHE_DIR)
if cacheFile.exists():
channel = fromCacheFile(cacheFile, mine=True)
return channel
return None
if username and not channelId:
if not usernameDic.has(username):
return None
channelId = usernameDic.get(username)
if channelId in channelsLoaded:
return channelsLoaded[channelId]
cacheFileName = channelId + CACHE_FILE_EXTENSION
cacheFile = File.fromNameAndDir(cacheFileName, CHANNEL_CACHE_DIR)
if cacheFile.exists():
channel = fromCacheFile(cacheFile)
return channel
return None
def fromUserOrId(channelId=None, username=None):
channel = _fromMemoryOrCache(channelId, username)
if not channel:
channel = Channel(channelId, username)
    needsInfoUpdate = channel.needsInfoUpdate()
return channel, needsInfoUpdate
def mine():
channel = _fromMemoryOrCache(mine=True)
if not channel:
channel = Channel(mine=True)
    needsInfoUpdate = channel.needsInfoUpdate()
return channel, needsInfoUpdate
def _fromSnippet(channelId, snippet, contentDetails=None, statistics=None, videoCount=None, subNewItemCount=None):
channel = _fromMemoryOrCache(channelId)
if not channel:
channel = Channel(channelId=channelId)
channel.updateInfo(snippet, channelId, contentDetails, statistics, videoCount, subNewItemCount)
return channel
def fromChannelsRequest(item):
channelId, snippet, contentDetails, statistics = Channel._processChannelInfoItem(item)
return _fromSnippet(channelId, snippet, contentDetails, statistics)
def fromSearchRequest(item):
channelId = item['id']['channelId']
snippet = item['snippet']
#username = snippet['channelTitle'] #maybe use later, make sure is correct (not positive)
return _fromSnippet(channelId, snippet) #incomplete info, need to call fetchlInfo if channel not found in cache
def fromSubscriptionsRequest(item):
channelId = item['snippet']['resourceId']['channelId']
snippet = item['snippet']
videoCount = int(item['contentDetails']['totalItemCount'])
newItemCount = item['contentDetails']['newItemCount']
if videoCount == 0:
videoCount = None
return _fromSnippet(channelId, snippet, videoCount=videoCount, subNewItemCount=newItemCount) #incomplete info, need to call fetchInfo if channel not found in cache
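
# Usage sketch (the channel name is hypothetical; fetchInfo() hits the YouTube
# API only when the cached info is stale):
#
#   channel, needs_update = fromUserOrId(username='somechannel')
#   if needs_update:
#       channel.fetchInfo()
#   print channel.channelId, channel.videoCount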
| gpl-2.0 | 3,947,631,276,630,320,600 | 30.355609 | 220 | 0.610062 | false |
fahhem/openhtf | openhtf/util/__init__.py | 1 | 6047 | # Copyright 2014 Google Inc. All Rights Reserved.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""One-off utilities."""
import logging
import re
import threading
import time
import weakref
from datetime import datetime
from pkg_resources import get_distribution, DistributionNotFound
import mutablerecords
from openhtf.util import threads
def _log_every_n_to_logger(n, logger, level, message, *args): # pylint: disable=invalid-name
"""Logs the given message every n calls to a logger.
Args:
n: Number of calls before logging.
logger: The logger to which to log.
level: The logging level (e.g. logging.INFO).
message: A message to log
*args: Any format args for the message.
Returns:
A method that logs and returns True every n calls.
"""
logger = logger or logging.getLogger()
def _gen(): # pylint: disable=missing-docstring
while True:
for _ in xrange(n):
yield False
logger.log(level, message, *args)
yield True
gen = _gen()
return lambda: next(gen)
def log_every_n(n, level, message, *args): # pylint: disable=invalid-name
"""Logs a message every n calls. See _log_every_n_to_logger."""
return _log_every_n_to_logger(n, None, level, message, *args)
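
# For example, a sketch that only logs every 100th iteration of a polling loop
# (the loop condition and message are illustrative):
#
#   warn_slow = log_every_n(100, logging.WARNING, 'still waiting on %s', 'DUT')
#   while still_polling():
#     warn_slow()  # Returns True on the calls that actually logged.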
def time_millis(): # pylint: disable=invalid-name
"""The time in milliseconds."""
return int(time.time() * 1000)
def get_version():
"""Return the version string of the 'openhtf' package.
Note: the version number doesn't seem to get properly set when using ipython.
"""
version = 'Unknown'
try:
    version = get_distribution('openhtf').version
except DistributionNotFound:
version = 'Unknown - Perhaps openhtf was not installed via setup.py or pip.'
return version
class NonLocalResult(mutablerecords.Record('NonLocal', [], {'result': None})):
"""Holds a single result as a nonlocal variable.
Comparable to using Python 3's nonlocal keyword, it allows an inner function
to set the value in an outer function's namespace:
def WrappingFunction():
x = NonLocalResult()
def InnerFunction():
# This is what we'd do in Python 3:
# nonlocal x
# x = 1
# In Python 2 we use NonLocalResult instead.
x.result = 1
InnerFunction()
return x.result
"""
# TODO(jethier): Add a pylint plugin to avoid the no-self-argument for this.
class classproperty(object):
"""Exactly what it sounds like.
Note that classproperties don't have setters, so setting them will replace
the classproperty with the new value. In most use cases (forcing subclasses
to override the classproperty, for example) this is desired.
"""
def __init__(self, func):
self._func = func
def __get__(self, instance, owner):
return self._func(owner)
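
# A minimal usage sketch: a subclass "overrides" the classproperty simply by
# redefining it (class names below are illustrative):
#
#   class Base(object):
#     @classproperty
#     def name(cls):
#       raise NotImplementedError('Subclasses must define name.')
#
#   class Phase(Base):
#     @classproperty
#     def name(cls):
#       return cls.__name__.lower()
#
#   Phase.name  # 'phase', computed without instantiating Phase.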
def partial_format(target, **kwargs):
"""Formats a string without requiring all values to be present.
This function allows substitutions to be gradually made in several steps
rather than all at once. Similar to string.Template.safe_substitute.
"""
output = target[:]
for tag, var in re.findall(r'(\{(.*?)\})', output):
root = var.split('.')[0] # dot notation
root = root.split('[')[0] # dict notation
if root in kwargs:
output = output.replace(tag, tag.format(**{root: kwargs[root]}))
return output
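
# For example, substitutions can be applied in separate passes; unknown keys
# are left intact for a later call:
#
#   s = partial_format('{dut} failed at {station}', dut='dut-1')
#   # -> 'dut-1 failed at {station}'
#   partial_format(s, station='station-3')
#   # -> 'dut-1 failed at station-3'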
def format_string(target, kwargs):
"""Formats a string in any of three ways (or not at all).
Args:
target: The target string to format. This can be a function that takes a
dict as its only argument, a string with {}- or %-based formatting, or
a basic string with none of those. In the latter case, the string is
returned as-is, but in all other cases the string is formatted (or the
callback called) with the given kwargs.
If this is None (or otherwise falsey), it is returned immediately.
kwargs: The arguments to use for formatting.
Passed to safe_format, %, or target if it's
callable.
"""
if not target:
return target
if callable(target):
return target(**kwargs)
if '{' in target:
return partial_format(target, **kwargs)
if '%' in target:
return target % kwargs
return target
class SubscribableStateMixin(object):
"""Gives an object the capability of notifying watchers of state changes.
The state should be represented as a dictionary and returned by _asdict.
An object that wants to watch this object's state should call
asdict_with_event to get the current state and an event object. This object
can then notify watchers holding those events that the state has changed by
calling notify_update.
"""
def __init__(self):
super(SubscribableStateMixin, self).__init__()
self._lock = threading.Lock() # Used by threads.synchronized.
self._update_events = weakref.WeakSet()
def _asdict(self):
raise NotImplementedError(
'Subclasses of SubscribableStateMixin must implement _asdict.')
@threads.synchronized
def asdict_with_event(self):
"""Get a dict representation of this object and an update event.
Returns:
state: Dict representation of this object.
update_event: An event that is guaranteed to be set if an update has been
triggered since the returned dict was generated.
"""
event = threading.Event()
self._update_events.add(event)
return self._asdict(), event
@threads.synchronized
def notify_update(self):
"""Notify any update events that there was an update."""
for event in self._update_events:
event.set()
self._update_events.clear()
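
# A watcher sketch (names are illustrative): poll the state once, then block on
# the returned event until notify_update() fires.
#
#   class Station(SubscribableStateMixin):
#     def __init__(self):
#       super(Station, self).__init__()
#       self.status = 'idle'
#     def _asdict(self):
#       return {'status': self.status}
#
#   station = Station()
#   state, event = station.asdict_with_event()
#   # ... later, possibly from another thread:
#   station.status = 'running'
#   station.notify_update()  # sets `event`, so a waiting watcher wakes up.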
| apache-2.0 | -1,061,353,911,702,329,900 | 30.494792 | 93 | 0.696378 | false |
sjpet/epysteme | epysteme/pandas.py | 1 | 1167 | # -*- coding: utf-8 -*-
"""Functions that extend and leverage pandas data frames.
Author: Stefan Peterson
"""
import re
import pandas as pd
col_name_regex = re.compile(r"(.*?)([-_\.]?)([0-9]*)$")
def is_successor(this, that):
try:
this_index = int(this[2])
that_index = int(that[2])
except ValueError:
return False
return this[0] == that[0] and \
this[1] == that[1] and \
this_index == that_index + 1
def csv_to_pd(file_name):
df = pd.read_csv(file_name)
# Group columns
    # Every group in col_name_regex always participates in a match (possibly
    # as ''), so plain groups() is sufficient here.
    col_map = (re.match(col_name_regex, s).groups() for s in df.columns)
    columns_ = [[next(col_map)]]
for column in col_map:
if is_successor(column, columns_[-1][-1]):
columns_[-1].append(column)
else:
columns_.append([column])
new_columns = []
for group in columns_:
if len(group) == 1:
new_columns.append(("".join(group[0]), ""))
else:
new_columns.extend([(column[0], int(column[2]))
for column in group])
df.columns = pd.MultiIndex.from_tuples(new_columns)
return df
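
# For example, a hypothetical CSV with headers "id,score_1,score_2" yields the
# two-level columns ('id', ''), ('score', 1) and ('score', 2), so a numbered
# series can be addressed as one group:
#
#   df = csv_to_pd('scores.csv')
#   df['score']    # sub-frame holding columns 1 and 2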
| mit | -3,200,114,144,116,869,000 | 23.3125 | 78 | 0.542416 | false |
rwl/openpowersystem | dynamics/dynamics/turbine_governors/gov_hydro3.py | 1 | 1390 | #------------------------------------------------------------------------------
# Copyright (C) 2009 Richard Lincoln
#
# This program is free software; you can redistribute it and/or modify it under
# the terms of the GNU Affero General Public License as published by the Free
# Software Foundation; version 2 dated June, 1991.
#
# This software is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANDABILITY or FITNESS
# FOR A PARTICULAR PURPOSE. See the GNU General Public License for more
# details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
#------------------------------------------------------------------------------
# <<< imports
# @generated
from dynamics.dynamics.turbine_governors.turbine_governor import TurbineGovernor
from google.appengine.ext import db
# >>> imports
class GovHydro3(TurbineGovernor):
# <<< gov_hydro3.attributes
# @generated
# >>> gov_hydro3.attributes
# <<< gov_hydro3.references
# @generated
# >>> gov_hydro3.references
# <<< gov_hydro3.operations
# @generated
# >>> gov_hydro3.operations
# EOF -------------------------------------------------------------------------
| agpl-3.0 | -4,395,564,334,420,245,000 | 32.902439 | 80 | 0.607194 | false |
habitam/habitam-core | habitam/entities/billable.py | 1 | 9250 | # -*- coding: utf-8 -*-
'''
This file is part of Habitam.
Habitam is free software: you can redistribute it and/or modify
it under the terms of the GNU Affero General Public License as
published by the Free Software Foundation, either version 3 of
the License, or (at your option) any later version.
Habitam is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU Affero General Public License for more details.
You should have received a copy of the GNU Affero General Public
License along with Habitam. If not, see
<http://www.gnu.org/licenses/>.
Created on Jul 15, 2013
@author: Stefan Guna
'''
from datetime import datetime
from decimal import Decimal
from django.db import models
from django.utils import timezone
from django.utils.translation import ugettext as _
from habitam.entities.accessor import apartment_by_pk, apartment_consumption, \
service_consumption
from habitam.entities.base import SingleAccountEntity
from habitam.financial.models import Quota
import logging
logger = logging.getLogger(__name__)
class Billable(SingleAccountEntity):
QUOTA_TYPES = (
        ('equally', _(u'equally')),
        ('inhabitance', _(u'by number of inhabitants')),
        ('area', _(u'by area')),
        ('rooms', _(u'by number of rooms')),
        ('consumption', _(u'by consumption')),
        ('manual', _(u'manual (undivided) quota')),
        ('noquota', _('on each entry')),
)
archived = models.BooleanField(default=False)
archive_date = models.DateTimeField(null=True, blank=True)
billed = models.ForeignKey('ApartmentGroup')
quota_type = models.CharField(max_length=15, choices=QUOTA_TYPES)
class Meta:
abstract = True
def __init__(self, *args, **kwargs):
        money_type = kwargs.pop('money_type', 'money_type')
        account_type = kwargs.pop('account_type', 'std')
super(Billable, self).__init__(account_type, money_type, *args, **kwargs)
try:
self._old_billed = self.billed
except:
self._old_billed = None
try:
self._old_quota_type = self.quota_type
except:
self._old_quota_type = None
self._old_archived = self.archived
def __change_quotas(self):
if self.quota_type in ['noquota', 'consumption']:
return False
return self.quota_type == 'manual' or self._old_billed != self.billed \
or self._old_quota_type != self.quota_type
def __change_billed(self):
return self._old_billed != self.billed and self._old_billed != None
def __new_charge_with_quotas(self, amount, description, date, invoice):
accounts = []
for ap in self.billed.apartments():
accounts.append(ap.account)
mine = self.billed.default_account
account = mine if mine != None else self.building().default_account
self.account.new_charge(amount, date, description, account, accounts,
self.charge_type(), invoice)
def __new_charge_with_consumptions(self, amount, description, ap_consumptions,
consumption, date, invoice):
ops = []
db_ap_consumptions = []
declared = sum(ap_consumptions.values())
if declared > consumption:
            raise NameError(_('Something is odd about the declared consumption! It exceeds the amount on the document! Declared by the tenants=') + str(declared))
per_unit = amount / consumption
logger.info('Declared consumption is %f, price per unit is %f' %
(declared, per_unit))
for k, v in ap_consumptions.items():
ap = apartment_by_pk(k)
total_ap = Decimal(v) / declared * amount
loss = total_ap - v * per_unit
logger.info('Consumption for %s is %f, total to pay %f, lost %f' %
(ap, v, total_ap, loss))
ops.append((ap.account, total_ap, loss))
db_ap_consumptions.append(apartment_consumption(v, ap))
doc = self.account.new_multi_transfer(description, self.billed.default_account,
ops, date, self.charge_type(),
invoice=invoice)
svc_consumption = service_consumption(consumption, self, doc)
svc_consumption.save()
for ap_consumption in db_ap_consumptions:
ap_consumption.doc = doc
ap_consumption.save()
def __new_charge_without_quotas(self, ap_sums, description, date, invoice):
ops = []
for k, v in ap_sums.items():
ap = apartment_by_pk(k)
ops.append((ap.account, v))
self.account.new_multi_transfer(description, self.billed.default_account, ops,
date, self.charge_type(),
invoice=invoice)
def __unicode__(self):
return self.name
def __update_archived(self):
if self.archived == False:
self.archive_date = None
return
if self.archived == self._old_archived:
return
self.archive_date = datetime.now()
def __update_quotas(self, ap_quotas):
if ap_quotas != None and self.quota_type == 'manual':
self.set_manual_quota(ap_quotas)
return
self.set_quota()
def building(self):
return self.billed.building()
def can_delete(self):
return self.account.can_delete()
def initial_operation(self):
return {'amount': 0}
    def new_inbound_operation(self, amount, description, invoice=None, ap_sums=None,
                              ap_consumptions=None, consumption=None,
                              date=None):
        # timezone.now() as a default argument would be evaluated only once, at
        # class definition time, so the timestamp is resolved per call instead.
        if date is None:
            date = timezone.now()
logger.info(u'new inbound op for %s amount=%f description=%s ap_sums=%s ap_consumptions=%s consumption=%s date=%s' %
(self, amount, description, ap_sums, ap_consumptions, consumption, date))
if ap_consumptions != None:
self.__new_charge_with_consumptions(amount, description, ap_consumptions,
consumption, date, invoice)
elif ap_sums != None:
self.__new_charge_without_quotas(ap_sums, description, date, invoice)
else:
self.__new_charge_with_quotas(amount, description, date, invoice)
def delete(self):
if not self.can_delete():
            raise ValueError(_(u'This service cannot be deleted'))
Quota.del_quota(self.account)
self.account.delete()
super(Billable, self).delete()
def drop_quota(self):
logger.info('Pruning all quotas on %s', self)
Quota.del_quota(self.account)
def save(self, **kwargs):
ap_quotas = None
if 'ap_quotas' in kwargs.keys():
ap_quotas = kwargs['ap_quotas']
del kwargs['ap_quotas']
self.__update_archived()
#TODO this is a hack
if not 'money_type' in kwargs:
kwargs['money_type'] = 'cash'
if not 'account_type' in kwargs:
kwargs['account_type'] = 'special'
super(Billable, self).save(**kwargs)
if self.__change_billed() or self.__change_quotas() or \
self.quota_type in ['noquota', 'consumption']:
self.drop_quota()
if self.__change_quotas():
self.__update_quotas(ap_quotas)
def set_manual_quota(self, ap_quotas):
logger.info('Setting quota %s (%s) on %s', self.quota_type, ap_quotas,
self)
if self.quota_type != 'manual':
logger.error('Quota type %s is invalid', self.quota_type)
            raise NameError(_(u'The quota type is invalid'))
for k, v in ap_quotas.items():
a = apartment_by_pk(k)
Quota.set_quota(self.account, a.account, v)
def set_quota(self):
logger.info('Setting quota %s on %s', self.quota_type, self)
found = False
for qt in Billable.QUOTA_TYPES:
if qt[0] == self.quota_type:
found = True
if self.quota_type in ['manual', 'noquota'] or not found:
logger.error('Quota type %s is invalid', self.quota_type)
            raise NameError(_(u'The quota type is invalid'))
apartments = self.billed.apartments()
total = reduce(lambda t, a: t + a.weight(self.quota_type), apartments,
0)
for a in apartments:
Quota.set_quota(self.account, a.account,
Decimal(a.weight(self.quota_type)) / Decimal(total))
def to_collect(self):
charged = self.account.charged()
received = self.account.received()
return charged - received
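
# A concrete-subclass sketch (hypothetical: the real Habitam services define
# charge_type() and extra fields that are not shown here):
#
#   class Service(Billable):
#       def charge_type(self):
#           return 'service'
#
#   svc = Service(name='Water', billed=building_group, quota_type='consumption')
#   svc.save()
#   svc.new_inbound_operation(Decimal(100), 'January invoice',
#                             ap_consumptions={1: 10, 2: 15}, consumption=30)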
| agpl-3.0 | 3,038,924,432,090,427,000 | 38.810345 | 144 | 0.581529 | false |
sileht/deb-openstack-python-keystoneclient | keystoneclient/v2_0/endpoints.py | 1 | 1348 | # Copyright 2012 Canonical Ltd.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from keystoneclient import base
class Endpoint(base.Resource):
def __repr__(self):
return "<Endpoint %s>" % self._info
class EndpointManager(base.ManagerWithFind):
resource_class = Endpoint
def list(self):
return self._list('/endpoints', 'endpoints')
def create(self, region, service_id, publicurl, adminurl, internalurl):
body = {'endpoint': {'region': region,
'service_id': service_id,
'publicurl': publicurl,
'adminurl': adminurl,
'internalurl': internalurl}}
return self._create('/endpoints', body, 'endpoint')
def delete(self, id):
return self._delete('/endpoints/%s' % id)
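
# Usage sketch (hypothetical URLs; `keystone` is an authenticated
# keystoneclient.v2_0.client.Client instance):
#
#   endpoint = keystone.endpoints.create(
#       region='RegionOne',
#       service_id=service.id,
#       publicurl='http://203.0.113.10:8774/v2/%(tenant_id)s',
#       adminurl='http://203.0.113.10:8774/v2/%(tenant_id)s',
#       internalurl='http://203.0.113.10:8774/v2/%(tenant_id)s')
#   keystone.endpoints.delete(endpoint.id)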
| apache-2.0 | -6,486,988,229,904,656,000 | 33.564103 | 78 | 0.654303 | false |
pepsipepsi/nodebox_opengl_python3 | nodebox/graphics/physics.py | 1 | 77201 | #=== PHYSICS =========================================================================================
# 2D physics functions.
# Authors: Tom De Smedt, Giorgio Olivero (Vector class)
# License: BSD (see LICENSE.txt for details).
# Copyright (c) 2008 City In A Bottle (cityinabottle.org)
# http://cityinabottle.org/nodebox
# This module can benefit greatly from loading psyco.
from math import sqrt, pow
from math import sin, cos, atan2, degrees, radians, pi
from random import random
from heapq import heappush, heappop
from warnings import warn
# float("inf") doesn't work on windows.
INFINITE = 1e20
# This module is standalone, line(), ellipse() and Text.draw()
# must be either implemented or patched:
def line(x1, y1, x2, y2, stroke=(0,0,0,1), strokewidth=1):
pass
def ellipse(x, y, width, height, fill=(0,0,0,1), stroke=None, strokewidth=1):
pass
class Text:
def __init__(self, string, **kwargs):
self.string = string
self.__dict__.update(kwargs)
def copy(self):
k = self.__dict__.copy()
k.pop("string")
return Text(self.string, **k)
def draw(self):
pass
#=====================================================================================================
#--- VECTOR ------------------------------------------------------------------------------------------
# A Euclidean vector (sometimes called a geometric or spatial vector, or - as here - simply a vector)
# is a geometric object that has both a magnitude (or length) and direction.
# A vector is frequently represented by a line segment with an arrow.
class Vector(object):
def __init__(self, x=0, y=0, z=0, length=None, angle=None):
""" A vector represents a direction and a magnitude (or length).
Vectors can be added, subtracted, multiplied, divided, flipped, and 2D rotated.
Vectors are used in physics to represent velocity and acceleration.
"""
self.x = float(x)
self.y = float(y)
self.z = float(z)
if length is not None:
self.length = length
if angle is not None:
self.angle = angle
def copy(self):
return Vector(self.x, self.y, self.z)
def __getitem__(self, i):
return (self.x, self.y, self.z)[i]
def __setitem__(self, i, v):
setattr(self, ("x", "y", "z")[i], float(v))
def _get_xyz(self):
return (self.x, self.y, self.z)
def _set_xyz(self, coords):
self.x = float(coords[0])
self.y = float(coords[1])
self.z = float(coords[2])
xyz = property(_get_xyz, _set_xyz)
def _get_xy(self):
return (self.x, self.y)
def _set_xy(self, coords):
self.x = float(coords[0])
self.y = float(coords[1])
xy = property(_get_xy, _set_xy)
def _get_length(self):
return sqrt(self.x**2 + self.y**2 + self.z**2)
def _set_length(self, n):
d = self.length or 1
self.x *= n/d
self.y *= n/d
self.z *= n/d
length = magnitude = property(_get_length, _set_length)
def distance(self, v):
""" Returns the distance between two vectors,
e.g. if two vectors would be two sides of a triangle, returns the third side.
"""
dx = v.x - self.x
dy = v.y - self.y
dz = v.z - self.z
return sqrt(dx**2 + dy**2 + dz**2)
def distance2(self, v):
# Squared distance, avoiding the costly root calculation.
return (v.x-self.x)**2 + (v.y-self.y)**2 + (v.z-self.z)**2
def normalize(self):
""" Normalizes the vector to a unit vector with length=1.
"""
d = self.length or 1
self.x /= d
self.y /= d
self.z /= d
def _normalized(self):
""" Yields a new vector that is the normalized vector of this vector.
"""
d = self.length
if d == 0:
return self.copy()
return Vector(self.x/d, self.y/d, self.z/d)
normalized = unit = property(_normalized)
def reverse(self):
""" Reverses the direction of the vector so it points in the opposite direction.
"""
self.x = -self.x
self.y = -self.y
self.z = -self.z
flip = reverse
def _reversed(self):
""" Yields a new vector pointing in the opposite direction of this vector.
"""
return Vector(-self.x, -self.y, -self.z)
reversed = flipped = inverse = property(_reversed)
# v.normal, v.angle, v.rotate(), v.rotated() and v.angle_to() are defined in 2D.
# v.in2D.rotate() is here for decorational purposes.
@property
def in2D(self):
return self
def _orthogonal(self):
""" Yields a new vector whose 2D angle is 90 degrees (perpendicular) to this vector.
In 3D, there would be many perpendicular vectors.
"""
return Vector(self.y, -self.x, self.z)
orthogonal = perpendicular = normal = property(_orthogonal)
def _get_angle(self):
""" Yields the 2D direction of the vector.
"""
return degrees(atan2(self.y, self.x))
def _set_angle(self, degrees):
d = self.length
self.x = cos(radians(degrees)) * d
self.y = sin(radians(degrees)) * d
angle = direction = property(_get_angle, _set_angle)
def rotate(self, degrees):
""" Rotates the direction of the vector in 2D.
"""
self.angle += degrees
def rotated(self, degrees):
""" Returns a copy of the vector with direction rotated in 2D.
"""
v = self.copy()
v.rotate(degrees)
return v
def angle_to(self, v):
""" Returns the 2D angle between two vectors.
"""
return degrees(atan2(v.y, v.x) - atan2(self.y, self.x))
angle_between = angle_to
# Arithmetic operators.
# + - * / returns new vector objects.
def __add__(self, v):
if isinstance(v, (int, float)):
return Vector(self.x+v, self.y+v, self.z+v)
return Vector(self.x+v.x, self.y+v.y, self.z+v.z)
def __sub__(self, v):
if isinstance(v, (int, float)):
return Vector(self.x-v, self.y-v, self.z-v)
return Vector(self.x-v.x, self.y-v.y, self.z-v.z)
def __mul__(self, v):
if isinstance(v, (int, float)):
return Vector(self.x*v, self.y*v, self.z*v)
return Vector(self.x*v.x, self.y*v.y, self.z*v.z)
    def __truediv__(self, v):
        if isinstance(v, (int, float)):
            return Vector(self.x/v, self.y/v, self.z/v)
        return Vector(self.x/v.x, self.y/v.y, self.z/v.z)

    __div__ = __truediv__  # Python 2 fallback; Python 3's / uses __truediv__.
# += -= *= /= modify the vector coordinates in-place.
def __iadd__(self, v):
if isinstance(v, (int, float)):
self.x+=v; self.y+=v; self.z+=v; return self
self.x+=v.x; self.y+=v.y; self.z+=v.z; return self
def __isub__(self, v):
if isinstance(v, (int, float)):
self.x-=v; self.y-=v; self.z-=v; return self
self.x-=v.x; self.y-=v.y; self.z-=v.z; return self
def __imul__(self, v):
if isinstance(v, (int, float)):
self.x*=v; self.y*=v; self.z*=v; return self
self.x*=v.x; self.y*=v.y; self.z*=v.z; return self
    def __itruediv__(self, v):
        if isinstance(v, (int, float)):
            self.x/=v; self.y/=v; self.z/=v; return self
        self.x/=v.x; self.y/=v.y; self.z/=v.z; return self

    __idiv__ = __itruediv__  # Python 2 fallback.
def dot(self, v):
""" Returns a scalar that is the dot product between the two vectors.
"""
return self.x*v.x + self.y*v.y + self.z*v.z
def cross(self, v):
""" Returns a new vector that is the cross product between the two vectors.
"""
return Vector(self.y*v.z - self.z*v.y,
self.z*v.x - self.x*v.z,
self.x*v.y - self.y*v.x)
def __neg__(self):
return Vector(-self.x, -self.y, -self.z)
def __eq__(self, v):
return isinstance(v, Vector) and self.x == v.x and self.y == v.y and self.z == v.z
def __ne__(self, v):
return not self.__eq__(v)
def __repr__(self):
return "%s(%.2f, %.2f, %.2f)" % (self.__class__.__name__, self.x, self.y, self.z)
def draw(self, x, y):
""" Draws the vector in 2D (z-axis is ignored).
Set stroke() and strokewidth() first.
"""
ellipse(x, y, 4, 4)
line(x, y, x+self.x, y+self.y)
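
# For example (a quick sketch of the operations above):
#
#   v1 = Vector(3, 4)       # v1.length == 5.0
#   v2 = Vector(0, 1)       # unit vector pointing along +y
#   v3 = v1 + v2            # Vector(3.00, 5.00, 0.00)
#   d  = v1.dot(v2)         # 4.0, the projection of v1 onto v2
#   a  = v1.angle_to(v2)    # ~36.9, the angle in degrees between the headings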
#=====================================================================================================
#--- FLOCKING ----------------------------------------------------------------------------------------
# Boids is an artificial life program, developed by Craig Reynolds in 1986,
# which simulates the flocking behavior of birds.
# Boids is an example of emergent behavior, the complexity of Boids arises
# from the interaction of individual agents adhering to a set of simple rules:
# - separation: steer to avoid crowding local flockmates,
# - alignment: steer towards the average heading of local flockmates,
# - cohesion: steer to move toward the average position of local flockmates.
# Unexpected behavior, such as splitting flocks and reuniting after avoiding obstacles,
# can be considered emergent. The boids framework is often used in computer graphics,
# providing realistic-looking representations of flocks of birds and other creatures,
# such as schools of fish or herds of animals.
_UID = 0
def _uid():
global _UID; _UID+=1; return _UID
class Boid:
def __init__(self, flock, x=0, y=0, z=0, sight=70, space=30):
""" An agent in a flock with an (x,y,z)-position subject to different forces.
- sight : radius of local flockmates when calculating cohesion and alignment.
- space : radius of personal space when calculating separation.
"""
self._id = _uid()
self.flock = flock
self.x = x
self.y = y
self.z = z
self.velocity = Vector(random()*2-1, random()*2-1, random()*2-1)
self.target = None # A target Vector towards which the boid will steer.
self.sight = sight # The radius of cohesion and alignment, and visible obstacles.
self.space = space # The radius of separation.
self.dodge = False # Avoiding an obstacle?
self.crowd = 0 # Percentage (0.0-1.0) of flockmates within sight.
def __eq__(self, other):
# Comparing boids by id makes it significantly faster.
return isinstance(other, Boid) and self._id == other._id
def __ne__(self, other):
return not self.__eq__(other)
def copy(self):
b = Boid(self.flock, self.x, self.y, self.z, self.sight, self.space)
b.velocity = self.velocity.copy()
b.target = self.target
return b
@property
def heading(self):
""" The boid's heading as an angle in degrees.
"""
return self.velocity.angle
@property
def depth(self):
""" The boid's relative depth (0.0-1.0) in the flock's container box.
"""
return not self.flock.depth and 1.0 or max(0.0, min(1.0, self.z / self.flock.depth))
def near(self, boid, distance=50):
""" Returns True if the given boid is within distance.
"""
# Distance is measured in a box instead of a sphere for performance.
return abs(self.x - boid.x) < distance and \
abs(self.y - boid.y) < distance and \
abs(self.z - boid.z) < distance
def separation(self, distance=25):
""" Returns steering velocity (vx,vy,vz) to avoid crowding local flockmates.
"""
vx = vy = vz = 0.0
for b in self.flock:
if b != self:
if abs(self.x-b.x) < distance: vx += self.x - b.x
if abs(self.y-b.y) < distance: vy += self.y - b.y
if abs(self.z-b.z) < distance: vz += self.z - b.z
return vx, vy, vz
def alignment(self, distance=50):
""" Returns steering velocity (vx,vy,vz) towards the average heading of local flockmates.
"""
vx = vy = vz = n = 0.0
for b in self.flock:
if b != self and b.near(self, distance):
vx += b.velocity.x
vy += b.velocity.y
vz += b.velocity.z; n += 1
if n:
return (vx/n-self.velocity.x), (vy/n-self.velocity.y), (vz/n-self.velocity.z)
return vx, vy, vz
def cohesion(self, distance=40):
""" Returns steering velocity (vx,vy,vz) towards the average position of local flockmates.
"""
vx = vy = vz = n = 0.0
for b in self.flock:
if b != self and b.near(self, distance):
vx += b.x
vy += b.y
vz += b.z; n += 1
# Calculate percentage of flockmates within sight.
self.crowd = float(n) / (len(self.flock) or 1)
if n:
return (vx/n-self.x), (vy/n-self.y), (vz/n-self.z)
return vx, vy, vz
def avoidance(self):
""" Returns steering velocity (vx,vy,0) to avoid 2D obstacles.
The boid is not guaranteed to avoid collision.
"""
vx = vy = 0.0
self.dodge = False
for o in self.flock.obstacles:
dx = o.x - self.x
dy = o.y - self.y
d = sqrt(dx**2 + dy**2) # Distance to obstacle.
s = (self.sight + o.radius) # Visibility range.
if d < s:
self.dodge = True
# Force grows exponentially from 0.0 to 1.0,
# where 1.0 means the boid touches the obstacle circumference.
f = (d-o.radius) / (s-o.radius)
f = (1-f)**2
if d < o.radius:
f *= 4
#self.velocity.reverse()
vx -= dx * f
vy -= dy * f
return (vx, vy, 0)
def limit(self, speed=10.0):
""" Limits the boid's velocity (the boid can momentarily go very fast).
"""
v = self.velocity
m = max(abs(v.x), abs(v.y), abs(v.z)) or 1
if abs(v.x) > speed: v.x = v.x / m * speed
if abs(v.y) > speed: v.y = v.y / m * speed
if abs(v.z) > speed: v.z = v.z / m * speed
def update(self, separation=0.2, cohesion=0.2, alignment=0.6, avoidance=0.6, target=0.2, limit=15.0):
""" Updates the boid's velocity based on the cohesion, separation and alignment forces.
- separation: force that keeps boids apart.
- cohesion : force that keeps boids closer together.
- alignment : force that makes boids move in the same direction.
- avoidance : force that steers the boid away from obstacles.
- target : force that steers the boid towards a target vector.
- limit : maximum velocity.
"""
f = 0.1
m1, m2, m3, m4, m5 = separation*f, cohesion*f, alignment*f, avoidance*f, target*f
vx1, vy1, vz1 = self.separation(self.space)
vx2, vy2, vz2 = self.cohesion(self.sight)
vx3, vy3, vz3 = self.alignment(self.sight)
vx4, vy4, vz4 = self.avoidance()
vx5, vy5, vz5 = self.target and (
(self.target.x-self.x),
(self.target.y-self.y),
(self.target.z-self.z)) or (0,0,0)
self.velocity.x += m1*vx1 + m2*vx2 + m3*vx3 + m4*vx4 + m5*vx5
self.velocity.y += m1*vy1 + m2*vy2 + m3*vy3 + m4*vy4 + m5*vy5
self.velocity.z += m1*vz1 + m2*vz2 + m3*vz3 + m4*vz4 + m5*vz5
self.velocity.z = self.flock.depth and self.velocity.z or 0 # No z-axis for Flock.depth=0
self.limit(speed=limit)
self.x += self.velocity.x
self.y += self.velocity.y
self.z += self.velocity.z
def seek(self, vector):
""" Sets the given Vector as the boid's target.
"""
self.target = vector
def __repr__(self):
return "Boid(x=%.1f, y=%.1f, z=%.1f)" % (self.x, self.y, self.z)
class Obstacle:
def __init__(self, x=0, y=0, z=0, radius=10):
""" An obstacle with an (x, y, z) position and a radius.
Boids will steer around obstacles that the flock is aware of, and that they can see.
"""
self.x = x
self.y = y
self.z = z
self.radius = radius
def copy(self):
return Obstacle(self.x, self.y, self.z, self.radius)
def __repr__(self):
return "Obstacle(x=%.1f, y=%.1f, z=%.1f, radius=%.1f)" % (self.x, self.y, self.z, self.radius)
class Flock(list):
def __init__(self, amount, x, y, width, height, depth=100.0, obstacles=[]):
""" A flock of the given amount of boids, confined to a box.
Obstacles can be added to Flock.obstacles (boids will steer away from them).
"""
self.x = x
self.y = y
self.width = width
self.height = height
self.depth = depth
self.scattered = False
self.gather = 0.05
self.obstacles = []
for i in range(amount):
# Boids will originate from the center of the flocking area.
b = Boid(self,
self.x + 0.5 * (width or 0),
self.y + 0.5 * (height or 0),
0.5 * (depth or 0))
self.append(b)
@property
def boids(self):
return self
def copy(self):
f = Flock(0, self.x, self.y, self.width, self.height, self.depth)
f.scattered = self.scattered
f.gather = self.gather
f.obstacles = [o.copy() for o in self.obstacles]
for b in self:
f.append(b.copy())
return f
def seek(self, target):
""" Sets the target vector of all boids in the flock (None for no target).
"""
for b in self:
b.seek(target)
def sight(self, distance):
for b in self:
b.sight = distance
def space(self, distance):
for b in self:
b.space = distance
def constrain(self, force=1.0, teleport=False):
""" Keep the flock inside the rectangular flocking area.
The given force determines how fast the boids will swivel when near an edge.
Alternatively, with teleport=True boids that cross a 2D edge teleport to the opposite side.
"""
f = 5
def _teleport(b):
if b.x < self.x:
b.x = self.x + self.width
if b.x > self.x + self.width:
b.x = self.x
if b.y < self.y:
b.y = self.y + self.height
if b.y > self.y + self.height:
b.y = self.y
def _constrain(b):
if b.x < self.x:
b.velocity.x += force * f * random()
if b.x > self.x + self.width:
b.velocity.x -= force * f * random()
if b.y < self.y:
b.velocity.y += force * f * random()
if b.y > self.y + self.height:
b.velocity.y -= force * f * random()
for b in self:
if b.z < 0:
b.velocity.z += force * f * random()
if b.z > self.depth:
b.velocity.z -= force * f * random()
teleport and _teleport(b) \
or _constrain(b)
def scatter(self, gather=0.05):
""" Scatters the flock, until Flock.scattered=False.
Flock.gather is the chance (0.0-1.0, or True/False) that the flock will reunite by itself.
"""
self.scattered = True
self.gather = gather
def update(self, separation=0.2, cohesion=0.2, alignment=0.6, avoidance=0.6, target=0.2, limit=15.0, constrain=1.0, teleport=False):
""" Updates the boid velocities based on the given forces.
Different forces elicit different flocking behavior; fine-tuning them can be delicate.
"""
if self.scattered:
# When scattered, make the boid cohesion negative and diminish alignment.
self.scattered = (random() > self.gather)
cohesion = -0.01
alignment *= 0.25
for b in self:
b.update(separation, cohesion, alignment, avoidance, target, limit)
self.constrain(force=constrain, teleport=teleport)
def by_depth(self):
""" Returns the boids in the flock sorted by depth (z-axis).
"""
return sorted(self, key=lambda boid: boid.z)
def __repr__(self):
return "Flock(%s)" % repr(list(self))
flock = Flock
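# Example: a minimal sketch of the flocking API above.
# The drawing step is left as a stub, since rendering is handled elsewhere;
# everything else uses only the Boid, Flock and Obstacle classes defined here.
def _example_flock(frames=100):
    flock = Flock(30, x=0, y=0, width=500, height=500)
    flock.obstacles.append(Obstacle(250, 250, radius=40))
    flock.sight(70) # Each boid perceives flockmates within 70 units.
    for i in range(frames):
        flock.update(separation=0.2, cohesion=0.2, alignment=0.6, avoidance=0.6)
        for boid in flock.by_depth(): # Deepest boids first (painter's order).
            pass # e.g., draw at (boid.x, boid.y), scaled by boid.z.
    return flock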
#=== SYSTEM ==========================================================================================
# A computer graphics technique to simulate certain fuzzy phenomena,
# which are otherwise very hard to reproduce with conventional rendering techniques:
# fire, explosions, smoke, moving water, sparks, falling leaves, clouds, fog, snow, dust,
# meteor tails, hair, fur, grass, or abstract visual effects like glowing trails, magic spells.
#--- FORCE -------------------------------------------------------------------------------------------
class Force:
def __init__(self, particle1, particle2, strength=1.0, threshold=100.0):
""" An attractive or repulsive force that causes objects with a mass to accelerate.
A negative strength indicates an attractive force.
"""
self.particle1 = particle1
self.particle2 = particle2
self.strength = strength
self.threshold = threshold
def apply(self):
""" Applies the force between two particles, based on the distance and mass of the particles.
"""
# Distance has a minimum threshold to keep forces from growing too large,
# e.g. distance 100 divides force by 10000, distance 5 only by 25.
# Decreasing the threshold moves particles that are very close to each other away faster.
dx = self.particle2.x - self.particle1.x
dy = self.particle2.y - self.particle1.y
d = sqrt(dx*dx + dy*dy)
d = max(d, self.threshold)
# The force between particles increases according to their weight.
# The force decreases as distance between them increases.
f = 10.0 * -self.strength * self.particle1.mass * self.particle2.mass
f = f / (d*d)
fx = f * dx / d
fy = f * dy / d
self.particle1.force.x += fx
self.particle1.force.y += fy
self.particle2.force.x -= fx
self.particle2.force.y -= fy
def __repr__(self):
return "Force(strength=%.2f)" % self.strength
force = Force
#--- SPRING ------------------------------------------------------------------------------------------
class Spring:
def __init__(self, particle1, particle2, length, strength=1.0):
""" A force that exerts attractive resistance when its length changes.
A spring acts as a flexible (but secure) connection between two particles.
"""
self.particle1 = particle1
self.particle2 = particle2
self.strength = strength
self.length = length
self.snapped = False
def snap(self):
""" Breaks the connection between the two particles.
"""
self.snapped = True
def apply(self):
""" Applies the force between two particles.
"""
# Distance between two particles.
dx = self.particle2.x - self.particle1.x
dy = self.particle2.y - self.particle1.y
d = sqrt(dx*dx + dy*dy)
if d == 0:
return
# The attractive strength decreases for heavy particles.
# The attractive strength increases when the spring is stretched.
f = 10.0 * self.strength / (self.particle1.mass * self.particle2.mass)
f = f * (d - self.length)
fx = f * dx / d
fy = f * dy / d
self.particle1.force.x += fx
self.particle1.force.y += fy
self.particle2.force.x -= fx
self.particle2.force.y -= fy
def draw(self, **kwargs):
line(self.particle1.x, self.particle1.y,
self.particle2.x, self.particle2.y, **kwargs)
def __repr__(self):
return "Spring(strength='%.2f', length='%.2f')" % (self.strength, self.length)
spring = Spring
#--- PARTICLE ----------------------------------------------------------------------------------------
MASS = "mass"
class Particle:
def __init__(self, x, y, velocity=(0.0,0.0), mass=10.0, radius=10.0, life=None, fixed=False):
""" An object with a mass subjected to attractive and repulsive forces.
The object's velocity is an inherent force (e.g. a rocket propeller to escape gravity).
"""
self._id = _uid()
self.x = x + random()
self.y = y + random()
self.mass = mass
self.radius = radius == MASS and mass or radius
self.velocity = isinstance(velocity, tuple) and Vector(*velocity) or velocity
self.force = Vector(0.0, 0.0) # Force accumulator.
self.life = life
self._age = 0.0
self.dead = False
self.fixed = fixed
@property
def age(self):
# Yields the particle's age as a number between 0.0 and 1.0.
return self.life and min(1.0, float(self._age) / self.life) or 0.0
def draw(self, **kwargs):
r = self.radius * (1 - self.age)
ellipse(self.x, self.y, r*2, r*2, **kwargs)
def __eq__(self, other):
return isinstance(other, Particle) and self._id == other._id
def __ne__(self, other):
return not self.__eq__(other)
def __repr__(self):
return "Particle(x=%.1f, y=%.1f, radius=%.1f, mass=%.1f)" % (
self.x, self.y, self.radius, self.mass)
particle = Particle
#--- SYSTEM ------------------------------------------------------------------------------------------
class flist(list):
def __init__(self, system):
# List of forces or springs that keeps System.dynamics in synch.
self.system = system
def insert(self, i, force):
list.insert(self, i, force)
self.system._dynamics.setdefault(force.particle1._id, []).append(force)
self.system._dynamics.setdefault(force.particle2._id, []).append(force)
def append(self, force):
self.insert(len(self), force)
def extend(self, forces):
for f in forces: self.append(f)
    def pop(self, i):
        f = list.pop(self, i)
        self.system._dynamics.pop(f.particle1._id)
        self.system._dynamics.pop(f.particle2._id)
        return f
def remove(self, force):
i = self.index(force); self.pop(i)
class System(object):
def __init__(self, gravity=(0,0), drag=0.0):
""" A collection of particles and the forces working on them.
"""
self.particles = []
self.emitters = []
self.forces = flist(self)
self.springs = flist(self)
self.gravity = isinstance(gravity, tuple) and Vector(*gravity) or gravity
self.drag = drag
self._dynamics = {} # Particle id linked to list of applied forces.
def __len__(self):
return len(self.particles)
def __iter__(self):
return iter(self.particles)
def __getitem__(self, i):
return self.particles[i]
    def extend(self, items):
        for x in items: self.append(x)
def append(self, x):
if isinstance(x, Particle) and not x in self.particles:
self.particles.append(x)
elif isinstance(x, Force):
self.forces.append(x)
elif isinstance(x, Spring):
self.springs.append(x)
elif isinstance(x, Emitter):
self.emitters.append(x)
self.extend(x.particles)
x.system = self
def _cross(self, f=lambda particle1, particle2: None, source=None, particles=[]):
# Applies function f to any two given particles in the list,
# or between source and any other particle if source is given.
P = particles or self.particles
for i, p1 in enumerate(P):
if source is None:
[f(p1, p2) for p2 in P[i+1:]]
else:
f(source, p1)
def force(self, strength=1.0, threshold=100, source=None, particles=[]):
""" The given force is applied between each two particles.
The effect this yields (with a repulsive force) is an explosion.
- source: one vs. all, apply the force to this particle with all others.
- particles: a list of particles to apply the force to (some vs. some or some vs. source).
            Be aware that 50 particles will yield roughly 1250 forces: O(n**2/2); or O(n) with source.
The force is applied to particles present in the system,
those added later on are not subjected to the force.
"""
f = lambda p1, p2: self.forces.append(Force(p1, p2, strength, threshold))
self._cross(f, source, particles)
def dynamics(self, particle, type=None):
""" Returns a list of forces working on the particle, optionally filtered by type (e.g. Spring).
"""
F = self._dynamics.get(isinstance(particle, Particle) and particle._id or particle, [])
F = [f for f in F if type is None or isinstance(f, type)]
return F
def limit(self, particle, m=None):
""" Limits the movement of the particle to m.
When repulsive particles are close to each other, their force can be very high.
This results in large movement steps, and gaps in the animation.
This can be remedied by limiting the total force.
"""
# The right way to do it requires 4x sqrt():
# if m and particle.force.length > m:
# particle.force.length = m
# if m and particle.velocity.length > m:
# particle.velocity.length = m
if m is not None:
for f in (particle.force, particle.velocity):
if abs(f.x) > m:
f.y *= m / abs(f.x)
f.x *= m / abs(f.x)
if abs(f.y) > m:
f.x *= m / abs(f.y)
f.y *= m / abs(f.y)
def update(self, limit=30):
""" Updates the location of the particles by applying all the forces.
"""
for e in self.emitters:
# Fire particles from emitters.
e.update()
for p in self.particles:
# Apply gravity. Heavier objects have a stronger attraction.
p.force.x = 0
p.force.y = 0
p.force.x += 0.1 * self.gravity.x * p.mass
p.force.y += 0.1 * -self.gravity.y * p.mass
for f in self.forces:
# Apply attractive and repulsive forces between particles.
if not f.particle1.dead and \
not f.particle2.dead:
f.apply()
for s in self.springs:
# Apply spring forces between particles.
if not s.particle1.dead and \
not s.particle2.dead and \
not s.snapped:
s.apply()
for p in self.particles:
if not p.fixed:
# Apply drag.
p.velocity.x *= 1.0 - min(1.0, self.drag)
p.velocity.y *= 1.0 - min(1.0, self.drag)
# Apply velocity.
p.force.x += p.velocity.x
p.force.y += p.velocity.y
# Limit the accumulated force and update the particle's position.
self.limit(p, limit)
p.x += p.force.x
p.y += p.force.y
if p.life:
# Apply lifespan.
p._age += 1
p.dead = p._age > p.life
@property
def dead(self):
# Yields True when all particles are dead (and we don't need to update anymore).
for p in self.particles:
if not p.dead: return False
return True
def draw(self, **kwargs):
""" Draws the system at the current iteration.
"""
for s in self.springs:
if not s.particle1.dead and \
not s.particle2.dead and \
not s.snapped:
s.draw(**kwargs)
for p in self.particles:
if not p.dead:
p.draw(**kwargs)
def __repr__(self):
return "System(particles=%i, forces=%i, springs=%i)" % \
(len(self.particles), len(self.forces), len(self.springs))
system = System
# Notes:
# While this system is interesting for many effects, it is unstable.
# If for example very strong springs are applied, particles will start "shaking".
# This is because the forces are simply added to the particle's position instead of integrated.
# See also:
# http://local.wasp.uwa.edu.au/~pbourke/miscellaneous/particle/
# http://local.wasp.uwa.edu.au/~pbourke/miscellaneous/particle/particlelib.c
#def euler_derive(particle, dt=0.1):
# particle.x += particle.velocity.x * dt
# particle.y += particle.velocity.y * dt
# particle.velocity.x += particle.force.x / particle.mass * dt
# particle.velocity.y += particle.force.y / particle.mass * dt
# If this is applied, springs will need a velocity dampener:
#fx = f + 0.01 + (self.particle2.velocity.x - self.particle1.velocity.x) * dx / d
#fy = f + 0.01 + (self.particle2.velocity.y - self.particle1.velocity.y) * dy / d
# In pure Python this is slow, since only 1/10 of the force is applied each System.update().
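# Example: a minimal sketch of the System/Spring API above.
# Two particles connected by a spring and pulled on by gravity; the fixed
# particle acts as an anchor, so the free one settles at spring length.
def _example_spring_system(frames=100):
    s = System(gravity=(0, 1.0), drag=0.01)
    p1 = Particle(100, 100, mass=10.0, fixed=True) # Anchor.
    p2 = Particle(100, 200, mass=10.0)
    s.extend([p1, p2])
    s.append(Spring(p1, p2, length=50, strength=1.0))
    for i in range(frames):
        s.update(limit=30)
    return s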
#--- EMITTER -----------------------------------------------------------------------------------------
class Emitter(object):
def __init__(self, x, y, angle=0, strength=1.0, spread=10):
""" A source that shoots particles in a given direction with a given strength.
"""
self.system = None # Set when appended to System.
self.particles = []
self.x = x
self.y = y
self.velocity = Vector(1, 1, length=strength, angle=angle)
self.spread = spread # Angle-of-view.
self._i = 0 # Current iteration.
def __len__(self):
return len(self.particles)
def __iter__(self):
return iter(self.particles)
def __getitem__(self, i):
return self.particles[i]
    def extend(self, particles, life=100):
        for p in particles: self.append(p, life)
def append(self, particle, life=100):
particle.life = particle.life or life
particle._age = particle.life
particle.dead = True
self.particles.append(particle)
if self.system is not None:
# Also append the particle to the system the emitter is part of.
self.system.append(particle)
def _get_angle(self):
return self.velocity.angle
def _set_angle(self, v):
self.velocity.angle = v
angle = property(_get_angle, _set_angle)
def _get_strength(self):
return self.velocity.length
def _set_strength(self, v):
self.velocity.length = max(v, 0.01)
strength = length = magnitude = property(_get_strength, _set_strength)
def update(self):
""" Update the system and respawn dead particles.
When a particle dies, it can be reused as a new particle fired from the emitter.
This is more efficient than creating a new Particle object.
"""
self._i += 1 # Respawn occurs gradually.
p = self.particles[self._i % len(self.particles)]
if p.dead:
p.x = self.x
p.y = self.y
p.velocity = self.velocity.rotated(self.spread * 0.5 * (random()*2-1))
p._age = 0
p.dead = False
emitter = Emitter
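# Example: a minimal sketch of the Emitter API above, a particle fountain.
# Fifty particles are recycled continuously: System.update() advances the
# emitter, which respawns dead particles from its origin with some spread.
def _example_fountain(frames=200):
    s = System(gravity=(0, 1.0))
    e = Emitter(x=250, y=400, angle=90, strength=8.0, spread=30)
    for i in range(50):
        e.append(Particle(e.x, e.y, radius=5), life=60)
    s.append(e) # Also registers the emitter's particles with the system.
    for i in range(frames):
        s.update()
    return s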
#=== GRAPH ===========================================================================================
# Graph visualization is a way of representing information as diagrams of abstract graphs and networks.
# Automatic graph drawing has many important applications in software engineering,
# database and web design, networking, and in visual interfaces for many other domains.
#--- NODE --------------------------------------------------------------------------------------------
def deepcopy(o):
# A color can be represented as a tuple or as a nodebox.graphics.Color object,
# in which case it needs to be copied by invoking Color.copy().
if o is None:
return o
if hasattr(o, "copy"):
return o.copy()
if isinstance(o, (str, bool, int, float, complex)):
return o
if isinstance(o, (list, tuple, set)):
return o.__class__(deepcopy(v) for v in o)
if isinstance(o, dict):
return dict((deepcopy(k), deepcopy(v)) for k,v in o.iteritems())
raise Exception("don't know how to copy %s" % o.__class__.__name__)
class Node(object):
def __init__(self, id="", radius=5, **kwargs):
""" A node with a unique id in the graph.
Node.id is drawn as a text label, unless optional parameter text=False.
Optional parameters include: fill, stroke, strokewidth, text, font, fontsize, fontweight.
"""
self.graph = None
self.links = Links()
self.id = id
self._x = 0.0 # Calculated by Graph.layout.update().
self._y = 0.0 # Calculated by Graph.layout.update().
self.force = Vector(0.0, 0.0)
self.radius = radius
self.fixed = kwargs.pop("fixed", False)
self.fill = kwargs.pop("fill", None)
self.stroke = kwargs.pop("stroke", (0,0,0,1))
self.strokewidth = kwargs.pop("strokewidth", 1)
self.text = kwargs.get("text", True) and \
Text(isinstance(id, bytes) and id or str(id),
width = 85,
fill = kwargs.pop("text", (0,0,0,1)),
fontsize = kwargs.pop("fontsize", 11), **kwargs) or None
self._weight = None # Calculated by Graph.eigenvector_centrality().
self._centrality = None # Calculated by Graph.betweenness_centrality().
@property
def _distance(self):
# Graph.distance controls the (x,y) spacing between nodes.
return self.graph and float(self.graph.distance) or 1.0
def _get_x(self):
return self._x * self._distance
def _get_y(self):
return self._y * self._distance
def _set_x(self, v):
self._x = v / self._distance
def _set_y(self, v):
self._y = v / self._distance
x = property(_get_x, _set_x)
y = property(_get_y, _set_y)
@property
def edges(self):
""" Yields a list of edges from/to the node.
"""
return self.graph is not None \
and [e for e in self.graph.edges if self.id in (e.node1.id, e.node2.id)] \
or []
@property
def weight(self):
""" Yields eigenvector centrality as a number between 0.0-1.0.
"""
if self.graph and self._weight is None:
self.graph.eigenvector_centrality()
return self._weight
@property
def centrality(self):
""" Yields betweenness centrality as a number between 0.0-1.0.
"""
if self.graph and self._centrality is None:
self.graph.betweenness_centrality()
return self._centrality
def flatten(self, depth=1, traversable=lambda node, edge: True, _visited=None):
""" Recursively lists the node and nodes linked to it.
Depth 0 returns a list with the node.
Depth 1 returns a list with the node and all the directly linked nodes.
Depth 2 includes the linked nodes' links, and so on.
"""
_visited = _visited or {}
_visited[self.id] = (self, depth)
if depth >= 1:
for n in self.links:
if n.id not in _visited or _visited[n.id][1] < depth-1:
if traversable(self, self.links.edges[n.id]):
n.flatten(depth-1, traversable, _visited)
return [n for n,d in _visited.values()] # Fast, but not order-preserving.
def draw(self, weighted=False):
""" Draws the node as a circle with the given radius, fill, stroke and strokewidth.
Draws the node centrality as a shadow effect when weighted=True.
Draws the node text label.
Override this method in a subclass for custom drawing.
"""
# Draw the node weight as a shadow (based on node betweenness centrality).
        if weighted is not False and self.centrality:
            w = self.centrality * 35
ellipse(
self.x,
self.y,
self.radius*2 + w,
self.radius*2 + w, fill=(0,0,0,0.2), stroke=None)
# Draw the node.
ellipse(
self.x,
self.y,
self.radius*2,
self.radius*2, fill=self.fill, stroke=self.stroke, strokewidth=self.strokewidth)
# Draw the node text label.
if self.text:
self.text.draw(
self.x + self.radius,
self.y + self.radius)
def contains(self, x, y):
""" Returns True if the given coordinates (x, y) are inside the node radius.
"""
return abs(self.x - x) < self.radius*2 and \
abs(self.y - y) < self.radius*2
def __repr__(self):
return "%s(id=%s)" % (self.__class__.__name__, repr(self.id))
def __eq__(self, node):
return isinstance(node, Node) and self.id == node.id
def __ne__(self, node):
return not self.__eq__(node)
class Links(list):
def __init__(self):
""" A list in which each node has an associated edge.
The edge() method returns the edge for a given node id.
"""
self.edges = dict()
def append(self, node, edge=None):
if node.id not in self.edges:
list.append(self, node)
self.edges[node.id] = edge
def remove(self, node):
list.remove(self, node)
self.edges.pop(node.id, None)
def edge(self, node):
return self.edges.get(isinstance(node, Node) and node.id or node)
#--- EDGE --------------------------------------------------------------------------------------------
coordinates = lambda x, y, d, a: (x + d*cos(radians(a)), y + d*sin(radians(a)))
class Edge(object):
def __init__(self, node1, node2, weight=0.0, length=1.0, type=None, stroke=(0,0,0,1), strokewidth=1):
""" A connection between two nodes.
Its weight indicates the importance (not the cost) of the connection.
Its type is useful in a semantic network (e.g. "is-a", "is-part-of", ...)
"""
self.node1 = node1
self.node2 = node2
self._weight = weight
self.length = length
self.type = type
self.stroke = stroke
self.strokewidth = strokewidth
def _get_weight(self):
return self._weight
def _set_weight(self, v):
self._weight = v
# Clear cached adjacency map in the graph, since edge weights have changed.
if self.node1.graph is not None:
self.node1.graph._adjacency = None
if self.node2.graph is not None:
self.node2.graph._adjacency = None
weight = property(_get_weight, _set_weight)
def draw(self, weighted=False, directed=False):
""" Draws the edge as a line with the given stroke and strokewidth (increased with Edge.weight).
Override this method in a subclass for custom drawing.
"""
w = weighted and self.weight or 0
line(
self.node1.x,
self.node1.y,
self.node2.x,
self.node2.y, stroke=self.stroke, strokewidth=self.strokewidth+w)
if directed:
self.draw_arrow(stroke=self.stroke, strokewidth=self.strokewidth+w)
def draw_arrow(self, **kwargs):
""" Draws the direction of the edge as an arrow on the rim of the receiving node.
"""
x0, y0 = self.node1.x, self.node1.y
x1, y1 = self.node2.x, self.node2.y
# Find the edge's angle based on node1 and node2 position.
a = degrees(atan2(y1-y0, x1-x0))
        # The arrow points to node2's rim instead of its center.
r = self.node2.radius
d = sqrt(pow(x1-x0, 2) + pow(y1-y0, 2))
x01, y01 = coordinates(x0, y0, d-r-1, a)
# Find the two other arrow corners under the given angle.
r = max(kwargs.get("strokewidth", 1) * 3, 6)
dx1, dy1 = coordinates(x01, y01, -r, a-20)
dx2, dy2 = coordinates(x01, y01, -r, a+20)
line(x01, y01, dx1, dy1, **kwargs)
line(x01, y01, dx2, dy2, **kwargs)
line(dx1, dy1, dx2, dy2, **kwargs)
def __repr__(self):
return "%s(id1=%s, id2=%s)" % (self.__class__.__name__, repr(self.node1.id), repr(self.node2.id))
#--- GRAPH -------------------------------------------------------------------------------------------
# Return value of Graph.shortest_paths().
# Dictionary values can be accessed by Node as well as by node id.
class nodedict(dict):
def __init__(self, graph, *args, **kwargs):
        dict.__init__(self, *args, **kwargs)
self.graph = graph
def __contains__(self, node):
return dict.__contains__(self, self.graph.get(node, node))
def __getitem__(self, node):
return dict.__getitem__(self, isinstance(node, Node) and node or self.graph[node])
def get(self, node, default=None):
return dict.get(self, self.graph.get(node, node), default)
def unique(iterable):
    u, seen = [], {}
    for item in iterable:
        if item not in seen: u.append(item); seen[item] = True
    return u
# Graph layouts:
SPRING = "spring"
# Graph node sort order:
WEIGHT, CENTRALITY = "weight", "centrality"
ALL = "all"
class Graph(dict):
def __init__(self, layout=SPRING, distance=10.0):
""" A network of nodes connected by edges that can be drawn with a given layout.
"""
self.nodes = [] # List of Node objects.
self.edges = [] # List of Edge objects.
self.root = None
self._adjacency = None # Cached adjacency() dict.
self.layout = layout==SPRING and GraphSpringLayout(self) or GraphLayout(self)
self.distance = distance
def __getitem__(self, id):
try:
return dict.__getitem__(self, id)
except KeyError:
raise Exception("no node with id '%s' in graph" % id)
def append(self, base, *args, **kwargs):
""" Appends a Node or Edge to the graph: Graph.append(Node, id="rabbit").
"""
kwargs["base"] = base
if issubclass(base, Node):
return self.add_node(*args, **kwargs)
if issubclass(base, Edge):
return self.add_edge(*args, **kwargs)
def add_node(self, id, *args, **kwargs):
""" Appends a new Node to the graph.
An optional base parameter can be used to pass a subclass of Node.
"""
n = kwargs.pop("base", Node)
n = isinstance(id, Node) and id or self.get(id) or n(id, *args, **kwargs)
if n.id not in self:
self.nodes.append(n)
self[n.id] = n; n.graph = self
self.root = kwargs.get("root", False) and n or self.root
# Clear adjacency cache.
self._adjacency = None
return n
def add_edge(self, id1, id2, *args, **kwargs):
""" Appends a new Edge to the graph.
An optional base parameter can be used to pass a subclass of Edge:
Graph.add_edge("cold", "winter", base=IsPropertyOf)
"""
# Create nodes that are not yet part of the graph.
n1 = self.add_node(id1)
n2 = self.add_node(id2)
# Creates an Edge instance.
# If an edge (in the same direction) already exists, yields that edge instead.
e1 = n1.links.edge(n2)
if e1 and e1.node1 == n1 and e1.node2 == n2:
return e1
e2 = kwargs.pop("base", Edge)
e2 = e2(n1, n2, *args, **kwargs)
self.edges.append(e2)
# Synchronizes Node.links:
# A.links.edge(B) yields edge A->B
# B.links.edge(A) yields edge B->A
n1.links.append(n2, edge=e2)
n2.links.append(n1, edge=e1 or e2)
# Clear adjacency cache.
self._adjacency = None
return e2
def remove(self, x):
""" Removes the given Node (and all its edges) or Edge from the graph.
Note: removing Edge a->b does not remove Edge b->a.
"""
if isinstance(x, Node) and x.id in self:
self.pop(x.id)
self.nodes.remove(x); x.graph = None
# Remove all edges involving the given node.
for e in list(self.edges):
if x in (e.node1, e.node2):
if x in e.node1.links: e.node1.links.remove(x)
if x in e.node2.links: e.node2.links.remove(x)
self.edges.remove(e)
if isinstance(x, Edge):
self.edges.remove(x)
# Clear adjacency cache.
self._adjacency = None
def node(self, id):
""" Returns the node in the graph with the given id.
"""
return self.get(id)
def edge(self, id1, id2):
""" Returns the edge between the nodes with given id1 and id2.
"""
return id1 in self and id2 in self and self[id1].links.edge(id2) or None
def paths(self, node1, node2, length=4, path=[]):
""" Returns a list of paths (shorter than or equal to given length) connecting the two nodes.
"""
if not isinstance(node1, Node):
node1 = self[node1]
if not isinstance(node2, Node):
node2 = self[node2]
return [[self[id] for id in p] for p in paths(self, node1.id, node2.id, length, path)]
def shortest_path(self, node1, node2, heuristic=None, directed=False):
""" Returns a list of nodes connecting the two nodes.
"""
if not isinstance(node1, Node):
node1 = self[node1]
if not isinstance(node2, Node):
node2 = self[node2]
try:
p = dijkstra_shortest_path(self, node1.id, node2.id, heuristic, directed)
p = [self[id] for id in p]
return p
except IndexError:
return None
def shortest_paths(self, node, heuristic=None, directed=False):
""" Returns a dictionary of nodes, each linked to a list of nodes (shortest path).
"""
if not isinstance(node, Node):
node = self[node]
p = nodedict(self)
for id, path in dijkstra_shortest_paths(self, node.id, heuristic, directed).iteritems():
p[self[id]] = path and [self[id] for id in path] or None
return p
def eigenvector_centrality(self, normalized=True, reversed=True, rating={}, iterations=100, tolerance=0.0001):
""" Calculates eigenvector centrality and returns a node => weight dictionary.
Node.weight is updated in the process.
Node.weight is higher for nodes with a lot of (indirect) incoming traffic.
"""
ec = eigenvector_centrality(self, normalized, reversed, rating, iterations, tolerance)
ec = nodedict(self, ((self[id], w) for id, w in ec.items()))
for n, w in ec.items():
n._weight = w
return ec
def betweenness_centrality(self, normalized=True, directed=False):
""" Calculates betweenness centrality and returns a node => weight dictionary.
Node.centrality is updated in the process.
Node.centrality is higher for nodes with a lot of passing traffic.
"""
bc = brandes_betweenness_centrality(self, normalized, directed)
bc = nodedict(self, ((self[id], w) for id, w in bc.items()))
for n, w in bc.items():
n._centrality = w
return bc
def sorted(self, order=WEIGHT, threshold=0.0):
""" Returns a list of nodes sorted by WEIGHT or CENTRALITY.
Nodes with a lot of traffic will be at the start of the list.
"""
o = lambda node: getattr(node, order)
nodes = ((o(n), n) for n in self.nodes if o(n) >= threshold)
nodes = reversed(sorted(nodes))
return [n for w, n in nodes]
def prune(self, depth=0):
""" Removes all nodes with less or equal links than depth.
"""
for n in (n for n in self.nodes if len(n.links) <= depth):
self.remove(n)
def fringe(self, depth=0):
""" For depth=0, returns the list of leaf nodes (nodes with only one connection).
For depth=1, returns the list of leaf nodes and their connected nodes, and so on.
"""
u = []; [u.extend(n.flatten(depth)) for n in self.nodes if len(n.links) == 1]
return unique(u)
@property
def density(self):
""" Yields the number of edges vs. the maximum number of possible edges.
For example, <0.35 => sparse, >0.65 => dense, 1.0 => complete.
"""
return 2.0*len(self.edges) / (len(self.nodes) * (len(self.nodes)-1))
@property
def is_complete(self):
return self.density == 1.0
@property
def is_dense(self):
return self.density > 0.65
@property
def is_sparse(self):
return self.density < 0.35
def split(self):
""" Returns the list of unconnected subgraphs.
"""
return partition(self)
def update(self, iterations=10, **kwargs):
""" Graph.layout.update() is called the given number of iterations.
"""
for i in range(iterations):
self.layout.update(**kwargs)
def draw(self, weighted=False, directed=False):
""" Draws all nodes and edges.
"""
for e in self.edges:
e.draw(weighted, directed)
for n in reversed(self.nodes): # New nodes (with Node._weight=None) first.
n.draw(weighted)
def node_at(self, x, y):
""" Returns the node at (x,y) or None.
"""
for n in self.nodes:
if n.contains(x, y): return n
def _add_node_copy(self, n, **kwargs):
# Magical fairy dust to copy subclasses of Node.
# We assume that the subclass constructor takes an optional "text" parameter
# (Text objects in NodeBox for OpenGL's implementation are expensive).
try:
new = self.add_node(n.id, root=kwargs.get("root",False), text=False)
except TypeError:
new = self.add_node(n.id, root=kwargs.get("root",False))
new.__class__ = n.__class__
new.__dict__.update((k, deepcopy(v)) for k,v in n.__dict__.iteritems()
if k not in ("graph", "links", "_x", "_y", "force", "_weight", "_centrality"))
def _add_edge_copy(self, e, **kwargs):
if kwargs.get("node1", e.node1).id not in self \
or kwargs.get("node2", e.node2).id not in self:
return
new = self.add_edge(
kwargs.get("node1", self[e.node1.id]),
kwargs.get("node2", self[e.node2.id]))
new.__class__ = e.__class__
new.__dict__.update((k, deepcopy(v)) for k,v in e.__dict__.iteritems()
if k not in ("node1", "node2"))
def copy(self, nodes=ALL):
""" Returns a copy of the graph with the given list of nodes (and connecting edges).
The layout will be reset.
"""
g = Graph(layout=None, distance=self.distance)
g.layout = self.layout.copy(graph=g)
for n in (nodes==ALL and self.nodes or (isinstance(n, Node) and n or self[n] for n in nodes)):
g._add_node_copy(n, root=self.root==n)
for e in self.edges:
g._add_edge_copy(e)
return g
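# Example: a minimal sketch of the Graph API above.
# Builds a small semantic network, runs the spring layout, and queries
# the shortest path and the nodes sorted by eigenvector centrality.
def _example_graph():
    g = Graph()
    for id1, id2 in (
      ("cat", "mammal"), ("dog", "mammal"), ("mammal", "animal"),
      ("bird", "animal"), ("animal", "organism")):
        g.add_edge(id1, id2, type="is-a")
    g.update(iterations=50) # Calculate node positions.
    p = g.shortest_path("cat", "organism") # cat => mammal => animal => organism
    w = g.sorted(order=WEIGHT) # Most central nodes first.
    return g, p, w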
#--- GRAPH LAYOUT ------------------------------------------------------------------------------------
# Graph drawing or graph layout, as a branch of graph theory,
# applies topology and geometry to derive two-dimensional representations of graphs.
class GraphLayout:
def __init__(self, graph):
""" Calculates node positions iteratively when GraphLayout.update() is called.
"""
self.graph = graph
self.iterations = 0
def update(self):
self.iterations += 1
def reset(self):
self.iterations = 0
for n in self.graph.nodes:
n._x = 0
n._y = 0
n.force = Vector(0,0)
@property
def bounds(self):
""" Returns a (x, y, width, height)-tuple of the approximate layout dimensions.
"""
x0, y0 = +INFINITE, +INFINITE
x1, y1 = -INFINITE, -INFINITE
for n in self.graph.nodes:
if (n.x < x0): x0 = n.x
if (n.y < y0): y0 = n.y
if (n.x > x1): x1 = n.x
if (n.y > y1): y1 = n.y
return (x0, y0, x1-x0, y1-y0)
    def copy(self, graph):
        return GraphLayout(graph)
class GraphSpringLayout(GraphLayout):
def __init__(self, graph):
""" A force-based layout in which edges are regarded as springs.
The forces are applied to the nodes, pulling them closer or pushing them apart.
"""
# Based on: http://snipplr.com/view/1950/graph-javascript-framework-version-001/
GraphLayout.__init__(self, graph)
self.k = 4.0 # Force constant.
self.force = 0.01 # Force multiplier.
self.repulsion = 15 # Maximum repulsive force radius.
def _distance(self, node1, node2):
# Yields a tuple with distances (dx, dy, d, d**2).
# Ensures that the distance is never zero (which deadlocks the animation).
dx = node2._x - node1._x
dy = node2._y - node1._y
d2 = dx*dx + dy*dy
if d2 < 0.01:
dx = random() * 0.1 + 0.1
dy = random() * 0.1 + 0.1
d2 = dx*dx + dy*dy
return dx, dy, sqrt(d2), d2
def _repulse(self, node1, node2):
# Updates Node.force with the repulsive force.
dx, dy, d, d2 = self._distance(node1, node2)
if d < self.repulsion:
f = self.k**2 / d2
node2.force.x += f * dx
node2.force.y += f * dy
node1.force.x -= f * dx
node1.force.y -= f * dy
def _attract(self, node1, node2, weight=0, length=1.0):
# Updates Node.force with the attractive edge force.
dx, dy, d, d2 = self._distance(node1, node2)
d = min(d, self.repulsion)
f = (d2 - self.k**2) / self.k * length
f *= weight * 0.5 + 1
f /= d
node2.force.x -= f * dx
node2.force.y -= f * dy
node1.force.x += f * dx
node1.force.y += f * dy
def update(self, weight=10.0, limit=0.5):
""" Updates the position of nodes in the graph.
The weight parameter determines the impact of edge weight.
The limit parameter determines the maximum movement each update().
"""
GraphLayout.update(self)
# Forces on all nodes due to node-node repulsions.
for i, n1 in enumerate(self.graph.nodes):
for j, n2 in enumerate(self.graph.nodes[i+1:]):
self._repulse(n1, n2)
# Forces on nodes due to edge attractions.
for e in self.graph.edges:
self._attract(e.node1, e.node2, weight*e.weight, 1.0/(e.length or 0.01))
# Move nodes by given force.
for n in self.graph.nodes:
if not n.fixed:
n._x += max(-limit, min(self.force * n.force.x, limit))
n._y += max(-limit, min(self.force * n.force.y, limit))
n.force.x = 0
n.force.y = 0
def copy(self, graph):
g = GraphSpringLayout(graph)
g.k, g.force, g.repulsion = self.k, self.force, self.repulsion
return g
#--- GRAPH TRAVERSAL ---------------------------------------------------------------------------------
def depth_first_search(node, visit=lambda node: False, traversable=lambda node, edge: True, _visited=None):
""" Visits all the nodes connected to the given root node, depth-first.
The visit function is called on each node.
Recursion will stop if it returns True, and subsequently dfs() will return True.
The traversable function takes the current node and edge,
and returns True if we are allowed to follow this connection to the next node.
        For example, the traversable for directed edges is as follows:
lambda node, edge: node == edge.node1
"""
stop = visit(node)
_visited = _visited or {}
_visited[node.id] = True
for n in node.links:
if stop: return True
if traversable(node, node.links.edge(n)) is False: continue
if not n.id in _visited:
stop = depth_first_search(n, visit, traversable, _visited)
return stop
dfs = depth_first_search
def breadth_first_search(node, visit=lambda node: False, traversable=lambda node, edge: True):
""" Visits all the nodes connected to the given root node, breadth-first.
"""
q = [node]
_visited = {}
while q:
node = q.pop(0)
if not node.id in _visited:
if visit(node):
return True
q.extend((n for n in node.links if traversable(node, node.links.edge(n)) is not False))
_visited[node.id] = True
return False
bfs = breadth_first_search
def paths(graph, id1, id2, length=4, path=[], _root=True):
""" Returns a list of paths from node with id1 to node with id2.
Only paths shorter than or equal to the given length are included.
Uses a brute-force DFS approach (performance drops exponentially for longer paths).
"""
if len(path) >= length:
return []
if id1 not in graph:
return []
if id1 == id2:
return [path + [id1]]
path = path + [id1]
p = []
s = set(path) # 5% speedup.
for node in graph[id1].links:
if node.id not in s:
p.extend(paths(graph, node.id, id2, length, path, False))
return _root and sorted(p, key=len) or p
def edges(path):
""" Returns an iterator of Edge objects for the given list of nodes.
It yields None where two successive nodes are not connected.
"""
# For example, the distance (i.e., edge weight sum) of a path:
# sum(e.weight for e in edges(path))
return len(path) > 1 and (n.links.edge(path[i+1]) for i,n in enumerate(path[:-1])) or iter(())
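# Example: a minimal sketch of the traversal functions above.
# The visitor collects node id's; returning False keeps the search going
# until all nodes reachable from the given root have been seen.
def _example_traversal(graph, root):
    visited = []
    def visit(node):
        visited.append(node.id)
        return False
    breadth_first_search(graph[root], visit)
    return visited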
#--- GRAPH THEORY ------------------------------------------------------------------------------------
def adjacency(graph, directed=False, reversed=False, stochastic=False, heuristic=None):
""" Returns a dictionary indexed by node id1's,
in which each value is a dictionary of connected node id2's linking to the edge weight.
If directed=True, edges go from id1 to id2, but not the other way.
If stochastic=True, all the weights for the neighbors of a given node sum to 1.
A heuristic function can be given that takes two node id's and returns
an additional cost for movement between the two nodes.
"""
# Caching a heuristic from a method won't work.
# Bound method objects are transient,
# i.e., id(object.method) returns a new value each time.
if graph._adjacency is not None and \
graph._adjacency[1:] == (directed, reversed, stochastic, heuristic and id(heuristic)):
return graph._adjacency[0]
map = {}
for n in graph.nodes:
map[n.id] = {}
for e in graph.edges:
id1, id2 = not reversed and (e.node1.id, e.node2.id) or (e.node2.id, e.node1.id)
map[id1][id2] = 1.0 - 0.5 * e.weight
if heuristic:
map[id1][id2] += heuristic(id1, id2)
if not directed:
map[id2][id1] = map[id1][id2]
if stochastic:
for id1 in map:
n = sum(map[id1].values())
for id2 in map[id1]:
map[id1][id2] /= n
# Cache the adjacency map: this makes dijkstra_shortest_path() 2x faster in repeated use.
graph._adjacency = (map, directed, reversed, stochastic, heuristic and id(heuristic))
return map
def dijkstra_shortest_path(graph, id1, id2, heuristic=None, directed=False):
""" Dijkstra algorithm for finding the shortest path between two nodes.
Returns a list of node id's, starting with id1 and ending with id2.
Raises an IndexError between nodes on unconnected graphs.
"""
# Based on: Connelly Barnes, http://aspn.activestate.com/ASPN/Cookbook/Python/Recipe/119466
def flatten(list):
# Flattens a linked list of the form [0,[1,[2,[]]]]
while len(list) > 0:
yield list[0]; list=list[1]
G = adjacency(graph, directed=directed, heuristic=heuristic)
q = [(0, id1, ())] # Heap of (cost, path_head, path_rest).
visited = set() # Visited nodes.
while True:
(cost1, n1, path) = heappop(q)
if n1 not in visited:
visited.add(n1)
if n1 == id2:
return list(flatten(path))[::-1] + [n1]
path = (n1, path)
for (n2, cost2) in G[n1].iteritems():
if n2 not in visited:
heappush(q, (cost1 + cost2, n2, path))
def dijkstra_shortest_paths(graph, id, heuristic=None, directed=False):
""" Dijkstra algorithm for finding the shortest paths from the given node to all other nodes.
Returns a dictionary of node id's, each linking to a list of node id's (i.e., the path).
"""
# Based on: Dijkstra's algorithm for shortest paths modified from Eppstein.
# Based on: NetworkX 1.4.1: Aric Hagberg, Dan Schult and Pieter Swart.
# This is 5x faster than:
# for n in g: dijkstra_shortest_path(g, id, n.id)
W = adjacency(graph, directed=directed, heuristic=heuristic)
Q = [] # Use Q as a heap with (distance, node id)-tuples.
D = {} # Dictionary of final distances.
P = {} # Dictionary of paths.
P[id] = [id]
seen = {id: 0}
heappush(Q, (0, id))
while Q:
(dist, v) = heappop(Q)
if v in D: continue
D[v] = dist
for w in W[v].iterkeys():
vw_dist = D[v] + W[v][w]
if w not in D and (w not in seen or vw_dist < seen[w]):
seen[w] = vw_dist
heappush(Q, (vw_dist, w))
P[w] = P[v] + [w]
for n in graph:
if n not in P: P[n]=None
return P
def floyd_warshall_all_pairs_distance(graph, heuristic=None, directed=False):
""" Floyd-Warshall's algorithm for finding the path length for all pairs for nodes.
Returns a dictionary of node id's,
each linking to a dictionary of node id's linking to path length.
"""
from collections import defaultdict # Requires Python 2.5+.
g = graph.keys()
d = defaultdict(lambda: defaultdict(lambda: 1e30)) # float('inf')
p = defaultdict(dict) # Predecessors.
for e in graph.edges:
u = e.node1.id
v = e.node2.id
w = 1.0 - 0.5 * e.weight
w = heuristic and heuristic(u, v) + w or w
d[u][v] = min(w, d[u][v])
d[u][u] = 0
p[u][v] = u
if not directed:
d[v][u] = min(w, d[v][u])
p[v][u] = v
for w in g:
dw = d[w]
for u in g:
du, duw = d[u], d[u][w]
for v in g:
# Performance optimization, assumes d[w][v] > 0.
#if du[v] > duw + dw[v]:
if du[v] > duw and du[v] > duw + dw[v]:
d[u][v] = duw + dw[v]
p[u][v] = p[w][v]
class pdict(dict):
def __init__(self, predecessors, *args, **kwargs):
dict.__init__(self, *args, **kwargs)
self.predecessors = predecessors
return pdict(p, ((u, dict((v, w) for v,w in d[u].iteritems() if w < 1e30)) for u in d))
def predecessor_path(tree, u, v):
""" Returns the path between node u and node v as a list of node id's.
The given tree is the return value of floyd_warshall_all_pairs_distance().predecessors.
"""
def _traverse(u, v):
w = tree[u][v]
if w == u:
return []
return _traverse(u,w) + [w] + _traverse(w,v)
return [u] + _traverse(u,v) + [v]
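# Example: a minimal sketch tying the two functions above together.
# The dictionary returned by floyd_warshall_all_pairs_distance() carries the
# predecessor tree that predecessor_path() expects (assuming id2 is reachable).
def _example_all_pairs(graph, id1, id2):
    d = floyd_warshall_all_pairs_distance(graph)
    return d[id1][id2], predecessor_path(d.predecessors, id1, id2)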
def brandes_betweenness_centrality(graph, normalized=True, directed=False):
""" Betweenness centrality for nodes in the graph.
Betweenness centrality is a measure of the number of shortests paths that pass through a node.
Nodes in high-density areas will get a good score.
"""
# Ulrik Brandes, A Faster Algorithm for Betweenness Centrality,
# Journal of Mathematical Sociology 25(2):163-177, 2001,
# http://www.inf.uni-konstanz.de/algo/publications/b-fabc-01.pdf
# Based on: Dijkstra's algorithm for shortest paths modified from Eppstein.
# Based on: NetworkX 1.0.1: Aric Hagberg, Dan Schult and Pieter Swart.
# http://python-networkx.sourcearchive.com/documentation/1.0.1/centrality_8py-source.html
W = adjacency(graph, directed=directed)
b = dict.fromkeys(graph, 0.0)
for id in graph:
Q = [] # Use Q as a heap with (distance, node id)-tuples.
D = {} # Dictionary of final distances.
P = {} # Dictionary of paths.
for n in graph: P[n]=[]
seen = {id: 0}
heappush(Q, (0, id, id))
S = []
E = dict.fromkeys(graph, 0) # sigma
E[id] = 1.0
while Q:
(dist, pred, v) = heappop(Q)
if v in D:
continue
D[v] = dist
S.append(v)
E[v] += E[pred]
for w in W[v]:
vw_dist = D[v] + W[v][w]
if w not in D and (w not in seen or vw_dist < seen[w]):
seen[w] = vw_dist
heappush(Q, (vw_dist, v, w))
P[w] = [v]
E[w] = 0.0
elif vw_dist == seen[w]: # Handle equal paths.
P[w].append(v)
E[w] += E[v]
d = dict.fromkeys(graph, 0.0)
for w in reversed(S):
for v in P[w]:
d[v] += (1.0 + d[w]) * E[v] / E[w]
if w != id:
b[w] += d[w]
# Normalize between 0.0 and 1.0.
m = normalized and max(b.values()) or 1
b = dict((id, w/m) for id, w in b.items())
return b
def eigenvector_centrality(graph, normalized=True, reversed=True, rating={}, iterations=100, tolerance=0.0001):
""" Eigenvector centrality for nodes in the graph (cfr. Google's PageRank).
Eigenvector centrality is a measure of the importance of a node in a directed network.
It rewards nodes with a high potential of (indirectly) connecting to high-scoring nodes.
Nodes with no incoming connections have a score of zero.
If you want to measure outgoing connections, reversed should be False.
"""
# Based on: NetworkX, Aric Hagberg ([email protected])
# http://python-networkx.sourcearchive.com/documentation/1.0.1/centrality_8py-source.html
# Note: much faster than betweenness centrality (which grows exponentially).
def normalize(vector):
w = 1.0 / (sum(vector.values()) or 1)
for node in vector:
vector[node] *= w
return vector
G = adjacency(graph, directed=True, reversed=reversed)
v = normalize(dict([(n, random()) for n in graph])) # Node ID => weight vector.
# Eigenvector calculation using the power iteration method: y = Ax.
# It has no guarantee of convergence.
for i in range(iterations):
v0 = v
v = dict.fromkeys(v0.keys(), 0)
for n1 in v:
for n2 in G[n1]:
v[n1] += 0.01 + v0[n2] * G[n1][n2] * rating.get(n1, 1)
normalize(v)
e = sum([abs(v[n]-v0[n]) for n in v]) # Check for convergence.
if e < len(G) * tolerance:
# Normalize between 0.0 and 1.0.
m = normalized and max(v.values()) or 1
v = dict((id, w/m) for id, w in v.items())
return v
warn("node weight is 0 because eigenvector_centrality() did not converge.", Warning)
return dict((n, 0) for n in G)
# a | b => all elements from a and all the elements from b.
# a & b => elements that appear in a as well as in b.
# a - b => elements that appear in a but not in b.
def union(a, b):
return list(set(a) | set(b))
def intersection(a, b):
return list(set(a) & set(b))
def difference(a, b):
return list(set(a) - set(b))
def partition(graph):
""" Returns a list of unconnected subgraphs.
"""
# Creates clusters of nodes and directly connected nodes.
# Iteratively merges two clusters if they overlap.
g = []
for n in graph.nodes:
g.append(dict.fromkeys((n.id for n in n.flatten()), True))
for i in reversed(range(len(g))):
for j in reversed(range(i+1, len(g))):
if g[i] and g[j] and len(intersection(g[i], g[j])) > 0:
g[i] = union(g[i], g[j])
g[j] = []
g = [graph.copy(nodes=[graph[id] for id in n]) for n in g if n]
g.sort(lambda a, b: len(b) - len(a))
return g
#--- GRAPH THEORY | CLIQUE ---------------------------------------------------------------------------
def is_clique(graph):
""" A clique is a set of nodes in which each node is connected to all other nodes.
"""
#for n1 in graph.nodes:
# for n2 in graph.nodes:
# if n1 != n2 and graph.edge(n1.id, n2.id) is None:
# return False
return graph.density == 1.0
def clique(graph, id):
""" Returns the largest possible clique for the node with given id.
"""
if isinstance(id, Node):
id = id.id
a = [id]
for n in graph.nodes:
try:
# Raises StopIteration if all nodes in the clique are connected to n:
(id for id in a if n.id==id or graph.edge(n.id, id) is None).next()
except StopIteration:
a.append(n.id)
return a
def cliques(graph, threshold=3):
""" Returns all cliques in the graph with at least the given number of nodes.
"""
a = []
for n in graph.nodes:
c = clique(graph, n.id)
if len(c) >= threshold:
c.sort()
if c not in a: a.append(c)
return a
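# Example: a minimal sketch of the clique functions above.
# In a triangle a-b-c with a dangling node d, the only clique with three
# or more nodes is [a, b, c].
def _example_cliques():
    g = Graph()
    for id1, id2 in (("a", "b"), ("b", "c"), ("c", "a"), ("c", "d")):
        g.add_edge(id1, id2)
    return cliques(g, threshold=3) # [["a", "b", "c"]]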
#--- GRAPH MAINTENANCE -------------------------------------------------------------------------------
# Utility commands for safe linking and unlinking of nodes,
# with respect for the surrounding nodes.
def unlink(graph, node1, node2=None):
""" Removes the edges between node1 and node2.
If only node1 is given, removes all edges to and from it.
This does not remove node1 from the graph.
"""
if not isinstance(node1, Node):
node1 = graph[node1]
if not isinstance(node2, Node) and node2 is not None:
node2 = graph[node2]
for e in list(graph.edges):
if node1 in (e.node1, e.node2) and node2 in (e.node1, e.node2, None):
graph.edges.remove(e)
try:
node1.links.remove(node2)
node2.links.remove(node1)
    except (ValueError, AttributeError): # node2 is None.
pass
def redirect(graph, node1, node2):
""" Connects all of node1's edges to node2 and unlinks node1.
"""
if not isinstance(node1, Node):
node1 = graph[node1]
if not isinstance(node2, Node):
node2 = graph[node2]
for e in graph.edges:
if node1 in (e.node1, e.node2):
if e.node1 == node1 and e.node2 != node2:
graph._add_edge_copy(e, node1=node2, node2=e.node2)
if e.node2 == node1 and e.node1 != node2:
graph._add_edge_copy(e, node1=e.node1, node2=node2)
unlink(graph, node1)
def cut(graph, node):
""" Unlinks the given node, but keeps edges intact by connecting the surrounding nodes.
If A, B, C, D are nodes and A->B, B->C, B->D, if we then cut B: A->C, A->D.
"""
if not isinstance(node, Node):
node = graph[node]
for e in graph.edges:
if node in (e.node1, e.node2):
for n in node.links:
if e.node1 == node and e.node2 != n:
graph._add_edge_copy(e, node1=n, node2=e.node2)
if e.node2 == node and e.node1 != n:
graph._add_edge_copy(e, node1=e.node1, node2=n)
unlink(graph, node)
def insert(graph, node, a, b):
""" Inserts the given node between node a and node b.
If A, B, C are nodes and A->B, if we then insert C: A->C, C->B.
"""
if not isinstance(node, Node):
node = graph[node]
if not isinstance(a, Node):
a = graph[a]
if not isinstance(b, Node):
b = graph[b]
for e in graph.edges:
if e.node1 == a and e.node2 == b:
graph._add_edge_copy(e, node1=a, node2=node)
graph._add_edge_copy(e, node1=node, node2=b)
if e.node1 == b and e.node2 == a:
graph._add_edge_copy(e, node1=b, node2=node)
graph._add_edge_copy(e, node1=node, node2=a)
unlink(graph, a, b)
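# Example: a minimal sketch of the maintenance commands above, following
# the scenario in the cut() docstring: A->B, B->C; cutting B yields A->C.
def _example_cut():
    g = Graph()
    g.add_edge("A", "B")
    g.add_edge("B", "C")
    cut(g, "B")
    return g.edge("A", "C") is not None # True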
| bsd-3-clause | 5,461,694,732,288,640,000 | 38.069332 | 136 | 0.554371 | false |
google/sling | sling/nlp/parser/trainer/lexicon.py | 1 | 3676 | # Copyright 2017 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Class for computing and serving a lexicon.
# Usage:
# lexicon = Lexicon(normalize_digits=...)
# lexicon.add("foo")
# lexicon.add("bar")
# ...
# index = lexicon.index("foo")
#
# Methods with names starting with an underscore are meant to be internal.
class Lexicon:
def __init__(self, normalize_digits=True, oov_item="<UNKNOWN>"):
self.normalize_digits = normalize_digits
self.item_to_index = {}
self.index_to_item = {}
if oov_item is not None:
self.oov_item = oov_item
self.oov_index = 0 # Don't change this; OOV is always at position 0
self.index_to_item[self.oov_index] = self.oov_item
self.item_to_index[self.oov_item] = self.oov_index
else:
self.oov_item = None
self.oov_index = None
# Returns whether the lexicon has an OOV item.
def has_oov(self):
return self.oov_index is not None
# Returns the key internally used by the lexicon.
def _key(self, item):
if self.normalize_digits:
return "".join([c if not c.isdigit() else '9' for c in list(item)])
else:
return item
# Loads the lexicon from a text file with one key per line.
def load(self, vocabfile):
with open(vocabfile, "r") as f:
index = 0
for line in f:
line = line.strip()
if line == self.oov_item:
assert index == self.oov_index, index
self.item_to_index[line] = index
self.index_to_item[index] = line
index += 1
# Reads the lexicon from a delimited string.
def read(self, vocabstring, delimiter):
index = 0
lines = vocabstring.split(delimiter)
if lines[-1] == '': lines.pop()
    for line in lines:
if line == self.oov_item:
assert index == self.oov_index, index
self.item_to_index[line] = index
self.index_to_item[index] = line
index += 1
# Adds 'item' to the lexicon.
def add(self, item):
item = self._key(item)
if item not in self.item_to_index:
i = len(self.item_to_index)
self.item_to_index[item] = i
self.index_to_item[i] = item
# Returns the size of the lexicon, including the OOV item, if any.
def size(self):
return len(self.item_to_index)
# Returns the integer index of 'item' in the lexicon, if present.
def index(self, item):
item = self._key(item)
if item not in self.item_to_index:
return self.oov_index # this is None if !has_oov()
else:
return self.item_to_index[item]
# Returns a string representation of the key whose id is 'index'.
def value(self, index):
assert index >= 0 and index < len(self.index_to_item), "%r" % index
return self.index_to_item[index]
# Returns a string representation of the lexicon.
def __str__(self):
s = [self.index_to_item[i] for i in range(self.size())]
return "\n".join(s)
# Returns the string representation of the first 'n' keys in the lexicon.
def first_few(self, prefix="", n=100):
s = []
for i in range(min(n, self.size())):
s.append(prefix + str(i) + " = " + self.index_to_item[i])
return "\n".join(s)
| apache-2.0 | 2,777,742,630,145,954,300 | 29.633333 | 75 | 0.646083 | false |
kaiweifan/horizon | openstack_dashboard/dashboards/admin/metering/views.py | 1 | 8054 | # vim: tabstop=4 shiftwidth=4 softtabstop=4
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from datetime import datetime # noqa
from datetime import timedelta # noqa
import json
from django.http import HttpResponse # noqa
from django.utils.translation import ugettext_lazy as _ # noqa
from django.views.generic import TemplateView # noqa
from horizon import exceptions
from horizon import tabs
from openstack_dashboard import api
from openstack_dashboard.api import ceilometer
from openstack_dashboard.dashboards.admin.metering import tabs as \
metering_tabs
class IndexView(tabs.TabbedTableView):
tab_group_class = metering_tabs.CeilometerOverviewTabs
template_name = 'admin/metering/index.html'
class SamplesView(TemplateView):
template_name = "admin/metering/samples.csv"
@staticmethod
def _series_for_meter(aggregates,
resource_name,
meter_name,
stats_name,
unit):
"""Construct datapoint series for a meter from resource aggregates."""
series = []
for resource in aggregates:
if getattr(resource, meter_name):
point = {'unit': unit,
'name': getattr(resource, resource_name),
'data': []}
for statistic in getattr(resource, meter_name):
date = statistic.duration_end[:19]
value = float(getattr(statistic, stats_name))
point['data'].append({'x': date, 'y': value})
series.append(point)
return series
def get(self, request, *args, **kwargs):
meter = request.GET.get('meter', None)
meter_name = meter.replace(".", "_")
date_options = request.GET.get('date_options', None)
date_from = request.GET.get('date_from', None)
date_to = request.GET.get('date_to', None)
stats_attr = request.GET.get('stats_attr', 'avg')
# TODO(lsmola) all timestamps should probably work with
# current timezone. And also show the current timezone in chart.
if (date_options == "other"):
try:
if date_from:
date_from = datetime.strptime(date_from,
"%Y-%m-%d")
else:
# TODO(lsmola) there should be probably the date
# of the first sample as default, so it correctly
# counts the time window. Though I need ordering
# and limit of samples to obtain that.
pass
if date_to:
date_to = datetime.strptime(date_to,
"%Y-%m-%d")
                    # strptime() returns the beginning of the day; we want
                    # the end of the day, so add one day minus one second.
date_to = (date_to + timedelta(days=1) -
timedelta(seconds=1))
else:
date_to = datetime.now()
except Exception:
raise ValueError("The dates haven't been "
"recognized")
else:
try:
date_from = datetime.now() - timedelta(days=int(date_options))
date_to = datetime.now()
except Exception:
raise ValueError("The time delta must be an "
"integer representing days.")
if date_from and date_to:
if date_to < date_from:
# TODO(lsmola) propagate the Value error through Horizon
# handler to the client with verbose message.
raise ValueError("Date to must be bigger than date "
"from.")
# get the time delta in seconds
delta = date_to - date_from
if delta.days <= 0:
# it's one day
delta_in_seconds = 3600 * 24
else:
delta_in_seconds = delta.days * 24 * 3600 + delta.seconds
            # Always show at most 400 samples in the chart; the actual
            # number of returned samples can be lower.
number_of_samples = 400
period = delta_in_seconds / number_of_samples
else:
# If some date is missing, just set static window to one day.
period = 3600 * 24
query = [{"field": "metadata.OS-EXT-AZ:availability_zone",
"op": "eq",
"value": "nova"}]
additional_query = []
if date_from:
additional_query += [{'field': 'timestamp',
'op': 'ge',
'value': date_from}]
if date_to:
additional_query += [{'field': 'timestamp',
'op': 'le',
'value': date_to}]
# TODO(lsmola) replace this by logic implemented in I1 in bugs
# 1226479 and 1226482, this is just a quick fix for RC1
try:
meter_list = [m for m in ceilometer.meter_list(request)
if m.name == meter]
unit = meter_list[0].unit
except Exception:
unit = ""
if request.GET.get('group_by', None) == "project":
try:
tenants, more = api.keystone.tenant_list(
request,
domain=None,
paginate=True,
marker="tenant_marker")
except Exception:
tenants = []
exceptions.handle(request,
_('Unable to retrieve tenant list.'))
queries = {}
for tenant in tenants:
tenant_query = [{
"field": "project_id",
"op": "eq",
"value": tenant.id}]
queries[tenant.name] = tenant_query
ceilometer_usage = ceilometer.CeilometerUsage(request)
resources = ceilometer_usage.resource_aggregates_with_statistics(
queries, [meter], period=period, stats_attr=None,
additional_query=additional_query)
series = self._series_for_meter(resources,
'id',
meter_name,
stats_attr,
unit)
else:
ceilometer_usage = ceilometer.CeilometerUsage(request)
try:
resources = ceilometer_usage.resources_with_statistics(
query, [meter], period=period, stats_attr=None,
additional_query=additional_query)
except Exception:
resources = []
exceptions.handle(request,
_('Unable to retrieve statistics.'))
series = self._series_for_meter(resources,
'resource_id',
meter_name,
stats_attr,
unit)
ret = {}
ret['series'] = series
ret['settings'] = {}
return HttpResponse(json.dumps(ret),
mimetype='application/json')
| apache-2.0 | -3,676,180,423,407,761,400 | 39.472362 | 78 | 0.502607 | false |
slobberchops/rop | opc/drivers/opc.py | 1 | 3777 | """Simple high-performance Open Pixel Control client."""
#
# Copyright (c) 2013 Micah Elizabeth Scott
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
#
# This code is modified from the original.
from drivers.baseclass import RopDriver
import json
import numpy
import os
import socket
import struct
import time
class Driver(RopDriver):
"""High-performance Open Pixel Control client, using Numeric Python.
By default, assumes the OPC server is running on localhost. This may be
overridden with the OPC_SERVER environment variable, or the 'server'
keyword argument.
"""
def __init__(self, width, height, address):
self.server = address or os.getenv('OPC_SERVER') or '127.0.0.1:7890'
self.host, port = self.server.split(':')[1:]
self.port = int(port)
self.socket = None
def send(self, packet):
"""Send a low-level packet to the OPC server, connecting if necessary
and handling disconnects. Returns True on success.
"""
if self.socket is None:
try:
self.socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
self.socket.connect((self.host, self.port))
self.socket.setsockopt(socket.IPPROTO_TCP, socket.TCP_NODELAY,
True)
except socket.error:
self.socket = None
if self.socket is not None:
try:
self.socket.send(packet)
return True
except socket.error:
self.socket = None
# Limit CPU usage when polling for a server
time.sleep(0.1)
return False
def putPixels(self, channel, *sources):
"""
Send a list of 8-bit colors to the indicated channel. (OPC command
0x00). This command accepts a list of pixel sources, which are
concatenated and sent. Pixel sources may be:
- NumPy arrays or sequences containing 8-bit RGB pixel data.
If values are out of range, the array is modified.
"""
parts = []
bytes = 0
for source in sources:
numpy.clip(source, 0, 255, source)
source = source.astype('B').tostring()
bytes += len(source)
parts.append(source)
parts.insert(0, struct.pack('>BBH', channel, 0, bytes))
self.send(''.join(parts))
def sysEx(self, systemId, commandId, msg):
self.send(struct.pack(">BBHHH", 0, 0xFF, len(msg) + 4, systemId,
commandId) + msg)
def setGlobalColorCorrection(self, gamma, r, g, b):
self.sysEx(1, 1, json.dumps({'gamma': gamma, 'whitepoint': [r, g, b]}))
def terminate(self):
pass
| gpl-3.0 | -5,717,644,401,036,324,000 | 35.317308 | 79 | 0.642044 | false |
jacquev6/ActionTree | ActionTree/tests/stock_actions.py | 1 | 7753 | # coding: utf8
# Copyright 2015-2018 Vincent Jacques <[email protected]>
import errno
import pickle
import subprocess
import unittest
from ActionTree.stock import *
from ActionTree import *
class PatchingTestCase(unittest.TestCase):
def patch(self, *args, **kwds):
patcher = unittest.mock.patch(*args, **kwds)
patched = patcher.start()
self.addCleanup(patcher.stop)
return patched
class CreateDirectoryTestCase(PatchingTestCase):
def setUp(self):
self.makedirs = self.patch("os.makedirs")
self.isdir = self.patch("os.path.isdir")
def test_label(self):
self.assertEqual(CreateDirectory("xxx").label, "mkdir xxx")
self.makedirs.assert_not_called()
self.isdir.assert_not_called()
def test_pickle(self):
self.assertIsInstance(pickle.dumps(CreateDirectory("xxx")), bytes)
def test_success(self):
self.makedirs.expect("xxx")
CreateDirectory("xxx").do_execute({})
self.makedirs.assert_called_once_with("xxx")
self.isdir.assert_not_called()
def test_directory_exists(self):
self.makedirs.side_effect = OSError(errno.EEXIST, "File exists")
self.isdir.return_value = True
CreateDirectory("xxx").do_execute({})
self.makedirs.assert_called_once_with("xxx")
self.isdir.assert_called_once_with("xxx")
def test_file_exists(self):
self.makedirs.side_effect = OSError(errno.EEXIST, "File exists")
self.isdir.return_value = False
with self.assertRaises(OSError):
CreateDirectory("xxx").do_execute({})
self.makedirs.assert_called_once_with("xxx")
self.isdir.assert_called_once_with("xxx")
def test_other_failure(self):
self.makedirs.side_effect = OSError(-1, "Foobar")
with self.assertRaises(OSError):
CreateDirectory("xxx").do_execute({})
self.makedirs.assert_called_once_with("xxx")
self.isdir.assert_not_called()
class CallSubprocessTestCase(PatchingTestCase):
def setUp(self):
self.check_call = self.patch("subprocess.check_call")
def test_default_label(self):
self.assertEqual(CallSubprocess(["xxx", "yyy"]).label, "xxx yyy")
def test_label(self):
self.assertEqual(CallSubprocess(["xxx", "yyy"], label="foo").label, "foo")
def test_accept_failed_dependencies(self):
self.assertTrue(CallSubprocess(["xxx", "yyy"], accept_failed_dependencies=True).accept_failed_dependencies)
def test_pickle(self):
self.assertIsInstance(pickle.dumps(CallSubprocess(["xxx", "yyy"])), bytes)
def test_simple_call(self):
CallSubprocess(["xxx"]).do_execute({})
self.check_call.assert_called_once_with(["xxx"])
def test_call_with_several_args(self):
self.check_call.expect(["xxx", "yyy"])
CallSubprocess(["xxx", "yyy"]).do_execute({})
self.check_call.assert_called_once_with(["xxx", "yyy"])
def test_call_with_kwds(self):
CallSubprocess(["xxx", "yyy"], kwargs=dict(foo="bar")).do_execute({})
self.check_call.assert_called_once_with(["xxx", "yyy"], foo="bar")
def test_called_process_error(self):
exn = subprocess.CalledProcessError(1, ["false"], None)
self.check_call.side_effect = exn
with self.assertRaises(subprocess.CalledProcessError) as catcher:
CallSubprocess(["false"]).do_execute({})
self.assertIs(catcher.exception, exn)
class CallSubprocessForRealTestCase(unittest.TestCase):
def test_called_process_error(self):
with self.assertRaises(CompoundException) as catcher:
execute(CallSubprocess(["false"]))
self.assertEqual(catcher.exception.exceptions[0].args, (1, ["false"]))
class DeleteFileTestCase(PatchingTestCase):
def setUp(self):
self.unlink = self.patch("os.unlink")
def test_label(self):
self.assertEqual(DeleteFile("xxx").label, "rm xxx")
self.unlink.assert_not_called()
def test_pickle(self):
self.assertIsInstance(pickle.dumps(DeleteFile("xxx")), bytes)
def test_success(self):
DeleteFile("xxx").do_execute({})
self.unlink.assert_called_once_with("xxx")
def test_file_does_not_exist(self):
self.unlink.side_effect = OSError(errno.ENOENT, "No such file or directory")
DeleteFile("xxx").do_execute({})
self.unlink.assert_called_once_with("xxx")
def test_other_failure(self):
self.unlink.side_effect = OSError(-1, "Foobar")
with self.assertRaises(OSError):
DeleteFile("xxx").do_execute({})
self.unlink.assert_called_once_with("xxx")
class DeleteDirectoryTestCase(PatchingTestCase):
def setUp(self):
self.rmtree = self.patch("shutil.rmtree")
def test_label(self):
self.assertEqual(DeleteDirectory("xxx").label, "rm -r xxx")
self.rmtree.assert_not_called()
def test_pickle(self):
self.assertIsInstance(pickle.dumps(DeleteDirectory("xxx")), bytes)
def test_success(self):
DeleteDirectory("xxx").do_execute({})
self.rmtree.assert_called_once_with("xxx")
def test_directory_does_not_exist(self):
self.rmtree.side_effect = OSError(errno.ENOENT, "No such file or directory")
DeleteDirectory("xxx").do_execute({})
self.rmtree.assert_called_once_with("xxx")
def test_other_failure(self):
self.rmtree.side_effect = OSError(-1, "Foobar")
with self.assertRaises(OSError):
DeleteDirectory("xxx").do_execute({})
self.rmtree.assert_called_once_with("xxx")
class CopyFileTestCase(PatchingTestCase):
def setUp(self):
self.copy = self.patch("shutil.copy")
def test_label(self):
self.assertEqual(CopyFile("from", "to").label, "cp from to")
self.copy.assert_not_called()
def test_pickle(self):
self.assertIsInstance(pickle.dumps(CopyFile("from", "to")), bytes)
def test_success(self):
CopyFile("from", "to").do_execute({})
self.copy.assert_called_once_with("from", "to")
def test_failure(self):
self.copy.side_effect = OSError(-1, "Foobar")
with self.assertRaises(OSError):
CopyFile("from", "to").do_execute({})
self.copy.assert_called_once_with("from", "to")
class TouchFileTestCase(PatchingTestCase):
def setUp(self):
self.open = self.patch("ActionTree.stock.open", new=unittest.mock.mock_open(), create=True)
self.utime = self.patch("os.utime")
def test_label(self):
self.assertEqual(TouchFile("xxx").label, "touch xxx")
self.open.assert_not_called()
self.utime.assert_not_called()
def test_pickle(self):
self.assertIsInstance(pickle.dumps(TouchFile("xxx")), bytes)
def test_success(self):
TouchFile("xxx").do_execute({})
self.open.assert_called_once_with("xxx", "ab")
self.open().close.assert_called_once_with()
self.utime.assert_called_once_with("xxx", None)
class NullActionTestCase(unittest.TestCase):
def test_label(self):
self.assertIsNone(NullAction().label)
def test_pickle(self):
self.assertIsInstance(pickle.dumps(NullAction()), bytes)
def test(self):
NullAction().do_execute({})
class SleepTestCase(PatchingTestCase):
def setUp(self):
self.sleep = self.patch("time.sleep")
def test_label(self):
self.assertEqual(Sleep(1).label, "sleep 1")
self.sleep.assert_not_called()
def test_pickle(self):
self.assertIsInstance(pickle.dumps(Sleep(1)), bytes)
def test(self):
Sleep(1).do_execute({})
self.sleep.assert_called_once_with(1)
| mit | 7,862,216,126,984,711,000 | 28.591603 | 115 | 0.644138 | false |
stvstnfrd/edx-platform | openedx/core/djangoapps/credit/tests/test_tasks.py | 1 | 8475 | """
Tests for credit course tasks.
"""
from datetime import datetime
import mock
import six
from edx_proctoring.api import create_exam
from openedx.core.djangoapps.credit.api import get_credit_requirements
from openedx.core.djangoapps.credit.exceptions import InvalidCreditRequirements
from openedx.core.djangoapps.credit.models import CreditCourse
from openedx.core.djangoapps.credit.signals import on_course_publish
from xmodule.modulestore.tests.django_utils import ModuleStoreTestCase
from xmodule.modulestore.tests.factories import CourseFactory, ItemFactory
class TestTaskExecution(ModuleStoreTestCase):
"""Set of tests to ensure that the task code will do the right thing when
executed directly.
The test course gets created without the listeners being present, which
allows us to ensure that when the listener is executed, it is done as
expected.
"""
def mocked_set_credit_requirements(course_key, requirements): # pylint: disable=no-self-argument, unused-argument
"""Used as a side effect when mocking method credit api method
'set_credit_requirements'.
"""
raise InvalidCreditRequirements
def setUp(self):
super(TestTaskExecution, self).setUp() # lint-amnesty, pylint: disable=super-with-arguments
self.course = CourseFactory.create(start=datetime(2015, 3, 1))
self.section = ItemFactory.create(parent=self.course, category='chapter', display_name='Test Section')
self.subsection = ItemFactory.create(parent=self.section, category='sequential', display_name='Test Subsection')
self.vertical = ItemFactory.create(parent=self.subsection, category='vertical', display_name='Test Unit')
def test_task_adding_requirements_invalid_course(self):
"""
Test that credit requirements cannot be added for non credit course.
"""
requirements = get_credit_requirements(self.course.id)
assert len(requirements) == 0
on_course_publish(self.course.id)
requirements = get_credit_requirements(self.course.id)
assert len(requirements) == 0
def test_task_adding_requirements(self):
"""Test that credit requirements are added properly for credit course.
Make sure that the receiver correctly fires off the task when
invoked by signal.
"""
self.add_credit_course(self.course.id)
requirements = get_credit_requirements(self.course.id)
assert len(requirements) == 0
on_course_publish(self.course.id)
requirements = get_credit_requirements(self.course.id)
assert len(requirements) == 1
def test_proctored_exam_requirements(self):
"""
Make sure that proctored exams are being registered as requirements
"""
self.add_credit_course(self.course.id)
create_exam(
course_id=six.text_type(self.course.id),
content_id=six.text_type(self.subsection.location),
exam_name='A Proctored Exam',
time_limit_mins=10,
is_proctored=True,
is_active=True
)
requirements = get_credit_requirements(self.course.id)
assert len(requirements) == 0
on_course_publish(self.course.id)
requirements = get_credit_requirements(self.course.id)
assert len(requirements) == 2
assert requirements[1]['namespace'] == 'proctored_exam'
assert requirements[1]['name'] == six.text_type(self.subsection.location)
assert requirements[1]['display_name'] == 'A Proctored Exam'
assert requirements[1]['criteria'] == {}
def test_proctored_exam_filtering(self):
"""
Make sure that timed or inactive exams do not end up in the requirements table
Also practice protored exams are not a requirement
"""
self.add_credit_course(self.course.id)
create_exam(
course_id=six.text_type(self.course.id),
content_id='foo',
exam_name='A Proctored Exam',
time_limit_mins=10,
is_proctored=False,
is_active=True
)
requirements = get_credit_requirements(self.course.id)
assert len(requirements) == 0
on_course_publish(self.course.id)
requirements = get_credit_requirements(self.course.id)
assert len(requirements) == 1
# make sure we don't have a proctoring requirement
assert not [requirement for requirement in requirements if requirement['namespace'] == 'proctored_exam']
create_exam(
course_id=six.text_type(self.course.id),
content_id='foo2',
exam_name='A Proctored Exam',
time_limit_mins=10,
is_proctored=True,
is_active=False
)
on_course_publish(self.course.id)
requirements = get_credit_requirements(self.course.id)
assert len(requirements) == 1
# make sure we don't have a proctoring requirement
assert not [requirement for requirement in requirements if requirement['namespace'] == 'proctored_exam']
# practice proctored exams aren't requirements
create_exam(
course_id=six.text_type(self.course.id),
content_id='foo3',
exam_name='A Proctored Exam',
time_limit_mins=10,
is_proctored=True,
is_active=True,
is_practice_exam=True
)
on_course_publish(self.course.id)
requirements = get_credit_requirements(self.course.id)
assert len(requirements) == 1
# make sure we don't have a proctoring requirement
assert not [requirement for requirement in requirements if requirement['namespace'] == 'proctored_exam']
@mock.patch(
'openedx.core.djangoapps.credit.tasks.set_credit_requirements',
mock.Mock(
side_effect=mocked_set_credit_requirements
)
)
def test_retry(self):
"""Test that adding credit requirements is retried when
'InvalidCreditRequirements' exception is raised.
Make sure that the receiver correctly fires off the task when
invoked by signal
"""
self.add_credit_course(self.course.id)
requirements = get_credit_requirements(self.course.id)
assert len(requirements) == 0
on_course_publish(self.course.id)
requirements = get_credit_requirements(self.course.id)
assert len(requirements) == 0
def test_credit_requirement_blocks_ordering(self):
"""
Test ordering of proctoring blocks.
"""
self.add_credit_course(self.course.id)
subsection = ItemFactory.create(parent=self.section, category='sequential', display_name='Dummy Subsection')
create_exam(
course_id=six.text_type(self.course.id),
content_id=six.text_type(subsection.location),
exam_name='A Proctored Exam',
time_limit_mins=10,
is_proctored=True,
is_active=True
)
requirements = get_credit_requirements(self.course.id)
assert len(requirements) == 0
on_course_publish(self.course.id)
requirements = get_credit_requirements(self.course.id)
assert len(requirements) == 2
assert requirements[1]['namespace'] == 'proctored_exam'
assert requirements[1]['name'] == six.text_type(subsection.location)
assert requirements[1]['display_name'] == 'A Proctored Exam'
assert requirements[1]['criteria'] == {}
# Primary sort is based on start date
on_course_publish(self.course.id)
requirements = get_credit_requirements(self.course.id)
# grade requirement is added on publish of the requirements
assert len(requirements) == 2
# check requirements are added in the desired order
# 1st Minimum grade then the blocks with start date than other blocks
assert requirements[0]['display_name'] == 'Minimum Grade'
assert requirements[1]['display_name'] == 'A Proctored Exam'
def add_credit_course(self, course_key):
"""Add the course as a credit.
Args:
course_key(CourseKey): Identifier for the course
Returns:
CreditCourse object added
"""
credit_course = CreditCourse(course_key=course_key, enabled=True)
credit_course.save()
return credit_course
| agpl-3.0 | -2,392,428,938,518,736,000 | 36.171053 | 120 | 0.650029 | false |
m0nk/Pinna | pinna/ui.py | 1 | 2546 | import gtk
import gtk.glade
import os
import pynotify
if os.path.dirname(__file__):
root_path=os.path.dirname(__file__)+"/"
else:
root_path=""
def init_notifier():
pynotify.init('pinna song notification')
notifier=pynotify.Notification('testtickles')
notifier.set_urgency(pynotify.URGENCY_LOW)
notifier.set_timeout(3000)
return notifier
class dialog_input:
def __init__(self,title,parent,message):
self.dialog = gtk.MessageDialog(
parent,
gtk.DIALOG_MODAL | gtk.DIALOG_DESTROY_WITH_PARENT,
gtk.MESSAGE_QUESTION,
gtk.BUTTONS_OK_CANCEL,
None)
#dialog.title(title)
self.entry=gtk.Entry()
self.entry.connect("activate", self.enter_callback,self.dialog, gtk.RESPONSE_OK)
if message:
self.dialog.set_markup(message)
self.dialog.vbox.pack_end(self.entry, True, True, 0)
def enter_callback(self,entry,dialog,response):
dialog.response(response)
def run(self):
self.dialog.show_all()
signal=self.dialog.run()
if signal==-4 or signal==-6:
self.dialog.hide_all()
self.entry.set_text('')
return None
elif signal==-5:
text=self.entry.get_text()
if text.strip(' '):
self.entry.set_text('')
self.dialog.hide_all()
return text
else:
self.entry.set_text('')
self.run()
else:
self.run()
class ask_yes_no:
def __init__(self,title,parent,message):
self.dialog=gtk.MessageDialog(
parent,
gtk.DIALOG_MODAL | gtk.DIALOG_DESTROY_WITH_PARENT,
gtk.MESSAGE_QUESTION,
gtk.BUTTONS_YES_NO,
None)
if message:
self.dialog.set_markup(message)
def run(self):
signal=self.dialog.run()
if signal==-8:
self.dialog.hide_all()
return True
elif signal==-4 or signal==-9:
self.dialog.hide_all()
return False
mainwindow_wTree=gtk.glade.XML(root_path+"glade/main_window.glade")
settingswindow_wTree=gtk.glade.XML(root_path+"glade/settings_window.glade")
browserwindow_wTree=gtk.glade.XML(root_path+"glade/browser_window.glade")
infowindow_wTree=gtk.glade.XML(root_path+"glade/info_window.glade")
default_albumart=gtk.gdk.pixbuf_new_from_file(root_path+"glade/no_image.png")
notifier=init_notifier()
tray_icon=gtk.StatusIcon()
tray_icon.set_from_file(root_path+'glade/no_image.png')
browser_popups=(dialog_input(None,browserwindow_wTree.get_widget('brwoser_window'),'Please enter a name for the playlist:'),
ask_yes_no(None,browserwindow_wTree.get_widget('browser_window'),'Are you sure?'))
| gpl-3.0 | -3,246,051,747,510,442,000 | 28.264368 | 124 | 0.672427 | false |
joehillen/txjason | txjason/tests/test_client.py | 1 | 3893 | import json
from twisted.internet import defer, task
from twisted.trial import unittest
from txjason import client
clock = task.Clock()
class ClientTestCase(unittest.TestCase):
def setUp(self):
self.client = client.JSONRPCClient(reactor=clock)
def checkPayload(self, payload, expected, d=None):
payload = json.loads(payload)
self.assertEqual(payload, expected)
if d:
d.callback(None)
def test_timeout(self):
called = []
def eb(r):
called.append(r.value)
payload, d = self.client.getRequest('foo')
d.addErrback(eb)
clock.advance(self.client.timeout - 1)
self.assertFalse(called)
clock.advance(1)
self.assertIsInstance(called[0], defer.CancelledError)
def test_response(self):
called = []
def cb(r):
called.append(r)
payload, d = self.client.getRequest('foo')
d.addCallback(cb)
response = {'id': 1, 'jsonrpc': '2.0', 'result': 'bar'}
self.client.handleResponse(json.dumps(response))
self.assertEqual(called, ['bar'])
def test_error(self):
called = []
def eb(r):
called.append(r.value)
payload, d = self.client.getRequest('foo')
d.addErrback(eb)
response = {'id': 1, 'jsonrpc': '2.0', 'error': {'code': -32601, 'message': 'Method not found'}}
self.client.handleResponse(json.dumps(response))
self.assertIsInstance(called[0], client.JSONRPCClientError)
def test_args_and_kwargs(self):
self.assertRaises(client.JSONRPCClientError, self.client.getRequest, 'foo', 1, bar='bar')
def test_positional_params(self):
payload, d = self.client.getRequest('foo', 1, 2, 3)
expected = {'id': 1, 'jsonrpc': '2.0', 'method': 'foo', 'params': [1, 2, 3]}
self.checkPayload(payload, expected, d)
def test_named_params(self):
payload, d = self.client.getRequest('foo', a=1, b=2)
expected = {'id': 1, 'jsonrpc': '2.0', 'method': 'foo', 'params': {'a': 1, 'b': 2}}
self.checkPayload(payload, expected, d)
def test_no_params(self):
payload, d = self.client.getRequest('foo')
expected = {'id': 1, 'jsonrpc': '2.0', 'method': 'foo', 'params': []}
self.checkPayload(payload, expected, d)
def test_notification(self):
payload = self.client.getNotification('foo', 1)
expected = {'jsonrpc': '2.0', 'method': 'foo', 'params': [1]}
self.checkPayload(payload, expected)
def test_id_increment(self):
payload, d = self.client.getRequest('foo')
expected = {'id': 1, 'jsonrpc': '2.0', 'method': 'foo', 'params': []}
self.checkPayload(payload, expected)
payload, d = self.client.getRequest('foo')
expected['id'] = 2
self.checkPayload(payload, expected)
def test_no_id(self):
response = {'jsonrpc': '2.0', 'result': 'bar'}
self.assertRaises(client.JSONRPCProtocolError, self.client.handleResponse, json.dumps(response))
def test_bad_version(self):
response = {'jsonrpc': '3.0', 'id': 1, 'result': 'bar'}
self.assertRaises(client.JSONRPCProtocolError, self.client.handleResponse, json.dumps(response))
def test_no_version(self):
response = {'id': 1, 'result': 'bar'}
self.assertRaises(client.JSONRPCProtocolError, self.client.handleResponse, json.dumps(response))
def test_request_not_found(self):
response = {'jsonrpc': '2.0', 'id': 999, 'result': 'bar'}
self.assertRaises(client.JSONRPCClientError, self.client.handleResponse, json.dumps(response))
def test_no_result(self):
payload, d = self.client.getRequest('foo')
response = {'jsonrpc': '2.0', 'id': 1}
self.assertRaises(client.JSONRPCProtocolError, self.client.handleResponse, json.dumps(response))
| mit | 3,735,510,001,943,927,300 | 37.544554 | 104 | 0.612638 | false |
rosenvladimirov/addons | product_print_category/models/product_print_category.py | 1 | 2437 | # coding: utf-8
# Copyright (C) 2016-Today: La Louve (<http://www.lalouve.net/>)
# @author: Sylvain LE GAL (https://twitter.com/legalsylvain)
# License AGPL-3.0 or later (http://www.gnu.org/licenses/agpl.html).
from openerp import api, fields, models
class ProductPrintCategory(models.Model):
_name = 'product.print.category'
_inherit = 'ir.needaction_mixin'
# View Section
@api.model
def _needaction_count(self, domain=None, context=None):
product_obj = self.env['product.product']
return len(product_obj.search([('print_count', '=', True)]))
# Fields Section
name = fields.Char(string='Name', required=True)
active = fields.Boolean(string='Active', default=True)
company_id = fields.Many2one(
string='Company', comodel_name='res.company', index=True,
default=lambda self: self.env['res.company']._company_default_get())
product_ids = fields.One2many(
comodel_name='product.product', inverse_name='print_category_id')
product_qty = fields.Integer(
string='Products', compute='_compute_product_qty',
store=True)
product_to_print_ids = fields.One2many(
comodel_name='product.product', compute='_compute_to_print',
multi='print_count')
product_to_print_qty = fields.Integer(
compute='_compute_to_print', multi='print_count',
string='Products To Print')
field_ids = fields.Many2many(
comodel_name='ir.model.fields',
column1='category_id', column2='field_id',
domain="[('model', '=', 'product.template')]")
report_xml_id = fields.Many2one(
comodel_name='ir.actions.report.xml', string='Report name',
domain="[('model','in', ('product.product', 'product.template'))]"
)
# Compute Section
@api.multi
@api.depends(
'product_ids.print_category_id',
'product_ids.product_tmpl_id.print_category_id')
def _compute_product_qty(self):
for category in self:
category.product_qty = len(category.product_ids)
@api.multi
def _compute_to_print(self):
product_obj = self.env['product.product']
for category in self:
products = product_obj.search([
('print_category_id', '=', category.id),
('print_count', '=', True)])
category.product_to_print_qty = len(products)
category.product_to_print_ids = products
| agpl-3.0 | -7,535,654,423,478,292,000 | 33.814286 | 76 | 0.62659 | false |
sunlightlabs/foia-data | data/foia.gov/scrape-foia-data-officialx-double-output.py | 1 | 4647 | from urllib2 import urlopen
import lxml.html
import requests
import json
import csv
# dataReports = ["DataRequest","DataExemption","DataAppeal","DataProcessTime","DataFeewaiver","DataPerson","DataBacklog","DataConsultant","RequestDisposition","RequestDenial","RequestPending","Exemption3Statutes","appealDisposition","appealDenialEx","appealDenialOther","appealDenialReason","appealResponseTime","appeal10Pending","processingGranted","processingSimple","processingComplex","processingExpedited","processingPending","processing10Request","personnelCost","consultation10Oldest","requestCompare","requestBacklog","appealCompare","appealBacklog","feewaiverWaiver","DataPerson"]
# dataReports = ["processingGranted","processingSimple","processingComplex","processingExpedited","processingPending","processing10Request"]
# dataYears = ['2008', '2009', '2010', '2011', '2012', '2013', '2014', '2015']
# dataAgencies = ['USDA','DOC','DoD','ED','DOE','HHS','DHS','HUD','DOI','DOJ','U.S. DOL','State','DOT','Treasury','VA','ACUS','USAID','ABMC','NRPC','AFRH','BBG','CIA','CSB','USCCR','CPPBSD','CFTC','CFPB','U.S. CPSC','CNCS','CIGIE','CSOSA','DNFSB','EPA','EEOC','CEQ','OMB','ONDCP','OSTP','USTR','Ex-Im Bank','FCA','FCSIC','FCC','FDIC','FEC','FERC','FFIEC','FHFA','FLRA','FMC','FMCS','FMSHRC','FOMC','FRB','FRTIB','FTC','GSA','IMLS','IAF','LSC','MSPB','MCC','NASA','NARA','NCPC','NCUA','NEA','NEH','NIGC','NLRB','NMB','NSF','NTSB','USNRC','OSHRC','OGE','ONHIR','OPM','OSC','ODNI','OPIC','PC','PBGC','PRC','PCLOB','RATB','US RRB','SEC','SSS','SBA','SSA','SIGAR','SIGIR','STB','TVA','USAB','US ADF','CO','USIBWC','USITC','USPS','USTDA']
dataReports = ["processingGranted","processingSimple","processingComplex","processingExpedited","processingPending","processing10Request"]
dataYears = ['2008', '2009', '2010', '2011', '2012', '2013', '2014', '2015']
dataAgencies = ['USDA']
# baseURL = "https://www.foia.gov/foia/Services/processing10Request.jsp?requestYear="
newBaseURL = "https://www.foia.gov/foia/Services/"
newBaseURLAppend = ".jsp?requestYear="
# baseURL = newBaseURL + dataReport + newBaseURLAppend
# 2014
agencyURL = "&agencyName="
# FDIC"
for dataReport in dataReports:
baseURL = newBaseURL + dataReport + newBaseURLAppend
for year in dataYears:
# print baseURL + year
for agency in dataAgencies:
# print baseURL + year + agencyURL + agency
# tree = LH.parse(baseURL + year + agencyURL + agency)
tree = lxml.html.parse(urlopen(baseURL + year + agencyURL + agency))
table = [[th.text_content().strip() for th in tree.xpath('//th') if th.text_content().strip()]]
#tableHead = ([th.text_content() for th in tree.xpath('//th')])
for tr in tree.xpath('//tr'):
row = []
for td in tr.xpath('//td'):
content = td.text_content().strip()
if content not in ["", "\\u00a0"]:
row.append(content)
table.append(row)
#tableBody = ([td.text_content() for td in tree.xpath('//td')])
#tableData = tableHead + tableBody
# print tableData
# jsonFileName = year + "/" + year + "-" + agency + ".json"
# make csv with json. what could go wrong?
# jsonFileName = year + "/" + year + "-" + dataReport + "-" + agency + ".csv"
# with open(jsonFileName, 'w') as outfile:
# json.dump(table, outfile)
# with open(jsonFileName, 'w') as f:
# writer = csv.writer(f)
# writer.writerows(table)
dataFileName = year + "/" + year + "-" + dataReport + "-" + agency
jsonFileName = dataFileName + ".json"
csvFileName = dataFileName + ".csv"
with open(csvFileName, 'w') as outfile:
json.dump(table, outfile)
with open(csvFileName, 'w') as f:
writer = csv.writer(f)
writer.writerows(table)
with open(jsonFileName, 'w') as outfile:
json.dump(table, outfile)
def scrape():
pass
# TODO move code here
def merge():
pass
"""
Problem to fix first
1) Fix number of headers in CSV files not matching row data.
2) Figure out why number of rows doesn’t match number of rows found in HTML table element (i.e. USDA 2008 processPending)
Merge algorithm psuedocode
1) For each method, create a CSV file of the same name (i.e. processPending.csv)
2) Create columns for year and agency
2) Iterate over CSV files for each year, the method, and agency (i.e. 2008-processingPending-USDA.csv)
3) For each CSV file, open and read data.
4) Remove first and last line (assuming they are the headers and the total line)
5) Add remaining lines to the CSV file opened in step #1.
6) Add data for year and agency created in step #2 (i.e. add 2008 and USDA in the columns you created)
"""
if __name__ == '__main__':
scrape()
merge()
| gpl-3.0 | 2,669,185,570,743,464,000 | 47.385417 | 732 | 0.672336 | false |
edgimar/borg | src/borg/version.py | 1 | 1632 | import re
def parse_version(version):
"""
simplistic parser for setuptools_scm versions
supports final versions and alpha ('a'), beta ('b') and rc versions. It just discards commits since last tag
and git revision hash.
Output is a version tuple containing integers. It ends with one or two elements that ensure that relational
operators yield correct relations for alpha, beta and rc versions too. For final versions the last element
is a -1, for prerelease versions the last two elements are a smaller negative number and the number of e.g.
the beta.
Note, this sorts version 1.0 before 1.0.0.
This version format is part of the remote protocol, don‘t change in breaking ways.
"""
parts = version.split('+')[0].split('.')
if parts[-1].startswith('dev'):
del parts[-1]
version = [int(segment) for segment in parts[:-1]]
prerelease = re.fullmatch('([0-9]+)(a|b|rc)([0-9]+)', parts[-1])
if prerelease:
version_type = {'a': -4, 'b': -3, 'rc': -2}[prerelease.group(2)]
version += [int(prerelease.group(1)), version_type, int(prerelease.group(3))]
else:
version += [int(parts[-1]), -1]
return tuple(version)
def format_version(version):
"""a reverse for parse_version (obviously without the dropped information)"""
f = []
it = iter(version)
while True:
part = next(it)
if part >= 0:
f += str(part)
elif part == -1:
break
else:
f[-1] = f[-1] + {-2: 'rc', -3: 'b', -4: 'a'}[part] + str(next(it))
break
return '.'.join(f)
| bsd-3-clause | -5,804,413,520,198,349,000 | 32.265306 | 112 | 0.605521 | false |
elyezer/robottelo | tests/foreman/ui/test_computeprofile.py | 1 | 3856 | """Test class for Compute Profile UI
:Requirement: Computeprofile
:CaseAutomation: Automated
:CaseLevel: Acceptance
:CaseComponent: UI
:TestType: Functional
:CaseImportance: High
:Upstream: No
"""
from fauxfactory import gen_string
from robottelo.datafactory import (
generate_strings_list,
invalid_values_list,
valid_data_list,
)
from robottelo.decorators import tier1
from robottelo.test import UITestCase
from robottelo.ui.factory import make_compute_profile
from robottelo.ui.locators import common_locators
from robottelo.ui.session import Session
class ComputeProfileTestCase(UITestCase):
"""Implements Compute Profile tests in UI."""
@tier1
def test_positive_create(self):
"""Create new Compute Profile using different names
:id: 138a3e6f-7eb5-4204-b48d-edc6ce363576
:expectedresults: Compute Profile is created
:CaseImportance: Critical
"""
with Session(self.browser) as session:
for name in valid_data_list():
with self.subTest(name):
make_compute_profile(session, name=name)
self.assertIsNotNone(self.compute_profile.search(name))
@tier1
def test_negative_create(self):
"""Attempt to create Compute Profile using invalid names only
:id: 6da73996-c235-45ee-a11e-5b4f0ae75d93
:expectedresults: Compute Profile is not created
:CaseImportance: Critical
"""
with Session(self.browser) as session:
for name in invalid_values_list('ui'):
with self.subTest(name):
make_compute_profile(session, name=name)
self.assertIsNotNone(session.nav.wait_until_element(
common_locators['name_haserror']))
@tier1
def test_positive_update(self):
"""Update selected Compute Profile entity using proper names
:id: b6dac9a4-8c5d-44d4-91e4-be2813e3ea50
:expectedresults: Compute Profile is updated.
:CaseImportance: Critical
"""
name = gen_string('alpha')
with Session(self.browser) as session:
make_compute_profile(session, name=name)
self.assertIsNotNone(self.compute_profile.search(name))
for new_name in generate_strings_list(length=7):
with self.subTest(new_name):
self.compute_profile.update(name, new_name)
self.assertIsNotNone(self.compute_profile.search(new_name))
name = new_name
@tier1
def test_negative_update(self):
"""Attempt to update Compute Profile entity using invalid names only
:id: cf7d46c2-6edc-43be-b5d4-ba92f10b921b
:expectedresults: Compute Profile is not updated.
:CaseImportance: Critical
"""
name = gen_string('alpha')
with Session(self.browser) as session:
make_compute_profile(session, name=name)
self.assertIsNotNone(self.compute_profile.search(name))
for new_name in invalid_values_list('ui'):
with self.subTest(new_name):
self.compute_profile.update(name, new_name)
self.assertIsNotNone(session.nav.wait_until_element(
common_locators['name_haserror']))
@tier1
def test_positive_delete(self):
"""Delete Compute Profile entity
:id: 9029b8ec-44c3-4f41-9ea0-0c13c2add76c
:expectedresults: Compute Profile is deleted successfully.
:CaseImportance: Critical
"""
with Session(self.browser) as session:
for name in generate_strings_list(length=7):
with self.subTest(name):
make_compute_profile(session, name=name)
self.compute_profile.delete(name)
| gpl-3.0 | -7,459,642,537,115,759,000 | 30.867769 | 79 | 0.633039 | false |
wavicles/fossasia-pslab-apps | PSL_Apps/templates/widgets/ui_voltWidget.py | 1 | 5765 | # -*- coding: utf-8 -*-
# Form implementation generated from reading ui file 'PSL_Apps/templates/widgets/voltWidget.ui'
#
# Created: Mon Jul 11 21:45:34 2016
# by: PyQt4 UI code generator 4.10.4
#
# WARNING! All changes made in this file will be lost!
from PyQt4 import QtCore, QtGui
try:
_fromUtf8 = QtCore.QString.fromUtf8
except AttributeError:
def _fromUtf8(s):
return s
try:
_encoding = QtGui.QApplication.UnicodeUTF8
def _translate(context, text, disambig):
return QtGui.QApplication.translate(context, text, disambig, _encoding)
except AttributeError:
def _translate(context, text, disambig):
return QtGui.QApplication.translate(context, text, disambig)
class Ui_Form(object):
def setupUi(self, Form):
Form.setObjectName(_fromUtf8("Form"))
Form.resize(330, 125)
Form.setMaximumSize(QtCore.QSize(16777215, 125))
self.verticalLayout = QtGui.QVBoxLayout(Form)
self.verticalLayout.setSpacing(0)
self.verticalLayout.setMargin(0)
self.verticalLayout.setObjectName(_fromUtf8("verticalLayout"))
self.frame_7 = QtGui.QFrame(Form)
sizePolicy = QtGui.QSizePolicy(QtGui.QSizePolicy.Expanding, QtGui.QSizePolicy.Preferred)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.frame_7.sizePolicy().hasHeightForWidth())
self.frame_7.setSizePolicy(sizePolicy)
self.frame_7.setMaximumSize(QtCore.QSize(400, 125))
self.frame_7.setFrameShape(QtGui.QFrame.NoFrame)
self.frame_7.setFrameShadow(QtGui.QFrame.Raised)
self.frame_7.setObjectName(_fromUtf8("frame_7"))
self.verticalLayout_5 = QtGui.QVBoxLayout(self.frame_7)
self.verticalLayout_5.setSpacing(0)
self.verticalLayout_5.setContentsMargins(0, 5, 0, 0)
self.verticalLayout_5.setObjectName(_fromUtf8("verticalLayout_5"))
self.frame = QtGui.QFrame(self.frame_7)
sizePolicy = QtGui.QSizePolicy(QtGui.QSizePolicy.Preferred, QtGui.QSizePolicy.Maximum)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.frame.sizePolicy().hasHeightForWidth())
self.frame.setSizePolicy(sizePolicy)
self.frame.setFrameShape(QtGui.QFrame.NoFrame)
self.frame.setFrameShadow(QtGui.QFrame.Raised)
self.frame.setObjectName(_fromUtf8("frame"))
self.horizontalLayout = QtGui.QHBoxLayout(self.frame)
self.horizontalLayout.setSpacing(0)
self.horizontalLayout.setContentsMargins(4, 0, 4, 0)
self.horizontalLayout.setObjectName(_fromUtf8("horizontalLayout"))
self.label = QtGui.QLabel(self.frame)
self.label.setObjectName(_fromUtf8("label"))
self.horizontalLayout.addWidget(self.label)
self.commandLinkButton = QtGui.QCommandLinkButton(self.frame)
sizePolicy = QtGui.QSizePolicy(QtGui.QSizePolicy.Preferred, QtGui.QSizePolicy.Preferred)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.commandLinkButton.sizePolicy().hasHeightForWidth())
self.commandLinkButton.setSizePolicy(sizePolicy)
self.commandLinkButton.setMinimumSize(QtCore.QSize(94, 0))
self.commandLinkButton.setMaximumSize(QtCore.QSize(16777215, 30))
self.commandLinkButton.setAutoRepeatDelay(100)
self.commandLinkButton.setObjectName(_fromUtf8("commandLinkButton"))
self.horizontalLayout.addWidget(self.commandLinkButton)
self.verticalLayout_5.addWidget(self.frame)
self.Frame_4 = QtGui.QFrame(self.frame_7)
self.Frame_4.setProperty("PeripheralCollectionInner", _fromUtf8(""))
self.Frame_4.setObjectName(_fromUtf8("Frame_4"))
self.gridLayout_4 = QtGui.QGridLayout(self.Frame_4)
self.gridLayout_4.setSizeConstraint(QtGui.QLayout.SetNoConstraint)
self.gridLayout_4.setMargin(0)
self.gridLayout_4.setSpacing(0)
self.gridLayout_4.setObjectName(_fromUtf8("gridLayout_4"))
self.table = QtGui.QTableWidget(self.Frame_4)
self.table.setVerticalScrollBarPolicy(QtCore.Qt.ScrollBarAlwaysOff)
self.table.setHorizontalScrollBarPolicy(QtCore.Qt.ScrollBarAlwaysOff)
self.table.setRowCount(3)
self.table.setColumnCount(4)
self.table.setObjectName(_fromUtf8("table"))
self.table.horizontalHeader().setVisible(False)
self.table.horizontalHeader().setDefaultSectionSize(80)
self.table.horizontalHeader().setMinimumSectionSize(85)
self.table.horizontalHeader().setStretchLastSection(True)
self.table.verticalHeader().setVisible(False)
self.table.verticalHeader().setCascadingSectionResizes(True)
self.table.verticalHeader().setStretchLastSection(True)
self.gridLayout_4.addWidget(self.table, 0, 0, 1, 1)
self.verticalLayout_5.addWidget(self.Frame_4)
self.verticalLayout.addWidget(self.frame_7)
self.retranslateUi(Form)
QtCore.QObject.connect(self.commandLinkButton, QtCore.SIGNAL(_fromUtf8("clicked()")), Form.read)
QtCore.QMetaObject.connectSlotsByName(Form)
def retranslateUi(self, Form):
Form.setWindowTitle(_translate("Form", "Form", None))
self.frame_7.setProperty("class", _translate("Form", "ControlWidget", None))
self.label.setText(_translate("Form", "Voltmeters", None))
self.commandLinkButton.setToolTip(_translate("Form", "Read voltages from all channels and display them", None))
self.commandLinkButton.setText(_translate("Form", "Update", None))
self.Frame_4.setProperty("class", _translate("Form", "ControlWidgetInner", None))
| gpl-3.0 | -6,214,400,697,271,422,000 | 50.017699 | 119 | 0.713617 | false |
Smartupz/django-rosetta-old | rosetta/poutil.py | 1 | 3811 | import re
import os
import django
from django.conf import settings
from rosetta.conf import settings as rosetta_settings
from django.core.cache import cache
from django.utils.importlib import import_module
import itertools
def find_pos(lang, project_apps=True, django_apps=False, third_party_apps=False):
"""
scans a couple possible repositories of gettext catalogs for the given
language code
"""
paths = []
# project/locale
parts = settings.SETTINGS_MODULE.split('.')
project = __import__(parts[0], {}, {}, [])
abs_project_path = os.path.normpath(os.path.abspath(os.path.dirname(project.__file__)))
if project_apps:
paths.append(os.path.abspath(os.path.join(os.path.dirname(project.__file__), 'locale')))
# django/locale
if django_apps:
django_paths = cache.get('rosetta_django_paths')
if django_paths is None:
django_paths = []
for root, dirnames, filename in os.walk(os.path.abspath(os.path.dirname(django.__file__))):
if 'locale' in dirnames:
django_paths.append(os.path.join(root , 'locale'))
continue
cache.set('rosetta_django_paths', django_paths, 60 * 60)
paths = paths + django_paths
# settings
for localepath in settings.LOCALE_PATHS:
if os.path.isdir(localepath):
paths.append(localepath)
# project/app/locale
for appname in settings.INSTALLED_APPS:
if rosetta_settings.EXCLUDED_APPLICATIONS and appname in rosetta_settings.EXCLUDED_APPLICATIONS:
continue
# this yields the modules.py of application
app = import_module(appname)
apppath = os.path.normpath(os.path.abspath(os.path.join(os.path.dirname(app.__file__), 'locale')))
# django apps
if appname.startswith('django.') or appname == "django":
continue
# third party external
if not third_party_apps and abs_project_path not in apppath:
continue
# local apps
if not project_apps and abs_project_path in apppath:
continue
if os.path.isdir(apppath):
paths.append(apppath)
pofiles = set()
langs = [lang]
if u'-' in lang:
lc, cc = lang.lower().split('-')
langs.append("%s_%s" % (lc, cc))
langs.append("%s_%s" % (lc, cc.upper()))
if u'_' in lang:
lc, cc = lang.lower().split('_')
langs.append("%s-%s" % (lc, cc))
langs.append("%s-%s" % (lc, cc.upper()))
for path, langcode in itertools.product(paths, langs):
dirname = os.path.join(path, langcode, "LC_MESSAGES")
if not os.path.isdir(dirname):
continue
for fname in os.listdir(dirname):
fpath = os.path.join(dirname, fname)
if (os.path.isfile(fpath)
and fname.startswith('django')
and fname.endswith('.po')):
pofiles.add(os.path.abspath(fpath))
return list(pofiles)
def pagination_range(first, last, current):
r = []
r.append(first)
if first + 1 < last: r.append(first + 1)
if current - 2 > first and current - 2 < last: r.append(current - 2)
if current - 1 > first and current - 1 < last: r.append(current - 1)
if current > first and current < last: r.append(current)
if current + 1 < last and current + 1 > first: r.append(current + 1)
if current + 2 < last and current + 2 > first: r.append(current + 2)
if last - 1 > first: r.append(last - 1)
r.append(last)
r = list(set(r))
r.sort()
prev = 10000
for e in r[:]:
if prev + 1 < e:
try:
r.insert(r.index(e), '...')
except ValueError:
pass
prev = e
return r
| mit | 8,625,855,195,036,189,000 | 33.963303 | 106 | 0.587772 | false |
hemmerling/nosql-mongodb2013 | src/m101j/final/final-4/validate_sourcecode.py | 1 | 10656 | import pymongo
import urllib2
import urllib
import cookielib
import random
import re
import string
import sys
import getopt
# init the global cookie jar
cj = cookielib.CookieJar()
# declare the variables to connect to db
connection = None
db = None
webhost = "localhost:8082"
mongostr = "mongodb://localhost:27017"
db_name = "blog"
# this script will check that homework 3.2 is correct
# makes a little salt
def make_salt(n):
salt = ""
for i in range(n):
salt = salt + random.choice(string.ascii_letters)
return salt
# this is a validation script to make sure the blog works correctly.
def create_user(username, password):
global cj
try:
print "Trying to create a test user ", username
url = "http://{0}/signup".format(webhost)
data = urllib.urlencode([("email",""),("username",username), ("password",password), ("verify",password)])
request = urllib2.Request(url=url, data=data)
opener = urllib2.build_opener(urllib2.HTTPCookieProcessor(cj))
f = opener.open(request)
users = db.users
# check that the user is in users collection
user = users.find_one({'_id':username})
if (user == None):
print "Could not find the test user ", username, "in the users collection."
return False
print "Found the test user ", username, " in the users collection"
# check that the user has been built
result = f.read()
expr = re.compile("Welcome\s+"+ username)
if expr.search(result):
return True
print "When we tried to create a user, here is the output we got\n"
print result
return False
except:
print "the request to ", url, " failed, so your blog may not be running."
raise
return False
def try_to_login(username, password):
try:
print "Trying to login for test user ", username
url = "http://{0}/login".format(webhost)
data = urllib.urlencode([("username",username), ("password",password)])
request = urllib2.Request(url=url, data=data)
opener = urllib2.build_opener(urllib2.HTTPCookieProcessor(cj))
f = opener.open(request)
# check for successful login
result = f.read()
expr = re.compile("Welcome\s+"+ username)
if expr.search(result):
return True
print "When we tried to login, here is the output we got\n"
print result
return False
except:
print "the request to ", url, " failed, so your blog may not be running."
return False
def add_blog_post(title,post,tags):
try:
print "Trying to submit a post with title ", title
data = urllib.urlencode([("body",post), ("subject",title), ("tags",tags)])
url = "http://{0}/newpost".format(webhost)
request = urllib2.Request(url=url, data=data)
cj.add_cookie_header(request)
opener = urllib2.build_opener()
f = opener.open(request)
# check for successful login
result = f.read()
expr = re.compile(title + ".+" + post, re.DOTALL)
if expr.search(result):
return True
print "When we tried to post, here is the output we got\n"
print result
return False
except:
print "the request to ", url, " failed, so your blog may not be running."
raise
return False
def add_blog_comment(title,post):
try:
print "+Trying to submit a blog comment for post with title", title
url = "http://{0}/newcomment".format(webhost)
doc = {}
check_mongo_for_post(title, post, doc)
permalink = doc['doc']['permalink']
comment_name = make_salt(12)
comment_body = make_salt(12)
data = urllib.urlencode([("commentName",comment_name), ("commentBody",comment_body), ("permalink",permalink)])
request = urllib2.Request(url=url, data=data)
cj.add_cookie_header(request)
opener = urllib2.build_opener()
f = opener.open(request)
# check for successful addition of comment on page
result = f.read()
expr = re.compile(title + ".+" + post, re.DOTALL)
if not expr.search(result):
print "When we tried to find the comment we posted at the ", url, " here is what we got"
print result
return False
# check for successful addition of comment..retrieve the doc again
if(not check_mongo_for_post(title, post, doc)):
print "Could not find comment in database"
return False
found = False
if ('comments' in doc['doc']):
for comment in doc['doc']['comments']:
if (comment['body'] == comment_body and comment['author'] == comment_name):
found = True
return found
except:
print "the request to ", url, " failed, so your blog may not be running."
raise
return False
# fetch the blog home page and return the link of the first post
def fetch_blog_home_page(posts):
try:
url = "http://{0}/".format(webhost)
print "Trying to grab the blog home page at url and find the first post.", url
request = urllib2.Request(url=url)
cj.add_cookie_header(request)
opener = urllib2.build_opener()
f = opener.open(request)
# Look for a post
result = f.read()
expr = re.compile("<a href=\"([^\"]+)\"\w*?>", re.DOTALL)
match = expr.search(result)
if match is not None:
print "Fount a post url: ", match.group(1)
posts.append(match.group(1))
return True
print "Hmm, can't seem to find a post. Is the blog populated with posts?"
print "When we tried to read the blog index at ", url, " here is what we got"
print result
return False
except:
print "the request to ", url, " failed, so your blog may not be running."
raise
return False
# gets the likes value off the first commment or returns None
def fetch_likes(url):
try:
url = "http://{0}{1}".format(webhost, url)
print "Trying to grab the number of likes for url ", url
request = urllib2.Request(url=url)
cj.add_cookie_header(request)
opener = urllib2.build_opener()
f = opener.open(request)
# let's get the first form element
result = f.read()
expr = re.compile("<form[^>]*>.*?Likes:\s*(\d+)\s*<.*?</form>", re.DOTALL)
match = expr.search(result)
if match is not None:
print "Likes value ", match.group(1)
return int(match.group(1))
print "Can't fetch the like value for the first comment. Perhaps the blog entry has no comments?"
print "When we tried to read the blog permalink at ", url, " here is what we got"
return None
except:
print "the request to ", url, " failed, so your blog may not be running."
raise
return None
# gets the likes value off the first commment or returns None
def click_on_like(permalink):
print "Clicking on Like link for post: ", permalink
try:
expr = re.compile("[^/]+/([^/]+)")
match = expr.search(permalink)
if match is None:
return False
permalink = match.group(1)
url = "http://{0}/like".format(webhost)
# print "Like POST url", url
data = urllib.urlencode([("permalink",permalink), ("comment_ordinal","0")])
request = urllib2.Request(url=url, data=data)
cj.add_cookie_header(request)
opener = urllib2.build_opener()
f = opener.open(request)
return True
except:
print "the request to ", url, " failed, so your blog may not be running."
raise
# command line arg parsing to make folks happy who want to run at mongolabs or mongohq
# this functions uses global vars to communicate. forgive me.
def arg_parsing(argv):
global webhost
global mongostr
global db_name
try:
opts, args = getopt.getopt(argv, "-p:-m:-d:")
except getopt.GetoptError:
print "usage validate.py -p webhost -m mongoConnectString -d databaseName"
print "\twebhost defaults to {0}".format(webhost)
print "\tmongoConnectionString default to {0}".format(mongostr)
print "\tdatabaseName defaults to {0}".format(db_name)
sys.exit(2)
for opt, arg in opts:
if (opt == '-h'):
print "usage validate.py -p webhost -m mongoConnectString -d databaseName"
sys.exit(2)
elif opt in ("-p"):
webhost = arg
print "Overriding HTTP host to be ", webhost
elif opt in ("-m"):
mongostr = arg
print "Overriding MongoDB connection string to be ", mongostr
elif opt in ("-d"):
db_name = arg
print "Overriding MongoDB database to be ", db_name
# main section of the code
def main(argv):
arg_parsing(argv)
global connection
global db
print "Welcome to the M101 Final Exam, Question 4 Validation Checker"
# connect to the db (mongostr was set in arg_parsing)
connection = pymongo.Connection(mongostr, safe=True)
db = connection[db_name]
# grab the blog home page and find the first post
posts = []
if (not fetch_blog_home_page(posts)):
print "I can't grab the home page of the blog"
sys.exit(1)
# now go to the permalink page for that post
likes_value = fetch_likes(posts[0])
if (likes_value is None):
print "Can't fetch the like value"
sys.exit(1)
click_on_like(posts[0])
new_likes_value = fetch_likes(posts[0])
if (new_likes_value != (likes_value + 1)):
print "I was not able to increment the likes on a comment"
print "old likes value was ", likes_value
print "likes value after I clicked was ", new_likes_value
print "Sorry, you have not solved it yet."
sys.exit(1)
print "Tests Passed for Final 4. Your validation code is 983nf93ncafjn20fn10f"
if __name__ == "__main__":
main(sys.argv[1:])
| apache-2.0 | 4,197,648,597,348,295,000 | 28.886957 | 118 | 0.57789 | false |
SEL-Columbia/commcare-hq | corehq/apps/api/resources/v0_4.py | 1 | 17039 | from collections import defaultdict
from django.core.urlresolvers import reverse
from django.http import HttpResponseForbidden, HttpResponse, HttpResponseBadRequest
from tastypie import fields
from tastypie.bundle import Bundle
from tastypie.authentication import Authentication
from tastypie.exceptions import BadRequest
from corehq.apps.api.resources.v0_1 import CustomResourceMeta, RequirePermissionAuthentication
from couchforms.models import XFormInstance
from casexml.apps.case.models import CommCareCase
from casexml.apps.case import xform as casexml_xform
from custom.hope.models import HOPECase, CC_BIHAR_NEWBORN, CC_BIHAR_PREGNANCY
from corehq.apps.api.util import get_object_or_not_exist
from corehq.apps.app_manager import util as app_manager_util
from corehq.apps.app_manager.models import ApplicationBase, Application, RemoteApp, Form, get_app
from corehq.apps.receiverwrapper.models import Repeater, repeater_types
from corehq.apps.groups.models import Group
from corehq.apps.cloudcare.api import ElasticCaseQuery
from corehq.apps.users.util import format_username
from corehq.apps.users.models import CouchUser, Permissions
from corehq.apps.api.resources import v0_1, v0_3, JsonResource, DomainSpecificResourceMixin, dict_object, SimpleSortableResourceMixin
from corehq.apps.api.es import XFormES, CaseES, ESQuerySet, es_search
from corehq.apps.api.fields import ToManyDocumentsField, UseIfRequested, ToManyDictField, ToManyListDictField
from corehq.apps.api.serializers import CommCareCaseSerializer
from no_exceptions.exceptions import Http400
# By the time a test case is running, the resource is already instantiated,
# so as a hack until this can be remedied, these module-level globals can be
# set to provide mock ES clients.
MOCK_XFORM_ES = None
MOCK_CASE_ES = None
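# A minimal sketch of how a test might install such a mock; FakeXFormES and
# its run_query() signature are assumptions for illustration, not part of
# this module:
#
#     class FakeXFormES(object):
#         def __init__(self):
#             self.queries = []
#         def run_query(self, query):
#             # record the query and return an empty ES-shaped result
#             self.queries.append(query)
#             return {'hits': {'total': 0, 'hits': []}}
#
#     from corehq.apps.api.resources import v0_4
#     v0_4.MOCK_XFORM_ES = FakeXFormES()  # installed before exercising the resource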
class XFormInstanceResource(SimpleSortableResourceMixin, v0_3.XFormInstanceResource, DomainSpecificResourceMixin):
# Some fields that were present when just fetching individual docs are
# not present for e.g. devicelogs and must be allowed blank
uiversion = fields.CharField(attribute='uiversion', blank=True, null=True)
metadata = fields.DictField(attribute='metadata', blank=True, null=True)
domain = fields.CharField(attribute='domain')
app_id = fields.CharField(attribute='app_id', null=True)
build_id = fields.CharField(attribute='build_id', null=True)
cases = UseIfRequested(ToManyDocumentsField('corehq.apps.api.resources.v0_4.CommCareCaseResource',
attribute=lambda xform: casexml_xform.cases_referenced_by_xform(xform)))
is_phone_submission = fields.BooleanField(readonly=True)
def dehydrate_is_phone_submission(self, bundle):
return (
getattr(bundle.obj, 'openrosa_headers', None)
and bundle.obj.openrosa_headers.get('HTTP_X_OPENROSA_VERSION')
)
    # Prevent hitting Couch to md5 the attachment. There is no way to
    # eliminate a tastypie field defined in a parent class, so the field is
    # kept with a cheap placeholder attribute and its value is replaced
    # wholesale in dehydrate_md5 below.
    md5 = fields.CharField(attribute='uiversion', blank=True, null=True)
def dehydrate_md5(self, bundle):
return 'OBSOLETED'
def xform_es(self, domain):
return MOCK_XFORM_ES or XFormES(domain)
def obj_get_list(self, bundle, domain, **kwargs):
try:
es_query = es_search(bundle.request, domain)
except Http400 as e:
raise BadRequest(e.message)
es_query['filter']['and'].append({'term': {'doc_type': 'xforminstance'}})
# Note that XFormES is used only as an ES client, for `run_query` against the proper index
return ESQuerySet(payload = es_query,
model = XFormInstance,
es_client=self.xform_es(domain)).order_by('-received_on')
class Meta(v0_3.XFormInstanceResource.Meta):
ordering = ['received_on']
list_allowed_methods = ['get']
class RepeaterResource(JsonResource, DomainSpecificResourceMixin):
id = fields.CharField(attribute='_id', readonly=True, unique=True)
type = fields.CharField(attribute='doc_type')
domain = fields.CharField(attribute='domain')
url = fields.CharField(attribute='url')
version = fields.CharField(attribute='version', null=True)
def get_resource_uri(self, bundle_or_obj=None, url_name='api_dispatch_list'):
if isinstance(bundle_or_obj, Bundle):
obj = bundle_or_obj.obj
elif bundle_or_obj is None:
return None
else:
obj = bundle_or_obj
return reverse('api_dispatch_detail', kwargs=dict(resource_name=self._meta.resource_name,
domain=obj.domain,
api_name=self._meta.api_name,
pk=obj._id))
def obj_get_list(self, bundle, domain, **kwargs):
repeaters = Repeater.by_domain(domain)
return list(repeaters)
def obj_get(self, bundle, **kwargs):
return get_object_or_not_exist(Repeater, kwargs['pk'], kwargs['domain'],
additional_doc_types=repeater_types.keys())
def obj_create(self, bundle, request=None, **kwargs):
bundle.obj.domain = kwargs['domain']
bundle = self._update(bundle)
bundle.obj.save()
return bundle
def obj_update(self, bundle, **kwargs):
bundle.obj = Repeater.get(kwargs['pk'])
assert bundle.obj.domain == kwargs['domain']
bundle = self._update(bundle)
assert bundle.obj.domain == kwargs['domain']
bundle.obj.save()
return bundle
def _update(self, bundle):
for key, value in bundle.data.items():
setattr(bundle.obj, key, value)
bundle = self.full_hydrate(bundle)
return bundle
class Meta(CustomResourceMeta):
authentication = v0_1.DomainAdminAuthentication()
object_class = Repeater
resource_name = 'data-forwarding'
detail_allowed_methods = ['get', 'put', 'delete']
list_allowed_methods = ['get', 'post']
def group_by_dict(objs, fn):
"""
Itertools.groupby returns a transient iterator with alien
data types in it. This returns a dictionary of lists.
Less efficient but clients can write naturally and used
only for things that have to fit in memory easily anyhow.
"""
result = defaultdict(list)
for obj in objs:
key = fn(obj)
result[key].append(obj)
return result
class CommCareCaseResource(SimpleSortableResourceMixin, v0_3.CommCareCaseResource, DomainSpecificResourceMixin):
xforms_by_name = UseIfRequested(ToManyListDictField(
'corehq.apps.api.resources.v0_4.XFormInstanceResource',
attribute=lambda case: group_by_dict(case.get_forms(), lambda form: form.name)
))
xforms_by_xmlns = UseIfRequested(ToManyListDictField(
'corehq.apps.api.resources.v0_4.XFormInstanceResource',
attribute=lambda case: group_by_dict(case.get_forms(), lambda form: form.xmlns)
))
child_cases = UseIfRequested(ToManyDictField('corehq.apps.api.resources.v0_4.CommCareCaseResource',
attribute=lambda case: dict([ (index.identifier, CommCareCase.get(index.referenced_id)) for index in case.indices])))
parent_cases = UseIfRequested(ToManyDictField('corehq.apps.api.resources.v0_4.CommCareCaseResource',
attribute=lambda case: dict([ (index.identifier, CommCareCase.get(index.referenced_id)) for index in case.reverse_indices])))
domain = fields.CharField(attribute='domain')
# Fields that v0.2 assumed were pre-transformed but we are now operating on straight CommCareCase objects again
date_modified = fields.CharField(attribute='modified_on', default="1900-01-01")
server_date_modified = fields.CharField(attribute='server_modified_on', default="1900-01-01")
server_date_opened = fields.CharField(attribute='server_opened_on', default="1900-01-01")
def case_es(self, domain):
return MOCK_CASE_ES or CaseES(domain)
def obj_get_list(self, bundle, domain, **kwargs):
filters = v0_3.CaseListFilters(bundle.request.GET).filters
# Since tastypie handles the "from" and "size" via slicing, we have to wipe them out here
# since ElasticCaseQuery adds them. I believe other APIs depend on the behavior of ElasticCaseQuery
# hence I am not modifying that
query = ElasticCaseQuery(domain, filters).get_query()
if 'from' in query:
del query['from']
if 'size' in query:
del query['size']
return ESQuerySet(payload = query,
model = CommCareCase,
es_client = self.case_es(domain)).order_by('server_modified_on') # Not that CaseES is used only as an ES client, for `run_query` against the proper index
class Meta(v0_3.CommCareCaseResource.Meta):
max_limit = 100 # Today, takes ~25 seconds for some domains
serializer = CommCareCaseSerializer()
ordering = ['server_date_modified', 'date_modified']
class GroupResource(JsonResource, DomainSpecificResourceMixin):
id = fields.CharField(attribute='get_id', unique=True, readonly=True)
domain = fields.CharField(attribute='domain')
name = fields.CharField(attribute='name')
users = fields.ListField(attribute='get_user_ids')
path = fields.ListField(attribute='path')
case_sharing = fields.BooleanField(attribute='case_sharing', default=False)
reporting = fields.BooleanField(default=True, attribute='reporting')
metadata = fields.DictField(attribute='metadata', null=True, blank=True)
def obj_get(self, bundle, **kwargs):
return get_object_or_not_exist(Group, kwargs['pk'], kwargs['domain'])
def obj_get_list(self, bundle, domain, **kwargs):
groups = Group.by_domain(domain)
return groups
class Meta(CustomResourceMeta):
authentication = RequirePermissionAuthentication(Permissions.edit_commcare_users)
object_class = Group
list_allowed_methods = ['get']
detail_allowed_methods = ['get']
resource_name = 'group'
class SingleSignOnResource(JsonResource, DomainSpecificResourceMixin):
"""
This resource does not require "authorization" per se, but
rather allows a POST of username and password and returns
just the authenticated user, if the credentials and domain
are correct.
"""
def post_list(self, request, **kwargs):
domain = kwargs.get('domain')
request.domain = domain
username = request.POST.get('username')
password = request.POST.get('password')
if username is None:
return HttpResponseBadRequest('Missing required parameter: username')
if password is None:
return HttpResponseBadRequest('Missing required parameter: password')
if '@' not in username:
username = format_username(username, domain)
# Convert to the appropriate type of user
couch_user = CouchUser.get_by_username(username)
if couch_user is None or not couch_user.is_member_of(domain) or not couch_user.check_password(password):
return HttpResponseForbidden()
if couch_user.is_commcare_user():
user_resource = v0_1.CommCareUserResource()
elif couch_user.is_web_user():
user_resource = v0_1.WebUserResource()
else:
return HttpResponseForbidden()
bundle = user_resource.build_bundle(obj=couch_user, request=request)
bundle = user_resource.full_dehydrate(bundle)
return user_resource.create_response(request, bundle, response_class=HttpResponse)
def get_list(self, bundle, **kwargs):
return HttpResponseForbidden()
def get_detail(self, bundle, **kwargs):
return HttpResponseForbidden()
class Meta(CustomResourceMeta):
authentication = Authentication()
resource_name = 'sso'
detail_allowed_methods = []
list_allowed_methods = ['post']
class ApplicationResource(JsonResource, DomainSpecificResourceMixin):
id = fields.CharField(attribute='_id')
name = fields.CharField(attribute='name')
modules = fields.ListField()
def dehydrate_module(self, app, module, langs):
"""
Convert a Module object to a JValue representation
with just the good parts.
NOTE: This is not a tastypie "magic"-name method to
dehydrate the "module" field; there is no such field.
"""
dehydrated = {}
dehydrated['case_type'] = module.case_type
dehydrated['case_properties'] = app_manager_util.get_case_properties(app, [module.case_type], defaults=['name'])[module.case_type]
dehydrated['forms'] = []
for form in module.forms:
form = Form.get_form(form.unique_id)
form_jvalue = {
'xmlns': form.xmlns,
'name': form.name,
'questions': form.get_questions(langs),
}
dehydrated['forms'].append(form_jvalue)
return dehydrated
def dehydrate_modules(self, bundle):
app = bundle.obj
if app.doc_type == Application._doc_type:
return [self.dehydrate_module(app, module, app.langs) for module in bundle.obj.modules]
elif app.doc_type == RemoteApp._doc_type:
return []
def obj_get_list(self, bundle, domain, **kwargs):
# There should be few enough apps per domain that doing an explicit refresh for each is OK.
# This is the easiest way to filter remote apps
# Later we could serialize them to their URL or whatevs but it is not that useful yet
application_bases = ApplicationBase.by_domain(domain)
# This wraps in the appropriate class so that is_remote_app() returns the correct answer
applications = [get_app(domain, application_base.id) for application_base in application_bases]
return [app for app in applications if not app.is_remote_app()]
def obj_get(self, bundle, **kwargs):
return get_object_or_not_exist(Application, kwargs['pk'], kwargs['domain'])
class Meta(CustomResourceMeta):
authentication = RequirePermissionAuthentication(Permissions.edit_apps)
object_class = Application
list_allowed_methods = ['get']
detail_allowed_methods = ['get']
resource_name = 'application'
def bool_to_yesno(value):
if value is None:
return None
elif value:
return 'yes'
else:
return 'no'
def get_yesno(attribute):
return lambda obj: bool_to_yesno(getattr(obj, attribute, None))
class HOPECaseResource(CommCareCaseResource):
"""
Custom API endpoint for custom case wrapper
"""
events_attributes = fields.ListField()
other_properties = fields.DictField()
def dehydrate_events_attributes(self, bundle):
return bundle.obj.events_attributes
def dehydrate_other_properties(self, bundle):
return bundle.obj.other_properties
def obj_get(self, bundle, **kwargs):
return get_object_or_not_exist(HOPECase, kwargs['pk'], kwargs['domain'],
additional_doc_types=['CommCareCase'])
def obj_get_list(self, bundle, domain, **kwargs):
"""
Overridden to wrap the case JSON from ElasticSearch with the custom.hope.case.HOPECase class
"""
filters = v0_3.CaseListFilters(bundle.request.GET).filters
# Since tastypie handles the "from" and "size" via slicing, we have to wipe them out here
# since ElasticCaseQuery adds them. I believe other APIs depend on the behavior of ElasticCaseQuery
# hence I am not modifying that
query = ElasticCaseQuery(domain, filters).get_query()
if 'from' in query:
del query['from']
if 'size' in query:
del query['size']
# Note that CaseES is used only as an ES client, for `run_query` against the proper index
return ESQuerySet(payload=query,
model=HOPECase,
es_client=self.case_es(domain)).order_by('server_modified_on')
def alter_list_data_to_serialize(self, request, data):
# rename 'properties' field to 'case_properties'
for bundle in data['objects']:
bundle.data['case_properties'] = bundle.data['properties']
del bundle.data['properties']
mother_lists = filter(lambda x: x.obj.type == CC_BIHAR_PREGNANCY, data['objects'])
child_lists = filter(lambda x: x.obj.type == CC_BIHAR_NEWBORN, data['objects'])
return {'objects': {
'mother_lists': mother_lists,
'child_lists': child_lists
}, 'meta': data['meta']}
class Meta(CommCareCaseResource.Meta):
resource_name = 'hope-case'
| bsd-3-clause | -4,855,601,299,716,612,000 | 39.763158 | 179 | 0.663654 | false |
miraculixx/tastypie-async | tpasync/tests.py | 1 | 5935 | import time
from django import http
from tpasync.resources import BaseAsyncResource
from tastypie import fields
from tastypie.test import ResourceTestCaseMixin
from . import tasks
from django.test.testcases import TestCase
class EmptyTestResource(BaseAsyncResource):
class Meta:
resource_name = 'empty'
class TestResource(BaseAsyncResource):
id = fields.IntegerField()
result = fields.CharField()
class Meta:
resource_name = 'test'
def async_get_detail(self, request, **kwargs):
return tasks.successful_task.apply_async()
def async_get_list(self, request, **kwargs):
return tasks.list_task.apply_async()
def async_patch_list(self, request, **kwargs):
pass
def async_post_detail(self, request, **kwargs):
return tasks.failing_task.apply_async()
class AsyncResourceTest(ResourceTestCaseMixin, TestCase):
def setUp(self):
super(AsyncResourceTest, self).setUp()
self.empty_resource = EmptyTestResource()
self.test_resource = TestResource()
def test_empty_methods(self):
for verb in ('get', 'put', 'post', 'delete', 'patch'):
for suffix in ('detail', 'list'):
request = http.HttpRequest()
self.assertHttpNotImplemented(
getattr(
self.empty_resource,
'_'.join((verb, suffix)))(request))
response = self.api_client.get('/api/v1/empty/')
self.assertHttpNotImplemented(response)
def test_successful_task(self):
# Method returns None, should give HTTP bad request
response = self.api_client.patch('/api/v1/test/', data={})
self.assertHttpBadRequest(response)
# Send task request and get its Location header
result = self.api_client.get('/api/v1/test/1/')
self.assertHttpAccepted(result)
state_url = result['Location']
# Location should contain state URL, but not result_uri
response = self.api_client.get(state_url)
self.assertHttpOK(response)
data = self.deserialize(response)
self.assertEqual(data['state'], 'PENDING')
self.assertIn('resource_uri', data)
self.assertIn('id', data)
task_id = data['id']
self.assertEqual(
data['resource_uri'],
'/api/v1/test/state/{}/'.format(task_id))
self.assertNotIn('result_uri', data)
# Wait 4 seconds and retry. This time result_uri should be ready
time.sleep(4)
response = self.api_client.get(state_url)
self.assertHttpOK(response)
data = self.deserialize(response)
self.assertIn('result_uri', data)
self.assertEqual(data['state'], 'SUCCESS')
# Go to result page.
response = self.api_client.get(data['result_uri'])
self.assertHttpOK(response)
data = self.deserialize(response)
self.assertEqual(data['result'], 'ok')
# We can't delete task that is competed
response = self.api_client.delete(state_url)
self.assertHttpBadRequest(response)
def test_canceling_task(self):
# Send task request and get its Location header
result = self.api_client.get('/api/v1/test/1/')
self.assertHttpAccepted(result)
state_url = result['Location']
# We can delete the task until it has finisheed executing
time.sleep(2)
response = self.api_client.delete(state_url)
self.assertHttpGone(response)
def test_failing_task(self):
# Send task request and get its Location header
result = self.api_client.post('/api/v1/test/1/')
self.assertHttpAccepted(result)
state_url = result['Location']
# This request will have failed in 1 second. Location should contain
# result_uri.
time.sleep(1)
response = self.api_client.get(state_url)
self.assertHttpOK(response)
data = self.deserialize(response)
self.assertEqual(data['state'], 'FAILURE')
self.assertIn('result_uri', data)
result_url = data['result_uri']
# Get result, check error message.
response = self.api_client.get(result_url)
self.assertHttpOK(response)
data = self.deserialize(response)
self.assertEqual(data['error'], 'I failed miserably')
response = self.api_client.delete(state_url)
self.assertHttpBadRequest(response)
def test_list_deserialization(self):
# Send task request and get its Location header
result = self.api_client.get('/api/v1/test/')
self.assertHttpAccepted(result)
state_url = result['Location']
time.sleep(1)
response = self.api_client.get(state_url)
self.assertHttpOK(response)
data = self.deserialize(response)
self.assertEqual(data['state'], 'SUCCESS')
self.assertIn('result_uri', data)
result_url = data['result_uri']
print result_url
# Get results
response = self.api_client.get(result_url)
self.assertHttpOK(response)
data = self.deserialize(response)
self.assertEqual(
data,
{u'meta': {
u'previous': None, u'total_count': 2, u'offset': 0,
u'limit': 20, u'next': None},
u'objects': [
{u'id': 1, u'result': u'ok'},
{u'id': 2, u'result': u'not bad'}]})
# Get only first page (we use 1 object per page here)
response = self.api_client.get(result_url + '?limit=1')
self.assertHttpOK(response)
data = self.deserialize(response)
self.assertEqual(
data,
{u'meta': {
u'previous': None, u'total_count': 2, u'offset': 0,
u'limit': 1, u'next': u'/api/v1/test/?limit=1&offset=1'},
u'objects': [{u'id': 1, u'result': u'ok'}]})
| mit | -2,464,982,984,483,011,000 | 36.09375 | 76 | 0.608088 | false |
anselmobd/fo2 | src/lotes/views/parametros/regras_lote_min_tamanho.py | 1 | 7818 | from operator import itemgetter
from pprint import pprint
from django.db import IntegrityError
from django.shortcuts import render, redirect
from django.urls import reverse
from django.views import View
from geral.functions import has_permission
import systextil.models
import lotes.forms as forms
import lotes.models as models
from lotes.views.parametros_functions import *
class RegrasLoteMinTamanho(View):
def __init__(self):
self.Form_class = forms.RegrasLoteMinTamanhoForm
self.template_name = 'lotes/regras_lote_min_tamanho.html'
self.title_name = 'Regras de lote mínimo por tamanho'
self.id = None
self.context = {'titulo': self.title_name}
def lista(self):
try:
tamanhos = systextil.models.Tamanho.objects.all(
).order_by('tamanho_ref')
except systextil.models.Tamanho.DoesNotExist:
self.context.update({
'msg_erro': 'Tamanhos não encontrados',
})
return
try:
RLM = models.RegraLMTamanho.objects.all(
).order_by('tamanho')
except models.RegraLMTamanho.DoesNotExist:
self.context.update({
'msg_erro': 'Regras de lote mínimo não encontrados',
})
return
regras = {}
inter_tam = tamanhos.iterator()
inter_RLM = RLM.iterator()
walk = 'b' # from, to, both
while True:
if walk in ['f', 'b']:
try:
tam = next(inter_tam)
except StopIteration:
tam = None
if walk in ['t', 'b']:
try:
rlm = next(inter_RLM)
except StopIteration:
rlm = None
if rlm is None and tam is None:
break
rec = {
'min_para_lm': 0,
'lm_cor_sozinha': 's',
}
acao_definida = False
if rlm is not None:
if tam is None or tam.tamanho_ref > rlm.tamanho:
acao_definida = True
rec['status'] = 'd'
rec['tamanho'] = rlm.tamanho
walk = 't'
if not acao_definida:
rec['tamanho'] = tam.tamanho_ref
rec['ordem_tamanho'] = tam.ordem_tamanho
if rlm is None or tam.tamanho_ref < rlm.tamanho:
acao_definida = True
rec['status'] = 'i'
walk = 'f'
if not acao_definida:
rec['min_para_lm'] = rlm.min_para_lm
rec['lm_cor_sozinha'] = rlm.lm_cor_sozinha
rec['status'] = 'u'
walk = 'b'
regras[rec['tamanho']] = rec
data = []
for key in regras:
if regras[key]['status'] == 'd':
try:
models.RegraLMTamanho.objects.filter(
tamanho=key).delete()
except models.RegraLMTamanho.DoesNotExist:
self.context.update({
'msg_erro': 'Erro apagando regras de lote mínimo',
})
return
continue
if regras[key]['status'] == 'i':
try:
rlm = models.RegraLMTamanho()
rlm.tamanho = key
rlm.ordem_tamanho = regras[key]['ordem_tamanho']
rlm.min_para_lm = regras[key]['min_para_lm']
rlm.lm_cor_sozinha = regras[key]['lm_cor_sozinha']
rlm.save()
except Exception:
self.context.update({
'msg_erro': 'Erro salvando regras de lote mínimo',
})
return
regras[key].update({
'edit': ('<a title="Editar" '
'href="{}">'
'<span class="glyphicon glyphicon-pencil" '
'aria-hidden="true"></span></a>'
).format(reverse(
'producao:regras_lote_min_tamanho', args=[key])),
})
data.append(regras[key])
data = sorted(data, key=itemgetter('ordem_tamanho'))
headers = ['Tamanho', 'Ordem do tamanho',
'Mínimo para aplicação do lote mínimo',
'Aplica lote mínimo por cor quando único tamanho']
fields = ['tamanho', 'ordem_tamanho',
'min_para_lm',
'lm_cor_sozinha']
if has_permission(self.request, 'lotes.change_regralmtamanho'):
headers.insert(0, '')
fields.insert(0, 'edit')
self.context.update({
'headers': headers,
'fields': fields,
'data': data,
'safe': ['edit'],
})
def get(self, request, *args, **kwargs):
self.request = request
if 'id' in kwargs:
self.id = kwargs['id']
if self.id:
if has_permission(request, 'lotes.change_regralmtamanho'):
try:
rlm = models.RegraLMTamanho.objects.get(tamanho=self.id)
except models.RegraLMTamanho.DoesNotExist:
self.context.update({
'msg_erro': 'Regras de lote mínimo não encontradas',
})
return render(
self.request, self.template_name, self.context)
try:
tamanho = systextil.models.Tamanho.objects.get(
tamanho_ref=self.id)
except systextil.models.Tamanho.DoesNotExist:
self.context.update({
'msg_erro': 'Tamanho não encontrado',
})
return render(
self.request, self.template_name, self.context)
self.context['id'] = self.id
self.context['form'] = self.Form_class(
initial={
'min_para_lm': rlm.min_para_lm,
'lm_cor_sozinha': rlm.lm_cor_sozinha,
})
else:
self.id = None
if not self.id:
self.lista()
return render(self.request, self.template_name, self.context)
def post(self, request, *args, **kwargs):
self.request = request
if 'id' in kwargs:
self.id = kwargs['id']
form = self.Form_class(request.POST)
if self.id and form.is_valid():
min_para_lm = form.cleaned_data['min_para_lm']
lm_cor_sozinha = form.cleaned_data['lm_cor_sozinha']
try:
rlm = models.RegraLMTamanho.objects.get(tamanho=self.id)
except models.RegraLMTamanho.DoesNotExist:
self.context.update({
'msg_erro': 'Parâmetros de coleção não encontrados',
})
return render(
self.request, self.template_name, self.context)
try:
rlm.min_para_lm = min_para_lm
rlm.lm_cor_sozinha = lm_cor_sozinha
rlm.save()
except IntegrityError as e:
self.context.update({
'msg_erro':
(
'Ocorreu um erro ao gravar '
'o lotes mínimos. <{}>'
).format(str(e)),
})
self.lista()
else:
self.context['form'] = form
return redirect('producao:regras_lote_min_tamanho')
| mit | 225,207,211,094,632,960 | 33.504425 | 77 | 0.468966 | false |
oroca/raspberrypi_projects | rccar/rcserver_lb1630.py | 1 | 3493 | import socket
import sys
import RPi.GPIO as gpio
import time
HOST = '192.168.0.9' # RC카 라즈베리파이 IP값으로 변경필요
PORT = 10000
TIME_OUT = 100
#Motor 1 GPIO Pin
IC1A = 22
IC2A = 27
#Motor 2 GPIO Pin
IC3A = 4
IC4A = 17
gpio.cleanup()
gpio.setmode(gpio.BCM)
#Motor Pin Setup
gpio.setup(IC1A, gpio.OUT)
gpio.setup(IC2A, gpio.OUT)
gpio.setup(IC3A, gpio.OUT)
gpio.setup(IC4A, gpio.OUT)
# Create a TCP/IP socket
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
# Bind the socket to the port
server_address = (HOST, PORT)
print >>sys.stderr, 'starting up on %s port %s' % server_address
sock.bind(server_address)
def main():
# Listen for incoming connections
sock.listen(1)
while True:
# Wait for a connection
print >>sys.stderr, 'waiting for a connection'
connection, client_address = sock.accept()
# Timeout
connection.settimeout(TIME_OUT)
try:
print >>sys.stderr, 'connection from', client_address
# Receive the data in small chunks and retransmit it
while True:
data = connection.recv(16)
print >>sys.stderr, 'received "%s"' % data
if data:
pdata = parsing_data(data)
print 'Go %s' % pdata
else:
print >>sys.stderr, 'no more data from', client_address
break
except socket.timeout:
print 'timeout error : "%d" secs' % TIME_OUT
connection.close()
finally:
# Clean up the connection
connection.close()
def parsing_data(data) :
data = data.lower()
print 'receive data : %s' % data
try:
intindex = data.index("rcpi")
getStr = data.replace("rcpi","")
getStr = getStr.strip()
print >>sys.stderr, 'Receive Key : "%s"' % getStr
if ( getStr == 'ff' ):
print 'Move Forward / "%d" speed'
elif ( getStr == 'bb' ):
print 'Move Backward / "%d" speed'
elif ( getStr == 'll' ):
print 'Turn Left'
elif ( getStr == 'rr' ):
print 'Turn Right'
elif ( getStr == 'bf' ):
print 'stop For/Backward'
elif ( getStr == 'rl' ):
print 'stop Left/Right'
elif ( getStr == 'ee' ):
print 'toggle Motor Enable'
elif ( getStr == 'dd' ):
print 'toggle Motor Disable'
else:
print 'unknown commend'
return 'u'
run_motor(getStr)
except ValueError:
return 'a'
def run_motor(rcvStr):
if ( rcvStr == 'rr' ):
print 'GPIO Turn Right'
gpio.output(IC1A, gpio.LOW)
gpio.output(IC2A, gpio.HIGH)
elif ( rcvStr == 'll' ):
print 'GPIO Turn Left'
gpio.output(IC1A, gpio.HIGH)
gpio.output(IC2A, gpio.LOW)
elif ( rcvStr == 'rl' ):
print 'GPIO Front Wheel Zero'
gpio.output(IC1A, gpio.LOW)
gpio.output(IC2A, gpio.LOW)
elif ( rcvStr == 'ff' ):
print 'GPIO Forward'
gpio.output(IC3A, gpio.LOW)
gpio.output(IC4A, gpio.HIGH)
elif ( rcvStr == 'bb' ):
print 'GPIO Backward'
gpio.output(IC3A, gpio.HIGH)
gpio.output(IC4A, gpio.LOW)
elif ( rcvStr == 'bf' ):
print 'GPIO Stop Back Wheel'
gpio.output(IC3A, gpio.LOW)
gpio.output(IC4A, gpio.LOW)
if __name__ == "__main__":
main()
| mit | -3,131,656,717,313,517,000 | 24.858209 | 75 | 0.543434 | false |
GreenJoey/My-Simple-Programs | functional/EromeDownloader/__main__.py | 1 | 1514 | from lxml.html import fromstring as etree_fromstring
import requests
from argparse import ArgumentParser
from pathlib import Path
from os import chdir
from sys import exit
import pprint
import subprocess
parser = ArgumentParser()
parser.add_argument("url", type=str, help="Url of the Erome Playlist")
parser.add_argument("-f", "--folder", help="Download path", default=Path.cwd(), type=Path)
arguments = parser.parse_args()
if arguments.folder.exists() and arguments.folder.is_dir():
chdir(arguments.folder)
else:
print("Destination not found")
exit(1)
page = requests.get(arguments.url).text
etree = etree_fromstring(page)
video = {}
for videos in etree.xpath("//div[@class='media-group']"):
name = videos.xpath("./div[@class='media-details']/h2/text()")
if name:
name = name[0] # Get the first entry, which should be the media name
else:
name = videos.xpath("./@id")[0]
# Check for the avaliable for format and download the highest one
video_format = videos.xpath("./div[@class='video']/video/source/@res")[0]
video_url = ""
if "1080" in video_format:
video_url = videos.xpath("./div[@class='video']/video/source[@res=1080]/@src")[0]
else:
video_url = videos.xpath("./div[@class='video']/video/source/@src")[0]
video[name] = {}
video[name]["url"] = video_url
video[name]["format"] = video_format
pprint.pprint(video)
for vid in video:
subprocess.call(["/usr/bin/wget", "-O", vid+".mp4", video[vid]["url"]])
| gpl-2.0 | -6,119,119,812,970,750,000 | 31.212766 | 90 | 0.667768 | false |
JudoWill/glue | glue/qt/widgets/tests/test_image_widget.py | 1 | 7572 | # pylint: disable=I0011,W0613,W0201,W0212,E1101,E1103
from __future__ import absolute_import, division, print_function
import numpy as np
from mock import MagicMock
from ..image_widget import ImageWidget
from .... import core
from ....core.tests.test_state import TestApplication
from ...glue_application import GlueApplication
from . import simple_session
import os
os.environ['GLUE_TESTING'] = 'True'
class _TestImageWidgetBase(object):
widget_cls = None
def setup_method(self, method):
self.session = simple_session()
self.hub = self.session.hub
self.collect = self.session.data_collection
self.im = core.Data(label='im',
x=[[1, 2], [3, 4]],
y=[[2, 3], [4, 5]])
self.cube = core.Data(label='cube',
x=[[[1, 2], [3, 4]], [[1, 2], [3, 4]]],
y=[[[1, 2], [3, 4]], [[1, 2], [3, 4]]])
self.widget = self.widget_cls(self.session)
self.connect_to_hub()
self.collect.append(self.im)
self.collect.append(self.cube)
def assert_title_correct(self):
expected = "%s - %s" % (self.widget.data.label,
self.widget.attribute.label)
assert self.widget.windowTitle() == expected
def connect_to_hub(self):
self.widget.register_to_hub(self.hub)
self.collect.register_to_hub(self.hub)
def _test_widget_synced_with_collection(self):
dc = self.widget.ui.displayDataCombo
assert dc.count() == len(self.collect)
for data in self.collect:
label = data.label
pos = dc.findText(label)
assert pos >= 0
assert dc.itemData(pos) is data
def test_synced_on_init(self):
self._test_widget_synced_with_collection()
def test_multi_add_ignored(self):
"""calling add_data multiple times doesn't corrupt data combo"""
self.widget.add_data(self.collect[0])
self.widget.add_data(self.collect[0])
self._test_widget_synced_with_collection()
def test_synced_on_remove(self):
self.collect.remove(self.cube)
self._test_widget_synced_with_collection()
def test_window_title_matches_data(self):
self.widget.add_data(self.collect[0])
self.assert_title_correct()
def test_window_title_updates_on_label_change(self):
self.connect_to_hub()
self.widget.add_data(self.collect[0])
self.collect[0].label = 'Changed'
self.assert_title_correct()
def test_window_title_updates_on_component_change(self):
self.connect_to_hub()
self.widget.add_data(self.collect[0])
self.widget.ui.attributeComboBox.setCurrentIndex(1)
self.assert_title_correct()
def test_data_combo_updates_on_change(self):
self.connect_to_hub()
self.widget.add_data(self.collect[0])
self.collect[0].label = 'changed'
data_labels = self._data_combo_labels()
assert self.collect[0].label in data_labels
def test_data_not_added_on_init(self):
w = ImageWidget(self.session)
assert self.im not in w.client.artists
def test_selection_switched_on_add(self):
w = self.widget_cls(self.session)
assert self.im not in w.client.artists
w.add_data(self.im)
assert self.im in w.client.artists
w.add_data(self.cube)
assert self.im not in w.client.artists
assert self.cube in w.client.artists
def test_component_add_updates_combo(self):
self.widget.add_data(self.im)
self.im.add_component(self.im[self.im.components[0]], 'testing')
combo = self.widget.ui.attributeComboBox
cids = [combo.itemText(i) for i in range(combo.count())]
assert 'testing' in cids
def test_image_correct_on_init_if_first_attribute_hidden(self):
"""Regression test for #127"""
self.im.components[0]._hidden = True
self.widget.add_data(self.im)
combo = self.widget.ui.attributeComboBox
index = combo.currentIndex()
assert self.widget.client.display_attribute is combo.itemData(index)
def _data_combo_labels(self):
combo = self.widget.ui.displayDataCombo
return [combo.itemText(i) for i in range(combo.count())]
def test_plugins_closed_when_viewer_closed(self):
# Regression test for #518
self.widget.add_data(self.im)
tool = self.widget._tools[0]
tool.close = MagicMock()
self.widget.close()
assert tool.close.call_count == 1
class TestImageWidget(_TestImageWidgetBase):
widget_cls = ImageWidget
def test_intensity_label(self):
self.widget.add_data(self.im)
att = self.widget.attribute
intensity = self.im[att][1, 0]
x, y = self.widget.client.axes.transData.transform([(0.5, 1.5)])[0]
assert self.widget._intensity_label(x, y) == 'data: %s' % intensity
def test_paint(self):
# make sure paint Events don't trigger any errors
self.widget.add_data(self.im)
self.widget.show()
self.widget.close()
def test_enable_rgb_doesnt_close_viewer(self):
# regression test for #446
def fail():
assert False
self.widget.add_data(self.im)
self.widget._container.on_empty(fail)
self.widget.rgb_mode = True
self.widget.rgb_mode = False
class TestStateSave(TestApplication):
def setup_method(self, method):
LinkSame = core.link_helpers.LinkSame
d = core.Data(label='im', x=[[1, 2], [2, 3]], y=[[2, 3], [4, 5]])
d2 = core.Data(label='cat',
x=[0, 1, 0, 1],
y=[0, 0, 1, 1],
z=[1, 2, 3, 4])
dc = core.DataCollection([d, d2])
dc.add_link(LinkSame(d.get_pixel_component_id(0), d2.id['x']))
dc.add_link(LinkSame(d.get_pixel_component_id(1), d2.id['y']))
app = GlueApplication(dc)
w = app.new_data_viewer(ImageWidget, data=d)
self.d = d
self.app = app
self.w = w
self.d2 = d2
self.dc = dc
def test_image_viewer(self):
self.check_clone(self.app)
def test_subset(self):
d, w, app = self.d, self.w, self.app
self.dc.new_subset_group()
assert len(w.layers) == 2
self.check_clone(app)
def test_scatter_layer(self):
# add scatter layer
d, w, app, d2 = self.d, self.w, self.app, self.d2
w.add_data(d2)
assert len(w.layers) == 2
self.check_clone(app)
def test_cube(self):
d = core.Data(label='cube',
x=np.zeros((2, 2, 2)))
dc = core.DataCollection([d])
app = GlueApplication(dc)
w = app.new_data_viewer(ImageWidget, d)
w.slice = ('x', 'y', 1)
assert w.slice == ('x', 'y', 1)
c = self.check_clone(app)
w2 = c.viewers[0][0]
assert w2.ui.slice.slice == w.slice
def test_rgb_layer(self):
d, w, app = self.d, self.w, self.app
x = d.id['x']
y = d.id['y']
w.client.display_data = d
w.rgb_mode = True
w.rgb_viz = (True, True, False)
w.ratt = x
w.gatt = y
w.batt = x
clone = self.check_clone(app)
w = clone.viewers[0][0]
assert w.rgb_viz == (True, True, False)
assert w.rgb_mode
assert w.ratt.label == 'x'
assert w.gatt.label == 'y'
assert w.batt.label == 'x'
| bsd-3-clause | -3,600,583,416,219,547,000 | 31.084746 | 76 | 0.580692 | false |
yishayv/lyacorr | ism_spectra/extract_galaxy_metadata.py | 1 | 3934 | """
Get galaxy metadata from a CasJobs fits table.
Add an SFD extinction field, and optional custom fields from HealPix maps.
"""
import cProfile
from collections import namedtuple
import astropy.table as table
import astropy.units as u
import healpy as hp
import numpy as np
from astropy.coordinates import SkyCoord
from astropy.io import fits
from mpi4py import MPI
import common_settings
from ism_spectra.sfd_lookup import SFDLookUp
from python_compat import range, zip
comm = MPI.COMM_WORLD
settings = common_settings.Settings() # type: common_settings.Settings
galaxy_file_fits = settings.get_galaxy_metadata_fits()
galaxy_file_npy = settings.get_galaxy_metadata_npy()
HealPixMapEntry = namedtuple('HealPixMapEntry', ['data', 'nside', 'filename', 'column_name'])
column_names = settings.get_custom_column_names()
file_names = settings.get_custom_healpix_maps()
fields = settings.get_custom_healpix_data_fields()
def make_heal_pix_map_entry(filename, column_name, field):
print("Loading: {0}:{2} as column '{1}'".format(filename, column_name, field))
data = hp.fitsfunc.read_map(filename, field=field)
nside = hp.npix2nside(data.size)
return HealPixMapEntry(data=data, nside=nside, filename=filename, column_name=column_name)
healpix_maps = [make_heal_pix_map_entry(filename, column_name, field)
for filename, column_name, field in zip(file_names, column_names, fields)]
def ra_dec2ang(ra, dec):
return (90. - dec) * np.pi / 180., ra / 180. * np.pi
def convert_fits_columns(fits_data):
t = table.Table()
t.add_columns([table.Column(range(len(fits_data)), name='index', dtype='i8', unit=None),
table.Column(fits_data['specObjID'], name='specObjID', dtype='i8', unit=None),
table.Column(fits_data['z'], name='z', unit=u.dimensionless_unscaled),
table.Column(fits_data['ra'], name='ra', unit=u.degree),
table.Column(fits_data['dec'], name='dec', unit=u.degree),
table.Column(fits_data['plate'], name='plate', dtype='i4', unit=None),
table.Column(fits_data['mjd'], name='mjd', dtype='i4', unit=None),
table.Column(fits_data['fiberID'], name='fiberID', dtype='i4', unit=None),
table.Column(fits_data['extinction_g'], name='extinction_g', unit=u.dimensionless_unscaled),
table.Column(fits_data['class'], name='class', unit=u.dimensionless_unscaled)
])
return t
def fill_galaxy_table():
fits_data = fits.getdata(galaxy_file_fits)
return convert_fits_columns(fits_data)
def profile_main():
if comm.rank == 0:
t = fill_galaxy_table()
t.sort(['plate', 'mjd', 'fiberID'])
# add indices after sort
t['index'] = range(len(t))
ar_ra, ar_dec = t['ra'], t['dec']
coordinates_icrs = SkyCoord(ra=ar_ra, dec=ar_dec)
coordinates_galactic = coordinates_icrs.galactic
# add a column for extinction from the full resolution SFD map:
sfd = SFDLookUp(*settings.get_sfd_maps_fits())
t['extinction_sfd_hires'] = sfd.lookup_bilinear(coordinates_galactic.l.to(u.rad).value,
coordinates_galactic.b.to(u.rad).value)
# add custom columns from healpix map lookup, based on the common settings.
theta, phi = ra_dec2ang(coordinates_galactic.l.value, coordinates_galactic.b.value)
for healpix_map in healpix_maps:
# lookup values in current map
map_lookup_results = hp.ang2pix(healpix_map.nside, theta, phi)
# add a new column to the table
t[healpix_map.column_name] = healpix_map.data[map_lookup_results]
np.save(galaxy_file_npy, t)
if settings.get_profile():
cProfile.run('profile_main()', filename='extract_galaxy_metadata.prof', sort=2)
else:
profile_main()
| mit | -7,801,464,748,114,146,000 | 36.826923 | 111 | 0.648958 | false |
googleinterns/intern2020_cocal | uncertainty/data/two_gaussians.py | 1 | 5145 | import os, sys
import numpy as np
from data.util import *
import tensorflow as tf
import tensorflow_probability as tfp
class TwoGaussians(DataLoader):
def __init__(self, batch_size,
n_pos, n_neg, mu_pos, cov_pos, mu_neg, cov_neg,
rot=0.0, train_ratio=0.8, val_ratio=0.1,
train_shuffle=True, val_shuffle=True, test_shuffle=False,
seed=0
):
dim = len(mu_pos)
assert(dim == cov_pos.shape[0] == len(mu_neg) == cov_neg.shape[0])
rot_rad = np.deg2rad(rot)
R = np.array([[np.cos(rot_rad), -np.sin(rot_rad)], [np.sin(rot_rad), np.cos(rot_rad)]], dtype=np.float32)
## generate data
np.random.seed(seed)
x_pos = np.random.multivariate_normal(mu_pos, cov_pos, n_pos).astype(np.float32)
x_neg = np.random.multivariate_normal(mu_neg, cov_neg, n_neg).astype(np.float32)
y_pos = np.ones((n_pos), dtype=np.int64)
y_neg = np.zeros((n_neg), dtype=np.int64)
x = np.concatenate((x_pos, x_neg), 0)
y = np.concatenate((y_pos, y_neg))
x = np.transpose(np.matmul(R, np.transpose(x)))
i_rnd = np.random.permutation(len(y))
x = x[i_rnd]
y = y[i_rnd]
## split
n = len(y)
n_train = int(n*train_ratio)
n_val = int(n*val_ratio)
n_test = n - n_train - n_val
x_train, y_train = x[:n_train], y[:n_train]
x_val, y_val = x[n_train:n_train+n_val], y[n_train:n_train+n_val]
x_test, y_test = x[n_train+n_val:], y[n_train+n_val:]
self.n_train, self.n_val, self.n_test = n_train, n_val, n_test
## init loaders
self.train = self._init_loader(tf.data.Dataset.from_tensor_slices((x_train, y_train)), self.n_train, train_shuffle, batch_size)
self.val = self._init_loader(tf.data.Dataset.from_tensor_slices((x_val, y_val)), self.n_val, val_shuffle, batch_size)
self.test = self._init_loader(tf.data.Dataset.from_tensor_slices((x_test, y_test)), self.n_test, test_shuffle, batch_size)
if __name__ == '__main__':
rot_deg = 60
mu_pos = [+0.6, +0.6]
mu_neg = [-0.6, -0.6]
cov_pos = np.diag([0.01, 0.1])
cov_neg = np.diag([0.01, 0.1])
mu_pos_rot, cov_pos_rot = rot_gaussian(rot_deg, mu_pos, cov_pos)
mu_neg_rot, cov_neg_rot = rot_gaussian(rot_deg, mu_neg, cov_neg)
dsld_src = TwoGaussians(
100,
n_pos=10000, n_neg=10000,
mu_pos=mu_pos, cov_pos=cov_pos,
mu_neg=mu_neg, cov_neg=cov_neg
)
dsld_tar = TwoGaussians(
100,
n_pos=10000, n_neg=10000,
mu_pos=mu_pos_rot, cov_pos=cov_pos_rot,
mu_neg=mu_neg_rot, cov_neg=cov_neg_rot,
seed=1,
)
x_tr_src, y_tr_src = [], []
for x, y in dsld_src.train:
x_tr_src.append(x.numpy())
y_tr_src.append(y.numpy())
x_tr_src = np.concatenate(x_tr_src, 0)
y_tr_src = np.concatenate(y_tr_src)
x_tr_tar, y_tr_tar = [], []
for x, y in dsld_tar.train:
x_tr_tar.append(x.numpy())
y_tr_tar.append(y.numpy())
x_tr_tar = np.concatenate(x_tr_tar, 0)
y_tr_tar = np.concatenate(y_tr_tar)
y_tr_tar[y_tr_tar==0] = 2
y_tr_tar[y_tr_tar==1] = 3
## IW
from model import IW_MoG
mu_pos, mu_neg = tf.constant(mu_pos, dtype=tf.float32), tf.constant(mu_neg, dtype=tf.float32)
cov_pos, cov_neg = tf.constant(cov_pos, dtype=tf.float32), tf.constant(cov_neg, dtype=tf.float32)
mu_pos_rot, mu_neg_rot = tf.constant(mu_pos_rot, dtype=tf.float32), tf.constant(mu_neg_rot, dtype=tf.float32)
cov_pos_rot, cov_neg_rot = tf.constant(cov_pos_rot, dtype=tf.float32), tf.constant(cov_neg_rot, dtype=tf.float32)
p_pos = tfp.distributions.MultivariateNormalTriL(loc=mu_pos, scale_tril=tf.linalg.cholesky(cov_pos))
p_neg = tfp.distributions.MultivariateNormalTriL(loc=mu_neg, scale_tril=tf.linalg.cholesky(cov_neg))
#p_pos = tfp.distributions.MultivariateNormalFullCovariance(mu_pos, cov_pos)
#p_neg = tfp.distributions.MultivariateNormalFullCovariance(mu_neg, cov_neg)
p = tfp.distributions.Mixture(
cat=tfp.distributions.Categorical(probs=[0.5, 0.5]),
components=[p_pos, p_neg])
q_pos = tfp.distributions.MultivariateNormalTriL(loc=mu_pos_rot, scale_tril=tf.linalg.cholesky(cov_pos_rot))
q_neg = tfp.distributions.MultivariateNormalTriL(loc=mu_neg_rot, scale_tril=tf.linalg.cholesky(cov_neg_rot))
#q_pos = tfp.distributions.MultivariateNormalFullCovariance(mu_pos_rot, cov_pos_rot)
#q_neg = tfp.distributions.MultivariateNormalFullCovariance(mu_neg_rot, cov_neg_rot)
q = tfp.distributions.Mixture(
cat=tfp.distributions.Categorical(probs=[0.5, 0.5]),
components=[q_pos, q_neg])
w_mog = IW_MoG(p, q)
## plot data
plot_data(
np.concatenate((x_tr_src, x_tr_tar), 0),
np.concatenate((y_tr_src, y_tr_tar)),
['s', 's', 's', 's'],
['orange', 'b', 'r', 'g'],
[0.5, 0.5, 0.5, 0.5],
[r'$-$'+' (src)', r'$+$'+' (src)', r'$-$'+' (tar)', r'$+$'+' (tar)'],
fn="two_gaussian_tr",
w=w_mog
)
| apache-2.0 | -178,502,891,350,616,220 | 38.883721 | 135 | 0.587366 | false |
Connexions/cnx-epub | cnxepub/tests/test_formatters.py | 1 | 37587 | # -*- coding: utf-8 -*-
# ###
# Copyright (c) 2016, Rice University
# This software is subject to the provisions of the GNU Affero General
# Public License version 3 (AGPLv3).
# See LICENCE.txt for details.
# ###
import codecs
import io
import json
import mimetypes
import os
import subprocess
import sys
import unittest
try:
from unittest import mock
except ImportError:
import mock
from lxml import etree
from ..testing import (TEST_DATA_DIR, unescape,
_get_memcache_client, IS_MEMCACHE_ENABLED)
from ..formatters import exercise_callback_factory
here = os.path.abspath(os.path.dirname(__file__))
IS_PY3 = sys.version_info.major == 3
XMLPP_DIR = os.path.join(here, 'utils')
def xmlpp(input_):
"""Pretty Print XML"""
proc = subprocess.Popen(['./xmlpp.pl', '-sSten'],
stdin=subprocess.PIPE,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
cwd=XMLPP_DIR)
output, _ = proc.communicate(input_)
return output
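
# Note: xmlpp() shells out to the xmlpp.pl Perl helper expected under
# tests/utils (XMLPP_DIR above); a hypothetical call looks like:
#
#     pretty = xmlpp(b'<a><b/></a>')
#
# Both the input and the returned value are bytes, matching Popen's pipes.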
def _c14n(val):
    """Canonicalize (C14N) an XML string and return it as unicode."""
    out = io.BytesIO()
    tree = etree.fromstring(str(val)).getroottree()
    tree.write_c14n(out)
    return out.getvalue().decode('utf-8')
def last_extension(*args, **kwargs):
# Always return the last value of sorted mimetypes.guess_all_extensions
exts = mimetypes.guess_all_extensions(*args, **kwargs)
return sorted(exts)[-1]
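
# Rationale sketch: mimetypes.guess_extension() is not deterministic across
# platforms and Python versions (e.g. 'image/jpeg' may yield '.jpe' or
# '.jpg'), so the test classes below patch it with last_extension to pin
# filename assertions to the last value of the sorted extension list.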
EXERCISE_JSON_HTML = {
"items": [
{
"uid": "93@3",
"group_uuid": "e071207a-9d26-4cff-bbe9-9060d3d13ca6",
"copyright_holders": [
{
"user_id": 2,
"name": "Rice University"
}
],
"uuid": "8fa80526-0720-4a98-99c8-5d6113482424",
"authors": [
{
"user_id": 1,
"name": "OpenStax"
}
],
"published_at": "2016-09-16T17:40:20.497Z",
"number": 93,
"editors": [],
"is_vocab": False,
"stimulus_html": "<p>Please answer the following question:</p>",
"questions": [
{
"stimulus_html": "",
"formats": [
"free-response",
"multiple-choice"
],
"hints": [],
"id": 63062,
"is_answer_order_important": True,
"answers": [
{
"id": 259956,
"content_html": "monomers",
"correctness": "0.0"
},
{
"content_html": "polymers (<span data-math='retry' />)",
"id": 259957,
"correctness": "1.0"
},
{
"id": 259958,
"content_html": "carbohydrates only (<span data-math='' />)",
"correctness": "0.0"
},
{
"content_html": "water only (<span data-math='\\text{H}_2\\text{O}'>\\text{H}_2\\text{O}</span>)",
"id": 259959,
"correctness": "0.0"
},
{
"content_html": "polymer and water (<div data-math='\\text{H}_2\\text{O}'>\\text{H}_2\\text{O}</div>)",
"id": 259959,
"correctness": "1.0"
}
],
"combo_choices": [],
"stem_html": "Dehydration <img href='none'> synthesis leads to the formation of what?"
}
],
"tags": [
"apbio",
"inbook-yes",
"ost-chapter-review",
"review",
"apbio-ch03",
"apbio-ch03-s01",
"apbio-ch03-s01-lo01",
"apbio-ch03-ex002",
"dok:1",
"blooms:1",
"time:short",
"book:stax-bio",
"context-cnxmod:ea44b8fa-e7a2-4360-ad34-ac081bcf104f",
"exid:apbio-ch03-ex002",
"context-cnxmod:85d6c500-9860-42e8-853a-e6940a50224f",
"book:stax-apbio",
"filter-type:import:hs",
"type:conceptual-or-recall"
],
"derived_from": [],
"version": 3
}
],
"total_count": 1
}
EXERCISE_JSON = {
"items": [
{
"uid": "93@3",
"group_uuid": "e071207a-9d26-4cff-bbe9-9060d3d13ca6",
"copyright_holders": [
{
"user_id": 2,
"name": "Rice University"
}
],
"uuid": "8fa80526-0720-4a98-99c8-5d6113482424",
"authors": [
{
"user_id": 1,
"name": "OpenStax"
}
],
"published_at": "2016-09-16T17:40:20.497Z",
"number": 93,
"editors": [],
"is_vocab": False,
"stimulus_html": "",
"questions": [
{
"stimulus_html": "",
"formats": [
"free-response",
"multiple-choice"
],
"hints": [],
"id": 63062,
"is_answer_order_important": True,
"answers": [
{
"id": 259956,
"content_html": "monomers"
},
{
"content_html": "polymers",
"id": 259957
},
{
"id": 259958,
"content_html": "carbohydrates only"
},
{
"content_html": "water only",
"id": 259959
}
],
"combo_choices": [],
"stem_html": "Dehydration <img href='none'/> synthesis leads to the formation of what?"
}
],
"tags": [
"apbio",
"inbook-yes",
"ost-chapter-review",
"review",
"apbio-ch03",
"apbio-ch03-s01",
"apbio-ch03-s01-lo01",
"apbio-ch03-ex002",
"dok:1",
"blooms:1",
"time:short",
"book:stax-bio",
"context-cnxmod:ea44b8fa-e7a2-4360-ad34-ac081bcf104f",
"exid:apbio-ch03-ex002",
"context-cnxmod:85d6c500-9860-42e8-853a-e6940a50224f",
"book:stax-apbio",
"filter-type:import:hs",
"type:conceptual-or-recall"
],
"derived_from": [],
"version": 3
}
],
"total_count": 1
}
BAD_EQUATION_JSON = {
"error": "E_VALIDATION",
"status": 400,
"summary": "1 attribute is invalid",
"model": "Equation",
"invalidAttributes": {
"math": [{"rule": "required",
"message": "\"required\" validation rule failed for input: ''\nSpecifically, it threw an error. Details:\n undefined"}]
}
}
EQUATION_JSON = {
"updatedAt": "2016-10-31T16:06:44.413Z",
"cloudUrl": "https://mathmlcloud.cnx.org:1337/equation/58176c14d08360010084f48c",
"mathType": "TeX",
"math": "\\text{H}_2\\text{O}",
"components": [
{
"format": "mml",
"equation": "58176c14d08360010084f48c",
"source": '<math xmlns="http://www.w3.org/1998/Math/MathML" display="block">\n <msub>\n <mtext>H</mtext>\n <mn>2</mn>\n </msub>\n <mtext>O</mtext>\n</math>',
"updatedAt": "2016-10-31T16:06:44.477Z",
"id": "58176c14d08360010084f48d",
"createdAt": "2016-10-31T16:06:44.477Z"
}
],
"submittedBy": None,
"ip_address": "::ffff:10.64.71.226",
"id": "58176c14d08360010084f48c",
"createdAt": "2016-10-31T16:06:44.413Z"
}
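
# These fixtures pair up with the request mocks below: EQUATION_JSON mirrors
# a successful mathmlcloud conversion of the '\\text{H}_2\\text{O}' TeX that
# appears in EXERCISE_JSON_HTML, while BAD_EQUATION_JSON mirrors the 400
# response returned for an empty math payload.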
class MockResponse:
    """Minimal stand-in for requests.Response used by the request mocks."""

    def __init__(self, json_data, status_code):
self.json_data = json_data
self.text = json.dumps(json_data)
self.status_code = status_code
def json(self):
return self.json_data
def mocked_requests_get(*args, **kwargs):
# Replace requests.get with this mock
# modified from http://stackoverflow.com/a/28507806/5430
if args[0] == 'https://exercises.openstax.org/api/exercises?q=tag:apbio-ch03-ex002':
if 'headers' in kwargs:
assert kwargs['headers'] == {'Authorization': 'Bearer somesortoftoken'}
return MockResponse(EXERCISE_JSON_HTML, 200)
return MockResponse(EXERCISE_JSON, 200)
else:
return MockResponse({"total_count": 0, "items": []}, 200)
def mocked_requests_post(*args, **kwargs):
    # Replace requests.post with this mock (simulates the mathmlcloud API)
if args[0].startswith('http://mathmlcloud.cnx.org/equation'):
if args[1]['math'] == b'\\text{H}_2\\text{O}':
return MockResponse(EQUATION_JSON, 200)
elif args[1]['math'] == b'retry':
return MockResponse('{}', 200)
elif args[1]['math'] == b'':
return MockResponse(BAD_EQUATION_JSON, 400)
else:
return MockResponse('', 500)
return MockResponse({}, 404)
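
# A minimal wiring sketch (an assumption about the patch targets -- the
# real tests patch wherever the formatters actually issue HTTP calls):
#
#     with mock.patch('requests.get', mocked_requests_get), \
#          mock.patch('requests.post', mocked_requests_post):
#         ...exercise the exercise/equation callbacks...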
class DocumentContentFormatterTestCase(unittest.TestCase):
def test_document(self):
from ..models import Document
from ..formatters import DocumentContentFormatter
base_metadata = {
'publishers': [],
'created': '2013/03/19 15:01:16 -0500',
'revised': '2013/06/18 15:22:55 -0500',
'authors': [
{'type': 'cnx-id',
'name': 'Sponge Bob',
'id': 'sbob'}],
'editors': [],
'copyright_holders': [],
'illustrators': [],
'subjects': ['Science and Mathematics'],
'translators': [],
'keywords': ['Bob', 'Sponge', 'Rock'],
'title': "Goofy Goober Rock",
'license_text': 'CC-By 4.0',
'license_url': 'http://creativecommons.org/licenses/by/4.0/',
'summary': "<p>summary</p>",
'version': 'draft',
'language': 'en'
}
# Build test document.
metadata = base_metadata.copy()
document = Document('title',
io.BytesIO(u'<body><p>コンテンツ...</p></body>'.encode('utf-8')),
metadata=metadata)
html = str(DocumentContentFormatter(document))
expected_html = u"""\
<html xmlns="http://www.w3.org/1999/xhtml">
<body><p>コンテンツ...</p></body>
</html>
"""
self.assertEqual(expected_html, unescape(html))
def test_document_mathjax(self):
from ..models import Document
from ..formatters import DocumentContentFormatter
base_metadata = {
'publishers': [],
'created': '2013/03/19 15:01:16 -0500',
'revised': '2013/06/18 15:22:55 -0500',
'authors': [
{'type': 'cnx-id',
'name': 'Sponge Bob',
'id': 'sbob'}],
'editors': [],
'copyright_holders': [],
'illustrators': [],
'subjects': ['Science and Mathematics'],
'translators': [],
'keywords': ['Bob', 'Sponge', 'Rock'],
'title': "Goofy Goober Rock",
'license_text': 'CC-By 4.0',
'license_url': 'http://creativecommons.org/licenses/by/4.0/',
'summary': "<p>summary</p>",
'version': 'draft',
'language': 'en'
}
# Build test document.
metadata = base_metadata.copy()
document = Document('title',
io.BytesIO(u'<body><p><m:math xmlns:m="http://www.w3.org/1998/Math/MathML"/></p></body>'.encode('utf-8')),
metadata=metadata)
html = str(DocumentContentFormatter(document))
expected_html = u"""\
<html
xmlns='http://www.w3.org/1999/xhtml'
xmlns:m='http://www.w3.org/1998/Math/MathML'
>
<body>
<p>
<math></math>
</p>
</body>
</html>
"""
self.assertMultiLineEqual(
expected_html,
xmlpp(unescape(html).encode('utf-8')).decode('utf-8'))
        # Second variation: the namespace declaration is hoisted onto an
        # ancestor element instead of the <math> element itself.
document = Document('title',
io.BytesIO(u'<body><p xmlns:m="http://www.w3.org/1998/Math/MathML"><m:math/></p></body>'.encode('utf-8')),
metadata=metadata)
html = str(DocumentContentFormatter(document))
self.assertMultiLineEqual(
expected_html,
xmlpp(unescape(html).encode('utf-8')).decode('utf-8'))
class DocumentSummaryFormatterTestCase(unittest.TestCase):
def test_summary_w_one_tag(self):
from ..formatters import DocumentSummaryFormatter
from ..models import Document
document = Document('title', io.BytesIO(b'<body><p>contents</p></body>'),
metadata={'summary': '<p>résumé</p>'})
html = str(DocumentSummaryFormatter(document))
self.assertEqual('<p>résumé</p>', html)
def test_summary_w_just_text(self):
from ..formatters import DocumentSummaryFormatter
from ..models import Document
document = Document('title', io.BytesIO(b'<body><p>contents</p></body>'),
metadata={'summary': 'résumé'})
html = str(DocumentSummaryFormatter(document))
expected = """\
<div class="description" data-type="description"\
xmlns="http://www.w3.org/1999/xhtml">
résumé
</div>"""
self.assertEqual(expected, html)
def test_summary_w_text_and_tags(self):
from ..formatters import DocumentSummaryFormatter
from ..models import Document
document = Document('title', io.BytesIO(b'<body><p>contents</p></body>'),
metadata={'summary': 'résumé<p>etc</p><p>...</p>'})
html = str(DocumentSummaryFormatter(document))
expected = """\
<div class="description" data-type="description"\
xmlns="http://www.w3.org/1999/xhtml">
résumé<p>etc</p><p>...</p>
</div>"""
self.assertEqual(expected, html)
@mock.patch('mimetypes.guess_extension', last_extension)
class HTMLFormatterTestCase(unittest.TestCase):
base_metadata = {
'publishers': [],
'created': '2013/03/19 15:01:16 -0500',
'revised': '2013/06/18 15:22:55 -0500',
'authors': [
{'type': 'cnx-id',
'name': 'Sponge Bob',
'id': 'sbob'}],
'editors': [],
'copyright_holders': [],
'illustrators': [],
'subjects': ['Science and Mathematics'],
'translators': [],
'keywords': ['Bob', 'Sponge', 'Rock'],
'title': 'タイトル',
'license_text': 'CC-By 4.0',
'license_url': 'http://creativecommons.org/licenses/by/4.0/',
'summary': "<p>summary</p>",
'version': 'draft',
'language': 'en'
}
maxDiff = None
def xpath(self, path):
from ..html_parsers import HTML_DOCUMENT_NAMESPACES
return self.root.xpath(path, namespaces=HTML_DOCUMENT_NAMESPACES)
def test_document(self):
from ..models import Document
from ..formatters import HTMLFormatter
# Build test document.
metadata = self.base_metadata.copy()
metadata['canonical_book_uuid'] = 'ea4244ce-dd9c-4166-9c97-acae5faf0ba1'
document = Document(
metadata['title'],
io.BytesIO(u'<body><p>コンテンツ...</p></body>'.encode('utf-8')),
metadata=metadata)
html = str(HTMLFormatter(document))
html = unescape(html)
self.root = etree.fromstring(html.encode('utf-8'))
self.assertIn(u'<title>タイトル</title>', html)
self.assertIn(u'<p>コンテンツ...</p>', html)
self.assertEqual(
u'タイトル',
self.xpath('//*[@data-type="document-title"]/text()')[0])
self.assertEqual(
'summary',
self.xpath('//*[@class="description"]/xhtml:p/text()')[0])
self.assertEqual(
metadata['created'],
self.xpath('//xhtml:meta[@itemprop="dateCreated"]/@content')[0])
self.assertEqual(
metadata['revised'],
self.xpath('//xhtml:meta[@itemprop="dateModified"]/@content')[0])
self.assertEqual(
metadata['revised'],
self.xpath('.//xhtml:*[@data-type="revised"]/@data-value')[0])
self.assertEqual(
metadata['canonical_book_uuid'],
self.xpath('.//xhtml:*[@data-type="canonical-book-uuid"]/@data-value')[0]
)
self.assertEqual(
metadata['language'],
self.xpath('//xhtml:html/@lang')[0]
)
self.assertEqual(
metadata['language'],
self.xpath('//xhtml:meta[@itemprop="inLanguage"]/@content')[0]
)
def test_document_nolang(self):
from ..models import Document
from ..formatters import HTMLFormatter
# Build test document.
metadata = self.base_metadata.copy()
metadata['language'] = None
document = Document(
metadata['title'],
io.BytesIO(b'<body><p>Hello.</p></body>'),
metadata=metadata)
html = str(HTMLFormatter(document))
html = unescape(html)
self.root = etree.fromstring(html.encode('utf-8'))
self.assertEqual(
0,
len(self.xpath('//xhtml:html/@lang'))
)
self.assertEqual(
0,
len(self.xpath('//xhtml:meta[@itemprop="inLanguage"]/@content'))
)
def test_document_nocreated(self):
from ..models import Document
from ..formatters import HTMLFormatter
# Build test document.
metadata = self.base_metadata.copy()
metadata['created'] = None
document = Document(
metadata['title'],
io.BytesIO(b'<body><p>Hello.</p></body>'),
metadata=metadata)
html = str(HTMLFormatter(document))
html = unescape(html)
self.root = etree.fromstring(html.encode('utf-8'))
self.assertEqual(
0,
len(self.xpath('//xhtml:meta[@itemprop="dateCreated"]/@content'))
)
def test_document_pointer(self):
from ..models import DocumentPointer
from ..formatters import HTMLFormatter
# Build test document pointer.
pointer = DocumentPointer('pointer@1', {
'title': self.base_metadata['title'],
'cnx-archive-uri': 'pointer@1',
'url': 'https://cnx.org/contents/pointer@1',
})
html = str(HTMLFormatter(pointer))
html = unescape(html)
self.root = etree.fromstring(html.encode('utf-8'))
self.assertIn(u'<title>タイトル</title>', html)
self.assertIn(
u'<a href="https://cnx.org/contents/pointer@1">', html)
self.assertEqual(
u'タイトル',
self.xpath('//*[@data-type="document-title"]/text()')[0])
self.assertEqual(
'pointer@1',
self.xpath('//*[@data-type="cnx-archive-uri"]/@data-value')[0])
def test_binder(self):
from ..models import (Binder, TranslucentBinder, Document,
DocumentPointer)
from ..formatters import HTMLFormatter
# Build test binder.
binder = Binder(self.base_metadata['title'], metadata={
'title': self.base_metadata['title'],
'license_url': self.base_metadata['license_url'],
'license_text': self.base_metadata['license_text'],
'language': self.base_metadata['language']
})
metadata = self.base_metadata.copy()
metadata.update({
'title': "entrée",
'derived_from_uri': 'http://cnx.org/contents/'
'dd68a67a-11f4-4140-a49f-b78e856e2262@1',
'derived_from_title': "Taking Customers' Orders",
})
binder.append(Document('ingress', io.BytesIO(b'<body><p>Hello.</p></body>'),
metadata=metadata))
translucent_binder = TranslucentBinder(metadata={'title': 'Kranken'})
binder.append(translucent_binder)
metadata = self.base_metadata.copy()
metadata.update({
'title': "egress",
'cnx-archive-uri': 'e78d4f90-e078-49d2-beac-e95e8be70667'})
translucent_binder.append(
Document('egress', io.BytesIO(u'<body><p>hüvasti.</p></body>'.encode('utf-8')),
metadata=metadata))
binder.append(DocumentPointer('pointer@1', {
'title': 'Pointer',
'cnx-archive-uri': 'pointer@1',
'url': 'http://cnx.org/contents/pointer@1'}))
html = str(HTMLFormatter(binder))
html = unescape(html)
self.root = etree.fromstring(html.encode('utf-8'))
self.assertIn(u'<title>タイトル</title>', html)
lis = self.xpath('//xhtml:nav/xhtml:ol/xhtml:li')
self.assertEqual(3, len(lis))
self.assertEqual('[email protected]', lis[0][0].attrib['href'])
self.assertEqual(u'entrée', lis[0][0].text)
self.assertEqual('Kranken', lis[1][0].text)
self.assertEqual('[email protected]', lis[2][0].attrib['href'])
self.assertEqual('Pointer', lis[2][0].text)
lis = self.xpath('//xhtml:nav/xhtml:ol/xhtml:li[2]/xhtml:ol/xhtml:li')
self.assertEqual(1, len(lis))
self.assertEqual('[email protected]', lis[0][0].attrib['href'])
self.assertEqual('egress', lis[0][0].text)
def test_translucent_binder(self):
from ..models import (TranslucentBinder, Document)
from ..formatters import HTMLFormatter
# Build test translucent binder.
binder = TranslucentBinder(metadata={
'title': self.base_metadata['title'],
})
metadata = self.base_metadata.copy()
metadata.update({
'title': "entrée",
'derived_from_uri': 'http://cnx.org/contents/'
'dd68a67a-11f4-4140-a49f-b78e856e2262@1',
'derived_from_title': "Taking Customers' Orders",
})
binder.append(Document('ingress', io.BytesIO(b'<body><p>Hello.</p></body>'),
metadata=metadata))
html = str(HTMLFormatter(binder))
html = unescape(html)
self.root = etree.fromstring(html.encode('utf-8'))
self.assertIn(u'<title>タイトル</title>', html)
lis = self.xpath('//xhtml:nav/xhtml:ol/xhtml:li')
self.assertEqual(1, len(lis))
self.assertEqual('[email protected]', lis[0][0].attrib['href'])
self.assertEqual(u'entrée', lis[0][0].text)
def test_document_auto_generate_ids(self):
from ..models import Document
from ..formatters import HTMLFormatter
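        # generate_ids=True prefixes every existing element id (and the
        # local hrefs that point at them) with "auto_<page uuid>_" and
        # numbers elements that had no id at all.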
content = """<body>\
<div class="title" id="title">Preface</div>
<p class="para" id="my-id">This thing and <em>that</em> thing.</p>
<p class="para"><a href="#title">Link</a> to title</p></body>"""
page_one_id = 'fa21215a-91b5-424a-9fbd-5c451f309b87'
expected_content = """\
<div class="title" id="auto_{id}_title">Preface</div>
<p class="para" id="auto_{id}_my-id">This thing and <em>that</em> thing.</p>
<p class="para" id="auto_{id}_{n}"><a href="#auto_{id}_title">Link</a> to title</p>\
""".format(id=page_one_id, n=0)
document = Document(page_one_id, content)
formatted = str(HTMLFormatter(document, generate_ids=True))
self.assertIn(expected_content, formatted)
@mock.patch('mimetypes.guess_extension', last_extension)
class SingleHTMLFormatterTestCase(unittest.TestCase):
base_metadata = {
'publishers': [],
'created': '2016/03/04 17:05:20 -0500',
'revised': '2013/03/05 09:35:24 -0500',
'authors': [
{'type': 'cnx-id',
'name': 'Good Food',
'id': 'yum'}],
'editors': [],
'copyright_holders': [],
'illustrators': [],
'subjects': ['Humanities'],
'translators': [],
'keywords': ['Food', 'デザート', 'Pudding'],
'title': 'チョコレート',
'license_text': 'CC-By 4.0',
'license_url': 'http://creativecommons.org/licenses/by/4.0/',
'summary': "<p>summary</p>",
'version': 'draft',
}
maxDiff = None
def setUp(self):
from ..models import (TranslucentBinder, Binder, Document,
Resource, CompositeDocument)
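        # Assemble the "Desserts" test book: a "Fruity" binder holding the
        # apple and lemon pages plus a citrus sub-binder, a chocolate page
        # and a composite "Extra Stuff" page, reused by the tests below.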
with open(os.path.join(TEST_DATA_DIR, '1x1.jpg'), 'rb') as f:
jpg = Resource('1x1.jpg', io.BytesIO(f.read()), 'image/jpeg',
filename='small.jpg')
metadata = self.base_metadata.copy()
contents = io.BytesIO(u"""\
<body>
<h1>Chocolate Desserts</h1>
<p><a href="#list">List</a> of desserts to try:</p>
<div data-type="list" id="list"><ul><li>Chocolate Orange Tart,</li>
<li>Hot Mocha Puddings,</li>
<li>Chocolate and Banana French Toast,</li>
<li>Chocolate Truffles...</li>
</ul></div><img src="/resources/1x1.jpg" /><p>チョコレートデザート</p>
</body>
""".encode('utf-8'))
self.chocolate = Document('chocolate', contents, metadata=metadata,
resources=[jpg])
metadata = self.base_metadata.copy()
metadata['title'] = 'Apple'
metadata['canonical_book_uuid'] = 'ea4244ce-dd9c-4166-9c97-acae5faf0ba1'
contents = io.BytesIO(b"""\
<body>
<h1>Apple Desserts</h1>
<p><a href="/contents/lemon">Link to lemon</a>. Here are some examples:</p>
<ul><li id="auto_apple_1">Apple Crumble,</li>
<li>Apfelstrudel,</li>
<li id="auto_apple_0">Caramel Apple,</li>
<li>Apple Pie,</li>
<li>Apple sauce...</li>
</ul>
</body>
""")
self.apple = Document('apple', contents, metadata=metadata)
metadata = self.base_metadata.copy()
metadata['title'] = 'Lemon'
contents = io.BytesIO(b"""\
<body class="fruity">
<h1>Lemon Desserts</h1>
<p>Yum! <img src="/resources/1x1.jpg" /></p>
<div data-type="exercise">
<a href="#ost/api/ex/apbio-ch03-ex002">[link]</a>
</div>
<div data-type="exercise">
<p>
<a href="#ost/api/ex/nosuchtag">[link]</a>
</p>
</div>
<ul><li>Lemon & Lime Crush,</li>
<li>Lemon Drizzle Loaf,</li>
<li>Lemon Cheesecake,</li>
<li>Raspberry & Lemon Polenta Cake...</li>
</ul>
</body>
""")
self.lemon = Document('lemon', contents, metadata=metadata,
resources=[jpg])
metadata = self.base_metadata.copy()
metadata['title'] = 'Citrus'
self.citrus = TranslucentBinder([self.lemon], metadata=metadata)
title_overrides = [
self.apple.metadata['title'],
u'<span>1.1</span> <span>|</span> <span>レモン</span>',
'<span>Chapter</span> <span>2</span> <span>citrus</span>']
self.fruity = Binder('ec84e75d-9973-41f1-ab9d-1a3ebaef87e2', [self.apple, self.lemon, self.citrus],
metadata={'title': 'Fruity',
'cnx-archive-uri': 'ec84e75d-9973-41f1-ab9d-1a3ebaef87e2',
'cnx-archive-shortid': 'frt',
'license_text': 'CC-By 4.0',
'license_url': 'http://creativecommons.org/licenses/by/4.0/',
},
title_overrides=title_overrides)
metadata = self.base_metadata.copy()
metadata['title'] = 'Extra Stuff'
contents = io.BytesIO(b"""\
<body>
<h1>Extra Stuff</h1>
<p>This is a composite page.</p>
<p>Here is a <a href="#auto_chocolate_list">link</a> to another document.</p>
</body>
""")
self.extra = CompositeDocument(
'extra', contents, metadata=metadata)
with open(os.path.join(TEST_DATA_DIR, 'cover.png'), 'rb') as f:
cover_png = Resource(
'cover.png', io.BytesIO(f.read()), 'image/png',
filename='cover.png')
self.desserts = Binder(
'Desserts', [self.fruity, self.chocolate, self.extra],
metadata={'title': 'Desserts',
'license_url': 'http://creativecommons.org/licenses/by/4.0/',
'license_text': 'CC-By 4.0',
'cnx-archive-uri': '[email protected]',
'language': 'en',
'slug': 'desserts'},
resources=[cover_png])
def test_binder(self):
from ..formatters import SingleHTMLFormatter
page_path = os.path.join(TEST_DATA_DIR, 'desserts-single-page.xhtml')
if not IS_PY3:
page_path = page_path.replace('.xhtml', '-py2.xhtml')
with open(page_path, 'r') as f:
expected_content = f.read()
actual = str(SingleHTMLFormatter(self.desserts))
out_path = os.path.join(TEST_DATA_DIR,
'desserts-single-page-actual.xhtml')
if not IS_PY3:
out_path = out_path.replace('.xhtml', '-py2.xhtml')
with open(out_path, 'w') as out:
out.write(actual)
self.assertMultiLineEqual(expected_content, actual)
# Placed after the assert, so only called if success:
os.remove(out_path)
def test_str_unicode_bytes(self):
from ..formatters import SingleHTMLFormatter
html = bytes(SingleHTMLFormatter(self.desserts))
if IS_PY3:
self.assertMultiLineEqual(
html.decode('utf-8'), str(SingleHTMLFormatter(self.desserts)))
else:
self.assertMultiLineEqual(
html, str(SingleHTMLFormatter(self.desserts)))
self.assertMultiLineEqual(
html,
unicode(SingleHTMLFormatter(self.desserts)).encode('utf-8'))
@mock.patch('requests.get', mocked_requests_get)
def test_includes_callback(self):
from ..formatters import SingleHTMLFormatter
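        # Example include callback: upper-cases the text of each matched
        # element and of all its descendants.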
def _upcase_text(elem):
if elem.text:
elem.text = elem.text.upper()
for child in elem.iterdescendants():
if child.text:
child.text = child.text.upper()
if child.tail:
child.tail = child.tail.upper()
page_path = os.path.join(TEST_DATA_DIR, 'desserts-includes.xhtml')
if not IS_PY3:
page_path = page_path.replace('.xhtml', '-py2.xhtml')
with codecs.open(page_path, 'r', encoding='utf-8') as f:
expected_content = f.read()
exercise_url = \
'https://%s/api/exercises?q=tag:{itemCode}' % ('exercises.openstax.org')
exercise_match = '#ost/api/ex/'
if IS_MEMCACHE_ENABLED:
mc_client = _get_memcache_client()
else:
mc_client = None
includes = [exercise_callback_factory(exercise_match,
exercise_url,
mc_client),
('//xhtml:*[@data-type = "exercise"]', _upcase_text),
('//xhtml:a', _upcase_text)]
actual = SingleHTMLFormatter(self.desserts,
includes=includes)
out_path = os.path.join(TEST_DATA_DIR, 'desserts-includes-actual.xhtml')
if not IS_PY3:
out_path = out_path.replace('.xhtml', '-py2.xhtml')
with open(out_path, 'w') as out:
out.write(xmlpp(unicode(actual).encode('utf-8')))
with codecs.open(out_path, 'r', encoding='utf-8') as f:
actual_content = f.read()
self.assertEqual(xmlpp(expected_content.encode('utf-8')).split(b'\n'),
xmlpp(actual_content.encode('utf-8')).split(b'\n'))
else:
with open(out_path, 'w') as out:
out.write(str(actual))
self.assertMultiLineEqual(expected_content, str(actual))
# After assert, so won't clean up if test fails
os.remove(out_path)
@mock.patch('requests.post', mocked_requests_post)
@mock.patch('requests.get', mocked_requests_get)
def test_includes_token_callback(self):
from ..formatters import SingleHTMLFormatter
def _upcase_text(elem):
if elem.text:
elem.text = elem.text.upper()
for child in elem.iterdescendants():
if child.text:
child.text = child.text.upper()
if child.tail:
child.tail = child.tail.upper()
page_path = os.path.join(TEST_DATA_DIR, 'desserts-includes-token.xhtml')
if not IS_PY3:
page_path = page_path.replace('.xhtml', '-py2.xhtml')
with codecs.open(page_path, 'r', encoding='utf-8') as f:
expected_content = f.read()
exercise_url = \
'https://%s/api/exercises?q=tag:{itemCode}' % ('exercises.openstax.org')
exercise_match = '#ost/api/ex/'
exercise_token = 'somesortoftoken'
mathml_url = 'http://mathmlcloud.cnx.org/equation'
if IS_MEMCACHE_ENABLED:
mc_client = _get_memcache_client()
else:
mc_client = None
includes = [exercise_callback_factory(exercise_match,
exercise_url,
mc_client,
exercise_token,
mathml_url),
('//xhtml:*[@data-type = "exercise"]', _upcase_text),
('//xhtml:a', _upcase_text)]
actual = SingleHTMLFormatter(self.desserts,
includes=includes)
out_path = os.path.join(TEST_DATA_DIR,
'desserts-includes-token-actual.xhtml')
if not IS_PY3:
out_path = out_path.replace('.xhtml', '-py2.xhtml')
with open(out_path, 'w') as out:
out.write(xmlpp(unicode(actual).encode('utf-8')))
with codecs.open(out_path, 'r', encoding='utf-8') as f:
actual_content = f.read()
self.assertEqual(xmlpp(expected_content.encode('utf-8')).split(b'\n'),
xmlpp(actual_content.encode('utf-8')).split(b'\n'))
else:
with open(out_path, 'w') as out:
out.write(str(actual))
self.assertMultiLineEqual(expected_content, str(actual))
# After assert, so won't clean up if test fails
os.remove(out_path)
class FixNamespacesTestCase(unittest.TestCase):
def test(self):
from ..formatters import _fix_namespaces
actual = _fix_namespaces("""\
<html xmlns="http://www.w3.org/1999/xhtml" lang="en">
<body xmlns:bib="http://bibtexml.sf.net/">
<p>Some text<em><!-- no-selfclose --></em>!</p>
<math xmlns="http://www.w3.org/1998/Math/MathML">
<mtext>H</mtext>
</math>
</body>
</html>""")
expected_content = """\
<html
lang='en'
xmlns='http://www.w3.org/1999/xhtml'
xmlns:m='http://www.w3.org/1998/Math/MathML'
>
<body>
<p>Some text
<em><!-- no-selfclose --></em>!
</p>
<m:math>
<m:mtext>H</m:mtext>
</m:math>
</body>
</html>
"""
self.maxDiff = None
self.assertMultiLineEqual(expected_content, xmlpp(actual).decode('utf-8'))
class ExerciseCallbackTestCase(unittest.TestCase):
@mock.patch('cnxepub.formatters.logger')
@mock.patch('cnxepub.formatters.requests.get')
@mock.patch('cnxepub.formatters.requests.post')
def test_xmlsyntaxerror(self, requests_post, requests_get, logger):
from ..formatters import exercise_callback_factory
xpath, cb = exercise_callback_factory(
'#ost/api/ex/',
'https://exercises/{itemCode}',
mml_url='https://mathmlcloud/')
self.assertEqual(xpath, '//xhtml:a[contains(@href, "#ost/api/ex/")]')
node = etree.fromstring("""
<div>
<a href="#ost/api/ex/book-ch01-ex001"></a>
</div>""")
tex_math = r'<span data-math="1\ \text{kcal}"></span>'
get_resp = mock.Mock()
get_resp.json.return_value = {
'total_count': 1,
'items': [{
'questions': [{
'stem_html': tex_math,
}],
}]}
requests_get.return_value = get_resp
mathml = r"""<math xmlns="http://www.w3.org/1998/Math/MathML"
display="block" alttext="1 kcal">
<mn>1</mn>
<mtext> </mtext>
<mtext>kcal</mtext>
</math>
"""
post_resp = mock.Mock()
post_resp.json.return_value = {'components': [
{'format': 'mml',
'source': mathml}]}
requests_post.return_value = post_resp
self.assertRaises(etree.XMLSyntaxError, cb, node.getchildren()[0])
self.assertEqual(logger.error.call_args[0][0].strip(), u"""\
Error converting math in book-ch01-ex001:
math: 1\\ \\text{kcal}
mathml: <math xmlns="http://www.w3.org/1998/Math/MathML"
display="block" alttext="1 kcal">
<mn>1</mn>
<mtext> </mtext>
<mtext>kcal</mtext>
</math>""")
| agpl-3.0 | 1,293,149,302,066,881,000 | 33.655556 | 178 | 0.523271 | false |
snim2/nxt-turtle | tests/play_scale.py | 1 | 2017 | """
Play scales on the Lego NXT.
Copyright (C) Sarah Mount, 2008.
This program is free software; you can redistribute it and/or
modify it under the terms of the GNU General Public License
as published by the Free Software Foundation; either version 2
of the License, or (at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program; if not, write to the Free Software
Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA
"""
from nxt_turtle import LegoTurtle
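# The frequency constants (FREQ_C, FREQ_D, ...) are class attributes of
# LegoTurtle, so the class is imported directly for unqualified access below.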
__author__ = 'Sarah Mount <[email protected]>'
__date__ = 'March 2008'
def play_scale(turtle):
turtle.play_tone(LegoTurtle.FREQ_C)
turtle.play_tone(LegoTurtle.FREQ_D)
turtle.play_tone(LegoTurtle.FREQ_E)
turtle.play_tone(LegoTurtle.FREQ_F)
turtle.play_tone(LegoTurtle.FREQ_G)
turtle.play_tone(LegoTurtle.FREQ_A)
turtle.play_tone(LegoTurtle.FREQ_B)
turtle.play_tone(LegoTurtle.FREQ_C2)
def play_chromatic(turtle):
turtle.play_tone(LegoTurtle.FREQ_C)
turtle.play_tone(LegoTurtle.FREQ_D_FLAT)
turtle.play_tone(LegoTurtle.FREQ_D)
turtle.play_tone(LegoTurtle.FREQ_E_FLAT)
turtle.play_tone(LegoTurtle.FREQ_E)
turtle.play_tone(LegoTurtle.FREQ_F)
turtle.play_tone(LegoTurtle.FREQ_G_FLAT)
    turtle.play_tone(LegoTurtle.FREQ_G)
turtle.play_tone(LegoTurtle.FREQ_A_FLAT)
turtle.play_tone(LegoTurtle.FREQ_A)
turtle.play_tone(LegoTurtle.FREQ_B_FLAT)
turtle.play_tone(LegoTurtle.FREQ_B)
turtle.play_tone(LegoTurtle.FREQ_C2)
if __name__ == '__main__':
import time
    turtle = LegoTurtle()
play_scale(turtle)
time.sleep(1)
play_chromatic(turtle)
turtle.close()
| gpl-2.0 | 6,849,550,351,123,141,000 | 32.065574 | 78 | 0.702529 | false |
Victordeleon/os-data-importers | eu-structural-funds/common/processors/reshape_data.py | 1 | 1662 | """This processor reshapes the data to match the fiscal schema."""
from datapackage_pipelines.wrapper import ingest
from datapackage_pipelines.wrapper import spew
from common.utilities import get_fiscal_field_names
import logging
def process_row(row, fiscal_fields):
"""Add and remove appropriate columns.
"""
surplus_keys = set(row) - set(fiscal_fields)
missing_keys = set(fiscal_fields) - set(row)
for key in missing_keys:
row[key] = None
for key in surplus_keys:
del row[key]
assert set(row) == set(fiscal_fields)
return row
def process_resources(resources, fiscal_fields):
"""Return an iterator of row iterators.
"""
for resource in resources:
def process_rows(resource_):
for i, row in enumerate(resource_):
yield process_row(row, fiscal_fields)
yield process_rows(resource)
if __name__ == '__main__':
parameters_, datapackage_, resources_ = ingest()
for resource in datapackage_['resources']:
fiscal_fields_ = set(get_fiscal_field_names())
fields = resource['schema']['fields']
new_fields = []
for field in fields:
if field['name'] in fiscal_fields_:
new_fields.append(field)
fiscal_fields_.remove(field['name'])
for f in fiscal_fields_:
new_fields.append({
'name': f,
'type': 'string'
})
resource['schema']['fields'] = new_fields
fiscal_fields_ = set(get_fiscal_field_names())
new_resources_ = process_resources(resources_, fiscal_fields_)
spew(datapackage_, new_resources_)
| mit | 3,459,077,310,441,693,000 | 31.588235 | 66 | 0.615523 | false |
mburakergenc/Malware-Detection-using-Machine-Learning | cuckoo/modules/auxiliary/sniffer.py | 1 | 5764 | # Copyright (C) 2010-2013 Claudio Guarnieri.
# Copyright (C) 2014-2016 Cuckoo Foundation.
# This file is part of Cuckoo Sandbox - http://www.cuckoosandbox.org
# See the file 'docs/LICENSE' for copying permission.
import os
import getpass
import logging
import subprocess
from lib.cuckoo.common.abstracts import Auxiliary
from lib.cuckoo.common.constants import CUCKOO_ROOT, CUCKOO_GUEST_PORT
from lib.cuckoo.common.exceptions import CuckooOperationalError
log = logging.getLogger(__name__)
class Sniffer(Auxiliary):
def __init__(self):
Auxiliary.__init__(self)
self.proc = None
def start(self):
if not self.machine.interface:
log.error("Network interface not defined, network capture aborted")
return
# Handle special pcap dumping options.
if "nictrace" in self.machine.options:
return
tcpdump = self.options.get("tcpdump", "/usr/sbin/tcpdump")
bpf = self.options.get("bpf", "")
file_path = os.path.join(CUCKOO_ROOT, "storage", "analyses",
"%s" % self.task.id, "dump.pcap")
if not os.path.exists(tcpdump):
log.error("Tcpdump does not exist at path \"%s\", network "
"capture aborted", tcpdump)
return
# TODO: this isn't working. need to fix.
# mode = os.stat(tcpdump)[stat.ST_MODE]
# if (mode & stat.S_ISUID) == 0:
# log.error("Tcpdump is not accessible from this user, "
# "network capture aborted")
# return
pargs = [
tcpdump, "-U", "-q", "-s", "0", "-n",
"-i", self.machine.interface,
]
# Trying to save pcap with the same user which cuckoo is running.
try:
user = getpass.getuser()
pargs.extend(["-Z", user])
except:
pass
pargs.extend(["-w", file_path])
pargs.extend(["host", self.machine.ip])
if self.task.options.get("sniffer.debug") != "1":
# Do not capture Agent traffic.
pargs.extend([
"and", "not", "(",
"dst", "host", self.machine.ip, "and",
"dst", "port", str(CUCKOO_GUEST_PORT),
")", "and", "not", "(",
"src", "host", self.machine.ip, "and",
"src", "port", str(CUCKOO_GUEST_PORT),
")",
])
# Do not capture ResultServer traffic.
pargs.extend([
"and", "not", "(",
"dst", "host", self.machine.resultserver_ip, "and",
"dst", "port", self.machine.resultserver_port,
")", "and", "not", "(",
"src", "host", self.machine.resultserver_ip, "and",
"src", "port", self.machine.resultserver_port,
")",
])
if bpf:
pargs.extend(["and", "(", bpf, ")"])
try:
self.proc = subprocess.Popen(
pargs, stdout=subprocess.PIPE, stderr=subprocess.PIPE, close_fds=True
)
except (OSError, ValueError):
log.exception(
"Failed to start sniffer (interface=%s, host=%s, pcap=%s)",
self.machine.interface, self.machine.ip, file_path,
)
return
log.info(
"Started sniffer with PID %d (interface=%s, host=%s, pcap=%s)",
self.proc.pid, self.machine.interface, self.machine.ip, file_path,
)
def _check_output(self, out, err):
if out:
raise CuckooOperationalError(
"Potential error while running tcpdump, did not expect "
"standard output, got: %r." % out
)
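        # tcpdump writes its capture summary (and the "listening on" banner)
        # to stderr; those lines are expected and must not be treated as
        # errors.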
err_whitelist = (
"packets captured",
"packets received by filter",
"packets dropped by kernel",
)
for line in err.split("\n"):
if not line or line.startswith("tcpdump: listening on "):
continue
if line.endswith(err_whitelist):
continue
raise CuckooOperationalError(
"Potential error while running tcpdump, did not expect "
"the following standard error output: %r." % line
)
def stop(self):
"""Stop sniffing.
@return: operation status.
"""
# The tcpdump process was never started in the first place.
if not self.proc:
return
# The tcpdump process has already quit, generally speaking this
# indicates an error such as "permission denied".
if self.proc.poll():
out, err = self.proc.communicate()
raise CuckooOperationalError(
"Error running tcpdump to sniff the network traffic during "
"the analysis; stdout = %r and stderr = %r. Did you enable "
"the extra capabilities to allow running tcpdump as non-root "
"user and disable AppArmor properly (only applies to Ubuntu)?"
% (out, err)
)
try:
self.proc.terminate()
except:
try:
if not self.proc.poll():
log.debug("Killing sniffer")
self.proc.kill()
except OSError as e:
log.debug("Error killing sniffer: %s. Continue", e)
except Exception as e:
log.exception("Unable to stop the sniffer with pid %d: %s",
self.proc.pid, e)
# Ensure expected output was received from tcpdump.
out, err = self.proc.communicate()
self._check_output(out, err)
| mit | 5,280,735,725,692,390,000 | 33.933333 | 85 | 0.523074 | false |
sumedh123/debatify | venv/lib/python2.7/site-packages/socketio/namespace.py | 1 | 4422 | class Namespace(object):
"""Base class for class-based namespaces.
A class-based namespace is a class that contains all the event handlers
for a Socket.IO namespace. The event handlers are methods of the class
with the prefix ``on_``, such as ``on_connect``, ``on_disconnect``,
``on_message``, ``on_json``, and so on.
:param namespace: The Socket.IO namespace to be used with all the event
handlers defined in this class. If this argument is
omitted, the default namespace is used.
"""
def __init__(self, namespace=None):
self.namespace = namespace or '/'
self.server = None
def _set_server(self, server):
self.server = server
def trigger_event(self, event, *args):
"""Dispatch an event to the proper handler method.
In the most common usage, this method is not overloaded by subclasses,
as it performs the routing of events to methods. However, this
        method can be overridden if special dispatching rules are needed, or if
having a single method that catches all events is desired.
"""
handler_name = 'on_' + event
if hasattr(self, handler_name):
return getattr(self, handler_name)(*args)
def emit(self, event, data=None, room=None, skip_sid=None, namespace=None,
callback=None):
"""Emit a custom event to one or more connected clients.
The only difference with the :func:`socketio.Server.emit` method is
that when the ``namespace`` argument is not given the namespace
associated with the class is used.
"""
return self.server.emit(event, data=data, room=room, skip_sid=skip_sid,
namespace=namespace or self.namespace,
callback=callback)
def send(self, data, room=None, skip_sid=None, namespace=None,
callback=None):
"""Send a message to one or more connected clients.
The only difference with the :func:`socketio.Server.send` method is
that when the ``namespace`` argument is not given the namespace
associated with the class is used.
"""
return self.server.send(data, room=room, skip_sid=skip_sid,
namespace=namespace or self.namespace,
callback=callback)
def enter_room(self, sid, room, namespace=None):
"""Enter a room.
The only difference with the :func:`socketio.Server.enter_room` method
is that when the ``namespace`` argument is not given the namespace
associated with the class is used.
"""
return self.server.enter_room(sid, room,
namespace=namespace or self.namespace)
def leave_room(self, sid, room, namespace=None):
"""Leave a room.
The only difference with the :func:`socketio.Server.leave_room` method
is that when the ``namespace`` argument is not given the namespace
associated with the class is used.
"""
return self.server.leave_room(sid, room,
namespace=namespace or self.namespace)
def close_room(self, room, namespace=None):
"""Close a room.
The only difference with the :func:`socketio.Server.close_room` method
is that when the ``namespace`` argument is not given the namespace
associated with the class is used.
"""
return self.server.close_room(room,
namespace=namespace or self.namespace)
def rooms(self, sid, namespace=None):
"""Return the rooms a client is in.
The only difference with the :func:`socketio.Server.rooms` method is
that when the ``namespace`` argument is not given the namespace
associated with the class is used.
"""
return self.server.rooms(sid, namespace=namespace or self.namespace)
def disconnect(self, sid, namespace=None):
"""Disconnect a client.
The only difference with the :func:`socketio.Server.disconnect` method
is that when the ``namespace`` argument is not given the namespace
associated with the class is used.
"""
return self.server.disconnect(sid,
namespace=namespace or self.namespace)
| mit | -3,340,919,251,761,928,700 | 41.932039 | 79 | 0.615106 | false |
googleapis/python-aiplatform | google/cloud/aiplatform_v1beta1/types/artifact.py | 1 | 4560 | # -*- coding: utf-8 -*-
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import proto # type: ignore
from google.protobuf import struct_pb2 # type: ignore
from google.protobuf import timestamp_pb2 # type: ignore
__protobuf__ = proto.module(
package="google.cloud.aiplatform.v1beta1", manifest={"Artifact",},
)
class Artifact(proto.Message):
r"""Instance of a general artifact.
Attributes:
name (str):
Output only. The resource name of the
Artifact.
display_name (str):
User provided display name of the Artifact.
May be up to 128 Unicode characters.
uri (str):
The uniform resource identifier of the
artifact file. May be empty if there is no
actual artifact file.
etag (str):
                An eTag used to perform consistent read-
                modify-write updates. If not set, a blind
"overwrite" update happens.
labels (Sequence[google.cloud.aiplatform_v1beta1.types.Artifact.LabelsEntry]):
The labels with user-defined metadata to
organize your Artifacts.
Label keys and values can be no longer than 64
characters (Unicode codepoints), can only
contain lowercase letters, numeric characters,
underscores and dashes. International characters
are allowed. No more than 64 user labels can be
associated with one Artifact (System labels are
excluded).
create_time (google.protobuf.timestamp_pb2.Timestamp):
Output only. Timestamp when this Artifact was
created.
update_time (google.protobuf.timestamp_pb2.Timestamp):
Output only. Timestamp when this Artifact was
last updated.
state (google.cloud.aiplatform_v1beta1.types.Artifact.State):
The state of this Artifact. This is a
property of the Artifact, and does not imply or
capture any ongoing process. This property is
managed by clients (such as Vertex Pipelines),
and the system does not prescribe or check the
validity of state transitions.
schema_title (str):
The title of the schema describing the
metadata.
Schema title and version is expected to be
registered in earlier Create Schema calls. And
both are used together as unique identifiers to
identify schemas within the local metadata
store.
schema_version (str):
The version of the schema in schema_name to use.
Schema title and version is expected to be registered in
earlier Create Schema calls. And both are used together as
unique identifiers to identify schemas within the local
metadata store.
metadata (google.protobuf.struct_pb2.Struct):
Properties of the Artifact.
description (str):
Description of the Artifact
"""
class State(proto.Enum):
r"""Describes the state of the Artifact."""
STATE_UNSPECIFIED = 0
PENDING = 1
LIVE = 2
name = proto.Field(proto.STRING, number=1,)
display_name = proto.Field(proto.STRING, number=2,)
uri = proto.Field(proto.STRING, number=6,)
etag = proto.Field(proto.STRING, number=9,)
labels = proto.MapField(proto.STRING, proto.STRING, number=10,)
create_time = proto.Field(
proto.MESSAGE, number=11, message=timestamp_pb2.Timestamp,
)
update_time = proto.Field(
proto.MESSAGE, number=12, message=timestamp_pb2.Timestamp,
)
state = proto.Field(proto.ENUM, number=13, enum=State,)
schema_title = proto.Field(proto.STRING, number=14,)
schema_version = proto.Field(proto.STRING, number=15,)
metadata = proto.Field(proto.MESSAGE, number=16, message=struct_pb2.Struct,)
description = proto.Field(proto.STRING, number=17,)
__all__ = tuple(sorted(__protobuf__.manifest))
| apache-2.0 | -2,119,502,530,256,270,300 | 39.714286 | 86 | 0.650877 | false |
batermj/algorithm-challenger | code-analysis/programming_anguage/python/source_codes/Python3.5.9/Python-3.5.9/Lib/test/test_dictviews.py | 1 | 10376 | import copy
import pickle
import unittest
class DictSetTest(unittest.TestCase):
def test_constructors_not_callable(self):
kt = type({}.keys())
self.assertRaises(TypeError, kt, {})
self.assertRaises(TypeError, kt)
it = type({}.items())
self.assertRaises(TypeError, it, {})
self.assertRaises(TypeError, it)
vt = type({}.values())
self.assertRaises(TypeError, vt, {})
self.assertRaises(TypeError, vt)
def test_dict_keys(self):
d = {1: 10, "a": "ABC"}
keys = d.keys()
self.assertEqual(len(keys), 2)
self.assertEqual(set(keys), {1, "a"})
self.assertEqual(keys, {1, "a"})
self.assertNotEqual(keys, {1, "a", "b"})
self.assertNotEqual(keys, {1, "b"})
self.assertNotEqual(keys, {1})
self.assertNotEqual(keys, 42)
self.assertIn(1, keys)
self.assertIn("a", keys)
self.assertNotIn(10, keys)
self.assertNotIn("Z", keys)
self.assertEqual(d.keys(), d.keys())
e = {1: 11, "a": "def"}
self.assertEqual(d.keys(), e.keys())
del e["a"]
self.assertNotEqual(d.keys(), e.keys())
def test_dict_items(self):
d = {1: 10, "a": "ABC"}
items = d.items()
self.assertEqual(len(items), 2)
self.assertEqual(set(items), {(1, 10), ("a", "ABC")})
self.assertEqual(items, {(1, 10), ("a", "ABC")})
self.assertNotEqual(items, {(1, 10), ("a", "ABC"), "junk"})
self.assertNotEqual(items, {(1, 10), ("a", "def")})
self.assertNotEqual(items, {(1, 10)})
self.assertNotEqual(items, 42)
self.assertIn((1, 10), items)
self.assertIn(("a", "ABC"), items)
self.assertNotIn((1, 11), items)
self.assertNotIn(1, items)
self.assertNotIn((), items)
self.assertNotIn((1,), items)
self.assertNotIn((1, 2, 3), items)
self.assertEqual(d.items(), d.items())
e = d.copy()
self.assertEqual(d.items(), e.items())
e["a"] = "def"
self.assertNotEqual(d.items(), e.items())
def test_dict_mixed_keys_items(self):
d = {(1, 1): 11, (2, 2): 22}
e = {1: 1, 2: 2}
self.assertEqual(d.keys(), e.items())
self.assertNotEqual(d.items(), e.keys())
def test_dict_values(self):
d = {1: 10, "a": "ABC"}
values = d.values()
self.assertEqual(set(values), {10, "ABC"})
self.assertEqual(len(values), 2)
def test_dict_repr(self):
d = {1: 10, "a": "ABC"}
self.assertIsInstance(repr(d), str)
r = repr(d.items())
self.assertIsInstance(r, str)
self.assertTrue(r == "dict_items([('a', 'ABC'), (1, 10)])" or
r == "dict_items([(1, 10), ('a', 'ABC')])")
r = repr(d.keys())
self.assertIsInstance(r, str)
self.assertTrue(r == "dict_keys(['a', 1])" or
r == "dict_keys([1, 'a'])")
r = repr(d.values())
self.assertIsInstance(r, str)
self.assertTrue(r == "dict_values(['ABC', 10])" or
r == "dict_values([10, 'ABC'])")
def test_keys_set_operations(self):
d1 = {'a': 1, 'b': 2}
d2 = {'b': 3, 'c': 2}
d3 = {'d': 4, 'e': 5}
self.assertEqual(d1.keys() & d1.keys(), {'a', 'b'})
self.assertEqual(d1.keys() & d2.keys(), {'b'})
self.assertEqual(d1.keys() & d3.keys(), set())
self.assertEqual(d1.keys() & set(d1.keys()), {'a', 'b'})
self.assertEqual(d1.keys() & set(d2.keys()), {'b'})
self.assertEqual(d1.keys() & set(d3.keys()), set())
self.assertEqual(d1.keys() & tuple(d1.keys()), {'a', 'b'})
self.assertEqual(d1.keys() | d1.keys(), {'a', 'b'})
self.assertEqual(d1.keys() | d2.keys(), {'a', 'b', 'c'})
self.assertEqual(d1.keys() | d3.keys(), {'a', 'b', 'd', 'e'})
self.assertEqual(d1.keys() | set(d1.keys()), {'a', 'b'})
self.assertEqual(d1.keys() | set(d2.keys()), {'a', 'b', 'c'})
self.assertEqual(d1.keys() | set(d3.keys()),
{'a', 'b', 'd', 'e'})
self.assertEqual(d1.keys() | (1, 2), {'a', 'b', 1, 2})
self.assertEqual(d1.keys() ^ d1.keys(), set())
self.assertEqual(d1.keys() ^ d2.keys(), {'a', 'c'})
self.assertEqual(d1.keys() ^ d3.keys(), {'a', 'b', 'd', 'e'})
self.assertEqual(d1.keys() ^ set(d1.keys()), set())
self.assertEqual(d1.keys() ^ set(d2.keys()), {'a', 'c'})
self.assertEqual(d1.keys() ^ set(d3.keys()),
{'a', 'b', 'd', 'e'})
self.assertEqual(d1.keys() ^ tuple(d2.keys()), {'a', 'c'})
self.assertEqual(d1.keys() - d1.keys(), set())
self.assertEqual(d1.keys() - d2.keys(), {'a'})
self.assertEqual(d1.keys() - d3.keys(), {'a', 'b'})
self.assertEqual(d1.keys() - set(d1.keys()), set())
self.assertEqual(d1.keys() - set(d2.keys()), {'a'})
self.assertEqual(d1.keys() - set(d3.keys()), {'a', 'b'})
self.assertEqual(d1.keys() - (0, 1), {'a', 'b'})
self.assertFalse(d1.keys().isdisjoint(d1.keys()))
self.assertFalse(d1.keys().isdisjoint(d2.keys()))
self.assertFalse(d1.keys().isdisjoint(list(d2.keys())))
self.assertFalse(d1.keys().isdisjoint(set(d2.keys())))
self.assertTrue(d1.keys().isdisjoint({'x', 'y', 'z'}))
self.assertTrue(d1.keys().isdisjoint(['x', 'y', 'z']))
self.assertTrue(d1.keys().isdisjoint(set(['x', 'y', 'z'])))
self.assertTrue(d1.keys().isdisjoint(set(['x', 'y'])))
self.assertTrue(d1.keys().isdisjoint(['x', 'y']))
self.assertTrue(d1.keys().isdisjoint({}))
self.assertTrue(d1.keys().isdisjoint(d3.keys()))
de = {}
self.assertTrue(de.keys().isdisjoint(set()))
self.assertTrue(de.keys().isdisjoint([]))
self.assertTrue(de.keys().isdisjoint(de.keys()))
self.assertTrue(de.keys().isdisjoint([1]))
def test_items_set_operations(self):
d1 = {'a': 1, 'b': 2}
d2 = {'a': 2, 'b': 2}
d3 = {'d': 4, 'e': 5}
self.assertEqual(
d1.items() & d1.items(), {('a', 1), ('b', 2)})
self.assertEqual(d1.items() & d2.items(), {('b', 2)})
self.assertEqual(d1.items() & d3.items(), set())
self.assertEqual(d1.items() & set(d1.items()),
{('a', 1), ('b', 2)})
self.assertEqual(d1.items() & set(d2.items()), {('b', 2)})
self.assertEqual(d1.items() & set(d3.items()), set())
self.assertEqual(d1.items() | d1.items(),
{('a', 1), ('b', 2)})
self.assertEqual(d1.items() | d2.items(),
{('a', 1), ('a', 2), ('b', 2)})
self.assertEqual(d1.items() | d3.items(),
{('a', 1), ('b', 2), ('d', 4), ('e', 5)})
self.assertEqual(d1.items() | set(d1.items()),
{('a', 1), ('b', 2)})
self.assertEqual(d1.items() | set(d2.items()),
{('a', 1), ('a', 2), ('b', 2)})
self.assertEqual(d1.items() | set(d3.items()),
{('a', 1), ('b', 2), ('d', 4), ('e', 5)})
self.assertEqual(d1.items() ^ d1.items(), set())
self.assertEqual(d1.items() ^ d2.items(),
{('a', 1), ('a', 2)})
self.assertEqual(d1.items() ^ d3.items(),
{('a', 1), ('b', 2), ('d', 4), ('e', 5)})
self.assertEqual(d1.items() - d1.items(), set())
self.assertEqual(d1.items() - d2.items(), {('a', 1)})
self.assertEqual(d1.items() - d3.items(), {('a', 1), ('b', 2)})
self.assertEqual(d1.items() - set(d1.items()), set())
self.assertEqual(d1.items() - set(d2.items()), {('a', 1)})
self.assertEqual(d1.items() - set(d3.items()), {('a', 1), ('b', 2)})
self.assertFalse(d1.items().isdisjoint(d1.items()))
self.assertFalse(d1.items().isdisjoint(d2.items()))
self.assertFalse(d1.items().isdisjoint(list(d2.items())))
self.assertFalse(d1.items().isdisjoint(set(d2.items())))
self.assertTrue(d1.items().isdisjoint({'x', 'y', 'z'}))
self.assertTrue(d1.items().isdisjoint(['x', 'y', 'z']))
self.assertTrue(d1.items().isdisjoint(set(['x', 'y', 'z'])))
self.assertTrue(d1.items().isdisjoint(set(['x', 'y'])))
self.assertTrue(d1.items().isdisjoint({}))
self.assertTrue(d1.items().isdisjoint(d3.items()))
de = {}
self.assertTrue(de.items().isdisjoint(set()))
self.assertTrue(de.items().isdisjoint([]))
self.assertTrue(de.items().isdisjoint(de.items()))
self.assertTrue(de.items().isdisjoint([1]))
def test_recursive_repr(self):
d = {}
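        # Storing the dict's own values view inside it makes repr() recurse.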
d[42] = d.values()
self.assertRaises(RecursionError, repr, d)
def test_copy(self):
d = {1: 10, "a": "ABC"}
self.assertRaises(TypeError, copy.copy, d.keys())
self.assertRaises(TypeError, copy.copy, d.values())
self.assertRaises(TypeError, copy.copy, d.items())
def test_compare_error(self):
class Exc(Exception):
pass
class BadEq:
def __hash__(self):
return 7
def __eq__(self, other):
raise Exc
k1, k2 = BadEq(), BadEq()
v1, v2 = BadEq(), BadEq()
d = {k1: v1}
self.assertIn(k1, d)
self.assertIn(k1, d.keys())
self.assertIn(v1, d.values())
self.assertIn((k1, v1), d.items())
self.assertRaises(Exc, d.__contains__, k2)
self.assertRaises(Exc, d.keys().__contains__, k2)
self.assertRaises(Exc, d.items().__contains__, (k2, v1))
self.assertRaises(Exc, d.items().__contains__, (k1, v2))
with self.assertRaises(Exc):
v2 in d.values()
def test_pickle(self):
d = {1: 10, "a": "ABC"}
for proto in range(pickle.HIGHEST_PROTOCOL + 1):
self.assertRaises((TypeError, pickle.PicklingError),
pickle.dumps, d.keys(), proto)
self.assertRaises((TypeError, pickle.PicklingError),
pickle.dumps, d.values(), proto)
self.assertRaises((TypeError, pickle.PicklingError),
pickle.dumps, d.items(), proto)
if __name__ == "__main__":
unittest.main()
| apache-2.0 | -5,265,286,852,581,064,000 | 40.504 | 76 | 0.502313 | false |
jtimon/bitcoin | test/functional/feature_abortnode.py | 1 | 1700 | #!/usr/bin/env python3
# Copyright (c) 2019 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Test bitcoind aborts if can't disconnect a block.
- Start a single node and generate 3 blocks.
- Delete the undo data.
- Mine a fork that requires disconnecting the tip.
- Verify that bitcoind AbortNode's.
"""
from test_framework.test_framework import BitcoinTestFramework
from test_framework.util import wait_until, get_datadir_path, connect_nodes
import os
class AbortNodeTest(BitcoinTestFramework):
def set_test_params(self):
self.setup_clean_chain = True
self.num_nodes = 2
def setup_network(self):
self.setup_nodes()
# We'll connect the nodes later
def run_test(self):
self.nodes[0].generate(3)
datadir = get_datadir_path(self.options.tmpdir, 0)
# Deleting the undo file will result in reorg failure
os.unlink(os.path.join(datadir, self.chain, 'blocks', 'rev00000.dat'))
# Connecting to a node with a more work chain will trigger a reorg
# attempt.
self.nodes[1].generate(3)
with self.nodes[0].assert_debug_log(["Failed to disconnect block"]):
connect_nodes(self.nodes[0], 1)
self.nodes[1].generate(1)
# Check that node0 aborted
self.log.info("Waiting for crash")
wait_until(lambda: self.nodes[0].is_node_stopped(), timeout=60)
self.log.info("Node crashed - now verifying restart fails")
self.nodes[0].assert_start_raises_init_error()
if __name__ == '__main__':
AbortNodeTest().main()
| mit | 5,538,898,424,799,185,000 | 34.416667 | 78 | 0.67 | false |
jtoppins/beaker | IntegrationTests/src/bkr/inttest/server/tools/__init__.py | 1 | 1364 |
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
import os
import sys
import pkg_resources
import subprocess
class CommandError(Exception):
def __init__(self, command, status, stderr_output):
Exception.__init__(self, 'Command %r failed '
'with exit status %s:\n%s' % (command, status, stderr_output))
self.status = status
self.stderr_output = stderr_output
def run_command(script_filename, executable_filename, args=None, ignore_stderr=False):
# XXX maybe find a better condition than this?
if os.environ.get('BEAKER_CLIENT_COMMAND') == 'bkr':
# Running in dogfood, invoke the real executable
cmdline = [executable_filename] + (args or [])
else:
# Running from the source tree
script = pkg_resources.resource_filename('bkr.server.tools', script_filename)
cmdline = [sys.executable, script] + (args or [])
p = subprocess.Popen(cmdline, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
out, err = p.communicate()
if p.returncode:
raise CommandError(cmdline, p.returncode, err)
if not ignore_stderr:
assert err == '', err
return out
| gpl-2.0 | 6,330,281,277,080,774,000 | 39.117647 | 86 | 0.678886 | false |
peterkmurphy/glyphviewer | glyphviewer/blocks.py | 1 | 13058 | #!/usr/bin/python
#-*- coding: UTF-8 -*-
# File: blocks.py
# Solely for identifying Unicode blocks for unicode characters.
# Based on code from:
# http://stackoverflow.com/questions/243831/unicode-block-of-a-character-in-python
# But updated for 2013.
# Copyright (C) 2013-2020 Peter Murphy <[email protected]>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import re;
PRIV_USE_BLOCK = 151;
# If a font's characters are not in an assigned block, then they are assigned to the
# Private Use Area by default.
def block(ch):
'''
Return the Unicode block name for ch, or None if ch has no block.
>>> block(u'a')
'Basic Latin'
    >>> block(chr(0x0b80))
    'Tamil'
    >>> block(chr(0xe0080))
'''
assert isinstance(ch, str) and len(ch) == 1, repr(ch);
cp = ord(ch);
for start, end, name in _blocks:
if start <= cp <= end:
return name;
def blockbyint(intval):
for start, end, name in _blocks:
if start <= intval <= end:
return name;
def namefromindex(ith):
''' Returns the name of the ith block. '''
return _blocks[ith][2];
def indexfromname(name):
''' Returns the index of a block name. '''
if name:
return _blockmap[name];
else:
return PRIV_USE_BLOCK;
def numblocks():
''' Gets the number of blocks. '''
return _blocksize;
def _initBlocks(text):
global _blocks, _blockmap, _blocksize;
_blocks = [];
_blockmap = {};
iter = 0;
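    # Data lines in Blocks.txt have the form "0000..007F; Basic Latin"; the
    # regex captures the hexadecimal range bounds and the block name.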
pattern = re.compile(r'([0-9A-F]+)\.\.([0-9A-F]+);\ (\S.*\S)')
for line in text.splitlines():
m = pattern.match(line)
if m:
start, end, name = m.groups()
_blocks.append((int(start, 16), int(end, 16), name))
_blockmap[name] = iter;
iter += 1;
_blocksize = len(_blocks);
# retrieved from http://unicode.org/Public/UNIDATA/Blocks.txt
_initBlocks('''
# Blocks-13.0.0.txt
# Date: 2019-07-10, 19:06:00 GMT [KW]
# © 2019 Unicode®, Inc.
# For terms of use, see http://www.unicode.org/terms_of_use.html
#
# Unicode Character Database
# For documentation, see http://www.unicode.org/reports/tr44/
#
# Format:
# Start Code..End Code; Block Name
# ================================================
# Note: When comparing block names, casing, whitespace, hyphens,
# and underbars are ignored.
# For example, "Latin Extended-A" and "latin extended a" are equivalent.
# For more information on the comparison of property values,
# see UAX #44: http://www.unicode.org/reports/tr44/
#
# All block ranges start with a value where (cp MOD 16) = 0,
# and end with a value where (cp MOD 16) = 15. In other words,
# the last hexadecimal digit of the start of range is ...0
# and the last hexadecimal digit of the end of range is ...F.
# This constraint on block ranges guarantees that allocations
# are done in terms of whole columns, and that code chart display
# never involves splitting columns in the charts.
#
# All code points not explicitly listed for Block
# have the value No_Block.
# Property: Block
#
# @missing: 0000..10FFFF; No_Block
0000..007F; Basic Latin
0080..00FF; Latin-1 Supplement
0100..017F; Latin Extended-A
0180..024F; Latin Extended-B
0250..02AF; IPA Extensions
02B0..02FF; Spacing Modifier Letters
0300..036F; Combining Diacritical Marks
0370..03FF; Greek and Coptic
0400..04FF; Cyrillic
0500..052F; Cyrillic Supplement
0530..058F; Armenian
0590..05FF; Hebrew
0600..06FF; Arabic
0700..074F; Syriac
0750..077F; Arabic Supplement
0780..07BF; Thaana
07C0..07FF; NKo
0800..083F; Samaritan
0840..085F; Mandaic
0860..086F; Syriac Supplement
08A0..08FF; Arabic Extended-A
0900..097F; Devanagari
0980..09FF; Bengali
0A00..0A7F; Gurmukhi
0A80..0AFF; Gujarati
0B00..0B7F; Oriya
0B80..0BFF; Tamil
0C00..0C7F; Telugu
0C80..0CFF; Kannada
0D00..0D7F; Malayalam
0D80..0DFF; Sinhala
0E00..0E7F; Thai
0E80..0EFF; Lao
0F00..0FFF; Tibetan
1000..109F; Myanmar
10A0..10FF; Georgian
1100..11FF; Hangul Jamo
1200..137F; Ethiopic
1380..139F; Ethiopic Supplement
13A0..13FF; Cherokee
1400..167F; Unified Canadian Aboriginal Syllabics
1680..169F; Ogham
16A0..16FF; Runic
1700..171F; Tagalog
1720..173F; Hanunoo
1740..175F; Buhid
1760..177F; Tagbanwa
1780..17FF; Khmer
1800..18AF; Mongolian
18B0..18FF; Unified Canadian Aboriginal Syllabics Extended
1900..194F; Limbu
1950..197F; Tai Le
1980..19DF; New Tai Lue
19E0..19FF; Khmer Symbols
1A00..1A1F; Buginese
1A20..1AAF; Tai Tham
1AB0..1AFF; Combining Diacritical Marks Extended
1B00..1B7F; Balinese
1B80..1BBF; Sundanese
1BC0..1BFF; Batak
1C00..1C4F; Lepcha
1C50..1C7F; Ol Chiki
1C80..1C8F; Cyrillic Extended-C
1C90..1CBF; Georgian Extended
1CC0..1CCF; Sundanese Supplement
1CD0..1CFF; Vedic Extensions
1D00..1D7F; Phonetic Extensions
1D80..1DBF; Phonetic Extensions Supplement
1DC0..1DFF; Combining Diacritical Marks Supplement
1E00..1EFF; Latin Extended Additional
1F00..1FFF; Greek Extended
2000..206F; General Punctuation
2070..209F; Superscripts and Subscripts
20A0..20CF; Currency Symbols
20D0..20FF; Combining Diacritical Marks for Symbols
2100..214F; Letterlike Symbols
2150..218F; Number Forms
2190..21FF; Arrows
2200..22FF; Mathematical Operators
2300..23FF; Miscellaneous Technical
2400..243F; Control Pictures
2440..245F; Optical Character Recognition
2460..24FF; Enclosed Alphanumerics
2500..257F; Box Drawing
2580..259F; Block Elements
25A0..25FF; Geometric Shapes
2600..26FF; Miscellaneous Symbols
2700..27BF; Dingbats
27C0..27EF; Miscellaneous Mathematical Symbols-A
27F0..27FF; Supplemental Arrows-A
2800..28FF; Braille Patterns
2900..297F; Supplemental Arrows-B
2980..29FF; Miscellaneous Mathematical Symbols-B
2A00..2AFF; Supplemental Mathematical Operators
2B00..2BFF; Miscellaneous Symbols and Arrows
2C00..2C5F; Glagolitic
2C60..2C7F; Latin Extended-C
2C80..2CFF; Coptic
2D00..2D2F; Georgian Supplement
2D30..2D7F; Tifinagh
2D80..2DDF; Ethiopic Extended
2DE0..2DFF; Cyrillic Extended-A
2E00..2E7F; Supplemental Punctuation
2E80..2EFF; CJK Radicals Supplement
2F00..2FDF; Kangxi Radicals
2FF0..2FFF; Ideographic Description Characters
3000..303F; CJK Symbols and Punctuation
3040..309F; Hiragana
30A0..30FF; Katakana
3100..312F; Bopomofo
3130..318F; Hangul Compatibility Jamo
3190..319F; Kanbun
31A0..31BF; Bopomofo Extended
31C0..31EF; CJK Strokes
31F0..31FF; Katakana Phonetic Extensions
3200..32FF; Enclosed CJK Letters and Months
3300..33FF; CJK Compatibility
3400..4DBF; CJK Unified Ideographs Extension A
4DC0..4DFF; Yijing Hexagram Symbols
4E00..9FFF; CJK Unified Ideographs
A000..A48F; Yi Syllables
A490..A4CF; Yi Radicals
A4D0..A4FF; Lisu
A500..A63F; Vai
A640..A69F; Cyrillic Extended-B
A6A0..A6FF; Bamum
A700..A71F; Modifier Tone Letters
A720..A7FF; Latin Extended-D
A800..A82F; Syloti Nagri
A830..A83F; Common Indic Number Forms
A840..A87F; Phags-pa
A880..A8DF; Saurashtra
A8E0..A8FF; Devanagari Extended
A900..A92F; Kayah Li
A930..A95F; Rejang
A960..A97F; Hangul Jamo Extended-A
A980..A9DF; Javanese
A9E0..A9FF; Myanmar Extended-B
AA00..AA5F; Cham
AA60..AA7F; Myanmar Extended-A
AA80..AADF; Tai Viet
AAE0..AAFF; Meetei Mayek Extensions
AB00..AB2F; Ethiopic Extended-A
AB30..AB6F; Latin Extended-E
AB70..ABBF; Cherokee Supplement
ABC0..ABFF; Meetei Mayek
AC00..D7AF; Hangul Syllables
D7B0..D7FF; Hangul Jamo Extended-B
D800..DB7F; High Surrogates
DB80..DBFF; High Private Use Surrogates
DC00..DFFF; Low Surrogates
E000..F8FF; Private Use Area
F900..FAFF; CJK Compatibility Ideographs
FB00..FB4F; Alphabetic Presentation Forms
FB50..FDFF; Arabic Presentation Forms-A
FE00..FE0F; Variation Selectors
FE10..FE1F; Vertical Forms
FE20..FE2F; Combining Half Marks
FE30..FE4F; CJK Compatibility Forms
FE50..FE6F; Small Form Variants
FE70..FEFF; Arabic Presentation Forms-B
FF00..FFEF; Halfwidth and Fullwidth Forms
FFF0..FFFF; Specials
10000..1007F; Linear B Syllabary
10080..100FF; Linear B Ideograms
10100..1013F; Aegean Numbers
10140..1018F; Ancient Greek Numbers
10190..101CF; Ancient Symbols
101D0..101FF; Phaistos Disc
10280..1029F; Lycian
102A0..102DF; Carian
102E0..102FF; Coptic Epact Numbers
10300..1032F; Old Italic
10330..1034F; Gothic
10350..1037F; Old Permic
10380..1039F; Ugaritic
103A0..103DF; Old Persian
10400..1044F; Deseret
10450..1047F; Shavian
10480..104AF; Osmanya
104B0..104FF; Osage
10500..1052F; Elbasan
10530..1056F; Caucasian Albanian
10600..1077F; Linear A
10800..1083F; Cypriot Syllabary
10840..1085F; Imperial Aramaic
10860..1087F; Palmyrene
10880..108AF; Nabataean
108E0..108FF; Hatran
10900..1091F; Phoenician
10920..1093F; Lydian
10980..1099F; Meroitic Hieroglyphs
109A0..109FF; Meroitic Cursive
10A00..10A5F; Kharoshthi
10A60..10A7F; Old South Arabian
10A80..10A9F; Old North Arabian
10AC0..10AFF; Manichaean
10B00..10B3F; Avestan
10B40..10B5F; Inscriptional Parthian
10B60..10B7F; Inscriptional Pahlavi
10B80..10BAF; Psalter Pahlavi
10C00..10C4F; Old Turkic
10C80..10CFF; Old Hungarian
10D00..10D3F; Hanifi Rohingya
10E60..10E7F; Rumi Numeral Symbols
10E80..10EBF; Yezidi
10F00..10F2F; Old Sogdian
10F30..10F6F; Sogdian
10FB0..10FDF; Chorasmian
10FE0..10FFF; Elymaic
11000..1107F; Brahmi
11080..110CF; Kaithi
110D0..110FF; Sora Sompeng
11100..1114F; Chakma
11150..1117F; Mahajani
11180..111DF; Sharada
111E0..111FF; Sinhala Archaic Numbers
11200..1124F; Khojki
11280..112AF; Multani
112B0..112FF; Khudawadi
11300..1137F; Grantha
11400..1147F; Newa
11480..114DF; Tirhuta
11580..115FF; Siddham
11600..1165F; Modi
11660..1167F; Mongolian Supplement
11680..116CF; Takri
11700..1173F; Ahom
11800..1184F; Dogra
118A0..118FF; Warang Citi
11900..1195F; Dives Akuru
119A0..119FF; Nandinagari
11A00..11A4F; Zanabazar Square
11A50..11AAF; Soyombo
11AC0..11AFF; Pau Cin Hau
11C00..11C6F; Bhaiksuki
11C70..11CBF; Marchen
11D00..11D5F; Masaram Gondi
11D60..11DAF; Gunjala Gondi
11EE0..11EFF; Makasar
11FB0..11FBF; Lisu Supplement
11FC0..11FFF; Tamil Supplement
12000..123FF; Cuneiform
12400..1247F; Cuneiform Numbers and Punctuation
12480..1254F; Early Dynastic Cuneiform
13000..1342F; Egyptian Hieroglyphs
13430..1343F; Egyptian Hieroglyph Format Controls
14400..1467F; Anatolian Hieroglyphs
16800..16A3F; Bamum Supplement
16A40..16A6F; Mro
16AD0..16AFF; Bassa Vah
16B00..16B8F; Pahawh Hmong
16E40..16E9F; Medefaidrin
16F00..16F9F; Miao
16FE0..16FFF; Ideographic Symbols and Punctuation
17000..187FF; Tangut
18800..18AFF; Tangut Components
18B00..18CFF; Khitan Small Script
18D00..18D8F; Tangut Supplement
1B000..1B0FF; Kana Supplement
1B100..1B12F; Kana Extended-A
1B130..1B16F; Small Kana Extension
1B170..1B2FF; Nushu
1BC00..1BC9F; Duployan
1BCA0..1BCAF; Shorthand Format Controls
1D000..1D0FF; Byzantine Musical Symbols
1D100..1D1FF; Musical Symbols
1D200..1D24F; Ancient Greek Musical Notation
1D2E0..1D2FF; Mayan Numerals
1D300..1D35F; Tai Xuan Jing Symbols
1D360..1D37F; Counting Rod Numerals
1D400..1D7FF; Mathematical Alphanumeric Symbols
1D800..1DAAF; Sutton SignWriting
1E000..1E02F; Glagolitic Supplement
1E100..1E14F; Nyiakeng Puachue Hmong
1E2C0..1E2FF; Wancho
1E800..1E8DF; Mende Kikakui
1E900..1E95F; Adlam
1EC70..1ECBF; Indic Siyaq Numbers
1ED00..1ED4F; Ottoman Siyaq Numbers
1EE00..1EEFF; Arabic Mathematical Alphabetic Symbols
1F000..1F02F; Mahjong Tiles
1F030..1F09F; Domino Tiles
1F0A0..1F0FF; Playing Cards
1F100..1F1FF; Enclosed Alphanumeric Supplement
1F200..1F2FF; Enclosed Ideographic Supplement
1F300..1F5FF; Miscellaneous Symbols and Pictographs
1F600..1F64F; Emoticons
1F650..1F67F; Ornamental Dingbats
1F680..1F6FF; Transport and Map Symbols
1F700..1F77F; Alchemical Symbols
1F780..1F7FF; Geometric Shapes Extended
1F800..1F8FF; Supplemental Arrows-C
1F900..1F9FF; Supplemental Symbols and Pictographs
1FA00..1FA6F; Chess Symbols
1FA70..1FAFF; Symbols and Pictographs Extended-A
1FB00..1FBFF; Symbols for Legacy Computing
20000..2A6DF; CJK Unified Ideographs Extension B
2A700..2B73F; CJK Unified Ideographs Extension C
2B740..2B81F; CJK Unified Ideographs Extension D
2B820..2CEAF; CJK Unified Ideographs Extension E
2CEB0..2EBEF; CJK Unified Ideographs Extension F
2F800..2FA1F; CJK Compatibility Ideographs Supplement
30000..3134F; CJK Unified Ideographs Extension G
E0000..E007F; Tags
E0100..E01EF; Variation Selectors Supplement
F0000..FFFFF; Supplementary Private Use Area-A
100000..10FFFF; Supplementary Private Use Area-B
# EOF''')
if __name__ == '__main__':
print(indexfromname("Private Use Area"));
print(block('a'))
print(block(chr(0xE000)))
print(block(chr(0xF8FF)))
print(block(chr(0x10000)))
print(block(chr(0x10ffff)))
| gpl-3.0 | 5,632,453,972,763,632,000 | 29.082949 | 82 | 0.748162 | false |
rombie/contrail-controller | src/container/kube-manager/kube_manager/kube/kube_monitor.py | 1 | 12402 | #
# Copyright (c) 2017 Juniper Networks, Inc. All rights reserved.
#
from cStringIO import StringIO
import json
import socket
import time
import requests
from cfgm_common.utils import cgitb_hook
class KubeMonitor(object):
def __init__(self, args=None, logger=None, q=None, db=None,
resource_name='KubeMonitor', beta=False, api_group=None,
api_version=None):
self.name = type(self).__name__
self.args = args
self.logger = logger
self.q = q
self.cloud_orchestrator = self.args.orchestrator
self.token = self.args.token # valid only for OpenShift
self.headers = {'Connection': 'Keep-Alive'}
self.verify = False
self.timeout = 60
# Per-monitor stream handle to api server.
self.kube_api_resp = None
self.kube_api_stream_handle = None
# Resource name corresponding to this monitor.
self.resource_name = resource_name
self.resource_beta = beta
# Use Kube DB if kube object caching is enabled in config.
if args.kube_object_cache == 'True':
self.db = db
else:
self.db = None
self.kubernetes_api_server = self.args.kubernetes_api_server
if self.token:
protocol = "https"
header = {'Authorization': "Bearer " + self.token}
self.headers.update(header)
self.verify = False
self.kubernetes_api_server_port = \
self.args.kubernetes_api_secure_port
else: # kubernetes
protocol = "http"
self.kubernetes_api_server_port = self.args.kubernetes_api_port
# URL to the api server.
self.url = "%s://%s:%s" % (protocol,
self.kubernetes_api_server,
self.kubernetes_api_server_port)
# Get the base kubernetes url to use for this resource.
# Each resouce can be independently configured to use difference
# versions or api groups. So we let the resource class specify what
# version and api group it is interested in. The base_url is constructed
# with the input from the derived class and does not change for the
# course of the process.
self.base_url = self._get_base_url(self.url, beta, api_group,
api_version)
if not self._is_kube_api_server_alive():
msg = "kube_api_service is not available"
self.logger.error("%s - %s" % (self.name, msg))
raise Exception(msg)
self.logger.info("%s - KubeMonitor init done." % self.name)
def _is_kube_api_server_alive(self, wait=False):
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
while True:
result = sock.connect_ex((self.kubernetes_api_server,
self.kubernetes_api_server_port))
if wait == True and result != 0:
# Connect to Kubernetes API server was not successful.
# If requested, wait indefinitely till connection is up.
msg = "kube_api_service is not reachable. Retry in %s secs." %\
(self.timeout)
self.logger.error("%s - %s" %(self.name, msg))
time.sleep(self.timeout)
continue
# Return result of connection attempt to kubernetes api server.
return result == 0
@classmethod
def _get_base_url(cls, url, beta, api_group, api_version):
''' Construct a base url. '''
if beta:
# URL to v1-beta1 components to api server.
version = api_version if api_version else "v1beta1"
url = "/".join([url, "apis/extensions", version])
else:
""" Get the base URL for the resource. """
version = api_version if api_version else "v1"
group = api_group if api_group else "api"
# URL to the v1-components in api server.
url = "/".join([url, group, version])
return url
def get_component_url(self):
"""URL to a component.
This method return the URL for the component represented by this
monitor instance.
"""
return "%s/%s" % (self.base_url, self.resource_name)
@staticmethod
def get_entry_url(base_url, entry):
"""URL to an entry of this component.
This method returns a URL to a specific entry of this component.
"""
return base_url + entry['metadata']['selfLink']
def init_monitor(self):
"""Initialize/sync a monitor component.
This method will initialize a monitor component.
As a part of this init, this method will read existing entries in api
server and populate the local db.
"""
# Get the URL to this component.
url = self.get_component_url()
try:
resp = requests.get(url, headers=self.headers, verify=self.verify)
if resp.status_code != 200:
resp.close()
return
except requests.exceptions.RequestException as e:
self.logger.error("%s - %s" % (self.name, e))
return
initial_entries = resp.json()['items']
resp.close()
if initial_entries:
for entry in initial_entries:
entry_url = self.get_entry_url(self.url, entry)
try:
resp = requests.get(entry_url, headers=self.headers,
verify=self.verify)
if resp.status_code != 200:
resp.close()
continue
except requests.exceptions.RequestException as e:
self.logger.error("%s - %s" % (self.name, e))
continue
try:
# Construct the event and initiate processing.
event = {'object':resp.json(), 'type':'ADDED'}
self.process_event(event)
except ValueError:
self.logger.error("Invalid data read from kube api server:"
" %s" % (entry))
except Exception as e:
string_buf = StringIO()
cgitb_hook(file=string_buf, format="text")
err_msg = string_buf.getvalue()
self.logger.error("%s - %s" %(self.name, err_msg))
resp.close()
def register_monitor(self):
"""Register this component for notifications from api server.
"""
if self.kube_api_resp:
self.kube_api_resp.close()
# Check if kubernetes api service is up. If not, wait till its up.
self._is_kube_api_server_alive(wait=True)
url = self.get_component_url()
try:
resp = requests.get(url, params={'watch': 'true'},
stream=True, headers=self.headers,
verify=self.verify)
if resp.status_code != 200:
resp.close()
return
# Get handle to events for this monitor.
self.kube_api_resp = resp
self.kube_api_stream_handle = resp.iter_lines(chunk_size=256,
delimiter='\n')
self.logger.info("%s - Watches %s" %(self.name, url))
except requests.exceptions.RequestException as e:
self.logger.error("%s - %s" % (self.name, e))
def get_resource(self, resource_type, resource_name,
namespace=None, beta=False, api_group=None,
api_version=None):
json_data = {}
base_url = self._get_base_url(self.url, beta, api_group, api_version)
if resource_type in ("namespaces", "customresourcedefinitions"):
url = "%s/%s" % (base_url, resource_type)
else:
url = "%s/namespaces/%s/%s/%s" % (base_url, namespace,
resource_type, resource_name)
try:
resp = requests.get(url, stream=True,
headers=self.headers, verify=self.verify)
if resp.status_code == 200:
json_data = json.loads(resp.raw.read())
resp.close()
except requests.exceptions.RequestException as e:
self.logger.error("%s - %s" % (self.name, e))
return json_data
def patch_resource(
self, resource_type, resource_name,
merge_patch, namespace=None, beta=False, sub_resource_name=None,
api_group=None, api_version=None):
base_url = self._get_base_url(self.url, beta, api_group, api_version)
if resource_type == "namespaces":
url = "%s/%s" % (base_url, resource_type)
else:
url = "%s/namespaces/%s/%s/%s" % (base_url, namespace,
resource_type, resource_name)
if sub_resource_name:
url = "%s/%s" %(url, sub_resource_name)
headers = {'Accept': 'application/json',
'Content-Type': 'application/strategic-merge-patch+json'}
headers.update(self.headers)
try:
resp = requests.patch(url, headers=headers,
data=json.dumps(merge_patch),
verify=self.verify)
if resp.status_code != 200:
resp.close()
return
except requests.exceptions.RequestException as e:
self.logger.error("%s - %s" % (self.name, e))
return
return resp.iter_lines(chunk_size=10, delimiter='\n')
def post_resource(
self, resource_type, resource_name,
body_params, namespace=None, beta=False, sub_resource_name=None,
api_group=None, api_version=None):
base_url = self._get_base_url(self.url, beta, api_group, api_version)
if resource_type in ("namespaces", "customresourcedefinitions"):
url = "%s/%s" % (base_url, resource_type)
else:
url = "%s/namespaces/%s/%s/%s" % (base_url, namespace,
resource_type, resource_name)
if sub_resource_name:
url = "%s/%s" %(url, sub_resource_name)
headers = {'Accept': 'application/json',
'Content-Type': 'application/json',
'Authorization': "Bearer " + self.token}
headers.update(self.headers)
try:
resp = requests.post(url, headers=headers,
data=json.dumps(body_params),
verify=self.verify)
if resp.status_code not in [200, 201]:
resp.close()
return
except requests.exceptions.RequestException as e:
self.logger.error("%s - %s" % (self.name, e))
return
return resp.iter_lines(chunk_size=10, delimiter='\n')
def process(self):
"""Process available events."""
if not self.kube_api_stream_handle:
self.logger.error("%s - Event handler not found. "
"Cannot process its events." % self.name)
return
resp = self.kube_api_resp
fp = resp.raw._fp.fp
if fp is None:
self.register_monitor()
return
try:
line = next(self.kube_api_stream_handle)
if not line:
return
except StopIteration:
return
except requests.exceptions.ChunkedEncodingError as e:
self.logger.error("%s - %s" % (self.name, e))
return
try:
self.process_event(json.loads(line))
except ValueError:
self.logger.error(
"Invalid JSON data from response stream:%s" % line)
except Exception as e:
string_buf = StringIO()
cgitb_hook(file=string_buf, format="text")
err_msg = string_buf.getvalue()
self.logger.error("%s - %s" % (self.name, err_msg))
def process_event(self, event):
"""Process an event."""
pass
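# --- Illustrative sketch (not from the original module) ---
# A concrete monitor subclasses KubeMonitor, names a resource, and overrides
# process_event(); the constructor arguments are assumed to be supplied by
# the derived class as described above.
#
#     class PodMonitor(KubeMonitor):
#         def process_event(self, event):
#             # event is a dict such as {'type': 'ADDED', 'object': {...}}
#             name = event['object']['metadata']['name']
#             self.logger.info("%s - pod %s %s", self.name, name,
#                              event['type'])
#
# Typical lifecycle: init_monitor() to sync existing entries, then
# register_monitor() to start watching, then process() in a loop.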
| apache-2.0 | -8,774,820,231,122,944,000 | 37.635514 | 80 | 0.529915 | false |
googleads/google-ads-python | google/ads/googleads/v8/errors/types/recommendation_error.py | 1 | 1672 | # -*- coding: utf-8 -*-
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import proto # type: ignore
__protobuf__ = proto.module(
package="google.ads.googleads.v8.errors",
marshal="google.ads.googleads.v8",
manifest={"RecommendationErrorEnum",},
)
class RecommendationErrorEnum(proto.Message):
r"""Container for enum describing possible errors from applying a
recommendation.
"""
class RecommendationError(proto.Enum):
r"""Enum describing possible errors from applying a
recommendation.
"""
UNSPECIFIED = 0
UNKNOWN = 1
BUDGET_AMOUNT_TOO_SMALL = 2
BUDGET_AMOUNT_TOO_LARGE = 3
INVALID_BUDGET_AMOUNT = 4
POLICY_ERROR = 5
INVALID_BID_AMOUNT = 6
ADGROUP_KEYWORD_LIMIT = 7
RECOMMENDATION_ALREADY_APPLIED = 8
RECOMMENDATION_INVALIDATED = 9
TOO_MANY_OPERATIONS = 10
NO_OPERATIONS = 11
DIFFERENT_TYPES_NOT_SUPPORTED = 12
DUPLICATE_RESOURCE_NAME = 13
RECOMMENDATION_ALREADY_DISMISSED = 14
INVALID_APPLY_REQUEST = 15
__all__ = tuple(sorted(__protobuf__.manifest))
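# Illustrative usage (not part of the generated file): proto enum members
# behave like ints, e.g.
#     err = RecommendationErrorEnum.RecommendationError.POLICY_ERROR
#     assert err == 5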
| apache-2.0 | 7,326,649,095,652,851,000 | 30.54717 | 74 | 0.675239 | false |
stuart-knock/tvb-framework | tvb/core/traits/core.py | 1 | 8770 | # -*- coding: utf-8 -*-
#
#
# TheVirtualBrain-Framework Package. This package holds all Data Management, and
# Web-UI helpful to run brain-simulations. To use it, you also need do download
# TheVirtualBrain-Scientific Package (for simulators). See content of the
# documentation-folder for more details. See also http://www.thevirtualbrain.org
#
# (c) 2012-2013, Baycrest Centre for Geriatric Care ("Baycrest")
#
# This program is free software; you can redistribute it and/or modify it under
# the terms of the GNU General Public License version 2 as published by the Free
# Software Foundation. This program is distributed in the hope that it will be
# useful, but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public
# License for more details. You should have received a copy of the GNU General
# Public License along with this program; if not, you can download it here
# http://www.gnu.org/licenses/old-licenses/gpl-2.0
#
#
# CITATION:
# When using The Virtual Brain for scientific publications, please cite it as follows:
#
# Paula Sanz Leon, Stuart A. Knock, M. Marmaduke Woodman, Lia Domide,
# Jochen Mersmann, Anthony R. McIntosh, Viktor Jirsa (2013)
# The Virtual Brain: a simulator of primate brain network dynamics.
# Frontiers in Neuroinformatics (7:10. doi: 10.3389/fninf.2013.00010)
#
#
"""
.. moduleauthor:: Bogdan Neacsa <[email protected]>
.. moduleauthor:: Lia Domide <[email protected]>
.. moduleauthor:: marmaduke <[email protected]>
"""
import re
import sqlalchemy
from sqlalchemy.orm import relationship
from sqlalchemy.ext.declarative import declarative_base, DeclarativeMeta
from tvb.core.traits.sql_mapping import get_sql_mapping
from tvb.basic.traits.core import MetaType, Type, SPECIAL_KWDS, KWARS_USE_STORAGE
from tvb.basic.logger.builder import get_logger
LOG = get_logger(__name__)
SPECIAL_KWDS.remove(KWARS_USE_STORAGE)
def compute_table_name(class_name):
"""
Given a class name compute the name of the corresponding SQL table.
"""
tablename = 'MAPPED' + re.sub('((?=[A-Z][a-z])|(?<=[a-z])(?=[A-Z]))', '_', class_name).upper()
if tablename.count('MAPPED_') > 1:
tablename = tablename.replace('MAPPED_', '', 1)
return tablename
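# For example, compute_table_name("TimeSeriesRegion") returns
# "MAPPED_TIME_SERIES_REGION", and the second step collapses the doubled
# prefix for classes whose own name already starts with "Mapped"
# (e.g. "MappedArray" -> "MAPPED_ARRAY").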
class DeclarativeMetaType(DeclarativeMeta, MetaType):
"""
The DeclarativeMetaType class helps with class creation by automating
    some of the sqlalchemy code generation. We handle three possibilities:
    - the sql or db keywords are False: no sqlalchemy mapping is used
    - the sql or db keywords are True or unset: the default sqlalchemy
      mapping is used
    - the sql or db keywords are set to sqlalchemy.Column instances, and
      those are used
If it is desired that no sql/db is used, import traits.core and set
TVBSettings.TRAITS_CONFIGURATION.use_storage = False. This will have the (hopefully
desired) effect that all sql and db keyword args are ignored.
"""
def __new__(*args):
mcs, name, bases, dikt = args
if dikt.get('__generate_table__', False):
tablename = compute_table_name(name)
if '__tablename__' not in dikt:
dikt['__tablename__'] = tablename
newcls = super(DeclarativeMetaType, mcs).__new__(*args)
if newcls.__name__ in ('DataType', 'MappedType'):
return newcls
mro_names = map(lambda cls: cls.__name__, newcls.mro())
if Type in newcls.mro() and 'DataType' in mro_names:
LOG.debug('new mapped, typed class %r', newcls)
else:
LOG.debug('new mapped, non-typed class %r', newcls)
return newcls
## Compute id foreign-key to parent
all_parents = []
for b in bases:
all_parents.extend(b.mro())
mapped_parent = filter(lambda cls: issubclass(cls, Type) and hasattr(cls, '__tablename__')
and getattr(cls, '__tablename__') is not None, all_parents)
# Identify DATA_TYPE class, to be used for specific references
datatype_class = filter(lambda cls: hasattr(cls, '__tablename__') and cls.__tablename__ == 'DATA_TYPES',
all_parents)[0]
###### Map Trait attributes to SQL Columns as necessary
all_class_traits = getattr(newcls, 'trait', {})
super_traits = dict()
for parent_class in filter(lambda cls: issubclass(cls, Type), all_parents):
super_traits.update(getattr(parent_class, 'trait', {}))
newclass_only_traits = dict([(key, all_class_traits[key])
for key in all_class_traits if key not in super_traits])
LOG.debug('mapped, typed class has traits %r', newclass_only_traits)
for key, attr in newclass_only_traits.iteritems():
kwd = attr.trait.inits.kwd
##### Either True or a Column instance
sql = kwd.get('db', True)
if isinstance(sql, sqlalchemy.Column):
setattr(newcls, '_' + key, sql)
elif get_sql_mapping(attr.__class__):
defsql = get_sql_mapping(attr.__class__)
sqltype, args, kwds = defsql[0], (), {}
for arg in defsql[1:]:
if type(arg) is tuple:
args = arg
elif type(arg) is dict:
kwds = arg
setattr(newcls, '_' + key, sqlalchemy.Column('_' + key, sqltype, *args, **kwds))
elif Type in attr.__class__.mro() and hasattr(attr.__class__, 'gid'):
#### Is MappedType
fk = sqlalchemy.ForeignKey('DATA_TYPES.gid', ondelete="SET NULL")
setattr(newcls, '_' + key, sqlalchemy.Column('_' + key, sqlalchemy.String, fk))
if newcls.__tablename__:
#### Add relationship for specific class, to have the original entity loaded
#### In case of cascade = 'save-update' we would need to SET the exact instance type
                    #### as defined in the attr description
rel = relationship(attr.__class__, lazy='joined', cascade="none",
primaryjoin=(eval('newcls._' + key) == attr.__class__.gid),
enable_typechecks = False)
setattr(newcls, '__' + key, rel)
else:
#### no default, nothing given
LOG.warning('no sql column generated for attr %s, %r', key, attr)
DeclarativeMetaType.__add_class_mapping_attributes(newcls, mapped_parent)
return newcls
@staticmethod
def __add_class_mapping_attributes(newcls, mapped_parent):
"""
Add Column ID and update __mapper_args__
"""
#### Determine best FOREIGN KEY
mapped_parent = mapped_parent[0]
fkparentid = mapped_parent.__tablename__ + '.id'
### Update __mapper_args__ SQL_ALCHEMY attribute.
if newcls.__tablename__:
LOG.debug('cls %r has dtparent %r', newcls, mapped_parent)
LOG.debug('%r using %r as id foreignkey', newcls, fkparentid)
column_id = sqlalchemy.Column('id', sqlalchemy.Integer,
sqlalchemy.ForeignKey(fkparentid, ondelete="CASCADE"), primary_key=True)
setattr(newcls, 'id', column_id)
### We can not use such a backref for cascading deletes, as we will have a cyclic dependency
# (DataType > Mapped DT > Operation).
# rel = relationship(mapped_parent, primaryjoin=(eval('newcls.id')==mapped_parent.id),
# backref = backref('__' +newcls.__name__, cascade="delete"))
# setattr(newcls, '__id_' + mapped_parent.__name__, rel)
mapper_arg = {}
kwd = newcls.trait.inits.kwd
if hasattr(newcls, '__mapper_args__'):
mapper_arg = getattr(newcls, '__mapper_args__')
if 'polymorphic_on' in mapper_arg and isinstance(mapper_arg['polymorphic_on'], (str, unicode)):
discriminator_name = mapper_arg['polymorphic_on']
LOG.debug("Polymorphic_on %s - %s " % (newcls.__name__, discriminator_name))
mapper_arg['polymorphic_on'] = getattr(newcls, '_' + discriminator_name)
mapper_arg['inherit_condition'] = (newcls.id == mapped_parent.id)
if 'exclude_properties' in mapper_arg:
del mapper_arg['exclude_properties']
del mapper_arg['inherits']
setattr(newcls, '__mapper_args__', mapper_arg)
TypeBase = declarative_base(cls=Type, name='TypeBase', metaclass=DeclarativeMetaType)
| gpl-2.0 | 5,534,416,123,025,169,000 | 44.91623 | 114 | 0.604903 | false |
kaji-project/rekishi | rekishi/api/query_builder.py | 1 | 2428 |
class InfluxQueryHelper(object):
def __init__(self):
self.where_clause = ''
self.limit_clause = ''
self.query = ''
def build_query(self, base_query, **kwargs):
where_clause_dict = {}
if 'start' in kwargs:
start = kwargs['start']
if isinstance(start, (int, float)):
start = '%ss' % int(start)
where_clause_dict['start'] = start
if 'end' in kwargs:
end = kwargs['end']
if isinstance(end, (int, float)):
end = '%ss' % int(end)
where_clause_dict['end'] = end
if 'where' in kwargs:
where_clause_dict['where'] = kwargs['where']
if len(where_clause_dict) > 0:
self.where_clause = self.build_where_clause(where_clause_dict)
if 'limit' in kwargs:
self.limit_clause = self.build_limit_clause(kwargs['limit'])
# SELECT * FROM SERIE_NAME WHERE TIME=XX LIMIT 1;
self.query = "%s%s%s;" % (base_query, self.where_clause, self.limit_clause)
return self.query
def build_limit_clause(self, limit):
return ' limit %s' % (limit)
def build_where_clause(self, where_dict):
where_clause = ''
for key, value in where_dict.iteritems():
new_segment = ''
# Where clause still empty
if where_clause == '':
new_segment += ' WHERE '
else:
new_segment += ' AND '
if key == 'start':
new_segment += 'time > %s' % value
where_clause += new_segment
elif key == 'end':
new_segment += 'time < %s' % value
where_clause += new_segment
# Where list
elif key == 'where':
cond_list = value.split(';')
for cond in cond_list:
if where_clause == '':
new_segment = ' WHERE '
else:
new_segment = ' AND '
try:
wkey, wop, wval = cond.split(',')
new_segment += '%s %s %s' % (wkey, wop, wval)
where_clause += new_segment
                    except ValueError:
                        # Each condition must be "key,operator,value".
                        raise ValueError('Invalid WHERE clause: %s' % cond)
return where_clause
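# Illustrative usage (values are assumptions, not from the original module):
#
#     helper = InfluxQueryHelper()
#     q = helper.build_query("SELECT value FROM cpu_load",
#                            start=1468000000, limit=10,
#                            where="host,=,'server01'")
#     # e.g. "SELECT value FROM cpu_load WHERE time > 1468000000s
#     #       AND host = 'server01' limit 10;"
#     # (clause order may vary: the WHERE parts come from a dict, whose
#     # iteration order is arbitrary in Python 2)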
| gpl-3.0 | -7,164,196,831,225,149,000 | 31.810811 | 83 | 0.458402 | false |
tarunchhabra26/PythonPlayground | BirthdayParadox/birthday_paradox.py | 1 | 2292 | #!/bin/python
"""
1. Write a function called has_duplicates that takes a list and returns True if there is any
element that appears more than once. It should not modify the original list.
2. If there are 23 students in your class, what are the chances that two of you have the same
birthday? You can estimate this probability by generating random samples of 23 birthdays and
checking for matches.
"""
from __future__ import division
import random
__author__ = "Tarun Chhabra"
__copyright__ = "Copyright 2016"
__license__ = "MIT"
__version__ = "2.0"
__maintainer__ = "Tarun Chhabra"
__status__ = "Development"
def has_duplicates(input_list):
"""
Method to check if a given collection has duplicate values
:rtype: bool
:param input_list: A list of values
:return: returns True if there are any duplicate elements
"""
if input_list is None:
return False
unique = set(input_list)
if len(unique) == len(input_list):
return False
return True
def generate_random(n, max_range):
"""
Generate n random numbers for a given range
:rtype: list
:param n: Length of random numbers
:param max_range: The maximum value of the integer
:return: A list of random numbers
"""
if type(n) is not int or type(max_range) is not int:
return None
output = []
for i in range(n):
number = random.randint(1, max_range)
output.append(number)
return output
def count_positives(students, simulations):
"""
Generate simulations of students and count how many of them have at least one pair of students with the
same birthday.
:rtype: int
:param students:
:param simulations:
:return: Number of positive matches
"""
positives = 0
for i in range(simulations):
random_inputs = generate_random(students, 365)
if has_duplicates(random_inputs):
positives += 1
return positives
# Run the simulations and calculate the probability
students = 23
simulations = 10000
positives = count_positives(students, simulations)
print 'Number of students : %d' % students
print 'Number of simulations : %d' % simulations
print 'Number of positive matches : %d' % positives
print 'Probability : %3.2f%%' % ((positives / simulations) * 100)
| mit | -7,760,202,132,229,844,000 | 28.012658 | 107 | 0.679756 | false |
iRGBit/indexhelper | indexer.py | 1 | 3827 | #!/usr/bin/python
# -*- coding: utf-8 -*-
################################################################################
# iRGBit's indexhelper for reading texts and helping with creating an index #
# http://www.github.com/iRGBit #
# hypertext ät birgitbachler dot com #
################################################################################
import sys
import string
import numpy
languages = ['EN', 'DE']
mylang = 0;
defaultFile = 'files/sample.txt'
defaultStopWords = 'stopwords/stop_words_%s.txt' % languages[mylang]
defaultOut = 'out.txt'
def main():
mystring = "Select Language from the following:%s - default is EN: " % (concat(languages))
slang = raw_input(mystring).upper()
if slang in languages:
        si = slang
yourStopWords = 'stopwords/stop_words_%s.txt' % si
print "Parsing your text with the %s stopwords" % si
else:
yourStopWords = defaultStopWords
print "Not a valid language. Assuming English..."
mystring = "Select name of ouput text file (default is %s ): " % defaultOut
sout = raw_input(mystring)
if sout=="":
yourOut = defaultOut
elif sout.endswith('.txt'):
yourOut = sout
else:
yourOut = sout + '.txt'
print "Printing your results to %s." % yourOut
if len(sys.argv) > 2:
print
print "Usage: python indexer.py <yourFile>"
print "If no arguments are given %s and %s will be used as default files" % (defaultFile, defaultStopWords)
print
sys.exit()
elif len(sys.argv) == 2:
yourFile = sys.argv[1]
elif len(sys.argv) == 1:
yourFile = defaultFile
print 'Using %s as file and %s as stop word reference, printing to %s.' % (yourFile, yourStopWords, yourOut)
print
indexThem(yourFile, yourStopWords, yourOut)
def concat(alist):
outputstring = ""
for a in alist:
outputstring = outputstring + " " + a
return outputstring
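# e.g. concat(['EN', 'DE']) returns ' EN DE' (note the leading space).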
def indexThem(yourFile, yourStopWords, yourOut):
punct = set(string.punctuation)
bookWords = open(yourFile).read().decode("unicode-escape").encode("ascii", "ignore").lower().split()
bookWords = [el.rstrip(string.punctuation).lstrip(string.punctuation) for el in bookWords]
stopWords = open(yourStopWords).read().decode("utf-8-sig").encode("utf-8").splitlines()
# remove stopWords from finalWords
finalWords = [x for x in bookWords if x not in stopWords]
# count single occurences of words
from collections import Counter
topWords = Counter(finalWords)
#print topWords
    # the following commented-out lines manage the threshold of indexed words within a given percentile
frequence = []
#pval = 51
for w in topWords:
frequence.append(topWords[w])
#a = numpy.array(frequence)
#p = numpy.percentile(a, pval)
# calculate average frequency of words to compute average frequency in your text
total = 0
for w in topWords:
total += topWords[w]
#print '%s is the total and %s is the length' % (total, len(topWords))
frequent = total/(len(topWords))
#print
#print '%s is a percentile of %s and %s is the average' % (p, pval, frequent)
# only add words that have more than average frequency
tops = {k:v for (k,v) in topWords.iteritems() if v >= frequent}
# sort by word count
#final = sorted(tops.items(), key=lambda x: x[1], reverse=True)
#sort Alphabetically
final = sorted(tops.items(), key=lambda x: x[0])
outFile=open(yourOut, 'w+')
for x in range(len(final)):
print >> outFile, '%s: %s' % (final[x][0], final[x][1])
outFile.close()
#bye!
if __name__ == '__main__':
main()
| gpl-2.0 | -286,084,710,352,663,170 | 30.883333 | 115 | 0.601673 | false |
UncleRus/MultiConf | src/config.py | 1 | 2044 | # -*- coding: utf-8 -*-
from PySide.QtCore import *
from PySide.QtGui import *
import controls
import ui
class ConfigWidget (ui.Scrollable):
_factories = {
'bool': controls.BoolControl,
'float': controls.FloatControl,
'enum': controls.EnumControl,
'uint8': controls.IntControl,
'uint16': controls.IntControl,
'str': controls.StrControl
}
changed = Signal ()
def __init__ (self, name, proc, parent):
super (ConfigWidget, self).__init__ (parent)
self.name = name
self.lContent = None
self.proc = proc
self.proc.connectionChanged.connect (self.refresh)
self.controls = []
self.button = ui.SquareButton (name, _(name))
self.button.toggled.connect (lambda state: self.parent ().setCurrentWidget (self))
self.refresh (False)
def refresh (self, state):
if not state:
self.clear ()
else:
self.load ()
self.button.setEnabled (not self.isEmpty ())
def clear (self):
for ctrl in self.controls:
ctrl.deleteLater ()
del self.controls [:]
if self.lContent:
QWidget ().setLayout (self.lContent)
self.lContent = QFormLayout (self.content)
def load (self):
options = self.proc.osd.options
for optname in options.struct ['map'][self.name]:
opt = options.map [optname]
if not opt.section.enabled (self.proc.osd.modules):
continue
ctrl = self._factories [opt.type] (opt, self.content)
ctrl.load ()
ctrl.changed.connect (self.onChanged)
self.controls.append (ctrl)
self.lContent.addRow (ctrl.label, ctrl.field)
def loadValues (self):
for ctrl in self.controls:
ctrl.load ()
def onChanged (self):
self.changed.emit ()
def isEmpty (self):
return not bool (self.controls)
| gpl-3.0 | -9,144,970,093,496,315,000 | 28.058824 | 90 | 0.559198 | false |
zacharyvoase/django-retracer | src/djretr/middleware.py | 1 | 2936 | # -*- coding: utf-8 -*-
import random
import string
import urllib
import urlparse
from django import http
from django.conf import settings
from django.core.urlresolvers import reverse
def make_nonce(length=10, chars=(string.letters + string.digits)):
"""Generate a random nonce (number used once)."""
return ''.join(random.choice(chars) for i in xrange(length))
def add_query_param(url, param, value):
"""Add a query parameter to a URL."""
split = list(urlparse.urlparse(url))
if split[4]:
split[4] += '&'
split[4] += urllib.urlencode([(param, value)])
return urlparse.urlunparse(split)
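# For example: add_query_param('/next/?a=1', 'b', '2') -> '/next/?a=1&b=2'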
class RetracerRequestMixin(object):
class __metaclass__(type):
def __new__(mcls, name, bases, attrs):
return dict(
((k, v) for k, v in attrs.items() if not k.startswith('_')))
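    # Note: the metaclass above turns RetracerRequestMixin into a plain dict
    # of its public attributes; RetracerMiddleware below copies them onto
    # django's HttpRequest at startup.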
def get_location(self, default=None):
"""Retrieve the currently stashed location, or a default."""
return self.session.get(self.retracer_session_key, default)
def pop_location(self, default=None):
"""Retrieve and clear the currently stashed location."""
if default is not None:
return self.session.pop(self.retracer_session_key, default)
return self.session.pop(self.retracer_session_key)
def stash_location(self, location):
"""Stash a location in the current session."""
self.session[self.retracer_session_key] = location
def stash_referrer(self, default_location=None):
"""Stash the location"""
if 'HTTP_REFERER' in self.META:
self.stash_location(self.META['HTTP_REFERER'])
return True
elif default_location:
self.stash_location(default_location)
return True
return False
def unstash_location(self, nonce=False, permanent=False):
location = self.pop_location()
if nonce:
location = add_query_param(location, make_nonce(), '')
if permanent:
return http.HttpResponsePermanentRedirect(location)
return http.HttpResponseRedirect(location)
def unstash_location_with_default(self, view_name, args=None, kwargs=None,
nonce=False, permanent=False):
if '/' in view_name:
default = view_name
else:
default = reverse(view_name, args=args, kwargs=kwargs)
if self.get_location() is None:
self.stash_location(default)
return self.unstash_location(nonce=nonce, permanent=permanent)
class RetracerMiddleware(object):
def __init__(self):
http.HttpRequest.retracer_session_key = getattr(
settings, 'RETRACER_SESSION_KEY', '_location')
for key, value in RetracerRequestMixin.items():
setattr(http.HttpRequest, key, value)
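# Illustrative wiring (assumptions, not from the original module): with the
# middleware installed, a view can do
#
#     request.stash_location(request.get_full_path())
#     ...
#     return request.unstash_location_with_default('home')
#
# where 'home' is a hypothetical named URL pattern.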
| unlicense | 3,089,663,601,704,950,000 | 31.263736 | 78 | 0.612738 | false |
TakLee96/python-olxextract | olx-extract.py | 1 | 3272 | import sys, os, shutil
import os.path as path
import lxml.etree as et
# print helpful message
if len(sys.argv) != 4:
print "[info] usage: python olxextract.py [unzipped course folder] [new course partial folder] [chapter/sequential/vertical]"
sys.exit(0)
# grab the directory of course and update
course = sys.argv[1]
update = sys.argv[2]
section = sys.argv[3]
if not path.exists(course) or not path.isdir(course):
print "Course folder [" + course + "] does not exist"
sys.exit(0)
elif path.exists(update):
print "Update folder [" + update + "] already exist, please choose a new name"
sys.exit(0)
os.mkdir(update)
# test if @section is valid
sections = { "chapter", "sequential", "vertical" }
if section not in sections:
print "[info] please choose among chapter, sequential and vertical"
sys.exit(0)
def list_xml(directory):
""" List all the xml files in this @directory """
return filter(lambda f: f[0] != "." and f[-4:] == ".xml", os.listdir(directory))
def scan(document):
""" Scan the xml @document and return a tuple of its directory and display_name """
result = ""
with open(document, "r") as f:
root = et.fromstring(f.read())
result = root.get("display_name")
return (document, result)
def scan_xml(directory):
""" Use @scan and @list_xml to scan all the xml files in this @directory and return a list of tuple """
return [scan(path.join(directory, document)) for document in list_xml(directory)]
# list all the sections
section_tuples = scan_xml(path.join(course, section))
print "please choose a (or multiple)", section, "to be extracted; separate multiple", section, "by ','"
for i, sec in enumerate(section_tuples):
print i, ":", sec[1], "@", sec[0]
# let the user choose sections to export
def choose():
raw = raw_input("choose> ")
try:
raw = map(lambda s: int(s.strip()), raw.split(","))
except Exception as e:
print "invalid input: ", e
return choose()
return raw
raw = choose()
class FileExistsError(Exception):
def __init__(self, msg):
self.msg = msg
def __str__(self):
return self.msg + " already exists; there might be two upper section referring to the same lower section"
copies = 0
base_sections = { "html", "discussion", "problem", "video" }
def recursive_copy(filename, section):
if path.exists(path.join(update, section, filename)):
raise FileExistsError(filename)
if not path.exists(path.join(update, section)):
os.mkdir(path.join(update, section))
parent = path.join(course, section, filename)
global copies
copies += 1
shutil.copyfile(parent, path.join(update, section, filename))
if section not in base_sections:
children = []
with open(parent, "r") as f:
root = et.fromstring(f.read())
for child in root:
children.append( (child.get("url_name") + ".xml", child.tag) )
for child in children:
recursive_copy(child[0], child[1])
for i in raw:
section_tuple = section_tuples[i]
recursive_copy(path.basename(section_tuple[0]), section)
print "[info] course partials in olx-format generated in", update
print "[info]", copies, "files copied"
| mit | -5,068,809,144,545,807,000 | 29.867925 | 129 | 0.650061 | false |
ewenqua/pqctp | SyncDayBar.py | 1 | 1490 | #-*- coding=utf-8 -*-
from FinalLogger import logger
from Constant import inst_strategy, suffix_list
import urllib
import json
import sqlite3
conn = sqlite3.connect('futures.db3', check_same_thread = False)
for i in inst_strategy.keys() :
daybar_table = i + suffix_list[0]
cmd = "DROP TABLE IF EXISTS " + daybar_table
conn.execute(cmd)
cmd = "CREATE TABLE IF NOT EXISTS " + daybar_table \
+ " (id INTEGER PRIMARY KEY NULL, inst TEXT NULL, open DOUBLE NULL, high DOUBLE NULL, low DOUBLE NULL, close DOUBLE NULL, volume INTEGER NULL, TradingDay TEXT NULL, time TEXT NULL)"
conn.execute(cmd)
if __name__=="__main__":
# 'http://stock2.finance.sina.com.cn/futures/api/json.php/IndexService.getInnerFuturesDailyKLine?symbol=M1701'
base_url = 'http://stock2.finance.sina.com.cn/futures/api/json.php/IndexService.getInnerFuturesDailyKLine?symbol='
for symbol in inst_strategy.keys():
url = base_url + symbol
print 'url = ' + url
results = json.load(urllib.urlopen(url))
for r in results:
            # r -- ["2016-09-05","2896.000","2916.000","2861.000","2870.000","1677366"]: date, open, high, low, close, volume
conn.execute(
"INSERT INTO %s (inst, open, high, low, close, volume, TradingDay,time) VALUES ('%s', %f, %f, %f, %f, %d, '%s','%s')"
% (symbol + suffix_list[0], symbol, float(r[1]), float(r[2]), float(r[3]), float(r[4]), int(r[5]), r[0], '15:00:00'))
conn.commit()
| mit | 8,276,603,549,224,353,000 | 47.064516 | 191 | 0.630872 | false |
DBuildService/atomic-reactor | atomic_reactor/build.py | 1 | 13343 | """
Copyright (c) 2015 Red Hat, Inc
All rights reserved.
This software may be modified and distributed under the terms
of the BSD license. See the LICENSE file for details.
Classes which implement tasks the builder has to be capable of doing.
Logic above these classes has to set up the workflow itself.
"""
import re
from textwrap import dedent
import logging
import docker.errors
import atomic_reactor.util
from atomic_reactor.core import ContainerTasker, LastLogger
from atomic_reactor.util import (print_version_of_tools, df_parser,
base_image_is_custom, DockerfileImages)
from atomic_reactor.constants import DOCKERFILE_FILENAME
from osbs.utils import ImageName
logger = logging.getLogger(__name__)
class ImageAlreadyBuilt(Exception):
""" This method expects image not to be built but it already is """
class ImageNotBuilt(Exception):
""" This method expects image to be already built but it is not """
class BuilderStateMachine(object):
def __init__(self):
self.is_built = False
self.image = None
def ensure_is_built(self):
"""
ensure that image is already built
:return: None
"""
if not self.is_built:
logger.error("image '%s' is not built yet!", self.image)
raise ImageNotBuilt()
def ensure_not_built(self):
"""
verify that image wasn't built with 'build' method yet
:return: None
"""
if self.is_built:
logger.error("image '%s' is already built!", self.image)
raise ImageAlreadyBuilt()
class BuildResult(object):
REMOTE_IMAGE = object()
def __init__(self, logs=None, fail_reason=None, image_id=None,
annotations=None, labels=None, skip_layer_squash=False,
oci_image_path=None):
"""
:param logs: iterable of log lines (without newlines)
:param fail_reason: str, description of failure or None if successful
:param image_id: str, ID of built container image
:param annotations: dict, data captured during build step which
should be annotated to OpenShift build
:param labels: dict, data captured during build step which
should be set as labels on OpenShift build
:param skip_layer_squash: boolean, direct post-build plugins not
to squash image layers for this build
:param oci_image_path: str, path to OCI image directory
"""
assert fail_reason is None or bool(fail_reason), \
"If fail_reason provided, can't be falsy"
# must provide one, not both
assert not (fail_reason and image_id), \
"Either fail_reason or image_id should be provided, not both"
assert not (fail_reason and oci_image_path), \
"Either fail_reason or oci_image_path should be provided, not both"
assert not (image_id and oci_image_path), \
"Either image_id or oci_image_path should be provided, not both"
self._logs = logs or []
self._fail_reason = fail_reason
self._image_id = image_id
self._annotations = annotations
self._labels = labels
self._skip_layer_squash = skip_layer_squash
self._oci_image_path = oci_image_path
@classmethod
def make_remote_image_result(cls, annotations=None, labels=None):
"""Instantiate BuildResult for image not built locally."""
return cls(
image_id=cls.REMOTE_IMAGE, annotations=annotations, labels=labels
)
@property
def logs(self):
return self._logs
@property
def fail_reason(self):
return self._fail_reason
def is_failed(self):
return self._fail_reason is not None
@property
def image_id(self):
return self._image_id
@property
def annotations(self):
return self._annotations
@property
def labels(self):
return self._labels
@property
def skip_layer_squash(self):
return self._skip_layer_squash
@property
def oci_image_path(self):
return self._oci_image_path
def is_image_available(self):
return self._image_id and self._image_id is not self.REMOTE_IMAGE
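# Illustrative only (values are assumptions): a successful result carries an
# image id, a failed one a reason.
#
#     ok = BuildResult(logs=[], image_id="sha256:abc...")
#     bad = BuildResult(logs=[], fail_reason="build failed")
#     assert ok.is_image_available() and bad.is_failed()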
class InsideBuilder(LastLogger, BuilderStateMachine):
"""
This is expected to run within container
"""
def __init__(self, source, image, **kwargs):
"""
"""
LastLogger.__init__(self)
BuilderStateMachine.__init__(self)
print_version_of_tools()
self.tasker = ContainerTasker()
# arguments for build
self.source = source
        # configuration of source_registry and pull_registries with insecure and
        # dockercfg_path, by registry key
self.pull_registries = {}
self.dockerfile_images = DockerfileImages([])
self._base_image_inspect = None
self.parents_pulled = False
self._parent_images_inspect = {} # locally available image => inspect
self.parent_images_digests = {}
self.image_id = None
self.built_image_info = None
self.image = ImageName.parse(image)
# get info about base image from dockerfile
build_file_path, build_file_dir = self.source.get_build_file_path()
self.df_dir = build_file_dir
self._df_path = None
self.original_df = None
self.buildargs = {} # --buildargs for container build
# If the Dockerfile will be entirely generated from the container.yaml
# (in the Flatpak case, say), then a plugin needs to create the Dockerfile
# and set the base image
if build_file_path.endswith(DOCKERFILE_FILENAME):
self.set_df_path(build_file_path)
@property
def df_path(self):
if self._df_path is None:
raise AttributeError("Dockerfile has not yet been generated")
return self._df_path
def set_df_path(self, path):
self._df_path = path
dfp = df_parser(path)
if dfp.baseimage is None:
raise RuntimeError("no base image specified in Dockerfile")
self.dockerfile_images = DockerfileImages(dfp.parent_images)
logger.debug("base image specified in dockerfile = '%s'", dfp.baseimage)
logger.debug("parent images specified in dockerfile = '%s'", dfp.parent_images)
custom_base_images = set()
for image in dfp.parent_images:
image_name = ImageName.parse(image)
image_str = image_name.to_str()
if base_image_is_custom(image_str):
custom_base_images.add(image_str)
if len(custom_base_images) > 1:
raise NotImplementedError("multiple different custom base images"
" aren't allowed in Dockerfile")
# validate user has not specified COPY --from=image
builders = []
for stmt in dfp.structure:
if stmt['instruction'] == 'FROM':
# extract "bar" from "foo as bar" and record as build stage
match = re.search(r'\S+ \s+ as \s+ (\S+)', stmt['value'], re.I | re.X)
builders.append(match.group(1) if match else None)
elif stmt['instruction'] == 'COPY':
match = re.search(r'--from=(\S+)', stmt['value'], re.I)
if not match:
continue
stage = match.group(1)
# error unless the --from is the index or name of a stage we've seen
if any(stage in [str(idx), builder] for idx, builder in enumerate(builders)):
continue
raise RuntimeError(dedent("""\
OSBS does not support COPY --from unless it matches a build stage.
Dockerfile instruction was:
{}
To use an image with COPY --from, specify it in a stage with FROM, e.g.
FROM {} AS source
FROM ...
COPY --from=source <src> <dest>
""").format(stmt['content'], stage))
# inspect base image lazily just before it's needed - pre plugins may change the base image
@property
def base_image_inspect(self):
"""
inspect base image
:return: dict
"""
if self._base_image_inspect is None:
base_image = self.dockerfile_images.base_image
if self.dockerfile_images.base_from_scratch:
self._base_image_inspect = {}
elif self.parents_pulled or self.dockerfile_images.custom_base_image:
try:
self._base_image_inspect = \
self.tasker.inspect_image(base_image)
except docker.errors.NotFound as exc:
# If the base image cannot be found throw KeyError -
# as this property should behave like a dict
raise KeyError("Unprocessed base image Dockerfile cannot be inspected") from exc
else:
insecure = self.pull_registries[base_image.registry]['insecure']
dockercfg_path = self.pull_registries[base_image.registry]['dockercfg_path']
self._base_image_inspect =\
atomic_reactor.util.get_inspect_for_image(base_image, base_image.registry,
insecure, dockercfg_path)
base_image_str = str(base_image)
if base_image_str not in self._parent_images_inspect:
self._parent_images_inspect[base_image_str] = self._base_image_inspect
return self._base_image_inspect
def parent_image_inspect(self, image):
"""
inspect parent image
:return: dict
"""
image_name = ImageName.parse(image)
if image_name not in self._parent_images_inspect:
if self.parents_pulled:
self._parent_images_inspect[image_name] = self.tasker.inspect_image(image)
else:
insecure = self.pull_registries[image_name.registry]['insecure']
dockercfg_path = self.pull_registries[image_name.registry]['dockercfg_path']
self._parent_images_inspect[image_name] =\
atomic_reactor.util.get_inspect_for_image(image_name,
image_name.registry,
insecure,
dockercfg_path)
return self._parent_images_inspect[image_name]
def inspect_built_image(self):
"""
inspect built image
:return: dict
"""
logger.info("inspecting built image '%s'", self.image_id)
self.ensure_is_built()
# dict with lots of data, see man docker-inspect
inspect_data = self.tasker.inspect_image(self.image_id)
return inspect_data
def get_base_image_info(self):
"""
query docker about base image
:return dict
"""
if self.dockerfile_images.base_from_scratch:
return
base_image = self.dockerfile_images.base_image
logger.info("getting information about base image '%s'", base_image)
image_info = self.tasker.get_image_info_by_image_name(base_image)
items_count = len(image_info)
if items_count == 1:
return image_info[0]
elif items_count <= 0:
logger.error("image '%s' not found", base_image)
raise RuntimeError("image '%s' not found" % base_image)
else:
logger.error("multiple (%d) images found for image '%s'", items_count, base_image)
raise RuntimeError("multiple (%d) images found for image '%s'" % (items_count,
base_image))
def get_built_image_info(self):
"""
query docker about built image
:return dict
"""
logger.info("getting information about built image '%s'", self.image)
image_info = self.tasker.get_image_info_by_image_name(self.image)
items_count = len(image_info)
if items_count == 1:
return image_info[0]
elif items_count <= 0:
logger.error("image '%s' not found", self.image)
raise RuntimeError("image '%s' not found" % self.image)
else:
logger.error("multiple (%d) images found for image '%s'", items_count, self.image)
raise RuntimeError("multiple (%d) images found for image '%s'" % (items_count,
self.image))
def parent_images_to_str(self):
results = {}
for base_image_name, parent_image_name in self.dockerfile_images.items():
base_str = str(base_image_name)
parent_str = str(parent_image_name)
if base_image_name and parent_image_name:
results[base_str] = parent_str
else:
logger.debug("None in: base %s has parent %s", base_str, parent_str)
return results
| bsd-3-clause | 7,096,012,980,449,241,000 | 36.69209 | 100 | 0.578056 | false |
mbevilacqua/appcompatprocessor | test/test_tcorr.py | 1 | 21682 | from __future__ import absolute_import
import logging
from unittest import TestCase
import settings
import sys, traceback
reload(sys)
sys.setdefaultencoding("utf-8")
import os
from AppCompatProcessor import main
from shutil import copyfile
from ShimCacheParser_ACP import read_mir, write_it
import tempfile
import appDB
import re, codecs
from test.auxTest import build_fake_DB, add_entry
# Setup the logger
logger = logging.getLogger()
DB = None
def create_ShimCacheTxtFile(fileFullPath):
try:
with file(fileFullPath, 'rb') as xml_data:
(error, entries) = read_mir(xml_data, True)
if not entries:
if error == "":
print "[ShimCacheParser] found no entries for %s" % fileFullPath
settings.logger.error("[ShimCacheParser] found no entries for %s" % fileFullPath)
else:
print "[ShimCacheParser] Error on file %s - [error]" % (fileFullPath, error)
settings.logger.error("[ShimCacheParser] Error on file %s - [error]" % (fileFullPath, error))
return False
else:
write_it(entries, fileFullPath + "-shimcache.txt")
fileFullPath += "-shimcache.txt"
except IOError, err:
print "[ShimCacheParser] Error opening binary file: %s" % str(err)
settings.logger.error("[ShimCacheParser] Error opening binary file: %s" % str(err))
class TestAppTcorr(TestCase):
testset1 = ''
@classmethod
def setup_class(self):
# Build test dataset
self.testset1 = build_fake_DB(1)
@classmethod
def teardown_class(self):
# Remove temp dbs
os.remove(self.testset1)
def BuildTestPath(self, folder):
master_test_folder = os.path.join(
os.path.abspath(os.path.join(os.path.join(os.path.dirname(__file__), os.pardir), os.pardir)),
"appcompatprocessor-DataSets")
load_test_path = os.path.join(master_test_folder, folder)
return load_test_path
def count_lines_regex(self, input_filename, regex_string):
regex = re.compile(regex_string, re.IGNORECASE)
count = 0
with codecs.open(input_filename, 'r', 'UTF8') as inputFile:
content = inputFile.readlines()
for line in content:
if regex.search(line) is not None:
count += 1
return count
def test_TcorrTest_prog1(self):
with appDB.DBClass(self.testset1, settings.__version__) as DB:
DB.appInitDB()
conn = DB.appConnectDB()
# TestHost01
entry_fields = settings.EntriesFields(EntryType=settings.__APPCOMPAT__,
FilePath='C:\Temp', FileName='AAA.exe', Size=1,ExecFlag='True')
add_entry(DB, "TestHost01", entry_fields)
entry_fields = settings.EntriesFields(EntryType=settings.__APPCOMPAT__,
FilePath='C:\Temp', FileName='BBB.exe', Size=1, ExecFlag='True')
add_entry(DB, "TestHost01", entry_fields)
entry_fields = settings.EntriesFields(EntryType=settings.__APPCOMPAT__,
FilePath='C:\Temp', FileName='CCC.exe', Size=1, ExecFlag='True')
add_entry(DB, "TestHost01", entry_fields)
entry_fields = settings.EntriesFields(EntryType=settings.__APPCOMPAT__,
FilePath='C:\Temp', FileName='DDD.exe', Size=1, ExecFlag='True')
add_entry(DB, "TestHost01", entry_fields)
entry_fields = settings.EntriesFields(EntryType=settings.__APPCOMPAT__,
FilePath='C:\Temp', FileName='EEE.exe', Size=1, ExecFlag='True')
add_entry(DB, "TestHost01", entry_fields)
entry_fields = settings.EntriesFields(EntryType=settings.__APPCOMPAT__,
FilePath='C:\Temp', FileName='FFF.exe', Size=1, ExecFlag='True')
add_entry(DB, "TestHost01", entry_fields)
entry_fields = settings.EntriesFields(EntryType=settings.__APPCOMPAT__,
FilePath='C:\Temp', FileName='GGG.exe', Size=1, ExecFlag='True')
add_entry(DB, "TestHost01", entry_fields)
# TestHost02
entry_fields = settings.EntriesFields(EntryType=settings.__APPCOMPAT__,
FilePath='C:\Temp', FileName='AAA.exe', Size=1, ExecFlag='True')
add_entry(DB, "TestHost02", entry_fields)
entry_fields = settings.EntriesFields(EntryType=settings.__APPCOMPAT__,
FilePath='C:\Temp', FileName='BBB.exe', Size=1, ExecFlag='True')
add_entry(DB, "TestHost02", entry_fields)
entry_fields = settings.EntriesFields(EntryType=settings.__APPCOMPAT__,
FilePath='C:\Temp', FileName='CCC.exe', Size=1, ExecFlag='True')
add_entry(DB, "TestHost02", entry_fields)
entry_fields = settings.EntriesFields(EntryType=settings.__APPCOMPAT__,
FilePath='C:\Temp', FileName='DDD.exe', Size=1, ExecFlag='True')
add_entry(DB, "TestHost02", entry_fields)
entry_fields = settings.EntriesFields(EntryType=settings.__APPCOMPAT__,
FilePath='C:\Temp', FileName='EEE.exe', Size=1, ExecFlag='True')
add_entry(DB, "TestHost02", entry_fields)
entry_fields = settings.EntriesFields(EntryType=settings.__APPCOMPAT__,
FilePath='C:\Temp', FileName='FFF.exe', Size=1, ExecFlag='True')
add_entry(DB, "TestHost02", entry_fields)
entry_fields = settings.EntriesFields(EntryType=settings.__APPCOMPAT__,
FilePath='C:\Temp', FileName='GGG.exe', Size=1, ExecFlag='True')
add_entry(DB, "TestHost02", entry_fields)
try:
directCorrelationData = main([self.testset1, "tcorr", "DDD.exe", "-w 1"])
except Exception as e:
print traceback.format_exc()
self.fail(e.message + "\n" + traceback.format_exc())
# Check Names
self.assertEquals(directCorrelationData[1][3], "CCC.exe", "test_TcorrTest_prog1 - Name failed!")
self.assertEquals(directCorrelationData[0][3], "EEE.exe", "test_TcorrTest_prog1 - Name failed!")
            # Check Before
            self.assertEquals(directCorrelationData[1][6], 0, "test_TcorrTest_prog1 - Before failed!")
            self.assertEquals(directCorrelationData[0][6], 2, "test_TcorrTest_prog1 - Before failed!")
            # Check After
            self.assertEquals(directCorrelationData[1][7], 2, "test_TcorrTest_prog1 - After failed!")
            self.assertEquals(directCorrelationData[0][7], 0, "test_TcorrTest_prog1 - After failed!")
            # Check InvBond
            self.assertEquals(directCorrelationData[1][9], "True", "test_TcorrTest_prog1 - InvBond failed!")
            self.assertEquals(directCorrelationData[0][9], "True", "test_TcorrTest_prog1 - InvBond failed!")
            # Check Total_Count
            self.assertEquals(directCorrelationData[1][10], 2, "test_TcorrTest_prog1 - Total_Count failed!")
            self.assertEquals(directCorrelationData[0][10], 2, "test_TcorrTest_prog1 - Total_Count failed!")
try:
directCorrelationData = main([self.testset1, "tcorr", "DDD.exe", "-w 2"])
except Exception as e:
print traceback.format_exc()
self.fail(e.message + "\n" + traceback.format_exc())
# Check Names
self.assertEquals(directCorrelationData[0][3], "CCC.exe", "test_TcorrTest_prog1 - Name failed!")
self.assertEquals(directCorrelationData[1][3], "EEE.exe", "test_TcorrTest_prog1 - Name failed!")
self.assertEquals(directCorrelationData[2][3], "BBB.exe", "test_TcorrTest_prog1 - Name failed!")
self.assertEquals(directCorrelationData[3][3], "FFF.exe", "test_TcorrTest_prog1 - Name failed!")
            # Check Before
            self.assertEquals(directCorrelationData[0][6], 0, "test_TcorrTest_prog1 - Before failed!")
            self.assertEquals(directCorrelationData[1][6], 2, "test_TcorrTest_prog1 - Before failed!")
            self.assertEquals(directCorrelationData[2][6], 0, "test_TcorrTest_prog1 - Before failed!")
            self.assertEquals(directCorrelationData[3][6], 2, "test_TcorrTest_prog1 - Before failed!")
            # Check After
            self.assertEquals(directCorrelationData[0][7], 2, "test_TcorrTest_prog1 - After failed!")
            self.assertEquals(directCorrelationData[1][7], 0, "test_TcorrTest_prog1 - After failed!")
            self.assertEquals(directCorrelationData[2][7], 2, "test_TcorrTest_prog1 - After failed!")
            self.assertEquals(directCorrelationData[3][7], 0, "test_TcorrTest_prog1 - After failed!")
            # Check InvBond
            self.assertEquals(directCorrelationData[0][9], "True", "test_TcorrTest_prog1 - InvBond failed!")
            self.assertEquals(directCorrelationData[1][9], "True", "test_TcorrTest_prog1 - InvBond failed!")
            self.assertEquals(directCorrelationData[2][9], "True", "test_TcorrTest_prog1 - InvBond failed!")
            self.assertEquals(directCorrelationData[3][9], "True", "test_TcorrTest_prog1 - InvBond failed!")
            # Check Total_Count
            self.assertEquals(directCorrelationData[0][10], 2, "test_TcorrTest_prog1 - Total_Count failed!")
            self.assertEquals(directCorrelationData[1][10], 2, "test_TcorrTest_prog1 - Total_Count failed!")
            self.assertEquals(directCorrelationData[2][10], 2, "test_TcorrTest_prog1 - Total_Count failed!")
            self.assertEquals(directCorrelationData[3][10], 2, "test_TcorrTest_prog1 - Total_Count failed!")
            # Check Weight
            self.assertTrue(directCorrelationData[0][8] > directCorrelationData[2][8], "test_TcorrTest_prog1 - Weight failed!")
            self.assertTrue(directCorrelationData[0][8] > directCorrelationData[3][8], "test_TcorrTest_prog1 - Weight failed!")
            self.assertTrue(directCorrelationData[1][8] > directCorrelationData[2][8], "test_TcorrTest_prog1 - Weight failed!")
            self.assertTrue(directCorrelationData[1][8] > directCorrelationData[3][8], "test_TcorrTest_prog1 - Weight failed!")
            self.assertTrue(directCorrelationData[0][8] == directCorrelationData[1][8], "test_TcorrTest_prog1 - Weight failed!")
            self.assertTrue(directCorrelationData[2][8] == directCorrelationData[3][8], "test_TcorrTest_prog1 - Weight failed!")
try:
directCorrelationData = main([self.testset1, "tcorr", "DDD.exe", "-w 3"])
except Exception as e:
print traceback.format_exc()
self.fail(e.message + "\n" + traceback.format_exc())
# Check Names
self.assertEquals(directCorrelationData[0][3], "CCC.exe", "test_TcorrTest_prog1 - Name failed!")
self.assertEquals(directCorrelationData[1][3], "EEE.exe", "test_TcorrTest_prog1 - Name failed!")
self.assertEquals(directCorrelationData[2][3], "BBB.exe", "test_TcorrTest_prog1 - Name failed!")
self.assertEquals(directCorrelationData[3][3], "FFF.exe", "test_TcorrTest_prog1 - Name failed!")
self.assertEquals(directCorrelationData[4][3], "AAA.exe", "test_TcorrTest_prog1 - Name failed!")
self.assertEquals(directCorrelationData[5][3], "GGG.exe", "test_TcorrTest_prog1 - Name failed!")
            # Check Before
            self.assertEquals(directCorrelationData[0][6], 0, "test_TcorrTest_prog1 - Before failed!")
            self.assertEquals(directCorrelationData[1][6], 2, "test_TcorrTest_prog1 - Before failed!")
            self.assertEquals(directCorrelationData[2][6], 0, "test_TcorrTest_prog1 - Before failed!")
            self.assertEquals(directCorrelationData[3][6], 2, "test_TcorrTest_prog1 - Before failed!")
            self.assertEquals(directCorrelationData[4][6], 0, "test_TcorrTest_prog1 - Before failed!")
            self.assertEquals(directCorrelationData[5][6], 2, "test_TcorrTest_prog1 - Before failed!")
            # Check After
            self.assertEquals(directCorrelationData[0][7], 2, "test_TcorrTest_prog1 - After failed!")
            self.assertEquals(directCorrelationData[1][7], 0, "test_TcorrTest_prog1 - After failed!")
            self.assertEquals(directCorrelationData[2][7], 2, "test_TcorrTest_prog1 - After failed!")
            self.assertEquals(directCorrelationData[3][7], 0, "test_TcorrTest_prog1 - After failed!")
            self.assertEquals(directCorrelationData[4][7], 2, "test_TcorrTest_prog1 - After failed!")
            self.assertEquals(directCorrelationData[5][7], 0, "test_TcorrTest_prog1 - After failed!")
            # Check InvBond
            self.assertEquals(directCorrelationData[0][9], "True", "test_TcorrTest_prog1 - InvBond failed!")
            self.assertEquals(directCorrelationData[1][9], "True", "test_TcorrTest_prog1 - InvBond failed!")
            self.assertEquals(directCorrelationData[2][9], "True", "test_TcorrTest_prog1 - InvBond failed!")
            self.assertEquals(directCorrelationData[3][9], "True", "test_TcorrTest_prog1 - InvBond failed!")
            self.assertEquals(directCorrelationData[4][9], "True", "test_TcorrTest_prog1 - InvBond failed!")
            self.assertEquals(directCorrelationData[5][9], "True", "test_TcorrTest_prog1 - InvBond failed!")
            # Check Total_Count
            self.assertEquals(directCorrelationData[0][10], 2, "test_TcorrTest_prog1 - Total_Count failed!")
            self.assertEquals(directCorrelationData[1][10], 2, "test_TcorrTest_prog1 - Total_Count failed!")
            self.assertEquals(directCorrelationData[2][10], 2, "test_TcorrTest_prog1 - Total_Count failed!")
            self.assertEquals(directCorrelationData[3][10], 2, "test_TcorrTest_prog1 - Total_Count failed!")
            self.assertEquals(directCorrelationData[4][10], 2, "test_TcorrTest_prog1 - Total_Count failed!")
            self.assertEquals(directCorrelationData[5][10], 2, "test_TcorrTest_prog1 - Total_Count failed!")
# Check Weight
            self.assertTrue(directCorrelationData[0][8] > directCorrelationData[2][8], "test_TcorrTest_prog1 - Weight failed!")
            self.assertTrue(directCorrelationData[0][8] > directCorrelationData[3][8], "test_TcorrTest_prog1 - Weight failed!")
            self.assertTrue(directCorrelationData[0][8] > directCorrelationData[4][8], "test_TcorrTest_prog1 - Weight failed!")
            self.assertTrue(directCorrelationData[0][8] > directCorrelationData[5][8], "test_TcorrTest_prog1 - Weight failed!")
            self.assertTrue(directCorrelationData[1][8] > directCorrelationData[2][8], "test_TcorrTest_prog1 - Weight failed!")
            self.assertTrue(directCorrelationData[1][8] > directCorrelationData[3][8], "test_TcorrTest_prog1 - Weight failed!")
            self.assertTrue(directCorrelationData[1][8] > directCorrelationData[4][8], "test_TcorrTest_prog1 - Weight failed!")
            self.assertTrue(directCorrelationData[1][8] > directCorrelationData[5][8], "test_TcorrTest_prog1 - Weight failed!")
            self.assertTrue(directCorrelationData[0][8] == directCorrelationData[1][8], "test_TcorrTest_prog1 - Weight failed!")
            self.assertTrue(directCorrelationData[2][8] == directCorrelationData[3][8], "test_TcorrTest_prog1 - Weight failed!")
            self.assertTrue(directCorrelationData[4][8] == directCorrelationData[5][8], "test_TcorrTest_prog1 - Weight failed!")
# TestHost03
entry_fields = settings.EntriesFields(EntryType=settings.__APPCOMPAT__,
FilePath='C:\Temp', FileName='AAA.exe', Size=1, ExecFlag='True')
add_entry(DB, "TestHost03", entry_fields)
entry_fields = settings.EntriesFields(EntryType=settings.__APPCOMPAT__,
FilePath='C:\Temp', FileName='BBB.exe', Size=1, ExecFlag='True')
add_entry(DB, "TestHost03", entry_fields)
entry_fields = settings.EntriesFields(EntryType=settings.__APPCOMPAT__,
FilePath='C:\Temp', FileName='CCC.exe', Size=1, ExecFlag='True')
add_entry(DB, "TestHost03", entry_fields)
entry_fields = settings.EntriesFields(EntryType=settings.__APPCOMPAT__,
FilePath='C:\Temp', FileName='DDD.exe', Size=1, ExecFlag='True')
add_entry(DB, "TestHost03", entry_fields)
entry_fields = settings.EntriesFields(EntryType=settings.__APPCOMPAT__,
FilePath='C:\Temp', FileName='EEE.exe', Size=1, ExecFlag='True')
add_entry(DB, "TestHost03", entry_fields)
entry_fields = settings.EntriesFields(EntryType=settings.__APPCOMPAT__,
FilePath='C:\Temp', FileName='FFF.exe', Size=1, ExecFlag='True')
add_entry(DB, "TestHost03", entry_fields)
entry_fields = settings.EntriesFields(EntryType=settings.__APPCOMPAT__,
FilePath='C:\Temp', FileName='GGG.exe', Size=1, ExecFlag='True')
add_entry(DB, "TestHost03", entry_fields)
try:
directCorrelationData = main([self.testset1, "tcorr", "DDD.exe", "-w 1"])
except Exception as e:
print traceback.format_exc()
self.fail(e.message + "\n" + traceback.format_exc())
# Check Names
self.assertEquals(directCorrelationData[0][3], "CCC.exe", "test_TcorrTest_prog1 - Name failed!")
self.assertEquals(directCorrelationData[1][3], "EEE.exe", "test_TcorrTest_prog1 - Name failed!")
            # Check Before
            self.assertEquals(directCorrelationData[0][6], 0, "test_TcorrTest_prog1 - Before failed!")
            self.assertEquals(directCorrelationData[1][6], 3, "test_TcorrTest_prog1 - Before failed!")
            # Check After
            self.assertEquals(directCorrelationData[0][7], 3, "test_TcorrTest_prog1 - After failed!")
            self.assertEquals(directCorrelationData[1][7], 0, "test_TcorrTest_prog1 - After failed!")
            # Check InvBond
            self.assertEquals(directCorrelationData[0][9], "True", "test_TcorrTest_prog1 - InvBond failed!")
            self.assertEquals(directCorrelationData[1][9], "True", "test_TcorrTest_prog1 - InvBond failed!")
            # Check Total_Count
            self.assertEquals(directCorrelationData[0][10], 3, "test_TcorrTest_prog1 - Total_Count failed!")
            self.assertEquals(directCorrelationData[1][10], 3, "test_TcorrTest_prog1 - Total_Count failed!")
def _test_TcorrMixed(self):
# Verify that AmCache data doesn't get mixed in with AppCompat in the tcorr module
# Note that we currently print results separately but return a unique structure with aggregates both datasets
load_test_path = self.BuildTestPath("TestData-mini")
# Get temp db name for the test
tempdb = tempfile.NamedTemporaryFile(suffix='.db', prefix='testCase', dir=tempfile.gettempdir())
tempdb.close()
(db_filenameFullPath, db_version, num_hosts, num_instances, num_entries) = main([tempdb.name, "load", load_test_path])
directCorrelationData1 = main([tempdb.name, "tcorr", "net.exe", "-w 3"])
# Check Name
self.assertEquals(directCorrelationData1[0][3], "net1.exe", "test_TcorrTest1 - Name failed!")
# Check Before
self.assertEquals(directCorrelationData1[0][6], 0, "test_TcorrTest1 - Before failed!")
# Check After
self.assertEquals(directCorrelationData1[0][7], 158, "test_TcorrTest1 - After failed!")
load_test_path = self.BuildTestPath("TestData-AmCache")
(db_filenameFullPath2, db_version2, num_hosts2, num_instances2, num_entries2) = main([tempdb.name, "load", load_test_path])
directCorrelationData2 = main([tempdb.name, "tcorr", "net.exe", "-w 3"])
# Remove temp db
os.remove(tempdb.name)
# Check Name
self.assertEquals(directCorrelationData2[0][3], "net1.exe", "test_TcorrTest1 - Name failed!")
# Check Before
self.assertEquals(directCorrelationData2[0][6], 0 + 0, "test_TcorrTest1 - Before failed!")
# Check After
self.assertEquals(directCorrelationData2[0][7], 158 + 21, "test_TcorrTest1 - After failed!")
def _test_TcorrAmCache(self):
load_test_path = self.BuildTestPath("TestData-AmCache")
# Get temp db name for the test
tempdb = tempfile.NamedTemporaryFile(suffix='.db', prefix='testCase', dir=tempfile.gettempdir())
tempdb.close()
(db_filenameFullPath1, db_version1, num_hosts1, num_instances1, num_entries2) = main([tempdb.name, "load", load_test_path])
directCorrelationData1 = main([tempdb.name, "tcorr", "net.exe", "-w 3"])
# Remove temp db
os.remove(tempdb.name)
# Check Name
self.assertEquals(directCorrelationData1[0][3], "net1.exe", "test_TcorrTest1 - Name failed!")
# Check Before
self.assertEquals(directCorrelationData1[0][6], 0, "test_TcorrTest1 - Before failed!")
# Check After
self.assertEquals(directCorrelationData1[0][7], 21, "test_TcorrTest1 - After failed!")
| apache-2.0 | -4,675,935,307,108,132,000 | 64.902736 | 131 | 0.614657 | false |
mansam/liveconnect | liveconnect/skydrive.py | 1 | 4134 | import liveconnect
import liveconnect.exceptions
import requests
import urllib
def connect_skydrive():
client_id = liveconnect.config.get('liveconnect', 'client_id')
client_secret = liveconnect.config.get('liveconnect', 'client_secret')
return SkyDrive(client_id, client_secret)
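# Hedged usage sketch (not part of the original module): the intended call
# flow, assuming a [liveconnect] config section with client_id/client_secret
# and an OAuth access token obtained elsewhere via generate_auth_url().
# "ACCESS_TOKEN" and "report.pdf" are placeholders.
#
#   sd = connect_skydrive()
#   quota = sd.get_quota(access_token="ACCESS_TOKEN")
#   for entry in sd.list_dir(access_token="ACCESS_TOKEN"):
#       print entry["name"]
#   with open("report.pdf", "rb") as f:
#       sd.put(name="report.pdf", fobj=f, access_token="ACCESS_TOKEN")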
class SkyDrive(liveconnect.LiveConnect):
def __init__(self, client_id, client_secret):
super(SkyDrive, self).__init__(client_id, client_secret)
self.api_url = "https://apis.live.net/v5.0/"
self.default_scopes = ['wl.basic', 'wl.skydrive', 'wl.skydrive_update']
def generate_auth_url(self, scopes=[], redirect_uri=None, state=""):
if not scopes:
scopes = self.default_scopes
return liveconnect.LiveConnect.generate_auth_url(self,
scopes=scopes,
redirect_uri=redirect_uri, state=state)
def _request(self, method, url, access_token, refresh_token=None, query={},
auth_header=False, files=None):
"""
        Make a request to the SkyDrive API. Returns the ``requests``
        response object; callers typically call ``.json()`` on it.
"""
params = {
"access_token": access_token
}
for k in query:
params[k] = query[k]
headers = {}
if auth_header:
headers["Authorization"] = 'Bearer %s' % access_token
request_method = getattr(requests, method)
encoded_parameters = urllib.urlencode(params)
url = "%s%s?%s" % (self.api_url, url, encoded_parameters)
response = request_method(url, headers=headers, files=files)
if response.status_code == 200: # OK
return response
else:
response.raise_for_status()
def get_quota(self, access_token=None, refresh_token=None):
return self._request('get', 'me/skydrive/quota', access_token, refresh_token=refresh_token).json()
def get_share_link(self, file_id, access_token=None, refresh_token=None, edit_link=False):
if edit_link:
link_suffix = "shared_edit_link"
else:
link_suffix = "shared_read_link"
url = '%s/%s' % (file_id, link_suffix)
response = self._request('get', url, access_token, refresh_token=refresh_token)
return response.json()['link']
def get_download_link(self, file_id, access_token=None, refresh_token=None):
url = '%s/content' % file_id
response = self._request('get', url,
access_token,
refresh_token=refresh_token,
query={"download": 'true', "suppress_redirects":'true'})
url = response.json()['location']
return url
def list_dir(self, folder='me/skydrive', access_token=None, refresh_token=None):
return self._request('get', '%s/files' % folder, access_token, refresh_token=refresh_token).json()['data']
def info(self, file_id="", access_token=None, refresh_token=None):
return self._request('get', file_id, access_token).json()
def put(self, name=None, fobj=None, folder_id="me/skydrive", access_token=None, refresh_token=None, overwrite=True):
"""
Upload a file to SkyDrive, by default overwriting any file that exists with the selected name.
:param name: Name to create file as in SkyDrive.
:type name: str
:param fobj: File to upload
:type fobj: File or File-like object
:param folder_id: SkyDrive ID of folder to create file in
:type folder_id: str
:param access_token: Access token of user to connect as
:type access_token: str
:param refresh_token: Refresh token of user to connect as
:type refresh_token: str
:param overwrite: Overwrite existing file (default: True)
:type overwrite: boolean
:rtype: dictionary
"""
return self._request('post', "%s/files" % folder_id, access_token, files={"file":(name, fobj)})
| mit | 5,896,308,902,472,626,000 | 38 | 120 | 0.593856 | false |
mupif/mupif | mupif/examples/Example02-distrib/application2.py | 1 | 3601 | import sys
import Pyro4
import logging
sys.path.extend(['..', '../../..'])
from mupif import *
import mupif.Physics.PhysicalQuantities as PQ
log = logging.getLogger()
@Pyro4.expose
class application2(Model.Model):
"""
    Simple application that accumulates the time-step values mapped to it
"""
def __init__(self, metaData={}):
MD = {
            'Name': 'Simple application accumulating time steps',
            'ID': 'N/A',
            'Description': 'Accumulates time steps',
'Physics': {
'Type': 'Other',
'Entity': 'Other'
},
'Solver': {
'Software': 'Python script',
'Language': 'Python3',
'License': 'LGPL',
'Creator': 'Borek',
'Version_date': '02/2019',
'Type': 'Summator',
'Documentation': 'Nowhere',
'Estim_time_step_s': 1,
'Estim_comp_time_s': 0.01,
'Estim_execution_cost_EUR': 0.01,
'Estim_personnel_cost_EUR': 0.01,
'Required_expertise': 'None',
'Accuracy': 'High',
'Sensitivity': 'High',
'Complexity': 'Low',
'Robustness': 'High'
},
'Inputs': [
{'Type': 'mupif.Property', 'Type_ID': 'mupif.PropertyID.PID_Time_step', 'Name': 'Time step',
'Description': 'Time step', 'Units': 's',
'Origin': 'Simulated', 'Required': True}],
'Outputs': [
            {'Type': 'mupif.Property', 'Type_ID': 'mupif.PropertyID.PID_Time', 'Name': 'Cumulative time',
             'Description': 'Cumulative time', 'Units': 's', 'Origin': 'Simulated'}]
}
super(application2, self).__init__(metaData=MD)
self.updateMetadata(metaData)
self.value = 0.0
self.count = 0.0
self.contrib = Property.ConstantProperty(
(0.,), PropertyID.PID_Time, ValueType.Scalar, 's', PQ.PhysicalQuantity(0., 's'))
def initialize(self, file='', workdir='', metaData={}, validateMetaData=True, **kwargs):
super(application2, self).initialize(file, workdir, metaData, validateMetaData, **kwargs)
def getProperty(self, propID, time, objectID=0):
md = {
'Execution': {
'ID': self.getMetadata('Execution.ID'),
'Use_case_ID': self.getMetadata('Execution.Use_case_ID'),
'Task_ID': self.getMetadata('Execution.Task_ID')
}
}
if propID == PropertyID.PID_Time:
return Property.ConstantProperty(
(self.value,), PropertyID.PID_Time, ValueType.Scalar, 's', time, metaData=md)
else:
raise APIError.APIError('Unknown property ID')
def setProperty(self, property, objectID=0):
if property.getPropertyID() == PropertyID.PID_Time_step:
# remember the mapped value
self.contrib = property
else:
raise APIError.APIError('Unknown property ID')
def solveStep(self, tstep, stageID=0, runInBackground=False):
        # here we actually accumulate the value using the value of the mapped property
self.value = self.value+self.contrib.inUnitsOf('s').getValue(tstep.getTime())[0]
self.count = self.count+1
def getCriticalTimeStep(self):
return PQ.PhysicalQuantity(1.0, 's')
def getAssemblyTime(self, tstep):
return tstep.getTime()
def getApplicationSignature(self):
return "Application2"
| lgpl-3.0 | -1,283,888,354,917,432,800 | 36.905263 | 110 | 0.544016 | false |
QCaudron/genepy | genepy/genepy.py | 1 | 9289 | import os
from subprocess import call
from shutil import copyfileobj
from Bio import SeqIO
from Bio.Alphabet import generic_dna
from Bio.Seq import Seq
import numpy as np
import matplotlib.pyplot as plt
from matplotlib import colors
try :
import seaborn
sns = True
except :
sns = False
try :
from sklearn.neighbors import KernelDensity
skl = True
except :
skl = False
# RNA / DNA alphabet
RNA = { "-" : 0,
"A" : 1,
"C" : 2,
"G" : 3,
"T" : 4,
"U" : 4
}
# Interpretable filetypes for ClustalO and BioPython
extensions = { "aln" : "clustal",
"clu" : "clustal",
"fst" : "fasta",
"fasta" : "fasta",
"phy" : "phylip",
"gb" : "genbank",
"gbk" : "genbank",
"st" : "stockholm"
}
# Colour map and plot colour
colourmap = colors.ListedColormap([[0.0, 0.0, 0.0],
[0.1, 0.6, 0.25],
[0.8, 0.1, 0.1],
[1.0, 0.7, 0.4],
[0.65, 0.85, 0.4]])
c = (0.7686274509803922, 0.3058823529411765, 0.3215686274509804)
# Displays the alignment as an image,
# with C and G as hot colours,
# with A and T as cold colours,
# and with "-" or unknown as black
def showalignment(obj, colourmap = colourmap) :
plt.imshow(obj, aspect = "auto", cmap = colourmap, interpolation = "nearest")
plt.grid(False)
plt.show()
# Return a numpy array representing the alignment
def alignmentarray(alignment, length = None, RNA = RNA) :
if length is None :
length = len(alignment[0].seq)
X = np.zeros( (len(alignment), length), dtype = "int8" )
# Convert
for i, record in enumerate(alignment) :
X[i, :len(record.seq)] = [RNA.get(nuc, 0) for nuc in record.seq]
return np.array(X)
# Read an alignment
def readalignment(filename, extensions = extensions) :
# Check the file exists
if not os.path.isfile(filename) :
print "%s not found." % filename
return
# Check the file is of an interpretable filetype
if os.path.splitext(filename)[1][1:] not in extensions :
print "GenePy currently supports the following extensions :"
print "\n- ".join(extensions.keys())
return
# Read in the records
X = []
with open(filename, "rU") as f :
for record in SeqIO.parse(f, extensions[os.path.splitext(filename)[1][1:]]) :
X.append(record.upper())
# Return the alignment as a list of sequences
return X
def align(filename, force, threads, full, full_iter, it, auto) :
# If the data isn't on disk already
if filename == "genepy.fasta" :
print "genepy !"# TODO : Write to disk !
else :
# Generate flags :
command = ["clustalo", "-v", "--outfmt=phy"]
# Input file
command.append("-i")
command.append(filename)
# Output file
command.append("-o")
command.append("temp_genepy.phy")
# Force overwrite
if force :
command.append("--force")
# Limit threads
if threads :
command.append("--threads")
command.append(threads)
# Full distance matrix
if full :
command.append("--full")
# Full distance matrix during iteration only
if full_iter :
command.append("--full-iter")
# Iteration
if it :
command.append("--iter")
command.append(it)
if not (it or full or full_iter) :
command.append("--auto")
# Call ClustalO
print " ".join(command)
call(command)
# Determine number of lines in file
with open("temp_genepy.phy", "r") as infile :
for linecount, temp in enumerate(infile) :
pass
with open(os.path.splitext(filename)[0] + "_aligned_genepy.phy", "w") as outfile, open("temp_genepy.phy", "r") as infile :
# The number of lines to change ( sequence number )
l1 = infile.readline()
N = int(l1.split(" ")[1])
# Drop header in out-file
outfile.write(l1)
# Now write the next N lines, adding a space after the sequence name
for i in range(N) :
line = infile.readline()
outfile.write(line[:10] + " " + line[10:])
# Copy the rest of the file as-is
copyfileobj(infile, outfile)
os.remove("temp_genepy.phy")
print "File rewritten as PhyML-useable input to %s" % (os.path.splitext(filename)[0] + "_aligned_genepy.phy")
# Rewrite the file, as ClustalO output fails in PhyML
"""
s = readalignment(filename.split(".")[0] + "_aligned_genepy.phy")
f = open(filename.split(".")[0] + "_aligned_genepy.phy", "w")
SeqIO.write(s, f, "phylip")
f.close()
"""
def calcstats(seq) :
stats = {}
stats["A"] = []
stats["C"] = []
stats["G"] = []
stats["T"] = []
stats["transition"] = np.zeros((4, 4))
#stats["lengths"] =
for a in seq :
A = a.seq.count("A")
C = a.seq.count("C")
G = a.seq.count("G")
T = a.seq.count("T")
ACGT = max(float(A + C + G + T), 1)
stats["A"].append(A / ACGT)
stats["C"].append(C / ACGT)
stats["G"].append(G / ACGT)
stats["T"].append(T / ACGT)
for i, base1 in enumerate("ACGT") :
for j, base2 in enumerate("ACGT") :
stats["transition"][i,j] += a.seq.count(base1 + base2) / float(len(a.seq)-1)
stats["A"] = np.array(stats["A"])
stats["C"] = np.array(stats["C"])
stats["G"] = np.array(stats["G"])
stats["T"] = np.array(stats["T"])
stats["transition"] /= np.sum(stats["transition"])
return stats
def stats(s) :
frequencies = [s["A"].mean(),
s["C"].mean(),
s["G"].mean(),
s["T"].mean()]
# Nucleotide frequencies
fig = plt.subplot2grid((3, 4), (0, 0), colspan=2, rowspan=2)
plt.bar(range(4), frequencies, width=0.9)
fig.set_xticks(np.arange(0.45, 4.45))
fig.set_xticklabels(("A", "C", "G", "T"))
plt.title("Nucleotide Frequencies")
# Nucleotide frequency distributions
if skl :
x = np.linspace(0, 1, 100)[:, np.newaxis]
yA = KernelDensity(bandwidth=0.005).fit(s["A"][:, np.newaxis])
yC = KernelDensity(bandwidth=0.005).fit(s["C"][:, np.newaxis])
yG = KernelDensity(bandwidth=0.005).fit(s["G"][:, np.newaxis])
yT = KernelDensity(bandwidth=0.005).fit(s["T"][:, np.newaxis])
plt.subplot2grid((3, 4), (2, 0))
if skl :
plt.plot(x, np.exp(yA.score_samples(x)), lw=3, c=c)
plt.fill_between(x.squeeze(), np.exp(yA.score_samples(x)), color=c, alpha=0.5)
else :
plt.hist(s["A"], normed = True, alpha = 0.7)
plt.title("Freq. A")
plt.xlim([0, 1])
plt.subplot2grid((3, 4), (2, 1))
if skl :
plt.plot(x, np.exp(yC.score_samples(x)), lw=3, c=c)
plt.fill_between(x.squeeze(), np.exp(yC.score_samples(x)), color=c, alpha=0.5)
else :
plt.hist(s["C"], normed = True, alpha = 0.7)
plt.title("Freq. C")
plt.xlim([0, 1])
plt.subplot2grid((3, 4), (2, 2))
if skl :
plt.plot(x, np.exp(yG.score_samples(x)), lw=3, c=c)
plt.fill_between(x.squeeze(), np.exp(yG.score_samples(x)), color=c, alpha=0.5)
else :
plt.hist(s["G"], normed = True, alpha = 0.7)
plt.title("Freq. G")
plt.xlim([0, 1])
plt.subplot2grid((3, 4), (2, 3))
if skl :
plt.plot(x, np.exp(yT.score_samples(x)), lw=3, c=c)
plt.fill_between(x.squeeze(), np.exp(yT.score_samples(x)), color=c, alpha=0.5)
else :
plt.hist(s["T"], normed = True, alpha = 0.7)
plt.title("Freq. T")
plt.xlim([0, 1])
# Transition Matrix
plt.subplot2grid((3, 4), (0, 2), colspan=2, rowspan=2)
plt.imshow(s["transition"], interpolation="nearest", cmap="hot")
plt.colorbar()
plt.title("Transition Matrix")
plt.xticks([0, 1, 2, 3], ["A", "C", "G", "T"])
plt.yticks([0, 1, 2, 3], ["A", "C", "G", "T"])
plt.grid(False)
plt.tight_layout()
plt.show()
def phylotree(filename, nucleotide_frequency, bootstrap, search_algorithm) :
command = ["phyml", "-d", "nt", "-m", "GTR", "-v", "e"]
# Input file
command.append("-i")
command.append(os.path.splitext(filename)[0] + "_aligned_genepy.phy")
# Nucleotide frequencies
command.append("-f")
if nucleotide_frequency == "empirical" :
command.append("e")
elif nucleotide_frequency == "max_likelihood" :
command.append("m")
else :
print "WARNING : Unrecognised option for nucleotide_frequency; setting to empirical."
command.append("e")
# Bootstrapping
command.append("-b")
command.append(str(bootstrap))
# Search algorithm
command.append("-s")
if search_algorithm == "SPR" :
command.append("SPR")
elif search_algorithm == "NNI" :
command.append("NNI")
elif search_algorithm == "BEST" :
command.append("BEST")
else :
print "WARNING : Unrecognised option for search_algorithm; setting to BEST."
command.append("BEST")
print " ".join(command)
if bootstrap > 1 :
try :
command.insert(0, "8")
command.insert(0, "-np")
command.insert(0, "mpirun")
call(command)
except OSError :
print "MPI not detected; running non-parallelised reconstruction."
call(command[3:])
else :
call(command)
def trimalignment(alignment, array = None, left = None, right = None) :
if array is not None :
array[np.where(array > 0)] = 1
density = np.sum(array, axis=0)
# Currently, no auto-guessing. Soon !
else :
X = []
for seq in alignment :
X.append(seq[left:right])
return X
def dropempties(alignment, fraction) :
def density(seq) :
known = seq.seq.count("A") + \
seq.seq.count("C") + \
seq.seq.count("G") + \
seq.seq.count("T")
return known / float(len(seq))
# Count ACGT
return [seq for seq in alignment if density(seq) > fraction]
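# Hedged end-to-end sketch (not part of the original module); "my_seqs.fasta"
# is a placeholder input file and the align() flags shown are one reasonable
# combination:
#
#   seqs = readalignment("my_seqs.fasta")
#   align("my_seqs.fasta", force=True, threads=None,
#         full=False, full_iter=False, it=None, auto=True)
#   aligned = readalignment("my_seqs_aligned_genepy.phy")
#   showalignment(alignmentarray(aligned))
#   stats(calcstats(aligned))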
| mit | -5,876,318,508,511,528,000 | 14.744068 | 124 | 0.611907 | false |
stefanv/aandete | app/lib/paste/script/serve.py | 1 | 23162 | from __future__ import print_function
# (c) 2005 Ian Bicking and contributors; written for Paste (http://pythonpaste.org)
# Licensed under the MIT license: http://www.opensource.org/licenses/mit-license.php
# @@: This should be moved to paste.deploy
# For discussion of daemonizing:
# http://aspn.activestate.com/ASPN/Cookbook/Python/Recipe/278731
# Code taken also from QP:
# http://www.mems-exchange.org/software/qp/
# From lib/site.py
import atexit
import errno
import logging
import os
import re
import subprocess
import sys
import threading
import time
import traceback
from paste.deploy import loadapp, loadserver
from paste.script.command import Command, BadCommand
MAXFD = 1024
jython = sys.platform.startswith('java')
class DaemonizeException(Exception):
pass
class ServeCommand(Command):
min_args = 0
usage = 'CONFIG_FILE [start|stop|restart|status] [var=value]'
takes_config_file = 1
summary = "Serve the described application"
description = """\
This command serves a web application that uses a paste.deploy
configuration file for the server and application.
If start/stop/restart is given, then --daemon is implied, and it will
start (normal operation), stop (--stop-daemon), or do both.
You can also include variable assignments like 'http_port=8080'
and then use %(http_port)s in your config files.
"""
# used by subclasses that configure apps and servers differently
requires_config_file = True
parser = Command.standard_parser(quiet=True)
parser.add_option('-n', '--app-name',
dest='app_name',
metavar='NAME',
help="Load the named application (default main)")
parser.add_option('-s', '--server',
dest='server',
metavar='SERVER_TYPE',
help="Use the named server.")
parser.add_option('--server-name',
dest='server_name',
metavar='SECTION_NAME',
help="Use the named server as defined in the configuration file (default: main)")
if hasattr(os, 'fork'):
parser.add_option('--daemon',
dest="daemon",
action="store_true",
help="Run in daemon (background) mode")
parser.add_option('--pid-file',
dest='pid_file',
metavar='FILENAME',
help="Save PID to file (default to paster.pid if running in daemon mode)")
parser.add_option('--log-file',
dest='log_file',
metavar='LOG_FILE',
help="Save output to the given log file (redirects stdout)")
parser.add_option('--reload',
dest='reload',
action='store_true',
help="Use auto-restart file monitor")
parser.add_option('--reload-interval',
dest='reload_interval',
default=1,
help="Seconds between checking files (low number can cause significant CPU usage)")
parser.add_option('--monitor-restart',
dest='monitor_restart',
action='store_true',
help="Auto-restart server if it dies")
parser.add_option('--status',
action='store_true',
dest='show_status',
help="Show the status of the (presumably daemonized) server")
if hasattr(os, 'setuid'):
# I don't think these are available on Windows
parser.add_option('--user',
dest='set_user',
metavar="USERNAME",
help="Set the user (usually only possible when run as root)")
parser.add_option('--group',
dest='set_group',
metavar="GROUP",
help="Set the group (usually only possible when run as root)")
parser.add_option('--stop-daemon',
dest='stop_daemon',
action='store_true',
help='Stop a daemonized server (given a PID file, or default paster.pid file)')
if jython:
parser.add_option('--disable-jython-reloader',
action='store_true',
dest='disable_jython_reloader',
help="Disable the Jython reloader")
_scheme_re = re.compile(r'^[a-z][a-z]+:', re.I)
default_verbosity = 1
_reloader_environ_key = 'PYTHON_RELOADER_SHOULD_RUN'
_monitor_environ_key = 'PASTE_MONITOR_SHOULD_RUN'
possible_subcommands = ('start', 'stop', 'restart', 'status')
def command(self):
if self.options.stop_daemon:
return self.stop_daemon()
if not hasattr(self.options, 'set_user'):
# Windows case:
self.options.set_user = self.options.set_group = None
# @@: Is this the right stage to set the user at?
self.change_user_group(
self.options.set_user, self.options.set_group)
if self.requires_config_file:
if not self.args:
raise BadCommand('You must give a config file')
app_spec = self.args[0]
if (len(self.args) > 1
and self.args[1] in self.possible_subcommands):
cmd = self.args[1]
restvars = self.args[2:]
else:
cmd = None
restvars = self.args[1:]
else:
app_spec = ""
if (self.args
and self.args[0] in self.possible_subcommands):
cmd = self.args[0]
restvars = self.args[1:]
else:
cmd = None
restvars = self.args[:]
if (getattr(self.options, 'daemon', False)
and getattr(self.options, 'reload', False)):
raise BadCommand('The --daemon and --reload options may not be used together')
jython_monitor = False
if self.options.reload:
if jython and not self.options.disable_jython_reloader:
# JythonMonitor raises the special SystemRestart
# exception that'll cause the Jython interpreter to
# reload in the existing Java process (avoiding
# subprocess startup time)
try:
from paste.reloader import JythonMonitor
except ImportError:
pass
else:
jython_monitor = JythonMonitor(poll_interval=int(
self.options.reload_interval))
if self.requires_config_file:
jython_monitor.watch_file(self.args[0])
if not jython_monitor:
if os.environ.get(self._reloader_environ_key):
from paste import reloader
if self.verbose > 1:
print('Running reloading file monitor')
reloader.install(int(self.options.reload_interval))
if self.requires_config_file:
reloader.watch_file(self.args[0])
else:
return self.restart_with_reloader()
if cmd not in (None, 'start', 'stop', 'restart', 'status'):
raise BadCommand(
'Error: must give start|stop|restart (not %s)' % cmd)
if cmd == 'status' or self.options.show_status:
return self.show_status()
if cmd == 'restart' or cmd == 'stop':
result = self.stop_daemon()
if result:
if cmd == 'restart':
print("Could not stop daemon; aborting")
else:
print("Could not stop daemon")
return result
if cmd == 'stop':
return result
self.options.daemon = True
if cmd == 'start':
self.options.daemon = True
app_name = self.options.app_name
vars = self.parse_vars(restvars)
if not self._scheme_re.search(app_spec):
app_spec = 'config:' + app_spec
server_name = self.options.server_name
if self.options.server:
server_spec = 'egg:PasteScript'
assert server_name is None
server_name = self.options.server
else:
server_spec = app_spec
base = os.getcwd()
if getattr(self.options, 'daemon', False):
if not self.options.pid_file:
self.options.pid_file = 'paster.pid'
if not self.options.log_file:
self.options.log_file = 'paster.log'
# Ensure the log file is writeable
if self.options.log_file:
try:
writeable_log_file = open(self.options.log_file, 'a')
except IOError as ioe:
msg = 'Error: Unable to write to log file: %s' % ioe
raise BadCommand(msg)
writeable_log_file.close()
# Ensure the pid file is writeable
if self.options.pid_file:
try:
writeable_pid_file = open(self.options.pid_file, 'a')
except IOError as ioe:
msg = 'Error: Unable to write to pid file: %s' % ioe
raise BadCommand(msg)
writeable_pid_file.close()
if getattr(self.options, 'daemon', False):
try:
self.daemonize()
except DaemonizeException as ex:
if self.verbose > 0:
print(str(ex))
return
if (self.options.monitor_restart
and not os.environ.get(self._monitor_environ_key)):
return self.restart_with_monitor()
if self.options.pid_file:
self.record_pid(self.options.pid_file)
if self.options.log_file:
stdout_log = LazyWriter(self.options.log_file, 'a')
sys.stdout = stdout_log
sys.stderr = stdout_log
logging.basicConfig(stream=stdout_log)
log_fn = app_spec
if log_fn.startswith('config:'):
log_fn = app_spec[len('config:'):]
elif log_fn.startswith('egg:'):
log_fn = None
if log_fn:
log_fn = os.path.join(base, log_fn)
self.logging_file_config(log_fn)
try:
server = self.loadserver(server_spec, name=server_name,
relative_to=base, global_conf=vars)
app = self.loadapp(app_spec, name=app_name,
relative_to=base, global_conf=vars)
except SyntaxError as e:
if self.options.reload and os.environ.get(self._reloader_environ_key):
traceback.print_exc()
reloader.watch_file(e.filename)
while True:
time.sleep(60*60)
else:
raise
if self.verbose > 0:
if hasattr(os, 'getpid'):
msg = 'Starting server in PID %i.' % os.getpid()
else:
msg = 'Starting server.'
print(msg)
def serve():
try:
server(app)
except (SystemExit, KeyboardInterrupt) as e:
if self.verbose > 1:
raise
if str(e):
msg = ' '+str(e)
else:
msg = ''
print('Exiting%s (-v to see traceback)' % msg)
if jython_monitor:
# JythonMonitor has to be ran from the main thread
threading.Thread(target=serve).start()
print('Starting Jython file monitor')
jython_monitor.periodic_reload()
else:
serve()
def loadserver(self, server_spec, name, relative_to, **kw):
return loadserver(
server_spec, name=name,
relative_to=relative_to, **kw)
def loadapp(self, app_spec, name, relative_to, **kw):
return loadapp(
app_spec, name=name, relative_to=relative_to,
**kw)
def daemonize(self):
pid = live_pidfile(self.options.pid_file)
if pid:
raise DaemonizeException(
"Daemon is already running (PID: %s from PID file %s)"
% (pid, self.options.pid_file))
if self.verbose > 0:
print('Entering daemon mode')
pid = os.fork()
if pid:
# The forked process also has a handle on resources, so we
# *don't* want proper termination of the process, we just
# want to exit quick (which os._exit() does)
os._exit(0)
# Make this the session leader
os.setsid()
# Fork again for good measure!
pid = os.fork()
if pid:
os._exit(0)
# @@: Should we set the umask and cwd now?
import resource # Resource usage information.
maxfd = resource.getrlimit(resource.RLIMIT_NOFILE)[1]
if (maxfd == resource.RLIM_INFINITY):
maxfd = MAXFD
# Iterate through and close all file descriptors.
for fd in range(0, maxfd):
try:
os.close(fd)
except OSError: # ERROR, fd wasn't open to begin with (ignored)
pass
if (hasattr(os, "devnull")):
REDIRECT_TO = os.devnull
else:
REDIRECT_TO = "/dev/null"
os.open(REDIRECT_TO, os.O_RDWR) # standard input (0)
# Duplicate standard input to standard output and standard error.
os.dup2(0, 1) # standard output (1)
os.dup2(0, 2) # standard error (2)
def record_pid(self, pid_file):
pid = os.getpid()
if self.verbose > 1:
print('Writing PID %s to %s' % (pid, pid_file))
f = open(pid_file, 'w')
f.write(str(pid))
f.close()
atexit.register(_remove_pid_file, pid, pid_file, self.verbose)
def stop_daemon(self):
pid_file = self.options.pid_file or 'paster.pid'
if not os.path.exists(pid_file):
print('No PID file exists in %s' % pid_file)
return 1
pid = read_pidfile(pid_file)
if not pid:
print("Not a valid PID file in %s" % pid_file)
return 1
pid = live_pidfile(pid_file)
if not pid:
print("PID in %s is not valid (deleting)" % pid_file)
try:
os.unlink(pid_file)
except (OSError, IOError) as e:
print("Could not delete: %s" % e)
return 2
return 1
for j in range(10):
if not live_pidfile(pid_file):
break
import signal
os.kill(pid, signal.SIGINT)
time.sleep(1)
for j in range(10):
if not live_pidfile(pid_file):
break
import signal
os.kill(pid, signal.SIGTERM)
time.sleep(1)
else:
print("failed to kill web process %s" % pid)
return 3
if os.path.exists(pid_file):
os.unlink(pid_file)
return 0
def show_status(self):
pid_file = self.options.pid_file or 'paster.pid'
if not os.path.exists(pid_file):
print('No PID file %s' % pid_file)
return 1
pid = read_pidfile(pid_file)
if not pid:
print('No PID in file %s' % pid_file)
return 1
pid = live_pidfile(pid_file)
if not pid:
print('PID %s in %s is not running' % (pid, pid_file))
return 1
print('Server running in PID %s' % pid)
return 0
def restart_with_reloader(self):
self.restart_with_monitor(reloader=True)
def restart_with_monitor(self, reloader=False):
if self.verbose > 0:
if reloader:
print('Starting subprocess with file monitor')
else:
print('Starting subprocess with monitor parent')
while 1:
args = [self.quote_first_command_arg(sys.executable)] + sys.argv
new_environ = os.environ.copy()
if reloader:
new_environ[self._reloader_environ_key] = 'true'
else:
new_environ[self._monitor_environ_key] = 'true'
proc = None
try:
try:
_turn_sigterm_into_systemexit()
proc = subprocess.Popen(args, env=new_environ)
exit_code = proc.wait()
proc = None
except KeyboardInterrupt:
print('^C caught in monitor process')
if self.verbose > 1:
raise
return 1
finally:
if (proc is not None
and hasattr(os, 'kill')):
import signal
try:
os.kill(proc.pid, signal.SIGTERM)
except (OSError, IOError):
pass
if reloader:
# Reloader always exits with code 3; but if we are
# a monitor, any exit code will restart
if exit_code != 3:
return exit_code
if self.verbose > 0:
print('-'*20, 'Restarting', '-'*20)
def change_user_group(self, user, group):
if not user and not group:
return
import pwd, grp
uid = gid = None
if group:
try:
gid = int(group)
group = grp.getgrgid(gid).gr_name
except ValueError:
try:
entry = grp.getgrnam(group)
except KeyError:
raise BadCommand(
"Bad group: %r; no such group exists" % group)
gid = entry.gr_gid
try:
uid = int(user)
user = pwd.getpwuid(uid).pw_name
except ValueError:
try:
entry = pwd.getpwnam(user)
except KeyError:
raise BadCommand(
"Bad username: %r; no such user exists" % user)
if not gid:
gid = entry.pw_gid
uid = entry.pw_uid
if self.verbose > 0:
print('Changing user to %s:%s (%s:%s)' % (
user, group or '(unknown)', uid, gid))
if hasattr(os, 'initgroups'):
os.initgroups(user, gid)
else:
os.setgroups([e.gr_gid for e in grp.getgrall()
if user in e.gr_mem] + [gid])
if gid:
os.setgid(gid)
if uid:
os.setuid(uid)
class LazyWriter(object):
"""
File-like object that opens a file lazily when it is first written
to.
"""
def __init__(self, filename, mode='w'):
self.filename = filename
self.fileobj = None
self.lock = threading.Lock()
self.mode = mode
def open(self):
if self.fileobj is None:
self.lock.acquire()
try:
if self.fileobj is None:
self.fileobj = open(self.filename, self.mode)
finally:
self.lock.release()
return self.fileobj
def write(self, text):
fileobj = self.open()
fileobj.write(text)
fileobj.flush()
def writelines(self, text):
fileobj = self.open()
fileobj.writelines(text)
fileobj.flush()
def flush(self):
self.open().flush()
def live_pidfile(pidfile):
"""(pidfile:str) -> int | None
Returns an int found in the named file, if there is one,
and if there is a running process with that process id.
Return None if no such process exists.
"""
pid = read_pidfile(pidfile)
if pid:
try:
os.kill(int(pid), 0)
return pid
except OSError as e:
if e.errno == errno.EPERM:
return pid
return None
def read_pidfile(filename):
if os.path.exists(filename):
try:
f = open(filename)
content = f.read()
f.close()
return int(content.strip())
except (ValueError, IOError):
return None
else:
return None
def _remove_pid_file(written_pid, filename, verbosity):
current_pid = os.getpid()
if written_pid != current_pid:
# A forked process must be exiting, not the process that
# wrote the PID file
return
if not os.path.exists(filename):
return
f = open(filename)
content = f.read().strip()
f.close()
try:
pid_in_file = int(content)
except ValueError:
pass
else:
if pid_in_file != current_pid:
print("PID file %s contains %s, not expected PID %s" % (
filename, pid_in_file, current_pid))
return
if verbosity > 0:
print("Removing PID file %s" % filename)
try:
os.unlink(filename)
return
except OSError as e:
# Record, but don't give traceback
print("Cannot remove PID file: %s" % e)
# well, at least lets not leave the invalid PID around...
try:
f = open(filename, 'w')
f.write('')
f.close()
except OSError as e:
        print('Stale PID left in file: %s (%s)' % (filename, e))
else:
print('Stale PID removed')
def ensure_port_cleanup(bound_addresses, maxtries=30, sleeptime=2):
"""
This makes sure any open ports are closed.
Does this by connecting to them until they give connection
refused. Servers should call like::
import paste.script
ensure_port_cleanup([80, 443])
"""
atexit.register(_cleanup_ports, bound_addresses, maxtries=maxtries,
sleeptime=sleeptime)
def _cleanup_ports(bound_addresses, maxtries=30, sleeptime=2):
# Wait for the server to bind to the port.
import socket
import errno
for bound_address in bound_addresses:
for attempt in range(maxtries):
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
try:
sock.connect(bound_address)
except socket.error as e:
if e.args[0] != errno.ECONNREFUSED:
raise
break
else:
time.sleep(sleeptime)
else:
raise SystemExit('Timeout waiting for port.')
sock.close()
def _turn_sigterm_into_systemexit():
"""
Attempts to turn a SIGTERM exception into a SystemExit exception.
"""
try:
import signal
except ImportError:
return
def handle_term(signo, frame):
raise SystemExit
signal.signal(signal.SIGTERM, handle_term)
| bsd-3-clause | 597,398,015,663,908,700 | 33.673653 | 105 | 0.520033 | false |
ProfessorX/Config | .PyCharm30/system/python_stubs/-1247971765/PyQt4/QtGui/__init__.py | 1 | 20059 | # encoding: utf-8
# module PyQt4.QtGui
# from /usr/lib/python3/dist-packages/PyQt4/QtGui.cpython-34m-x86_64-linux-gnu.so
# by generator 1.135
# no doc
# imports
import PyQt4.QtCore as __PyQt4_QtCore
# functions
def qAlpha(p_int): # real signature unknown; restored from __doc__
""" qAlpha(int) -> int """
return 0
def qBlue(p_int): # real signature unknown; restored from __doc__
""" qBlue(int) -> int """
return 0
def qDrawBorderPixmap(QPainter, QRect, QMargins, QPixmap): # real signature unknown; restored from __doc__
""" qDrawBorderPixmap(QPainter, QRect, QMargins, QPixmap) """
pass
def qDrawPlainRect(QPainter, p_int, p_int_1, p_int_2, p_int_3, QColor, int_lineWidth=1, QBrush_fill=None): # real signature unknown; restored from __doc__
"""
qDrawPlainRect(QPainter, int, int, int, int, QColor, int lineWidth=1, QBrush fill=None)
qDrawPlainRect(QPainter, QRect, QColor, int lineWidth=1, QBrush fill=None)
"""
pass
def qDrawShadeLine(QPainter, p_int, p_int_1, p_int_2, p_int_3, QPalette, bool_sunken=True, int_lineWidth=1, int_midLineWidth=0): # real signature unknown; restored from __doc__
"""
qDrawShadeLine(QPainter, int, int, int, int, QPalette, bool sunken=True, int lineWidth=1, int midLineWidth=0)
qDrawShadeLine(QPainter, QPoint, QPoint, QPalette, bool sunken=True, int lineWidth=1, int midLineWidth=0)
"""
pass
def qDrawShadePanel(QPainter, p_int, p_int_1, p_int_2, p_int_3, QPalette, bool_sunken=False, int_lineWidth=1, QBrush_fill=None): # real signature unknown; restored from __doc__
"""
qDrawShadePanel(QPainter, int, int, int, int, QPalette, bool sunken=False, int lineWidth=1, QBrush fill=None)
qDrawShadePanel(QPainter, QRect, QPalette, bool sunken=False, int lineWidth=1, QBrush fill=None)
"""
pass
def qDrawShadeRect(QPainter, p_int, p_int_1, p_int_2, p_int_3, QPalette, bool_sunken=False, int_lineWidth=1, int_midLineWidth=0, QBrush_fill=None): # real signature unknown; restored from __doc__
"""
qDrawShadeRect(QPainter, int, int, int, int, QPalette, bool sunken=False, int lineWidth=1, int midLineWidth=0, QBrush fill=None)
qDrawShadeRect(QPainter, QRect, QPalette, bool sunken=False, int lineWidth=1, int midLineWidth=0, QBrush fill=None)
"""
pass
def qDrawWinButton(QPainter, p_int, p_int_1, p_int_2, p_int_3, QPalette, bool_sunken=False, QBrush_fill=None): # real signature unknown; restored from __doc__
"""
qDrawWinButton(QPainter, int, int, int, int, QPalette, bool sunken=False, QBrush fill=None)
qDrawWinButton(QPainter, QRect, QPalette, bool sunken=False, QBrush fill=None)
"""
pass
def qDrawWinPanel(QPainter, p_int, p_int_1, p_int_2, p_int_3, QPalette, bool_sunken=False, QBrush_fill=None): # real signature unknown; restored from __doc__
"""
qDrawWinPanel(QPainter, int, int, int, int, QPalette, bool sunken=False, QBrush fill=None)
qDrawWinPanel(QPainter, QRect, QPalette, bool sunken=False, QBrush fill=None)
"""
pass
def qFuzzyCompare(QMatrix, QMatrix_1): # real signature unknown; restored from __doc__
"""
qFuzzyCompare(QMatrix, QMatrix) -> bool
qFuzzyCompare(QMatrix4x4, QMatrix4x4) -> bool
qFuzzyCompare(QQuaternion, QQuaternion) -> bool
qFuzzyCompare(QTransform, QTransform) -> bool
qFuzzyCompare(QVector2D, QVector2D) -> bool
qFuzzyCompare(QVector3D, QVector3D) -> bool
qFuzzyCompare(QVector4D, QVector4D) -> bool
"""
return False
def qGray(p_int, p_int_1, p_int_2): # real signature unknown; restored from __doc__
"""
qGray(int, int, int) -> int
qGray(int) -> int
"""
return 0
def qGreen(p_int): # real signature unknown; restored from __doc__
""" qGreen(int) -> int """
return 0
def qIsGray(p_int): # real signature unknown; restored from __doc__
""" qIsGray(int) -> bool """
return False
def qRed(p_int): # real signature unknown; restored from __doc__
""" qRed(int) -> int """
return 0
def qRgb(p_int, p_int_1, p_int_2): # real signature unknown; restored from __doc__
""" qRgb(int, int, int) -> int """
return 0
def qRgba(p_int, p_int_1, p_int_2, p_int_3): # real signature unknown; restored from __doc__
""" qRgba(int, int, int, int) -> int """
return 0
def qSwap(QBitmap, QBitmap_1): # real signature unknown; restored from __doc__
"""
qSwap(QBitmap, QBitmap)
qSwap(QBrush, QBrush)
qSwap(QIcon, QIcon)
qSwap(QImage, QImage)
qSwap(QKeySequence, QKeySequence)
qSwap(QPen, QPen)
qSwap(QPicture, QPicture)
qSwap(QPixmap, QPixmap)
"""
pass
def qt_x11_wait_for_window_manager(QWidget): # real signature unknown; restored from __doc__
""" qt_x11_wait_for_window_manager(QWidget) """
pass
# classes
from .Display import Display
from .QPaintDevice import QPaintDevice
from .QWidget import QWidget
from .QAbstractButton import QAbstractButton
from .QGraphicsItem import QGraphicsItem
from .QAbstractGraphicsShapeItem import QAbstractGraphicsShapeItem
from .QAbstractItemDelegate import QAbstractItemDelegate
from .QFrame import QFrame
from .QAbstractScrollArea import QAbstractScrollArea
from .QAbstractItemView import QAbstractItemView
from .QDialog import QDialog
from .QAbstractPrintDialog import QAbstractPrintDialog
from .QAbstractProxyModel import QAbstractProxyModel
from .QAbstractSlider import QAbstractSlider
from .QAbstractSpinBox import QAbstractSpinBox
from .QAbstractTextDocumentLayout import QAbstractTextDocumentLayout
from .QAction import QAction
from .QActionEvent import QActionEvent
from .QActionGroup import QActionGroup
from .QApplication import QApplication
from .QPixmap import QPixmap
from .QBitmap import QBitmap
from .QLayoutItem import QLayoutItem
from .QLayout import QLayout
from .QBoxLayout import QBoxLayout
from .QBrush import QBrush
from .QButtonGroup import QButtonGroup
from .QCalendarWidget import QCalendarWidget
from .QCheckBox import QCheckBox
from .QClipboard import QClipboard
from .QCloseEvent import QCloseEvent
from .QColor import QColor
from .QColorDialog import QColorDialog
from .QColumnView import QColumnView
from .QComboBox import QComboBox
from .QPushButton import QPushButton
from .QCommandLinkButton import QCommandLinkButton
from .QStyle import QStyle
from .QCommonStyle import QCommonStyle
from .QCompleter import QCompleter
from .QGradient import QGradient
from .QConicalGradient import QConicalGradient
from .QInputEvent import QInputEvent
from .QContextMenuEvent import QContextMenuEvent
from .QCursor import QCursor
from .QDataWidgetMapper import QDataWidgetMapper
from .QDateTimeEdit import QDateTimeEdit
from .QDateEdit import QDateEdit
from .QDesktopServices import QDesktopServices
from .QDesktopWidget import QDesktopWidget
from .QDial import QDial
from .QDialogButtonBox import QDialogButtonBox
from .QDirModel import QDirModel
from .QDockWidget import QDockWidget
from .QDoubleSpinBox import QDoubleSpinBox
from .QValidator import QValidator
from .QDoubleValidator import QDoubleValidator
from .QDrag import QDrag
from .QMimeSource import QMimeSource
from .QDropEvent import QDropEvent
from .QDragMoveEvent import QDragMoveEvent
from .QDragEnterEvent import QDragEnterEvent
from .QDragLeaveEvent import QDragLeaveEvent
from .QErrorMessage import QErrorMessage
from .QFileDialog import QFileDialog
from .QFileIconProvider import QFileIconProvider
from .QFileOpenEvent import QFileOpenEvent
from .QFileSystemModel import QFileSystemModel
from .QFocusEvent import QFocusEvent
from .QFocusFrame import QFocusFrame
from .QFont import QFont
from .QFontComboBox import QFontComboBox
from .QFontDatabase import QFontDatabase
from .QFontDialog import QFontDialog
from .QFontInfo import QFontInfo
from .QFontMetrics import QFontMetrics
from .QFontMetricsF import QFontMetricsF
from .QFormLayout import QFormLayout
from .QGesture import QGesture
from .QGestureEvent import QGestureEvent
from .QGestureRecognizer import QGestureRecognizer
from .QGlyphRun import QGlyphRun
from .QGraphicsAnchor import QGraphicsAnchor
from .QGraphicsLayoutItem import QGraphicsLayoutItem
from .QGraphicsLayout import QGraphicsLayout
from .QGraphicsAnchorLayout import QGraphicsAnchorLayout
from .QGraphicsEffect import QGraphicsEffect
from .QGraphicsBlurEffect import QGraphicsBlurEffect
from .QGraphicsColorizeEffect import QGraphicsColorizeEffect
from .QGraphicsDropShadowEffect import QGraphicsDropShadowEffect
from .QGraphicsEllipseItem import QGraphicsEllipseItem
from .QGraphicsGridLayout import QGraphicsGridLayout
from .QGraphicsItemAnimation import QGraphicsItemAnimation
from .QGraphicsItemGroup import QGraphicsItemGroup
from .QGraphicsLinearLayout import QGraphicsLinearLayout
from .QGraphicsLineItem import QGraphicsLineItem
from .QGraphicsObject import QGraphicsObject
from .QGraphicsOpacityEffect import QGraphicsOpacityEffect
from .QGraphicsPathItem import QGraphicsPathItem
from .QGraphicsPixmapItem import QGraphicsPixmapItem
from .QGraphicsPolygonItem import QGraphicsPolygonItem
from .QGraphicsWidget import QGraphicsWidget
from .QGraphicsProxyWidget import QGraphicsProxyWidget
from .QGraphicsRectItem import QGraphicsRectItem
from .QGraphicsTransform import QGraphicsTransform
from .QGraphicsRotation import QGraphicsRotation
from .QGraphicsScale import QGraphicsScale
from .QGraphicsScene import QGraphicsScene
from .QGraphicsSceneEvent import QGraphicsSceneEvent
from .QGraphicsSceneContextMenuEvent import QGraphicsSceneContextMenuEvent
from .QGraphicsSceneDragDropEvent import QGraphicsSceneDragDropEvent
from .QGraphicsSceneHelpEvent import QGraphicsSceneHelpEvent
from .QGraphicsSceneHoverEvent import QGraphicsSceneHoverEvent
from .QGraphicsSceneMouseEvent import QGraphicsSceneMouseEvent
from .QGraphicsSceneMoveEvent import QGraphicsSceneMoveEvent
from .QGraphicsSceneResizeEvent import QGraphicsSceneResizeEvent
from .QGraphicsSceneWheelEvent import QGraphicsSceneWheelEvent
from .QGraphicsSimpleTextItem import QGraphicsSimpleTextItem
from .QGraphicsTextItem import QGraphicsTextItem
from .QGraphicsView import QGraphicsView
from .QGridLayout import QGridLayout
from .QGroupBox import QGroupBox
from .QHBoxLayout import QHBoxLayout
from .QHeaderView import QHeaderView
from .QHelpEvent import QHelpEvent
from .QHideEvent import QHideEvent
from .QHoverEvent import QHoverEvent
from .QIcon import QIcon
from .QIconDragEvent import QIconDragEvent
from .QIconEngine import QIconEngine
from .QIconEngineV2 import QIconEngineV2
from .QIdentityProxyModel import QIdentityProxyModel
from .QImage import QImage
from .QImageIOHandler import QImageIOHandler
from .QImageReader import QImageReader
from .QImageWriter import QImageWriter
from .QInputContext import QInputContext
from .QInputContextFactory import QInputContextFactory
from .QInputDialog import QInputDialog
from .QInputMethodEvent import QInputMethodEvent
from .QIntValidator import QIntValidator
from .QItemDelegate import QItemDelegate
from .QItemEditorCreatorBase import QItemEditorCreatorBase
from .QItemEditorFactory import QItemEditorFactory
from .QItemSelection import QItemSelection
from .QItemSelectionModel import QItemSelectionModel
from .QItemSelectionRange import QItemSelectionRange
from .QKeyEvent import QKeyEvent
from .QKeyEventTransition import QKeyEventTransition
from .QKeySequence import QKeySequence
from .QLabel import QLabel
from .QLCDNumber import QLCDNumber
from .QLinearGradient import QLinearGradient
from .QLineEdit import QLineEdit
from .QListView import QListView
from .QListWidget import QListWidget
from .QListWidgetItem import QListWidgetItem
from .QMainWindow import QMainWindow
from .QMatrix import QMatrix
from .QMatrix2x2 import QMatrix2x2
from .QMatrix2x3 import QMatrix2x3
from .QMatrix2x4 import QMatrix2x4
from .QMatrix3x2 import QMatrix3x2
from .QMatrix3x3 import QMatrix3x3
from .QMatrix3x4 import QMatrix3x4
from .QMatrix4x2 import QMatrix4x2
from .QMatrix4x3 import QMatrix4x3
from .QMatrix4x4 import QMatrix4x4
from .QMdiArea import QMdiArea
from .QMdiSubWindow import QMdiSubWindow
from .QMenu import QMenu
from .QMenuBar import QMenuBar
from .QMessageBox import QMessageBox
from .QMouseEvent import QMouseEvent
from .QMouseEventTransition import QMouseEventTransition
from .QMoveEvent import QMoveEvent
from .QMovie import QMovie
from .QPageSetupDialog import QPageSetupDialog
from .QPaintEngine import QPaintEngine
from .QPaintEngineState import QPaintEngineState
from .QPainter import QPainter
from .QPainterPath import QPainterPath
from .QPainterPathStroker import QPainterPathStroker
from .QPaintEvent import QPaintEvent
from .QPalette import QPalette
from .QPanGesture import QPanGesture
from .QPen import QPen
from .QPicture import QPicture
from .QPictureIO import QPictureIO
from .QPinchGesture import QPinchGesture
from .QPixmapCache import QPixmapCache
from .QPlainTextDocumentLayout import QPlainTextDocumentLayout
from .QPlainTextEdit import QPlainTextEdit
from .QPolygon import QPolygon
from .QPolygonF import QPolygonF
from .QPrintDialog import QPrintDialog
from .QPrintEngine import QPrintEngine
from .QPrinter import QPrinter
from .QPrinterInfo import QPrinterInfo
from .QPrintPreviewDialog import QPrintPreviewDialog
from .QPrintPreviewWidget import QPrintPreviewWidget
from .QProgressBar import QProgressBar
from .QProgressDialog import QProgressDialog
from .QProxyModel import QProxyModel
from .QTextObjectInterface import QTextObjectInterface
from .QPyTextObject import QPyTextObject
from .QQuaternion import QQuaternion
from .QRadialGradient import QRadialGradient
from .QRadioButton import QRadioButton
from .QRawFont import QRawFont
from .QRegExpValidator import QRegExpValidator
from .QRegion import QRegion
from .QResizeEvent import QResizeEvent
from .QRubberBand import QRubberBand
from .QScrollArea import QScrollArea
from .QScrollBar import QScrollBar
from .QSessionManager import QSessionManager
from .QShortcut import QShortcut
from .QShortcutEvent import QShortcutEvent
from .QShowEvent import QShowEvent
from .QSizeGrip import QSizeGrip
from .QSizePolicy import QSizePolicy
from .QSlider import QSlider
from .QSortFilterProxyModel import QSortFilterProxyModel
from .QSound import QSound
from .QSpacerItem import QSpacerItem
from .QSpinBox import QSpinBox
from .QSplashScreen import QSplashScreen
from .QSplitter import QSplitter
from .QSplitterHandle import QSplitterHandle
from .QStackedLayout import QStackedLayout
from .QStackedWidget import QStackedWidget
from .QStandardItem import QStandardItem
from .QStandardItemModel import QStandardItemModel
from .QStaticText import QStaticText
from .QStatusBar import QStatusBar
from .QStatusTipEvent import QStatusTipEvent
from .QStringListModel import QStringListModel
from .QStyledItemDelegate import QStyledItemDelegate
from .QStyleFactory import QStyleFactory
from .QStyleHintReturn import QStyleHintReturn
from .QStyleHintReturnMask import QStyleHintReturnMask
from .QStyleHintReturnVariant import QStyleHintReturnVariant
from .QStyleOption import QStyleOption
from .QStyleOptionButton import QStyleOptionButton
from .QStyleOptionComplex import QStyleOptionComplex
from .QStyleOptionComboBox import QStyleOptionComboBox
from .QStyleOptionDockWidget import QStyleOptionDockWidget
from .QStyleOptionDockWidgetV2 import QStyleOptionDockWidgetV2
from .QStyleOptionFocusRect import QStyleOptionFocusRect
from .QStyleOptionFrame import QStyleOptionFrame
from .QStyleOptionFrameV2 import QStyleOptionFrameV2
from .QStyleOptionFrameV3 import QStyleOptionFrameV3
from .QStyleOptionGraphicsItem import QStyleOptionGraphicsItem
from .QStyleOptionGroupBox import QStyleOptionGroupBox
from .QStyleOptionHeader import QStyleOptionHeader
from .QStyleOptionMenuItem import QStyleOptionMenuItem
from .QStyleOptionProgressBar import QStyleOptionProgressBar
from .QStyleOptionProgressBarV2 import QStyleOptionProgressBarV2
from .QStyleOptionRubberBand import QStyleOptionRubberBand
from .QStyleOptionSizeGrip import QStyleOptionSizeGrip
from .QStyleOptionSlider import QStyleOptionSlider
from .QStyleOptionSpinBox import QStyleOptionSpinBox
from .QStyleOptionTab import QStyleOptionTab
from .QStyleOptionTabBarBase import QStyleOptionTabBarBase
from .QStyleOptionTabBarBaseV2 import QStyleOptionTabBarBaseV2
from .QStyleOptionTabV2 import QStyleOptionTabV2
from .QStyleOptionTabV3 import QStyleOptionTabV3
from .QStyleOptionTabWidgetFrame import QStyleOptionTabWidgetFrame
from .QStyleOptionTabWidgetFrameV2 import QStyleOptionTabWidgetFrameV2
from .QStyleOptionTitleBar import QStyleOptionTitleBar
from .QStyleOptionToolBar import QStyleOptionToolBar
from .QStyleOptionToolBox import QStyleOptionToolBox
from .QStyleOptionToolBoxV2 import QStyleOptionToolBoxV2
from .QStyleOptionToolButton import QStyleOptionToolButton
from .QStyleOptionViewItem import QStyleOptionViewItem
from .QStyleOptionViewItemV2 import QStyleOptionViewItemV2
from .QStyleOptionViewItemV3 import QStyleOptionViewItemV3
from .QStyleOptionViewItemV4 import QStyleOptionViewItemV4
from .QStylePainter import QStylePainter
from .QSwipeGesture import QSwipeGesture
from .QSyntaxHighlighter import QSyntaxHighlighter
from .QSystemTrayIcon import QSystemTrayIcon
from .QTabBar import QTabBar
from .QTabletEvent import QTabletEvent
from .QTableView import QTableView
from .QTableWidget import QTableWidget
from .QTableWidgetItem import QTableWidgetItem
from .QTableWidgetSelectionRange import QTableWidgetSelectionRange
from .QTabWidget import QTabWidget
from .QTapAndHoldGesture import QTapAndHoldGesture
from .QTapGesture import QTapGesture
from .QTextBlock import QTextBlock
from .QTextFormat import QTextFormat
from .QTextBlockFormat import QTextBlockFormat
from .QTextObject import QTextObject
from .QTextBlockGroup import QTextBlockGroup
from .QTextBlockUserData import QTextBlockUserData
from .QTextEdit import QTextEdit
from .QTextBrowser import QTextBrowser
from .QTextCharFormat import QTextCharFormat
from .QTextCursor import QTextCursor
from .QTextDocument import QTextDocument
from .QTextDocumentFragment import QTextDocumentFragment
from .QTextDocumentWriter import QTextDocumentWriter
from .QTextFragment import QTextFragment
from .QTextFrame import QTextFrame
from .QTextFrameFormat import QTextFrameFormat
from .QTextImageFormat import QTextImageFormat
from .QTextInlineObject import QTextInlineObject
from .QTextItem import QTextItem
from .QTextLayout import QTextLayout
from .QTextLength import QTextLength
from .QTextLine import QTextLine
from .QTextList import QTextList
from .QTextListFormat import QTextListFormat
from .QTextOption import QTextOption
from .QTextTable import QTextTable
from .QTextTableCell import QTextTableCell
from .QTextTableCellFormat import QTextTableCellFormat
from .QTextTableFormat import QTextTableFormat
from .QTimeEdit import QTimeEdit
from .QToolBar import QToolBar
from .QToolBox import QToolBox
from .QToolButton import QToolButton
from .QToolTip import QToolTip
from .QTouchEvent import QTouchEvent
from .QTransform import QTransform
from .QTreeView import QTreeView
from .QTreeWidget import QTreeWidget
from .QTreeWidgetItem import QTreeWidgetItem
from .QTreeWidgetItemIterator import QTreeWidgetItemIterator
from .QUndoCommand import QUndoCommand
from .QUndoGroup import QUndoGroup
from .QUndoStack import QUndoStack
from .QUndoView import QUndoView
from .QVBoxLayout import QVBoxLayout
from .QVector2D import QVector2D
from .QVector3D import QVector3D
from .QVector4D import QVector4D
from .QWhatsThis import QWhatsThis
from .QWhatsThisClickedEvent import QWhatsThisClickedEvent
from .QWheelEvent import QWheelEvent
from .QWidgetAction import QWidgetAction
from .QWidgetItem import QWidgetItem
from .QWindowStateChangeEvent import QWindowStateChangeEvent
from .QWizard import QWizard
from .QWizardPage import QWizardPage
from .QWorkspace import QWorkspace
from .QX11EmbedContainer import QX11EmbedContainer
from .QX11EmbedWidget import QX11EmbedWidget
from .QX11Info import QX11Info
# variables with complex values
qApp = None # (!) real value is ''
__loader__ = None # (!) real value is ''
__spec__ = None # (!) real value is ''
| gpl-2.0 | 7,385,498,820,512,413,000 | 40.444215 | 195 | 0.832394 | false |
mcclurmc/juju | juju/unit/tests/test_address.py | 1 | 3698 | import subprocess
import zookeeper
from twisted.internet.defer import inlineCallbacks, succeed, returnValue
from twisted.web import client
from juju.errors import JujuError
from juju.lib.testing import TestCase
from juju.unit.address import (
EC2UnitAddress, LocalUnitAddress, OrchestraUnitAddress, DummyUnitAddress,
get_unit_address)
from juju.state.environment import GlobalSettingsStateManager
class AddressTest(TestCase):
def setUp(self):
zookeeper.set_debug_level(0)
self.client = self.get_zookeeper_client()
return self.client.connect()
@inlineCallbacks
def get_address_for(self, provider_type):
settings = GlobalSettingsStateManager(self.client)
yield settings.set_provider_type(provider_type)
address = yield get_unit_address(self.client)
returnValue(address)
@inlineCallbacks
def test_get_ec2_address(self):
address = yield self.get_address_for("ec2")
self.assertTrue(isinstance(address, EC2UnitAddress))
@inlineCallbacks
def test_get_local_address(self):
address = yield self.get_address_for("local")
self.assertTrue(isinstance(address, LocalUnitAddress))
@inlineCallbacks
def test_get_orchestra_address(self):
address = yield self.get_address_for("orchestra")
self.assertTrue(isinstance(address, OrchestraUnitAddress))
@inlineCallbacks
def test_get_dummy_address(self):
address = yield self.get_address_for("dummy")
self.assertTrue(isinstance(address, DummyUnitAddress))
def test_get_unknown_address(self):
return self.assertFailure(self.get_address_for("foobar"), JujuError)
class DummyAddressTest(TestCase):
def setUp(self):
self.address = DummyUnitAddress()
    @inlineCallbacks
    def test_get_address(self):
self.assertEqual(
(yield self.address.get_public_address()),
"localhost")
self.assertEqual(
(yield self.address.get_private_address()),
"localhost")
class EC2AddressTest(TestCase):
def setUp(self):
self.address = EC2UnitAddress()
@inlineCallbacks
def test_get_address(self):
urls = [
"http://169.254.169.254/latest/meta-data/local-hostname",
"http://169.254.169.254/latest/meta-data/public-hostname"]
def verify_args(url):
self.assertEqual(urls.pop(0), url)
return succeed("foobar\n")
self.patch(client, "getPage", verify_args)
self.assertEqual(
(yield self.address.get_private_address()), "foobar")
self.assertEqual(
(yield self.address.get_public_address()), "foobar")
class LocalAddressTest(TestCase):
def setUp(self):
self.address = LocalUnitAddress()
@inlineCallbacks
def test_get_address(self):
self.patch(
subprocess, "check_output",
lambda args: "192.168.1.122 127.0.0.1\n")
self.assertEqual(
(yield self.address.get_public_address()),
"192.168.1.122")
self.assertEqual(
(yield self.address.get_private_address()),
"192.168.1.122")
class OrchestraAddressTest(TestCase):
def setUp(self):
self.address = OrchestraUnitAddress()
@inlineCallbacks
def test_get_address(self):
self.patch(
subprocess, "check_output",
lambda args: "slice.foobar.domain.net\n")
self.assertEqual(
(yield self.address.get_public_address()),
"slice.foobar.domain.net")
self.assertEqual(
(yield self.address.get_private_address()),
"slice.foobar.domain.net")
| agpl-3.0 | 940,252,071,544,997,400 | 28.349206 | 77 | 0.647918 | false |
weka511/fractals | rn/ca.py | 1 | 1555 | # Copyright (C) 2019 Greenweaves Software Limited
# This is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
# This software is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
# You should have received a copy of the GNU General Public License
# along with GNU Emacs. If not, see <http://www.gnu.org/licenses/>.
def create_rule(N):
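    # Decompose the Wolfram rule number N into its 8 output bits, least
    # significant bit first, so that rule[i] gives the new cell value for
    # the 3-cell neighbourhood whose bits encode the integer i.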
n = N
rule = []
while len(rule)<8:
n,r = divmod(n,2)
rule.append(r)
return rule
def execute_rule(state,rule):
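    # One synchronous update: pad the state with two zero cells on each
    # side, slide a width-3 window over it, and look each neighbourhood up
    # in the rule table; the result is two cells longer than the input.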
extended_state = [0,0] + state + [0,0]
new_state = []
for i in range(len(state)+2):
x = extended_state[i:i+3]
input = 2*(2*x[0] + x[1]) + x[2]
new_state.append(rule[input])
return new_state
def convert(state):
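    # Interpret the list of cells as a binary number, most significant bit first.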
result = 0
for i in state:
result = 2 * result + i
return result
def project(state,table=[0,1,1,0]):
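    # Collapse adjacent cell pairs through a 2-bit lookup table; the default
    # table [0,1,1,0] is XOR, halving the state length.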
return [table[2*state[i]+state[i+1]] for i in range(0,len(state),2)]
if __name__=='__main__':
print (create_rule(105))
#print (create_rule(110))
#print (create_rule(137))
#r = create_rule(28)
#state = [1]
#for i in range(25):
#state = execute_rule(state,r)
    #print (convert(state))
| gpl-3.0 | -711,681,062,969,949,700 | 30.12 | 72 | 0.633441 | false |
inspirehep/invenio-grobid | invenio_grobid/version.py | 1 | 1125 | # -*- coding: utf-8 -*-
#
# This file is part of Invenio.
# Copyright (C) 2015 CERN.
#
# Invenio is free software; you can redistribute it
# and/or modify it under the terms of the GNU General Public License as
# published by the Free Software Foundation; either version 2 of the
# License, or (at your option) any later version.
#
# Invenio is distributed in the hope that it will be
# useful, but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Invenio; if not, write to the
# Free Software Foundation, Inc., 59 Temple Place, Suite 330, Boston,
# MA 02111-1307, USA.
#
# In applying this license, CERN does not
# waive the privileges and immunities granted to it by virtue of its status
# as an Intergovernmental Organization or submit itself to any jurisdiction.
"""Version information for Invenio Grobid.
This file is imported by ``invenio_grobid.__init__``,
and parsed by ``setup.py``.
"""
__version__ = "0.2.0"
| gpl-2.0 | -1,133,102,696,943,651,800 | 35.290323 | 76 | 0.740444 | false |
satra/NiPypeold | nipype/version.py | 1 | 1032 | # emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*-
# vi: set ft=python sts=4 ts=4 sw=4 et:
version = '0.3.1'
release = True
# Return the svn version as a string, raise a ValueError otherwise
# This code was copied from numpy trunk, revision 6873, and modified slightly
def svn_version():
# Placed imports here (instead of top of module) so they're not
# imported in released code
import re
import subprocess
try:
out = subprocess.Popen(['svn', 'info'],
stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
# We only care about stdout
out = out.communicate()[0]
except OSError:
return ""
    # Search for the 'Revision' tag
r = re.compile('Revision: ([0-9]+)')
svnver = ""
for line in out.split('\n'):
m = r.match(line)
if m:
svnver = m.group(1)
return svnver
if not release:
version += '.dev'
svnver = svn_version()
version += svnver
| bsd-3-clause | -6,007,205,910,471,568,000 | 27.666667 | 77 | 0.578488 | false |