code (string, 3-1.05M chars) | repo_name (string, 5-104 chars) | path (string, 4-251 chars) | language (string, 1 class) | license (string, 15 values) | size (int64, 3-1.05M)
---|---|---|---|---|---
"""This namespace aggregates modules related to wavelength calibration.
"""
from .wavelength_calibration import WavelengthCalibrator
| rcbrgs/tuna | tuna/tools/wavelength/__init__.py | Python | gpl-3.0 | 134 |
#!/usr/bin/env python3
from setuptools import setup
setup(
name="svgplease",
version="0.2",
url="https://github.com/sapal/svgplease",
download_url="https://launchpad.net/~sapalskimichal/+archive/svgplease",
license="GPLv3",
author="Michał Sapalski",
author_email="[email protected]",
data_files=[
("", ["__main__.py"]),
("doc", ["../doc/grammar.txt", "../doc/change_detection_algorithm.txt"])
],
description="Command line tool for manipulating svg files.",
    long_description=("svgplease is a command line tool for manipulating svg images. " +
        "The commands can be specified in a simple language whose syntax " +
        "is very similar to standard English."),
zip_safe=True,
classifiers=[
"Topic :: Multimedia :: Graphics",
"Programming Language :: Python :: 3",
"Operating System :: OS Independent",
"License :: OSI Approved :: GNU General Public License v3 (GPLv3)",
"Environment :: Console",
"Development Status :: 2 - Pre-Alpha",
],
platforms="any",
packages=["svgplease", "modgrammar"],
test_suite="tests.all_tests",
include_package_data=False,
install_requires=[]
)
| sapal/svgplease | code/setup.py | Python | gpl-3.0 | 1,246 |
def feed():
    import feedparser
    # Parse the /r/python RSS feed and hand its entries to the view.
    d = feedparser.parse('http://www.reddit.com/r/python/.rss')
    entries = d.entries
    return dict(entries=entries)
| seefor/mycode | feed.py | Python | apache-2.0 | 198 |
import os
import sys
import sqlite3
import logging
from tqdm import tqdm
from pathlib import Path
from whoosh.index import create_in, open_dir
from whoosh.fields import Schema, TEXT, NUMERIC
from whoosh.qparser import QueryParser
from whoosh.spelling import ListCorrector
from whoosh.highlight import UppercaseFormatter
logging.basicConfig(level=logging.INFO)
if getattr(sys, 'frozen', False):
APPLICATION_PATH = os.path.dirname(sys.executable)
elif __file__:
APPLICATION_PATH = os.path.dirname(__file__)
PATH = APPLICATION_PATH
PATH_DATA = Path(PATH) / 'data'
FILE_DB = PATH_DATA / "data.db"
class Searcher:
def __init__(self):
self.scope = 20
self.terms = set()
self.index_path = "index"
self.common_terms = set()
self.schema = Schema(
title=TEXT(stored=True),
path=TEXT(stored=True),
page=NUMERIC(stored=True),
content=TEXT(stored=True))
self.ix = None
self.index_files = False
if not os.path.exists(self.index_path):
os.mkdir(self.index_path)
self.ix = create_in(self.index_path, self.schema)
self.index_files = True
else:
self.ix = open_dir(self.index_path)
self.writer = self.ix.writer()
self.read()
self.writer.commit()
self.searcher = self.ix.searcher()
self.corrector = ListCorrector(sorted(list(self.common_terms)))
self.parser = QueryParser("content", self.ix.schema)
def __enter__(self):
return self
def __exit__(self, exc_type, exc_val, exc_tb):
self.searcher.close()
def search(self, term):
results = []
suggestions = [term]+(self.corrector.suggest(term, limit=5))
for t in suggestions:
query = self.parser.parse(t)
query_res = self.searcher.search(query, limit=100)
query_res.fragmenter.maxchars = 300
query_res.fragmenter.surround = 100
query_res.formatter = UppercaseFormatter()
results.append((t, query_res))
return results
def read(self):
logging.info("Indexing")
con = sqlite3.connect(str(FILE_DB))
cur = con.cursor()
cur.execute(r"SELECT BOOKS.NAME, PAGE, CONTENT "
r"FROM TEXT, BOOKS "
r"WHERE BOOK = BOOKS.ID "
r"ORDER BY BOOKS.NAME, PAGE")
for row in tqdm(cur):
book, page, content = row
book, page, content = str(book), str(page), str(content)
for i in content.split(' '):
self.common_terms.add(i)
if self.index_files:
self.writer.add_document(title=book, content=content, path=book, page=page)
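# Illustrative usage sketch added for clarity (not part of the original module).
# It assumes the data/data.db SQLite file described above already exists; the
# search term "example" is a placeholder.
if __name__ == '__main__':
    with Searcher() as searcher:
        for suggested_term, hits in searcher.search("example"):
            for hit in hits:
                print(suggested_term, '->', hit['title'], 'page', hit['page'])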
| DeastinY/srpdfcrawler | pdf_search.py | Python | gpl-3.0 | 2,775 |
import re
import requests
from collections import deque
from bs4 import BeautifulSoup
from . import db
from .models import Instructor, Department, Course, Term, Rating
def run():
"""A breadth-first search on the graph of Ninja Courses data.
We view each department, course, and instructor as a node,
to be processed in a function determined by PROCESSORMAP. This reads
all the departments, then all the courses, and finally all the
instructors as well as their ratings."""
# there's no way to verify the identity of ratings, since we don't have
# access to their IDs, so we are forced to delete them all before scraping
for rating in Rating.query.all():
db.session.delete(rating)
queue = deque()
queue.append(('root', {"url": 'http://ninjacourses.com/explore/1/'}))
tracked = set()
tracked.add('http://ninjacourses.com/explore/1/')
while len(queue) > 0:
pair = queue.popleft()
for child in processormap[pair[0]](pair[1]):
childurl = child[1]['url']
if childurl not in tracked:
tracked.add(childurl)
queue.append(child)
db.session.commit()
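# Illustrative note (added for clarity): the (kind, data) pairs that flow
# through the queue above look like the following, with hypothetical values:
#   ('dept',       {'department': <Department>, 'url': 'http://ninjacourses.com/...'})
#   ('course',     {'department': <Department>, 'number': '...', 'name': '...', 'url': '...'})
#   ('instructor', {'url': 'http://ninjacourses.com/...'})
# Each kind is dispatched through processormap, and instructors are the leaves.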
def scrape_root(data, limit=None):
"""Reads and stores all departments, then returns a list of
department URL pairs."""
soup = BeautifulSoup(requests.get(data['url']).text)
depts = []
for deptlist in soup.find_all(id=re.compile("^deptlist")):
if limit and len(depts) > limit:
break
for deptentry in deptlist.find_all('li'):
if limit and len(depts) > limit:
break
parts = deptentry.a.string.split(" (")
dept = Department.query.filter_by(name=parts[0]).first()
if dept is None:
dept = Department(name=parts[0], code=parts[1][:-1])
db.session.add(dept)
deptdata = { 'department': dept,
'url': 'http://ninjacourses.com' + deptentry.a['href'] }
depts.append(("dept", deptdata))
return depts
def scrape_dept(data):
"""Return a list of ("course", data) pairs for each course in the
department specified by DATA['url']."""
print data['url']
deptsoup = BeautifulSoup(requests.get(data['url']).text)
courselist = deptsoup.find(id="dept-course-list")
courses = []
if courselist is not None:
for courseentry in courselist.find_all('li'):
for content in courseentry.contents:
try:
if content.startswith(' - '):
name = content[3:]
number = courseentry.a['href'].split("/")[-2]
coursedata = { 'department': data['department'],
'number': number,
'name': name,
'url': 'http://ninjacourses.com' \
+ courseentry.a['href'] }
courses.append(("course", coursedata))
break
except TypeError:
pass
return courses
def scrape_course(data):
"""Initializes the course if it doesn't exist and returns a list of
("instructor", data) pairs."""
print " " + data['url']
instructors = []
coursesoup = BeautifulSoup(requests.get(data['url']).text)
ratingstab = coursesoup.find(id="tab-ratings")
floatdivs = ratingstab.find_all(class_="float-left")
if len(floatdivs) > 1 and floatdivs[1].find("span", class_="count"):
courseid = floatdivs[1].a['href'].split("/")[-2]
course = Course.query.get(courseid)
if course is None:
course = Course(id=courseid,
department=data['department'],
number=data['number'],
name=data['name'])
db.session.add(course)
for instructorentry in coursesoup.find_all(class_="ratings-instructor"):
if instructorentry.find(class_="rating-details-qtip"):
instructorurl = 'http://ninjacourses.com' + instructorentry.a['href']
instructors.append(("instructor", { 'url': instructorurl }))
return instructors
def scrape_instructor(data):
"""Reads any new information about the instructor and calls
scrape_instructor_page to start scraping ratings. This is where
our BFS stops, so we return the empty list to indicate that
this is a leaf node."""
print " " + data['url']
instructorsoup = BeautifulSoup(requests.get(data['url']).text)
countspan = instructorsoup.find(class_="count")
if countspan is not None:
id = int(data['url'].split("/")[-2])
instructor = Instructor.query.get(id)
if instructor is None:
name = instructorsoup.find("span", class_="item fn").string
instructor = Instructor(id=id, name=name)
# reassign ratings even if instructor exists; they may have changed
instructor.numratings = int(countspan.string)
maindiv = instructorsoup.find(class_="main-rating")
nums = maindiv.find_all(class_="rating-number")
instructor.overall = int(nums[0].string)
instructor.assignments = int(nums[1].string)
instructor.exams = int(nums[2].string)
instructor.helpfulness = int(nums[3].string)
instructor.enthusiasm = int(nums[4].string)
db.session.add(instructor)
scrape_instructor_page(instructor, instructorsoup, data['url'])
return []
def scrape_instructor_page(instructor, soup, instructorurl):
"""Scrapes all the ratings on SOUP and on any remaining pages."""
for ratingouter in soup.find_all("div", class_="recent-rating"):
scrape_rating(instructor, ratingouter.div)
pagediv = soup.find("div", class_="pagination")
if pagediv is not None:
pageanchor = pagediv.find_all("a")[-1]
if pageanchor.string.startswith("Next Page"):
nexturl = instructorurl + pageanchor['href']
nextsoup = BeautifulSoup(requests.get(nexturl).text)
scrape_instructor_page(instructor, nextsoup, instructorurl)
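# Note added for clarity: scrape_instructor_page recurses once per "Next Page"
# link, fetching each further page of ratings; the recursion ends on a page
# whose pagination div is absent or whose last anchor is not a "Next Page" link.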
def scrape_rating(instructor, ratingentry):
"""Creates the rating in RATINGENTRY for INSTRUCTOR."""
courseid = int(ratingentry.contents[5]['href'].split("/")[-2])
course = Course.query.get(courseid)
termpair = ratingentry.contents[6][2:][:-1].split()
term = Term.query.filter_by(season=termpair[0],
year=int(termpair[1])).first()
if term is None:
term = Term(season=termpair[0], year=int(termpair[1]))
db.session.add(term)
commentdiv = ratingentry.find(class_="comment")
comment = "" if commentdiv is None else commentdiv.contents[1].strip()
nums = ratingentry.find_all(class_="rating-number")
rating = Rating(course=course,
term=term,
instructor=instructor,
overall=int(nums[0].string),
assignments=int(nums[1].string),
exams=int(nums[2].string),
helpfulness=int(nums[3].string),
enthusiasm=int(nums[4].string),
comment=comment)
db.session.add(rating)
processormap = {
"root": scrape_root,
"dept": scrape_dept,
"course": scrape_course,
"instructor": scrape_instructor,
}
| rskwan/ncindex | ncindex/scraper.py | Python | mit | 7,488 |
"""Tests for account creation"""
from datetime import datetime
import json
import unittest
import ddt
from mock import patch
from django.conf import settings
from django.contrib.auth.models import User, AnonymousUser
from django.core.urlresolvers import reverse
from django.test import TestCase, TransactionTestCase
from django.test.client import RequestFactory
from django.test.utils import override_settings
from django.utils.importlib import import_module
import mock
import pytz
from openedx.core.djangoapps.user_api.preferences.api import get_user_preference
from openedx.core.djangoapps.lang_pref import LANGUAGE_KEY
from openedx.core.djangoapps.site_configuration.tests.mixins import SiteMixin
from notification_prefs import NOTIFICATION_PREF_KEY
from openedx.core.djangoapps.external_auth.models import ExternalAuthMap
import student
from student.models import UserAttribute
from student.views import REGISTRATION_AFFILIATE_ID, REGISTRATION_UTM_PARAMETERS, REGISTRATION_UTM_CREATED_AT
from django_comment_common.models import ForumsConfig
TEST_CS_URL = 'https://comments.service.test:123/'
@ddt.ddt
@override_settings(
MICROSITE_CONFIGURATION={
"microsite": {
"domain_prefix": "microsite",
"extended_profile_fields": ["extra1", "extra2"],
}
},
REGISTRATION_EXTRA_FIELDS={
key: "optional"
for key in [
"level_of_education", "gender", "mailing_address", "city", "country", "goals",
"year_of_birth"
]
}
)
class TestCreateAccount(SiteMixin, TestCase):
"""Tests for account creation"""
def setUp(self):
super(TestCreateAccount, self).setUp()
self.username = "test_user"
self.url = reverse("create_account")
self.request_factory = RequestFactory()
self.params = {
"username": self.username,
"email": "[email protected]",
"password": "testpass",
"name": "Test User",
"honor_code": "true",
"terms_of_service": "true",
}
@ddt.data("en", "eo")
def test_default_lang_pref_saved(self, lang):
with mock.patch("django.conf.settings.LANGUAGE_CODE", lang):
response = self.client.post(self.url, self.params)
self.assertEqual(response.status_code, 200)
user = User.objects.get(username=self.username)
self.assertEqual(get_user_preference(user, LANGUAGE_KEY), lang)
@ddt.data("en", "eo")
def test_header_lang_pref_saved(self, lang):
response = self.client.post(self.url, self.params, HTTP_ACCEPT_LANGUAGE=lang)
user = User.objects.get(username=self.username)
self.assertEqual(response.status_code, 200)
self.assertEqual(get_user_preference(user, LANGUAGE_KEY), lang)
def create_account_and_fetch_profile(self, host='microsite.example.com'):
"""
Create an account with self.params, assert that the response indicates
success, and return the UserProfile object for the newly created user
"""
response = self.client.post(self.url, self.params, HTTP_HOST=host)
self.assertEqual(response.status_code, 200)
user = User.objects.get(username=self.username)
return user.profile
def test_marketing_cookie(self):
response = self.client.post(self.url, self.params)
self.assertEqual(response.status_code, 200)
self.assertIn(settings.EDXMKTG_LOGGED_IN_COOKIE_NAME, self.client.cookies)
self.assertIn(settings.EDXMKTG_USER_INFO_COOKIE_NAME, self.client.cookies)
@unittest.skipUnless(
"microsite_configuration.middleware.MicrositeMiddleware" in settings.MIDDLEWARE_CLASSES,
"Microsites not implemented in this environment"
)
def test_profile_saved_no_optional_fields(self):
profile = self.create_account_and_fetch_profile()
self.assertEqual(profile.name, self.params["name"])
self.assertEqual(profile.level_of_education, "")
self.assertEqual(profile.gender, "")
self.assertEqual(profile.mailing_address, "")
self.assertEqual(profile.city, "")
self.assertEqual(profile.country, "")
self.assertEqual(profile.goals, "")
self.assertEqual(
profile.get_meta(),
{
"extra1": "",
"extra2": "",
}
)
self.assertIsNone(profile.year_of_birth)
@unittest.skipUnless(
"microsite_configuration.middleware.MicrositeMiddleware" in settings.MIDDLEWARE_CLASSES,
"Microsites not implemented in this environment"
)
@override_settings(LMS_SEGMENT_KEY="testkey")
@mock.patch('student.views.analytics.track')
@mock.patch('student.views.analytics.identify')
def test_segment_tracking(self, mock_segment_identify, _):
year = datetime.now().year
year_of_birth = year - 14
self.params.update({
"level_of_education": "a",
"gender": "o",
"mailing_address": "123 Example Rd",
"city": "Exampleton",
"country": "US",
"goals": "To test this feature",
"year_of_birth": str(year_of_birth),
"extra1": "extra_value1",
"extra2": "extra_value2",
})
expected_payload = {
'email': self.params['email'],
'username': self.params['username'],
'name': self.params['name'],
'age': 13,
'yearOfBirth': year_of_birth,
'education': 'Associate degree',
'address': self.params['mailing_address'],
'gender': 'Other/Prefer Not to Say',
'country': self.params['country'],
}
self.create_account_and_fetch_profile()
mock_segment_identify.assert_called_with(1, expected_payload)
@unittest.skipUnless(
"microsite_configuration.middleware.MicrositeMiddleware" in settings.MIDDLEWARE_CLASSES,
"Microsites not implemented in this environment"
)
def test_profile_saved_all_optional_fields(self):
self.params.update({
"level_of_education": "a",
"gender": "o",
"mailing_address": "123 Example Rd",
"city": "Exampleton",
"country": "US",
"goals": "To test this feature",
"year_of_birth": "2015",
"extra1": "extra_value1",
"extra2": "extra_value2",
})
profile = self.create_account_and_fetch_profile()
self.assertEqual(profile.level_of_education, "a")
self.assertEqual(profile.gender, "o")
self.assertEqual(profile.mailing_address, "123 Example Rd")
self.assertEqual(profile.city, "Exampleton")
self.assertEqual(profile.country, "US")
self.assertEqual(profile.goals, "To test this feature")
self.assertEqual(
profile.get_meta(),
{
"extra1": "extra_value1",
"extra2": "extra_value2",
}
)
self.assertEqual(profile.year_of_birth, 2015)
@unittest.skipUnless(
"microsite_configuration.middleware.MicrositeMiddleware" in settings.MIDDLEWARE_CLASSES,
"Microsites not implemented in this environment"
)
def test_profile_saved_empty_optional_fields(self):
self.params.update({
"level_of_education": "",
"gender": "",
"mailing_address": "",
"city": "",
"country": "",
"goals": "",
"year_of_birth": "",
"extra1": "",
"extra2": "",
})
profile = self.create_account_and_fetch_profile()
self.assertEqual(profile.level_of_education, "")
self.assertEqual(profile.gender, "")
self.assertEqual(profile.mailing_address, "")
self.assertEqual(profile.city, "")
self.assertEqual(profile.country, "")
self.assertEqual(profile.goals, "")
self.assertEqual(
profile.get_meta(),
{"extra1": "", "extra2": ""}
)
self.assertEqual(profile.year_of_birth, None)
def test_profile_year_of_birth_non_integer(self):
self.params["year_of_birth"] = "not_an_integer"
profile = self.create_account_and_fetch_profile()
self.assertIsNone(profile.year_of_birth)
def base_extauth_bypass_sending_activation_email(self, bypass_activation_email):
"""
Tests user creation without sending activation email when
doing external auth
"""
request = self.request_factory.post(self.url, self.params)
request.site = self.site
# now indicate we are doing ext_auth by setting 'ExternalAuthMap' in the session.
request.session = import_module(settings.SESSION_ENGINE).SessionStore() # empty session
extauth = ExternalAuthMap(external_id='[email protected]',
external_email='[email protected]',
internal_password=self.params['password'],
external_domain='shib:https://idp.stanford.edu/')
request.session['ExternalAuthMap'] = extauth
request.user = AnonymousUser()
with mock.patch('edxmako.request_context.get_current_request', return_value=request):
with mock.patch('django.core.mail.send_mail') as mock_send_mail:
student.views.create_account(request)
# check that send_mail is called
if bypass_activation_email:
self.assertFalse(mock_send_mail.called)
else:
self.assertTrue(mock_send_mail.called)
@unittest.skipUnless(settings.FEATURES.get('AUTH_USE_SHIB'), "AUTH_USE_SHIB not set")
@mock.patch.dict(settings.FEATURES, {'BYPASS_ACTIVATION_EMAIL_FOR_EXTAUTH': True, 'AUTOMATIC_AUTH_FOR_TESTING': False})
def test_extauth_bypass_sending_activation_email_with_bypass(self):
"""
Tests user creation without sending activation email when
settings.FEATURES['BYPASS_ACTIVATION_EMAIL_FOR_EXTAUTH']=True and doing external auth
"""
self.base_extauth_bypass_sending_activation_email(True)
@unittest.skipUnless(settings.FEATURES.get('AUTH_USE_SHIB'), "AUTH_USE_SHIB not set")
@mock.patch.dict(settings.FEATURES, {'BYPASS_ACTIVATION_EMAIL_FOR_EXTAUTH': False, 'AUTOMATIC_AUTH_FOR_TESTING': False})
def test_extauth_bypass_sending_activation_email_without_bypass_1(self):
"""
Tests user creation without sending activation email when
settings.FEATURES['BYPASS_ACTIVATION_EMAIL_FOR_EXTAUTH']=False and doing external auth
"""
self.base_extauth_bypass_sending_activation_email(False)
@unittest.skipUnless(settings.FEATURES.get('AUTH_USE_SHIB'), "AUTH_USE_SHIB not set")
@mock.patch.dict(settings.FEATURES, {'BYPASS_ACTIVATION_EMAIL_FOR_EXTAUTH': False, 'AUTOMATIC_AUTH_FOR_TESTING': False, 'SKIP_EMAIL_VALIDATION': True})
def test_extauth_bypass_sending_activation_email_without_bypass_2(self):
"""
Tests user creation without sending activation email when
settings.FEATURES['BYPASS_ACTIVATION_EMAIL_FOR_EXTAUTH']=False and doing external auth
"""
self.base_extauth_bypass_sending_activation_email(True)
@ddt.data(True, False)
def test_discussions_email_digest_pref(self, digest_enabled):
with mock.patch.dict("student.models.settings.FEATURES", {"ENABLE_DISCUSSION_EMAIL_DIGEST": digest_enabled}):
response = self.client.post(self.url, self.params)
self.assertEqual(response.status_code, 200)
user = User.objects.get(username=self.username)
preference = get_user_preference(user, NOTIFICATION_PREF_KEY)
if digest_enabled:
self.assertIsNotNone(preference)
else:
self.assertIsNone(preference)
@unittest.skipUnless(settings.ROOT_URLCONF == 'lms.urls', 'Test only valid in lms')
def test_affiliate_referral_attribution(self):
"""
Verify that a referral attribution is recorded if an affiliate
cookie is present upon a new user's registration.
"""
affiliate_id = 'test-partner'
self.client.cookies[settings.AFFILIATE_COOKIE_NAME] = affiliate_id
user = self.create_account_and_fetch_profile().user
self.assertEqual(UserAttribute.get_user_attribute(user, REGISTRATION_AFFILIATE_ID), affiliate_id)
@unittest.skipUnless(settings.ROOT_URLCONF == 'lms.urls', 'Test only valid in lms')
def test_utm_referral_attribution(self):
"""
        Verify that UTM referral attributions are recorded if a UTM
        cookie is present upon a new user's registration.
"""
utm_cookie_name = 'edx.test.utm'
with mock.patch('student.models.RegistrationCookieConfiguration.current') as config:
instance = config.return_value
instance.utm_cookie_name = utm_cookie_name
timestamp = 1475521816879
utm_cookie = {
'utm_source': 'test-source',
'utm_medium': 'test-medium',
'utm_campaign': 'test-campaign',
'utm_term': 'test-term',
'utm_content': 'test-content',
'created_at': timestamp
}
created_at = datetime.fromtimestamp(timestamp / float(1000), tz=pytz.UTC)
self.client.cookies[utm_cookie_name] = json.dumps(utm_cookie)
user = self.create_account_and_fetch_profile().user
self.assertEqual(
UserAttribute.get_user_attribute(user, REGISTRATION_UTM_PARAMETERS.get('utm_source')),
utm_cookie.get('utm_source')
)
self.assertEqual(
UserAttribute.get_user_attribute(user, REGISTRATION_UTM_PARAMETERS.get('utm_medium')),
utm_cookie.get('utm_medium')
)
self.assertEqual(
UserAttribute.get_user_attribute(user, REGISTRATION_UTM_PARAMETERS.get('utm_campaign')),
utm_cookie.get('utm_campaign')
)
self.assertEqual(
UserAttribute.get_user_attribute(user, REGISTRATION_UTM_PARAMETERS.get('utm_term')),
utm_cookie.get('utm_term')
)
self.assertEqual(
UserAttribute.get_user_attribute(user, REGISTRATION_UTM_PARAMETERS.get('utm_content')),
utm_cookie.get('utm_content')
)
self.assertEqual(
UserAttribute.get_user_attribute(user, REGISTRATION_UTM_CREATED_AT),
str(created_at)
)
@unittest.skipUnless(settings.ROOT_URLCONF == 'lms.urls', 'Test only valid in lms')
def test_no_referral(self):
"""Verify that no referral is recorded when a cookie is not present."""
utm_cookie_name = 'edx.test.utm'
with mock.patch('student.models.RegistrationCookieConfiguration.current') as config:
instance = config.return_value
instance.utm_cookie_name = utm_cookie_name
self.assertIsNone(self.client.cookies.get(settings.AFFILIATE_COOKIE_NAME)) # pylint: disable=no-member
self.assertIsNone(self.client.cookies.get(utm_cookie_name)) # pylint: disable=no-member
user = self.create_account_and_fetch_profile().user
self.assertIsNone(UserAttribute.get_user_attribute(user, REGISTRATION_AFFILIATE_ID))
self.assertIsNone(UserAttribute.get_user_attribute(user, REGISTRATION_UTM_PARAMETERS.get('utm_source')))
self.assertIsNone(UserAttribute.get_user_attribute(user, REGISTRATION_UTM_PARAMETERS.get('utm_medium')))
self.assertIsNone(UserAttribute.get_user_attribute(user, REGISTRATION_UTM_PARAMETERS.get('utm_campaign')))
self.assertIsNone(UserAttribute.get_user_attribute(user, REGISTRATION_UTM_PARAMETERS.get('utm_term')))
self.assertIsNone(UserAttribute.get_user_attribute(user, REGISTRATION_UTM_PARAMETERS.get('utm_content')))
self.assertIsNone(UserAttribute.get_user_attribute(user, REGISTRATION_UTM_CREATED_AT))
@unittest.skipUnless(settings.ROOT_URLCONF == 'lms.urls', 'Test only valid in lms')
def test_incomplete_utm_referral(self):
"""Verify that no referral is recorded when a cookie is not present."""
utm_cookie_name = 'edx.test.utm'
with mock.patch('student.models.RegistrationCookieConfiguration.current') as config:
instance = config.return_value
instance.utm_cookie_name = utm_cookie_name
utm_cookie = {
'utm_source': 'test-source',
'utm_medium': 'test-medium',
# No campaign
'utm_term': 'test-term',
'utm_content': 'test-content',
# No created at
}
self.client.cookies[utm_cookie_name] = json.dumps(utm_cookie)
user = self.create_account_and_fetch_profile().user
self.assertEqual(
UserAttribute.get_user_attribute(user, REGISTRATION_UTM_PARAMETERS.get('utm_source')),
utm_cookie.get('utm_source')
)
self.assertEqual(
UserAttribute.get_user_attribute(user, REGISTRATION_UTM_PARAMETERS.get('utm_medium')),
utm_cookie.get('utm_medium')
)
self.assertEqual(
UserAttribute.get_user_attribute(user, REGISTRATION_UTM_PARAMETERS.get('utm_term')),
utm_cookie.get('utm_term')
)
self.assertEqual(
UserAttribute.get_user_attribute(user, REGISTRATION_UTM_PARAMETERS.get('utm_content')),
utm_cookie.get('utm_content')
)
self.assertIsNone(
UserAttribute.get_user_attribute(user, REGISTRATION_UTM_PARAMETERS.get('utm_campaign'))
)
self.assertIsNone(
UserAttribute.get_user_attribute(user, REGISTRATION_UTM_CREATED_AT)
)
@patch("openedx.core.djangoapps.site_configuration.helpers.get_value", mock.Mock(return_value=False))
def test_create_account_not_allowed(self):
"""
Test case to check user creation is forbidden when ALLOW_PUBLIC_ACCOUNT_CREATION feature flag is turned off
"""
response = self.client.get(self.url)
self.assertEqual(response.status_code, 403)
def test_created_on_site_user_attribute_set(self):
profile = self.create_account_and_fetch_profile(host=self.site.domain)
self.assertEqual(UserAttribute.get_user_attribute(profile.user, 'created_on_site'), self.site.domain)
@ddt.ddt
class TestCreateAccountValidation(TestCase):
"""
Test validation of various parameters in the create_account view
"""
def setUp(self):
super(TestCreateAccountValidation, self).setUp()
self.url = reverse("create_account")
self.minimal_params = {
"username": "test_username",
"email": "[email protected]",
"password": "test_password",
"name": "Test Name",
"honor_code": "true",
"terms_of_service": "true",
}
def assert_success(self, params):
"""
Request account creation with the given params and assert that the
response properly indicates success
"""
response = self.client.post(self.url, params)
self.assertEqual(response.status_code, 200)
response_data = json.loads(response.content)
self.assertTrue(response_data["success"])
def assert_error(self, params, expected_field, expected_value):
"""
Request account creation with the given params and assert that the
response properly indicates an error with the given field and value
"""
response = self.client.post(self.url, params)
self.assertEqual(response.status_code, 400)
response_data = json.loads(response.content)
self.assertFalse(response_data["success"])
self.assertEqual(response_data["field"], expected_field)
self.assertEqual(response_data["value"], expected_value)
def test_minimal_success(self):
self.assert_success(self.minimal_params)
def test_username(self):
params = dict(self.minimal_params)
def assert_username_error(expected_error):
"""
Assert that requesting account creation results in the expected
error
"""
self.assert_error(params, "username", expected_error)
# Missing
del params["username"]
assert_username_error("Username must be minimum of two characters long")
# Empty, too short
for username in ["", "a"]:
params["username"] = username
assert_username_error("Username must be minimum of two characters long")
# Too long
params["username"] = "this_username_has_31_characters"
assert_username_error("Username cannot be more than 30 characters long")
# Invalid
params["username"] = "invalid username"
assert_username_error("Usernames can only contain Roman letters, western numerals (0-9), underscores (_), and "
"hyphens (-).")
def test_email(self):
params = dict(self.minimal_params)
def assert_email_error(expected_error):
"""
Assert that requesting account creation results in the expected
error
"""
self.assert_error(params, "email", expected_error)
# Missing
del params["email"]
assert_email_error("A properly formatted e-mail is required")
# Empty, too short
for email in ["", "a"]:
params["email"] = email
assert_email_error("A properly formatted e-mail is required")
# Too long
params["email"] = '{email}@example.com'.format(
email='this_email_address_has_254_characters_in_it_so_it_is_unacceptable' * 4
)
# Assert that we get error when email has more than 254 characters.
self.assertGreater(len(params['email']), 254)
assert_email_error("Email cannot be more than 254 characters long")
# Valid Email
params["email"] = "[email protected]"
# Assert success on valid email
self.assertLess(len(params["email"]), 254)
self.assert_success(params)
# Invalid
params["email"] = "not_an_email_address"
assert_email_error("A properly formatted e-mail is required")
@override_settings(
REGISTRATION_EMAIL_PATTERNS_ALLOWED=[
r'.*@edx.org', # Naive regex omitting '^', '$' and '\.' should still work.
r'^.*@(.*\.)?example\.com$',
r'^(^\w+\.\w+)@school.tld$',
]
)
@ddt.data(
('[email protected]', False),
('[email protected]', False),
('[email protected]', True),
('[email protected]', True),
('[email protected]', True),
('[email protected]', True),
('[email protected]', False),
)
@ddt.unpack
def test_email_pattern_requirements(self, email, expect_success):
"""
        Test the REGISTRATION_EMAIL_PATTERNS_ALLOWED setting, a feature which
        can be used to only allow people to register if their email matches
        against a whitelist of regexes.
"""
params = dict(self.minimal_params)
params["email"] = email
if expect_success:
self.assert_success(params)
else:
self.assert_error(params, "email", "Unauthorized email address.")
def test_password(self):
params = dict(self.minimal_params)
def assert_password_error(expected_error):
"""
Assert that requesting account creation results in the expected
error
"""
self.assert_error(params, "password", expected_error)
# Missing
del params["password"]
assert_password_error("A valid password is required")
# Empty, too short
for password in ["", "a"]:
params["password"] = password
assert_password_error("A valid password is required")
# Password policy is tested elsewhere
# Matching username
params["username"] = params["password"] = "test_username_and_password"
assert_password_error("Username and password fields cannot match")
def test_name(self):
params = dict(self.minimal_params)
def assert_name_error(expected_error):
"""
Assert that requesting account creation results in the expected
error
"""
self.assert_error(params, "name", expected_error)
# Missing
del params["name"]
assert_name_error("Your legal name must be a minimum of two characters long")
# Empty, too short
for name in ["", "a"]:
params["name"] = name
assert_name_error("Your legal name must be a minimum of two characters long")
def test_honor_code(self):
params = dict(self.minimal_params)
def assert_honor_code_error(expected_error):
"""
Assert that requesting account creation results in the expected
error
"""
self.assert_error(params, "honor_code", expected_error)
with override_settings(REGISTRATION_EXTRA_FIELDS={"honor_code": "required"}):
# Missing
del params["honor_code"]
assert_honor_code_error("To enroll, you must follow the honor code.")
# Empty, invalid
for honor_code in ["", "false", "not_boolean"]:
params["honor_code"] = honor_code
assert_honor_code_error("To enroll, you must follow the honor code.")
# True
params["honor_code"] = "tRUe"
self.assert_success(params)
with override_settings(REGISTRATION_EXTRA_FIELDS={"honor_code": "optional"}):
# Missing
del params["honor_code"]
# Need to change username/email because user was created above
params["username"] = "another_test_username"
params["email"] = "[email protected]"
self.assert_success(params)
def test_terms_of_service(self):
params = dict(self.minimal_params)
def assert_terms_of_service_error(expected_error):
"""
Assert that requesting account creation results in the expected
error
"""
self.assert_error(params, "terms_of_service", expected_error)
# Missing
del params["terms_of_service"]
assert_terms_of_service_error("You must accept the terms of service.")
# Empty, invalid
for terms_of_service in ["", "false", "not_boolean"]:
params["terms_of_service"] = terms_of_service
assert_terms_of_service_error("You must accept the terms of service.")
# True
params["terms_of_service"] = "tRUe"
self.assert_success(params)
@ddt.data(
("level_of_education", 1, "A level of education is required"),
("gender", 1, "Your gender is required"),
("year_of_birth", 2, "Your year of birth is required"),
("mailing_address", 2, "Your mailing address is required"),
("goals", 2, "A description of your goals is required"),
("city", 2, "A city is required"),
("country", 2, "A country is required"),
("custom_field", 2, "You are missing one or more required fields")
)
@ddt.unpack
def test_extra_fields(self, field, min_length, expected_error):
params = dict(self.minimal_params)
def assert_extra_field_error():
"""
Assert that requesting account creation results in the expected
error
"""
self.assert_error(params, field, expected_error)
with override_settings(REGISTRATION_EXTRA_FIELDS={field: "required"}):
# Missing
assert_extra_field_error()
# Empty
params[field] = ""
assert_extra_field_error()
# Too short
if min_length > 1:
params[field] = "a"
assert_extra_field_error()
@mock.patch.dict("student.models.settings.FEATURES", {"ENABLE_DISCUSSION_SERVICE": True})
@mock.patch("lms.lib.comment_client.User.base_url", TEST_CS_URL)
@mock.patch("lms.lib.comment_client.utils.requests.request", return_value=mock.Mock(status_code=200, text='{}'))
class TestCreateCommentsServiceUser(TransactionTestCase):
def setUp(self):
super(TestCreateCommentsServiceUser, self).setUp()
self.username = "test_user"
self.url = reverse("create_account")
self.params = {
"username": self.username,
"email": "[email protected]",
"password": "testpass",
"name": "Test User",
"honor_code": "true",
"terms_of_service": "true",
}
config = ForumsConfig.current()
config.enabled = True
config.save()
def test_cs_user_created(self, request):
"If user account creation succeeds, we should create a comments service user"
response = self.client.post(self.url, self.params)
self.assertEqual(response.status_code, 200)
self.assertTrue(request.called)
args, kwargs = request.call_args
self.assertEqual(args[0], 'put')
self.assertTrue(args[1].startswith(TEST_CS_URL))
self.assertEqual(kwargs['data']['username'], self.params['username'])
@mock.patch("student.models.Registration.register", side_effect=Exception)
def test_cs_user_not_created(self, register, request):
"If user account creation fails, we should not create a comments service user"
try:
self.client.post(self.url, self.params)
        except Exception:
            # Registration.register is mocked to raise, so account creation is
            # expected to fail; swallow the error and assert on side effects.
            pass
with self.assertRaises(User.DoesNotExist):
User.objects.get(username=self.username)
self.assertTrue(register.called)
self.assertFalse(request.called)
| romain-li/edx-platform | common/djangoapps/student/tests/test_create_account.py | Python | agpl-3.0 | 30,398 |
# fbdata.models
# PYTHON
from datetime import timedelta
# DJANGO
from django.conf import settings
from django.core.paginator import Paginator
from django.db import models
# DJANGO FACEBOOK
from django_facebook.models import FacebookProfile
# FBDATA
from .fields import IntegerListField
from .utils import (
date_to_timestamp,
empty_hours,
fb_post_type_str,
get_choice_name,
padded_date_range,
random_color,
truncate_html,
wordlist_regex,
LONG_DATE_FORMAT
)
############
# CLASSES
############
class AnonName(models.Model):
name = models.CharField(max_length=16, unique=True)
def __unicode__(self):
return self.name
def _anon_name():
return '%s %s' % tuple(AnonName.objects.all().order_by('?')[:2])
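# Note added for clarity: _anon_name joins two randomly ordered AnonName rows
# into a pseudonym (e.g. "<first name> <second name>"), which FBId.anon_name
# below uses as its default so each Facebook user gets a readable alias.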
class FBId(models.Model):
user_id = models.BigIntegerField(unique=True)
user_name = models.CharField(max_length=128, null=True, blank=True)
anon_name = models.CharField(max_length=34, default=_anon_name)
fb_type = models.CharField(max_length=12, null=True, blank=True)
colour = models.CharField(max_length=6, default=random_color)
users = models.ManyToManyField(settings.AUTH_USER_MODEL,
related_name='friend_set')
name_error = models.BooleanField(default=False)
def is_participant(self):
return FacebookProfile.objects.filter(facebook_id=self.user_id).exists()
def reference_name(self, anon=True):
return self.anon_name if anon else self.user_name or unicode(self.user_id)
def reference_id(self, anon=True):
return self.pk
def has_user(self, user):
return self.users.filter(pk=user.pk).exists()
def __unicode__(self):
return '%s %s' % (self.user_name or 'Unknown', self.user_id)
def detail_data(self, anon=True):
return {'fbid': self.user_id,
'name': self.user_name,
'anon': self.anon_name,
'type': self.fb_type,
'rgb': self.colour,
'users': [ u.id for u in self.users.all()],
'participant': self.is_participant()}
class UserAnalysis(models.Model):
STATUS_ERROR = 0
STATUS_NEW = 1
STATUS_SUCCESS = 2
STATUS_UNDERTIME = -1
STATUS_CHOICES = (
(STATUS_ERROR, 'error'),
(STATUS_NEW, 'new'),
(STATUS_SUCCESS, 'success'),
(STATUS_UNDERTIME, 'under time')
)
user = models.OneToOneField(settings.AUTH_USER_MODEL)
fbuser = models.ForeignKey('FBId', null=True)
anon_data = models.BooleanField(default=True)
start_time = models.DateTimeField(null=True)
end_time = models.DateTimeField(null=True)
status = models.SmallIntegerField(choices=STATUS_CHOICES,
default=STATUS_NEW)
consent = models.BooleanField(default=False)
page_size = 14.0
def status_str(self):
return get_choice_name(self.status, self.STATUS_CHOICES)
def page_dates(self, page):
if self.start_time:
page = max(0, min(page, self.get_pages())-1)
days = int(page * self.page_size)
page_start = min(self.end_time, self.start_time + timedelta(days=days))
page_end = page_start + timedelta(days=self.page_size)
return padded_date_range(page_start, page_end)
return (None, None)
def get_pages(self):
from math import ceil
if not self.start_time or not self.end_time:
return 0
duration = self.end_time - self.start_time
return int(ceil(duration.days / self.page_size))
def end_page(self):
return self.get_pages()
def ad_topics(self):
return AdTopic.objects.filter(users=self.user).order_by('label')
def ad_topic_labels(self):
return AdTopic.objects.filter(
users=self.user).values_list(
'label', flat=True).order_by('label')
    def match_ad_topics(self, input_str):
        # ad_regex is cached on the instance rather than stored as a field, so
        # build it lazily the first time this method is called.
        if getattr(self, 'ad_regex', None) is None:
            self.ad_regex = wordlist_regex(self.ad_topic_labels())
        topics = self.ad_regex.findall(input_str)
        return topics
def paginate(self, page=None):
self.paginator = Paginator(range(1, self.get_pages()+1), 1)
page = page or self.end_page()
return self.paginator.page(page)
def recent_time_frame(self):
return self.page_dates(self.end_page())
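# Worked example of the paging above (illustrative figures, added for clarity):
# with a start_time/end_time span of 59 days and page_size = 14.0, get_pages()
# returns ceil(59 / 14.0) == 5, and page_dates(5) yields the padded date range
# covering the final (possibly partial) 14-day window of the analysis.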
class AdTopic(models.Model):
users = models.ManyToManyField(settings.AUTH_USER_MODEL)
label = models.CharField(max_length=128)
class StreamPost(models.Model):
user = models.ForeignKey(settings.AUTH_USER_MODEL)
post_id = models.CharField(max_length=128)
post_from = models.ForeignKey('FBId', null=True)
permalink = models.CharField(max_length=256, null=True, blank=True)
post_type = models.PositiveIntegerField(default=0)
created_time = models.DateTimeField(null=True)
updated_time = models.DateTimeField(null=True)
user_likes = models.BooleanField(default=False)
like_count = models.PositiveIntegerField(default=0)
comment_count = models.PositiveIntegerField(default=0)
share_count = models.PositiveIntegerField(default=0)
message = models.TextField(null=True, blank=True)
description = models.TextField(null=True, blank=True)
likers = models.ManyToManyField('FBId', related_name='liked_posts')
tagged = models.ManyToManyField('FBId', related_name='tagged_posts')
@classmethod
def comment_class(cls):
return PostComment
def get_comments(self):
return self.postcomment_set.filter(
created_time__gte=self.created_time).order_by('created_time')
@classmethod
def type_str(cls):
return 'post'
def fbid(self):
return self.post_id
def fb_source_id(self, anon=True):
return self.post_from.reference_id(anon)
def fb_source(self):
return self.post_from
def fb_timestamp(self):
return date_to_timestamp(self.created_time)
def detail_data(self, anon=True):
data = {'fbid': self.fbid(),
'type': 'post',
'post_type': self.post_type,
'source_id': self.post_from.reference_id(anon),
'source_name': self.post_from.reference_name(anon),
'created_time': date_to_timestamp(self.created_time),
'updated_time': date_to_timestamp(self.updated_time),
'user_likes': self.user_likes,
'like_count': self.like_count,
'comment_count': self.comment_count,
'share_count': self.share_count,
'likers': [u.reference_id(anon) for u in self.likers.all()],
'tagged': [u.reference_id(anon) for u in self.tagged.all()]}
if not anon:
data['permalink'] = self.permalink
if self.message:
data['message'] = truncate_html(self.message)
if self.description:
data['description'] = truncate_html(self.description)
return data
def needs_updated(self, updated_time):
if self.updated_time:
return self.updated_time < updated_time
else:
return True
def get_reference_hour(self):
rtime = self.updated_time or self.created_time
return rtime.hour
def time_str(self):
return self.created_time.strftime(LONG_DATE_FORMAT)
def post_type_str(self):
return fb_post_type_str(self.post_type, default='status')
def __unicode__(self):
return u'post: %s' % self.post_id
def display_info(self, anon=True):
title = None
if not anon:
if self.message:
title = truncate_html(self.message)
elif self.description:
title = self.description
if title:
return u'%s %s: %s' % (self.time_str(), self.post_type_str(), title)
else:
return u'%s %s' % (self.time_str(), self.post_type_str())
class FBPhoto(models.Model):
user = models.ForeignKey(settings.AUTH_USER_MODEL)
object_id = models.CharField(max_length=128)
album_object_id = models.CharField(max_length=128)
owner = models.ForeignKey('FBId', null=True)
link = models.CharField(max_length=256, null=True, blank=True)
src = models.CharField(max_length=256, null=True, blank=True)
created_time = models.DateTimeField(null=True)
updated_time = models.DateTimeField(null=True)
user_likes = models.BooleanField(default=False)
like_count = models.PositiveIntegerField(default=0)
comment_count = models.PositiveIntegerField(default=0)
caption = models.TextField(null=True, blank=True)
likers = models.ManyToManyField('FBId', related_name='liked_photos')
tagged = models.ManyToManyField('FBId', related_name='tagged_photos')
@classmethod
def comment_class(cls):
return PhotoComment
def get_comments(self):
return self.photocomment_set.filter(
created_time__gte=self.created_time).order_by('created_time')
@classmethod
def type_str(cls):
return 'photo'
def fbid(self):
return self.object_id
def fb_source_id(self, anon=True):
return self.owner.reference_id(anon)
def fb_source(self):
return self.owner
def fb_timestamp(self):
return date_to_timestamp(self.created_time)
def __unicode__(self):
return u'photo: %s' % self.object_id
def detail_data(self, anon=True):
data = {'fbid': self.object_id,
'type': 'photo',
'source_id': self.owner.reference_id(anon),
'source_name': self.owner.reference_name(anon),
'created_time': date_to_timestamp(self.created_time),
'updated_time': date_to_timestamp(self.updated_time),
'user_likes': self.user_likes,
'like_count': self.like_count,
'comment_count': self.comment_count,
'likers': [ u.reference_id(anon) for u in self.likers.all()],
'tagged': [ u.reference_id(anon) for u in self.tagged.all()]}
if not anon:
data['link'] = self.link
data['src'] = self.src
if self.caption:
data['caption'] = truncate_html(self.caption)
return data
def needs_updated(self, updated_time):
if self.updated_time:
return self.updated_time < updated_time
else:
return True
def get_reference_hour(self):
rtime = self.updated_time or self.created_time
return rtime.hour
def time_str(self):
return self.created_time.strftime(LONG_DATE_FORMAT)
def display_info(self, anon=True):
title = None
if not anon:
if self.caption:
title = truncate_html(self.caption)
if title:
return u'%s %s: %s' % (self.time_str(), self.type_str(), title)
else:
return u'%s %s' % (self.time_str(), self.type_str())
class FBVideo(models.Model):
user = models.ForeignKey(settings.AUTH_USER_MODEL)
video_id = models.CharField(max_length=128)
album_id = models.CharField(max_length=128)
owner = models.ForeignKey('FBId', null=True)
link = models.CharField(max_length=256, null=True, blank=True)
src = models.CharField(max_length=256, null=True, blank=True)
created_time = models.DateTimeField(null=True)
updated_time = models.DateTimeField(null=True)
user_likes = models.BooleanField(default=False)
like_count = models.PositiveIntegerField(default=0)
comment_count = models.PositiveIntegerField(default=0)
title = models.CharField(max_length=256, null=True, blank=True)
description = models.TextField(null=True, blank=True)
likers = models.ManyToManyField('FBId', related_name='liked_videos')
tagged = models.ManyToManyField('FBId', related_name='tagged_videos')
@classmethod
def comment_class(cls):
return VideoComment
def get_comments(self):
return self.videocomment_set.filter(
created_time__gte=self.created_time).order_by('created_time')
@classmethod
def type_str(cls):
return 'video'
def fbid(self):
return self.video_id
def fb_source_id(self, anon=True):
return self.owner.reference_id(anon)
def fb_source(self):
return self.owner
def fb_timestamp(self):
return date_to_timestamp(self.created_time)
def __unicode__(self):
return u'video: %s' % self.video_id
def detail_data(self, anon=True):
data = {'fbid': self.video_id,
'type': 'video',
'source_id': self.owner.reference_id(anon),
'source_name': self.owner.reference_name(anon),
'created_time': date_to_timestamp(self.created_time),
'updated_time': date_to_timestamp(self.updated_time),
'user_likes': self.user_likes,
'like_count': self.like_count,
'comment_count': self.comment_count,
'likers': [ u.reference_id(anon) for u in self.likers.all()],
'tagged': [ u.reference_id(anon) for u in self.tagged.all()]}
if not anon:
data['link'] = self.link
data['src'] = self.src
if self.title:
data['title'] = truncate_html(self.title)
if self.description:
data['description'] = truncate_html(self.description)
return data
def needs_updated(self, updated_time):
if self.updated_time:
return self.updated_time < updated_time
else:
return True
def get_reference_hour(self):
rtime = self.updated_time or self.created_time
return rtime.hour
def time_str(self):
return self.created_time.strftime(LONG_DATE_FORMAT)
def display_info(self, anon=True):
title = None
if not anon:
title = self.title
if title:
return u'%s %s: %s' % (self.time_str(), self.type_str(), title)
else:
return u'%s %s' % (self.time_str(), self.type_str())
class FBLink(models.Model):
user = models.ForeignKey(settings.AUTH_USER_MODEL)
link_id = models.CharField(max_length=128)
owner = models.ForeignKey('FBId', null=True)
via = models.ForeignKey('FBId', null=True, related_name='link_shared')
url = models.CharField(max_length=256, null=True, blank=True)
created_time = models.DateTimeField(null=True)
user_likes = models.BooleanField(default=False)
like_count = models.PositiveIntegerField(default=0)
comment_count = models.PositiveIntegerField(default=0)
share_count = models.PositiveIntegerField(default=0)
click_count = models.PositiveIntegerField(default=0)
caption = models.TextField(null=True, blank=True)
summary = models.TextField(null=True, blank=True)
title = models.TextField(null=True, blank=True)
owner_comment = models.TextField(null=True, blank=True)
likers = models.ManyToManyField('FBId', related_name='liked_links')
tagged = models.ManyToManyField('FBId', related_name='tagged_links')
@classmethod
def comment_class(cls):
return LinkComment
def get_comments(self):
return self.linkcomment_set.filter(
created_time__gte=self.created_time).order_by('created_time')
@classmethod
def type_str(cls):
return 'link'
def fbid(self):
return self.link_id
def fb_source_id(self, anon=True):
return self.owner.reference_id(anon)
def fb_source(self):
return self.owner
def fb_timestamp(self):
return date_to_timestamp(self.created_time)
def __unicode__(self):
return u'link: %s' % self.link_id
def detail_data(self, anon=True):
data = {'fbid': self.link_id,
'type': 'link',
'source_id': self.owner.reference_id(anon),
'source_name': self.owner.reference_name(anon),
'created_time': date_to_timestamp(self.created_time),
'user_likes': self.user_likes,
'like_count': self.like_count,
'comment_count': self.comment_count,
'share_count': self.share_count,
'click_count': self.click_count,
'likers': [ u.reference_id(anon) for u in self.likers.all()],
'tagged': [ u.reference_id(anon) for u in self.tagged.all()]}
if self.via:
data['via_id'] = self.via.reference_id(anon)
data['via_name'] = self.via.reference_name(anon)
if not anon:
data['url'] = self.url
if self.title:
data['title'] = truncate_html(self.title)
if self.caption:
data['caption'] = truncate_html(self.caption)
if self.summary:
data['summary'] = truncate_html(self.summary)
if self.owner_comment:
data['owner_comment'] = truncate_html(self.owner_comment)
return data
def needs_updated(self, updated_time):
return True
def get_reference_hour(self):
return self.created_time.hour
def time_str(self):
return self.created_time.strftime(LONG_DATE_FORMAT)
def display_info(self, anon=True):
title = None
if not anon:
title = self.title
if title:
return u'%s %s: %s' % (self.time_str(), self.type_str(), title)
else:
return u'%s %s' % (self.time_str(), self.type_str())
class FBStatus(models.Model):
user = models.ForeignKey(settings.AUTH_USER_MODEL)
status_id = models.CharField(max_length=128)
owner = models.ForeignKey('FBId', null=True)
message = models.TextField(null=True, blank=True)
created_time = models.DateTimeField(null=True)
user_likes = models.BooleanField(default=False)
like_count = models.PositiveIntegerField(default=0)
comment_count = models.PositiveIntegerField(default=0)
likers = models.ManyToManyField('FBId', related_name='liked_status')
tagged = models.ManyToManyField('FBId', related_name='tagged_status')
@classmethod
def comment_class(cls):
return StatusComment
def get_comments(self):
return self.statuscomment_set.filter(
created_time__gte=self.created_time).order_by('created_time')
@classmethod
def type_str(cls):
return 'status'
def fbid(self):
return self.status_id
def fb_source_id(self, anon=True):
return self.owner.reference_id(anon)
def fb_source(self):
return self.owner
def fb_timestamp(self):
return date_to_timestamp(self.created_time)
def __unicode__(self):
return u'status: %s' % self.status_id
def detail_data(self, anon=True):
data = {'fbid': self.status_id,
'type': 'status',
'source_id': self.owner.reference_id(anon),
'source_name': self.owner.reference_name(anon),
'created_time': date_to_timestamp(self.created_time),
'user_likes': self.user_likes,
'like_count': self.like_count,
'comment_count': self.comment_count,
'message': 'anonymised' if anon else truncate_html(self.message),
'likers': [ u.reference_id(anon) for u in self.likers.all()],
'tagged': [ u.reference_id(anon) for u in self.tagged.all()]}
if not anon:
if self.message:
data['message'] = truncate_html(self.message)
return data
def needs_updated(self, updated_time):
return True
def get_reference_hour(self):
return self.created_time.hour
def time_str(self):
return self.created_time.strftime(LONG_DATE_FORMAT)
def display_info(self, anon=True):
title = None
if not anon:
title = truncate_html(self.message)
if title:
return u'%s %s: %s' % (self.time_str(), self.type_str(), title)
else:
return u'%s %s' % (self.time_str(), self.type_str())
class FBAlbum(models.Model):
user = models.ForeignKey(settings.AUTH_USER_MODEL)
object_id = models.CharField(max_length=128)
owner = models.ForeignKey('FBId', null=True)
link = models.CharField(max_length=256, null=True, blank=True)
created_time = models.DateTimeField(null=True)
updated_time = models.DateTimeField(null=True)
user_likes = models.BooleanField(default=False)
like_count = models.PositiveIntegerField(default=0)
comment_count = models.PositiveIntegerField(default=0)
description = models.TextField(null=True, blank=True)
name = models.TextField(null=True, blank=True)
likers = models.ManyToManyField('FBId', related_name='liked_albums')
tagged = models.ManyToManyField('FBId', related_name='tagged_albums')
@classmethod
def comment_class(cls):
return AlbumComment
def get_comments(self):
return self.albumcomment_set.filter(
created_time__gte=self.created_time).order_by('created_time')
@classmethod
def type_str(cls):
return 'album'
def fbid(self):
return self.object_id
def fb_source_id(self, anon=True):
return self.owner.reference_id(anon)
def fb_source(self):
return self.owner
def fb_timestamp(self):
return date_to_timestamp(self.created_time)
def __unicode__(self):
return u'album: %s' % self.object_id
def detail_data(self, anon=True):
data = {'fbid': self.object_id,
'type': 'album',
'source_id': self.owner.reference_id(anon),
'source_name': self.owner.reference_name(anon),
'created_time': date_to_timestamp(self.created_time),
'user_likes': self.user_likes,
'like_count': self.like_count,
'comment_count': self.comment_count,
'likers': [u.reference_id(anon) for u in self.likers.all()],
'tagged': [u.reference_id(anon) for u in self.tagged.all()]}
if self.updated_time:
data['updated_time'] = date_to_timestamp(self.updated_time)
if not anon:
if self.link:
data['link'] = self.link
if self.description:
data['description'] = truncate_html(self.description)
if self.name:
data['name'] = truncate_html(self.name)
return data
def needs_updated(self, updated_time):
if self.updated_time:
return self.updated_time < updated_time
else:
return True
def get_reference_hour(self):
rtime = self.updated_time or self.created_time
return rtime.hour
def time_str(self):
return self.created_time.strftime(LONG_DATE_FORMAT)
def display_info(self, anon=True):
title = None
if not anon:
title = self.name
if title:
return u'%s %s: %s' % (self.time_str(), self.type_str(), title)
else:
return u'%s %s' % (self.time_str(), self.type_str())
class FBEvent(models.Model):
user = models.ForeignKey(settings.AUTH_USER_MODEL)
event_id = models.CharField(max_length=128)
creator = models.ForeignKey('FBId', null=True)
name = models.TextField(null=True, blank=True)
description = models.TextField(null=True, blank=True)
start_time = models.DateTimeField(null=True)
end_time = models.DateTimeField(null=True)
updated_time = models.DateTimeField(null=True)
all_members_count = models.PositiveIntegerField(default=0)
attending_count = models.PositiveIntegerField(default=0)
declined_count = models.PositiveIntegerField(default=0)
unsure_count = models.PositiveIntegerField(default=0)
invited = models.ManyToManyField('FBId', related_name='invited_events')
@classmethod
def comment_class(cls):
return None
def get_comments(self):
return []
@classmethod
def type_str(cls):
return 'event'
def fbid(self):
return self.event_id
def fb_source_id(self, anon=True):
return self.creator.reference_id(anon)
def fb_source(self):
return self.creator
def fb_timestamp(self):
return date_to_timestamp(self.updated_time)
def __unicode__(self):
        return u'event: %s' % self.event_id
def detail_data(self, anon=True):
return {'fbid': self.event_id,
'type': 'event',
'source_id': self.creator.reference_id(anon),
'source_name': self.creator.reference_name(anon),
'name': 'anonymised' if anon else truncate_html(self.name),
'start_time': date_to_timestamp(self.start_time),
'end_time': date_to_timestamp(self.end_time),
'updated_time': date_to_timestamp(self.updated_time),
'all_members_count': self.all_members_count,
'attending_count': self.attending_count,
'declined_count': self.declined_count,
'unsure_count': self.unsure_count,
'invited': [ u.reference_id(anon) for u in self.invited.all()]}
def needs_updated(self, updated_time):
if self.updated_time:
return self.updated_time < updated_time
else:
return True
    def get_reference_hour(self):
        return self.start_time.hour
    def time_str(self):
        return self.start_time.strftime(LONG_DATE_FORMAT)
def display_info(self, anon=True):
title = None
if not anon:
title = self.name
if title:
return u'%s %s: %s' % (self.time_str(), self.type_str(), title)
else:
return u'%s %s' % (self.time_str(), self.type_str())
####################
# COMMENTS
####################
class Comment(models.Model):
comment_id = models.CharField(max_length=128)
created_time = models.DateTimeField(null=True)
fbuser = models.ForeignKey('FBId', null=True)
like_count = models.PositiveIntegerField(default=0)
user_likes = models.BooleanField(default=False)
likers = models.ManyToManyField('FBId', related_name='liked_comments')
message = models.TextField(null=True, blank=True)
tagged = models.ManyToManyField('FBId', related_name='tagged_comments')
def get_reference_hour(self):
return self.created_time.hour
def detail_data(self, anon=True):
data = {'fbid': self.comment_id,
'type': 'comment',
'created_time': date_to_timestamp(self.created_time),
'fromid': self.fbuser.reference_id(anon),
'like_count': self.like_count,
'user_likes': self.user_likes}
if not anon:
if self.message:
data['message'] = truncate_html(self.message)
return data
class PostComment(Comment):
source = models.ForeignKey('StreamPost', null=True)
class PhotoComment(Comment):
source = models.ForeignKey('FBPhoto', null=True)
class VideoComment(Comment):
source = models.ForeignKey('FBVideo', null=True)
class LinkComment(Comment):
source = models.ForeignKey('FBLink', null=True)
class StatusComment(Comment):
source = models.ForeignKey('FBStatus', null=True)
class AlbumComment(Comment):
source = models.ForeignKey('FBAlbum', null=True)
####################
# TAGS
####################
class PhotoTag(models.Model):
source = models.ForeignKey('FBPhoto')
subject = models.CharField(max_length=128)
created_time = models.DateTimeField(null=True)
text = models.TextField(null=True, blank=True)
class VideoTag(models.Model):
source = models.ForeignKey('FBVideo')
subject = models.CharField(max_length=128)
created_time = models.DateTimeField(null=True)
####################
# ACTIVITY
####################
class DailyStreamActivity(models.Model):
user = models.ForeignKey(settings.AUTH_USER_MODEL)
date = models.DateField()
posts = models.ManyToManyField('StreamPost')
likes = models.PositiveIntegerField(default=0)
shares = models.PositiveIntegerField(default=0)
comments = models.PositiveIntegerField(default=0)
fbusers = models.ManyToManyField('FBId')
chars = models.PositiveIntegerField(default=0)
hourly = IntegerListField(max_length=120, default=empty_hours)
| valuesandvalue/valuesandvalue | vavs_project/fbdata/models.py | Python | mit | 29,633 |
# Copyright 2013-2021 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
import platform
from spack import *
class IntelOneapiDpl(IntelOneApiLibraryPackage):
"""Intel oneAPI DPL."""
maintainers = ['rscohn2']
homepage = 'https://github.com/oneapi-src/oneDPL'
if platform.system() == 'Linux':
version('2021.6.0',
url='https://registrationcenter-download.intel.com/akdlm/irc_nas/18372/l_oneDPL_p_2021.6.0.501_offline.sh',
sha256='0225f133a6c38b36d08635986870284a958e5286c55ca4b56a4058bd736f8f4f',
expand=False)
version('2021.5.0',
url='https://registrationcenter-download.intel.com/akdlm/irc_nas/18189/l_oneDPL_p_2021.5.0.445_offline.sh',
sha256='7d4adf300a18f779c3ab517070c61dba10e3952287d5aef37c38f739e9041a68',
expand=False)
version('2021.4.0',
url='https://registrationcenter-download.intel.com/akdlm/irc_nas/17889/l_oneDPL_p_2021.4.0.337_offline.sh',
sha256='540ef0d308c4b0f13ea10168a90edd42a56dc0883024f6f1a678b94c10b5c170',
expand=False)
@property
def component_dir(self):
return 'dpl'
@property
def headers(self):
include_path = join_path(self.component_path, 'linux', 'include')
headers = find_headers('*', include_path, recursive=True)
# Force this directory to be added to include path, even
# though no files are here because all includes are relative
# to this path
headers.directories = [include_path]
return headers
| LLNL/spack | var/spack/repos/builtin/packages/intel-oneapi-dpl/package.py | Python | lgpl-2.1 | 1,726 |
from nose.tools import * # noqa
from tests.base import (
OsfTestCase,
fake
)
from tests.factories import (
UserFactory,
ProjectFactory
)
from tests import utils
from website.files import models
from tests.test_addons import TestFile
from website.models import MetaSchema
from website.prereg.utils import get_prereg_schema
from scripts.migration.migrate_registration_extra import migrate
class TestMigrateRegistrationExtra(OsfTestCase):
def setUp(self):
super(TestMigrateRegistrationExtra, self).setUp()
self.user = UserFactory()
self.node = ProjectFactory(creator=self.user)
self.prereg_schema = get_prereg_schema()
self.file = self._get_test_file()
self.data = {
'uploader': {
'extra': {
'hasSelectedFile': True,
'nodeId': self.node._id,
'selectedFileName': 'file.txt',
'sha256': fake.sha256(),
'viewUrl': '/project/{}/files/osfstorage/5723787136b74e1a953d9612/'.format(
self.node._id
)
},
'value': 'file.txt'
},
'other': {
'value': 'foo'
},
'bad': {
'value': 'foobarbaz',
'extra': [
{
'viewUrl': '/project/{}/files/osfstorage/5723787136b74e1a953d9612/'.format(
self.node._id
),
'hasSelectedFile': True,
'selectedFileName': 'file.txt'
}
]
},
'nested': {
'value': {
'uploader': {
'extra': {
'hasSelectedFile': True,
'nodeId': self.node._id,
'selectedFileName': 'file.txt',
'sha256': fake.sha256(),
'viewUrl': '/project/{}/files/osfstorage/5723787136b74e1a953d9612/'.format(
self.node._id
)
},
'value': 'file.txt'
},
'question': {
'value': 'bar',
'extra': {}
},
'other': {
'value': 'foo',
'extra': []
}
}
}
}
def _get_test_file(self):
version = models.FileVersion(identifier='1', provider='osfstorage', metadata={'sha256': '2413fb3709b05939f04cf2e92f7d0897fc2596f9ad0b8a9ea855c7bfebaae892'})
version.save()
ret = models.FileNode(
_id='5723787136b74e1a953d9612',
name='file.txt',
node=self.node,
provider='osfstorage',
path='/test/file.txt',
materialized_path='/test/file.txt',
versions=[version]
)
ret.save()
return ret
def test_migrate_registration_extra(self):
with utils.mock_archive(
self.node,
schema=self.prereg_schema,
data=self.data,
autocomplete=True,
autoapprove=True
) as reg:
migrate()
reg.reload()
data = reg.registered_meta[self.prereg_schema._id]
assert_true(
isinstance(data['uploader']['extra'], list)
)
assert_true(
isinstance(
data['nested']['value']['uploader']['extra'],
list
)
)
assert_true(
isinstance(
data['nested']['value']['question']['extra'],
list
)
)
assert_equal(
self.data['uploader']['extra'],
data['uploader']['extra'][0]
)
assert_equal(
self.data['nested']['value']['uploader']['extra'],
data['nested']['value']['uploader']['extra'][0]
)
assert_equal(
self.data['nested']['value']['question']['value'],
data['nested']['value']['question']['value']
)
assert_equal(
self.data['nested']['value']['other'],
data['nested']['value']['other']
)
assert_equal(
self.data['other'],
data['other']
)
assert_true(
data['bad']['extra'][0].get('data', False)
)
assert_true(
isinstance(data['bad']['extra'][0]['data'], dict)
)
assert_equal(
data['bad']['extra'][0]['data']['name'], 'file.txt'
)
assert_equal(
data['bad']['extra'][0]['data']['sha256'], '2413fb3709b05939f04cf2e92f7d0897fc2596f9ad0b8a9ea855c7bfebaae892'
)
| chrisseto/osf.io | scripts/tests/test_migrate_registration_extra.py | Python | apache-2.0 | 5,204 |
# -*- coding: utf-8 -*-
"""
Created on Sun Jan 4 15:13:53 2015
@author: remi
"""
import psycopg2
def connect_to_base():
conn = psycopg2.connect(
database='vosges'
,user='postgres'
,password='postgres'
,host='172.16.3.50'
,port='5432' ) ;
cur = conn.cursor()
return conn, cur ;
def execute_querry(q,arg_list,conn,cur):
#print q % arg_list ;
cur.execute( q ,arg_list)
conn.commit()
def order_patch_by_octree(conn,cur,ipatch, tot_level,stop_level,data_dim):
import numpy as np;
import octree_ordering;
round_size = 3
#get points :
q = """SELECT
round(PC_Get((pt).point,'X'),%s)::float as x
, round(PC_Get((pt).point,'Y'),%s)::float as y
, round(PC_Get((pt).point,'Z'),%s)::float as z
--,((pt).ordinality)::int
FROM rc_explodeN_numbered(%s,-1) as pt;
""" ;
arg_list = [round_size,round_size,round_size,ipatch] ;
execute_querry(q,arg_list,conn,cur) ;
pointcloud = np.asarray(cur.fetchall())
#print pointcloud ;
the_result = [];index =[];
point_cloud_length = pointcloud.shape[0] ;
index = np.arange(1,point_cloud_length+1)
pointcloud_int = octree_ordering.center_scale_quantize(pointcloud,tot_level );
pointcloud=[];
center_point,the_result,piv = octree_ordering.preparing_tree_walking(tot_level) ;
points_to_keep = np.arange(point_cloud_length,dtype=int);
octree_ordering.recursive_octree_ordering_ptk(points_to_keep, pointcloud_int,index,center_point, 0,tot_level,stop_level, the_result,piv) ;
#the_result= np.array(the_result);
#the_result[:,0]= the_result[:,0]+1 #ppython is 0 indexed, postgres is 1 indexed , we need to convert
#return np.asarray(the_result) + 1;
q = """
WITH points AS (
SELECT
(pt).ordinality
, pt.point as opoint
FROM rc_explodeN_numbered( %s,-1) as pt
)
, points_LOD as (
SELECT unnest(%s) as ordering , unnest(%s) as level
)
, pt_p_l AS (
        SELECT array_agg(n_per_lev::int ORDER BY level ASC) as points_per_level
FROM
(SELECT level, count(*) as n_per_lev
FROM points_LOD
GROUP BY level ) as sub
)
SELECT pa.patch , pt_p_l.points_per_level
FROM pt_p_l,
(
SELECT pc_patch(points.opoint order by level ASC NULLS LAST, random()) as patch
FROM points
LEFT OUTER JOIN points_LOD AS plod on (points.ordinality = plod.ordering)
) as pa ;"""
#print the_result
arg_list = [ipatch, np.asarray(the_result)[:,0].tolist(), np.asarray(the_result)[:,1].tolist() ] ;
execute_querry(q,arg_list,conn,cur) ;
return cur.fetchall()[0] ;
def simple_order(gid, tot_level,stop_level,data_dim,conn,cur):
import psycopg2 ;
import octree_ordering;
q = """
SELECT gid, patch
FROM vosges_2011.las_vosges
WHERE gid = %s
""";
arg_list = [gid] ;
execute_querry(q,arg_list,conn,cur) ;
gid , patch = cur.fetchall()[0] ;
opatch,ppl = order_patch_by_octree(conn,cur, patch, tot_level,stop_level,data_dim)
q = """
UPDATE vosges_2011.las_vosges SET (patch, points_per_level) = (%s,%s)
WHERE gid = %s;
"""
arg_list = [opatch,ppl,gid] ;
execute_querry(q,arg_list,conn,cur) ;
return gid ;
def test_order_by_octree():
import psycopg2 ;
import datetime;
import octree_ordering;
print 'starting : %s ' % (datetime.datetime.now());
conn,cur = connect_to_base();
    set_search_path(conn,cur);
    tot_level = 8;
    stop_level = 6 ;
    data_dim = 3 ;
q = """DROP TABLE IF EXISTS public.test_order_out_of_db ;
CREATE TABLE public.test_order_out_of_db(
gid int,
patch pcpatch(6),
points_per_level int[]
) ;
""" ;
execute_querry(q,[],conn,cur) ;
for i in range(1, 1000):
if i %10 == 0:
print '\t\t starting loop %s : %s ' % (i,datetime.datetime.now());
q = """
SELECT gid, patch
FROM benchmark_cassette_2013.riegl_pcpatch_space
WHERE gid = %s
""";
arg_list = [i] ;
execute_querry(q,arg_list,conn,cur) ;
gid , patch = cur.fetchall()[0] ;
#print gid, patch
opatch,ppl = order_patch_by_octree(conn,cur, patch, tot_level,stop_level,data_dim)
q = """
INSERT INTO public.test_order_out_of_db VALUES (%s,%s,%s);
"""
arg_list = [gid,opatch,ppl] ;
execute_querry(q,arg_list,conn,cur) ;
        #print result
cur.close()
conn.close()
print '\t ending : %s ' % (datetime.datetime.now());
#test_order_by_octree()
def batch_LOD_multiprocess(processes,split_number,key_tot_start,key_tot_end,the_step):
"""Main function, execute a given query in parallel on server"""
import multiprocessing as mp;
import random;
    #split the total gid range (key_tot_start..key_tot_end) into split_number intervals
subintervals = split_interval_into_smaller_interval(split_number,key_tot_start,key_tot_end,the_step);
#shuffle so that subintervals are in random order
random.shuffle(subintervals);
#batch_LOD_monoprocess([1,100,1]);
#return
#multiprocessing:
pool = mp.Pool(processes);
results = pool.map(batch_LOD_monoprocess, subintervals)
return results
def split_interval_into_smaller_interval(split_number,key_tot_start,key_tot_end, the_step):
""" simply takes a big interval and split it into small pieces. Warning, possible overlaps of 1 elements at the beginning/end"""
import numpy as np
import math
key_range = abs(key_tot_end-key_tot_start)/(split_number *1.0)
interval_to_process = [] ;
for i,(proc) in enumerate(np.arange(1,split_number+1)):
key_local_start = int(math.ceil(key_tot_start+i * key_range)) ;
key_local_end = int(math.trunc(key_tot_start+(i+1) * key_range));
interval_to_process.append([ key_local_start , key_local_end, the_step])
#batch_LOD_monoprocess(key_min,key_max,key_step):
#print interval_to_process
return interval_to_process
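# Worked example (illustrative values): with key_tot_start=0, key_tot_end=9 and
# split_number=3, key_range is 3.0 and the call
#     split_interval_into_smaller_interval(3, 0, 9, 1)
# returns [[0, 3, 1], [3, 6, 1], [6, 9, 1]]; the shared boundary gids (3 and 6)
# are the 1-element overlap mentioned in the docstring.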
def batch_LOD_monoprocess((key_min,key_max,key_step)):
""" this function connect to databse, and execute the querry on the specified gid range, step by step"""
import multiprocessing;
tot_level = 8;
stop_level = 6 ;
data_dim = 3 ;
#connect to db
conn,cur = connect_to_base();
#setting the search path
cur.execute("""SET search_path to vosges , public;""");
conn.commit();
i = key_min;
while i <= key_max :
simple_order(i, tot_level,stop_level,data_dim,conn,cur) ;
i+=key_step ;
#if i %int(key_max/10.0-key_min/10.0) == 0 :
# adv = round((1.0*i-key_min)/(key_max*1.0-key_min*1.0),2);
# print '\t %s: %s %s ' % (str(multiprocessing.current_process().name),' '*int(10*adv),str(adv))
print '\t %s' % str(multiprocessing.current_process().name) ;
cur.close()
conn.close()
def batch_LOD_multiprocess_test():
""" test of the main function, parameters adapted to IGN big server"""
import datetime ;
time_start = datetime.datetime.now();
print 'starting : %s ' % (time_start);
key_tot_start=8736
key_tot_end= 590264 #6554548
key_step = 1 ;
processes = 15 ;
split_number = processes*100;
    # run the LOD ordering for the whole gid range in parallel
batch_LOD_multiprocess(processes,split_number,key_tot_start,key_tot_end,key_step);
time_end = datetime.datetime.now();
print 'ending : %s ' % (time_end);
print 'duration : %s ' % (time_end-time_start)
#batch_LOD_multiprocess_test(); | Remi-C/LOD_ordering_for_patches_of_points | script/octree_ordering_out_of_server.py | Python | lgpl-3.0 | 7,993 |
from datetime import datetime
from django.db import models
from LabtrackerCore.models import Item,InventoryType,Group
from django.contrib.auth.models import User
from django.test import TestCase
from django import dispatch
class ResolveState(models.Model):
"""
    The resolve state an issue is put into when it is resolved
"""
rs_id = models.AutoField(primary_key=True)
name = models.CharField(max_length=60)
description = models.CharField(max_length=400)
def __unicode__(self):
return self.name
class ProblemType(models.Model):
"""
    Issues can have multiple problem types associated with them
"""
pb_id = models.AutoField(primary_key=True)
inv = models.ManyToManyField(InventoryType, blank=True, null=True)
name = models.CharField(max_length=60, unique=True)
description = models.CharField(max_length=400)
def __unicode__(self):
return self.name
class Issue(models.Model):
"""
Issues can be related to specific items or the whole InventoryType.
    Therefore, the inventory type field (it) is nullable
"""
issue_id = models.AutoField(primary_key=True, editable=False)
it = models.ForeignKey(InventoryType, verbose_name="Inventory Type", blank=True,
null=True)
item = models.ForeignKey(Item, verbose_name="Item Name")#, null=True, blank=True, verbose_name="Item Name")
group = models.ForeignKey(Group,null=True, blank=True)
reporter = models.ForeignKey(User, related_name='reporter')
assignee = models.ForeignKey(User, related_name='assignee', null=True,
blank=True)
cc = models.ManyToManyField(User, related_name="cc_user", null=True,
blank=True, verbose_name="CC", db_table="IssueTracker_email_cc")
problem_type = models.ManyToManyField(ProblemType, null=True, blank=True,
help_text="Select one or more problems.", verbose_name="problems")
post_time = models.DateTimeField(auto_now_add=True)
last_modified = models.DateTimeField(null=True, blank=True, default=datetime.now)
resolve_time = models.DateTimeField(null=True, blank=True)
resolved_state = models.ForeignKey(ResolveState, null=True, blank=True)
title = models.CharField(max_length=200, verbose_name="Summary")
description = models.TextField()
steps = models.TextField(blank=True)
attempts = models.TextField(blank=True)
other_tickets = models.IntegerField(null=True, blank=True)
def save(self, *args, **kwargs):
# check to see if the resolved_state has changed, if so, then change
# resolve_time
self.last_modified = datetime.now() # slight hack (if a hack at all) to get last_modified to update correctly.
if self.pk:
old = Issue.objects.get(pk=self.pk)
if old.resolved_state != self.resolved_state :
self.resolve_time = datetime.now()
super(Issue, self).save(*args, **kwargs)
def __unicode__(self):
return "%d - %s" % (self.issue_id, self.title)
@models.permalink
def get_absolute_url(self):
return ('IssueTracker.views.viewIssue', [str(self.issue_id)])
class Meta:
permissions = (("can_modify_other", "Can modify anybody's posts"),
("can_delete_other", "Can delete anybody's posts"),
("can_view", "Can view issues"),)
class IssueHistory(models.Model):
"""
Basically, keeps track of some of the changes made to a model outside of
the IssueComment area
"""
ih_id = models.AutoField(primary_key=True)
user = models.ForeignKey(User)
issue = models.ForeignKey(Issue)
time = models.DateTimeField(auto_now=True)
message = models.CharField(max_length=300)
class IssueComment(models.Model):
"""
This is for the comments people add to each issues
"""
ip_id = models.AutoField(primary_key=True)
issue = models.ForeignKey(Issue)
user = models.ForeignKey(User)
time = models.DateTimeField(auto_now_add=True)
comment = models.TextField(blank=True)
"""
Test Cases
"""
class NullForeignKeyTest(TestCase):
"""
When sorting by a nullable foreign key, rows with a null value would disappear
"""
fixtures = ['dev']
def testNullKey(self):
issues = Issue.objects.filter(resolved_state__isnull=True).order_by('-assignee')
self.assertEquals(3, issues.count())
| abztrakt/labtracker | IssueTracker/models.py | Python | apache-2.0 | 4,368 |
import os
import urllib2
from . import *
from .helpers.mail import *
RESET_PW = '13b8f94f-bcae-4ec6-b752-70d6cb59f932'
SG = sendgrid.SendGridAPIClient(apikey=os.environ['SENDGRID_API_KEY'])
def _send_email(template_id, subject, dst_email, dst_name, src_email, src_name, sub_dict):
mail = Mail()
mail.set_subject(subject)
mail.set_from(Email(src_email, src_name))
p = Personalization()
p.add_to(Email(dst_email, dst_name))
for k, v in sub_dict.items():
p.add_substitution(Substitution(k, v))
mail.add_personalization(p)
mail.set_template_id(template_id)
tracking_settings = TrackingSettings()
tracking_settings.set_click_tracking(ClickTracking(enable=False, enable_text=False))
tracking_settings.set_open_tracking(OpenTracking(enable=False))
tracking_settings.set_subscription_tracking(SubscriptionTracking(enable=False))
mail.set_tracking_settings(tracking_settings)
data = mail.get()
print(data)
res = SG.client.mail.send.post(request_body=data)
return res
if __name__ == '__main__':
email = '[email protected]'
try:
_send_email(RESET_PW, 'test email', email, "Elmer", "[email protected]", "DX",{'-name-': 'Elmer', '-card-': 'Test'})
except urllib2.HTTPError as e:
print e.read()
| sbuss/voteswap | lib/sendgrid/temp.py | Python | mit | 1,286 |
from django.core.cache.backends import filebased
from django_prometheus.cache.metrics import (
django_cache_get_total,
django_cache_hits_total,
django_cache_misses_total,
)
class FileBasedCache(filebased.FileBasedCache):
"""Inherit filebased cache to add metrics about hit/miss ratio"""
def get(self, key, default=None, version=None):
django_cache_get_total.labels(backend="filebased").inc()
cached = super().get(key, default=None, version=version)
if cached is not None:
django_cache_hits_total.labels(backend="filebased").inc()
else:
django_cache_misses_total.labels(backend="filebased").inc()
return cached or default
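# Usage sketch (assumes django-prometheus is installed and the cache directory
# is writable): point Django's CACHES setting at this backend instead of the
# stock file-based backend to get the hit/miss counters exported, e.g.
#
#     CACHES = {
#         "default": {
#             "BACKEND": "django_prometheus.cache.backends.filebased.FileBasedCache",
#             "LOCATION": "/var/tmp/django_cache",
#         }
#     }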
| korfuri/django-prometheus | django_prometheus/cache/backends/filebased.py | Python | apache-2.0 | 709 |
#!/usr/bin/env python
import os, os.path
from .math import *
from .strings import *
from .rendering import *
from .daemon import Daemon
from .rest_query_parser import *
def mkdir(path):
"""
    Makes the specified folder, recursively creating any missing parent folders
"""
if not path:
return
bpath = os.path.split(path)[0]
mkdir(bpath)
if not os.path.exists(path):
os.mkdir(path)
def assign_type(string):
res = string
try:
res = float(string)
except ValueError:
pass
try:
res = int(string)
except ValueError:
pass
if string.lower() == "true":
res = True
elif string.lower() == "false":
res = False
elif string.lower() == "none":
res = None
return res
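# Examples (illustrative): assign_type("3") -> 3 (int), assign_type("3.5") -> 3.5
# (float), assign_type("True") -> True, assign_type("none") -> None, and any
# other string such as "Fe2O3" is returned unchanged.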
def combinations_with_replacement(iterable, r):
# combinations_with_replacement('ABC', 2) --> AA AB AC BB BC CC
pool = tuple(iterable)
n = len(pool)
if not n and r:
return
indices = [0] * r
yield tuple(pool[i] for i in indices)
while True:
for i in reversed(list(range(r))):
if indices[i] != n - 1:
break
else:
return
indices[i:] = [indices[i] + 1] * (r - i)
yield tuple(pool[i] for i in indices)
def atom_sort(atoms):
atoms = np.array(atoms)
x = [a.x for a in atoms]
y = [a.y for a in atoms]
z = [a.z for a in atoms]
ox = [a.ox for a in atoms]
elt = [a.element_id for a in atoms]
comps = []
for l in [x, y, z, ox, elt]:
if any(l):
comps.append(l)
if len(comps) == 0:
return atoms
return atoms[np.lexsort(comps)].tolist()
def get_docstring(model):
names = model._meta.get_all_field_names()
longest = max(list(map(len, names)))
print("+-" + "-" * longest + "-+")
for n in names:
print("| " + n.ljust(longest) + " |")
print("+-" + "-" * longest + "-+")
def get_field_list(model):
print(", ".join(model._meta.get_all_field_names()))
| wolverton-research-group/qmpy | qmpy/utils/__init__.py | Python | mit | 2,029 |
#!/usr/bin/env python
import jinja2
import mimetypes
import numpy as np
import csv
import os
import smtplib
import sys
import getpass
import email
import email.utils
from email.mime.multipart import MIMEMultipart
from email.mime.text import MIMEText
from email.mime.base import MIMEBase
def is_poster(entry):
if entry == 'reject':
raise Exception('A rejected abstract is being recorded')
elif entry == 'poster':
return True
elif entry == 'talk':
return False
if len(sys.argv) < 3:
    print('Usage: ./send-decisions.py decisions.csv decision.txt.in')
    sys.exit(1)
with open(sys.argv[2], 'r') as fp:
email_template = fp.read()
template = jinja2.Template(email_template)
with open(sys.argv[1], 'rU') as csvfile:
filereader = csv.reader(csvfile, delimiter=',', quotechar='"')
submissions = []
for row in filereader:
if row[5] == 'reject':
continue
submission = {}
if len(row) > 7 :
raise Exception("Too many columns in row with id "+row[0])
submission['idnum'] = int(row[0])
submission['title'] = row[1]
submission['author'] = row[2]
submission['email'] = row[3]
submission['track'] = row[6]
if row[5] != 'reject':
submission['poster_talk'] = row[5]
submissions.append(submission)
track_name_map = {
'poster' : 'Poster',
'gen' : 'General',
'edu': 'Scientific Computing Education',
'geo': 'Geophysics',
'gis': 'Geospatial Data In Science',
'astro': 'Astronomy and Astrophysics',
'viz': 'Visualization',
'soc': 'Computational Social Sciences',
'bioinfo': 'Bioinformatics',
'eng': 'Engineering'
}
tue = 'Tuesday, July 8'
wed = 'Wednesday, July 9'
thu = 'Thursday, July 10'
am = '10:15 - 12:15am'
pm = '2:15 - 4:15pm'
track_time_map = {
'poster' : [wed,'in the afternoon'],
'gen1' : [tue, am],
'gen2' : [tue, pm],
'gen3' : [wed, am],
'gen4' : [thu, am],
'edu1': [tue, am],
'edu2': [tue, pm],
'edu3': [wed, am],
'edu4': [thu, am],
'gis1': [tue, am],
'gis2': [tue, pm],
'gis3': [wed, am],
'gis4': [thu, am],
'astro': [wed, pm],
'bioinfo': [wed, pm],
'geo': [wed, pm],
'viz': [thu, pm],
'soc': [thu, pm],
'eng': [thu, pm]
}
username = '[email protected]'
password = getpass.getpass('password:')
server = smtplib.SMTP('smtp.gmail.com:587')
server.starttls()
server.login(username, password)
for submission in submissions:
day = track_time_map[submission['track']][0]
time = track_time_map[submission['track']][1]
track = track_name_map[submission['track'].strip('1').strip('2').strip('3').strip('4')]
email_body = template.render(
author = submission['author'],
abstract_title = submission['title'],
track_name = track,
poster_talk = submission['poster_talk'],
slot_day = day,
slot_time = time,
)
msg = MIMEMultipart('alternative')
msg['Subject'] = 'SciPy2014 Abstract Decision - Action Requested'
msg['From'] = 'Katy Huff <[email protected]>'
msg['To'] = submission['email']
msg['Cc'] = 'Serge Rey <[email protected]>,'
msg['Date'] = email.utils.formatdate()
msg.attach(MIMEText(email_body, 'plain'))
from_address = 'Katy Huff <[email protected]>'
to_address = ['Serge Rey <[email protected]>', 'Andy Terrel <[email protected]>']
to_address.extend([em.strip() for em in submission['email'].split(',')])
print(email_body)
server.sendmail(from_address, to_address, msg.as_string())
| cgodshall/scipy-conference | manual/mailings/program_committee/send-req-reminder.py | Python | bsd-3-clause | 3,730 |
"""
tests.test_generator
~~~~~~~~~~~~~~~~~~~~
Lua table generator (encoder)
"""
import unittest
from luatable.generator import Generator
from luatable.parser import Parser
class GeneratorTestCase(unittest.TestCase):
def test_generate(self):
# examples from Programming in Lua, 3e
input1 = {
1: {'y': 0, 'x': 0},
2: {'y': 0, 'x': -10},
3: {'y': 1, 'x': -10},
4: {'y': 1, 'x': 0},
'thickness': 2,
'npoints': 4,
'color': "blue"
}
generated1 = Generator(input1).generate()
output1 = Parser(generated1).parse()
self.assertEqual(input1, output1)
| zauonlok/luatable | tests/test_generator.py | Python | mit | 697 |
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
import django.utils.timezone
from django.conf import settings
class Migration(migrations.Migration):
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
]
operations = [
migrations.CreateModel(
name='Post',
fields=[
('id', models.AutoField(serialize=False, verbose_name='ID', primary_key=True, auto_created=True)),
('title', models.CharField(max_length=200)),
('text', models.TextField()),
('created_date', models.DateTimeField(default=django.utils.timezone.now)),
('published_date', models.DateTimeField(null=True, blank=True)),
('author', models.ForeignKey(to=settings.AUTH_USER_MODEL)),
],
options={
},
bases=(models.Model,),
),
]
| pkimber/blog | blog/migrations/0001_initial.py | Python | apache-2.0 | 976 |
"""Integrate with DuckDNS."""
from asyncio import iscoroutinefunction
from datetime import timedelta
import logging
import voluptuous as vol
from homeassistant.const import CONF_ACCESS_TOKEN, CONF_DOMAIN
from homeassistant.core import CALLBACK_TYPE, callback
from homeassistant.helpers.aiohttp_client import async_get_clientsession
import homeassistant.helpers.config_validation as cv
from homeassistant.helpers.event import async_call_later
from homeassistant.loader import bind_hass
from homeassistant.util import dt as dt_util
_LOGGER = logging.getLogger(__name__)
ATTR_TXT = "txt"
DOMAIN = "duckdns"
INTERVAL = timedelta(minutes=5)
SERVICE_SET_TXT = "set_txt"
UPDATE_URL = "https://www.duckdns.org/update"
CONFIG_SCHEMA = vol.Schema(
{
DOMAIN: vol.Schema(
{
vol.Required(CONF_DOMAIN): cv.string,
vol.Required(CONF_ACCESS_TOKEN): cv.string,
}
)
},
extra=vol.ALLOW_EXTRA,
)
SERVICE_TXT_SCHEMA = vol.Schema({vol.Required(ATTR_TXT): vol.Any(None, cv.string)})
async def async_setup(hass, config):
"""Initialize the DuckDNS component."""
domain = config[DOMAIN][CONF_DOMAIN]
token = config[DOMAIN][CONF_ACCESS_TOKEN]
session = async_get_clientsession(hass)
async def update_domain_interval(_now):
"""Update the DuckDNS entry."""
return await _update_duckdns(session, domain, token)
intervals = (
INTERVAL,
timedelta(minutes=1),
timedelta(minutes=5),
timedelta(minutes=15),
timedelta(minutes=30),
)
async_track_time_interval_backoff(hass, update_domain_interval, intervals)
async def update_domain_service(call):
"""Update the DuckDNS entry."""
await _update_duckdns(session, domain, token, txt=call.data[ATTR_TXT])
hass.services.async_register(
DOMAIN, SERVICE_SET_TXT, update_domain_service, schema=SERVICE_TXT_SCHEMA
)
return True
_SENTINEL = object()
async def _update_duckdns(session, domain, token, *, txt=_SENTINEL, clear=False):
"""Update DuckDNS."""
params = {"domains": domain, "token": token}
if txt is not _SENTINEL:
if txt is None:
# Pass in empty txt value to indicate it's clearing txt record
params["txt"] = ""
clear = True
else:
params["txt"] = txt
if clear:
params["clear"] = "true"
resp = await session.get(UPDATE_URL, params=params)
body = await resp.text()
if body != "OK":
_LOGGER.warning("Updating DuckDNS domain failed: %s", domain)
return False
return True
@callback
@bind_hass
def async_track_time_interval_backoff(hass, action, intervals) -> CALLBACK_TYPE:
"""Add a listener that fires repetitively at every timedelta interval."""
    if not iscoroutinefunction(action):
_LOGGER.error("action needs to be a coroutine and return True/False")
return
if not isinstance(intervals, (list, tuple)):
intervals = (intervals,)
remove = None
failed = 0
async def interval_listener(now):
"""Handle elapsed intervals with backoff."""
nonlocal failed, remove
try:
failed += 1
if await action(now):
failed = 0
finally:
delay = intervals[failed] if failed < len(intervals) else intervals[-1]
remove = async_call_later(hass, delay.total_seconds(), interval_listener)
hass.async_run_job(interval_listener, dt_util.utcnow())
def remove_listener():
"""Remove interval listener."""
if remove:
remove() # pylint: disable=not-callable
return remove_listener
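# Usage sketch (illustrative): once the component is set up, other code running
# inside Home Assistant can update the TXT record through the service that was
# registered above, e.g. for a DNS-01 challenge:
#
#     await hass.services.async_call(
#         DOMAIN, SERVICE_SET_TXT, {ATTR_TXT: "challenge-token"}, blocking=True
#     )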
| sdague/home-assistant | homeassistant/components/duckdns/__init__.py | Python | apache-2.0 | 3,699 |
# -*- coding: utf-8 -*-
# This file is part of emesene.
#
# emesene is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3 of the License, or
# (at your option) any later version.
#
# emesene is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with emesene; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
import xml.parsers.expat
import e3
import logging
class RichWidget(object):
    '''a base widget that allows adding formatted text based on an
    xhtml subset'''
def put_text(self, text, fg_color=None, bg_color=None, font=None, size=None,
bold=False, italic=False, underline=False, strike=False):
'''insert text at the current position with the style defined by the
optional parameters'''
raise NotImplementedError('Not implemented')
def put_formatted(self, text, fg_color=None, bg_color=None, font=None, size=None,
bold=False, italic=False, underline=False, strike=False):
'''insert text at the current position with the style defined inside
text'''
try:
result = e3.common.XmlParser.XmlParser(
#'<span>' + text.replace('\n', '') + '</span>').result
'<span>' + text + '</span>').result
except xml.parsers.expat.ExpatError:
logging.getLogger("gtkui.RichWidget").debug("cant parse '%s'" % \
(text, ))
return
dct = e3.common.XmlParser.DictObj(result)
self._put_formatted(dct, fg_color, bg_color, font, size,
bold, italic, underline, strike)
def _put_formatted(self, dct, fg_color=None, bg_color=None, font=None, size=None,
bold=False, italic=False, underline=False, strike=False):
'''insert text at the current position with the style defined inside
text, using the parsed structure stored on dct'''
# override the values if defined, keep the old ones if no new defined
bold = dct.tag == 'b' or dct.tag == 'strong' or bold
italic = dct.tag == 'i' or dct.tag == 'em' or italic
underline = dct.tag == 'u' or underline
strike = dct.tag == 's' or strike
if dct.tag == 'span' and dct.style:
style = e3.common.XmlParser.parse_css(dct.style)
font = style.font_family or font
try:
# TODO: handle different units?
size = int(style.font_size) or size
except ValueError:
pass
except TypeError:
pass
fg_color = style.color or fg_color
bg_color = style.background_color or bg_color
if dct.childs is None:
return
for child in dct.childs:
if isinstance(child, basestring):
self.put_text(child, fg_color, bg_color, font, size,
bold, italic, underline, strike)
elif child.tag == 'img':
self.put_image(child.src, child.alt)
elif child.tag == 'br':
self.new_line()
elif child.tag == 'a':
self.put_link(child.href)
else:
self._put_formatted(child, fg_color, bg_color, font, size,
bold, italic, underline, strike)
def put_image(self, path, tip=None):
'''insert an image at the current position
tip it's the alt text on mouse over'''
raise NotImplementedError('Not implemented')
def new_line(self):
'''put a new line on the text'''
raise NotImplementedError('Not implemented')
def put_link(self, link):
'''insert a link at the current position'''
raise NotImplementedError('Not implemented')
| tiancj/emesene | emesene/gui/gtkui/RichWidget.py | Python | gpl-3.0 | 4,161 |
from __future__ import division, print_function
from functools import (
wraps,
)
from jim import function
from jim.os_ import mkdirp
def memoized(directory='data'):
mkdirp(directory)
def _memoized(f):
import os
function_directory = os.path.join(
directory,
function.fullname(f)
)
mkdirp(function_directory)
@wraps(f)
def memoized_f(arg):
data_path = os.path.join(
function_directory,
str(hash(arg)),
)
if not(os.path.exists(data_path)):
return_value = f(arg)
with open(data_path, 'w') as f_:
f_.write(return_value)
f_.flush()
with open(data_path, 'r') as f_:
return f_.read()
return memoized_f
return _memoized
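# Minimal usage sketch (assumptions: the working directory is writable and the
# decorated function takes a single hashable argument and returns a string,
# since results are persisted to disk as text in a per-function directory):
#
#     @memoized()
#     def slow_lookup(key):
#         return "value-for-%s" % key
#
#     slow_lookup("a")   # computed and written to the cache file
#     slow_lookup("a")   # served from the cache file on the second call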
| Jim-Holmstroem/jim | jim/memoize.py | Python | gpl-2.0 | 885 |
'''
Tornado-MySQL: A pure-Python MySQL client library for Tornado.
Copyright (c) 2010, 2013-2014 PyMySQL contributors
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.
'''
VERSION = (0, 6, 3, None)
from ._compat import text_type, JYTHON, IRONPYTHON
from .constants import FIELD_TYPE
from .converters import escape_dict, escape_sequence, escape_string
from .err import Warning, Error, InterfaceError, DataError, \
DatabaseError, OperationalError, IntegrityError, InternalError, \
NotSupportedError, ProgrammingError, MySQLError
from .times import Date, Time, Timestamp, \
DateFromTicks, TimeFromTicks, TimestampFromTicks
import sys
from tornado import gen
threadsafety = 1
apilevel = "2.0"
paramstyle = "format"
class DBAPISet(frozenset):
def __ne__(self, other):
if isinstance(other, set):
            return super(DBAPISet, self).__ne__(other)
else:
return other not in self
def __eq__(self, other):
if isinstance(other, frozenset):
return frozenset.__eq__(self, other)
else:
return other in self
def __hash__(self):
return frozenset.__hash__(self)
STRING = DBAPISet([FIELD_TYPE.ENUM, FIELD_TYPE.STRING,
FIELD_TYPE.VAR_STRING])
BINARY = DBAPISet([FIELD_TYPE.BLOB, FIELD_TYPE.LONG_BLOB,
FIELD_TYPE.MEDIUM_BLOB, FIELD_TYPE.TINY_BLOB])
NUMBER = DBAPISet([FIELD_TYPE.DECIMAL, FIELD_TYPE.DOUBLE, FIELD_TYPE.FLOAT,
FIELD_TYPE.INT24, FIELD_TYPE.LONG, FIELD_TYPE.LONGLONG,
FIELD_TYPE.TINY, FIELD_TYPE.YEAR])
DATE = DBAPISet([FIELD_TYPE.DATE, FIELD_TYPE.NEWDATE])
TIME = DBAPISet([FIELD_TYPE.TIME])
TIMESTAMP = DBAPISet([FIELD_TYPE.TIMESTAMP, FIELD_TYPE.DATETIME])
DATETIME = TIMESTAMP
ROWID = DBAPISet()
def Binary(x):
"""Return x as a binary type."""
if isinstance(x, text_type) and not (JYTHON or IRONPYTHON):
return x.encode()
return bytes(x)
@gen.coroutine
def connect(*args, **kwargs):
"""See connections.Connection.__init__() for information about defaults."""
from .connections import Connection
conn = Connection(*args, **kwargs)
yield conn.connect()
raise gen.Return(conn)
from . import connections as _orig_conn
if _orig_conn.Connection.__init__.__doc__ is not None:
connect.__doc__ = _orig_conn.Connection.__init__.__doc__ + ("""
See connections.Connection.__init__() for information about defaults.
""")
del _orig_conn
def get_client_info(): # for MySQLdb compatibility
return '.'.join(map(str, VERSION))
# we include a doctored version_info here for MySQLdb compatibility
version_info = (1,2,2,"final",0)
NULL = "NULL"
__version__ = get_client_info()
__all__ = [
'BINARY', 'Binary', 'connect', 'Connection', 'DATE', 'Date',
'Time', 'Timestamp', 'DateFromTicks', 'TimeFromTicks', 'TimestampFromTicks',
'DataError', 'DatabaseError', 'Error', 'FIELD_TYPE', 'IntegrityError',
'InterfaceError', 'InternalError', 'MySQLError', 'NULL', 'NUMBER',
'NotSupportedError', 'DBAPISet', 'OperationalError', 'ProgrammingError',
'ROWID', 'STRING', 'TIME', 'TIMESTAMP', 'Warning', 'apilevel',
'connections', 'constants', 'converters', 'cursors',
'escape_dict', 'escape_sequence', 'escape_string', 'get_client_info',
'paramstyle', 'threadsafety', 'version_info',
"NULL","__version__",
]
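# Usage sketch (connection parameters are illustrative and the import path shown
# is the upstream "tornado_mysql" name; adjust it to this vendored package):
#
#     from tornado import gen, ioloop
#     import tornado_mysql
#
#     @gen.coroutine
#     def show_users():
#         conn = yield tornado_mysql.connect(host='127.0.0.1', port=3306,
#                                            user='root', passwd='', db='mysql')
#         cur = conn.cursor()
#         yield cur.execute("SELECT Host, User FROM user")
#         for row in cur:
#             print(row)
#         cur.close()
#         conn.close()
#
#     ioloop.IOLoop.current().run_sync(show_users)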
| manyunkai/tweixin | weixin/tmysql/__init__.py | Python | gpl-2.0 | 4,381 |
# -*- coding: utf-8 -*-
# vim:fenc=utf-8
#
# Copyright © 2016 msto <[email protected]>
#
# Distributed under terms of the MIT license.
"""
Simple venn diagrams.
"""
import matplotlib.pyplot as plt
import matplotlib.patches as patches
def venn4(subsets,
set_labels=('A', 'B', 'C', 'D'),
# set_colors=['#8f1402', '#0485d1', '#feb308', '#8eab12'],
set_colors=['#8eab12', '#feb308', '#8f1402', '#0485d1'],
alpha=0.4,
ax=None,
set_label_fontsize=18,
subset_label_fontsize=14,
rotate_labels=True):
"""
Plot a four-way venn diagram.
Parameters
----------
subsets : list
Values for each subset of Venn.
May be int, float, or string. Use strings for any custom formatting
[A, B, C, D, AB, AC, AD, BC, BD, CD, ABC, ABD, ACD, BCD, ABCD]
set_labels : list of str, optional
Labels around ellipses' exterior
set_colors : list, optional
Colors of Venn ellipses.
Defaults to xkcd's [pea green, amber, brick red, cerulean]
alpha : float, optional
Alpha of Venn ellipses
ax : AxesSubplot
Axis to draw Venn on
set_label_fontsize : int, optional
Fontsize of exterior set labels.
Default=18pt, optimized for 8in by 8in figure
subset_label_fontsize : int, optional
Fontsize of interior count labels
Default=14pt, optimized for 8in by 8in figure
rotate_labels : bool, optional
When true, rotate count labels to fit larger numbers.
When false, count labels are horizontal with rotation=0.
Returns
-------
ax : AxesSubplot
"""
if len(subsets) != 15:
raise Exception('Must provide exactly 15 subset values')
if ax is None:
ax = plt.gca()
width = 0.75
height = 0.5
# Draw ellipses
ellipse_coords = [
((0.50, 0.60), -45), # A (top left)
((0.50, 0.60), 45), # B (top right)
((0.65, 0.40), 45), # C (bottom right)
((0.35, 0.40), -45), # D (bottom left)
]
for (coord, angle), color in zip(ellipse_coords, set_colors):
e = patches.Ellipse(coord, width, height, angle,
alpha=alpha, facecolor=color)
ax.add_patch(e)
# Add exterior set labels
set_label_positions = [
(0.22, 0.91, 45), # A (top left)
(0.78, 0.91, -45), # B (top right)
(0.12, 0.22, -45), # C (bottom right)
(0.88, 0.22, 45), # D (bottom left)
]
for label, (x, y, rotation) in zip(set_labels, set_label_positions):
ax.text(x, y, label, rotation=rotation,
ha='center', va='center', fontsize=set_label_fontsize)
# Add subset count labels
subsets = [str(s) for s in subsets]
subset_positions = [
(0.30, 0.83, 45), # A
(0.70, 0.83, -45), # B
(0.18, 0.30, -45), # C
(0.83, 0.30, 45), # D
(0.50, 0.77, 0), # AB
(0.22, 0.68, 55), # AC
(0.75, 0.40, 45), # AD
(0.25, 0.40, -45), # BC
(0.78, 0.68, -55), # BD
(0.50, 0.18, 0), # CD
(0.33, 0.58, 0), # ABC
(0.66, 0.58, 0), # ABD
(0.60, 0.32, 10), # ACD
(0.40, 0.32, -10), # BCD
(0.50, 0.45, 0), # ABCD
]
    for label, (x, y, rotation) in zip(subsets, subset_positions):
        if not rotate_labels:
            rotation = 0
        ax.text(x, y, label, rotation=rotation,
                ha='center', va='center', fontsize=subset_label_fontsize)
# Remove borders
ax.set_xticklabels('')
ax.set_yticklabels('')
ax.axis('off')
ax.set_aspect('equal')
return ax
def venn3(subsets,
set_labels=('A', 'B', 'C'),
set_colors=['#feb308', '#8f1402', '#0485d1'],
alpha=0.4,
ax=None,
set_label_fontsize=18,
subset_label_fontsize=14):
"""
Plot a three-way venn diagram.
Parameters
----------
subsets : list
Values for each subset of Venn.
May be int, float, or string. Use strings for any custom formatting
[A, B, C, AB, AC, BC, ABC]
set_labels : list of str, optional
Labels around ellipses' exterior
set_colors : list, optional
Colors of Venn ellipses.
Defaults to xkcd's [amber, brick red, cerulean]
alpha : float, optional
Alpha of Venn ellipses
ax : AxesSubplot
Axis to draw Venn on
set_label_fontsize : int, optional
Fontsize of exterior set labels.
Default=18pt, optimized for 8in by 8in figure
subset_label_fontsize : int, optional
Fontsize of interior count labels
Default=14pt, optimized for 8in by 8in figure
Returns
-------
ax : AxesSubplot
"""
if len(subsets) != 7:
raise Exception('Must provide exactly 7 subset values')
if ax is None:
ax = plt.gca()
width = 0.6
height = 0.6
# Draw ellipses
ellipse_coords = [
(0.50, 0.63), # A (top)
(0.63, 0.37), # B (bottom right)
(0.37, 0.37), # C (bottom left)
]
for color, coord in zip(set_colors, ellipse_coords):
e = patches.Ellipse(coord, width, height, alpha=alpha, facecolor=color)
ax.add_patch(e)
# Add exterior set labels
set_label_positions = [
(0.50, 0.97, 0), # A (top)
(0.88, 0.14, 45), # B (bottom right)
(0.12, 0.14, -45), # C (bottom left)
]
for label, (x, y, rotation) in zip(set_labels, set_label_positions):
ax.text(x, y, label, rotation=rotation,
ha='center', va='center', fontsize=set_label_fontsize)
# Add subset count labels
subsets = [str(s) for s in subsets]
subset_positions = [
(0.5, 0.77), # A
(0.77, 0.3), # B
(0.23, 0.3), # C
(0.7, 0.55), # AB
(0.3, 0.55), # AC
(0.5, 0.24), # BC
(0.5, 0.47) # ABC
]
for label, (x, y) in zip(subsets, subset_positions):
ax.text(x, y, label, rotation=0,
ha='center', va='center', fontsize=subset_label_fontsize)
# Remove borders
ax.set_xticklabels('')
ax.set_yticklabels('')
ax.axis('off')
ax.set_aspect('equal')
return ax
def venn2(subsets,
set_labels=('A', 'B'),
set_colors=['#0485d1', '#8f1402'],
alpha=0.4,
ax=None,
set_label_fontsize=18,
subset_label_fontsize=14):
"""
Plot a two-way venn diagram.
Parameters
----------
subsets : list
Values for each subset of Venn.
May be int, float, or string. Use strings for any custom formatting
[A, B, AB]
set_labels : list of str, optional
Labels around ellipses' exterior
set_colors : list, optional
Colors of Venn ellipses.
Defaults to xkcd's [cerulean, brick red]
alpha : float, optional
Alpha of Venn ellipses
ax : AxesSubplot
Axis to draw Venn on
set_label_fontsize : int, optional
Fontsize of exterior set labels.
Default=18pt, optimized for 8in by 8in figure
subset_label_fontsize : int, optional
Fontsize of interior count labels
Default=14pt, optimized for 8in by 8in figure
Returns
-------
ax : AxesSubplot
"""
if len(subsets) != 3:
raise Exception('Must provide exactly 3 subset values')
if ax is None:
ax = plt.gca()
# Circle shape and coloring
width = 0.65
height = 0.65
# Draw ellipses
ellipse_coords = [
(0.37, 0.5), # A (left)
(0.63, 0.5), # B (right)
]
for color, coord in zip(set_colors, ellipse_coords):
e = patches.Ellipse(coord, width, height, alpha=alpha, facecolor=color)
ax.add_patch(e)
# Add exterior set labels
set_label_positions = [
(0.18, 0.82, 30), # A (left)
(0.82, 0.82, -30), # B (right)
]
for label, (x, y, rotation) in zip(set_labels, set_label_positions):
ax.text(x, y, label, rotation=rotation,
ha='center', va='center', fontsize=set_label_fontsize)
# Add subset count labels
subsets = [str(s) for s in subsets]
subset_positions = [
(0.2, 0.5), # A
(0.8, 0.5), # B
(0.5, 0.5), # AB
]
for label, (x, y) in zip(subsets, subset_positions):
ax.text(x, y, label, rotation=0,
ha='center', va='center', fontsize=subset_label_fontsize)
# Remove borders
ax.set_xticklabels('')
ax.set_yticklabels('')
ax.axis('off')
ax.set_aspect('equal')
return ax
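# Minimal usage sketch (the counts below are made up): each helper draws onto a
# matplotlib axis, so build a figure first and pass the subset counts in the
# order documented in the corresponding docstring.
if __name__ == '__main__':
    fig, ax = plt.subplots(figsize=(8, 8))
    venn3([10, 8, 6, 4, 3, 2, 1], set_labels=('A', 'B', 'C'), ax=ax)
    plt.show()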
| msto/svplot | svplot/venn.py | Python | mit | 8,650 |
# coding: utf-8
from __future__ import unicode_literals
from ..compat import (
compat_b64decode,
compat_urllib_parse_unquote,
compat_urlparse,
)
from ..utils import (
determine_ext,
update_url_query,
)
from .bokecc import BokeCCBaseIE
class InfoQIE(BokeCCBaseIE):
_VALID_URL = r'https?://(?:www\.)?infoq\.com/(?:[^/]+/)+(?P<id>[^/]+)'
_TESTS = [{
'url': 'http://www.infoq.com/presentations/A-Few-of-My-Favorite-Python-Things',
'md5': 'b5ca0e0a8c1fed93b0e65e48e462f9a2',
'info_dict': {
'id': 'A-Few-of-My-Favorite-Python-Things',
'ext': 'mp4',
'description': 'Mike Pirnat presents some tips and tricks, standard libraries and third party packages that make programming in Python a richer experience.',
'title': 'A Few of My Favorite [Python] Things',
},
}, {
'url': 'http://www.infoq.com/fr/presentations/changez-avis-sur-javascript',
'only_matching': True,
}, {
'url': 'http://www.infoq.com/cn/presentations/openstack-continued-delivery',
'md5': '4918d0cca1497f2244572caf626687ef',
'info_dict': {
'id': 'openstack-continued-delivery',
'title': 'OpenStack持续交付之路',
'ext': 'flv',
'description': 'md5:308d981fb28fa42f49f9568322c683ff',
},
}, {
'url': 'https://www.infoq.com/presentations/Simple-Made-Easy',
'md5': '0e34642d4d9ef44bf86f66f6399672db',
'info_dict': {
'id': 'Simple-Made-Easy',
'title': 'Simple Made Easy',
'ext': 'mp3',
'description': 'md5:3e0e213a8bbd074796ef89ea35ada25b',
},
'params': {
'format': 'bestaudio',
},
}]
def _extract_rtmp_video(self, webpage):
# The server URL is hardcoded
video_url = 'rtmpe://videof.infoq.com/cfx/st/'
# Extract video URL
encoded_id = self._search_regex(
r"jsclassref\s*=\s*'([^']*)'", webpage, 'encoded id', default=None)
real_id = compat_urllib_parse_unquote(compat_b64decode(encoded_id).decode('utf-8'))
playpath = 'mp4:' + real_id
return [{
'format_id': 'rtmp_video',
'url': video_url,
'ext': determine_ext(playpath),
'play_path': playpath,
}]
def _extract_cf_auth(self, webpage):
policy = self._search_regex(r'InfoQConstants\.scp\s*=\s*\'([^\']+)\'', webpage, 'policy')
signature = self._search_regex(r'InfoQConstants\.scs\s*=\s*\'([^\']+)\'', webpage, 'signature')
key_pair_id = self._search_regex(r'InfoQConstants\.sck\s*=\s*\'([^\']+)\'', webpage, 'key-pair-id')
return {
'Policy': policy,
'Signature': signature,
'Key-Pair-Id': key_pair_id,
}
def _extract_http_video(self, webpage):
http_video_url = self._search_regex(r'P\.s\s*=\s*\'([^\']+)\'', webpage, 'video URL')
http_video_url = update_url_query(http_video_url, self._extract_cf_auth(webpage))
return [{
'format_id': 'http_video',
'url': http_video_url,
'http_headers': {'Referer': 'https://www.infoq.com/'},
}]
def _extract_http_audio(self, webpage, video_id):
fields = self._form_hidden_inputs('mp3Form', webpage)
http_audio_url = fields.get('filename')
if not http_audio_url:
return []
# base URL is found in the Location header in the response returned by
# GET https://www.infoq.com/mp3download.action?filename=... when logged in.
http_audio_url = compat_urlparse.urljoin('http://ress.infoq.com/downloads/mp3downloads/', http_audio_url)
http_audio_url = update_url_query(http_audio_url, self._extract_cf_auth(webpage))
# audio file seem to be missing some times even if there is a download link
# so probe URL to make sure
if not self._is_valid_url(http_audio_url, video_id):
return []
return [{
'format_id': 'http_audio',
'url': http_audio_url,
'vcodec': 'none',
}]
def _real_extract(self, url):
video_id = self._match_id(url)
webpage = self._download_webpage(url, video_id)
video_title = self._html_search_regex(r'<title>(.*?)</title>', webpage, 'title')
video_description = self._html_search_meta('description', webpage, 'description')
if '/cn/' in url:
# for China videos, HTTP video URL exists but always fails with 403
formats = self._extract_bokecc_formats(webpage, video_id)
else:
formats = (
self._extract_rtmp_video(webpage)
+ self._extract_http_video(webpage)
+ self._extract_http_audio(webpage, video_id))
self._sort_formats(formats)
return {
'id': video_id,
'title': video_title,
'description': video_description,
'formats': formats,
}
| rg3/youtube-dl | youtube_dl/extractor/infoq.py | Python | unlicense | 5,072 |
from Operation import Operation
import HeeksCNC
class Profile(Operation):
def __init__(self):
Operation.__init__(self)
def TypeName(self):
return "Profile"
def op_icon(self):
# the name of the PNG file in the HeeksCNC icons folder
return "profile"
def get_selected_sketches(self):
return 'hi'
def Edit(self):
#if HeeksCNC.widgets == HeeksCNC.WIDGETS_WX:
#from wxProfile import Profiledlg
import wxProfile
class wxProfiledlg( wxProfile.Profiledlg ):
def __init__( self, parent ):
wxProfile.Profiledlg.__init__( self, parent )
#r1 = self.m_roll_radius.GetValue()
#print r1
def CloseWindow(self,event):
self.Destroy()
dlg = wxProfiledlg(None)
dlg.Show(True)
| AlanZheng/heekscnc | pycnc/Profile.py | Python | bsd-3-clause | 934 |
# Copyright 2013 Mario Graff Guerrero
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import numpy as np
from SparseArray import SparseArray
import json
import os
import gzip
def line_iterator(filename):
if filename.endswith(".gz"):
f = gzip.GzipFile(filename)
else:
try:
f = open(filename, encoding='utf8')
except TypeError:
f = open(filename)
while True:
line = f.readline()
        # Test the type of the line and decode it if necessary...
if type(line) is bytes:
try:
line = str(line, encoding='utf8')
except TypeError:
line = str(line)
# If the line is empty, we are done...
if len(line) == 0:
break
line = line.strip()
# If line is empty, jump to next...
if len(line) == 0:
continue
yield line
# Close the file...
f.close()
def json_iterator(filename):
for line in line_iterator(filename):
yield json.loads(line)
def tonparray(a):
return np.array(a.full_array())
def BER(y, yh):
u = np.unique(y)
b = 0
for cl in u:
m = y == cl
b += (~(y[m] == yh[m])).sum() / float(m.sum())
return (b / float(u.shape[0])) * 100.
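# Worked example (illustrative arrays): for y = np.array([0, 0, 1, 1]) and
# yh = np.array([0, 1, 1, 1]), class 0 has 1 of 2 samples misclassified and
# class 1 has 0 of 2, so BER = ((0.5 + 0.0) / 2) * 100 = 25.0.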
def RSE(x, y):
return ((x - y)**2).sum() / ((x - x.mean())**2).sum()
params_fname = os.path.join(os.path.dirname(__file__), 'conf', 'parameter_values.json')
with open(params_fname, 'r') as fpt:
PARAMS = json.loads(fpt.read())
class Inputs(object):
def __init__(self):
self._word2id = {}
self._label2id = {}
@property
def word2id(self):
return self._word2id
@property
def label2id(self):
return self._label2id
@word2id.setter
def word2id(self, s):
self._word2id = s
@label2id.setter
def label2id(self, s):
self._label2id = s
@staticmethod
def _num_terms(a):
if 'num_terms' in a:
num_terms = a['num_terms']
else:
num_terms = len(a)
if 'klass' in a:
num_terms -= 1
return num_terms
def convert(self, x):
try:
return float(x)
except ValueError:
if x not in self.word2id:
self.word2id[x] = len(self.word2id)
return self.word2id[x]
def convert_label(self, x):
try:
x = float(x)
if np.isfinite(x):
return x
x = str(x)
except ValueError:
pass
if x not in self.label2id:
self.label2id[x] = len(self.label2id)
return self.label2id[x]
def _read_csv(self, fname):
X = []
for i in line_iterator(fname):
x = i.rstrip().lstrip()
if len(x):
X.append([i for i in x.split(',')])
return X
def read_csv(self, fname, dim):
X = []
y = []
d = self._read_csv(fname)
if dim > 1:
for x in d:
X.append([self.convert(i) for i in x[:-dim]])
y.append(x[-dim:])
X = np.array(X)
y = [SparseArray.fromlist([float(x[i]) for x in y]) for i in range(dim)]
elif dim == 1:
for x in d:
X.append([self.convert(i) for i in x[:-1]])
y.append(self.convert_label(x[-1]))
X = np.array(X)
y = np.array(y)
else:
X, y = np.array([[self.convert(i) for i in x] for x in d]), None
return X, y
def read_data_json(self, fname, iterable=None):
X = None
y = []
dependent = os.getenv('KLASS')
if dependent is None:
dependent = 'klass'
if iterable is None:
iterable = json_iterator(fname)
VEC = os.getenv('VEC', 'vec')
for row, a in enumerate(iterable):
if VEC in a:
return self.read_data_json_vec(fname)
if X is None:
X = [list() for i in range(self._num_terms(a))]
for k, v in a.items():
try:
k = int(k)
X[k].append((row, self.convert(v)))
except ValueError:
if k == dependent:
y.append(self.convert_label(v))
num_rows = row + 1
X = [SparseArray.index_data(x, num_rows) for x in X]
if len(y) == 0:
y = None
else:
y = np.array(y)
return X, y
def read_data_json_vec(self, fname, iterable=None):
X = None
y = []
dependent = os.getenv('KLASS')
if dependent is None:
dependent = 'klass'
if iterable is None:
iterable = json_iterator(fname)
VEC = os.getenv('VEC', 'vec')
for row, a in enumerate(iterable):
vec = a[VEC]
try:
vecsize = a['vecsize']
except KeyError:
vecsize = len(vec)
vec = enumerate(vec)
if X is None:
X = [list() for i in range(vecsize)]
for k, v in vec:
k = int(k)
X[k].append((row, self.convert(v)))
try:
y.append(self.convert_label(a[dependent]))
except KeyError:
pass
num_rows = row + 1
X = [SparseArray.index_data(x, num_rows) for x in X]
if len(y) == 0:
y = None
else:
y = np.array(y)
return X, y
class RandomParameterSearch(object):
def __init__(self, params=PARAMS,
npoints=1468,
training_size=5000,
seed=0):
self._training_size = training_size
self.popsize_constraint(params)
self._params = sorted(params.items())
assert len(self._params)
self._params.reverse()
self._len = None
self._npoints = npoints
self._seed = seed
self.fix_early_popsize()
def popsize_constraint(self, params):
try:
params['popsize'] = [x for x in params['popsize'] if x <= self._training_size]
except KeyError:
pass
def fix_early_popsize(self):
try:
popsize = [x for x in self._params if x[0] == 'popsize'][0]
if len(popsize[1]) == 0:
popsize[1].append(self._training_size)
except IndexError:
pass
try:
early = [x for x in self._params if x[0] == 'early_stopping_rounds'][0]
early_min = min(early[1])
if early_min > self._training_size:
early[1].append(self._training_size)
except IndexError:
pass
def __len__(self):
if self._len is None:
_ = np.array([len(x[1]) for x in self._params], dtype=np.uint)
_ = np.product(_)
assert _ >= 0
self._len = _
return self._len
def __getitem__(self, key):
res = {}
lens = [len(x[1]) for x in self._params]
for l, k_v in zip(lens, self._params):
k, v = k_v
key, residual = divmod(key, l)
res[k] = v[int(residual)]
return res
def constraints(self, k):
try:
if k['population_class'] == 'Generational' and\
k['early_stopping_rounds'] < k['popsize']:
return False
if k['early_stopping_rounds'] > self._training_size:
return False
except KeyError:
return True
return True
def __iter__(self):
np.random.seed(self._seed)
m = {}
_len = self.__len__()
npoints = self._npoints if _len > self._npoints else _len
while npoints:
k = np.round(np.random.uniform(0, _len)).astype(np.uint)
if len(m) == _len:
return
while k in m:
k = np.random.randint(_len)
m[k] = 1
p = self[k]
if self.constraints(p):
npoints -= 1
yield p
@staticmethod
def process_params(a):
from EvoDAG import EvoDAG
fs_class = {}
function_set = []
for x in EvoDAG()._function_set:
fs_class[x.__name__] = x
args = {}
for k, v in a.items():
if k in fs_class:
if not isinstance(v, bool):
fs_class[k].nargs = v
if v:
function_set.append(fs_class[k])
else:
args[k] = v
fs_evo = EvoDAG()._function_set
fs_evo = filter(lambda x: x in function_set, fs_evo)
args['function_set'] = [x for x in fs_evo]
return args
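# Usage sketch (the parameter grid is illustrative): RandomParameterSearch
# samples at most ``npoints`` distinct configurations from the cartesian product
# of the supplied value lists, silently dropping popsize values larger than
# training_size and combinations rejected by ``constraints``:
#
#     grid = {'popsize': [100, 1000], 'early_stopping_rounds': [100, 250]}
#     sampler = RandomParameterSearch(params=grid, npoints=3, training_size=500)
#     for configuration in sampler:
#         print(configuration)  # e.g. {'popsize': 100, 'early_stopping_rounds': 250}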
| mgraffg/RGP | EvoDAG/utils.py | Python | apache-2.0 | 9,348 |
# -*- coding: utf-8 -*-
# Generated by Django 1.10.4 on 2017-02-21 02:29
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [("planner", "0014_school_audit_notes")]
operations = [
migrations.AlterField(
model_name="milestone", name="date", field=models.DateField()
)
]
| mblayman/lcp | conductor/planner/migrations/0015_auto_20170221_0229.py | Python | bsd-2-clause | 398 |
"""Functions to plot M/EEG data on topo (one axes per channel)."""
# Authors: Alexandre Gramfort <[email protected]>
# Denis Engemann <[email protected]>
# Martin Luessi <[email protected]>
# Eric Larson <[email protected]>
#
# License: Simplified BSD
from copy import deepcopy
from functools import partial
from itertools import cycle
import numpy as np
from ..io.pick import channel_type, pick_types
from ..utils import _clean_names, warn, _check_option, Bunch
from ..channels.layout import _merge_ch_data, _pair_grad_sensors, find_layout
from ..defaults import _handle_default
from .utils import (_check_delayed_ssp, _get_color_list, _draw_proj_checkbox,
add_background_image, plt_show, _setup_vmin_vmax,
DraggableColorbar, _setup_ax_spines,
_check_cov, _plot_masked_image)
def iter_topography(info, layout=None, on_pick=None, fig=None,
fig_facecolor='k', axis_facecolor='k',
axis_spinecolor='k', layout_scale=None, legend=False):
"""Create iterator over channel positions.
This function returns a generator that unpacks into
a series of matplotlib axis objects and data / channel
indices, both corresponding to the sensor positions
of the related layout passed or inferred from the channel info.
Hence, this enables convenient topography plot customization.
Parameters
----------
info : instance of Info
The measurement info.
layout : instance of mne.channels.Layout | None
The layout to use. If None, layout will be guessed.
on_pick : callable | None
The callback function to be invoked on clicking one
        of the axes. It is expected to have the following
        signature: ``function(axis, channel_index)``.
fig : matplotlib.figure.Figure | None
The figure object to be considered. If None, a new
figure will be created.
fig_facecolor : color
The figure face color. Defaults to black.
axis_facecolor : color
The axis face color. Defaults to black.
axis_spinecolor : color
The axis spine color. Defaults to black. In other words,
the color of the axis' edge lines.
layout_scale : float | None
Scaling factor for adjusting the relative size of the layout
on the canvas. If None, nothing will be scaled.
legend : bool
If True, an additional axis is created in the bottom right corner
that can be used to, e.g., construct a legend. The index of this
axis will be -1.
Returns
-------
gen : generator
A generator that can be unpacked into:
ax : matplotlib.axis.Axis
The current axis of the topo plot.
ch_dx : int
The related channel index.
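    Examples
    --------
    A minimal sketch (``info`` and ``data`` are assumed to exist already;
    ``data`` is a channels-by-times array aligned with ``info``)::

        import matplotlib.pyplot as plt
        for ax, ch_idx in iter_topography(info, fig_facecolor='white',
                                          axis_facecolor='white',
                                          axis_spinecolor='white'):
            ax.plot(data[ch_idx], color='red')
        plt.show()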
"""
return _iter_topography(info, layout, on_pick, fig, fig_facecolor,
axis_facecolor, axis_spinecolor, layout_scale,
legend=legend)
def _legend_axis(pos):
"""Add a legend axis to the bottom right."""
import matplotlib.pyplot as plt
left, bottom = pos[:, 0].max(), pos[:, 1].min()
wid, hei = pos[-1, 2:]
return plt.axes([left, bottom + .05, wid, hei])
def _iter_topography(info, layout, on_pick, fig, fig_facecolor='k',
axis_facecolor='k', axis_spinecolor='k',
layout_scale=None, unified=False, img=False, axes=None,
legend=False):
"""Iterate over topography.
Has the same parameters as iter_topography, plus:
unified : bool
If False (default), multiple matplotlib axes will be used.
If True, a single axis will be constructed. The former is
useful for custom plotting, the latter for speed.
"""
from matplotlib import pyplot as plt, collections
if fig is None:
fig = plt.figure()
def format_coord_unified(x, y, pos=None, ch_names=None):
"""Update status bar with channel name under cursor."""
# find candidate channels (ones that are down and left from cursor)
pdist = np.array([x, y]) - pos[:, :2]
pind = np.where((pdist >= 0).all(axis=1))[0]
if len(pind) > 0:
# find the closest channel
closest = pind[np.sum(pdist[pind, :]**2, axis=1).argmin()]
# check whether we are inside its box
in_box = (pdist[closest, :] < pos[closest, 2:]).all()
else:
in_box = False
return (('%s (click to magnify)' % ch_names[closest]) if
in_box else 'No channel here')
def format_coord_multiaxis(x, y, ch_name=None):
"""Update status bar with channel name under cursor."""
return '%s (click to magnify)' % ch_name
fig.set_facecolor(fig_facecolor)
if layout is None:
layout = find_layout(info)
if on_pick is not None:
callback = partial(_plot_topo_onpick, show_func=on_pick)
fig.canvas.mpl_connect('button_press_event', callback)
pos = layout.pos.copy()
if layout_scale:
pos[:, :2] *= layout_scale
ch_names = _clean_names(info['ch_names'])
iter_ch = [(x, y) for x, y in enumerate(layout.names) if y in ch_names]
if unified:
if axes is None:
under_ax = plt.axes([0, 0, 1, 1])
under_ax.axis('off')
else:
under_ax = axes
under_ax.format_coord = partial(format_coord_unified, pos=pos,
ch_names=layout.names)
under_ax.set(xlim=[0, 1], ylim=[0, 1])
axs = list()
for idx, name in iter_ch:
ch_idx = ch_names.index(name)
if not unified: # old, slow way
ax = plt.axes(pos[idx])
ax.patch.set_facecolor(axis_facecolor)
for spine in ax.spines.values():
spine.set_color(axis_spinecolor)
if not legend:
ax.set(xticklabels=[], yticklabels=[])
for tick in ax.get_xticklines() + ax.get_yticklines():
tick.set_visible(False)
ax._mne_ch_name = name
ax._mne_ch_idx = ch_idx
ax._mne_ax_face_color = axis_facecolor
ax.format_coord = partial(format_coord_multiaxis, ch_name=name)
yield ax, ch_idx
else:
ax = Bunch(ax=under_ax, pos=pos[idx], data_lines=list(),
_mne_ch_name=name, _mne_ch_idx=ch_idx,
_mne_ax_face_color=axis_facecolor)
axs.append(ax)
if not unified and legend:
ax = _legend_axis(pos)
yield ax, -1
if unified:
under_ax._mne_axs = axs
# Create a PolyCollection for the axis backgrounds
verts = np.transpose([pos[:, :2],
pos[:, :2] + pos[:, 2:] * [1, 0],
pos[:, :2] + pos[:, 2:],
pos[:, :2] + pos[:, 2:] * [0, 1],
], [1, 0, 2])
if not img:
under_ax.add_collection(collections.PolyCollection(
verts, facecolor=axis_facecolor, edgecolor=axis_spinecolor,
linewidth=1.)) # Not needed for image plots.
for ax in axs:
yield ax, ax._mne_ch_idx
def _plot_topo(info, times, show_func, click_func=None, layout=None,
vmin=None, vmax=None, ylim=None, colorbar=None, border='none',
axis_facecolor='k', fig_facecolor='k', cmap='RdBu_r',
layout_scale=None, title=None, x_label=None, y_label=None,
font_color='w', unified=False, img=False, axes=None):
"""Plot on sensor layout."""
import matplotlib.pyplot as plt
if layout.kind == 'custom':
layout = deepcopy(layout)
layout.pos[:, :2] -= layout.pos[:, :2].min(0)
layout.pos[:, :2] /= layout.pos[:, :2].max(0)
# prepare callbacks
tmin, tmax = times[0], times[-1]
click_func = show_func if click_func is None else click_func
on_pick = partial(click_func, tmin=tmin, tmax=tmax, vmin=vmin,
vmax=vmax, ylim=ylim, x_label=x_label,
y_label=y_label)
if axes is None:
fig = plt.figure()
axes = plt.axes([0.015, 0.025, 0.97, 0.95])
axes.set_facecolor(fig_facecolor)
else:
fig = axes.figure
if colorbar:
sm = plt.cm.ScalarMappable(cmap=cmap, norm=plt.Normalize(vmin, vmax))
sm.set_array(np.linspace(vmin, vmax))
cb = fig.colorbar(sm, ax=axes, pad=0.025, fraction=0.075, shrink=0.5,
anchor=(-1, 0.5))
cb_yticks = plt.getp(cb.ax.axes, 'yticklabels')
plt.setp(cb_yticks, color=font_color)
axes.axis('off')
my_topo_plot = _iter_topography(info, layout=layout, on_pick=on_pick,
fig=fig, layout_scale=layout_scale,
axis_spinecolor=border,
axis_facecolor=axis_facecolor,
fig_facecolor=fig_facecolor,
unified=unified, img=img, axes=axes)
for ax, ch_idx in my_topo_plot:
if layout.kind == 'Vectorview-all' and ylim is not None:
this_type = {'mag': 0, 'grad': 1}[channel_type(info, ch_idx)]
ylim_ = [v[this_type] if _check_vlim(v) else v for v in ylim]
else:
ylim_ = ylim
show_func(ax, ch_idx, tmin=tmin, tmax=tmax, vmin=vmin,
vmax=vmax, ylim=ylim_)
if title is not None:
plt.figtext(0.03, 0.95, title, color=font_color, fontsize=15, va='top')
return fig
def _plot_topo_onpick(event, show_func):
"""Onpick callback that shows a single channel in a new figure."""
# make sure that the swipe gesture in OS-X doesn't open many figures
orig_ax = event.inaxes
import matplotlib.pyplot as plt
try:
if hasattr(orig_ax, '_mne_axs'): # in unified, single-axes mode
x, y = event.xdata, event.ydata
for ax in orig_ax._mne_axs:
if x >= ax.pos[0] and y >= ax.pos[1] and \
x <= ax.pos[0] + ax.pos[2] and \
y <= ax.pos[1] + ax.pos[3]:
orig_ax = ax
break
else:
# no axis found
return
elif not hasattr(orig_ax, '_mne_ch_idx'):
# neither old nor new mode
return
ch_idx = orig_ax._mne_ch_idx
face_color = orig_ax._mne_ax_face_color
fig, ax = plt.subplots(1)
plt.title(orig_ax._mne_ch_name)
ax.set_facecolor(face_color)
# allow custom function to override parameters
show_func(ax, ch_idx)
plt_show(fig=fig)
except Exception as err:
        # matplotlib silently ignores exceptions in event handlers, so we
        # print the error here to know what went wrong
print(err)
raise
def _compute_ax_scalings(bn, xlim, ylim):
"""Compute scale factors for a unified plot."""
if isinstance(ylim[0], (tuple, list, np.ndarray)):
ylim = (ylim[0][0], ylim[1][0])
pos = bn.pos
bn.x_s = pos[2] / (xlim[1] - xlim[0])
bn.x_t = pos[0] - bn.x_s * xlim[0]
bn.y_s = pos[3] / (ylim[1] - ylim[0])
bn.y_t = pos[1] - bn.y_s * ylim[0]
def _check_vlim(vlim):
"""Check the vlim."""
return not np.isscalar(vlim) and vlim is not None
def _imshow_tfr(ax, ch_idx, tmin, tmax, vmin, vmax, onselect, ylim=None,
tfr=None, freq=None, x_label=None, y_label=None,
colorbar=False, cmap=('RdBu_r', True), yscale='auto',
mask=None, mask_style="both", mask_cmap="Greys",
mask_alpha=0.1, is_jointplot=False):
"""Show time-frequency map as two-dimensional image."""
from matplotlib import pyplot as plt
from matplotlib.widgets import RectangleSelector
_check_option('yscale', yscale, ['auto', 'linear', 'log'])
cmap, interactive_cmap = cmap
times = np.linspace(tmin, tmax, num=tfr[ch_idx].shape[1])
img, t_end = _plot_masked_image(
ax, tfr[ch_idx], times, mask, yvals=freq, cmap=cmap,
vmin=vmin, vmax=vmax, mask_style=mask_style, mask_alpha=mask_alpha,
mask_cmap=mask_cmap, yscale=yscale)
if x_label is not None:
ax.set_xlabel(x_label)
if y_label is not None:
ax.set_ylabel(y_label)
if colorbar:
if isinstance(colorbar, DraggableColorbar):
cbar = colorbar.cbar # this happens with multiaxes case
else:
cbar = plt.colorbar(mappable=img, ax=ax)
if interactive_cmap:
ax.CB = DraggableColorbar(cbar, img)
ax.RS = RectangleSelector(ax, onselect=onselect) # reference must be kept
return t_end
def _imshow_tfr_unified(bn, ch_idx, tmin, tmax, vmin, vmax, onselect,
ylim=None, tfr=None, freq=None, vline=None,
x_label=None, y_label=None, colorbar=False,
picker=True, cmap='RdBu_r', title=None, hline=None):
"""Show multiple tfrs on topo using a single axes."""
_compute_ax_scalings(bn, (tmin, tmax), (freq[0], freq[-1]))
ax = bn.ax
data_lines = bn.data_lines
extent = (bn.x_t + bn.x_s * tmin, bn.x_t + bn.x_s * tmax,
bn.y_t + bn.y_s * freq[0], bn.y_t + bn.y_s * freq[-1])
data_lines.append(ax.imshow(tfr[ch_idx], clip_on=True, clip_box=bn.pos,
extent=extent, aspect="auto", origin="lower",
vmin=vmin, vmax=vmax, cmap=cmap))
def _plot_timeseries(ax, ch_idx, tmin, tmax, vmin, vmax, ylim, data, color,
times, vline=None, x_label=None, y_label=None,
colorbar=False, hline=None, hvline_color='w',
labels=None):
"""Show time series on topo split across multiple axes."""
import matplotlib.pyplot as plt
from matplotlib.colors import colorConverter
picker_flag = False
for data_, color_, times_ in zip(data, color, times):
if not picker_flag:
# use large tol for picker so we can click anywhere in the axes
line = ax.plot(times_, data_[ch_idx], color=color_, picker=True)[0]
line.set_pickradius(1e9)
picker_flag = True
else:
ax.plot(times_, data_[ch_idx], color=color_)
def _format_coord(x, y, labels, ax):
"""Create status string based on cursor coordinates."""
# find indices for datasets near cursor (if any)
tdiffs = [np.abs(tvec - x).min() for tvec in times]
nearby = [k for k, tdiff in enumerate(tdiffs) if
tdiff < (tmax - tmin) / 100]
xlabel = ax.get_xlabel()
xunit = (xlabel[xlabel.find('(') + 1:xlabel.find(')')]
if '(' in xlabel and ')' in xlabel else 's')
timestr = '%6.3f %s: ' % (x, xunit)
if not nearby:
return '%s Nothing here' % timestr
labels = [''] * len(nearby) if labels is None else labels
nearby_data = [(data[n], labels[n], times[n]) for n in nearby]
ylabel = ax.get_ylabel()
yunit = (ylabel[ylabel.find('(') + 1:ylabel.find(')')]
if '(' in ylabel and ')' in ylabel else '')
# try to estimate whether to truncate condition labels
slen = 9 + len(xunit) + sum([12 + len(yunit) + len(label)
for label in labels])
bar_width = (ax.figure.get_size_inches() * ax.figure.dpi)[0] / 5.5
# show labels and y values for datasets near cursor
trunc_labels = bar_width < slen
s = timestr
for data_, label, tvec in nearby_data:
idx = np.abs(tvec - x).argmin()
s += '%7.2f %s' % (data_[ch_idx, idx], yunit)
if trunc_labels:
label = (label if len(label) <= 10 else
'%s..%s' % (label[:6], label[-2:]))
s += ' [%s] ' % label if label else ' '
return s
ax.format_coord = lambda x, y: _format_coord(x, y, labels=labels, ax=ax)
def _cursor_vline(event):
"""Draw cursor (vertical line)."""
ax = event.inaxes
if not ax:
return
if ax._cursorline is not None:
ax._cursorline.remove()
ax._cursorline = ax.axvline(event.xdata, color=ax._cursorcolor)
ax.figure.canvas.draw()
def _rm_cursor(event):
ax = event.inaxes
if ax._cursorline is not None:
ax._cursorline.remove()
ax._cursorline = None
ax.figure.canvas.draw()
ax._cursorline = None
# choose cursor color based on perceived brightness of background
try:
facecol = colorConverter.to_rgb(ax.get_facecolor())
except AttributeError: # older MPL
facecol = colorConverter.to_rgb(ax.get_axis_bgcolor())
face_brightness = np.dot(facecol, np.array([299, 587, 114]))
ax._cursorcolor = 'white' if face_brightness < 150 else 'black'
plt.connect('motion_notify_event', _cursor_vline)
plt.connect('axes_leave_event', _rm_cursor)
ymin, ymax = ax.get_ylim()
# don't pass vline or hline here (this fxn doesn't do hvline_color):
_setup_ax_spines(ax, [], tmin, tmax, ymin, ymax, hline=False)
ax.figure.set_facecolor('k' if hvline_color == 'w' else 'w')
ax.spines['bottom'].set_color(hvline_color)
ax.spines['left'].set_color(hvline_color)
ax.tick_params(axis='x', colors=hvline_color, which='both')
ax.tick_params(axis='y', colors=hvline_color, which='both')
ax.title.set_color(hvline_color)
ax.xaxis.label.set_color(hvline_color)
ax.yaxis.label.set_color(hvline_color)
if x_label is not None:
ax.set_xlabel(x_label)
if y_label is not None:
if isinstance(y_label, list):
ax.set_ylabel(y_label[ch_idx])
else:
ax.set_ylabel(y_label)
if vline:
plt.axvline(vline, color=hvline_color, linewidth=1.0,
linestyle='--')
if hline:
plt.axhline(hline, color=hvline_color, linewidth=1.0, zorder=10)
if colorbar:
plt.colorbar()
def _plot_timeseries_unified(bn, ch_idx, tmin, tmax, vmin, vmax, ylim, data,
color, times, vline=None, x_label=None,
y_label=None, colorbar=False, hline=None,
hvline_color='w'):
"""Show multiple time series on topo using a single axes."""
import matplotlib.pyplot as plt
if not (ylim and not any(v is None for v in ylim)):
ylim = [min(np.min(d) for d in data), max(np.max(d) for d in data)]
# Translation and scale parameters to take data->under_ax normalized coords
_compute_ax_scalings(bn, (tmin, tmax), ylim)
pos = bn.pos
data_lines = bn.data_lines
ax = bn.ax
# XXX These calls could probably be made faster by using collections
for data_, color_, times_ in zip(data, color, times):
data_lines.append(ax.plot(
bn.x_t + bn.x_s * times_, bn.y_t + bn.y_s * data_[ch_idx],
linewidth=0.5, color=color_, clip_on=True, clip_box=pos)[0])
if vline:
vline = np.array(vline) * bn.x_s + bn.x_t
ax.vlines(vline, pos[1], pos[1] + pos[3], color=hvline_color,
linewidth=0.5, linestyle='--')
if hline:
hline = np.array(hline) * bn.y_s + bn.y_t
ax.hlines(hline, pos[0], pos[0] + pos[2], color=hvline_color,
linewidth=0.5)
if x_label is not None:
ax.text(pos[0] + pos[2] / 2., pos[1], x_label,
horizontalalignment='center', verticalalignment='top')
if y_label is not None:
y_label = y_label[ch_idx] if isinstance(y_label, list) else y_label
ax.text(pos[0], pos[1] + pos[3] / 2., y_label,
                horizontalalignment='right', verticalalignment='middle',
rotation=90)
if colorbar:
plt.colorbar()
def _erfimage_imshow(ax, ch_idx, tmin, tmax, vmin, vmax, ylim=None, data=None,
epochs=None, sigma=None, order=None, scalings=None,
vline=None, x_label=None, y_label=None, colorbar=False,
cmap='RdBu_r', vlim_array=None):
"""Plot erfimage on sensor topography."""
from scipy import ndimage
import matplotlib.pyplot as plt
this_data = data[:, ch_idx, :]
if vlim_array is not None:
vmin, vmax = vlim_array[ch_idx]
if callable(order):
order = order(epochs.times, this_data)
if order is not None:
this_data = this_data[order]
if sigma > 0.:
this_data = ndimage.gaussian_filter1d(this_data, sigma=sigma, axis=0)
img = ax.imshow(this_data, extent=[tmin, tmax, 0, len(data)],
aspect='auto', origin='lower', vmin=vmin, vmax=vmax,
picker=True, cmap=cmap, interpolation='nearest')
ax = plt.gca()
if x_label is not None:
ax.set_xlabel(x_label)
if y_label is not None:
ax.set_ylabel(y_label)
if colorbar:
plt.colorbar(mappable=img)
def _erfimage_imshow_unified(bn, ch_idx, tmin, tmax, vmin, vmax, ylim=None,
data=None, epochs=None, sigma=None, order=None,
scalings=None, vline=None, x_label=None,
y_label=None, colorbar=False, cmap='RdBu_r',
vlim_array=None):
"""Plot erfimage topography using a single axis."""
from scipy import ndimage
_compute_ax_scalings(bn, (tmin, tmax), (0, len(epochs.events)))
ax = bn.ax
data_lines = bn.data_lines
extent = (bn.x_t + bn.x_s * tmin, bn.x_t + bn.x_s * tmax, bn.y_t,
bn.y_t + bn.y_s * len(epochs.events))
this_data = data[:, ch_idx, :]
vmin, vmax = (None, None) if vlim_array is None else vlim_array[ch_idx]
if callable(order):
order = order(epochs.times, this_data)
if order is not None:
this_data = this_data[order]
if sigma > 0.:
this_data = ndimage.gaussian_filter1d(this_data, sigma=sigma, axis=0)
data_lines.append(ax.imshow(this_data, extent=extent, aspect='auto',
origin='lower', vmin=vmin, vmax=vmax,
picker=True, cmap=cmap,
interpolation='nearest'))
def _plot_evoked_topo(evoked, layout=None, layout_scale=0.945, color=None,
border='none', ylim=None, scalings=None, title=None,
proj=False, vline=(0.,), hline=(0.,), fig_facecolor='k',
fig_background=None, axis_facecolor='k', font_color='w',
merge_channels=False, legend=True, axes=None, show=True,
noise_cov=None):
"""Plot 2D topography of evoked responses.
Clicking on the plot of an individual sensor opens a new figure showing
the evoked response for the selected sensor.
Parameters
----------
evoked : list of Evoked | Evoked
The evoked response to plot.
layout : instance of Layout | None
Layout instance specifying sensor positions (does not need to
be specified for Neuromag data). If possible, the correct layout is
inferred from the data.
layout_scale : float
Scaling factor for adjusting the relative size of the layout
on the canvas.
color : list of color objects | color object | None
Everything matplotlib accepts to specify colors. If not list-like,
the color specified will be repeated. If None, colors are
automatically drawn.
border : str
Matplotlib borders style to be used for each sensor plot.
ylim : dict | None
ylim for plots (after scaling has been applied). The value
determines the upper and lower subplot limits. e.g.
ylim = dict(eeg=[-20, 20]). Valid keys are eeg, mag, grad. If None,
the ylim parameter for each channel is determined by the maximum
absolute peak.
scalings : dict | None
        The scalings of the channel types to be applied for plotting. If None,
defaults to ``dict(eeg=1e6, grad=1e13, mag=1e15)``.
title : str
Title of the figure.
proj : bool | 'interactive'
If true SSP projections are applied before display. If 'interactive',
a check box for reversible selection of SSP projection vectors will
be shown.
vline : list of floats | None
The values at which to show a vertical line.
hline : list of floats | None
The values at which to show a horizontal line.
fig_facecolor : color
The figure face color. Defaults to black.
fig_background : None | array
A background image for the figure. This must be a valid input to
`matplotlib.pyplot.imshow`. Defaults to None.
axis_facecolor : color
The face color to be used for each sensor plot. Defaults to black.
font_color : color
The color of text in the colorbar and title. Defaults to white.
merge_channels : bool
Whether to use RMS value of gradiometer pairs. Only works for Neuromag
data. Defaults to False.
legend : bool | int | string | tuple
If True, create a legend based on evoked.comment. If False, disable the
legend. Otherwise, the legend is created and the parameter value is
passed as the location parameter to the matplotlib legend call. It can
be an integer (e.g. 0 corresponds to upper right corner of the plot),
a string (e.g. 'upper right'), or a tuple (x, y coordinates of the
lower left corner of the legend in the axes coordinate system).
See matplotlib documentation for more details.
axes : instance of matplotlib Axes | None
Axes to plot into. If None, axes will be created.
show : bool
Show figure if True.
noise_cov : instance of Covariance | str | None
Noise covariance used to whiten the data while plotting.
Whitened data channels names are shown in italic.
Can be a string to load a covariance from disk.
.. versionadded:: 0.16.0
Returns
-------
fig : instance of matplotlib.figure.Figure
Images of evoked responses at sensor locations
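    Examples
    --------
    This private helper normally runs behind the public plotting API; a
    minimal sketch (assuming ``evoked`` is an existing Evoked instance)::

        fig = evoked.plot_topo(title='Auditory response')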
"""
import matplotlib.pyplot as plt
from ..cov import whiten_evoked
if not type(evoked) in (tuple, list):
evoked = [evoked]
if type(color) in (tuple, list):
if len(color) != len(evoked):
raise ValueError('Lists of evoked objects and colors'
' must have the same length')
elif color is None:
        colors = ['w'] + _get_color_list()
stop = (slice(len(evoked)) if len(evoked) < len(colors)
else slice(len(colors)))
color = cycle(colors[stop])
if len(evoked) > len(colors):
warn('More evoked objects than colors available. You should pass '
'a list of unique colors.')
else:
color = cycle([color])
noise_cov = _check_cov(noise_cov, evoked[0].info)
if noise_cov is not None:
evoked = [whiten_evoked(e, noise_cov) for e in evoked]
else:
evoked = [e.copy() for e in evoked]
info = evoked[0].info
ch_names = evoked[0].ch_names
scalings = _handle_default('scalings', scalings)
if not all(e.ch_names == ch_names for e in evoked):
raise ValueError('All evoked.picks must be the same')
ch_names = _clean_names(ch_names)
if merge_channels:
picks = _pair_grad_sensors(info, topomap_coords=False)
chs = list()
for pick in picks[::2]:
ch = info['chs'][pick]
ch['ch_name'] = ch['ch_name'][:-1] + 'X'
chs.append(ch)
info['chs'] = chs
info['bads'] = list() # bads dropped on pair_grad_sensors
info._update_redundant()
info._check_consistency()
new_picks = list()
for e in evoked:
data, _ = _merge_ch_data(e.data[picks], 'grad', [])
if noise_cov is None:
data *= scalings['grad']
e.data = data
new_picks.append(range(len(data)))
picks = new_picks
types_used = ['grad']
unit = _handle_default('units')['grad'] if noise_cov is None else 'NA'
y_label = 'RMS amplitude (%s)' % unit
if layout is None:
layout = find_layout(info)
if not merge_channels:
# XXX. at the moment we are committed to 1- / 2-sensor-types layouts
chs_in_layout = set(layout.names) & set(ch_names)
types_used = {channel_type(info, ch_names.index(ch))
for ch in chs_in_layout}
# remove possible reference meg channels
types_used = set.difference(types_used, set('ref_meg'))
# one check for all vendors
meg_types = {'mag', 'grad'}
is_meg = len(set.intersection(types_used, meg_types)) > 0
nirs_types = {'hbo', 'hbr', 'fnirs_cw_amplitude', 'fnirs_od'}
is_nirs = len(set.intersection(types_used, nirs_types)) > 0
if is_meg:
types_used = list(types_used)[::-1] # -> restore kwarg order
picks = [pick_types(info, meg=kk, ref_meg=False, exclude=[])
for kk in types_used]
elif is_nirs:
types_used = list(types_used)[::-1] # -> restore kwarg order
picks = [pick_types(info, fnirs=kk, ref_meg=False, exclude=[])
for kk in types_used]
else:
types_used_kwargs = {t: True for t in types_used}
picks = [pick_types(info, meg=False, exclude=[],
**types_used_kwargs)]
assert isinstance(picks, list) and len(types_used) == len(picks)
if noise_cov is None:
for e in evoked:
for pick, ch_type in zip(picks, types_used):
e.data[pick] *= scalings[ch_type]
if proj is True and all(e.proj is not True for e in evoked):
evoked = [e.apply_proj() for e in evoked]
elif proj == 'interactive': # let it fail early.
for e in evoked:
_check_delayed_ssp(e)
# Y labels for picked plots must be reconstructed
y_label = list()
for ch_idx in range(len(chs_in_layout)):
if noise_cov is None:
unit = _handle_default('units')[channel_type(info, ch_idx)]
else:
unit = 'NA'
y_label.append('Amplitude (%s)' % unit)
if ylim is None:
# find maxima over all evoked data for each channel pick
ymaxes = np.array([max(np.abs(e.data[t]).max() for e in evoked)
for t in picks])
ylim_ = (-ymaxes, ymaxes)
elif isinstance(ylim, dict):
ylim_ = _handle_default('ylim', ylim)
ylim_ = [ylim_[kk] for kk in types_used]
# extra unpack to avoid bug #1700
if len(ylim_) == 1:
ylim_ = ylim_[0]
else:
ylim_ = zip(*[np.array(yl) for yl in ylim_])
else:
raise TypeError('ylim must be None or a dict. Got %s.' % type(ylim))
data = [e.data for e in evoked]
comments = [e.comment for e in evoked]
times = [e.times for e in evoked]
show_func = partial(_plot_timeseries_unified, data=data, color=color,
times=times, vline=vline, hline=hline,
hvline_color=font_color)
click_func = partial(_plot_timeseries, data=data, color=color, times=times,
vline=vline, hline=hline, hvline_color=font_color,
labels=comments)
time_min = min([t[0] for t in times])
time_max = max([t[-1] for t in times])
fig = _plot_topo(info=info, times=[time_min, time_max],
show_func=show_func, click_func=click_func, layout=layout,
colorbar=False, ylim=ylim_, cmap=None,
layout_scale=layout_scale, border=border,
fig_facecolor=fig_facecolor, font_color=font_color,
axis_facecolor=axis_facecolor, title=title,
x_label='Time (s)', y_label=y_label, unified=True,
axes=axes)
add_background_image(fig, fig_background)
if legend is not False:
legend_loc = 0 if legend is True else legend
labels = [e.comment if e.comment else 'Unknown' for e in evoked]
legend = plt.legend(labels, loc=legend_loc,
prop={'size': 10})
legend.get_frame().set_facecolor(axis_facecolor)
txts = legend.get_texts()
for txt, col in zip(txts, color):
txt.set_color(col)
if proj == 'interactive':
for e in evoked:
_check_delayed_ssp(e)
params = dict(evokeds=evoked, times=times,
plot_update_proj_callback=_plot_update_evoked_topo_proj,
projs=evoked[0].info['projs'], fig=fig)
_draw_proj_checkbox(None, params)
plt_show(show)
return fig
def _plot_update_evoked_topo_proj(params, bools):
"""Update topo sensor plots."""
evokeds = [e.copy() for e in params['evokeds']]
fig = params['fig']
projs = [proj for proj, b in zip(params['projs'], bools) if b]
params['proj_bools'] = bools
for e in evokeds:
e.add_proj(projs, remove_existing=True)
e.apply_proj()
# make sure to only modify the time courses, not the ticks
for ax in fig.axes[0]._mne_axs:
for line, evoked in zip(ax.data_lines, evokeds):
line.set_ydata(ax.y_t + ax.y_s * evoked.data[ax._mne_ch_idx])
fig.canvas.draw()
def plot_topo_image_epochs(epochs, layout=None, sigma=0., vmin=None,
vmax=None, colorbar=None, order=None, cmap='RdBu_r',
layout_scale=.95, title=None, scalings=None,
border='none', fig_facecolor='k',
fig_background=None, font_color='w', show=True):
"""Plot Event Related Potential / Fields image on topographies.
Parameters
----------
epochs : instance of :class:`~mne.Epochs`
The epochs.
layout : instance of Layout
System specific sensor positions.
sigma : float
The standard deviation of the Gaussian smoothing to apply along
        the epoch axis of the image. If 0., no smoothing is applied.
vmin : float
The min value in the image. The unit is µV for EEG channels,
fT for magnetometers and fT/cm for gradiometers.
vmax : float
The max value in the image. The unit is µV for EEG channels,
fT for magnetometers and fT/cm for gradiometers.
colorbar : bool | None
Whether to display a colorbar or not. If ``None`` a colorbar will be
shown only if all channels are of the same type. Defaults to ``None``.
order : None | array of int | callable
If not None, order is used to reorder the epochs on the y-axis
of the image. If it's an array of int it should be of length
the number of good epochs. If it's a callable the arguments
passed are the times vector and the data as 2d array
(data.shape[1] == len(times)).
cmap : colormap
Colors to be mapped to the values.
layout_scale : float
Scaling factor for adjusting the relative size of the layout
on the canvas.
title : str
Title of the figure.
scalings : dict | None
The scalings of the channel types to be applied for plotting. If
``None``, defaults to ``dict(eeg=1e6, grad=1e13, mag=1e15)``.
border : str
Matplotlib borders style to be used for each sensor plot.
fig_facecolor : color
The figure face color. Defaults to black.
fig_background : None | array
A background image for the figure. This must be a valid input to
:func:`matplotlib.pyplot.imshow`. Defaults to ``None``.
font_color : color
The color of tick labels in the colorbar. Defaults to white.
show : bool
Whether to show the figure. Defaults to ``True``.
Returns
-------
fig : instance of :class:`matplotlib.figure.Figure`
Figure distributing one image per channel across sensor topography.
Notes
-----
In an interactive Python session, this plot will be interactive; clicking
on a channel image will pop open a larger view of the image; this image
will always have a colorbar even when the topo plot does not (because it
shows multiple sensor types).
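    Examples
    --------
    A minimal sketch (assuming ``epochs`` is an existing Epochs object)::

        import numpy as np
        from mne.viz import plot_topo_image_epochs
        # order epochs by mean amplitude via the `order` callable
        fig = plot_topo_image_epochs(
            epochs, sigma=0.5,
            order=lambda times, data: np.argsort(data.mean(axis=1)))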
"""
scalings = _handle_default('scalings', scalings)
# make a copy because we discard non-data channels and scale the data
epochs = epochs.copy().load_data()
# use layout to subset channels present in epochs object
if layout is None:
layout = find_layout(epochs.info)
ch_names = set(layout.names) & set(epochs.ch_names)
idxs = [epochs.ch_names.index(ch_name) for ch_name in ch_names]
epochs = epochs.pick(idxs)
# get lists of channel type & scale coefficient
ch_types = epochs.get_channel_types()
scale_coeffs = [scalings.get(ch_type, 1) for ch_type in ch_types]
# scale the data
epochs._data *= np.array(scale_coeffs)[:, np.newaxis]
data = epochs.get_data()
# get vlims for each channel type
vlim_dict = dict()
for ch_type in set(ch_types):
this_data = data[:, np.where(np.array(ch_types) == ch_type)]
vlim_dict[ch_type] = _setup_vmin_vmax(this_data, vmin, vmax)
vlim_array = np.array([vlim_dict[ch_type] for ch_type in ch_types])
# only show colorbar if we have a single channel type
if colorbar is None:
colorbar = (len(set(ch_types)) == 1)
# if colorbar=True, we know we have only 1 channel type so all entries
# in vlim_array are the same, just take the first one
if colorbar and vmin is None and vmax is None:
vmin, vmax = vlim_array[0]
show_func = partial(_erfimage_imshow_unified, scalings=scale_coeffs,
order=order, data=data, epochs=epochs, sigma=sigma,
cmap=cmap, vlim_array=vlim_array)
erf_imshow = partial(_erfimage_imshow, scalings=scale_coeffs, order=order,
data=data, epochs=epochs, sigma=sigma, cmap=cmap,
vlim_array=vlim_array, colorbar=True)
fig = _plot_topo(info=epochs.info, times=epochs.times,
click_func=erf_imshow, show_func=show_func, layout=layout,
colorbar=colorbar, vmin=vmin, vmax=vmax, cmap=cmap,
layout_scale=layout_scale, title=title,
fig_facecolor=fig_facecolor, font_color=font_color,
border=border, x_label='Time (s)', y_label='Epoch',
unified=True, img=True)
add_background_image(fig, fig_background)
plt_show(show)
return fig
| olafhauk/mne-python | mne/viz/topo.py | Python | bsd-3-clause | 39,094 |
from django.conf.urls import url
from home import views
urlpatterns = [
url(r'^$', views.front, name='front'),
url(r'^register/', views.register, name='register'),
url(r'^login/', views.userLogin, name='login'),
]
| Vritrahan/Chitragupt-Abhilekh | home/urls.py | Python | gpl-3.0 | 228 |
"""
==========
Libsvm GUI
==========
A simple graphical frontend for Libsvm mainly intended for didactic
purposes. You can create data points by point and click and visualize
the decision region induced by different kernels and parameter settings.
To create positive examples click the left mouse button; to create
negative examples click the right button.
If all examples are from the same class, it uses a one-class SVM.
"""
from __future__ import division
print __doc__
# Author: Peter Prettenhoer <[email protected]>
#
# License: BSD Style.
import matplotlib
matplotlib.use('TkAgg')
from matplotlib.backends.backend_tkagg import FigureCanvasTkAgg
from matplotlib.backends.backend_tkagg import NavigationToolbar2TkAgg
from matplotlib.figure import Figure
from matplotlib.contour import ContourSet
import Tkinter as Tk
import sys
import numpy as np
from sklearn import svm
from sklearn.datasets import dump_svmlight_file
y_min, y_max = -50, 50
x_min, x_max = -50, 50
class Model(object):
"""The Model which hold the data. It implements the
observable in the observer pattern and notifies the
registered observers on change event.
"""
def __init__(self):
self.observers = []
self.surface = None
self.data = []
self.cls = None
self.surface_type = 0
def changed(self, event):
"""Notify the observers. """
for observer in self.observers:
observer.update(event, self)
def add_observer(self, observer):
"""Register an observer. """
self.observers.append(observer)
def set_surface(self, surface):
self.surface = surface
def dump_svmlight_file(self, file):
data = np.array(self.data)
X = data[:, 0:2]
y = data[:, 2]
dump_svmlight_file(X, y, file)
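# Observer-pattern sketch (mirrors what main() below wires up):
#
#     model = Model()
#     view = ...                       # any object with update(event, model)
#     model.add_observer(view)
#     model.data.append((x, y, 1))     # a clicked point with label +1
#     model.changed("example_added")   # notifies every registered observer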
class Controller(object):
def __init__(self, model):
self.model = model
self.kernel = Tk.IntVar()
self.surface_type = Tk.IntVar()
# Whether or not a model has been fitted
self.fitted = False
def fit(self):
print "fit the model"
train = np.array(self.model.data)
X = train[:, 0:2]
y = train[:, 2]
C = float(self.complexity.get())
gamma = float(self.gamma.get())
coef0 = float(self.coef0.get())
degree = int(self.degree.get())
kernel_map = {0: "linear", 1: "rbf", 2: "poly"}
if len(np.unique(y)) == 1:
clf = svm.OneClassSVM(kernel=kernel_map[self.kernel.get()],
gamma=gamma, coef0=coef0, degree=degree)
clf.fit(X)
else:
clf = svm.SVC(kernel=kernel_map[self.kernel.get()], C=C,
gamma=gamma, coef0=coef0, degree=degree)
clf.fit(X, y)
if hasattr(clf, 'score'):
print "Accuracy:", clf.score(X, y) * 100
X1, X2, Z = self.decision_surface(clf)
self.model.clf = clf
self.model.set_surface((X1, X2, Z))
self.model.surface_type = self.surface_type.get()
self.fitted = True
self.model.changed("surface")
def decision_surface(self, cls):
delta = 1
x = np.arange(x_min, x_max + delta, delta)
y = np.arange(y_min, y_max + delta, delta)
X1, X2 = np.meshgrid(x, y)
Z = cls.decision_function(np.c_[X1.ravel(), X2.ravel()])
Z = Z.reshape(X1.shape)
return X1, X2, Z
def clear_data(self):
self.model.data = []
self.fitted = False
self.model.changed("clear")
def add_example(self, x, y, label):
self.model.data.append((x, y, label))
self.model.changed("example_added")
# update decision surface if already fitted.
self.refit()
def refit(self):
"""Refit the model if already fitted. """
if self.fitted:
self.fit()
class View(object):
"""Test docstring. """
def __init__(self, root, controller):
f = Figure()
ax = f.add_subplot(111)
ax.set_xticks([])
ax.set_yticks([])
ax.set_xlim((x_min, x_max))
ax.set_ylim((y_min, y_max))
canvas = FigureCanvasTkAgg(f, master=root)
canvas.show()
canvas.get_tk_widget().pack(side=Tk.TOP, fill=Tk.BOTH, expand=1)
canvas._tkcanvas.pack(side=Tk.TOP, fill=Tk.BOTH, expand=1)
canvas.mpl_connect('button_press_event', self.onclick)
toolbar = NavigationToolbar2TkAgg(canvas, root)
toolbar.update()
self.controllbar = ControllBar(root, controller)
self.f = f
self.ax = ax
self.canvas = canvas
self.controller = controller
self.contours = []
self.c_labels = None
self.plot_kernels()
def plot_kernels(self):
self.ax.text(-50, -60, "Linear: $u^T v$")
self.ax.text(-20, -60, "RBF: $\exp (-\gamma \| u-v \|^2)$")
self.ax.text(10, -60, "Poly: $(\gamma \, u^T v + r)^d$")
def onclick(self, event):
if event.xdata and event.ydata:
if event.button == 1:
self.controller.add_example(event.xdata, event.ydata, 1)
elif event.button == 3:
self.controller.add_example(event.xdata, event.ydata, -1)
def update_example(self, model, idx):
x, y, l = model.data[idx]
if l == 1:
color = 'w'
elif l == -1:
color = 'k'
self.ax.plot([x], [y], "%so" % color, scalex=0.0, scaley=0.0)
def update(self, event, model):
if event == "examples_loaded":
for i in xrange(len(model.data)):
self.update_example(model, i)
if event == "example_added":
self.update_example(model, -1)
if event == "clear":
self.ax.clear()
self.ax.set_xticks([])
self.ax.set_yticks([])
self.contours = []
self.c_labels = None
self.plot_kernels()
if event == "surface":
self.remove_surface()
self.plot_support_vectors(model.clf.support_vectors_)
self.plot_decision_surface(model.surface, model.surface_type)
self.canvas.draw()
def remove_surface(self):
"""Remove old decision surface."""
if len(self.contours) > 0:
for contour in self.contours:
if isinstance(contour, ContourSet):
for lineset in contour.collections:
lineset.remove()
else:
contour.remove()
self.contours = []
def plot_support_vectors(self, support_vectors):
"""Plot the support vectors by placing circles over the
corresponding data points and adds the circle collection
to the contours list."""
cs = self.ax.scatter(support_vectors[:, 0], support_vectors[:, 1],
s=80, edgecolors="k", facecolors="none")
self.contours.append(cs)
def plot_decision_surface(self, surface, type):
X1, X2, Z = surface
if type == 0:
levels = [-1.0, 0.0, 1.0]
linestyles = ['dashed', 'solid', 'dashed']
colors = 'k'
self.contours.append(self.ax.contour(X1, X2, Z, levels,
colors=colors,
linestyles=linestyles))
elif type == 1:
self.contours.append(self.ax.contourf(X1, X2, Z, 10,
cmap=matplotlib.cm.bone,
origin='lower',
alpha=0.85))
self.contours.append(self.ax.contour(X1, X2, Z, [0.0],
colors='k',
linestyles=['solid']))
else:
raise ValueError("surface type unknown")
class ControllBar(object):
def __init__(self, root, controller):
fm = Tk.Frame(root)
kernel_group = Tk.Frame(fm)
Tk.Radiobutton(kernel_group, text="Linear", variable=controller.kernel,
value=0, command=controller.refit).pack(anchor=Tk.W)
Tk.Radiobutton(kernel_group, text="RBF", variable=controller.kernel,
value=1, command=controller.refit).pack(anchor=Tk.W)
Tk.Radiobutton(kernel_group, text="Poly", variable=controller.kernel,
value=2, command=controller.refit).pack(anchor=Tk.W)
kernel_group.pack(side=Tk.LEFT)
valbox = Tk.Frame(fm)
controller.complexity = Tk.StringVar()
controller.complexity.set("1.0")
c = Tk.Frame(valbox)
Tk.Label(c, text="C:", anchor="e", width=7).pack(side=Tk.LEFT)
Tk.Entry(c, width=6, textvariable=controller.complexity).pack(
side=Tk.LEFT)
c.pack()
controller.gamma = Tk.StringVar()
controller.gamma.set("0.01")
g = Tk.Frame(valbox)
Tk.Label(g, text="gamma:", anchor="e", width=7).pack(side=Tk.LEFT)
Tk.Entry(g, width=6, textvariable=controller.gamma).pack(side=Tk.LEFT)
g.pack()
controller.degree = Tk.StringVar()
controller.degree.set("3")
d = Tk.Frame(valbox)
Tk.Label(d, text="degree:", anchor="e", width=7).pack(side=Tk.LEFT)
Tk.Entry(d, width=6, textvariable=controller.degree).pack(side=Tk.LEFT)
d.pack()
controller.coef0 = Tk.StringVar()
controller.coef0.set("0")
r = Tk.Frame(valbox)
Tk.Label(r, text="coef0:", anchor="e", width=7).pack(side=Tk.LEFT)
Tk.Entry(r, width=6, textvariable=controller.coef0).pack(side=Tk.LEFT)
r.pack()
valbox.pack(side=Tk.LEFT)
cmap_group = Tk.Frame(fm)
Tk.Radiobutton(cmap_group, text="Hyperplanes",
variable=controller.surface_type, value=0,
command=controller.refit).pack(anchor=Tk.W)
Tk.Radiobutton(cmap_group, text="Surface",
variable=controller.surface_type, value=1,
command=controller.refit).pack(anchor=Tk.W)
cmap_group.pack(side=Tk.LEFT)
train_button = Tk.Button(fm, text='Fit', width=5,
command=controller.fit)
train_button.pack()
fm.pack(side=Tk.LEFT)
Tk.Button(fm, text='Clear', width=5,
command=controller.clear_data).pack(side=Tk.LEFT)
def get_parser():
from optparse import OptionParser
op = OptionParser()
op.add_option("--output",
action="store", type="str", dest="output",
help="Path where to dump data.")
return op
def main(argv):
op = get_parser()
opts, args = op.parse_args(argv[1:])
root = Tk.Tk()
model = Model()
controller = Controller(model)
root.wm_title("Scikit-learn Libsvm GUI")
view = View(root, controller)
model.add_observer(view)
Tk.mainloop()
if opts.output:
model.dump_svmlight_file(opts.output)
if __name__ == "__main__":
main(sys.argv)
| seckcoder/lang-learn | python/sklearn/examples/applications/svm_gui.py | Python | unlicense | 11,157 |
# -*- coding: utf-8 -*-
from rest_framework import status as http_status
from framework.exceptions import HTTPError
from website.citations.providers import CitationsProvider
from addons.zotero.serializer import ZoteroSerializer
class ZoteroCitationsProvider(CitationsProvider):
serializer = ZoteroSerializer
provider_name = 'zotero'
def _folder_to_dict(self, data):
return dict(
name=data['data'].get('name'),
list_id=data['data'].get('key'),
parent_id=data['data'].get('parentCollection'),
id=data['data'].get('key'),
)
def widget(self, node_addon):
"""
        Serializes settings needed to build the widget.
        library_id is added specifically for zotero.
"""
ret = super(ZoteroCitationsProvider, self).widget(node_addon)
ret.update({
'library_id': node_addon.library_id
})
return ret
def set_config(self, node_addon, user, external_list_id, external_list_name, auth, external_library_id=None, external_library_name=None):
""" Changes folder associated with addon and logs event"""
# Ensure request has all required information
# Tell the user's addon settings that this node is connecting
metadata = {'folder': external_list_id}
if external_library_id:
metadata['library'] = external_library_id
metadata['folder'] = None
node_addon.user_settings.grant_oauth_access(
node=node_addon.owner,
external_account=node_addon.external_account,
metadata=metadata
)
node_addon.user_settings.save()
# update this instance
node_addon.list_id = external_list_id
if external_library_id:
node_addon.library_id = external_library_id
node_addon.list_id = None
node_addon.save()
if external_library_id:
node_addon.owner.add_log(
'{0}_library_selected'.format(self.provider_name),
params={
'project': node_addon.owner.parent_id,
'node': node_addon.owner._id,
'library_name': external_library_name,
'library_id': external_library_id
},
auth=auth,
)
else:
node_addon.owner.add_log(
'{0}_folder_selected'.format(self.provider_name),
params={
'project': node_addon.owner.parent_id,
'node': node_addon.owner._id,
'folder_id': external_list_id,
'folder_name': external_list_name,
},
auth=auth,
)
def citation_list(self, node_addon, user, list_id, show='all'):
"""Returns a list of citations"""
attached_list_id = self._folder_id(node_addon)
attached_library_id = getattr(node_addon, 'library_id', None)
account_folders = node_addon.get_folders(path=attached_library_id)
# Folders with 'parent_list_id'==None are children of 'All Documents'
for folder in account_folders:
if not folder.get('parent_list_id'):
folder['parent_list_id'] = 'ROOT'
if user:
node_account = node_addon.external_account
user_is_owner = user.external_accounts.filter(id=node_account.id).exists()
else:
user_is_owner = False
# verify this list is the attached list or its descendant
if not user_is_owner and (list_id != attached_list_id and attached_list_id is not None):
folders = {
(each['provider_list_id'] or 'ROOT'): each
for each in account_folders
}
if list_id is None:
ancestor_id = 'ROOT'
else:
ancestor_id = folders[list_id].get('parent_list_id')
while ancestor_id != attached_list_id:
                if ancestor_id == '__':
raise HTTPError(http_status.HTTP_403_FORBIDDEN)
ancestor_id = folders[ancestor_id].get('parent_list_id')
contents = []
if list_id is None:
contents = [node_addon.root_folder]
else:
user_settings = user.get_addon(self.provider_name) if user else None
if show in ('all', 'folders'):
contents += [
self.serializer(
node_settings=node_addon,
user_settings=user_settings,
).serialize_folder(each)
for each in account_folders
if each.get('parent_list_id') == list_id
]
if show in ('all', 'citations'):
contents += [
self.serializer(
node_settings=node_addon,
user_settings=user_settings,
).serialize_citation(each)
for each in node_addon.api.get_list(list_id=list_id, library_id=attached_library_id)
]
return {
'contents': contents
}
| mfraezz/osf.io | addons/zotero/provider.py | Python | apache-2.0 | 5,211 |
# coding: utf-8
# Copyright (c) Pymatgen Development Team.
# Distributed under the terms of the MIT License.
"""
This module implements representations of slabs and surfaces, as well as
algorithms for generating them.
"""
from __future__ import division, unicode_literals
from functools import reduce
__author__ = "Richard Tran, Wenhao Sun, Zihan Xu, Shyue Ping Ong"
__copyright__ = "Copyright 2014, The Materials Virtual Lab"
__version__ = "0.1"
__maintainer__ = "Shyue Ping Ong"
__email__ = "[email protected]"
__date__ = "6/10/14"
from fractions import gcd
import math
import itertools
import logging
import numpy as np
from scipy.spatial.distance import squareform
from scipy.cluster.hierarchy import linkage, fcluster
from monty.fractions import lcm
from pymatgen.core.periodic_table import get_el_sp
from pymatgen.core.structure import Structure
from pymatgen.core.lattice import Lattice
from pymatgen.core.sites import PeriodicSite
from pymatgen.symmetry.analyzer import SpacegroupAnalyzer
from pymatgen.util.coord_utils import in_coord_list
from pymatgen.analysis.structure_matcher import StructureMatcher
logger = logging.getLogger(__name__)
class Slab(Structure):
"""
Subclass of Structure representing a Slab. Implements additional
attributes pertaining to slabs, but the init method does not
    actually implement any algorithm that creates a slab. It is a
    dummy class whose init method only holds information about the
    slab; slabs are normally generated with :class:`SlabGenerator` below.
    It also has additional methods that return other information
    about a slab such as the surface area, normal, and atom adsorption.
Note that all Slabs have the surface normal oriented in the c-direction.
This means the lattice vectors a and b are in the surface plane and the c
    vector is out of the surface plane (though not necessarily perpendicular to
the surface.)
.. attribute:: miller_index
Miller index of plane parallel to surface.
.. attribute:: scale_factor
Final computed scale factor that brings the parent cell to the
surface cell.
.. attribute:: shift
The shift value in Angstrom that indicates how much this
slab has been shifted.
"""
def __init__(self, lattice, species, coords, miller_index,
oriented_unit_cell, shift, scale_factor,
validate_proximity=False, to_unit_cell=False,
coords_are_cartesian=False, site_properties=None, energy=None):
"""
Makes a Slab structure, a structure object with additional information
and methods pertaining to slabs.
Args:
lattice (Lattice/3x3 array): The lattice, either as a
:class:`pymatgen.core.lattice.Lattice` or
simply as any 2D array. Each row should correspond to a lattice
vector. E.g., [[10,0,0], [20,10,0], [0,0,30]] specifies a
lattice with lattice vectors [10,0,0], [20,10,0] and [0,0,30].
species ([Specie]): Sequence of species on each site. Can take in
flexible input, including:
i. A sequence of element / specie specified either as string
symbols, e.g. ["Li", "Fe2+", "P", ...] or atomic numbers,
e.g., (3, 56, ...) or actual Element or Specie objects.
ii. List of dict of elements/species and occupancies, e.g.,
[{"Fe" : 0.5, "Mn":0.5}, ...]. This allows the setup of
disordered structures.
coords (Nx3 array): list of fractional/cartesian coordinates of
each species.
miller_index ([h, k, l]): Miller index of plane parallel to
surface. Note that this is referenced to the input structure. If
you need this to be based on the conventional cell,
you should supply the conventional structure.
oriented_unit_cell (Structure): The oriented_unit_cell from which
this Slab is created (by scaling in the c-direction).
shift (float): The shift in the c-direction applied to get the
termination.
scale_factor (array): scale_factor Final computed scale factor
that brings the parent cell to the surface cell.
validate_proximity (bool): Whether to check if there are sites
that are less than 0.01 Ang apart. Defaults to False.
coords_are_cartesian (bool): Set to True if you are providing
coordinates in cartesian coordinates. Defaults to False.
site_properties (dict): Properties associated with the sites as a
dict of sequences, e.g., {"magmom":[5,5,5,5]}. The sequences
have to be the same length as the atomic species and
fractional_coords. Defaults to None for no properties.
energy (float): A value for the energy.
"""
self.oriented_unit_cell = oriented_unit_cell
self.miller_index = tuple(miller_index)
self.shift = shift
self.scale_factor = scale_factor
self.energy = energy
super(Slab, self).__init__(
lattice, species, coords, validate_proximity=validate_proximity,
to_unit_cell=to_unit_cell,
coords_are_cartesian=coords_are_cartesian,
site_properties=site_properties)
def get_orthogonal_c_slab(self):
"""
This method returns a Slab where the normal (c lattice vector) is
"forced" to be exactly orthogonal to the surface a and b lattice
vectors. **Note that this breaks inherent symmetries in the slab.**
It should be pointed out that orthogonality is not required to get good
surface energies, but it can be useful in cases where the slabs are
subsequently used for postprocessing of some kind, e.g. generating
GBs or interfaces.
"""
a, b, c = self.lattice.matrix
new_c = np.cross(a, b)
new_c /= np.linalg.norm(new_c)
new_c = np.dot(c, new_c) * new_c
new_latt = Lattice([a, b, new_c])
return Slab(lattice=new_latt, species=self.species,
coords=self.cart_coords, miller_index=self.miller_index,
oriented_unit_cell=self.oriented_unit_cell,
shift=self.shift, scale_factor=self.scale_factor,
coords_are_cartesian=True, energy=self.energy)
def get_sorted_structure(self, key=None, reverse=False):
"""
Get a sorted copy of the structure. The parameters have the same
meaning as in list.sort. By default, sites are sorted by the
electronegativity of the species. Note that Slab has to override this
because of the different __init__ args.
Args:
key: Specifies a function of one argument that is used to extract
a comparison key from each list element: key=str.lower. The
default value is None (compare the elements directly).
reverse (bool): If set to True, then the list elements are sorted
as if each comparison were reversed.
"""
sites = sorted(self, key=key, reverse=reverse)
s = Structure.from_sites(sites)
return Slab(s.lattice, s.species_and_occu, s.frac_coords,
self.miller_index, self.oriented_unit_cell, self.shift,
self.scale_factor, site_properties=s.site_properties)
def copy(self, site_properties=None, sanitize=False):
"""
Convenience method to get a copy of the structure, with options to add
site properties.
Args:
site_properties (dict): Properties to add or override. The
properties are specified in the same way as the constructor,
i.e., as a dict of the form {property: [values]}. The
properties should be in the order of the *original* structure
if you are performing sanitization.
sanitize (bool): If True, this method will return a sanitized
structure. Sanitization performs a few things: (i) The sites are
sorted by electronegativity, (ii) a LLL lattice reduction is
carried out to obtain a relatively orthogonalized cell,
(iii) all fractional coords for sites are mapped into the
unit cell.
Returns:
A copy of the Structure, with optionally new site_properties and
optionally sanitized.
"""
props = self.site_properties
if site_properties:
props.update(site_properties)
return Slab(self.lattice, self.species_and_occu, self.frac_coords,
self.miller_index, self.oriented_unit_cell, self.shift,
self.scale_factor, site_properties=props)
@property
def dipole(self):
"""
Calculates the dipole of the Slab in the direction of the surface
normal. Note that the Slab must be oxidation state-decorated for this
to work properly. Otherwise, the Slab will always have a dipole of 0.
"""
dipole = np.zeros(3)
mid_pt = np.sum(self.cart_coords, axis=0) / len(self)
normal = self.normal
        # p = sum_i q_i [(r_i - r_mid) . n_hat] n_hat, where n_hat is the
        # unit surface normal; only the component along the normal is kept.
        for site in self:
charge = sum([getattr(sp, "oxi_state", 0) * amt
for sp, amt in site.species_and_occu.items()])
dipole += charge * np.dot(site.coords - mid_pt, normal) * normal
return dipole
def is_polar(self, tol_dipole_per_unit_area=1e-3):
"""
Checks whether the surface is polar by computing the dipole per unit
area. Note that the Slab must be oxidation state-decorated for this
to work properly. Otherwise, the Slab will always be non-polar.
Args:
tol_dipole_per_unit_area (float): A tolerance. If the dipole
magnitude per unit area is less than this value, the Slab is
considered non-polar. Defaults to 1e-3, which is usually
pretty good. Normalized dipole per unit area is used as it is
more reliable than using the total, which tends to be larger for
slabs with larger surface areas.
"""
dip_per_unit_area = self.dipole / self.surface_area
return np.linalg.norm(dip_per_unit_area) > tol_dipole_per_unit_area
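    # Illustrative sketch: is_polar() is only meaningful when sites carry
    # oxidation states (the element/charge mapping below is an assumption):
    #
    #     slab.add_oxidation_state_by_element({"Zn": 2, "O": -2})
    #     polar = slab.is_polar()
    #
    # Without the decoration every site charge is zero and the slab is
    # always reported as non-polar.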
@property
def normal(self):
"""
Calculates the surface normal vector of the slab
"""
normal = np.cross(self.lattice.matrix[0], self.lattice.matrix[1])
normal /= np.linalg.norm(normal)
return normal
@property
def surface_area(self):
"""
Calculates the surface area of the slab
"""
m = self.lattice.matrix
return np.linalg.norm(np.cross(m[0], m[1]))
def add_adsorbate_atom(self, indices, specie, distance):
"""
        Gets the structure of single atom adsorption on a
        slab structure from the Slab class (in [0, 0, 1]).
        Args:
            indices ([int]): Indices of sites on which to put the adsorbate.
                The adsorbed atom will be placed along the surface normal
                relative to the center of these sites.
specie (Specie/Element/str): adsorbed atom species
distance (float): between centers of the adsorbed atom and the
given site in Angstroms.
"""
#Let's do the work in cartesian coords
center = np.sum([self[i].coords for i in indices], axis=0) / len(
indices)
coords = center + self.normal * distance / np.linalg.norm(self.normal)
self.append(specie, coords, coords_are_cartesian=True)
def __str__(self):
comp = self.composition
outs = [
"Slab Summary (%s)" % comp.formula,
"Reduced Formula: %s" % comp.reduced_formula,
"Miller index: %s" % (self.miller_index, ),
"Shift: %.4f, Scale Factor: %s" % (self.shift,
self.scale_factor.__str__())]
to_s = lambda x: "%0.6f" % x
outs.append("abc : " + " ".join([to_s(i).rjust(10)
for i in self.lattice.abc]))
outs.append("angles: " + " ".join([to_s(i).rjust(10)
for i in self.lattice.angles]))
outs.append("Sites ({i})".format(i=len(self)))
for i, site in enumerate(self):
outs.append(" ".join([str(i + 1), site.species_string,
" ".join([to_s(j).rjust(12)
for j in site.frac_coords])]))
return "\n".join(outs)
def as_dict(self):
d = super(Slab, self).as_dict()
d["@module"] = self.__class__.__module__
d["@class"] = self.__class__.__name__
d["oriented_unit_cell"] = self.oriented_unit_cell.as_dict()
d["miller_index"] = self.miller_index
d["shift"] = self.shift
d["scale_factor"] = self.scale_factor
d["energy"] = self.energy
return d
@classmethod
def from_dict(cls, d):
lattice = Lattice.from_dict(d["lattice"])
sites = [PeriodicSite.from_dict(sd, lattice) for sd in d["sites"]]
s = Structure.from_sites(sites)
return Slab(
lattice=lattice,
species=s.species_and_occu, coords=s.frac_coords,
miller_index=d["miller_index"],
oriented_unit_cell=Structure.from_dict(d["oriented_unit_cell"]),
shift=d["shift"], scale_factor=d["scale_factor"],
site_properties=s.site_properties, energy=d["energy"]
)
class SlabGenerator(object):
"""
This class generates different slabs using shift values determined by where a
    unique termination can be found, along with other criteria such as where a
termination doesn't break a polyhedral bond. The shift value then indicates
where the slab layer will begin and terminate in the slab-vacuum system.
.. attribute:: oriented_unit_cell
A unit cell of the parent structure with the miller
index of plane parallel to surface
.. attribute:: parent
Parent structure from which Slab was derived.
.. attribute:: lll_reduce
Whether or not the slabs will be orthogonalized
.. attribute:: center_slab
Whether or not the slabs will be centered between
the vacuum layer
.. attribute:: slab_scale_factor
Final computed scale factor that brings the parent cell to the
surface cell.
.. attribute:: miller_index
Miller index of plane parallel to surface.
.. attribute:: min_slab_size
Minimum size in angstroms of layers containing atoms
.. attribute:: min_vac_size
        Minimum size in angstroms of layers containing vacuum
"""
def __init__(self, initial_structure, miller_index, min_slab_size,
min_vacuum_size, lll_reduce=False, center_slab=False,
primitive=True, max_normal_search=None):
"""
Calculates the slab scale factor and uses it to generate a unit cell
of the initial structure that has been oriented by its miller index.
Also stores the initial information needed later on to generate a slab.
Args:
initial_structure (Structure): Initial input structure. Note that to
ensure that the miller indices correspond to usual
crystallographic definitions, you should supply a conventional
unit cell structure.
miller_index ([h, k, l]): Miller index of plane parallel to
surface. Note that this is referenced to the input structure. If
you need this to be based on the conventional cell,
you should supply the conventional structure.
min_slab_size (float): In Angstroms
            min_vacuum_size (float): In Angstroms
lll_reduce (bool): Whether to perform an LLL reduction on the
eventual structure.
center_slab (bool): Whether to center the slab in the cell with
equal vacuum spacing from the top and bottom.
primitive (bool): Whether to reduce any generated slabs to a
primitive cell (this does **not** mean the slab is generated
from a primitive cell, it simply means that after slab
generation, we attempt to find shorter lattice vectors,
which lead to less surface area and smaller cells).
max_normal_search (int): If set to a positive integer, the code will
conduct a search for a normal lattice vector that is as
perpendicular to the surface as possible by considering
multiples linear combinations of lattice vectors up to
max_normal_search. This has no bearing on surface energies,
but may be useful as a preliminary step to generating slabs
for absorption and other sizes. It is typical that this will
not be the smallest possible cell for simulation. Normality
is not guaranteed, but the oriented cell will have the c
vector as normal as possible (within the search range) to the
surface. A value of up to the max absolute Miller index is
usually sufficient.
"""
latt = initial_structure.lattice
d = abs(reduce(gcd, miller_index))
miller_index = tuple([int(i / d) for i in miller_index])
#Calculate the surface normal using the reciprocal lattice vector.
recp = latt.reciprocal_lattice_crystallographic
normal = recp.get_cartesian_coords(miller_index)
normal /= np.linalg.norm(normal)
slab_scale_factor = []
non_orth_ind = []
        eye = np.eye(3, dtype=int)
for i, j in enumerate(miller_index):
if j == 0:
# Lattice vector is perpendicular to surface normal, i.e.,
# in plane of surface. We will simply choose this lattice
# vector as one of the basis vectors.
slab_scale_factor.append(eye[i])
else:
#Calculate projection of lattice vector onto surface normal.
d = abs(np.dot(normal, latt.matrix[i])) / latt.abc[i]
non_orth_ind.append((i, d))
# We want the vector that has maximum magnitude in the
# direction of the surface normal as the c-direction.
# Results in a more "orthogonal" unit cell.
c_index, dist = max(non_orth_ind, key=lambda t: t[1])
if len(non_orth_ind) > 1:
lcm_miller = lcm(*[miller_index[i] for i, d in non_orth_ind])
for (i, di), (j, dj) in itertools.combinations(non_orth_ind, 2):
l = [0, 0, 0]
l[i] = -int(round(lcm_miller / miller_index[i]))
l[j] = int(round(lcm_miller / miller_index[j]))
slab_scale_factor.append(l)
if len(slab_scale_factor) == 2:
break
if max_normal_search is None:
slab_scale_factor.append(eye[c_index])
else:
index_range = sorted(
reversed(range(-max_normal_search, max_normal_search + 1)),
key=lambda x: abs(x))
candidates = []
for uvw in itertools.product(index_range, index_range, index_range):
if (not any(uvw)) or abs(
np.linalg.det(slab_scale_factor + [uvw])) < 1e-8:
continue
vec = latt.get_cartesian_coords(uvw)
l = np.linalg.norm(vec)
cosine = abs(np.dot(vec, normal) / l)
candidates.append((uvw, cosine, l))
if abs(abs(cosine) - 1) < 1e-8:
# If cosine of 1 is found, no need to search further.
break
# We want the indices with the maximum absolute cosine,
# but smallest possible length.
            uvw, cosine, l = max(candidates, key=lambda x: (x[1], -x[2]))
slab_scale_factor.append(uvw)
slab_scale_factor = np.array(slab_scale_factor)
        # Let's make sure we have a right-handed crystallographic system
if np.linalg.det(slab_scale_factor) < 0:
slab_scale_factor *= -1
single = initial_structure.copy()
single.make_supercell(slab_scale_factor)
self.oriented_unit_cell = Structure.from_sites(single,
to_unit_cell=True)
self.parent = initial_structure
self.lll_reduce = lll_reduce
self.center_slab = center_slab
self.slab_scale_factor = slab_scale_factor
self.miller_index = miller_index
self.min_vac_size = min_vacuum_size
self.min_slab_size = min_slab_size
self.primitive = primitive
self._normal = normal
a, b, c = self.oriented_unit_cell.lattice.matrix
self._proj_height = abs(np.dot(normal, c))
def get_slab(self, shift=0, tol=0.1, energy=None):
"""
This method takes in shift value for the c lattice direction and
generates a slab based on the given shift. You should rarely use this
method. Instead, it is used by other generation algorithms to obtain
all slabs.
Arg:
shift (float): A shift value in Angstrom that determines how much a
slab should be shifted.
tol (float): Tolerance to determine primitive cell.
energy (float): An energy to assign to the slab.
Returns:
(Slab) A Slab object with a particular shifted oriented unit cell.
"""
h = self._proj_height
nlayers_slab = int(math.ceil(self.min_slab_size / h))
nlayers_vac = int(math.ceil(self.min_vac_size / h))
nlayers = nlayers_slab + nlayers_vac
species = self.oriented_unit_cell.species_and_occu
props = self.oriented_unit_cell.site_properties
props = {k: v * nlayers_slab for k, v in props.items()}
frac_coords = self.oriented_unit_cell.frac_coords
frac_coords = np.array(frac_coords) +\
np.array([0, 0, -shift])[None, :]
frac_coords = frac_coords - np.floor(frac_coords)
a, b, c = self.oriented_unit_cell.lattice.matrix
new_lattice = [a, b, nlayers * c]
frac_coords[:, 2] = frac_coords[:, 2] / nlayers
all_coords = []
for i in range(nlayers_slab):
fcoords = frac_coords.copy()
fcoords[:, 2] += i / nlayers
all_coords.extend(fcoords)
slab = Structure(new_lattice, species * nlayers_slab, all_coords,
site_properties=props)
scale_factor = self.slab_scale_factor
# Whether or not to orthogonalize the structure
if self.lll_reduce:
lll_slab = slab.copy(sanitize=True)
mapping = lll_slab.lattice.find_mapping(slab.lattice)
scale_factor = np.dot(mapping[2], scale_factor)
slab = lll_slab
# Whether or not to center the slab layer around the vacuum
if self.center_slab:
avg_c = np.average([c[2] for c in slab.frac_coords])
slab.translate_sites(list(range(len(slab))), [0, 0, 0.5 - avg_c])
if self.primitive:
prim = slab.get_primitive_structure(tolerance=tol)
if energy is not None:
energy = prim.volume / slab.volume * energy
slab = prim
return Slab(slab.lattice, slab.species_and_occu,
slab.frac_coords, self.miller_index,
self.oriented_unit_cell, shift,
scale_factor, site_properties=slab.site_properties,
energy=energy)
def _calculate_possible_shifts(self, tol=0.1):
frac_coords = self.oriented_unit_cell.frac_coords
n = len(frac_coords)
if n == 1:
# Clustering does not work when there is only one data point.
shift = frac_coords[0][2] + 0.5
return [shift - math.floor(shift)]
# We cluster the sites according to the c coordinates. But we need to
# take into account PBC. Let's compute a fractional c-coordinate
# distance matrix that accounts for PBC.
dist_matrix = np.zeros((n, n))
h = self._proj_height
# Projection of c lattice vector in
# direction of surface normal.
for i, j in itertools.combinations(list(range(n)), 2):
if i != j:
cdist = frac_coords[i][2] - frac_coords[j][2]
cdist = abs(cdist - round(cdist)) * h
dist_matrix[i, j] = cdist
dist_matrix[j, i] = cdist
condensed_m = squareform(dist_matrix)
z = linkage(condensed_m)
clusters = fcluster(z, tol, criterion="distance")
#Generate dict of cluster# to c val - doesn't matter what the c is.
c_loc = {c: frac_coords[i][2] for i, c in enumerate(clusters)}
#Put all c into the unit cell.
possible_c = [c - math.floor(c) for c in sorted(c_loc.values())]
# Calculate the shifts
nshifts = len(possible_c)
shifts = []
for i in range(nshifts):
if i == nshifts - 1:
# There is an additional shift between the first and last c
# coordinate. But this needs special handling because of PBC.
shift = (possible_c[0] + 1 + possible_c[i]) * 0.5
if shift > 1:
shift -= 1
else:
shift = (possible_c[i] + possible_c[i + 1]) * 0.5
shifts.append(shift - math.floor(shift))
shifts = sorted(shifts)
return shifts
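    # Worked example (illustrative only): if clustering the sites by their
    # fractional c-coordinates yields layer positions [0.1, 0.6], the candidate
    # shifts are the midpoints (0.1 + 0.6) / 2 = 0.35 and, wrapping across the
    # cell boundary, (0.1 + 1 + 0.6) / 2 = 0.85, giving shifts [0.35, 0.85].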
def _get_c_ranges(self, bonds):
c_ranges = set()
bonds = {(get_el_sp(s1), get_el_sp(s2)): dist for (s1, s2), dist in
bonds.items()}
for (sp1, sp2), bond_dist in bonds.items():
for site in self.oriented_unit_cell:
if sp1 in site.species_and_occu:
for nn, d in self.oriented_unit_cell.get_neighbors(
site, bond_dist):
if sp2 in nn.species_and_occu:
c_range = tuple(sorted([site.frac_coords[2],
nn.frac_coords[2]]))
if c_range[1] > 1:
# Takes care of PBC when c coordinate of site
# goes beyond the upper boundary of the cell
c_ranges.add((c_range[0], 1))
c_ranges.add((0, c_range[1] - 1))
elif c_range[0] < 0:
# Takes care of PBC when c coordinate of site
# is below the lower boundary of the unit cell
c_ranges.add((0, c_range[1]))
c_ranges.add((c_range[0] + 1, 1))
elif c_range[0] != c_range[1]:
c_ranges.add(c_range)
return c_ranges
def get_slabs(self, bonds=None, tol=0.1, max_broken_bonds=0):
"""
This method returns a list of slabs that are generated using the list of
shift values from the method, _calculate_possible_shifts(). Before the
shifts are used to create the slabs however, if the user decides to take
into account whether or not a termination will break any polyhedral
structure (bonds != None), this method will filter out any shift values
that do so.
Args:
bonds ({(specie1, specie2): max_bond_dist}: bonds are
specified as a dict of tuples: float of specie1, specie2
and the max bonding distance. For example, PO4 groups may be
defined as {("P", "O"): 3}.
tol (float): Threshold parameter in fcluster in order to check
if two atoms are lying on the same plane. Default thresh set
to 0.1 Angstrom in the direction of the surface normal.
max_broken_bonds (int): Maximum number of allowable broken bonds
for the slab. Use this to limit # of slabs (some structures
may have a lot of slabs). Defaults to zero, which means no
defined bonds must be broken.
Returns:
([Slab]) List of all possible terminations of a particular surface.
Slabs are sorted by the # of bonds broken.
"""
c_ranges = set() if bonds is None else self._get_c_ranges(bonds)
slabs = []
for shift in self._calculate_possible_shifts(tol=tol):
bonds_broken = 0
for r in c_ranges:
if r[0] <= shift <= r[1]:
bonds_broken += 1
if bonds_broken <= max_broken_bonds:
# For now, set the energy to be equal to no. of broken bonds
# per unit cell.
slab = self.get_slab(shift, tol=tol, energy=bonds_broken)
slabs.append(slab)
# Further filters out any surfaces made that might be the same
m = StructureMatcher(ltol=tol, stol=tol, primitive_cell=False,
scale=False)
slabs = [g[0] for g in m.group_structures(slabs)]
return sorted(slabs, key=lambda s: s.energy)
def get_symmetrically_distinct_miller_indices(structure, max_index):
"""
Returns all symmetrically distinct indices below a certain max-index for
a given structure. Analysis is based on the symmetry of the reciprocal
lattice of the structure.
Args:
structure (Structure): input structure.
max_index (int): The maximum index. For example, a max_index of 1
means that (100), (110), and (111) are returned for the cubic
structure. All other indices are equivalent to one of these.
"""
recp_lattice = structure.lattice.reciprocal_lattice_crystallographic
# Need to make sure recp lattice is big enough, otherwise symmetry
# determination will fail. We set the overall volume to 1.
recp_lattice = recp_lattice.scale(1)
recp = Structure(recp_lattice, ["H"], [[0, 0, 0]])
# Creates a function that uses the symmetry operations in the
# structure to find Miller indices that might give repetitive slabs
analyzer = SpacegroupAnalyzer(recp, symprec=0.001)
symm_ops = analyzer.get_symmetry_operations()
unique_millers = []
def is_already_analyzed(miller_index):
for op in symm_ops:
if in_coord_list(unique_millers, op.operate(miller_index)):
return True
return False
r = list(range(-max_index, max_index + 1))
r.reverse()
for miller in itertools.product(r, r, r):
if any([i != 0 for i in miller]):
d = abs(reduce(gcd, miller))
miller = tuple([int(i / d) for i in miller])
if not is_already_analyzed(miller):
unique_millers.append(miller)
return unique_millers
def generate_all_slabs(structure, max_index, min_slab_size, min_vacuum_size,
bonds=None, tol=1e-3, max_broken_bonds=0,
lll_reduce=False, center_slab=False, primitive=True,
max_normal_search=None):
"""
A function that finds all different slabs up to a certain miller index.
Slabs oriented under certain Miller indices that are equivalent to other
slabs in other Miller indices are filtered out using symmetry operations
to get rid of any repetitive slabs. For example, under symmetry operations,
CsCl has equivalent slabs in the (0,0,1), (0,1,0), and (1,0,0) direction.
Args:
structure (Structure): Initial input structure. Note that to
ensure that the miller indices correspond to usual
crystallographic definitions, you should supply a conventional
unit cell structure.
max_index (int): The maximum Miller index to go up to.
min_slab_size (float): In Angstroms
        min_vacuum_size (float): In Angstroms
bonds ({(specie1, specie2): max_bond_dist}: bonds are
specified as a dict of tuples: float of specie1, specie2
and the max bonding distance. For example, PO4 groups may be
defined as {("P", "O"): 3}.
tol (float): Threshold parameter in fcluster in order to check
            if two atoms are lying on the same plane. Defaults to 1e-3 Angstrom
            in the direction of the surface normal.
max_broken_bonds (int): Maximum number of allowable broken bonds
for the slab. Use this to limit # of slabs (some structures
may have a lot of slabs). Defaults to zero, which means no
defined bonds must be broken.
lll_reduce (bool): Whether to perform an LLL reduction on the
eventual structure.
center_slab (bool): Whether to center the slab in the cell with
equal vacuum spacing from the top and bottom.
primitive (bool): Whether to reduce any generated slabs to a
primitive cell (this does **not** mean the slab is generated
from a primitive cell, it simply means that after slab
generation, we attempt to find shorter lattice vectors,
which lead to less surface area and smaller cells).
max_normal_search (int): If set to a positive integer, the code will
conduct a search for a normal lattice vector that is as
perpendicular to the surface as possible by considering
multiples linear combinations of lattice vectors up to
max_normal_search. This has no bearing on surface energies,
but may be useful as a preliminary step to generating slabs
for absorption and other sizes. It is typical that this will
not be the smallest possible cell for simulation. Normality
is not guaranteed, but the oriented cell will have the c
vector as normal as possible (within the search range) to the
surface. A value of up to the max absolute Miller index is
usually sufficient.
"""
all_slabs = []
for miller in get_symmetrically_distinct_miller_indices(
structure, max_index):
gen = SlabGenerator(structure.copy(), miller, min_slab_size,
min_vacuum_size, lll_reduce=lll_reduce,
center_slab=center_slab, primitive=primitive,
max_normal_search=max_normal_search)
slabs = gen.get_slabs(bonds=bonds, tol=tol,
max_broken_bonds=max_broken_bonds)
if len(slabs) > 0:
logger.debug("%s has %d slabs... " % (miller, len(slabs)))
all_slabs.extend(slabs)
return all_slabs
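# --- Usage sketch (illustrative only) ----------------------------------------
# A minimal example of how SlabGenerator and generate_all_slabs might be
# driven, assuming a conventional fcc Cu cell built from the Lattice and
# Structure classes imported at the top of this module. The slab and vacuum
# sizes below are placeholders, not converged values.
if __name__ == "__main__":
    cu = Structure(Lattice.cubic(3.6), ["Cu"] * 4,
                   [[0, 0, 0], [0.5, 0.5, 0], [0.5, 0, 0.5], [0, 0.5, 0.5]])
    gen = SlabGenerator(cu, (1, 1, 1), min_slab_size=10, min_vacuum_size=10,
                        center_slab=True)
    for slab in gen.get_slabs():
        print("%s shift=%.3f nsites=%d"
              % (str(slab.miller_index), slab.shift, len(slab)))
    all_slabs = generate_all_slabs(cu, max_index=1, min_slab_size=10,
                                   min_vacuum_size=10)
    print("%d symmetrically distinct slabs generated" % len(all_slabs))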
| migueldiascosta/pymatgen | pymatgen/core/surface.py | Python | mit | 35,314 |
import os
from collections import defaultdict
from django.db import migrations
def migrate_post_extra_to_postextraelection(apps, schema_editor):
SuggestedPostLock = apps.get_model("moderation_queue", "SuggestedPostLock")
for spl in SuggestedPostLock.objects.all():
# If there's more than one postextraelection, then make sure
# that we create new SuggestedPostLocks for the rest of them:
postextraelections = spl.post_extra.ballot_set.all()
if not postextraelections.exists():
continue
        # The exists() check above guarantees at least one ballot, so take the
        # first as the original and the rest (possibly empty) for new locks.
        use_for_original = postextraelections[0]
        use_for_new_list = postextraelections[1:]
# Update the SuggestedPostLock on the original:
spl.postextraelection = use_for_original
spl.save()
# Then if there are any other PostExtraElection objects
# associated with the post, create new SuggestPostLocks with
# the same metadata for those as well.
for postextraelection in use_for_new_list:
SuggestedPostLock.objects.create(
postextraelection=postextraelection,
post_extra=spl.post_extra,
user=spl.user,
justification=spl.justification,
)
def migrate_postextraelection_to_post_extra(apps, schema_editor):
# The reverse migration here will probably lose data, since we're
# moving from the more expressive model (you can have a suggested
# post lock for just one election that a post is associated with)
    # to the less expressive model (a suggested post lock is for a
# post, not specifying which election it applies to). So by
# default, this migration will raise an exception to stop you
# losing data on rolling back. If you wish to run this reverse
# migration anyway, (e.g. in your local dev environment) please
# set the environment variable ALLOW_LOSSY_REVERSE_MIGRATIONS to
# '1', in which case the exception won't be raised, and a rollback
# will be attempted anyway.
if os.environ.get("ALLOW_LOSSY_REVERSE_MIGRATIONS") != "1":
raise Exception(
"Cannot reverse the 0019_migrate_post_extra_to_postextraelection "
"migration as it will lose data. See the migration file for "
"details on how to do this anyway."
)
SuggestedPostLock = apps.get_model("moderation_queue", "SuggestedPostLock")
# Group these by postextra, user and justification:
grouped = defaultdict(list)
for spl in list(SuggestedPostLock.objects.all()):
key = (spl.postextraelection.postextra, spl.user, spl.justification)
grouped[key].append(spl)
# Now just keep one SuggestedPostLock in each of these groups:
for t, spls_in_group in grouped.items():
to_keep, to_delete_list = spls_in_group[0], spls_in_group[1:]
to_keep.post_extra = to_keep.postextraelection.postextra
to_keep.save()
for to_delete in to_delete_list:
to_delete.delete()
class Migration(migrations.Migration):
dependencies = [
("moderation_queue", "0018_suggestedpostlock_postextraelection")
]
operations = [
migrations.RunPython(
migrate_post_extra_to_postextraelection,
migrate_postextraelection_to_post_extra,
)
]
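# Illustrative only: rolling back past this migration loses the per-election
# lock information, so the reverse step is blocked unless explicitly allowed.
# Assuming a standard Django manage.py setup, a deliberate rollback would look
# roughly like:
#
#     ALLOW_LOSSY_REVERSE_MIGRATIONS=1 python manage.py migrate \
#         moderation_queue 0018_suggestedpostlock_postextraelection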
| DemocracyClub/yournextrepresentative | ynr/apps/moderation_queue/migrations/0019_migrate_post_extra_to_postextraelection.py | Python | agpl-3.0 | 3,476 |
"""empty message
Revision ID: 502662155e36
Revises: 838b8a109034
Create Date: 2016-06-30 01:26:07.563294
"""
# revision identifiers, used by Alembic.
revision = '502662155e36'
down_revision = '838b8a109034'
from alembic import op
import sqlalchemy as sa
def upgrade():
### commands auto generated by Alembic - please adjust! ###
op.create_table('jobs_skills',
sa.Column('job_uuid', sa.String(), nullable=False),
sa.Column('skill_uuid', sa.String(), nullable=False),
sa.ForeignKeyConstraint(['job_uuid'], ['jobs_master.uuid'], ),
sa.ForeignKeyConstraint(['skill_uuid'], ['skills_master.uuid'], ),
sa.PrimaryKeyConstraint('job_uuid', 'skill_uuid')
)
### end Alembic commands ###
def downgrade():
### commands auto generated by Alembic - please adjust! ###
op.drop_table('jobs_skills')
### end Alembic commands ###
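# Illustrative only: with a configured alembic.ini, this revision would
# typically be applied or rolled back from the command line, e.g.
#
#     alembic upgrade 502662155e36
#     alembic downgrade 838b8a109034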
| agileronin/skills-api | migrations/versions/502662155e36_.py | Python | mit | 868 |
# -*- coding: utf-8 -*-
#
# test_upload.py — UploadController test cases
#
# This file is part of debexpo - https://alioth.debian.org/projects/debexpo/
#
# Copyright © 2008 Jonny Lamb <[email protected]>
# Copyright © 2010 Jan Dittberner <[email protected]>
#
# Permission is hereby granted, free of charge, to any person
# obtaining a copy of this software and associated documentation
# files (the "Software"), to deal in the Software without
# restriction, including without limitation the rights to use,
# copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following
# conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
# OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
# HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
# OTHER DEALINGS IN THE SOFTWARE.
"""
UploadController test cases.
"""
__author__ = 'Jonny Lamb'
__copyright__ = 'Copyright © 2008 Jonny Lamb, Copyright © 2010 Jan Dittberner'
__license__ = 'MIT'
import os
import base64
from debexpo.tests import TestController, url
import pylons.test
class TestUploadController(TestController):
def __init__(self, *args, **kwargs):
"""
Sets up database with data to provide a database to test.
"""
TestController.__init__(self, *args, **kwargs)
# Keep this so tests don't have to constantly create it.
self.user_upload_key = 'upload_key'
self.email = '[email protected]'
def setUp(self):
self._setup_models()
self._setup_example_user()
def tearDown(self):
self._remove_example_user()
def testGetRequest(self):
"""
Tests whether requests where method != PUT are rejected with error code 405.
"""
response = self.app.get(url(controller='upload', action='index',
email=self.email, password=self.user_upload_key,
filename='testname.dsc'), expect_errors=True)
self.assertEqual(response.status_int, 405)
def testNoAuthorization(self):
"""
Tests whether requests where the "Authorization" header is missing are rejected with
error code 401 and whether the "WWW-Authenticate" header is sent in the response with
the correct "realm" syntax.
"""
response = self.app.put(
url(controller='upload', action='index',
filename='testname.dsc', email='email', password='pass'), expect_errors=True)
self.assertEqual(response.status_int, 403)
def testFalseAuthentication(self):
"""
Tests whether false authentication details returns a 403 error code.
"""
response = self.app.put(url(controller='upload', action='index',
filename='testname.dsc', email=self.email,
password='wrong'),
expect_errors=True)
self.assertEqual(response.status_int, 403)
def testTrueAuthentication(self):
"""
Tests whether true authentication details returns a nicer error code.
"""
response = self.app.put(url(controller='upload', action='index',
filename='testname.dsc', email=self.email,
password=self.user_upload_key),
expect_errors=False)
self.assertNotEqual(response.status_int, 403)
app_config = pylons.test.pylonsapp.config
        if os.path.isfile(os.path.join(app_config['debexpo.upload.incoming'], 'testname.dsc')):
            os.remove(os.path.join(app_config['debexpo.upload.incoming'], 'testname.dsc'))
def testExtensionNotAllowed(self):
"""
Tests whether uploads of an unknown file extensions are rejected with error code 403.
"""
response = self.app.put(url(controller='upload', action='index',
filename='testname.unknown', email=self.email,
password=self.user_upload_key),
expect_errors=True)
self.assertEqual(response.status_int, 403)
def testSuccessfulUpload(self):
"""
Tests whether uploads with sane file extensions and authorization are successful.
"""
response = self.app.put(url(
controller='upload', action='index',
filename='testfile2.dsc',
email=self.email,
password=self.user_upload_key),
params='contents', expect_errors=False)
self.assertEqual(response.status_int, 200)
app_config = pylons.test.pylonsapp.config
self.assertTrue(os.path.isfile(os.path.join(app_config['debexpo.upload.incoming'],
'testfile2.dsc')))
self.assertEqual(file(os.path.join(app_config['debexpo.upload.incoming'],
'testfile2.dsc')).read(), 'contents')
if os.path.isfile(os.path.join(app_config['debexpo.upload.incoming'], 'testfile2.dsc')):
os.remove(os.path.join(app_config['debexpo.upload.incoming'], 'testfile2.dsc'))
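# Illustrative only: assuming a standard pylons/nose test setup for debexpo,
# this module would typically be run with something like
#
#     nosetests debexpo/tests/functional/test_upload.py
#
# The upload itself is a plain HTTP PUT of the file contents to a URL that
# encodes the uploader's email address, upload key and target filename.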
| swvist/Debexpo | debexpo/tests/functional/test_upload.py | Python | mit | 5,784 |
from pycp2k.inputsection import InputSection
class _ci_neb1(InputSection):
def __init__(self):
InputSection.__init__(self)
self.Nsteps_it = None
self._name = "CI_NEB"
self._keywords = {'Nsteps_it': 'NSTEPS_IT'}
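# Illustrative only: this class mirrors the CP2K &CI_NEB input section, so a
# hypothetical pycp2k script might set its single keyword like this (how the
# section is reached depends on the rest of the generated input tree):
#
#     ci_neb = _ci_neb1()
#     ci_neb.Nsteps_it = 5   # rendered as "NSTEPS_IT 5" inside &CI_NEB ... &END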
| SINGROUP/pycp2k | pycp2k/classes/_ci_neb1.py | Python | lgpl-3.0 | 250 |
# THREE GOLD STARS
# Sudoku [http://en.wikipedia.org/wiki/Sudoku]
# is a logic puzzle where a game
# is defined by a partially filled
# 9 x 9 square of digits where each square
# contains one of the digits 1,2,3,4,5,6,7,8,9.
# For this question we will generalize
# and simplify the game.
# Define a procedure, check_sudoku,
# that takes as input a square list
# of lists representing an n x n
# sudoku puzzle solution and returns the boolean
# True if the input is a valid
# sudoku square and returns the boolean False
# otherwise.
# A valid sudoku square satisfies these
# two properties:
# 1. Each column of the square contains
# each of the whole numbers from 1 to n exactly once.
# 2. Each row of the square contains each
# of the whole numbers from 1 to n exactly once.
# You may assume the input is square and contains at
# least one row and column.
correct = [[1,2,3],
[2,3,1],
[3,1,2]]
incorrect = [[1,2,3,4],
[2,3,1,3],
[3,1,2,3],
[4,4,4,4]]
incorrect2 = [[1,2,3,4],
[2,3,1,4],
[4,1,2,3],
[3,4,1,2]]
incorrect3 = [[1,2,3,4,5],
[2,3,1,5,6],
[4,5,2,1,3],
[3,4,5,2,1],
[5,6,4,3,2]]
incorrect4 = [['a','b','c'],
['b','c','a'],
['c','a','b']]
incorrect5 = [ [1, 1.5],
[1.5, 1]]
def check_sudoku(square):
    numOfDig = len(square[0])
    expectedSum = (numOfDig * (numOfDig + 1))/2
    # Valid only if every row and every column passes the checks
    return (checkAllRow(square, numOfDig, expectedSum) and
            checkAllCol(square, numOfDig, expectedSum))
'''
Pseudo Code
series of checks to check Row:
grab one list from Sudoku Square (contains 1 row)
check length of each row
check for duplicates? (check sum)
use counter to check each digit
'''
def checkAllRow(square, numOfDig, sumAllDig):
for row in square: #row is a list containing digits
if not checkRow(row, numOfDig, sumAllDig):
return False
return True
def checkRow(rowList, numOfDig, sumAllDig):
if len(rowList) != numOfDig:
return False
# compares sum of digits in one row to sum should be found
if sumAllDig != calcSumOneRow(rowList):
return False
if not checkEachDigitOneRow(rowList, numOfDig):
return False
else:
return True
def calcSumOneRow(oneRow):
sum = 0
for digit in oneRow:
if not isinstance(digit, int):
return False
else:
sum += digit
return sum
def checkEachDigitOneRow(oneRow, numOfDig):
index = 1
while index <= numOfDig:
if index not in oneRow:
return False
index += 1
return True
'''
Pseudo Code
series of checks to check Column:
Trick accessing same col in each row
Create list with numbers gathered
Run same check row code
'''
def checkAllCol(square, numOfDig, sumAllDig):
index = 0
while index < numOfDig:
column = []
for row in square:
column.append(row[index])
if checkRow(column, numOfDig, sumAllDig):
index += 1
else:
return False
return True
print check_sudoku(correct)
#>>> True
print check_sudoku(incorrect)
#>>> False
print check_sudoku(incorrect2)
#>>> False
print check_sudoku(incorrect3)
#>>> False
print check_sudoku(incorrect4)
#>>> False
print check_sudoku(incorrect5)
#>>> False
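# Illustrative alternative (not part of the exercise solution above): the same
# row/column test can be written compactly with sets, since a row of length n
# is valid exactly when its set of entries equals {1, ..., n}.
def check_sudoku_compact(square):
    n = len(square)
    expected = set(range(1, n + 1))
    rows_ok = all(set(row) == expected for row in square)
    cols_ok = all(set(col) == expected for col in zip(*square))
    return rows_ok and cols_ok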
| annaxli/UdacityCS101 | Lesson3/L3Q8.py | Python | mit | 3,769 |
# ***************************************************************************
# * Copyright (c) 2013 Juergen Riegel <[email protected]> *
# * Copyright (c) 2016 Bernd Hahnebach <[email protected]> *
# * *
# * This file is part of the FreeCAD CAx development system. *
# * *
# * This program is free software; you can redistribute it and/or modify *
# * it under the terms of the GNU Lesser General Public License (LGPL) *
# * as published by the Free Software Foundation; either version 2 of *
# * the License, or (at your option) any later version. *
# * for detail see the LICENCE text file. *
# * *
# * This program is distributed in the hope that it will be useful, *
# * but WITHOUT ANY WARRANTY; without even the implied warranty of *
# * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the *
# * GNU Library General Public License for more details. *
# * *
# * You should have received a copy of the GNU Library General Public *
# * License along with this program; if not, write to the Free Software *
# * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 *
# * USA *
# * *
# ***************************************************************************
__title__ = "FreeCAD FEM material task panel for the document object"
__author__ = "Juergen Riegel, Bernd Hahnebach, Qingfeng Xia"
__url__ = "https://www.freecadweb.org"
## @package task_material_common
# \ingroup FEM
# \brief FreeCAD FEM _ViewProviderFemMaterial
# \brief task panel for common material object
import sys
from PySide import QtCore
from PySide import QtGui
import FreeCAD
import FreeCADGui
from FreeCAD import Units
from femguiutils import selection_widgets
if sys.version_info.major >= 3:
unicode = str
class _TaskPanel:
"""
The editmode TaskPanel for FemMaterial objects
"""
def __init__(self, obj):
FreeCAD.Console.PrintMessage("\n") # empty line on start task panel
self.obj = obj
self.material = self.obj.Material # FreeCAD material dictionary of current material
self.card_path = ""
self.materials = {} # { card_path : FreeCAD material dict, ... }
self.cards = {} # { card_path : card_names, ... }
self.icons = {} # { card_path : icon_path, ... }
# mat_card is the FCMat file
# card_name is the file name of the mat_card
# card_path is the whole file path of the mat_card
# material_name is the value of the key name in FreeCAD material dictionary
# they might not match because of special letters in the material_name
# which are changed in the card_name to english standard characters
self.has_transient_mat = False
# parameter widget
self.parameterWidget = FreeCADGui.PySideUic.loadUi(
FreeCAD.getHomePath() + "Mod/Fem/Resources/ui/Material.ui"
)
# globals
QtCore.QObject.connect(
self.parameterWidget.cb_materials,
QtCore.SIGNAL("activated(int)"),
self.choose_material
)
QtCore.QObject.connect(
self.parameterWidget.chbu_allow_edit,
QtCore.SIGNAL("clicked()"),
self.toggleInputFieldsReadOnly
)
QtCore.QObject.connect(
self.parameterWidget.pushButton_editMat,
QtCore.SIGNAL("clicked()"),
self.edit_material
)
# basic properties must be provided
QtCore.QObject.connect(
self.parameterWidget.input_fd_density,
QtCore.SIGNAL("editingFinished()"),
self.density_changed
)
# mechanical properties
QtCore.QObject.connect(
self.parameterWidget.input_fd_young_modulus,
QtCore.SIGNAL("editingFinished()"),
self.ym_changed
)
QtCore.QObject.connect(
self.parameterWidget.spinBox_poisson_ratio,
QtCore.SIGNAL("editingFinished()"),
self.pr_changed
)
# thermal properties
QtCore.QObject.connect(
self.parameterWidget.input_fd_thermal_conductivity,
QtCore.SIGNAL("editingFinished()"),
self.tc_changed
)
QtCore.QObject.connect(
self.parameterWidget.input_fd_expansion_coefficient,
QtCore.SIGNAL("editingFinished()"),
self.tec_changed
)
QtCore.QObject.connect(
self.parameterWidget.input_fd_specific_heat,
QtCore.SIGNAL("editingFinished()"),
self.sh_changed
)
# fluidic properties, only volumetric thermal expansion coeff makes sense
QtCore.QObject.connect(
self.parameterWidget.input_fd_kinematic_viscosity,
QtCore.SIGNAL("editingFinished()"),
self.kinematic_viscosity_changed
)
QtCore.QObject.connect(
self.parameterWidget.input_fd_vol_expansion_coefficient,
QtCore.SIGNAL("editingFinished()"),
self.vtec_changed
)
# init all parameter input files with read only
self.parameterWidget.chbu_allow_edit.setCheckState(QtCore.Qt.CheckState.Unchecked)
self.toggleInputFieldsReadOnly()
# hide some groupBox according to material category
self.parameterWidget.label_category.setText(self.obj.Category)
if self.obj.Category == "Fluid":
self.parameterWidget.groupBox_mechanical.setVisible(0)
self.parameterWidget.label_expansion_coefficient.setVisible(0)
self.parameterWidget.input_fd_expansion_coefficient.setVisible(0)
else:
self.parameterWidget.groupBox_fluidic.setVisible(0)
self.parameterWidget.label_vol_expansion_coefficient.setVisible(0)
self.parameterWidget.input_fd_vol_expansion_coefficient.setVisible(0)
# get all available materials (fill self.materials, self.cards and self.icons)
from materialtools.cardutils import import_materials as getmats
# Note: import_materials(category="Solid", ...),
# category default to Solid, but must be given for FluidMaterial to be imported
self.materials, self.cards, self.icons = getmats(self.obj.Category)
# fill the material comboboxes with material cards
self.add_cards_to_combo_box()
# search for exact this mat_card in all known cards, choose the current material
self.card_path = self.get_material_card(self.material)
FreeCAD.Console.PrintLog("card_path: {}\n".format(self.card_path))
if not self.card_path:
# we have not found our material in self.materials dict :-(
# we're going to add a user-defined temporary material: a document material
FreeCAD.Console.PrintMessage(
"Previously used material card can not be found in material directories. "
"Add document material.\n"
)
self.card_path = "_document_material"
self.materials[self.card_path] = self.material
self.parameterWidget.cb_materials.addItem(
QtGui.QIcon(":/icons/help-browser.svg"),
self.card_path,
self.card_path
)
index = self.parameterWidget.cb_materials.findData(self.card_path)
# print(index)
# fill input fields and set the current material in the cb widget
self.choose_material(index)
else:
# we found our exact material in self.materials dict :-)
FreeCAD.Console.PrintLog(
"Previously used material card was found in material directories. "
"We will use this material.\n"
)
index = self.parameterWidget.cb_materials.findData(self.card_path)
# print(index)
# fill input fields and set the current material in the cb widget
self.choose_material(index)
# geometry selection widget
self.selectionWidget = selection_widgets.GeometryElementsSelection(
obj.References,
["Solid", "Face", "Edge"],
False,
True
) # start with Solid in list!
# form made from param and selection widget
self.form = [self.parameterWidget, self.selectionWidget]
# check references, has to be after initialisation of selectionWidget
self.selectionWidget.has_equal_references_shape_types()
# leave task panel ***************************************************************************
def accept(self):
# print(self.material)
if self.material == {}: # happens if material editor was canceled
FreeCAD.Console.PrintError("Empty material dictionary, nothing was changed.\n")
self.recompute_and_set_back_all()
return True
if self.selectionWidget.has_equal_references_shape_types():
self.do_not_set_thermal_zeros()
from materialtools.cardutils import check_mat_units as checkunits
if checkunits(self.material) is True:
self.obj.Material = self.material
self.obj.References = self.selectionWidget.references
else:
                error_message = (
                    "Due to some wrong material quantity units in the changed "
                    "material data, the task panel changes were not accepted.\n"
                )
FreeCAD.Console.PrintError(error_message)
QtGui.QMessageBox.critical(None, "Material data not changed", error_message)
self.recompute_and_set_back_all()
return True
def reject(self):
self.recompute_and_set_back_all()
return True
def recompute_and_set_back_all(self):
doc = FreeCADGui.getDocument(self.obj.Document)
doc.Document.recompute()
self.selectionWidget.setback_listobj_visibility()
if self.selectionWidget.sel_server:
FreeCADGui.Selection.removeObserver(self.selectionWidget.sel_server)
doc.resetEdit()
    def do_not_set_thermal_zeros(self):
        """ Thermal material parameters are set to 0.0 if not available.
        This leads to wrong material values and to not finding the card
        when the task panel is reopened, thus do not write thermal
        parameters if they are 0.0.
        """
if Units.Quantity(self.material["ThermalConductivity"]) == 0.0:
self.material.pop("ThermalConductivity", None)
FreeCAD.Console.PrintMessage(
"Zero ThermalConductivity value. "
"This parameter is not saved in the material data.\n"
)
if Units.Quantity(self.material["ThermalExpansionCoefficient"]) == 0.0:
self.material.pop("ThermalExpansionCoefficient", None)
FreeCAD.Console.PrintMessage(
"Zero ThermalExpansionCoefficient value. "
"This parameter is not saved in the material data.\n"
)
if Units.Quantity(self.material["SpecificHeat"]) == 0.0:
self.material.pop("SpecificHeat", None)
FreeCAD.Console.PrintMessage(
"Zero SpecificHeat value. "
"This parameter is not saved in the material data.\n"
)
# choose material ****************************************************************************
def get_material_card(self, material):
for a_mat in self.materials:
unmatched_items = set(self.materials[a_mat].items()) ^ set(material.items())
# print(a_mat + " --> unmatched_items = " + str(len(unmatched_items)))
# if len(unmatched_items) < 4:
# print(unmatched_items)
if len(unmatched_items) == 0:
return a_mat
return ""
def choose_material(self, index):
if index < 0:
return
self.card_path = self.parameterWidget.cb_materials.itemData(index) # returns whole path
FreeCAD.Console.PrintMessage(
"Material card chosen:\n"
" {}\n".format(self.card_path)
)
self.material = self.materials[self.card_path]
self.check_material_keys()
self.set_mat_params_in_input_fields(self.material)
self.parameterWidget.cb_materials.setCurrentIndex(index) # set after input fields
gen_mat_desc = ""
gen_mat_name = ""
if "Description" in self.material:
gen_mat_desc = self.material["Description"]
if "Name" in self.material:
gen_mat_name = self.material["Name"]
self.parameterWidget.l_mat_description.setText(gen_mat_desc)
self.parameterWidget.l_mat_name.setText(gen_mat_name)
# print("choose_material: done")
def set_transient_material(self):
self.card_path = "_transient_material"
self.materials[self.card_path] = self.material # = the current input fields data
index = self.parameterWidget.cb_materials.findData(self.card_path)
self.choose_material(index)
def add_transient_material(self):
self.has_transient_mat = True
self.card_path = "_transient_material"
self.parameterWidget.cb_materials.addItem(
QtGui.QIcon(":/icons/help-browser.svg"),
self.card_path,
self.card_path
)
self.set_transient_material()
# how to edit a material *********************************************************************
def edit_material(self):
# opens the material editor to choose a material or edit material params
import MaterialEditor
if self.card_path not in self.cards:
FreeCAD.Console.PrintLog(
"Card path not in cards, material dict will be used to open Material Editor.\n"
)
new_material_params = MaterialEditor.editMaterial(material=self.material)
else:
new_material_params = MaterialEditor.editMaterial(card_path=self.card_path)
# material editor returns the mat_dict only, not a card_path
# if the material editor was canceled a empty dict will be returned
# do not change the self.material
# check if dict is not empty (do not use "is True")
if new_material_params:
# check material quantity units
from materialtools.cardutils import check_mat_units as checkunits
if checkunits(new_material_params) is True:
self.material = new_material_params
self.card_path = self.get_material_card(self.material)
# print("card_path: " + self.card_path)
self.check_material_keys()
self.set_mat_params_in_input_fields(self.material)
if not self.card_path:
                    FreeCAD.Console.PrintMessage(
                        "Material card chosen by the material editor "
                        "was not found in material directories.\n"
                        "Either the card does not exist or some material "
                        "parameters were changed in the material editor.\n"
                    )
if self.has_transient_mat is False:
self.add_transient_material()
else:
self.set_transient_material()
else:
# we found our exact material in self.materials dict :-)
FreeCAD.Console.PrintLog(
"Material card chosen by the material editor "
"was found in material directories. "
"The found material card will be used.\n"
)
index = self.parameterWidget.cb_materials.findData(self.card_path)
# print(index)
# set the current material in the cb widget
self.choose_material(index)
else:
error_message = (
"Due to some wrong material quantity units in data passed "
"by the material editor, the material data was not changed.\n"
)
FreeCAD.Console.PrintError(error_message)
QtGui.QMessageBox.critical(None, "Material data not changed", error_message)
else:
            FreeCAD.Console.PrintLog(
                "No changes were made by the material editor.\n"
            )
def toggleInputFieldsReadOnly(self):
if self.parameterWidget.chbu_allow_edit.isChecked():
self.parameterWidget.input_fd_density.setReadOnly(False)
self.parameterWidget.input_fd_young_modulus.setReadOnly(False)
self.parameterWidget.spinBox_poisson_ratio.setReadOnly(False)
self.parameterWidget.input_fd_thermal_conductivity.setReadOnly(False)
self.parameterWidget.input_fd_expansion_coefficient.setReadOnly(False)
self.parameterWidget.input_fd_specific_heat.setReadOnly(False)
self.parameterWidget.input_fd_kinematic_viscosity.setReadOnly(False)
self.parameterWidget.input_fd_vol_expansion_coefficient.setReadOnly(False)
else:
self.parameterWidget.input_fd_density.setReadOnly(True)
self.parameterWidget.input_fd_young_modulus.setReadOnly(True)
self.parameterWidget.spinBox_poisson_ratio.setReadOnly(True)
self.parameterWidget.input_fd_thermal_conductivity.setReadOnly(True)
self.parameterWidget.input_fd_expansion_coefficient.setReadOnly(True)
self.parameterWidget.input_fd_specific_heat.setReadOnly(True)
self.parameterWidget.input_fd_kinematic_viscosity.setReadOnly(True)
self.parameterWidget.input_fd_vol_expansion_coefficient.setReadOnly(True)
# material parameter input fields ************************************************************
def check_material_keys(self):
# FreeCAD units definition is at file end of src/Base/Unit.cpp
if not self.material:
FreeCAD.Console.PrintMessage("For some reason all material data is empty!\n")
self.material["Name"] = "NoName"
if "Density" in self.material:
if "Density" not in str(Units.Unit(self.material["Density"])):
FreeCAD.Console.PrintMessage(
"Density in material data seems to have no unit "
"or a wrong unit (reset the value): {}\n"
.format(self.material["Name"])
)
self.material["Density"] = "0 kg/m^3"
else:
FreeCAD.Console.PrintMessage(
"Density not found in {}\n"
.format(self.material["Name"])
)
self.material["Density"] = "0 kg/m^3"
if self.obj.Category == "Solid":
# mechanical properties
if "YoungsModulus" in self.material:
# unit type of YoungsModulus is Pressure
if "Pressure" not in str(Units.Unit(self.material["YoungsModulus"])):
FreeCAD.Console.PrintMessage(
"YoungsModulus in material data seems to have no unit "
"or a wrong unit (reset the value): {}\n"
.format(self.material["Name"])
)
self.material["YoungsModulus"] = "0 MPa"
else:
FreeCAD.Console.PrintMessage(
"YoungsModulus not found in {}\n"
.format(self.material["Name"])
)
self.material["YoungsModulus"] = "0 MPa"
if "PoissonRatio" in self.material:
                # PoissonRatio does not have a unit, but it is checked if there is no value at all
try:
float(self.material["PoissonRatio"])
except ValueError:
FreeCAD.Console.PrintMessage(
"PoissonRatio has wrong or no data (reset the value): {}\n"
.format(self.material["PoissonRatio"])
)
self.material["PoissonRatio"] = "0"
else:
FreeCAD.Console.PrintMessage(
"PoissonRatio not found in {}\n"
.format(self.material["Name"])
)
self.material["PoissonRatio"] = "0"
if self.obj.Category == "Fluid":
# Fluidic properties
if "KinematicViscosity" in self.material:
ki_vis = self.material["KinematicViscosity"]
if "KinematicViscosity" not in str(Units.Unit(ki_vis)):
FreeCAD.Console.PrintMessage(
"KinematicViscosity in material data seems to have no unit "
"or a wrong unit (reset the value): {}\n"
.format(self.material["Name"])
)
self.material["KinematicViscosity"] = "0 m^2/s"
else:
FreeCAD.Console.PrintMessage(
"KinematicViscosity not found in {}\n"
.format(self.material["Name"])
)
self.material["KinematicViscosity"] = "0 m^2/s"
if "VolumetricThermalExpansionCoefficient" in self.material:
# unit type VolumetricThermalExpansionCoefficient is ThermalExpansionCoefficient
vol_ther_ex_co = self.material["VolumetricThermalExpansionCoefficient"]
if "VolumetricThermalExpansionCoefficient" not in str(Units.Unit(vol_ther_ex_co)):
FreeCAD.Console.PrintMessage(
"VolumetricThermalExpansionCoefficient in material data "
"seems to have no unit or a wrong unit (reset the value): {}\n"
.format(self.material["Name"])
)
self.material["VolumetricThermalExpansionCoefficient"] = "0 m^3/m^3/K"
else:
FreeCAD.Console.PrintMessage(
"VolumetricThermalExpansionCoefficient not found in {}\n"
.format(self.material["Name"])
)
self.material["VolumetricThermalExpansionCoefficient"] = "0 m^3/m^3/K"
# Thermal properties
if "ThermalConductivity" in self.material:
if "ThermalConductivity" not in str(Units.Unit(self.material["ThermalConductivity"])):
FreeCAD.Console.PrintMessage(
"ThermalConductivity in material data seems to have no unit "
"or a wrong unit (reset the value): {}\n"
.format(self.material["Name"])
)
self.material["ThermalConductivity"] = "0 W/m/K"
else:
FreeCAD.Console.PrintMessage(
"ThermalConductivity not found in {}\n"
.format(self.material["Name"])
)
self.material["ThermalConductivity"] = "0 W/m/K"
if "ThermalExpansionCoefficient" in self.material:
the_ex_co = self.material["ThermalExpansionCoefficient"]
if "ThermalExpansionCoefficient" not in str(Units.Unit(the_ex_co)):
FreeCAD.Console.PrintMessage(
"ThermalExpansionCoefficient in material data seems to have no unit "
"or a wrong unit (reset the value): {}\n"
.format(self.material["Name"])
)
self.material["ThermalExpansionCoefficient"] = "0 um/m/K"
else:
FreeCAD.Console.PrintMessage(
"ThermalExpansionCoefficient not found in {}\n"
.format(self.material["Name"])
)
self.material["ThermalExpansionCoefficient"] = "0 um/m/K"
if "SpecificHeat" in self.material:
if "SpecificHeat" not in str(Units.Unit(self.material["SpecificHeat"])):
FreeCAD.Console.PrintMessage(
"SpecificHeat in material data seems to have no unit "
"or a wrong unit (reset the value): {}\n"
.format(self.material["Name"])
)
self.material["SpecificHeat"] = "0 J/kg/K"
else:
FreeCAD.Console.PrintMessage(
"SpecificHeat not found in {}\n"
.format(self.material["Name"])
)
self.material["SpecificHeat"] = "0 J/kg/K"
FreeCAD.Console.PrintMessage("\n")
def update_material_property(self, inputfield_text, matProperty, qUnit, variation=0.001):
# print(inputfield_text)
# this update property works for all Gui::InputField widgets
if qUnit != "":
value = Units.Quantity(inputfield_text).getValueAs(qUnit)
old_value = Units.Quantity(self.material[matProperty]).getValueAs(qUnit)
else:
# for example PoissonRatio
value = float(inputfield_text)
old_value = float(self.material[matProperty])
if value:
if not (1 - variation < float(old_value) / value < 1 + variation):
material = self.material
# unicode() is an alias to str for py3
if qUnit != "":
material[matProperty] = unicode(value) + " " + qUnit
else:
material[matProperty] = unicode(value)
self.material = material
if self.has_transient_mat is False:
self.add_transient_material()
else:
self.set_transient_material()
else:
pass # some check or default value set can be done here
# print(inputfield_text)
# mechanical input fields
def ym_changed(self):
# FreeCADs standard unit for stress is kPa for UnitsSchemeInternal, but MPa can be used
self.update_material_property(
self.parameterWidget.input_fd_young_modulus.text(),
"YoungsModulus",
"kPa",
)
def density_changed(self):
        FreeCAD.Console.PrintLog(
            "String read from density input field: {}\n"
            .format(self.parameterWidget.input_fd_density.text())
        )
# FreeCADs standard unit for density is kg/mm^3 for UnitsSchemeInternal
self.update_material_property(
self.parameterWidget.input_fd_density.text(),
"Density",
"kg/m^3",
)
def pr_changed(self):
value = self.parameterWidget.spinBox_poisson_ratio.value()
if value:
self.update_material_property(
self.parameterWidget.spinBox_poisson_ratio.text(),
"PoissonRatio",
"",
)
elif value == 0:
# PoissonRatio was set to 0.0 what is possible
material = self.material
material["PoissonRatio"] = unicode(value)
self.material = material
if self.has_transient_mat is False:
self.add_transient_material()
else:
self.set_transient_material()
# thermal input fields
def tc_changed(self):
self.update_material_property(
self.parameterWidget.input_fd_thermal_conductivity.text(),
"ThermalConductivity",
"W/m/K",
)
def tec_changed(self):
self.update_material_property(
self.parameterWidget.input_fd_expansion_coefficient.text(),
"ThermalExpansionCoefficient",
"um/m/K",
)
def sh_changed(self):
self.update_material_property(
self.parameterWidget.input_fd_specific_heat.text(),
"SpecificHeat",
"J/kg/K",
)
# fluidic input fields
def vtec_changed(self):
self.update_material_property(
self.parameterWidget.input_fd_vol_expansion_coefficient.text(),
"VolumetricThermalExpansionCoefficient",
"m^3/m^3/K",
)
def kinematic_viscosity_changed(self):
self.update_material_property(
self.parameterWidget.input_fd_kinematic_viscosity.text(),
"KinematicViscosity",
"m^2/s",
)
def set_mat_params_in_input_fields(self, matmap):
if "YoungsModulus" in matmap:
ym_new_unit = "MPa"
ym = FreeCAD.Units.Quantity(matmap["YoungsModulus"])
ym_with_new_unit = ym.getValueAs(ym_new_unit)
q = FreeCAD.Units.Quantity("{} {}".format(ym_with_new_unit, ym_new_unit))
self.parameterWidget.input_fd_young_modulus.setText(q.UserString)
if "PoissonRatio" in matmap:
self.parameterWidget.spinBox_poisson_ratio.setValue(float(matmap["PoissonRatio"]))
# Fluidic properties
if "KinematicViscosity" in matmap:
nu_new_unit = "m^2/s"
nu = FreeCAD.Units.Quantity(matmap["KinematicViscosity"])
nu_with_new_unit = nu.getValueAs(nu_new_unit)
q = FreeCAD.Units.Quantity("{} {}".format(nu_with_new_unit, nu_new_unit))
self.parameterWidget.input_fd_kinematic_viscosity.setText(q.UserString)
        # For fluids only the volumetric thermal expansion coefficient is
        # used; for isotropic solids it is approximately three times the
        # linear coefficient.
if "VolumetricThermalExpansionCoefficient" in matmap:
vtec_new_unit = "m^3/m^3/K"
vtec = FreeCAD.Units.Quantity(matmap["VolumetricThermalExpansionCoefficient"])
vtec_with_new_unit = vtec.getValueAs(vtec_new_unit)
q = FreeCAD.Units.Quantity("{} {}".format(vtec_with_new_unit, vtec_new_unit))
self.parameterWidget.input_fd_vol_expansion_coefficient.setText(q.UserString)
if "Density" in matmap:
density_new_unit = "kg/m^3"
density = FreeCAD.Units.Quantity(matmap["Density"])
density_with_new_unit = density.getValueAs(density_new_unit)
# self.parameterWidget.input_fd_density.setText(
# "{} {}".format(density_with_new_unit, density_new_unit)
# )
q = FreeCAD.Units.Quantity("{} {}".format(density_with_new_unit, density_new_unit))
self.parameterWidget.input_fd_density.setText(q.UserString)
# thermal properties
if "ThermalConductivity" in matmap:
tc_new_unit = "W/m/K"
tc = FreeCAD.Units.Quantity(matmap["ThermalConductivity"])
tc_with_new_unit = tc.getValueAs(tc_new_unit)
q = FreeCAD.Units.Quantity("{} {}".format(tc_with_new_unit, tc_new_unit))
self.parameterWidget.input_fd_thermal_conductivity.setText(q.UserString)
if "ThermalExpansionCoefficient" in matmap: # linear, only for solid
tec_new_unit = "um/m/K"
tec = FreeCAD.Units.Quantity(matmap["ThermalExpansionCoefficient"])
tec_with_new_unit = tec.getValueAs(tec_new_unit)
q = FreeCAD.Units.Quantity("{} {}".format(tec_with_new_unit, tec_new_unit))
self.parameterWidget.input_fd_expansion_coefficient.setText(q.UserString)
if "SpecificHeat" in matmap:
sh_new_unit = "J/kg/K"
sh = FreeCAD.Units.Quantity(matmap["SpecificHeat"])
sh_with_new_unit = sh.getValueAs(sh_new_unit)
q = FreeCAD.Units.Quantity("{} {}".format(sh_with_new_unit, sh_new_unit))
self.parameterWidget.input_fd_specific_heat.setText(q.UserString)
# fill the combo box with cards **************************************************************
def add_cards_to_combo_box(self):
# fill combobox, in combo box the card name is used not the material name
self.parameterWidget.cb_materials.clear()
mat_prefs = FreeCAD.ParamGet("User parameter:BaseApp/Preferences/Mod/Material/Cards")
sort_by_resources = mat_prefs.GetBool("SortByResources", False)
card_name_list = [] # [ [card_name, card_path, icon_path], ... ]
if sort_by_resources is True:
for a_path in sorted(self.materials.keys()):
card_name_list.append([self.cards[a_path], a_path, self.icons[a_path]])
else:
card_names_tmp = {}
for path, name in self.cards.items():
card_names_tmp[name] = path
for a_name in sorted(card_names_tmp.keys()):
a_path = card_names_tmp[a_name]
card_name_list.append([a_name, a_path, self.icons[a_path]])
for mat in card_name_list:
self.parameterWidget.cb_materials.addItem(QtGui.QIcon(mat[2]), mat[0], mat[1])
# the whole card path is added to the combo box to make it unique
# see def choose_material:
        # for assignment of self.card_path the path from the parameterWidget is used
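# Illustrative only: the "material" handled throughout this task panel is a
# plain FreeCAD material dictionary of quantity strings, e.g. (placeholder
# values, not a real card):
#
#     {
#         "Name": "Example-Steel",
#         "Density": "7900 kg/m^3",
#         "YoungsModulus": "210000 MPa",
#         "PoissonRatio": "0.30",
#         "ThermalConductivity": "50 W/m/K",
#         "ThermalExpansionCoefficient": "12 um/m/K",
#         "SpecificHeat": "500 J/kg/K",
#     }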
| Fat-Zer/FreeCAD_sf_master | src/Mod/Fem/femtaskpanels/task_material_common.py | Python | lgpl-2.1 | 33,586 |
from django.conf.urls import url, include
from django.conf.urls.static import static
from django.conf import settings
from django.contrib import admin
from django.urls import path
from confla import views
app_name = "confla"
urlpatterns = [
path('admin/', admin.site.urls),
url(r'^$', views.IndexView.my_view, name='index'),
url(r'add_rooms/$', views.AddRoomsView.view_form, name='add_rooms'),
url(r'^events/popover/$', views.EventView.get_popover, name='eventPop'),
url(r'^events/modal/$', views.EventEditView.event_modal, name='eventMod'),
url(r'^login/$', views.LoginView.my_view, name='login'),
url(r'^logout/$', views.LoginView.logout, name='logout'),
url(r'^process/$', views.LoginView.auth_and_login, name='process_login'),
url(r'^users/$', views.UserView.my_view, name='users'),
url(r'^user/(?P<url_username>\w+)/profile/$', views.UserView.view_profile, name='profile'),
url(r'^user/(?P<url_username>\w+)/delete_mail/(?P<id>\d+)/', views.UserView.delete_email, name='delete_email'),
url(r'^user/(?P<url_username>\w+)/set_primary_mail/(?P<id>\d+)/', views.UserView.set_email_primary, name='set_primary_email'),
url(r'^user/volunteer/$', views.VolunteerView.my_view, name='volunteer'),
url(r'^register/$', views.RegisterView.user_register, name='register'),
url(r'^reset_password/$', views.RegisterView.reset_password, name='reset_password'),
url(r'^reset_password2/(?P<email_address>[^/]+)/(?P<token>[^/]+)$', views.RegisterView.reset_password2, name='reset_password2'),
#url(r'^reg_talk/$', views.RegisterView.save_form_and_register, name='reg_talk'),
#url(r'^notlogged/$', views.UserView.not_logged, name='notlogged'),
url(r'^i18n/', include('django.conf.urls.i18n'), name='set_language'),
url(r'^(?P<url_id>\w+)/$', views.AboutView.splash_view, name='splash'),
url(r'^(?P<url_id>\w+)/cfp/$', views.CfpView.save_form_and_register, name='cfp'),
url(r'^(?P<url_id>\w+)/about/$', views.AboutView.splash_view, name='about'),
url(r'^(?P<url_id>\w+)/events/$', views.EventView.event_list, name='event_list'),
url(r'^(?P<url_id>\w+)/places/$', views.PlacesView.osm, name='places'),
url(r'^(?P<url_id>\w+)/about/(?P<page>\w+)$', views.PagesView.content, name='pages'),
url(r'^(?P<url_id>\w+)/speakers/grid/$', views.UserView.speaker_grid, name='speaker_grid'),
url(r'^(?P<url_id>\w+)/speakers/list/$', views.UserView.speaker_list, name='speaker_list'),
url(r'^(?P<url_id>\w+)/sched/$', views.ScheduleView.my_view, name='schedule'),
url(r'^(?P<url_id>\w+)/sched/list/$', views.ScheduleView.list_view, name='listsched'),
url(r'^(?P<url_id>\w+)/sched/list/(?P<id>\d+)/$', views.ScheduleView.list_view, name='listschedTag'),
url(r'^(?P<url_id>\w+)/config/$', views.RoomConfView.slot_view, name='conf_rooms'),
url(r'^(?P<url_id>\w+)/config/save/$', views.RoomConfView.save_config, name='rooms_conf_save'),
url(r'^(?P<url_id>\w+)/export/m_app/$', views.ExportView.m_app, name='export_mapp'),
url(r'^(?P<url_id>\w+)/export/csv/$', views.ExportView.csv, name='export_csv'),
url(r'^org/admin/geo_icons/$', views.IconsView.table, name='geo_icons'),
url(r'^org/admin/geo_points/$', views.PlacesView.table, name='geo_points'),
url(r'^org/admin/stats/$', views.AdminView.dashboard, name='org_dashboard'),
url(r'^org/admin/newconf/$', views.ConferenceView.create_conf, name='create_conf'),
url(r'^org/admin/createroom/$', views.ConferenceView.create_room, name='create_room'),
url(r'^org/admin/createtag/$', views.EventEditView.create_event_tag, name='create_event_tag'),
url(r'^org/admin/saveconf/$', views.ConferenceView.save_conf, name='save_conf'),
url(r'^org/admin/users/$', views.AdminView.users, name='org_users'),
url(r'^org/admin/$', views.AdminView.conf_list, name='org_conf_list'),
url(r'^export/conference_list/$', views.ExportView.conf_list, name='conf_list_export'),
url(r'^(?P<url_id>\w+)/admin/$', views.AdminView.dashboard, name='dashboard'),
url(r'^(?P<url_id>\w+)/admin/conf/edit/$', views.ConferenceView.edit_conf, name='edit_conf'),
url(r'^(?P<url_id>\w+)/admin/saveconf/$', views.ConferenceView.save_conf, name='save_conf_urlid'),
url(r'^(?P<url_id>\w+)/admin/pages/$', views.PagesView.pages_list, name='admin_pages'),
url(r'^(?P<url_id>\w+)/admin/page/(?P<page>\d+)/edit/$', views.PagesView.edit_page, name='edit_page'),
url(r'^(?P<url_id>\w+)/admin/page/(?P<page>\d+)/save/$', views.PagesView.save_page, name='save_page'),
url(r'^(?P<url_id>\w+)/admin/users/$', views.AdminView.users, name='speakers'),
url(r'^(?P<url_id>\w+)/admin/sched/edit/$', views.TimetableView.view_timetable, name='adminsched'),
url(r'^(?P<url_id>\w+)/admin/sched/edit/saveTable/$', views.TimetableView.save_timetable, name='saveTable'),
url(r'^(?P<url_id>\w+)/admin/sched/edit/saveEvent/$', views.TimetableView.save_event, name='saveEvent'),
url(r'^(?P<url_id>\w+)/admin/sched/edit/popover/$', views.EventView.get_admin_popover, name='eventPop_admin'),
url(r'^(?P<url_id>\w+)/admin/eventlist/$', views.EventEditView.event_view, name='editEvent'),
url(r'^(?P<url_id>\w+)/admin/eventlist/(?P<id>\d+)/$', views.EventEditView.event_view, name='editEvent'),
url(r'^(?P<url_id>\w+)/admin/eventlist/editEvent/(?P<id>\d+)/$', views.EventEditView.event_save, name='editEvent2'),
url(r'^(?P<url_id>\w+)/admin/import/$', views.ImportView.import_view, name='import'),
url(r'^(?P<url_id>\w+)/admin/import/json/$', views.ImportView.json_upload, name='json_import'),
url(r'^(?P<url_id>\w+)/admin/export/$', views.ExportView.export_view, name='export'),
url(r'^activate/(?P<token>[0-9A-Za-z]{1,13}-[0-9A-Za-z]{1,20})/$',views.RegisterView.activate_email , name='activate_email'),
]
urlpatterns += static(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT)
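# Illustrative sketch (not part of the original URLconf): the named patterns above
# can be resolved with Django's reverse(); 'devconf' is a hypothetical url_id used
# only for illustration.
#
#   from django.core.urlresolvers import reverse
#   reverse('splash', kwargs={'url_id': 'devconf'})                  # -> '/devconf/'
#   reverse('listschedTag', kwargs={'url_id': 'devconf', 'id': 2})   # -> '/devconf/sched/list/2/'
#   reverse('edit_page', kwargs={'url_id': 'devconf', 'page': 3})    # -> '/devconf/admin/page/3/edit/'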
| rh-lab-q/conflab | wsgi/openshift/confla/urls.py | Python | gpl-3.0 | 6,136 |
#!/usr/bin/env python
import unittest
import sys
sys.path.insert(0, '..')
from bitstring import ByteStore, ConstByteStore, equal, offsetcopy
class OffsetCopy(unittest.TestCase):
def testStraightCopy(self):
s = ByteStore(bytearray([10, 5, 1]), 24, 0)
t = offsetcopy(s, 0)
self.assertEqual(t._rawarray, bytearray([10, 5, 1]))
def testOffsetIncrease(self):
s = ByteStore(bytearray([1, 1, 1]), 24, 0)
t = offsetcopy(s, 4)
self.assertEqual(t.bitlength, 24)
self.assertEqual(t.offset, 4)
self.assertEqual(t._rawarray, bytearray([0, 16, 16, 16]))
class Equals(unittest.TestCase):
def testBothSingleByte(self):
s = ByteStore(bytearray([128]), 3, 0)
t = ByteStore(bytearray([64]), 3, 1)
u = ByteStore(bytearray([32]), 3, 2)
self.assertTrue(equal(s, t))
self.assertTrue(equal(s, u))
self.assertTrue(equal(u, t))
def testOneSingleByte(self):
s = ByteStore(bytearray([1, 0]), 2, 7)
t = ByteStore(bytearray([64]), 2, 1)
self.assertTrue(equal(s, t))
        self.assertTrue(equal(t, s))
| kostaspl/SpiderMonkey38 | python/bitstring/test/test_bitstore.py | Python | mpl-2.0 | 1132
# Copyright 2021 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Lint as: python3
"""Downloads and installs nasm in a temporary directory."""
import logging
import os
import shutil
import subprocess
import sys
import tempfile
import urllib.parse
import urllib.request
import zipfile
# pylint: disable=g-import-not-at-top
# pylint: disable=W0403
sys.path.append(os.path.dirname(__file__))
import shell
# pylint: enable=g-import-not-at-top
# pylint: enable=W0403
NASM_ZIP_NAME = 'nasm.zip'
class NasmInstaller:
"""Installs nasm into a temporary directory."""
def __init__(self, installer_url: str, installer_dir: str = None):
"""Initialize the installer instance.
Args:
installer_url: URL to the nasm installer.
installer_dir: Optional path to copy nasm.
"""
self._installer_url = installer_url
if not installer_dir:
self._installer_dir = tempfile.TemporaryDirectory().name
else:
self._installer_dir = installer_dir
# Add nasm installation directory to path.
os.environ['PATH'] = (self._installer_dir + os.path.pathsep +
os.environ['PATH'])
@property
def installer_path(self):
"""Get the path where nasm is going to be installed."""
return self._installer_dir
def install(self):
"""Install nasm to project.
Returns:
True when installed, false otherwise.
Raises:
urllib.error.URLError: If an error occurs while downloading the
installer.
zipfile.BadZipFile: if unzipping fails.
subprocess.CalledProcessError: If failed to set path.
"""
# Download installer.
installer_filename = self._download()
if installer_filename:
# Unzip installer.
self._unzip(installer_filename)
# Add installer to path.
self._check_nasm()
return True
return False
def _download(self) -> str:
"""Download the installer and places into temporary folder.
Returns:
Path to the downloaded installer.
Raises:
urllib.error.URLError: If an error occurs while downloading the
installer.
"""
if not self._installer_url:
return ''
# Create installation directory if doesn't exist.
os.makedirs(self._installer_dir, exist_ok=True)
installer_filename = os.path.join(self._installer_dir, NASM_ZIP_NAME)
with open(installer_filename, 'wb') as installer_file:
logging.info('Copying %s --> %s', self._installer_url, installer_filename)
with urllib.request.urlopen(self._installer_url) as urlfile:
shutil.copyfileobj(urlfile, installer_file)
return installer_filename
def _unzip(self, zip_path: str) -> bool:
"""Unzips nasm package.
Args:
zip_path: Path to the zip file.
Raises:
zipfile.BadZipFile: if unzipping fails.
"""
try:
with zipfile.ZipFile(zip_path) as handle:
for item_info in handle.infolist():
# Remove first folder, so nasm.exe can be found when setting PATH.
target_filename = os.path.join(
self._installer_dir,
os.path.join(*(
os.path.normpath(item_info.filename).split(os.path.sep)[1:])))
# Open the file inside zip and save it on the desired location.
with handle.open(item_info.filename, 'r') as input_file:
os.makedirs(os.path.dirname(target_filename), exist_ok=True)
with open(target_filename, 'wb') as output_file:
output_file.write(input_file.read())
except (zipfile.BadZipFile) as error:
logging.exception('Failed to unzip %s: %s', zip_path, error)
raise
def _check_nasm(self) -> str:
"""Check that nasm runs on cmd.
Raises:
subprocess.CalledProcessError: If failed to run nasm.
"""
try:
shell.run_command('nasm -h')
except subprocess.CalledProcessError as error:
logging.exception('Failed to add nasm to path: %s', error)
raise
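# Minimal usage sketch (not part of the original module). The download URL is a
# placeholder assumption; point it at the nasm .zip release you actually need.
if __name__ == '__main__':
  logging.basicConfig(level=logging.INFO)
  # Hypothetical URL, for illustration only.
  example_nasm_url = 'https://example.com/nasm/nasm-win64.zip'
  nasm_installer = NasmInstaller(example_nasm_url)
  if nasm_installer.install():
    logging.info('nasm installed under %s and added to PATH',
                 nasm_installer.installer_path)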
| google-research/falken | sdk/build/nasm_installer.py | Python | apache-2.0 | 4,457 |
import os
import sys
from cement.core.log import get_logger
from subprocess import Popen, PIPE
from iustools.core import exc
log = get_logger(__name__)
def get_input(msg, suppress=False):
    """
    Prompt the user for input. When ``suppress`` is True, terminal echo is
    turned off around the prompt (via ``stty -echo``) so the typed value,
    e.g. a password, is not displayed.
    """
    res = ''
if suppress:
try:
os.system('stty -echo')
res = raw_input(msg).strip('\n')
except Exception:
print
sys.exit(1)
finally:
print
os.system('stty echo')
else:
res = raw_input(msg).strip('\n')
return res
def exec_command(cmd_args):
"""
Quick wrapper around subprocess to exec shell command and bail out if the
command return other than zero.
Required Arguments:
cmd_args
The args to pass to subprocess.
Usage:
.. code-block:: python
from mf.helpers.misc import exec_command
(stdout, stderr) = exec_command(['ls', '-lah'])
"""
log.debug("exec_command: %s" % ' '.join(cmd_args))
    # join the args into a single command string, as expected when shell=True
    proc = Popen(' '.join(cmd_args), stdout=PIPE, stderr=PIPE, shell=True)
(stdout, stderr) = proc.communicate()
if proc.wait():
# call return > 0
raise exc.IUSToolsRuntimeError, \
"shell command exited with code '%s'. STDERR: %s" % \
(proc.returncode, stderr)
    return (stdout, stderr)
| iuscommunity/ius-tools | src/iustools.core/iustools/helpers/misc.py | Python | gpl-2.0 | 1308
# -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from openerp.osv import fields, osv
class res_groups(osv.osv):
_name = "res.groups"
_inherit = 'res.groups'
_columns = {
'share': fields.boolean('Share Group', readonly=True,
help="Group created to set access rights for sharing data with some users.")
}
def get_application_groups(self, cr, uid, domain=None, context=None):
if domain is None:
domain = []
domain.append(('share', '=', False))
return super(res_groups, self).get_application_groups(cr, uid, domain=domain, context=context)
class res_users(osv.osv):
_name = 'res.users'
_inherit = 'res.users'
_columns = {
'share': fields.boolean('Share User', readonly=True,
help="External user with limited access, created only for the purpose of sharing data.")
}
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| jmesteve/saas3 | openerp/addons/share/res_users.py | Python | agpl-3.0 | 1,891 |
#
# Readout.py -- Readout for displaying image cursor information
#
# Eric Jeschke ([email protected])
#
# Copyright (c) Eric R. Jeschke. All rights reserved.
# This is open-source software licensed under a BSD license.
# Please see the file LICENSE.txt for details.
#
from ginga.qtw.QtHelp import QtGui, QtCore
from ginga.misc import Bunch
class Readout(object):
def __init__(self, width, height):
readout = QtGui.QLabel('')
#readout.resize(width, height)
readout.setStyleSheet("QLabel { background-color: #202030; color: lightgreen; }");
readout.setAlignment(QtCore.Qt.AlignHCenter | QtCore.Qt.AlignVCenter)
self.readout = readout
self.maxx = 0
self.maxy = 0
self.maxv = 0
self.fitsimage = None
def get_widget(self):
#return self.evbox
return self.readout
def set_font(self, font):
self.readout.setFont(font)
def set_text(self, text):
self.readout.setText(text)
# END
| Rbeaty88/ginga | ginga/qtw/Readout.py | Python | bsd-3-clause | 1,006 |
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
""" Component tests for VPC network functionality - Internal Load Balancing Rules
"""
# Import Local Modules
from marvin.cloudstackTestCase import cloudstackTestCase
from marvin.lib.base import (Account,
ApplicationLoadBalancer,
Network,
NetworkACL,
NetworkOffering,
PublicIPAddress,
Router,
ServiceOffering,
StaticNATRule,
VirtualMachine,
VPC,
VpcOffering)
from marvin.lib.common import (get_domain,
get_template,
get_zone)
from marvin.lib.utils import cleanup_resources
from marvin.cloudstackAPI import (listInternalLoadBalancerVMs,
restartVPC,
stopInternalLoadBalancerVM,
startInternalLoadBalancerVM)
# Import System Modules
from nose.plugins.attrib import attr
import copy
import socket
import time
class TestVPCNetworkInternalLBRules(cloudstackTestCase):
"""Test VPC network functionality with Internal Load Balancing Rules
"""
@classmethod
def setUpClass(cls):
# We want to fail quicker, if it's a failure
socket.setdefaulttimeout(60)
test_client = super(TestVPCNetworkInternalLBRules, cls).getClsTestClient()
cls.api_client = test_client.getApiClient()
cls.db_client = test_client.getDbConnection()
cls.test_data = test_client.getParsedTestDataConfig()
# Get Zone, Domain and templates
cls.zone = get_zone(cls.api_client)
cls.domain = get_domain(cls.api_client)
cls.template = get_template(cls.api_client,
cls.zone.id,
cls.test_data["ostype"]
)
cls.test_data["virtual_machine"]["zoneid"] = cls.zone.id
cls.test_data["virtual_machine"]["template"] = cls.template.id
# Create service offering
cls.service_offering = ServiceOffering.create(cls.api_client,
cls.test_data["service_offering"]
)
cls._cleanup = [cls.service_offering]
return
@classmethod
def tearDownClass(cls):
try:
# Cleanup resources used
cleanup_resources(cls.api_client, cls._cleanup)
except Exception as e:
print ("Warning: Exception during cleanup : %s" % e)
return
def setUp(self):
# Create an account
self.account = Account.create(self.api_client,
self.test_data["account"],
admin=True,
domainid=self.domain.id
)
self.cleanup = [self.account]
# Creating a VPC offering
self.debug("Creating a VPC offering..")
self.vpc_off = VpcOffering.create(self.api_client, self.test_data["vpc_offering_multi_lb"])
self.cleanup.append(self.vpc_off)
self.debug("Enabling the VPC offering created")
self.vpc_off.update(self.api_client, state='Enabled')
# Creating a VPC
self.debug("Creating a VPC in the account: %s" % self.account.name)
testdata = self.test_data["vpc"]
testdata["name"] = "TestVPC"
testdata["displaytext"] = "TestVPC"
testdata["cidr"] = "10.1.1.1/16"
self.vpc = VPC.create(self.api_client,
testdata,
vpcofferingid=self.vpc_off.id,
zoneid=self.zone.id,
account=self.account.name,
domainid=self.account.domainid
)
# Creating network offerings
self.debug("Creating Network offering with Internal LB service...")
self.net_off_1 = NetworkOffering.create(self.api_client,
self.test_data["network_offering_internal_lb"],
conservemode=False)
self.cleanup.append(self.net_off_1)
self.debug("Enabling the Network offering created")
self.net_off_1.update(self.api_client, state="Enabled")
self.debug("Creating Network offering without Internal LB service...")
net_offering = copy.deepcopy(self.test_data["network_offering_internal_lb"])
net_offering["name"] = "Network offering without internal lb service"
net_offering["displaytext"] = "Network offering without internal lb service"
net_offering["supportedservices"] = "Vpn,Dhcp,Dns,UserData,SourceNat,StaticNat,PortForwarding,NetworkACL"
del net_offering["serviceProviderList"]["Lb"]
del net_offering["serviceCapabilityList"]["Lb"]
self.net_off_2 = NetworkOffering.create(self.api_client,
net_offering,
conservemode=False)
self.cleanup.append(self.net_off_2)
self.debug("Enabling the Network offering created")
self.net_off_2.update(self.api_client, state="Enabled")
return
def tearDown(self):
try:
# Clean up, terminate the created network offerings
cleanup_resources(self.api_client, self.cleanup)
except Exception as e:
self.debug("Warning: Exception during cleanup : %s" % e)
return
# create_Network - Creates network with the given Network offering in the VPC
def create_Network(self, nw_off, gateway="10.1.1.1"):
self.debug("Creating a network in the account - %s" % self.account.name)
self.test_data["network"]["netmask"] = "255.255.255.0"
network = Network.create(self.api_client,
self.test_data["network"],
accountid=self.account.name,
domainid=self.account.domainid,
networkofferingid=nw_off.id,
zoneid=self.zone.id,
gateway=gateway,
vpcid=self.vpc.id,
)
self.debug("Created network with ID - %s" % network.id)
return network
# create_VM - Creates VM in the given network
def create_VM(self, network):
self.debug("Creating VM in network with ID - %s in the account - %s" % (network.id, self.account.name))
vm = VirtualMachine.create(self.api_client,
self.test_data["virtual_machine"],
accountid=self.account.name,
domainid=self.account.domainid,
serviceofferingid=self.service_offering.id,
templateid=self.template.id,
zoneid=self.zone.id,
networkids=[str(network.id)],
hostid=None
)
self.debug("Created VM with ID - %s in network with ID - %s" % (vm.id, network.id))
return vm
# restart_Vpc - Restarts the given VPC with/without cleanup
def restart_Vpc(self, vpc, cleanup=None):
self.debug("Restarting VPC with ID - %s" % vpc.id)
cmd = restartVPC.restartVPCCmd()
cmd.id = vpc.id
cmd.cleanup = cleanup
self.api_client.restartVPC(cmd)
self.debug("Restarted VPC with ID - %s" % vpc.id)
# get_Router - Returns router for the given network
def get_Router(self, network):
self.debug("Finding the virtual router for network with ID - %s" % network.id)
routers = Router.list(self.api_client,
networkid=network.id,
listall=True
)
self.assertEqual(isinstance(routers, list), True,
"List routers should return a valid virtual router for network"
)
return routers[0]
# create_Internal_LB_Rule - Creates Internal LB rule in the given VPC network
def create_Internal_LB_Rule(self, network, vm_array=None, services=None, source_ip=None):
self.debug("Creating Internal LB rule in VPC network with ID - %s" % network.id)
if not services:
services = self.test_data["internal_lbrule"]
int_lb_rule = ApplicationLoadBalancer.create(self.api_client,
services=services,
sourcenetworkid=network.id,
networkid=network.id,
sourceipaddress=source_ip
)
self.debug("Created Internal LB rule")
# Assigning VMs to the created Internal Load Balancer rule
if vm_array:
self.debug("Assigning virtual machines - %s to the created Internal LB rule" % vm_array)
int_lb_rule.assign(self.api_client, vms=vm_array)
self.debug("Assigned VMs to the created Internal LB rule")
return int_lb_rule
# validate_Internal_LB_Rule - Validates the given Internal LB rule,
# matches the given Internal LB rule name and state against the list of Internal LB rules fetched
def validate_Internal_LB_Rule(self, int_lb_rule, state=None, vm_array=None):
"""Validates the Internal LB Rule"""
self.debug("Check if the Internal LB Rule is created successfully ?")
int_lb_rules = ApplicationLoadBalancer.list(self.api_client,
id=int_lb_rule.id
)
self.assertEqual(isinstance(int_lb_rules, list), True,
"List Internal LB Rule should return a valid list"
)
self.assertEqual(int_lb_rule.name, int_lb_rules[0].name,
"Name of the Internal LB Rule should match with the returned list data"
)
if state:
self.assertEqual(int_lb_rules[0].loadbalancerrule[0].state, state,
"Internal LB Rule state should be '%s'" % state
)
if vm_array:
instance_ids = [instance.id for instance in int_lb_rules[0].loadbalancerinstance]
for vm in vm_array:
self.assertEqual(vm.id in instance_ids, True,
"Internal LB instance list should have the VM with ID - %s" % vm.id
)
self.debug("Internal LB Rule creation successfully validated for %s" % int_lb_rule.name)
# list_InternalLbVms - Lists deployed Internal LB VM instances
def list_InternalLbVms(self, network_id=None, source_ip=None):
listInternalLoadBalancerVMsCmd = listInternalLoadBalancerVMs.listInternalLoadBalancerVMsCmd()
listInternalLoadBalancerVMsCmd.account = self.account.name
listInternalLoadBalancerVMsCmd.domainid = self.account.domainid
if network_id:
listInternalLoadBalancerVMsCmd.networkid = network_id
internal_lb_vms = self.api_client.listInternalLoadBalancerVMs(listInternalLoadBalancerVMsCmd)
if source_ip:
return [internal_lb_vm for internal_lb_vm in internal_lb_vms
if str(internal_lb_vm.guestipaddress) == source_ip]
else:
return internal_lb_vms
# get_InternalLbVm - Returns Internal LB VM instance for the given VPC network and source ip
def get_InternalLbVm(self, network, source_ip):
self.debug("Finding the InternalLbVm for network with ID - %s and source IP address - %s" %
(network.id, source_ip))
internal_lb_vms = self.list_InternalLbVms(network.id, source_ip)
self.assertEqual(isinstance(internal_lb_vms, list), True,
"List InternalLbVms should return a valid list"
)
return internal_lb_vms[0]
# stop_InternalLbVm - Stops the given Internal LB VM instance
def stop_InternalLbVm(self, int_lb_vm, force=None):
self.debug("Stopping InternalLbVm with ID - %s" % int_lb_vm.id)
cmd = stopInternalLoadBalancerVM.stopInternalLoadBalancerVMCmd()
cmd.id = int_lb_vm.id
if force:
cmd.forced = force
self.api_client.stopInternalLoadBalancerVM(cmd)
# start_InternalLbVm - Starts the given Internal LB VM instance
def start_InternalLbVm(self, int_lb_vm):
self.debug("Starting InternalLbVm with ID - %s" % int_lb_vm.id)
cmd = startInternalLoadBalancerVM.startInternalLoadBalancerVMCmd()
cmd.id = int_lb_vm.id
self.api_client.startInternalLoadBalancerVM(cmd)
# check_InternalLbVm_state - Checks if the Internal LB VM instance of the given VPC network and source IP is in the
# expected state form the list of fetched Internal LB VM instances
def check_InternalLbVm_state(self, network, source_ip, state=None):
self.debug("Check if the InternalLbVm is in state - %s" % state)
internal_lb_vms = self.list_InternalLbVms(network.id, source_ip)
self.assertEqual(isinstance(internal_lb_vms, list), True,
"List InternalLbVm should return a valid list"
)
if state:
self.assertEqual(internal_lb_vms[0].state, state,
"InternalLbVm is not in the expected state"
)
self.debug("InternalLbVm instance - %s is in the expected state - %s" % (internal_lb_vms[0].name, state))
# create_NetworkAclRule - Creates Ingress Network ACL rule in the given network
def create_NetworkAclRule(self, rule, network):
self.debug("Adding Ingress NetworkACL rule - %s" % rule)
return NetworkACL.create(self.api_client,
networkid=network.id,
services=rule,
traffictype="Ingress"
)
# acquire_PublicIPAddress - Acquires public IP address for the VPC
def acquire_PublicIPAddress(self):
self.debug("Acquiring public IP for VPC with ID - %s in the account - %s" % (self.vpc.id, self.account.name))
public_ip = PublicIPAddress.create(self.api_client,
accountid=self.account.name,
domainid=self.account.domainid,
zoneid=self.zone.id,
vpcid=self.vpc.id
)
self.debug("Acquired public IP address - %s for VPC with ID - %s" %
(public_ip.ipaddress.ipaddress, self.vpc.id))
return public_ip
# create_StaticNatRule_For_VM - Creates Static NAT rule on the given public IP for the given VM in the given network
def create_StaticNatRule_For_VM(self, vm, public_ip, network):
self.debug("Enabling Static NAT rule on public IP - %s for VM with ID - %s in network with ID - %s" %
(public_ip.ipaddress.ipaddress, vm.id, network.id))
StaticNATRule.enable(self.api_client,
ipaddressid=public_ip.ipaddress.id,
virtualmachineid=vm.id,
networkid=network.id,
vmguestip=None
)
self.debug("Static NAT rule enabled on public IP - %s for VM with ID - %s in network with ID - %s" %
(public_ip.ipaddress.ipaddress, vm.id, network.id))
# ssh_into_VM - Gets into the shell of the given VM using its Static NAT rule enabled public IP
def ssh_into_VM(self, vm, public_ip):
self.debug("SSH into VM with ID - %s on public IP address - %s" % (vm.id, public_ip.ipaddress.ipaddress))
ssh_client = vm.get_ssh_client(ipaddress=public_ip.ipaddress.ipaddress)
return ssh_client
# execute_cmd - Executes the given command on the given ssh client
def execute_cmd(self, ssh_client, cmd):
self.debug("SSH client executing command - %s" % cmd)
ret_data = ""
out_list = ssh_client.execute(cmd)
if out_list is not None:
ret_data = ' '.join(map(str, out_list)).strip()
self.debug("SSH client executed command result - %s" % ret_data)
else:
self.debug("SSH client executed command result is None")
return ret_data
# wget_from_vm_cmd - From within the given VM (ssh client),
# fetches test.html file of web server running with the given public IP
def wget_from_vm_cmd(self, ssh_client, ip_address, port):
cmd = "wget --no-cache -t 1 http://" + ip_address + ":" + str(port) + "/test.html"
response = self.execute_cmd(ssh_client, cmd)
if "200 OK" not in response:
self.fail("Failed to wget from a VM with http server IP address - %s" % ip_address)
# Removing the wget file
cmd = "rm -r test.html"
self.execute_cmd(ssh_client, cmd)
@attr(tags=["advanced", "intervlan"], required_hardware="false")
def test_01_internallb_rules(self):
"""Test VPC Network Internal LB functionality with different combinations of Internal LB rules
"""
# 1. Create an Internal LB Rule with source IP Address specified, check if the Internal LB Rule is successfully
# created.
# 2. Create an Internal LB Rule without source IP Address specified, check if the Internal LB Rule is
# successfully created.
# 3. Create an Internal LB Rule when the specified source IP Address is outside the VPC network (tier) CIDR
# range, check if the Internal LB Rule creation failed as the requested source IP is not in the network's
# CIDR subnet.
# 4. Create an Internal LB Rule when the specified source IP Address is outside the VPC super CIDR range,
# check if the Internal LB Rule creation failed as the requested source IP is not in the network's CIDR
# subnet.
# 5. Create an Internal LB Rule in the tier with LB service provider as VpcInlineLbVm, check if the Internal LB
# Rule creation failed as Scheme Internal is not supported by this network offering.
# 6. Create multiple Internal LB Rules using different Load Balancing source IP Addresses, check if the Internal
# LB Rules are successfully created.
# 7. Create multiple Internal LB Rules with different ports but using the same Load Balancing source IP Address,
# check if the Internal LB Rules are successfully created.
# 8. Create multiple Internal LB Rules with same ports and using the same Load Balancing source IP Address,
# check if the second Internal LB Rule creation failed as it conflicts with the first Internal LB rule.
# 9. Attach a VM to the above created Internal LB Rules, check if the VM is successfully attached to the
# Internal LB Rules.
# 10. Verify the InternalLbVm deployment after successfully creating the first Internal LB Rule and attaching a
# VM to it.
# 11. Verify the failure of attaching a VM from a different tier to an Internal LB Rule created on a tier.
# 12. Delete the above created Internal LB Rules, check if the Internal LB Rules are successfully deleted.
# Creating VPC networks in the VPC, and deploying VMs
self.debug("Creating a VPC network with Internal LB service...")
internal_tier = self.create_Network(self.net_off_1, gateway='10.1.1.1')
self.debug("Deploying a VM in the created VPC network...")
internal_vm = self.create_VM(internal_tier)
self.debug("Creating a VPC network without Internal LB service...")
public_tier = self.create_Network(self.net_off_2, gateway='10.1.2.1')
self.debug("Deploying a VM in the created VPC network...")
public_vm = self.create_VM(public_tier)
# Creating Internal LB Rules
self.debug("Creating an Internal LB Rule without source IP Address specified...")
int_lb_rule = self.create_Internal_LB_Rule(internal_tier)
self.validate_Internal_LB_Rule(int_lb_rule, state="Add")
# Validating InternalLbVm deployment
with self.assertRaises(Exception):
self.check_InternalLbVm_state(internal_tier, int_lb_rule.sourceipaddress)
self.debug("InternalLbVm is not deployed in the network as there are no VMs assigned to this Internal LB Rule")
self.debug('Deleting the Internal LB Rule - %s' % int_lb_rule.name)
int_lb_rule.delete(self.api_client)
with self.assertRaises(Exception):
self.validate_Internal_LB_Rule(int_lb_rule)
self.debug("Internal LB Rule successfully deleted in CloudStack")
free_source_ip = int_lb_rule.sourceipaddress
self.debug("Creating an Internal LB Rule with source IP Address specified...")
int_lb_rule = self.create_Internal_LB_Rule(internal_tier, source_ip=free_source_ip)
self.validate_Internal_LB_Rule(int_lb_rule, state="Add")
# Validating InternalLbVm deployment
with self.assertRaises(Exception):
self.check_InternalLbVm_state(internal_tier, int_lb_rule.sourceipaddress)
self.debug("InternalLbVm is not deployed in the network as there are no VMs assigned to this Internal LB Rule")
self.debug('Deleting the Internal LB Rule - %s' % int_lb_rule.name)
int_lb_rule.delete(self.api_client)
with self.assertRaises(Exception):
self.validate_Internal_LB_Rule(int_lb_rule)
self.debug("Internal LB Rule successfully deleted in CloudStack")
self.debug("Creating an Internal LB Rule when the specified source IP Address is outside the VPC network CIDR "
"range...")
with self.assertRaises(Exception):
self.create_Internal_LB_Rule(internal_tier, source_ip="10.1.1.256")
self.debug("Internal LB Rule creation failed as the requested IP is not in the network's CIDR subnet")
self.debug("Creating an Internal LB Rule when the specified source IP Address is outside the VPC super CIDR "
"range...")
with self.assertRaises(Exception):
self.create_Internal_LB_Rule(internal_tier, source_ip="10.2.1.256")
self.debug("Internal LB Rule creation failed as the requested IP is not in the network's CIDR subnet")
self.debug("Creating an Internal LB Rule in a VPC network without Internal Lb service...")
with self.assertRaises(Exception):
self.create_Internal_LB_Rule(public_tier)
self.debug("Internal LB Rule creation failed as Scheme Internal is not supported by this network offering")
self.debug("Creating multiple Internal LB Rules using different Load Balancing source IP Addresses...")
int_lb_rule_1 = self.create_Internal_LB_Rule(internal_tier, vm_array=[internal_vm])
self.validate_Internal_LB_Rule(int_lb_rule_1, state="Active", vm_array=[internal_vm])
int_lb_rule_2 = self.create_Internal_LB_Rule(internal_tier, vm_array=[internal_vm])
self.validate_Internal_LB_Rule(int_lb_rule_2, state="Active", vm_array=[internal_vm])
# Validating InternalLbVms deployment and state
self.check_InternalLbVm_state(internal_tier, int_lb_rule_1.sourceipaddress, state="Running")
self.check_InternalLbVm_state(internal_tier, int_lb_rule_2.sourceipaddress, state="Running")
self.debug('Removing VMs from the Internal LB Rules - %s, %s' % (int_lb_rule_1.name, int_lb_rule_2.name))
int_lb_rule_1.remove(self.api_client, vms=[internal_vm])
with self.assertRaises(Exception):
self.validate_Internal_LB_Rule(int_lb_rule_1, vm_array=[internal_vm])
self.debug("VMs successfully removed from the Internal LB Rule in CloudStack")
int_lb_rule_2.remove(self.api_client, vms=[internal_vm])
with self.assertRaises(Exception):
self.validate_Internal_LB_Rule(int_lb_rule_2, vm_array=[internal_vm])
self.debug("VMs successfully removed from the Internal LB Rule in CloudStack")
# Validating InternalLbVms state
self.check_InternalLbVm_state(internal_tier, int_lb_rule_1.sourceipaddress, state="Running")
self.check_InternalLbVm_state(internal_tier, int_lb_rule_2.sourceipaddress, state="Running")
self.debug('Deleting the Internal LB Rules - %s, %s' % (int_lb_rule_1.name, int_lb_rule_2.name))
int_lb_rule_1.delete(self.api_client)
with self.assertRaises(Exception):
self.validate_Internal_LB_Rule(int_lb_rule_1)
self.debug("Internal LB Rule successfully deleted in CloudStack")
int_lb_rule_2.delete(self.api_client)
with self.assertRaises(Exception):
self.validate_Internal_LB_Rule(int_lb_rule_2)
self.debug("Internal LB Rule successfully deleted in CloudStack")
# Validating InternalLbVms un-deployment
with self.assertRaises(Exception):
self.check_InternalLbVm_state(internal_tier, int_lb_rule_1.sourceipaddress)
self.debug("InternalLbVm successfully destroyed in CloudStack")
with self.assertRaises(Exception):
self.check_InternalLbVm_state(internal_tier, int_lb_rule_2.sourceipaddress)
self.debug("InternalLbVm successfully destroyed in CloudStack")
self.debug("Creating multiple Internal LB Rules with different ports but using the same Load Balancing source "
"IP Address...")
int_lb_rule_1 = self.create_Internal_LB_Rule(internal_tier, vm_array=[internal_vm])
self.validate_Internal_LB_Rule(int_lb_rule_1, state="Active", vm_array=[internal_vm])
int_lb_rule_2 = self.create_Internal_LB_Rule(internal_tier,
vm_array=[internal_vm],
services=self.test_data["internal_lbrule_http"],
source_ip=int_lb_rule_1.sourceipaddress
)
self.validate_Internal_LB_Rule(int_lb_rule_2, state="Active", vm_array=[internal_vm])
# Validating InternalLbVm deployment and state
self.check_InternalLbVm_state(internal_tier, int_lb_rule_1.sourceipaddress, state="Running")
self.debug('Removing VMs from the Internal LB Rules - %s, %s' % (int_lb_rule_1.name, int_lb_rule_2.name))
int_lb_rule_1.remove(self.api_client, vms=[internal_vm])
with self.assertRaises(Exception):
self.validate_Internal_LB_Rule(int_lb_rule_1, vm_array=[internal_vm])
self.debug("VMs successfully removed from the Internal LB Rule in CloudStack")
int_lb_rule_2.remove(self.api_client, vms=[internal_vm])
with self.assertRaises(Exception):
self.validate_Internal_LB_Rule(int_lb_rule_2, vm_array=[internal_vm])
self.debug("VMs successfully removed from the Internal LB Rule in CloudStack")
# Validating InternalLbVm state
self.check_InternalLbVm_state(internal_tier, int_lb_rule_1.sourceipaddress, state="Running")
self.debug('Deleting the Internal LB Rules - %s, %s' % (int_lb_rule_1.name, int_lb_rule_2.name))
int_lb_rule_1.delete(self.api_client)
with self.assertRaises(Exception):
self.validate_Internal_LB_Rule(int_lb_rule_1)
self.debug("Internal LB Rule successfully deleted in CloudStack")
int_lb_rule_2.delete(self.api_client)
with self.assertRaises(Exception):
self.validate_Internal_LB_Rule(int_lb_rule_2)
self.debug("Internal LB Rule successfully deleted in CloudStack")
# Validating InternalLbVm un-deployment
with self.assertRaises(Exception):
self.check_InternalLbVm_state(internal_tier, int_lb_rule_1.sourceipaddress)
self.debug("InternalLbVm successfully destroyed in CloudStack")
self.debug("Creating multiple Internal LB Rules with same ports and using the same Load Balancing source IP "
"Address...")
int_lb_rule = self.create_Internal_LB_Rule(internal_tier, vm_array=[internal_vm])
self.validate_Internal_LB_Rule(int_lb_rule, state="Active", vm_array=[internal_vm])
with self.assertRaises(Exception):
self.create_Internal_LB_Rule(internal_tier, vm_array=[internal_vm], source_ip=int_lb_rule.sourceipaddress)
self.debug("Internal LB Rule creation failed as it conflicts with the existing rule")
# Validating InternalLbVm deployment and state
self.check_InternalLbVm_state(internal_tier, int_lb_rule.sourceipaddress, state="Running")
self.debug('Removing VMs from the Internal LB Rule - %s' % int_lb_rule.name)
int_lb_rule.remove(self.api_client, vms=[internal_vm])
with self.assertRaises(Exception):
self.validate_Internal_LB_Rule(int_lb_rule, vm_array=[internal_vm])
self.debug("VMs successfully removed from the Internal LB Rule in CloudStack")
# Validating InternalLbVm state
self.check_InternalLbVm_state(internal_tier, int_lb_rule.sourceipaddress, state="Running")
self.debug('Deleting the Internal LB Rule - %s' % int_lb_rule.name)
int_lb_rule.delete(self.api_client)
with self.assertRaises(Exception):
self.validate_Internal_LB_Rule(int_lb_rule)
self.debug("Internal LB Rule successfully deleted in CloudStack")
# Validating InternalLbVm un-deployment
with self.assertRaises(Exception):
self.check_InternalLbVm_state(internal_tier, int_lb_rule.sourceipaddress)
self.debug("InternalLbVm successfully destroyed in CloudStack")
self.debug("Attaching a VM from a different tier to an Internal LB Rule created on a tier...")
with self.assertRaises(Exception):
self.create_Internal_LB_Rule(internal_tier, vm_array=[public_vm])
self.debug("Internal LB Rule creation failed as the VM belongs to a different network")
@attr(tags=["advanced", "intervlan"], required_hardware="true")
def test_02_internallb_rules_traffic(self):
"""Test VPC Network Internal LB functionality by performing (wget) traffic tests within a VPC
"""
# 1. Create an Internal LB Rule "internal_lbrule" with source IP Address specified on the Internal tier, check
# if the Internal LB Rule is successfully created.
# 2. Create an Internal LB Rule "internal_lbrule_http" with source IP Address (same as above) specified on the
# Internal tier, check if the Internal LB Rule is successfully created.
# 3. Attach a VM to the above created Internal LB Rules, check if the InternalLbVm is successfully deployed in
# the Internal tier.
# 4. Deploy two more VMs in the Internal tier, check if the VMs are successfully deployed.
# 5. Attach the newly deployed VMs to the above created Internal LB Rules, verify the validity of the above
# created Internal LB Rules over three Load Balanced VMs in the Internal tier.
# 6. Create the corresponding Network ACL rules to make the created Internal LB rules (SSH & HTTP) accessible,
# check if the Network ACL rules are successfully added to the internal tier.
# 7. Validate the Internal LB functionality by performing (wget) traffic tests from a VM in the Public tier to
# the Internal load balanced guest VMs in the Internal tier, using Static NAT functionality to access (ssh)
# the VM on the Public tier.
# 8. Verify that the InternalLbVm gets destroyed when the last Internal LB rule is removed from the Internal
# tier.
# 9. Repeat the above steps for one more Internal tier as well, validate the Internal LB functionality.
# Creating VPC networks in the VPC, and deploying VMs
self.debug("Creating a VPC network with Internal LB service...")
internal_tier_1 = self.create_Network(self.net_off_1, gateway='10.1.1.1')
self.debug("Deploying a VM in the created VPC network...")
internal_vm_1 = self.create_VM(internal_tier_1)
self.debug("Creating one more VPC network with Internal LB service...")
internal_tier_2 = self.create_Network(self.net_off_1, gateway='10.1.2.1')
self.debug("Deploying a VM in the created VPC network...")
internal_vm_2 = self.create_VM(internal_tier_2)
self.debug("Creating a VPC network without Internal LB service...")
public_tier = self.create_Network(self.net_off_2, gateway='10.1.3.1')
self.debug("Deploying a VM in the created VPC network...")
public_vm = self.create_VM(public_tier)
# Creating Internal LB Rules in the Internal tiers
self.debug("Creating three Internal LB Rules (SSH & HTTP) using the same Load Balancing source IP Address...")
int_lb_rule_1 = self.create_Internal_LB_Rule(internal_tier_1, vm_array=[internal_vm_1])
self.validate_Internal_LB_Rule(int_lb_rule_1, state="Active", vm_array=[internal_vm_1])
int_lb_rule_2 = self.create_Internal_LB_Rule(internal_tier_1,
vm_array=[internal_vm_1],
services=self.test_data["internal_lbrule_http"],
source_ip=int_lb_rule_1.sourceipaddress
)
self.validate_Internal_LB_Rule(int_lb_rule_2, state="Active", vm_array=[internal_vm_1])
internal_lbrule_http = copy.deepcopy(self.test_data["internal_lbrule_http"])
internal_lbrule_http["sourceport"] = 8080
internal_lbrule_http["instanceport"] = 8080
int_lb_rule_3 = self.create_Internal_LB_Rule(internal_tier_1,
vm_array=[internal_vm_1],
services=internal_lbrule_http,
source_ip=int_lb_rule_1.sourceipaddress
)
self.validate_Internal_LB_Rule(int_lb_rule_3, state="Active", vm_array=[internal_vm_1])
# Validating InternalLbVm deployment and state
self.check_InternalLbVm_state(internal_tier_1, int_lb_rule_1.sourceipaddress, state="Running")
# Deploying more VMs in the Internal tier
self.debug("Deploying two more VMs in network - %s" % internal_tier_1.name)
internal_vm_1_1 = self.create_VM(internal_tier_1)
internal_vm_1_2 = self.create_VM(internal_tier_1)
# Adding newly deployed VMs to the created Internal LB rules
self.debug("Adding two more virtual machines to the created Internal LB rules...")
int_lb_rule_1.assign(self.api_client, [internal_vm_1_1, internal_vm_1_2])
self.validate_Internal_LB_Rule(int_lb_rule_1, state="Active",
vm_array=[internal_vm_1, internal_vm_1_1, internal_vm_1_2])
int_lb_rule_2.assign(self.api_client, [internal_vm_1_1, internal_vm_1_2])
self.validate_Internal_LB_Rule(int_lb_rule_2, state="Active",
vm_array=[internal_vm_1, internal_vm_1_1, internal_vm_1_2])
int_lb_rule_3.assign(self.api_client, [internal_vm_1_1, internal_vm_1_2])
self.validate_Internal_LB_Rule(int_lb_rule_3, state="Active",
vm_array=[internal_vm_1, internal_vm_1_1, internal_vm_1_2])
# Validating InternalLbVm state
self.check_InternalLbVm_state(internal_tier_1, int_lb_rule_1.sourceipaddress, state="Running")
# Adding Network ACL rules in the Internal tier
self.debug("Adding Network ACL rules to make the created Internal LB rules (SSH & HTTP) accessible...")
self.create_NetworkAclRule(self.test_data["ingress_rule"], internal_tier_1)
self.create_NetworkAclRule(self.test_data["http_rule"], internal_tier_1)
http_rule = copy.deepcopy(self.test_data["http_rule"])
http_rule["privateport"] = 8080
http_rule["publicport"] = 8080
http_rule["startport"] = 8080
http_rule["endport"] = 8080
self.create_NetworkAclRule(http_rule, internal_tier_1)
# Creating Internal LB Rules in the Internal tier
self.debug("Creating three Internal LB Rules (SSH & HTTP) using the same Load Balancing source IP Address...")
int_lb_rule_4 = self.create_Internal_LB_Rule(internal_tier_2, vm_array=[internal_vm_2])
self.validate_Internal_LB_Rule(int_lb_rule_4, state="Active", vm_array=[internal_vm_2])
int_lb_rule_5 = self.create_Internal_LB_Rule(internal_tier_2,
vm_array=[internal_vm_2],
services=self.test_data["internal_lbrule_http"],
source_ip=int_lb_rule_4.sourceipaddress
)
self.validate_Internal_LB_Rule(int_lb_rule_5, state="Active", vm_array=[internal_vm_2])
int_lb_rule_6 = self.create_Internal_LB_Rule(internal_tier_2,
vm_array=[internal_vm_2],
services=internal_lbrule_http,
source_ip=int_lb_rule_4.sourceipaddress
)
self.validate_Internal_LB_Rule(int_lb_rule_6, state="Active", vm_array=[internal_vm_2])
# Validating InternalLbVm deployment and state
self.check_InternalLbVm_state(internal_tier_2, int_lb_rule_4.sourceipaddress, state="Running")
# Deploying more VMs in the Internal tier
self.debug("Deploying two more VMs in network - %s" % internal_tier_2.name)
internal_vm_2_1 = self.create_VM(internal_tier_2)
internal_vm_2_2 = self.create_VM(internal_tier_2)
# Adding newly deployed VMs to the created Internal LB rules
self.debug("Adding two more virtual machines to the created Internal LB rules...")
int_lb_rule_4.assign(self.api_client, [internal_vm_2_1, internal_vm_2_2])
self.validate_Internal_LB_Rule(int_lb_rule_4, state="Active",
vm_array=[internal_vm_2, internal_vm_2_1, internal_vm_2_2])
int_lb_rule_5.assign(self.api_client, [internal_vm_2_1, internal_vm_2_2])
self.validate_Internal_LB_Rule(int_lb_rule_5, state="Active",
vm_array=[internal_vm_2, internal_vm_2_1, internal_vm_2_2])
int_lb_rule_6.assign(self.api_client, [internal_vm_2_1, internal_vm_2_2])
self.validate_Internal_LB_Rule(int_lb_rule_6, state="Active",
vm_array=[internal_vm_2, internal_vm_2_1, internal_vm_2_2])
# Validating InternalLbVm state
self.check_InternalLbVm_state(internal_tier_2, int_lb_rule_4.sourceipaddress, state="Running")
# Adding Network ACL rules in the Internal tier
self.debug("Adding Network ACL rules to make the created Internal LB rules (SSH & HTTP) accessible...")
self.create_NetworkAclRule(self.test_data["ingress_rule"], internal_tier_2)
self.create_NetworkAclRule(self.test_data["http_rule"], internal_tier_2)
self.create_NetworkAclRule(http_rule, internal_tier_2)
# Creating Static NAT rule for the VM in the Public tier
public_ip = self.acquire_PublicIPAddress()
self.create_StaticNatRule_For_VM(public_vm, public_ip, public_tier)
# Adding Network ACL rule in the Public tier
self.debug("Adding Network ACL rule to make the created NAT rule (SSH) accessible...")
self.create_NetworkAclRule(self.test_data["ingress_rule"], public_tier)
# Internal LB (wget) traffic tests
ssh_client = self.ssh_into_VM(public_vm, public_ip)
self.wget_from_vm_cmd(ssh_client,
int_lb_rule_1.sourceipaddress,
self.test_data["http_rule"]["publicport"]
)
ssh_client = self.ssh_into_VM(public_vm, public_ip)
self.wget_from_vm_cmd(ssh_client,
int_lb_rule_1.sourceipaddress,
http_rule["publicport"]
)
ssh_client = self.ssh_into_VM(public_vm, public_ip)
self.wget_from_vm_cmd(ssh_client,
int_lb_rule_4.sourceipaddress,
self.test_data["http_rule"]["publicport"]
)
ssh_client = self.ssh_into_VM(public_vm, public_ip)
self.wget_from_vm_cmd(ssh_client,
int_lb_rule_4.sourceipaddress,
http_rule["publicport"]
)
@attr(tags=["advanced", "intervlan"], required_hardware="true")
def test_03_internallb_rules_vpc_network_restarts_traffic(self):
"""Test VPC Network Internal LB functionality with restarts of VPC network components by performing (wget)
traffic tests within a VPC
"""
# Repeat the tests in the testcase "test_02_internallb_rules_traffic" with restarts of VPC networks (tiers):
# 1. Restart tier with InternalLbVm (cleanup = false), verify that the InternalLbVm gets destroyed and deployed
# again in the Internal tier.
# 2. Restart tier with InternalLbVm (cleanup = true), verify that the InternalLbVm gets destroyed and deployed
# again in the Internal tier.
# 3. Restart tier without InternalLbVm (cleanup = false), verify that this restart has no effect on the
# InternalLbVm functionality.
# 4. Restart tier without InternalLbVm (cleanup = true), verify that this restart has no effect on the
# InternalLbVm functionality.
# 5. Stop all the VMs configured with InternalLbVm, verify that the InternalLbVm gets destroyed in the Internal
# tier.
# 6. Start all the VMs configured with InternalLbVm, verify that the InternalLbVm gets deployed again in the
# Internal tier.
# 7. Restart VPC (cleanup = false), verify that the VPC VR gets rebooted and this restart has no effect on the
# InternalLbVm functionality.
        # 8. Restart VPC (cleanup = true), verify that the VPC VR gets rebooted and this restart has no effect on the
# InternalLbVm functionality.
# Verify the above restarts of VPC networks (tiers) by performing (wget) traffic tests within a VPC.
# Creating VPC networks in the VPC, and deploying VMs
self.debug("Creating a VPC network with Internal LB service...")
internal_tier = self.create_Network(self.net_off_1, gateway='10.1.1.1')
self.debug("Deploying a VM in the created VPC network...")
internal_vm = self.create_VM(internal_tier)
self.debug("Creating a VPC network without Internal LB service...")
public_tier = self.create_Network(self.net_off_2, gateway='10.1.2.1')
self.debug("Deploying a VM in the created VPC network...")
public_vm = self.create_VM(public_tier)
        # Creating Internal LB Rules in the Internal tier
        self.debug("Creating two Internal LB Rules (SSH & HTTP) using the same Load Balancing source IP Address...")
int_lb_rule_1 = self.create_Internal_LB_Rule(internal_tier, vm_array=[internal_vm])
self.validate_Internal_LB_Rule(int_lb_rule_1, state="Active", vm_array=[internal_vm])
int_lb_rule_2 = self.create_Internal_LB_Rule(internal_tier,
vm_array=[internal_vm],
services=self.test_data["internal_lbrule_http"],
source_ip=int_lb_rule_1.sourceipaddress
)
self.validate_Internal_LB_Rule(int_lb_rule_2, state="Active", vm_array=[internal_vm])
# Validating InternalLbVm deployment and state
self.check_InternalLbVm_state(internal_tier, int_lb_rule_1.sourceipaddress, state="Running")
# Deploying more VMs in the Internal tier
self.debug("Deploying two more VMs in network - %s" % internal_tier.name)
internal_vm_1 = self.create_VM(internal_tier)
internal_vm_2 = self.create_VM(internal_tier)
# Adding newly deployed VMs to the created Internal LB rules
self.debug("Adding two more virtual machines to the created Internal LB rules...")
int_lb_rule_1.assign(self.api_client, [internal_vm_1, internal_vm_2])
self.validate_Internal_LB_Rule(int_lb_rule_1, state="Active",
vm_array=[internal_vm, internal_vm_1, internal_vm_2])
int_lb_rule_2.assign(self.api_client, [internal_vm_1, internal_vm_2])
self.validate_Internal_LB_Rule(int_lb_rule_2, state="Active",
vm_array=[internal_vm, internal_vm_1, internal_vm_2])
# Validating InternalLbVm state
self.check_InternalLbVm_state(internal_tier, int_lb_rule_1.sourceipaddress, state="Running")
# Adding Network ACL rules in the Internal tier
self.debug("Adding Network ACL rules to make the created Internal LB rules (SSH & HTTP) accessible...")
self.create_NetworkAclRule(self.test_data["ingress_rule"], internal_tier)
self.create_NetworkAclRule(self.test_data["http_rule"], internal_tier)
# Creating Static NAT rule for the VM in the Public tier
public_ip = self.acquire_PublicIPAddress()
self.create_StaticNatRule_For_VM(public_vm, public_ip, public_tier)
# Adding Network ACL rule in the Public tier
self.debug("Adding Network ACL rule to make the created NAT rule (SSH) accessible...")
self.create_NetworkAclRule(self.test_data["ingress_rule"], public_tier)
# Internal LB (wget) traffic test
ssh_client = self.ssh_into_VM(public_vm, public_ip)
self.wget_from_vm_cmd(ssh_client,
int_lb_rule_1.sourceipaddress,
self.test_data["http_rule"]["publicport"]
)
# Restart Internal tier (cleanup = false)
# InternalLbVm gets destroyed and deployed again in the Internal tier
self.debug("Restarting the Internal tier without cleanup...")
Network.restart(internal_tier, self.api_client, cleanup=False)
# Validating InternalLbVm state
self.check_InternalLbVm_state(internal_tier, int_lb_rule_1.sourceipaddress, state="Running")
# Internal LB (wget) traffic test
ssh_client = self.ssh_into_VM(public_vm, public_ip)
tries = 0
while tries < 10:
try:
self.wget_from_vm_cmd(ssh_client,
int_lb_rule_1.sourceipaddress,
self.test_data["http_rule"]["publicport"]
)
except Exception as e:
self.debug("Failed to wget file via the InternalLbVm after re-starting the Internal tier: %s" % e)
self.debug("Waiting for the InternalLbVm in the Internal tier to be fully resolved for (wget) traffic "
"test...")
time.sleep(30)
tries += 1
continue
self.debug("Internal LB (wget) traffic test is successful after re-starting the Internal tier")
break
# Restart Internal tier (cleanup = true)
# InternalLbVm gets destroyed and deployed again in the Internal tier
self.debug("Restarting the Internal tier with cleanup...")
Network.restart(internal_tier, self.api_client, cleanup=True)
# Validating InternalLbVm state
self.check_InternalLbVm_state(internal_tier, int_lb_rule_1.sourceipaddress, state="Running")
# Internal LB (wget) traffic test
ssh_client = self.ssh_into_VM(public_vm, public_ip)
tries = 0
while tries < 10:
try:
self.wget_from_vm_cmd(ssh_client,
int_lb_rule_1.sourceipaddress,
self.test_data["http_rule"]["publicport"]
)
except Exception as e:
self.debug("Failed to wget file via the InternalLbVm after re-starting the Internal tier with cleanup: "
"%s" % e)
self.debug("Waiting for the InternalLbVm in the Internal tier to be fully resolved for (wget) traffic "
"test...")
time.sleep(30)
tries += 1
continue
self.debug("Internal LB (wget) traffic test is successful after re-starting the Internal tier with cleanup")
break
# Restart Public tier (cleanup = false)
# This restart has no effect on the InternalLbVm functionality
self.debug("Restarting the Public tier without cleanup...")
Network.restart(public_tier, self.api_client, cleanup=False)
# Validating InternalLbVm state
self.check_InternalLbVm_state(internal_tier, int_lb_rule_1.sourceipaddress, state="Running")
# Internal LB (wget) traffic test
ssh_client = self.ssh_into_VM(public_vm, public_ip)
self.wget_from_vm_cmd(ssh_client,
int_lb_rule_1.sourceipaddress,
self.test_data["http_rule"]["publicport"]
)
# Restart Public tier (cleanup = true)
# This restart has no effect on the InternalLbVm functionality
self.debug("Restarting the Public tier with cleanup...")
Network.restart(public_tier, self.api_client, cleanup=True)
# Validating InternalLbVm state
self.check_InternalLbVm_state(internal_tier, int_lb_rule_1.sourceipaddress, state="Running")
# Internal LB (wget) traffic test
ssh_client = self.ssh_into_VM(public_vm, public_ip)
self.wget_from_vm_cmd(ssh_client,
int_lb_rule_1.sourceipaddress,
self.test_data["http_rule"]["publicport"]
)
# Stopping VMs in the Internal tier
# wget traffic test fails as all the VMs in the Internal tier are in stopped state
self.debug("Stopping all the VMs in the Internal tier...")
internal_vm.stop(self.api_client)
internal_vm_1.stop(self.api_client)
internal_vm_2.stop(self.api_client)
# Validating InternalLbVm state
self.check_InternalLbVm_state(internal_tier, int_lb_rule_1.sourceipaddress, state="Running")
# Internal LB (wget) traffic test
ssh_client = self.ssh_into_VM(public_vm, public_ip)
with self.assertRaises(Exception):
self.wget_from_vm_cmd(ssh_client,
int_lb_rule_1.sourceipaddress,
self.test_data["http_rule"]["publicport"]
)
self.debug("Failed to wget file as all the VMs in the Internal tier are in stopped state")
# Starting VMs in the Internal tier
# wget traffic test succeeds as all the VMs in the Internal tier are back in running state
self.debug("Starting all the VMs in the Internal tier...")
internal_vm.start(self.api_client)
internal_vm_1.start(self.api_client)
internal_vm_2.start(self.api_client)
# Validating InternalLbVm state
self.check_InternalLbVm_state(internal_tier, int_lb_rule_1.sourceipaddress, state="Running")
# Internal LB (wget) traffic test
ssh_client = self.ssh_into_VM(public_vm, public_ip)
tries = 0
while tries < 10:
try:
self.wget_from_vm_cmd(ssh_client,
int_lb_rule_1.sourceipaddress,
self.test_data["http_rule"]["publicport"]
)
except Exception as e:
self.debug("Failed to wget file via the InternalLbVm after re-starting all the VMs in the Internal tier"
": %s" % e)
self.debug("Waiting for the InternalLbVm and all the VMs in the Internal tier to be fully resolved for "
"(wget) traffic test...")
time.sleep(30)
tries += 1
continue
self.debug("Internal LB (wget) traffic test is successful after re-starting all the VMs in the Internal "
"tier")
break
# Restarting VPC (cleanup = false)
# VPC VR gets destroyed and deployed again in the VPC
# This restart has no effect on the InternalLbVm functionality
self.debug("Restarting the VPC without cleanup...")
self.restart_Vpc(self.vpc, cleanup=False)
# Validating InternalLbVm state
self.check_InternalLbVm_state(internal_tier, int_lb_rule_1.sourceipaddress, state="Running")
# Internal LB (wget) traffic test
ssh_client = self.ssh_into_VM(public_vm, public_ip)
self.wget_from_vm_cmd(ssh_client,
int_lb_rule_1.sourceipaddress,
self.test_data["http_rule"]["publicport"]
)
# Restarting VPC (cleanup = true)
# VPC VR gets destroyed and deployed again in the VPC
# This restart has no effect on the InternalLbVm functionality
self.debug("Restarting the VPC with cleanup...")
self.restart_Vpc(self.vpc, cleanup=True)
# Validating InternalLbVm state
self.check_InternalLbVm_state(internal_tier, int_lb_rule_1.sourceipaddress, state="Running")
# Internal LB (wget) traffic test
ssh_client = self.ssh_into_VM(public_vm, public_ip)
self.wget_from_vm_cmd(ssh_client,
int_lb_rule_1.sourceipaddress,
self.test_data["http_rule"]["publicport"]
)
@attr(tags=["advanced", "intervlan"], required_hardware="true")
def test_04_internallb_appliance_operations_traffic(self):
"""Test VPC Network Internal LB functionality with InternalLbVm appliance operations by performing (wget)
traffic tests within a VPC
"""
# Repeat the tests in the testcase "test_02_internallb_rules_traffic" with InternalLbVm appliance operations:
# 1. Verify the InternalLbVm deployment by creating the Internal LB Rules when the VPC VR is in Stopped state,
# VPC VR has no effect on the InternalLbVm functionality.
# 2. Stop the InternalLbVm when the VPC VR is in Stopped State
# 3. Start the InternalLbVm when the VPC VR is in Stopped state
# 4. Stop the InternalLbVm when the VPC VR is in Running State
# 5. Start the InternalLbVm when the VPC VR is in Running state
# 6. Force stop the InternalLbVm when the VPC VR is in Running State
# 7. Start the InternalLbVm when the VPC VR is in Running state
        # Verify the above InternalLbVm appliance operations by performing (wget) traffic tests within a VPC.
# Creating VPC networks in the VPC, and deploying VMs
self.debug("Creating a VPC network with Internal LB service...")
internal_tier = self.create_Network(self.net_off_1, gateway='10.1.1.1')
self.debug("Deploying a VM in the created VPC network...")
internal_vm = self.create_VM(internal_tier)
self.debug("Creating a VPC network without Internal LB service...")
public_tier = self.create_Network(self.net_off_2, gateway='10.1.2.1')
self.debug("Deploying a VM in the created VPC network...")
public_vm = self.create_VM(public_tier)
# Stopping the VPC VR
# VPC VR has no effect on the InternalLbVm functionality
vpc_vr = self.get_Router(internal_tier)
Router.stop(self.api_client, id=vpc_vr.id)
        # Creating Internal LB Rules in the Internal tier
        self.debug("Creating two Internal LB Rules (SSH & HTTP) using the same Load Balancing source IP Address...")
int_lb_rule_1 = self.create_Internal_LB_Rule(internal_tier, vm_array=[internal_vm])
self.validate_Internal_LB_Rule(int_lb_rule_1, state="Active", vm_array=[internal_vm])
int_lb_rule_2 = self.create_Internal_LB_Rule(internal_tier,
vm_array=[internal_vm],
services=self.test_data["internal_lbrule_http"],
source_ip=int_lb_rule_1.sourceipaddress
)
self.validate_Internal_LB_Rule(int_lb_rule_2, state="Active", vm_array=[internal_vm])
# Validating InternalLbVm deployment and state
self.check_InternalLbVm_state(internal_tier, int_lb_rule_1.sourceipaddress, state="Running")
# Deploying more VMs in the Internal tier
self.debug("Deploying two more VMs in network - %s" % internal_tier.name)
internal_vm_1 = self.create_VM(internal_tier)
internal_vm_2 = self.create_VM(internal_tier)
# Adding newly deployed VMs to the created Internal LB rules
self.debug("Adding two more virtual machines to the created Internal LB rules...")
int_lb_rule_1.assign(self.api_client, [internal_vm_1, internal_vm_2])
self.validate_Internal_LB_Rule(int_lb_rule_1, state="Active",
vm_array=[internal_vm, internal_vm_1, internal_vm_2])
int_lb_rule_2.assign(self.api_client, [internal_vm_1, internal_vm_2])
self.validate_Internal_LB_Rule(int_lb_rule_2, state="Active",
vm_array=[internal_vm, internal_vm_1, internal_vm_2])
# Validating InternalLbVm state
self.check_InternalLbVm_state(internal_tier, int_lb_rule_1.sourceipaddress, state="Running")
# Adding Network ACL rules in the Internal tier
self.debug("Adding Network ACL rules to make the created Internal LB rules (SSH & HTTP) accessible...")
self.create_NetworkAclRule(self.test_data["ingress_rule"], internal_tier)
self.create_NetworkAclRule(self.test_data["http_rule"], internal_tier)
# Creating Static NAT rule for the VM in the Public tier
public_ip = self.acquire_PublicIPAddress()
self.create_StaticNatRule_For_VM(public_vm, public_ip, public_tier)
# Adding Network ACL rule in the Public tier
self.debug("Adding Network ACL rule to make the created NAT rule (SSH) accessible...")
self.create_NetworkAclRule(self.test_data["ingress_rule"], public_tier)
# Internal LB (wget) traffic test
ssh_client = self.ssh_into_VM(public_vm, public_ip)
self.wget_from_vm_cmd(ssh_client,
int_lb_rule_1.sourceipaddress,
self.test_data["http_rule"]["publicport"]
)
# # Stopping the InternalLbVm when the VPC VR is in Stopped state
int_lb_vm = self.get_InternalLbVm(internal_tier, int_lb_rule_1.sourceipaddress)
self.stop_InternalLbVm(int_lb_vm)
self.check_InternalLbVm_state(internal_tier, int_lb_rule_1.sourceipaddress, state="Stopped")
# Internal LB (wget) traffic test
ssh_client = self.ssh_into_VM(public_vm, public_ip)
with self.assertRaises(Exception):
self.wget_from_vm_cmd(ssh_client,
int_lb_rule_1.sourceipaddress,
self.test_data["http_rule"]["publicport"]
)
self.debug("Failed to wget file as the InternalLbVm is in stopped state")
# # Starting the InternalLbVm when the VPC VR is in Stopped state
self.start_InternalLbVm(int_lb_vm)
self.check_InternalLbVm_state(internal_tier, int_lb_rule_1.sourceipaddress, state="Running")
# Internal LB (wget) traffic test
ssh_client = self.ssh_into_VM(public_vm, public_ip)
tries = 0
while tries < 10:
try:
self.wget_from_vm_cmd(ssh_client,
int_lb_rule_1.sourceipaddress,
self.test_data["http_rule"]["publicport"]
)
except Exception as e:
self.debug("Failed to wget file via the InternalLbVm after re-starting the InternalLbVm appliance: %s"
% e)
self.debug("Waiting for the InternalLbVm to be fully resolved for (wget) traffic test...")
time.sleep(30)
tries += 1
continue
self.debug("Internal LB (wget) traffic test is successful after re-starting the InternalLbVm appliance")
break
# Starting the VPC VR
# VPC VR has no effect on the InternalLbVm functionality
Router.start(self.api_client, id=vpc_vr.id)
# # Stopping the InternalLbVm when the VPC VR is in Running state
self.stop_InternalLbVm(int_lb_vm)
self.check_InternalLbVm_state(internal_tier, int_lb_rule_1.sourceipaddress, state="Stopped")
# Internal LB (wget) traffic test
ssh_client = self.ssh_into_VM(public_vm, public_ip)
with self.assertRaises(Exception):
self.wget_from_vm_cmd(ssh_client,
int_lb_rule_1.sourceipaddress,
self.test_data["http_rule"]["publicport"]
)
self.debug("Failed to wget file as the InternalLbVm is in stopped state")
# # Starting the InternalLbVm when the VPC VR is in Running state
self.start_InternalLbVm(int_lb_vm)
self.check_InternalLbVm_state(internal_tier, int_lb_rule_1.sourceipaddress, state="Running")
# Internal LB (wget) traffic test
ssh_client = self.ssh_into_VM(public_vm, public_ip)
tries = 0
while tries < 10:
try:
self.wget_from_vm_cmd(ssh_client,
int_lb_rule_1.sourceipaddress,
self.test_data["http_rule"]["publicport"]
)
except Exception as e:
self.debug("Failed to wget file via the InternalLbVm after re-starting the InternalLbVm appliance: %s"
% e)
self.debug("Waiting for the InternalLbVm to be fully resolved for (wget) traffic test...")
time.sleep(30)
tries += 1
continue
self.debug("Internal LB (wget) traffic test is successful after re-starting the InternalLbVm appliance")
break
# # Force Stopping the InternalLbVm when the VPC VR is in Running state
self.stop_InternalLbVm(int_lb_vm, force=True)
self.check_InternalLbVm_state(internal_tier, int_lb_rule_1.sourceipaddress, state="Stopped")
# Internal LB (wget) traffic test
ssh_client = self.ssh_into_VM(public_vm, public_ip)
with self.assertRaises(Exception):
self.wget_from_vm_cmd(ssh_client,
int_lb_rule_1.sourceipaddress,
self.test_data["http_rule"]["publicport"]
)
self.debug("Failed to wget file as the InternalLbVm is in stopped state")
# # Starting the InternalLbVm when the VPC VR is in Running state
self.start_InternalLbVm(int_lb_vm)
self.check_InternalLbVm_state(internal_tier, int_lb_rule_1.sourceipaddress, state="Running")
# Internal LB (wget) traffic test
ssh_client = self.ssh_into_VM(public_vm, public_ip)
tries = 0
while tries < 10:
try:
self.wget_from_vm_cmd(ssh_client,
int_lb_rule_1.sourceipaddress,
self.test_data["http_rule"]["publicport"]
)
except Exception as e:
self.debug("Failed to wget file via the InternalLbVm after re-starting the InternalLbVm appliance: %s"
% e)
self.debug("Waiting for the InternalLbVm to be fully resolved for (wget) traffic test...")
time.sleep(30)
tries += 1
continue
self.debug("Internal LB (wget) traffic test is successful after re-starting the InternalLbVm appliance")
break
| GabrielBrascher/cloudstack | test/integration/component/test_vpc_network_internal_lbrules.py | Python | apache-2.0 | 66,892 |
# Copyright (c) 2010-2012 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import operator
import os
import mock
import unittest
import itertools
from contextlib import contextmanager
from shutil import rmtree
from StringIO import StringIO
from tempfile import mkdtemp
from test.unit import FakeLogger
from time import gmtime
from xml.dom import minidom
import time
import random
from eventlet import spawn, Timeout, listen
import simplejson
from swift.common.swob import Request, HeaderKeyDict
import swift.container
from swift.container import server as container_server
from swift.common import constraints
from swift.common.utils import (Timestamp, mkdirs, public, replication,
lock_parent_directory, json)
from test.unit import fake_http_connect
from swift.common.storage_policy import (POLICIES, StoragePolicy)
from swift.common.request_helpers import get_sys_meta_prefix
from test.unit import patch_policies
@contextmanager
def save_globals():
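    # Save the module-level http_connect so tests can replace it inside the
    # with block; the original value is restored on exit.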
orig_http_connect = getattr(swift.container.server, 'http_connect',
None)
try:
yield True
finally:
swift.container.server.http_connect = orig_http_connect
@patch_policies
class TestContainerController(unittest.TestCase):
"""Test swift.container.server.ContainerController"""
def setUp(self):
"""Set up for testing swift.object_server.ObjectController"""
self.testdir = os.path.join(mkdtemp(),
'tmp_test_object_server_ObjectController')
mkdirs(self.testdir)
rmtree(self.testdir)
mkdirs(os.path.join(self.testdir, 'sda1'))
mkdirs(os.path.join(self.testdir, 'sda1', 'tmp'))
self.controller = container_server.ContainerController(
{'devices': self.testdir, 'mount_check': 'false'})
# some of the policy tests want at least two policies
self.assert_(len(POLICIES) > 1)
def tearDown(self):
rmtree(os.path.dirname(self.testdir), ignore_errors=1)
def _update_object_put_headers(self, req):
"""
Override this method in test subclasses to test post upgrade
behavior.
"""
pass
def _check_put_container_storage_policy(self, req, policy_index):
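        # Issue the given PUT, then HEAD the same path and verify the backend
        # storage policy index header matches the expected value.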
resp = req.get_response(self.controller)
self.assertEqual(201, resp.status_int)
req = Request.blank(req.path, method='HEAD')
resp = req.get_response(self.controller)
self.assertEqual(204, resp.status_int)
self.assertEqual(str(policy_index),
resp.headers['X-Backend-Storage-Policy-Index'])
def test_get_and_validate_policy_index(self):
# no policy is OK
req = Request.blank('/sda1/p/a/container_default', method='PUT',
headers={'X-Timestamp': '0'})
self._check_put_container_storage_policy(req, POLICIES.default.idx)
# bogus policies
for policy in ('nada', 999):
req = Request.blank('/sda1/p/a/c_%s' % policy, method='PUT',
headers={
'X-Timestamp': '0',
'X-Backend-Storage-Policy-Index': policy
})
resp = req.get_response(self.controller)
self.assertEqual(400, resp.status_int)
self.assert_('invalid' in resp.body.lower())
# good policies
for policy in POLICIES:
req = Request.blank('/sda1/p/a/c_%s' % policy.name, method='PUT',
headers={
'X-Timestamp': '0',
'X-Backend-Storage-Policy-Index':
policy.idx,
})
self._check_put_container_storage_policy(req, policy.idx)
def test_acl_container(self):
# Ensure no acl by default
req = Request.blank(
'/sda1/p/a/c', environ={'REQUEST_METHOD': 'PUT'},
headers={'X-Timestamp': '0'})
resp = req.get_response(self.controller)
self.assert_(resp.status.startswith('201'))
req = Request.blank(
'/sda1/p/a/c', environ={'REQUEST_METHOD': 'HEAD'})
response = req.get_response(self.controller)
self.assert_(response.status.startswith('204'))
self.assert_('x-container-read' not in response.headers)
self.assert_('x-container-write' not in response.headers)
# Ensure POSTing acls works
req = Request.blank(
'/sda1/p/a/c', environ={'REQUEST_METHOD': 'POST'},
headers={'X-Timestamp': '1', 'X-Container-Read': '.r:*',
'X-Container-Write': 'account:user'})
resp = req.get_response(self.controller)
self.assert_(resp.status.startswith('204'))
req = Request.blank(
'/sda1/p/a/c', environ={'REQUEST_METHOD': 'HEAD'})
response = req.get_response(self.controller)
self.assert_(response.status.startswith('204'))
self.assertEquals(response.headers.get('x-container-read'), '.r:*')
self.assertEquals(response.headers.get('x-container-write'),
'account:user')
# Ensure we can clear acls on POST
req = Request.blank(
'/sda1/p/a/c', environ={'REQUEST_METHOD': 'POST'},
headers={'X-Timestamp': '3', 'X-Container-Read': '',
'X-Container-Write': ''})
resp = req.get_response(self.controller)
self.assert_(resp.status.startswith('204'))
req = Request.blank(
'/sda1/p/a/c', environ={'REQUEST_METHOD': 'HEAD'})
response = req.get_response(self.controller)
self.assert_(response.status.startswith('204'))
self.assert_('x-container-read' not in response.headers)
self.assert_('x-container-write' not in response.headers)
# Ensure PUTing acls works
req = Request.blank(
'/sda1/p/a/c2', environ={'REQUEST_METHOD': 'PUT'},
headers={'X-Timestamp': '4', 'X-Container-Read': '.r:*',
'X-Container-Write': 'account:user'})
resp = req.get_response(self.controller)
self.assert_(resp.status.startswith('201'))
req = Request.blank('/sda1/p/a/c2', environ={'REQUEST_METHOD': 'HEAD'})
response = req.get_response(self.controller)
self.assert_(response.status.startswith('204'))
self.assertEquals(response.headers.get('x-container-read'), '.r:*')
self.assertEquals(response.headers.get('x-container-write'),
'account:user')
def test_HEAD(self):
start = int(time.time())
ts = (Timestamp(t).internal for t in itertools.count(start))
req = Request.blank('/sda1/p/a/c', method='PUT', headers={
'x-timestamp': ts.next()})
req.get_response(self.controller)
req = Request.blank('/sda1/p/a/c', method='HEAD')
response = req.get_response(self.controller)
self.assertEqual(response.status_int, 204)
self.assertEqual(response.headers['x-container-bytes-used'], '0')
self.assertEqual(response.headers['x-container-object-count'], '0')
obj_put_request = Request.blank(
'/sda1/p/a/c/o', method='PUT', headers={
'x-timestamp': ts.next(),
'x-size': 42,
'x-content-type': 'text/plain',
'x-etag': 'x',
})
self._update_object_put_headers(obj_put_request)
obj_put_resp = obj_put_request.get_response(self.controller)
self.assertEqual(obj_put_resp.status_int // 100, 2)
# re-issue HEAD request
response = req.get_response(self.controller)
self.assertEqual(response.status_int // 100, 2)
self.assertEqual(response.headers['x-container-bytes-used'], '42')
self.assertEqual(response.headers['x-container-object-count'], '1')
# created at time...
created_at_header = Timestamp(response.headers['x-timestamp'])
self.assertEqual(response.headers['x-timestamp'],
created_at_header.normal)
self.assert_(created_at_header >= start)
self.assertEqual(response.headers['x-put-timestamp'],
Timestamp(start).normal)
# backend headers
self.assertEqual(int(response.headers
['X-Backend-Storage-Policy-Index']),
int(POLICIES.default))
self.assert_(
Timestamp(response.headers['x-backend-timestamp']) >= start)
self.assertEqual(response.headers['x-backend-put-timestamp'],
Timestamp(start).internal)
self.assertEqual(response.headers['x-backend-delete-timestamp'],
Timestamp(0).internal)
self.assertEqual(response.headers['x-backend-status-changed-at'],
Timestamp(start).internal)
def test_HEAD_not_found(self):
req = Request.blank('/sda1/p/a/c', method='HEAD')
resp = req.get_response(self.controller)
self.assertEqual(resp.status_int, 404)
self.assertEqual(int(resp.headers['X-Backend-Storage-Policy-Index']),
0)
self.assertEqual(resp.headers['x-backend-timestamp'],
Timestamp(0).internal)
self.assertEqual(resp.headers['x-backend-put-timestamp'],
Timestamp(0).internal)
self.assertEqual(resp.headers['x-backend-status-changed-at'],
Timestamp(0).internal)
self.assertEqual(resp.headers['x-backend-delete-timestamp'],
Timestamp(0).internal)
for header in ('x-container-object-count', 'x-container-bytes-used',
'x-timestamp', 'x-put-timestamp'):
self.assertEqual(resp.headers[header], None)
def test_deleted_headers(self):
ts = (Timestamp(t).internal for t in
itertools.count(int(time.time())))
request_method_times = {
'PUT': ts.next(),
'DELETE': ts.next(),
}
# setup a deleted container
for method in ('PUT', 'DELETE'):
x_timestamp = request_method_times[method]
req = Request.blank('/sda1/p/a/c', method=method,
headers={'x-timestamp': x_timestamp})
resp = req.get_response(self.controller)
self.assertEqual(resp.status_int // 100, 2)
for method in ('GET', 'HEAD'):
req = Request.blank('/sda1/p/a/c', method=method)
resp = req.get_response(self.controller)
self.assertEqual(resp.status_int, 404)
# backend headers
self.assertEqual(int(resp.headers[
'X-Backend-Storage-Policy-Index']),
int(POLICIES.default))
self.assert_(Timestamp(resp.headers['x-backend-timestamp']) >=
Timestamp(request_method_times['PUT']))
self.assertEqual(resp.headers['x-backend-put-timestamp'],
request_method_times['PUT'])
self.assertEqual(resp.headers['x-backend-delete-timestamp'],
request_method_times['DELETE'])
self.assertEqual(resp.headers['x-backend-status-changed-at'],
request_method_times['DELETE'])
for header in ('x-container-object-count',
'x-container-bytes-used', 'x-timestamp',
'x-put-timestamp'):
self.assertEqual(resp.headers[header], None)
def test_HEAD_invalid_partition(self):
req = Request.blank('/sda1/./a/c', environ={'REQUEST_METHOD': 'HEAD',
'HTTP_X_TIMESTAMP': '1'})
resp = req.get_response(self.controller)
self.assertEquals(resp.status_int, 400)
def test_HEAD_insufficient_storage(self):
self.controller = container_server.ContainerController(
{'devices': self.testdir})
req = Request.blank(
'/sda-null/p/a/c', environ={'REQUEST_METHOD': 'HEAD',
'HTTP_X_TIMESTAMP': '1'})
resp = req.get_response(self.controller)
self.assertEquals(resp.status_int, 507)
def test_HEAD_invalid_content_type(self):
req = Request.blank(
'/sda1/p/a/c', environ={'REQUEST_METHOD': 'HEAD'},
headers={'Accept': 'application/plain'})
resp = req.get_response(self.controller)
self.assertEquals(resp.status_int, 406)
def test_HEAD_invalid_format(self):
format = '%D1%BD%8A9' # invalid UTF-8; should be %E1%BD%8A9 (E -> D)
req = Request.blank(
'/sda1/p/a/c?format=' + format,
environ={'REQUEST_METHOD': 'HEAD'})
resp = req.get_response(self.controller)
self.assertEquals(resp.status_int, 400)
def test_PUT(self):
req = Request.blank(
'/sda1/p/a/c', environ={'REQUEST_METHOD': 'PUT',
'HTTP_X_TIMESTAMP': '1'})
resp = req.get_response(self.controller)
self.assertEquals(resp.status_int, 201)
req = Request.blank(
'/sda1/p/a/c', environ={'REQUEST_METHOD': 'PUT',
'HTTP_X_TIMESTAMP': '2'})
resp = req.get_response(self.controller)
self.assertEquals(resp.status_int, 202)
def test_PUT_simulated_create_race(self):
state = ['initial']
from swift.container.backend import ContainerBroker as OrigCoBr
class InterceptedCoBr(OrigCoBr):
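            # Broker subclass used to simulate a create race: in the 'race'
            # state __init__ points db_file at a non-existent path and
            # initialize() restores the real path, mimicking another request
            # creating the same DB in between the existence check and init.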
def __init__(self, *args, **kwargs):
super(InterceptedCoBr, self).__init__(*args, **kwargs)
if state[0] == 'initial':
# Do nothing initially
pass
elif state[0] == 'race':
# Save the original db_file attribute value
self._saved_db_file = self.db_file
self.db_file += '.doesnotexist'
def initialize(self, *args, **kwargs):
if state[0] == 'initial':
# Do nothing initially
pass
elif state[0] == 'race':
# Restore the original db_file attribute to get the race
# behavior
self.db_file = self._saved_db_file
return super(InterceptedCoBr, self).initialize(*args, **kwargs)
with mock.patch("swift.container.server.ContainerBroker",
InterceptedCoBr):
req = Request.blank(
'/sda1/p/a/c', environ={'REQUEST_METHOD': 'PUT',
'HTTP_X_TIMESTAMP': '1'})
resp = req.get_response(self.controller)
self.assertEqual(resp.status_int, 201)
state[0] = "race"
req = Request.blank(
'/sda1/p/a/c', environ={'REQUEST_METHOD': 'PUT',
'HTTP_X_TIMESTAMP': '1'})
resp = req.get_response(self.controller)
self.assertEqual(resp.status_int, 202)
def test_PUT_obj_not_found(self):
req = Request.blank(
'/sda1/p/a/c/o', environ={'REQUEST_METHOD': 'PUT'},
headers={'X-Timestamp': '1', 'X-Size': '0',
'X-Content-Type': 'text/plain', 'X-ETag': 'e'})
resp = req.get_response(self.controller)
self.assertEquals(resp.status_int, 404)
def test_PUT_good_policy_specified(self):
policy = random.choice(list(POLICIES))
# Set metadata header
req = Request.blank('/sda1/p/a/c', method='PUT',
headers={'X-Timestamp': Timestamp(1).internal,
'X-Backend-Storage-Policy-Index':
policy.idx})
resp = req.get_response(self.controller)
self.assertEquals(resp.status_int, 201)
# now make sure we read it back
req = Request.blank('/sda1/p/a/c', environ={'REQUEST_METHOD': 'GET'})
resp = req.get_response(self.controller)
self.assertEquals(resp.headers.get('X-Backend-Storage-Policy-Index'),
str(policy.idx))
def test_PUT_no_policy_specified(self):
# Set metadata header
req = Request.blank('/sda1/p/a/c', environ={'REQUEST_METHOD': 'PUT'},
headers={'X-Timestamp': Timestamp(1).internal})
resp = req.get_response(self.controller)
self.assertEquals(resp.status_int, 201)
# now make sure the default was used (pol 1)
req = Request.blank('/sda1/p/a/c', environ={'REQUEST_METHOD': 'GET'})
resp = req.get_response(self.controller)
self.assertEquals(resp.headers.get('X-Backend-Storage-Policy-Index'),
str(POLICIES.default.idx))
def test_PUT_bad_policy_specified(self):
# Set metadata header
req = Request.blank('/sda1/p/a/c', environ={'REQUEST_METHOD': 'PUT'},
headers={'X-Timestamp': Timestamp(1).internal,
'X-Backend-Storage-Policy-Index': 'nada'})
resp = req.get_response(self.controller)
# make sure we get bad response
self.assertEquals(resp.status_int, 400)
def test_PUT_no_policy_change(self):
ts = (Timestamp(t).internal for t in itertools.count(time.time()))
policy = random.choice(list(POLICIES))
# Set metadata header
req = Request.blank('/sda1/p/a/c', method='PUT', headers={
'X-Timestamp': ts.next(),
'X-Backend-Storage-Policy-Index': policy.idx})
resp = req.get_response(self.controller)
self.assertEquals(resp.status_int, 201)
req = Request.blank('/sda1/p/a/c')
resp = req.get_response(self.controller)
self.assertEquals(resp.status_int, 204)
# make sure we get the right index back
self.assertEquals(resp.headers.get('X-Backend-Storage-Policy-Index'),
str(policy.idx))
# now try to update w/o changing the policy
for method in ('POST', 'PUT'):
req = Request.blank('/sda1/p/a/c', method=method, headers={
'X-Timestamp': ts.next(),
'X-Backend-Storage-Policy-Index': policy.idx
})
resp = req.get_response(self.controller)
self.assertEquals(resp.status_int // 100, 2)
# make sure we get the right index back
req = Request.blank('/sda1/p/a/c', environ={'REQUEST_METHOD': 'GET'})
resp = req.get_response(self.controller)
self.assertEquals(resp.status_int, 204)
self.assertEquals(resp.headers.get('X-Backend-Storage-Policy-Index'),
str(policy.idx))
def test_PUT_bad_policy_change(self):
ts = (Timestamp(t).internal for t in itertools.count(time.time()))
policy = random.choice(list(POLICIES))
# Set metadata header
req = Request.blank('/sda1/p/a/c', method='PUT', headers={
'X-Timestamp': ts.next(),
'X-Backend-Storage-Policy-Index': policy.idx})
resp = req.get_response(self.controller)
self.assertEquals(resp.status_int, 201)
req = Request.blank('/sda1/p/a/c')
resp = req.get_response(self.controller)
self.assertEquals(resp.status_int, 204)
# make sure we get the right index back
self.assertEquals(resp.headers.get('X-Backend-Storage-Policy-Index'),
str(policy.idx))
other_policies = [p for p in POLICIES if p != policy]
for other_policy in other_policies:
# now try to change it and make sure we get a conflict
req = Request.blank('/sda1/p/a/c', method='PUT', headers={
'X-Timestamp': ts.next(),
'X-Backend-Storage-Policy-Index': other_policy.idx
})
resp = req.get_response(self.controller)
self.assertEquals(resp.status_int, 409)
# and make sure there is no change!
req = Request.blank('/sda1/p/a/c')
resp = req.get_response(self.controller)
self.assertEquals(resp.status_int, 204)
# make sure we get the right index back
self.assertEquals(resp.headers.get('X-Backend-Storage-Policy-Index'),
str(policy.idx))
def test_POST_ignores_policy_change(self):
ts = (Timestamp(t).internal for t in itertools.count(time.time()))
policy = random.choice(list(POLICIES))
req = Request.blank('/sda1/p/a/c', method='PUT', headers={
'X-Timestamp': ts.next(),
'X-Backend-Storage-Policy-Index': policy.idx})
resp = req.get_response(self.controller)
self.assertEquals(resp.status_int, 201)
req = Request.blank('/sda1/p/a/c')
resp = req.get_response(self.controller)
self.assertEquals(resp.status_int, 204)
# make sure we get the right index back
self.assertEquals(resp.headers.get('X-Backend-Storage-Policy-Index'),
str(policy.idx))
other_policies = [p for p in POLICIES if p != policy]
for other_policy in other_policies:
            # now try to change it via POST; the request succeeds but the
            # policy change is ignored
req = Request.blank('/sda1/p/a/c', method='POST', headers={
'X-Timestamp': ts.next(),
'X-Backend-Storage-Policy-Index': other_policy.idx
})
resp = req.get_response(self.controller)
# valid request
self.assertEquals(resp.status_int // 100, 2)
# but it does nothing
req = Request.blank('/sda1/p/a/c')
resp = req.get_response(self.controller)
self.assertEquals(resp.status_int, 204)
# make sure we get the right index back
            self.assertEquals(
                resp.headers.get('X-Backend-Storage-Policy-Index'),
                str(policy.idx))
def test_PUT_no_policy_for_existing_default(self):
ts = (Timestamp(t).internal for t in
itertools.count(int(time.time())))
# create a container with the default storage policy
req = Request.blank('/sda1/p/a/c', method='PUT', headers={
'X-Timestamp': ts.next(),
})
resp = req.get_response(self.controller)
self.assertEqual(resp.status_int, 201) # sanity check
# check the policy index
req = Request.blank('/sda1/p/a/c', method='HEAD')
resp = req.get_response(self.controller)
self.assertEqual(resp.status_int, 204)
self.assertEqual(resp.headers['X-Backend-Storage-Policy-Index'],
str(POLICIES.default.idx))
# put again without specifying the storage policy
req = Request.blank('/sda1/p/a/c', method='PUT', headers={
'X-Timestamp': ts.next(),
})
resp = req.get_response(self.controller)
self.assertEqual(resp.status_int, 202) # sanity check
# policy index is unchanged
req = Request.blank('/sda1/p/a/c', method='HEAD')
resp = req.get_response(self.controller)
self.assertEqual(resp.status_int, 204)
self.assertEqual(resp.headers['X-Backend-Storage-Policy-Index'],
str(POLICIES.default.idx))
def test_PUT_proxy_default_no_policy_for_existing_default(self):
# make it look like the proxy has a different default than we do, like
# during a config change restart across a multi node cluster.
proxy_default = random.choice([p for p in POLICIES if not
p.is_default])
ts = (Timestamp(t).internal for t in
itertools.count(int(time.time())))
# create a container with the default storage policy
req = Request.blank('/sda1/p/a/c', method='PUT', headers={
'X-Timestamp': ts.next(),
'X-Backend-Storage-Policy-Default': int(proxy_default),
})
resp = req.get_response(self.controller)
self.assertEqual(resp.status_int, 201) # sanity check
# check the policy index
req = Request.blank('/sda1/p/a/c', method='HEAD')
resp = req.get_response(self.controller)
self.assertEqual(resp.status_int, 204)
self.assertEqual(int(resp.headers['X-Backend-Storage-Policy-Index']),
int(proxy_default))
# put again without proxy specifying the different default
req = Request.blank('/sda1/p/a/c', method='PUT', headers={
'X-Timestamp': ts.next(),
'X-Backend-Storage-Policy-Default': int(POLICIES.default),
})
resp = req.get_response(self.controller)
self.assertEqual(resp.status_int, 202) # sanity check
# policy index is unchanged
req = Request.blank('/sda1/p/a/c', method='HEAD')
resp = req.get_response(self.controller)
self.assertEqual(resp.status_int, 204)
self.assertEqual(int(resp.headers['X-Backend-Storage-Policy-Index']),
int(proxy_default))
def test_PUT_no_policy_for_existing_non_default(self):
ts = (Timestamp(t).internal for t in itertools.count(time.time()))
non_default_policy = [p for p in POLICIES if not p.is_default][0]
# create a container with the non-default storage policy
req = Request.blank('/sda1/p/a/c', method='PUT', headers={
'X-Timestamp': ts.next(),
'X-Backend-Storage-Policy-Index': non_default_policy.idx,
})
resp = req.get_response(self.controller)
self.assertEqual(resp.status_int, 201) # sanity check
# check the policy index
req = Request.blank('/sda1/p/a/c', method='HEAD')
resp = req.get_response(self.controller)
self.assertEqual(resp.status_int, 204)
self.assertEqual(resp.headers['X-Backend-Storage-Policy-Index'],
str(non_default_policy.idx))
        # put again without specifying the storage policy
req = Request.blank('/sda1/p/a/c', method='PUT', headers={
'X-Timestamp': ts.next(),
})
resp = req.get_response(self.controller)
self.assertEqual(resp.status_int, 202) # sanity check
# policy index is unchanged
req = Request.blank('/sda1/p/a/c', method='HEAD')
resp = req.get_response(self.controller)
self.assertEqual(resp.status_int, 204)
self.assertEqual(resp.headers['X-Backend-Storage-Policy-Index'],
str(non_default_policy.idx))
def test_PUT_GET_metadata(self):
# Set metadata header
req = Request.blank(
'/sda1/p/a/c', environ={'REQUEST_METHOD': 'PUT'},
headers={'X-Timestamp': Timestamp(1).internal,
'X-Container-Meta-Test': 'Value'})
resp = req.get_response(self.controller)
self.assertEquals(resp.status_int, 201)
req = Request.blank('/sda1/p/a/c', environ={'REQUEST_METHOD': 'GET'})
resp = req.get_response(self.controller)
self.assertEquals(resp.status_int, 204)
self.assertEquals(resp.headers.get('x-container-meta-test'), 'Value')
# Set another metadata header, ensuring old one doesn't disappear
req = Request.blank(
'/sda1/p/a/c', environ={'REQUEST_METHOD': 'POST'},
headers={'X-Timestamp': Timestamp(1).internal,
'X-Container-Meta-Test2': 'Value2'})
resp = req.get_response(self.controller)
self.assertEquals(resp.status_int, 204)
req = Request.blank('/sda1/p/a/c', environ={'REQUEST_METHOD': 'GET'})
resp = req.get_response(self.controller)
self.assertEquals(resp.status_int, 204)
self.assertEquals(resp.headers.get('x-container-meta-test'), 'Value')
self.assertEquals(resp.headers.get('x-container-meta-test2'), 'Value2')
# Update metadata header
req = Request.blank(
'/sda1/p/a/c', environ={'REQUEST_METHOD': 'PUT'},
headers={'X-Timestamp': Timestamp(3).internal,
'X-Container-Meta-Test': 'New Value'})
resp = req.get_response(self.controller)
self.assertEquals(resp.status_int, 202)
req = Request.blank('/sda1/p/a/c', environ={'REQUEST_METHOD': 'GET'})
resp = req.get_response(self.controller)
self.assertEquals(resp.status_int, 204)
self.assertEquals(resp.headers.get('x-container-meta-test'),
'New Value')
# Send old update to metadata header
req = Request.blank(
'/sda1/p/a/c', environ={'REQUEST_METHOD': 'PUT'},
headers={'X-Timestamp': Timestamp(2).internal,
'X-Container-Meta-Test': 'Old Value'})
resp = req.get_response(self.controller)
self.assertEquals(resp.status_int, 202)
req = Request.blank('/sda1/p/a/c', environ={'REQUEST_METHOD': 'GET'})
resp = req.get_response(self.controller)
self.assertEquals(resp.status_int, 204)
self.assertEquals(resp.headers.get('x-container-meta-test'),
'New Value')
# Remove metadata header (by setting it to empty)
req = Request.blank(
'/sda1/p/a/c', environ={'REQUEST_METHOD': 'PUT'},
headers={'X-Timestamp': Timestamp(4).internal,
'X-Container-Meta-Test': ''})
resp = req.get_response(self.controller)
self.assertEquals(resp.status_int, 202)
req = Request.blank('/sda1/p/a/c', environ={'REQUEST_METHOD': 'GET'})
resp = req.get_response(self.controller)
self.assertEquals(resp.status_int, 204)
self.assert_('x-container-meta-test' not in resp.headers)
def test_PUT_GET_sys_metadata(self):
prefix = get_sys_meta_prefix('container')
key = '%sTest' % prefix
key2 = '%sTest2' % prefix
# Set metadata header
req = Request.blank('/sda1/p/a/c', environ={'REQUEST_METHOD': 'PUT'},
headers={'X-Timestamp': Timestamp(1).internal,
key: 'Value'})
resp = req.get_response(self.controller)
self.assertEquals(resp.status_int, 201)
req = Request.blank('/sda1/p/a/c', environ={'REQUEST_METHOD': 'GET'})
resp = req.get_response(self.controller)
self.assertEquals(resp.status_int, 204)
self.assertEquals(resp.headers.get(key.lower()), 'Value')
# Set another metadata header, ensuring old one doesn't disappear
req = Request.blank('/sda1/p/a/c', environ={'REQUEST_METHOD': 'POST'},
headers={'X-Timestamp': Timestamp(1).internal,
key2: 'Value2'})
resp = req.get_response(self.controller)
self.assertEquals(resp.status_int, 204)
req = Request.blank('/sda1/p/a/c', environ={'REQUEST_METHOD': 'GET'})
resp = req.get_response(self.controller)
self.assertEquals(resp.status_int, 204)
self.assertEquals(resp.headers.get(key.lower()), 'Value')
self.assertEquals(resp.headers.get(key2.lower()), 'Value2')
# Update metadata header
req = Request.blank('/sda1/p/a/c', environ={'REQUEST_METHOD': 'PUT'},
headers={'X-Timestamp': Timestamp(3).internal,
key: 'New Value'})
resp = req.get_response(self.controller)
self.assertEquals(resp.status_int, 202)
req = Request.blank('/sda1/p/a/c', environ={'REQUEST_METHOD': 'GET'})
resp = req.get_response(self.controller)
self.assertEquals(resp.status_int, 204)
self.assertEquals(resp.headers.get(key.lower()),
'New Value')
# Send old update to metadata header
req = Request.blank('/sda1/p/a/c', environ={'REQUEST_METHOD': 'PUT'},
headers={'X-Timestamp': Timestamp(2).internal,
key: 'Old Value'})
resp = req.get_response(self.controller)
self.assertEquals(resp.status_int, 202)
req = Request.blank('/sda1/p/a/c', environ={'REQUEST_METHOD': 'GET'})
resp = req.get_response(self.controller)
self.assertEquals(resp.status_int, 204)
self.assertEquals(resp.headers.get(key.lower()),
'New Value')
# Remove metadata header (by setting it to empty)
req = Request.blank('/sda1/p/a/c', environ={'REQUEST_METHOD': 'PUT'},
headers={'X-Timestamp': Timestamp(4).internal,
key: ''})
resp = req.get_response(self.controller)
self.assertEquals(resp.status_int, 202)
req = Request.blank('/sda1/p/a/c', environ={'REQUEST_METHOD': 'GET'})
resp = req.get_response(self.controller)
self.assertEquals(resp.status_int, 204)
self.assert_(key.lower() not in resp.headers)
def test_PUT_invalid_partition(self):
req = Request.blank('/sda1/./a/c', environ={'REQUEST_METHOD': 'PUT',
'HTTP_X_TIMESTAMP': '1'})
resp = req.get_response(self.controller)
self.assertEquals(resp.status_int, 400)
def test_PUT_timestamp_not_float(self):
req = Request.blank('/sda1/p/a/c', environ={'REQUEST_METHOD': 'PUT',
'HTTP_X_TIMESTAMP': '0'})
req.get_response(self.controller)
req = Request.blank('/sda1/p/a/c', environ={'REQUEST_METHOD': 'PUT'},
headers={'X-Timestamp': 'not-float'})
resp = req.get_response(self.controller)
self.assertEquals(resp.status_int, 400)
def test_PUT_insufficient_storage(self):
self.controller = container_server.ContainerController(
{'devices': self.testdir})
req = Request.blank(
'/sda-null/p/a/c', environ={'REQUEST_METHOD': 'PUT',
'HTTP_X_TIMESTAMP': '1'})
resp = req.get_response(self.controller)
self.assertEquals(resp.status_int, 507)
def test_POST_HEAD_metadata(self):
req = Request.blank(
'/sda1/p/a/c', environ={'REQUEST_METHOD': 'PUT'},
headers={'X-Timestamp': Timestamp(1).internal})
resp = req.get_response(self.controller)
self.assertEquals(resp.status_int, 201)
# Set metadata header
req = Request.blank(
'/sda1/p/a/c', environ={'REQUEST_METHOD': 'POST'},
headers={'X-Timestamp': Timestamp(1).internal,
'X-Container-Meta-Test': 'Value'})
resp = req.get_response(self.controller)
self.assertEquals(resp.status_int, 204)
req = Request.blank('/sda1/p/a/c', environ={'REQUEST_METHOD': 'HEAD'})
resp = req.get_response(self.controller)
self.assertEquals(resp.status_int, 204)
self.assertEquals(resp.headers.get('x-container-meta-test'), 'Value')
# Update metadata header
req = Request.blank(
'/sda1/p/a/c', environ={'REQUEST_METHOD': 'POST'},
headers={'X-Timestamp': Timestamp(3).internal,
'X-Container-Meta-Test': 'New Value'})
resp = req.get_response(self.controller)
self.assertEquals(resp.status_int, 204)
req = Request.blank('/sda1/p/a/c', environ={'REQUEST_METHOD': 'HEAD'})
resp = req.get_response(self.controller)
self.assertEquals(resp.status_int, 204)
self.assertEquals(resp.headers.get('x-container-meta-test'),
'New Value')
# Send old update to metadata header
req = Request.blank(
'/sda1/p/a/c', environ={'REQUEST_METHOD': 'POST'},
headers={'X-Timestamp': Timestamp(2).internal,
'X-Container-Meta-Test': 'Old Value'})
resp = req.get_response(self.controller)
self.assertEquals(resp.status_int, 204)
req = Request.blank('/sda1/p/a/c', environ={'REQUEST_METHOD': 'HEAD'})
resp = req.get_response(self.controller)
self.assertEquals(resp.status_int, 204)
self.assertEquals(resp.headers.get('x-container-meta-test'),
'New Value')
# Remove metadata header (by setting it to empty)
req = Request.blank(
'/sda1/p/a/c', environ={'REQUEST_METHOD': 'POST'},
headers={'X-Timestamp': Timestamp(4).internal,
'X-Container-Meta-Test': ''})
resp = req.get_response(self.controller)
self.assertEquals(resp.status_int, 204)
req = Request.blank('/sda1/p/a/c', environ={'REQUEST_METHOD': 'HEAD'})
resp = req.get_response(self.controller)
self.assertEquals(resp.status_int, 204)
self.assert_('x-container-meta-test' not in resp.headers)
def test_POST_HEAD_sys_metadata(self):
prefix = get_sys_meta_prefix('container')
key = '%sTest' % prefix
req = Request.blank('/sda1/p/a/c', environ={'REQUEST_METHOD': 'PUT'},
headers={'X-Timestamp': Timestamp(1).internal})
resp = req.get_response(self.controller)
self.assertEquals(resp.status_int, 201)
# Set metadata header
req = Request.blank('/sda1/p/a/c', environ={'REQUEST_METHOD': 'POST'},
headers={'X-Timestamp': Timestamp(1).internal,
key: 'Value'})
resp = req.get_response(self.controller)
self.assertEquals(resp.status_int, 204)
req = Request.blank('/sda1/p/a/c', environ={'REQUEST_METHOD': 'HEAD'})
resp = req.get_response(self.controller)
self.assertEquals(resp.status_int, 204)
self.assertEquals(resp.headers.get(key.lower()), 'Value')
# Update metadata header
req = Request.blank('/sda1/p/a/c', environ={'REQUEST_METHOD': 'POST'},
headers={'X-Timestamp': Timestamp(3).internal,
key: 'New Value'})
resp = req.get_response(self.controller)
self.assertEquals(resp.status_int, 204)
req = Request.blank('/sda1/p/a/c', environ={'REQUEST_METHOD': 'HEAD'})
resp = req.get_response(self.controller)
self.assertEquals(resp.status_int, 204)
self.assertEquals(resp.headers.get(key.lower()),
'New Value')
# Send old update to metadata header
req = Request.blank('/sda1/p/a/c', environ={'REQUEST_METHOD': 'POST'},
headers={'X-Timestamp': Timestamp(2).internal,
key: 'Old Value'})
resp = req.get_response(self.controller)
self.assertEquals(resp.status_int, 204)
req = Request.blank('/sda1/p/a/c', environ={'REQUEST_METHOD': 'HEAD'})
resp = req.get_response(self.controller)
self.assertEquals(resp.status_int, 204)
self.assertEquals(resp.headers.get(key.lower()),
'New Value')
# Remove metadata header (by setting it to empty)
req = Request.blank('/sda1/p/a/c', environ={'REQUEST_METHOD': 'POST'},
headers={'X-Timestamp': Timestamp(4).internal,
key: ''})
resp = req.get_response(self.controller)
self.assertEquals(resp.status_int, 204)
req = Request.blank('/sda1/p/a/c', environ={'REQUEST_METHOD': 'HEAD'})
resp = req.get_response(self.controller)
self.assertEquals(resp.status_int, 204)
self.assert_(key.lower() not in resp.headers)
def test_POST_invalid_partition(self):
req = Request.blank('/sda1/./a/c', environ={'REQUEST_METHOD': 'POST',
'HTTP_X_TIMESTAMP': '1'})
resp = req.get_response(self.controller)
self.assertEquals(resp.status_int, 400)
def test_POST_timestamp_not_float(self):
req = Request.blank('/sda1/p/a/c', environ={'REQUEST_METHOD': 'PUT',
'HTTP_X_TIMESTAMP': '0'})
req.get_response(self.controller)
req = Request.blank('/sda1/p/a/c', environ={'REQUEST_METHOD': 'POST'},
headers={'X-Timestamp': 'not-float'})
resp = req.get_response(self.controller)
self.assertEquals(resp.status_int, 400)
def test_POST_insufficient_storage(self):
self.controller = container_server.ContainerController(
{'devices': self.testdir})
req = Request.blank(
'/sda-null/p/a/c', environ={'REQUEST_METHOD': 'POST',
'HTTP_X_TIMESTAMP': '1'})
resp = req.get_response(self.controller)
self.assertEquals(resp.status_int, 507)
def test_POST_invalid_container_sync_to(self):
self.controller = container_server.ContainerController(
{'devices': self.testdir})
req = Request.blank(
'/sda-null/p/a/c', environ={'REQUEST_METHOD': 'POST',
'HTTP_X_TIMESTAMP': '1'},
headers={'x-container-sync-to': '192.168.0.1'})
resp = req.get_response(self.controller)
self.assertEquals(resp.status_int, 400)
def test_POST_after_DELETE_not_found(self):
req = Request.blank('/sda1/p/a/c',
environ={'REQUEST_METHOD': 'PUT'},
headers={'X-Timestamp': '1'})
resp = req.get_response(self.controller)
req = Request.blank('/sda1/p/a/c',
environ={'REQUEST_METHOD': 'DELETE'},
headers={'X-Timestamp': '2'})
resp = req.get_response(self.controller)
req = Request.blank('/sda1/p/a/c/',
environ={'REQUEST_METHOD': 'POST'},
headers={'X-Timestamp': '3'})
resp = req.get_response(self.controller)
self.assertEquals(resp.status_int, 404)
def test_DELETE_obj_not_found(self):
req = Request.blank(
'/sda1/p/a/c/o',
environ={'REQUEST_METHOD': 'DELETE'},
headers={'X-Timestamp': '1'})
resp = req.get_response(self.controller)
self.assertEquals(resp.status_int, 404)
def test_DELETE_container_not_found(self):
req = Request.blank('/sda1/p/a/c', environ={'REQUEST_METHOD': 'PUT',
'HTTP_X_TIMESTAMP': '0'})
resp = req.get_response(self.controller)
self.assertEquals(resp.status_int, 201)
req = Request.blank('/sda1/p/a/c', environ={'REQUEST_METHOD': 'DELETE',
'HTTP_X_TIMESTAMP': '1'})
resp = req.get_response(self.controller)
self.assertEquals(resp.status_int, 404)
def test_PUT_utf8(self):
snowman = u'\u2603'
container_name = snowman.encode('utf-8')
req = Request.blank(
'/sda1/p/a/%s' % container_name, environ={
'REQUEST_METHOD': 'PUT',
'HTTP_X_TIMESTAMP': '1'})
resp = req.get_response(self.controller)
self.assertEquals(resp.status_int, 201)
def test_account_update_mismatched_host_device(self):
req = Request.blank(
'/sda1/p/a/c',
environ={'REQUEST_METHOD': 'PUT',
'HTTP_X_TIMESTAMP': '1'},
headers={'X-Timestamp': '0000000001.00000',
'X-Account-Host': '127.0.0.1:0',
'X-Account-Partition': '123',
'X-Account-Device': 'sda1,sda2'})
broker = self.controller._get_container_broker('sda1', 'p', 'a', 'c')
resp = self.controller.account_update(req, 'a', 'c', broker)
self.assertEquals(resp.status_int, 400)
def test_account_update_account_override_deleted(self):
bindsock = listen(('127.0.0.1', 0))
req = Request.blank(
'/sda1/p/a/c',
environ={'REQUEST_METHOD': 'PUT',
'HTTP_X_TIMESTAMP': '1'},
headers={'X-Timestamp': '0000000001.00000',
'X-Account-Host': '%s:%s' %
bindsock.getsockname(),
'X-Account-Partition': '123',
'X-Account-Device': 'sda1',
'X-Account-Override-Deleted': 'yes'})
with save_globals():
new_connect = fake_http_connect(200, count=123)
swift.container.server.http_connect = new_connect
resp = req.get_response(self.controller)
self.assertEquals(resp.status_int, 201)
def test_PUT_account_update(self):
bindsock = listen(('127.0.0.1', 0))
def accept(return_code, expected_timestamp):
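            # Fake account server: accept one connection on bindsock, reply
            # with return_code, and verify the container server sent the
            # expected X-Put-Timestamp on its account update request.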
try:
with Timeout(3):
sock, addr = bindsock.accept()
inc = sock.makefile('rb')
out = sock.makefile('wb')
out.write('HTTP/1.1 %d OK\r\nContent-Length: 0\r\n\r\n' %
return_code)
out.flush()
self.assertEquals(inc.readline(),
'PUT /sda1/123/a/c HTTP/1.1\r\n')
headers = {}
line = inc.readline()
while line and line != '\r\n':
headers[line.split(':')[0].lower()] = \
line.split(':')[1].strip()
line = inc.readline()
self.assertEquals(headers['x-put-timestamp'],
expected_timestamp)
except BaseException as err:
return err
return None
req = Request.blank(
'/sda1/p/a/c',
environ={'REQUEST_METHOD': 'PUT'},
headers={'X-Timestamp': Timestamp(1).internal,
'X-Account-Host': '%s:%s' % bindsock.getsockname(),
'X-Account-Partition': '123',
'X-Account-Device': 'sda1'})
event = spawn(accept, 201, Timestamp(1).internal)
try:
with Timeout(3):
resp = req.get_response(self.controller)
self.assertEquals(resp.status_int, 201)
finally:
err = event.wait()
if err:
raise Exception(err)
req = Request.blank(
'/sda1/p/a/c',
environ={'REQUEST_METHOD': 'DELETE'},
headers={'X-Timestamp': '2'})
resp = req.get_response(self.controller)
self.assertEquals(resp.status_int, 204)
req = Request.blank(
'/sda1/p/a/c',
environ={'REQUEST_METHOD': 'PUT'},
headers={'X-Timestamp': Timestamp(3).internal,
'X-Account-Host': '%s:%s' % bindsock.getsockname(),
'X-Account-Partition': '123',
'X-Account-Device': 'sda1'})
event = spawn(accept, 404, Timestamp(3).internal)
try:
with Timeout(3):
resp = req.get_response(self.controller)
self.assertEquals(resp.status_int, 404)
finally:
err = event.wait()
if err:
raise Exception(err)
req = Request.blank(
'/sda1/p/a/c',
environ={'REQUEST_METHOD': 'PUT'},
headers={'X-Timestamp': Timestamp(5).internal,
'X-Account-Host': '%s:%s' % bindsock.getsockname(),
'X-Account-Partition': '123',
'X-Account-Device': 'sda1'})
event = spawn(accept, 503, Timestamp(5).internal)
got_exc = False
try:
with Timeout(3):
resp = req.get_response(self.controller)
except BaseException as err:
got_exc = True
finally:
err = event.wait()
if err:
raise Exception(err)
self.assert_(not got_exc)
def test_PUT_reset_container_sync(self):
req = Request.blank(
'/sda1/p/a/c', environ={'REQUEST_METHOD': 'PUT'},
headers={'x-timestamp': '1',
'x-container-sync-to': 'http://127.0.0.1:12345/v1/a/c'})
resp = req.get_response(self.controller)
self.assertEquals(resp.status_int, 201)
db = self.controller._get_container_broker('sda1', 'p', 'a', 'c')
info = db.get_info()
self.assertEquals(info['x_container_sync_point1'], -1)
self.assertEquals(info['x_container_sync_point2'], -1)
db.set_x_container_sync_points(123, 456)
info = db.get_info()
self.assertEquals(info['x_container_sync_point1'], 123)
self.assertEquals(info['x_container_sync_point2'], 456)
# Set to same value
req = Request.blank(
'/sda1/p/a/c', environ={'REQUEST_METHOD': 'PUT'},
headers={'x-timestamp': '1',
'x-container-sync-to': 'http://127.0.0.1:12345/v1/a/c'})
resp = req.get_response(self.controller)
self.assertEquals(resp.status_int, 202)
db = self.controller._get_container_broker('sda1', 'p', 'a', 'c')
info = db.get_info()
self.assertEquals(info['x_container_sync_point1'], 123)
self.assertEquals(info['x_container_sync_point2'], 456)
# Set to new value
req = Request.blank(
'/sda1/p/a/c', environ={'REQUEST_METHOD': 'PUT'},
headers={'x-timestamp': '1',
'x-container-sync-to': 'http://127.0.0.1:12345/v1/a/c2'})
resp = req.get_response(self.controller)
self.assertEquals(resp.status_int, 202)
db = self.controller._get_container_broker('sda1', 'p', 'a', 'c')
info = db.get_info()
self.assertEquals(info['x_container_sync_point1'], -1)
self.assertEquals(info['x_container_sync_point2'], -1)
def test_POST_reset_container_sync(self):
req = Request.blank(
'/sda1/p/a/c', environ={'REQUEST_METHOD': 'PUT'},
headers={'x-timestamp': '1',
'x-container-sync-to': 'http://127.0.0.1:12345/v1/a/c'})
resp = req.get_response(self.controller)
self.assertEquals(resp.status_int, 201)
db = self.controller._get_container_broker('sda1', 'p', 'a', 'c')
info = db.get_info()
self.assertEquals(info['x_container_sync_point1'], -1)
self.assertEquals(info['x_container_sync_point2'], -1)
db.set_x_container_sync_points(123, 456)
info = db.get_info()
self.assertEquals(info['x_container_sync_point1'], 123)
self.assertEquals(info['x_container_sync_point2'], 456)
# Set to same value
req = Request.blank(
'/sda1/p/a/c', environ={'REQUEST_METHOD': 'POST'},
headers={'x-timestamp': '1',
'x-container-sync-to': 'http://127.0.0.1:12345/v1/a/c'})
resp = req.get_response(self.controller)
self.assertEquals(resp.status_int, 204)
db = self.controller._get_container_broker('sda1', 'p', 'a', 'c')
info = db.get_info()
self.assertEquals(info['x_container_sync_point1'], 123)
self.assertEquals(info['x_container_sync_point2'], 456)
# Set to new value
req = Request.blank(
'/sda1/p/a/c', environ={'REQUEST_METHOD': 'POST'},
headers={'x-timestamp': '1',
'x-container-sync-to': 'http://127.0.0.1:12345/v1/a/c2'})
resp = req.get_response(self.controller)
self.assertEquals(resp.status_int, 204)
db = self.controller._get_container_broker('sda1', 'p', 'a', 'c')
info = db.get_info()
self.assertEquals(info['x_container_sync_point1'], -1)
self.assertEquals(info['x_container_sync_point2'], -1)
def test_DELETE(self):
req = Request.blank(
'/sda1/p/a/c',
environ={'REQUEST_METHOD': 'PUT'}, headers={'X-Timestamp': '1'})
resp = req.get_response(self.controller)
self.assertEquals(resp.status_int, 201)
req = Request.blank(
'/sda1/p/a/c',
environ={'REQUEST_METHOD': 'DELETE'}, headers={'X-Timestamp': '2'})
resp = req.get_response(self.controller)
self.assertEquals(resp.status_int, 204)
req = Request.blank(
'/sda1/p/a/c',
environ={'REQUEST_METHOD': 'GET'}, headers={'X-Timestamp': '3'})
resp = req.get_response(self.controller)
self.assertEquals(resp.status_int, 404)
def test_DELETE_PUT_recreate(self):
path = '/sda1/p/a/c'
req = Request.blank(path, method='PUT',
headers={'X-Timestamp': '1'})
resp = req.get_response(self.controller)
self.assertEquals(resp.status_int, 201)
req = Request.blank(path, method='DELETE',
headers={'X-Timestamp': '2'})
resp = req.get_response(self.controller)
self.assertEquals(resp.status_int, 204)
req = Request.blank(path, method='GET')
resp = req.get_response(self.controller)
self.assertEquals(resp.status_int, 404) # sanity
# backend headers
expectations = {
'x-backend-put-timestamp': Timestamp(1).internal,
'x-backend-delete-timestamp': Timestamp(2).internal,
'x-backend-status-changed-at': Timestamp(2).internal,
}
for header, value in expectations.items():
self.assertEqual(resp.headers[header], value,
'response header %s was %s not %s' % (
header, resp.headers[header], value))
db = self.controller._get_container_broker('sda1', 'p', 'a', 'c')
self.assertEqual(True, db.is_deleted())
info = db.get_info()
self.assertEquals(info['put_timestamp'], Timestamp('1').internal)
self.assertEquals(info['delete_timestamp'], Timestamp('2').internal)
self.assertEquals(info['status_changed_at'], Timestamp('2').internal)
# recreate
req = Request.blank(path, method='PUT',
headers={'X-Timestamp': '4'})
resp = req.get_response(self.controller)
self.assertEquals(resp.status_int, 201)
db = self.controller._get_container_broker('sda1', 'p', 'a', 'c')
self.assertEqual(False, db.is_deleted())
info = db.get_info()
self.assertEquals(info['put_timestamp'], Timestamp('4').internal)
self.assertEquals(info['delete_timestamp'], Timestamp('2').internal)
self.assertEquals(info['status_changed_at'], Timestamp('4').internal)
for method in ('GET', 'HEAD'):
req = Request.blank(path)
resp = req.get_response(self.controller)
expectations = {
'x-put-timestamp': Timestamp(4).normal,
'x-backend-put-timestamp': Timestamp(4).internal,
'x-backend-delete-timestamp': Timestamp(2).internal,
'x-backend-status-changed-at': Timestamp(4).internal,
}
for header, expected in expectations.items():
self.assertEqual(resp.headers[header], expected,
'header %s was %s is not expected %s' % (
header, resp.headers[header], expected))
def test_DELETE_PUT_recreate_replication_race(self):
path = '/sda1/p/a/c'
# create a deleted db
req = Request.blank(path, method='PUT',
headers={'X-Timestamp': '1'})
resp = req.get_response(self.controller)
self.assertEquals(resp.status_int, 201)
db = self.controller._get_container_broker('sda1', 'p', 'a', 'c')
req = Request.blank(path, method='DELETE',
headers={'X-Timestamp': '2'})
resp = req.get_response(self.controller)
self.assertEquals(resp.status_int, 204)
req = Request.blank(path, method='GET')
resp = req.get_response(self.controller)
self.assertEquals(resp.status_int, 404) # sanity
self.assertEqual(True, db.is_deleted())
# now save a copy of this db (and remove it from the "current node")
db = self.controller._get_container_broker('sda1', 'p', 'a', 'c')
db_path = db.db_file
other_path = os.path.join(self.testdir, 'othernode.db')
os.rename(db_path, other_path)
# that should make it missing on this node
req = Request.blank(path, method='GET')
resp = req.get_response(self.controller)
self.assertEquals(resp.status_int, 404) # sanity
# setup the race in os.path.exists (first time no, then yes)
mock_called = []
_real_exists = os.path.exists
def mock_exists(db_path):
rv = _real_exists(db_path)
if not mock_called:
# be as careful as we might hope backend replication can be...
with lock_parent_directory(db_path, timeout=1):
os.rename(other_path, db_path)
mock_called.append((rv, db_path))
return rv
req = Request.blank(path, method='PUT',
headers={'X-Timestamp': '4'})
with mock.patch.object(container_server.os.path, 'exists',
mock_exists):
resp = req.get_response(self.controller)
# db was successfully created
self.assertEqual(resp.status_int // 100, 2)
db = self.controller._get_container_broker('sda1', 'p', 'a', 'c')
self.assertEqual(False, db.is_deleted())
# mock proves the race
self.assertEqual(mock_called[:2],
[(exists, db.db_file) for exists in (False, True)])
# info was updated
info = db.get_info()
self.assertEquals(info['put_timestamp'], Timestamp('4').internal)
self.assertEquals(info['delete_timestamp'], Timestamp('2').internal)
def test_DELETE_not_found(self):
# Even if the container wasn't previously heard of, the container
# server will accept the delete and replicate it to where it belongs
# later.
req = Request.blank(
'/sda1/p/a/c',
environ={'REQUEST_METHOD': 'DELETE', 'HTTP_X_TIMESTAMP': '1'})
resp = req.get_response(self.controller)
self.assertEquals(resp.status_int, 404)
def test_change_storage_policy_via_DELETE_then_PUT(self):
ts = (Timestamp(t).internal for t in
itertools.count(int(time.time())))
policy = random.choice(list(POLICIES))
req = Request.blank(
'/sda1/p/a/c', method='PUT',
headers={'X-Timestamp': ts.next(),
'X-Backend-Storage-Policy-Index': policy.idx})
resp = req.get_response(self.controller)
self.assertEqual(resp.status_int, 201) # sanity check
        # try to re-create with the other policies
other_policies = [p for p in POLICIES if p != policy]
for other_policy in other_policies:
# first delete the existing container
req = Request.blank('/sda1/p/a/c', method='DELETE', headers={
'X-Timestamp': ts.next()})
resp = req.get_response(self.controller)
self.assertEqual(resp.status_int, 204) # sanity check
# at this point, the DB should still exist but be in a deleted
# state, so changing the policy index is perfectly acceptable
req = Request.blank('/sda1/p/a/c', method='PUT', headers={
'X-Timestamp': ts.next(),
'X-Backend-Storage-Policy-Index': other_policy.idx})
resp = req.get_response(self.controller)
self.assertEqual(resp.status_int, 201) # sanity check
req = Request.blank(
'/sda1/p/a/c', method='HEAD')
resp = req.get_response(self.controller)
self.assertEqual(resp.headers['X-Backend-Storage-Policy-Index'],
str(other_policy.idx))
def test_change_to_default_storage_policy_via_DELETE_then_PUT(self):
ts = (Timestamp(t).internal for t in
itertools.count(int(time.time())))
non_default_policy = random.choice([p for p in POLICIES
if not p.is_default])
req = Request.blank('/sda1/p/a/c', method='PUT', headers={
'X-Timestamp': ts.next(),
'X-Backend-Storage-Policy-Index': non_default_policy.idx,
})
resp = req.get_response(self.controller)
self.assertEqual(resp.status_int, 201) # sanity check
req = Request.blank(
'/sda1/p/a/c', method='DELETE',
headers={'X-Timestamp': ts.next()})
resp = req.get_response(self.controller)
self.assertEqual(resp.status_int, 204) # sanity check
# at this point, the DB should still exist but be in a deleted state,
# so changing the policy index is perfectly acceptable
req = Request.blank(
'/sda1/p/a/c', method='PUT',
headers={'X-Timestamp': ts.next()})
resp = req.get_response(self.controller)
self.assertEqual(resp.status_int, 201) # sanity check
req = Request.blank('/sda1/p/a/c', method='HEAD')
resp = req.get_response(self.controller)
self.assertEqual(resp.headers['X-Backend-Storage-Policy-Index'],
str(POLICIES.default.idx))
def test_DELETE_object(self):
req = Request.blank(
'/sda1/p/a/c', method='PUT', headers={
'X-Timestamp': Timestamp(2).internal})
resp = req.get_response(self.controller)
self.assertEquals(resp.status_int, 201)
req = Request.blank(
'/sda1/p/a/c/o', method='PUT', headers={
'X-Timestamp': Timestamp(0).internal, 'X-Size': 1,
'X-Content-Type': 'text/plain', 'X-Etag': 'x'})
self._update_object_put_headers(req)
resp = req.get_response(self.controller)
self.assertEquals(resp.status_int, 201)
ts = (Timestamp(t).internal for t in
itertools.count(3))
req = Request.blank('/sda1/p/a/c', method='DELETE', headers={
'X-Timestamp': ts.next()})
resp = req.get_response(self.controller)
self.assertEquals(resp.status_int, 409)
req = Request.blank('/sda1/p/a/c/o', method='DELETE', headers={
'X-Timestamp': ts.next()})
self._update_object_put_headers(req)
resp = req.get_response(self.controller)
self.assertEquals(resp.status_int, 204)
req = Request.blank('/sda1/p/a/c', method='DELETE', headers={
'X-Timestamp': ts.next()})
resp = req.get_response(self.controller)
self.assertEquals(resp.status_int, 204)
req = Request.blank('/sda1/p/a/c', method='GET', headers={
'X-Timestamp': ts.next()})
resp = req.get_response(self.controller)
self.assertEquals(resp.status_int, 404)
def test_object_update_with_offset(self):
ts = (Timestamp(t).internal for t in
itertools.count(int(time.time())))
# create container
req = Request.blank('/sda1/p/a/c', method='PUT', headers={
'X-Timestamp': ts.next()})
resp = req.get_response(self.controller)
self.assertEqual(resp.status_int, 201)
# check status
req = Request.blank('/sda1/p/a/c', method='HEAD')
resp = req.get_response(self.controller)
self.assertEqual(resp.status_int, 204)
self.assertEqual(int(resp.headers['X-Backend-Storage-Policy-Index']),
int(POLICIES.default))
# create object
obj_timestamp = ts.next()
req = Request.blank(
'/sda1/p/a/c/o', method='PUT', headers={
'X-Timestamp': obj_timestamp, 'X-Size': 1,
'X-Content-Type': 'text/plain', 'X-Etag': 'x'})
self._update_object_put_headers(req)
resp = req.get_response(self.controller)
self.assertEquals(resp.status_int, 201)
# check listing
req = Request.blank('/sda1/p/a/c', method='GET',
query_string='format=json')
resp = req.get_response(self.controller)
self.assertEqual(resp.status_int, 200)
self.assertEqual(int(resp.headers['X-Container-Object-Count']), 1)
self.assertEqual(int(resp.headers['X-Container-Bytes-Used']), 1)
listing_data = json.loads(resp.body)
self.assertEqual(1, len(listing_data))
for obj in listing_data:
self.assertEqual(obj['name'], 'o')
self.assertEqual(obj['bytes'], 1)
self.assertEqual(obj['hash'], 'x')
self.assertEqual(obj['content_type'], 'text/plain')
# send an update with an offset
offset_timestamp = Timestamp(obj_timestamp, offset=1).internal
req = Request.blank(
'/sda1/p/a/c/o', method='PUT', headers={
'X-Timestamp': offset_timestamp, 'X-Size': 2,
'X-Content-Type': 'text/html', 'X-Etag': 'y'})
self._update_object_put_headers(req)
resp = req.get_response(self.controller)
self.assertEquals(resp.status_int, 201)
# check updated listing
req = Request.blank('/sda1/p/a/c', method='GET',
query_string='format=json')
resp = req.get_response(self.controller)
self.assertEqual(resp.status_int, 200)
self.assertEqual(int(resp.headers['X-Container-Object-Count']), 1)
self.assertEqual(int(resp.headers['X-Container-Bytes-Used']), 2)
listing_data = json.loads(resp.body)
self.assertEqual(1, len(listing_data))
for obj in listing_data:
self.assertEqual(obj['name'], 'o')
self.assertEqual(obj['bytes'], 2)
self.assertEqual(obj['hash'], 'y')
self.assertEqual(obj['content_type'], 'text/html')
# now overwrite with a newer time
delete_timestamp = ts.next()
req = Request.blank(
'/sda1/p/a/c/o', method='DELETE', headers={
'X-Timestamp': delete_timestamp})
self._update_object_put_headers(req)
resp = req.get_response(self.controller)
self.assertEquals(resp.status_int, 204)
# check empty listing
req = Request.blank('/sda1/p/a/c', method='GET',
query_string='format=json')
resp = req.get_response(self.controller)
self.assertEqual(resp.status_int, 200)
self.assertEqual(int(resp.headers['X-Container-Object-Count']), 0)
self.assertEqual(int(resp.headers['X-Container-Bytes-Used']), 0)
listing_data = json.loads(resp.body)
self.assertEqual(0, len(listing_data))
# recreate with an offset
offset_timestamp = Timestamp(delete_timestamp, offset=1).internal
req = Request.blank(
'/sda1/p/a/c/o', method='PUT', headers={
'X-Timestamp': offset_timestamp, 'X-Size': 3,
'X-Content-Type': 'text/enriched', 'X-Etag': 'z'})
self._update_object_put_headers(req)
resp = req.get_response(self.controller)
self.assertEquals(resp.status_int, 201)
# check un-deleted listing
req = Request.blank('/sda1/p/a/c', method='GET',
query_string='format=json')
resp = req.get_response(self.controller)
self.assertEqual(resp.status_int, 200)
self.assertEqual(int(resp.headers['X-Container-Object-Count']), 1)
self.assertEqual(int(resp.headers['X-Container-Bytes-Used']), 3)
listing_data = json.loads(resp.body)
self.assertEqual(1, len(listing_data))
for obj in listing_data:
self.assertEqual(obj['name'], 'o')
self.assertEqual(obj['bytes'], 3)
self.assertEqual(obj['hash'], 'z')
self.assertEqual(obj['content_type'], 'text/enriched')
# delete offset with newer offset
delete_timestamp = Timestamp(offset_timestamp, offset=1).internal
req = Request.blank(
'/sda1/p/a/c/o', method='DELETE', headers={
'X-Timestamp': delete_timestamp})
self._update_object_put_headers(req)
resp = req.get_response(self.controller)
self.assertEquals(resp.status_int, 204)
# check empty listing
req = Request.blank('/sda1/p/a/c', method='GET',
query_string='format=json')
resp = req.get_response(self.controller)
self.assertEqual(resp.status_int, 200)
self.assertEqual(int(resp.headers['X-Container-Object-Count']), 0)
self.assertEqual(int(resp.headers['X-Container-Bytes-Used']), 0)
listing_data = json.loads(resp.body)
self.assertEqual(0, len(listing_data))
def test_DELETE_account_update(self):
bindsock = listen(('127.0.0.1', 0))
def accept(return_code, expected_timestamp):
try:
with Timeout(3):
sock, addr = bindsock.accept()
inc = sock.makefile('rb')
out = sock.makefile('wb')
out.write('HTTP/1.1 %d OK\r\nContent-Length: 0\r\n\r\n' %
return_code)
out.flush()
self.assertEquals(inc.readline(),
'PUT /sda1/123/a/c HTTP/1.1\r\n')
headers = {}
line = inc.readline()
while line and line != '\r\n':
headers[line.split(':')[0].lower()] = \
line.split(':')[1].strip()
line = inc.readline()
self.assertEquals(headers['x-delete-timestamp'],
expected_timestamp)
except BaseException as err:
return err
return None
req = Request.blank(
'/sda1/p/a/c',
environ={'REQUEST_METHOD': 'PUT'}, headers={'X-Timestamp': '1'})
resp = req.get_response(self.controller)
self.assertEquals(resp.status_int, 201)
req = Request.blank(
'/sda1/p/a/c',
environ={'REQUEST_METHOD': 'DELETE'},
headers={'X-Timestamp': Timestamp(2).internal,
'X-Account-Host': '%s:%s' % bindsock.getsockname(),
'X-Account-Partition': '123',
'X-Account-Device': 'sda1'})
event = spawn(accept, 204, Timestamp(2).internal)
try:
with Timeout(3):
resp = req.get_response(self.controller)
self.assertEquals(resp.status_int, 204)
finally:
err = event.wait()
if err:
raise Exception(err)
req = Request.blank(
'/sda1/p/a/c', method='PUT', headers={
'X-Timestamp': Timestamp(2).internal})
resp = req.get_response(self.controller)
self.assertEquals(resp.status_int, 201)
req = Request.blank(
'/sda1/p/a/c',
environ={'REQUEST_METHOD': 'DELETE'},
headers={'X-Timestamp': Timestamp(3).internal,
'X-Account-Host': '%s:%s' % bindsock.getsockname(),
'X-Account-Partition': '123',
'X-Account-Device': 'sda1'})
event = spawn(accept, 404, Timestamp(3).internal)
try:
with Timeout(3):
resp = req.get_response(self.controller)
self.assertEquals(resp.status_int, 404)
finally:
err = event.wait()
if err:
raise Exception(err)
req = Request.blank(
'/sda1/p/a/c', method='PUT', headers={
'X-Timestamp': Timestamp(4).internal})
resp = req.get_response(self.controller)
self.assertEquals(resp.status_int, 201)
req = Request.blank(
'/sda1/p/a/c',
environ={'REQUEST_METHOD': 'DELETE'},
headers={'X-Timestamp': Timestamp(5).internal,
'X-Account-Host': '%s:%s' % bindsock.getsockname(),
'X-Account-Partition': '123',
'X-Account-Device': 'sda1'})
event = spawn(accept, 503, Timestamp(5).internal)
got_exc = False
try:
with Timeout(3):
resp = req.get_response(self.controller)
except BaseException as err:
got_exc = True
finally:
err = event.wait()
if err:
raise Exception(err)
self.assert_(not got_exc)
def test_DELETE_invalid_partition(self):
req = Request.blank(
'/sda1/./a/c', environ={'REQUEST_METHOD': 'DELETE',
'HTTP_X_TIMESTAMP': '1'})
resp = req.get_response(self.controller)
self.assertEquals(resp.status_int, 400)
def test_DELETE_timestamp_not_float(self):
req = Request.blank(
'/sda1/p/a/c', environ={'REQUEST_METHOD': 'PUT',
'HTTP_X_TIMESTAMP': '0'})
req.get_response(self.controller)
req = Request.blank(
'/sda1/p/a/c', environ={'REQUEST_METHOD': 'DELETE'},
headers={'X-Timestamp': 'not-float'})
resp = req.get_response(self.controller)
self.assertEquals(resp.status_int, 400)
def test_DELETE_insufficient_storage(self):
self.controller = container_server.ContainerController(
{'devices': self.testdir})
req = Request.blank(
'/sda-null/p/a/c', environ={'REQUEST_METHOD': 'DELETE',
'HTTP_X_TIMESTAMP': '1'})
resp = req.get_response(self.controller)
self.assertEquals(resp.status_int, 507)
def test_GET_over_limit(self):
req = Request.blank(
'/sda1/p/a/c?limit=%d' %
(constraints.CONTAINER_LISTING_LIMIT + 1),
environ={'REQUEST_METHOD': 'GET'})
resp = req.get_response(self.controller)
self.assertEquals(resp.status_int, 412)
def test_GET_json(self):
# make a container
req = Request.blank(
'/sda1/p/a/jsonc', environ={'REQUEST_METHOD': 'PUT',
'HTTP_X_TIMESTAMP': '0'})
resp = req.get_response(self.controller)
# test an empty container
req = Request.blank(
'/sda1/p/a/jsonc?format=json',
environ={'REQUEST_METHOD': 'GET'})
resp = req.get_response(self.controller)
self.assertEquals(resp.status_int, 200)
self.assertEquals(simplejson.loads(resp.body), [])
# fill the container
for i in range(3):
req = Request.blank(
'/sda1/p/a/jsonc/%s' % i, environ={
'REQUEST_METHOD': 'PUT',
'HTTP_X_TIMESTAMP': '1',
'HTTP_X_CONTENT_TYPE': 'text/plain',
'HTTP_X_ETAG': 'x',
'HTTP_X_SIZE': 0})
self._update_object_put_headers(req)
resp = req.get_response(self.controller)
self.assertEquals(resp.status_int, 201)
# test format
json_body = [{"name": "0",
"hash": "x",
"bytes": 0,
"content_type": "text/plain",
"last_modified": "1970-01-01T00:00:01.000000"},
{"name": "1",
"hash": "x",
"bytes": 0,
"content_type": "text/plain",
"last_modified": "1970-01-01T00:00:01.000000"},
{"name": "2",
"hash": "x",
"bytes": 0,
"content_type": "text/plain",
"last_modified": "1970-01-01T00:00:01.000000"}]
req = Request.blank(
'/sda1/p/a/jsonc?format=json',
environ={'REQUEST_METHOD': 'GET'})
resp = req.get_response(self.controller)
self.assertEquals(resp.content_type, 'application/json')
self.assertEquals(simplejson.loads(resp.body), json_body)
self.assertEquals(resp.charset, 'utf-8')
req = Request.blank(
'/sda1/p/a/jsonc?format=json',
environ={'REQUEST_METHOD': 'HEAD'})
resp = req.get_response(self.controller)
self.assertEquals(resp.content_type, 'application/json')
for accept in ('application/json', 'application/json;q=1.0,*/*;q=0.9',
'*/*;q=0.9,application/json;q=1.0', 'application/*'):
req = Request.blank(
'/sda1/p/a/jsonc',
environ={'REQUEST_METHOD': 'GET'})
req.accept = accept
resp = req.get_response(self.controller)
self.assertEquals(
simplejson.loads(resp.body), json_body,
'Invalid body for Accept: %s' % accept)
self.assertEquals(
resp.content_type, 'application/json',
'Invalid content_type for Accept: %s' % accept)
req = Request.blank(
'/sda1/p/a/jsonc',
environ={'REQUEST_METHOD': 'HEAD'})
req.accept = accept
resp = req.get_response(self.controller)
self.assertEquals(
resp.content_type, 'application/json',
'Invalid content_type for Accept: %s' % accept)
def test_GET_plain(self):
# make a container
req = Request.blank(
'/sda1/p/a/plainc', environ={'REQUEST_METHOD': 'PUT',
'HTTP_X_TIMESTAMP': '0'})
resp = req.get_response(self.controller)
# test an empty container
req = Request.blank(
'/sda1/p/a/plainc', environ={'REQUEST_METHOD': 'GET'})
resp = req.get_response(self.controller)
self.assertEquals(resp.status_int, 204)
# fill the container
for i in range(3):
req = Request.blank(
'/sda1/p/a/plainc/%s' % i, environ={
'REQUEST_METHOD': 'PUT',
'HTTP_X_TIMESTAMP': '1',
'HTTP_X_CONTENT_TYPE': 'text/plain',
'HTTP_X_ETAG': 'x',
'HTTP_X_SIZE': 0})
self._update_object_put_headers(req)
resp = req.get_response(self.controller)
self.assertEquals(resp.status_int, 201)
plain_body = '0\n1\n2\n'
req = Request.blank('/sda1/p/a/plainc',
environ={'REQUEST_METHOD': 'GET'})
resp = req.get_response(self.controller)
self.assertEquals(resp.content_type, 'text/plain')
self.assertEquals(resp.body, plain_body)
self.assertEquals(resp.charset, 'utf-8')
req = Request.blank('/sda1/p/a/plainc',
environ={'REQUEST_METHOD': 'HEAD'})
resp = req.get_response(self.controller)
self.assertEquals(resp.content_type, 'text/plain')
for accept in ('', 'text/plain', 'application/xml;q=0.8,*/*;q=0.9',
'*/*;q=0.9,application/xml;q=0.8', '*/*',
'text/plain,application/xml'):
req = Request.blank(
'/sda1/p/a/plainc',
environ={'REQUEST_METHOD': 'GET'})
req.accept = accept
resp = req.get_response(self.controller)
self.assertEquals(
resp.body, plain_body,
'Invalid body for Accept: %s' % accept)
self.assertEquals(
resp.content_type, 'text/plain',
'Invalid content_type for Accept: %s' % accept)
req = Request.blank(
'/sda1/p/a/plainc',
environ={'REQUEST_METHOD': 'GET'})
req.accept = accept
resp = req.get_response(self.controller)
self.assertEquals(
resp.content_type, 'text/plain',
'Invalid content_type for Accept: %s' % accept)
# test conflicting formats
req = Request.blank(
'/sda1/p/a/plainc?format=plain',
environ={'REQUEST_METHOD': 'GET'})
req.accept = 'application/json'
resp = req.get_response(self.controller)
self.assertEquals(resp.content_type, 'text/plain')
self.assertEquals(resp.body, plain_body)
# test unknown format uses default plain
req = Request.blank(
'/sda1/p/a/plainc?format=somethingelse',
environ={'REQUEST_METHOD': 'GET'})
resp = req.get_response(self.controller)
self.assertEquals(resp.status_int, 200)
self.assertEquals(resp.content_type, 'text/plain')
self.assertEquals(resp.body, plain_body)
def test_GET_json_last_modified(self):
# make a container
req = Request.blank(
'/sda1/p/a/jsonc', environ={
'REQUEST_METHOD': 'PUT',
'HTTP_X_TIMESTAMP': '0'})
resp = req.get_response(self.controller)
for i, d in [(0, 1.5), (1, 1.0), ]:
req = Request.blank(
'/sda1/p/a/jsonc/%s' % i, environ={
'REQUEST_METHOD': 'PUT',
'HTTP_X_TIMESTAMP': d,
'HTTP_X_CONTENT_TYPE': 'text/plain',
'HTTP_X_ETAG': 'x',
'HTTP_X_SIZE': 0})
self._update_object_put_headers(req)
resp = req.get_response(self.controller)
self.assertEquals(resp.status_int, 201)
# test format
# last_modified format must be uniform, even when there are not msecs
json_body = [{"name": "0",
"hash": "x",
"bytes": 0,
"content_type": "text/plain",
"last_modified": "1970-01-01T00:00:01.500000"},
{"name": "1",
"hash": "x",
"bytes": 0,
"content_type": "text/plain",
"last_modified": "1970-01-01T00:00:01.000000"}, ]
req = Request.blank(
'/sda1/p/a/jsonc?format=json',
environ={'REQUEST_METHOD': 'GET'})
resp = req.get_response(self.controller)
self.assertEquals(resp.content_type, 'application/json')
self.assertEquals(simplejson.loads(resp.body), json_body)
self.assertEquals(resp.charset, 'utf-8')
def test_GET_xml(self):
# make a container
req = Request.blank(
'/sda1/p/a/xmlc', environ={'REQUEST_METHOD': 'PUT',
'HTTP_X_TIMESTAMP': '0'})
resp = req.get_response(self.controller)
# fill the container
for i in range(3):
req = Request.blank(
'/sda1/p/a/xmlc/%s' % i,
environ={
'REQUEST_METHOD': 'PUT',
'HTTP_X_TIMESTAMP': '1',
'HTTP_X_CONTENT_TYPE': 'text/plain',
'HTTP_X_ETAG': 'x',
'HTTP_X_SIZE': 0})
self._update_object_put_headers(req)
resp = req.get_response(self.controller)
self.assertEquals(resp.status_int, 201)
xml_body = '<?xml version="1.0" encoding="UTF-8"?>\n' \
'<container name="xmlc">' \
'<object><name>0</name><hash>x</hash><bytes>0</bytes>' \
'<content_type>text/plain</content_type>' \
'<last_modified>1970-01-01T00:00:01.000000' \
'</last_modified></object>' \
'<object><name>1</name><hash>x</hash><bytes>0</bytes>' \
'<content_type>text/plain</content_type>' \
'<last_modified>1970-01-01T00:00:01.000000' \
'</last_modified></object>' \
'<object><name>2</name><hash>x</hash><bytes>0</bytes>' \
'<content_type>text/plain</content_type>' \
'<last_modified>1970-01-01T00:00:01.000000' \
'</last_modified></object>' \
'</container>'
# tests
req = Request.blank(
'/sda1/p/a/xmlc?format=xml',
environ={'REQUEST_METHOD': 'GET'})
resp = req.get_response(self.controller)
self.assertEquals(resp.content_type, 'application/xml')
self.assertEquals(resp.body, xml_body)
self.assertEquals(resp.charset, 'utf-8')
req = Request.blank(
'/sda1/p/a/xmlc?format=xml',
environ={'REQUEST_METHOD': 'HEAD'})
resp = req.get_response(self.controller)
self.assertEquals(resp.content_type, 'application/xml')
for xml_accept in (
'application/xml', 'application/xml;q=1.0,*/*;q=0.9',
'*/*;q=0.9,application/xml;q=1.0', 'application/xml,text/xml'):
req = Request.blank(
'/sda1/p/a/xmlc',
environ={'REQUEST_METHOD': 'GET'})
req.accept = xml_accept
resp = req.get_response(self.controller)
self.assertEquals(
resp.body, xml_body,
'Invalid body for Accept: %s' % xml_accept)
self.assertEquals(
resp.content_type, 'application/xml',
'Invalid content_type for Accept: %s' % xml_accept)
req = Request.blank(
'/sda1/p/a/xmlc',
environ={'REQUEST_METHOD': 'HEAD'})
req.accept = xml_accept
resp = req.get_response(self.controller)
self.assertEquals(
resp.content_type, 'application/xml',
'Invalid content_type for Accept: %s' % xml_accept)
req = Request.blank(
'/sda1/p/a/xmlc',
environ={'REQUEST_METHOD': 'GET'})
req.accept = 'text/xml'
resp = req.get_response(self.controller)
self.assertEquals(resp.content_type, 'text/xml')
self.assertEquals(resp.body, xml_body)
def test_GET_marker(self):
# make a container
req = Request.blank(
'/sda1/p/a/c', environ={'REQUEST_METHOD': 'PUT',
'HTTP_X_TIMESTAMP': '0'})
resp = req.get_response(self.controller)
# fill the container
for i in range(3):
req = Request.blank(
'/sda1/p/a/c/%s' % i, environ={
'REQUEST_METHOD': 'PUT',
'HTTP_X_TIMESTAMP': '1',
'HTTP_X_CONTENT_TYPE': 'text/plain',
'HTTP_X_ETAG': 'x', 'HTTP_X_SIZE': 0})
self._update_object_put_headers(req)
resp = req.get_response(self.controller)
self.assertEquals(resp.status_int, 201)
# test limit with marker
req = Request.blank('/sda1/p/a/c?limit=2&marker=1',
environ={'REQUEST_METHOD': 'GET'})
resp = req.get_response(self.controller)
result = resp.body.split()
self.assertEquals(result, ['2', ])
def test_weird_content_types(self):
snowman = u'\u2603'
req = Request.blank(
'/sda1/p/a/c', environ={'REQUEST_METHOD': 'PUT',
'HTTP_X_TIMESTAMP': '0'})
resp = req.get_response(self.controller)
for i, ctype in enumerate((snowman.encode('utf-8'),
'text/plain; charset="utf-8"')):
req = Request.blank(
'/sda1/p/a/c/%s' % i, environ={
'REQUEST_METHOD': 'PUT',
'HTTP_X_TIMESTAMP': '1', 'HTTP_X_CONTENT_TYPE': ctype,
'HTTP_X_ETAG': 'x', 'HTTP_X_SIZE': 0})
self._update_object_put_headers(req)
resp = req.get_response(self.controller)
self.assertEquals(resp.status_int, 201)
req = Request.blank('/sda1/p/a/c?format=json',
environ={'REQUEST_METHOD': 'GET'})
resp = req.get_response(self.controller)
result = [x['content_type'] for x in simplejson.loads(resp.body)]
self.assertEquals(result, [u'\u2603', 'text/plain;charset="utf-8"'])
def test_GET_accept_not_valid(self):
req = Request.blank('/sda1/p/a/c', method='PUT', headers={
'X-Timestamp': Timestamp(0).internal})
resp = req.get_response(self.controller)
self.assertEqual(resp.status_int, 201)
req = Request.blank('/sda1/p/a/c', method='GET')
req.accept = 'application/xml*'
resp = req.get_response(self.controller)
self.assertEquals(resp.status_int, 406)
def test_GET_limit(self):
# make a container
req = Request.blank(
'/sda1/p/a/c', environ={'REQUEST_METHOD': 'PUT',
'HTTP_X_TIMESTAMP': '0'})
resp = req.get_response(self.controller)
# fill the container
for i in range(3):
req = Request.blank(
'/sda1/p/a/c/%s' % i,
environ={
'REQUEST_METHOD': 'PUT',
'HTTP_X_TIMESTAMP': '1',
'HTTP_X_CONTENT_TYPE': 'text/plain',
'HTTP_X_ETAG': 'x',
'HTTP_X_SIZE': 0})
self._update_object_put_headers(req)
resp = req.get_response(self.controller)
self.assertEquals(resp.status_int, 201)
# test limit
req = Request.blank(
'/sda1/p/a/c?limit=2', environ={'REQUEST_METHOD': 'GET'})
resp = req.get_response(self.controller)
result = resp.body.split()
self.assertEquals(result, ['0', '1'])
def test_GET_prefix(self):
req = Request.blank(
'/sda1/p/a/c', environ={'REQUEST_METHOD': 'PUT',
'HTTP_X_TIMESTAMP': '0'})
resp = req.get_response(self.controller)
for i in ('a1', 'b1', 'a2', 'b2', 'a3', 'b3'):
req = Request.blank(
'/sda1/p/a/c/%s' % i,
environ={
'REQUEST_METHOD': 'PUT',
'HTTP_X_TIMESTAMP': '1',
'HTTP_X_CONTENT_TYPE': 'text/plain',
'HTTP_X_ETAG': 'x',
'HTTP_X_SIZE': 0})
self._update_object_put_headers(req)
resp = req.get_response(self.controller)
self.assertEquals(resp.status_int, 201)
req = Request.blank(
'/sda1/p/a/c?prefix=a', environ={'REQUEST_METHOD': 'GET'})
resp = req.get_response(self.controller)
self.assertEquals(resp.body.split(), ['a1', 'a2', 'a3'])
def test_GET_delimiter_too_long(self):
req = Request.blank('/sda1/p/a/c?delimiter=xx',
environ={'REQUEST_METHOD': 'GET',
'HTTP_X_TIMESTAMP': '0'})
resp = req.get_response(self.controller)
self.assertEquals(resp.status_int, 412)
def test_GET_delimiter(self):
req = Request.blank(
'/sda1/p/a/c', environ={'REQUEST_METHOD': 'PUT',
'HTTP_X_TIMESTAMP': '0'})
resp = req.get_response(self.controller)
for i in ('US-TX-A', 'US-TX-B', 'US-OK-A', 'US-OK-B', 'US-UT-A'):
req = Request.blank(
'/sda1/p/a/c/%s' % i,
environ={
'REQUEST_METHOD': 'PUT', 'HTTP_X_TIMESTAMP': '1',
'HTTP_X_CONTENT_TYPE': 'text/plain', 'HTTP_X_ETAG': 'x',
'HTTP_X_SIZE': 0})
self._update_object_put_headers(req)
resp = req.get_response(self.controller)
self.assertEquals(resp.status_int, 201)
req = Request.blank(
'/sda1/p/a/c?prefix=US-&delimiter=-&format=json',
environ={'REQUEST_METHOD': 'GET'})
resp = req.get_response(self.controller)
self.assertEquals(
simplejson.loads(resp.body),
[{"subdir": "US-OK-"},
{"subdir": "US-TX-"},
{"subdir": "US-UT-"}])
def test_GET_delimiter_xml(self):
req = Request.blank(
'/sda1/p/a/c', environ={'REQUEST_METHOD': 'PUT',
'HTTP_X_TIMESTAMP': '0'})
resp = req.get_response(self.controller)
for i in ('US-TX-A', 'US-TX-B', 'US-OK-A', 'US-OK-B', 'US-UT-A'):
req = Request.blank(
'/sda1/p/a/c/%s' % i,
environ={
'REQUEST_METHOD': 'PUT', 'HTTP_X_TIMESTAMP': '1',
'HTTP_X_CONTENT_TYPE': 'text/plain', 'HTTP_X_ETAG': 'x',
'HTTP_X_SIZE': 0})
self._update_object_put_headers(req)
resp = req.get_response(self.controller)
self.assertEquals(resp.status_int, 201)
req = Request.blank(
'/sda1/p/a/c?prefix=US-&delimiter=-&format=xml',
environ={'REQUEST_METHOD': 'GET'})
resp = req.get_response(self.controller)
self.assertEquals(
resp.body, '<?xml version="1.0" encoding="UTF-8"?>'
'\n<container name="c"><subdir name="US-OK-">'
'<name>US-OK-</name></subdir>'
'<subdir name="US-TX-"><name>US-TX-</name></subdir>'
'<subdir name="US-UT-"><name>US-UT-</name></subdir></container>')
def test_GET_delimiter_xml_with_quotes(self):
req = Request.blank(
'/sda1/p/a/c', environ={'REQUEST_METHOD': 'PUT',
'HTTP_X_TIMESTAMP': '0'})
resp = req.get_response(self.controller)
req = Request.blank(
'/sda1/p/a/c/<\'sub\' "dir">/object',
environ={
'REQUEST_METHOD': 'PUT', 'HTTP_X_TIMESTAMP': '1',
'HTTP_X_CONTENT_TYPE': 'text/plain', 'HTTP_X_ETAG': 'x',
'HTTP_X_SIZE': 0})
self._update_object_put_headers(req)
resp = req.get_response(self.controller)
self.assertEquals(resp.status_int, 201)
req = Request.blank(
'/sda1/p/a/c?delimiter=/&format=xml',
environ={'REQUEST_METHOD': 'GET'})
resp = req.get_response(self.controller)
dom = minidom.parseString(resp.body)
self.assert_(len(dom.getElementsByTagName('container')) == 1)
container = dom.getElementsByTagName('container')[0]
self.assert_(len(container.getElementsByTagName('subdir')) == 1)
subdir = container.getElementsByTagName('subdir')[0]
self.assertEquals(unicode(subdir.attributes['name'].value),
u'<\'sub\' "dir">/')
self.assert_(len(subdir.getElementsByTagName('name')) == 1)
name = subdir.getElementsByTagName('name')[0]
self.assertEquals(unicode(name.childNodes[0].data),
u'<\'sub\' "dir">/')
def test_GET_path(self):
req = Request.blank(
'/sda1/p/a/c', environ={'REQUEST_METHOD': 'PUT',
'HTTP_X_TIMESTAMP': '0'})
resp = req.get_response(self.controller)
for i in ('US/TX', 'US/TX/B', 'US/OK', 'US/OK/B', 'US/UT/A'):
req = Request.blank(
'/sda1/p/a/c/%s' % i,
environ={
'REQUEST_METHOD': 'PUT', 'HTTP_X_TIMESTAMP': '1',
'HTTP_X_CONTENT_TYPE': 'text/plain', 'HTTP_X_ETAG': 'x',
'HTTP_X_SIZE': 0})
self._update_object_put_headers(req)
resp = req.get_response(self.controller)
self.assertEquals(resp.status_int, 201)
req = Request.blank(
'/sda1/p/a/c?path=US&format=json',
environ={'REQUEST_METHOD': 'GET'})
resp = req.get_response(self.controller)
self.assertEquals(
simplejson.loads(resp.body),
[{"name": "US/OK", "hash": "x", "bytes": 0,
"content_type": "text/plain",
"last_modified": "1970-01-01T00:00:01.000000"},
{"name": "US/TX", "hash": "x", "bytes": 0,
"content_type": "text/plain",
"last_modified": "1970-01-01T00:00:01.000000"}])
def test_GET_insufficient_storage(self):
self.controller = container_server.ContainerController(
{'devices': self.testdir})
req = Request.blank(
'/sda-null/p/a/c', environ={'REQUEST_METHOD': 'GET',
'HTTP_X_TIMESTAMP': '1'})
resp = req.get_response(self.controller)
self.assertEquals(resp.status_int, 507)
def test_through_call(self):
inbuf = StringIO()
errbuf = StringIO()
outbuf = StringIO()
def start_response(*args):
outbuf.writelines(args)
self.controller.__call__({'REQUEST_METHOD': 'GET',
'SCRIPT_NAME': '',
'PATH_INFO': '/sda1/p/a/c',
'SERVER_NAME': '127.0.0.1',
'SERVER_PORT': '8080',
'SERVER_PROTOCOL': 'HTTP/1.0',
'CONTENT_LENGTH': '0',
'wsgi.version': (1, 0),
'wsgi.url_scheme': 'http',
'wsgi.input': inbuf,
'wsgi.errors': errbuf,
'wsgi.multithread': False,
'wsgi.multiprocess': False,
'wsgi.run_once': False},
start_response)
self.assertEquals(errbuf.getvalue(), '')
self.assertEquals(outbuf.getvalue()[:4], '404 ')
def test_through_call_invalid_path(self):
inbuf = StringIO()
errbuf = StringIO()
outbuf = StringIO()
def start_response(*args):
outbuf.writelines(args)
self.controller.__call__({'REQUEST_METHOD': 'GET',
'SCRIPT_NAME': '',
'PATH_INFO': '/bob',
'SERVER_NAME': '127.0.0.1',
'SERVER_PORT': '8080',
'SERVER_PROTOCOL': 'HTTP/1.0',
'CONTENT_LENGTH': '0',
'wsgi.version': (1, 0),
'wsgi.url_scheme': 'http',
'wsgi.input': inbuf,
'wsgi.errors': errbuf,
'wsgi.multithread': False,
'wsgi.multiprocess': False,
'wsgi.run_once': False},
start_response)
self.assertEquals(errbuf.getvalue(), '')
self.assertEquals(outbuf.getvalue()[:4], '400 ')
def test_through_call_invalid_path_utf8(self):
inbuf = StringIO()
errbuf = StringIO()
outbuf = StringIO()
def start_response(*args):
outbuf.writelines(args)
self.controller.__call__({'REQUEST_METHOD': 'GET',
'SCRIPT_NAME': '',
'PATH_INFO': '\x00',
'SERVER_NAME': '127.0.0.1',
'SERVER_PORT': '8080',
'SERVER_PROTOCOL': 'HTTP/1.0',
'CONTENT_LENGTH': '0',
'wsgi.version': (1, 0),
'wsgi.url_scheme': 'http',
'wsgi.input': inbuf,
'wsgi.errors': errbuf,
'wsgi.multithread': False,
'wsgi.multiprocess': False,
'wsgi.run_once': False},
start_response)
self.assertEquals(errbuf.getvalue(), '')
self.assertEquals(outbuf.getvalue()[:4], '412 ')
def test_invalid_method_doesnt_exist(self):
errbuf = StringIO()
outbuf = StringIO()
def start_response(*args):
outbuf.writelines(args)
self.controller.__call__({'REQUEST_METHOD': 'method_doesnt_exist',
'PATH_INFO': '/sda1/p/a/c'},
start_response)
self.assertEquals(errbuf.getvalue(), '')
self.assertEquals(outbuf.getvalue()[:4], '405 ')
def test_invalid_method_is_not_public(self):
errbuf = StringIO()
outbuf = StringIO()
def start_response(*args):
outbuf.writelines(args)
self.controller.__call__({'REQUEST_METHOD': '__init__',
'PATH_INFO': '/sda1/p/a/c'},
start_response)
self.assertEquals(errbuf.getvalue(), '')
self.assertEquals(outbuf.getvalue()[:4], '405 ')
def test_params_format(self):
req = Request.blank(
'/sda1/p/a/c', method='PUT',
headers={'X-Timestamp': Timestamp(1).internal})
req.get_response(self.controller)
for format in ('xml', 'json'):
req = Request.blank('/sda1/p/a/c?format=%s' % format,
method='GET')
resp = req.get_response(self.controller)
self.assertEquals(resp.status_int, 200)
def test_params_utf8(self):
# Bad UTF8 sequence, all parameters should cause 400 error
for param in ('delimiter', 'limit', 'marker', 'path', 'prefix',
'end_marker', 'format'):
req = Request.blank('/sda1/p/a/c?%s=\xce' % param,
environ={'REQUEST_METHOD': 'GET'})
resp = req.get_response(self.controller)
self.assertEquals(resp.status_int, 400,
"%d on param %s" % (resp.status_int, param))
# Good UTF8 sequence for delimiter, too long (1 byte delimiters only)
req = Request.blank('/sda1/p/a/c?delimiter=\xce\xa9',
environ={'REQUEST_METHOD': 'GET'})
resp = req.get_response(self.controller)
self.assertEquals(resp.status_int, 412,
"%d on param delimiter" % (resp.status_int))
req = Request.blank('/sda1/p/a/c', method='PUT',
headers={'X-Timestamp': Timestamp(1).internal})
req.get_response(self.controller)
# Good UTF8 sequence, ignored for limit, doesn't affect other queries
for param in ('limit', 'marker', 'path', 'prefix', 'end_marker',
'format'):
req = Request.blank('/sda1/p/a/c?%s=\xce\xa9' % param,
environ={'REQUEST_METHOD': 'GET'})
resp = req.get_response(self.controller)
self.assertEquals(resp.status_int, 204,
"%d on param %s" % (resp.status_int, param))
def test_put_auto_create(self):
headers = {'x-timestamp': Timestamp(1).internal,
'x-size': '0',
'x-content-type': 'text/plain',
'x-etag': 'd41d8cd98f00b204e9800998ecf8427e'}
req = Request.blank('/sda1/p/a/c/o',
environ={'REQUEST_METHOD': 'PUT'},
headers=dict(headers))
resp = req.get_response(self.controller)
self.assertEquals(resp.status_int, 404)
req = Request.blank('/sda1/p/.a/c/o',
environ={'REQUEST_METHOD': 'PUT'},
headers=dict(headers))
resp = req.get_response(self.controller)
self.assertEquals(resp.status_int, 201)
req = Request.blank('/sda1/p/a/.c/o',
environ={'REQUEST_METHOD': 'PUT'},
headers=dict(headers))
resp = req.get_response(self.controller)
self.assertEquals(resp.status_int, 404)
req = Request.blank('/sda1/p/a/c/.o',
environ={'REQUEST_METHOD': 'PUT'},
headers=dict(headers))
resp = req.get_response(self.controller)
self.assertEquals(resp.status_int, 404)
def test_delete_auto_create(self):
headers = {'x-timestamp': Timestamp(1).internal}
req = Request.blank('/sda1/p/a/c/o',
environ={'REQUEST_METHOD': 'DELETE'},
headers=dict(headers))
resp = req.get_response(self.controller)
self.assertEquals(resp.status_int, 404)
req = Request.blank('/sda1/p/.a/c/o',
environ={'REQUEST_METHOD': 'DELETE'},
headers=dict(headers))
resp = req.get_response(self.controller)
self.assertEquals(resp.status_int, 204)
req = Request.blank('/sda1/p/a/.c/o',
environ={'REQUEST_METHOD': 'DELETE'},
headers=dict(headers))
resp = req.get_response(self.controller)
self.assertEquals(resp.status_int, 404)
req = Request.blank('/sda1/p/a/.c/.o',
environ={'REQUEST_METHOD': 'DELETE'},
headers=dict(headers))
resp = req.get_response(self.controller)
self.assertEquals(resp.status_int, 404)
def test_content_type_on_HEAD(self):
Request.blank('/sda1/p/a/o',
headers={'X-Timestamp': Timestamp(1).internal},
environ={'REQUEST_METHOD': 'PUT'}).get_response(
self.controller)
env = {'REQUEST_METHOD': 'HEAD'}
req = Request.blank('/sda1/p/a/o?format=xml', environ=env)
resp = req.get_response(self.controller)
self.assertEquals(resp.content_type, 'application/xml')
self.assertEquals(resp.charset, 'utf-8')
req = Request.blank('/sda1/p/a/o?format=json', environ=env)
resp = req.get_response(self.controller)
self.assertEquals(resp.content_type, 'application/json')
self.assertEquals(resp.charset, 'utf-8')
req = Request.blank('/sda1/p/a/o', environ=env)
resp = req.get_response(self.controller)
self.assertEquals(resp.content_type, 'text/plain')
self.assertEquals(resp.charset, 'utf-8')
req = Request.blank(
'/sda1/p/a/o', headers={'Accept': 'application/json'}, environ=env)
resp = req.get_response(self.controller)
self.assertEquals(resp.content_type, 'application/json')
self.assertEquals(resp.charset, 'utf-8')
req = Request.blank(
'/sda1/p/a/o', headers={'Accept': 'application/xml'}, environ=env)
resp = req.get_response(self.controller)
self.assertEquals(resp.content_type, 'application/xml')
self.assertEquals(resp.charset, 'utf-8')
def test_updating_multiple_container_servers(self):
http_connect_args = []
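        # http_connect_args collects the arguments of each faked account-server
        # connection so the assertions below can verify what was sent.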
def fake_http_connect(ipaddr, port, device, partition, method, path,
headers=None, query_string=None, ssl=False):
class SuccessfulFakeConn(object):
@property
def status(self):
return 200
def getresponse(self):
return self
def read(self):
return ''
captured_args = {'ipaddr': ipaddr, 'port': port,
'device': device, 'partition': partition,
'method': method, 'path': path, 'ssl': ssl,
'headers': headers, 'query_string': query_string}
http_connect_args.append(
dict((k, v) for k, v in captured_args.iteritems()
if v is not None))
req = Request.blank(
'/sda1/p/a/c',
environ={'REQUEST_METHOD': 'PUT'},
headers={'X-Timestamp': '12345',
'X-Account-Partition': '30',
'X-Account-Host': '1.2.3.4:5, 6.7.8.9:10',
'X-Account-Device': 'sdb1, sdf1'})
orig_http_connect = container_server.http_connect
try:
container_server.http_connect = fake_http_connect
req.get_response(self.controller)
finally:
container_server.http_connect = orig_http_connect
http_connect_args.sort(key=operator.itemgetter('ipaddr'))
self.assertEquals(len(http_connect_args), 2)
self.assertEquals(
http_connect_args[0],
{'ipaddr': '1.2.3.4',
'port': '5',
'path': '/a/c',
'device': 'sdb1',
'partition': '30',
'method': 'PUT',
'ssl': False,
'headers': HeaderKeyDict({
'x-bytes-used': 0,
'x-delete-timestamp': '0',
'x-object-count': 0,
'x-put-timestamp': Timestamp(12345).internal,
'X-Backend-Storage-Policy-Index': '%s' % POLICIES.default.idx,
'referer': 'PUT http://localhost/sda1/p/a/c',
'user-agent': 'container-server %d' % os.getpid(),
'x-trans-id': '-'})})
self.assertEquals(
http_connect_args[1],
{'ipaddr': '6.7.8.9',
'port': '10',
'path': '/a/c',
'device': 'sdf1',
'partition': '30',
'method': 'PUT',
'ssl': False,
'headers': HeaderKeyDict({
'x-bytes-used': 0,
'x-delete-timestamp': '0',
'x-object-count': 0,
'x-put-timestamp': Timestamp(12345).internal,
'X-Backend-Storage-Policy-Index': '%s' % POLICIES.default.idx,
'referer': 'PUT http://localhost/sda1/p/a/c',
'user-agent': 'container-server %d' % os.getpid(),
'x-trans-id': '-'})})
def test_serv_reserv(self):
# Test replication_server flag was set from configuration file.
container_controller = container_server.ContainerController
conf = {'devices': self.testdir, 'mount_check': 'false'}
self.assertEquals(container_controller(conf).replication_server, None)
for val in [True, '1', 'True', 'true']:
conf['replication_server'] = val
self.assertTrue(container_controller(conf).replication_server)
for val in [False, 0, '0', 'False', 'false', 'test_string']:
conf['replication_server'] = val
self.assertFalse(container_controller(conf).replication_server)
def test_list_allowed_methods(self):
# Test list of allowed_methods
obj_methods = ['DELETE', 'PUT', 'HEAD', 'GET', 'POST']
repl_methods = ['REPLICATE']
for method_name in obj_methods:
method = getattr(self.controller, method_name)
self.assertFalse(hasattr(method, 'replication'))
for method_name in repl_methods:
method = getattr(self.controller, method_name)
self.assertEquals(method.replication, True)
def test_correct_allowed_method(self):
# Test correct work for allowed method using
# swift.container.server.ContainerController.__call__
inbuf = StringIO()
errbuf = StringIO()
outbuf = StringIO()
self.controller = container_server.ContainerController(
{'devices': self.testdir, 'mount_check': 'false',
'replication_server': 'false'})
def start_response(*args):
"""Sends args to outbuf"""
outbuf.writelines(args)
method = 'PUT'
env = {'REQUEST_METHOD': method,
'SCRIPT_NAME': '',
'PATH_INFO': '/sda1/p/a/c',
'SERVER_NAME': '127.0.0.1',
'SERVER_PORT': '8080',
'SERVER_PROTOCOL': 'HTTP/1.0',
'CONTENT_LENGTH': '0',
'wsgi.version': (1, 0),
'wsgi.url_scheme': 'http',
'wsgi.input': inbuf,
'wsgi.errors': errbuf,
'wsgi.multithread': False,
'wsgi.multiprocess': False,
'wsgi.run_once': False}
method_res = mock.MagicMock()
mock_method = public(lambda x: mock.MagicMock(return_value=method_res))
with mock.patch.object(self.controller, method, new=mock_method):
response = self.controller.__call__(env, start_response)
self.assertEqual(response, method_res)
def test_not_allowed_method(self):
# Test correct work for NOT allowed method using
# swift.container.server.ContainerController.__call__
inbuf = StringIO()
errbuf = StringIO()
outbuf = StringIO()
self.controller = container_server.ContainerController(
{'devices': self.testdir, 'mount_check': 'false',
'replication_server': 'false'})
def start_response(*args):
"""Sends args to outbuf"""
outbuf.writelines(args)
method = 'PUT'
env = {'REQUEST_METHOD': method,
'SCRIPT_NAME': '',
'PATH_INFO': '/sda1/p/a/c',
'SERVER_NAME': '127.0.0.1',
'SERVER_PORT': '8080',
'SERVER_PROTOCOL': 'HTTP/1.0',
'CONTENT_LENGTH': '0',
'wsgi.version': (1, 0),
'wsgi.url_scheme': 'http',
'wsgi.input': inbuf,
'wsgi.errors': errbuf,
'wsgi.multithread': False,
'wsgi.multiprocess': False,
'wsgi.run_once': False}
answer = ['<html><h1>Method Not Allowed</h1><p>The method is not '
'allowed for this resource.</p></html>']
mock_method = replication(public(lambda x: mock.MagicMock()))
with mock.patch.object(self.controller, method, new=mock_method):
response = self.controller.__call__(env, start_response)
self.assertEqual(response, answer)
def test_GET_log_requests_true(self):
self.controller.logger = FakeLogger()
self.controller.log_requests = True
req = Request.blank('/sda1/p/a/c', environ={'REQUEST_METHOD': 'GET'})
resp = req.get_response(self.controller)
self.assertEqual(resp.status_int, 404)
self.assertTrue(self.controller.logger.log_dict['info'])
def test_GET_log_requests_false(self):
self.controller.logger = FakeLogger()
self.controller.log_requests = False
req = Request.blank('/sda1/p/a/c', environ={'REQUEST_METHOD': 'GET'})
resp = req.get_response(self.controller)
self.assertEqual(resp.status_int, 404)
self.assertFalse(self.controller.logger.log_dict['info'])
def test_log_line_format(self):
req = Request.blank(
'/sda1/p/a/c',
environ={'REQUEST_METHOD': 'HEAD', 'REMOTE_ADDR': '1.2.3.4'})
self.controller.logger = FakeLogger()
with mock.patch(
'time.gmtime', mock.MagicMock(side_effect=[gmtime(10001.0)])):
with mock.patch(
'time.time',
mock.MagicMock(side_effect=[10000.0, 10001.0, 10002.0])):
with mock.patch(
'os.getpid', mock.MagicMock(return_value=1234)):
req.get_response(self.controller)
self.assertEqual(
self.controller.logger.log_dict['info'],
[(('1.2.3.4 - - [01/Jan/1970:02:46:41 +0000] "HEAD /sda1/p/a/c" '
'404 - "-" "-" "-" 2.0000 "-" 1234',), {})])
@patch_policies([
StoragePolicy(0, 'legacy'),
StoragePolicy(1, 'one'),
StoragePolicy(2, 'two', True),
StoragePolicy(3, 'three'),
StoragePolicy(4, 'four'),
])
class TestNonLegacyDefaultStoragePolicy(TestContainerController):
"""
Test swift.container.server.ContainerController with a non-legacy default
Storage Policy.
"""
def _update_object_put_headers(self, req):
"""
Add policy index headers for containers created with default policy
- which in this TestCase is 1.
"""
req.headers['X-Backend-Storage-Policy-Index'] = \
str(POLICIES.default.idx)
if __name__ == '__main__':
unittest.main()
| kalrey/swift | test/unit/container/test_server.py | Python | apache-2.0 | 117,657 |
#
#testapi.py
from ..lib.pyGuifiAPI import *
from ..lib.pyGuifiAPI.error import GuifiApiError
import urllib
import re
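# The regex below captures the host part of an http URL,
# e.g. "http://example.org/some/path" -> "example.org".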
r = re.compile(r'http://([^/]*).*')
from ..guifiwrapper.cnmlUtils import *
# https://guifi.net/api?command=guifi.service.get&service_id=37668
# https://github.com/guifi/drupal-guifi/commit/c155cb310144a849adec03a73ded0f67b71f6850
conn = authenticate()
sid = 37668
data = {'command':'guifi.service.get','service_id':sid}
params = urllib.urlencode(data)
(codenum, response) = conn.sendRequest(params)
if codenum == constants.ANSWER_GOOD:
result = response['service']['var']['url']
print result
print r.match(result).group(1)
else:
extra = response['extra'] if 'extra' in response else None
raise GuifiApiError(response['str'], response['code'], extra)
#print data
 | emmdim/guifiAnalyzer | traffic/tests/testApi.py | Python | gpl-3.0 | 804 |
class Display():
def __init__(self, width, height):
self.width = width
self.height = height
def getSize(self):
return (self.width, self.height)
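# Illustrative usage (not part of the original file):
#   d = Display(1024, 768)
#   d.getSize()  # -> (1024, 768)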
| thebruce87/Photobooth | src/display.py | Python | mit | 155 |
from tastypie.throttle import CacheDBThrottle
import time
from django.core.cache import cache
from django.contrib.auth.models import User, SiteProfileNotAvailable
import logging
log = logging.getLogger(__name__)
class UserAccessThrottle(CacheDBThrottle):
"""
A throttling mechanism that uses the cache for actual throttling but
writes-through to the database.
This is useful for tracking/aggregating usage through time, to possibly
build a statistics interface or a billing mechanism.
"""
def __init__(self, throttle_at=150, timeframe=3600, expiration=None, model_type=None):
super(UserAccessThrottle, self).__init__(throttle_at,timeframe,expiration)
self.model_type = model_type
def should_be_throttled(self, identifier, **kwargs):
"""
Returns whether or not the user has exceeded their throttle limit.
Maintains a list of timestamps when the user accessed the api within
the cache.
Returns ``False`` if the user should NOT be throttled or ``True`` if
the user should be throttled.
"""
#Generate a more granular id
new_id, url, request_method = self.get_new_id(identifier, **kwargs)
key = self.convert_identifier_to_key(new_id)
#See if we can get a user and adjust throttle limit
user = self.get_user(identifier)
throttle_at = self.get_rate_limit_for_user(user)
# Make sure something is there.
cache.add(key, [])
# Weed out anything older than the timeframe.
minimum_time = int(time.time()) - int(self.timeframe)
times_accessed = [access for access in cache.get(key) if access >= minimum_time]
cache.set(key, times_accessed, self.expiration)
if len(times_accessed) >= int(throttle_at):
# Throttle them.
return True
# Let them through.
return False
def accessed(self, identifier, **kwargs):
"""
Handles recording the user's access.
identifier - whatever identifier is passed into the class. Generally the username
kwargs - can contain request method and url
"""
#Generate a new id
new_id, url, request_method = self.get_new_id(identifier, **kwargs)
key = self.convert_identifier_to_key(new_id)
#Get times accessed and increment
times_accessed = cache.get(key, [])
times_accessed.append(int(time.time()))
cache.set(key, times_accessed, self.expiration)
# Write out the access to the DB for logging purposes.
# Do the import here, instead of top-level, so that the model is
# only required when using this throttling mechanism.
from tastypie.models import ApiAccess
ApiAccess.objects.create(
identifier=identifier,
url=url,
request_method=request_method,
)
def get_new_id(self, identifier, **kwargs):
"""
Generates a new, more granular, identifier, and parses request method and url from kwargs
identifier - whatever identifier is passed into the class. Generally the username
kwargs - can contain request method and url
"""
url = kwargs.get('url', '')
request_method = kwargs.get('request_method', '')
new_id = "{0}.{1}.{2}".format(identifier,url,request_method)
return new_id, url, request_method
def get_user(self, identifier):
"""
Try to get a user object from the identifier
identifier - whatever identifier is passed into the class. Generally the username
"""
try:
user = User.objects.get(username=identifier)
except:
user = None
return user
def get_rate_limit_for_user(self, user):
"""
See if the user has a higher rate limit than the global throttle setting
user - a user object
"""
throttle_at = self.throttle_at
if user is not None:
try:
profile = user.profile
except SiteProfileNotAvailable:
log.warn("No user profile available for {0}".format(user.username))
return throttle_at
            if profile.throttle_at > throttle_at:
                throttle_at = profile.throttle_at
return throttle_at
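# Illustrative usage sketch (not part of the original module): a tastypie
# resource could enable this throttle through its Meta options. The resource
# name below is an assumption for demonstration only.
#
#   from tastypie.resources import ModelResource
#
#   class DocumentResource(ModelResource):
#       class Meta:
#           throttle = UserAccessThrottle(throttle_at=150, timeframe=3600)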
| pombredanne/discern | freeform_data/throttle.py | Python | agpl-3.0 | 4,356 |
## 2. Looking at the data ##
# We can use the pandas library in python to read in the csv file.
# This creates a pandas dataframe and assigns it to the titanic variable.
import pandas
titanic = pandas.read_csv("titanic_train.csv")
# Print the first 5 rows of the dataframe.
print(titanic.head(5))
print(titanic.describe())
## 3. Missing data ##
# The titanic variable is available here.
titanic["Age"] = titanic["Age"].fillna(titanic["Age"].median())
## 5. Converting the Sex column ##
# Find all the unique genders -- the column appears to contain only male and female.
print(titanic["Sex"].unique())
# Replace all the occurences of male with the number 0.
titanic.loc[titanic["Sex"] == "male", "Sex"] = 0
titanic.loc[titanic["Sex"] == "female", "Sex"] = 1
## 6. Converting the Embarked column ##
# Find all the unique values for "Embarked".
print(titanic["Embarked"].unique())
titanic["Embarked"] = titanic["Embarked"].fillna('S')
titanic.loc[titanic["Embarked"] == "S", "Embarked"] = 0
titanic.loc[titanic["Embarked"] == "C", "Embarked"] = 1
titanic.loc[titanic["Embarked"] == "Q", "Embarked"] = 2
## 9. Making predictions ##
# Import the linear regression class
from sklearn.linear_model import LinearRegression
# Sklearn also has a helper that makes it easy to do cross validation
from sklearn.cross_validation import KFold
# The columns we'll use to predict the target
predictors = ["Pclass", "Sex", "Age", "SibSp", "Parch", "Fare", "Embarked"]
# Initialize our algorithm class
alg = LinearRegression()
# Generate cross validation folds for the titanic dataset. It return the row indices corresponding to train and test.
# We set random_state to ensure we get the same splits every time we run this.
kf = KFold(titanic.shape[0], n_folds=3, random_state=1)
predictions = []
for train, test in kf:
# The predictors we're using the train the algorithm. Note how we only take the rows in the train folds.
train_predictors = (titanic[predictors].iloc[train,:])
# The target we're using to train the algorithm.
train_target = titanic["Survived"].iloc[train]
# Training the algorithm using the predictors and target.
alg.fit(train_predictors, train_target)
# We can now make predictions on the test fold
test_predictions = alg.predict(titanic[predictors].iloc[test,:])
predictions.append(test_predictions)
## 10. Evaluating error ##
import numpy as np
# The predictions are in three separate numpy arrays. Concatenate them into one.
# We concatenate them on axis 0, as they only have one axis.
predictions = np.concatenate(predictions, axis=0)
# Map predictions to outcomes (only possible outcomes are 1 and 0)
predictions[predictions > .5] = 1
predictions[predictions <=.5] = 0
# Accuracy is the proportion of predictions that match the true outcomes.
accuracy = (predictions == titanic["Survived"]).mean()
## 11. Logistic regression ##
from sklearn import cross_validation
from sklearn.linear_model import LogisticRegression
# Initialize our algorithm
alg = LogisticRegression(random_state=1)
# Compute the accuracy score for all the cross validation folds. (much simpler than what we did before!)
scores = cross_validation.cross_val_score(alg, titanic[predictors], titanic["Survived"], cv=3)
# Take the mean of the scores (because we have one for each fold)
print(scores.mean())
## 12. Processing the test set ##
titanic_test = pandas.read_csv("titanic_test.csv")
titanic_test["Age"] = titanic_test["Age"].fillna(titanic["Age"].median())
titanic_test["Fare"] = titanic_test["Fare"].fillna(titanic_test["Fare"].median())
titanic_test.loc[titanic_test["Sex"] == "male", "Sex"] = 0
titanic_test.loc[titanic_test["Sex"] == "female", "Sex"] = 1
titanic_test["Embarked"] = titanic_test["Embarked"].fillna("S")
titanic_test.loc[titanic_test["Embarked"] == "S", "Embarked"] = 0
titanic_test.loc[titanic_test["Embarked"] == "C", "Embarked"] = 1
titanic_test.loc[titanic_test["Embarked"] == "Q", "Embarked"] = 2
## 13. Generating a submission file ##
# Initialize the algorithm class
alg = LogisticRegression(random_state=1)
# Train the algorithm using all the training data
alg.fit(titanic[predictors], titanic["Survived"])
# Make predictions using the test set.
predictions = alg.predict(titanic_test[predictors])
# Create a new dataframe with only the columns Kaggle wants from the dataset.
submission = pandas.DataFrame({
"PassengerId": titanic_test["PassengerId"],
"Survived": predictions
})
 | vipmunot/Data-Analysis-using-Python | Kaggle Competitions/Getting started with Kaggle-73.py | Python | mit | 4,368 |
#!/usr/bin/env python3
from lsltools import sim,vis
# STEP 1: Initialize a generator for simulated EEG and start it up.
eeg_data = sim.EEGData(nch=3,stream_name="example")
eeg_data.start()
# STEP 2: Find the stream started in step 1 and pass it to the vis.Grapher
streams = vis.pylsl.resolve_byprop("name","example")
eeg_graph = vis.Grapher(streams[0],512*5,'y')
# STEP 3: Enjoy the graph.
| bwrc/lsltools | examples/visualizer_example.py | Python | mit | 395 |
import logging
from django.conf import settings
from django.core.urlresolvers import reverse
from django.http import HttpResponseRedirect
from .. import jwt_utils
from ..utils import str_to_class, get_redirect_url
logger = logging.getLogger(__name__)
# TODO: Remove this duplicated code from Loginview
auth_backends = settings.WIGGUM_AUTHENTICATION_BACKENDS
def get_authentication_backends():
return [str_to_class(backend)() for backend in auth_backends]
class LoginPreCheckBaseAction(object):
"""Login pre form check base action"""
def do(self, action_context, view, *args, **kwargs):
"""user will be none if the user was not authenticated"""
return action_context
class ForceLoginFormAction(LoginPreCheckBaseAction):
""" Breaks the chain to force the the login form despite the user is logged
already
"""
def do(self, action_context, view, *args, **kwargs):
if settings.FORCE_LOGIN_FORM:
action_context.break_chain = True
logger.debug("Forcing login form")
return super().do(action_context, view, *args, **kwargs)
class CheckUserAuthenticatedAlreadyAction(LoginPreCheckBaseAction):
""" Checks if the user is already authenticated"""
def do(self, action_context, view, *args, **kwargs):
"""Creates the appropiate response to return"""
# If need to force login then always show the login form
credential_data = {
'request': action_context.request,
}
backends = get_authentication_backends()
for i in backends:
user_authenticated = i.authenticate(**credential_data)
if user_authenticated:
action_context.extra_context['jwt'] = action_context.request.\
COOKIES.get(settings.JWT_COOKIE_NAME, None)
action_context.extra_context['user_authenticated'] = True
action_context.extra_context['user'] = user_authenticated
# Do not break chain, we need to check the version of the token
action_context.response = HttpResponseRedirect(
action_context.extra_context['redirect_url'])
return super().do(action_context, view, *args, **kwargs)
class CheckValidJWTVersionAction(LoginPreCheckBaseAction):
""" Checks if the jwt token version is correct, if not then this token is
destroyed (logout)
"""
def do(self, action_context, view, *args, **kwargs):
jwt = action_context.extra_context.get('jwt')
if jwt and settings.JWT_DESTROY_TOKEN_ON_LESSER_VERSION:
jwt_version = float(jwt_utils.decode_jwt(jwt).get('version', 0))
if jwt_version < settings.JWT_MINIMUM_VERSION:
logout_url = reverse("auth:logout")
action_context.extra_context['invalid_jwt_version'] = True
# Check if redirection is needed (checks all the valid redirects)
redirect_uri = get_redirect_url(action_context.request)
if redirect_uri:
redirect_param = settings.REDIRECT_URL_VALID_PARAMS[0]
logout_url = "{0}?{1}={2}".format(
logout_url, redirect_param, redirect_uri)
msg = ("JWT version is invalid (token:{0}) "
"(minimum: {1})").format(jwt_version,
settings.JWT_MINIMUM_VERSION)
logger.info(msg)
action_context.response = HttpResponseRedirect(logout_url)
return super().do(action_context, view, *args, **kwargs)
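# Illustrative sketch of how a view might run these pre-check actions (not part
# of this module; the runner function and wiring are assumptions based on the
# attributes used above):
#
#   def run_pre_check_actions(actions, action_context, view):
#       for action in actions:
#           action_context = action.do(action_context, view)
#           if action_context.break_chain:
#               break
#       return action_context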
| qdqmedia/wiggum | wiggum/authorization/actions/login_pre_check.py | Python | bsd-3-clause | 3,600 |
import sys
import os
from setuptools import setup, Command
NAME = "gnu_health_fhir"
with open("README.md") as readme:
README = readme.read()
README_TYPE = "text/markdown"
with open(os.path.join(NAME, "VERSION")) as version:
VERSION = version.readlines()[0].strip()
with open("requirements.txt") as requirements:
REQUIREMENTS = [line.rstrip() for line in requirements if line != "\n"]
setup(
name="gnu_health_fhir",
version=VERSION,
description="Provides FHIR interface to GNU Health.",
long_description=README,
long_description_content_type=README_TYPE,
url="https://github.com/teffalump/gnu_health_fhir",
author="teffalump",
author_email="[email protected]",
packages=["gnu_health_fhir"],
install_requires=REQUIREMENTS,
include_package_data=True,
zip_safe=False,
classifiers=[
"Development Status :: 2 - Pre-Alpha",
"Programming Language :: Python",
"Programming Language :: Python :: 3",
"Intended Audience :: Developers",
"License :: OSI Approved :: GNU General Public License v3 or later (GPLv3+)",
"Operating System :: OS Independent",
],
)
| teffalump/health_fhir | setup.py | Python | gpl-3.0 | 1,173 |
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2011 OpenStack, LLC
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Tests a Glance API server which uses the caching middleware that
uses the default SQLite cache driver. We use the filesystem store,
but that is really not relevant, as the image cache is transparent
to the backend store.
"""
import hashlib
import json
import os
import shutil
import sys
import time
import httplib2
from glance.tests import functional
from glance.tests.utils import (skip_if_disabled,
execute,
xattr_writes_supported,
minimal_headers)
from glance.tests.functional.store_utils import (setup_http,
get_http_uri)
FIVE_KB = 5 * 1024
class BaseCacheMiddlewareTest(object):
@skip_if_disabled
def test_cache_middleware_transparent_v1(self):
"""
We test that putting the cache middleware into the
application pipeline gives us transparent image caching
"""
self.cleanup()
self.start_servers(**self.__dict__.copy())
# Add an image and verify a 200 OK is returned
image_data = "*" * FIVE_KB
headers = minimal_headers('Image1')
path = "http://%s:%d/v1/images" % ("127.0.0.1", self.api_port)
http = httplib2.Http()
response, content = http.request(path, 'POST', headers=headers,
body=image_data)
self.assertEqual(response.status, 201)
data = json.loads(content)
self.assertEqual(data['image']['checksum'],
hashlib.md5(image_data).hexdigest())
self.assertEqual(data['image']['size'], FIVE_KB)
self.assertEqual(data['image']['name'], "Image1")
self.assertEqual(data['image']['is_public'], True)
image_id = data['image']['id']
# Verify image not in cache
image_cached_path = os.path.join(self.api_server.image_cache_dir,
image_id)
self.assertFalse(os.path.exists(image_cached_path))
# Grab the image
path = "http://%s:%d/v1/images/%s" % ("127.0.0.1", self.api_port,
image_id)
http = httplib2.Http()
response, content = http.request(path, 'GET')
self.assertEqual(response.status, 200)
# Verify image now in cache
image_cached_path = os.path.join(self.api_server.image_cache_dir,
image_id)
# You might wonder why the heck this is here... well, it's here
# because it took me forever to figure out that the disk write
# cache in Linux was causing random failures of the os.path.exists
# assert directly below this. Basically, since the cache is writing
# the image file to disk in a different process, the write buffers
# don't flush the cache file during an os.rename() properly, resulting
# in a false negative on the file existence check below. This little
# loop pauses the execution of this process for no more than 1.5
# seconds. If after that time the cached image file still doesn't
# appear on disk, something really is wrong, and the assert should
# trigger...
i = 0
while not os.path.exists(image_cached_path) and i < 30:
time.sleep(0.05)
i = i + 1
self.assertTrue(os.path.exists(image_cached_path))
# Now, we delete the image from the server and verify that
# the image cache no longer contains the deleted image
path = "http://%s:%d/v1/images/%s" % ("127.0.0.1", self.api_port,
image_id)
http = httplib2.Http()
response, content = http.request(path, 'DELETE')
self.assertEqual(response.status, 200)
self.assertFalse(os.path.exists(image_cached_path))
self.stop_servers()
@skip_if_disabled
def test_cache_middleware_transparent_v2(self):
"""Ensure the v2 API image transfer calls trigger caching"""
self.cleanup()
self.start_servers(**self.__dict__.copy())
# Add an image and verify success
path = "http://%s:%d/v2/images" % ("0.0.0.0", self.api_port)
http = httplib2.Http()
headers = {'content-type': 'application/json'}
image_entity = {
'name': 'Image1',
'visibility': 'public',
'container_format': 'bare',
'disk_format': 'raw',
}
response, content = http.request(path, 'POST',
headers=headers,
body=json.dumps(image_entity))
self.assertEqual(response.status, 201)
data = json.loads(content)
image_id = data['id']
path = "http://%s:%d/v2/images/%s/file" % ("0.0.0.0", self.api_port,
image_id)
headers = {'content-type': 'application/octet-stream'}
image_data = "*" * FIVE_KB
response, content = http.request(path, 'PUT',
headers=headers,
body=image_data)
self.assertEqual(response.status, 204)
# Verify image not in cache
image_cached_path = os.path.join(self.api_server.image_cache_dir,
image_id)
self.assertFalse(os.path.exists(image_cached_path))
# Grab the image
http = httplib2.Http()
response, content = http.request(path, 'GET')
self.assertEqual(response.status, 200)
# Verify image now in cache
image_cached_path = os.path.join(self.api_server.image_cache_dir,
image_id)
# Now, we delete the image from the server and verify that
# the image cache no longer contains the deleted image
path = "http://%s:%d/v2/images/%s" % ("0.0.0.0", self.api_port,
image_id)
http = httplib2.Http()
response, content = http.request(path, 'DELETE')
self.assertEqual(response.status, 204)
self.assertFalse(os.path.exists(image_cached_path))
self.stop_servers()
@skip_if_disabled
def test_cache_remote_image(self):
"""
We test that caching is no longer broken for remote images
"""
self.cleanup()
self.start_servers(**self.__dict__.copy())
setup_http(self)
# Add a remote image and verify a 201 Created is returned
remote_uri = get_http_uri(self, '2')
headers = {'X-Image-Meta-Name': 'Image2',
'X-Image-Meta-disk_format': 'raw',
'X-Image-Meta-container_format': 'ovf',
'X-Image-Meta-Is-Public': 'True',
'X-Image-Meta-Location': remote_uri}
path = "http://%s:%d/v1/images" % ("127.0.0.1", self.api_port)
http = httplib2.Http()
response, content = http.request(path, 'POST', headers=headers)
self.assertEqual(response.status, 201)
data = json.loads(content)
self.assertEqual(data['image']['size'], FIVE_KB)
image_id = data['image']['id']
path = "http://%s:%d/v1/images/%s" % ("127.0.0.1", self.api_port,
image_id)
# Grab the image
http = httplib2.Http()
response, content = http.request(path, 'GET')
self.assertEqual(response.status, 200)
# Grab the image again to ensure it can be served out from
# cache with the correct size
http = httplib2.Http()
response, content = http.request(path, 'GET')
self.assertEqual(response.status, 200)
self.assertEqual(int(response['content-length']), FIVE_KB)
self.stop_servers()
@skip_if_disabled
def test_cache_middleware_trans_v1_without_download_image_policy(self):
"""
Ensure the image v1 API image transfer applied 'download_image'
policy enforcement.
"""
self.cleanup()
self.start_servers(**self.__dict__.copy())
# Add an image and verify a 200 OK is returned
image_data = "*" * FIVE_KB
headers = minimal_headers('Image1')
path = "http://%s:%d/v1/images" % ("127.0.0.1", self.api_port)
http = httplib2.Http()
response, content = http.request(path, 'POST', headers=headers,
body=image_data)
self.assertEqual(response.status, 201)
data = json.loads(content)
self.assertEqual(data['image']['checksum'],
hashlib.md5(image_data).hexdigest())
self.assertEqual(data['image']['size'], FIVE_KB)
self.assertEqual(data['image']['name'], "Image1")
self.assertEqual(data['image']['is_public'], True)
image_id = data['image']['id']
# Verify image not in cache
image_cached_path = os.path.join(self.api_server.image_cache_dir,
image_id)
self.assertFalse(os.path.exists(image_cached_path))
rules = {"context_is_admin": "role:admin", "default": "",
"download_image": "!"}
self.set_policy_rules(rules)
# Grab the image
path = "http://%s:%d/v1/images/%s" % ("127.0.0.1", self.api_port,
image_id)
http = httplib2.Http()
response, content = http.request(path, 'GET')
self.assertEqual(response.status, 403)
# Now, we delete the image from the server and verify that
# the image cache no longer contains the deleted image
path = "http://%s:%d/v1/images/%s" % ("127.0.0.1", self.api_port,
image_id)
http = httplib2.Http()
response, content = http.request(path, 'DELETE')
self.assertEqual(response.status, 200)
self.assertFalse(os.path.exists(image_cached_path))
self.stop_servers()
@skip_if_disabled
def test_cache_middleware_trans_v2_without_download_image_policy(self):
"""
Ensure the image v2 API image transfer applied 'download_image'
policy enforcement.
"""
self.cleanup()
self.start_servers(**self.__dict__.copy())
# Add an image and verify success
path = "http://%s:%d/v2/images" % ("0.0.0.0", self.api_port)
http = httplib2.Http()
headers = {'content-type': 'application/json'}
image_entity = {
'name': 'Image1',
'visibility': 'public',
'container_format': 'bare',
'disk_format': 'raw',
}
response, content = http.request(path, 'POST',
headers=headers,
body=json.dumps(image_entity))
self.assertEqual(response.status, 201)
data = json.loads(content)
image_id = data['id']
path = "http://%s:%d/v2/images/%s/file" % ("0.0.0.0", self.api_port,
image_id)
headers = {'content-type': 'application/octet-stream'}
image_data = "*" * FIVE_KB
response, content = http.request(path, 'PUT',
headers=headers,
body=image_data)
self.assertEqual(response.status, 204)
# Verify image not in cache
image_cached_path = os.path.join(self.api_server.image_cache_dir,
image_id)
self.assertFalse(os.path.exists(image_cached_path))
rules = {"context_is_admin": "role:admin", "default": "",
"download_image": "!"}
self.set_policy_rules(rules)
# Grab the image
http = httplib2.Http()
response, content = http.request(path, 'GET')
self.assertEqual(response.status, 403)
# Now, we delete the image from the server and verify that
# the image cache no longer contains the deleted image
path = "http://%s:%d/v2/images/%s" % ("0.0.0.0", self.api_port,
image_id)
http = httplib2.Http()
response, content = http.request(path, 'DELETE')
self.assertEqual(response.status, 204)
self.assertFalse(os.path.exists(image_cached_path))
self.stop_servers()
class BaseCacheManageMiddlewareTest(object):
"""Base test class for testing cache management middleware"""
def verify_no_images(self):
path = "http://%s:%d/v1/images" % ("127.0.0.1", self.api_port)
http = httplib2.Http()
response, content = http.request(path, 'GET')
self.assertEqual(response.status, 200)
data = json.loads(content)
self.assertTrue('images' in data)
self.assertEqual(0, len(data['images']))
def add_image(self, name):
"""
Adds an image and returns the newly-added image
identifier
"""
image_data = "*" * FIVE_KB
headers = minimal_headers('%s' % name)
path = "http://%s:%d/v1/images" % ("127.0.0.1", self.api_port)
http = httplib2.Http()
response, content = http.request(path, 'POST', headers=headers,
body=image_data)
self.assertEqual(response.status, 201)
data = json.loads(content)
self.assertEqual(data['image']['checksum'],
hashlib.md5(image_data).hexdigest())
self.assertEqual(data['image']['size'], FIVE_KB)
self.assertEqual(data['image']['name'], name)
self.assertEqual(data['image']['is_public'], True)
return data['image']['id']
def verify_no_cached_images(self):
"""
Verify no images in the image cache
"""
path = "http://%s:%d/v1/cached_images" % ("127.0.0.1", self.api_port)
http = httplib2.Http()
response, content = http.request(path, 'GET')
self.assertEqual(response.status, 200)
data = json.loads(content)
self.assertTrue('cached_images' in data)
self.assertEqual(data['cached_images'], [])
@skip_if_disabled
def test_user_not_authorized(self):
self.cleanup()
self.start_servers(**self.__dict__.copy())
self.verify_no_images()
image_id1 = self.add_image("Image1")
image_id2 = self.add_image("Image2")
# Verify image does not yet show up in cache (we haven't "hit"
# it yet using a GET /images/1 ...
self.verify_no_cached_images()
# Grab the image
path = "http://%s:%d/v1/images/%s" % ("127.0.0.1", self.api_port,
image_id1)
http = httplib2.Http()
response, content = http.request(path, 'GET')
self.assertEqual(response.status, 200)
# Verify image now in cache
path = "http://%s:%d/v1/cached_images" % ("127.0.0.1", self.api_port)
http = httplib2.Http()
response, content = http.request(path, 'GET')
self.assertEqual(response.status, 200)
data = json.loads(content)
self.assertTrue('cached_images' in data)
cached_images = data['cached_images']
self.assertEqual(1, len(cached_images))
self.assertEqual(image_id1, cached_images[0]['image_id'])
# Set policy to disallow access to cache management
rules = {"manage_image_cache": '!'}
self.set_policy_rules(rules)
# Verify an unprivileged user cannot see cached images
path = "http://%s:%d/v1/cached_images" % ("127.0.0.1", self.api_port)
http = httplib2.Http()
response, content = http.request(path, 'GET')
self.assertEqual(response.status, 403)
# Verify an unprivileged user cannot delete images from the cache
path = "http://%s:%d/v1/cached_images/%s" % ("127.0.0.1",
self.api_port, image_id1)
http = httplib2.Http()
response, content = http.request(path, 'DELETE')
self.assertEqual(response.status, 403)
# Verify an unprivileged user cannot delete all cached images
path = "http://%s:%d/v1/cached_images" % ("127.0.0.1", self.api_port)
http = httplib2.Http()
response, content = http.request(path, 'DELETE')
self.assertEqual(response.status, 403)
# Verify an unprivileged user cannot queue an image
path = "http://%s:%d/v1/queued_images/%s" % ("127.0.0.1",
self.api_port, image_id2)
http = httplib2.Http()
response, content = http.request(path, 'PUT')
self.assertEqual(response.status, 403)
self.stop_servers()
@skip_if_disabled
def test_cache_manage_get_cached_images(self):
"""
Tests that cached images are queryable
"""
self.cleanup()
self.start_servers(**self.__dict__.copy())
self.verify_no_images()
image_id = self.add_image("Image1")
# Verify image does not yet show up in cache (we haven't "hit"
# it yet using a GET /images/1 ...
self.verify_no_cached_images()
# Grab the image
path = "http://%s:%d/v1/images/%s" % ("127.0.0.1", self.api_port,
image_id)
http = httplib2.Http()
response, content = http.request(path, 'GET')
self.assertEqual(response.status, 200)
# Verify image now in cache
path = "http://%s:%d/v1/cached_images" % ("127.0.0.1", self.api_port)
http = httplib2.Http()
response, content = http.request(path, 'GET')
self.assertEqual(response.status, 200)
data = json.loads(content)
self.assertTrue('cached_images' in data)
# Verify the last_modified/last_accessed values are valid floats
for cached_image in data['cached_images']:
for time_key in ('last_modified', 'last_accessed'):
time_val = cached_image[time_key]
try:
float(time_val)
except ValueError:
self.fail('%s time %s for cached image %s not a valid '
'float' % (time_key, time_val,
cached_image['image_id']))
cached_images = data['cached_images']
self.assertEqual(1, len(cached_images))
self.assertEqual(image_id, cached_images[0]['image_id'])
self.assertEqual(0, cached_images[0]['hits'])
# Hit the image
path = "http://%s:%d/v1/images/%s" % ("127.0.0.1", self.api_port,
image_id)
http = httplib2.Http()
response, content = http.request(path, 'GET')
self.assertEqual(response.status, 200)
# Verify image hits increased in output of manage GET
path = "http://%s:%d/v1/cached_images" % ("127.0.0.1", self.api_port)
http = httplib2.Http()
response, content = http.request(path, 'GET')
self.assertEqual(response.status, 200)
data = json.loads(content)
self.assertTrue('cached_images' in data)
cached_images = data['cached_images']
self.assertEqual(1, len(cached_images))
self.assertEqual(image_id, cached_images[0]['image_id'])
self.assertEqual(1, cached_images[0]['hits'])
self.stop_servers()
@skip_if_disabled
def test_cache_manage_delete_cached_images(self):
"""
Tests that cached images may be deleted
"""
self.cleanup()
self.start_servers(**self.__dict__.copy())
self.verify_no_images()
ids = {}
# Add a bunch of images...
for x in xrange(0, 4):
ids[x] = self.add_image("Image%s" % str(x))
# Verify no images in cached_images because no image has been hit
# yet using a GET /images/<IMAGE_ID> ...
self.verify_no_cached_images()
# Grab the images, essentially caching them...
for x in xrange(0, 4):
path = "http://%s:%d/v1/images/%s" % ("127.0.0.1", self.api_port,
ids[x])
http = httplib2.Http()
response, content = http.request(path, 'GET')
self.assertEqual(response.status, 200,
"Failed to find image %s" % ids[x])
# Verify images now in cache
path = "http://%s:%d/v1/cached_images" % ("127.0.0.1", self.api_port)
http = httplib2.Http()
response, content = http.request(path, 'GET')
self.assertEqual(response.status, 200)
data = json.loads(content)
self.assertTrue('cached_images' in data)
cached_images = data['cached_images']
self.assertEqual(4, len(cached_images))
        # Cached images are returned in last-modified order, so verify the
        # cache contents independently of ordering
        cached_ids = [img['image_id'] for img in cached_images]
        self.assertEqual(set(ids.values()), set(cached_ids))
        for img in cached_images:
            self.assertEqual(0, img['hits'])
# Delete third image of the cached images and verify no longer in cache
path = "http://%s:%d/v1/cached_images/%s" % ("127.0.0.1",
self.api_port, ids[2])
http = httplib2.Http()
response, content = http.request(path, 'DELETE')
self.assertEqual(response.status, 200)
path = "http://%s:%d/v1/cached_images" % ("127.0.0.1", self.api_port)
http = httplib2.Http()
response, content = http.request(path, 'GET')
self.assertEqual(response.status, 200)
data = json.loads(content)
self.assertTrue('cached_images' in data)
cached_images = data['cached_images']
self.assertEqual(3, len(cached_images))
self.assertTrue(ids[2] not in [x['image_id'] for x in cached_images])
# Delete all cached images and verify nothing in cache
path = "http://%s:%d/v1/cached_images" % ("127.0.0.1", self.api_port)
http = httplib2.Http()
response, content = http.request(path, 'DELETE')
self.assertEqual(response.status, 200)
path = "http://%s:%d/v1/cached_images" % ("127.0.0.1", self.api_port)
http = httplib2.Http()
response, content = http.request(path, 'GET')
self.assertEqual(response.status, 200)
data = json.loads(content)
self.assertTrue('cached_images' in data)
cached_images = data['cached_images']
self.assertEqual(0, len(cached_images))
self.stop_servers()
@skip_if_disabled
def test_cache_manage_delete_queued_images(self):
"""
Tests that all queued images may be deleted at once
"""
self.cleanup()
self.start_servers(**self.__dict__.copy())
self.verify_no_images()
ids = {}
NUM_IMAGES = 4
# Add and then queue some images
for x in xrange(0, NUM_IMAGES):
ids[x] = self.add_image("Image%s" % str(x))
path = "http://%s:%d/v1/queued_images/%s" % ("127.0.0.1",
self.api_port, ids[x])
http = httplib2.Http()
response, content = http.request(path, 'PUT')
self.assertEqual(response.status, 200)
# Delete all queued images
path = "http://%s:%d/v1/queued_images" % ("127.0.0.1", self.api_port)
http = httplib2.Http()
response, content = http.request(path, 'DELETE')
self.assertEqual(response.status, 200)
data = json.loads(content)
num_deleted = data['num_deleted']
self.assertEqual(NUM_IMAGES, num_deleted)
# Verify a second delete now returns num_deleted=0
path = "http://%s:%d/v1/queued_images" % ("127.0.0.1", self.api_port)
http = httplib2.Http()
response, content = http.request(path, 'DELETE')
self.assertEqual(response.status, 200)
data = json.loads(content)
num_deleted = data['num_deleted']
self.assertEqual(0, num_deleted)
self.stop_servers()
@skip_if_disabled
def test_queue_and_prefetch(self):
"""
Tests that images may be queued and prefetched
"""
self.cleanup()
self.start_servers(**self.__dict__.copy())
cache_config_filepath = os.path.join(self.test_dir, 'etc',
'glance-cache.conf')
cache_file_options = {
'image_cache_dir': self.api_server.image_cache_dir,
'image_cache_driver': self.image_cache_driver,
'registry_port': self.registry_server.bind_port,
'log_file': os.path.join(self.test_dir, 'cache.log'),
'metadata_encryption_key': "012345678901234567890123456789ab"
}
with open(cache_config_filepath, 'w') as cache_file:
cache_file.write("""[DEFAULT]
debug = True
verbose = True
image_cache_dir = %(image_cache_dir)s
image_cache_driver = %(image_cache_driver)s
registry_host = 127.0.0.1
registry_port = %(registry_port)s
metadata_encryption_key = %(metadata_encryption_key)s
log_file = %(log_file)s
""" % cache_file_options)
self.verify_no_images()
ids = {}
# Add a bunch of images...
for x in xrange(0, 4):
ids[x] = self.add_image("Image%s" % str(x))
# Queue the first image, verify no images still in cache after queueing
# then run the prefetcher and verify that the image is then in the
# cache
path = "http://%s:%d/v1/queued_images/%s" % ("127.0.0.1",
self.api_port, ids[0])
http = httplib2.Http()
response, content = http.request(path, 'PUT')
self.assertEqual(response.status, 200)
self.verify_no_cached_images()
cmd = ("%s -m glance.cmd.cache_prefetcher --config-file %s" %
(sys.executable, cache_config_filepath))
exitcode, out, err = execute(cmd)
self.assertEqual(0, exitcode)
self.assertEqual('', out.strip(), out)
# Verify first image now in cache
path = "http://%s:%d/v1/cached_images" % ("127.0.0.1", self.api_port)
http = httplib2.Http()
response, content = http.request(path, 'GET')
self.assertEqual(response.status, 200)
data = json.loads(content)
self.assertTrue('cached_images' in data)
cached_images = data['cached_images']
self.assertEqual(1, len(cached_images))
self.assertTrue(ids[0] in [r['image_id']
for r in data['cached_images']])
self.stop_servers()
class TestImageCacheXattr(functional.FunctionalTest,
BaseCacheMiddlewareTest):
"""Functional tests that exercise the image cache using the xattr driver"""
def setUp(self):
"""
Test to see if the pre-requisites for the image cache
are working (python-xattr installed and xattr support on the
filesystem)
"""
if getattr(self, 'disabled', False):
return
if not getattr(self, 'inited', False):
try:
import xattr
except ImportError:
self.inited = True
self.disabled = True
self.disabled_message = ("python-xattr not installed.")
return
self.inited = True
self.disabled = False
self.image_cache_driver = "xattr"
super(TestImageCacheXattr, self).setUp()
self.api_server.deployment_flavor = "caching"
if not xattr_writes_supported(self.test_dir):
self.inited = True
self.disabled = True
self.disabled_message = ("filesystem does not support xattr")
return
def tearDown(self):
super(TestImageCacheXattr, self).tearDown()
if os.path.exists(self.api_server.image_cache_dir):
shutil.rmtree(self.api_server.image_cache_dir)
class TestImageCacheManageXattr(functional.FunctionalTest,
BaseCacheManageMiddlewareTest):
"""
Functional tests that exercise the image cache management
with the Xattr cache driver
"""
def setUp(self):
"""
Test to see if the pre-requisites for the image cache
are working (python-xattr installed and xattr support on the
filesystem)
"""
if getattr(self, 'disabled', False):
return
if not getattr(self, 'inited', False):
try:
import xattr
except ImportError:
self.inited = True
self.disabled = True
self.disabled_message = ("python-xattr not installed.")
return
self.inited = True
self.disabled = False
self.image_cache_driver = "xattr"
super(TestImageCacheManageXattr, self).setUp()
self.api_server.deployment_flavor = "cachemanagement"
if not xattr_writes_supported(self.test_dir):
self.inited = True
self.disabled = True
self.disabled_message = ("filesystem does not support xattr")
return
def tearDown(self):
super(TestImageCacheManageXattr, self).tearDown()
if os.path.exists(self.api_server.image_cache_dir):
shutil.rmtree(self.api_server.image_cache_dir)
class TestImageCacheSqlite(functional.FunctionalTest,
BaseCacheMiddlewareTest):
"""
Functional tests that exercise the image cache using the
SQLite driver
"""
def setUp(self):
"""
        Test to see if the pre-requisites for the image cache
        are working (the sqlite3 module is available)
"""
if getattr(self, 'disabled', False):
return
if not getattr(self, 'inited', False):
try:
import sqlite3
except ImportError:
self.inited = True
self.disabled = True
self.disabled_message = ("python-sqlite3 not installed.")
return
self.inited = True
self.disabled = False
super(TestImageCacheSqlite, self).setUp()
self.api_server.deployment_flavor = "caching"
def tearDown(self):
super(TestImageCacheSqlite, self).tearDown()
if os.path.exists(self.api_server.image_cache_dir):
shutil.rmtree(self.api_server.image_cache_dir)
class TestImageCacheManageSqlite(functional.FunctionalTest,
BaseCacheManageMiddlewareTest):
"""
Functional tests that exercise the image cache management using the
SQLite driver
"""
def setUp(self):
"""
        Test to see if the pre-requisites for the image cache
        are working (the sqlite3 module is available)
"""
if getattr(self, 'disabled', False):
return
if not getattr(self, 'inited', False):
try:
import sqlite3
except ImportError:
self.inited = True
self.disabled = True
self.disabled_message = ("python-sqlite3 not installed.")
return
self.inited = True
self.disabled = False
self.image_cache_driver = "sqlite"
super(TestImageCacheManageSqlite, self).setUp()
self.api_server.deployment_flavor = "cachemanagement"
def tearDown(self):
super(TestImageCacheManageSqlite, self).tearDown()
if os.path.exists(self.api_server.image_cache_dir):
shutil.rmtree(self.api_server.image_cache_dir)
| SUSE-Cloud/glance | glance/tests/functional/test_cache_middleware.py | Python | apache-2.0 | 32,711 |
# -*- coding: utf-8 -*-
"""Abstract base classes to define readers and writers."""
from __future__ import unicode_literals, print_function
from abc import ABCMeta, abstractmethod
class LinkReader:
"""Abstract reader of links."""
__metaclass__ = ABCMeta
def __init__(self, stream, header=True):
self.stream = stream
self.header = header
self.meta = {}
    def start(self):
        pass
@abstractmethod
def next(self):
pass
def links(self):
return [l for l in self.next() if l]
class Writer:
"""Abstract writer of links or deltas."""
__metaclass__ = ABCMeta
def __init__(self, stream, header=True):
"""Create a new writer."""
self.stream = stream
self.header = header
self.meta = {}
self.started = False
def start(self, meta):
"""Start writing with given metadata."""
self.meta = meta
def print(self, s):
"""Helper method to print a string without buffering."""
print(s, file=self.stream)
try:
self.stream.flush()
except IOError: # raised for instance if stream has been closed
pass
class LinkWriter(Writer):
"""Abstract writer of links."""
__metaclass__ = ABCMeta
# TODO: move to meta class
def mapping_type(self):
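        # Use the relation from the metadata if one is set; otherwise fall
        # back to the generic rdfs:seeAlso property as mapping type.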
if 'relation' in self.meta and self.meta['relation']:
return self.meta['relation']
else:
return 'http://www.w3.org/2000/01/rdf-schema#seeAlso'
class DeltaWriter(Writer):
"""Abstract writer of deltas."""
__metaclass__ = ABCMeta
@abstractmethod
def write_delta(self, delta):
pass
| gbv/wdmapper | wdmapper/format/base.py | Python | mit | 1,684 |
# Copyright 2016 Autodesk Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from moldesign import utils
from moldesign.helpers import colormap
class ColorMixin(object):
def color_by(self, atom_callback, atoms=None, mplmap='auto', force_cmap=False):
"""
Color atoms according to either:
* an atomic attribute (e.g., 'chain', 'residue', 'mass')
* a callback function that accepts an atom and returns a color or a category
Args:
atom_callback (callable OR str): callable f(atom) returns color OR
category OR an atom attribute (e.g., ``atnum, mass, residue.type``)
atoms (moldesign.molecules.AtomContainer): atoms to color (default: self.atoms)
mplmap (str): name of the matplotlib colormap to use if colors aren't explicitly
specified)
force_cmap (bool): force the use of a colormap
Notes:
If you'd like to explicitly specify colors, the callback can return color
specifications as an HTML string (``'#1234AB'``), a hexadecimal integer (
            ``0x1234AB``), or a CSS3 color keyword (``'green'``, ``'purple'``, etc., see
https://developer.mozilla.org/en-US/docs/Web/CSS/color_value)
If the callback returns an integer, it may be interpreted as a color spec (since RGB
colors are just hexadecimal integers). Use ``force_cmap=True`` to force the creation
of a colormap.
Returns:
dict: mapping of categories to colors
"""
atoms = utils.if_not_none(atoms, self.mol.atoms)
if isinstance(atom_callback, basestring):
# shortcut to use strings to access atom attributes, i.e. "ff.partial_charge"
attrs = atom_callback.split('.')
# make sure that whatever value is returned doesn't get interpreted as a color
force_cmap = True
def atom_callback(atom):
obj = atom
for attr in attrs:
obj = getattr(obj, attr)
return obj
colors = utils.Categorizer(atom_callback, atoms)
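        # After categorization, 'colors' maps each distinct callback result
        # (a color spec or a category label) to the atoms that produced it.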
if force_cmap:
name_is_color = [False]
else:
name_is_color = map(utils.is_color, colors.keys())
if len(colors) <= 1:
colors = {'gray': atoms}
elif not all(name_is_color):
assert not any(name_is_color), \
"callback function returned a mix of colors and categories"
categories = colors
cats = categories.keys()
# If there are >256 categories, this is a many-to-one mapping
colornames = colormap(cats, mplmap=mplmap)
colors = {c: [] for c in colornames}
for cat, color in zip(cats, colornames):
colors[color].extend(categories[cat])
self.set_colors(colors)
| tkzeng/molecular-design-toolkit | moldesign/viewer/common.py | Python | apache-2.0 | 3,411 |
#
# ovirt-engine-setup -- ovirt engine setup
# Copyright (C) 2013-2015 Red Hat, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""Apache ssl plugin."""
import gettext
import os
from otopi import constants as otopicons
from otopi import filetransaction, plugin, util
from ovirt_engine_setup import constants as osetupcons
from ovirt_engine_setup import util as osetuputil
from ovirt_setup_lib import dialog
from ovirt_engine_setup.engine_common import constants as oengcommcons
def _(m):
return gettext.dgettext(message=m, domain='ovirt-engine-setup')
@util.export
class Plugin(plugin.PluginBase):
"""Apache ssl plugin."""
def __init__(self, context):
super(Plugin, self).__init__(context=context)
self._enabled = True
self._params = {
'SSLCertificateFile': (
oengcommcons.FileLocations.OVIRT_ENGINE_PKI_APACHE_CERT
),
'SSLCertificateKeyFile': (
oengcommcons.FileLocations.OVIRT_ENGINE_PKI_APACHE_KEY
),
'SSLCACertificateFile': (
oengcommcons.FileLocations.OVIRT_ENGINE_PKI_APACHE_CA_CERT
),
}
@plugin.event(
stage=plugin.Stages.STAGE_INIT,
)
def _init(self):
self.environment.setdefault(
oengcommcons.ApacheEnv.HTTPD_CONF_SSL,
oengcommcons.FileLocations.HTTPD_CONF_SSL
)
self.environment.setdefault(
oengcommcons.ApacheEnv.CONFIGURE_SSL,
None
)
@plugin.event(
stage=plugin.Stages.STAGE_SETUP,
condition=lambda self: self._enabled,
)
def _setup(self):
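        # Disable automatic SSL configuration when it was not explicitly
        # requested and we are either in developer mode or Apache is already
        # configured.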
if (
self.environment[
oengcommcons.ApacheEnv.CONFIGURE_SSL
] is None and
(
self.environment[
osetupcons.CoreEnv.DEVELOPER_MODE
] or
self.environment[
oengcommcons.ApacheEnv.CONFIGURED
]
)
):
self._enabled = False
@plugin.event(
stage=plugin.Stages.STAGE_CUSTOMIZATION,
condition=lambda self: (
self.environment[oengcommcons.ApacheEnv.ENABLE] and
self._enabled
),
before=(
oengcommcons.Stages.DIALOG_TITLES_E_APACHE,
),
after=(
oengcommcons.Stages.DIALOG_TITLES_S_APACHE,
),
)
def _customization(self):
if self.environment[
oengcommcons.ApacheEnv.CONFIGURE_SSL
] is None:
self.dialog.note(
_(
'Setup can configure apache to use SSL using a '
'certificate issued from the internal CA.'
)
)
self.environment[
oengcommcons.ApacheEnv.CONFIGURE_SSL
] = dialog.queryBoolean(
dialog=self.dialog,
name='OVESETUP_APACHE_CONFIG_SSL',
note=_(
'Do you wish Setup to configure that, or prefer to '
'perform that manually? (@VALUES@) [@DEFAULT@]: '
),
prompt=True,
true=_('Automatic'),
false=_('Manual'),
default=True,
)
self._enabled = self.environment[
oengcommcons.ApacheEnv.CONFIGURE_SSL
]
if self._enabled:
if not os.path.exists(
self.environment[
oengcommcons.ApacheEnv.HTTPD_CONF_SSL
]
):
                self.logger.warning(
                    _(
                        "Automatic Apache SSL configuration was requested. "
                        "However, SSL configuration file '{file}' was not "
                        "found. Disabling automatic Apache SSL configuration."
                    ).format(
                        file=self.environment[
                            oengcommcons.ApacheEnv.HTTPD_CONF_SSL
                        ]
                    )
                )
self._enabled = False
@plugin.event(
stage=plugin.Stages.STAGE_VALIDATION,
condition=lambda self: self._enabled,
priority=plugin.Stages.PRIORITY_HIGH,
)
def _validate_enable(self):
if not self.environment[oengcommcons.ApacheEnv.ENABLE]:
self._enabled = False
@plugin.event(
stage=plugin.Stages.STAGE_VALIDATION,
condition=lambda self: self._enabled,
)
def _validate_ssl(self):
with open(
self.environment[
oengcommcons.ApacheEnv.HTTPD_CONF_SSL
],
'r'
) as f:
self._sslData = f.read()
missingParams = []
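        # Run editConfigContent only to detect which of the expected SSL
        # parameters are missing from the file; the result is discarded and
        # nothing is written at this stage.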
osetuputil.editConfigContent(
content=self._sslData.splitlines(),
params=self._params,
separator_re='\s+',
new_line_tpl='{spaces}{param} {value}',
added_params=missingParams,
)
if missingParams:
self.logger.warning(
_(
'Expected parameter(s) {missingParams} were not '
'found in {file}. Automatic '
'configuration of this file will not be '
'performed.'
).format(
missingParams=missingParams,
file=self.environment[
oengcommcons.ApacheEnv.HTTPD_CONF_SSL
]
)
)
self._enabled = False
@plugin.event(
stage=plugin.Stages.STAGE_MISC,
condition=lambda self: self._enabled,
)
def _misc(self):
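        # Rewrite the SSL certificate/key/CA parameters in the Apache SSL
        # configuration file within the main transaction, and register the
        # changed lines so they can be reverted on uninstall.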
self.environment[oengcommcons.ApacheEnv.NEED_RESTART] = True
changed_lines = []
self.environment[otopicons.CoreEnv.MAIN_TRANSACTION].append(
filetransaction.FileTransaction(
name=self.environment[
oengcommcons.ApacheEnv.HTTPD_CONF_SSL
],
content=osetuputil.editConfigContent(
content=self._sslData.splitlines(),
params=self._params,
changed_lines=changed_lines,
separator_re='\s+',
new_line_tpl='{spaces}{param} {value}',
),
)
)
self.environment[
osetupcons.CoreEnv.REGISTER_UNINSTALL_GROUPS
].createGroup(
group='ssl',
description='Apache SSL configuration',
optional=True
).addChanges(
'ssl',
self.environment[oengcommcons.ApacheEnv.HTTPD_CONF_SSL],
changed_lines,
)
self.environment[
osetupcons.CoreEnv.UNINSTALL_UNREMOVABLE_FILES
].append(
self.environment[
oengcommcons.ApacheEnv.HTTPD_CONF_SSL
]
)
# vim: expandtab tabstop=4 shiftwidth=4
| OpenUniversity/ovirt-engine | packaging/setup/plugins/ovirt-engine-setup/ovirt-engine-common/apache/ssl.py | Python | apache-2.0 | 7,366 |
from django.views import generic
class HomeView(generic.TemplateView):
template_name = 'home.html'
| yourlabs/django-permissions-widget | test_project/test_project/views.py | Python | mit | 105 |
# -*- coding: utf-8 -*-
"""
/***************************************************************************
SeilaplanPluginDialog
A QGIS plugin
Seilkran-Layoutplaner
-------------------
begin : 2013
copyright : (C) 2015 by ETH Zürich
email : [email protected]
***************************************************************************/
/***************************************************************************
* *
* This program is free software; you can redistribute it and/or modify *
* it under the terms of the GNU General Public License as published by *
* the Free Software Foundation; either version 2 of the License, or *
* (at your option) any later version. *
* *
***************************************************************************/
"""
import os
# GUI and QGIS libraries
from qgis.PyQt.QtCore import QFileInfo, QCoreApplication, QSettings, Qt
from qgis.PyQt.QtWidgets import (QDialog, QMessageBox, QFileDialog, QComboBox,
QTextEdit)
from qgis.PyQt.QtGui import QPixmap
from qgis.core import (QgsRasterLayer, QgsPointXY, QgsProject,
QgsCoordinateReferenceSystem)
from processing.core.Processing import Processing
# Further GUI modules for functionality
from .guiHelperFunctions import (DialogWithImage, createContours,
loadOsmLayer, createProfileLayers)
from ..tools.outputGeo import CH_CRS
from ..tools.configHandler import ConfigHandler
from ..tools.configHandler_project import castToNum
# GUI elements
from .checkableComboBoxOwn import QgsCheckableComboBoxOwn
from .saveDialog import DialogSaveParamset
from .mapMarker import MapMarkerTool
from .ui_seilaplanDialog import Ui_SeilaplanDialogUI
from .profileDialog import ProfileDialog
class SeilaplanPluginDialog(QDialog, Ui_SeilaplanDialogUI):
def __init__(self, interface, confHandler):
"""
:type confHandler: ConfigHandler
"""
QDialog.__init__(self, interface.mainWindow())
# QGIS interface
self.iface = interface
# QGIS map canvas
self.canvas = self.iface.mapCanvas()
# Management of Parameters and settings
self.confHandler = confHandler
self.confHandler.setDialog(self)
self.paramHandler = confHandler.params
self.projectHandler = confHandler.project
self.startAlgorithm = False
self.goToAdjustment = False
# Path to plugin root
self.homePath = os.path.dirname(os.path.dirname(__file__))
# Setup GUI of SEILAPLAN (import from ui_seilaplanDialog.py)
self.setupUi(self)
# Add a special QGIS type drop down with checkboxes to select raster layer
self.rasterField = QgsCheckableComboBoxOwn(self.groupBox_2)
self.rasterField.setObjectName("rasterField2")
self.gridLayout_15.addWidget(self.rasterField, 0, 2, 1, 1)
self.virtRaster = None
# Language
self.locale = QSettings().value("locale/userLocale")[0:2]
# Interaction with canvas, is used to draw onto map canvas
self.drawTool = MapMarkerTool(self.canvas)
# Connect emitted signals
self.drawTool.sig_lineFinished.connect(self.onFinishedLineDraw)
# Survey data line layer
self.surveyLineLayer = None
self.surveyPointLayer = None
# Dictionary of all GUI setting fields
self.parameterFields = {}
self.prHeaderFields = {}
# GUI fields and variables handling coordinate information
self.coordFields = {}
self.linePoints = {
'A': QgsPointXY(-100, -100),
'E': QgsPointXY(-100, -100)
}
# Organize parameter GUI fields in dictionary
self.groupFields()
# Dialog with explanatory images
self.imgBox = DialogWithImage()
# Additional GIS-Layers
self.osmLyrButton.setEnabled(False)
self.contourLyrButton.setEnabled(False)
# Connect GUI elements from dialog window with functions
self.connectGuiElements()
# Dialog window with height profile
self.profileWin = ProfileDialog(self, self.iface, self.drawTool,
self.projectHandler)
# Dialog windows for saving parameter and cable sets
self.paramSetWindow = DialogSaveParamset(self)
# Set initial sate of some buttons
# Choosing height data
self.enableRasterHeightSource()
# Button to show profile
self.buttonShowProf.setEnabled(False)
# Button that activates drawing on map
self.draw.setEnabled(False)
# Button stays down when pressed
self.draw.setCheckable(True)
Processing.initialize()
    # noinspection PyMethodMayBeStatic
    def tr(self, message, **kwargs):
        """Get the translation for a string using Qt translation API.
        We implement this ourselves since we do not inherit QObject.
        :param message: String for translation.
        :type message: str, QString
        :param kwargs: Additional keyword arguments; not used here.
        :returns: Translated version of message.
        :rtype: QString
        """
# noinspection PyTypeChecker,PyArgumentList,PyCallByClass
return QCoreApplication.translate(type(self).__name__, message)
def connectGuiElements(self):
"""Connect GUI elements with functions.
"""
self.buttonCancel.clicked.connect(self.cancel)
self.buttonRun.clicked.connect(self.apply)
self.btnAdjustment.clicked.connect(self.goToAdjustmentWindow)
self.buttonOpenPr.clicked.connect(self.onLoadProjects)
self.buttonSavePr.clicked.connect(self.onSaveProject)
self.rasterField.selectedItemsChanged.connect(self.onChangeRaster)
self.buttonRefreshRa.clicked.connect(self.updateRasterList)
self.buttonInfo.clicked.connect(self.onInfo)
self.radioRaster.toggled.connect(self.onToggleHeightSource)
self.radioSurveyData.toggled.connect(self.onToggleHeightSource)
self.buttonLoadSurveyData.clicked.connect(self.onLoadSurveyData)
self.fieldTypeA.currentTextChanged.connect(self.onTypeAChange)
self.fieldTypeE.currentTextChanged.connect(self.onTypeEChange)
# Info buttons
self.infoRasterlayer.clicked.connect(self.onHeightDataInfoShow)
self.infoSurveyData.clicked.connect(self.onHeightDataInfoShow)
self.infoPointA.clicked.connect(self.onPointAInfoShow)
self.infoPointE.clicked.connect(self.onPointEInfoShow)
self.infoParamSet.clicked.connect(self.onParamSetInfoShow)
self.infoBodenabstand.clicked.connect(self.onShowInfoImg)
self.infoStuetzen.clicked.connect(self.onShowInfoImg)
self.infoQ.clicked.connect(self.onShowInfoFieldQ)
self.infoSK.clicked.connect(self.onShowInfoFieldSK)
self.infoFieldE.clicked.connect(self.onShowInfoFieldE)
self.infoFieldFuellF.clicked.connect(self.onShowInfoFieldFuellF)
self.infoFieldSFT.clicked.connect(self.onShowInfoFieldSFT)
self.infoBerechnung.clicked.connect(self.onShowInfoBerechnung)
# OSM map and contour buttons
self.osmLyrButton.clicked.connect(self.onClickOsmButton)
self.contourLyrButton.clicked.connect(self.onClickContourButton)
# Filed that contains project names
self.fieldProjName.textChanged.connect(self.setProjName)
# Button starts map drawing
self.draw.clicked.connect(self.drawLine)
# Button shows profile window
self.buttonShowProf.clicked.connect(self.onShowProfile)
# Drop down field for parameter set choices
self.fieldParamSet.currentIndexChanged.connect(self.setParameterSet)
self.buttonSaveParamset.clicked.connect(self.onSaveParameterSet)
self.buttonRemoveParamset.clicked.connect(self.onRemoveParameterSet)
# Action for changed Coordinates (when coordinate is changed by hand)
self.coordAx.editingFinished.connect(
lambda: self.onCoordFieldChange('A'))
self.coordAy.editingFinished.connect(
lambda: self.onCoordFieldChange('A'))
self.coordEx.editingFinished.connect(
lambda: self.onCoordFieldChange('E'))
self.coordEy.editingFinished.connect(
lambda: self.onCoordFieldChange('E'))
for name, inputField in self.parameterFields.items():
# lambda definition is put in its own function "getListener" to
# preserve scope, otherwise var "name" gets overwritten in every
# iteration of this loop
if isinstance(inputField, QComboBox) and name == 'Seilsys':
inputField.currentIndexChanged.connect(
self.getListenerComboBox(name))
else:
inputField.editingFinished.connect(
self.getListenerLineEdit(name))
def groupFields(self):
"""Combine all GUI fields in dictionary for faster access.
"""
self.parameterFields = {
'Seilsys': self.fieldSeilsys,
'HM_Kran': self.fieldHMKran,
'Befahr_A': self.fieldBefA,
'Befahr_E': self.fieldBefE,
'Bodenabst_min': self.fieldBabstMin,
'Bodenabst_A': self.fieldBabstA,
'Bodenabst_E': self.fieldBabstE,
'Q': self.fieldQ,
'qT': self.fieldQt,
'D': self.fieldD,
'MBK': self.fieldMBK,
'qZ': self.fieldqZ,
'qR': self.fieldqR,
'SK': self.fieldSK,
'Anlagetyp': self.fieldAnlagetyp,
'Min_Dist_Mast': self.fieldMinDist,
'L_Delta': self.fieldLdelta,
'HM_min': self.fieldHMmin,
'HM_max': self.fieldHMmax,
'HM_Delta': self.fieldHMdelta,
'HM_nat': self.fieldHMnat,
'E': self.fieldE,
'FuellF': self.fieldFuellF,
'SF_T': self.fieldSFT
}
self.coordFields = {
'Ax': self.coordAx,
'Ay': self.coordAy,
'Ex': self.coordEx,
'Ey': self.coordEy
}
self.prHeaderFields = {
'PrVerf': self.fieldPrVerf,
'PrNr': self.fieldPrNr,
'PrGmd': self.fieldPrGmd,
'PrWald': self.fieldPrWald,
'PrBemerkung': self.fieldPrBemerkung,
}
def onToggleHeightSource(self):
if self.radioRaster.isChecked():
self.enableRasterHeightSource()
else:
self.enableSurveyDataHeightSource()
# Reset profile data
self.projectHandler.resetProfile()
self.drawTool.surveyDataMode = False
self.removeSurveyDataLayer()
self.checkPoints()
def enableRasterHeightSource(self):
if not self.radioRaster.isChecked():
self.radioRaster.blockSignals(True)
self.radioSurveyData.blockSignals(True)
self.radioRaster.setChecked(True)
self.radioRaster.blockSignals(False)
self.radioSurveyData.blockSignals(False)
self.fieldSurveyDataPath.setText('')
self.rasterField.blockSignals(True)
self.rasterField.setEnabled(True)
self.rasterField.blockSignals(False)
self.buttonRefreshRa.setEnabled(True)
self.fieldSurveyDataPath.setEnabled(False)
self.buttonLoadSurveyData.setEnabled(False)
def enableSurveyDataHeightSource(self):
if not self.radioSurveyData.isChecked():
self.radioRaster.blockSignals(True)
self.radioSurveyData.blockSignals(True)
self.radioSurveyData.setChecked(True)
self.radioRaster.blockSignals(False)
self.radioSurveyData.blockSignals(False)
self.rasterField.blockSignals(True)
self.rasterField.deselectAllOptions()
self.rasterField.setEnabled(False)
self.rasterField.blockSignals(False)
self.buttonRefreshRa.setEnabled(False)
self.fieldSurveyDataPath.setEnabled(True)
self.buttonLoadSurveyData.setEnabled(True)
def getListenerLineEdit(self, property_name):
return lambda: self.parameterChangedLineEdit(property_name)
def getListenerComboBox(self, property_name):
return lambda: self.parameterChangedComboBox(property_name)
def parameterChangedLineEdit(self, property_name):
# Deactivate editFinished signal so it is not fired twice when
# setParameter() shows a QMessageBox
self.parameterFields[property_name].blockSignals(True)
newVal = self.parameterFields[property_name].text()
newValAsStr = self.paramHandler.setParameter(property_name, newVal)
if newValAsStr is not False:
self.updateParametersetField()
# Insert correctly formatted value
self.parameterFields[property_name].setText(newValAsStr)
self.parameterFields[property_name].blockSignals(False)
def parameterChangedComboBox(self, property_name):
newVal = self.parameterFields[property_name].currentIndex()
newValAsIdx = self.paramHandler.setParameter(property_name, newVal)
if newValAsIdx is not False:
self.updateParametersetField()
def updateParametersetField(self):
# Change current parameter set name
if self.paramHandler.currentSetName:
self.fieldParamSet.setCurrentText(self.paramHandler.currentSetName)
else:
self.fieldParamSet.setCurrentIndex(-1)
def setupContentForFirstRun(self):
# Generate project name
self.fieldProjName.setText(self.projectHandler.generateProjectName())
# Check QGIS table of content for raster layer
self.updateRasterList()
# Load all predefined and user-defined parameter sets from the
# config folder
self.paramHandler.setParameterSet(self.paramHandler.defaultSet)
self.fillParametersetList()
self.fillInValues()
# Set point types
self.fieldTypeA.setCurrentIndex(
self.projectHandler.getPointTypeAsIdx('A'))
self.fieldTypeE.setCurrentIndex(
self.projectHandler.getPointTypeAsIdx('E'))
def setupContent(self):
self.startAlgorithm = False
self.goToAdjustment = False
# Generate project name
self.fieldProjName.setText(self.projectHandler.getProjectName())
if self.projectHandler.heightSourceType in ['dhm', 'dhm_list']:
# Enable gui elements
self.enableRasterHeightSource()
# Search raster and if necessary load from disk
rasternames = self.searchForRaster(
self.projectHandler.getHeightSourceAsStr(source=True))
self.setRaster(rasternames)
elif self.projectHandler.heightSourceType == 'survey':
# Enable gui elements
self.enableSurveyDataHeightSource()
# Show data on map and in gui
self.loadSurveyData()
else:
# Raster could not be loaded correctly
self.rasterField.blockSignals(True)
self.rasterField.deselectAllOptions()
self.rasterField.blockSignals(False)
self.fieldSurveyDataPath.setText('')
# Update start and end point
self.checkPoints()
# Load all predefined and user-defined parameter sets from the
# config folder (maybe a new set was added when project was opened)
self.fillParametersetList()
# Fill in parameter values
self.fillInValues()
# Deactivate / Activate status of field HMKran depending on
# point type of start point
self.updateHMKran(self.projectHandler.A_type)
# Fill in project header data (if present)
self.fillInPrHeaderData()
# Set point types
self.fieldTypeA.setCurrentIndex(
self.projectHandler.getPointTypeAsIdx('A'))
self.fieldTypeE.setCurrentIndex(
self.projectHandler.getPointTypeAsIdx('E'))
def fillParametersetList(self):
self.fieldParamSet.blockSignals(True)
self.fieldParamSet.clear()
self.fieldParamSet.addItems(self.paramHandler.getParametersetNames())
if self.paramHandler.currentSetName:
self.fieldParamSet.setCurrentText(self.paramHandler.currentSetName)
else:
self.fieldParamSet.setCurrentIndex(-1)
self.fieldParamSet.blockSignals(False)
def setParameterSet(self):
name = self.fieldParamSet.currentText()
if name:
self.paramHandler.setParameterSet(name)
# Fill in values of parameter set
self.fillInValues()
# Deactivate / Activate status of field HMKran depending on
# point type of start point
self.updateHMKran(self.projectHandler.A_type)
def fillInValues(self):
"""Fills parameter values into GUI fields."""
for field_name, field in self.parameterFields.items():
val = self.paramHandler.getParameterAsStr(field_name)
if val is not None:
if isinstance(field, QComboBox):
val = self.paramHandler.getParameter(field_name)
field.setCurrentIndex(val)
continue
field.setText(val)
def onSaveParameterSet(self):
if not self.paramHandler.checkValidState():
return
self.paramSetWindow.setData(self.paramHandler.getParametersetNames(),
self.paramHandler.SETS_PATH)
self.paramSetWindow.exec()
setname = self.paramSetWindow.getNewSetname()
if setname:
self.paramHandler.saveParameterSet(setname)
self.fieldParamSet.addItem(setname)
self.fieldParamSet.setCurrentText(setname)
def onRemoveParameterSet(self):
currParamset = self.fieldParamSet.currentText()
# No action if there is no parameter set specified
if currParamset == '':
return
# Standard set cannot be deleted
if currParamset == self.paramHandler.defaultSet:
QMessageBox.critical(self, self.tr('Parameterset loeschen'),
self.tr('Standardparameterset kann nicht geloescht werden.'), QMessageBox.Ok)
return
# Ask before removing
msgBox = QMessageBox()
msgBox.setIcon(QMessageBox.Information)
msgBox.setWindowTitle(self.tr('Parameterset loeschen'))
msgBox.setText(self.tr('Moechten Sie das Parameterset wirklich loeschen?'))
msgBox.setStandardButtons(QMessageBox.No | QMessageBox.Yes)
noBtn = msgBox.button(QMessageBox.No)
noBtn.setText(self.tr("Nein"))
yesBtn = msgBox.button(QMessageBox.Yes)
yesBtn.setText(self.tr("Ja"))
msgBox.show()
msgBox.exec()
if msgBox.clickedButton() == yesBtn:
success = self.paramHandler.removeParameterSet(currParamset)
if not success:
QMessageBox.critical(self, self.tr('Parameterset loeschen'),
self.tr('Ein Fehler ist aufgetreten. Parameterset kann nicht geloescht werden.'), QMessageBox.Ok)
else:
# Set default set
self.paramHandler.setParameterSet(self.paramHandler.defaultSet)
# Update drop down list to remove set
self.fillParametersetList()
# Fill in values of default set
self.fillInValues()
# Deactivate / Activate status of field HMKran depending on
# point type of start point
self.updateHMKran(self.projectHandler.A_type)
def updateRasterList(self):
rasterlist = self.getAvailableRaster()
self.addRastersToDropdown([lyr['name'] for lyr in rasterlist])
return rasterlist
def getAvailableRaster(self):
"""Go trough table of content and collect all raster layers.
"""
rColl = []
for item in QgsProject.instance().layerTreeRoot().findLayers():
lyr = item.layer()
if lyr.type() == 1 and lyr.name() != self.tr('OSM_Karte'):
lyrName = lyr.name()
r = {
'lyr': lyr,
'name': lyrName
}
rColl.append(r)
return rColl
def addRastersToDropdown(self, rasterList):
"""Put list of raster layers into drop down menu of self.rasterField.
If raster name contains some kind of "DHM", select it."""
self.rasterField.blockSignals(True)
selectedRasters = self.rasterField.checkedItems()
self.rasterField.clear()
self.rasterField.addItems(rasterList)
selectedRastersNew = [r for r in selectedRasters if r in rasterList]
self.rasterField.setCheckedItems(selectedRastersNew)
self.rasterField.blockSignals(False)
def onChangeRaster(self):
"""Triggered by choosing a raster from the drop down menu."""
self.setRaster()
# Update start and end point
self.checkPoints()
def setRaster(self, selectedRasters: list = None):
"""Sets selected raster in project handler"""
if not selectedRasters:
selectedRasters = self.rasterField.checkedItems()
rasterlist = self.getAvailableRaster()
rasterLyrList = []
singleRasterLayer = None
for rlyr in rasterlist:
if rlyr['name'] in selectedRasters:
rasterLyrList.append(rlyr['lyr'])
if len(rasterLyrList) == 1:
singleRasterLayer = rasterLyrList[0]
self.projectHandler.setHeightSource(rasterLyrList[0], 'dhm')
rasterValid = True
elif len(rasterLyrList) > 1:
self.projectHandler.setHeightSource(rasterLyrList, 'dhm_list')
rasterValid = True
else:
self.projectHandler.setHeightSource(None)
rasterValid = False
# Check spatial reference of raster and show message
if not rasterValid or not self.checkEqualSpatialRef():
# Unset raster
self.projectHandler.setHeightSource(None)
# Remove raster selection
self.rasterField.blockSignals(True)
self.rasterField.deselectAllOptions()
self.rasterField.blockSignals(False)
rasterValid = False
# Select layer in panel if it's only one
if rasterValid and singleRasterLayer:
self.iface.setActiveLayer(singleRasterLayer)
# If a raster was selected, OSM and Contour Layers can be generated,
# else buttons are disabled
self.osmLyrButton.setEnabled(rasterValid)
self.contourLyrButton.setEnabled(rasterValid)
self.draw.setEnabled(rasterValid)
def searchForRaster(self, rasterpaths):
""" Checks if a raster from a saved project is present in the table
of content or exists at the given location (path).
"""
if isinstance(rasterpaths, str):
rasterpaths = [rasterpaths]
availRaster = self.getAvailableRaster()
rasterNameList = []
self.rasterField.blockSignals(True)
for path in rasterpaths:
rasterinQGIS = False
for i, rlyr in enumerate(availRaster):
lyrPath = rlyr['lyr'].dataProvider().dataSourceUri()
# Raster has been loaded in QGIS project already
if lyrPath == path:
# Sets the dhm name in the drop down
self.rasterField.setItemCheckState(i, Qt.Checked)
rasterNameList.append(rlyr['name'])
rasterinQGIS = True
break
if not rasterinQGIS:
# Raster is still at same location in file system
if os.path.exists(path):
# Load raster
newRaster = QFileInfo(path).baseName()
rasterLyr = QgsRasterLayer(path, newRaster)
QgsProject.instance().addMapLayer(rasterLyr)
# Update drop down menu
dropdownItems = self.updateRasterList()
for idx, item in enumerate(dropdownItems):
if item['name'] == newRaster:
self.rasterField.setItemCheckState(idx, Qt.Checked)
break
rasterNameList.append(newRaster)
if not rasterNameList:
self.rasterField.deselectAllOptions()
txt = self.tr("Raster '{}' nicht vorhanden".format(', '.join(rasterpaths)))
title = self.tr("Fehler beim Laden des Rasters")
QMessageBox.information(self, title, txt)
self.rasterField.blockSignals(False)
return rasterNameList
def checkEqualSpatialRef(self):
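        # Reconcile the CRS of the height source with the map CRS: switch the
        # map canvas to the height source CRS, reproject survey data, or
        # assume a default CRS where necessary; rasters in a geographic CRS
        # are rejected. Returns True if the height data can be used.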
# Check spatial reference of newly added raster
heightSource = self.projectHandler.heightSource
if not heightSource:
return False
hsType = self.projectHandler.heightSourceType
mapCrs = self.canvas.mapSettings().destinationCrs()
lyrCrs = heightSource.spatialRef
title = self.tr('Fehler Koordinatenbezugssystem (KBS)')
msg = ''
success = True
# Height source has a different crs than map --> map crs is changed
if lyrCrs.isValid() and not lyrCrs.isGeographic() and lyrCrs != mapCrs:
self.canvas.setDestinationCrs(lyrCrs)
self.canvas.refresh()
return True
# Height source is in a geographic crs
elif lyrCrs.isValid() and lyrCrs.isGeographic():
# Raster is in geographic coordinates --> automatic transformation
# not possible
if hsType in ['dhm', 'dhm_list']:
msg = self.tr('KBS-Fehler - Raster kann nicht verarbeitet werden')\
.format(lyrCrs.description(), lyrCrs.authid())
success = False
# Survey data can be transformed to map crs
elif hsType == 'survey' and not mapCrs.isGeographic():
# Survey data is transformed to map reference system
heightSource.reprojectToCrs(mapCrs)
success = True
elif hsType == 'survey' and mapCrs.isGeographic():
# Transform to LV95 by default
heightSource.reprojectToCrs(None)
msg = self.tr('KBS-Fehler - Felddaten und QGIS in geografischem KBS')
self.canvas.setDestinationCrs(heightSource.spatialRef)
self.canvas.refresh()
success = True
elif not lyrCrs.isValid():
if mapCrs.isGeographic():
msg = self.tr('KBS-Fehler - Bezugssystem des Rasters unbekannt')
heightSource.spatialRef = QgsCoordinateReferenceSystem(CH_CRS)
self.canvas.setDestinationCrs(heightSource.spatialRef)
self.canvas.refresh()
success = True
else:
# Reference system of survey data not valid or unknown. We use
# refsys of map
heightSource.spatialRef = mapCrs
success = True
if msg:
QMessageBox.information(self, title, msg)
return success
def onLoadSurveyData(self):
title = self.tr('Feldaufnahmen laden')
fFilter = self.tr('csv Dateien (*.csv *.CSV)')
filename, _ = QFileDialog.getOpenFileName(self, title,
self.confHandler.getCurrentPath(), fFilter)
if filename:
self.projectHandler.resetProfile()
# Load data from csv file
self.projectHandler.setHeightSource(None, 'survey', filename)
self.loadSurveyData()
self.checkPoints()
else:
return False
def loadSurveyData(self):
# Remove earlier survey data layer
self.removeSurveyDataLayer()
# Check the spatial reference and inform user if necessary
if not self.checkEqualSpatialRef():
self.projectHandler.setHeightSource(None)
self.projectHandler.resetProfile()
heightSource = self.projectHandler.heightSource
if heightSource and heightSource.valid:
# Create and add QGS layers of data to the map
self.surveyLineLayer, \
self.surveyPointLayer = createProfileLayers(heightSource)
# Zoom to layer
self.iface.setActiveLayer(self.surveyPointLayer)
self.iface.zoomToActiveLayer()
# Set path to csv in read only lineEdit
self.fieldSurveyDataPath.setText(self.projectHandler.getHeightSourceAsStr())
# Activate draw tool
self.drawTool.surveyDataMode = True
self.draw.setEnabled(True)
# Activate OSM button
self.osmLyrButton.setEnabled(True)
else:
self.fieldSurveyDataPath.setText('')
self.drawTool.surveyDataMode = False
self.draw.setEnabled(False)
self.osmLyrButton.setEnabled(False)
def removeSurveyDataLayer(self):
try:
if self.surveyLineLayer:
QgsProject.instance().removeMapLayer(self.surveyLineLayer.id())
self.surveyLineLayer = None
if self.surveyPointLayer:
QgsProject.instance().removeMapLayer(self.surveyPointLayer.id())
self.surveyPointLayer = None
except RuntimeError:
return
def setProjName(self, projname):
self.projectHandler.setProjectName(projname)
# TODO Unset Focus of field when clicking on something else, doesnt work yet
# def mousePressEvent(self, event):
# focused_widget = QtGui.QApplication.focusWidget()
# if isinstance(focused_widget, QtGui.QLineEdit):
# focused_widget.clearFocus()
# QtGui.QDialog.mousePressEvent(self, event)
def drawLine(self):
if self.projectHandler.heightSourceType in ['dhm', 'dhm_list']:
self.drawTool.drawLine()
elif self.projectHandler.heightSourceType == 'survey':
self.drawTool.drawLine(self.projectToProfileLine)
def projectToProfileLine(self, mapPosition):
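        # Project a clicked map position onto the surveyed profile line so
        # that drawn start/end points always lie on the measured terrain data.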
point = self.projectHandler.heightSource.projectPositionOnToLine(mapPosition)
return QgsPointXY(point[0], point[1])
def onCoordFieldChange(self, pointType):
x = castToNum(self.coordFields[pointType + 'x'].text())
y = castToNum(self.coordFields[pointType + 'y'].text())
[x, y], coordState, hasChanged = self.projectHandler.setPoint(
pointType, [x, y])
if hasChanged:
self.changePoint(pointType, [x, y], coordState)
self.updateLineByCoordFields()
def changePoint(self, pointType, coords, coordState):
x = coords[0]
y = coords[1]
# Update profile line geometry
if x and y:
self.linePoints[pointType] = QgsPointXY(x, y)
else:
self.linePoints[pointType] = QgsPointXY(-100, -100)
# Update coordinate state icon
self.changePointSym(coordState[pointType], pointType)
# Update coordinate field (formatted string)
[xStr, yStr] = self.projectHandler.getPointAsStr(pointType)
self.coordFields[pointType + 'x'].blockSignals(True)
self.coordFields[pointType + 'y'].blockSignals(True)
self.coordFields[pointType + 'x'].setText(xStr)
self.coordFields[pointType + 'y'].setText(yStr)
self.coordFields[pointType + 'x'].blockSignals(False)
self.coordFields[pointType + 'y'].blockSignals(False)
# Update profile button and profile length
self.buttonShowProf.setEnabled(self.projectHandler.profileIsValid())
self.laenge.setText(self.projectHandler.getProfileLenAsStr())
def checkPoints(self):
[Ax, Ay], coordState = self.projectHandler.getPoint('A')
[Ex, Ey], coordState = self.projectHandler.getPoint('E')
self.changePoint('A', [Ax, Ay], coordState)
self.changePoint('E', [Ex, Ey], coordState)
# Draw line
self.updateLineByCoordFields()
def updateLineByCoordFields(self):
self.drawTool.reset()
if self.projectHandler.profileIsValid():
self.drawTool.updateLine(list(self.linePoints.values()))
def updateLineByMapDraw(self, newpoint, pointType):
[x, y], coordState, hasChanged = self.projectHandler.setPoint(
pointType, [newpoint.x(), newpoint.y()])
self.changePoint(pointType, [x, y], coordState)
def changePointSym(self, state, point):
iPath = '<html><head/><body><p><img src=":/plugins/SeilaplanPlugin/' \
'gui/icons/icon_{}.png"/></p></body></html>'
greenTxt = ''
yellowTxt = self.tr('zu definieren')
redTxt = self.tr('ausserhalb Raster')
if point == 'A':
if state == 'green':
self.symA.setText(iPath.format('green'))
self.symA.setToolTip(greenTxt)
if state == 'yellow':
self.symA.setText(iPath.format('yellow'))
self.symA.setToolTip(yellowTxt)
if state == 'red':
self.symA.setText(iPath.format('red'))
self.symA.setToolTip(redTxt)
if point == 'E':
if state == 'green':
self.symE.setText(iPath.format('green'))
self.symE.setToolTip(greenTxt)
if state == 'yellow':
self.symE.setText(iPath.format('yellow'))
self.symE.setToolTip(yellowTxt)
if state == 'red':
self.symE.setText(iPath.format('red'))
self.symE.setToolTip(redTxt)
def onClickOsmButton(self):
"""Add a OpenStreetMap layer."""
loadOsmLayer(self.homePath)
self.canvas.refresh()
def onClickContourButton(self):
"""Calcluate contour lines from currently selected dhm and add them to
as a layer."""
if self.projectHandler.heightSource.contourLayer is None:
createContours(self.canvas, self.projectHandler.heightSource)
def onFinishedLineDraw(self, linecoord):
self.projectHandler.resetProfile()
self.updateLineByMapDraw(linecoord[0], 'A')
self.updateLineByMapDraw(linecoord[1], 'E')
# Stop pressing down button
self.draw.setChecked(False)
def onShowProfile(self):
profile = self.projectHandler.preparePreviewProfile()
if profile:
self.profileWin.setProfile(profile)
self.profileWin.setPoleData(
self.projectHandler.fixedPoles['poles'],
self.projectHandler.noPoleSection)
self.profileWin.exec()
def onLoadProjects(self):
title = self.tr('Projekt laden')
fFilter = self.tr('Json- oder Text-Datei') + ' (*.json *.txt)'
filename, _ = QFileDialog.getOpenFileName(self, title,
self.confHandler.getCurrentPath(),
fFilter)
if filename:
self.confHandler.reset()
success = self.confHandler.loadSettings(filename)
if success:
self.setupContent()
else:
QMessageBox.critical(self, self.tr('Fehler beim Laden'),
self.tr('Projektdatei konnte nicht geladen werden.'))
else:
return False
def onSaveProject(self):
title = self.tr('Projekt speichern')
fFilter = self.tr('Json (*.json)')
self.readoutPrHeaderData()
if not self.confHandler.checkValidState():
return
filename, _ = QFileDialog.getSaveFileName(self, title,
os.path.join(self.confHandler.getCurrentPath(),
self.projectHandler.getProjectName() + '.json'), fFilter)
if filename:
if filename[-5:] != '.json':
filename += '.json'
self.confHandler.saveSettings(filename)
else:
return False
def onTypeAChange(self):
idx = self.fieldTypeA.currentIndex()
if idx == -1:
return
self.projectHandler.setPointType('A', idx)
self.updateHMKran(self.projectHandler.A_type)
def onTypeEChange(self):
idx = self.fieldTypeE.currentIndex()
self.projectHandler.setPointType('E', idx)
def updateHMKran(self, poleType):
# Update GUI: fieldHMKran
if poleType in ['pole', 'pole_anchor']:
self.fieldHMKran.setEnabled(False)
self.fieldHMKran.setText('')
elif poleType == 'crane':
paramVal = self.paramHandler.getParameterAsStr('HM_Kran')
self.fieldHMKran.setText(paramVal)
self.fieldHMKran.setEnabled(True)
def onInfo(self):
msg = self.tr('Infotext').format(
os.path.join(os.path.dirname(os.path.dirname(__file__)), 'help', 'docs'))
QMessageBox.information(self, "SEILAPLAN Info", msg, QMessageBox.Ok)
def onHeightDataInfoShow(self):
msg = ''
if self.sender().objectName() == 'infoRasterlayer':
msg = self.tr('Hoeheninformation - Erklaerung Raster')
elif self.sender().objectName() == 'infoSurveyData':
msg = self.tr('Hoeheninformation - Erklaerung Felddaten')
QMessageBox.information(self, self.tr("Hoeheninformationen laden"), msg,
QMessageBox.Ok)
def onPointAInfoShow(self):
imgPath = os.path.join(self.homePath, 'img', self.locale + '_Anfangspunkt.png')
if not os.path.exists(imgPath):
imgPath = os.path.join(self.homePath, 'img', 'de_Anfangspunkt.png')
self.imgBox.setWindowTitle(self.tr('Anfangspunkt'))
# Load image
myPixmap = QPixmap(imgPath)
self.imgBox.label.setPixmap(myPixmap)
self.imgBox.setLayout(self.imgBox.container)
self.imgBox.show()
def onPointEInfoShow(self):
imgPath = os.path.join(self.homePath, 'img', self.locale + '_Endpunkt.png')
if not os.path.exists(imgPath):
imgPath = os.path.join(self.homePath, 'img', 'de_Endpunkt.png')
self.imgBox.setWindowTitle(self.tr('Endpunkt'))
# Load image
myPixmap = QPixmap(imgPath)
self.imgBox.label.setPixmap(myPixmap)
self.imgBox.setLayout(self.imgBox.container)
self.imgBox.show()
def onShowInfoImg(self):
sender = self.sender().objectName()
infoType = sender[4:]
# Titles of info images
infImg = {'Bodenabstand': self.tr('Erklaerungen zum Bodenabstand'),
'Stuetzen': self.tr('Erklaerungen zu den Zwischenstuetzen')}
infoTitle = infImg[infoType]
imgPath = os.path.join(self.homePath, 'img', f"{self.locale}_{infoType}.png")
if not os.path.exists(imgPath):
imgPath = os.path.join(self.homePath, 'img', f"de_{infoType}.png")
self.imgBox.setWindowTitle(infoTitle)
# Load image
myPixmap = QPixmap(imgPath)
self.imgBox.label.setPixmap(myPixmap)
self.imgBox.setLayout(self.imgBox.container)
self.imgBox.show()
def onParamSetInfoShow(self):
msg = self.tr('Erklaerung Paramersets wiederherstellen')
QMessageBox.information(self, self.tr('Parametersets wiederherstellen'),
msg, QMessageBox.Ok)
def onShowInfoFieldQ(self):
msg = self.tr('Erklaerung Gesamtlast')
QMessageBox.information(self, self.tr("Gesamtlast"),
msg, QMessageBox.Ok)
def onShowInfoFieldSK(self):
msg = self.tr('Erklaerung Grundspannung')
QMessageBox.information(self, self.tr("Grundspannung"),
msg, QMessageBox.Ok)
def onShowInfoFieldE(self):
msg = self.tr('Elastizitaetsmodul Tragseil Erklaerung')
QMessageBox.information(self, self.tr("Elastizitaetsmodul Tragseil"),
msg, QMessageBox.Ok)
def onShowInfoFieldFuellF(self):
msg = self.tr('Fuellfaktor Erklaerung')
QMessageBox.information(self,
self.tr("Fuellfaktor"),
msg, QMessageBox.Ok)
def onShowInfoFieldSFT(self):
msg = self.tr('Europaweit wird ein Sicherheitsfaktor von 3.0 fuer das '
'Tragseil verwendet.')
QMessageBox.information(self, self.tr("Sicherheitsfaktor Tragseil"), msg,
QMessageBox.Ok)
def onShowInfoBerechnung(self):
msg = self.tr('Erklaerungen Berechnungsbuttons')
QMessageBox.information(self, self.tr("Naechste Schritte"), msg,
QMessageBox.Ok)
def fillInPrHeaderData(self):
for key, val in self.projectHandler.prHeader.items():
field = self.prHeaderFields[key]
if isinstance(field, QTextEdit):
field.setPlainText(val)
else:
field.setText(val)
def readoutPrHeaderData(self):
prHeader = {}
for key, field in self.prHeaderFields.items():
if isinstance(field, QTextEdit):
prHeader[key] = field.toPlainText()
else:
prHeader[key] = field.text()
self.projectHandler.setPrHeader(prHeader)
def goToAdjustmentWindow(self):
if self.confHandler.checkValidState() \
and self.checkEqualSpatialRef \
and self.confHandler.prepareForCalculation():
self.readoutPrHeaderData()
self.startAlgorithm = False
self.goToAdjustment = True
self.close()
else:
return False
def apply(self):
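        # Validate project data and parameters, then close the dialog with the
        # flag set so the calculation starts afterwards.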
if self.confHandler.checkValidState() \
and self.checkEqualSpatialRef \
and self.paramHandler.checkBodenabstand() \
and self.confHandler.prepareForCalculation():
self.readoutPrHeaderData()
self.startAlgorithm = True
self.goToAdjustment = False
self.close()
else:
            # If project info or parameters are missing or wrong, the
            # algorithm cannot start
return False
def cancel(self):
""" Called when 'Cancel' is pressed."""
self.close()
def cleanUp(self):
# Save user settings
self.confHandler.updateUserSettings()
# Clean markers and lines from map canvas
self.drawTool.reset()
# Remove survey line
self.removeSurveyDataLayer()
def closeEvent(self, QCloseEvent):
"""Last method that is called before main window is closed."""
# Close additional dialogs
self.imgBox.close()
if self.profileWin.isVisible():
self.profileWin.close()
if self.startAlgorithm or self.goToAdjustment:
self.drawTool.reset()
else:
self.cleanUp()
| piMoll/SEILAPLAN | gui/seilaplanPluginDialog.py | Python | gpl-2.0 | 44,262 |
from setuptools import setup, find_packages
setup(name='MODEL7434234848',
version=20140916,
description='MODEL7434234848 from BioModels',
url='http://www.ebi.ac.uk/biomodels-main/MODEL7434234848',
maintainer='Stanley Gu',
maintainer_url='[email protected]',
packages=find_packages(),
package_data={'': ['*.xml', 'README.md']},
) | biomodels/MODEL7434234848 | setup.py | Python | cc0-1.0 | 377 |
#!/usr/bin/env python3
import kfp
import json
import copy
from kfp import components
from kfp import dsl
from kfp.aws import use_aws_secret
sagemaker_hpo_op = components.load_component_from_file('../../../../components/aws/sagemaker/hyperparameter_tuning/component.yaml')
sagemaker_train_op = components.load_component_from_file('../../../../components/aws/sagemaker/train/component.yaml')
sagemaker_model_op = components.load_component_from_file('../../../../components/aws/sagemaker/model/component.yaml')
sagemaker_deploy_op = components.load_component_from_file('../../../../components/aws/sagemaker/deploy/component.yaml')
sagemaker_batch_transform_op = components.load_component_from_file('../../../../components/aws/sagemaker/batch_transform/component.yaml')
hpoChannels = []
trainChannels = []
channelObj = {
'ChannelName': '',
'DataSource': {
'S3DataSource': {
'S3Uri': '',
'S3DataType': 'S3Prefix',
'S3DataDistributionType': 'FullyReplicated'
}
},
'CompressionType': 'None',
'RecordWrapperType': 'None',
'InputMode': 'File'
}
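# The channel template above is deep-copied per channel so the 'train' and
# 'test' entries below do not share nested dicts.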
channelObj['ChannelName'] = 'train'
channelObj['DataSource']['S3DataSource']['S3Uri'] = 's3://kubeflow-pipeline-data/mnist_kmeans_example/train_data'
hpoChannels.append(copy.deepcopy(channelObj))
trainChannels.append(copy.deepcopy(channelObj))
channelObj['ChannelName'] = 'test'
channelObj['DataSource']['S3DataSource']['S3Uri'] = 's3://kubeflow-pipeline-data/mnist_kmeans_example/test_data'
hpoChannels.append(copy.deepcopy(channelObj))
@dsl.pipeline(
name='MNIST Classification pipeline',
description='MNIST Classification using KMEANS in SageMaker'
)
def mnist_classification(region='us-west-2',
image='174872318107.dkr.ecr.us-west-2.amazonaws.com/kmeans:1',
training_input_mode='File',
hpo_strategy='Bayesian',
hpo_metric_name='test:msd',
hpo_metric_type='Minimize',
hpo_early_stopping_type='Off',
hpo_static_parameters={"k": "10", "feature_dim": "784"},
hpo_integer_parameters=[{"Name": "mini_batch_size", "MinValue": "500", "MaxValue": "600"}, {"Name": "extra_center_factor", "MinValue": "10", "MaxValue": "20"}],
hpo_continuous_parameters=[],
hpo_categorical_parameters=[{"Name": "init_method", "Values": ["random", "kmeans++"]}],
hpo_channels=hpoChannels,
hpo_spot_instance=False,
hpo_max_wait_time=3600,
hpo_checkpoint_config={},
output_location='s3://kubeflow-pipeline-data/mnist_kmeans_example/output',
output_encryption_key='',
instance_type='ml.p2.16xlarge',
instance_count=1,
volume_size=50,
hpo_max_num_jobs=9,
hpo_max_parallel_jobs=3,
max_run_time=3600,
endpoint_url='',
network_isolation=True,
traffic_encryption=False,
train_channels=trainChannels,
train_spot_instance=False,
train_max_wait_time=3600,
train_checkpoint_config={},
batch_transform_instance_type='ml.m4.xlarge',
batch_transform_input='s3://kubeflow-pipeline-data/mnist_kmeans_example/input',
batch_transform_data_type='S3Prefix',
batch_transform_content_type='text/csv',
batch_transform_compression_type='None',
                         batch_transform_output='s3://kubeflow-pipeline-data/mnist_kmeans_example/output',
batch_transform_max_concurrent=4,
batch_transform_max_payload=6,
batch_strategy='MultiRecord',
batch_transform_split_type='Line',
role_arn=''
):
hpo = sagemaker_hpo_op(
region=region,
endpoint_url=endpoint_url,
image=image,
training_input_mode=training_input_mode,
strategy=hpo_strategy,
metric_name=hpo_metric_name,
metric_type=hpo_metric_type,
early_stopping_type=hpo_early_stopping_type,
static_parameters=hpo_static_parameters,
integer_parameters=hpo_integer_parameters,
continuous_parameters=hpo_continuous_parameters,
categorical_parameters=hpo_categorical_parameters,
channels=hpo_channels,
output_location=output_location,
output_encryption_key=output_encryption_key,
instance_type=instance_type,
instance_count=instance_count,
volume_size=volume_size,
max_num_jobs=hpo_max_num_jobs,
max_parallel_jobs=hpo_max_parallel_jobs,
max_run_time=max_run_time,
network_isolation=network_isolation,
traffic_encryption=traffic_encryption,
spot_instance=hpo_spot_instance,
max_wait_time=hpo_max_wait_time,
checkpoint_config=hpo_checkpoint_config,
role=role_arn,
).apply(use_aws_secret('aws-secret', 'AWS_ACCESS_KEY_ID', 'AWS_SECRET_ACCESS_KEY'))
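    # The best hyperparameters found by the HPO job feed the full training run below.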
training = sagemaker_train_op(
region=region,
endpoint_url=endpoint_url,
image=image,
training_input_mode=training_input_mode,
hyperparameters=hpo.outputs['best_hyperparameters'],
channels=train_channels,
instance_type=instance_type,
instance_count=instance_count,
volume_size=volume_size,
max_run_time=max_run_time,
model_artifact_path=output_location,
output_encryption_key=output_encryption_key,
network_isolation=network_isolation,
traffic_encryption=traffic_encryption,
spot_instance=train_spot_instance,
max_wait_time=train_max_wait_time,
checkpoint_config=train_checkpoint_config,
role=role_arn,
).apply(use_aws_secret('aws-secret', 'AWS_ACCESS_KEY_ID', 'AWS_SECRET_ACCESS_KEY'))
create_model = sagemaker_model_op(
region=region,
endpoint_url=endpoint_url,
model_name=training.outputs['job_name'],
image=training.outputs['training_image'],
model_artifact_url=training.outputs['model_artifact_url'],
network_isolation=network_isolation,
role=role_arn
).apply(use_aws_secret('aws-secret', 'AWS_ACCESS_KEY_ID', 'AWS_SECRET_ACCESS_KEY'))
prediction = sagemaker_deploy_op(
region=region,
endpoint_url=endpoint_url,
model_name_1=create_model.output,
).apply(use_aws_secret('aws-secret', 'AWS_ACCESS_KEY_ID', 'AWS_SECRET_ACCESS_KEY'))
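    # The real-time endpoint above and the batch transform job below both
    # consume the same SageMaker model created from the training job.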
batch_transform = sagemaker_batch_transform_op(
region=region,
endpoint_url=endpoint_url,
model_name=create_model.output,
instance_type=batch_transform_instance_type,
instance_count=instance_count,
max_concurrent=batch_transform_max_concurrent,
max_payload=batch_transform_max_payload,
batch_strategy=batch_strategy,
input_location=batch_transform_input,
data_type=batch_transform_data_type,
content_type=batch_transform_content_type,
split_type=batch_transform_split_type,
compression_type=batch_transform_compression_type,
        output_location=batch_transform_output
).apply(use_aws_secret('aws-secret', 'AWS_ACCESS_KEY_ID', 'AWS_SECRET_ACCESS_KEY'))
if __name__ == '__main__':
kfp.compiler.Compiler().compile(mnist_classification, __file__ + '.zip')
| kubeflow/kfp-tekton-backend | samples/contrib/aws-samples/mnist-kmeans-sagemaker/mnist-classification-pipeline.py | Python | apache-2.0 | 6,993 |
# ----------------------------------------------------------------------------
# Copyright (c) 2013--, scikit-bio development team.
#
# Distributed under the terms of the Modified BSD License.
#
# The full license is in the file COPYING.txt, distributed with this software.
# ----------------------------------------------------------------------------
from __future__ import absolute_import, division, print_function
from future.builtins import range
from future.utils import viewitems
import six
import itertools
import math
import re
import collections
import copy
import numbers
import textwrap
from contextlib import contextmanager
import numpy as np
from scipy.spatial.distance import hamming
import pandas as pd
from skbio._base import SkbioObject
from skbio.sequence._base import ElasticLines
from skbio.util._misc import chunk_str
from skbio.util._decorator import stable, experimental
class Sequence(collections.Sequence, SkbioObject):
"""Store biological sequence data and optional associated metadata.
``Sequence`` objects do not enforce an alphabet and are thus the most
generic objects for storing biological sequence data. Subclasses ``DNA``,
``RNA``, and ``Protein`` enforce the IUPAC character set [1]_ for, and
provide operations specific to, each respective molecule type.
``Sequence`` objects consist of the underlying sequence data, as well
as optional metadata and positional metadata. The underlying sequence
    is immutable, while the metadata and positional metadata are mutable.
Parameters
----------
sequence : str, Sequence, or 1D np.ndarray (np.uint8 or '\|S1')
Characters representing the biological sequence itself.
metadata : dict, optional
Arbitrary metadata which applies to the entire sequence. A shallow copy
of the ``dict`` will be made (see Examples section below for details).
positional_metadata : pd.DataFrame consumable, optional
Arbitrary per-character metadata (e.g., sequence read quality
scores). Must be able to be passed directly to ``pd.DataFrame``
constructor. Each column of metadata must be the same length as the
biological sequence. A shallow copy of the positional metadata will be
made if necessary (see Examples section below for details).
lowercase : bool or str, optional
If ``True``, lowercase sequence characters will be converted to
uppercase characters. If ``False``, no characters will be converted.
If a str, it will be treated as a key into the positional metadata of
the object. All lowercase characters will be converted to uppercase,
and a ``True`` value will be stored in a boolean array in the
positional metadata under the key.
Attributes
----------
values
metadata
positional_metadata
See Also
--------
DNA
RNA
Protein
References
----------
.. [1] Nomenclature for incompletely specified bases in nucleic acid
sequences: recommendations 1984.
Nucleic Acids Res. May 10, 1985; 13(9): 3021-3030.
A Cornish-Bowden
Examples
--------
>>> from pprint import pprint
>>> from skbio import Sequence
**Creating sequences:**
Create a sequence without any metadata:
>>> seq = Sequence('GGUCGUGAAGGA')
>>> seq
Sequence
---------------
Stats:
length: 12
---------------
0 GGUCGUGAAG GA
Create a sequence with metadata and positional metadata:
>>> metadata = {'id':'seq-id', 'desc':'seq desc', 'authors': ['Alice']}
>>> positional_metadata = {'quality': [3, 3, 4, 10],
... 'exons': [True, True, False, True]}
>>> seq = Sequence('ACGT', metadata=metadata,
... positional_metadata=positional_metadata)
>>> seq
Sequence
-----------------------------
Metadata:
'authors': <class 'list'>
'desc': 'seq desc'
'id': 'seq-id'
Positional metadata:
'exons': <dtype: bool>
'quality': <dtype: int64>
Stats:
length: 4
-----------------------------
0 ACGT
**Retrieving underlying sequence data:**
Retrieve underlying sequence:
>>> seq.values # doctest: +NORMALIZE_WHITESPACE
array([b'A', b'C', b'G', b'T'],
dtype='|S1')
Underlying sequence immutable:
>>> seq.values = np.array([b'T', b'C', b'G', b'A'], dtype='|S1')
Traceback (most recent call last):
...
AttributeError: can't set attribute
>>> seq.values[0] = b'T'
Traceback (most recent call last):
...
ValueError: assignment destination is read-only
**Retrieving sequence metadata:**
Retrieve metadata:
>>> pprint(seq.metadata) # using pprint to display dict in sorted order
{'authors': ['Alice'], 'desc': 'seq desc', 'id': 'seq-id'}
Retrieve positional metadata:
>>> seq.positional_metadata
exons quality
0 True 3
1 True 3
2 False 4
3 True 10
**Updating sequence metadata:**
.. warning:: Be aware that a shallow copy of ``metadata`` and
``positional_metadata`` is made for performance. Since a deep copy is
not made, changes made to mutable Python objects stored as metadata may
affect the metadata of other ``Sequence`` objects or anything else that
shares a reference to the object. The following examples illustrate this
behavior.
First, let's create a sequence and update its metadata:
>>> metadata = {'id':'seq-id', 'desc':'seq desc', 'authors': ['Alice']}
>>> seq = Sequence('ACGT', metadata=metadata)
>>> seq.metadata['id'] = 'new-id'
>>> seq.metadata['pubmed'] = 12345
>>> pprint(seq.metadata)
{'authors': ['Alice'], 'desc': 'seq desc', 'id': 'new-id', 'pubmed': 12345}
Note that the original metadata dictionary (stored in variable
``metadata``) hasn't changed because a shallow copy was made:
>>> pprint(metadata)
{'authors': ['Alice'], 'desc': 'seq desc', 'id': 'seq-id'}
>>> seq.metadata == metadata
False
Note however that since only a *shallow* copy was made, updates to mutable
objects will also change the original metadata dictionary:
>>> seq.metadata['authors'].append('Bob')
>>> seq.metadata['authors']
['Alice', 'Bob']
>>> metadata['authors']
['Alice', 'Bob']
This behavior can also occur when manipulating a sequence that has been
derived from another sequence:
>>> subseq = seq[1:3]
>>> subseq
Sequence
-----------------------------
Metadata:
'authors': <class 'list'>
'desc': 'seq desc'
'id': 'new-id'
'pubmed': 12345
Stats:
length: 2
-----------------------------
0 CG
>>> pprint(subseq.metadata)
{'authors': ['Alice', 'Bob'],
'desc': 'seq desc',
'id': 'new-id',
'pubmed': 12345}
The subsequence has inherited the metadata of its parent sequence. If we
update the subsequence's author list, we see the changes propagated in the
parent sequence and original metadata dictionary:
>>> subseq.metadata['authors'].append('Carol')
>>> subseq.metadata['authors']
['Alice', 'Bob', 'Carol']
>>> seq.metadata['authors']
['Alice', 'Bob', 'Carol']
>>> metadata['authors']
['Alice', 'Bob', 'Carol']
The behavior for updating positional metadata is similar. Let's create a
new sequence with positional metadata that is already stored in a
``pd.DataFrame``:
>>> positional_metadata = pd.DataFrame(
... {'quality': [3, 3, 4, 10], 'list': [[], [], [], []]})
>>> seq = Sequence('ACGT', positional_metadata=positional_metadata)
>>> seq
Sequence
-----------------------------
Positional metadata:
'list': <dtype: object>
'quality': <dtype: int64>
Stats:
length: 4
-----------------------------
0 ACGT
>>> seq.positional_metadata
list quality
0 [] 3
1 [] 3
2 [] 4
3 [] 10
Now let's update the sequence's positional metadata by adding a new column
and changing a value in another column:
>>> seq.positional_metadata['gaps'] = [False, False, False, False]
>>> seq.positional_metadata.loc[0, 'quality'] = 999
>>> seq.positional_metadata
list quality gaps
0 [] 999 False
1 [] 3 False
2 [] 4 False
3 [] 10 False
Note that the original positional metadata (stored in variable
``positional_metadata``) hasn't changed because a shallow copy was made:
>>> positional_metadata
list quality
0 [] 3
1 [] 3
2 [] 4
3 [] 10
>>> seq.positional_metadata.equals(positional_metadata)
False
Next let's create a sequence that has been derived from another sequence:
>>> subseq = seq[1:3]
>>> subseq
Sequence
-----------------------------
Positional metadata:
'list': <dtype: object>
'quality': <dtype: int64>
'gaps': <dtype: bool>
Stats:
length: 2
-----------------------------
0 CG
>>> subseq.positional_metadata
list quality gaps
0 [] 3 False
1 [] 4 False
As described above for metadata, since only a *shallow* copy was made of
the positional metadata, updates to mutable objects will also change the
parent sequence's positional metadata and the original positional metadata
``pd.DataFrame``:
>>> subseq.positional_metadata.loc[0, 'list'].append('item')
>>> subseq.positional_metadata
list quality gaps
0 [item] 3 False
1 [] 4 False
>>> seq.positional_metadata
list quality gaps
0 [] 999 False
1 [item] 3 False
2 [] 4 False
3 [] 10 False
>>> positional_metadata
list quality
0 [] 3
1 [item] 3
2 [] 4
3 [] 10
"""
# ASCII is built such that the difference between uppercase and lowercase
# is the 6th bit.
_ascii_invert_case_bit_offset = 32
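    # ord('Z') == 90; for alphabetic sequence characters, byte values above
    # this boundary are lowercase ASCII letters.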
_ascii_lowercase_boundary = 90
default_write_format = 'fasta'
__hash__ = None
@property
@stable(as_of="0.4.0")
def values(self):
"""Array containing underlying sequence characters.
Notes
-----
This property is not writeable.
Examples
--------
>>> from skbio import Sequence
>>> s = Sequence('AACGA')
>>> s.values # doctest: +NORMALIZE_WHITESPACE
array([b'A', b'A', b'C', b'G', b'A'],
dtype='|S1')
"""
return self._bytes.view('|S1')
@property
@stable(as_of="0.4.0")
def metadata(self):
"""``dict`` containing metadata which applies to the entire sequence.
Notes
-----
This property can be set and deleted.
Examples
--------
>>> from pprint import pprint
>>> from skbio import Sequence
Create a sequence with metadata:
>>> s = Sequence('ACGTACGTACGTACGT',
... metadata={'id': 'seq-id',
... 'description': 'seq description'})
>>> s
Sequence
------------------------------------
Metadata:
'description': 'seq description'
'id': 'seq-id'
Stats:
length: 16
------------------------------------
0 ACGTACGTAC GTACGT
Retrieve metadata:
>>> pprint(s.metadata) # using pprint to display dict in sorted order
{'description': 'seq description', 'id': 'seq-id'}
Update metadata:
>>> s.metadata['id'] = 'new-id'
>>> s.metadata['pubmed'] = 12345
>>> pprint(s.metadata)
{'description': 'seq description', 'id': 'new-id', 'pubmed': 12345}
Set metadata:
>>> s.metadata = {'abc': 123}
>>> s.metadata
{'abc': 123}
Delete metadata:
>>> s.has_metadata()
True
>>> del s.metadata
>>> s.metadata
{}
>>> s.has_metadata()
False
"""
if self._metadata is None:
# not using setter to avoid copy
self._metadata = {}
return self._metadata
@metadata.setter
def metadata(self, metadata):
if not isinstance(metadata, dict):
raise TypeError("metadata must be a dict")
# shallow copy
self._metadata = metadata.copy()
@metadata.deleter
def metadata(self):
self._metadata = None
@property
@stable(as_of="0.4.0")
def positional_metadata(self):
"""``pd.DataFrame`` containing metadata on a per-character basis.
Notes
-----
This property can be set and deleted.
Examples
--------
Create a DNA sequence with positional metadata:
>>> from skbio import DNA
>>> seq = DNA(
... 'ACGT',
... positional_metadata={'quality': [3, 3, 20, 11],
... 'exons': [True, True, False, True]})
>>> seq
DNA
-----------------------------
Positional metadata:
'exons': <dtype: bool>
'quality': <dtype: int64>
Stats:
length: 4
has gaps: False
has degenerates: False
has non-degenerates: True
GC-content: 50.00%
-----------------------------
0 ACGT
Retrieve positional metadata:
>>> seq.positional_metadata
exons quality
0 True 3
1 True 3
2 False 20
3 True 11
Update positional metadata:
>>> seq.positional_metadata['gaps'] = seq.gaps()
>>> seq.positional_metadata
exons quality gaps
0 True 3 False
1 True 3 False
2 False 20 False
3 True 11 False
Set positional metadata:
>>> seq.positional_metadata = {'degenerates': seq.degenerates()}
>>> seq.positional_metadata
degenerates
0 False
1 False
2 False
3 False
Delete positional metadata:
>>> seq.has_positional_metadata()
True
>>> del seq.positional_metadata
>>> seq.positional_metadata
Empty DataFrame
Columns: []
Index: [0, 1, 2, 3]
>>> seq.has_positional_metadata()
False
"""
if self._positional_metadata is None:
# not using setter to avoid copy
self._positional_metadata = pd.DataFrame(
index=np.arange(len(self)))
return self._positional_metadata
@positional_metadata.setter
def positional_metadata(self, positional_metadata):
try:
# copy=True to copy underlying data buffer
positional_metadata = pd.DataFrame(positional_metadata, copy=True)
except pd.core.common.PandasError as e:
raise TypeError('Positional metadata invalid. Must be consumable '
'by pd.DataFrame. Original pandas error message: '
'"%s"' % e)
num_rows = len(positional_metadata.index)
if num_rows != len(self):
raise ValueError(
"Number of positional metadata values (%d) must match the "
"number of characters in the sequence (%d)." %
(num_rows, len(self)))
positional_metadata.reset_index(drop=True, inplace=True)
self._positional_metadata = positional_metadata
@positional_metadata.deleter
def positional_metadata(self):
self._positional_metadata = None
@property
def _string(self):
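        # Sequence bytes as a Python byte string; used by str-based operations
        # such as count(), index(), and __contains__().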
return self._bytes.tostring()
@stable(as_of="0.4.0")
def __init__(self, sequence, metadata=None, positional_metadata=None,
lowercase=False):
if isinstance(sequence, np.ndarray):
if sequence.dtype == np.uint8:
self._set_bytes_contiguous(sequence)
elif sequence.dtype == '|S1':
sequence = sequence.view(np.uint8)
# Guarantee the sequence is an array (might be scalar before
# this).
if sequence.shape == ():
sequence = np.array([sequence], dtype=np.uint8)
self._set_bytes_contiguous(sequence)
else:
raise TypeError(
"Can only create sequence from numpy.ndarray of dtype "
"np.uint8 or '|S1'. Invalid dtype: %s" %
sequence.dtype)
elif isinstance(sequence, Sequence):
# we're not simply accessing sequence.metadata in order to avoid
# creating "empty" metadata representations on both sequence
# objects if they don't have metadata. same strategy is used below
# for positional metadata
if metadata is None and sequence.has_metadata():
metadata = sequence.metadata
if (positional_metadata is None and
sequence.has_positional_metadata()):
positional_metadata = sequence.positional_metadata
sequence = sequence._bytes
self._owns_bytes = False
self._set_bytes(sequence)
else:
# Python 3 will not raise a UnicodeEncodeError so we force it by
# encoding it as ascii
if isinstance(sequence, six.text_type):
sequence = sequence.encode("ascii")
s = np.fromstring(sequence, dtype=np.uint8)
# There are two possibilities (to our knowledge) at this point:
# Either the sequence we were given was something string-like,
# (else it would not have made it past fromstring), or it was a
# numpy scalar, and so our length must be 1.
if isinstance(sequence, np.generic) and len(s) != 1:
raise TypeError("Can cannot create a sequence with %r" %
type(sequence).__name__)
sequence = s
self._owns_bytes = True
self._set_bytes(sequence)
if metadata is None:
self._metadata = None
else:
self.metadata = metadata
if positional_metadata is None:
self._positional_metadata = None
else:
self.positional_metadata = positional_metadata
if lowercase is False:
pass
elif lowercase is True or isinstance(lowercase, six.string_types):
lowercase_mask = self._bytes > self._ascii_lowercase_boundary
self._convert_to_uppercase(lowercase_mask)
# If it isn't True, it must be a string_type
if not (lowercase is True):
self.positional_metadata[lowercase] = lowercase_mask
else:
raise TypeError("lowercase keyword argument expected a bool or "
"string, but got %s" % type(lowercase))
def _set_bytes_contiguous(self, sequence):
"""Munge the sequence data into a numpy array of dtype uint8."""
if not sequence.flags['C_CONTIGUOUS']:
# numpy doesn't support views of non-contiguous arrays. Since we're
# making heavy use of views internally, and users may also supply
# us with a view, make sure we *always* store a contiguous array to
# avoid hard-to-track bugs. See
# https://github.com/numpy/numpy/issues/5716
sequence = np.ascontiguousarray(sequence)
self._owns_bytes = True
else:
self._owns_bytes = False
self._set_bytes(sequence)
def _set_bytes(self, sequence):
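        # Lock the numpy buffer so the underlying sequence data stays immutable.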
sequence.flags.writeable = False
self._bytes = sequence
def _convert_to_uppercase(self, lowercase):
if np.any(lowercase):
with self._byte_ownership():
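                # XOR with 32 clears the ASCII case bit at the masked
                # positions, converting 'a'-'z' to 'A'-'Z' in place.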
self._bytes[lowercase] ^= self._ascii_invert_case_bit_offset
@stable(as_of="0.4.0")
def __contains__(self, subsequence):
"""Determine if a subsequence is contained in the biological sequence.
Parameters
----------
subsequence : str, Sequence, or 1D np.ndarray (np.uint8 or '\|S1')
The putative subsequence.
Returns
-------
bool
Indicates whether `subsequence` is contained in the biological
sequence.
Raises
------
TypeError
If `subsequence` is a ``Sequence`` object with a different type
than the biological sequence.
Examples
--------
>>> from skbio import Sequence
>>> s = Sequence('GGUCGUGAAGGA')
>>> 'GGU' in s
True
>>> 'CCC' in s
False
"""
return self._munge_to_bytestring(subsequence, "in") in self._string
@stable(as_of="0.4.0")
def __eq__(self, other):
"""Determine if the biological sequence is equal to another.
Biological sequences are equal if they are *exactly* the same type and
their sequence characters, metadata, and positional metadata are the
same.
Parameters
----------
other : Sequence
Sequence to test for equality against.
Returns
-------
bool
Indicates whether the biological sequence is equal to `other`.
Examples
--------
Define two biological sequences that have the same underlying sequence
of characters:
>>> from skbio import Sequence
>>> s = Sequence('ACGT')
>>> t = Sequence('ACGT')
The two sequences are considered equal because they are the same type,
their underlying sequence of characters are the same, and their
optional metadata attributes (``metadata`` and ``positional_metadata``)
were not provided:
>>> s == t
True
>>> t == s
True
Define another biological sequence with a different sequence of
characters than the previous two biological sequences:
>>> u = Sequence('ACGA')
>>> u == t
False
Define a biological sequence with the same sequence of characters as
``u`` but with different metadata and positional metadata:
>>> v = Sequence('ACGA', metadata={'id': 'abc'},
... positional_metadata={'quality':[1, 5, 3, 3]})
The two sequences are not considered equal because their metadata and
positional metadata do not match:
>>> u == v
False
"""
# checks ordered from least to most expensive
if self.__class__ != other.__class__:
return False
# we're not simply comparing self.metadata to other.metadata in order
# to avoid creating "empty" metadata representations on the sequence
# objects if they don't have metadata. same strategy is used below for
# positional metadata
if self.has_metadata() and other.has_metadata():
if self.metadata != other.metadata:
return False
elif not (self.has_metadata() or other.has_metadata()):
# both don't have metadata
pass
else:
# one has metadata while the other does not
return False
if self._string != other._string:
return False
if self.has_positional_metadata() and other.has_positional_metadata():
if not self.positional_metadata.equals(other.positional_metadata):
return False
elif not (self.has_positional_metadata() or
other.has_positional_metadata()):
# both don't have positional metadata
pass
else:
# one has positional metadata while the other does not
return False
return True
@stable(as_of="0.4.0")
def __ne__(self, other):
"""Determine if the biological sequence is not equal to another.
Biological sequences are not equal if they are not *exactly* the same
type, or their sequence characters, metadata, or positional metadata
differ.
Parameters
----------
other : Sequence
Sequence to test for inequality against.
Returns
-------
bool
Indicates whether the biological sequence is not equal to `other`.
Examples
--------
>>> from skbio import Sequence
>>> s = Sequence('ACGT')
>>> t = Sequence('ACGT')
>>> s != t
False
>>> u = Sequence('ACGA')
>>> u != t
True
>>> v = Sequence('ACGA', metadata={'id': 'v'})
>>> u != v
True
"""
return not (self == other)
@stable(as_of="0.4.0")
def __getitem__(self, indexable):
"""Slice the biological sequence.
Parameters
----------
indexable : int, slice, iterable (int and slice), 1D array_like (bool)
The position(s) to return from the biological sequence. If
`indexable` is an iterable of integers, these are assumed to be
indices in the sequence to keep. If `indexable` is a 1D
``array_like`` of booleans, these are assumed to be the positions
in the sequence to keep.
Returns
-------
Sequence
New biological sequence containing the position(s) specified by
`indexable` in the current biological sequence. If quality scores
are present, they will be sliced in the same manner and included in
the returned biological sequence. ID and description are also
included.
Examples
--------
>>> from skbio import Sequence
>>> s = Sequence('GGUCGUGAAGGA')
Obtain a single character from the biological sequence:
>>> s[1]
Sequence
-------------
Stats:
length: 1
-------------
0 G
Obtain a slice:
>>> s[7:]
Sequence
-------------
Stats:
length: 5
-------------
0 AAGGA
Obtain characters at the following indices:
>>> s[[3, 4, 7, 0, 3]]
Sequence
-------------
Stats:
length: 5
-------------
0 CGAGC
Obtain characters at positions evaluating to `True`:
>>> s = Sequence('GGUCG')
>>> index = [True, False, True, 'a' is 'a', False]
>>> s[index]
Sequence
-------------
Stats:
length: 3
-------------
0 GUC
"""
if (not isinstance(indexable, np.ndarray) and
((not isinstance(indexable, six.string_types)) and
hasattr(indexable, '__iter__'))):
indexable_ = indexable
indexable = np.asarray(indexable)
if indexable.dtype == object:
indexable = list(indexable_) # TODO: Don't blow out memory
if len(indexable) == 0:
# indexing with an empty list, so convert to ndarray and
# fall through to ndarray slicing below
indexable = np.asarray(indexable)
else:
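                # Mixed fancy indexing (ints and/or slices): gather each
                # selected byte range and concatenate into one sequence.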
seq = np.concatenate(
list(_slices_from_iter(self._bytes, indexable)))
index = _as_slice_if_single_index(indexable)
positional_metadata = None
if self.has_positional_metadata():
pos_md_slices = list(_slices_from_iter(
self.positional_metadata, index))
positional_metadata = pd.concat(pos_md_slices)
return self._to(sequence=seq,
positional_metadata=positional_metadata)
elif (isinstance(indexable, six.string_types) or
isinstance(indexable, bool)):
raise IndexError("Cannot index with %s type: %r" %
(type(indexable).__name__, indexable))
if (isinstance(indexable, np.ndarray) and
indexable.dtype == bool and
len(indexable) != len(self)):
raise IndexError("An boolean vector index must be the same length"
" as the sequence (%d, not %d)." %
(len(self), len(indexable)))
if isinstance(indexable, np.ndarray) and indexable.size == 0:
# convert an empty ndarray to a supported dtype for slicing a numpy
# array
indexable = indexable.astype(int)
seq = self._bytes[indexable]
positional_metadata = self._slice_positional_metadata(indexable)
return self._to(sequence=seq, positional_metadata=positional_metadata)
def _slice_positional_metadata(self, indexable):
if self.has_positional_metadata():
if _is_single_index(indexable):
index = _single_index_to_slice(indexable)
else:
index = indexable
return self.positional_metadata.iloc[index]
else:
return None
@stable(as_of="0.4.0")
def __len__(self):
"""Return the number of characters in the biological sequence.
Returns
-------
int
The length of the biological sequence.
Examples
--------
>>> from skbio import Sequence
>>> s = Sequence('GGUC')
>>> len(s)
4
"""
return self._bytes.size
@stable(as_of="0.4.0")
def __nonzero__(self):
"""Returns truth value (truthiness) of sequence.
Returns
-------
bool
True if length of sequence is greater than 0, else False.
Examples
--------
>>> from skbio import Sequence
>>> bool(Sequence(''))
False
>>> bool(Sequence('ACGT'))
True
"""
return len(self) > 0
@stable(as_of="0.4.0")
def __iter__(self):
"""Iterate over positions in the biological sequence.
Yields
------
Sequence
Single character subsequence, one for each position in the
sequence.
Examples
--------
>>> from skbio import Sequence
>>> s = Sequence('GGUC')
>>> for c in s:
... str(c)
'G'
'G'
'U'
'C'
"""
for i in range(len(self)):
yield self[i]
@stable(as_of="0.4.0")
def __reversed__(self):
"""Iterate over positions in the biological sequence in reverse order.
Yields
------
Sequence
Single character subsequence, one for each position in the
sequence.
Examples
--------
>>> from skbio import Sequence
>>> s = Sequence('GGUC')
>>> for c in reversed(s):
... str(c)
'C'
'U'
'G'
'G'
"""
return iter(self[::-1])
@stable(as_of="0.4.0")
def __str__(self):
"""Return biological sequence characters as a string.
Returns
-------
str
Sequence characters as a string. No metadata or positional
metadata will be included.
See Also
--------
sequence
Examples
--------
>>> from skbio import Sequence
>>> s = Sequence('GGUCGUAAAGGA', metadata={'id':'hello'})
>>> str(s)
'GGUCGUAAAGGA'
"""
return str(self._string.decode("ascii"))
@stable(as_of="0.4.0")
def __repr__(self):
r"""Return a string representation of the biological sequence object.
Representation includes:
* sequence type
* metadata keys and values: will display key/value if it is an
understood type, otherwise just the type will be displayed. If it is
an understood type whose representation is too long, just the type
will be displayed
* positional metadata: column names and column dtypes will be displayed
in the order they appear in the positional metadata ``pd.DataFrame``.
Column names (i.e., keys) follow the same display rules as metadata
keys
* sequence stats (e.g., length)
* up to five lines of chunked sequence data. Each line of chunked
sequence data displays the current position in the sequence
Returns
-------
str
String representation of the biological sequence object.
Notes
-----
Subclasses can override Sequence._repr_stats to provide custom
statistics.
Examples
--------
Short sequence without metadata:
>>> from skbio import Sequence
>>> Sequence('ACGTAATGGATACGTAATGCA')
Sequence
-------------------------
Stats:
length: 21
-------------------------
0 ACGTAATGGA TACGTAATGC A
Longer sequence displays first two lines and last two lines:
>>> Sequence('ACGT' * 100)
Sequence
---------------------------------------------------------------------
Stats:
length: 400
---------------------------------------------------------------------
0 ACGTACGTAC GTACGTACGT ACGTACGTAC GTACGTACGT ACGTACGTAC GTACGTACGT
60 ACGTACGTAC GTACGTACGT ACGTACGTAC GTACGTACGT ACGTACGTAC GTACGTACGT
...
300 ACGTACGTAC GTACGTACGT ACGTACGTAC GTACGTACGT ACGTACGTAC GTACGTACGT
360 ACGTACGTAC GTACGTACGT ACGTACGTAC GTACGTACGT
Sequence with metadata and positional metadata:
>>> metadata = {
... 'id': 'seq-id',
... 'description': 'description of the sequence, wrapping across '
... 'lines if it\'s too long',
... 'authors': ['Alice', 'Bob', 'Carol'],
... 'year': 2015,
... 'published': True
... }
>>> positional_metadata = {
... 'quality': [3, 10, 11, 10],
... 'exons': [True, True, False, True]
... }
>>> Sequence('ACGT', metadata=metadata,
... positional_metadata=positional_metadata)
Sequence
----------------------------------------------------------------------
Metadata:
'authors': <class 'list'>
'description': "description of the sequence, wrapping across lines
if it's too long"
'id': 'seq-id'
'published': True
'year': 2015
Positional metadata:
'exons': <dtype: bool>
'quality': <dtype: int64>
Stats:
length: 4
----------------------------------------------------------------------
0 ACGT
"""
return _SequenceReprBuilder(
seq=self,
width=71, # 79 for pep8, 8 space indent for docstrings
indent=4,
chunk_size=10).build()
def _repr_stats(self):
"""Define statistics to display in the sequence's repr.
Subclasses can override this method to provide type-specific
statistics.
This method computes a single statistic: length.
Returns
-------
list
List of tuples where each tuple represents a statistic. Each tuple
contains exactly two ``str`` elements: the statistic's name/label,
and the str-formatted value of the statistic. Ordering of
statistics (i.e., list order) determines display order in the
sequence repr.
"""
return [('length', '%d' % len(self))]
@stable(as_of="0.4.0")
def __copy__(self):
"""Return a shallow copy of the biological sequence.
See Also
--------
copy
Notes
-----
This method is equivalent to ``seq.copy(deep=False)``.
"""
return self.copy(deep=False)
@stable(as_of="0.4.0")
def __deepcopy__(self, memo):
"""Return a deep copy of the biological sequence.
See Also
--------
copy
Notes
-----
This method is equivalent to ``seq.copy(deep=True)``.
"""
return self._copy(True, memo)
@stable(as_of="0.4.0")
def has_metadata(self):
"""Determine if the sequence contains metadata.
Returns
-------
bool
Indicates whether the sequence has metadata
Examples
--------
>>> from skbio import DNA
>>> s = DNA('ACACGACGTT')
>>> s.has_metadata()
False
>>> t = DNA('ACACGACGTT', metadata={'id': 'seq-id'})
>>> t.has_metadata()
True
"""
return self._metadata is not None and bool(self.metadata)
@stable(as_of="0.4.0")
def has_positional_metadata(self):
"""Determine if the sequence contains positional metadata.
Returns
-------
bool
Indicates whether the sequence has positional metadata
Examples
--------
>>> from skbio import DNA
>>> s = DNA('ACACGACGTT')
>>> s.has_positional_metadata()
False
>>> t = DNA('ACACGACGTT', positional_metadata={'quality': range(10)})
>>> t.has_positional_metadata()
True
"""
return (self._positional_metadata is not None and
len(self.positional_metadata.columns) > 0)
@stable(as_of="0.4.0")
def copy(self, deep=False):
"""Return a copy of the biological sequence.
Parameters
----------
deep : bool, optional
Perform a deep copy. If ``False``, perform a shallow copy.
Returns
-------
Sequence
Copy of the biological sequence.
Notes
-----
Since sequence objects can share the same underlying immutable sequence
data (or pieces of it), this method can be used to create a sequence
object with its own copy of the sequence data so that the original
sequence data can be garbage-collected.
Examples
--------
Create a sequence:
>>> from pprint import pprint
>>> from skbio import Sequence
>>> seq = Sequence('ACGT',
... metadata={'id': 'seq-id', 'authors': ['Alice']},
... positional_metadata={'quality': [7, 10, 8, 5],
... 'list': [[], [], [], []]})
Make a shallow copy of the sequence:
>>> seq_copy = seq.copy()
>>> seq_copy == seq
True
Setting new references in the copied sequence's metadata doesn't affect
the original sequence's metadata:
>>> seq_copy.metadata['id'] = 'new-id'
>>> pprint(seq_copy.metadata)
{'authors': ['Alice'], 'id': 'new-id'}
>>> pprint(seq.metadata)
{'authors': ['Alice'], 'id': 'seq-id'}
The same applies to the sequence's positional metadata:
>>> seq_copy.positional_metadata.loc[0, 'quality'] = 999
>>> seq_copy.positional_metadata
list quality
0 [] 999
1 [] 10
2 [] 8
3 [] 5
>>> seq.positional_metadata
list quality
0 [] 7
1 [] 10
2 [] 8
3 [] 5
Since only a *shallow* copy was made, updates to mutable objects stored
as metadata affect the original sequence's metadata:
>>> seq_copy.metadata['authors'].append('Bob')
>>> pprint(seq_copy.metadata)
{'authors': ['Alice', 'Bob'], 'id': 'new-id'}
>>> pprint(seq.metadata)
{'authors': ['Alice', 'Bob'], 'id': 'seq-id'}
The same applies to the sequence's positional metadata:
>>> seq_copy.positional_metadata.loc[0, 'list'].append(1)
>>> seq_copy.positional_metadata
list quality
0 [1] 999
1 [] 10
2 [] 8
3 [] 5
>>> seq.positional_metadata
list quality
0 [1] 7
1 [] 10
2 [] 8
3 [] 5
Perform a deep copy to avoid this behavior:
>>> seq_deep_copy = seq.copy(deep=True)
Updates to mutable objects no longer affect the original sequence's
metadata:
>>> seq_deep_copy.metadata['authors'].append('Carol')
>>> pprint(seq_deep_copy.metadata)
{'authors': ['Alice', 'Bob', 'Carol'], 'id': 'seq-id'}
>>> pprint(seq.metadata)
{'authors': ['Alice', 'Bob'], 'id': 'seq-id'}
Nor its positional metadata:
>>> seq_deep_copy.positional_metadata.loc[0, 'list'].append(2)
>>> seq_deep_copy.positional_metadata
list quality
0 [1, 2] 7
1 [] 10
2 [] 8
3 [] 5
>>> seq.positional_metadata
list quality
0 [1] 7
1 [] 10
2 [] 8
3 [] 5
"""
return self._copy(deep, {})
def _copy(self, deep, memo):
# strategy: copy the sequence without metadata first, then set metadata
# attributes with copies. we take this approach instead of simply
# passing the metadata through the Sequence constructor because we
# don't want to copy twice (this could happen when deep=True, where we
# deep copy here and then shallow copy in the Sequence constructor). we
# also directly set the private metadata attributes instead of using
# their public setters to avoid an unnecessary copy
# we don't make a distinction between deep vs. shallow copy of bytes
# because dtype=np.uint8. we only need to make the distinction when
# dealing with object dtype
bytes = np.copy(self._bytes)
seq_copy = self._constructor(sequence=bytes, metadata=None,
positional_metadata=None)
if self.has_metadata():
metadata = self.metadata
if deep:
metadata = copy.deepcopy(metadata, memo)
else:
metadata = metadata.copy()
seq_copy._metadata = metadata
if self.has_positional_metadata():
positional_metadata = self.positional_metadata
if deep:
positional_metadata = copy.deepcopy(positional_metadata, memo)
else:
                # pd.DataFrame.copy(deep=True) copies the underlying data
                # buffer, but Python objects stored in the frame are still
                # shared, matching Sequence's shallow-copy semantics
positional_metadata = positional_metadata.copy(deep=True)
seq_copy._positional_metadata = positional_metadata
return seq_copy
@stable(as_of='0.4.0')
def lowercase(self, lowercase):
"""Return a case-sensitive string representation of the sequence.
Parameters
----------
lowercase: str or boolean vector
If lowercase is a boolean vector, it is used to set sequence
characters to lowercase in the output string. True values in the
boolean vector correspond to lowercase characters. If lowercase
is a str, it is treated like a key into the positional metadata,
pointing to a column which must be a boolean vector.
That boolean vector is then used as described previously.
Returns
-------
str
String representation of sequence with specified characters set to
lowercase.
Examples
--------
>>> from skbio import Sequence
>>> s = Sequence('ACGT')
>>> s.lowercase([True, True, False, False])
'acGT'
>>> s = Sequence('ACGT',
... positional_metadata={
... 'exons': [True, False, False, True]})
>>> s.lowercase('exons')
'aCGt'
Constructor automatically populates a column in positional metadata
when the ``lowercase`` keyword argument is provided with a column name:
>>> s = Sequence('ACgt', lowercase='introns')
>>> s.lowercase('introns')
'ACgt'
>>> s = Sequence('ACGT', lowercase='introns')
>>> s.lowercase('introns')
'ACGT'
"""
index = self._munge_to_index_array(lowercase)
outbytes = self._bytes.copy()
outbytes[index] ^= self._ascii_invert_case_bit_offset
return str(outbytes.tostring().decode('ascii'))
@stable(as_of="0.4.0")
def count(self, subsequence, start=None, end=None):
"""Count occurrences of a subsequence in the biological sequence.
Parameters
----------
subsequence : str, Sequence, or 1D np.ndarray (np.uint8 or '\|S1')
Subsequence to count occurrences of.
start : int, optional
The position at which to start counting (inclusive).
end : int, optional
The position at which to stop counting (exclusive).
Returns
-------
int
Number of occurrences of `subsequence` in the biological sequence.
Raises
------
ValueError
If `subsequence` is of length 0.
TypeError
If `subsequence` is a ``Sequence`` object with a different type
than the biological sequence.
Examples
--------
>>> from skbio import Sequence
>>> s = Sequence('GGUCG')
>>> s.count('G')
3
>>> s.count('GG')
1
>>> s.count('T')
0
>>> s.count('G', 2, 5)
1
"""
if len(subsequence) == 0:
raise ValueError("`count` is not defined for empty subsequences.")
return self._string.count(
self._munge_to_bytestring(subsequence, "count"), start, end)
@stable(as_of="0.4.0")
def index(self, subsequence, start=None, end=None):
"""Find position where subsequence first occurs in the sequence.
Parameters
----------
subsequence : str, Sequence, or 1D np.ndarray (np.uint8 or '\|S1')
Subsequence to search for in the biological sequence.
start : int, optional
The position at which to start searching (inclusive).
end : int, optional
The position at which to stop searching (exclusive).
Returns
-------
int
Position where `subsequence` first occurs in the biological
sequence.
Raises
------
ValueError
If `subsequence` is not present in the biological sequence.
TypeError
If `subsequence` is a ``Sequence`` object with a different type
than the biological sequence.
Examples
--------
>>> from skbio import Sequence
>>> s = Sequence('ACACGACGTT-')
>>> s.index('ACG')
2
"""
try:
return self._string.index(
self._munge_to_bytestring(subsequence, "index"), start, end)
except ValueError:
raise ValueError(
"%r is not present in %r." % (subsequence, self))
@experimental(as_of="0.4.0")
def distance(self, other, metric=None):
"""Compute the distance to another sequence.
Parameters
----------
other : str, Sequence, or 1D np.ndarray (np.uint8 or '\|S1')
Sequence to compute the distance to.
metric : function, optional
Function used to compute the distance between the biological
sequence and `other`. If ``None`` (the default),
``scipy.spatial.distance.hamming`` will be used. This function
should take two ``skbio.Sequence`` objects and return a ``float``.
Returns
-------
float
Distance between the biological sequence and `other`.
Raises
------
ValueError
If the sequences are not the same length when `metric` is ``None``
(i.e., `metric` is ``scipy.spatial.distance.hamming``). This is
only checked when using this metric, as equal length is not a
requirement of all sequence distance metrics. In general, the
metric itself should test and give an informative error message,
but the message from ``scipy.spatial.distance.hamming`` is somewhat
cryptic (as of this writing), and it's the default metric, so we
explicitly do this check here. This metric-specific check will be
removed from this method when the ``skbio.sequence.stats`` module
is created (track progress on issue #913).
TypeError
If `other` is a ``Sequence`` object with a different type than the
biological sequence.
See Also
--------
fraction_diff
fraction_same
scipy.spatial.distance.hamming
Examples
--------
>>> from skbio import Sequence
>>> s = Sequence('GGUC')
>>> t = Sequence('AGUC')
>>> s.distance(t)
0.25
>>> def custom_dist(s1, s2): return 0.42
>>> s.distance(t, custom_dist)
0.42
"""
# TODO refactor this method to accept a name (string) of the distance
# metric to apply and accept **kwargs
other = self._munge_to_sequence(other, 'distance')
if metric is None:
return self._hamming(other)
return float(metric(self, other))
def _hamming(self, other):
# Hamming requires equal length sequences. We are checking this
# here because the error you would get otherwise is cryptic.
if len(self) != len(other):
raise ValueError(
"Sequences do not have equal length. "
"Hamming distances can only be computed between "
"sequences of equal length.")
return float(hamming(self.values, other.values))
@stable(as_of="0.4.0")
def matches(self, other):
"""Find positions that match with another sequence.
Parameters
----------
other : str, Sequence, or 1D np.ndarray (np.uint8 or '\|S1')
Sequence to compare to.
Returns
-------
1D np.ndarray (bool)
Boolean vector where ``True`` at position ``i`` indicates a match
between the sequences at their positions ``i``.
Raises
------
ValueError
If the sequences are not the same length.
TypeError
If `other` is a ``Sequence`` object with a different type than the
biological sequence.
See Also
--------
mismatches
Examples
--------
>>> from skbio import Sequence
>>> s = Sequence('GGUC')
>>> t = Sequence('GAUU')
>>> s.matches(t)
array([ True, False, True, False], dtype=bool)
"""
other = self._munge_to_sequence(other, 'matches/mismatches')
if len(self) != len(other):
raise ValueError("Match and mismatch vectors can only be "
"generated from equal length sequences.")
return self._bytes == other._bytes
@stable(as_of="0.4.0")
def mismatches(self, other):
"""Find positions that do not match with another sequence.
Parameters
----------
other : str, Sequence, or 1D np.ndarray (np.uint8 or '\|S1')
Sequence to compare to.
Returns
-------
1D np.ndarray (bool)
Boolean vector where ``True`` at position ``i`` indicates a
mismatch between the sequences at their positions ``i``.
Raises
------
ValueError
If the sequences are not the same length.
TypeError
If `other` is a ``Sequence`` object with a different type than the
biological sequence.
See Also
--------
matches
Examples
--------
>>> from skbio import Sequence
>>> s = Sequence('GGUC')
>>> t = Sequence('GAUU')
>>> s.mismatches(t)
array([False, True, False, True], dtype=bool)
"""
return np.invert(self.matches(other))
@stable(as_of="0.4.0")
def match_frequency(self, other, relative=False):
"""Return count of positions that are the same between two sequences.
Parameters
----------
other : str, Sequence, or 1D np.ndarray (np.uint8 or '\|S1')
Sequence to compare to.
relative : bool, optional
If ``True``, return the relative frequency of matches instead of
the count.
Returns
-------
int or float
Number of positions that are the same between the sequences. This
will be an ``int`` if `relative` is ``False`` and a ``float``
if `relative` is ``True``.
Raises
------
ValueError
If the sequences are not the same length.
TypeError
If `other` is a ``Sequence`` object with a different type than the
biological sequence.
See Also
--------
mismatch_frequency
matches
mismatches
distance
Examples
--------
>>> from skbio import Sequence
>>> s = Sequence('GGUC')
>>> t = Sequence('AGUC')
>>> s.match_frequency(t)
3
>>> s.match_frequency(t, relative=True)
0.75
"""
if relative:
return float(self.matches(other).mean())
else:
return int(self.matches(other).sum())
@stable(as_of="0.4.0")
def mismatch_frequency(self, other, relative=False):
"""Return count of positions that differ between two sequences.
Parameters
----------
other : str, Sequence, or 1D np.ndarray (np.uint8 or '\|S1')
Sequence to compare to.
relative : bool, optional
If ``True``, return the relative frequency of mismatches instead of
the count.
Returns
-------
int or float
Number of positions that differ between the sequences. This will be
an ``int`` if `relative` is ``False`` and a ``float``
if `relative` is ``True``.
Raises
------
ValueError
If the sequences are not the same length.
TypeError
If `other` is a ``Sequence`` object with a different type than the
biological sequence.
See Also
--------
match_frequency
matches
mismatches
distance
Examples
--------
>>> from skbio import Sequence
>>> s = Sequence('GGUC')
>>> t = Sequence('AGUC')
>>> s.mismatch_frequency(t)
1
>>> s.mismatch_frequency(t, relative=True)
0.25
"""
if relative:
return float(self.mismatches(other).mean())
else:
return int(self.mismatches(other).sum())
@stable(as_of="0.4.0")
def iter_kmers(self, k, overlap=True):
"""Generate kmers of length `k` from the biological sequence.
Parameters
----------
k : int
The kmer length.
overlap : bool, optional
Defines whether the kmers should be overlapping or not.
Yields
------
Sequence
kmer of length `k` contained in the biological sequence.
Raises
------
ValueError
If `k` is less than 1.
Examples
--------
>>> from skbio import Sequence
>>> s = Sequence('ACACGACGTT')
>>> for kmer in s.iter_kmers(4, overlap=False):
... str(kmer)
'ACAC'
'GACG'
>>> for kmer in s.iter_kmers(3, overlap=True):
... str(kmer)
'ACA'
'CAC'
'ACG'
'CGA'
'GAC'
'ACG'
'CGT'
'GTT'
"""
if k < 1:
raise ValueError("k must be greater than 0.")
if overlap:
step = 1
count = len(self) - k + 1
else:
step = k
count = len(self) // k
if self.has_positional_metadata():
for i in range(0, len(self) - k + 1, step):
yield self[i:i+k]
# Optimized path when no positional metadata
else:
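            # Zero-copy (count, k) view of the byte buffer: row i is the kmer
            # starting at byte offset i*step and spanning k consecutive bytes.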
kmers = np.lib.stride_tricks.as_strided(
self._bytes, shape=(k, count), strides=(1, step)).T
for s in kmers:
yield self._to(sequence=s)
@stable(as_of="0.4.0")
def kmer_frequencies(self, k, overlap=True, relative=False):
"""Return counts of words of length `k` from the biological sequence.
Parameters
----------
k : int
The word length.
overlap : bool, optional
Defines whether the kmers should be overlapping or not.
relative : bool, optional
If ``True``, return the relative frequency of each kmer instead of
its count.
Returns
-------
collections.Counter or collections.defaultdict
Frequencies of words of length `k` contained in the biological
sequence. This will be a ``collections.Counter`` if `relative` is
``False`` and a ``collections.defaultdict`` if `relative` is
``True``.
Raises
------
ValueError
If `k` is less than 1.
Examples
--------
>>> from collections import defaultdict, Counter
>>> from skbio import Sequence
>>> s = Sequence('ACACATTTATTA')
>>> freqs = s.kmer_frequencies(3, overlap=False)
>>> freqs == Counter({'TTA': 2, 'ACA': 1, 'CAT': 1})
True
>>> freqs = s.kmer_frequencies(3, relative=True, overlap=False)
>>> freqs == defaultdict(float, {'ACA': 0.25, 'TTA': 0.5, 'CAT': 0.25})
True
"""
kmers = self.iter_kmers(k, overlap=overlap)
freqs = collections.Counter((str(seq) for seq in kmers))
if relative:
if overlap:
num_kmers = len(self) - k + 1
else:
num_kmers = len(self) // k
relative_freqs = collections.defaultdict(float)
for kmer, count in viewitems(freqs):
relative_freqs[kmer] = count / num_kmers
freqs = relative_freqs
return freqs
@stable(as_of="0.4.0")
def find_with_regex(self, regex, ignore=None):
"""Generate slices for patterns matched by a regular expression.
Parameters
----------
regex : str or regular expression object
String to be compiled into a regular expression, or a pre-
compiled regular expression object (e.g., from calling
``re.compile``).
ignore : 1D array_like (bool) or iterable (slices or ints), optional
Indicate the positions to ignore when matching.
Yields
------
slice
Location where the regular expression matched.
Examples
--------
>>> from skbio import Sequence
>>> s = Sequence('AATATACCGGTTATAA')
>>> for match in s.find_with_regex('(TATA+)'):
... match
... str(s[match])
slice(2, 6, None)
'TATA'
slice(11, 16, None)
'TATAA'
"""
if isinstance(regex, six.string_types):
regex = re.compile(regex)
lookup = np.arange(len(self))
if ignore is None:
string = str(self)
else:
ignore = self._munge_to_index_array(ignore)
lookup = np.delete(lookup, ignore)
string = str(self[lookup])
for match in regex.finditer(string):
# We start at 1 because we don't want the group that contains all
# other groups.
for g in range(1, len(match.groups())+1):
yield slice(lookup[match.start(g)],
lookup[match.end(g) - 1] + 1)
@stable(as_of="0.4.0")
def iter_contiguous(self, included, min_length=1, invert=False):
"""Yield contiguous subsequences based on `included`.
Parameters
----------
included : 1D array_like (bool) or iterable (slices or ints)
`included` is transformed into a flat boolean vector where each
position will either be included or skipped. All contiguous
included positions will be yielded as a single region.
min_length : int, optional
The minimum length of a subsequence for it to be yielded.
Default is 1.
invert : bool, optional
Whether to invert `included` such that it describes what should be
skipped instead of included. Default is False.
Yields
------
Sequence
Contiguous subsequence as indicated by `included`.
Notes
-----
If slices provide adjacent ranges, then they will be considered the
same contiguous subsequence.
Examples
--------
Here we use `iter_contiguous` to find all of the contiguous ungapped
sequences using a boolean vector derived from our DNA sequence.
>>> from skbio import DNA
>>> s = DNA('AAA--TT-CCCC-G-')
>>> no_gaps = ~s.gaps()
>>> for ungapped_subsequence in s.iter_contiguous(no_gaps,
... min_length=2):
... print(ungapped_subsequence)
AAA
TT
CCCC
Note how the last potential subsequence was skipped because it would
have been smaller than our `min_length` which was set to 2.
We can also use `iter_contiguous` on a generator of slices as is
produced by `find_motifs` (and `find_with_regex`).
>>> from skbio import Protein
>>> s = Protein('ACDFNASANFTACGNPNRTESL')
>>> for subseq in s.iter_contiguous(s.find_motifs('N-glycosylation')):
... print(subseq)
NASANFTA
NRTE
Note how the first subsequence contains two N-glycosylation sites. This
happened because they were contiguous.
"""
idx = self._munge_to_index_array(included)
if invert:
idx = np.delete(np.arange(len(self)), idx)
# Adapted from http://stackoverflow.com/a/7353335/579416
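        # np.diff(idx) != 1 marks the breaks between runs of consecutive
        # indices, so np.split yields one array per contiguous region.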
for contig in np.split(idx, np.where(np.diff(idx) != 1)[0] + 1):
r = self[contig]
if len(r) >= min_length:
yield r
def _to(self, sequence=None, metadata=None, positional_metadata=None):
"""Return a copy of the current biological sequence.
Returns a copy of the current biological sequence, optionally with
updated attributes specified as keyword arguments.
Arguments are the same as those passed to the ``Sequence`` constructor.
The returned copy will have its attributes updated based on the
arguments. If an attribute is missing, the copy will keep the same
attribute as the current biological sequence. Valid attribute names
are `'sequence'`, `'metadata'`, and `'positional_metadata'`. Default
behavior is to return a copy of the current biological sequence
without changing any attributes.
Parameters
----------
sequence : optional
metadata : optional
positional_metadata : optional
Returns
-------
Sequence
Copy of the current biological sequence, optionally with updated
attributes based on arguments. Will be the same type as the current
biological sequence (`self`).
Notes
-----
By default, `metadata` and `positional_metadata` are shallow-copied and
the reference to `sequence` is used (without copying) for efficiency
since `sequence` is immutable. This differs from the behavior of
`Sequence.copy`, which will actually copy `sequence`.
This method is the preferred way of creating new instances from an
existing biological sequence, instead of calling
``self.__class__(...)``, as the latter can be error-prone (e.g.,
it's easy to forget to propagate attributes to the new instance).
"""
if sequence is None:
sequence = self._bytes
if metadata is None:
metadata = self._metadata
if positional_metadata is None:
positional_metadata = self._positional_metadata
return self._constructor(sequence=sequence, metadata=metadata,
positional_metadata=positional_metadata)
def _constructor(self, **kwargs):
return self.__class__(**kwargs)
def _munge_to_index_array(self, sliceable):
"""Return an index array from something isomorphic to a boolean vector.
"""
if isinstance(sliceable, six.string_types):
if sliceable in self.positional_metadata:
if self.positional_metadata[sliceable].dtype == np.bool:
sliceable = self.positional_metadata[sliceable]
else:
raise TypeError("Column '%s' in positional metadata does "
"not correspond to a boolean vector" %
sliceable)
else:
raise ValueError("No positional metadata associated with key "
"'%s'" % sliceable)
if not hasattr(sliceable, 'dtype') or (hasattr(sliceable, 'dtype') and
sliceable.dtype == 'object'):
sliceable = tuple(sliceable)
bool_mode = False
int_mode = False
for s in sliceable:
if isinstance(s, (bool, np.bool_)):
bool_mode = True
elif isinstance(s, (slice, int, np.signedinteger)) or (
hasattr(s, 'dtype') and s.dtype != np.bool):
int_mode = True
else:
raise TypeError("Invalid type in iterable: %s, must be one"
" of {bool, int, slice, np.signedinteger}"
% s.__class__.__name__)
if bool_mode and int_mode:
raise TypeError("Cannot provide iterable of both bool and"
" int.")
sliceable = np.r_[sliceable]
if sliceable.dtype == np.bool:
if sliceable.size != len(self):
raise ValueError("Boolean array (%d) does not match length of"
" sequence (%d)."
% (sliceable.size, len(self)))
normalized, = np.where(sliceable)
else:
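            # bincount counts occurrences of each index: a count > 1 means
            # overlapping regions, and since np.where returns indices in
            # sorted order, any difference from the input means the regions
            # were given out of order.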
normalized = np.bincount(sliceable)
if np.any(normalized > 1):
raise ValueError("Overlapping index regions are not allowed.")
normalized, = np.where(normalized)
if np.any(normalized != sliceable):
raise ValueError("Index regions are out of order.")
return normalized
def _munge_to_sequence(self, other, method):
if isinstance(other, Sequence):
if type(other) != type(self):
raise TypeError("Cannot use %s and %s together with `%s`" %
(self.__class__.__name__,
other.__class__.__name__, method))
else:
return other
# We don't use self.__class__ or self._constructor here because we want
# to construct the most general type of Sequence object in order to
# avoid validation errors.
return Sequence(other)
def _munge_to_bytestring(self, other, method):
if type(other) is bytes:
return other
elif isinstance(other, six.string_types):
return other.encode('ascii')
else:
return self._munge_to_sequence(other, method)._string
@contextmanager
def _byte_ownership(self):
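        # Copy-on-write: if the underlying byte buffer is shared with another
        # object, copy it before temporarily making it writeable.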
if not self._owns_bytes:
self._bytes = self._bytes.copy()
self._owns_bytes = True
self._bytes.flags.writeable = True
yield
self._bytes.flags.writeable = False
def _single_index_to_slice(start_index):
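    # -1 must map to slice(-1, None); slice(-1, 0) would be an empty slice.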
end_index = None if start_index == -1 else start_index+1
return slice(start_index, end_index)
def _is_single_index(index):
return (isinstance(index, numbers.Integral) and
not isinstance(index, bool))
def _as_slice_if_single_index(indexable):
if _is_single_index(indexable):
return _single_index_to_slice(indexable)
else:
return indexable
def _slices_from_iter(array, indexables):
for i in indexables:
if isinstance(i, slice):
pass
elif _is_single_index(i):
i = _single_index_to_slice(i)
else:
raise IndexError("Cannot slice sequence from iterable "
"containing %r." % i)
yield array[i]
class _SequenceReprBuilder(object):
"""Build a ``Sequence`` repr.
Parameters
----------
seq : Sequence
Sequence to repr.
width : int
Maximum width of the repr.
indent : int
Number of spaces to use for indented lines.
chunk_size: int
Number of characters in each chunk of a sequence.
"""
def __init__(self, seq, width, indent, chunk_size):
self._seq = seq
self._width = width
self._indent = ' ' * indent
self._chunk_size = chunk_size
def build(self):
lines = ElasticLines()
cls_name = self._seq.__class__.__name__
lines.add_line(cls_name)
lines.add_separator()
if self._seq.has_metadata():
lines.add_line('Metadata:')
# Python 3 doesn't allow sorting of mixed types so we can't just
# use sorted() on the metadata keys. Sort first by type then sort
# by value within each type.
for key in self._sorted_keys_grouped_by_type(self._seq.metadata):
value = self._seq.metadata[key]
lines.add_lines(self._format_metadata_key_value(key, value))
if self._seq.has_positional_metadata():
lines.add_line('Positional metadata:')
for key in self._seq.positional_metadata.columns.values.tolist():
dtype = self._seq.positional_metadata[key].dtype
lines.add_lines(
self._format_positional_metadata_column(key, dtype))
lines.add_line('Stats:')
for label, value in self._seq._repr_stats():
lines.add_line('%s%s: %s' % (self._indent, label, value))
lines.add_separator()
num_lines, num_chars, column_width = self._find_optimal_seq_chunking()
# display entire sequence if we can, else display the first two and
# last two lines separated by ellipsis
if num_lines <= 5:
lines.add_lines(self._format_chunked_seq(
range(num_lines), num_chars, column_width))
else:
lines.add_lines(self._format_chunked_seq(
range(2), num_chars, column_width))
lines.add_line('...')
lines.add_lines(self._format_chunked_seq(
range(num_lines - 2, num_lines), num_chars, column_width))
return lines.to_str()
def _sorted_keys_grouped_by_type(self, dict_):
"""Group keys within a dict by their type and sort within type."""
type_sorted = sorted(dict_, key=self._type_sort_key)
type_and_value_sorted = []
for _, group in itertools.groupby(type_sorted, self._type_sort_key):
type_and_value_sorted.extend(sorted(group))
return type_and_value_sorted
def _type_sort_key(self, key):
return repr(type(key))
def _format_metadata_key_value(self, key, value):
"""Format metadata key:value, wrapping across lines if necessary."""
key_fmt = self._format_key(key)
supported_type = True
if isinstance(value, (six.text_type, six.binary_type)):
# for stringy values, there may be u'' or b'' depending on the type
# of `value` and version of Python. find the starting quote
# character so that wrapped text will line up with that instead of
# the string literal prefix character. for example:
#
# 'foo': u'abc def ghi
# jkl mno'
value_repr = repr(value)
extra_indent = 1
if not (value_repr.startswith("'") or value_repr.startswith('"')):
extra_indent = 2
# handles any number, this includes bool
elif value is None or isinstance(value, numbers.Number):
value_repr = repr(value)
extra_indent = 0
else:
supported_type = False
if not supported_type or len(value_repr) > 140:
value_repr = str(type(value))
# extra indent of 1 so that wrapped text lines up past the bracket:
#
# 'foo': <type
# 'dict'>
extra_indent = 1
return self._wrap_text_with_indent(value_repr, key_fmt, extra_indent)
def _format_key(self, key):
"""Format metadata key.
Includes initial indent and trailing colon and space:
<indent>'foo':<space>
"""
key_fmt = self._indent + repr(key)
supported_types = (six.text_type, six.binary_type, numbers.Number,
type(None))
if len(key_fmt) > (self._width / 2) or not isinstance(key,
supported_types):
key_fmt = self._indent + str(type(key))
return '%s: ' % key_fmt
def _wrap_text_with_indent(self, text, initial_text, extra_indent):
"""Wrap text across lines with an initial indentation.
For example:
'foo': 'abc def
ghi jkl
mno pqr'
<indent>'foo':<space> is `initial_text`. `extra_indent` is 1. Wrapped
lines are indented such that they line up with the start of the
previous line of wrapped text.
"""
return textwrap.wrap(
text, width=self._width, expand_tabs=False,
initial_indent=initial_text,
subsequent_indent=' ' * (len(initial_text) + extra_indent))
def _format_positional_metadata_column(self, key, dtype):
key_fmt = self._format_key(key)
dtype_fmt = '<dtype: %s>' % str(dtype)
return self._wrap_text_with_indent(dtype_fmt, key_fmt, 1)
def _find_optimal_seq_chunking(self):
"""Find the optimal number of sequence chunks to fit on a single line.
Returns the number of lines the sequence will occupy, the number of
sequence characters displayed on each line, and the column width
necessary to display position info using the optimal number of sequence
chunks.
"""
# strategy: use an iterative approach to find the optimal number of
# sequence chunks per line. start with a single chunk and increase
# until the max line width is exceeded. when this happens, the previous
# number of chunks is optimal
num_lines = 0
num_chars = 0
column_width = 0
num_chunks = 1
not_exceeded = True
while not_exceeded:
line_len, new_chunk_info = self._compute_chunked_seq_line_len(
num_chunks)
not_exceeded = line_len <= self._width
if not_exceeded:
num_lines, num_chars, column_width = new_chunk_info
num_chunks += 1
return num_lines, num_chars, column_width
def _compute_chunked_seq_line_len(self, num_chunks):
"""Compute line length based on a number of chunks."""
num_chars = num_chunks * self._chunk_size
# ceil to account for partial line
num_lines = int(math.ceil(len(self._seq) / num_chars))
# position column width is fixed width, based on the number of
# characters necessary to display the position of the final line (all
# previous positions will be left justified using this width)
column_width = len('%d ' % ((num_lines - 1) * num_chars))
# column width + number of sequence characters + spaces between chunks
line_len = column_width + num_chars + (num_chunks - 1)
return line_len, (num_lines, num_chars, column_width)
def _format_chunked_seq(self, line_idxs, num_chars, column_width):
"""Format specified lines of chunked sequence data."""
lines = []
for line_idx in line_idxs:
seq_idx = line_idx * num_chars
chars = str(self._seq[seq_idx:seq_idx+num_chars])
chunked_chars = chunk_str(chars, self._chunk_size, ' ')
lines.append(('%d' % seq_idx).ljust(column_width) + chunked_chars)
return lines
| SamStudio8/scikit-bio | skbio/sequence/_sequence.py | Python | bsd-3-clause | 78,197 |
from future.utils import PY2, iteritems
from mockthink.test.common import assertEqual
if PY2:
import mock
else:
from unittest import mock
import unittest
from pprint import pprint
from ... import util
class TestUtil(unittest.TestCase):
def test_curry2(self):
fun = lambda x, y: x + y
curried = util.curry2(fun)
assertEqual(8, curried(5, 3))
assertEqual(8, curried(5)(3))
def test_curry3(self):
fun = lambda x, y, z: x + y + z
curried = util.curry3(fun)
assertEqual(15, curried(3, 5, 7))
assertEqual(15, curried(3, 5)(7))
assertEqual(15, curried(3)(5, 7))
assertEqual(15, curried(3)(5)(7))
def test_extend(self):
dict_1 = {'x': 'x1-val', 'y': 'y1-val'}
dict_2 = {'x': 'x2-val', 'z': 'z2-val'}
extended = util.extend(dict_1, dict_2)
assertEqual({
'x': 'x2-val',
'y': 'y1-val',
'z': 'z2-val'
}, extended)
assertEqual({
'x': 'x1-val',
'y': 'y1-val'
}, dict_1)
assertEqual({
'x': 'x2-val',
'z': 'z2-val'
}, dict_2)
def test_cat(self):
list_1 = [1, 2, 3]
list_2 = [7, 8, 9]
result = util.cat(list_1, list_2)
assertEqual([1, 2, 3, 7, 8, 9], result)
assertEqual([1, 2, 3], list_1)
assertEqual([7, 8, 9], list_2)
def test_extend_with(self):
with mock.patch('mockthink.util.extend') as extend:
extend.return_value = 'EXTENDED'
util.extend_with('X', 'Y')
util.extend_with('X')('Y')
extend.assert_has_calls([
mock.call('Y', 'X'),
mock.call('Y', 'X')
])
def test_map_with(self):
add_1 = lambda x: x + 1
nums = [10, 20, 30]
map_fn = util.map_with(add_1)
assertEqual([11, 21, 31], util.map_with(add_1)(nums))
assertEqual([11, 21, 31], util.map_with(add_1, nums))
def test_has_attrs(self):
thing1 = {'a': 'a-val', 'b': 'b-val'}
thing2 = {'x': 'x-val'}
self.assertTrue(util.has_attrs(['a'], thing1))
self.assertTrue(util.has_attrs(['a', 'b'], thing1))
self.assertFalse(util.has_attrs(['a'], thing2))
self.assertFalse(util.has_attrs(['a', 'b'], thing2))
def test_nth(self):
nums = [10, 20, 30, 40, 50]
assertEqual(20, util.nth(1)(nums))
assertEqual(40, util.nth(3)(nums))
def test_as_obj(self):
expected = {
'x': 'x-val',
'y': 'y-val'
}
pairs = [
['x', 'x-val'],
['y', 'y-val']
]
assertEqual(expected, util.as_obj(pairs))
def test_without(self):
obj = {
'x': 'x-val',
'y': 'y-val',
'z': 'z-val'
}
assertEqual({
'z': 'z-val'
}, util.without(['x', 'y'], obj))
assertEqual({
'x': 'x-val',
'y': 'y-val'
}, util.without(['z'], obj))
def test_pluck_with(self):
obj = {
'x': 'x-val',
'y': 'y-val',
'z': 'z-val'
}
assertEqual({
'x': 'x-val',
}, util.pluck_with('x')(obj))
assertEqual({
'x': 'x-val',
'y': 'y-val',
}, util.pluck_with('x', 'y')(obj))
def test_pipeline(self):
add_5 = lambda x: x + 5
mul_2 = lambda x: x * 2
assertEqual(24, util.pipeline(add_5, mul_2)(7))
assertEqual(19, util.pipeline(mul_2, add_5)(7))
def test_match_attrs_matching(self):
to_match = {
'x': 'good-x',
'y': 'good-y'
}
good_test = {
'x': 'good-x',
'y': 'good-y',
'z': 'good-z'
}
self.assertTrue(util.match_attrs(to_match, good_test))
def test_match_attrs_not_matching(self):
to_match = {
'x': 'good-x',
'y': 'good-y'
}
bad_test = {
'x': 'good-x',
'y': 'bad-y',
'z': 'good-z'
}
self.assertFalse(util.match_attrs(to_match, bad_test))
def test_match_attrs_missing_val(self):
to_match = {
'x': 'good-x',
'y': 'good-y'
}
bad_test = {
'x': 'good-x',
'z': 'good-z'
}
self.assertFalse(util.match_attrs(to_match, bad_test))
def test_getter_dict(self):
a_dict = {
'x': 'x-val'
}
assertEqual('x-val', util.getter('x')(a_dict))
assertEqual(None, util.getter('y')(a_dict))
def test_getter_obj(self):
class Thing(object):
def __init__(self, a_dict):
for k, v in iteritems(a_dict):
setattr(self, k, v)
thing = Thing({'x': 'x-val'})
assertEqual('x-val', util.getter('x')(thing))
assertEqual(None, util.getter('y')(thing))
def test_maybe_map_simple(self):
add_5 = lambda x: x + 5
assertEqual(13, util.maybe_map(add_5, 8))
assertEqual([5, 10, 15], util.maybe_map(add_5, [0, 5, 10]))
def test_maybe_map_dict(self):
def set_y_by_x(thing):
return {'x': thing['x'], 'y': thing['x'] + 1}
assertEqual(
{'x': 5, 'y': 6},
util.maybe_map(set_y_by_x, {'x': 5})
)
assertEqual(
[{'x': 5, 'y': 6}, {'x': 10, 'y': 11}],
util.maybe_map(set_y_by_x, [{'x': 5}, {'x': 10}])
)
def test_splice(self):
nums = [1, 2, 3, 4]
result = util.splice_at([10, 20], 2, nums)
assertEqual([1, 2, 10, 20, 3, 4], result)
def test_insert(self):
nums = [1, 2, 3, 4]
result = util.insert_at(10, 2, nums)
assertEqual([1, 2, 10, 3, 4], result)
def test_change_at(self):
nums = [1, 2, 3, 4]
assertEqual([1, 10, 3, 4], util.change_at(10, 1, nums))
def test_sort_by_one(self):
people = [
{'id': 'joe', 'age': 26, 'score': 60},
{'id': 'todd', 'age': 52, 'score': 15},
{'id': 'bill', 'age': 35, 'score': 78}
]
expected = [
{'id': 'joe', 'age': 26, 'score': 60},
{'id': 'bill', 'age': 35, 'score': 78},
{'id': 'todd', 'age': 52, 'score': 15}
]
result = util.sort_by_one('age', people)
for index in range(0, len(expected)):
assertEqual(expected[index], result[index])
def test_sort_by_many_1(self):
people = [
{'id': 'joe', 'age': 26, 'score': 60},
{'id': 'todd', 'age': 52, 'score': 15},
{'id': 'bill', 'age': 35, 'score': 78}
]
expected = [
{'id': 'joe', 'age': 26, 'score': 60},
{'id': 'bill', 'age': 35, 'score': 78},
{'id': 'todd', 'age': 52, 'score': 15}
]
result = util.sort_by_many([('age', 'ASC')], people)
for index in range(0, len(expected)):
assertEqual(expected[index], result[index])
def test_sort_by_many_2(self):
people = [
{'id': 'joe', 'age': 26, 'score': 60},
{'id': 'todd', 'age': 52, 'score': 15},
{'id': 'joe', 'age': 26, 'score': 20},
{'id': 'bill', 'age': 35, 'score': 78},
{'id': 'todd', 'age': 52, 'score': 80}
]
expected = [
{'id': 'joe', 'age': 26, 'score': 20},
{'id': 'joe', 'age': 26, 'score': 60},
{'id': 'bill', 'age': 35, 'score': 78},
{'id': 'todd', 'age': 52, 'score': 15},
{'id': 'todd', 'age': 52, 'score': 80}
]
result = util.sort_by_many([('age', 'ASC'), ('score', 'ASC')], people)
pprint({'RESULT': result})
for index in range(0, len(expected)):
assertEqual(expected[index], result[index])
def test_sort_by_many_3(self):
people = [
{'id': 'joe', 'age': 26, 'score': 60},
{'id': 'todd', 'age': 52, 'score': 15},
{'id': 'joe', 'age': 26, 'score': 20},
{'id': 'bill', 'age': 35, 'score': 78},
{'id': 'todd', 'age': 52, 'score': 80}
]
expected = [
{'id': 'joe', 'age': 26, 'score': 60},
{'id': 'joe', 'age': 26, 'score': 20},
{'id': 'bill', 'age': 35, 'score': 78},
{'id': 'todd', 'age': 52, 'score': 80},
{'id': 'todd', 'age': 52, 'score': 15}
]
result = util.sort_by_many([('age', 'ASC'), ('score', 'DESC')], people)
pprint({'RESULT': result})
for index in range(0, len(expected)):
assertEqual(expected[index], result[index])
def test_min_mapped(self):
sequence = [
{'val': 5},
{'val': 10},
{'val': 17},
{'val': 2},
{'val': 28},
{'val': 8}
]
get_val = lambda doc: doc['val']
assertEqual({'val': 2}, util.min_mapped(get_val, sequence))
def test_max_mapped(self):
sequence = [
{'val': 5},
{'val': 10},
{'val': 17},
{'val': 2},
{'val': 28},
{'val': 8}
]
get_val = lambda doc: doc['val']
assertEqual({'val': 28}, util.max_mapped(get_val, sequence))
def test_deep_extend_pair(self):
obj = {
'x': {
'x1': {
'v1': 5,
'v2': 7
},
'nums': [1, 3, 5]
},
'a_list': [10, 20]
}
ext_with = {
'x': {
'x2': {
'x2-key': 'x2-val'
},
'x1': {
'v2': 'new-v2-val',
'v3': 'v3-val'
},
'nums': [7, 9]
},
'a_list': 'new-a-list-val'
}
expected = {
'x': {
'x2': {
'x2-key': 'x2-val'
},
'x1': {
'v1': 5,
'v2': 'new-v2-val',
'v3': 'v3-val'
},
'nums': [1, 3, 5, 7, 9]
},
'a_list': 'new-a-list-val'
}
result = util.deep_extend_pair(obj, ext_with)
assertEqual(expected, result)
class TestDictableSet(unittest.TestCase):
def test_simple(self):
x = {'x': 10}
foo = util.DictableSet([x])
self.assertTrue(foo.has(x))
y = {'y': 15}
self.assertFalse(foo.has(y))
def test_reordered_vals(self):
get_doc = lambda: {'x': [5, 10]}
foo = util.DictableSet([get_doc()])
self.assertTrue(foo.has(get_doc()))
self.assertTrue(foo.has({'x': [10, 5]}))
| scivey/mockthink | mockthink/test/unit/test_util.py | Python | mit | 10,940 |
from django.views.generic import ListView
from models import Project
# Create your views here.
class ListProjectView(ListView):
model = Project
template_name = 'cmsplugin_vfoss_project/project_list.html'
| thuydang/djagazin | wsgi/djagazin/cmsplugin_vfoss_project/views.py | Python | bsd-2-clause | 207 |
from django.contrib import admin
from bongo.apps.archive.models import *
class MultiDBModelAdmin(admin.ModelAdmin):
# A handy constant for the name of the alternate database.
using = 'archive'
def save_model(self, request, obj, form, change):
# Tell Django to save objects to the 'other' database.
obj.save(using=self.using)
def delete_model(self, request, obj):
# Tell Django to delete objects from the 'other' database
obj.delete(using=self.using)
def get_queryset(self, request):
# Tell Django to look for objects on the 'other' database.
return super(MultiDBModelAdmin, self).get_queryset(request).using(self.using)
def formfield_for_foreignkey(self, db_field, request=None, **kwargs):
# Tell Django to populate ForeignKey widgets using a query
# on the 'other' database.
return super(MultiDBModelAdmin, self).formfield_for_foreignkey(
db_field, request=request, using=self.using, **kwargs
)
def formfield_for_manytomany(self, db_field, request=None, **kwargs):
# Tell Django to populate ManyToMany widgets using a query
# on the 'other' database.
return super(MultiDBModelAdmin, self).formfield_for_manytomany(
db_field, request=request, using=self.using, **kwargs
)
# admin.site.register(Ads, MultiDBModelAdmin)
# admin.site.register(Alerts, MultiDBModelAdmin)
# admin.site.register(Article, MultiDBModelAdmin)
# admin.site.register(Articleauthor, MultiDBModelAdmin)
# admin.site.register(Articlebody, MultiDBModelAdmin)
# admin.site.register(Articletype, MultiDBModelAdmin)
# admin.site.register(Attachments, MultiDBModelAdmin)
# admin.site.register(Author, MultiDBModelAdmin)
# admin.site.register(Issue, MultiDBModelAdmin)
# admin.site.register(Job, MultiDBModelAdmin)
# admin.site.register(Links, MultiDBModelAdmin)
# admin.site.register(Photo, MultiDBModelAdmin)
# admin.site.register(Quote, MultiDBModelAdmin)
# admin.site.register(Related, MultiDBModelAdmin)
# admin.site.register(Section, MultiDBModelAdmin)
# admin.site.register(Series, MultiDBModelAdmin)
# admin.site.register(Tips, MultiDBModelAdmin)
# admin.site.register(Volume, MultiDBModelAdmin)
| BowdoinOrient/bongo | bongo/apps/archive/admin.py | Python | mit | 2,240 |
import arrayfire as af
import numpy as np
from petsc4py import PETSc
from bolt.lib.physical_system import physical_system
from bolt.lib.nonlinear_solver.nonlinear_solver \
import nonlinear_solver
import domain
import boundary_conditions
import params
import initialize
import bolt.src.nonrelativistic_boltzmann.advection_terms as advection_terms
import bolt.src.nonrelativistic_boltzmann.collision_operator \
as collision_operator
import bolt.src.nonrelativistic_boltzmann.moment_defs as moment_defs
# Defining the physical system to be solved:
system = physical_system(domain,
boundary_conditions,
params,
initialize,
advection_terms,
collision_operator.BGK,
moment_defs
)
# Declaring a nonlinear solver object which will evolve
# the defined physical system:
nls = nonlinear_solver(system)
nls.dump_moments('dump/0000')
# Time parameters:
dt = 0.0005
t_final = 5.0
time_array = np.arange(dt, t_final + dt, dt)
for time_index, t0 in enumerate(time_array):
if((time_index+1)%100 == 0):
PETSc.Sys.Print('Computing for Time =', t0)
nls.strang_timestep(dt)
nls.dump_moments('dump/%04d'%(time_index+1))
| ShyamSS-95/Bolt | example_problems/nonrelativistic_boltzmann/instabilities/KH/hydrodynamic/lecoanet_paper_setup/main.py | Python | gpl-3.0 | 1,319 |
# Standard Python packages
import math
import numbers
import itertools
# Special dependencies
import numpy
class InfiniteType:
def __init__(self, multiplier=1.):
if multiplier == 0.: raise ZeroDivisionError, "Cannot multiply infinity and zero."
self._multiplier = multiplier
def __repr__(self):
if self is Infinity:
return "Infinity"
elif self is MinusInfinity:
return "-Infinity"
elif self._multiplier > 0.:
return "Infinity*%g" % self._multiplier
else:
return "-Infinity*%g" % abs(self._multiplier)
def __neg__(self):
if self is Infinity:
return MinusInfinity
elif self is MinusInfinity:
return Infinity
else:
return self * -1.
def __mul__(self, number):
if number == 0.: raise ZeroDivisionError, "Cannot multiply infinity and zero."
return InfiniteType(self._multiplier * number)
def __div__(self, number):
if isinstance(number, InfiniteType): raise ZeroDivisionError, "Cannot divide infinity and infinity."
if number == 0: raise ZeroDivisionError, "Cannot divide infinity and zero."
return InfiniteType(self._multiplier / number)
def __truediv__(self, number):
return self.__div__(number)
#: Symbol representing infinity; can be multiplied by any scalar.
Infinity = InfiniteType()
MinusInfinity = InfiniteType(-1.)
#: A small number (1e-5), used to avoid numerical round-off issues in
#: comparisons.
#:
#: The following can be used to set epsilon (without any
#: multiple-reference issues)::
#:
#: import cassius
#: cassius.epsilon = 1e-10
epsilon = 1e-5
######################################################### Utility functions
def _roundlevel_nsigfigs(num, n):
if num == 0.: return 1
return n - int(math.ceil(math.log10(abs(num))))
def str_round(num, n):
"""Round a number to n digits and return the result as a string."""
num = round(num, n)
format = "%."+str(max(n, 0))+"f"
return format % num
def round_sigfigs(num, n):
"Round a number to n significant figures."
return round(num, _roundlevel_nsigfigs(num, n))
def str_sigfigs(num, n):
"""Round a number to n significant figures and return the result as
a string."""
level = _roundlevel_nsigfigs(num, n)
num = round(num, level)
format = "%."+str(max(level, 0))+"f"
return format % num
def round_errpair(num, err, n=2):
"""Round a number and its uncertainty to n significant figures in
the uncertainty (default is two)."""
level = _roundlevel_nsigfigs(err, n)
return round(num, level), round(err, level)
def str_errpair(num, err, n=2):
"""Round a number and its uncertainty to n significant figures in the
uncertainty (default is two) and return the result as a string."""
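    # e.g. str_errpair(12.3456, 0.0789) returns ('12.346', '0.079')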
level = _roundlevel_nsigfigs(err, n)
num = round(num, level)
err = round(err, level)
format = "%."+str(max(level, 0))+"f"
return format % num, format % err
def unicode_errpair(num, err, n=2):
"""Round a number and its uncertainty to n significant figures in the
uncertainty (default is two) and return the result joined by a unicode
plus-minus sign."""
return u"\u00b1".join(str_errpair(num, err, n))
def mean(*values, **kwds):
"""Compute the mean of N values (N > 0).
Keyword arguments:
decimals (int or `None`): number of digits after the decimal
      point to round the result, if not `None`
sigfigs (int or `None`): number of significant digits to round
the result, if not `None`
string (bool): return output as a string (forces number of digits)
"""
decimals = kwds.get("decimals", None)
sigfigs = kwds.get("sigfigs", None)
string = kwds.get("string", False)
if len(values) == 1 and not isinstance(values[0], (numbers.Number, numpy.number)):
values = values[0]
sum_1 = 0.
sum_y = 0.
for y in values:
if not isinstance(y, (numbers.Number, numpy.number)):
raise ValueError, "mean() requires a list of numbers"
sum_1 += 1.
sum_y += y
if sum_1 != 0.:
output = sum_y / sum_1
if decimals is not None:
if string:
return str_round(output, decimals)
else:
return round(output, decimals)
elif sigfigs is not None:
if string:
return str_sigfigs(output, sigfigs)
else:
return round_sigfigs(output, sigfigs)
else:
if string:
return str(output)
else:
return output
else:
raise ValueError, "Cannot take the mean without any values"
def wmean(values, weights, decimals=None, sigfigs=None, string=False):
"""Compute the weighted mean of N values with N weights (N > 0).
Keyword arguments:
decimals (int or `None`): number of digits after the decimal
      point to round the result, if not `None`
sigfigs (int or `None`): number of significant digits to round
the result, if not `None`
string (bool): return output as a string (forces number of digits)
"""
sum_1 = 0.
sum_y = 0.
for y, weight in itertools.izip(values, weights):
if not isinstance(y, (numbers.Number, numpy.number)) or not isinstance(weight, (numbers.Number, numpy.number)):
raise ValueError, "wmean() requires lists of numbers"
sum_1 += weight
sum_y += weight * y
if sum_1 != 0.:
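        # With each weight interpreted as an inverse variance (1/sigma**2), the
        # weighted mean is sum(w*y)/sum(w) and its uncertainty sqrt(1/sum(w)).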
outputval, outputerr = sum_y / sum_1, math.sqrt(1. / sum_1)
if decimals is not None:
if string:
return str_round(outputval, decimals), str_round(outputerr, decimals)
else:
return round(outputval, decimals), round(outputerr, decimals)
elif sigfigs is not None:
if string:
return str_errpair(outputval, outputerr, sigfigs)
else:
return round_errpair(outputval, outputerr, sigfigs)
else:
if string:
return str(outputval), str(outputerr)
else:
return outputval, outputerr
else:
raise ValueError, "Cannot take the weighted mean without any values"
def linearfit(xvalues, yvalues, weights=None, decimals=None, sigfigs=None, string=False):
"""Compute a linear fit of N x-y pairs with weights (N > 0).
Keyword arguments:
weights (list of numbers or `None`): if `None`, weight all
points equally.
decimals (int or `None`): number of digits after the decimal
      point to round the result, if not `None`
sigfigs (int or `None`): number of significant digits to round
the result, if not `None`
string (bool): return output as a string (forces number of digits)
"""
if weights is None:
weights = numpy.ones(min(len(xvalues), len(yvalues)), dtype=numpy.float)
sum_1 = 0.
sum_x = 0.
sum_xx = 0.
sum_y = 0.
sum_xy = 0.
for x, y, weight in itertools.izip(xvalues, yvalues, weights):
if not isinstance(x, (numbers.Number, numpy.number)) or not isinstance(y, (numbers.Number, numpy.number)) or not isinstance(weight, (numbers.Number, numpy.number)):
raise ValueError, "linearfit() requires lists of numbers"
sum_1 += weight
sum_x += weight * x
sum_xx += weight * x**2
sum_y += weight * y
sum_xy += weight * x * y
delta = (sum_1 * sum_xx) - (sum_x * sum_x)
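    # delta is the determinant of the 2x2 normal-equations matrix
    # [[sum_1, sum_x], [sum_x, sum_xx]]; intercept and slope below are the
    # Cramer's-rule solution for the weighted least-squares line y = a + b*x.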
if delta != 0.:
intercept = ((sum_xx * sum_y) - (sum_x * sum_xy)) / delta
intercept_err = math.sqrt(sum_xx / delta)
slope = ((sum_1 * sum_xy) - (sum_x * sum_y)) / delta
slope_err = math.sqrt(sum_1 / delta)
if decimals is not None:
if string:
intercept, intercept_err = str_round(intercept, decimals), str_round(intercept_err, decimals)
slope, slope_err = str_round(slope, decimals), str_round(slope_err, decimals)
else:
intercept, intercept_err = round(intercept, decimals), round(intercept_err, decimals)
slope, slope_err = round(slope, decimals), round(slope_err, decimals)
elif sigfigs is not None:
if string:
intercept, intercept_err = str_errpair(intercept, intercept_err, sigfigs)
slope, slope_err = str_errpair(slope, slope_err, sigfigs)
else:
intercept, intercept_err = round_errpair(intercept, intercept_err, sigfigs)
slope, slope_err = round_errpair(slope, slope_err, sigfigs)
elif string:
intercept, intercept_err = str(intercept), str(intercept_err)
slope, slope_err = str(slope), str(slope_err)
return intercept, intercept_err, slope, slope_err
else:
raise ValueError, "Cannot take a linear fit without any values"
def rms(*values, **kwds):
"""Compute the root-mean-square of N values (N > 0).
Keyword arguments:
decimals (int or `None`): number of digits after the decimal
      point to round the result, if not `None`
sigfigs (int or `None`): number of significant digits to round
the result, if not `None`
string (bool): return output as a string (forces number of digits)
"""
decimals = kwds.get("decimals", None)
sigfigs = kwds.get("sigfigs", None)
string = kwds.get("string", False)
if len(values) == 1 and not isinstance(values[0], (numbers.Number, numpy.number)):
values = values[0]
sum_1 = 0.
sum_yy = 0.
for y in values:
if not isinstance(y, (numbers.Number, numpy.number)):
raise ValueError, "rms() requires a list of numbers"
sum_1 += 1.
sum_yy += y**2
if sum_1 != 0.:
output = math.sqrt(sum_yy / sum_1)
if decimals is not None:
if string:
return str_round(output, decimals)
else:
return round(output, decimals)
elif sigfigs is not None:
if string:
return str_sigfigs(output, sigfigs)
else:
return round_sigfigs(output, sigfigs)
else:
if string:
return str(output)
else:
return output
else:
raise ValueError, "Cannot take the RMS with fewer than one unique value"
def stdev(*values, **kwds):
"""Compute the standard deviation of N values (N > 0).
Keyword arguments:
decimals (int or `None`): number of digits after the decimal
      point to round the result, if not `None`
sigfigs (int or `None`): number of significant digits to round
the result, if not `None`
string (bool): return output as a string (forces number of digits)
"""
decimals = kwds.get("decimals", None)
sigfigs = kwds.get("sigfigs", None)
string = kwds.get("string", False)
if len(values) == 1 and not isinstance(values[0], (numbers.Number, numpy.number)):
values = values[0]
sum_1 = 0.
sum_y = 0.
sum_yy = 0.
for y in values:
if not isinstance(y, (numbers.Number, numpy.number)):
raise ValueError, "stdev() requires a list of numbers"
sum_1 += 1.
sum_y += y
sum_yy += y**2
if sum_1 != 0. and (sum_yy / sum_1) > (sum_y / sum_1)**2:
output = math.sqrt((sum_yy / sum_1) - (sum_y / sum_1)**2)
if decimals is not None:
if string:
return str_round(output, decimals)
else:
return round(output, decimals)
elif sigfigs is not None:
if string:
return str_sigfigs(output, sigfigs)
else:
return round_sigfigs(output, sigfigs)
else:
if string:
return str(output)
else:
return output
else:
raise ValueError, "Cannot take the stdev with fewer than one unique value"
def covariance(xvalues, yvalues, decimals=None, sigfigs=None, string=False):
"""Compute the covariance of N x-y pairs (N > 0).
Keyword arguments:
decimals (int or `None`): number of digits after the decimal
      point to round the result, if not `None`
sigfigs (int or `None`): number of significant digits to round
the result, if not `None`
string (bool): return output as a string (forces number of digits)
"""
xmean = mean(*xvalues)
ymean = mean(*yvalues)
sum_1 = 0.
sum_xy = 0.
for x, y in itertools.izip(xvalues, yvalues):
sum_1 += 1.
sum_xy += (x - xmean) * (y - ymean)
output = sum_xy / sum_1
if decimals is not None:
if string:
return str_round(output, decimals)
else:
return round(output, decimals)
elif sigfigs is not None:
if string:
return str_sigfigs(output, sigfigs)
else:
return round_sigfigs(output, sigfigs)
else:
if string:
return str(output)
else:
return output
def correlation(xvalues, yvalues, decimals=None, sigfigs=None, string=False):
"""Compute the correlation of N x-y pairs (N > 0).
Keyword arguments:
decimals (int or `None`): number of digits after the decimal
      point to round the result, if not `None`
sigfigs (int or `None`): number of significant digits to round
the result, if not `None`
string (bool): return output as a string (forces number of digits)
"""
xmean = mean(xvalues)
ymean = mean(yvalues)
sum_xx = 0.
sum_yy = 0.
sum_xy = 0.
for x, y in itertools.izip(xvalues, yvalues):
sum_xx += (x - xmean)**2
sum_yy += (y - ymean)**2
sum_xy += (x - xmean) * (y - ymean)
    if sum_xx * sum_yy != 0.:
        # Pearson correlation: covariance over the product of the standard
        # deviations, i.e. sum_xy / sqrt(sum_xx * sum_yy).
        output = sum_xy / math.sqrt(sum_xx * sum_yy)
if decimals is not None:
if string:
return str_round(output, decimals)
else:
return round(output, decimals)
elif sigfigs is not None:
if string:
return str_sigfigs(output, sigfigs)
else:
return round_sigfigs(output, sigfigs)
else:
if string:
return str(output)
else:
return output
else:
raise ValueError, "Cannot take the correlation without any values"
def ubiquitous(array):
"""Return the most ubiquitous (most frequent) member of a list."""
if isinstance(array, numpy.ndarray):
keys = numpy.unique(array)
maximal = None
for k in keys:
this = len(array[array == k])
if maximal is None or this > maximal:
maximal_key = k
maximal = this
if maximal is not None:
return maximal_key
else:
return None
else:
keys = set(array)
maximal = None
for k in keys:
            this = array.count(k)
if maximal is None or this > maximal:
maximal_key = k
maximal = this
if maximal is not None:
return maximal_key
else:
return None
def erf(x):
"""Return the error function of x.
(For complex erf, get SciPy and load scipy.special)
"""
# http://stackoverflow.com/questions/457408/is-there-an-easily-available-implementation-of-erf-for-python
sign = 1
if x < 0:
sign = -1
x = abs(x)
a1 = 0.254829592
a2 = -0.284496736
a3 = 1.421413741
a4 = -1.453152027
a5 = 1.061405429
p = 0.3275911
# http://www.amazon.com/dp/0486612724/?tag=stackoverfl08-20 formula 7.1.26
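    # (Abramowitz & Stegun 7.1.26 rational approximation; the absolute error
    # is below about 1.5e-7 for all x)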
t = 1.0/(1.0 + p*x)
y = 1.0 - (((((a5*t + a4)*t) + a3)*t + a2)*t + a1)*t*math.exp(-x*x)
return sign*y # erf(-x) = -erf(x)
def erfc(x):
"""Return 1 minus the error function of x.
(For complex erfc, get SciPy and load scipy.special)
"""
return 1. - erf(x)
def gaussian_likelihood(f, x, y, ey):
"""Gaussian likelihood function usable in Curve.objective and Curve.fit.
Expression:
(f - y)**2 / ey**2 or 0 if ey == 0
where f is the value of the curve at x, y is the data, and ey
is the uncertainty in the data (one Gaussian sigma).
"""
return ((f - y)**2/ey**2 if ey != 0. else 0.)
def poisson_likelihood(f, x, y):
"""Poisson likelihood function usable in Curve.objective and Curve.fit.
Expression:
-2 * (y * log(f) - f - log(y!))
where f is the value of the curve at x and y is the data
(usually an integer, like a histogram bin value).
Considerations:
Note the factor of 2! Not all texts include this factor. With
the factor of 2, this Poisson likelihood can be used
interchangeably with a Gaussian likelihood (i.e. chi^2):
uncertainty in a best fit value is the distance you need to
walk to raise this objective function by 1.0, just like the
Gaussian likelihood (not 0.5!).
"""
# try:
# return -2.*(y*math.log(f) - f - math.log(math.factorial(y)))
# except ValueError:
# return -2.*(y*math.log(1e-10) - 1e-10 - math.log(math.factorial(y)))
### much better:
try:
return -2.*(y*math.log(f) - f - sum(map(math.log, xrange(1, y+1))))
except ValueError:
# note: if f == 0., then any non-zero y is impossible
# is it right to give it a small value? something to think about...
return -2.*(y*math.log(1e-10) - 1e-10 - sum(map(math.log, xrange(1, y+1))))
| opendatagroup/cassius | tags/cassius-0_1_0_0/cassius/mathtools.py | Python | apache-2.0 | 17,665 |
#---------------------------------------------------------------------------
# Introduction to Computer Programming - IPC
# Universidade do Estado do Amazonas - UEA
# Prof. Jucimar Jr
#
# Adham Lucas da Silva Oliveira 1715310059
# Alexandre Marques Uchôa 1715310028
# André Luís Laborda Neves 1515070006
# Carlos Eduardo Tapudima de Oliveira 1715310030
# Aracille de Souza Barbosa 1315120206
# Dayana Picanço Marquez 1715310058
#
# Write a program that receives the value of a debt and shows a table with
# the following data: debt amount, interest amount, number of installments and installment value.
# The interest and the number of installments follow the table below:
# Number of installments    % interest on the initial debt
# 1                         0
# 3                         10
# 6                         15
# 9                         20
# 12                        25
# Example program output:
# Debt amount     Interest amount   Number of installments   Installment value
# R$ 1.000,00     0                 1                         R$ 1.000,00
# R$ 1.100,00     100               3                         R$ 366,00
# R$ 1.150,00     150               6                         R$ 191,67
#----------------------------------------------------------------------------
divida = float(input("Digite o valor da sua divida: "))
c = 1
juros = 0
porcentagem_juros = 0
parcelas = 1
valor_parcelas = 0
divida_com_juros = 0
print( "Valor da Dívida | Quantidade de Parcelas | Valor dos Juros | Valor da Parcela ")
print("-"*130)
while c <= 5:
if c == 1:
valor_parcelas = divida
print("Valor da divida =%.2f"%divida, " | ", "Valor dos juros =", juros, " | ",
"Quantidades de parcelas =", parcelas, " | ", "Valor das parcelas =%.2f"%valor_parcelas)
parcelas = 3
        porcentagem_juros = 10
else:
        # the interest is a percentage of the original debt
        juros = divida * porcentagem_juros / 100
valor_parcelas = (divida + juros)/parcelas
divida_com_juros = divida + juros
print("Valor da divida =%.2f"%divida_com_juros, " | ", "Valor dos juros =",juros, " | ",
"Quantidades de parcelas =",parcelas, " | ", "Valor das parcelas =%.2f"%valor_parcelas)
parcelas += 3
        porcentagem_juros += 5
c += 1
| jucimarjr/IPC_2017-1 | lista05/lista05_lista01_questao41.py | Python | apache-2.0 | 2,362 |
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Rackspace driver
"""
import os
import base64
import urlparse
from xml.etree import ElementTree as ET
from xml.parsers.expat import ExpatError
from libcloud.common.base import Response
from libcloud.common.types import InvalidCredsError, MalformedResponseError
from libcloud.compute.types import NodeState, Provider
from libcloud.compute.base import NodeDriver, Node
from libcloud.compute.base import NodeSize, NodeImage, NodeLocation
from libcloud.common.rackspace import AUTH_HOST_US, AUTH_HOST_UK, RackspaceBaseConnection
NAMESPACE='http://docs.rackspacecloud.com/servers/api/v1.0'
#
# Prices need to be hardcoded as Rackspace doesn't expose them through
# the API. Prices are associated with flavors, of which there are 7.
# See - http://www.rackspacecloud.com/cloud_hosting_products/servers/pricing
#
RACKSPACE_PRICES = {
'1':'.015',
'2':'.030',
'3':'.060',
'4':'.120',
'5':'.240',
'6':'.480',
'7':'.960',
}
class RackspaceResponse(Response):
def success(self):
i = int(self.status)
return i >= 200 and i <= 299
def parse_body(self):
if not self.body:
return None
try:
body = ET.XML(self.body)
except:
raise MalformedResponseError("Failed to parse XML", body=self.body, driver=RackspaceNodeDriver)
return body
def parse_error(self):
# TODO: fixup, Rackspace only uses response codes really!
try:
body = ET.XML(self.body)
except:
raise MalformedResponseError("Failed to parse XML", body=self.body, driver=RackspaceNodeDriver)
try:
text = "; ".join([ err.text or ''
for err in
body.getiterator()
if err.text])
except ExpatError:
text = self.body
return '%s %s %s' % (self.status, self.error, text)
class RackspaceConnection(RackspaceBaseConnection):
"""
Connection class for the Rackspace driver
"""
responseCls = RackspaceResponse
auth_host = AUTH_HOST_US
_url_key = "server_url"
def __init__(self, user_id, key, secure=True):
super(RackspaceConnection, self).__init__(user_id, key, secure)
self.api_version = 'v1.0'
self.accept_format = 'application/xml'
def request(self, action, params=None, data='', headers=None, method='GET'):
if not headers:
headers = {}
if not params:
params = {}
# Due to first-run authentication request, we may not have a path
if self.server_url:
action = self.server_url + action
if method in ("POST", "PUT"):
headers = {'Content-Type': 'application/xml; charset=UTF-8'}
if method == "GET":
params['cache-busting'] = os.urandom(8).encode('hex')
return super(RackspaceConnection, self).request(
action=action,
params=params, data=data,
method=method, headers=headers
)
class RackspaceSharedIpGroup(object):
"""
Shared IP group info.
"""
def __init__(self, id, name, servers=None):
self.id = str(id)
self.name = name
self.servers = servers
class RackspaceNodeIpAddresses(object):
"""
List of public and private IP addresses of a Node.
"""
def __init__(self, public_addresses, private_addresses):
self.public_addresses = public_addresses
self.private_addresses = private_addresses
class RackspaceNodeDriver(NodeDriver):
"""
Rackspace node driver.
Extra node attributes:
- password: root password, available after create.
- hostId: represents the host your cloud server runs on
- imageId: id of image
- flavorId: id of flavor
"""
connectionCls = RackspaceConnection
type = Provider.RACKSPACE
name = 'Rackspace'
_rackspace_prices = RACKSPACE_PRICES
features = {"create_node": ["generates_password"]}
NODE_STATE_MAP = { 'BUILD': NodeState.PENDING,
'REBUILD': NodeState.PENDING,
'ACTIVE': NodeState.RUNNING,
'SUSPENDED': NodeState.TERMINATED,
'QUEUE_RESIZE': NodeState.PENDING,
'PREP_RESIZE': NodeState.PENDING,
'VERIFY_RESIZE': NodeState.RUNNING,
'PASSWORD': NodeState.PENDING,
'RESCUE': NodeState.PENDING,
'REBOOT': NodeState.REBOOTING,
'HARD_REBOOT': NodeState.REBOOTING,
'SHARE_IP': NodeState.PENDING,
'SHARE_IP_NO_CONFIG': NodeState.PENDING,
'DELETE_IP': NodeState.PENDING,
'UNKNOWN': NodeState.UNKNOWN}
def list_nodes(self):
return self._to_nodes(self.connection.request('/servers/detail').object)
def list_sizes(self, location=None):
return self._to_sizes(self.connection.request('/flavors/detail').object)
def list_images(self, location=None):
return self._to_images(self.connection.request('/images/detail').object)
def list_locations(self):
"""Lists available locations
Locations cannot be set or retrieved via the API, but currently
there are two locations, DFW and ORD.
"""
return [NodeLocation(0, "Rackspace DFW1/ORD1", 'US', self)]
def _change_password_or_name(self, node, name=None, password=None):
uri = '/servers/%s' % (node.id)
if not name:
name = node.name
body = { 'xmlns': NAMESPACE,
'name': name}
if password != None:
body['adminPass'] = password
server_elm = ET.Element('server', body)
resp = self.connection.request(uri, method='PUT', data=ET.tostring(server_elm))
if resp.status == 204 and password != None:
node.extra['password'] = password
return resp.status == 204
def ex_set_password(self, node, password):
"""
Sets the Node's root password.
This will reboot the instance to complete the operation.
L{node.extra['password']} will be set to the new value if the
operation was successful.
"""
return self._change_password_or_name(node, password=password)
def ex_set_server_name(self, node, name):
"""
Sets the Node's name.
This will reboot the instance to complete the operation.
"""
return self._change_password_or_name(node, name=name)
def create_node(self, **kwargs):
"""Create a new rackspace node
See L{NodeDriver.create_node} for more keyword args.
@keyword ex_metadata: Key/Value metadata to associate with a node
@type ex_metadata: C{dict}
@keyword ex_files: File Path => File contents to create on the node
@type ex_files: C{dict}
"""
name = kwargs['name']
image = kwargs['image']
size = kwargs['size']
server_elm = ET.Element(
'server',
{'xmlns': NAMESPACE,
'name': name,
'imageId': str(image.id),
'flavorId': str(size.id)}
)
metadata_elm = self._metadata_to_xml(kwargs.get("ex_metadata", {}))
if metadata_elm:
server_elm.append(metadata_elm)
files_elm = self._files_to_xml(kwargs.get("ex_files", {}))
if files_elm:
server_elm.append(files_elm)
shared_ip_elm = self._shared_ip_group_to_xml(kwargs.get("ex_shared_ip_group", None))
if shared_ip_elm:
server_elm.append(shared_ip_elm)
resp = self.connection.request("/servers",
method='POST',
data=ET.tostring(server_elm))
return self._to_node(resp.object)
def ex_rebuild(self, node_id, image_id):
elm = ET.Element(
'rebuild',
{'xmlns': NAMESPACE,
'imageId': image_id,
}
)
resp = self.connection.request("/servers/%s/action" % node_id,
method='POST',
data=ET.tostring(elm))
return resp.status == 202
def ex_create_ip_group(self, group_name, node_id=None):
group_elm = ET.Element(
'sharedIpGroup',
{'xmlns': NAMESPACE,
'name': group_name,
}
)
if node_id:
ET.SubElement(group_elm,
'server',
{'id': node_id}
)
resp = self.connection.request('/shared_ip_groups',
method='POST',
data=ET.tostring(group_elm))
return self._to_shared_ip_group(resp.object)
def ex_list_ip_groups(self, details=False):
uri = '/shared_ip_groups/detail' if details else '/shared_ip_groups'
resp = self.connection.request(uri,
method='GET')
groups = self._findall(resp.object, 'sharedIpGroup')
return [self._to_shared_ip_group(el) for el in groups]
def ex_delete_ip_group(self, group_id):
uri = '/shared_ip_groups/%s' % group_id
resp = self.connection.request(uri, method='DELETE')
return resp.status == 204
def ex_share_ip(self, group_id, node_id, ip, configure_node=True):
if configure_node:
str_configure = 'true'
else:
str_configure = 'false'
elm = ET.Element(
'shareIp',
{'xmlns': NAMESPACE,
'sharedIpGroupId' : group_id,
'configureServer' : str_configure}
)
uri = '/servers/%s/ips/public/%s' % (node_id, ip)
resp = self.connection.request(uri,
method='PUT',
data=ET.tostring(elm))
return resp.status == 202
def ex_unshare_ip(self, node_id, ip):
uri = '/servers/%s/ips/public/%s' % (node_id, ip)
resp = self.connection.request(uri,
method='DELETE')
return resp.status == 202
def ex_list_ip_addresses(self, node_id):
uri = '/servers/%s/ips' % node_id
resp = self.connection.request(uri,
method='GET')
return self._to_ip_addresses(resp.object)
def _metadata_to_xml(self, metadata):
if len(metadata) == 0:
return None
metadata_elm = ET.Element('metadata')
for k, v in metadata.items():
meta_elm = ET.SubElement(metadata_elm, 'meta', {'key': str(k) })
meta_elm.text = str(v)
return metadata_elm
def _files_to_xml(self, files):
if len(files) == 0:
return None
personality_elm = ET.Element('personality')
for k, v in files.items():
file_elm = ET.SubElement(personality_elm,
'file',
{'path': str(k)})
file_elm.text = base64.b64encode(v)
return personality_elm
def _reboot_node(self, node, reboot_type='SOFT'):
resp = self._node_action(node, ['reboot', ('type', reboot_type)])
return resp.status == 202
def ex_soft_reboot_node(self, node):
return self._reboot_node(node, reboot_type='SOFT')
def ex_hard_reboot_node(self, node):
return self._reboot_node(node, reboot_type='HARD')
def reboot_node(self, node):
return self._reboot_node(node, reboot_type='HARD')
def destroy_node(self, node):
uri = '/servers/%s' % (node.id)
resp = self.connection.request(uri, method='DELETE')
return resp.status == 202
def ex_get_node_details(self, node_id):
uri = '/servers/%s' % (node_id)
resp = self.connection.request(uri, method='GET')
if resp.status == 404:
return None
return self._to_node(resp.object)
def _node_action(self, node, body):
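        # `body` may be a list like ['reboot', ('type', 'HARD')], which is
        # serialized into an element such as
        # <reboot xmlns="http://docs.rackspacecloud.com/servers/api/v1.0" type="HARD"/>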
if isinstance(body, list):
attr = ' '.join(['%s="%s"' % (item[0], item[1])
for item in body[1:]])
body = '<%s xmlns="%s" %s/>' % (body[0], NAMESPACE, attr)
uri = '/servers/%s/action' % (node.id)
resp = self.connection.request(uri, method='POST', data=body)
return resp
def _to_nodes(self, object):
node_elements = self._findall(object, 'server')
return [ self._to_node(el) for el in node_elements ]
def _fixxpath(self, xpath):
# ElementTree wants namespaces in its xpaths, so here we add them.
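        # e.g. "public/ip" becomes "{<NAMESPACE>}public/{<NAMESPACE>}ip"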
return "/".join(["{%s}%s" % (NAMESPACE, e) for e in xpath.split("/")])
def _findall(self, element, xpath):
return element.findall(self._fixxpath(xpath))
def _to_node(self, el):
def get_ips(el):
return [ip.get('addr') for ip in el]
def get_meta_dict(el):
d = {}
for meta in el:
d[meta.get('key')] = meta.text
return d
public_ip = get_ips(self._findall(el,
'addresses/public/ip'))
private_ip = get_ips(self._findall(el,
'addresses/private/ip'))
metadata = get_meta_dict(self._findall(el, 'metadata/meta'))
n = Node(id=el.get('id'),
name=el.get('name'),
state=self.NODE_STATE_MAP.get(el.get('status'), NodeState.UNKNOWN),
public_ip=public_ip,
private_ip=private_ip,
driver=self.connection.driver,
extra={
'password': el.get('adminPass'),
'hostId': el.get('hostId'),
'imageId': el.get('imageId'),
'flavorId': el.get('flavorId'),
'uri': "https://%s%s/servers/%s" % (self.connection.host, self.connection.request_path, el.get('id')),
'metadata': metadata,
})
return n
def _to_sizes(self, object):
elements = self._findall(object, 'flavor')
return [ self._to_size(el) for el in elements ]
def _to_size(self, el):
s = NodeSize(id=el.get('id'),
name=el.get('name'),
ram=int(el.get('ram')),
disk=int(el.get('disk')),
bandwidth=None, # XXX: needs hardcode
price=self._rackspace_prices.get(el.get('id')), # Hardcoded,
driver=self.connection.driver)
return s
def _to_images(self, object):
elements = self._findall(object, "image")
return [ self._to_image(el)
for el in elements
if el.get('status') == 'ACTIVE' ]
def _to_image(self, el):
i = NodeImage(id=el.get('id'),
name=el.get('name'),
driver=self.connection.driver,
extra={'serverId': el.get('serverId')})
return i
def ex_limits(self):
"""
Extra call to get account's limits, such as
rates (for example amount of POST requests per day)
and absolute limits like total amount of available
RAM to be used by servers.
@return: C{dict} with keys 'rate' and 'absolute'
"""
def _to_rate(el):
rate = {}
for item in el.items():
rate[item[0]] = item[1]
return rate
def _to_absolute(el):
return {el.get('name'): el.get('value')}
limits = self.connection.request("/limits").object
rate = [ _to_rate(el) for el in self._findall(limits, 'rate/limit') ]
absolute = {}
for item in self._findall(limits, 'absolute/limit'):
absolute.update(_to_absolute(item))
return {"rate": rate, "absolute": absolute}
def ex_save_image(self, node, name):
"""Create an image for node.
@keyword node: node to use as a base for image
@param node: L{Node}
@keyword name: name for new image
@param name: C{string}
"""
image_elm = ET.Element(
'image',
{'xmlns': NAMESPACE,
'name': name,
'serverId': node.id}
)
return self._to_image(self.connection.request("/images",
method="POST",
data=ET.tostring(image_elm)).object)
def _to_shared_ip_group(self, el):
servers_el = self._findall(el, 'servers')
if servers_el:
servers = [s.get('id') for s in self._findall(servers_el[0], 'server')]
else:
servers = None
return RackspaceSharedIpGroup(id=el.get('id'),
name=el.get('name'),
servers=servers)
def _to_ip_addresses(self, el):
return RackspaceNodeIpAddresses(
[ip.get('addr') for ip in self._findall(self._findall(el, 'public')[0], 'ip')],
[ip.get('addr') for ip in self._findall(self._findall(el, 'private')[0], 'ip')]
)
def _shared_ip_group_to_xml(self, shared_ip_group):
if not shared_ip_group:
return None
return ET.Element('sharedIpGroupId', shared_ip_group)
class RackspaceUKConnection(RackspaceConnection):
"""
Connection class for the Rackspace UK driver
"""
auth_host = AUTH_HOST_UK
class RackspaceUKNodeDriver(RackspaceNodeDriver):
"""Driver for Rackspace in the UK (London)
"""
name = 'Rackspace (UK)'
connectionCls = RackspaceUKConnection
def list_locations(self):
return [NodeLocation(0, 'Rackspace UK London', 'UK', self)]
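# Usage sketch (illustrative, not part of the original driver; credentials are
# placeholders): with a valid Rackspace UK account the driver is used like any
# other libcloud compute driver.
#
#   driver = RackspaceUKNodeDriver('myuser', 'myapikey')
#   print(driver.list_locations())
#   print(driver.ex_limits())   # {'rate': [...], 'absolute': {...}}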
| cloudkick/libcloud | libcloud/compute/drivers/rackspace.py | Python | apache-2.0 | 18,878 |
#!/usr/bin/python3
# -*- coding: utf-8 -*-
from __future__ import division, print_function, unicode_literals
import brainstorm as bs
import numpy as np
from PIL import Image
network = bs.Network.from_hdf5('mnist_pi_best.hdf5')
image = Image.open("test_4.jpg")
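# Convert the RGB image to a single normalised grayscale channel: the dot product
# applies luminance-style weights to the R, G and B channels and the division by
# 255 scales pixel values into the [0, 1] range expected by the network.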
data = np.array(image).reshape(
image.size[0], image.size[1], 3).dot(
[0.2, 0.7, 0.1]).reshape(
image.size[0], image.size[1], 1) / 255
network.provide_external_data(
{'default': np.array([[data]])},
all_inputs=False)
network.forward_pass(training_pass=False)
classification = network.get('Output.outputs.predictions')[0][0]
print(np.argmax(classification))
print(classification)
| pinae/MNIST-Brainstorm | classify.py | Python | gpl-3.0 | 681 |
import random
import collections
import functools
from hearthbreaker.game_objects import Hero
class memoized(object):
'''Decorator. Caches a function's return value each time it is called.
If called later with the same arguments, the cached value is returned
(not reevaluated).
'''
    def __init__(self, func):
        self.func = func
        self.cache = {}
    def __call__(self, *args):
        try:
            return self.cache[args]
        except KeyError:
            return self.cache.setdefault(args, self.func(*args))
        except TypeError:  # unhashable arguments (e.g. lists) cannot be cached
            return self.func(*args)
def __repr__(self):
'''Return the function's docstring.'''
return self.func.__doc__
def __get__(self, obj, objtype):
'''Support instance methods.'''
return functools.partial(self.__call__, obj)
class Util:
@staticmethod
def reverse_sorted(list):
res = sorted(list)
res.reverse()
return res
@staticmethod
def uniq_by_sorted(list):
res = {}
for obj in list:
a = [c.name for c in obj]
k = str.join("", sorted(a))
if not res.get(k):
res[k] = obj
return res.values()
@staticmethod
def rand_el(list):
i = random.randint(0, len(list) - 1)
return list[i]
@staticmethod
def rand_prefer_minion(targets):
minions = [card for card in filter(lambda c: not isinstance(c, Hero), targets)]
if len(minions) > 0:
targets = minions
return Util.rand_el(targets)
@staticmethod
def filter_out_one(arr, f):
res = [obj for obj in filter(lambda x: not f(x), arr)]
if len(res) + 1 != len(arr):
s = "bad remove, list has {} elements, removed {}, {}"
raise Exception(s.format(len(arr), len(arr) - len(res), arr))
return res
@staticmethod
def names(arr):
res = []
for obj in arr:
if hasattr(obj, "name"):
res.append(obj.name)
else:
res.append("UNK")
return res
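if __name__ == "__main__":
    # Minimal illustration (not part of the original module): the list helpers work on
    # plain Python data. Running this standalone still requires hearthbreaker to be
    # importable because of the Hero import above.
    print(Util.reverse_sorted([3, 1, 2]))                    # [3, 2, 1]
    print(Util.filter_out_one([1, 2, 3], lambda x: x == 2))  # [1, 3]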
| anuragpapineni/Hearthbreaker-evolved-agent | hearthbreaker/agents/trade/util.py | Python | mit | 1,929 |
"""
This file contains the generic, assorted views that don't fall under one of
the other applications. Views are django's way of processing e.g. html
templates on the fly.
"""
from django.contrib.admin.sites import site
from django.conf import settings
from django.contrib.admin.views.decorators import staff_member_required
from django.shortcuts import render
from evennia import SESSION_HANDLER
from evennia.objects.models import ObjectDB
from evennia.players.models import PlayerDB
from evennia.utils import logger
from django.contrib.auth import login
_BASE_CHAR_TYPECLASS = settings.BASE_CHARACTER_TYPECLASS
def _shared_login(request):
"""
Handle the shared login between website and webclient.
"""
csession = request.session
player = request.user
sesslogin = csession.get("logged_in", None)
if csession.session_key is None:
# this is necessary to build the sessid key
csession.save()
elif player.is_authenticated():
if not sesslogin:
csession["logged_in"] = player.id
elif sesslogin:
# The webclient has previously registered a login to this csession
player = PlayerDB.objects.get(id=sesslogin)
try:
login(request, player)
except AttributeError:
logger.log_trace()
def _gamestats():
# Some misc. configurable stuff.
# TODO: Move this to either SQL or settings.py based configuration.
fpage_player_limit = 4
# A QuerySet of the most recently connected players.
recent_users = PlayerDB.objects.get_recently_connected_players()[:fpage_player_limit]
nplyrs_conn_recent = len(recent_users)
nplyrs = PlayerDB.objects.num_total_players()
nplyrs_reg_recent = len(PlayerDB.objects.get_recently_created_players())
nsess = SESSION_HANDLER.player_count()
# nsess = len(PlayerDB.objects.get_connected_players()) or "no one"
nobjs = ObjectDB.objects.all().count()
nrooms = ObjectDB.objects.filter(db_location__isnull=True).exclude(db_typeclass_path=_BASE_CHAR_TYPECLASS).count()
nexits = ObjectDB.objects.filter(db_location__isnull=False, db_destination__isnull=False).count()
nchars = ObjectDB.objects.filter(db_typeclass_path=_BASE_CHAR_TYPECLASS).count()
nothers = nobjs - nrooms - nchars - nexits
pagevars = {
"page_title": "Front Page",
"players_connected_recent": recent_users,
"num_players_connected": nsess,
"num_players_registered": nplyrs,
"num_players_connected_recent": nplyrs_conn_recent,
"num_players_registered_recent": nplyrs_reg_recent,
"num_rooms": nrooms,
"num_exits": nexits,
"num_objects": nobjs,
"num_characters": nchars,
"num_others": nothers
}
return pagevars
def page_index(request):
"""
Main root page.
"""
# handle webclient-website shared login
_shared_login(request)
# get game db stats
pagevars = _gamestats()
return render(request, 'index.html', pagevars)
def to_be_implemented(request):
"""
A notice letting the user know that this particular feature hasn't been
implemented yet.
"""
pagevars = {
"page_title": "To Be Implemented...",
}
return render(request, 'tbi.html', pagevars)
@staff_member_required
def evennia_admin(request):
"""
Helpful Evennia-specific admin page.
"""
return render(
request, 'evennia_admin.html', {
'playerdb': PlayerDB})
def admin_wrapper(request):
"""
Wrapper that allows us to properly use the base Django admin site, if needed.
"""
return staff_member_required(site.index)(request)
| MarsZone/DreamLand | muddery/web/website/views.py | Python | bsd-3-clause | 3,673 |
cases = [
('pmt.py -s 1 -n 20 populations, first without state filter',
'pmt.py -s 1 -n 20 populations'),
('pmt.py -s 2 -n 20 populations filter3, state filter limits population to 3',
'pmt.py -s 2 -n 20 populations filter3')
]
| nfredrik/pyModelStuff | samples/populations/test/test_filter.py | Python | bsd-3-clause | 247 |
__author__ = 'oglebrandon'
| CarterBain/AlephNull | alephnull/live/__init__.py | Python | apache-2.0 | 27 |
# Copyright 2013-2020 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
from spack import *
class Cronie(AutotoolsPackage):
"""Cronie contains the standard UNIX daemon crond that runs specified
programs at scheduled times and related tools."""
homepage = "https://github.com/cronie-crond/cronie"
url = "https://github.com/cronie-crond/cronie/archive/cronie-1.5.5.tar.gz"
version('1.5.5', sha256='22c2a2b22577c0f776c1268d0e0f305c5c041e10155022a345b43b665da0ffe9')
def autoreconf(self, spec, prefix):
bash = which('bash')
bash('./autogen.sh')
def setup_run_environment(self, env):
env.prepend_path('PATH', self.prefix.sbin)
| iulian787/spack | var/spack/repos/builtin/packages/cronie/package.py | Python | lgpl-2.1 | 811 |
from cached_property import cached_property
import sympy
from devito.ir.equations.algorithms import dimension_sort, lower_exprs
from devito.finite_differences.differentiable import diff2sympy
from devito.ir.support import (IterationSpace, DataSpace, Interval, IntervalGroup,
Stencil, detect_accesses, detect_oobs, detect_io,
build_intervals, build_iterators)
from devito.symbolics import CondEq, IntDiv, uxreplace
from devito.tools import Pickable, frozendict
from devito.types import Eq
__all__ = ['LoweredEq', 'ClusterizedEq', 'DummyEq']
class IREq(sympy.Eq):
_state = ('is_Increment', 'ispace', 'dspace', 'conditionals', 'implicit_dims')
@property
def is_Scalar(self):
return self.lhs.is_Symbol
is_scalar = is_Scalar
@property
def is_Tensor(self):
return self.lhs.is_Indexed
@property
def is_Increment(self):
return self._is_Increment
@property
def ispace(self):
return self._ispace
@property
def dspace(self):
return self._dspace
@cached_property
def dimensions(self):
# Note: some dimensions may be in the iteration space but not in the
# data space (e.g., a DerivedDimension); likewise, some dimensions may
# be in the data space but not in the iteration space (e.g., when a
# function is indexed with integers only)
return set(self.dspace.dimensions) | set(self.ispace.dimensions)
@property
def implicit_dims(self):
return self._implicit_dims
@cached_property
def conditionals(self):
return self._conditionals or frozendict()
@property
def directions(self):
return self.ispace.directions
@property
def dtype(self):
return self.lhs.dtype
@cached_property
def grid(self):
grids = set()
for f in self.dspace.parts:
if f.is_DiscreteFunction:
grids.add(f.grid)
if len(grids) == 1:
return grids.pop()
else:
return None
@property
def state(self):
return {i: getattr(self, i) for i in self._state}
def apply(self, func):
"""
Apply a callable to `self` and each expr-like attribute carried by `self`,
thus triggering a reconstruction.
"""
args = [func(self.lhs), func(self.rhs)]
kwargs = dict(self.state)
kwargs['conditionals'] = {k: func(v) for k, v in self.conditionals.items()}
return self.func(*args, **kwargs)
class LoweredEq(IREq):
"""
LoweredEq(devito.Eq)
LoweredEq(devito.LoweredEq, **kwargs)
LoweredEq(lhs, rhs, **kwargs)
A SymPy equation with associated IterationSpace and DataSpace.
When created as ``LoweredEq(devito.Eq)``, the iteration and data spaces are
automatically derived from analysis of ``expr``.
When created as ``LoweredEq(devito.LoweredEq, **kwargs)``, the keyword
arguments can be anything that appears in ``LoweredEq._state`` (i.e.,
ispace, dspace, ...).
When created as ``LoweredEq(lhs, rhs, **kwargs)``, *all* keywords in
``LoweredEq._state`` must appear in ``kwargs``.
"""
_state = IREq._state + ('reads', 'writes')
def __new__(cls, *args, **kwargs):
if len(args) == 1 and isinstance(args[0], LoweredEq):
# origin: LoweredEq(devito.LoweredEq, **kwargs)
input_expr = args[0]
expr = sympy.Eq.__new__(cls, *input_expr.args, evaluate=False)
for i in cls._state:
setattr(expr, '_%s' % i, kwargs.get(i) or getattr(input_expr, i))
return expr
elif len(args) == 1 and isinstance(args[0], Eq):
# origin: LoweredEq(devito.Eq)
input_expr = expr = args[0]
elif len(args) == 2:
expr = sympy.Eq.__new__(cls, *args, evaluate=False)
for i in cls._state:
setattr(expr, '_%s' % i, kwargs.pop(i))
return expr
else:
raise ValueError("Cannot construct LoweredEq from args=%s "
"and kwargs=%s" % (str(args), str(kwargs)))
# Well-defined dimension ordering
ordering = dimension_sort(expr)
# Analyze the expression
mapper = detect_accesses(expr)
oobs = detect_oobs(mapper)
conditional_dimensions = [i for i in ordering if i.is_Conditional]
# Construct Intervals for IterationSpace and DataSpace
intervals = build_intervals(Stencil.union(*mapper.values()))
iintervals = [] # iteration Intervals
dintervals = [] # data Intervals
for i in intervals:
d = i.dim
if d in oobs:
iintervals.append(i.zero())
dintervals.append(i)
else:
iintervals.append(i.zero())
dintervals.append(i.zero())
# Construct the IterationSpace
iintervals = IntervalGroup(iintervals, relations=ordering.relations)
iterators = build_iterators(mapper)
ispace = IterationSpace(iintervals, iterators)
# Construct the DataSpace
dintervals.extend([Interval(i, 0, 0) for i in ordering
if i not in ispace.dimensions + conditional_dimensions])
parts = {k: IntervalGroup(build_intervals(v)).add(iintervals)
for k, v in mapper.items() if k}
dspace = DataSpace(dintervals, parts)
# Construct the conditionals and replace the ConditionalDimensions in `expr`
conditionals = {}
for d in conditional_dimensions:
if d.condition is None:
conditionals[d] = CondEq(d.parent % d.factor, 0)
else:
conditionals[d] = diff2sympy(lower_exprs(d.condition))
if d.factor is not None:
expr = uxreplace(expr, {d: IntDiv(d.index, d.factor)})
conditionals = frozendict(conditionals)
# Lower all Differentiable operations into SymPy operations
rhs = diff2sympy(expr.rhs)
# Finally create the LoweredEq with all metadata attached
expr = super(LoweredEq, cls).__new__(cls, expr.lhs, rhs, evaluate=False)
expr._dspace = dspace
expr._ispace = ispace
expr._conditionals = conditionals
expr._reads, expr._writes = detect_io(expr)
expr._is_Increment = input_expr.is_Increment
expr._implicit_dims = input_expr.implicit_dims
return expr
@property
def reads(self):
return self._reads
@property
def writes(self):
return self._writes
def xreplace(self, rules):
return LoweredEq(self.lhs.xreplace(rules), self.rhs.xreplace(rules), **self.state)
def func(self, *args):
return super(LoweredEq, self).func(*args, **self.state, evaluate=False)
class ClusterizedEq(IREq, Pickable):
"""
ClusterizedEq(devito.IREq, **kwargs)
ClusterizedEq(lhs, rhs, **kwargs)
A SymPy equation with associated IterationSpace and DataSpace.
There are two main differences between a LoweredEq and a
ClusterizedEq:
* In a ClusterizedEq, the iteration and data spaces must *always*
be provided by the caller.
* A ClusterizedEq is "frozen", meaning that any call to ``xreplace``
will not trigger re-evaluation (e.g., mathematical simplification)
of the expression.
These two properties make a ClusterizedEq suitable for use in a Cluster.
"""
def __new__(cls, *args, **kwargs):
if len(args) == 1:
# origin: ClusterizedEq(expr, **kwargs)
input_expr = args[0]
expr = sympy.Eq.__new__(cls, *input_expr.args, evaluate=False)
for i in cls._state:
v = kwargs[i] if i in kwargs else getattr(input_expr, i, None)
setattr(expr, '_%s' % i, v)
elif len(args) == 2:
# origin: ClusterizedEq(lhs, rhs, **kwargs)
expr = sympy.Eq.__new__(cls, *args, evaluate=False)
for i in cls._state:
setattr(expr, '_%s' % i, kwargs.pop(i))
else:
raise ValueError("Cannot construct ClusterizedEq from args=%s "
"and kwargs=%s" % (str(args), str(kwargs)))
return expr
def func(self, *args, **kwargs):
kwargs = {k: kwargs.get(k, v) for k, v in self.state.items()}
return super(ClusterizedEq, self).func(*args, **kwargs)
# Pickling support
_pickle_args = ['lhs', 'rhs']
_pickle_kwargs = IREq._state
__reduce_ex__ = Pickable.__reduce_ex__
class DummyEq(ClusterizedEq):
"""
DummyEq(expr)
DummyEq(lhs, rhs)
A special ClusterizedEq with void iteration and data spaces.
"""
def __new__(cls, *args, **kwargs):
if len(args) == 1:
input_expr = args[0]
assert isinstance(input_expr, Eq)
obj = LoweredEq(input_expr)
elif len(args) == 2:
obj = LoweredEq(Eq(*args, evaluate=False))
else:
raise ValueError("Cannot construct DummyEq from args=%s" % str(args))
return ClusterizedEq.__new__(cls, obj, ispace=obj.ispace, dspace=obj.dspace)
# Pickling support
_pickle_args = ['lhs', 'rhs']
_pickle_kwargs = []
| opesci/devito | devito/ir/equations/equation.py | Python | mit | 9,325 |
#!/usr/bin/env python
#-*-coding:utf-8-*-
# The first line of a python script must be #!/usr/bin/env python so that the rest of the file is interpreted with the python environment
#=============================================================================
# Copyright (c) 2015
# ShanghaiKunyan. All rights reserved
#
# Filename : /home/wukun/work/enum_to_switch.py
# Author : Sunsolo(卓一航)
# Email : [email protected]
# Date : 2015-12-29 10:30
# Description :
#=============================================================================
import sys
import re
import codecs
import time
COLOR_OUTPUT = {"FG_GREEN" : '\033[32;1m', "END" : '\033[0m'} # terminal color control codes
SOURCE_PATH = "./error_comm.h" # path of the file that holds the enum definitions
TIME_SHOW = {
    "YEAR" : str(time.localtime().tm_year), # timestamp fields shown in the generated header
"MONTH" : str(time.localtime().tm_mon),
"DAY" : str(time.localtime().tm_mday),
"HOUR" : str(time.localtime().tm_hour),
"MIN" : str(time.localtime().tm_min),
}
all_enum_map = {} # stored as { enum name : { enum value name : comment meaning } }
def gen_enum_to_dictionary(src_path):
    '''gen_enum_to_dictionary:
       args: src_path [path of the file that holds the enum definitions]
       function: parse the enums in src_path into the format
                 { enum name : { enum value name : comment meaning } }
    '''
global all_enum_map
temp = None
enum_name = ""
is_enum_block = None
fp = open(src_path, "r")
lines = fp.readlines()
for line_char in lines:
line_char.strip()
if line_char.startswith("enum"):
is_enum_block = 1
enum_name = line_char.split(" ")
temp = enum_name[1]
location = temp.find("{")
if location > 0:
temp = temp[:location]
all_enum_map[temp] = {}
continue
if line_char.startswith("}"):
is_enum_block = None
if len(line_char.split()) == 0 :
continue
if is_enum_block is not None:
line_char.lstrip("\r\n\t ")
enum_define = line_char.split()[0]
line_char = line_char.decode("utf8")
enum_note = line_char.split("//")[1].lstrip().rstrip()
if enum_define is not None and temp is not None :
enum_define.lstrip("\t")
all_enum_map[temp][enum_define] = enum_note
def gen_dictionary_to_switch(cc_target_file):
"""gen_dictionary_to_switch:
args:cc_target_file[保存最后转换的switch语句所在的目标cc文件]
function:将{ 枚举变量名 : { 枚举值名 : 注释含义} }格式的变量映射为switch语句
"""
fp = codecs.open(cc_target_file, "w","utf-8")
fp.write('''/*=============================================================================
# Copyright (c) 2015
# ShanghaiKunyan. All rights reserved
#
# Filename : ''' + cc_target_file + '''
# Author : Sunsolo
# Email : [email protected]
# Date : ''' + TIME_SHOW["YEAR"] + "-" + TIME_SHOW["MONTH"] + "-" + TIME_SHOW["DAY"] +" " + TIME_SHOW["HOUR"] + ":"+ TIME_SHOW["MIN"] + '''
# Description : The file is generated automatically by enum_to_switch.py.
#=============================================================================*/''')
fp.write('''
\n#include "net/error_comm.h"
\nconst char* buddha_strerror(int error_code) {
char* error_string = "";
switch(error_code){\n
''')
for key, value in all_enum_map["neterrorcode"].items():
fp.write(" case {0}:".format(key))
fp.write('\n\n error_string = "' + value + '";')
fp.write('\n break')
s = ';' + '\n\n'
fp.write(s)
fp.write(''' default:
error_string = "errorcode not define";
break;
}
return error_string;
}
''')
if __name__ == "__main__":
executable_name = sys.argv[0]
    if len(sys.argv) != 2: # show usage of the script
print COLOR_OUTPUT["FG_GREEN"] + "Usage: \n " + executable_name + " error_comm.cc" + COLOR_OUTPUT["END"]
exit(1)
else:
        generate_file_name = sys.argv[1] # name of the file to generate
gen_enum_to_dictionary(SOURCE_PATH)
gen_dictionary_to_switch(generate_file_name)
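# Expected input format (an assumption, inferred from the parser above): error_comm.h
# should contain plain C enums whose values each carry a trailing // comment, e.g.
#
#   enum neterrorcode {
#       NET_OK = 0,        // request succeeded
#       NET_TIMEOUT = 1,   // connection timed out
#   };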
| smartdata-x/strade | src/pub/net/enum_to_switch.py | Python | apache-2.0 | 4,275 |
from DoneDone import IssueTracker
import json,sys
# for flask decorator
from datetime import timedelta
from flask import Flask, make_response, request, current_app
from functools import update_wrapper
def get_people_in_a_project(projectId):
peopleObj = issueTracker.getAllPeopleInProject(projectId)
people = []
for pp in peopleObj:
people.append(pp)
return json.loads("".join(people))
def find_person_id_in_people(peopleList, personName):
for person in peopleList:
if personName in person['Value']:
return person['ID']
return None
def find_project_id(projectName):
projectsObj = issueTracker.getProjects()
for pp in projectsObj:
projects = json.loads(pp)
for p in projects:
if p['Name'] == projectName:
return int(p['ID'])
return None
def createIssue(issueTitle, issueDescription, projectName, fixerName, testerName):
##
## Prepare Issue
##
projectId = find_project_id(projectName)
print "[DEBUG] found project id", projectId, "for project name", projectName
peopleInProject = get_people_in_a_project(projectId)
print "[DEBUG] found ", len(peopleInProject), "people in this project"
resolverId = find_person_id_in_people(peopleInProject, fixerName)
print "[DEBUG] found id", resolverId, "for fixer name", fixerName
testerId = find_person_id_in_people(peopleInProject, testerName)
print "[DEBUG] found id", testerId, "for tester name", testerName
issueTags = "autoIssue"
issuePriority = 1 # low
##
## Add issue!
##
retFlag = issueTracker.createIssue(
projectId,
issueTitle,
issuePriority,
resolverId,
testerId,
"\n ".join(issueDescription.split('\n')),
issueTags
)
print "[DEBUG] issue created?" + retFlag
return retFlag
# from https://blog.skyred.fi/articles/better-crossdomain-snippet-for-flask.html
def crossdomain(origin=None, methods=None, headers=None,
max_age=21600, attach_to_all=True,
automatic_options=True):
if methods is not None:
methods = ', '.join(sorted(x.upper() for x in methods))
if headers is not None and not isinstance(headers, basestring):
headers = ', '.join(x.upper() for x in headers)
if not isinstance(origin, basestring):
origin = ', '.join(origin)
if isinstance(max_age, timedelta):
max_age = max_age.total_seconds()
def get_methods():
if methods is not None:
return methods
options_resp = current_app.make_default_options_response()
return options_resp.headers['allow']
def decorator(f):
def wrapped_function(*args, **kwargs):
if automatic_options and request.method == 'OPTIONS':
resp = current_app.make_default_options_response()
else:
resp = make_response(f(*args, **kwargs))
if not attach_to_all and request.method != 'OPTIONS':
return resp
h = resp.headers
h['Access-Control-Allow-Origin'] = origin
h['Access-Control-Allow-Methods'] = get_methods()
h['Access-Control-Max-Age'] = str(max_age)
h['Access-Control-Allow-Credentials'] = 'true'
h['Access-Control-Allow-Headers'] = \
"Origin, X-Requested-With, Content-Type, Accept, Authorization"
if headers is not None:
h['Access-Control-Allow-Headers'] = headers
return resp
f.provide_automatic_options = False
return update_wrapper(wrapped_function, f)
return decorator
if __name__ == '__main__':
##########################################################
# CONFIGURATION #
##########################################################
domain = "<YOUR DONE DONE DOMAIN HERE>"
token = "<YOUR API TOKEN GOES HERE>"
username = "<YOUR DONEDONE USERNAME>"
password = "<YOUR DONEDONE PASSWORD>"
serverPort = 8011
projectName = "EWOK"
fixerName = "Ewok"
testerName = "Ewok"
###########################################################################
issueTracker = IssueTracker(domain, token, username, password)
app = Flask(__name__)
@app.route('/', methods=['POST', 'OPTIONS'])
@crossdomain(origin='*')
def index():
data = request.get_json()
issueTitle = data['title']
issueDescription = data['description']
print "[DEBUG] Issue title:", issueTitle
print "[DEBUG] Issue description:", issueDescription
if createIssue(issueTitle, issueDescription, projectName, fixerName, testerName):
return '{"status" : "ok"}'
else:
return '{"status" : "ko"}'
app.run(port=serverPort)
| darksmo/gmailToDoneDone | doneDoneServer.py | Python | mit | 4,872 |
# -*- coding: utf-8 -*-
##############################################################################
#
# Copyright (C) 2015 ADHOC SA (http://www.adhoc.com.ar)
# All Rights Reserved.
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
{
'name': 'Sale Contract Editable',
'version': '1.0',
'category': 'Projects & Services',
'sequence': 14,
'summary': '',
'description': """
Sale Contract Editable
=============================
""",
'author': 'ADHOC SA',
'website': 'www.adhoc.com.ar',
'images': [
],
'depends': [
'sale',
],
'data': [
],
'demo': [
],
'test': [
],
'installable': True,
'auto_install': False,
'application': False,
}
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| maljac/odoo-addons | sale_contract_editable/__openerp__.py | Python | agpl-3.0 | 1,519 |
from os.path import join, normcase, abspath, sep
from django.utils.encoding import force_unicode
def safe_join(base, *paths):
"""
Joins one or more path components to the base path component intelligently.
Returns a normalized, absolute version of the final path.
The final path must be located inside of the base path component (otherwise
a ValueError is raised).
"""
# We need to use normcase to ensure we don't false-negative on case
# insensitive operating systems (like Windows).
base = force_unicode(base)
paths = [force_unicode(p) for p in paths]
final_path = normcase(abspath(join(base, *paths)))
base_path = normcase(abspath(base))
base_path_len = len(base_path)
# Ensure final_path starts with base_path and that the next character after
# the final path is os.sep (or nothing, in which case final_path must be
# equal to base_path).
if not final_path.startswith(base_path) \
or final_path[base_path_len:base_path_len+1] not in ('', sep):
raise ValueError('the joined path is located outside of the base path'
' component')
return final_path
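if __name__ == "__main__":
    # Illustrative only (not part of Django; assumes a Django version old enough to
    # still provide force_unicode): joining inside the base path succeeds, while a
    # path that escapes it raises ValueError.
    print(safe_join('/srv/media', 'photos', 'cat.jpg'))
    try:
        safe_join('/srv/media', '..', 'etc', 'passwd')
    except ValueError as exc:
        print('rejected: %s' % exc)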
| jamslevy/gsoc | app/django/utils/_os.py | Python | apache-2.0 | 1,166 |
# coding=utf-8
# Copyright 2017 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
from __future__ import (absolute_import, division, generators, nested_scopes, print_function,
unicode_literals, with_statement)
import os
import sys
if __name__ == '__main__':
# Print the content of the env var given on the command line.
print(os.environ[sys.argv[1]])
| landism/pants | testprojects/src/python/print_env/main.py | Python | apache-2.0 | 447 |
"""Aiohttp test utils."""
import asyncio
from contextlib import contextmanager
import json as _json
import re
from unittest import mock
from urllib.parse import parse_qs
from aiohttp import ClientSession
from aiohttp.streams import StreamReader
from yarl import URL
from aiohttp.client_exceptions import ClientResponseError
from homeassistant.const import EVENT_HOMEASSISTANT_CLOSE
retype = type(re.compile(''))
def mock_stream(data):
"""Mock a stream with data."""
protocol = mock.Mock(_reading_paused=False)
stream = StreamReader(protocol)
stream.feed_data(data)
stream.feed_eof()
return stream
class AiohttpClientMocker:
"""Mock Aiohttp client requests."""
def __init__(self):
"""Initialize the request mocker."""
self._mocks = []
self._cookies = {}
self.mock_calls = []
def request(self, method, url, *,
auth=None,
status=200,
text=None,
data=None,
content=None,
json=None,
params=None,
headers={},
exc=None,
cookies=None):
"""Mock a request."""
if json is not None:
text = _json.dumps(json)
if text is not None:
content = text.encode('utf-8')
if content is None:
content = b''
if not isinstance(url, retype):
url = URL(url)
if params:
url = url.with_query(params)
self._mocks.append(AiohttpClientMockResponse(
method, url, status, content, cookies, exc, headers))
def get(self, *args, **kwargs):
"""Register a mock get request."""
self.request('get', *args, **kwargs)
def put(self, *args, **kwargs):
"""Register a mock put request."""
self.request('put', *args, **kwargs)
def post(self, *args, **kwargs):
"""Register a mock post request."""
self.request('post', *args, **kwargs)
def delete(self, *args, **kwargs):
"""Register a mock delete request."""
self.request('delete', *args, **kwargs)
def options(self, *args, **kwargs):
"""Register a mock options request."""
self.request('options', *args, **kwargs)
@property
def call_count(self):
"""Return the number of requests made."""
return len(self.mock_calls)
def clear_requests(self):
"""Reset mock calls."""
self._mocks.clear()
self._cookies.clear()
self.mock_calls.clear()
def create_session(self, loop):
"""Create a ClientSession that is bound to this mocker."""
session = ClientSession(loop=loop)
# Setting directly on `session` will raise deprecation warning
object.__setattr__(session, '_request', self.match_request)
return session
async def match_request(self, method, url, *, data=None, auth=None,
params=None, headers=None, allow_redirects=None,
timeout=None, json=None, cookies=None):
"""Match a request against pre-registered requests."""
data = data or json
url = URL(url)
if params:
url = url.with_query(params)
for response in self._mocks:
if response.match_request(method, url, params):
self.mock_calls.append((method, url, data, headers))
if response.exc:
raise response.exc
return response
assert False, "No mock registered for {} {} {}".format(method.upper(),
url, params)
class AiohttpClientMockResponse:
"""Mock Aiohttp client response."""
def __init__(self, method, url, status, response, cookies=None, exc=None,
headers=None):
"""Initialize a fake response."""
self.method = method
self._url = url
self.status = status
self.response = response
self.exc = exc
self._headers = headers or {}
self._cookies = {}
if cookies:
for name, data in cookies.items():
cookie = mock.MagicMock()
cookie.value = data
self._cookies[name] = cookie
def match_request(self, method, url, params=None):
"""Test if response answers request."""
if method.lower() != self.method.lower():
return False
# regular expression matching
if isinstance(self._url, retype):
return self._url.search(str(url)) is not None
if (self._url.scheme != url.scheme or self._url.host != url.host or
self._url.path != url.path):
return False
# Ensure all query components in matcher are present in the request
request_qs = parse_qs(url.query_string)
matcher_qs = parse_qs(self._url.query_string)
for key, vals in matcher_qs.items():
for val in vals:
try:
request_qs.get(key, []).remove(val)
except ValueError:
return False
return True
@property
def headers(self):
"""Return content_type."""
return self._headers
@property
def cookies(self):
"""Return dict of cookies."""
return self._cookies
@property
def url(self):
"""Return yarl of URL."""
return self._url
@property
def content_type(self):
"""Return yarl of URL."""
return self._headers.get('content-type')
@property
def content(self):
"""Return content."""
return mock_stream(self.response)
@asyncio.coroutine
def read(self):
"""Return mock response."""
return self.response
@asyncio.coroutine
def text(self, encoding='utf-8'):
"""Return mock response as a string."""
return self.response.decode(encoding)
@asyncio.coroutine
def json(self, encoding='utf-8'):
"""Return mock response as a json."""
return _json.loads(self.response.decode(encoding))
@asyncio.coroutine
def release(self):
"""Mock release."""
pass
def raise_for_status(self):
"""Raise error if status is 400 or higher."""
if self.status >= 400:
raise ClientResponseError(
None, None, code=self.status, headers=self.headers)
def close(self):
"""Mock close."""
pass
@contextmanager
def mock_aiohttp_client():
"""Context manager to mock aiohttp client."""
mocker = AiohttpClientMocker()
def create_session(hass, *args):
session = mocker.create_session(hass.loop)
async def close_session(event):
"""Close session."""
await session.close()
hass.bus.async_listen_once(EVENT_HOMEASSISTANT_CLOSE, close_session)
return session
with mock.patch(
'homeassistant.helpers.aiohttp_client.async_create_clientsession',
side_effect=create_session):
yield mocker
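# Usage sketch (illustrative, not part of the original helpers): a test patches the
# client session factory and registers canned responses before exercising code that
# performs HTTP calls through async_get_clientsession(hass).
#
#   with mock_aiohttp_client() as aioclient_mock:
#       aioclient_mock.get('http://example.com/api', json={'ok': True})
#       ...  # run the code under test
#       assert aioclient_mock.call_count == 1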
| jamespcole/home-assistant | tests/test_util/aiohttp.py | Python | apache-2.0 | 7,132 |
from __future__ import unicode_literals
from .common import InfoExtractor
from ..utils import int_or_none
class HypemIE(InfoExtractor):
_VALID_URL = r'https?://(?:www\.)?hypem\.com/track/(?P<id>[0-9a-z]{5})'
_TEST = {
'url': 'http://hypem.com/track/1v6ga/BODYWORK+-+TAME',
'md5': 'b9cc91b5af8995e9f0c1cee04c575828',
'info_dict': {
'id': '1v6ga',
'ext': 'mp3',
'title': 'Tame',
'uploader': 'BODYWORK',
'timestamp': 1371810457,
'upload_date': '20130621',
}
}
def _real_extract(self, url):
track_id = self._match_id(url)
response = self._download_webpage(url, track_id)
track = self._parse_json(self._html_search_regex(
r'(?s)<script\s+type="application/json"\s+id="displayList-data">(.+?)</script>',
response, 'tracks'), track_id)['tracks'][0]
track_id = track['id']
title = track['song']
final_url = self._download_json(
'http://hypem.com/serve/source/%s/%s' % (track_id, track['key']),
track_id, 'Downloading metadata', headers={
'Content-Type': 'application/json'
})['url']
return {
'id': track_id,
'url': final_url,
'ext': 'mp3',
'title': title,
'uploader': track.get('artist'),
'duration': int_or_none(track.get('time')),
'timestamp': int_or_none(track.get('ts')),
'track': title,
}
| vinegret/youtube-dl | youtube_dl/extractor/hypem.py | Python | unlicense | 1,551 |
"""op/TwoBodyInteraction.py
Definition of namedtuple representation of a two-body matrix element
  <a b|V|c d>
"""
from __future__ import print_function, division, unicode_literals
from collections import namedtuple
# noinspection PyClassHasNoInit
class TwoBodyInteraction(namedtuple('TwoBodyInteraction',
['a', 'b', 'c', 'd'])):
__slots__ = ()
# noinspection PyCompatibility
def __str__(self):
# sep = unichr(9474).strip()
# left = unichr(12296).strip()
# right = unichr(12297).strip()
sep = b'|'
left = b'('
right = b')'
return '{left}{a:2} {b:2}{s} V {s}{c:2} {d:2}{right}'.format(
a=self.a, b=self.b, c=self.c, d=self.d,
left=left, right=right, s=sep
)
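if __name__ == "__main__":
    # Illustrative only (not part of the original module): the four indices map onto
    # the bra/ket labels of the rendered matrix element string.
    print(TwoBodyInteraction(a=1, b=2, c=3, d=4))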
| dilynfullerton/tr-A_dependence_plots | src/deprecated/op/TwoBodyInteraction.py | Python | cc0-1.0 | 795 |
# Copyright (c) 2012 The Khronos Group Inc.
# Permission is hereby granted, free of charge, to any person obtaining a copy of this software and /or associated documentation files (the "Materials "), to deal in the Materials without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Materials, and to permit persons to whom the Materials are furnished to do so, subject to
# the following conditions:
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Materials.
# THE MATERIALS ARE PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE MATERIALS OR THE USE OR OTHER DEALINGS IN THE MATERIALS.
# See Core.Logic.FJudgementContext for the information
# of the 'context' parameter.
# This sample judging object does the following:
#
# JudgeBaseline: just verifies that the standard steps did not crash.
# JudgeSuperior: also verifies that the validation steps are not in error.
# JudgeExemplary: same as intermediate badge.
# We import an assistant script that includes the common verification
# methods. The assistant buffers its checks, so that running them again
# does not incur an unnecessary performance hit.
from StandardDataSets.scripts import JudgeAssistant
# Please feed your node list here:
tagLst = ['library_controllers', 'controller', 'skin', 'source', 'IDREF_array']
attrName = ''
attrVal = ''
dataToCheck = ''
class SimpleJudgingObject:
def __init__(self, _tagLst, _attrName, _attrVal, _data):
self.tagList = _tagLst
self.attrName = _attrName
self.attrVal = _attrVal
self.dataToCheck = _data
self.status_baseline = False
self.status_superior = False
self.status_exemplary = False
self.__assistant = JudgeAssistant.JudgeAssistant()
def JudgeBaseline(self, context):
# No step should crash
self.__assistant.CheckCrashes(context)
# Import/export/validate must exist and pass, while Render must only exist.
self.__assistant.CheckSteps(context, ["Import", "Export", "Validate"], ["Render"])
self.status_baseline = self.__assistant.GetResults()
return self.status_baseline
    # To pass superior you need to pass baseline; this object could also include
    # additional tests specific to the superior badge.
def JudgeSuperior(self, context):
# if baseline fails, no point in further checking
if (self.status_baseline == False):
self.status_superior = self.status_baseline
return self.status_superior
# Compare the rendered images
self.__assistant.CompareRenderedImages(context)
# Check for preservation of element
self.__assistant.ElementPreserved(context, self.tagList)
self.status_superior = self.__assistant.DeferJudgement(context)
return self.status_superior
    # To pass exemplary you need to pass superior; this object could also include
    # additional tests specific to the exemplary badge.
def JudgeExemplary(self, context):
self.status_exemplary = self.status_superior
return self.status_exemplary
# This is where all the work occurs: "judgingObject" is an absolutely necessary token.
# The dynamic loader looks very specifically for a class instance named "judgingObject".
#
judgingObject = SimpleJudgingObject(tagLst, attrName, attrVal, dataToCheck);
| KhronosGroup/COLLADA-CTS | StandardDataSets/collada/library_controllers/controller/skin/skin_search_id_first/skin_search_id_first.py | Python | mit | 3,982 |
import time
from optparse import make_option
from django.core.management.base import BaseCommand
from treeherder.model.derived import JobsModel
from treeherder.model.models import (Datasource,
Job,
Repository)
class Command(BaseCommand):
help = 'Migrate existing jobs to intermediate jobs table'
option_list = BaseCommand.option_list + (
make_option('--project',
action='append',
dest='project',
help='Filter deletion to particular project(s)',
type='string'),
make_option('--interval',
dest='interval',
help='Wait specified interval between signature migrations',
type='float',
default=0.0))
def handle(self, *args, **options):
if options['project']:
projects = options['project']
else:
projects = Datasource.objects.values_list('project', flat=True)
for project in projects:
print project
try:
repository = Repository.objects.get(name=project)
except Repository.DoesNotExist:
continue
with JobsModel(project) as jm:
offset = 0
limit = 500
while True:
datasource_jobs = jm.get_job_list(offset, limit)
if not datasource_jobs:
break
existing_jobs = Job.objects.filter(
repository=repository,
project_specific_id__in=[datasource_job['id'] for
datasource_job in datasource_jobs])
if len(existing_jobs) < len(datasource_jobs):
# only even bother trying to create new jobs if they
# haven't been created already
for datasource_job in datasource_jobs:
Job.objects.get_or_create(
repository=repository,
guid=datasource_job['job_guid'],
project_specific_id=datasource_job['id'])
offset += limit
time.sleep(options['interval'])
| akhileshpillai/treeherder | treeherder/model/management/commands/migrate_to_intermediate_jobs.py | Python | mpl-2.0 | 2,402 |
from __future__ import (absolute_import, division, print_function,
unicode_literals)
__author__ = 'Trung Dong Huynh'
__email__ = '[email protected]'
__all__ = [
'get'
]
from prov import Error
class Serializer(object):
def __init__(self, document=None):
self.document = document
def serialize(self, stream, **kwargs):
"""
Abstract method for serializing
"""
def deserialize(self, stream, **kwargs):
"""
Abstract method for deserializing
"""
class DoNotExist(Error):
pass
class Registry:
serializers = None
@staticmethod
def load_serializers():
from prov.serializers.provjson import ProvJSONSerializer
from prov.serializers.provn import ProvNSerializer
from prov.serializers.provxml import ProvXMLSerializer
Registry.serializers = {
'json': ProvJSONSerializer,
'provn': ProvNSerializer,
'xml': ProvXMLSerializer
}
def get(format_name):
"""
    Returns the serializer class for the specified format. Raises DoNotExist
    if no serializer is registered for that format.
"""
# Lazily initialize the list of serializers to avoid cyclic imports
if Registry.serializers is None:
Registry.load_serializers()
try:
return Registry.serializers[format_name]
except KeyError:
raise DoNotExist(
'No serializer available for the format "%s"' % format_name
)
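# Usage sketch (illustrative, not part of the original module): callers look up a
# serializer class by format name; unknown names raise DoNotExist.
#
#   serializer_cls = get('json')   # -> ProvJSONSerializer
#   get('turtle')                  # -> raises DoNotExist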
| KNMI/VERCE | verce-hpc-pe/src/prov/serializers/__init__.py | Python | mit | 1,472 |
#!/usr/bin/python
#
# Copyright (c) 2015 CenturyLink
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
module: clc_alert_policy
short_description: Create or Delete Alert Policies at CenturyLink Cloud.
description:
- An Ansible module to Create or Delete Alert Policies at CenturyLink Cloud.
version_added: "2.0"
options:
alias:
description:
- The alias of your CLC Account
required: True
name:
description:
- The name of the alert policy. This is mutually exclusive with id
id:
description:
- The alert policy id. This is mutually exclusive with name
alert_recipients:
description:
- A list of recipient email ids to notify the alert.
This is required for state 'present'
metric:
description:
- The metric on which to measure the condition that will trigger the alert.
This is required for state 'present'
choices: ['cpu','memory','disk']
duration:
description:
- The length of time in minutes that the condition must exceed the threshold.
This is required for state 'present'
threshold:
description:
- The threshold that will trigger the alert when the metric equals or exceeds it.
This is required for state 'present'
This number represents a percentage and must be a value between 5.0 - 95.0 that is a multiple of 5.0
state:
description:
- Whether to create or delete the policy.
default: present
choices: ['present','absent']
requirements:
- python = 2.7
- requests >= 2.5.0
- clc-sdk
author: "CLC Runner (@clc-runner)"
notes:
- To use this module, it is required to set the below environment variables which enables access to the
Centurylink Cloud
- CLC_V2_API_USERNAME, the account login id for the centurylink cloud
- CLC_V2_API_PASSWORD, the account password for the centurylink cloud
- Alternatively, the module accepts the API token and account alias. The API token can be generated using the
CLC account login and password via the HTTP api call @ https://api.ctl.io/v2/authentication/login
- CLC_V2_API_TOKEN, the API token generated from https://api.ctl.io/v2/authentication/login
- CLC_ACCT_ALIAS, the account alias associated with the centurylink cloud
- Users can set CLC_V2_API_URL to specify an endpoint for pointing to a different CLC environment.
'''
EXAMPLES = '''
# Note - You must set the CLC_V2_API_USERNAME And CLC_V2_API_PASSWD Environment variables before running these examples
---
- name: Create Alert Policy Example
hosts: localhost
gather_facts: False
connection: local
tasks:
- name: Create an Alert Policy for disk above 80% for 5 minutes
clc_alert_policy:
alias: wfad
name: 'alert for disk > 80%'
alert_recipients:
- [email protected]
- [email protected]
metric: 'disk'
duration: '00:05:00'
threshold: 80
state: present
register: policy
- name: debug
debug: var=policy
---
- name: Delete Alert Policy Example
hosts: localhost
gather_facts: False
connection: local
tasks:
- name: Delete an Alert Policy
clc_alert_policy:
alias: wfad
name: 'alert for disk > 80%'
state: absent
register: policy
- name: debug
debug: var=policy
'''
RETURN = '''
policy:
description: The alert policy information
returned: success
type: dict
sample:
{
"actions": [
{
"action": "email",
"settings": {
"recipients": [
"[email protected]",
"[email protected]"
]
}
}
],
"id": "ba54ac54a60d4a4f1ed6d48c1ce240a7",
"links": [
{
"href": "/v2/alertPolicies/alias/ba54ac54a60d4a4fb1d6d48c1ce240a7",
"rel": "self",
"verbs": [
"GET",
"DELETE",
"PUT"
]
}
],
"name": "test_alert",
"triggers": [
{
"duration": "00:05:00",
"metric": "disk",
"threshold": 80.0
}
]
}
'''
__version__ = '${version}'
import json
import os
from distutils.version import LooseVersion
try:
import requests
except ImportError:
REQUESTS_FOUND = False
else:
REQUESTS_FOUND = True
#
# Requires the clc-python-sdk.
# sudo pip install clc-sdk
#
try:
import clc as clc_sdk
from clc import APIFailedResponse
except ImportError:
CLC_FOUND = False
clc_sdk = None
else:
CLC_FOUND = True
from ansible.module_utils.basic import AnsibleModule
class ClcAlertPolicy:
clc = clc_sdk
module = None
def __init__(self, module):
"""
Construct module
"""
self.module = module
self.policy_dict = {}
if not CLC_FOUND:
self.module.fail_json(
msg='clc-python-sdk required for this module')
if not REQUESTS_FOUND:
self.module.fail_json(
msg='requests library is required for this module')
if requests.__version__ and LooseVersion(requests.__version__) < LooseVersion('2.5.0'):
self.module.fail_json(
msg='requests library version should be >= 2.5.0')
self._set_user_agent(self.clc)
@staticmethod
def _define_module_argument_spec():
"""
Define the argument spec for the ansible module
:return: argument spec dictionary
"""
argument_spec = dict(
name=dict(default=None),
id=dict(default=None),
alias=dict(required=True, default=None),
alert_recipients=dict(type='list', default=None),
metric=dict(
choices=[
'cpu',
'memory',
'disk'],
default=None),
duration=dict(type='str', default=None),
threshold=dict(type='int', default=None),
state=dict(default='present', choices=['present', 'absent'])
)
mutually_exclusive = [
['name', 'id']
]
return {'argument_spec': argument_spec,
'mutually_exclusive': mutually_exclusive}
# Module Behavior Goodness
def process_request(self):
"""
Process the request - Main Code Path
:return: Returns with either an exit_json or fail_json
"""
p = self.module.params
self._set_clc_credentials_from_env()
self.policy_dict = self._get_alert_policies(p['alias'])
if p['state'] == 'present':
changed, policy = self._ensure_alert_policy_is_present()
else:
changed, policy = self._ensure_alert_policy_is_absent()
self.module.exit_json(changed=changed, policy=policy)
def _set_clc_credentials_from_env(self):
"""
Set the CLC Credentials on the sdk by reading environment variables
:return: none
"""
env = os.environ
v2_api_token = env.get('CLC_V2_API_TOKEN', False)
v2_api_username = env.get('CLC_V2_API_USERNAME', False)
v2_api_passwd = env.get('CLC_V2_API_PASSWD', False)
clc_alias = env.get('CLC_ACCT_ALIAS', False)
api_url = env.get('CLC_V2_API_URL', False)
if api_url:
self.clc.defaults.ENDPOINT_URL_V2 = api_url
if v2_api_token and clc_alias:
self.clc._LOGIN_TOKEN_V2 = v2_api_token
self.clc._V2_ENABLED = True
self.clc.ALIAS = clc_alias
elif v2_api_username and v2_api_passwd:
self.clc.v2.SetCredentials(
api_username=v2_api_username,
api_passwd=v2_api_passwd)
else:
return self.module.fail_json(
msg="You must set the CLC_V2_API_USERNAME and CLC_V2_API_PASSWD "
"environment variables")
def _ensure_alert_policy_is_present(self):
"""
Ensures that the alert policy is present
:return: (changed, policy)
changed: A flag representing if anything is modified
policy: the created/updated alert policy
"""
changed = False
p = self.module.params
policy_name = p.get('name')
if not policy_name:
            self.module.fail_json(msg='Policy name is required')
policy = self._alert_policy_exists(policy_name)
if not policy:
changed = True
policy = None
if not self.module.check_mode:
policy = self._create_alert_policy()
else:
changed_u, policy = self._ensure_alert_policy_is_updated(policy)
if changed_u:
changed = True
return changed, policy
def _ensure_alert_policy_is_absent(self):
"""
Ensures that the alert policy is absent
:return: (changed, None)
changed: A flag representing if anything is modified
"""
changed = False
p = self.module.params
alert_policy_id = p.get('id')
alert_policy_name = p.get('name')
alias = p.get('alias')
if not alert_policy_id and not alert_policy_name:
self.module.fail_json(
msg='Either alert policy id or policy name is required')
if not alert_policy_id and alert_policy_name:
alert_policy_id = self._get_alert_policy_id(
self.module,
alert_policy_name)
if alert_policy_id and alert_policy_id in self.policy_dict:
changed = True
if not self.module.check_mode:
self._delete_alert_policy(alias, alert_policy_id)
return changed, None
def _ensure_alert_policy_is_updated(self, alert_policy):
"""
Ensures the alert policy is updated if anything is changed in the alert policy configuration
:param alert_policy: the target alert policy
:return: (changed, policy)
changed: A flag representing if anything is modified
policy: the updated the alert policy
"""
changed = False
p = self.module.params
alert_policy_id = alert_policy.get('id')
email_list = p.get('alert_recipients')
metric = p.get('metric')
duration = p.get('duration')
threshold = p.get('threshold')
policy = alert_policy
if (metric and metric != str(alert_policy.get('triggers')[0].get('metric'))) or \
(duration and duration != str(alert_policy.get('triggers')[0].get('duration'))) or \
(threshold and float(threshold) != float(alert_policy.get('triggers')[0].get('threshold'))):
changed = True
elif email_list:
t_email_list = list(
alert_policy.get('actions')[0].get('settings').get('recipients'))
if set(email_list) != set(t_email_list):
changed = True
if changed and not self.module.check_mode:
policy = self._update_alert_policy(alert_policy_id)
return changed, policy
def _get_alert_policies(self, alias):
"""
Get the alert policies for account alias by calling the CLC API.
:param alias: the account alias
:return: the alert policies for the account alias
"""
response = {}
policies = self.clc.v2.API.Call('GET',
'/v2/alertPolicies/%s'
% alias)
for policy in policies.get('items'):
response[policy.get('id')] = policy
return response
def _create_alert_policy(self):
"""
Create an alert Policy using the CLC API.
:return: response dictionary from the CLC API.
"""
p = self.module.params
alias = p['alias']
email_list = p['alert_recipients']
metric = p['metric']
duration = p['duration']
threshold = p['threshold']
policy_name = p['name']
arguments = json.dumps(
{
'name': policy_name,
'actions': [{
'action': 'email',
'settings': {
'recipients': email_list
}
}],
'triggers': [{
'metric': metric,
'duration': duration,
'threshold': threshold
}]
}
)
try:
result = self.clc.v2.API.Call(
'POST',
'/v2/alertPolicies/%s' % alias,
arguments)
except APIFailedResponse as e:
return self.module.fail_json(
msg='Unable to create alert policy "{0}". {1}'.format(
policy_name, str(e.response_text)))
return result
def _update_alert_policy(self, alert_policy_id):
"""
Update alert policy using the CLC API.
:param alert_policy_id: The clc alert policy id
:return: response dictionary from the CLC API.
"""
p = self.module.params
alias = p['alias']
email_list = p['alert_recipients']
metric = p['metric']
duration = p['duration']
threshold = p['threshold']
policy_name = p['name']
arguments = json.dumps(
{
'name': policy_name,
'actions': [{
'action': 'email',
'settings': {
'recipients': email_list
}
}],
'triggers': [{
'metric': metric,
'duration': duration,
'threshold': threshold
}]
}
)
try:
result = self.clc.v2.API.Call(
'PUT', '/v2/alertPolicies/%s/%s' %
(alias, alert_policy_id), arguments)
except APIFailedResponse as e:
return self.module.fail_json(
msg='Unable to update alert policy "{0}". {1}'.format(
policy_name, str(e.response_text)))
return result
def _delete_alert_policy(self, alias, policy_id):
"""
Delete an alert policy using the CLC API.
:param alias : the account alias
:param policy_id: the alert policy id
:return: response dictionary from the CLC API.
"""
try:
result = self.clc.v2.API.Call(
'DELETE', '/v2/alertPolicies/%s/%s' %
(alias, policy_id), None)
except APIFailedResponse as e:
return self.module.fail_json(
msg='Unable to delete alert policy id "{0}". {1}'.format(
policy_id, str(e.response_text)))
return result
def _alert_policy_exists(self, policy_name):
"""
Check to see if an alert policy exists
:param policy_name: name of the alert policy
:return: boolean of if the policy exists
"""
result = False
for policy_id in self.policy_dict:
if self.policy_dict.get(policy_id).get('name') == policy_name:
result = self.policy_dict.get(policy_id)
return result
def _get_alert_policy_id(self, module, alert_policy_name):
"""
retrieves the alert policy id of the account based on the name of the policy
:param module: the AnsibleModule object
:param alert_policy_name: the alert policy name
:return: alert_policy_id: The alert policy id
"""
alert_policy_id = None
for policy_id in self.policy_dict:
if self.policy_dict.get(policy_id).get('name') == alert_policy_name:
if not alert_policy_id:
alert_policy_id = policy_id
else:
return module.fail_json(
msg='multiple alert policies were found with policy name : %s' % alert_policy_name)
return alert_policy_id
@staticmethod
def _set_user_agent(clc):
if hasattr(clc, 'SetRequestsSession'):
agent_string = "ClcAnsibleModule/" + __version__
ses = requests.Session()
ses.headers.update({"Api-Client": agent_string})
ses.headers['User-Agent'] += " " + agent_string
clc.SetRequestsSession(ses)
def main():
"""
The main function. Instantiates the module and calls process_request.
:return: none
"""
argument_dict = ClcAlertPolicy._define_module_argument_spec()
module = AnsibleModule(supports_check_mode=True, **argument_dict)
clc_alert_policy = ClcAlertPolicy(module)
clc_alert_policy.process_request()
if __name__ == '__main__':
main()
| hryamzik/ansible | lib/ansible/modules/cloud/centurylink/clc_alert_policy.py | Python | gpl-3.0 | 17,447 |
# -*- coding: utf-8 -*-
from builtins import map
#from builtins import str
import sys
PY3 = False
if sys.version_info[0] >= 3: PY3 = True; unicode = str; unichr = chr; long = int
from threading import Timer
import xbmc
import xbmcaddon
import xbmcgui
from channelselector import get_thumb
from platformcode import config
class KeyListener(xbmcgui.WindowXMLDialog):
TIMEOUT = 10
def __new__(cls):
gui_api = tuple(map(int, xbmcaddon.Addon('xbmc.gui').getAddonInfo('version').split('.')))
if gui_api >= (5, 11, 0):
filenname = "DialogNotification.xml"
else:
filenname = "DialogKaiToast.xml"
return super(KeyListener, cls).__new__(cls, filenname, "")
def __init__(self):
self.key = None
def onInit(self):
try:
self.getControl(401).addLabel("Presiona la tecla a usar para abrir la ventana")
self.getControl(402).addLabel("Tienes %s segundos" % self.TIMEOUT)
except AttributeError:
self.getControl(401).setLabel("Presiona la tecla a usar para abrir la ventana")
self.getControl(402).setLabel("Tienes %s segundos" % self.TIMEOUT)
def onAction(self, action):
code = action.getButtonCode()
if code == 0:
self.key = None
else:
self.key = str(code)
self.close()
@staticmethod
def record_key():
dialog = KeyListener()
timeout = Timer(KeyListener.TIMEOUT, dialog.close)
timeout.start()
dialog.doModal()
timeout.cancel()
key = dialog.key
del dialog
return key
def set_key():
saved_key = config.get_setting("shortcut_key")
new_key = KeyListener().record_key()
if new_key and saved_key != new_key:
from core import filetools
from platformcode import platformtools
import xbmc
file_xml = "special://profile/keymaps/alfa.xml"
data = '<keymap><global><keyboard><key id="%s">' % new_key + 'runplugin(plugin://' \
'plugin.video.alfa/?ew0KICAgICJhY3Rpb24iOiAia2V5bWFwIiwNCiAgICAib3BlbiI6IHRydWUNCn0=)</key></keyboard></global></keymap>'
filetools.write(file_xml, data)
platformtools.dialog_notification("Tecla guardada", "Reinicia Kodi para que se apliquen los cambios")
config.set_setting("shortcut_key", new_key)
# file_idioma = filetools.join(config.get_runtime_path(), 'resources', 'language', 'Spanish', 'strings.xml')
# data = filetools.read(file_idioma)
# value_xml = scrapertools.find_single_match(data, '<string id="31100">([^<]+)<')
# if "tecla" in value_xml:
# data = data.replace(value_xml, 'Cambiar tecla/botón para abrir la ventana (Guardada: %s)' % new_key)
# elif "key" in value_xml:
# data = data.replace(value_xml, 'Change key/button to open the window (Saved: %s)' % new_key)
# else:
# data = data.replace(value_xml,
# 'Cambiamento di chiave/pulsante per aprire la finestra (Salvato: %s)' % new_key)
# filetools.write(file_idioma, data)
return
MAIN_MENU = {
"news": {"label": "Novedades", "icon": get_thumb("news.png"), "order": 0},
"channels": {"label": "Canales", "icon": get_thumb("channels.png"), "order": 1},
"search": {"label": "Buscador", "icon": get_thumb("search.png"), "order": 2},
"favorites": {"label": "Favoritos", "icon": get_thumb("favorites.png"), "order": 3},
"videolibrary": {"label": "Videoteca", "icon": get_thumb("videolibrary.png"), "order": 4},
"downloads": {"label": "Descargas", "icon": get_thumb("downloads.png"), "order": 5},
"settings": {"label": "Configuración", "icon": get_thumb("setting_0.png"), "order": 6}
}
class Main(xbmcgui.WindowXMLDialog):
def __init__(self, *args, **kwargs):
self.items = []
def onInit(self):
#### Compatibilidad con Kodi 18 ####
if config.get_platform(True)['num_version'] < 18:
self.setCoordinateResolution(2)
for menuentry in list(MAIN_MENU.keys()):
item = xbmcgui.ListItem(MAIN_MENU[menuentry]["label"])
item.setProperty("thumb", str(MAIN_MENU[menuentry]["icon"]))
item.setProperty("identifier", str(menuentry))
item.setProperty("order", str(MAIN_MENU[menuentry]["order"]))
self.items.append(item)
self.items.sort(key=lambda it: it.getProperty("order"))
self.getControl(32500).addItems(self.items)
self.setFocusId(32500)
def onClick(self, control_id):
if control_id == 32500:
identifier = self.getControl(32500).getSelectedItem().getProperty("identifier")
if identifier == "news":
xbmc.executebuiltin('Dialog.Close(all,true)')
xbmc.executebuiltin(
'ActivateWindow(10025, "plugin://plugin.video.alfa/?ew0KICAgICJhY3Rpb24iOiAibWFpbmxpc3QiLCANCiAgICAiY2hhbm5lbCI6ICJuZXdzIg0KfQ==")')
elif identifier == "channels":
xbmc.executebuiltin('Dialog.Close(all,true)')
xbmc.executebuiltin(
'ActivateWindow(10025, "plugin://plugin.video.alfa/?ew0KICAgICJhY3Rpb24iOiAiZ2V0Y2hhbm5lbHR5cGVzIiwgDQogICAgImNoYW5uZWwiOiAiY2hhbm5lbHNlbGVjdG9yIg0KfQ==")')
elif identifier == "search":
xbmc.executebuiltin('Dialog.Close(all,true)')
xbmc.executebuiltin(
'ActivateWindow(10025, "plugin://plugin.video.alfa/?ew0KICAgICJhY3Rpb24iOiAibWFpbmxpc3QiLCANCiAgICAiY2hhbm5lbCI6ICJzZWFyY2giDQp9")')
elif identifier == "favorites":
xbmc.executebuiltin('Dialog.Close(all,true)')
xbmc.executebuiltin(
'ActivateWindow(10025, "plugin://plugin.video.alfa/?ew0KICAgICJhY3Rpb24iOiAibWFpbmxpc3QiLCANCiAgICAiY2hhbm5lbCI6ICJmYXZvcml0ZXMiDQp9")')
elif identifier == "videolibrary":
xbmc.executebuiltin('Dialog.Close(all,true)')
xbmc.executebuiltin(
'ActivateWindow(10025, "plugin://plugin.video.alfa/?ew0KICAgICJhY3Rpb24iOiAibWFpbmxpc3QiLCANCiAgICAiY2hhbm5lbCI6ICJ2aWRlb2xpYnJhcnkiDQp9")')
elif identifier == "downloads":
xbmc.executebuiltin('Dialog.Close(all,true)')
xbmc.executebuiltin(
'ActivateWindow(10025, "plugin://plugin.video.alfa/?ew0KICAgICJhY3Rpb24iOiAibWFpbmxpc3QiLCANCiAgICAiY2hhbm5lbCI6ICJkb3dubG9hZHMiDQp9")')
elif identifier == "settings":
xbmc.executebuiltin('Dialog.Close(all,true)')
xbmc.executebuiltin(
'ActivateWindow(10025, "plugin://plugin.video.alfa/?ew0KICAgICJhY3Rpb24iOiAibWFpbmxpc3QiLCANCiAgICAiY2hhbm5lbCI6ICJzZXR0aW5nIg0KfQ==")')
def onAction(self, action):
# exit
if action.getId() in [xbmcgui.ACTION_PREVIOUS_MENU, xbmcgui.ACTION_NAV_BACK]:
# main.close()
xbmc.executebuiltin('Dialog.Close(all,true)')
if action.getId() == xbmcgui.ACTION_CONTEXT_MENU:
config.open_settings()
def open_shortcut_menu():
main = Main('ShortCutMenu.xml', config.get_runtime_path())
main.doModal()
del main
| alfa-addon/addon | plugin.video.alfa/platformcode/keymaptools.py | Python | gpl-3.0 | 7,319 |
"""
utility.py
Provides a number of simple commands for working with strings.
Created By:
- Luke Rogers <https://github.com/lukeroge>
- Dabo Ross <https://github.com/daboross>
Special Thanks:
- Fletcher Boyd <https://github.com/thenoodle68>
License: GPL v3
"""
import base64
import hashlib
import collections
import re
import os
import json
import codecs
import urllib.parse
import random
import binascii
from cloudbot import hook
from cloudbot.util import formatting, web, colors
COLORS = collections.OrderedDict([
('red', '\x0304'),
('orange', '\x0307'),
('yellow', '\x0308'),
('green', '\x0309'),
('cyan', '\x0303'),
('ltblue', '\x0310'),
('rylblue', '\x0312'),
('blue', '\x0302'),
('magenta', '\x0306'),
('pink', '\x0313'),
('maroon', '\x0305')
])
# helper functions
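# Matches IRC formatting control codes (colour, bold, underline, reset) and their optional colour arguments.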
strip_re = re.compile("(\x03|\x02|\x1f|\x0f)(?:,?\d{1,2}(?:,\d{1,2})?)?")
def strip(string):
return strip_re.sub('', string)
def translate(text, dic):
for i, j in dic.items():
text = text.replace(i, j)
return text
# on_start
@hook.on_start()
def load_text(bot):
"""
:type bot: cloudbot.bot.CloudBot
"""
global leet
with codecs.open(os.path.join(bot.data_dir, "leet.json"), encoding="utf-8") as f:
leet = json.load(f)
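    # Note: this rebinds the module-level name "leet" (shared with the leet
    # command below) to the substitution table that the command looks up at call time.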
# misc
@hook.command("qrcode", "qr")
def qrcode(text):
"""<link> - returns a link to a QR code image for <link>"""
args = {
"cht": "qr", # chart type (QR)
"chs": "200x200", # dimensions
"chl": text # data
}
argstring = urllib.parse.urlencode(args)
link = "http://chart.googleapis.com/chart?{}".format(argstring)
return web.try_shorten(link)
# basic text tools
@hook.command("capitalise", "capitalize")
def capitalize(text):
"""capitalize <string> -- Capitalizes <string>.
:type text: str
"""
return ". ".join([sentence.capitalize() for sentence in text.split(". ")])
@hook.command
def upper(text):
"""upper <string> -- Convert string to uppercase."""
return text.upper()
@hook.command
def lower(text):
"""lower <string> -- Convert string to lowercase."""
return text.lower()
@hook.command
def titlecase(text):
    """titlecase <string> -- Convert string to title case."""
return text.title()
@hook.command
def swapcase(text):
"""swapcase <string> -- Swaps the capitalization of <string>."""
return text.swapcase()
# encoding
@hook.command("rot13")
def rot13_encode(text):
"""rot13 <string> -- Encode <string> with rot13."""
encoder = codecs.getencoder("rot-13")
return encoder(text)[0]
@hook.command("base64")
def base64_encode(text):
"""base64 <string> -- Encode <string> with base64."""
return base64.b64encode(text.encode()).decode()
@hook.command("debase64", "unbase64")
def base64_decode(text, notice):
"""unbase64 <string> -- Decode <string> with base64."""
try:
return base64.b64decode(text.encode()).decode()
except binascii.Error:
notice("Invalid base64 string '{}'".format(text))
@hook.command("isbase64", "checkbase64")
def base64_check(text):
"""isbase64 <string> -- Checks if <string> is a valid base64 encoded string"""
try:
base64.b64decode(text.encode())
except binascii.Error:
return "'{}' is not a valid base64 encoded string".format(text)
else:
return "'{}' is a valid base64 encoded string".format(text)
@hook.command
def unescape(text):
"""unescape <string> -- Unicode unescapes <string>."""
decoder = codecs.getdecoder("unicode_escape")
return decoder(text)[0]
@hook.command
def escape(text):
"""escape <string> -- Unicode escapes <string>."""
encoder = codecs.getencoder("unicode_escape")
return encoder(text)[0].decode()
# length
@hook.command
def length(text):
"""length <string> -- Gets the length of <string>"""
return "The length of that string is {} characters.".format(len(text))
# reverse
@hook.command
def reverse(text):
"""reverse <string> -- Reverses <string>."""
return text[::-1]
# hashing
@hook.command("hash")
def hash_command(text):
"""hash <string> -- Returns hashes of <string>."""
return ', '.join(x + ": " + getattr(hashlib, x)(text.encode("utf-8")).hexdigest()
for x in ['md5', 'sha1', 'sha256'])
# novelty
@hook.command
def munge(text):
"""<text> -- Munges up <text>."""
return formatting.munge(text)
@hook.command
def leet(text):
"""<text> -- Makes <text> more 1337h4x0rz."""
output = ''.join(random.choice(leet[ch]) if ch.isalpha() else ch for ch in text.lower())
return output
# Based on plugin by FurCode - <https://github.com/FurCode/RoboCop2>
@hook.command
def derpify(text):
"""derpify <text> - returns some amusing responses from your input."""
string = text.upper()
pick_the = random.choice(["TEH", "DA"])
pick_e = random.choice(["E", "3", "A"])
pick_qt = random.choice(["?!?!??", "???!!!!??", "?!??!?", "?!?!?!???"])
pick_ex = random.choice(["1111!11", "1!11", "!!1!", "1!!!!111", "!1!111!1", "!11!111"])
pick_end = random.choice(["", "OMG", "LOL", "WTF", "WTF LOL", "OMG LOL"])
rules = {"YOU'RE": "UR", "YOUR": "UR", "YOU": "U", "WHAT THE HECK": "WTH", "WHAT THE HELL": "WTH",
"WHAT THE FUCK": "WTF",
"WHAT THE": "WT", "WHAT": "WUT", "ARE": "R", "WHY": "Y", "BE RIGHT BACK": "BRB", "BECAUSE": "B/C",
"OH MY GOD": "OMG", "O": "OH", "THE": pick_the, "TOO": "2", "TO": "2", "BE": "B", "CK": "K", "ING": "NG",
"PLEASE": "PLS", "SEE YOU": "CYA", "SEE YA": "CYA", "SCHOOL": "SKOOL", "AM": "M",
"AM GOING TO": "IAM GOING TO", "THAT": "DAT", "ICK": "IK",
"LIKE": "LIEK", "HELP": "HALP", "KE": "EK", "E": pick_e, "!": pick_ex, "?": pick_qt}
output = translate(string, rules) + " " + pick_end
return output
# colors
@hook.command
def color_parse(text):
return colors.parse(text)
# colors - based on code by Reece Selwood - <https://github.com/hitzler/homero>
@hook.command
def rainbow(text):
"""<text> -- Gives <text> rainbow colors."""
text = str(text)
text = strip(text)
col = list(COLORS.items())
out = ""
l = len(COLORS)
for i, t in enumerate(text):
if t == " ":
out += t
else:
out += col[i % l][1] + t
return out
@hook.command
def wrainbow(text):
"""<text> -- Gives each word in <text> rainbow colors."""
text = str(text)
col = list(COLORS.items())
text = strip(text).split(' ')
out = []
l = len(COLORS)
for i, t in enumerate(text):
out.append(col[i % l][1] + t)
return ' '.join(out)
@hook.command
def usa(text):
"""<text> -- Makes <text> more patriotic."""
text = strip(text)
c = [COLORS['red'], '\x0300', COLORS['blue']]
l = len(c)
out = ''
for i, t in enumerate(text):
out += c[i % l] + t
return out
@hook.command
def superscript(text):
"""<text> -- Makes <text> superscript."""
regular = "abcdefghijklmnoprstuvwxyzABDEGHIJKLMNOPRTUVW0123456789+-=()"
super_script = "ᵃᵇᶜᵈᵉᶠᵍʰⁱʲᵏˡᵐⁿᵒᵖʳˢᵗᵘᵛʷˣʸᶻᴬᴮᴰᴱᴳᴴᴵᴶᴷᴸᴹᴺᴼᴾᴿᵀᵁⱽᵂ⁰¹²³⁴⁵⁶⁷⁸⁹⁺⁻⁼⁽⁾"
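    # Characters without Unicode superscript equivalents (e.g. "q" and several
    # capital letters) are omitted from the lookup strings above and pass through unchanged.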
result = []
for char in text:
index = regular.find(char)
if index != -1:
result.append(super_script[index])
else:
result.append(char)
return "".join(result)
| CrushAndRun/Cloudbot-Fluke | plugins/utility.py | Python | gpl-3.0 | 7,527 |
import os, sys
import mercadopago
import json
def index(req, **kwargs):
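    # Build a checkout preference that collects a marketplace fee and return a
    # minimal HTML page linking to the generated init_point.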
preference = {
"items": [
{
"title": "Item title",
"description": "Description",
"quantity": 1,
"unit_price": 50,
"currency_id": "CURRENCY_ID",
"picture_url": "https://www.mercadopago.com/org-img/MP3/home/logomp3.gif"
}
],
"marketplace_fee": 2.29 # fee to collect
}
mp = mercadopago.MP("SELLER_AT") # seller access_token
preferenceResult = mp.create_preference(preference)
url = preferenceResult["response"]["init_point"]
output = """
<!doctype html>
<html>
<head>
<title>Pay</title>
</head>
<body>
<a href="{url}">Pay</a>
</body>
</html>
""".format (url=url)
return output | matikbird/matikbird.github.io | portfolio/quay/back_end/payments2/mercadopago/api-mercadopago-master/templates/code-examples-master/mp-checkout/marketplace/python/payment_button.py | Python | mit | 711 |
from agms.exception.agms_exception import AgmsException
class AuthenticationException(AgmsException):
"""
Raised when the client library cannot authenticate with the gateway.
This generally means the username/password key are incorrect, or the merchant is not active.
"""
pass
| agmscode/agms_python | agms/exception/authentication_exception.py | Python | mit | 301 |
from django.db.models import ProtectedError
from django.views.generic import ListView
from django.core import serializers
from django.contrib.auth.decorators import login_required
from ..models import Catalog, CatalogIssue
from ..mixins import TacoMixin
from ..utils import __preprocess_get_request, __taco_render, json_response
from .. import formfields
class CatalogList(TacoMixin, ListView):
model = Catalog
template_name = 'taconite/catalog/list.xml'
obj_list = CatalogList.as_view()
@login_required
def obj_get(request, pk):
pk, params, obj, error = __preprocess_get_request(request, pk, Catalog)
fields = formfields.CatalogForm(obj)
return __taco_render(request, 'taconite/catalog/item.xml', {
'error': error,
'fields': fields,
'obj': obj,
})
@login_required
def obj_save(request):
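    # Deserialize a JSON-serialized Catalog from the request body, save it and
    # report via JSON whether it was newly created or merely updated.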
msg = ''
new_obj = False
obj_id = None
saved = False
try:
for obj in serializers.deserialize('json', request.body):
if obj.object.__class__ == Catalog:
if not obj.object.id:
new_obj = True
obj.save()
saved = True
msg = 'Catalog created (ID:%d)' % obj.object.id
else:
obj.save()
saved = True
msg = 'Catalog saved'
obj_id = obj.object.id
else:
msg = 'Did not receive expected object Catalog. You sent me a %s' % obj.object.__class__.__name__
    except Exception as e:
        msg = 'Wrong values'
        print('Error - %s' % e)
return json_response({
'saved': saved,
'msg': msg,
'obj_id': obj_id,
'created': True if new_obj else False
})
@login_required
def obj_delete(request, pk):
try:
pl = Catalog.objects.get(pk=pk)
except Catalog.DoesNotExist:
return json_response({
'status': 'error',
'msg': 'Wrong ID'
})
try:
pl.delete()
except ProtectedError:
return json_response({
'status': 'error',
'msg': 'You can`t delete this record because it is used!'
})
return json_response({
'status': 'ok',
'msg': 'Catalog has been deleted!'
})
@login_required
def obj_issue_get(request, pk):
pk, params, obj, error = __preprocess_get_request(request, pk, CatalogIssue)
fields = formfields.CatalogIssueForm(obj)
return __taco_render(request, 'taconite/catalog/item_issue.xml', {
'error': error,
'fields': fields,
'obj': obj,
})
@login_required
def obj_issue_save(request):
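    # Same flow as obj_save above, but for CatalogIssue objects.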
msg = ''
new_obj = False
obj_id = None
saved = False
try:
for obj in serializers.deserialize('json', request.body):
if obj.object.__class__ == CatalogIssue:
if not obj.object.id:
new_obj = True
obj.save()
saved = True
msg = 'Issue created (ID:%d)' % obj.object.id
else:
obj.save()
saved = True
msg = 'Issue saved'
obj_id = obj.object.id
else:
msg = 'Did not receive expected object CatalogIssue. You sent me a %s' % obj.object.__class__.__name__
    except Exception as e:
        msg = 'Wrong values'
        print('Error - %s' % e)
return json_response({
'saved': saved,
'msg': msg,
'obj_id': obj_id,
'created': True if new_obj else False
})
@login_required
def obj_issue_delete(request, pk):
try:
pl = CatalogIssue.objects.get(pk=pk)
except CatalogIssue.DoesNotExist:
return json_response({
'status': 'error',
'msg': 'Wrong ID'
})
try:
pl.delete()
except ProtectedError:
return json_response({
'status': 'error',
'msg': 'You can`t delete this record because it is used!'
})
return json_response({
'status': 'ok',
'msg': 'Catalog issue has been deleted!'
})
| the0forge/sp | frontend/views/catalog.py | Python | gpl-3.0 | 4,180 |
from sys import stdin
def readLine():
return stdin.readline().strip()
def readInt():
return int(readLine())
def readInts():
return list(map(int, readLine().split()))
FACES = 6
T, N, K = 0, 0, 0
S = dict()
def backtrack(idx, totalSum):
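    # Enumerate every sequence of N six-sided die rolls and count, in the global
    # dict S, how many sequences produce each possible total.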
global FACES, N
if idx == N:
if totalSum in S:
S[totalSum] += 1
else:
S[totalSum] = 1
return
for i in range(1, FACES + 1):
backtrack(idx + 1, totalSum + i)
def main():
global T, N, K, S
T = readInt()
for i in range(T):
N, K = readInts()
S = dict()
backtrack(0, 0)
total = sum([val for key, val in S.items()])
print('Total:', total)
j, prev, diff, totalPercent = 0, 0, 0, 0
for key, val in S.items():
if j > 0:
diff = val - prev
percent = int(val / total * 100)
print(key, ':', val, '- diff:', diff, 'Percent:', percent)
totalPercent += percent
prev = val
j += 1
print('Total Percent:', totalPercent)
if __name__ == '__main__':
main()
| mikebsg01/Contests-Online | SPOJ/AE2A-BF-1.py | Python | mit | 945 |
# -*- encoding: utf-8 -*-
import pytest
from thefuck.rules import switch_lang
from tests.utils import Command
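# "фзе-пуе" is "apt-get" typed on a Cyrillic keyboard layout and "λσ" is "ls" on a
# Greek one; the rule should recognise such wrong-layout commands and re-map them.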
@pytest.mark.parametrize('command', [
Command(stderr='command not found: фзе-пуе', script=u'фзе-пуе'),
Command(stderr='command not found: λσ', script=u'λσ')])
def test_match(command):
assert switch_lang.match(command, None)
@pytest.mark.parametrize('command', [
Command(stderr='command not found: pat-get', script=u'pat-get'),
Command(stderr='command not found: ls', script=u'ls'),
Command(stderr='some info', script=u'фзе-пуе')])
def test_not_match(command):
assert not switch_lang.match(command, None)
@pytest.mark.parametrize('command, new_command', [
(Command(u'фзе-пуе штыефдд мшь'), 'apt-get install vim'),
(Command(u'λσ -λα'), 'ls -la')])
def test_get_new_command(command, new_command):
assert switch_lang.get_new_command(command, None) == new_command
| nwinkler/thefuck | tests/rules/test_switch_lang.py | Python | mit | 958 |
"""
Domain middleware: enables multi-tenancy in a single process
"""
from anaf.core.domains import setup_domain, setup_domain_database
from anaf.core.db import DatabaseNotFound
from anaf.core.conf import settings
from django.http import HttpResponseRedirect
from django.db.utils import DatabaseError
from django.core.urlresolvers import reverse
from pandora import box
class DomainMiddleware(object):
"""Handles multiple domains within the same Django process"""
def process_request(self, request):
"""Identify the current domain and database, set up appropriate variables in the pandora box"""
domain = request.get_host().split('.')[0]
try:
setup_domain(domain)
except DatabaseNotFound:
evergreen_url = getattr(
settings, 'EVERGREEN_BASE_URL', 'http://tree.io/')
return HttpResponseRedirect(evergreen_url)
except DatabaseError:
from django.db import router
from anaf.core.models import ConfigSetting
setup_domain_database(router.db_for_read(ConfigSetting))
return HttpResponseRedirect(reverse('database_setup'))
box['request'] = request
def process_exception(self, request, exception):
if isinstance(exception, DatabaseNotFound):
evergreen_url = getattr(
settings, 'EVERGREEN_BASE_URL', 'http://tree.io/')
return HttpResponseRedirect(evergreen_url)
| tovmeod/anaf | anaf/core/middleware/domain.py | Python | bsd-3-clause | 1,463 |
# Rounder - Poker for the GNOME Desktop
#
# Copyright (C) 2006-2008 Devan Goodwin <[email protected]>
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
# 02110-1301 USA
""" Rounder GTK Utilities """
import os
import sys
def find_file_on_path(pathname):
"""
Scan the Python path and locate a file with the given name.
See:
http://www.linuxjournal.com/xstatic/articles/lj/0087/4702/4702l2.html
"""
if os.path.isabs(pathname):
return pathname
for dirname in sys.path:
candidate = os.path.join(dirname, pathname)
if os.path.isfile(candidate):
return candidate
raise Exception("Could not find %s on the Python path."
% pathname)
| dgoodwin/rounder | src/rounder/ui/gtk/util.py | Python | gpl-2.0 | 1,387 |
import argparse
import torch
import torch.nn as nn
import os
import numpy as np
import torchvision
from torch.utils.data import DataLoader
import torchvision.transforms as transforms
import ray
from ray import tune
from ray.tune.schedulers import create_scheduler
from ray.tune.integration.horovod import (DistributedTrainableCreator,
distributed_checkpoint_dir)
from ray.util.sgd.torch.resnet import ResNet18
parser = argparse.ArgumentParser()
parser.add_argument("--gpu", action="store_true")
parser.add_argument(
"--smoke-test", action="store_true", help=("Finish quickly for testing."))
args = parser.parse_args()
CIFAR10_STATS = {
"mean": (0.4914, 0.4822, 0.4465),
"std": (0.2023, 0.1994, 0.2010),
}
def train(config, checkpoint_dir=None):
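    # Per-worker training loop: each Horovod worker builds the model, wraps its
    # optimizer in hvd.DistributedOptimizer, and reports the running loss to Tune.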
import horovod.torch as hvd
hvd.init()
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
net = ResNet18(None).to(device)
optimizer = torch.optim.SGD(
net.parameters(),
lr=config["lr"],
)
epoch = 0
if checkpoint_dir:
with open(os.path.join(checkpoint_dir, "checkpoint")) as f:
model_state, optimizer_state, epoch = torch.load(f)
net.load_state_dict(model_state)
optimizer.load_state_dict(optimizer_state)
criterion = nn.CrossEntropyLoss()
optimizer = hvd.DistributedOptimizer(optimizer)
np.random.seed(1 + hvd.rank())
torch.manual_seed(1234)
    # To ensure consistent initialization across slots, broadcast the initial
    # parameters and optimizer state from rank 0 to all other workers.
hvd.broadcast_parameters(net.state_dict(), root_rank=0)
hvd.broadcast_optimizer_state(optimizer, root_rank=0)
trainset = ray.get(config["data"])
trainloader = DataLoader(
trainset,
batch_size=int(config["batch_size"]),
shuffle=True,
num_workers=4)
for epoch in range(epoch, 40): # loop over the dataset multiple times
running_loss = 0.0
epoch_steps = 0
for i, data in enumerate(trainloader):
# get the inputs; data is a list of [inputs, labels]
inputs, labels = data
inputs, labels = inputs.to(device), labels.to(device)
# zero the parameter gradients
optimizer.zero_grad()
# forward + backward + optimize
outputs = net(inputs)
loss = criterion(outputs, labels)
loss.backward()
optimizer.step()
# print statistics
running_loss += loss.item()
epoch_steps += 1
tune.report(loss=running_loss / epoch_steps)
if i % 2000 == 1999: # print every 2000 mini-batches
print("[%d, %5d] loss: %.3f" % (epoch + 1, i + 1,
running_loss / epoch_steps))
with distributed_checkpoint_dir(step=epoch) as checkpoint_dir:
print("this checkpoint dir: ", checkpoint_dir)
path = os.path.join(checkpoint_dir, "checkpoint")
torch.save((net.state_dict(), optimizer.state_dict(), epoch), path)
if __name__ == "__main__":
if args.smoke_test:
ray.init()
else:
ray.init(address="auto") # assumes ray is started with ray up
horovod_trainable = DistributedTrainableCreator(
train,
use_gpu=True,
num_hosts=1 if args.smoke_test else 2,
num_slots=2 if args.smoke_test else 2,
replicate_pem=False)
transform_train = transforms.Compose([
transforms.RandomCrop(32, padding=4),
transforms.RandomHorizontalFlip(),
transforms.ToTensor(),
transforms.Normalize(CIFAR10_STATS["mean"], CIFAR10_STATS["std"]),
]) # meanstd transformation
dataset = torchvision.datasets.CIFAR10(
root="/tmp/data_cifar",
train=True,
download=True,
transform=transform_train)
# ensure that checkpointing works.
pbt = create_scheduler(
"pbt",
perturbation_interval=2,
hyperparam_mutations={
"lr": tune.uniform(0.001, 0.1),
})
analysis = tune.run(
horovod_trainable,
metric="loss",
mode="min",
keep_checkpoints_num=1,
scheduler=pbt,
config={
"lr": tune.grid_search([0.1 * i for i in range(1, 10)]),
"batch_size": 64,
"data": ray.put(dataset)
},
num_samples=1,
fail_fast=True)
# callbacks=[FailureInjectorCallback()])
print("Best hyperparameters found were: ", analysis.best_config)
| richardliaw/ray | release/horovod_tests/workloads/horovod_test.py | Python | apache-2.0 | 4,533 |
##############################################################################
#
# OSIS stands for Open Student Information System. It's an application
# designed to manage the core business of higher education institutions,
# such as universities, faculties, institutes and professional schools.
# The core business involves the administration of students, teachers,
# courses, programs and so on.
#
# Copyright (C) 2015-2021 Université catholique de Louvain (http://www.uclouvain.be)
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# A copy of this license - GNU General Public License - is available
# at the root of the source code of this program. If not,
# see http://www.gnu.org/licenses/.
#
##############################################################################
from typing import List, Optional
import attr
from base.models.enums.internship_subtypes import InternshipSubtype
from base.models.enums.learning_container_year_types import LearningContainerYearType
from base.models.enums.learning_unit_year_periodicity import PeriodicityEnum
from base.models.enums.learning_unit_year_session import DerogationSession
from base.models.enums.quadrimesters import DerogationQuadrimester
from ddd.logic.learning_unit.commands import CreatePartimCommand
from ddd.logic.learning_unit.domain.model._partim import Partim, PartimBuilder
from ddd.logic.learning_unit.domain.model._remarks import Remarks
from ddd.logic.learning_unit.domain.model._titles import Titles
from ddd.logic.learning_unit.domain.model._volumes_repartition import LecturingPart, PracticalPart
from ddd.logic.learning_unit.domain.model.responsible_entity import UCLEntityIdentity
from ddd.logic.shared_kernel.academic_year.domain.model.academic_year import AcademicYearIdentity
from ddd.logic.shared_kernel.campus.domain.model.uclouvain_campus import UclouvainCampusIdentity
from ddd.logic.shared_kernel.language.domain.model.language import LanguageIdentity
from osis_common.ddd import interface
@attr.s(frozen=True, slots=True)
class LearningUnitIdentity(interface.EntityIdentity):
academic_year = attr.ib(type=AcademicYearIdentity)
code = attr.ib(type=str)
def __str__(self):
return "{} - ({})".format(self.code, self.academic_year)
@property
def year(self) -> int:
return self.academic_year.year
def get_next_year(self):
return self.year + 1
@attr.s(slots=True, hash=False, eq=False)
class LearningUnit(interface.RootEntity):
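    """Aggregate root describing a learning unit: identity, titles, credits, volumes and partims."""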
entity_id = attr.ib(type=LearningUnitIdentity)
titles = attr.ib(type=Titles)
credits = attr.ib(type=int)
internship_subtype = attr.ib(type=InternshipSubtype)
teaching_place = attr.ib(type=UclouvainCampusIdentity)
responsible_entity_identity = attr.ib(type=UCLEntityIdentity)
attribution_entity_identity = attr.ib(type=Optional[UCLEntityIdentity])
periodicity = attr.ib(type=PeriodicityEnum)
language_id = attr.ib(type=LanguageIdentity)
remarks = attr.ib(type=Remarks)
partims = attr.ib(type=List[Partim])
derogation_quadrimester = attr.ib(type=DerogationQuadrimester)
derogation_session = attr.ib(type=DerogationSession)
lecturing_part = attr.ib(type=LecturingPart)
practical_part = attr.ib(type=PracticalPart)
professional_integration = attr.ib(type=bool)
is_active = attr.ib(type=bool)
@property
def academic_year(self) -> 'AcademicYearIdentity':
return self.entity_id.academic_year
@property
def year(self) -> int:
return self.entity_id.year
@property
def code(self) -> str:
return self.entity_id.code
@property
def complete_title_fr(self) -> str:
return self.titles.complete_fr
@property
def complete_title_en(self) -> str:
return self.titles.complete_en
def contains_partim_subdivision(self, subdivision: str) -> bool:
return subdivision in {p.subdivision for p in self.partims}
def create_partim(self, create_partim_cmd: 'CreatePartimCommand') -> None:
partim = PartimBuilder.build_from_command(
cmd=create_partim_cmd,
learning_unit=self,
)
self.partims.append(partim)
def has_partim(self) -> bool:
return len(self.partims) > 0
def is_external(self) -> bool:
return isinstance(self, ExternalLearningUnit)
def has_volume(self) -> bool:
return self.lecturing_part is not None or self.practical_part is not None
def has_practical_volume(self) -> bool:
return self.practical_part and self.practical_part.volumes.volume_annual
def has_lecturing_volume(self) -> bool:
return self.lecturing_part and self.lecturing_part.volumes.volume_annual
class CourseLearningUnit(LearningUnit):
type = LearningContainerYearType.COURSE
class InternshipLearningUnit(LearningUnit):
type = LearningContainerYearType.INTERNSHIP
class DissertationLearningUnit(LearningUnit):
type = LearningContainerYearType.DISSERTATION
class OtherCollectiveLearningUnit(LearningUnit):
type = LearningContainerYearType.OTHER_COLLECTIVE
class OtherIndividualLearningUnit(LearningUnit):
type = LearningContainerYearType.OTHER_INDIVIDUAL
class MasterThesisLearningUnit(LearningUnit):
type = LearningContainerYearType.MASTER_THESIS
class ExternalLearningUnit(LearningUnit):
type = LearningContainerYearType.EXTERNAL
| uclouvain/OSIS-Louvain | ddd/logic/learning_unit/domain/model/learning_unit.py | Python | agpl-3.0 | 5,867 |
from ase.atoms import Atoms
from ase.quaternions import Quaternions
from ase.calculators.singlepoint import SinglePointCalculator
from ase.parallel import paropen
def read_lammps_dump(fileobj, index=-1, order=True):
"""Method which reads a LAMMPS dump file.
order: Order the particles according to their id. Might be faster to
switch it off.
"""
if isinstance(fileobj, str):
f = paropen(fileobj)
else:
f = fileobj
# load everything into memory
lines = f.readlines()
natoms = 0
images = []
while len(lines) > natoms:
line = lines.pop(0)
if 'ITEM: TIMESTEP' in line:
n_atoms = 0
lo = []
hi = []
tilt = []
id = []
types = []
positions = []
scaled_positions = []
velocities = []
forces = []
quaternions = []
if 'ITEM: NUMBER OF ATOMS' in line:
line = lines.pop(0)
natoms = int(line.split()[0])
if 'ITEM: BOX BOUNDS' in line:
# save labels behind "ITEM: BOX BOUNDS" in
# triclinic case (>=lammps-7Jul09)
tilt_items = line.split()[3:]
for i in range(3):
line = lines.pop(0)
fields = line.split()
lo.append(float(fields[0]))
hi.append(float(fields[1]))
if (len(fields) >= 3):
tilt.append(float(fields[2]))
# determine cell tilt (triclinic case!)
if (len(tilt) >= 3):
# for >=lammps-7Jul09 use labels behind
# "ITEM: BOX BOUNDS" to assign tilt (vector) elements ...
if (len(tilt_items) >= 3):
xy = tilt[tilt_items.index('xy')]
xz = tilt[tilt_items.index('xz')]
yz = tilt[tilt_items.index('yz')]
# ... otherwise assume default order in 3rd column
# (if the latter was present)
else:
xy = tilt[0]
xz = tilt[1]
yz = tilt[2]
else:
xy = xz = yz = 0
xhilo = (hi[0] - lo[0]) - (xy**2)**0.5 - (xz**2)**0.5
yhilo = (hi[1] - lo[1]) - (yz**2)**0.5
zhilo = (hi[2] - lo[2])
if xy < 0:
if xz < 0:
celldispx = lo[0] - xy - xz
else:
celldispx = lo[0] - xy
else:
celldispx = lo[0]
celldispy = lo[1]
celldispz = lo[2]
cell = [[xhilo, 0, 0], [xy, yhilo, 0], [xz, yz, zhilo]]
celldisp = [[celldispx, celldispy, celldispz]]
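        # Helper: append the requested per-atom columns as floats, but only if every
        # requested label is present in this dump's "ITEM: ATOMS" header.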
def add_quantity(fields, var, labels):
for label in labels:
if label not in atom_attributes:
return
var.append([float(fields[atom_attributes[label]])
for label in labels])
if 'ITEM: ATOMS' in line:
# (reliably) identify values by labels behind
# "ITEM: ATOMS" - requires >=lammps-7Jul09
# create corresponding index dictionary before
# iterating over atoms to (hopefully) speed up lookups...
atom_attributes = {}
for (i, x) in enumerate(line.split()[2:]):
atom_attributes[x] = i
for n in range(natoms):
line = lines.pop(0)
fields = line.split()
id.append(int(fields[atom_attributes['id']]))
types.append(int(fields[atom_attributes['type']]))
add_quantity(fields, positions, ['x', 'y', 'z'])
add_quantity(fields, scaled_positions, ['xs', 'ys', 'zs'])
add_quantity(fields, velocities, ['vx', 'vy', 'vz'])
add_quantity(fields, forces, ['fx', 'fy', 'fz'])
add_quantity(fields, quaternions, ['c_q[1]', 'c_q[2]',
'c_q[3]', 'c_q[4]'])
if order:
def reorder(inlist):
if not len(inlist):
return inlist
outlist = [None] * len(id)
for i, v in zip(id, inlist):
outlist[i - 1] = v
return outlist
types = reorder(types)
positions = reorder(positions)
scaled_positions = reorder(scaled_positions)
velocities = reorder(velocities)
forces = reorder(forces)
quaternions = reorder(quaternions)
if len(quaternions):
images.append(Quaternions(symbols=types,
positions=positions,
cell=cell, celldisp=celldisp,
quaternions=quaternions))
elif len(positions):
images.append(Atoms(symbols=types,
positions=positions, celldisp=celldisp,
cell=cell))
elif len(scaled_positions):
images.append(Atoms(symbols=types,
scaled_positions=scaled_positions,
celldisp=celldisp,
cell=cell))
if len(velocities):
images[-1].set_velocities(velocities)
if len(forces):
calculator = SinglePointCalculator(0.0, forces,
None, None, images[-1])
images[-1].set_calculator(calculator)
return images[index]
| grhawk/ASE | tools/ase/io/lammpsrun.py | Python | gpl-2.0 | 5,820 |
farmer = {
'kb': '''
Farmer(Mac)
Rabbit(Pete)
Mother(MrsMac, Mac)
Mother(MrsRabbit, Pete)
(Rabbit(r) & Farmer(f)) ==> Hates(f, r)
(Mother(m, c)) ==> Loves(m, c)
(Mother(m, r) & Rabbit(r)) ==> Rabbit(m)
(Farmer(f)) ==> Human(f)
(Mother(m, h) & Human(h)) ==> Human(m)
''',
# Note that this order of conjuncts
# would result in infinite recursion:
# '(Human(h) & Mother(m, h)) ==> Human(m)'
'queries':'''
Human(x)
Hates(x, y)
''',
# 'limit': 1,
}
weapons = {
'kb': '''
(American(x) & Weapon(y) & Sells(x, y, z) & Hostile(z)) ==> Criminal(x)
Owns(Nono, M1)
Missile(M1)
(Missile(x) & Owns(Nono, x)) ==> Sells(West, x, Nono)
Missile(x) ==> Weapon(x)
Enemy(x, America) ==> Hostile(x)
American(West)
Enemy(Nono, America)
''',
'queries':'''
Criminal(x)
''',
}
ocean = {
'kb': '''
Fish(Gill)
Fish(Rocky)
Fish(Uma)
Dolphin(Delphine)
Dolphin(Dale)
Shrimp(Sam)
Shrimp(Dave)
Crab(Craig)
Crab(Chris)
Crab(x) ==> Crustacean(x)
Shrimp(x) ==> Crustacean(x)
Predator(Gill, Sam)
Predator(Delphine, Gill)
Predator(Delphine, Shrimp)
Prey(Gill, Delphine)
Prey(Sam, Gill)
(Shrimp(s) & Fish(f)) ==> Eats(f, s)
(Fish(f) & Dolphin(d)) ==> Eats(d, f)
(Shrimp(f) & Dolphin(d)) ==> Eats(d, f)
Fish(f) & Shrimp(s) ==> Fears(s,f)
''',
'queries':'''
Prey(x,y)
Eats(x, y)
Fish(x)
Fears(x,y)
Crustacean(x)
''',
}
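# Only the knowledge bases listed below are exercised; the others are kept for reference.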
Examples = {
# 'farmer': farmer,
# 'weapons': weapons,
'ocean': ocean,
} | armadill-odyssey/aima-python | submissions/Ottenlips/myLogic.py | Python | mit | 1,399 |