repo_name | path | copies | size | content | license | hash | line_mean | line_max | alpha_frac | autogenerated |
---|---|---|---|---|---|---|---|---|---|---|
jeeftor/alfredToday | src/lib/pyexchange/exchange2010/soap_request.py | 1 | 19212 | """
(c) 2013 LinkedIn Corp. All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
"""
from lxml.builder import ElementMaker
from ..utils import convert_datetime_to_utc
from ..compat import _unicode
MSG_NS = u'http://schemas.microsoft.com/exchange/services/2006/messages'
TYPE_NS = u'http://schemas.microsoft.com/exchange/services/2006/types'
SOAP_NS = u'http://schemas.xmlsoap.org/soap/envelope/'
NAMESPACES = {u'm': MSG_NS, u't': TYPE_NS, u's': SOAP_NS}
M = ElementMaker(namespace=MSG_NS, nsmap=NAMESPACES)
T = ElementMaker(namespace=TYPE_NS, nsmap=NAMESPACES)
EXCHANGE_DATETIME_FORMAT = u"%Y-%m-%dT%H:%M:%SZ"
EXCHANGE_DATE_FORMAT = u"%Y-%m-%d"
DISTINGUISHED_IDS = (
'calendar', 'contacts', 'deleteditems', 'drafts', 'inbox', 'journal', 'notes', 'outbox', 'sentitems',
'tasks', 'msgfolderroot', 'root', 'junkemail', 'searchfolders', 'voicemail', 'recoverableitemsroot',
'recoverableitemsdeletions', 'recoverableitemsversions', 'recoverableitemspurges', 'archiveroot',
'archivemsgfolderroot', 'archivedeleteditems', 'archiverecoverableitemsroot',
'Archiverecoverableitemsdeletions', 'Archiverecoverableitemsversions', 'Archiverecoverableitemspurges',
)
def exchange_header():
return T.RequestServerVersion({u'Version': u'Exchange2010'})
def resource_node(element, resources):
"""
Helper function to generate a person/conference room node from an email address
<t:OptionalAttendees>
<t:Attendee>
<t:Mailbox>
<t:EmailAddress>{{ attendee_email }}</t:EmailAddress>
</t:Mailbox>
</t:Attendee>
</t:OptionalAttendees>
"""
for attendee in resources:
element.append(
T.Attendee(
T.Mailbox(
T.EmailAddress(attendee.email)
)
)
)
return element
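# Editor's note (sketch, not part of the original module): resource_node only
# relies on each resource exposing an `email` attribute, so any object works.
# For example, with a hypothetical namedtuple:
#   from collections import namedtuple
#   Attendee = namedtuple('Attendee', ['email'])
#   node = resource_node(T.RequiredAttendees(), [Attendee(u'room@example.com')])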
def delete_field(field_uri):
"""
Helper function to request deletion of a field. This is necessary when you want to overwrite values instead of
appending.
<t:DeleteItemField>
<t:FieldURI FieldURI="calendar:Resources"/>
</t:DeleteItemField>
"""
root = T.DeleteItemField(
T.FieldURI(FieldURI=field_uri)
)
return root
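# Editor's note (sketch): delete_field pairs with the UpdateItem builder
# further below, e.g. clearing all booked resources from an event:
#   update_node.append(delete_field(field_uri="calendar:Resources"))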
def get_item(exchange_id, format=u"Default"):
"""
Requests a calendar item from the store.
exchange_id is the id for this event in the Exchange store.
    format controls how much data you get back from Exchange. Full docs are here, but acceptable values
are IdOnly, Default, and AllProperties.
http://msdn.microsoft.com/en-us/library/aa564509(v=exchg.140).aspx
<m:GetItem xmlns:m="http://schemas.microsoft.com/exchange/services/2006/messages"
xmlns:t="http://schemas.microsoft.com/exchange/services/2006/types">
<m:ItemShape>
<t:BaseShape>{format}</t:BaseShape>
</m:ItemShape>
<m:ItemIds>
<t:ItemId Id="{exchange_id}"/>
</m:ItemIds>
</m:GetItem>
"""
elements = list()
    if isinstance(exchange_id, list):
for item in exchange_id:
elements.append(T.ItemId(Id=item))
else:
elements = [T.ItemId(Id=exchange_id)]
root = M.GetItem(
M.ItemShape(
T.BaseShape(format)
),
M.ItemIds(
*elements
)
)
return root
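# Editor's note (sketch): the builders in this module return lxml elements,
# which can be serialized for inspection. The exchange id below is a
# made-up placeholder.
#   from lxml import etree
#   print etree.tostring(get_item(u'AAMkADExampleId', format=u'IdOnly'),
#                        pretty_print=True)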
def get_calendar_items(format=u"Default", start=None, end=None, max_entries=999999):
start = start.strftime(EXCHANGE_DATETIME_FORMAT)
end = end.strftime(EXCHANGE_DATETIME_FORMAT)
root = M.FindItem(
{u'Traversal': u'Shallow'},
M.ItemShape(
T.BaseShape(format)
),
M.CalendarView({
u'MaxEntriesReturned': _unicode(max_entries),
u'StartDate': start,
u'EndDate': end,
}),
M.ParentFolderIds(T.DistinguishedFolderId(Id=u"calendar")),
)
return root
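# Editor's note (sketch): despite the None defaults, start and end must be
# datetime objects because strftime is called on them unconditionally.
#   import datetime
#   view = get_calendar_items(start=datetime.datetime(2016, 1, 1),
#                             end=datetime.datetime(2016, 1, 31))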
def get_master(exchange_id, format=u"Default"):
"""
Requests a calendar item from the store.
exchange_id is the id for this event in the Exchange store.
    format controls how much data you get back from Exchange. Full docs are here, but acceptable values
are IdOnly, Default, and AllProperties.
http://msdn.microsoft.com/en-us/library/aa564509(v=exchg.140).aspx
<m:GetItem xmlns:m="http://schemas.microsoft.com/exchange/services/2006/messages"
xmlns:t="http://schemas.microsoft.com/exchange/services/2006/types">
<m:ItemShape>
<t:BaseShape>{format}</t:BaseShape>
</m:ItemShape>
<m:ItemIds>
<t:RecurringMasterItemId OccurrenceId="{exchange_id}"/>
</m:ItemIds>
</m:GetItem>
"""
root = M.GetItem(
M.ItemShape(
T.BaseShape(format)
),
M.ItemIds(
T.RecurringMasterItemId(OccurrenceId=exchange_id)
)
)
return root
def get_occurrence(exchange_id, instance_index, format=u"Default"):
"""
Requests one or more calendar items from the store matching the master & index.
exchange_id is the id for the master event in the Exchange store.
    format controls how much data you get back from Exchange. Full docs are here, but acceptable values
are IdOnly, Default, and AllProperties.
GetItem Doc:
http://msdn.microsoft.com/en-us/library/aa564509(v=exchg.140).aspx
OccurrenceItemId Doc:
http://msdn.microsoft.com/en-us/library/office/aa580744(v=exchg.150).aspx
<m:GetItem xmlns:m="http://schemas.microsoft.com/exchange/services/2006/messages"
xmlns:t="http://schemas.microsoft.com/exchange/services/2006/types">
<m:ItemShape>
<t:BaseShape>{format}</t:BaseShape>
</m:ItemShape>
<m:ItemIds>
{% for index in instance_index %}
<t:OccurrenceItemId RecurringMasterId="{exchange_id}" InstanceIndex="{{ index }}"/>
{% endfor %}
</m:ItemIds>
</m:GetItem>
"""
root = M.GetItem(
M.ItemShape(
T.BaseShape(format)
),
M.ItemIds()
)
items_node = root.xpath("//m:ItemIds", namespaces=NAMESPACES)[0]
for index in instance_index:
items_node.append(T.OccurrenceItemId(RecurringMasterId=exchange_id, InstanceIndex=str(index)))
return root
def get_folder(folder_id, format=u"Default"):
id = T.DistinguishedFolderId(Id=folder_id) if folder_id in DISTINGUISHED_IDS else T.FolderId(Id=folder_id)
root = M.GetFolder(
M.FolderShape(
T.BaseShape(format)
),
M.FolderIds(id)
)
return root
def new_folder(folder):
id = T.DistinguishedFolderId(Id=folder.parent_id) if folder.parent_id in DISTINGUISHED_IDS else T.FolderId(Id=folder.parent_id)
if folder.folder_type == u'Folder':
folder_node = T.Folder(T.DisplayName(folder.display_name))
elif folder.folder_type == u'CalendarFolder':
folder_node = T.CalendarFolder(T.DisplayName(folder.display_name))
root = M.CreateFolder(
M.ParentFolderId(id),
M.Folders(folder_node)
)
return root
def find_folder(parent_id, format=u"Default"):
id = T.DistinguishedFolderId(Id=parent_id) if parent_id in DISTINGUISHED_IDS else T.FolderId(Id=parent_id)
root = M.FindFolder(
{u'Traversal': u'Shallow'},
M.FolderShape(
T.BaseShape(format)
),
M.ParentFolderIds(id)
)
return root
def delete_folder(folder):
root = M.DeleteFolder(
{u'DeleteType': 'HardDelete'},
M.FolderIds(
T.FolderId(Id=folder.id)
)
)
return root
def new_event(event):
"""
Requests a new event be created in the store.
http://msdn.microsoft.com/en-us/library/aa564690(v=exchg.140).aspx
<m:CreateItem SendMeetingInvitations="SendToAllAndSaveCopy"
xmlns:m="http://schemas.microsoft.com/exchange/services/2006/messages"
xmlns:t="http://schemas.microsoft.com/exchange/services/2006/types">
<m:SavedItemFolderId>
<t:DistinguishedFolderId Id="calendar"/>
</m:SavedItemFolderId>
<m:Items>
<t:CalendarItem>
<t:Subject>{event.subject}</t:Subject>
<t:Body BodyType="HTML">{event.subject}</t:Body>
<t:Start></t:Start>
<t:End></t:End>
<t:Location></t:Location>
<t:RequiredAttendees>
{% for attendee_email in meeting.required_attendees %}
<t:Attendee>
<t:Mailbox>
<t:EmailAddress>{{ attendee_email }}</t:EmailAddress>
</t:Mailbox>
</t:Attendee>
        {% endfor %}
</t:RequiredAttendees>
{% if meeting.optional_attendees %}
<t:OptionalAttendees>
{% for attendee_email in meeting.optional_attendees %}
<t:Attendee>
<t:Mailbox>
<t:EmailAddress>{{ attendee_email }}</t:EmailAddress>
</t:Mailbox>
</t:Attendee>
{% endfor %}
</t:OptionalAttendees>
{% endif %}
{% if meeting.conference_room %}
<t:Resources>
<t:Attendee>
<t:Mailbox>
<t:EmailAddress>{{ meeting.conference_room.email }}</t:EmailAddress>
</t:Mailbox>
</t:Attendee>
</t:Resources>
{% endif %}
</t:CalendarItem>
</m:Items>
</m:CreateItem>
"""
id = T.DistinguishedFolderId(Id=event.calendar_id) if event.calendar_id in DISTINGUISHED_IDS else T.FolderId(Id=event.calendar_id)
start = convert_datetime_to_utc(event.start)
end = convert_datetime_to_utc(event.end)
root = M.CreateItem(
M.SavedItemFolderId(id),
M.Items(
T.CalendarItem(
T.Subject(event.subject),
T.Body(event.body or u'', BodyType="HTML"),
)
),
SendMeetingInvitations="SendToAllAndSaveCopy"
)
calendar_node = root.xpath(u'/m:CreateItem/m:Items/t:CalendarItem', namespaces=NAMESPACES)[0]
if event.reminder_minutes_before_start:
calendar_node.append(T.ReminderIsSet('true'))
calendar_node.append(T.ReminderMinutesBeforeStart(str(event.reminder_minutes_before_start)))
else:
calendar_node.append(T.ReminderIsSet('false'))
calendar_node.append(T.Start(start.strftime(EXCHANGE_DATETIME_FORMAT)))
calendar_node.append(T.End(end.strftime(EXCHANGE_DATETIME_FORMAT)))
if event.is_all_day:
calendar_node.append(T.IsAllDayEvent('true'))
calendar_node.append(T.Location(event.location or u''))
if event.required_attendees:
calendar_node.append(resource_node(element=T.RequiredAttendees(), resources=event.required_attendees))
if event.optional_attendees:
calendar_node.append(resource_node(element=T.OptionalAttendees(), resources=event.optional_attendees))
if event.resources:
calendar_node.append(resource_node(element=T.Resources(), resources=event.resources))
if event.recurrence:
if event.recurrence == u'daily':
recurrence = T.DailyRecurrence(
T.Interval(str(event.recurrence_interval)),
)
elif event.recurrence == u'weekly':
recurrence = T.WeeklyRecurrence(
T.Interval(str(event.recurrence_interval)),
T.DaysOfWeek(event.recurrence_days),
)
elif event.recurrence == u'monthly':
recurrence = T.AbsoluteMonthlyRecurrence(
T.Interval(str(event.recurrence_interval)),
T.DayOfMonth(str(event.start.day)),
)
elif event.recurrence == u'yearly':
recurrence = T.AbsoluteYearlyRecurrence(
T.DayOfMonth(str(event.start.day)),
T.Month(event.start.strftime("%B")),
)
calendar_node.append(
T.Recurrence(
recurrence,
T.EndDateRecurrence(
T.StartDate(event.start.strftime(EXCHANGE_DATE_FORMAT)),
T.EndDate(event.recurrence_end_date.strftime(EXCHANGE_DATE_FORMAT)),
)
)
)
return root
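# Editor's note (sketch): new_event only reads attributes off the event
# object, so a minimal stub is enough to build a request. All values below
# are placeholders; a real event class would normally be used instead.
#   import datetime
#   class _Event(object):
#       calendar_id = u'calendar'
#       subject, body, location = u'Standup', u'', u'Room 1'
#       start = datetime.datetime(2016, 1, 5, 10, 0)
#       end = datetime.datetime(2016, 1, 5, 10, 30)
#       is_all_day = False
#       reminder_minutes_before_start = 15
#       required_attendees = optional_attendees = resources = None
#       recurrence = None
#   request = new_event(_Event())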
def delete_event(event):
"""
Requests an item be deleted from the store.
<DeleteItem
xmlns="http://schemas.microsoft.com/exchange/services/2006/messages"
xmlns:t="http://schemas.microsoft.com/exchange/services/2006/types"
DeleteType="HardDelete"
SendMeetingCancellations="SendToAllAndSaveCopy"
AffectedTaskOccurrences="AllOccurrences">
<ItemIds>
<t:ItemId Id="{{ id }}" ChangeKey="{{ change_key }}"/>
</ItemIds>
</DeleteItem>
"""
root = M.DeleteItem(
M.ItemIds(
T.ItemId(Id=event.id, ChangeKey=event.change_key)
),
DeleteType="HardDelete",
SendMeetingCancellations="SendToAllAndSaveCopy",
AffectedTaskOccurrences="AllOccurrences"
)
return root
def move_event(event, folder_id):
id = T.DistinguishedFolderId(Id=folder_id) if folder_id in DISTINGUISHED_IDS else T.FolderId(Id=folder_id)
root = M.MoveItem(
M.ToFolderId(id),
M.ItemIds(
T.ItemId(Id=event.id, ChangeKey=event.change_key)
)
)
return root
def move_folder(folder, folder_id):
id = T.DistinguishedFolderId(Id=folder_id) if folder_id in DISTINGUISHED_IDS else T.FolderId(Id=folder_id)
root = M.MoveFolder(
M.ToFolderId(id),
M.FolderIds(
T.FolderId(Id=folder.id)
)
)
return root
def update_property_node(node_to_insert, field_uri):
""" Helper function - generates a SetItemField which tells Exchange you want to overwrite the contents of a field."""
root = T.SetItemField(
T.FieldURI(FieldURI=field_uri),
T.CalendarItem(node_to_insert)
)
return root
def update_item(event, updated_attributes, calendar_item_update_operation_type):
""" Saves updates to an event in the store. Only request changes for attributes that have actually changed."""
root = M.UpdateItem(
M.ItemChanges(
T.ItemChange(
T.ItemId(Id=event.id, ChangeKey=event.change_key),
T.Updates()
)
),
ConflictResolution=u"AlwaysOverwrite",
MessageDisposition=u"SendAndSaveCopy",
SendMeetingInvitationsOrCancellations=calendar_item_update_operation_type
)
update_node = root.xpath(u'/m:UpdateItem/m:ItemChanges/t:ItemChange/t:Updates', namespaces=NAMESPACES)[0]
# if not send_only_to_changed_attendees:
# # We want to resend invites, which you do by setting an attribute to the same value it has. Right now, events
# # are always scheduled as Busy time, so we just set that again.
# update_node.append(
# update_property_node(field_uri="calendar:LegacyFreeBusyStatus", node_to_insert=T.LegacyFreeBusyStatus("Busy"))
# )
if u'html_body' in updated_attributes:
update_node.append(
update_property_node(field_uri="item:Body", node_to_insert=T.Body(event.html_body, BodyType="HTML"))
)
if u'text_body' in updated_attributes:
update_node.append(
update_property_node(field_uri="item:Body", node_to_insert=T.Body(event.text_body, BodyType="Text"))
)
if u'subject' in updated_attributes:
update_node.append(
update_property_node(field_uri="item:Subject", node_to_insert=T.Subject(event.subject))
)
if u'start' in updated_attributes:
start = convert_datetime_to_utc(event.start)
update_node.append(
update_property_node(field_uri="calendar:Start", node_to_insert=T.Start(start.strftime(EXCHANGE_DATETIME_FORMAT)))
)
if u'end' in updated_attributes:
end = convert_datetime_to_utc(event.end)
update_node.append(
update_property_node(field_uri="calendar:End", node_to_insert=T.End(end.strftime(EXCHANGE_DATETIME_FORMAT)))
)
if u'location' in updated_attributes:
update_node.append(
update_property_node(field_uri="calendar:Location", node_to_insert=T.Location(event.location))
)
if u'online_meeting' in updated_attributes:
print "Not yet Implemented"
pass
if u'attendees' in updated_attributes:
if event.required_attendees:
required = resource_node(element=T.RequiredAttendees(), resources=event.required_attendees)
update_node.append(
update_property_node(field_uri="calendar:RequiredAttendees", node_to_insert=required)
)
else:
update_node.append(delete_field(field_uri="calendar:RequiredAttendees"))
if event.optional_attendees:
optional = resource_node(element=T.OptionalAttendees(), resources=event.optional_attendees)
update_node.append(
update_property_node(field_uri="calendar:OptionalAttendees", node_to_insert=optional)
)
else:
update_node.append(delete_field(field_uri="calendar:OptionalAttendees"))
if u'resources' in updated_attributes:
if event.resources:
resources = resource_node(element=T.Resources(), resources=event.resources)
update_node.append(
update_property_node(field_uri="calendar:Resources", node_to_insert=resources)
)
else:
update_node.append(delete_field(field_uri="calendar:Resources"))
if u'reminder_minutes_before_start' in updated_attributes:
if event.reminder_minutes_before_start:
update_node.append(
update_property_node(field_uri="item:ReminderIsSet", node_to_insert=T.ReminderIsSet('true'))
)
update_node.append(
update_property_node(
field_uri="item:ReminderMinutesBeforeStart",
node_to_insert=T.ReminderMinutesBeforeStart(str(event.reminder_minutes_before_start))
)
)
else:
update_node.append(
update_property_node(field_uri="item:ReminderIsSet", node_to_insert=T.ReminderIsSet('false'))
)
if u'is_all_day' in updated_attributes:
update_node.append(
update_property_node(field_uri="calendar:IsAllDayEvent", node_to_insert=T.IsAllDayEvent(str(event.is_all_day).lower()))
)
for attr in event.RECURRENCE_ATTRIBUTES:
if attr in updated_attributes:
recurrence_node = T.Recurrence()
if event.recurrence == 'daily':
recurrence_node.append(
T.DailyRecurrence(
T.Interval(str(event.recurrence_interval)),
)
)
elif event.recurrence == 'weekly':
recurrence_node.append(
T.WeeklyRecurrence(
T.Interval(str(event.recurrence_interval)),
T.DaysOfWeek(event.recurrence_days),
)
)
elif event.recurrence == 'monthly':
recurrence_node.append(
T.AbsoluteMonthlyRecurrence(
T.Interval(str(event.recurrence_interval)),
T.DayOfMonth(str(event.start.day)),
)
)
elif event.recurrence == 'yearly':
recurrence_node.append(
T.AbsoluteYearlyRecurrence(
T.DayOfMonth(str(event.start.day)),
T.Month(event.start.strftime("%B")),
)
)
recurrence_node.append(
T.EndDateRecurrence(
T.StartDate(event.start.strftime(EXCHANGE_DATE_FORMAT)),
T.EndDate(event.recurrence_end_date.strftime(EXCHANGE_DATE_FORMAT)),
)
)
update_node.append(
update_property_node(field_uri="calendar:Recurrence", node_to_insert=recurrence_node)
)
return root
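# Editor's note (sketch): only the fields named in updated_attributes are
# written, e.g.
#   update_item(event, [u'subject', u'location'], u'SendToAllAndSaveCopy')
# emits SetItemField nodes for just those two properties.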
| mit | 973,815,431,249,674,600 | 30.087379 | 212 | 0.656725 | false |
remap/fountainhead | src/fountain_script.py | 1 | 1679 | # -*- Mode:python c-file-style:"gnu" indent-tabs-mode:nil -*- */
#
# Copyright (C) 2014-2015 Regents of the University of California.
# Author: Zhehao Wang <wangzhehao410305@gmail.com>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
# A copy of the GNU General Public License is in the file COPYING.
# This module defines the script class, which takes a file name and constructs
# the element structure by calling the parser. A script contains an element array
# and a title-elements dictionary; both are used by the HTML generator.
# Ported to Python from objc in nyousefi/Fountain repository
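# Editor's note (sketch): typical usage of the FountainScript class defined
# below; the file name is a placeholder.
#   script = FountainScript('my_screenplay.fountain')
#   for element in script._elements:
#       print(element)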
from fountain_parser import Parser, ParserVersion
class FountainScript(object):
def __init__(self, fileName = '', parserVersion = ParserVersion.DEFAULT):
if (fileName == ''):
return
self._fileName = fileName
# This parser is not optimized
parser = Parser(parserVersion)
self._elements = parser.parseBodyOfFile(self._fileName)
self._titlePageContents = parser.parseTitlePageOfFile(self._fileName)
return
| gpl-3.0 | -16,898,439,618,260,620 | 41 | 82 | 0.723049 | false |
flipdazed/SoftwareDevelopment | config.py | 1 | 2168 | # This class contains and builds the decks used in the game
from logs import *
import itertools, random
import collections
logger = logging.getLogger(__name__)
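# Editor's note (sketch): the `defaults` dict below can be expanded into a
# flat, shuffled card list using the itertools/random imports above, e.g.:
#   central_cards = list(itertools.chain.from_iterable(
#       [entry["params"]] * entry["count"]
#       for entry in defaults["central"]["deck_settings"]))
#   random.shuffle(central_cards)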
defaults = {
"central":{
"name":'Central', #"Central",
"hand_size":5,
"deck_settings":[ # Central deck paramss
{"count":4 ,"params":{"name":'Archer', "attack":3, "money":0, "cost":2}},
{"count":4 ,"params":{"name":'Baker', "attack":0, "money":3, "cost":2}},
{"count":3 ,"params":{"name":'Swordsman', "attack":4, "money":0, "cost":3}},
{"count":2 ,"params":{"name":'Knight', "attack":6, "money":0, "cost":5}},
{"count":3 ,"params":{"name":'Tailor', "attack":0, "money":4, "cost":3}},
{"count":3 ,"params":{"name":'Crossbowman', "attack":4, "money":0, "cost":3}},
{"count":3 ,"params":{"name":'Merchant', "attack":0, "money":5, "cost":4}},
{"count":4 ,"params":{"name":'Thug', "attack":2, "money":0, "cost":1}},
{"count":4 ,"params":{"name":'Thief', "attack":1, "money":1, "cost":1}},
{"count":2 ,"params":{"name":'Catapult', "attack":7, "money":0, "cost":6}},
{"count":2 ,"params":{"name":'Caravan', "attack":1, "money":5, "cost":5}},
{"count":2 ,"params":{"name":'Assassin', "attack":5, "money":0, "cost":4}}
]
,"supplements":[
{"count":10 ,"params":{"name":'Levy', "attack":1, "money":2, "cost":2}}
]
}
,"user":{
"name":'You', #"Player One",
"health":30,
"hand_size":5,
"deck_settings":[ # User's deck
{"count":8 ,"params":{"name":'Serf', "attack":0, "money":1, "cost":0}},
{"count":2 ,"params":{"name":'Squire', "attack":1, "money":0, "cost":0}}
]
}
,"computer":{
"name": 'Computer', #"Computer Player",
"health":30,
"hand_size":5,
"deck_settings":[ # computer deck
{"count":8 ,"params":{"name":'Serf', "attack":0, "money":1, "cost":0}},
{"count":2 ,"params":{"name":'Squire', "attack":1, "money":0, "cost":0}}
]
}
} | gpl-3.0 | 7,043,151,529,765,820,000 | 45.148936 | 90 | 0.473708 | false |
tumi8/sKnock | common/modules/Utils.py | 1 | 1624 | import errno
import stat
import pwd
import os
from OpenSSL import crypto
def convertDERtoPEM(key):
return crypto.dump_publickey(crypto.FILETYPE_PEM, crypto.load_publickey(crypto.FILETYPE_ASN1, key))
def convertPEMtoDER(key):
return crypto.dump_publickey(crypto.FILETYPE_ASN1, crypto.load_publickey(crypto.FILETYPE_PEM, key))
def touch(path):
"""
Creates a file at the given path.
If the directories in the given path are not existing, they are created
recursively with the permissions on each of them deriving from the umask,
but with an execute permission for others. The created file will be owned
by `nobody`
If the path already exists then the ownership is changed to `nobody`.
Throws OSError in case the given path is a directory, or upon no sufficient
disk space
"""
f_mode = stat.S_IRUSR | stat.S_IWUSR | stat.S_IRGRP
try:
mode = os.stat(path).st_mode
except os.error as e:
if errno.ENOENT != e.errno:
raise e
mask = os.umask(0)
        os.umask(mask & ~1)  # enable dir access for others
try:
os.makedirs(os.path.dirname(path))
except os.error as e:
if errno.EEXIST != e.errno:
raise e
finally:
os.umask(mask)
f = os.open(path, os.O_CREAT, f_mode)
os.close(f)
else:
f_mode = f_mode | mode & 0o777
os.chmod(path, f_mode)
# File will either be created or already existing by now change the
# ownership of the file to nobody
user = pwd.getpwnam('nobody')
os.chown(path, user.pw_uid, -1)
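# Editor's note (sketch): typical use of the helpers above; the path and key
# bytes are placeholders, and chown'ing to `nobody` requires root.
#   pem = convertDERtoPEM(der_bytes)  # der_bytes: a DER-encoded public key
#   touch('/var/run/sknock/port_open')  # owner rw, group r; owned by nobody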
| gpl-3.0 | 235,730,516,789,654,750 | 30.230769 | 103 | 0.644089 | false |
hasadna/open-shot | polyorg/migrations/0001_initial.py | 1 | 9185 | # -*- coding: utf-8 -*-
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Adding model 'CandidateList'
db.create_table(u'polyorg_candidatelist', (
(u'id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
('name', self.gf('django.db.models.fields.CharField')(max_length=80)),
('ballot', self.gf('django.db.models.fields.CharField')(max_length=4)),
('number_of_seats', self.gf('django.db.models.fields.IntegerField')(null=True, blank=True)),
('surplus_partner', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['polyorg.CandidateList'], null=True, blank=True)),
('mpg_html_report', self.gf('django.db.models.fields.TextField')(null=True, blank=True)),
('img_url', self.gf('django.db.models.fields.URLField')(max_length=200, blank=True)),
('youtube_user', self.gf('django.db.models.fields.CharField')(max_length=80, null=True, blank=True)),
('wikipedia_page', self.gf('django.db.models.fields.CharField')(max_length=80, null=True, blank=True)),
('twitter_account', self.gf('django.db.models.fields.CharField')(max_length=80, null=True, blank=True)),
('facebook_url', self.gf('django.db.models.fields.URLField')(max_length=200, null=True, blank=True)),
('platform', self.gf('django.db.models.fields.TextField')(null=True, blank=True)),
))
db.send_create_signal(u'polyorg', ['CandidateList'])
# Adding model 'Party'
db.create_table(u'polyorg_party', (
(u'id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
('name', self.gf('django.db.models.fields.CharField')(max_length=64)),
('accepts_memberships', self.gf('django.db.models.fields.BooleanField')(default=True)),
))
db.send_create_signal(u'polyorg', ['Party'])
# Adding model 'Candidate'
db.create_table(u'polyorg_candidate', (
(u'id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
('candidate_list', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['polyorg.CandidateList'])),
('user', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['auth.User'])),
('ordinal', self.gf('django.db.models.fields.IntegerField')()),
('party', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['polyorg.Party'], null=True, blank=True)),
('votes', self.gf('django.db.models.fields.IntegerField')(null=True, blank=True)),
('status', self.gf('django.db.models.fields.CharField')(default='S', max_length=1)),
))
db.send_create_signal(u'polyorg', ['Candidate'])
def backwards(self, orm):
# Deleting model 'CandidateList'
db.delete_table(u'polyorg_candidatelist')
# Deleting model 'Party'
db.delete_table(u'polyorg_party')
# Deleting model 'Candidate'
db.delete_table(u'polyorg_candidate')
models = {
u'auth.group': {
'Meta': {'object_name': 'Group'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
},
u'auth.permission': {
'Meta': {'ordering': "(u'content_type__app_label', u'content_type__model', u'codename')", 'unique_together': "((u'content_type', u'codename'),)", 'object_name': 'Permission'},
'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['contenttypes.ContentType']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
u'auth.user': {
'Meta': {'object_name': 'User'},
'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['auth.Group']", 'symmetrical': 'False', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}),
'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
},
u'contenttypes.contenttype': {
'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
u'polyorg.candidate': {
'Meta': {'ordering': "('ordinal',)", 'object_name': 'Candidate'},
'candidate_list': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['polyorg.CandidateList']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'ordinal': ('django.db.models.fields.IntegerField', [], {}),
'party': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['polyorg.Party']", 'null': 'True', 'blank': 'True'}),
'status': ('django.db.models.fields.CharField', [], {'default': "'S'", 'max_length': '1'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['auth.User']"}),
'votes': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'})
},
u'polyorg.candidatelist': {
'Meta': {'object_name': 'CandidateList'},
'ballot': ('django.db.models.fields.CharField', [], {'max_length': '4'}),
'candidates': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'to': u"orm['auth.User']", 'null': 'True', 'through': u"orm['polyorg.Candidate']", 'blank': 'True'}),
'facebook_url': ('django.db.models.fields.URLField', [], {'max_length': '200', 'null': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'img_url': ('django.db.models.fields.URLField', [], {'max_length': '200', 'blank': 'True'}),
'mpg_html_report': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '80'}),
'number_of_seats': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'platform': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'surplus_partner': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['polyorg.CandidateList']", 'null': 'True', 'blank': 'True'}),
'twitter_account': ('django.db.models.fields.CharField', [], {'max_length': '80', 'null': 'True', 'blank': 'True'}),
'wikipedia_page': ('django.db.models.fields.CharField', [], {'max_length': '80', 'null': 'True', 'blank': 'True'}),
'youtube_user': ('django.db.models.fields.CharField', [], {'max_length': '80', 'null': 'True', 'blank': 'True'})
},
u'polyorg.party': {
'Meta': {'object_name': 'Party'},
'accepts_memberships': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '64'})
}
}
complete_apps = ['polyorg'] | bsd-3-clause | 8,433,865,259,077,570,000 | 69.122137 | 208 | 0.573435 | false |
christabor/MoAL | MOAL/data_structures/graphs/hypergraph.py | 1 | 1461 | # -*- coding: utf-8 -*-
__author__ = """Chris Tabor ([email protected])"""
if __name__ == '__main__':
from os import getcwd
from os import sys
sys.path.append(getcwd())
from MOAL.helpers.display import Section
from MOAL.helpers.display import print_h3
from MOAL.helpers import datamaker as dmkr
from MOAL.data_structures.graphs.graphs import Graph
DEBUG = True if __name__ == '__main__' else False
class HypgerGraph(Graph):
"""
From mathworld.wolfram.com/Hypergraph.html:
"A hypergraph is a graph in which generalized edges (called hyperedges)
may connect more than two nodes."
Also interesting, from en.wikipedia.org/wiki/Hypergraph
"The collection of hypergraphs is a category with hypergraph
homomorphisms as morphisms."
"""
if DEBUG:
with Section('Multi-graph'):
hypergraph = HypgerGraph(dmkr.random_graph(max_edges=10))
print_h3('Random multi-graph')
print(hypergraph)
# Multiple edges pointing to each other
hypergraph2 = HypgerGraph({
0: {'edges': [1, 2, 3], 'val': 'A'},
1: {'edges': [0, 3, 2, 1], 'val': 'B'},
2: {'edges': [0, 1, 3, 2], 'val': 'C'},
3: {'edges': [0, 1, 2, 3], 'val': 'D'},
})
print(hypergraph2)
if raw_input('Save graph images? Y/N: ') == 'Y':
hypergraph.render_graph('hypergraph-test.png')
hypergraph2.render_graph('hypergraph2-test.png')
| apache-2.0 | -2,256,337,737,574,232,800 | 31.466667 | 75 | 0.609856 | false |
harlowja/zk_shell | zk_shell/tests/test_basic_cmds.py | 1 | 17707 | # -*- coding: utf-8 -*-
"""test basic cmds"""
import socket
from .shell_test_case import PYTHON3, ShellTestCase
from kazoo.testing.harness import get_global_cluster
from nose import SkipTest
# pylint: disable=R0904
class BasicCmdsTestCase(ShellTestCase):
""" basic test cases """
def test_create_ls(self):
""" test listing znodes """
self.shell.onecmd("create %s/one 'hello'" % (self.tests_path))
self.shell.onecmd("ls %s" % (self.tests_path))
self.assertEqual("one\n", self.output.getvalue())
def test_create_get(self):
""" create a znode and fetch its value """
self.shell.onecmd("create %s/one 'hello'" % (self.tests_path))
self.shell.onecmd("get %s/one" % (self.tests_path))
self.assertEqual("hello\n", self.output.getvalue())
def test_create_recursive(self):
""" recursively create a path """
path = "%s/one/very/long/path" % (self.tests_path)
self.shell.onecmd(
"create %s 'hello' ephemeral=false sequence=false recursive=true" % (path))
self.shell.onecmd("get %s" % (path))
self.assertEqual("hello\n", self.output.getvalue())
def test_set_get(self):
""" set and fetch a znode's value """
self.shell.onecmd("create %s/one 'hello'" % (self.tests_path))
self.shell.onecmd("set %s/one 'bye'" % (self.tests_path))
self.shell.onecmd("get %s/one" % (self.tests_path))
self.assertEqual("bye\n", self.output.getvalue())
def test_create_delete(self):
""" create & delete a znode """
self.shell.onecmd("create %s/one 'hello'" % (self.tests_path))
self.shell.onecmd("rm %s/one" % (self.tests_path))
self.shell.onecmd("exists %s/one" % (self.tests_path))
self.assertEqual("Path %s/one doesn't exist\n" % (
self.tests_path), self.output.getvalue())
def test_create_delete_recursive(self):
""" create & delete a znode recursively """
self.shell.onecmd("create %s/one 'hello'" % (self.tests_path))
self.shell.onecmd("create %s/two 'goodbye'" % (self.tests_path))
self.shell.onecmd("rmr %s" % (self.tests_path))
self.shell.onecmd("exists %s" % (self.tests_path))
self.assertEqual("Path %s doesn't exist\n" % (
self.tests_path), self.output.getvalue())
def test_create_tree(self):
""" test tree's output """
self.shell.onecmd("create %s/one 'hello'" % (self.tests_path))
self.shell.onecmd("create %s/two 'goodbye'" % (self.tests_path))
self.shell.onecmd("tree %s" % (self.tests_path))
expected_output = u""".
├── two
├── one
"""
self.assertEqual(expected_output, self.output.getutf8())
def test_add_auth(self):
""" test authentication """
self.shell.onecmd("add_auth digest super:%s" % (self.super_password))
self.assertEqual("", self.output.getvalue())
def test_bad_auth(self):
""" handle unknown scheme """
self.shell.onecmd("add_auth unknown berk:berk")
self.assertTrue(True)
def test_du(self):
""" test listing a path's size """
self.shell.onecmd("create %s/one 'hello'" % (self.tests_path))
self.shell.onecmd("du %s/one" % (self.tests_path))
self.assertEqual("5\n", self.output.getvalue())
def test_set_get_acls(self):
""" test setting & getting acls for a path """
self.shell.onecmd("create %s/one 'hello'" % (self.tests_path))
self.shell.onecmd("set_acls %s/one 'world:anyone:r digest:%s:cdrwa'" % (
self.tests_path, self.auth_digest))
self.shell.onecmd("get_acls %s/one" % (self.tests_path))
if PYTHON3:
user_id = "Id(scheme='digest', id='%s')" % (self.auth_digest)
else:
user_id = "Id(scheme=u'digest', id=u'%s')" % (self.auth_digest)
user_acl = "ACL(perms=31, acl_list=['ALL'], id=%s)" % (user_id)
expected_output = "/tests/one: ['WORLD_READ', %s]\n" % (user_acl)
self.assertEqual(expected_output, self.output.getvalue())
def test_set_get_acls_recursive(self):
""" test setting & getting acls for a path (recursively) """
path_one = "%s/one" % (self.tests_path)
path_two = "%s/one/two" % (self.tests_path)
self.shell.onecmd("create %s 'hello'" % (path_one))
self.shell.onecmd("create %s 'goodbye'" % (path_two))
self.shell.onecmd("set_acls %s 'world:anyone:r digest:%s:cdrwa' true" % (
path_one, self.auth_digest))
self.shell.onecmd("get_acls %s 0" % (path_one))
if PYTHON3:
user_id = "Id(scheme='digest', id='%s')" % (self.auth_digest)
else:
user_id = "Id(scheme=u'digest', id=u'%s')" % (self.auth_digest)
user_acl = "ACL(perms=31, acl_list=['ALL'], id=%s)" % (user_id)
expected_output = """/tests/one: ['WORLD_READ', %s]
/tests/one/two: ['WORLD_READ', %s]
""" % (user_acl, user_acl)
self.assertEqual(expected_output, self.output.getvalue())
def test_set_get_bad_acl(self):
""" make sure we handle badly formed acls"""
path_one = "%s/one" % (self.tests_path)
auth_id = "username_password:user:user"
self.shell.onecmd("create %s 'hello'" % (path_one))
self.shell.onecmd("set_acls %s 'world:anyone:r %s'" % (
path_one, auth_id))
expected_output = "Failed to set ACLs: "
expected_output += "Bad ACL: username_password:user:user. "
expected_output += "Format is scheme:id:perms.\n"
self.assertEqual(expected_output, self.output.getvalue())
def test_find(self):
""" test find command """
self.shell.onecmd("create %s/one 'hello'" % (self.tests_path))
self.shell.onecmd("create %s/two 'goodbye'" % (self.tests_path))
self.shell.onecmd("find %s/ one" % (self.tests_path))
self.assertEqual("/tests/one\n", self.output.getvalue())
def test_ifind(self):
""" test case-insensitive find """
self.shell.onecmd("create %s/ONE 'hello'" % (self.tests_path))
self.shell.onecmd("create %s/two 'goodbye'" % (self.tests_path))
self.shell.onecmd("ifind %s/ one" % (self.tests_path))
self.assertEqual("/tests/ONE\n", self.output.getvalue())
def test_grep(self):
""" test grepping for content through a path """
path = "%s/semi/long/path" % (self.tests_path)
self.shell.onecmd(
"create %s 'hello' ephemeral=false sequence=false recursive=true" % (path))
self.shell.onecmd("grep %s hello" % (self.tests_path))
self.assertEqual("%s\n" % (path), self.output.getvalue())
def test_igrep(self):
""" test case-insensitive grep """
path = "%s/semi/long/path" % (self.tests_path)
self.shell.onecmd(
"create %s 'HELLO' ephemeral=false sequence=false recursive=true" % (path))
self.shell.onecmd("igrep %s hello show_matches=true" % (self.tests_path))
self.assertEqual("%s:\nHELLO\n" % (path), self.output.getvalue())
def test_get_compressed(self):
""" test getting compressed content out of znode """
self.create_compressed("%s/one" % (self.tests_path), "some value")
self.shell.onecmd("get %s/one" % (self.tests_path))
expected_output = "b'some value'\n" if PYTHON3 else "some value\n"
self.assertEqual(expected_output, self.output.getvalue())
def test_child_count(self):
""" test child count for a given path """
self.shell.onecmd("create %s/something ''" % (self.tests_path))
self.shell.onecmd("create %s/something/else ''" % (self.tests_path))
self.shell.onecmd("create %s/something/else/entirely ''" % (self.tests_path))
self.shell.onecmd("create %s/something/else/entirely/child ''" % (self.tests_path))
self.shell.onecmd("child_count %s/something" % (self.tests_path))
expected_output = u"%s/something/else: 2\n" % (self.tests_path)
self.assertEqual(expected_output, self.output.getvalue())
def test_diff_equal(self):
self.shell.onecmd("create %s/a ''" % (self.tests_path))
self.shell.onecmd("create %s/a/something 'aaa'" % (self.tests_path))
self.shell.onecmd("create %s/a/something/else 'bbb'" % (self.tests_path))
self.shell.onecmd("create %s/a/something/else/entirely 'ccc'" % (self.tests_path))
self.shell.onecmd("create %s/b ''" % (self.tests_path))
self.shell.onecmd("create %s/b/something 'aaa'" % (self.tests_path))
self.shell.onecmd("create %s/b/something/else 'bbb'" % (self.tests_path))
self.shell.onecmd("create %s/b/something/else/entirely 'ccc'" % (self.tests_path))
self.shell.onecmd("diff %s/a %s/b" % (self.tests_path, self.tests_path))
expected_output = u"Branches are equal.\n"
self.assertEqual(expected_output, self.output.getvalue())
def test_diff_different(self):
self.shell.onecmd("create %s/a ''" % (self.tests_path))
self.shell.onecmd("create %s/a/something 'AAA'" % (self.tests_path))
self.shell.onecmd("create %s/a/something/else 'bbb'" % (self.tests_path))
self.shell.onecmd("create %s/b ''" % (self.tests_path))
self.shell.onecmd("create %s/b/something 'aaa'" % (self.tests_path))
self.shell.onecmd("create %s/b/something/else 'bbb'" % (self.tests_path))
self.shell.onecmd("create %s/b/something/else/entirely 'ccc'" % (self.tests_path))
self.shell.onecmd("diff %s/a %s/b" % (self.tests_path, self.tests_path))
expected_output = u"-+ something\n++ something/else/entirely\n"
self.assertEqual(expected_output, self.output.getvalue())
def test_newline_unescaped(self):
self.shell.onecmd("create %s/a 'hello\\n'" % (self.tests_path))
self.shell.onecmd("get %s/a" % (self.tests_path))
self.shell.onecmd("set %s/a 'bye\\n'" % (self.tests_path))
self.shell.onecmd("get %s/a" % (self.tests_path))
expected_output = u"hello\n\nbye\n\n"
self.assertEqual(expected_output, self.output.getvalue())
def test_loop(self):
self.shell.onecmd("create %s/a 'hello'" % (self.tests_path))
self.shell.onecmd("loop 3 0 'get %s/a'" % (self.tests_path))
expected_output = u"hello\nhello\nhello\n"
self.assertEqual(expected_output, self.output.getvalue())
def test_loop_multi(self):
self.shell.onecmd("create %s/a 'hello'" % (self.tests_path))
cmd = 'get %s/a' % (self.tests_path)
self.shell.onecmd("loop 3 0 '%s' '%s'" % (cmd, cmd))
expected_output = u"hello\nhello\nhello\n" * 2
self.assertEqual(expected_output, self.output.getvalue())
def test_bad_arguments(self):
self.shell.onecmd("rm /")
expected_output = u"Bad arguments.\n"
self.assertEqual(expected_output, self.output.getvalue())
def test_fill(self):
path = "%s/a" % (self.tests_path)
self.shell.onecmd("create %s 'hello'" % (path))
self.shell.onecmd("fill %s hello 5" % (path))
self.shell.onecmd("get %s" % (path))
expected_output = u"hellohellohellohellohello\n"
self.assertEqual(expected_output, self.output.getvalue())
def test_child_matches(self):
self.shell.onecmd("create %s/foo ''" % (self.tests_path))
self.shell.onecmd("create %s/foo/member_00001 ''" % (self.tests_path))
self.shell.onecmd("create %s/bar ''" % (self.tests_path))
self.shell.onecmd("child_matches %s member_" % (self.tests_path))
expected_output = u"%s/foo\n" % (self.tests_path)
self.assertEqual(expected_output, self.output.getvalue())
def test_session_endpoint(self):
self.shell.onecmd("session_endpoint 0 localhost")
expected = u"No session info for 0.\n"
self.assertEqual(expected, self.output.getvalue())
def test_ephemeral_endpoint(self):
server = next(iter(get_global_cluster()))
path = "%s/ephemeral" % (self.tests_path)
self.shell.onecmd("create %s 'foo' ephemeral=true" % (path))
self.shell.onecmd("ephemeral_endpoint %s %s" % (path, server.address))
self.assertTrue(self.output.getvalue().startswith("0x"))
def test_transaction_simple(self):
""" simple transaction"""
path = "%s/foo" % (self.tests_path)
txn = "txn 'create %s x' 'set %s y' 'check %s 1'" % (path, path, path)
self.shell.onecmd(txn)
self.shell.onecmd("get %s" % (path))
self.assertEqual("y\n", self.output.getvalue())
def test_transaction_bad_version(self):
""" check version """
path = "%s/foo" % (self.tests_path)
txn = "txn 'create %s x' 'set %s y' 'check %s 100'" % (path, path, path)
self.shell.onecmd(txn)
self.shell.onecmd("exists %s" % (path))
self.assertIn("Path %s doesn't exist\n" % (path), self.output.getvalue())
def test_transaction_rm(self):
""" multiple rm commands """
self.shell.onecmd("create %s/a 'x' ephemeral=true" % (self.tests_path))
self.shell.onecmd("create %s/b 'x' ephemeral=true" % (self.tests_path))
self.shell.onecmd("create %s/c 'x' ephemeral=true" % (self.tests_path))
txn = "txn 'rm %s/a' 'rm %s/b' 'rm %s/c'" % (
self.tests_path, self.tests_path, self.tests_path)
self.shell.onecmd(txn)
self.shell.onecmd("exists %s" % (self.tests_path))
self.assertIn("numChildren=0", self.output.getvalue())
def test_zero(self):
""" test setting a znode to None (no bytes) """
path = "%s/foo" % (self.tests_path)
self.shell.onecmd("create %s bar" % path)
self.shell.onecmd("zero %s" % path)
self.shell.onecmd("get %s" % path)
self.assertEqual("None\n", self.output.getvalue())
def test_create_sequential_without_prefix(self):
self.shell.onecmd("create %s/ '' ephemeral=false sequence=true" % self.tests_path)
self.shell.onecmd("ls %s" % self.tests_path)
self.assertEqual("0000000000\n", self.output.getvalue())
def test_rm_relative(self):
self.shell.onecmd(
"create %s/a/b '2015' ephemeral=false sequence=false recursive=true" % self.tests_path)
self.shell.onecmd("cd %s/a" % self.tests_path)
self.shell.onecmd("rm b")
self.shell.onecmd("exists %s/a" % self.tests_path)
self.assertIn("numChildren=0", self.output.getvalue())
def test_rmr_relative(self):
self.shell.onecmd(
"create %s/a/b/c '2015' ephemeral=false sequence=false recursive=true" % (
self.tests_path))
self.shell.onecmd("cd %s/a" % self.tests_path)
self.shell.onecmd("rmr b")
self.shell.onecmd("exists %s/a" % self.tests_path)
self.assertIn("numChildren=0", self.output.getvalue())
def test_conf_get_all(self):
self.shell.onecmd("conf get")
self.assertIn("chkzk_stat_retries", self.output.getvalue())
self.assertIn("chkzk_znode_delta", self.output.getvalue())
def test_conf_set(self):
self.shell.onecmd("conf set chkzk_stat_retries -100")
self.shell.onecmd("conf get chkzk_stat_retries")
self.assertIn("-100", self.output.getvalue())
def test_pipe(self):
self.shell.onecmd("create %s/foo 'bar'" % self.tests_path)
self.shell.onecmd("cd %s" % self.tests_path)
self.shell.onecmd("pipe ls get")
self.assertEqual(u"bar\n", self.output.getvalue())
def test_reconfig(self):
# handle bad input
self.shell.onecmd("reconfig add foo")
self.assertIn("Bad arguments", self.output.getvalue())
self.output.reset()
# now add a fake observer
def free_sock_port():
s = socket.socket()
s.bind(('', 0))
return s, s.getsockname()[1]
# get ports for election, zab and client endpoints. we need to use
# ports for which we'd immediately get a RST upon connect(); otherwise
# the cluster could crash if it gets a SocketTimeoutException:
# https://issues.apache.org/jira/browse/ZOOKEEPER-2202
s1, port1 = free_sock_port()
s2, port2 = free_sock_port()
s3, port3 = free_sock_port()
joining = 'server.100=0.0.0.0:%d:%d:observer;0.0.0.0:%d' % (
port1, port2, port3)
self.shell.onecmd("reconfig add %s" % joining)
self.assertIn(joining, self.output.getvalue())
self.output.reset()
# now remove it
self.shell.onecmd("reconfig remove 100")
self.assertNotIn(joining, self.output.getvalue())
def test_time(self):
self.shell.onecmd("time 'ls /'")
self.assertIn("Took", self.output.getvalue())
self.assertIn("seconds", self.output.getvalue())
def test_create_async(self):
self.shell.onecmd(
"create %s/foo bar ephemeral=false sequence=false recursive=false async=true" % (
self.tests_path))
self.shell.onecmd("exists %s/foo" % self.tests_path)
self.assertIn("numChildren=0", self.output.getvalue())
def test_session_info(self):
self.shell.onecmd("session_info sessionid")
lines = [line for line in self.output.getvalue().split("\n") if line != ""]
self.assertEqual(1, len(lines))
self.assertIn("sessionid", self.output.getvalue())
def test_echo(self):
self.shell.onecmd("create %s/jimeh gimeh" % (self.tests_path))
self.shell.onecmd("echo 'jimeh = %%s' 'get %s/jimeh'" % (self.tests_path))
self.assertIn("jimeh = gimeh", self.output.getvalue())
| apache-2.0 | 5,503,494,347,214,765,000 | 44.025445 | 99 | 0.602487 | false |
DOAJ/doaj | portality/scripts/sage_update.py | 1 | 4491 | import csv
from openpyxl import load_workbook
from portality.models import Journal
if __name__ == "__main__":
import argparse
parser = argparse.ArgumentParser()
parser.add_argument("-i", "--infile", help="path to SAGE spreadsheet", required=True)
parser.add_argument("-o", "--out", help="output file path", required=True)
args = parser.parse_args()
with open(args.out, "w", encoding="utf-8") as f:
writer = csv.writer(f)
wb = load_workbook(args.infile)
sheet = wb['sage_journals']
# Loop through all rows of the spreadsheet and update the journals (skipping row 1, the heading row)
for r in range(2, sheet.max_row+1):
j = Journal.pull(sheet.cell(row=r, column=1).value)
if j is not None:
if sheet.cell(row=r, column=1).value != j.id or sheet.cell(row=r, column=2).value != j.bibjson().title:
# if title of the journal in the sheet and in the system do not match - ignore
writer.writerow(["Id of requested journal does not match its title. Id: " +
sheet.cell(row=r, column=1).value + ", journal ignored"])
else:
fulltext_url = sheet.cell(row=r, column=3).value
apc_url = sheet.cell(row=r, column=4).value
submission_url = sheet.cell(row=r, column=5).value
editorial_board_url = sheet.cell(row=r, column=6).value
review_process_url = sheet.cell(row=r, column=7).value
aims_scope_url = sheet.cell(row=r, column=8).value
author_instructions = sheet.cell(row=r, column=9).value
plagiarism_url = sheet.cell(row=r, column=10).value
oa_url = sheet.cell(row=r, column=11).value
license_url = sheet.cell(row=r, column=12).value
jbib = j.bibjson()
if fulltext_url is not None:
jbib.remove_urls("homepage")
jbib.add_url(fulltext_url, "homepage")
if apc_url is not None:
jbib.apc_url = apc_url
if submission_url is not None:
jbib.submission_charges_url = submission_url
if editorial_board_url is not None:
jbib.remove_urls("editorial_board")
jbib.add_url(editorial_board_url, "editorial_board")
if review_process_url is not None:
                        jbib.set_editorial_review(jbib.editorial_review["process"], review_process_url)
if aims_scope_url is not None:
jbib.remove_urls("aims_scope")
jbib.add_url(aims_scope_url, "aims_scope")
if author_instructions is not None:
jbib.remove_urls("author_instructions")
jbib.add_url(author_instructions, "author_instructions")
if plagiarism_url is not None:
jbib.set_plagiarism_detection(plagiarism_url)
if oa_url is not None:
jbib.remove_urls("oa_statement")
jbib.add_url(oa_url, "oa_statement")
if license_url is not None:
current_license = jbib.get_license()
jbib.set_license(license_title=current_license["title"],
license_type=current_license["type"],
url=license_url,
open_access=current_license["open_access"],
by=current_license["BY"],
sa=current_license["SA"],
nc=current_license["NC"],
nd=current_license["ND"],
embedded=current_license["embedded"],
embedded_example_url=current_license["embedded_example_url"])
j.save(blocking=True)
else:
# if journal's id is not found in the system
writer.writerow(["Journal not found: " + sheet.cell(row=r, column=1).value])
# finished
writer.writerow(["Finished."])
| apache-2.0 | 8,343,009,261,149,139,000 | 47.290323 | 119 | 0.498775 | false |
googleinterns/out-of-distribution | src/refinenet/adaptive_conv.py | 1 | 1676 | from typing import List
import torch
from torch import nn
from src.refinenet.residual_conv_unit import ResidualConvUnit
class AdaptiveConv(nn.Module):
conv_list: nn.ModuleList
rcus_list: nn.ModuleList
out_channels: List[int]
def __init__(self, in_channels_list: List[int], out_channels: int):
super().__init__()
self.init_layers(in_channels_list, out_channels)
self.out_channels = [out_channels] * len(in_channels_list)
def init_layers(self, in_channels_list: List[int], out_channels: int) -> None:
"""
Constructs the layers of this Adaptive Conv. A dimension-adapting conv layer (self.conv_list) is placed between
each input and its respective RCUs. This conv layer is not explicitly mentioned in the papers, but is in the
architecture diagram (https://github.com/guosheng/refinenet/blob/master/net_graphs/part2_cascaed_refinenet.pdf).
:param in_channels_list: the respective number of channels in each input tensor
:param out_channels: the number of channels in the output tensors
:return: None
"""
self.conv_list = nn.ModuleList([
nn.Conv2d(in_channels, out_channels, 3, padding=1)
for in_channels in in_channels_list
])
self.rcus_list = nn.ModuleList([
nn.Sequential(
ResidualConvUnit(out_channels),
ResidualConvUnit(out_channels)
)
for _ in in_channels_list
])
def forward(self, features_list: List[torch.Tensor]) -> List[torch.Tensor]:
return [rcus(features) for rcus, features in zip(self.rcus_list, features_list)]
| apache-2.0 | 9,180,643,431,226,882,000 | 37.976744 | 120 | 0.653341 | false |
salimfadhley/jenkinsapi | jenkinsapi_tests/unittests/test_job_folders.py | 3 | 7425 | import pytest
import mock
from jenkinsapi.jenkins import JenkinsBase
@pytest.fixture(scope='function')
def jenkinsbase():
return JenkinsBase('http://localhost:8080/', poll=False)
def test_called_in__poll(jenkinsbase, monkeypatch, mocker):
def fake_poll(cls, tree=None): # pylint: disable=unused-argument
return {
'description': "My jobs",
'jobs': [{
'name': "Foo",
'url': "http://localhost:8080/job/Foo",
'color': "blue",
}],
'name': "All",
'property': [],
'url': "http://localhost:8080/view/All/",
}
monkeypatch.setattr(JenkinsBase, '_poll', fake_poll)
stub = mocker.stub()
monkeypatch.setattr(JenkinsBase, 'resolve_job_folders', stub)
jenkinsbase.poll()
stub.assert_called_once_with(
[
{
'name': "Foo",
'url': "http://localhost:8080/job/Foo",
'color': "blue",
},
],
)
def test_no_folders(jenkinsbase):
jobs = [
{
'name': "Foo",
'url': "http://localhost:8080/job/Foo",
'color': "blue",
},
{
'name': "Bar",
'url': "http://localhost:8080/job/Bar",
'color': "disabled",
},
]
assert jenkinsbase.resolve_job_folders(jobs) == [
{
'name': "Foo",
'url': "http://localhost:8080/job/Foo",
'color': "blue",
},
{
'name': "Bar",
'url': "http://localhost:8080/job/Bar",
'color': "disabled",
},
]
def test_empty_folder(jenkinsbase, monkeypatch, mocker):
def fake_get_data(cls, url, tree=None): # pylint: disable=unused-argument
return {'jobs': []}
monkeypatch.setattr(JenkinsBase, 'get_data', fake_get_data)
spy = mocker.spy(jenkinsbase, 'get_data')
jobs = [
{
'name': "Folder1",
'url': "http://localhost:8080/job/Folder1",
},
]
assert jenkinsbase.resolve_job_folders(jobs) == []
spy.assert_called_once_with(
'http://localhost:8080/job/Folder1/api/python',
tree='jobs[name,color]'
)
def test_folder_job_mix(jenkinsbase, monkeypatch, mocker):
def fake_get_data(cls, url, tree=None): # pylint: disable=unused-argument
return {
'jobs': [
{
'name': "Bar",
'url': "http://localhost:8080/job/Folder1/job/Bar",
'color': "disabled",
}
]
}
monkeypatch.setattr(JenkinsBase, 'get_data', fake_get_data)
spy = mocker.spy(jenkinsbase, 'get_data')
jobs = [
{
'name': "Foo",
'url': "http://localhost:8080/job/Foo",
'color': "blue",
},
{
'name': "Folder1",
'url': "http://localhost:8080/job/Folder1",
},
]
assert jenkinsbase.resolve_job_folders(jobs) == [
{
'name': "Foo",
'url': "http://localhost:8080/job/Foo",
'color': "blue",
},
{
'name': "Bar",
'url': "http://localhost:8080/job/Folder1/job/Bar",
'color': "disabled",
}
]
spy.assert_called_once_with(
'http://localhost:8080/job/Folder1/api/python',
tree='jobs[name,color]'
)
def test_multiple_folders(jenkinsbase, monkeypatch, mocker):
def fake_get_data(cls, url, tree=None): # pylint: disable=unused-argument
# first call
if 'Folder1' in url:
return {'jobs': [
{
'name': "Foo",
'url': "http://localhost:8080/job/Folder1/job/Foo",
'color': "disabled",
},
]}
if 'Folder2' in url:
# second call
return {'jobs': [
{
'name': "Bar",
'url': "http://localhost:8080/job/Folder2/job/Bar",
'color': "blue",
},
]}
monkeypatch.setattr(JenkinsBase, 'get_data', fake_get_data)
spy = mocker.spy(jenkinsbase, 'get_data')
jobs = [
{
'name': "Folder1",
'url': "http://localhost:8080/job/Folder1",
},
{
'name': "Folder2",
'url': "http://localhost:8080/job/Folder2",
},
]
assert jenkinsbase.resolve_job_folders(jobs) == [
{
'name': "Foo",
'url': "http://localhost:8080/job/Folder1/job/Foo",
'color': "disabled",
},
{
'name': "Bar",
'url': "http://localhost:8080/job/Folder2/job/Bar",
'color': "blue",
},
]
assert spy.call_args_list == [
mock.call(
'http://localhost:8080/job/Folder1/api/python',
tree='jobs[name,color]'
),
mock.call(
'http://localhost:8080/job/Folder2/api/python',
tree='jobs[name,color]'
),
]
def test_multiple_folder_levels(jenkinsbase, monkeypatch, mocker):
def fake_get_data(cls, url, tree=None): # pylint: disable=unused-argument
if 'Folder1' in url and 'Folder2' not in url:
# first call
return {'jobs': [
{
'name': "Bar",
'url': "http://localhost:8080/job/Folder1/job/Bar",
'color': "disabled",
},
{
'name': "Folder2",
'url': "http://localhost:8080/job/Folder1/job/Folder2",
}
]}
if 'Folder2' in url:
# second call
return {
'jobs': [
{
'name': "Baz",
'url': (
"http://localhost:8080/job/Folder1/"
"job/Folder2/job/Baz"
),
'color': "disabled",
},
]
}
monkeypatch.setattr(JenkinsBase, 'get_data', fake_get_data)
spy = mocker.spy(jenkinsbase, 'get_data')
jobs = [
{
'name': "Foo",
'url': "http://localhost:8080/job/Foo",
'color': "blue",
},
{
'name': "Folder1",
'url': "http://localhost:8080/job/Folder1",
},
]
assert jenkinsbase.resolve_job_folders(jobs) == [
{
'name': "Foo",
'url': "http://localhost:8080/job/Foo",
'color': "blue",
},
{
'name': "Bar",
'url': "http://localhost:8080/job/Folder1/job/Bar",
'color': "disabled",
},
{
'name': "Baz",
'url': ("http://localhost:8080/job/Folder1"
"/job/Folder2/job/Baz"),
'color': "disabled",
},
]
assert spy.call_args_list == [
mock.call(
'http://localhost:8080/job/Folder1/api/python',
tree='jobs[name,color]'
),
mock.call(
'http://localhost:8080/job/Folder1'
'/job/Folder2/api/python',
tree='jobs[name,color]'
),
]
| mit | 736,008,613,852,080,000 | 26.197802 | 78 | 0.443232 | false |
Outernet-Project/librarian | librarian/routes/firmware.py | 1 | 1624 | import logging
from bottle_utils.i18n import lazy_gettext as _
from streamline import XHRPartialFormRoute, RouteBase
from ..core.contrib.templates.renderer import template
from ..core.exts import ext_container as exts
from ..forms.firmware import FirmwareUpdateForm
from ..helpers.firmware import update_firmware, FIRMWARE_UPDATE_KEY
from ..utils.route_mixins import JSONResponseMixin
class FirmwareUpdate(XHRPartialFormRoute):
name = 'firmware:update'
path = '/firmware/'
template_func = template
template_name = 'firmware/update'
partial_template_name = 'firmware/_update'
form_factory = FirmwareUpdateForm
def get_bound_form(self):
form_factory = self.get_form_factory()
return form_factory(self.request.files)
def form_invalid(self):
return dict(saved=False)
def form_valid(self):
exts.cache.set(FIRMWARE_UPDATE_KEY, 'processing')
firmware = self.form.processed_data['firmware']
try:
path = exts.config['firmware.save_path']
exts.tasks.schedule(update_firmware, args=(firmware, path))
except Exception:
logging.exception('Firmware upload error.')
# Translators, shown when firmware upload failed
return dict(saved=False,
message=_('Firmware upload failed.'))
else:
return dict(saved=True)
class FirmwareUpdateStatus(JSONResponseMixin, RouteBase):
name = 'firmware:status'
path = '/firmware/status/'
def get(self):
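        # Responds with e.g. {"status": "processing"} while the scheduled
        # update_firmware task runs -- the value comes straight from the
        # FIRMWARE_UPDATE_KEY cache entry set by FirmwareUpdate above.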
status = exts.cache.get(FIRMWARE_UPDATE_KEY)
return dict(status=status)
| gpl-3.0 | 2,844,891,010,243,496,000 | 32.142857 | 71 | 0.678571 | false |
mhvlug/ical2mailman | ical2mailman.py | 1 | 3412 | #!/usr/bin/env python
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import datetime
import icalendar
import re
import robobrowser
import time
import urllib2
import yaml
def next_meetings(count=3):
"""Find the next N meetings from our ical.
After getting the ical, run through it looking for Events,
which are in the future, and include 'meetings' in the url,
which means they are Drupal meeting types, and not other
kinds of events like Lunch or Conferences.
Because we know that July (7) and August (8) we'll be at
Lourdes, add an annotation to the events in those months. People
    seem to use the email footer for more info than I'd expect, so
    hopefully this means fewer people get lost.
"""
f = urllib2.urlopen("https://mhvlug.org/calendar/ical")
ical = f.read()
cal = icalendar.Calendar()
cal = cal.from_ical(ical)
now = datetime.datetime.now()
found = 0
meetings = []
for event in cal.subcomponents:
if found >= count:
break
if type(event) != icalendar.cal.Event:
continue
# oh time...
dt = event['DTSTART'].dt
then = datetime.datetime.fromtimestamp(time.mktime(dt.utctimetuple()))
if then < now:
continue
if re.search('meetings', event['URL']):
meeting = (" %s - %s" % (
dt.strftime("%b %e"), event['SUMMARY'].title()))
            if dt.month in (7, 8):
                meeting += " @ Lourdes"
meetings.append(meeting)
found += 1
return meetings
def update_mailman(meetings, passwd=""):
"""Log into mailman and update the footer with meetings.
    Using RoboBrowser, log into the Mailman admin interface, strip
    off the end of the footer, and replace it with the updated list of meetings.
The text for this is hardcoded based on our needs, but it's at least
a pretty good example of how to do it.
"""
br = robobrowser.RoboBrowser()
br.open("https://mhvlug.org/cgi-bin/mailman/admin/mhvlug/nondigest")
form = br.get_form(action='/cgi-bin/mailman/admin/mhvlug/nondigest')
form['adminpw'].value = passwd
br.submit_form(form)
# Now we are logged in
br.open("https://mhvlug.org/cgi-bin/mailman/admin/mhvlug/nondigest")
form = br.get_forms()[0]
cur_footer = form['msg_footer'].value.split("Upcoming Meetings")[0]
cur_footer += ("Upcoming Meetings (6pm - 8pm) "
"Vassar College *\n")
for meeting in meetings:
cur_footer += meeting + "\n"
form['msg_footer'].value = cur_footer
br.submit_form(form)
def load_conf():
    with open("config.yaml") as conf_file:
        return yaml.safe_load(conf_file)
def main():
conf = load_conf()
meetings = next_meetings(int(conf['entries']))
update_mailman(meetings, passwd=conf['pass'])
if __name__ == '__main__':
main()
| apache-2.0 | -8,760,569,111,229,455,000 | 31.807692 | 79 | 0.646249 | false |
Huyuwei/tvm | rust/runtime/tests/build_model.py | 2 | 2571 | #!/usr/bin/env python3
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""Builds a simple NNVM graph for testing."""
from os import path as osp
import nnvm
from nnvm import sym
from nnvm.compiler import graph_util
from nnvm.testing import init
import numpy as np
import tvm
CWD = osp.dirname(osp.abspath(osp.expanduser(__file__)))
def _get_model(dshape):
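    # Toy graph: dense (16 -> 32 units), split the result into two 16-wide
    # halves along axis 1, then return (left + 1) and (right - 1) grouped as
    # a two-output model.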
data = sym.Variable('data', shape=dshape)
fc1 = sym.dense(data, units=dshape[-1]*2, use_bias=True)
left, right = sym.split(fc1, indices_or_sections=2, axis=1)
return sym.Group(((left + 1), (right - 1)))
def _init_params(graph, input_shapes, initializer=init.Xavier(), seed=10):
if isinstance(graph, sym.Symbol):
graph = nnvm.graph.create(graph)
ishapes, _ = graph_util.infer_shape(graph, **input_shapes)
param_shapes = dict(zip(graph.index.input_names, ishapes))
np.random.seed(seed)
params = {}
for param, shape in param_shapes.items():
if param in {'data', 'label'} or not shape:
continue
init_value = np.empty(shape).astype('float32')
initializer(param, init_value)
params[param] = tvm.nd.array(init_value)
return params
def main():
dshape = (32, 16)
net = _get_model(dshape)
ishape_dict = {'data': dshape}
params = _init_params(net, ishape_dict)
graph, lib, params = nnvm.compiler.build(net, 'llvm',
shape=ishape_dict,
params=params,
dtype='float32')
with open(osp.join(CWD, 'graph.json'), 'w') as f_resnet:
f_resnet.write(graph.json())
with open(osp.join(CWD, 'graph.params'), 'wb') as f_params:
f_params.write(nnvm.compiler.save_param_dict(params))
if __name__ == '__main__':
main()
| apache-2.0 | 7,480,344,979,134,752,000 | 35.211268 | 74 | 0.651886 | false |
informeren/qgis-cartogram | resources_rc.py | 1 | 16075 | # -*- coding: utf-8 -*-
# Resource object code
#
# Created: Thu Jul 23 16:26:35 2015
# by: The Resource Compiler for PyQt (Qt v4.8.6)
#
# WARNING! All changes made in this file will be lost!
from PyQt4 import QtCore
qt_resource_data = "\
\x00\x00\x0e\x0d\
\x89\
\x50\x4e\x47\x0d\x0a\x1a\x0a\x00\x00\x00\x0d\x49\x48\x44\x52\x00\
\x00\x00\x40\x00\x00\x00\x40\x08\x06\x00\x00\x00\xaa\x69\x71\xde\
\x00\x00\x0d\xd4\x49\x44\x41\x54\x78\x5e\xe5\x5a\x5b\x8b\x25\x59\
\x56\xfe\x56\xdc\xe3\xc4\xb9\xe4\xc9\x5b\x55\xb5\x5d\x63\x8f\xad\
\x83\x32\x0f\xc2\xe0\xc3\xbc\xd9\x8d\x4a\xfb\x62\xb7\x42\xd3\x0c\
\x8e\x88\x17\x18\x47\x9a\x79\x90\xf9\x03\xed\x48\x3f\xf8\xa2\x20\
\x6a\xf7\x4b\x81\xbe\x09\x03\x63\x43\x83\x62\x8b\xe2\x65\x44\x1b\
\xda\x11\x1d\xd4\xb1\x9c\x5b\xd7\x4c\x56\x66\xe5\xc9\x3c\x99\xe7\
\x92\xe7\x16\xb1\xf7\x76\x9f\x15\x27\x17\x44\xc4\x09\xb2\x07\x34\
\x52\xe8\x55\xec\xda\x71\x76\xec\x88\x3a\xdf\xb7\xd6\xfe\xf6\xda\
\xab\x0e\x01\x30\xf8\xff\x6b\x84\xff\x63\xf3\xb0\xb1\x56\x0c\xb8\
\x2e\x60\x74\xf1\x5f\x35\x95\x0b\x31\x19\x26\x07\x98\x5e\x41\xac\
\x9d\x00\xd0\xb5\x8f\xd4\xdf\x70\x00\xa5\x81\xe5\x12\x30\x32\xa7\
\x01\x02\xe2\x18\x38\xff\x56\x0e\x5e\x1b\xe9\xd9\x8c\x01\xb4\x96\
\x9e\x1b\x51\xde\xcb\xb8\x6d\xf7\x3f\x0e\xb1\x7f\xfb\x07\x99\x0f\
\x40\xde\xc9\x63\x4a\x0b\xb8\xe2\x3b\xb4\x8c\xe1\x27\x5e\x02\x26\
\xd3\x06\x09\x70\xa9\x06\x3c\x5f\x57\xc0\x0b\x70\x23\xe0\x50\xb0\
\x2a\xf8\xbc\xd7\x25\xf0\xc6\x14\xe6\xca\x67\xcf\x45\x63\xe6\x30\
\xeb\x28\x82\x37\xe2\xd9\xa2\x97\x88\xaa\x84\x08\x78\x31\x21\xaa\
\xfa\x4e\x01\x5f\x6c\x40\xe9\x9d\xa6\x51\x02\x8a\xde\xae\x86\xa7\
\x34\x01\xac\xcb\x73\x2b\xfa\x20\x73\x0b\xc0\x8a\xef\x6d\x1e\x7c\
\xbd\x08\x16\x42\xb9\x10\xae\xe2\x4d\x21\xc1\x6c\x9d\x5b\x5d\x02\
\xe5\xb0\x27\xb9\xae\x01\x2f\xf3\x6f\x83\x80\x22\x20\x23\x6d\x0b\
\x21\x02\xa8\xde\x6b\xba\x36\xec\x3f\x18\x78\xd3\x3c\x01\x25\x40\
\x02\x5e\xae\x45\xf0\x6a\xbd\x29\x56\x12\x53\xc3\xfd\x8d\x9e\x17\
\x07\x70\xdf\xb4\x06\xd4\xaf\x79\x69\x22\x78\x54\x19\xaf\x98\x91\
\xf1\xb2\x68\xd6\x7b\xde\x60\xd3\x37\x4f\x80\x80\x2d\x7a\x08\x5b\
\xb6\x3a\xaa\x0d\xe5\x82\xe9\x42\x94\x94\xc9\x15\xe0\x02\x9e\x04\
\xbc\x44\xcd\x2d\x68\x80\xa9\xee\xf3\xd5\xf0\x2c\x6b\x41\x4d\xd8\
\x9a\x1a\xdd\xc0\x76\xc1\x53\xb2\xed\xde\xe2\x12\x10\x60\xb5\x6a\
\x5f\xf6\x60\x51\xf0\xc4\x64\x6e\x1d\xf8\x1a\xcf\x73\xbb\x45\x02\
\x44\xdc\x94\x78\xa9\xb4\xd5\x15\x01\x50\x49\xed\xc5\x04\xe8\xcd\
\xe0\x95\x84\xfd\xad\x80\x17\x02\xc4\x4c\x29\x0a\x40\x37\x84\xb2\
\xd9\x1e\x01\xd5\xb9\x25\xb2\x6a\xc1\xcb\xbb\x6e\x4d\x03\xea\x13\
\xa2\xca\x19\xa1\x2e\x73\xbb\x79\xcd\x1b\xb3\x1d\xbc\x08\x65\xf3\
\x04\x94\xc0\x57\xf7\xf9\x6a\xd8\xd7\x8a\x60\x2d\x78\x19\xab\x82\
\xbf\xe5\x4c\xd0\x75\x2a\x49\x8e\x10\xe0\x6c\xce\xf6\x0e\x18\x74\
\xee\x9e\xcd\x7d\x17\x80\x49\x01\x84\x28\x58\x05\xbc\x90\x79\x23\
\x78\xbe\x4f\xd4\x6c\xc5\xc5\xf8\x3e\x90\xa6\x1f\xce\x8a\x90\x03\
\x30\xf8\x0f\xad\x39\xf8\x10\x9b\x68\x40\x12\x03\x5f\xfd\xc7\xed\
\x82\x67\x4c\x71\xcb\x02\xaa\xdb\xa2\xbb\x03\x7c\xe2\x13\x10\xfb\
\xfb\x3f\xaf\x3f\x53\xa0\xf4\x5e\xa9\x1b\xc8\x38\xf0\xcb\xaf\x02\
\xe3\x49\x83\x04\x68\xd4\xab\xbd\x80\x65\xf0\xdb\x33\xbc\x74\x58\
\x15\x40\xa0\x7e\x9f\xd7\x35\xa4\xdc\x62\x45\x68\x7b\x7a\x6b\xc4\
\xd3\x37\x66\x78\x05\x23\xaa\x07\x2f\x20\xab\xe0\x6f\x3f\x15\x26\
\xf1\x5a\x11\x00\x70\x63\x6e\x5f\x30\xad\x6e\x06\xaf\x6c\xa3\xc2\
\x3b\x6e\x9b\x80\x82\xe7\x6f\x2e\x58\xd6\x97\xb1\xca\x21\xbe\x15\
\xbc\xa4\xc3\x72\x5f\x09\xf8\x5b\x4a\x85\x6b\xd7\x7c\x4d\xfd\xde\
\xd4\xa5\xc3\x45\xf0\xa0\x2a\x78\x16\x40\xb5\x2d\xf4\x2b\x24\xb8\
\x10\x83\x91\x9e\xdb\xcd\xd8\x92\xe4\x70\x8f\x68\xb6\x0f\xe0\x60\
\xa5\x54\xbf\xd3\x4a\x3e\xb9\x5a\x2d\x3f\xdb\xed\x76\x5f\x38\x3a\
\x3a\xfa\x27\x00\x4e\xe9\x2c\x50\x5d\xf3\xd5\xf1\x1b\x43\x56\xc6\
\x85\x58\x25\xaa\x2f\xa4\x96\xd3\x1c\xc7\xd9\x90\x84\x02\x3c\x85\
\x2d\xf6\xde\x7b\x5f\xf9\xc2\xd1\xd1\x77\x22\x0b\xa2\x75\x7a\x3a\
\x88\xce\xce\xce\x5a\x4f\x9e\x9c\x76\x86\xc3\xf3\xf6\xf9\xf0\xa2\
\x33\x1e\xd9\x36\x1e\x77\x96\xcb\x79\x94\xa6\x59\x90\xa6\x69\xa8\
\x94\xf2\xce\xe7\x73\x38\x8e\xa3\x5f\x79\xe5\x95\xcf\x3e\x78\xf0\
\x60\x4d\x80\x44\x80\x78\xc6\x48\xd8\xdf\x08\x5e\x80\x18\x5d\x57\
\x11\x2a\xcc\x97\x42\xc9\xf1\x13\x17\xa7\x83\x1c\x9b\x52\x24\xd5\
\xa7\xd9\x1c\x18\x8d\x81\x95\xca\x9f\x7f\xf5\xd5\xcf\x7d\x89\x60\
\x8c\xf5\x9a\x73\x7c\x7c\xdc\xb5\x60\x7b\x97\x97\x17\xed\x97\x5e\
\xfa\x99\xd6\x62\xb1\x88\x96\xcb\x65\x64\xfb\x38\xcb\x32\x0f\x25\
\x73\x3d\x1f\x7e\xd8\x82\x9f\xf4\x91\x04\x31\x5a\x9d\x5d\x33\x7c\
\xf2\xc8\xcc\xc6\x03\xe7\xe2\x62\x94\x20\x37\xf2\x04\xec\x07\xf1\
\x7c\x7d\x6e\x5f\x5f\x11\xe2\x56\x24\xf9\x8b\x6f\xf9\xf8\x9b\x2f\
\x6b\x10\x19\x98\xfa\x3d\x0f\x7f\xf0\xfb\xbf\xf7\x73\xd8\x62\xae\
\xbf\x06\x17\xc3\x0f\xda\xd8\xdd\x79\x0a\x71\x6f\xdf\x24\xed\x3e\
\xe2\xce\x2e\x5a\x49\x17\x61\xbb\x87\x56\x7b\x17\x61\xd4\x81\x17\
\xc5\x70\x5d\xcf\x7e\xee\xd3\xdf\x7e\xe9\x77\xf1\x5f\xef\xbd\x83\
\xf1\x78\xd4\x29\x69\xc0\x07\x07\x6f\x0a\xe1\x5d\x2b\x82\x55\xc1\
\xbb\x5e\x06\x26\xc0\xe5\x98\x98\xa5\x3b\xf7\x3f\x86\xb8\xb3\xcf\
\xee\x76\x5c\xdf\x36\x87\xbf\x2c\x91\x6d\xae\xbb\x06\x69\xe6\xe3\
\x33\x3c\xfc\x97\xbf\x46\xdc\xdd\xc5\x27\x5f\xf8\x15\x24\xbd\x03\
\x84\xad\x0e\xe2\x56\x17\x5e\x1c\xc3\xf3\x5b\xf0\x5c\x87\x40\x0e\
\x80\x9c\x50\xa3\x15\x54\x96\xc2\x28\x0d\xa5\x53\xa8\xd5\x12\x59\
\x3a\x47\x64\x9f\x59\x9b\x8d\xa2\x6b\x02\x8c\x10\xf0\xbd\xac\x79\
\xa2\xf2\x56\x57\x27\x82\xd5\x5a\xe2\x74\x0a\x8c\x2e\x32\x0b\xd6\
\xc3\x0b\xbf\xf8\x5b\x38\xfc\xc8\x8f\x20\x5d\xcd\x40\xe4\xc8\xff\
\xc0\xf0\x1f\xa5\x10\x26\x5d\x7a\xf8\xcf\x7f\xc9\x04\xec\xdf\x79\
\x06\x3f\xfa\xe3\x2f\x23\x5d\xce\xa1\xb5\x82\xce\xb2\x1c\xa8\x5a\
\x61\x99\x2a\x18\xad\xf3\xc6\xcf\x6b\x40\x23\xef\x19\x47\x0e\x24\
\x4c\x7a\xdc\x8f\x46\xe3\x76\x85\x80\x0f\x2e\x78\x15\xf0\xd5\x8a\
\x90\x2e\x94\xbd\x2a\x04\x9c\x59\x02\xfc\xb0\x8b\xa0\xd5\xc6\x6a\
\x3e\x41\x9a\xad\x72\xec\x9b\x33\xb9\x36\x1a\x4a\xa5\xec\xd1\xd1\
\xe0\x88\x49\x8c\xda\x3b\x58\xcd\x26\x58\xcc\xa7\x4c\x16\x83\x83\
\x23\x02\x44\x20\x39\x47\xc3\x38\x30\xa4\x41\x86\x78\x1e\x53\xa2\
\x35\xe2\x76\x1e\x01\x57\x57\xd3\x0e\x64\x26\xdb\xf7\x24\x78\x55\
\xf0\xb6\x89\x09\xe8\x02\x78\xd9\xf6\xa6\x33\x17\xd3\x2b\x83\x4e\
\xff\x00\x61\xdc\x86\xd2\x1a\x8e\x43\x39\x04\x22\x80\xc0\xbd\x03\
\x62\x80\xf3\xd9\x98\x31\xc5\x9d\x3e\x5c\x2f\x00\x64\xce\x26\xe4\
\x37\x82\x63\x8c\xb6\x8d\x7b\x6e\x28\x5d\xeb\x2c\x45\x9c\xf4\xe1\
\x38\xbe\x75\xc2\xb4\xf5\xdc\x73\xcf\x45\xa5\x54\xb8\x5e\x08\xb5\
\x6c\x65\x5b\xf2\x78\xb3\x2d\x0f\xa8\x8f\x9e\xf3\x21\xf1\xfc\xf6\
\xce\x1d\x78\x7e\x08\xad\x53\xfe\x0c\x53\x06\x01\x0e\xf5\x99\xd5\
\x00\x00\x6b\x11\x93\xf5\x0d\x40\xe6\x09\x78\xc8\xf3\xa5\x77\x19\
\x89\x80\x30\x6a\xc3\x8f\x42\x64\x59\x16\x3d\x7a\xf4\xe8\x50\x08\
\x70\xb0\xdd\xf3\x72\x5a\x93\xf1\x92\xe7\x49\xe6\x95\xac\x9a\xe1\
\x11\xe5\xfd\xc9\x00\x6c\xdd\xfe\x5d\xd6\x81\x4d\x0a\x2a\xe0\x61\
\xae\xd7\xb2\x82\xce\x56\x98\x4f\x2e\x78\x7e\x10\x26\x50\x3a\x2b\
\x90\x54\x00\xaa\x2b\xe0\xa5\x07\x88\x89\xe6\x6d\x31\x4c\x90\xae\
\x56\x21\x11\x1d\x14\x4e\x83\x35\x6a\x2f\xe0\xab\x61\x5f\x1c\xab\
\x07\x5f\x1c\x3b\x1b\x12\x0f\x24\xbb\x7b\x80\x31\xac\xd6\xe4\x5c\
\x8b\x1f\xe0\x10\xf1\x4e\xe0\x79\x01\x5c\x3f\xc0\x6c\x7c\x9e\x13\
\xb6\x7f\x0f\x9e\x1b\xf0\x6e\xc1\xc0\x94\x82\xbe\x0e\x71\x2d\xd9\
\xd3\x16\xf0\xd8\x2c\x01\xcd\xef\x0b\x82\xc8\x5c\x29\x15\x4c\x26\
\xb3\x7d\x21\x60\xb1\x00\x7e\xf2\x67\x01\xcf\x13\xf1\x2a\x2f\x6b\
\x19\xa8\x0e\x55\x6b\x78\xbf\xfa\x39\x01\x5e\xcd\x0f\x28\xbf\xd1\
\xdf\xbd\x0f\x2f\x88\x10\xc5\x1d\x0b\xca\x65\x20\x69\xba\x44\xba\
\xb8\xb2\x5e\xbf\xc4\x7c\x36\xc1\xf9\xc9\xd7\x31\x38\xfe\x06\xcf\
\x1f\x1c\xfd\x37\xa2\x56\x0f\x41\x18\x23\x6c\x75\xd9\x9b\xe4\x10\
\xb4\xca\x78\x47\xc8\x54\x6a\x7b\xdb\x94\x2a\xe8\x00\x88\x84\x10\
\x4b\x20\x85\xc9\x8e\xfd\xf8\xbe\x43\xa4\x77\x85\x00\xe0\x7f\xf7\
\x37\x39\xa3\x31\xb6\x5a\x10\x04\x30\x26\x03\x39\x2e\x9c\x30\xc2\
\xf1\xb7\xff\x1d\xe7\x16\xe0\xe4\xfc\x31\x2e\xcf\x4f\x30\x1d\x1e\
\x63\xbc\x6e\xf6\x3a\x5d\xcd\x0b\xcf\xbe\xfb\x67\x0f\xf0\x2e\x1e\
\x20\x4e\xba\x68\xef\x3d\x85\xde\xee\x3d\x74\x76\xef\xa0\xb7\xf7\
\x34\x0b\x6a\xdc\xee\x23\x8c\x12\xf8\x51\x2b\x77\xa0\x51\x50\x4a\
\x31\x29\x2a\xcb\xc9\x31\xae\x8f\x78\xb3\x15\x2e\x97\xe9\xbd\x6b\
\x02\xa8\xa9\xe2\x6b\x1c\xc7\x1f\x75\x1c\xe7\x2b\x97\x97\x97\x3b\
\x7f\xf1\xc7\xbf\x69\xb2\xd5\x8c\xcc\x96\x50\xf1\x83\x18\xbb\x77\
\xbe\xdf\x0a\xe5\x21\x8e\xdf\xff\x0f\x8e\x8a\xc3\xfb\x3f\x8c\x74\
\x39\xc3\xe8\xec\x08\xf3\x47\x5f\xc3\xc0\x36\x31\x72\x10\xc5\x09\
\x3a\x56\x58\xbb\x7b\xf7\x90\xf4\x6d\x6f\xc9\x69\xf7\x0e\x11\x77\
\x76\x58\x3f\x38\xda\x5a\x1d\x74\xed\xbd\xb5\x1d\xdc\x39\x78\xf9\
\xf2\x72\xf8\x3b\x5e\x93\xd5\xe7\xd9\x6c\x76\x40\x44\xb1\x31\xb9\
\x40\xf4\xf7\xef\xa3\xd5\xdb\xb3\x40\x0f\xd0\xdb\x7f\x9a\x3d\x9a\
\x74\xf6\xf8\x4b\x47\x71\x97\x33\xbe\x3f\xfd\xc3\xdf\xc0\xe9\xa3\
\xff\xc4\x8f\xfd\xd4\xa7\x71\xf7\x23\x1f\xc7\xf0\xc9\xfb\x98\x4d\
\x86\x18\x0f\x4f\x70\x39\xf8\x2e\x26\xc3\x23\x4c\x47\xe7\xb8\x1a\
\x9d\x61\xf0\xf8\xeb\xdc\x84\x17\x22\x5e\x36\xad\xce\x0e\xef\x3a\
\xfd\xbb\xcf\x60\x6c\xa3\x6d\x6d\xb3\xe9\x94\x93\xa1\x26\x09\x40\
\xd4\x6e\x1f\x4e\x2e\x2e\xc2\x83\xa7\x7f\xc8\xfc\xf4\x2f\x7d\x81\
\xc2\x30\x0f\x59\xd7\xf3\x40\x70\xa0\x39\xb3\xcb\x60\x6c\x4b\xd3\
\x15\x2b\x70\xdb\x12\x74\x0a\x60\x7a\x39\x80\xff\xb1\x04\xfd\x83\
\xfb\xd8\x7b\xea\xa3\x9b\xad\x53\x23\x5b\xad\xb0\x5c\x4c\x2d\x29\
\x17\x96\x84\x53\x4c\x2e\x6d\x1b\x9e\x70\xa4\x58\x52\x58\x4f\xce\
\x4f\xbe\xcd\xed\xfd\xaf\xbd\x2b\x00\xac\x33\x3a\xc6\x18\xf2\xd0\
\xa0\x7d\xfa\x53\x9f\xfa\xb5\x37\xdf\x78\x03\x7b\x77\x7f\x00\x07\
\x4f\xfd\x20\x7b\x52\x65\xa9\x05\xb1\x60\xf0\xb2\x0d\x02\xac\xf2\
\xf0\x43\xc4\xc9\x0e\x3f\x3b\xbf\x1a\xf3\x56\xb6\x5a\x6b\xc3\x0a\
\x1b\x91\xcb\xa9\xf5\xfc\xc0\x6a\xc2\x5d\xec\xec\x7f\x1f\x88\x93\
\x43\x83\xcc\x12\xb8\x5a\x0b\xea\xf4\x12\xd3\xd1\x00\x93\x8b\x53\
\xac\xcf\x15\x67\x27\xdf\xc4\xd9\xd1\x37\x60\x4f\x92\x6d\x22\x0a\
\x1b\x22\x80\x31\xe1\x3b\x8f\xbe\xdb\xe7\xa4\xa6\x77\x80\x74\xb5\
\x60\xd5\xb7\x9a\x00\xe0\x3a\xb3\xd3\x30\x60\x04\x39\x19\x44\x2c\
\x6e\x6b\x5b\x58\x4f\x1a\xa5\x25\xb4\x01\x4e\x87\x79\x9e\x56\x79\
\x7e\xa0\x35\x93\xc2\xe3\x3c\xc3\xf5\x58\x47\x3a\x96\x9c\x7b\xcf\
\x68\x38\x9e\x8f\xf3\xc7\xdf\xc4\x5f\xfd\xc9\x6f\xaf\xe7\xb2\x2e\
\x35\x4a\xc0\xd9\xd9\x80\x73\xf0\x4e\x6f\x1f\x8c\x81\x3d\x8e\x3c\
\x89\x81\x29\x64\x77\x0c\xd4\x68\xde\xf2\x00\xb0\x27\x39\x4a\x20\
\x59\x63\xe1\x39\x18\xc0\xa1\x9c\x14\x18\xe2\xfb\x3c\xdf\x28\x9e\
\x67\x49\xe2\x1a\x81\xd6\x86\x7b\x20\x0b\x7e\xe1\x33\x9f\xd9\x6d\
\x8c\x00\x6b\xf4\xec\xb3\xcf\xf6\x38\x02\x76\xf6\xa1\x95\xa4\xb4\
\x0c\x62\x03\x46\xc0\x49\xfa\x1a\x27\xb9\x68\x5d\x5d\xf0\x01\x89\
\xf2\xf9\x85\xe7\x60\x6a\x92\x20\x6e\x90\x08\x83\x01\x27\x59\xae\
\xeb\x23\x5d\x2e\xbd\x77\xde\x7a\xab\xe5\xa0\x21\x7b\xfe\xf9\xe7\
\x7b\x53\xab\xbc\xbe\x1f\xa1\xdd\x3b\x80\xca\xb2\x8a\x27\xa1\x73\
\x40\x92\xbd\x69\x85\x28\xe1\x08\x58\x67\x84\xac\x15\xe0\x53\x23\
\x83\xac\x05\x2f\xef\x94\x31\x23\x11\xe1\xb8\x2e\x47\x80\x1d\x23\
\xad\x3d\xaf\x31\x11\x7c\xf8\xf0\xe1\x5d\xbb\xf5\x84\x61\xdc\xe5\
\xad\x49\xab\xf4\x3a\x9c\x0b\x9e\x97\xa8\xd0\x06\x2a\x5b\x21\x88\
\xdb\x2c\x72\x8b\xc5\x15\xd7\x02\xfc\xa0\x95\x87\xb5\xd9\xf2\x8c\
\x80\x97\x6b\x39\x27\xf0\x18\x29\x4e\xc2\x5c\x2f\x58\x8f\xf9\x71\
\xec\x1e\x36\x16\x01\x5a\xd3\xbd\xf9\x72\x19\x87\x49\xdb\x82\x4a\
\x58\xfd\x0d\xe4\x50\x23\x9e\x17\x8f\x81\x3d\xc6\x80\x83\xa8\x8d\
\xcc\x82\x5f\x2d\x66\xac\x1d\xf2\x8c\x6d\xf5\xe0\xe5\xbd\xf9\x35\
\xf8\xfc\xc0\x02\xea\xfa\x3e\x53\x6e\xeb\x8c\xa6\x31\x02\x5e\x7e\
\xe5\xe5\x5f\x57\x4a\xb9\x71\xa7\x6f\x3c\xd7\x27\x75\x7d\xbe\x96\
\x2f\x69\x0a\x40\x28\x3f\x0e\xb3\xf7\xc3\xb8\xcd\xe1\xbf\x98\x8d\
\x61\x88\x04\x14\x60\x6a\xc0\xe7\xe3\xba\xb4\x0c\xb4\xd1\x79\x1a\
\x4e\xae\xb1\xe6\x1e\x1e\x1e\xee\x37\xb6\x04\x8e\x8f\x1f\xb3\x9c\
\x77\xfa\x77\xb9\xde\x67\xb4\x02\x81\x24\x4c\x41\x28\x1e\x69\xa1\
\x01\x05\x78\x9e\xcf\x07\x20\x00\x4c\x00\x01\x39\x10\xbe\x42\x0d\
\x78\xd1\x87\xeb\x25\x20\x11\x41\x30\x44\x5e\x60\x00\xd0\xe3\xb3\
\x0b\xbf\xb1\x08\x38\x3d\x79\xc2\x1b\x7a\xbb\x7f\x00\xe4\xea\x2d\
\x6a\x0f\x94\xc0\x4b\x41\x24\x03\x39\x0e\xa2\x3c\x19\xe2\xac\x8e\
\xf2\xb9\x05\xe1\xab\x03\xaf\x19\x74\x0e\x5e\xe6\x03\xf0\xfd\x80\
\x3f\xdf\xbf\x73\xe7\xe7\x9b\x8a\x00\xc9\x01\xda\x9d\x3d\x40\x5d\
\x0b\x1f\x72\xd0\x30\x5b\x8b\x19\x9a\x81\x12\xa2\xce\x8e\xe4\x02\
\x05\x30\x46\xdf\x04\x5e\x96\x01\x01\x72\xcf\x0b\xa3\xcd\x89\x70\
\x96\x34\x42\xc0\x6b\xaf\xbd\xe6\xbd\xf9\xe6\x9b\x7d\x10\x71\x79\
\x5b\xe9\x2c\xf7\x0c\x9c\x5a\xf0\x46\x44\x4e\x23\x8e\xbb\xf9\x17\
\x9e\x8f\xa1\x65\xdc\xd4\x83\x97\x6d\x50\xbc\xcf\x63\x3a\xff\xbc\
\x49\x84\x80\x2c\x53\x68\x44\x04\xdf\x7e\xfb\xed\x1d\x9b\x03\x24\
\x41\xd4\xe2\xc2\xa4\x52\x5c\x07\xac\x01\x2f\x5f\x5e\xc6\xc2\x4d\
\x2e\xb0\x98\x8e\xa0\x15\x8b\x67\x49\xed\xeb\xc1\x13\x00\x89\x14\
\x9d\x47\x84\xe7\x85\x1b\x02\xd2\xae\xd7\x4c\xf8\x9f\xdd\x5f\x2c\
\x16\x61\xb2\x73\x88\xa0\xd5\xe1\xb4\x94\x88\x0a\xe0\x4b\x45\x4c\
\xd1\x04\xad\x34\x17\x3a\x98\x80\xf9\x88\x73\x03\xc9\x1b\x4a\x22\
\xc7\x64\x96\x48\x31\x84\x52\x5e\xc0\x3b\x0b\x03\x48\xd3\xd4\x69\
\x24\x02\x5e\x7c\xf1\xc5\xcf\x2b\xa5\xfc\x70\x5d\xd2\x0a\x5a\x50\
\x59\x26\x1e\x2b\x00\x2f\x85\x32\x98\x00\x2e\x66\xe6\x04\x5c\x8d\
\x91\xad\x96\xc8\xf1\xeb\xf2\x5c\x59\xf3\xd2\x17\xb3\x46\x69\x8e\
\xeb\x6f\x72\x13\x1d\x36\x42\xc0\xe9\x60\xd0\xb6\xe0\xc8\x96\xb6\
\x8d\xeb\x3a\x04\xa3\xca\xe9\x6b\x19\xbc\x00\xd4\x4a\x21\x88\x12\
\x2e\xa1\xcf\xa6\x97\x5c\x2a\xe3\xe8\xd1\xc5\xb0\xd7\x5a\x04\x0f\
\xda\x54\xc0\x8b\xf7\x8d\x52\x70\x3d\x21\x20\x6a\x84\x80\x93\x27\
\xf9\x16\x98\xf4\xf6\x01\x22\x28\xad\x60\xd6\xcd\xe8\x7a\xf0\x92\
\xcc\x64\xf0\x83\x08\x7e\x9c\x20\x4b\x53\xac\x96\x33\x00\x42\x50\
\x59\xf0\x64\xbd\x57\xc0\x6b\xb5\xd1\x00\x05\x22\x47\x32\xc8\x46\
\x34\xe0\x7c\x30\x60\x15\xeb\xed\xdd\xe3\x33\x3a\xc1\x01\x08\x22\
\x4a\xd0\x0a\xba\x22\x82\x9c\x08\x40\xc3\x70\xc5\x28\x8a\x12\xcc\
\xc7\x43\xde\x09\xda\x3d\x39\x4d\x16\xe6\xb3\x87\xb5\x29\x12\x24\
\x04\x68\xd6\x13\x50\xb6\xa9\x41\x10\x94\x52\xed\x46\x08\x98\x4c\
\x26\x1d\x22\x32\x8e\xe3\x61\x36\x1e\x1a\xa5\x52\x3e\x92\x92\xe3\
\xc2\x73\x5c\x02\x05\x72\xaa\xd3\xb9\xa7\xc4\x5b\x80\xb1\x73\x03\
\x84\x71\x67\xb3\x15\x5e\x61\xfd\x1e\xd0\x75\x2e\xe8\x00\x0e\xe4\
\xe7\x6d\xec\x5c\x06\xec\x30\xa9\x0e\x14\x8c\x21\x18\x10\xe0\x81\
\xc1\x7b\x41\x0c\x27\xcf\x46\x9b\x29\x88\x0c\x87\xc3\x43\x63\x0c\
\xbd\xfb\xce\x1f\xe1\x5f\xff\xee\x8b\x70\xfd\x10\x7e\x10\xc2\xf5\
\xc2\xb5\x22\x1b\x3f\x4c\xf8\x80\xe4\x87\x31\x82\x75\x0b\x5a\xf0\
\x82\x90\xe7\xb1\xf7\xe3\x1d\x26\x0b\x00\xd9\x3a\x9f\x59\xce\xa6\
\x58\xcd\xa7\x9b\xdd\x42\xb1\x97\x39\xc4\x91\xef\x1a\x46\x67\x9b\
\x71\xc5\x9f\xb5\xb9\xfe\xaf\xf2\x0c\xb4\x49\xa8\xc8\x71\xec\x94\
\xac\xd7\x08\x01\xdd\x6e\x37\x05\x30\xd4\x4a\x85\x8b\xab\x0b\x47\
\x6b\xed\x1a\x63\x1c\xdb\xd6\x3d\xa1\xde\x40\x44\x5c\xd2\xd6\x2a\
\xe3\x01\xfb\x03\x07\xfa\xd6\x57\xbf\xcc\xe0\x58\x00\xc1\x15\xa4\
\x42\xe2\x64\x74\xb1\x58\x92\x97\xce\x80\xcd\x5f\xb2\xfe\x01\x34\
\xb3\x04\x5e\x7f\xfd\xf5\x37\x46\xa3\x11\xc6\xe3\x71\xeb\xe2\xe2\
\xc2\x9d\x5c\x5d\xc5\xe3\xcb\xb1\xb7\x5a\x2d\x82\xd1\x64\x1a\x2c\
\x97\x8b\xe0\x6a\x32\x69\xad\xd2\xd4\x9d\x4e\xa6\x5d\x63\x34\xd9\
\xbc\x21\xd1\x5a\x3b\xb6\x78\x99\xac\x49\x5a\x64\xba\x0f\x20\x76\
\xa0\x4e\xd5\x6a\x9a\x01\x20\x63\x0a\x60\x40\xf9\x75\x21\xed\x65\
\xbb\x1e\x43\xf5\x17\x29\xff\x03\x3d\xd7\xc8\x9a\x1c\xae\x83\xfd\
\x00\x00\x00\x00\x49\x45\x4e\x44\xae\x42\x60\x82\
"
qt_resource_name = "\
\x00\x07\
\x07\x3b\xe0\xb3\
\x00\x70\
\x00\x6c\x00\x75\x00\x67\x00\x69\x00\x6e\x00\x73\
\x00\x09\
\x09\xb5\x3b\x7d\
\x00\x63\
\x00\x61\x00\x72\x00\x74\x00\x6f\x00\x67\x00\x72\x00\x61\x00\x6d\
\x00\x06\
\x06\x8a\x9c\xb3\
\x00\x61\
\x00\x73\x00\x73\x00\x65\x00\x74\x00\x73\
\x00\x08\
\x0a\x61\x5a\xa7\
\x00\x69\
\x00\x63\x00\x6f\x00\x6e\x00\x2e\x00\x70\x00\x6e\x00\x67\
"
qt_resource_struct = "\
\x00\x00\x00\x00\x00\x02\x00\x00\x00\x01\x00\x00\x00\x01\
\x00\x00\x00\x00\x00\x02\x00\x00\x00\x01\x00\x00\x00\x02\
\x00\x00\x00\x14\x00\x02\x00\x00\x00\x01\x00\x00\x00\x03\
\x00\x00\x00\x2c\x00\x02\x00\x00\x00\x01\x00\x00\x00\x04\
\x00\x00\x00\x3e\x00\x00\x00\x00\x00\x01\x00\x00\x00\x00\
"
def qInitResources():
QtCore.qRegisterResourceData(0x01, qt_resource_struct, qt_resource_name, qt_resource_data)
def qCleanupResources():
QtCore.qUnregisterResourceData(0x01, qt_resource_struct, qt_resource_name, qt_resource_data)
qInitResources()
| gpl-2.0 | 2,922,580,266,374,114,300 | 57.454545 | 96 | 0.725412 | false |
tonyin/optionstg | run.py | 1 | 9422 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
from datetime import datetime
import argparse
import json
import os
import shutil
import sys
import time
from main import config
###############################################################################
# Options
###############################################################################
parser = argparse.ArgumentParser()
parser.add_argument(
'-w', '--watch', dest='watch', action='store_true',
help='watch files for changes when running the development web server',
)
parser.add_argument(
'-c', '--clean', dest='clean', action='store_true',
help='''recompiles files when running the development web server, but
obsolete if -s is used''',
)
parser.add_argument(
'-m', '--minify', dest='minify', action='store_true',
help='compiles files into minified version before deploying'
)
parser.add_argument(
'-s', '--start', dest='start', action='store_true',
help='starts the dev_appserver.py with storage_path pointing to temp',
)
parser.add_argument(
'-o', '--host', dest='host', action='store', default='127.0.0.1',
help='the host to start the dev_appserver.py',
)
parser.add_argument(
'-p', '--port', dest='port', action='store', default='8080',
help='the port to start the dev_appserver.py',
)
parser.add_argument(
'-f', '--flush', dest='flush', action='store_true',
help='clears the datastore, blobstore, etc',
)
args = parser.parse_args()
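# Example invocations (illustrative, using the flags defined above):
#   ./run.py -s      start dev_appserver.py (implies -c via update_missing_args)
#   ./run.py -w      compile once, then watch for changes
#   ./run.py -m      build minified assets before deploying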
###############################################################################
# Directories
###############################################################################
DIR_MAIN = 'main'
DIR_STATIC = 'static'
DIR_SRC = 'src'
DIR_STYLE = 'style'
DIR_SCRIPT = 'script'
DIR_MIN = 'min'
DIR_DST = 'dst'
DIR_LIB = 'lib'
DIR_NODE_MODULES = 'node_modules'
DIR_BIN = '.bin'
DIR_TEMP = 'temp'
DIR_STORAGE = 'storage'
FILE_ZIP = '%s.zip' % DIR_LIB
FILE_COFFEE = 'coffee'
FILE_LESS = 'lessc'
FILE_UGLIFYJS = 'uglifyjs'
dir_static = os.path.join(DIR_MAIN, DIR_STATIC)
dir_src = os.path.join(dir_static, DIR_SRC)
dir_src_script = os.path.join(dir_src, DIR_SCRIPT)
dir_src_style = os.path.join(dir_src, DIR_STYLE)
dir_dst = os.path.join(dir_static, DIR_DST)
dir_dst_style = os.path.join(dir_dst, DIR_STYLE)
dir_dst_script = os.path.join(dir_dst, DIR_SCRIPT)
dir_min = os.path.join(dir_static, DIR_MIN)
dir_min_style = os.path.join(dir_min, DIR_STYLE)
dir_min_script = os.path.join(dir_min, DIR_SCRIPT)
dir_lib = os.path.join(DIR_MAIN, DIR_LIB)
file_lib = os.path.join(DIR_MAIN, FILE_ZIP)
dir_bin = os.path.join(DIR_NODE_MODULES, DIR_BIN)
file_coffee = os.path.join(dir_bin, FILE_COFFEE)
file_less = os.path.join(dir_bin, FILE_LESS)
file_uglifyjs = os.path.join(dir_bin, FILE_UGLIFYJS)
dir_storage = os.path.join(DIR_TEMP, DIR_STORAGE)
###############################################################################
# Helpers
###############################################################################
def print_out(script, filename=''):
timestamp = datetime.now().strftime('%H:%M:%S')
if not filename:
filename = '-' * 46
script = script.rjust(12, '-')
print '[%s] %12s %s' % (timestamp, script, filename)
def make_dirs(directory):
if not os.path.exists(directory):
os.makedirs(directory)
def remove_dir(directory):
if os.path.isdir(directory):
shutil.rmtree(directory)
def clean_files():
bad_endings = ['pyc', '~']
print_out(
'CLEAN FILES',
'Removing files: %s' % ', '.join(['*%s' % e for e in bad_endings]),
)
for home, dirs, files in os.walk(DIR_MAIN):
for f in files:
for b in bad_endings:
if f.endswith(b):
os.remove(os.path.join(home, f))
def merge_files(source, target):
fout = open(target, 'a')
for line in open(source):
fout.write(line)
fout.close()
def os_execute(executable, args, source, target, append=False):
operator = '>>' if append else '>'
os.system('"%s" %s %s %s %s' % (executable, args, source, operator, target))
def compile_script(source, target_dir):
if not os.path.isfile(source):
print_out('NOT FOUND', source)
return
target = source.replace(dir_src_script, target_dir).replace('.coffee', '.js')
if not is_dirty(source, target):
return
make_dirs(os.path.dirname(target))
if not source.endswith('.coffee'):
print_out('COPYING', source)
shutil.copy(source, target)
return
print_out('COFFEE', source)
os_execute(file_coffee, '-cp', source, target)
def compile_style(source, target_dir, check_modified=False):
if not os.path.isfile(source):
print_out('NOT FOUND', source)
return
target = source.replace(dir_src_style, target_dir).replace('.less', '.css')
minified = ''
if not source.endswith('.less'):
return
if check_modified and not is_style_modified(target):
return
if target_dir == dir_min_style:
minified = '-x'
target = target.replace('.css', '.min.css')
print_out('LESS MIN', source)
else:
print_out('LESS', source)
make_dirs(os.path.dirname(target))
os_execute(file_less, minified, source, target)
def make_lib_zip(force=False):
if force and os.path.isfile(file_lib):
os.remove(file_lib)
if not os.path.isfile(file_lib):
print_out('ZIP', file_lib)
shutil.make_archive(dir_lib, 'zip', dir_lib)
def is_dirty(source, target):
if not os.access(target, os.O_RDONLY):
return True
return os.stat(source).st_mtime - os.stat(target).st_mtime > 0
def is_style_modified(target):
for folder, folders, files in os.walk(dir_src):
for file_ in files:
path = os.path.join(folder, file_)
if path.endswith('.less') and is_dirty(path, target):
return True
return False
def compile_all_dst():
for source in config.STYLES:
compile_style(os.path.join(dir_static, source), dir_dst_style, True)
for module in config.SCRIPTS:
for source in config.SCRIPTS[module]:
compile_script(os.path.join(dir_static, source), dir_dst_script)
def update_path_separators():
def fixit(path):
return path.replace('\\', '/').replace('/', os.sep)
for idx in xrange(len(config.STYLES)):
config.STYLES[idx] = fixit(config.STYLES[idx])
for module in config.SCRIPTS:
for idx in xrange(len(config.SCRIPTS[module])):
config.SCRIPTS[module][idx] = fixit(config.SCRIPTS[module][idx])
def install_dependencies():
missing = False
if not os.path.exists(file_coffee):
missing = True
if not os.path.exists(file_less):
missing = True
if not os.path.exists(file_uglifyjs):
missing = True
if not os.path.exists(os.path.join(DIR_NODE_MODULES, 'grunt')):
missing = True
try:
file_package = os.path.join(DIR_NODE_MODULES, 'uglify-js', 'package.json')
package_json = json.load(open(file_package))
version = package_json['version']
if int(version.split('.')[0]) < 2:
missing = True
except:
missing = True
if missing:
os.system('npm install')
def update_missing_args():
if args.start:
args.clean = True
def uniq(seq):
seen = set()
return [e for e in seq if e not in seen and not seen.add(e)]
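# e.g. uniq(['a', 'b', 'a', 'c']) == ['a', 'b', 'c']: an order-preserving
# dedupe. set.add() returns None, so `not seen.add(e)` is always True and
# only serves to record e as seen.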
###############################################################################
# Main
###############################################################################
os.chdir(os.path.dirname(os.path.realpath(__file__)))
update_path_separators()
install_dependencies()
update_missing_args()
if len(sys.argv) == 1:
parser.print_help()
sys.exit(1)
if args.clean:
print_out('CLEAN')
clean_files()
make_lib_zip(force=True)
remove_dir(dir_dst)
make_dirs(dir_dst)
compile_all_dst()
print_out('DONE')
if args.minify:
print_out('MINIFY')
clean_files()
make_lib_zip(force=True)
remove_dir(dir_min)
make_dirs(dir_min_script)
for source in config.STYLES:
compile_style(os.path.join(dir_static, source), dir_min_style)
for module in config.SCRIPTS:
scripts = uniq(config.SCRIPTS[module])
coffees = ' '.join([
os.path.join(dir_static, script)
for script in scripts if script.endswith('.coffee')
])
pretty_js = os.path.join(dir_min_script, '%s.js' % module)
ugly_js = os.path.join(dir_min_script, '%s.min.js' % module)
print_out('COFFEE MIN', ugly_js)
if len(coffees):
os_execute(file_coffee, '--join -cp', coffees, pretty_js, append=True)
for script in scripts:
if not script.endswith('.js'):
continue
script_file = os.path.join(dir_static, script)
merge_files(script_file, pretty_js)
os_execute(file_uglifyjs, pretty_js, '-cm', ugly_js)
os.remove(pretty_js)
print_out('DONE')
if args.watch:
print_out('WATCHING')
make_lib_zip()
make_dirs(dir_dst)
compile_all_dst()
print_out('DONE', 'and watching for changes (Ctrl+C to stop)')
while True:
time.sleep(0.5)
reload(config)
update_path_separators()
compile_all_dst()
if args.flush:
remove_dir(dir_storage)
print_out('STORAGE CLEARED')
if args.start:
make_dirs(dir_storage)
clear = 'yes' if args.flush else 'no'
port = int(args.port)
run_command = '''
python ../../../"Program Files (X86)"/Google/google_appengine/dev_appserver.py %s
--host %s
--port %s
--admin_port %s
--storage_path=%s
--clear_datastore=%s
--skip_sdk_update_check
''' % (DIR_MAIN, args.host, port, port + 1, dir_storage, clear)
os.system(run_command.replace('\n', ' '))
| mit | 3,240,041,015,489,999,400 | 26.469388 | 87 | 0.609425 | false |
CroissanceCommune/autonomie | autonomie/tests/views/admin/main/test_site.py | 1 | 1481 | # -*- coding: utf-8 -*-
# * Copyright (C) 2012-2013 Croissance Commune
# * Authors:
# * Arezki Feth <[email protected]>;
# * Miotte Julien <[email protected]>;
# * Pettier Gabriel;
# * TJEBBES Gaston <[email protected]>
#
# This file is part of Autonomie : Progiciel de gestion de CAE.
#
# Autonomie is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Autonomie is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Autonomie. If not, see <http://www.gnu.org/licenses/>.
#
import pytest
from autonomie.models.config import (
get_config,
)
@pytest.mark.usefixtures("config")
def test_site_config_success(config, get_csrf_request_with_db, dbsession):
from autonomie.views.admin.main.site import (
MAIN_SITE_ROUTE,
AdminSiteView,
)
config.add_route(MAIN_SITE_ROUTE, MAIN_SITE_ROUTE)
appstruct = {'welcome': 'testvalue'}
view = AdminSiteView(get_csrf_request_with_db())
view.submit_success(appstruct)
assert get_config()['welcome'] == u'testvalue'
| gpl-3.0 | 6,709,779,369,749,557,000 | 35.121951 | 74 | 0.690749 | false |
igel-kun/pyload | module/plugins/internal/UnTar.py | 1 | 1811 | # -*- coding: utf-8 -*-
from __future__ import with_statement
import sys
import tarfile
from .Extractor import ArchiveError, CRCError, Extractor
from .misc import encode, fsjoin
class UnTar(Extractor):
__name__ = "UnTar"
__type__ = "extractor"
__version__ = "0.05"
__status__ = "stable"
__description__ = """TAR extractor plugin"""
__license__ = "GPLv3"
__authors__ = [("Walter Purcaro", "[email protected]")]
VERSION = "%s.%s.%s" % (sys.version_info[0],
sys.version_info[1],
sys.version_info[2])
@classmethod
def isarchive(cls, filename):
try:
return tarfile.is_tarfile(encode(filename))
        except Exception:
return False
@classmethod
def find(cls):
return sys.version_info[:2] >= (2, 5)
def list(self, password=None):
with tarfile.open(self.filename) as t:
self.files = [fsjoin(self.dest, _f) for _f in t.getnames()]
return self.files
def verify(self, password=None):
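        # errorlevel=1 makes tarfile raise fatal read errors (e.g. a
        # truncated or corrupt archive) instead of only printing warnings,
        # so they can be mapped to the extractor exceptions below.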
try:
t = tarfile.open(self.filename, errorlevel=1)
except tarfile.CompressionError, e:
raise CRCError(e)
except (OSError, tarfile.TarError), e:
raise ArchiveError(e)
else:
t.close()
def extract(self, password=None):
self.verify(password)
try:
with tarfile.open(self.filename, errorlevel=2) as t:
t.extractall(self.dest)
self.files = t.getnames()
return self.files
except tarfile.ExtractError, e:
self.log_warning(e)
except tarfile.CompressionError, e:
raise CRCError(e)
except (OSError, tarfile.TarError), e:
raise ArchiveError(e)
| gpl-3.0 | 7,260,781,826,180,916,000 | 24.507042 | 71 | 0.555494 | false |
kubeflow/testing | py/kubeflow/testing/delete_kf_instance.py | 1 | 2404 | """Delete a kubeflow instance."""
import fire
import json
import logging
import retrying
from googleapiclient import discovery
from googleapiclient import errors
from oauth2client.client import GoogleCredentials
from kubeflow.testing import util
@retrying.retry(stop_max_delay=10*60*1000, wait_exponential_max=60*1000,
wait_exponential_multiplier=1000)
def delete_deployment(dm, project, name):
deployments_client = dm.deployments()
try:
op = deployments_client.delete(project=project, deployment=name).execute()
except errors.HttpError as e:
if not e.content:
raise
error_content = json.loads(e.content)
message = error_content.get('error', {}).get('message', "")
logging.info("delete deployment error %s", message)
code = error_content.get('error', {}).get('code', 0)
if code == 404: # pylint: disable=no-else-return
logging.info("Project %s doesn't have deployment %s", project, name)
return
elif code == 409:
logging.info("Conflicting operation in progress")
raise ValueError("Can't delete deployment confliction operation in "
"progress")
raise
zone = None
op = util.wait_for_gcp_operation(dm.operations(), project, zone, op["name"])
logging.info("Final op: %s", op)
class KFDeleter:
def delete_kf(self, project, name):
"""Delete a KF instance with the specified name in the specified project."""
# TODO(jlewi): This is a bit of a hack due to the fact that kfctl
# doesn't properly handle deletion just given the name of a kubeflow
# deployment. Once that's fixed we should just use that.
util.maybe_activate_service_account()
credentials = GoogleCredentials.get_application_default()
dm = discovery.build("deploymentmanager", "v2", credentials=credentials)
for dm_name in [name, name + "-storage"]:
logging.info("Deleting project %s deployment %s", project, dm_name)
delete_deployment(dm, project, dm_name)
# TODO(jlewi): Cleanup other resources like certificates and backends
if __name__ == "__main__":
logging.basicConfig(level=logging.INFO,
format=('%(levelname)s|%(asctime)s'
'|%(pathname)s|%(lineno)d| %(message)s'),
datefmt='%Y-%m-%dT%H:%M:%S',
)
logging.getLogger().setLevel(logging.INFO)
fire.Fire(KFDeleter)
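    # Example invocation (flag values are illustrative; Fire maps them to
    # the delete_kf arguments):
    #   python delete_kf_instance.py delete_kf --project=my-proj --name=my-kf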
| apache-2.0 | 50,440,597,814,513,940 | 37.774194 | 80 | 0.665141 | false |
morepath/more.jwtauth | more/jwtauth/main.py | 1 | 15396 | """Provides the JWTIdentityPolicy.
The following settings are available:
* master_secret: A secret known only by the server, used for
the default HMAC (HS*) algorithm.
* private_key: An Elliptic Curve or an RSA private_key used for
the EC (EC*) or RSA (PS*/RS*) algorithms.
* private_key_file: A file holding an Elliptic Curve or an RSA encoded
(PEM/DER) private_key.
* public_key: An Elliptic Curve or an RSA public_key used for the EC (EC*)
or RSA (PS*/RS*) algorithms.
* public_key_file: A file holding an Elliptic Curve
or an RSA encoded (PEM/DER) public_key.
* algorithm: The algorithm used to sign the key (defaults to HS256).
* expiration_delta: Time delta from now until the token will expire.
This can either be a datetime.timedelta or the number of seconds.
Default is 30 minutes, set to None to disable expiration.
* leeway: The leeway, which allows you to validate an expiration time
which is in the past, but not very far. To use as a datetime.timedelta
or the number of seconds. Defaults is 0.
* allow_refresh: Enables the token refresh API when True.
Default is False
* refresh_delta: A time delta in which the token can be refreshed
considering the leeway.
This can either be a datetime.timedelta or the number of seconds.
Default is 7 days. When None you can always refresh the token.
* refresh_nonce_handler: Either dotted path to callback function or the
callback function itself, which receives the userid as argument and
returns a nonce which will be validated before refreshing.
When None no nonce will be created or validated for refreshing.
Default is None.
* verify_expiration_on_refresh: If False, expiration_delta for the JWT
token will not be checked during refresh. Otherwise you can refresh
the token only if it's not yet expired. Default is False.
* issuer: This is a string that will be checked against the iss claim of
the token. You can use this e.g. if you have several related apps with
exclusive user audience. Default is None (do not check iss on JWT).
* auth_header_prefix: You can modify the Authorization header value prefix
that is required to be sent together with the token. The default value
is JWT. Another common value used for tokens is Bearer.
* userid_claim: The claim, which contains the user id.
The default claim is 'sub'.
The library takes either a master_secret or a private_key/public_key pair.
In the latter case the algorithm must be an EC*, PS* or RS* version.
"""
from calendar import timegm
from datetime import datetime, timedelta
import jwt
from morepath import Identity, NO_IDENTITY
from . import (
InvalidTokenError,
DecodeError,
ExpiredSignatureError,
MissingRequiredClaimError,
)
from .utils import handler
class JWTIdentityPolicy:
"""Morepath Identity Policy implementing JWT Access Auth.
This class provides an IdentityPolicy implementation based on
signed requests, using the JSON Web Token Authentication standard.
Reference:
http://self-issued.info/docs/draft-ietf-oauth-json-web-token.html
"""
def __init__(
self,
master_secret=None,
private_key=None,
private_key_file=None,
public_key=None,
public_key_file=None,
algorithm="HS256",
expiration_delta=timedelta(minutes=30),
leeway=0,
allow_refresh=False,
refresh_delta=timedelta(days=7),
refresh_nonce_handler=None,
verify_expiration_on_refresh=False,
issuer=None,
auth_header_prefix="JWT",
userid_claim="sub",
):
"""Initiate the JWTIdentityPolicy with the given settings."""
_public_key = master_secret
if public_key is not None:
_public_key = public_key
if public_key_file is not None:
with open(public_key_file) as key_pub_file:
_public_key = key_pub_file.read()
self.public_key = _public_key
_private_key = master_secret
if private_key is not None:
_private_key = private_key
if private_key_file is not None:
with open(private_key_file) as key_priv_file:
_private_key = key_priv_file.read()
self.private_key = _private_key
self.algorithm = algorithm
if isinstance(expiration_delta, timedelta):
expiration_delta = expiration_delta.total_seconds()
self.expiration_delta = expiration_delta
if leeway is None:
leeway = 0
elif isinstance(leeway, timedelta):
leeway = leeway.total_seconds()
self.leeway = leeway
self.allow_refresh = allow_refresh
if isinstance(refresh_delta, timedelta):
refresh_delta = refresh_delta.total_seconds()
self.refresh_delta = refresh_delta
if isinstance(refresh_nonce_handler, str):
self.refresh_nonce_handler = handler(refresh_nonce_handler)
else:
self.refresh_nonce_handler = refresh_nonce_handler
self.verify_expiration_on_refresh = verify_expiration_on_refresh
self.issuer = issuer
self.auth_header_prefix = auth_header_prefix
self.userid_claim = userid_claim
def identify(self, request):
"""Establish what identity this user claims to have from request.
:param request: Request to extract identity information from.
:type request: :class:`morepath.Request`.
:returns: :class:`morepath.Identity` instance or
:attr:`morepath.NO_IDENTITY` if identity cannot
be established.
"""
token = self.get_jwt(request)
if token is None:
return NO_IDENTITY
try:
claims_set = self.decode_jwt(token)
except (DecodeError, ExpiredSignatureError):
return NO_IDENTITY
userid = self.get_userid(claims_set)
if userid is None:
return NO_IDENTITY
extra_claims = self.get_extra_claims(claims_set)
if extra_claims is not None:
return Identity(userid=userid, **extra_claims)
else:
return Identity(userid=userid)
def remember(self, response, request, identity):
"""Remember identity on response.
Implements ``morepath.App.remember_identity``, which is called
from user login code.
Create a JWT token and return it as the Authorization field of the
response header.
:param response: response object on which to store identity.
:type response: :class:`morepath.Response`
:param request: request object.
:type request: :class:`morepath.Request`
:param identity: identity to remember.
:type identity: :class:`morepath.Identity`
"""
claims = identity.as_dict()
userid = claims.pop("userid")
claims_set = self.create_claims_set(request, userid, claims)
token = self.encode_jwt(claims_set)
response.headers["Authorization"] = "{} {}".format(
self.auth_header_prefix,
token,
)
def forget(self, response, request):
"""Forget identity on response.
Implements ``morepath.App.forget_identity``, which is called from
user logout code.
This is a no-op for this identity policy. The client is supposed to
handle logout and remove the token.
:param response: response object on which to forget identity.
:type response: :class:`morepath.Response`
:param request: request object.
:type request: :class:`morepath.Request`
"""
pass
def decode_jwt(self, token, verify_expiration=True):
"""Decode a JWTAuth token into its claims set.
This method decodes the given JWT to provide the claims set. The JWT
can fail if the token has expired (with appropriate leeway) or if the
token won't validate due to the secret (key) being wrong.
If private_key/public key is set then the public_key will be used
to decode the key.
The leeway and issuer settings will be passed to jwt.decode.
:param token: the JWTAuth token.
:param verify_expiration: if False the expiration time will not
be checked.
"""
options = {
"verify_exp": verify_expiration,
}
return jwt.decode(
token,
self.public_key,
algorithms=[self.algorithm],
options=options,
leeway=self.leeway,
issuer=self.issuer,
)
def create_claims_set(self, request, userid, extra_claims=None):
"""Create the claims set based on the userid of the claimed identity,
the settings and the extra_claims dictionary.
The userid will be stored in settings.jwtauth.userid_claim
(default: "sub").
If settings.jwtauth.expiration_delta is set it will be added
to the current time and stored in the "exp" claim.
If settings.jwtauth.issuer is set, it get stored in the "iss" claim.
If settings.jwtauth.refresh_delta is set it will be added
to the current time and stored in the "refresh_until" claim and
the return value of settings.jwtauth.refresh_nonce_handler called with
"user_id" as argument will be stored in the "nonce" claim.
With the extra_claims dictionary you can provide additional claims.
This can be registered claims like "nbf"
(the time before which the token should not be processed) and/or
claims containing extra info
about the identity, which will be stored in the Identity object.
:param request: current request object.
:type request: :class:`morepath.Request`
:param userid: the userid of the claimed identity.
:param extra_claims: dictionary, containing additional claims or None.
"""
claims_set = {self.userid_claim: userid}
now = timegm(datetime.utcnow().utctimetuple())
if self.expiration_delta is not None:
claims_set["exp"] = now + self.expiration_delta
if self.issuer is not None:
claims_set["iss"] = self.issuer
if self.allow_refresh:
if self.refresh_delta is not None:
claims_set["refresh_until"] = now + self.refresh_delta
if self.refresh_nonce_handler is not None:
claims_set["nonce"] = self.refresh_nonce_handler(
request, userid
)
if extra_claims is not None:
claims_set.update(extra_claims)
return claims_set
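    # Illustrative claims set (assuming the defaults plus allow_refresh and a
    # nonce handler) for userid 'alice':
    #   {'sub': 'alice', 'exp': <now + 1800>,
    #    'refresh_until': <now + 604800>, 'nonce': <handler(request, 'alice')>}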
def encode_jwt(self, claims_set):
"""Encode a JWT token based on the claims_set and the settings.
If available, registry.settings.jwtauth.private_key is used as key.
In this case the algorithm must be an RS* or EC* algorithm.
If registry.settings.jwtauth.private_key is not set,
registry.settings.jwtauth.master_secret is used.
registry.settings.jwtauth.algorithm is used as algorithm.
:param claims_set: set of claims, which will be included in
the created token.
"""
token = jwt.encode(
claims_set,
self.private_key,
self.algorithm,
).decode(encoding="UTF-8")
return token
def get_userid(self, claims_set):
"""Extract the userid from a claims set.
Returns userid or None if there is none.
:param claims_set: set of claims, which was included
in the received token.
"""
if self.userid_claim in claims_set:
return claims_set[self.userid_claim]
else:
return None
def get_extra_claims(self, claims_set):
"""Get claims holding extra identity info from the claims set.
Returns a dictionary of extra claims or None if there are none.
:param claims_set: set of claims, which was included in the received
token.
"""
reserved_claims = (
self.userid_claim,
"iss",
"aud",
"exp",
"nbf",
"iat",
"jti",
"refresh_until",
"nonce",
)
extra_claims = {}
for claim in claims_set:
if claim not in reserved_claims:
extra_claims[claim] = claims_set[claim]
if not extra_claims:
return None
return extra_claims
def get_jwt(self, request):
"""Extract the JWT token from the authorisation header of the request.
Returns the JWT token or None, if the token cannot be extracted.
:param request: request object.
:type request: :class:`morepath.Request`
"""
try:
authorization = request.authorization
except ValueError: # pragma: no cover
return None
if authorization is None:
return None
authtype, token = authorization
if authtype.lower() != self.auth_header_prefix.lower():
return None
return token
def verify_refresh(self, request):
"""
Verify if the request to refresh the token is valid.
If valid it returns the userid which can be used to create
an updated identity with ``remember_identity``.
Otherwise it raises an exception based on InvalidTokenError.
:param request: current request object
:type request: :class:`morepath.Request`
:returns: userid
:raises: InvalidTokenError, ExpiredSignatureError, DecodeError,
MissingRequiredClaimError
"""
if not self.allow_refresh:
raise InvalidTokenError("Token refresh is disabled")
token = self.get_jwt(request)
if token is None:
raise InvalidTokenError("Token not found")
try:
claims_set = self.decode_jwt(
token, self.verify_expiration_on_refresh
)
# reraise the exceptions to change the error messages
except DecodeError:
raise DecodeError("Token could not be decoded")
except ExpiredSignatureError:
raise ExpiredSignatureError("Token has expired")
userid = self.get_userid(claims_set)
if userid is None:
raise MissingRequiredClaimError(self.userid_claim)
if self.refresh_nonce_handler is not None:
if "nonce" not in claims_set:
raise MissingRequiredClaimError("nonce")
if (
self.refresh_nonce_handler(request, userid)
!= claims_set["nonce"]
):
raise InvalidTokenError("Refresh nonce is not valid")
if self.refresh_delta is not None:
if "refresh_until" not in claims_set:
raise MissingRequiredClaimError("refresh_until")
now = timegm(datetime.utcnow().utctimetuple())
refresh_until = int(claims_set["refresh_until"])
if refresh_until < (now - self.leeway):
raise ExpiredSignatureError("Refresh nonce has expired")
return userid
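    # Typical refresh flow (a sketch -- the surrounding view code is an
    # assumption, not part of this module): call verify_refresh(request) to
    # obtain the userid, then issue a fresh token by remembering a new
    # Identity(userid); the InvalidTokenError subclasses raised here map
    # naturally onto HTTP 403 responses.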
| bsd-3-clause | 8,533,879,541,043,725,000 | 36.009615 | 79 | 0.626137 | false |
ned2/typediff | typediff/typediff.py | 1 | 8044 | import sys
import os
import argparse
import json
import pickle
import functools
from itertools import chain
from . import delphin
from . import config
from . import gram
"""typediff.py
Author: Ned Letcher
https://github.com/ned2/typediff
Typediff is a tool to allow you to quickly explore the types used in
the processing of input by DELPH-IN grammars.
"""
HELP = """Usage:
$ typediff [options] GRAMMAR_NAME pos_sent1 pos_sent2 ... @ neg_sent1 neg_sent2 ...
Options:
The remainder of the options are only relevant to the command line mode:
-d
Operate in difference mode (default).
-i
Operate in intersection mode.
-u
Operate in union mode.
--count N
    The number of trees ACE is limited to returning.
--profiles
    Treat each input as an [incr tsdb()] profile query of the form
    PROFILE_PATH:opt_tsql_query instead of a sentence.
--fragments
    Include fragment readings (only supported by ERG currently).
--all
Take types from all of the parses returned by ACE instead of just the best.
--supers
Include the super types in the output.
--raw
Don't sort and colorize the list of types.
"""
# TODO
# move help text into argparse
# update various config files to reflect LOGONROOT variable
def argparser():
argparser = argparse.ArgumentParser()
argparser.add_argument("grammar", metavar="GRAMMAR NAME")
argparser.add_argument("--count", default=10)
argparser.add_argument("--all", action='store_true')
argparser.add_argument("--tagger")
argparser.add_argument("--fragments", action='store_true')
argparser.add_argument("--supers", action='store_true')
argparser.add_argument("--profiles", action='store_true')
argparser.add_argument("--raw", action='store_true')
group = argparser.add_mutually_exclusive_group(required=False)
group.add_argument("-i", action='store_true')
group.add_argument("-d", action='store_true')
group.add_argument("-u", action='store_true')
argparser.add_argument("sentences", nargs=argparse.REMAINDER)
return argparser
class ColorText(object):
WHITE = '\033[97m'
CYAN = '\033[96m'
PURPLE = '\033[95m'
BLUE = '\033[94m'
YELLOW = '\033[93m'
GREEN = '\033[92m'
RED = '\033[91m'
BLACK = '\033[90m'
END = '\033[0m'
def __init__(self, text, color):
self.text = text
self.color = getattr(self, color.upper())
def __str__(self):
return ''.join((self.color, self.text, self.END))
def pretty_print_types(types, hierarchy):
"""
Print the type list to the terminal, sorting and colorizing as
specified by the TYPES variable.
"""
def descendants(s):
if s == 'other':
return []
else:
return set(t.name for t in hierarchy[s].descendants())
kinds = [(descendants(t), col) for t, _rgba, col in config.TYPES]
def keyfunc(t):
for i, x in enumerate(kinds):
if t.lstrip('^') in kinds[i][0]:
return i
return 1000
types.sort(key=keyfunc)
output = []
for t in types:
for ds, col in kinds:
if t.lstrip('^') in ds:
output.append(str(ColorText(t, col)))
break
else:
output.append(t)
return '\n'.join(output)
def compare_types(pos_types, neg_types, arg):
if arg.d:
types = pos_types - neg_types
elif arg.i:
# currently only works when there are pos and neg items
types = set.intersection(pos_types, neg_types)
else:
types = pos_types | neg_types
return types
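# e.g. pos_types={'a', 'b'}, neg_types={'b'}: -d gives {'a'}, -i gives {'b'},
# and the default union branch gives {'a', 'b'}.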
@functools.lru_cache(maxsize=32)
def get_hierarchy(grammar):
return delphin.load_hierarchy(grammar.types_path)
@functools.lru_cache(maxsize=32)
def load_descendants(grammar):
hierarchy = get_hierarchy(grammar)
desc_func = lambda x: set(t.name for t in hierarchy[x].descendants())
kinds = [name for name, _rgba, _col in config.TYPES if name != 'other']
descendants = {}
for kind in kinds:
for t in desc_func(kind):
descendants[t] = kind
return descendants
def type_data():
return {t:{'rank':i+1, 'col':rgba}
for i, (t, rgba, _col) in enumerate(config.TYPES)}
def typediff_web(pos_items, neg_items, opts):
data = {
'pos-items' : pos_items,
'neg-items' : neg_items,
'descendants' : load_descendants(opts.grammar) if opts.desc else False,
'typeData': type_data(),
'grammar': opts.grammar.alias,
'treebank': opts.treebank,
}
if opts.supers:
hierarchy = get_hierarchy(opts.grammar)
for item in chain(pos_items, neg_items):
item.load_supers(hierarchy)
return data
def typediff(pos_items, neg_items, opts):
"""pos_items and neg_items are lists of either Fragment or Reading objects"""
# currently assuming that the Reading objects are only coming from gold
# profiles, therefore only one per item. otherwise we'd need to be using s
# list of Reading objects or probably could be defining an ProfileItem
# class that emulates the relevant interface to Fragment
tfunc = lambda x:x.types.keys() if opts.all else x.best.types.keys()
pos_types = set(chain.from_iterable(tfunc(x) for x in pos_items))
neg_types = set(chain.from_iterable(tfunc(x) for x in neg_items))
if len(pos_types) + len(neg_types) > 1:
typelist = list(compare_types(pos_types, neg_types, opts))
else:
typelist = list(max(pos_types, neg_types))
if opts.raw:
return '\n'.join(typelist)
hierarchy = delphin.load_hierarchy(opts.grammar.types_path)
if opts.supers:
        for group in (pos_items, neg_items):
            for item in group:
                item.load_supers(hierarchy)
        sfunc = lambda x:x.supers
        pos_supers = set(chain.from_iterable(sfunc(x) for x in pos_items))
        neg_supers = set(chain.from_iterable(sfunc(x) for x in neg_items))
supers = compare_types(pos_supers, neg_supers, opts)
typelist.extend('^'+t for t in supers)
return pretty_print_types(typelist, hierarchy)
def process_sentences(inputs, opts):
def process(sentence):
return delphin.Fragment(
sentence,
opts.grammar,
fragments=opts.fragments,
count=opts.count,
            tnt=getattr(opts, 'tnt', False),  # opts is accessed by attribute elsewhere; argparse Namespace has no .get()
dat_path=opts.grammar.dat_path,
ace_path=config.ACEBIN,
typifier=config.TYPIFIERBIN,
logpath=config.LOGPATH
)
return [process(i) for i in inputs]
def process_profiles(queries, opts):
# assume queries is a string of the form: PROFILE_PATH:opt_tsql_query
sep = ':'
items = []
# support both list of queries and single query
if isinstance(queries, str):
queries = [queries]
for query in queries:
if query.find(sep) >= 0:
path, condition = query.split(':')
condition = None if condition == '' else condition
else:
path = query
condition = None
items.extend(process_gold_profile(
path,
condition=condition,
grammar=opts.grammar,
))
return items
def process_gold_profile(path, condition=None, grammar=None):
return delphin.get_profile_results(
[path],
gold=True,
grammar=grammar,
condition=condition,
typifier=config.TYPIFIERBIN
)
def main():
arg = argparser().parse_args()
arg.grammar = gram.get_grammar(arg.grammar)
if '@' in arg.sentences and not (arg.u or arg.i or arg.d):
arg.d = True
pos, neg = [], []
# assign the inputs into pos and neg lists accordingly
stype = pos
for s in arg.sentences:
if s =='@':
stype = neg
else:
stype.append(s)
process_func = process_profiles if arg.profiles else process_sentences
pos_items = process_func(pos, arg)
neg_items = process_func(neg, arg)
result = typediff(pos_items, neg_items, arg)
print(result)
if __name__ == "__main__":
main()
| mit | -3,276,857,596,572,645,000 | 26.175676 | 83 | 0.617976 | false |
stormi/tsunami | src/primaires/salle/cherchables/__init__.py | 1 | 1798 | # -*-coding:Utf-8 -*
# Copyright (c) 2012 NOEL-BARON Léo
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
# * Neither the name of the copyright holder nor the names of its contributors
# may be used to endorse or promote products derived from this software
# without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT
# OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
"""Ce package contient les objets cherchables du module.
"""
from .bonhomme import CherchableBonhommeNeige
from .decor import CherchableDecor
from .prototype_decor import CherchablePrototypeDecor
from .salle import CherchableSalle
| bsd-3-clause | 8,668,133,806,592,736,000 | 46.289474 | 79 | 0.786311 | false |
pyta-uoft/pyta | nodes/if.py | 1 | 2300 | """
If astroid node
An if statement.
Attributes:
- test (NodeNG)
- Holds the node to evaluate such as Compare.
- body (list[NodeNG])
- A list of nodes that will execute if the test condition passes.
- orelse (list[NodeNG])
- A list of nodes executed when the test condition fails.
- elif statements are nested inside of the orelse
Example 1:
If(
test=Compare(
left=Name(name='n'),
ops=[['==', Const(value=0)]]),
body=[Pass()],
orelse=[If(
test=Name(name='something'),
body=[Pass()],
orelse=[If(
test=Compare(
left=Name(name='n'),
ops=[['>', Const(value=0)]]),
body=[Pass()],
orelse=[Assign(
targets=[AssignName(name='n')],
value=Const(value=3))])])])
Example 2:
If(
test=Compare(
left=Name(name='n'),
ops=[['==', Const(value=0)]]),
body=[Expr(value=Call(
func=Name(name='print'),
args=[Const(value=1)],
keywords=None)),
Expr(value=Call(
func=Name(name='print'),
args=[Const(value=10)],
keywords=None))],
orelse=[Expr(value=Call(
func=Name(name='print'),
args=[Const(value=100)],
keywords=None))])
Example 3:
If(
test=Compare(
left=Name(name='x'),
ops=[['==', Const(value=2)]]),
body=[Pass()],
orelse=[If(
test=Compare(
left=Name(name='y'),
ops=[['==', Const(value=3)]]),
body=[Pass()],
orelse=[])])
Example 4:
If(
test=Compare(
left=Name(name='x'),
ops=[['==', Const(value=5)]]),
body=[Pass()],
orelse=[])
"""
# Example 1
if n == 0:
pass
elif something:
pass
elif n > 0:
pass
else:
n = 3
# Example 2
if n == 0:
print(1)
print(10)
else:
print(100)
# Example 3
if x == 2:
pass
elif y == 3:
pass
# Example 4
if x == 5:
pass
| gpl-3.0 | -7,077,060,367,189,977,000 | 22.469388 | 73 | 0.423478 | false |
open-rnd/ros3d-dev-controller | ros3ddevcontroller/mqtt/mqttornado.py | 1 | 4038 | #
# Copyright (c) 2015 Open-RnD Sp. z o.o.
#
# Permission is hereby granted, free of charge, to any person
# obtaining a copy of this software and associated documentation
# files (the "Software"), to deal in the Software without
# restriction, including without limitation the rights to use, copy,
# modify, merge, publish, distribute, sublicense, and/or sell copies
# of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
# BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
# ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
# CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
"""MQTT to Tornado adapter"""
import logging
from tornado.ioloop import IOLoop, PeriodicCallback
# periodic check with MQTT client library to execute misc actions
# (pings, etc.)
MQTT_MISC_PERIOD = 10 * 1000
LOG = logging.getLogger('mqttornado')
class MQTTornadoAdapter(object):
"""Adapter for interfacing MQTT Client with Tornado framework"""
def __init__(self, client, loop=None):
"""Create new adapter for given client instance
:param mqtt.Client client: MQTT client instance
        :param tornado.ioloop.IOLoop loop: Tornado IOLoop instance,
None to use default loop
"""
self._client = client
self._fd = self._client_fd()
self._loop = loop
self._read_events = IOLoop.READ | IOLoop.ERROR
self._all_events = self._read_events | IOLoop.WRITE
if not self._loop:
self._loop = IOLoop.instance()
LOG.debug('setup handlers')
self._loop.add_handler(self._client_fd(),
self._io_clbk,
self._all_events)
self._periodic = PeriodicCallback(self._periodic_clbk,
MQTT_MISC_PERIOD,
io_loop=self._loop)
self._periodic.start()
def stop(self):
"""Stop Adapter
"""
self._loop.remove_handler(self._fd)
        self._periodic.stop()
self._periodic = None
def _client_fd(self):
"""Return MQTT client FD if already set otherwise raise an
exception
:rtype: int
:return: MQTT client fd
"""
sock = self._client.socket()
        if sock is None:
raise RuntimeError('not connected to broker')
LOG.debug('socket: %s', sock.fileno())
return sock.fileno()
def _io_clbk(self, _, event):
"""IO Callback from Tornado"""
LOG.debug('IO event: 0x%x', event)
if event & IOLoop.READ:
self._client.loop_read()
        if event & IOLoop.ERROR:
            # a read lets the client library detect and surface the failure
            self._client.loop_read()
if event & IOLoop.WRITE:
self._client.loop_write()
if self.poll_writes() == False:
self._loop.update_handler(self._client_fd(),
self._read_events)
def _periodic_clbk(self):
"""Periodic callback handler"""
# LOG.debug('periodic check')
self._client.loop_misc()
def poll_writes(self):
"""Check if client wants to write anything and schedule write
action
:return: True if client wants to write, False otherwise
:rtype: bool"""
if self._client.want_write():
LOG.debug('want write')
self._loop.update_handler(self._client_fd(),
self._all_events)
return True
return False
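# Example usage (a minimal sketch; assumes a connected paho-mqtt client and the
# default IOLoop -- the broker host below is a placeholder, not part of this
# module):
#
#   import paho.mqtt.client as mqtt
#   client = mqtt.Client()
#   client.connect('broker.example.org')  # the adapter needs a live socket
#   adapter = MQTTornadoAdapter(client)
#   IOLoop.instance().start()             # IO callbacks now drive the client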
| mit | -2,852,210,188,878,344,700 | 32.932773 | 69 | 0.604755 | false |
iwm911/plaso | plaso/parsers/firefox_cache.py | 1 | 7488 | #!/usr/bin/python
# -*- coding: utf-8 -*-
#
# Copyright 2014 The Plaso Project Authors.
# Please see the AUTHORS file for details on individual authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Implements a parser for Firefox cache files."""
import collections
import logging
import os
import construct
import pyparsing
from plaso.lib import errors
from plaso.lib import event
from plaso.lib import eventdata
from plaso.lib import parser
__author__ = 'Petter Bjelland ([email protected])'
class FirefoxCacheEvent(event.PosixTimeEvent):
"""Convenience class for an firefox cache record event."""
DATA_TYPE = 'firefox:cache:record'
def __init__(self, metadata, request_method, url, response_code):
super(FirefoxCacheEvent, self).__init__(
metadata.last_fetched, eventdata.EventTimestamp.ADDED_TIME)
self.last_modified = metadata.last_modified
self.major = metadata.major
self.minor = metadata.minor
self.location = metadata.location
self.last_fetched = metadata.last_fetched
self.expire_time = metadata.expire_time
self.fetch_count = metadata.fetch_count
self.request_size = metadata.request_size
self.info_size = metadata.info_size
self.data_size = metadata.data_size
self.request_method = request_method
self.url = url
self.response_code = response_code
class FirefoxCacheParser(parser.BaseParser):
"""Extract cached records from Firefox."""
NAME = 'firefox_cache'
# Number of bytes allocated to a cache record metadata.
RECORD_HEADER_SIZE = 36
# Initial size of Firefox >= 4 cache files.
INITIAL_CACHE_FILE_SIZE = 1024 * 1024 * 4
# Smallest possible block size in Firefox cache files.
MIN_BLOCK_SIZE = 256
RECORD_HEADER_STRUCT = construct.Struct(
'record_header',
construct.UBInt16('major'),
construct.UBInt16('minor'),
construct.UBInt32('location'),
construct.UBInt32('fetch_count'),
construct.UBInt32('last_fetched'),
construct.UBInt32('last_modified'),
construct.UBInt32('expire_time'),
construct.UBInt32('data_size'),
construct.UBInt32('request_size'),
construct.UBInt32('info_size'))
ALTERNATIVE_CACHE_NAME = (pyparsing.Word(pyparsing.hexnums, exact=5) +
pyparsing.Word("m", exact=1) + pyparsing.Word(pyparsing.nums, exact=2))
FIREFOX_CACHE_CONFIG = collections.namedtuple(
u'firefox_cache_config',
u'block_size first_record_offset')
REQUEST_METHODS = [
u'GET', 'HEAD', 'POST', 'PUT', 'DELETE',
u'TRACE', 'OPTIONS', 'CONNECT', 'PATCH']
def __GetFirefoxConfig(self, file_entry):
"""Determine cache file block size. Raises exception if not found."""
if file_entry.name[0:9] != '_CACHE_00':
try:
# Match alternative filename. Five hex characters + 'm' + two digit
# number, e.g. "01ABCm02". 'm' is for metadata. Cache files with 'd'
# instead contain data only.
self.ALTERNATIVE_CACHE_NAME.parseString(file_entry.name)
except pyparsing.ParseException:
raise errors.UnableToParseFile(u'Not a Firefox cache file.')
file_object = file_entry.GetFileObject()
# There ought to be a valid record within the first 4MB. We use this
# limit to prevent reading large invalid files.
to_read = min(file_object.get_size(), self.INITIAL_CACHE_FILE_SIZE)
while file_object.get_offset() < to_read:
offset = file_object.get_offset()
try:
# We have not yet determined the block size, so we use the smallest
# possible size.
record = self.__NextRecord(file_entry.name, file_object,
self.MIN_BLOCK_SIZE)
record_size = (
self.RECORD_HEADER_SIZE + record.request_size + record.info_size)
if record_size >= 4096:
# _CACHE_003_
block_size = 4096
elif record_size >= 1024:
# _CACHE_002_
block_size = 1024
else:
# _CACHE_001_
block_size = 256
return self.FIREFOX_CACHE_CONFIG(block_size, offset)
except IOError:
logging.debug(
u'{0:s}:{1:d}: Invalid record.'.format(file_entry.name, offset))
raise errors.UnableToParseFile(
u'Could not find a valid cache record. '
u'Not a Firefox cache file.')
def __Accept(self, candidate, block_size):
"""Determine whether the candidate is a valid cache record."""
record_size = (self.RECORD_HEADER_SIZE + candidate.request_size
+ candidate.info_size)
return (candidate.request_size > 0 and candidate.fetch_count > 0
and candidate.major == 1 and record_size // block_size < 256)
def __NextRecord(self, filename, file_object, block_size):
"""Provide the next cache record."""
offset = file_object.get_offset()
try:
candidate = self.RECORD_HEADER_STRUCT.parse_stream(file_object)
except (IOError, construct.FieldError):
raise IOError(u'Unable to parse stream.')
if not self.__Accept(candidate, block_size):
# Move reader to next candidate block.
file_object.seek(block_size - self.RECORD_HEADER_SIZE, os.SEEK_CUR)
raise IOError(u'Not a valid Firefox cache record.')
# The last byte in a request is null.
url = file_object.read(candidate.request_size)[:-1]
# HTTP response header, even elements are keys, odd elements values.
headers = file_object.read(candidate.info_size)
request_method, _, _ = (
headers.partition('request-method\x00')[2].partition('\x00'))
_, _, response_head = headers.partition('response-head\x00')
response_code, _, _ = response_head.partition("\r\n")
if request_method not in self.REQUEST_METHODS:
logging.debug(
u'{0:s}:{1:d}: Unknown HTTP method "{2:s}". Response "{3:s}"'.format(
filename, offset, request_method, headers))
if response_code[0:4] != 'HTTP':
logging.debug(
u'{0:s}:{1:d}: Could not determine HTTP response code. '
u'Response headers: "{2:s}".'.format(filename, offset, headers))
# A request can span multiple blocks, so we use modulo.
_, remainder = divmod(file_object.get_offset() - offset, block_size)
# Move reader to next candidate block. Include the null-byte skipped above.
file_object.seek(block_size - remainder, os.SEEK_CUR)
return FirefoxCacheEvent(candidate, request_method, url, response_code)
def Parse(self, file_entry):
"""Extract records from a Firefox cache file."""
firefox_config = self.__GetFirefoxConfig(file_entry)
file_object = file_entry.GetFileObject()
file_object.seek(firefox_config.first_record_offset)
while file_object.get_offset() < file_object.get_size():
try:
yield self.__NextRecord(file_entry.name, file_object,
firefox_config.block_size)
except IOError:
logging.debug(u'{0:s}:{1:d}: Invalid cache record.'.format(
file_entry.name, file_object.get_offset() - self.MIN_BLOCK_SIZE))
| apache-2.0 | -1,733,855,490,218,381,600 | 33.666667 | 79 | 0.670005 | false |
opencorato/mapit | mapit_gb/controls/2015-05.py | 1 | 1662 | # A control file for importing May 2015 Boundary-Line.
# This control file assumes previous Boundary-Lines have been imported,
# because it uses that information. If this is a first import, use the
# first-gss control file.
def code_version():
return 'gss'
def check(name, type, country, geometry):
"""Should return True if this area is NEW, False if we should match against
an ONS code, or an Area to be used as an override instead."""
# There are lots of new things in this edition of boundary line, but none
# of them are things which we have to manually override, which is nice.
# New areas, by type:
# County electoral divisions
# New area: CED None 44530 Wyreside ED
# Geometry of None CED Earls Barton not valid
# Geometry of None CED Hatton Park not valid
# Metropolitan wards
# Doncaster (SI 2015/114)
# Wards, in range E05009491-E05010773
# Upwards of 50 councils with boundary changes
# Geometry of E05010034 DIW Harrowden & Sywell Ward not valid
# Geometry of E05010039 DIW Redwell Ward not valid
# London Borough Wards
# Geometry of E05009392 LBW Colville not valid
# Geometry of E05009400 LBW Pembridge not valid
# Unitary Authority wards
# Darlington (2014/3338)
# Herefordshire (2014/20)
# Leicester (2014/3339)
# Middlesbrough (2014/1188)
# North Somerset (2014/3291)
# Poole (2015/73)
# Swindon (2015/116)
# Telford & Wrekin (2014/1910)
# York (2014/3289)
# Parish Councils, most IDs from E04012345-E04012470
# Geometry of E04006883 CPC Great Harrowden not valid
# This is the default
return False
| agpl-3.0 | -8,454,783,055,558,965,000 | 30.358491 | 79 | 0.696751 | false |
ThunderGemios10/The-Super-Duper-Script-Editor | wrd/bin.py | 1 | 5171 | ################################################################################
### Copyright © 2012-2013 BlackDragonHunt
###
### This file is part of the Super Duper Script Editor.
###
### The Super Duper Script Editor is free software: you can redistribute it
### and/or modify it under the terms of the GNU General Public License as
### published by the Free Software Foundation, either version 3 of the License,
### or (at your option) any later version.
###
### The Super Duper Script Editor is distributed in the hope that it will be
### useful, but WITHOUT ANY WARRANTY; without even the implied warranty of
### MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
### GNU General Public License for more details.
###
### You should have received a copy of the GNU General Public License
### along with the Super Duper Script Editor.
### If not, see <http://www.gnu.org/licenses/>.
################################################################################
import bitstring
from bitstring import BitStream, ConstBitStream
from wrd.ops import *
from wrd.parser import parse_command, InvalidWrdHeader
################################################################################
### Converts binary wrd data to a list of commands which can be used in all
### the other functions in this module.
################################################################################
def from_bin(data):
# Eat the header.
parse_command(data)
commands = []
while True:
try:
op, params = parse_command(data)
commands.append((op, params))
except InvalidWrdHeader:
byte = data.read("uint:8")
commands.append((WRD_INVALID, {"val": byte}))
except:
break
return commands
################################################################################
### Converts a list of commands to the binary format used by the game.
################################################################################
def to_bin(commands):
data = BitStream()
lines = 0
for op, params in commands:
if op == WRD_HEADER:
continue
if not op in OP_PARAMS:
# raise Exception("Unknown op: 0x%02X" % op)
print "Unknown op: 0x%02X" % op
continue
param_info = OP_PARAMS[op]
# If it has a custom parsing function, use the equivalent packing function.
if isinstance(param_info, basestring):
command = globals()[OP_FUNCTIONS[op]](**params)
data.append(command)
else:
if op == WRD_SHOW_LINE:
lines += 1
data.append(bitstring.pack("uint:8, uint:8", CMD_MARKER, op))
unnamed_param_id = 0
for param_name, param_type in param_info:
        if param_name is None:
data.append(bitstring.pack(param_type, params[param_name][unnamed_param_id]))
unnamed_param_id += 1
else:
data.append(bitstring.pack(param_type, params[param_name]))
return bitstring.pack("uint:8, uint:8, uintle:16", CMD_MARKER, WRD_HEADER, lines) + data
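# Round-trip sketch (illustrative; 'script.wrd' is a placeholder filename):
#
#   data = ConstBitStream(filename='script.wrd')
#   commands = from_bin(data)
#   rebuilt = to_bin(commands)  # a BitStream holding the re-packed script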
################################################################################
### Special function definitions.
################################################################################
def check_flag_a(flags, flag_ops, fail_label):
# XX XX 00 YY
# * If there are multiple flags (as many as needed)
# -> WW XX XX 00 YY
#
# * When all the flags have been listed.
# -> 70 3C 70 34 ZZ ZZ
#
# * XX XX = Flag group/ID
# * YY = Flag State
# * 00 = Off
# * 01 = On
#
# * WW = Operator
# * 06 = AND
# * 07 = OR (?)
#
# * ZZ ZZ = Label to jump to if check failed.
command = bitstring.pack("uint:8, uint:8", CMD_MARKER, WRD_CHECKFLAG_A)
for i, (flag_group, flag_id, flag_state, long_state) in enumerate(flags):
command += bitstring.pack("uint:8, uint:8", flag_group, flag_id)
if long_state:
command += bitstring.pack("uint:16", flag_state)
else:
command += bitstring.pack("uint:8", flag_state)
if i < len(flag_ops):
command += bitstring.pack("uint:8", flag_ops[i])
command += bitstring.pack("uint:8, uint:8", CMD_MARKER, WRD_FLAG_CHECK_END)
  if fail_label is not None:
command += bitstring.pack("uint:8, uint:8, uint:16", CMD_MARKER, WRD_GOTO_LABEL, fail_label)
return command
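# Illustrative call (hypothetical values): require flag group 0x02, id 0x0A to
# be ON (0x01, short form), jumping to label 0x0005 if the check fails:
#
#   cmd = check_flag_a([(0x02, 0x0A, 0x01, False)], [], 0x0005)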
def check_flag_b(flags, flag_ops, fail_label):
command = bitstring.pack("uint:8, uint:8", CMD_MARKER, WRD_CHECKFLAG_B)
for i, (unk1, unk2, unk3, unk4, unk5) in enumerate(flags):
command += bitstring.pack("uint:8, uint:8, uint:8, uint:8, uint:8", unk1, unk2, unk3, unk4, unk5)
if i < len(flag_ops):
command += bitstring.pack("uint:8", flag_ops[i])
command += bitstring.pack("uint:8, uint:8", CMD_MARKER, WRD_FLAG_CHECK_END)
  if fail_label is not None:
command += bitstring.pack("uint:8, uint:8, uint:16", CMD_MARKER, WRD_GOTO_LABEL, fail_label)
return command
def wait_frames(frames):
return bitstring.pack("uint:8, uint:8", CMD_MARKER, WRD_WAIT_FRAME) * frames
def byte(val):
return bitstring.pack("uint:8", val)
### EOF ###
| mit | -3,351,541,255,566,452,700 | 31.509434 | 101 | 0.555728 | false |
cedadev/eos-db | eos_db/views.py | 1 | 25449 | """API functions for controlling the Cloudhands DB
This module contains all the API functions available on the Cloudhands RESTful
API. Modifications requests to the database are mediated through functions in
the "server" module.
"""
import json, uuid
import hashlib, base64, random
from pyramid.response import Response
from pyramid.view import view_config
from pyramid.httpexceptions import (HTTPBadRequest, HTTPNotImplemented,
HTTPUnauthorized, HTTPForbidden,
HTTPNotFound, HTTPInternalServerError )
from pyramid.security import Allow, Everyone
from eos_db import server
# Patch for view_config - as we're not calling any of these functions directly it's
# too easy to accidentally give two funtions the same name, and then wonder why
# the result is a 404 error.
# This workaround patches the view_config decorator so that it complains when you
# try to decorate a function that has already been declared. The behaviour should
# be otherwise unaffected.
# (Note that pyflakes3 is a good way to pick up this issue too.)
# Also, for bonus points, implement the routes=[list] argument.
_view_config = view_config
def view_config(*args, **kwargs):
def new_decorator(f):
if f.__name__ in globals():
raise AttributeError("This module already has a function %s() defined" % f.__name__)
if 'routes' in kwargs:
for r in kwargs.pop('routes'):
f = _view_config(*args, route_name=r, **kwargs)(f)
return f
else:
return _view_config(*args, **kwargs)(f)
return new_decorator
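# For illustration (assumed usage of the patch above): one view function can now
# be registered on several routes at once, and an accidental name clash fails
# fast instead of silently shadowing an earlier view:
#
#   @view_config(request_method="GET", routes=['server', 'server_by_id'],
#                renderer='json', permission="use")
#   def some_view(request):
#       ...
#
#   # a second "def some_view" decorated later raises AttributeError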
class PermissionsMap():
"""This is passed to pyramid.config.Configurator in __init__.py,
and defines the permissions attributed to each group. """
__acl__ = [(Allow, Everyone, 'login'),
(Allow, 'group:users', 'use'),
(Allow, 'group:agents', 'use'),
(Allow, 'group:agents', 'act'),
(Allow, 'group:administrators', 'use'),
(Allow, 'group:administrators', 'act'),
(Allow, 'group:administrators', 'administer')]
def __init__(self, request):
""" No-operations here. """
pass
@view_config(request_method="GET", route_name='home', renderer='json')
def home_view(request):
""" Return a list of all valid API calls by way of documentation. """
call_list = {"Valid API Call List":{
"Retrieve User List": "/users",
"Get my details": "/user",
"Get my touches": "/user/touches",
"Set my password": "/user/password",
"Get my credit": "/user/credit",
"servers": "/servers", # Return server list
"Server details by name": "/servers/{name}", # Get server details or
"Server details by ID": "/servers/by_id/{id}",
"Start a server": "/servers/{name}/Starting",
"Stop a server": "/servers/{name}/Stopping",
"Restart a server": "/servers/{name}/Restarting",
"De-Boost a server": "/servers/{name}/pre_deboosting",
"server_Pre_Deboosted": "/servers/{name}/Pre_deboosted",
"server_Deboost": "/servers/{name}/deboosting",
"server_Started": "/servers/{name}/Started",
"server_Stopped": "/servers/{name}/Stopped",
"Boost a server": "/servers/{name}/preparing",
"server_Prepared": "/servers/{name}/prepared",
"server_owner": "/servers/{name}/owner",
"server_touches": "/servers/{name}/touches",
"CPU/RAM Specification": "/servers/{name}/specification",
"All states, and count by state": "/states",
"Servers is state": "/states/{name}",
"Servers needing deboost": "/deboost_jobs",
}
}
return call_list
# OPTIONS call result
@view_config(request_method="OPTIONS", routes=['home', 'servers'])
def options(request):
""" Return the OPTIONS header. """
# NOTE: This is important for enabling CORS, although under certain
    # circumstances the browser doesn't appear to need it. Might be worth
# examining why.
resp = Response(None)
resp.headers['Allow'] = "HEAD,GET,OPTIONS"
return resp
@view_config(request_method="OPTIONS", routes=['server', 'server_specification'])
@view_config(request_method="OPTIONS", routes=['server_by_id', 'server_by_id_specification'])
def options2(request):
resp = Response(None)
resp.headers['Allow'] = "HEAD,GET,POST,OPTIONS"
return resp
@view_config(request_method="OPTIONS", routes=["server_" + x for x in server.get_state_list()])
@view_config(request_method="OPTIONS", routes=["server_by_id_" + x for x in server.get_state_list()])
@view_config(request_method="OPTIONS", routes=['server_extend_boost', 'server_by_id_extend_boost'])
def options3(request):
resp = Response(None)
resp.headers['Allow'] = "HEAD,POST,OPTIONS"
return resp
# End of OPTIONS guff
@view_config(request_method="GET", route_name='users', renderer='json', permission="use")
def retrieve_users(request):
"""Return details for all users on the system. Basically the same as calling /users/x
for all users, but missing the credit info.
"""
res = []
for user_id in server.list_user_ids():
res.append(server.check_user_details(user_id))
return res
@view_config(request_method="PUT", route_name='user', renderer='json', permission="administer")
def create_user(request):
""" Create a user in the database. """
#FIXME - the UUID for a user should be the e-mail address. Can we make this explicit?
newname = server.create_user(request.POST['type'], request.POST['handle'], request.POST['name'], request.matchdict['name'])
return newname
@view_config(request_method="GET", route_name='user', renderer='json', permission="use")
def retrieve_user(request):
"""Return account details for any user. Anybody should be able to do this,
though most users have no need to.
:param name: The user we are interested in.
:returns JSON object containing user table data.
"""
username = request.matchdict['name']
try:
actor_id = server.get_user_id_from_name(username)
details = server.check_user_details(actor_id)
details.update({'credits' : server.check_credit(actor_id)})
return details
except KeyError:
return HTTPNotFound()
@view_config(request_method="GET", route_name='my_user', renderer='json', permission="use")
def retrieve_my_user(request):
"""Return account details for logged-in user.
:param name: The user we are interested in.
:returns JSON object containing user table data.
"""
username = request.authenticated_userid
try:
actor_id = server.get_user_id_from_name(username)
details = server.check_user_details(actor_id)
details.update({'credits' : server.check_credit(actor_id)})
return details
except KeyError:
#Should be impossible unless a logged-in user is deleted.
return HTTPInternalServerError()
@view_config(request_method="PATCH", route_name='user', renderer='json', permission="administer")
def update_user(request):
# FIXME: Not implemented.
response = HTTPNotImplemented()
return response
@view_config(request_method="DELETE", route_name='user', renderer='json', permission="administer")
def delete_user(request):
# FIXME: Not implemented. Some thought needs to go into this. I think a
# deletion flag would be appropriate, but this will involve changing quite
# a number of queries.
    # Tim thinks - maybe just lock the password and remove all machines?
response = HTTPNotImplemented()
return response
@view_config(request_method="PUT", route_name='user_password', renderer='json', permission="administer")
def create_user_password(request):
""" Creates a password for the user given. """
username = request.matchdict['name']
actor_id = server.get_user_id_from_name(username)
newname = server.touch_to_add_password(actor_id, request.POST['password'])
#FIXME - should we not just return OK?
return newname
@view_config(request_method="PUT", route_name='my_password', renderer='json', permission="use")
def create_my_password(request):
""" Creates a password for the user given. """
username = request.authenticated_userid
actor_id = server.get_user_id_from_name(username)
newname = server.touch_to_add_password(actor_id, request.POST['password'])
#FIXME - should we not just return OK?
#FIXME2 - also should this not be a POST?
return newname
@view_config(request_method="GET", route_name='user_touches', renderer='json', permission="use")
def retrieve_user_touches(request):
# FIXME - Not implemented.
name = request.matchdict['name']
return name
@view_config(request_method="POST", route_name='user_credit', renderer='json', permission="administer")
def create_user_credit(request):
"""Adds credit to a user account, negative or positive. Only an administrator can do this
directly. Boost and Deboost actions will do this implicitly.
Checks if username is valid, otherwise throws HTTP 404.
Checks if credit is an integer, otherwise throws HTTP 400.
:param name: User for which we are amending credit.
:returns: JSON containing actor id, credit change and new balance.
"""
username, credit = request.matchdict['name'], request.POST['credit']
try:
user_id = server.get_user_id_from_name(username)
server.touch_to_add_credit(user_id, int(credit))
credits = server.check_credit(user_id)
return {'actor_id': int(user_id),
'credit_change': int(credit),
'credit_balance': int(credits)}
except ValueError:
return HTTPBadRequest()
except KeyError:
return HTTPNotFound()
# Not sure if Ben was in the process of folding credit balance into user details
# or splitting it out. I vote for folding it in.
# DELETEME
#
# @view_config(request_method="GET", route_name='my_credit', renderer='json', permission="use")
# def retrieve_my_credit(request):
# """Return credits outstanding for current user.
#
# :returns: JSON containing actor_id and current balance.
# """
# username = request.authenticated_userid
# actor_id = server.get_user_id_from_name(username)
# # actor_id should be valid
# credits = server.check_credit(actor_id)
# return { 'actor_id': actor_id,
# 'credit_balance': int(credits)}
# FIXME - should just return credit to match the POST above.
@view_config(request_method="GET", route_name='user_credit', renderer='json', permission="act")
def retrieve_user_credit(request):
"""Return credits outstanding for any user.
:param name: User for which we are checking credit.
:returns: JSON containing actor_id and current balance.
"""
username = request.matchdict['name']
try:
user_id = server.get_user_id_from_name(username)
credits = server.check_credit(user_id)
return {'actor_id': user_id,
'credit_balance': int(credits)}
except KeyError as e:
return HTTPNotFound(str(e))
@view_config(request_method="GET", route_name='servers', renderer='json', permission="use")
def retrieve_servers(request):
"""
Lists all artifacts related to the current user.
"""
user_id = server.get_user_id_from_name(request.authenticated_userid)
server_list = server.list_artifacts_for_user(user_id)
return list(server_list)
@view_config(request_method="GET", route_name='states', renderer='json', permission="use")
def retrieve_server_counts_by_state(request):
"""
List all states and the number of servers in that state.
"""
#Note that with the current DB schema, having the separate state and states calls is silly
#because both retrieve the same info from the DB then selectively throw bits away.
server_table = server.list_servers_by_state()
all_states = server.get_state_list()
#Not so good - we'd like to report all valid states...
#return { k: len(v) for k, v in server_table }
#Better...
return { s: len(server_table.get(s, ())) for s in all_states }
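# Illustrative response shape (hypothetical counts) -- every known state gets a
# key, e.g. {"Started": 3, "Stopped": 12, "Boosting": 0, ...}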
@view_config(request_method="GET", route_name='state', renderer='json', permission="use")
def retrieve_servers_in_state(request):
"""
Lists all servers in a given state.
"""
server_ids = server.list_servers_by_state().get(request.matchdict['name'],())
server_uuid = [ server.get_server_uuid_from_id(s_id) for s_id in server_ids ]
server_name = [ server.get_server_name_from_id(s_id) for s_id in server_ids ]
return [ { "artifact_id" : s[0],
"artifact_uuid" : s[1],
"artifact_name" : s[2] }
for s in zip(server_ids, server_uuid, server_name) ]
@view_config(request_method="PUT", route_name='server', renderer='json', permission="administer")
def create_server(request):
"""
Creates a new artifact record in the database.
"""
newname = server.create_appliance(request.matchdict['name'], request.POST['uuid'])
return newname
@view_config(request_method="GET", routes=['server', 'server_by_id'], renderer='json', permission="use")
def retrieve_server(request):
"""
Gets artifact details from the server, based on the name or the internal ID.
"""
vm_id, actor_id = _resolve_vm(request)
server_details = server.return_artifact_details(vm_id)
return server_details
@view_config(request_method="PATCH", route_name='server', renderer='json', permission="use")
def update_server(request):
# FIXME: Not implemented. Do we want this to be implemented?
response = HTTPNotImplemented()
return response
@view_config(request_method="DELETE", route_name='server', renderer='json', permission="use")
def delete_server(request):
# FIXME: Not implemented. Again, this needs thought. Probably logical
# deletion through a "deleted" flag.
# Or else, add a new server with the same name and blank UUID, as currently for multiple
# servers with the same name we only see the last.
response = HTTPNotImplemented()
return response
@view_config(request_method="PUT", route_name='server_owner', renderer='json', permission="use")
def create_server_owner(request):
""" Calls touch_to_add_ownership to add an owner to the server. """
# FIXME: There is the problem of servers being able to have multiple
# owners in the current system. Again, we may need a logical deletion
# flag. On reflection, I'd like to suggest that we add a logical deletion
# flag to the Resource class, as it'll be inherited by all resources,
# and solves multiple problems in one place.
newname = server.touch_to_add_ownership(request.matchdict['name'], request.POST['actor_id'])
return newname
@view_config(request_method="GET", route_name='server_owner', renderer='json', permission="use")
def get_server_owner(request):
# Not implemented. Check if necessary. A server can have many owners.
return HTTPNotImplemented()
def _resolve_vm(request):
"""Function given a request works out the VM we are talking about and whether
the current user actually has permission to do stuff to it.
Also returns the internal ID for the user, as well as the VM.
"""
actor_id = None
vm_id = None
try:
actor_id = server.get_user_id_from_name(request.authenticated_userid)
except:
#OK, it must be an agent or an internal call.
pass
try:
vm_id = ( request.matchdict['id']
if 'id' in request.matchdict else
server.get_server_id_from_name(request.matchdict['name']) )
except:
#Presumably because there is no such VM
raise HTTPNotFound()
if ( request.has_permission('act') or
server.check_ownership(vm_id, actor_id) ):
return vm_id, actor_id
else:
raise HTTPUnauthorized()
@view_config(request_method="GET", routes=['server_state', 'server_by_id_state'],
renderer='json', permission="use")
def server_state(request):
"""Get the status for a server. Anyone can request this,
:param name: Name of VApp which we want to stop.
:returns: The state, by name.
"""
vm_id, actor_id = _resolve_vm(request)
return server.check_state(vm_id)
def _set_server_state(request, target_state):
"""Basic function for putting a server into some state, for basic state-change calls."""
vm_id, actor_id = _resolve_vm(request)
return {"vm_id": vm_id,
"touch_id": server.touch_to_state(actor_id, vm_id, target_state)}
#Pyramid is not designed to allow you to add view configurations programmatically.
#But in Python all things are possible. Here is a way to set up view configurations in a loop.
#This will declare the functions "set_server_to_Boosting", etc...
#And bind them to the routes "server_Boosting" and "server_by_id_Boosting"
### Here are the states that can only be set by agents...
for state in ['Stopped', 'Started', 'Prepared', 'Pre_Deboosted',
'Boosting', 'Deboosting', 'Starting_Boosted' ]:
funcname = 'set_server_to_' + state
globals()[funcname] = lambda request, state=state: _set_server_state(request, state)
globals()[funcname] = view_config( request_method="POST",
routes=['server_' + state, 'server_by_id_' + state],
renderer='json',
permission="act"
)( globals()[funcname] )
globals()[funcname].__name__ = funcname
globals()[funcname].__qualname__ = funcname
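# For example (illustrative expansion), each pass of the loop above is
# equivalent to writing out by hand:
#
#   @view_config(request_method="POST", renderer='json', permission="act",
#                routes=['server_Stopped', 'server_by_id_Stopped'])
#   def set_server_to_Stopped(request):
#       return _set_server_state(request, 'Stopped')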
### States that any user can put their server into...
for state in ['Starting', 'Stopping', 'Restarting', 'Error']:
funcname = 'set_server_to_' + state
globals()[funcname] = lambda request, state=state: _set_server_state(request, state)
globals()[funcname] = view_config( request_method="POST",
routes=['server_' + state, 'server_by_id_' + state],
renderer='json',
permission="use"
)( globals()[funcname] )
globals()[funcname].__name__ = funcname
globals()[funcname].__qualname__ = funcname
### Any user can Boost, but it will cost.
@view_config(request_method="POST", routes=['server_Preparing', 'server_by_id_Preparing'],
renderer='json', permission="use")
def boost_server(request):
"""Boost a server: ie:
Debit the users account
Schedule a De-Boost
Set the CPUs and RAM
Put the server in a "preparing" status
:param {vm or name}: ID of VApp which we want to boost.
:ram: ram wanted
:cores: cores wanted
:hours: hours of boost wanted
:returns: JSON containing VApp ID and job ID for progress calls.
"""
vm_id, actor_id = _resolve_vm(request)
hours = int(request.POST['hours'])
cores = int(request.POST['cores'])
ram = int(request.POST['ram'])
# FIXME: Really the user should boost to a named level, rather than directly
# specifying RAM and cores. For now I'm just going to work out the cost based
# on the cores requested, and assume the RAM level matches it.
cost = server.check_and_remove_credits(actor_id, ram, cores, hours)
if not cost:
#Either we can't afford it or we can't determine the cost.
        return HTTPBadRequest()
#Schedule a de-boost
server.touch_to_add_deboost(vm_id, hours)
# Set spec
server.touch_to_add_specification(vm_id, cores, ram)
# Tell the agents to get to work.
touch_id = server.touch_to_state(actor_id, vm_id, "Preparing")
return dict(touch_id=touch_id, vm_id=vm_id, cost=cost)
# Likewise any user can deboost.
@view_config(request_method="POST", routes=['server_Pre_Deboosting', 'server_by_id_Pre_Deboosting'],
renderer='json', permission="use")
def deboost_server(request):
"""Deboost a server: ie:
Credit the users account
Cancel any scheduled De-Boost
Set the CPUs and RAM to the previous state
Put the server in a "Pre_Deboosting" status
Note that a user can Deboost at ANY time, but they only get credit if credit is due.
Deboosting a non-boosted server just amounts to a restart.
:param {vm or name}: ID of VApp which we want to deboost.
:returns: ???
"""
vm_id, actor_id = _resolve_vm(request)
credit = server.get_time_until_deboost(vm_id)[3]
server.touch_to_add_credit(actor_id, credit)
#Scheduled timeouts don't need cancelling as they are ignored on unboosted servers.
#FIXME - yet more hard-coding for cores/RAM
prev_cores = 1
prev_ram = 16
try:
prev_cores, prev_ram = server.get_previous_specification(vm_id)
except:
#OK, use the defaults.
pass
#If we're not careful, with this "go back to previous config" semantics, if a user de-boosts
#a server twice they will actually end up setting their baseline config to the boosted specs.
#Therefore do a check.
current_cores, current_ram = server.get_latest_specification(vm_id)
if not (prev_ram > current_ram):
server.touch_to_add_specification(vm_id, prev_cores, prev_ram)
# Tell the agents to get to work.
touch_id = server.touch_to_state(actor_id, vm_id, "Pre_Deboosting")
return dict(touch_id=touch_id, vm_id=vm_id, credit=credit)
@view_config(request_method="POST", routes=['server_extend_boost', 'server_by_id_extend_boost'],
renderer='json', permission="use")
def extend_boost_on_server(request):
"""Extends the Boost period on a server by adding a new deboost timeout, if
the user can afford it, and debiting the cost.
"""
vm_id, actor_id = _resolve_vm(request)
hours = int(request.POST['hours'])
#See what level of boost we have just now. Again, need to FIXME that hard-coding
cores, ram = server.get_latest_specification(vm_id)
cost = server.check_and_remove_credits(actor_id, ram, cores, hours)
if not cost:
#Either we can't afford it or we can't determine the cost.
        return HTTPBadRequest()
#Work out when the new de-boost should be. First get the remaining boost time as
#hours. It's unlikely to be a whole number. If the boost has expired somehow then
#don't be mean - count from now.
remaining_time = (server.get_time_until_deboost(vm_id)[1] or 0) / 3600.0
    if remaining_time < 0: remaining_time = 0
#Schedule a later de-boost
server.touch_to_add_deboost(vm_id, hours + remaining_time)
return dict(vm_id=vm_id, cost=cost)
# Find out what needs de-boosting (agents only)
@view_config(request_method="GET", route_name='deboosts', renderer='json', permission="act")
def deboost_jobs(request):
""" Calls get_deboost_jobs, which is what the deboost_daemon needs in order to work.
Defaults to getting all deboosts that expired within the last 60 minutes.
"""
past = int(request.params.get('past', 60))
future = int(request.params.get('future', 0))
return server.get_deboost_jobs(past, future)
@view_config(request_method="GET", route_name='server_touches', renderer='json', permission="use")
def retrieve_server_touches(request):
""" Retrieve activity log from recent touches. """
# FIXME - Clearly this hasn't been implemented.
name = request.matchdict['name']
return name
@view_config(request_method="POST", renderer='json', permission="act",
routes=['server_specification', 'server_by_id_specification'])
def set_server_specification(request):
""" Set number of cores and amount of RAM for a VM. These numbers should
only match the given specification types listed below.
Regular users can only do this indirectly via boost/deboost.
"""
vm_id, actor_id = _resolve_vm(request)
# FIXME - This really shouldn't be hardcoded.
cores = request.POST.get('cores')
ram = request.POST.get('ram')
if (cores not in ['1', '2', '4', '16']) or (ram not in ['1', '4', '8', '16', '400']):
return HTTPBadRequest()
else:
server.touch_to_add_specification(vm_id, cores, ram)
return dict(cores=cores, ram=ram, artifact_id=vm_id)
@view_config(request_method="GET", renderer='json', permission="use",
routes=['server_specification', 'server_by_id_specification'])
def get_server_specification(request):
""" Get the specification of a machine. Returns RAM in GB and number of
cores in a JSON object."""
vm_id, actor_id = _resolve_vm(request)
cores, ram = server.get_latest_specification(vm_id)
return dict(cores=cores, ram=ram, artifact_id=vm_id)
| bsd-3-clause | 8,918,893,795,204,798,000 | 42.060914 | 127 | 0.643876 | false |
icaoberg/cellorganizer-galaxy-tools | datatypes/sequence.py | 1 | 47418 | """
Sequence classes
"""
import gzip
import json
import logging
import os
import re
import string
from cgi import escape
from galaxy import util
from galaxy.datatypes import metadata
from galaxy.util.checkers import is_gzip
from galaxy.datatypes.metadata import MetadataElement
from galaxy.datatypes.sniff import get_headers
from galaxy.datatypes.util.image_util import check_image_type
from galaxy.util import nice_size
from . import data
import bx.align.maf
log = logging.getLogger(__name__)
class SequenceSplitLocations( data.Text ):
"""
Class storing information about a sequence file composed of multiple gzip files concatenated as
one OR an uncompressed file. In the GZIP case, each sub-file's location is stored in start and end.
The format of the file is JSON::
{ "sections" : [
{ "start" : "x", "end" : "y", "sequences" : "z" },
...
]}
"""
def set_peek( self, dataset, is_multi_byte=False ):
if not dataset.dataset.purged:
try:
parsed_data = json.load(open(dataset.file_name))
# dataset.peek = json.dumps(data, sort_keys=True, indent=4)
dataset.peek = data.get_file_peek( dataset.file_name, is_multi_byte=is_multi_byte )
dataset.blurb = '%d sections' % len(parsed_data['sections'])
except Exception:
dataset.peek = 'Not FQTOC file'
dataset.blurb = 'Not FQTOC file'
else:
dataset.peek = 'file does not exist'
dataset.blurb = 'file purged from disk'
file_ext = "fqtoc"
def sniff( self, filename ):
if os.path.getsize(filename) < 50000:
try:
data = json.load(open(filename))
sections = data['sections']
for section in sections:
if 'start' not in section or 'end' not in section or 'sequences' not in section:
return False
return True
except:
pass
return False
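# A minimal FQTOC file (illustrative, following the schema in the docstring
# above) might contain:
#   { "sections": [ { "start": "0", "end": "74", "sequences": "10" } ] }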
class Sequence( data.Text ):
"""Class describing a sequence"""
"""Add metadata elements"""
MetadataElement( name="sequences", default=0, desc="Number of sequences", readonly=True, visible=False, optional=True, no_value=0 )
def set_meta( self, dataset, **kwd ):
"""
Set the number of sequences and the number of data lines in dataset.
"""
data_lines = 0
sequences = 0
for line in file( dataset.file_name ):
line = line.strip()
if line and line.startswith( '#' ):
# We don't count comment lines for sequence data types
continue
if line and line.startswith( '>' ):
sequences += 1
data_lines += 1
else:
data_lines += 1
dataset.metadata.data_lines = data_lines
dataset.metadata.sequences = sequences
def set_peek( self, dataset, is_multi_byte=False ):
if not dataset.dataset.purged:
dataset.peek = data.get_file_peek( dataset.file_name, is_multi_byte=is_multi_byte )
if dataset.metadata.sequences:
dataset.blurb = "%s sequences" % util.commaify( str( dataset.metadata.sequences ) )
else:
dataset.blurb = nice_size( dataset.get_size() )
else:
dataset.peek = 'file does not exist'
dataset.blurb = 'file purged from disk'
def get_sequences_per_file(total_sequences, split_params):
if split_params['split_mode'] == 'number_of_parts':
# legacy basic mode - split into a specified number of parts
parts = int(split_params['split_size'])
sequences_per_file = [total_sequences / parts for i in range(parts)]
for i in range(total_sequences % parts):
sequences_per_file[i] += 1
elif split_params['split_mode'] == 'to_size':
# loop through the sections and calculate the number of sequences
chunk_size = long(split_params['split_size'])
rem = total_sequences % chunk_size
sequences_per_file = [chunk_size for i in range(total_sequences / chunk_size)]
# TODO: Should we invest the time in a better way to handle small remainders?
if rem > 0:
sequences_per_file.append(rem)
else:
raise Exception('Unsupported split mode %s' % split_params['split_mode'])
return sequences_per_file
get_sequences_per_file = staticmethod(get_sequences_per_file)
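    # Worked example (illustrative): for total_sequences=10,
    # split_mode='number_of_parts' with split_size=3 yields [4, 3, 3], while
    # split_mode='to_size' with split_size=4 yields [4, 4, 2].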
def do_slow_split( cls, input_datasets, subdir_generator_function, split_params):
# count the sequences so we can split
# TODO: if metadata is present, take the number of lines / 4
if input_datasets[0].metadata is not None and input_datasets[0].metadata.sequences is not None:
total_sequences = input_datasets[0].metadata.sequences
else:
input_file = input_datasets[0].file_name
compress = is_gzip(input_file)
if compress:
# gzip is really slow before python 2.7!
in_file = gzip.GzipFile(input_file, 'r')
else:
# TODO
# if a file is not compressed, seek locations can be calculated and stored
# ideally, this would be done in metadata
# TODO
# Add BufferedReader if python 2.7?
in_file = open(input_file, 'rt')
total_sequences = long(0)
for i, line in enumerate(in_file):
total_sequences += 1
in_file.close()
total_sequences /= 4
sequences_per_file = cls.get_sequences_per_file(total_sequences, split_params)
return cls.write_split_files(input_datasets, None, subdir_generator_function, sequences_per_file)
do_slow_split = classmethod(do_slow_split)
def do_fast_split( cls, input_datasets, toc_file_datasets, subdir_generator_function, split_params):
data = json.load(open(toc_file_datasets[0].file_name))
sections = data['sections']
total_sequences = long(0)
for section in sections:
total_sequences += long(section['sequences'])
sequences_per_file = cls.get_sequences_per_file(total_sequences, split_params)
return cls.write_split_files(input_datasets, toc_file_datasets, subdir_generator_function, sequences_per_file)
do_fast_split = classmethod(do_fast_split)
def write_split_files(cls, input_datasets, toc_file_datasets, subdir_generator_function, sequences_per_file):
directories = []
def get_subdir(idx):
if idx < len(directories):
return directories[idx]
dir = subdir_generator_function()
directories.append(dir)
return dir
# we know how many splits and how many sequences in each. What remains is to write out instructions for the
# splitting of all the input files. To decouple the format of those instructions from this code, the exact format of
# those instructions is delegated to scripts
start_sequence = 0
for part_no in range(len(sequences_per_file)):
dir = get_subdir(part_no)
for ds_no in range(len(input_datasets)):
ds = input_datasets[ds_no]
base_name = os.path.basename(ds.file_name)
part_path = os.path.join(dir, base_name)
split_data = dict(class_name='%s.%s' % (cls.__module__, cls.__name__),
output_name=part_path,
input_name=ds.file_name,
args=dict(start_sequence=start_sequence, num_sequences=sequences_per_file[part_no]))
if toc_file_datasets is not None:
toc = toc_file_datasets[ds_no]
split_data['args']['toc_file'] = toc.file_name
f = open(os.path.join(dir, 'split_info_%s.json' % base_name), 'w')
json.dump(split_data, f)
f.close()
start_sequence += sequences_per_file[part_no]
return directories
write_split_files = classmethod(write_split_files)
def split( cls, input_datasets, subdir_generator_function, split_params):
"""Split a generic sequence file (not sensible or possible, see subclasses)."""
if split_params is None:
return None
raise NotImplementedError("Can't split generic sequence files")
def get_split_commands_with_toc(input_name, output_name, toc_file, start_sequence, sequence_count):
"""
Uses a Table of Contents dict, parsed from an FQTOC file, to come up with a set of
shell commands that will extract the parts necessary
>>> three_sections=[dict(start=0, end=74, sequences=10), dict(start=74, end=148, sequences=10), dict(start=148, end=148+76, sequences=10)]
>>> Sequence.get_split_commands_with_toc('./input.gz', './output.gz', dict(sections=three_sections), start_sequence=0, sequence_count=10)
['dd bs=1 skip=0 count=74 if=./input.gz 2> /dev/null >> ./output.gz']
>>> Sequence.get_split_commands_with_toc('./input.gz', './output.gz', dict(sections=three_sections), start_sequence=1, sequence_count=5)
['(dd bs=1 skip=0 count=74 if=./input.gz 2> /dev/null )| zcat | ( tail -n +5 2> /dev/null) | head -20 | gzip -c >> ./output.gz']
>>> Sequence.get_split_commands_with_toc('./input.gz', './output.gz', dict(sections=three_sections), start_sequence=0, sequence_count=20)
['dd bs=1 skip=0 count=148 if=./input.gz 2> /dev/null >> ./output.gz']
>>> Sequence.get_split_commands_with_toc('./input.gz', './output.gz', dict(sections=three_sections), start_sequence=5, sequence_count=10)
['(dd bs=1 skip=0 count=74 if=./input.gz 2> /dev/null )| zcat | ( tail -n +21 2> /dev/null) | head -20 | gzip -c >> ./output.gz', '(dd bs=1 skip=74 count=74 if=./input.gz 2> /dev/null )| zcat | ( tail -n +1 2> /dev/null) | head -20 | gzip -c >> ./output.gz']
>>> Sequence.get_split_commands_with_toc('./input.gz', './output.gz', dict(sections=three_sections), start_sequence=10, sequence_count=10)
['dd bs=1 skip=74 count=74 if=./input.gz 2> /dev/null >> ./output.gz']
>>> Sequence.get_split_commands_with_toc('./input.gz', './output.gz', dict(sections=three_sections), start_sequence=5, sequence_count=20)
['(dd bs=1 skip=0 count=74 if=./input.gz 2> /dev/null )| zcat | ( tail -n +21 2> /dev/null) | head -20 | gzip -c >> ./output.gz', 'dd bs=1 skip=74 count=74 if=./input.gz 2> /dev/null >> ./output.gz', '(dd bs=1 skip=148 count=76 if=./input.gz 2> /dev/null )| zcat | ( tail -n +1 2> /dev/null) | head -20 | gzip -c >> ./output.gz']
"""
sections = toc_file['sections']
result = []
current_sequence = long(0)
i = 0
# skip to the section that contains my starting sequence
while i < len(sections) and start_sequence >= current_sequence + long(sections[i]['sequences']):
current_sequence += long(sections[i]['sequences'])
i += 1
if i == len(sections): # bad input data!
raise Exception('No FQTOC section contains starting sequence %s' % start_sequence)
# These two variables act as an accumulator for consecutive entire blocks that
# can be copied verbatim (without decompressing)
start_chunk = long(-1)
end_chunk = long(-1)
copy_chunk_cmd = 'dd bs=1 skip=%s count=%s if=%s 2> /dev/null >> %s'
while sequence_count > 0 and i < len(sections):
# we need to extract partial data. So, find the byte offsets of the chunks that contain the data we need
# use a combination of dd (to pull just the right sections out) tail (to skip lines) and head (to get the
# right number of lines
sequences = long(sections[i]['sequences'])
skip_sequences = start_sequence - current_sequence
sequences_to_extract = min(sequence_count, sequences - skip_sequences)
start_copy = long(sections[i]['start'])
end_copy = long(sections[i]['end'])
if sequences_to_extract < sequences:
if start_chunk > -1:
result.append(copy_chunk_cmd % (start_chunk, end_chunk - start_chunk, input_name, output_name))
start_chunk = -1
# extract, unzip, trim, recompress
result.append('(dd bs=1 skip=%s count=%s if=%s 2> /dev/null )| zcat | ( tail -n +%s 2> /dev/null) | head -%s | gzip -c >> %s' %
(start_copy, end_copy - start_copy, input_name, skip_sequences * 4 + 1, sequences_to_extract * 4, output_name))
else: # whole section - add it to the start_chunk/end_chunk accumulator
if start_chunk == -1:
start_chunk = start_copy
end_chunk = end_copy
sequence_count -= sequences_to_extract
start_sequence += sequences_to_extract
current_sequence += sequences
i += 1
if start_chunk > -1:
result.append(copy_chunk_cmd % (start_chunk, end_chunk - start_chunk, input_name, output_name))
if sequence_count > 0:
raise Exception('%s sequences not found in file' % sequence_count)
return result
get_split_commands_with_toc = staticmethod(get_split_commands_with_toc)
def get_split_commands_sequential(is_compressed, input_name, output_name, start_sequence, sequence_count):
"""
Does a brain-dead sequential scan & extract of certain sequences
>>> Sequence.get_split_commands_sequential(True, './input.gz', './output.gz', start_sequence=0, sequence_count=10)
['zcat "./input.gz" | ( tail -n +1 2> /dev/null) | head -40 | gzip -c > "./output.gz"']
>>> Sequence.get_split_commands_sequential(False, './input.fastq', './output.fastq', start_sequence=10, sequence_count=10)
['tail -n +41 "./input.fastq" 2> /dev/null | head -40 > "./output.fastq"']
"""
start_line = start_sequence * 4
line_count = sequence_count * 4
# TODO: verify that tail can handle 64-bit numbers
if is_compressed:
cmd = 'zcat "%s" | ( tail -n +%s 2> /dev/null) | head -%s | gzip -c' % (input_name, start_line + 1, line_count)
else:
cmd = 'tail -n +%s "%s" 2> /dev/null | head -%s' % (start_line + 1, input_name, line_count)
cmd += ' > "%s"' % output_name
return [cmd]
get_split_commands_sequential = staticmethod(get_split_commands_sequential)
class Alignment( data.Text ):
"""Class describing an alignment"""
"""Add metadata elements"""
MetadataElement( name="species", desc="Species", default=[], param=metadata.SelectParameter, multiple=True, readonly=True, no_value=None )
def split( cls, input_datasets, subdir_generator_function, split_params):
"""Split a generic alignment file (not sensible or possible, see subclasses)."""
if split_params is None:
return None
raise NotImplementedError("Can't split generic alignment files")
class Fasta( Sequence ):
"""Class representing a FASTA sequence"""
edam_format = "format_1929"
file_ext = "fasta"
def sniff( self, filename ):
"""
Determines whether the file is in fasta format
A sequence in FASTA format consists of a single-line description, followed by lines of sequence data.
The first character of the description line is a greater-than (">") symbol in the first column.
All lines should be shorter than 80 characters
For complete details see http://www.ncbi.nlm.nih.gov/blast/fasta.shtml
Rules for sniffing as True:
We don't care about line length (other than empty lines).
        The first non-empty line must start with '>' and the very next line.strip() must have sequence data and not be a header.
'sequence data' here is loosely defined as non-empty lines which do not start with '>'
This will cause Color Space FASTA (csfasta) to be detected as True (they are, after all, still FASTA files - they have a header line followed by sequence data)
        Previously this method did some checking to determine if the sequence data had integers (presumably to differentiate between fasta and csfasta).
        This should be done through sniff order, where csfasta (which currently has a null sniff function) is detected first (stricter definition), followed sometime after by fasta.
We will only check that the first purported sequence is correctly formatted.
>>> from galaxy.datatypes.sniff import get_test_fname
>>> fname = get_test_fname( 'sequence.maf' )
>>> Fasta().sniff( fname )
False
>>> fname = get_test_fname( 'sequence.fasta' )
>>> Fasta().sniff( fname )
True
"""
try:
fh = open( filename )
while True:
line = fh.readline()
if not line:
break # EOF
line = line.strip()
if line: # first non-empty line
if line.startswith( '>' ):
                        # The next line.strip() must not be '', nor start with '>'
line = fh.readline().strip()
if line == '' or line.startswith( '>' ):
break
# If there is a third line, and it isn't a header line, it may not contain chars like '()[].' otherwise it's most likely a DotBracket file
line = fh.readline()
                        if not line.startswith('>') and re.search(r"[\(\)\[\]\.]", line):
break
return True
else:
break # we found a non-empty line, but it's not a fasta header
fh.close()
except:
pass
return False
def split(cls, input_datasets, subdir_generator_function, split_params):
"""Split a FASTA file sequence by sequence.
Note that even if split_mode="number_of_parts", the actual number of
sub-files produced may not match that requested by split_size.
If split_mode="to_size" then split_size is treated as the number of
FASTA records to put in each sub-file (not size in bytes).
"""
if split_params is None:
return
if len(input_datasets) > 1:
raise Exception("FASTA file splitting does not support multiple files")
input_file = input_datasets[0].file_name
# Counting chunk size as number of sequences.
if 'split_mode' not in split_params:
raise Exception('Tool does not define a split mode')
elif split_params['split_mode'] == 'number_of_parts':
split_size = int(split_params['split_size'])
log.debug("Split %s into %i parts..." % (input_file, split_size))
            # if split_mode = number_of_parts, and split_size = 10, and
            # we know the number of sequences (say 1234), then divide
            # by ten, giving ten files of approx 123 sequences each.
if input_datasets[0].metadata is not None and input_datasets[0].metadata.sequences:
# Galaxy has already counted/estimated the number
batch_size = 1 + input_datasets[0].metadata.sequences // split_size
cls._count_split(input_file, batch_size, subdir_generator_function)
else:
# OK, if Galaxy hasn't counted them, it may be a big file.
# We're not going to count the records which would be slow
# and a waste of disk IO time - instead we'll split using
# the file size.
chunk_size = os.path.getsize(input_file) // split_size
cls._size_split(input_file, chunk_size, subdir_generator_function)
elif split_params['split_mode'] == 'to_size':
# Split the input file into as many sub-files as required,
# each containing to_size many sequences
batch_size = int(split_params['split_size'])
log.debug("Split %s into batches of %i records..." % (input_file, batch_size))
cls._count_split(input_file, batch_size, subdir_generator_function)
else:
raise Exception('Unsupported split mode %s' % split_params['split_mode'])
split = classmethod(split)
def _size_split(cls, input_file, chunk_size, subdir_generator_function):
"""Split a FASTA file into chunks based on size on disk.
        This does of course preserve complete records - it only splits at the
        start of a new FASTA sequence record.
"""
log.debug("Attemping to split FASTA file %s into chunks of %i bytes" % (input_file, chunk_size))
f = open(input_file, "rU")
part_file = None
try:
# Note if the input FASTA file has no sequences, we will
# produce just one sub-file which will be a copy of it.
part_dir = subdir_generator_function()
part_path = os.path.join(part_dir, os.path.basename(input_file))
part_file = open(part_path, 'w')
log.debug("Writing %s part to %s" % (input_file, part_path))
start_offset = 0
while True:
offset = f.tell()
line = f.readline()
if not line:
break
if line[0] == ">" and offset - start_offset >= chunk_size:
# Start a new sub-file
part_file.close()
part_dir = subdir_generator_function()
part_path = os.path.join(part_dir, os.path.basename(input_file))
part_file = open(part_path, 'w')
log.debug("Writing %s part to %s" % (input_file, part_path))
start_offset = f.tell()
part_file.write(line)
except Exception as e:
log.error('Unable to size split FASTA file: %s' % str(e))
f.close()
if part_file is not None:
part_file.close()
raise
f.close()
_size_split = classmethod(_size_split)
def _count_split(cls, input_file, chunk_size, subdir_generator_function):
"""Split a FASTA file into chunks based on counting records."""
log.debug("Attemping to split FASTA file %s into chunks of %i sequences" % (input_file, chunk_size))
f = open(input_file, "rU")
part_file = None
try:
# Note if the input FASTA file has no sequences, we will
# produce just one sub-file which will be a copy of it.
part_dir = subdir_generator_function()
part_path = os.path.join(part_dir, os.path.basename(input_file))
part_file = open(part_path, 'w')
log.debug("Writing %s part to %s" % (input_file, part_path))
rec_count = 0
while True:
line = f.readline()
if not line:
break
if line[0] == ">":
rec_count += 1
if rec_count > chunk_size:
# Start a new sub-file
part_file.close()
part_dir = subdir_generator_function()
part_path = os.path.join(part_dir, os.path.basename(input_file))
part_file = open(part_path, 'w')
log.debug("Writing %s part to %s" % (input_file, part_path))
rec_count = 1
part_file.write(line)
part_file.close()
except Exception as e:
log.error('Unable to count split FASTA file: %s' % str(e))
f.close()
if part_file is not None:
part_file.close()
raise
f.close()
_count_split = classmethod(_count_split)
class csFasta( Sequence ):
""" Class representing the SOLID Color-Space sequence ( csfasta ) """
edam_format = "format_1929"
file_ext = "csfasta"
def sniff( self, filename ):
"""
Color-space sequence:
>2_15_85_F3
T213021013012303002332212012112221222112212222
>>> from galaxy.datatypes.sniff import get_test_fname
>>> fname = get_test_fname( 'sequence.fasta' )
>>> csFasta().sniff( fname )
False
>>> fname = get_test_fname( 'sequence.csfasta' )
>>> csFasta().sniff( fname )
True
"""
try:
fh = open( filename )
while True:
line = fh.readline()
if not line:
break # EOF
line = line.strip()
if line and not line.startswith( '#' ): # first non-empty non-comment line
if line.startswith( '>' ):
line = fh.readline().strip()
if line == '' or line.startswith( '>' ):
break
elif line[0] not in string.ascii_uppercase:
return False
elif len( line ) > 1 and not re.search( '^[\d.]+$', line[1:] ):
return False
return True
else:
break # we found a non-empty line, but it's not a header
fh.close()
except:
pass
return False
def set_meta( self, dataset, **kwd ):
if self.max_optional_metadata_filesize >= 0 and dataset.get_size() > self.max_optional_metadata_filesize:
dataset.metadata.data_lines = None
dataset.metadata.sequences = None
return
return Sequence.set_meta( self, dataset, **kwd )
class Fastq ( Sequence ):
"""Class representing a generic FASTQ sequence"""
edam_format = "format_1930"
file_ext = "fastq"
def set_meta( self, dataset, **kwd ):
"""
Set the number of sequences and the number of data lines
in dataset.
FIXME: This does not properly handle line wrapping
"""
if self.max_optional_metadata_filesize >= 0 and dataset.get_size() > self.max_optional_metadata_filesize:
dataset.metadata.data_lines = None
dataset.metadata.sequences = None
return
data_lines = 0
sequences = 0
seq_counter = 0 # blocks should be 4 lines long
for line in file( dataset.file_name ):
line = line.strip()
if line and line.startswith( '#' ) and not data_lines:
# We don't count comment lines for sequence data types
continue
seq_counter += 1
data_lines += 1
if line and line.startswith( '@' ):
if seq_counter >= 4:
# count previous block
# blocks should be 4 lines long
sequences += 1
seq_counter = 1
if seq_counter >= 4:
# count final block
sequences += 1
dataset.metadata.data_lines = data_lines
dataset.metadata.sequences = sequences
def sniff( self, filename ):
"""
Determines whether the file is in generic fastq format
For details, see http://maq.sourceforge.net/fastq.shtml
Note: There are three kinds of FASTQ files, known as "Sanger" (sometimes called "Standard"), Solexa, and Illumina
These differ in the representation of the quality scores
>>> from galaxy.datatypes.sniff import get_test_fname
>>> fname = get_test_fname( '1.fastqsanger' )
>>> Fastq().sniff( fname )
True
>>> fname = get_test_fname( '2.fastqsanger' )
>>> Fastq().sniff( fname )
True
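
        Illustrative negative case (reuses the 'sequence.fasta' fixture from
        the Fasta sniffer above; a FASTA header starts with '>', not '@'):

        >>> fname = get_test_fname( 'sequence.fasta' )
        >>> Fastq().sniff( fname )
        False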
"""
headers = get_headers( filename, None )
        bases_regexp = re.compile( "^[NGTAC]*$" )
# check that first block looks like a fastq block
try:
if len( headers ) >= 4 and headers[0][0] and headers[0][0][0] == "@" and headers[2][0] and headers[2][0][0] == "+" and headers[1][0]:
# Check the sequence line, make sure it contains only G/C/A/T/N
if not bases_regexp.match( headers[1][0] ):
return False
return True
return False
except:
return False
def split( cls, input_datasets, subdir_generator_function, split_params):
"""
FASTQ files are split on cluster boundaries, in increments of 4 lines
"""
if split_params is None:
return None
# first, see if there are any associated FQTOC files that will give us the split locations
# if so, we don't need to read the files to do the splitting
toc_file_datasets = []
for ds in input_datasets:
tmp_ds = ds
fqtoc_file = None
while fqtoc_file is None and tmp_ds is not None:
fqtoc_file = tmp_ds.get_converted_files_by_type('fqtoc')
tmp_ds = tmp_ds.copied_from_library_dataset_dataset_association
if fqtoc_file is not None:
toc_file_datasets.append(fqtoc_file)
if len(toc_file_datasets) == len(input_datasets):
return cls.do_fast_split(input_datasets, toc_file_datasets, subdir_generator_function, split_params)
return cls.do_slow_split(input_datasets, subdir_generator_function, split_params)
split = classmethod(split)
def process_split_file(data):
"""
This is called in the context of an external process launched by a Task (possibly not on the Galaxy machine)
to create the input files for the Task. The parameters:
data - a dict containing the contents of the split file
"""
args = data['args']
input_name = data['input_name']
output_name = data['output_name']
start_sequence = long(args['start_sequence'])
sequence_count = long(args['num_sequences'])
if 'toc_file' in args:
toc_file = json.load(open(args['toc_file'], 'r'))
commands = Sequence.get_split_commands_with_toc(input_name, output_name, toc_file, start_sequence, sequence_count)
else:
commands = Sequence.get_split_commands_sequential(is_gzip(input_name), input_name, output_name, start_sequence, sequence_count)
for cmd in commands:
if 0 != os.system(cmd):
raise Exception("Executing '%s' failed" % cmd)
return True
process_split_file = staticmethod(process_split_file)
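    # Illustrative 'data' payload for process_split_file (hypothetical
    # values; the keys match those read above, and 'toc_file' is optional):
    #   {'args': {'start_sequence': '0', 'num_sequences': '1000',
    #             'toc_file': '/path/to/toc.json'},
    #    'input_name': 'part.fastq.gz',
    #    'output_name': 'task_0.fastq.gz'}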
class FastqSanger( Fastq ):
"""Class representing a FASTQ sequence ( the Sanger variant )"""
edam_format = "format_1932"
file_ext = "fastqsanger"
class FastqSolexa( Fastq ):
"""Class representing a FASTQ sequence ( the Solexa variant )"""
edam_format = "format_1933"
file_ext = "fastqsolexa"
class FastqIllumina( Fastq ):
"""Class representing a FASTQ sequence ( the Illumina 1.3+ variant )"""
edam_format = "format_1931"
file_ext = "fastqillumina"
class FastqCSSanger( Fastq ):
"""Class representing a Color Space FASTQ sequence ( e.g a SOLiD variant )"""
file_ext = "fastqcssanger"
class Maf( Alignment ):
"""Class describing a Maf alignment"""
edam_format = "format_3008"
file_ext = "maf"
# Readonly and optional, users can't unset it, but if it is not set, we are generally ok; if required use a metadata validator in the tool definition
MetadataElement( name="blocks", default=0, desc="Number of blocks", readonly=True, optional=True, visible=False, no_value=0 )
MetadataElement( name="species_chromosomes", desc="Species Chromosomes", param=metadata.FileParameter, readonly=True, no_value=None, visible=False, optional=True )
MetadataElement( name="maf_index", desc="MAF Index File", param=metadata.FileParameter, readonly=True, no_value=None, visible=False, optional=True )
def init_meta( self, dataset, copy_from=None ):
Alignment.init_meta( self, dataset, copy_from=copy_from )
def set_meta( self, dataset, overwrite=True, **kwd ):
"""
Parses and sets species, chromosomes, index from MAF file.
"""
# these metadata values are not accessable by users, always overwrite
# Imported here to avoid circular dependency
from galaxy.tools.util.maf_utilities import build_maf_index_species_chromosomes
indexes, species, species_chromosomes, blocks = build_maf_index_species_chromosomes( dataset.file_name )
if indexes is None:
return # this is not a MAF file
dataset.metadata.species = species
dataset.metadata.blocks = blocks
# write species chromosomes to a file
chrom_file = dataset.metadata.species_chromosomes
if not chrom_file:
chrom_file = dataset.metadata.spec['species_chromosomes'].param.new_file( dataset=dataset )
chrom_out = open( chrom_file.file_name, 'wb' )
for spec, chroms in species_chromosomes.items():
chrom_out.write( "%s\t%s\n" % ( spec, "\t".join( chroms ) ) )
chrom_out.close()
dataset.metadata.species_chromosomes = chrom_file
index_file = dataset.metadata.maf_index
if not index_file:
index_file = dataset.metadata.spec['maf_index'].param.new_file( dataset=dataset )
indexes.write( open( index_file.file_name, 'wb' ) )
dataset.metadata.maf_index = index_file
def set_peek( self, dataset, is_multi_byte=False ):
if not dataset.dataset.purged:
# The file must exist on disk for the get_file_peek() method
dataset.peek = data.get_file_peek( dataset.file_name, is_multi_byte=is_multi_byte )
if dataset.metadata.blocks:
dataset.blurb = "%s blocks" % util.commaify( str( dataset.metadata.blocks ) )
else:
# Number of blocks is not known ( this should not happen ), and auto-detect is
# needed to set metadata
dataset.blurb = "? blocks"
else:
dataset.peek = 'file does not exist'
dataset.blurb = 'file purged from disk'
def display_peek( self, dataset ):
"""Returns formated html of peek"""
return self.make_html_table( dataset )
def make_html_table( self, dataset, skipchars=[] ):
"""Create HTML table, used for displaying peek"""
out = ['<table cellspacing="0" cellpadding="3">']
try:
out.append('<tr><th>Species: ')
for species in dataset.metadata.species:
out.append( '%s ' % species )
out.append( '</th></tr>' )
if not dataset.peek:
dataset.set_peek()
data = dataset.peek
lines = data.splitlines()
for line in lines:
line = line.strip()
if not line:
continue
out.append( '<tr><td>%s</td></tr>' % escape( line ) )
out.append( '</table>' )
out = "".join( out )
except Exception as exc:
out = "Can't create peek %s" % exc
return out
def sniff( self, filename ):
"""
        Determines whether the file is in maf format
The .maf format is line-oriented. Each multiple alignment ends with a blank line.
Each sequence in an alignment is on a single line, which can get quite long, but
there is no length limit. Words in a line are delimited by any white space.
Lines starting with # are considered to be comments. Lines starting with ## can
be ignored by most programs, but contain meta-data of one form or another.
The first line of a .maf file begins with ##maf. This word is followed by white-space-separated
variable=value pairs. There should be no white space surrounding the "=".
For complete details see http://genome.ucsc.edu/FAQ/FAQformat#format5
>>> from galaxy.datatypes.sniff import get_test_fname
>>> fname = get_test_fname( 'sequence.maf' )
>>> Maf().sniff( fname )
True
>>> fname = get_test_fname( 'sequence.fasta' )
>>> Maf().sniff( fname )
False
"""
headers = get_headers( filename, None )
try:
if len(headers) > 1 and headers[0][0] and headers[0][0] == "##maf":
return True
else:
return False
except:
return False
class MafCustomTrack( data.Text ):
file_ext = "mafcustomtrack"
MetadataElement( name="vp_chromosome", default='chr1', desc="Viewport Chromosome", readonly=True, optional=True, visible=False, no_value='' )
MetadataElement( name="vp_start", default='1', desc="Viewport Start", readonly=True, optional=True, visible=False, no_value='' )
MetadataElement( name="vp_end", default='100', desc="Viewport End", readonly=True, optional=True, visible=False, no_value='' )
def set_meta( self, dataset, overwrite=True, **kwd ):
"""
Parses and sets viewport metadata from MAF file.
"""
max_block_check = 10
chrom = None
forward_strand_start = float( 'inf' )
forward_strand_end = 0
try:
maf_file = open( dataset.file_name )
maf_file.readline() # move past track line
for i, block in enumerate( bx.align.maf.Reader( maf_file ) ):
ref_comp = block.get_component_by_src_start( dataset.metadata.dbkey )
if ref_comp:
ref_chrom = bx.align.maf.src_split( ref_comp.src )[-1]
if chrom is None:
chrom = ref_chrom
if chrom == ref_chrom:
forward_strand_start = min( forward_strand_start, ref_comp.forward_strand_start )
forward_strand_end = max( forward_strand_end, ref_comp.forward_strand_end )
if i > max_block_check:
break
if forward_strand_end > forward_strand_start:
dataset.metadata.vp_chromosome = chrom
dataset.metadata.vp_start = forward_strand_start
dataset.metadata.vp_end = forward_strand_end
except:
pass
class Axt( data.Text ):
"""Class describing an axt alignment"""
# gvk- 11/19/09 - This is really an alignment, but we no longer have tools that use this data type, and it is
# here simply for backward compatibility ( although it is still in the datatypes registry ). Subclassing
# from data.Text eliminates managing metadata elements inherited from the Alignemnt class.
file_ext = "axt"
def sniff( self, filename ):
"""
Determines whether the file is in axt format
axt alignment files are produced from Blastz, an alignment tool available from Webb Miller's lab
at Penn State University.
Each alignment block in an axt file contains three lines: a summary line and 2 sequence lines.
Blocks are separated from one another by blank lines.
The summary line contains chromosomal position and size information about the alignment. It
consists of 9 required fields.
The sequence lines contain the sequence of the primary assembly (line 2) and aligning assembly
(line 3) with inserts. Repeats are indicated by lower-case letters.
For complete details see http://genome.ucsc.edu/goldenPath/help/axt.html
>>> from galaxy.datatypes.sniff import get_test_fname
>>> fname = get_test_fname( 'alignment.axt' )
>>> Axt().sniff( fname )
True
>>> fname = get_test_fname( 'alignment.lav' )
>>> Axt().sniff( fname )
False
"""
headers = get_headers( filename, None )
if len(headers) < 4:
return False
for hdr in headers:
if len(hdr) > 0 and hdr[0].startswith("##matrix=axt"):
return True
if len(hdr) > 0 and not hdr[0].startswith("#"):
if len(hdr) != 9:
return False
try:
map( int, [hdr[0], hdr[2], hdr[3], hdr[5], hdr[6], hdr[8]] )
except:
return False
if hdr[7] not in data.valid_strand:
return False
else:
return True
class Lav( data.Text ):
"""Class describing a LAV alignment"""
edam_format = "format_3014"
file_ext = "lav"
# gvk- 11/19/09 - This is really an alignment, but we no longer have tools that use this data type, and it is
# here simply for backward compatibility ( although it is still in the datatypes registry ). Subclassing
# from data.Text eliminates managing metadata elements inherited from the Alignemnt class.
def sniff( self, filename ):
"""
Determines whether the file is in lav format
LAV is an alignment format developed by Webb Miller's group. It is the primary output format for BLASTZ.
The first line of a .lav file begins with #:lav.
For complete details see http://www.bioperl.org/wiki/LAV_alignment_format
>>> from galaxy.datatypes.sniff import get_test_fname
>>> fname = get_test_fname( 'alignment.lav' )
>>> Lav().sniff( fname )
True
>>> fname = get_test_fname( 'alignment.axt' )
>>> Lav().sniff( fname )
False
"""
headers = get_headers( filename, None )
try:
if len(headers) > 1 and headers[0][0] and headers[0][0].startswith('#:lav'):
return True
else:
return False
except:
return False
class RNADotPlotMatrix( data.Data ):
edam_format = "format_3466"
file_ext = "rna_eps"
def set_peek( self, dataset, is_multi_byte=False ):
if not dataset.dataset.purged:
dataset.peek = 'RNA Dot Plot format (Postscript derivative)'
dataset.blurb = nice_size( dataset.get_size() )
else:
dataset.peek = 'file does not exist'
dataset.blurb = 'file purged from disk'
def sniff(self, filename):
"""Determine if the file is in RNA dot plot format."""
if check_image_type( filename, ['EPS'] ):
seq = False
coor = False
pairs = False
with open( filename ) as handle:
for line in handle:
line = line.strip()
if line:
if line.startswith('/sequence'):
seq = True
elif line.startswith('/coor'):
coor = True
elif line.startswith('/pairs'):
pairs = True
if seq and coor and pairs:
return True
return False
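    # Note: this sniff is a heuristic -- any EPS image that defines all three
    # PostScript names checked above (/sequence, /coor, /pairs), as RNA dot
    # plot writers such as ViennaRNA do, is accepted.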
class DotBracket ( Sequence ):
edam_format = "format_1457"
file_ext = "dbn"
sequence_regexp = re.compile( "^[ACGTURYKMSWBDHVN]+$", re.I)
structure_regexp = re.compile( "^[\(\)\.\[\]{}]+$" )
def set_meta( self, dataset, **kwd ):
"""
Set the number of sequences and the number of data lines
in dataset.
"""
if self.max_optional_metadata_filesize >= 0 and dataset.get_size() > self.max_optional_metadata_filesize:
dataset.metadata.data_lines = None
dataset.metadata.sequences = None
            dataset.metadata.secondary_structures = None
return
data_lines = 0
sequences = 0
for line in file( dataset.file_name ):
line = line.strip()
data_lines += 1
if line and line.startswith( '>' ):
sequences += 1
dataset.metadata.data_lines = data_lines
dataset.metadata.sequences = sequences
def sniff(self, filename):
"""
Galaxy Dbn (Dot-Bracket notation) rules:
* The first non-empty line is a header line: no comment lines are allowed.
* A header line starts with a '>' symbol and continues with 0 or multiple symbols until the line ends.
* The second non-empty line is a sequence line.
* A sequence line may only include chars that match the Fasta format (https://en.wikipedia.org/wiki/FASTA_format#Sequence_representation) symbols for nucleotides: ACGTURYKMSWBDHVN, and may thus not include whitespaces.
* A sequence line has no prefix and no suffix.
* A sequence line is case insensitive.
* The third non-empty line is a structure (Dot-Bracket) line and only describes the 2D structure of the sequence above it.
* A structure line must consist of the following chars: '.{}[]()'.
* A structure line must be of the same length as the sequence line, and each char represents the structure of the nucleotide above it.
* A structure line has no prefix and no suffix.
* A nucleotide pairs with only 1 or 0 other nucleotides.
* In a structure line, the number of '(' symbols equals the number of ')' symbols, the number of '[' symbols equals the number of ']' symbols and the number of '{' symbols equals the number of '}' symbols.
* The format accepts multiple entries per file, given that each entry is provided as three lines: the header, sequence and structure line.
* Sniffing is only applied on the first entry.
* Empty lines are allowed.
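
        Illustrative check (assumes the standard test fixtures used by the
        other sniffers in this module; a FASTQ header starts with '@'):

        >>> from galaxy.datatypes.sniff import get_test_fname
        >>> fname = get_test_fname( '1.fastqsanger' )
        >>> DotBracket().sniff( fname )
        False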
"""
state = 0
with open( filename, "r" ) as handle:
for line in handle:
line = line.strip()
if line:
# header line
if state == 0:
                        if line[0] != '>':
return False
else:
state = 1
# sequence line
elif state == 1:
if not self.sequence_regexp.match(line):
return False
else:
sequence_size = len(line)
state = 2
# dot-bracket structure line
elif state == 2:
if sequence_size != len(line) or not self.structure_regexp.match(line) or \
line.count('(') != line.count(')') or \
line.count('[') != line.count(']') or \
line.count('{') != line.count('}'):
return False
else:
return True
# Number of lines is less than 3
return False
| gpl-3.0 | -7,907,747,532,020,869,000 | 44.16 | 337 | 0.574887 | false |
sdss/marvin | tasks.py | 1 | 6407 | #!/usr/bin/env python2
# -*- coding: utf-8 -*-
#
# Licensed under a 3-clause BSD license.
#
# @Author: Brian Cherinka
# @Date: 2017-06-10 16:46:40
# @Last modified by: José Sánchez-Gallego ([email protected])
# @Last modified time: 2018-11-14 19:37:22
from __future__ import absolute_import, division, print_function
import os
from invoke import Collection, task
DIRPATH = '/home/manga/software/git/manga/marvin'
MODULEPATH = '/home/manga/software/git/modulefiles'
@task
def clean_docs(ctx):
''' Cleans up the docs '''
print('Cleaning the docs')
ctx.run("rm -rf docs/sphinx/_build")
@task
def build_docs(ctx, clean=False):
''' Builds the Sphinx docs '''
if clean:
print('Cleaning the docs')
ctx.run("rm -rf docs/sphinx/_build")
print('Building the docs')
os.chdir('docs/sphinx')
ctx.run("make html", pty=True)
@task
def show_docs(ctx):
"""Shows the Sphinx docs"""
print('Showing the docs')
os.chdir('docs/sphinx/_build/html')
ctx.run('open ./index.html')
@task
def clean(ctx):
''' Cleans up the crap '''
print('Cleaning')
# ctx.run("rm -rf docs/sphinx/_build")
ctx.run("rm -rf htmlcov")
ctx.run("rm -rf build")
ctx.run("rm -rf dist")
@task(clean)
def deploy(ctx, repo=None):
''' Deploy to pypi '''
print('Deploying to Pypi!')
rstr = ''
if repo:
rstr = '-r {0}'.format(repo)
ctx.run("python setup.py sdist bdist_wheel --universal")
ctx.run("twine upload {0} dist/*".format(rstr))
@task
def update_default(ctx, path=None, version=None):
''' Updates the default version module file'''
assert version is not None, 'A version is required to update the default version!'
assert path is not None, 'A path must be specified!'
# update default version
f = open('.version', 'r+')
data = f.readlines()
data[1] = 'set ModulesVersion "{0}"\n'.format(version)
f.seek(0, 0)
f.writelines(data)
f.close()
@task
def update_module(ctx, path=None, wrap=None, version=None):
''' Update a module file '''
assert version is not None, 'A version is required to update the module file!'
assert path is not None, 'A path must be specified!'
print('Setting up module files!')
os.chdir(path)
newfile = 'mangawork.marvin_{0}'.format(version) if wrap else version
oldfile = 'mangawork.marvin_2.1.3' if wrap else 'master'
searchline = 'marvin' if wrap else 'version'
ctx.run('cp {0} {1}'.format(oldfile, newfile))
f = open('{0}'.format(newfile), 'r+')
data = f.readlines()
index, line = [(i, line) for i, line in enumerate(data)
if 'set {0}'.format(searchline) in line][0]
data[index] = 'set {0} {1}\n'.format(searchline, version)
f.seek(0, 0)
f.writelines(data)
f.close()
# update the default version
update_default(ctx, path=path, version=newfile)
@task
def update_git(ctx, version=None):
''' Update the git package at Utah '''
assert version is not None, 'A version is required to checkout a new git repo!'
print('Checking out git tag {0}'.format(version))
verpath = os.path.join(DIRPATH, version)
# checkout and setup new git tag
os.chdir(DIRPATH)
ctx.run('git clone https://github.com/sdss/marvin.git {0}'.format(version))
os.chdir(verpath)
ctx.run('git checkout {0}'.format(version))
ctx.run('git submodule update --init --recursive')
# ctx.run('python -c "from get_version import generate_version_py; '
# 'generate_version_py(\'sdss-marvin\', {0}, False)'.format(version))
@task
def update_current(ctx, version=None):
''' Update the current symlink '''
assert version is not None, 'A version is required to update the current symlink!'
# reset the current symlink
os.chdir(DIRPATH)
ctx.run('rm current')
ctx.run('ln -s {0} current'.format(version))
@task
def switch_module(ctx, version=None):
''' Switch to the marvin module of the specified version and start it '''
assert version is not None, 'A version is required to setup Marvin at Utah!'
ctx.run('uwsgi --stop /home/www/sas.sdss.org/mangawork/marvin/pid/uwsgi_marvin.pid')
ctx.run('module unload wrapmarvin')
ctx.run('module load wrapmarvin/mangawork.marvin_{0}'.format(version))
ctx.run('uwsgi /home/manga/software/git/manga/marvin/{0}/python/marvin/web/uwsgi_conf_files/uwsgi_marvin_mangawork.ini'.format(version))
@task
def update_uwsgi(ctx, version=None):
    ''' Reset the uwsgi symlink to the new version and touch the file so the uWSGI Emperor reloads Marvin '''
assert version is not None, 'A version is required to setup Marvin at Utah!'
os.chdir('/etc/uwsgi/vassals')
new_path = '/home/manga/software/git/manga/marvin/{0}/python/marvin/web/uwsgi_conf_files/uwsgi_marvin_mangawork.ini'.format(version)
ctx.run('rm uwsgi_marvin_mangawork.ini')
ctx.run('ln -s {0} uwsgi_marvin_mangawork.ini'.format(new_path))
ctx.run('touch uwsgi_marvin_mangawork.ini')
@task
def setup_utah(ctx, version=None):
''' Setup the package at Utah and update the release '''
assert version is not None, 'A version is required to setup Marvin at Utah!'
# update git
update_git(ctx, version=version)
# update_current
update_current(ctx, version=version)
# update modules
marvin = os.path.join(MODULEPATH, 'marvin')
wrap = os.path.join(MODULEPATH, 'wrapmarvin')
update_module(ctx, path=marvin, version=version)
update_module(ctx, path=wrap, wrap=True, version=version)
# restart the new marvin
# switch_module(ctx, version=version)
update_uwsgi(ctx, version=version)
print('Marvin version {0} is set up!\n'.format(version))
print('Check for the new Marvin version at the bottom of the Marvin Web main page!')
# print('Please run ...\n stopmarvin \n module switch wrapmarvin '
# 'wrapmarvin/mangawork.marvin_{0} \n startmarvin \n'.format(version))
os.chdir(os.path.dirname(__file__))
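# Illustrative invocations of the task namespaces wired up below (invoke
# maps underscores in task names to dashes on the CLI; version values are
# placeholders):
#   invoke docs.build --clean
#   invoke update.module --path=<modulefiles dir> --version=<version>
#   invoke setup-utah --version=<version>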
ns = Collection(clean, deploy, setup_utah)
docs = Collection('docs')
docs.add_task(build_docs, 'build')
docs.add_task(clean_docs, 'clean')
docs.add_task(show_docs, 'show')
ns.add_collection(docs)
updates = Collection('update')
updates.add_task(update_git, 'git')
updates.add_task(update_current, 'current')
updates.add_task(update_module, 'module')
updates.add_task(update_default, 'default')
ns.add_collection(updates)
| bsd-3-clause | -28,274,147,638,332,620 | 31.18593 | 140 | 0.667291 | false |
Kronos3/HTML_PARSER | src/config.py | 1 | 6102 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# config.py
#
# Copyright 2016 Andrei Tumbar <atuser@Kronos>
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston,
# MA 02110-1301, USA.
#
#
import os, sys
import platform
import gi
gi.require_version('Gtk', '3.0')
gi.require_version('GtkSource', '3.0')
from gi.repository import Gtk, GObject, GLib, GtkSource, Pango, Gdk
os.chdir ( os.path.dirname ( os.path.realpath ( __file__ ) ) )
import filetab, filemanager, builderset, project, configitem, configfile
class Config:
config_file_relative = ""
config_file_full = ""
__file_lines = None
__file = None
notebook = None
open_dialogue = None
var_dict = {}
var_list = []
list_vars = [ "output_files", "input_files" ]
conf_vars = [ "title", "css", "js" ]
variables_box = Gtk.Box ( )
configitems = []
rows = []
row_raw = []
current_file = {}
current = None
def __init__ ( self, curr_dir, config, notebook, open_dialogue ):
self.open_dialogue = open_dialogue
self.dir = curr_dir
self.notebook = notebook
self.new_config ( config )
def remove_config ( self ):
self.input.destroy ( )
self.output.destroy ( )
self.treeview.destroy ( )
self.var_store = None
self.var_rend = None
self.val_rend = None
self.treeview.destroy ( )
self.var_dict = {}
self.var_list = []
self.list_vars = [ "output_files", "input_files" ]
self.conf_vars = [ "title", "css", "js" ]
self.variables_box = Gtk.Box ( )
self.configitems = []
self.current_file = {}
self.current = None
def new_config ( self, config ):
self.config_file_relative = config
self.config_file_full = self.get_path ( config )
self.__file_lines = open ( self.config_file_relative, "r" ).readlines ( )
self.input = configitem.ConfigItem ( )
self.output = configitem.ConfigItem ( )
self.input.connect ( "new_config", self.get_new )
self.output.connect ( "new_config", self.get_new )
self.input.connect ( "remove_item", self.get_remove )
self.output.connect ( "remove_item", self.get_remove )
for l in self.__file_lines:
if l [ 0 ] == "#" or l == "" or l == "\n":
continue
var, val = l.split ( "=" )
# Remove the whitespace
var = var.strip ( )
val = val.strip ( )
self.var_dict [ var ] = val
self.var_list.append ( var )
if var in self.list_vars:
self.var_dict [ var ] = val.split ( "," )
for var in self.list_vars:
if not var:
continue
buff = self.var_dict [ var ]
exec ( "self.%s.set_notebook ( self.notebook )" % var.replace ( "_files", "" ) )
exec ( "self.%s.set_dialogue ( self.open_dialogue )" % var.replace ( "_files", "" ) )
exec ( "self.%s.add_items ( buff )" % var.replace ( "_files", "" ) )
self.__init_vars__ ( )
for var in self.var_list:
if ( not isinstance ( self.var_dict [ var ], list ) ):
self.add_var ( var )
def get_remove (self, buff_cfg, buff_item):
curr = "output"
if buff_cfg == self.input:
curr = "input"
self.var_dict [ curr + "_files" ].pop ( self.var_dict [ curr + "_files" ].index (buff_item.full_path))
def get_path ( self, _in ):
if self.dir [ -1 ] == "/":
return self.dir + _in
return self.dir + "/" + _in
def get_new ( self, a, confitem ):
if ( confitem == self.input ):
self.current = "input"
else:
self.current = "output"
def add ( self, __files ):
if platform.system () == "Windows":
__files[0] = __files [0][1:]
if ( self.current == "input" ):
self.input.add_items ( __files, remove=False )
self.var_dict ["input_files"].append (__files[0])
else:
self.output.add_items ( __files, remove=False )
self.var_dict ["output_files"].append (__files[0])
def update_file ( self, var, val ):
self.current_file [ var ] = val
def __init_vars__ ( self ):
self.var_store = Gtk.ListStore ( str, str )
self.treeview = Gtk.TreeView.new_with_model ( self.var_store )
self.var_rend = Gtk.CellRendererText ( )
self.val_rend = Gtk.CellRendererText ( )
self.val_rend.set_property('editable', True)
column_1 = Gtk.TreeViewColumn ( "Variables", self.var_rend, text=0 )
column_2 = Gtk.TreeViewColumn ( "Value", self.val_rend, text=1 )
self.treeview.append_column ( column_1 )
self.treeview.append_column ( column_2 )
self.val_rend.connect ( "edited", self.vars_changes )
def vars_changes ( self, renderer, path, new_text ):
self.var_store.set ( self.var_store.get_iter ( path ), 1, new_text )
self.var_dict [ self.var_store.get_value ( self.var_store.get_iter ( path ), 0 ) ] = new_text
def add_var ( self, var, add_to_list=False ):
if ( add_to_list ):
self.var_list.append ( var )
self.var_dict [ var ] = ""
self.var_store.append ( [ var, self.var_dict [ var ] ] )
def open_file ( self, path ):
self.__file_lines = open ( path, "r" ).readlines ( )
self.__file = open ( path, "w" ).readlines ( )
def remove_var ( self ):
model, treeiter = self.treeview.get_selection ( ).get_selected ( )
self.var_dict.pop ( model [ treeiter ] [ 0 ], None )
self.var_list.pop ( self.var_list.index ( model [ treeiter ] [ 0 ] ) )
print (self.var_list)
self.var_store.remove ( treeiter )
def get_conf_out ( self ):
out_buff = []
for x in self.var_list:
buff = self.var_dict [ x ]
if ( isinstance ( self.var_dict [ x ], list ) ):
buff = ",".join ( self.var_dict [ x ] )
buff += ","
out_buff.append ( x + " = " + buff )
return out_buff
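# Illustrative config file consumed by Config above (hypothetical values,
# inferred from the parser: "var = value" lines, '#' comments, and
# comma-separated lists for the *_files variables):
#   title = My Site
#   input_files = index.html,about.html
#   output_files = build/index.html,build/about.html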
| gpl-3.0 | -1,421,159,499,639,127,300 | 28.196172 | 104 | 0.621599 | false |
intuition-io/intuition | tests/data/test_universe.py | 1 | 2697 | '''
Tests for intuition.data.universe
'''
import os
import unittest
from nose.tools import raises, eq_
import dna.test_utils
import intuition.data.universe as universe
from intuition.errors import LoadMarketSchemeFailed
class MarketTestCase(unittest.TestCase):
def setUp(self):
dna.test_utils.setup_logger(self)
self.default_timezone = 'US/Eastern'
self.default_benchmark = '^GSPC'
self.scheme_path = os.path.expanduser('~/.intuition/data/market.yml')
self.good_universe_description = 'stocks:paris:cac40'
self.bad_universe_description = 'whatever'
def tearDown(self):
dna.test_utils.teardown_logger(self)
# NOTE It also tests market._load_market_scheme()
def test_initialize_market(self):
market = universe.Market()
self.assertIsInstance(market.scheme, dict)
eq_(market.benchmark, self.default_benchmark)
eq_(market.timezone, self.default_timezone)
#eq_(market.open, self.default_open)
#eq_(market.close, self.default_close)
def test_initialize_market_without_scheme(self):
tmp_path = self.scheme_path.replace('market', 'bkp.market')
os.system('mv {} {}'.format(self.scheme_path, tmp_path))
self.assertRaises(LoadMarketSchemeFailed, universe.Market)
os.system('mv {} {}'.format(tmp_path, self.scheme_path))
def test__extract_forex(self):
market = universe.Market()
sids = market._extract_forex()
self.assertGreater(len(sids), 0)
self.assertGreater(sids[0].find('/'), 0)
def test__extract_cac40(self):
market = universe.Market()
sids = market._extract_cac40(['stocks', 'paris', 'cac40'])
self.assertGreater(len(sids), 0)
self.assertGreater(sids[0].find('.pa'), 0)
def test__lookup_sids_no_limit(self):
market = universe.Market()
sids = market._lookup_sids(self.good_universe_description)
self.assertIsInstance(sids, list)
self.assertGreater(len(sids), 0)
def test__lookup_sids_with_limit(self):
limit = 4
market = universe.Market()
sids = market._lookup_sids(self.good_universe_description, limit)
self.assertIsInstance(sids, list)
eq_(len(sids), limit)
@raises(LoadMarketSchemeFailed)
def test__lookup_sids_wrong_market(self):
market = universe.Market()
market._lookup_sids(self.bad_universe_description)
def test_parse_universe(self):
market = universe.Market()
market.parse_universe_description(
self.good_universe_description + ',4')
self.assertIsInstance(market.sids, list)
eq_(len(market.sids), 4)
| apache-2.0 | -2,435,537,797,693,956,600 | 34.486842 | 77 | 0.652948 | false |
kfoss/neon | neon/datasets/sparsenet.py | 1 | 5254 | # ----------------------------------------------------------------------------
# Copyright 2014 Nervana Systems Inc.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ----------------------------------------------------------------------------
"""
Sparsenet is the natural image dataset used by Olshausen and Field
More info at: http://redwood.berkeley.edu/bruno/sparsenet/
"""
import logging
import os
import numpy
import pickle
from neon.util.compat import PY3, range
from neon.datasets.dataset import Dataset
if PY3:
from urllib.parse import urljoin as basejoin
else:
from urllib import basejoin
logger = logging.getLogger(__name__)
class SPARSENET(Dataset):
"""
Sets up a Sparsenet dataset.
Attributes:
raw_base_url (str): where to find the source data
raw_train_input_gz (str): URL of the full path to raw train inputs
raw_train_target_gz (str): URL of the full path to raw train targets
raw_test_input_gz (str): URL of the full path to raw test inputs
raw_test_target_gz (str): URL of the full path to raw test targets
backend (neon.backends.Backend): backend used for this data
inputs (dict): structure housing the loaded train/test/validation
input data
targets (dict): structure housing the loaded train/test/validation
target data
Kwargs:
repo_path (str, optional): where to locally host this dataset on disk
"""
raw_base_url = 'http://redwood.berkeley.edu/bruno/sparsenet/'
raw_train_whitened = basejoin(raw_base_url, 'IMAGES.mat')
raw_train_unwhitened = basejoin(raw_base_url, 'IMAGES_RAW.mat')
def __init__(self, **kwargs):
self.macro_batched = False
self.__dict__.update(kwargs)
def read_image_file(self, fname, dtype=None):
"""
Carries out the actual reading of Sparsenet image files.
"""
logger.info("in read_image_file, reading: %s", fname)
with open(fname, 'rb') as infile:
array = pickle.load(infile)
return array
def load(self, backend=None, experiment=None):
"""
main function
"""
import scipy.io
if 'repo_path' in self.__dict__:
self.repo_path = os.path.expandvars(os.path.expanduser(
self.repo_path))
save_dir = os.path.join(self.repo_path,
self.__class__.__name__)
if not os.path.exists(save_dir):
os.makedirs(save_dir)
train_idcs = list(range(10000))
if 'sample_pct' in self.__dict__:
if self.sample_pct > 1.0:
self.sample_pct /= 100.0
if self.sample_pct < 1.0:
numpy.random.seed(self.backend.rng_seed)
numpy.random.shuffle(train_idcs)
train_idcs = train_idcs[0:int(10000 * self.sample_pct)]
for url in (self.raw_train_unwhitened, self.raw_train_whitened):
name = os.path.basename(url).rstrip('.mat')
repo_mat_file = os.path.join(save_dir, name + '.mat')
repo_file = repo_mat_file.rstrip('.mat')
# download and create dataset
if not os.path.exists(repo_file):
self.download_to_repo(url, save_dir)
infile = scipy.io.loadmat(repo_mat_file)
with open(repo_file, 'wb') as outfile:
data = infile[infile.keys()[0]]
# patches are extracted so they can be cached
# doing non-overlapping 16x16 patches (1024 per image)
patches = data.reshape(512/16, 16, 512/16, 16, 10)
patches = patches.transpose(1, 3, 0, 2, 4)
patches = patches.reshape(16, 16, 1024*10)
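                    # Arithmetic check: each 512x512 image yields
                    # (512/16)**2 = 1024 non-overlapping 16x16 patches, so
                    # 10 images give the 10240 patches cached here.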
logger.info("Caching to pickle file: %s", outfile)
pickle.dump(patches, outfile)
outfile.close()
logger.info('loading: %s', name)
# load existing data
if 'IMAGES' in repo_file:
indat = self.read_image_file(repo_file, 'float32')
# flatten to 1D images
indat = indat.reshape((256, 10240)).transpose()[train_idcs]
self.inputs['train'] = indat
else:
logger.error('problems loading: %s', name)
self.format()
else:
raise AttributeError('repo_path not specified in config')
# TODO: try and download and read in directly?
| apache-2.0 | -2,070,562,520,108,554,800 | 41.032 | 79 | 0.557099 | false |
wuher/devil | devil/util.py | 1 | 3000 | # -*- coding: utf-8 -*-
# util.py ---
#
# Created: Fri Dec 30 23:27:52 2011 (+0200)
# Author: Janne Kuuskeri
#
import re
charset_pattern = re.compile('.*;\s*charset=(.*)')
def camelcase_to_slash(name):
""" Converts CamelCase to camel/case
code ripped from http://stackoverflow.com/questions/1175208/does-the-python-standard-library-have-function-to-convert-camelcase-to-camel-cas
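
    Example:

    >>> camelcase_to_slash('CamelCase')
    'camel/case'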
"""
s1 = re.sub('(.)([A-Z][a-z]+)', r'\1/\2', name)
return re.sub('([a-z0-9])([A-Z])', r'\1/\2', s1).lower()
def strip_charset(content_type):
""" Strip charset from the content type string.
:param content_type: The Content-Type string (possibly with charset info)
:returns: The Content-Type string without the charset information
"""
return content_type.split(';')[0]
def extract_charset(content_type):
""" Extract charset info from content type.
E.g. application/json;charset=utf-8 -> utf-8
:param content_type: The Content-Type string (possibly with charset info)
:returns: The charset or ``None`` if not found.
"""
match = charset_pattern.match(content_type)
return match.group(1) if match else None
def get_charset(request):
""" Extract charset from the content type
"""
content_type = request.META.get('CONTENT_TYPE', None)
if content_type:
return extract_charset(content_type) if content_type else None
else:
return None
def parse_accept_header(accept):
""" Parse the Accept header
todo: memoize
    :returns: list of (media_type, media_params, q_value) triples,
    ordered by descending q values.
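
    Example (illustrative):

    >>> parse_accept_header('text/html;q=0.8, application/json')
    [('application/json', (), 1.0), ('text/html', (), 0.8)]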
"""
def parse_media_range(accept_item):
""" Parse media range and subtype """
return accept_item.split('/', 1)
def comparator(a, b):
""" Compare accept items a and b """
# first compare q values
result = -cmp(a[2], b[2])
        if result != 0:
# q values differ, no need to compare media types
return result
# parse media types and compare them (asterisks are lower in precedence)
mtype_a, subtype_a = parse_media_range(a[0])
mtype_b, subtype_b = parse_media_range(b[0])
if mtype_a == '*' and subtype_a == '*':
return 1
if mtype_b == '*' and subtype_b == '*':
return -1
if subtype_a == '*':
return 1
if subtype_b == '*':
return -1
return 0
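    # Net effect: q decides first; on ties, concrete types sort ahead of
    # subtype wildcards ('text/html' before 'text/*'), which in turn sort
    # ahead of the full wildcard '*/*'.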
if not accept:
return []
result = []
for media_range in accept.split(","):
parts = media_range.split(";")
media_type = parts.pop(0).strip()
media_params = []
q = 1.0
for part in parts:
(key, value) = part.lstrip().split("=", 1)
if key == "q":
q = float(value)
else:
media_params.append((key, value))
result.append((media_type, tuple(media_params), q))
result.sort(comparator)
return result
#
# util.py ends here
| mit | 3,430,907,829,229,489,700 | 25.086957 | 144 | 0.581667 | false |
ram8647/gcb-mobilecsp | modules/i18n_dashboard/i18n_dashboard_tests.py | 1 | 160861 | # coding: utf-8
# Copyright 2014 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for the internationalization (i18n) workflow."""
__author__ = 'John Orr ([email protected])'
import cgi
import collections
import cStringIO
import logging
import StringIO
import traceback
import unittest
import urllib
import zipfile
from babel.messages import pofile
import appengine_config
from common import crypto
from common import resource
from common import tags
from common import users
from common import utils
from common.utils import Namespace
from controllers import sites
from models import config
from models import courses
from models import resources_display
from models import models
from models import roles
from models import transforms
from modules.announcements import announcements
from modules.dashboard import dashboard
from modules.i18n_dashboard import i18n_dashboard
from modules.i18n_dashboard.i18n_dashboard import I18nProgressDAO
from modules.i18n_dashboard.i18n_dashboard import I18nProgressDTO
from modules.i18n_dashboard.i18n_dashboard import LazyTranslator
from modules.i18n_dashboard.i18n_dashboard import ResourceBundleDAO
from modules.i18n_dashboard.i18n_dashboard import ResourceBundleDTO
from modules.i18n_dashboard.i18n_dashboard import ResourceBundleKey
from modules.i18n_dashboard.i18n_dashboard import ResourceRow
from modules.i18n_dashboard.i18n_dashboard import TranslationConsoleRestHandler
from modules.i18n_dashboard.i18n_dashboard import TranslationUploadRestHandler
from modules.i18n_dashboard.i18n_dashboard import VERB_CHANGED
from modules.i18n_dashboard.i18n_dashboard import VERB_CURRENT
from modules.i18n_dashboard.i18n_dashboard import VERB_NEW
from modules.notifications import notifications
from tests.functional import actions
from google.appengine.api import memcache
from google.appengine.api import namespace_manager
from google.appengine.datastore import datastore_rpc
class ResourceBundleKeyTests(unittest.TestCase):
def test_roundtrip_data(self):
key1 = ResourceBundleKey(
resources_display.ResourceAssessment.TYPE, '23', 'el')
key2 = ResourceBundleKey.fromstring(str(key1))
self.assertEquals(key1.locale, key2.locale)
self.assertEquals(key1.resource_key.type, key2.resource_key.type)
self.assertEquals(key1.resource_key.key, key2.resource_key.key)
def test_from_resource_key(self):
resource_key = resource.Key(
resources_display.ResourceAssessment.TYPE, '23')
key = ResourceBundleKey.from_resource_key(resource_key, 'el')
self.assertEquals(resources_display.ResourceAssessment.TYPE,
key.resource_key.type)
self.assertEquals('23', key.resource_key.key)
self.assertEquals('el', key.locale)
class ResourceRowTests(unittest.TestCase):
def setUp(self):
super(ResourceRowTests, self).setUp()
course = object()
rsrc = object()
self.type_str = resources_display.ResourceAssessment.TYPE
self.key = '23'
self.i18n_progress_dto = I18nProgressDTO(None, {})
self.resource_row = ResourceRow(
course, rsrc, self.type_str, self.key,
i18n_progress_dto=self.i18n_progress_dto)
def test_class_name(self):
self.i18n_progress_dto.is_translatable = True
self.assertEquals('', self.resource_row.class_name)
self.i18n_progress_dto.is_translatable = False
self.assertEquals('not-translatable', self.resource_row.class_name)
def test_resource_key(self):
key = self.resource_row.resource_key
self.assertEquals(self.type_str, key.type)
self.assertEquals(self.key, key.key)
def test_is_translatable(self):
self.i18n_progress_dto.is_translatable = True
self.assertTrue(self.resource_row.is_translatable)
self.i18n_progress_dto.is_translatable = False
self.assertFalse(self.resource_row.is_translatable)
def test_status(self):
self.i18n_progress_dto.set_progress('fr', I18nProgressDTO.NOT_STARTED)
self.i18n_progress_dto.set_progress('el', I18nProgressDTO.IN_PROGRESS)
self.i18n_progress_dto.set_progress('ru', I18nProgressDTO.DONE)
self.assertEquals('Not started', self.resource_row.status('fr'))
self.assertEquals('In progress', self.resource_row.status('el'))
self.assertEquals('Done', self.resource_row.status('ru'))
def test_status_class(self):
self.i18n_progress_dto.set_progress('fr', I18nProgressDTO.NOT_STARTED)
self.i18n_progress_dto.set_progress('el', I18nProgressDTO.IN_PROGRESS)
self.i18n_progress_dto.set_progress('ru', I18nProgressDTO.DONE)
self.assertEquals('not-started', self.resource_row.status_class('fr'))
self.assertEquals('in-progress', self.resource_row.status_class('el'))
self.assertEquals('done', self.resource_row.status_class('ru'))
def test_edit_url(self):
self.assertEquals(
'dashboard?action=i18_console&key=assessment%3A23%3Ael',
self.resource_row.edit_url('el'))
class IsTranslatableRestHandlerTests(actions.TestBase):
ADMIN_EMAIL = '[email protected]'
COURSE_NAME = 'i18n_course'
URL = 'rest/modules/i18n_dashboard/is_translatable'
def setUp(self):
super(IsTranslatableRestHandlerTests, self).setUp()
self.base = '/' + self.COURSE_NAME
context = actions.simple_add_course(
self.COURSE_NAME, self.ADMIN_EMAIL, 'I18N Course')
self.old_namespace = namespace_manager.get_namespace()
namespace_manager.set_namespace('ns_%s' % self.COURSE_NAME)
self.course = courses.Course(None, context)
def tearDown(self):
del sites.Registry.test_overrides[sites.GCB_COURSES_CONFIG.name]
namespace_manager.set_namespace(self.old_namespace)
super(IsTranslatableRestHandlerTests, self).tearDown()
def _post_response(self, request_dict):
return transforms.loads(self.post(
self.URL,
{'request': transforms.dumps(request_dict)}).body)
def _get_request(self, payload_dict):
xsrf_token = crypto.XsrfTokenManager.create_xsrf_token(
'is-translatable')
return {
'xsrf_token': xsrf_token,
'payload': payload_dict
}
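    # The dict above follows Course Builder's REST envelope convention: an
    # XSRF token scoped to the handler's action name plus the request
    # payload, which _post_response() JSON-encodes into the 'request' field.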
def test_require_xsrf_token(self):
response = self._post_response({'xsrf_token': 'BAD TOKEN'})
self.assertEquals(403, response['status'])
def test_require_course_admin(self):
response = self._post_response(self._get_request({}))
self.assertEquals(401, response['status'])
actions.login(self.ADMIN_EMAIL, is_admin=True)
response = self._post_response(self._get_request(
{'resource_key': 'assessment:23', 'value': True}))
self.assertEquals(200, response['status'])
def test_set_data(self):
resource_key_str = 'assessment:23'
actions.login(self.ADMIN_EMAIL, is_admin=True)
self.assertIsNone(I18nProgressDAO.load(resource_key_str))
response = self._post_response(self._get_request(
{'resource_key': 'assessment:23', 'value': True}))
self.assertEquals(200, response['status'])
dto = I18nProgressDAO.load(resource_key_str)
self.assertTrue(dto.is_translatable)
response = self._post_response(self._get_request(
{'resource_key': 'assessment:23', 'value': False}))
self.assertEquals(200, response['status'])
dto = I18nProgressDAO.load(resource_key_str)
self.assertFalse(dto.is_translatable)
class I18nDashboardHandlerTests(actions.TestBase):
ADMIN_EMAIL = '[email protected]'
COURSE_NAME = 'i18n_course'
URL = 'dashboard?action=i18n_dashboard'
def setUp(self):
super(I18nDashboardHandlerTests, self).setUp()
self.base = '/' + self.COURSE_NAME
context = actions.simple_add_course(
self.COURSE_NAME, self.ADMIN_EMAIL, 'I18N Course')
self.old_namespace = namespace_manager.get_namespace()
namespace_manager.set_namespace('ns_%s' % self.COURSE_NAME)
self.course = courses.Course(None, context)
self.unit = self.course.add_unit()
self.unit.title = 'Test Unit'
self.assessment = self.course.add_assessment()
self.assessment.title = 'Post Assessment'
self.unit.post_assessment = self.assessment.unit_id
self.lesson = self.course.add_lesson(self.unit)
self.lesson.title = 'Test Lesson'
self.course.save()
actions.login(self.ADMIN_EMAIL, is_admin=True)
def tearDown(self):
del sites.Registry.test_overrides[sites.GCB_COURSES_CONFIG.name]
namespace_manager.set_namespace(self.old_namespace)
super(I18nDashboardHandlerTests, self).tearDown()
def test_page_data(self):
response = self.get(self.URL)
soup = self.parse_html_string_to_soup(response.body)
tables = soup.select('.i18n-progress-table')
expected_tables = [
{
'title': 'Settings',
'rows': [
'Assessments',
'Forums',
'Course',
'Invitations',
'Registration',
],
},
{
'title': 'Create > Outline',
'rows': [
'Unit 1 - Test Unit',
'1.1 Test Lesson',
'Post Assessment',
],
},
{
'title': 'Questions',
'rows': [],
},
{
'title': 'Question Groups',
'rows': [],
},
{
'title': 'Skills',
'rows': [],
},
{
'title': 'HTML Hooks',
'rows': [
'base.after_body_tag_begins',
'base.after_main_content_ends',
'base.after_navbar_begins',
'base.after_top_content_ends',
'base.before_body_tag_ends',
'base.before_head_tag_ends',
'base.before_navbar_ends',
],
},
]
for table, expected_table in zip(tables, expected_tables):
self.assertEquals(table.select(
'thead .title')[0].text.strip(), expected_table['title'])
rows = table.select('tbody tr')
self.assertEqual(len(rows), len(expected_table['rows']))
for row, row_name in zip(rows, expected_table['rows']):
self.assertEquals(row.select('.name')[0].text.strip(), row_name)
def test_multiple_locales(self):
extra_env = {
'extra_locales': [
{'locale': 'el', 'availability': 'unavailable'},
{'locale': 'ru', 'availability': 'unavailable'},
]}
with actions.OverriddenEnvironment(extra_env):
soup = self.parse_html_string_to_soup(self.get(self.URL).body)
table = soup.select('.i18n-progress-table')[0]
columns = table.select('.language-header')
expected_col_data = [
'el',
'ru',
]
self.assertEquals(len(expected_col_data), len(columns))
for index, expected in enumerate(expected_col_data):
self.assertEquals(expected, columns[index].text)
def test_is_translatable(self):
soup = self.parse_html_string_to_soup(self.get(self.URL).body)
rows = soup.select('tbody .not-translatable')
self.assertEquals(0, len(rows))
dto_key = resource.Key(resources_display.ResourceLesson.TYPE,
self.lesson.lesson_id)
dto = I18nProgressDTO(str(dto_key), {})
dto.is_translatable = False
I18nProgressDAO.save(dto)
soup = self.parse_html_string_to_soup(self.get(self.URL).body)
rows = soup.select('tbody .not-translatable')
self.assertEquals(1, len(rows))
def test_progress(self):
def assert_progress(class_name, row, index):
td = row.select('.status')[index]
self.assertIn(class_name, td.get('class'))
lesson_row_selector = ('.i18n-progress-table > tbody > '
'tr[data-resource-key="lesson:{}"]').format(self.lesson.lesson_id)
extra_env = {
'extra_locales': [
{'locale': 'el', 'availability': 'unavailable'},
{'locale': 'ru', 'availability': 'unavailable'},
]}
with actions.OverriddenEnvironment(extra_env):
soup = self.parse_html_string_to_soup(self.get(self.URL).body)
lesson_row = soup.select(lesson_row_selector)[0]
lesson_title = lesson_row.select('.name')[0].getText().strip()
self.assertEquals('1.1 Test Lesson', lesson_title)
assert_progress('not-started', lesson_row, 0)
assert_progress('not-started', lesson_row, 1)
dto_key = resource.Key(
resources_display.ResourceLesson.TYPE, self.lesson.lesson_id)
dto = I18nProgressDTO(str(dto_key), {})
dto.set_progress('el', I18nProgressDTO.DONE)
dto.set_progress('ru', I18nProgressDTO.IN_PROGRESS)
I18nProgressDAO.save(dto)
soup = self.parse_html_string_to_soup(self.get(self.URL).body)
lesson_row = soup.select(lesson_row_selector)[0]
assert_progress('done', lesson_row, 0)
assert_progress('in-progress', lesson_row, 1)
class TranslationConsoleRestHandlerTests(actions.TestBase):
ADMIN_EMAIL = '[email protected]'
COURSE_NAME = 'i18n_course'
URL = 'rest/modules/i18n_dashboard/translation_console'
def setUp(self):
super(TranslationConsoleRestHandlerTests, self).setUp()
self.base = '/' + self.COURSE_NAME
context = actions.simple_add_course(
self.COURSE_NAME, self.ADMIN_EMAIL, 'I18N Course')
self.old_namespace = namespace_manager.get_namespace()
namespace_manager.set_namespace('ns_%s' % self.COURSE_NAME)
self.course = courses.Course(None, context)
self.unit = self.course.add_unit()
self.unit.title = 'Test Unit'
self.unit.unit_header = '<p>a</p><p>b</p>'
self.course.save()
actions.login(self.ADMIN_EMAIL, is_admin=True)
def tearDown(self):
del sites.Registry.test_overrides[sites.GCB_COURSES_CONFIG.name]
namespace_manager.set_namespace(self.old_namespace)
super(TranslationConsoleRestHandlerTests, self).tearDown()
def _get_by_key(self, key):
return transforms.loads(
self.get('%s?key=%s' % (self.URL, str(key))).body)
def _assert_section_values(
self, section, name, type_str, data_size, source_value):
self.assertEquals(name, section['name'])
self.assertEquals(type_str, section['type'])
self.assertEquals(data_size, len(section['data']))
self.assertEquals(source_value, section['source_value'])
def test_get_requires_admin_role(self):
actions.logout()
key = ResourceBundleKey(
resources_display.ResourceUnit.TYPE, self.unit.unit_id, 'el')
response = self._get_by_key(key)
self.assertEquals(401, response['status'])
def test_get_unit_content_with_no_existing_values(self):
key = ResourceBundleKey(
resources_display.ResourceUnit.TYPE, self.unit.unit_id, 'el')
response = self._get_by_key(key)
self.assertEquals(200, response['status'])
payload = transforms.loads(response['payload'])
self.assertEquals('en_US', payload['source_locale'])
self.assertEquals('el', payload['target_locale'])
sections = payload['sections']
self.assertEquals(
['title', 'unit_header'],
[s['name'] for s in sections])
self.assertEquals(
['Title', 'Header'],
[s['label'] for s in sections])
expected_values = [
('title', 'string', 1, ''),
('unit_header', 'html', 2, '<p>a</p><p>b</p>')]
for i, (name, type_str, data_size, source_value) in enumerate(
expected_values):
self._assert_section_values(
sections[i], name, type_str, data_size, source_value)
# Confirm all the data is new
for section in sections:
for data in section['data']:
self.assertEquals(VERB_NEW, data['verb'])
header_data = sections[1]['data']
for item in header_data:
self.assertIsNone(item['old_source_value'])
self.assertEquals('', item['target_value'])
self.assertFalse(item['changed'])
self.assertEquals('a', header_data[0]['source_value'])
self.assertEquals('b', header_data[1]['source_value'])
def test_get_unit_content_with_existing_values(self):
key = ResourceBundleKey(
resources_display.ResourceUnit.TYPE, self.unit.unit_id, 'el')
resource_bundle_dict = {
'title': {
'type': 'string',
'source_value': '',
'data': [
{'source_value': 'Test Unit', 'target_value': 'TEST UNIT'}]
},
'unit_header': {
'type': 'html',
'source_value': '<p>a</p><p>b</p>',
'data': [
{'source_value': 'a', 'target_value': 'A'}]
}
}
dto = ResourceBundleDTO(str(key), resource_bundle_dict)
ResourceBundleDAO.save(dto)
response = self._get_by_key(key)
self.assertEquals(200, response['status'])
sections = transforms.loads(response['payload'])['sections']
self.assertEquals(2, len(sections))
# Confirm there is a translation for the title
title_section = sections[0]
self.assertEquals('title', title_section['name'])
self.assertEquals('Title', title_section['label'])
self.assertEquals(1, len(title_section['data']))
self.assertEquals(VERB_CURRENT, title_section['data'][0]['verb'])
self.assertEquals('TEST UNIT', title_section['data'][0]['target_value'])
# Confirm there is a translation for one of the two paragraphs
header_section = sections[1]
self.assertEquals('unit_header', header_section['name'])
self.assertEquals('Header', header_section['label'])
self.assertEquals(2, len(header_section['data']))
self.assertEquals(VERB_CURRENT, header_section['data'][0]['verb'])
self.assertEquals('a', header_section['data'][0]['source_value'])
self.assertEquals('a', header_section['data'][0]['old_source_value'])
self.assertEquals('A', header_section['data'][0]['target_value'])
self.assertEquals(VERB_NEW, header_section['data'][1]['verb'])
def test_get_unit_content_with_changed_values(self):
key = ResourceBundleKey(
resources_display.ResourceUnit.TYPE, self.unit.unit_id, 'el')
resource_bundle_dict = {
'title': {
'type': 'string',
'source_value': '',
'data': [
{
'source_value': 'Old Test Unit',
'target_value': 'OLD TEST UNIT'}]
},
'unit_header': {
'type': 'html',
'source_value': '<p>a</p><p>b</p>',
'data': [
{'source_value': 'aa', 'target_value': 'AA'}]
}
}
dto = ResourceBundleDTO(str(key), resource_bundle_dict)
ResourceBundleDAO.save(dto)
response = self._get_by_key(key)
self.assertEquals(200, response['status'])
sections = transforms.loads(response['payload'])['sections']
self.assertEquals(2, len(sections))
# Confirm there is a translation for the title
title_section = sections[0]
self.assertEquals('title', title_section['name'])
self.assertEquals('Title', title_section['label'])
self.assertEquals(1, len(title_section['data']))
self.assertEquals(VERB_CHANGED, title_section['data'][0]['verb'])
self.assertEquals(
'OLD TEST UNIT', title_section['data'][0]['target_value'])
# Confirm there is a translation for one of the two paragraphs
header_section = sections[1]
self.assertEquals('unit_header', header_section['name'])
self.assertEquals('Header', header_section['label'])
self.assertEquals(2, len(header_section['data']))
self.assertEquals(VERB_CHANGED, header_section['data'][0]['verb'])
self.assertEquals('a', header_section['data'][0]['source_value'])
self.assertEquals('aa', header_section['data'][0]['old_source_value'])
self.assertEquals('AA', header_section['data'][0]['target_value'])
self.assertEquals(VERB_NEW, header_section['data'][1]['verb'])
def test_core_tags_handle_none_handler(self):
for _, tag_cls in tags.Registry.get_all_tags().items():
self.assertTrue(tag_cls().get_schema(None))
def test_get_unit_content_with_custom_tag(self):
unit = self.course.add_unit()
unit.title = 'Test Unit with Tag'
unit.unit_header = (
'text'
'<gcb-youtube videoid="Kdg2drcUjYI" instanceid="c4CLTDvttJEu">'
'</gcb-youtube>')
self.course.save()
key = ResourceBundleKey(
resources_display.ResourceUnit.TYPE, unit.unit_id, 'el')
response = self._get_by_key(key)
payload = transforms.loads(response['payload'])
data = payload['sections'][1]['data']
self.assertEquals(1, len(data))
self.assertEquals(
'text<gcb-youtube#1 videoid="Kdg2drcUjYI" />',
data[0]['source_value'])
def test_get_unit_content_with_custom_tag_with_body(self):
unit = self.course.add_unit()
unit.title = 'Test Unit with Tag'
unit.unit_header = '<gcb-markdown>*hello*</gcb-markdown>'
self.course.save()
key = ResourceBundleKey(
resources_display.ResourceUnit.TYPE, unit.unit_id, 'el')
response = self._get_by_key(key)
payload = transforms.loads(response['payload'])
data = payload['sections'][1]['data']
self.assertEquals(1, len(data))
self.assertEquals(
'<gcb-markdown#1>*hello*</gcb-markdown#1>', data[0]['source_value'])
def test_defaults_to_known_translations(self):
unit = self.course.add_unit()
# Make the unit title be a string which is part of CB's i18n data
unit.title = 'Registration'
self.course.save()
key = ResourceBundleKey(
resources_display.ResourceUnit.TYPE, unit.unit_id, 'el')
response = self._get_by_key(key)
payload = transforms.loads(response['payload'])
data = payload['sections'][0]['data']
self.assertEqual(VERB_CHANGED, data[0]['verb'])
self.assertEqual(u'Εγγραφή', data[0]['target_value'])
class TranslationConsoleValidationTests(actions.TestBase):
ADMIN_EMAIL = '[email protected]'
COURSE_NAME = 'i18n_course'
URL = 'rest/modules/i18n_dashboard/translation_console'
INVALID = LazyTranslator.INVALID_TRANSLATION
VALID = LazyTranslator.VALID_TRANSLATION
def setUp(self):
super(TranslationConsoleValidationTests, self).setUp()
self.base = '/' + self.COURSE_NAME
context = actions.simple_add_course(
self.COURSE_NAME, self.ADMIN_EMAIL, 'I18N Course')
self.old_namespace = namespace_manager.get_namespace()
namespace_manager.set_namespace('ns_%s' % self.COURSE_NAME)
self.course = courses.Course(None, context)
self.unit = self.course.add_unit()
self.unit.title = 'Test Unit'
self.unit.unit_header = '<p>a</p><p>b</p>'
self.course.save()
actions.login(self.ADMIN_EMAIL, is_admin=True)
self.key = ResourceBundleKey(
resources_display.ResourceUnit.TYPE, self.unit.unit_id, 'el')
self.validation_payload = {
'key': str(self.key),
'title': 'Unit 1 - Test Unit',
'source_locale': 'en_US',
'target_locale': 'el',
'sections': [
{
'name': 'title',
'label': 'Title',
'type': 'string',
'source_value': '',
'data': [
{
'source_value': 'Test Unit',
'target_value': 'TEST UNIT',
'verb': 1, # verb NEW
'old_source_value': '',
'changed': True
}
]
},
{
'name': 'unit_header',
'label': 'Unit Header',
'type': 'html',
'source_value': '<p>a</p><p>b</p>',
'data': [
{
'source_value': 'a',
'target_value': 'A',
'verb': 1, # verb NEW
'old_source_value': 'a',
'changed': True
},
{
'source_value': 'b',
'target_value': 'B',
'verb': 1, # verb NEW
'old_source_value': 'b',
'changed': True
},
]
},
]}
self.resource_bundle_dict = {
'title': {
'type': 'string',
'source_value': '',
'data': [
{'source_value': 'Test Unit', 'target_value': 'TEST UNIT'}]
},
'unit_header': {
'type': 'html',
'source_value': '<p>a</p><p>b</p>',
'data': [
{'source_value': 'a', 'target_value': 'A'},
{'source_value': 'a', 'target_value': 'B'}]
}
}
def tearDown(self):
del sites.Registry.test_overrides[sites.GCB_COURSES_CONFIG.name]
namespace_manager.set_namespace(self.old_namespace)
super(TranslationConsoleValidationTests, self).tearDown()
def _validate(self):
request_dict = {
'key': str(self.key),
'xsrf_token': crypto.XsrfTokenManager.create_xsrf_token(
'translation-console'),
'payload': transforms.dumps(self.validation_payload),
'validate': True}
response = self.put(
self.URL, {'request': transforms.dumps(request_dict)})
response = transforms.loads(response.body)
self.assertEquals(200, response['status'])
payload = transforms.loads(response['payload'])
expected_keys = {
section['name'] for section in self.validation_payload['sections']}
self.assertEquals(expected_keys, set(payload.keys()))
return payload
def test_valid_content(self):
payload = self._validate()
self.assertEquals(self.VALID, payload['title']['status'])
self.assertEquals('', payload['title']['errm'])
self.assertEquals(self.VALID, payload['unit_header']['status'])
self.assertEquals('', payload['unit_header']['errm'])
def test_invalid_content(self):
self.validation_payload[
'sections'][1]['data'][0]['target_value'] = '<img#1/>'
payload = self._validate()
self.assertEquals(self.VALID, payload['title']['status'])
self.assertEquals('', payload['title']['errm'])
self.assertEquals(self.INVALID, payload['unit_header']['status'])
self.assertEquals(
'Error in chunk 1. Unexpected tag: <img#1>.',
payload['unit_header']['errm'])
def test_with_bundle(self):
dto = ResourceBundleDTO(str(self.key), self.resource_bundle_dict)
ResourceBundleDAO.save(dto)
payload = self._validate()
self.assertEquals(self.VALID, payload['title']['status'])
self.assertEquals('', payload['title']['errm'])
self.assertEquals(self.VALID, payload['unit_header']['status'])
self.assertEquals('', payload['unit_header']['errm'])
def test_with_bundle_with_extra_fields(self):
self.resource_bundle_dict['description'] = {
'type': 'string',
'source_value': '',
'data': [
{'source_value': 'descr', 'target_value': 'DESCR'}]
}
dto = ResourceBundleDTO(str(self.key), self.resource_bundle_dict)
ResourceBundleDAO.save(dto)
payload = self._validate()
self.assertEquals(self.VALID, payload['title']['status'])
self.assertEquals('', payload['title']['errm'])
self.assertEquals(self.VALID, payload['unit_header']['status'])
self.assertEquals('', payload['unit_header']['errm'])
def test_untranslated_section(self):
# Add a section to the unit which has no translation in the bundle
self.unit.unit_footer = 'footer'
self.course.save()
self.validation_payload['sections'].append(
{
'name': 'unit_footer',
'label': 'Unit Footer',
'type': 'html',
'source_value': 'footer',
'data': [
{
'source_value': 'footer',
'target_value': '',
'verb': 1, # verb NEW
'old_source_value': None,
'changed': False
}
]
})
payload = self._validate()
footer_data = payload['unit_footer']
self.assertEqual(
LazyTranslator.NOT_STARTED_TRANSLATION, footer_data['status'])
self.assertEqual('No translation saved yet', footer_data['errm'])
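# Verifies that editing course content (lessons, units, questions,
# question groups, and course settings) triggers deferred tasks that
# update the per-locale I18nProgress records.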
class I18nProgressDeferredUpdaterTests(actions.TestBase):
ADMIN_EMAIL = '[email protected]'
COURSE_NAME = 'i18n_course'
COURSE_TITLE = 'I18N Course'
def setUp(self):
super(I18nProgressDeferredUpdaterTests, self).setUp()
self.base = '/' + self.COURSE_NAME
self.app_context = actions.simple_add_course(
self.COURSE_NAME, self.ADMIN_EMAIL, self.COURSE_TITLE)
self.old_namespace = namespace_manager.get_namespace()
namespace_manager.set_namespace('ns_%s' % self.COURSE_NAME)
self.course = courses.Course(None, self.app_context)
self.unit = self.course.add_unit()
self.unit.title = 'Test Unit'
self.unit.unit_header = '<p>a</p><p>b</p>'
self.unit.availability = courses.AVAILABILITY_AVAILABLE
self.lesson = self.course.add_lesson(self.unit)
self.lesson.title = 'Test Lesson'
self.lesson.objectives = '<p>c</p><p>d</p>'
self.lesson.availability = courses.AVAILABILITY_AVAILABLE
self.course.save()
courses.Course.ENVIRON_TEST_OVERRIDES = {
'extra_locales': [
{'locale': 'el', 'availability': 'available'},
{'locale': 'ru', 'availability': 'available'}]
}
actions.login(self.ADMIN_EMAIL)
def tearDown(self):
del sites.Registry.test_overrides[sites.GCB_COURSES_CONFIG.name]
namespace_manager.set_namespace(self.old_namespace)
courses.Course.ENVIRON_TEST_OVERRIDES = {}
super(I18nProgressDeferredUpdaterTests, self).tearDown()
def _put_payload(self, url, xsrf_name, key, payload):
request_dict = {
'key': key,
'xsrf_token': (
crypto.XsrfTokenManager.create_xsrf_token(xsrf_name)),
'payload': transforms.dumps(payload)
}
response = transforms.loads(self.put(
url, {'request': transforms.dumps(request_dict)}).body)
self.assertEquals(200, response['status'])
self.assertEquals('Saved.', response['message'])
return response
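# Loads the stored I18nProgress record for the key and checks its
# per-locale progress values.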
def _assert_progress(self, key, el_progress=None, ru_progress=None):
progress_dto = I18nProgressDAO.load(str(key))
self.assertIsNotNone(progress_dto)
self.assertEquals(el_progress, progress_dto.get_progress('el'))
self.assertEquals(ru_progress, progress_dto.get_progress('ru'))
def test_on_lesson_changed(self):
unit = self.course.add_unit()
unit.title = 'Test Unit'
lesson = self.course.add_lesson(unit)
lesson.title = 'Test Lesson'
lesson.objectives = '<p>a</p><p>b</p>'
lesson.availability = courses.AVAILABILITY_AVAILABLE
self.course.save()
lesson_bundle = {
'title': {
'type': 'string',
'source_value': '',
'data': [
{
'source_value': 'Test Lesson',
'target_value': 'TEST LESSON'}]
},
'objectives': {
'type': 'html',
'source_value': '<p>a</p><p>b</p>',
'data': [
{'source_value': 'a', 'target_value': 'A'},
{'source_value': 'b', 'target_value': 'B'}]
}
}
lesson_key = resource.Key(
resources_display.ResourceLesson.TYPE, lesson.lesson_id)
lesson_key_el = ResourceBundleKey.from_resource_key(lesson_key, 'el')
ResourceBundleDAO.save(
ResourceBundleDTO(str(lesson_key_el), lesson_bundle))
progress_dto = I18nProgressDAO.load(str(lesson_key))
self.assertIsNone(progress_dto)
edit_lesson_payload = {
'key': lesson.lesson_id,
'unit_id': unit.unit_id,
'title': 'Test Lesson',
'objectives': '<p>a</p><p>b</p>',
'auto_index': True,
'is_draft': True,
'video': '',
'scored': 'not_scored',
'notes': '',
'activity_title': '',
'activity_listed': True,
'activity': '',
'manual_progress': False,
}
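# Saving the lesson with unchanged content keeps the 'el' bundle in
# sync, so its progress is DONE; 'ru' has no bundle and stays
# NOT_STARTED.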
self._put_payload(
'rest/course/lesson', 'lesson-edit', lesson.lesson_id,
edit_lesson_payload)
self.execute_all_deferred_tasks()
self._assert_progress(
lesson_key,
el_progress=I18nProgressDTO.DONE,
ru_progress=I18nProgressDTO.NOT_STARTED)
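# Changing the title makes part of the 'el' bundle stale, so progress
# drops to IN_PROGRESS.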
edit_lesson_payload['title'] = 'New Title'
self._put_payload(
'rest/course/lesson', 'lesson-edit', lesson.lesson_id,
edit_lesson_payload)
self.execute_all_deferred_tasks()
self._assert_progress(
lesson_key,
el_progress=I18nProgressDTO.IN_PROGRESS,
ru_progress=I18nProgressDTO.NOT_STARTED)
def test_on_unit_changed(self):
unit = self.course.add_unit()
unit.title = 'Test Unit'
self.course.save()
unit_bundle = {
'title': {
'type': 'string',
'source_value': '',
'data': [
{'source_value': 'Test Unit', 'target_value': 'TEST UNIT'}]
}
}
unit_key = resource.Key(
resources_display.ResourceUnit.TYPE, unit.unit_id)
unit_key_el = ResourceBundleKey.from_resource_key(unit_key, 'el')
ResourceBundleDAO.save(
ResourceBundleDTO(str(unit_key_el), unit_bundle))
progress_dto = I18nProgressDAO.load(str(unit_key))
self.assertIsNone(progress_dto)
edit_unit_payload = {
'key': unit.unit_id,
'type': 'Unit',
'title': 'Test Unit',
'description': '',
'label_groups': [],
'is_draft': True,
'unit_header': '',
'pre_assessment': -1,
'post_assessment': -1,
'show_contents_on_one_page': False,
'manual_progress': False,
'unit_footer': ''
}
self._put_payload(
'rest/course/unit', 'put-unit', unit.unit_id, edit_unit_payload)
self.execute_all_deferred_tasks()
self._assert_progress(
unit_key,
el_progress=I18nProgressDTO.DONE,
ru_progress=I18nProgressDTO.NOT_STARTED)
edit_unit_payload['title'] = 'New Title'
self._put_payload(
'rest/course/unit', 'put-unit', unit.unit_id, edit_unit_payload)
self.execute_all_deferred_tasks()
self._assert_progress(
unit_key,
el_progress=I18nProgressDTO.IN_PROGRESS,
ru_progress=I18nProgressDTO.NOT_STARTED)
def test_on_question_changed(self):
qu_payload = {
'version': '1.5',
'question': 'What is a question?',
'description': 'Test Question',
'hint': '',
'defaultFeedback': '',
'rows': '1',
'columns': '100',
'graders': [{
'score': '1.0',
'matcher': 'case_insensitive',
'response': 'yes',
'feedback': ''}]
}
response = self._put_payload(
'rest/question/sa', 'sa-question-edit', '', qu_payload)
key = transforms.loads(response['payload'])['key']
qu_key = resource.Key(resources_display.ResourceSAQuestion.TYPE, key)
qu_bundle = {
'question': {
'type': 'html',
'source_value': 'What is a question?',
'data': [{
'source_value': 'What is a question?',
'target_value': 'WHAT IS A QUESTION?'}]
},
'description': {
'type': 'string',
'source_value': '',
'data': [{
'source_value': 'Test Question',
'target_value': 'TEST QUESTION'}]
},
'graders:[0]:response': {
'type': 'string',
'source_value': '',
'data': [{
'source_value': 'yes',
'target_value': 'YES'}]
}
}
qu_key_el = ResourceBundleKey.from_resource_key(qu_key, 'el')
ResourceBundleDAO.save(
ResourceBundleDTO(str(qu_key_el), qu_bundle))
self.execute_all_deferred_tasks()
self._assert_progress(
qu_key,
el_progress=I18nProgressDTO.DONE,
ru_progress=I18nProgressDTO.NOT_STARTED)
qu_payload['description'] = 'New Description'
qu_payload['key'] = key
response = self._put_payload(
'rest/question/sa', 'sa-question-edit', key, qu_payload)
self.execute_all_deferred_tasks()
self._assert_progress(
qu_key,
el_progress=I18nProgressDTO.IN_PROGRESS,
ru_progress=I18nProgressDTO.NOT_STARTED)
def test_on_question_group_changed(self):
qgp_payload = {
'version': '1.5',
'description': 'Test Question Group',
'introduction': 'Test introduction',
'items': []
}
response = self._put_payload(
'rest/question_group', 'question-group-edit', '', qgp_payload)
key = transforms.loads(response['payload'])['key']
qgp_key = resource.Key(
resources_display.ResourceQuestionGroup.TYPE, key)
qgp_bundle = {
'description': {
'type': 'string',
'source_value': '',
'data': [{
'source_value': 'Test Question Group',
'target_value': 'TEST QUESTION GROUP'}]
},
'introduction': {
'type': 'html',
'source_value': 'Test introduction',
'data': [{
'source_value': 'Test introduction',
'target_value': 'TEST INTRODUCTION'}]
}
}
qgp_key_el = ResourceBundleKey.from_resource_key(qgp_key, 'el')
ResourceBundleDAO.save(
ResourceBundleDTO(str(qgp_key_el), qgp_bundle))
self.execute_all_deferred_tasks()
self._assert_progress(
qgp_key,
el_progress=I18nProgressDTO.DONE,
ru_progress=I18nProgressDTO.NOT_STARTED)
qgp_payload['description'] = 'New Description'
qgp_payload['key'] = key
response = self._put_payload(
'rest/question_group', 'question-group-edit', key, qgp_payload)
self.execute_all_deferred_tasks()
self._assert_progress(
qgp_key,
el_progress=I18nProgressDTO.IN_PROGRESS,
ru_progress=I18nProgressDTO.NOT_STARTED)
def test_on_course_settings_changed(self):
homepage_payload = {
'homepage': {
'base:show_gplus_button': True,
'base:nav_header': 'Search Education',
'course:title': 'My New Course',
'course:blurb': 'Awesome course',
'course:instructor_details': '',
'course:main_image:url': '',
'course:main_image:alt_text': '',
'base:privacy_terms_url': 'Privacy Policy'}
}
homepage_bundle = {
'course:title': {
'type': 'string',
'source_value': '',
'data': [{
'source_value': 'My New Course',
'target_value': 'MY NEW COURSE'}]
},
'course:blurb': {
'type': 'html',
'source_value': 'Awesome course',
'data': [{
'source_value': 'Awesome course',
'target_value': 'AWESOME COURSE'}]
},
'base:nav_header': {
'type': 'string',
'source_value': '',
'data': [{
'source_value': 'Search Education',
'target_value': 'SEARCH EDUCATION'}]
},
'base:privacy_terms_url': {
'type': 'string',
'source_value': '',
'data': [{
'source_value': 'Privacy Policy',
'target_value': 'PRIVACY_POLICY'}]
},
'institution:logo:url': {
'type': 'string',
'source_value': 'assets/img/your_logo_here.png',
'data': [{
'source_value': 'assets/img/your_logo_here.png',
'target_value': 'assets/img/your_greek_logo_here.png',
}],
},
}
homepage_key = resource.Key(
resources_display.ResourceCourseSettings.TYPE, 'homepage')
homepage_key_el = ResourceBundleKey.from_resource_key(
homepage_key, 'el')
ResourceBundleDAO.save(
ResourceBundleDTO(str(homepage_key_el), homepage_bundle))
self._put_payload(
'rest/course/settings', 'basic-course-settings-put',
'/course.yaml', homepage_payload)
self.execute_all_deferred_tasks()
self._assert_progress(
homepage_key,
el_progress=I18nProgressDTO.DONE,
ru_progress=I18nProgressDTO.NOT_STARTED)
homepage_payload['homepage']['course:title'] = 'New Title'
self._put_payload(
'rest/course/settings', 'basic-course-settings-put',
'/course.yaml', homepage_payload)
self.execute_all_deferred_tasks()
self._assert_progress(
homepage_key,
el_progress=I18nProgressDTO.IN_PROGRESS,
ru_progress=I18nProgressDTO.NOT_STARTED)
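# Covers LazyTranslator, which defers translation until the value is
# rendered and so must behave like a plain string under JSON
# serialization, concatenation, %-interpolation, and case conversion.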
class LazyTranslatorTests(actions.TestBase):
ADMIN_EMAIL = '[email protected]'
COURSE_NAME = 'i18n_course'
COURSE_TITLE = 'I18N Course'
def setUp(self):
super(LazyTranslatorTests, self).setUp()
self.base = '/' + self.COURSE_NAME
self.app_context = actions.simple_add_course(
self.COURSE_NAME, self.ADMIN_EMAIL, self.COURSE_TITLE)
def test_lazy_translator_is_json_serializable(self):
source_value = 'hello'
translation_dict = {
'type': 'string',
'data': [
{'source_value': 'hello', 'target_value': 'HELLO'}]}
key = ResourceBundleKey(
resources_display.ResourceLesson.TYPE, '23', 'el')
lazy_translator = LazyTranslator(
self.app_context, key, source_value, translation_dict)
self.assertEquals(
'{"lt": "HELLO"}', transforms.dumps({'lt': lazy_translator}))
def test_lazy_translator_supports_addition(self):
source_value = 'hello, '
translation_dict = {
'type': 'string',
'data': [
{'source_value': 'hello, ', 'target_value': 'HELLO, '}]}
key = ResourceBundleKey(
resources_display.ResourceLesson.TYPE, '23', 'el')
lazy_translator = LazyTranslator(
self.app_context, key, source_value, translation_dict)
self.assertEquals('HELLO, world', lazy_translator + 'world')
def test_lazy_translator_supports_interpolation(self):
source_value = 'hello, %s'
translation_dict = {
'type': 'string',
'data': [
{'source_value': 'hello, %s', 'target_value': 'HELLO, %s'}]}
key = ResourceBundleKey(
resources_display.ResourceLesson.TYPE, '23', 'el')
lazy_translator = LazyTranslator(
self.app_context, key, source_value, translation_dict)
self.assertEquals('HELLO, world', lazy_translator % 'world')
def test_lazy_translator_supports_upper_and_lower(self):
source_value = 'Hello'
translation_dict = {
'type': 'string',
'data': [
{'source_value': 'Hello', 'target_value': 'Bonjour'}]}
key = ResourceBundleKey(
resources_display.ResourceLesson.TYPE, '23', 'el')
lazy_translator = LazyTranslator(
self.app_context, key, source_value, translation_dict)
self.assertEquals('BONJOUR', lazy_translator.upper())
self.assertEquals('bonjour', lazy_translator.lower())
def test_lazy_translator_records_status(self):
source_value = 'hello'
translation_dict = {
'type': 'html',
'source_value': 'hello',
'data': [
{'source_value': 'hello', 'target_value': 'HELLO'}]}
key = ResourceBundleKey(
resources_display.ResourceLesson.TYPE, '23', 'el')
lazy_translator = LazyTranslator(
self.app_context, key, source_value, translation_dict)
self.assertEquals(
LazyTranslator.NOT_STARTED_TRANSLATION, lazy_translator.status)
str(lazy_translator)
self.assertEquals(
LazyTranslator.VALID_TRANSLATION, lazy_translator.status)
# Monkey patch get_template_environ because the app_context is not
# fully set up
def mock_get_template_environ(unused_locale, dirs):
return self.app_context.fs.get_jinja_environ(dirs)
self.app_context.get_template_environ = mock_get_template_environ
lazy_translator = LazyTranslator(
self.app_context, key, 'changed', translation_dict)
str(lazy_translator)
self.assertEquals(
LazyTranslator.INVALID_TRANSLATION, lazy_translator.status)
self.assertEquals(
'The content has changed and 1 part '
'of the translation is out of date.',
lazy_translator.errm)
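# End-to-end checks that saved resource bundles are applied when course
# pages render in the student's preferred locale, including custom tags,
# questions, and fallback behavior for missing or stale translations.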
class CourseContentTranslationTests(actions.TestBase):
ADMIN_EMAIL = '[email protected]'
COURSE_NAME = 'i18n_course'
COURSE_TITLE = 'I18N Course'
STUDENT_EMAIL = '[email protected]'
def setUp(self):
super(CourseContentTranslationTests, self).setUp()
self.base = '/' + self.COURSE_NAME
app_context = actions.simple_add_course(
self.COURSE_NAME, self.ADMIN_EMAIL, self.COURSE_TITLE)
self.old_namespace = namespace_manager.get_namespace()
namespace_manager.set_namespace('ns_%s' % self.COURSE_NAME)
self.course = courses.Course(None, app_context)
self.unit = self.course.add_unit()
self.unit.title = 'Test Unit'
self.unit.unit_header = '<p>a</p><p>b</p>'
self.unit.availability = courses.AVAILABILITY_AVAILABLE
self.lesson = self.course.add_lesson(self.unit)
self.lesson.title = 'Test Lesson'
self.lesson.objectives = '<p>c</p><p>d</p>'
self.lesson.availability = courses.AVAILABILITY_AVAILABLE
self.course.save()
self.unit_bundle = {
'title': {
'type': 'string',
'source_value': '',
'data': [
{'source_value': 'Test Unit', 'target_value': 'TEST UNIT'}]
},
'unit_header': {
'type': 'html',
'source_value': '<p>a</p><p>b</p>',
'data': [
{'source_value': 'a', 'target_value': 'A'},
{'source_value': 'b', 'target_value': 'B'}]
}
}
self.lesson_bundle = {
'title': {
'type': 'string',
'source_value': '',
'data': [
{
'source_value': 'Test Lesson',
'target_value': 'TEST LESSON'}]
},
'objectives': {
'type': 'html',
'source_value': '<p>c</p><p>d</p>',
'data': [
{'source_value': 'c', 'target_value': 'C'},
{'source_value': 'd', 'target_value': 'D'}]
}
}
self.unit_key_el = ResourceBundleKey(
resources_display.ResourceUnit.TYPE, self.unit.unit_id, 'el')
self.lesson_key_el = ResourceBundleKey(
resources_display.ResourceLesson.TYPE, self.lesson.lesson_id, 'el')
actions.login(self.ADMIN_EMAIL, is_admin=True)
prefs = models.StudentPreferencesDAO.load_or_default()
prefs.locale = 'el'
models.StudentPreferencesDAO.save(prefs)
def tearDown(self):
del sites.Registry.test_overrides[sites.GCB_COURSES_CONFIG.name]
namespace_manager.set_namespace(self.old_namespace)
super(CourseContentTranslationTests, self).tearDown()
def _store_resource_bundle(self):
ResourceBundleDAO.save_all([
ResourceBundleDTO(str(self.unit_key_el), self.unit_bundle),
ResourceBundleDTO(str(self.lesson_key_el), self.lesson_bundle)])
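# The bundles above store uppercased translations, so the tests assert
# that rendered pages contain the uppercased text.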
def test_lesson_and_unit_translated(self):
self._store_resource_bundle()
page_html = self.get('unit?unit=1').body
self.assertIn('TEST UNIT', page_html)
self.assertIn('<p>A</p><p>B</p>', page_html)
self.assertIn('TEST LESSON', page_html)
self.assertIn('<p>C</p><p>D</p>', page_html)
def test_links_are_translated(self):
link = self.course.add_link()
link.title = 'Test Link'
link.description = 'Test Description'
link.href = 'http://www.foo.com'
self.course.save()
link_bundle = {
'title': {
'type': 'string',
'source_value': '',
'data': [
{
'source_value': 'Test Link',
'target_value': 'TEST LINK'}]
},
'description': {
'type': 'string',
'source_value': '',
'data': [
{
'source_value': 'Test Description',
'target_value': 'TEST DESCRIPTION'}]
},
'url': {
'type': 'string',
'source_value': '',
'data': [
{
'source_value': 'http://www.foo.com',
'target_value': 'http://www.foo.gr'}]
}
}
link_key = ResourceBundleKey(
resources_display.ResourceLink.TYPE, link.unit_id, 'el')
ResourceBundleDAO.save(
ResourceBundleDTO(str(link_key), link_bundle))
page_html = self.get('course').body
self.assertIn('TEST LINK', page_html)
self.assertIn('TEST DESCRIPTION', page_html)
self.assertIn('http://www.foo.gr', page_html)
def test_assessments_are_translated(self):
assessment = self.course.add_assessment()
assessment.title = 'Test Assessment'
assessment.html_content = '<p>a</p><p>b</p>'
self.course.save()
assessment_bundle = {
'assessment:title': {
'type': 'string',
'source_value': '',
'data': [
{
'source_value': 'Test Assessment',
'target_value': 'TEST ASSESSMENT'}]
},
'assessment:html_content': {
'type': 'html',
'source_value': '<p>a</p><p>b</p>',
'data': [
{'source_value': 'a', 'target_value': 'A'},
{'source_value': 'b', 'target_value': 'B'}]
}
}
assessment_key = ResourceBundleKey(
resources_display.ResourceAssessment.TYPE, assessment.unit_id, 'el')
ResourceBundleDAO.save(
ResourceBundleDTO(str(assessment_key), assessment_bundle))
page_html = self.get('assessment?name=%s' % assessment.unit_id).body
self.assertIn('TEST ASSESSMENT', page_html)
self.assertIn('<p>A</p><p>B</p>', page_html)
def test_bad_translations_are_flagged_for_admin(self):
self.unit_bundle['unit_header']['data'][1] = {
'source_value': 'b', 'target_value': '<b#1>b</b#1>'}
self._store_resource_bundle()
dom = self.parse_html_string(self.get('unit?unit=1').body)
self.assertEquals(
'Error in chunk 2. Unexpected tag: <b#1>.',
dom.find('.//div[@class="gcb-translation-error-body"]/p[1]').text)
edit_link = dom.find(
'.//div[@class="gcb-translation-error-body"]/p[2]/a')
self.assertEquals('Edit the resource', edit_link.text)
self.assertEquals(
'dashboard?action=i18_console&key=unit%%3A%s%%3Ael' % (
self.unit.unit_id),
edit_link.attrib['href'])
def test_bad_translations_are_not_flagged_for_student(self):
self.unit_bundle['unit_header']['data'][1] = {
'source_value': 'b', 'target_value': '<b#1>b</b#1>'}
self._store_resource_bundle()
actions.logout()
actions.login(self.STUDENT_EMAIL, is_admin=False)
self.assertIn('<p>a</p><p>b</p>', self.get('unit?unit=1').body)
def test_fallback_to_default_when_translation_missing(self):
del self.lesson_bundle['objectives']
self._store_resource_bundle()
page_html = self.get('unit?unit=1').body
self.assertIn('TEST UNIT', page_html)
self.assertIn('<p>A</p><p>B</p>', page_html)
self.assertIn('TEST LESSON', page_html)
self.assertNotIn('<p>C</p><p>D</p>', page_html)
self.assertIn('<p>c</p><p>d</p>', page_html)
def test_partial_translations(self):
def update_lesson_objectives(objectives):
self.lesson = self.course.find_lesson_by_id(
self.unit.unit_id, self.lesson.lesson_id)
self.lesson.objectives = objectives
self.course.save()
def assert_p_tags(dom, expected_content_list, expected_error_msg):
# Ensure that the lesson body is a list of <p>..</p> tags with the
# expected content. All should be inside an error warning div.
p_tag_content_list = [
p_tag.text for p_tag in dom.findall(
'.//div[@class="gcb-lesson-content"]'
'//div[@class="gcb-translation-error-alt"]/p')]
self.assertEquals(expected_content_list, p_tag_content_list)
error_msg = dom.find(
'.//div[@class="gcb-lesson-content"]'
'//div[@class="gcb-translation-error-body"]/p[1]')
self.assertIn('content has changed', error_msg.text)
if expected_error_msg:
self.assertIn(expected_error_msg, error_msg.text)
self._store_resource_bundle()
# Delete first para from lesson
update_lesson_objectives('<p>d</p>')
dom = self.parse_html_string(self.get('unit?unit=1').body)
assert_p_tags(
dom, ['C', 'D'], '1 part of the translation is out of date')
# Delete second para from lesson
update_lesson_objectives('<p>c</p>')
dom = self.parse_html_string(self.get('unit?unit=1').body)
assert_p_tags(
dom, ['C', 'D'], '1 part of the translation is out of date')
# Add para to lesson
update_lesson_objectives('<p>c</p><p>d</p><p>e</p>')
dom = self.parse_html_string(self.get('unit?unit=1').body)
assert_p_tags(
dom, ['C', 'D'], '1 part of the translation is out of date')
# Change para in lesson
update_lesson_objectives('<p>cc</p><p>d</p>')
dom = self.parse_html_string(self.get('unit?unit=1').body)
assert_p_tags(
dom, ['C', 'D'], '1 part of the translation is out of date')
# Change two paras
update_lesson_objectives('<p>cc</p><p>dd</p>')
dom = self.parse_html_string(self.get('unit?unit=1').body)
assert_p_tags(
dom, ['C', 'D'], '2 parts of the translation are out of date')
# A student should see the partial translation but no error message
actions.logout()
actions.login(self.STUDENT_EMAIL, is_admin=False)
prefs = models.StudentPreferencesDAO.load_or_default()
prefs.locale = 'el'
models.StudentPreferencesDAO.save(prefs)
dom = self.parse_html_string(self.get('unit?unit=1').body)
self.assertEquals(
['C', 'D'],
[p_tag.text for p_tag in dom.findall(
'.//div[@class="gcb-lesson-content"]/p')])
self.assertIsNone(dom.find('.//div[@class="gcb-translation-error"]'))
def test_custom_tag_expanded_without_analytics(self):
with actions.OverriddenEnvironment(
{'course': {'can_record_student_events': False}}):
source_video_id = 'Kdg2drcUjYI'
target_video_id = 'jUfccP5Rl5M'
unit_header = (
'text'
'<gcb-youtube videoid="%s" instanceid="c4CLTDvttJEu">'
'</gcb-youtube>') % source_video_id
unit = self.course.add_unit()
unit.title = 'Tag Unit'
unit.unit_header = unit_header
self.course.save()
unit_bundle = {
'title': {
'type': 'string',
'source_value': '',
'data': [{
'source_value': 'Tag Unit',
'target_value': 'TAG UNIT'}]
},
'unit_header': {
'type': 'html',
'source_value': unit_header,
'data': [
{
'source_value': (
'text<gcb-youtube#1 videoid="%s" />'
) % source_video_id,
'target_value': (
'TEXT<gcb-youtube#1 videoid="%s" />'
) % target_video_id}]
}
}
unit_key_el = ResourceBundleKey(
resources_display.ResourceUnit.TYPE, unit.unit_id, 'el')
ResourceBundleDAO.save(
ResourceBundleDTO(str(unit_key_el), unit_bundle))
page_html = self.get('unit?unit=%s' % unit.unit_id).body
dom = self.parse_html_string(page_html)
main = dom.find('.//div[@id="gcb-main-article"]/div[2]')
self.assertEquals('TEXT', main.text.strip())
self.assertEquals('div', main[0].tag)
self.assertEquals('gcb-video-container', main[0].attrib['class'])
self.assertEquals(1, len(main[0]))
self.assertEquals('iframe', main[0][0].tag)
self.assertIn(target_video_id, main[0][0].attrib['src'])
def test_custom_tag_with_body_is_translated(self):
tag_string = (
'<gcb-markdown instanceid="c4CLTDvttJEu">'
'*hello*'
'</gcb-markdown>')
unit = self.course.add_unit()
unit.title = 'Tag Unit'
unit.unit_header = tag_string
self.course.save()
unit_bundle = {
'unit_header': {
'type': 'html',
'source_value': tag_string,
'data': [
{
'source_value': (
'<gcb-markdown#1>*hello*</gcb-markdown#1>'),
'target_value': (
'<gcb-markdown#1>*HELLO*</gcb-markdown#1>')}
]
}
}
unit_key_el = ResourceBundleKey(
resources_display.ResourceUnit.TYPE, unit.unit_id, 'el')
ResourceBundleDAO.save(
ResourceBundleDTO(str(unit_key_el), unit_bundle))
page_html = self.get('unit?unit=%s' % unit.unit_id).body
dom = self.parse_html_string(page_html)
main = dom.find('.//div[@id="gcb-main-article"]/div[2]')
markdown = main.find('.//div[@class="gcb-markdown"]/p')
self.assertEquals('HELLO', markdown.find('./em').text)
def _add_question(self):
# Create a question
qu_dict = {
'type': 0,
'question': 'question text',
'description': 'description text',
'choices': [
{'text': 'choice 1', 'score': 0.0, 'feedback': ''},
{'text': 'choice 2', 'score': 1.0, 'feedback': ''}],
'multiple_selections': False,
'last_modified': 1410451682.042784,
'version': '1.5'
}
qu_dto = models.QuestionDTO(None, qu_dict)
qu_id = models.QuestionDAO.save(qu_dto)
# Store translation data for the question
qu_bundle = {
'question': {
'type': 'html',
'source_value': 'question text',
'data': [
{
'source_value': 'question text',
'target_value': 'QUESTION TEXT'
}]
},
'description': {
'source_value': None,
'type': 'string',
'data': [
{
'source_value': 'description text',
'target_value': 'DESCRIPTION TEXT'
}]
},
'choices:[0]:text': {
'type': 'html',
'source_value': 'choice 1',
'data': [
{
'source_value': 'choice 1',
'target_value': 'CHOICE 1'
}
]
},
'choices:[1]:text': {
'source_value': 'choice 2',
'type': 'html',
'data': [
{
'source_value': 'choice 2',
'target_value': 'CHOICE 2'
}
]
}}
key_el = ResourceBundleKey(
resources_display.ResourceMCQuestion.TYPE, qu_id, 'el')
ResourceBundleDAO.save(
ResourceBundleDTO(str(key_el), qu_bundle))
return qu_id
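# Embeds the question created above in an assessment via the <question>
# custom tag and checks that its Greek bundle is applied on render.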
def test_questions_are_translated(self):
# Create an assessment and add the question to the content
assessment = self.course.add_assessment()
assessment.title = 'Test Assessment'
assessment.html_content = """
<question quid="%s" weight="1" instanceid="test_question">%s
""" % (self._add_question(), '</question>')
self.course.save()
page_html = self.get('assessment?name=%s' % assessment.unit_id).body
self.assertIn('QUESTION TEXT', page_html)
self.assertIn('CHOICE 1', page_html)
self.assertIn('CHOICE 2', page_html)
def test_legacy_questions_with_null_body(self):
# Create a question
qu_dict = {
'type': 0,
'question': None,
'description': 'description text',
'choices': [
{'text': 'choice 1', 'score': 0.0, 'feedback': ''},
{'text': 'choice 2', 'score': 1.0, 'feedback': ''}],
'multiple_selections': False,
'last_modified': 1410451682.042784,
'version': '1.5'
}
qu_dto = models.QuestionDTO(None, qu_dict)
qu_id = models.QuestionDAO.save(qu_dto)
assessment = self.course.add_assessment()
assessment.title = 'Test Assessment'
assessment.html_content = """
<question quid="%s" weight="1" instanceid="test_question">%s
""" % (qu_id, '</question>')
self.course.save()
# Store translation data for the question
qu_bundle = {
'question': {
'type': 'html',
'source_value': 'None',
'data': [
{
'source_value': 'None',
'target_value': 'NONE'
}]
}
}
key_el = ResourceBundleKey(
resources_display.ResourceMCQuestion.TYPE, qu_id, 'el')
ResourceBundleDAO.save(
ResourceBundleDTO(str(key_el), qu_bundle))
dom = self.parse_html_string(
self.get('assessment?name=%s' % assessment.unit_id).body)
self.assertIsNone(dom.find('.//div[@class="qt-question"]').text)
def test_question_groups_are_translated(self):
# Create a question group with one question
qgp_dict = {
'description': 'description text',
'introduction': '<p>a</p><p>b</p>',
'items': [{'question': self._add_question(), 'weight': '1'}],
'last_modified': 1410451682.042784,
'version': '1.5'
}
qgp_dto = models.QuestionGroupDTO(None, qgp_dict)
qgp_id = models.QuestionGroupDAO.save(qgp_dto)
# Create an assessment and add the question group to the content
assessment = self.course.add_assessment()
assessment.title = 'Test Assessment'
assessment.html_content = """
<question-group qgid="%s" instanceid="test-qgp">
</question-group><br>
""" % qgp_id
self.course.save()
# Store translation data for the question group
qgp_bundle = {
'description': {
'source_value': None,
'type': 'string',
'data': [
{
'source_value': 'description text',
'target_value': 'DESCRIPTION TEXT'
}]
},
'introduction': {
'type': 'html',
'source_value': '<p>a</p><p>b</p>',
'data': [
{
'source_value': 'a',
'target_value': 'A'
},
{
'source_value': 'b',
'target_value': 'B'
}
]
}}
key_el = ResourceBundleKey(
resources_display.ResourceQuestionGroup.TYPE, qgp_id, 'el')
ResourceBundleDAO.save(
ResourceBundleDTO(str(key_el), qgp_bundle))
page_html = self.get('assessment?name=%s' % assessment.unit_id).body
dom = self.parse_html_string(page_html)
main = dom.find('.//div[@id="test-qgp"]')
self.assertEquals(
'A', main.find('.//div[@class="qt-introduction"]/p[1]').text)
self.assertEquals(
'B', main.find('.//div[@class="qt-introduction"]/p[2]').text)
self.assertEquals(
'QUESTION TEXT', main.find('.//div[@class="qt-question"]').text)
self.assertEquals(
'CHOICE 1',
main.findall('.//div[@class="qt-choices"]//label')[0].text.strip())
self.assertEquals(
'CHOICE 2',
main.findall('.//div[@class="qt-choices"]//label')[1].text.strip())
def test_course_settings_are_translated(self):
course_bundle = {
'course:title': {
'source_value': None,
'type': 'string',
'data': [
{
'source_value': self.COURSE_TITLE,
'target_value': 'TRANSLATED TITLE'
}]
}}
key_el = ResourceBundleKey(
resources_display.ResourceCourseSettings.TYPE, 'homepage', 'el')
ResourceBundleDAO.save(
ResourceBundleDTO(str(key_el), course_bundle))
page_html = self.get('course').body
dom = self.parse_html_string(page_html)
self.assertEquals(
'TRANSLATED TITLE',
dom.find('.//h1[@class="gcb-product-headers-large"]').text.strip())
def test_course_settings_load_with_default_locale(self):
# NOTE: This tests the protection against infinite recursion in the
# course settings translation. When no locale is set,
# sites.get_current_locale must consult the course settings to find
# the default locale. If that call to get_current_locale happens inside
# the translation callback that runs while the course settings are being
# loaded, it would recurse forever. This test checks that this case is
# defended against.
prefs = models.StudentPreferencesDAO.load_or_default()
models.StudentPreferencesDAO.delete(prefs)
page_html = self.get('course').body
dom = self.parse_html_string(page_html)
self.assertEquals(
self.COURSE_TITLE,
dom.find('.//h1[@class="gcb-product-headers-large"]').text.strip())
def test_invitations_are_translated(self):
student_name = 'A. Student'
sender_email = '[email protected]'
recipient_email = '[email protected]'
translated_subject = 'EMAIL_FROM A. Student'
# The invitation email
email_env = {
'course': {
'invitation_email': {
'enabled': True,
'sender_email': sender_email,
'subject_template': 'Email from {{sender_name}}',
'body_template':
'From {{sender_name}}. Unsubscribe: {{unsubscribe_url}}'}}}
# Translate the subject line of the email
invitation_bundle = {
'course:invitation_email:subject_template': {
'type': 'string',
'source_value': None,
'data': [{
'source_value': 'Email from {{sender_name}}',
'target_value': 'EMAIL_FROM {{sender_name}}'}]}}
key_el = ResourceBundleKey(
resources_display.ResourceCourseSettings.TYPE, 'invitation', 'el')
ResourceBundleDAO.save(
ResourceBundleDTO(str(key_el), invitation_bundle))
# Set up a spy to capture mails sent
send_async_call_log = []
def send_async_spy(unused_cls, *args, **kwargs):
send_async_call_log.append({'args': args, 'kwargs': kwargs})
# Patch the course env and the notifications sender
courses.Course.ENVIRON_TEST_OVERRIDES = email_env
old_send_async = notifications.Manager.send_async
notifications.Manager.send_async = classmethod(send_async_spy)
try:
# Register a student
actions.login(self.STUDENT_EMAIL, is_admin=False)
actions.register(self, student_name)
# Set locale prefs
prefs = models.StudentPreferencesDAO.load_or_default()
prefs.locale = 'el'
models.StudentPreferencesDAO.save(prefs)
# Read the sample email displayed to the student
self.assertIn(
translated_subject, self.get('modules/invitation').body)
# Post a request to the REST handler
request_dict = {
'xsrf_token': (
crypto.XsrfTokenManager.create_xsrf_token('invitation')),
'payload': {'emailList': recipient_email}
}
response = transforms.loads(self.post(
'rest/modules/invitation',
{'request': transforms.dumps(request_dict)}).body)
self.assertEquals(200, response['status'])
self.assertEquals('OK, 1 messages sent', response['message'])
self.assertEquals(
translated_subject, send_async_call_log[0]['args'][4])
finally:
courses.Course.ENVIRON_TEST_OVERRIDES = {}
notifications.Manager.send_async = old_send_async
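# Exercises the REST handlers for downloading, uploading, and deleting
# translations, including validation of the request envelope, XSRF
# checks, and permission handling.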
class TranslationImportExportTests(actions.TestBase):
ADMIN_EMAIL = '[email protected]'
COURSE_NAME = 'i18n_course'
COURSE_TITLE = 'I18N Course'
STUDENT_EMAIL = '[email protected]'
URL = 'dashboard?action=i18n_dashboard'
def setUp(self):
super(TranslationImportExportTests, self).setUp()
self.base = '/' + self.COURSE_NAME
app_context = actions.simple_add_course(
self.COURSE_NAME, self.ADMIN_EMAIL, self.COURSE_TITLE)
self.old_namespace = namespace_manager.get_namespace()
namespace_manager.set_namespace('ns_%s' % self.COURSE_NAME)
self.course = courses.Course(None, app_context)
self.unit = self.course.add_unit()
self.unit.title = 'Unit Title'
self.unit.description = 'unit description'
self.unit.unit_header = 'unit header'
self.unit.unit_footer = 'unit footer'
self.unit.availability = courses.AVAILABILITY_AVAILABLE
self.assessment = self.course.add_assessment()
self.assessment.title = 'Assessment Title'
self.assessment.description = 'assessment description'
self.assessment.html_content = 'assessment html content'
self.assessment.html_review_form = 'assessment html review form'
self.assessment.availability = courses.AVAILABILITY_AVAILABLE
self.link = self.course.add_link()
self.link.title = 'Link Title'
self.link.description = 'link description'
self.link.url = 'link url'
self.lesson = self.course.add_lesson(self.unit)
self.lesson.unit_id = self.unit.unit_id
self.lesson.title = 'Lesson Title'
self.lesson.objectives = 'lesson objectives'
self.lesson.video_id = 'lesson video'
self.lesson.notes = 'lesson notes'
self.lesson.availability = courses.AVAILABILITY_AVAILABLE
self.course.save()
foo_content = StringIO.StringIO('content of foo.jpg')
fs = app_context.fs.impl
fs.put(fs.physical_to_logical('/assets/img/foo.jpg'), foo_content)
mc_qid = models.QuestionDAO.save(models.QuestionDTO(
None,
{
'question': 'mc question',
'description': 'mc description',
'type': 0,
'choices': [
{'score': 1.0,
'feedback': 'mc feedback one',
'text': 'mc answer one'},
{'score': 0.0,
'feedback': 'mc feedback two',
'text': 'mc answer two'}
],
'multiple_selections': False,
'version': '1.5',
}))
sa_qid = models.QuestionDAO.save(models.QuestionDTO(
None,
{
'question': 'sa question',
'description': 'sa description',
'type': 1,
'columns': 100,
'hint': 'sa hint',
'graders': [
{'score': '1.0',
'response': 'sa response',
'feedback': 'sa feedback',
'matcher': 'case_insensitive'}
],
'version': '1.5',
'defaultFeedback': 'sa default feedback',
'rows': 1}))
models.QuestionGroupDAO.save(models.QuestionGroupDTO(
None,
{'items': [
{'weight': '1',
'question': mc_qid},
{'weight': '1',
'question': sa_qid}],
'version': '1.5',
'introduction': 'question group introduction',
'description': 'question group description'}))
actions.login(self.ADMIN_EMAIL, is_admin=True)
prefs = models.StudentPreferencesDAO.load_or_default()
prefs.locale = 'el'
models.StudentPreferencesDAO.save(prefs)
def tearDown(self):
namespace_manager.set_namespace(self.old_namespace)
super(TranslationImportExportTests, self).tearDown()
def _do_download(self, payload, method='put'):
request = {
'xsrf_token': crypto.XsrfTokenManager.create_xsrf_token(
i18n_dashboard.TranslationDownloadRestHandler.XSRF_TOKEN_NAME),
'payload': transforms.dumps(payload),
}
if method == 'put':
fp = self.put
else:
fp = self.post
response = fp(
'/%s%s' % (self.COURSE_NAME,
i18n_dashboard.TranslationDownloadRestHandler.URL),
{'request': transforms.dumps(request)})
return response
def _do_deletion(self, payload):
request = {
'xsrf_token': crypto.XsrfTokenManager.create_xsrf_token(
i18n_dashboard.TranslationDeletionRestHandler.XSRF_TOKEN_NAME),
'payload': transforms.dumps(payload),
}
response = self.put(
'/%s%s' % (self.COURSE_NAME,
i18n_dashboard.TranslationDeletionRestHandler.URL),
params={'request': transforms.dumps(request)})
return response
def _do_upload(self, contents):
xsrf_token = crypto.XsrfTokenManager.create_xsrf_token(
i18n_dashboard.TranslationUploadRestHandler.XSRF_TOKEN_NAME)
response = self.post(
'/%s%s' % (self.COURSE_NAME,
i18n_dashboard.TranslationUploadRestHandler.URL),
{'request': transforms.dumps({'xsrf_token': xsrf_token})},
upload_files=[('file', 'doesntmatter', contents)])
return response
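# The helpers above wrap the download, deletion, and upload REST
# endpoints; the tests below first probe their error handling and then
# exercise the success paths.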
def test_deletion_ui_no_request(self):
response = self.put(
'/%s%s' % (self.COURSE_NAME,
i18n_dashboard.TranslationDeletionRestHandler.URL),
{})
rsp = transforms.loads(response.body)
self.assertEquals(rsp['status'], 400)
self.assertEquals(
rsp['message'], 'Malformed or missing "request" parameter.')
def test_deletion_ui_no_payload(self):
response = self.put(
'/%s%s' % (self.COURSE_NAME,
i18n_dashboard.TranslationDeletionRestHandler.URL),
{'request': transforms.dumps({'foo': 'bar'})})
rsp = transforms.loads(response.body)
self.assertEquals(rsp['status'], 400)
self.assertEquals(
rsp['message'], 'Malformed or missing "payload" parameter.')
def test_deletion_ui_no_xsrf(self):
response = self.put(
'/%s%s' % (self.COURSE_NAME,
i18n_dashboard.TranslationDeletionRestHandler.URL),
{'request': transforms.dumps({'payload': '{}'})})
rsp = transforms.loads(response.body)
self.assertEquals(rsp['status'], 403)
self.assertEquals(
rsp['message'],
'Bad XSRF token. Please reload the page and try again')
def test_deletion_ui_no_locales(self):
rsp = transforms.loads(self._do_deletion({'locales': []}).body)
self.assertEquals(rsp['status'], 400)
self.assertEquals(rsp['message'],
'Please select at least one language to delete.')
def test_deletion_ui_malformed_locales(self):
actions.login('[email protected]', is_admin=False)
rsp = transforms.loads(self._do_deletion(
{'locales': [{'checked': True}]}).body)
self.assertEquals(rsp['status'], 400)
self.assertEquals('Language specification not as expected.',
rsp['message'])
def test_deletion_ui_no_selected_locales(self):
actions.login('[email protected]', is_admin=False)
rsp = transforms.loads(self._do_deletion(
{'locales': [{'locale': 'de'}]}).body)
self.assertEquals(rsp['status'], 400)
self.assertEquals('Please select at least one language to delete.',
rsp['message'])
def test_deletion_ui_no_permissions(self):
actions.login('[email protected]', is_admin=False)
rsp = transforms.loads(self._do_deletion(
{'locales': [{'locale': 'de', 'checked': True}]}).body)
self.assertEquals(401, rsp['status'])
self.assertEquals('Access denied.', rsp['message'])
def test_deletion(self):
self.get('dashboard?action=i18n_reverse_case')
# Verify that there are translation bundle rows for 'ln',
# and progress items with settings for 'ln'.
bundles = ResourceBundleDAO.get_all_for_locale('ln')
self.assertGreater(len(bundles), 0)
progress = I18nProgressDAO.get_all()
self.assertGreater(len(progress), 0)
for p in progress:
self.assertEquals(I18nProgressDTO.DONE, p.get_progress('ln'))
rsp = transforms.loads(self._do_deletion(
{'locales': [{'locale': 'ln', 'checked': True}]}).body)
self.assertEquals(200, rsp['status'])
self.assertEquals('Success.', rsp['message'])
# Verify that there are no translation bundle rows for 'ln',
# and no progress items with settings for 'ln'.
bundles = ResourceBundleDAO.get_all_for_locale('ln')
self.assertEquals(len(bundles), 0)
progress = I18nProgressDAO.get_all()
self.assertGreater(len(progress), 0)
for p in progress:
self.assertEquals(I18nProgressDTO.NOT_STARTED, p.get_progress('ln'))
def test_upload_ui_no_request(self):
response = self.post(
'/%s%s' % (self.COURSE_NAME,
i18n_dashboard.TranslationUploadRestHandler.URL),
{})
self.assertEquals(
'<response><status>400</status><message>'
'Malformed or missing "request" parameter.</message></response>',
response.body)
def test_upload_ui_no_xsrf(self):
response = self.post(
'/%s%s' % (self.COURSE_NAME,
i18n_dashboard.TranslationUploadRestHandler.URL),
{'request': transforms.dumps({})})
self.assertEquals(
'<response><status>403</status><message>'
'Missing or invalid XSRF token.</message></response>',
response.body)
def test_upload_ui_no_file(self):
xsrf_token = crypto.XsrfTokenManager.create_xsrf_token(
i18n_dashboard.TranslationUploadRestHandler.XSRF_TOKEN_NAME)
response = self.post(
'/%s%s' % (self.COURSE_NAME,
i18n_dashboard.TranslationUploadRestHandler.URL),
{'request': transforms.dumps({'xsrf_token': xsrf_token})})
self.assertEquals(
'<response><status>400</status><message>'
'Must select a .zip or .po file to upload.</message></response>',
response.body)
def test_upload_ui_bad_file_param(self):
xsrf_token = crypto.XsrfTokenManager.create_xsrf_token(
i18n_dashboard.TranslationUploadRestHandler.XSRF_TOKEN_NAME)
response = self.post(
'/%s%s' % (self.COURSE_NAME,
i18n_dashboard.TranslationUploadRestHandler.URL),
{
'request': transforms.dumps({'xsrf_token': xsrf_token}),
'file': ''
})
self.assertEquals(
'<response><status>400</status><message>'
'Must select a .zip or .po file to upload</message></response>',
response.body)
def test_upload_ui_empty_file(self):
response = self._do_upload('')
self.assertEquals(
'<response><status>400</status><message>'
'The .zip or .po file must not be empty.</message></response>',
response.body)
def test_upload_ui_bad_content(self):
response = self._do_upload('23 skidoo')
self.assertEquals(
'<response><status>400</status><message>'
'No translations found in provided file.</message></response>',
response.body)
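# The .po snippets below use location comments which appear to encode
# GCB-<protocol version>|<field>|<type>|<resource key>:<locale>:<chunk
# index>; the handler accepts only protocol GCB-1 (see the bad-protocol
# test below).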
def test_upload_ui_no_permissions(self):
actions.login('[email protected]', is_admin=False)
response = self._do_upload(
'# <span class="">1.1 Lesson Title</span>\n'
'#: GCB-1|title|string|lesson:4:de:0\n'
'#| msgid ""\n'
'msgid "Lesson Title"\n'
'msgstr "Lektion Titel"\n')
self.assertEquals(
'<response><status>401</status><message>'
'Access denied.</message></response>',
response.body)
def test_upload_ui_bad_protocol(self):
actions.login('[email protected]', is_admin=False)
response = self._do_upload(
'# <span class="">1.1 Lesson Title</span>\n'
'#: GCB-2|title|string|lesson:4:de:0\n'
'#| msgid ""\n'
'msgid "Lesson Title"\n'
'msgstr "Lektion Titel"\n')
self.assertEquals(
'<response><status>400</status><message>'
'Expected location format GCB-1, but had GCB-2'
'</message></response>',
response.body)
def test_upload_ui_multiple_languages(self):
actions.login('[email protected]', is_admin=False)
response = self._do_upload(
'# <span class="">1.1 Lesson Title</span>\n'
'#: GCB-1|title|string|lesson:4:de:0\n'
'#: GCB-1|title|string|lesson:4:fr:0\n'
'#| msgid ""\n'
'msgid "Lesson Title"\n'
'msgstr "Lektion Titel"\n')
self.assertEquals(
'<response><status>400</status><message>'
'File has translations for both "de" and "fr"'
'</message></response>',
response.body)
def test_upload_ui_one_item(self):
# Do export to force creation of progress and bundle entities
self._do_download({'locales': [{'locale': 'de', 'checked': True}],
'export_what': 'all'}, method='post')
# Upload one translation.
response = self._do_upload(
'# <span class="">1.1 Lesson Title</span>\n'
'#: GCB-1|title|string|lesson:4:de:0\n'
'#| msgid ""\n'
'msgid "Lesson Title"\n'
'msgstr "Lektion Titel"\n')
self.assertIn(
'<response><status>200</status><message>Success.</message>',
response.body)
self.assertIn('made 1 total replacements', response.body)
# Verify the uploaded translation appears on the lesson page when
# viewed with the appropriate language preference.
prefs = models.StudentPreferencesDAO.load_or_default()
prefs.locale = 'de'
models.StudentPreferencesDAO.save(prefs)
response = self.get(
'/%s/unit?unit=%s&lesson=%s' % (
self.COURSE_NAME, self.unit.unit_id, self.lesson.lesson_id))
self.assertIn('Lektion Titel', response.body)
def _parse_messages(self, response):
dom = self.parse_html_string(response.body)
payload = dom.find('.//payload')
return transforms.loads(payload.text)['messages']
def test_upload_ui_no_bundles_created(self):
# Upload one translation.
response = self._do_upload(
'# <span class="">1.1 Lesson Title</span>\n'
'#: GCB-1|title|string|lesson:4:de:0\n'
'#| msgid ""\n'
'msgid "Lesson Title"\n'
'msgstr "Lektion Titel"\n')
messages = self._parse_messages(response)
# Expect no messages other than the expected missing translations and
# the summary line indicating that we did something.
for message in messages:
self.assertTrue(
message.startswith('Did not find translation for') or
message.startswith('For Deutsch (de), made 1 total replacem'))
def test_upload_ui_with_bundles_created(self):
# Do export to force creation of progress and bundle entities
self._do_download({'locales': [{'locale': 'de', 'checked': True}],
'export_what': 'all'}, method='post')
# Upload one translation.
response = self._do_upload(
'# <span class="">1.1 Lesson Title</span>\n'
'#: GCB-1|title|string|lesson:4:de:0\n'
'#| msgid ""\n'
'msgid "Lesson Title"\n'
'msgstr "Lektion Titel"\n')
messages = self._parse_messages(response)
# Expect no messages other than the expected missing translations and
# the summary line indicating that we did something.
for message in messages:
self.assertTrue(
message.startswith('Did not find translation for') or
message.startswith('For Deutsch (de), made 1 total replacem'))
def test_upload_ui_with_unexpected_resource(self):
# Do export to force creation of progress and bundle entities
self._do_download({'locales': [{'locale': 'de', 'checked': True}],
'export_what': 'all'}, method='post')
# Upload one translation.
response = self._do_upload(
'# <span class="">1.1 Lesson Title</span>\n'
'#: GCB-1|title|string|lesson:999:de:0\n'
'#| msgid ""\n'
'msgid "Lesson Title"\n'
'msgstr "Lektion Titel"\n')
messages = self._parse_messages(response)
self.assertIn('Translation file had 1 items for resource '
'"lesson:999:de", but course had no such resource.',
messages)
def test_upload_ui_with_unexpected_translation(self):
# Do export to force creation of progress and bundle entities
self._do_download({'locales': [{'locale': 'de', 'checked': True}],
'export_what': 'all'}, method='post')
# Upload one translation.
response = self._do_upload(
'# <span class="">1.1 Lesson Title</span>\n'
'#: GCB-1|title|string|lesson:4:de:0\n'
'#| msgid ""\n'
'msgid "FizzBuzz"\n'
'msgstr "Lektion Titel"\n')
messages = self._parse_messages(response)
self.assertIn('Translation for "FizzBuzz" present but not used.',
messages)
def test_upload_ui_with_missing_translation(self):
# Do export to force creation of progress, bundle entities
self._do_download({'locales': [{'locale': 'de', 'checked': True}],
'export_what': 'all'}, method='post')
# Upload one translation.
response = self._do_upload(
'# <span class="">1.1 Lesson Title</span>\n'
'#: GCB-1|title|string|lesson:4:de:0\n'
'#| msgid ""\n'
'msgid "FizzBuzz"\n'
'msgstr "Lektion Titel"\n')
messages = self._parse_messages(response)
self.assertIn('Did not find translation for "Lesson Title"', messages)
def test_upload_ui_with_blank_translation(self):
resource_count = 19
# Do export to force creation of progress, bundle entities
self._do_download({'locales': [{'locale': 'de', 'checked': True}],
'export_what': 'all'}, method='post')
# Upload one translation.
response = self._do_upload(
'# <span class="">1.1 Lesson Title</span>\n'
'#: GCB-1|title|string|lesson:4:de:0\n'
'#| msgid ""\n'
'msgid "Lesson Title"\n'
'msgstr ""\n')
messages = self._parse_messages(response)
self.assertIn(
'For Deutsch (de), made 0 total replacements in {} resources. '
'1 items in the uploaded file did not have translations.'.format(
resource_count), messages)
def test_download_ui_no_request(self):
response = self.put(
'/%s%s' % (self.COURSE_NAME,
i18n_dashboard.TranslationDownloadRestHandler.URL),
{})
rsp = transforms.loads(response.body)
self.assertEquals(rsp['status'], 400)
self.assertEquals(
rsp['message'], 'Malformed or missing "request" parameter.')
def test_download_ui_no_payload(self):
response = self.put(
'/%s%s' % (self.COURSE_NAME,
i18n_dashboard.TranslationDownloadRestHandler.URL),
{'request': transforms.dumps({'foo': 'bar'})})
rsp = transforms.loads(response.body)
self.assertEquals(rsp['status'], 400)
self.assertEquals(
rsp['message'], 'Malformed or missing "payload" parameter.')
def test_download_ui_no_xsrf(self):
response = self.put(
'/%s%s' % (self.COURSE_NAME,
i18n_dashboard.TranslationDownloadRestHandler.URL),
{'request': transforms.dumps({'payload': '{}'})})
rsp = transforms.loads(response.body)
self.assertEquals(rsp['status'], 403)
self.assertEquals(
rsp['message'],
'Bad XSRF token. Please reload the page and try again')
def test_download_ui_no_locales(self):
rsp = transforms.loads(self._do_download({'locales': []}).body)
self.assertEquals(rsp['status'], 400)
self.assertEquals(rsp['message'],
'Please select at least one language to export.')
def test_download_ui_malformed_locales(self):
actions.login('[email protected]', is_admin=False)
rsp = transforms.loads(self._do_download(
{'locales': [{'checked': True}]}).body)
self.assertEquals(rsp['status'], 400)
self.assertEquals('Language specification not as expected.',
rsp['message'])
def test_download_ui_no_selected_locales(self):
actions.login('[email protected]', is_admin=False)
rsp = transforms.loads(self._do_download(
{'locales': [{'locale': 'de'}]}).body)
self.assertEquals(rsp['status'], 400)
self.assertEquals('Please select at least one language to export.',
rsp['message'])
def test_download_ui_no_permissions(self):
actions.login('[email protected]', is_admin=False)
rsp = transforms.loads(self._do_download(
{'locales': [{'locale': 'de', 'checked': True}]}).body)
self.assertEquals(401, rsp['status'])
self.assertEquals('Access denied.', rsp['message'])
def test_download_ui_file_name_default(self):
extra_env = {
'extra_locales': [{'locale': 'de', 'availability': 'available'}]
}
with actions.OverriddenEnvironment(extra_env):
rsp = self._do_download(
{'locales': [{'locale': 'de', 'checked': True}]}, method='post')
self.assertEquals('application/octet-stream', rsp.content_type)
self.assertEquals('attachment; filename="i18n_course.zip"',
rsp.content_disposition)
def test_download_ui_file_name_set(self):
extra_env = {
'extra_locales': [{'locale': 'de', 'availability': 'available'}]
}
with actions.OverriddenEnvironment(extra_env):
rsp = self._do_download({
'locales': [{'locale': 'de', 'checked': True}],
'file_name': 'xyzzy.zip',
}, method='post')
self.assertEquals('application/octet-stream', rsp.content_type)
self.assertEquals('attachment; filename="xyzzy.zip"',
rsp.content_disposition)
def _translated_value_swapcase(self, key, section_name):
get_response = self.get(
'/%s%s?%s' % (
self.COURSE_NAME,
i18n_dashboard.TranslationConsoleRestHandler.URL,
urllib.urlencode({'key': str(key)})))
response = transforms.loads(get_response.body)
payload = transforms.loads(response['payload'])
s = next(s for s in payload['sections'] if s['name'] == section_name)
s['data'][0]['changed'] = True
s['data'][0]['target_value'] = s['data'][0]['source_value'].swapcase()
response['payload'] = transforms.dumps(payload)
response['key'] = payload['key']
response = self.put(
'/%s%s' % (self.COURSE_NAME,
i18n_dashboard.TranslationConsoleRestHandler.URL),
{'request': transforms.dumps(response)})
def _make_current_and_stale_translation(self):
# Provide translations for lesson title and assessment title.
self._translated_value_swapcase(
ResourceBundleKey(resources_display.ResourceLesson.TYPE,
self.lesson.lesson_id, 'de'),
'title')
self._translated_value_swapcase(
ResourceBundleKey(resources_display.ResourceAssessment.TYPE,
self.assessment.unit_id, 'de'),
'assessment:title')
# Make assessment out-of-date by changing the assessment title
# via the course interface.
assessment = self.course.find_unit_by_id(self.assessment.unit_id)
assessment.title = 'Edited Assessment Title'
self.course.save()
def _parse_zip_response(self, response):
download_zf = zipfile.ZipFile(cStringIO.StringIO(response.body), 'r')
for item in download_zf.infolist():
file_data = download_zf.read(item)
catalog = pofile.read_po(cStringIO.StringIO(file_data))
yield catalog
def test_export_only_selected_languages(self):
extra_env = {
'extra_locales': [
{'locale': 'de', 'availability': 'available'},
{'locale': 'fr', 'availability': 'available'},
{'locale': 'es', 'availability': 'available'},
]
}
with actions.OverriddenEnvironment(extra_env):
payload = {
'locales': [
{'locale': 'de', 'checked': True},
{'locale': 'fr', 'checked': True},
{'locale': 'es'},
],
'export_what': 'all'}
response = self._do_download(payload, method='post')
zf = zipfile.ZipFile(cStringIO.StringIO(response.body), 'r')
contents = [item.filename for item in zf.infolist()]
self.assertIn('locale/de/LC_MESSAGES/messages.po', contents)
self.assertIn('locale/fr/LC_MESSAGES/messages.po', contents)
self.assertNotIn('locale/es/LC_MESSAGES/messages.po', contents)
def _test_export(self, export_what, expect_lesson):
def find_message(catalog, the_id):
for message in catalog:
if message.id == the_id:
return message
return None
extra_env = {
'extra_locales': [{'locale': 'de', 'availability': 'available'}]
}
with actions.OverriddenEnvironment(extra_env):
self._make_current_and_stale_translation()
payload = {
'locales': [{'locale': 'de', 'checked': True}],
'export_what': export_what,
}
response = self._do_download(payload)
rsp = transforms.loads(response.body)
self.assertEquals(200, rsp['status'])
self.assertEquals('Success.', rsp['message'])
response = self._do_download(payload, method='post')
for catalog in self._parse_zip_response(response):
unit = find_message(catalog, 'Unit Title')
self.assertEquals(1, len(unit.locations))
self.assertEquals('GCB-1|title|string|unit:1:de',
unit.locations[0][0])
self.assertEquals('', unit.string)
assessment = find_message(catalog, 'Edited Assessment Title')
self.assertEquals(1, len(assessment.locations))
self.assertEquals(
'GCB-1|assessment:title|string|assessment:2:de',
assessment.locations[0][0])
self.assertEquals('', assessment.string)
lesson = find_message(catalog, 'Lesson Title')
if expect_lesson:
self.assertEquals(1, len(lesson.locations))
self.assertEquals('GCB-1|title|string|lesson:4:de',
lesson.locations[0][0])
self.assertEquals('lESSON tITLE', lesson.string)
self.assertEquals([], lesson.previous_id)
else:
self.assertIsNone(lesson)
def test_export_only_new(self):
self._test_export('new', False)
def test_export_all(self):
self._test_export('all', True)
def test_added_items_appear_on_dashboard(self):
"""Ensure that all items added in setUp are present on dashboard.
Do this so that we can trust in other tests that when we don't
see something that we don't expect to see it's not because we failed
to add the item, but instead it really is getting actively suppressed.
"""
response = self.get(self.URL)
self.assertIn('Unit Title', response.body)
self.assertIn('Assessment Title', response.body)
self.assertIn('Link Title', response.body)
self.assertIn('Lesson Title', response.body)
self.assertIn('mc description', response.body)
self.assertIn('sa description', response.body)
self.assertIn('question group description', response.body)
def test_download_exports_all_expected_fields(self):
extra_env = {
'extra_locales': [{'locale': 'de', 'availability': 'available'}]
}
with actions.OverriddenEnvironment(extra_env):
response = self._do_download(
{'locales': [{'locale': 'de', 'checked': True}],
'export_what': 'all'}, method='post')
for catalog in self._parse_zip_response(response):
messages = [msg.id for msg in catalog]
self.assertIn('Unit Title', messages)
self.assertIn('unit description', messages)
self.assertIn('unit header', messages)
self.assertIn('unit footer', messages)
self.assertIn('Assessment Title', messages)
self.assertIn('assessment description', messages)
self.assertIn('assessment html content', messages)
self.assertIn('assessment html review form', messages)
self.assertIn('Link Title', messages)
self.assertIn('link description', messages)
self.assertIn('Lesson Title', messages)
self.assertIn('lesson objectives', messages)
self.assertIn('lesson notes', messages)
self.assertIn('mc question', messages)
self.assertIn('mc description', messages)
self.assertIn('mc feedback one', messages)
self.assertIn('mc answer one', messages)
self.assertIn('mc feedback two', messages)
self.assertIn('mc answer two', messages)
self.assertIn('sa question', messages)
self.assertIn('sa description', messages)
self.assertIn('sa hint', messages)
self.assertIn('sa response', messages)
self.assertIn('sa feedback', messages)
self.assertIn('sa default feedback', messages)
self.assertIn('question group introduction', messages)
self.assertIn('question group description', messages)
# Non-translatable items; will require manual attention from
# someone who understands the course material.
self.assertNotIn('link url', messages)
self.assertNotIn('lesson video', messages)
self.assertNotIn('foo.jpg', messages)
def test_upload_translations(self):
actions.update_course_config(
self.COURSE_NAME,
{'extra_locales': [{'locale': 'el', 'availability': 'available'}]})
# Download the course translations, and build a catalog containing
# all the translations repeated.
response = self._do_download(
{'locales': [{'locale': 'el', 'checked': True}],
'export_what': 'all'}, method='post')
download_zf = zipfile.ZipFile(cStringIO.StringIO(response.body), 'r')
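        # Build the upload zip in memory.  Pointing .fp back at the StringIO
        # below is assumed to be a workaround so zipfile can flush on close
        # in Python 2, which normally expects a real file object here.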
out_stream = StringIO.StringIO()
out_stream.fp = out_stream
upload_zf = zipfile.ZipFile(out_stream, 'w')
num_translations = 0
for item in download_zf.infolist():
catalog = pofile.read_po(cStringIO.StringIO(download_zf.read(item)))
for msg in catalog:
if msg.locations:
msg.string = msg.id.upper() * 2
content = cStringIO.StringIO()
pofile.write_po(content, catalog)
upload_zf.writestr(item.filename, content.getvalue())
content.close()
upload_zf.close()
# Upload the modified translations.
upload_contents = out_stream.getvalue()
xsrf_token = crypto.XsrfTokenManager.create_xsrf_token(
TranslationUploadRestHandler.XSRF_TOKEN_NAME)
self.post('/%s%s' % (self.COURSE_NAME,
TranslationUploadRestHandler.URL),
                  {'request': transforms.dumps({
                      'xsrf_token': cgi.escape(xsrf_token),
                      'payload': transforms.dumps({'key': ''})})},
upload_files=[('file', 'doesntmatter', upload_contents)])
# Download the translations; verify the doubling.
response = self._do_download(
{'locales': [{'locale': 'el', 'checked': True}],
'export_what': 'all'}, method='post')
for catalog in self._parse_zip_response(response):
num_translations = 0
for msg in catalog:
if msg.locations: # Skip header pseudo-message entry
num_translations += 1
self.assertNotEquals(msg.id, msg.string)
self.assertEquals(msg.id.upper() * 2, msg.string)
self.assertEquals(31, num_translations)
# And verify the presence of the translated versions on actual
# course pages.
response = self.get('unit?unit=%s' % self.unit.unit_id)
self.assertIn(self.unit.title.upper() * 2, response.body)
self.assertIn(self.lesson.title.upper() * 2, response.body)
def test_reverse_case(self):
response = self.get('dashboard?action=i18n_reverse_case')
prefs = models.StudentPreferencesDAO.load_or_default()
prefs.locale = 'ln'
models.StudentPreferencesDAO.save(prefs)
response = self.get('unit?unit=%s' % self.unit.unit_id)
self.assertIn('uNIT tITLE', response.body)
self.assertIn('lESSON tITLE', response.body)
def _test_progress_calculation(self, sections, expected_status):
key = i18n_dashboard.ResourceBundleKey.fromstring('assessment:1:de')
i18n_progress_dto = i18n_dashboard.I18nProgressDAO.create_blank(key)
for section in sections:
section['name'] = 'fred'
section['type'] = 'string'
TranslationConsoleRestHandler.update_dtos_with_section_data(
key, sections, None, i18n_progress_dto)
self.assertEquals(expected_status,
i18n_progress_dto.get_progress(key.locale))
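    # The cases below pin down the progress model as we understand it:
    # DONE when every item ends up with a translation, NOT_STARTED when none
    # do, and IN_PROGRESS for any mix (inferred from the assertions, not from
    # documentation).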
def test_progress_no_sections_is_done(self):
self._test_progress_calculation([], i18n_dashboard.I18nProgressDTO.DONE)
def test_progress_one_section_current_and_not_changed_is_done(self):
self._test_progress_calculation(
[{'data': [{'verb': i18n_dashboard.VERB_CURRENT,
'changed': False,
'source_value': 'yes',
'target_value': 'ja'}]}],
i18n_dashboard.I18nProgressDTO.DONE)
def test_progress_one_section_current_and_changed_is_done(self):
self._test_progress_calculation(
[{'data': [{'verb': i18n_dashboard.VERB_CURRENT,
'changed': True,
'source_value': 'yes',
'target_value': 'yup'}]}],
i18n_dashboard.I18nProgressDTO.DONE)
def test_progress_one_section_stale_and_not_changed_is_in_progress(self):
self._test_progress_calculation(
[{'data': [{'verb': i18n_dashboard.VERB_CHANGED,
'changed': False,
'old_source_value': 'yse',
'source_value': 'yes',
'target_value': 'ja'}]}],
i18n_dashboard.I18nProgressDTO.IN_PROGRESS)
def test_progress_one_section_stale_but_changed_is_done(self):
self._test_progress_calculation(
[{'data': [{'verb': i18n_dashboard.VERB_CHANGED,
'changed': True,
'old_source_value': 'yse',
'source_value': 'yes',
'target_value': 'ja'}]}],
i18n_dashboard.I18nProgressDTO.DONE)
def test_progress_one_section_new_and_not_translated_is_not_started(self):
self._test_progress_calculation(
[{'data': [{'verb': i18n_dashboard.VERB_NEW,
'changed': False,
'source_value': 'yes',
'target_value': ''}]}],
i18n_dashboard.I18nProgressDTO.NOT_STARTED)
    def test_progress_one_section_new_and_translated_is_done(self):
        self._test_progress_calculation(
            [{'data': [{'verb': i18n_dashboard.VERB_NEW,
                        'changed': True,
                        'source_value': 'yes',
                        'target_value': 'ja'}]}],
            i18n_dashboard.I18nProgressDTO.DONE)
def test_progress_one_section_current_but_changed_to_blank_unstarted(self):
self._test_progress_calculation(
[{'data': [{'verb': i18n_dashboard.VERB_CURRENT,
'changed': True,
'source_value': 'yes',
'target_value': ''}]}],
i18n_dashboard.I18nProgressDTO.NOT_STARTED)
def test_progress_one_section_changed_but_changed_to_blank_unstarted(self):
self._test_progress_calculation(
[{'data': [{'verb': i18n_dashboard.VERB_CHANGED,
'changed': True,
'source_value': 'yes',
'target_value': ''}]}],
i18n_dashboard.I18nProgressDTO.NOT_STARTED)
def test_progress_one_section_new_but_changed_to_blank_is_unstarted(self):
self._test_progress_calculation(
[{'data': [{'verb': i18n_dashboard.VERB_NEW,
'changed': True,
'source_value': 'yes',
'target_value': ''}]}],
i18n_dashboard.I18nProgressDTO.NOT_STARTED)
def test_progress_one_not_started_and_one_done_is_in_progress(self):
self._test_progress_calculation(
[{'data': [{'verb': i18n_dashboard.VERB_NEW,
'changed': False,
'source_value': 'yes',
'target_value': ''},
{'verb': i18n_dashboard.VERB_CURRENT,
'changed': False,
'source_value': 'yes',
'target_value': 'ja'}]}],
i18n_dashboard.I18nProgressDTO.IN_PROGRESS)
def test_progress_one_stale_and_one_done_is_in_progress(self):
self._test_progress_calculation(
[{'data': [{'verb': i18n_dashboard.VERB_CHANGED,
'changed': False,
'old_source_value': 'yse',
'source_value': 'yes',
'target_value': 'ja'},
{'verb': i18n_dashboard.VERB_CURRENT,
'changed': False,
'source_value': 'yes',
'target_value': 'ja'}]}],
i18n_dashboard.I18nProgressDTO.IN_PROGRESS)
def test_progress_one_stale_and_one_not_started_is_in_progress(self):
self._test_progress_calculation(
[{'data': [{'verb': i18n_dashboard.VERB_CHANGED,
'changed': False,
'old_source_value': 'yse',
'source_value': 'yes',
'target_value': 'ja'},
{'verb': i18n_dashboard.VERB_NEW,
'changed': False,
'source_value': 'yes',
'target_value': ''}]}],
i18n_dashboard.I18nProgressDTO.IN_PROGRESS)
class TranslatorRoleTests(actions.TestBase):
ADMIN_EMAIL = '[email protected]'
USER_EMAIL = '[email protected]'
COURSE_NAME = 'i18n_course'
DASHBOARD_URL = 'dashboard?action=i18n_dashboard'
CONSOLE_REST_URL = 'rest/modules/i18n_dashboard/translation_console'
ENVIRON = {
'extra_locales': [
{'locale': 'el', 'availability': 'unavailable'},
{'locale': 'ru', 'availability': 'unavailable'},
]}
def setUp(self):
super(TranslatorRoleTests, self).setUp()
self.base = '/' + self.COURSE_NAME
actions.simple_add_course(
self.COURSE_NAME, self.ADMIN_EMAIL, 'I18N Course')
self.old_namespace = namespace_manager.get_namespace()
namespace_manager.set_namespace('ns_%s' % self.COURSE_NAME)
# Need to muck with internals of code under test.
# pylint: disable=protected-access
        self.old_registered_permission = roles.Roles._REGISTERED_PERMISSIONS
        roles.Roles._REGISTERED_PERMISSIONS = {}
def tearDown(self):
del sites.Registry.test_overrides[sites.GCB_COURSES_CONFIG.name]
        roles.Roles._REGISTERED_PERMISSIONS = self.old_registered_permission
namespace_manager.set_namespace(self.old_namespace)
super(TranslatorRoleTests, self).tearDown()
def _createTranslatorRole(self, name, locales):
permissions = {
dashboard.custom_module.name: [i18n_dashboard.ACCESS_PERMISSION],
i18n_dashboard.custom_module.name: [
i18n_dashboard.locale_to_permission(loc) for loc in locales]
}
role_dto = models.RoleDTO(None, {
'name': name,
'users': [self.USER_EMAIL],
'permissions': permissions
})
models.RoleDAO.save(role_dto)
def test_no_permission_redirect(self):
with actions.OverriddenEnvironment(self.ENVIRON):
actions.login(self.USER_EMAIL, is_admin=False)
self.assertEquals(self.get(self.DASHBOARD_URL).status_int, 302)
def test_restricted_access(self):
with actions.OverriddenEnvironment(self.ENVIRON):
self._createTranslatorRole('ElTranslator', ['el'])
actions.login(self.USER_EMAIL, is_admin=False)
soup = self.parse_html_string_to_soup(
self.get(self.DASHBOARD_URL).body)
table = soup.select('.i18n-progress-table')[0]
columns = table.select('.language-header')
expected_col_data = [
'el'
]
self.assertEquals(len(expected_col_data), len(columns))
for index, expected in enumerate(expected_col_data):
self.assertEquals(expected, columns[index].text)
response = self.get('%s?key=%s' % (
self.CONSOLE_REST_URL, 'course_settings%3Ahomepage%3Aru'))
self.assertEquals(transforms.loads(response.body)['status'], 401)
response = self.get('%s?key=%s' % (
self.CONSOLE_REST_URL, 'course_settings%3Ahomepage%3Ael'))
self.assertEquals(transforms.loads(response.body)['status'], 200)
class CourseLocalizationTestBase(actions.TestBase):
def setUp(self):
super(CourseLocalizationTestBase, self).setUp()
if sites.GCB_COURSES_CONFIG.name in sites.Registry.test_overrides:
del sites.Registry.test_overrides[sites.GCB_COURSES_CONFIG.name]
self.auto_deploy = sites.ApplicationContext.AUTO_DEPLOY_DEFAULT_COURSE
sites.ApplicationContext.AUTO_DEPLOY_DEFAULT_COURSE = False
self._import_course()
self._locale_to_label = {}
def tearDown(self):
del sites.Registry.test_overrides[sites.GCB_COURSES_CONFIG.name]
sites.ApplicationContext.AUTO_DEPLOY_DEFAULT_COURSE = self.auto_deploy
super(CourseLocalizationTestBase, self).tearDown()
def _import_course(self):
email = '[email protected]'
actions.login(email, is_admin=True)
actions.simple_add_course('first', email, 'My First Course')
class SampleCourseLocalizationTest(CourseLocalizationTestBase):
def _import_sample_course(self):
dst_app_context = actions.simple_add_course(
'sample', '[email protected]',
'Power Searching with Google')
dst_course = courses.Course(None, dst_app_context)
src_app_context = sites.get_all_courses('course:/:/:')[0]
errors = []
dst_course.import_from(src_app_context, errors)
dst_course.save()
self.assertEquals(0, len(errors))
def _setup_locales(self, availability='available', course='first'):
request = {
'key': '/course.yaml',
'payload': (
'{\"i18n\":{\"course:locale\":\"en_US\",\"extra_locales\":['
'{\"locale\":\"ru_RU\",\"availability\":\"%s\"}, '
'{\"locale\":\"es_ES\",\"availability\":\"%s\"}'
']}}' % (availability, availability)),
'xsrf_token': crypto.XsrfTokenManager.create_xsrf_token(
'basic-course-settings-put')}
response = self.put(
'%s/rest/course/settings' % course, params={
'request': transforms.dumps(request)})
self.assertEquals(response.status_int, 200)
# check labels exist
with Namespace('ns_%s' % course):
labels = models.LabelDAO.get_all_of_type(
models.LabelDTO.LABEL_TYPE_LOCALE)
self.assertEqual(3, len(labels))
for label in labels:
self._locale_to_label[label.title] = label
def _add_announcement(self, title, locales):
with Namespace('ns_first'):
labels = models.LabelDAO.get_all_of_type(
models.LabelDTO.LABEL_TYPE_LOCALE)
label_ids = []
for label in labels:
for locale in locales:
if label.title == locale:
label_ids.append(label.id)
annon = announcements.AnnouncementEntity()
annon.title = title
annon.labels = utils.list_to_text(label_ids)
annon.is_draft = False
annon.put()
def _add_announcements(self):
self._add_announcement('Test announcement EN', ['en_US'])
self._add_announcement('Test announcement RU', ['ru_RU'])
self._add_announcement('Test announcement ES', ['es_ES'])
self._add_announcement(
'Test announcement ALL', ['en_US', 'ru_RU', 'es_ES'])
self._add_announcement('Test announcement NONE', [])
with Namespace('ns_first'):
items = announcements.AnnouncementEntity.get_announcements()
self.assertEqual(5, len(items))
def _add_units(self, locale_labels=False):
with Namespace('ns_first'):
course = courses.Course(None, sites.get_all_courses()[0])
_en = course.add_unit()
_en.type = 'U'
_en.availability = courses.AVAILABILITY_AVAILABLE
_en.title = 'Unit en_US'
_ru = course.add_unit()
_ru.type = 'U'
_ru.availability = courses.AVAILABILITY_AVAILABLE
_ru.title = 'Unit ru_RU'
_es = course.add_unit()
_es.type = 'U'
_es.availability = courses.AVAILABILITY_AVAILABLE
_es.title = 'Unit es_ES'
_all = course.add_unit()
_all.type = 'U'
_all.availability = courses.AVAILABILITY_AVAILABLE
_all.title = 'Unit all_ALL'
_none = course.add_unit()
_none.type = 'U'
_none.availability = courses.AVAILABILITY_AVAILABLE
_none.title = 'Unit none_NONE'
if locale_labels:
_en.labels = utils.list_to_text(
[self._locale_to_label['en_US'].id])
_ru.labels = utils.list_to_text(
[self._locale_to_label['ru_RU'].id])
_es.labels = utils.list_to_text(
[self._locale_to_label['es_ES'].id])
_all.labels = utils.list_to_text([
self._locale_to_label['es_ES'].id,
self._locale_to_label['ru_RU'].id])
_none.labels = utils.list_to_text([])
course.save()
self._locale_to_unit = {}
self._locale_to_unit['en_US'] = _en
self._locale_to_unit['ru_RU'] = _ru
self._locale_to_unit['es_ES'] = _es
def _set_labels_on_current_student(self, labels, ids=None):
with Namespace('ns_first'):
user = users.get_current_user()
if ids is None:
ids = [label.id for label in labels]
labels = utils.list_to_text(ids)
models.StudentProfileDAO.update(
user.user_id(), user.email(), labels=labels)
def _set_prefs_locale(self, locale, course='first'):
with Namespace('ns_%s' % course):
prefs = models.StudentPreferencesDAO.load_or_default()
if prefs:
prefs.locale = locale
models.StudentPreferencesDAO.save(prefs)
def _assert_picker(self, is_present, has_locales=None, is_admin=False):
actions.login('[email protected]', is_admin=is_admin)
response = self.get('first/course')
self.assertEquals(response.status_int, 200)
dom = self.parse_html_string(response.body)
if is_present:
self.assertTrue(dom.find('.//select[@id="locale-select"]'))
for has_locale in has_locales:
option = dom.find(
'.//select[@id="locale-select"]'
'/option[@value="%s"]' % has_locale)
self.assertIsNotNone(option)
else:
self.assertFalse(dom.find('.//select[@id="locale-select"]'))
actions.logout()
def _assert_en_ru_es_all_none(self, en, ru, es, _all, _none, lang):
response = self.get('first/announcements')
self.assertEquals(response.status_int, 200)
if en:
self.assertIn('Test announcement EN', response.body)
else:
self.assertNotIn('Test announcement EN', response.body)
if ru:
self.assertIn('Test announcement RU', response.body)
else:
self.assertNotIn('Test announcement RU', response.body)
if es:
self.assertIn('Test announcement ES', response.body)
else:
self.assertNotIn('Test announcement ES', response.body)
if _all:
self.assertIn('Test announcement ALL', response.body)
else:
self.assertNotIn('Test announcement ALL', response.body)
if _none:
self.assertIn('Test announcement NONE', response.body)
else:
self.assertNotIn('Test announcement NONE', response.body)
self.assertEquals(self.parse_html_string(
response.body).get('lang'), lang)
return response
def _course_en_ru_es_all_none(self, en, ru, es, _all, _none, lang):
response = self.get('first/course')
self.assertEquals(response.status_int, 200)
if en:
self.assertIn('Unit en_US', response.body)
else:
self.assertNotIn('Unit en_US', response.body)
if ru:
self.assertIn('Unit ru_RU', response.body)
else:
self.assertNotIn('Unit ru_RU', response.body)
if es:
self.assertIn('Unit es_ES', response.body)
else:
self.assertNotIn('Unit es_ES', response.body)
if _all:
self.assertIn('Unit all_ALL', response.body)
else:
self.assertNotIn('Unit all_ALL', response.body)
if _none:
self.assertIn('Unit none_NONE', response.body)
else:
self.assertNotIn('Unit none_NONE', response.body)
self.assertEquals(self.parse_html_string(
response.body).get('lang'), lang)
return response
def test_locale_picker_visibility_for_available_locales_as_student(self):
self._setup_locales()
with actions.OverriddenEnvironment(
{'course': {
'now_available': True, 'can_student_change_locale': True}}):
self._assert_picker(True, ['en_US', 'ru_RU', 'es_ES'])
with actions.OverriddenEnvironment(
{'course': {
'now_available': True, 'can_student_change_locale': False}}):
self._assert_picker(False)
def test_locale_picker_visibility_for_unavailable_locales_as_student(self):
self._setup_locales(availability='unavailable')
with actions.OverriddenEnvironment(
{'course': {
'now_available': True, 'can_student_change_locale': True}}):
self._assert_picker(False)
with actions.OverriddenEnvironment(
{'course': {
'now_available': True, 'can_student_change_locale': False}}):
self._assert_picker(False)
def test_locale_picker_visibility_for_unavailable_locales_as_admin(self):
self._setup_locales(availability='unavailable')
with actions.OverriddenEnvironment(
{'course': {
'now_available': True, 'can_student_change_locale': True}}):
self._assert_picker(
True, ['en_US', 'ru_RU', 'es_ES'], is_admin=True)
with actions.OverriddenEnvironment(
{'course': {
'now_available': True, 'can_student_change_locale': False}}):
self._assert_picker(
True, ['en_US', 'ru_RU', 'es_ES'], is_admin=True)
def test_view_announcement_via_locale_picker(self):
self._setup_locales()
self._add_announcements()
actions.logout()
actions.login('[email protected]')
with actions.OverriddenEnvironment(
{'course': {
'now_available': True, 'can_student_change_locale': True}}):
actions.register(
self,
'test_view_announcement_via_locale_picker', course='first')
self._set_prefs_locale(None)
response = self._assert_en_ru_es_all_none(
True, False, False, True, True, 'en_US')
self.assertIn('Announcements', response.body)
self._set_prefs_locale('ru_RU')
response = self._assert_en_ru_es_all_none(
False, True, False, True, True, 'ru_RU')
self.assertIn('Сообщения', response.body)
self._set_prefs_locale('es_ES')
response = self._assert_en_ru_es_all_none(
False, False, True, True, True, 'es_ES')
self.assertIn('Avisos', response.body)
# when locale labels are combined with prefs, labels win
self._set_prefs_locale(None)
self._set_labels_on_current_student(
[self._locale_to_label['ru_RU']])
self._assert_en_ru_es_all_none(
False, True, False, True, True, 'ru_RU')
self._set_prefs_locale('es_ES')
self._set_labels_on_current_student(
[self._locale_to_label['ru_RU']])
self._assert_en_ru_es_all_none(
False, True, False, True, True, 'ru_RU')
def test_announcements_via_locale_labels(self):
self._setup_locales()
self._add_announcements()
actions.logout()
actions.login('[email protected]')
with actions.OverriddenEnvironment(
{'course': {
'now_available': True, 'can_student_change_locale': False}}):
actions.register(
self, 'test_announcements_via_locale_labels', course='first')
self._set_prefs_locale(None)
self._set_labels_on_current_student([])
self._assert_en_ru_es_all_none(
True, True, True, True, True, 'en_US')
self._set_labels_on_current_student(
[self._locale_to_label['en_US']])
self._assert_en_ru_es_all_none(
True, False, False, True, True, 'en_US')
self._set_labels_on_current_student(
[self._locale_to_label['ru_RU']])
self._assert_en_ru_es_all_none(
False, True, False, True, True, 'ru_RU')
self._set_labels_on_current_student(
[self._locale_to_label['es_ES']])
self._assert_en_ru_es_all_none(
False, False, True, True, True, 'es_ES')
self._set_prefs_locale('ru_RU')
self._set_labels_on_current_student([])
response = self._assert_en_ru_es_all_none(
True, True, True, True, True, 'ru_RU')
self.assertIn('Сообщения', response.body)
self._set_prefs_locale('ru_RU')
self._set_labels_on_current_student(
[self._locale_to_label['es_ES']])
response = self._assert_en_ru_es_all_none(
False, False, True, True, True, 'es_ES')
self.assertIn('Avisos', response.body)
def test_course_track_via_locale_picker(self):
self._setup_locales()
self._add_units(locale_labels=True)
actions.logout()
actions.login('[email protected]')
with actions.OverriddenEnvironment(
{'course': {
'now_available': True, 'can_student_change_locale': True}}):
actions.register(
self, 'test_course_track_via_locale_picker', course='first')
self._set_prefs_locale(None)
self._course_en_ru_es_all_none(
True, False, False, False, True, 'en_US')
self._set_prefs_locale('en_US')
response = self._course_en_ru_es_all_none(
True, False, False, False, True, 'en_US')
self.assertIn('Announcements', response.body)
self._set_prefs_locale('ru_RU')
response = self._course_en_ru_es_all_none(
False, True, False, True, True, 'ru_RU')
self.assertIn('Сообщения', response.body)
self._set_prefs_locale('es_ES')
response = self._course_en_ru_es_all_none(
False, False, True, True, True, 'es_ES')
self.assertIn('Avisos', response.body)
def test_button_captions(self):
self._import_sample_course()
self._setup_locales(course='sample')
self._set_prefs_locale('ru', course='sample')
response = self.get('/sample/course')
        # TODO(psimakov): 'Search' button caption must be localized; but it's
        # in the hook and we don't currently support gettext() inside hook :(
self.assertIn('type="submit" value="Search"', response.body)
response = self.get('/sample/unit?unit=14&lesson=20')
self.assertIn('Проверить ответ', response.body)
self.assertIn('Подсказка', response.body)
self.assertIn('Баллов: 1', response.body)
self.assertIn('Предыдущая страница', response.body)
self.assertIn('Следующая страница', response.body)
response = self.get('/sample/assessment?name=1')
self.assertIn('Отправить ответы', response.body)
for url in [
'/sample/assessment?name=35', '/sample/assessment?name=65']:
response = self.get(url)
self.assertIn('Баллов: 1', response.body)
self.assertIn('Проверить ответы', response.body)
self.assertIn('Отправить ответы', response.body)
def test_course_track_via_locale_labels(self):
self._setup_locales()
self._add_units(locale_labels=True)
actions.logout()
actions.login('[email protected]')
with actions.OverriddenEnvironment(
{'course': {
'now_available': True, 'can_student_change_locale': True}}):
actions.register(
self, 'test_course_track_via_locale_picker', course='first')
self._set_labels_on_current_student([])
self._course_en_ru_es_all_none(
True, False, False, False, True, 'en_US')
self._set_labels_on_current_student(
[self._locale_to_label['en_US']])
self._course_en_ru_es_all_none(
True, False, False, False, True, 'en_US')
self._set_labels_on_current_student(
[self._locale_to_label['ru_RU']])
self._course_en_ru_es_all_none(
False, True, False, True, True, 'ru_RU')
self._set_labels_on_current_student(
[self._locale_to_label['es_ES']])
self._course_en_ru_es_all_none(
False, False, True, True, True, 'es_ES')
def test_track_and_locale_labels_do_work_together(self):
self._setup_locales()
with Namespace('ns_first'):
track_a_id = models.LabelDAO.save(models.LabelDTO(
None, {'title': 'Track A',
'version': '1.0',
'description': 'Track A',
'type': models.LabelDTO.LABEL_TYPE_COURSE_TRACK}))
track_b_id = models.LabelDAO.save(models.LabelDTO(
None, {'title': 'Track B',
'version': '1.0',
'description': 'Track B',
'type': models.LabelDTO.LABEL_TYPE_COURSE_TRACK}))
locale_ru_id = self._locale_to_label['ru_RU'].id
locale_es_id = self._locale_to_label['es_ES'].id
course = courses.Course(None, sites.get_all_courses()[0])
unit_1 = course.add_unit()
unit_1.type = 'U'
unit_1.availability = courses.AVAILABILITY_AVAILABLE
unit_1.title = 'Unit for Track A and Locale ru_RU'
unit_1.labels = utils.list_to_text(
[track_a_id, locale_ru_id])
unit_2 = course.add_unit()
unit_2.type = 'U'
unit_2.availability = courses.AVAILABILITY_AVAILABLE
unit_2.title = 'Unit for Track B and Locale es_ES'
unit_2.labels = utils.list_to_text(
[track_b_id, locale_es_id])
course.save()
def _assert_course(
locale, label_ids, is_unit_1_visible, is_unit_2_visible):
self._set_prefs_locale(locale)
self._set_labels_on_current_student(None, ids=label_ids)
response = self.get('first/course')
if is_unit_1_visible:
self.assertIn(unit_1.title, response.body)
else:
self.assertNotIn(unit_1.title, response.body)
if is_unit_2_visible:
self.assertIn(unit_2.title, response.body)
else:
self.assertNotIn(unit_2.title, response.body)
actions.logout()
with actions.OverriddenEnvironment(
{'course': {'now_available': True}}):
actions.login(
'[email protected]')
actions.register(
self, 'test_track_and_locale_labels_dont_interfere',
course='first')
with actions.OverriddenEnvironment(
{'course': {
'now_available': True, 'can_student_change_locale': True}}):
_assert_course(None, [], False, False)
_assert_course('ru_RU', [], True, False)
_assert_course('es_ES', [], False, True)
_assert_course(None, [track_a_id], False, False)
_assert_course('ru_RU', [track_a_id], True, False)
_assert_course('ru_RU', [track_b_id], False, False)
_assert_course('es_ES', [track_a_id], False, False)
_assert_course('es_ES', [track_b_id], False, True)
_assert_course(None, [locale_ru_id], True, False)
_assert_course('ru_RU', [locale_ru_id], True, False)
_assert_course('ru_RU', [locale_es_id], False, True)
_assert_course('es_ES', [locale_ru_id], True, False)
_assert_course('es_ES', [locale_es_id], False, True)
_assert_course(None, [track_a_id, track_b_id], False, False)
_assert_course('ru_RU', [track_a_id, track_b_id], True, False)
_assert_course('es_ES', [track_a_id, track_b_id], False, True)
_assert_course(
None, [track_a_id, locale_ru_id], True, False)
_assert_course('ru_RU', [track_a_id, locale_ru_id], True, False)
_assert_course(
'ru_RU', [track_a_id, locale_es_id], False, False)
_assert_course('ru_RU', [track_b_id, locale_es_id], False, True)
_assert_course('ru_RU', [track_b_id, locale_ru_id], False, False)
_assert_course(
None, [track_a_id, track_b_id, locale_ru_id], True, False)
_assert_course(
None, [track_a_id, track_b_id, locale_es_id], False, True)
_assert_course(
'ru_RU', [track_a_id, track_b_id, locale_ru_id], True, False)
_assert_course(
'ru_RU', [track_a_id, track_b_id, locale_es_id], False, True)
_assert_course(
'es_ES', [track_a_id, track_b_id, locale_ru_id], True, False)
_assert_course(
'es_ES', [track_a_id, track_b_id, locale_es_id], False, True)
with actions.OverriddenEnvironment(
{'course': {
'now_available': True, 'can_student_change_locale': False}}):
_assert_course(None, [], True, True)
_assert_course('ru_RU', [], True, True)
_assert_course('es_ES', [], True, True)
_assert_course(None, [locale_ru_id], True, False)
_assert_course('ru_RU', [locale_ru_id], True, False)
_assert_course('ru_RU', [locale_es_id], False, True)
_assert_course('es_ES', [locale_ru_id], True, False)
_assert_course('es_ES', [locale_es_id], False, True)
_assert_course(None, [track_a_id], True, False)
_assert_course('ru_RU', [track_a_id], True, False)
_assert_course('ru_RU', [track_b_id], False, True)
_assert_course('es_ES', [track_a_id], True, False)
_assert_course('es_ES', [track_b_id], False, True)
_assert_course(None, [track_a_id, track_b_id], True, True)
# the one below is not an error; the empty locale label set on
# student is a match for unit labeled with any locale or none
_assert_course('ru_RU', [track_a_id, track_b_id], True, True)
_assert_course('es_ES', [track_a_id, track_b_id], True, True)
_assert_course(None, [track_a_id, locale_ru_id], True, False)
_assert_course('ru_RU', [track_a_id, locale_ru_id], True, False)
_assert_course('ru_RU', [track_a_id, locale_es_id], False, False)
_assert_course('ru_RU', [track_b_id, locale_es_id], False, True)
_assert_course('ru_RU', [track_b_id, locale_ru_id], False, False)
_assert_course(
None, [track_a_id, track_b_id, locale_ru_id], True, False)
_assert_course(
None, [track_a_id, track_b_id, locale_es_id], False, True)
_assert_course(
'ru_RU', [track_a_id, track_b_id, locale_ru_id], True, False)
_assert_course(
'ru_RU', [track_a_id, track_b_id, locale_es_id], False, True)
_assert_course(
'es_ES', [track_a_id, track_b_id, locale_ru_id], True, False)
_assert_course(
'es_ES', [track_a_id, track_b_id, locale_es_id], False, True)
def test_localized_course_with_images(self):
self._import_sample_course()
self._setup_locales(course='sample')
with actions.OverriddenEnvironment(
{'course': {'now_available': True}}):
actions.logout()
actions.login(
'[email protected]')
actions.register(
self, 'test_track_and_locale_labels_dont_interfere',
course='sample')
def _assert_image():
response = self.get('sample/assets/img/Image2.2.1.png')
self.assertEquals(200, response.status_int)
self.assertEquals(215086, len(response.body))
with actions.OverriddenEnvironment(
{'course': {
'now_available': True, 'can_student_change_locale': True}}):
self._set_prefs_locale('en_US', course='sample')
response = self.get('sample/unit?unit=14&lesson=18')
self.assertIn(
'You are a cosmetologist and business owner', response.body)
self.assertIn('Announcements', response.body)
_assert_image()
self._set_prefs_locale('ru_RU', course='sample')
response = self.get('sample/unit?unit=14&lesson=18')
self.assertIn(
'You are a cosmetologist and business owner', response.body)
self.assertIn('Сообщения', response.body)
_assert_image()
def test_set_current_locale_reloads_environ(self):
app_context = sites.get_all_courses()[0]
self._setup_locales()
course = courses.Course(None, app_context)
course_bundle = {
'course:title': {
'source_value': None,
'type': 'string',
'data': [
{
'source_value': app_context.get_title(),
'target_value': 'TRANSLATED TITLE'
}]
}}
with Namespace('ns_first'):
key_el = ResourceBundleKey(
resources_display.ResourceCourseSettings.TYPE, 'homepage',
'es_ES')
ResourceBundleDAO.save(
ResourceBundleDTO(str(key_el), course_bundle))
sites.set_path_info('/first')
app_context.set_current_locale('ru_RU')
ru_env = course.get_environ(app_context)
app_context.set_current_locale('es_ES')
es_env = course.get_environ(app_context)
sites.unset_path_info()
self.assertNotEquals(ru_env, es_env)
def test_swapcase(self):
source = '12345'
target = u'12345λ'
self.assertEquals(target, i18n_dashboard.swapcase(source))
source = '<img alt="Hello!">W0rld</img>'
target = u'<img alt="Hello!">w0RLDλ</img>'
self.assertEquals(target, i18n_dashboard.swapcase(source))
source = 'Hello W0rld!'
target = u'hELLO w0RLD!λ'
self.assertEquals(target, i18n_dashboard.swapcase(source))
        source = 'Hello\'W0rld!'
target = u'hELLO\'w0RLD!λ'
self.assertEquals(target, i18n_dashboard.swapcase(source))
# content inside tags must be preserved
source = (
'Hello<img src="http://a.b.com/'
'foo?bar=baz&cookie=sweet"/>W0rld')
target = (
u'hELLOλ<img src="http://a.b.com/'
u'foo?bar=baz&cookie=sweet"/>w0RLDλ')
self.assertEquals(target, i18n_dashboard.swapcase(source))
# %s and other formatting must be preserved
source = 'Hello%sW0rld!'
target = u'hELLO%sw0RLD!λ'
self.assertEquals(target, i18n_dashboard.swapcase(source))
source = 'Hello%(foo)sW0rld!'
target = u'hELLO%(foo)sw0RLD!λ'
self.assertEquals(target, i18n_dashboard.swapcase(source))
        # we don't support {foo} type formatting; the placeholder is swapcased too
source = 'Hello{s}W0rld!'
target = u'hELLO{S}w0RLD!λ'
self.assertEquals(target, i18n_dashboard.swapcase(source))
def test_reverse_case(self):
self._import_sample_course()
actions.login('[email protected]', is_admin=True)
self.get('sample/dashboard?action=i18n_reverse_case')
self._set_prefs_locale('ln', course='sample')
def check_all_in(response, texts):
self.assertEquals(200, response.status_int)
for text in texts:
self.assertIn(text, response.body)
# check selected pages
check_all_in(self.get('sample/course'), [
'dANIEL rUSSELL', 'pRE-COURSE ASSESSMENT', 'iNTRODUCTION'])
check_all_in(self.get('sample/assessment?name=1'), [
'tHANK YOU, AND HAVE FUN!', 'wHEN SEARCHING gOOGLE iMAGES',
'a AND c', 'iF YOU DO NOT KNOW'])
check_all_in(self.get('sample/unit?unit=14'), [
'Unit 2 - iNTERPRETING RESULTS', 'wHEN SEARCH RESULTS SUGGEST',
'lESSON 2.3 aCTIVITY'])
check_all_in(self.get('sample/unit?unit=2&lesson=9'), [
'lESSON 1.4 aCTIVITY'])
check_all_in(self.get('sample/unit?unit=14&lesson=16'), [
'hAVE YOU EVER PLAYED THE'])
check_all_in(self.get('sample/unit?unit=47&lesson=53'), [
'dID dARWIN, HIMSELF, USE', 'aDVENTURES IN wONDERLAND'])
check_all_in(self.get('sample/assessment?name=64'), [
'sOLVE THE PROBLEM BELOW', 'hOW MANY pOWER sEARCH CONCEPTS',
'lIST THE pOWER sEARCH CONCEPTS'])
        # check assessment submission; it has fragile %s-style complex
        # formatting functions that we need to check
actions.register(self, 'test_reverse_case', course='sample')
response = self.post('sample/answer', {
'xsrf_token': crypto.XsrfTokenManager.create_xsrf_token(
'assessment-post'),
'score': 0, 'assessment_type': 65, 'answers': {}})
self.assertEquals(200, response.status_int)
for text in ['for taking the pOST-COURSE', 'cERTIFICATE OR NOT, WE']:
self.assertIn(text, response.body)
        # check every unit and lesson page for translation problems
invalid_question = 0
translation_error = 0
course = courses.Course(None, sites.get_all_courses()[0])
for unit in course.get_units():
for lesson in course.get_lessons(unit.unit_id):
response = self.get('sample/unit?unit=%s&lesson=%s' % (
unit.unit_id, lesson.lesson_id))
self.assertEquals(200, response.status_int)
self.assertIn(
unit.title.swapcase(), response.body.decode('utf-8'))
self.assertIn(
lesson.title.swapcase(), response.body.decode('utf-8'))
# check standard multibyte character is present
self.assertIn(u'λ', response.body.decode('utf-8'))
try:
self.assertNotIn('[Invalid question]', response.body)
except AssertionError:
invalid_question += 1
try:
self.assertNotIn('gcb-translation-error', response.body)
except AssertionError:
translation_error += 1
self.assertEquals((invalid_question, translation_error), (0, 0))
def test_course_with_one_common_unit_and_two_per_locale_units(self):
# TODO(psimakov): incomplete
pass
def test_readonly(self):
self._import_sample_course()
self._setup_locales(course='sample')
actions.login('[email protected]', is_admin=True)
response = self.get('sample/dashboard?action=i18n_dashboard')
self.assertNotIn('input disabled', response.body)
self.assertIn('action=i18n_download', response.body)
self.assertIn('action=i18n_upload', response.body)
self.assertIn('action=i18n_reverse_case', response.body)
self.assertIn('action=i18_console', response.body)
with actions.OverriddenEnvironment(
{'course': {'prevent_translation_edits': True}}):
response = self.get('sample/dashboard?action=i18n_dashboard')
self.assertIn('input disabled', response.body)
self.assertNotIn('action=i18n_download', response.body)
self.assertNotIn('action=i18n_upload', response.body)
self.assertNotIn('action=i18n_reverse_case', response.body)
self.assertNotIn('action=i18_console', response.body)
def test_dev_only_button_visibility(self):
self._import_sample_course()
extra_env = {
'extra_locales': [
{'locale': 'de', 'availability': 'available'},
]}
with actions.OverriddenEnvironment(extra_env):
response = self.get('sample/dashboard?action=i18n_dashboard')
self.assertIn('action=i18n_download', response.body)
self.assertIn('action=i18n_upload', response.body)
self.assertIn('action=i18n_reverse_case', response.body)
try:
appengine_config.PRODUCTION_MODE = True
response = self.get('sample/dashboard?action=i18n_dashboard')
self.assertNotIn('action=i18n_download', response.body)
self.assertNotIn('action=i18n_upload', response.body)
self.assertNotIn('action=i18n_reverse_case', response.body)
finally:
appengine_config.PRODUCTION_MODE = False
def test_rpc_performance(self):
"""Tests various common actions for the number of memcache/db rpc."""
self._import_sample_course()
# add fake 'ln' locale and fake translations
response = self.get('sample/dashboard?action=i18n_reverse_case')
self.assertEquals(302, response.status_int)
response = self.get('sample/dashboard?action=i18n_dashboard')
self.assertEquals(200, response.status_int)
self.assertIn('>ln</th>', response.body)
config.Registry.test_overrides[models.CAN_USE_MEMCACHE.name] = True
# Need to muck with internals of code under test.
# pylint: disable=protected-access
old_memcache_make_async_call = memcache._CLIENT._make_async_call
old_db_make_rpc_call = datastore_rpc.BaseConnection._make_rpc_call
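        # Monkey-patch the lowest-level memcache and datastore RPC entry
        # points so that every call made while serving a request can be
        # counted and attributed to the stack that issued it.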
try:
lines = []
over_quota = [False]
def _profile(url, hint, quota=(128, 32)):
"""Fetches a URL while counting a number of RPC calls.
Args:
url: URL to fetch
hint: hint about this operation to put in the report
quota: tuple of max counts of (memcache, db) RPC calls
allowed during this request
"""
counters = [0, 0]
memcache_stacks = collections.defaultdict(int)
db_stacks = collections.defaultdict(int)
def reset():
counters[0] = 0
counters[1] = 0
def _memcache_make_async_call(*args, **kwds):
memcache_stacks[tuple(traceback.extract_stack())] += 1
counters[0] += 1
return old_memcache_make_async_call(*args, **kwds)
def _db_make_rpc_call(*args, **kwds):
db_stacks[tuple(traceback.extract_stack())] += 1
counters[1] += 1
return old_db_make_rpc_call(*args, **kwds)
def _assert_quota(quota, actual, lines):
memcache_quota, db_quota = quota
memcache_actual, db_actual = actual
respects_quota = True
if memcache_quota is not None and (
memcache_quota < memcache_actual):
respects_quota = False
if db_quota is not None and (db_quota < db_actual):
respects_quota = False
if not respects_quota:
over_quota[0] = True
lines.append(
'Request metrics %s exceed RPC quota '
'[memcache:%s, db:%s]: %s (%s)' % (
actual, memcache_quota, db_quota, hint, url))
for stacktrace, count in memcache_stacks.iteritems():
lines.append('Memcache: %d calls to:' % count)
lines += [l.rstrip() for l in
traceback.format_list(stacktrace)]
for stacktrace, count in db_stacks.iteritems():
lines.append('DB: %d calls to:' % count)
lines += [l.rstrip() for l in
traceback.format_list(stacktrace)]
counters_list = []
memcache._CLIENT._make_async_call = _memcache_make_async_call
datastore_rpc.BaseConnection._make_rpc_call = _db_make_rpc_call
for locale in ['en_US', 'ln']:
self._set_prefs_locale(locale, course='sample')
memcache.flush_all()
app_context = sites.get_all_courses()[0]
app_context.clear_per_process_cache()
app_context.clear_per_request_cache()
for attempt in [0, 1]:
reset()
response = self.get(url)
self.assertEquals(200, response.status_int)
actual = [] + counters
                    counters_list.append(actual)
if quota is not None and attempt == 1:
_assert_quota(quota, actual, lines)
stats = ' '.join([
'[% 4d|% 4d]' % (_memcache, _db)
for _memcache, _db in counters_list])
lines.append('\t{ %s }\t%s (%s)' % (stats, hint, url))
header = (
'[memcache|db] for {first load, second load, '
'first locale load, second locale load}')
with actions.OverriddenEnvironment(
{'course': {
'now_available': True, 'can_student_change_locale': True}}):
actions.logout()
lines.append('RPC Profile, anonymous user %s' % header)
_profile(
'/modules/oeditor/resources/butterbar.js', # deprecated
'Butterbar', quota=(0, 0))
_profile('sample/assets/css/main.css', 'main.css', quota=(6, 0))
_profile('sample/course', 'Home page', quota=(None, 1))
_profile(
'sample/announcements', 'Announcements', quota=(None, 1))
actions.login('[email protected]')
actions.register(self, 'test_rpc_performance', course='sample')
lines.append('RPC Profile, registered user %s' % header)
_profile(
'/modules/oeditor/resources/butterbar.js', # deprecated
'Butterbar', quota=(0, 0))
_profile(
'sample/assets/css/main.css', 'main.css', quota=(3, 1))
_profile('sample/course', 'Home page')
_profile('sample/announcements', 'Announcements')
_profile('sample/unit?unit=14&lesson=17', 'Lesson 2.2')
_profile('sample/assessment?name=35', 'Mid-term exam')
actions.logout()
actions.login('[email protected]', is_admin=True)
lines.append('RPC Profile, admin user %s' % header)
_profile(
'/modules/oeditor/resources/butterbar.js', # deprecated
'Butterbar', quota=(0, 0))
_profile(
'sample/assets/css/main.css', 'main.css', quota=(3, 1))
_profile('sample/course', 'Home page')
_profile('sample/announcements', 'Announcements')
_profile('sample/unit?unit=14&lesson=17', 'Lesson 2.2')
_profile('sample/assessment?name=35', 'Mid-term exam')
_profile('sample/admin', 'Admin home')
_profile('sample/admin?action=settings', 'Settings')
_profile('sample/dashboard', 'Dashboard', quota=(150, 60))
_profile('sample/dashboard?action=edit_questions',
'Questions')
_profile('sample/dashboard?action=edit_question_groups',
'Question Groups')
_profile(
'sample/dashboard?action=i18n_dashboard',
'I18N Dashboard')
_profile('sample/dashboard?action=i18n_download', 'I18N Export')
logging.info('\n'.join(lines))
self.assertFalse(over_quota[0], msg='Some items exceed quota.')
finally:
memcache._CLIENT._make_async_call = old_memcache_make_async_call
datastore_rpc.BaseConnection._make_rpc_call = old_db_make_rpc_call
del config.Registry.test_overrides[models.CAN_USE_MEMCACHE.name]
| apache-2.0 | -880,724,496,572,794,800 | 40.319877 | 80 | 0.548985 | false |
cclljj/AnySense_7688 | pending/pm_hpm.py | 1 | 3267 | import mraa
import time
from multiprocessing import Queue,Process
import move_avge
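# Serial frame layout for the particle sensor (assumed to be a Honeywell
# HPM-series PM2.5/PM10 module; the byte offsets below are inferred from
# the parsing code, not from a datasheet).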
NUM_INCOME_BYTE = 8
CHAR_PRELIM = 0x40
NUM_DATA_BYTE = 7
CHECK_BYTE = 7
PM1_BYTE = -1
PM25_BYTE = 3
PM10_BYTE = 5
class sensor(Process):
def __init__(self, q):
Process.__init__(self)
self.q = q
self.u=mraa.Uart(0)
self.u.setBaudRate(9600)
self.u.setMode(8, mraa.UART_PARITY_NONE, 1)
self.u.setFlowcontrol(False, False)
self.u.flush()
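		# 0x68 0x01 0x02 0x95 is assumed to be the "stop auto send"
		# command from the sensor's datasheet; the sensor should answer
		# with the 0xA5 0xA5 ACK we wait for below.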
cmd = bytearray([0x68,0x01,0x02,0x95])
#cmd = bytearray([0x68,0x01,0x04,0x96])
self.u.write(cmd)
self.u.flush()
time.sleep(0.1)
if self.u.dataAvailable():
ready = False
while ready is False:
getstr = self.u.readStr(2)
bytedata = bytearray(getstr)
if bytedata[0]==165 and bytedata[1]==165:
ready = True
else:
time.sleep(0.1)
self.u.flush()
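		# 0x68 0x01 0x01 0x96 is assumed to be the "start particle
		# measurement" command, again ACKed with 0xA5 0xA5.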
cmd = bytearray([0x68,0x01,0x01,0x96])
self.u.write(cmd)
self.u.flush()
time.sleep(0.1)
if self.u.dataAvailable():
ready = False
while ready is False:
getstr = self.u.readStr(2)
bytedata = bytearray(getstr)
				for i in range(0, 2):
					print(int(bytedata[i]))
if bytedata[0]==165 and bytedata[1]==165:
ready = True
else:
time.sleep(0.1)
self.u.flush()
self.pm1_0_avg = move_avge.move_avg(1)
self.pm2_5_avg = move_avge.move_avg(1)
self.pm10_avg = move_avge.move_avg(1)
def data_log(self, dstr):
bytedata = bytearray(dstr)
if self.checksum(dstr) is True:
PM1_0 = -1
PM2_5 = bytedata[PM25_BYTE]*256 + bytedata[PM25_BYTE+1]
PM10 = bytedata[PM10_BYTE]*256 + bytedata[PM10_BYTE+1]
self.pm1_0_avg.add(PM1_0)
self.pm2_5_avg.add(PM2_5)
self.pm10_avg.add(PM10)
return True
else:
return False
def checksum(self, dstr):
bytedata = bytearray(dstr)
if bytedata[0]!=64 or bytedata[1]!=5 or bytedata[2]!=4:
return False
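		# Checksum is assumed to be the low byte of (65536 - sum of the
		# header and data bytes), matching the value stored at CHECK_BYTE.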
calcsum = 0
calcsum = bytedata[0] + bytedata[1] + bytedata[2] + 256 * bytedata[3] + bytedata[4] + 256 * bytedata[5] + bytedata[6]
calcsum = (65536 - calcsum) % 256
exptsum = bytedata[CHECK_BYTE]
if calcsum==exptsum:
return True
else:
return False
def get_data(self):
PM1_0 = self.pm1_0_avg.get()
PM2_5 = self.pm2_5_avg.get()
PM10 = self.pm10_avg.get()
ret = {
'PM1.0': PM1_0,
'PM2.5': PM2_5,
'PM10': PM10
}
return ret
def run(self):
while True:
self.u.flush()
cmd = bytearray([0x68,0x01,0x04,0x93])
self.u.write(cmd)
self.u.flush()
time.sleep(1)
if self.u.dataAvailable():
getstr = self.u.readStr(NUM_INCOME_BYTE)
if len(getstr) == NUM_INCOME_BYTE:
if self.data_log(getstr) is True:
g = self.get_data()
self.q.put(g)
if __name__ == '__main__':
q = Queue()
p = sensor(q)
p.start()
while True:
print('air: '+ str(q.get()))
| gpl-3.0 | 4,554,266,414,521,395,700 | 23.380597 | 119 | 0.53015 | false |
Larisa123/Kviz | main.py | 1 | 12835 | from tkinter import *
from tkinter import ttk
import random
button_width = 17
number_of_characters_per_row = 56
diff_for_answers = 8
color = '#%02x%02x%02x' % (231, 231, 231)
import subprocess # tried 5 different modules: pyglet, mp3play, sound and two more, but none of them worked
# pygame feels like too big a library to add just for a few sounds
def play_button_click(): # works only on OS X!
    subprocess.call(["afplay", "Sounds/button_click.mp3"])
# runs too slowly!! - while this plays, nothing else runs!
# subprocess.call(["afplay", "music.mp3"]) # if I play this, nothing else runs until the music ends!
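# A non-blocking alternative (sketch, untested): Popen returns immediately,
# so the GUI would keep running while the sound plays in the background.
def play_music_async(path="music.mp3"):
    subprocess.Popen(["afplay", path])  # still OS X only, like afplay above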
import gettext
_ = gettext.gettext
# noinspection PyBroadException
try:
en = gettext.translation('main', localedir='locale', languages=['en'])
en.install()
except:
print(_("Prevedba v angleski jezik ni bila mogoca."))
class Quiz(Tk):
frames = {}
number_of_questions = 5
question_count = 0
number_of_all_questions = 20 # per subject in SUBJECTdata.txt
points = 0 # number of points user gets for answering the question correctly
def __init__(self, *args, **kwargs):
Tk.__init__(self, *args, **kwargs)
Tk.wm_title(self, _("Maturitetni kviz"))
self.initialize_container_frame()
self.initialize_start_page()
self.set_images()
def initialize_container_frame(self):
        self.container = ttk.Frame(self)  # a bare frame; the question frames get stacked onto it
self.container.pack_propagate(0)
self.container.pack(pady=10, padx=10)
self.container.grid_rowconfigure(0, weight=1)
        # default weight is 0; weight 1 means this row gets the extra space on
        # resize - does not matter yet, since it is the only row
        self.container.grid_columnconfigure(0, weight=1)
def initialize_start_page(self):
start_page = StartPage(self.container, self)
start_page.grid(row=0, column=0, sticky="nsew")
self.frames[0] = start_page
self.show_frame()
def show_frame(self):
if self.question_count <= self.number_of_questions:
            frame = self.frames.get(self.question_count, None)  # so a missing frame cannot crash the program
            if frame is not None:
                frame.tkraise()  # raise the new frame - the next question
else:
print(_("Nekaj se je zalomilo. Vprasanja ni bilo mogoče naložiti"))
self.question_count += 1
else:
self.show_result_frame()
def set_subject(self, subject):
self.create_random_questions(subject)
self.show_frame()
play_button_click()
def create_random_questions(self, subject):
random_question_numbers = []
table_of_possible_question_numbers = list(
            range(1, self.number_of_all_questions + 1))  # must run from 1 up to and including the number
        # pick the random question numbers here; each number is a line in the data file:
while len(random_question_numbers) < self.number_of_questions:
rand_number = random.choice(table_of_possible_question_numbers)
random_question_numbers.append(rand_number)
if rand_number in table_of_possible_question_numbers:
table_of_possible_question_numbers.remove(rand_number)
else:
print(_("Pri določanju tvojih vprašanj se je zalomilo.")) # spet da slucajno ne pride do zrusitve
# nalozimo dejanska vprasanja, prikazemo zaenkrat se nobenega:
question_count = 1 # to ni lastnost metode self.question_count, ampak nova spremenljivka
for number in random_question_numbers:
question = Question(self.container, self, subject, number)
self.frames[question_count] = question
question_count += 1
question.grid(row=0, column=0, sticky="nsew")
def show_result_frame(self):
result_page = ResultPage(self.container, self)
result_page.grid(row=0, column=0, sticky="nsew")
result_page.tkraise()
        # reset the results in case the user happens to play again:
self.question_count = 0
self.points = 0
        self.destroy_previous_frames() # so memory does not fill up
def destroy_previous_frames(self):
for frame in self.frames.values():
frame.destroy()
self.frames = {}
def increase_points(self):
self.points += 1
def set_images(self):
correct_photo = PhotoImage(file="Images/correct.gif")
Label(self, image=correct_photo)
self.correct_photo = correct_photo
        wrong_photo = PhotoImage(file="Images/wrong.gif")
Label(self, image=wrong_photo)
self.wrong_photo = wrong_photo
class StartPage(ttk.Frame): # inherits the methods and attributes of ttk.Frame
    def __init__(self, parent, quiz_reference): # parent is the container - everything is placed onto it
ttk.Frame.__init__(self, parent)
self.quiz_reference = quiz_reference
self.show_frame()
def show_frame(self):
text = _('''Pozdravljen bodoči maturant!\nPred tabo je kratek kviz iz maturitetnih predmetov\n''')
ttk.Label(self, text=text, justify="center").pack(padx=10)
self.show_image()
ttk.Label(self, text=_("Izberi področje:")).pack(pady=10, padx=10)
button_geo = ttk.Button(self, text=_("Geografija"),
command=lambda: self.quiz_reference.set_subject("GEO"),
width=button_width)
button_geo.pack(side="bottom")
button_mat = ttk.Button(self, text=_("Matematika"),
command=lambda: self.quiz_reference.set_subject("MAT"),
width=button_width)
button_mat.pack(side="bottom")
        # lambda is used so we can pass a parameter without triggering the call immediately
def show_image(self):
photo = PhotoImage(file="Images/slika.gif")
label = ttk.Label(self, image=photo)
        self.start_page_image = photo # an extra reference is needed so the image is not garbage-collected
label.pack()
class Question(ttk.Frame):
question = ""
correct_answer = 0
possible_answers = {}
chosen_answer = ""
is_confirm_button_showing = False
radio_buttons = []
    def __init__(self, parent, quiz_reference, subject, number): # given the number, look up the question, answer and possible answers in the data file
ttk.Frame.__init__(self, parent)
self.quiz_reference = quiz_reference
self.subject = subject
self.number = number
self.get_data()
self.show_frame_widgets()
def show_frame_widgets(self):
self.show_the_question()
self.show_possible_answers()
def show_the_question(self):
        '''shows the question on a label widget'''
edited_text = self.check_if_text_too_long(self.question, number_of_characters_per_row)
ttk.Label(self, text=edited_text).pack(pady=15, padx=10, side="top")
    def check_if_text_too_long(self, unedited_text, allowed_number_of_chars):
        '''returns the text rearranged with newlines if it is currently too long'''
        if len(unedited_text) <= allowed_number_of_chars: return unedited_text # already short enough
        text = "" # the multi-line result is built up below
        num_of_chars = 0 # in current row
        for word in unedited_text.split(" "):
            num_of_chars += len(word)
            if num_of_chars < allowed_number_of_chars:
                text += word + " "
            else:
                text = text + "\n" + word + " " # break the row before the word that overflows it
                num_of_chars = len(word)
        return text.strip("\n")
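    # e.g. (illustrative): check_if_text_too_long("first second third", 12)
    # keeps "first second " on one row and moves "third" to the next row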
def show_possible_answers(self):
self.radio_buttons = {}
self.var = StringVar()
for possible_answer in self.possible_answers:
possible_answer = self.check_if_text_too_long(possible_answer,
number_of_characters_per_row - diff_for_answers)
R = ttk.Radiobutton(self,
compound="left",
text=possible_answer,
variable=self.var,
value=possible_answer,
command=self.set_chosen_answer)
            # When the user picks an answer, a confirm button appears; pressing it checks whether the choice is correct
self.radio_buttons[possible_answer] = R
R.pack(anchor='w')
def set_chosen_answer(self):
if not self.is_confirm_button_showing: self.show_confirm_button()
def show_confirm_button(self):
self.confirm_button = ttk.Button(self, text=_("Potrdi izbiro"),
command=self.check_the_answer,
width=button_width)
self.confirm_button.pack(pady=8, side="bottom")
self.is_confirm_button_showing = True
def change_text_on_confirm_button(self):
self.confirm_button.destroy()
self.next_q_button = ttk.Button(self, text=_("Naprej"),
command=self.confirm_button_pressed,
width=button_width)
self.next_q_button.pack(pady=8, side="bottom")
        # prevent further clicks on the radio buttons:
for text, radio_button in self.radio_buttons.items():
radio_button.configure(state=DISABLED)
            #if radio_button.text == self.chosen_answer: print(self.chosen_answer) # does not work! hence the dictionary approach
if text == self.chosen_answer:
appropriate_image = self.quiz_reference.correct_photo if self.chosen_answer == self.correct_answer \
else self.quiz_reference.wrong_photo
                #print(appropriate_image.name)
                #radio_button.configure(image=appropriate_image) # THE MATCHING IMAGE SHOULD BE SHOWN NEXT TO THE ANSWER HERE
def confirm_button_pressed(self):
play_button_click()
self.quiz_reference.show_frame()
def check_the_answer(self):
self.chosen_answer = self.var.get()
if self.chosen_answer == self.correct_answer: self.quiz_reference.increase_points()
self.change_text_on_confirm_button()
play_button_click()
def get_data(self):
data = self.subject + "data.txt"
with open(data, "r") as file:
lines = [line.strip() for line in file]
currentLine = lines[self.number]
            # stored in the form Question;ans1:ans2:ans3;correctAnswer
data = currentLine.split(";")
self.question = data[0]
self.correct_answer = data[2]
self.possible_answers = data[1].split(":")
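            # illustrative sample line (format assumed from the split above):
            #   "2+2?;3:4:5;4"  ->  question "2+2?", answers 3/4/5, correct answer "4"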
class ResultPage(ttk.Frame):
    def __init__(self, parent, quiz_reference): # builds the final page that shows the user's score
ttk.Frame.__init__(self, parent)
self.quiz_reference = quiz_reference
self.show_frame_widgets()
def show_frame_widgets(self):
points = self.quiz_reference.points
all_points = self.quiz_reference.number_of_questions
ttk.Label(self, text="Tvoj rezultat je: {} od {} točk!".
format(points, all_points)).pack(pady=10, padx=10)
text_message = self.appropriate_message(points)
ttk.Label(self, text=text_message).pack(pady=10, padx=10)
appropriate_image = "Images/failure.gif" if points <= all_points // 2 else "Images/bravo.gif"
photo = PhotoImage(file=appropriate_image)
label = ttk.Label(self, image=photo)
self.congratulation_photo = photo
label.pack(pady=15)
ttk.Button(self, text="Igraj ponovno!",
command=self.quiz_reference.initialize_start_page,
width=button_width).pack(side="bottom")
def appropriate_message(self, user_points):
"""Prikaze sporocilo glede na rezultat"""
all_points = self.quiz_reference.number_of_questions
        # note: the original upper bound (all_points // 4) made the second range
        # empty; three quarters of the points is assumed to be the intended threshold
        if user_points in range(all_points // 2 + 1):
            message = "Tvoje znanje je nezadostno!"
        elif user_points in range(all_points // 2 + 1, 3 * all_points // 4):
            message = "Tvoje znanje je zadovoljivo."
        elif user_points in range(3 * all_points // 4, all_points):
            message = "Čestitam, dosegel si skoraj vse točke!"
        else:
            message = "Bravo, tvoje znanje je izjemno!!!" # the user got all the points
return message
app = Quiz()
app.geometry("500x250")
app.configure(bg=color) # otherwise the border would be white
# window size - not a real solution; I want to set the size of all the frames, not just the window, because the alignment is odd now
app.resizable(0, 0) # not resizable in either direction
app.mainloop()
| apache-2.0 | 391,766,898,711,221,760 | 39.05625 | 144 | 0.61359 | false |
geometalab/OSMTagFinder | OSMTagFinder/thesaurus/mapsemnet.py | 1 | 1602 | # -*- coding: utf-8 -*-
'''
Created on 08.11.2014
@author: Simon Gwerder
'''
from rdflib.namespace import SKOS
from semnet.osmsemanticnet import OSMSemanticNet
from thesaurus.rdfgraph import RDFGraph
from utilities.configloader import ConfigLoader
class MapOSMSemanticNet:
def __init__(self, tagFinderRDF, osnSemNetFilePath=None):
if tagFinderRDF is None: return
osnSemNetRDF = None
if osnSemNetFilePath is not None:
#print('Loading OSN graph')
osnSemNetRDF = RDFGraph(osnSemNetFilePath)
osn = OSMSemanticNet(osnSemNetRDF) # if osnSemNetRDF is None it will check the web graph
termSchemeName = ConfigLoader().getThesaurusString('TERM_SCHEME_NAME')
count = 0
for subject, predicate, obj in tagFinderRDF.graph:
if not osn.baseUrl in subject and not termSchemeName in subject: # check if some osn matches have been added already
osnConcept = None
if predicate == SKOS.prefLabel:
count = count + 1
if '=' in str(obj):
splitArray = str(obj).split('=')
osnConcept = osn.getConcept(splitArray[0], splitArray[1])
else:
osnConcept = osn.getConcept(str(obj))
if osnConcept:
tagFinderRDF.addRelatedMatch(subject, osnConcept)
#print(str(count) + ' : Added Matching Concept Mapping from: ' + subject + '\t\t\tto: ' + osnConcept)
#tagFinderRDF.serialize(tagFinderRDF.filePath)
| mit | -4,895,832,965,683,772,000 | 32.375 | 128 | 0.611735 | false |
bellowsj/aiopogo | aiopogo/pogoprotos/settings/master/quest_settings_pb2.py | 1 | 3401 | # Generated by the protocol buffer compiler. DO NOT EDIT!
# source: pogoprotos/settings/master/quest_settings.proto
import sys
_b=sys.version_info[0]<3 and (lambda x:x) or (lambda x:x.encode('latin1'))
from google.protobuf import descriptor as _descriptor
from google.protobuf import message as _message
from google.protobuf import reflection as _reflection
from google.protobuf import symbol_database as _symbol_database
from google.protobuf import descriptor_pb2
# @@protoc_insertion_point(imports)
_sym_db = _symbol_database.Default()
from pogoprotos.enums import quest_type_pb2 as pogoprotos_dot_enums_dot_quest__type__pb2
from pogoprotos.settings.master.quest import daily_quest_settings_pb2 as pogoprotos_dot_settings_dot_master_dot_quest_dot_daily__quest__settings__pb2
DESCRIPTOR = _descriptor.FileDescriptor(
name='pogoprotos/settings/master/quest_settings.proto',
package='pogoprotos.settings.master',
syntax='proto3',
serialized_pb=_b('\n/pogoprotos/settings/master/quest_settings.proto\x12\x1apogoprotos.settings.master\x1a!pogoprotos/enums/quest_type.proto\x1a;pogoprotos/settings/master/quest/daily_quest_settings.proto\"\x8b\x01\n\rQuestSettings\x12/\n\nquest_type\x18\x01 \x01(\x0e\x32\x1b.pogoprotos.enums.QuestType\x12I\n\x0b\x64\x61ily_quest\x18\x02 \x01(\x0b\x32\x34.pogoprotos.settings.master.quest.DailyQuestSettingsb\x06proto3')
,
dependencies=[pogoprotos_dot_enums_dot_quest__type__pb2.DESCRIPTOR,pogoprotos_dot_settings_dot_master_dot_quest_dot_daily__quest__settings__pb2.DESCRIPTOR,])
_sym_db.RegisterFileDescriptor(DESCRIPTOR)
_QUESTSETTINGS = _descriptor.Descriptor(
name='QuestSettings',
full_name='pogoprotos.settings.master.QuestSettings',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='quest_type', full_name='pogoprotos.settings.master.QuestSettings.quest_type', index=0,
number=1, type=14, cpp_type=8, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='daily_quest', full_name='pogoprotos.settings.master.QuestSettings.daily_quest', index=1,
number=2, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=176,
serialized_end=315,
)
_QUESTSETTINGS.fields_by_name['quest_type'].enum_type = pogoprotos_dot_enums_dot_quest__type__pb2._QUESTTYPE
_QUESTSETTINGS.fields_by_name['daily_quest'].message_type = pogoprotos_dot_settings_dot_master_dot_quest_dot_daily__quest__settings__pb2._DAILYQUESTSETTINGS
DESCRIPTOR.message_types_by_name['QuestSettings'] = _QUESTSETTINGS
QuestSettings = _reflection.GeneratedProtocolMessageType('QuestSettings', (_message.Message,), dict(
DESCRIPTOR = _QUESTSETTINGS,
__module__ = 'pogoprotos.settings.master.quest_settings_pb2'
# @@protoc_insertion_point(class_scope:pogoprotos.settings.master.QuestSettings)
))
_sym_db.RegisterMessage(QuestSettings)
# @@protoc_insertion_point(module_scope)
| mit | 2,925,874,700,893,986,300 | 40.987654 | 424 | 0.757424 | false |
MalloyPower/parsing-python | front-end/testsuite-python-lib/Python-3.2/Lib/idlelib/macosxSupport.py | 1 | 6327 | """
A number of function that enhance IDLE on MacOSX when it used as a normal
GUI application (as opposed to an X11 application).
"""
import sys
import tkinter
from os import path
_appbundle = None
def runningAsOSXApp():
"""
Returns True if Python is running from within an app on OSX.
If so, assume that Python was built with Aqua Tcl/Tk rather than
X11 Tcl/Tk.
"""
global _appbundle
if _appbundle is None:
_appbundle = (sys.platform == 'darwin' and '.app' in sys.executable)
return _appbundle
_carbonaquatk = None
def isCarbonAquaTk(root):
"""
Returns True if IDLE is using a Carbon Aqua Tk (instead of the
newer Cocoa Aqua Tk).
"""
global _carbonaquatk
if _carbonaquatk is None:
_carbonaquatk = (runningAsOSXApp() and
'aqua' in root.tk.call('tk', 'windowingsystem') and
'AppKit' not in root.tk.call('winfo', 'server', '.'))
return _carbonaquatk
def tkVersionWarning(root):
"""
    Returns a string warning message if the Tk version in use appears to
    be one known to cause problems with IDLE, such as the Apple
    Cocoa-based Tk 8.5.7 that was shipped with Mac OS X 10.6.
"""
if (runningAsOSXApp() and
('AppKit' in root.tk.call('winfo', 'server', '.')) and
(root.tk.call('info', 'patchlevel') == '8.5.7') ):
return (r"WARNING: The version of Tcl/Tk (8.5.7) in use may"
r" be unstable.\n"
r"Visit http://www.python.org/download/mac/tcltk/"
r" for current information.")
else:
return False
def addOpenEventSupport(root, flist):
"""
    This ensures that the application will respond to open AppleEvents, which
    makes it feasible to use IDLE as the default application for python files.
"""
def doOpenFile(*args):
for fn in args:
flist.open(fn)
# The command below is a hook in aquatk that is called whenever the app
# receives a file open event. The callback can have multiple arguments,
# one for every file that should be opened.
root.createcommand("::tk::mac::OpenDocument", doOpenFile)
def hideTkConsole(root):
try:
root.tk.call('console', 'hide')
except tkinter.TclError:
# Some versions of the Tk framework don't have a console object
pass
def overrideRootMenu(root, flist):
"""
Replace the Tk root menu by something that's more appropriate for
IDLE.
"""
# The menu that is attached to the Tk root (".") is also used by AquaTk for
# all windows that don't specify a menu of their own. The default menubar
    # contains a number of menus, none of which are appropriate for IDLE. The
    # most annoying of those is an 'About Tcl/Tk...' menu in the application
# menu.
#
# This function replaces the default menubar by a mostly empty one, it
# should only contain the correct application menu and the window menu.
#
# Due to a (mis-)feature of TkAqua the user will also see an empty Help
# menu.
    from tkinter import Menu, Text
from idlelib.EditorWindow import prepstr, get_accelerator
from idlelib import Bindings
from idlelib import WindowList
from idlelib.MultiCall import MultiCallCreator
menubar = Menu(root)
root.configure(menu=menubar)
menudict = {}
menudict['windows'] = menu = Menu(menubar, name='windows')
menubar.add_cascade(label='Window', menu=menu, underline=0)
def postwindowsmenu(menu=menu):
end = menu.index('end')
if end is None:
end = -1
if end > 0:
menu.delete(0, end)
WindowList.add_windows_to_menu(menu)
WindowList.register_callback(postwindowsmenu)
def about_dialog(event=None):
from idlelib import aboutDialog
aboutDialog.AboutDialog(root, 'About IDLE')
def config_dialog(event=None):
from idlelib import configDialog
# Ensure that the root object has an instance_dict attribute,
# mirrors code in EditorWindow (although that sets the attribute
# on an EditorWindow instance that is then passed as the first
# argument to ConfigDialog)
        root.instance_dict = flist.inversedict
configDialog.ConfigDialog(root, 'Settings')
def help_dialog(event=None):
from idlelib import textView
fn = path.join(path.abspath(path.dirname(__file__)), 'help.txt')
textView.view_file(root, 'Help', fn)
root.bind('<<about-idle>>', about_dialog)
root.bind('<<open-config-dialog>>', config_dialog)
root.createcommand('::tk::mac::ShowPreferences', config_dialog)
if flist:
root.bind('<<close-all-windows>>', flist.close_all_callback)
# The binding above doesn't reliably work on all versions of Tk
# on MacOSX. Adding command definition below does seem to do the
# right thing for now.
root.createcommand('exit', flist.close_all_callback)
if isCarbonAquaTk(root):
# for Carbon AquaTk, replace the default Tk apple menu
menudict['application'] = menu = Menu(menubar, name='apple')
menubar.add_cascade(label='IDLE', menu=menu)
Bindings.menudefs.insert(0,
('application', [
('About IDLE', '<<about-idle>>'),
None,
]))
tkversion = root.tk.eval('info patchlevel')
if tuple(map(int, tkversion.split('.'))) < (8, 4, 14):
# for earlier AquaTk versions, supply a Preferences menu item
Bindings.menudefs[0][1].append(
('_Preferences....', '<<open-config-dialog>>'),
)
else:
# assume Cocoa AquaTk
# replace default About dialog with About IDLE one
root.createcommand('tkAboutDialog', about_dialog)
# replace default "Help" item in Help menu
root.createcommand('::tk::mac::ShowHelp', help_dialog)
# remove redundant "IDLE Help" from menu
del Bindings.menudefs[-1][1][0]
def setupApp(root, flist):
"""
Perform setup for the OSX application bundle.
"""
if not runningAsOSXApp(): return
hideTkConsole(root)
overrideRootMenu(root, flist)
addOpenEventSupport(root, flist)
| mit | -5,375,528,088,494,449,000 | 34.745763 | 79 | 0.636637 | false |
jspiros/python-ebml | ebml/tests/test_core.py | 1 | 4698 | import unittest
try:
from cStringIO import StringIO
except ImportError:
from StringIO import StringIO
import random
import sys
from ..core import *
class ElementSizeTests(unittest.TestCase):
def assert_roundtrip(self, value, length=None):
encoded = encode_element_size(value, length=length)
if length is not None:
self.assertEqual(length, len(encoded))
encoded_stream = StringIO(encoded)
self.assertEqual(value, read_element_size(encoded_stream)[0])
def test_unknown(self):
for length in xrange(1, 9):
self.assert_roundtrip(None, length=length)
def test_base_10(self):
for value in (10**exp for exp in xrange(1, 16)):
self.assert_roundtrip(value)
def test_base_2(self):
for value in (2**exp for exp in xrange(1, 56)):
self.assert_roundtrip(value)
def test_max_base_2(self):
for value in ((2**exp) - 2 for exp in xrange(1, 57)):
self.assert_roundtrip(value)
def test_random(self):
maximum = 2**56 - 2
for value in (random.randint(0, maximum) for i in xrange(0, 10000)):
self.assert_roundtrip(value)
class ElementIDTests(unittest.TestCase):
ebml_ids = (
0x1a45dfa3,
0x4286,
0x42f7,
0x42f2,
0x42f3,
0x4282,
0x4287,
0x4285,
0xbf,
0xec
)
def assert_roundtrip(self, value):
encoded = encode_element_id(value)
encoded_stream = StringIO(encoded)
self.assertEqual(value, read_element_id(encoded_stream)[0])
def test_ebml_ids(self):
for id_ in self.ebml_ids:
self.assert_roundtrip(id_)
class ValueTestCase(unittest.TestCase):
encoder = None
reader = None
def assert_roundtrip(self, value, length=None):
if self.encoder is not None and self.reader is not None:
encoded = self.encoder(value, length)
if length is not None:
self.assertEqual(length, len(encoded))
encoded_stream = StringIO(encoded)
self.assertEqual(value, self.reader(encoded_stream, len(encoded)))
else:
raise NotImplementedError
class UnsignedIntegerTests(ValueTestCase):
encoder = staticmethod(encode_unsigned_integer)
reader = staticmethod(read_unsigned_integer)
maximum = 2**64 - 1
def test_random(self):
for value in (random.randint(0, self.maximum) for i in xrange(0, 10000)):
self.assert_roundtrip(value)
def test_random_longer(self):
for value in (random.randint(0, (self.maximum / (2**32))) for i in xrange(0, 10000)):
self.assert_roundtrip(value, length=8)
def test_maximum(self):
self.assert_roundtrip(self.maximum)
class SignedIntegerTests(ValueTestCase):
encoder = staticmethod(encode_signed_integer)
reader = staticmethod(read_signed_integer)
minimum = -(2**63)
maximum = (2**63) - 1
def test_random(self):
for value in (random.randint(self.minimum, self.maximum) for i in xrange(0, 10000)):
self.assert_roundtrip(value)
def test_random_longer(self):
for value in (random.randint((self.minimum / (2**32)), (self.maximum / (2**32))) for i in xrange(0, 10000)):
self.assert_roundtrip(value, length=8)
def test_minimum(self):
self.assert_roundtrip(self.minimum)
def test_maximum(self):
self.assert_roundtrip(self.maximum)
class FloatTests(ValueTestCase):
# Note:
# I'm not sure if this is a good idea, due to the potential for loss of precision.
# It seems that, at least with my installation of Python, floats are 64-bit IEEE, and so, for now, this works.
encoder = staticmethod(encode_float)
reader = staticmethod(read_float)
def test_random(self):
for value in (random.uniform(1.0, float(random.randint(2, 2**10))) for i in xrange(0, 1000)):
self.assert_roundtrip(value)
class StringTests(ValueTestCase):
encoder = staticmethod(encode_string)
reader = staticmethod(read_string)
letters = ''.join(chr(i) for i in xrange(1, 127))
def test_random(self):
for length in (random.randint(0, 2**10) for i in xrange(0, 1000)):
astring = ''.join(random.sample(self.letters * ((length // len(self.letters)) + 1), length))
self.assert_roundtrip(astring)
self.assert_roundtrip(astring, length=length*2)
class UnicodeStringTests(ValueTestCase):
encoder = staticmethod(encode_unicode_string)
reader = staticmethod(read_unicode_string)
letters = u''.join(unichr(i) for i in xrange(1, sys.maxunicode + 1))
def test_random(self):
for length in (random.randint(0, 2**10) for i in xrange(0, 1000)):
ustring = u''.join(random.sample(self.letters * ((length // len(self.letters)) + 1), length))
ustring = ustring.encode('utf_8').decode('utf_8')
self.assert_roundtrip(ustring)
self.assert_roundtrip(ustring, length=length*5)
class DateTests(ValueTestCase):
encoder = staticmethod(encode_date)
reader = staticmethod(read_date)
def test_random(self):
pass
if __name__ == '__main__':
unittest.main() | isc | -7,486,734,860,703,075,000 | 27.478788 | 111 | 0.711367 | false |
seung-lab/cloud-volume | cloudvolume/storage/storage_interfaces.py | 1 | 15070 | import six
from collections import defaultdict
import json
import os.path
import posixpath
import re
import boto3
import botocore
from glob import glob
import google.cloud.exceptions
from google.cloud.storage import Batch, Client
import requests
import tenacity
from cloudvolume.connectionpools import S3ConnectionPool, GCloudBucketPool
from cloudvolume.lib import mkdir
from cloudvolume.exceptions import UnsupportedCompressionType
COMPRESSION_EXTENSIONS = ('.gz', '.br')
# This is just to support pooling by bucket
class keydefaultdict(defaultdict):
def __missing__(self, key):
if self.default_factory is None:
raise KeyError( key )
else:
ret = self[key] = self.default_factory(key)
return ret
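# Illustrative behavior (not in the original source): unlike
# collections.defaultdict, the factory of a keydefaultdict receives the
# missing key itself:
#   pool_map = keydefaultdict(lambda bucket: GCloudBucketPool(bucket))
#   pool_map['some-bucket']  # lazily creates GCloudBucketPool('some-bucket')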
S3_POOL = None
GC_POOL = None
def reset_connection_pools():
global S3_POOL
global GC_POOL
S3_POOL = keydefaultdict(lambda service: keydefaultdict(lambda bucket_name: S3ConnectionPool(service, bucket_name)))
GC_POOL = keydefaultdict(lambda bucket_name: GCloudBucketPool(bucket_name))
reset_connection_pools()
retry = tenacity.retry(
reraise=True,
stop=tenacity.stop_after_attempt(7),
wait=tenacity.wait_random_exponential(0.5, 60.0),
)
class StorageInterface(object):
def release_connection(self):
pass
def __enter__(self):
return self
def __exit__(self, exception_type, exception_value, traceback):
self.release_connection()
class FileInterface(StorageInterface):
def __init__(self, path):
super(StorageInterface, self).__init__()
self._path = path
def get_path_to_file(self, file_path):
return os.path.join(
self._path.basepath, self._path.layer, file_path
)
def put_file(
self, file_path, content,
content_type, compress,
cache_control=None
):
path = self.get_path_to_file(file_path)
mkdir(os.path.dirname(path))
# keep default as gzip
if compress == "br":
path += ".br"
elif compress:
path += '.gz'
if content \
and content_type \
and re.search('json|te?xt', content_type) \
and type(content) is str:
content = content.encode('utf-8')
try:
with open(path, 'wb') as f:
f.write(content)
except IOError as err:
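      # naive single retry: a second attempt often succeeds after a transient IOError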
with open(path, 'wb') as f:
f.write(content)
def get_file(self, file_path, start=None, end=None):
path = self.get_path_to_file(file_path)
if os.path.exists(path + '.gz'):
encoding = "gzip"
path += '.gz'
elif os.path.exists(path + '.br'):
encoding = "br"
path += ".br"
else:
encoding = None
try:
with open(path, 'rb') as f:
if start is not None:
f.seek(start)
if end is not None:
start = start if start is not None else 0
num_bytes = end - start
data = f.read(num_bytes)
else:
data = f.read()
return data, encoding
except IOError:
return None, encoding
def exists(self, file_path):
path = self.get_path_to_file(file_path)
return os.path.exists(path) or any(( os.path.exists(path + ext) for ext in COMPRESSION_EXTENSIONS ))
def files_exist(self, file_paths):
return { path: self.exists(path) for path in file_paths }
def delete_file(self, file_path):
path = self.get_path_to_file(file_path)
if os.path.exists(path):
os.remove(path)
elif os.path.exists(path + '.gz'):
os.remove(path + '.gz')
elif os.path.exists(path + ".br"):
os.remove(path + ".br")
def delete_files(self, file_paths):
for path in file_paths:
self.delete_file(path)
def list_files(self, prefix, flat):
"""
List the files in the layer with the given prefix.
flat means only generate one level of a directory,
while non-flat means generate all file paths with that
prefix.
"""
layer_path = self.get_path_to_file("")
path = os.path.join(layer_path, prefix) + '*'
filenames = []
remove = layer_path
if len(remove) and remove[-1] != '/':
remove += '/'
if flat:
for file_path in glob(path):
if not os.path.isfile(file_path):
continue
filename = file_path.replace(remove, '')
filenames.append(filename)
else:
subdir = os.path.join(layer_path, os.path.dirname(prefix))
for root, dirs, files in os.walk(subdir):
files = [ os.path.join(root, f) for f in files ]
files = [ f.replace(remove, '') for f in files ]
files = [ f for f in files if f[:len(prefix)] == prefix ]
for filename in files:
filenames.append(filename)
def stripext(fname):
(base, ext) = os.path.splitext(fname)
if ext in COMPRESSION_EXTENSIONS:
return base
else:
return fname
filenames = list(map(stripext, filenames))
return _radix_sort(filenames).__iter__()
class GoogleCloudStorageInterface(StorageInterface):
def __init__(self, path):
super(StorageInterface, self).__init__()
global GC_POOL
self._path = path
self._bucket = GC_POOL[path.bucket].get_connection()
def get_path_to_file(self, file_path):
return posixpath.join(self._path.no_bucket_basepath, self._path.layer, file_path)
@retry
def put_file(self, file_path, content, content_type, compress, cache_control=None):
key = self.get_path_to_file(file_path)
blob = self._bucket.blob( key )
# gcloud disable brotli until content-encoding works
if compress == "br":
raise UnsupportedCompressionType("Brotli unsupported on google cloud storage")
elif compress:
blob.content_encoding = "gzip"
if cache_control:
blob.cache_control = cache_control
blob.upload_from_string(content, content_type)
@retry
def get_file(self, file_path, start=None, end=None):
key = self.get_path_to_file(file_path)
blob = self._bucket.blob( key )
if start is not None:
start = int(start)
if end is not None:
end = int(end - 1)
try:
# blob handles the decompression so the encoding is None
return blob.download_as_bytes(start=start, end=end), None # content, encoding
except google.cloud.exceptions.NotFound as err:
return None, None
@retry
def exists(self, file_path):
key = self.get_path_to_file(file_path)
blob = self._bucket.blob(key)
return blob.exists()
def files_exist(self, file_paths):
result = {path: None for path in file_paths}
MAX_BATCH_SIZE = Batch._MAX_BATCH_SIZE
for i in range(0, len(file_paths), MAX_BATCH_SIZE):
# Retrieve current batch of blobs. On Batch __exit__ it will populate all
# future responses before raising errors about the (likely) missing keys.
try:
with self._bucket.client.batch():
for file_path in file_paths[i:i+MAX_BATCH_SIZE]:
key = self.get_path_to_file(file_path)
result[file_path] = self._bucket.get_blob(key)
except google.cloud.exceptions.NotFound as err:
pass # Missing keys are expected
for file_path, blob in result.items():
# Blob exists if ``dict``, missing if ``_FutureDict``
result[file_path] = isinstance(blob._properties, dict)
return result
@retry
def delete_file(self, file_path):
key = self.get_path_to_file(file_path)
try:
self._bucket.delete_blob( key )
except google.cloud.exceptions.NotFound:
pass
def delete_files(self, file_paths):
MAX_BATCH_SIZE = Batch._MAX_BATCH_SIZE
for i in range(0, len(file_paths), MAX_BATCH_SIZE):
try:
with self._bucket.client.batch():
for file_path in file_paths[i : i + MAX_BATCH_SIZE]:
key = self.get_path_to_file(file_path)
self._bucket.delete_blob(key)
except google.cloud.exceptions.NotFound:
pass
@retry
def list_files(self, prefix, flat=False):
"""
List the files in the layer with the given prefix.
flat means only generate one level of a directory,
while non-flat means generate all file paths with that
prefix.
"""
layer_path = self.get_path_to_file("")
path = posixpath.join(layer_path, prefix)
for blob in self._bucket.list_blobs(prefix=path):
filename = blob.name.replace(layer_path, '')
if not filename:
continue
elif not flat and filename[-1] != '/':
yield filename
elif flat and '/' not in blob.name.replace(path, ''):
yield filename
def release_connection(self):
global GC_POOL
GC_POOL[self._path.bucket].release_connection(self._bucket)
class HttpInterface(StorageInterface):
def __init__(self, path):
super(StorageInterface, self).__init__()
self._path = path
def get_path_to_file(self, file_path):
path = posixpath.join(
self._path.basepath, self._path.layer, file_path
)
return self._path.protocol + '://' + path
# @retry
def delete_file(self, file_path):
raise NotImplementedError()
def delete_files(self, file_paths):
raise NotImplementedError()
# @retry
def put_file(self, file_path, content, content_type, compress, cache_control=None):
raise NotImplementedError()
@retry
def get_file(self, file_path, start=None, end=None):
key = self.get_path_to_file(file_path)
if start is not None or end is not None:
start = int(start) if start is not None else ''
end = int(end - 1) if end is not None else ''
headers = { "Range": "bytes={}-{}".format(start, end) }
resp = requests.get(key, headers=headers)
else:
resp = requests.get(key)
if resp.status_code in (404, 403):
return None, None
resp.raise_for_status()
if 'Content-Encoding' not in resp.headers:
return resp.content, None
# requests automatically decodes these
elif resp.headers['Content-Encoding'] in ('', 'gzip', 'deflate', 'br'):
return resp.content, None
else:
return resp.content, resp.headers['Content-Encoding']
@retry
def exists(self, file_path):
key = self.get_path_to_file(file_path)
resp = requests.get(key, stream=True)
resp.close()
return resp.ok
def files_exist(self, file_paths):
return {path: self.exists(path) for path in file_paths}
def list_files(self, prefix, flat=False):
raise NotImplementedError()
class S3Interface(StorageInterface):
def __init__(self, path):
super(StorageInterface, self).__init__()
global S3_POOL
self._path = path
self._conn = S3_POOL[path.protocol][path.bucket].get_connection()
def get_path_to_file(self, file_path):
return posixpath.join(self._path.no_bucket_basepath, self._path.layer, file_path)
@retry
def put_file(self, file_path, content, content_type, compress, cache_control=None, ACL="bucket-owner-full-control"):
key = self.get_path_to_file(file_path)
attrs = {
'Bucket': self._path.bucket,
'Body': content,
'Key': key,
'ContentType': (content_type or 'application/octet-stream'),
'ACL': ACL,
}
# keep gzip as default
if compress == "br":
attrs['ContentEncoding'] = 'br'
elif compress:
attrs['ContentEncoding'] = 'gzip'
if cache_control:
attrs['CacheControl'] = cache_control
self._conn.put_object(**attrs)
@retry
def get_file(self, file_path, start=None, end=None):
"""
    There are many types of exceptions which can get raised
from this method. We want to make sure we only return
None when the file doesn't exist.
"""
kwargs = {}
if start is not None or end is not None:
start = int(start) if start is not None else ''
end = int(end - 1) if end is not None else ''
kwargs['Range'] = "bytes={}-{}".format(start, end)
try:
resp = self._conn.get_object(
Bucket=self._path.bucket,
Key=self.get_path_to_file(file_path),
**kwargs
)
encoding = ''
if 'ContentEncoding' in resp:
encoding = resp['ContentEncoding']
return resp['Body'].read(), encoding
except botocore.exceptions.ClientError as err:
if err.response['Error']['Code'] == 'NoSuchKey':
return None, None
else:
raise
def exists(self, file_path):
exists = True
try:
self._conn.head_object(
Bucket=self._path.bucket,
Key=self.get_path_to_file(file_path),
)
except botocore.exceptions.ClientError as e:
if e.response['Error']['Code'] == "404":
exists = False
else:
raise
return exists
def files_exist(self, file_paths):
return {path: self.exists(path) for path in file_paths}
@retry
def delete_file(self, file_path):
# Not necessary to handle 404s here.
# From the boto3 documentation:
# delete_object(**kwargs)
# Removes the null version (if there is one) of an object and inserts a delete marker,
# which becomes the latest version of the object. If there isn't a null version,
# Amazon S3 does not remove any objects.
# https://boto3.amazonaws.com/v1/documentation/api/latest/reference/services/s3.html#S3.Client.delete_object
self._conn.delete_object(
Bucket=self._path.bucket,
Key=self.get_path_to_file(file_path),
)
def delete_files(self, file_paths):
for path in file_paths:
self.delete_file(path)
def list_files(self, prefix, flat=False):
"""
List the files in the layer with the given prefix.
flat means only generate one level of a directory,
while non-flat means generate all file paths with that
prefix.
"""
layer_path = self.get_path_to_file("")
path = posixpath.join(layer_path, prefix)
@retry
def s3lst(continuation_token=None):
kwargs = {
'Bucket': self._path.bucket,
'Prefix': path,
}
if continuation_token:
kwargs['ContinuationToken'] = continuation_token
return self._conn.list_objects_v2(**kwargs)
resp = s3lst()
def iterate(resp):
if 'Contents' not in resp.keys():
resp['Contents'] = []
for item in resp['Contents']:
key = item['Key']
filename = key.replace(layer_path, '')
if not flat and filename[-1] != '/':
yield filename
elif flat and '/' not in key.replace(path, ''):
yield filename
for filename in iterate(resp):
yield filename
while resp['IsTruncated'] and resp['NextContinuationToken']:
resp = s3lst(resp['NextContinuationToken'])
for filename in iterate(resp):
yield filename
def release_connection(self):
global S3_POOL
S3_POOL[self._path.protocol][self._path.bucket].release_connection(self._conn)
def _radix_sort(L, i=0):
"""
Most significant char radix sort
"""
if len(L) <= 1:
return L
done_bucket = []
  buckets = [ [] for x in range(256) ] # one bucket per possible byte value; 255 would drop chr(255)
for s in L:
if i >= len(s):
done_bucket.append(s)
else:
buckets[ ord(s[i]) ].append(s)
buckets = [ _radix_sort(b, i + 1) for b in buckets ]
return done_bucket + [ b for blist in buckets for b in blist ] | bsd-3-clause | 6,737,976,082,698,986,000 | 27.76145 | 118 | 0.633046 | false |
jimmyho/thehobot | example/example.py | 1 | 2968 | from errbot import BotPlugin, botcmd, arg_botcmd, webhook
class Example(BotPlugin):
"""
Just an example
"""
def activate(self):
"""
Triggers on plugin activation
You should delete it if you're not using it to override any default behaviour
"""
super(Example, self).activate()
def deactivate(self):
"""
Triggers on plugin deactivation
You should delete it if you're not using it to override any default behaviour
"""
super(Example, self).deactivate()
def get_configuration_template(self):
"""
Defines the configuration structure this plugin supports
You should delete it if your plugin doesn't use any configuration like this
"""
return {'EXAMPLE_KEY_1': "Example value",
'EXAMPLE_KEY_2': ["Example", "Value"]
}
def check_configuration(self, configuration):
"""
Triggers when the configuration is checked, shortly before activation
Raise a errbot.utils.ValidationException in case of an error
You should delete it if you're not using it to override any default behaviour
"""
super(Example, self).check_configuration(configuration)
def callback_connect(self):
"""
Triggers when bot is connected
You should delete it if you're not using it to override any default behaviour
"""
pass
def callback_message(self, message):
"""
Triggered for every received message that isn't coming from the bot itself
You should delete it if you're not using it to override any default behaviour
"""
pass
def callback_botmessage(self, message):
"""
Triggered for every message that comes from the bot itself
You should delete it if you're not using it to override any default behaviour
"""
pass
@webhook
def example_webhook(self, incoming_request):
"""A webhook which simply returns 'Example'"""
return "Example"
# Passing split_args_with=None will cause arguments to be split on any kind
# of whitespace, just like Python's split() does
@botcmd(split_args_with=None)
def example(self, message, args):
"""A command which simply returns 'Example'"""
return "Example"
@arg_botcmd('name', type=str)
@arg_botcmd('--favorite-number', type=int, unpack_args=False)
def hello(self, message, args):
"""
A command which says hello to someone.
If you include --favorite-number, it will also tell you their
favorite number.
"""
if args.favorite_number is None:
return "Hello {name}".format(name=args.name)
else:
return "Hello {name}, I hear your favorite number is {number}".format(
name=args.name,
number=args.favorite_number,
)
| mit | 482,288,891,811,556,030 | 29.916667 | 85 | 0.61624 | false |
SCPR/kpcc_backroom_handshakes | measure_finance/models.py | 1 | 7431 | from django.conf import settings
from django.db import models
from django.utils.encoding import smart_str
from django.utils import timezone
from django.template.defaultfilters import slugify
from django.shortcuts import get_object_or_404
from election_registrar import models as registrar
import logging
import time
import datetime
logger = logging.getLogger("kpcc_backroom_handshakes")
class Measure(models.Model):
election = models.ForeignKey(registrar.Election, null=True)
measure_id = models.IntegerField("", null=True, blank=True)
official_identifier = models.CharField("official_identifier", max_length=255, null=True, blank=True)
official_identifier_slug = models.SlugField("official_identifier_slug", max_length=140, null=True, blank=True)
topic = models.CharField("", max_length=255, null=True, blank=True)
official_title = models.CharField("", max_length=255, null=True, blank=True)
official_short_summary = models.TextField(" ", null=True, blank=True)
official_summary = models.TextField(" ", null=True, blank=True)
official_summary_author = models.CharField("", max_length=255, null=True, blank=True)
official_yes_vote_means = models.TextField(" ", null=True, blank=True)
official_no_vote_means = models.TextField(" ", null=True, blank=True)
official_vote_means_source = models.CharField("", max_length=255, null=True, blank=True)
official_financial_effect = models.TextField(" ", null=True, blank=True)
official_financial_effect_author = models.CharField("", max_length=255, null=True, blank=True)
official_impartial_analysis = models.TextField(" ", null=True, blank=True)
official_impartial_analysis_author = models.CharField("", max_length=255, null=True, blank=True)
# official_background = models.TextField(" ", null=True, blank=True)
# official_background_author = models.CharField("", max_length=255, null=True, blank=True)
official_tax_rate = models.CharField("", max_length=255, null=True, blank=True)
official_tax_rate_author = models.CharField("", max_length=255, null=True, blank=True)
official_short_arguments_yes = models.TextField(" ", null=True, blank=True)
official_short_arguments_no = models.TextField(" ", null=True, blank=True)
official_short_arguments_source = models.CharField("", max_length=255, null=True, blank=True)
# official_arguments_yes = models.TextField(" ", null=True, blank=True)
# official_arguments_no = models.TextField(" ", null=True, blank=True)
# official_arguments_source = models.CharField("", max_length=255, null=True, blank=True)
official_rebuttal_yes = models.TextField(" ", null=True, blank=True)
official_rebuttal_no = models.TextField(" ", null=True, blank=True)
measure_type = models.CharField("", max_length=255, null=True, blank=True)
passage_requirements = models.CharField("", max_length=255, null=True, blank=True)
fulltext_link = models.URLField("fulltext_link", max_length=1024, null=True, blank=True)
# full_text = models.TextField(" ", null=True, blank=True)
# simplified_title = models.CharField("", max_length=255, null=True, blank=True)
# way_it_is = models.TextField(" ", null=True, blank=True)
# what_if_pass = models.TextField(" ", null=True, blank=True)
# budget_effect = models.TextField(" ", null=True, blank=True)
# people_for_say = models.TextField(" ", null=True, blank=True)
# people_against_say = models.TextField(" ", null=True, blank=True)
# evg_source = models.CharField("", max_length=255, null=True, blank=True)
# lwv_question = models.TextField(" ", null=True, blank=True)
# lwv_situation = models.TextField(" ", null=True, blank=True)
# lwv_proposal = models.TextField(" ", null=True, blank=True)
# lwv_fiscal_effects = models.TextField(" ", null=True, blank=True)
# lwv_supporters_say = models.TextField(" ", null=True, blank=True)
# lwv_opponents_say = models.TextField(" ", null=True, blank=True)
# lwv_source = models.CharField("", max_length=255, null=True, blank=True)
# status = models.CharField("", max_length=255, null=True, blank=True)
# votes_for = models.CharField("", max_length=255, null=True, blank=True)
# votes_against = models.CharField("", max_length=255, null=True, blank=True)
# weight = models.CharField("", max_length=255, null=True, blank=True)
published = models.CharField("", max_length=255, null=True, blank=True)
disable_finance_data = models.CharField("", max_length=255, null=True, blank=True)
deleted = models.CharField("", max_length=255, null=True, blank=True)
entity_type = models.CharField("", max_length=255, null=True, blank=True)
measure_timestamp = models.DateTimeField("", null=True, blank=True)
created = models.DateTimeField("Date Created", auto_now_add=True)
modified = models.DateTimeField("Date Modified", auto_now=True)
def __unicode__(self):
return self.official_identifier
def get_absolute_url(self):
return ("measure-detail")
class MeasureContributor(models.Model):
measure = models.ForeignKey(Measure)
finance_top_id = models.IntegerField("", null=True, blank=True)
top_type = models.CharField("", max_length=255, null=True, blank=True)
support = models.CharField("", max_length=255, null=True, blank=True)
name = models.CharField("", max_length=255, null=True, blank=True)
total_amount = models.FloatField("", null=True, blank=True)
total_individual = models.FloatField("", null=True, blank=True)
total_organization = models.FloatField("", null=True, blank=True)
percentage_total = models.FloatField("", null=True, blank=True)
percentage_individual = models.FloatField("", null=True, blank=True)
percentage_organization = models.FloatField("", null=True, blank=True)
updated_date = models.DateField("", null=True, blank=True)
entity_type = models.IntegerField("", null=True, blank=True)
finance_top_timestamp = models.DateTimeField("", null=True, blank=True)
created = models.DateTimeField("Date Created", auto_now_add=True)
modified = models.DateTimeField("Date Modified", auto_now=True)
def __unicode__(self):
return self.name
class MeasureTotal(models.Model):
measure = models.ForeignKey(Measure)
finance_id = models.CharField("", max_length=255, null=True, blank=True)
support = models.CharField("", max_length=255, null=True, blank=True)
total_amount = models.FloatField("", null=True, blank=True)
total_individual = models.FloatField("", null=True, blank=True)
total_unitemized = models.FloatField("", null=True, blank=True)
total_itemized = models.FloatField("", null=True, blank=True)
total_organization = models.FloatField("", null=True, blank=True)
pct_individual = models.FloatField("", null=True, blank=True)
pct_organization = models.FloatField("", null=True, blank=True)
pct_unitemized = models.FloatField("", null=True, blank=True)
pct_itemized = models.FloatField("", null=True, blank=True)
updated_date = models.DateField("", null=True, blank=True)
entity_type = models.IntegerField("", null=True, blank=True)
finance_timestamp = models.DateTimeField("", null=True, blank=True)
created = models.DateTimeField("Date Created", auto_now_add=True)
modified = models.DateTimeField("Date Modified", auto_now=True)
def __unicode__(self):
return self.support
| mit | 9,157,139,782,778,764,000 | 59.909836 | 114 | 0.704885 | false |
MiguelSR/djangosaml2 | djangosaml2/views.py | 1 | 17432 | # Copyright (C) 2010-2013 Yaco Sistemas (http://www.yaco.es)
# Copyright (C) 2009 Lorenzo Gil Sanchez <[email protected]>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
try:
from xml.etree import ElementTree
except ImportError:
from elementtree import ElementTree
from django.conf import settings
from django.contrib import auth
from django.contrib.auth.decorators import login_required
from django.contrib.auth.views import logout as django_logout
from django.http import Http404, HttpResponse
from django.http import HttpResponseRedirect # 30x
from django.http import HttpResponseBadRequest, HttpResponseForbidden # 40x
from django.http import HttpResponseServerError # 50x
from django.views.decorators.http import require_POST
from django.shortcuts import render_to_response
from django.template import RequestContext, TemplateDoesNotExist
try:
from django.views.decorators.csrf import csrf_exempt
except ImportError:
# Django 1.0 compatibility
def csrf_exempt(view_func):
return view_func
from saml2 import BINDING_HTTP_REDIRECT, BINDING_HTTP_POST
from saml2.client import Saml2Client
from saml2.metadata import entity_descriptor
from saml2.ident import code, decode
from djangosaml2.cache import IdentityCache, OutstandingQueriesCache
from djangosaml2.cache import StateCache
from djangosaml2.conf import get_config
from djangosaml2.signals import post_authenticated
from djangosaml2.utils import get_custom_setting, available_idps, get_location, \
get_hidden_form_inputs
logger = logging.getLogger('djangosaml2')
def _set_subject_id(session, subject_id):
session['_saml2_subject_id'] = code(subject_id)
def _get_subject_id(session):
try:
return decode(session['_saml2_subject_id'])
except KeyError:
return None
def login(request,
config_loader_path=None,
wayf_template='djangosaml2/wayf.html',
authorization_error_template='djangosaml2/auth_error.html',
post_binding_form_template='djangosaml2/post_binding_form.html'):
"""SAML Authorization Request initiator
This view initiates the SAML2 Authorization handshake
using the pysaml2 library to create the AuthnRequest.
It uses the SAML 2.0 Http Redirect protocol binding.
* post_binding_form_template - path to a template containing HTML form with
hidden input elements, used to send the SAML message data when HTTP POST
binding is being used. You can customize this template to include custom
branding and/or text explaining the automatic redirection process. Please
see the example template in
templates/djangosaml2/example_post_binding_form.html
If set to None or nonexistent template, default form from the saml2 library
will be rendered.
"""
logger.debug('Login process started')
came_from = request.GET.get('next', settings.LOGIN_REDIRECT_URL)
if not came_from:
logger.warning('The next parameter exists but is empty')
came_from = settings.LOGIN_REDIRECT_URL
# if the user is already authenticated that maybe because of two reasons:
# A) He has this URL in two browser windows and in the other one he
# has already initiated the authenticated session.
# B) He comes from a view that (incorrectly) send him here because
# he does not have enough permissions. That view should have shown
# an authorization error in the first place.
# We can only make one thing here and that is configurable with the
# SAML_IGNORE_AUTHENTICATED_USERS_ON_LOGIN setting. If that setting
# is True (default value) we will redirect him to the came_from view.
# Otherwise, we will show an (configurable) authorization error.
if not request.user.is_anonymous():
try:
redirect_authenticated_user = settings.SAML_IGNORE_AUTHENTICATED_USERS_ON_LOGIN
except AttributeError:
redirect_authenticated_user = True
if redirect_authenticated_user:
return HttpResponseRedirect(came_from)
else:
logger.debug('User is already logged in')
return render_to_response(authorization_error_template, {
'came_from': came_from,
}, context_instance=RequestContext(request))
selected_idp = request.GET.get('idp', None)
conf = get_config(config_loader_path, request)
# is a embedded wayf needed?
idps = available_idps(conf)
if selected_idp is None and len(idps) > 1:
logger.debug('A discovery process is needed')
return render_to_response(wayf_template, {
'available_idps': idps.items(),
'came_from': came_from,
}, context_instance=RequestContext(request))
# Choose binding (REDIRECT vs. POST).
# When authn_requests_signed is turned on, HTTP Redirect binding cannot be
# used the same way as without signatures; proper usage in this case involves
# stripping out the signature from SAML XML message and creating a new
# signature, following precise steps defined in the SAML2.0 standard.
#
# It is not feasible to implement this since we wouldn't be able to use an
# external (xmlsec1) library to handle the signatures - more (higher level)
# context is needed in order to create such signature (like the value of
# RelayState parameter).
#
# Therefore it is much easier to use the HTTP POST binding in this case, as
# it can relay the whole signed SAML message as is, without the need to
# manipulate the signature or the XML message itself.
#
# Read more in the official SAML2 specs (3.4.4.1):
# http://docs.oasis-open.org/security/saml/v2.0/saml-bindings-2.0-os.pdf
binding = BINDING_HTTP_POST if getattr(conf, '_sp_authn_requests_signed', False) else BINDING_HTTP_REDIRECT
client = Saml2Client(conf)
try:
(session_id, result) = client.prepare_for_authenticate(
entityid=selected_idp, relay_state=came_from,
binding=binding,
)
except TypeError as e:
logger.error('Unable to know which IdP to use')
return HttpResponse(unicode(e))
logger.debug('Saving the session_id in the OutstandingQueries cache')
oq_cache = OutstandingQueriesCache(request.session)
oq_cache.set(session_id, came_from)
logger.debug('Redirecting user to the IdP via %s binding.', binding.split(':')[-1])
if binding == BINDING_HTTP_REDIRECT:
return HttpResponseRedirect(get_location(result))
elif binding == BINDING_HTTP_POST:
if not post_binding_form_template:
return HttpResponse(result['data'])
try:
params = get_hidden_form_inputs(result['data'][3])
return render_to_response(post_binding_form_template, {
'target_url': result['url'],
'params': params,
}, context_instance=RequestContext(request))
except TemplateDoesNotExist:
return HttpResponse(result['data'])
else:
raise NotImplementedError('Unsupported binding: %s', binding)
@require_POST
@csrf_exempt
def assertion_consumer_service(request,
config_loader_path=None,
attribute_mapping=None,
create_unknown_user=None):
"""SAML Authorization Response endpoint
The IdP will send its response to this view, which
will process it with pysaml2 help and log the user
in using the custom Authorization backend
djangosaml2.backends.Saml2Backend that should be
enabled in the settings.py
"""
attribute_mapping = attribute_mapping or get_custom_setting(
'SAML_ATTRIBUTE_MAPPING', {'uid': ('username', )})
create_unknown_user = create_unknown_user or get_custom_setting(
'SAML_CREATE_UNKNOWN_USER', True)
logger.debug('Assertion Consumer Service started')
conf = get_config(config_loader_path, request)
if 'SAMLResponse' not in request.POST:
return HttpResponseBadRequest(
'Couldn\'t find "SAMLResponse" in POST data.')
xmlstr = request.POST['SAMLResponse']
client = Saml2Client(conf, identity_cache=IdentityCache(request.session))
oq_cache = OutstandingQueriesCache(request.session)
outstanding_queries = oq_cache.outstanding_queries()
# process the authentication response
response = client.parse_authn_request_response(xmlstr, BINDING_HTTP_POST,
outstanding_queries)
if response is None:
logger.error('SAML response is None')
return HttpResponseBadRequest(
"SAML response has errors. Please check the logs")
session_id = response.session_id()
oq_cache.delete(session_id)
# authenticate the remote user
session_info = response.session_info()
if callable(attribute_mapping):
attribute_mapping = attribute_mapping()
if callable(create_unknown_user):
create_unknown_user = create_unknown_user()
logger.debug('Trying to authenticate the user')
user = auth.authenticate(session_info=session_info,
attribute_mapping=attribute_mapping,
create_unknown_user=create_unknown_user)
if user is None:
logger.error('The user is None')
return HttpResponseForbidden("Permission denied")
auth.login(request, user)
_set_subject_id(request.session, session_info['name_id'])
logger.debug('Sending the post_authenticated signal')
post_authenticated.send_robust(sender=user, session_info=session_info)
# redirect the user to the view where he came from
default_relay_state = get_custom_setting('ACS_DEFAULT_REDIRECT_URL',
settings.LOGIN_REDIRECT_URL)
relay_state = request.POST.get('RelayState', default_relay_state)
if not relay_state:
logger.warning('The RelayState parameter exists but is empty')
relay_state = default_relay_state
logger.debug('Redirecting to the RelayState: %s', relay_state)
return HttpResponseRedirect(relay_state)
@login_required
def echo_attributes(request,
config_loader_path=None,
template='djangosaml2/echo_attributes.html'):
"""Example view that echo the SAML attributes of an user"""
state = StateCache(request.session)
conf = get_config(config_loader_path, request)
client = Saml2Client(conf, state_cache=state,
identity_cache=IdentityCache(request.session))
subject_id = _get_subject_id(request.session)
identity = client.users.get_identity(subject_id,
check_not_on_or_after=False)
return render_to_response(template, {'attributes': identity[0]},
context_instance=RequestContext(request))
@login_required
def logout(request, config_loader_path=None):
"""SAML Logout Request initiator
This view initiates the SAML2 Logout request
using the pysaml2 library to create the LogoutRequest.
"""
logger.debug('Logout process started')
state = StateCache(request.session)
conf = get_config(config_loader_path, request)
client = Saml2Client(conf, state_cache=state,
identity_cache=IdentityCache(request.session))
subject_id = _get_subject_id(request.session)
if subject_id is None:
logger.warning(
'The session does not contains the subject id for user %s',
request.user)
result = client.global_logout(subject_id)
state.sync()
if not result:
logger.error("Looks like the user %s is not logged in any IdP/AA", subject_id)
return HttpResponseBadRequest("You are not logged in any IdP/AA")
if len(result) > 1:
logger.error('Sorry, I do not know how to logout from several sources. I will logout just from the first one')
for entityid, logout_info in result.items():
if isinstance(logout_info, tuple):
binding, http_info = logout_info
if binding == BINDING_HTTP_POST:
logger.debug('Returning form to the IdP to continue the logout process')
body = ''.join(http_info['data'])
return HttpResponse(body)
elif binding == BINDING_HTTP_REDIRECT:
logger.debug('Redirecting to the IdP to continue the logout process')
return HttpResponseRedirect(get_location(http_info))
else:
logger.error('Unknown binding: %s', binding)
return HttpResponseServerError('Failed to log out')
else:
# We must have had a soap logout
return finish_logout(request, logout_info)
    logger.error('Could not log out because only the HTTP_REDIRECT binding is supported')
return HttpResponseServerError('Logout Binding not supported')
def logout_service(request, *args, **kwargs):
return do_logout_service(request, request.GET, BINDING_HTTP_REDIRECT, *args, **kwargs)
@csrf_exempt
def logout_service_post(request, *args, **kwargs):
return do_logout_service(request, request.POST, BINDING_HTTP_POST, *args, **kwargs)
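# A minimal wiring sketch (hypothetical project urls.py); the URL patterns
# and names below are assumptions for illustration, not part of this module:
#
#   urlpatterns += [
#       url(r'^saml2/logout/$', logout, name='saml2_logout'),
#       url(r'^saml2/ls/$', logout_service, name='saml2_ls'),
#       url(r'^saml2/ls/post/$', logout_service_post, name='saml2_ls_post'),
#       url(r'^saml2/metadata/$', metadata, name='saml2_metadata'),
#   ]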
def do_logout_service(request, data, binding, config_loader_path=None, next_page=None,
logout_error_template='djangosaml2/logout_error.html'):
"""SAML Logout Response endpoint
The IdP will send the logout response to this view,
which will process it with pysaml2 help and log the user
out.
Note that the IdP can request a logout even when
we didn't initiate the process as a single logout
request started by another SP.
"""
logger.debug('Logout service started')
conf = get_config(config_loader_path, request)
state = StateCache(request.session)
client = Saml2Client(conf, state_cache=state,
identity_cache=IdentityCache(request.session))
if 'SAMLResponse' in data: # we started the logout
logger.debug('Receiving a logout response from the IdP')
response = client.parse_logout_request_response(data['SAMLResponse'], binding)
state.sync()
return finish_logout(request, response, next_page=next_page)
elif 'SAMLRequest' in data: # logout started by the IdP
logger.debug('Receiving a logout request from the IdP')
subject_id = _get_subject_id(request.session)
if subject_id is None:
logger.warning(
'The session does not contain the subject id for user %s. Performing local logout',
request.user)
auth.logout(request)
return render_to_response(logout_error_template, {},
context_instance=RequestContext(request))
else:
http_info = client.handle_logout_request(
data['SAMLRequest'],
subject_id,
binding)
state.sync()
auth.logout(request)
return HttpResponseRedirect(get_location(http_info))
else:
logger.error('No SAMLResponse or SAMLRequest parameter found')
raise Http404('No SAMLResponse or SAMLRequest parameter found')
def finish_logout(request, response, next_page=None):
if response and response.status_ok():
if next_page is None and hasattr(settings, 'LOGOUT_REDIRECT_URL'):
next_page = settings.LOGOUT_REDIRECT_URL
logger.debug('Performing django_logout with a next_page of %s',
next_page)
return django_logout(request, next_page=next_page)
else:
logger.error('Unknown error during the logout')
return HttpResponse('Error during logout')
def metadata(request, config_loader_path=None, valid_for=None):
"""Returns an XML with the SAML 2.0 metadata for this
SP as configured in the settings.py file.
"""
conf = get_config(config_loader_path, request)
metadata = entity_descriptor(conf)
return HttpResponse(content=str(metadata),
content_type="text/xml; charset=utf8")
def register_namespace_prefixes():
from saml2 import md, saml, samlp
try:
from saml2 import xmlenc
from saml2 import xmldsig
except ImportError:
import xmlenc
import xmldsig
prefixes = (('saml', saml.NAMESPACE),
('samlp', samlp.NAMESPACE),
('md', md.NAMESPACE),
('ds', xmldsig.NAMESPACE),
('xenc', xmlenc.NAMESPACE))
if hasattr(ElementTree, 'register_namespace'):
for prefix, namespace in prefixes:
ElementTree.register_namespace(prefix, namespace)
else:
for prefix, namespace in prefixes:
ElementTree._namespace_map[namespace] = prefix
register_namespace_prefixes()
| apache-2.0 | 4,200,962,372,265,607,000 | 40.308057 | 118 | 0.672155 | false |
ttroy50/cmake-examples | 04-static-analysis/clang-format/cmake/scripts/clang-format-check-changed.py | 1 | 5095 | #!/usr/bin/env python
import argparse
import os
import sys
import subprocess
def check_file(filename, excludes, extensions):
"""
Check if a file should be included in our check
"""
name, ext = os.path.splitext(filename)
if len(ext) > 0 and ext in extensions:
if len(excludes) == 0:
return True
for exclude in excludes:
if exclude in filename:
return False
return True
return False
def check_directory(directory, excludes, extensions):
output = []
if len(excludes) > 0:
for exclude in excludes:
if exclude in directory:
                # the whole directory is excluded
                return output
for root, _, files in os.walk(directory):
for file in files:
filename = os.path.join(root, file)
if check_file(filename, excludes, extensions):
print("Will check file [{}]".format(filename))
output.append(filename)
return output
def get_git_root(git_bin):
cmd = [git_bin, "rev-parse", "--show-toplevel"]
try:
return subprocess.check_output(cmd).strip()
    except subprocess.CalledProcessError as e:
print("Error calling git [{}]".format(e))
raise
def clean_git_filename(line):
"""
Takes a line from git status --porcelain and returns the filename
"""
file = None
git_status = line[:2]
# Not an exhaustive list of git status output but should
# be enough for this case
# check if this is a delete
if 'D' in git_status:
return None
# ignored file
if '!' in git_status:
return None
# Covers renamed files
if '->' in line:
file = line[3:].split('->')[-1].strip()
else:
file = line[3:].strip()
return file
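# A quick sketch (hypothetical porcelain lines) of what clean_git_filename
# is expected to return for a few common statuses:
#
#   clean_git_filename(' M src/main.cpp')       -> 'src/main.cpp'
#   clean_git_filename('R  old.cpp -> new.cpp') -> 'new.cpp'
#   clean_git_filename(' D removed.cpp')        -> None (deletions skipped)
#   clean_git_filename('!! build/')             -> None (ignored files)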
def get_changed_files(git_bin, excludes, file_extensions):
"""
Run git status and return the list of changed files
"""
extensions = file_extensions.split(",")
# arguments coming from cmake will be *.xx. We want to remove the *
for i, extension in enumerate(extensions):
if extension[0] == '*':
extensions[i] = extension[1:]
git_root = get_git_root(git_bin)
cmd = [git_bin, "status", "--porcelain", "--ignore-submodules"]
print("git cmd = {}".format(cmd))
output = []
returncode = 0
try:
cmd_output = subprocess.check_output(cmd)
for line in cmd_output.split('\n'):
if len(line) > 0:
file = clean_git_filename(line)
if not file:
continue
file = os.path.join(git_root, file)
if file[-1] == "/":
directory_files = check_directory(
                        file, excludes, extensions)
output = output + directory_files
else:
                    if check_file(file, excludes, extensions):
print("Will check file [{}]".format(file))
output.append(file)
    except subprocess.CalledProcessError as e:
print("Error calling git [{}]".format(e))
returncode = e.returncode
return output, returncode
def run_clang_format(clang_format_bin, changed_files):
"""
Run clang format on a list of files
@return 0 if formatted correctly.
"""
if len(changed_files) == 0:
return 0
cmd = [clang_format_bin, "-style=file",
"-output-replacements-xml"] + changed_files
print("clang-format cmd = {}".format(cmd))
try:
cmd_output = subprocess.check_output(cmd)
if "replacement offset" in cmd_output:
print("ERROR: Changed files don't match format")
return 1
    except subprocess.CalledProcessError as e:
print("Error calling clang-format [{}]".format(e))
return e.returncode
return 0
def cli():
# global params
parser = argparse.ArgumentParser(prog='clang-format-check-changed',
                                     description='Checks if files changed in git match the .clang-format specification')
parser.add_argument("--file-extensions", type=str,
default=".cpp,.h,.cxx,.hxx,.hpp,.cc,.ipp",
help="Comma separated list of file extensions to check")
parser.add_argument('--exclude', action='append', default=[],
help='Will not match the files / directories with these in the name')
parser.add_argument('--clang-format-bin', type=str, default="clang-format",
help="The clang format binary")
parser.add_argument('--git-bin', type=str, default="git",
help="The git binary")
args = parser.parse_args()
    # Find the files changed in git and check them with clang-format
changed_files, returncode = get_changed_files(
args.git_bin, args.exclude, args.file_extensions)
if returncode != 0:
return returncode
return run_clang_format(args.clang_format_bin, changed_files)
if __name__ == '__main__':
sys.exit(cli())
| mit | -7,903,845,470,487,237,000 | 30.257669 | 120 | 0.575466 | false |
datapythonista/pandas | pandas/tests/indexing/test_scalar.py | 4 | 9940 | """ test scalar indexing, including at and iat """
from datetime import (
datetime,
timedelta,
)
import numpy as np
import pytest
from pandas import (
DataFrame,
Series,
Timedelta,
Timestamp,
date_range,
)
import pandas._testing as tm
from pandas.tests.indexing.common import Base
class TestScalar(Base):
@pytest.mark.parametrize("kind", ["series", "frame"])
def test_at_and_iat_get(self, kind):
def _check(f, func, values=False):
if f is not None:
indices = self.generate_indices(f, values)
for i in indices:
result = getattr(f, func)[i]
expected = self.get_value(func, f, i, values)
tm.assert_almost_equal(result, expected)
d = getattr(self, kind)
# iat
for f in [d["ints"], d["uints"]]:
_check(f, "iat", values=True)
for f in [d["labels"], d["ts"], d["floats"]]:
if f is not None:
msg = "iAt based indexing can only have integer indexers"
with pytest.raises(ValueError, match=msg):
self.check_values(f, "iat")
# at
for f in [d["ints"], d["uints"], d["labels"], d["ts"], d["floats"]]:
_check(f, "at")
@pytest.mark.parametrize("kind", ["series", "frame"])
def test_at_and_iat_set(self, kind):
def _check(f, func, values=False):
if f is not None:
indices = self.generate_indices(f, values)
for i in indices:
getattr(f, func)[i] = 1
expected = self.get_value(func, f, i, values)
tm.assert_almost_equal(expected, 1)
d = getattr(self, kind)
# iat
for f in [d["ints"], d["uints"]]:
_check(f, "iat", values=True)
for f in [d["labels"], d["ts"], d["floats"]]:
if f is not None:
msg = "iAt based indexing can only have integer indexers"
with pytest.raises(ValueError, match=msg):
_check(f, "iat")
# at
for f in [d["ints"], d["uints"], d["labels"], d["ts"], d["floats"]]:
_check(f, "at")
class TestScalar2:
    # TODO: Better name, just separating things that don't need Base class
def test_at_iat_coercion(self):
# as timestamp is not a tuple!
dates = date_range("1/1/2000", periods=8)
df = DataFrame(np.random.randn(8, 4), index=dates, columns=["A", "B", "C", "D"])
s = df["A"]
result = s.at[dates[5]]
xp = s.values[5]
assert result == xp
# GH 7729
# make sure we are boxing the returns
s = Series(["2014-01-01", "2014-02-02"], dtype="datetime64[ns]")
expected = Timestamp("2014-02-02")
for r in [lambda: s.iat[1], lambda: s.iloc[1]]:
result = r()
assert result == expected
s = Series(["1 days", "2 days"], dtype="timedelta64[ns]")
expected = Timedelta("2 days")
for r in [lambda: s.iat[1], lambda: s.iloc[1]]:
result = r()
assert result == expected
def test_iat_invalid_args(self):
pass
def test_imethods_with_dups(self):
# GH6493
# iat/iloc with dups
s = Series(range(5), index=[1, 1, 2, 2, 3], dtype="int64")
result = s.iloc[2]
assert result == 2
result = s.iat[2]
assert result == 2
msg = "index 10 is out of bounds for axis 0 with size 5"
with pytest.raises(IndexError, match=msg):
s.iat[10]
msg = "index -10 is out of bounds for axis 0 with size 5"
with pytest.raises(IndexError, match=msg):
s.iat[-10]
result = s.iloc[[2, 3]]
expected = Series([2, 3], [2, 2], dtype="int64")
tm.assert_series_equal(result, expected)
df = s.to_frame()
result = df.iloc[2]
expected = Series(2, index=[0], name=2)
tm.assert_series_equal(result, expected)
result = df.iat[2, 0]
assert result == 2
def test_frame_at_with_duplicate_axes(self):
# GH#33041
arr = np.random.randn(6).reshape(3, 2)
df = DataFrame(arr, columns=["A", "A"])
result = df.at[0, "A"]
expected = df.iloc[0]
tm.assert_series_equal(result, expected)
result = df.T.at["A", 0]
tm.assert_series_equal(result, expected)
# setter
df.at[1, "A"] = 2
expected = Series([2.0, 2.0], index=["A", "A"], name=1)
tm.assert_series_equal(df.iloc[1], expected)
def test_at_getitem_dt64tz_values(self):
# gh-15822
df = DataFrame(
{
"name": ["John", "Anderson"],
"date": [
Timestamp(2017, 3, 13, 13, 32, 56),
Timestamp(2017, 2, 16, 12, 10, 3),
],
}
)
df["date"] = df["date"].dt.tz_localize("Asia/Shanghai")
expected = Timestamp("2017-03-13 13:32:56+0800", tz="Asia/Shanghai")
result = df.loc[0, "date"]
assert result == expected
result = df.at[0, "date"]
assert result == expected
def test_mixed_index_at_iat_loc_iloc_series(self):
# GH 19860
s = Series([1, 2, 3, 4, 5], index=["a", "b", "c", 1, 2])
for el, item in s.items():
assert s.at[el] == s.loc[el] == item
for i in range(len(s)):
assert s.iat[i] == s.iloc[i] == i + 1
with pytest.raises(KeyError, match="^4$"):
s.at[4]
with pytest.raises(KeyError, match="^4$"):
s.loc[4]
def test_mixed_index_at_iat_loc_iloc_dataframe(self):
# GH 19860
df = DataFrame(
[[0, 1, 2, 3, 4], [5, 6, 7, 8, 9]], columns=["a", "b", "c", 1, 2]
)
for rowIdx, row in df.iterrows():
for el, item in row.items():
assert df.at[rowIdx, el] == df.loc[rowIdx, el] == item
for row in range(2):
for i in range(5):
assert df.iat[row, i] == df.iloc[row, i] == row * 5 + i
with pytest.raises(KeyError, match="^3$"):
df.at[0, 3]
with pytest.raises(KeyError, match="^3$"):
df.loc[0, 3]
def test_iat_setter_incompatible_assignment(self):
# GH 23236
result = DataFrame({"a": [0, 1], "b": [4, 5]})
result.iat[0, 0] = None
expected = DataFrame({"a": [None, 1], "b": [4, 5]})
tm.assert_frame_equal(result, expected)
def test_getitem_zerodim_np_array(self):
# GH24924
# dataframe __getitem__
df = DataFrame([[1, 2], [3, 4]])
result = df[np.array(0)]
expected = Series([1, 3], name=0)
tm.assert_series_equal(result, expected)
# series __getitem__
s = Series([1, 2])
result = s[np.array(0)]
assert result == 1
def test_iat_dont_wrap_object_datetimelike():
# GH#32809 .iat calls go through DataFrame._get_value, should not
# call maybe_box_datetimelike
dti = date_range("2016-01-01", periods=3)
tdi = dti - dti
ser = Series(dti.to_pydatetime(), dtype=object)
ser2 = Series(tdi.to_pytimedelta(), dtype=object)
df = DataFrame({"A": ser, "B": ser2})
assert (df.dtypes == object).all()
for result in [df.at[0, "A"], df.iat[0, 0], df.loc[0, "A"], df.iloc[0, 0]]:
assert result is ser[0]
assert isinstance(result, datetime)
assert not isinstance(result, Timestamp)
for result in [df.at[1, "B"], df.iat[1, 1], df.loc[1, "B"], df.iloc[1, 1]]:
assert result is ser2[1]
assert isinstance(result, timedelta)
assert not isinstance(result, Timedelta)
def test_at_with_tuple_index_get():
# GH 26989
# DataFrame.at getter works with Index of tuples
df = DataFrame({"a": [1, 2]}, index=[(1, 2), (3, 4)])
assert df.index.nlevels == 1
assert df.at[(1, 2), "a"] == 1
# Series.at getter works with Index of tuples
series = df["a"]
assert series.index.nlevels == 1
assert series.at[(1, 2)] == 1
def test_at_with_tuple_index_set():
# GH 26989
# DataFrame.at setter works with Index of tuples
df = DataFrame({"a": [1, 2]}, index=[(1, 2), (3, 4)])
assert df.index.nlevels == 1
df.at[(1, 2), "a"] = 2
assert df.at[(1, 2), "a"] == 2
# Series.at setter works with Index of tuples
series = df["a"]
assert series.index.nlevels == 1
series.at[1, 2] = 3
assert series.at[1, 2] == 3
class TestMultiIndexScalar:
def test_multiindex_at_get(self):
# GH 26989
# DataFrame.at and DataFrame.loc getter works with MultiIndex
df = DataFrame({"a": [1, 2]}, index=[[1, 2], [3, 4]])
assert df.index.nlevels == 2
assert df.at[(1, 3), "a"] == 1
assert df.loc[(1, 3), "a"] == 1
# Series.at and Series.loc getter works with MultiIndex
series = df["a"]
assert series.index.nlevels == 2
assert series.at[1, 3] == 1
assert series.loc[1, 3] == 1
def test_multiindex_at_set(self):
# GH 26989
# DataFrame.at and DataFrame.loc setter works with MultiIndex
df = DataFrame({"a": [1, 2]}, index=[[1, 2], [3, 4]])
assert df.index.nlevels == 2
df.at[(1, 3), "a"] = 3
assert df.at[(1, 3), "a"] == 3
df.loc[(1, 3), "a"] = 4
assert df.loc[(1, 3), "a"] == 4
# Series.at and Series.loc setter works with MultiIndex
series = df["a"]
assert series.index.nlevels == 2
series.at[1, 3] = 5
assert series.at[1, 3] == 5
series.loc[1, 3] = 6
assert series.loc[1, 3] == 6
def test_multiindex_at_get_one_level(self):
# GH#38053
s2 = Series((0, 1), index=[[False, True]])
result = s2.at[False]
assert result == 0
| bsd-3-clause | 5,832,607,505,893,796,000 | 30.356467 | 88 | 0.520523 | false |
SahSih/ARStreaming360Display | RealTimeVideoStitch/motion_detector.py | 1 | 2815 | # USAGE
# python motion_detector.py
# python motion_detector.py --video videos/example_01.mp4
# import the necessary packages
import argparse
import datetime
import imutils
import time
import cv2
# construct the argument parser and parse the arguments
ap = argparse.ArgumentParser()
ap.add_argument("-v", "--video", help="path to the video file")
ap.add_argument("-a", "--min-area", type=int, default=500, help="minimum area size")
args = vars(ap.parse_args())
# if the video argument is None, then we are reading from webcam
if args.get("video", None) is None:
camera = cv2.VideoCapture(0)
time.sleep(0.25)
# otherwise, we are reading from a video file
else:
	camera = cv2.VideoCapture(args["video"])
time.sleep(0.25)
# initialize the first frame in the video stream
firstFrame = None
# loop over the frames of the video
while True:
# grab the current frame and initialize the occupied/unoccupied
# text
(grabbed, frame) = camera.read()
text = "Unoccupied"
# if the frame could not be grabbed, then we have reached the end
# of the video
if not grabbed:
break
# resize the frame, convert it to grayscale, and blur it
frame = imutils.resize(frame, width=500)
gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
gray = cv2.GaussianBlur(gray, (21, 21), 0)
# if the first frame is None, initialize it
if firstFrame is None:
firstFrame = gray
continue
# compute the absolute difference between the current frame and
# first frame
frameDelta = cv2.absdiff(firstFrame, gray)
thresh = cv2.threshold(frameDelta, 25, 255, cv2.THRESH_BINARY)[1]
# dilate the thresholded image to fill in holes, then find contours
# on thresholded image
thresh = cv2.dilate(thresh, None, iterations=2)
(cnts, _) = cv2.findContours(thresh.copy(), cv2.RETR_EXTERNAL,
cv2.CHAIN_APPROX_SIMPLE)
# loop over the contours
for c in cnts:
# if the contour is too small, ignore it
if cv2.contourArea(c) < args["min_area"]:
continue
# compute the bounding box for the contour, draw it on the frame,
# and update the text
(x, y, w, h) = cv2.boundingRect(c)
cv2.rectangle(frame, (x, y), (x + w, y + h), (0, 255, 0), 2)
text = "Occupied"
# draw the text and timestamp on the frame
cv2.putText(frame, "Room Status: {}".format(text), (10, 20),
cv2.FONT_HERSHEY_SIMPLEX, 0.5, (0, 0, 255), 2)
cv2.putText(frame, datetime.datetime.now().strftime("%A %d %B %Y %I:%M:%S%p"),
(10, frame.shape[0] - 10), cv2.FONT_HERSHEY_SIMPLEX, 0.35, (0, 0, 255), 1)
# show the frame and record if the user presses a key
cv2.imshow("Security Feed", frame)
cv2.imshow("Thresh", thresh)
cv2.imshow("Frame Delta", frameDelta)
key = cv2.waitKey(1) & 0xFF
	# if the `q` key is pressed, break from the loop
if key == ord("q"):
break
# cleanup the camera and close any open windows
camera.release()
cv2.destroyAllWindows()
| mit | 7,161,149,992,135,203,000 | 28.946809 | 84 | 0.705151 | false |
BronyTV/bronytv.net | btv_site/assets.py | 1 | 2355 | from flask_assets import Environment, Bundle
assets = Environment()
global_css = ["css/vendor/bootstrap.css", "css/global.css"]
global_js = ["js/vendor/jquery.js", "js/vendor/angular.js", "js/angular/common.js",
"js/header.js"]
def make_css(name, assets):
return Bundle(*assets, filters="cssmin", output="min/css/%s.css" % name)
def make_js(name, assets):
return Bundle(*assets, filters="jsmin", output="min/js/%s.js" % name)
def register_all(lst):
for asset_type, bundle_name, asset_files in lst:
if isinstance(asset_files, str):
asset_files = [asset_files]
if asset_type == "css":
assets.register(bundle_name, make_css(bundle_name, global_css + asset_files))
else:
assets.register(bundle_name, make_js(bundle_name, global_js + asset_files))
"""
Assets definitions look like this:
(asset_type, bundle_name, asset_files)
Where:
asset_type is one of "css" or "js"
bundle_name is the asset bundle name that will be used in templates
asset_files is a list of file names to add to the bundle, or a single filename str if there's only one
"""
register_all([
("css", "index_css", "css/index.css"),
("js", "index_js", ["js/vendor/moment.js", "js/vendor/moment-timezone-with-data-2010-2020.js",
"js/vendor/humanize-duration.js", "js/vendor/angular-timer.js", "js/angular/index.js"]),
("css", "stream_css", ["css/vendor/video-js.css", "css/stream.css", "css/vendor/animate.css"]),
("js", "stream_js", ["js/vendor/angular-animate.js", "js/vendor/video.js", "js/vendor/bootstrap-notify.js", "js/angular/stream.js"]),
("css", "chat_css", ["css/chat.css"]),
("js", "chat_js", []),
("css", "schedule_css", ["css/schedule.css"]),
("js", "schedule_js", []),
("css", "event_css", ["css/event.css"]),
("js", "event_js", ["js/vendor/moment.js", "js/angular/event.js"]),
("css", "about_css", []),
("js", "about_js", "js/angular/about.js"),
("css", "rules_css", []),
("js", "rules_js", "js/angular/rules.js"),
("css", "contact_css", []),
("js", "contact_js", []),
("css", "admin_index_css", "css/admin/index.css"),
("js", "admin_index_js", "js/angular/admin/index.js"),
("css", "admin_login_css", "css/admin/login.css"),
("js", "admin_login_js", [])
])
| gpl-3.0 | -5,187,518,734,788,820,000 | 32.642857 | 137 | 0.6 | false |
device42/nix_bsd_mac_inventory | module_hpux.py | 1 | 10053 | import paramiko
import math
import json
class GetHPUXData:
def __init__(self, ip, ssh_port, timeout, usr, pwd, use_key_file, key_file,
get_serial_info, get_hardware_info, get_os_details,
get_cpu_info, get_memory_info, ignore_domain, upload_ipv6, debug):
self.machine_name = ip
self.port = int(ssh_port)
self.timeout = timeout
self.username = usr
self.password = pwd
self.ssh = paramiko.SSHClient()
self.use_key_file = use_key_file
self.key_file = key_file
self.get_serial_info = get_serial_info
self.get_hardware_info = get_hardware_info
self.get_os_details = get_os_details
self.get_cpu_info = get_cpu_info
self.get_memory_info = get_memory_info
self.ignore_domain = ignore_domain
self.upload_ipv6 = upload_ipv6
self.debug = debug
        self.conn = None
self.root = False
self.sysdata = {}
self.nic_data = {'nic_parts': {}}
self.ip_data = []
self.disk_data = {'hdd_parts':[]}
        self.name = None
        self.paths = {}
        self.alldata = []
self.ssh.set_missing_host_key_policy(paramiko.AutoAddPolicy())
def main(self):
self.connect()
self.are_u_root()
self.get_sys_1()
self.get_sys_2()
self.get_macs()
self.get_ips()
self.get_cpu_num()
self.get_disks()
self.format_data()
return self.alldata
def connect(self):
try:
if not self.use_key_file:
self.ssh.connect(str(self.machine_name), port=self.port,
username=self.username, password=self.password, timeout=self.timeout)
else:
self.ssh.connect(str(self.machine_name), port=self.port,
username=self.username, key_filename=self.key_file, timeout=self.timeout)
except paramiko.AuthenticationException:
print str(self.machine_name) + ': authentication failed'
return None
except Exception as err:
print str(self.machine_name) + ': ' + str(err)
return None
def execute(self, cmd, need_sudo=False):
if need_sudo and not self.root: # not working currently, maybe in the future
cmd_sudo = "sudo -S -p '' %s" % cmd
stdin, stdout, stderr = self.ssh.exec_command(cmd_sudo, timeout=30)
stdin.write('%s\n' % self.password)
stdin.flush()
else:
stdin, stdout, stderr = self.ssh.exec_command(cmd, timeout=30)
data_err = stderr.readlines()
data_out = stdout.readlines()
return data_out, data_err
def are_u_root(self):
cmd = 'id -u'
data, err = self.execute(cmd)
if data[0].strip() == '0':
self.root = True
else:
self.root = False
if not self.root:
print '[!] You must be root to run HP-UX discovery!'
return
def format_data(self):
self.alldata.append(self.sysdata)
self.alldata.append(self.nic_data)
self.alldata.append(self.disk_data)
def get_sys_1(self):
cmd = '/usr/contrib/bin/machinfo'
data_out, data_err = self.execute(cmd, False)
if not data_err:
raw = [x.strip().lower() for x in data_out if x not in ('', '\n', None)]
for rec in raw:
if rec.startswith('memory:'):
ram = int(math.ceil(float(rec.split()[1])))
self.sysdata.update({'memory':ram})
if rec.startswith('model:'):
model = rec.split(':')[1].strip().strip('"')
self.sysdata.update({'hardware': model})
if rec.startswith('machine id number:'):
uuid = rec.split(':')[1].strip()
self.sysdata.update({'uuid': uuid})
if rec.startswith('machine serial number'):
serial = rec.split(':')[1].strip()
self.sysdata.update({'serial_no': serial})
if rec.startswith('nodename:'):
name = rec.split(':')[1].strip()
self.sysdata.update({'name': name})
self.name = name
if rec.startswith('release:'):
os_version = rec.split(':')[1].strip()
osver = ' '.join(os_version.split()[1:]).strip()
self.sysdata.update({'os': 'hp-ux'})
self.sysdata.update({'osver': osver if osver else 'D42_NULL'})
else:
print '[!] Error in get_sys_1(). Message was: %s' % data_err
def get_sys_2(self):
cmd = '/opt/ignite/bin/print_manifest'
data_out, data_err = self.execute(cmd, False)
if not data_err:
raw = [x.strip().lower() for x in data_out if x not in ('', '\n', None)]
for rec in raw:
if rec.startswith('model:'):
model = rec.split(':')[1].strip()
self.sysdata.update({'hardware': model})
if rec.startswith('main memory:'):
m = rec.split(':')[1].split()[0]
ram = int(math.ceil(float(m.strip())))
self.sysdata.update({'memory': ram})
if 'speed:' in rec and 'mhz' in rec:
cpu_speed= rec.split(':')[1].strip('mhz').strip()
self.sysdata.update({'cpupower': cpu_speed})
if rec.startswith('hostname'):
name = rec.split(':')[1].strip()
self.name = name
self.sysdata.update({'name': name})
else:
print '[!] Error in get_sys_2(). Message was: %s' % data_err
def get_macs(self):
cmd = 'lanscan'
data_out, data_err = self.execute(cmd, False)
if not data_err:
raw = [x.strip().lower() for x in data_out if x not in ('', '\n', None)]
for rec in raw:
if rec.split()[3] == 'up':
words = rec.split()
nic_mac = words[1]
nic_name = words[4]
mac = ''.join(nic_mac.split('0x')[1:])
n=2
raw = [mac[i:i + n] for i in range(0, len(mac), n)]
macaddress = ':'.join(raw)
self.nic_data['nic_parts'].update({nic_name:{'serial_no':macaddress}})
else:
print '[!] Error in get_macs(). Message was: %s' % data_err
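    # A small sketch (hypothetical lanscan value) of the MAC normalization
    # done above: strip the '0x' prefix, re-chunk into two-character octets,
    # then join with colons.
    #
    #   '0x001a2b3c4d5e' -> '001a2b3c4d5e'
    #                    -> ['00', '1a', '2b', '3c', '4d', '5e']
    #                    -> '00:1a:2b:3c:4d:5e'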
def get_ips(self):
ip_data = {}
mac_data = {}
for nic in self.nic_data['nic_parts']:
mac = self.nic_data['nic_parts'][nic]['serial_no']
ip_data.update({'device':self.name})
ip_data.update({'tag': nic})
mac_data.update({'device': self.name})
mac_data.update({'port_name': nic})
mac_data.update({'macaddress': mac})
ip_data.update({'macaddress': mac})
cmd = 'ifconfig %s | grep inet' % nic
data_out, data_err = self.execute(cmd, False)
if not data_err:
raw = [x.strip().lower() for x in data_out if x not in ('', '\n', None)]
for rec in raw:
ip = rec.split()[1].strip()
self.nic_data['nic_parts'][nic].update({'ipaddress':ip})
ip_data.update({'ipaddress': ip})
else:
print '[!] Error in get_ips(). Message was: %s' % data_err
self.alldata.append(ip_data)
self.alldata.append(mac_data)
def get_cpu_num(self):
cmd = 'ioscan -fnk|grep proc | wc -l'
data_out, data_err = self.execute(cmd, False)
if not data_err:
raw = [x.strip().lower() for x in data_out if x not in ('', '\n', None)]
if raw:
cpu_num = raw[0]
self.sysdata.update({'cpucount': cpu_num})
else:
print '[!] Error in get_cpu_num(). Message was: %s' % data_err
def get_disks(self):
cmd = 'ls /dev/rdisk/'
data_out, data_err = self.execute(cmd, False)
if not data_err:
disks = list(set([x.strip().split('_')[0] for x in data_out if x]))
for disk in disks:
cmd = 'diskinfo /dev/rdisk/%s' % disk
data_out, data_err = self.execute(cmd, False)
if not data_err:
raw = [x.strip().lower() for x in data_out if x not in ('', '\n', None)]
disk = {}
for rec in raw:
if 'describe of ' in rec: # another disk
if not len(disk) == 0:
self.disk_data['hdd_parts'].append(disk)
disk = {}
else:
if rec.startswith('product id'):
product = rec.split(':')[1].strip()
disk.update({'product': product})
if rec.startswith('size'):
size = int(math.ceil(float(rec.split(':')[1].split()[0].strip()) / 1024 / 1024))
disk.update({'hdd_size': size})
disk.update({'assignment': 'device'})
if self.name:
disk.update({'device': self.name})
self.disk_data['hdd_parts'].append(disk)
else:
print '[!] Error in get_disks(). Message was: %s' % data_err | mit | 6,703,414,946,404,267,000 | 39.721992 | 112 | 0.468915 | false |
rmcgibbo/openmoltools | openmoltools/amber.py | 1 | 17246 | import mdtraj as md
import tempfile
import logging
import os
import shutil
from distutils.spawn import find_executable
from mdtraj.utils.delay_import import import_
import mdtraj.utils
try:
from subprocess import getoutput # If python 3
except ImportError:
from commands import getoutput # If python 2
logger = logging.getLogger(__name__)
logging.basicConfig(level=logging.DEBUG, format="LOG: %(message)s")
# http://ambermd.org/tutorials/advanced/tutorial15/Tutorial2.xhtml
# Run tLEaP with input file:
# $ tleap -f commands.in
TLEAP_TEMPLATE = """
source leaprc.gaff
source oldff/leaprc.ff99SB
%(mol2_section)s
box = loadPdb %(box_filename)s
%(amberparams_section)s
setbox box centers
saveAmberParm box %(prmtop_filename)s %(inpcrd_filename)s
quit
"""
#loadmol2_section will look something like this:
#BMI = loadmol2 bmi.mol2
#BF4 = loadmol2 bf4.mol2
#ACN = loadmol2 acn.mol2
#loadamberparams_section looks like this:
#loadamberparams frcmod.bf4
#loadamberparams frcmod.bmi
#loadamberparams frcmod.acn
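# A minimal sketch (hypothetical filenames) of how these sections are
# combined with TLEAP_TEMPLATE before being piped to tleap:
#
#   mol2_section = "BMI = loadmol2 bmi.mol2\nBF4 = loadmol2 bf4.mol2"
#   amberparams_section = "loadamberparams frcmod.bmi\nloadamberparams frcmod.bf4"
#   commands = TLEAP_TEMPLATE % dict(
#       mol2_section=mol2_section, amberparams_section=amberparams_section,
#       box_filename='box.pdb', prmtop_filename='out.prmtop',
#       inpcrd_filename='out.inpcrd')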
def build_mixture_prmtop(mol2_filenames, frcmod_filenames, box_filename, prmtop_filename, inpcrd_filename, water_model = 'TIP3P'):
"""Create a prmtop and inpcrd from a collection of mol2 and frcmod files
as well as a single box PDB. We have used this for setting up
simulations of neat liquids or binary mixtures.
Parameters
----------
mol2_filenames : list(str)
Filenames of GAFF flavored mol2 files. Each must contain exactly
ONE ligand.
frcmod_filenames : str
Filename of input GAFF frcmod filenames.
box_filename : str
Filename of PDB containing an arbitrary box of the mol2 molecules.
prmtop_filename : str
output prmtop filename. Should have suffix .prmtop
inpcrd_filename : str
output inpcrd filename. Should have suffix .inpcrd
water_model : str, optional. Default: "TIP3P"
String specifying water model to be used IF water is present as a component of the mixture. Valid options are currently "TIP3P", "SPC", or None. If None is specified, flexible GAFF-water will be used as for any other solute (old behavior).
Returns
-------
tleap_commands : str
The string of commands piped to tleap for building the prmtop
and inpcrd files. This will *already* have been run, but the
output can be useful for debugging or archival purposes. However,
this will reflect temporary file names for both input and output
file as these are used to avoid tleap filename restrictions.
Notes
-----
This can be easily broken if there are missing, duplicated, or
inconsistent ligand residue names in your box, mol2, and frcmod files.
You can use mdtraj to edit the residue names with something like
this: trj.top.residue(0).name = "L1"
"""
# Check for one residue name per mol2 file and uniqueness between all mol2 files
all_names = set()
for filename in mol2_filenames:
t = md.load(filename)
names = set([r.name for r in t.top.residues])
if len(names) != 1:
raise(ValueError("Must have a SINGLE residue name in each mol2 file."))
all_names = all_names.union(list(names))
if len(all_names) != len(mol2_filenames):
raise(ValueError("Must have UNIQUE residue names in each mol2 file."))
if len(mol2_filenames) != len(frcmod_filenames):
raise(ValueError("Must provide an equal number of frcmod and mol2 file names."))
#Get number of files
nfiles = len(mol2_filenames)
#Check validity of water model options
valid_water = ['TIP3P', 'SPC', None]
if not water_model in valid_water:
raise(ValueError("Must provide a valid water model."))
#If we are requesting a different water model, check if there is water present
    if water_model is not None:
parmed = import_("parmed")
solventIsWater = []
waterPresent = False
for i in range(nfiles):
mol = parmed.load_file( mol2_filenames[i] )
#Check if it is water by checking GAFF atom names
types = [ atom.type for atom in mol.atoms ]
if 'oh' in types and types.count('ho')==2 and len(types)==3:
solventIsWater.append(True)
waterPresent = True
else:
solventIsWater.append(False)
#In this case, if we have any water, we will now work on fewer .mol2 and .frcmod files and instead use the force field files for those. So, reduce nfiles and remove the files we don't need from the .mol2 and .frcmod filename lists
#After doing so, go on to interpret the specified water model and compose the water model string needed for tleap
if waterPresent:
new_mol2_filenames = []
new_frcmod_filenames = []
water_mol2_filenames = []
for i in range( nfiles ):
if not solventIsWater[i]:
new_mol2_filenames.append( mol2_filenames[i] )
new_frcmod_filenames.append( frcmod_filenames[i] )
else:
water_mol2_filenames.append( mol2_filenames[i] )
nfiles = len(new_mol2_filenames)
mol2_filenames = new_mol2_filenames
frcmod_filenames = new_frcmod_filenames
#Now interpret the specified water model and translate into AMBER nomenclature
if water_model=='TIP3P':
water_model = 'TP3'
elif water_model =='SPC':
water_model = 'SPC'
else:
raise(ValueError("Cannot translate specified water model into one of the available models."))
#Compose string for loading specified water molecule
water_string = '\n'
water_names = [md.load(filename).top.residue(0).name for filename in water_mol2_filenames]
for name in water_names:
water_string += '%s = %s\n' % (name, water_model )
#Also if not TIP3P, update to source correct frcmod file
if water_model == 'SPC':
water_string += 'loadamberparams frcmod.spce\n'
elif water_model =='TP3':
continue
else:
raise(ValueError("Cannot identify water frcmod file to be loaded."))
#Rename water atoms in box file to match what is expected by AMBER
packmol = import_("openmoltools.packmol")
packmol.rename_water_atoms(box_filename)
else:
waterPresent = False
#Make temporary, hardcoded filenames for mol2 and frcmod input to avoid tleap filename restrictions
tmp_mol2_filenames = [ 'in%d.mol2' % n for n in range(nfiles) ]
tmp_frcmod_filenames = [ 'in%d.frcmod' % n for n in range(nfiles) ]
#Make temporary, hardcoded filenames for output files to avoid tleap filename restrictions
tmp_prmtop_filename = 'out.prmtop'
tmp_inpcrd_filename = 'out.inpcrd'
tmp_box_filename = 'tbox.pdb'
#Build absolute paths of input files so we can use context and temporary directory
infiles = mol2_filenames + frcmod_filenames + [box_filename]
infiles = [ os.path.abspath(filenm) for filenm in infiles ]
#Build absolute paths of output files so we can copy them back
prmtop_filename = os.path.abspath( prmtop_filename )
inpcrd_filename = os.path.abspath( inpcrd_filename )
#Use temporary directory and do the setup
with mdtraj.utils.enter_temp_directory():
#Copy input files to temporary file names in target directory
for (infile, outfile) in zip( infiles, tmp_mol2_filenames+tmp_frcmod_filenames+[tmp_box_filename] ):
shutil.copy( infile, outfile)
logger.debug('Copying input file %s to %s...\n' % (infile, outfile))
all_names = [md.load(filename).top.residue(0).name for filename in tmp_mol2_filenames]
mol2_section = "\n".join("%s = loadmol2 %s" % (all_names[k], filename) for k, filename in enumerate(tmp_mol2_filenames))
#If non-GAFF water is present, load desired parameters for that water as well.
if waterPresent:
mol2_section += water_string
amberparams_section = "\n".join("loadamberparams %s" % (filename) for k, filename in enumerate(tmp_frcmod_filenames))
tleap_commands = TLEAP_TEMPLATE % dict(mol2_section=mol2_section, amberparams_section=amberparams_section, box_filename=tmp_box_filename, prmtop_filename=tmp_prmtop_filename, inpcrd_filename=tmp_inpcrd_filename)
print(tleap_commands)
file_handle = tempfile.NamedTemporaryFile('w') # FYI Py3K defaults to 'wb' mode, which won't work here.
file_handle.writelines(tleap_commands)
file_handle.flush()
logger.debug('Running tleap in temporary directory.')
cmd = "tleap -f %s " % file_handle.name
logger.debug(cmd)
output = getoutput(cmd)
logger.debug(output)
check_for_errors( output, other_errors = ['Improper number of arguments'], ignore_errors = ['unperturbed charge of the unit', 'ignoring the error'] )
file_handle.close()
#Copy stuff back to right filenames
for (tfile, finalfile) in zip( [tmp_prmtop_filename, tmp_inpcrd_filename], [prmtop_filename, inpcrd_filename] ):
shutil.copy( tfile, finalfile)
return tleap_commands
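# A minimal usage sketch for build_mixture_prmtop; the filenames below are
# hypothetical placeholders:
#
#   build_mixture_prmtop(['bmi.gaff.mol2', 'bf4.gaff.mol2'],
#                        ['bmi.frcmod', 'bf4.frcmod'],
#                        'box.pdb', 'mixture.prmtop', 'mixture.inpcrd',
#                        water_model='TIP3P')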
def check_for_errors( outputtext, other_errors = None, ignore_errors = None ):
"""Check AMBER package output for the string 'ERROR' (upper or lowercase) and (optionally) specified other strings and raise an exception if it is found (to avoid silent failures which might be noted to log but otherwise ignored).
Parameters
----------
outputtext : str
String listing output text from an (AMBER) command which should be checked for errors.
other_errors : list(str), default None
        If specified, provide strings for other errors which will be checked for, such as "improper number of arguments", etc.
ignore_errors: list(str), default None
If specified, AMBER output lines containing errors but also containing any of the specified strings will be ignored (because, for example, AMBER issues an "ERROR" for non-integer charges in some cases when only a warning is needed).
Notes
-----
    If error(s) are found, raise a RuntimeError and attempt to print the appropriate errors from the processed text."""
lines = outputtext.split('\n')
error_lines = []
for line in lines:
if 'ERROR' in line.upper():
error_lines.append( line )
        if other_errors is not None:
for err in other_errors:
if err.upper() in line.upper():
error_lines.append( line )
    if ignore_errors is not None and len(error_lines) > 0:
new_error_lines = []
for ign in ignore_errors:
ignore = False
for err in error_lines:
if ign in err:
ignore = True
if not ignore:
new_error_lines.append( err )
error_lines = new_error_lines
if len(error_lines) > 0:
print("Unexpected errors encountered running AMBER tool. Offending output:")
for line in error_lines: print(line)
raise(RuntimeError("Error encountered running AMBER tool. Exiting."))
return
def find_gaff_dat():
AMBERHOME = None
try:
AMBERHOME = os.environ['AMBERHOME']
except KeyError:
pass
if AMBERHOME is None:
full_path = find_executable("parmchk2")
try:
AMBERHOME = os.path.split(full_path)[0]
AMBERHOME = os.path.join(AMBERHOME, "../")
except:
raise(ValueError("Cannot find AMBER GAFF"))
if AMBERHOME is None:
raise(ValueError("Cannot find AMBER GAFF"))
return os.path.join(AMBERHOME, 'dat', 'leap', 'parm', 'gaff.dat')
GAFF_DAT_FILENAME = find_gaff_dat()
def run_antechamber(molecule_name, input_filename, charge_method="bcc", net_charge=None, gaff_mol2_filename=None, frcmod_filename=None):
"""Run AmberTools antechamber and parmchk2 to create GAFF mol2 and frcmod files.
Parameters
----------
molecule_name : str
Name of the molecule to be parameterized, will be used in output filenames.
    input_filename : str
The molecule to be parameterized. Must be tripos mol2 format.
charge_method : str, optional
If not None, the charge method string will be passed to Antechamber.
net_charge : int, optional
If not None, net charge of the molecule to be parameterized.
If None, Antechamber sums up partial charges from the input file.
gaff_mol2_filename : str, optional, default=None
Name of GAFF mol2 filename to output. If None, uses local directory
and molecule_name
frcmod_filename : str, optional, default=None
Name of GAFF frcmod filename to output. If None, uses local directory
and molecule_name
Returns
-------
gaff_mol2_filename : str
GAFF format mol2 filename produced by antechamber
frcmod_filename : str
        Amber frcmod file produced by parmchk2
"""
utils = import_("openmoltools.utils")
ext = utils.parse_ligand_filename(input_filename)[1]
filetype = ext[1:]
if filetype != "mol2":
raise(ValueError("Must input mol2 filename"))
if gaff_mol2_filename is None:
gaff_mol2_filename = molecule_name + '.gaff.mol2'
if frcmod_filename is None:
frcmod_filename = molecule_name + '.frcmod'
#Build absolute paths for input and output files
gaff_mol2_filename = os.path.abspath( gaff_mol2_filename )
frcmod_filename = os.path.abspath( frcmod_filename )
input_filename = os.path.abspath( input_filename )
#Use temporary directory context to do this to avoid issues with spaces in filenames, etc.
with mdtraj.utils.enter_temp_directory():
shutil.copy( input_filename, 'in.mol2' )
cmd = "antechamber -i in.mol2 -fi mol2 -o out.mol2 -fo mol2 -s 2"
if charge_method is not None:
cmd += ' -c %s' % charge_method
if net_charge is not None:
cmd += ' -nc %d' % net_charge
logger.debug(cmd)
output = getoutput(cmd)
logger.debug(output)
cmd = "parmchk2 -i out.mol2 -f mol2 -o out.frcmod"
logger.debug(cmd)
output = getoutput(cmd)
logger.debug(output)
check_for_errors( output )
#Copy back
shutil.copy( 'out.mol2', gaff_mol2_filename )
shutil.copy( 'out.frcmod', frcmod_filename )
return gaff_mol2_filename, frcmod_filename
def run_tleap(molecule_name, gaff_mol2_filename, frcmod_filename, prmtop_filename=None, inpcrd_filename=None):
"""Run AmberTools tleap to create simulation files for AMBER
Parameters
----------
molecule_name : str
The name of the molecule
gaff_mol2_filename : str
GAFF format mol2 filename produced by antechamber
frcmod_filename : str
        Amber frcmod file produced by parmchk2
prmtop_filename : str, optional, default=None
Amber prmtop file produced by tleap, defaults to molecule_name
inpcrd_filename : str, optional, default=None
Amber inpcrd file produced by tleap, defaults to molecule_name
Returns
-------
prmtop_filename : str
Amber prmtop file produced by tleap
inpcrd_filename : str
Amber inpcrd file produced by tleap
"""
if prmtop_filename is None:
prmtop_filename = "%s.prmtop" % molecule_name
if inpcrd_filename is None:
inpcrd_filename = "%s.inpcrd" % molecule_name
#Get absolute paths for input/output
gaff_mol2_filename = os.path.abspath( gaff_mol2_filename )
frcmod_filename = os.path.abspath( frcmod_filename )
prmtop_filename = os.path.abspath( prmtop_filename )
inpcrd_filename = os.path.abspath( inpcrd_filename )
#Work in a temporary directory, on hard coded filenames, to avoid any issues AMBER may have with spaces and other special characters in filenames
with mdtraj.utils.enter_temp_directory():
shutil.copy( gaff_mol2_filename, 'file.mol2' )
shutil.copy( frcmod_filename, 'file.frcmod' )
tleap_input = """
source oldff/leaprc.ff99SB
source leaprc.gaff
LIG = loadmol2 file.mol2
check LIG
loadamberparams file.frcmod
saveamberparm LIG out.prmtop out.inpcrd
quit
"""
file_handle = tempfile.NamedTemporaryFile('w') # FYI Py3K defaults to 'wb' mode, which won't work here.
file_handle.writelines(tleap_input)
file_handle.flush()
cmd = "tleap -f %s " % file_handle.name
logger.debug(cmd)
output = getoutput(cmd)
logger.debug(output)
check_for_errors( output, other_errors = ['Improper number of arguments'] )
file_handle.close()
#Copy back target files
shutil.copy( 'out.prmtop', prmtop_filename )
shutil.copy( 'out.inpcrd', inpcrd_filename )
return prmtop_filename, inpcrd_filename
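# A minimal end-to-end sketch (hypothetical ligand file) chaining the two
# helpers above to parameterize a single small molecule:
#
#   gaff_mol2, frcmod = run_antechamber('ligand', 'ligand.mol2',
#                                       charge_method='bcc', net_charge=0)
#   prmtop, inpcrd = run_tleap('ligand', gaff_mol2, frcmod)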
| gpl-2.0 | 6,290,256,106,830,964,000 | 38.555046 | 247 | 0.650586 | false |
nimbusproject/dashi | dashi/bootstrap/containers.py | 1 | 5837 | #!/usr/bin/env python
__author__ = 'Adam R. Smith'
__license__ = 'Apache 2.0'
import collections
class DotNotationGetItem(object):
""" Drive the behavior for DotList and DotDict lookups by dot notation, JSON-style. """
def _convert(self, val):
""" Convert the type if necessary and return if a conversion happened. """
if isinstance(val, dict) and not isinstance(val, DotDict):
return DotDict(val), True
elif isinstance(val, list) and not isinstance(val, DotList):
return DotList(val), True
return val, False
def __getitem__(self, key):
val = super(DotNotationGetItem, self).__getitem__(key)
val, converted = self._convert(val)
if converted: self[key] = val
return val
class DotList(DotNotationGetItem, list):
""" Partner class for DotDict; see that for docs. Both are needed to fully support JSON/YAML blocks. """
#def DotListIterator(list.)
def __iter__(self):
""" Monkey-patch the "next" iterator method to return modified versions. This will be slow. """
#it = super(DotList, self).__iter__()
#it_next = getattr(it, 'next')
#setattr(it, 'next', lambda: it_next(it))
#return it
for val in super(DotList, self).__iter__():
val, converted = self._convert(val)
yield val
class DotDict(DotNotationGetItem, dict):
"""
Subclass of dict that will recursively look up attributes with dot notation.
This is primarily for working with JSON-style data in a cleaner way like javascript.
Note that this will instantiate a number of child DotDicts when you first access attributes;
do not use in performance-critical parts of your code.
"""
def __getattr__(self, key):
""" Make attempts to lookup by nonexistent attributes also attempt key lookups. """
try:
val = self.__getitem__(key)
except KeyError:
raise AttributeError(key)
return val
def copy(self):
return DotDict(dict.copy(self))
@classmethod
def fromkeys(cls, seq, value=None):
return DotDict(dict.fromkeys(seq, value))
class DictModifier(DotDict):
"""
Subclass of DotDict that allows the sparse overriding of dict values.
"""
def __init__(self, base, data=None):
# base should be a DotDict, raise TypeError exception if not
if not isinstance(base, DotDict):
raise TypeError("Base must be of type DotDict")
self.base = base
if data is not None:
self.update(data)
def __getattr__(self, key):
try:
return DotDict.__getattr__(self, key)
        except AttributeError as ae:
# Delegate to base
return getattr(self.base, key)
def __getitem__(self, key):
try:
return DotDict.__getitem__(self, key)
        except KeyError as ke:
# Delegate to base
return getattr(self.base, key)
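# A short sketch of the sparse-override behavior: keys set on the
# DictModifier win, everything else falls through to the base DotDict.
#
#   base = DotDict({'host': 'localhost', 'port': 5672})
#   mod = DictModifier(base, {'port': 15672})
#   mod.port -> 15672 (overridden)
#   mod.host -> 'localhost' (delegated to base)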
# dict_merge from: http://appdelegateinc.com/blog/2011/01/12/merge-deeply-nested-dicts-in-python/
def quacks_like_dict(object):
"""Check if object is dict-like"""
return isinstance(object, collections.Mapping)
def dict_merge(a, b):
"""Merge two deep dicts non-destructively
Uses a stack to avoid maximum recursion depth exceptions
>>> a = {'a': 1, 'b': {1: 1, 2: 2}, 'd': 6}
>>> b = {'c': 3, 'b': {2: 7}, 'd': {'z': [1, 2, 3]}}
    >>> c = dict_merge(a, b)
>>> from pprint import pprint; pprint(c)
{'a': 1, 'b': {1: 1, 2: 7}, 'c': 3, 'd': {'z': [1, 2, 3]}}
"""
    assert quacks_like_dict(a) and quacks_like_dict(b)
dst = a.copy()
stack = [(dst, b)]
while stack:
current_dst, current_src = stack.pop()
for key in current_src:
if key not in current_dst:
current_dst[key] = current_src[key]
else:
                if quacks_like_dict(current_src[key]) and quacks_like_dict(current_dst[key]):
stack.append((current_dst[key], current_src[key]))
else:
current_dst[key] = current_src[key]
return dst
def named_any(name):
"""
Retrieve a Python object by its fully qualified name from the global Python
module namespace. The first part of the name, that describes a module,
will be discovered and imported. Each subsequent part of the name is
treated as the name of an attribute of the object specified by all of the
name which came before it.
@param name: The name of the object to return.
@return: the Python object identified by 'name'.
"""
assert name, 'Empty module name'
names = name.split('.')
topLevelPackage = None
moduleNames = names[:]
while not topLevelPackage:
if moduleNames:
trialname = '.'.join(moduleNames)
try:
topLevelPackage = __import__(trialname)
            except Exception as ex:
moduleNames.pop()
else:
if len(names) == 1:
raise Exception("No module named %r" % (name,))
else:
raise Exception('%r does not name an object' % (name,))
obj = topLevelPackage
for n in names[1:]:
obj = getattr(obj, n)
return obj
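# A quick sketch of named_any resolving dotted names at runtime:
#
#   named_any('os.path.join')        -> the os.path.join function
#   named_any('collections.Mapping') -> the Mapping ABC used above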
def for_name(modpath, classname):
'''
Returns a class of "classname" from module "modname".
'''
module = __import__(modpath, fromlist=[classname])
classobj = getattr(module, classname)
return classobj()
if __name__ == '__main__':
dd = DotDict({'a':{'b':{'c':1, 'd':2}}})
print dd.a.b.c, dd.a.b.d
print dd.a.b
#print dd.foo
print dict.fromkeys(('a','b','c'), 'foo')
print DotDict.fromkeys(('a','b','c'), 'foo').a
dl = DotList([1, {'a':{'b':{'c':1, 'd':2}}}])
print dl[1].a.b.c
| apache-2.0 | 2,652,685,822,307,659,300 | 31.433333 | 108 | 0.588145 | false |
BoPeng/SOS | src/sos/actions.py | 1 | 58010 | #!/usr/bin/env python3
#
# Copyright (c) Bo Peng and the University of Texas MD Anderson Cancer Center
# Distributed under the terms of the 3-clause BSD License.
import copy
import gzip
import os
import shlex
import shutil
import subprocess
import sys
import tarfile
import time
import tempfile
import textwrap
import urllib
import urllib.error
import urllib.parse
import urllib.request
import uuid
import zipfile
from collections.abc import Sequence
from functools import wraps
from tqdm import tqdm as ProgressBar
from concurrent.futures import ProcessPoolExecutor
from .eval import interpolate
from .parser import SoS_Script
from .syntax import SOS_ACTION_OPTIONS
from .targets import executable, file_target, path, paths, sos_targets
from .utils import (
textMD5,
fileMD5,
StopInputGroup,
TerminateExecution,
TimeoutInterProcessLock,
env,
get_traceback,
short_repr,
transcribe,
load_config_files,
)
from .controller import send_message_to_controller
from .messages import encode_msg, decode_msg
from typing import Any, Callable, Dict, List, Tuple, Union
__all__ = [
"SoS_Action",
"script",
"sos_run",
"fail_if",
"warn_if",
"stop_if",
"download",
"run",
"perl",
"report",
"pandoc",
]
def get_actions() -> List[Any]:
# get the name of all actions, which are identified by an attribute
# run_mode of the function
return [k for k, v in globals().items() if hasattr(v, "run_mode")]
#
# A decoration function that allows SoS to replace all SoS actions
# with a null action. Option run_mode is deprecated and might be
# removed later on.
#
def SoS_Action(
run_mode: Union[str, List[str]] = "deprecated",
acceptable_args: Union[Tuple[str], List[str]] = ("*",),
default_args: Dict[str, Dict[str, str]] = {},
) -> Callable:
def runtime_decorator(func):
@wraps(func)
def action_wrapper(*args, **kwargs):
# if container in args, a large number of docker-specific
# args would be allowed.
for k in default_args:
if k in default_args and k not in kwargs:
kwargs[k] = default_args[k]
if "*" not in acceptable_args and all(
x not in kwargs
for x in ("docker_image", "container", "template", "template_name")
):
for key in kwargs.keys():
if key not in acceptable_args and key not in SOS_ACTION_OPTIONS:
raise ValueError(
f'Unrecognized option "{key}" for action {func}'
)
# docker files will be downloaded in run or prepare mode
# this option is independent of container...
if "docker_file" in kwargs and env.config["run_mode"] in [
"run",
"interactive",
]:
from .docker.client import SoS_DockerClient
docker = SoS_DockerClient()
docker.load_image(kwargs["docker_file"])
# handle image
if "docker_image" in kwargs:
if "container" in kwargs and kwargs["container"]:
raise ValueError(
"Option docker_image is deprecated and should not be specified with option container"
)
kwargs["container"] = "docker://" + kwargs["container"]
if "container" in kwargs and kwargs["container"]:
if not isinstance(kwargs["container"], str):
raise ValueError(
f'A string in the format of "scheme://tag" is expected for option container, {kwargs["container"]} provided'
)
engine = (
kwargs["engine"]
if "engine" in kwargs and kwargs["engine"]
else None
)
if "://" in kwargs["container"]:
cty, cname = kwargs["container"].split("://", 1)
elif kwargs["container"].endswith(".simg") or kwargs[
"container"
].endswith(".sif"):
engine = "singularity"
cty = "file"
cname = kwargs["container"]
else:
cty = None
cname = kwargs["container"]
# now let us figure out image and engine
# if engine is specified
if engine == "docker":
if cty is not None and cty != "docker":
raise ValueError(
f"docker engine only allows docker container {cty} specified"
)
elif engine == "singularity":
if cty is not None and cty not in (
"docker",
"file",
"library",
"shub",
):
raise ValueError(
f"singularity engine only allows docker, file, library, and shub container {cty} specified"
)
elif engine is not None and engine != "local":
raise ValueError(
f"Only docker and singularity container engines are supported: {engine} specified"
)
else:
                    # engine is None, needs to be inferred from the container type
if cty == "docker":
engine = "docker"
elif cty in ("file", "shub", "library"):
engine = "singularity"
elif cty == "local":
engine = "local"
else:
engine = "docker"
#
# handle different container type
if engine == "docker":
from .docker.client import SoS_DockerClient
docker = SoS_DockerClient()
docker.pull(cname)
kwargs["engine"] = "docker"
kwargs["container"] = cname
elif engine == "singularity":
kwargs["engine"] = "singularity"
from .singularity.client import SoS_SingularityClient
singularity = SoS_SingularityClient()
singularity.pull(kwargs["container"])
else:
# if local or none, reset container
kwargs["engine"] = None
kwargs["container"] = None
if "active" in kwargs:
if kwargs["active"] is False:
return None
elif kwargs["active"] is True:
pass
elif isinstance(kwargs["active"], int):
if (
kwargs["active"] >= 0
and env.sos_dict["_index"] != kwargs["active"]
):
return None
if (
kwargs["active"] < 0
and env.sos_dict["_index"]
!= kwargs["active"] + env.sos_dict["__num_groups__"]
):
return None
elif isinstance(kwargs["active"], Sequence):
allowed_index = list(
[
x if x >= 0 else env.sos_dict["__num_groups__"] + x
for x in kwargs["active"]
]
)
if env.sos_dict["_index"] not in allowed_index:
return None
elif isinstance(kwargs["active"], slice):
allowed_index = list(range(env.sos_dict["__num_groups__"]))[
kwargs["active"]
]
if env.sos_dict["_index"] not in allowed_index:
return None
else:
raise RuntimeError(
f'Unacceptable value for option active: {kwargs["active"]}'
)
# verify input
if "input" in kwargs and kwargs["input"] is not None:
try:
ifiles = sos_targets(kwargs["input"])
for ifile in ifiles:
if not ifile.target_exists("target"):
raise RuntimeError(f"Input file {ifile} does not exist.")
except Exception as e:
raise ValueError(
f'Unacceptable value ({kwargs["input"]}) for parameter input of actions: {e}'
)
# if there are parameters input and output, the action is subject to signature verification
sig = None
# tracked can be True, filename or list of filename
if (
"tracked" in kwargs
and kwargs["tracked"] is not None
and kwargs["tracked"] is not False
):
if args and isinstance(args[0], str):
script = args[0]
elif "script" in kwargs:
script = kwargs["script"]
else:
script = ""
try:
tfiles = sos_targets(
[] if kwargs["tracked"] is True else kwargs["tracked"]
)
except Exception as e:
raise ValueError(
f'Parameter tracked of actions can be None, True/False, or one or more filenames: {kwargs["tracked"]} provided: {e}'
)
# append input and output
for t in ("input", "output"):
if t in kwargs and kwargs[t] is not None:
tfiles.extend(sos_targets(kwargs[t]))
from .targets import RuntimeInfo
sig = RuntimeInfo(
textMD5(script),
sos_targets(kwargs["input"] if "input" in kwargs else []),
sos_targets(kwargs["output"] if "output" in kwargs else []),
sos_targets(
kwargs["tracked"]
if "tracked" in kwargs and kwargs["tracked"] is not True
else []
),
kwargs,
)
sig.lock()
if env.config["sig_mode"] in ("default", "skip", "distributed"):
matched = sig.validate()
if isinstance(matched, dict):
env.logger.info(
f"Action ``{func.__name__}`` is ``ignored`` due to saved signature"
)
return None
else:
env.logger.debug(f"Signature mismatch: {matched}")
elif env.config["sig_mode"] == "assert":
matched = sig.validate()
if isinstance(matched, str):
raise RuntimeError(f"Signature mismatch: {matched}")
else:
env.logger.info(
f"Action ``{func.__name__}`` is ``ignored`` with matching signature"
)
return None
elif env.config["sig_mode"] == "build":
# build signature require existence of files
if sig.write():
env.logger.info(
f"Action ``{func.__name__}`` is ``ignored`` with signature constructed"
)
return None
original_env = {}
if "default_env" in kwargs:
original_env = copy.deepcopy(os.environ)
if not isinstance(kwargs["default_env"], dict):
raise ValueError(
f'Option default_env must be a dictionary, {kwargs["default_env"]} provided'
)
for k in kwargs["default_env"]:
if k not in os.environ:
os.environ[k] = kwargs["default_env"][k]
if "env" in kwargs:
original_env = copy.deepcopy(os.environ)
if not isinstance(kwargs["env"], dict):
raise ValueError(
f'Option env must be a dictionary, {kwargs["env"]} provided'
)
os.environ.update(kwargs["env"])
# workdir refers to directory inside of docker image
if "workdir" in kwargs:
if not kwargs["workdir"] or not isinstance(
kwargs["workdir"], (str, os.PathLike)
):
raise RuntimeError(
f'workdir option should be a path of type str or path, {kwargs["workdir"]} provided'
)
workdir = path(kwargs["workdir"])
if not os.path.isdir(workdir):
os.makedirs(workdir, exist_ok=True)
try:
olddir = os.getcwd()
os.chdir(workdir)
try:
res = func(*args, **kwargs)
except Exception as e:
if "allow_error" in kwargs and kwargs["allow_error"]:
env.logger.warning(str(e))
res = None
else:
raise
finally:
os.chdir(olddir)
if original_env:
os.environ.clear()
os.environ.update(original_env)
else:
try:
res = func(*args, **kwargs)
except Exception as e:
if "allow_error" in kwargs and kwargs["allow_error"]:
env.logger.warning(str(e))
res = None
else:
raise
finally:
if original_env:
os.environ.clear()
os.environ.update(original_env)
if "output" in kwargs and kwargs["output"] is not None:
ofiles = sos_targets(kwargs["output"])
for ofile in ofiles:
if not ofile.target_exists("any"):
raise RuntimeError(
f"Output target {ofile} does not exist after completion of action {func.__name__}"
)
if sig:
sig.write()
sig.release()
return res
return action_wrapper
return runtime_decorator
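# Illustrative sketch (not part of SoS): defining a custom action with the
# SoS_Action decorator above. The action name and body are hypothetical; the
# common options (input, output, tracked, allow_error, workdir, env) are
# consumed by the wrapper through **kwargs. Wrapped in a helper so nothing is
# registered at import time.
def _example_custom_action():
    @SoS_Action(acceptable_args=["msg"])
    def echo_msg(msg="", **kwargs):
        env.logger.info(msg)
        return 0
    return echo_msg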
class SoS_ExecuteScript:
def __init__(self, script, interpreter, suffix, args=""):
self.script = script
self.interpreter = interpreter
self.args = args
if suffix:
self.suffix = suffix
elif sys.platform == "win32":
self.suffix = ".bat"
else:
self.suffix = ".sh"
def process_template(self, cmd, filename, script, **kwargs):
if "template" in kwargs:
template = kwargs["template"]
else:
template_name = kwargs["template_name"]
if "CONFIG" not in env.sos_dict:
load_config_files()
if (
"action_templates" in env.sos_dict["CONFIG"]
and template_name in env.sos_dict["CONFIG"]["action_templates"]
):
template = env.sos_dict["CONFIG"]["action_templates"][template_name]
elif template_name == "conda":
template = textwrap.dedent(
"""\
conda run -n {env_name} {cmd}
"""
)
else:
raise ValueError(
f'No template named {template_name} is built-in or provided in "action_templates" of config files.'
)
try:
context = copy.deepcopy(kwargs)
context["cmd"] = cmd
context["filename"] = filename
context["script"] = script
return interpolate(template, context)
except Exception as e:
raise ValueError(f"Failed to expand template {template}: {e}")
def run(self, **kwargs):
#
if "input" in kwargs:
try:
ifiles = sos_targets(kwargs["input"])
except Exception as e:
raise ValueError(
f'Unacceptable value ({kwargs["input"]}) for parameter input: {e}'
)
content = ""
for ifile in ifiles:
try:
with open(ifile) as iscript:
content += iscript.read()
except Exception as e:
raise RuntimeError(f"Failed to read from {ifile}: {e}")
self.script = content + self.script
if "engine" in kwargs and kwargs["engine"] == "docker":
from .docker.client import SoS_DockerClient
docker = SoS_DockerClient()
docker.run(
kwargs["container"],
self.script,
self.interpreter,
self.args,
self.suffix,
**kwargs,
)
elif "engine" in kwargs and kwargs["engine"] == "singularity":
from .singularity.client import SoS_SingularityClient
singularity = SoS_SingularityClient()
singularity.run(
kwargs["container"],
self.script,
self.interpreter,
self.args,
self.suffix,
**kwargs,
)
else:
if isinstance(self.interpreter, str):
if self.interpreter and not shutil.which(
shlex.split(self.interpreter)[0]
):
raise RuntimeError(
f"Failed to locate interpreter {self.interpreter}"
)
elif isinstance(self.interpreter, Sequence):
found = False
for ip in self.interpreter:
if shutil.which(shlex.split(ip)[0]):
self.interpreter = ip
found = True
break
if not found:
raise RuntimeError(
f'Failed to locate any of the interpreters {", ".join(self.interpreter)}'
)
else:
raise RuntimeError(f"Unacceptable interpreter {self.interpreter}")
debug_script_file = os.path.join(
env.exec_dir,
f'{env.sos_dict["step_name"]}_{env.sos_dict["_index"]}_{str(uuid.uuid4())[:8]}{self.suffix}',
)
# with open(debug_script_file, 'w') as sfile:
# sfile.write(self.script)
# env.log_to_file('ACTION', self.script)
try:
p = None
script_file = tempfile.NamedTemporaryFile(
mode="w+t", suffix=self.suffix, delete=False
).name
# potentially used for template
cmd_file = None
with open(script_file, "w") as sfile:
sfile.write(self.script)
if not self.args:
self.args = "{filename:q}"
# if no interpreter is specified, prepare for the case when the script will be executed directly
if not self.interpreter:
# make the script executable
os.chmod(script_file, 0o775)
#
if env.config["run_mode"] == "dryrun":
cmd = interpolate(
f"{self.interpreter} {self.args}",
{"filename": path("SCRIPT"), "script": self.script},
)
if "__std_out__" in env.sos_dict:
with open(env.sos_dict["__std_out__"], "a") as so:
so.write(f"HINT: {cmd}\n{self.script}\n")
else:
print(f"HINT: {cmd}\n{self.script}\n")
return None
cmd = interpolate(
f"{self.interpreter} {self.args}",
{"filename": sos_targets(script_file), "script": self.script},
)
transcript_cmd = interpolate(
f"{self.interpreter} {self.args}",
{"filename": sos_targets("SCRIPT"), "script": self.script},
)
if "template_name" in kwargs or "template" in kwargs:
templated_script = self.process_template(
cmd, sos_targets(script_file), self.script, **kwargs
)
cmd_file = tempfile.NamedTemporaryFile(
mode="w+t",
suffix=".bat" if sys.platform == "win32" else ".sh",
delete=False,
).name
with open(cmd_file, "w") as cfile:
cfile.write(templated_script)
# if it has a shebang line
if templated_script.startswith("#!") or sys.platform == "win32":
os.chmod(cmd_file, 0o775)
cmd = cmd_file
else:
cmd = f"sh {shlex.quote(cmd_file)}"
env.logger.debug(
f"Running templated script \n{templated_script}\ncommand {cmd}"
)
transcribe(self.script, cmd=transcript_cmd)
# if not in a notebook and not in a task, the signature database is available.
if (
env.sos_dict["_index"] == 0
and env.config["run_mode"] != "interactive"
and "__std_out__" not in env.sos_dict
and hasattr(env, "master_push_socket")
and env.master_push_socket is not None
):
send_message_to_controller(
[
"workflow_sig",
"transcript",
env.sos_dict["step_name"],
repr(
{
"start_time": time.time(),
"command": transcript_cmd,
"script": self.script,
}
),
]
)
if env.config["run_mode"] == "interactive":
if "stdout" in kwargs or "stderr" in kwargs:
child = subprocess.Popen(
cmd,
shell=True,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
bufsize=0,
)
out, err = child.communicate()
if "stdout" in kwargs:
if kwargs["stdout"] is not False and len(out):
with open(kwargs["stdout"], "ab") as so:
so.write(out)
else:
sys.stdout.write(out.decode())
if "stderr" in kwargs:
if kwargs["stderr"] is not False and len(err):
with open(kwargs["stderr"], "ab") as se:
se.write(err)
else:
sys.stderr.write(err.decode())
ret = child.returncode
else:
# need to catch output and send to python output, which will in turn be hijacked by SoS notebook
from .utils import pexpect_run
ret = pexpect_run(cmd.strip())
elif "__std_out__" in env.sos_dict and "__std_err__" in env.sos_dict:
if "stdout" in kwargs or "stderr" in kwargs:
if "stdout" in kwargs:
if kwargs["stdout"] is False:
so = subprocess.DEVNULL
else:
so = open(kwargs["stdout"], "ab")
elif env.verbosity > 0:
so = open(env.sos_dict["__std_out__"], "ab")
else:
so = subprocess.DEVNULL
if "stderr" in kwargs:
if kwargs["stderr"] is False:
se = subprocess.DEVNULL
else:
se = open(kwargs["stderr"], "ab")
elif env.verbosity > 1:
se = open(env.sos_dict["__std_err__"], "ab")
else:
se = subprocess.DEVNULL
p = subprocess.Popen(cmd, shell=True, stderr=se, stdout=so)
ret = p.wait()
if so != subprocess.DEVNULL:
so.close()
if se != subprocess.DEVNULL:
se.close()
elif env.verbosity >= 1:
with open(env.sos_dict["__std_out__"], "ab") as so, open(
env.sos_dict["__std_err__"], "ab"
) as se:
p = subprocess.Popen(cmd, shell=True, stderr=se, stdout=so)
ret = p.wait()
else:
p = subprocess.Popen(
cmd,
shell=True,
stderr=subprocess.DEVNULL,
stdout=subprocess.DEVNULL,
)
ret = p.wait()
else:
if "stdout" in kwargs:
if kwargs["stdout"] is False:
so = subprocess.DEVNULL
else:
so = open(kwargs["stdout"], "ab")
elif env.verbosity > 0:
so = None
else:
so = subprocess.DEVNULL
if "stderr" in kwargs:
if kwargs["stderr"] is False:
se = subprocess.DEVNULL
else:
se = open(kwargs["stderr"], "ab")
elif env.verbosity > 1:
se = None
else:
se = subprocess.DEVNULL
p = subprocess.Popen(cmd, shell=True, stderr=se, stdout=so)
ret = p.wait()
if so is not None and so != subprocess.DEVNULL:
so.close()
if se is not None and se != subprocess.DEVNULL:
se.close()
# clean up empty stdstream files
for item in ["stdout", "stderr"]:
if (
item in kwargs
and os.path.isfile(kwargs[item])
and os.path.getsize(kwargs[item]) == 0
):
try:
os.remove(kwargs[item])
except Exception:
pass
if ret != 0:
with open(debug_script_file, "w") as sfile:
sfile.write(self.script)
cmd = cmd.replace(script_file, debug_script_file)
out = (
f", stdout={kwargs['stdout']}"
if "stdout" in kwargs
and os.path.isfile(kwargs["stdout"])
and os.path.getsize(kwargs["stdout"]) > 0
else ""
)
err = (
f", stderr={kwargs['stderr']}"
if "stderr" in kwargs
and os.path.isfile(kwargs["stderr"])
and os.path.getsize(kwargs["stderr"]) > 0
else ""
)
raise subprocess.CalledProcessError(
returncode=ret,
cmd=cmd,
stderr="\nFailed to execute ``{}``\nexitcode={}, workdir=``{}``{}{}{}\n{}".format(
cmd,
ret,
os.getcwd(),
f', task={os.path.basename(env.sos_dict["__std_err__"]).split(".")[0]}'
if "__std_err__" in env.sos_dict
else "",
out,
err,
"-" * 75,
),
)
finally:
try:
os.remove(script_file)
except Exception:
# 1315: ignore in case the temp script file no longer exists
pass
if cmd_file is not None:
try:
os.remove(cmd_file)
except Exception:
# 1315: ignore in case the temp script file no longer exists
pass
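# Illustrative sketch (not part of SoS): direct use of SoS_ExecuteScript, as
# the script/run/perl actions below do. The command is hypothetical and the
# helper is never called here, because run() would actually execute it.
def _example_execute_script():
    # mirrors the POSIX wiring of the `run` action defined below
    return SoS_ExecuteScript("echo hello", "/bin/bash", ".sh",
                             "-ev {filename:q}").run()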
@SoS_Action()
def sos_run(workflow=None, targets=None, shared=None, args=None, source=None, **kwargs):
"""Execute a workflow from the current SoS script or a specified source
(in .sos or .ipynb format), with _input as the initial input of workflow."""
if "__std_out__" in env.sos_dict and "__std_err__" in env.sos_dict:
raise RuntimeError(
"Executing nested workflow (action sos_run) in tasks is not supported."
)
if isinstance(workflow, str):
workflows = [workflow]
elif isinstance(workflow, Sequence):
workflows = list(workflow)
elif workflow is None:
workflows = []
else:
raise ValueError(
"workflow has to be None, a workflow name, or a list of workflow names"
)
if source is None:
script = SoS_Script(
env.sos_dict["__step_context__"].content,
env.sos_dict["__step_context__"].filename,
)
if workflows:
wfs = [script.workflow(wf, use_default=True) for wf in workflows]
else:
wfs = [script.workflow(use_default=False)]
else:
# reading workflow from another file
script = SoS_Script(filename=source)
if workflows:
wfs = [script.workflow(wf, use_default=True) for wf in workflows]
else:
wfs = [script.workflow(use_default=False)]
# if wf contains the current step or one of the previous ones, this constitutes
# a recursive nested workflow and should not be allowed
all_parameters = set()
for wf in wfs:
all_parameters |= set(wf.parameters())
if env.sos_dict["step_name"] in [f"{x.name}_{x.index}" for x in wf.sections]:
raise RuntimeError(
f'Nested workflow {workflow} contains the current step {env.sos_dict["step_name"]}'
)
# args can be specified both as a dictionary or keyword arguments
if args is None:
args = kwargs
else:
args.update(kwargs)
for key in args.keys():
if key not in all_parameters and key not in SOS_ACTION_OPTIONS:
raise ValueError(f"No parameter {key} is defined for workflow {workflow}")
if shared is None:
shared = []
elif isinstance(shared, str):
shared = [shared]
# for nested workflows, _input becomes the input of the workflow.
env.sos_dict.set("__step_output__", copy.deepcopy(env.sos_dict.get("_input", None)))
shared.append("__step_output__")
try:
my_name = env.sos_dict["step_name"]
args_output = ", ".join(
f"{x}={short_repr(y)}" for x, y in args.items() if not x.startswith("__")
)
if "ACTION" in env.config["SOS_DEBUG"] or "ALL" in env.config["SOS_DEBUG"]:
env.log_to_file(
"ACTION",
"Executing workflow ``{}`` with input ``{}`` and {}".format(
workflow,
short_repr(env.sos_dict.get("_input", None), True),
"no args" if not args_output else args_output,
),
)
if not hasattr(env, "__socket__") or env.__socket__ is None:
raise RuntimeError("sos_run function cannot be executed in scratch cell.")
# tell the master process to receive a workflow
# really send the workflow
shared = {x: (env.sos_dict[x] if x in env.sos_dict else None) for x in shared}
wf_ids = [str(uuid.uuid4()) for wf in wfs]
blocking = not env.sos_dict.get("__concurrent_subworkflow__", False)
env.__socket__.send(
encode_msg(
["workflow", wf_ids, wfs, targets, args, shared, env.config, blocking]
)
)
if not blocking:
return {"pending_workflows": wf_ids}
res = {}
for wf in wfs:
wf_res = decode_msg(env.__socket__.recv())
if wf_res is None:
    sys.exit(0)
elif isinstance(wf_res, Exception):
    raise wf_res
else:
    res.update(wf_res)
    env.sos_dict.quick_update(wf_res["shared"])
return res
finally:
# restore step_name in case the subworkflow re-defines it
env.sos_dict.set("step_name", my_name)
@SoS_Action(acceptable_args=["script", "interpreter", "suffix", "args"])
def script(script, interpreter="", suffix="", args="", **kwargs):
"""Execute specified script using specified interpreter. This action accepts common
action arguments such as input, active, workdir, docker_image and args. In particular,
content of one or more files specified by option input would be prepended before
the specified script."""
return SoS_ExecuteScript(script, interpreter, suffix, args).run(**kwargs)
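# Illustrative sketch (not part of SoS): the generic script action with a
# hypothetical inline script; interpreter and suffix determine how the
# temporary script file is executed.
def _example_script_action():
    return script("print('hello')", interpreter="python3", suffix=".py")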
@SoS_Action(acceptable_args=["expr", "msg"])
def fail_if(expr, msg=""):
"""Raise an exception with `msg` if condition `expr` is False"""
if expr:
raise TerminateExecution(msg if msg else "error triggered by action fail_if")
return 0
@SoS_Action(acceptable_args=["expr", "msg"])
def warn_if(expr, msg=""):
"""Yield an warning message `msg` if `expr` is False """
if expr:
env.logger.warning(msg)
return 0
@SoS_Action(acceptable_args=["expr", "msg", "no_output"])
def stop_if(expr, msg="", no_output=False):
"""Abort the execution of the current step or loop and yield
an warning message `msg` if `expr` is False"""
if expr:
raise StopInputGroup(msg=msg, keep_output=not no_output)
return 0
@SoS_Action(acceptable_args=["expr", "msg"])
def done_if(expr, msg=""):
"""Assuming that output has already been generated and stop
executing the rest of the substep"""
if expr:
raise StopInputGroup(msg=msg, keep_output=True)
return 0
@SoS_Action(acceptable_args=["expr", "msg", "no_output"])
def skip_if(expr, msg=""):
"""Skip the current substep and set _output to empty. Output
will be removed if already generated."""
if expr:
raise StopInputGroup(msg=msg, keep_output=False)
return 0
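# Illustrative sketch (not part of SoS): the condition actions above as they
# might appear in a SoS step. The conditions are hypothetical; warn_if only
# logs, while fail_if raises TerminateExecution when its condition is True.
def _example_condition_actions():
    warn_if(True, "this only logs a warning")
    fail_if(False, "never raised because the condition is False")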
#
# download file with progress bar
#
def downloadURL(URL, dest, decompress=False, index=None):
dest = os.path.abspath(os.path.expanduser(dest))
dest_dir, filename = os.path.split(dest)
#
if not os.path.isdir(dest_dir):
os.makedirs(dest_dir, exist_ok=True)
if not os.path.isdir(dest_dir):
raise RuntimeError(f"Failed to create destination directory to download {URL}")
#
message = filename
if len(message) > 30:
message = message[:10] + "..." + message[-16:]
#
dest_tmp = dest + f".tmp_{os.getpid()}"
term_width = shutil.get_terminal_size((80, 20)).columns
try:
env.logger.debug(f"Download {URL} to {dest}")
sig = file_target(dest)
if os.path.isfile(dest):
prog = ProgressBar(
desc=message,
disable=env.verbosity <= 1,
position=index,
leave=True,
bar_format="{desc}",
total=10000000,
)
target = file_target(dest)
if env.config["sig_mode"] == "build":
prog.set_description(message + ": \033[32m writing signature\033[0m")
prog.update()
target.write_sig()
prog.close()
return True
elif env.config["sig_mode"] == "ignore":
prog.set_description(message + ": \033[32m use existing\033[0m")
prog.update()
prog.close()
return True
elif env.config["sig_mode"] in ("default", "skip", "distributed"):
prog.update()
if sig.validate():
prog.set_description(message + ": \033[32m Validated\033[0m")
prog.update()
prog.close()
return True
else:
prog.set_description(
message + ":\033[91m Signature mismatch\033[0m"
)
target.write_sig()
prog.update()
#
prog = ProgressBar(
desc=message,
disable=env.verbosity <= 1,
position=index,
leave=True,
bar_format="{desc}",
total=10000000,
)
#
# Stopped using pycurl because of libcurl version compatibility problems
# that happen often and are difficult to fix. The error message looks like
#
# Reason: Incompatible library version: pycurl.cpython-35m-darwin.so
# requires version 9.0.0 or later, but libcurl.4.dylib provides version 7.0.0
#
# with open(dest_tmp, 'wb') as f:
# c = pycurl.Curl()
# c.setopt(pycurl.URL, str(URL))
# c.setopt(pycurl.WRITEFUNCTION, f.write)
# c.setopt(pycurl.SSL_VERIFYPEER, False)
# c.setopt(pycurl.NOPROGRESS, False)
# c.setopt(pycurl.PROGRESSFUNCTION, prog.curlUpdate)
# c.perform()
# if c.getinfo(pycurl.HTTP_CODE) == 404:
# prog.set_description(message + ':\033[91m 404 Error {}\033[0m'.format(' '*(term_width - len(message) - 12)))
# try:
# os.remove(dest_tmp)
# except OSError:
# pass
# return False
with open(dest_tmp, "wb") as f:
try:
u = urllib.request.urlopen(str(URL))
try:
file_size = int(u.getheader("Content-Length"))
prog = ProgressBar(
total=file_size, desc=message, position=index, leave=False
)
except Exception:
file_size = None
file_size_dl = 0
block_sz = 8192
while True:
buffer = u.read(block_sz)
if not buffer:
break
file_size_dl += len(buffer)
f.write(buffer)
prog.update(len(buffer))
except urllib.error.HTTPError as e:
prog.set_description(message + f":\033[91m {e.code} Error\033[0m")
prog.update()
prog.close()
try:
os.remove(dest_tmp)
except OSError:
pass
return False
except Exception as e:
prog.set_description(message + f":\033[91m {e}\033[0m")
prog.update()
prog.close()
try:
os.remove(dest_tmp)
except OSError:
pass
return False
#
if os.path.isfile(dest):
os.remove(dest)
os.rename(dest_tmp, dest)
decompressed = 0
if decompress:
if zipfile.is_zipfile(dest):
prog.set_description(message + ":\033[91m Decompressing\033[0m")
prog.update()
prog.close()
zfile = zipfile.ZipFile(dest)
zfile.extractall(dest_dir)
names = zfile.namelist()
for name in names:
if os.path.isdir(os.path.join(dest_dir, name)):
continue
elif not os.path.isfile(os.path.join(dest_dir, name)):
return False
else:
decompressed += 1
elif tarfile.is_tarfile(dest):
prog.set_description(message + ":\033[91m Decompressing\033[0m")
prog.update()
prog.close()
with tarfile.open(dest, "r:*") as tar:
tar.extractall(dest_dir)
# only extract files
files = [x.name for x in tar.getmembers() if x.isfile()]
for name in files:
if not os.path.isfile(os.path.join(dest_dir, name)):
return False
else:
decompressed += 1
elif dest.endswith(".gz"):
prog.set_description(message + ":\033[91m Decompressing\033[0m")
prog.update()
prog.close()
decomp = dest[:-3]
with gzip.open(dest, "rb") as fin, open(decomp, "wb") as fout:
buffer = fin.read(100000)
while buffer:
fout.write(buffer)
buffer = fin.read(100000)
decompressed += 1
decompress_msg = (
""
if not decompressed
else f' ({decompressed} file{"" if decompressed <= 1 else "s"} decompressed)'
)
prog.set_description(
message
+ f':\033[32m downloaded{decompress_msg} {" "*(term_width - len(message) - 13 - len(decompress_msg))}\033[0m'
)
prog.update()
prog.close()
# if a md5 file exists
# if downloaded files contains .md5 signature, use them to validate
# downloaded files.
if os.path.isfile(dest + ".md5"):
prog.set_description(message + ":\033[91m Verifying md5 signature\033[0m")
prog.update()
prog.close()
with open(dest + ".md5") as md5:
rec_md5 = md5.readline().split()[0].strip()
obs_md5 = fileMD5(dest, partial=False)
if rec_md5 != obs_md5:
prog.set_description(
message + ":\033[91m MD5 signature mismatch\033[0m"
)
prog.update()
prog.close()
env.logger.warning(
f"md5 signature mismatch for downloaded file {filename[:-4]} (recorded {rec_md5}, observed {obs_md5})"
)
prog.set_description(message + ":\033[91m MD5 signature verified\033[0m")
prog.update()
prog.close()
except Exception as e:
if env.verbosity > 2:
sys.stderr.write(get_traceback())
env.logger.error(f"Failed to download: {e}")
return False
finally:
# if there is something wrong still remove temporary file
if os.path.isfile(dest_tmp):
os.remove(dest_tmp)
return os.path.isfile(dest)
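# Illustrative sketch (not part of SoS): calling downloadURL directly with a
# hypothetical URL and destination. It returns True only if the destination
# file exists after the download (and optional decompression) completes.
def _example_download_url():
    return downloadURL("https://example.com/data.txt.gz",
                       "downloads/data.txt.gz", decompress=True)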
@SoS_Action(
acceptable_args=[
"URLs",
"workdir",
"dest_dir",
"dest_file",
"decompress",
"max_jobs",
]
)
def download(URLs, dest_dir=".", dest_file=None, decompress=False, max_jobs=5):
"""Download files from specified URL, which should be space, tab or
newline separated URLs. The files will be downloaded to specified destination.
Option "dest_dir" specify the destination directory,
and "dest_file" specify the output filename, which will otherwise be the same
specified in the URL. If `filename.md5` files are downloaded, they are used to
validate downloaded `filename`. If "decompress=True", compressed
files are decompressed. If `max_jobs` is given, a maximum of `max_jobs`
concurrent download jobs will be used for each domain. This restriction
applies to domain names and will be applied to multiple download
instances.
"""
if env.config["run_mode"] == "dryrun":
print(f"HINT: download\n{URLs}\n")
return None
if isinstance(URLs, str):
urls = [x.strip() for x in URLs.split() if x.strip()]
else:
urls = list(URLs)
if not urls:
env.logger.debug(f"No download URL specified: {URLs}")
return
#
if dest_file is not None and len(urls) != 1:
raise RuntimeError(
"Only one URL is allowed if a destination file is specified."
)
#
if dest_file is None:
filenames = []
for idx, url in enumerate(urls):
token = urllib.parse.urlparse(url)
# if no scheme or netloc, the URL is not acceptable
if not all(
[
getattr(token, qualifying_attr)
for qualifying_attr in ("scheme", "netloc")
]
):
raise ValueError(f"Invalid URL {url}")
filename = os.path.split(token.path)[-1]
if not filename:
raise ValueError(f"Cannot determine destination file for {url}")
filenames.append(os.path.join(dest_dir, filename))
else:
token = urllib.parse.urlparse(urls[0])
if not all(
[
getattr(token, qualifying_attr)
for qualifying_attr in ("scheme", "netloc")
]
):
raise ValueError(f"Invalid URL {url}")
filenames = [dest_file]
#
succ = [(False, None) for x in urls]
with ProcessPoolExecutor(max_workers=max_jobs) as executor:
for idx, (url, filename) in enumerate(zip(urls, filenames)):
# submit one download job per URL; the executor caps concurrency
succ[idx] = executor.submit(downloadURL, url, filename, decompress, idx)
succ = [x.result() for x in succ]
# for su, url in zip(succ, urls):
# if not su:
# env.logger.warning('Failed to download {}'.format(url))
failed = [y for x, y in zip(succ, urls) if not x]
if failed:
if len(urls) == 1:
raise RuntimeError("Failed to download {urls[0]}")
else:
raise RuntimeError(
f"Failed to download {failed[0]} ({len(failed)} out of {len(urls)})"
)
return 0
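# Illustrative sketch (not part of SoS): the download action with two
# hypothetical URLs; one job is submitted per URL, with at most max_jobs
# running concurrently.
def _example_download_action():
    return download("https://example.com/a.txt https://example.com/b.txt",
                    dest_dir="downloads", max_jobs=2)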
@SoS_Action(acceptable_args=["script", "args"])
def run(script, args="", **kwargs):
"""Execute specified script using bash. This action accepts common action arguments such as
input, active, workdir, docker_image and args. In particular, content of one or more files
specified by option input would be prepended before the specified script."""
if sys.platform == "win32":
# when there is no interpreter, the script itself is executed directly
# (this is the case for Windows)
# and we do not add default args.
interpreter = ""
else:
# without a shebang line, fall back to /bin/bash
if not script.startswith("#!"):
interpreter = "/bin/bash"
if not args:
args = "-ev {filename:q}"
else:
# execute script directly
interpreter = ""
return SoS_ExecuteScript(script, interpreter, "", args).run(**kwargs)
@SoS_Action(acceptable_args=["script", "args"])
def perl(script, args="", **kwargs):
"""Execute specified script using perl. This action accepts common action arguments such as
input, active, workdir, docker_image and args. In particular, content of one or more files
specified by option input would be prepended before the specified script."""
return SoS_ExecuteScript(script, "perl", ".pl", args).run(**kwargs)
def collect_input(script, input):
# determine file extension
if input is not None:
if isinstance(input, (str, file_target)):
ext = os.path.splitext(input)[-1]
elif isinstance(input, Sequence) and len(input) > 0:
ext = os.path.splitext(input[0])[-1]
else:
raise ValueError("Unknown input file for action pandoc")
else:
ext = ".md"
input_file = tempfile.NamedTemporaryFile(mode="w+t", suffix=ext, delete=False).name
with open(input_file, "w") as tmp:
if script is not None and script.strip():
tmp.write(script.rstrip() + "\n\n")
if isinstance(input, str):
try:
with open(input) as ifile:
tmp.write(ifile.read() + "\n\n")
except Exception as e:
raise ValueError(f"Failed to read input file {input}: {e}")
elif isinstance(input, Sequence):
for ifile in input:
try:
with open(ifile) as itmp:
tmp.write(itmp.read().rstrip() + "\n\n")
except Exception as e:
raise ValueError(f"Failed to read input file {ifile}: {e}")
return input_file
@SoS_Action(acceptable_args=["script"])
def report(script=None, input=None, output=None, **kwargs):
"""Write script to an output file specified by `output`, which can be
a filename to which the content of the script will be written,
any object with a "write" attribute (e.g. a file handle) for which the "write"
function will be called with the content. If output is unspecified, the content
will be written to standard output or appended to a file specified with command
line option `-r`."""
if env.config["run_mode"] == "dryrun":
if "__std_out__" in env.sos_dict:
with open(env.sos_dict["__std_out__"], "a") as so:
so.write(f'HINT: report:\n{"" if script is None else script}\n')
if input is not None:
for ifile in input:
so.write(f" from file: {ifile}\n")
else:
print(f'HINT: report:\n{"" if script is None else script}')
if input is not None:
for ifile in input:
print(f" from file: {ifile}")
return
file_handle = None
if isinstance(output, str):
if not output or output == "-":
writer = sys.stdout.write
elif output.startswith(">>"):
file_handle = open(os.path.expanduser(output[2:]), "a")
writer = file_handle.write
else:
file_handle = open(os.path.expanduser(output), "w")
writer = file_handle.write
elif isinstance(output, (path, file_target)):
file_handle = open(os.path.expanduser(str(output)), "w")
writer = file_handle.write
elif isinstance(output, (paths, sos_targets)):
if len(output) != 1:
raise ValueError(f"More than one output is specified {output}")
if not isinstance(output[0], (file_target, path)):
raise ValueError(
"Action report can only output to file target or standard output"
)
file_handle = open(os.path.expanduser(str(output[0])), "w")
writer = file_handle.write
elif hasattr(output, "write"):
writer = output.write
elif output is None or output == "":
writer = sys.stdout.write
else:
raise ValueError(f"Invalid output {output}.")
# file lock to prevent race condition
with TimeoutInterProcessLock(os.path.join(env.temp_dir, "report_lock")):
if isinstance(script, str) and script.strip():
writer(script.rstrip() + "\n\n")
if input is not None:
if isinstance(input, (str, file_target)):
if (
"ACTION" in env.config["SOS_DEBUG"]
or "ALL" in env.config["SOS_DEBUG"]
):
env.log_to_file("ACTION", f"Loading report from {input}")
with open(input) as ifile:
writer(ifile.read().rstrip() + "\n\n")
elif isinstance(input, Sequence):
for ifile in input:
try:
env.logger.debug(f"Loading report from {ifile}")
with open(ifile) as itmp:
writer(itmp.read().rstrip() + "\n\n")
except Exception as e:
raise ValueError(f"Failed to read input file {ifile}: {e}")
else:
raise ValueError("Unknown input file for action report")
#
if file_handle:
file_handle.close()
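# Illustrative sketch (not part of SoS): the report action writing a
# hypothetical snippet. output='report.md' truncates the file, while
# output='>>report.md' would append to it.
def _example_report_action():
    return report("## Results\nAll checks passed.", output="report.md")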
@SoS_Action(acceptable_args=["script", "args"])
def pandoc(
script=None, input=None, output=None, args="{input:q} --output {output:q}", **kwargs
):
"""Convert input file to output using pandoc
The input can be specified in three ways:
1. instant script, which is assumed to be in md format
pandoc: output='report.html'
script
2. one or more input files. The format is determined by extension of input file
pandoc(input, output='report.html')
3. input file specified by command line option `-r` .
pandoc(output='report.html')
If no output is specified, it is assumed to be in html format
and is written to standard output.
You can specify more options such as "from" and "to" by customizing
the args parameter of the action. The default value of args is
`{input:q} --output {output:q}`
"""
#
# # this is output format
# pandoc [OPTIONS] [FILES]
# Input formats: commonmark, docbook, docx, epub, haddock, html, json*, latex,
# markdown, markdown_github, markdown_mmd, markdown_phpextra,
# markdown_strict, mediawiki, native, odt, opml, org, rst, t2t,
# textile, twiki
# [ *only Pandoc's JSON version of native AST]
# Output formats: asciidoc, beamer, commonmark, context, docbook, docx, dokuwiki,
# dzslides, epub, epub3, fb2, haddock, html, html5, icml, json*,
# latex, man, markdown, markdown_github, markdown_mmd,
# markdown_phpextra, markdown_strict, mediawiki, native, odt,
# opendocument, opml, org, pdf**, plain, revealjs, rst, rtf, s5,
# slideous, slidy, tei, texinfo, textile
# [**for pdf output, use latex or beamer and -o FILENAME.pdf]
# Options:
# -f FORMAT, -r FORMAT --from=FORMAT, --read=FORMAT
# -t FORMAT, -w FORMAT --to=FORMAT, --write=FORMAT
# -o FILENAME --output=FILENAME
# --data-dir=DIRECTORY
# -R --parse-raw
# -S --smart
#
# IGNORED
#
if not executable("pandoc").target_exists():
raise RuntimeError("pandoc not found")
input = sos_targets(collect_input(script, input))
output = sos_targets(output)
if len(output) == 0:
write_to_stdout = True
output = sos_targets(
tempfile.NamedTemporaryFile(mode="w+t", suffix=".html", delete=False).name
)
else:
write_to_stdout = False
#
ret = 1
try:
p = None
cmd = interpolate(f"pandoc {args}", {"input": input, "output": output})
if "ACTION" in env.config["SOS_DEBUG"] or "ALL" in env.config["SOS_DEBUG"]:
env.log_to_file("ACTION", f'Running command "{cmd}"')
if env.config["run_mode"] == "interactive":
# need to catch output and send to python output, which will in turn be hijacked by SoS notebook
from .utils import pexpect_run
ret = pexpect_run(cmd)
else:
p = subprocess.Popen(cmd, shell=True)
ret = p.wait()
except Exception as e:
env.logger.error(str(e))
if ret != 0:
temp_file = os.path.join(".sos", f"pandoc_{os.getpid()}.md")
shutil.copyfile(input, temp_file)
cmd = interpolate(
f"pandoc {args}",
{"input": sos_targets(temp_file), "output": sos_targets(output)},
)
raise RuntimeError(
f"Failed to execute script. Please use command \n{cmd}\nunder {os.getcwd()} to test it."
)
if write_to_stdout:
with open(output[0].fullname()) as out:
sys.stdout.write(out.read())
else:
env.logger.info(f"Report saved to {output}")
try:
os.remove(input)
except Exception:
pass
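# Illustrative sketch (not part of SoS): the pandoc action rendering a
# hypothetical inline markdown snippet; it requires the pandoc executable on
# PATH and writes report.html.
def _example_pandoc_action():
    return pandoc(script="# Title\n\nSome text.", output="report.html")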
| gpl-3.0 | 1,902,920,400,745,923,300 | 39.284722 | 140 | 0.483434 | false |
karmab/kcli | kvirt/klist.py | 1 | 3492 | #!/usr/bin/env python3
# coding=utf-8
from kvirt.config import Kconfig
from kvirt.common import get_user
import json
import os
import argparse
def empty():
"""
:return:
"""
return {'_meta': {'hostvars': {}}}
class KcliInventory(object):
"""
"""
def __init__(self):
self.inventory = {}
self.read_cli_args()
config = Kconfig(quiet=True)
self.host = config.host
self.port = config.port
self.user = config.user
self.tunnel = config.tunnel
self.k = config.k
self.type = config.type
if self.k.conn is None:
os._exit(1)
# Called with `--list`.
if self.args.list:
self.inventory = self._list()
# Called with `--host [hostname]`.
elif self.args.host:
self.inventory = self.get(self.args.host)
# Called with neither --list nor --host: return an empty inventory.
else:
self.inventory = empty()
print(json.dumps(self.inventory))
# Read the command line args passed to the script.
def read_cli_args(self):
"""
"""
parser = argparse.ArgumentParser()
parser.add_argument('--list', action='store_true')
parser.add_argument('--host', action='store')
self.args = parser.parse_args()
def _list(self):
"""
:return:
"""
k = self.k
tunnel = self.tunnel
metadata = {'_meta': {'hostvars': {}}}
hostvalues = metadata['_meta']['hostvars']
for vm in k.list():
name = vm.get('name')
status = vm.get('status')
ip = vm.get('ip', '')
image = vm.get('image')
plan = vm.get('plan', 'kvirt')
if plan == '':
plan = 'kvirt'
profile = vm.get('profile', '')
if plan not in metadata:
metadata[plan] = {"hosts": [name], "vars": {"plan": plan, "profile": profile}}
else:
metadata[plan]["hosts"].append(name)
hostvalues[name] = {'status': status}
if tunnel and self.type in ['kvm', 'kubevirt']:
hostvalues[name]['ansible_ssh_common_args'] = \
"-o ProxyCommand='ssh -p %s -W %%h:%%p %s@%s'" % (self.port, self.user, self.host)
if ip != '':
hostvalues[name]['ansible_host'] = ip
if image != '':
user = get_user(image)
hostvalues[name]['ansible_user'] = user
return metadata
def get(self, name):
"""
:return:
"""
k = self.k
tunnel = self.tunnel
metadata = {}
vm = k.info(name)
for entry in ['name', 'template', 'plan', 'profile', 'ip']:
metadata[entry] = vm.get(entry)
if metadata['plan'] == '':
metadata['plan'] = 'kvirt'
if tunnel and self.type in ['kvm', 'kubevirt']:
metadata['ansible_ssh_common_args'] = \
"-o ProxyCommand='ssh -p %s -W %%h:%%p %s@%s'" % (self.port, self.user, self.host)
ip = metadata['ip']
if ip != '':
metadata['ansible_host'] = ip
template = metadata['template']
if template != '':
user = get_user(template)
metadata['ansible_user'] = user
return metadata
def main():
KcliInventory()
if __name__ == "__main__":
main()
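# Illustrative usage (shell commands shown as comments): this script follows
# the Ansible dynamic inventory protocol, e.g.
#   python klist.py --list        # full inventory as JSON, grouped by plan
#   python klist.py --host myvm   # variables for the single VM `myvm`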
| apache-2.0 | 5,411,395,601,268,033,000 | 27.859504 | 102 | 0.489977 | false |
nuagenetworks/vspk-python | vspk/v6/nusubnet.py | 1 | 58844 | # -*- coding: utf-8 -*-
#
# Copyright (c) 2015, Alcatel-Lucent Inc, 2017 Nokia
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# * Neither the name of the copyright holder nor the names of its contributors
# may be used to endorse or promote products derived from this software without
# specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY
# DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
# ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
from .fetchers import NUPATIPEntriesFetcher
from .fetchers import NUTCAsFetcher
from .fetchers import NUAddressRangesFetcher
from .fetchers import NUDefaultGatewaysFetcher
from .fetchers import NUDeploymentFailuresFetcher
from .fetchers import NUPermissionsFetcher
from .fetchers import NUVMResyncsFetcher
from .fetchers import NUMetadatasFetcher
from .fetchers import NUBGPNeighborsFetcher
from .fetchers import NUDHCPOptionsFetcher
from .fetchers import NUDHCPv6OptionsFetcher
from .fetchers import NUVirtualIPsFetcher
from .fetchers import NUIKEGatewayConnectionsFetcher
from .fetchers import NUGlobalMetadatasFetcher
from .fetchers import NUVMsFetcher
from .fetchers import NUVMInterfacesFetcher
from .fetchers import NUVMIPReservationsFetcher
from .fetchers import NUEnterprisePermissionsFetcher
from .fetchers import NUContainersFetcher
from .fetchers import NUContainerInterfacesFetcher
from .fetchers import NUContainerResyncsFetcher
from .fetchers import NUQOSsFetcher
from .fetchers import NUVPortsFetcher
from .fetchers import NUIPReservationsFetcher
from .fetchers import NUProxyARPFiltersFetcher
from .fetchers import NUStatisticsFetcher
from .fetchers import NUStatisticsPoliciesFetcher
from .fetchers import NUEventLogsFetcher
from bambou import NURESTObject
class NUSubnet(NURESTObject):
""" Represents a Subnet in the VSD
Notes:
This is the definition of a subnet associated with a Zone.
"""
__rest_name__ = "subnet"
__resource_name__ = "subnets"
## Constants
CONST_PAT_ENABLED_DISABLED = "DISABLED"
CONST_PAT_ENABLED_INHERITED = "INHERITED"
CONST_USE_GLOBAL_MAC_DISABLED = "DISABLED"
CONST_RESOURCE_TYPE_FLOATING = "FLOATING"
CONST_RESOURCE_TYPE_NSG_VNF = "NSG_VNF"
CONST_DPI_ENABLED = "ENABLED"
CONST_DHCP_RELAY_STATUS_DISABLED = "DISABLED"
CONST_IP_TYPE_IPV6 = "IPV6"
CONST_DPI_INHERITED = "INHERITED"
CONST_IP_TYPE_IPV4 = "IPV4"
CONST_UNDERLAY_ENABLED_ENABLED = "ENABLED"
CONST_MAINTENANCE_MODE_DISABLED = "DISABLED"
CONST_RESOURCE_TYPE_STANDARD = "STANDARD"
CONST_USE_GLOBAL_MAC_ENABLED = "ENABLED"
CONST_MAINTENANCE_MODE_ENABLED = "ENABLED"
CONST_L2_ENCAP_TYPE_VLAN = "VLAN"
CONST_RESOURCE_TYPE_PUBLIC = "PUBLIC"
CONST_UNDERLAY_ENABLED_INHERITED = "INHERITED"
CONST_L2_ENCAP_TYPE_VXLAN = "VXLAN"
CONST_USE_GLOBAL_MAC_ENTERPRISE_DEFAULT = "ENTERPRISE_DEFAULT"
CONST_ENCRYPTION_INHERITED = "INHERITED"
CONST_ENTITY_STATE_UNDER_CONSTRUCTION = "UNDER_CONSTRUCTION"
CONST_PAT_ENABLED_ENABLED = "ENABLED"
CONST_MULTICAST_ENABLED = "ENABLED"
CONST_MULTICAST_INHERITED = "INHERITED"
CONST_L2_ENCAP_TYPE_MPLSOUDP = "MPLSoUDP"
CONST_ENTITY_SCOPE_ENTERPRISE = "ENTERPRISE"
CONST_DHCP_RELAY_STATUS_ENABLED = "ENABLED"
CONST_MULTICAST_DISABLED = "DISABLED"
CONST_ENTITY_SCOPE_GLOBAL = "GLOBAL"
CONST_L2_ENCAP_TYPE_MPLS = "MPLS"
CONST_ENCRYPTION_DISABLED = "DISABLED"
CONST_DPI_DISABLED = "DISABLED"
CONST_MAINTENANCE_MODE_ENABLED_INHERITED = "ENABLED_INHERITED"
CONST_ENCRYPTION_ENABLED = "ENABLED"
CONST_IP_TYPE_DUALSTACK = "DUALSTACK"
CONST_ENTITY_STATE_MARKED_FOR_DELETION = "MARKED_FOR_DELETION"
CONST_UNDERLAY_ENABLED_DISABLED = "DISABLED"
def __init__(self, **kwargs):
""" Initializes a Subnet instance
Notes:
You can specify all parameters while calling this methods.
A special argument named `data` will enable you to load the
object from a Python dictionary
Examples:
>>> subnet = NUSubnet(id=u'xxxx-xxx-xxx-xxx', name=u'Subnet')
>>> subnet = NUSubnet(data=my_dict)
"""
super(NUSubnet, self).__init__()
# Read/Write Attributes
self._l2_encap_type = None
self._pat_enabled = None
self._dhcp_relay_status = None
self._dpi = None
self._ip_type = None
self._ipv6_address = None
self._ipv6_gateway = None
self._evpn_enabled = None
self._maintenance_mode = None
self._name = None
self._last_updated_by = None
self._last_updated_date = None
self._gateway = None
self._gateway_mac_address = None
self._access_restriction_enabled = None
self._address = None
self._advertise = None
self._template_id = None
self._service_id = None
self._description = None
self._resource_type = None
self._netmask = None
self._link_local_address = None
self._embedded_metadata = None
self._vn_id = None
self._enable_dhcpv4 = None
self._enable_dhcpv6 = None
self._encryption = None
self._underlay = None
self._underlay_enabled = None
self._ingress_replication_enabled = None
self._interface_id = None
self._entity_scope = None
self._entity_state = None
self._policy_group_id = None
self._color = None
self._domain_service_label = None
self._route_distinguisher = None
self._route_target = None
self._split_subnet = None
self._irb_sub_interface_id = None
self._creation_date = None
self._proxy_arp = None
self._vrrp_ipv6_backup_address = None
self._use_global_mac = None
self._associated_multicast_channel_map_id = None
self._associated_shared_network_resource_id = None
self._dual_stack_dynamic_ip_allocation = None
self._public = None
self._subnet_vlanid = None
self._multi_home_enabled = None
self._multicast = None
self._customer_id = None
self._owner = None
self._external_id = None
self.expose_attribute(local_name="l2_encap_type", remote_name="l2EncapType", attribute_type=str, is_required=False, is_unique=False, choices=[u'MPLS', u'MPLSoUDP', u'VLAN', u'VXLAN'])
self.expose_attribute(local_name="pat_enabled", remote_name="PATEnabled", attribute_type=str, is_required=False, is_unique=False, choices=[u'DISABLED', u'ENABLED', u'INHERITED'])
self.expose_attribute(local_name="dhcp_relay_status", remote_name="DHCPRelayStatus", attribute_type=str, is_required=False, is_unique=False, choices=[u'DISABLED', u'ENABLED'])
self.expose_attribute(local_name="dpi", remote_name="DPI", attribute_type=str, is_required=False, is_unique=False, choices=[u'DISABLED', u'ENABLED', u'INHERITED'])
self.expose_attribute(local_name="ip_type", remote_name="IPType", attribute_type=str, is_required=False, is_unique=False, choices=[u'DUALSTACK', u'IPV4', u'IPV6'])
self.expose_attribute(local_name="ipv6_address", remote_name="IPv6Address", attribute_type=str, is_required=False, is_unique=False)
self.expose_attribute(local_name="ipv6_gateway", remote_name="IPv6Gateway", attribute_type=str, is_required=False, is_unique=False)
self.expose_attribute(local_name="evpn_enabled", remote_name="EVPNEnabled", attribute_type=bool, is_required=False, is_unique=False)
self.expose_attribute(local_name="maintenance_mode", remote_name="maintenanceMode", attribute_type=str, is_required=False, is_unique=False, choices=[u'DISABLED', u'ENABLED', u'ENABLED_INHERITED'])
self.expose_attribute(local_name="name", remote_name="name", attribute_type=str, is_required=True, is_unique=False)
self.expose_attribute(local_name="last_updated_by", remote_name="lastUpdatedBy", attribute_type=str, is_required=False, is_unique=False)
self.expose_attribute(local_name="last_updated_date", remote_name="lastUpdatedDate", attribute_type=str, is_required=False, is_unique=False)
self.expose_attribute(local_name="gateway", remote_name="gateway", attribute_type=str, is_required=False, is_unique=False)
self.expose_attribute(local_name="gateway_mac_address", remote_name="gatewayMACAddress", attribute_type=str, is_required=False, is_unique=False)
self.expose_attribute(local_name="access_restriction_enabled", remote_name="accessRestrictionEnabled", attribute_type=bool, is_required=False, is_unique=False)
self.expose_attribute(local_name="address", remote_name="address", attribute_type=str, is_required=False, is_unique=False)
self.expose_attribute(local_name="advertise", remote_name="advertise", attribute_type=bool, is_required=False, is_unique=False)
self.expose_attribute(local_name="template_id", remote_name="templateID", attribute_type=str, is_required=False, is_unique=False)
self.expose_attribute(local_name="service_id", remote_name="serviceID", attribute_type=int, is_required=False, is_unique=False)
self.expose_attribute(local_name="description", remote_name="description", attribute_type=str, is_required=False, is_unique=False)
self.expose_attribute(local_name="resource_type", remote_name="resourceType", attribute_type=str, is_required=False, is_unique=False, choices=[u'FLOATING', u'NSG_VNF', u'PUBLIC', u'STANDARD'])
self.expose_attribute(local_name="netmask", remote_name="netmask", attribute_type=str, is_required=False, is_unique=False)
self.expose_attribute(local_name="link_local_address", remote_name="linkLocalAddress", attribute_type=str, is_required=False, is_unique=False)
self.expose_attribute(local_name="embedded_metadata", remote_name="embeddedMetadata", attribute_type=list, is_required=False, is_unique=False)
self.expose_attribute(local_name="vn_id", remote_name="vnId", attribute_type=int, is_required=False, is_unique=False)
self.expose_attribute(local_name="enable_dhcpv4", remote_name="enableDHCPv4", attribute_type=bool, is_required=False, is_unique=False)
self.expose_attribute(local_name="enable_dhcpv6", remote_name="enableDHCPv6", attribute_type=bool, is_required=False, is_unique=False)
self.expose_attribute(local_name="encryption", remote_name="encryption", attribute_type=str, is_required=False, is_unique=False, choices=[u'DISABLED', u'ENABLED', u'INHERITED'])
self.expose_attribute(local_name="underlay", remote_name="underlay", attribute_type=bool, is_required=False, is_unique=False)
self.expose_attribute(local_name="underlay_enabled", remote_name="underlayEnabled", attribute_type=str, is_required=False, is_unique=False, choices=[u'DISABLED', u'ENABLED', u'INHERITED'])
self.expose_attribute(local_name="ingress_replication_enabled", remote_name="ingressReplicationEnabled", attribute_type=bool, is_required=False, is_unique=False)
self.expose_attribute(local_name="interface_id", remote_name="interfaceID", attribute_type=int, is_required=False, is_unique=False)
self.expose_attribute(local_name="entity_scope", remote_name="entityScope", attribute_type=str, is_required=False, is_unique=False, choices=[u'ENTERPRISE', u'GLOBAL'])
self.expose_attribute(local_name="entity_state", remote_name="entityState", attribute_type=str, is_required=False, is_unique=False, choices=[u'MARKED_FOR_DELETION', u'UNDER_CONSTRUCTION'])
self.expose_attribute(local_name="policy_group_id", remote_name="policyGroupID", attribute_type=int, is_required=False, is_unique=False)
self.expose_attribute(local_name="color", remote_name="color", attribute_type=int, is_required=False, is_unique=False)
self.expose_attribute(local_name="domain_service_label", remote_name="domainServiceLabel", attribute_type=str, is_required=False, is_unique=False)
self.expose_attribute(local_name="route_distinguisher", remote_name="routeDistinguisher", attribute_type=str, is_required=False, is_unique=False)
self.expose_attribute(local_name="route_target", remote_name="routeTarget", attribute_type=str, is_required=False, is_unique=False)
self.expose_attribute(local_name="split_subnet", remote_name="splitSubnet", attribute_type=bool, is_required=False, is_unique=False)
self.expose_attribute(local_name="irb_sub_interface_id", remote_name="irbSubInterfaceID", attribute_type=int, is_required=False, is_unique=False)
self.expose_attribute(local_name="creation_date", remote_name="creationDate", attribute_type=str, is_required=False, is_unique=False)
self.expose_attribute(local_name="proxy_arp", remote_name="proxyARP", attribute_type=bool, is_required=False, is_unique=False)
self.expose_attribute(local_name="vrrp_ipv6_backup_address", remote_name="vrrpIPv6BackupAddress", attribute_type=str, is_required=False, is_unique=False)
self.expose_attribute(local_name="use_global_mac", remote_name="useGlobalMAC", attribute_type=str, is_required=False, is_unique=False, choices=[u'DISABLED', u'ENABLED', u'ENTERPRISE_DEFAULT'])
self.expose_attribute(local_name="associated_multicast_channel_map_id", remote_name="associatedMulticastChannelMapID", attribute_type=str, is_required=False, is_unique=False)
self.expose_attribute(local_name="associated_shared_network_resource_id", remote_name="associatedSharedNetworkResourceID", attribute_type=str, is_required=False, is_unique=False)
self.expose_attribute(local_name="dual_stack_dynamic_ip_allocation", remote_name="dualStackDynamicIPAllocation", attribute_type=bool, is_required=False, is_unique=False)
self.expose_attribute(local_name="public", remote_name="public", attribute_type=bool, is_required=False, is_unique=False)
self.expose_attribute(local_name="subnet_vlanid", remote_name="subnetVLANID", attribute_type=int, is_required=False, is_unique=False)
self.expose_attribute(local_name="multi_home_enabled", remote_name="multiHomeEnabled", attribute_type=bool, is_required=False, is_unique=False)
self.expose_attribute(local_name="multicast", remote_name="multicast", attribute_type=str, is_required=False, is_unique=False, choices=[u'DISABLED', u'ENABLED', u'INHERITED'])
self.expose_attribute(local_name="customer_id", remote_name="customerID", attribute_type=int, is_required=False, is_unique=False)
self.expose_attribute(local_name="owner", remote_name="owner", attribute_type=str, is_required=False, is_unique=False)
self.expose_attribute(local_name="external_id", remote_name="externalID", attribute_type=str, is_required=False, is_unique=True)
# Fetchers
self.patip_entries = NUPATIPEntriesFetcher.fetcher_with_object(parent_object=self, relationship="child")
self.tcas = NUTCAsFetcher.fetcher_with_object(parent_object=self, relationship="child")
self.address_ranges = NUAddressRangesFetcher.fetcher_with_object(parent_object=self, relationship="child")
self.default_gateways = NUDefaultGatewaysFetcher.fetcher_with_object(parent_object=self, relationship="child")
self.deployment_failures = NUDeploymentFailuresFetcher.fetcher_with_object(parent_object=self, relationship="child")
self.permissions = NUPermissionsFetcher.fetcher_with_object(parent_object=self, relationship="child")
self.vm_resyncs = NUVMResyncsFetcher.fetcher_with_object(parent_object=self, relationship="child")
self.metadatas = NUMetadatasFetcher.fetcher_with_object(parent_object=self, relationship="child")
self.bgp_neighbors = NUBGPNeighborsFetcher.fetcher_with_object(parent_object=self, relationship="child")
self.dhcp_options = NUDHCPOptionsFetcher.fetcher_with_object(parent_object=self, relationship="child")
self.dhcpv6_options = NUDHCPv6OptionsFetcher.fetcher_with_object(parent_object=self, relationship="child")
self.virtual_ips = NUVirtualIPsFetcher.fetcher_with_object(parent_object=self, relationship="child")
self.ike_gateway_connections = NUIKEGatewayConnectionsFetcher.fetcher_with_object(parent_object=self, relationship="member")
self.global_metadatas = NUGlobalMetadatasFetcher.fetcher_with_object(parent_object=self, relationship="child")
self.vms = NUVMsFetcher.fetcher_with_object(parent_object=self, relationship="child")
self.vm_interfaces = NUVMInterfacesFetcher.fetcher_with_object(parent_object=self, relationship="child")
self.vmip_reservations = NUVMIPReservationsFetcher.fetcher_with_object(parent_object=self, relationship="child")
self.enterprise_permissions = NUEnterprisePermissionsFetcher.fetcher_with_object(parent_object=self, relationship="child")
self.containers = NUContainersFetcher.fetcher_with_object(parent_object=self, relationship="child")
self.container_interfaces = NUContainerInterfacesFetcher.fetcher_with_object(parent_object=self, relationship="child")
self.container_resyncs = NUContainerResyncsFetcher.fetcher_with_object(parent_object=self, relationship="child")
self.qoss = NUQOSsFetcher.fetcher_with_object(parent_object=self, relationship="child")
self.vports = NUVPortsFetcher.fetcher_with_object(parent_object=self, relationship="child")
self.ip_reservations = NUIPReservationsFetcher.fetcher_with_object(parent_object=self, relationship="child")
self.proxy_arp_filters = NUProxyARPFiltersFetcher.fetcher_with_object(parent_object=self, relationship="child")
self.statistics = NUStatisticsFetcher.fetcher_with_object(parent_object=self, relationship="child")
self.statistics_policies = NUStatisticsPoliciesFetcher.fetcher_with_object(parent_object=self, relationship="child")
self.event_logs = NUEventLogsFetcher.fetcher_with_object(parent_object=self, relationship="child")
self._compute_args(**kwargs)
# Properties
@property
def l2_encap_type(self):
""" Get l2_encap_type value.
Notes:
Subnet Tunnel Type, possible values are MPLS, MPLSoUDP, VLAN and VXLAN.
This attribute is named `l2EncapType` in VSD API.
"""
return self._l2_encap_type
@l2_encap_type.setter
def l2_encap_type(self, value):
""" Set l2_encap_type value.
Notes:
Subnet Tunnel Type, possible values are MPLS, MPLSoUDP, VLAN and VXLAN.
This attribute is named `l2EncapType` in VSD API.
"""
self._l2_encap_type = value
@property
def pat_enabled(self):
""" Get pat_enabled value.
Notes:
Indicates whether PAT is enabled for the subnets in this domain. Possible values are ENABLED, DISABLED, or INHERITED.
This attribute is named `PATEnabled` in VSD API.
"""
return self._pat_enabled
@pat_enabled.setter
def pat_enabled(self, value):
""" Set pat_enabled value.
Notes:
Indicates whether PAT is enabled for the subnets in this domain. Possible values are ENABLED, DISABLED, or INHERITED.
This attribute is named `PATEnabled` in VSD API.
"""
self._pat_enabled = value
@property
def dhcp_relay_status(self):
""" Get dhcp_relay_status value.
Notes:
None
This attribute is named `DHCPRelayStatus` in VSD API.
"""
return self._dhcp_relay_status
@dhcp_relay_status.setter
def dhcp_relay_status(self, value):
""" Set dhcp_relay_status value.
Notes:
None
This attribute is named `DHCPRelayStatus` in VSD API.
"""
self._dhcp_relay_status = value
@property
def dpi(self):
""" Get dpi value.
Notes:
determines whether or not Deep packet inspection is enabled
This attribute is named `DPI` in VSD API.
"""
return self._dpi
@dpi.setter
def dpi(self, value):
""" Set dpi value.
Notes:
determines whether or not Deep packet inspection is enabled
This attribute is named `DPI` in VSD API.
"""
self._dpi = value
@property
def ip_type(self):
""" Get ip_type value.
Notes:
IPv4, DUALSTACK or IPv6
This attribute is named `IPType` in VSD API.
"""
return self._ip_type
@ip_type.setter
def ip_type(self, value):
""" Set ip_type value.
Notes:
IPv4, DUALSTACK or IPv6
This attribute is named `IPType` in VSD API.
"""
self._ip_type = value
@property
def ipv6_address(self):
""" Get ipv6_address value.
Notes:
IP address of the subnet defined. In the case of a zone, this is an optional field and allows users to allocate an IP address range to a zone. The VSD will auto-assign IP addresses to subnets from this range if a specific IP address is not defined for the subnet
This attribute is named `IPv6Address` in VSD API.
"""
return self._ipv6_address
@ipv6_address.setter
def ipv6_address(self, value):
""" Set ipv6_address value.
Notes:
IP address of the subnet defined. In the case of a zone, this is an optional field and allows users to allocate an IP address range to a zone. The VSD will auto-assign IP addresses to subnets from this range if a specific IP address is not defined for the subnet
This attribute is named `IPv6Address` in VSD API.
"""
self._ipv6_address = value
@property
def ipv6_gateway(self):
""" Get ipv6_gateway value.
Notes:
The IPv6 address of the gateway of this subnet
This attribute is named `IPv6Gateway` in VSD API.
"""
return self._ipv6_gateway
@ipv6_gateway.setter
def ipv6_gateway(self, value):
""" Set ipv6_gateway value.
Notes:
The IPv6 address of the gateway of this subnet
This attribute is named `IPv6Gateway` in VSD API.
"""
self._ipv6_gateway = value
@property
def evpn_enabled(self):
""" Get evpn_enabled value.
Notes:
Indicates if EVPN capabilities are enabled for this subnet.
This attribute is named `EVPNEnabled` in VSD API.
"""
return self._evpn_enabled
@evpn_enabled.setter
def evpn_enabled(self, value):
""" Set evpn_enabled value.
Notes:
Indicates if EVPN capabilities are enabled for this subnet.
This attribute is named `EVPNEnabled` in VSD API.
"""
self._evpn_enabled = value
@property
def maintenance_mode(self):
""" Get maintenance_mode value.
Notes:
maintenanceMode is an enum that indicates if the SubNetwork is accepting VM activation requests.
This attribute is named `maintenanceMode` in VSD API.
"""
return self._maintenance_mode
@maintenance_mode.setter
def maintenance_mode(self, value):
""" Set maintenance_mode value.
Notes:
maintenanceMode is an enum that indicates if the SubNetwork is accepting VM activation requests.
This attribute is named `maintenanceMode` in VSD API.
"""
self._maintenance_mode = value
@property
def name(self):
""" Get name value.
Notes:
Name of the current entity (zone, zone template, subnet, etc.). Valid characters are alphabets, numbers, space and hyphen ( - ).
"""
return self._name
@name.setter
def name(self, value):
""" Set name value.
Notes:
Name of the current entity (zone, zone template, subnet, etc.). Valid characters are alphabets, numbers, space and hyphen ( - ).
"""
self._name = value
@property
def last_updated_by(self):
""" Get last_updated_by value.
Notes:
ID of the user who last updated the object.
This attribute is named `lastUpdatedBy` in VSD API.
"""
return self._last_updated_by
@last_updated_by.setter
def last_updated_by(self, value):
""" Set last_updated_by value.
Notes:
ID of the user who last updated the object.
This attribute is named `lastUpdatedBy` in VSD API.
"""
self._last_updated_by = value
@property
def last_updated_date(self):
""" Get last_updated_date value.
Notes:
Time stamp when this object was last updated.
This attribute is named `lastUpdatedDate` in VSD API.
"""
return self._last_updated_date
@last_updated_date.setter
def last_updated_date(self, value):
""" Set last_updated_date value.
Notes:
Time stamp when this object was last updated.
This attribute is named `lastUpdatedDate` in VSD API.
"""
self._last_updated_date = value
@property
def gateway(self):
""" Get gateway value.
Notes:
The IP address of the gateway of this subnet
"""
return self._gateway
@gateway.setter
def gateway(self, value):
""" Set gateway value.
Notes:
The IP address of the gateway of this subnet
"""
self._gateway = value
@property
def gateway_mac_address(self):
""" Get gateway_mac_address value.
Notes:
None
This attribute is named `gatewayMACAddress` in VSD API.
"""
return self._gateway_mac_address
@gateway_mac_address.setter
def gateway_mac_address(self, value):
""" Set gateway_mac_address value.
Notes:
None
This attribute is named `gatewayMACAddress` in VSD API.
"""
self._gateway_mac_address = value
@property
def access_restriction_enabled(self):
""" Get access_restriction_enabled value.
Notes:
This attribute specifies whether subnet is enabled with access restrictions. Note: Applicable to shared infrastructure enterprise subnets.
This attribute is named `accessRestrictionEnabled` in VSD API.
"""
return self._access_restriction_enabled
@access_restriction_enabled.setter
def access_restriction_enabled(self, value):
""" Set access_restriction_enabled value.
Notes:
This attribute specifies whether subnet is enabled with access restrictions. Note: Applicable to shared infrastructure enterprise subnets.
This attribute is named `accessRestrictionEnabled` in VSD API.
"""
self._access_restriction_enabled = value
@property
def address(self):
""" Get address value.
Notes:
                IP address of the subnet defined. In the case of a zone, this is an optional field that allows users to allocate an IP address range to the zone. The VSD will auto-assign IP addresses to subnets from this range if a specific IP address is not defined for the subnet.
"""
return self._address
@address.setter
def address(self, value):
""" Set address value.
Notes:
                IP address of the subnet defined. In the case of a zone, this is an optional field that allows users to allocate an IP address range to the zone. The VSD will auto-assign IP addresses to subnets from this range if a specific IP address is not defined for the subnet.
"""
self._address = value
@property
def advertise(self):
""" Get advertise value.
Notes:
Subnet will be advertised in Overlay and WAN BGP
"""
return self._advertise
@advertise.setter
def advertise(self, value):
""" Set advertise value.
Notes:
Subnet will be advertised in Overlay and WAN BGP
"""
self._advertise = value
@property
def template_id(self):
""" Get template_id value.
Notes:
The ID of the subnet template that this subnet object was derived from
This attribute is named `templateID` in VSD API.
"""
return self._template_id
@template_id.setter
def template_id(self, value):
""" Set template_id value.
Notes:
The ID of the subnet template that this subnet object was derived from
This attribute is named `templateID` in VSD API.
"""
self._template_id = value
@property
def service_id(self):
""" Get service_id value.
Notes:
The service ID used by the VSCs to identify this subnet
This attribute is named `serviceID` in VSD API.
"""
return self._service_id
@service_id.setter
def service_id(self, value):
""" Set service_id value.
Notes:
The service ID used by the VSCs to identify this subnet
This attribute is named `serviceID` in VSD API.
"""
self._service_id = value
@property
def description(self):
""" Get description value.
Notes:
A description field provided by the user that identifies the subnet
"""
return self._description
@description.setter
def description(self, value):
""" Set description value.
Notes:
A description field provided by the user that identifies the subnet
"""
self._description = value
@property
def resource_type(self):
""" Get resource_type value.
Notes:
                Defines the type of the subnet: PUBLIC, FLOATING, STANDARD or NSG_VNF.
This attribute is named `resourceType` in VSD API.
"""
return self._resource_type
@resource_type.setter
def resource_type(self, value):
""" Set resource_type value.
Notes:
                Defines the type of the subnet: PUBLIC, FLOATING, STANDARD or NSG_VNF.
This attribute is named `resourceType` in VSD API.
"""
self._resource_type = value
@property
def netmask(self):
""" Get netmask value.
Notes:
Netmask of the subnet defined
"""
return self._netmask
@netmask.setter
def netmask(self, value):
""" Set netmask value.
Notes:
Netmask of the subnet defined
"""
self._netmask = value
@property
def link_local_address(self):
""" Get link_local_address value.
Notes:
IPv6 unicast address. Valid range is fe80::/64.
This attribute is named `linkLocalAddress` in VSD API.
"""
return self._link_local_address
@link_local_address.setter
def link_local_address(self, value):
""" Set link_local_address value.
Notes:
IPv6 unicast address. Valid range is fe80::/64.
This attribute is named `linkLocalAddress` in VSD API.
"""
self._link_local_address = value
@property
def embedded_metadata(self):
""" Get embedded_metadata value.
Notes:
                Metadata objects associated with this entity. This will contain a list of Metadata objects if the API request is made using the special flag to enable the embedded Metadata feature. Only a limited number of Metadata objects is returned, based on the value set in the system configuration.
This attribute is named `embeddedMetadata` in VSD API.
"""
return self._embedded_metadata
@embedded_metadata.setter
def embedded_metadata(self, value):
""" Set embedded_metadata value.
Notes:
                Metadata objects associated with this entity. This will contain a list of Metadata objects if the API request is made using the special flag to enable the embedded Metadata feature. Only a limited number of Metadata objects is returned, based on the value set in the system configuration.
This attribute is named `embeddedMetadata` in VSD API.
"""
self._embedded_metadata = value
@property
def vn_id(self):
""" Get vn_id value.
Notes:
Current Network's globally unique VXLAN network identifier generated by VSD
This attribute is named `vnId` in VSD API.
"""
return self._vn_id
@vn_id.setter
def vn_id(self, value):
""" Set vn_id value.
Notes:
Current Network's globally unique VXLAN network identifier generated by VSD
This attribute is named `vnId` in VSD API.
"""
self._vn_id = value
@property
def enable_dhcpv4(self):
""" Get enable_dhcpv4 value.
Notes:
                Turn on or off DHCP for IPv4 addresses.
This attribute is named `enableDHCPv4` in VSD API.
"""
return self._enable_dhcpv4
@enable_dhcpv4.setter
def enable_dhcpv4(self, value):
""" Set enable_dhcpv4 value.
Notes:
                Turn on or off DHCP for IPv4 addresses.
This attribute is named `enableDHCPv4` in VSD API.
"""
self._enable_dhcpv4 = value
@property
def enable_dhcpv6(self):
""" Get enable_dhcpv6 value.
Notes:
                Turn on or off DHCP for IPv6 addresses.
This attribute is named `enableDHCPv6` in VSD API.
"""
return self._enable_dhcpv6
@enable_dhcpv6.setter
def enable_dhcpv6(self, value):
""" Set enable_dhcpv6 value.
Notes:
                Turn on or off DHCP for IPv6 addresses.
This attribute is named `enableDHCPv6` in VSD API.
"""
self._enable_dhcpv6 = value
@property
def encryption(self):
""" Get encryption value.
Notes:
Determines whether or not IPSEC is enabled.
"""
return self._encryption
@encryption.setter
def encryption(self, value):
""" Set encryption value.
Notes:
Determines whether or not IPSEC is enabled.
"""
self._encryption = value
@property
def underlay(self):
""" Get underlay value.
Notes:
Read Only Boolean flag to indicate whether underlay is enabled directly or indirectly
"""
return self._underlay
@underlay.setter
def underlay(self, value):
""" Set underlay value.
Notes:
Read Only Boolean flag to indicate whether underlay is enabled directly or indirectly
"""
self._underlay = value
@property
def underlay_enabled(self):
""" Get underlay_enabled value.
Notes:
Indicates whether UNDERLAY is enabled for the subnets in this domain
This attribute is named `underlayEnabled` in VSD API.
"""
return self._underlay_enabled
@underlay_enabled.setter
def underlay_enabled(self, value):
""" Set underlay_enabled value.
Notes:
Indicates whether UNDERLAY is enabled for the subnets in this domain
This attribute is named `underlayEnabled` in VSD API.
"""
self._underlay_enabled = value
@property
def ingress_replication_enabled(self):
""" Get ingress_replication_enabled value.
Notes:
Enables ingress replication for the VNI.
This attribute is named `ingressReplicationEnabled` in VSD API.
"""
return self._ingress_replication_enabled
@ingress_replication_enabled.setter
def ingress_replication_enabled(self, value):
""" Set ingress_replication_enabled value.
Notes:
Enables ingress replication for the VNI.
This attribute is named `ingressReplicationEnabled` in VSD API.
"""
self._ingress_replication_enabled = value
@property
def interface_id(self):
""" Get interface_id value.
Notes:
SRLinux Interface ID for Subnet configuration
This attribute is named `interfaceID` in VSD API.
"""
return self._interface_id
@interface_id.setter
def interface_id(self, value):
""" Set interface_id value.
Notes:
SRLinux Interface ID for Subnet configuration
This attribute is named `interfaceID` in VSD API.
"""
self._interface_id = value
@property
def entity_scope(self):
""" Get entity_scope value.
Notes:
Specify if scope of entity is Data center or Enterprise level
This attribute is named `entityScope` in VSD API.
"""
return self._entity_scope
@entity_scope.setter
def entity_scope(self, value):
""" Set entity_scope value.
Notes:
Specify if scope of entity is Data center or Enterprise level
This attribute is named `entityScope` in VSD API.
"""
self._entity_scope = value
@property
def entity_state(self):
""" Get entity_state value.
Notes:
Intermediate State of L2 Domain.
This attribute is named `entityState` in VSD API.
"""
return self._entity_state
@entity_state.setter
def entity_state(self, value):
""" Set entity_state value.
Notes:
Intermediate State of L2 Domain.
This attribute is named `entityState` in VSD API.
"""
self._entity_state = value
@property
def policy_group_id(self):
""" Get policy_group_id value.
Notes:
PG ID for the subnet. This is unique per domain and will be in the range 1-4095
This attribute is named `policyGroupID` in VSD API.
"""
return self._policy_group_id
@policy_group_id.setter
def policy_group_id(self, value):
""" Set policy_group_id value.
Notes:
PG ID for the subnet. This is unique per domain and will be in the range 1-4095
This attribute is named `policyGroupID` in VSD API.
"""
self._policy_group_id = value
@property
def color(self):
""" Get color value.
Notes:
The color encoded with a traffic engineering constraint such as minimum latency, hops, maximum bandwidth, etc. This is used for NFIX(Network Function Interconnect). Color is applicable only when the associated Domain's Tunnel Type is MPLSoUDP. Valid range is 1 - 4294967295. If 0 is provided, color will be derived from the associated Domain.
"""
return self._color
@color.setter
def color(self, value):
""" Set color value.
Notes:
The color encoded with a traffic engineering constraint such as minimum latency, hops, maximum bandwidth, etc. This is used for NFIX(Network Function Interconnect). Color is applicable only when the associated Domain's Tunnel Type is MPLSoUDP. Valid range is 1 - 4294967295. If 0 is provided, color will be derived from the associated Domain.
"""
self._color = value
@property
def domain_service_label(self):
""" Get domain_service_label value.
Notes:
Service ID or external label given to Domain
This attribute is named `domainServiceLabel` in VSD API.
"""
return self._domain_service_label
@domain_service_label.setter
def domain_service_label(self, value):
""" Set domain_service_label value.
Notes:
Service ID or external label given to Domain
This attribute is named `domainServiceLabel` in VSD API.
"""
self._domain_service_label = value
@property
def route_distinguisher(self):
""" Get route_distinguisher value.
Notes:
Route distinguisher for this subnet that is used by the BGP-EVPN protocol in VSC. Supported formats are: [2-byte ASN]:[4-byte value] or [4-byte ASN]:[2-byte value]
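                Example (illustrative, not from the VSD documentation): "65000:100" follows the [2-byte ASN]:[4-byte value] form.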
This attribute is named `routeDistinguisher` in VSD API.
"""
return self._route_distinguisher
@route_distinguisher.setter
def route_distinguisher(self, value):
""" Set route_distinguisher value.
Notes:
Route distinguisher for this subnet that is used by the BGP-EVPN protocol in VSC. Supported formats are: [2-byte ASN]:[4-byte value] or [4-byte ASN]:[2-byte value]
This attribute is named `routeDistinguisher` in VSD API.
"""
self._route_distinguisher = value
@property
def route_target(self):
""" Get route_target value.
Notes:
Route target for this subnet that is used by the BGP-EVPN protocol in VSC. Supported formats are: [2-byte ASN]:[4-byte value] or [4-byte ASN]:[2-byte value]
This attribute is named `routeTarget` in VSD API.
"""
return self._route_target
@route_target.setter
def route_target(self, value):
""" Set route_target value.
Notes:
Route target for this subnet that is used by the BGP-EVPN protocol in VSC. Supported formats are: [2-byte ASN]:[4-byte value] or [4-byte ASN]:[2-byte value]
This attribute is named `routeTarget` in VSD API.
"""
self._route_target = value
@property
def split_subnet(self):
""" Get split_subnet value.
Notes:
Block subnet routes
This attribute is named `splitSubnet` in VSD API.
"""
return self._split_subnet
@split_subnet.setter
def split_subnet(self, value):
""" Set split_subnet value.
Notes:
Block subnet routes
This attribute is named `splitSubnet` in VSD API.
"""
self._split_subnet = value
@property
def irb_sub_interface_id(self):
""" Get irb_sub_interface_id value.
Notes:
                The IRB sub interface identifies the subnet on SRLinux devices.
This attribute is named `irbSubInterfaceID` in VSD API.
"""
return self._irb_sub_interface_id
@irb_sub_interface_id.setter
def irb_sub_interface_id(self, value):
""" Set irb_sub_interface_id value.
Notes:
                The IRB sub interface identifies the subnet on SRLinux devices.
This attribute is named `irbSubInterfaceID` in VSD API.
"""
self._irb_sub_interface_id = value
@property
def creation_date(self):
""" Get creation_date value.
Notes:
Time stamp when this object was created.
This attribute is named `creationDate` in VSD API.
"""
return self._creation_date
@creation_date.setter
def creation_date(self, value):
""" Set creation_date value.
Notes:
Time stamp when this object was created.
This attribute is named `creationDate` in VSD API.
"""
self._creation_date = value
@property
def proxy_arp(self):
""" Get proxy_arp value.
Notes:
When set, VRS will act as ARP Proxy
This attribute is named `proxyARP` in VSD API.
"""
return self._proxy_arp
@proxy_arp.setter
def proxy_arp(self, value):
""" Set proxy_arp value.
Notes:
When set, VRS will act as ARP Proxy
This attribute is named `proxyARP` in VSD API.
"""
self._proxy_arp = value
@property
def vrrp_ipv6_backup_address(self):
""" Get vrrp_ipv6_backup_address value.
Notes:
7x50 VRRP IPv6 Backup Address. Valid range is fe80::/64.
This attribute is named `vrrpIPv6BackupAddress` in VSD API.
"""
return self._vrrp_ipv6_backup_address
@vrrp_ipv6_backup_address.setter
def vrrp_ipv6_backup_address(self, value):
""" Set vrrp_ipv6_backup_address value.
Notes:
7x50 VRRP IPv6 Backup Address. Valid range is fe80::/64.
This attribute is named `vrrpIPv6BackupAddress` in VSD API.
"""
self._vrrp_ipv6_backup_address = value
@property
def use_global_mac(self):
""" Get use_global_mac value.
Notes:
                If this flag is enabled, the system-configured globalMACAddress will be used as the gateway MAC address.
This attribute is named `useGlobalMAC` in VSD API.
"""
return self._use_global_mac
@use_global_mac.setter
def use_global_mac(self, value):
""" Set use_global_mac value.
Notes:
                If this flag is enabled, the system-configured globalMACAddress will be used as the gateway MAC address.
This attribute is named `useGlobalMAC` in VSD API.
"""
self._use_global_mac = value
@property
def associated_multicast_channel_map_id(self):
""" Get associated_multicast_channel_map_id value.
Notes:
The ID of the Multi Cast Channel Map this Subnet/Subnet Template is associated with. This has to be set when enableMultiCast is set to ENABLED
This attribute is named `associatedMulticastChannelMapID` in VSD API.
"""
return self._associated_multicast_channel_map_id
@associated_multicast_channel_map_id.setter
def associated_multicast_channel_map_id(self, value):
""" Set associated_multicast_channel_map_id value.
Notes:
The ID of the Multi Cast Channel Map this Subnet/Subnet Template is associated with. This has to be set when enableMultiCast is set to ENABLED
This attribute is named `associatedMulticastChannelMapID` in VSD API.
"""
self._associated_multicast_channel_map_id = value
@property
def associated_shared_network_resource_id(self):
""" Get associated_shared_network_resource_id value.
Notes:
The ID of public subnet that is associated with this subnet
This attribute is named `associatedSharedNetworkResourceID` in VSD API.
"""
return self._associated_shared_network_resource_id
@associated_shared_network_resource_id.setter
def associated_shared_network_resource_id(self, value):
""" Set associated_shared_network_resource_id value.
Notes:
The ID of public subnet that is associated with this subnet
This attribute is named `associatedSharedNetworkResourceID` in VSD API.
"""
self._associated_shared_network_resource_id = value
@property
def dual_stack_dynamic_ip_allocation(self):
""" Get dual_stack_dynamic_ip_allocation value.
Notes:
This value indicates whether dynamic address allocation is enabled or not. This will be applicable when subnet is in dual stack mode.
This attribute is named `dualStackDynamicIPAllocation` in VSD API.
"""
return self._dual_stack_dynamic_ip_allocation
@dual_stack_dynamic_ip_allocation.setter
def dual_stack_dynamic_ip_allocation(self, value):
""" Set dual_stack_dynamic_ip_allocation value.
Notes:
This value indicates whether dynamic address allocation is enabled or not. This will be applicable when subnet is in dual stack mode.
This attribute is named `dualStackDynamicIPAllocation` in VSD API.
"""
self._dual_stack_dynamic_ip_allocation = value
@property
def public(self):
""" Get public value.
Notes:
                When set to true, the subnet is a public subnet under a public zone.
"""
return self._public
@public.setter
def public(self, value):
""" Set public value.
Notes:
                When set to true, the subnet is a public subnet under a public zone.
"""
self._public = value
@property
def subnet_vlanid(self):
""" Get subnet_vlanid value.
Notes:
Determines the VLANID for this associated Subnet.
This attribute is named `subnetVLANID` in VSD API.
"""
return self._subnet_vlanid
@subnet_vlanid.setter
def subnet_vlanid(self, value):
""" Set subnet_vlanid value.
Notes:
Determines the VLANID for this associated Subnet.
This attribute is named `subnetVLANID` in VSD API.
"""
self._subnet_vlanid = value
@property
def multi_home_enabled(self):
""" Get multi_home_enabled value.
Notes:
Boolean flag to indicate whether this is a Multi-homed subnet or not.
This attribute is named `multiHomeEnabled` in VSD API.
"""
return self._multi_home_enabled
@multi_home_enabled.setter
def multi_home_enabled(self, value):
""" Set multi_home_enabled value.
Notes:
Boolean flag to indicate whether this is a Multi-homed subnet or not.
This attribute is named `multiHomeEnabled` in VSD API.
"""
self._multi_home_enabled = value
@property
def multicast(self):
""" Get multicast value.
Notes:
                multicast is an enum that indicates the multicast policy on the Subnet/Subnet Template.
"""
return self._multicast
@multicast.setter
def multicast(self, value):
""" Set multicast value.
Notes:
                multicast is an enum that indicates the multicast policy on the Subnet/Subnet Template.
"""
self._multicast = value
@property
def customer_id(self):
""" Get customer_id value.
Notes:
CustomerID that is used by NETCONF MANAGER to identify this enterprise. This can be configured by root user.
This attribute is named `customerID` in VSD API.
"""
return self._customer_id
@customer_id.setter
def customer_id(self, value):
""" Set customer_id value.
Notes:
CustomerID that is used by NETCONF MANAGER to identify this enterprise. This can be configured by root user.
This attribute is named `customerID` in VSD API.
"""
self._customer_id = value
@property
def owner(self):
""" Get owner value.
Notes:
Identifies the user that has created this object.
"""
return self._owner
@owner.setter
def owner(self, value):
""" Set owner value.
Notes:
Identifies the user that has created this object.
"""
self._owner = value
@property
def external_id(self):
""" Get external_id value.
Notes:
External object ID. Used for integration with third party systems
This attribute is named `externalID` in VSD API.
"""
return self._external_id
@external_id.setter
def external_id(self, value):
""" Set external_id value.
Notes:
External object ID. Used for integration with third party systems
This attribute is named `externalID` in VSD API.
"""
self._external_id = value
## Custom methods
def is_template(self):
""" Verify that the object is a template
Returns:
(bool): True if the object is a template
"""
return False
def is_from_template(self):
""" Verify if the object has been instantiated from a template
Note:
The object has to be fetched. Otherwise, it does not
have information from its parent
Returns:
                (bool): True if the object has been instantiated from a template
        """
        return bool(self.template_id)
| bsd-3-clause | -5,173,366,400,604,724,000 | 30.167903 | 358 | 0.585667 | false |
hemmerling/codingdojo | src/game_of_life/python_coderetreat_berlin_2014-09/python_legacycrberlin03/gol03_test.py | 1 | 1436 | # This file was originally generated by PyScripter's unittest wizard
import unittest
from gol03 import Gol03
def dummy():
""" Dummy function for comparison of the return values """
return
class Gol03Test(unittest.TestCase):
def setUp(self):
pass
def tearDown(self):
pass
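    # Helper checks for the blinker test below: the seeded row of three live
    # cells should evolve so that the centre cell (index 4) stays alive.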
    def check_griddata(self, griddata):
        result = False
        count = 0
        for cell in griddata:
            if cell == True:
                count += 1
        if count == 3 or count == 4:
            result = True
        return result
    def check_newgriddata(self, new_griddata):
result = new_griddata[4] == 1
return result
def testIsCellAliveWithTwoNeighbours(self):
self.gol = Gol03()
result = False
griddata = [False, False, False, True, True, True, False, False, False]
self.gol.set_griddata(griddata)
self.gol.next()
new_griddata = self.gol.get_griddata()
        if self.check_griddata(griddata):
            result = self.check_newgriddata(new_griddata)
        assert result == True, 'Gol03.next() does not produce the expected next generation'
pass
# run all tests
if __name__ == "__main__":
try:
unittest.main()
except SystemExit as inst:
if inst.args[0] is True: # raised by sys.exit(True) when tests failed
raise
| apache-2.0 | -5,112,050,620,155,597,000 | 19.757576 | 89 | 0.561978 | false |
levelrf/level_basestation | level/examples/test_transmit.py | 1 | 3457 | #!/usr/bin/python
# python ~/workspace/level_basestation/pre-cog/examples/simple_trx.py --port 12345 --radio-addr 85 --dest-addr 86 --args serial=E8R10Z2B1
# python ~/workspace/level_basestation/pre-cog/examples/simple_trx.py --port 12346 --radio-addr 86 --dest-addr 85 --args serial=E4R11Y0B1
from gnuradio import gr
from gnuradio import uhd
from gnuradio import digital
from gnuradio import blks2
from gnuradio.gr import firdes
import gnuradio.gr.gr_threading as _threading
from gnuradio import level
from gnuradio import extras
from math import pi
from gruel import pmt
import urllib2, time, json
class test_transmit(gr.top_block):
def __init__(self):
gr.top_block.__init__(self, "CC430 Transmitter")
self.sent_pkts = 0
# 5555 5555 2c6e fd00 0071 da0b e2
self.packet = chr(0x55)*4 # preamble
self.packet += chr(0x2c) + chr(0x6e) # sync
self.packet += chr(0xfc) # length
self.packet += chr(0x00) + chr(0x00) + chr(0x00) # payload
self.packet += chr(0x71) + chr(0xda) + chr(0x0b) + chr(0xe2) # CRC (currently incorrect)
# Variables
self.samp_rate = samp_rate = 125e3
self.f_center = f_center = 868e6
self.bandwidth = bandwidth = 200e3
self.gain = gain = 5
self.msgq = msgq = gr.msg_queue()
# Blocks
self.uhd_sink = uhd.usrp_sink(
device_addr="",
stream_args=uhd.stream_args(
cpu_format="fc32",
channels=range(1),
),
)
self.uhd_sink.set_samp_rate(samp_rate)
self.uhd_sink.set_center_freq(f_center, 0)
self.uhd_sink.set_gain(gain, 0)
self.uhd_sink.set_bandwidth(bandwidth, 0)
self.msg_src = gr.message_source(1, msgq)
self.msk = level.msk_mod_bc(
samples_per_symbol=2,
bt=0.3
)
# Connections
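        # Packet bytes from the message queue are MSK-modulated and streamed to the USRP sink.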
self.connect(self.msg_src, self.msk, self.uhd_sink)
def send_pkt(self, payload):
msg = gr.message_from_string(str(payload))
self.msgq.insert_tail(msg)
def get_top_hn(self):
try:
f_page = urllib2.urlopen("http://api.ihackernews.com/page").read()
except urllib2.HTTPError:
return "HN returned server error: 0"
fj = json.loads(f_page)
title = fj['items'][0]['title']
score = fj['items'][0]['points']
return str(title) + ":" + str(score)
def form_packet(self, payload):
length = len(payload)
self.packet = chr(0x55)*4 # preamble
self.packet += chr(0xd3) + chr(0x91) # sync
self.packet += chr(length) # length
self.packet += str(payload)
        self.packet += chr(0x71) + chr(0xda) + chr(0x0b) + chr(0xe2) # CRC (currently incorrect)
        return self.packet
def main_loop(self):
while True:
payload = self.get_top_hn()
print payload
self.packet = self.form_packet(payload)
self.send_pkt(self.packet)
self.sent_pkts += 1
try:
time.sleep(5)
except KeyboardInterrupt:
print "\n\nSent Packets:", self.sent_pkts, "\n"
break
if __name__ == '__main__':
tx = test_transmit()
r = gr.enable_realtime_scheduling()
tx.start()
tx.main_loop()
| gpl-3.0 | -749,694,025,709,994,400 | 33.227723 | 137 | 0.558577 | false |
dagar/Firmware | Tools/HIL/run_tests.py | 1 | 3547 | #! /usr/bin/python
import serial, time
import subprocess
from subprocess import call, Popen
from argparse import ArgumentParser
import re
def do_test(port, baudrate, test_name):
databits = serial.EIGHTBITS
stopbits = serial.STOPBITS_ONE
parity = serial.PARITY_NONE
ser = serial.Serial(port, baudrate, databits, parity, stopbits, timeout=10)
ser.write('\n\n')
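    # Wake the console so a fresh "nsh>" prompt is printed before running tests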
finished = 0
success = False
timeout = 10 # 10 seconds
timeout_start = time.time()
while finished == 0:
serial_line = ser.readline()
print(serial_line.replace('\n',''))
if "nsh>" in serial_line:
finished = 1
if time.time() > timeout_start + timeout:
print("Error, timeout")
finished = 1
break
# run test
ser.write('tests ' + test_name + '\n')
time.sleep(0.05)
finished = 0
timeout = 300 # 5 minutes
timeout_start = time.time()
timeout_newline = time.time()
while finished == 0:
serial_line = ser.readline()
print(serial_line.replace('\n',''))
if test_name + " PASSED" in serial_line:
finished = 1
success = True
elif test_name + " FAILED" in serial_line:
finished = 1
success = False
if time.time() > timeout_start + timeout:
print("Error, timeout")
print(test_name + " FAILED")
finished = 1
success = False
break
# newline every 30 seconds if still running
if time.time() - timeout_newline > 30:
ser.write('\n')
timeout_newline = time.time()
ser.close()
return success
def main():
parser = ArgumentParser(description=__doc__)
parser.add_argument('--device', "-d", nargs='?', default = None, help='')
parser.add_argument("--baudrate", "-b", dest="baudrate", type=int, help="Mavlink port baud rate (default=57600)", default=57600)
args = parser.parse_args()
do_test(args.device, args.baudrate, "autodeclination")
do_test(args.device, args.baudrate, "bezier")
do_test(args.device, args.baudrate, "bson")
do_test(args.device, args.baudrate, "commander")
do_test(args.device, args.baudrate, "controllib")
do_test(args.device, args.baudrate, "conv")
do_test(args.device, args.baudrate, "ctlmath")
#do_test(args.device, args.baudrate, "dataman")
do_test(args.device, args.baudrate, "float")
do_test(args.device, args.baudrate, "hrt")
do_test(args.device, args.baudrate, "int")
do_test(args.device, args.baudrate, "IntrusiveQueue")
do_test(args.device, args.baudrate, "List")
do_test(args.device, args.baudrate, "mathlib")
do_test(args.device, args.baudrate, "matrix")
do_test(args.device, args.baudrate, "microbench_hrt")
do_test(args.device, args.baudrate, "microbench_math")
do_test(args.device, args.baudrate, "microbench_matrix")
do_test(args.device, args.baudrate, "microbench_uorb")
#do_test(args.device, args.baudrate, "mixer")
do_test(args.device, args.baudrate, "param")
do_test(args.device, args.baudrate, "parameters")
do_test(args.device, args.baudrate, "perf")
do_test(args.device, args.baudrate, "search_min")
do_test(args.device, args.baudrate, "sleep")
do_test(args.device, args.baudrate, "smoothz")
do_test(args.device, args.baudrate, "time")
do_test(args.device, args.baudrate, "uorb")
do_test(args.device, args.baudrate, "versioning")
if __name__ == "__main__":
main()
| bsd-3-clause | -5,418,919,410,731,338,000 | 31.245455 | 132 | 0.620242 | false |
chrischambers/django-calendartools | calendartools/views/agenda.py | 1 | 1141 | from datetime import date
from calendartools import defaults
from calendartools.views.calendars import (
YearView, TriMonthView, MonthView, WeekView, DayView
)
class YearAgenda(YearView):
template_name = 'calendar/agenda/year.html'
paginate_by = defaults.MAX_AGENDA_ITEMS_PER_PAGE
class MonthAgenda(MonthView):
template_name = 'calendar/agenda/month.html'
paginate_by = defaults.MAX_AGENDA_ITEMS_PER_PAGE
class TriMonthAgenda(TriMonthView):
template_name = 'calendar/agenda/tri_month.html'
paginate_by = defaults.MAX_AGENDA_ITEMS_PER_PAGE
class WeekAgenda(WeekView):
template_name = 'calendar/agenda/week.html'
paginate_by = defaults.MAX_AGENDA_ITEMS_PER_PAGE
class DayAgenda(DayView):
template_name = 'calendar/agenda/day.html'
paginate_by = defaults.MAX_AGENDA_ITEMS_PER_PAGE
def today_agenda(request, slug, *args, **kwargs):
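    # Convenience wrapper: renders the DayAgenda view for today's date.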
today = date.today()
view = DayAgenda(request=request, slug=slug, year=str(today.year),
month=str(today.strftime('%b').lower()), day=str(today.day), **kwargs)
return view.get(request, slug=slug, year=today.year, day=today.day)
| bsd-3-clause | 3,052,311,570,141,808,600 | 30.694444 | 89 | 0.72305 | false |
dzeban/batti-gtk | src/Notificator.py | 1 | 2868 |
'''
This file is part of batti, a battery monitor for the system tray.
Copyright (C) 2010 Arthur Spitzer
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 2 of the License, or
any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
'''
import dbus
from dbus.exceptions import DBusException
class Notificator:
def __init__(self):
try:
bus = dbus.SessionBus()
obj = bus.get_object('org.freedesktop.Notifications', '/org/freedesktop/Notifications')
self.__notify = dbus.Interface(obj, 'org.freedesktop.Notifications')
self.__last_id = dbus.UInt32(0)
self.__posx = -1
self.__posy = -1
self.__positioned = True
self.__duration = 3000
self.__name = 'batti'
self.__check_capabilities()
except DBusException:
self.__notify = None
self.__positioned = False
def __check_capabilities(self):
info = self.__notify.GetServerInformation()
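        # notify-osd ignores x/y position hints, so disable positioning for it.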
if info[0] == 'notify-osd':
self.__positioned = False
def __show_positioned(self):
if self.__positioned:
return (self.__posx >= 0 and self.__posy >= 0)
else:
return False
def __show(self, icon, subject, msg, urgent):
if self.__notify is not None:
hints = {'urgency':dbus.Byte(urgent), 'desktop-entry':dbus.String('battery-monitor')}
            if self.__show_positioned():
hints['x'] = self.__posx
hints['y'] = self.__posy
self.__last_id = self.__notify.Notify(self.__name, self.__last_id, icon, subject, msg, [], hints, self.__duration)
def show(self, icon, subject, msg):
self.__show(icon, subject, msg, 1)
def show_urgent(self, icon, subject, msg):
self.__show(icon, subject, msg, 2)
def close(self):
if (self.__notify is not None) and self.__last_id:
self.__notify.CloseNotification(self.__last_id)
def setPosition(self, x, y):
self.__posx = x
self.__posy = y
def removePosition(self):
self.__posx = -1
self.__posy = -1
def setDuration(self, milSec):
        ''' Set the notification duration to milSec milliseconds '''
self.__duration = milSec
| gpl-2.0 | 959,022,370,888,630,300 | 30.877778 | 126 | 0.589958 | false |
lfalvarez/votai | popular_proposal/tests/views_tests.py | 1 | 21141 | # coding=utf-8
from popular_proposal.tests import ProposingCycleTestCaseBase as TestCase
from django.urls import reverse
from django.forms import Form
from popular_proposal.models import (PopularProposal,
Commitment,
ProposalTemporaryData)
from popular_proposal.filters import (ProposalWithoutAreaFilter,
ProposalWithAreaFilter)
from elections.models import Area, Candidate, Election
from backend_candidate.models import Candidacy
from popular_proposal.forms import (CandidateCommitmentForm,
CandidateNotCommitingForm,
)
from popular_proposal.forms.form_texts import TOPIC_CHOICES
from constance.test import override_config
from django.test import override_settings
class PopularProposalTestCaseBase(TestCase):
def setUp(self):
super(PopularProposalTestCaseBase, self).setUp()
self.algarrobo = Area.objects.get(id=1)
self.popular_proposal1 = PopularProposal.objects.create(proposer=self.fiera,
area=self.algarrobo,
generated_at=self.algarrobo,
data=self.data,
clasification=TOPIC_CHOICES[1][0],
title=u'This is a title'
)
data2 = self.data
self.popular_proposal2 = PopularProposal.objects.create(proposer=self.fiera,
area=self.algarrobo,
generated_at=self.algarrobo,
data=data2,
clasification=TOPIC_CHOICES[2][0],
title=u'This is a title'
)
self.popular_proposal3 = PopularProposal.objects.create(proposer=self.fiera,
area=self.alhue,
generated_at=self.alhue,
data=data2,
clasification=TOPIC_CHOICES[2][0],
title=u'This is a title'
)
class ProposalViewTestCase(TestCase):
def setUp(self):
super(ProposalViewTestCase, self).setUp()
self.algarrobo = Area.objects.get(id=1)
def test_there_is_a_page_for_popular_proposal(self):
popular_proposal = PopularProposal.objects.create(proposer=self.fiera,
area=self.algarrobo,
data=self.data,
title=u'This is a title'
)
# no need to be logged in
url = reverse('popular_proposals:detail', kwargs={'slug': popular_proposal.slug})
self.assertEquals(popular_proposal.get_absolute_url(), url)
response = self.client.get(url)
self.assertEquals(response.status_code, 200)
self.assertEqual(response.context['popular_proposal'], popular_proposal)
self.assertTemplateUsed(response, 'popular_proposal/detail.html')
def test_detail_redirect_view(self):
popular_proposal = PopularProposal.objects.create(proposer=self.fiera,
area=self.algarrobo,
data=self.data,
title=u'This is a title'
)
# no need to be logged in
url = reverse('popular_proposals:short_detail', kwargs={'pk': popular_proposal.pk})
response = self.client.get(url)
self.assertRedirects(response, popular_proposal.get_absolute_url())
self.assertEquals(popular_proposal.get_short_url(), url)
def test_proposal_og_image(self):
popular_proposal = PopularProposal.objects.create(proposer=self.fiera,
area=self.algarrobo,
data=self.data,
title=u'This is a title'
)
url = reverse('popular_proposals:og_image',
kwargs={'slug': popular_proposal.slug})
response = self.client.get(url)
self.assertIn("image/", response['Content-Type'])
def test_embedded_detail_popular_proposal(self):
popular_proposal = PopularProposal.objects.create(proposer=self.fiera,
area=self.algarrobo,
data=self.data,
title=u'This is a title'
)
# no need to be logged in
url = reverse('popular_proposals:embedded_detail',
kwargs={'slug': popular_proposal.slug})
response = self.client.get(url)
self.assertEquals(response.status_code, 200)
self.assertEqual(response.context['layout'], 'embedded_base.html')
self.assertEqual(response.context['popular_proposal'],
popular_proposal)
self.assertTemplateUsed(response,
'popular_proposal/detail.html')
self.assertTemplateUsed(response,
'embedded_base.html')
self.assertTrue(response.context['is_embedded'])
def test_thanks_page(self):
temporary_data = ProposalTemporaryData.objects.create(proposer=self.fiera,
join_advocacy_url=u"http://whatsapp.com/mygroup",
area=self.arica,
data=self.data)
url = reverse('popular_proposals:thanks', kwargs={'pk': temporary_data.id})
response = self.client.get(url)
self.assertEquals(response.status_code, 200)
self.assertEquals(response.context['proposal'], temporary_data)
class ProposalHomeTestCase(PopularProposalTestCaseBase):
def setUp(self):
super(ProposalHomeTestCase, self).setUp()
self.url = reverse('popular_proposals:home')
def test_there_is_a_page(self):
response = self.client.get(self.url)
self.assertEquals(response.status_code, 200)
self.assertTemplateUsed(response, 'popular_proposal/home.html')
def test_brings_a_list_of_proposals(self):
response = self.client.get(self.url, {})
self.assertIsInstance(response.context['form'], Form)
self.assertIn(self.popular_proposal1, response.context['popular_proposals'])
self.assertIn(self.popular_proposal2, response.context['popular_proposals'])
response = self.client.get(self.url, {'clasification': TOPIC_CHOICES[2][0]})
form = response.context['form']
self.assertEquals(form.fields['clasification'].initial, TOPIC_CHOICES[2][0])
self.assertNotIn(self.popular_proposal1, response.context['popular_proposals'])
self.assertIn(self.popular_proposal2, response.context['popular_proposals'])
response = self.client.get(self.url, {'clasification': TOPIC_CHOICES[2][0], 'generated_at': self.alhue.id})
form = response.context['form']
self.assertEquals(form.fields['clasification'].initial, TOPIC_CHOICES[2][0])
self.assertEquals(form.fields['generated_at'].initial, str(self.alhue.id))
self.assertIn(self.popular_proposal3, response.context['popular_proposals'])
self.assertNotIn(self.popular_proposal2, response.context['popular_proposals'])
self.assertNotIn(self.popular_proposal1, response.context['popular_proposals'])
def test_filtering_form(self):
data = {'clasification': '', 'area': ''}
filterset = ProposalWithAreaFilter(data=data)
form = filterset.form
self.assertTrue(form.is_valid())
def test_filtering_form_by_area(self):
data = {'clasification': ''}
filterset = ProposalWithoutAreaFilter(data=data, area=self.alhue)
form = filterset.form
self.assertTrue(form.is_valid())
class ProposalFilterTestsCase(PopularProposalTestCaseBase):
def setUp(self):
super(ProposalFilterTestsCase, self).setUp()
def test_filter_by_area(self):
proposal_filter = ProposalWithoutAreaFilter(area=self.algarrobo)
self.assertIn(self.popular_proposal1, proposal_filter.qs)
self.assertIn(self.popular_proposal2, proposal_filter.qs)
self.assertNotIn(self.popular_proposal3, proposal_filter.qs)
class EmbeddedViewsTests(PopularProposalTestCaseBase):
def setUp(self):
super(EmbeddedViewsTests, self).setUp()
def test_get_home_view(self):
url = reverse('popular_proposals:embedded_home')
response = self.client.get(url)
self.assertEquals(response.status_code, 200)
self.assertTemplateUsed(response, 'popular_proposal/home.html')
self.assertTemplateUsed(response, 'embedded_base.html')
self.assertIsInstance(response.context['form'], Form)
self.assertTrue(response.context['is_embedded'])
def test_get_popular_proposals_per_area_embedded(self):
url = reverse('popular_proposals:area_embedded',
kwargs={'slug': self.algarrobo.id})
response = self.client.get(url)
self.assertEquals(response.status_code, 200)
self.assertEquals(response.context['layout'], 'embedded_base.html')
self.assertTrue(response.context['is_embedded'])
self.assertTemplateUsed('popular_proposal/area.html')
self.assertTemplateUsed('embedded_base.html')
self.assertIsInstance(response.context['form'], Form)
self.assertIn(self.popular_proposal1,
response.context['popular_proposals'])
self.assertIn(self.popular_proposal2,
response.context['popular_proposals'])
self.assertNotIn(self.popular_proposal3,
response.context['popular_proposals'])
response = self.client.get(url, {'clasification': TOPIC_CHOICES[2][0]})
form = response.context['form']
self.assertEquals(form.fields['clasification'].initial, TOPIC_CHOICES[2][0])
self.assertNotIn(self.popular_proposal1,
response.context['popular_proposals'])
self.assertIn(self.popular_proposal2,
response.context['popular_proposals'])
class CandidateCommitmentViewTestCase(PopularProposalTestCaseBase):
def setUp(self):
super(CandidateCommitmentViewTestCase, self).setUp()
self.candidate = Candidate.objects.get(id=1)
self.candidate2 = Candidate.objects.get(id=2)
self.fiera.set_password('feroz')
self.fiera.save()
        self.candidacy = Candidacy.objects.create(candidate=self.candidate,
user=self.fiera)
def test_there_is_a_commit_page(self):
commitment = Commitment.objects.create(candidate=self.candidate,
proposal=self.popular_proposal1,
commited=True)
url = reverse('popular_proposals:commitment', kwargs={'candidate_slug': self.candidate.slug,
'proposal_slug': self.popular_proposal1.slug})
response = self.client.get(url)
self.assertEquals(response.status_code, 200)
self.assertTemplateUsed(response, 'popular_proposal/commitment/detail_yes.html')
self.assertEquals(response.context['commitment'], commitment)
commitment.delete()
commitment_no = Commitment.objects.create(candidate=self.candidate,
proposal=self.popular_proposal1,
commited=False)
url = reverse('popular_proposals:commitment', kwargs={'candidate_slug': self.candidate.slug,
'proposal_slug': self.popular_proposal1.slug})
response = self.client.get(url)
self.assertEquals(response.status_code, 200)
self.assertTemplateUsed(response, 'popular_proposal/commitment/detail_no.html')
self.assertEquals(response.context['commitment'], commitment_no)
def test_candidate_commiting_to_a_proposal_view(self):
url = reverse('popular_proposals:commit_yes', kwargs={'proposal_pk': self.popular_proposal1.id,
'candidate_pk': self.candidate.pk})
logged_in = self.client.login(username=self.fiera.username, password='feroz')
self.assertTrue(logged_in)
response = self.client.get(url)
self.assertEquals(response.status_code, 200)
self.assertTemplateUsed(response, 'popular_proposal/commitment/commit_yes.html')
self.assertIsInstance(response.context['form'], CandidateCommitmentForm)
self.assertEquals(response.context['proposal'], self.popular_proposal1)
self.assertEquals(response.context['candidate'], self.candidate)
response_post = self.client.post(url, {'terms_and_conditions': True})
detail_url = reverse('popular_proposals:commitment', kwargs={'candidate_slug': self.candidate.slug,
'proposal_slug': self.popular_proposal1.slug})
self.assertRedirects(response_post, detail_url)
@override_config(PROPOSALS_ENABLED=False)
def test_candidates_not_commiting(self):
url = reverse('popular_proposals:commit_yes', kwargs={'proposal_pk': self.popular_proposal1.id,
'candidate_pk': self.candidate.pk})
logged_in = self.client.login(username=self.fiera.username, password='feroz')
self.assertTrue(logged_in)
response = self.client.get(url)
self.assertEquals(response.status_code, 404)
url = reverse('popular_proposals:commit_no', kwargs={'proposal_pk': self.popular_proposal1.id,
'candidate_pk': self.candidate.pk})
response = self.client.get(url)
self.assertEquals(response.status_code, 404)
def test_not_commiting_twice(self):
Commitment.objects.create(candidate=self.candidate,
proposal=self.popular_proposal1,
commited=True)
url = reverse('popular_proposals:commit_yes', kwargs={'proposal_pk': self.popular_proposal1.id,
'candidate_pk': self.candidate.pk})
logged_in = self.client.login(username=self.fiera.username, password='feroz')
self.assertTrue(logged_in)
response = self.client.get(url)
# Already commited
self.assertEquals(response.status_code, 404)
def test_not_commiting_if_representing_someone_else(self):
election = Election.objects.get(id=self.candidate2.election.id)
election.candidates_can_commit_everywhere = False
election.save()
election2 = Election.objects.get(id=self.candidate.election.id)
election2.candidates_can_commit_everywhere = False
election2.save()
url = reverse('popular_proposals:commit_yes', kwargs={'proposal_pk': self.popular_proposal1.id,
'candidate_pk': self.candidate2.id})
logged_in = self.client.login(username=self.fiera.username, password='feroz')
self.assertTrue(logged_in)
response = self.client.get(url)
# Fiera has nothing to do with candidate2
self.assertEquals(response.status_code, 404)
# Fiera cannot commit to a promise for another area
url = reverse('popular_proposals:commit_yes', kwargs={'proposal_pk': self.popular_proposal3.id,
'candidate_pk': self.candidate.pk})
logged_in = self.client.login(username=self.fiera.username, password='feroz')
response = self.client.get(url)
self.assertEquals(response.status_code, 404)
def test_commiting_if_representing_everyone(self):
election = Election.objects.get(id=self.candidate.election.id)
election.candidates_can_commit_everywhere = True
election.save()
url = reverse('popular_proposals:commit_yes', kwargs={'proposal_pk': self.popular_proposal3.id,
'candidate_pk': self.candidate.pk})
logged_in = self.client.login(username=self.fiera.username, password='feroz')
response = self.client.get(url)
self.assertEquals(response.status_code, 200)
def test_not_commiting_as_candidate(self):
url = reverse('popular_proposals:commit_no', kwargs={'proposal_pk': self.popular_proposal1.id,
'candidate_pk': self.candidate.pk})
logged_in = self.client.login(username=self.fiera.username, password='feroz')
self.assertTrue(logged_in)
response = self.client.get(url)
self.assertEquals(response.status_code, 200)
self.assertTemplateUsed(response, 'popular_proposal/commitment/commit_no.html')
self.assertIsInstance(response.context['form'], CandidateNotCommitingForm)
response_post = self.client.post(url, {'terms_and_conditions': True,
'details': u'no me gustó pa na la propuesta'})
detail_url = reverse('popular_proposals:commitment', kwargs={'candidate_slug': self.candidate.slug,
'proposal_slug': self.popular_proposal1.slug})
self.assertRedirects(response_post, detail_url)
def test_ayuranos_per_proposal(self):
election = Election.objects.get(id=self.candidate.election.id)
election.candidates_can_commit_everywhere = True
election.save()
popular_proposal = PopularProposal.objects.create(proposer=self.fiera,
area=self.algarrobo,
data=self.data,
title=u'This is a title'
)
Commitment.objects.create(candidate=self.candidate,
proposal=popular_proposal,
commited=True)
# no need to be logged in
url = reverse('popular_proposals:ayuranos', kwargs={'slug': popular_proposal.slug})
response = self.client.get(url)
self.assertEquals(response.status_code, 200)
self.assertEqual(response.context['popular_proposal'], popular_proposal)
self.assertTemplateUsed(response, 'popular_proposal/ayuranos.html')
candidates = response.context['candidates']
self.assertIn(self.candidate2, candidates.all())
self.assertNotIn(self.candidate, candidates.all())
@override_settings(PRIORITY_CANDIDATES=[2,])
def test_only_showing_candidates_that_are_priority(self):
election = Election.objects.get(id=self.candidate.election.id)
election.candidates_can_commit_everywhere = True
election.save()
popular_proposal = PopularProposal.objects.create(proposer=self.fiera,
area=self.algarrobo,
data=self.data,
title=u'This is a title'
)
url = reverse('popular_proposals:ayuranos', kwargs={'slug': popular_proposal.slug})
response = self.client.get(url)
candidates = response.context['candidates']
self.assertIn(self.candidate2, candidates.all())
self.assertNotIn(self.candidate, candidates.all())
| gpl-3.0 | 6,578,302,699,767,330,000 | 55.223404 | 115 | 0.563576 | false |
DrGFreeman/RasPiBot202 | maze.py | 1 | 7175 | import networkx as nx
import math
class Maze:
def __init__(self):
self.g = nx.MultiGraph()
self.newNodeUid = 0
self.startNode = None
self.finishNode = None
self.distTol = 75 # Distance tolerance to consider two nodes to be the same
self.farAway = 10000 # A long distance...
def addNode(self, nbPathsOut = 0, start = False, finish = False,):
uid = self.getNewNodeUid()
# Create intersection node object
newNode = Node(uid, nbPathsOut, start, finish)
# If start, define object as maze start
if start:
self.setStartNode(newNode)
if finish:
self.setFinishNode(newNode)
# Create corresponding graph node
self.g.add_node(newNode)
# Return new node
return newNode
def addPath(self, fromNode, toNode, outHeading, inHeading, length):
newPath = Path(fromNode, toNode, outHeading, inHeading)
self.g.add_edge(fromNode, toNode, newPath, weight = length)
    def areNeighbors(self, node1, node2):
        return node2 in self.g.neighbors(node1)
# Method to be called when exploring the maze. It will create a new node at position x, y if it does
# not already exists. It will create a path object from the source node to the current node position.
def exploreNode(self, sourceNode, x, y, nbPathsOut, pathLength, outHeading, inHeading, start = False, finish = False):
# Check if already exists
if self.nodeExistsAtPos(x, y):
currentNode = self.getNodeAtPos(x, y)
print "Current node: ", currentNode.uid, " (existing)"
# Check if path loops back to sourceNode
if currentNode == sourceNode:
if currentNode.nbPathsOut <= 1:
currentNode.nbPathsOut = 0
else:
currentNode.nbPathsOut -= 1
print "Loop to self, reducing nbPathsOut for node ", currentNode.uid, " to ", currentNode.nbPathsOut
else:
# Create new node
currentNode = self.addNode(nbPathsOut, start, finish)
currentNode.setPos(x, y)
print "Current node: ", currentNode.uid, " (new)"
# Create path edge from sourceNode to node
self.addPath(sourceNode, currentNode, outHeading, inHeading, pathLength)
return currentNode
def getHeadingToGoal(self, currentNode, goalNode):
nextNode = self.getNextNodeInShortestPath(currentNode, goalNode)
nextPath = self.getPathToNeighbor(currentNode, nextNode)
return nextPath.getHeadingToNode(nextNode)
def getNewNodeUid(self):
uid = self.newNodeUid
self.newNodeUid += 1
return uid
# Finds the nearest node from which there are unvisited paths
def getNearestUnvisited(self, currentNode):
shortestLength = self.farAway
for node in self.g.nodes():
if self.g.degree(node) < node.nbPathsOut + 1:
length = nx.shortest_path_length(self.g, currentNode, node, weight = 'weight')
print "Length to node ", node.uid, ": ", length
if length < shortestLength:
nearestUnvisited = node
shortestLength = length
print "Distance to nearest node with unvisited paths: ", shortestLength
return nearestUnvisited
def getNextNodeInShortestPath(self, currentNode, goalNode):
path = nx.shortest_path(self.g, currentNode, goalNode, weight = 'weight')
        if len(path) == 1:
return path[0]
else:
return path[1]
# Finds the next node in the path to the nearest node with unvisited paths
def getNextNodeToNearestUnvisited(self, currentNode):
nearestUnvisited = self.getNearestUnvisited(currentNode)
path = nx.shortest_path(self.g, currentNode, nearestUnvisited, weight = 'weight')
if len(path) == 1:
print "Next node with unvisited paths: ", path[0].uid, " (current node)"
return path[0]
else:
print "Next node with unvisited paths: ", path[1].uid
return path[1]
def getNodeAtPos(self, x, y):
for node in self.g.nodes():
if node.getDistance(x, y) < self.distTol:
return node
def getNodeByUid(self, uid):
for node in self.g.nodes():
if node.uid == uid:
return node
def getPathToNeighbor(self, currentNode, neighborNode):
paths = self.g[currentNode][neighborNode].items()
shortestLength = self.farAway
for path in paths:
if path[1]['weight'] < shortestLength:
shortestPath = path[0]
shortestLength = path[1]['weight']
return shortestPath
def hasUnvisitedPaths(self):
hasUnvisitedPaths = False
for node in self.g.nodes():
if self.g.degree(node) < node.nbPathsOut + 1:
hasUnvisitedPaths = True
return hasUnvisitedPaths
def headingIsUnvisited(self, currentNode, heading):
visitedHeadings = []
for node in self.g.neighbors(currentNode):
paths = self.g[currentNode][node].items()
for path in paths:
visitedHeadings.append(path[0].getHeadingToNode(node))
headingIsUnvisited = True
if visitedHeadings.count(heading) == 1:
headingIsUnvisited = False
return headingIsUnvisited
    def nodeExistsAtPos(self, x, y):
        for node in self.g.nodes():
            if node.getDistance(x, y) < self.distTol:
                return True
        return False
def setFinishNode(self, node):
if self.finishNode is None:
self.finishNode = node
else:
print 'Error: Finish node already defined'
def setStartNode(self, node):
if self.startNode is None:
self.startNode = node
else:
print 'Error: Start node already defined'
class Node:
def __init__(self, uid, nbPathsOut, start = False, finish = False):
self.uid = uid
self.start = start
self.finish = finish
self.nbPathsOut = nbPathsOut
def getDistance(self, x, y):
return math.sqrt((self.x - x)**2 + (self.y - y)**2)
def setPos(self, x, y):
self.x = x
self.y = y
def getPos(self):
return self.x, self.y
class Path:
def __init__(self, nodeFrom, nodeTo, nodeFromOutHeading, nodeToInHeading):
self.node0 = nodeFrom
self.node0OutHeading = nodeFromOutHeading
self.node1 = nodeTo
self.node1OutHeading = self.inverseHeading(nodeToInHeading)
def inverseHeading(self, heading):
inHeading = ['E', 'N', 'W', 'S']
outHeading = ['W', 'S', 'E', 'N']
return outHeading[inHeading.index(heading)]
def getHeadingToNode(self, node):
if node == self.node0:
return self.node1OutHeading
elif node == self.node1:
return self.node0OutHeading
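# Minimal usage sketch (illustrative only; the position, heading and path
# length values below are assumptions, not part of this module):
#
#     maze = Maze()
#     start = maze.addNode(nbPathsOut=2, start=True)
#     start.setPos(0, 0)
#     current = maze.exploreNode(start, 0, 500, 1, 500, 'N', 'N')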
| mit | -6,924,944,892,534,273,000 | 35.794872 | 122 | 0.605714 | false |
naototty/vagrant-lxc-ironic | ironic/drivers/modules/ipmitool.py | 1 | 42906 | # coding=utf-8
# Copyright 2012 Hewlett-Packard Development Company, L.P.
# Copyright (c) 2012 NTT DOCOMO, INC.
# Copyright 2014 International Business Machines Corporation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
IPMI power manager driver.
Uses the 'ipmitool' command (http://ipmitool.sourceforge.net/) to remotely
manage hardware. This includes setting the boot device, getting a
serial-over-LAN console, and controlling the power state of the machine.
NOTE THAT CERTAIN DISTROS MAY INSTALL openipmi BY DEFAULT, INSTEAD OF ipmitool,
WHICH PROVIDES DIFFERENT COMMAND-LINE OPTIONS AND *IS NOT SUPPORTED* BY THIS
DRIVER.
"""
import contextlib
import os
import re
import tempfile
import time
from oslo_concurrency import processutils
from oslo_config import cfg
from oslo_log import log as logging
from oslo_utils import excutils
from ironic.common import boot_devices
from ironic.common import exception
from ironic.common.i18n import _
from ironic.common.i18n import _LE
from ironic.common.i18n import _LW
from ironic.common import states
from ironic.common import utils
from ironic.conductor import task_manager
from ironic.drivers import base
from ironic.drivers.modules import console_utils
from ironic.openstack.common import loopingcall
CONF = cfg.CONF
CONF.import_opt('retry_timeout',
'ironic.drivers.modules.ipminative',
group='ipmi')
CONF.import_opt('min_command_interval',
'ironic.drivers.modules.ipminative',
group='ipmi')
LOG = logging.getLogger(__name__)
VALID_PRIV_LEVELS = ['ADMINISTRATOR', 'CALLBACK', 'OPERATOR', 'USER']
REQUIRED_PROPERTIES = {
'ipmi_address': _("IP address or hostname of the node. Required.")
}
OPTIONAL_PROPERTIES = {
'ipmi_password': _("password. Optional."),
'ipmi_priv_level': _("privilege level; default is ADMINISTRATOR. One of "
"%s. Optional.") % ', '.join(VALID_PRIV_LEVELS),
'ipmi_username': _("username; default is NULL user. Optional."),
'ipmi_bridging': _("bridging_type; default is \"no\". One of \"single\", "
"\"dual\", \"no\". Optional."),
'ipmi_transit_channel': _("transit channel for bridged request. Required "
"only if ipmi_bridging is set to \"dual\"."),
'ipmi_transit_address': _("transit address for bridged request. Required "
"only if ipmi_bridging is set to \"dual\"."),
'ipmi_target_channel': _("destination channel for bridged request. "
"Required only if ipmi_bridging is set to "
"\"single\" or \"dual\"."),
'ipmi_target_address': _("destination address for bridged request. "
"Required only if ipmi_bridging is set "
"to \"single\" or \"dual\"."),
'ipmi_local_address': _("local IPMB address for bridged requests. "
"Used only if ipmi_bridging is set "
"to \"single\" or \"dual\". Optional.")
}
COMMON_PROPERTIES = REQUIRED_PROPERTIES.copy()
COMMON_PROPERTIES.update(OPTIONAL_PROPERTIES)
CONSOLE_PROPERTIES = {
'ipmi_terminal_port': _("node's UDP port to connect to. Only required for "
"console access.")
}
BRIDGING_OPTIONS = [('local_address', '-m'),
('transit_channel', '-B'), ('transit_address', '-T'),
('target_channel', '-b'), ('target_address', '-t')]
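# Each (suffix, flag) pair maps an ipmi_<suffix> driver_info entry to its
# ipmitool command-line option.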
LAST_CMD_TIME = {}
TIMING_SUPPORT = None
SINGLE_BRIDGE_SUPPORT = None
DUAL_BRIDGE_SUPPORT = None
TMP_DIR_CHECKED = None
ipmitool_command_options = {
'timing': ['ipmitool', '-N', '0', '-R', '0', '-h'],
'single_bridge': ['ipmitool', '-m', '0', '-b', '0', '-t', '0', '-h'],
'dual_bridge': ['ipmitool', '-m', '0', '-b', '0', '-t', '0',
'-B', '0', '-T', '0', '-h']}
# Note(TheJulia): This string is hardcoded in ipmitool's lanplus driver
# and is substituted in return for the error code received from the IPMI
# controller. As of 1.8.15, no internationalization support appears to
# be in ipmitool which means the string should always be returned in this
# form regardless of locale.
IPMITOOL_RETRYABLE_FAILURES = ['insufficient resources for session']
def _check_option_support(options):
"""Checks if the specific ipmitool options are supported on host.
This method updates the module-level variables indicating whether
an option is supported so that it is accessible by any driver
interface class in this module. It is intended to be called from
the __init__ method of such classes only.
:param options: list of ipmitool options to be checked
:raises: OSError
"""
for opt in options:
if _is_option_supported(opt) is None:
try:
cmd = ipmitool_command_options[opt]
out, err = utils.execute(*cmd)
except processutils.ProcessExecutionError:
# the local ipmitool does not support the command.
_is_option_supported(opt, False)
else:
# looks like ipmitool supports the command.
_is_option_supported(opt, True)
def _is_option_supported(option, is_supported=None):
"""Indicates whether the particular ipmitool option is supported.
:param option: specific ipmitool option
    :param is_supported: Optional Boolean. When specified, this value
           is assigned to the module-level variable indicating
           whether the option is supported. Used only if a value
           is not already assigned.
:returns: True, indicates the option is supported
:returns: False, indicates the option is not supported
:returns: None, indicates that it is not aware whether the option
is supported
"""
global SINGLE_BRIDGE_SUPPORT
global DUAL_BRIDGE_SUPPORT
global TIMING_SUPPORT
if option == 'single_bridge':
if (SINGLE_BRIDGE_SUPPORT is None) and (is_supported is not None):
SINGLE_BRIDGE_SUPPORT = is_supported
return SINGLE_BRIDGE_SUPPORT
elif option == 'dual_bridge':
if (DUAL_BRIDGE_SUPPORT is None) and (is_supported is not None):
DUAL_BRIDGE_SUPPORT = is_supported
return DUAL_BRIDGE_SUPPORT
elif option == 'timing':
if (TIMING_SUPPORT is None) and (is_supported is not None):
TIMING_SUPPORT = is_supported
return TIMING_SUPPORT
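# Illustrative behaviour of the tri-state cache implemented above (example
# option name; actual results depend on the local ipmitool):
#
#     _is_option_supported('timing')        # -> None, not probed yet
#     _is_option_supported('timing', True)  # -> True, result recorded
#     _is_option_supported('timing')        # -> True, served from the cache
#
# Once a module-level flag is set it is never overwritten, so the probe in
# _check_option_support() runs at most once per option per process.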
def _console_pwfile_path(uuid):
"""Return the file path for storing the ipmi password for a console."""
file_name = "%(uuid)s.pw" % {'uuid': uuid}
return os.path.join(tempfile.gettempdir(), file_name)
@contextlib.contextmanager
def _make_password_file(password):
"""Makes a temporary file that contains the password.
:param password: the password
:returns: the absolute pathname of the temporary file
:raises: PasswordFileFailedToCreate from creating or writing to the
temporary file
"""
f = None
try:
f = tempfile.NamedTemporaryFile(mode='w', dir=CONF.tempdir)
f.write(str(password))
f.flush()
except (IOError, OSError) as exc:
if f is not None:
f.close()
raise exception.PasswordFileFailedToCreate(error=exc)
except Exception:
if f is not None:
f.close()
raise
try:
# NOTE(jlvillal): This yield can not be in the try/except block above
# because an exception by the caller of this function would then get
# changed to a PasswordFileFailedToCreate exception which would mislead
# about the problem and its cause.
yield f.name
finally:
if f is not None:
f.close()
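# A minimal usage sketch (illustrative only): the context manager yields the
# path of a NamedTemporaryFile holding the password; since the file object is
# closed in the finally block, the file is deleted once the block exits.
#
#     with _make_password_file('secret') as pw_file:
#         out, err = utils.execute('ipmitool', '-f', pw_file, 'power', 'status')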
def _parse_driver_info(node):
"""Gets the parameters required for ipmitool to access the node.
:param node: the Node of interest.
:returns: dictionary of parameters.
:raises: InvalidParameterValue when an invalid value is specified
:raises: MissingParameterValue when a required ipmi parameter is missing.
"""
info = node.driver_info or {}
bridging_types = ['single', 'dual']
missing_info = [key for key in REQUIRED_PROPERTIES if not info.get(key)]
if missing_info:
raise exception.MissingParameterValue(_(
"Missing the following IPMI credentials in node's"
" driver_info: %s.") % missing_info)
address = info.get('ipmi_address')
username = info.get('ipmi_username')
password = info.get('ipmi_password')
port = info.get('ipmi_terminal_port')
priv_level = info.get('ipmi_priv_level', 'ADMINISTRATOR')
bridging_type = info.get('ipmi_bridging', 'no')
local_address = info.get('ipmi_local_address')
transit_channel = info.get('ipmi_transit_channel')
transit_address = info.get('ipmi_transit_address')
target_channel = info.get('ipmi_target_channel')
target_address = info.get('ipmi_target_address')
if port:
try:
port = int(port)
except ValueError:
raise exception.InvalidParameterValue(_(
"IPMI terminal port is not an integer."))
# check if ipmi_bridging has proper value
if bridging_type == 'no':
# if bridging is not selected, then set all bridging params to None
(local_address, transit_channel, transit_address, target_channel,
target_address) = (None,) * 5
elif bridging_type in bridging_types:
# check if the particular bridging option is supported on host
if not _is_option_supported('%s_bridge' % bridging_type):
raise exception.InvalidParameterValue(_(
"Value for ipmi_bridging is provided as %s, but IPMI "
"bridging is not supported by the IPMI utility installed "
"on host. Ensure ipmitool version is > 1.8.11"
) % bridging_type)
# ensure that all the required parameters are provided
params_undefined = [param for param, value in [
("ipmi_target_channel", target_channel),
('ipmi_target_address', target_address)] if value is None]
if bridging_type == 'dual':
params_undefined2 = [param for param, value in [
("ipmi_transit_channel", transit_channel),
('ipmi_transit_address', transit_address)
] if value is None]
params_undefined.extend(params_undefined2)
else:
# if single bridging was selected, set dual bridge params to None
transit_channel = transit_address = None
# If the required parameters were not provided,
# raise an exception
if params_undefined:
raise exception.MissingParameterValue(_(
"%(param)s not provided") % {'param': params_undefined})
else:
raise exception.InvalidParameterValue(_(
"Invalid value for ipmi_bridging: %(bridging_type)s,"
" the valid value can be one of: %(bridging_types)s"
) % {'bridging_type': bridging_type,
'bridging_types': bridging_types + ['no']})
if priv_level not in VALID_PRIV_LEVELS:
valid_priv_lvls = ', '.join(VALID_PRIV_LEVELS)
raise exception.InvalidParameterValue(_(
"Invalid privilege level value:%(priv_level)s, the valid value"
" can be one of %(valid_levels)s") %
{'priv_level': priv_level, 'valid_levels': valid_priv_lvls})
return {
'address': address,
'username': username,
'password': password,
'port': port,
'uuid': node.uuid,
'priv_level': priv_level,
'local_address': local_address,
'transit_channel': transit_channel,
'transit_address': transit_address,
'target_channel': target_channel,
'target_address': target_address
}
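# For illustration, a node configured for dual bridging might carry
# driver_info similar to the following (all values hypothetical):
#
#     {'ipmi_address': '10.0.0.5', 'ipmi_username': 'admin',
#      'ipmi_password': 's3cret', 'ipmi_bridging': 'dual',
#      'ipmi_transit_channel': '0', 'ipmi_transit_address': '0x20',
#      'ipmi_target_channel': '7', 'ipmi_target_address': '0x72'}
#
# which _parse_driver_info() validates and flattens into the dict returned
# above.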
def _exec_ipmitool(driver_info, command):
"""Execute the ipmitool command.
    This uses the lanplus interface to communicate with the BMC.
:param driver_info: the ipmitool parameters for accessing a node.
:param command: the ipmitool command to be executed.
:returns: (stdout, stderr) from executing the command.
:raises: PasswordFileFailedToCreate from creating or writing to the
temporary file.
:raises: processutils.ProcessExecutionError from executing the command.
"""
args = ['ipmitool',
'-I',
'lanplus',
'-H',
driver_info['address'],
'-L', driver_info['priv_level']
]
if driver_info['username']:
args.append('-U')
args.append(driver_info['username'])
for name, option in BRIDGING_OPTIONS:
if driver_info[name] is not None:
args.append(option)
args.append(driver_info[name])
# specify retry timing more precisely, if supported
num_tries = max(
(CONF.ipmi.retry_timeout // CONF.ipmi.min_command_interval), 1)
if _is_option_supported('timing'):
args.append('-R')
args.append(str(num_tries))
args.append('-N')
args.append(str(CONF.ipmi.min_command_interval))
end_time = (time.time() + CONF.ipmi.retry_timeout)
while True:
num_tries = num_tries - 1
# NOTE(deva): ensure that no communications are sent to a BMC more
# often than once every min_command_interval seconds.
time_till_next_poll = CONF.ipmi.min_command_interval - (
time.time() - LAST_CMD_TIME.get(driver_info['address'], 0))
if time_till_next_poll > 0:
time.sleep(time_till_next_poll)
        # Reset the argument list for each attempt so that password-file
        # arguments from a previous execution are not carried over.
cmd_args = args[:]
        # The 'ipmitool' command prompts for a password when no '-f' option
        # is given, so a password file is always written; '\0' is used as the
        # file content so that an empty password is supported as well.
with _make_password_file(
driver_info['password'] or '\0'
) as pw_file:
cmd_args.append('-f')
cmd_args.append(pw_file)
cmd_args.extend(command.split(" "))
try:
out, err = utils.execute(*cmd_args)
return out, err
except processutils.ProcessExecutionError as e:
with excutils.save_and_reraise_exception() as ctxt:
err_list = [x for x in IPMITOOL_RETRYABLE_FAILURES
if x in e.args[0]]
if ((time.time() > end_time) or
(num_tries == 0) or
not err_list):
LOG.error(_LE('IPMI Error while attempting '
'"%(cmd)s" for node %(node)s. '
'Error: %(error)s'),
{
'node': driver_info['uuid'],
'cmd': e.cmd,
'error': e
})
else:
ctxt.reraise = False
LOG.warning(_LW('IPMI Error encountered, retrying '
'"%(cmd)s" for node %(node)s. '
'Error: %(error)s'),
{
'node': driver_info['uuid'],
'cmd': e.cmd,
'error': e
})
finally:
LAST_CMD_TIME[driver_info['address']] = time.time()
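# Putting it together: for a node without bridging and a timing-capable
# ipmitool, the assembled "power status" invocation looks roughly like this
# (address, credentials and retry numbers are illustrative; the password
# file is created per attempt):
#
#     ipmitool -I lanplus -H 10.0.0.5 -L ADMINISTRATOR -U admin \
#         -R 12 -N 5 -f /tmp/tmpXXXXXX power status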
def _sleep_time(iter):
"""Return the time-to-sleep for the n'th iteration of a retry loop.
    This implementation increases quadratically.
:param iter: iteration number
:returns: number of seconds to sleep
"""
if iter <= 1:
return 1
return iter ** 2
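# The schedule produced by _sleep_time() grows quadratically: iterations
# 1, 2, 3, 4 sleep 1, 4, 9, 16 seconds respectively, so _set_and_wait()
# polls quickly at first and backs off while staying under retry_timeout.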
def _set_and_wait(target_state, driver_info):
"""Helper function for DynamicLoopingCall.
    This method changes the power state and polls the BMC until the desired
    power state is reached, or until CONF.ipmi.retry_timeout would be exceeded
    by the next iteration.
This method assumes the caller knows the current power state and does not
check it prior to changing the power state. Most BMCs should be fine, but
if a driver is concerned, the state should be checked prior to calling this
method.
:param target_state: desired power state
:param driver_info: the ipmitool parameters for accessing a node.
:returns: one of ironic.common.states
"""
if target_state == states.POWER_ON:
state_name = "on"
elif target_state == states.POWER_OFF:
state_name = "off"
def _wait(mutable):
try:
# Only issue power change command once
if mutable['iter'] < 0:
_exec_ipmitool(driver_info, "power %s" % state_name)
else:
mutable['power'] = _power_status(driver_info)
except (exception.PasswordFileFailedToCreate,
processutils.ProcessExecutionError,
exception.IPMIFailure):
# Log failures but keep trying
LOG.warning(_LW("IPMI power %(state)s failed for node %(node)s."),
{'state': state_name, 'node': driver_info['uuid']})
finally:
mutable['iter'] += 1
if mutable['power'] == target_state:
raise loopingcall.LoopingCallDone()
sleep_time = _sleep_time(mutable['iter'])
if (sleep_time + mutable['total_time']) > CONF.ipmi.retry_timeout:
# Stop if the next loop would exceed maximum retry_timeout
LOG.error(_LE('IPMI power %(state)s timed out after '
'%(tries)s retries on node %(node_id)s.'),
{'state': state_name, 'tries': mutable['iter'],
'node_id': driver_info['uuid']})
mutable['power'] = states.ERROR
raise loopingcall.LoopingCallDone()
else:
mutable['total_time'] += sleep_time
return sleep_time
# Use mutable objects so the looped method can change them.
# Start 'iter' from -1 so that the first two checks are one second apart.
status = {'power': None, 'iter': -1, 'total_time': 0}
timer = loopingcall.DynamicLoopingCall(_wait, status)
timer.start().wait()
return status['power']
def _power_on(driver_info):
"""Turn the power ON for this node.
:param driver_info: the ipmitool parameters for accessing a node.
:returns: one of ironic.common.states POWER_ON or ERROR.
:raises: IPMIFailure on an error from ipmitool (from _power_status call).
"""
return _set_and_wait(states.POWER_ON, driver_info)
def _power_off(driver_info):
"""Turn the power OFF for this node.
:param driver_info: the ipmitool parameters for accessing a node.
:returns: one of ironic.common.states POWER_OFF or ERROR.
:raises: IPMIFailure on an error from ipmitool (from _power_status call).
"""
return _set_and_wait(states.POWER_OFF, driver_info)
def _power_status(driver_info):
"""Get the power status for a node.
:param driver_info: the ipmitool access parameters for a node.
:returns: one of ironic.common.states POWER_OFF, POWER_ON or ERROR.
:raises: IPMIFailure on an error from ipmitool.
"""
cmd = "power status"
try:
out_err = _exec_ipmitool(driver_info, cmd)
except (exception.PasswordFileFailedToCreate,
processutils.ProcessExecutionError) as e:
LOG.warning(_LW("IPMI power status failed for node %(node_id)s with "
"error: %(error)s."),
{'node_id': driver_info['uuid'], 'error': e})
raise exception.IPMIFailure(cmd=cmd)
if out_err[0] == "Chassis Power is on\n":
return states.POWER_ON
elif out_err[0] == "Chassis Power is off\n":
return states.POWER_OFF
else:
return states.ERROR
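# The string comparison above relies on ipmitool's exact stdout: "power
# status" prints a single line such as
#
#     Chassis Power is on
#
# and anything other than the two recognized lines maps to states.ERROR.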
def _process_sensor(sensor_data):
sensor_data_fields = sensor_data.split('\n')
sensor_data_dict = {}
for field in sensor_data_fields:
if not field:
continue
kv_value = field.split(':')
if len(kv_value) != 2:
continue
sensor_data_dict[kv_value[0].strip()] = kv_value[1].strip()
return sensor_data_dict
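# Example (illustrative): a raw sensor block such as
#
#     Sensor ID              : Temp (0x1)
#     Sensor Type (Analog)   : Temperature
#     Sensor Reading         : 40 (+/- 1) degrees C
#
# becomes {'Sensor ID': 'Temp (0x1)', 'Sensor Type (Analog)': 'Temperature',
# 'Sensor Reading': '40 (+/- 1) degrees C'}; lines that do not contain
# exactly one ':' are skipped by the length check above.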
def _get_sensor_type(node, sensor_data_dict):
    # There are only three sensor type name IDs: 'Sensor Type (Analog)',
    # 'Sensor Type (Discrete)' and 'Sensor Type (Threshold)'
for key in ('Sensor Type (Analog)', 'Sensor Type (Discrete)',
'Sensor Type (Threshold)'):
try:
return sensor_data_dict[key].split(' ', 1)[0]
except KeyError:
continue
raise exception.FailedToParseSensorData(
node=node.uuid,
error=(_("parse ipmi sensor data failed, unknown sensor type"
" data: %(sensors_data)s"), {'sensors_data': sensor_data_dict}))
def _parse_ipmi_sensors_data(node, sensors_data):
"""Parse the IPMI sensors data and format to the dict grouping by type.
    We run the 'ipmitool' command with the 'sdr -v' option, which returns
    sensor details in a human-readable format. These are reformatted into a
    JSON-serializable, dict-based structure so that the Ceilometer collector
    can consume them as a payload sent over the notification bus.
:param sensors_data: the sensor data returned by ipmitool command.
:returns: the sensor data with JSON format, grouped by sensor type.
:raises: FailedToParseSensorData when error encountered during parsing.
"""
sensors_data_dict = {}
if not sensors_data:
return sensors_data_dict
sensors_data_array = sensors_data.split('\n\n')
for sensor_data in sensors_data_array:
sensor_data_dict = _process_sensor(sensor_data)
if not sensor_data_dict:
continue
sensor_type = _get_sensor_type(node, sensor_data_dict)
        # ignore sensors which have no current 'Sensor Reading' data
if 'Sensor Reading' in sensor_data_dict:
sensors_data_dict.setdefault(sensor_type,
{})[sensor_data_dict['Sensor ID']] = sensor_data_dict
    # nothing was parsed: no valid sensor data
if not sensors_data_dict:
raise exception.FailedToParseSensorData(
node=node.uuid,
error=(_("parse ipmi sensor data failed, get nothing with input"
" data: %(sensors_data)s") % {'sensors_data': sensors_data}))
return sensors_data_dict
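# The resulting structure groups sensors by type, for example (hypothetical
# data):
#
#     {'Temperature': {'Temp (0x1)': {...}, 'Temp (0x2)': {...}},
#      'Fan': {'Fan1 (0x10)': {...}}}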
@task_manager.require_exclusive_lock
def send_raw(task, raw_bytes):
"""Send raw bytes to the BMC. Bytes should be a string of bytes.
:param task: a TaskManager instance.
:param raw_bytes: a string of raw bytes to send, e.g. '0x00 0x01'
:raises: IPMIFailure on an error from ipmitool.
:raises: MissingParameterValue if a required parameter is missing.
:raises: InvalidParameterValue when an invalid value is specified.
"""
node_uuid = task.node.uuid
LOG.debug('Sending node %(node)s raw bytes %(bytes)s',
{'bytes': raw_bytes, 'node': node_uuid})
driver_info = _parse_driver_info(task.node)
cmd = 'raw %s' % raw_bytes
try:
out, err = _exec_ipmitool(driver_info, cmd)
LOG.debug('send raw bytes returned stdout: %(stdout)s, stderr:'
' %(stderr)s', {'stdout': out, 'stderr': err})
except (exception.PasswordFileFailedToCreate,
processutils.ProcessExecutionError) as e:
LOG.exception(_LE('IPMI "raw bytes" failed for node %(node_id)s '
'with error: %(error)s.'),
{'node_id': node_uuid, 'error': e})
raise exception.IPMIFailure(cmd=cmd)
def _check_temp_dir():
"""Check for Valid temp directory."""
global TMP_DIR_CHECKED
# because a temporary file is used to pass the password to ipmitool,
# we should check the directory
if TMP_DIR_CHECKED is None:
try:
utils.check_dir()
except (exception.PathNotFound,
exception.DirectoryNotWritable,
exception.InsufficientDiskSpace) as e:
TMP_DIR_CHECKED = False
err_msg = (_("Ipmitool drivers need to be able to create "
"temporary files to pass password to ipmitool. "
"Encountered error: %s") % e)
e.message = err_msg
LOG.error(err_msg)
raise
else:
TMP_DIR_CHECKED = True
class IPMIPower(base.PowerInterface):
def __init__(self):
try:
_check_option_support(['timing', 'single_bridge', 'dual_bridge'])
except OSError:
raise exception.DriverLoadError(
driver=self.__class__.__name__,
reason=_("Unable to locate usable ipmitool command in "
"the system path when checking ipmitool version"))
_check_temp_dir()
def get_properties(self):
return COMMON_PROPERTIES
def validate(self, task):
"""Validate driver_info for ipmitool driver.
Check that node['driver_info'] contains IPMI credentials.
:param task: a TaskManager instance containing the node to act on.
:raises: InvalidParameterValue if required ipmi parameters are missing.
:raises: MissingParameterValue if a required parameter is missing.
"""
_parse_driver_info(task.node)
# NOTE(deva): don't actually touch the BMC in validate because it is
# called too often, and BMCs are too fragile.
# This is a temporary measure to mitigate problems while
# 1314954 and 1314961 are resolved.
def get_power_state(self, task):
"""Get the current power state of the task's node.
:param task: a TaskManager instance containing the node to act on.
:returns: one of ironic.common.states POWER_OFF, POWER_ON or ERROR.
:raises: InvalidParameterValue if required ipmi parameters are missing.
:raises: MissingParameterValue if a required parameter is missing.
:raises: IPMIFailure on an error from ipmitool (from _power_status
call).
"""
driver_info = _parse_driver_info(task.node)
return _power_status(driver_info)
@task_manager.require_exclusive_lock
def set_power_state(self, task, pstate):
"""Turn the power on or off.
:param task: a TaskManager instance containing the node to act on.
:param pstate: The desired power state, one of ironic.common.states
POWER_ON, POWER_OFF.
:raises: InvalidParameterValue if an invalid power state was specified.
:raises: MissingParameterValue if required ipmi parameters are missing
:raises: PowerStateFailure if the power couldn't be set to pstate.
"""
driver_info = _parse_driver_info(task.node)
if pstate == states.POWER_ON:
state = _power_on(driver_info)
elif pstate == states.POWER_OFF:
state = _power_off(driver_info)
else:
raise exception.InvalidParameterValue(_("set_power_state called "
"with invalid power state %s.") % pstate)
if state != pstate:
raise exception.PowerStateFailure(pstate=pstate)
@task_manager.require_exclusive_lock
def reboot(self, task):
"""Cycles the power to the task's node.
:param task: a TaskManager instance containing the node to act on.
:raises: MissingParameterValue if required ipmi parameters are missing.
:raises: InvalidParameterValue if an invalid power state was specified.
:raises: PowerStateFailure if the final state of the node is not
POWER_ON.
"""
driver_info = _parse_driver_info(task.node)
_power_off(driver_info)
state = _power_on(driver_info)
if state != states.POWER_ON:
raise exception.PowerStateFailure(pstate=states.POWER_ON)
class IPMIManagement(base.ManagementInterface):
def get_properties(self):
return COMMON_PROPERTIES
def __init__(self):
try:
_check_option_support(['timing', 'single_bridge', 'dual_bridge'])
except OSError:
raise exception.DriverLoadError(
driver=self.__class__.__name__,
reason=_("Unable to locate usable ipmitool command in "
"the system path when checking ipmitool version"))
_check_temp_dir()
def validate(self, task):
"""Check that 'driver_info' contains IPMI credentials.
Validates whether the 'driver_info' property of the supplied
task's node contains the required credentials information.
:param task: a task from TaskManager.
:raises: InvalidParameterValue if required IPMI parameters
are missing.
:raises: MissingParameterValue if a required parameter is missing.
"""
_parse_driver_info(task.node)
def get_supported_boot_devices(self):
"""Get a list of the supported boot devices.
:returns: A list with the supported boot devices defined
in :mod:`ironic.common.boot_devices`.
"""
return [boot_devices.PXE, boot_devices.DISK, boot_devices.CDROM,
boot_devices.BIOS, boot_devices.SAFE]
@task_manager.require_exclusive_lock
def set_boot_device(self, task, device, persistent=False):
"""Set the boot device for the task's node.
Set the boot device to use on next reboot of the node.
:param task: a task from TaskManager.
:param device: the boot device, one of
:mod:`ironic.common.boot_devices`.
:param persistent: Boolean value. True if the boot device will
persist to all future boots, False if not.
Default: False.
:raises: InvalidParameterValue if an invalid boot device is specified
:raises: MissingParameterValue if required ipmi parameters are missing.
:raises: IPMIFailure on an error from ipmitool.
"""
if device not in self.get_supported_boot_devices():
raise exception.InvalidParameterValue(_(
"Invalid boot device %s specified.") % device)
# note(JayF): IPMI spec indicates unless you send these raw bytes the
# boot device setting times out after 60s. Since it's possible it
# could be >60s before a node is rebooted, we should always send them.
# This mimics pyghmi's current behavior, and the "option=timeout"
# setting on newer ipmitool binaries.
timeout_disable = "0x00 0x08 0x03 0x08"
send_raw(task, timeout_disable)
cmd = "chassis bootdev %s" % device
if persistent:
cmd = cmd + " options=persistent"
driver_info = _parse_driver_info(task.node)
try:
out, err = _exec_ipmitool(driver_info, cmd)
except (exception.PasswordFileFailedToCreate,
processutils.ProcessExecutionError) as e:
LOG.warning(_LW('IPMI set boot device failed for node %(node)s '
'when executing "ipmitool %(cmd)s". '
'Error: %(error)s'),
{'node': driver_info['uuid'], 'cmd': cmd, 'error': e})
raise exception.IPMIFailure(cmd=cmd)
def get_boot_device(self, task):
"""Get the current boot device for the task's node.
Returns the current boot device of the node.
:param task: a task from TaskManager.
:raises: InvalidParameterValue if required IPMI parameters
are missing.
:raises: IPMIFailure on an error from ipmitool.
:raises: MissingParameterValue if a required parameter is missing.
:returns: a dictionary containing:
:boot_device: the boot device, one of
:mod:`ironic.common.boot_devices` or None if it is unknown.
:persistent: Whether the boot device will persist to all
future boots or not, None if it is unknown.
"""
cmd = "chassis bootparam get 5"
driver_info = _parse_driver_info(task.node)
response = {'boot_device': None, 'persistent': None}
try:
out, err = _exec_ipmitool(driver_info, cmd)
except (exception.PasswordFileFailedToCreate,
processutils.ProcessExecutionError) as e:
LOG.warning(_LW('IPMI get boot device failed for node %(node)s '
'when executing "ipmitool %(cmd)s". '
'Error: %(error)s'),
{'node': driver_info['uuid'], 'cmd': cmd, 'error': e})
raise exception.IPMIFailure(cmd=cmd)
re_obj = re.search('Boot Device Selector : (.+)?\n', out)
if re_obj:
boot_selector = re_obj.groups('')[0]
if 'PXE' in boot_selector:
response['boot_device'] = boot_devices.PXE
elif 'Hard-Drive' in boot_selector:
if 'Safe-Mode' in boot_selector:
response['boot_device'] = boot_devices.SAFE
else:
response['boot_device'] = boot_devices.DISK
elif 'BIOS' in boot_selector:
response['boot_device'] = boot_devices.BIOS
elif 'CD/DVD' in boot_selector:
response['boot_device'] = boot_devices.CDROM
response['persistent'] = 'Options apply to all future boots' in out
return response
def get_sensors_data(self, task):
"""Get sensors data.
:param task: a TaskManager instance.
:raises: FailedToGetSensorData when getting the sensor data fails.
:raises: FailedToParseSensorData when parsing sensor data fails.
:raises: InvalidParameterValue if required ipmi parameters are missing
:raises: MissingParameterValue if a required parameter is missing.
:returns: returns a dict of sensor data group by sensor type.
"""
driver_info = _parse_driver_info(task.node)
        # with the '-v' option we get the entire sensor data, including the
        # extended sensor information
cmd = "sdr -v"
try:
out, err = _exec_ipmitool(driver_info, cmd)
except (exception.PasswordFileFailedToCreate,
processutils.ProcessExecutionError) as e:
raise exception.FailedToGetSensorData(node=task.node.uuid,
error=e)
return _parse_ipmi_sensors_data(task.node, out)
class VendorPassthru(base.VendorInterface):
def __init__(self):
try:
_check_option_support(['single_bridge', 'dual_bridge'])
except OSError:
raise exception.DriverLoadError(
driver=self.__class__.__name__,
reason=_("Unable to locate usable ipmitool command in "
"the system path when checking ipmitool version"))
_check_temp_dir()
@base.passthru(['POST'])
@task_manager.require_exclusive_lock
def send_raw(self, task, http_method, raw_bytes):
"""Send raw bytes to the BMC. Bytes should be a string of bytes.
:param task: a TaskManager instance.
:param http_method: the HTTP method used on the request.
:param raw_bytes: a string of raw bytes to send, e.g. '0x00 0x01'
:raises: IPMIFailure on an error from ipmitool.
:raises: MissingParameterValue if a required parameter is missing.
:raises: InvalidParameterValue when an invalid value is specified.
"""
send_raw(task, raw_bytes)
@base.passthru(['POST'])
@task_manager.require_exclusive_lock
def bmc_reset(self, task, http_method, warm=True):
"""Reset BMC with IPMI command 'bmc reset (warm|cold)'.
:param task: a TaskManager instance.
:param http_method: the HTTP method used on the request.
:param warm: boolean parameter to decide on warm or cold reset.
:raises: IPMIFailure on an error from ipmitool.
:raises: MissingParameterValue if a required parameter is missing.
:raises: InvalidParameterValue when an invalid value is specified
"""
node_uuid = task.node.uuid
if warm:
warm_param = 'warm'
else:
warm_param = 'cold'
LOG.debug('Doing %(warm)s BMC reset on node %(node)s',
{'warm': warm_param, 'node': node_uuid})
driver_info = _parse_driver_info(task.node)
cmd = 'bmc reset %s' % warm_param
try:
out, err = _exec_ipmitool(driver_info, cmd)
LOG.debug('bmc reset returned stdout: %(stdout)s, stderr:'
' %(stderr)s', {'stdout': out, 'stderr': err})
except (exception.PasswordFileFailedToCreate,
processutils.ProcessExecutionError) as e:
LOG.exception(_LE('IPMI "bmc reset" failed for node %(node_id)s '
'with error: %(error)s.'),
{'node_id': node_uuid, 'error': e})
raise exception.IPMIFailure(cmd=cmd)
def get_properties(self):
return COMMON_PROPERTIES
def validate(self, task, method, **kwargs):
"""Validate vendor-specific actions.
If invalid, raises an exception; otherwise returns None.
Valid methods:
* send_raw
* bmc_reset
:param task: a task from TaskManager.
:param method: method to be validated
:param kwargs: info for action.
:raises: InvalidParameterValue when an invalid parameter value is
specified.
:raises: MissingParameterValue if a required parameter is missing.
"""
if method == 'send_raw':
if not kwargs.get('raw_bytes'):
raise exception.MissingParameterValue(_(
'Parameter raw_bytes (string of bytes) was not '
'specified.'))
_parse_driver_info(task.node)
class IPMIShellinaboxConsole(base.ConsoleInterface):
"""A ConsoleInterface that uses ipmitool and shellinabox."""
def __init__(self):
try:
_check_option_support(['timing', 'single_bridge', 'dual_bridge'])
except OSError:
raise exception.DriverLoadError(
driver=self.__class__.__name__,
reason=_("Unable to locate usable ipmitool command in "
"the system path when checking ipmitool version"))
_check_temp_dir()
def get_properties(self):
d = COMMON_PROPERTIES.copy()
d.update(CONSOLE_PROPERTIES)
return d
def validate(self, task):
"""Validate the Node console info.
:param task: a task from TaskManager.
:raises: InvalidParameterValue
:raises: MissingParameterValue when a required parameter is missing
"""
driver_info = _parse_driver_info(task.node)
if not driver_info['port']:
raise exception.MissingParameterValue(_(
"Missing 'ipmi_terminal_port' parameter in node's"
" driver_info."))
def start_console(self, task):
"""Start a remote console for the node.
:param task: a task from TaskManager
:raises: InvalidParameterValue if required ipmi parameters are missing
:raises: PasswordFileFailedToCreate if unable to create a file
containing the password
:raises: ConsoleError if the directory for the PID file cannot be
created
:raises: ConsoleSubprocessFailed when invoking the subprocess failed
"""
driver_info = _parse_driver_info(task.node)
path = _console_pwfile_path(driver_info['uuid'])
pw_file = console_utils.make_persistent_password_file(
path, driver_info['password'])
ipmi_cmd = ("/:%(uid)s:%(gid)s:HOME:ipmitool -H %(address)s"
" -I lanplus -U %(user)s -f %(pwfile)s"
% {'uid': os.getuid(),
'gid': os.getgid(),
'address': driver_info['address'],
'user': driver_info['username'],
'pwfile': pw_file})
for name, option in BRIDGING_OPTIONS:
if driver_info[name] is not None:
ipmi_cmd = " ".join([ipmi_cmd,
option, driver_info[name]])
if CONF.debug:
ipmi_cmd += " -v"
ipmi_cmd += " sol activate"
try:
console_utils.start_shellinabox_console(driver_info['uuid'],
driver_info['port'],
ipmi_cmd)
except (exception.ConsoleError, exception.ConsoleSubprocessFailed):
with excutils.save_and_reraise_exception():
utils.unlink_without_raise(path)
def stop_console(self, task):
"""Stop the remote console session for the node.
:param task: a task from TaskManager
:raises: InvalidParameterValue if required ipmi parameters are missing
:raises: ConsoleError if unable to stop the console
"""
driver_info = _parse_driver_info(task.node)
try:
console_utils.stop_shellinabox_console(driver_info['uuid'])
finally:
utils.unlink_without_raise(
_console_pwfile_path(driver_info['uuid']))
def get_console(self, task):
"""Get the type and connection information about the console."""
driver_info = _parse_driver_info(task.node)
url = console_utils.get_shellinabox_console_url(driver_info['port'])
return {'type': 'shellinabox', 'url': url}
| apache-2.0 | -437,569,069,582,017,540 | 38.617729 | 79 | 0.600289 | false |
Ictp/indico | indico/MaKaC/user.py | 1 | 59857 | # -*- coding: utf-8 -*-
##
##
## This file is part of Indico.
## Copyright (C) 2002 - 2014 European Organization for Nuclear Research (CERN).
##
## Indico is free software; you can redistribute it and/or
## modify it under the terms of the GNU General Public License as
## published by the Free Software Foundation; either version 3 of the
## License, or (at your option) any later version.
##
## Indico is distributed in the hope that it will be useful, but
## WITHOUT ANY WARRANTY; without even the implied warranty of
## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
## General Public License for more details.
##
## You should have received a copy of the GNU General Public License
## along with Indico; if not, see <http://www.gnu.org/licenses/>.
from collections import OrderedDict
import operator
from BTrees.OOBTree import OOTreeSet, union
from persistent import Persistent
from accessControl import AdminList, AccessWrapper
import MaKaC
from MaKaC.common import filters, indexes
from MaKaC.common.cache import GenericCache
from MaKaC.common.Locators import Locator
from MaKaC.common.ObjectHolders import ObjectHolder
from MaKaC.errors import UserError, MaKaCError
from MaKaC.trashCan import TrashCanManager
import MaKaC.common.info as info
from MaKaC.i18n import _
from MaKaC.authentication.AuthenticationMgr import AuthenticatorMgr
from MaKaC.common.logger import Logger
from MaKaC.fossils.user import IAvatarFossil, IAvatarAllDetailsFossil,\
IGroupFossil, IPersonalInfoFossil, IAvatarMinimalFossil
from MaKaC.common.fossilize import Fossilizable, fossilizes
from pytz import all_timezones
from MaKaC.plugins.base import PluginsHolder
from indico.util.caching import order_dict
from indico.util.decorators import cached_classproperty
from indico.util.event import truncate_path
from indico.util.redis import write_client as redis_write_client
from indico.util.redis import avatar_links, suggestions
from flask import request
"""Contains the classes that implement the user management subsystem
"""
class Group(Persistent, Fossilizable):
fossilizes(IGroupFossil)
"""
"""
groupType = "Default"
def __init__(self, groupData=None):
self.id = ""
self.name = ""
self.description = ""
self.email = ""
self.members = []
self.obsolete = False
def __cmp__(self, other):
if type(self) is not type(other):
# This is actually dangerous and the ZODB manual says not to do this
# because it relies on memory order. However, this branch should never
# be taken anyway since we do not store different types in the same set
# or use them as keys.
return cmp(hash(self), hash(other))
return cmp(self.getId(), other.getId())
def setId(self, newId):
self.id = str(newId)
def getId(self):
return self.id
def setName(self, newName):
self.name = newName.strip()
GroupHolder().notifyGroupNameChange(self)
def getName(self):
return self.name
getFullName = getName
def setDescription(self, newDesc):
self.description = newDesc.strip()
def getDescription(self):
return self.description
def setEmail(self, newEmail):
self.email = newEmail.strip()
def getEmail(self):
try:
return self.email
except:
self.email = ""
return self.email
def isObsolete(self):
if not hasattr(self, "obsolete"):
self.obsolete = False
return self.obsolete
def setObsolete(self, obsolete):
self.obsolete = obsolete
def addMember(self, newMember):
if newMember == self:
raise MaKaCError(_("It is not possible to add a group as member of itself"))
if self.containsMember(newMember) or newMember.containsMember(self):
return
self.members.append(newMember)
if isinstance(newMember, Avatar):
newMember.linkTo(self, "member")
self._p_changed = 1
def removeMember(self, member):
if member == None or member not in self.members:
return
self.members.remove(member)
if isinstance(member, Avatar):
member.unlinkTo(self, "member")
self._p_changed = 1
def getMemberList(self):
return self.members
def _containsUser(self, avatar):
if avatar == None:
return 0
for member in self.members:
if member.containsUser(avatar):
return 1
return 0
def containsUser(self, avatar):
group_membership = GenericCache('groupmembership')
if avatar is None:
return False
key = "{0}-{1}".format(self.getId(), avatar.getId())
user_in_group = group_membership.get(key)
if user_in_group is None:
user_in_group = self._containsUser(avatar)
group_membership.set(key, user_in_group, time=1800)
return user_in_group
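    # Membership lookups can be expensive for nested or external groups, so
    # containsUser() memoizes the result in the 'groupmembership' cache for
    # 30 minutes under a "<groupId>-<avatarId>" key, e.g. "pcgroup-1234"
    # (example key only).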
def containsMember(self, member):
if member == None:
return 0
if member in self.members:
return 1
for m in self.members:
try:
if m.containsMember(member):
return 1
except AttributeError, e:
continue
return 0
def canModify(self, aw):
return self.canUserModify(aw.getUser())
def canUserModify(self, user):
return self.containsMember(user) or \
(user in AdminList.getInstance().getList())
def getLocator(self):
d = Locator()
d["groupId"] = self.getId()
return d
def exists(self):
return True
class _GroupFFName(filters.FilterField):
_id="name"
def satisfies(self,group):
for value in self._values:
if value.strip() != "":
if value.strip() == "*":
return True
if str(group.getName()).lower().find((str(value).strip().lower()))!=-1:
return True
return False
class _GroupFilterCriteria(filters.FilterCriteria):
_availableFields={"name":_GroupFFName}
def __init__(self,criteria={}):
filters.FilterCriteria.__init__(self,None,criteria)
class GroupHolder(ObjectHolder):
"""
"""
idxName = "groups"
counterName = "PRINCIPAL"
def add(self, group):
ObjectHolder.add(self, group)
self.getIndex().indexGroup(group)
def remove(self, group):
ObjectHolder.remove(self, group)
self.getIndex().unindexGroup(group)
def notifyGroupNameChange(self, group):
self.getIndex().unindexGroup(group)
self.getIndex().indexGroup(group)
def getIndex(self):
index = indexes.IndexesHolder().getById("group")
if index.getLength() == 0:
self._reIndex(index)
return index
def _reIndex(self, index):
for group in self.getList():
index.indexGroup(group)
def getBrowseIndex(self):
return self.getIndex().getBrowseIndex()
def getLength(self):
return self.getIndex().getLength()
def matchFirstLetter(self, letter, searchInAuthenticators=True):
result = []
index = self.getIndex()
if searchInAuthenticators:
self._updateGroupMatchFirstLetter(letter)
match = index.matchFirstLetter(letter)
if match != None:
for groupid in match:
if groupid != "":
if self.getById(groupid) not in result:
gr=self.getById(groupid)
result.append(gr)
return result
def match(self, criteria, searchInAuthenticators=True, exact=False):
crit={}
result = []
for f,v in criteria.items():
crit[f]=[v]
if crit.has_key("groupname"):
crit["name"] = crit["groupname"]
if searchInAuthenticators:
self._updateGroupMatch(crit["name"][0],exact)
match = self.getIndex().matchGroup(crit["name"][0], exact=exact)
if match != None:
for groupid in match:
gr = self.getById(groupid)
if gr not in result:
result.append(gr)
return result
def update(self, group):
if self.hasKey(group.getId()):
current_group = self.getById(group.getId())
current_group.setDescription(group.getDescription())
def _updateGroupMatch(self, name, exact=False):
for auth in AuthenticatorMgr().getList():
for group in auth.matchGroup(name, exact):
if not self.hasKey(group.getId()):
self.add(group)
else:
self.update(group)
def _updateGroupMatchFirstLetter(self, letter):
for auth in AuthenticatorMgr().getList():
for group in auth.matchGroupFirstLetter(letter):
if not self.hasKey(group.getId()):
self.add(group)
else:
self.update(group)
class Avatar(Persistent, Fossilizable):
"""This class implements the representation of users inside the system.
Basically it contains personal data from them which is relevant for the
system.
"""
fossilizes(IAvatarFossil, IAvatarAllDetailsFossil, IAvatarMinimalFossil)
# When this class is defined MaKaC.conference etc. are not available yet
@cached_classproperty
@classmethod
def linkedToMap(cls):
from MaKaC.common.timerExec import Alarm
# Hey, when adding new roles don't forget to handle them in AvatarHolder.mergeAvatar, too!
return {
'category': {'cls': MaKaC.conference.Category,
'roles': set(['access', 'creator', 'favorite', 'manager'])},
'conference': {'cls': MaKaC.conference.Conference,
'roles': set(['abstractSubmitter', 'access', 'chair', 'creator', 'editor', 'manager',
'paperReviewManager', 'participant', 'referee', 'registrar', 'reviewer'])},
'session': {'cls': MaKaC.conference.Session,
'roles': set(['access', 'coordinator', 'manager'])},
'contribution': {'cls': MaKaC.conference.Contribution,
'roles': set(['access', 'editor', 'manager', 'referee', 'reviewer', 'submission'])},
'track': {'cls': MaKaC.conference.Track,
'roles': set(['coordinator'])},
'material': {'cls': MaKaC.conference.Material,
'roles': set(['access'])},
'resource': {'cls': MaKaC.conference.Resource,
'roles': set(['access'])},
'abstract': {'cls': MaKaC.review.Abstract,
'roles': set(['submitter'])},
'registration': {'cls': MaKaC.registration.Registrant,
'roles': set(['registrant'])},
'group': {'cls': MaKaC.user.Group,
'roles': set(['member'])},
'evaluation': {'cls': MaKaC.evaluation.Submission,
'roles': set(['submitter'])},
'alarm': {'cls': Alarm,
'roles': set(['to'])}
}
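    # For illustration, avatar.linkTo(conf, 'manager') on a Conference stores
    # the conference in self.linkedTo['conference']['manager'] (an OOTreeSet)
    # and, when a Redis write client is configured and the object maps to an
    # event, also records the link via avatar_links.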
def __init__(self, userData=None):
"""Class constructor.
Attributes:
userData -- dictionary containing user data to map into the
avatar. Possible key values (those with * are
multiple):
                            name, surName, title, organisation*, address*,
email*, telephone*, fax*
"""
self.id = ""
self.personId = None
self.name = ""
self.surName = ""
self.title = ""
self.organisation = [""]
self.address = [""]
self.email = ""
self.secondaryEmails = []
self.pendingSecondaryEmails = []
self.telephone = [""]
self.fax = [""]
self.identities = []
self.status = "Not confirmed" # The status can be 'activated', 'disabled' or 'Not confirmed'
from MaKaC.common import utils
self.key = utils.newKey() #key to activate the account
self.registrants = {}
self.apiKey = None
minfo = info.HelperMaKaCInfo.getMaKaCInfoInstance()
self._lang = minfo.getLang()
self._mergeTo = None
self._mergeFrom = []
#################################
#Fermi timezone awareness #
#################################
self.timezone = ""
self.displayTZMode = ""
#################################
#Fermi timezone awareness(end) #
#################################
self.resetLinkedTo()
self.personalInfo = PersonalInfo()
self.unlockedFields = [] # fields that are not synchronized with auth backends
self.authenticatorPersonalData = {} # personal data from authenticator
if userData is not None:
if 'name' in userData:
self.setName(userData["name"])
if 'surName' in userData:
self.setSurName(userData["surName"])
if 'title' in userData:
self.setTitle(userData["title"])
if 'organisation' in userData:
if len(userData["organisation"])>0:
for org in userData["organisation"]:
if not self.getOrganisation():
self.setOrganisation(org)
else:
self.addOrganisation(org)
if 'address' in userData:
if len(userData["address"])>0:
for addr in userData["address"]:
self.addAddress(addr)
if 'email' in userData:
if type(userData["email"]) == str:
self.setEmail(userData["email"])
elif len(userData["email"])>0:
for em in userData["email"]:
self.setEmail(em)
if 'telephone' in userData:
if len(userData["telephone"])>0:
for tel in userData["telephone"]:
self.addTelephone(tel)
if 'fax' in userData:
if len(userData["fax"])>0:
for fax in userData["fax"]:
self.addTelephone(fax)
############################
#Fermi timezone awareness #
############################
if 'timezone' in userData:
self.setTimezone(userData["timezone"])
else:
self.setTimezone(info.HelperMaKaCInfo.getMaKaCInfoInstance().getTimezone())
self.setDisplayTZMode(userData.get("displayTZMode", "Event Timezone"))
def __repr__(self):
return '<Avatar({0}, {1})>'.format(self.getId(), self.getFullName())
def mergeTo(self, av):
if av:
av.mergeFrom(self)
if self.getMergeTo():
self._mergeTo.unmergeFrom(self)
self._mergeTo = av
def getMergeTo(self):
try:
return self._mergeTo
except:
self._mergeTo = None
return self._mergeTo
def isMerged(self):
if self.getMergeTo():
return True
return False
def mergeFrom(self, av):
if not av in self.getMergeFromList():
self._mergeFrom.append(av)
self._p_changed = 1
def unmergeFrom(self, av):
if av in self.getMergeFromList():
self._mergeFrom.remove(av)
self._p_changed = 1
def getMergeFromList(self):
try:
return self._mergeFrom
except:
self._mergeFrom = []
return self._mergeFrom
def getKey(self):
return self.key
def getAPIKey(self):
try:
return self.apiKey
except:
self.apiKey = None
return self.apiKey
def setAPIKey(self, apiKey):
self.apiKey = apiKey
def getRelatedCategories(self):
favorites = self.getLinkTo('category', 'favorite')
managed = self.getLinkTo('category', 'manager')
res = {}
for categ in union(favorites, managed):
res[(categ.getTitle(), categ.getId())] = {
'categ': categ,
'favorite': categ in favorites,
'managed': categ in managed,
'path': truncate_path(categ.getCategoryPathTitles(), 30, False)
}
return OrderedDict(sorted(res.items(), key=operator.itemgetter(0)))
def getSuggestedCategories(self):
if not redis_write_client:
return []
related = union(self.getLinkTo('category', 'favorite'), self.getLinkTo('category', 'manager'))
res = []
for id, score in suggestions.get_suggestions(self, 'category').iteritems():
categ = MaKaC.conference.CategoryManager().getById(id)
if not categ or categ.isSuggestionsDisabled() or categ in related:
continue
if any(p.isSuggestionsDisabled() for p in categ.iterParents()):
continue
aw = AccessWrapper()
aw.setUser(self)
if request:
aw.setIP(request.remote_addr)
if not categ.canAccess(aw):
continue
res.append({
'score': score,
'categ': categ,
'path': truncate_path(categ.getCategoryPathTitles(), 30, False)
})
return res
def resetLinkedTo(self):
self.linkedTo = {}
self.updateLinkedTo()
self._p_changed = 1
def getLinkedTo(self):
try:
return self.linkedTo
except AttributeError:
self.resetLinkedTo()
return self.linkedTo
def updateLinkedTo(self):
        self.getLinkedTo()  # create the attribute if it does not exist
for field, data in self.linkedToMap.iteritems():
self.linkedTo.setdefault(field, {})
for role in data['roles']:
self.linkedTo[field].setdefault(role, OOTreeSet())
def linkTo(self, obj, role):
# to avoid issues with zombie avatars
if not AvatarHolder().hasKey(self.getId()):
return
self.updateLinkedTo()
for field, data in self.linkedToMap.iteritems():
if isinstance(obj, data['cls']):
if role not in data['roles']:
raise ValueError('role %s is not allowed for %s objects' % (role, type(obj).__name__))
self.linkedTo[field][role].add(obj)
self._p_changed = 1
if redis_write_client:
event = avatar_links.event_from_obj(obj)
if event:
avatar_links.add_link(self, event, field + '_' + role)
break
def getLinkTo(self, field, role):
self.updateLinkedTo()
return self.linkedTo[field][role]
def unlinkTo(self, obj, role):
# to avoid issues with zombie avatars
if not AvatarHolder().hasKey(self.getId()):
return
self.updateLinkedTo()
for field, data in self.linkedToMap.iteritems():
if isinstance(obj, data['cls']):
if role not in data['roles']:
raise ValueError('role %s is not allowed for %s objects' % (role, type(obj).__name__))
if obj in self.linkedTo[field][role]:
self.linkedTo[field][role].remove(obj)
self._p_changed = 1
if redis_write_client:
event = avatar_links.event_from_obj(obj)
if event:
avatar_links.del_link(self, event, field + '_' + role)
break
def getStatus(self):
try:
return self.status
except AttributeError:
self.status = "activated"
return self.status
def setStatus(self, status):
statIdx = indexes.IndexesHolder().getById("status")
statIdx.unindexUser(self)
self.status = status
self._p_changed = 1
statIdx.indexUser(self)
def activateAccount(self, checkPending=True):
self.setStatus("activated")
if checkPending:
#----Grant rights if any
from MaKaC.common import pendingQueues
pendingQueues.PendingQueuesHolder().grantRights(self)
def disabledAccount(self):
self.setStatus("disabled")
def isActivated(self):
return self.status == "activated"
def isDisabled(self):
return self.status == "disabled"
def isNotConfirmed(self):
return self.status == "Not confirmed"
def setId(self, id):
self.id = str(id)
def getId(self):
return self.id
def setPersonId(self, personId):
self.personId = personId
def getPersonId(self):
return getattr(self, 'personId', None)
def setName(self, name, reindex=False):
if reindex:
idx = indexes.IndexesHolder().getById('name')
idx.unindexUser(self)
self.name = name
idx.indexUser(self)
else:
self.name = name
self._p_changed = 1
def getName(self):
return self.name
getFirstName = getName
setFirstName = setName
def setSurName(self, name, reindex=False):
if reindex:
idx = indexes.IndexesHolder().getById('surName')
idx.unindexUser(self)
self.surName = name
idx.indexUser(self)
else:
self.surName = name
def getSurName(self):
return self.surName
def getFamilyName(self):
return self.surName
def getFullName(self):
surName = ""
if self.getSurName() != "":
# accented letter capitalization requires all these encodes/decodes
surName = "%s, " % self.getSurName().decode('utf-8').upper().encode('utf-8')
return "%s%s"%(surName, self.getName())
def getStraightFullName(self, upper = True):
return ("%s %s"%(self.getFirstName(), self.getFamilyName().upper() if upper else self.getFamilyName())).strip()
getDirectFullNameNoTitle = getStraightFullName
def getAbrName(self):
res = self.getSurName()
if self.getName() != "":
if res != "":
res = "%s, "%res
res = "%s%s."%(res, self.getName()[0].upper())
return res
def getStraightAbrName(self):
name = ""
if self.getName() != "":
name = "%s. "%self.getName()[0].upper()
return "%s%s"%(name, self.getSurName())
def addOrganisation(self, newOrg, reindex=False):
if reindex:
idx = indexes.IndexesHolder().getById('organisation')
idx.unindexUser(self)
self.organisation.append(newOrg.strip())
idx.indexUser(self)
else:
self.organisation.append(newOrg.strip())
self._p_changed = 1
def setOrganisation(self, org, item=0, reindex=False):
if reindex:
idx = indexes.IndexesHolder().getById('organisation')
idx.unindexUser(self)
self.organisation[item] = org.strip()
idx.indexUser(self)
else:
self.organisation[item] = org.strip()
self._p_changed = 1
setAffiliation = setOrganisation
def getOrganisations(self):
return self.organisation
def getOrganisation(self):
return self.organisation[0]
getAffiliation = getOrganisation
def setTitle(self, title):
self.title = title
def getTitle(self):
return self.title
#################################
#Fermi timezone awareness #
#################################
def setTimezone(self,tz=None):
if not tz:
tz = info.HelperMaKaCInfo.getMaKaCInfoInstance().getTimezone()
self.timezone = tz
def getTimezone(self):
tz = info.HelperMaKaCInfo.getMaKaCInfoInstance().getTimezone()
try:
if self.timezone in all_timezones:
return self.timezone
else:
self.setTimezone(tz)
return tz
except:
self.setTimezone(tz)
return tz
def setDisplayTZMode(self,display_tz='Event Timezone'):
self.displayTZMode = display_tz
def getDisplayTZMode(self):
return self.displayTZMode
#################################
#Fermi timezone awareness(end) #
#################################
def addAddress(self, newAddress):
self.address.append(newAddress)
self._p_changed = 1
def getAddresses(self):
return self.address
def getAddress(self):
return self.address[0]
def setAddress(self, address, item=0):
self.address[item] = address
self._p_changed = 1
def setEmail(self, email, reindex=False):
if reindex:
idx = indexes.IndexesHolder().getById('email')
idx.unindexUser(self)
self.email = email.strip().lower()
idx.indexUser(self)
else:
self.email = email.strip().lower()
def getEmails(self):
return [self.email] + self.getSecondaryEmails()
def getEmail(self):
return self.email
def getSecondaryEmails(self):
try:
return self.secondaryEmails
except:
self.secondaryEmails = []
return self.secondaryEmails
def addSecondaryEmail(self, email):
email = email.strip().lower()
if not email in self.getSecondaryEmails():
self.secondaryEmails.append(email)
self._p_changed = 1
def removeSecondaryEmail(self, email):
email = email.strip().lower()
if email in self.getSecondaryEmails():
self.secondaryEmails.remove(email)
self._p_changed = 1
def setSecondaryEmails(self, emailList, reindex=False):
emailList = map(lambda email: email.lower().strip(), emailList)
if reindex:
idx = indexes.IndexesHolder().getById('email')
idx.unindexUser(self)
self.secondaryEmails = emailList
idx.indexUser(self)
else:
self.secondaryEmails = emailList
def hasEmail(self, email):
l = [self.email] + self.getSecondaryEmails()
return email.lower().strip() in l
def hasSecondaryEmail(self, email):
return email.lower().strip() in self.getSecondaryEmails()
def getPendingSecondaryEmails(self):
try:
return self.pendingSecondaryEmails
except:
self.pendingSecondaryEmails = []
return self.pendingSecondaryEmails
def addPendingSecondaryEmail(self, email):
email = email.lower().strip()
        if email not in self.getPendingSecondaryEmails():  # creates the attribute if missing
self.pendingSecondaryEmails.append(email)
self._p_changed = 1
def removePendingSecondaryEmail(self, email):
email = email.lower().strip()
        if email in self.getPendingSecondaryEmails():  # creates the attribute if missing
self.pendingSecondaryEmails.remove(email)
self._p_changed = 1
def setPendingSecondaryEmails(self, emailList):
self.pendingSecondaryEmails = emailList
def addTelephone(self, newTel):
self.telephone.append(newTel)
self._p_changed = 1
def getTelephone(self):
return self.telephone[0]
getPhone = getTelephone
def setTelephone(self, tel, item=0):
self.telephone[item] = tel
self._p_changed = 1
setPhone = setTelephone
def getTelephones(self):
return self.telephone
def getSecondaryTelephones(self):
return self.telephone[1:]
def addFax(self, newFax):
self.fax.append(newFax)
self._p_changed = 1
def setFax(self, fax, item=0):
self.fax[item] = fax
self._p_changed = 1
def getFax(self):
return self.fax[0]
def getFaxes(self):
return self.fax
def addIdentity(self, newId):
""" Adds a new identity to this Avatar.
:param newId: a new PIdentity or inheriting object
:type newId: PIdentity
"""
if newId != None and (newId not in self.identities):
self.identities.append(newId)
self._p_changed = 1
def removeIdentity(self, Id):
""" Removed an identity from this Avatar.
:param newId: a PIdentity or inheriting object
:type newId: PIdentity
"""
if Id in self.identities:
self.identities.remove(Id)
self._p_changed = 1
def getIdentityList(self, create_identities=False):
""" Returns a list of identities for this Avatar.
Each identity will be a PIdentity or inheriting object
"""
if create_identities:
for authenticator in AuthenticatorMgr().getList():
identities = self.getIdentityByAuthenticatorId(authenticator.getId())
for identity in identities:
self.addIdentity(identity)
return self.identities
def getIdentityByAuthenticatorId(self, authenticatorId):
""" Return a list of PIdentity objects given an authenticator name
:param authenticatorId: the id of an authenticator, e.g. 'Local', 'LDAP', etc
:type authenticatorId: str
"""
result = []
for identity in self.identities:
if identity.getAuthenticatorTag() == authenticatorId:
result.append(identity)
if not result:
identity = AuthenticatorMgr().getById(authenticatorId).fetchIdentity(self)
if identity:
result.append(identity)
return result
def getIdentityById(self, id, tag):
""" Returns a PIdentity object given an authenticator name and the identity's login
:param id: the login string for this identity
:type id: str
:param tag: the name of an authenticator, e.g. 'Local', 'LDAP', etc
:type tag: str
"""
for Id in self.identities:
if Id.getAuthenticatorTag() == tag and Id.getLogin() == id:
return Id
return None
def addRegistrant(self, n):
if n != None and (n.getConference().getId() not in self.getRegistrants().keys()):
self.getRegistrants()[ n.getConference().getId() ] = n
self._p_changed = 1
def removeRegistrant(self, r):
if self.getRegistrants().has_key(r.getConference().getId()):
# unlink registrant from user
self.unlinkTo(r,'registrant')
del self.getRegistrants()[r.getConference().getId()]
self._p_changed = 1
def getRegistrantList(self):
return self.getRegistrants().values()
def getRegistrants(self):
try:
if self.registrants:
pass
except AttributeError, e:
self.registrants = {}
self._p_changed = 1
return self.registrants
def getRegistrantById(self, confId):
if self.getRegistrants().has_key(confId):
return self.getRegistrants()[confId]
return None
def isRegisteredInConf(self, conf):
if conf.getId() in self.getRegistrants().keys():
return True
for email in self.getEmails():
registrant = conf.getRegistrantsByEmail(email)
if registrant:
self.addRegistrant(registrant)
registrant.setAvatar(self)
return True
return False
def hasSubmittedEvaluation(self, evaluation):
for submission in evaluation.getSubmissions():
if submission.getSubmitter()==self:
return True
return False
def containsUser(self, avatar):
return avatar == self
containsMember = containsUser
def canModify(self, aw):
return self.canUserModify(aw.getUser())
def canUserModify(self, user):
return user == self or (user in AdminList.getInstance().getList())
def getLocator(self):
d = Locator()
d["userId"] = self.getId()
return d
def delete(self):
TrashCanManager().add(self)
def recover(self):
TrashCanManager().remove(self)
# Room booking related
def isMemberOfSimbaList(self, simbaListName):
# Try to get the result from the cache
try:
if simbaListName in self._v_isMember.keys():
return self._v_isMember[simbaListName]
except:
self._v_isMember = {}
groups = []
try:
# try to get the exact match first, which is what we expect since
# there shouldn't be uppercase letters
groups.append(GroupHolder().getById(simbaListName))
except KeyError:
groups = GroupHolder().match({ 'name': simbaListName }, searchInAuthenticators = False, exact=True)
if not groups:
groups = GroupHolder().match({ 'name': simbaListName }, exact=True)
if groups:
result = groups[0].containsUser(self)
self._v_isMember[simbaListName] = result
return result
self._v_isMember[simbaListName] = False
return False
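    # The per-request cache above lives in the volatile attribute
    # self._v_isMember; ZODB never persists '_v_' attributes, so repeated
    # checks of the same list within one request are served from memory.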
def isAdmin(self):
"""
Convenience method for checking whether this user is an admin.
Returns bool.
"""
        return AdminList.getInstance().isAdmin(self)
def isRBAdmin(self):
"""
Convenience method for checking whether this user is an admin for the RB module.
Returns bool.
"""
if self.isAdmin():
return True
for entity in PluginsHolder().getPluginType('RoomBooking').getOption('Managers').getValue():
if (isinstance(entity, Group) and entity.containsUser(self)) or \
(isinstance(entity, Avatar) and entity == self):
return True
return False
def getRooms(self):
"""
Returns list of rooms (RoomBase derived objects) this
user is responsible for.
"""
from MaKaC.plugins.RoomBooking.default.room import Room
from MaKaC.rb_location import RoomGUID
rooms = Room.getUserRooms(self)
roomList = [ RoomGUID.parse(str(rg)).getRoom() for rg in rooms ] if rooms else []
return [room for room in roomList if room and room.isActive]
def getReservations(self):
"""
Returns list of ALL reservations (ReservationBase
derived objects) this user has ever made.
"""
# self._ensureRoomAndResv()
# resvs = [guid.getReservation() for guid in self.resvGuids]
# return resvs
from MaKaC.rb_location import CrossLocationQueries
from MaKaC.rb_reservation import ReservationBase
resvEx = ReservationBase()
resvEx.createdBy = str(self.id)
resvEx.isCancelled = None
resvEx.isRejected = None
resvEx.isArchival = None
myResvs = CrossLocationQueries.getReservations(resvExample = resvEx)
return myResvs
def getReservationsOfMyRooms(self):
"""
Returns list of ALL reservations (ReservationBase
derived objects) this user has ever made.
"""
# self._ensureRoomAndResv()
# resvs = [guid.getReservation() for guid in self.resvGuids]
# return resvs
from MaKaC.rb_location import CrossLocationQueries
from MaKaC.rb_reservation import ReservationBase
myRooms = self.getRooms() # Just to speed up
resvEx = ReservationBase()
resvEx.isCancelled = None
resvEx.isRejected = None
resvEx.isArchival = None
myResvs = CrossLocationQueries.getReservations(resvExample = resvEx, rooms = myRooms)
return myResvs
def getPersonalInfo(self):
try:
return self.personalInfo
except:
self.personalInfo = PersonalInfo()
return self.personalInfo
def isFieldSynced(self, field):
if not hasattr(self, 'unlockedFields'):
self.unlockedFields = []
return field not in self.unlockedFields
def setFieldSynced(self, field, synced):
# check if the sync state is the same. also creates the list if it's missing
if synced == self.isFieldSynced(field):
pass
elif synced:
self.unlockedFields.remove(field)
self._p_changed = 1
else:
self.unlockedFields.append(field)
self._p_changed = 1
def getNotSyncedFields(self):
if not hasattr(self, 'unlockedFields'):
self.unlockedFields = []
return self.unlockedFields
def setAuthenticatorPersonalData(self, field, value):
fields = {'phone': {'get': self.getPhone,
'set': self.setPhone},
'fax': {'get': self.getFax,
'set': self.setFax},
'address': {'get': self.getAddress,
'set': self.setAddress},
'surName': {'get': self.getSurName,
'set': lambda x: self.setSurName(x, reindex=True)},
'firstName': {'get': self.getFirstName,
'set': lambda x: self.setFirstName(x, reindex=True)},
'affiliation': {'get': self.getAffiliation,
'set': lambda x: self.setAffiliation(x, reindex=True)},
'email': {'get': self.getEmail,
'set': lambda x: self.setEmail(x, reindex=True)}}
if not hasattr(self, 'authenticatorPersonalData'):
self.authenticatorPersonalData = {}
self.authenticatorPersonalData[field] = value or ''
field_accessors = fields[field]
if value and value != field_accessors['get']() and self.isFieldSynced(field):
field_accessors['set'](value)
self._p_changed = 1
def getAuthenticatorPersonalData(self, field):
if not hasattr(self, 'authenticatorPersonalData'):
self.authenticatorPersonalData = {}
return self.authenticatorPersonalData.get(field)
def clearAuthenticatorPersonalData(self):
self.authenticatorPersonalData = {}
def getLang(self):
try:
return self._lang
except:
minfo = info.HelperMaKaCInfo.getMaKaCInfoInstance()
self._lang = minfo.getLang()
return self._lang
def setLang(self, lang):
        self._lang = lang
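# Illustrative sketch only (not part of the original module): how the
# field-sync API above is typically exercised; `avatar` is assumed to be an
# existing Avatar instance.
def _example_field_sync(avatar):
    # Mark 'phone' as locally overridden (no longer synced from the
    # authenticator), then restore syncing again.
    avatar.setFieldSynced('phone', False)
    assert 'phone' in avatar.getNotSyncedFields()
    avatar.setFieldSynced('phone', True)
    assert avatar.isFieldSynced('phone')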
class AvatarHolder(ObjectHolder):
"""Specialised ObjectHolder dealing with user (avatar) objects. Objects of
this class represent an access point to Avatars of the application and
provides different methods for accessing and retrieving them in several
ways.
"""
idxName = "avatars"
counterName = "PRINCIPAL"
_indexes = [ "email", "name", "surName","organisation", "status" ]
def matchFirstLetter(self, index, letter, onlyActivated=True, searchInAuthenticators=True):
result = {}
if index not in self._indexes:
return None
if index in ["name", "surName", "organisation"]:
match = indexes.IndexesHolder().getById(index).matchFirstLetter(letter, accent_sensitive=False)
else:
match = indexes.IndexesHolder().getById(index).matchFirstLetter(letter)
if match is not None:
for userid in match:
if self.getById(userid) not in result:
av = self.getById(userid)
if not onlyActivated or av.isActivated():
result[av.getEmail()] = av
if searchInAuthenticators:
for authenticator in AuthenticatorMgr().getList():
matches = authenticator.matchUserFirstLetter(index, letter)
if matches:
for email, record in matches.iteritems():
                        emailResultList = [e for av in result.values() for e in av.getEmails()]
                        if email not in emailResultList:
userMatched = self.match({'email': email}, exact=1, searchInAuthenticators=False)
if not userMatched:
av = Avatar(record)
av.setId(record["id"])
av.status = record["status"]
result[email] = av
else:
av = userMatched[0]
result[av.getEmail()] = av
return result.values()
def match(self, criteria, exact=0, onlyActivated=True, searchInAuthenticators=True):
result = {}
iset = set()
for f, v in criteria.items():
v = str(v).strip()
if v and f in self._indexes:
match = indexes.IndexesHolder().getById(f).matchUser(v, exact=exact, accent_sensitive=False)
if match is not None:
if len(iset) == 0:
iset = set(match)
else:
iset = iset & set(match)
            for userid in iset:
                av = self.getById(userid)
                if not onlyActivated or av.isActivated():
                    result[av.getEmail()] = av
if searchInAuthenticators:
for authenticator in AuthenticatorMgr().getList():
matches = authenticator.matchUser(criteria, exact=exact)
if matches:
for email, record in matches.iteritems():
                        emailResultList = [e for av in result.values() for e in av.getEmails()]
                        if email not in emailResultList:
userMatched = self.match({'email': email}, exact=1, searchInAuthenticators=False)
if not userMatched:
av = Avatar(record)
av.setId(record["id"])
av.status = record["status"]
if self._userMatchCriteria(av, criteria, exact):
result[email] = av
else:
av = userMatched[0]
if self._userMatchCriteria(av, criteria, exact):
result[av.getEmail()] = av
return result.values()
def _userMatchCriteria(self, av, criteria, exact):
if criteria.has_key("organisation"):
if criteria["organisation"]:
lMatch = False
for org in av.getOrganisations():
if exact:
if criteria["organisation"].lower() == org.lower():
lMatch = True
else:
if criteria["organisation"].lower() in org.lower():
lMatch = True
if not lMatch:
return False
if criteria.has_key("surName"):
if criteria["surName"]:
if exact:
if not criteria["surName"].lower() == av.getSurName().lower():
return False
else:
if not criteria["surName"].lower() in av.getSurName().lower():
return False
if criteria.has_key("name"):
if criteria["name"]:
if exact:
if not criteria["name"].lower() == av.getName().lower():
return False
else:
if not criteria["name"].lower() in av.getName().lower():
return False
if criteria.has_key("email"):
if criteria["email"]:
lMatch = False
for email in av.getEmails():
if exact:
if criteria["email"].lower() == email.lower():
lMatch = True
else:
if criteria["email"].lower() in email.lower():
lMatch = True
if not lMatch:
return False
return True
def getById(self, id):
try:
return ObjectHolder.getById(self, id)
except:
pass
try:
authId, extId, email = id.split(":")
except:
return None
av = self.match({"email": email}, searchInAuthenticators=False)
if av:
return av[0]
user_data = AuthenticatorMgr().getById(authId).searchUserById(extId)
av = Avatar(user_data)
identity = user_data["identity"](user_data["login"], av)
user_data["authenticator"].add(identity)
av.activateAccount()
self.add(av)
return av
def add(self,av):
"""
Before adding the user, check if the email address isn't used
"""
if av.getEmail() is None or av.getEmail()=="":
raise UserError(_("User not created. You must enter an email address"))
emailmatch = self.match({'email': av.getEmail()}, exact=1, searchInAuthenticators=False)
if emailmatch != None and len(emailmatch) > 0 and emailmatch[0] != '':
raise UserError(_("User not created. The email address %s is already used.")% av.getEmail())
id = ObjectHolder.add(self,av)
for i in self._indexes:
indexes.IndexesHolder().getById(i).indexUser(av)
return id
def mergeAvatar(self, prin, merged):
        # replace merged by prin in all objects where merged appears
links = merged.getLinkedTo()
for objType in links.keys():
if objType == "category":
for role in links[objType].keys():
for cat in set(links[objType][role]):
# if the category has been deleted
if cat.getOwner() == None and cat.getId() != '0':
Logger.get('user.merge').warning(
"Trying to remove %s from %s (%s) but it seems to have been deleted" % \
(cat, prin.getId(), role))
continue
elif role == "creator":
cat.revokeConferenceCreation(merged)
cat.grantConferenceCreation(prin)
elif role == "manager":
cat.revokeModification(merged)
cat.grantModification(prin)
elif role == "access":
cat.revokeAccess(merged)
cat.grantAccess(prin)
elif role == "favorite":
merged.unlinkTo(cat, 'favorite')
prin.linkTo(cat, 'favorite')
elif objType == "conference":
confHolderIdx = MaKaC.conference.ConferenceHolder()._getIdx()
for role in links[objType].keys():
for conf in set(links[objType][role]):
# if the conference has been deleted
if conf.getId() not in confHolderIdx:
Logger.get('user.merge').warning(
"Trying to remove %s from %s (%s) but it seems to have been deleted" % \
(conf, prin.getId(), role))
continue
elif role == "creator":
conf.setCreator(prin)
elif role == "chair":
conf.removeChair(merged)
conf.addChair(prin)
elif role == "manager":
conf.revokeModification(merged)
conf.grantModification(prin)
elif role == "access":
conf.revokeAccess(merged)
conf.grantAccess(prin)
elif role == "abstractSubmitter":
conf.removeAuthorizedSubmitter(merged)
conf.addAuthorizedSubmitter(prin)
if objType == "session":
for role in links[objType].keys():
for ses in set(links[objType][role]):
owner = ses.getOwner()
# tricky, as conference containing it may have been deleted
if owner == None or owner.getOwner() == None:
Logger.get('user.merge').warning(
"Trying to remove %s from %s (%s) but it seems to have been deleted" % \
(ses, prin.getId(), role))
elif role == "manager":
ses.revokeModification(merged)
ses.grantModification(prin)
elif role == "access":
ses.revokeAccess(merged)
ses.grantAccess(prin)
elif role == "coordinator":
ses.removeCoordinator(merged)
ses.addCoordinator(prin)
if objType == "contribution":
for role in links[objType].keys():
for contrib in set(links[objType][role]):
if contrib.getOwner() == None:
Logger.get('user.merge').warning(
"Trying to remove %s from %s (%s) but it seems to have been deleted" % \
(contrib, prin.getId(), role))
elif role == "manager":
contrib.revokeModification(merged)
contrib.grantModification(prin)
elif role == "access":
contrib.revokeAccess(merged)
contrib.grantAccess(prin)
elif role == "submission":
contrib.revokeSubmission(merged)
contrib.grantSubmission(prin)
if objType == "track":
for role in links[objType].keys():
if role == "coordinator":
for track in set(links[objType][role]):
track.removeCoordinator(merged)
track.addCoordinator(prin)
if objType == "material":
for role in links[objType].keys():
if role == "access":
for mat in set(links[objType][role]):
mat.revokeAccess(merged)
mat.grantAccess(prin)
if objType == "file":
for role in links[objType].keys():
if role == "access":
for mat in set(links[objType][role]):
mat.revokeAccess(merged)
mat.grantAccess(prin)
if objType == "abstract":
for role in links[objType].keys():
if role == "submitter":
for abstract in set(links[objType][role]):
abstract.setSubmitter(prin)
if objType == "registration":
for role in links[objType].keys():
if role == "registrant":
for reg in set(links[objType][role]):
reg.setAvatar(prin)
prin.addRegistrant(reg)
if objType == "alarm":
for role in links[objType].keys():
if role == "to":
for alarm in set(links[objType][role]):
alarm.removeToUser(merged)
alarm.addToUser(prin)
if objType == "group":
for role in links[objType].keys():
if role == "member":
for group in set(links[objType][role]):
group.removeMember(merged)
group.addMember(prin)
if objType == "evaluation":
for role in links[objType].keys():
if role == "submitter":
for submission in set(links[objType][role]):
                        if len([s for s in submission.getEvaluation().getSubmissions() if s.getSubmitter() == prin]) > 0:
                            # prin has also answered the same evaluation as merged's
                            submission.setSubmitter(None)
                        else:
                            # prin didn't answer the same evaluation as merged's
                            submission.setSubmitter(prin)
# Merge avatars in redis
if redis_write_client:
avatar_links.merge_avatars(prin, merged)
suggestions.merge_avatars(prin, merged)
# remove merged from holder
self.remove(merged)
idxs = indexes.IndexesHolder()
org = idxs.getById('organisation')
email = idxs.getById('email')
name = idxs.getById('name')
surName = idxs.getById('surName')
status_index = idxs.getById('status')
org.unindexUser(merged)
email.unindexUser(merged)
name.unindexUser(merged)
surName.unindexUser(merged)
status_index.unindexUser(merged)
# add merged email and logins to prin and merge users
for mail in merged.getEmails():
prin.addSecondaryEmail(mail)
for id in merged.getIdentityList(create_identities=True):
id.setUser(prin)
prin.addIdentity(id)
merged.mergeTo(prin)
# reindex prin email
email.unindexUser(prin)
email.indexUser(prin)
def unmergeAvatar(self, prin, merged):
if not merged in prin.getMergeFromList():
return False
merged.mergeTo(None)
idxs = indexes.IndexesHolder()
org = idxs.getById('organisation')
email = idxs.getById('email')
name = idxs.getById('name')
surName = idxs.getById('surName')
email.unindexUser(prin)
for mail in merged.getEmails():
prin.removeSecondaryEmail(mail)
for id in merged.getIdentityList(create_identities=True):
prin.removeIdentity(id)
id.setUser(merged)
self.add(merged)
org.indexUser(merged)
email.indexUser(merged)
name.indexUser(merged)
surName.indexUser(merged)
email.indexUser(prin)
return True
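# Hedged usage sketch (not in the original source): looking up users with
# AvatarHolder.match(). The email value below is a placeholder.
def _example_avatar_lookup():
    ah = AvatarHolder()
    users = ah.match({'email': '[email protected]'}, exact=1,
                     searchInAuthenticators=False)
    return users[0] if users else None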
# ToDo: This class should ideally derive from TreeHolder, as it is meant to
# be an index over the "Principal" objects, i.e. a top-level index over the
# contents of AvatarHolder and GroupHolder. This would allow transparent
# access to Principal objects from their ids. To index all the objects
# transparently, AvatarHolder and GroupHolder must override the "add" method
# and, apart from their normal operation, also add the object to the
# PrincipalHolder.
# The problem is that I have experienced some trouble (the adding of objects
# does not seem to be performed) while adding an object both to the
# AvatarHolder and to this one; so, for the time being, I will implement it
# in a "dirty" and non-optimal way to be able to continue working, but the
# issue must be investigated and a better solution found.
# I'll keep the ObjectHolder interface so it will be easier afterwards to
# implement a more optimised solution (just this object needs to be modified).
class PrincipalHolder:
def __init__(self):
self.__gh = GroupHolder()
self.__ah = AvatarHolder()
def getById(self, id):
try:
prin = self.__gh.getById(id)
return prin
except KeyError, e:
pass
prin = self.__ah.getById(id)
return prin
def match(self, element_id, exact=1, searchInAuthenticators=True):
prin = self.__gh.match({"name": element_id}, searchInAuthenticators=searchInAuthenticators, exact=exact)
if not prin:
prin = self.__ah.match({"login": element_id}, searchInAuthenticators=searchInAuthenticators, exact=exact)
return prin
class LoginInfo:
def __init__(self, login, password):
self.setLogin(login)
self.setPassword(password)
def setLogin(self, newLogin):
self.login = newLogin.strip()
def getLogin(self):
return self.login
def setPassword(self, newPassword):
self.password = newPassword
def getPassword(self):
return self.password
class PersonalInfo(Persistent, Fossilizable):
fossilizes(IPersonalInfoFossil)
def __init__(self):
self._basket = PersonalBasket()
self._showPastEvents = False #determines if past events in category overview will be shown
self._p_changed = 1
def getShowPastEvents(self):
if not hasattr(self, "_showPastEvents"):
self._showPastEvents = False
return self._showPastEvents
def setShowPastEvents(self, value):
self._showPastEvents = value
def getBasket(self):
return self._basket
class PersonalBasket(Persistent):
# Generic basket, for Events, Categories, Avatars, Groups and Rooms
def __init__(self):
self._events = {}
self._categories = {}
self._rooms = {}
self._users = {}
self._userGroups = {}
self._p_changed = 1
def __findDict(self, element):
if (type(element) == MaKaC.conference.Conference):
return self._events
elif (type(element) == MaKaC.conference.Category):
return self._categories
elif (type(element) == Avatar):
return self._users
elif (type(element) == Group):
return self._userGroups
elif (type(element) == MaKaC.rb_location.RoomGUID):
return self._rooms
else:
raise Exception(_("Unknown Element Type"))
def addElement(self, element):
basket = self.__findDict(element)
if element.getId() not in basket:
basket[element.getId()] = element
self._p_changed = 1
return True
return False
def deleteElement(self, element=None):
res = self.__findDict(element).pop(element.getId(), None)
if res == None:
return False
self._p_changed = 1
return True
def deleteUser(self, user_id):
res = self._users.pop(user_id, None)
self._p_changed = 1
return res is not None
def hasElement(self, element):
return element.getId() in self.__findDict(element)
def hasUserId(self, id):
return self._users.has_key(id)
def getUsers(self):
return self._users
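# Illustrative sketch only: adding and querying basket elements. `info` is
# assumed to be a PersonalInfo instance and `conf` a Conference instance.
def _example_basket(info, conf):
    basket = info.getBasket()
    if not basket.hasElement(conf):
        basket.addElement(conf)    # stored under conf.getId()
    return basket.hasElement(conf)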
| gpl-3.0 | -3,996,122,456,246,659,600 | 34.671633 | 123 | 0.549794 | false |
eomahony/Numberjack | examples/Tsccd.py | 1 | 3117 | from Numberjack import *
def get_model(k, v, n):
design = Matrix(v, n)
pairs = Matrix(v * (v - 1) // 2, n)
index = [[0 for i in range(v)] for j in range(v)]
a = 0
for i in range(v - 1):
for j in range(i + 1, v):
index[i][j] = a
index[j][i] = a
a += 1
pair_occurrence = VarArray(v * (v - 1) // 2, 1, v - k)
first = VarArray(v * (v - 1) // 2, n)
last = VarArray(v * (v - 1) // 2, n)
model = Model(
## each block is a k-tuple
[Sum(col) == k for col in design.col],
## exactly one change between each block
[Sum([design[i][j - 1] > design[i][j] for i in range(v)]) == 1 for j in range(1, n)],
[Sum([design[i][j - 1] < design[i][j] for i in range(v)]) == 1 for j in range(1, n)],
## each pair can occur between 1 and v-k times
[pairs[index[i][j]][x] == (design[i][x] & design[j][x]) for i in range(v) for j in range(i) for x in range(n)],
[pair_occurrence[index[i][j]] == Sum(pairs[index[i][j]])],
## consecutive ones (convex rows)
[pairs[index[i][j]][x] <= (first[index[i][j]] <= x) for i in range(v) for j in range(i) for x in range(n)],
[pairs[index[i][j]][x] <= (last[index[i][j]] >= x) for i in range(v) for j in range(i) for x in range(n)],
[((first[index[i][j]] <= x) & (x <= last[index[i][j]])) <= pairs[index[i][j]][x] for x in range(n) for i in range(v) for j in range(i)],
[first[index[i][j]] <= last[index[i][j]] for i in range(v) for j in range(i)],
# implied constraint (we know the number of pairs in in each column)
[Sum(col) == (k*(k-1)//2) for col in pairs.col],
## symmetry breaking
[design[i][0] == 1 for i in range(k)],
design[k-1][1] == 0,
design[k][1] == 1,
)
return first, pairs, last, design, index, model
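# Minimal sketch (not in the original file): building the model for the
# default (k=3, v=6) instance without solving it.
def _example_build_model():
    k, v = 3, 6
    n = (v * (v - 1) // 2 - k * (k - 1) // 2) // (k - 1) + 1  # 7 blocks here
    return get_model(k, v, n)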
def solve(param):
k = param['k']
v = param['v']
n = (v * (v - 1) // 2 - k * (k - 1) // 2) // (k - 1) + 1
first, pairs, last, design, index, model = get_model(k, v, n)
solver = model.load(param['solver'])
solver.setHeuristic('DomainOverWDegree', 'Random', 1)
solver.setVerbosity(param['verbose'])
solver.setTimeLimit(param['tcutoff'])
solver.solve()
out = ''
if solver.is_sat():
out += str(design)+'\n'
for i in range(v-1):
for j in range(i + 1, v):
out += str((i, j)).ljust(5) + ' ' + str(first[index[i][j]]) + ' ' + str(pairs[index[i][j]]) + ' ' + str(last[index[i][j]]) + '\n'
the_design = [[] for y in range(n)]
for y in range(n):
for x in range(v):
if design[x][y].get_value() > 0:
the_design[y].append(x)
for x in range(k):
for y in range(n):
out += (str(the_design[y][x]+1).rjust(2) + ' ')
out += '\n'
out += '\nNodes: ' + str(solver.getNodes())
return out
default = {'k': 3, 'v': 6, 'solver': 'MiniSat', 'verbose': 0, 'tcutoff': 30}
if __name__ == '__main__':
param = input(default)
print(solve(param))
| lgpl-2.1 | 2,552,755,813,298,700,300 | 32.880435 | 145 | 0.494386 | false |
joswr1ght/gogomovietwit | movietwit.py | 1 | 10583 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import sys
import os
import time
import termios
import tty
import socket
import threading
import tweepy
import json
import string
import re
import HTMLParser
import tempfile
from tweepy import OAuthHandler
from tweepy import Stream
from tweepy.streaming import StreamListener
from PIL import ImageFont
from vlc import *
try:
import config
except ImportError:
sys.stderr.write("You need to configure the config.py file. Copy config.py.sample to config.py, then edit.\n")
sys.exit(1)
threads = []
sockfile = tempfile._get_default_tempdir() + "/gogomovietwit" + next(tempfile._get_candidate_names())
FONTSIZE=18
# TODO: This class has external dependencies
class GogoMovieTwitListener(StreamListener):
def __init__(self, font, fontsize, videowidth):
self.font = ImageFont.truetype(font, fontsize)
self.vidw = videowidth
def filter_content(self, tweet):
""" Returns True when the content should be filtered """
with open(config.dynamicfilterfile, "r") as f:
for line in f:
if line[0] == ";":
continue
log(line.lower())
log(tweet.lower())
if line.lower().rstrip() in tweet.lower():
return True
return False
def on_data(self, data):
try:
#print "\n" + "-"*80
#print data
tweet=json.loads(data)
# HTML parse ASCII only
text = HTMLParser.HTMLParser().unescape(tweet["text"].encode('ascii', 'ignore'))
# Remove hashtag, case insensitive (matching Tweepy stream)
hashtag = re.compile(re.escape(config.hashtag), re.IGNORECASE)
text = re.sub(hashtag, "", text)
# Remove URLs
text = re.sub(re.compile(r'((https?://[^\s<>"]+|www\.[^\s<>"]+))',re.DOTALL), "", text)
# Remove newlines
log(text)
text = text.replace("\n", '').replace("\r", '')
log(text)
# Skip RT's if configured to do so
if config.skiprt:
if tweet["retweeted"]:
return False
# Split tweet into words (by anything but alphabetic characters) to isolate "RT"
if "RT" in re.split("(?:(?:[^a-zA-Z]+')|(?:'[^a-zA-Z]+))|(?:[^a-zA-Z']+)", text):
return False
name = tweet["user"]["screen_name"]
if config.dynamicfilter:
if self.filter_content(name + " " + text):
return False
            # Using the known font and font size, wrap the text to fit on
            # screen (wrap once, at send time, so the name prefix is included
            # in the width computation)
            if config.anonmode:
                sendmessage("%s" % self.wrap_text(text))
            else:
                sendmessage("%s" % self.wrap_text(name + ": " + text))
# Tweet display duration
time.sleep(config.tweetdur)
return True
except Exception, e:
print('Error on_data: %s' % sys.exc_info()[1])
return True
def on_error(self, status):
print "Stream error:",
print status
return True
def on_disconnect(self, notice):
"""Called when twitter sends a disconnect notice
Disconnect codes are listed here:
https://dev.twitter.com/docs/streaming-apis/messages#Disconnect_messages_disconnect
"""
return
def wrap_text(self, text):
"""Wrap the text to fit video width in pixels"""
#log("DEBUG Before Wrap: %s\n"%text)
wrappedtext=""
words=text.split(" ")
start = stop = 0
while stop < len(words):
stop+=1
if self.font.getsize(" ".join(words[start:stop]))[0] > self.vidw:
wrappedtext += " ".join(words[start:stop-1]) + "\n"
start = stop-1
wrappedtext += " ".join(words[start:stop])
#log("DEBUG After Wrap: %s\n"%wrappedtext)
return wrappedtext
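# Hedged sketch: exercising the pixel-based wrapper on its own. The font file
# matches the one used in clientproc() below; the 600px width is an assumption.
def _example_wrap():
    listener = GogoMovieTwitListener("FreeSansBold.ttf", FONTSIZE, 600)
    return listener.wrap_text("a tweet that may need wrapping " * 5)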
# Sends a message to the given socket
def sendmessage(message):
s = socket.socket(socket.AF_UNIX)
s.settimeout(1)
s.connect(sockfile)
s.send(message)
s.close()
def getch():
fd = sys.stdin.fileno()
old = termios.tcgetattr(fd)
try:
tty.setraw(fd)
ch = sys.stdin.read(1)
finally:
termios.tcsetattr(fd, termios.TCSADRAIN, old)
return ch
def mspf():
"""Milliseconds per frame."""
return int(1000 // (player.get_fps() or 25))
def print_version():
"""Print libvlc version"""
try:
print('Build date: %s (%#x)' % (build_date, hex_version()))
print('LibVLC version: %s (%#x)' % (bytes_to_str(libvlc_get_version()), libvlc_hex_version()))
print('LibVLC compiler: %s' % bytes_to_str(libvlc_get_compiler()))
if plugin_path:
print('Plugin path: %s' % plugin_path)
except:
print('Error: %s' % sys.exc_info()[1])
def print_info():
"""Print information about the media"""
try:
print_version()
media = player.get_media()
print('State: %s' % player.get_state())
print('Media: %s' % bytes_to_str(media.get_mrl()))
print('Track: %s/%s' % (player.video_get_track(), player.video_get_track_count()))
print('Current time: %s/%s' % (player.get_time(), media.get_duration()))
print('Position: %s' % player.get_position())
print('FPS: %s (%d ms)' % (player.get_fps(), mspf()))
print('Rate: %s' % player.get_rate())
print('Video size: %s' % str(player.video_get_size(0))) # num=0
print('Scale: %s' % player.video_get_scale())
print('Aspect ratio: %s' % player.video_get_aspect_ratio())
#print('Window:' % player.get_hwnd()
except Exception:
print('Error: %s' % sys.exc_info()[1])
def sec_forward():
"""Go forward one sec"""
player.set_time(player.get_time() + 1000)
def sec_backward():
"""Go backward one sec"""
player.set_time(player.get_time() - 1000)
def frame_forward():
"""Go forward one frame"""
player.set_time(player.get_time() + mspf())
def frame_backward():
"""Go backward one frame"""
player.set_time(player.get_time() - mspf())
def print_help():
"""Print help"""
print('Single-character commands:')
for k, m in sorted(keybindings.items()):
m = (m.__doc__ or m.__name__).splitlines()[0]
print(' %s: %s.' % (k, m.rstrip('.')))
print('0-9: go to that fraction of the movie')
def quit_app():
"""Stop and exit"""
os.remove(sockfile)
sys.exit(0)
def serverproc(player):
# Remove the socket file if it already exists
if os.path.exists(sockfile):
os.remove(sockfile)
#print "Opening socket..."
server = socket.socket( socket.AF_UNIX, socket.SOCK_STREAM )
server.bind(sockfile)
server.listen(5)
#print "Listening..."
while True:
conn, addr = server.accept()
#print 'Accepted connection'
while True:
data = conn.recv(1024)
if not data:
break
else:
#print "-" * 20
#print data
# Write overlay marquee content
player.video_set_marquee_string(VideoMarqueeOption.Text, str_to_bytes(data))
def clientproc():
### Twitter parsing
auth = OAuthHandler(config.consumer_key, config.consumer_secret)
auth.set_access_token(config.access_token, config.access_secret)
while True:
vidwidth=600 # TODO: Figure out dynamic movie width
twitter_stream = Stream(auth, GogoMovieTwitListener("FreeSansBold.ttf", FONTSIZE, vidwidth), timeout=60)
try:
twitter_stream.filter(track=[config.hashtag])
except Exception, e:
print "Error:"
print e.__doc__
print e.message
print "Restarting stream."
time.sleep(3)
def log(message):
with open(config.logfile, "a") as f:
f.write(message + "\n")
if __name__ == '__main__':
if sys.argv[1:] and sys.argv[1] not in ('-h', '--help'):
movie = os.path.expanduser(sys.argv[1])
if not os.access(movie, os.R_OK):
print('Error: %s file not readable' % movie)
sys.exit(1)
instance = Instance("--sub-source marq")
try:
media = instance.media_new(movie)
except NameError:
print('NameError: %s (%s vs LibVLC %s)' % (sys.exc_info()[1],
__version__,
libvlc_get_version()))
sys.exit(1)
player = instance.media_player_new()
player.set_media(media)
### Kick off background job to handle server messages
t = threading.Thread(target=serverproc, args=(player,))
threads.append(t)
t.daemon=True
t.start()
### Kick off background job to get Twitter messages
t = threading.Thread(target=clientproc)
threads.append(t)
t.daemon=True
t.start()
player.video_set_marquee_int(VideoMarqueeOption.Enable, 1)
player.video_set_marquee_int(VideoMarqueeOption.Size, FONTSIZE) # pixels
player.video_set_marquee_int(VideoMarqueeOption.Position, Position.Bottom+Position.Left)
player.video_set_marquee_int(VideoMarqueeOption.Timeout, 5000) # millisec, 0==forever
player.video_set_marquee_int(VideoMarqueeOption.Refresh, 1000) # millisec (or sec?)
keybindings = {
' ': player.pause,
'+': sec_forward,
'-': sec_backward,
'.': frame_forward,
',': frame_backward,
'f': player.toggle_fullscreen,
'i': print_info,
'q': quit_app,
'?': print_help,
'h': print_help,
}
print_help()
# Start playing the video
player.play()
sendmessage('gogomovietwit - watching hashtag %s'%config.hashtag)
while True:
k = getch()
print('> %s' % k)
if k in keybindings:
keybindings[k]()
elif k.isdigit():
# jump to fraction of the movie.
player.set_position(float('0.'+k))
else:
print('Usage: %s <movie_filename>' % sys.argv[0])
print('Once launched, type ? for help.')
print('')
| mit | -3,674,604,013,726,025,700 | 30.780781 | 114 | 0.5506 | false |
rsjohnco/rez | src/rez/package_resources_.py | 1 | 12812 | from rez.utils.resources import Resource
from rez.utils.schema import Required, schema_keys
from rez.utils.logging_ import print_warning
from rez.utils.data_utils import cached_property, SourceCode, \
AttributeForwardMeta, LazyAttributeMeta
from rez.utils.formatting import PackageRequest
from rez.exceptions import PackageMetadataError, ResourceError
from rez.config import config, Config, create_config
from rez.vendor.version.version import Version
from rez.vendor.schema.schema import Schema, SchemaError, Optional, Or, And, Use
from textwrap import dedent
import os.path
# package attributes created at release time
package_release_keys = (
"timestamp",
'revision',
'changelog',
'release_message',
'previous_version',
'previous_revision',
'vcs')
#------------------------------------------------------------------------------
# utility schemas
#------------------------------------------------------------------------------
help_schema = Or(basestring, # single help entry
[[basestring]]) # multiple help entries
#------------------------------------------------------------------------------
# schema dicts
#------------------------------------------------------------------------------
# requirements of all package-related resources
base_resource_schema_dict = {
Required("name"): basestring
}
# package family
package_family_schema_dict = base_resource_schema_dict.copy()
# schema common to both package and variant
package_base_schema_dict = base_resource_schema_dict.copy()
package_base_schema_dict.update({
# basics
Optional("base"): basestring,
Optional("version"): Version,
Optional('description'): basestring,
Optional('authors'): [basestring],
# dependencies
Optional('requires'): [PackageRequest],
Optional('build_requires'): [PackageRequest],
Optional('private_build_requires'): [PackageRequest],
# plugins
Optional('has_plugins'): bool,
Optional('plugin_for'): [basestring],
# general
Optional('uuid'): basestring,
Optional('config'): Config,
Optional('tools'): [basestring],
Optional('help'): help_schema,
# commands
Optional('pre_commands'): SourceCode,
Optional('commands'): SourceCode,
Optional('post_commands'): SourceCode,
# release info
Optional("timestamp"): int,
Optional('revision'): object,
Optional('changelog'): basestring,
Optional('release_message'): Or(None, basestring),
Optional('previous_version'): Version,
Optional('previous_revision'): object,
Optional('vcs'): basestring,
# custom keys
Optional('custom'): dict
})
# package
package_schema_dict = package_base_schema_dict.copy()
package_schema_dict.update({
Optional("variants"): [[PackageRequest]]
})
# variant
variant_schema_dict = package_base_schema_dict.copy()
#------------------------------------------------------------------------------
# resource schemas
#------------------------------------------------------------------------------
package_family_schema = Schema(package_family_schema_dict)
package_schema = Schema(package_schema_dict)
variant_schema = Schema(variant_schema_dict)
#------------------------------------------------------------------------------
# schemas for converting from POD datatypes
#------------------------------------------------------------------------------
_commands_schema = Or(SourceCode, # commands as converted function
callable, # commands as function
basestring, # commands in text block
[basestring]) # old-style (rez-1) commands
_package_request_schema = And(basestring, Use(PackageRequest))
package_pod_schema_dict = base_resource_schema_dict.copy()
large_string_dict = And(basestring, Use(lambda x: dedent(x).strip()))
package_pod_schema_dict.update({
Optional("base"): basestring,
Optional("version"): And(basestring, Use(Version)),
Optional('description'): large_string_dict,
Optional('authors'): [basestring],
Optional('requires'): [_package_request_schema],
Optional('build_requires'): [_package_request_schema],
Optional('private_build_requires'): [_package_request_schema],
Optional('variants'): [[_package_request_schema]],
Optional('has_plugins'): bool,
Optional('plugin_for'): [basestring],
Optional('uuid'): basestring,
Optional('config'): And(dict,
Use(lambda x: create_config(overrides=x))),
Optional('tools'): [basestring],
Optional('help'): help_schema,
Optional('pre_commands'): _commands_schema,
Optional('commands'): _commands_schema,
Optional('post_commands'): _commands_schema,
Optional("timestamp"): int,
Optional('revision'): object,
Optional('changelog'): large_string_dict,
Optional('release_message'): Or(None, basestring),
Optional('previous_version'): And(basestring, Use(Version)),
Optional('previous_revision'): object,
Optional('vcs'): basestring,
Optional('custom'): dict
})
package_pod_schema = Schema(package_pod_schema_dict)
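# Illustrative only: validating a plain-old-data package definition. The
# field values are hypothetical; validate() raises SchemaError on bad input.
def _example_validate_pod():
    data = {
        "name": "foo",
        "version": "1.0.0",
        "requires": ["bar-2+"],
        "description": "an example package",
    }
    return package_pod_schema.validate(data)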
#------------------------------------------------------------------------------
# resource classes
#------------------------------------------------------------------------------
class PackageRepositoryResource(Resource):
"""Base class for all package-related resources.
Attributes:
schema_error (`Exception`): Type of exception to throw on bad data.
repository_type (str): Type of package repository associated with this
resource type.
"""
schema_error = PackageMetadataError
repository_type = None
def __init__(self, variables=None):
super(PackageRepositoryResource, self).__init__(variables)
self._repository = None
@cached_property
def uri(self):
return self._uri()
@property
def location(self):
return self.get("location")
@property
def name(self):
return self.get("name")
def _uri(self):
"""Return a URI.
Implement this function to return a short, readable string that
uniquely identifies this resource.
"""
raise NotImplementedError
class PackageFamilyResource(PackageRepositoryResource):
"""A package family.
A repository implementation's package family resource(s) must derive from
this class. It must satisfy the schema `package_family_schema`.
"""
pass
class PackageResource(PackageRepositoryResource):
"""A package.
A repository implementation's package resource(s) must derive from this
class. It must satisfy the schema `package_schema`.
"""
@cached_property
def version(self):
ver_str = self.get("version", "")
return Version(ver_str)
class VariantResource(PackageResource):
"""A package variant.
A repository implementation's variant resource(s) must derive from this
class. It must satisfy the schema `variant_schema`.
Even packages that do not have a 'variants' section contain a variant - in
this case it is the 'None' variant (the value of `index` is None). This
provides some internal consistency and simplifies the implementation.
"""
@property
def index(self):
return self.get("index", None)
@cached_property
def root(self):
"""Return the 'root' path of the variant."""
return self._root()
@cached_property
def subpath(self):
"""Return the variant's 'subpath'
The subpath is the relative path the variant's payload should be stored
under, relative to the package base. If None, implies that the variant
root matches the package base.
"""
return self._subpath()
def _root(self):
raise NotImplementedError
def _subpath(self):
raise NotImplementedError
#------------------------------------------------------------------------------
# resource helper classes
#
# Package repository plugins are not required to use the following classes, but
# they may help minimise the amount of code you need to write.
#------------------------------------------------------------------------------
class PackageResourceHelper(PackageResource):
"""PackageResource with some common functionality included.
"""
variant_key = None
@cached_property
def commands(self):
return self._convert_to_rex(self._commands)
@cached_property
def pre_commands(self):
return self._convert_to_rex(self._pre_commands)
@cached_property
def post_commands(self):
return self._convert_to_rex(self._post_commands)
def iter_variants(self):
num_variants = len(self._data.get("variants", []))
if num_variants == 0:
indexes = [None]
else:
indexes = range(num_variants)
for index in indexes:
variant = self._repository.get_resource(
self.variant_key,
location=self.location,
name=self.name,
version=self.get("version"),
index=index)
yield variant
def _convert_to_rex(self, commands):
if isinstance(commands, list):
from rez.utils.backcompat import convert_old_commands
msg = "package %r is using old-style commands." % self.uri
if config.disable_rez_1_compatibility or config.error_old_commands:
raise SchemaError(None, msg)
elif config.warn("old_commands"):
print_warning(msg)
commands = convert_old_commands(commands)
if isinstance(commands, basestring):
return SourceCode(commands)
elif callable(commands):
return SourceCode.from_function(commands)
else:
return commands
class VariantResourceHelper(VariantResource):
"""Helper class for implementing variants that inherit properties from their
parent package.
Since a variant overlaps so much with a package, here we use the forwarding
metaclass to forward our parent package's attributes onto ourself (with some
exceptions - eg 'variants', 'requires'). This is a common enough pattern
that it's supplied here for other repository plugins to use.
"""
class _Metas(AttributeForwardMeta, LazyAttributeMeta): pass
__metaclass__ = _Metas
# Note: lazy key validation doesn't happen in this class, it just fowards on
# attributes from the package. But LazyAttributeMeta does still use this
# schema to create other class attributes, such as `validate_data`.
schema = variant_schema
# forward Package attributes onto ourself
keys = schema_keys(package_schema) - set(["requires", "variants"])
def _uri(self):
index = self.index
idxstr = '' if index is None else str(index)
return "%s[%s]" % (self.parent.uri, idxstr)
def _subpath(self):
if self.index is None:
return None
else:
try:
reqs = self.parent.variants[self.index]
except IndexError:
raise ResourceError(
"Unexpected error - variant %s cannot be found in its "
"parent package %s" % (self.uri, self.parent.uri))
dirs = [x.safe_str() for x in reqs]
subpath = os.path.join(*dirs)
return subpath
def _root(self):
if self.base is None:
return None
elif self.index is None:
return self.base
else:
root = os.path.join(self.base, self.subpath)
return root
@cached_property
def requires(self):
reqs = self.parent.requires or []
index = self.index
if index is not None:
reqs = reqs + (self.parent.variants[index] or [])
return reqs
@property
def wrapped(self): # forward Package attributes onto ourself
return self.parent
def _load(self):
# doesn't have its own data, forwards on from parent instead
return None
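# For illustration (hypothetical values): a variant whose requirements are
# ["python-2.7", "platform-linux"] is stored under the package base at the
# subpath "python-2.7/platform-linux", so its root resolves to
# <base>/python-2.7/platform-linux.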
| gpl-3.0 | -2,584,266,316,096,890,000 | 31.600509 | 87 | 0.570559 | false |
Azure/azure-sdk-for-python | sdk/resources/azure-mgmt-resource/azure/mgmt/resource/policy/v2019_06_01/aio/_configuration.py | 1 | 3182 | # coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from typing import Any, TYPE_CHECKING
from azure.core.configuration import Configuration
from azure.core.pipeline import policies
from azure.mgmt.core.policies import ARMHttpLoggingPolicy
from .._version import VERSION
if TYPE_CHECKING:
# pylint: disable=unused-import,ungrouped-imports
from azure.core.credentials_async import AsyncTokenCredential
class PolicyClientConfiguration(Configuration):
"""Configuration for PolicyClient.
Note that all parameters used to create this instance are saved as instance
attributes.
:param credential: Credential needed for the client to connect to Azure.
:type credential: ~azure.core.credentials_async.AsyncTokenCredential
:param subscription_id: The ID of the target subscription.
:type subscription_id: str
"""
def __init__(
self,
credential: "AsyncTokenCredential",
subscription_id: str,
**kwargs: Any
) -> None:
if credential is None:
raise ValueError("Parameter 'credential' must not be None.")
if subscription_id is None:
raise ValueError("Parameter 'subscription_id' must not be None.")
super(PolicyClientConfiguration, self).__init__(**kwargs)
self.credential = credential
self.subscription_id = subscription_id
self.api_version = "2019-06-01"
self.credential_scopes = kwargs.pop('credential_scopes', ['https://management.azure.com/.default'])
kwargs.setdefault('sdk_moniker', 'mgmt-resource/{}'.format(VERSION))
self._configure(**kwargs)
def _configure(
self,
**kwargs: Any
) -> None:
self.user_agent_policy = kwargs.get('user_agent_policy') or policies.UserAgentPolicy(**kwargs)
self.headers_policy = kwargs.get('headers_policy') or policies.HeadersPolicy(**kwargs)
self.proxy_policy = kwargs.get('proxy_policy') or policies.ProxyPolicy(**kwargs)
self.logging_policy = kwargs.get('logging_policy') or policies.NetworkTraceLoggingPolicy(**kwargs)
self.http_logging_policy = kwargs.get('http_logging_policy') or ARMHttpLoggingPolicy(**kwargs)
self.retry_policy = kwargs.get('retry_policy') or policies.AsyncRetryPolicy(**kwargs)
self.custom_hook_policy = kwargs.get('custom_hook_policy') or policies.CustomHookPolicy(**kwargs)
self.redirect_policy = kwargs.get('redirect_policy') or policies.AsyncRedirectPolicy(**kwargs)
self.authentication_policy = kwargs.get('authentication_policy')
if self.credential and not self.authentication_policy:
self.authentication_policy = policies.AsyncBearerTokenCredentialPolicy(self.credential, *self.credential_scopes, **kwargs)
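# Minimal usage sketch (not part of the generated client): building the
# configuration with a real async credential; the subscription id below is
# a placeholder.
#   from azure.identity.aio import DefaultAzureCredential
#   config = PolicyClientConfiguration(
#       DefaultAzureCredential(), "00000000-0000-0000-0000-000000000000")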
| mit | -3,418,886,602,235,532,300 | 46.492537 | 134 | 0.678504 | false |
pdear/verilib | pytools/vlparse/types/primary.py | 1 | 5043 | #
# primary.py - Primary verilog syntax tree types
#
# Verilib - A Verilog HDL development framework
# Copyright (c) 2014, Patrick Dear, All rights reserved.
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 3.0 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library.
#
import re
from ..tokens import Tokens
from ..errors import print_error, VlSyntaxError
class SyntaxNode:
""" A generic syntactical node in our parse tree """
def __init__(self):
self.children = []
@staticmethod
    def parse(tkns):
raise NotImplementedError()
def to_vl_string(self):
""" Transform back into a verilog string """
raise NotImplementedError("to_vl_string not implemented for type " +
str(self.__class__))
class NetType(SyntaxNode):
def __init__(self, nettype=""):
SyntaxNode.__init__(self)
self.nettype = nettype
@staticmethod
def parse(tkns):
""" Parse a net type. """
nettypes = (Tokens.KW_WIRE, Tokens.KW_SUPPLY0, Tokens.KW_SUPPLY1,
Tokens.KW_TRI, Tokens.KW_TRIAND, Tokens.KW_TRIOR, Tokens.KW_TRI0,
Tokens.KW_TRI1, Tokens.KW_WAND, Tokens.KW_WOR)
for t in nettypes:
if tkns.accept(t):
return NetType(t.name)
return None
class Number(SyntaxNode):
""" Represents any kind of verilog number """
def __init__(self, numtype=int, width=32, value=0, base=""):
SyntaxNode.__init__(self)
self.numtype = numtype
self.width = width
self.value = value
        self.base = base
def to_vl_string(self):
""" Convert back into verilog literal """
if self.numtype != int:
raise Exception("Implement me!")
if self.base == "":
return str(self.value)
raise Exception("Implement me!")
@staticmethod
def parse(tkns):
""" Parse an immediate number """
t = tkns.current()
if tkns.accept(Tokens.INTEGER):
return Number(value=int(t.text))
elif tkns.accept(Tokens.DECIMAL_INTEGER):
raise Exception("Implement me!")
elif tkns.accept(Tokens.BINARY_INTEGER):
raise Exception("Implement me!")
elif tkns.accept(Tokens.OCTAL_INTEGER):
raise Exception("Implement me!")
elif tkns.accept(Tokens.HEX_INTEGER):
raise Exception("Implement me!")
elif tkns.accept(Tokens.REAL):
raise Exception("Implement me!")
else:
return None
class Identifier(SyntaxNode):
""" Represents any kind of idendifier """
def __init__(self, identifier):
SyntaxNode.__init__(self)
self.identifier = identifier
def to_vl_string(self):
return self.identifier
@staticmethod
def parse(tkns):
""" Super easy """
t = tkns.current().text
tkns.expect(Tokens.IDENTIFIER)
return Identifier(t)
class UnaryOperator(SyntaxNode):
def __init__(self, op, child=None):
        SyntaxNode.__init__(self)
self.op = op
self.children = [child] if child != None else []
@staticmethod
def parse(tkns):
""" Parse any sort of unary operator """
unary_ops = (Tokens.OP_PLUS, Tokens.OP_MINUS, Tokens.OP_BANG,
Tokens.OP_TILDE, Tokens.OP_TILDEAND, Tokens.OP_AND,
Tokens.OP_TILDEBAR, Tokens.OP_BAR, Tokens.OP_HAT,
Tokens.OP_TILDEHAT, Tokens.OP_HATTILDE)
for op in unary_ops:
if tkns.accept(op):
return UnaryOperator(op.name)
return None
class BinaryOperator(SyntaxNode):
def __init__(self, op, children=[]):
SyntaxNode.__init__(self)
self.op = op
self.children = children
@staticmethod
def parse(tkns):
""" Parse a binary operator """
binary_ops = (Tokens.OP_PLUS, Tokens.OP_MINUS, Tokens.OP_STAR,
Tokens.OP_SLASH, Tokens.OP_PERCENT, Tokens.OP_EQEQ,
Tokens.OP_NEQ, Tokens.OP_EQEQEQ, Tokens.OP_NEQEQ,
Tokens.OP_ANDAND, Tokens.OP_BARBAR, Tokens.OP_STARSTAR,
Tokens.OP_LT, Tokens.OP_LEQ, Tokens.OP_GT, Tokens.OP_GEQ,
Tokens.OP_AND, Tokens.OP_BAR, Tokens.OP_HAT, Tokens.OP_TILDEHAT,
Tokens.OP_HATTILDE, Tokens.OP_GTGT, Tokens.OP_LTLT,
Tokens.OP_GTGTGT, Tokens.OP_LTLTLT)
for op in binary_ops:
if tkns.accept(op):
m = re.match(r"'?([^']+)'?", op.name)
return BinaryOperator(m.group(1))
return None
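# Hedged sketch of the shared parse() contract used above: a node type either
# consumes tokens and returns a node, or leaves the stream untouched and
# returns None. `tkns` is assumed to expose current()/accept()/expect().
def _example_parse_number(tkns):
    node = Number.parse(tkns)
    return node.to_vl_string() if node is not None else None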
| lgpl-3.0 | -3,968,423,585,559,523,000 | 32.845638 | 77 | 0.611541 | false |
felipet/fca3103_pytool | fca3103_tool.py | 1 | 5035 | #! /usr/bin/env python3
# -*- coding: utf-8 -*-
'''
Terminal tool to make Time Interval measures using the Tektronix FCA3103
@file
@date Created on Sep. 16, 2015
@author Felipe Torres (torresfelipex1<AT>gmail.com)
@copyright LGPL v2.1
'''
# ----------------------------------------------------------------------------|
# GNU LESSER GENERAL PUBLIC LICENSE |
# ------------------------------------ |
# This source file is free software; you can redistribute it and/or modify it |
# under the terms of the GNU Lesser General Public License as published by the|
# Free Software Foundation; either version 2.1 of the License, or (at your |
# option) any later version. This source is distributed in the hope that it |
# will be useful, but WITHOUT ANY WARRANTY; without even the implied warrant |
# of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser |
# General Public License for more details. You should have received a copy of |
# the GNU Lesser General Public License along with this source; if not, |
# download it from http://www.gnu.org/licenses/lgpl-2.1.html |
# ----------------------------------------------------------------------------|
# -----------------------------------------------------------------------------
# Import --
# -----------------------------------------------------------------------------
import datetime
import argparse as arg
from subprocess import check_output
from FCA3103 import FCA3103
def main() :
'''
Tool for automatize the control of Tektronix FCA3103 Timer/Counter
'''
parser = arg.ArgumentParser(description='Tektronix FCA3103 tool')
parser.add_argument('--function', '-f', help='Measuring Function', choices=['mtint','tint'],\
required=True)
parser.add_argument('--interval', '-t', help='Time between samples', type=int)
parser.add_argument('--samples', '-s', help='Number of samples', type=int, \
default=1)
parser.add_argument('--debug', '-d', help="Enable debug output", action="store_true", \
default=False)
parser.add_argument('--device', '-l', help="Device port", type=int, default=1)
parser.add_argument('--output', '-o', help='Output data file', type=str)
parser.add_argument('--ref', '-r', help='Input channel for the reference',type=int, \
choices=[1,2],default=1)
parser.add_argument('--trigl','-g',help='Input trigger level', type=float, \
default=1.5)
parser.add_argument('--skip','-i',help='Ignore values far from mean plus error',type=int, \
default=0)
parser.add_argument('--tstamp','-x', help='Add timestamping for each measure',action="store_true", \
default=False)
args = parser.parse_args()
valid_port = False
ports = check_output(["""ls /dev | grep usbtmc"""],shell=True)[:-1]
for p in ports.splitlines():
p = p.decode('utf-8')
if int(p[-1]) == args.device:
valid_port = True
if not valid_port:
print("No device found at /dev/usbtmc%d" % (args.device))
exit(6) # No such device or address
device = FCA3103(args.device, args.ref, 2 if args.ref == 1 else 1)
device.show_dbg = args.debug
device.t_samples = args.interval
device.n_samples = args.samples
device.skip_values = True if args.skip > 0 else False
device.error = args.skip
# TODO: Add de posibility of using different trigger values for the inputs
device.trig_level[0] = device.trig_level[1] = args.trigl
# try:
if args.function == 'mtint':
print("Measuring Mean Time Interval between the inputs (%d secs)..." % (args.samples))
mean = device.mean_time_interval(args.samples, args.interval)
print("Mean Time Interval for %d samples: %g" % (args.samples, mean))
elif args.function == 'tint':
print("Measuring Time Interval between the inputs (%d secs)..." % (args.samples+10))
values = device.time_interval(args.samples, tstamp=args.tstamp)
if args.output:
with open(args.output,'a+') as file:
file.write("# Time Interval Measurement (%d samples) with Tektronix FCA3103 (50ps)\n" % args.samples)
file.write("# %s\n" % datetime.datetime.now().strftime('%Y-%m-%d-%H-%M-%S'))
for v in values:
if args.tstamp:
file.write("%g\t%g\n" % (v[0], v[1]))
else:
file.write(str(v))
file.write("\n")
print("Output writed to '%s'" % (args.output))
else:
print("Time Interval Measurement (%d samples) with Tektronix FCA3103 (50ps)" % args.samples)
print("%s\n" % datetime.datetime.now().strftime('%Y-%m-%d-%H-%M-%S'))
for v in values:
print(v)
# except Exception as e:
# print(e)
if __name__ == "__main__" :
main()
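# Example invocation (illustrative command line; the device number and file
# names are placeholders):
#   ./fca3103_tool.py --function tint --samples 100 --ref 1 --output data.txt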
| gpl-2.0 | -1,453,097,992,371,775,000 | 44.772727 | 117 | 0.562066 | false |
andyr0id/PyGFNN | pygfnn/structure/connections/gfnn.py | 1 | 6302 | __author__ = 'Andrew J. Lambert, [email protected]'
__author__ = 'Thomas Rueckstiess, [email protected]'
from scipy import reshape, dot, outer, eye
from pybrain.structure.connections import FullConnection, FullNotSelfConnection
from pybrain.structure.connections.connection import Connection
from pybrain.structure.parametercontainer import ParameterContainer
from pygfnn.tools.gfnn import spontAmp
from pygfnn.structure.modules.gfnn import GFNNLayer
import numpy as np
class GFNNExtConnection(Connection):
def __init__(self, inmod, outmod, **kwargs):
# 1st inputs are for external connection
kwargs['outSliceTo'] = outmod.dim
Connection.__init__(self, inmod, outmod, **kwargs)
def _forwardImplementation(self, inbuf, outbuf):
n = self.outmod
outbuf += np.sum(inbuf)
def _backwardImplementation(self, outerr, inerr, inbuf):
#CHECKME: not setting derivatives -- this means the multiplicative weight is never updated!
inerr += 0
class GFNNIntConnection(Connection):
""""""
learnParams = None
type = None
f = None
learn = False
w = 0.05
l = 0
m1 = -1
m2 = -50
e = 4
roote = 2
k = 1
c0 = None
c = None
mask = 1
kSteps = []
def __init__(self, inmod, outmod, learnParams=None, c0=None, **kwargs):
# 2nd half for int connections
kwargs['outSliceFrom'] = outmod.dim
kwargs['outSliceTo'] = outmod.dim*2
Connection.__init__(self, inmod, outmod, **kwargs)
if isinstance(outmod, GFNNLayer):
outmod.conns.append(self)
if learnParams is None:
learnParams = {'learn': True, 'w': 0.05, 'l': 0, 'm1': -1, 'm2': -50, 'e': 4, 'k': 1 } # Critical learning rule
if 'type' not in learnParams:
learnParams['type'] = 'allfreq'
self.setArgs(learnParams=learnParams, c0=c0)
self.setLearnParams(learnParams)
if c0 is not None:
self.c0 = np.zeros((self.outdim, self.indim), np.complex64)
self.c0[:] = c0
self.c = np.zeros((self.outdim, self.indim), np.complex64)
if inmod == outmod:
# Don't learn self-connections
# This could be inverted guassian too
self.mask = 1-eye(self.outdim, self.indim)
# ParameterContainer.__init__(self, self.indim*self.outdim)
self.k *= self.mask
self.kSteps = np.zeros((4, self.outdim, self.indim), np.complex64)
self.randomize()
self.reset()
def getFreqSpace(self):
n1 = self.inmod
n2 = self.outmod
f1 = np.tile(n1.f, (n2.outdim, 1))
f2 = np.tile(n2.f, (n1.outdim, 1)).T
if self.type == 'allfreq':
# Full series of resonant monomials
return 2 * f1 * f2 / (f1 + f2)
return 1.
def setLearnParams(self, learnParams):
n1 = self.inmod
n2 = self.outmod
self.type = learnParams['type']
f = self.getFreqSpace()
if n2.fspac == 'log':
self.f = f
self.w = learnParams['w'] * n2.f
if learnParams['learn']:
self.l = learnParams['l']
self.m1 = learnParams['m1']
self.m2 = learnParams['m2']
self.k = learnParams['k']
self.l *= f
self.m1 *= f
self.m2 *= f
self.k *= f
else:
self.f = np.ones(np.size(f))
self.w = learnParams['w']
if learnParams['learn']:
self.l = learnParams['l']
self.m1 = learnParams['m1']
self.m2 = learnParams['m2']
self.k = learnParams['k']
self.learn = learnParams['learn']
if learnParams['learn']:
self.e = np.complex64(learnParams['e'])
self.roote = np.sqrt(self.e)
def updateLearnParams(self):
n2 = self.outmod
p = self.learnParams
if n2.fspac == 'log':
f = self.getFreqSpace()
self.f[:] = f
self.w[:] = p['w'] * n2.f
self.l[:] = p['l'] * f
self.m1[:] = p['m1'] * f
self.m2[:] = p['m2'] * f
self.k[:] = p['k'] * f
def randomize(self):
# FullNotSelfConnection.randomize(self)
size = np.size(self.f)
# self._params[:] = np.ones(size)*self.stdParams
if self.c0 is None:
a0 = np.zeros(size)
a = spontAmp(np.real(self.l[0,0]), np.real(self.m1[0,0]), np.real(self.m2[0,0]), self.e)
a0 += np.min(a)
a0 = a0 * (1 + .01 * np.random.randn(size))
self.c0 = np.complex64(reshape(a0, (self.outdim, self.indim)))
self.c0[:] = self._randomizePhase(self.c0)
self.c0 *= self.mask
def reset(self, randomizePhase=True):
self.c[:] = self.c0
if randomizePhase:
self.c[:] = self._randomizePhase(self.c)
def _randomizePhase(self, c):
theta0 = np.random.randn(self.outdim * self.indim)
theta0 = np.exp(1j * 2 * np.pi * theta0)
return c * reshape(theta0, (self.outdim, self.indim))
def _forwardImplementation(self, inbuf, outbuf):
outbuf += inbuf
def _backwardImplementation(self, outerr, inerr, inbuf):
#CHECKME: not setting derivatives -- this means the multiplicative weight is never updated!
inerr += 0
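# Hedged sketch: a learning-parameter dict as consumed by GFNNIntConnection
# above, mirroring the critical-rule default built in __init__:
#   {'learn': True, 'w': 0.05, 'l': 0, 'm1': -1, 'm2': -50,
#    'e': 4, 'k': 1, 'type': 'allfreq'}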
if __name__ == "__main__":
# from pybrain.tests import runModuleTestSuite
# import pygfnn.tests.unittests.structure.connections.test_gfnn_connections as test
# runModuleTestSuite(test)
from pybrain.structure.networks.recurrent import RecurrentNetwork
from pybrain import LinearLayer, FullConnection, FullNotSelfConnection, IdentityConnection
from pygfnn import GFNNLayer, RealIdentityConnection, RealMeanFieldConnection
N = RecurrentNetwork('simpleGFNN')
i = LinearLayer(1, name = 'i')
h = GFNNLayer(200, name = 'gfnn')
o = LinearLayer(200, name = 'o')
N.addOutputModule(o)
N.addInputModule(i)
N.addModule(h)
N.addConnection(GFNNExtConnection(i, h, name = 'f1'))
N.addRecurrentConnection(GFNNIntConnection(h, h, name = 'r1'))
N.addConnection(RealIdentityConnection(h, o, name = 'i1'))
    N.sortModules()
| gpl-2.0 | 4,527,439,174,360,566,300 | 32.349206 | 123 | 0.577594 | false |
qunying/gps | share/support/core/gcov.py | 1 | 7246 | """ Provides the "Tools/Gcov/Compute coverage files" and "Remove coverage
files" menus, which executes gcov automatically.
This script will also perform checks along the way to guide through the
procedure of obtaining gcov info.
The output of the gcov process is displayed in a separate console.
At the end of the processing, the open editors are decorated with coverage
information.
Note that GPS calls gcov so that the .gcov files are generated
- in the directory pointed to by the "GCOV_ROOT" environment variable, or
- in the object directory of the root project, if this variable is not set
"""
###########################################################################
# No user customization below this line
###########################################################################
import GPS
import os
import re
from gps_utils import interactive
from GPS import MDI, Project, Process, CodeAnalysis
# A class to display the output of gcov in a separate console.
class Gcov_Process (GPS.Console, GPS.Process):
def on_output(self, unmatched, matched):
self.write(unmatched + matched)
def on_exit(self, status, remaining_output):
self.write(remaining_output)
if status == 0:
self.write("process terminated successfully")
else:
self.write("process terminated [" + str(status) + "]")
# Show coverage report
analysis = CodeAnalysis.get("Coverage")
if GPS.Project.root().is_harness_project():
original = GPS.Project.root().original_project().file()
analysis.add_gcov_project_info(original)
else:
analysis.add_all_gcov_project_info()
analysis.show_analysis_report()
self.kill()
def on_input(self, input):
self.send(input)
def on_destroy(self):
self.kill()
def __init__(self, process, args="", directory=""):
GPS.Console.__init__(self, "Executing gcov",
on_input=Gcov_Process.on_input,
on_destroy=Gcov_Process.on_destroy,
force=True)
GPS.Process.__init__(self, process + ' ' + args, ".+",
remote_server="Build_Server",
directory=directory,
on_exit=Gcov_Process.on_exit,
on_match=Gcov_Process.on_output)
def using_gcov(context):
return GPS.Preference('Coverage-Toolchain').get() == 'Gcov'
@interactive(name='gcov compute coverage files',
filter=using_gcov)
def run_gcov():
"Run gcov to generate the coverage files"
# Verify that the version of gcov is recent enough to support response
# files and reading of .gc?? data in multiple directories.
try:
p = Process("gcov --version")
out = p.get_result()
p = re.compile("[1-9][0-9][0-9][0-9][0-1][0-9][0-3][0-9]")
found = p.findall(out)
if not found:
MDI.dialog("Could not find a date in the output of gcov.")
else:
date = found[0]
            if int(date) < 20071005:
MDI.dialog("Your version of gcov is dated " + str(date) +
".\nThis plugin requires gcov for GNAT dated " +
"20071005 or later.")
return
except:
MDI.dialog("""Could not read gcov version number.
Make sure you are using gcov for GNAT dated 20071005 or later.""")
# Determine the root project
root_project = Project.root()
# Determine where to create the gcov info
GCOV_ROOT = os.getenv("GCOV_ROOT")
if not GCOV_ROOT:
root_object_dirs = root_project.object_dirs(False)
if not root_object_dirs:
MDI.dialog("""The root project does not have an object directory.
Please add one, or set the environment variable GCOV_ROOT to
the directory where you would like the gcov files to be
generated.""")
return
else:
gcov_dir = root_object_dirs[0]
else:
gcov_dir = GCOV_ROOT
    if not os.access(gcov_dir, os.R_OK | os.W_OK):
MDI.dialog(""" Could not access the directory:
""" + gcov_dir + """
Please point the environment variable GCOV_ROOT to a directory
on which you have permission to read and write.
""")
input_file = os.path.abspath(os.path.join(gcov_dir, "gcov_input.txt"))
# List all the projects
projects = root_project.dependencies(True)
# List all object dirs
object_dirs = root_project.object_dirs(True)
# Write the response file
res = file(input_file, 'wb')
gcda_file_found = False
gcno_file_found = False
for p in projects:
sources = p.sources(False)
for s in sources:
n = s.path
basename = n[max(n.rfind('\\'), n.rfind('/')) + 1:len(n)]
unit = basename[0:basename.rfind('.')]
for object_dir in object_dirs:
gcda = object_dir + os.sep + unit + ".gcda"
# If we have not yet found at least one .gcno file, attempt to
# find one. This is to improve the precision of error messages,
# and detect the case where compilation was successful but the
# executable has never been run.
if not gcno_file_found:
gcno = object_dir + os.sep + unit + ".gcno"
if os.access(gcno, os.F_OK):
gcno_file_found = True
if os.access(gcda, os.F_OK):
gcda_file_found = True
# Write one entry in response file
# Escape all backslashes.
gcda = gcda.replace('\\', '\\\\')
res.write('"' + gcda + '"' + "\n")
break
res.close()
    # Sanity check: make sure the response file can be read back.
    file(input_file).read()
if not gcno_file_found:
# No gcno file was found: display an appropriate message.
MDI.dialog(""" No ".gcno" file was found in any of the object directories.
Make sure you have compiled the sources of interest with
the "Code coverage" flags.""")
else:
if not gcda_file_found:
            # Some .gcno files were found, but no .gcda files.
MDI.dialog(""" No ".gcda" file was found in any of the object directories.
Make sure you have run the executable(s) at least once.
""")
else:
# Run gcov
Gcov_Process("gcov", "@%s" % input_file, directory=gcov_dir)
@interactive(name='gcov remove coverage files',
filter=using_gcov)
def remove_gcov():
"Cleanup the gcov coverage files"
if not MDI.yes_no_dialog(
"This will remove all .gcov and .gcda files, are you sure ?"):
return
# Look in all the projects
for p in Project.root().dependencies(True):
object_dirs = p.object_dirs(False)
if len(object_dirs) > 0:
object_dir = object_dirs[0]
# Browse in the object dirs
for f in os.listdir(object_dir):
# if f is a .gcda or a .gcov, remove it
if f.find(".gcda") != -1 or f.find(".gcov") != -1:
os.remove(object_dir + os.sep + f)
| gpl-3.0 | -8,241,168,727,739,678,000 | 31.78733 | 86 | 0.569418 | false |
danpozmanter/jawaf | conftest.py | 1 | 4037 | """ Setup temporary projects for py.test, cleanup after tests are done."""
import pytest
import os
from pip._internal import main as pip_main
import shutil
from sqlalchemy import create_engine
import sys
import testing.postgresql
from tests import templates
sys.path.insert(0, os.path.abspath('jawaf'))
@pytest.fixture(scope='session')
def test_project():
"""Setup a test project, test app, and test app package.
Load settings and create tables.
Cleans up when test session ends.
"""
# Setup test package:
pip_main(['install', 'tests/example_package/'])
# Create a test project
test_dir = 'temp_test'
test_project = 'test_project'
if os.path.exists(test_dir):
shutil.rmtree(test_dir)
os.mkdir(test_dir)
from jawaf.management.commands import start_project, start_app
start_project.Command().handle(
name=test_project, directory=os.path.abspath(test_dir)
)
os.mkdir(os.path.join(os.path.abspath(test_dir), 'static'))
start_app.Command().handle(
name='test_app',
directory=os.path.abspath(os.path.join(test_dir, test_project))
)
# Create the code for the test project
templates.write_template(
'project_routes',
os.path.abspath(
os.path.join(test_dir, test_project, test_project, 'routes.py'))
)
templates.write_template(
'app_routes',
os.path.abspath(
os.path.join(test_dir, test_project, 'test_app', 'routes.py'))
)
templates.write_template(
'app_views',
os.path.abspath(
os.path.join(test_dir, test_project, 'test_app', 'views.py'))
)
templates.write_template(
'app_tables',
os.path.abspath(
os.path.join(test_dir, test_project, 'test_app', 'tables.py'))
)
templates.edit_settings(
os.path.abspath(
os.path.join(test_dir, test_project, test_project, 'settings.py')
),
targets=[
[
"'jawaf.auth',",
"'jawaf.auth',\n 'test_app',\n 'jawaf_example_app',"
],
['HOST', "SMTP = {'host':'localhost', 'port':8024}\n\nHOST"],
["# STATIC", 'STATIC'],
])
# Setup test postgresql
postgresql = testing.postgresql.Postgresql()
create_engine(postgresql.url())
# Hot Patch
import smtplibaio
from jawaf.utils.testing import MockSMTP
smtplibaio.SMTP = MockSMTP
smtplibaio.SMTP_SSL = MockSMTP
# Setup Settings and reload modules to ensure project settings are loaded.
os.environ.setdefault(
'JAWAF_SETTINGS_PATH',
f'{test_dir}/{test_project}/{test_project}/settings.py'
)
sys.path.insert(0, os.path.abspath(test_dir))
from imp import reload
from jawaf import conf, db, management, security, server, utils
import jawaf.auth
import jawaf.auth.users
import jawaf.auth.utils
import jawaf.admin
import jawaf.admin.utils
reload(conf)
reload(db)
reload(management)
reload(security)
reload(server)
reload(utils)
reload(jawaf.auth.users)
reload(jawaf.auth.utils)
reload(jawaf.admin)
reload(jawaf.admin.utils)
from jawaf.conf import settings
p_dsn = postgresql.dsn()
settings['DATABASES']['default'] = {'engine': 'postgresql', 'password': ''}
for key in ('database', 'host', 'port', 'user'):
settings['DATABASES']['default'][key] = p_dsn[key]
settings['SESSION'] = {'interface': 'memory'}
# Create tables for auth and the example app
from jawaf.db import create_tables
create_tables(['jawaf.auth'], warn=False)
create_tables(['jawaf.admin'], warn=False)
create_tables(['jawaf_example_app'], warn=False)
yield True
# Clean up
postgresql.stop()
shutil.rmtree(test_dir)
pip_main(['uninstall', 'jawaf_example_app', '-y'])
@pytest.fixture(scope='session')
def waf():
"""Create a Jawaf instance for test session."""
import jawaf.server
return jawaf.server.Jawaf(testing=True)
| bsd-3-clause | -4,507,208,642,298,035,700 | 32.090164 | 79 | 0.631905 | false |
inkasjasonk/rs | research/base/views.py | 1 | 2923 | from django.contrib.auth.decorators import login_required
from django.shortcuts import render_to_response
from django.template import RequestContext
from django.http import HttpResponseRedirect
from session_csrf import anonymous_csrf
from django.db.models import get_app, get_models, get_model
from models import *
from forms import *
app = get_app('base')
model_list = get_models(app)
@anonymous_csrf
@login_required(login_url='/accounts/login/')
def home(request):
profiles = SafeProfile.objects.all().values_list('name','price', 'manufacturer')
latest_safes = SafeProfile.objects.all().order_by('date_added')[0:5]
latest_comps = SafeComponentProfile.objects.all().order_by('date_added')[0:5]
gsform = GraphSafesForm()
gscform = GraphSafeComponentForm()
return render_to_response('base/home.html', {'model_list' : model_list, 'profiles' : profiles, 'latest_safes' : latest_safes, 'latest_comps' : latest_comps, 'gsform' : gsform, 'gscform' : gscform,},
context_instance=RequestContext(request))
@anonymous_csrf
def graph_safes(request):
profiles = SafeProfile.objects.all().values_list('name','price')
latest_safes = SafeProfile.objects.all().order_by('date_added')[0:5]
latest_comps = SafeComponentProfile.objects.all().order_by('date_added')[0:5]
if request.method == 'POST': # If the form has been submitted...
gsform = GraphSafesForm(request.POST) # A form bound to the POST data
if not gsform.is_valid():
gsform = GraphSafesForm()
else: gsform = GraphSafesForm()
return render_to_response('base/graphs.html', {'model_list' : model_list, 'profiles' : profiles, 'latest_safes' : latest_safes, 'latest_comps' : latest_comps, 'gsform' : gsform,},
context_instance=RequestContext(request))
@anonymous_csrf
def graph_component(request):
profiles = SafeProfile.objects.all().values_list('name','price')
latest_safes = SafeProfile.objects.all().order_by('date_added')[0:5]
latest_comps = SafeComponentProfile.objects.all().order_by('date_added')[0:5]
if request.method == 'POST': # If the form has been submitted...
gscform = GraphSafeComponentForm(request.POST) # A form bound to the POST data
        if not gscform.is_valid():
            gscform = GraphSafeComponentForm() # An unbound form
    else:
        gscform = GraphSafeComponentForm() # An unbound form
return render_to_response('base/graphs.html', {'model_list' : model_list, 'profiles' : profiles, 'latest_safes' : latest_safes, 'latest_comps' : latest_comps, 'gscform' : gscform,},
context_instance=RequestContext(request))
@anonymous_csrf
@login_required(login_url='/accounts/login/')
def raw(request, slug):
raw_model = get_model('base', slug)
raw_data = raw_model.objects.all()
return render_to_response('base/raw.html', {'model_list' : model_list, 'raw_data' : raw_data},
context_instance=RequestContext(request))
| bsd-3-clause | 6,649,872,892,987,389,000 | 49.396552 | 202 | 0.702703 | false |
cproctor/hex | server/hexserver/hexserver/models/spell.py | 1 | 3379 | from db import db_connection
from user import _user_name_exists, _authenticate_user
import json
import time
import logging
log = logging.getLogger(__name__)
def get_spells(request):
conn = db_connection(request)
cursor = conn.cursor()
cursor.execute("SELECT * FROM spells;")
result = cursor.fetchall()
conn.close()
return result
def get_current_spells(request):
conn = db_connection(request)
cursor = conn.cursor()
currentSpells = _get_current_spells(cursor)
conn.close()
return currentSpells
def get_spell_by_time(request, castTime):
conn = db_connection(request)
cursor = conn.cursor()
spell = _get_spell_by_time(cursor, castTime)
conn.close()
return spell
def _get_spell_by_time(cursor, castTime):
cursor.execute("SELECT * FROM spells WHERE cast_time = ?", (castTime,))
return cursor.fetchone()
def _get_current_spells(cursor):
cursor.execute("SELECT * FROM spells WHERE complete = 0 ORDER BY cast_time")
current = cursor.fetchone()
upcoming = cursor.fetchall()
return {
"current": current,
"upcoming":upcoming
}
def create_spell(request, params):
conn = db_connection(request)
cursor = conn.cursor()
spellTime = int(time.time())
# We use spellTime as a primary key. So if we should happen to get two spells
# at the same second, pretend like the second came a second later.
while _get_spell_by_time(cursor, spellTime):
spellTime += 1
try:
assert(_authenticate_user(params['user_name'], params['spirit_animal'],
cursor))
assert(isinstance(params['name'], basestring))
assert(params['name'] != '')
assert(params['setup'] or params['loop'])
for component in ['setup', 'loop']:
if params[component]:
for frame in params[component]:
try:
assert(validate_frame(frame))
except:
log.debug(frame)
raise AssertionError()
    except (AssertionError, KeyError):
        return False
setup = json.dumps(params['setup']) if params['setup'] else ''
loop = json.dumps(params['loop']) if params['loop'] else ''
cursor.execute('INSERT INTO spells VALUES (?,?,?,?,?,?,?)', (
params['user_name'],
params['name'],
3,
spellTime,
setup,
loop,
0
))
conn.commit()
newSpell = _get_spell_by_time(cursor, spellTime)
conn.close()
return newSpell
def mark_spell_complete(request, castTime):
conn = db_connection(request)
cursor = conn.cursor()
result = _mark_spell_complete(cursor, castTime)
conn.commit()
conn.close()
return result
def _mark_spell_complete(cursor, castTime):
cursor.execute("UPDATE spells SET complete = ? WHERE cast_time = ?", (1, castTime))
return cursor.fetchone()
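# Example of a frame accepted by validate_frame() below (illustrative only):
# a frame is a list of layers, each layer a [colors, bulbs] pair with exactly
# four int color channels and a list of int bulb indices.
#   frame = [[[255, 0, 0, 0], [1, 2, 3]]]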
def validate_frame(frame):
try:
assert isinstance(frame, list)
for layer in frame:
assert isinstance(layer, list)
assert len(layer) == 2
colors, bulbs = layer
assert len(colors) == 4
for color in colors:
assert isinstance(color, int)
for bulb in bulbs:
assert isinstance(bulb, int)
except:
return False
return True
| mit | -1,238,376,574,511,584,300 | 29.441441 | 87 | 0.600474 | false |
great-expectations/great_expectations | tests/datasource/test_sqlalchemy_datasource.py | 1 | 10417 | import os
from unittest import mock
import pandas as pd
import pytest
from ruamel.yaml import YAML
import great_expectations.dataset.sqlalchemy_dataset
from great_expectations.core.batch import Batch
from great_expectations.core.expectation_suite import ExpectationSuite
from great_expectations.dataset import SqlAlchemyDataset
from great_expectations.datasource import SqlAlchemyDatasource
from great_expectations.validator.validator import BridgeValidator, Validator
try:
sqlalchemy = pytest.importorskip("sqlalchemy")
except ImportError:
sqlalchemy = None
yaml = YAML()
def test_sqlalchemy_datasource_custom_data_asset(
data_context_parameterized_expectation_suite, test_db_connection_string
):
name = "test_sqlalchemy_datasource"
class_name = "SqlAlchemyDatasource"
data_asset_type_config = {
"module_name": "custom_sqlalchemy_dataset",
"class_name": "CustomSqlAlchemyDataset",
}
data_context_parameterized_expectation_suite.add_datasource(
name,
class_name=class_name,
credentials={"connection_string": test_db_connection_string},
data_asset_type=data_asset_type_config,
batch_kwargs_generators={
"default": {"class_name": "TableBatchKwargsGenerator"}
},
)
# We should now see updated configs
with open(
os.path.join(
data_context_parameterized_expectation_suite.root_directory,
"great_expectations.yml",
),
) as data_context_config_file:
data_context_file_config = yaml.load(data_context_config_file)
assert (
data_context_file_config["datasources"][name]["data_asset_type"]["module_name"]
== "custom_sqlalchemy_dataset"
)
assert (
data_context_file_config["datasources"][name]["data_asset_type"]["class_name"]
== "CustomSqlAlchemyDataset"
)
# We should be able to get a dataset of the correct type from the datasource.
data_context_parameterized_expectation_suite.create_expectation_suite("table_1.boo")
batch = data_context_parameterized_expectation_suite.get_batch(
data_context_parameterized_expectation_suite.build_batch_kwargs(
"test_sqlalchemy_datasource", "default", "table_1"
),
"table_1.boo",
)
assert type(batch).__name__ == "CustomSqlAlchemyDataset"
res = batch.expect_column_func_value_to_be("col_1", 1)
assert res.success is True
def test_standalone_sqlalchemy_datasource(test_db_connection_string, sa):
datasource = SqlAlchemyDatasource(
"SqlAlchemy",
connection_string=test_db_connection_string,
echo=False,
batch_kwargs_generators={
"default": {"class_name": "TableBatchKwargsGenerator"}
},
)
assert set(datasource.get_available_data_asset_names()["default"]["names"]) == {
("main.table_1", "table"),
("main.table_2", "table"),
}
batch_kwargs = datasource.build_batch_kwargs("default", "main.table_1")
batch = datasource.get_batch(batch_kwargs=batch_kwargs)
assert isinstance(batch, Batch)
batch_data = batch.data
assert isinstance(
batch_data,
great_expectations.dataset.sqlalchemy_dataset.SqlAlchemyBatchReference,
)
dataset = SqlAlchemyDataset(**batch.data.get_init_kwargs())
assert len(dataset.head(10)) == 5
def test_create_sqlalchemy_datasource(data_context_parameterized_expectation_suite, sa):
name = "test_sqlalchemy_datasource"
# type_ = "sqlalchemy"
class_name = "SqlAlchemyDatasource"
# Use sqlite so we don't require postgres for this test.
connection_kwargs = {"credentials": {"drivername": "sqlite"}}
# It should be possible to create a sqlalchemy source using these params without
# saving substitution variables
data_context_parameterized_expectation_suite.add_datasource(
name, class_name=class_name, **connection_kwargs
)
data_context_config = data_context_parameterized_expectation_suite.get_config()
assert name in data_context_config["datasources"]
assert data_context_config["datasources"][name]["class_name"] == class_name
# We should be able to get it in this session even without saving the config
source = data_context_parameterized_expectation_suite.get_datasource(name)
assert isinstance(source, SqlAlchemyDatasource)
var_name = "test_sqlalchemy_datasource"
data_context_parameterized_expectation_suite.save_config_variable(
var_name, connection_kwargs["credentials"]
)
# But we should be able to add a source using a substitution variable
name = "second_source"
data_context_parameterized_expectation_suite.add_datasource(
name, class_name=class_name, credentials="${" + var_name + "}"
)
data_context_config = data_context_parameterized_expectation_suite.get_config()
assert name in data_context_config["datasources"]
assert data_context_config["datasources"][name]["class_name"] == class_name
assert (
data_context_config["datasources"][name]["credentials"] == "${" + var_name + "}"
)
source = data_context_parameterized_expectation_suite.get_datasource(name)
assert isinstance(source, SqlAlchemyDatasource)
# Finally, we should be able to confirm that the folder structure is as expected
with open(
os.path.join(
data_context_parameterized_expectation_suite.root_directory,
"uncommitted/config_variables.yml",
),
) as credentials_file:
substitution_variables = yaml.load(credentials_file)
assert substitution_variables == {
var_name: dict(**connection_kwargs["credentials"])
}
def test_sqlalchemy_source_templating(sqlitedb_engine):
datasource = SqlAlchemyDatasource(
engine=sqlitedb_engine,
batch_kwargs_generators={"foo": {"class_name": "QueryBatchKwargsGenerator"}},
)
generator = datasource.get_batch_kwargs_generator("foo")
generator.add_query(data_asset_name="test", query="select 'cat' as ${col_name};")
batch = datasource.get_batch(
generator.build_batch_kwargs(
"test", query_parameters={"col_name": "animal_name"}
)
)
dataset = BridgeValidator(
batch,
expectation_suite=ExpectationSuite("test"),
expectation_engine=SqlAlchemyDataset,
).get_dataset()
res = dataset.expect_column_to_exist("animal_name")
assert res.success is True
res = dataset.expect_column_values_to_be_in_set("animal_name", ["cat"])
assert res.success is True
def test_sqlalchemy_source_limit(sqlitedb_engine):
df1 = pd.DataFrame({"col_1": [1, 2, 3, 4, 5], "col_2": ["a", "b", "c", "d", "e"]})
df2 = pd.DataFrame({"col_1": [0, 1, 2, 3, 4], "col_2": ["b", "c", "d", "e", "f"]})
df1.to_sql(name="table_1", con=sqlitedb_engine, index=True)
df2.to_sql(name="table_2", con=sqlitedb_engine, index=True, schema="main")
datasource = SqlAlchemyDatasource("SqlAlchemy", engine=sqlitedb_engine)
limited_batch = datasource.get_batch({"table": "table_1", "limit": 1, "offset": 2})
assert isinstance(limited_batch, Batch)
limited_dataset = BridgeValidator(
limited_batch,
expectation_suite=ExpectationSuite("test"),
expectation_engine=SqlAlchemyDataset,
).get_dataset()
assert limited_dataset._table.name.startswith(
"ge_tmp_"
) # we have generated a temporary table
assert len(limited_dataset.head(10)) == 1 # and it is only one row long
assert limited_dataset.head(10)["col_1"][0] == 3 # offset should have been applied
def test_sqlalchemy_datasource_query_and_table_handling(sqlitedb_engine):
# MANUALLY SET DIALECT NAME FOR TEST
datasource = SqlAlchemyDatasource("SqlAlchemy", engine=sqlitedb_engine)
with mock.patch(
"great_expectations.dataset.sqlalchemy_dataset.SqlAlchemyBatchReference.__init__",
return_value=None,
) as mock_batch:
datasource.get_batch({"query": "select * from foo;"})
mock_batch.assert_called_once_with(
engine=sqlitedb_engine, schema=None, query="select * from foo;", table_name=None
)
# Normally, we do not allow both query and table_name
with mock.patch(
"great_expectations.dataset.sqlalchemy_dataset.SqlAlchemyBatchReference.__init__",
return_value=None,
) as mock_batch:
datasource.get_batch({"query": "select * from foo;", "table_name": "bar"})
mock_batch.assert_called_once_with(
engine=sqlitedb_engine, schema=None, query="select * from foo;", table_name=None
)
# Snowflake should require query *and* snowflake_transient_table
sqlitedb_engine.dialect.name = "snowflake"
with mock.patch(
"great_expectations.dataset.sqlalchemy_dataset.SqlAlchemyBatchReference.__init__",
return_value=None,
) as mock_batch:
datasource.get_batch(
{"query": "select * from foo;", "snowflake_transient_table": "bar"}
)
mock_batch.assert_called_once_with(
engine=sqlitedb_engine,
schema=None,
query="select * from foo;",
table_name="bar",
)
def test_sqlalchemy_datasource_processes_dataset_options(test_db_connection_string):
datasource = SqlAlchemyDatasource(
"SqlAlchemy", credentials={"url": test_db_connection_string}
)
batch_kwargs = datasource.process_batch_parameters(
dataset_options={"caching": False}
)
batch_kwargs["query"] = "select * from table_1;"
batch = datasource.get_batch(batch_kwargs)
validator = BridgeValidator(batch, ExpectationSuite(expectation_suite_name="foo"))
dataset = validator.get_dataset()
assert dataset.caching is False
batch_kwargs = datasource.process_batch_parameters(
dataset_options={"caching": True}
)
batch_kwargs["query"] = "select * from table_1;"
batch = datasource.get_batch(batch_kwargs)
validator = BridgeValidator(batch, ExpectationSuite(expectation_suite_name="foo"))
dataset = validator.get_dataset()
assert dataset.caching is True
batch_kwargs = {
"query": "select * from table_1;",
"dataset_options": {"caching": False},
}
batch = datasource.get_batch(batch_kwargs)
validator = BridgeValidator(batch, ExpectationSuite(expectation_suite_name="foo"))
dataset = validator.get_dataset()
assert dataset.caching is False
| apache-2.0 | -650,419,896,327,268,100 | 37.724907 | 90 | 0.679274 | false |
diego-d5000/MisValesMd | env/lib/python2.7/site-packages/django/conf/locale/da/formats.py | 1 | 1035 | # -*- encoding: utf-8 -*-
# This file is distributed under the same license as the Django package.
#
from __future__ import unicode_literals
# The *_FORMAT strings use the Django date format syntax,
# see http://docs.djangoproject.com/en/dev/ref/templates/builtins/#date
DATE_FORMAT = 'j. F Y'
TIME_FORMAT = 'H:i'
DATETIME_FORMAT = 'j. F Y H:i'
YEAR_MONTH_FORMAT = 'F Y'
MONTH_DAY_FORMAT = 'j. F'
SHORT_DATE_FORMAT = 'd.m.Y'
SHORT_DATETIME_FORMAT = 'd.m.Y H:i'
FIRST_DAY_OF_WEEK = 1
# The *_INPUT_FORMATS strings use the Python strftime format syntax,
# see http://docs.python.org/library/datetime.html#strftime-strptime-behavior
DATE_INPUT_FORMATS = (
'%d.%m.%Y', # '25.10.2006'
)
DATETIME_INPUT_FORMATS = (
'%d.%m.%Y %H:%M:%S', # '25.10.2006 14:30:59'
'%d.%m.%Y %H:%M:%S.%f', # '25.10.2006 14:30:59.000200'
'%d.%m.%Y %H:%M', # '25.10.2006 14:30'
)
DECIMAL_SEPARATOR = ','
THOUSAND_SEPARATOR = '.'
NUMBER_GROUPING = 3
| mit | 4,936,223,680,368,423,000 | 33.689655 | 77 | 0.590338 | false |
F5Networks/f5-common-python | f5/bigip/tm/asm/policies/response_pages.py | 1 | 2162 | # coding=utf-8
#
# Copyright 2017 F5 Networks Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from f5.bigip.resource import AsmResource
from f5.bigip.resource import Collection
from f5.sdk_exception import UnsupportedOperation
class Response_Pages_s(Collection):
"""BIG-IP® ASM Response Pages sub-collection."""
def __init__(self, policy):
super(Response_Pages_s, self).__init__(policy)
self._meta_data['object_has_stats'] = False
self._meta_data['minimum_version'] = '11.6.0'
self._meta_data['allowed_lazy_attributes'] = [Response_Page]
self._meta_data['required_json_kind'] = 'tm:asm:policies:response-pages:response-pagecollectionstate'
self._meta_data['attribute_registry'] = {
'tm:asm:policies:response-pages:response-pagestate': Response_Page
}
class Response_Page(AsmResource):
"""BIG-IP® ASM Response Page resource."""
def __init__(self, response_pages_s):
super(Response_Page, self).__init__(response_pages_s)
self._meta_data['required_json_kind'] = 'tm:asm:policies:response-pages:response-pagestate'
def create(self, **kwargs):
"""Create is not supported for Response Page resources
:raises: UnsupportedOperation
"""
raise UnsupportedOperation(
"%s does not support the create method" % self.__class__.__name__
)
def delete(self, **kwargs):
"""Delete is not supported for Response Page resources
:raises: UnsupportedOperation
"""
raise UnsupportedOperation(
"%s does not support the delete method" % self.__class__.__name__
)
| apache-2.0 | -1,259,872,560,900,174,800 | 36.894737 | 109 | 0.674074 | false |
Julian/home-assistant | homeassistant/components/notify/pushetta.py | 1 | 1901 | """
Pushetta platform for notify component.
For more details about this platform, please refer to the documentation at
https://home-assistant.io/components/notify.pushetta/
"""
import logging
from homeassistant.components.notify import (
ATTR_TITLE, DOMAIN, BaseNotificationService)
from homeassistant.const import CONF_API_KEY
from homeassistant.helpers import validate_config
_LOGGER = logging.getLogger(__name__)
def get_service(hass, config):
"""Get the Pushetta notification service."""
from pushetta import Pushetta, exceptions
if not validate_config({DOMAIN: config},
{DOMAIN: [CONF_API_KEY, 'channel_name']},
_LOGGER):
return None
try:
pushetta = Pushetta(config[CONF_API_KEY])
pushetta.pushMessage(config['channel_name'], "Home Assistant started")
except exceptions.TokenValidationError:
_LOGGER.error("Please check your access token")
return None
except exceptions.ChannelNotFoundError:
_LOGGER.error("Channel '%s' not found", config['channel_name'])
return None
return PushettaNotificationService(config[CONF_API_KEY],
config['channel_name'])
# pylint: disable=too-few-public-methods
class PushettaNotificationService(BaseNotificationService):
"""Implement the notification service for Pushetta."""
def __init__(self, api_key, channel_name):
"""Initialize the service."""
from pushetta import Pushetta
self._api_key = api_key
self._channel_name = channel_name
self.pushetta = Pushetta(self._api_key)
def send_message(self, message="", **kwargs):
"""Send a message to a user."""
title = kwargs.get(ATTR_TITLE)
self.pushetta.pushMessage(self._channel_name,
"{} {}".format(title, message))
| mit | 5,321,828,693,776,001,000 | 33.563636 | 78 | 0.651236 | false |
itsvismay/ElasticBodies | Pipeline/Optimizations/cobyla_beam_heightwidthlength_test.py | 1 | 2030 | from scipy.optimize import fmin_cobyla
import sys, os, subprocess, numpy
P, E = 1000.0, 69e9 # N, Pa, m, m
fileName = 'optimizeTest.txt'
resultName = '../TestsResults/opt.txt'
def objective(x):
height = x[0]
width = x[1]
length = x[2]
volume = length * width * height
return volume
def g0(x):
height = 0.0
width = 0.0
length = 0.0
if type(x) is numpy.ndarray:
height = x[0]
width = x[1]
length = x[2]
else:
height = x[0]
width = x[1]
length = x[2]
print 'Calculating for Height, Width, Length:', height, width, length, '\n'
# fail-safes
if height <= 0.0 or width <= 0.0 or length <= 0.0:
return -100
file_write = open(fileName, 'w')
file_write.write(fileName + ".scad "+str(length)+" "+str(width*1000)+" "+str(height*1000))
file_write.close()
subprocess.check_output(['python', 'pipeline.py', '--template', 'templateBeam.py', '--batch', fileName, '--sConfig', 'slic3rConfig.ini', '--ind', str(height* 1000 + width * 1000 + length * 1000), '-c'])
# read results from file and return those
opt = open(resultName)
for line in opt.readlines():
curLine = line.strip().split(' ')
disp = float(curLine[0]) / 1000
I = width * height**3 / 12
tip_disp = (P * length**3)/(3*E*I)
        print 'Displacement for Height, Width, Length', height, width, length, 'is ::', disp
print 'Analytical Disp for Height, Width, Length', height, width, length, 'is ::', tip_disp, '\n'
return 1e-4 - (float(curLine[0]) / -1000)
return -1000000
def g1(x):
# height > 0.01 m (10 mm)
if x[0] > 0.01 and x[1] > 0.01 and x[2] > 0.01:
return 1
return -1
def g2(x):
# height < 0.5 m (500 mm)
    if x[0] < 0.5 and x[1] < 0.5 and x[2] < 0.5:
return 1
return -1
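# fmin_cobyla treats a constraint as satisfied when it returns >= 0, so g1/g2
# above encode box bounds on the design vector. An equivalent pair of smooth
# bound constraints would be (illustrative sketch only):
#   g1 = lambda x: min(x) - 0.01   # every dimension >= 0.01 m
#   g2 = lambda x: 0.5 - max(x)    # every dimension <= 0.5 m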
h0 = [0.02, 0.02, 0.02] # 20 mm
constraints = [g0, g1, g2]
h_opt = fmin_cobyla(objective, h0, constraints, rhoend=1e-6, maxfun=100, catol=1e-6)
print h_opt, objective(h_opt), g0(h_opt)
| mpl-2.0 | -2,741,182,944,111,957,500 | 28.42029 | 206 | 0.575369 | false |
rr-/docstring_parser | docstring_parser/numpydoc.py | 1 | 10211 | """Numpydoc-style docstring parsing.
.. seealso:: https://numpydoc.readthedocs.io/en/latest/format.html
"""
import inspect
import itertools
import re
import typing as T
from .common import (
Docstring,
DocstringDeprecated,
DocstringMeta,
DocstringParam,
DocstringRaises,
DocstringReturns,
DocstringStyle,
)
def _pairwise(iterable: T.Iterable, end=None) -> T.Iterable:
a, b = itertools.tee(iterable)
next(b, None)
return itertools.zip_longest(a, b, fillvalue=end)
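# e.g. (illustrative): list(_pairwise([1, 2, 3])) == [(1, 2), (2, 3), (3, None)]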
def _clean_str(string: str) -> T.Optional[str]:
string = string.strip()
if len(string) > 0:
return string
KV_REGEX = re.compile(r"^[^\s].*$", flags=re.M)
PARAM_KEY_REGEX = re.compile(r"^(?P<name>.*?)(?:\s*:\s*(?P<type>.*?))?$")
PARAM_OPTIONAL_REGEX = re.compile(r"(?P<type>.*?)(?:, optional|\(optional\))$")
# numpydoc format has no formal grammar for this,
# but we can make some educated guesses...
PARAM_DEFAULT_REGEX = re.compile(
r"[Dd]efault(?: is | = |: |s to |)\s*(?P<value>[\w\-\.]+)"
)
RETURN_KEY_REGEX = re.compile(r"^(?:(?P<name>.*?)\s*:\s*)?(?P<type>.*?)$")
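# A quick sketch of what these patterns accept (illustrative only):
#   PARAM_KEY_REGEX.match("x : int").groupdict()
#       -> {'name': 'x', 'type': 'int'}
#   PARAM_OPTIONAL_REGEX.match("int, optional").group("type")
#       -> 'int'
#   PARAM_DEFAULT_REGEX.search("default is 5").group("value")
#       -> '5'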
class Section:
"""Numpydoc section parser.
:param title: section title. For most sections, this is a heading like
"Parameters" which appears on its own line, underlined by
en-dashes ('-') on the following line.
:param key: meta key string. In the parsed ``DocstringMeta`` instance this
will be the first element of the ``args`` attribute list.
"""
def __init__(self, title: str, key: str) -> None:
self.title = title
self.key = key
@property
def title_pattern(self) -> str:
"""Regular expression pattern matching this section's header.
This pattern will match this instance's ``title`` attribute in
an anonymous group.
"""
return r"^({})\s*?\n{}\s*$".format(self.title, "-" * len(self.title))
def parse(self, text: str) -> T.Iterable[DocstringMeta]:
"""Parse ``DocstringMeta`` objects from the body of this section.
:param text: section body text. Should be cleaned with
``inspect.cleandoc`` before parsing.
"""
yield DocstringMeta([self.key], description=_clean_str(text))
class _KVSection(Section):
"""Base parser for numpydoc sections with key-value syntax.
E.g. sections that look like this:
key
value
key2 : type
values can also span...
... multiple lines
"""
def _parse_item(self, key: str, value: str) -> DocstringMeta:
pass
def parse(self, text: str) -> T.Iterable[DocstringMeta]:
for match, next_match in _pairwise(KV_REGEX.finditer(text)):
start = match.end()
end = next_match.start() if next_match is not None else None
value = text[start:end]
yield self._parse_item(
key=match.group(), value=inspect.cleandoc(value)
)
class _SphinxSection(Section):
"""Base parser for numpydoc sections with sphinx-style syntax.
E.g. sections that look like this:
.. title:: something
possibly over multiple lines
"""
@property
def title_pattern(self) -> str:
return r"^\.\.\s*({})\s*::".format(self.title)
class ParamSection(_KVSection):
"""Parser for numpydoc parameter sections.
E.g. any section that looks like this:
arg_name
arg_description
arg_2 : type, optional
descriptions can also span...
... multiple lines
"""
def _parse_item(self, key: str, value: str) -> DocstringParam:
m = PARAM_KEY_REGEX.match(key)
arg_name = type_name = is_optional = None
if m is not None:
arg_name, type_name = m.group("name"), m.group("type")
if type_name is not None:
optional_match = PARAM_OPTIONAL_REGEX.match(type_name)
if optional_match is not None:
type_name = optional_match.group("type")
is_optional = True
else:
is_optional = False
default = None
if len(value) > 0:
default_match = PARAM_DEFAULT_REGEX.search(value)
if default_match is not None:
default = default_match.group("value")
return DocstringParam(
args=[self.key, arg_name],
description=_clean_str(value),
arg_name=arg_name,
type_name=type_name,
is_optional=is_optional,
default=default,
)
class RaisesSection(_KVSection):
"""Parser for numpydoc raises sections.
E.g. any section that looks like this:
ValueError
A description of what might raise ValueError
"""
def _parse_item(self, key: str, value: str) -> DocstringRaises:
return DocstringRaises(
args=[self.key, key],
description=_clean_str(value),
type_name=key if len(key) > 0 else None,
)
class ReturnsSection(_KVSection):
"""Parser for numpydoc raises sections.
E.g. any section that looks like this:
return_name : type
A description of this returned value
another_type
Return names are optional, types are required
"""
is_generator = False
def _parse_item(self, key: str, value: str) -> DocstringReturns:
m = RETURN_KEY_REGEX.match(key)
if m is not None:
return_name, type_name = m.group("name"), m.group("type")
else:
return_name = type_name = None
return DocstringReturns(
args=[self.key],
description=_clean_str(value),
type_name=type_name,
is_generator=self.is_generator,
return_name=return_name,
)
class YieldsSection(ReturnsSection):
"""Parser for numpydoc generator "yields" sections."""
is_generator = True
class DeprecationSection(_SphinxSection):
"""Parser for numpydoc "deprecation warning" sections."""
def parse(self, text: str) -> T.Iterable[DocstringDeprecated]:
version, desc, *_ = text.split(sep="\n", maxsplit=1) + [None, None]
if desc is not None:
desc = _clean_str(inspect.cleandoc(desc))
yield DocstringDeprecated(
args=[self.key], description=desc, version=_clean_str(version)
)
DEFAULT_SECTIONS = [
ParamSection("Parameters", "param"),
ParamSection("Params", "param"),
ParamSection("Arguments", "param"),
ParamSection("Args", "param"),
ParamSection("Other Parameters", "other_param"),
ParamSection("Other Params", "other_param"),
ParamSection("Other Arguments", "other_param"),
ParamSection("Other Args", "other_param"),
ParamSection("Receives", "receives"),
ParamSection("Receive", "receives"),
RaisesSection("Raises", "raises"),
RaisesSection("Raise", "raises"),
RaisesSection("Warns", "warns"),
RaisesSection("Warn", "warns"),
ParamSection("Attributes", "attribute"),
ParamSection("Attribute", "attribute"),
ReturnsSection("Returns", "returns"),
ReturnsSection("Return", "returns"),
YieldsSection("Yields", "yields"),
YieldsSection("Yield", "yields"),
Section("Examples", "examples"),
Section("Example", "examples"),
Section("Warnings", "warnings"),
Section("Warning", "warnings"),
Section("See Also", "see_also"),
Section("Related", "see_also"),
Section("Notes", "notes"),
Section("Note", "notes"),
Section("References", "references"),
Section("Reference", "references"),
DeprecationSection("deprecated", "deprecation"),
]
class NumpydocParser:
def __init__(self, sections: T.Optional[T.Dict[str, Section]] = None):
"""Setup sections.
:param sections: Recognized sections or None to defaults.
"""
sections = sections or DEFAULT_SECTIONS
self.sections = {s.title: s for s in sections}
self._setup()
def _setup(self):
self.titles_re = re.compile(
r"|".join(s.title_pattern for s in self.sections.values()),
flags=re.M,
)
def add_section(self, section: Section):
"""Add or replace a section.
:param section: The new section.
"""
self.sections[section.title] = section
self._setup()
def parse(self, text: str) -> Docstring:
"""Parse the numpy-style docstring into its components.
:returns: parsed docstring
"""
ret = Docstring(style=DocstringStyle.numpydoc)
if not text:
return ret
# Clean according to PEP-0257
text = inspect.cleandoc(text)
# Find first title and split on its position
match = self.titles_re.search(text)
if match:
desc_chunk = text[: match.start()]
meta_chunk = text[match.start() :]
else:
desc_chunk = text
meta_chunk = ""
# Break description into short and long parts
parts = desc_chunk.split("\n", 1)
ret.short_description = parts[0] or None
if len(parts) > 1:
long_desc_chunk = parts[1] or ""
ret.blank_after_short_description = long_desc_chunk.startswith(
"\n"
)
ret.blank_after_long_description = long_desc_chunk.endswith("\n\n")
ret.long_description = long_desc_chunk.strip() or None
for match, nextmatch in _pairwise(self.titles_re.finditer(meta_chunk)):
title = next(g for g in match.groups() if g is not None)
factory = self.sections[title]
# section chunk starts after the header,
# ends at the start of the next header
start = match.end()
end = nextmatch.start() if nextmatch is not None else None
ret.meta.extend(factory.parse(meta_chunk[start:end]))
return ret
def parse(text: str) -> Docstring:
"""Parse the numpy-style docstring into its components.
:returns: parsed docstring
"""
return NumpydocParser().parse(text)
| mit | -4,061,001,772,794,959,000 | 29.756024 | 79 | 0.590442 | false |
ubyssey/dispatch | dispatch/theme/validators.py | 1 | 1823 | from uuid import UUID
from django.core.validators import slug_re
from dispatch.theme.exceptions import InvalidZone, InvalidWidget
def is_valid_slug(slug):
"""Uses Django's slug regex to test if id is valid"""
return slug_re.match(slug)
def has_valid_id(o):
return hasattr(o, 'id') and o.id and is_valid_slug(o.id)
def has_valid_name(o):
return hasattr(o, 'name') and o.name
def has_valid_template(o):
return hasattr(o, 'template') and o.template
def validate_widget(widget):
"""Checks that the given widget contains the required fields"""
if not has_valid_id(widget):
raise InvalidWidget("%s must contain a valid 'id' attribute" % widget.__name__)
if not has_valid_name(widget):
raise InvalidWidget("%s must contain a valid 'name' attribute" % widget.__name__)
if not has_valid_template(widget):
raise InvalidWidget("%s must contain a valid 'template' attribute" % widget.__name__)
if not hasattr(widget, 'zones') or not widget.zones:
raise InvalidWidget("%s must be compatible with at least one zone" % widget.__name__)
def validate_zone(zone):
"""Checks that the given zone contains the required fields"""
if not has_valid_id(zone):
raise InvalidZone("%s must contain a valid 'id' attribute" % zone.__name__)
if not has_valid_name(zone):
raise InvalidZone("%s must contain a valid 'name' attribute" % zone.__name__)
def is_valid_id(id):
"""Return True if id is a valid integer or UUID, False otherwise."""
return isinstance(id, int) or is_valid_uuid(id)
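# For example (illustrative):
#   is_valid_id(7) -> True
#   is_valid_id("c9bf9e57-1685-4c89-bafb-ff5af830be8a") -> True
#   is_valid_id("not-a-uuid") -> False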
def is_valid_uuid(id):
"""Return True if id is a valid UUID, False otherwise."""
if not isinstance(id, str):
return False
try:
        UUID(id, version=4)
except ValueError:
return False
return True | gpl-2.0 | -6,203,565,886,182,744,000 | 29.915254 | 93 | 0.667581 | false |
low-sky/h2codumb | h2co_mm.py | 1 | 6796 | """
===========================
Formaldehyde mm-line fitter
===========================
This is a formaldehyde 3_03-2_02 / 3_22-221 and 3_03-2_02/3_21-2_20 fitter.
It is based entirely on RADEX models.
This is the EWR fork of the fitter in pyspeckit.
"""
import numpy as np
import pyspeckit.spectrum.models.hyperfine as hyperfine
from pyspeckit.spectrum.models import fitter,model#,modelgrid
try: # for model grid reading
import astropy.io.fits as pyfits
except ImportError:
import pyfits
try:
import scipy.interpolate
import scipy.ndimage
scipyOK = True
except ImportError:
scipyOK=False
line_names = ['threeohthree','threetwotwo','threetwoone']
# http://adsabs.harvard.edu/abs/1971ApJ...169..429T has the most accurate freqs
# http://adsabs.harvard.edu/abs/1972ApJ...174..463T [twotwo]
central_freq_dict = {
'threeohthree': 218.222192e9,
'threetwotwo': 218.475632e9,
'threetwoone': 218.760066e9,
}
line_strength_dict={
'threeohthree': 1.,
'threetwotwo': 1.,
'threetwoone': 1.,
}
relative_strength_total_degeneracy={
'threeohthree': 1.,
'threetwotwo': 1.,
'threetwoone': 1.,
}
freq_dict = central_freq_dict
aval_dict = {
'threeohthree': 2.818e-4,
'threetwotwo': 1.571e-4,
'threetwoone': 1.577e-4,
}
voff_lines_dict = {
'threeohthree': 0.,
'threetwotwo': 0.,
'threetwoone': 0.,
}
formaldehyde_mm_vtau = hyperfine.hyperfinemodel(line_names, voff_lines_dict,
freq_dict, line_strength_dict, relative_strength_total_degeneracy)
formaldehyde_mm_vtau_fitter = formaldehyde_mm_vtau.fitter
formaldehyde_mm_vtau_vheight_fitter = formaldehyde_mm_vtau.vheight_fitter
def h2co_mm_radex(xarr,
Temperature=25,
logColumn=13,
logDensity=4,
xoff_v=0.0,
width=1.0,
grid_vwidth=1.0,
gridbundle = None,
debug=False,
verbose=False,
**kwargs):
"""
Use a grid of RADEX-computed models to make a model line spectrum
The RADEX models have to be available somewhere.
OR they can be passed as arrays. If as arrays, the form should be:
texgrid = ((minfreq1,maxfreq1,texgrid1),(minfreq2,maxfreq2,texgrid2))
xarr must be a SpectroscopicAxis instance
xoff_v, width are both in km/s
Parameters
----------
grid_vwidth : float
the velocity assumed when computing the grid in km/s
this is important because tau = modeltau / width (see, e.g.,
Draine 2011 textbook pgs 219-230)
density : float
Density!
"""
    # Convert X-units to frequency in Hz
xarr = xarr.as_unit('Hz', quiet=True)
Tex303,Tex322,Tex321,tau303,tau322,tau321 = gridbundle
# if this gets too far different from 1, we are gonna have a Bad Time.
scalefac = grid_vwidth/width
tex = (Tex303(logColumn,logDensity,Temperature),
Tex322(logColumn,logDensity,Temperature),
Tex321(logColumn,logDensity,Temperature))
tau = (tau303(logColumn,logDensity,Temperature)*scalefac,
tau322(logColumn,logDensity,Temperature)*scalefac,
tau321(logColumn,logDensity,Temperature)*scalefac)
if np.any(np.isnan(tex)) or np.any(np.isnan(tau)):
raise ValueError("Invalid column/density")
if verbose:
for ta,tk in zip(tau,tex):
print "density %20.12g temperature %20.12g column %20.12g: tau %20.12g tex %20.12g" % (logDensity, Temperature, logColumn, ta, tk)
if debug:
import pdb; pdb.set_trace()
# here there be physics
ckms = 2.99792458e5
freq_dict = {
'303': 218.222192e9,
'322': 218.475632e9,
'321': 218.760066e9,
}
Tbg = 2.73 #because it totally is
nu0 = np.array([ 218.222192e9, 218.475632e9,218.760066e9])
nuwidth = [width/ckms*nu for nu in nu0]
nuoff = [xoff_v/ckms*nu for nu in nu0]
minfreq = nu0/1e9 - 0.25
maxfreq = nu0/1e9 + 0.25
# spec2 = np.zeros(len(xarr))
# for ii in range(len(nu0)):
# taunu = tau[ii]*np.exp(-(xarr+nuoff[ii]-nu0[ii])**2/(2.0*nuwidth[ii]**2))
# spec2 = spec2 + (1-np.exp(-taunu))*tex[ii] + Tbg*(np.exp(-taunu)-1) #second term assumes an ON-OFF
spec = np.sum([
(formaldehyde_mm_vtau(xarr, Tex=float(tex[ii]), tau=float(tau[ii]),
xoff_v=xoff_v, width=width, **kwargs)
* (xarr.as_unit('GHz')>minfreq[ii]) * (xarr.as_unit('GHz')<maxfreq[ii])) for ii in xrange(len(tex))],
axis=0)
# import pdb
# pdb.set_trace()
return spec
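# Sketch of intended use (illustrative; assumes the six interpolators named
# in the function body have already been built from RADEX model grids):
#   gridbundle = (Tex303, Tex322, Tex321, tau303, tau322, tau321)
#   model = h2co_mm_radex(xarr, Temperature=30, logColumn=13.5,
#                         logDensity=4.5, xoff_v=0.0, width=2.0,
#                         gridbundle=gridbundle)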
def formaldehyde_mm(xarr, amp=1.0, xoff_v=0.0, width=1.0,
return_components=False ):
"""
Generate a model Formaldehyde spectrum based on simple gaussian parameters
the "amplitude" is an essentially arbitrary parameter; we therefore define it to be Tex given tau=0.01 when
passing to the fitter
The final spectrum is then rescaled to that value
The components are independent, but with offsets set by frequency... in principle.
"""
    mdl = formaldehyde_mm_vtau(xarr, Tex=amp*0.01, tau=0.01, xoff_v=xoff_v,
                               width=width,
                               return_components=return_components)
if return_components:
mdlpeak = np.abs(mdl).squeeze().sum(axis=0).max()
else:
mdlpeak = np.abs(mdl).max()
if mdlpeak > 0:
mdl *= amp/mdlpeak
return mdl
class formaldehyde_mm_model(model.SpectralModel):
pass
formaldehyde_mm_fitter = formaldehyde_mm_model(formaldehyde_mm, 3,
parnames=['amp','center','width'],
parlimited=[(False,False),(False,False), (True,False)],
parlimits=[(0,0), (0,0), (0,0)],
shortvarnames=("A","v","\\sigma"), # specify the parameter names (TeX is OK)
fitunits='Hz' )
formaldehyde_mm_vheight_fitter = formaldehyde_mm_model(fitter.vheightmodel(formaldehyde_mm), 4,
parnames=['height','amp','center','width'],
parlimited=[(False,False),(False,False),(False,False), (True,False)],
parlimits=[(0,0), (0,0), (0,0), (0,0)],
shortvarnames=("H","A","v","\\sigma"), # specify the parameter names (TeX is OK)
fitunits='Hz' )
try:
import pymodelfit
class pmfFormaldehydeModel(pymodelfit.FunctionModel1DAuto):
def f(self, x, amp0=1.0, xoff_v0=0.0,width0=1.0):
            return formaldehyde_mm(x,
                                   amp=amp0,
                                   xoff_v=xoff_v0, width=width0)
class pmfFormaldehydeModelVtau(pymodelfit.FunctionModel1DAuto):
def f(self, x, Tex0=1.0, tau0=0.01, xoff_v0=0.0, width0=1.0):
            return formaldehyde_mm_vtau(x,
                                        Tex=Tex0, tau=tau0,
                                        xoff_v=xoff_v0, width=width0)
except ImportError:
pass
| gpl-2.0 | -8,952,912,852,867,932,000 | 31.208531 | 142 | 0.617716 | false |
vongola12324/Linking-Loader | main.py | 1 | 3185 | import os
import sys
# Open File
filename = ""
filename = input("Enter Input Filename: ")
if filename == "":
filename = "linkin.txt"
fin = open(filename, "r")
fout = open("out.txt", "w")
# Variable Prepare
PGBLOCKS = {}
MODIFY = []
OBJCODE = []
# Method Prepare
def splitline(line):
word = line.strip().split()
return word
def getObjline(START=None):
start = int(START, 16)
for i in OBJCODE:
objstart = int(i.get("START"), 16)
objlen = int(i.get("LENGTH"), 16)
if objstart <= start <= objstart + objlen:
return i
else:
continue
def toSignedInt(hexstr):
i = int(hexstr, 16)
if i > 0x7FFFFF:
i -= 0x1000000
return i
def toSignedHex(num):
    # 24-bit two's complement, matching the 24-bit width used in toSignedInt
    return hex(num & 0xffffff)
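# Round-trip example (illustrative): toSignedInt("FFFFFB") == -5 and
# toSignedHex(-5) == '0xfffffb', i.e. a 24-bit two's complement round trip.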
# Program Start
Offset = input("Enter Program Start Address: ")
Offset = int(Offset, 16)
Length = 0
while True:
line = fin.readline()
if not line:
break
else:
if line[0] == "H":
word = splitline(line)
PGBLOCKS.update({word[1]: hex(int(word[2], 16) + Offset)[2:].upper()})
Length = int(word[3], 16)
elif line[0] == "D":
word = splitline(line)
for i in range(1, len(word), 2):
PGBLOCKS.update({word[i]: word[i + 1]})
elif line[0] == "R":
continue
elif line[0] == "E":
Offset += Length
continue
elif line[0] == "T":
word = splitline(line)
string = ""
for i in range(3, len(word)):
string += word[i]
head = hex(int(word[1], 16) + Offset)[2:].upper()
while len(head) < 6:
head = "0" + head
OBJCODE.append({"START": head, "LENGTH": word[2], "OBJC": string})
else:
word = splitline(line)
if word != []:
MODIFY.append(
{"ADDR": hex(toSignedInt(word[1]) + Offset), "LENGTH": word[2], "OPER": word[3], "PGB": word[4]})
fin.close()
for i in MODIFY:
ObjLine = getObjline(i.get("ADDR"))
Objc = ObjLine.get("OBJC")
selectStart = (int(i.get("ADDR"), 16) - int("0x" + ObjLine.get("START"), 16)) * 2
if int(i.get("LENGTH"), 16) % 2 == 1:
selectStart += 1
ModObjc = Objc[selectStart:selectStart + int(i.get("LENGTH"), 16)]
PGB = PGBLOCKS.get(i.get("PGB"))
if i.get("OPER") == "+":
ModObjc = toSignedHex(toSignedInt(ModObjc) + toSignedInt(PGB))[2:].upper()
else:
ModObjc = toSignedHex(toSignedInt(ModObjc) - toSignedInt(PGB))[2:].upper()
while len(ModObjc) < int(i.get("LENGTH"), 16):
ModObjc = "0" + ModObjc
ObjLine.update({"OBJC": Objc[:selectStart] + ModObjc + Objc[selectStart + int(i.get("LENGTH"), 16):]})
for i in OBJCODE:
Objc = i.get("OBJC")
while len(Objc) < 32:
Objc += "."
i.update({"OBJC": Objc})
fout.write(
"{0:<06s} {1:<8s} {2:<8s} {3:<8s} {4:<8s}\n".format(i.get("START"), i.get("OBJC")[0:8], i.get("OBJC")[8:16],
i.get("OBJC")[16:24], i.get("OBJC")[24:32]))
fout.close() | gpl-3.0 | 8,571,556,472,533,741,000 | 28.229358 | 122 | 0.513344 | false |
terryjbates/test-driven-development-with-python | myflaskapp/tests/test_unit.py | 1 | 1031 | import unittest
import requests
class SmokeTest(unittest.TestCase):
def test_maths(self):
        self.assertEqual(6, 2 + 4)
def test_home_page_is_about_todo_lists(self):
request = requests.get('http://localhost:5000')
self.assertTrue(
request.content.startswith(b'\n\n<!doctype html>\n'))
self.assertIn(
'<title>\n \n To-Do\n \n \n </title>\n',
request.text)
self.assertTrue(request.content.endswith(b'</body>\n</html>\n'))
class TestMainPage:
"""WebTest test for title"""
def test_main_page_returns_200(self, user, testapp):
"""Login successful."""
# Goes to homepage
res = testapp.get('/')
assert res.status_code == 200
def test_main_page_returns_expected_title(self, user, testapp):
res = testapp.get('/')
assert '<title>\n \n To-Do\n \n \n </title>\n' in res
# def test_main_page_returns_expected_content(self, user, testapp):
# res = testapp.get('/')
| mit | -7,449,673,310,125,946,000 | 31.21875 | 72 | 0.586809 | false |
LUTAN/tensorflow | tensorflow/tools/docs/generate_lib.py | 1 | 16276 | # Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Generate docs for the TensorFlow Python API."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import argparse
import inspect
import os
import six
from tensorflow.tools.common import public_api
from tensorflow.tools.common import traverse
from tensorflow.tools.docs import doc_generator_visitor
from tensorflow.tools.docs import parser
from tensorflow.tools.docs import pretty_docs
from tensorflow.tools.docs import py_guide_parser
def _is_free_function(py_object, full_name, index):
"""Check if input is a free function (and not a class- or static method)."""
if not inspect.isfunction(py_object):
return False
# Static methods are functions to inspect (in 2.7), so check if the parent
# is a class. If there is no parent, it's not a function.
if '.' not in full_name:
return False
parent_name = full_name.rsplit('.', 1)[0]
if inspect.isclass(index[parent_name]):
return False
return True
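# For example (illustrative): a module-level function indexed as "tf.nn.relu"
# counts as free, while "tf.Tensor.eval" does not, because its parent
# "tf.Tensor" resolves to a class in the index.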
def write_docs(output_dir, parser_config, yaml_toc):
"""Write previously extracted docs to disk.
Write a docs page for each symbol included in the indices of parser_config to
a tree of docs at `output_dir`.
Symbols with multiple aliases will have only one page written about
them, which is referenced for all aliases.
Args:
output_dir: Directory to write documentation markdown files to. Will be
created if it doesn't exist.
parser_config: A `parser.ParserConfig` object, containing all the necessary
indices.
yaml_toc: Set to `True` to generate a "_toc.yaml" file.
"""
# Make output_dir.
try:
if not os.path.exists(output_dir):
os.makedirs(output_dir)
except OSError as e:
print('Creating output dir "%s" failed: %s' % (output_dir, e))
raise
# These dictionaries are used for table-of-contents generation below
# They will contain, after the for-loop below::
# - module name(string):classes and functions the module contains(list)
module_children = {}
# - symbol name(string):pathname (string)
symbol_to_file = {}
# Parse and write Markdown pages, resolving cross-links (@{symbol}).
for full_name, py_object in six.iteritems(parser_config.index):
if full_name in parser_config.duplicate_of:
continue
# Methods and some routines are documented only as part of their class.
if not (inspect.ismodule(py_object) or inspect.isclass(py_object) or
_is_free_function(py_object, full_name, parser_config.index)):
continue
sitepath = os.path.join('api_docs/python',
parser.documentation_path(full_name)[:-3])
# For TOC, we need to store a mapping from full_name to the file
# we're generating
symbol_to_file[full_name] = sitepath
# For a module, remember the module for the table-of-contents
if inspect.ismodule(py_object):
if full_name in parser_config.tree:
module_children.setdefault(full_name, [])
# For something else that's documented,
# figure out what module it lives in
else:
subname = str(full_name)
while True:
subname = subname[:subname.rindex('.')]
if inspect.ismodule(parser_config.index[subname]):
module_children.setdefault(subname, []).append(full_name)
break
print('Writing docs for %s (%r).' % (full_name, py_object))
# Generate docs for `py_object`, resolving references.
page_info = parser.docs_for_object(full_name, py_object, parser_config)
path = os.path.join(output_dir, parser.documentation_path(full_name))
directory = os.path.dirname(path)
try:
if not os.path.exists(directory):
os.makedirs(directory)
with open(path, 'w') as f:
f.write(pretty_docs.build_md_page(page_info))
except OSError as e:
print('Cannot write documentation for %s to %s: %s' % (full_name,
directory, e))
raise
if yaml_toc:
# Generate table of contents
# Put modules in alphabetical order, case-insensitive
modules = sorted(module_children.keys(), key=lambda a: a.upper())
leftnav_path = os.path.join(output_dir, '_toc.yaml')
with open(leftnav_path, 'w') as f:
# Generate header
f.write('# Automatically generated file; please do not edit\ntoc:\n')
for module in modules:
f.write(' - title: ' + module + '\n'
' section:\n' +
' - title: Overview\n' +
' path: /TARGET_DOC_ROOT/VERSION/' +
symbol_to_file[module] + '\n')
symbols_in_module = module_children.get(module, [])
symbols_in_module.sort(key=lambda a: a.upper())
for full_name in symbols_in_module:
f.write(' - title: ' + full_name[len(module)+1:] + '\n'
' path: /TARGET_DOC_ROOT/VERSION/' +
symbol_to_file[full_name] + '\n')
# Write a global index containing all full names with links.
with open(os.path.join(output_dir, 'index.md'), 'w') as f:
f.write(parser.generate_global_index('TensorFlow', parser_config.index,
parser_config.reference_resolver))
def add_dict_to_dict(add_from, add_to):
for key in add_from:
if key in add_to:
add_to[key].extend(add_from[key])
else:
add_to[key] = add_from[key]
# Exclude some libraries in contrib from the documentation altogether.
def _get_default_do_not_descend_map():
# TODO(wicke): Shrink this list.
return {
'': ['cli', 'lib', 'wrappers'],
'contrib': [
'compiler',
'factorization',
'grid_rnn',
'labeled_tensor',
'ndlstm',
'quantization',
'session_bundle',
'slim',
'solvers',
'specs',
'tensor_forest',
'tensorboard',
'testing',
'training',
'tfprof',
],
'contrib.bayesflow': [
'special_math', 'stochastic_gradient_estimators',
'stochastic_variables'
],
'contrib.ffmpeg': ['ffmpeg_ops'],
'contrib.graph_editor': [
'edit',
'match',
'reroute',
'subgraph',
'transform',
'select',
'util'
],
'contrib.keras': ['api', 'python'],
'contrib.layers': ['feature_column', 'summaries'],
'contrib.learn': [
'datasets',
'head',
'graph_actions',
'io',
'models',
'monitors',
'ops',
'preprocessing',
'utils',
],
'contrib.util': ['loader'],
}
def extract(py_modules, do_not_descend_map):
"""Extract docs from tf namespace and write them to disk."""
# Traverse the first module.
visitor = doc_generator_visitor.DocGeneratorVisitor(py_modules[0][0])
api_visitor = public_api.PublicAPIVisitor(visitor)
add_dict_to_dict(do_not_descend_map, api_visitor.do_not_descend_map)
traverse.traverse(py_modules[0][1], api_visitor)
# Traverse all py_modules after the first:
for module_name, module in py_modules[1:]:
visitor.set_root_name(module_name)
traverse.traverse(module, api_visitor)
return visitor
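# Minimal sketch of calling extract() (assumes TensorFlow is importable;
# the module list here is illustrative only):
#
#   import tensorflow as tf
#   visitor = extract([('tf', tf)], _get_default_do_not_descend_map())
#   print(len(visitor.index))  # number of documented symbols collected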
class _GetMarkdownTitle(py_guide_parser.PyGuideParser):
"""Extract the title from a .md file."""
def __init__(self):
self.title = None
py_guide_parser.PyGuideParser.__init__(self)
def process_title(self, _, title):
if self.title is None: # only use the first title
self.title = title
class _DocInfo(object):
"""A simple struct for holding a doc's url and title."""
def __init__(self, url, title):
self.url = url
self.title = title
def build_doc_index(src_dir):
"""Build an index from a keyword designating a doc to _DocInfo objects."""
doc_index = {}
for dirpath, _, filenames in os.walk(src_dir):
suffix = os.path.relpath(path=dirpath, start=src_dir)
for base_name in filenames:
if not base_name.endswith('.md'): continue
title_parser = _GetMarkdownTitle()
title_parser.process(os.path.join(dirpath, base_name))
key_parts = os.path.join(suffix, base_name[:-3]).split('/')
if key_parts[-1] == 'index':
key_parts = key_parts[:-1]
doc_info = _DocInfo(os.path.join(suffix, base_name), title_parser.title)
doc_index[key_parts[-1]] = doc_info
if len(key_parts) > 1:
doc_index['/'.join(key_parts[-2:])] = doc_info
return doc_index
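# Example of the resulting index (hypothetical files): a guide at
# 'tutorials/mnist/index.md' titled 'MNIST' becomes reachable both as
# doc_index['mnist'] and doc_index['tutorials/mnist'], each mapping to a
# _DocInfo with url 'tutorials/mnist/index.md' and title 'MNIST'.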
class _GuideRef(object):
def __init__(self, base_name, title, section_title, section_tag):
self.url = 'api_guides/python/' + (
('%s#%s' % (base_name, section_tag)) if section_tag else base_name)
self.link_text = (('%s > %s' % (title, section_title))
if section_title else title)
def make_md_link(self, url_prefix):
return '[%s](%s%s)' % (self.link_text, url_prefix, self.url)
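  # For instance (illustrative values only):
  #   _GuideRef('math_ops', 'Math', 'Arithmetic', 'arith').make_md_link('../')
  # returns '[Math > Arithmetic](../api_guides/python/math_ops#arith)'.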
class _GenerateGuideIndex(py_guide_parser.PyGuideParser):
"""Turn guide files into an index from symbol name to a list of _GuideRefs."""
def __init__(self):
self.index = {}
py_guide_parser.PyGuideParser.__init__(self)
def process(self, full_path, base_name):
"""Index a file, reading from `full_path`, with `base_name` as the link."""
self.full_path = full_path
self.base_name = base_name
self.title = None
self.section_title = None
self.section_tag = None
py_guide_parser.PyGuideParser.process(self, full_path)
def process_title(self, _, title):
if self.title is None: # only use the first title
self.title = title
def process_section(self, _, section_title, tag):
self.section_title = section_title
self.section_tag = tag
def process_line(self, _, line):
"""Index @{symbol} references as in the current file & section."""
for match in parser.SYMBOL_REFERENCE_RE.finditer(line):
val = self.index.get(match.group(1), [])
val.append(_GuideRef(
self.base_name, self.title, self.section_title, self.section_tag))
self.index[match.group(1)] = val
def _build_guide_index(guide_src_dir):
"""Return dict: symbol name -> _GuideRef from the files in `guide_src_dir`."""
index_generator = _GenerateGuideIndex()
if os.path.exists(guide_src_dir):
for full_path, base_name in py_guide_parser.md_files_in_dir(guide_src_dir):
index_generator.process(full_path, base_name)
return index_generator.index
class _UpdateTags(py_guide_parser.PyGuideParser):
"""Rewrites a Python guide so that each section has an explicit tag."""
def process_section(self, line_number, section_title, tag):
self.replace_line(line_number, '<h2 id="%s">%s</h2>' % (tag, section_title))
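  # e.g. a section parsed with title 'Sharding' and tag 'sharding' is
  # rewritten in place to: <h2 id="sharding">Sharding</h2>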
EXCLUDED = set(['__init__.py', 'OWNERS', 'README.txt'])
def _other_docs(src_dir, output_dir, reference_resolver):
"""Convert all the files in `src_dir` and write results to `output_dir`."""
header = '<!-- DO NOT EDIT! Automatically generated file. -->\n'
# Iterate through all the source files and process them.
tag_updater = _UpdateTags()
for dirpath, _, filenames in os.walk(src_dir):
# How to get from `dirpath` to api_docs/python/
relative_path_to_root = os.path.relpath(
path=os.path.join(src_dir, 'api_docs/python'), start=dirpath)
# Make the directory under output_dir.
new_dir = os.path.join(output_dir,
os.path.relpath(path=dirpath, start=src_dir))
try:
if not os.path.exists(new_dir):
os.makedirs(new_dir)
except OSError as e:
print('Creating output dir "%s" failed: %s' % (new_dir, e))
raise
for base_name in filenames:
if base_name in EXCLUDED:
print('Skipping excluded file %s...' % base_name)
continue
full_in_path = os.path.join(dirpath, base_name)
suffix = os.path.relpath(path=full_in_path, start=src_dir)
full_out_path = os.path.join(output_dir, suffix)
if not base_name.endswith('.md'):
print('Copying non-md file %s...' % suffix)
        with open(full_in_path) as src, open(full_out_path, 'w') as dst:
          dst.write(src.read())
continue
if dirpath.endswith('/api_guides/python'):
print('Processing Python guide %s...' % base_name)
md_string = tag_updater.process(full_in_path)
else:
print('Processing doc %s...' % suffix)
        with open(full_in_path) as f_in:
          md_string = f_in.read()
output = reference_resolver.replace_references(
md_string, relative_path_to_root)
with open(full_out_path, 'w') as f:
f.write(header + output)
print('Done.')
class DocGenerator(object):
"""Main entry point for generating docs."""
def __init__(self):
self.argument_parser = argparse.ArgumentParser()
self._py_modules = None
self._do_not_descend_map = _get_default_do_not_descend_map()
self.yaml_toc = True
def add_output_dir_argument(self):
self.argument_parser.add_argument(
'--output_dir',
type=str,
default=None,
required=True,
help='Directory to write docs to.'
)
def add_src_dir_argument(self):
self.argument_parser.add_argument(
'--src_dir',
type=str,
default=None,
required=True,
help='Directory with the source docs.'
)
def add_base_dir_argument(self, default_base_dir):
self.argument_parser.add_argument(
'--base_dir',
type=str,
default=default_base_dir,
help='Base directory to to strip from file names referenced in docs.'
)
def parse_known_args(self):
flags, _ = self.argument_parser.parse_known_args()
return flags
def add_to_do_not_descend_map(self, d):
add_dict_to_dict(d, self._do_not_descend_map)
def set_do_not_descend_map(self, d):
self._do_not_descend_map = d
def set_py_modules(self, py_modules):
self._py_modules = py_modules
def py_module_names(self):
if self._py_modules is None:
raise RuntimeError(
'Must call set_py_modules() before running py_module_names().')
return [name for (name, _) in self._py_modules]
def make_reference_resolver(self, visitor, doc_index):
return parser.ReferenceResolver.from_visitor(
visitor, doc_index, py_module_names=self.py_module_names())
def make_parser_config(self, visitor, reference_resolver, guide_index,
base_dir):
return parser.ParserConfig(
reference_resolver=reference_resolver,
duplicates=visitor.duplicates,
duplicate_of=visitor.duplicate_of,
tree=visitor.tree,
index=visitor.index,
reverse_index=visitor.reverse_index,
guide_index=guide_index,
base_dir=base_dir)
def run_extraction(self):
return extract(self._py_modules, self._do_not_descend_map)
def build(self, flags):
"""Actually build the docs."""
doc_index = build_doc_index(flags.src_dir)
visitor = self.run_extraction()
reference_resolver = self.make_reference_resolver(visitor, doc_index)
guide_index = _build_guide_index(
os.path.join(flags.src_dir, 'api_guides/python'))
parser_config = self.make_parser_config(visitor, reference_resolver,
guide_index, flags.base_dir)
output_dir = os.path.join(flags.output_dir, 'api_docs/python')
write_docs(output_dir, parser_config, yaml_toc=self.yaml_toc)
_other_docs(flags.src_dir, flags.output_dir, reference_resolver)
if parser.all_errors:
print('Errors during processing:\n ' + '\n '.join(parser.all_errors))
return 1
return 0
| apache-2.0 | 3,404,556,434,261,722,000 | 32.489712 | 80 | 0.631052 | false |
miyataken999/weblate | weblate/trans/admin.py | 1 | 8516 | # -*- coding: utf-8 -*-
#
# Copyright © 2012 - 2015 Michal Čihař <[email protected]>
#
# This file is part of Weblate <https://weblate.org/>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
from django.contrib import admin
from django.conf import settings
from django.utils.translation import ugettext_lazy as _
from weblate.trans.models import (
Project, SubProject, Translation, Advertisement,
Unit, Suggestion, Comment, Check, Dictionary, Change,
Source, WhiteboardMessage
)
class ProjectAdmin(admin.ModelAdmin):
list_display = (
'name', 'slug', 'web', 'list_owners', 'enable_acl', 'enable_hooks',
'num_vcs', 'num_strings', 'num_words', 'num_langs',
)
prepopulated_fields = {'slug': ('name',)}
search_fields = ['name', 'slug', 'web']
actions = ['update_from_git', 'update_checks', 'force_commit']
def list_owners(self, obj):
return ', '.join(obj.owners.values_list('username', flat=True))
list_owners.short_description = _('Owners')
def num_vcs(self, obj):
return obj.subproject_set.exclude(repo__startswith='weblate:/').count()
num_vcs.short_description = _('VCS repositories')
def num_strings(self, obj):
return obj.get_total()
num_strings.short_description = _('Source strings')
def num_words(self, obj):
return obj.get_total_words()
num_words.short_description = _('Source words')
def num_langs(self, obj):
return obj.get_language_count()
num_langs.short_description = _('Languages')
def update_from_git(self, request, queryset):
"""
Updates selected components from git.
"""
for project in queryset:
project.do_update(request)
self.message_user(request, "Updated %d git repos." % queryset.count())
update_from_git.short_description = _('Update VCS repository')
def update_checks(self, request, queryset):
"""
Recalculates checks for selected components.
"""
cnt = 0
units = Unit.objects.filter(
translation__subproject__project__in=queryset
)
for unit in units.iterator():
unit.run_checks()
cnt += 1
self.message_user(request, "Updated checks for %d units." % cnt)
update_checks.short_description = _('Update quality checks')
def force_commit(self, request, queryset):
"""
Commits pending changes for selected components.
"""
for project in queryset:
project.commit_pending(request)
self.message_user(
request,
"Flushed changes in %d git repos." % queryset.count()
)
force_commit.short_description = _('Commit pending changes')
class SubProjectAdmin(admin.ModelAdmin):
list_display = [
'name', 'slug', 'project', 'repo', 'branch', 'vcs', 'file_format'
]
prepopulated_fields = {'slug': ('name',)}
search_fields = ['name', 'slug', 'repo', 'branch']
list_filter = ['project', 'vcs', 'file_format']
actions = ['update_from_git', 'update_checks', 'force_commit']
def update_from_git(self, request, queryset):
"""
Updates selected components from git.
"""
for project in queryset:
project.do_update(request)
self.message_user(request, "Updated %d git repos." % queryset.count())
update_from_git.short_description = _('Update VCS repository')
def update_checks(self, request, queryset):
"""
Recalculates checks for selected components.
"""
cnt = 0
units = Unit.objects.filter(
translation__subproject__in=queryset
)
for unit in units.iterator():
unit.run_checks()
cnt += 1
self.message_user(
request,
"Updated checks for %d units." % cnt
)
update_checks.short_description = _('Update quality checks')
def force_commit(self, request, queryset):
"""
Commits pending changes for selected components.
"""
for project in queryset:
project.commit_pending(request)
self.message_user(
request,
"Flushed changes in %d git repos." % queryset.count()
)
force_commit.short_description = _('Commit pending changes')
class TranslationAdmin(admin.ModelAdmin):
list_display = [
'subproject', 'language', 'translated', 'total',
'fuzzy', 'revision', 'filename', 'enabled'
]
search_fields = [
'subproject__slug', 'language__code', 'revision', 'filename'
]
list_filter = ['enabled', 'subproject__project', 'subproject', 'language']
actions = ['enable_translation', 'disable_translation']
def enable_translation(self, request, queryset):
"""
Mass enabling of translations.
"""
queryset.update(enabled=True)
self.message_user(
request,
"Enabled %d translations." % queryset.count()
)
def disable_translation(self, request, queryset):
"""
Mass disabling of translations.
"""
queryset.update(enabled=False)
self.message_user(
request,
"Disabled %d translations." % queryset.count()
)
class UnitAdmin(admin.ModelAdmin):
list_display = ['source', 'target', 'position', 'fuzzy', 'translated']
search_fields = ['source', 'target', 'checksum']
list_filter = [
'translation__subproject',
'translation__language',
'fuzzy',
'translated'
]
class SuggestionAdmin(admin.ModelAdmin):
list_display = ['contentsum', 'target', 'project', 'language', 'user']
list_filter = ['project', 'language']
search_fields = ['contentsum', 'target']
class CommentAdmin(admin.ModelAdmin):
list_display = [
'contentsum', 'comment', 'user', 'project', 'language', 'user'
]
list_filter = ['project', 'language']
search_fields = ['contentsum', 'comment']
class CheckAdmin(admin.ModelAdmin):
list_display = ['contentsum', 'check', 'project', 'language', 'ignore']
search_fields = ['contentsum', 'check']
list_filter = ['check', 'project', 'ignore']
class DictionaryAdmin(admin.ModelAdmin):
list_display = ['source', 'target', 'project', 'language']
search_fields = ['source', 'target']
list_filter = ['project', 'language']
class ChangeAdmin(admin.ModelAdmin):
list_display = ['unit', 'user', 'timestamp']
date_hierarchy = 'timestamp'
list_filter = [
'unit__translation__subproject',
'unit__translation__subproject__project',
'unit__translation__language'
]
raw_id_fields = ('unit',)
class WhiteboardAdmin(admin.ModelAdmin):
list_display = ['message', 'project', 'subproject', 'language']
prepopulated_fields = {}
search_fields = ['message']
list_filter = ['project', 'language']
class AdvertisementAdmin(admin.ModelAdmin):
list_display = ['placement', 'date_start', 'date_end', 'text']
search_fields = ['text', 'note']
date_hierarchy = 'date_end'
class SourceAdmin(admin.ModelAdmin):
list_display = ['checksum', 'priority', 'timestamp']
date_hierarchy = 'timestamp'
# Register in admin interface
admin.site.register(Project, ProjectAdmin)
admin.site.register(SubProject, SubProjectAdmin)
admin.site.register(Advertisement, AdvertisementAdmin)
admin.site.register(WhiteboardMessage, WhiteboardAdmin)
# Show some controls only in debug mode
if settings.DEBUG:
admin.site.register(Translation, TranslationAdmin)
admin.site.register(Unit, UnitAdmin)
admin.site.register(Suggestion, SuggestionAdmin)
admin.site.register(Comment, CommentAdmin)
admin.site.register(Check, CheckAdmin)
admin.site.register(Dictionary, DictionaryAdmin)
admin.site.register(Change, ChangeAdmin)
admin.site.register(Source, SourceAdmin)
| gpl-3.0 | 8,613,016,902,550,696,000 | 32.124514 | 79 | 0.633267 | false |
adamcik/mopidy | mopidy/core/actor.py | 1 | 9861 | import collections
import itertools
import logging
import pykka
import mopidy
from mopidy import audio, backend, mixer
from mopidy.audio import PlaybackState
from mopidy.core.history import HistoryController
from mopidy.core.library import LibraryController
from mopidy.core.listener import CoreListener
from mopidy.core.mixer import MixerController
from mopidy.core.playback import PlaybackController
from mopidy.core.playlists import PlaylistsController
from mopidy.core.tracklist import TracklistController
from mopidy.internal import path, storage, validation, versioning
from mopidy.internal.models import CoreState
logger = logging.getLogger(__name__)
class Core(
pykka.ThreadingActor,
audio.AudioListener,
backend.BackendListener,
mixer.MixerListener,
):
library = None
"""An instance of :class:`~mopidy.core.LibraryController`"""
history = None
"""An instance of :class:`~mopidy.core.HistoryController`"""
mixer = None
"""An instance of :class:`~mopidy.core.MixerController`"""
playback = None
"""An instance of :class:`~mopidy.core.PlaybackController`"""
playlists = None
"""An instance of :class:`~mopidy.core.PlaylistsController`"""
tracklist = None
"""An instance of :class:`~mopidy.core.TracklistController`"""
def __init__(self, config=None, mixer=None, backends=None, audio=None):
super().__init__()
self._config = config
self.backends = Backends(backends)
self.library = pykka.traversable(
LibraryController(backends=self.backends, core=self)
)
self.history = pykka.traversable(HistoryController())
self.mixer = pykka.traversable(MixerController(mixer=mixer))
self.playback = pykka.traversable(
PlaybackController(audio=audio, backends=self.backends, core=self)
)
self.playlists = pykka.traversable(
PlaylistsController(backends=self.backends, core=self)
)
self.tracklist = pykka.traversable(TracklistController(core=self))
self.audio = audio
def get_uri_schemes(self):
"""Get list of URI schemes we can handle"""
futures = [b.uri_schemes for b in self.backends]
results = pykka.get_all(futures)
uri_schemes = itertools.chain(*results)
return sorted(uri_schemes)
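    # For example, with local and stream backends enabled a caller might
    # get something like ['file', 'http', 'local', 'm3u']; the exact list
    # depends on the installed backends (illustrative values only).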
def get_version(self):
"""Get version of the Mopidy core API"""
return versioning.get_version()
def reached_end_of_stream(self):
self.playback._on_end_of_stream()
def stream_changed(self, uri):
self.playback._on_stream_changed(uri)
def position_changed(self, position):
self.playback._on_position_changed(position)
def state_changed(self, old_state, new_state, target_state):
# XXX: This is a temporary fix for issue #232 while we wait for a more
# permanent solution with the implementation of issue #234. When the
# Spotify play token is lost, the Spotify backend pauses audio
# playback, but mopidy.core doesn't know this, so we need to update
# mopidy.core's state to match the actual state in mopidy.audio. If we
# don't do this, clients will think that we're still playing.
# We ignore cases when target state is set as this is buffering
# updates (at least for now) and we need to get #234 fixed...
if (
new_state == PlaybackState.PAUSED
and not target_state
and self.playback.get_state() != PlaybackState.PAUSED
):
self.playback.set_state(new_state)
self.playback._trigger_track_playback_paused()
def playlists_loaded(self):
# Forward event from backend to frontends
CoreListener.send("playlists_loaded")
def volume_changed(self, volume):
# Forward event from mixer to frontends
CoreListener.send("volume_changed", volume=volume)
def mute_changed(self, mute):
# Forward event from mixer to frontends
CoreListener.send("mute_changed", mute=mute)
def tags_changed(self, tags):
if not self.audio or "title" not in tags:
return
tags = self.audio.get_current_tags().get()
if not tags:
return
# TODO: this is a hack to make sure we don't emit stream title changes
# for plain tracks. We need a better way to decide if something is a
# stream.
if "title" in tags and tags["title"]:
title = tags["title"][0]
current_track = self.playback.get_current_track()
if current_track is not None and current_track.name != title:
self.playback._stream_title = title
CoreListener.send("stream_title_changed", title=title)
def _setup(self):
"""Do not call this function. It is for internal use at startup."""
try:
coverage = []
if self._config and "restore_state" in self._config["core"]:
if self._config["core"]["restore_state"]:
coverage = [
"tracklist",
"mode",
"play-last",
"mixer",
"history",
]
if len(coverage):
self._load_state(coverage)
except Exception as e:
logger.warn("Restore state: Unexpected error: %s", str(e))
def _teardown(self):
"""Do not call this function. It is for internal use at shutdown."""
try:
if self._config and "restore_state" in self._config["core"]:
if self._config["core"]["restore_state"]:
self._save_state()
except Exception as e:
logger.warn("Unexpected error while saving state: %s", str(e))
def _get_data_dir(self):
        # get or create the data directory for core
data_dir_path = (
path.expand_path(self._config["core"]["data_dir"]) / "core"
)
path.get_or_create_dir(data_dir_path)
return data_dir_path
def _get_state_file(self):
return self._get_data_dir() / "state.json.gz"
def _save_state(self):
"""
Save current state to disk.
"""
state_file = self._get_state_file()
logger.info("Saving state to %s", state_file)
data = {}
data["version"] = mopidy.__version__
data["state"] = CoreState(
tracklist=self.tracklist._save_state(),
history=self.history._save_state(),
playback=self.playback._save_state(),
mixer=self.mixer._save_state(),
)
storage.dump(state_file, data)
logger.debug("Saving state done")
def _load_state(self, coverage):
"""
Restore state from disk.
Load state from disk and restore it. Parameter ``coverage``
limits the amount of data to restore. Possible
values for ``coverage`` (list of one or more of):
- 'tracklist' fill the tracklist
- 'mode' set tracklist properties (consume, random, repeat, single)
- 'play-last' restore play state ('tracklist' also required)
- 'mixer' set mixer volume and mute state
- 'history' restore history
:param coverage: amount of data to restore
:type coverage: list of strings
"""
state_file = self._get_state_file()
logger.info("Loading state from %s", state_file)
data = storage.load(state_file)
try:
# Try only once. If something goes wrong, the next start is clean.
state_file.unlink()
except OSError:
logger.info("Failed to delete %s", state_file)
if "state" in data:
core_state = data["state"]
validation.check_instance(core_state, CoreState)
self.history._load_state(core_state.history, coverage)
self.tracklist._load_state(core_state.tracklist, coverage)
self.mixer._load_state(core_state.mixer, coverage)
# playback after tracklist
self.playback._load_state(core_state.playback, coverage)
logger.debug("Loading state done")
class Backends(list):
def __init__(self, backends):
super().__init__(backends)
self.with_library = collections.OrderedDict()
self.with_library_browse = collections.OrderedDict()
self.with_playback = collections.OrderedDict()
self.with_playlists = collections.OrderedDict()
backends_by_scheme = {}
def name(b):
return b.actor_ref.actor_class.__name__
for b in backends:
try:
has_library = b.has_library().get()
has_library_browse = b.has_library_browse().get()
has_playback = b.has_playback().get()
has_playlists = b.has_playlists().get()
except Exception:
self.remove(b)
logger.exception(
"Fetching backend info for %s failed",
b.actor_ref.actor_class.__name__,
                )
                continue
for scheme in b.uri_schemes.get():
assert scheme not in backends_by_scheme, (
f"Cannot add URI scheme {scheme!r} for {name(b)}, "
f"it is already handled by {name(backends_by_scheme[scheme])}"
)
backends_by_scheme[scheme] = b
if has_library:
self.with_library[scheme] = b
if has_library_browse:
self.with_library_browse[scheme] = b
if has_playback:
self.with_playback[scheme] = b
if has_playlists:
self.with_playlists[scheme] = b
| apache-2.0 | -2,830,826,066,537,320,400 | 34.728261 | 82 | 0.594463 | false |
Gaha/intranet | ce/models.py | 1 | 2664 | #-*- coding:utf-8 -*-
from datetime import datetime
from django.db import models
from django.utils.translation import ugettext_lazy as _
class Agent(models.Model):
LISTE_CONTRAT = (
('CDI', 'CDI'),
('CDD', 'CDD'),
('PRE', 'Prestataire'),
('INT', 'Intérime'),
)
nom = models.CharField(max_length=50)
prenom = models.CharField(max_length=50)
contrat = models.CharField(max_length=3, choices=LISTE_CONTRAT, default='CDI')
# c'est ce qui s'affiche quand on print, notament dans admin
def __unicode__(self):
return u'%s %s' % (self.nom, self.prenom)
class Mendat(models.Model):
LISTE_MENDAT = (
('DUP_PR', 'DUP Président'),
('DUP_SE', 'DUP Secrétaire'),
('DUP_TR', 'DUP Trésorier'),
('CA_TIT', 'DUP Cadre Titulaire'),
('CA_SUP', 'DUP Cadre Suppléant'),
('AG_TIT', 'DUP Agent Titulaire'),
('AG_SUP', 'DUP Agent Suppléant'),
('DS', 'Délégués Syndical'),
('CHS_PR', 'CHSCT Président'),
('CHS_SE', 'CHSCT Secrétaire'),
('CHS_ME', 'CHSCT Membres'),
)
nom = models.ForeignKey('Agent')
mendat = models.CharField(max_length=6, choices=LISTE_MENDAT)
def __unicode__(self):
return u'%s - %s' % (self.nom, self.mendat)
class Commission(models.Model):
nom = models.CharField(max_length=50)
def __unicode__(self):
return u'%s' % (self.nom)
class CommissionMembre(models.Model):
LISTE_MEMBRE = (
('PRE', 'Président'),
('DUP', 'Membre DUP'),
('AGE', 'Membre Agent')
)
commission = models.ForeignKey('Commission')
agent = models.ForeignKey('Agent')
membre = models.CharField(max_length=3, choices=LISTE_MEMBRE)
def __unicode__(self):
return u'%s : %s - %s' % (self.commission, self.agent, self.membre)
class Activitee(models.Model):
nom = models.CharField(max_length=50)
commission = models.ForeignKey('Commission')
date = models.DateField()
heure = models.TimeField()
def __unicode__(self):
return u'%s : %s' % (self.date, self.nom)
class Participation(models.Model):
LISTE_ETAT = (
('IN', 'Inscrit'),
('AN', 'Annuler'),
)
nom = models.ForeignKey('Agent')
activitee = models.ForeignKey('Activitee')
agent = models.IntegerField(default=1)
conjoint = models.IntegerField(default=0)
enfant = models.IntegerField(default=0)
externe = models.IntegerField(default=0)
etat = models.CharField(max_length=2, choices=LISTE_ETAT, default='IN')
def __unicode__(self):
return u'%s : %s' % (self.activitee, self.nom)
| gpl-2.0 | 4,657,869,429,667,921,000 | 27.826087 | 82 | 0.597662 | false |
ccrouch/tuskar | tuskar/api/controllers/v1/types/__init__.py | 1 | 1311 | # vim: tabstop=4 shiftwidth=4 softtabstop=4
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from tuskar.api.controllers.v1.types.base import Base
from tuskar.api.controllers.v1.types.capacity import Capacity
from tuskar.api.controllers.v1.types.chassis import Chassis
from tuskar.api.controllers.v1.types.error import Error
from tuskar.api.controllers.v1.types.flavor import Flavor
from tuskar.api.controllers.v1.types.link import Link
from tuskar.api.controllers.v1.types.node import Node
from tuskar.api.controllers.v1.types.rack import Rack
from tuskar.api.controllers.v1.types.relation import Relation
from tuskar.api.controllers.v1.types.resource_class import ResourceClass
__all__ = (Base, Capacity, Chassis, Error, Flavor, Link, Node, Rack,
Relation, ResourceClass)
| apache-2.0 | 4,752,457,954,367,442,000 | 49.423077 | 78 | 0.77193 | false |
effigies/mne-python | examples/realtime/ftclient_rt_average.py | 2 | 2816 | """
========================================================
Compute real-time evoked responses with FieldTrip client
========================================================
This example demonstrates how to connect the MNE real-time
system to the Fieldtrip buffer using FieldTripClient class.
This example was tested in simulation mode
neuromag2ft --file MNE-sample-data/MEG/sample/sample_audvis_raw.fif
using a modified version of neuromag2ft available at
http://neuro.hut.fi/~mainak/neuromag2ft-2.0.0.zip
to run the FieldTrip buffer. Then running this example acquires the
data on the client side.
Since the Fieldtrip buffer does not contain all the
measurement information required by the MNE real-time processing
pipeline, an info dictionary must be provided to instantiate FieldTripClient.
Alternatively, the MNE-Python script will try to guess the missing
measurement info from the Fieldtrip Header object.
Together with RtEpochs, this can be used to compute evoked
responses using moving averages.
"""
print(__doc__)
# Author: Mainak Jas <[email protected]>
#
# License: BSD (3-clause)
import mne
from mne.viz import plot_events
from mne.realtime import FieldTripClient, RtEpochs
import matplotlib.pyplot as plt
# select the left-auditory condition
event_id, tmin, tmax = 1, -0.2, 0.5
# user must provide list of bad channels because
# FieldTrip header object does not provide that
bads = ['MEG 2443', 'EEG 053']
plt.ion() # make plot interactive
_, ax = plt.subplots(2, 1, figsize=(8, 8)) # create subplots
with FieldTripClient(host='localhost', port=1972,
tmax=150, wait_max=10) as rt_client:
# get measurement info guessed by MNE-Python
raw_info = rt_client.get_measurement_info()
# select gradiometers
picks = mne.pick_types(raw_info, meg='grad', eeg=False, eog=True,
stim=True, exclude=bads)
# create the real-time epochs object
rt_epochs = RtEpochs(rt_client, event_id, tmin, tmax,
stim_channel='STI 014', picks=picks,
reject=dict(grad=4000e-13, eog=150e-6),
decim=1, isi_max=10.0, proj=None)
# start the acquisition
rt_epochs.start()
for ii, ev in enumerate(rt_epochs.iter_evoked()):
print("Just got epoch %d" % (ii + 1))
if ii > 0:
ev += evoked
evoked = ev
ax[0].cla(), ax[1].cla() # clear axis
plot_events(rt_epochs.events[-5:], sfreq=ev.info['sfreq'],
first_samp=-rt_client.tmin_samp, axes=ax[0])
evoked.plot(axes=ax[1]) # plot on second subplot
ax[1].set_title('Evoked response for gradiometer channels'
'(event_id = %d)' % event_id)
plt.pause(0.05)
plt.draw()
plt.close()
| bsd-3-clause | 8,140,795,396,231,081,000 | 30.288889 | 77 | 0.638849 | false |
shiquanwang/numba | numba/tests/test_struct.py | 1 | 3074 | import os
from numba import *
from numba import error
import numpy as np
#------------------------------------------------------------------------
# Structs as locals
#------------------------------------------------------------------------
struct_type = struct(a=char.pointer(), b=int_)
@autojit(backend='ast', locals=dict(value=struct_type))
def struct_local():
value.a = "foo"
value.b = 10
return value.a, value.b
@autojit(backend='ast', locals=dict(value=struct_type))
def struct_local_inplace():
value.a = "foo"
value.b = 10
value.b += 10.0
return value.a, value.b
# TODO: structs from objects
#@autojit
#def struct_as_arg(arg):
# arg.a = "foo"
# return arg.a
#
#@autojit(backend='ast', locals=dict(value=struct_type))
#def call_struct_as_arg():
# return struct_as_arg(value)
@autojit(backend='ast', locals=dict(value=struct_type))
def struct_local_copy():
value.a = "foo"
value.b = 10
value2 = value
return value2.a, value2.b
def test_struct_locals():
result = struct_local()
assert result == ("foo", 10), result
result = struct_local_inplace()
assert result == ("foo", 20), result
# result = call_struct_as_arg()
# assert result == "foo", result
result = struct_local_copy()
assert result == ("foo", 10), result
#------------------------------------------------------------------------
# Struct indexing
#------------------------------------------------------------------------
@autojit(backend='ast', locals=dict(value=struct_type))
def struct_indexing_strings():
value['a'] = "foo"
value['b'] = 10
return value['a'], value['b']
@autojit(backend='ast', locals=dict(value=struct_type))
def struct_indexing_ints():
value[0] = "foo"
value[1] = 10
return value[0], value[1]
def test_struct_indexing():
assert struct_indexing_strings() == ("foo", 10)
assert struct_indexing_ints() == ("foo", 10)
#------------------------------------------------------------------------
# Record arrays
#------------------------------------------------------------------------
@autojit(backend='ast')
def record_array(array):
array[0].a = 4
array[0].b = 5.0
def test_record_array():
struct_type = struct([('a', int32), ('b', double)])
struct_dtype = struct_type.get_dtype()
array = np.empty((1,), dtype=struct_dtype)
record_array(array)
assert array[0]['a'] == 4, array[0]
assert array[0]['b'] == 5.0, array[0]
#------------------------------------------------------------------------
# Object Coercion
#------------------------------------------------------------------------
struct_type = struct([('a', int_), ('b', double)])
@autojit(backend='ast', locals=dict(value=struct_type))
def coerce_to_obj():
value.a = 10
value.b = 20.2
return object_(value)
def test_coerce_to_obj():
    print(coerce_to_obj())
if __name__ == "__main__":
    print(struct_local_copy())
# print call_struct_as_arg()
test_struct_locals()
test_record_array()
test_coerce_to_obj()
test_struct_indexing()
| bsd-2-clause | -5,321,238,675,857,827,000 | 25.5 | 73 | 0.503904 | false |
bigmlcom/bigmler | bigmler/defaults.py | 1 | 19989 | # -*- coding: utf-8 -*-
#
# Copyright 2012-2021 BigML
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Defaults parser for BigMLer
"""
import configparser
DEFAULTS_FILE = 'bigmler.ini'
FLAGS = {
'BigMLer': [
{'flag': 'debug', 'type': 'boolean'},
{'flag': 'dev', 'type': 'boolean'},
{'flag': 'username', 'type': 'string'},
{'flag': 'api_key', 'type': 'string'},
{'flag': 'train', 'type': 'string'},
{'flag': 'test', 'type': 'string'},
{'flag': 'output', 'type': 'string'},
{'flag': 'objective', 'type': 'string'},
{'flag': 'category', 'type': 'int'},
{'flag': 'description', 'type': 'string'},
{'flag': 'field_names', 'type': 'string'},
{'flag': 'field_attributes', 'type': 'string'},
{'flag': 'types', 'type': 'string'},
{'flag': 'dataset_fields', 'type': 'string'},
{'flag': 'json_filter', 'type': 'string'},
{'flag': 'lisp_filter', 'type': 'string'},
{'flag': 'model_fields', 'type': 'string'},
{'flag': 'train_header', 'type': 'boolean'},
{'flag': 'test_header', 'type': 'boolean'},
{'flag': 'name', 'type': 'string'},
{'flag': 'source', 'type': 'string'},
{'flag': 'dataset', 'type': 'string'},
{'flag': 'model', 'type': 'string'},
{'flag': 'model_file', 'type': 'string'},
{'flag': 'remote', 'type': 'boolean'},
{'flag': 'models', 'type': 'string'},
{'flag': 'datasets', 'type': 'string'},
{'flag': 'pruning', 'type': 'string'},
{'flag': 'number_of_models', 'type': 'int'},
{'flag': 'sample_rate', 'type': 'float'},
{'flag': 'replacement', 'type': 'boolean'},
{'flag': 'max_parallel_models', 'type': 'int'},
{'flag': 'max_batch_models', 'type': 'int'},
{'flag': 'randomize', 'type': 'boolean'},
{'flag': 'no_tag', 'type': 'boolean'},
{'flag': 'tag', 'type': 'string'},
{'flag': 'model_tag', 'type': 'string'},
{'flag': 'public_dataset', 'type': 'boolean'},
{'flag': 'black_box', 'type': 'boolean'},
{'flag': 'white_box', 'type': 'boolean'},
{'flag': 'model_price', 'type': 'float'},
{'flag': 'dataset_price', 'type': 'float'},
{'flag': 'cpp', 'type': 'float'},
{'flag': 'progress_bar', 'type': 'boolean'},
{'flag': 'resources_log', 'type': 'string'},
{'flag': 'from_file', 'type': 'string'},
{'flag': 'source_tag', 'type': 'string'},
{'flag': 'dataset_tag', 'type': 'string'},
{'flag': 'prediction_tag', 'type': 'string'},
{'flag': 'evaluation_tag', 'type': 'string'},
{'flag': 'ensemble_tag', 'type': 'string'},
{'flag': 'all_tag', 'type': 'string'},
{'flag': 'locale', 'type': 'string'},
{'flag': 'combine_votes', 'type': 'string'},
{'flag': 'plurality', 'type': 'string'},
{'flag': 'verbosity', 'type': 'int'},
{'flag': 'fields_map', 'type': 'string'},
{'flag': 'clear_logs', 'type': 'boolean'},
{'flag': 'cross_validation_rate', 'type': 'float'},
{'flag': 'number_of_evaluations', 'type': 'int'},
{'flag': 'store', 'type': 'boolean'},
{'flag': 'test_split', 'type': 'float'},
{'flag': 'ensemble', 'type': 'string'},
{'flag': 'ensemble_file', 'type': 'string'},
{'flag': 'prediction_info', 'type': 'string'},
{'flag': 'max_parallel_evaluations', 'type': 'int'},
{'flag': 'test_separator', 'type': 'string'},
{'flag': 'multi_label', 'type': 'boolean'},
{'flag': 'labels', 'type': 'string'},
{'flag': 'label_separator', 'type': 'string'},
{'flag': 'training_separator', 'type': 'string'},
{'flag': 'prediction_header', 'type': 'boolean'},
{'flag': 'prediction_fields', 'type': 'string'},
{'flag': 'seed', 'type': 'string'},
{'flag': 'max_parallel_ensembles', 'type': 'int'},
{'flag': 'ensembles', 'type': 'string'},
{'flag': 'threshold', 'type': 'int'},
{'flag': 'threshold_class', 'type': 'string'},
{'flag': 'max_categories', 'type': 'int'},
{'flag': 'test_field_attributes', 'type': 'string'},
{'flag': 'test_types', 'type': 'string'},
{'flag': 'test_source', 'type': 'string'},
{'flag': 'test_dataset', 'type': 'string'},
{'flag': 'no_batch', 'type': 'boolean'},
{'flag': 'dataset_attributes', 'type': 'string'},
{'flag': 'new_fields', 'type': 'string'},
{'flag': 'model_attributes', 'type': 'string'},
{'flag': 'node_threshold', 'type': 'int'},
{'flag': 'multi_label_fields', 'type': 'string'},
{'flag': 'ensemble_attributes', 'type': 'string'},
{'flag': 'source_attributes', 'type': 'string'},
{'flag': 'evaluation_attributes', 'type': 'string'},
{'flag': 'batch_prediction_attributes', 'type': 'string'},
{'flag': 'batch_prediction_tag', 'type': 'string'},
{'flag': 'balance', 'type': 'boolean'},
{'flag': 'weight_field', 'type': 'string'},
{'flag': 'objective_weights', 'type': 'string'},
{'flag': 'label_aggregates', 'type': 'string'},
{'flag': 'missing_strategy', 'type': 'string'},
{'flag': 'other_than', 'type': 'string'},
{'flag': 'newer_than', 'type': 'string'},
{'flag': 'multi_dataset', 'type': 'boolean'},
{'flag': 'multi_dataset_attributes', 'type': 'string'},
{'flag': 'shared', 'type': 'boolean'},
{'flag': 'reports', 'type': 'string'},
{'flag': 'upload', 'type': 'boolean'},
{'flag': 'dataset_off', 'type': 'boolean'},
{'flag': 'args_separator', 'type': 'string'},
{'flag': 'cluster_tag', 'type': 'string'},
{'flag': 'centroid_tag', 'type': 'string'},
{'flag': 'batch_centroid_tag', 'type': 'string'},
{'flag': 'to_csv', 'type': 'string'},
{'flag': 'resource_types', 'type': 'string'},
{'flag': 'dry_run', 'type': 'boolean'},
{'flag': 'anomaly_tag', 'type': 'string'},
{'flag': 'anomaly_score_tag', 'type': 'string'},
{'flag': 'project_tag', 'type': 'string'},
{'flag': 'fast', 'type': 'boolean'},
{'flag': 'project', 'type': 'string'},
{'flag': 'project_id', 'type': 'string'},
{'flag': 'no_csv', 'type': 'boolean'},
{'flag': 'to_dataset', 'type': 'boolean'},
{'flag': 'median', 'type': 'boolean'},
{'flag': 'random_candidates', 'type': 'int'},
{'flag': 'status', 'type': 'string'},
{'flag': 'export_fields', 'type': 'string'},
{'flag': 'import_fields', 'type': 'string'},
{'flag': 'only_execution', 'type': 'boolean'},
{'flag': 'ensemble_sample_seed', 'type': 'string'},
{'flag': 'ensemble_sample_rate', 'type': 'float'},
{'flag': 'ensemble_sample_replacement', 'type': 'boolean'},
{'flag': 'boosting', 'type': 'boolean'},
{'flag': 'iterations', 'type': 'int'},
{'flag': 'early_holdout', 'type': 'float'},
{'flag': 'early_out_of_bag', 'type': 'boolean'},
{'flag': 'learning_rate', 'type': 'float'},
{'flag': 'operating_point', 'type': 'string'},
{'flag': 'step_out_of_bag', 'type': 'boolean'},
{'flag': 'org_project', 'type': 'string'},
{'flag': 'split_field', 'type': 'string'},
{'flag': 'float_field', 'type': 'string'}],
'BigMLer analyze': [
{'flag': 'k-fold', 'type': 'int'},
{'flag': 'cv', 'type': 'boolean'},
{'flag': 'features', 'type': 'boolean'},
{'flag': 'maximize', 'type': 'string'},
{'flag': 'optimize', 'type': 'string'},
{'flag': 'nodes', 'type': 'boolean'},
{'flag': 'max_nodes', 'type': 'int'},
{'flag': 'min_nodes', 'type': 'int'},
{'flag': 'nodes_step', 'type': 'int'},
{'flag': 'random_fields', 'type': 'boolean'},
{'flag': 'exclude_features', 'type': 'string'},
{'flag': 'optimize_category', 'type': 'string'},
{'flag': 'predictions_csv', 'type': 'boolean'}],
'BigMLer cluster': [
{'flag': 'cluster_fields', 'type': 'string'},
{'flag': 'cluster', 'type': 'string'},
{'flag': 'cluster_file', 'type': 'string'},
{'flag': 'clusters', 'type': 'string'},
{'flag': 'k', 'type': 'int'},
{'flag': 'no_cluster', 'type': 'boolean'},
{'flag': 'cluster_attributes', 'type': 'string'},
{'flag': 'centroid_attributes', 'type': 'string'},
{'flag': 'batch_centroid_attributes', 'type': 'string'},
{'flag': 'cluster_datasets', 'type': 'string'},
{'flag': 'cluster_models', 'type': 'string'},
{'flag': 'summary_fields', 'type': 'string'}],
'BigMLer anomaly': [
{'flag': 'anomaly_fields', 'type': 'string'},
{'flag': 'anomaly', 'type': 'string'},
{'flag': 'anomaly_file', 'type': 'string'},
{'flag': 'anomalies', 'type': 'string'},
{'flag': 'no_anomaly', 'type': 'boolean'},
{'flag': 'anomaly_attributes', 'type': 'string'},
{'flag': 'anomaly_score_attributes', 'type': 'string'},
{'flag': 'batch_anomaly_score_attributes', 'type': 'string'},
{'flag': 'score', 'type': 'boolean'},
{'flag': 'anomalies-dataset', 'type': 'string'},
{'flag': 'top_n', 'type': 'int'},
{'flag': 'forest_size', 'type': 'int'}],
'BigMLer sample': [
{'flag': 'anomaly_fields', 'type': 'string'},
{'flag': 'sample', 'type': 'string'},
{'flag': 'sample_file', 'type': 'string'},
{'flag': 'samples', 'type': 'string'},
{'flag': 'no_sample', 'type': 'boolean'},
{'flag': 'sample_attributes', 'type': 'string'},
{'flag': 'sample_header', 'type': 'boolean'},
{'flag': 'fields_filter', 'type': 'string'},
{'flag': 'row_index', 'type': 'int'},
{'flag': 'mode', 'type': 'string'},
{'flag': 'occurrence', 'type': 'int'},
{'flag': 'precision', 'type': 'int'},
{'flag': 'rows', 'type': 'int'},
{'flag': 'row_offset', 'type': 'int'},
{'flag': 'row_order_by', 'type': 'string'},
{'flag': 'row_fields', 'type': 'string'},
{'flag': 'stat_fields', 'type': 'string'},
{'flag': 'stat_field', 'type': 'string'},
{'flag': 'unique', 'type': 'boolean'}],
'BigMLer report': [
{'flag': 'from_dir', 'type': 'string'},
{'flag': 'port', 'type': 'int'},
{'flag': 'no_server', 'type': 'boolean'}],
'BigMLer reify': [
{'flag': 'language', 'type': 'string'},
{'flag': 'add_fields', 'type': 'boolean'}],
'BigMLer project': [
{'flag': 'project_attributes', 'type': 'string'}],
'BigMLer association': [
{'flag': 'association_fields', 'type': 'string'},
{'flag': 'association', 'type': 'string'},
{'flag': 'association_file', 'type': 'string'},
{'flag': 'associations', 'type': 'string'},
{'flag': 'association_k', 'type': 'int'},
{'flag': 'no_association', 'type': 'boolean'},
{'flag': 'association_attributes', 'type': 'string'}],
'BigMLer logistic regression': [
{'flag': 'logistic_fields', 'type': 'string'},
{'flag': 'logistic_regression', 'type': 'string'},
{'flag': 'logistic_regression_file', 'type': 'string'},
{'flag': 'logistic_regressions', 'type': 'string'},
{'flag': 'no_logistic_regression', 'type': 'boolean'},
{'flag': 'logistic_regression_attributes', 'type': 'string'},
{'flag': 'bias', 'type': 'boolean'},
{'flag': 'balance_fields', 'type': 'boolean'},
{'flag': 'lr_c', 'type': 'float'},
{'flag': 'eps', 'type': 'float'},
{'flag': 'field_codings', 'type': 'string'},
{'flag': 'missing_numerics', 'type': 'boolean'},
{'flag': 'normalize', 'type': 'boolean'}],
'BigMLer linear regression': [
{'flag': 'linear_fields', 'type': 'string'},
{'flag': 'linear_regression', 'type': 'string'},
{'flag': 'linear_regression_file', 'type': 'string'},
{'flag': 'linear_regressions', 'type': 'string'},
{'flag': 'no_linear_regression', 'type': 'boolean'},
{'flag': 'linear_regression_attributes', 'type': 'string'},
{'flag': 'bias', 'type': 'boolean'},
{'flag': 'field_codings', 'type': 'string'}],
'BigMLer topic model': [
{'flag': 'topic_fields', 'type': 'string'},
{'flag': 'topic_model', 'type': 'string'},
{'flag': 'topic_model_file', 'type': 'string'},
{'flag': 'topic_models', 'type': 'string'},
{'flag': 'no_topic_model', 'type': 'boolean'},
{'flag': 'topic_model_attributes', 'type': 'string'},
{'flag': 'bigrams', 'type': 'boolean'},
{'flag': 'case_sensitive', 'type': 'boolean'},
{'flag': 'excluded_terms', 'type': 'string'},
{'flag': 'number_of_topics', 'type': 'int'},
{'flag': 'term_limit', 'type': 'int'},
{'flag': 'top_n_terms', 'type': 'int'},
{'flag': 'use_stopwords', 'type': 'boolean'}],
'BigMLer time-series': [
{'flag': 'objectives', 'type': 'string'},
{'flag': 'time_series', 'type': 'string'},
{'flag': 'time_series_file', 'type': 'string'},
{'flag': 'time_series_set', 'type': 'string'},
{'flag': 'no_time_series', 'type': 'boolean'},
{'flag': 'time_series_attributes', 'type': 'string'},
{'flag': 'all_numeric_objectives', 'type': 'boolean'},
{'flag': 'damped_trend', 'type': 'boolean'},
{'flag': 'default_numeric_value', 'type': 'string'},
{'flag': 'error', 'type': 'int'},
{'flag': 'period', 'type': 'int'},
{'flag': 'field_parameters', 'type': 'string'},
{'flag': 'range', 'type': 'string'},
{'flag': 'seasonality', 'type': 'int'},
{'flag': 'trend', 'type': 'int'},
{'flag': 'time_start', 'type': 'int'},
{'flag': 'time_end', 'type': 'int'},
{'flag': 'time_interval', 'type': 'int'},
{'flag': 'time_interval_unit', 'type': 'string'},
{'flag': 'interval', 'type': 'boolean'}],
'BigMLer deepnet': [
{'flag': 'deepnet_fields', 'type': 'string'},
{'flag': 'deepnet', 'type': 'string'},
{'flag': 'deepnet_file', 'type': 'string'},
{'flag': 'deepnets', 'type': 'string'},
{'flag': 'no_deepnet', 'type': 'boolean'},
{'flag': 'batch_normalization', 'type': 'boolean'},
{'flag': 'default_numeric_value', 'type': 'string'},
{'flag': 'dropout_rate', 'type': 'float'},
{'flag': 'hidden_layers', 'type': 'string'},
{'flag': 'learn_residuals', 'type': 'boolean'},
{'flag': 'learning_rate', 'type': 'float'},
{'flag': 'max_iterations', 'type': 'int'},
{'flag': 'max_training_time', 'type': 'int'},
{'flag': 'number_of_hidden_layers', 'type': 'int'},
{'flag': 'number_of_model_candidates', 'type': 'int'},
{'flag': 'search', 'type': 'boolean'},
{'flag': 'suggest_structure', 'type': 'boolean'},
{'flag': 'missing_numerics', 'type': 'boolean'},
{'flag': 'no_missing_numerics', 'type': 'boolean'},
{'flag': 'tree_embedding', 'type': 'boolean'},
{'flag': 'no_balance_fields', 'type': 'boolean'},
{'flag': 'deepnet_attributes', 'type': 'string'}],
'BigMLer execute': [
{'flag': 'script', 'type': 'string'},
{'flag': 'library', 'type': 'string'},
{'flag': 'execution', 'type': 'string'},
{'flag': 'code_file', 'type': 'string'},
{'flag': 'embedded_imports', 'type': 'string'},
{'flag': 'output', 'type': 'string'},
{'flag': 'imports', 'type': 'string'},
{'flag': 'inputs', 'type': 'string'},
{'flag': 'declare_inputs', 'type': 'string'},
{'flag': 'declare_outputs', 'type': 'string'},
{'flag': 'input_maps', 'type': 'string'},
{'flag': 'outputs', 'type': 'string'},
{'flag': 'creation_defaults', 'type': 'string'},
{'flag': 'no_execute', 'type': 'boolean'}],
'BigMLer whizzml': [
{'flag': 'package_dir', 'type': 'string'},
{'flag': 'embed_libs', 'type': 'boolean'}],
'BigMLer export': [
{'flag': 'language', 'type': 'string'}],
'BigMLer retrain': [
{'flag': 'model_type', 'type': 'string'}],
'BigMLer PCA': [
{'flag': 'pca_fields', 'type': 'string'},
{'flag': 'pca', 'type': 'string'},
{'flag': 'pca_file', 'type': 'string'},
{'flag': 'pcas', 'type': 'string'},
{'flag': 'no_pca', 'type': 'boolean'},
{'flag': 'max_components', 'type': 'int'},
{'flag': 'variance_threshold', 'type': 'float'},
{'flag': 'pca_attributes', 'type': 'string'}],
'BigMLer Fusion': [
{'flag': 'fusion_models', 'type': 'string'},
{'flag': 'fusion_models_file', 'type': 'string'},
{'flag': 'fusion_file', 'type': 'string'},
{'flag': 'fusions', 'type': 'string'},
{'flag': 'fusion', 'type': 'string'},
{'flag': 'fusion_attributes', 'type': 'string'}],
'BigMLer dataset': [
{'flag': 'file', 'type': 'string'},
{'flag': 'merge', 'type': 'boolean'},
{'flag': 'juxtapose', 'type': 'boolean'},
{'flag': 'sql_query', 'type': 'string'},
{'flag': 'json_query', 'type': 'string'},
{'flag': 'sql_output_fields', 'type': 'string'}],
'BigMLer connector': [
{'flag': 'connector_attributes', 'type': 'string'},
{'flag': 'engine', 'type': 'string'},
{'flag': 'host', 'type': 'string'},
{'flag': 'hosts', 'type': 'string'},
{'flag': 'port', 'type': 'int'},
{'flag': 'user', 'type': 'string'},
{'flag': 'password', 'type': 'string'},
{'flag': 'database', 'type': 'string'}],}
def get_user_defaults(defaults_file=DEFAULTS_FILE):
"""Looks for a defaults file and returns its contents
"""
if defaults_file is None:
defaults_file = DEFAULTS_FILE
try:
open(defaults_file).close()
config = configparser.ConfigParser()
config.read(defaults_file)
defaults = parse_user_defaults(config)
except IOError:
defaults = {}
for section in FLAGS:
defaults[section] = {}
return defaults
def parse_user_defaults(config):
"""Reads default values from file
"""
config_get = {'boolean': config.getboolean,
'float': config.getfloat,
'int': config.getint,
'string': config.get}
defaults = {}
for section in FLAGS:
defaults[section] = {}
for argument in FLAGS[section]:
try:
value = config_get[argument['type']](section,
argument['flag'])
defaults[section].update({argument['flag']: value})
except configparser.Error:
pass
return defaults
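# A bigmler.ini fragment matching the parser above (illustrative values):
#
#   [BigMLer]
#   dev = true
#   train = data/iris.csv
#   number_of_models = 10
#
#   [BigMLer analyze]
#   k-fold = 5
#
# get_user_defaults('bigmler.ini') then returns these values coerced to
# the types declared in FLAGS (booleans, ints, floats and strings).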
| apache-2.0 | 968,097,636,733,711,100 | 46.032941 | 75 | 0.495473 | false |
fastinetserver/portage-idfetch | pym/portage/tests/dep/test_match_from_list.py | 1 | 1287 | # test_match_from_list.py -- Portage Unit Testing Functionality
# Copyright 2006 Gentoo Foundation
# Distributed under the terms of the GNU General Public License v2
from portage.tests import TestCase
from portage.dep import match_from_list
class AtomCmpEqualGlob(TestCase):
""" A simple testcase for =* glob matching
"""
def testEqualGlobPass(self):
tests = [ ("=sys-apps/portage-45*", "sys-apps/portage-045" ),
("=sys-fs/udev-1*", "sys-fs/udev-123"),
("=sys-fs/udev-4*", "sys-fs/udev-456" ) ]
# I need to look up the cvs syntax
# ("=sys-fs/udev_cvs*","sys-fs/udev_cvs_pre4" ) ]
for test in tests:
self.assertEqual( len(match_from_list( test[0], [test[1]] )), 1 )
def testEqualGlobFail(self):
tests = [ ("=sys-apps/portage-2*", "sys-apps/portage-2.1" ),
("=sys-apps/portage-2.1*", "sys-apps/portage-2.1.2" ) ]
for test in tests:
try:
self.assertEqual( len( match_from_list( test[0], [test[1]] ) ), 1 )
except TypeError: # failure is ok here
pass
| gpl-2.0 | -8,384,553,162,572,872,000 | 41.9 | 99 | 0.509713 | false |
justmedude/librenms | services-wrapper.py | 1 | 13762 | #! /usr/bin/env python3
"""
services-wrapper A small tool which wraps around check-services.php and tries to
guide the services process with a more modern approach with a
Queue and workers.
Based on the original version of poller-wrapper.py by Job Snijders
Author: Neil Lathwood <[email protected]>
Orsiris de Jong <[email protected]>
Date: Oct 2019
Usage: This program accepts one command line argument: the number of threads
that should run simultaneously. If no argument is given it will assume
a default of 1 thread.
Ubuntu Linux: apt-get install python-mysqldb
FreeBSD: cd /usr/ports/*/py-MySQLdb && make install clean
RHEL 7: yum install MySQL-python
RHEL 8: dnf install mariadb-connector-c-devel gcc && python -m pip install mysqlclient
Tested on: Python 3.6.8 / PHP 7.2.11 / CentOS 8
License: This program is free software: you can redistribute it and/or modify it
under the terms of the GNU General Public License as published by the
Free Software Foundation, either version 3 of the License, or (at your
option) any later version.
This program is distributed in the hope that it will be useful, but
WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General
Public License for more details.
You should have received a copy of the GNU General Public License along
with this program. If not, see http://www.gnu.org/licenses/.
LICENSE.txt contains a copy of the full GPLv3 licensing conditions.
"""
import LibreNMS.library as LNMS
try:
import json
import os
import queue
import subprocess
import sys
import threading
import time
from optparse import OptionParser
except ImportError as exc:
print('ERROR: missing one or more of the following python modules:')
print('threading, queue, sys, subprocess, time, os, json')
print('ERROR: %s' % exc)
sys.exit(2)
APP_NAME = "services_wrapper"
LOG_FILE = "logs/" + APP_NAME + ".log"
_DEBUG = False
servicedisco = False
real_duration = 0
service_devices = 0
"""
Threading helper functions
"""
# (c) 2015, GPLv3, Daniel Preussker <[email protected]> <<<EOC0
def memc_alive():
try:
global memc
key = str(uuid.uuid4())
memc.set('poller.ping.' + key, key, 60)
if memc.get('poller.ping.' + key) == key:
memc.delete('poller.ping.' + key)
return True
else:
return False
except:
return False
def memc_touch(key, time):
try:
global memc
val = memc.get(key)
memc.set(key, val, time)
except:
pass
def get_time_tag(step):
ts = int(time.time())
return ts - ts % step
#EOC0
"""
A separate queue and a single worker for printing information to the screen prevents
the good old joke:
Some people, when confronted with a problem, think,
"I know, I'll use threads," and then they two they hav erpoblesms.
"""
def printworker():
nodeso = 0
while True:
# (c) 2015, GPLv3, Daniel Preussker <[email protected]> <<<EOC4
global IsNode
global servicedisco
if servicedisco:
if not IsNode:
memc_touch('service.master', 10)
nodes = memc.get('service.nodes')
if nodes is None and not memc_alive():
print("WARNING: Lost Memcached. Taking over all devices. Nodes will quit shortly.")
servicedisco = False
nodes = nodeso
            if nodes != nodeso:
print("INFO: %s Node(s) Total" % (nodes))
nodeso = nodes
else:
memc_touch('service.nodes', 10)
try:
worker_id, device_id, elapsed_time = print_queue.get(False)
except:
pass
try:
time.sleep(1)
except:
pass
continue
else:
worker_id, device_id, elapsed_time = print_queue.get()
# EOC4
global real_duration
global per_device_duration
global service_devices
real_duration += elapsed_time
per_device_duration[device_id] = elapsed_time
service_devices += 1
if elapsed_time < 300:
print("INFO: worker %s finished device %s in %s seconds" % (worker_id, device_id, elapsed_time))
else:
print("WARNING: worker %s finished device %s in %s seconds" % (worker_id, device_id, elapsed_time))
print_queue.task_done()
"""
This class will fork off single instances of the check-services.php process, record
how long it takes, and push the resulting reports to the printer queue
"""
def poll_worker():
while True:
device_id = poll_queue.get()
# (c) 2015, GPLv3, Daniel Preussker <[email protected]> <<<EOC5
if not servicedisco or memc.get('service.device.' + str(device_id)) is None:
if servicedisco:
result = memc.add('service.device.' + str(device_id), config['distributed_poller_name'], 300)
if not result:
print("This device (%s) appears to be being service checked by another service node" % (device_id))
poll_queue.task_done()
continue
if not memc_alive() and IsNode:
print("Lost Memcached, Not service checking Device %s as Node. Master will check it." % device_id)
poll_queue.task_done()
continue
# EOC5
try:
start_time = time.time()
output = "-d >> %s/services_device_%s.log" % (log_dir, device_id) if debug else ">> /dev/null"
# TODO replace with command_runner
command = "/usr/bin/env php %s -h %s %s 2>&1" % (service_path, device_id, output)
subprocess.check_call(command, shell=True)
elapsed_time = int(time.time() - start_time)
print_queue.put([threading.current_thread().name, device_id, elapsed_time])
except (KeyboardInterrupt, SystemExit):
raise
except:
pass
poll_queue.task_done()
if __name__ == '__main__':
logger = LNMS.logger_get_logger(LOG_FILE, debug=_DEBUG)
install_dir = os.path.dirname(os.path.realpath(__file__))
LNMS.check_for_file(install_dir + '/config.php')
config = json.loads(LNMS.get_config_data(install_dir))
service_path = config['install_dir'] + '/check-services.php'
log_dir = config['log_dir']
# (c) 2015, GPLv3, Daniel Preussker <[email protected]> <<<EOC1
if 'distributed_poller_group' in config:
service_group = str(config['distributed_poller_group'])
else:
service_group = False
if ('distributed_poller' in config and
'distributed_poller_memcached_host' in config and
'distributed_poller_memcached_port' in config and
config['distributed_poller']):
try:
import memcache
import uuid
memc = memcache.Client([config['distributed_poller_memcached_host'] + ':' +
str(config['distributed_poller_memcached_port'])])
if str(memc.get("service.master")) == config['distributed_poller_name']:
print("This system is already joined as the service master.")
sys.exit(2)
if memc_alive():
if memc.get("service.master") is None:
print("Registered as Master")
memc.set("service.master", config['distributed_poller_name'], 10)
memc.set("service.nodes", 0, 300)
IsNode = False
else:
print("Registered as Node joining Master %s" % memc.get("service.master"))
IsNode = True
memc.incr("service.nodes")
servicedisco = True
else:
print("Could not connect to memcached, disabling distributed service checks.")
servicedisco = False
IsNode = False
except SystemExit:
raise
except ImportError:
print("ERROR: missing memcache python module:")
print("On deb systems: apt-get install python3-memcache")
print("On other systems: pip3 install python-memcached")
print("Disabling distributed discovery.")
servicedisco = False
else:
servicedisco = False
# EOC1
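    # Counters for the end-of-run summary printed by the print worker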
s_time = time.time()
real_duration = 0
per_device_duration = {}
service_devices = 0
"""
Take the amount of threads we want to run in parallel from the commandline
if None are given or the argument was garbage, fall back to default of 16
"""
usage = "usage: %prog [options] <workers> (Default: 1 (Do not set too high)"
description = "Spawn multiple check-services.php processes in parallel."
parser = OptionParser(usage=usage, description=description)
parser.add_option('-d', '--debug', action='store_true', default=False,
help="Enable debug output. WARNING: Leaving this enabled will consume a lot of disk space.")
(options, args) = parser.parse_args()
debug = options.debug
try:
amount_of_workers = int(args[0])
except (IndexError, ValueError):
amount_of_workers = 1
devices_list = []
# (c) 2015, GPLv3, Daniel Preussker <[email protected]> <<<EOC2
if service_group is not False:
query = "SELECT DISTINCT(`services`.`device_id`) FROM `services` LEFT JOIN `devices` ON `services`.`device_id` = `devices`.`device_id` WHERE `devices`.`poller_group` IN(" + service_group + ") AND `devices`.`disabled` = 0"
else:
query = "SELECT DISTINCT(`services`.`device_id`) FROM `services` LEFT JOIN `devices` ON `services`.`device_id` = `devices`.`device_id` WHERE `devices`.`disabled` = 0"
# EOC2
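    # Collect the ids of all enabled devices that have at least one service configured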
db = LNMS.db_open(config['db_socket'], config['db_host'], config['db_port'], config['db_user'], config['db_pass'], config['db_name'])
cursor = db.cursor()
cursor.execute(query)
devices = cursor.fetchall()
for row in devices:
devices_list.append(int(row[0]))
# (c) 2015, GPLv3, Daniel Preussker <[email protected]> <<<EOC3
if servicedisco and not IsNode:
query = "SELECT MAX(`device_id`), MIN(`device_id`) FROM `services`"
cursor.execute(query)
devices = cursor.fetchall()
maxlocks = devices[0][0] or 0
minlocks = devices[0][1] or 0
# EOC3
db.close()
poll_queue = queue.Queue()
print_queue = queue.Queue()
print("INFO: starting the service check at %s with %s threads" % (time.strftime("%Y-%m-%d %H:%M:%S"),
amount_of_workers))
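    # Fill the work queue, then start the daemonized poll workers and the single print worker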
for device_id in devices_list:
poll_queue.put(device_id)
for i in range(amount_of_workers):
t = threading.Thread(target=poll_worker)
t.setDaemon(True)
t.start()
p = threading.Thread(target=printworker)
p.setDaemon(True)
p.start()
try:
poll_queue.join()
print_queue.join()
except (KeyboardInterrupt, SystemExit):
raise
total_time = int(time.time() - s_time)
print("INFO: services-wrapper checked %s devices in %s seconds with %s workers" % (service_devices, total_time, amount_of_workers))
# (c) 2015, GPLv3, Daniel Preussker <[email protected]> <<<EOC6
if servicedisco or memc_alive():
master = memc.get("service.master")
if master == config['distributed_poller_name'] and not IsNode:
print("Wait for all service-nodes to finish")
nodes = memc.get("service.nodes")
while nodes is not None and nodes > 0:
try:
time.sleep(1)
nodes = memc.get("service.nodes")
except:
pass
print("Clearing Locks")
x = minlocks
while x <= maxlocks:
memc.delete('service.device.' + str(x))
x = x + 1
print("%s Locks Cleared" % x)
print("Clearing Nodes")
memc.delete("service.master")
memc.delete("service.nodes")
else:
memc.decr("service.nodes")
print("Finished %s." % time.time())
# EOC6
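    # Exit with status 2 (warning) if the whole run took longer than 5 minutes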
show_stopper = False
if total_time > 300:
print("WARNING: the process took more than 5 minutes to finish, you need faster hardware or more threads")
print("INFO: in sequential style service checks the elapsed time would have been: %s seconds" % real_duration)
for device in per_device_duration:
if per_device_duration[device] > 300:
print("WARNING: device %s is taking too long: %s seconds" % (device, per_device_duration[device]))
show_stopper = True
if show_stopper:
print("ERROR: Some devices are taking more than 300 seconds, the script cannot recommend you what to do.")
else:
recommend = int(total_time / 300.0 * amount_of_workers + 1)
print(
"WARNING: Consider setting a minimum of %d threads. (This does not constitute professional advice!)" % recommend)
sys.exit(2)
| gpl-3.0 | -8,893,147,896,201,469,000 | 36.807692 | 229 | 0.58102 | false |
mohou/Mohou_Box-master | boxPrint/print_service/__init__.py | 1 | 10414 | # coding=utf-8
#from threading import Thread
import Queue
import sys
import time
import logging
import re
import os
import psutil
class PrintService(object):
def __init__(self, profile, serialInfo):
# super(PrintService, self).__init__(name="PrintService")
self.profile = profile
self.serialInfo = serialInfo
self.printer = None
self.logger = logging.getLogger(__name__)
# self.stopFlag = False
# self.command_queue = Queue.PriorityQueue()
# def run(self):
# while True:
# if self.stopFlag:
# break
# (command, payload) = self.command_queue.get(True)
# print "command: %s" % str(command)
# print "payload: %s" % str(payload)
# method = getattr(self, command, None)
# if not method:
# print "Unkown command: %s!" % command
# continue
# try:
# method(payload)
# except Exception as e:
# print "Exception: %s." % e.message
# else:
# pass
#
# # Stop print service.
def stop(self):
# self.stopFlag = True
self.disconnectPrinter()
#
# # Send command to queue.
# def connect(self, payload=None):
# self.command_queue.put(("connectPrinter", payload), 0)
#
# def disconnect(self, payload=None):
# self.command_queue.put(("disconnectPrinter", payload), 0)
#
# def start(self, payload=None):
# self.command_queue.put(("startPrint", payload), 0)
#
# def pause(self, payload=None):
# self.command_queue.put(("pausePrint", payload), 0)
#
# def unpause(self, payload=None):
# self.command_queue.put(("unpausePrint", payload), 0)
#
# def cancel(self, payload=None):
# self.command_queue.put(("cancelPrint", payload), 0)
#
# def execute(self, payload):
# self.command_queue.put(("executeCommand", payload), 0)
    # Printer control methods, invoked directly instead of through the old command queue.
def connectPrinter(self, playload=None):
ret = False
if (self.profile['driver'] is not None) and (self.serialInfo['COM'] is not None):
if self.printer is not None:
self.disconnectPrinter()
time.sleep(0.1)
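            # The profile's driver name doubles as a module name; the module must
            # expose a Printer(profile, serialInfo) class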
try:
printer_class = __import__(self.profile['driver'])
except ImportError as ie:
self.logger.error("Printer type %s not supported." % self.profile['driver'])
self.logger.error("Import error: %s" % ie.message)
else:
try:
self.printer = printer_class.Printer(self.profile, self.serialInfo)
except RuntimeError as e:
message = "Can't connect to printer %s %s\nReason: %s" % (self.profile['name'], str(self.serialInfo), e.message)
self.logger.error(message)
except Exception:
message = "Unexpected error while connecting to %s: %s" % (self.profile['name'], sys.exc_info()[1])
self.logger.error(message)
else:
message = "Successful connection to %s!" % (self.profile['name'])
self.logger.info(message)
ret = True
return ret
def disconnectPrinter(self, playload=None):
if self.printer is None:
return
#if self.printer.is_operational():
self.printer.close()
self.printer = None
def startPrint(self, payload):
if self.printer is None:
return
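        # Plain gcode jobs need only the resource id; sliced jobs carry extra slicing metadata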
if payload['filetype'] == 'gcode':
self.printer.gcodes(self.printer.resource_url + payload['res_id'], is_link = True, file_type=payload['filetype'], res_id=payload['res_id'])
else:
self.printer.gcodes(self.printer.resource_url + payload['res_id'], is_link = True, file_type=payload['filetype'], res_id=payload['res_id'],\
slc_id=payload['slc_id'], slc_flag=int(payload['slc_flag']), slc_lines=int(payload['slc_lines']), slc_ptime=int(payload['slc_ptime']))
def pausePrint(self, payload=None):
if self.printer is None:
return
self.printer.pause()
def unpausePrint(self, payload=None):
if self.printer is None:
return
self.printer.unpause()
def cancelPrint(self, payload=None):
if self.printer is None:
return
self.printer.cancel()
def executeCommand(self, payload):
if self.printer is None:
return
self.printer.unbuffered_gcodes(payload)
def removeFile(self, payload):
if self.printer is None:
return
self.printer.removeFile(payload)
def toOperational(self, payload=None):
if self.printer is None:
return
self.printer.toOperational()
def getStatus(self):
data = {
"boxid": self.profile['boxid'],
"name": self.profile['box_name'],
"port": "",
"baudrate": "",
"pid": "",
"pname": "",
"vid": "",
"vname": "",
"app_ver": "1.0.1",
#"proto_ver": "1.0.0",
"bed_temperature": 0,
"target_bed_temperature": 0,
"temperature1": 0,
"target_temperature1": 0,
"temperature2": 0,
"target_temperature2": 0,
"extruder_amount": 1,
"printer_state": 1,
"print_progress": 0,
"print_speed": 0,
"fan_speed": 0,
"print_time_escape": "00:00:00",
"print_time_remain": "00:00:00",
'cpu_usage': 0,
'disk_size': 0,
'free_disk_size': 0,
'mem_size': 0,
'free_mem_size': 0,
'loc_ip': "127.0.0.1",
}
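        # printer_state 1 means "no printer connected"; live values are read from the driver below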
if self.printer is None:
data["printer_state"] = 1
else:
self.printer.read_state()
try:
data["bed_temperature"] = self.printer.temps[0]
data["target_bed_temperature"] = self.printer.target_temps[0]
data["temperature1"] = self.printer.temps[1]
data["target_temperature1"] = self.printer.target_temps[1]
data["temperature2"] = self.printer.temps[2]
data["target_temperature2"] = self.printer.target_temps[2]
            except Exception as ex:
                # Some drivers expose fewer temperature sensors; keep the values read so far
                pass
data["extruder_amount"] = self.printer.extruder_amount
data["printer_state"] = self.printer.printer_state
data["print_progress"] = self.printer.print_progress
data["print_speed"] = self.printer.print_speed
data["fan_speed"] = self.printer.fan_speed
if hasattr(self.printer, "print_time_escape"):
data["print_time_escape"] = self.printer.print_time_escape
if hasattr(self.printer, "print_time_remain"):
data["print_time_remain"] = self.printer.print_time_remain
hddinfo = os.statvfs(self.printer.model_file_path)
data['disk_size'] = hddinfo.f_frsize * hddinfo.f_blocks / 1024
            # Remaining storage space, in KB
data['free_disk_size'] = hddinfo.f_frsize * hddinfo.f_bavail / 1024
            # Total and free memory, in KB
            phymem = psutil.virtual_memory()
            data['mem_size'] = phymem.total / 1024
            data['free_mem_size'] = phymem.free / 1024
            data['port'] = self.serialInfo["COM"]
            data['baudrate'] = self.printer.correct_baudrate
            # CPU usage as a percentage (60% is reported as 60)
            data['cpu_usage'] = psutil.cpu_percent()
data['pid'] = self.serialInfo["PID"]
data['vid'] = self.serialInfo["VID"]
            # LAN IP, e.g. 192.168.1.100, parsed from ifconfig output (wlan0 preferred over eth0)
text = os.popen("ifconfig eth0").read()
reg_eth0 = re.match(r".*addr:(.*) Bcast:.*Mask:(.*)", text, re.S)
text = os.popen("ifconfig wlan0").read()
reg_wlan0 = re.match(r".*addr:(.*) Bcast:.*Mask:(.*)", text, re.S)
if reg_wlan0:
data['loc_ip'] = reg_wlan0.group(1)
elif reg_eth0:
data['loc_ip'] = reg_eth0.group(1)
else:
data['loc_ip'] = "127.0.0.1"
return data
def goHome(self):
if self.printer is None:
return
self.printer.goHome()
def goXYHome(self):
if self.printer is None:
return
self.printer.goXYHome()
def goZHome(self):
if self.printer is None:
return
self.printer.goZHome()
def goXPosition(self, pos):
if self.printer is None:
return
self.printer.goXPosition(pos)
def goYPosition(self, pos):
if self.printer is None:
return
self.printer.goYPosition(pos)
def goZPosition(self, pos):
if self.printer is None:
return
self.printer.goZPosition(pos)
def goEOperation(self, e, length):
if self.printer is None:
return
self.printer.goEOperation(e, length)
def setBedTargetTemp(self, temp):
if self.printer is None:
return
self.printer.setBedTargetTemp(temp)
def setETargetTemp(self, e, temp):
if self.printer is None:
return
self.printer.setETargetTemp(e, temp)
def setSpeedFactor(self, speedfactor):
if self.printer is None:
return
self.printer.setSpeedFactor(speedfactor)
| apache-2.0 | 6,482,249,820,652,363,000 | 35.194245 | 163 | 0.499516 | false |
IcyMint/Barchine | Barchine_gui.py | 1 | 66916 | import PySimpleGUI as sg
from Ingredient_Library import restoreIngredientLibrary, storeIngredientLibrary, createIngredient
from Ingredient_Library import listIngredients, getFamilyTypes, getBaseTypes, deleteIngredient
from Ingredient_Library import restoreBases
from Drink_Library import restoreDrinkLibrary, storeDrinkLibrary, listDrinks, deleteDrink, getIceTypes, getGlassTypes, createDrink
import Bartender
from Keypad import Keypad
import sys
import os
from pathlib import Path
import re
from Logging import log
#Initialize display properties
if os.environ.get('DISPLAY','') == '':
print('no display found. Using :0.0')
os.environ.__setitem__('DISPLAY', ':0.0')
sg.theme('DarkAmber')
#Fullscreen selector
FULLSCREEN = False
#Screen Resolution
RESOLUTION = {'x':800,'y':480}
#Load library information
restoreBases()
restoreIngredientLibrary()
restoreDrinkLibrary()
def contextSwitcher(current, next, window):
    #Map each menu button prefix to the screen constructor it launches.
    #Every caller passes keys shaped like '<Screen>_<currentpage>', so the
    #text before the underscore names the requested screen.
    screens = {
        'Home': HomeGUI,
        'Library': LibraryGUI,
        'Ingredients': IngredientsGUI,
        'Stations': StationsGUI,
        'Stats': StatsGUI,
        'Settings': SettingsGUI,
    }
    target = next.split('_')[0]
    #Ignore requests for the screen that is already on display
    if(target in screens and not current.startswith(target + '_')):
        screens[target](window)
def HomeGUI(prev_window):
filtered = True
shelf = {}
#Get a dict of shelf names
for element in Bartender.getShelf():
if(element is not None):
shelf[element.getBase()] = ''
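    #Used below to flag recipe ingredients that are not currently loaded on the machine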
drinkInfo_home = [
[sg.Text('-DRINK_NAME-',key='DRINK_NAME_home',font=('Helvetica', 15),size=(15,1))],
[sg.Text('-ICE_NAME-',key='ICE_NAME_home',size=(15,1))],
[sg.Text('-GLASS_NAME-',key='GLASS_NAME_home',size=(18,1))],
[sg.Text('-GARNISH_NAME-',key='GARNISH_NAME_home',size=(15,1),relief='ridge',enable_events=True)],
[sg.Text('-EXTRAS_NAME-',key='EXTRAS_NAME_home',size=(15,3),relief='ridge',enable_events=True)],
[sg.Text('Ingredients:',font=('Helvetica', 15))],
[sg.Listbox(['-DRINK_COMPONENTS-'],size=(25,4),key='DrinkIngredients_home')]
]
#Image translation
image = Path('Image_Library/placeholder.png')
image_layout_home = [
[sg.Image(filename=image,key='image_home',size=(128,256))]
]
layout_home = [
[sg.Text(text='Barchine',size=(8,1),font=('Helvetica', 30),key='title_home')],
[sg.Button('Home',font=('Helvetica', 15),key='Home_home',border_width=5,button_color=(None,'#60b551')),
sg.Button('Library',font=('Helvetica', 15),key='Library_home'),
sg.Button('Ingredients',font=('Helvetica', 15),key='Ingredients_home'),
sg.Button('Stations',font=('Helvetica', 15),key='Stations_home'),
sg.Button('Stats',font=('Helvetica', 15),key='Stats_home'),
sg.Button('Settings',font=('Helvetica', 15),key='Settings_home')],
[sg.Listbox(Bartender.showDrinkMenu(True),font=('Helvetica', 20),size=(22,8),
key='Menu_List',enable_events=True),sg.Column(drinkInfo_home),sg.Column(image_layout_home)],
[sg.Button('Order',font=('Helvetica', 20),size=(12,1),key='order_home')
,sg.Button('Custom',font=('Helvetica', 20),size=(8,1),key='custom_home')
,sg.Button('Recommended',font=('Helvetica', 20),size=(12,1),key='recommended_home')
,sg.Button('Unfilter',font=('Helvetica', 20),size=(8,1),key='filter_home')]
]
#Launch Window
window_home = sg.Window('Barchine', layout_home, keep_on_top=True,size=(RESOLUTION.get('x'),RESOLUTION.get('y'))).Finalize()
if(FULLSCREEN):
window_home.Maximize()
#Close Previous window
if(prev_window is not None):
prev_window.close()
chosen = None
while True: # Event Loop
event, values = window_home.read()
print(event, values)
#Check for menu selection
if(event == 'Library_home'):
contextSwitcher('Home_home','Library_home',window_home)
if(event == 'Ingredients_home'):
contextSwitcher('Home_home','Ingredients_home',window_home)
if(event == 'Stations_home'):
contextSwitcher('Home_home','Stations_home',window_home)
if(event == 'Stats_home'):
contextSwitcher('Home_home','Stats_home',window_home)
if(event == 'Settings_home'):
contextSwitcher('Home_home','Settings_home',window_home)
#When drink menu item is selected
if event == 'Menu_List':
for drink in listDrinks():
if(drink.getName() == values['Menu_List'][0]):
chosen = drink
window_home['DRINK_NAME_home'].update(drink.getName())
window_home['ICE_NAME_home'].update('Ice: '+drink.getIce())
window_home['GLASS_NAME_home'].update('Glass: '+drink.getGlass())
window_home['GARNISH_NAME_home'].update('Garnish: '+drink.getGarnish())
window_home['EXTRAS_NAME_home'].update('Extras: '+drink.getExtras())
image = Path('Image_Library/'+drink.getImage())
window_home['image_home'].update(filename=image)
#Retrieve list of ingredients formatted
display = []
for key, value in drink.getIngredients().items():
if(key in shelf):
display.append(str(value)+' '*(4-len(str(value)))+'mL - '+str(key))
else:
display.append(str(value)+' '*(4-len(str(value)))+'mL - '+str(key)+' [Miss]')
window_home['DrinkIngredients_home'].update(display)
if(event == 'order_home'):
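            #A filtered menu only lists makeable drinks; unfiltered orders must be confirmed first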
if(filtered):
Bartender.createOrder(chosen.getName(),False)
else:
display = []
counter = 0
for key, value in chosen.getIngredients().items():
if(key not in shelf):
display.append(str(key))
counter+=1
if(counter!=0):
if(ForceWarning(display,window_home)):
Bartender.createOrder(chosen.getName(),True)
if(event == 'custom_home'):
CustomView(window_home)
if(event == 'recommended_home'):
pass
if(event == 'filter_home'):
#If currently filtered, unfilter
if(filtered):
#Update variables/Button text
filtered = False
window_home['filter_home'].update(text='Filter')
#Format list of drink names
drinks_pretty = []
for drink in listDrinks():
drinks_pretty.append(drink.getName())
#Sort alphabetically
drinks_pretty.sort(key=str.lower)
window_home['Menu_List'].update(values=drinks_pretty)
#If not filtered, make filtered
else:
#Update variables/Button
filtered = True
window_home['filter_home'].update(text='Unfilter')
window_home['Menu_List'].update(values=Bartender.showDrinkMenu(True))
if(event == 'GARNISH_NAME_home' and chosen != None):
TextViewExpanded(chosen.getGarnish(),'Garnish',window_home)
if(event == 'EXTRAS_NAME_home' and chosen != None):
TextViewExpanded(chosen.getExtras(),'Extras',window_home)
if event in (None, 'Exit'):
window_home.close()
break
def ForceWarning(missing,window):
#Temporarily change theme
sg.theme('Dark')
#Temporarily disable host window
window.Disable()
layout_forcewarning = [
[sg.Text('Recipe Warning',key='title_forcewarning',font=('Helvetica', 20))],
[sg.Text('Missing Ingredients:',key='subtitle_forcewarning',font=('Helvetica', 15))],
[sg.Text(size=(12,5),key='missing_forcewarning')],
[sg.Button('Order',font=('Helvetica', 15),key='order_forcewarning'),sg.Button('Cancel',font=('Helvetica', 15),key='cancel_forcewarning')]
]
#Launch window
window_forcewarning = sg.Window('Barchine', layout_forcewarning,keep_on_top=True,no_titlebar=True).Finalize()
window_forcewarning.BringToFront()
#Load missing ingredient values
ingredients = ''
for element in missing:
ingredients+=element+'\n'
window_forcewarning['missing_forcewarning'].update(value=ingredients)
while True: # Event Loop
event, values = window_forcewarning.read()
print(event, values)
if(event == 'order_forcewarning'):
#Re-enable host window
window.Enable()
window.BringToFront()
#Change theme back to normal
sg.theme('DarkAmber')
window_forcewarning.close()
return True
if(event == 'cancel_forcewarning'):
#Re-enable host window
window.Enable()
window.BringToFront()
#Change theme back to normal
sg.theme('DarkAmber')
window_forcewarning.close()
return False
if event in (None, 'Exit'):
break
#Re-enable host window
window.Enable()
window.BringToFront()
#Change theme back to normal
sg.theme('DarkAmber')
window_forcewarning.close()
def CustomView(window):
#Disable host window temporarily
window.Disable()
layout_buttons_customview = [
[sg.Button('Add',font=('Helvetica', 15),key='add_customview')],
[sg.Button('Remove',font=('Helvetica', 15),key='remove_customview')]
]
layout_customview = [
[sg.Text('Custom Drink',key='title_customview',font=('Helvetica', 30))],
[sg.Listbox([],size=(20,4),key='DrinkIngredients_customview',enable_events=True)
,sg.Column(layout_buttons_customview)],
[sg.Button('Order',font=('Helvetica', 15),key='order_customview'),sg.Button('Cancel',font=('Helvetica', 15),key='cancel_customview')],
]
#Launch window
window_customview = sg.Window('Barchine', layout_customview,keep_on_top=True,no_titlebar=True).Finalize()
window_customview.BringToFront()
ingredients = {}
while True: # Event Loop
event, values = window_customview.read()
print(event, values)
if(event == 'add_customview'):
new_elements = IngredientAddPopUp('custom',None,None,window_customview)
if(new_elements[0] is not None):
ingredients[new_elements[0]] = int(new_elements[1])
#Update ingredients list
display = []
for key, value in ingredients.items():
display.append(str(key)+str(value).rjust(20-len(str(key)), ' '))
window_customview['DrinkIngredients_customview'].update(values=display)
if(event == 'remove_customview' and len(values['DrinkIngredients_customview']) > 0):
for key, value in ingredients.items():
if(key == re.findall("[^0-9]*",values['DrinkIngredients_customview'][0])[0].rstrip()):
#Delete from ingredients list
del ingredients[key]
#Update ingredients list
display = []
for key, value in ingredients.items():
display.append(str(key)+str(value).rjust(20-len(str(key)), ' '))
window_customview['DrinkIngredients_customview'].update(values=display)
break
if(event == 'order_customview'):
#TODO: Order the beverage
pass
if(event == 'cancel_customview'):
break
if event in (None, 'Exit'):
break
#Re-enable host window
window.Enable()
window.BringToFront()
window_customview.close()
def LibraryGUI(prev_window):
#Format list of drink names
drinks_pretty = []
for drink in listDrinks():
drinks_pretty.append(drink.getName())
#Sort alphabetically
drinks_pretty.sort(key=str.lower)
shelf = {}
#Get a dict of shelf names
for element in Bartender.getShelf():
if(element is not None):
shelf[element.getBase()] = ''
drinkInfo_library = [
[sg.Text('-DRINK_NAME-',key='DRINK_NAME_library',font=('Helvetica', 15),size=(15,2))],
[sg.Text('-ICE_NAME-',key='ICE_NAME_library',size=(15,1))],
[sg.Text('-GLASS_NAME-',key='GLASS_NAME_library',size=(18,1))],
[sg.Text('-GARNISH_NAME-',key='GARNISH_NAME_library',size=(15,1),relief='ridge',enable_events=True)],
[sg.Text('-EXTRAS_NAME-',key='EXTRAS_NAME_library',size=(15,3),relief='ridge',enable_events=True)],
[sg.Text('Ingredients:',font=('Helvetica', 15))],
[sg.Listbox(['-DRINK_COMPONENTS-'],size=(25,4),key='DrinkIngredients_library')]
]
#Image translation
image = Path('Image_Library/placeholder.png')
image_layout_library = [
[sg.Image(filename=image,key='image_library',size=(128,256))]
]
layout_library = [
[sg.Text(text='Barchine',size=(8,1),font=('Helvetica', 30),key='title_library')],
[sg.Button('Home',font=('Helvetica', 15),key='Home_library'),
sg.Button('Library',font=('Helvetica', 15),key='Library_library',border_width=5,button_color=(None,'#60b551')),
sg.Button('Ingredients',font=('Helvetica', 15),key='Ingredients_library'),
sg.Button('Stations',font=('Helvetica', 15),key='Stations_library'),
sg.Button('Stats',font=('Helvetica', 15),key='Stats_library'),
sg.Button('Settings',font=('Helvetica', 15),key='Settings_library')],
[sg.Listbox(drinks_pretty,font=('Helvetica', 20),size=(22,8),
key='Library_List',enable_events=True),sg.Column(drinkInfo_library),sg.Column(image_layout_library)],
[sg.Button('Add',font=('Helvetica', 15),size=(15,1),key='Add_library'),
sg.Button('Edit',font=('Helvetica', 15),size=(15,1),key='Edit_library'),
sg.Button('Delete',font=('Helvetica', 15),size=(15,1),key='Delete_library')]
]
#Launch window
window_library = sg.Window('Barchine', layout_library, keep_on_top=True,size=(RESOLUTION.get('x'),RESOLUTION.get('y'))).Finalize()
if(FULLSCREEN):
window_library.Maximize()
#Close Previous window
if(prev_window is not None):
prev_window.close()
chosen = None
while True: # Event Loop
event, values = window_library.read()
print(event, values)
#Check for menu selection
if(event == 'Home_library'):
contextSwitcher('Library_library','Home_library',window_library)
if(event == 'Ingredients_library'):
contextSwitcher('Library_library','Ingredients_library',window_library)
if(event == 'Stations_library'):
contextSwitcher('Library_library','Stations_library',window_library)
if(event == 'Stats_library'):
contextSwitcher('Library_library','Stats_library',window_library)
if(event == 'Settings_library'):
contextSwitcher('Library_library','Settings_library',window_library)
#When drink item is selected
if event == 'Library_List':
for drink in listDrinks():
if(drink.getName() == values['Library_List'][0]):
chosen = drink
window_library['DRINK_NAME_library'].update(drink.getName())
window_library['ICE_NAME_library'].update('Ice: '+drink.getIce())
window_library['GLASS_NAME_library'].update('Glass: '+drink.getGlass())
window_library['GARNISH_NAME_library'].update('Garnish: '+drink.getGarnish())
window_library['EXTRAS_NAME_library'].update('Extras: '+drink.getExtras())
image = Path('Image_Library/'+drink.getImage())
window_library['image_library'].update(filename=image)
#Retrieve list of ingredients formatted
display = []
for key, value in drink.getIngredients().items():
if(key in shelf):
display.append(str(value)+' '*(4-len(str(value)))+'mL - '+str(key))
else:
display.append(str(value)+' '*(4-len(str(value)))+'mL - '+str(key)+' [Miss]')
window_library['DrinkIngredients_library'].update(display)
if(event == 'Add_library'):
print(chosen)
DrinkView('new',None,window_library)
chosen = None
#Update list of drinks
drinks_pretty = []
for drink in listDrinks():
drinks_pretty.append(drink.getName())
#Sort alphabetically
drinks_pretty.sort(key=str.lower)
window_library['Library_List'].update(values=drinks_pretty)
pass
if(event == 'Edit_library' and chosen is not None):
DrinkView('edit',chosen,window_library)
chosen = None
#Update list of drinks
drinks_pretty = []
for drink in listDrinks():
drinks_pretty.append(drink.getName())
#Sort alphabetically
drinks_pretty.sort(key=str.lower)
window_library['Library_List'].update(values=drinks_pretty)
pass
if(event == 'Delete_library' and chosen is not None):
print(chosen)
deleteDrink(chosen.getName())
chosen = None
#Update list of drinks
drinks_pretty = []
for drink in listDrinks():
drinks_pretty.append(drink.getName())
#Sort alphabetically
drinks_pretty.sort(key=str.lower)
window_library['Library_List'].update(values=drinks_pretty)
pass
if event in (None, 'Exit'):
window_library.close()
break
if(event == 'GARNISH_NAME_library' and chosen != None):
TextViewExpanded(chosen.getGarnish(),'Garnish',window_library)
if(event == 'EXTRAS_NAME_library' and chosen != None):
TextViewExpanded(chosen.getExtras(),'Extras',window_library)
#Close remaining window
window_library.close()
def IngredientAddPopUp(mode, input_key, input_value, window):
#Temporarily disable host window
window.Disable()
response = None
layout_ingredientaddpopup = [
[sg.Text('MODE',key='mode_name_ingredientaddpopup',font=('Helvetica', 30))],
[sg.Text('Name: ',key='name_text_ingredientaddpopup',font=('Helvetica', 15))
,sg.OptionMenu(getBaseTypes(),key='ingredient_input_ingredientaddpopup',size=(15,10))],
[sg.Text('Amount: ',key='amount_text_ingredientaddpopup',font=('Helvetica', 15))
,sg.Button('',key='amount_input_ingredientaddpopup',size=(4,1))
,sg.Text(' mL',key='unit_ingredientaddpopup',font=('Helvetica', 15))],
[sg.Button('Save',font=('Helvetica', 15),key='save_ingredientaddpopup')
,sg.Button('Exit',font=('Helvetica', 15),key='exit_ingredientaddpopup')],
]
#Launch window
window_ingredientaddpopup = sg.Window('Barchine', layout_ingredientaddpopup,keep_on_top=True,no_titlebar=True).Finalize()
window_ingredientaddpopup.BringToFront()
#Change mode title displayed
if(mode == 'edit'):
window_ingredientaddpopup['mode_name_ingredientaddpopup'].update(value='Edit')
window_ingredientaddpopup['ingredient_input_ingredientaddpopup'].update(value=input_key)
window_ingredientaddpopup['amount_input_ingredientaddpopup'].update(text=input_value)
if(mode == 'new' or mode == 'custom'):
window_ingredientaddpopup['mode_name_ingredientaddpopup'].update(value='New')
#Change displayed options depending on mode
if(mode == 'custom'):
basetypes = set()
for element in Bartender.getShelf():
if(element is not None):
basetypes.add(element.getBase())
window_ingredientaddpopup['ingredient_input_ingredientaddpopup'].update(values=list(basetypes))
while True: # Event Loop
event, values = window_ingredientaddpopup.read()
print(event, values)
if(event == 'amount_input_ingredientaddpopup'):
window_ingredientaddpopup.Disable()
window_ingredientaddpopup['amount_input_ingredientaddpopup'].update(text=Keypad())
window_ingredientaddpopup.Enable()
if(event =='save_ingredientaddpopup'):
if(window_ingredientaddpopup['amount_input_ingredientaddpopup'].GetText()):
response = 'save'
break
else:
print('ERROR: invalid number')
if(event =='exit_ingredientaddpopup'):
response = 'exit'
break
if event in (None, 'Exit'):
break
#Re-enable host window
window.Enable()
window.BringToFront()
window_ingredientaddpopup.close()
    if(response == 'save'):
        return([values['ingredient_input_ingredientaddpopup'],window_ingredientaddpopup['amount_input_ingredientaddpopup'].GetText()])
    #Treat a closed window like 'exit' so callers always receive a two-item list
    return([None,None])
def TextViewExpanded(text,title,window):
#Temporarily disable host window
window.Disable()
layout_textviewexpanded = [
[sg.Text(text=title,font=('Helvetica', 20),key='title_textviewexpanded')],
[sg.Text(text=text,font=('Helvetica', 12),key='content_textviewexpanded',size=(25,6))],
[sg.Button('Close',font=('Helvetica', 10),key='close_textviewexpanded')]
]
#Launch window
window_textviewexpanded = sg.Window('Barchine', layout_textviewexpanded,keep_on_top=True,no_titlebar=True).Finalize()
window_textviewexpanded.BringToFront()
while True: # Event Loop
event, values = window_textviewexpanded.read()
print(event, values)
if(event == 'close_textviewexpanded'):
window_textviewexpanded.close()
break
if event in (None, 'Exit'):
break
#Re-enable host window
window.Enable()
window.BringToFront()
window_textviewexpanded.close()
def DrinkView(mode,drink,window):
#Temporarily disable host window
window.Disable()
layout_buttons_drinkview = [
[sg.Button('Add',font=('Helvetica', 15),key='add_drinkviewingredient')],
[sg.Button('Edit',font=('Helvetica', 15),key='edit_drinkviewingredient')],
[sg.Button('Remove',font=('Helvetica', 15),key='remove_drinkviewingredient')]
]
layout_drinkview = [
[sg.Text('MODE',key='mode_name_drinkview',font=('Helvetica', 30))],
[sg.Text('Name: ',key='name_text_drinkview',font=('Helvetica', 15)),sg.InputText('DEFAULT NAME',key='name_input_drinkview')],
[sg.Text('Ice: ',key='ice_text_drinkview',font=('Helvetica', 15)),sg.OptionMenu(getIceTypes(),key='ice_input_drinkview')],
[sg.Text('Glass: ',key='glass_text_drinkview',font=('Helvetica', 15)),sg.OptionMenu(getGlassTypes(),key='glass_input_drinkview')],
[sg.Text('Garnish: ',key='garnish_text_drinkview',font=('Helvetica', 15)),sg.InputText('None',key='garnish_input_drinkview')],
[sg.Text('Extras: ',key='extras_text_drinkview',font=('Helvetica', 15)),sg.InputText('None',key='extra_input_drinkview')],
[sg.Input(key='filename_field', visible=False, enable_events=True),sg.FileBrowse(file_types=(('Images', '*.png'),))
,sg.Text('Image: ',key='image_text_drinkview',font=('Helvetica', 15))
,sg.Text('placeholder.png',key='filename_drinkview',font=('Helvetica', 12),size=(20,1))],
[sg.Text('Ingredients',key='ingredients_title',font=('Helvetica', 20)),sg.Text(' ',key='spacer_drinkview',size=(20,1))
,sg.Button('Save',font=('Helvetica', 15),key='save_drinkview'),sg.Button('Exit',font=('Helvetica', 15),key='exit_drinkview')],
#TODO:List drink components here
[sg.Listbox([],size=(20,4),key='DrinkIngredients_drinkview',enable_events=True),
sg.Column(layout_buttons_drinkview)
]
]
#Launch window
window_drinkview = sg.Window('Barchine', layout_drinkview,keep_on_top=True,no_titlebar=True).Finalize()
window_drinkview.BringToFront()
#Set default variable values
new_name = None
new_ice = None
new_glass = None
new_garnish = None
new_extras = None
new_ingredients = {}
new_image = None
#Change mode title displayed
if(mode == 'edit'):
window_drinkview['mode_name_drinkview'].update(value='Edit')
if(mode == 'new'):
window_drinkview['mode_name_drinkview'].update(value='New')
#Change displayed info based on mode
if(mode == 'edit'):
#Retrieve proper drink reference
#Set default variables
new_name = drink.getName()
new_ice = drink.getIce()
new_glass = drink.getGlass()
new_garnish = drink.getGarnish()
new_extras = drink.getExtras()
new_ingredients = drink.getIngredients()
new_image = drink.getImage()
#Retrieve list of ingredients formatted
display = []
for key, value in new_ingredients.items():
display.append(str(value)+' '*(4-len(str(value)))+'mL - '+str(key))
#Update fields
window_drinkview['name_input_drinkview'].update(value=new_name)
window_drinkview['ice_input_drinkview'].update(value=new_ice)
window_drinkview['glass_input_drinkview'].update(value=new_glass)
window_drinkview['garnish_input_drinkview'].update(value=new_garnish)
window_drinkview['extra_input_drinkview'].update(value=new_extras)
window_drinkview['DrinkIngredients_drinkview'].update(values=display)
window_drinkview['filename_drinkview'].update(value=new_image)
window_drinkview['filename_field'].update(value=new_image)
while True: # Event Loop
event, values = window_drinkview.read()
print(event, values)
if(event == 'filename_field'):
print('IMAGE FOUND')
            window_drinkview['filename_drinkview'].update(value=re.search('([^/]*)$', values['filename_field']).group())
if(event =='save_drinkview'):
new_name = values['name_input_drinkview']
if(mode == 'new' and new_name is not None and len(new_ingredients) > 0):
#Load in values
new_ice = values['ice_input_drinkview']
new_glass = values['glass_input_drinkview']
new_garnish = values['garnish_input_drinkview']
new_extras = values['extra_input_drinkview']
if(values['filename_field'][-3:] == 'png'):
                    new_image = re.search('([^/]*)$', values['filename_field']).group()
else:
new_image = 'placeholder.png'
check = True
#Check for duplicate name
for drink_element in listDrinks():
if(drink_element.getName() == new_name):
check = False
#Continue saving
if(check):
createDrink(new_name,new_ice,new_glass,new_garnish,new_extras,new_ingredients,new_image,False)
break
else:
print('ERROR: Duplicate name or invalid image file')
pass
if(mode == 'edit'):
#Get changes
new_name = values['name_input_drinkview']
new_ice = values['ice_input_drinkview']
new_glass = values['glass_input_drinkview']
new_garnish = values['garnish_input_drinkview']
new_extras = values['extra_input_drinkview']
if(values['filename_field'][-3:] == 'png'):
                    new_image = re.search('([^/]*)$', values['filename_field']).group()
else:
new_image = 'placeholder.png'
check = True
#Check for duplicate name
for drink_element in listDrinks():
if(drink_element.getName() == new_name and new_name != drink.getName()):
check = False
#Continue saving
if(check):
#Apply edits
drink.setName(new_name)
drink.setIce(new_ice)
drink.setGlass(new_glass)
drink.setGarnish(new_garnish)
drink.setExtras(new_extras)
drink.setIngredients(new_ingredients)
drink.setImage(new_image)
listDrinks()
else:
print('ERROR: Duplicate name or invalid image file')
break
if(event =='exit_drinkview'):
break
if(event == 'add_drinkviewingredient'):
new_elements = IngredientAddPopUp('new',None,None,window_drinkview)
if(new_elements[0] is not None):
new_ingredients[new_elements[0]] = int(new_elements[1])
#Update ingredients list
display = []
for key, value in new_ingredients.items():
display.append(str(value)+' '*(4-len(str(value)))+'mL - '+str(key))
window_drinkview['DrinkIngredients_drinkview'].update(values=display)
if(event == 'edit_drinkviewingredient' and mode == 'edit' and len(values['DrinkIngredients_drinkview']) > 0):
for key, value in new_ingredients.items():
if(key == values['DrinkIngredients_drinkview'][0][values['DrinkIngredients_drinkview'][0].index('- ')+2:]):
#Send values to user field, then replace with returning values
new_elements = IngredientAddPopUp('edit',key,value,window_drinkview)
#Replace entry
if(new_elements[0] is not None):
del new_ingredients[key]
new_ingredients[new_elements[0]] = int(new_elements[1])
#Update ingredients list
display = []
for key, value in new_ingredients.items():
display.append(str(value)+' '*(4-len(str(value)))+'mL - '+str(key))
window_drinkview['DrinkIngredients_drinkview'].update(values=display)
if(event == 'remove_drinkviewingredient' and len(values['DrinkIngredients_drinkview']) > 0):
for key, value in new_ingredients.items():
if(key == values['DrinkIngredients_drinkview'][0][values['DrinkIngredients_drinkview'][0].index('- ')+2:]):
#Delete from ingredients list
del new_ingredients[key]
#Update ingredients list
display = []
for key, value in new_ingredients.items():
display.append(str(value)+' '*(4-len(str(value)))+'mL - '+str(key))
window_drinkview['DrinkIngredients_drinkview'].update(values=display)
break
if event in (None, 'Exit'):
break
#Re-enable host window
window.Enable()
window.BringToFront()
window_drinkview.close()
def IngredientsGUI(prev_window):
#Format list of ingredient names
ingredients_pretty = []
for ingredient in listIngredients():
ingredients_pretty.append(ingredient.getName())
ingredientInfo_ingredients = [
[sg.Text('-INGREDIENT_NAME-',key='INGREDIENT_NAME_ingredients',font=('Helvetica', 15),size=(30,1))],
[sg.Text('-FAMILY_NAME-',key='FAMILY_NAME_ingredients',size=(15,1))],
[sg.Text('-BASE_NAME-',key='BASE_NAME_ingredients',size=(15,1))],
[sg.Text('-STARTING_VOLUME-',key='STARTING_VOLUME_NAME_ingredients',size=(24,1))],
[sg.Text('-CURRENT_VOLUME-',key='CURRENT_VOLUME_NAME_ingredients',size=(24,1))]
]
layout_ingredients = [
[sg.Text(text='Barchine',size=(8,1),font=('Helvetica', 30),key='title_ingredients')],
[sg.Button('Home',font=('Helvetica', 15),key='Home_ingredients'),
sg.Button('Library',font=('Helvetica', 15),key='Library_ingredients'),
sg.Button('Ingredients',font=('Helvetica', 15),key='Ingredients_ingredients',border_width=5,button_color=(None,'#60b551')),
sg.Button('Stations',font=('Helvetica', 15),key='Stations_ingredients'),
sg.Button('Stats',font=('Helvetica', 15),key='Stats_ingredients'),
sg.Button('Settings',font=('Helvetica', 15),key='Settings_ingredients')],
[sg.Listbox(ingredients_pretty,font=('Helvetica', 20),size=(25,8),
key='Ingredients_List',enable_events=True),sg.Column(ingredientInfo_ingredients)],
[sg.Button('Add',font=('Helvetica', 15),size=(15,1),key='Add_ingredients'),
sg.Button('Edit',font=('Helvetica', 15),size=(15,1),key='Edit_ingredients'),
sg.Button('Delete',font=('Helvetica', 15),size=(15,1),key='Delete_ingredients')]
]
#Launch window
window_ingredients = sg.Window('Barchine', layout_ingredients, keep_on_top=True,size=(RESOLUTION.get('x'),RESOLUTION.get('y'))).Finalize()
if(FULLSCREEN):
window_ingredients.Maximize()
#Close Previous window
if(prev_window is not None):
prev_window.close()
chosen = None
while True: # Event Loop
event, values = window_ingredients.read()
print(event, values)
#Check for menu selection
if(event == 'Home_ingredients'):
contextSwitcher('Ingredients_ingredients','Home_ingredients',window_ingredients)
if(event == 'Library_ingredients'):
contextSwitcher('Ingredients_ingredients','Library_ingredients',window_ingredients)
if(event == 'Stations_ingredients'):
contextSwitcher('Ingredients_ingredients','Stations_ingredients',window_ingredients)
if(event == 'Stats_ingredients'):
contextSwitcher('Ingredients_ingredients','Stats_ingredients',window_ingredients)
if(event == 'Settings_ingredients'):
contextSwitcher('Ingredients_ingredients','Settings_ingredients',window_ingredients)
#When ingredient item is selected
if event == 'Ingredients_List':
for ingredient in listIngredients():
if(ingredient.getName() == values['Ingredients_List'][0]):
chosen = ingredient
window_ingredients['INGREDIENT_NAME_ingredients'].update(ingredient.getName())
window_ingredients['FAMILY_NAME_ingredients'].update('Family: '+ingredient.getFamily())
window_ingredients['BASE_NAME_ingredients'].update('Base: '+ingredient.getBase())
window_ingredients['STARTING_VOLUME_NAME_ingredients'].update('Starting Volume: '+str(ingredient.getStartVol())+' mL')
window_ingredients['CURRENT_VOLUME_NAME_ingredients'].update('Current Volume: '+str(ingredient.getEndVol())+' mL')
if(event == 'Add_ingredients'):
IngredientView('new',None,window_ingredients)
#Update list of ingredients
ingredients_pretty = []
for ingredient in listIngredients():
ingredients_pretty.append(ingredient.getName())
window_ingredients['Ingredients_List'].update(values=ingredients_pretty)
pass
if(event == 'Edit_ingredients' and chosen is not None):
IngredientView('edit',chosen,window_ingredients)
#Update list of ingredients
ingredients_pretty = []
for ingredient in listIngredients():
ingredients_pretty.append(ingredient.getName())
window_ingredients['Ingredients_List'].update(values=ingredients_pretty)
pass
if(event == 'Delete_ingredients' and chosen is not None):
deleteIngredient(chosen.getName())
chosen = None
#Update list of ingredients
ingredients_pretty = []
for ingredient in listIngredients():
ingredients_pretty.append(ingredient.getName())
window_ingredients['Ingredients_List'].update(values=ingredients_pretty)
pass
if event in (None, 'Exit'):
window_ingredients.close()
break
#Close remaining window
window_ingredients.close()
def IngredientView(mode,ingredient,window):
#Temporarily disable host window
window.Disable()
layout_ingredientview = [
[sg.Text('MODE',key='mode_name_ingredientview',font=('Helvetica', 30))],
[sg.Text('Name: ',key='name_text_ingredientview',font=('Helvetica', 15)),sg.InputText('DEFAULT NAME',key='name_input_ingredientview')],
[sg.Text('Base: ',key='base_text_ingredientview',font=('Helvetica', 15))
,sg.OptionMenu(getBaseTypes(),key='base_input_ingredientview',size=(15,10))],
[sg.Text('Family: ',key='family_text_ingredientview',font=('Helvetica', 15))
,sg.OptionMenu(getFamilyTypes(),key='family_input_ingredientview')],
[sg.Text('Starting Volume: ',key='startvol_text_ingredientview',font=('Helvetica', 15))
,sg.Button('',key='startvol_input_ingredientview',size=(4,1))
,sg.Text(' mL',key='unit1_ingredientview',font=('Helvetica', 15))],
[sg.Text('Current Volume: ',key='endvol_text_ingredientview',font=('Helvetica', 15))
,sg.Button('',key='endvol_input_ingredientview',size=(4,1))
,sg.Text(' mL',key='unit2_ingredientview',font=('Helvetica', 15))],
[sg.Button('Save',font=('Helvetica', 15),key='save_ingredientview'),sg.Button('Exit',font=('Helvetica', 15),key='exit_ingredientview')]
]
#Launch window
window_ingredientview = sg.Window('Barchine', layout_ingredientview,keep_on_top=True,no_titlebar=True).Finalize()
window_ingredientview.BringToFront()
#Initialize default variables
new_name = None
new_base = None
new_family = None
new_startVol = None
new_endVol = None
new_active = False
new_position = -1
#Change mode title displayed
if(mode == 'edit'):
window_ingredientview['mode_name_ingredientview'].update(value='Edit')
if(mode == 'new'):
window_ingredientview['mode_name_ingredientview'].update(value='New')
#Change displayed info based on mode
if(mode == 'edit'):
#Set default variables
new_name = ingredient.getName()
new_base = ingredient.getBase()
new_family = ingredient.getFamily()
new_startVol = ingredient.getStartVol()
new_endVol = ingredient.getEndVol()
new_active = ingredient.isActive()
new_position = ingredient.getPosition()
#Update fields
window_ingredientview['name_input_ingredientview'].update(value=new_name)
window_ingredientview['base_input_ingredientview'].update(value=new_base)
window_ingredientview['family_input_ingredientview'].update(value=new_family)
window_ingredientview['startvol_input_ingredientview'].update(text=new_startVol)
window_ingredientview['endvol_input_ingredientview'].update(text=new_endVol)
while True: # Event Loop
event, values = window_ingredientview.read()
print(event, values)
if(event == 'startvol_input_ingredientview'):
window_ingredientview.Disable()
window_ingredientview['startvol_input_ingredientview'].update(text=Keypad())
window_ingredientview.Enable()
if(event == 'endvol_input_ingredientview'):
window_ingredientview.Disable()
window_ingredientview['endvol_input_ingredientview'].update(text=Keypad())
window_ingredientview.Enable()
if(event == 'save_ingredientview'):
new_name = re.sub('[#@,]','', values['name_input_ingredientview'])
if(mode == 'new' and len(new_name) > 0 and new_name is not None):
#Load in values
new_base = values['base_input_ingredientview']
new_family = values['family_input_ingredientview']
new_startVol = window_ingredientview['startvol_input_ingredientview'].GetText()
new_endVol = window_ingredientview['endvol_input_ingredientview'].GetText()
check = True
#Check for duplicate name
for element in listIngredients():
if(new_name == element.getName()):
check = False
#Ensure volumes are correct
if(new_startVol == '' or new_endVol == ''):
check = False
elif(int(new_startVol) < int(new_endVol)):
check = False
if(check):
print('SAVED')
createIngredient(new_name,new_base,new_family,new_startVol,new_endVol,new_active,new_position,False)
break
pass
if(mode == 'edit'):
#Load in values
new_base = values['base_input_ingredientview']
new_family = values['family_input_ingredientview']
new_startVol = window_ingredientview['startvol_input_ingredientview'].GetText()
new_endVol = window_ingredientview['endvol_input_ingredientview'].GetText()
check = True
#Check for duplicate name
for element in listIngredients():
if(element.getName() == new_name and new_name != ingredient.getName()):
check = False
#Ensure volumes are correct
if(int(new_startVol) < int(new_endVol)):
check = False
if(check):
#Load in values
ingredient.setName(new_name)
ingredient.setBase(new_base)
ingredient.setFamily(new_family)
ingredient.setStartVol(new_startVol)
ingredient.setEndVol(new_endVol)
break
if(event == 'exit_ingredientview'):
break
if event in (None, 'Exit'):
break
#Re-enable host window
window.Enable()
window.BringToFront()
window_ingredientview.close()
def StationsGUI(prev_window):
#Image translation
measurebar = Path('Image_Library/measurementbar.png')
#Layout for level indicator image
layout_measure = [
[sg.Text(text='100%',size=(5,1),font=('Helvetica', 8))],
[sg.Image(filename=measurebar,key='image_library',size=(128,140))],
[sg.Text(text='0%',size=(3,1),font=('Helvetica', 12))],
]
#Layouts for alcohol stations
layout_bar1 = [
[sg.Text(text='1',size=(2,1),font=('Helvetica', 12),key='bar1_num')],
[sg.ProgressBar(100, orientation='v', size=(10, 30), key='bar1_meter')],
[sg.Text(text='INSERT NAME HERE',size=(5,4),font=('Helvetica', 8),key='bar1_name',enable_events=True)],
]
layout_bar2 = [
[sg.Text(text='2',size=(1,1),font=('Helvetica', 12),key='bar2_num')],
[sg.ProgressBar(100, orientation='v', size=(10, 30), key='bar2_meter')],
[sg.Text(text='INSERT NAME HERE',size=(5,4),font=('Helvetica', 8),key='bar2_name',enable_events=True)],
]
layout_bar3 = [
[sg.Text(text='3',size=(1,1),font=('Helvetica', 12),key='bar3_num')],
[sg.ProgressBar(100, orientation='v', size=(10, 30), key='bar3_meter')],
[sg.Text(text='INSERT NAME HERE',size=(5,4),font=('Helvetica', 8),key='bar3_name',enable_events=True)],
]
layout_bar4 = [
[sg.Text(text='4',size=(1,1),font=('Helvetica', 12),key='bar4_num')],
[sg.ProgressBar(100, orientation='v', size=(10, 30), key='bar4_meter')],
[sg.Text(text='INSERT NAME HERE',size=(5,4),font=('Helvetica', 8),key='bar4_name',enable_events=True)],
]
layout_bar5 = [
[sg.Text(text='5',size=(1,1),font=('Helvetica', 12),key='bar5_num')],
[sg.ProgressBar(100, orientation='v', size=(10, 30), key='bar5_meter')],
[sg.Text(text='INSERT NAME HERE',size=(5,4),font=('Helvetica', 8),key='bar5_name',enable_events=True)],
]
layout_bar6 = [
[sg.Text(text='6',size=(1,1),font=('Helvetica', 12),key='bar6_num')],
[sg.ProgressBar(100, orientation='v', size=(10, 30), key='bar6_meter')],
[sg.Text(text='INSERT NAME HERE',size=(5,4),font=('Helvetica', 8),key='bar6_name',enable_events=True)],
]
layout_bar7 = [
[sg.Text(text='7',size=(1,1),font=('Helvetica', 12),key='bar7_num')],
[sg.ProgressBar(100, orientation='v', size=(10, 30), key='bar7_meter')],
[sg.Text(text='INSERT NAME HERE',size=(5,4),font=('Helvetica', 8),key='bar7_name',enable_events=True)],
]
layout_bar8 = [
[sg.Text(text='8',size=(1,1),font=('Helvetica', 12),key='bar8_num')],
[sg.ProgressBar(100, orientation='v', size=(10, 30), key='bar8_meter')],
[sg.Text(text='INSERT NAME HERE',size=(5,4),font=('Helvetica', 8),key='bar8_name',enable_events=True)],
]
layout_bar9 = [
[sg.Text(text='9',size=(1,1),font=('Helvetica', 12),key='bar9_num')],
[sg.ProgressBar(100, orientation='v', size=(10, 30), key='bar9_meter')],
[sg.Text(text='INSERT NAME HERE',size=(5,4),font=('Helvetica', 8),key='bar9_name',enable_events=True)],
]
layout_stations = [
[sg.Text(text='Barchine',size=(8,1),font=('Helvetica', 30),key='title_stations')],
[sg.Button('Home',font=('Helvetica', 15),key='Home_stations'),
sg.Button('Library',font=('Helvetica', 15),key='Library_stations'),
sg.Button('Ingredients',font=('Helvetica', 15),key='Ingredients_stations'),
sg.Button('Stations',font=('Helvetica', 15),key='Stations_stations',border_width=5,button_color=(None,'#60b551')),
sg.Button('Stats',font=('Helvetica', 15),key='Stats_stations'),
sg.Button('Settings',font=('Helvetica', 15),key='Settings_stations')],
[sg.Text(text='Select Station to Edit',size=(30,1),font=('Helvetica', 20),key='subtitle_stations')
,sg.Button('View Mixers',key='station_menu_selector',size=(10,1),font=('Helvetica', 15))],
[sg.Column(layout_measure),sg.Column(layout_bar1,key='bar1_column',visible=True),sg.Column(layout_bar2,key='bar2_column',visible=True),
sg.Column(layout_bar3,key='bar3_column',visible=True),sg.Column(layout_bar4,key='bar4_column',visible=True),
sg.Column(layout_bar5,key='bar5_column',visible=True),sg.Column(layout_bar6,key='bar6_column',visible=True),
sg.Column(layout_bar7,key='bar7_column',visible=True),sg.Column(layout_bar8,key='bar8_column',visible=True),
sg.Column(layout_bar9,key='bar9_column',visible=True)]
]
#Launch window
window_stations = sg.Window('Barchine', layout_stations, keep_on_top=True,size=(RESOLUTION.get('x'),RESOLUTION.get('y'))).Finalize()
if(FULLSCREEN):
window_stations.Maximize()
#Close Previous window
if(prev_window is not None):
prev_window.close()
#Pre-unload and reload all stations to remove visibility offset bug
for i in range(1,Bartender.getAlcCount()+1):
window_stations['bar'+str(i)+'_num'].update(visible=False)
window_stations['bar'+str(i)+'_meter'].update(visible=False)
window_stations['bar'+str(i)+'_name'].update(visible=False)
for i in range(1,Bartender.getAlcCount()+1):
window_stations['bar'+str(i)+'_num'].update(visible=True)
window_stations['bar'+str(i)+'_meter'].update(visible=True)
window_stations['bar'+str(i)+'_name'].update(visible=True)
#Draw the currently loaded stations
startIndex = 0
endIndex = 0
offset = 0
#Setup variables for counting alcohol
if(window_stations['station_menu_selector'].GetText() == 'View Mixers'):
startIndex = 0
endIndex = Bartender.getAlcCount()
#Set up variables for counting mixers
if(window_stations['station_menu_selector'].GetText() == 'View Alcohol'):
startIndex = Bartender.getAlcCount()
endIndex = Bartender.getMaxPos()
offset = Bartender.getAlcCount()
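        #Mixer stations sit after the alcohol stations in the shelf array,
        #so display indices are shifted back by the alcohol count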
for i in range(startIndex,endIndex):
if(Bartender.getShelf()[i]!=None):
item = Bartender.getShelf()[i]
window_stations['bar'+str(i+1-offset)+'_name'].update(value=item.getName())
window_stations['bar'+str(i+1-offset)+'_meter'].update_bar(item.getEndVol(),item.getStartVol())
else:
window_stations['bar'+str(i+1-offset)+'_name'].update(value='EMPTY')
window_stations['bar'+str(i+1-offset)+'_meter'].update_bar(0,100)
#Increase offset if counting mixers
if(startIndex > Bartender.getAlcCount()):
offset+=1
#Hide/Show leftover stations if unused (mixers)
if(window_stations['station_menu_selector'].GetText() == 'View Alcohol'):
for i in range(Bartender.getMixCount()+1,Bartender.getAlcCount()+1):
window_stations['bar'+str(i)+'_meter'].update(visible=False)
#Reveal hidden stations for alcohol
else:
for i in range(Bartender.getMixCount()+1,Bartender.getAlcCount()+1):
window_stations['bar'+str(i)+'_meter'].update(visible=True)
chosen = None
update = False
while True: # Event Loop
event, values = window_stations.read()
print(event, values)
#Check for menu selection
if(event == 'Home_stations'):
contextSwitcher('Stations_stations','Home_stations',window_stations)
if(event == 'Library_stations'):
contextSwitcher('Stations_stations','Library_stations',window_stations)
if(event == 'Ingredients_stations'):
contextSwitcher('Stations_stations','Ingredients_stations',window_stations)
if(event == 'Stats_stations'):
contextSwitcher('Stations_stations','Stats_stations',window_stations)
if(event == 'Settings_stations'):
contextSwitcher('Stations_stations','Settings_stations',window_stations)
#Check for station menu selector
if(event == 'station_menu_selector'):
#If currently looking at alcohol stations, swap to mixers
if(window_stations['station_menu_selector'].GetText() == 'View Mixers'):
window_stations['station_menu_selector'].update(text='View Alcohol')
else:
window_stations['station_menu_selector'].update(text='View Mixers')
update = True
#Search for the selected station
offset = Bartender.getAlcCount()
for i in range(1,Bartender.getMaxPos()):
#Check for currently active station menu
if(window_stations['station_menu_selector'].GetText() == 'View Mixers' and i < Bartender.getAlcCount()+1):
if(event == 'bar'+str(i)+'_name'):
if(Bartender.getShelf()[i-1] == None):
StationsView(str(i),None,'Alcohol',window_stations)
else:
StationsView(str(i),Bartender.getShelf()[i-1],'Alcohol',window_stations)
#Update Display
update = True
if(window_stations['station_menu_selector'].GetText() == 'View Alcohol' and i < Bartender.getMixCount()+1):
if(event == 'bar'+str(i)+'_name'):
if(Bartender.getShelf()[i-1+offset] == None):
StationsView(str(i+offset),None,'Mixer',window_stations)
else:
                        StationsView(str(i+offset),Bartender.getShelf()[i-1+offset],'Mixer',window_stations)
#Update Display
update = True
#Update Display
if(update):
#Draw the currently loaded stations
startIndex = 0
endIndex = 0
offset = 0
#Setup variables for counting alcohol
if(window_stations['station_menu_selector'].GetText() == 'View Mixers'):
startIndex = 0
endIndex = Bartender.getAlcCount()
#Set up variables for counting mixers
if(window_stations['station_menu_selector'].GetText() == 'View Alcohol'):
startIndex = Bartender.getAlcCount()
endIndex = Bartender.getMaxPos()
offset = Bartender.getAlcCount()
for i in range(startIndex,endIndex):
            if(Bartender.getShelf()[i] is not None):
item = Bartender.getShelf()[i]
window_stations['bar'+str(i+1-offset)+'_name'].update(value=item.getName())
window_stations['bar'+str(i+1-offset)+'_meter'].update_bar(item.getEndVol(),item.getStartVol())
else:
window_stations['bar'+str(i+1-offset)+'_name'].update(value='EMPTY')
window_stations['bar'+str(i+1-offset)+'_meter'].update_bar(0,100)
#Hide/Show leftover stations if unused (mixers)
if(window_stations['station_menu_selector'].GetText() == 'View Alcohol'):
for i in range(Bartender.getMixCount()+1,Bartender.getAlcCount()+1):
window_stations['bar'+str(i)+'_num'].update(visible=False)
window_stations['bar'+str(i)+'_meter'].update(visible=False)
window_stations['bar'+str(i)+'_name'].update(visible=False)
#Reveal hidden stations for alcohol
else:
for i in range(Bartender.getMixCount()+1,Bartender.getAlcCount()+1):
window_stations['bar'+str(i)+'_num'].update(visible=True)
window_stations['bar'+str(i)+'_meter'].update(visible=True)
window_stations['bar'+str(i)+'_name'].update(visible=True)
update=False
if event in (None, 'Exit'):
window_stations.close()
break
#Close remaining window
window_stations.close()
def StationsView(station,ingredient,family,window):
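    #Modal dialog for assigning an ingredient to a station
    #station: 1-based station number as a string, ingredient: the Ingredient
    #currently at the station (or None if empty), family: 'Alcohol' or 'Mixer',
    #window: host window, disabled while this dialog is open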
#Temporarily disable host window
window.Disable()
available = ['Empty']
for element in listIngredients():
if not element.isActive() and element.getFamily() == family:
available.append(element.getName())
layout_stationsview = [
[sg.Text('Replace Station ',key='title_stationsview',font=('Helvetica', 30)),sg.Text(station,key='title_num_stationsview',font=('Helvetica', 30))],
[sg.Text('New Ingredient: ',key='ingredient_text_stationsview',font=('Helvetica', 15))
,sg.OptionMenu(available,key='ingredient_input_stationsview')],
[sg.Button('Save',font=('Helvetica', 15),key='save_stationsview'),sg.Button('Exit',font=('Helvetica', 15),key='exit_stationsview')]
]
#Launch window
window_stationsview = sg.Window('Barchine', layout_stationsview,keep_on_top=True,no_titlebar=True).Finalize()
window_stationsview.BringToFront()
#Check for preconditions
if(ingredient is not None):
window_stationsview['ingredient_input_stationsview'].update(value=ingredient.getName())
while True: # Event Loop
event, values = window_stationsview.read()
print(event, values)
if(event == 'save_stationsview'):
#Check if field is set to 'Empty'
if(values['ingredient_input_stationsview'] != 'Empty'):
#Get the replacement ingredient and update fields
for element in listIngredients():
                    if(element.getName() == values['ingredient_input_stationsview']):
                        element.setActive(True)
                        element.setPosition(int(station)-1)
                        break
#If exists, update old ingredient
if(ingredient is not None):
for element in listIngredients():
if(element.getName() == ingredient.getName()):
element.setActive(False)
element.setPosition(-1)
break
if(event == 'exit_stationsview'):
break
if event in (None, 'Exit'):
break
#Re-enable host window
window.Enable()
window.BringToFront()
window_stationsview.close()
def StatsGUI(prev_window):
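    #Stats page: navigation bar plus a placeholder body for future statistics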
layout_stats = [
[sg.Text(text='Barchine',size=(8,1),font=('Helvetica', 30),key='title_stats')],
[sg.Button('Home',font=('Helvetica', 15),key='Home_stats'),
sg.Button('Library',font=('Helvetica', 15),key='Library_stats'),
sg.Button('Ingredients',font=('Helvetica', 15),key='Ingredients_stats'),
sg.Button('Stations',font=('Helvetica', 15),key='Stations_stats'),
sg.Button('Stats',font=('Helvetica', 15),key='Stats_stats',border_width=5,button_color=(None,'#60b551')),
sg.Button('Settings',font=('Helvetica', 15),key='Settings_stats')],
[sg.Text(text='Stats Page',size=(17,1),font=('Helvetica', 20),key='subtitle_stats')]
]
#Launch window
window_stats = sg.Window('Barchine', layout_stats,keep_on_top=True,size=(RESOLUTION.get('x'),RESOLUTION.get('y'))).Finalize()
if(FULLSCREEN):
window_stats.Maximize()
#Close Previous window
if(prev_window is not None):
prev_window.close()
while True: # Event Loop
event, values = window_stats.read()
print(event, values)
#Check for menu selection
if(event == 'Home_stats'):
contextSwitcher('Stats_stats','Home_stats',window_stats)
if(event == 'Library_stats'):
contextSwitcher('Stats_stats','Library_stats',window_stats)
if(event == 'Ingredients_stats'):
contextSwitcher('Stats_stats','Ingredients_stats',window_stats)
if(event == 'Stations_stats'):
contextSwitcher('Stats_stats','Stations_stats',window_stats)
if(event == 'Settings_stats'):
contextSwitcher('Stats_stats','Settings_stats',window_stats)
if event in (None, 'Exit'):
window_stats.close()
break
#Close remaining window
window_stats.close()
def SettingsGUI(prev_window):
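    #Settings page: navigation bar plus library saving and base-ingredient reload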
layout_settings = [
[sg.Text(text='Barchine',size=(8,1),font=('Helvetica', 30),key='title_settings')],
[sg.Button('Home',font=('Helvetica', 15),key='Home_settings'),
sg.Button('Library',font=('Helvetica', 15),key='Library_settings'),
sg.Button('Ingredients',font=('Helvetica', 15),key='Ingredients_settings'),
sg.Button('Stations',font=('Helvetica', 15),key='Stations_settings'),
sg.Button('Stats',font=('Helvetica', 15),key='Stats_settings'),
sg.Button('Settings',font=('Helvetica', 15),key='Settings_settings',border_width=5,button_color=(None,'#60b551'))],
[sg.Text(text='Settings Page',size=(17,1),font=('Helvetica', 20),key='subtitle_settings')],
[sg.Button('Save',key='save_settings',font=('Helvetica', 20))
,sg.Button('Reload Bases',key='reload_bases_settings',font=('Helvetica', 20))]
]
#Launch window
window_settings = sg.Window('Barchine', layout_settings,keep_on_top=True,size=(RESOLUTION.get('x'),RESOLUTION.get('y'))).Finalize()
if(FULLSCREEN):
window_settings.Maximize()
#Close Previous window
if(prev_window is not None):
prev_window.close()
while True: # Event Loop
event, values = window_settings.read()
print(event, values)
#Check for menu selection
if(event == 'Home_settings'):
contextSwitcher('Settings_settings','Home_settings',window_settings)
if(event == 'Library_settings'):
contextSwitcher('Settings_settings','Library_settings',window_settings)
if(event == 'Ingredients_settings'):
contextSwitcher('Settings_settings','Ingredients_settings',window_settings)
if(event == 'Stations_settings'):
contextSwitcher('Settings_settings','Stations_settings',window_settings)
if(event == 'Stats_settings'):
contextSwitcher('Settings_settings','Stats_settings',window_settings)
if(event == 'save_settings'):
print('Saving libraries')
storeIngredientLibrary()
storeDrinkLibrary()
print('Saved')
if(event == 'reload_bases_settings'):
restoreBases()
if event in (None, 'Exit'):
window_settings.close()
break
#Close remaining window
window_settings.close()
#Launch default home menu
HomeGUI(None) | mit | -2,645,147,178,557,540,400 | 41.298989 | 167 | 0.569505 | false |
jmuhlich/indra | indra/preassembler/hierarchy_manager.py | 1 | 11321 | from __future__ import absolute_import, print_function, unicode_literals
from builtins import dict, str
import os
import rdflib
import logging
try:
from functools import lru_cache
except ImportError:
from functools32 import lru_cache
logger = logging.getLogger('hierarchy_manager')
class HierarchyManager(object):
"""Store hierarchical relationships between different types of entities.
Used to store, e.g., entity hierarchies (proteins and protein families)
and modification hierarchies (serine phosphorylation vs. phosphorylation).
Parameters
----------
rdf_file : string
Path to the RDF file containing the hierarchy.
Attributes
----------
graph : instance of `rdflib.Graph`
The RDF graph containing the hierarchy.
"""
prefixes = """
PREFIX rn: <http://sorger.med.harvard.edu/indra/relations/>
"""
def __init__(self, rdf_file, build_closure=True):
"""Initialize with the path to an RDF file"""
self.graph = rdflib.Graph()
self.graph.parse(rdf_file, format='nt')
self.isa_closure = {}
self.partof_closure = {}
self.components = {}
if build_closure:
self.build_transitive_closures()
# Build reverse lookup dict from the entity hierarchy
self._children = {}
logger.info('Generating reverse lookup table for families')
all_children = set(self.isa_closure.keys()).union(
self.partof_closure.keys())
for child in all_children:
parents = self.get_parents(child)
for parent in parents:
children_list = self._children.get(parent, [])
children_list.append(child)
self._children[parent] = children_list
def build_transitive_closures(self):
"""Build the transitive closures of the hierarchy.
This method constructs dictionaries which contain terms in the
hierarchy as keys and either all the "isa+" or "partof+" related terms
as values.
"""
component_counter = 0
for rel, tc_dict in (('isa', self.isa_closure),
('partof', self.partof_closure)):
qstr = self.prefixes + """
SELECT ?x ?y WHERE {{
{{?x rn:{0}+ ?y .}}
}}
""".format(rel)
res = self.graph.query(qstr)
for x, y in res:
xs = x.toPython()
ys = y.toPython()
try:
tc_dict[xs].append(ys)
except KeyError:
tc_dict[xs] = [ys]
xcomp = self.components.get(xs)
ycomp = self.components.get(ys)
if xcomp is None:
if ycomp is None:
# Neither x nor y are in a component so we start a
# new component and assign x and y to the same
# component
self.components[xs] = component_counter
self.components[ys] = component_counter
component_counter += 1
else:
# Because y is already part of an existing component
# we assign its component to x
self.components[xs] = ycomp
else:
if ycomp is None:
# Because x is already part of an existing component
# we assign its component to y
self.components[ys] = xcomp
else:
# This is a special case in which both x and y are
# parts of components
# If they are in the same component then there's
# nothing further to do
if xcomp == ycomp:
continue
else:
remove_component = max(xcomp, ycomp)
joint_component = min(xcomp, ycomp)
for k, v in self.components.items():
if v == remove_component:
self.components[k] = joint_component
@lru_cache(maxsize=100000)
def find_entity(self, x):
"""
Get the entity that has the specified name (or synonym).
Parameters
----------
x : string
Name or synonym for the target entity.
"""
qstr = self.prefixes + """
SELECT ?x WHERE {{
?x rn:hasName "{0}" .
}}
""".format(x)
res = self.graph.query(qstr)
if list(res):
en = list(res)[0][0].toPython()
return en
else:
return None
def isa(self, ns1, id1, ns2, id2):
"""Indicate whether one entity has an "isa" relationship to another.
Parameters
----------
ns1 : string
Namespace code for an entity.
id1 : string
URI for an entity.
ns2 : string
Namespace code for an entity.
id2 : string
URI for an entity.
Returns
-------
        bool
            True if the first entity has an "isa" relationship with the
            second, either directly or through a series of intermediates;
            False otherwise.
"""
        # If id2 is None (which also covers both being None), it is an
        # "isa" relationship by definition:
        if id2 is None:
            return True
        # If only id1 is None, then it cannot be isa
        elif id1 is None:
            return False
if self.isa_closure:
term1 = self.get_uri(ns1, id1)
term2 = self.get_uri(ns2, id2)
ec = self.isa_closure.get(term1)
if ec is not None and term2 in ec:
return True
else:
return False
else:
return self.query_rdf(id1, 'rn:isa+', id2)
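    # Example (hypothetical identifiers; assumes the loaded entity hierarchy
    # relates them):
    #   entity_hierarchy.isa('HGNC', 'BRAF', 'BE', 'RAF')  # True if BRAF isa RAF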
def partof(self, ns1, id1, ns2, id2):
"""Indicate whether one entity is physically part of another.
Parameters
----------
ns1 : string
Namespace code for an entity.
id1 : string
URI for an entity.
ns2 : string
Namespace code for an entity.
id2 : string
URI for an entity.
Returns
-------
        bool
            True if the first entity has a "partof" relationship with the
            second, either directly or through a series of intermediates;
            False otherwise.
"""
        # If id2 is None (which also covers both being None), it is a
        # "partof" relationship by definition:
        if id2 is None:
            return True
        # If only id1 is None, then it cannot be partof
        elif id1 is None:
            return False
if self.partof_closure:
term1 = self.get_uri(ns1, id1)
term2 = self.get_uri(ns2, id2)
ec = self.partof_closure.get(term1)
if ec is not None and term2 in ec:
return True
else:
return False
else:
return self.query_rdf(id1, 'rn:partof+', id2)
def get_parents(self, uri, type='all'):
"""Return parents of a given entry.
Parameters
----------
uri : str
The URI of the entry whose parents are to be returned. See the
get_uri method to construct this URI from a name space and id.
type : str
'all': return all parents irrespective of level;
'immediate': return only the immediate parents;
'top': return only the highest level parents
"""
immediate_parents = set(self.isa_closure.get(uri, [])).union(
set(self.partof_closure.get(uri, [])))
if type == 'immediate':
return immediate_parents
all_parents = set()
for parent in immediate_parents:
grandparents = self.get_parents(parent, type='all')
all_parents = all_parents.union(grandparents)
all_parents = all_parents.union(immediate_parents)
if type == 'all':
return all_parents
else:
top_parents = set()
for parent in all_parents:
if not self.get_parents(parent, type='immediate'):
top_parents.add(parent)
return top_parents
def get_children(self, uri):
"""Return all (not just immediate) children of a given entry.
Parameters
----------
uri : str
The URI of the entry whose children are to be returned. See the
get_uri method to construct this URI from a name space and id.
"""
children = self._children.get(uri, [])
return children
@lru_cache(maxsize=100000)
def query_rdf(self, id1, rel, id2):
term1 = self.find_entity(id1)
term2 = self.find_entity(id2)
qstr = self.prefixes + """
SELECT (COUNT(*) as ?s) WHERE {{
<{}> {} <{}> .
}}
""".format(term1, rel, term2)
res = self.graph.query(qstr)
count = [r[0] for r in res][0]
if count.toPython() == 1:
return True
else:
return False
@staticmethod
def get_uri(ns, id):
if ns == 'HGNC':
return 'http://identifiers.org/hgnc.symbol/' + id
elif ns == 'UP':
return 'http://identifiers.org/uniprot/' + id
elif ns == 'BE' or ns == 'INDRA':
return 'http://sorger.med.harvard.edu/indra/entities/' + id
else:
raise ValueError('Unknown namespace %s' % ns)
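    # e.g. get_uri('HGNC', 'BRAF') -> 'http://identifiers.org/hgnc.symbol/BRAF'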
# Load the default entity and modification hierarchies
entity_file_path = os.path.join(os.path.dirname(__file__),
'../resources/entity_hierarchy.rdf')
mod_file_path = os.path.join(os.path.dirname(__file__),
'../resources/modification_hierarchy.rdf')
act_file_path = os.path.join(os.path.dirname(__file__),
'../resources/activity_hierarchy.rdf')
ccomp_file_path = os.path.join(os.path.dirname(__file__),
'../resources/cellular_component_hierarchy.rdf')
"""Default entity hierarchy loaded from the RDF file at
`resources/entity_hierarchy.rdf`."""
entity_hierarchy = HierarchyManager(entity_file_path, build_closure=True)
"""Default modification hierarchy loaded from the RDF file at
`resources/modification_hierarchy.rdf`."""
modification_hierarchy = HierarchyManager(mod_file_path, build_closure=True)
"""Default activity hierarchy loaded from the RDF file at
`resources/activity_hierarchy.rdf`."""
activity_hierarchy = HierarchyManager(act_file_path, build_closure=True)
"""Default cellular_component hierarchy loaded from the RDF file at
`resources/cellular_component_hierarchy.rdf`."""
ccomp_hierarchy = HierarchyManager(ccomp_file_path, build_closure=False)
hierarchies = {'entity': entity_hierarchy,
'modification': modification_hierarchy,
'activity': activity_hierarchy,
'cellular_component': ccomp_hierarchy}
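# Minimal usage sketch (identifiers are illustrative):
#   uri = HierarchyManager.get_uri('HGNC', 'BRAF')
#   top_parents = hierarchies['entity'].get_parents(uri, type='top')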
| bsd-2-clause | 83,850,203,100,852,430 | 35.756494 | 78 | 0.536083 | false |
wisdomchuck/TestBot | utils/fun/lists.py | 1 | 2804 | # Image urls for the psat command
psat_memes = [
"http://i.imgur.com/5eJ5DbU.jpg",
"http://i.imgur.com/HBDnWVc.jpg",
"http://i.imgur.com/RzZlq2j.jpg",
"http://i.imgur.com/mVRNUIG.jpg",
"http://i.imgur.com/OvOmC6g.jpg",
"http://i.imgur.com/QqlSxaZ.png",
"http://i.imgur.com/finNuzx.jpg",
"http://i.imgur.com/XB2nBmz.png",
"http://i.imgur.com/7sCwNXl.jpg",
"http://i.imgur.com/caw6Pao.png",
"http://i.imgur.com/GwV0JYL.png"
]
# Response for the 8ball command
magic_conch_shell = [
"It is certain",
"It is decidedly so",
"Without a doubt",
"Yes definitely",
"You may rely on it",
"As I see it yes",
"Most likely",
"Outlook good",
"Yes",
"Signs point to yes",
"Reply hazy try again",
"Ask again later",
"Better not tell you now",
"Cannot predict now",
"Concentrate and ask again",
"Don't count on it",
"My reply is no",
"My sources say no",
"Outlook not so good",
"Very doubtful"
]
# Insults for the insult command
insults = [
"is a fucking pedophile",
"is a nigger",
"is so insecure about his penis size because it is smaller than a babies",
"is just a fucking sterotypical 12 year old saying shit like \"I fucked your mom\" and other shit",
"is a fucking disguisting, disgraceful, ignorant, pathetic, and discriminative weeaboo!",
"is a child molester",
"has a kink with 80 year old men",
"is the type of person who loves to fap to little girls",
"has no other purpose in life other than to be retarded and waste people's time",
"needs to kill itself",
"is the definition of faggot",
"has a gamertag, and it is I_Like_To_Rape_Children",
"loves to fap to discord bots",
"wants the d",
"has no life",
"is a furry",
"is a furfag",
"is a worthless piece of shit",
"is an 80 year old man",
"lost his virginity to his grandpa",
"supports abortion",
"is a cuntrag",
"is on the sex offender list"
]
# Drunk lines for the actdrunk command
drunkaf = [
"UDNDUNDUNDUNDUDNUDNDUNDUNDUNDUNDUNDUDNUDNDUNDUNDUNDUNDUNDUNDUNDNUDNDUN",
"AMERICAN IDIOTS YAAAS",
"HEH HEH HEH HEH IM SO FUKED UP LOL",
"lol Ill fuk u up n@4f3 fucjing fite me4",
"fite me u lil fuck",
"i have somethin to tell you: fedc",
"weeeeeeew",
"\*falls*",
"lol wana fuk some suc tonight #5SdE2@"
]
# Image urls for the honk command
honkhonkfgt = [
"https://i.imgur.com/c53XQCI.gif",
"https://i.imgur.com/ObWBP14.png",
"https://i.imgur.com/RZP2tB4.jpg",
"https://i.imgur.com/oxQ083P.gif",
"https://i.imgur.com/byBB7ln.jpg",
"https://i.imgur.com/NvUiLGG.gif",
"https://i.imgur.com/QDyvO4x.jpg",
"https://i.imgur.com/HtrRYSS.png",
"https://i.imgur.com/bvrFQnX.jpg"
]
| gpl-3.0 | -5,187,740,992,660,968,000 | 29.813187 | 103 | 0.634807 | false |
vikingco/django-states | src/states/south_migrations/0006_group_action.py | 1 | 4104 |
from south.db import db
from django.db import models
from states.models import *
class Migration:
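    #Forwards drops the StateReport.group_action column; backwards restores it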
def forwards(self, orm):
# Deleting field 'StateReport.group_action'
db.delete_column('states_statereport', 'group_action')
def backwards(self, orm):
# Adding field 'StateReport.group_action'
db.add_column('states_statereport', 'group_action', orm['states.statereport:group_action'])
models = {
'contenttypes.contenttype': {
'Meta': {'unique_together': "(('app_label', 'model'),)", 'db_table': "'django_content_type'"},
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
'states.state': {
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'object_id': ('django.db.models.fields.PositiveIntegerField', [], {'null': 'True'}),
'state_id': ('django.db.models.fields.CharField', [], {'default': "'0'", 'max_length': '32'})
},
'states.statelog': {
'current_state_id': ('django.db.models.fields.CharField', [], {'default': "'-1'", 'max_length': '32'}),
'from_state_id': ('django.db.models.fields.CharField', [], {'default': "'-1'", 'max_length': '32'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'on': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'state': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['states.State']"})
},
'states.statereport': {
'ascending': ('django.db.models.fields.BooleanField', [], {'default': 'True', 'blank': 'True'}),
'columns': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['states.StateReportColumn']"}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'items': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['states.StateReportItem']"}),
'menu_order': ('django.db.models.fields.SmallIntegerField', [], {'default': '-1'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'order_column': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'sorted_report'", 'to': "orm['states.StateReportColumn']"})
},
'states.statereportcolumn': {
'column_name': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'expression': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'order': ('django.db.models.fields.SmallIntegerField', [], {'default': '0', 'blank': 'True'}),
'sorting_key': ('django.db.models.fields.CharField', [], {'max_length': '255', 'blank': 'True'})
},
'states.statereportitem': {
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'kwargs': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '200', 'blank': 'True'}),
'state_id': ('django.db.models.fields.CharField', [], {'default': "'0'", 'max_length': '32'})
}
}
complete_apps = ['states']
| bsd-3-clause | 8,244,591,785,139,202,000 | 58.478261 | 156 | 0.552144 | false |
mseeger/apbsint | python/cython/setup.py | 1 | 6137 | #! /usr/bin/env python
# Build ApBsInT extension modules (C++ code). Use the '--workaround' option
# in order to build the workaround code (this requires private code that is
# not contained in the public repo).
from distutils.core import setup
#from distutils.extension import Extension
from Cython.Distutils import build_ext
from Cython.Distutils.extension import Extension
import sys
import numpy
# System profile (has to be edited)
import apbsint_profile as aprof
work_around = False
if '--workaround' in sys.argv:
work_around = True
sys.argv.remove('--workaround')
# Basic information passed to compiler/linker
# NOTE: Do not change the present file. Enter system-specific information
# in apbsint_profile.py.
# - df_include_dirs: Include path(s). The repo root must be in there
# - df_library_dirs: Library path(s) for your system
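# A minimal apbsint_profile.py could look like this (paths are placeholders;
# both functions may return a single string or a list of strings):
#     def get_include_dirs():
#         return '/path/to/apbsint'          # repo root
#     def get_library_dirs():
#         return ['/usr/lib', '/usr/local/lib']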
df_include_dirs = [numpy.get_include()]
tlst = aprof.get_include_dirs()
if type(tlst) == str:
df_include_dirs.append(tlst)
else:
df_include_dirs.extend(tlst)
nwa_include_dirs = df_include_dirs[:]
df_define_macros = [('HAVE_NO_BLAS', None), ('HAVE_FORTRAN', None)]
nwa_define_macros = df_define_macros[:]
df_libraries = ['m']
nwa_libraries = df_libraries[:]
tlst = aprof.get_library_dirs()
if type(tlst) == str:
df_library_dirs = [tlst]
else:
df_library_dirs = tlst
nwa_library_dirs = df_library_dirs[:]
if work_around:
df_define_macros.extend([('HAVE_LIBGSL', None),
('HAVE_WORKAROUND', None)])
df_libraries.append('gsl')
# eptools_ext: Main API to C++ functions
eptools_ext_sources = [
'eptools_ext.pyx',
'base/lhotse/global.cc',
'base/lhotse/StandardException.cc',
'base/lhotse/FileUtils.cc',
'base/lhotse/IntVal.cc',
'base/lhotse/Interval.cc',
'base/lhotse/Range.cc',
'base/lhotse/optimize/OneDimSolver.cc',
'base/src/eptools/FactorizedEPDriver.cc',
'base/src/eptools/potentials/EPScalarPotential.cc',
'base/src/eptools/potentials/DefaultPotManager.cc',
'base/src/eptools/potentials/EPPotentialFactory.cc',
'base/src/eptools/potentials/EPPotentialNamedFactory.cc',
'base/src/eptools/potentials/PotManagerFactory.cc',
'base/src/eptools/potentials/SpecfunServices.cc',
'base/src/eptools/potentials/quad/QuadPotProximalNewton.cc',
'base/src/eptools/potentials/quad/EPPotQuadLaplaceApprox.cc',
'base/src/eptools/potentials/quad/EPPotPoissonExpRate.cc',
'base/src/eptools/wrap/eptools_helper_basic.cc',
'base/src/eptools/wrap/eptools_helper.cc',
'base/src/eptools/wrap/eptwrap_choldnrk1.cc',
'base/src/eptools/wrap/eptwrap_choluprk1.cc',
'base/src/eptools/wrap/eptwrap_epupdate_parallel.cc',
'base/src/eptools/wrap/eptwrap_epupdate_single.cc',
'base/src/eptools/wrap/eptwrap_fact_compmarginals.cc',
'base/src/eptools/wrap/eptwrap_fact_compmaxpi.cc',
'base/src/eptools/wrap/eptwrap_fact_sequpdates.cc',
'base/src/eptools/wrap/eptwrap_getpotid.cc',
'base/src/eptools/wrap/eptwrap_getpotname.cc',
'base/src/eptools/wrap/eptwrap_potmanager_isvalid.cc',
'base/src/eptools/wrap/eptwrap_debug_castannobj.cc'
]
if work_around:
eptools_ext_sources.extend(
['base/lhotse/specfun/Specfun.cc',
'base/src/eptools/potentials/quad/AdaptiveQuadPackServices.cc',
'base/src/eptools/potentials/quad/AdaptiveQuadPackDebugServices.cc']
)
# apbtest_ext: API for test code (excluding the workaround)
apbtest_ext_sources = [
'apbtest_ext.pyx',
'base/lhotse/global.cc',
'base/lhotse/StandardException.cc',
'base/lhotse/FileUtils.cc',
'base/lhotse/IntVal.cc',
'base/lhotse/Interval.cc',
'base/lhotse/Range.cc',
'base/lhotse/optimize/OneDimSolver.cc',
'base/src/eptools/potentials/SpecfunServices.cc'
]
# apbtest_workaround_ext: API for test code (workaround part)
apbtest_workaround_ext_sources = [
'apbtest_workaround_ext.pyx',
'base/lhotse/global.cc',
'base/lhotse/StandardException.cc',
'base/lhotse/FileUtils.cc',
'base/lhotse/IntVal.cc',
'base/lhotse/Interval.cc',
'base/lhotse/Range.cc',
'base/lhotse/specfun/Specfun.cc'
]
# ptannotate_ext: Potential annotation classes
ptannotate_ext_sources = [
'ptannotate_ext_workaround.pyx' if work_around else 'ptannotate_ext.pyx',
'base/lhotse/global.cc',
'base/lhotse/StandardException.cc',
'base/lhotse/FileUtils.cc',
'base/lhotse/IntVal.cc',
'base/lhotse/Interval.cc',
'base/lhotse/Range.cc'
]
if work_around:
ptannotate_ext_sources.extend(
['base/src/eptools/potentials/quad/AdaptiveQuadPackServices.cc',
'base/src/eptools/potentials/quad/AdaptiveQuadPackDebugServices.cc']
)
df_ext_modules = [
Extension(
'eptools_ext',
sources = eptools_ext_sources,
include_dirs = df_include_dirs,
define_macros = df_define_macros,
libraries = df_libraries,
library_dirs = df_library_dirs,
language = 'c++'
),
    # NOTE: apbtest_ext has to be built in default mode, even if the workaround
# is used. This is because we use apbtest_ext vs. apbtest_workaround_ext
# for comparison tests.
Extension(
'apbtest_ext',
sources = apbtest_ext_sources,
include_dirs = nwa_include_dirs,
define_macros = nwa_define_macros,
libraries = nwa_libraries,
library_dirs = nwa_library_dirs,
language = 'c++'
),
Extension(
'ptannotate_ext',
sources = ptannotate_ext_sources,
include_dirs = df_include_dirs,
define_macros = df_define_macros,
libraries = df_libraries,
library_dirs = df_library_dirs,
language = 'c++'
)
]
if work_around:
df_ext_modules.append(
Extension(
'apbtest_workaround_ext',
sources = apbtest_workaround_ext_sources,
include_dirs = df_include_dirs,
define_macros = df_define_macros,
libraries = df_libraries,
library_dirs = df_library_dirs,
language = 'c++'
)
)
setup(
cmdclass = {'build_ext': build_ext},
ext_modules = df_ext_modules
)
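# Typical build invocation (run from this directory); '--workaround' is
# stripped from sys.argv above, before distutils parses the arguments:
#   python setup.py build_ext --inplace [--workaround]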
| bsd-3-clause | -873,107,301,675,034,800 | 32.906077 | 79 | 0.677856 | false |