The rows in this dump follow the dataset schema below. Each row's "text" field (the full source file) is printed in place, preceded by its repo_name/path/copies/size fields and followed by its remaining metadata fields.

| column | dtype | range / values |
| --- | --- | --- |
| repo_name | string | lengths 6 to 61 |
| path | string | lengths 4 to 230 |
| copies | string | lengths 1 to 3 |
| size | string | lengths 4 to 6 |
| text | string | lengths 1.01k to 850k |
| license | string | 15 classes |
| hash | int64 | -9,220,477,234,079,998,000 to 9,219,060,020B |
| line_mean | float64 | 11.6 to 96.6 |
| line_max | int64 | 32 to 939 |
| alpha_frac | float64 | 0.26 to 0.9 |
| autogenerated | bool | 1 class |
| ratio | float64 | 1.62 to 6.1 |
| config_test | bool | 2 classes |
| has_no_keywords | bool | 2 classes |
| few_assignments | bool | 1 class |
repo_name: Phobia0ptik/ThinkStats2 | path: code/populations.py | copies: 68 | size: 2609

"""This file contains code used in "Think Stats",
by Allen B. Downey, available from greenteapress.com
Copyright 2010 Allen B. Downey
License: GNU GPLv3 http://www.gnu.org/licenses/gpl.html
"""
from __future__ import print_function
import csv
import logging
import sys
import numpy as np
import pandas
import thinkplot
import thinkstats2
def ReadData(filename='PEP_2012_PEPANNRES_with_ann.csv'):
"""Reads filename and returns populations in thousands
filename: string
returns: pandas Series of populations in thousands
"""
df = pandas.read_csv(filename, header=None, skiprows=2,
encoding='iso-8859-1')
populations = df[7]
populations.replace(0, np.nan, inplace=True)
return populations.dropna()
def MakeFigures():
"""Plots the CDF of populations in several forms.
On a log-log scale the tail of the CCDF looks like a straight line,
which suggests a Pareto distribution, but that turns out to be misleading.
On a log-x scale the distribution has the characteristic sigmoid of
a lognormal distribution.
The normal probability plot of log(sizes) confirms that the data fit the
lognormal model very well.
Many phenomena that have been described with Pareto models can be described
as well, or better, with lognormal models.
"""
pops = ReadData()
print('Number of cities/towns', len(pops))
log_pops = np.log10(pops)
cdf = thinkstats2.Cdf(pops, label='data')
cdf_log = thinkstats2.Cdf(log_pops, label='data')
# pareto plot
xs, ys = thinkstats2.RenderParetoCdf(xmin=5000, alpha=1.4, low=0, high=1e7)
thinkplot.Plot(np.log10(xs), 1-ys, label='model', color='0.8')
thinkplot.Cdf(cdf_log, complement=True)
thinkplot.Config(xlabel='log10 population',
ylabel='CCDF',
yscale='log')
thinkplot.Save(root='populations_pareto')
# lognormal plot
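# Fit the lognormal model by matching the mean and standard deviation of
# log10(population); its CDF and a normal probability plot are then compared
# against the data.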
thinkplot.PrePlot(cols=2)
mu, sigma = log_pops.mean(), log_pops.std()
xs, ps = thinkstats2.RenderNormalCdf(mu, sigma, low=0, high=8)
thinkplot.Plot(xs, ps, label='model', color='0.8')
thinkplot.Cdf(cdf_log)
thinkplot.Config(xlabel='log10 population',
ylabel='CDF')
thinkplot.SubPlot(2)
thinkstats2.NormalProbabilityPlot(log_pops, label='data')
thinkplot.Config(xlabel='z',
ylabel='log10 population',
xlim=[-5, 5])
thinkplot.Save(root='populations_normal')
def main():
thinkstats2.RandomSeed(17)
MakeFigures()
if __name__ == "__main__":
main()
license: gpl-3.0 | hash: -8,179,515,665,237,356,000 | line_mean: 27.358696 | line_max: 79 | alpha_frac: 0.662323 | autogenerated: false | ratio: 3.392718 | config_test: false | has_no_keywords: false | few_assignments: false
repo_name: 2gis/vmmaster | path: tests/unit/test_commands.py | copies: 1 | size: 16406

# coding: utf-8
import copy
import json
from mock import Mock, PropertyMock, patch
from tests.helpers import Handler, BaseTestCase, ServerMock, get_free_port, DatabaseMock
from core.exceptions import CreationException, ConnectionError, \
SessionException, TimeoutException
from core.config import setup_config, config
from flask import Flask
class CommonCommandsTestCase(BaseTestCase):
webdriver_server = None
vmmaster_agent = None
vnc_server = None
host = 'localhost'
@classmethod
def setUpClass(cls):
setup_config("data/config_openstack.py")
body = {
"sessionId": None,
"desiredCapabilities": {
"platform": "some_platform",
"browserName": "firefox",
"version": "",
"javascriptEnabled": True
}
}
session_request_body = json.dumps(body)
session_request_headers = {
'content-length': '%s' % len(session_request_body),
'accept-encoding': 'identity',
'Connection': 'close',
'accept': 'application/json',
'user-agent': 'Python-urllib/2.7',
'host': '127.0.0.1:9000',
'content-type': 'application/json;charset=UTF-8',
}
cls.request = Mock()
cls.request.method = "POST"
cls.request.path = "/wd/hub/session"
cls.request.headers = dict()
cls.request.headers.update(session_request_headers)
cls.request.data = session_request_body
cls.webdriver_server = ServerMock(cls.host, get_free_port())
cls.webdriver_server.start()
cls.vmmaster_agent = ServerMock(cls.host, get_free_port())
cls.vmmaster_agent.start()
cls.vnc_server = ServerMock(cls.host, get_free_port())
cls.vnc_server.start()
cls.app = Flask(__name__)
cls.app.database = None
cls.app.sessions = None
cls.app.database_task_queue = Mock()
cls.app.pool = Mock()
def setUp(self):
self.ctx = self.app.test_request_context()
self.ctx.push()
with patch(
'flask.current_app.database', DatabaseMock()
), patch(
'flask.current_app.sessions', Mock()
):
from core.db.models import Session, Provider, Endpoint
self.session = Session('origin_1')
self.session.name = "session1"
provider = Provider(name='noname', url='nourl')
vm = Endpoint(Mock(), '', provider)
vm.name = 'vm1'
vm.ip = self.host
vm.ports = {
'selenium': self.webdriver_server.port,
'agent': self.vmmaster_agent.port,
'vnc': self.vnc_server.port
}
self.session.endpoint = vm
self.session.run()
from vmmaster.webdriver import commands
self.commands = commands
def tearDown(self):
with patch(
'flask.current_app.sessions', Mock()
), patch(
'flask.current_app.database', Mock()
):
self.session._close()
self.ctx.pop()
@classmethod
def tearDownClass(cls):
cls.webdriver_server.stop()
cls.vmmaster_agent.stop()
cls.vnc_server.stop()
del cls.app
def ping_vm_mock(arg, ports=None):
yield None
def selenium_status_mock(arg1, arg2, arg3):
yield None
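# The two helpers above replace the real ping/status checks in the patches
# below; they are written as generators so that test code which iterates over
# the result keeps working without doing any real network I/O.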
@patch(
'vmmaster.webdriver.commands.start_selenium_session', new=Mock(
__name__="start_selenium_session",
side_effect=selenium_status_mock
)
)
@patch(
'vmmaster.webdriver.commands.ping_endpoint_before_start_session',
new=Mock(__name__="ping_endpoint_before_start_session", side_effect=ping_vm_mock)
)
@patch(
'vmmaster.webdriver.helpers.is_request_closed',
Mock(return_value=False)
)
@patch('flask.current_app.database', Mock())
class TestStartSessionCommands(CommonCommandsTestCase):
def setUp(self):
super(TestStartSessionCommands, self).setUp()
self.session.dc = Mock(__name__="dc")
def test_start_session_when_selenium_status_failed(self):
request = copy.copy(self.request)
def make_request_mock(arg1, arg2):
yield 200, {}, json.dumps({'status': 1})
with patch(
'core.db.models.Session.make_request', Mock(
__name__="make_request",
side_effect=make_request_mock
)
):
self.assertRaises(
CreationException, self.commands.start_session,
request, self.session
)
@patch(
'vmmaster.webdriver.helpers.is_session_timeouted',
Mock(return_value=True)
)
@patch(
'requests.request', Mock(side_effect=Mock(
__name__="request",
return_value=(200, {}, json.dumps({'status': 0}))))
)
def test_start_session_when_session_was_timeouted(self):
request = copy.copy(self.request)
self.assertRaises(TimeoutException, self.commands.start_session,
request, self.session)
@patch(
'vmmaster.webdriver.helpers.is_session_closed',
Mock(return_value=True)
)
@patch(
'requests.request', Mock(side_effect=Mock(
__name__="request",
return_value=(200, {}, json.dumps({'status': 0}))))
)
def test_start_session_when_session_was_closed(self):
request = copy.copy(self.request)
self.assertRaises(SessionException, self.commands.start_session,
request, self.session)
@patch('flask.current_app.database', Mock())
class TestStartSeleniumSessionCommands(CommonCommandsTestCase):
@patch(
'vmmaster.webdriver.helpers.is_request_closed',
Mock(return_value=False)
)
@patch("vmmaster.webdriver.commands.ping_endpoint_before_start_session", Mock())
def test_session_response_success(self):
request = copy.deepcopy(self.request)
request.headers.update({"reply": "200"})
status, headers, body = self.commands.start_selenium_session(
request, self.session
)
self.assertEqual(status, 200)
request_headers = dict((key.lower(), value) for key, value in
request.headers.iteritems())
for key, value in headers.iteritems():
if key == 'server' or key == 'date':
continue
self.assertDictContainsSubset({key: value}, request_headers)
self.assertEqual(body, request.data)
@patch(
'vmmaster.webdriver.helpers.is_request_closed',
Mock(return_value=False)
)
@patch("vmmaster.webdriver.commands.ping_endpoint_before_start_session", Mock())
def test_session_response_fail(self):
request = copy.deepcopy(self.request)
request.headers.update({"reply": "500"})
def start_selenium_session(req):
for result in self.commands.start_selenium_session(
req, self.session
):
pass
self.assertRaises(CreationException, start_selenium_session, request)
@patch(
'vmmaster.webdriver.helpers.is_request_closed',
Mock(return_value=True)
)
def test_start_selenium_session_when_connection_closed(self):
self.session.closed = True
request = copy.deepcopy(self.request)
request.headers.update({"reply": "200"})
self.assertRaises(
ConnectionError, self.commands.start_selenium_session,
request, self.session
)
@patch(
'vmmaster.webdriver.helpers.is_request_closed',
Mock(return_value=False)
)
@patch(
'vmmaster.webdriver.helpers.is_session_closed',
Mock(return_value=True)
)
def test_start_selenium_session_when_session_closed(self):
self.session.closed = True
request = copy.deepcopy(self.request)
request.headers.update({"reply": "200"})
self.assertRaises(
SessionException, self.commands.start_selenium_session,
request, self.session
)
@patch(
'vmmaster.webdriver.helpers.is_request_closed',
Mock(return_value=False)
)
@patch(
'vmmaster.webdriver.helpers.is_session_timeouted',
Mock(return_value=True)
)
def test_start_selenium_session_when_session_timeouted(self):
self.session.closed = True
request = copy.deepcopy(self.request)
request.headers.update({"reply": "200"})
self.assertRaises(
TimeoutException, self.commands.start_selenium_session,
request, self.session
)
@patch(
'vmmaster.webdriver.helpers.is_request_closed',
Mock(return_value=False)
)
@patch('flask.current_app.database', Mock())
class TestCheckVmOnline(CommonCommandsTestCase):
def setUp(self):
super(TestCheckVmOnline, self).setUp()
config.PING_TIMEOUT = 0
self._handler_get = Handler.do_GET
self.response_body = "{}"
self.response_headers = {
'header': 'value',
'content-length': len(self.response_body)
}
def tearDown(self):
super(TestCheckVmOnline, self).tearDown()
Handler.do_GET = self._handler_get
def test_check_vm_online_ok(self):
def do_GET(handler):
handler.send_reply(200, self.response_headers,
body=self.response_body)
Handler.do_GET = do_GET
result = self.commands.ping_endpoint_before_start_session(self.session, ports=[
self.webdriver_server.port, self.vmmaster_agent.port, self.vnc_server.port
])
self.assertTrue(result)
def test_check_vm_online_ping_failed_timeout(self):
self.assertRaises(
CreationException, self.commands.ping_endpoint_before_start_session, self.session, config.DEFAULT_PORTS
)
def test_check_vm_online_ping_failed_when_session_closed(self):
config.PING_TIMEOUT = 2
self.session.closed = True
self.assertRaises(
CreationException, self.commands.ping_endpoint_before_start_session, self.session, config.DEFAULT_PORTS
)
def test_check_vm_online_status_failed(self):
def do_GET(handler):
handler.send_reply(500, self.response_headers,
body=self.response_body)
Handler.do_GET = do_GET
request = copy.deepcopy(self.request)
def selenium_status(req):
for result in self.commands.selenium_status(
req, self.session
):
pass
self.assertRaises(CreationException, selenium_status, request)
def test_selenium_status_failed_when_session_closed(self):
self.session.closed = True
def do_GET(handler):
handler.send_reply(200, self.response_headers,
body=self.response_body)
Handler.do_GET = do_GET
request = copy.deepcopy(self.request)
def selenium_status(req):
for result in self.commands.selenium_status(
req, self.session
):
pass
self.assertRaises(CreationException, selenium_status, request)
class TestGetDesiredCapabilities(BaseTestCase):
def setUp(self):
self.body = {
"sessionId": None,
"desiredCapabilities": {
"platform": "some_platform",
}
}
self.session_request_headers = {
'content-length': '%s',
'accept-encoding': 'identity',
'Connection': 'close',
'accept': 'application/json',
'user-agent': 'Python-urllib/2.7',
'host': '127.0.0.1:9000',
'content-type': 'application/json;charset=UTF-8',
}
self.request = Mock()
self.request.method = "POST"
self.request.path = "/wd/hub/session"
self.request.headers = dict()
from vmmaster.webdriver import commands
self.commands = commands
def test_platform(self):
self.session_request_headers = {
'content-length': '%s' % len(self.body),
}
self.request.headers.update(self.session_request_headers)
self.request.data = json.dumps(self.body)
dc = self.commands.get_desired_capabilities(self.request)
self.assertIsInstance(dc["platform"], unicode)
self.assertEqual(self.body["desiredCapabilities"]["platform"],
dc["platform"])
def test_name(self):
self.body['desiredCapabilities'].update({
"name": "some_name"
})
self.session_request_headers = {
'content-length': '%s' % len(self.body),
}
self.request.headers.update(self.session_request_headers)
self.request.data = json.dumps(self.body)
dc = self.commands.get_desired_capabilities(self.request)
self.assertIsInstance(dc["name"], unicode)
self.assertEqual(self.body["desiredCapabilities"]["name"], dc["name"])
def test_no_name(self):
self.session_request_headers = {
'content-length': '%s' % len(self.body),
}
self.request.headers.update(self.session_request_headers)
self.request.data = json.dumps(self.body)
dc = self.commands.get_desired_capabilities(self.request)
self.assertEqual(dc.get("name", None), None)
def test_take_screenshot_bool(self):
self.body['desiredCapabilities'].update({
"takeScreenshot": True
})
self.session_request_headers = {
'content-length': '%s' % len(self.body),
}
self.request.headers.update(self.session_request_headers)
self.request.data = json.dumps(self.body)
dc = self.commands.get_desired_capabilities(self.request)
self.assertTrue(dc["takeScreenshot"])
def test_take_screenshot_some_string(self):
self.body['desiredCapabilities'].update({
"takeScreenshot": "asdf"
})
self.session_request_headers = {
'content-length': '%s' % len(self.body),
}
self.request.headers.update(self.session_request_headers)
self.request.data = json.dumps(self.body)
dc = self.commands.get_desired_capabilities(self.request)
self.assertTrue(dc["takeScreenshot"])
def test_take_screenshot_empty_string(self):
self.body['desiredCapabilities'].update({
"takeScreenshot": ""
})
self.session_request_headers = {
'content-length': '%s' % len(self.body),
}
self.request.headers.update(self.session_request_headers)
self.request.data = json.dumps(self.body)
dc = self.commands.get_desired_capabilities(self.request)
self.assertFalse(dc["takeScreenshot"])
class TestRunScript(CommonCommandsTestCase):
def setUp(self):
super(TestRunScript, self).setUp()
config.DEFAULT_AGENT_PORT = self.vmmaster_agent.port
self.response_body = "some_body"
def tearDown(self):
super(TestRunScript, self).tearDown()
@patch('flask.current_app.database', Mock())
def test_run_script(self):
def run_script_through_websocket_mock(*args, **kwargs):
return 200, {}, 'some_body'
with patch('vmmaster.webdriver.commands.run_script_through_websocket',
run_script_through_websocket_mock):
response = self.commands.run_script(self.request, self.session)
self.assertEqual(200, response[0])
self.assertEqual(self.response_body, response[2])
class TestLabelCommands(CommonCommandsTestCase):
def test_label(self):
request = copy.deepcopy(self.request)
label = "step-label"
label_id = 1
request.data = json.dumps({"label": label})
with patch('core.db.models.Session.current_log_step',
PropertyMock(return_value=Mock(id=label_id))):
status, headers, body = self.commands.vmmaster_label(
request, self.session
)
self.assertEqual(status, 200)
json_body = json.loads(body)
self.assertEqual(json_body["value"], label)
self.assertEqual(json_body["labelId"], label_id)
license: mit | hash: 3,751,269,141,683,108,000 | line_mean: 32.413442 | line_max: 115 | alpha_frac: 0.598501 | autogenerated: false | ratio: 3.990757 | config_test: true | has_no_keywords: false | few_assignments: false
repo_name: cjworld/WeddingAssist | path: parties/models.py | copies: 1 | size: 2468

from django.db import models
from django.core.urlresolvers import reverse
from django.contrib.auth.models import User
class Photo(models.Model):
author = models.ForeignKey(User, related_name='photos')
title = models.CharField(max_length=127, blank=True)
datetime = models.DateTimeField(auto_now_add=True)
image = models.ImageField(upload_to="images")
def __unicode__(self):
return '%s, %s, %s, %s' % (self.author, self.title, self.datetime, self.image)
class Party(models.Model):
author = models.ForeignKey(User, related_name='parties')
datetime = models.DateTimeField(auto_now_add=True)
title = models.CharField(max_length=127)
subscription = models.TextField(blank=True)
date = models.DateField(blank=True, null=True)
time = models.TimeField(blank=True, null=True)
place = models.CharField(max_length=511, blank=True)
photos = models.ManyToManyField(Photo, through='PartyPhoto', blank=True, null=True)
def __unicode__(self):
return '%s, %s, %s, %s, %s, %s, %s, %s' % (self.author, self.datetime, self.title, self.subscription, self.date, self.time, self.place, self.photos)
def get_absolute_url(self):
return reverse('parties:party-detail', args=[str(self.id)])
class PartyPhoto(models.Model):
photo = models.ForeignKey(Photo)
party = models.ForeignKey(Party)
class Message(models.Model):
author = models.ForeignKey(User, related_name='messages')
datetime = models.DateTimeField(auto_now_add=True)
body = models.CharField(max_length=2047)
party = models.ForeignKey(Party, related_name='messages')
def __unicode__(self):
return '%s, %s, %s, %s' % (self.author, self.datetime, self.body, self.party)
def get_absolute_url(self):
return reverse('parties:message-detail', args=[str(self.id)])
class Willingness(models.Model):
author = models.ForeignKey(User, related_name='willingnesses')
participation = models.BooleanField()
invitation = models.BooleanField()
host = models.BooleanField()
vegetarian = models.BooleanField()
party = models.ForeignKey(Party, related_name='willingnesses')
def __unicode__(self):
return '%s, %s, %s, %s, %s, %s' % (self.author, self.participation, self.invitation, self.host, self.vegetarian, self.party)
def get_absolute_url(self):
return reverse('parties:willingness-detail', args=[str(self.id)])
license: mit | hash: 7,828,495,767,830,519,000 | line_mean: 40.847458 | line_max: 156 | alpha_frac: 0.677472 | autogenerated: false | ratio: 3.51567 | config_test: false | has_no_keywords: false | few_assignments: false
repo_name: shearichard/spellsplash | path: splsplsh_project/spellweb/migrations/0004_auto_20140920_0007.py | copies: 1 | size: 1789

# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
dependencies = [
('spellweb', '0003_auto_20140920_0006'),
]
operations = [
migrations.CreateModel(
name='Attempt',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('when', models.DateTimeField(auto_now_add=True)),
('success', models.BooleanField(default=False)),
('learner', models.ForeignKey(to='spellweb.Learner')),
],
options={
'ordering': ['-when'],
},
bases=(models.Model,),
),
migrations.CreateModel(
name='Word',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('level', models.IntegerField()),
('word', models.CharField(max_length=30)),
('source', models.CharField(default=b'OT', max_length=2, choices=[(b'EW', b'Essential Words'), (b'ER', b'NZCER'), (b'OT', b'OTHER')])),
('hint', models.CharField(max_length=30, null=True, blank=True)),
],
options={
'ordering': ['level', 'word'],
},
bases=(models.Model,),
),
migrations.AlterUniqueTogether(
name='word',
unique_together=set([('source', 'word')]),
),
migrations.AddField(
model_name='attempt',
name='word',
field=models.ForeignKey(to='spellweb.Word'),
preserve_default=True,
),
]
license: gpl-3.0 | hash: -4,937,125,536,096,705,000 | line_mean: 34.078431 | line_max: 151 | alpha_frac: 0.508664 | autogenerated: false | ratio: 4.352798 | config_test: false | has_no_keywords: false | few_assignments: false
repo_name: czechmark/neurioToPvoutput | path: bothTables.py | copies: 1 | size: 11432

#!/usr/bin/env python
from lxml import html
from lxml import etree
import requests
import sqlite3
import sys
import cgi
import cgitb
import datetime
import time
import subprocess
# global variables
speriod=(15*60)-1
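# speriod is the intended 15-minute logging interval expressed in seconds
# (minus one); it is defined here but not referenced elsewhere in this script.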
dbname='/var/www/neuriolog.db'
ip='192.168.13.239'
endPoint='/both_tables.html'
myOption=''
# store the energy in the database
def log_energy(net,gen,cons):
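# net, gen and cons are the [name, import, export] rows produced by read_html();
# columns 1 and 2 (import/export) are stored alongside the current timestamp.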
conn=sqlite3.connect(dbname)
curs=conn.cursor()
print '2'
curs.execute('INSERT INTO energy values(?,?,?,?,?,?,?)', (datetime.datetime.now(),net[1],net[2],gen[1],gen[2],cons[1],cons[2]))
conn.commit()
conn.close()
def is_number(s):
try:
float(s)
return True
except ValueError:
pass
try:
import unicodedata
unicodedata.numeric(s)
return True
except (TypeError, ValueError):
pass
return False
# print the HTTP header
def printHTTPheader():
print "Content-type: text/html\n\n"
# print the HTML head section
# arguments are the page title and the table for the chart
def printHTMLHead(title, table):
print "<head>"
print " <title>"
print title
print " </title>"
print_graph_script(table)
print "</head>"
def getTimeMilli(inTime):
return(time.mktime( inTime.timetuple()) *1000)
def read_html():
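# Fetch the meter's local /both_tables.html page and walk the parsed HTML.
# Once the 'Channel' header cell is seen, <td> texts are collected in groups of
# six (one group per table row); groups 2, 3 and 4 hold the net, generation and
# consumption rows, which are returned as [name, import, export] triples under
# a ['Name', 'Import', 'Export'] header row.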
data = []
interim = []
page = requests.get('http://' + ip + endPoint)
parser = etree.HTMLParser();
tree2 = etree.fromstring(page.text,parser)
#print etree.tostring(tree2)
walkAll = tree2.getiterator()
foundChannel = False;
count = 0
for elt in walkAll:
myText = elt.text;
if myText == 'Channel':
foundChannel = True
if foundChannel & (elt.tag == 'td') & (myText != None) :
#print elt.text, elt.tag
interim.append(elt.text)
count = count +1;
#print interim
if count == 6:
count = 0;
data.append(interim)
interim = []
#print data
retData = [ ['Name','Import','Export'],
['Net',data[2][2],data[2][3]],
['Gen',data[3][2],data[3][3]],
['Con',data[4][2],data[4][3]] ]
#print retData
return retData
# return a list of records from the database
def get_data(interval):
conn=sqlite3.connect(dbname)
curs=conn.cursor()
#print interval
if interval == None or int(interval) == -1:
curs.execute("SELECT * FROM energy")
else:
curs.execute("SELECT * FROM energy WHERE timestamp>datetime('now','-%s hours','localtime')" % interval)
rows=curs.fetchall()
conn.close()
return rows
# convert rows from database into a javascript table
def create_table(rows):
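# Convert the stored rows into rows of a JavaScript data table. Power is not
# stored directly, so it is derived from consecutive energy readings as
# delta_energy / delta_seconds * 3600, exponentially smoothed with the
# smth/smth2 weights and clamped to +/-8 to suppress spikes.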
chart_table=""
smth=0.4
smth2=1-smth
smthh=smth*3600
old_data = None
old_value=0
old_time=0
for row in rows[:-1]:
if old_data != None:
delta=row[1]-old_data
aTime=datetime.datetime.strptime(row[0], "%Y-%m-%d %H:%M:%S")
dTime=aTime-old_time
value=delta/dTime.total_seconds()*smthh+old_value*smth2
if value > 8:
value=8
if value < -8:
value=-8
#rowstr="[new Date({0}), {1}],\n".format(datetime.datetime.strftime(aTime,"%Y,%m,%d,%H,%M,%S"),str(value))
rowstr="[new Date({0}), {1}, {2}],\n".format(getTimeMilli(aTime),str(row[1]),str(value))
chart_table+=rowstr
old_value=value
old_data=row[1]
old_time=datetime.datetime.strptime(row[0], "%Y-%m-%d %H:%M:%S")
row=rows[-1]
delta=row[1]-old_data
aTime=datetime.datetime.strptime(row[0], "%Y-%m-%d %H:%M:%S")
dTime=aTime-old_time
value=delta/dTime.total_seconds()*3600*0.1+old_value*0.9
#rowstr="[new Date({0}), {1}]\n".format(getTimeMilli(aTime),str(value))
rowstr="[new Date({0}), {1}, {2}]\n".format(getTimeMilli(aTime),str(row[1]),str(value))
#rowstr="['{0}', {1}]\n".format(str(row[0]),str(value))
chart_table+=rowstr
#print chart_table
return chart_table
# print the javascript to generate the chart
# pass the table generated from the database info
def print_graph_script(table):
# google chart snippet
#data.setColumnProperty(1, 'type', 'date');
#data.setColumnProperty(2, 'type', 'number');
chart_code="""
<script type="text/javascript" src="https://www.google.com/jsapi"></script>
<script type="text/javascript">
google.load("visualization", "1", {packages:["corechart"]});
google.setOnLoadCallback(drawChart);
function drawChart() {
var data = google.visualization.arrayToDataTable([ ['Time', 'Energy(lhs)', 'Power(rhs)'], %s ]);
data.setColumnProperty(0,'type','datetime');
data.setColumnProperty(1,'type','number');
data.setColumnProperty(2,'type','number');
var options = {
title: 'Energy/Power',
vAxes: { 0: {title: 'KWH'},
1: {title: 'KWatts' }},
hAxis: { title: 'Time', format: 'M/d/yy HH:mm', gridlines:{ color:'#555555', count: 10}},
series: {0: {targetAxisIndex:0},
1: {targetAxisIndex:1}}
};
var chart = new google.visualization.LineChart(document.getElementById('chart_div'));
chart.draw(data, options);
}
</script>"""
print chart_code % (table)
# print the div that contains the graph
def show_graph():
print "<h2>Energy(KWH)/Power(KW) Chart</h2>"
print '<div id="chart_div" style="width: 900px; height: 500px;"></div>'
# connect to the db and show some stats
# argument option is the number of hours
def show_stats(option):
conn=sqlite3.connect(dbname)
curs=conn.cursor()
if option is None or int(option) == -1:
option = str(240000)
#curs.execute("SELECT * FROM energy WHERE timestamp>datetime('now','-%s hours','localtime')" % interval)
curs.execute("SELECT timestamp,max(energy) FROM energy WHERE timestamp>datetime('now','-%s hour','localtime') AND timestamp<=datetime('now','localtime')" % option)
rowmax=curs.fetchone()
rowstrmax="{0}   {1}KWH".format(str(rowmax[0]),str(rowmax[1]))
# curs.execute("SELECT timestamp,min(temp) FROM temps WHERE timestamp>datetime('now','-%s hour') AND timestamp<=datetime('now')" % option)
curs.execute("SELECT timestamp,min(energy) FROM energy WHERE timestamp>datetime('now','-%s hour','localtime') AND timestamp<=datetime('now','localtime')" % option)
rowmin=curs.fetchone()
rowstrmin="{0}   {1}KWH".format(str(rowmin[0]),str(rowmin[1]))
# curs.execute("SELECT avg(temp) FROM temps WHERE timestamp>datetime('now','-%s hour') AND timestamp<=datetime('now')" % option)
curs.execute("SELECT avg(energy) FROM energy WHERE timestamp>datetime('now','-%s hour','localtime') AND timestamp<=datetime('now','localtime')" % option)
rowavg=curs.fetchone()
print "<hr>"
print "<h2>Minumum energy </h2>"
print rowstrmin
print "<h2>Maximum energy</h2>"
print rowstrmax
print "<h2>Average energy</h2>"
print "%.3f" % rowavg+"KWH"
print "<hr>"
print "<h2>In the last hour:</h2>"
print "<table>"
print "<tr><td><strong>Date/Time</strong></td><td><strong>energy</strong></td></tr>"
# rows=curs.execute("SELECT * FROM energy WHERE timestamp>datetime('new','-1 hour') AND timestamp<=datetime('new')")
rows=curs.execute("SELECT * FROM energy WHERE timestamp>datetime('now','-1 hour','localtime') AND timestamp<=datetime('now','localtime')")
for row in rows:
rowstr="<tr><td>{0}  </td><td>{1}KWH</td></tr>".format(str(row[0]),str(row[1]))
print rowstr
print "</table>"
print "<hr>"
conn.close()
def print_time_selector(option):
print """<form action="/cgi-bin/both.py" method="POST">
Show the logs for
<select name="timeinterval">"""
if option is not None:
if option == "-1":
print "<option value=\"-1\" selected=\"selected\">All times</option>"
else:
print "<option value=\"-1\">All times</option>"
#if option == None:
#print "<option value=\"-1\" selected=\"selected\">All times</option>"
#else:
#print "<option value=\"-1\">All times</option>"
if option == "6":
print "<option value=\"6\" selected=\"selected\">the last 6 hours</option>"
else:
print "<option value=\"6\">the last 6 hours</option>"
if option == "12":
print "<option value=\"12\" selected=\"selected\">the last 12 hours</option>"
else:
print "<option value=\"12\">the last 12 hours</option>"
if option == "24":
print "<option value=\"24\" selected=\"selected\">the last 24 hours</option>"
else:
print "<option value=\"24\">the last 24 hours</option>"
if option == "168":
print "<option value=\"168\" selected=\"selected\">1 week</option>"
else:
print "<option value=\"168\">1 week</option>"
else:
print """<option value="-1">All times</option>
<option value="6">the last 6 hours</option>
<option value="12">the last 12 hours</option>
<option value="24" selected="selected">the last 24 hours</option>
<option value="168">1 week</option>"""
print """ </select>
<input type="submit" value="Display">
</form>"""
# check that the option is valid
# and not an SQL injection
def validate_input(option_str):
# check that the option string represents a number
#if option_str == -1:
#return None
if is_number(option_str):
# check that the option is within a specific range
if int(option_str) > -2 and int(option_str) <= 2000:
return option_str
else:
return None
else:
return None
#return the option passed to the script
def get_option():
form=cgi.FieldStorage()
if "timeinterval" in form:
option = form["timeinterval"].value
return validate_input (option)
else:
return None
# main function
# This is where the program starts
def main():
# get options that may have been passed to this script
option=get_option()
if option is None:
option = str(24)
# get data from the database
records=read_html()
log_energy(records[1],records[2],records[3])
#records=get_data(None)
# print the HTTP header
#printHTTPheader()
#if len(records) != 0:
# convert the data into a table
#table=create_table(records)
#else:
#print "<h1>Raspberry Pi energy/power Logger "
#print myOption
#print "No data found"
#print "</h1>"
#return
#global myOption
#myOption=''
# start printing the page
#print "<html>"
# print the head section including the table
# used by the javascript for the chart
#printHTMLHead("Raspberry Pi energy/power Logger", table)
# print the page body
#print "<body>"
#print "<h1>Raspberry Pi energy/power Logger "
#print myOption
#print "</h1>"
#print "<hr>"
#print_time_selector(option)
#show_graph()
#show_stats(option)
#print "</body>"
#print "</html>"
sys.stdout.flush()
if __name__=="__main__":
main()
license: gpl-2.0 | hash: -2,662,441,852,015,459,000 | line_mean: 28.312821 | line_max: 167 | alpha_frac: 0.595784 | autogenerated: false | ratio: 3.554726 | config_test: false | has_no_keywords: false | few_assignments: false
repo_name: mailhexu/pyDFTutils | path: examples/wannier_vasp/BEC_BBB.py | copies: 1 | size: 2069

#!/usr/bin/env python
from pyDFTutils.vasp.myvasp import myvasp, default_pps
from pyDFTutils.vasp.vasp_utils import read_efermi
from pyDFTutils.ase_utils.geometry import gen_disped_atoms
from ase.io import read
from pyDFTutils.wannier90.wannier import wannier_input,run_wannier
import os
def calc():
atoms = read('POSCAR.vasp')
d_atoms = gen_disped_atoms(atoms, 'Ti1', distance=0.005, direction='all')
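# d_atoms holds copies of the structure with atom Ti1 displaced by 0.005 along
# each direction; only the first (x) displacement is run below, presumably as
# one leg of the finite-difference Born-effective-charge (BEC) workflow the
# file name refers to.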
# original
pwd = os.getcwd()
path='orig'
if not os.path.exists(path):
os.mkdir(path)
os.chdir(path)
calc_wannier(atoms)
os.chdir(pwd)
# displaced
pwd = os.getcwd()
path='disp_Ti_x'
if not os.path.exists(path):
os.mkdir(path)
os.chdir(path)
calc_wannier(d_atoms[0])
os.chdir(pwd)
def calc_wannier(atoms):
mycalc = myvasp(
xc='PBE',
gga='PS',
setups=default_pps,
ispin=2,
icharg=0,
kpts=[6, 6, 6],
gamma=True,
prec='normal',
istart=1,
lmaxmix=4,
encut=500)
mycalc.set(lreal='Auto', algo='normal')
atoms.set_calculator(mycalc)
# electronic
mycalc.set(ismear=-5, sigma=0.1, nelm=100, nelmdl=-6, ediff=1e-7)
mycalc.set(ncore=1, kpar=3)
mycalc.scf_calculation()
mycalc.set(
lwannier90=True,
lwrite_unk=False,
lwrite_mmn_amn=True,
ncore=1,
kpar=3)
wa = wannier_input(atoms=atoms)
efermi = read_efermi()
wa.set(
mp_grid=[6, 6, 6],
num_bands=28,
guiding_centres=True,
num_iter=100,
kmesh_tol=1e-9,
search_shells=24,
write_xyz=True,
hr_plot=True,
)
wa.set_energy_window([-70,0.5],[-67.4,0.4],shift_efermi=efermi)
wa.add_basis('Ba','s')
wa.add_basis('Ba','p')
wa.add_basis('Ti','s')
wa.add_basis('Ti','p')
wa.add_basis('O','s')
wa.add_basis('O','p')
wa.write_input()
mycalc.set(nbands=28)
mycalc.scf_calculation()
run_wannier(spin='up')
run_wannier(spin='dn')
#mycalc.ldos_calculation()
calc()
license: lgpl-3.0 | hash: -5,069,794,244,428,673,000 | line_mean: 22.247191 | line_max: 77 | alpha_frac: 0.58434 | autogenerated: false | ratio: 2.704575 | config_test: false | has_no_keywords: false | few_assignments: false
repo_name: angdraug/nova | path: nova/tests/api/openstack/compute/contrib/test_simple_tenant_usage.py | copies: 3 | size: 21257

# Copyright 2011 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import datetime
from lxml import etree
import mock
from oslo.serialization import jsonutils
from oslo.utils import timeutils
import webob
from nova.api.openstack.compute.contrib import simple_tenant_usage as \
simple_tenant_usage_v2
from nova.api.openstack.compute.plugins.v3 import simple_tenant_usage as \
simple_tenant_usage_v21
from nova.compute import flavors
from nova.compute import vm_states
from nova import context
from nova import db
from nova import exception
from nova import objects
from nova.openstack.common import policy as common_policy
from nova import policy
from nova import test
from nova.tests.api.openstack import fakes
from nova import utils
SERVERS = 5
TENANTS = 2
HOURS = 24
ROOT_GB = 10
EPHEMERAL_GB = 20
MEMORY_MB = 1024
VCPUS = 2
NOW = timeutils.utcnow()
START = NOW - datetime.timedelta(hours=HOURS)
STOP = NOW
FAKE_INST_TYPE = {'id': 1,
'vcpus': VCPUS,
'root_gb': ROOT_GB,
'ephemeral_gb': EPHEMERAL_GB,
'memory_mb': MEMORY_MB,
'name': 'fakeflavor',
'flavorid': 'foo',
'rxtx_factor': 1.0,
'vcpu_weight': 1,
'swap': 0,
'created_at': None,
'updated_at': None,
'deleted_at': None,
'deleted': 0,
'disabled': False,
'is_public': True,
'extra_specs': {'foo': 'bar'}}
def get_fake_db_instance(start, end, instance_id, tenant_id,
vm_state=vm_states.ACTIVE):
sys_meta = utils.dict_to_metadata(
flavors.save_flavor_info({}, FAKE_INST_TYPE))
# NOTE(mriedem): We use fakes.stub_instance since it sets the fields
# needed on the db instance for converting it to an object, but we still
# need to override system_metadata to use our fake flavor.
inst = fakes.stub_instance(
id=instance_id,
uuid='00000000-0000-0000-0000-00000000000000%02d' % instance_id,
image_ref='1',
project_id=tenant_id,
user_id='fakeuser',
display_name='name',
flavor_id=FAKE_INST_TYPE['id'],
launched_at=start,
terminated_at=end,
vm_state=vm_state,
memory_mb=MEMORY_MB,
vcpus=VCPUS,
root_gb=ROOT_GB,
ephemeral_gb=EPHEMERAL_GB,)
inst['system_metadata'] = sys_meta
return inst
def fake_instance_get_active_by_window_joined(context, begin, end,
project_id, host):
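# Every fake tenant gets SERVERS instances that ran for the whole START..STOP
# window; integer division (x / SERVERS) assigns each instance to its tenant.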
return [get_fake_db_instance(START,
STOP,
x,
"faketenant_%s" % (x / SERVERS))
for x in xrange(TENANTS * SERVERS)]
@mock.patch.object(db, 'instance_get_active_by_window_joined',
fake_instance_get_active_by_window_joined)
class SimpleTenantUsageTestV21(test.TestCase):
url = '/v2/faketenant_0/os-simple-tenant-usage'
alt_url = '/v2/faketenant_1/os-simple-tenant-usage'
policy_rule_prefix = "compute_extension:v3:os-simple-tenant-usage"
def setUp(self):
super(SimpleTenantUsageTestV21, self).setUp()
self.admin_context = context.RequestContext('fakeadmin_0',
'faketenant_0',
is_admin=True)
self.user_context = context.RequestContext('fakeadmin_0',
'faketenant_0',
is_admin=False)
self.alt_user_context = context.RequestContext('fakeadmin_0',
'faketenant_1',
is_admin=False)
def _get_wsgi_app(self, context):
return fakes.wsgi_app_v21(fake_auth_context=context,
init_only=('servers',
'os-simple-tenant-usage'))
def _test_verify_index(self, start, stop):
req = webob.Request.blank(
self.url + '?start=%s&end=%s' %
(start.isoformat(), stop.isoformat()))
req.method = "GET"
req.headers["content-type"] = "application/json"
res = req.get_response(self._get_wsgi_app(self.admin_context))
self.assertEqual(res.status_int, 200)
res_dict = jsonutils.loads(res.body)
usages = res_dict['tenant_usages']
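# Since every fake instance ran for the full window, each tenant's totals
# reduce to SERVERS * HOURS scaled by the per-instance resource sizes.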
for i in xrange(TENANTS):
self.assertEqual(int(usages[i]['total_hours']),
SERVERS * HOURS)
self.assertEqual(int(usages[i]['total_local_gb_usage']),
SERVERS * (ROOT_GB + EPHEMERAL_GB) * HOURS)
self.assertEqual(int(usages[i]['total_memory_mb_usage']),
SERVERS * MEMORY_MB * HOURS)
self.assertEqual(int(usages[i]['total_vcpus_usage']),
SERVERS * VCPUS * HOURS)
self.assertFalse(usages[i].get('server_usages'))
def test_verify_index(self):
self._test_verify_index(START, STOP)
def test_verify_index_future_end_time(self):
future = NOW + datetime.timedelta(hours=HOURS)
self._test_verify_index(START, future)
def test_verify_show(self):
self._test_verify_show(START, STOP)
def test_verify_show_future_end_time(self):
future = NOW + datetime.timedelta(hours=HOURS)
self._test_verify_show(START, future)
def _get_tenant_usages(self, detailed=''):
req = webob.Request.blank(
self.url + '?detailed=%s&start=%s&end=%s' %
(detailed, START.isoformat(), STOP.isoformat()))
req.method = "GET"
req.headers["content-type"] = "application/json"
res = req.get_response(self._get_wsgi_app(self.admin_context))
self.assertEqual(res.status_int, 200)
res_dict = jsonutils.loads(res.body)
return res_dict['tenant_usages']
def test_verify_detailed_index(self):
usages = self._get_tenant_usages('1')
for i in xrange(TENANTS):
servers = usages[i]['server_usages']
for j in xrange(SERVERS):
self.assertEqual(int(servers[j]['hours']), HOURS)
def test_verify_simple_index(self):
usages = self._get_tenant_usages(detailed='0')
for i in xrange(TENANTS):
self.assertIsNone(usages[i].get('server_usages'))
def test_verify_simple_index_empty_param(self):
# NOTE(lzyeval): 'detailed=&start=..&end=..'
usages = self._get_tenant_usages()
for i in xrange(TENANTS):
self.assertIsNone(usages[i].get('server_usages'))
def _test_verify_show(self, start, stop):
tenant_id = 0
req = webob.Request.blank(
self.url + '/faketenant_%s?start=%s&end=%s' %
(tenant_id, start.isoformat(), stop.isoformat()))
req.method = "GET"
req.headers["content-type"] = "application/json"
res = req.get_response(self._get_wsgi_app(self.user_context))
self.assertEqual(res.status_int, 200)
res_dict = jsonutils.loads(res.body)
usage = res_dict['tenant_usage']
servers = usage['server_usages']
self.assertEqual(len(usage['server_usages']), SERVERS)
uuids = ['00000000-0000-0000-0000-00000000000000%02d' %
(x + (tenant_id * SERVERS)) for x in xrange(SERVERS)]
for j in xrange(SERVERS):
delta = STOP - START
uptime = delta.days * 24 * 3600 + delta.seconds
self.assertEqual(int(servers[j]['uptime']), uptime)
self.assertEqual(int(servers[j]['hours']), HOURS)
self.assertIn(servers[j]['instance_id'], uuids)
def test_verify_show_cannot_view_other_tenant(self):
req = webob.Request.blank(
self.alt_url + '/faketenant_0?start=%s&end=%s' %
(START.isoformat(), STOP.isoformat()))
req.method = "GET"
req.headers["content-type"] = "application/json"
rules = {
self.policy_rule_prefix + ":show":
common_policy.parse_rule([
["role:admin"], ["project_id:%(project_id)s"]
])
}
policy.set_rules(rules)
try:
res = req.get_response(self._get_wsgi_app(self.alt_user_context))
self.assertEqual(res.status_int, 403)
finally:
policy.reset()
def test_get_tenants_usage_with_bad_start_date(self):
future = NOW + datetime.timedelta(hours=HOURS)
tenant_id = 0
req = webob.Request.blank(
self.url + '/'
'faketenant_%s?start=%s&end=%s' %
(tenant_id, future.isoformat(), NOW.isoformat()))
req.method = "GET"
req.headers["content-type"] = "application/json"
res = req.get_response(self._get_wsgi_app(self.user_context))
self.assertEqual(res.status_int, 400)
def test_get_tenants_usage_with_invalid_start_date(self):
tenant_id = 0
req = webob.Request.blank(
self.url + '/'
'faketenant_%s?start=%s&end=%s' %
(tenant_id, "xxxx", NOW.isoformat()))
req.method = "GET"
req.headers["content-type"] = "application/json"
res = req.get_response(self._get_wsgi_app(self.user_context))
self.assertEqual(res.status_int, 400)
def _test_get_tenants_usage_with_one_date(self, date_url_param):
req = webob.Request.blank(
self.url + '/'
'faketenant_0?%s' % date_url_param)
req.method = "GET"
req.headers["content-type"] = "application/json"
res = req.get_response(self._get_wsgi_app(self.user_context))
self.assertEqual(200, res.status_int)
def test_get_tenants_usage_with_no_start_date(self):
self._test_get_tenants_usage_with_one_date(
'end=%s' % (NOW + datetime.timedelta(5)).isoformat())
def test_get_tenants_usage_with_no_end_date(self):
self._test_get_tenants_usage_with_one_date(
'start=%s' % (NOW - datetime.timedelta(5)).isoformat())
class SimpleTenantUsageTestV2(SimpleTenantUsageTestV21):
policy_rule_prefix = "compute_extension:simple_tenant_usage"
def _get_wsgi_app(self, context):
self.flags(
osapi_compute_extension=[
'nova.api.openstack.compute.contrib.select_extensions'],
osapi_compute_ext_list=['Simple_tenant_usage'])
return fakes.wsgi_app(fake_auth_context=context,
init_only=('os-simple-tenant-usage', ))
class SimpleTenantUsageSerializerTest(test.TestCase):
def _verify_server_usage(self, raw_usage, tree):
self.assertEqual('server_usage', tree.tag)
# Figure out what fields we expect
not_seen = set(raw_usage.keys())
for child in tree:
self.assertIn(child.tag, not_seen)
not_seen.remove(child.tag)
self.assertEqual(str(raw_usage[child.tag]), child.text)
self.assertEqual(len(not_seen), 0)
def _verify_tenant_usage(self, raw_usage, tree):
self.assertEqual('tenant_usage', tree.tag)
# Figure out what fields we expect
not_seen = set(raw_usage.keys())
for child in tree:
self.assertIn(child.tag, not_seen)
not_seen.remove(child.tag)
if child.tag == 'server_usages':
for idx, gr_child in enumerate(child):
self._verify_server_usage(raw_usage['server_usages'][idx],
gr_child)
else:
self.assertEqual(str(raw_usage[child.tag]), child.text)
self.assertEqual(len(not_seen), 0)
def test_serializer_show(self):
serializer = simple_tenant_usage_v2.SimpleTenantUsageTemplate()
today = timeutils.utcnow()
yesterday = today - datetime.timedelta(days=1)
raw_usage = dict(
tenant_id='tenant',
total_local_gb_usage=789,
total_vcpus_usage=456,
total_memory_mb_usage=123,
total_hours=24,
start=yesterday,
stop=today,
server_usages=[dict(
instance_id='00000000-0000-0000-0000-0000000000000000',
name='test',
hours=24,
memory_mb=1024,
local_gb=50,
vcpus=1,
tenant_id='tenant',
flavor='m1.small',
started_at=yesterday,
ended_at=today,
state='terminated',
uptime=86400),
dict(
instance_id='00000000-0000-0000-0000-0000000000000002',
name='test2',
hours=12,
memory_mb=512,
local_gb=25,
vcpus=2,
tenant_id='tenant',
flavor='m1.tiny',
started_at=yesterday,
ended_at=today,
state='terminated',
uptime=43200),
],
)
tenant_usage = dict(tenant_usage=raw_usage)
text = serializer.serialize(tenant_usage)
tree = etree.fromstring(text)
self._verify_tenant_usage(raw_usage, tree)
def test_serializer_index(self):
serializer = simple_tenant_usage_v2.SimpleTenantUsagesTemplate()
today = timeutils.utcnow()
yesterday = today - datetime.timedelta(days=1)
raw_usages = [dict(
tenant_id='tenant1',
total_local_gb_usage=1024,
total_vcpus_usage=23,
total_memory_mb_usage=512,
total_hours=24,
start=yesterday,
stop=today,
server_usages=[dict(
instance_id='00000000-0000-0000-0000-0000000000000001',
name='test1',
hours=24,
memory_mb=1024,
local_gb=50,
vcpus=2,
tenant_id='tenant1',
flavor='m1.small',
started_at=yesterday,
ended_at=today,
state='terminated',
uptime=86400),
dict(
instance_id='00000000-0000-0000-0000-0000000000000002',
name='test2',
hours=42,
memory_mb=4201,
local_gb=25,
vcpus=1,
tenant_id='tenant1',
flavor='m1.tiny',
started_at=today,
ended_at=yesterday,
state='terminated',
uptime=43200),
],
),
dict(
tenant_id='tenant2',
total_local_gb_usage=512,
total_vcpus_usage=32,
total_memory_mb_usage=1024,
total_hours=42,
start=today,
stop=yesterday,
server_usages=[dict(
instance_id='00000000-0000-0000-0000-0000000000000003',
name='test3',
hours=24,
memory_mb=1024,
local_gb=50,
vcpus=2,
tenant_id='tenant2',
flavor='m1.small',
started_at=yesterday,
ended_at=today,
state='terminated',
uptime=86400),
dict(
instance_id='00000000-0000-0000-0000-0000000000000002',
name='test2',
hours=42,
memory_mb=4201,
local_gb=25,
vcpus=1,
tenant_id='tenant4',
flavor='m1.tiny',
started_at=today,
ended_at=yesterday,
state='terminated',
uptime=43200),
],
),
]
tenant_usages = dict(tenant_usages=raw_usages)
text = serializer.serialize(tenant_usages)
tree = etree.fromstring(text)
self.assertEqual('tenant_usages', tree.tag)
self.assertEqual(len(raw_usages), len(tree))
for idx, child in enumerate(tree):
self._verify_tenant_usage(raw_usages[idx], child)
class SimpleTenantUsageControllerTestV21(test.TestCase):
controller = simple_tenant_usage_v21.SimpleTenantUsageController()
def setUp(self):
super(SimpleTenantUsageControllerTestV21, self).setUp()
self.context = context.RequestContext('fakeuser', 'fake-project')
self.baseinst = get_fake_db_instance(START, STOP, instance_id=1,
tenant_id=self.context.project_id,
vm_state=vm_states.DELETED)
# convert the fake instance dict to an object
self.inst_obj = objects.Instance._from_db_object(
self.context, objects.Instance(), self.baseinst)
def test_get_flavor_from_sys_meta(self):
# Non-deleted instances get their type information from their
# system_metadata
with mock.patch.object(db, 'instance_get_by_uuid',
return_value=self.baseinst):
flavor = self.controller._get_flavor(self.context,
self.inst_obj, {})
self.assertEqual(objects.Flavor, type(flavor))
self.assertEqual(FAKE_INST_TYPE['id'], flavor.id)
def test_get_flavor_from_non_deleted_with_id_fails(self):
# If an instance is not deleted and missing type information from
# system_metadata, then that's a bug
self.inst_obj.system_metadata = {}
self.assertRaises(KeyError,
self.controller._get_flavor, self.context,
self.inst_obj, {})
def test_get_flavor_from_deleted_with_id(self):
# Deleted instances may not have type info in system_metadata,
# so verify that they get their type from a lookup of their
# instance_type_id
self.inst_obj.system_metadata = {}
self.inst_obj.deleted = 1
flavor = self.controller._get_flavor(self.context, self.inst_obj, {})
self.assertEqual(objects.Flavor, type(flavor))
self.assertEqual(FAKE_INST_TYPE['id'], flavor.id)
def test_get_flavor_from_deleted_with_id_of_deleted(self):
# Verify the legacy behavior of instance_type_id pointing to a
# missing type being non-fatal
self.inst_obj.system_metadata = {}
self.inst_obj.deleted = 1
self.inst_obj.instance_type_id = 99
flavor = self.controller._get_flavor(self.context, self.inst_obj, {})
self.assertIsNone(flavor)
class SimpleTenantUsageControllerTestV2(SimpleTenantUsageControllerTestV21):
controller = simple_tenant_usage_v2.SimpleTenantUsageController()
class SimpleTenantUsageUtilsV21(test.NoDBTestCase):
simple_tenant_usage = simple_tenant_usage_v21
def test_valid_string(self):
dt = self.simple_tenant_usage.parse_strtime(
"2014-02-21T13:47:20.824060", "%Y-%m-%dT%H:%M:%S.%f")
self.assertEqual(datetime.datetime(
microsecond=824060, second=20, minute=47, hour=13,
day=21, month=2, year=2014), dt)
def test_invalid_string(self):
self.assertRaises(exception.InvalidStrTime,
self.simple_tenant_usage.parse_strtime,
"2014-02-21 13:47:20.824060",
"%Y-%m-%dT%H:%M:%S.%f")
class SimpleTenantUsageUtilsV2(SimpleTenantUsageUtilsV21):
simple_tenant_usage = simple_tenant_usage_v2
license: apache-2.0 | hash: -7,511,676,056,559,111,000 | line_mean: 38.437848 | line_max: 79 | alpha_frac: 0.535306 | autogenerated: false | ratio: 4.12197 | config_test: true | has_no_keywords: false | few_assignments: false
repo_name: andrewjpage/plasmidtron | path: plasmidtron/CommandRunner.py | copies: 2 | size: 1925

import os
import logging
import subprocess
import shutil
import math
import tempfile
'''Given a list of commands, run them single-threaded or in parallel.'''
class CommandRunner:
def __init__(self, output_directory, logger, threads):
self.logger = logger
self.threads = threads
self.output_directory = output_directory
def run_list_of_kmc_commands(self, commands_to_run):
if self.threads > 1:
self.run_with_parallel(commands_to_run, self.kmc_processes())
else:
self.run_sequentially(commands_to_run)
def run_list_of_commands(self, commands_to_run):
if self.threads > 1:
self.run_with_parallel(commands_to_run, self.threads)
else:
self.run_sequentially(commands_to_run)
def run_sequentially(self, commands_to_run):
for c in commands_to_run:
self.logger.warning('Command to run %s', c)
subprocess.check_call(c, shell=True)
'''KMC handles multithreading badly, so give each process two threads and limit the overall number of independent processes.'''
def kmc_threads(self):
if self.threads >= 2:
return 2
else:
return 1
def kmc_processes(self):
if self.threads >= 2:
return int(math.floor(self.threads/2))
else:
return 1
'''Use GNU parallel to manage parallel processing'''
def run_with_parallel(self, commands_to_run, processes_in_parallel):
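# Write the commands to a file inside a temporary working directory, hand that
# file to GNU parallel with a job limit, and remove the directory afterwards.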
temp_working_dir = tempfile.mkdtemp(dir=os.path.abspath(self.output_directory))
file_of_commands = os.path.join(temp_working_dir,'commands_to_run')
with open(file_of_commands, 'w') as commands_file:
for c in commands_to_run:
self.logger.warning('Command to run %s', c)
commands_file.write(c + "\n")
gnu_parallel_command = ' '.join(['parallel', '--gnu', '-j '+ str(processes_in_parallel), '<',file_of_commands])
self.logger.warning('GNU parallel command to run %s', gnu_parallel_command)
subprocess.check_call(gnu_parallel_command, shell=True)
shutil.rmtree(temp_working_dir)
license: gpl-3.0 | hash: -8,374,165,883,074,710,000 | line_mean: 32.77193 | line_max: 121 | alpha_frac: 0.714805 | autogenerated: false | ratio: 3.119935 | config_test: false | has_no_keywords: false | few_assignments: false
repo_name: st-tu-dresden/inloop | path: inloop/grading/copypasta.py | copies: 1 | size: 5879

"""
Plagiarism detection support using JPlag.
"""
import re
import subprocess
from pathlib import Path
from shutil import copytree
from tempfile import TemporaryDirectory
from typing import Dict, Iterable, Optional, Set
from django.conf import settings
from django.contrib.auth.models import User
from django.db.models import QuerySet
from huey.api import Result
from huey.contrib.djhuey import db_task
from inloop.grading.models import save_plagiarism_set
from inloop.solutions.models import Solution
from inloop.tasks.models import Task
LINE_REGEX = re.compile(r"Comparing (.*?)-(.*?): (\d+\.\d+)")
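# Matches one console line per compared pair, e.g. "Comparing alice-bob: 87.5".
# Hyphens inside usernames are replaced by "$" before the check (see
# get_last_solutions), so the single "-" here always separates the two names.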
@db_task()
def jplag_check_async(users: QuerySet, tasks: QuerySet) -> Result:
"""
Submit a job to check solutions using the jplag_check function.
This function returns immediately and is supposed to be called from inside
view code. The actual JPlag invocation happens in a background worker process
that will wait for JPlag to complete.
The given queryset arguments will be serialized (pickled) before they are sent
to the background queue.
The results of the check will be available in the PlagiarismTest model.
Args:
users: A User queryset.
tasks: A Task queryset.
Returns:
A huey Result object.
"""
jplag_check(users, tasks)
def jplag_check(
users: Iterable[User],
tasks: Iterable[Task],
min_similarity: Optional[int] = None,
result_dir: Optional[Path] = None,
) -> Set[Solution]:
"""
Check solutions of the given users for the given tasks with JPlag.
Args:
users: A User iterable (e.g., queryset).
tasks: A Task iterable (e.g., queryset).
min_similarity: Minimum solution similarity after which two solutions
shall be regarded as plagiarism (optional).
result_dir: Directory where JPlag HTML files shall be saved to (optional).
The given directory must not already exist.
Returns:
A set containing the solutions that have been identified as plagiarism.
"""
if min_similarity is None:
min_similarity = settings.JPLAG_DEFAULT_SIMILARITY
with TemporaryDirectory() as tmpdir:
path = Path(tmpdir)
plagiarism_set = set()
for task in tasks:
plagiarism_set.update(jplag_check_task(users, task, min_similarity, path))
save_plagiarism_set(plagiarism_set, str(path))
if result_dir:
copytree(src=path, dst=result_dir)
return plagiarism_set
def jplag_check_task(
users: Iterable[User],
task: Task,
min_similarity: int,
result_path: Path,
) -> Set[Solution]:
"""
Check solutions of the given users for the given single task with JPlag.
Args:
users: A User queryset.
task: A Task object.
min_similarity: Minimum solution similarity after which two solutions
shall be regarded as plagiarism.
result_path: Directory where JPlag HTML files shall be saved to.
Returns:
A set containing the solutions that have been identified as plagiarism.
"""
with TemporaryDirectory() as tmpdir:
root_path = Path(tmpdir)
last_solutions = get_last_solutions(users, task)
if len(last_solutions) < 2:
return set()
prepare_directories(root_path, last_solutions)
output = exec_jplag(min_similarity, root_path, result_path.joinpath(task.slug))
return parse_output(output, min_similarity, last_solutions)
def get_last_solutions(users: Iterable[User], task: Task) -> Dict[str, Solution]:
"""
Get the last valid solution of the given users for a given task.
"""
last_solutions = {}
for user in users:
last_solution = Solution.objects.filter(author=user, task=task, passed=True).last()
if last_solution is not None:
# escape hyphens in usernames with an unused (since
# disallowed) character, otherwise the usernames cannot
# be extracted from the jplag output
last_solutions[user.username.replace("-", "$")] = last_solution
return last_solutions
def prepare_directories(root_path: Path, last_solutions: Dict[str, Solution]) -> None:
"""
Copy the given solutions to root_path, using the folder structure expected by JPlag.
The expected folder structure, for one task, will look like this:
root_path/
user-1/
File1.java
File2.java
user-2/
File1.java
File2.java
"""
for username, last_solution in last_solutions.items():
copytree(src=last_solution.path, dst=root_path.joinpath(username))
def parse_output(
output: str,
min_similarity: int,
last_solutions: Dict[str, Solution],
) -> Set[Solution]:
"""
Extract plagiarism check results from the given JPlag command line output.
Returns:
A set containing the solutions that have been identified as plagiarism.
"""
plagiarism_set = set()
for match in LINE_REGEX.finditer(output):
username1, username2, similarity = match.groups()
similarity = float(similarity)
if similarity >= min_similarity:
plagiarism_set.add(last_solutions[username1])
plagiarism_set.add(last_solutions[username2])
return plagiarism_set
def exec_jplag(min_similarity: int, root_path: Path, result_path: Path) -> str:
"""
Execute the JPlag Java program with the given parameters and return its output.
"""
args = ["java", "-cp", settings.JPLAG_JAR_PATH, "jplag.JPlag"]
args.append("-vl")
args.extend(["-l", "java19"])
args.extend(["-m", f"{min_similarity}%"])
args.extend(["-r", f"{result_path}"])
args.append(f"{root_path}")
return subprocess.check_output(args, stderr=subprocess.DEVNULL, universal_newlines=True)
license: gpl-3.0 | hash: -2,051,349,845,701,333,000 | line_mean: 32.403409 | line_max: 92 | alpha_frac: 0.665079 | autogenerated: false | ratio: 3.898541 | config_test: false | has_no_keywords: false | few_assignments: false
repo_name: cedriclaunay/gaffer | path: python/GafferUI/GLWidget.py | copies: 1 | size: 13941

##########################################################################
#
# Copyright (c) 2011, John Haddon. All rights reserved.
# Copyright (c) 2011-2013, Image Engine Design Inc. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above
# copyright notice, this list of conditions and the following
# disclaimer.
#
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following
# disclaimer in the documentation and/or other materials provided with
# the distribution.
#
# * Neither the name of John Haddon nor the names of
# any other contributors to this software may be used to endorse or
# promote products derived from this software without specific prior
# written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
# IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
# THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
# PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
# LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
# NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
##########################################################################
import os
import ctypes
import logging
# the OpenGL module loves spewing things into logs, and for some reason
# when running in maya 2012 the default log level allows info messages through.
# so we set a specific log level on the OpenGL logger to keep it quiet.
logging.getLogger( "OpenGL" ).setLevel( logging.WARNING )
import IECore
import Gaffer
import GafferUI
# import lazily to improve startup of apps which don't use GL functionality
GL = Gaffer.lazyImport( "OpenGL.GL" )
IECoreGL = Gaffer.lazyImport( "IECoreGL" )
QtCore = GafferUI._qtImport( "QtCore" )
QtGui = GafferUI._qtImport( "QtGui" )
QtOpenGL = GafferUI._qtImport( "QtOpenGL", lazy=True )
## The GLWidget is a base class for all widgets which wish to draw using OpenGL.
# Derived classes override the _draw() method to achieve this.
class GLWidget( GafferUI.Widget ) :
## This enum defines the optional elements of the GL buffer used
# for display.
BufferOptions = IECore.Enum.create(
"Alpha",
"Depth",
"Double"
)
## Note that you won't always get the buffer options you ask for - a best fit is found
# among the available formats. In particular it appears that a depth buffer is often present
# even when not requested.
def __init__( self, bufferOptions = set(), **kw ) :
format = QtOpenGL.QGLFormat()
format.setRgba( True )
format.setAlpha( self.BufferOptions.Alpha in bufferOptions )
format.setDepth( self.BufferOptions.Depth in bufferOptions )
format.setDoubleBuffer( self.BufferOptions.Double in bufferOptions )
if hasattr( format, "setVersion" ) : # setVersion doesn't exist in qt prior to 4.7.
format.setVersion( 2, 1 )
graphicsView = _GLGraphicsView( format )
self.__graphicsScene = _GLGraphicsScene( graphicsView, Gaffer.WeakMethod( self.__draw ) )
graphicsView.setScene( self.__graphicsScene )
GafferUI.Widget.__init__( self, graphicsView, **kw )
## Adds a Widget as an overlay.
## \todo Support more than one overlay, and provide grid-based
# placement options. Perhaps GLWidget should also derive from Container
# to support auto-parenting and appropriate removeChild() behaviour.
def addOverlay( self, overlay ) :
assert( overlay.parent() is None )
self.__overlay = overlay
self.__overlay._setStyleSheet()
item = self.__graphicsScene.addWidget( self.__overlay._qtWidget() )
## Called whenever the widget is resized. May be reimplemented by derived
# classes if necessary. The appropriate OpenGL context will already be current
# when this is called.
def _resize( self, size ) :
GL.glViewport( 0, 0, size.x, size.y )
## Derived classes must override this to draw their contents using
# OpenGL calls. The appropriate OpenGL context will already be current
# when this is called.
def _draw( self ) :
pass
## Derived classes may call this when they wish to trigger a redraw.
def _redraw( self ) :
self._glWidget().update()
## May be used by derived classes to get access to the internal
# QGLWidget. Note that _makeCurrent() should be used in preference
# to _glWidget().makeCurrent(), for the reasons stated in the
# documentation for that method.
def _glWidget( self ) :
return self._qtWidget().viewport()
## May be used by derived classes to make the OpenGL context
# for this widget current. Returns True if the operation was
# successful and False if not. In an ideal world, the return
# value would always be True, but it appears that there are
# Qt/Mac bugs which cause it not to be from time to time -
# typically for newly created Widgets. If False is returned,
# no OpenGL operations should be undertaken subsequently by
# the caller.
def _makeCurrent( self ) :
self._qtWidget().viewport().makeCurrent()
return self.__framebufferValid()
def __framebufferValid( self ) :
import OpenGL.GL.framebufferobjects
return GL.framebufferobjects.glCheckFramebufferStatus( GL.framebufferobjects.GL_FRAMEBUFFER ) == GL.framebufferobjects.GL_FRAMEBUFFER_COMPLETE
def __draw( self ) :
# Qt sometimes enters our GraphicsScene.drawBackground() method
# with a GL error flag still set. We unset it here so it won't
# trigger our own error checking.
while GL.glGetError() :
pass
if not self.__framebufferValid() :
return
# we need to call the init method after a GL context has been
# created, and this seems like the only place that is guaranteed.
# calling it here does mean we call init() way more than needed,
# but it's safe.
IECoreGL.init( True )
self._draw()
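# A minimal sketch of how GLWidget is meant to be subclassed (illustrative only;
# `MyGLWidget` and the GL calls below are hypothetical, not part of Gaffer) :
#
#     class MyGLWidget( GLWidget ) :
#
#         def __init__( self ) :
#             GLWidget.__init__( self, bufferOptions = { self.BufferOptions.Depth, self.BufferOptions.Double } )
#
#         def _draw( self ) :
#             # the OpenGL context is already current when _draw() is called
#             GL.glClearColor( 0.0, 0.0, 0.0, 1.0 )
#             GL.glClear( GL.GL_COLOR_BUFFER_BIT | GL.GL_DEPTH_BUFFER_BIT )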
class _GLGraphicsView( QtGui.QGraphicsView ) :
def __init__( self, format ) :
QtGui.QGraphicsView.__init__( self )
self.setObjectName( "gafferGLWidget" )
self.setHorizontalScrollBarPolicy( QtCore.Qt.ScrollBarAlwaysOff )
self.setVerticalScrollBarPolicy( QtCore.Qt.ScrollBarAlwaysOff )
glWidget = self.__createQGLWidget( format )
# On mac, we need to hide the GL widget until the last
# possible moment, otherwise we get "invalid drawable"
# errors spewing all over the place. See event() for the
# spot where we show the widget.
glWidget.hide()
self.setViewport( glWidget )
self.setViewportUpdateMode( self.FullViewportUpdate )
# QAbstractScrollArea (one of our base classes), implements
# minimumSizeHint() to include enough room for scrollbars.
# But we know we'll never show scrollbars, and don't want
# a minimum size, so we reimplement it.
def minimumSizeHint( self ) :
return QtCore.QSize()
def event( self, event ) :
if event.type() == event.PolishRequest :
# This seems to be the one signal that reliably
# lets us know we're becoming genuinely visible
# on screen. We use it to show the GL widget we
# hid in our constructor.
self.viewport().show()
return QtGui.QGraphicsView.event( self, event )
def resizeEvent( self, event ) :
if self.scene() is not None :
self.scene().setSceneRect( 0, 0, event.size().width(), event.size().height() )
owner = GafferUI.Widget._owner( self )
# clear any existing errors that may trigger
# error checking code in _resize implementations.
while GL.glGetError() :
pass
owner._makeCurrent()
owner._resize( IECore.V2i( event.size().width(), event.size().height() ) )
def keyPressEvent( self, event ) :
# We have to reimplement this method to prevent QAbstractScrollArea
# from stealing the cursor keypresses, preventing them from
# being used by GLWidget subclasses. QAbstractScrollArea uses
# those keypresses to move the scrollbars, but we don't want the
# scrolling functionality at all. Our implementation of this method
# is functionally identical to the QGraphicsView one, except it
# passes unused events to QFrame, bypassing QAbstractScrollArea.
if self.scene() is not None and self.isInteractive() :
QtGui.QApplication.sendEvent( self.scene(), event )
if event.isAccepted() :
return
QtGui.QFrame.keyPressEvent( self, event )
# We keep a single hidden widget which owns the texture and display lists
# and then share those with all the widgets we really want to make.
__shareWidget = None
@classmethod
def __createQGLWidget( cls, format ) :
# try to make a host specific widget if necessary.
result = cls.__createMayaQGLWidget( format )
if result is not None :
return result
result = cls.__createHoudiniQGLWidget( format )
if result is not None :
return result
# and if it wasn't necessary, just breathe a sigh of relief
# and make a nice normal one.
if cls.__shareWidget is None :
cls.__shareWidget = QtOpenGL.QGLWidget()
return QtOpenGL.QGLWidget( format, shareWidget = cls.__shareWidget )
@classmethod
def __createHostedQGLWidget( cls, format, hostContextActivator ) :
# When running Gaffer embedded in a host application such as Maya
# or Houdini, we want to be able to share OpenGL resources between
# gaffer uis and host viewport uis, because IECoreGL will be used
# in both. So we implement our own QGLContext class which creates a
# context which shares with the host.
import OpenGL.GLX
# This is our custom context class which allows us to share gl
# resources with the hosts's contexts. We define it in here rather than
# at the top level because we want to import QtOpenGL lazily and
# don't want to trigger a full import until the last minute.
## \todo Call glXDestroyContext appropriately, although as far as I
# can tell this is impossible. The base class implementation calls it
# in reset(), but that's not virtual, and we can't store it in d->cx
# (which is what the base class destroys) because that's entirely
# on the C++ side of things.
class HostedGLContext( QtOpenGL.QGLContext ) :
def __init__( self, format, paintDevice, hostContextActivator ) :
QtOpenGL.QGLContext.__init__( self, format, paintDevice )
self.__paintDevice = paintDevice
self.__context = None
self.__hostContextActivator = hostContextActivator
def chooseContext( self, shareContext ) :
assert( self.__context is None )
# We have to call this to get d->vi set in the base class, because
# QGLWidget::setContext() accesses it directly, and will crash if we don't.
QtOpenGL.QGLContext.chooseContext( self, shareContext )
# Get the host's main OpenGL context. It is the responsibility
# of the hostContextActivator passed to __init__ to make the host
# context current so we can access it.
self.__hostContextActivator()
hostContext = OpenGL.GLX.glXGetCurrentContext()
self.__display = OpenGL.GLX.glXGetCurrentDisplay()
# Get a visual - we let the base class figure this out, but then we need
# to convert it from the form given by the qt bindings into the ctypes form
# needed by PyOpenGL.
visual = self.chooseVisual()
visual = ctypes.cast( int( visual ), ctypes.POINTER( OpenGL.raw._GLX.XVisualInfo ) )
# Make our context.
self.__context = OpenGL.GLX.glXCreateContext(
self.__display[0],
visual,
hostContext,
True
)
return True
def makeCurrent( self ) :
success = OpenGL.GLX.glXMakeCurrent( self.__display, self.__paintDevice.effectiveWinId(), self.__context )
assert( success )
result = QtOpenGL.QGLWidget()
result.setContext( HostedGLContext( format, result, hostContextActivator ) )
return result
@classmethod
def __createMayaQGLWidget( cls, format ) :
try :
import maya.OpenMayaRender
except ImportError :
# we're not in maya - createQGLWidget() will just make a
# normal widget.
return None
mayaRenderer = maya.OpenMayaRender.MHardwareRenderer.theRenderer()
return cls.__createHostedQGLWidget( format, IECore.curry( mayaRenderer.makeResourceContextCurrent, mayaRenderer.backEndString() ) )
@classmethod
def __createHoudiniQGLWidget( cls, format ) :
try :
import hou
except ImportError :
# we're not in houdini - createQGLWidget() will just make a
# normal widget.
return None
import IECoreHoudini
return cls.__createHostedQGLWidget( format, IECoreHoudini.makeMainGLContextCurrent )
class _GLGraphicsScene( QtGui.QGraphicsScene ) :
def __init__( self, parent, backgroundDrawFunction ) :
QtGui.QGraphicsScene.__init__( self, parent )
self.__backgroundDrawFunction = backgroundDrawFunction
self.sceneRectChanged.connect( self.__sceneRectChanged )
def addWidget( self, widget ) :
if widget.layout() is not None :
# removing the size constraint is necessary to keep the widget the
# size we tell it to be in __updateItemGeometry.
widget.layout().setSizeConstraint( QtGui.QLayout.SetNoConstraint )
item = QtGui.QGraphicsScene.addWidget( self, widget )
self.__updateItemGeometry( item, self.sceneRect() )
return item
def drawBackground( self, painter, rect ) :
self.__backgroundDrawFunction()
def __sceneRectChanged( self, sceneRect ) :
for item in self.items() :
self.__updateItemGeometry( item, sceneRect )
def __updateItemGeometry( self, item, sceneRect ) :
geometry = item.widget().geometry()
item.widget().setGeometry( QtCore.QRect( 0, 0, sceneRect.width(), item.widget().sizeHint().height() ) )
| bsd-3-clause | -7,099,908,822,228,933,000 | 34.473282 | 144 | 0.719748 | false | 3.7176 | false | false | false |
OSSystems/jenkins-job-builder | jenkins_jobs/parser.py | 2 | 16452 | #!/usr/bin/env python
# Copyright (C) 2015 OpenStack, LLC.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
# Manage JJB yaml feature implementation
import copy
import fnmatch
import io
import itertools
import logging
import re
import os
from jenkins_jobs.constants import MAGIC_MANAGE_STRING
from jenkins_jobs.errors import JenkinsJobsException
from jenkins_jobs.formatter import deep_format
import jenkins_jobs.local_yaml as local_yaml
from jenkins_jobs import utils
__all__ = [
"YamlParser"
]
logger = logging.getLogger(__name__)
def matches(what, glob_patterns):
"""
Checks if the given string, ``what``, matches any of the glob patterns in
the iterable, ``glob_patterns``
:arg str what: String that we want to test if it matches a pattern
:arg iterable glob_patterns: glob patterns to match (list, tuple, set,
etc.)
"""
return any(fnmatch.fnmatch(what, glob_pattern)
for glob_pattern in glob_patterns)
def combination_matches(combination, match_combinations):
"""
    Checks if the given combination matches any of the given combination
    globs; a combination glob is a combination in which a missing key is
    considered to match. For example,
    (key1=2, key2=3)
    would match the combination glob:
(key2=3)
but not:
(key1=2, key2=2)
"""
for cmatch in match_combinations:
for key, val in combination.items():
if cmatch.get(key, val) != val:
break
else:
return True
return False
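# A small illustration of the matching semantics documented above (values are
# made up for the example):
#
#     combination_matches({'key1': 2, 'key2': 3}, [{'key2': 3}])   # -> True
#     combination_matches({'key1': 2, 'key2': 3}, [{'key2': 2}])   # -> False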
class YamlParser(object):
def __init__(self, jjb_config=None):
self.data = {}
self.jobs = []
self.views = []
self.jjb_config = jjb_config
self.keep_desc = jjb_config.yamlparser['keep_descriptions']
self.path = jjb_config.yamlparser['include_path']
def load_files(self, fn):
# handle deprecated behavior, and check that it's not a file like
# object as these may implement the '__iter__' attribute.
if not hasattr(fn, '__iter__') or hasattr(fn, 'read'):
logger.warning(
'Passing single elements for the `fn` argument in '
'Builder.load_files is deprecated. Please update your code '
'to use a list as support for automatic conversion will be '
'removed in a future version.')
fn = [fn]
files_to_process = []
for path in fn:
if not hasattr(path, 'read') and os.path.isdir(path):
files_to_process.extend([os.path.join(path, f)
for f in os.listdir(path)
if (f.endswith('.yml')
or f.endswith('.yaml'))])
else:
files_to_process.append(path)
# symlinks used to allow loading of sub-dirs can result in duplicate
# definitions of macros and templates when loading all from top-level
unique_files = []
for f in files_to_process:
if hasattr(f, 'read'):
unique_files.append(f)
continue
rpf = os.path.realpath(f)
if rpf not in unique_files:
unique_files.append(rpf)
else:
logger.warning("File '%s' already added as '%s', ignoring "
"reference to avoid duplicating yaml "
"definitions." % (f, rpf))
for in_file in unique_files:
# use of ask-for-permissions instead of ask-for-forgiveness
# performs better when low use cases.
if hasattr(in_file, 'name'):
fname = in_file.name
else:
fname = in_file
logger.debug("Parsing YAML file {0}".format(fname))
if hasattr(in_file, 'read'):
self._parse_fp(in_file)
else:
self.parse(in_file)
def _parse_fp(self, fp):
# wrap provided file streams to ensure correct encoding used
data = local_yaml.load(utils.wrap_stream(fp), search_path=self.path)
if data:
if not isinstance(data, list):
raise JenkinsJobsException(
"The topmost collection in file '{fname}' must be a list,"
" not a {cls}".format(fname=getattr(fp, 'name', fp),
cls=type(data)))
for item in data:
cls, dfn = next(iter(item.items()))
group = self.data.get(cls, {})
if len(item.items()) > 1:
n = None
for k, v in item.items():
if k == "name":
n = v
break
# Syntax error
raise JenkinsJobsException("Syntax error, for item "
"named '{0}'. Missing indent?"
.format(n))
# allow any entry to specify an id that can also be used
_id = dfn.get('id', dfn['name'])
if _id in group:
self._handle_dups(
"Duplicate entry found in '{0}: '{1}' already "
"defined".format(fp.name, _id))
group[_id] = dfn
self.data[cls] = group
def parse(self, fn):
with io.open(fn, 'r', encoding='utf-8') as fp:
self._parse_fp(fp)
def _handle_dups(self, message):
if not self.jjb_config.yamlparser['allow_duplicates']:
logger.error(message)
raise JenkinsJobsException(message)
else:
logger.warning(message)
def _getJob(self, name):
job = self.data.get('job', {}).get(name, None)
if not job:
return job
return self._applyDefaults(job)
def _getJobGroup(self, name):
return self.data.get('job-group', {}).get(name, None)
def _getJobTemplate(self, name):
job = self.data.get('job-template', {}).get(name, None)
if not job:
return job
return self._applyDefaults(job)
def _applyDefaults(self, data, override_dict=None):
if override_dict is None:
override_dict = {}
whichdefaults = data.get('defaults', 'global')
defaults = copy.deepcopy(self.data.get('defaults',
{}).get(whichdefaults, {}))
if defaults == {} and whichdefaults != 'global':
raise JenkinsJobsException("Unknown defaults set: '{0}'"
.format(whichdefaults))
for key in override_dict.keys():
if key in defaults.keys():
defaults[key] = override_dict[key]
newdata = {}
newdata.update(defaults)
newdata.update(data)
return newdata
def _formatDescription(self, job):
if self.keep_desc:
description = job.get("description", None)
else:
description = job.get("description", '')
if description is not None:
job["description"] = description + \
self._get_managed_string().lstrip()
def _getfullname(self, data):
if 'folder' in data:
return "%s/%s" % (data['folder'], data['name'])
return data['name']
def expandYaml(self, registry, jobs_glob=None):
changed = True
while changed:
changed = False
for module in registry.modules:
if hasattr(module, 'handle_data'):
if module.handle_data(self.data):
changed = True
for job in self.data.get('job', {}).values():
job = self._applyDefaults(job)
job['name'] = self._getfullname(job)
if jobs_glob and not matches(job['name'], jobs_glob):
logger.debug("Ignoring job {0}".format(job['name']))
continue
logger.debug("Expanding job '{0}'".format(job['name']))
self._formatDescription(job)
self.jobs.append(job)
for view in self.data.get('view', {}).values():
view['name'] = self._getfullname(view)
logger.debug("Expanding view '{0}'".format(view['name']))
self._formatDescription(view)
self.views.append(view)
for project in self.data.get('project', {}).values():
logger.debug("Expanding project '{0}'".format(project['name']))
# use a set to check for duplicate job references in projects
seen = set()
for jobspec in project.get('jobs', []):
if isinstance(jobspec, dict):
# Singleton dict containing dict of job-specific params
jobname, jobparams = next(iter(jobspec.items()))
if not isinstance(jobparams, dict):
jobparams = {}
else:
jobname = jobspec
jobparams = {}
job = self._getJob(jobname)
if job:
# Just naming an existing defined job
if jobname in seen:
self._handle_dups("Duplicate job '{0}' specified "
"for project '{1}'"
.format(jobname, project['name']))
seen.add(jobname)
continue
# see if it's a job group
group = self._getJobGroup(jobname)
if group:
for group_jobspec in group['jobs']:
if isinstance(group_jobspec, dict):
group_jobname, group_jobparams = \
next(iter(group_jobspec.items()))
if not isinstance(group_jobparams, dict):
group_jobparams = {}
else:
group_jobname = group_jobspec
group_jobparams = {}
job = self._getJob(group_jobname)
if job:
if group_jobname in seen:
self._handle_dups(
"Duplicate job '{0}' specified for "
"project '{1}'".format(group_jobname,
project['name']))
seen.add(group_jobname)
continue
template = self._getJobTemplate(group_jobname)
# Allow a group to override parameters set by a project
d = type(project)(project)
d.update(jobparams)
d.update(group)
d.update(group_jobparams)
# Except name, since the group's name is not useful
d['name'] = project['name']
if template:
self._expandYamlForTemplateJob(d, template,
jobs_glob)
continue
# see if it's a template
template = self._getJobTemplate(jobname)
if template:
d = type(project)(project)
d.update(jobparams)
self._expandYamlForTemplateJob(d, template, jobs_glob)
else:
raise JenkinsJobsException("Failed to find suitable "
"template named '{0}'"
.format(jobname))
# check for duplicate generated jobs
seen = set()
# walk the list in reverse so that last definition wins
for job in self.jobs[::-1]:
if job['name'] in seen:
self._handle_dups("Duplicate definitions for job '{0}' "
"specified".format(job['name']))
self.jobs.remove(job)
seen.add(job['name'])
return self.jobs, self.views
def _expandYamlForTemplateJob(self, project, template, jobs_glob=None):
dimensions = []
template_name = template['name']
# reject keys that are not useful during yaml expansion
for k in ['jobs']:
project.pop(k)
excludes = project.pop('exclude', [])
for (k, v) in project.items():
tmpk = '{{{0}}}'.format(k)
if tmpk not in template_name:
continue
if type(v) == list:
dimensions.append(zip([k] * len(v), v))
# XXX somewhat hackish to ensure we actually have a single
# pass through the loop
if len(dimensions) == 0:
dimensions = [(("", ""),)]
for values in itertools.product(*dimensions):
params = copy.deepcopy(project)
params = self._applyDefaults(params, template)
params['template-name'] = re.sub(r'({|})', r'\1\1', template_name)
try:
expanded_values = {}
for (k, v) in values:
if isinstance(v, dict):
inner_key = next(iter(v))
expanded_values[k] = inner_key
expanded_values.update(v[inner_key])
else:
expanded_values[k] = v
except TypeError:
project_name = project.pop('name')
logger.error(
"Exception thrown while expanding template '%s' for "
"project '%s', with expansion arguments of:\n%s\n"
"Original project input variables for template:\n%s\n"
"Most likely the inputs have items indented incorrectly "
"to describe how they should be applied.\n\nNote yaml "
"'null' is mapped to python's 'None'", template_name,
project_name,
"".join(local_yaml.dump({k: v}, default_flow_style=False)
for (k, v) in values),
local_yaml.dump(project, default_flow_style=False))
raise
params.update(expanded_values)
try:
params = deep_format(params, params)
except Exception:
logging.error(
"Failure formatting params '%s' with itself", params)
raise
if combination_matches(params, excludes):
logger.debug('Excluding combination %s', str(params))
continue
for key in template.keys():
if key not in params:
params[key] = template[key]
try:
expanded = deep_format(
template, params,
self.jjb_config.yamlparser['allow_empty_variables'])
except Exception:
logging.error(
"Failure formatting template '%s', containing '%s' with "
"params '%s'", template_name, template, params)
raise
expanded['name'] = self._getfullname(expanded)
job_name = expanded.get('name')
if jobs_glob and not matches(job_name, jobs_glob):
continue
self._formatDescription(expanded)
self.jobs.append(expanded)
def _get_managed_string(self):
# The \n\n is not hard coded, because they get stripped if the
# project does not otherwise have a description.
return "\n\n" + MAGIC_MANAGE_STRING
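# An illustrative (hypothetical) YAML snippet of the kind expandYaml() processes:
# a job-template parameterised by a project.
#
#     - job-template:
#         name: '{name}-unit-tests'
#         builders:
#           - shell: 'tox'
#
#     - project:
#         name: myproject
#         jobs:
#           - '{name}-unit-tests'
#
# Expansion substitutes the project parameters into the template, yielding a job
# named 'myproject-unit-tests'.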
| apache-2.0 | -5,172,574,663,110,602,000 | 38.835351 | 79 | 0.504194 | false | 4.707296 | true | false | false |
xflows/clowdflows | workflows/management/commands/export_all.py | 2 | 1091 | from unicodedata import category
from django.core.management.base import BaseCommand, CommandError
from workflows.models import Category, AbstractWidget, AbstractInput, AbstractOutput, AbstractOption
from django.core import serializers
from optparse import make_option
import uuid
import os
import sys
from django.conf import settings
import json
from .export_package import export_package
class Command(BaseCommand):
args = 'package_name'
help = 'Exports all packages.'
def handle(self, *args, **options):
packages = []
for app in settings.INSTALLED_APPS:
if 'workflows.' in app:
packages.append(app)
for package in packages:
package_name = package.split('workflows.')[1]
self.stdout.write("Exporting package "+package_name+"\n")
export_package(package_name,self.stdout)
#temporary fix
#self.stdout.write("Exporting cf_nlp package \n")
#export_package('nlp',self.stdout, dest_folder='/home/matej/platforms/clowdflows-env/cf_nlp/nlp') | mit | 8,983,052,174,325,971,000 | 34.433333 | 105 | 0.68011 | false | 4.086142 | false | false | false |
songjmcn/machine_learning | core/kmeans.py | 1 | 3013 | #coding=utf-8
'''
Created on 2014-06-18
K-means algorithm: cluster the data into K clusters around K centroids
Distance metric: Euclidean distance
Centroid update: mean of the cluster members
@author: sjm
'''
import numpy as np
import random
def Euclid_dist(x,y):
if len(y.shape)==1:
return np.sqrt(np.sum(np.sum((x-y)**2)))
elif len(y.shape)==2:
return np.sqrt(np.sum((x-y)**2,axis=1))
else:
raise ValueError('error x or y shape')
def dist(x,y):
    '''
    Compute the Euclidean distance between two data points
    '''
    return np.sqrt(np.sum((x-y)**2))
def distMat(X,Y):
    '''
    Compute the pairwise distance matrix between two matrices, i.e. the distance
    from every row of X to every row of Y
    '''
mat=[map(lambda y:dist(x,y),Y) for x in X]
return np.array(mat)
def sum_dist(data,label,center):
s=0
for i in range(data.shape[0]):
s+=dist(data[i],center[label[i]])
return s
def kmeans(data,cluster,threshold=1.0e-19,maxIter=100):
data=np.array(data)
d1,d2=data.shape
'''
find the label
'''
batch=np.random.permutation(d1)
center=data[batch[0:cluster],:]
print(center.shape)
    labels=np.zeros((d1,),dtype=int)
last_cost=0
for ii in xrange(0,d1):
d=Euclid_dist(data[ii,:],center[labels[ii],:])
last_cost+=d
for index in xrange(0,maxIter):
        '''
        Assign each sample to the label of its nearest centroid
        '''
for ii in xrange(0,d1):
this_data=data[ii,:]
d=Euclid_dist(this_data,center)
label=np.argmin(d)
labels[ii]=label
for ii in xrange(0,cluster):
batch_no=(labels==ii).nonzero()
batch=data[batch_no]
m=np.mean(batch,axis=0)
#print(m.shape)
center[ii,:]=m
#print(center)
current_cost=0
for ii in xrange(0,d1):
d=Euclid_dist(data[ii,:],center[labels[ii],:])
current_cost+=d
if last_cost-current_cost<threshold:
break
else:
last_cost=current_cost
return center
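# A minimal usage sketch on synthetic data (illustrative only, not part of the
# original module):
#
#     data = np.vstack([np.random.randn(100, 2) + 5,
#                       np.random.randn(100, 2) - 5])
#     centers = kmeans(data, cluster=2)   # two well-separated blobs -> two centroids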
'''
def kmeans2(data,cluster,threshold=1.0e-19,maxIter=100):
m=len(data)
labels=np.zeros(m)
#cluster=None
center=np.array(random.sample(data,cluster))
s=sum_dist(data,labels,center)
n=0
while 1:
n=n+1
tmp_mat=distMat(data,center)
labels=tmp_mat.argmin(axis=1)
for i in xrange(cluster):
idx=(labels==i).nonzero()
m=np.mean(data[idx[0]],axis=0)
center[i]=m
#d_i=data[idx[0]]
#d_i=d_i[0]
s1=sum_dist(data,labels,center)
if s-s1<threshold:
break;
s=s1
if n>maxIter:
break;
return center
'''
if __name__=='__main__':
from scipy.io import loadmat,savemat
data=loadmat(r'E:\code\matlab\DeepLearnToolbox-master\data\mnist_uint8.mat')
train_x=np.asarray(data['train_x'],np.float)/255.0
codebook=kmeans(train_x,10)
savemat('codebook.mat',{'C':codebook}) | gpl-2.0 | -3,712,052,875,470,658,600 | 25.895238 | 80 | 0.555437 | false | 2.665722 | false | false | false |
xopok/xopok-scripts | media/dts2ac3.py | 1 | 4427 | #!/usr/bin/env python
import os
import sys
import re
import time
import subprocess
import tempfile
import xml.dom.minidom as xmldom
ac3fifo = False
def GetText(n):
return n.childNodes.item(0).data
def CreateFifo(suffix='.ac3'):
tfile, tname = tempfile.mkstemp(suffix=suffix)
os.close(tfile)
os.unlink(tname)
os.mkfifo(tname)
return tname
filesToDelete = []
def main():
if len(sys.argv) < 3:
print "Usage: %s infile outfile [format]" % sys.argv[0]
sys.exit(1)
infile = sys.argv[1]
outfile = sys.argv[2]
fmt = 'ac3'
if len(sys.argv) >= 4:
fmt = sys.argv[3]
if not os.path.exists(infile):
print "%s not exists" % infile
sys.exit(1)
if not os.access(os.path.dirname(os.path.realpath(outfile)), os.W_OK):
print "File \"%s\" could not be written" % os.path.realpath(outfile)
sys.exit(1)
p = subprocess.Popen(['mkvinfo', '-s', infile], stdout=subprocess.PIPE)
tracksToConvert = []
tracksToCopy = []
# TODO: preserve track's language
for line in p.stdout.xreadlines():
if line.startswith("Track"):
r = re.search("Track [0-9]+: ([^,]+), codec ID: ([^,]+), mkvmerge[^0-9]+([0-9]+),.*", line)
if r and r.groups()[0] == 'audio':
id = r.groups()[2]
srcfmt = ['A_DTS']
if fmt == 'mp3':
srcfmt = ['A_DTS', 'A_AAC', 'A_AC3']
if not r.groups()[1] in srcfmt:
tracksToCopy.append(id)
else:
tracksToConvert.append(id)
else:
p.kill()
p.wait()
break
if not tracksToConvert:
print "Nothing to convert"
return 0
tracks = []
for i in tracksToConvert:
dts = CreateFifo(suffix='.dts')
if ac3fifo:
ac3 = CreateFifo(suffix='.'+fmt)
else:
tfile, ac3 = tempfile.mkstemp(suffix='.'+fmt)
os.close(tfile)
filesToDelete.append(dts)
filesToDelete.append(ac3)
tracks.append((i, dts, ac3))
# Extractor
cmdline = ['mkvextract', 'tracks', infile]
for id, dts, ac3 in tracks:
cmdline += ['%s:%s' % (id, dts)]
print cmdline
p_extract = subprocess.Popen(cmdline, stdout=subprocess.PIPE)
devnull = os.open('/dev/null', os.O_WRONLY)
convs = []
# Converters
for id, dts, ac3 in tracks:
#cmdline = ['ffmpeg', '-v', '3', '-y', '-i', dts, '-alang', 'rus', '-ab', '448k', '-ar', '48000', '-ac', '6', '-acodec', 'ac3', ac3]
cmdline = []
if fmt == 'ac3':
cmdline = ['avconv', '-threads', 'auto', '-y', '-i', dts, '-b', '448k', '-ar', '48000', '-q', '0', '-ac', '6', '-acodec', 'ac3', ac3]
else:
cmdline = ['avconv', '-threads', 'auto', '-y', '-i', dts, '-b', '256k', '-ar', '48000', '-q', '0', '-acodec', 'libmp3lame', ac3]
print cmdline
if not ac3fifo:
p = subprocess.Popen(cmdline, stdout=devnull, stderr=devnull)
else:
p = subprocess.Popen(cmdline, stdout=subprocess.PIPE, stderr=devnull)
p1 = subprocess.Popen(['bash', '-c', 'cat > %s' % ac3], stdin = p.stdout)
convs.append((p1, None))
convs.append((p, cmdline))
# Wait for extract and convert
if not ac3fifo:
out_e = p_extract.communicate()
if p_extract.returncode != 0:
print "Extract failed, %s" % str(out_e)
return 2
for i, cmdline in convs:
out = i.communicate()
if i.returncode != 0:
print "Convert (%s) failed, %s" % (str(cmdline), str(out))
return 3
# Merger
cmdline = ['mkvmerge', '-q', '-o', outfile]
for id, dts, ac3 in tracks:
cmdline += [ac3]
if tracksToCopy:
cmdline += ['-a', ",".join(tracksToCopy)]
else:
cmdline += ['-A']
cmdline += [infile]
print cmdline
p_merge = subprocess.Popen(cmdline, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
out = p_merge.communicate()
if p_merge.returncode != 0:
print "Merge failed: [%s], [%s]" % (out[0], out[1])
return 1
print "Ok"
return 0
if __name__ == '__main__':
res = 1
#try:
res = main()
#except Exception, e:
# print e
# pass
for i in filesToDelete:
try:
os.unlink(i)
except:
pass
sys.exit(res)
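# Example invocations (illustrative):
#     python dts2ac3.py input.mkv output.mkv        # re-encode DTS audio tracks to AC3
#     python dts2ac3.py input.mkv output.mkv mp3    # re-encode DTS/AAC/AC3 tracks to MP3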
| mit | 8,234,692,097,620,393,000 | 27.018987 | 143 | 0.527445 | false | 3.286563 | false | false | false |
YuriGural/erpnext | erpnext/setup/doctype/company/delete_company_transactions.py | 15 | 3848 | # Copyright (c) 2015, Frappe Technologies Pvt. Ltd. and Contributors
# License: GNU General Public License v3. See license.txt
from __future__ import unicode_literals
import frappe
from frappe.utils import cint
from frappe import _
from frappe.desk.notifications import clear_notifications
@frappe.whitelist()
def delete_company_transactions(company_name):
frappe.only_for("System Manager")
doc = frappe.get_doc("Company", company_name)
if frappe.session.user != doc.owner:
frappe.throw(_("Transactions can only be deleted by the creator of the Company"),
frappe.PermissionError)
delete_bins(company_name)
delete_lead_addresses(company_name)
for doctype in frappe.db.sql_list("""select parent from
tabDocField where fieldtype='Link' and options='Company'"""):
if doctype not in ("Account", "Cost Center", "Warehouse", "Budget",
"Party Account", "Employee", "Sales Taxes and Charges Template",
"Purchase Taxes and Charges Template", "POS Profile", 'BOM'):
delete_for_doctype(doctype, company_name)
# Clear notification counts
clear_notifications()
def delete_for_doctype(doctype, company_name):
meta = frappe.get_meta(doctype)
company_fieldname = meta.get("fields", {"fieldtype": "Link",
"options": "Company"})[0].fieldname
if not meta.issingle:
if not meta.istable:
# delete communication
delete_communications(doctype, company_name, company_fieldname)
# delete children
for df in meta.get_table_fields():
frappe.db.sql("""delete from `tab{0}` where parent in
(select name from `tab{1}` where `{2}`=%s)""".format(df.options,
doctype, company_fieldname), company_name)
# delete parent
frappe.db.sql("""delete from `tab{0}`
where {1}= %s """.format(doctype, company_fieldname), company_name)
# reset series
naming_series = meta.get_field("naming_series")
if naming_series and naming_series.options:
prefixes = sorted(naming_series.options.split("\n"), lambda a, b: len(b) - len(a))
for prefix in prefixes:
if prefix:
last = frappe.db.sql("""select max(name) from `tab{0}`
where name like %s""".format(doctype), prefix + "%")
if last and last[0][0]:
last = cint(last[0][0].replace(prefix, ""))
else:
last = 0
frappe.db.sql("""update tabSeries set current = %s
where name=%s""", (last, prefix))
def delete_bins(company_name):
frappe.db.sql("""delete from tabBin where warehouse in
(select name from tabWarehouse where company=%s)""", company_name)
def delete_lead_addresses(company_name):
"""Delete addresses to which leads are linked"""
leads = frappe.get_all("Lead", filters={"company": company_name})
leads = [ "'%s'"%row.get("name") for row in leads ]
addresses = []
if leads:
addresses = frappe.db.sql_list("""select parent from `tabDynamic Link` where link_name
in ({leads})""".format(leads=",".join(leads)))
if addresses:
addresses = ["'%s'"%addr for addr in addresses]
frappe.db.sql("""delete from tabAddress where name in ({addresses}) and
name not in (select distinct dl1.parent from `tabDynamic Link` dl1
inner join `tabDynamic Link` dl2 on dl1.parent=dl2.parent
and dl1.link_doctype<>dl2.link_doctype)""".format(addresses=",".join(addresses)))
frappe.db.sql("""delete from `tabDynamic Link` where link_doctype='Lead'
and parenttype='Address' and link_name in ({leads})""".format(leads=",".join(leads)))
frappe.db.sql("""update tabCustomer set lead_name=NULL where lead_name in ({leads})""".format(leads=",".join(leads)))
def delete_communications(doctype, company_name, company_fieldname):
frappe.db.sql("""
DELETE FROM `tabCommunication` WHERE reference_doctype = %s AND
EXISTS (SELECT name FROM `tab{0}` WHERE {1} = %s AND `tabCommunication`.reference_name = name)
""".format(doctype, company_fieldname), (doctype, company_name))
| gpl-3.0 | -4,044,735,935,855,092,700 | 37.48 | 119 | 0.695166 | false | 3.378402 | false | false | false |
unicefuganda/uSurvey | survey/forms/question_set.py | 1 | 3532 | from django import forms
from django.core.exceptions import ValidationError
from django.forms import ModelForm
from survey.models import QuestionSetChannel
from survey.models import WebAccess
from survey.models import Batch
def get_question_set_form(model_class):
class QuestionSetForm(ModelForm):
access_channels = forms.MultipleChoiceField(
widget=forms.CheckboxSelectMultiple(
attrs={
'class': 'access_channels'}), choices=[
opt for opt in QuestionSetChannel.ACCESS_CHANNELS if not opt[0] == WebAccess.choice_name()])
def __init__(self, *args, **kwargs):
if kwargs.get('instance'):
initial = kwargs.setdefault('initial', {})
initial['access_channels'] = [
c.channel for c in kwargs['instance'].access_channels.all()]
#self.fields['validation'] = kwargs['instance']
super(QuestionSetForm, self).__init__(*args, **kwargs)
class Meta:
model = model_class
fields = ['name', 'description', ]
widgets = {
'name': forms.TextInput(
attrs={
'size': 29,
'title': 'Your name',
'style': 'height: 2em;width:231px;'}),
'description': forms.Textarea(
attrs={
"rows": 5,
"cols": 30}),
}
def clean_name(self):
name = self.cleaned_data['name'].strip()
            if self.instance.pk is None and model_class.objects.filter(
name=name).exists():
raise ValidationError('Name already exists')
return name
def save(self, commit=True, **kwargs):
question_set = super(QuestionSetForm, self).save(commit=commit)
bc = QuestionSetChannel.objects.filter(qset=question_set)
bc.delete()
for val in kwargs['access_channels']:
QuestionSetChannel.objects.create(
qset=question_set, channel=val)
return question_set
return QuestionSetForm
class BatchForm(get_question_set_form(Batch)):
class Meta:
model = Batch
fields = ['name', 'description', 'survey', ]
widgets = {
'description': forms.Textarea(attrs={"rows": 4, "cols": 40}),
'survey': forms.HiddenInput(),
}
#
# class BatchQuestionsForm(ModelForm):
# questions = forms.ModelMultipleChoiceField(label=u'', queryset=QuestionTemplate.objects.filter(),
# widget=forms.SelectMultiple(attrs={'class': 'multi-select'}))
#
# class Meta:
# model = Batch
# fields = []
#
# def __init__(self, batch=None, *args, **kwargs):
# super(BatchQuestionsForm, self).__init__(*args, **kwargs)
# def save_question_to_batch(self, batch):
# for question in self.cleaned_data['questions']:
# question.save()
# order = BatchQuestionOrder.next_question_order_for(batch)
# BatchQuestionOrder.objects.create(question=question, batch=batch, order=order)
# question.batches.add(batch)
#
# def save(self, commit=True, *args, **kwargs):
# batch = super(BatchQuestionsForm, self).save(commit=commit, *args, **kwargs)
#
# if commit:
# batch.save()
# self.save_question_to_batch(batch)
| bsd-3-clause | -2,064,479,342,412,632,000 | 36.574468 | 110 | 0.554643 | false | 4.365884 | false | false | false |
BaptisteLefebvre/pyalp | pyalp/base/constant.py | 1 | 4545 | # Special value
ALP_DEFAULT = 0
ALP_INVALID_ID = 2 ** 32 - 1 # ulong maximum
# Return value
ALP_OK = 0
ALP_NOT_ONLINE = 1001
ALP_NOT_IDLE = 1002
ALP_NOT_AVAILABLE = 1003
ALP_NOT_READY = 1004
ALP_PARM_INVALID = 1005
ALP_ADDR_INVALID = 1006
ALP_MEMORY_FULL = 1007
ALP_SEQ_IN_USE = 1008
ALP_HALTED = 1009
ALP_ERROR_INIT = 1010
ALP_ERROR_COMM = 1011
ALP_DEVICE_REMOVED = 1012
ALP_NOT_CONFIGURED = 1013
ALP_LOADER_VERSION = 1014
ALP_ERROR_POWER_DOWN = 1018
# Device inquire and control types
ALP_DEVICE_NUMBER = 2000
ALP_VERSION = 2001
ALP_AVAIL_MEMORY = 2003
ALP_SYNCH_POLARITY = 2004
ALP_LEVEL_HIGH = 2006
ALP_LEVEL_LOW = 2007
ALP_TRIGGER_EDGE = 2005
ALP_EDGE_FALLING = 2008
ALP_EDGE_RISING = 2009
ALP_DEV_DMDTYPE = 2021
ALP_DMDTYPE_XGA = 1
ALP_DMDTYPE_1080P_095A = 3
ALP_DMDTYPE_XGA_07A = 4
ALP_DMDTYPE_XGA_055X = 6
ALP_DMDTYPE_WUXGA_096A = 7
ALP_DMDTYPE_DISCONNECT = 255
# TODO check is these constant values exist...
# ALP_DMDTYPE_XGA_055A =
# ALP_DMDTYPE_SXGA_PLUS =
# ALP_DMDTYPE_WQXGA_400MHZ_090A =
# ALP_DMDTYPE_WQXGA_480MHZ_090A =
ALP_USB_CONNECTION = 2016
ALP_DEV_DYN_SYNCH_OUT1_GATE = 2023
ALP_DEV_DYN_SYNCH_OUT2_GATE = 2024
ALP_DEV_DYN_SYNCH_OUT3_GATE = 2025
ALP_DDC_FPGA_TEMPERATURE = 2050
ALP_APPS_FPGA_TEMPERATURE = 2051
ALP_PCB_TEMPERATURE = 2052
ALP_DEV_DISPLAY_HEIGHT = 2057
ALP_DEV_DISPLAY_WIDTH = 2058
ALP_PWM_LEVEL = 2063
ALP_DEV_DMD_MODE = 2064
ALP_DMD_POWER_FLOAT = 1
# Sequence inquire and control types
ALP_BITPLANES = 2200
ALP_BITNUM = 2103
ALP_BIN_MODE = 2104
ALP_BIN_NORMAL = 2105
ALP_BIN_UNINTERRUPTED = 2106
ALP_PICNUM = 2201
ALP_FIRSTFRAME = 2101
ALP_LASTFRAME = 2102
ALP_FIRSTLINE = 2111
ALP_LASTLINE = 2112
ALP_LINE_INC = 2113
ALP_SCROLL_FROM_ROW = 2123
ALP_SCROLL_TO_ROW = 2124
ALP_SEQ_REPEAT = 2100
ALP_PICTURE_TIME = 2203
ALP_MIN_PICTURE_TIME = 2211
ALP_MAX_PICTURE_TIME = 2213
ALP_ILLUMINATE_TIME = 2204
ALP_MIN_ILLUMINATE_TIME = 2212
ALP_ON_TIME = 2214
ALP_OFF_TIME = 2215
ALP_SYNCH_DELAY = 2205
ALP_MAX_SYNCH_DELAY = 2209
ALP_SYNCH_PULSEWIDTH = 2206
ALP_TRIGGER_IN_DELAY = 2207
ALP_MAX_TRIGGER_IN_DELAY = 2210
ALP_DATA_FORMAT = 2110
ALP_DATA_MSB_ALIGN = 0
ALP_DATA_LSB_ALIGN = 1
ALP_DATA_BINARY_TOPDOWN = 2
ALP_DATA_BINARY_BOTTOMUP = 3
ALP_SEQ_PUT_LOCK = 2117
ALP_FLUT_MODE = 2118
ALP_FLUT_NONE = 0
ALP_FLUT_9BIT = 1
ALP_FLUT_18BIT = 2
ALP_FLUT_ENTRIES9 = 2120
ALP_FLUT_OFFSET9 = 2122
ALP_PWM_MODE = 2107
ALP_FLEX_PWM = 3
# Projection inquire and control types
ALP_PROJ_MODE = 2300
ALP_MASTER = 2301
ALP_SLAVE = 2302
ALP_PROJ_STEP = 2329
ALP_PROJ_STATE = 2400
ALP_PROJ_ACTIVE = 1200
ALP_PROJ_IDLE = 1201
ALP_PROJ_INVERSION = 2306
ALP_PROJ_UPSIDE_DOWN = 2307
ALP_PROJ_QUEUE_MODE = 2314
ALP_PROJ_LEGACY = 0
ALP_PROJ_SEQUENCE_QUEUE = 1
ALP_PROJ_QUEUE_ID = 2315
ALP_PROJ_QUEUE_MAX_AVAIL = 2316
ALP_PROJ_QUEUE_AVAIL = 2317
ALP_PROJ_PROGRESS = 2318
ALP_FLAG_QUEUE_IDLE = 1
ALP_FLAG_SEQUENCE_ABORTING = 2
ALP_FLAG_SEQUENCE_INDEFINITE = 4
ALP_FLAG_FRAME_FINISHED = 8
ALP_PROJ_RESET_QUEUE = 2319
ALP_PROJ_ABORT_SEQUENCE = 2320
ALP_PROJ_ABORT_FRAME = 2321
ALP_PROJ_WAIT_UNTIL = 2323
ALP_PROJ_WAIT_PIC_TIME = 0
ALP_PROJ_WAIT_ILLU_TIME = 1
ALP_FLUT_MAX_ENTRIES9 = 2324
ALP_FLUT_WRITE_9BIT = 2325
ALP_FLUT_WRITE_18BIT = 2326
# LED types
ALP_HLD_PT120_RED = 257
ALP_HLD_PT120_GREEN = 258
ALP_HLD_PT120_BLUE = 259
ALP_HLD_PT120_UV = 260
ALP_HLD_CBT90_WHITE = 262
ALP_HLD_PT120TE_BLUE = 263
ALP_HLD_CBT140_WHITE = 264
# LED inquire and control types
ALP_LED_SET_CURRENT = 1001
ALP_LED_BRIGHTNESS = 1002
ALP_LED_FORCE_OFF = 1003
ALP_LED_AUTO_OFF = 0
ALP_LED_OFF = 1
ALP_LED_ON = 2
ALP_LED_TYPE = 1101
ALP_LED_MEASURED_CURRENT = 1102
ALP_LED_TEMPERATURE_REF = 1103
ALP_LED_TEMPERATURE_JUNCTION = 1104
# Extended LED inquire and control types
ALP_LED_ALLOC_PARAMS = 2101
# TODO correct following lines, different constants can have the same value (e.g. ALP_DEFAULT and ALP_LED_AUTO_OFF)
# look_up_table = dict([
# (value, key)
# for key, value in globals().items()
# if key[0:4] == "ALP_"
# ])
#
#
# def constant_to_string(constant):
# """TODO add docstring"""
# string = look_up_table[constant]
# return string
dmd_type_look_up_table = dict([
(value, key)
for key, value in globals().items()
if key[0:12] == "ALP_DMDTYPE_"
])
def dmd_type_constant_to_string(dmd_type_constant):
"""TODO add docstring"""
dmd_type_string = dmd_type_look_up_table[dmd_type_constant]
return dmd_type_string
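# Illustrative usage of the look-up helper above:
#
#     dmd_type_constant_to_string(ALP_DMDTYPE_XGA_07A)   # -> 'ALP_DMDTYPE_XGA_07A'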
| mit | 7,852,411,521,684,727,000 | 23.25 | 115 | 0.692849 | false | 2.15505 | false | false | false |
dashng/netseen | netseen/common/ns_except.py | 3 | 2391 | # Copyright 2015-2017 Cisco Systems, Inc.
# All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
class NetseenException(Exception):
'''
customize exception
'''
message = 'An unknown exception occurred'
def __init__(self, **kwargs):
try:
super(NetseenException, self).__init__(self.message % kwargs)
self.msg = self.message % kwargs
except Exception:
# at least get the core message out if something happened
super(NetseenException, self).__init__(self.message)
def __unicode__(self):
return unicode(self.msg)
def log(self):
'''
log except msg
'''
pass
class NotFound(NetseenException):
"""A generic not found exception."""
pass
class BadRequest(NetseenException):
"""An exception indicating a generic bad request for a said resource.
A generic exception indicating a bad request for a specified resource.
"""
message = 'Bad %(resource)s request: %(msg)s.'
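# Illustrative usage (hypothetical resource and message values):
#
#     raise BadRequest(resource='subnet', msg='cidr is required')
#     # -> "Bad subnet request: cidr is required."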
class ObjectFieldInvalid(NetseenException):
"""the field value of object is invalid
"""
message = "Field %(field)s of %(objname)s is not an instance of Field"
class Conflict(NetseenException):
"""A generic conflict exception."""
pass
class NotAuthorized(NetseenException):
"""A generic not authorized exception."""
message = "Not authorized."
class ServiceUnavailable(NetseenException):
"""A generic service unavailable exception."""
message = "The service is unavailable."
class ObjectNotFound(NotFound):
"""A not found exception indicating an identifiable object isn't found.
A specialization of the NotFound exception indicating an object with a said
ID doesn't exist.
:param id: The ID of the (not found) object.
"""
message = "Object %(id)s not found."
| apache-2.0 | -2,226,040,746,375,711,500 | 27.807229 | 79 | 0.673777 | false | 4.403315 | false | false | false |
oVirt/jenkins | stdci_libs/struct_normalizer.py | 1 | 9373 | """struct_normalizer.py - Normalize data structures to a requested schema
Given a nested data structure `struct` it can be normalized into a specified
form like so:
    result = normalize_value(ctx, struct, to=list_of(
        map_with(
            field1=fallback_option(mandatory(
                scalar(type=str, else_='Invalid value for field1'),
                else_='Missing configuration field: field1'
            )),
            field2=scalar(),
            field3=list_of(scalar(type=int)),
            field4=list_of(
                map_with(
                    x=mandatory(scalar(type=int)),
                    y=mandatory(scalar(type=int)),
                ),
            ),
        ),
    ))
The `ctx` variable is an application-specific context. It is passed to all
normalization functions and is meant to make it easier to write custom
normalization functions that accept custom application data.
Normalization functions are simply functions with the following signature:
def normalizer(ctx, value)
The normalizer functions are meant to return a normalized value or raise an
exception. While this module provides a set of generic normalization functions
it is expected that applications would implement custom functions to perform
more complex data manipulations.
"""
from collections import Mapping, Iterable
from functools import wraps
from six import string_types, iteritems
class DataNormalizationError(Exception):
pass
def scalar(type=None, else_='Invalid scalar specified'):
"""A normalization function generator for scalar values
:param function type: Optional conversion function to convert a value to a
desired type, if not given, scalar value would be
returned as-is
:param str else_: Optional error message to raise if value is not a
scalar
:rtype: function
:returns: A function that accepts a context and a value and returns a
scalar if the value could be converted into it, or raises the
given error message in a DataNormalizationError exception.
If `type` function is given, it is called with the value and the
              result is returned. If it raises a ValueError exception,
a DataNormalizationError will be raised instead.
"""
def normalizer(ctx, value):
if isinstance(value, Iterable) and not isinstance(value, string_types):
raise DataNormalizationError(else_)
if type is None:
return value
try:
return type(value)
except ValueError:
raise DataNormalizationError(else_)
return normalizer
def list_of(member_type):
"""A normalization function generator for list values
:param function member_type: A normalization function for members of the
list
:rtype: function
:returns: A function that accepts a context and a value and returns a list
where each member had been normalized with the given
normalization function. If the value is not a list, it is
converted into a list of a single normalized value.
"""
def normalizer(ctx, value):
if isinstance(value, string_types) or isinstance(value, Mapping):
lst = [value]
elif isinstance(value, Iterable):
lst = value
else:
lst = [value]
return [normalize_value(ctx, val, to=member_type) for val in lst]
return normalizer
def map_with(**options):
"""A normalization function generator for mapping values
Each keyword argument points to a normalization function, so that if that
keyword appears as key in the input map, it is included in the normalized
map with the value normalized by the function.
If a given normalization function has a __default__ attribute, and the
keyword that points to it is not included in the input, the value of the
attribute is placed in the output map.
If a given normalization function has a __mandatory__ attribute, the
    keyword that points to it must be included in the input (unless it also has
the __default__ attribute). Otherwise, the value of the __mandatory__
attribute is used as an error message in a raised DataNormalizationError.
If the input is not a map, a keyword argument is looked for that point to a
function that includes the __fallback_option__ attribute. If found, a map
is generated with the keyword pointing to the value which is then
normalized with the normalization function the keyword points to. If not
found a DataNormalizationError exception is raised.
:rtype: function
:returns: A normalization function that accepts a context and a value
and normalizes it according to the rules specified above
"""
fallback_key = next((
key for key, norm_func in iteritems(options)
if hasattr(norm_func, '__fallback_option__')
), None)
def normalizer(ctx, value):
if isinstance(value, Mapping):
srcmap = value
elif fallback_key is None:
srcmap = {}
else:
srcmap = { fallback_key: value }
dstmap = {}
for key, norm_func in iteritems(options):
dstmap.update(normalize_option(ctx, srcmap, key, to=norm_func))
return dstmap
return normalizer
def normalize_option(ctx, mp, key, to):
"""Normalize a single option in a map
:param object ctx: A context to pass to normalization function
:param dict mp: An input map
:param str key: The name of the option to normalize
:param function to: A normalization function used to normalize the value
pointed to by `key` in the input map
The normalization function can be annotated with the __mandatory__ and
__default__ attributes to define behaviour if the option does not exist in
the input map, as specified in the docstring for `map_with`.
:rtype: dict
    :returns: If `key` is found in `mp`, then a map with `key` pointing to a
              normalized value; otherwise, may return an empty map, a map
              with a default value or raise an exception according to whether
              __mandatory__ and __default__ are set on the `to` function.
"""
if key in mp:
return {key: normalize_value(ctx, mp[key], to)}
elif hasattr(to, '__mandatory__'):
if hasattr(to, '__default__'):
return {key: to.__default__}
else:
raise DataNormalizationError(to.__mandatory__)
else:
return {}
def mandatory(value_type, default=None, else_='Mandatory option missing'):
"""Annotate a normalization function to indicate a mandatory option
:param function value_type: A normalization function to annotate
:param object default: An optional default value to associate with the
function
:param str else_: An error message for the case where a mandatory
value is missing
:rtype: function
:returns: A function that calls `value_type` and has the __mandatory__ and
optionally __default__ attributes set so it conforms with the
requirements of the `map_with` and `normalize_option` functions.
"""
@wraps(value_type)
def normalizer(*args, **kwargs):
return value_type(*args, **kwargs)
normalizer.__mandatory__ = else_
if default is not None:
normalizer.__default__ = default
return normalizer
def fallback_option(value_type):
"""Annotate a normalization function to indicate a fallback option
:param function value_type: A normalization function to annotate
:rtype: function
:returns: A function that calls `value_type` and has the
__fallback__option__ attribute set so it conforms with the
requirements of the `map_with` and `normalize_option` functions.
"""
@wraps(value_type)
def normalizer(*args, **kwargs):
return value_type(*args, **kwargs)
normalizer.__fallback_option__ = True
return normalizer
def normalize_value(ctx, value, to):
"""Normalize a single value
:param object ctx: A context to pass to normalization function
:param object value: A value to normalize
:param function to: A normalization function
Call the `to` function passing in `ctx` and `value`, and returning the
result. The is the core of the normalization mini-DSL.
:rtype: object
:returns: Whatever the `to` function returns
"""
return to(ctx, value)
def all_of(*normalizers):
"""Chain normalization functions together
:param list normalizers: A list of two or more normalization functions
that should be a applied to a value
:rtype: function
:returns: A normalization function that calls all the given normalization
functions in a chain, generating a value that is normalized by
all of them.
"""
def normalizer(ctx, value):
for nrmfun in normalizers:
value = normalize_value(ctx, value, to=nrmfun)
return value
for nrmfun in normalizers:
normalizer = wraps(nrmfun)(normalizer)
return normalizer
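# A small, self-contained demonstration of the DSL (illustrative values only,
# not part of the original module):
#
#     spec = list_of(map_with(
#         name=mandatory(scalar(type=str), else_='name is required'),
#         ports=list_of(scalar(type=int)),
#     ))
#     normalize_value(None, [{'name': 'web', 'ports': ['80', '443']}], to=spec)
#     # -> [{'name': 'web', 'ports': [80, 443]}]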
| gpl-3.0 | -6,038,026,529,675,170,000 | 37.731405 | 79 | 0.651766 | false | 4.779704 | false | false | false |
Unode/ete | examples/evol/3_branchsite_test.py | 4 | 2423 | #!/usr/bin/python
"""
15 Nov 2010
simple example to mark a tree and compute branch-site test of positive selection
"""
__author__ = "Francois-Jose Serra"
__email__ = "[email protected]"
__licence__ = "GPLv3"
__version__ = "0.0"
from ete3 import EvolTree
try:
input = raw_input
except NameError:
pass
tree = EvolTree("data/L_example/measuring_L_tree.nw")
tree.link_to_alignment('data/L_example/alignment_L_measuring_evol.fasta')
print (tree)
# input('\n tree and alignment loaded\nHit some key, to start computation of branch site models A and A1 on each branch.\n')
print ('running model M0, for comparison with branch-site models...')
tree.run_model('M0')
# each node/leaf has two kind of identifiers node_id and paml_id, to mark nodes we have to specify
# the node_id of the nodes we want to mark, and the kind of mark in this way:
for leaf in tree:
leaf.node_id
print ('\n---------\nNow working with leaf ' + leaf.name)
tree.mark_tree([leaf.node_id], marks=['#1'])
print (tree.write())
# to organize a bit, we name model with the name of the marked node
# any character after the dot, in model name, is not taken into account
# for computation. (have a look in /tmp/ete3.../bsA.. directory)
print ('running model bsA and bsA1')
tree.run_model('bsA.'+ leaf.name)
tree.run_model('bsA1.' + leaf.name)
print ('p-value of positive selection for sites on this branch is: ')
ps = tree.get_most_likely('bsA.' + leaf.name, 'bsA1.'+ leaf.name)
rx = tree.get_most_likely('bsA1.'+ leaf.name, 'M0')
print (str(ps))
print ('p-value of relaxation for sites on this branch is: ')
print (str(rx))
model = tree.get_evol_model("bsA." + leaf.name)
if ps < 0.05 and float(model.classes['foreground w'][2]) > 1:
print ('we have positive selection on sites on this branch')
tree.show(histfaces=['bsA.' + leaf.name])
elif rx<0.05 and ps>=0.05:
print ('we have relaxation on sites on this branch')
else:
print ('no signal detected on this branch, best fit for M0')
print ('\nclean tree, remove marks')
tree.mark_tree(map(lambda x: x.node_id, tree.get_descendants()),
marks=[''] * len(tree.get_descendants()), verbose=True)
# nothing working yet to get which sites are under positive selection/relaxation,
# have to look at the main outfile or rst outfile
print ('The End.')
| gpl-3.0 | -3,492,670,507,625,500,000 | 35.164179 | 126 | 0.662815 | false | 3.243641 | false | false | false |
pathfinder14/OpenSAPM | utils/WENO_method/WENOmethod.py | 1 | 5464 | import numpy as np
'''
du/dt + a du/dx = 0
WENO method for the linear advection (transport) equation
a - coefficient of the spatial derivative
tau - time step (dt)
h - spatial step (dx)
u - initial condition at t = 0 (numpy array)
Returns: the next time layer (numpy array)
'''
def WENOmethod(a, tau, h, u):
    # Number of weights: order = 2k - 1
k = 3
    # Number of grid nodes
nx = int((2 / h ) + 1)
    # Number of ghost cells
gc = k - 1
    # Append ghost cells on both sides
x = np.linspace(0, 2, nx)
gcr = x[-1] + np.linspace(1, gc, gc) * h
gcl = x[0] + np.linspace(-gc, -1, gc) * h
xc = np.append(x, gcr)
xc = np.append(gcl, xc)
uc = np.append(u, u[-gc:])
uc = np.append(u[0:gc], uc)
gs = np.zeros([nx + 2 * gc, 1])
flux = np.zeros(nx + 2 * gc)
un = uc.copy()
for i in range(gc, nx - 1 + gc): # i = 2
xloc = xc[i - (k - 1):i + k] # i + k - 1 - (i - (k - 1) - 1) = 2k -1
uloc = uc[i - (k - 1):i + k]
f_left, f_right = WENO(xloc, uloc, k)
        # Upwind flux (sign splitting)
flux[i] = 0.5 * (a + np.fabs(a)) * f_left + 0.5 * (a - np.fabs(a)) * f_right
for i in range(gc, nx - gc):
if (a > 0):
uc[i] = un[i] - tau / h * (flux[i] - flux[i - 1])
else:
uc[i] = un[i] - tau / h * (flux[i + 1] - flux[i])
return uc[3:-3]
'''
Computes the left and right cell-boundary values using ENO/WENO reconstruction
xloc - the 2k - 1 node coordinates
uloc - the 2k - 1 nodal values
k - number of weights
Returns: a tuple of the left and right boundary values
'''
def WENO(xloc, uloc, k):
    # Special case - no stencil selection is needed
if (k == 1):
ul = uloc[0]
ur = uloc[1]
    # Apply the WENO procedure
alphal = np.zeros(k, dtype = np.float)
alphar = np.zeros(k, dtype = np.float)
omegal = np.zeros(k, dtype = np.float)
omegar = np.zeros(k, dtype = np.float)
beta = np.zeros(k, dtype = np.float)
d = np.zeros(k, dtype = np.float)
vareps = 1e-6
    # Compute the k left/right reconstructions built on the different stencils
ulr = np.zeros(k, dtype = np.float)
urr = np.zeros(k, dtype = np.float)
for r in range(0, k):
cr = ENOweights(k, r)
cl = ENOweights(k, r - 1)
for i in range(0, k):
urr[r] = urr[r] + cr[i] * uloc[k - r + i - 1]
ulr[r] = ulr[r] + cl[i] * uloc[k - r + i - 1]
    # Compute the WENO coefficients for the different orders 2k - 1 (3rd and 5th order)
if (k == 2):
        # Optimal (linear) weights
d[0] = 2 / 3.
d[1] = 1 / 3.
        # Compute the smoothness indicator of each stencil
beta[0] = (uloc[2] - uloc[1]) ** 2
beta[1] = (uloc[1] - uloc[0]) ** 2
if(k == 3):
        # Optimal (linear) weights
d[0] = 3 / 10.
d[1] = 3 / 5.
d[2] = 1 / 10.
        # Compute the smoothness indicator of each stencil
beta[0] = 13/12.*(uloc[2]-2*uloc[3]+uloc[4])**2 + 1/4.*(3*uloc[2]-4*uloc[3]+uloc[4])**2
beta[1] = 13/12.*(uloc[1]-2*uloc[2]+uloc[3])**2 + 1/4.*(uloc[1]-uloc[3])**2
beta[2] = 13/12.*(uloc[0]-2*uloc[1]+uloc[2])**2 + 1/4.*(3*uloc[2]-4*uloc[1]+uloc[0])**2
    # Compute the alpha parameters
for r in range(0,k):
alphar[r] = d[r] / (vareps + beta[r]) ** 2
alphal[r] = d[k - r - 1] / (vareps + beta[r]) ** 2
    # Compute the nonlinear WENO weights
for r in range(0,k):
omegal[r] = alphal[r] / alphal.sum()
omegar[r] = alphar[r] / alphar.sum()
    # Compute the values at the cell edges
ul = 0
ur = 0
for r in range(0,k):
ul = ul + omegal[r] * ulr[r]
ur = ur + omegar[r] * urr[r]
return (ul,ur)
'''
Computes the optimal ENO reconstruction coefficients c_k^r used by WENO:
v_[i+1/2] = \sum_[j=0]^[k-1] c_[rj] v_[i-r+j]
k - order
r - stencil shift
Returns: array of coefficients c_rj (numpy array)
'''
def ENOweights(k,r):
c = np.zeros(k)
for j in range(0,k):
de3 = 0.
for m in range(j + 1, k + 1):
            # Compute the denominator
de2 = 0.
for l in range(0, k + 1):
if l is not m:
de1 = 1.
for q in range(0, k + 1):
if (q is not m) and (q is not l):
de1 = de1 * (r - q + 1)
de2 = de2 + de1
            # Compute the numerator
de1 = 1.
for l in range(0, k + 1):
if (l is not m):
de1 = de1 * (m - l)
de3 = de3 + de2 / de1
c[j] = de3
return c | mit | -8,156,871,276,959,244,000 | 29.11039 | 95 | 0.485979 | false | 1.993978 | false | false | false |
bmbouter/kombu | kombu/tests/async/aws/sqs/test_message.py | 9 | 1174 | # -*- coding: utf-8 -*-
from __future__ import absolute_import
from kombu.async.aws.sqs.message import AsyncMessage
from kombu.tests.async.aws.case import AWSCase
from kombu.tests.case import PromiseMock, Mock
from kombu.utils import uuid
class test_AsyncMessage(AWSCase):
def setup(self):
self.queue = Mock(name='queue')
self.callback = PromiseMock(name='callback')
self.x = AsyncMessage(self.queue, 'body')
self.x.receipt_handle = uuid()
def test_delete(self):
self.assertTrue(self.x.delete(callback=self.callback))
self.x.queue.delete_message.assert_called_with(
self.x, self.callback,
)
self.x.queue = None
self.assertIsNone(self.x.delete(callback=self.callback))
def test_change_visibility(self):
self.assertTrue(self.x.change_visibility(303, callback=self.callback))
self.x.queue.connection.change_message_visibility.assert_called_with(
self.x.queue, self.x.receipt_handle, 303, self.callback,
)
self.x.queue = None
self.assertIsNone(self.x.change_visibility(
303, callback=self.callback,
))
| bsd-3-clause | -2,800,758,915,447,123,500 | 31.611111 | 78 | 0.660988 | false | 3.579268 | false | false | false |
mozman/ezdxf | src/ezdxf/entities/mleader.py | 1 | 34642 | # Copyright (c) 2018-2021, Manfred Moitzi
# License: MIT License
from typing import TYPE_CHECKING, List, Union, Optional
import copy
import logging
from collections import namedtuple
from ezdxf.lldxf import const
from ezdxf.lldxf.attributes import (
DXFAttr,
DXFAttributes,
DefSubclass,
XType,
group_code_mapping,
)
from ezdxf.lldxf.tags import Tags
from ezdxf.math import Vec3, NULLVEC, X_AXIS, Y_AXIS, Z_AXIS, Matrix44
from ezdxf import colors
from .dxfentity import base_class, SubclassProcessor
from .dxfobj import DXFObject
from .dxfgfx import DXFGraphic, acdb_entity
from .factory import register_entity
from .objectcollection import ObjectCollection
if TYPE_CHECKING:
from ezdxf.eztypes import TagWriter, Drawing, DXFNamespace, DXFTag
__all__ = ["MultiLeader", "MLeader", "MLeaderStyle", "MLeaderStyleCollection"]
logger = logging.getLogger("ezdxf")
# DXF Examples:
# "D:\source\dxftest\CADKitSamples\house design for two family with common staircasedwg.dxf"
# "D:\source\dxftest\CADKitSamples\house design.dxf"
# How to render MLEADER: https://atlight.github.io/formats/dxf-leader.html
# DXF reference:
# http://help.autodesk.com/view/OARX/2018/ENU/?guid=GUID-72D20B8C-0F5E-4993-BEB7-0FCF94F32BE0
acdb_mleader = DefSubclass(
"AcDbMLeader",
{
"version": DXFAttr(270, default=2),
"style_handle": DXFAttr(340),
# Theory: Take properties from MLEADERSTYLE,
# except explicit overridden here:
"property_override_flags": DXFAttr(90),
# Bit coded flags:
# 1 << 0 = leader_type
# 1 << 1 = leader_line_color
# 1 << 2 = leader_linetype_handle
# 1 << 3 = leader_lineweight
# 1 << 4 = landing_flag
# 1 << 5 = landing_gap ???
# 1 << 6 = dogleg_flag
# 1 << 7 = dogleg_length
# 1 << 8 = arrow_head_handle
# 1 << 9 = arrow_head_size
# 1 << 10 = content_type
# 1 << 11 = text_style_handle
# 1 << 12 = text_left_attachment_type (of MTEXT)
# 1 << 13 = text_angle_type (of MTEXT)
# 1 << 14 = text_alignment_type (of MTEXT)
# 1 << 15 = text_color (of MTEXT)
# 1 << 16 = ??? Text height (of MTEXT) ???
# 1 << 17 = text_frame_flag
# 1 << 18 = ??? Enable use of default MTEXT (from MLEADERSTYLE)
# 1 << 19 = block_record_handle
# 1 << 20 = block_color
# 1 << 21 = block_scale_vector
# 1 << 22 = block_rotation
# 1 << 23 = block_connection_type
# 1 << 24 = ??? Scale ???
# 1 << 25 = text_right_attachment_type (of MTEXT)
# 1 << 26 = ??? Text switch alignment type (of MTEXT) ???
# 1 << 27 = text_attachment_direction (of MTEXT)
# 1 << 28 = text_top_attachment_type (of MTEXT)
# 1 << 29 = Text_bottom_attachment_type (of MTEXT)
"leader_type": DXFAttr(170, default=1),
"leader_line_color": DXFAttr(91, default=colors.BY_BLOCK_RAW_VALUE),
"leader_linetype_handle": DXFAttr(341),
"leader_lineweight": DXFAttr(171, default=const.LINEWEIGHT_BYBLOCK),
"has_landing": DXFAttr(290, default=1),
"has_dogleg": DXFAttr(291, default=1),
"dogleg_length": DXFAttr(41, default=8), # depend on $MEASUREMENT?
# no handle is default arrow 'closed filled':
"arrow_head_handle": DXFAttr(342),
"arrow_head_size": DXFAttr(42, default=4), # depend on $MEASUREMENT?
"content_type": DXFAttr(172, default=2),
# 0 = None
# 1 = Block content
# 2 = MTEXT content
# 3 = TOLERANCE content
# Text Content:
"text_style_handle": DXFAttr(343),
"text_left_attachment_type": DXFAttr(173, default=1),
# Values 0-8 are used for the left/right attachment
# point (attachment direction is horizontal), values 9-10 are used for the
# top/bottom attachment points (attachment direction is vertical).
# Attachment point is:
# 0 = top of top text line,
# 1 = middle of top text line,
# 2 = middle of text,
# 3 = middle of bottom text line,
# 4 = bottom of bottom text line,
# 5 = bottom text line,
# 6 = bottom of top text line. Underline bottom line
# 7 = bottom of top text line. Underline top line,
# 8 = bottom of top text line. Underline all content,
# 9 = center of text (y-coordinate only),
# 10 = center of text (y-coordinate only), and overline top/underline
# bottom content.
"text_right_attachment_type": DXFAttr(95), # like 173
"text_angle_type": DXFAttr(174, default=1),
# 0 = text angle is equal to last leader line segment angle
# 1 = text is horizontal
# 2 = text angle is equal to last leader line segment angle, but potentially
# rotated by 180 degrees so the right side is up for readability.
"text_alignment_type": DXFAttr(175, default=2),
"text_color": DXFAttr(92, default=colors.BY_BLOCK_RAW_VALUE),
"has_frame_text": DXFAttr(292, default=0),
# Block Content:
"block_record_handle": DXFAttr(344),
"block_color": DXFAttr(
93, default=colors.BY_BLOCK_RAW_VALUE
), # raw color
"block_scale_vector": DXFAttr(
10, xtype=XType.point3d, default=Vec3(1, 1, 1)
),
"block_rotation": DXFAttr(43, default=0), # in radians!!!
"block_connection_type": DXFAttr(176, default=0),
# 0 = center extents
# 1 = insertion point
"is_annotative": DXFAttr(293, default=0),
# REPEAT "arrow_heads": DXF R2007+
# arrow_head_index: 94, ???
# arrow_head_handle: 345
# END "arrow heads"
# REPEAT "block attribs" (ATTDEF): DXF R2007+
# attrib_handle: 330
# attrib_index: 177, sequential index of the label in the collection
# attrib_width: 44
# attrib_text: 302, collision with group code (302, "LEADER{") in context data
# END "block attribs"
# Text Content:
"is_text_direction_negative": DXFAttr(
294, default=0, dxfversion=const.DXF2007
),
"text_IPE_align": DXFAttr(178, default=0, dxfversion=const.DXF2007),
"text_attachment_point": DXFAttr(
179, default=1, dxfversion=const.DXF2007
),
# 1 = left
# 2 = center
# 3 = right
"scale": DXFAttr(45, default=1, dxfversion=const.DXF2007),
"text_attachment_direction": DXFAttr(
271, default=0, dxfversion=const.DXF2010
),
# This defines whether the leaders attach to the left/right of the content
# block/text, or attach to the top/bottom:
# 0 = horizontal
# 1 = vertical
"text_bottom_attachment_direction": DXFAttr(
272, default=9, dxfversion=const.DXF2010
),
# like 173, but
# 9 = center
# 10= underline and center
"text_top_attachment_direction": DXFAttr(
273, default=9, dxfversion=const.DXF2010
),
# like 173, but
# 9 = center
# 10= overline and center
"leader_extend_to_text": DXFAttr(
295, default=0, dxfversion=const.DXF2013
),
},
)
acdb_mleader_group_codes = group_code_mapping(acdb_mleader)
CONTEXT_STR = "CONTEXT_DATA{"
LEADER_STR = "LEADER{"
LEADER_LINE_STR = "LEADER_LINE{"
START_CONTEXT_DATA = 300
END_CONTEXT_DATA = 301
START_LEADER = 302
END_LEADER = 303
START_LEADER_LINE = 304
END_LEADER_LINE = 305
def compile_context_tags(
data: List["DXFTag"], stop_code: int
) -> List[Union["DXFTag", List]]:
def build_structure(
tag: "DXFTag", stop: int
) -> List[Union["DXFTag", List]]:
collector = [tag]
tag = next(tags)
while tag.code != stop:
if tag.code == START_LEADER:
collector.append(build_structure(tag, END_LEADER))
# Group code 304 is used also for MTEXT content, therefore always
# test for group code AND and value string:
elif tag.code == START_LEADER_LINE and tag.value == LEADER_LINE_STR:
collector.append(build_structure(tag, END_LEADER_LINE))
else:
collector.append(tag)
tag = next(tags)
return collector
tags = iter(data)
return build_structure(next(tags), stop_code)
ArrowHeadData = namedtuple("ArrowHeadData", "index, handle")
AttribData = namedtuple("AttribData", "handle, index, width, text")
@register_entity
class MultiLeader(DXFGraphic):
DXFTYPE = "MULTILEADER"
DXFATTRIBS = DXFAttributes(base_class, acdb_entity, acdb_mleader)
MIN_DXF_VERSION_FOR_EXPORT = const.DXF2000
def __init__(self):
super().__init__()
self.context = MultiLeaderContext()
self.arrow_heads: List[ArrowHeadData] = []
self.block_attribs: List[AttribData] = []
def _copy_data(self, entity: "MultiLeader") -> None:
"""Copy leaders"""
entity.context = copy.deepcopy(self.context)
entity.arrow_heads = copy.deepcopy(self.arrow_heads)
entity.block_attribs = copy.deepcopy(self.block_attribs)
def load_dxf_attribs(
self, processor: SubclassProcessor = None
) -> "DXFNamespace":
dxf = super().load_dxf_attribs(processor)
if processor is None:
return dxf
tags = processor.subclass_by_index(2)
context = self.extract_context_data(tags)
if context:
try:
self.context = self.load_context(context)
except const.DXFStructureError:
logger.info(
f"Context structure error in entity MULTILEADER(#{dxf.handle})"
)
self.arrow_heads = self.extract_arrow_heads(tags)
self.block_attribs = self.extract_block_attribs(tags)
processor.fast_load_dxfattribs(
dxf, acdb_mleader_group_codes, subclass=tags, recover=True
)
return dxf
@staticmethod
def extract_context_data(tags: Tags) -> List["DXFTag"]:
start, end = None, None
context_data = []
for index, tag in enumerate(tags):
if tag.code == START_CONTEXT_DATA:
start = index
elif tag.code == END_CONTEXT_DATA:
end = index + 1
if start and end:
context_data = tags[start:end]
# Remove context data!
del tags[start:end]
return context_data
@staticmethod
def load_context(data: List["DXFTag"]) -> "MultiLeaderContext":
try:
context = compile_context_tags(data, END_CONTEXT_DATA)
except StopIteration:
raise const.DXFStructureError
else:
return MultiLeaderContext.load(context)
@staticmethod
def extract_arrow_heads(data: Tags) -> List[ArrowHeadData]:
def store_head():
heads.append(
ArrowHeadData(
collector.get(94, 0), # arrow head index
collector.get(345, "0"), # arrow head handle
)
)
collector.clear()
heads = []
try:
start = data.tag_index(94)
except const.DXFValueError:
return heads
end = start
collector = dict()
for code, value in data.collect_consecutive_tags({94, 345}, start):
end += 1
collector[code] = value
if code == 345:
store_head()
# Remove processed tags:
del data[start:end]
return heads
@staticmethod
def extract_block_attribs(data: Tags) -> List[AttribData]:
def store_attrib():
attribs.append(
AttribData(
collector.get(330, "0"), # ATTDEF handle
collector.get(177, 0), # ATTDEF index
collector.get(44, 1.0), # ATTDEF width
collector.get(302, ""), # ATTDEF text (content)
)
)
collector.clear()
attribs = []
try:
start = data.tag_index(330)
except const.DXFValueError:
return attribs
end = start
collector = dict()
for code, value in data.collect_consecutive_tags(
{330, 177, 44, 302}, start
):
end += 1
if code == 330 and len(collector):
store_attrib()
collector[code] = value
if len(collector):
store_attrib()
# Remove processed tags:
del data[start:end]
return attribs
def preprocess_export(self, tagwriter: "TagWriter") -> bool:
if self.context.is_valid:
return True
else:
logger.debug(
f"Ignore {str(self)} at DXF export, invalid context data."
)
return False
def export_entity(self, tagwriter: "TagWriter") -> None:
def write_handle_if_exist(code: int, name: str):
handle = dxf.get(name)
if handle is not None:
write_tag2(code, handle)
super().export_entity(tagwriter)
dxf = self.dxf
version = tagwriter.dxfversion
write_tag2 = tagwriter.write_tag2
write_tag2(100, acdb_mleader.name)
write_tag2(270, dxf.version)
self.context.export_dxf(tagwriter)
# Export common MLEADER tags:
# Don't use dxf.export_dxf_attribs() - all attributes should be written
# even if equal to the default value:
write_tag2(340, dxf.style_handle)
write_tag2(90, dxf.property_override_flags)
write_tag2(170, dxf.leader_type)
write_tag2(91, dxf.leader_line_color)
write_tag2(341, dxf.leader_linetype_handle)
write_tag2(171, dxf.leader_lineweight)
write_tag2(290, dxf.has_landing)
write_tag2(291, dxf.has_dogleg)
write_tag2(41, dxf.dogleg_length)
# arrow_head_handle is None for default arrow 'closed filled':
write_handle_if_exist(342, "arrow_head_handle")
write_tag2(42, dxf.arrow_head_size)
write_tag2(172, dxf.content_type)
write_tag2(343, dxf.text_style_handle) # mandatory!
write_tag2(173, dxf.text_left_attachment_type)
write_tag2(95, dxf.text_right_attachment_type)
write_tag2(174, dxf.text_angle_type)
write_tag2(175, dxf.text_alignment_type)
write_tag2(92, dxf.text_color)
write_tag2(292, dxf.has_frame_text)
write_handle_if_exist(344, "block_record_handle")
write_tag2(93, dxf.block_color)
tagwriter.write_vertex(10, dxf.block_scale_vector)
write_tag2(43, dxf.block_rotation)
write_tag2(176, dxf.block_connection_type)
write_tag2(293, dxf.is_annotative)
if version >= const.DXF2007:
self.export_arrow_heads(tagwriter)
self.export_block_attribs(tagwriter)
write_tag2(294, dxf.is_text_direction_negative)
write_tag2(178, dxf.text_IPE_align)
write_tag2(179, dxf.text_attachment_point)
write_tag2(45, dxf.scale)
if version >= const.DXF2010:
write_tag2(271, dxf.text_attachment_direction)
write_tag2(272, dxf.text_bottom_attachment_direction)
write_tag2(273, dxf.text_top_attachment_direction)
if version >= const.DXF2013:
write_tag2(295, dxf.leader_extend_to_text)
def export_arrow_heads(self, tagwriter: "TagWriter") -> None:
for index, handle in self.arrow_heads:
tagwriter.write_tag2(94, index)
tagwriter.write_tag2(345, handle)
def export_block_attribs(self, tagwriter: "TagWriter") -> None:
for attrib in self.block_attribs:
tagwriter.write_tag2(330, attrib.handle)
tagwriter.write_tag2(177, attrib.index)
tagwriter.write_tag2(44, attrib.width)
tagwriter.write_tag2(302, attrib.text)
class MultiLeaderContext:
ATTRIBS = {
40: "scale",
10: "base_point",
41: "text_height",
140: "arrowhead_size",
145: "landing_gap_size",
174: "left_attachment",
175: "right_attachment",
176: "text_align_type",
177: "attachment_type",
110: "plane_origin",
111: "plane_x_axis",
112: "plane_y_axis",
297: "plane_normal_reversed",
272: "top_attachment",
273: "bottom_attachment",
}
def __init__(self):
self.leaders: List["Leader"] = []
self.scale: float = 1.0 # overall scale
self.base_point: Vec3 = NULLVEC
self.text_height = 4.0
self.arrowhead_size = 4.0
self.landing_gap_size = 2.0
self.left_attachment = 1
self.right_attachment = 1
self.text_align_type = 0 # 0=left, 1=center, 2=right
self.attachment_type = 0 # 0=content extents, 1=insertion point
self.mtext: Optional[MTextData] = None
self.block: Optional[BlockData] = None
self.plane_origin: Vec3 = NULLVEC
self.plane_x_axis: Vec3 = X_AXIS
self.plane_y_axis: Vec3 = Y_AXIS
self.plane_normal_reversed: int = 0
self.top_attachment = 9
self.bottom_attachment = 9
@classmethod
def load(cls, context: List[Union["DXFTag", List]]) -> "MultiLeaderContext":
assert context[0] == (START_CONTEXT_DATA, CONTEXT_STR)
ctx = cls()
content = None
for tag in context:
if isinstance(tag, list): # Leader()
ctx.leaders.append(Leader.load(tag))
continue
# parse context tags
code, value = tag
if content:
if content.parse(code, value):
continue
else:
content = None
if code == 290 and value == 1:
content = MTextData()
ctx.mtext = content
elif code == 296 and value == 1:
content = BlockData()
ctx.block = content
else:
name = MultiLeaderContext.ATTRIBS.get(code)
if name:
ctx.__setattr__(name, value)
return ctx
@property
def is_valid(self) -> bool:
return True
def export_dxf(self, tagwriter: "TagWriter") -> None:
write_tag2 = tagwriter.write_tag2
write_vertex = tagwriter.write_vertex
write_tag2(START_CONTEXT_DATA, CONTEXT_STR)
# All MultiLeaderContext tags:
write_tag2(40, self.scale)
write_vertex(10, self.base_point)
write_tag2(41, self.text_height)
write_tag2(140, self.arrowhead_size)
write_tag2(145, self.landing_gap_size)
write_tag2(174, self.left_attachment)
write_tag2(175, self.right_attachment)
write_tag2(176, self.text_align_type)
write_tag2(177, self.attachment_type)
if self.mtext:
write_tag2(290, 1) # has mtext content
self.mtext.export_dxf(tagwriter)
else:
write_tag2(290, 0)
if self.block:
write_tag2(296, 1) # has block content
self.block.export_dxf(tagwriter)
else:
write_tag2(296, 0)
write_vertex(110, self.plane_origin)
write_vertex(111, self.plane_x_axis)
write_vertex(112, self.plane_y_axis)
write_tag2(297, self.plane_normal_reversed)
        # Export Leader and LeaderLine objects:
for leader in self.leaders:
leader.export_dxf(tagwriter)
# Additional MultiLeaderContext tags:
if tagwriter.dxfversion >= const.DXF2010:
write_tag2(272, self.top_attachment)
write_tag2(273, self.bottom_attachment)
write_tag2(END_CONTEXT_DATA, "}")
class MTextData:
ATTRIBS = {
304: "default_content",
11: "normal_direction",
340: "style_handle",
12: "location",
13: "direction",
42: "rotation",
43: "boundary_width",
44: "boundary_height",
45: "line_space_factor",
170: "line_space_style",
90: "color",
171: "alignment",
172: "flow_direction",
91: "bg_color",
141: "bg_scale_factor",
92: "bg_transparency",
291: "has_bg_color",
292: "has_bg_fill",
173: "column_type",
293: "use_auto_height",
142: "column_width",
143: "column_gutter_width",
294: "column_flow_reversed",
144: "column_sizes", # multiple values
295: "use_word_break",
}
def __init__(self):
self.default_content: str = ""
self.normal_direction: Vec3 = Z_AXIS
self.style_handle = None # handle of TextStyle() table entry
self.location: Vec3 = NULLVEC
self.direction: Vec3 = X_AXIS # text direction
self.rotation: float = 0.0 # in radians!
self.boundary_width: float = 0.0
self.boundary_height: float = 0.0
self.line_space_factor: float = 1.0
self.line_space_style: int = 1 # 1=at least, 2=exactly
self.color: int = colors.BY_BLOCK_RAW_VALUE
self.alignment: int = 1 # 1=left, 2=center, 3=right
self.flow_direction: int = 1 # 1=horiz, 3=vert, 6=by style
self.bg_color: int = -939524096 # use window background color? (CMC)
self.bg_scale_factor: float = 1.5
self.bg_transparency: int = 0
self.has_bg_color: int = 0
self.has_bg_fill: int = 0
self.column_type: int = 0 # unknown values
self.use_auto_height: int = 0
self.column_width: float = 0.0
self.column_gutter_width: float = 0.0
self.column_flow_reversed: int = 0
self.column_sizes: List[float] = [] # heights?
self.use_word_break: int = 1
def parse(self, code: int, value) -> bool:
# return True if data belongs to mtext else False (end of mtext section)
if code == 144:
self.column_sizes.append(value)
return True
attrib = MTextData.ATTRIBS.get(code)
if attrib:
self.__setattr__(attrib, value)
return bool(attrib)
def export_dxf(self, tagwriter: "TagWriter") -> None:
write_tag2 = tagwriter.write_tag2
write_vertex = tagwriter.write_vertex
write_tag2(304, self.default_content)
write_vertex(11, self.normal_direction)
if self.style_handle:
write_tag2(340, self.style_handle)
else:
# Do not write None, but "0" is also not valid!
# DXF structure error should be detected before export.
write_tag2(340, "0")
write_vertex(12, self.location)
write_vertex(13, self.direction)
write_tag2(42, self.rotation)
write_tag2(43, self.boundary_width)
write_tag2(44, self.boundary_height)
write_tag2(45, self.line_space_factor)
write_tag2(170, self.line_space_style)
write_tag2(90, self.color)
write_tag2(171, self.alignment)
write_tag2(172, self.flow_direction)
write_tag2(91, self.bg_color)
write_tag2(141, self.bg_scale_factor)
write_tag2(92, self.bg_transparency)
write_tag2(291, self.has_bg_color)
write_tag2(292, self.has_bg_fill)
write_tag2(173, self.column_type)
write_tag2(293, self.use_auto_height)
write_tag2(142, self.column_width)
write_tag2(143, self.column_gutter_width)
write_tag2(294, self.column_flow_reversed)
for size in self.column_sizes:
write_tag2(144, size)
write_tag2(295, self.use_word_break)
class BlockData:
ATTRIBS = {
341: "block_record_handle",
14: "normal_direction",
15: "location",
16: "scale",
46: "rotation",
93: "color",
}
def __init__(self):
self.block_record_handle = None
self.normal_direction: Vec3 = Z_AXIS
self.location: Vec3 = NULLVEC
self.scale: Vec3 = Vec3(1, 1, 1)
self.rotation: float = 0 # in radians!
self.color: int = colors.BY_BLOCK_RAW_VALUE
# The transformation matrix is stored in transposed order
# of ezdxf.math.Matrix44()!
self._matrix: List[float] = [] # group code 47 x 16
@property
def matrix44(self) -> Matrix44:
m = Matrix44(self._matrix)
m.transpose()
return m
@matrix44.setter
def matrix44(self, m: Matrix44) -> None:
m = m.copy()
m.transpose()
self._matrix = list(m)
def parse(self, code: int, value) -> bool:
attrib = BlockData.ATTRIBS.get(code)
if attrib:
self.__setattr__(attrib, value)
elif code == 47:
self._matrix.append(value)
else:
return False
# return True if data belongs to block else False (end of block section)
return True
def export_dxf(self, tagwriter: "TagWriter") -> None:
write_tag2 = tagwriter.write_tag2
write_vertex = tagwriter.write_vertex
if self.block_record_handle:
write_tag2(341, self.block_record_handle)
else:
# Do not write None, but "0" is also not valid!
# DXF structure error should be detected before export.
write_tag2(341, "0")
write_vertex(14, self.normal_direction)
write_vertex(15, self.location)
write_vertex(16, self.scale)
write_tag2(46, self.rotation)
write_tag2(93, self.color)
for value in self._matrix:
write_tag2(47, value)
class Leader:
def __init__(self):
self.lines: List["LeaderLine"] = []
self.has_last_leader_line: int = 0 # group code 290
self.has_dogleg_vector: int = 0 # group code 291
self.last_leader_point: Vec3 = NULLVEC # group code (10, 20, 30)
self.dogleg_vector: Vec3 = X_AXIS # group code (11, 21, 31)
self.dogleg_length: float = 1.0 # group code 40
self.index: int = 0 # group code 90
self.attachment_direction: int = 0 # group code 271, R21010+
self.breaks = [] # group code 12, 13 - multiple breaks possible!
@classmethod
def load(cls, context: List[Union["DXFTag", List]]):
assert context[0] == (START_LEADER, LEADER_STR)
leader = cls()
for tag in context:
if isinstance(tag, list): # LeaderLine()
leader.lines.append(LeaderLine.load(tag))
continue
code, value = tag
if code == 290:
leader.has_last_leader_line = value
elif code == 291:
leader.has_dogleg_vector = value
elif code == 10:
leader.last_leader_point = value
elif code == 11:
leader.dogleg_vector = value
elif code == 40:
leader.dogleg_length = value
elif code == 90:
leader.index = value
elif code == 271:
leader.attachment_direction = value
elif code in (12, 13):
leader.breaks.append(value)
return leader
def export_dxf(self, tagwriter: "TagWriter") -> None:
write_tag2 = tagwriter.write_tag2
write_vertex = tagwriter.write_vertex
write_tag2(START_LEADER, LEADER_STR)
write_tag2(290, self.has_last_leader_line)
write_tag2(291, self.has_dogleg_vector)
if self.has_last_leader_line:
write_vertex(10, self.last_leader_point)
if self.has_dogleg_vector:
write_vertex(11, self.dogleg_vector)
code = 0
for vertex in self.breaks:
# write alternate group code 12 and 13
write_vertex(12 + code, vertex)
code = 1 - code
write_tag2(90, self.index)
write_tag2(40, self.dogleg_length)
# Export leader lines:
for line in self.lines:
line.export_dxf(tagwriter)
if tagwriter.dxfversion >= const.DXF2010:
write_tag2(271, self.attachment_direction)
write_tag2(END_LEADER, "}")
class LeaderLine:
def __init__(self):
self.vertices: List[Vec3] = []
self.breaks: Optional[List[Union[int, Vec3]]] = None
# Breaks: 90, 11, 12, [11, 12, ...] [, 90, 11, 12 [11, 12, ...]]
# group code 90 = break index
# group code 11 = start vertex of break
# group code 12 = end vertex of break
# multiple breaks per index possible
self.index: int = 0 # group code 91
self.color: int = colors.BY_BLOCK_RAW_VALUE # group code 92
# R2010+: override properties see ODA DWG pg. 214-215
@classmethod
def load(cls, tags: List["DXFTag"]):
assert tags[0] == (START_LEADER_LINE, LEADER_LINE_STR)
line = LeaderLine()
vertices = line.vertices
breaks = []
for code, value in tags:
if code == 10:
vertices.append(value)
elif code in (90, 11, 12):
breaks.append(value)
elif code == 91:
line.index = value
elif code == 92:
line.color = value
if breaks:
line.breaks = breaks
return line
def export_dxf(self, tagwriter: "TagWriter") -> None:
write_tag2 = tagwriter.write_tag2
write_vertex = tagwriter.write_vertex
write_tag2(START_LEADER_LINE, LEADER_LINE_STR)
for vertex in self.vertices:
write_vertex(10, vertex)
if self.breaks:
code = 0
for value in self.breaks:
if isinstance(value, int):
# break index
write_tag2(90, value)
else:
# 11 .. start vertex of break
# 12 .. end vertex of break
write_vertex(11 + code, value)
code = 1 - code
write_tag2(91, self.index)
write_tag2(92, self.color)
write_tag2(END_LEADER_LINE, "}")
@register_entity
class MLeader(MultiLeader):
DXFTYPE = "MLEADER"
acdb_mleader_style = DefSubclass(
"AcDbMLeaderStyle",
{
"unknown1": DXFAttr(179, default=2),
"content_type": DXFAttr(170, default=2),
"draw_mleader_order_type": DXFAttr(171, default=1),
"draw_leader_order_type": DXFAttr(172, default=0),
"max_leader_segments_points": DXFAttr(90, default=2),
"first_segment_angle_constraint": DXFAttr(40, default=0.0),
"second_segment_angle_constraint": DXFAttr(41, default=0.0),
"leader_type": DXFAttr(173, default=1),
"leader_line_color": DXFAttr(91, default=-1056964608),
        # raw color: BY_BLOCK
"leader_linetype_handle": DXFAttr(340),
"leader_lineweight": DXFAttr(92, default=-2),
"has_landing": DXFAttr(290, default=1),
"landing_gap": DXFAttr(42, default=2.0),
"has_dogleg": DXFAttr(291, default=1),
"dogleg_length": DXFAttr(43, default=8),
"name": DXFAttr(3, default="Standard"),
# no handle is default arrow 'closed filled':
"arrow_head_handle": DXFAttr(341),
"arrow_head_size": DXFAttr(44, default=4),
"default_text_content": DXFAttr(300, default=""),
"text_style_handle": DXFAttr(342),
"text_left_attachment_type": DXFAttr(174, default=1),
"text_angle_type": DXFAttr(175, default=1),
"text_alignment_type": DXFAttr(176, default=0),
"text_right_attachment_type": DXFAttr(178, default=1),
"text_color": DXFAttr(93, default=-1056964608), # raw color: BY_BLOCK
"text_height": DXFAttr(45, default=4),
"has_frame_text": DXFAttr(292, default=0),
"text_align_always_left": DXFAttr(297, default=0),
"align_space": DXFAttr(46, default=4),
"has_block_scaling": DXFAttr(293),
"block_record_handle": DXFAttr(343),
"block_color": DXFAttr(94, default=-1056964608), # raw color: BY_BLOCK
"block_scale_x": DXFAttr(47, default=1),
"block_scale_y": DXFAttr(49, default=1),
"block_scale_z": DXFAttr(140, default=1),
"has_block_rotation": DXFAttr(294, default=1),
"block_rotation": DXFAttr(141, default=0),
"block_connection_type": DXFAttr(177, default=0),
"scale": DXFAttr(142, default=1),
"overwrite_property_value": DXFAttr(295, default=0),
"is_annotative": DXFAttr(296, default=0),
"break_gap_size": DXFAttr(143, default=3.75),
# 0 = Horizontal; 1 = Vertical:
"text_attachment_direction": DXFAttr(271, default=0),
# 9 = Center; 10 = Underline and Center:
"text_bottom__attachment_direction": DXFAttr(272, default=9),
# 9 = Center; 10 = Overline and Center:
"text_top_attachment_direction": DXFAttr(273, default=9),
"unknown2": DXFAttr(298, optional=True), # boolean flag ?
},
)
acdb_mleader_style_group_codes = group_code_mapping(acdb_mleader_style)
@register_entity
class MLeaderStyle(DXFObject):
DXFTYPE = "MLEADERSTYLE"
DXFATTRIBS = DXFAttributes(base_class, acdb_mleader_style)
MIN_DXF_VERSION_FOR_EXPORT = const.DXF2000
def load_dxf_attribs(
self, processor: SubclassProcessor = None
) -> "DXFNamespace":
dxf = super().load_dxf_attribs(processor)
if processor:
processor.fast_load_dxfattribs(
dxf, acdb_mleader_style_group_codes, subclass=1
)
return dxf
def export_entity(self, tagwriter: "TagWriter") -> None:
super().export_entity(tagwriter)
tagwriter.write_tag2(const.SUBCLASS_MARKER, acdb_mleader_style.name)
self.dxf.export_dxf_attribs(
tagwriter, acdb_mleader_style.attribs.keys()
)
class MLeaderStyleCollection(ObjectCollection):
def __init__(self, doc: "Drawing"):
super().__init__(
doc, dict_name="ACAD_MLEADERSTYLE", object_type="MLEADERSTYLE"
)
self.create_required_entries()
def create_required_entries(self) -> None:
for name in ("Standard",):
if name not in self.object_dict:
mleader_style = self.new(name)
# set standard text style
text_style = self.doc.styles.get("Standard")
mleader_style.dxf.text_style_handle = text_style.dxf.handle
| mit | -356,234,445,368,242,200 | 35.774947 | 93 | 0.577709 | false | 3.520528 | false | false | false |
davejm/pyfling | pyfling.py | 1 | 4564 | """
@author: David Moodie
"""
import json
import os
import requests
flingAPI = "https://api.superfling.com/api/v2/"
amazonAPI = "http://unii-fling.s3.amazonaws.com/"
class Fling(object):
def __init__(self, bearer):
"""Requires authentication bearer to instantiate"""
#self.bearer = ""
self.bearer = bearer
def _request(self, endpoint="", data=None, req_type="post"):
global flingAPI
if data is None:
data = {}
user_agent = 'fling/1.6.2 (iPhone; iOS 8.3; Scale/2.00)'
bearer = 'Bearer ' + self.bearer
headers = {'User-Agent' : user_agent, 'Authorization' : bearer}
url = flingAPI
if req_type == "post":
headers['Content-Type'] = 'application/json'
r = requests.post(url + endpoint, data=data, headers=headers)
else:
r = requests.get(url + endpoint, params=data, headers=headers)
#if raise_for_status:
# r.raise_for_status()
return r
def _request_amazon(self, url, data=None, files=None):
#global amazonAPI
if data is None:
data = {}
user_agent = 'fling/1.6.2 (iPhone; iOS 8.3; Scale/2.00)'
headers = {'User-Agent' : user_agent}
#url = amazonAPI
r = requests.post(url, data=data, files=files, headers=headers)
return r
def _init_fling_on_server(self):
"""Create slot on fling to store fling data"""
media_type = "image" #Temporary
if media_type == "image":
data = {"uploads": {"extension":".jpg"}}
data = json.dumps(data)
r = self._request("uploads", data=data)
result = r.json()
uploads = result['uploads']
return uploads
def _upload_to_amazon(self, path, data):
"""Actually upload media to Amazon S3 so that fling can be downloaded/viewied"""
#print(data)
if not os.path.exists(path):
raise ValueError('No such file: {0}'.format(path))
with open(path, 'rb') as f:
file = f.read()
#Note: Must use tuple value for file data otherwise erroneous 'filename' field is put in request
files = {'file' : (None, file)}
amazonS3RequestURL = data['url']
if data['static_fields']['content-type'] == None:
data['static_fields']['content-type'] = ""
submitdata = data['static_fields']
r = self._request_amazon(amazonS3RequestURL, data=submitdata, files=files)
return r
def get_flings(self, limit=50, page=1):
data = {"limit" : limit, "page" : page}
r = self._request("flings", data=data, req_type="get")
result = r.json()
result = result['flings']
return result
def get_me(self):
r = self._request("users/me", req_type="get")
result = r.json()
result = result['users']
return result
def upload(self, path):
"""Init a new picture with fling API and
upload the picture from your harddrive to fling amazon S3 servers"""
datafromfling = self._init_fling_on_server()
#result = self._upload_to_amazon(path, datafromfling)
img_url = datafromfling['final_location']
return img_url
def send_text(self, text):
send_type = "Text"
if len(text) > 140:
print("Text must be <= 140 chars")
return "Text must be <= 140 chars"
else:
media = {"type" : send_type, "text" : text, "y" : 0}
data = {"flings": {"media" : media}}
data=json.dumps(data)
#print(data)
r = self._request("flings", data=data)
result = r.json()
return result
def send_image(self, img_url):
send_type = "Image"
media = {"type" : send_type, "url" : img_url, "y" : 0}
data = {"flings": {"media" : media}}
data=json.dumps(data)
#print(data)
r = self._request("flings", data=data)
result = r.json()
return result
def geocode(self, lat, lng):
geocode = requests.get("https://maps.googleapis.com/maps/api/geocode/json?latlng=" + str(lat) + "," + str(lng))
address = geocode.json()
if len(address['results']) == 0:
return ""
address = address['results']
return address
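# Hedged usage sketch (not part of the original module): the bearer token and
# message below are placeholder values, not real credentials from this project.
def _fling_demo():
    api = Fling("YOUR_BEARER_TOKEN")        # hypothetical OAuth bearer token
    me = api.get_me()                       # authenticated user profile
    latest = api.get_flings(limit=10)       # first page of recent flings
    api.send_text("Hello from pyfling")     # send_text enforces the 140-char limit
    return me, latest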
| mit | 3,062,441,199,841,669,000 | 31.834532 | 119 | 0.530675 | false | 3.759473 | false | false | false |
gagneurlab/concise | docs/autogen.py | 2 | 12495 | # -*- coding: utf-8 -*-
'''
General documentation architecture:
Home
Index
- Getting started
Getting started to concise
Layers
- Preprocessing
Genomic Sequence Preprocessing
RNA Structure Preprocessing
Spline-position Preprocessing
- Data
Encode
Attract
Losses
Metrics
Eval metrics
Optimizers
Initializers
Regularizers
- effects
- Utils
fasta
model_data
pwm
splines
Contributing
'''
from __future__ import print_function
from __future__ import unicode_literals
import re
import inspect
import os
import shutil
import sys
if sys.version[0] == '2':
reload(sys)
sys.setdefaultencoding('utf8')
import concise
from concise import utils
from concise.utils import fasta
from concise.utils import helper
from concise.utils import model_data
from concise.utils import pwm, load_motif_db
from concise.utils import splines
from concise.data import encode, attract, hocomoco
from concise.preprocessing import sequence, splines, structure
from concise import constraints
from concise import eval_metrics
from concise import metrics
from concise import hyopt
from concise import initializers
from concise import layers
from concise import losses
from concise import optimizers
from concise import regularizers
from concise import effects
EXCLUDE = {
'Optimizer',
'Wrapper',
'get_session',
'set_session',
'CallbackList',
'serialize',
'deserialize',
'get',
}
PAGES = [
{
'page': 'preprocessing/sequence.md',
'functions': [
sequence.encodeSequence,
sequence.pad_sequences,
sequence.encodeDNA,
sequence.encodeRNA,
sequence.encodeCodon,
sequence.encodeAA,
]
},
{
'page': 'preprocessing/splines.md',
'classes': [
splines.EncodeSplines,
],
'functions': [
splines.encodeSplines,
]
},
{
'page': 'preprocessing/structure.md',
'functions': [
structure.encodeRNAStructure,
]
},
{
'page': 'layers.md',
'functions': [
layers.InputDNA,
layers.InputRNA,
layers.InputRNAStructure,
layers.InputCodon,
layers.InputAA,
layers.InputSplines,
],
'classes': [
layers.SplineT,
layers.SplineWeight1D,
layers.ConvSequence,
layers.ConvDNA,
layers.ConvRNA,
layers.ConvRNAStructure,
layers.ConvAA,
layers.ConvCodon,
# layers.ConvSplines,
layers.GlobalSumPooling1D,
],
},
{
'page': 'losses.md',
'all_module_functions': [losses],
},
{
'page': 'metrics.md',
'all_module_functions': [metrics],
},
{
'page': 'eval_metrics.md',
'all_module_functions': [eval_metrics],
},
{
'page': 'initializers.md',
'all_module_functions': [initializers],
'all_module_classes': [initializers],
},
{
'page': 'regularizers.md',
# 'all_module_functions': [regularizers],
# 'all_module_classes': [regularizers],
'classes': [
regularizers.SplineSmoother,
]
},
{
'page': 'optimizers.md',
'all_module_classes': [optimizers],
'functions': [
optimizers.data_based_init
]
},
{
'page': 'effects.md',
'functions': [
effects.effect_from_model,
effects.gradient_pred,
effects.dropout_pred,
effects.ism,
]
},
{
'page': 'utils/fasta.md',
'all_module_functions': [utils.fasta],
},
{
'page': 'utils/model_data.md',
'all_module_functions': [utils.model_data],
},
{
'page': 'utils/pwm.md',
'classes': [utils.pwm.PWM],
'functions': [
load_motif_db,
]
},
{
'page': 'utils/splines.md',
'classes': [utils.splines.BSpline]
},
{
'page': 'hyopt.md',
'classes': [
hyopt.CMongoTrials,
hyopt.CompileFN,
],
'functions': [
hyopt.test_fn,
hyopt.eval_model,
]
},
{
'page': 'data/encode.md',
'functions': [
encode.get_metadata,
encode.get_pwm_list,
]
},
{
'page': 'data/attract.md',
'functions': [
attract.get_metadata,
attract.get_pwm_list,
]
},
{
'page': 'data/hocomoco.md',
'functions': [
hocomoco.get_metadata,
hocomoco.get_pwm_list,
]
},
]
# TODO
ROOT = 'http://concise.io/'
def get_earliest_class_that_defined_member(member, cls):
ancestors = get_classes_ancestors([cls])
result = None
for ancestor in ancestors:
if member in dir(ancestor):
result = ancestor
if not result:
return cls
return result
def get_classes_ancestors(classes):
ancestors = []
for cls in classes:
ancestors += cls.__bases__
filtered_ancestors = []
for ancestor in ancestors:
if ancestor.__name__ in ['object']:
continue
filtered_ancestors.append(ancestor)
if filtered_ancestors:
return filtered_ancestors + get_classes_ancestors(filtered_ancestors)
else:
return filtered_ancestors
def get_function_signature(function, method=True):
signature = getattr(function, '_legacy_support_signature', None)
if signature is None:
signature = inspect.getargspec(function)
defaults = signature.defaults
if method:
args = signature.args[1:]
else:
args = signature.args
if defaults:
kwargs = zip(args[-len(defaults):], defaults)
args = args[:-len(defaults)]
else:
kwargs = []
st = '%s.%s(' % (function.__module__, function.__name__)
for a in args:
st += str(a) + ', '
for a, v in kwargs:
if isinstance(v, str):
v = '\'' + v + '\''
st += str(a) + '=' + str(v) + ', '
if kwargs or args:
return st[:-2] + ')'
else:
return st + ')'
def get_class_signature(cls):
try:
class_signature = get_function_signature(cls.__init__)
class_signature = class_signature.replace('__init__', cls.__name__)
except:
# in case the class inherits from object and does not
# define __init__
class_signature = cls.__module__ + '.' + cls.__name__ + '()'
return class_signature
def class_to_docs_link(cls):
module_name = cls.__module__
assert module_name[:8] == 'concise.'
module_name = module_name[8:]
link = ROOT + module_name.replace('.', '/') + '#' + cls.__name__.lower()
return link
def class_to_source_link(cls):
module_name = cls.__module__
assert module_name[:8] == 'concise.'
path = module_name.replace('.', '/')
path += '.py'
line = inspect.getsourcelines(cls)[-1]
link = 'https://github.com/avsecz/concise/blob/master/' + path + '#L' + str(line)
return '[[source]](' + link + ')'
def code_snippet(snippet):
result = '```python\n'
result += snippet + '\n'
result += '```\n'
return result
def process_class_docstring(docstring):
docstring = re.sub(r'\n # (.*)\n',
r'\n __\1__\n\n',
docstring)
docstring = re.sub(r' ([^\s\\]+):(.*)\n',
r' - __\1__:\2\n',
docstring)
docstring = docstring.replace(' ' * 5, '\t\t')
docstring = docstring.replace(' ' * 3, '\t')
docstring = docstring.replace(' ', '')
return docstring
def process_function_docstring(docstring):
docstring = re.sub(r'\n # (.*)\n',
r'\n __\1__\n\n',
docstring)
docstring = re.sub(r'\n # (.*)\n',
r'\n __\1__\n\n',
docstring)
docstring = re.sub(r' ([^\s\\]+):(.*)\n',
r' - __\1__:\2\n',
docstring)
docstring = docstring.replace(' ' * 6, '\t\t')
docstring = docstring.replace(' ' * 4, '\t')
docstring = docstring.replace(' ', '')
return docstring
print('Cleaning up existing sources directory.')
if os.path.exists('sources'):
shutil.rmtree('sources')
print('Populating sources directory with templates.')
for subdir, dirs, fnames in os.walk('templates'):
for fname in fnames:
new_subdir = subdir.replace('templates', 'sources')
if not os.path.exists(new_subdir):
os.makedirs(new_subdir)
if fname[-3:] == '.md':
fpath = os.path.join(subdir, fname)
new_fpath = fpath.replace('templates', 'sources')
shutil.copy(fpath, new_fpath)
# Take care of index page.
readme = open('../README.md').read()
index = open('templates/index.md').read()
index = index.replace('{{autogenerated}}', readme[readme.find('##'):])
f = open('sources/index.md', 'w')
f.write(index)
f.close()
print('Starting autogeneration.')
for page_data in PAGES:
blocks = []
classes = page_data.get('classes', [])
for module in page_data.get('all_module_classes', []):
module_classes = []
for name in dir(module):
if name[0] == '_' or name in EXCLUDE:
continue
module_member = getattr(module, name)
if inspect.isclass(module_member):
cls = module_member
if cls.__module__ == module.__name__:
if cls not in module_classes:
module_classes.append(cls)
module_classes.sort(key=lambda x: id(x))
classes += module_classes
for cls in classes:
subblocks = []
signature = get_class_signature(cls)
subblocks.append('<span style="float:right;">' + class_to_source_link(cls) + '</span>')
subblocks.append('### ' + cls.__name__ + '\n')
subblocks.append(code_snippet(signature))
docstring = cls.__doc__
if docstring:
subblocks.append(process_class_docstring(docstring))
blocks.append('\n'.join(subblocks))
functions = page_data.get('functions', [])
for module in page_data.get('all_module_functions', []):
module_functions = []
for name in dir(module):
if name[0] == '_' or name in EXCLUDE:
continue
module_member = getattr(module, name)
if inspect.isfunction(module_member):
function = module_member
if module.__name__ in function.__module__:
if function not in module_functions:
module_functions.append(function)
module_functions.sort(key=lambda x: id(x))
functions += module_functions
for function in functions:
subblocks = []
signature = get_function_signature(function, method=False)
signature = signature.replace(function.__module__ + '.', '')
subblocks.append('### ' + function.__name__ + '\n')
subblocks.append(code_snippet(signature))
docstring = function.__doc__
if docstring:
subblocks.append(process_function_docstring(docstring))
blocks.append('\n\n'.join(subblocks))
if not blocks:
raise RuntimeError('Found no content for page ' +
page_data['page'])
mkdown = '\n----\n\n'.join(blocks)
# save module page.
# Either insert content into existing page,
# or create page otherwise
page_name = page_data['page']
path = os.path.join('sources', page_name)
if os.path.exists(path):
template = open(path).read()
assert '{{autogenerated}}' in template, ('Template found for ' + path +
' but missing {{autogenerated}} tag.')
mkdown = template.replace('{{autogenerated}}', mkdown)
print('...inserting autogenerated content into template:', path)
else:
print('...creating new page with autogenerated content:', path)
subdir = os.path.dirname(path)
if not os.path.exists(subdir):
os.makedirs(subdir)
open(path, 'w').write(mkdown)
shutil.copyfile('../CONTRIBUTING.md', 'sources/contributing.md')
| mit | -911,624,956,512,092,000 | 26.7051 | 95 | 0.54958 | false | 3.908352 | false | false | false |
jajberni/pcse_web | main/pcse/settings/default_settings.py | 1 | 2765 | # -*- coding: utf-8 -*-
# Copyright (c) 2004-2014 Alterra, Wageningen-UR
# Allard de Wit ([email protected]), April 2014
"""Settings for PCSE
Default values will be read from the file 'pcse/settings/default_settings.py'.
User-specific settings are read from '$HOME/.pcse/user_settings.py'. Any
settings defined in user settings will override the default settings.
Settings must be defined as ALL-CAPS and can be accessed as attributes
from pcse.settings.settings
For example, to use the settings in a module under 'crop':
from ..settings import settings
print settings.METEO_CACHE_DIR
Settings that are not ALL-CAPS will generate a warning. To avoid warnings
for everything that is not a setting (such as imported modules), prepend
an underscore to the name.
"""
# Location for meteo cache files
METEO_CACHE_DIR = "meteo_cache"
# Do range checks for meteo variables
METEO_RANGE_CHECKS = True
# PCSE sets all rate variables to zero after state integration for consistency.
# You can disable this behaviour for increased performance.
ZEROFY = True
# Configuration of logging
# The logging system of PCSE consists of two log handlers. One that sends log messages
# to the screen ('console') and one that sends message to a file. The location and name of
# the log is defined by LOG_DIR and LOG_FILE_NAME. Moreover, the console and file handlers
# can be given a log level as defined LOG_LEVEL_FILE and LOG_LEVEL_CONSOLE. By default
# these levels are INFO and WARNING meaning that log message of INFO and up are sent to
# file and WARNING and up are send to the console. For detailed log messages the log
# level can be set to DEBUG but this will generate a large number of logging messages.
#
# Log files can become 1Mb large. When this file size is reached a new file is opened
# and the old one is renamed. Only the most recent 7 log files are retained to avoid
# getting large log file sizes.
LOG_LEVEL_CONSOLE = "INFO"
LOG_CONFIG = \
{
'version': 1,
'formatters': {
'standard': {
'format': '%(asctime)s [%(levelname)s] %(name)s: %(message)s'
},
'brief': {
'format': '[%(levelname)s] - %(message)s'
},
},
'handlers': {
'console': {
'level':LOG_LEVEL_CONSOLE,
'class':'logging.StreamHandler',
'formatter':'brief'
},
},
'root': {
'handlers': ['console'],
'propagate': True,
'level':'NOTSET'
}
}
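# Hedged usage sketch (not part of the original settings module): how the
# LOG_CONFIG dictionary above could be applied with the standard library.
# The leading underscore keeps this helper from being treated as a setting.
def _logging_demo():
    import logging
    import logging.config
    logging.config.dictConfig(LOG_CONFIG)
    logging.getLogger("pcse.demo").info("logging configured from default_settings")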
| apache-2.0 | -1,878,564,587,037,230,300 | 37.943662 | 90 | 0.61736 | false | 4.253846 | false | false | false |
nyirog/feedlink | lib/feedlink/classify.py | 1 | 1665 | from lxml import etree
def classify(fh):
"""
Classify the feed type of the *link*
Args:
fh: url handler
Returns:
feed type: 'atom' or 'rss'
Raises:
UnknownFeedError: if the *link* does not point to a valid feed
"""
feed = fh.read()
for subclass in FeedClassifier.__subclasses__():
if subclass.check(feed):
return subclass.__name__
raise UnknownFeedError()
def get_feed_types():
"""List the available feed types by this feed classifier module."""
types = [subcls.__name__ for subcls in FeedClassifier.__subclasses__()]
return types
class FeedClassifier(object):
"""
Super class of the feed classifiers. The check class method has to be
overwritten by the descendant classes.
The name of the descendant class will be its feed type.
"""
@classmethod
def check(cls, feed):
"""Validate the *feed* content"""
return False
class atom(FeedClassifier):
"""atom feed classifier"""
xmlns = 'http://www.w3.org/2005/Atom'
@classmethod
def check(cls, feed):
try:
root = etree.fromstring(feed)
except etree.XMLSyntaxError, error:
return False
else:
if root.nsmap.get(None) == cls.xmlns:
return True
return False
class rss(FeedClassifier):
"""rss feed classifier"""
@classmethod
def check(cls, feed):
try:
root = etree.fromstring(feed)
except etree.XMLSyntaxError, error:
return False
return root.tag == cls.__name__
pass
class UnknownFeedError(Exception):
pass
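# Hedged usage sketch (not part of the original module): fetch a feed over HTTP
# and classify it. The URL is a placeholder; urllib2 is used because this
# module targets Python 2 syntax.
def _classify_demo(url="http://example.com/feed.xml"):
    import urllib2
    fh = urllib2.urlopen(url)
    try:
        return classify(fh)    # 'atom' or 'rss'
    except UnknownFeedError:
        return None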
| gpl-2.0 | -4,236,688,970,001,432,600 | 22.785714 | 75 | 0.603003 | false | 4.335938 | false | false | false |
thompsonx/kaira | gui/projectconfig.py | 11 | 1970 | #
# Copyright (C) 2010 Stanislav Bohm
#
# This file is part of Kaira.
#
# Kaira is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, version 3 of the License, or
# (at your option) any later version.
#
# Kaira is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Kaira. If not, see <http://www.gnu.org/licenses/>.
#
from parameters import ParametersWidget
from build import BuildOptionsWidget
import gtk
class LibraryConfig(gtk.VBox):
def __init__(self, project):
def set_rpc(w):
project.library_rpc = w.get_active()
def set_octave(w):
project.library_octave = w.get_active()
gtk.VBox.__init__(self)
self.project = project
button = gtk.CheckButton("Build library in RPC mode")
button.set_active(project.library_rpc)
button.connect("toggled", set_rpc)
self.pack_start(button, False, False)
button = gtk.CheckButton("Build Octave module")
button.set_active(project.library_octave)
button.connect("toggled", set_octave)
self.pack_start(button, False, False)
self.show()
class ProjectConfig(gtk.Notebook):
def __init__(self, app):
gtk.Notebook.__init__(self)
self.set_tab_pos(gtk.POS_LEFT)
w = LibraryConfig(app.project)
self.append_page(w, gtk.Label("Library"))
w = ParametersWidget(app.project, app.window)
self.append_page(w, gtk.Label("Parameters"))
w = BuildOptionsWidget(app.project, app)
self.append_page(w, gtk.Label("Build"))
self.show_all()
| gpl-3.0 | -8,147,209,288,014,349,000 | 29.78125 | 73 | 0.654315 | false | 3.703008 | false | false | false |
vegitron/django-performance-middleware | performance_middleware/middleware.py | 1 | 2842 |
from datetime import datetime
from django.conf import settings
from django.db import connection
import pstats
from cStringIO import StringIO
from random import random
import logging
try:
import cProfile as profile
except ImportError:
import profile
class PerformanceMiddleware(object):
_process_data = {}
profiling = False
logger = logging.getLogger(__name__)
def process_view(self, request, callback, callback_args, callback_kwargs):
# self is reused :(
self._process_data = {}
self.profiling = False
self.profiler = None
self._process_data['start_time'] = datetime.now()
profile_per = getattr(settings, "PERFORMANCE_MIDDLEWARE_PROFILE_EVERY", 10)
random_less_than = 1.0 / profile_per
rand_val = random()
if rand_val < random_less_than:
self.profiling = True
self.profiler = profile.Profile()
args = (request,) + callback_args
try:
return self.profiler.runcall(callback, *args, **callback_kwargs)
except:
# we want the process_exception middleware to fire
# https://code.djangoproject.com/ticket/12250
return
def process_response(self, request, response):
now = datetime.now()
start = self._process_data['start_time']
td = (now - start)
seconds_taken = (td.microseconds + (td.seconds + td.days * 24 * 3600) * 10**6) / 10**6
warning_threshold = getattr(settings, "PERFORMANCE_MIDDLEWARE_WARNING_THRESHOLD", 1.0)
error_threshold = getattr(settings, "PERFORMANCE_MIDDLEWARE_ERROR_THRESHOLD", 2.0)
critical_threshold = getattr(settings, "PERFORMANCE_MIDDLEWARE_CRITICAL_THRESHOLD", 5.0)
if (seconds_taken < warning_threshold) and (seconds_taken < error_threshold) and (seconds_taken < critical_threshold):
return response
io = StringIO()
io.write("Time taken: %f seconds\n" % seconds_taken)
io.write("Request: \n%s\n" % request.__str__())
io.write("Profile: \n")
if self.profiling:
self.profiler.create_stats()
stats = pstats.Stats(self.profiler, stream=io)
stats.sort_stats('cumulative')
stats.print_stats(100)
else:
io.write("No profile for this request, sorry")
io.write("SQL:\n")
for query in connection.queries:
io.write("Time: %s, Query: %s\n" % (query['time'], query['sql']))
if seconds_taken > critical_threshold:
self.logger.critical(io.getvalue())
elif seconds_taken > error_threshold:
self.logger.error(io.getvalue())
elif seconds_taken > warning_threshold:
self.logger.warning(io.getvalue())
return response
| apache-2.0 | 853,256,153,517,864,800 | 32.046512 | 126 | 0.612949 | false | 4.148905 | false | false | false |
raxenak/borg | src/borg/testsuite/patterns.py | 4 | 16633 | import argparse
import io
import os.path
import sys
import pytest
from ..patterns import PathFullPattern, PathPrefixPattern, FnmatchPattern, ShellPattern, RegexPattern
from ..patterns import load_exclude_file, load_pattern_file
from ..patterns import parse_pattern, PatternMatcher
def check_patterns(files, pattern, expected):
"""Utility for testing patterns.
"""
assert all([f == os.path.normpath(f) for f in files]), "Pattern matchers expect normalized input paths"
matched = [f for f in files if pattern.match(f)]
assert matched == (files if expected is None else expected)
@pytest.mark.parametrize("pattern, expected", [
# "None" means all files, i.e. all match the given pattern
("/", []),
("/home", ["/home"]),
("/home///", ["/home"]),
("/./home", ["/home"]),
("/home/user", ["/home/user"]),
("/home/user2", ["/home/user2"]),
("/home/user/.bashrc", ["/home/user/.bashrc"]),
])
def test_patterns_full(pattern, expected):
files = ["/home", "/home/user", "/home/user2", "/home/user/.bashrc", ]
check_patterns(files, PathFullPattern(pattern), expected)
@pytest.mark.parametrize("pattern, expected", [
# "None" means all files, i.e. all match the given pattern
("", []),
("relative", []),
("relative/path/", ["relative/path"]),
("relative/path", ["relative/path"]),
])
def test_patterns_full_relative(pattern, expected):
files = ["relative/path", "relative/path2", ]
check_patterns(files, PathFullPattern(pattern), expected)
@pytest.mark.parametrize("pattern, expected", [
# "None" means all files, i.e. all match the given pattern
("/", None),
("/./", None),
("", []),
("/home/u", []),
("/home/user", ["/home/user/.profile", "/home/user/.bashrc"]),
("/etc", ["/etc/server/config", "/etc/server/hosts"]),
("///etc//////", ["/etc/server/config", "/etc/server/hosts"]),
("/./home//..//home/user2", ["/home/user2/.profile", "/home/user2/public_html/index.html"]),
("/srv", ["/srv/messages", "/srv/dmesg"]),
])
def test_patterns_prefix(pattern, expected):
files = [
"/etc/server/config", "/etc/server/hosts", "/home", "/home/user/.profile", "/home/user/.bashrc",
"/home/user2/.profile", "/home/user2/public_html/index.html", "/srv/messages", "/srv/dmesg",
]
check_patterns(files, PathPrefixPattern(pattern), expected)
@pytest.mark.parametrize("pattern, expected", [
# "None" means all files, i.e. all match the given pattern
("", []),
("foo", []),
("relative", ["relative/path1", "relative/two"]),
("more", ["more/relative"]),
])
def test_patterns_prefix_relative(pattern, expected):
files = ["relative/path1", "relative/two", "more/relative"]
check_patterns(files, PathPrefixPattern(pattern), expected)
@pytest.mark.parametrize("pattern, expected", [
# "None" means all files, i.e. all match the given pattern
("/*", None),
("/./*", None),
("*", None),
("*/*", None),
("*///*", None),
("/home/u", []),
("/home/*",
["/home/user/.profile", "/home/user/.bashrc", "/home/user2/.profile", "/home/user2/public_html/index.html",
"/home/foo/.thumbnails", "/home/foo/bar/.thumbnails"]),
("/home/user/*", ["/home/user/.profile", "/home/user/.bashrc"]),
("/etc/*", ["/etc/server/config", "/etc/server/hosts"]),
("*/.pr????e", ["/home/user/.profile", "/home/user2/.profile"]),
("///etc//////*", ["/etc/server/config", "/etc/server/hosts"]),
("/./home//..//home/user2/*", ["/home/user2/.profile", "/home/user2/public_html/index.html"]),
("/srv*", ["/srv/messages", "/srv/dmesg"]),
("/home/*/.thumbnails", ["/home/foo/.thumbnails", "/home/foo/bar/.thumbnails"]),
])
def test_patterns_fnmatch(pattern, expected):
files = [
"/etc/server/config", "/etc/server/hosts", "/home", "/home/user/.profile", "/home/user/.bashrc",
"/home/user2/.profile", "/home/user2/public_html/index.html", "/srv/messages", "/srv/dmesg",
"/home/foo/.thumbnails", "/home/foo/bar/.thumbnails",
]
check_patterns(files, FnmatchPattern(pattern), expected)
@pytest.mark.parametrize("pattern, expected", [
# "None" means all files, i.e. all match the given pattern
("*", None),
("**/*", None),
("/**/*", None),
("/./*", None),
("*/*", None),
("*///*", None),
("/home/u", []),
("/home/*",
["/home/user/.profile", "/home/user/.bashrc", "/home/user2/.profile", "/home/user2/public_html/index.html",
"/home/foo/.thumbnails", "/home/foo/bar/.thumbnails"]),
("/home/user/*", ["/home/user/.profile", "/home/user/.bashrc"]),
("/etc/*/*", ["/etc/server/config", "/etc/server/hosts"]),
("/etc/**/*", ["/etc/server/config", "/etc/server/hosts"]),
("/etc/**/*/*", ["/etc/server/config", "/etc/server/hosts"]),
("*/.pr????e", []),
("**/.pr????e", ["/home/user/.profile", "/home/user2/.profile"]),
("///etc//////*", ["/etc/server/config", "/etc/server/hosts"]),
("/./home//..//home/user2/", ["/home/user2/.profile", "/home/user2/public_html/index.html"]),
("/./home//..//home/user2/**/*", ["/home/user2/.profile", "/home/user2/public_html/index.html"]),
("/srv*/", ["/srv/messages", "/srv/dmesg", "/srv2/blafasel"]),
("/srv*", ["/srv", "/srv/messages", "/srv/dmesg", "/srv2", "/srv2/blafasel"]),
("/srv/*", ["/srv/messages", "/srv/dmesg"]),
("/srv2/**", ["/srv2", "/srv2/blafasel"]),
("/srv2/**/", ["/srv2/blafasel"]),
("/home/*/.thumbnails", ["/home/foo/.thumbnails"]),
("/home/*/*/.thumbnails", ["/home/foo/bar/.thumbnails"]),
])
def test_patterns_shell(pattern, expected):
files = [
"/etc/server/config", "/etc/server/hosts", "/home", "/home/user/.profile", "/home/user/.bashrc",
"/home/user2/.profile", "/home/user2/public_html/index.html", "/srv", "/srv/messages", "/srv/dmesg",
"/srv2", "/srv2/blafasel", "/home/foo/.thumbnails", "/home/foo/bar/.thumbnails",
]
check_patterns(files, ShellPattern(pattern), expected)
@pytest.mark.parametrize("pattern, expected", [
# "None" means all files, i.e. all match the given pattern
("", None),
(".*", None),
("^/", None),
("^abc$", []),
("^[^/]", []),
("^(?!/srv|/foo|/opt)",
["/home", "/home/user/.profile", "/home/user/.bashrc", "/home/user2/.profile",
"/home/user2/public_html/index.html", "/home/foo/.thumbnails", "/home/foo/bar/.thumbnails", ]),
])
def test_patterns_regex(pattern, expected):
files = [
'/srv/data', '/foo/bar', '/home',
'/home/user/.profile', '/home/user/.bashrc',
'/home/user2/.profile', '/home/user2/public_html/index.html',
'/opt/log/messages.txt', '/opt/log/dmesg.txt',
"/home/foo/.thumbnails", "/home/foo/bar/.thumbnails",
]
obj = RegexPattern(pattern)
assert str(obj) == pattern
assert obj.pattern == pattern
check_patterns(files, obj, expected)
def test_regex_pattern():
# The forward slash must match the platform-specific path separator
assert RegexPattern("^/$").match("/")
assert RegexPattern("^/$").match(os.path.sep)
assert not RegexPattern(r"^\\$").match("/")
def use_normalized_unicode():
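    # macOS normalizes unicode in file names, so composed and decomposed forms are
    # expected to match each other there (asserted by the tests below).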
return sys.platform in ("darwin",)
def _make_test_patterns(pattern):
return [PathPrefixPattern(pattern),
FnmatchPattern(pattern),
RegexPattern("^{}/foo$".format(pattern)),
ShellPattern(pattern),
]
@pytest.mark.parametrize("pattern", _make_test_patterns("b\N{LATIN SMALL LETTER A WITH ACUTE}"))
def test_composed_unicode_pattern(pattern):
assert pattern.match("b\N{LATIN SMALL LETTER A WITH ACUTE}/foo")
assert pattern.match("ba\N{COMBINING ACUTE ACCENT}/foo") == use_normalized_unicode()
@pytest.mark.parametrize("pattern", _make_test_patterns("ba\N{COMBINING ACUTE ACCENT}"))
def test_decomposed_unicode_pattern(pattern):
assert pattern.match("b\N{LATIN SMALL LETTER A WITH ACUTE}/foo") == use_normalized_unicode()
assert pattern.match("ba\N{COMBINING ACUTE ACCENT}/foo")
@pytest.mark.parametrize("pattern", _make_test_patterns(str(b"ba\x80", "latin1")))
def test_invalid_unicode_pattern(pattern):
assert not pattern.match("ba/foo")
assert pattern.match(str(b"ba\x80/foo", "latin1"))
@pytest.mark.parametrize("lines, expected", [
# "None" means all files, i.e. none excluded
([], None),
(["# Comment only"], None),
(["*"], []),
(["# Comment",
"*/something00.txt",
" *whitespace* ",
# Whitespace before comment
" #/ws*",
# Empty line
"",
"# EOF"],
["/more/data", "/home", " #/wsfoobar"]),
(["re:.*"], []),
(["re:\s"], ["/data/something00.txt", "/more/data", "/home"]),
([r"re:(.)(\1)"], ["/more/data", "/home", "\tstart/whitespace", "/whitespace/end\t"]),
(["", "", "",
"# This is a test with mixed pattern styles",
# Case-insensitive pattern
"re:(?i)BAR|ME$",
"",
"*whitespace*",
"fm:*/something00*"],
["/more/data"]),
([r" re:^\s "], ["/data/something00.txt", "/more/data", "/home", "/whitespace/end\t"]),
([r" re:\s$ "], ["/data/something00.txt", "/more/data", "/home", " #/wsfoobar", "\tstart/whitespace"]),
(["pp:./"], None),
(["pp:/"], [" #/wsfoobar", "\tstart/whitespace"]),
(["pp:aaabbb"], None),
(["pp:/data", "pp: #/", "pp:\tstart", "pp:/whitespace"], ["/more/data", "/home"]),
(["/nomatch", "/more/*"],
['/data/something00.txt', '/home', ' #/wsfoobar', '\tstart/whitespace', '/whitespace/end\t']),
# the order of exclude patterns shouldn't matter
(["/more/*", "/nomatch"],
['/data/something00.txt', '/home', ' #/wsfoobar', '\tstart/whitespace', '/whitespace/end\t']),
])
def test_exclude_patterns_from_file(tmpdir, lines, expected):
files = [
'/data/something00.txt', '/more/data', '/home',
' #/wsfoobar',
'\tstart/whitespace',
'/whitespace/end\t',
]
def evaluate(filename):
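        # Apply the exclude file to the sample paths and return those that survive (are not excluded).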
patterns = []
load_exclude_file(open(filename, "rt"), patterns)
matcher = PatternMatcher(fallback=True)
matcher.add_inclexcl(patterns)
return [path for path in files if matcher.match(path)]
exclfile = tmpdir.join("exclude.txt")
with exclfile.open("wt") as fh:
fh.write("\n".join(lines))
assert evaluate(str(exclfile)) == (files if expected is None else expected)
@pytest.mark.parametrize("lines, expected_roots, expected_numpatterns", [
# "None" means all files, i.e. none excluded
([], [], 0),
(["# Comment only"], [], 0),
(["- *"], [], 1),
(["+fm:*/something00.txt",
"-/data"], [], 2),
(["R /"], ["/"], 0),
(["R /",
"# comment"], ["/"], 0),
(["# comment",
"- /data",
"R /home"], ["/home"], 1),
])
def test_load_patterns_from_file(tmpdir, lines, expected_roots, expected_numpatterns):
def evaluate(filename):
roots = []
inclexclpatterns = []
load_pattern_file(open(filename, "rt"), roots, inclexclpatterns)
return roots, len(inclexclpatterns)
patternfile = tmpdir.join("patterns.txt")
with patternfile.open("wt") as fh:
fh.write("\n".join(lines))
roots, numpatterns = evaluate(str(patternfile))
assert roots == expected_roots
assert numpatterns == expected_numpatterns
def test_switch_patterns_style():
patterns = """\
+0_initial_default_is_shell
p fm
+1_fnmatch
P re
+2_regex
+3_more_regex
P pp
+4_pathprefix
p fm
p sh
+5_shell
"""
pattern_file = io.StringIO(patterns)
roots, patterns = [], []
load_pattern_file(pattern_file, roots, patterns)
assert len(patterns) == 6
assert isinstance(patterns[0].val, ShellPattern)
assert isinstance(patterns[1].val, FnmatchPattern)
assert isinstance(patterns[2].val, RegexPattern)
assert isinstance(patterns[3].val, RegexPattern)
assert isinstance(patterns[4].val, PathPrefixPattern)
assert isinstance(patterns[5].val, ShellPattern)
@pytest.mark.parametrize("lines", [
(["X /data"]), # illegal pattern type prefix
(["/data"]), # need a pattern type prefix
])
def test_load_invalid_patterns_from_file(tmpdir, lines):
patternfile = tmpdir.join("patterns.txt")
with patternfile.open("wt") as fh:
fh.write("\n".join(lines))
filename = str(patternfile)
with pytest.raises(argparse.ArgumentTypeError):
roots = []
inclexclpatterns = []
load_pattern_file(open(filename, "rt"), roots, inclexclpatterns)
@pytest.mark.parametrize("lines, expected", [
# "None" means all files, i.e. none excluded
([], None),
(["# Comment only"], None),
(["- *"], []),
# default match type is sh: for patterns -> * doesn't match a /
(["-*/something0?.txt"],
['/data', '/data/something00.txt', '/data/subdir/something01.txt',
'/home', '/home/leo', '/home/leo/t', '/home/other']),
(["-fm:*/something00.txt"],
['/data', '/data/subdir/something01.txt', '/home', '/home/leo', '/home/leo/t', '/home/other']),
(["-fm:*/something0?.txt"],
["/data", '/home', '/home/leo', '/home/leo/t', '/home/other']),
(["+/*/something0?.txt",
"-/data"],
["/data/something00.txt", '/home', '/home/leo', '/home/leo/t', '/home/other']),
(["+fm:*/something00.txt",
"-/data"],
["/data/something00.txt", '/home', '/home/leo', '/home/leo/t', '/home/other']),
# include /home/leo and exclude the rest of /home:
(["+/home/leo",
"-/home/*"],
['/data', '/data/something00.txt', '/data/subdir/something01.txt', '/home', '/home/leo', '/home/leo/t']),
# wrong order, /home/leo is already excluded by -/home/*:
(["-/home/*",
"+/home/leo"],
['/data', '/data/something00.txt', '/data/subdir/something01.txt', '/home']),
(["+fm:/home/leo",
"-/home/"],
['/data', '/data/something00.txt', '/data/subdir/something01.txt', '/home', '/home/leo', '/home/leo/t']),
])
def test_inclexcl_patterns_from_file(tmpdir, lines, expected):
files = [
'/data', '/data/something00.txt', '/data/subdir/something01.txt',
'/home', '/home/leo', '/home/leo/t', '/home/other'
]
def evaluate(filename):
matcher = PatternMatcher(fallback=True)
roots = []
inclexclpatterns = []
load_pattern_file(open(filename, "rt"), roots, inclexclpatterns)
matcher.add_inclexcl(inclexclpatterns)
return [path for path in files if matcher.match(path)]
patternfile = tmpdir.join("patterns.txt")
with patternfile.open("wt") as fh:
fh.write("\n".join(lines))
assert evaluate(str(patternfile)) == (files if expected is None else expected)
@pytest.mark.parametrize("pattern, cls", [
("", FnmatchPattern),
# Default style
("*", FnmatchPattern),
("/data/*", FnmatchPattern),
# fnmatch style
("fm:", FnmatchPattern),
("fm:*", FnmatchPattern),
("fm:/data/*", FnmatchPattern),
("fm:fm:/data/*", FnmatchPattern),
# Regular expression
("re:", RegexPattern),
("re:.*", RegexPattern),
("re:^/something/", RegexPattern),
("re:re:^/something/", RegexPattern),
# Path prefix
("pp:", PathPrefixPattern),
("pp:/", PathPrefixPattern),
("pp:/data/", PathPrefixPattern),
("pp:pp:/data/", PathPrefixPattern),
# Shell-pattern style
("sh:", ShellPattern),
("sh:*", ShellPattern),
("sh:/data/*", ShellPattern),
("sh:sh:/data/*", ShellPattern),
])
def test_parse_pattern(pattern, cls):
assert isinstance(parse_pattern(pattern), cls)
@pytest.mark.parametrize("pattern", ["aa:", "fo:*", "00:", "x1:abc"])
def test_parse_pattern_error(pattern):
with pytest.raises(ValueError):
parse_pattern(pattern)
def test_pattern_matcher():
pm = PatternMatcher()
assert pm.fallback is None
for i in ["", "foo", "bar"]:
assert pm.match(i) is None
# add extra entries to aid in testing
for target in ["A", "B", "Empty", "FileNotFound"]:
pm.is_include_cmd[target] = target
pm.add([RegexPattern("^a")], "A")
pm.add([RegexPattern("^b"), RegexPattern("^z")], "B")
pm.add([RegexPattern("^$")], "Empty")
pm.fallback = "FileNotFound"
assert pm.match("") == "Empty"
assert pm.match("aaa") == "A"
assert pm.match("bbb") == "B"
assert pm.match("ccc") == "FileNotFound"
assert pm.match("xyz") == "FileNotFound"
assert pm.match("z") == "B"
assert PatternMatcher(fallback="hey!").fallback == "hey!"
| bsd-3-clause | -8,637,553,673,178,634,000 | 34.616702 | 112 | 0.57903 | false | 3.288454 | true | false | false |
lucha93/RecFIN-Data-Analysis-Project | predict.py | 1 | 1522 | import sqlite3
import matplotlib.pyplot as plt
from collections import OrderedDict
import numpy as np
# Connect to database
conn = sqlite3.connect('sitedb.sqlite')
cur = conn.cursor()
months = ['Jan', 'Feb', 'Mar', 'Apr', 'May', 'Jun',
'Jul', 'Aug', 'Sep', 'Oct', 'Nov', 'Dec']
avg_list = []
std_list = []
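# For each month, collect the catch column (line[7]), drop the single largest value
# (presumably an outlier), and store the mean and standard deviation used for the
# normal-distribution plots below.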
for i in range(1, 13, 1):
stuff = cur.execute('''
SELECT * FROM Data WHERE month = ? ORDER BY date''', (str(i), ))
# catch from each month
month_catch = []
for line in stuff:
month_catch.append(line[7])
high = max(month_catch)
    month_catch.remove(high)  # list.remove() works in place and returns None, so no reassignment
avg = np.mean(month_catch)
std_dev = np.std(month_catch)
# list of avg ordered by month
avg_list.append(avg)
# list of std deviations ordered by month
std_list.append(std_dev)
# Graph of normal distribution of predictions
for i in range(len(avg_list)):
mu = avg_list[i]
sigma = std_list[i]
s = np.random.normal(mu, sigma, 1000)
count, bins, ignored = plt.hist(s, 30, normed=True)
plt.title('Normal Distribution of Predicted Catch in %s' % months[i])
plt.plot(bins, 1/(sigma * np.sqrt(2 * np.pi)) *
np.exp( - (bins - mu)**2 / (2 * sigma**2) ),
linewidth=2, color='r')
axes = plt.gca()
axes.set_xlim(0,)
plt.show()
#plt.figure(1)
#plt.bar(range(len(avg_catch)), avg_catch.values(), align='center')
#plt.xticks(range(len(avg_catch)), avg_catch.keys())
#plt.xlabel('Month')
#plt.ylabel('Average Catch')
#plt.title('Average Catch at Ventura County Shore Sites 2000-2010')
#plt.show()
| mit | -3,463,609,953,121,147,000 | 20.138889 | 70 | 0.65046 | false | 2.757246 | false | false | false |
DESatAPSU/DAWDs | python/plot_spectra_variations.py | 1 | 1955 | #!/usr/bin/env python
#This script plots a white-dwarf model and its four 1-sigma variations.
#It accepts a text file with the model file names listed.
#Slight modification is required on line 61 to scale the y-axis correctly.
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
# Read the text list of files to plot (starList.txt) into a python list
with open('starList.txt') as f:
lines = f.readlines()
lines = [x.strip() for x in lines]
# process the list five files at a time: the original model plus its four variations
listLength = len(lines)
count = 0
# this loop plots the original model and the 4 variations
while (count < listLength):
inputString1 = lines[count]
inputString2 = lines[count + 1]
inputString3 = lines[count + 2]
inputString4 = lines[count + 3]
inputString5 = lines[count + 4]
plotTitle = lines[count][:12]
plotFileName = lines[count][:12] + ".pdf"
array1 = np.genfromtxt(inputString1,names=['wave','flam'])
array2 = np.genfromtxt(inputString2,names=['wave','flam'])
array3 = np.genfromtxt(inputString3,names=['wave','flam'])
array4 = np.genfromtxt(inputString4,names=['wave','flam'])
array5 = np.genfromtxt(inputString5,names=['wave','flam'])
fig = plt.figure()
axes = fig.add_subplot(111)
axes.set_title(plotTitle)
axes.set_xlabel('Wavelength (A)')
axes.set_ylabel('Flux (Flam)')
axes.plot(array1['wave'],array1['flam'],label='Original',linewidth=1)
axes.plot(array2['wave'],array2['flam'],label='tm, gm',linewidth=1)
axes.plot(array3['wave'],array3['flam'],label='tm, gp',linewidth=1)
axes.plot(array4['wave'],array4['flam'],label='tp, gm',linewidth=1)
axes.plot(array5['wave'],array5['flam'],label='tp, gp',linewidth=1)
axes.set_xlim([3000,11000])
axes.set_ylim([0,array1['flam'][1208]])
axes.legend(loc='upper right', shadow=True)
plt.grid(True)
plt.savefig(plotFileName)
plt.clf()
count = count + 5
exit()
| mit | 7,664,172,300,656,599,000 | 24.723684 | 79 | 0.66445 | false | 3.184039 | false | false | false |
ActiveState/code | recipes/Python/577919_Splash_Screen_GTK/recipe-577919.py | 1 | 1518 | import gtk
from time import sleep
class splashScreen():
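    # Minimal top-level window shown while the real application window is being built;
    # it is destroyed in the __main__ block once yourApp() has been constructed.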
def __init__(self):
#DONT connect 'destroy' event here!
self.window = gtk.Window(gtk.WINDOW_TOPLEVEL)
self.window.set_title('Your app name')
self.window.set_position(gtk.WIN_POS_CENTER)
main_vbox = gtk.VBox(False, 1)
self.window.add(main_vbox)
hbox = gtk.HBox(False, 0)
self.lbl = gtk.Label("This shouldn't take too long... :)")
self.lbl.set_alignment(0, 0.5)
main_vbox.pack_start(self.lbl, True, True)
self.window.show_all()
class yourApp():
def __init__(self):
self.window = gtk.Window(gtk.WINDOW_TOPLEVEL)
self.window.set_title('Your app name')
self.window.set_position(gtk.WIN_POS_CENTER)
self.window.connect('destroy', gtk.main_quit)
main_vbox = gtk.VBox(False, 1)
self.window.add(main_vbox)
hbox = gtk.HBox(False, 0)
self.lbl = gtk.Label('All done! :)')
self.lbl.set_alignment(0, 0.5)
main_vbox.pack_start(self.lbl, True, True)
self.window.show_all()
if __name__ == "__main__":
splScr = splashScreen()
    #If you don't do this, the splash screen will show, but won't render its contents
while gtk.events_pending():
gtk.main_iteration()
    #Here you can do all the nasty things that take some time.
sleep(3)
app = yourApp()
#We don't need splScr anymore.
splScr.window.destroy()
gtk.main()
| mit | -4,535,005,170,835,478,000 | 33.5 | 85 | 0.593544 | false | 3.350993 | false | false | false |
google/flight-lab | controller/utils/projector_test.py | 1 | 1627 | # Copyright 2018 Flight Lab authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Test-cases for utils.Projector."""
from __future__ import print_function
import logging
import sys
import projector
try:
raw_input # Python 2
except NameError:
raw_input = input # Python 3
def test(argv):
ip = argv[1]
p = projector.Projector(name='test', address=ip)
p.on('state_changed', on_state_changed)
p.start()
while True:
cmd = raw_input('Command>')
if cmd == 'on':
try:
p.power_on()
except Exception as e:
print(e)
elif cmd == 'off':
try:
p.power_off()
except Exception as e:
print(e)
elif cmd == 'exit':
break
p.stop()
def on_state_changed(old_state, new_state):
print('State changed: "{0}" => "{1}"'.format(old_state, new_state))
if __name__ == '__main__':
logger = logging.getLogger('')
logger.setLevel('DEBUG')
console_handler = logging.StreamHandler()
console_handler.setFormatter(
logging.Formatter('%(levelname)-8s %(name)-12s: %(message)s'))
logger.addHandler(console_handler)
test(sys.argv)
| apache-2.0 | -5,848,118,711,507,224,000 | 25.672131 | 74 | 0.666257 | false | 3.623608 | false | false | false |
akosiorek/attend_infer_repeat | attend_infer_repeat/cell.py | 1 | 7842 | import numpy as np
import sonnet as snt
import tensorflow as tf
from tensorflow.contrib.distributions import Bernoulli, NormalWithSoftplusScale
from modules import SpatialTransformer, ParametrisedGaussian
class AIRCell(snt.RNNCore):
"""RNN cell that implements the core features of Attend, Infer, Repeat, as described here:
https://arxiv.org/abs/1603.08575
"""
_n_transform_param = 4
def __init__(self, img_size, crop_size, n_appearance,
transition, input_encoder, glimpse_encoder, glimpse_decoder, transform_estimator, steps_predictor,
discrete_steps=True, canvas_init=None, explore_eps=None, debug=False):
"""Creates the cell
:param img_size: int tuple, size of the image
:param crop_size: int tuple, size of the attention glimpse
:param n_appearance: number of latent units describing the "what"
:param transition: an RNN cell for maintaining the internal hidden state
:param input_encoder: callable, encodes the original input image before passing it into the transition
:param glimpse_encoder: callable, encodes the glimpse into latent representation
:param glimpse_decoder: callable, decodes the glimpse from latent representation
        :param transform_estimator: callable, transforms the hidden state into parameters for the spatial transformer
:param steps_predictor: callable, predicts whether to take a step
        :param discrete_steps: boolean, steps are sampled from a Bernoulli distribution if True; if False, all steps are
taken and are weighted by the step probability
:param canvas_init: float or None, initial value for the reconstructed image. If None, the canvas is black. If
float, the canvas starts with a given value, which is trainable.
:param explore_eps: float or None; if float, it has to be \in (0., .5); step probability is clipped between
            `explore_eps` and (1 - `explore_eps`)
:param debug: boolean, adds checks for NaNs in the inputs to distributions
"""
super(AIRCell, self).__init__(self.__class__.__name__)
self._img_size = img_size
self._n_pix = np.prod(self._img_size)
self._crop_size = crop_size
self._n_appearance = n_appearance
self._transition = transition
self._n_hidden = self._transition.output_size[0]
self._sample_presence = discrete_steps
self._explore_eps = explore_eps
self._debug = debug
with self._enter_variable_scope():
self._canvas = tf.zeros(self._img_size, dtype=tf.float32)
if canvas_init is not None:
self._canvas_value = tf.get_variable('canvas_value', dtype=tf.float32, initializer=canvas_init)
self._canvas += self._canvas_value
transform_constraints = snt.AffineWarpConstraints.no_shear_2d()
self._spatial_transformer = SpatialTransformer(img_size, crop_size, transform_constraints)
self._inverse_transformer = SpatialTransformer(img_size, crop_size, transform_constraints, inverse=True)
self._transform_estimator = transform_estimator(self._n_transform_param)
self._input_encoder = input_encoder()
self._glimpse_encoder = glimpse_encoder()
self._glimpse_decoder = glimpse_decoder(crop_size)
self._what_distrib = ParametrisedGaussian(n_appearance, scale_offset=0.5,
validate_args=self._debug, allow_nan_stats=not self._debug)
self._steps_predictor = steps_predictor()
@property
def state_size(self):
return [
np.prod(self._img_size), # image
np.prod(self._img_size), # canvas
self._n_appearance, # what
self._n_transform_param, # where
self._transition.state_size, # hidden state of the rnn
1, # presence
]
@property
def output_size(self):
return [
np.prod(self._img_size), # canvas
np.prod(self._crop_size), # glimpse
self._n_appearance, # what code
self._n_appearance, # what loc
self._n_appearance, # what scale
self._n_transform_param, # where code
self._n_transform_param, # where loc
self._n_transform_param, # where scale
1, # presence prob
1 # presence
]
@property
def output_names(self):
return 'canvas glimpse what what_loc what_scale where where_loc where_scale presence_prob presence'.split()
def initial_state(self, img):
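        # Initial state per example: the flattened input image, a (possibly trainable)
        # blank canvas, zero what/where codes, the transition RNN's initial state and
        # presence set to one.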
batch_size = img.get_shape().as_list()[0]
hidden_state = self._transition.initial_state(batch_size, tf.float32, trainable=True)
where_code = tf.zeros([1, self._n_transform_param], dtype=tf.float32, name='where_init')
what_code = tf.zeros([1, self._n_appearance], dtype=tf.float32, name='what_init')
flat_canvas = tf.reshape(self._canvas, (1, self._n_pix))
where_code, what_code, flat_canvas = (tf.tile(i, (batch_size, 1)) for i in (where_code, what_code, flat_canvas))
flat_img = tf.reshape(img, (batch_size, self._n_pix))
init_presence = tf.ones((batch_size, 1), dtype=tf.float32)
return [flat_img, flat_canvas,
what_code, where_code, hidden_state, init_presence]
def _build(self, inpt, state):
"""Input is unused; it's only to force a maximum number of steps"""
img_flat, canvas_flat, what_code, where_code, hidden_state, presence = state
img_inpt = img_flat
img = tf.reshape(img_inpt, (-1,) + tuple(self._img_size))
inpt_encoding = self._input_encoder(img)
with tf.variable_scope('rnn_inpt'):
hidden_output, hidden_state = self._transition(inpt_encoding, hidden_state)
where_param = self._transform_estimator(hidden_output)
where_distrib = NormalWithSoftplusScale(*where_param,
validate_args=self._debug, allow_nan_stats=not self._debug)
where_loc, where_scale = where_distrib.loc, where_distrib.scale
where_code = where_distrib.sample()
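            # Crop the attention glimpse from the image at the sampled "where" pose.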
cropped = self._spatial_transformer(img, where_code)
with tf.variable_scope('presence'):
presence_prob = self._steps_predictor(hidden_output)
if self._explore_eps is not None:
presence_prob = self._explore_eps / 2 + (1 - self._explore_eps) * presence_prob
if self._sample_presence:
presence_distrib = Bernoulli(probs=presence_prob, dtype=tf.float32,
validate_args=self._debug, allow_nan_stats=not self._debug)
new_presence = presence_distrib.sample()
presence *= new_presence
else:
presence = presence_prob
what_params = self._glimpse_encoder(cropped)
what_distrib = self._what_distrib(what_params)
what_loc, what_scale = what_distrib.loc, what_distrib.scale
what_code = what_distrib.sample()
decoded = self._glimpse_decoder(what_code)
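        # Map the decoded glimpse back into image coordinates; its contribution to the
        # canvas is gated by the presence value below.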
inversed = self._inverse_transformer(decoded, where_code)
with tf.variable_scope('rnn_outputs'):
inversed_flat = tf.reshape(inversed, (-1, self._n_pix))
canvas_flat += presence * inversed_flat
decoded_flat = tf.reshape(decoded, (-1, np.prod(self._crop_size)))
output = [canvas_flat, decoded_flat, what_code, what_loc, what_scale, where_code, where_loc, where_scale,
presence_prob, presence]
state = [img_flat, canvas_flat,
what_code, where_code, hidden_state, presence]
return output, state | gpl-3.0 | -8,653,469,741,491,232,000 | 44.865497 | 120 | 0.623693 | false | 3.866864 | false | false | false |
PymientoProject/acien101 | AudioMixer/VLCControllWithArduino.py | 1 | 8591 | import CustomVLCClass
import serial
import time
import threading
time.sleep(20)
while True:
def inputListener():
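        # Console fallback for manual testing ('0'/'1' toggle the first two tracks,
        # '00'/'01' mute/unmute the first); note that only the Arduino listener thread
        # is actually started further down.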
inputdata = input('0 to quit the first song, 1 to quit the second song')
if(inputdata == '0'):
if a.mediaplayer.is_playing() :
a.pause()
else:
a.play()
print("Quiting 0")
inputListener() #Starting another time the inputListener
elif(inputdata == '1'):
if b.mediaplayer.is_playing():
b.pause()
else:
b.play()
print("Quiting 1")
inputListener() #Starting another time the inputListener
elif(inputdata == '00'):
a.mute()
inputListener()
elif(inputdata == '01'):
a.unmute()
inputListener()
def arduinoListener():
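        # Reads two-digit codes from the Arduino: the first digit appears to identify the
        # reader/position (0-4) and the second the tag placed on it (1-6, 0 = removed),
        # so a tag unmutes its track and removing it mutes whatever was there last.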
past0 = 0 #For counting the last chip in the field
past1 = 0
past2 = 0
past3 = 0
past4 = 0
while True:
try:
line = ser.readline()
if not line:
continue
x = line.decode('ascii', errors='replace')
if x == '00\r\n':
print("00")
if past0 == 1:
a.mute()
if past0 == 2:
b.mute()
if past0 == 3:
c.mute()
if past0 == 4:
d.mute()
if past0 == 5:
e.mute()
if past0 == 6:
f.mute()
past0 = 0
elif x == '01\r\n':
print("01")
past0 = 1
a.unmute()
elif x == '02\r\n':
print("02")
past0 = 2
b.unmute()
elif x == '03\r\n':
print("03")
past0 = 3
c.unmute()
elif x == '04\r\n':
print("04")
past0 = 4
d.unmute()
elif x == '05\r\n':
print("05")
past0 = 5
e.unmute()
elif x == '06\r\n':
print("06")
past0 = 6
f.unmute()
if x == '10\r\n':
print("10")
if past1 == 1:
a.mute()
if past1 == 2:
b.mute()
if past1 == 3:
c.mute()
if past1 == 4:
d.mute()
if past1 == 5:
e.mute()
if past1 == 6:
f.mute()
past1 = 0
elif x == '11\r\n':
print("11")
past1 = 1
a.unmute()
elif x == '12\r\n':
print("12")
past1 = 2
b.unmute()
elif x == '13\r\n':
print("13")
past1 = 3
c.unmute()
elif x == '14\r\n':
print("14")
past1 = 4
d.unmute()
elif x == '15\r\n':
print("15")
past1 = 5
e.unmute()
elif x == '16\r\n':
print("16")
past1 = 6
f.unmute()
if x == '20\r\n':
print("20")
if past2 == 1:
a.mute()
if past2 == 2:
b.mute()
if past2 == 3:
c.mute()
if past2 == 4:
d.mute()
if past2 == 5:
e.mute()
if past2 == 6:
f.mute()
                    past2 = 0
elif x == '21\r\n':
print("21")
past2 = 1
a.unmute()
elif x == '22\r\n':
print("22")
past2 = 2
b.unmute()
elif x == '23\r\n':
print("23")
past2 = 3
c.unmute()
elif x == '24\r\n':
print("24")
past2 = 4
d.unmute()
elif x == '25\r\n':
print("25")
past2 = 5
e.unmute()
elif x == '26\r\n':
print("26")
past2 = 6
f.unmute()
if x == '30\r\n':
print("30")
if past3 == 1:
a.mute()
if past3 == 2:
b.mute()
if past3 == 3:
c.mute()
if past3 == 4:
d.mute()
if past3 == 5:
e.mute()
if past3 == 6:
f.mute()
past3 = 0
elif x == '31\r\n':
print("31")
past3 = 1
a.unmute()
elif x == '32\r\n':
print("32")
past3 = 2
b.unmute()
elif x == '33\r\n':
print("33")
past3 = 3
c.unmute()
elif x == '34\r\n':
print("34")
past3 = 4
d.unmute()
elif x == '35\r\n':
print("35")
past3 = 5
e.unmute()
elif x == '36\r\n':
print("36")
past3 = 6
f.unmute()
if x == '40\r\n':
print("40")
if past4 == 1:
a.mute()
if past4 == 2:
b.mute()
if past4 == 3:
c.mute()
if past4 == 4:
d.mute()
if past4 == 5:
e.mute()
if past4 == 6:
f.mute()
past4 = 0
elif x == '41\r\n':
print("41")
past4 = 1
a.unmute()
elif x == '42\r\n':
print("42")
past4 = 2
b.unmute()
elif x == '43\r\n':
print("43")
past4 = 3
c.unmute()
elif x == '44\r\n':
print("44")
past4 = 4
d.unmute()
elif x == '45\r\n':
print("45")
past4 = 5
e.unmute()
elif x == '46\r\n':
print("46")
past4 = 6
f.unmute()
except KeyboardInterrupt:
print("exiting")
break
ser = serial.Serial('/dev/ttyAMA0', 9600, timeout=1.0)
ser.setDTR(False)
time.sleep(1)
ser.flushInput()
ser.setDTR(True)
a = CustomVLCClass.CustomVLCClass(filename="/acien101/AudioMixer/audio/1.mp3")
b = CustomVLCClass.CustomVLCClass(filename="/acien101/AudioMixer/audio/2.mp3")
c = CustomVLCClass.CustomVLCClass(filename="/acien101/AudioMixer/audio/3.mp3")
d = CustomVLCClass.CustomVLCClass(filename="/acien101/AudioMixer/audio/4.mp3")
e = CustomVLCClass.CustomVLCClass(filename="/acien101/AudioMixer/audio/5.mp3")
f = CustomVLCClass.CustomVLCClass(filename="/acien101/AudioMixer/audio/6.mp3")
inputArduinoThread = threading.Thread(target=arduinoListener, name="inputAduino")
inputArduinoThread.start()
    while a.mediaplayer.is_playing() and b.mediaplayer.is_playing():
time.sleep(0.1)
| mit | -4,285,258,228,453,292,000 | 26.802589 | 85 | 0.304505 | false | 4.623789 | false | false | false |
verejnedigital/verejne.digital | data/server.py | 1 | 1797 | """Runs the server for backend application `data`."""
import argparse
import json
from paste import httpserver
import webapp2
import status
class MyServer(webapp2.RequestHandler):
"""Abstract request handler, to be subclasses by server hooks."""
def get(self):
"""Implements actual hook logic and responds to requests."""
raise NotImplementedError('Must implement method `get`.')
  def returnJSON(self, j):
self.response.headers['Content-Type'] = 'application/json'
self.response.write(json.dumps(j, separators=(',',':')))
class SourceDataInfo(MyServer):
def get(self):
result = status.get_source_data_info()
self.returnJSON(result)
class ProdDataInfo(MyServer):
def get(self):
result = status.get_prod_data_info()
self.returnJSON(result)
class PublicDumpsInfo(MyServer):
def get(self):
result = status.get_public_dumps_info()
self.returnJSON(result)
class ColabsInfo(MyServer):
def get(self):
result = status.get_colabs_info()
self.returnJSON(result)
# Setup the webapp2 WSGI application.
app = webapp2.WSGIApplication([
('/source_data_info', SourceDataInfo),
('/prod_data_info', ProdDataInfo),
('/public_dumps_info', PublicDumpsInfo),
('/colabs_info', ColabsInfo),
], debug=False)
def main():
parser = argparse.ArgumentParser()
parser.add_argument('--listen',
help='host:port to listen on',
default='127.0.0.1:8084')
args = parser.parse_args()
host, port = args.listen.split(':')
httpserver.serve(
app,
host=host,
port=port,
request_queue_size=128,
use_threadpool=True,
threadpool_workers=32,
)
if __name__ == '__main__':
main()
| apache-2.0 | -5,313,645,500,533,935,000 | 22.96 | 69 | 0.634391 | false | 3.77521 | false | false | false |
ooici/coi-services | ion/services/sa/acquisition/data_acquisition_management_service.py | 1 | 63562 | #!/usr/bin/env python
"""Data Acquisition Management service to keep track of Data Producers, Data Sources and external data agents
and the relationships between them"""
from pyon.agent.agent import ResourceAgentClient
__author__ = 'Maurice Manning, Michael Meisinger'
from collections import deque
import logging
from copy import deepcopy
from ooi.timer import Timer, Accumulator
from pyon.core.exception import NotFound, BadRequest, ServerError
from pyon.ion.resource import ExtendedResourceContainer
from pyon.public import CFG, IonObject, log, RT, LCS, PRED, OT
from pyon.util.arg_check import validate_is_instance
from ion.services.sa.instrument.agent_configuration_builder import ExternalDatasetAgentConfigurationBuilder
from ion.util.enhanced_resource_registry_client import EnhancedResourceRegistryClient
from ion.util.stored_values import StoredValueManager
from ion.util.agent_launcher import AgentLauncher
from interface.objects import ProcessDefinition, ProcessSchedule, ProcessTarget, ProcessRestartMode
from interface.objects import Parser, DataProducer, InstrumentProducerContext, ExtDatasetProducerContext, DataProcessProducerContext
from interface.objects import AttachmentType
from interface.services.sa.idata_product_management_service import DataProductManagementServiceProcessClient
from interface.services.sa.idata_acquisition_management_service import BaseDataAcquisitionManagementService
stats = Accumulator(persist=True)
class DataAcquisitionManagementService(BaseDataAcquisitionManagementService):
def on_init(self):
self.RR2 = EnhancedResourceRegistryClient(self.clients.resource_registry)
self.DPMS = DataProductManagementServiceProcessClient(self) # TODO: Add to clients
# -----------------
# The following operations register different types of data producers
# -----------------
def register_external_data_set(self, external_dataset_id=''):
"""Register an existing external data set as data producer
@param external_dataset_id str
@retval data_producer_id str
"""
ext_dataset_obj = self.clients.resource_registry.read(external_dataset_id)
if ext_dataset_obj is None:
raise NotFound('External Data Set %s does not exist' % external_dataset_id)
        #create an ExtDatasetProducerContext to hold the state of this producer
producer_context_obj = ExtDatasetProducerContext(configuration=vars(ext_dataset_obj))
#create data producer resource and associate to this data_process_id
data_producer_obj = DataProducer(name=ext_dataset_obj.name,
description='Primary DataProducer for External Dataset %s' % ext_dataset_obj.name,
is_primary=True,
producer_context=producer_context_obj)
data_producer_id, rev = self.clients.resource_registry.create(data_producer_obj)
# Create association
self.clients.resource_registry.create_association(subject=external_dataset_id, predicate=PRED.hasDataProducer, object=data_producer_id)
return data_producer_id
def unregister_external_data_set(self, external_dataset_id=''):
"""
@param external_dataset_id str
@throws NotFound object with specified id does not exist
"""
# Verify that id is valid
external_data_set_obj = self.clients.resource_registry.read(external_dataset_id)
# List all resource ids that are objects for this data_source and has the hasDataProducer link
producers, producer_assns = self.clients.resource_registry.find_objects(
subject=external_dataset_id, predicate=PRED.hasDataProducer, id_only=True)
for producer, producer_assn in zip(producers, producer_assns):
log.debug("DataAcquisitionManagementService:unregister_external_data_set delete association %s", str(producer_assn))
self.clients.resource_registry.delete_association(producer_assn)
log.debug("DataAcquisitionManagementService:unregister_external_data_set delete producer %s", str(producer))
self.clients.resource_registry.delete(producer)
return
def register_process(self, data_process_id=''):
"""
Register an existing data process as data producer
"""
# retrieve the data_process object
data_process_obj = self.clients.resource_registry.read(data_process_id)
if data_process_obj is None:
raise NotFound('Data Process %s does not exist' % data_process_id)
producer_context_obj = DataProcessProducerContext(configuration=data_process_obj.configuration)
#create data producer resource and associate to this data_process_id
data_producer_obj = DataProducer(name=data_process_obj.name,
description='Primary DataProducer for DataProcess %s' % data_process_obj.name,
producer_context=producer_context_obj,
is_primary=True)
data_producer_id, rev = self.clients.resource_registry.create(data_producer_obj)
# Create association
self.clients.resource_registry.create_association(data_process_id, PRED.hasDataProducer, data_producer_id)
return data_producer_id
def register_event_process(self, process_id=''):
"""
Register an existing data process as data producer
"""
# retrieve the data_process object
data_process_obj = self.clients.resource_registry.read(process_id)
if data_process_obj is None:
raise NotFound('Data Process %s does not exist' % process_id)
producer_context_obj = DataProcessProducerContext(configuration=data_process_obj.process_configuration)
#create data producer resource and associate to this data_process_id
data_producer_obj = DataProducer(name=data_process_obj.name,
description='Primary DataProducer for DataProcess %s' % data_process_obj.name,
producer_context=producer_context_obj,
is_primary=True)
data_producer_id, rev = self.clients.resource_registry.create(data_producer_obj)
# Create association
self.clients.resource_registry.create_association(process_id, PRED.hasDataProducer, data_producer_id)
return data_producer_id
def unregister_process(self, data_process_id=''):
"""
        Remove the DataProducer(s) associated with this DataProcess and their associations
"""
# Verify that id is valid
input_process_obj = self.clients.resource_registry.read(data_process_id)
# List all resource ids that are objects for this data_source and has the hasDataProducer link
producers, producer_assns = self.clients.resource_registry.find_objects(subject=data_process_id, predicate=PRED.hasDataProducer, id_only=True)
for producer, producer_assn in zip(producers, producer_assns):
log.debug("DataAcquisitionManagementService:unregister_process delete association %s", str(producer_assn))
self.clients.resource_registry.delete_association(producer_assn)
log.debug("DataAcquisitionManagementService:unregister_process delete producer %s", str(producer))
log.debug("DAMS:unregister_process delete producer: %s ", str(producer) )
self.clients.resource_registry.delete(producer)
def unregister_event_process(self, process_id=''):
"""
        Remove the DataProducer(s) associated with this Process and their associations
"""
# Verify that id is valid
input_process_obj = self.clients.resource_registry.read(process_id)
# List all resource ids that are objects for this data_source and has the hasDataProducer link
producers, producer_assns = self.clients.resource_registry.find_objects(subject=process_id, predicate=PRED.hasDataProducer, id_only=True)
for producer, producer_assn in zip(producers, producer_assns):
log.debug("DataAcquisitionManagementService:unregister_process delete association %s", str(producer_assn))
self.clients.resource_registry.delete_association(producer_assn)
log.debug("DataAcquisitionManagementService:unregister_process delete producer %s", str(producer))
log.debug("DAMS:unregister_process delete producer: %s ", str(producer) )
self.clients.resource_registry.delete(producer)
def register_instrument(self, instrument_id=''):
"""
Register an existing instrument as data producer
"""
        # retrieve the instrument object
instrument_obj = self.clients.resource_registry.read(instrument_id)
if instrument_obj is None:
raise NotFound('Instrument object %s does not exist' % instrument_id)
        #create an InstrumentProducerContext to hold the state of this producer
producer_context_obj = InstrumentProducerContext(configuration=vars(instrument_obj))
#create data producer resource and associate to this data_process_id
data_producer_obj = DataProducer(name=instrument_obj.name,
description='Primary DataProducer for DataProcess %s' % instrument_obj.name,
is_primary=True,
producer_context=producer_context_obj)
data_producer_id, rev = self.clients.resource_registry.create(data_producer_obj)
# Create association
self.clients.resource_registry.create_association(instrument_id, PRED.hasDataProducer, data_producer_id)
return data_producer_id
def unregister_instrument(self, instrument_id=''):
# Verify that id is valid
input_process_obj = self.clients.resource_registry.read(instrument_id)
# List all resource ids that are objects for this data_source and has the hasDataProducer link
producers, producer_assns = self.clients.resource_registry.find_objects(subject=instrument_id, predicate=PRED.hasDataProducer, id_only=True)
for producer, producer_assn in zip(producers, producer_assns):
log.debug("DataAcquisitionManagementService:unregister_instrument delete association %s", str(producer_assn))
self.clients.resource_registry.delete_association(producer_assn)
log.debug("DataAcquisitionManagementService:unregister_instrument delete producer %s", str(producer))
self.clients.resource_registry.delete(producer)
return
def assign_data_product(self, input_resource_id='', data_product_id=''):
log.debug('assigning data product %s to resource %s', data_product_id, input_resource_id)
#Connect the producer for an existing input resource with a data product
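        # The resulting graph is: input resource -> hasOutputProduct -> data product, plus a
        # new (non-primary) DataProducer for the product whose hasParent association points
        # at the input resource's primary DataProducer.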
t = Timer() if stats.is_log_enabled() else None
# Verify that both ids are valid
#input_resource_obj = self.clients.resource_registry.read(input_resource_id) #actually, don't need this one unless producer is not found (see if below)
data_product_obj = self.clients.resource_registry.read(data_product_id)
if t:
t.complete_step('dams.assign_data_product.read_dataproduct')
#find the data producer resource associated with the source resource that is creating the data product
primary_producer_ids, _ = self.clients.resource_registry.find_objects(subject=input_resource_id, predicate=PRED.hasDataProducer, object_type=RT.DataProducer, id_only=True)
if t:
t.complete_step('dams.assign_data_product.find_producer')
if not primary_producer_ids:
self.clients.resource_registry.read(input_resource_id) # raise different NotFound if resource didn't exist
raise NotFound("Data Producer for input resource %s does not exist" % input_resource_id)
#connect the producer to the product directly
self.clients.resource_registry.create_association(subject=input_resource_id, predicate=PRED.hasOutputProduct, object=data_product_id)
if t:
t.complete_step('dams.assign_data_product.create_association.hasOutputProduct')
#create data producer resource for this data product
data_producer_obj = DataProducer(name=data_product_obj.name, description=data_product_obj.description)
data_producer_obj.producer_context.configuration = {}
data_producer_id, rev = self.clients.resource_registry.create(data_producer_obj)
if t:
t.complete_step('dams.assign_data_product.create_dataproducer')
attachments = self.clients.resource_registry.find_attachments(data_product_id, include_content=False, id_only=False)
if t:
t.complete_step('dams.assign_data_product.find_attachments')
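        # REFERENCE attachments on the product carry a parser id; register it so QC lookup
        # values can be associated with the new producer.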
for attachment in attachments:
if attachment.attachment_type == AttachmentType.REFERENCE:
parser_id = attachment.context.parser_id
if parser_id:
self.register_producer_qc_reference(data_producer_id, parser_id, attachment._id)
if t:
t.complete_step('dams.assign_data_product.register_qc')
# Associate the Product with the Producer
self.clients.resource_registry.create_association(data_product_id, PRED.hasDataProducer, data_producer_id)
if t:
t.complete_step('dams.assign_data_product.create_association.hasDataProducer')
# Associate the Producer with the main Producer
self.clients.resource_registry.create_association(data_producer_id, PRED.hasParent, primary_producer_ids[0])
if t:
t.complete_step('dams.assign_data_product.create_association.hasParent')
stats.add(t)
stats.add_value('dams.assign_data_product.attachment_count', len(attachments))
def unassign_data_product(self, input_resource_id='', data_product_id=''):
"""
Disconnect the Data Product from the Data Producer
@param data_product_id str
@throws NotFound object with specified id does not exist
"""
# Verify that both ids are valid
input_resource_obj = self.clients.resource_registry.read(input_resource_id)
data_product_obj = self.clients.resource_registry.read(data_product_id)
#find the data producer resource associated with the source resource that is creating the data product
primary_producer_ids, _ = self.clients.resource_registry.find_objects(input_resource_id, PRED.hasDataProducer, RT.DataProducer, id_only=True)
if not primary_producer_ids:
raise NotFound("Data Producer for input resource %s does not exist" % input_resource_id)
else:
log.debug("unassign_data_product: primary producer ids %s" % str(primary_producer_ids))
#find the hasDataProduct association between the data product and the input resource
associations = self.clients.resource_registry.find_associations(subject=input_resource_id, predicate=PRED.hasOutputProduct, object=data_product_id, id_only=True)
for association in associations:
log.debug("unassign_data_product: unlink input resource with data product %s" % association)
self.clients.resource_registry.delete_association(association)
#find the data producer resource associated with the source resource that is creating the data product
producers, producer_assns = self.clients.resource_registry.find_objects(data_product_id, PRED.hasDataProducer, RT.DataProducer, True)
for producer, producer_assn in zip(producers, producer_assns):
#remove the link to the data product
self.clients.resource_registry.delete_association(producer_assn)
#remove the link to the parent data producer
associations = self.clients.resource_registry.find_associations(subject=producer, predicate=PRED.hasParent, id_only=True)
for association in associations:
self.clients.resource_registry.delete_association(association)
log.debug("DAMS:unassign_data_product delete producer: %s ", str(producer) )
self.clients.resource_registry.delete(producer)
return
def assign_data_product_source(self, data_product_id='', source_id=''):
# Connect a Data Product to the data source, either a Site or a Device
if source_id:
#connect the producer to the product directly
self.clients.resource_registry.create_association(data_product_id, PRED.hasSource, source_id)
return
def unassign_data_product_source(self, data_product_id='', source_id=''):
# Disconnect the Data Product from the data source
# Find and break association with either a Site or a Decvice
assocs = self.clients.resource_registry.find_associations(data_product_id, PRED.hasSource, source_id)
if not assocs or len(assocs) == 0:
raise NotFound("DataProduct to source association for data product id %s to source %s does not exist" % (data_product_id, source_id))
association_id = assocs[0]._id
self.clients.resource_registry.delete_association(association_id)
return
#
# def create_data_producer(name='', description=''):
# """Create a data producer resource, create a stream reource via DM then associate the two resources. Currently, data producers and streams are one-to-one. If the data producer is a process, connect the data producer to any parent data producers.
#
# @param name str
# @param description str
# @retval data_producer_id str
# @throws BadRequest if object passed has _id or _rev attribute
# """
# pass
#
# def update_data_producer(self, data_producer=None):
# '''
# Update an existing data producer.
#
# @param data_producer The data_producer object with updated properties.
# @retval success Boolean to indicate successful update.
# @todo Add logic to validate optional attributes. Is this interface correct?
# '''
# # Return Value
# # ------------
# # {success: true}
# #
# log.debug("Updating data_producer object: %s" % data_producer.name)
# return self.clients.resource_registry.update(data_producer)
#
# def read_data_producer(self, data_producer_id=''):
# '''
# Get an existing data_producer object.
#
# @param data_producer_id The id of the stream.
# @retval data_producer The data_producer object.
# @throws NotFound when data_producer doesn't exist.
# '''
# # Return Value
# # ------------
# # data_producer: {}
# #
# log.debug("Reading data_producer object id: %s" % data_producer_id)
# data_producer_obj = self.clients.resource_registry.read(data_producer_id)
#
# return data_producer_obj
#
# def delete_data_producer(self, data_producer_id=''):
# '''
# Delete an existing data_producer.
#
# @param data_producer_id The id of the stream.
# @retval success Boolean to indicate successful deletion.
# @throws NotFound when data_producer doesn't exist.
# '''
# # Return Value
# # ------------
# # {success: true}
# #
# log.debug("Deleting data_producer id: %s" % data_producer_id)
#
# return self.clients.resource_registry.lcs_delete(data_producer_id)
#
#
# def force_delete_data_producer(self, data_producer_id=''):
# self._remove_associations(data_producer_id)
# self.clients.resource_registry.delete(data_producer_id)
# -----------------
# The following operations manage EOI resources
# -----------------
##########################################################################
#
# External Data Provider
#
##########################################################################
def create_external_data_provider(self, external_data_provider=None):
# Persist ExternalDataProvider object and return object _id as OOI id
return self.RR2.create(external_data_provider, RT.ExternalDataProvider)
def update_external_data_provider(self, external_data_provider=None):
# Overwrite ExternalDataProvider object
self.RR2.update(external_data_provider, RT.ExternalDataProvider)
def read_external_data_provider(self, external_data_provider_id=''):
# Read ExternalDataProvider object with _id matching passed user id
return self.RR2.read(external_data_provider_id, RT.ExternalDataProvider)
def delete_external_data_provider(self, external_data_provider_id=''):
self.RR2.lcs_delete(external_data_provider_id, RT.ExternalDataProvider)
def force_delete_external_data_provider(self, external_data_provider_id=''):
self.RR2.force_delete(external_data_provider_id, RT.ExternalDataProvider)
##########################################################################
#
# Data Source
#
##########################################################################
def create_data_source(self, data_source=None):
# Persist DataSource object and return object _id as OOI id
return self.RR2.create(data_source, RT.DataSource)
def update_data_source(self, data_source=None):
# Overwrite DataSource object
self.RR2.update(data_source, RT.DataSource)
def read_data_source(self, data_source_id=''):
# Read DataSource object with _id matching passed user id
log.debug("Reading DataSource object id: %s" % data_source_id)
data_source_obj = self.RR2.read(data_source_id, RT.DataSource)
return data_source_obj
def delete_data_source(self, data_source_id=''):
# Read and delete specified DataSource object
log.debug("Deleting DataSource id: %s" % data_source_id)
self.RR2.lcs_delete(data_source_id, RT.DataSource)
return
def force_delete_data_source(self, data_source_id=''):
self.RR2.force_delete(data_source_id, RT.DataSource)
def create_data_source_model(self, data_source_model=None):
# Persist DataSourceModel object and return object _id as OOI id
return self.RR2.create(data_source_model, RT.DataSourceModel)
def update_data_source_model(self, data_source_model=None):
# Overwrite DataSourceModel object
self.RR2.update(data_source_model, RT.DataSourceModel)
def read_data_source_model(self, data_source_model_id=''):
# Read DataSourceModel object with _id matching passed user id
return self.RR2.read(data_source_model_id, RT.DataSourceModel)
def delete_data_source_model(self, data_source_model_id=''):
# Read and delete specified ExternalDatasetModel object
self.RR2.lcs_delete(data_source_model_id, RT.DataSourceModel)
return
def force_delete_data_source_model(self, data_source_model_id=''):
self.RR2.force_delete(data_source_model_id, RT.DataSourceModel)
def create_data_source_agent(self, data_source_agent=None, data_source_model_id='' ):
# Persist ExternalDataSourcAgent object and return object _id as OOI id
data_source_agent_id = self.RR2.create(data_source_agent, RT.DataSourceAgent)
if data_source_model_id:
self.RR2.assign_data_source_model_to_data_source_agent_with_has_model(data_source_model_id, data_source_agent_id)
return data_source_agent_id
def update_data_source_agent(self, data_source_agent=None):
# Overwrite DataSourceAgent object
self.RR2.update(data_source_agent, RT.DataSourceAgent)
def read_data_source_agent(self, data_source_agent_id=''):
# Read DataSourceAgent object with _id matching passed user id
data_source_agent = self.RR2.read(data_source_agent_id, RT.DataSourceAgent)
return data_source_agent
def delete_data_source_agent(self, data_source_agent_id=''):
# Read and delete specified DataSourceAgent object
self.RR2.lcs_delete(data_source_agent_id, RT.DataSourceAgent)
def force_delete_data_source_agent(self, data_source_agent_id=''):
self.RR2.force_delete(data_source_agent_id, RT.DataSourceAgent)
def create_data_source_agent_instance(self, data_source_agent_instance=None, data_source_agent_id='', data_source_id=''):
# Persist DataSourceAgentInstance object and return object _id as OOI id
data_source_agent_instance_id = self.RR2.create(data_source_agent_instance, RT.DataSourceAgentInstance)
if data_source_id:
self.RR2.assign_data_source_agent_instance_to_data_source_with_has_agent_instance(data_source_agent_instance_id, data_source_id)
if data_source_agent_id:
self.RR2.assign_data_source_agent_to_data_source_agent_instance_with_has_agent_definition(data_source_agent_id, data_source_agent_instance_id)
return data_source_agent_instance_id
def update_data_source_agent_instance(self, data_source_agent_instance=None):
# Overwrite DataSourceAgentInstance object
self.RR2.update(data_source_agent_instance, RT.DataSourceAgentInstance)
def read_data_source_agent_instance(self, data_source_agent_instance_id=''):
# Read DataSourceAgentInstance object with _id matching passed user id
data_source_agent_instance = self.RR2.read(data_source_agent_instance_id, RT.DataSourceAgentInstance)
return data_source_agent_instance
def delete_data_source_agent_instance(self, data_source_agent_instance_id=''):
# Read and delete specified DataSourceAgentInstance object
self.RR2.lcs_delete(data_source_agent_instance_id, RT.DataSourceAgentInstance)
def force_delete_data_source_agent_instance(self, data_source_agent_instance_id=''):
self.RR2.force_delete(data_source_agent_instance_id, RT.DataSourceAgentInstance)
def start_data_source_agent_instance(self, data_source_agent_instance_id=''):
"""Launch an data source agent instance process and return its process id. Agent instance resource
must exist and be associated with an external data source
@param data_source_agent_instance_id str
@retval process_id str
@throws NotFound object with specified id does not exist
"""
pass
def stop_data_source_agent_instance(self, data_source_agent_instance_id=''):
"""Deactivate the agent instance process
@param data_source_agent_instance_id str
@throws NotFound object with specified id does not exist
"""
pass
##########################################################################
#
# External Data Set
#
##########################################################################
def create_external_dataset(self, external_dataset=None, external_dataset_model_id=''):
# Persist ExternalDataSet object and return object _id as OOI id
external_dataset_id = self.RR2.create(external_dataset, RT.ExternalDataset)
if external_dataset_model_id:
self.RR2.assign_external_dataset_model_to_external_dataset_with_has_model(external_dataset_model_id, external_dataset_id)
return external_dataset_id
def update_external_dataset(self, external_dataset=None):
# Overwrite ExternalDataSet object
self.RR2.update(external_dataset, RT.ExternalDataset)
def read_external_dataset(self, external_dataset_id=''):
# Read ExternalDataSet object with _id matching passed user id
external_dataset = self.RR2.read(external_dataset_id, RT.ExternalDataset)
return external_dataset
def delete_external_dataset(self, external_dataset_id=''):
# Read and delete specified ExternalDataSet object
self.RR2.lcs_delete(external_dataset_id, RT.ExternalDataset)
def force_delete_external_dataset(self, external_dataset_id=''):
self.RR2.force_delete(external_dataset_id, RT.ExternalDataset)
def create_external_dataset_model(self, external_dataset_model=None):
# Persist ExternalDatasetModel object and return object _id as OOI id
return self.RR2.create(external_dataset_model, RT.ExternalDatasetModel)
def update_external_dataset_model(self, external_dataset_model=None):
# Overwrite ExternalDatasetModel object
self.RR2.update(external_dataset_model, RT.ExternalDatasetModel)
def read_external_dataset_model(self, external_dataset_model_id=''):
# Read ExternalDatasetModel object with _id matching passed user id
external_dataset_model = self.RR2.read(external_dataset_model_id, RT.ExternalDatasetModel)
return external_dataset_model
def delete_external_dataset_model(self, external_dataset_model_id=''):
# Read and delete specified ExternalDatasetModel object
self.RR2.lcs_delete(external_dataset_model_id, RT.ExternalDatasetModel)
def force_delete_external_dataset_model(self, external_dataset_model_id=''):
self.RR2.force_delete(external_dataset_model_id, RT.ExternalDatasetModel)
#
# ExternalDatasetAgent
#
def create_external_dataset_agent(self, external_dataset_agent=None, external_dataset_model_id=''):
# Persist ExternalDatasetAgent object and return object _id as OOI id
agent_id = self.RR2.create(external_dataset_agent, RT.ExternalDatasetAgent)
if external_dataset_model_id:
# NOTE: external_dataset_model_id can be any model type
self.clients.resource_registry.create_association(agent_id, PRED.hasModel, external_dataset_model_id)
# Create the process definition to launch the agent
process_definition = ProcessDefinition()
process_definition.name = "ProcessDefinition for ExternalDatasetAgent %s" % external_dataset_agent.name
process_definition.executable['url'] = external_dataset_agent.agent_uri
process_definition.executable['module'] = external_dataset_agent.agent_module or 'ion.agents.data.dataset_agent'
process_definition.executable['class'] = external_dataset_agent.agent_class or 'DataSetAgent'
process_definition_id = self.clients.process_dispatcher.create_process_definition(process_definition=process_definition)
log.debug("external_dataset_agent has process definition id %s", process_definition_id)
# Associate the agent and the process def
self.RR2.assign_process_definition_to_external_dataset_agent_with_has_process_definition(process_definition_id, agent_id)
return agent_id
def update_external_dataset_agent(self, external_dataset_agent=None):
# Overwrite ExternalDataAgent object
self.RR2.update(external_dataset_agent, RT.ExternalDatasetAgent)
def read_external_dataset_agent(self, external_dataset_agent_id=''):
# Read ExternalDatasetAgent object with _id matching passed user id
external_dataset_agent = self.RR2.read(external_dataset_agent_id, RT.ExternalDatasetAgent)
return external_dataset_agent
def delete_external_dataset_agent(self, external_dataset_agent_id=''):
# Read and delete specified ExternalDataAgent object
self.RR2.lcs_delete(external_dataset_agent_id, RT.ExternalDatasetAgent)
def force_delete_external_dataset_agent(self, external_dataset_agent_id=''):
self.RR2.force_delete(external_dataset_agent_id, RT.ExternalDatasetAgent)
def assign_model_to_external_dataset_agent(self, model_id='', external_dataset_agent_id=''):
self.clients.resource_registry.create_association(external_dataset_agent_id, PRED.hasModel, model_id)
def unassign_model_from_external_dataset_agent(self, model_id='', external_dataset_agent_id=''):
self.clients.resource_registry.delete_association((external_dataset_agent_id, PRED.hasModel, model_id))
def assign_external_data_agent_to_agent_instance(self, external_data_agent_id='', agent_instance_id=''):
#Connect the agent with an agent instance
data_source = self.clients.resource_registry.read(external_data_agent_id)
agent_instance = self.clients.resource_registry.read(agent_instance_id)
log.debug("associating: external dataset agent instance %s hasAgentDefinition %s", agent_instance_id, external_data_agent_id)
# check if the association already exists
associations = self.clients.resource_registry.find_associations(agent_instance_id, PRED.hasAgentDefinition, external_data_agent_id, id_only=True)
log.trace('found associations: %r', associations)
if not associations:
self.clients.resource_registry.create_association(agent_instance_id, PRED.hasAgentDefinition, external_data_agent_id)
def unassign_external_data_agent_from_agent_instance(self, external_data_agent_id='', agent_instance_id=''):
data_source = self.clients.resource_registry.read(external_data_agent_id)
agent_instance = self.clients.resource_registry.read(agent_instance_id)
# delete the associations
self.clients.resource_registry.delete_association((agent_instance_id, PRED.hasAgentDefinition, external_data_agent_id))
def prepare_external_dataset_agent_support(self, external_dataset_agent_id=''):
#TODO - does this have to be filtered by Org ( is an Org parameter needed )
extended_resource_handler = ExtendedResourceContainer(self)
resource_data = extended_resource_handler.create_prepare_resource_support(external_dataset_agent_id, OT.ExternalDatasetAgentPrepareSupport)
        #Fill out service request information for creating an external dataset agent
extended_resource_handler.set_service_requests(resource_data.create_request,
'data_acquisition_management',
'create_external_dataset_agent',
{ "external_dataset_agent": "$(external_dataset_agent)" })
        #Fill out service request information for updating an external dataset agent
extended_resource_handler.set_service_requests(resource_data.update_request,
'data_acquisition_management',
'update_external_dataset_agent',
{ "external_dataset_agent": "$(external_dataset_agent)" })
#Fill out service request information for assigning a InstrumentModel
extended_resource_handler.set_service_requests(resource_data.associations['InstrumentModel'].assign_request,
'data_acquisition_management',
'assign_model_to_external_dataset_agent',
{"model_id": "$(instrument_model_id)",
"external_dataset_agent_id": external_dataset_agent_id })
#Fill out service request information for unassigning a InstrumentModel
extended_resource_handler.set_service_requests(resource_data.associations['InstrumentModel'].unassign_request,
'data_acquisition_management',
'unassign_model_from_external_dataset_agent',
{"model_id": "$(instrument_model_id)",
"external_dataset_agent_id": external_dataset_agent_id })
#Fill out service request information for assigning a PlatformModel
extended_resource_handler.set_service_requests(resource_data.associations['PlatformModel'].assign_request,
'data_acquisition_management',
'assign_model_to_external_dataset_agent',
{"model_id": "$(platform_model_id)",
"external_dataset_agent_id": external_dataset_agent_id })
#Fill out service request information for unassigning a PlatformModel
extended_resource_handler.set_service_requests(resource_data.associations['PlatformModel'].unassign_request,
'data_acquisition_management',
'unassign_model_from_external_dataset_agent',
{"model_id": "$(platform_model_id)",
"external_dataset_agent_id": external_dataset_agent_id })
#Fill out service request information for assigning a ExternalDatasetAgentInstance
extended_resource_handler.set_service_requests(resource_data.associations['ExternalDatasetAgentInstance'].assign_request,
'data_acquisition_management',
'assign_external_data_agent_to_agent_instance',
{"external_data_agent_id": external_dataset_agent_id,
"agent_instance_id": "$(external_dataset_agent_instance_id)" })
#Fill out service request information for unassigning a ExternalDatasetAgentInstance
extended_resource_handler.set_service_requests(resource_data.associations['ExternalDatasetAgentInstance'].unassign_request,
'data_acquisition_management',
'unassign_external_data_agent_from_agent_instance',
{"external_data_agent_id": external_dataset_agent_id,
"agent_instance_id": "$(external_dataset_agent_instance_id)" })
return resource_data
#
# ExternalDatasetAgentInstance
#
def create_external_dataset_agent_instance(self, external_dataset_agent_instance=None, external_dataset_agent_id='', external_dataset_id=''):
# Persist ExternalDatasetAgentInstance object and return object _id as OOI id
external_dataset_agent_instance_id = self.RR2.create(external_dataset_agent_instance, RT.ExternalDatasetAgentInstance)
if external_dataset_id:
self.RR2.assign_external_dataset_agent_instance_to_external_dataset_with_has_agent_instance(
external_dataset_agent_instance_id, external_dataset_id)
if external_dataset_agent_id:
self.assign_external_data_agent_to_agent_instance(external_dataset_agent_id, external_dataset_agent_instance_id)
log.debug('created dataset agent instance %s, agent id=%s', external_dataset_agent_instance_id, external_dataset_agent_id)
return external_dataset_agent_instance_id
def update_external_dataset_agent_instance(self, external_dataset_agent_instance=None):
        # Overwrite ExternalDatasetAgentInstance object
self.RR2.update(external_dataset_agent_instance, RT.ExternalDatasetAgentInstance)
def read_external_dataset_agent_instance(self, external_dataset_agent_instance_id=''):
        # Read ExternalDatasetAgentInstance object with _id matching the passed id
external_dataset_agent_instance = self.RR2.read(external_dataset_agent_instance_id, RT.ExternalDatasetAgentInstance)
return external_dataset_agent_instance
def delete_external_dataset_agent_instance(self, external_dataset_agent_instance_id=''):
self.RR2.lcs_delete(external_dataset_agent_instance_id, RT.ExternalDatasetAgentInstance)
def force_delete_external_dataset_agent_instance(self, external_dataset_agent_instance_id=''):
self.RR2.force_delete(external_dataset_agent_instance_id, RT.ExternalDatasetAgentInstance)
def assign_external_dataset_agent_instance_to_device(self, external_dataset_agent_instance_id='', device_id=''):
self.clients.resource_registry.create_association(device_id, PRED.hasAgentInstance, external_dataset_agent_instance_id)
def unassign_external_dataset_agent_instance_from_device(self, external_dataset_agent_instance_id='', device_id=''):
self.clients.resource_registry.delete_association((device_id, PRED.hasAgentInstance, external_dataset_agent_instance_id))
def _assert_persistence_on(self, config_builder):
if not config_builder or RT.DataProduct not in config_builder.associated_objects:
return
data_products = config_builder.associated_objects[RT.DataProduct]
if config_builder._get_device().type_ == RT.PlatformDevice:
for dp in data_products:
if self.DPMS.is_persisted(dp._id):
return
raise BadRequest("Cannot start agent - data product persistence is not activated!")
else:
parsed_dp_id = None
for dp in data_products:
if dp.processing_level_code == "Parsed":
parsed_dp_id = dp._id
break
if parsed_dp_id:
if not self.DPMS.is_persisted(parsed_dp_id):
raise BadRequest("Cannot start agent - data product persistence is not activated!")
else:
log.warn("Cannot determine if persistence is activated for agent instance=%s", config_builder.agent_instance_obj._id)
def start_external_dataset_agent_instance(self, external_dataset_agent_instance_id=''):
"""Launch an external dataset agent instance process and return its process id.
Agent instance resource must exist and be associated with an external dataset or device and an agent definition
@param external_dataset_agent_instance_id str
@retval process_id str
@throws NotFound object with specified id does not exist
"""
#todo: may want to call retrieve_external_dataset_agent_instance here
#todo: if instance running, then return or throw
        #todo: if instance exists and dataset_agent_instance_obj.dataset_agent_config is completed then just schedule_process
dataset_agent_instance_obj = self.clients.resource_registry.read(external_dataset_agent_instance_id)
# can be a Device or ExternalDataset
source_id = self.clients.resource_registry.read_subject(
predicate=PRED.hasAgentInstance, object=external_dataset_agent_instance_id, id_only=True)
ext_dataset_agent_obj = self.clients.resource_registry.read_object(
object_type=RT.ExternalDatasetAgent, predicate=PRED.hasAgentDefinition, subject=external_dataset_agent_instance_id, id_only=False)
process_definition_id = self.clients.resource_registry.read_object(
subject=ext_dataset_agent_obj._id, predicate=PRED.hasProcessDefinition, object_type=RT.ProcessDefinition, id_only=True)
# Agent launch
config_builder = ExternalDatasetAgentConfigurationBuilder(self.clients)
try:
config_builder.set_agent_instance_object(dataset_agent_instance_obj)
config = config_builder.prepare()
log.trace("Using dataset agent configuration: %s", config)
except Exception:
log.error('failed to launch', exc_info=True)
raise ServerError('failed to launch')
# Check that persistence is on
self._assert_persistence_on(config_builder)
# Save the config into an object in the object store which will be passed to the agent by the container.
config_builder.record_launch_parameters(config)
launcher = AgentLauncher(self.clients.process_dispatcher)
process_id = launcher.launch(config, config_builder._get_process_definition()._id)
if not process_id:
raise ServerError("Launched external dataset agent instance but no process_id")
launcher.await_launch(10.0)
return process_id
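    # A minimal usage sketch (names are illustrative assumptions; it presumes
    # an ExternalDatasetAgentInstance that is already associated with a
    # device/dataset and an agent definition):
    #
    #   pid = self.start_external_dataset_agent_instance(eda_instance_id)
    #   # ... agent process runs and ingests data ...
    #   self.stop_external_dataset_agent_instance(eda_instance_id)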
def stop_external_dataset_agent_instance(self, external_dataset_agent_instance_id=''):
"""
Deactivate the agent instance process
"""
        # This dataset agent instance could be linked to an external dataset or an instrument device. Retrieve whichever is the data producer.
external_dataset_device_ids, _ = self.clients.resource_registry.find_subjects( predicate=PRED.hasAgentInstance, object=external_dataset_agent_instance_id, id_only=True)
if len(external_dataset_device_ids) != 1:
raise NotFound("ExternalDatasetAgentInstance resource is not correctly associated with an ExternalDataset or InstrumentDevice" )
agent_process_id = ResourceAgentClient._get_agent_process_id(external_dataset_device_ids[0])
try:
# Cancels the execution of the given process id.
self.clients.process_dispatcher.cancel_process(agent_process_id)
finally:
# Save the process state
agent_instance_res = self.clients.resource_registry.read(external_dataset_agent_instance_id)
old_state = None
try:
old_state,_ = self.container.state_repository.get_state(agent_process_id)
old_state["_prior_agent_process_id"] = agent_process_id
except NotFound:
log.warn("Could not find process state for agent instance %s", external_dataset_agent_instance_id)
if old_state and isinstance(old_state, dict):
agent_instance_res.saved_agent_state = old_state
else:
agent_instance_res.saved_agent_state = {}
self.clients.resource_registry.update(agent_instance_res)
def prepare_external_dataset_agent_instance_support(self, external_dataset_agent_instance_id=''):
#TODO - does this have to be filtered by Org ( is an Org parameter needed )
extended_resource_handler = ExtendedResourceContainer(self)
resource_data = extended_resource_handler.create_prepare_resource_support(external_dataset_agent_instance_id, OT.ExternalDatasetAgentInstancePrepareSupport)
        #Fill out service request information for creating an external dataset agent instance
extended_resource_handler.set_service_requests(resource_data.create_request,
'data_acquisition_management',
'create_external_dataset_agent_instance',
{"external_dataset_agent_instance": "$(external_dataset_agent_instance)" })
        #Fill out service request information for updating an external dataset agent instance
extended_resource_handler.set_service_requests(resource_data.update_request,
'data_acquisition_management',
'update_external_dataset_agent_instance',
{"external_dataset_agent_instance": "$(external_dataset_agent_instance)" })
        #Fill out service request information for starting an external dataset agent instance
extended_resource_handler.set_service_requests(resource_data.start_request,
'data_acquisition_management',
'start_external_dataset_agent_instance',
{"external_dataset_agent_instance_id": "$(external_dataset_agent_instance_id)" })
        #Fill out service request information for stopping an external dataset agent instance
extended_resource_handler.set_service_requests(resource_data.stop_request,
'data_acquisition_management',
'stop_external_dataset_agent_instance',
{"external_dataset_agent_instance_id": "$(external_dataset_agent_instance_id)" })
#Fill out service request information for assigning a InstrumentDevice
extended_resource_handler.set_service_requests(resource_data.associations['InstrumentDevice'].assign_request,
'data_acquisition_management',
'assign_external_dataset_agent_instance_to_device',
{"device_id": "$(instrument_device_id)",
"external_dataset_agent_instance_id": external_dataset_agent_instance_id })
#Fill out service request information for unassigning a InstrumentDevice
extended_resource_handler.set_service_requests(resource_data.associations['InstrumentDevice'].unassign_request,
'data_acquisition_management',
'unassign_external_dataset_agent_instance_from_device',
{"device_id": "$(instrument_device_id)",
"external_dataset_agent_instance_id": external_dataset_agent_instance_id })
#Fill out service request information for assigning a PlatformDevice
extended_resource_handler.set_service_requests(resource_data.associations['PlatformDevice'].assign_request,
'data_acquisition_management',
'assign_external_dataset_agent_instance_to_device',
{"device_id": "$(platform_device_id)",
"external_dataset_agent_instance_id": external_dataset_agent_instance_id })
#Fill out service request information for unassigning a PlatformDevice
extended_resource_handler.set_service_requests(resource_data.associations['PlatformDevice'].unassign_request,
'data_acquisition_management',
'unassign_external_dataset_agent_instance_from_device',
{"device_id": "$(platform_device_id)",
"external_dataset_agent_instance_id": external_dataset_agent_instance_id })
        #Fill out service request information for assigning an ExternalDatasetAgent
extended_resource_handler.set_service_requests(resource_data.associations['ExternalDatasetAgent'].assign_request,
'data_acquisition_management',
'assign_external_data_agent_to_agent_instance',
{"external_data_agent_id": "$(external_dataset_agent_id)",
"agent_instance_id": external_dataset_agent_instance_id })
        #Fill out service request information for unassigning an ExternalDatasetAgent
extended_resource_handler.set_service_requests(resource_data.associations['ExternalDatasetAgent'].unassign_request,
'data_acquisition_management',
'unassign_external_data_agent_from_agent_instance',
{"external_data_agent_id": "$(external_dataset_agent_id)",
"agent_instance_id": external_dataset_agent_instance_id })
return resource_data
def retrieve_external_dataset_agent_instance(self, external_dataset_id=''):
"""
Retrieve the agent instance for an external dataset and check if it is running
"""
#Connect the data source with an external data provider
data_set = self.clients.resource_registry.read(external_dataset_id)
# check if the association already exists
ai_ids, _ = self.clients.resource_registry.find_objects(external_dataset_id, PRED.hasAgentInstance, id_only=True)
if len(ai_ids) > 1:
raise NotFound("ExternalDataset resource %s is associated with multiple agent instances" % external_dataset_id)
        if not ai_ids:
return None, None
else:
if not ResourceAgentClient._get_agent_process_id(external_dataset_id):
active = False
else:
active = True
return ai_ids[0], active
##########################################################################
#
# Resource Assign Functions
#
##########################################################################
def assign_data_source_to_external_data_provider(self, data_source_id='', external_data_provider_id=''):
#Connect the data source with an external data provider
data_source = self.clients.resource_registry.read(data_source_id)
agent_instance = self.clients.resource_registry.read(external_data_provider_id)
# check if the association already exists
associations = self.clients.resource_registry.find_associations(data_source_id, PRED.hasProvider, external_data_provider_id, id_only=True)
if not associations:
self.clients.resource_registry.create_association(data_source_id, PRED.hasProvider, external_data_provider_id)
def unassign_data_source_from_external_data_provider(self, data_source_id='', external_data_provider_id=''):
#Disconnect the data source from the external data provider
data_source = self.clients.resource_registry.read(data_source_id)
agent_instance = self.clients.resource_registry.read(external_data_provider_id)
# delete the associations
self.clients.resource_registry.delete_association((data_source_id, PRED.hasProvider, external_data_provider_id))
def assign_data_source_to_data_model(self, data_source_id='', data_source_model_id=''):
#Connect the data source with an external data model
data_source = self.clients.resource_registry.read(data_source_id)
agent_instance = self.clients.resource_registry.read(data_source_model_id)
# check if the association already exists
associations = self.clients.resource_registry.find_associations(data_source_id, PRED.hasModel, data_source_model_id, id_only=True)
if not associations:
self.clients.resource_registry.create_association(data_source_id, PRED.hasModel, data_source_model_id)
def unassign_data_source_from_data_model(self, data_source_id='', data_source_model_id=''):
        #Disconnect the data source from the external data model
data_source = self.clients.resource_registry.read(data_source_id)
agent_instance = self.clients.resource_registry.read(data_source_model_id)
# delete the associations
self.clients.resource_registry.delete_association((data_source_id, PRED.hasModel, data_source_model_id))
def assign_external_dataset_to_agent_instance(self, external_dataset_id='', agent_instance_id=''):
#Connect the agent instance with an external data set
data_source = self.clients.resource_registry.read(external_dataset_id)
agent_instance = self.clients.resource_registry.read(agent_instance_id)
log.debug("associating: external dataset %s hasAgentInstance %s", external_dataset_id, agent_instance_id)
# check if the association already exists
associations = self.clients.resource_registry.find_associations(external_dataset_id, PRED.hasAgentInstance, agent_instance_id, id_only=True)
if not associations:
self.clients.resource_registry.create_association(external_dataset_id, PRED.hasAgentInstance, agent_instance_id)
def unassign_external_dataset_from_agent_instance(self, external_dataset_id='', agent_instance_id=''):
data_source = self.clients.resource_registry.read(external_dataset_id)
agent_instance = self.clients.resource_registry.read(agent_instance_id)
# delete the associations
self.clients.resource_registry.delete_association((external_dataset_id, PRED.hasAgentInstance, agent_instance_id))
def assign_dataset_agent_to_external_dataset_model(self, dataset_agent_id='', external_dataset_model_id=''):
#Connect the external data agent with an external data model
external_data_agent = self.clients.resource_registry.read(dataset_agent_id)
external_dataset_model = self.clients.resource_registry.read(external_dataset_model_id)
# check if the association already exists
associations = self.clients.resource_registry.find_associations(dataset_agent_id, PRED.hasModel, external_dataset_model_id, id_only=True)
if not associations:
self.clients.resource_registry.create_association(dataset_agent_id, PRED.hasModel, external_dataset_model_id)
def unassign_dataset_agent_from_external_dataset_model(self, dataset_agent_id='', external_dataset_model_id=''):
        #Disconnect the external data agent from the external data model
dataset_agent = self.clients.resource_registry.read(dataset_agent_id)
external_dataset_model = self.clients.resource_registry.read(external_dataset_model_id)
# delete the associations
self.clients.resource_registry.delete_association((dataset_agent_id, PRED.hasModel, external_dataset_model_id))
def assign_external_dataset_to_data_source(self, external_dataset_id='', data_source_id=''):
#Connect the external data set to a data source
data_source = self.clients.resource_registry.read(external_dataset_id)
agent_instance = self.clients.resource_registry.read(data_source_id)
# check if the association already exists
        associations = self.clients.resource_registry.find_associations(external_dataset_id, PRED.hasDataSource, data_source_id, id_only=True)
if not associations:
self.clients.resource_registry.create_association(external_dataset_id, PRED.hasDataSource, data_source_id)
def unassign_external_dataset_from_data_source(self, external_dataset_id='', data_source_id=''):
        #Disconnect the external data set from the data source
data_source = self.clients.resource_registry.read(external_dataset_id)
agent_instance = self.clients.resource_registry.read(data_source_id)
# delete the associations
self.clients.resource_registry.delete_association((external_dataset_id, PRED.hasDataSource, data_source_id))
def create_parser(self, parser=None):
parser_id, rev = self.clients.resource_registry.create(parser)
return parser_id
def read_parser(self, parser_id=''):
parser = self.clients.resource_registry.read(parser_id)
validate_is_instance(parser,Parser,'The specified identifier does not correspond to a Parser resource')
return parser
def delete_parser(self, parser_id=''):
self.clients.resource_registry.delete(parser_id)
return True
def update_parser(self, parser=None):
if parser:
self.clients.resource_registry.update(parser)
def register_producer_qc_reference(self, producer_id='', parser_id='', attachment_id=''):
log.debug('register_producer_qc_reference: %s %s %s', producer_id, parser_id, attachment_id)
attachment = self.clients.resource_registry.read_attachment(attachment_id, include_content=True)
document = attachment.content
document_keys = self.parse_qc_reference(parser_id, document) or []
producer_obj = self.clients.resource_registry.read(producer_id)
if 'qc_keys' in producer_obj.producer_context.configuration:
producer_obj.producer_context.configuration['qc_keys'].extend(document_keys)
else:
producer_obj.producer_context.configuration['qc_keys'] = document_keys
self.clients.resource_registry.update(producer_obj)
return True
def parse_qc_reference(self, parser_id='', document=None):
document_keys = []
if document is None:
raise BadRequest('Empty Document')
parser = self.read_parser(parser_id=parser_id)
try:
module = __import__(parser.module, fromlist=[parser.method])
method = getattr(module, parser.method)
except ImportError:
raise BadRequest('No import named {0} found.'.format(parser.module))
except AttributeError:
raise BadRequest('No method named {0} in {1}.'.format(parser.method, parser.module))
except:
log.exception('Failed to parse document')
raise
svm = StoredValueManager(self.container)
for key, doc in method(document):
try:
svm.stored_value_cas(key, doc)
document_keys.append(key)
except:
log.error('Error parsing a row in document.')
return document_keys
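    # Sketch of a parser method compatible with parse_qc_reference (the name
    # and the CSV layout below are illustrative assumptions only); Parser.module
    # and Parser.method must resolve to a callable that yields (key, document)
    # pairs:
    #
    #   def parse_qc_reference_doc(document):
    #       for row in document.splitlines():
    #           key, _, value = row.partition(',')
    #           yield key, {'value': value}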
def list_qc_references(self, data_product_id=''):
''' Performs a breadth-first traversal of the provenance for a data product in an attempt to collect all the document keys'''
document_keys = []
producer_ids, _ = self.clients.resource_registry.find_objects(subject=data_product_id, predicate=PRED.hasDataProducer, id_only=True)
if not len(producer_ids):
return []
producer_id = producer_ids.pop(0)
def traversal(owner_id):
def edges(resource_ids=[]):
retval = []
if not isinstance(resource_ids, list):
resource_ids = list(resource_ids)
for resource_id in resource_ids:
retval.extend(self.clients.resource_registry.find_objects(subject=resource_id, predicate=PRED.hasParent,id_only=True)[0])
return retval
visited_resources = deque([producer_id] + edges([owner_id]))
traversal_queue = deque()
done = False
t = None
while not done:
t = traversal_queue or deque(visited_resources)
traversal_queue = deque()
for e in edges(t):
if not e in visited_resources:
visited_resources.append(e)
traversal_queue.append(e)
if not len(traversal_queue): done = True
return list(visited_resources)
for prod_id in traversal(producer_id):
producer = self.clients.resource_registry.read(prod_id)
if 'qc_keys' in producer.producer_context.configuration:
document_keys.extend(producer.producer_context.configuration['qc_keys'])
return document_keys
| bsd-2-clause | 4,531,962,079,538,114,000 | 53.700516 | 254 | 0.653095 | false | 4.35088 | true | false | false |
rosenjens/monad | LookAhead/evaluate_timetable.py | 2 | 2302 | import csv
import PyPDF2
import re
import datetime
from datetime import date, timedelta
from fitness import Fitness
from dbConnection import DB
import toolBox
def eval(individual):
''' Evaluates best ind timetable'''
with open('timetable.csv', 'w') as csvfile1:
writer = csv.writer(csvfile1)
writer.writerow(['Line', 'Capacity', 'Headway', 'Departure time'])
for trip, item in enumerate(individual):
if trip % 7 == 0:
[writer.writerow(individual[trip+i]) for i in range(7)]
writer.writerow([])
def evaluateTimetable():
    ''' Evaluate how well the current static timetable does in terms of waiting time.
    The purpose is to provide a comparison metric against the timetable generated by the GA.
'''
pdfFileObj = open('H1_web.pdf', 'rb')
pdfReader = PyPDF2.PdfFileReader(pdfFileObj)
numPages = pdfReader.numPages
pagesText = []
for i in range(2):
pdfReader = PyPDF2.PdfFileReader(pdfFileObj)
pageObj = pdfReader.getPage(i)
txt = pageObj.extractText()
timeRegex = re.compile(r'\d{2}\.\d{2}') # escape dot to match
mo = timeRegex.findall(txt)
pagesText += mo
departures = list()
for index in range(len(pagesText)):
if index % 4 == 0:
for i in range(4):
departures.append(pagesText[index+i])
departures[:] = ['2015 12 09 ' + x for x in departures]
length = len(departures)
items = []
for i in range(8):
item = departures.pop()
items.append(item)
items.reverse()
#departures[:] = items + departures
individual = list()
for t in departures:
individual.append([1, 120, 1, datetime.datetime.strptime(t, '%Y %m %d %H.%M')])
phenotype = []
ind = []
for q in range(len(individual)):
try:
if q % 4 == 0:
ind.append(individual[q])
t = []
for x in range(4):
t.append(individual[q+x])
#phenotype.append(t)
except IndexError, e:
t[:] = []
for x in range(4):
t.append(individual[x+0])
#phenotype.append(t)
print ind
if __name__ == "__main__":
evaluateTimetable()
| apache-2.0 | -8,454,910,139,132,885,000 | 27.775 | 120 | 0.572546 | false | 3.755302 | false | false | false |
sumedhasingla/VTK | Filters/Points/Testing/Python/TestPointInterpolator.py | 1 | 6099 | #!/usr/bin/env python
import vtk
from vtk.test import Testing
from vtk.util.misc import vtkGetDataRoot
VTK_DATA_ROOT = vtkGetDataRoot()
# Parameters for debugging
res = 1000
# create pipeline
#
pl3d = vtk.vtkMultiBlockPLOT3DReader()
pl3d.SetXYZFileName(VTK_DATA_ROOT + "/Data/combxyz.bin")
pl3d.SetQFileName(VTK_DATA_ROOT + "/Data/combq.bin")
pl3d.SetScalarFunctionNumber(100)
pl3d.SetVectorFunctionNumber(202)
pl3d.Update()
output = pl3d.GetOutput().GetBlock(0)
# Create a probe plane
center = output.GetCenter()
plane = vtk.vtkPlaneSource()
plane.SetResolution(res,res)
plane.SetOrigin(0,0,0)
plane.SetPoint1(10,0,0)
plane.SetPoint2(0,10,0)
plane.SetCenter(center)
plane.SetNormal(0,1,0)
# Reuse the locator
locator = vtk.vtkStaticPointLocator()
locator.SetDataSet(output)
locator.BuildLocator()
# Voronoi kernel------------------------------------------------
voronoiKernel = vtk.vtkVoronoiKernel()
interpolator = vtk.vtkPointInterpolator()
interpolator.SetInputConnection(plane.GetOutputPort())
interpolator.SetSourceData(output)
interpolator.SetKernel(voronoiKernel)
interpolator.SetLocator(locator)
# Time execution
timer = vtk.vtkTimerLog()
timer.StartTimer()
interpolator.Update()
timer.StopTimer()
time = timer.GetElapsedTime()
print("Interpolate Points (Voronoi): {0}".format(time))
intMapper = vtk.vtkPolyDataMapper()
intMapper.SetInputConnection(interpolator.GetOutputPort())
intActor = vtk.vtkActor()
intActor.SetMapper(intMapper)
# Create an outline
outline = vtk.vtkStructuredGridOutlineFilter()
outline.SetInputData(output)
outlineMapper = vtk.vtkPolyDataMapper()
outlineMapper.SetInputConnection(outline.GetOutputPort())
outlineActor = vtk.vtkActor()
outlineActor.SetMapper(outlineMapper)
# Gaussian kernel-------------------------------------------------------
gaussianKernel = vtk.vtkGaussianKernel()
#gaussianKernel = vtk.vtkEllipsoidalGaussianKernel()
#gaussianKernel.UseScalarsOn()
#gaussianKernel.UseNormalsOn()
gaussianKernel.SetSharpness(4)
gaussianKernel.SetRadius(0.5)
interpolator1 = vtk.vtkPointInterpolator()
interpolator1.SetInputConnection(plane.GetOutputPort())
interpolator1.SetSourceData(output)
interpolator1.SetKernel(gaussianKernel)
interpolator1.SetLocator(locator)
interpolator1.SetNullPointsStrategyToNullValue()
# Time execution
timer.StartTimer()
interpolator1.Update()
timer.StopTimer()
time = timer.GetElapsedTime()
print("Interpolate Points (Gaussian): {0}".format(time))
intMapper1 = vtk.vtkPolyDataMapper()
intMapper1.SetInputConnection(interpolator1.GetOutputPort())
intActor1 = vtk.vtkActor()
intActor1.SetMapper(intMapper1)
# Create an outline
outline1 = vtk.vtkStructuredGridOutlineFilter()
outline1.SetInputData(output)
outlineMapper1 = vtk.vtkPolyDataMapper()
outlineMapper1.SetInputConnection(outline1.GetOutputPort())
outlineActor1 = vtk.vtkActor()
outlineActor1.SetMapper(outlineMapper1)
# Shepard kernel-------------------------------------------------------
shepardKernel = vtk.vtkShepardKernel()
shepardKernel.SetPowerParameter(2)
shepardKernel.SetRadius(0.5)
interpolator2 = vtk.vtkPointInterpolator()
interpolator2.SetInputConnection(plane.GetOutputPort())
interpolator2.SetSourceData(output)
interpolator2.SetKernel(shepardKernel)
interpolator2.SetLocator(locator)
interpolator2.SetNullPointsStrategyToMaskPoints()
# Time execution
timer.StartTimer()
interpolator2.Update()
timer.StopTimer()
time = timer.GetElapsedTime()
print("Interpolate Points (Shepard): {0}".format(time))
intMapper2 = vtk.vtkPolyDataMapper()
intMapper2.SetInputConnection(interpolator2.GetOutputPort())
intActor2 = vtk.vtkActor()
intActor2.SetMapper(intMapper2)
# Create an outline
outline2 = vtk.vtkStructuredGridOutlineFilter()
outline2.SetInputData(output)
outlineMapper2 = vtk.vtkPolyDataMapper()
outlineMapper2.SetInputConnection(outline2.GetOutputPort())
outlineActor2 = vtk.vtkActor()
outlineActor2.SetMapper(outlineMapper2)
# SPH kernel-------------------------------------------------------
SPHKernel = vtk.vtkSPHKernel()
interpolator3 = vtk.vtkPointInterpolator()
interpolator3.SetInputConnection(plane.GetOutputPort())
interpolator3.SetSourceData(output)
interpolator3.SetKernel(voronoiKernel)
#interpolator3.SetKernel(SPHKernel)
interpolator3.SetLocator(locator)
# Time execution
timer.StartTimer()
interpolator3.Update()
timer.StopTimer()
time = timer.GetElapsedTime()
print("Interpolate Points (SPH): {0}".format(time))
intMapper3 = vtk.vtkPolyDataMapper()
intMapper3.SetInputConnection(interpolator3.GetOutputPort())
intActor3 = vtk.vtkActor()
intActor3.SetMapper(intMapper3)
# Create an outline
outline3 = vtk.vtkStructuredGridOutlineFilter()
outline3.SetInputData(output)
outlineMapper3 = vtk.vtkPolyDataMapper()
outlineMapper3.SetInputConnection(outline3.GetOutputPort())
outlineActor3 = vtk.vtkActor()
outlineActor3.SetMapper(outlineMapper3)
# Create the RenderWindow, Renderer and both Actors
#
ren0 = vtk.vtkRenderer()
ren0.SetViewport(0,0,.5,.5)
ren1 = vtk.vtkRenderer()
ren1.SetViewport(0.5,0,1,.5)
ren2 = vtk.vtkRenderer()
ren2.SetViewport(0,0.5,.5,1)
ren3 = vtk.vtkRenderer()
ren3.SetViewport(0.5,0.5,1,1)
renWin = vtk.vtkRenderWindow()
renWin.AddRenderer(ren0)
renWin.AddRenderer(ren1)
renWin.AddRenderer(ren2)
renWin.AddRenderer(ren3)
iren = vtk.vtkRenderWindowInteractor()
iren.SetRenderWindow(renWin)
# Add the actors to the renderer, set the background and size
#
ren0.AddActor(intActor)
ren0.AddActor(outlineActor)
ren0.SetBackground(0.1, 0.2, 0.4)
ren1.AddActor(intActor1)
ren1.AddActor(outlineActor1)
ren1.SetBackground(0.1, 0.2, 0.4)
ren2.AddActor(intActor2)
ren2.AddActor(outlineActor2)
ren2.SetBackground(0.1, 0.2, 0.4)
ren3.AddActor(intActor3)
ren3.AddActor(outlineActor3)
ren3.SetBackground(0.1, 0.2, 0.4)
renWin.SetSize(500, 500)
cam = ren0.GetActiveCamera()
cam.SetClippingRange(3.95297, 50)
cam.SetFocalPoint(8.88908, 0.595038, 29.3342)
cam.SetPosition(-12.3332, 31.7479, 41.2387)
cam.SetViewUp(0.060772, -0.319905, 0.945498)
ren1.SetActiveCamera(cam)
ren2.SetActiveCamera(cam)
ren3.SetActiveCamera(cam)
iren.Initialize()
# render the image
#
renWin.Render()
#iren.Start()
| bsd-3-clause | -1,206,573,601,351,961,000 | 25.517391 | 72 | 0.77898 | false | 3.100661 | false | false | false |
griffinfoster/pulsar-polarization-sims | scripts/tableSlope.py | 1 | 2516 | #!/usr/bin/env python
"""
"""
import os,sys
import numpy as np
import cPickle as pkl
if __name__ == "__main__":
from optparse import OptionParser
o = OptionParser()
o.set_usage('%prog [options] [pklReduceDict.py DICT]')
o.set_description(__doc__)
o.add_option('--snr',dest='snr',default=100,type='int',
help='SNR value to use (rounds to nearest int value), default: 100')
o.add_option('--info',dest='info',action='store_true',
help='Print parameter information in the dictionary and exit')
o.add_option('--dJ',dest='dJ',default=0.05,type='float',
help='Calibration error to select out, default: 0.05')
o.add_option('-c','--cal',dest='calMode',default='cal',
help='cal mode to use: cal or uncal, default: cal')
o.add_option('-m','--mode',dest='mode',default='rms',
help='Data mode: rms, chi2, sigma ; default: rms')
o.add_option('-r','--rms', dest='rmsMode', default=0, type='int',
help='Set RMS mode, 0: total intesity, 1: invariant interval, 2: matrix template matching. default: 0')
opts, args = o.parse_args(sys.argv[1:])
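    # Example invocation (the pickle file name is an illustrative assumption):
    #   ./tableSlope.py --snr 100 --dJ 0.05 -c cal -m rms -r 0 pklReduceDict.pkl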
print 'Loading PKL file'
reduceDict=pkl.load(open(args[0]))
if opts.info:
snrs=[]
deltaJs=[]
ixrs=[]
for key,val in reduceDict.iteritems():
snrs.append(key[1])
deltaJs.append(key[2]*100.)
ixrs.append(10.*np.log10(1./(key[3]**2)))
snrs=np.array(snrs)
deltaJs=np.array(deltaJs)
ixrs=np.array(ixrs)
print 'SNR:', np.unique(snrs)
        print 'delta J (%):', np.unique(deltaJs)
print 'IXR (dB):', np.unique(ixrs)
exit()
ixrdbs=[]
vals=[]
for key,val in reduceDict.iteritems():
#key: (mode,snr,dJ,IXR,cal/uncal)
#val keys: ['rms', 'chi2', 'avgSigma', 'obsMJD', 'nobs', 'expMJD', 'sigmas']
if key[0]==opts.rmsMode and int(key[1])==opts.snr and key[2]==opts.dJ and key[4].startswith(opts.calMode): #timing mode, snr, dJ, cal mode selection
ixrdb=10.*np.log10(1./(key[3]**2))
ixrdbs.append(ixrdb)
if opts.mode.startswith('rms'): vals.append(val['rms'])
elif opts.mode.startswith('chi'): vals.append(val['chi2'])
elif opts.mode.startswith('sigma'): vals.append(val['avgSigma'])
ixrdbs=np.array(ixrdbs)
vals=np.array(vals)
idx=np.argsort(ixrdbs)
print 'IXR',ixrdbs[idx]
print 'RMS',vals[idx]
    print 'percent', 100.*np.diff(vals[idx])/vals[idx][:-1]
| mit | 6,060,779,592,940,729,000 | 36.552239 | 156 | 0.589825 | false | 3.11772 | false | false | false |
assisi/assisipy-lib | assisipy_utils/examples/exec_sim/demo_deploy/spawn_agents.py | 2 | 2263 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
#from math import pi
from assisipy import sim
import argparse
#import random
from assisipy_utils import arena
from assisipy_utils.mgmt import specs
#from assisipy_utils.arena import Transformation
#import yaml
if __name__ == '__main__':
parser = argparse.ArgumentParser(description=
'''
Create a circular wall with some casus outside of the wall,
and spawn bees
''')
parser.add_argument('-n', '--num-bees', type=int, default=0)
parser.add_argument('-ol', '--obj-listing', type=str, default=None)
parser.add_argument('-a', '--arena-file', type=str, default='valid.arena')
parser.add_argument('-l', '--label', type=str, default='popln1-')
parser.add_argument('-e', '--exec-script', type=str, required=True,
help='name of script to execute for each bee in `bee-file`')
args = parser.parse_args()
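    # Example invocation (file names and counts are illustrative assumptions):
    #   python spawn_agents.py -n 10 -a valid.arena -l popln1- \
    #       -e bee_behaviour.py -ol spawned_objects.csv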
simctrl = sim.Control()
obj_file = None
if args.obj_listing is not None:
obj_file = open(args.obj_listing, 'w')
specs.write_header(obj_file)
# find out where the bees can go
bl, tr, trans =arena.read_reqs(args.arena_file)
bee_poses = arena.gen_valid_bee_positions((bl, tr), n=args.num_bees, trans=trans)
if args.num_bees > 0:
for i, pts in enumerate(bee_poses):
pose = pts[0].x, pts[0].y, pts[1]
#for i in range(1, args.num_bees+1):
name = '{}-Bee-{:03d}'.format(args.label, i)
if i < args.num_bees / 2:
conf = 'gf.conf'
else:
conf = 'wf.conf'
#pose = (random.uniform(-4, 4), random.uniform(-4, 4),
# 2*pi*random.random())
simctrl.spawn('Bee', name, pose)
print 'Spawned bee', name
if obj_file:
s = specs.gen_spec_str(name, 'Bee', pose,
args.exec_script, conf,
'tcp://localhost:5556',
'tcp://localhost:5555',
)
obj_file.write(s + "\n")
if obj_file:
obj_file.close()
print "[I] wrote object listing to {}".format(obj_file.name)
| lgpl-3.0 | 8,941,692,339,584,461,000 | 32.776119 | 85 | 0.539991 | false | 3.47619 | false | false | false |
vlegoff/tsunami | src/secondaires/magie/types/grimoire.py | 1 | 4456 | # -*-coding:Utf-8 -*
# Copyright (c) 2010-2017 LE GOFF Vincent
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
# * Neither the name of the copyright holder nor the names of its contributors
# may be used to endorse or promote products derived from this software
# without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT
# OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
"""Fichier contenant le type Grimoire."""
from bases.objet.attribut import Attribut
from primaires.interpreteur.editeur.uniligne import Uniligne
from primaires.objet.types.base import BaseType
class Grimoire(BaseType):
"""Type d'objet : grimoire.
Ce type d'objet permet d'apprendre un sort, en l'étudiant, si on
est du bon élément. Sinon il se détruit et les points de tribut
du sort sont ajoutés dans les points du lecteur.
"""
nom_type = "grimoire"
def __init__(self, cle=""):
"""Constructeur de l'objet"""
BaseType.__init__(self, cle)
self._cle_sort = ""
self.etendre_editeur("s", "sort", Uniligne, self, "cle_sort")
        # Attributes specific to the object (not to the prototype)
self._attributs = {
"proprietaire": Attribut(None),
}
def _get_cle_sort(self):
return self._cle_sort
def _set_cle_sort(self, sort):
        sorts = [s.cle for s in type(self).importeur.magie.sorts.values()]
if sort in sorts:
self._cle_sort = sort
cle_sort = property(_get_cle_sort, _set_cle_sort)
@property
def sort(self):
"""Renvoie le sort de ce parchemin."""
if self.cle_sort:
return importeur.magie.sorts[self.cle_sort]
else:
return None
def travailler_enveloppes(self, enveloppes):
"""Travail sur les enveloppes"""
sort = enveloppes["s"]
sort.apercu = "{objet.cle_sort}"
sort.prompt = "Clé du sort : "
sort.aide_courte = \
"Entrez la |ent|clé|ff| du sort appris par ce grimoire. Il " \
"va sans dire que le sort\nen question doit être déjà créé. " \
"Entrez |cmd|/|ff| pour revenir à la fenêtre parente.\n\n" \
"Sort actuel : {objet.cle_sort}"
def acheter(self, quantite, magasin, transaction):
"""Achète le grimoire."""
objets = BaseType.acheter(self, quantite, magasin, transaction)
acheteur = transaction.initiateur
for objet in objets:
objet.proprietaire = acheteur
acheteur.envoyer_tip("Vous êtes propriétaire de ce grimoire. " \
"Utilisez la commande %étudier% pour l'étudier.")
def regarder(self, personnage):
"""Le personnage regarde l'objet."""
sort = self.sort
if sort:
if sort.elements[0] != personnage.element:
return "L'ancre ondule étrangement devant vos yeux... " \
"vous ne pouvez lire ce parchemin."
msg = BaseType.regarder(self, personnage)
points = sort.points_tribut
s = "s" if points > 1 else ""
phrase = "Il vous faut {} point{s} de tribut pour apprendre ce " \
"sort.".format(points, s=s)
msg += "\n\n" + phrase
return msg
| bsd-3-clause | 2,825,652,957,668,951,600 | 37.903509 | 80 | 0.655017 | false | 3.539505 | false | false | false |
nwjs/chromium.src | mojo/public/tools/bindings/pylib/mojom/generate/module.py | 2 | 32893 | # Copyright 2013 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
# This module's classes provide an interface to mojo modules. Modules are
# collections of interfaces and structs to be used by mojo ipc clients and
# servers.
#
# A simple interface would be created this way:
# module = mojom.generate.module.Module('Foo')
# interface = module.AddInterface('Bar')
# method = interface.AddMethod('Tat', 0)
# method.AddParameter('baz', mojom.INT32, 0)
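#
# A struct could be assembled the same way (an illustrative sketch; it assumes
# the Module class defined later in this file exposes AddStruct alongside
# AddInterface):
# struct = module.AddStruct('Baz')
# struct.AddField('name', mojom.STRING)
# struct.AddField('id', mojom.UINT64, ordinal=1)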
# We use our own version of __repr__ when displaying the AST, as the
# AST currently doesn't capture which nodes are reference (e.g. to
# types) and which nodes are definitions. This allows us to e.g. print
# the definition of a struct when it's defined inside a module, but
# only print its name when it's referenced in e.g. a method parameter.
def Repr(obj, as_ref=True):
"""A version of __repr__ that can distinguish references.
Sometimes we like to print an object's full representation
(e.g. with its fields) and sometimes we just want to reference an
object that was printed in full elsewhere. This function allows us
to make that distinction.
Args:
obj: The object whose string representation we compute.
as_ref: If True, use the short reference representation.
Returns:
A str representation of |obj|.
"""
if hasattr(obj, 'Repr'):
return obj.Repr(as_ref=as_ref)
# Since we cannot implement Repr for existing container types, we
# handle them here.
elif isinstance(obj, list):
if not obj:
return '[]'
else:
return ('[\n%s\n]' % (',\n'.join(' %s' % Repr(elem, as_ref).replace(
'\n', '\n ') for elem in obj)))
elif isinstance(obj, dict):
if not obj:
return '{}'
else:
return ('{\n%s\n}' % (',\n'.join(' %s: %s' % (
Repr(key, as_ref).replace('\n', '\n '),
Repr(val, as_ref).replace('\n', '\n '))
for key, val in obj.items())))
else:
return repr(obj)
def GenericRepr(obj, names):
"""Compute generic Repr for |obj| based on the attributes in |names|.
Args:
obj: The object to compute a Repr for.
names: A dict from attribute names to include, to booleans
specifying whether those attributes should be shown as
references or not.
Returns:
A str representation of |obj|.
"""
def ReprIndent(name, as_ref):
return ' %s=%s' % (name, Repr(getattr(obj, name), as_ref).replace(
'\n', '\n '))
return '%s(\n%s\n)' % (obj.__class__.__name__, ',\n'.join(
ReprIndent(name, as_ref) for (name, as_ref) in names.items()))
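# Illustrative sketch of how these helpers behave (output is indicative only):
#
#   k = Kind('i32')
#   Repr(k)    # -> "<Kind spec='i32'>"
#   Repr([k])  # -> "[\n  <Kind spec='i32'>\n]"
#   GenericRepr(struct, {'mojom_name': False, 'fields': False})
#              # -> "Struct(\n  mojom_name=...,\n  fields=[...]\n)"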
class Kind(object):
"""Kind represents a type (e.g. int8, string).
Attributes:
spec: A string uniquely identifying the type. May be None.
module: {Module} The defining module. Set to None for built-in types.
parent_kind: The enclosing type. For example, an enum defined
inside an interface has that interface as its parent. May be None.
"""
def __init__(self, spec=None, module=None):
self.spec = spec
self.module = module
self.parent_kind = None
def Repr(self, as_ref=True):
return '<%s spec=%r>' % (self.__class__.__name__, self.spec)
def __repr__(self):
# Gives us a decent __repr__ for all kinds.
return self.Repr()
class ReferenceKind(Kind):
"""ReferenceKind represents pointer and handle types.
A type is nullable if null (for pointer types) or invalid handle (for handle
types) is a legal value for the type.
Attributes:
is_nullable: True if the type is nullable.
"""
def __init__(self, spec=None, is_nullable=False, module=None):
assert spec is None or is_nullable == spec.startswith('?')
Kind.__init__(self, spec, module)
self.is_nullable = is_nullable
self.shared_definition = {}
def Repr(self, as_ref=True):
return '<%s spec=%r is_nullable=%r>' % (self.__class__.__name__, self.spec,
self.is_nullable)
def MakeNullableKind(self):
assert not self.is_nullable
if self == STRING:
return NULLABLE_STRING
if self == HANDLE:
return NULLABLE_HANDLE
if self == DCPIPE:
return NULLABLE_DCPIPE
if self == DPPIPE:
return NULLABLE_DPPIPE
if self == MSGPIPE:
return NULLABLE_MSGPIPE
if self == SHAREDBUFFER:
return NULLABLE_SHAREDBUFFER
nullable_kind = type(self)()
nullable_kind.shared_definition = self.shared_definition
if self.spec is not None:
nullable_kind.spec = '?' + self.spec
nullable_kind.is_nullable = True
nullable_kind.parent_kind = self.parent_kind
nullable_kind.module = self.module
return nullable_kind
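  # For the built-in reference kinds, MakeNullableKind() maps straight to the
  # predefined nullable constants (e.g. STRING -> NULLABLE_STRING, defined
  # below); for user-defined kinds it returns a new kind that shares the
  # original's definition, e.g. (sketch):
  #   nullable_foo = foo_struct.MakeNullableKind()
  #   assert nullable_foo.spec == '?' + foo_struct.spec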
@classmethod
def AddSharedProperty(cls, name):
"""Adds a property |name| to |cls|, which accesses the corresponding item in
|shared_definition|.
    The reason for adding this indirection is to enable sharing the definition
between a reference kind and its nullable variation. For example:
a = Struct('test_struct_1')
b = a.MakeNullableKind()
a.name = 'test_struct_2'
print(b.name) # Outputs 'test_struct_2'.
"""
def Get(self):
return self.shared_definition[name]
def Set(self, value):
self.shared_definition[name] = value
setattr(cls, name, property(Get, Set))
# Initialize the set of primitive types. These can be accessed by clients.
BOOL = Kind('b')
INT8 = Kind('i8')
INT16 = Kind('i16')
INT32 = Kind('i32')
INT64 = Kind('i64')
UINT8 = Kind('u8')
UINT16 = Kind('u16')
UINT32 = Kind('u32')
UINT64 = Kind('u64')
FLOAT = Kind('f')
DOUBLE = Kind('d')
STRING = ReferenceKind('s')
HANDLE = ReferenceKind('h')
DCPIPE = ReferenceKind('h:d:c')
DPPIPE = ReferenceKind('h:d:p')
MSGPIPE = ReferenceKind('h:m')
SHAREDBUFFER = ReferenceKind('h:s')
NULLABLE_STRING = ReferenceKind('?s', True)
NULLABLE_HANDLE = ReferenceKind('?h', True)
NULLABLE_DCPIPE = ReferenceKind('?h:d:c', True)
NULLABLE_DPPIPE = ReferenceKind('?h:d:p', True)
NULLABLE_MSGPIPE = ReferenceKind('?h:m', True)
NULLABLE_SHAREDBUFFER = ReferenceKind('?h:s', True)
# Collection of all Primitive types
PRIMITIVES = (
BOOL,
INT8,
INT16,
INT32,
INT64,
UINT8,
UINT16,
UINT32,
UINT64,
FLOAT,
DOUBLE,
STRING,
HANDLE,
DCPIPE,
DPPIPE,
MSGPIPE,
SHAREDBUFFER,
NULLABLE_STRING,
NULLABLE_HANDLE,
NULLABLE_DCPIPE,
NULLABLE_DPPIPE,
NULLABLE_MSGPIPE,
NULLABLE_SHAREDBUFFER
)
ATTRIBUTE_MIN_VERSION = 'MinVersion'
ATTRIBUTE_EXTENSIBLE = 'Extensible'
ATTRIBUTE_SYNC = 'Sync'
class NamedValue(object):
def __init__(self, module, parent_kind, mojom_name):
self.module = module
self.parent_kind = parent_kind
self.mojom_name = mojom_name
def GetSpec(self):
return (self.module.mojom_namespace + '.' +
(self.parent_kind and (self.parent_kind.mojom_name + '.') or "") +
self.mojom_name)
class BuiltinValue(object):
def __init__(self, value):
self.value = value
class ConstantValue(NamedValue):
def __init__(self, module, parent_kind, constant):
NamedValue.__init__(self, module, parent_kind, constant.mojom_name)
self.constant = constant
@property
def name(self):
return self.constant.name
class EnumValue(NamedValue):
def __init__(self, module, enum, field):
NamedValue.__init__(self, module, enum.parent_kind, field.mojom_name)
self.field = field
self.enum = enum
def GetSpec(self):
return (self.module.mojom_namespace + '.' +
(self.parent_kind and (self.parent_kind.mojom_name + '.') or "") +
self.enum.mojom_name + '.' + self.mojom_name)
@property
def name(self):
return self.field.name
class Constant(object):
def __init__(self, mojom_name=None, kind=None, value=None, parent_kind=None):
self.mojom_name = mojom_name
self.kind = kind
self.value = value
self.parent_kind = parent_kind
def Stylize(self, stylizer):
self.name = stylizer.StylizeConstant(self.mojom_name)
class Field(object):
def __init__(self, mojom_name=None, kind=None, ordinal=None, default=None,
attributes=None):
if self.__class__.__name__ == 'Field':
raise Exception()
self.mojom_name = mojom_name
self.kind = kind
self.ordinal = ordinal
self.default = default
self.attributes = attributes
def Repr(self, as_ref=True):
# Fields are only referenced by objects which define them and thus
# they are always displayed as non-references.
return GenericRepr(self, {'mojom_name': False, 'kind': True})
def Stylize(self, stylizer):
self.name = stylizer.StylizeField(self.mojom_name)
@property
def min_version(self):
return self.attributes.get(ATTRIBUTE_MIN_VERSION) \
if self.attributes else None
class StructField(Field): pass
class UnionField(Field): pass
class Struct(ReferenceKind):
"""A struct with typed fields.
Attributes:
mojom_name: {str} The name of the struct type as defined in mojom.
name: {str} The stylized name.
native_only: {bool} Does the struct have a body (i.e. any fields) or is it
purely a native struct.
custom_serializer: {bool} Should we generate a serializer for the struct or
will one be provided by non-generated code.
fields: {List[StructField]} The members of the struct.
enums: {List[Enum]} The enums defined in the struct scope.
constants: {List[Constant]} The constants defined in the struct scope.
attributes: {dict} Additional information about the struct, such as
if it's a native struct.
"""
ReferenceKind.AddSharedProperty('mojom_name')
ReferenceKind.AddSharedProperty('name')
ReferenceKind.AddSharedProperty('native_only')
ReferenceKind.AddSharedProperty('custom_serializer')
ReferenceKind.AddSharedProperty('fields')
ReferenceKind.AddSharedProperty('enums')
ReferenceKind.AddSharedProperty('constants')
ReferenceKind.AddSharedProperty('attributes')
def __init__(self, mojom_name=None, module=None, attributes=None):
if mojom_name is not None:
spec = 'x:' + mojom_name
else:
spec = None
ReferenceKind.__init__(self, spec, False, module)
self.mojom_name = mojom_name
self.native_only = False
self.custom_serializer = False
self.fields = []
self.enums = []
self.constants = []
self.attributes = attributes
def Repr(self, as_ref=True):
if as_ref:
return '<%s mojom_name=%r module=%s>' % (
self.__class__.__name__, self.mojom_name,
Repr(self.module, as_ref=True))
else:
return GenericRepr(self,
{'mojom_name': False, 'fields': False, 'module': True})
def AddField(self, mojom_name, kind, ordinal=None, default=None,
attributes=None):
field = StructField(mojom_name, kind, ordinal, default, attributes)
self.fields.append(field)
return field
def Stylize(self, stylizer):
self.name = stylizer.StylizeStruct(self.mojom_name)
for field in self.fields:
field.Stylize(stylizer)
for enum in self.enums:
enum.Stylize(stylizer)
for constant in self.constants:
constant.Stylize(stylizer)
class Union(ReferenceKind):
"""A union of several kinds.
Attributes:
mojom_name: {str} The name of the union type as defined in mojom.
name: {str} The stylized name.
fields: {List[UnionField]} The members of the union.
attributes: {dict} Additional information about the union, such as
which Java class name to use to represent it in the generated
bindings.
"""
ReferenceKind.AddSharedProperty('mojom_name')
ReferenceKind.AddSharedProperty('name')
ReferenceKind.AddSharedProperty('fields')
ReferenceKind.AddSharedProperty('attributes')
def __init__(self, mojom_name=None, module=None, attributes=None):
if mojom_name is not None:
spec = 'x:' + mojom_name
else:
spec = None
ReferenceKind.__init__(self, spec, False, module)
self.mojom_name = mojom_name
self.fields = []
self.attributes = attributes
def Repr(self, as_ref=True):
if as_ref:
return '<%s spec=%r is_nullable=%r fields=%s>' % (
self.__class__.__name__, self.spec, self.is_nullable,
Repr(self.fields))
else:
return GenericRepr(self, {'fields': True, 'is_nullable': False})
def AddField(self, mojom_name, kind, ordinal=None, attributes=None):
field = UnionField(mojom_name, kind, ordinal, None, attributes)
self.fields.append(field)
return field
def Stylize(self, stylizer):
self.name = stylizer.StylizeUnion(self.mojom_name)
for field in self.fields:
field.Stylize(stylizer)
class Array(ReferenceKind):
"""An array.
Attributes:
kind: {Kind} The type of the elements. May be None.
length: The number of elements. None if unknown.
"""
ReferenceKind.AddSharedProperty('kind')
ReferenceKind.AddSharedProperty('length')
def __init__(self, kind=None, length=None):
if kind is not None:
if length is not None:
spec = 'a%d:%s' % (length, kind.spec)
else:
spec = 'a:%s' % kind.spec
ReferenceKind.__init__(self, spec)
else:
ReferenceKind.__init__(self)
self.kind = kind
self.length = length
def Repr(self, as_ref=True):
if as_ref:
return '<%s spec=%r is_nullable=%r kind=%s length=%r>' % (
self.__class__.__name__, self.spec, self.is_nullable, Repr(self.kind),
self.length)
else:
return GenericRepr(self, {'kind': True, 'length': False,
'is_nullable': False})
class Map(ReferenceKind):
"""A map.
Attributes:
key_kind: {Kind} The type of the keys. May be None.
value_kind: {Kind} The type of the elements. May be None.
"""
ReferenceKind.AddSharedProperty('key_kind')
ReferenceKind.AddSharedProperty('value_kind')
def __init__(self, key_kind=None, value_kind=None):
if (key_kind is not None and value_kind is not None):
ReferenceKind.__init__(self,
'm[' + key_kind.spec + '][' + value_kind.spec +
']')
if IsNullableKind(key_kind):
raise Exception("Nullable kinds cannot be keys in maps.")
if IsAnyHandleKind(key_kind):
raise Exception("Handles cannot be keys in maps.")
if IsAnyInterfaceKind(key_kind):
raise Exception("Interfaces cannot be keys in maps.")
if IsArrayKind(key_kind):
raise Exception("Arrays cannot be keys in maps.")
else:
ReferenceKind.__init__(self)
self.key_kind = key_kind
self.value_kind = value_kind
def Repr(self, as_ref=True):
if as_ref:
return '<%s spec=%r is_nullable=%r key_kind=%s value_kind=%s>' % (
self.__class__.__name__, self.spec, self.is_nullable,
Repr(self.key_kind), Repr(self.value_kind))
else:
return GenericRepr(self, {'key_kind': True, 'value_kind': True})
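# Illustrative sketch of the key-kind restrictions enforced above:
#   Map(STRING, INT32)           # ok: map<string, int32>
#   Map(NULLABLE_STRING, INT32)  # raises: nullable kinds cannot be keys
#   Map(HANDLE, INT32)           # raises: handles cannot be keys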
class PendingRemote(ReferenceKind):
ReferenceKind.AddSharedProperty('kind')
def __init__(self, kind=None):
if kind is not None:
if not isinstance(kind, Interface):
raise Exception(
'pending_remote<T> requires T to be an interface type. Got %r' %
kind.spec)
ReferenceKind.__init__(self, 'rmt:' + kind.spec)
else:
ReferenceKind.__init__(self)
self.kind = kind
class PendingReceiver(ReferenceKind):
ReferenceKind.AddSharedProperty('kind')
def __init__(self, kind=None):
if kind is not None:
if not isinstance(kind, Interface):
raise Exception(
'pending_receiver<T> requires T to be an interface type. Got %r' %
kind.spec)
ReferenceKind.__init__(self, 'rcv:' + kind.spec)
else:
ReferenceKind.__init__(self)
self.kind = kind
class PendingAssociatedRemote(ReferenceKind):
ReferenceKind.AddSharedProperty('kind')
def __init__(self, kind=None):
if kind is not None:
if not isinstance(kind, Interface):
raise Exception(
'pending_associated_remote<T> requires T to be an interface ' +
'type. Got %r' % kind.spec)
ReferenceKind.__init__(self, 'rma:' + kind.spec)
else:
ReferenceKind.__init__(self)
self.kind = kind
class PendingAssociatedReceiver(ReferenceKind):
ReferenceKind.AddSharedProperty('kind')
def __init__(self, kind=None):
if kind is not None:
if not isinstance(kind, Interface):
raise Exception(
'pending_associated_receiver<T> requires T to be an interface' +
'type. Got %r' % kind.spec)
ReferenceKind.__init__(self, 'rca:' + kind.spec)
else:
ReferenceKind.__init__(self)
self.kind = kind
class InterfaceRequest(ReferenceKind):
ReferenceKind.AddSharedProperty('kind')
def __init__(self, kind=None):
if kind is not None:
if not isinstance(kind, Interface):
raise Exception(
"Interface request requires %r to be an interface." % kind.spec)
ReferenceKind.__init__(self, 'r:' + kind.spec)
else:
ReferenceKind.__init__(self)
self.kind = kind
class AssociatedInterfaceRequest(ReferenceKind):
ReferenceKind.AddSharedProperty('kind')
def __init__(self, kind=None):
if kind is not None:
if not isinstance(kind, InterfaceRequest):
raise Exception(
"Associated interface request requires %r to be an interface "
"request." % kind.spec)
assert not kind.is_nullable
ReferenceKind.__init__(self, 'asso:' + kind.spec)
else:
ReferenceKind.__init__(self)
self.kind = kind.kind if kind is not None else None
class Parameter(object):
def __init__(self, mojom_name=None, kind=None, ordinal=None, default=None,
attributes=None):
self.mojom_name = mojom_name
self.ordinal = ordinal
self.kind = kind
self.default = default
self.attributes = attributes
def Repr(self, as_ref=True):
return '<%s mojom_name=%r kind=%s>' % (
self.__class__.__name__, self.mojom_name, self.kind.Repr(as_ref=True))
def Stylize(self, stylizer):
self.name = stylizer.StylizeParameter(self.mojom_name)
@property
def min_version(self):
return self.attributes.get(ATTRIBUTE_MIN_VERSION) \
if self.attributes else None
class Method(object):
def __init__(self, interface, mojom_name, ordinal=None, attributes=None):
self.interface = interface
self.mojom_name = mojom_name
self.ordinal = ordinal
self.parameters = []
self.param_struct = None
self.response_parameters = None
self.response_param_struct = None
self.attributes = attributes
def Repr(self, as_ref=True):
if as_ref:
return '<%s mojom_name=%r>' % (self.__class__.__name__, self.mojom_name)
else:
return GenericRepr(self, {'mojom_name': False, 'parameters': True,
'response_parameters': True})
def AddParameter(self, mojom_name, kind, ordinal=None, default=None,
attributes=None):
parameter = Parameter(mojom_name, kind, ordinal, default, attributes)
self.parameters.append(parameter)
return parameter
def AddResponseParameter(self, mojom_name, kind, ordinal=None, default=None,
attributes=None):
if self.response_parameters is None:
self.response_parameters = []
parameter = Parameter(mojom_name, kind, ordinal, default, attributes)
self.response_parameters.append(parameter)
return parameter
def Stylize(self, stylizer):
self.name = stylizer.StylizeMethod(self.mojom_name)
for param in self.parameters:
param.Stylize(stylizer)
if self.response_parameters is not None:
for param in self.response_parameters:
param.Stylize(stylizer)
if self.param_struct:
self.param_struct.Stylize(stylizer)
if self.response_param_struct:
self.response_param_struct.Stylize(stylizer)
@property
def min_version(self):
return self.attributes.get(ATTRIBUTE_MIN_VERSION) \
if self.attributes else None
@property
def sync(self):
return self.attributes.get(ATTRIBUTE_SYNC) \
if self.attributes else None
class Interface(ReferenceKind):
ReferenceKind.AddSharedProperty('mojom_name')
ReferenceKind.AddSharedProperty('name')
ReferenceKind.AddSharedProperty('methods')
ReferenceKind.AddSharedProperty('enums')
ReferenceKind.AddSharedProperty('constants')
ReferenceKind.AddSharedProperty('attributes')
def __init__(self, mojom_name=None, module=None, attributes=None):
if mojom_name is not None:
spec = 'x:' + mojom_name
else:
spec = None
ReferenceKind.__init__(self, spec, False, module)
self.mojom_name = mojom_name
self.methods = []
self.enums = []
self.constants = []
self.attributes = attributes
def Repr(self, as_ref=True):
if as_ref:
return '<%s mojom_name=%r>' % (self.__class__.__name__, self.mojom_name)
else:
return GenericRepr(self, {'mojom_name': False, 'attributes': False,
'methods': False})
def AddMethod(self, mojom_name, ordinal=None, attributes=None):
method = Method(self, mojom_name, ordinal, attributes)
self.methods.append(method)
return method
def Stylize(self, stylizer):
self.name = stylizer.StylizeInterface(self.mojom_name)
for method in self.methods:
method.Stylize(stylizer)
for enum in self.enums:
enum.Stylize(stylizer)
for constant in self.constants:
constant.Stylize(stylizer)
class AssociatedInterface(ReferenceKind):
ReferenceKind.AddSharedProperty('kind')
def __init__(self, kind=None):
if kind is not None:
if not isinstance(kind, Interface):
raise Exception(
"Associated interface requires %r to be an interface." % kind.spec)
assert not kind.is_nullable
ReferenceKind.__init__(self, 'asso:' + kind.spec)
else:
ReferenceKind.__init__(self)
self.kind = kind
class EnumField(object):
def __init__(self, mojom_name=None, value=None, attributes=None,
numeric_value=None):
self.mojom_name = mojom_name
self.value = value
self.attributes = attributes
self.numeric_value = numeric_value
def Stylize(self, stylizer):
self.name = stylizer.StylizeEnumField(self.mojom_name)
@property
def min_version(self):
return self.attributes.get(ATTRIBUTE_MIN_VERSION) \
if self.attributes else None
class Enum(Kind):
def __init__(self, mojom_name=None, module=None, attributes=None):
self.mojom_name = mojom_name
self.native_only = False
if mojom_name is not None:
spec = 'x:' + mojom_name
else:
spec = None
Kind.__init__(self, spec, module)
self.fields = []
self.attributes = attributes
self.min_value = None
self.max_value = None
def Repr(self, as_ref=True):
if as_ref:
return '<%s mojom_name=%r>' % (self.__class__.__name__, self.mojom_name)
else:
return GenericRepr(self, {'mojom_name': False, 'fields': False})
def Stylize(self, stylizer):
self.name = stylizer.StylizeEnum(self.mojom_name)
for field in self.fields:
field.Stylize(stylizer)
@property
def extensible(self):
return self.attributes.get(ATTRIBUTE_EXTENSIBLE, False) \
if self.attributes else False
class Module(object):
def __init__(self, path=None, mojom_namespace=None,
attributes=None):
self.path = path
self.mojom_namespace = mojom_namespace
self.structs = []
self.unions = []
self.interfaces = []
self.enums = []
self.constants = []
self.kinds = {}
self.attributes = attributes
self.imports = []
self.imported_kinds = {}
def __repr__(self):
# Gives us a decent __repr__ for modules.
return self.Repr()
def Repr(self, as_ref=True):
if as_ref:
return '<%s path=%r mojom_namespace=%r>' % (
self.__class__.__name__, self.path, self.mojom_namespace)
else:
return GenericRepr(self, {'path': False, 'mojom_namespace': False,
'attributes': False, 'structs': False,
'interfaces': False, 'unions': False})
def AddInterface(self, mojom_name, attributes=None):
interface = Interface(mojom_name, self, attributes)
self.interfaces.append(interface)
return interface
def AddStruct(self, mojom_name, attributes=None):
struct = Struct(mojom_name, self, attributes)
self.structs.append(struct)
return struct
def AddUnion(self, mojom_name, attributes=None):
union = Union(mojom_name, self, attributes)
self.unions.append(union)
return union
def Stylize(self, stylizer):
self.namespace = stylizer.StylizeModule(self.mojom_namespace)
for struct in self.structs:
struct.Stylize(stylizer)
for union in self.unions:
union.Stylize(stylizer)
for interface in self.interfaces:
interface.Stylize(stylizer)
for enum in self.enums:
enum.Stylize(stylizer)
for constant in self.constants:
constant.Stylize(stylizer)
for imported_module in self.imports:
imported_module.Stylize(stylizer)
def IsBoolKind(kind):
return kind.spec == BOOL.spec
def IsFloatKind(kind):
return kind.spec == FLOAT.spec
def IsDoubleKind(kind):
return kind.spec == DOUBLE.spec
def IsIntegralKind(kind):
return (kind.spec == BOOL.spec or
kind.spec == INT8.spec or
kind.spec == INT16.spec or
kind.spec == INT32.spec or
kind.spec == INT64.spec or
kind.spec == UINT8.spec or
kind.spec == UINT16.spec or
kind.spec == UINT32.spec or
kind.spec == UINT64.spec)
def IsStringKind(kind):
return kind.spec == STRING.spec or kind.spec == NULLABLE_STRING.spec
def IsGenericHandleKind(kind):
return kind.spec == HANDLE.spec or kind.spec == NULLABLE_HANDLE.spec
def IsDataPipeConsumerKind(kind):
return kind.spec == DCPIPE.spec or kind.spec == NULLABLE_DCPIPE.spec
def IsDataPipeProducerKind(kind):
return kind.spec == DPPIPE.spec or kind.spec == NULLABLE_DPPIPE.spec
def IsMessagePipeKind(kind):
return kind.spec == MSGPIPE.spec or kind.spec == NULLABLE_MSGPIPE.spec
def IsSharedBufferKind(kind):
return (kind.spec == SHAREDBUFFER.spec or
kind.spec == NULLABLE_SHAREDBUFFER.spec)
def IsStructKind(kind):
return isinstance(kind, Struct)
def IsUnionKind(kind):
return isinstance(kind, Union)
def IsArrayKind(kind):
return isinstance(kind, Array)
def IsInterfaceKind(kind):
return isinstance(kind, Interface)
def IsAssociatedInterfaceKind(kind):
return isinstance(kind, AssociatedInterface)
def IsInterfaceRequestKind(kind):
return isinstance(kind, InterfaceRequest)
def IsAssociatedInterfaceRequestKind(kind):
return isinstance(kind, AssociatedInterfaceRequest)
def IsPendingRemoteKind(kind):
return isinstance(kind, PendingRemote)
def IsPendingReceiverKind(kind):
return isinstance(kind, PendingReceiver)
def IsPendingAssociatedRemoteKind(kind):
return isinstance(kind, PendingAssociatedRemote)
def IsPendingAssociatedReceiverKind(kind):
return isinstance(kind, PendingAssociatedReceiver)
def IsEnumKind(kind):
return isinstance(kind, Enum)
def IsReferenceKind(kind):
return isinstance(kind, ReferenceKind)
def IsNullableKind(kind):
return IsReferenceKind(kind) and kind.is_nullable
def IsMapKind(kind):
return isinstance(kind, Map)
def IsObjectKind(kind):
return IsPointerKind(kind) or IsUnionKind(kind)
def IsPointerKind(kind):
return (IsStructKind(kind) or IsArrayKind(kind) or IsStringKind(kind) or
IsMapKind(kind))
# Please note that it doesn't include any interface kind.
def IsAnyHandleKind(kind):
return (IsGenericHandleKind(kind) or
IsDataPipeConsumerKind(kind) or
IsDataPipeProducerKind(kind) or
IsMessagePipeKind(kind) or
IsSharedBufferKind(kind))
def IsAnyInterfaceKind(kind):
return (IsInterfaceKind(kind) or IsInterfaceRequestKind(kind) or
IsAssociatedKind(kind) or IsPendingRemoteKind(kind) or
IsPendingReceiverKind(kind))
def IsAnyHandleOrInterfaceKind(kind):
return IsAnyHandleKind(kind) or IsAnyInterfaceKind(kind)
def IsAssociatedKind(kind):
return (IsAssociatedInterfaceKind(kind) or
IsAssociatedInterfaceRequestKind(kind) or
IsPendingAssociatedRemoteKind(kind) or
IsPendingAssociatedReceiverKind(kind))
def HasCallbacks(interface):
for method in interface.methods:
if method.response_parameters is not None:
return True
return False
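# Illustrative usage sketch (not part of the original module); 'Frobber' and
# 'Frob' are hypothetical names, and INT32/BOOL are the module's primitive kinds:
#
#   iface = Interface('Frobber')
#   method = iface.AddMethod('Frob')
#   method.AddParameter('value', INT32)
#   HasCallbacks(iface)                      # False: no response parameters yet
#   method.AddResponseParameter('ok', BOOL)
#   HasCallbacks(iface)                      # True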
# Finds out whether an interface passes associated interfaces and associated
# interface requests.
def PassesAssociatedKinds(interface):
visited_kinds = set()
for method in interface.methods:
if MethodPassesAssociatedKinds(method, visited_kinds):
return True
return False
def _AnyMethodParameterRecursive(method, predicate, visited_kinds=None):
def _HasProperty(kind):
if kind in visited_kinds:
# No need to examine the kind again.
return False
visited_kinds.add(kind)
if predicate(kind):
return True
if IsArrayKind(kind):
return _HasProperty(kind.kind)
if IsStructKind(kind) or IsUnionKind(kind):
for field in kind.fields:
if _HasProperty(field.kind):
return True
if IsMapKind(kind):
if _HasProperty(kind.key_kind) or _HasProperty(kind.value_kind):
return True
return False
if visited_kinds is None:
visited_kinds = set()
for param in method.parameters:
if _HasProperty(param.kind):
return True
if method.response_parameters is not None:
for param in method.response_parameters:
if _HasProperty(param.kind):
return True
return False
# Finds out whether a method passes associated interfaces and associated
# interface requests.
def MethodPassesAssociatedKinds(method, visited_kinds=None):
return _AnyMethodParameterRecursive(method, IsAssociatedKind,
visited_kinds=visited_kinds)
# Determines whether a method passes interfaces.
def MethodPassesInterfaces(method):
return _AnyMethodParameterRecursive(method, IsInterfaceKind)
def HasSyncMethods(interface):
for method in interface.methods:
if method.sync:
return True
return False
def ContainsHandlesOrInterfaces(kind):
"""Check if the kind contains any handles.
This check is recursive so it checks all struct fields, containers elements,
etc.
Args:
kind: {Kind} The kind to check.
Returns:
{bool}: True if the kind contains handles or interfaces.
"""
# We remember the types we already checked to avoid infinite recursion when
# checking recursive (or mutually recursive) types:
checked = set()
def Check(kind):
if kind.spec in checked:
return False
checked.add(kind.spec)
if IsStructKind(kind):
return any(Check(field.kind) for field in kind.fields)
elif IsUnionKind(kind):
return any(Check(field.kind) for field in kind.fields)
elif IsAnyHandleKind(kind):
return True
elif IsAnyInterfaceKind(kind):
return True
elif IsArrayKind(kind):
return Check(kind.kind)
elif IsMapKind(kind):
return Check(kind.key_kind) or Check(kind.value_kind)
else:
return False
return Check(kind)
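# Rough sketch of intended use (assumption, not original code); 'Wrapper' and
# 'Plain' are hypothetical structs and Struct.AddField is assumed to mirror
# Method.AddParameter:
#
#   wrapper = Struct('Wrapper')
#   wrapper.AddField('pipe', MSGPIPE)
#   ContainsHandlesOrInterfaces(wrapper)   # True - MSGPIPE is a handle kind
#
#   plain = Struct('Plain')
#   plain.AddField('count', UINT32)
#   ContainsHandlesOrInterfaces(plain)     # False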
def ContainsNativeTypes(kind):
"""Check if the kind contains any native type (struct or enum).
This check is recursive so it checks all struct fields, scoped interface
enums, etc.
Args:
kind: {Kind} The kind to check.
Returns:
{bool}: True if the kind contains native types.
"""
# We remember the types we already checked to avoid infinite recursion when
# checking recursive (or mutually recursive) types:
checked = set()
def Check(kind):
if kind.spec in checked:
return False
checked.add(kind.spec)
if IsEnumKind(kind):
return kind.native_only
elif IsStructKind(kind):
if kind.native_only:
return True
if any(enum.native_only for enum in kind.enums):
return True
return any(Check(field.kind) for field in kind.fields)
elif IsUnionKind(kind):
return any(Check(field.kind) for field in kind.fields)
elif IsInterfaceKind(kind):
return any(enum.native_only for enum in kind.enums)
elif IsArrayKind(kind):
return Check(kind.kind)
elif IsMapKind(kind):
return Check(kind.key_kind) or Check(kind.value_kind)
else:
return False
return Check(kind)
| bsd-3-clause | -1,952,731,669,021,165,600 | 28.686823 | 80 | 0.659958 | false | 3.692938 | false | false | false |
mattnenterprise/servo | tests/wpt/web-platform-tests/tools/gitignore/gitignore.py | 8 | 4131 | import itertools
import re
import os
end_space = re.compile(r"([^\\]\s)*$")
def fnmatch_translate(pat, path_name=False):
parts = []
seq = False
i = 0
if pat[0] == "/" or path_name:
parts.append("^")
any_char = "[^/]"
if pat[0] == "/":
pat = pat[1:]
else:
any_char = "."
parts.append("^(?:.*/)?")
if pat[-1] == "/":
# If the last character is / match this directory or any subdirectory
pat = pat[:-1]
suffix = "(?:/|$)"
else:
suffix = "$"
while i < len(pat):
c = pat[i]
if c == "\\":
if i < len(pat) - 1:
i += 1
c = pat[i]
parts.append(re.escape(c))
else:
raise ValueError
elif seq:
if c == "]":
seq = False
# First two cases are to deal with the case where / is the only character
# in the sequence but path_name is True so it shouldn't match anything
if parts[-1] == "[":
parts = parts[:-1]
elif parts[-1] == "^" and parts[-2] == "[":
parts = parts[:-2]
else:
parts.append(c)
elif c == "-":
parts.append(c)
elif not (path_name and c == "/"):
parts += re.escape(c)
elif c == "[":
parts.append("[")
if i < len(pat) - 1 and pat[i+1] in ("!", "^"):
parts.append("^")
i += 1
seq = True
elif c == "*":
if i < len(pat) - 1 and pat[i+1] == "*":
parts.append(any_char + "*")
i += 1
if i < len(pat) - 1 and pat[i+1] == "*":
raise ValueError
else:
parts.append(any_char + "*")
elif c == "?":
parts.append(any_char)
else:
parts.append(re.escape(c))
i += 1
if seq:
raise ValueError
parts.append(suffix)
try:
return re.compile("".join(parts))
except Exception:
raise
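# Quick illustration (not in the original file): the function returns a compiled
# regular expression that is matched against repository-relative paths.
#
#   >>> fnmatch_translate("*.pyc").match("tools/foo.pyc") is not None
#   True
#   >>> fnmatch_translate("build", path_name=True).pattern
#   '^build$'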
def parse_line(line):
line = line.rstrip()
if not line or line[0] == "#":
return
invert = line[0] == "!"
if invert:
line = line[1:]
dir_only = line[-1] == "/"
if dir_only:
line = line[:-1]
return invert, dir_only, fnmatch_translate(line, dir_only)
class PathFilter(object):
def __init__(self, root, extras=None):
if root:
ignore_path = os.path.join(root, ".gitignore")
else:
ignore_path = None
if not ignore_path and not extras:
self.trivial = True
return
self.trivial = False
self.rules_file = []
self.rules_dir = []
if extras is None:
extras = []
if ignore_path and os.path.exists(ignore_path):
self._read_ignore(ignore_path)
for item in extras:
self._read_line(item)
def _read_ignore(self, ignore_path):
with open(ignore_path) as f:
for line in f:
self._read_line(line)
def _read_line(self, line):
parsed = parse_line(line)
if not parsed:
return
invert, dir_only, regexp = parsed
if dir_only:
self.rules_dir.append((regexp, invert))
else:
self.rules_file.append((regexp, invert))
def __call__(self, path):
if os.path.sep != "/":
path = path.replace(os.path.sep, "/")
if self.trivial:
return True
path_is_dir = path[-1] == "/"
if path_is_dir:
path = path[:-1]
rules = self.rules_dir
else:
rules = self.rules_file
include = True
for regexp, invert in rules:
if not include and invert and regexp.match(path):
include = True
elif include and not invert and regexp.match(path):
include = False
return include
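# Example of the intended behaviour (illustrative; patterns are hypothetical):
#
#   >>> keep = PathFilter(None, extras=["*.pyc", "!keep.pyc"])
#   >>> keep("tools/module.pyc")
#   False
#   >>> keep("tools/keep.pyc")
#   True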
| mpl-2.0 | 3,534,001,207,476,998,700 | 25.824675 | 89 | 0.446139 | false | 4.00679 | false | false | false |
riggsd/davies | davies/pockettopo/__init__.py | 1 | 16239 | """
davies.pockettopo: Module for parsing and working with exported PocketTopo survey data
"""
from __future__ import division
from __future__ import print_function
import re
import codecs
import logging
from datetime import datetime
from collections import OrderedDict, defaultdict
log = logging.getLogger(__name__)
__all__ = 'TxtFile', 'Survey', 'MergingSurvey', 'Shot', 'PocketTopoTxtParser'
# TODO: properly handle zero-length shots with both from/to (station equivalence)
# TODO: older versions didn't specify units?
class Shot(OrderedDict):
"""
Representation of a single shot in a PocketTopo Survey.
:kwarg FROM: (str) from station
:kwarg TO: (str) optional to station
:kwarg LENGTH: (float) distance
:kwarg AZM: (float) compass
:kwarg INC: (float) inclination
:kwarg COMMENT: (str)
:kwarg declination: (float) optional
:ivar declination: (float) set or get the applied magnetic declination for the shot
"""
def __init__(self, *args, **kwargs):
self.declination = kwargs.pop('declination', 0.0)
OrderedDict.__init__(self, *args, **kwargs)
self.dupe_count = 1 # denotes averaged backsights (2) and triple-shots (3)
@property
def azm(self):
"""Corrected azimuth, taking into account declination."""
return self.get('AZM', -0.0) + self.declination
@property
def inc(self):
"""Corrected inclination."""
return self.get('INC', -0.0)
@property
def length(self):
"""Corrected distance."""
return self.get('LENGTH', -0.0)
@property
def is_splay(self):
"""Is this shot a "splay shot"?"""
return self.get('TO', None) in (None, '')
def __str__(self):
return ', '.join('%s=%s' % (k,v) for (k,v) in self.items())
def __repr__(self):
return '%s(%s)' % (self.__class__.__name__, self)
class Survey(object):
"""
Representation of a PocketTopo Survey object. A Survey is a container for :class:`Shot` objects.
"""
def __init__(self, name=None, date=None, comment=None, declination=0.0, cave_name=None, length_units='m', angle_units=360, shots=None):
self.name = name
self.date = date
self.comment = comment
self.declination = declination
self.cave_name = cave_name
self.length_units = length_units
self.angle_units = angle_units
self.shots = []
self.splays = defaultdict(list)
if shots:
[self.add_shot(shot) for shot in shots]
def add_shot(self, shot):
"""Add a Shot to :attr:`shots`, applying our survey's :attr:`declination` to it."""
shot.declination = self.declination
if shot.is_splay:
self.splays[shot['FROM']].append(shot)
self.shots.append(shot)
@property
def length(self):
"""Total surveyed cave length, not including splays."""
return sum([shot.length for shot in self.shots if not shot.is_splay])
@property
def total_length(self):
"""Total surveyed length including splays."""
return sum([shot.length for shot in self.shots])
def __len__(self):
return len(self.shots)
def __iter__(self):
for shot in self.shots:
yield shot
def __contains__(self, item):
for shot in self.shots:
if item in (shot.get('FROM', None), shot.get('TO', None)):
return True
return False
def __str__(self):
return self.name
def __repr__(self):
return '%s(%s)' % (self.__class__.__name__, self.name)
# def _serialize(self):
# return []
class MergingSurvey(Survey):
"""
Representation of a PocketTopo Survey object. A Survey is a container for :class:`Shot` objects.
This Survey implementation merges "duplicate" shots into a single averaged shot.
PocketTopo (and DistoX) convention is to use triple forward shots for mainline survey. When
adding a new shot to this class with `add_shot()`, if we detect that the previous shot was
between the same two stations, we average values and merge the two together instead of appending
the duplicate shot. We use a "running" mean algorithm, so that this feature works for any number
of subsequent duplicate shots (two, three, four...).
"""
# For performance, we only look backwards at the immediately preceding shots!
def _inverse_azm(self, azm):
"""Convert forward AZM to back AZM and vice versa"""
return (azm + self.angle_units/2) % self.angle_units
def _inverse_inc(self, inc):
"""Convert forward INC to back INC and vice versa"""
return -1 * inc
def add_shot(self, shot):
"""
Add a shot dictionary to :attr:`shots`, applying our survey's :attr:`declination`, and
optionally averaging and merging with duplicate previous shot.
"""
if not self.shots or not shot.get('TO', None) or not self.shots[-1].get('TO', None):
return super(MergingSurvey, self).add_shot(shot)
from_, to = shot['FROM'], shot['TO']
prev_shot = self.shots[-1]
prev_from, prev_to = prev_shot['FROM'], prev_shot['TO']
if from_ == prev_from and to == prev_to:
# dupe shot! calculate iterative "running" mean and merge into the previous shot
total_count = prev_shot.dupe_count + 1
log.debug('Merging %d shots "%s" <- "%s"', total_count, prev_shot, shot)
if abs(shot['AZM'] - prev_shot['AZM']) > 2.0:
log.warning('Merged forward AZM disagreement of %0.1f for "%s" <- "%s"', abs(shot['AZM'] - prev_shot['AZM']), prev_shot, shot)
if abs(shot['INC'] - prev_shot['INC']) > 2.0:
log.warning('Merged forward INC disagreement of %0.1f for "%s" <- "%s"', abs(shot['INC'] - prev_shot['INC']), prev_shot, shot)
if abs(shot['LENGTH'] - prev_shot['LENGTH']) > 1.0:
log.warning('Merged forward LENGTH disagreement of %0.1f for "%s" <- "%s"', abs(shot['LENGTH'] - prev_shot['LENGTH']), prev_shot, shot)
avg_length = (prev_shot['LENGTH'] * prev_shot.dupe_count + shot['LENGTH']) / total_count
avg_azm = (prev_shot['AZM'] * prev_shot.dupe_count + shot['AZM']) / total_count
avg_inc = (prev_shot['INC'] * prev_shot.dupe_count + shot['INC']) / total_count
merged_comments = ('%s %s' % (prev_shot.get('COMMENT', '') or '', shot.get('COMMENT', '') or '')).strip() or None
prev_shot['LENGTH'], prev_shot['AZM'], prev_shot['INC'], prev_shot['COMMENT'] = avg_length, avg_azm, avg_inc, merged_comments
prev_shot.dupe_count += 1
elif from_ == prev_to and to == prev_from:
# backsight! we do the same iterative "running" mean rather than assuming a single forward and single back
total_count = prev_shot.dupe_count + 1
inv_azm, inv_inc = self._inverse_azm(shot['AZM']), self._inverse_inc(shot['INC'])
log.debug('Merging %d backsights "%s" <- "%s"', total_count, prev_shot, shot)
if abs(inv_azm - prev_shot['AZM']) > 2.0:
log.warning('Backsight AZM disagreement of %0.1f for "%s" <- "%s"', abs(inv_azm - prev_shot['AZM']), prev_shot, shot)
if abs(inv_inc - prev_shot['INC']) > 2.0:
log.warning('Backsight INC disagreement of %0.1f for "%s" <- "%s"', abs(inv_inc - prev_shot['INC']), prev_shot, shot)
if abs(shot['LENGTH'] - prev_shot['LENGTH']) > 1.0:
log.warning('Backsight LENGTH disagreement of %0.1f for "%s" <- "%s"', abs(shot['LENGTH'] - prev_shot['LENGTH']), prev_shot, shot)
avg_length = (prev_shot['LENGTH'] * prev_shot.dupe_count + shot['LENGTH']) / total_count
avg_azm = (prev_shot['AZM'] * prev_shot.dupe_count + inv_azm) / total_count
avg_inc = (prev_shot['INC'] * prev_shot.dupe_count + inv_inc) / total_count
merged_comments = ('%s %s' % (prev_shot.get('COMMENT', '') or '', shot.get('COMMENT', '') or '')).strip() or None
prev_shot['LENGTH'], prev_shot['AZM'], prev_shot['INC'], prev_shot['COMMENT'] = avg_length, avg_azm, avg_inc, merged_comments
prev_shot.dupe_count += 1
else:
# a new, different shot; no merge
return super(MergingSurvey, self).add_shot(shot)
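# Minimal sketch (not part of the module) of how triple forward shots between
# the same pair of stations collapse into one averaged shot; station names and
# readings below are made up:
#
#   survey = MergingSurvey(name='1', declination=2.5)
#   for azm in (100.0, 101.0, 100.5):
#       survey.add_shot(Shot([('FROM', 'A1'), ('TO', 'A2'), ('LENGTH', 5.0),
#                             ('AZM', azm), ('INC', -3.0)]))
#   len(survey.shots)   # 1 - the three readings were averaged together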
class UTMLocation(object):
"""
Represents a UTM-based coordinate for Reference Point.
Note that PocketTopo doesn't support UTM Zones.
:ivar easting: (float)
:ivar northing: (float)
:ivar elevation: (float) meters
:ivar comment: (str)
"""
def __init__(self, easting, northing, elevation=0.0, comment=None):
self.easting = easting
self.northing = northing
self.elevation = elevation
self.altitude = elevation # alias
self.comment = comment
@property
def __geo_interface__(self):
return {'type': 'Point', 'coordinates': (self.easting, self.northing, self.elevation)}
def __str__(self):
return "<UTM %0.1fE %0.1fN %0.1fm>" % (self.easting, self.northing, self.elevation)
class TxtFile(object):
"""
Representation of a PocketTopo .TXT File. A TxtFile is a container for :class:`Survey` objects.
:ivar name: (string) the TxtFile's "name"
:ivar length_units: (string) `m` (default) or `feet`
:ivar angle_units: (int) `360` for degrees (default) or `400` for grads
:ivar surveys: (list of :class:`Survey`)
:ivar reference_points: (dict of :class:`UTMLocation` by station)
"""
def __init__(self, name=None, length_units='m', angle_units=360):
self.name = name
if length_units not in ('m', 'feet'):
raise Exception('Length units must be either \'m\' for meters (default) or \'feet\' for feet')
self.length_units = length_units
if angle_units not in (360, '360', 400, '400'):
raise Exception('Angle units must be either `360` for degrees (default) or `400` for grads')
self.angle_units = int(angle_units)
self.surveys = []
self.reference_points = OrderedDict()
def add_survey(self, survey):
"""Add a :class:`Survey` to :attr:`surveys`."""
survey.length_units = self.length_units
survey.angle_units = self.angle_units
self.surveys.append(survey)
def add_reference_point(self, station, utm_location):
"""Add a :class:`UTMLocation` to :attr:`reference_points`."""
self.reference_points[station] = utm_location
@property
def length(self):
"""Total surveyed length."""
return sum([survey.length for survey in self.surveys])
def __len__(self):
return len(self.surveys)
def __iter__(self):
for survey in self.surveys:
yield survey
def __contains__(self, item):
for survey in self.surveys:
if item == survey.name or item == survey:
return True
return False
def __getitem__(self, item):
for survey in self.surveys:
if item == survey.name or item == survey:
return survey
raise KeyError(item)
@staticmethod
def read(fname, merge_duplicate_shots=False, encoding='windows-1252'):
"""Read a PocketTopo .TXT file and produce a `TxtFile` object which represents it"""
return PocketTopoTxtParser(fname, merge_duplicate_shots, encoding).parse()
# def write(self, outf):
# """Write a `Survey` to the specified .DAT file"""
# with codecs.open(outf, 'wb', 'windows-1252') as outf:
# for survey in self.surveys:
# outf.write('\r\n'.join(survey._serialize()))
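# Typical entry point (illustrative; the filename is hypothetical):
#
#   txt = TxtFile.read('cave_survey.txt', merge_duplicate_shots=True)
#   for survey in txt:
#       print(survey.name, survey.length, txt.length_units)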
class PocketTopoTxtParser(object):
"""Parses the PocketTopo .TXT file format"""
def __init__(self, txtfilename, merge_duplicate_shots=False, encoding='windows-1252'):
self.txtfilename = txtfilename
self.merge_duplicate_shots = merge_duplicate_shots
self.encoding = encoding
def parse(self):
"""Produce a `TxtFile` object from the .TXT file"""
log.debug('Parsing PocketTopo .TXT file %s ...', self.txtfilename)
SurveyClass = MergingSurvey if self.merge_duplicate_shots else Survey
txtobj = None
with codecs.open(self.txtfilename, 'rb', self.encoding) as txtfile:
lines = txtfile.read().splitlines()
# first line is cave name and units
first_line_re = re.compile(r'^([\w\s]*)\(([\w\s]*),([\w\s]*)')
first_line = lines.pop(0)
cave_name, length_units, angle_units = first_line_re.search(first_line).groups()
cave_name, angle_units = cave_name.strip(), int(angle_units)
txtobj = TxtFile(cave_name, length_units, angle_units)
while not lines[0]:
lines.pop(0) # skip blanks
# next block identifies surveys (trip) metadata
while lines[0].startswith('['):
toks = lines.pop(0).split(None, 3)
id, date, declination = toks[:3]
id = id.strip('[]:')
date = datetime.strptime(date, '%Y/%m/%d').date()
declination = float(declination)
comment = toks[3].strip('"') if len(toks) == 4 else ''
survey = SurveyClass(id, date, comment, declination, cave_name)
txtobj.add_survey(survey)
while not lines[0]:
lines.pop(0) # skip blanks
# finally actual survey data
while lines:
line = lines.pop(0).strip()
if not line:
continue
if '"' in line:
line, comment = line.split('"', 1)
comment = comment.rstrip('"')
else:
comment = None
if '[' not in line:
# this is either a Reference Point or a zero-length fake shot
toks = line.split()
if len(toks) != 4:  # neither a reference point nor a zero-length fake shot
log.debug('Skipping unrecognized shot: %s %s', line, '"%s"' % comment if comment else '')
continue
station, vals = toks[0], list(map(float, toks[1:]))
if vals[0] == 0.0: # fake shot
log.debug('Skipping zero-length shot: %s %s', line, '"%s"' % comment if comment else '')
else: # reference point
easting, northing, altitude = vals
reference_point = UTMLocation(easting, northing, altitude, comment)
log.debug('Reference point: %s', reference_point)
txtobj.add_reference_point(station, reference_point)
continue
line, survey_id = line.split('[')
survey_id = survey_id.rstrip().rstrip(']')
toks = line.split()
from_to, (length, azm, inc) = toks[:-3], (float(tok) for tok in toks[-3:])
if len(from_to) == 2:
from_, to = tuple(from_to) # shot
elif len(from_to) == 1:
from_, to = from_to[0], None # splay
elif not from_to and length == 0.0:
continue # skip junk zero-length placeholder shots
else:
raise Exception()
shot = Shot([('FROM',from_), ('TO',to), ('LENGTH',length), ('AZM',azm), ('INC',inc), ('COMMENT',comment)])
txtobj[survey_id].add_shot(shot)
return txtobj
if __name__ == '__main__':
import sys
logging.basicConfig(level=logging.DEBUG)
for fname in sys.argv[1:]:
txtfile = PocketTopoTxtParser(fname, merge_duplicate_shots=True).parse()
print('%s (%s, %d)' % (txtfile.name, txtfile.length_units, txtfile.angle_units))
for survey in txtfile:
print('\t', '[%s] %s (%0.1f %s)' % (survey.name, survey.comment, survey.length, txtfile.length_units))
for shot in survey:
print('\t\t', shot)
| mit | 6,452,339,638,069,648,000 | 38.899263 | 151 | 0.573188 | false | 3.66073 | false | false | false |
red-hat-storage/errata-tool | errata_tool/tests/cli/test_main_cli.py | 2 | 1472 | import sys
import pytest
from errata_tool.cli import main
import errata_tool.cli.release
from errata_tool.connector import ErrataConnector
class CallRecorder(object):
def __call__(self, *args):
self.args = args
def test_short_help(monkeypatch):
monkeypatch.setattr(sys, 'argv', ['errata-tool', '-h'])
with pytest.raises(SystemExit):
main.main()
def test_help(monkeypatch):
monkeypatch.setattr(sys, 'argv', ['errata-tool', '--help'])
with pytest.raises(SystemExit):
main.main()
def test_prod_connector(monkeypatch):
argv = ['errata-tool', 'release', 'get', 'rhceph-2.4']
monkeypatch.setattr(sys, 'argv', argv)
monkeypatch.setattr(errata_tool.cli.release, 'get', lambda x: None)
main.main()
expected = 'https://errata.devel.redhat.com'
assert ErrataConnector._url == expected
def test_staging_connector(monkeypatch):
argv = ['errata-tool', '--stage', 'release', 'get', 'rhceph-2.4']
monkeypatch.setattr(sys, 'argv', argv)
monkeypatch.setattr(errata_tool.cli.release, 'get', lambda x: None)
main.main()
expected = 'https://errata.stage.engineering.redhat.com'
assert ErrataConnector._url == expected
def test_dispatch(monkeypatch):
argv = ['errata-tool', 'release', 'get', 'rhceph-2.4']
monkeypatch.setattr(sys, 'argv', argv)
recorder = CallRecorder()
monkeypatch.setattr(errata_tool.cli.release, 'get', recorder)
main.main()
assert recorder.args
| mit | -5,617,476,978,478,982,000 | 29.040816 | 71 | 0.675951 | false | 3.360731 | true | false | false |
mx3L/archivczsk | build/plugin/src/gui/poster.py | 1 | 6972 | import Queue
import os
import shutil
import urlparse
import random
from datetime import datetime
from Plugins.Extensions.archivCZSK import log
from Plugins.Extensions.archivCZSK.engine.tools import util
from Plugins.Extensions.archivCZSK.compat import eConnectCallback
from Components.AVSwitch import AVSwitch
from Tools.LoadPixmap import LoadPixmap
from enigma import eTimer, ePicLoad
class PosterProcessing:
def __init__(self, poster_limit, poster_dir):
self.poster_limit = poster_limit
self.poster_dir = poster_dir
self.got_image_callback = None
self.poster_files = []
self._init_poster_dir()
def _init_poster_dir(self):
if not os.path.isdir(self.poster_dir):
try:
os.makedirs(self.poster_dir)
except Exception:
pass
for filename in os.listdir(self.poster_dir):
file_path = os.path.join(self.poster_dir, filename)
try:
if os.path.isfile(file_path) or os.path.islink(file_path):
os.unlink(file_path)
elif os.path.isdir(file_path):
shutil.rmtree(file_path)
except Exception as e:
log.error('Failed to delete %s. Reason: %s' % (file_path, e))
def _remove_oldest_poster_file(self):
_, path = self.poster_files.pop(0)
log.debug("PosterProcessing._remove_oldest_poster_file: {0}".format(path))
try:
os.unlink(path)
except Exception as e:
log.error("PosterProcessing._remove_oldest_poster_file: {0}".format(str(e)))
def _create_poster_path(self):
dt = datetime.now()
filename = datetime.strftime(dt, "poster_%y_%m_%d__%H_%M_%S")
filename += "_"+ str(random.randint(1,9)) + ".jpg"
dest = os.path.join(self.poster_dir, filename)
return dest
def _image_downloaded(self, url, path):
if path is None:
return
if len(self.poster_files) == self.poster_limit:
log.debug("PosterProcessing._image_downloaded: download limit reached({0})".format(self.poster_limit))
self._remove_oldest_poster_file()
log.debug("PosterProcessing._image_downloaded: {0}".format(path))
self.poster_files.append((url, path))
self.got_image_callback(url, path)
def get_image_file(self, poster_url):
if os.path.isfile(poster_url):
log.debug("PosterProcessing.get_image_file: found poster path (local)")
return poster_url
for idx, (url, path) in enumerate(self.poster_files):
if (url == poster_url):
print "PosterProcessing.get_image_file: found poster path on position {0}/{1}".format(idx, self.poster_limit)
return path
from Plugins.Extensions.archivCZSK.settings import USER_AGENT
headers = {"User-Agent": USER_AGENT }
util.download_to_file_async(util.toString(poster_url), self._create_poster_path(), self._image_downloaded, headers=headers, timeout=3)
return None
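# Usage sketch (assumption, not original code): the first call for a remote URL
# returns None and starts an async download; got_image_callback fires once the
# file is cached locally.
#
#   processing = PosterProcessing(poster_limit=10, poster_dir='/tmp/posters')
#   processing.got_image_callback = lambda url, path: log.debug("cached %s" % path)
#   cached = processing.get_image_file('http://example.org/poster.jpg')
#   # cached is None until the download finishes and the callback runs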
class PosterPixmapHandler:
def __init__(self, poster_widget, poster_processing, no_image_path):
self.poster_widget = poster_widget
self.poster_processing = poster_processing
self.poster_processing.got_image_callback = self._got_image_data
self.no_image_path = no_image_path
self._decoding_url = None
self._decoding_path = None
self.last_decoded_url = None
self.last_selected_url = None
self.picload = ePicLoad()
self.picload_conn = eConnectCallback(self.picload.PictureData, self._got_picture_data)
self.retry_timer = eTimer()
self.retry_timer_conn = eConnectCallback(self.retry_timer.timeout, self._decode_current_image)
self._max_retry_times = 3
self._retry_times = 0
def __del__(self):
log.debug("PosterImageHandler.__del__")
self.retry_timer.stop()
del self.retry_timer_conn
del self.retry_timer
del self.picload_conn
del self.picload
def _got_image_data(self, url, path):
self._start_decode_image(url, path)
def _decode_current_image(self):
if self._retry_times < self._max_retry_times:
self._retry_times += 1
self._start_decode_image(self.last_selected_url, self._decoding_path)
else:
self._start_decode_image(None, self.no_image_path)
self._retry_times = 0
self.retry_timer.stop()
def _start_decode_image(self, url, path):
log.debug("PosterImageHandler._start_decode_image: {0}".format(path))
if self._decode_image(path):
log.debug("PosterImageHandler._start_decode_image: started...")
self.retry_timer.stop()
self._decoding_path = None
self._decoding_url = url
else:
log.debug("PosterImageHandler._start_decode_image: failed...")
self._decoding_path = path
self.retry_timer.start(200)
def _decode_image(self, path):
wsize = self.poster_widget.instance.size()
sc = AVSwitch().getFramebufferScale()
self.picload.setPara((wsize.width(), wsize.height(),
sc[0], sc[1], False, 1, "#ff000000"))
self.last_decoded_url = None
return 0 == self.picload.startDecode(util.toString(path))
def _got_picture_data(self, picInfo=None):
picPtr = self.picload.getData()
if picPtr is not None:
log.debug("PosterImageHandler._got_picture_data, success")
self.poster_widget.instance.setPixmap(picPtr)
self.last_decoded_url = self._decoding_url
else:
log.error("PosterImageHandler._got_picture_data, failed")
self.last_decoded_url = None
self._decoding_url = None
def set_image(self, url):
log.debug("PosterImageHandler.set_image: {0}".format(url))
if self.last_selected_url:
if self.last_selected_url == url:
log.debug("PosterImageHandler.set_image: same url as before")
return
self.last_selected_url = url
if self.last_decoded_url:
if self.last_decoded_url == url:
log.debug("PosterImageHandler.set_image: same decoded url as before")
return
self.retry_timer.stop()
if url is None:
imgPtr = LoadPixmap(path=self.no_image_path, cached=True)
if imgPtr:
self.poster_widget.instance.setPixmap(imgPtr)
else:
path = self.poster_processing.get_image_file(url)
log.debug("PosterImageHandler.set_image: path={0}".format(path))
self.poster_widget.instance.setPixmap(None)
self.last_decoded_url = None
# sync
if path is not None:
self._start_decode_image(url, path)
| gpl-2.0 | 3,024,275,769,700,276,700 | 37.733333 | 142 | 0.607143 | false | 3.752422 | false | false | false |
QualiSystems/Azure-Shell | package/cloudshell/cp/azure/domain/services/task_waiter.py | 1 | 2523 | from datetime import datetime, timedelta
import time
from cloudshell.cp.azure.common.exceptions.quali_timeout_exception import QualiTimeoutException
class TaskWaiterService(object):
def __init__(self, cancellation_service):
"""
:param cancellation_service: cloudshell.cp.azure.domain.services.command_cancellation.CommandCancellationService
"""
self.cancellation_service = cancellation_service
def wait_for_task(self, operation_poller, cancellation_context, wait_time=30, logger=None):
"""Wait for Azure operation end
:param operation_poller: msrestazure.azure_operation.AzureOperationPoller instance
:param cancellation_context cloudshell.shell.core.driver_context.CancellationContext instance
:param wait_time: (int) seconds to wait before polling request
:return: Azure Operation Poller result
"""
while not operation_poller.done():
if logger:
logger.info('Waiting for poller, current status is {0}'.format(operation_poller.status()))
self.cancellation_service.check_if_cancelled(cancellation_context)
time.sleep(wait_time)
return operation_poller.result()
def wait_for_task_with_timeout(self, operation_poller, cancellation_context, wait_time=30, timeout=1800,
logger=None):
"""Wait for Azure operation end
:param timeout: (int) overall timeout in seconds; QualiTimeoutException is raised when exceeded
:param operation_poller: msrestazure.azure_operation.AzureOperationPoller instance
:param cancellation_context cloudshell.shell.core.driver_context.CancellationContext instance
:param wait_time: (int) seconds to wait before polling request
:return: Azure Operation Poller result
"""
datetime_now = datetime.now()
next_time = datetime_now + timedelta(seconds=timeout)
while not operation_poller.done() and (datetime_now < next_time):
self.cancellation_service.check_if_cancelled(cancellation_context)
if logger:
logger.info('Waiting for poller, current status is {0}'.format(operation_poller.status()))
time.sleep(wait_time)
datetime_now = datetime.now()
if not operation_poller.done() and (datetime_now > next_time):
if logger:
logger.warn('Had a timeout, current status in poller is: {0}'.format(operation_poller.status()))
raise QualiTimeoutException()
return operation_poller.result()
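# Minimal sketch of intended use (not part of the original class); 'poller',
# 'cancellation_context' and 'logger' are assumed to be supplied by the caller:
#
#   waiter = TaskWaiterService(cancellation_service)
#   result = waiter.wait_for_task_with_timeout(poller,
#                                              cancellation_context,
#                                              wait_time=30,
#                                              timeout=1800,
#                                              logger=logger)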
| apache-2.0 | -7,264,038,260,741,346,000 | 43.263158 | 120 | 0.674594 | false | 4.426316 | false | false | false |
hajicj/muscima | scripts/recode_cropobjects.py | 1 | 9362 | #!/usr/bin/env python
"""This is a script that ensures the specified CropObjectList files are
formatted according to the current conventions:
* Uses ``<Top>`` and ``<Left>``, not ``<X>`` and ``<Y>``
* Does not use ``<Selected>``
* Does not use ``<MLClassId>``
You can either provide a root directory, individual files, and ``--outdir``,
which takes the files together with their filenames and creates the re-coded
copies in the output directory (including paths), or you can provide
``--inplace`` and the script modifies the file in-place.
Example::
recode_xy_to_topleft.py -r /my/data/cropobjects -i /my/data/cropobjects/*.xml
-o /my/data/recoded-cropobjects
"""
from __future__ import print_function, unicode_literals
from builtins import zip
from builtins import str
import argparse
import copy
import logging
import os
import time
from muscima.io import parse_cropobject_list, export_cropobject_list
__version__ = "0.0.1"
__author__ = "Jan Hajic jr."
##############################################################################
def get_document_namespace(filename, root=None, output_dir=None):
"""Derives the document namespace for a CropObjectList file
with the given filename, optionally with a given root
and output dir.
In fact, only takes ``os.path.splitext(os.path.basename(filename))[0]``.
"""
return os.path.splitext(os.path.basename(filename))[0]
def recode_ids(cropobjects,
document_namespace,
dataset_namespace):
"""Recode all IDs of the given CropObjects, so that they are (hopefully)
globally unique. That is, from e.g. ``611``, we get
``MUSCIMA++_1.0::CVC-MUSCIMA_W-35_N-08_D-ideal::611.
Creates new CropObjects, does *not* modify the input in-place.
:param cropobjects: A list of CropObject instances.
:param document_namespace: An identifier of the given
CropObjectList. It should be unique for each dataset,
i.e. ``absolute_dataset_namespace``.
:param dataset_namespace: An identifier of the given
dataset. It should be globally unique (which is impossible
to guarantee, but at least within further versions of MUSCIMA++,
it should hold).
"""
output_cropobjects = []
for c in cropobjects:
c_out = copy.deepcopy(c)
uid = c.UID_DELIMITER.join([dataset_namespace,
document_namespace,
str(c.objid)])
c_out.set_uid(uid)
output_cropobjects.append(c_out)
return output_cropobjects
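# Usage sketch (not part of the original script; the filename is hypothetical):
#
#   cropobjects = parse_cropobject_list('CVC-MUSCIMA_W-35_N-08_D-ideal.xml')
#   recoded = recode_ids(cropobjects,
#                        document_namespace='CVC-MUSCIMA_W-35_N-08_D-ideal',
#                        dataset_namespace='MUSCIMA-pp_1.0')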
##############################################################################
def build_argument_parser():
parser = argparse.ArgumentParser(description=__doc__, add_help=True,
formatter_class=argparse.RawDescriptionHelpFormatter)
parser.add_argument('-r', '--root', action='store', default=None,
help='Root directory of the CropObjectList files.'
' The paths of the input files will be interpreted'
' relative to this directory in order to place'
' the output files, unless \'--inplace\' is given.'
' If \'--output_dir\' is given but \'--root\''
' is not, the full input paths are used'
' as-is.')
parser.add_argument('-o', '--output_dir', action='store',
help='Output directory for the recoded files.'
' Equivalent role to the \'--root\': if you'
' supply a file /my/root/subdir/somefile.xml,'
' root /my/root/, and output dir /my/output, the'
' output file will be /my/output/subdir/somefile.xml.'
' If the output dir does not exist, it will be'
' created')
parser.add_argument('-i', '--input_files', action='store', nargs='+',
help='Input files. Full paths, *including* root dir:'
' the root is only there for retaining directory'
' structure, if applicable. (This is because you'
' will probably want to use shell wildcards, and'
' it would not work if you did not supply the'
' full paths to the input directory/directories.)')
parser.add_argument('--inplace', action='store_true',
help='Modify input files in-place.')
parser.add_argument('--recode_uids', action='store_true',
help='Add UIDs to CropObjects. The dataset namespace'
' is given by \'--uid_global_namespace\', the'
' document namespace is derived from filenames'
' (as basename, without filetype extension).')
parser.add_argument('--uid_dataset_namespace', action='store',
default='MUSCIMA-pp_1.0',
help='If UIDs are getting added, this is their global'
' namespace.')
parser.add_argument('-v', '--verbose', action='store_true',
help='Turn on INFO messages.')
parser.add_argument('--debug', action='store_true',
help='Turn on DEBUG messages.')
return parser
def main(args):
logging.info('Starting main...')
_start_time = time.clock()
##########################################################################
logging.info('Converting to absolute paths...')
root = None
if args.root is not None:
root = os.path.abspath(args.root)
output_dir = os.path.abspath(args.output_dir)
input_files = [os.path.abspath(f) for f in args.input_files]
logging.info('Root: {0}'.format(root))
logging.info('Output: {0}'.format(output_dir))
logging.info('Example input: {0}'.format(input_files[0]))
##########################################################################
# Get output filenames,
# fail on non-corresponding input file and root.
logging.info('Inferring output pathnames...')
if args.inplace:
output_files = input_files
else:
if args.root is None:
relative_files = input_files
else:
len_root = len(root)
relative_files = []
for f in input_files:
if not os.path.samefile(os.path.commonpath([f, root]),
root):
raise ValueError('Input file {0} does not seem to'
' come from the root directory {1}.'
''.format(f, root))
relative_files.append(f[len_root+1:])
# Ensure output dir exists
if not os.path.isdir(output_dir):
os.mkdir(output_dir)
logging.debug('Making output file names. Output dir: {0}'.format(output_dir))
logging.debug('Example rel file: {0}'.format(relative_files[0]))
logging.debug('Ex. output: {0}'.format(os.path.join(output_dir, relative_files[0])))
output_files = [os.path.join(output_dir, f)
for f in relative_files]
logging.debug('Local Example output file: {0}'.format(output_files[0]))
logging.info('Example output file: {0}'.format(output_files[0]))
##########################################################################
# Parse cropobjects
logging.info('Parsing cropobject files ({0} total)...'.format(len(input_files)))
cropobjects_for_files = []
for i, f in enumerate(input_files):
cropobjects_for_files.append(parse_cropobject_list(f))
if (i > 0) and (i % 10 == 0):
logging.info('Parsed {0} files.'.format(i))
if args.recode_uids:
dataset_namespace = args.uid_dataset_namespace
document_namespace = get_document_namespace(filename=f,
root=root,
output_dir=output_dir)
recoded_cropobjects = recode_ids(cropobjects_for_files[-1],
document_namespace=document_namespace,
dataset_namespace=dataset_namespace)
cropobjects_for_files[-1] = recoded_cropobjects
##########################################################################
logging.info('Exporting cropobjects...')
_i = 0
for output_file, c in zip(output_files, cropobjects_for_files):
s = export_cropobject_list(c)
with open(output_file, 'w') as hdl:
hdl.write(s)
hdl.write('\n')
_i += 1
if (_i % 10) == 0:
logging.info('Done: {0} files'.format(_i))
_end_time = time.clock()
logging.info('recode_cropobjects.py done in {0:.3f} s'.format(_end_time - _start_time))
if __name__ == '__main__':
parser = build_argument_parser()
args = parser.parse_args()
if args.verbose:
logging.basicConfig(format='%(levelname)s: %(message)s', level=logging.INFO)
if args.debug:
logging.basicConfig(format='%(levelname)s: %(message)s', level=logging.DEBUG)
main(args)
| mit | 4,823,466,878,793,929,000 | 41.554545 | 93 | 0.537172 | false | 4.228546 | false | false | false |
TheNathanBlack/tell-me-to | app/main/views.py | 1 | 1758 | from flask import render_template
from . import main
@main.route('/')
@main.route('/name/<name>/')
@main.route('/<one>/name/<name>')
@main.route('/<one>/<two>/name/<name>')
@main.route('/<one>/<two>/<three>/name/<name>')
@main.route('/<one>/<two>/<three>/<four>/name/<name>')
@main.route('/<one>/<two>/<three>/<four>/<five>/name/<name>')
@main.route('/<one>/<two>/<three>/<four>/<five>/<six>/name/<name>')
@main.route('/<one>/<two>/<three>/<four>/<five>/<six>/<seven>/name/<name>')
@main.route('/<one>/<two>/<three>/<four>/<five>/<six>/<seven>/<eight>/name/<name>')
@main.route('/<one>/<two>/<three>/<four>/<five>/<six>/<seven>/<eight>/<nine>/name/<name>')
@main.route('/<one>/<two>/<three>/<four>/<five>/<six>/<seven>/<eight>/<nine>/<ten>/name/<name>')
@main.route('/<one>')
@main.route('/<one>/<two>')
@main.route('/<one>/<two>/<three>')
@main.route('/<one>/<two>/<three>/<four>')
@main.route('/<one>/<two>/<three>/<four>/<five>')
@main.route('/<one>/<two>/<three>/<four>/<five>/<six>')
@main.route('/<one>/<two>/<three>/<four>/<five>/<six>/<seven>')
@main.route('/<one>/<two>/<three>/<four>/<five>/<six>/<seven>/<eight>')
@main.route('/<one>/<two>/<three>/<four>/<five>/<six>/<seven>/<eight>/<nine>')
@main.route('/<one>/<two>/<three>/<four>/<five>/<six>/<seven>/<eight>/<nine>/<ten>')
def index(**kwargs):
name = None
directive = ""
possible_keys=("one", "two", "three", "four", "five", "six", "seven", "eight", "nine", "ten")
if kwargs:
if 'name' in kwargs.keys():
name = kwargs['name']
del kwargs['name']
if kwargs:
directive = ' '.join(kwargs[k] for k in possible_keys[:len(kwargs)])
return render_template('index.html', name=name, directive=directive)
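# Behaviour sketch (illustrative): the path segments before 'name/<name>' become
# the directive, for example:
#
#   /go/to/bed/name/Nathan   ->  name='Nathan', directive='go to bed'
#   /clean/the/house         ->  name=None,     directive='clean the house'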
| mit | -2,252,751,368,226,385,700 | 41.878049 | 100 | 0.57281 | false | 3.167568 | false | false | false |
Eric89GXL/vispy | vispy/color/color_space.py | 1 | 6042 | # -*- coding: utf-8 -*-
# Copyright (c) Vispy Development Team. All Rights Reserved.
# Distributed under the (new) BSD License. See LICENSE.txt for more info.
from __future__ import division # just to be safe...
import numpy as np
###############################################################################
# Utility functions
def _check_color_dim(val):
"""Ensure val is Nx(n_col), usually Nx3"""
val = np.atleast_2d(val)
if val.shape[1] not in (3, 4):
raise RuntimeError('Value must have second dimension of size 3 or 4')
return val, val.shape[1]
###############################################################################
# RGB<->HEX conversion
def _hex_to_rgba(hexs):
"""Convert hex to rgba, permitting alpha values in hex"""
hexs = np.atleast_1d(np.array(hexs, '|U9'))
out = np.ones((len(hexs), 4), np.float32)
for hi, h in enumerate(hexs):
assert isinstance(h, str)
off = 1 if h[0] == '#' else 0
assert len(h) in (6+off, 8+off)
e = (len(h)-off) // 2
out[hi, :e] = [int(h[i:i+2], 16) / 255.
for i in range(off, len(h), 2)]
return out
def _rgb_to_hex(rgbs):
"""Convert rgb to hex triplet"""
rgbs, n_dim = _check_color_dim(rgbs)
return np.array(['#%02x%02x%02x' % tuple((255*rgb[:3]).astype(np.uint8))
for rgb in rgbs], '|U7')
###############################################################################
# RGB<->HSV conversion
def _rgb_to_hsv(rgbs):
"""Convert Nx3 or Nx4 rgb to hsv"""
rgbs, n_dim = _check_color_dim(rgbs)
hsvs = list()
for rgb in rgbs:
rgb = rgb[:3] # don't use alpha here
idx = np.argmax(rgb)
val = rgb[idx]
c = val - np.min(rgb)
if c == 0:
hue = 0
sat = 0
else:
if idx == 0: # R == max
hue = ((rgb[1] - rgb[2]) / c) % 6
elif idx == 1: # G == max
hue = (rgb[2] - rgb[0]) / c + 2
else: # B == max
hue = (rgb[0] - rgb[1]) / c + 4
hue *= 60
sat = c / val
hsv = [hue, sat, val]
hsvs.append(hsv)
hsvs = np.array(hsvs, dtype=np.float32)
if n_dim == 4:
hsvs = np.concatenate((hsvs, rgbs[:, 3:]), axis=1)
return hsvs
def _hsv_to_rgb(hsvs):
"""Convert Nx3 or Nx4 hsv to rgb"""
hsvs, n_dim = _check_color_dim(hsvs)
# In principle, we *might* be able to vectorize this, but might as well
# wait until a compelling use case appears
rgbs = list()
for hsv in hsvs:
c = hsv[1] * hsv[2]
m = hsv[2] - c
hp = hsv[0] / 60
x = c * (1 - abs(hp % 2 - 1))
if 0 <= hp < 1:
r, g, b = c, x, 0
elif hp < 2:
r, g, b = x, c, 0
elif hp < 3:
r, g, b = 0, c, x
elif hp < 4:
r, g, b = 0, x, c
elif hp < 5:
r, g, b = x, 0, c
else:
r, g, b = c, 0, x
rgb = [r + m, g + m, b + m]
rgbs.append(rgb)
rgbs = np.array(rgbs, dtype=np.float32)
if n_dim == 4:
rgbs = np.concatenate((rgbs, hsvs[:, 3:]), axis=1)
return rgbs
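# Quick sanity check (not part of the module): a pure colour should round-trip
# through the HSV conversion unchanged.
#
#   >>> rgb = np.array([[1.0, 0.0, 0.0]])
#   >>> np.allclose(_hsv_to_rgb(_rgb_to_hsv(rgb)), rgb)
#   True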
###############################################################################
# RGB<->CIELab conversion
# These numbers are adapted from MIT-licensed MATLAB code for
# Lab<->RGB conversion. They provide an XYZ<->RGB conversion matrices,
# w/D65 white point normalization built in.
#_rgb2xyz = np.array([[0.412453, 0.357580, 0.180423],
# [0.212671, 0.715160, 0.072169],
# [0.019334, 0.119193, 0.950227]])
#_white_norm = np.array([0.950456, 1.0, 1.088754])
#_rgb2xyz /= _white_norm[:, np.newaxis]
#_rgb2xyz_norm = _rgb2xyz.T
_rgb2xyz_norm = np.array([[0.43395276, 0.212671, 0.01775791],
[0.37621941, 0.71516, 0.10947652],
[0.18982783, 0.072169, 0.87276557]])
#_xyz2rgb = np.array([[3.240479, -1.537150, -0.498535],
# [-0.969256, 1.875992, 0.041556],
# [0.055648, -0.204043, 1.057311]])
#_white_norm = np.array([0.950456, 1., 1.088754])
#_xyz2rgb *= _white_norm[np.newaxis, :]
_xyz2rgb_norm = np.array([[3.07993271, -1.53715, -0.54278198],
[-0.92123518, 1.875992, 0.04524426],
[0.05289098, -0.204043, 1.15115158]])
def _rgb_to_lab(rgbs):
rgbs, n_dim = _check_color_dim(rgbs)
# convert RGB->XYZ
xyz = rgbs[:, :3].copy() # a misnomer for now but will end up being XYZ
over = xyz > 0.04045
xyz[over] = ((xyz[over] + 0.055) / 1.055) ** 2.4
xyz[~over] /= 12.92
xyz = np.dot(xyz, _rgb2xyz_norm)
over = xyz > 0.008856
xyz[over] = xyz[over] ** (1. / 3.)
xyz[~over] = 7.787 * xyz[~over] + 0.13793103448275862
# Convert XYZ->LAB
L = (116. * xyz[:, 1]) - 16
a = 500 * (xyz[:, 0] - xyz[:, 1])
b = 200 * (xyz[:, 1] - xyz[:, 2])
labs = [L, a, b]
# Append alpha if necessary
if n_dim == 4:
labs.append(np.atleast_1d(rgbs[:, 3]))
labs = np.array(labs, order='F').T # Becomes 'C' order b/c of .T
return labs
def _lab_to_rgb(labs):
"""Convert Nx3 or Nx4 lab to rgb"""
# adapted from BSD-licensed work in MATLAB by Mark Ruzon
# Based on ITU-R Recommendation BT.709 using the D65
labs, n_dim = _check_color_dim(labs)
# Convert Lab->XYZ (silly indexing used to preserve dimensionality)
y = (labs[:, 0] + 16.) / 116.
x = (labs[:, 1] / 500.) + y
z = y - (labs[:, 2] / 200.)
xyz = np.concatenate(([x], [y], [z])) # 3xN
over = xyz > 0.2068966
xyz[over] = xyz[over] ** 3.
xyz[~over] = (xyz[~over] - 0.13793103448275862) / 7.787
# Convert XYZ->LAB
rgbs = np.dot(_xyz2rgb_norm, xyz).T
over = rgbs > 0.0031308
rgbs[over] = 1.055 * (rgbs[over] ** (1. / 2.4)) - 0.055
rgbs[~over] *= 12.92
if n_dim == 4:
rgbs = np.concatenate((rgbs, labs[:, 3:]), axis=1)
rgbs = np.clip(rgbs, 0., 1.)
return rgbs
| bsd-3-clause | -1,485,538,623,175,610,400 | 32.381215 | 79 | 0.487421 | false | 2.862151 | false | false | false |
txomon/vdsm | vdsm/virt/vmxml.py | 1 | 16526 | #
# Copyright 2008-2014 Red Hat, Inc.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
#
# Refer to the README and COPYING files for full details of the license
#
from operator import itemgetter
import xml.dom
import xml.dom.minidom
import xml.etree.ElementTree as etree
from vdsm import constants
from vdsm import utils
import caps
def has_channel(domXML, name):
domObj = etree.fromstring(domXML)
devices = domObj.findall('devices')
if len(devices) == 1:
for chan in devices[0].findall('channel'):
targets = chan.findall('target')
if len(targets) == 1:
if targets[0].attrib['name'] == name:
return True
return False
def all_devices(domXML):
domObj = xml.dom.minidom.parseString(domXML)
devices = domObj.childNodes[0].getElementsByTagName('devices')[0]
for deviceXML in devices.childNodes:
if deviceXML.nodeType == xml.dom.Node.ELEMENT_NODE:
yield deviceXML
def filter_devices_with_alias(devices):
for deviceXML in devices:
aliasElement = deviceXML.getElementsByTagName('alias')
if aliasElement:
alias = aliasElement[0].getAttribute('name')
yield deviceXML, alias
class Device(object):
# since we're inheriting all VM devices from this class, __slots__ must
# be initialized here in order to avoid __dict__ creation
__slots__ = ()
def createXmlElem(self, elemType, deviceType, attributes=()):
"""
Create domxml device element according to passed in params
"""
elemAttrs = {}
element = Element(elemType)
if deviceType:
elemAttrs['type'] = deviceType
for attrName in attributes:
if not hasattr(self, attrName):
continue
attr = getattr(self, attrName)
if isinstance(attr, dict):
element.appendChildWithArgs(attrName, **attr)
else:
elemAttrs[attrName] = attr
element.setAttrs(**elemAttrs)
return element
class Element(object):
def __init__(self, tagName, text=None, **attrs):
self._elem = xml.dom.minidom.Document().createElement(tagName)
self.setAttrs(**attrs)
if text is not None:
self.appendTextNode(text)
def __getattr__(self, name):
return getattr(self._elem, name)
def setAttrs(self, **attrs):
for attrName, attrValue in attrs.iteritems():
self._elem.setAttribute(attrName, attrValue)
def appendTextNode(self, text):
textNode = xml.dom.minidom.Document().createTextNode(text)
self._elem.appendChild(textNode)
def appendChild(self, element):
self._elem.appendChild(element)
def appendChildWithArgs(self, childName, text=None, **attrs):
child = Element(childName, text, **attrs)
self._elem.appendChild(child)
return child
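# Illustrative sketch (not part of the original module): building a small device
# element with the Element helper; the network name is made up.
#
#   graphics = Element('graphics', type='vnc', port='-1')
#   graphics.appendChildWithArgs('listen', type='network', network='vdsm-net')
#   graphics.toxml()   # '<graphics port="-1" type="vnc"><listen ... /></graphics>'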
class Domain(object):
def __init__(self, conf, log, arch):
"""
Create the skeleton of a libvirt domain xml
<domain type="kvm">
<name>vmName</name>
<uuid>9ffe28b6-6134-4b1e-8804-1185f49c436f</uuid>
<memory>262144</memory>
<currentMemory>262144</currentMemory>
<vcpu current='smp'>160</vcpu>
<devices>
</devices>
</domain>
"""
self.conf = conf
self.log = log
self.arch = arch
self.doc = xml.dom.minidom.Document()
if utils.tobool(self.conf.get('kvmEnable', 'true')):
domainType = 'kvm'
else:
domainType = 'qemu'
domainAttrs = {'type': domainType}
# Hack around libvirt issue BZ#988070, this is going to be removed as
# soon as the domain XML format supports the specification of USB
# keyboards
if self.arch == caps.Architecture.PPC64:
domainAttrs['xmlns:qemu'] = \
'http://libvirt.org/schemas/domain/qemu/1.0'
self.dom = Element('domain', **domainAttrs)
self.doc.appendChild(self.dom)
self.dom.appendChildWithArgs('name', text=self.conf['vmName'])
self.dom.appendChildWithArgs('uuid', text=self.conf['vmId'])
if 'numOfIoThreads' in self.conf:
self.dom.appendChildWithArgs('iothreads',
text=str(self.conf['numOfIoThreads']))
memSizeKB = str(int(self.conf.get('memSize', '256')) * 1024)
self.dom.appendChildWithArgs('memory', text=memSizeKB)
self.dom.appendChildWithArgs('currentMemory', text=memSizeKB)
vcpu = self.dom.appendChildWithArgs('vcpu', text=self._getMaxVCpus())
vcpu.setAttrs(**{'current': self._getSmp()})
self._devices = Element('devices')
self.dom.appendChild(self._devices)
def appendClock(self):
"""
Add <clock> element to domain:
<clock offset="variable" adjustment="-3600">
<timer name="rtc" tickpolicy="catchup">
</clock>
for hyperv:
<clock offset="variable" adjustment="-3600">
<timer name="hypervclock">
</clock>
"""
m = Element('clock', offset='variable',
adjustment=str(self.conf.get('timeOffset', 0)))
if utils.tobool(self.conf.get('hypervEnable', 'false')):
m.appendChildWithArgs('timer', name='hypervclock')
else:
m.appendChildWithArgs('timer', name='rtc', tickpolicy='catchup')
m.appendChildWithArgs('timer', name='pit', tickpolicy='delay')
if self.arch == caps.Architecture.X86_64:
m.appendChildWithArgs('timer', name='hpet', present='no')
self.dom.appendChild(m)
def appendOs(self):
"""
Add <os> element to domain:
<os>
<type arch="x86_64" machine="pc">hvm</type>
<boot dev="cdrom"/>
<kernel>/tmp/vmlinuz-2.6.18</kernel>
<initrd>/tmp/initrd-2.6.18.img</initrd>
<cmdline>ARGs 1</cmdline>
<smbios mode="sysinfo"/>
</os>
"""
oselem = Element('os')
self.dom.appendChild(oselem)
DEFAULT_MACHINES = {caps.Architecture.X86_64: 'pc',
caps.Architecture.PPC64: 'pseries',
caps.Architecture.PPC64LE: 'pseries'}
machine = self.conf.get('emulatedMachine', DEFAULT_MACHINES[self.arch])
oselem.appendChildWithArgs('type', text='hvm', arch=self.arch,
machine=machine)
qemu2libvirtBoot = {'a': 'fd', 'c': 'hd', 'd': 'cdrom', 'n': 'network'}
for c in self.conf.get('boot', ''):
oselem.appendChildWithArgs('boot', dev=qemu2libvirtBoot[c])
if self.conf.get('initrd'):
oselem.appendChildWithArgs('initrd', text=self.conf['initrd'])
if self.conf.get('kernel'):
oselem.appendChildWithArgs('kernel', text=self.conf['kernel'])
if self.conf.get('kernelArgs'):
oselem.appendChildWithArgs('cmdline', text=self.conf['kernelArgs'])
if self.arch == caps.Architecture.X86_64:
oselem.appendChildWithArgs('smbios', mode='sysinfo')
if utils.tobool(self.conf.get('bootMenuEnable', False)):
oselem.appendChildWithArgs('bootmenu', enable='yes')
def appendSysinfo(self, osname, osversion, serialNumber):
"""
Add <sysinfo> element to domain:
<sysinfo type="smbios">
<bios>
<entry name="vendor">QEmu/KVM</entry>
<entry name="version">0.13</entry>
</bios>
<system>
<entry name="manufacturer">Fedora</entry>
<entry name="product">Virt-Manager</entry>
<entry name="version">0.8.2-3.fc14</entry>
<entry name="serial">32dfcb37-5af1-552b-357c-be8c3aa38310</entry>
<entry name="uuid">c7a5fdbd-edaf-9455-926a-d65c16db1809</entry>
</system>
</sysinfo>
"""
sysinfoelem = Element('sysinfo', type='smbios')
self.dom.appendChild(sysinfoelem)
syselem = Element('system')
sysinfoelem.appendChild(syselem)
def appendEntry(k, v):
syselem.appendChildWithArgs('entry', text=v, name=k)
appendEntry('manufacturer', constants.SMBIOS_MANUFACTURER)
appendEntry('product', osname)
appendEntry('version', osversion)
appendEntry('serial', serialNumber)
appendEntry('uuid', self.conf['vmId'])
def appendFeatures(self):
"""
Add machine features to domain xml.
Currently only
<features>
<acpi/>
            </features>
for hyperv:
<features>
<acpi/>
<hyperv>
<relaxed state='on'/>
</hyperv>
            </features>
"""
if (utils.tobool(self.conf.get('acpiEnable', 'true')) or
utils.tobool(self.conf.get('hypervEnable', 'false'))):
features = self.dom.appendChildWithArgs('features')
if utils.tobool(self.conf.get('acpiEnable', 'true')):
features.appendChildWithArgs('acpi')
if utils.tobool(self.conf.get('hypervEnable', 'false')):
hyperv = Element('hyperv')
features.appendChild(hyperv)
hyperv.appendChildWithArgs('relaxed', state='on')
# turns off an internal Windows watchdog, and by doing so avoids
# some high load BSODs.
hyperv.appendChildWithArgs('vapic', state='on')
                # magic number taken from recommendations. References:
# https://bugzilla.redhat.com/show_bug.cgi?id=1083529#c10
# https://bugzilla.redhat.com/show_bug.cgi?id=1053846#c0
hyperv.appendChildWithArgs(
'spinlocks', state='on', retries='8191')
def appendCpu(self):
"""
Add guest CPU definition.
<cpu match="exact">
<model>qemu64</model>
<topology sockets="S" cores="C" threads="T"/>
<feature policy="require" name="sse2"/>
<feature policy="disable" name="svm"/>
</cpu>
"""
cpu = Element('cpu')
        if self.arch in (caps.Architecture.X86_64,):
cpu.setAttrs(match='exact')
features = self.conf.get('cpuType', 'qemu64').split(',')
model = features[0]
if model == 'hostPassthrough':
cpu.setAttrs(mode='host-passthrough')
elif model == 'hostModel':
cpu.setAttrs(mode='host-model')
else:
cpu.appendChildWithArgs('model', text=model)
            # This hack is for backward compatibility as libvirt
            # does not allow a 'qemu64' guest on Intel hardware
if model == 'qemu64' and '+svm' not in features:
features += ['-svm']
for feature in features[1:]:
# convert Linux name of feature to libvirt
if feature[1:6] == 'sse4_':
feature = feature[0] + 'sse4.' + feature[6:]
featureAttrs = {'name': feature[1:]}
if feature[0] == '+':
featureAttrs['policy'] = 'require'
elif feature[0] == '-':
featureAttrs['policy'] = 'disable'
cpu.appendChildWithArgs('feature', **featureAttrs)
if ('smpCoresPerSocket' in self.conf or
'smpThreadsPerCore' in self.conf):
maxVCpus = int(self._getMaxVCpus())
cores = int(self.conf.get('smpCoresPerSocket', '1'))
threads = int(self.conf.get('smpThreadsPerCore', '1'))
cpu.appendChildWithArgs('topology',
sockets=str(maxVCpus / cores / threads),
cores=str(cores), threads=str(threads))
# CPU-pinning support
# see http://www.ovirt.org/wiki/Features/Design/cpu-pinning
if 'cpuPinning' in self.conf:
cputune = Element('cputune')
cpuPinning = self.conf.get('cpuPinning')
for cpuPin in cpuPinning.keys():
cputune.appendChildWithArgs('vcpupin', vcpu=cpuPin,
cpuset=cpuPinning[cpuPin])
self.dom.appendChild(cputune)
# Guest numa topology support
# see http://www.ovirt.org/Features/NUMA_and_Virtual_NUMA
if 'guestNumaNodes' in self.conf:
numa = Element('numa')
guestNumaNodes = sorted(
self.conf.get('guestNumaNodes'), key=itemgetter('nodeIndex'))
for vmCell in guestNumaNodes:
nodeMem = int(vmCell['memory']) * 1024
numa.appendChildWithArgs('cell',
cpus=vmCell['cpus'],
memory=str(nodeMem))
cpu.appendChild(numa)
self.dom.appendChild(cpu)
# Guest numatune support
def appendNumaTune(self):
"""
Add guest numatune definition.
<numatune>
<memory mode='strict' nodeset='0-1'/>
</numatune>
"""
if 'numaTune' in self.conf:
numaTune = self.conf.get('numaTune')
if 'nodeset' in numaTune.keys():
mode = numaTune.get('mode', 'strict')
numatune = Element('numatune')
numatune.appendChildWithArgs('memory', mode=mode,
nodeset=numaTune['nodeset'])
self.dom.appendChild(numatune)
def _appendAgentDevice(self, path, name):
"""
<channel type='unix'>
<target type='virtio' name='org.linux-kvm.port.0'/>
<source mode='bind' path='/tmp/socket'/>
</channel>
"""
channel = Element('channel', type='unix')
channel.appendChildWithArgs('target', type='virtio', name=name)
channel.appendChildWithArgs('source', mode='bind', path=path)
self._devices.appendChild(channel)
def appendInput(self):
"""
Add input device.
<input bus="ps2" type="mouse"/>
"""
if utils.tobool(self.conf.get('tabletEnable')):
inputAttrs = {'type': 'tablet', 'bus': 'usb'}
else:
if self.arch == caps.Architecture.PPC64:
mouseBus = 'usb'
else:
mouseBus = 'ps2'
inputAttrs = {'type': 'mouse', 'bus': mouseBus}
self._devices.appendChildWithArgs('input', **inputAttrs)
def appendKeyboardDevice(self):
"""
Add keyboard device for ppc64 using a QEMU argument directly.
This is a workaround to the issue BZ#988070 in libvirt
<qemu:commandline>
<qemu:arg value='-usbdevice'/>
<qemu:arg value='keyboard'/>
</qemu:commandline>
"""
commandLine = Element('qemu:commandline')
commandLine.appendChildWithArgs('qemu:arg', value='-usbdevice')
commandLine.appendChildWithArgs('qemu:arg', value='keyboard')
self.dom.appendChild(commandLine)
def appendEmulator(self):
emulatorPath = '/usr/bin/qemu-system-' + self.arch
emulator = Element('emulator', text=emulatorPath)
self._devices.appendChild(emulator)
def appendDeviceXML(self, deviceXML):
self._devices.appendChild(
xml.dom.minidom.parseString(deviceXML).firstChild)
def toxml(self):
return self.doc.toprettyxml(encoding='utf-8')
def _getSmp(self):
return self.conf.get('smp', '1')
def _getMaxVCpus(self):
return self.conf.get('maxVCpus', self._getSmp())
| gpl-2.0 | -1,802,004,618,766,356,000 | 33.429167 | 79 | 0.571463 | false | 4.021903 | false | false | false |
ttm/music | music/core/classes.py | 1 | 5983 | from .. import tables
from .functions import AD, V, W, H_
import numpy as n
H = H_
T = tables.Basic()
V_ = V
n_ = n
def ADV(note_dict={}, adsr_dict={}):
return AD(sonic_vector=V_(**note_dict), **adsr_dict)
class Being:
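    """A simple musical agent: builds sequences of synthesis parameters
    (frequency, duration, vibrato, waveform, ADSR) via walk()/stay() and
    renders them to notes with render()."""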
def __init__(self):
rhythm = [1.] # repetition of one second
rhythm2 = [1/2, 1/2] # repetition of one second
rhythm3 = [1/3, 1/3, 1/3] # repetition of one second
        rhythm4 = [1/4, 1/4, 1/4, 1/4] # repetition of one second
# assume duration = 1 (be 1 second, minute or whatnot):
rhythmic_spectrum = [ [1./i]*i for i in range(1,300) ]
# pitch or frequency sequences (to be used at will)
f = 110
freqs = [220]
freq_spectrum = [i*f for i in range(1, 300)]
neg_spec = [f/i for i in range(2,300)]
freq_sym = [[f*2**((i*j)/12) for i in range(j)] for j in [2,3,4,6]]
freq_sym_ = [[f*2**((i*j)/12) for i in range(300)] for j in [2,3,4,6]]
dia = [2,2,1,2,2,2,1]
notes_diatonic = [[dia[(j+i)%7] for i in range(7)] for j in range(7)]
notes_diatonic_ = [sum(notes_diatonic[i]) for i in range(7)]
freq_diatonic = [[f*2**( (12*i + notes_diatonic_[j])/12) for i in range(30)] for j in range(7)]
intensity_octaves = [[10**((i*10)/(j*20)) for i in range(300)] for j in range(1,20)] # steps of 10db - 1/2 dB
db0=10**(-120/20)
intensity_spec = [[db0*i for i in j] for j in intensity_octaves]
# diatonic noise, noises derived from the symmetric scales etc: one sinusoid or other basic waveform in each note.
# Synth on the freq domain to optimize and simplify the process
# make music of the spheres using ellipses and relations recalling gravity
self.resources = locals()
self.startBeing()
def walk(self, n, method='straight'):
        # walk n steps up (n<0 => walk |n| steps down, n==0 => don't move, return [])
if method == 'straight':
# ** TTM
sequence = [self.grid[self.pointer + i] for i in range(n)]
self.pointer += n
elif method == 'low-high':
sequence = [ self.grid[ self.pointer + i % (self.seqsize + 1) + i // self.seqsize ] for i in range(n*self.seqsize) ]
elif method == 'perm-walk':
# restore walk from 02peal
pass
self.addSeq(sequence)
def setPar(self, par='f'):
# set parameter to be developed in walks and stays
if par == 'f':
self.grid = self.fgrid
self.pointer = self.fpointer
def setSize(self, ss):
self.seqsize = ss
def setPerms(self, perms):
self.perms = perms
def stay(self, n, method='perm'):
# stay somewhere for n notes (n<0 => stay for n cycles or n permutations)
if method == 'straight':
sequence = [self.grid[(self.pointer + i) % self.seqsize] for i in range(n)]
elif method == 'perm':
# ** TTM
sequence = []
if type(self.domain) != n_.ndarray:
if not self.domain:
domain = self.grid[self.pointer : self.pointer + self.seqsize]
else:
domain = n_.array(self.domain)
print("Implemented OK?? TTM")
else:
domain = self.domain
# nel = self.perms[0].size # should match self.seqsize ?
count = 0
while len(sequence) < n:
perm = self.perms[count % len(self.perms)]
seq = perm(domain)
sequence.extend(seq)
count += 1
sequence = sequence[:n]
self.addSeq(sequence)
self.total_notes += n
def addSeq(self, sequence):
if type(self.__dict__[self.curseq]) == list:
self.__dict__[self.curseq].extend(sequence)
else:
self.__dict__[self.curseq] = H(self.__dict__[self.curseq], sequence)
def render(self, nn, fn=False):
# Render nn notes of the Being!
# Render with legatto, with V__ or whatever it is called
self.mkArray()
ii = n.arange(nn)
d = self.d_[ii%len(self.d_)]*self.dscale
f = self.f_[ii%len(self.f_)]
tab = self.tab_[ii%len(self.tab_)]
fv = self.fv_[ii%len(self.fv_)]
nu = self.nu_[ii%len(self.nu_)]
A = self.A_[ii%len(self.A_)]
D = self.D_[ii%len(self.D_)]
S = self.S_[ii%len(self.S_)]
R = self.R_[ii%len(self.R_)]
notes = [ADV({'f':ff, 'd':dd, 'fv':fvv, 'nu':nuu, 'tab': tabb}, {'A':AA, 'D': DD, 'S': SS, 'R':RR}) for ff,dd,fvv,nuu,tabb,AA,DD,SS,RR in zip(f, d, fv, nu, tab, A, D, S, R)]
if fn:
if type(fn) != str:
fn = 'abeing.wav'
if fn[-4:] != '.wav':
fn += '.wav'
W(H(*notes), fn)
else:
return H(*notes)
def startBeing(self):
self.dscale = 1
self.d_ = [1]
self.f_ = [220]
self.fv_ = [3]
self.nu_ = [1]
self.tab_ = [T.triangle]
self.A_ = [20]
self.D_ = [20]
self.S_ = [-5]
self.R_ = [50]
self.mkArray()
self.total_notes = 0
def mkArray(self):
self.d_ = n.array(self.d_ )
self.f_ = n.array(self.f_ )
self.fv_ = n.array(self.fv_)
self.nu_ = n.array(self.nu_)
self.tab_ = n.array(self.tab_)
self.A_ = n.array(self.A_)
self.D_ = n.array(self.D_)
self.S_ = n.array(self.S_)
self.R_ = n.array(self.R_)
def howl(self):
        # some sound resembling a toki pona mu, a growl or any other animal noise.
pass
def freeze(self):
# a long sound/note with the parameters set into the being
pass
# use sequences of parameters to be iterated though with or without permutations.
# use the fact that sequences of different sizes might yield longer cycles
| gpl-3.0 | 1,482,018,997,863,734,500 | 37.10828 | 181 | 0.519639 | false | 3.194341 | false | false | false |
mckinziebrandon/DeepChatModels | chatbot/legacy/legacy_models.py | 1 | 20872 | """Sequence-to-sequence models."""
# EDIT: Modified inheritance structure (see _models.py) so these *should* work again.
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import logging
import numpy as np
import tensorflow as tf
from tensorflow.contrib.legacy_seq2seq import embedding_attention_seq2seq
from tensorflow.contrib.legacy_seq2seq import model_with_buckets
#from tensorflow.contrib.rnn.python.ops import core_rnn
from tensorflow.contrib.rnn.python.ops import core_rnn_cell
from tensorflow.python.ops import embedding_ops
from chatbot._models import BucketModel
class ChatBot(BucketModel):
"""Sequence-to-sequence model with attention and for multiple buckets.
The input-to-output path can be thought of (on a high level) as follows:
1. Inputs: Batches of integer lists, where each integer is a
word ID to a pre-defined vocabulary.
2. Embedding: each input integer is mapped to an embedding vector.
Each embedding vector is of length 'layer_size', an argument to __init__.
The encoder and decoder have their own distinct embedding spaces.
3. Encoding: The embedded batch vectors are fed to a multi-layer cell containing GRUs.
4. Attention: At each timestep, the output of the multi-layer cell is saved, so that
the decoder can access them in the manner specified in the paper on
jointly learning to align and translate. (should give a link to paper...)
5. Decoding: The decoder, the same type of embedded-multi-layer cell
as the encoder, is initialized with the last output of the encoder,
the "context". Thereafter, we either feed it a target sequence
(when training) or we feed its previous output as its next input (chatting).
"""
def __init__(self, buckets, dataset, params):
logging.basicConfig(level=logging.INFO)
logger = logging.getLogger('ChatBotLogger')
super(ChatBot, self).__init__(
logger=logger,
buckets=buckets,
dataset=dataset,
params=params)
if len(buckets) > 1:
self.log.error("ChatBot requires len(buckets) be 1 since tensorflow's"
" model_with_buckets function is now deprecated and BROKEN. The only"
"workaround is ensuring len(buckets) == 1. ChatBot apologizes."
"ChatBot also wishes it didn't have to be this way. "
"ChatBot is jealous that DynamicBot does not have these issues.")
raise ValueError("Not allowed to pass buckets with len(buckets) > 1.")
# ==========================================================================================
# Define basic components: cell(s) state, encoder, decoder.
# ==========================================================================================
#cell = tf.contrib.rnn.MultiRNNCell([tf.contrib.rnn.GRUCell(state_size)for _ in range(num_layers)])
cell = tf.contrib.rnn.GRUCell(self.state_size)
self.encoder_inputs = ChatBot._get_placeholder_list("encoder", buckets[-1][0])
self.decoder_inputs = ChatBot._get_placeholder_list("decoder", buckets[-1][1] + 1)
self.target_weights = ChatBot._get_placeholder_list("weight", buckets[-1][1] + 1, tf.float32)
target_outputs = [self.decoder_inputs[i + 1] for i in range(len(self.decoder_inputs) - 1)]
# If specified, sample from subset of full vocabulary size during training.
softmax_loss, output_proj = None, None
if 0 < self.num_samples < self.vocab_size:
softmax_loss, output_proj = ChatBot._sampled_loss(self.num_samples,
self.state_size,
self.vocab_size)
# ==========================================================================================
# Combine the components to construct desired model architecture.
# ==========================================================================================
# The seq2seq function: we use embedding for the input and attention.
def seq2seq_f(encoder_inputs, decoder_inputs):
# Note: the returned function uses separate embeddings for encoded/decoded sets.
# Maybe try implementing same embedding for both.
# Question: the outputs are projected to vocab_size NO MATTER WHAT.
# i.e. if output_proj is None, it uses its own OutputProjectionWrapper instead
# --> How does this affect our model?? A bit misleading imo.
#with tf.variable_scope(scope or "seq2seq2_f") as seq_scope:
return embedding_attention_seq2seq(encoder_inputs, decoder_inputs, cell,
num_encoder_symbols=self.vocab_size,
num_decoder_symbols=self.vocab_size,
embedding_size=self.state_size,
output_projection=output_proj,
feed_previous=self.is_chatting,
dtype=tf.float32)
# Note that self.outputs and self.losses are lists of length len(buckets).
# This allows us to identify which outputs/losses to compute given a particular bucket.
# Furthermore, \forall i < j, len(self.outputs[i]) < len(self.outputs[j]). (same for loss)
self.outputs, self.losses = model_with_buckets(
self.encoder_inputs, self.decoder_inputs,
target_outputs, self.target_weights,
buckets, seq2seq_f,
softmax_loss_function=softmax_loss)
# If decoding, append _projection to true output to the model.
if self.is_chatting and output_proj is not None:
self.outputs = ChatBot._get_projections(len(buckets), self.outputs, output_proj)
with tf.variable_scope("summaries"):
self.summaries = {}
for i, loss in enumerate(self.losses):
name = "loss{}".format(i)
self.summaries[name] = tf.summary.scalar("loss{}".format(i), loss)
def step(self, encoder_inputs, decoder_inputs, target_weights, bucket_id, forward_only=False):
"""Run a step of the model.
Args:
encoder_inputs: list of numpy int vectors to feed as encoder inputs.
decoder_inputs: list of numpy int vectors to feed as decoder inputs.
target_weights: list of numpy float vectors to feed as target weights.
bucket_id: which bucket of the model to use.
Returns:
[summary, gradient_norms, loss, outputs]
"""
encoder_size, decoder_size = self.buckets[bucket_id]
super(ChatBot, self).check_input_lengths(
[encoder_inputs, decoder_inputs, target_weights],
[encoder_size, decoder_size, decoder_size])
input_feed = {}
for l in range(encoder_size):
input_feed[self.encoder_inputs[l].name] = encoder_inputs[l]
for l in range(decoder_size):
input_feed[self.decoder_inputs[l].name] = decoder_inputs[l]
input_feed[self.target_weights[l].name] = target_weights[l]
input_feed[self.decoder_inputs[decoder_size].name] = np.zeros([self.batch_size],
dtype=np.int32)
if not forward_only: # Not just for decoding; also for validating in training.
fetches = [self.summaries["loss{}".format(bucket_id)],
self.apply_gradients[bucket_id], # Update Op that does SGD.
self.losses[bucket_id]] # Loss for this batch.
outputs = self.sess.run(fetches=fetches, feed_dict=input_feed)
return outputs[0], None, outputs[2], None # Summary, no gradients, loss, outputs.
else:
fetches = [self.losses[bucket_id]] # Loss for this batch.
for l in range(decoder_size): # Output logits.
fetches.append(self.outputs[bucket_id][l])
outputs = self.sess.run(fetches=fetches, feed_dict=input_feed)
return None, None, outputs[0], outputs[1:] # No summary, no gradients, loss, outputs.
@staticmethod
def _sampled_loss(num_samples, hidden_size, vocab_size):
"""Defines the samples softmax loss op and the associated output _projection.
Args:
num_samples: (context: importance sampling) size of subset of outputs for softmax.
hidden_size: number of units in the individual recurrent states.
vocab_size: number of unique output words.
Returns:
sampled_loss, apply_projection
- function: sampled_loss(labels, inputs)
- apply_projection: transformation to full vocab space, applied to decoder output.
"""
assert(0 < num_samples < vocab_size)
# Define the standard affine-softmax transformation from hidden_size -> vocab_size.
# True output (for a given bucket) := tf.matmul(decoder_out, w) + b
w_t = tf.get_variable("proj_w", [vocab_size, hidden_size], dtype=tf.float32)
w = tf.transpose(w_t)
b = tf.get_variable("proj_b", [vocab_size], dtype=tf.float32)
output_projection = (w, b)
def sampled_loss(labels, inputs):
labels = tf.reshape(labels, [-1, 1])
return tf.nn.sampled_softmax_loss(
weights=w_t,
biases=b,
labels=labels,
inputs=inputs,
num_sampled=num_samples,
num_classes=vocab_size)
return sampled_loss, output_projection
@staticmethod
def _get_projections(num_buckets, unprojected_vals, projection_operator):
"""Apply _projection operator to unprojected_vals, a tuple of length num_buckets.
:param num_buckets: the number of projections that will be applied.
:param unprojected_vals: tuple of length num_buckets.
:param projection_operator: (in the mathematical meaning) tuple of shape unprojected_vals.shape[-1].
:return: tuple of length num_buckets, with entries the same shape as entries in unprojected_vals, except for the last dimension.
"""
projected_vals = unprojected_vals
for b in range(num_buckets):
projected_vals[b] = [tf.matmul(output, projection_operator[0]) + projection_operator[1]
for output in unprojected_vals[b]]
return projected_vals
@staticmethod
def _get_placeholder_list(name, length, dtype=tf.int32):
"""
Args:
name: prefix of name of each tf.placeholder list item, where i'th name is [name]i.
length: number of items (tf.placeholders) in the returned list.
Returns:
list of tensorflow placeholder of dtype=tf.int32 and unspecified shape.
"""
return [tf.placeholder(dtype, shape=[None], name=name+str(i)) for i in range(length)]
class SimpleBot(BucketModel):
"""Primitive implementation from scratch, for learning purposes.
1. Inputs: same as ChatBot.
2. Embedding: same as ChatBot.
3. BasicEncoder: Single GRUCell.
4. DynamicDecoder: Single GRUCell.
"""
def __init__(self, dataset, params):
# SimpleBot allows user to not worry about making their own buckets.
# SimpleBot does that for you. SimpleBot cares.
max_seq_len = dataset.max_seq_len
buckets = [(max_seq_len // 2, max_seq_len // 2), (max_seq_len, max_seq_len)]
logging.basicConfig(level=logging.INFO)
logger = logging.getLogger('SimpleBotLogger')
super(SimpleBot, self).__init__(
logger=logger,
buckets=buckets,
dataset=dataset,
params=params)
# ==========================================================================================
# Create placeholder lists for encoder/decoder sequences.
# ==========================================================================================
with tf.variable_scope("placeholders"):
self.encoder_inputs = [tf.placeholder(tf.int32, shape=[None], name="encoder"+str(i))
for i in range(self.max_seq_len)]
self.decoder_inputs = [tf.placeholder(tf.int32, shape=[None], name="decoder"+str(i))
for i in range(self.max_seq_len+1)]
self.target_weights = [tf.placeholder(tf.float32, shape=[None], name="weight"+str(i))
for i in range(self.max_seq_len+1)]
# ==========================================================================================
# Before bucketing, need to define the underlying model(x, y) -> outputs, state(s).
# ==========================================================================================
def seq2seq(encoder_inputs, decoder_inputs, scope=None):
"""Builds basic encoder-decoder model and returns list of (2D) output tensors."""
with tf.variable_scope(scope or "seq2seq"):
encoder_cell = tf.contrib.rnn.GRUCell(self.state_size)
encoder_cell = tf.contrib.rnn.EmbeddingWrapper(encoder_cell, self.vocab_size, self.state_size)
# BasicEncoder(raw_inputs) -> Embed(raw_inputs) -> [be an RNN] -> encoder state.
_, encoder_state = tf.contrib.rnn.static_rnn(encoder_cell, encoder_inputs, dtype=tf.float32)
with tf.variable_scope("decoder"):
def loop_function(x):
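                    # Greedy decoding helper: embed the argmax of the previous
                    # output so it can be fed back as the next decoder input
                    # when chatting (see the is_chatting branch below).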
with tf.variable_scope("loop_function"):
params = tf.get_variable("embed_tensor", [self.vocab_size, self.state_size])
return embedding_ops.embedding_lookup(params, tf.argmax(x, 1))
_decoder_cell = tf.contrib.rnn.GRUCell(self.state_size)
_decoder_cell = tf.contrib.rnn.EmbeddingWrapper(_decoder_cell, self.vocab_size, self.state_size)
# Dear TensorFlow: you should replace the 'reuse' param in
# OutputProjectionWrapper with 'scope' and just do scope.reuse in __init__.
# sincerely, programming conventions.
decoder_cell = tf.contrib.rnn.OutputProjectionWrapper(
_decoder_cell, self.vocab_size, reuse=tf.get_variable_scope().reuse)
decoder_outputs = []
prev = None
decoder_state = None
for i, dec_inp in enumerate(decoder_inputs):
if self.is_chatting and prev is not None:
dec_inp = loop_function(tf.reshape(prev, [1, 1]))
if i == 0:
output, decoder_state = decoder_cell(dec_inp, encoder_state,
scope=tf.get_variable_scope())
else:
tf.get_variable_scope().reuse_variables()
output, decoder_state = decoder_cell(dec_inp, decoder_state,
scope=tf.get_variable_scope())
decoder_outputs.append(output)
return decoder_outputs
# ====================================================================================
# Now we can build a simple bucketed seq2seq model.
# ====================================================================================
self.losses = []
self.outputs = []
values = self.encoder_inputs + self.decoder_inputs + self.decoder_inputs
with tf.name_scope("simple_bucket_model", values):
for idx_b, bucket in enumerate(buckets):
# Reminder: you should never explicitly set reuse=False. It's a no-no.
with tf.variable_scope(tf.get_variable_scope(), reuse=True if idx_b > 0 else None)\
as bucket_scope:
# The outputs for this bucket are defined entirely by the seq2seq function.
self.outputs.append(seq2seq(
self.encoder_inputs[:bucket[0]],
self.decoder_inputs[:bucket[1]],
scope=bucket_scope))
# Target outputs are just the inputs time-shifted by 1.
target_outputs = [self.decoder_inputs[i + 1]
for i in range(len(self.decoder_inputs) - 1)]
# Compute loss by comparing outputs and target outputs.
self.losses.append(SimpleBot._simple_loss(self.batch_size,
self.outputs[-1],
target_outputs[:bucket[1]],
self.target_weights[:bucket[1]]))
with tf.variable_scope("summaries"):
self.summaries = {}
for i, loss in enumerate(self.losses):
name = "loss{}".format(i)
self.summaries[name] = tf.summary.scalar("loss{}".format(i), loss)
@staticmethod
def _simple_loss(batch_size, logits, targets, weights):
"""Compute weighted cross-entropy loss on softmax(logits)."""
# Note: name_scope only affects names of ops,
# while variable_scope affects both ops AND variables.
with tf.name_scope("simple_loss", values=logits+targets+weights):
log_perplexities = []
for l, t, w in zip(logits, targets, weights):
cross_entropy = tf.nn.sparse_softmax_cross_entropy_with_logits(labels=t, logits=l)
log_perplexities.append(cross_entropy * w)
# Reduce via elementwise-sum.
log_perplexities = tf.add_n(log_perplexities)
# Get weighted-averge by dividing by sum of the weights.
log_perplexities /= tf.add_n(weights) + 1e-12
return tf.reduce_sum(log_perplexities) / tf.cast(batch_size, tf.float32)
def step(self, encoder_inputs, decoder_inputs, target_weights, bucket_id, forward_only=False):
"""Run a step of the model.
Args:
encoder_inputs: list of numpy int vectors to feed as encoder inputs.
decoder_inputs: list of numpy int vectors to feed as decoder inputs.
target_weights: list of numpy float vectors to feed as target weights.
bucket_id: which bucket of the model to use.
Returns:
[summary, gradient_norms, loss, outputs]:
"""
encoder_size, decoder_size = self.buckets[bucket_id]
super(SimpleBot, self).check_input_lengths(
[encoder_inputs, decoder_inputs, target_weights],
[encoder_size, decoder_size, decoder_size])
input_feed = {}
for l in range(encoder_size):
input_feed[self.encoder_inputs[l].name] = encoder_inputs[l]
for l in range(decoder_size):
input_feed[self.decoder_inputs[l].name] = decoder_inputs[l]
input_feed[self.target_weights[l].name] = target_weights[l]
input_feed[self.decoder_inputs[decoder_size].name] = np.zeros([self.batch_size], dtype=np.int32)
# Fetches: the Operations/Tensors we want executed/evaluated during session.run(...).
if not forward_only: # Not just for decoding; also for validating in training.
fetches = [self.summaries["loss{}".format(bucket_id)],
self.apply_gradients[bucket_id], # Update Op that does SGD.
self.losses[bucket_id]] # Loss for this batch.
outputs = self.sess.run(fetches=fetches, feed_dict=input_feed)
return outputs[0], None, outputs[2], None # summaries, No gradient norm, loss, no outputs.
else:
fetches = [self.losses[bucket_id]] # Loss for this batch.
for l in range(decoder_size): # Output logits.
fetches.append(self.outputs[bucket_id][l])
outputs = self.sess.run(fetches=fetches, feed_dict=input_feed)
return None, None, outputs[0], outputs[1:] #No summary, No gradient norm, loss, outputs.
| mit | -301,686,413,445,935,940 | 54.363395 | 136 | 0.55711 | false | 4.457924 | false | false | false |
sejros/The-Nature-of-Python-Examples | chp04_systems/1 ParticleSystem.py | 1 | 3389 | # coding=utf-8
import random
import numpy as np
import pygame
from numpy import array as vector
WIDTH = 800
HEIGHT = 600
BLACK = (0, 0, 0)
WHITE = (255, 255, 255)
GREEN = (0, 255, 0)
RED = (255, 0, 0)
mousepos = np.array([WIDTH / 2, HEIGHT / 2])
is_mouse_down = False
is_rmouse_down = False
class Particle:
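    """A single short-lived particle: position, velocity and a translucent
    rectangle whose alpha fades as its lifespan runs out."""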
def __init__(self, pos):
spread = 1.0
self.position = pos
self.velocity = np.array([random.uniform(-spread, spread),
random.uniform(-spread, spread)])
self.acceleration = np.array([0.0, 0.0])
self.mover = None
self.mass = 1.0
# self.mass = random.uniform(0.5, 2.0)
self.radius = 5 * self.mass
self.lifespan = 75
self.size = vector([random.random() * 10 + 10, random.random() * 5 + 5])
def apply(self, force):
self.acceleration += force / self.mass
def update(self):
self.velocity += self.acceleration
self.position += self.velocity
self.acceleration = np.array([0.0, 0.0])
self.lifespan -= 1
def draw(self, scr):
s = pygame.Surface(self.size, pygame.SRCALPHA) # per-pixel alpha
s.fill((127, 127, 127, (128 - self.lifespan))) # notice the alpha value in the color
pygame.draw.rect(s, (0, 0, 0, ((255 / 75) * self.lifespan)),
[0, 0,
self.size[0], self.size[1]], 3)
# s = pygame.transform.rotate(s, 45)
scr.blit(s, self.position)
class ParticleSystem:
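    """Emits one new Particle per update at the system's origin and discards
    particles whose lifespan has expired."""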
def __init__(self, pos):
self.pos = pos.copy()
self.particles = []
def draw(self, c):
for particle in self.particles:
particle.draw(c)
def update(self):
self.particles.append(Particle(self.pos.copy()))
for particle in self.particles:
particle.update()
if particle.lifespan <= 0:
self.particles.remove(particle)
def run(self, c):
self.update()
self.draw(c)
def apply(self, force):
for particle in self.particles:
particle.apply(force)
pygame.init()
screen = pygame.display.set_mode((WIDTH, HEIGHT))
done = False
clock = pygame.time.Clock()
ps = ParticleSystem(vector([WIDTH / 2, 50]))
def main():
global ps
screen.fill(WHITE)
ps.run(screen)
for system in pss:
system.run(screen)
gravity = np.array([0, 0.1])
ps.apply(gravity)
for system in pss:
system.apply(gravity)
pygame.display.flip()
clock.tick(60)
# drag_coeff = -0.005
# drag = drag_coeff * bob.velocity * np.linalg.norm(bob.velocity)
# bob.apply(drag)
pss = []
while not done:
for event in pygame.event.get():
if event.type == pygame.QUIT:
done = True
elif event.type == pygame.MOUSEMOTION:
mousepos = [event.pos[0], event.pos[1]]
elif event.type == pygame.MOUSEBUTTONDOWN and event.button == 1:
is_mouse_down = True
pss.append(ParticleSystem(mousepos.copy()))
elif event.type == pygame.MOUSEBUTTONUP and event.button == 1:
is_mouse_down = False
elif event.type == pygame.MOUSEBUTTONDOWN and event.button == 3:
is_rmouse_down = True
elif event.type == pygame.MOUSEBUTTONUP and event.button == 3:
is_rmouse_down = False
main()
# root.mainloop()
| mit | -4,929,973,109,470,681,000 | 25.271318 | 93 | 0.57185 | false | 3.454638 | false | false | false |
Habu-Kagumba/mini_matlab | mini_matlab/matrix.py | 1 | 2232 | #!/usr/bin/env python
"""Minimal linear algebra operations to matrices."""
import ast
import numpy as np
input_vars = {}
def save_namespace(filename='.workspace.mat'):
"""Save the current workspace."""
namespace_file = open(filename, 'wb')
namespace_file.write(str(input_vars) + '\n')
namespace_file.close()
def load_namespace(filename='.workspace.mat'):
"""Load saved workspace if any."""
try:
namespace_file = open(filename, 'r')
v = namespace_file.read()
namespace_file.close()
vars = ast.literal_eval(v)
input_vars.update(vars)
except IOError:
pass
class Matrix(object): # noqa
"""Perform basic matrix operations."""
def __init__(self):
"""Initialize the matrix class.
:token: list or string to matrix.
"""
pass
def to_matrix(self, tokens):
"""Transform list or string to matrix."""
tokens = str(tokens)
try:
if tokens.find(';') < 0:
return np.array(tokens)
else:
                tokens = tokens.replace('[', '')
                tokens = tokens.replace(']', '')
return np.matrix(tokens)
except ValueError:
return None
def var_assigner(self, var_dict):
"""Keep track of assigned variables."""
input_vars[str(var_dict[0])] = str(var_dict[1])
def find_variable(self, var):
"""Find the variable value or raise error."""
value = input_vars.get(str(var), None)
return value
def transpose(self, mx):
"""Perform a transpose."""
return np.transpose(mx)
def arith(self, operands):
"""Perform arithmetic operations."""
try:
result = ''
if operands[2] == '+':
result = operands[0] + operands[1]
elif operands[2] == '-':
result = operands[0] - operands[1]
elif operands[2] == '*':
result = np.dot(operands[0], operands[1])
elif operands[0].startswith('inv'):
result = np.linalg.inv(operands[1])
return result
except (TypeError, ValueError, np.linalg.LinAlgError):
return None
| mit | 2,816,978,211,774,502,400 | 26.219512 | 62 | 0.541667 | false | 4.251429 | false | false | false |
tipsybear/actors-simulation | setup.py | 1 | 2859 | #!/usr/bin/env python
# setup
# Setup script for the actors simulation (gvas)
#
# Author: Benjamin Bengfort <[email protected]>
# Created: Thu Nov 05 15:13:02 2015 -0500
#
# Copyright (C) 2015 University of Maryland
# For license information, see LICENSE.txt
#
# ID: setup.py [] [email protected] $
"""
Setup script for the actors simulation (gvas)
"""
##########################################################################
## Imports
##########################################################################
try:
from setuptools import setup
from setuptools import find_packages
except ImportError:
raise ImportError("Could not import \"setuptools\"."
"Please install the setuptools package.")
##########################################################################
## Package Information
##########################################################################
import os

# Read the __init__.py file for version info
version = None
namespace = {}
versfile = os.path.join(os.path.dirname(__file__), "gvas", "__init__.py")
with open(versfile, 'r') as versf:
    exec(versf.read(), namespace)
version = namespace['get_version']()
## Discover the packages
packages = find_packages(where=".", exclude=("tests", "bin", "docs", "fixtures", "register",))
## Load the requirements
requires = []
with open('requirements.txt', 'r') as reqfile:
for line in reqfile:
requires.append(line.strip())
## Define the classifiers
classifiers = (
'Development Status :: 4 - Beta',
'Environment :: Console',
'License :: OSI Approved :: MIT License',
'Natural Language :: English',
'Operating System :: OS Independent',
'Programming Language :: Python',
'Programming Language :: Python :: 2.7',
)
## Define the keywords
keywords = ('simulation', 'actors', 'distributed computing')
## Define the description
long_description = ""
## Define the configuration
config = {
"name": "GVAS Actors Simulation",
"version": version,
"description": "A simulation of the Actor model of communication for a variety of applications.",
"long_description": long_description,
"license": "MIT",
"author": "Benjamin Bengfort, Allen Leis, Konstantinos Xirogiannopoulos",
"author_email": "[email protected], [email protected], [email protected]",
"url": "https://github.com/tipsybear/actors-simulation",
"download_url": 'https://github.com/tipsybear/actors-simulation/tarball/v%s' % version,
"packages": packages,
"install_requires": requires,
"classifiers": classifiers,
"keywords": keywords,
"zip_safe": True,
"scripts": ['simulate.py'],
}
##########################################################################
## Run setup script
##########################################################################
if __name__ == '__main__':
setup(**config)
| mit | -6,837,508,749,406,721,000 | 31.123596 | 101 | 0.562784 | false | 4.119597 | false | false | false |
electric-blue-green/GSCE-Coursework-GTIN | Task 3/Development/1.x/1.0.py | 1 | 2622 | import math, sys, csv, string, os, re
print('GCSE Controlled Assessment A453\nThomas Bass 4869\nTask 2')
database = []
def start(database, orderGtin, orderPos, orderQty, orderName, items):
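    """Read the stock database from task2.csv into memory and hand over to
    find() to take the customer's order."""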
print('Reading File...')
os.getcwd()
filename = 'task2.csv'
filepath = os.path.join(os.getcwd(), filename)
database = []
with open(filename, 'r') as csvfile:
reader = csv.reader(csvfile)
for row in reader:
database.append(row)
find(database, orderGtin, orderPos, orderQty, orderName, items)
def find(database, orderGtin, orderPos, orderQty, orderName, items):
print('Enter the GTIN Number of the product you wish to find')
gtin = input(':')
for value in database:
if gtin in value[0]:
print('Product Found!')
product = database.index(value)
name = database[product][1]
volume = database[product][2]
price = database[product][3]
stock = database[product][4]
if volume.isnumeric() == True:
full = volume+'ml'
else:
full = ''
print('Product Name =', full, name)
print(stock, 'in stock')
print('Enter the quantity you wish to buy')
qty = input(': ')
print(qty, full, name, '@', price)
print('Add to order? Y/N (This can not be un-done)')
add = input(': ')
if add == 'y' or add == 'Y':
orderGtin.append(gtin)
orderPos.append(product)
orderQty.append(qty)
orderName.append(qty+'x'+full+' '+name+' @ £'+price)
items = items + 1
print('Current order')
print(orderName)
editStock(gtin, product, stock, qty)
print('Add another item?(Y/N)')
again = input(': ')
if again == 'y' or again == 'Y':
find(database, orderGtin, orderPos, orderQty, orderName, items)
else:
print('Final order')
print(orderName)
print('Order Shipped!')
else:
            print('Order Cancelled')
            find(database, orderGtin, orderPos, orderQty, orderName, items)
def editStock(gtin, product, stock, qty):
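    """Rewrite task2.csv so that the stock level of the ordered product is
    reduced by the quantity bought."""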
changeTo = int(stock) - int(qty)
changeTo = str(changeTo)
data = open("task2.csv").read()
data = data.split("\n")
for i, s in enumerate(data):
data[i] = data[i].split(",")
data[product][4] = changeTo
for i, s in enumerate(data):
data[i] = str(",".join(data[i]))
data = "\n".join(data)
o = open("task2.csv","w")
o.write(data)
o.close()
orderGtin = []
orderPos = []
orderQty = []
orderName = []
items = 0
start(database, orderGtin, orderPos, orderQty, orderName, items)
| apache-2.0 | -418,448,345,392,284,350 | 29.578313 | 75 | 0.570393 | false | 3.448684 | false | false | false |
WojciechMula/sse-popcount | scripts/function_registry.py | 1 | 1232 | import os
import os.path
from codecs import open
from collections import OrderedDict
class FunctionRegistry(object):
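    """Parses ../function_registry.cpp and exposes the functions registered
    with add()/add_trusted() as an ordered {name: description} mapping."""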
def __init__(self):
self.functions = self.__parse_cpp()
def __parse_cpp(self):
root = os.path.dirname(__file__)
src = os.path.join(root, "../function_registry.cpp")
with open(src) as f:
lines = [line.strip() for line in f]
start = lines.index("// definition start")
end = lines.index("// definition end")
definitions = lines[start + 1:end]
i = 0
L = OrderedDict()
while i < len(definitions):
line = definitions[i]
if line.startswith("add_trusted("):
name = line[len("add_trusted("):][1:-2]
description = definitions[i+1][1:-2]
L[name] = description
i += 2
elif line.startswith("add("):
name = line[len("add("):][1:-2]
description = definitions[i+1][1:-2]
L[name] = description
i += 2
else:
i += 1
return L
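
# Illustrative usage sketch (not part of the original script); it assumes the
# file is run from within the repository so that ../function_registry.cpp can
# be found by FunctionRegistry.__init__:
if __name__ == '__main__':
    registry = FunctionRegistry()
    for name, description in registry.functions.items():
        print("%-40s %s" % (name, description))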
| bsd-2-clause | 8,276,411,022,167,700,000 | 26.377778 | 66 | 0.456981 | false | 4.529412 | false | false | false |
UB-info/estructura-datos | Entrega/model.py | 1 | 1980 | class User:
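    """A user profile (id, age, gender, country) plus the artists played;
    keeps the most-played artist and its share of the total play count as
    the user's relevance."""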
def __init__(self, uid, age, gender, country, songs_played):
self.__uid = uid
self.__age = age
self.__gender = gender
self.__country = country
self.__songs_played = songs_played
#print max(songs_played) == songs_played[0]#debug
self.__most_played = songs_played[0]#assume that are already sorted (I check it before)
#if we dont asume that artists are already sorted...
#self.__most_played = max(songs_played)
sum_times = reduce(lambda x, y: x + y.times, songs_played, 0)#sumatory of times of all artists
coef = 1.0 * self.__most_played.times / sum_times#percentage of the best respect total
self.__relevance = coef
def __str__(self):
out = ""
out += "User: " + self.__uid[:16] + "..." + "\n"#id is ellided, is too long!
out += "Country: " + self.__country + "\n"
out += "Age: " + str(self.__age) + "\n"
out += "Gender: " + str(self.__gender) + "\n"
out += "Most Played: " + self.__most_played.name + "\n"
out += "Relevance: " + '{0:.2%}'.format(self.__relevance) + "\n"#percentage formating
return out
def __cmp__(self, other):
return cmp(self.__uid, other.uid())
def uid(self):
#relevance getter
return self.__uid
class Artist:
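    """An artist name together with the number of times the user played it;
    instances compare by play count."""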
__slots__ = ('name', 'times')
def __init__(self, *args):
self.name = args[0]
#I found corruption in big.dat file. An artist parsed without times (i.e. no "::" to split)
if len(args) > 1:#times is provided
self.times = int(args[1])
else:#times is not provided
self.times = 0
def __str__(self):
return self.name + ' ' + str(self.times)
def __cmp__(self, other):
return cmp(self.times, other.times)
| mit | 55,605,343,194,227,624 | 36.358491 | 112 | 0.506566 | false | 3.548387 | false | false | false |
aarshayj/easyML | easyML/models_classification.py | 1 | 59771 | #####################################################################
##### IMPORT STANDARD MODULES
#####################################################################
#Python 3 support:
from __future__ import absolute_import, division
from __future__ import print_function, unicode_literals
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
# import pydot
import os
from scipy.stats.mstats import chisquare, mode
from sklearn.linear_model import LogisticRegression
from sklearn.model_selection import KFold, GridSearchCV
from sklearn.ensemble import RandomForestClassifier, ExtraTreesClassifier
from sklearn.ensemble import AdaBoostClassifier, GradientBoostingClassifier
from sklearn.svm import SVC
from sklearn.tree import DecisionTreeClassifier, export_graphviz
from sklearn import metrics, model_selection
from sklearn.feature_selection import RFE, RFECV
from abc import ABCMeta, abstractmethod
# from StringIO import StringIO
# import xgboost as xgb
# from xgboost.sklearn import XGBClassifier
from .genericmodelclass import GenericModelClass
from .data import DataBlock
#####################################################################
##### GENERIC MODEL CLASS
#####################################################################
class base_classification(GenericModelClass):
""" A base class which defines the generic classification functions
and variable definitions.
Parameters
----------
alg : object
An sklearn-style estimator
data_block : object
An object of easyML's DataBlock class. You should first create an
object of that class and then pass it as a parameter.
predictors : list of strings, default []
A list of columns which are to be used as predictors (also called
independent variables or features).
The default value is an empty list because these need not always be
defined at the time of class initialization. The set_predictors
method can be used later but before creating any predictive model.
cv_folds : int, default 5
The number of folds to be created while performing CV.
        This parameter can be adjusted later using the
        set_parameters method.
scoring_metric : str, default 'accuracy'
The scoring metric to be used for evaluating the model across the
different functions available. The available options are
- 'accuracy'
- 'auc'
- 'log_loss'
- 'f1'
- 'average_precision'
additional_display_metrics : list of string, default []
A list of additional display metrics to be shown for the test and
train dataframes in data_block. Note:
- These will be just shown for user reference and not actually used
for model evaluation
- The same available options as scoring_metric apply
"""
#Define as a meta class to disable direct instances
__metaclass__ = ABCMeta
# Map possible inputs to functions in sklean.metrics.
# Each value of the dictionary is a tuple of 3:
# (function, multi-class support, requires-probabilities)
# function: the sklearn metrics function
# multi-class support: if True, function allows multi-class support
# requires-probabilities: if True, the function requires
# probabilities to be passed as arguments
metrics_map = {
'accuracy':(metrics.accuracy_score,True,False),
'auc':(metrics.roc_auc_score,False,True),
'log_loss':(metrics.log_loss,True,True),
'f1':(metrics.f1_score,True,False),
'average_precision':(metrics.average_precision_score,False,True)
}
def __init__(
self, alg, data_block, predictors=[],cv_folds=5,
scoring_metric='accuracy',additional_display_metrics=[]
):
GenericModelClass.__init__(
self, alg=alg, data_block=data_block, predictors=predictors,
cv_folds=cv_folds,scoring_metric=scoring_metric,
additional_display_metrics=additional_display_metrics)
#Run input datatype checks:
self.check_datatype(data_block,'data_block',DataBlock)
self.subset_check(predictors)
self.check_datatype(cv_folds,'cv_folds',int)
self.check_datatype(scoring_metric,'scoring_metric',basestring)
self.check_datatype(
additional_display_metrics,'additional_display_metrics',list)
#Store predicted probabilities in a dictionary with keys as the
# name of the dataset (train/test/predict) and values as the actual
# predictions.
self.predictions_probabilities = {}
#Boolean to store whether the estimator chosen allows probability
# predictions
self.probabilities_available = True
#Define number of classes in target.
self.num_target_class = len(
self.datablock.train[self.datablock.target].unique())
#A Series object to store generic classification model outcomes.
self.classification_output=pd.Series(
index = ['ModelID','CVScore_mean','CVScore_std','AUC',
'ActualScore (manual entry)','CVMethod','Predictors']
)
#Get the dictionary of available dataframes
self.dp = self.datablock.data_present()
#Check all the entered metrics. Note that this check has to be
#placed after declaration of num_target_class attribute
for metric in [scoring_metric]+additional_display_metrics:
self.check_metric(metric,self.num_target_class)
@classmethod
def check_metric(cls,metric,num_target_class):
if metric not in cls.metrics_map:
raise self.InvalidInput("The input '%s' is not a valid scoring metric for this module"%metric)
if num_target_class>2:
if not cls.metrics_map[metric][1]:
raise self.InvalidInput("The %s metric does not support multi-class classification case"%metric)
def fit_model(
self, performCV=True, printResults=True,
printTopN=None, printConfusionMatrix=True,
printModelParameters=True):
"""An advanced model fit function which fits the model on the
training data and performs cross-validation. It prints a model
report containing the following:
- The parameters being used to fit the model
- Confusion matrix for the train and test data
- Scoring metrics for the train and test data
- CV mean and std scores for scoring metric
- Additional scoring metrics on train and test data, if specified
Note that you can decide which details are to be printed using method
arguments.
Parameters
----------
performCV : bool, default True
if True, the model performs cross-validation using the number of
folds as the cv_folds parameter of the model
printResults : bool, default True
if True, prints the report of the model. This should be kept as
True unless the module being used in a background script
printTopN : int, default None
The number of top scored features to be displayed in the feature
importance or coefficient plot of the model. If None, all the
features will be displayed by default. Note:
            - For algorithms supporting real coefficients, the features will
            be sorted by their magnitudes (absolute values).
            - For algorithms supporting positive feature importance scores,
            features are sorted on the score itself.
            This will be ignored if printResults is False.
printConfusionMatrix : bool, default True
            if True, the confusion matrices for the train and test dataframes
            are printed, otherwise they are omitted.
            This will be ignored if printResults is False.
printModelParameters : bool, default True
            if True, the parameters being used to run the model are
printed. It helps in validating the parameters and also makes
jupyter notebooks more informative if used
"""
self.check_datatype(performCV,'performCV',bool)
self.check_datatype(printResults,'printResults',bool)
self.check_datatype(printConfusionMatrix,'printConfusionMatrix',bool)
self.check_datatype(printModelParameters,'printModelParameters',bool)
if printTopN:
self.check_datatype(printTopN,'printTopN',int)
self.alg.fit(
self.datablock.train[self.predictors],
self.datablock.train[self.datablock.target])
#Get algo_specific_values
self.algo_specific_fit(printTopN)
#Get predictions:
for key,data in self.dp.items():
self.predictions_class[key] = self.alg.predict(
data[self.predictors])
if self.probabilities_available:
for key,data in self.dp.items():
self.predictions_probabilities[key] = self.alg.predict_proba(
data[self.predictors])
self.calc_model_characteristics(performCV)
if printResults:
self.printReport(printConfusionMatrix, printModelParameters)
def calc_model_characteristics(self, performCV=True):
# Determine key metrics to analyze the classification model. These
        # are stored in the classification_output series object belonging to
# this class.
for metric in [self.scoring_metric]+self.additional_display_metrics:
#Determine for both test and train, except predict:
for key,data in self.dp.items():
if key!='predict':
name = '%s_%s'%(metric,key)
#Case where probabilities to be passed as arguments
if base_classification.metrics_map[metric][2]:
self.classification_output[name] = \
base_classification.metrics_map[metric][0](
data[self.datablock.target],
self.predictions_probabilities[key])
#case where class predictions to be passed as arguments
else:
self.classification_output[name] = \
base_classification.metrics_map[metric][0](
data[self.datablock.target],
self.predictions_class[key])
#Determine confusion matrix:
name = 'ConfusionMatrix_%s'%key
self.classification_output[name] = pd.crosstab(
data[self.datablock.target],
self.predictions_class[key]
).to_string()
if performCV:
cv_score = self.KFold_CrossValidation(
scoring_metric=self.scoring_metric)
else:
cv_score = {
'mean_error': 0.0,
'std_error': 0.0
}
self.classification_output['CVMethod'] = \
'KFold - ' + str(self.cv_folds)
self.classification_output['CVScore_mean'] = cv_score['mean_error']
self.classification_output['CVScore_std'] = cv_score['std_error']
self.classification_output['Predictors'] = str(self.predictors)
def printReport(self, printConfusionMatrix, printModelParameters):
# Print the metric determined in the previous function.
print("\nModel Report")
        #Output the parameters used for modeling
if printModelParameters:
print('\nModel being built with the following parameters:')
print(self.alg.get_params())
if printConfusionMatrix:
for key,data in self.dp.items():
if key!='predict':
print("\nConfusion Matrix for %s data:"%key)
print(pd.crosstab(
data[self.datablock.target],
self.predictions_class[key])
)
print('Note: rows - actual; col - predicted')
print("\nScoring Metric:")
for key,data in self.dp.items():
if key!='predict':
name = '%s_%s'%(self.scoring_metric,key)
print("\t%s (%s): %s" %
(
self.scoring_metric,
key,
"{0:.3%}".format(self.classification_output[name])
)
)
print("\nCV Score for Scoring Metric (%s):"%self.scoring_metric)
print("\tMean - %f | Std - %f" % (
self.classification_output['CVScore_mean'],
self.classification_output['CVScore_std'])
)
if self.additional_display_metrics:
print("\nAdditional Scoring Metrics:")
for metric in self.additional_display_metrics:
for key,data in self.dp.items():
if key!='predict':
name = '%s_%s'%(metric,key)
print("\t%s (%s): %s" % (
metric,
key,
"{0:.3%}".format(
self.classification_output[name])
)
)
def plot_feature_importance(self, printTopN):
num_print = len(self.feature_imp)
if printTopN is not None:
num_print = min(printTopN,len(self.feature_imp))
self.feature_imp.iloc[:num_print].plot(
kind='bar', title='Feature Importances')
plt.ylabel('Feature Importance Score')
plt.show(block=False)
def plot_abs_coefficients(self,coeff,printTopN):
num_print = len(coeff)
if printTopN is not None:
num_print = min(printTopN,num_print)
        coeff_abs = abs(coeff)
        coeff_abs_sorted = sorted(
            coeff_abs.index,
            key=lambda x: coeff_abs[x],
            reverse=True
        )
coeff[coeff_abs_sorted].iloc[:num_print,].plot(
kind='bar',
title='Feature Coefficients (Sorted by Magnitude)'
)
plt.ylabel('Magnitute of Coefficients')
plt.show(block=False)
def submission_proba(
self, IDcol, proba_colnames,filename="Submission.csv"):
"""
"""
submission = pd.DataFrame({
x: self.datablock.predict[x] for x in list(IDcol)
})
        if len(list(proba_colnames))>1:
            for i in range(len(proba_colnames)):
                submission[proba_colnames[i]] = self.predictions_probabilities['predict'][:,i]
        else:
            submission[list(proba_colnames)[0]] = self.predictions_probabilities['predict'][:,1]
submission.to_csv(filename, index=False)
def set_parameters(self, param=None, cv_folds=None, set_default=False):
""" Set the parameters of the model. Only the parameters to be
updated are required to be passed.
Parameters
__________
param : dict, default None
A dictionary of key,value pairs where the keys are the parameters
to be updated and values as the new value of those parameters.
            If None, no update is performed.
            Ignored if set_default is True.
        cv_folds : int, default None
            Pass the number of CV folds to be used in the model.
            If None, no update is performed.
        set_default : bool, default False
if True, the model will be set to default parameters as defined
in model definition by scikit-learn. Note that this will not
affect the cv_folds parameter.
"""
#Check input
self.check_datatype(param,'param',dict)
self.check_datatype(set_default,'set_default',bool)
if param:
            if not set(param.keys()).issubset(
                set(self.default_parameters.keys())
            ):
raise self.InvalidInput("""The parameters passed should be a
subset of the model parameters""")
if set_default:
param = self.default_parameters
self.alg.set_params(**param)
self.model_output.update(pd.Series(param))
if cv_folds:
self.cv_folds = cv_folds
def export_model_base(self, IDcol, mstr):
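        # Append this model's parameters and metrics to
        # ensemble/<mstr>_models.csv (assigning a new ModelID) and write its
        # predictions to ensemble/<mstr>_<ModelID>.csv for later ensembling.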
self.create_ensemble_dir()
filename = os.path.join(os.getcwd(),'ensemble/%s_models.csv'%mstr)
comb_series = self.classification_output.append(
self.model_output,
verify_integrity=True)
if os.path.exists(filename):
models = pd.read_csv(filename)
mID = int(max(models['ModelID'])+1)
else:
mID = 1
models = pd.DataFrame(columns=comb_series.index)
comb_series['ModelID'] = mID
models = models.append(comb_series, ignore_index=True)
models.to_csv(filename, index=False, float_format="%.5f")
model_filename = os.path.join(
os.getcwd(),
'ensemble/%s_%s.csv'%(mstr,str(mID))
)
self.submission(IDcol, model_filename)
@abstractmethod
def algo_specific_fit(self,printTopN):
#Run algo-specific commands
pass
@abstractmethod
def export_model(self,IDcol):
#Export models
pass
#####################################################################
##### LOGISTIC REGRESSION
#####################################################################
class logistic_regression(base_classification):
""" Create a Logistic Regression model using implementation from
scikit-learn.
Parameters
----------
data_block : object of type easyML.DataBlock
An object of easyML's DataBlock class. You should first create an
object of that class and then pass it as a parameter.
predictors : list of strings, default []
A list of columns which are to be used as predictors (also called
independent variables or features).
The default value is an empty list because these need not always be
defined at the time of class initialization. The set_predictors
method can be used later but before creating any predictive model.
cv_folds : int, default 5
The number of folds to be created while performing CV.
This parameter can be adjusted later by passing using the
set_parameters method
scoring_metric : str, default 'accuracy'
The scoring metric to be used for evaluating the model across the
different functions available. The available options are
- 'accuracy'
- 'auc'
- 'log_loss'
- 'f1'
- 'average_precision'
additional_display_metrics : list of string, default []
A list of additional display metrics to be shown for the test and
train dataframes in data_block. Note:
- These will be just shown for user reference and not actually used
for model evaluation
- The same available options as scoring_metric apply
"""
default_parameters = {
'C':1.0,
'tol':0.0001,
'solver':'liblinear',
'multi_class':'ovr',
'class_weight':'balanced'
}
def __init__(
self,data_block, predictors=[],cv_folds=10,
scoring_metric='accuracy',additional_display_metrics=[]):
base_classification.__init__(
self, alg=LogisticRegression(), data_block=data_block,
predictors=predictors,cv_folds=cv_folds,
scoring_metric=scoring_metric,
additional_display_metrics=additional_display_metrics
)
self.model_output=pd.Series(self.default_parameters)
self.model_output['Coefficients'] = "-"
#Set parameters to default values:
self.set_parameters(set_default=True)
def algo_specific_fit(self, printTopN):
if self.num_target_class==2:
coeff = pd.Series(
np.concatenate(
(self.alg.intercept_,
self.alg.coef_[0])),
index=["Intercept"]+self.predictors
)
self.plot_abs_coefficients(coeff,printTopN)
else:
cols=['coef_class_%d'%i for i in range(0,self.num_target_class)]
coeff = pd.DataFrame(
self.alg.coef_.T,
columns=cols,
index=self.predictors
)
print('\nCoefficients:')
print(coeff)
self.model_output['Coefficients'] = coeff.to_string()
def export_model(self, IDcol):
#Export the model into the model file as well as create a submission
#with model index. This will be used for creating an ensemble.
self.export_model_base(IDcol,'logistic_reg')
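    # Illustrative usage sketch for logistic_regression. The DataBlock
    # constructor arguments shown here (train/predict dataframes and a target
    # column name) are an assumption -- check easyML's DataBlock documentation:
    #
    #   from easyML import DataBlock
    #   dblock = DataBlock(train=train_df, predict=test_df, target='label')
    #   model = logistic_regression(dblock, predictors=['feat1', 'feat2'],
    #                               scoring_metric='auc')
    #   model.set_parameters(param={'C': 0.5})
    #   # then run the base class's fit/report/export workflow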
#####################################################################
##### DECISION TREE
#####################################################################
class decision_tree(base_classification):
""" Create a Decision Tree model using implementation from
scikit-learn.
Parameters
----------
data_block : object of type easyML.DataBlock
An object of easyML's DataBlock class. You should first create an
object of that class and then pass it as a parameter.
predictors : list of strings, default []
A list of columns which are to be used as predictors (also called
independent variables or features).
The default value is an empty list because these need not always be
defined at the time of class initialization. The set_predictors
method can be used later but before creating any predictive model.
cv_folds : int, default 5
The number of folds to be created while performing CV.
This parameter can be adjusted later by passing using the
set_parameters method
scoring_metric : str, default 'accuracy'
The scoring metric to be used for evaluating the model across the
different functions available. The available options are
- 'accuracy'
- 'auc'
- 'log_loss'
- 'f1'
- 'average_precision'
additional_display_metrics : list of string, default []
A list of additional display metrics to be shown for the test and
train dataframes in data_block. Note:
- These will be just shown for user reference and not actually used
for model evaluation
- The same available options as scoring_metric apply
"""
default_parameters = {
'criterion':'gini',
'max_depth':None,
'min_samples_split':2,
'min_samples_leaf':1,
'max_features':None,
'random_state':None,
'max_leaf_nodes':None,
'class_weight':'balanced'
}
def __init__(
self,data_block, predictors=[],cv_folds=10,
scoring_metric='accuracy',additional_display_metrics=[]):
base_classification.__init__(
self, alg=DecisionTreeClassifier(), data_block=data_block,
predictors=predictors,cv_folds=cv_folds,
scoring_metric=scoring_metric,
additional_display_metrics=additional_display_metrics
)
self.model_output = pd.Series(self.default_parameters)
self.model_output['Feature_Importance'] = "-"
#Set parameters to default values:
self.set_parameters(set_default=True)
def algo_specific_fit(self, printTopN):
# print Feature Importance Scores table
self.feature_imp = pd.Series(
self.alg.feature_importances_,
index=self.predictors
).sort_values(ascending=False)
self.plot_feature_importance(printTopN)
self.model_output['Feature_Importance'] = \
self.feature_imp.to_string()
def export_model(self, IDcol):
#Export the model into the model file as well as create a submission
#with model index. This will be used for creating an ensemble.
self.export_model_base(IDcol,'decision_tree')
## UNDER DEVELOPMENT CODE FOR PRINTING TREES
# def get_tree(self):
# return self.alg.tree_
# Print the tree in visual format
# Inputs:
# export_pdf - if True, a pdf will be exported with the
# filename as specified in pdf_name argument
# pdf_name - name of the pdf file if export_pdf is True
# def printTree(self, export_pdf=True, file_name="Decision_Tree.pdf"):
# dot_data = StringIO()
# export_graphviz(
# self.alg, out_file=dot_data, feature_names=self.predictors,
# filled=True, rounded=True, special_characters=True)
# export_graphviz(
# self.alg, out_file='data.dot', feature_names=self.predictors,
# filled=True, rounded=True, special_characters=True
# )
# graph = pydot.graph_from_dot_data(dot_data.getvalue())
# if export_pdf:
# graph.write_pdf(file_name)
# return graph
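    # Illustrative usage sketch for decision_tree (predictor_cols and dblock
    # are placeholders for a list of feature names and a DataBlock object, as
    # in the logistic_regression example above):
    #
    #   tree = decision_tree(dblock, predictors=predictor_cols)
    #   tree.set_parameters(param={'max_depth': 5, 'min_samples_leaf': 10})
    #   # after fitting, algo_specific_fit plots feature importances and
    #   # stores them in tree.feature_imp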
#####################################################################
##### RANDOM FOREST
#####################################################################
class random_forest(base_classification):
""" Create a Random Forest model using implementation from
scikit-learn.
Parameters
----------
data_block : object of type easyML.DataBlock
An object of easyML's DataBlock class. You should first create an
object of that class and then pass it as a parameter.
predictors : list of strings, default []
A list of columns which are to be used as predictors (also called
independent variables or features).
The default value is an empty list because these need not always be
defined at the time of class initialization. The set_predictors
method can be used later but before creating any predictive model.
cv_folds : int, default 5
The number of folds to be created while performing CV.
This parameter can be adjusted later by passing using the
set_parameters method
scoring_metric : str, default 'accuracy'
The scoring metric to be used for evaluating the model across the
different functions available. The available options are
- 'accuracy'
- 'auc'
- 'log_loss'
- 'f1'
- 'average_precision'
additional_display_metrics : list of string, default []
A list of additional display metrics to be shown for the test and
train dataframes in data_block. Note:
- These will be just shown for user reference and not actually used
for model evaluation
- The same available options as scoring_metric apply
"""
default_parameters = {
'n_estimators':10,
'criterion':'gini',
'max_depth':None,
'min_samples_split':2,
'min_samples_leaf':1,
'max_features':'auto',
'max_leaf_nodes':None,
'oob_score':False,
'random_state':None,
'class_weight':'balanced',
'n_jobs':1
}
def __init__(
self,data_block, predictors=[],cv_folds=10,
scoring_metric='accuracy',additional_display_metrics=[]):
base_classification.__init__(
self, alg=RandomForestClassifier(), data_block=data_block,
predictors=predictors,cv_folds=cv_folds,
scoring_metric=scoring_metric,
additional_display_metrics=additional_display_metrics
)
self.model_output = pd.Series(self.default_parameters)
self.model_output['Feature_Importance'] = "-"
self.model_output['OOB_Score'] = "-"
#Set parameters to default values:
self.set_parameters(set_default=True)
def algo_specific_fit(self, printTopN):
# print Feature Importance Scores table
self.feature_imp = pd.Series(
self.alg.feature_importances_,
index=self.predictors
).sort_values(ascending=False)
self.plot_feature_importance(printTopN)
self.model_output['Feature_Importance'] = \
self.feature_imp.to_string()
if self.model_output['oob_score']:
print('OOB Score : %f' % self.alg.oob_score_)
self.model_output['OOB_Score'] = self.alg.oob_score_
def export_model(self, IDcol):
#Export the model into the model file as well as create a submission
#with model index. This will be used for creating an ensemble.
self.export_model_base(IDcol,'random_forest')
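    # Illustrative usage sketch for random_forest. Enabling oob_score is
    # optional; when it is set, algo_specific_fit above prints and stores the
    # out-of-bag score after fitting:
    #
    #   rf = random_forest(dblock, predictors=predictor_cols,
    #                      scoring_metric='f1')
    #   rf.set_parameters(param={'n_estimators': 200, 'oob_score': True})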
#####################################################################
##### EXTRA TREES FOREST
#####################################################################
class extra_trees(base_classification):
""" Create an Extra Trees Forest model using implementation from
scikit-learn.
Parameters
----------
data_block : object of type easyML.DataBlock
An object of easyML's DataBlock class. You should first create an
object of that class and then pass it as a parameter.
predictors : list of strings, default []
A list of columns which are to be used as predictors (also called
independent variables or features).
The default value is an empty list because these need not always be
defined at the time of class initialization. The set_predictors
method can be used later but before creating any predictive model.
cv_folds : int, default 5
The number of folds to be created while performing CV.
This parameter can be adjusted later by passing using the
set_parameters method
scoring_metric : str, default 'accuracy'
The scoring metric to be used for evaluating the model across the
different functions available. The available options are
- 'accuracy'
- 'auc'
- 'log_loss'
- 'f1'
- 'average_precision'
additional_display_metrics : list of string, default []
A list of additional display metrics to be shown for the test and
train dataframes in data_block. Note:
- These will be just shown for user reference and not actually used
for model evaluation
- The same available options as scoring_metric apply
"""
default_parameters = {
'n_estimators':10,
'criterion':'gini',
'max_depth':None,
'min_samples_split':2,
'min_samples_leaf':1,
'max_features':'auto',
'max_leaf_nodes':None,
'oob_score':False,
'random_state':None,
'class_weight':'balanced',
'n_jobs':1
}
def __init__(
self,data_block, predictors=[],cv_folds=10,
scoring_metric='accuracy',additional_display_metrics=[]):
base_classification.__init__(
self, alg=ExtraTreesClassifier(), data_block=data_block,
predictors=predictors,cv_folds=cv_folds,
scoring_metric=scoring_metric,
additional_display_metrics=additional_display_metrics)
self.model_output = pd.Series(self.default_parameters)
self.model_output['Feature_Importance'] = "-"
self.model_output['OOB_Score'] = "-"
#Set parameters to default values:
self.set_parameters(set_default=True)
def algo_specific_fit(self, printTopN):
# print Feature Importance Scores table
self.feature_imp = pd.Series(
self.alg.feature_importances_,
index=self.predictors
).sort_values(ascending=False)
self.plot_feature_importance(printTopN)
self.model_output['Feature_Importance'] = \
self.feature_imp.to_string()
if self.model_output['oob_score']:
print('OOB Score : %f' % self.alg.oob_score_)
self.model_output['OOB_Score'] = self.alg.oob_score_
def export_model(self, IDcol):
#Export the model into the model file as well as create a submission
#with model index. This will be used for creating an ensemble.
self.export_model_base(IDcol,'extra_trees')
#####################################################################
##### ADABOOST CLASSIFICATION
#####################################################################
class adaboost(base_classification):
""" Create an AdaBoost model using implementation from
scikit-learn.
Parameters
----------
data_block : object of type easyML.DataBlock
An object of easyML's DataBlock class. You should first create an
object of that class and then pass it as a parameter.
predictors : list of strings, default []
A list of columns which are to be used as predictors (also called
independent variables or features).
The default value is an empty list because these need not always be
defined at the time of class initialization. The set_predictors
method can be used later but before creating any predictive model.
cv_folds : int, default 5
The number of folds to be created while performing CV.
This parameter can be adjusted later by passing using the
set_parameters method
scoring_metric : str, default 'accuracy'
The scoring metric to be used for evaluating the model across the
different functions available. The available options are
- 'accuracy'
- 'auc'
- 'log_loss'
- 'f1'
- 'average_precision'
additional_display_metrics : list of string, default []
A list of additional display metrics to be shown for the test and
train dataframes in data_block. Note:
- These will be just shown for user reference and not actually used
for model evaluation
- The same available options as scoring_metric apply
"""
default_parameters = {
'n_estimators':50,
'learning_rate':1.0
}
def __init__(
self,data_block, predictors=[],cv_folds=10,
scoring_metric='accuracy',additional_display_metrics=[]):
base_classification.__init__(
self, alg=AdaBoostClassifier(), data_block=data_block,
predictors=predictors,cv_folds=cv_folds,
scoring_metric=scoring_metric,
additional_display_metrics=additional_display_metrics
)
self.model_output = pd.Series(self.default_parameters)
self.model_output['Feature_Importance'] = "-"
#Set parameters to default values:
self.set_parameters(set_default=True)
def algo_specific_fit(self, printTopN):
# print Feature Importance Scores table
self.feature_imp = pd.Series(
self.alg.feature_importances_,
index=self.predictors
).sort_values(ascending=False)
self.plot_feature_importance(printTopN)
self.model_output['Feature_Importance'] = \
self.feature_imp.to_string()
plt.xlabel("AdaBoost Estimator")
plt.ylabel("Estimator Error")
plt.plot(
range(1, int(self.model_output['n_estimators'])+1),
self.alg.estimator_errors_
)
plt.plot(
range(1, int(self.model_output['n_estimators'])+1),
self.alg.estimator_weights_
)
plt.legend(
['estimator_errors','estimator_weights'],
loc='upper left'
)
plt.show(block=False)
def export_model(self, IDcol):
#Export the model into the model file as well as create a submission
#with model index. This will be used for creating an ensemble.
self.export_model_base(IDcol,'adaboost')
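    # Illustrative usage sketch for adaboost. The error/weight curves plotted
    # in algo_specific_fit above contain n_estimators points, so the value set
    # below directly controls their length:
    #
    #   ada = adaboost(dblock, predictors=predictor_cols)
    #   ada.set_parameters(param={'n_estimators': 100, 'learning_rate': 0.5})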
#####################################################################
##### GRADIENT BOOSTING MACHINE
#####################################################################
class gradient_boosting_machine(base_classification):
""" Create a GBM (Gradient Boosting Machine) model using implementation
from scikit-learn.
Parameters
----------
data_block : object of type easyML.DataBlock
An object of easyML's DataBlock class. You should first create an
object of that class and then pass it as a parameter.
predictors : list of strings, default []
A list of columns which are to be used as predictors (also called
independent variables or features).
The default value is an empty list because these need not always be
defined at the time of class initialization. The set_predictors
method can be used later but before creating any predictive model.
cv_folds : int, default 5
The number of folds to be created while performing CV.
This parameter can be adjusted later by passing using the
set_parameters method
scoring_metric : str, default 'accuracy'
The scoring metric to be used for evaluating the model across the
different functions available. The available options are
- 'accuracy'
- 'auc'
- 'log_loss'
- 'f1'
- 'average_precision'
additional_display_metrics : list of string, default []
A list of additional display metrics to be shown for the test and
train dataframes in data_block. Note:
- These will be just shown for user reference and not actually used
for model evaluation
- The same available options as scoring_metric apply
"""
default_parameters = {
'loss':'deviance',
'learning_rate':0.1,
'n_estimators':100,
'subsample':1.0,
'min_samples_split':2,
'min_samples_leaf':1,
'max_depth':3, 'init':None,
'random_state':None,
'max_features':None,
'verbose':0,
'max_leaf_nodes':None,
'warm_start':False,
'presort':'auto'
}
def __init__(
self, data_block, predictors=[],cv_folds=10,
scoring_metric='accuracy',additional_display_metrics=[]):
base_classification.__init__(
self, alg=GradientBoostingClassifier(), data_block=data_block,
predictors=predictors,cv_folds=cv_folds,
scoring_metric=scoring_metric,
additional_display_metrics=additional_display_metrics
)
self.model_output = pd.Series(self.default_parameters)
self.model_output['Feature_Importance'] = "-"
#Set parameters to default values:
self.set_parameters(set_default=True)
def algo_specific_fit(self, printTopN):
# print Feature Importance Scores table
self.feature_imp = pd.Series(
self.alg.feature_importances_,
index=self.predictors
).sort_values(ascending=False)
self.plot_feature_importance(printTopN)
self.model_output['Feature_Importance'] = \
self.feature_imp.to_string()
#Plot OOB estimates if subsample <1:
if self.model_output['subsample']<1:
plt.xlabel("GBM Iteration")
plt.ylabel("Score")
plt.plot(
range(1, self.model_output['n_estimators']+1),
self.alg.oob_improvement_
)
plt.legend(['oob_improvement_','train_score_'], loc='upper left')
plt.show(block=False)
def export_model(self, IDcol):
#Export the model into the model file as well as create a submission
#with model index. This will be used for creating an ensemble.
self.export_model_base(IDcol,'gbm')
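    # Illustrative usage sketch for gradient_boosting_machine. A subsample
    # value below 1.0 is what triggers the OOB-improvement plot in
    # algo_specific_fit above:
    #
    #   gbm = gradient_boosting_machine(dblock, predictors=predictor_cols)
    #   gbm.set_parameters(param={'n_estimators': 300, 'learning_rate': 0.05,
    #                             'subsample': 0.8})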
#####################################################################
##### Support Vector Classifier
#####################################################################
class linear_svm(base_classification):
""" Create a Linear Support Vector Machine model using implementation
from scikit-learn.
Parameters
----------
data_block : object of type easyML.DataBlock
An object of easyML's DataBlock class. You should first create an
object of that class and then pass it as a parameter.
predictors : list of strings, default []
A list of columns which are to be used as predictors (also called
independent variables or features).
The default value is an empty list because these need not always be
defined at the time of class initialization. The set_predictors
method can be used later but before creating any predictive model.
cv_folds : int, default 5
The number of folds to be created while performing CV.
This parameter can be adjusted later by passing using the
set_parameters method
scoring_metric : str, default 'accuracy'
The scoring metric to be used for evaluating the model across the
different functions available. The available options are
- 'accuracy'
- 'auc'
- 'log_loss'
- 'f1'
- 'average_precision'
additional_display_metrics : list of string, default []
A list of additional display metrics to be shown for the test and
train dataframes in data_block. Note:
- These will be just shown for user reference and not actually used
for model evaluation
- The same available options as scoring_metric apply
"""
default_parameters = {
'C':1.0,
'kernel':'linear', #modified not default
'degree':3,
'gamma':'auto',
'coef0':0.0,
'shrinking':True,
'probability':False,
'tol':0.001,
'cache_size':200,
'class_weight':None,
'verbose':False,
'max_iter':-1,
'decision_function_shape':None,
'random_state':None
}
def __init__(
self,data_block, predictors=[],cv_folds=10,
scoring_metric='accuracy',additional_display_metrics=[]):
base_classification.__init__(
self, alg=SVC(), data_block=data_block, predictors=predictors,
cv_folds=cv_folds,scoring_metric=scoring_metric,
additional_display_metrics=additional_display_metrics
)
self.model_output=pd.Series(self.default_parameters)
self.model_output['Coefficients'] = "-"
#Set parameters to default values:
self.set_parameters(set_default=True)
        # Check if probabilities are enabled:
if not self.alg.get_params()['probability']:
self.probabilities_available = False
def algo_specific_fit(self, printTopN):
if self.num_target_class==2:
coeff = pd.Series(
np.concatenate((self.alg.intercept_,self.alg.coef_[0])),
index=["Intercept"]+self.predictors
)
#print the chart of importances
self.plot_abs_coefficients(coeff, printTopN)
else:
cols=['coef_class_%d'%i for i in range(0,self.num_target_class)]
coeff = pd.DataFrame(
self.alg.coef_.T,
columns=cols,
index=self.predictors
)
print('\nCoefficients:')
print(coeff)
self.model_output['Coefficients'] = coeff.to_string()
def export_model(self, IDcol):
#Export the model into the model file as well as create a submission
#with model index. This will be used for creating an ensemble.
self.export_model_base(IDcol,'linear_svm')
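    # Illustrative usage sketch for linear_svm. Probability estimates are off
    # by default, so probability-based metrics ('auc', 'log_loss') require
    # enabling them explicitly -- an assumption worth checking against the
    # base class's metric handling:
    #
    #   svm = linear_svm(dblock, predictors=predictor_cols)
    #   svm.set_parameters(param={'probability': True, 'C': 0.1})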
#####################################################################
##### XGBOOST ALGORITHM (UNDER DEVELOPMENT)
#####################################################################
"""
#Define the class similar to the overall classification class
class XGBoost(base_classification):
def __init__(self,data_block, predictors, cv_folds=5,scoring_metric_skl='accuracy', scoring_metric_xgb='error'):
base_classification.__init__(self, alg=XGBClassifier(), data_block=data_block, predictors=predictors,cv_folds=cv_folds,scoring_metric=scoring_metric_skl)
#Define default parameters on your own:
self.default_parameters = {
'max_depth':3, 'learning_rate':0.1,
'n_estimators':100, 'silent':True,
'objective':"binary:logistic",
'nthread':1, 'gamma':0, 'min_child_weight':1,
'max_delta_step':0, 'subsample':1, 'colsample_bytree':1, 'colsample_bylevel':1,
'reg_alpha':0, 'reg_lambda':1, 'scale_pos_weight':1,
'base_score':0.5, 'seed':0, 'missing':None
}
self.model_output = pd.Series(self.default_parameters)
#create DMatrix with nan as missing by default. If later this is changed then the matrix are re-calculated. If not set,will give error is nan present in data
self.xgtrain = xgb.DMatrix(self.datablock.train[self.predictors].values, label=self.datablock.train[self.datablock.target].values, missing=np.nan)
self.xgtest = xgb.DMatrix(self.datablock.predict[self.predictors].values, missing=np.nan)
self.num_class = 2
self.n_estimators = 10
self.eval_metric = 'error'
self.train_predictions = []
self.train_pred_prob = []
self.test_predictions = []
self.test_pred_prob = []
self.num_target_class = len(data_train[target].unique())
#define scoring metric:
self.scoring_metric_skl = scoring_metric_skl
# if scoring_metric_xgb=='f1':
# self.scoring_metric_xgb = self.xg_f1
# else:
self.scoring_metric_xgb = scoring_metric_xgb
#Define a Series object to store generic classification model outcomes;
self.classification_output=pd.Series(index=['ModelID','Accuracy','CVScore_mean','CVScore_std','SpecifiedMetric',
'ActualScore (manual entry)','CVMethod','ConfusionMatrix','Predictors'])
#feature importance (g_scores)
self.feature_imp = None
self.model_output['Feature_Importance'] = "-"
#Set parameters to default values:
# self.set_parameters(set_default=True)
#Define custom f1 score metric:
def xg_f1(self,y,t):
t = t.get_label()
y_bin = [1. if y_cont > 0.5 else 0. for y_cont in y] # binaryzing your output
return 'f1',metrics.f1_score(t,y_bin)
# Set the parameters of the model.
# Note:
# > only the parameters to be updated are required to be passed
# > if set_default is True, the passed parameters are ignored and default parameters are set which are defined in scikit learn module
def set_parameters(self, param=None, set_default=False):
if set_default:
param = self.default_parameters
self.alg.set_params(**param)
self.model_output.update(pd.Series(param))
if 'missing' in param:
#update DMatrix with missing:
self.xgtrain = xgb.DMatrix(self.datablock.train[self.predictors].values, label=self.datablock.train[self.datablock.target].values, missing=param['missing'])
self.xgtest = xgb.DMatrix(self.datablock.predict[self.predictors].values, missing=param['missing'])
if 'num_class' in param:
self.num_class = param['num_class']
if 'cv_folds' in param:
self.cv_folds = param['cv_folds']
# def set_feature_importance(self):
# fs = self.alg.booster().get_fscore()
# ftimp = pd.DataFrame({
# 'feature': fs.keys(),
# 'importance_Score': fs.values()
# })
# ftimp['predictor'] = ftimp['feature'].apply(lambda x: self.predictors[int(x[1:])])
# self.feature_imp = pd.Series(ftimp['importance_Score'].values, index=ftimp['predictor'].values)
#Fit the model using predictors and parameters specified before.
# Inputs:
# printCV - if True, CV is performed
def modelfit(self, performCV=True, useTrainCV=False, TrainCVFolds=5, early_stopping_rounds=20, show_progress=True, printTopN='all'):
if useTrainCV:
xgb_param = self.alg.get_xgb_params()
if self.num_class>2:
xgb_param['num_class']=self.num_class
if self.scoring_metric_xgb=='f1':
cvresult = xgb.cv(xgb_param,self.xgtrain, num_boost_round=self.alg.get_params()['n_estimators'], nfold=self.cv_folds,
metrics=['auc'],feval=self.xg_f1,early_stopping_rounds=early_stopping_rounds, show_progress=show_progress)
else:
cvresult = xgb.cv(xgb_param,self.xgtrain, num_boost_round=self.alg.get_params()['n_estimators'], nfold=self.cv_folds,
metrics=self.scoring_metric_xgb, early_stopping_rounds=early_stopping_rounds, show_progress=show_progress)
self.alg.set_params(n_estimators=cvresult.shape[0])
print(self.alg.get_params())
obj = self.alg.fit(self.datablock.train[self.predictors], self.datablock.train[self.datablock.target], eval_metric=self.eval_metric)
#Print feature importance
# self.set_feature_importance()
self.feature_imp = pd.Series(self.alg.booster().get_fscore()).sort_values(ascending=False)
num_print = len(self.feature_imp)
if printTopN is not None:
if printTopN != 'all':
num_print = min(printTopN,len(self.feature_imp))
self.feature_imp.iloc[:num_print].plot(kind='bar', title='Feature Importances')
plt.ylabel('Feature Importance Score')
plt.show(block=False)
self.model_output['Feature_Importance'] = self.feature_imp.to_string()
#Get train predictions:
self.train_predictions = self.alg.predict(self.datablock.train[self.predictors])
self.train_pred_prob = self.alg.predict_proba(self.datablock.train[self.predictors])
#Get test predictions:
self.test_predictions = self.alg.predict(self.datablock.predict[self.predictors])
self.test_pred_prob = self.alg.predict_proba(self.datablock.predict[self.predictors])
self.calc_model_characteristics(performCV)
self.printReport()
#Export the model into the model file as well as create a submission with model index. This will be used for creating an ensemble.
def export_model(self, IDcol):
self.create_ensemble_dir()
filename = os.path.join(os.getcwd(),'ensemble/xgboost_models.csv')
comb_series = self.classification_output.append(self.model_output, verify_integrity=True)
if os.path.exists(filename):
models = pd.read_csv(filename)
mID = int(max(models['ModelID'])+1)
else:
mID = 1
models = pd.DataFrame(columns=comb_series.index)
comb_series['ModelID'] = mID
models = models.append(comb_series, ignore_index=True)
models.to_csv(filename, index=False, float_format="%.5f")
model_filename = os.path.join(os.getcwd(),'ensemble/xgboost_'+str(mID)+'.csv')
self.submission(IDcol, model_filename)
"""
#####################################################################
##### ENSEMBLE (UNDER DEVELOPMENT)
#####################################################################
"""
#Class for creating an ensemble model using the exported files from previous classes
class Ensemble_Classification(object):
#initialize the object with target variable
def __init__(self, target, IDcol):
self.datablock.target = target
self.data = None
self.relationMatrix_chi2 = None
self.relationMatrix_diff = None
self.IDcol = IDcol
#create the ensemble data
# Inputs:
# models - dictionary with key as the model name and values as list containing the model numbers to be ensebled
# Note: all the models in the list specified should be present in the ensemble folder. Please cross-check once
def create_ensemble_data(self, models):
self.data = None
for key, value in models.items():
# print key,value
for i in value:
fname = key + '_' + str(i)
fpath = os.path.join(os.getcwd(), 'ensemble', fname+'.csv')
tempdata = pd.read_csv(fpath)
tempdata = tempdata.rename(columns = {self.datablock.target: fname})
if self.data is None:
self.data = tempdata
else:
self.data = self.data.merge(tempdata,on=self.data.columns[0])
#get the data being used for ensemble
def get_ensemble_data(self):
return self.data
#Check chisq test between different model outputs to check which combination of ensemble will generate better results. Note: Models with high correlation should not be combined together.
def chisq_independence(self, col1, col2, verbose = False):
contingencyTable = pd.crosstab(col1,col2,margins=True)
if len(col1)/((contingencyTable.shape[0] - 1) * (contingencyTable.shape[1] - 1)) <= 5:
return "TMC"
expected = contingencyTable.copy()
total = contingencyTable.loc["All","All"]
# print contingencyTable.index
# print contingencyTable.columns
for m in contingencyTable.index:
for n in contingencyTable.columns:
expected.loc[m,n] = contingencyTable.loc[m,"All"]*contingencyTable.loc["All",n]/float(total)
if verbose:
print('\n\nAnalysis of models: %s and %s' % (col1.name, col2.name))
print('Contingency Table:')
print(contingencyTable)
# print '\nExpected Frequency Table:'
# print expected
observed_frq = contingencyTable.iloc[:-1,:-1].values.ravel()
expected_frq = expected.iloc[:-1,:-1].values.ravel()
numless1 = len(expected_frq[expected_frq<1])
perless5 = len(expected_frq[expected_frq<5])/len(expected_frq)
#Adjustment in DOF so use the 1D chisquare to matrix shaped data; -1 in row n col because of All row and column
matrixadj = (contingencyTable.shape[0] - 1) + (contingencyTable.shape[1] - 1) - 2
# print matrixadj
pval = np.round(chisquare(observed_frq, expected_frq,ddof=matrixadj)[1],3)
if numless1>0 or perless5>=0.2:
return str(pval)+"*"
else:
return pval
#Create the relational matrix between models
def check_ch2(self, verbose=False):
col = self.data.columns[1:]
self.relationMatrix_chi2 = pd.DataFrame(index=col,columns=col)
for i in range(len(col)):
for j in range(i, len(col)):
if i==j:
self.relationMatrix_chi2.loc[col[i],col[j]] = 1
else:
pval = self.chisq_independence(self.data.iloc[:,i+1],self.data.iloc[:,j+1], verbose=verbose)
self.relationMatrix_chi2.loc[col[j],col[i]] = pval
self.relationMatrix_chi2.loc[col[i],col[j]] = pval
print('\n\n Relational Matrix (based on Chi-square test):')
print(self.relationMatrix_chi2)
def check_diff(self):
col = self.data.columns[1:]
self.relationMatrix_diff = pd.DataFrame(index=col,columns=col)
nrow = self.data.shape[0]
for i in range(len(col)):
for j in range(i, len(col)):
if i==j:
self.relationMatrix_diff.loc[col[i],col[j]] = '-'
else:
# print col[i],col[j]
pval = "{0:.2%}".format(sum( np.abs(self.data.iloc[:,i+1]-self.data.iloc[:,j+1]) )/float(nrow))
self.relationMatrix_diff.loc[col[j],col[i]] = pval
self.relationMatrix_diff.loc[col[i],col[j]] = pval
print('\n\n Relational Matrix (based on perc difference):')
print(self.relationMatrix_diff)
#Generate submission for the ensembled model by combining the mentioned models.
# Inputs:
# models_to_use - list with model names to use; if None- all models will be used
# filename - the filename of the final submission
# Note: the models should be odd in nucmber to allow a clear winner in terms of mode otherwise the first element will be chosen
def submission(self, models_to_use=None, filename="Submission_ensemble.csv"):
#if models_to_use is None then use all, else filter:
if models_to_use is None:
data_ens = self.data
else:
data_ens = self.data[models_to_use]
def mode_ens(x):
return int(mode(x).mode[0])
ensemble_output = data_ens.apply(mode_ens,axis=1)
submission = pd.DataFrame({
self.IDcol: self.data.iloc[:,0],
self.datablock.target: ensemble_output
})
submission.to_csv(filename, index=False)
""" | bsd-3-clause | 6,956,967,973,953,770,000 | 39.137646 | 190 | 0.572234 | false | 4.413098 | true | false | false |
cosven/FeelUOwn | feeluown/serializers/model_helpers.py | 1 | 2919 | from feeluown.library import AbstractProvider
from feeluown.models import SongModel, ArtistModel, \
AlbumModel, PlaylistModel, UserModel, SearchModel
from .base import try_cast_model_to_v1
class ModelSerializerMixin:
def _get_items(self, model):
model = try_cast_model_to_v1(model)
# initialize fields that need to be serialized
# if as_line option is set, we always use fields_display
if self.opt_as_line or self.opt_brief:
fields = model.meta.fields_display
else:
fields = self._declared_fields
items = [("provider", model.source),
("identifier", model.identifier),
("uri", str(model))]
if self.opt_fetch:
for field in fields:
items.append((field, getattr(model, field)))
else:
for field in fields:
items.append((field, getattr(model, field + '_display')))
return items
class SongSerializerMixin:
class Meta:
types = (SongModel, )
# since url can be too long, we put it at last
fields = ('title', 'duration', 'album', 'artists', 'url')
line_fmt = '{uri:{uri_length}}\t# {title:_18} - {artists_name:_20}'
class ArtistSerializerMixin:
class Meta:
types = (ArtistModel, )
fields = ('name', 'songs')
line_fmt = '{uri:{uri_length}}\t# {name:_40}'
class AlbumSerializerMixin:
class Meta:
types = (AlbumModel, )
fields = ('name', 'artists', 'songs')
line_fmt = '{uri:{uri_length}}\t# {name:_18} - {artists_name:_20}'
class PlaylistSerializerMixin:
class Meta:
types = (PlaylistModel, )
fields = ('name', )
line_fmt = '{uri:{uri_length}}\t# {name:_40}'
class UserSerializerMixin:
class Meta:
types = (UserModel, )
fields = ('name', 'playlists')
line_fmt = '{uri:{uri_length}}\t# {name:_40}'
class SearchSerializerMixin:
"""
.. note::
SearchModel isn't a standard model, it does not have identifier,
the uri of SearchModel instance is also not so graceful, so we handle
it as a normal object temporarily.
"""
class Meta:
types = (SearchModel, )
def _get_items(self, result):
fields = ('songs', 'albums', 'artists', 'playlists',)
items = []
for field in fields:
value = getattr(result, field)
if value: # only append if it is not empty
items.append((field, value))
return items
class ProviderSerializerMixin:
class Meta:
types = (AbstractProvider, )
def _get_items(self, provider):
"""
:type provider: AbstractProvider
"""
return [
('identifier', provider.identifier),
('uri', 'fuo://{}'.format(provider.identifier)),
('name', provider.name),
]
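# Illustrative note, not part of the public API: these mixins are meant to be
# combined with a concrete serializer base class defined elsewhere in
# feeluown.serializers, e.g. a hypothetical
#
#   class SongSerializer(SongSerializerMixin, ModelSerializerMixin, Serializer):
#       pass
#
# where the assumed Serializer base supplies opt_as_line/opt_brief/opt_fetch
# and _declared_fields used by ModelSerializerMixin._get_items above.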
| gpl-3.0 | 1,014,168,446,928,558,000 | 27.90099 | 77 | 0.571771 | false | 3.933962 | false | false | false |
jhdulaney/fulla | fulla/droplet.py | 1 | 5978 | #
# fulla -- work with Digital Ocean
#
# Copyright (C) 2015 John H. Dulaney <[email protected]>
#
# Licensed under the GNU General Public License Version 2
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
# 02110-1301, USA.
#
"""Interact with Digital Ocean account"""
try:
from io import BytesIO
except ImportError:
from StringIO import StringIO as BytesIO
import pycurl
import json
from fulla import settings
Null = json.dumps(None)
def get_info(location):
"""Retreive Droplet data from Digital Ocean"""
buff = BytesIO()
auth = 'Authorization: Bearer ' + settings.token
curler = pycurl.Curl()
curler.setopt(curler.URL, settings.api_url + location)
curler.setopt(curler.HTTPHEADER, [auth])
curler.setopt(curler.WRITEDATA, buff)
try:
curler.perform()
except:
raise
curler.close()
results = buff.getvalue()
results = results.decode('iso-8859-1')
results = json.loads(results)
return results
def send_request(location, request):
location = settings.api_url + location
class _Buffer(object):
def __init__(self):
self.data = ''
def incoming(self, buff):
self.data += buff.decode('iso-8859-1')
auth = 'Authorization: Bearer ' + settings.token
post_request = json.dumps(request)
try:
buff = _Buffer()
curler = pycurl.Curl()
curler.setopt(curler.HTTPHEADER, [auth, "Content-type: application/json"])
curler.setopt(curler.URL, location)
curler.setopt(curler.POSTFIELDS, post_request)
curler.setopt(curler.WRITEFUNCTION, buff.incoming)
curler.perform()
curler.close()
return buff.data
except:
raise
def send_delete(location):
location = settings.api_url + location
buff = BytesIO()
auth = 'Authorization: Bearer ' + settings.token
try:
curler = pycurl.Curl()
curler.setopt(curler.HTTPHEADER, [auth, "Content-type: application/json"])
curler.setopt(curler.URL, location)
curler.setopt(curler.CUSTOMREQUEST, "DELETE")
curler.setopt(curler.WRITEDATA, buff)
curler.perform()
curler.close()
result = json.loads(buff.getvalue().decode('iso-8859-1'))
return result
except:
raise
class Account(object):
"""Digital Ocean Account object"""
def __init__(self):
self.droplet_limit = 0
self.email = ''
self.uuid = ''
self.email_verified = None
self.status = ''
self.status_message = ''
def get_data(self):
"""Retreive user data from Digital Ocean"""
results = get_info('account')
try:
results = results['account']
self.droplet_limit = results['droplet_limit']
self.email = results['email']
self.uuid = results['uuid']
self.email_verified = results['email_verified']
self.status = results['status']
            self.status_message = results['status_message']
except:
print(results['id'], results['message'])
raise
return 0
def get_droplets():
"""Retreive Droplet data from Digital Ocean"""
results = get_info('droplets')
try:
droplets = results['droplets']
num_droplets = results['meta']['total']
    except:
        print(results['id'], results['message'])
        raise
return droplets, num_droplets
def get_imagelist():
"""Get list of available images"""
results = get_info('images?page=1')
try:
num_pages = int(results['links']['pages']['last'].rsplit('=', 1)[1])
except:
print(results['id'], results['message'])
raise
image_list = results['images']
for page in range(2, num_pages + 1):
results = get_info('images?page=' + str(page))
image_list += results['images']
return image_list
def get_keys():
results = get_info('account/keys')
try:
num_keys = int(results['meta']['total'])
keys = results['ssh_keys']
except:
print(results['id'], results['message'])
raise
return keys, num_keys
def create_droplet(name, region, size, image_slug, ssh_keys, user_data=Null, private_networking=Null, ipv6=Null, backups=Null):
"""Create new droplet
Note: ssh_keys *must* be a list
"""
images = get_imagelist()
droplet = None
for image in images:
if (image_slug == image['slug'] or image_slug == image['id']):
droplet = {"name": name, "region": region, "size": size, "image": image_slug,
"ssh_keys": ssh_keys, "backups": backups, "ipv6": ipv6,
"user_data": user_data, "private_networking": private_networking}
if droplet is not None:
result = send_request('droplets', droplet)
try:
result = json.loads(result)
except:
print(result['id'], result['message'])
raise
return result
else:
print("Image does not exist")
        raise ValueError("Image %s does not exist" % image_slug)
def delete_droplet(droplet_id):
send_delete('droplets/' + str(droplet_id))
return 0
def reboot_droplet(droplet_id):
"""Reboot droplet"""
request = 'droplets/' + str(droplet_id) + '/actions'
result = send_request(request, '{"type":"reboot"}')
return result
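# Illustrative usage sketch (assumes settings.token / settings.api_url are
# configured; the region, size and image slug values are placeholders taken
# from Digital Ocean's catalogue at the time of writing):
#
#   account = Account()
#   account.get_data()
#   keys, _ = get_keys()
#   result = create_droplet('web-1', 'nyc3', '512mb', 'ubuntu-14-04-x64',
#                           ssh_keys=[keys[0]['id']])
#   droplets, count = get_droplets()
#   reboot_droplet(droplets[0]['id'])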
| gpl-3.0 | -8,170,448,895,829,790,000 | 28.741294 | 127 | 0.617598 | false | 3.80522 | false | false | false |
noemis-fr/old-custom | e3z_mail_ipbox/sale.py | 1 | 2613 | # -*- coding: utf-8 -*-
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2012-2013 Elanz (<http://www.openelanz.fr>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
__author__ = 'vchemiere'
from openerp.osv import osv, fields
class sale_order(osv.osv):
_inherit = 'sale.order'
def create(self, cr, uid, vals, context=None):
usr_obj = self.pool.get('res.users')
group_obj = self.pool.get('res.groups')
ir_model_data = self.pool.get('ir.model.data')
adv_group_id = ir_model_data.get_object_reference(cr, uid, 'sale', 'adv')[1]
adv_users = group_obj.browse(cr, uid, adv_group_id).users
if not vals.get('message_follower_ids', False):
vals['message_follower_ids'] = []
if adv_users:
for adv_user_id in adv_users:
adv_id = usr_obj.browse(cr, uid, adv_user_id.id).partner_id.id
vals['message_follower_ids'] += [4, adv_id]
mrp_group_id = ir_model_data.get_object_reference(cr, uid, 'mrp', 'team')[1]
mrp_users = group_obj.browse(cr, uid, mrp_group_id).users
if mrp_users:
for mrp_user_id in mrp_users:
mrp_id = usr_obj.browse(cr, uid, mrp_user_id.id).partner_id.id
                vals['message_follower_ids'] += [(4, mrp_id)]
new_id = super(sale_order, self).create(cr, uid, vals, context)
follower_ids = self.pool.get('mail.followers').search(cr, uid, [('res_id', '=', new_id)])
for follower_id in follower_ids:
follower = self.pool.get('mail.followers').browse(cr, uid, follower_id)
return new_id
def action_button_confirm(self, cr, uid, ids, context=None):
if context is None:
context = {}
res = super(sale_order, self).action_button_confirm(cr, uid, ids, context)
self.pool.get('mail.proxy').send_mail(cr, uid, ids, 'sale.order', 'Sales Order - Send by Email', context)
return res
| agpl-3.0 | 912,175,386,903,377,700 | 40.47619 | 113 | 0.626483 | false | 3.389105 | false | false | false |
fagusMcFagel/ticketsystem | ticketsystem/ticketsystem/settings_prod.py | 1 | 1414 | """
Django settings for ticketsystem project.
Generated by 'django-admin startproject' using Django 1.11.3.
For more information on this file, see
https://docs.djangoproject.com/en/1.11/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.11/ref/settings/
"""
######## SETTINGS SUITABLE FOR DEVELOPMENT ########
from .settings_dev import *
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = False
ALLOWED_HOSTS = ['localhost']
#DIRECTORY AND URL FOR MEDIA (FileField in Models)
MEDIA_ROOT = os.path.join(BASE_DIR, 'C:/#DjangoApp/ticketsystem/media/')
MEDIA_URL = '/'
#LOGOUT USER ON BROWSER CLOSE
SESSION_EXPIRE_AT_BROWSER_CLOSE = True
#SET EXPIRATION AGE IN SECONDS
COOKIE_EXP_AGE = 30*60
# Database
# https://docs.djangoproject.com/en/1.11/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.mysql',
'NAME': 'djangodatabase',
'USER': 'DjangoApp',
'PASSWORD': 'testDjango',
'HOST': 'localhost',
'PORT': '',
}
}
SECURE_CONTENT_TYPE_NOSNIFF = True
SECURE_BROWSER_XSS_FILTER = True
CSRF_COOKIE_SECURE = True
SESSION_COOKIE_SECURE = True
X_FRAME_OPTIONS = 'DENY'
STATIC_ROOT = os.path.join(BASE_DIR, 'C:/#DjangoApp/ticketsystem/static/')
STATICFILES_DIRS = [
BASE_DIR+'/static/',
] | mit | -886,011,686,743,345,400 | 24.716981 | 74 | 0.661952 | false | 3.199095 | false | false | false |
auspbro/CodeSnippets | Python/pycode_LXF/crawler_ex6.py | 1 | 1321 | # _*_ coding:utf-8 _*_
import urllib,urllib2
def loadPage(url,filename):
    # Send a request to the url and fetch the server response
    print 'Downloading ' + filename
headers = {'User-Agent':'Mozilla/5.0 (Windows NT 6.1; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/60.0.3112.101 Safari/537.36'}
request = urllib2.Request(url,headers = headers)
content = urllib2.urlopen(request).read()
return content
def writePage(html,filename):
    # Write the html content to a local file
    print 'Saving ' + filename
with open(unicode(filename,'utf-8'),'w') as f:
f.write(html)
print '_' * 30
def tiebaSpider(url,beginPage,endPage):
    # Tieba crawler scheduler: builds and processes the url for each page
for page in range(beginPage,endPage + 1):
pn = (page - 1) * 50
        filename = 'page_' + str(page) + '.html'
fullurl = url + '&pn=' + str(pn)
# print fullurl
html = loadPage(fullurl,filename)
writePage(html,filename)
if __name__ == '__main__':
    kw = raw_input('Enter the tieba (forum) name: ')
    beginPage = int(raw_input('Enter the start page: '))
    endPage = int(raw_input('Enter the end page: '))
url = 'https://tieba.baidu.com/f?'
key = urllib.urlencode({'kw':kw})
fullurl = url + key
tiebaSpider(fullurl,beginPage,endPage) | gpl-3.0 | -2,428,953,414,852,437,000 | 31.611111 | 145 | 0.622336 | false | 2.474684 | false | false | false |
kubeflow/xgboost-operator | config/samples/lightgbm-dist/main.py | 1 | 2583 | # Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import logging
import argparse
from train import train
from utils import generate_machine_list_file, generate_train_conf_file
logger = logging.getLogger(__name__)
def main(args, extra_args):
master_addr = os.environ["MASTER_ADDR"]
master_port = os.environ["MASTER_PORT"]
worker_addrs = os.environ["WORKER_ADDRS"]
worker_port = os.environ["WORKER_PORT"]
world_size = int(os.environ["WORLD_SIZE"])
rank = int(os.environ["RANK"])
logger.info(
"extract cluster info from env variables \n"
f"master_addr: {master_addr} \n"
f"master_port: {master_port} \n"
f"worker_addrs: {worker_addrs} \n"
f"worker_port: {worker_port} \n"
f"world_size: {world_size} \n"
f"rank: {rank} \n"
)
if args.job_type == "Predict":
logging.info("starting the predict job")
elif args.job_type == "Train":
logging.info("starting the train job")
logging.info(f"extra args:\n {extra_args}")
machine_list_filepath = generate_machine_list_file(
master_addr, master_port, worker_addrs, worker_port
)
logging.info(f"machine list generated in: {machine_list_filepath}")
local_port = worker_port if rank else master_port
config_file = generate_train_conf_file(
machine_list_file=machine_list_filepath,
world_size=world_size,
output_model="model.txt",
local_port=local_port,
extra_args=extra_args,
)
logging.info(f"config generated in: {config_file}")
train(config_file)
logging.info("Finish distributed job")
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument(
"--job_type",
help="Job type to execute",
choices=["Train", "Predict"],
required=True,
)
logging.basicConfig(format="%(message)s")
logging.getLogger().setLevel(logging.INFO)
args, extra_args = parser.parse_known_args()
main(args, extra_args)
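# Illustrative invocation sketch: the cluster layout is read from environment
# variables in main() above; the values below are placeholders normally set by
# the operator/controller, and the trailing extra arguments are forwarded
# verbatim to generate_train_conf_file (exact format depends on utils.py):
#
#   MASTER_ADDR=10.0.0.1 MASTER_PORT=9091 \
#   WORKER_ADDRS=10.0.0.2,10.0.0.3 WORKER_PORT=9091 \
#   WORLD_SIZE=3 RANK=0 \
#   python main.py --job_type Train boosting_type=gbdt objective=binary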
| apache-2.0 | -6,987,369,922,413,389,000 | 31.2875 | 75 | 0.646535 | false | 3.738061 | false | false | false |
sthirugn/robottelo | tests/foreman/api/test_usergroup.py | 2 | 9960 | """Unit tests for the ``usergroups`` paths.
Each ``APITestCase`` subclass tests a single URL. A full list of URLs to be
tested can be found here:
http://theforeman.org/api/1.11/apidoc/v2/usergroups.html
@Requirement: Usergroup
@CaseAutomation: Automated
@CaseLevel: Acceptance
@CaseComponent: API
@TestType: Functional
@CaseImportance: High
@Upstream: No
"""
from fauxfactory import gen_string
from nailgun import entities
from random import randint
from requests.exceptions import HTTPError
from robottelo.datafactory import (
invalid_values_list,
valid_data_list,
valid_usernames_list,
)
from robottelo.decorators import tier1, tier2
from robottelo.test import APITestCase
class UserGroupTestCase(APITestCase):
"""Tests for the ``usergroups`` path."""
@tier1
def test_positive_create_with_name(self):
"""Create new user group using different valid names
@id: 3a2255d9-f48d-4f22-a4b9-132361bd9224
@Assert: User group is created successfully.
"""
for name in valid_data_list():
with self.subTest(name):
user_group = entities.UserGroup(name=name).create()
self.assertEqual(user_group.name, name)
@tier1
def test_positive_create_with_user(self):
"""Create new user group using valid user attached to that group.
@id: ab127e09-31d2-4c5b-ae6c-726e4b11a21e
@Assert: User group is created successfully.
"""
for login in valid_usernames_list():
with self.subTest(login):
user = entities.User(login=login).create()
user_group = entities.UserGroup(user=[user]).create()
self.assertEqual(len(user_group.user), 1)
self.assertEqual(user_group.user[0].read().login, login)
@tier1
def test_positive_create_with_users(self):
"""Create new user group using multiple users attached to that group.
@id: b8dbbacd-b5cb-49b1-985d-96df21440652
@Assert: User group is created successfully and contains all expected
users.
"""
users = [entities.User().create() for _ in range(randint(3, 5))]
user_group = entities.UserGroup(user=users).create()
self.assertEqual(
sorted([user.login for user in users]),
sorted([user.read().login for user in user_group.user])
)
@tier1
def test_positive_create_with_role(self):
"""Create new user group using valid role attached to that group.
@id: c4fac71a-9dda-4e5f-a5df-be362d3cbd52
@Assert: User group is created successfully.
"""
for role_name in valid_data_list():
with self.subTest(role_name):
role = entities.Role(name=role_name).create()
user_group = entities.UserGroup(role=[role]).create()
self.assertEqual(len(user_group.role), 1)
self.assertEqual(user_group.role[0].read().name, role_name)
@tier1
def test_positive_create_with_roles(self):
"""Create new user group using multiple roles attached to that group.
@id: 5838fcfd-e256-49cf-aef8-b2bf215b3586
@Assert: User group is created successfully and contains all expected
roles
"""
roles = [entities.Role().create() for _ in range(randint(3, 5))]
user_group = entities.UserGroup(role=roles).create()
self.assertEqual(
sorted([role.name for role in roles]),
sorted([role.read().name for role in user_group.role])
)
@tier1
def test_positive_create_with_usergroup(self):
"""Create new user group using another user group attached to the
initial group.
@id: 2a3f7b1a-7411-4c12-abaf-9a3ca1dfae31
@Assert: User group is created successfully.
"""
for name in valid_data_list():
with self.subTest(name):
sub_user_group = entities.UserGroup(name=name).create()
user_group = entities.UserGroup(
usergroup=[sub_user_group],
).create()
self.assertEqual(len(user_group.usergroup), 1)
self.assertEqual(user_group.usergroup[0].read().name, name)
@tier2
def test_positive_create_with_usergroups(self):
"""Create new user group using multiple user groups attached to that
initial group.
@id: 9ba71288-af8b-4957-8413-442a47057634
@Assert: User group is created successfully and contains all expected
user groups
@CaseLevel: Integration
"""
sub_user_groups = [
entities.UserGroup().create() for _ in range(randint(3, 5))]
user_group = entities.UserGroup(usergroup=sub_user_groups).create()
self.assertEqual(
sorted([usergroup.name for usergroup in sub_user_groups]),
sorted(
[usergroup.read().name for usergroup in user_group.usergroup])
)
@tier1
def test_negative_create_with_name(self):
"""Attempt to create user group with invalid name.
@id: 1a3384dc-5d52-442c-87c8-e38048a61dfa
@Assert: User group is not created.
"""
for name in invalid_values_list():
with self.subTest(name):
with self.assertRaises(HTTPError):
entities.UserGroup(name=name).create()
@tier1
def test_negative_create_with_same_name(self):
"""Attempt to create user group with a name of already existent entity.
@id: aba0925a-d5ec-4e90-86c6-404b9b6f0179
@Assert: User group is not created.
"""
user_group = entities.UserGroup().create()
with self.assertRaises(HTTPError):
entities.UserGroup(name=user_group.name).create()
@tier1
def test_positive_update(self):
"""Update existing user group with different valid names.
@id: b4f0a19b-9059-4e8b-b245-5a30ec06f9f3
@Assert: User group is updated successfully.
"""
user_group = entities.UserGroup().create()
for new_name in valid_data_list():
with self.subTest(new_name):
user_group.name = new_name
user_group = user_group.update(['name'])
self.assertEqual(new_name, user_group.name)
@tier1
def test_positive_update_with_new_user(self):
"""Add new user to user group
@id: e11b57c3-5f86-4963-9cc6-e10e2f02468b
@Assert: User is added to user group successfully.
"""
user = entities.User().create()
user_group = entities.UserGroup().create()
user_group.user = [user]
user_group = user_group.update(['user'])
self.assertEqual(user.login, user_group.user[0].read().login)
@tier2
def test_positive_update_with_existing_user(self):
"""Update user that assigned to user group with another one
@id: 71b78f64-867d-4bf5-9b1e-02698a17fb38
@Assert: User group is updated successfully.
@CaseLevel: Integration
"""
users = [entities.User().create() for _ in range(2)]
user_group = entities.UserGroup(user=[users[0]]).create()
user_group.user[0] = users[1]
user_group = user_group.update(['user'])
self.assertEqual(users[1].login, user_group.user[0].read().login)
@tier1
def test_positive_update_with_new_role(self):
"""Add new role to user group
@id: 8e0872c1-ae88-4971-a6fc-cd60127d6663
@Assert: Role is added to user group successfully.
"""
new_role = entities.Role().create()
user_group = entities.UserGroup().create()
user_group.role = [new_role]
user_group = user_group.update(['role'])
self.assertEqual(new_role.name, user_group.role[0].read().name)
@tier1
def test_positive_update_with_new_usergroup(self):
"""Add new user group to existing one
@id: 3cb29d07-5789-4f94-9fd9-a7e494b3c110
@Assert: User group is added to existing group successfully.
"""
new_usergroup = entities.UserGroup().create()
user_group = entities.UserGroup().create()
user_group.usergroup = [new_usergroup]
user_group = user_group.update(['usergroup'])
self.assertEqual(
new_usergroup.name, user_group.usergroup[0].read().name)
@tier1
def test_negative_update(self):
"""Attempt to update existing user group using different invalid names.
@id: 03772bd0-0d52-498d-8259-5c8a87e08344
@Assert: User group is not updated.
"""
user_group = entities.UserGroup().create()
for new_name in invalid_values_list():
with self.subTest(new_name):
user_group.name = new_name
with self.assertRaises(HTTPError):
user_group.update(['name'])
self.assertNotEqual(user_group.read().name, new_name)
@tier1
def test_negative_update_with_same_name(self):
"""Attempt to update user group with a name of already existent entity.
@id: 14888998-9282-4d81-9e99-234d19706783
@Assert: User group is not updated.
"""
name = gen_string('alphanumeric')
entities.UserGroup(name=name).create()
new_user_group = entities.UserGroup().create()
new_user_group.name = name
with self.assertRaises(HTTPError):
new_user_group.update(['name'])
self.assertNotEqual(new_user_group.read().name, name)
@tier1
def test_positive_delete(self):
"""Create user group with valid name and then delete it
@id: c5cfcc4a-9177-47bb-8f19-7a8930eb7ca3
@assert: User group is deleted successfully
"""
user_group = entities.UserGroup().create()
user_group.delete()
with self.assertRaises(HTTPError):
user_group.read()
| gpl-3.0 | -3,526,521,967,801,218,600 | 32.993174 | 79 | 0.619177 | false | 3.730337 | true | false | false |
StackStorm/st2 | st2client/st2client/models/keyvalue.py | 3 | 1173 | # Copyright 2020 The StackStorm Authors.
# Copyright 2019 Extreme Networks, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import
import logging
from st2client.models import core
LOG = logging.getLogger(__name__)
class KeyValuePair(core.Resource):
_alias = "Key"
_display_name = "Key Value Pair"
_plural = "Keys"
_plural_display_name = "Key Value Pairs"
_repr_attributes = ["name", "value"]
# Note: This is a temporary hack until we refactor client and make it support non id PKs
def get_id(self):
return self.name
def set_id(self, value):
pass
id = property(get_id, set_id)
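    # Illustrative: KeyValuePair(name="api_url", value="...").id == "api_url",
    # since a key's name doubles as its client-side primary key.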
| apache-2.0 | 9,167,533,811,843,134,000 | 28.325 | 92 | 0.710997 | false | 3.808442 | false | false | false |
nirajkvinit/flask-headlines | upto_ch4/headlines6.py | 1 | 2953 | import feedparser
import json
import urllib.request
from urllib.parse import quote
from flask import Flask
from flask import render_template
from flask import request
from pprint import pprint
app = Flask(__name__)
RSS_FEEDS = {
'bbc' : 'http://feeds.bbci.co.uk/news/rss.xml',
'cnn' : 'http://rss.cnn.com/rss/edition.rss',
'fox' : 'http://feeds.foxnews.com/foxnews/latest',
'iol' : 'http://www.iol.co.za/cmlink/1.640'
}
DEFAULTS = {
'publication' : 'cnn',
'city' : 'Calcutta, IN',
'currency_from' : 'USD',
'currency_to' : 'INR'
}
WEATHER_URL = "http://api.openweathermap.org/data/2.5/weather?q={}&units=metric&appid=65b8831d1736fe05836815097ae4a457"
CURRENCY_URL = "https://openexchangerates.org//api/latest.json?app_id=09f8ae338add4275a341e3c556444eae"
@app.route("/")
def home():
publication = request.args.get("publication")
if not publication:
publication = DEFAULTS['publication']
articles = get_news(publication)
city = request.args.get('city')
if not city:
city = DEFAULTS['city']
weather = get_weather(city)
currency_from = request.args.get("currency_from")
currency_to = request.args.get("currency_to")
if not currency_from:
currency_from = DEFAULTS['currency_from']
if not currency_to:
currency_to = DEFAULTS['currency_to']
rate, currencies = get_rates(currency_from, currency_to)
return render_template(
"home2.html",
articles = articles,
weather = weather,
feeds = RSS_FEEDS,
publication = publication,
city = city,
currency_from = currency_from,
currency_to = currency_to,
rate = rate,
currencies = sorted(currencies)
)
def get_news(query):
if not query or query.lower() not in RSS_FEEDS:
publication = DEFAULTS['publication']
else:
publication = query.lower()
feed = feedparser.parse(RSS_FEEDS[publication])
return feed['entries']
def get_weather(query):
query = quote(query)
url = WEATHER_URL.format(query)
data = urllib.request.urlopen(url).read().decode("utf-8")
parsed = json.loads(data)
weather = None
if parsed.get("weather"):
weather = {
"description" : parsed["weather"][0]["description"],
"temperature" : parsed["main"]["temp"],
"city" : parsed["name"],
"country" : parsed["sys"]["country"]
}
return weather
def get_rates(from_rate, to_rate):
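    # The API quotes every rate against one base currency, so the ratio below
    # is the cross rate: e.g. (illustrative) get_rates("USD", "INR") returns
    # (INR per USD, all available currency codes).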
all_currency = urllib.request.urlopen(CURRENCY_URL).read().decode("utf-8")
parsed = json.loads(all_currency).get('rates')
parsed_from_rate = parsed.get(from_rate.upper())
parsed_to_rate = parsed.get(to_rate.upper())
return (parsed_to_rate/parsed_from_rate, parsed.keys())
if __name__ == "__main__":
app.run(port=5000, debug=True)
#65b8831d1736fe05836815097ae4a457 #WEATHER_URL
#09f8ae338add4275a341e3c556444eae #CURRENCY_URL
| mit | 7,481,833,986,435,559,000 | 28.237624 | 119 | 0.642398 | false | 3.252203 | false | false | false |
junkoda/mockgallib | script/generate_mock.py | 1 | 3535 | """
This script generates a mock/random catalogue from a lightcone
python3 generate_mock.py [--random] <n>
Args:
n: index of lightcone
Options:
--param [=param.json]: parameter file
--random: generate random catalogue
Input:
halo_lightcone/lightcone_<n>.h5
rand_lightcone/lightcone_<n>.h5
Output:
mocks/mock_<n>.txt
rands/random_<n>.txt
"""
import os
import argparse
import json
import signal
import numpy as np
import mockgallib as mock
signal.signal(signal.SIGINT, signal.SIG_DFL) # stop with ctrl-c
#
# Command-line options
#
parser = argparse.ArgumentParser()
parser.add_argument('n', help='index of lightcone')
parser.add_argument('--reg', default='w1', help='region w1/w4')
parser.add_argument('--dir', default='.', help='base data directory')
parser.add_argument('--param', default='param.json',
help='parameter json file')
parser.add_argument('--mock', help='generate mock catalogue',
action="store_true")
parser.add_argument('--rand', help='generate random catalogue',
action="store_true")
arg = parser.parse_args()
data_dir = '/workplace/wp2e/como5/data'
#
# Read parameter file
#
print('Parameter file: %s' % arg.param)
with open(arg.param, 'r') as f:
param = json.load(f)
omega_m = param['omega_m']
print('Setting cosmology: omega_m= %.4f' % omega_m)
#
# Initialise
#
mock.set_loglevel(0)
mock.cosmology.set(omega_m)
mock.power.init(arg.dir + '/' + param['power_spectrum'])
#
# redshift range
#
z_min = 0.39
z_max = 1.21
print('redshift range %f %f' % (z_min, z_max))
# nz
# nbar_obs= mock.array.loadtxt(arg.dir + '/' + param['nz'])
# sky
sky = {}
for reg in param['reg']:
sky[reg['name']] = mock.Sky(reg['ra'], reg['dec'], [z_min, z_max])
#
# Set HOD parameters
#
hod = mock.Hod()
hod_param = [11.632682100874081, -0.5706390738948128, 4.904043697780981, -1.0126352684312565, 0.45, 0.9, 1.05, 0.0, 0.9, 0.0, 4.0, 2.0]
hod.set_coef(hod_param)
lightcones = mock.LightCones()
cats = mock.Catalogues()
n = int(arg.n)
def write_catalogue(filename, a):
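    # One output line per object; the meaning of each column is given in the
    # legend at the end of this script.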
with open(filename, 'w') as f:
for i in range(a.shape[0]):
f.write('%d %e %e %e %e %e %e %e %e %e %e\n' % (
i,
a[i, 0], a[i, 1], a[i, 2],
a[i, 4], a[i, 3],
a[i, 5], a[i, 6],
a[i, 7], a[i, 10], a[i, 11]))
reg = arg.reg
# mock
if arg.mock:
halo_lightcones = mock.LightCones()
halo_lightcones.load_h5(
['%s/halo_lightcone/%s/lightcone_%05d.h5' % (arg.dir, reg, n)])
galaxy_catalogues = mock.Catalogues()
galaxy_catalogues.generate_galaxies(hod, halo_lightcones, sky[reg],
z_min, z_max)
write_catalogue('mocks/%s/mock_%s_%05d.txt' % (reg, reg, n),
galaxy_catalogues[0])
if arg.rand:
rand_lightcones = mock.LightCones()
rand_lightcones.load_h5(
['%s/rand_lightcone/%s/lightcone_%05d.h5' % (arg.dir, reg, n)])
random_catalogues = mock.Catalogues()
random_catalogues.generate_randoms(hod, rand_lightcones, sky[reg],
z_min, z_max)
write_catalogue('rands/%s/rand_%s_%05d.txt' % (reg, reg, n),
random_catalogues[0])
# Column 0: index
# Column 1: x realspace [1/h Mpc]
# Column 2: y
# Column 3: z
# Column 4: vr [km/s]
# Column 5: redshift
# Column 6: RA
# Column 7: Dec
# Column 8: M_host_halo
# Column 9: r_satellite
# Column 10: vr_satellite
| gpl-3.0 | -5,826,871,821,911,481,000 | 23.548611 | 135 | 0.595191 | false | 2.761719 | false | false | false |
RacingTadpole/cmsplugin-rt | setup.py | 1 | 1454 | import os
from setuptools import setup, find_packages
README = open(os.path.join(os.path.dirname(__file__), 'README.rst')).read()
# allow setup.py to be run from any path
os.chdir(os.path.normpath(os.path.join(os.path.abspath(__file__), os.pardir)))
setup(
name = 'cmsplugin-rt',
version = '0.5.1',
packages = find_packages(), #'cmsplugin_rt', #find_packages(),
include_package_data = True,
license = 'BSD License', # example license
description = 'This package contains a number of basic plugins to kick start your DjangoCMS project, such as Twitter Bootstrap navbar and buttons, Facebook and Twitter buttons, a Style Modifier, Google Analytics tracking code, Google fonts, meta tags and resizable pictures.',
long_description = README,
keywords = "button meta twitter bootstrap style modifier racing tadpole",
url = 'https://github.com/RacingTadpole/cmsplugin-rt',
author = 'Art Street',
author_email = '[email protected]',
classifiers = [
'Environment :: Web Environment',
'Framework :: Django',
'Intended Audience :: Developers',
'License :: OSI Approved :: BSD License', # example license
'Operating System :: OS Independent',
'Programming Language :: Python',
'Programming Language :: Python :: 2.7',
'Topic :: Internet :: WWW/HTTP',
'Topic :: Internet :: WWW/HTTP :: Dynamic Content',
],
zip_safe = False,
)
| bsd-3-clause | 7,341,109,183,959,611,000 | 43.060606 | 280 | 0.662999 | false | 3.940379 | false | true | false |
DataViva/dataviva-scripts | scripts/crosswalk/format_raw_data.py | 1 | 1434 | # -*- coding: utf-8 -*-
import os, sys, time, bz2, click
import pandas as pd
import pandas.io.sql as sql
import numpy as np
import itertools
@click.command()
@click.argument('file_path', type=click.Path(exists=True))
@click.option('output_path', '--output', '-o', help='Path to save files to.', type=click.Path(), required=True, prompt="Output path")
def main(file_path, output_path):
nestings = []
fieldA = "hs"
fieldB = "cnae"
df = pd.read_csv(file_path, converters={fieldA: str, fieldB: str})
df = df[ (df[fieldA].str.len() > 0) & (df[fieldB].str.len() >0)]
df = df[[fieldA, fieldB]]
if fieldA == "hs":
df.hs = df.hs.str.slice(2, 6)
df = df.drop_duplicates()
print df
print
print
# depths = {"hs" : [2, 6], "cnae": [1, 5]}
# for depthcol, lengths in depths.items():
# my_nesting.append(lengths)
# my_nesting_cols.append(depthcol)
# print my_nesting, my_nesting_cols
# for depths in itertools.product(*my_nesting):
# series = {}
# print depths
# for col_name, l in zip(my_nesting_cols, depths):
# series[col_name] = df[col_name].str.slice(0, l)
# addtl_rows = pd.DataFrame(series)
# full_table = pd.concat([addtl_rows, full_table])
# # print pk
# print full_table
df.to_csv("pi_crosswalk.csv", index=False)
if __name__ == "__main__":
main()
| mit | -7,191,728,656,972,591,000 | 30.173913 | 133 | 0.577406 | false | 2.9875 | false | false | false |
uskudnik/ggrc-core | src/ggrc/models/track_object_state.py | 2 | 1612 | from sqlalchemy import event
from datetime import datetime
from ggrc import db
from sqlalchemy.ext.declarative import declared_attr
from .mixins import deferred
from ggrc.login import get_current_user_id
from .reflection import PublishOnly
class HasObjectState(object):
_publish_attrs = [
PublishOnly('os_state'),
]
def __init__(self, *args, **kwargs):
self._skip_os_state_update = False;
super(HasObjectState, self).__init__(*args, **kwargs)
@declared_attr
def os_state(cls):
return deferred(db.Column(db.String, nullable=False, default=ObjectStates.DRAFT), cls.__name__)
def skip_os_state_update(self):
self._skip_os_state_update = True
class ObjectStates:
DRAFT = 'Draft'
APPROVED = 'Approved'
DECLINED = 'Declined'
MODIFIED = 'Modified'
# Tables whose objects have their state tracked
class ObjectStateTables:
table_names = [
'programs', 'objectives', 'controls', 'sections',
'systems', 'data_assets', 'facilities',
'markets', 'products', 'projects', 'directives',
'org_groups', 'vendors'
]
def state_before_insert_listener(mapper, connection, target):
if hasattr(target, 'os_state'):
target.os_state = ObjectStates.DRAFT
def state_before_update_listener(mapper, connection, target):
if hasattr(target, 'os_state'):
if hasattr(target, '_skip_os_state_update'):
if True == target._skip_os_state_update:
return
target.os_state = ObjectStates.MODIFIED
def track_state_for_class(object_class):
event.listen(object_class, 'before_insert', state_before_insert_listener)
event.listen(object_class, 'before_update', state_before_update_listener)
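# Illustrative usage (hypothetical model class):
#
#   class Program(HasObjectState, db.Model):
#       ...
#   track_state_for_class(Program)
#
# New rows then start in ObjectStates.DRAFT, and later updates flip them to
# ObjectStates.MODIFIED unless skip_os_state_update() was called first.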
| apache-2.0 | 3,622,604,706,887,073,300 | 28.309091 | 99 | 0.707196 | false | 3.408034 | false | false | false |
netvigator/myPyPacks | pyPacks/String/Eat.py | 2 | 13226 | #!/usr/bin/pythonTest
# -*- coding: utf-8 -*-
#
# string functions Eat
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Library General Public License for more details.
#
# The GNU General Public License is available from:
# The Free Software Foundation, Inc.
# 51 Franklin Street, Fifth Floor
# Boston MA 02110-1301 USA
#
# http://www.gnu.org/licenses/gpl.html
#
# Copyright 2004-2016 Rick Graves
#
from String.Test import isAsciiAlpha, isAsciiDigit
def _eatOffOneEnd( sText, sEatThese = '',
fEatThese = None, bEatOffFront = True, bEatOffBoth = False ):
#
"""
    This is the generic program;
it is normally only called by specific implementations below.
"""
#
from String.Get import getTheseCharsOffOneEnd
#
iEat = len(
getTheseCharsOffOneEnd( sText, sEatThese, fEatThese, bEatOffFront ) )
#
if bEatOffFront or bEatOffBoth:
#
sText = sText[ iEat : ]
#
if bEatOffBoth:
#
iEat = len(
getTheseCharsOffOneEnd( sText, sEatThese, fEatThese, False ) )
#
#
if bEatOffBoth or not bEatOffFront:
#
if iEat: sText = sText[ : - iEat ]
#
#
return sText
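# Illustrative examples for the wrappers below:
#   eatCharsOffBeg( 'aabbc', 'a' )  ->  'bbc'
#   eatCharsOffEnd( 'aabbc', 'c' )  ->  'aabb'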
def eatCharsOffBeg( sText, sEatThese = '', fEatThese = None ):
#
return _eatOffOneEnd( sText, sEatThese, fEatThese, bEatOffFront = True )
def eatCharsOffEnd( sText, sEatThese = '', fEatThese = None ):
#
return _eatOffOneEnd( sText, sEatThese, fEatThese, bEatOffFront = False )
def eatCharsOffBothEnds( sText, sEatThese = '', fEatThese = None ):
#
return _eatOffOneEnd( sText, sEatThese,
fEatThese, bEatOffFront = False, bEatOffBoth = True )
def eatPunctuationBegAndEnd( sFrag ):
#
from String.Test import isPunctuation
#
return eatCharsOffEnd(
eatCharsOffBeg( sFrag, fEatThese = isPunctuation ), fEatThese = isPunctuation )
def eatPunctuationEnd( sFrag ):
#
from String.Test import isPunctuation
#
return eatCharsOffEnd( sFrag, fEatThese = isPunctuation )
def eatPunctAndSpacesOffEnd( sFrag ):
#
from String.Test import isPunctOrSpace
#
return eatCharsOffEnd( sFrag, fEatThese = isPunctOrSpace )
def eatPunctAndSpacesOffBegAndEnd( sFrag ):
#
from String.Test import isPunctOrSpace
#
return eatCharsOffEnd(
eatCharsOffBeg( sFrag, fEatThese = isPunctOrSpace ), fEatThese = isPunctOrSpace )
def eatFrontNonAlpha( sText ):
#
def fEatThese( sChar ): return not isAsciiAlpha( sChar )
#
return _eatOffOneEnd( sText, fEatThese = fEatThese )
def eatFrontNonDigits( sText ):
#
from String.Test import isNotDigit
#
return _eatOffOneEnd( sText, fEatThese = isNotDigit )
def eatBackNonDigits( sText ):
#
from String.Test import isNotDigit
#
return _eatOffOneEnd( sText, fEatThese = isNotDigit, bEatOffFront = False )
def eatFrontNonAlphaNum( sText ):
#
def fEatThese( sChar ): return not (
isAsciiAlpha( sChar ) or
isAsciiDigit( sChar ) )
#
return _eatOffOneEnd( sText, fEatThese = fEatThese )
def eatFrontNonAlphaNumButKeepLF( sText ):
#
def fEatThese( sChar ): return not (
isAsciiAlpha( sChar ) or
isAsciiDigit( sChar ) or
sChar == '\n' )
#
return _eatOffOneEnd( sText, fEatThese = fEatThese )
def eatEndNonAlphaNum( sText ):
#
#def fEatThese( sChar ): return not ( sChar.isalpha() or sChar.isdigit() )
def fEatThese( sChar ):
return not (
isAsciiAlpha( sChar ) or
isAsciiDigit( sChar ) )
#
return _eatOffOneEnd( sText, fEatThese = fEatThese, bEatOffFront = False )
def eatNonAlphaNumBothEnds( sText ):
#
return eatEndNonAlphaNum( eatFrontNonAlphaNum( sText ) )
def eatNonAlphaBothEnds( sText ):
#
return eatEndNonAlpha( eatFrontNonAlpha( sText ) )
def eatAlphaOffEnd( sText ):
#
return eatCharsOffEnd( sText, fEatThese = isAsciiAlpha )
setCRLF = frozenset( ( '\n', '\r' ) )
def _gotCRLF( sChar ): return sChar in setCRLF
def eatEndCRLF( sText ):
#
return _eatOffOneEnd( sText, fEatThese = _gotCRLF, bEatOffFront = False )
def eatBegCRLF( sText ):
#
return _eatOffOneEnd( sText, fEatThese = _gotCRLF )
def eatEndAlpha( sText ):
#
def fEatThese( sChar ): return sChar.isalpha()
#
return _eatOffOneEnd( sText, fEatThese = fEatThese, bEatOffFront = False )
def eatEndNonAlpha( sText ):
#
def fEatThese( sChar ): return not isAsciiAlpha( sChar )
#
return _eatOffOneEnd( sText, fEatThese = fEatThese, bEatOffFront = False )
def eatFrontDigits( sText ):
#
from String.Test import isDigit
#
return _eatOffOneEnd( sText, fEatThese = isDigit )
def eatEndDigits( sText ):
#
from String.Test import isDigit
#
return _eatOffOneEnd( sText, fEatThese = isDigit, bEatOffFront = False )
def eatWhiteSpaceBothEnds( sText ):
#
from string import whitespace
#
return _eatOffOneEnd(
_eatOffOneEnd( sText, whitespace ),
whitespace, bEatOffFront = False )
def eatWhiteSpaceFront( sText ):
#
from string import whitespace
#
return _eatOffOneEnd( sText, whitespace )
def eatEndSpaces( sText ):
#
from String.Test import isSpace
#
return _eatOffOneEnd( sText, fEatThese = isSpace, bEatOffFront = False )
def _getFrontCharOff( s, sDigit ):
#
while s.startswith( sDigit ):
#
s = s[ 1 : ]
#
#
return s
def eatFrontZeros( s ):
#
return _getFrontCharOff( s, '0' )
def eatFrontOnes( s ):
#
return _getFrontCharOff( s, '1' )
_setZeroOne = frozenset( ( '0', '1' ) )
def eatFrontZerosOnes( s ):
#
while s and s[0] in _setZeroOne:
#
s = s[ 1 : ]
#
#
return s
def eatFrontOneByOne( sOrig, sEat ):
#
from String.Get import getTextAfter
#
sRest = sOrig
#
for c in sEat:
#
sRest = getTextAfter( sRest, c )
#
#
return sRest
if __name__ == "__main__":
#
from string import digits, whitespace
from string import ascii_lowercase as lowercase
from string import ascii_uppercase as uppercase
#
from six import print_ as print3
#
from Iter.AllVers import iZip, iMap, tMap
from String.Get import getStringInRange
from Utils.Result import sayTestResult
#
lProblems = []
#
def fEatThese( s ): return not s.isalpha()
#
sLeft = _eatOffOneEnd( lowercase + digits,
fEatThese = fEatThese, bEatOffFront = False )
#
if sLeft != lowercase:
#
print3( 'sLeft:', sLeft )
lProblems.append( '_eatOffOneEnd()' )
#
#
if eatCharsOffBeg( lowercase, 'lkjihgfedcba' ) != 'mnopqrstuvwxyz':
#
lProblems.append( 'eatTheseCharsOffBeg()' )
#
if eatCharsOffEnd( lowercase, 'zyxwvutsrqponm' ) != 'abcdefghijkl':
#
lProblems.append( 'eatTheseCharsOffEnd()' )
#
#
if eatCharsOffBothEnds( '/abc/', '/' ) != 'abc':
#
# print3( eatCharsOffBothEnds( '/abc/', '/' ) )
lProblems.append( 'eatCharsOffBothEnds() remove' )
#
#
if eatCharsOffBothEnds( 'abc', '/' ) != 'abc':
#
lProblems.append( 'eatCharsOffBothEnds() nothing to remove' )
#
#
#
if eatPunctuationBegAndEnd( ',-./0123456789:;' ) != '0123456789':
#
lProblems.append( 'RemovePunctuationBegAndEnd()' )
#
#
# getStringInRange( 32, 91 ) =
# ' !"#$%&\'()*+,-./0123456789:;<=>?@ABCDEFGHIJKLMNOPQRSTUVWXYZ'
#
if eatPunctuationEnd( ',-./0123456789:;' ) != ',-./0123456789':
#
lProblems.append( 'eatPunctuationEnd()' )
#
#
if eatPunctAndSpacesOffEnd( ',-./0123456789: ; ' ) != ',-./0123456789':
#
lProblems.append( 'eatPunctAndSpacesOffEnd()' )
#
#
if eatPunctAndSpacesOffBegAndEnd( ', -./0123456789: ; ' ) != '0123456789':
#
lProblems.append( 'eatPunctAndSpacesOffBegAndEnd()' )
#
#
s32to90 = getStringInRange( 32, 91 )
#
if eatFrontNonAlpha( s32to90 ) != uppercase:
#
lProblems.append( 'eatFrontNonAlpha()' )
#
if eatFrontNonAlphaNum( s32to90 ) != '0123456789:;<=>?@' + uppercase:
#
lProblems.append( 'eatFrontNonAlphaNum()' )
#
if eatFrontNonAlphaNumButKeepLF( '\n' + s32to90 ) != '\n' + s32to90 or \
eatFrontNonAlphaNumButKeepLF( '\r' + s32to90 ) != '0123456789:;<=>?@' + uppercase:
#
lProblems.append( 'eatFrontNonAlphaNumButKeepLF()' )
#
#
# getStringInRange( 48, 65 ) = '0123456789:;<=>?@'
#
if eatEndNonAlphaNum( lowercase + whitespace ) != lowercase:
#
print3( eatEndNonAlphaNum( lowercase + whitespace ) )
lProblems.append( 'eatEndNonAlphaNum( lowercase + whitespace )' )
#
#
if eatEndNonAlphaNum( getStringInRange( 97, 256 ) ) != lowercase:
#
s = eatEndNonAlphaNum( getStringInRange( 97, 256 ) )
#
#
print3( tMap( str, iMap( ord, ( s[0], s[-1] ) ) ) )
lProblems.append( 'eatEndNonAlphaNum( getStringInRange( 97, 256 ) )' )
#
#
if eatEndNonAlphaNum( getStringInRange( 48, 65 ) ) != digits:
#
print3( eatEndNonAlphaNum( getStringInRange( 48, 65 ) ) )
lProblems.append( 'eatEndNonAlphaNum( getStringInRange( 48, 65 ) )' )
#
#
# print3( 'getStringInRange( 65, 123 )', '= ABCDEFGHIJKLMNOPQRSTUVWXYZ[\\]^_`abcdefghijklmnopqrstuvwxyz' )
# print3( 'getStringInRange( 32, 97 )', '''= !"#$%&\'()*+,-./0123456789:;<=>?@ABCDEFGHIJKLMNOPQRSTUVWXYZ[\\]^_`''' )
#
if eatNonAlphaNumBothEnds( getStringInRange( 32, 256 ) ) != getStringInRange( 48, 123 ) or \
eatNonAlphaNumBothEnds( getStringInRange( 32, 97 ) ) != getStringInRange( 48, 91 ):
#
#print3( eatNonAlphaNumBothEnds( getStringInRange( 32, 256 ) ) )
#print3( eatNonAlphaNumBothEnds( getStringInRange( 32, 97 ) ) )
lProblems.append( 'eatNonAlphaNumBothEnds()' )
#
if eatNonAlphaBothEnds( getStringInRange( 32, 97 ) ) != uppercase:
#
lProblems.append( 'eatNonAlphaBothEnds()' )
#
if eatAlphaOffEnd( '1234abcd' ) != '1234':
#
lProblems.append( 'eatAlphaOffEnd()' )
#
#
if eatEndCRLF( '\r\n' + uppercase + '\r\n' ) != '\r\n' + uppercase:
#
lProblems.append( 'eatEndCRLF()' )
#
if eatBegCRLF( '\r\n' + uppercase + '\r\n' ) != uppercase + '\r\n':
#
lProblems.append( 'eatBegCRLF()' )
#
#
# getStringInRange( 65, 123 ) = 'ABCDEFGHIJKLMNOPQRSTUVWXYZ[\\]^_`abcdefghijklmnopqrstuvwxyz'
#
if eatEndAlpha( getStringInRange( 65, 123 ) ) != uppercase + '[\\]^_`':
#
lProblems.append( 'eatEndAlpha()' )
#
if eatEndNonAlpha( getStringInRange( 97, 256 ) ) != lowercase:
#
print3( eatEndNonAlpha( getStringInRange( 97, 256 ) ) )
lProblems.append( 'eatEndNonAlpha()' )
#
#
# getStringInRange( 48, 65 ) = '0123456789:;<=>?@'
#
if eatFrontDigits( getStringInRange( 48, 65 ) ) != ':;<=>?@':
#
lProblems.append( 'eatFrontDigits()' )
#
#
# getStringInRange( 32, 58 ) = ' !"#$%&\'()*+,-./0123456789'
#
if eatEndDigits( getStringInRange( 32, 58 ) ) != ' !"#$%&\'()*+,-./':
#
lProblems.append( 'eatEndDigits()' )
#
#
if eatWhiteSpaceBothEnds(
whitespace + lowercase + whitespace ) != lowercase:
#
lProblems.append( 'eatWhiteSpaceBothEnds()' )
#
#
if eatWhiteSpaceFront(
whitespace + lowercase + whitespace ) != \
lowercase + whitespace:
#
lProblems.append( 'eatWhiteSpaceFront()' )
#
#
if eatEndSpaces( '\t\n\x0b\x0c\r ' ) != '\t\n\x0b\x0c\r' or \
eatEndSpaces( 'abc' ) != 'abc':
#
lProblems.append( 'eatEndSpaces()' )
#
#
if eatFrontNonDigits( '-206-632-9929' ) != '206-632-9929':
#
lProblems.append( 'eatFrontNonDigits()' )
#
#
if eatBackNonDigits( '123xzy' ) != '123':
#
lProblems.append( 'eatBackNonDigits()' )
#
#
sOrig = '1-2-3-4-5-6-7-8-9'
sEat = '123'
#
if eatFrontOneByOne( sOrig, sEat ) != '-4-5-6-7-8-9':
#
lProblems.append( 'eatFrontOneByOne()' )
#
#
#
#
#
sayTestResult( lProblems ) | gpl-2.0 | 1,387,247,632,390,510,300 | 25.507014 | 121 | 0.585362 | false | 3.121548 | true | false | false |
whn09/tensorflow | tensorflow/contrib/kernel_methods/python/kernel_estimators.py | 2 | 13769 | # Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Estimators that combine explicit kernel mappings with linear models."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import six
from tensorflow.contrib import layers
from tensorflow.contrib.kernel_methods.python.mappers import dense_kernel_mapper as dkm
from tensorflow.contrib.learn.python.learn.estimators import estimator
from tensorflow.contrib.learn.python.learn.estimators import head as head_lib
from tensorflow.contrib.learn.python.learn.estimators import linear
from tensorflow.contrib.learn.python.learn.estimators import prediction_key
from tensorflow.python.ops import array_ops
from tensorflow.python.platform import tf_logging as logging
_FEATURE_COLUMNS = "feature_columns"
_KERNEL_MAPPERS = "kernel_mappers"
_OPTIMIZER = "optimizer"
def _check_valid_kernel_mappers(kernel_mappers):
"""Checks that the input kernel_mappers are valid."""
if kernel_mappers is None:
return True
for kernel_mappers_list in six.itervalues(kernel_mappers):
for kernel_mapper in kernel_mappers_list:
if not isinstance(kernel_mapper, dkm.DenseKernelMapper):
return False
return True
def _check_valid_head(head):
"""Returns true if the provided head is supported."""
if head is None:
return False
# pylint: disable=protected-access
return isinstance(head, head_lib._BinaryLogisticHead) or isinstance(
head, head_lib._MultiClassHead)
# pylint: enable=protected-access
def _update_features_and_columns(features, feature_columns,
kernel_mappers_dict):
"""Updates features and feature_columns based on provided kernel mappers.
Currently supports the update of `RealValuedColumn`s only.
Args:
features: Initial features dict. The key is a `string` (feature column name)
and the value is a tensor.
feature_columns: Initial iterable containing all the feature columns to be
consumed (possibly after being updated) by the model. All items should be
instances of classes derived from `FeatureColumn`.
kernel_mappers_dict: A dict from feature column (type: _FeatureColumn) to
objects inheriting from KernelMapper class.
Returns:
updated features and feature_columns based on provided kernel_mappers_dict.
"""
if kernel_mappers_dict is None:
return features, feature_columns
# First construct new columns and features affected by kernel_mappers_dict.
mapped_features = dict()
mapped_columns = set()
for feature_column in kernel_mappers_dict:
column_name = feature_column.name
# Currently only mappings over RealValuedColumns are supported.
if not isinstance(feature_column, layers.feature_column._RealValuedColumn): # pylint: disable=protected-access
logging.warning(
"Updates are currently supported on RealValuedColumns only. Metadata "
"for FeatureColumn {} will not be updated.".format(column_name))
continue
mapped_column_name = column_name + "_MAPPED"
# Construct new feature columns based on provided kernel_mappers.
column_kernel_mappers = kernel_mappers_dict[feature_column]
new_dim = sum([mapper.output_dim for mapper in column_kernel_mappers])
mapped_columns.add(
layers.feature_column.real_valued_column(mapped_column_name, new_dim))
# Get mapped features by concatenating mapped tensors (one mapped tensor
# per kernel mappers from the list of kernel mappers corresponding to each
# feature column).
output_tensors = []
for kernel_mapper in column_kernel_mappers:
output_tensors.append(kernel_mapper.map(features[column_name]))
tensor = array_ops.concat(output_tensors, 1)
mapped_features[mapped_column_name] = tensor
# Finally update features dict and feature_columns.
features = features.copy()
features.update(mapped_features)
feature_columns = set(feature_columns)
feature_columns.update(mapped_columns)
return features, feature_columns
def _kernel_model_fn(features, labels, mode, params, config=None):
"""model_fn for the Estimator using kernel methods.
Args:
features: `Tensor` or dict of `Tensor` (depends on data passed to `fit`).
labels: `Tensor` of shape [batch_size, 1] or [batch_size] labels of
dtype `int32` or `int64` in the range `[0, n_classes)`.
mode: Defines whether this is training, evaluation or prediction. See
`ModeKeys`.
params: A dict of hyperparameters.
The following hyperparameters are expected:
* head: A `Head` instance.
* feature_columns: An iterable containing all the feature columns used by
the model.
* optimizer: string, `Optimizer` object, or callable that defines the
optimizer to use for training. If `None`, will use a FTRL optimizer.
* kernel_mappers: Dictionary of kernel mappers to be applied to the input
features before training.
config: `RunConfig` object to configure the runtime settings.
Returns:
A `ModelFnOps` instance.
Raises:
ValueError: If mode is not any of the `ModeKeys`.
"""
feature_columns = params[_FEATURE_COLUMNS]
kernel_mappers = params[_KERNEL_MAPPERS]
updated_features, updated_columns = _update_features_and_columns(
features, feature_columns, kernel_mappers)
params[_FEATURE_COLUMNS] = updated_columns
return linear._linear_model_fn( # pylint: disable=protected-access
updated_features, labels, mode, params, config)
class _KernelEstimator(estimator.Estimator):
"""Generic kernel-based linear estimator."""
def __init__(self,
feature_columns=None,
model_dir=None,
weight_column_name=None,
head=None,
optimizer=None,
kernel_mappers=None,
config=None):
"""Constructs a `_KernelEstimator` object."""
if not feature_columns and not kernel_mappers:
raise ValueError(
"You should set at least one of feature_columns, kernel_mappers.")
if not _check_valid_kernel_mappers(kernel_mappers):
raise ValueError("Invalid kernel mappers.")
if not _check_valid_head(head):
raise ValueError(
"head type: {} is not supported. Supported head types: "
"_BinaryLogisticHead, _MultiClassHead.".format(type(head)))
params = {
"head": head,
_FEATURE_COLUMNS: feature_columns or [],
_OPTIMIZER: optimizer,
_KERNEL_MAPPERS: kernel_mappers
}
super(_KernelEstimator, self).__init__(
model_fn=_kernel_model_fn,
model_dir=model_dir,
config=config,
params=params)
class KernelLinearClassifier(_KernelEstimator):
"""Linear classifier using kernel methods as feature preprocessing.
It trains a linear model after possibly mapping initial input features into
a mapped space using explicit kernel mappings. Due to the kernel mappings,
training a linear classifier in the mapped (output) space can detect
non-linearities in the input space.
The user can provide a list of kernel mappers to be applied to all or a subset
of existing feature_columns. This way, the user can effectively provide 2
types of feature columns:
* those passed as elements of feature_columns in the classifier's constructor
* those appearing as a key of the kernel_mappers dict.
If a column appears in feature_columns only, no mapping is applied to it. If
it appears as a key in kernel_mappers, the corresponding kernel mappers are
applied to it. Note that it is possible that a column appears in both places.
Currently kernel_mappers are supported for _RealValuedColumns only.
Example usage:
```
real_column_a = real_valued_column(name='real_column_a',...)
sparse_column_b = sparse_column_with_hash_bucket(...)
kernel_mappers = {real_column_a : [RandomFourierFeatureMapper(...)]}
optimizer = ...
# real_column_a is used as a feature in both its initial and its transformed
# (mapped) form. sparse_column_b is not affected by kernel mappers.
kernel_classifier = KernelLinearClassifier(
feature_columns=[real_column_a, sparse_column_b],
model_dir=...,
optimizer=optimizer,
kernel_mappers=kernel_mappers)
# real_column_a is used as a feature in its transformed (mapped) form only.
# sparse_column_b is not affected by kernel mappers.
kernel_classifier = KernelLinearClassifier(
feature_columns=[sparse_column_b],
model_dir=...,
optimizer=optimizer,
kernel_mappers=kernel_mappers)
# Input builders
def train_input_fn: # returns x, y
...
def eval_input_fn: # returns x, y
...
kernel_classifier.fit(input_fn=train_input_fn)
kernel_classifier.evaluate(input_fn=eval_input_fn)
kernel_classifier.predict(...)
```
Input of `fit` and `evaluate` should have following features, otherwise there
will be a `KeyError`:
* if `weight_column_name` is not `None`, a feature with
`key=weight_column_name` whose value is a `Tensor`.
* for each `column` in `feature_columns`:
- if `column` is a `SparseColumn`, a feature with `key=column.name`
whose `value` is a `SparseTensor`.
- if `column` is a `WeightedSparseColumn`, two features: the first with
`key` the id column name, the second with `key` the weight column name.
Both features' `value` must be a `SparseTensor`.
- if `column` is a `RealValuedColumn`, a feature with `key=column.name`
whose `value` is a `Tensor`.
"""
def __init__(self,
feature_columns=None,
model_dir=None,
n_classes=2,
weight_column_name=None,
optimizer=None,
kernel_mappers=None,
config=None):
"""Construct a `KernelLinearClassifier` estimator object.
Args:
feature_columns: An iterable containing all the feature columns used by
the model. All items in the set should be instances of classes derived
from `FeatureColumn`.
model_dir: Directory to save model parameters, graph etc. This can also be
used to load checkpoints from the directory into an estimator to
continue training a previously saved model.
n_classes: number of label classes. Default is binary classification.
Note that class labels are integers representing the class index (i.e.
values from 0 to n_classes-1). For arbitrary label values (e.g. string
labels), convert to class indices first.
weight_column_name: A string defining feature column name representing
weights. It is used to down weight or boost examples during training. It
will be multiplied by the loss of the example.
optimizer: The optimizer used to train the model. If specified, it should
be an instance of `tf.Optimizer`. If `None`, the Ftrl optimizer is used
by default.
kernel_mappers: Dictionary of kernel mappers to be applied to the input
features before training a (linear) model. Keys are feature columns and
values are lists of mappers to be applied to the corresponding feature
column. Currently only _RealValuedColumns are supported and therefore
all mappers should conform to the `DenseKernelMapper` interface (see
./mappers/dense_kernel_mapper.py).
config: `RunConfig` object to configure the runtime settings.
Returns:
A `KernelLinearClassifier` estimator.
Raises:
ValueError: if n_classes < 2.
ValueError: if neither feature_columns nor kernel_mappers are provided.
ValueError: if mappers provided as kernel_mappers values are invalid.
"""
super(KernelLinearClassifier, self).__init__(
feature_columns=feature_columns,
model_dir=model_dir,
weight_column_name=weight_column_name,
head=head_lib.multi_class_head(
n_classes=n_classes, weight_column_name=weight_column_name),
kernel_mappers=kernel_mappers,
config=config)
def predict_classes(self, input_fn=None):
"""Runs inference to determine the predicted class per instance.
Args:
input_fn: The input function providing features.
Returns:
A generator of predicted classes for the features provided by input_fn.
Each predicted class is represented by its class index (i.e. integer from
0 to n_classes-1)
"""
key = prediction_key.PredictionKey.CLASSES
predictions = super(KernelLinearClassifier, self).predict(
input_fn=input_fn, outputs=[key])
return (pred[key] for pred in predictions)
def predict_proba(self, input_fn=None):
"""Runs inference to determine the class probability predictions.
Args:
input_fn: The input function providing features.
Returns:
A generator of predicted class probabilities for the features provided by
input_fn.
"""
key = prediction_key.PredictionKey.PROBABILITIES
predictions = super(KernelLinearClassifier, self).predict(
input_fn=input_fn, outputs=[key])
return (pred[key] for pred in predictions)
| apache-2.0 | -8,200,934,012,331,352,000 | 39.736686 | 115 | 0.701213 | false | 4.282737 | true | false | false |
Tanganelli/CoAPthon | observelayer.py | 2 | 11065 | import logging
import time
import threading
from coapthon import defines
__author__ = 'Giacomo Tanganelli'
logger = logging.getLogger(__name__)
class ObserveItem(object):
def __init__(self, timestamp, non_counter, allowed, transaction, serv=None):
"""
Data structure for the Observe option
        :param timestamp: the timestamp of the last message sent
:param non_counter: the number of NON notification sent
:param allowed: if the client is allowed as observer
:param transaction: the transaction
:param serv: reference to CoAP object
"""
self.timestamp = timestamp
self.non_counter = non_counter
self.allowed = allowed
self.transaction = transaction
# parameters for dynamic resource observing
self.conditional = False
self.conditions = {}
self.last_notify = time.time()
self.timer = None
self.coap = serv
# timer for notification procedure is set at (pmax - pmin)/2
def pmax_timer(self):
self.coap.notify(self.transaction.resource)
def start_timer(self):
pmin = 0
pmax = 0
for cond in self.conditions:
if cond == "pmin":
pmin = self.conditions[cond]
elif cond == "pmax":
pmax = self.conditions[cond]
if pmax == 0:
return
else:
self.timer = threading.Timer((pmax-pmin)/2, self.pmax_timer)
self.timer.start()
class ObserveLayer(object):
"""
    Manage the observing feature. It stores observing relationships.
"""
def __init__(self, server=None):
self._relations = {}
self._server = server
def send_request(self, request):
"""
Add itself to the observing list
:param request: the request
:return: the request unmodified
"""
if request.observe == 0:
# Observe request
host, port = request.destination
key_token = hash(str(host) + str(port) + str(request.token))
self._relations[key_token] = ObserveItem(time.time(), None, True, None)
if request.observe == 1:
# Cancelling observe explicitly
self.remove_subscriber(request)
return request
def receive_response(self, transaction):
"""
Sets notification's parameters.
:type transaction: Transaction
:param transaction: the transaction
:rtype : Transaction
:return: the modified transaction
"""
host, port = transaction.response.source
key_token = hash(str(host) + str(port) + str(transaction.response.token))
if key_token in self._relations and transaction.response.type == defines.Types["CON"]:
transaction.notification = True
return transaction
def send_empty(self, message):
"""
        Remove the client from the observer list when a RST message is sent.
:type message: Message
:param message: the message
:return: the message unmodified
"""
host, port = message.destination
key_token = hash(str(host) + str(port) + str(message.token))
if key_token in self._relations and message.type == defines.Types["RST"]:
del self._relations[key_token]
return message
def receive_request(self, transaction):
"""
        Manage the observe option in the request and, if needed, register the client
        as an observer or remove it from the list of observers.
:type transaction: Transaction
:param transaction: the transaction that owns the request
:rtype : Transaction
:return: the modified transaction
"""
if transaction.request.observe == 0:
# Observe request
host, port = transaction.request.source
key_token = hash(str(host) + str(port) + str(transaction.request.token))
non_counter = 0
if key_token in self._relations:
# Renew registration
allowed = True
else:
allowed = False
self._relations[key_token] = ObserveItem(time.time(), non_counter, allowed, transaction, self._server)
# check if the observing request has dynamic parameters (sent inside uri_query field)
if transaction.request.uri_query is not None:
logger.info("Dynamic Observing registration")
self._relations[key_token].conditional = True
self._relations[key_token].conditions = ObserveLayer.parse_uri_query(transaction.request.uri_query)
self._relations[key_token].start_timer()
elif transaction.request.observe == 1:
host, port = transaction.request.source
key_token = hash(str(host) + str(port) + str(transaction.request.token))
logger.info("Remove Subscriber")
try:
del self._relations[key_token]
except KeyError:
pass
return transaction
def receive_empty(self, empty, transaction):
"""
        Manage the observe feature to remove a client in case of a RST message received in reply to a notification.
:type empty: Message
:param empty: the received message
:type transaction: Transaction
:param transaction: the transaction that owns the notification message
:rtype : Transaction
:return: the modified transaction
"""
if empty.type == defines.Types["RST"]:
host, port = transaction.request.source
key_token = hash(str(host) + str(port) + str(transaction.request.token))
logger.info("Remove Subscriber")
try:
del self._relations[key_token]
except KeyError:
pass
transaction.completed = True
return transaction
def send_response(self, transaction):
"""
        Finalize adding the client to the list of observers.
:type transaction: Transaction
:param transaction: the transaction that owns the response
:return: the transaction unmodified
"""
host, port = transaction.request.source
key_token = hash(str(host) + str(port) + str(transaction.request.token))
if key_token in self._relations:
if transaction.response.code == defines.Codes.CONTENT.number:
if transaction.resource is not None and transaction.resource.observable:
transaction.response.observe = transaction.resource.observe_count
self._relations[key_token].allowed = True
self._relations[key_token].transaction = transaction
self._relations[key_token].timestamp = time.time()
else:
del self._relations[key_token]
elif transaction.response.code >= defines.Codes.ERROR_LOWER_BOUND:
del self._relations[key_token]
return transaction
def notify(self, resource, root=None):
"""
Prepare notification for the resource to all interested observers.
:rtype: list
:param resource: the resource for which send a new notification
:param root: deprecated
:return: the list of transactions to be notified
"""
ret = []
if root is not None:
resource_list = root.with_prefix_resource(resource.path)
else:
resource_list = [resource]
for key in self._relations.keys():
if self._relations[key].transaction.resource in resource_list:
# checking dynamic resource parameters
if self._relations[key].conditional:
if self.verify_conditions(self._relations[key]) is False:
continue
# updating relation timestamp and resetting timer
self._relations[key].last_notify = time.time()
self._relations[key].timer.cancel()
self._relations[key].start_timer()
if self._relations[key].non_counter > defines.MAX_NON_NOTIFICATIONS \
or self._relations[key].transaction.request.type == defines.Types["CON"]:
self._relations[key].transaction.response.type = defines.Types["CON"]
self._relations[key].non_counter = 0
elif self._relations[key].transaction.request.type == defines.Types["NON"]:
self._relations[key].non_counter += 1
self._relations[key].transaction.response.type = defines.Types["NON"]
self._relations[key].transaction.resource = resource
del self._relations[key].transaction.response.mid
del self._relations[key].transaction.response.token
ret.append(self._relations[key].transaction)
return ret
def remove_subscriber(self, message):
"""
Remove a subscriber based on token.
:param message: the message
"""
logger.debug("Remove Subscriber")
host, port = message.destination
key_token = hash(str(host) + str(port) + str(message.token))
try:
self._relations[key_token].transaction.completed = True
del self._relations[key_token]
except AttributeError:
logger.warning("No Transaction")
except KeyError:
logger.warning("No Subscriber")
@staticmethod
def parse_uri_query(uri_query):
"""
parse the conditional parameters for the conditional observing
:return: a map with pairs [parameter, value]
"""
dict_att = {}
print(uri_query)
attributes = uri_query.split(";")
for att in attributes:
a = att.split("=")
if len(a) > 1:
if str(a[0]) == "band":
a[1] = bool(a[1])
                elif a[1].isdigit():  # bool values (from "band") have no isdigit()
a[1] = int(a[1])
dict_att[str(a[0])] = a[1]
else:
dict_att[str(a[0])] = a[0]
print (dict_att)
return dict_att
@staticmethod
def verify_conditions(item):
"""
checks if the changed resource requires a notification
:param item: ObserveItem
:return: Boolean
"""
for cond in item.conditions:
if cond == "pmin":
# CURRENT TIME - TIMESTAMP < PMIN
t = int(time.time() - item.last_notify)
if t < int(item.conditions[cond]):
return False
return True
| mit | 3,178,576,142,935,755,000 | 36.420139 | 116 | 0.566109 | false | 4.787971 | false | false | false |
jagg81/translate-toolkit | setup.py | 1 | 17005 | #!/usr/bin/env python
from distutils.core import setup, Extension, Distribution, Command
import distutils.sysconfig
import sys
import os
import os.path
from translate import __version__
from translate import __doc__
try:
import py2exe
build_exe = py2exe.build_exe.py2exe
Distribution = py2exe.Distribution
except ImportError:
py2exe = None
build_exe = Command
# TODO: check out installing into a different path with --prefix/--home
join = os.path.join
PRETTY_NAME = 'Translate Toolkit'
translateversion = __version__.sver
packagesdir = distutils.sysconfig.get_python_lib()
sitepackages = packagesdir.replace(sys.prefix + os.sep, '')
infofiles = [(join(sitepackages,'translate'),
[join('translate',filename) for filename in 'ChangeLog', 'COPYING', 'LICENSE', 'README'])]
initfiles = [(join(sitepackages,'translate'),[join('translate','__init__.py')])]
subpackages = [
"convert",
"filters",
"lang",
"misc",
join("misc", "typecheck"),
"storage",
join("storage", "placeables"),
join("storage", "versioncontrol"),
join("storage", "xml_extract"),
"search",
join("search", "indexing"),
"services",
"tools",
]
# TODO: elementtree doesn't work in sdist, fix this
packages = ["translate"]
translatescripts = [apply(join, ('translate', ) + script) for script in
('convert', 'pot2po'),
('convert', 'moz2po'), ('convert', 'po2moz'),
('convert', 'oo2po'), ('convert', 'po2oo'),
('convert', 'oo2xliff'), ('convert', 'xliff2oo'),
('convert', 'prop2po'), ('convert', 'po2prop'),
('convert', 'csv2po'), ('convert', 'po2csv'),
('convert', 'txt2po'), ('convert', 'po2txt'),
('convert', 'ts2po'), ('convert', 'po2ts'),
('convert', 'html2po'), ('convert', 'po2html'),
('convert', 'ical2po'), ('convert', 'po2ical'),
('convert', 'ini2po'), ('convert', 'po2ini'),
('convert', 'json2po'), ('convert', 'po2json'),
('convert', 'tiki2po'), ('convert', 'po2tiki'),
('convert', 'php2po'), ('convert', 'po2php'),
('convert', 'rc2po'), ('convert', 'po2rc'),
('convert', 'xliff2po'), ('convert', 'po2xliff'),
('convert', 'sub2po'), ('convert', 'po2sub'),
('convert', 'symb2po'), ('convert', 'po2symb'),
('convert', 'po2tmx'),
('convert', 'po2wordfast'),
('convert', 'csv2tbx'),
('convert', 'odf2xliff'), ('convert', 'xliff2odf'),
('convert', 'web2py2po'), ('convert', 'po2web2py'),
('filters', 'pofilter'),
('tools', 'pocompile'),
('tools', 'poconflicts'),
('tools', 'pocount'),
('tools', 'podebug'),
('tools', 'pogrep'),
('tools', 'pomerge'),
('tools', 'porestructure'),
('tools', 'posegment'),
('tools', 'poswap'),
('tools', 'poclean'),
('tools', 'poterminology'),
('tools', 'pretranslate'),
('services', 'tmserver'),
('tools', 'build_tmdb')]
translatebashscripts = [apply(join, ('tools', ) + (script, )) for script in [
'pomigrate2', 'pocompendium',
'posplit', 'popuretext', 'poreencode', 'pocommentclean',
'junitmsgfmt',
]]
def addsubpackages(subpackages):
for subpackage in subpackages:
initfiles.append((join(sitepackages, 'translate', subpackage),
[join('translate', subpackage, '__init__.py')]))
for infofile in ('README', 'TODO'):
infopath = join('translate', subpackage, infofile)
if os.path.exists(infopath):
infofiles.append((join(sitepackages, 'translate', subpackage), [infopath]))
packages.append("translate.%s" % subpackage)
class build_exe_map(build_exe):
"""distutils py2exe-based class that builds the exe file(s) but allows mapping data files"""
def reinitialize_command(self, command, reinit_subcommands=0):
if command == "install_data":
install_data = build_exe.reinitialize_command(self, command, reinit_subcommands)
install_data.data_files = self.remap_data_files(install_data.data_files)
return install_data
return build_exe.reinitialize_command(self, command, reinit_subcommands)
def remap_data_files(self, data_files):
"""maps the given data files to different locations using external map_data_file function"""
new_data_files = []
for f in data_files:
if type(f) in (str, unicode):
f = map_data_file(f)
else:
datadir, files = f
datadir = map_data_file(datadir)
if datadir is None:
f = None
else:
f = datadir, files
if f is not None:
new_data_files.append(f)
return new_data_files
class InnoScript:
"""class that builds an InnoSetup script"""
def __init__(self, name, lib_dir, dist_dir, exe_files = [], other_files = [], install_scripts = [], version = "1.0"):
self.lib_dir = lib_dir
self.dist_dir = dist_dir
if not self.dist_dir.endswith(os.sep):
self.dist_dir += os.sep
self.name = name
self.version = version
self.exe_files = [self.chop(p) for p in exe_files]
self.other_files = [self.chop(p) for p in other_files]
self.install_scripts = install_scripts
def getcompilecommand(self):
try:
import _winreg
compile_key = _winreg.OpenKey(_winreg.HKEY_CLASSES_ROOT, "innosetupscriptfile\\shell\\compile\\command")
compilecommand = _winreg.QueryValue(compile_key, "")
compile_key.Close()
except:
compilecommand = 'compil32.exe "%1"'
return compilecommand
def chop(self, pathname):
"""returns the path relative to self.dist_dir"""
assert pathname.startswith(self.dist_dir)
return pathname[len(self.dist_dir):]
def create(self, pathname=None):
"""creates the InnoSetup script"""
if pathname is None:
self.pathname = os.path.join(self.dist_dir, self.name + os.extsep + "iss").replace(' ', '_')
else:
self.pathname = pathname
# See http://www.jrsoftware.org/isfaq.php for more InnoSetup config options.
ofi = self.file = open(self.pathname, "w")
print >> ofi, "; WARNING: This script has been created by py2exe. Changes to this script"
print >> ofi, "; will be overwritten the next time py2exe is run!"
print >> ofi, r"[Setup]"
print >> ofi, r"AppName=%s" % self.name
print >> ofi, r"AppVerName=%s %s" % (self.name, self.version)
print >> ofi, r"DefaultDirName={pf}\%s" % self.name
print >> ofi, r"DefaultGroupName=%s" % self.name
print >> ofi, r"OutputBaseFilename=%s-%s-setup" % (self.name, self.version)
print >> ofi, r"ChangesEnvironment=yes"
print >> ofi
print >> ofi, r"[Files]"
for path in self.exe_files + self.other_files:
print >> ofi, r'Source: "%s"; DestDir: "{app}\%s"; Flags: ignoreversion' % (path, os.path.dirname(path))
print >> ofi
print >> ofi, r"[Icons]"
print >> ofi, r'Name: "{group}\Documentation"; Filename: "{app}\doc\index.html";'
print >> ofi, r'Name: "{group}\Translate Toolkit Command Prompt"; Filename: "cmd.exe"'
print >> ofi, r'Name: "{group}\Uninstall %s"; Filename: "{uninstallexe}"' % self.name
print >> ofi
print >> ofi, r"[Registry]"
# TODO: Move the code to update the Path environment variable to a Python script which will be invoked by the [Run] section (below)
print >> ofi, r'Root: HKCU; Subkey: "Environment"; ValueType: expandsz; ValueName: "Path"; ValueData: "{reg:HKCU\Environment,Path|};{app};"'
print >> ofi
if self.install_scripts:
print >> ofi, r"[Run]"
for path in self.install_scripts:
print >> ofi, r'Filename: "{app}\%s"; WorkingDir: "{app}"; Parameters: "-install"' % path
print >> ofi
print >> ofi, r"[UninstallRun]"
for path in self.install_scripts:
print >> ofi, r'Filename: "{app}\%s"; WorkingDir: "{app}"; Parameters: "-remove"' % path
print >> ofi
ofi.close()
def compile(self):
"""compiles the script using InnoSetup"""
shellcompilecommand = self.getcompilecommand()
compilecommand = shellcompilecommand.replace('"%1"', self.pathname)
result = os.system(compilecommand)
if result:
print "Error compiling iss file"
print "Opening iss file, use InnoSetup GUI to compile manually"
os.startfile(self.pathname)
class build_installer(build_exe_map):
"""distutils class that first builds the exe file(s), then creates a Windows installer using InnoSetup"""
description = "create an executable installer for MS Windows using InnoSetup and py2exe"
user_options = getattr(build_exe, 'user_options', []) + \
[('install-script=', None,
"basename of installation script to be run after installation or before deinstallation")]
def initialize_options(self):
build_exe.initialize_options(self)
self.install_script = None
def run(self):
# First, let py2exe do it's work.
build_exe.run(self)
lib_dir = self.lib_dir
dist_dir = self.dist_dir
# create the Installer, using the files py2exe has created.
exe_files = self.windows_exe_files + self.console_exe_files
install_scripts = self.install_script
if isinstance(install_scripts, (str, unicode)):
install_scripts = [install_scripts]
script = InnoScript(PRETTY_NAME, lib_dir, dist_dir, exe_files, self.lib_files, version=self.distribution.metadata.version, install_scripts=install_scripts)
print "*** creating the inno setup script***"
script.create()
print "*** compiling the inno setup script***"
script.compile()
# Note: By default the final setup.exe will be in an Output subdirectory.
def import_setup_module(modulename, modulepath):
import imp
modfile, pathname, description = imp.find_module(modulename, [modulepath])
return imp.load_module(modulename, modfile, pathname, description)
def map_data_file (data_file):
"""remaps a data_file (could be a directory) to a different location
This version gets rid of Lib\\site-packages, etc"""
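    # Illustrative: a path like Lib\site-packages\translate\doc is remapped to
    # just "doc".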
data_parts = data_file.split(os.sep)
if data_parts[:2] == ["Lib", "site-packages"]:
data_parts = data_parts[2:]
if data_parts:
data_file = os.path.join(*data_parts)
else:
data_file = ""
if data_parts[:1] == ["translate"]:
data_parts = data_parts[1:]
if data_parts:
data_file = os.path.join(*data_parts)
else:
data_file = ""
return data_file
def getdatafiles():
datafiles = initfiles + infofiles
def listfiles(srcdir):
return join(sitepackages, srcdir), [join(srcdir, f) for f in os.listdir(srcdir) if os.path.isfile(join(srcdir, f))]
docfiles = []
for subdir in ['doc', 'share']:
docwalk=os.walk(os.path.join('translate', subdir))
for docs in docwalk:
if not '.svn' in docs[0]:
docfiles.append(listfiles(docs[0]))
datafiles += docfiles
return datafiles
def buildinfolinks():
linkfile = getattr(os, 'symlink', None)
linkdir = getattr(os, 'symlink', None)
import shutil
if linkfile is None:
linkfile = shutil.copy2
if linkdir is None:
linkdir = shutil.copytree
basedir = os.path.abspath(os.curdir)
os.chdir("translate")
if os.path.exists("LICENSE") or os.path.islink("LICENSE"):
os.remove("LICENSE")
linkfile("COPYING", "LICENSE")
os.chdir(basedir)
for infofile in ["COPYING", "README", "LICENSE"]:
if os.path.exists(infofile) or os.path.islink(infofile):
os.remove(infofile)
linkfile(os.path.join("translate", infofile), infofile)
def buildmanifest_in(file, scripts):
"""This writes the required files to a MANIFEST.in file"""
print >>file, "# MANIFEST.in: the below autogenerated by setup.py from translate %s" % translateversion
print >>file, "# things needed by translate setup.py to rebuild"
print >>file, "# informational files"
for infofile in ("README", "TODO", "ChangeLog", "COPYING", "LICENSE", "*.txt"):
print >>file, "global-include %s" % infofile
print >>file, "# C programs"
print >>file, "global-include *.c"
print >> file, "# scripts which don't get included by default in sdist"
for scriptname in scripts:
print >>file, "include %s" % scriptname
print >> file, "# include our documentation"
print >> file, "graft translate/doc"
print >> file, "graft translate/share"
# wordlist, portal are in the source tree but unconnected to the python code
print >>file, "prune wordlist"
print >>file, "prune spelling"
print >>file, "prune lingua"
print >>file, "prune Pootle"
print >>file, "prune pootling"
print >>file, "prune virtaal"
print >>file, "prune spelt"
print >>file, "prune corpuscatcher"
print >>file, "prune amagama"
print >>file, "prune .svn"
print >>file, "# MANIFEST.in: the above autogenerated by setup.py from translate %s" % translateversion
class TranslateDistribution(Distribution):
"""a modified distribution class for translate"""
def __init__(self, attrs):
baseattrs = {}
py2exeoptions = {}
py2exeoptions["packages"] = ["translate", "encodings"]
py2exeoptions["compressed"] = True
py2exeoptions["excludes"] = ["PyLucene", "Tkconstants", "Tkinter", "tcl", "enchant", #We need to do more to support spell checking on Windows
# strange things unnecessarily included with some versions of pyenchant:
"win32ui", "_win32sysloader", "win32pipe", "py2exe", "win32com", "pywin", "isapi", "_tkinter", "win32api",
]
version = attrs.get("version", translateversion)
py2exeoptions["dist_dir"] = "translate-toolkit-%s" % version
py2exeoptions["includes"] = ["lxml", "lxml._elementpath", "psyco"]
options = {"py2exe": py2exeoptions}
baseattrs['options'] = options
if py2exe:
baseattrs['console'] = translatescripts
baseattrs['zipfile'] = "translate.zip"
baseattrs['cmdclass'] = {"py2exe": build_exe_map, "innosetup": build_installer}
options["innosetup"] = py2exeoptions.copy()
options["innosetup"]["install_script"] = []
baseattrs.update(attrs)
Distribution.__init__(self, baseattrs)
def standardsetup(name, version, custompackages=[], customdatafiles=[]):
buildinfolinks()
# TODO: make these end with .py ending on Windows...
try:
manifest_in = open("MANIFEST.in", "w")
buildmanifest_in(manifest_in, translatescripts + translatebashscripts)
manifest_in.close()
except IOError, e:
print >> sys.stderr, "warning: could not recreate MANIFEST.in, continuing anyway. Error was %s" % e
addsubpackages(subpackages)
datafiles = getdatafiles()
ext_modules = []
dosetup(name, version, packages + custompackages, datafiles + customdatafiles, translatescripts+ translatebashscripts, ext_modules)
classifiers = [
"Development Status :: 5 - Production/Stable",
"Environment :: Console",
"Intended Audience :: Developers",
"License :: OSI Approved :: GNU General Public License (GPL)",
"Programming Language :: Python",
"Topic :: Software Development :: Localization",
"Topic :: Software Development :: Libraries :: Python Modules",
"Operating System :: OS Independent",
"Operating System :: Microsoft :: Windows",
"Operating System :: Unix"
]
def dosetup(name, version, packages, datafiles, scripts, ext_modules=[]):
long_description = __doc__
description = __doc__.split("\n", 1)[0]
setup(name=name,
version=version,
license="GNU General Public License (GPL)",
description=description,
long_description=long_description,
author="Translate.org.za",
author_email="[email protected]",
url="http://translate.sourceforge.net/wiki/toolkit/index",
download_url="http://sourceforge.net/project/showfiles.php?group_id=91920&package_id=97082",
platforms=["any"],
classifiers=classifiers,
packages=packages,
data_files=datafiles,
scripts=scripts,
ext_modules=ext_modules,
distclass=TranslateDistribution
)
if __name__ == "__main__":
standardsetup("translate-toolkit", translateversion)
| gpl-2.0 | -424,227,525,966,133,440 | 41.726131 | 163 | 0.607998 | false | 3.759673 | false | false | false |
fumitoh/modelx | modelx/qtgui/modeltree.py | 1 | 13640 | # Copyright (c) 2017-2021 Fumito Hamamura <[email protected]>
# This library is free software: you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation version 3.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library. If not, see <http://www.gnu.org/licenses/>.
# The source code in this file is modified from:
# https://github.com/baoboa/pyqt5/blob/master/examples/itemviews/simpletreemodel/simpletreemodel.py
# See below for the original copyright notice.
#############################################################################
##
## Copyright (C) 2013 Riverbank Computing Limited.
## Copyright (C) 2010 Nokia Corporation and/or its subsidiary(-ies).
## All rights reserved.
##
## This file is part of the examples of PyQt.
##
## $QT_BEGIN_LICENSE:BSD$
## You may use this file under the terms of the BSD license as follows:
##
## "Redistribution and use in source and binary forms, with or without
## modification, are permitted provided that the following conditions are
## met:
## * Redistributions of source code must retain the above copyright
## notice, this list of conditions and the following disclaimer.
## * Redistributions in binary form must reproduce the above copyright
## notice, this list of conditions and the following disclaimer in
## the documentation and/or other materials provided with the
## distribution.
## * Neither the name of Nokia Corporation and its Subsidiary(-ies) nor
## the names of its contributors may be used to endorse or promote
## products derived from this software without specific prior written
## permission.
##
## THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
## "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
## LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
## A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
## OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
## SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
## LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
## DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
## THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
## (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
## OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE."
## $QT_END_LICENSE$
##
#############################################################################
import itertools
from qtpy.QtCore import QAbstractItemModel, QModelIndex, Qt
class BaseItem(object):
"""Base Item class for all tree item classes."""
def __init__(self, data, parent=None):
self.colType = 1
self.colParam = 2
self.parentItem = parent
self.itemData = None
self.childItems = []
self.updateData(data)
def updateData(self, data):
if self.itemData != data:
self.itemData = data
self.updateChild()
else:
self.itemData = data
def updateChild(self):
raise NotImplementedError
def changeParent(self, parent):
self.parentItem = parent
def appendChild(self, item):
item.changeParent(self)
self.childItems.append(item)
def insertChild(self, index, item):
item.changeParent(self)
self.childItems.insert(index, item)
def child(self, row):
return self.childItems[row]
def childCount(self):
return len(self.childItems)
def columnCount(self):
return 3
def data(self, column):
if column == 0:
return self.itemData["name"]
elif column == self.colType:
return self.getType()
elif column == self.colParam:
return self.getParams()
else:
raise IndexError
def parent(self):
return self.parentItem
def row(self):
if self.parentItem:
return self.parentItem.childItems.index(self)
return 0
def getType(self):
raise NotImplementedError
def getParams(self):
raise NotImplementedError
class InterfaceItem(BaseItem):
"""Object item, such as Model, Space, Cells"""
@property
def objid(self):
return self.itemData["id"]
def __eq__(self, other):
if isinstance(other, InterfaceItem):
return self.objid == other.objid
else:
return False
def __hash__(self):
return hash(self.objid)
class ViewItem(BaseItem):
@property
def attrid(self):
return self.getType()
    def __eq__(self, other):
        if isinstance(other, ViewItem):
            return (
                self.parent() == other.parent() and self.attrid == other.attrid
            )
        else:
            return False
def __hash__(self):
return hash((self.parent().objid, self.attrid))
class SpaceContainerItem(InterfaceItem):
"""Base Item class for Models and Spaces which inherit SpaceContainer."""
def updateChild(self):
self.childItems = self.newChildItems(self.itemData)
def newChildItems(self, data):
return [
SpaceItem(space, self)
for space in data["spaces"]["items"].values()
]
class ModelItem(SpaceContainerItem):
"""Item class for a Model (root item)"""
def __init__(self, data):
super(ModelItem, self).__init__(data, parent=None)
def getType(self):
return "Model"
def getParams(self):
return ""
class SpaceItem(SpaceContainerItem):
"""Item class for Space objects."""
def updateChild(self):
self.childItems.clear()
for space in self.itemData["named_spaces"]["items"].values():
self.childItems.append(SpaceItem(space, self))
dynspaces = self.itemData["named_itemspaces"]["items"]
if len(dynspaces) > 0:
self.childItems.append(DynamicSpaceMapItem(dynspaces, self))
cellsmap = self.itemData["cells"]["items"]
for cells in cellsmap.values():
self.childItems.append(CellsItem(cells, self))
def getType(self):
return "Space"
def getParams(self):
if "argvalues" in self.itemData:
args = self.itemData["argvalues"]
if args is not None:
return args
else:
return ""
else:
return ""
class DynamicSpaceMapItem(ViewItem):
"""Item class for parent nodes of dynamic spaces of a space."""
def updateChild(self):
self.childItems.clear()
for space in self.itemData.values():
self.childItems.append(SpaceItem(space, self))
def data(self, column):
if column == 0:
return "Dynamic Spaces"
else:
return BaseItem.data(self, column)
def getType(self):
return ""
def getParams(self):
return self.parent().itemData["params"]
class CellsItem(InterfaceItem):
"""Item class for cells objects."""
def updateChild(self):
pass
def getType(self):
return "Cells"
def getParams(self):
return self.itemData["params"]
class ModelTreeModel(QAbstractItemModel):
def __init__(self, data, parent=None):
super(ModelTreeModel, self).__init__(parent)
self.rootItem = ModelItem(data)
def updateRoot(self, data):
newmodel = ModelItem(data)
self.updateItem(QModelIndex(), newmodel)
def getItem(self, index):
if not index.isValid():
return self.rootItem
else:
return index.internalPointer()
def updateItem(self, index, newitem, recursive=True):
if not index.isValid():
item = self.rootItem
else:
item = index.internalPointer()
if item.itemData != newitem.itemData:
item.itemData = newitem.itemData
# self.dataChanged.emit(index, index)
delItems = set(item.childItems) - set(newitem.childItems)
if delItems:
delRows = sorted([item.row() for item in delItems])
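            # Group consecutive row numbers into runs (n - next(c) stays
            # constant within a consecutive run), so that each run can be
            # removed with a single removeRows call below.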
delRows = [
list(g)
for _, g in itertools.groupby(
delRows, key=lambda n, c=itertools.count(): n - next(c)
)
]
for rows in delRows:
self.removeRows(rows[0], len(rows), index)
addItems = set(newitem.childItems) - set(item.childItems)
if addItems:
addRows = sorted([item.row() for item in addItems])
addRows = [
list(g)
for _, g in itertools.groupby(
addRows, key=lambda n, c=itertools.count(): n - next(c)
)
]
for rows in addRows:
self.insertRows(rows, newitem, index)
self.reorderChild(index, newitem)
if recursive:
for row, child in enumerate(item.childItems):
child_index = self.index(row, 0, index)
self.updateItem(child_index, newitem.childItems[row])
def insertRows(self, rows, newitem, parent):
# Signature is different from the base method.
item = self.getItem(parent)
self.beginInsertRows(parent, rows[0], rows[-1])
for row in rows:
item.insertChild(row, newitem.childItems[row])
self.endInsertRows()
def removeRows(self, position, rows, parent=QModelIndex()):
item = self.getItem(parent)
self.beginRemoveRows(parent, position, position + rows - 1)
for row in range(position, position + rows):
item.childItems.pop(row)
self.endRemoveRows()
def reorderChild(self, parent, newitem):
"""Reorder a list to match target by moving a sequence at a time.
Written for QtAbstractItemModel.moveRows.
"""
source = self.getItem(parent).childItems
target = newitem.childItems
i = 0
while i < len(source):
if source[i] == target[i]:
i += 1
continue
else:
i0 = i
j0 = source.index(target[i0])
j = j0 + 1
while j < len(source):
if source[j] == target[j - j0 + i0]:
j += 1
continue
else:
break
self.moveRows(parent, i0, j0, j - j0)
i += j - j0
def moveRows(self, parent, index_to, index_from, length):
"""Move a sub sequence in a list
index_to must be smaller than index_from
"""
source = self.getItem(parent).childItems
self.beginMoveRows(
parent, index_from, index_from + length - 1, parent, index_to
)
sublist = [source.pop(index_from) for _ in range(length)]
for _ in range(length):
source.insert(index_to, sublist.pop())
self.endMoveRows()
@property
def modelid(self):
if self.rootItem:
return self.rootItem.objid
else:
return None
def columnCount(self, parent):
if parent.isValid():
return parent.internalPointer().columnCount()
else:
return self.rootItem.columnCount()
def data(self, index, role):
if not index.isValid():
return None
if role != Qt.DisplayRole:
return None
item = index.internalPointer()
return item.data(index.column())
def flags(self, index):
if not index.isValid():
return Qt.NoItemFlags
return Qt.ItemIsEnabled | Qt.ItemIsSelectable
def headerData(self, section, orientation, role):
if orientation == Qt.Horizontal and role == Qt.DisplayRole:
# TODO: Refactor hard-coding column indexes
if section == 0:
return "Objects"
elif section == 1:
return "Type"
elif section == 2:
return "Parameters"
return None
def index(self, row, column, parent):
if not self.hasIndex(row, column, parent):
return QModelIndex()
if not parent.isValid():
parentItem = self.rootItem
else:
parentItem = parent.internalPointer()
childItem = parentItem.child(row)
if childItem:
return self.createIndex(row, column, childItem)
else:
return QModelIndex()
def parent(self, index):
if not index.isValid():
return QModelIndex()
childItem = index.internalPointer()
parentItem = childItem.parent()
if parentItem is None or parentItem == self.rootItem:
return QModelIndex()
return self.createIndex(parentItem.row(), 0, parentItem)
def rowCount(self, parent):
if parent.column() > 0:
return 0
if not parent.isValid():
parentItem = self.rootItem
else:
parentItem = parent.internalPointer()
return parentItem.childCount()
| gpl-3.0 | -8,648,158,871,068,630,000 | 28.846827 | 99 | 0.591569 | false | 4.316456 | false | false | false |
orangeYao/twiOpinion | dustbin/mode.py | 1 | 1144 | def mode():
mode = raw_input(
"The three available classification mode you can select:\n"
+ "1. set all grabed tweets as category 1 and grab category 2 by new (opposite) tag \n"
+ "2. classify grabed tweets by selecting key words \n"
+ "3. classify grabed tweets one by one manually \n"
+ "select the mode you want by typing corresponding number: "
) or "2"
if (mode == "1"):
file_name2, pw2 = grabSetting()
if (mode == "2"):
words_category1 = raw_input("Type in the key words requirements for a tweet to be classified as category 1:"
+ "seperate alternative words by \"or\"") or "good"
words_category2 = raw_input("Type in the key words requirements for a tweet to be classified as category 2:"
+ "seperate alternative words by \"or\"") or "bad"
mode2(file_name, words_category1, words_category2)
if (mode == "3"):
judge = raw_input("For each tweet displayed, type \"1\" for category 1 and \"2\" for category 2, \"0\" to skip and \"q\" to stop labeling")
| mit | 7,105,871,057,704,096,000 | 53.47619 | 147 | 0.588287 | false | 3.931271 | false | false | false |
janezkranjc/clowdflows | workflows/graphs/library.py | 4 | 1143 | import re
def graphs_create_integers(input_dict):
intStr = input_dict['intStr']
intList = []
for i in re.findall(r'\w+', intStr):
try:
intList.append(int(i))
except:
pass
if input_dict['sort'].lower() == "true":
intList.sort()
return {'intList':intList}
def graphs_sum_integers(input_dict):
intList = input_dict['intList']
return {'sum':sum(intList)}
def graphs_pre_filter_integers(input_dict):
return input_dict
def graphs_post_filter_integers(postdata,input_dict,output_dict):
intListOut = postdata['intListOut']
intList = []
for i in intListOut:
try:
intList.append(int(i))
except:
pass
return {'intList': intList}
def graphs_pre_display_summation(input_dict):
return {}
###########################################
def graphs_visualize_visjs(input_dict):
return {}
def graphs_json2networkx(input_dict):
from json import loads
from networkx.readwrite import json_graph
gtext = loads(input_dict['graph'])
g = json_graph.node_link_graph(gtext)
return {'nxgraph': g} | gpl-3.0 | 3,909,127,524,348,086,000 | 21.88 | 65 | 0.5993 | false | 3.495413 | false | false | false |
ViktorBarzin/TechFest | techfest/management/commands/autorefreshserver.py | 1 | 8327 |
from django.contrib.staticfiles.management.commands.runserver import \
Command as RunServerCommand
from django.utils.autoreload import reloader_thread
from http import server as BaseHTTPServer
import threading as thread
import os
import sys
import subprocess
# default port number where we would run the change reporting server
REFRESH_PORT = 32000
# prompts of our support PDBs
PDB_PROMPTS = ["(Pdb) ", "ipdb> "]
# Global counter that will be incremented whenever a refresh is required
_needs_refresh = 0
# to hold the last _needs_refresh counter sent to the client
# we compare this against _needs_refresh to determine if the client
# needs to refresh itself
_last_refresh = 0
_refresh_port = REFRESH_PORT
class SilentHandler(BaseHTTPServer.BaseHTTPRequestHandler):
"""
    HTTP Response handler, adapted from sample code in the Python wiki.
    Suppresses the connection message which interferes with
the default Django server messages.
Probably can be made better, but I'm not going to bother for now.
"""
def do_HEAD(s):
s.send_response(200)
s.send_header("Content-type", "text/json")
s.end_headers()
def do_GET(s):
# GET returns a boolean indicating if browser page needs
# to be refreshed
global _needs_refresh
global _last_refresh
s.send_response(200)
s.send_header("Content-type", "text/json")
s.send_header("Access-Control-Allow-Origin", "*")
s.end_headers()
s.wfile.write(bytes('{ "changed": %d }\n' % s.needs_refresh(), 'utf-8'))
_last_refresh = _needs_refresh
def do_POST(s):
'''POST can be used to force a refresh externally'''
global _needs_refresh
s.send_response(200)
s.send_header("Content-type", "text/json")
s.end_headers()
_needs_refresh += 1
s.wfile.write('{ "POST": 1, "changed": %d }\n' % s.needs_refresh())
def needs_refresh(self):
'''returns a boolean indicating if a refresh is required'''
global _needs_refresh
global _last_refresh
return _needs_refresh != _last_refresh
def log_request(self, *args, **kwargs):
pass
def refresh_state_server():
"""
A simple HTTP server that does just one thing, serves a JSON object
with a single attribute indicating whether the development server
has been reloaded and therefore browser page requires refreshing.
Extended to accept a POST request which forces the refresh flag
"""
httpd = BaseHTTPServer.HTTPServer(("127.0.0.1", _refresh_port),
SilentHandler)
try:
sys.stdout.write("Starting auto refresh state server at 127.0.0.1:%d\n" \
% _refresh_port)
httpd.serve_forever()
except KeyboardInterrupt:
pass
httpd.server_close()
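# Hedged usage sketch (added for illustration; not part of the original
# module): a client, such as the browser extension, can poll the refresh
# server and check the "changed" flag like this.
def _example_poll_refresh_state(port=REFRESH_PORT):
    import json
    from urllib.request import urlopen
    with urlopen('http://127.0.0.1:%d/' % port) as response:
        payload = json.loads(response.read().decode('utf-8'))
    return bool(payload['changed'])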
class Command(RunServerCommand):
"""
A customized version of the runserver command that spawns a secondary
http server which can be queried to check if the Django development
server has been reloaded (and therefore the browser page needs refresh)
"""
help = "Starts a lightweight Web server for development that serves static files and provides refresh status through a secondary HTTP server running at %d." % _refresh_port
def add_arguments(self, parser):
super(Command, self).add_arguments(parser)
parser.add_argument('--refreshport', action='store', default=32000, type=int,
help='Port number where the refresh server listens. Defaults to 32000')
def run(self, **options):
use_reloader = options.get('use_reloader')
global _refresh_port
_refresh_port = options.get('refreshport', REFRESH_PORT)
if use_reloader:
self.autoreload()
else:
self.inner_run(None, **options)
def autoreload(self):
"""Copied from django.core.autoload.python_reloader"""
if os.environ.get("RUN_MAIN") == "true":
thread.Thread(target=self.inner_run).start() # start http server
try:
#sys.stdout.write("Starting reloader_thread...\n")
reloader_thread() # poll source files for modifications
# if modified, kill self
except KeyboardInterrupt:
pass
else:
try:
exit_code = self.restart_with_reloader()
if exit_code < 0:
os.kill(os.getpid(), -exit_code)
else:
sys.exit(exit_code)
except KeyboardInterrupt:
pass
def restart_with_reloader(self):
"""
Differs from django.core.autoreload in that _needs_refresh counter
is incremented everytime the Development server is reloaded owing
to detected file changes.
"""
global _needs_refresh
global _last_refresh
        # start the internal HTTP server that will serve the refresh
        # poll requests from our Chrome extension
        threadid = thread.Thread(target=refresh_state_server)
        threadid.start()
while True:
args = [sys.executable] + ['-u'] + ['-W%s' % o for o in sys.warnoptions] + sys.argv
if sys.platform == "win32":
args = ['"%s"' % arg for arg in args]
new_environ = os.environ.copy()
new_environ["RUN_MAIN"] = 'true'
proc = subprocess.Popen(args,
bufsize=1,
stdout=subprocess.PIPE,
stderr=subprocess.STDOUT,
stdin=subprocess.PIPE,
env=new_environ,
close_fds=True)
# We loop reading all the output of the child process
# until it prints the 'Quit the server with CONTROL'
# When that's done, we can be certain that the server
# is fully initialized and ready to serve pages
while True and proc.returncode == None:
line = proc.stdout.readline()
if line:
print(line)
if bytes('Quit the server with CONTROL', 'utf-8') in line:
break;
proc.poll()
# Since the development server is fully initialized, we can
# now set the refresh state flag.
sys.stdout.write("Development server reinitialized, setting refresh flag\n")
_needs_refresh += 1
# Here we're reading the output character by character rather
# than line by line as done previously.
# This is necessary for us to integrate with python debuggers
# such as pdb and ipdb. When the child process is interrupted
# by one of these two debuggers, it shows a prompt
# and waits for user input. Since these prompts do not have a
# terminating \n, readline would never return and we get a
# non-intuitive user experience where inputs do not correspond
# to pdb> prompts. Reading one character at a time allows to
# detect these prompts and then ask user for input which we can
# write back to child process' stdin.
line = ''
while True and proc.returncode == None:
                char = proc.stdout.read(1)
                if char:
                    # proc.stdout is a byte stream; decode the byte before
                    # mixing it with the str buffer below
                    char = char.decode('utf-8', errors='replace')
                    sys.stdout.write(char)
                    sys.stdout.flush()
                    # Buffer the character until we hit newline or one of
                    # the recognized pdb prompts (PDB_PROMPTS)
                    if char != '\n':
                        line += char
                        if line in PDB_PROMPTS: # keep checking if we hit pdb
                            # Child process has hit pdb breakpoint.
                            # Read a command from stdin and write to the
                            # child process' stdin
                            line = ''
                            command = input()
                            proc.stdin.write((command + '\n').encode('utf-8'))
                            proc.stdin.flush()
                    else:
                        line = ''
proc.poll() # will set the proc.returncode if proc terminates
sys.stdout.write("Development server terminated with exit code %d\n" % proc.returncode)
if proc.returncode != 3:
return proc.returncode
| mit | -7,501,403,048,418,236,000 | 38.464455 | 176 | 0.59241 | false | 4.52063 | false | false | false |
RadicoLabs/DIGITS | tools/test_create_db.py | 1 | 7395 | # Copyright (c) 2014-2015, NVIDIA CORPORATION. All rights reserved.
import os.path
import tempfile
import shutil
from cStringIO import StringIO
import unittest
import platform
import Queue
from collections import Counter
import nose.tools
import mock
import PIL.Image
import numpy as np
from . import create_db as _
class BaseTest():
"""
Provides some helpful files and utilities
"""
@classmethod
def setUpClass(cls):
cls.empty_file = tempfile.mkstemp()
cls.empty_dir = tempfile.mkdtemp()
# Create one good textfile
cls.good_file = tempfile.mkstemp()
# Create a color image
cls.color_image_file = tempfile.mkstemp(suffix='.png')
cls.numpy_image_color = np.ones((8,10,3), dtype='uint8')
cls.pil_image_color = PIL.Image.fromarray(cls.numpy_image_color)
cls.pil_image_color.save(cls.color_image_file[1])
# Create a grayscale image
cls.gray_image_file = tempfile.mkstemp(suffix='.png')
cls.numpy_image_gray = np.ones((8,10), dtype='uint8')
cls.pil_image_gray = PIL.Image.fromarray(cls.numpy_image_gray)
cls.pil_image_gray.save(cls.gray_image_file[1])
cls.image_count = 0
for i in xrange(3):
for j in xrange(3):
os.write(cls.good_file[0], '%s %s\n' % (cls.color_image_file[1], i))
os.write(cls.good_file[0], '%s %s\n' % (cls.gray_image_file[1], i))
cls.image_count += 2
@classmethod
def tearDownClass(cls):
for f in cls.empty_file, cls.good_file, cls.color_image_file, cls.gray_image_file:
try:
os.close(f[0])
os.remove(f[1])
except OSError:
pass
try:
shutil.rmtree(cls.empty_dir)
except OSError:
raise
class TestFillLoadQueue(BaseTest):
def test_valid_file(self):
for shuffle in True, False:
yield self.check_valid_file, shuffle
def check_valid_file(self, shuffle):
queue = Queue.Queue()
result = _._fill_load_queue(self.good_file[1], queue, shuffle)
assert result == self.image_count, 'lines not added'
assert queue.qsize() == self.image_count, 'queue not full'
def test_empty_file(self):
for shuffle in True, False:
yield self.check_empty_file, shuffle
def check_empty_file(self, shuffle):
queue = Queue.Queue()
nose.tools.assert_raises(
_.BadInputFileError,
_._fill_load_queue,
self.empty_file[1], queue, shuffle)
class TestParseLine():
def test_good_lines(self):
for label, line in [
(0, '/path/image.jpg 0'),
(1, 'image.jpg 1'),
(2, 'image.jpg 2\n'),
(3, 'image.jpg 3'),
(4, 'spaces in filename.jpg 4'),
]:
yield self.check_good_line, line, label
def check_good_line(self, line, label):
c = Counter()
p, l = _._parse_line(line, c)
assert l == label, 'parsed label wrong'
assert c[l] == 1, 'distribution is wrong'
def test_bad_lines(self):
for line in [
'nolabel.jpg',
'non-number.jpg five',
'negative.jpg -1',
]:
yield self.check_bad_line, line
def check_bad_line(self, line):
nose.tools.assert_raises(
_.ParseLineError,
_._parse_line,
line, Counter()
)
class TestCalculateBatchSize():
def test(self):
for count, batch_size in [
(1, 1),
(50, 50),
(100, 100),
(200, 100),
]:
yield self.check, count, batch_size
def check(self, count, batch_size):
assert _._calculate_batch_size(count) == batch_size
class TestCalculateNumThreads():
def test(self):
for batch_size, shuffle, num in [
(1000, True, 10),
(1000, False, 1),
(100, True, 10),
(100, False, 1),
(50, True, 7),
(4, True, 2),
(1, True, 1),
]:
yield self.check, batch_size, shuffle, num
def check(self, batch_size, shuffle, num):
assert _._calculate_num_threads(
batch_size, shuffle) == num
class TestInitialImageSum():
def test_color(self):
s = _._initial_image_sum(10, 10, 3)
assert s.shape == (10, 10, 3)
assert s.dtype == 'float64'
def test_grayscale(self):
s = _._initial_image_sum(10, 10, 1)
assert s.shape == (10, 10)
assert s.dtype == 'float64'
class TestImageToDatum(BaseTest):
def test(self):
for compression in None, 'png', 'jpg':
yield self.check_color, compression
yield self.check_grayscale, compression
def check_color(self, compression):
d = _._array_to_datum(self.numpy_image_color, 1, compression)
assert d.height == self.numpy_image_color.shape[0]
assert d.width == self.numpy_image_color.shape[1]
assert d.channels == 3
assert d.encoded == bool(compression)
def check_grayscale(self, compression):
d = _._array_to_datum(self.numpy_image_gray, 1, compression)
assert d.height == self.numpy_image_gray.shape[0]
assert d.width == self.numpy_image_gray.shape[1]
assert d.channels == 1
assert d.encoded == bool(compression)
class TestSaveMeans():
def test(self):
for color in True, False:
d = tempfile.mkdtemp()
for filename in 'mean.jpg', 'mean.png', 'mean.npy', 'mean.binaryproto':
yield self.check, d, filename, color
shutil.rmtree(d)
def check(self, directory, filename, color):
filename = os.path.join(directory, filename)
if color:
s = np.ones((8,10,3),dtype='float64')
else:
s = np.ones((8,10),dtype='float64')
_._save_means(s, 2, [filename])
assert os.path.exists(filename)
class BaseCreationTest(BaseTest):
def test_image_sizes(self):
for width in 8, 12:
for channels in 1, 3:
yield self.check_image_sizes, width, channels, False
def check_image_sizes(self, width, channels, shuffle):
_.create_db(self.good_file[1], os.path.join(self.empty_dir, 'db'),
width, 10, channels, self.BACKEND)
def test_no_shuffle(self):
_.create_db(self.good_file[1], os.path.join(self.empty_dir, 'db'),
10, 10, 1, self.BACKEND, shuffle=False)
def test_means(self):
mean_files = []
for suffix in 'jpg','npy','png','binaryproto':
mean_files.append(os.path.join(self.empty_dir, 'mean.%s' % suffix))
_.create_db(self.good_file[1], os.path.join(self.empty_dir, 'db'),
10, 10, 1, self.BACKEND, mean_files=mean_files)
class TestLmdbCreation(BaseCreationTest):
BACKEND = 'lmdb'
class TestHdf5Creation(BaseCreationTest):
BACKEND = 'hdf5'
def test_dset_limit(self):
_.create_db(self.good_file[1], os.path.join(self.empty_dir, 'db'),
10, 10, 1, 'hdf5', hdf5_dset_limit=10*10)
| bsd-3-clause | 1,129,440,676,417,889,300 | 30.468085 | 90 | 0.556187 | false | 3.61084 | true | false | false |
bashu/fluentcms-forms-builder | fluentcms_forms_builder/content_plugins.py | 1 | 3825 | # -*- coding: utf-8 -*-
from django.conf import settings
try:
from django.core.context_processors import csrf
except ImportError:
from django.template.context_processors import csrf
from django.utils.translation import ugettext_lazy as _
from django.template import RequestContext
from django.shortcuts import redirect
from email_extras.utils import send_mail_template
from forms_builder.forms.settings import EMAIL_FAIL_SILENTLY
from forms_builder.forms.signals import form_invalid, form_valid
from forms_builder.forms.utils import split_choices
from fluent_contents.extensions import ContentPlugin, plugin_pool
from .forms import FormForForm
from .models import FormItem, Form
@plugin_pool.register
class FormPlugin(ContentPlugin):
model = FormItem
category = _('Form')
render_template = "fluentcms_forms_builder/form.html"
cache_output = False
def get_context(self, request, instance, **kwargs):
context = super(FormPlugin, self).get_context(
request, instance, **kwargs)
context.update(form=instance.form, **csrf(request))
return context
def render(self, request, instance, **kwargs):
context = self.get_context(request, instance, **kwargs)
form = context['form']
if request.method == 'POST':
form_for_form = FormForForm(
form, RequestContext(request), request.POST, request.FILES or None)
if not form_for_form.is_valid():
form_invalid.send(sender=request, form=form_for_form)
else:
attachments = []
for f in form_for_form.files.values():
f.seek(0)
attachments.append((f.name, f.read()))
entry = form_for_form.save()
form_valid.send(sender=request, form=form_for_form, entry=entry)
self.send_emails(request, form_for_form, form, entry, attachments)
if not request.is_ajax() and form.redirect_url:
return redirect(str(form.redirect_url))
return self.render_to_string(request, "fluentcms_forms_builder/form_sent.html", context)
else:
form_for_form = FormForForm(form, RequestContext(request))
context.update(form_for_form=form_for_form)
return self.render_to_string(request, self.render_template, context)
def send_emails(self, request, form_for_form, form, entry, attachments):
subject = form.email_subject
if not subject:
subject = "%s - %s" % (form.title, entry.entry_time)
fields = []
for (k, v) in form_for_form.fields.items():
value = form_for_form.cleaned_data[k]
if isinstance(value, list):
value = ", ".join([i.strip() for i in value])
fields.append((v.label, value))
context = {
"fields": fields,
"message": form.email_message,
"request": request,
}
email_from = form.email_from or settings.DEFAULT_FROM_EMAIL
email_to = form_for_form.email_to()
if email_to and form.send_email:
send_mail_template(subject, "form_response", email_from,
email_to, context=context,
fail_silently=EMAIL_FAIL_SILENTLY)
headers = None
if email_to:
headers = {"Reply-To": email_to}
email_copies = split_choices(form.email_copies)
if email_copies:
send_mail_template(subject, "form_response_copies", email_from,
email_copies, context=context,
attachments=attachments,
fail_silently=EMAIL_FAIL_SILENTLY,
headers=headers)
| apache-2.0 | -6,763,389,801,044,049,000 | 37.25 | 104 | 0.60366 | false | 4.104077 | false | false | false |
jkitchin/scopus | pybliometrics/scopus/serial_title.py | 1 | 7337 | from collections import namedtuple
from pybliometrics.scopus.superclasses import Retrieval
from pybliometrics.scopus.utils import check_parameter_value, get_link
class SerialTitle(Retrieval):
@property
def aggregation_type(self):
"""The type of the source."""
return self._entry['prism:aggregationType']
@property
def citescoreyearinfolist(self):
"""A list of two tuples of the form (year, cite-score). The first
tuple represents the current cite-score, the second tuple
represents the tracker cite-score."""
try:
d = self._entry['citeScoreYearInfoList']
except KeyError:
return None
current = (d['citeScoreCurrentMetricYear'], d['citeScoreCurrentMetric'])
tracker = (d['citeScoreTrackerYear'], d['citeScoreTracker'])
return [current, tracker]
@property
def eissn(self):
"""The electronic ISSN of the source."""
return self._entry.get('prism:eIssn')
@property
def issn(self):
"""The ISSN of the source."""
return self._entry.get('prism:issn')
@property
def oaallowsauthorpaid(self):
"""Whether under the Open-Access policy authors are allowed to pay."""
return self._entry.get('oaAllowsAuthorPaid')
@property
def openaccess(self):
"""Open Access status (0 or 1)."""
return self._entry.get('openaccess')
@property
def openaccessstartdate(self):
"""Starting availability date."""
return self._entry.get('openaccessStartDate')
@property
def openaccesstype(self):
"""Open Archive status (full or partial)."""
return self._entry.get('openaccessType')
@property
def openaccessarticle(self):
"""Open Access status (boolean)."""
return self._entry.get('openaccessArticle')
@property
def openarchivearticle(self):
"""Open Archive status (boolean)."""
return self._entry.get('openArchiveArticle')
@property
def openaccesssponsorname(self):
"""The name of the Open Access sponsor."""
return self._entry.get('openaccessSponsorName')
@property
def openaccesssponsortype(self):
"""The type of the Open Access sponsor."""
return self._entry.get('openaccessSponsorType')
@property
def openaccessuserlicense(self):
"""The User license."""
return self._entry.get('openaccessUserLicense')
@property
def publisher(self):
"""The publisher of the source."""
return self._entry['dc:publisher']
@property
def scopus_source_link(self):
"""URL to info site on scopus.com."""
return get_link(self._entry, 0, ["link"])
@property
def self_link(self):
"""URL to the source's API page."""
return get_link(self._json, 0, ["link"])
@property
def sjrlist(self):
"""The SCImago Journal Rank (SJR) indicator as list of
(year, indicator)-tuples. See
https://www.scimagojr.com/journalrank.php.
"""
return _parse_list(self._entry, "SJR")
@property
def sniplist(self):
"""The Source-Normalized Impact per Paper (SNIP) as list of
(year, indicator)-tuples. See
https://blog.scopus.com/posts/journal-metrics-in-scopus-source-normalized-impact-per-paper-snip.
"""
return _parse_list(self._entry, "SNIP")
@property
def source_id(self):
"""The Scopus ID of the source."""
return self._entry['source-id']
@property
def subject_area(self):
"""List of named tuples of subject areas in the form
(area, abbreviation, code) of the source.
"""
area = namedtuple('Subjectarea', 'area abbreviation code')
areas = [area(area=item['$'], code=item['@code'],
abbreviation=item['@abbrev'])
for item in self._entry["subject-area"]]
return areas or None
@property
def title(self):
"""The title of the source."""
return self._entry['dc:title']
def __init__(self, issn, refresh=False, view="ENHANCED", years=None):
"""Interaction with the Serial Title API.
Parameters
----------
issn : str or int
The ISSN or the E-ISSN of the source.
refresh : bool or int (optional, default=False)
Whether to refresh the cached file if it exists or not. If int
is passed, cached file will be refreshed if the number of days
since last modification exceeds that value.
view : str (optional, default="ENHANCED")
The view of the file that should be downloaded. Allowed values:
BASIC, STANDARD, ENHANCED. For details see
https://dev.elsevier.com/sc_serial_title_views.html.
years : str (optional, default=None)
A string specifying a year or range of years (combining two
years with a hyphen) for which yearly metric data (SJR, SNIP,
yearly-data) should be looked up for. If None, only the
most recent metric data values are provided.
Note: If not None, refresh will always be True.
Examples
--------
See https://pybliometrics.readthedocs.io/en/stable/examples/SerialTitle.html.
Notes
-----
The directory for cached results is `{path}/{view}/{source_id}`,
where `path` is specified in `~/.scopus/config.ini`.
"""
# Checks
check_parameter_value(view, ('BASIC', 'STANDARD', 'ENHANCED'), "view")
# Load json
self._id = str(issn)
self._years = years
# Force refresh when years is specified
if years:
refresh = True
Retrieval.__init__(self, identifier=self._id, view=view, date=years,
api='SerialTitle', refresh=refresh)
self._json = self._json['serial-metadata-response']
self._entry = self._json['entry'][0]
def __str__(self):
"""Print a summary string."""
date = self.get_cache_file_mdate().split()[0]
areas = [e.area for e in self.subject_area]
if len(areas) == 1:
areas = areas[0]
else:
areas = " and ".join([", ".join(areas[:-1]), areas[-1]])
s = f"'{self.title}', {self.aggregation_type} published by "\
f"'{self.publisher}', is active in {areas}\n"
metrics = []
if self.sjrlist:
metrics.append(f"SJR: year value")
for rec in self.sjrlist:
metrics.append(f" {rec[0]} {rec[1]}")
if self.sniplist:
metrics.append(f"SNIP: year value")
for rec in self.sniplist:
metrics.append(f" {rec[0]} {rec[1]}")
if metrics:
s += f"Metrics as of {date}:\n " + "\n ".join(metrics) + "\n"
s += f" ISSN: {self.issn or '-'}, E-ISSN: {self.eissn or '-'}, "\
f"Scopus ID: {self.source_id}"
return s
def _parse_list(d, metric):
"""Auxiliary function to parse SNIP and SJR lists."""
try:
values = [(r['@year'], r['$']) for r in d[metric + "List"][metric]]
return sorted(set(values))
except (KeyError, TypeError):
return None
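# Hedged usage sketch (illustrative only, not part of the original module;
# assumes a configured pybliometrics installation with valid Scopus API keys
# and uses an arbitrary example ISSN):
#
#     from pybliometrics.scopus import SerialTitle
#     source = SerialTitle("00368075", view="ENHANCED", years="2017-2019")
#     print(source.title, "-", source.publisher)
#     print(source.sjrlist)    # list of (year, SJR) tuples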
| mit | -4,404,568,984,855,321,600 | 33.608491 | 104 | 0.585525 | false | 3.92773 | false | false | false |
ncsulug/ncsulug-website | lug_events/models.py | 1 | 4371 | from django.core.exceptions import ValidationError
from django.db import models
def ncsu_semester(date):
"""
An algorithm for estimating NC State University semester start dates.
* Spring is January 1-May 14.
* Summer is May 15-August 14.
* Fall is August 15-December 31.
"""
if date.month < 5:
return "Spring"
elif date.month == 5 and date.day < 15:
return "Spring"
elif date.month < 8:
return "Summer"
    elif date.month == 8 and date.day < 15:
return "Summer"
else:
return "Fall"
class EventKind(models.Model):
singular = models.CharField(max_length=32,
help_text="What we call this kind of event, title case. "
"Examples: Hack Day, Technical Meeting, "
"Social Dinner, Business Meeting")
plural = models.CharField(max_length=32,
help_text="Pluralize the name above.")
description = models.CharField(max_length=128,
help_text="A tooltip description for this event kind. "
"This should be a noun phrase capitalized "
"and punctuated as a sentence.")
class Meta:
ordering = ['plural']
def __unicode__(self):
return self.plural
class Event(models.Model):
name = models.CharField(max_length=64,
help_text="The event's name, to go on the calendar. "
"Repeating names is OK.")
kind = models.ForeignKey(EventKind, null=False)
start_time = models.DateTimeField()
end_time = models.DateTimeField()
speaker = models.CharField(max_length=48, blank=True,
help_text="The name of the speaker or sponsor, "
"if applicable. "
"Examples: \"Matthew Frazier\", "
"\"Jim Whitehurst of Red Hat\"")
location = models.CharField(max_length=64,
help_text="The event's location. Examples: "
"\"Engineering Building II 1227\", "
"\"2426 Hillsborough St\", "
"\"Location TBD\"")
pitch = models.TextField(blank=True,
help_text="A quick paragraph describing the event and "
"encouraging people to attend. "
"For full details, use the URL below. "
"Plain text.")
custom_url = models.URLField("Custom URL", blank=True,
help_text="A custom URL for the event, to use instead "
"of a wiki page.")
advisory = models.CharField(max_length=32, blank=True,
help_text="Some sort of notice that needs to be "
"advertised for the event. It will be displayed "
"prominently and with a sense of urgency. "
"Example: Cancelled due to inclement weather")
on_website = models.BooleanField("display on Web site", default=True,
help_text="Whether to display this event in the events "
"lineup on the homepage and the history page.")
on_billboard = models.BooleanField("display on Billboard", default=True,
help_text="Whether to display this event on the "
"Billboard slides.")
class Meta:
get_latest_by = 'start_time'
ordering = ['-start_time']
def __unicode__(self):
return self.name
def clean(self):
if self.start_time >= self.end_time:
raise ValidationError("Events must end after they start.")
@property
def semester(self):
return ncsu_semester(self.start_time) + self.start_time.strftime(" %Y")
@property
def has_link(self):
return bool(self.custom_url)
def get_absolute_url(self):
if self.custom_url:
return self.custom_url
else:
return '/events/' # FIXME
| gpl-3.0 | 6,551,252,841,250,338,000 | 39.850467 | 83 | 0.511782 | false | 4.596215 | false | false | false |
bepo13/destiny-stl-generator | src/main.py | 1 | 1203 | import os
from BungieDatabase import BungieDatabase
outputPath = "../stl"
def main():
print("Welcome to the Destiny stl generator")
# Create a Bungie Database object and connect to it
db = BungieDatabase()
db.connect()
if not os.path.exists(outputPath):
print("Creating stl output directory "+outputPath)
os.makedirs(outputPath)
while True:
# Get user request
command = input("Enter an item name or id: ")
# Break if q, quit or exit was typed
if command == "q" or command == "quit" or command == "exit":
break
# Update the database if requested
elif command == "update":
db.update()
# Assume the entered text was an item name or id
else:
# Download the model data for this item
item = command
model = db.getModel(item)
# If the model is not null generate the stl file
if model is not None:
model.generate(outputPath+"/"+item+".stl")
# Close the database and exit
db.close()
print("Bye.")
exit()
if __name__ == '__main__':
main()
| mit | 89,564,502,718,134,220 | 26.976744 | 68 | 0.557772 | false | 4.34296 | false | false | false |
ChinaMassClouds/copenstack-server | openstack/src/nova-2014.2/nova/api/openstack/compute/contrib/createserverext.py | 100 | 1156 | # Copyright 2011 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from nova.api.openstack import extensions
class Createserverext(extensions.ExtensionDescriptor):
"""Extended support to the Create Server v1.1 API."""
name = "Createserverext"
alias = "os-create-server-ext"
namespace = ("http://docs.openstack.org/compute/ext/"
"createserverext/api/v1.1")
updated = "2011-07-19T00:00:00Z"
def get_resources(self):
res = extensions.ResourceExtension('os-create-server-ext',
inherits='servers')
return [res]
| gpl-2.0 | -591,397,606,980,651,900 | 37.533333 | 78 | 0.679066 | false | 4.070423 | false | false | false |
tkf/rash | rash/daemon.py | 1 | 6617 | # Copyright (C) 2013- Takafumi Arakaki
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import os
def daemon_run(no_error, restart, record_path, keep_json, check_duplicate,
use_polling, log_level):
"""
Run RASH index daemon.
This daemon watches the directory ``~/.config/rash/data/record``
and translate the JSON files dumped by ``record`` command into
sqlite3 DB at ``~/.config/rash/data/db.sqlite``.
``rash init`` will start RASH automatically by default.
But there are alternative ways to start daemon.
If you want to organize background process in one place such
as supervisord_, it is good to add `--restart` option to force
stop other daemon process if you accidentally started it in
other place. Here is an example of supervisord_ setup::
[program:rash-daemon]
command=rash daemon --restart
.. _supervisord: http://supervisord.org/
Alternatively, you can call ``rash index`` in cron job to
avoid using daemon. It is useful if you want to use RASH
on NFS, as it looks like watchdog does not work on NFS.::
# Refresh RASH DB every 10 minutes
*/10 * * * * rash index
"""
# Probably it makes sense to use this daemon to provide search
# API, so that this daemon is going to be the only process that
# is connected to the DB?
from .config import ConfigStore
from .indexer import Indexer
from .log import setup_daemon_log_file, LogForTheFuture
from .watchrecord import watch_record, install_sigterm_handler
install_sigterm_handler()
cfstore = ConfigStore()
if log_level:
cfstore.daemon_log_level = log_level
flogger = LogForTheFuture()
# SOMEDAY: make PID checking/writing atomic if possible
flogger.debug('Checking old PID file %r.', cfstore.daemon_pid_path)
if os.path.exists(cfstore.daemon_pid_path):
flogger.debug('Old PID file exists. Reading from it.')
with open(cfstore.daemon_pid_path, 'rt') as f:
pid = int(f.read().strip())
flogger.debug('Checking if old process with PID=%d is alive', pid)
try:
os.kill(pid, 0) # check if `pid` is alive
except OSError:
flogger.info(
'Process with PID=%d is already dead. '
'So just go on and use this daemon.', pid)
else:
if restart:
flogger.info('Stopping old daemon with PID=%d.', pid)
stop_running_daemon(cfstore, pid)
else:
message = ('There is already a running daemon (PID={0})!'
.format(pid))
if no_error:
flogger.debug(message)
# FIXME: Setup log handler and flogger.dump().
# Note that using the default log file is not safe
# since it has already been used.
return
else:
raise RuntimeError(message)
else:
flogger.debug('Daemon PID file %r does not exists. '
'So just go on and use this daemon.',
cfstore.daemon_pid_path)
with open(cfstore.daemon_pid_path, 'w') as f:
f.write(str(os.getpid()))
try:
setup_daemon_log_file(cfstore)
flogger.dump()
indexer = Indexer(cfstore, check_duplicate, keep_json, record_path)
indexer.index_all()
watch_record(indexer, use_polling)
finally:
os.remove(cfstore.daemon_pid_path)
def stop_running_daemon(cfstore, pid):
import time
import signal
os.kill(pid, signal.SIGTERM)
for _ in range(30):
time.sleep(0.1)
if not os.path.exists(cfstore.daemon_pid_path):
break
else:
raise RuntimeError(
'Failed to stop running daemon process (PID={0})'
.format(pid))
def start_daemon_in_subprocess(options, outpath=os.devnull):
"""
Run `rash daemon --no-error` in background.
:type options: list of str
:arg options: options for "rash daemon" command
:type outpath: str
:arg outpath: path to redirect daemon output
"""
import subprocess
import sys
from .utils.py3compat import nested
from .utils.pathutils import mkdirp
if outpath != os.devnull:
mkdirp(os.path.dirname(outpath))
with nested(open(os.devnull),
open(outpath, 'w')) as (stdin, stdout):
subprocess.Popen(
[os.path.abspath(sys.executable), '-m', 'rash.cli',
'daemon', '--no-error'] + options,
preexec_fn=os.setsid,
stdin=stdin, stdout=stdout, stderr=subprocess.STDOUT)
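# Hedged usage sketch (illustrative only, not part of the original module):
#
#     # Start "rash daemon --no-error --keep-json" in the background and
#     # send its output to a log file.
#     start_daemon_in_subprocess(
#         ['--keep-json'],
#         os.path.expanduser('~/.config/rash/daemon.log'))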
def daemon_add_arguments(parser):
parser.add_argument(
'--no-error', action='store_true', default=False,
help="""
Do nothing if a daemon is already running.
""")
parser.add_argument(
'--restart', action='store_true', default=False,
help="""
Kill already running daemon process if exist.
""")
parser.add_argument(
'--record-path',
help="""
specify the directory that has JSON records.
""")
parser.add_argument(
'--keep-json', default=False, action='store_true',
help="""
Do not remove old JSON files. It turns on --check-duplicate.
""")
parser.add_argument(
'--check-duplicate', default=False, action='store_true',
help='do not store already existing history in DB.')
parser.add_argument(
'--use-polling', default=False, action='store_true',
help="""
Use polling instead of system specific notification.
This is useful, for example, when your $HOME is on NFS where
inotify does not work.
""")
parser.add_argument(
'--log-level',
choices=['CRITICAL', 'ERROR', 'WARNING', 'INFO', 'DEBUG'],
help='logging level.')
commands = [
('daemon', daemon_add_arguments, daemon_run),
]
| gpl-3.0 | 5,581,733,001,531,351,000 | 34.196809 | 75 | 0.614176 | false | 4.027389 | false | false | false |
li-yuntao/SiliconLives | BasicModels/page_rank.py | 1 | 1448 | # coding: utf-8
import random
import numpy as np
def fake_adjacency_list(node_size):
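    # Build a random directed graph as {node: [successor, ...]}: each source
    # node draws a threshold and links to every node whose own draw beats it.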
adjancency_list = {}
for node_src in range(node_size):
adjancency_list[node_src] = []
threshold = random.random()
for node_dst in range(node_size):
p_jump = random.random()
if p_jump >= threshold:
adjancency_list[node_src].append(node_dst)
return adjancency_list
def page_rank(p, adjancency_list):
def adjancency_list_to_table(adjancency_list):
node_size = len(adjancency_list)
adjancency_table = np.zeros([node_size, node_size])
for src_node, dst_nodes in adjancency_list.items():
cnt_dst_nodes = len(dst_nodes)
for dst_node in dst_nodes:
adjancency_table[src_node, dst_node] = 1.0 / cnt_dst_nodes
return adjancency_table
node_size = len(adjancency_list)
adjancency_table = adjancency_list_to_table(adjancency_list)
init_state = np.array([[1.0 / node_size for _ in range(node_size)]]).T
    # Power iteration: rank = p * P^T * rank + (1 - p) * rank_0, where
    # P[src, dst] = 1 / outdegree(src) for each edge src -> dst; the
    # transpose is needed because the rank is stored as a column vector.
    last_state = init_state
    while True:
        state = p * adjancency_table.T.dot(last_state) + (1 - p) * init_state
        # stop once the ranks have numerically converged
        if np.allclose(state, last_state):
            break
        last_state = state
    return last_state
if __name__ == '__main__':
adjancency_list = fake_adjacency_list(6)
p = 0.8
page_rank_value = page_rank(p, adjancency_list)
print(page_rank_value)
| gpl-3.0 | -1,871,651,116,630,963,000 | 29.166667 | 75 | 0.604972 | false | 3.100642 | false | false | false |
JayVora-SerpentCS/vertical-hotel | hotel/models/hotel.py | 1 | 46745 | # -*- coding: utf-8 -*-
# See LICENSE file for full copyright and licensing details.
import time
import datetime
from odoo.exceptions import UserError, ValidationError
from odoo.osv import expression
from odoo.tools import misc, DEFAULT_SERVER_DATETIME_FORMAT
from odoo import models, fields, api, _
from decimal import Decimal
def _offset_format_timestamp1(src_tstamp_str, src_format, dst_format,
ignore_unparsable_time=True, context=None):
"""
    Convert a source timestamp string into a destination timestamp string,
    applying the offset from the source timezone to UTC when a timezone is
    given in ``context``, or no offset at all when it is not (i.e. assuming
    source and destination are in the same timezone).
    @param src_tstamp_str: the str value containing the timestamp.
    @param src_format: the format to use when parsing the source timestamp.
    @param dst_format: the format to use when formatting the resulting
                       timestamp.
    @param ignore_unparsable_time: if True, return src_tstamp_str unchanged
                                   when it cannot be parsed using src_format
                                   or formatted using dst_format; if False,
                                   return False in that case.
    @param context: dictionary that may contain a 'tz' key naming the
                    source timezone.
    @return: destination formatted timestamp, converted to UTC when the
             source timezone could be determined, otherwise formatted
             without applying any offset.
"""
if not src_tstamp_str:
return False
res = src_tstamp_str
if src_format and dst_format:
try:
# dt_value needs to be a datetime.datetime object\
# (so notime.struct_time or mx.DateTime.DateTime here!)
dt_value = datetime.datetime.strptime(src_tstamp_str, src_format)
if context.get('tz', False):
try:
import pytz
src_tz = pytz.timezone(context['tz'])
dst_tz = pytz.timezone('UTC')
src_dt = src_tz.localize(dt_value, is_dst=True)
dt_value = src_dt.astimezone(dst_tz)
except Exception:
pass
res = dt_value.strftime(dst_format)
except Exception:
# Normal ways to end up here are if strptime or strftime failed
if not ignore_unparsable_time:
return False
pass
return res
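# Hedged usage sketch (illustrative only, not part of the original module):
#
#     _offset_format_timestamp1('2023-01-01 12:00:00',
#                               DEFAULT_SERVER_DATETIME_FORMAT,
#                               DEFAULT_SERVER_DATETIME_FORMAT,
#                               context={'tz': 'Europe/Brussels'})
#     # -> '2023-01-01 11:00:00' (noon CET expressed in UTC)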
class HotelFloor(models.Model):
_name = "hotel.floor"
_description = "Floor"
name = fields.Char('Floor Name', size=64, required=True, index=True)
sequence = fields.Integer('Sequence', size=64, index=True)
class HotelRoomType(models.Model):
_name = "hotel.room.type"
_description = "Room Type"
name = fields.Char('Name', size=64, required=True)
categ_id = fields.Many2one('hotel.room.type', 'Category')
child_id = fields.One2many('hotel.room.type', 'categ_id',
'Child Categories')
@api.multi
def name_get(self):
def get_names(cat):
""" Return the list [cat.name, cat.categ_id.name, ...] """
res = []
while cat:
res.append(cat.name)
cat = cat.categ_id
return res
return [(cat.id, " / ".join(reversed(get_names(cat)))) for cat in self]
@api.model
def name_search(self, name, args=None, operator='ilike', limit=100):
if not args:
args = []
if name:
            # Be sure name_search is symmetric to name_get
category_names = name.split(' / ')
parents = list(category_names)
child = parents.pop()
domain = [('name', operator, child)]
if parents:
names_ids = self.name_search(' / '.join(parents), args=args,
operator='ilike', limit=limit)
category_ids = [name_id[0] for name_id in names_ids]
if operator in expression.NEGATIVE_TERM_OPERATORS:
categories = self.search([('id', 'not in', category_ids)])
domain = expression.OR([[('categ_id', 'in',
categories.ids)], domain])
else:
domain = expression.AND([[('categ_id', 'in',
category_ids)], domain])
for i in range(1, len(category_names)):
domain = [[('name', operator,
' / '.join(category_names[-1 - i:]))], domain]
if operator in expression.NEGATIVE_TERM_OPERATORS:
domain = expression.AND(domain)
else:
domain = expression.OR(domain)
categories = self.search(expression.AND([domain, args]),
limit=limit)
else:
categories = self.search(args, limit=limit)
return categories.name_get()
class ProductProduct(models.Model):
_inherit = "product.product"
isroom = fields.Boolean('Is Room')
iscategid = fields.Boolean('Is categ id')
isservice = fields.Boolean('Is Service id')
class HotelRoomAmenitiesType(models.Model):
_name = 'hotel.room.amenities.type'
_description = 'amenities Type'
name = fields.Char('Name', size=64, required=True)
amenity_id = fields.Many2one('hotel.room.amenities.type', 'Category')
child_id = fields.One2many('hotel.room.amenities.type', 'amenity_id',
'Child Categories')
@api.multi
def name_get(self):
def get_names(cat):
""" Return the list [cat.name, cat.amenity_id.name, ...] """
res = []
while cat:
res.append(cat.name)
cat = cat.amenity_id
return res
return [(cat.id, " / ".join(reversed(get_names(cat)))) for cat in self]
@api.model
def name_search(self, name, args=None, operator='ilike', limit=100):
if not args:
args = []
if name:
            # Be sure name_search is symmetric to name_get
category_names = name.split(' / ')
parents = list(category_names)
child = parents.pop()
domain = [('name', operator, child)]
if parents:
names_ids = self.name_search(' / '.join(parents), args=args,
operator='ilike', limit=limit)
category_ids = [name_id[0] for name_id in names_ids]
if operator in expression.NEGATIVE_TERM_OPERATORS:
categories = self.search([('id', 'not in', category_ids)])
domain = expression.OR([[('amenity_id', 'in',
categories.ids)], domain])
else:
domain = expression.AND([[('amenity_id', 'in',
category_ids)], domain])
for i in range(1, len(category_names)):
domain = [[('name', operator,
' / '.join(category_names[-1 - i:]))], domain]
if operator in expression.NEGATIVE_TERM_OPERATORS:
domain = expression.AND(domain)
else:
domain = expression.OR(domain)
categories = self.search(expression.AND([domain, args]),
limit=limit)
else:
categories = self.search(args, limit=limit)
return categories.name_get()
class HotelRoomAmenities(models.Model):
_name = 'hotel.room.amenities'
_description = 'Room amenities'
product_id = fields.Many2one('product.product', 'Product Category',
required=True, delegate=True,
ondelete='cascade')
categ_id = fields.Many2one('hotel.room.amenities.type',
string='Amenities Category', required=True)
product_manager = fields.Many2one('res.users', string='Product Manager')
class FolioRoomLine(models.Model):
_name = 'folio.room.line'
_description = 'Hotel Room Reservation'
_rec_name = 'room_id'
room_id = fields.Many2one(comodel_name='hotel.room', string='Room id')
check_in = fields.Datetime('Check In Date', required=True)
check_out = fields.Datetime('Check Out Date', required=True)
folio_id = fields.Many2one('hotel.folio', string='Folio Number')
status = fields.Selection(string='state', related='folio_id.state')
class HotelRoom(models.Model):
_name = 'hotel.room'
_description = 'Hotel Room'
product_id = fields.Many2one('product.product', 'Product_id',
required=True, delegate=True,
ondelete='cascade')
floor_id = fields.Many2one('hotel.floor', 'Floor No',
help='At which floor the room is located.')
max_adult = fields.Integer('Max Adult')
max_child = fields.Integer('Max Child')
categ_id = fields.Many2one('hotel.room.type', string='Room Category',
required=True)
room_amenities = fields.Many2many('hotel.room.amenities', 'temp_tab',
'room_amenities', 'rcateg_id',
string='Room Amenities',
help='List of room amenities. ')
status = fields.Selection([('available', 'Available'),
('occupied', 'Occupied')],
'Status', default='available')
capacity = fields.Integer('Capacity', required=True)
room_line_ids = fields.One2many('folio.room.line', 'room_id',
string='Room Reservation Line')
product_manager = fields.Many2one('res.users', string='Product Manager')
@api.constrains('capacity')
def check_capacity(self):
for room in self:
if room.capacity <= 0:
raise ValidationError(_('Room capacity must be more than 0'))
@api.onchange('isroom')
def isroom_change(self):
'''
Based on isroom, status will be updated.
----------------------------------------
@param self: object pointer
'''
if self.isroom is False:
self.status = 'occupied'
if self.isroom is True:
self.status = 'available'
@api.multi
def write(self, vals):
"""
Overrides orm write method.
@param self: The object pointer
@param vals: dictionary of fields value.
"""
if 'isroom' in vals and vals['isroom'] is False:
vals.update({'color': 2, 'status': 'occupied'})
        if 'isroom' in vals and vals['isroom'] is True:
vals.update({'color': 5, 'status': 'available'})
ret_val = super(HotelRoom, self).write(vals)
return ret_val
@api.multi
def set_room_status_occupied(self):
"""
This method is used to change the state
to occupied of the hotel room.
---------------------------------------
@param self: object pointer
"""
return self.write({'isroom': False, 'color': 2})
@api.multi
def set_room_status_available(self):
"""
This method is used to change the state
to available of the hotel room.
---------------------------------------
@param self: object pointer
"""
return self.write({'isroom': True, 'color': 5})
class HotelFolio(models.Model):
@api.multi
def name_get(self):
res = []
disp = ''
for rec in self:
if rec.order_id:
disp = str(rec.name)
res.append((rec.id, disp))
return res
@api.model
def name_search(self, name='', args=None, operator='ilike', limit=100):
if args is None:
args = []
args += ([('name', operator, name)])
mids = self.search(args, limit=100)
return mids.name_get()
@api.model
def _needaction_count(self, domain=None):
"""
Show a count of draft state folio on the menu badge.
@param self: object pointer
"""
return self.search_count([('state', '=', 'draft')])
@api.model
def _get_checkin_date(self):
if self._context.get('tz'):
to_zone = self._context.get('tz')
else:
to_zone = 'UTC'
return _offset_format_timestamp1(time.strftime("%Y-%m-%d 12:00:00"),
DEFAULT_SERVER_DATETIME_FORMAT,
DEFAULT_SERVER_DATETIME_FORMAT,
ignore_unparsable_time=True,
context={'tz': to_zone})
@api.model
def _get_checkout_date(self):
if self._context.get('tz'):
to_zone = self._context.get('tz')
else:
to_zone = 'UTC'
tm_delta = datetime.timedelta(days=1)
return datetime.datetime.strptime(_offset_format_timestamp1
(time.strftime("%Y-%m-%d 12:00:00"),
DEFAULT_SERVER_DATETIME_FORMAT,
DEFAULT_SERVER_DATETIME_FORMAT,
ignore_unparsable_time=True,
context={'tz': to_zone}),
'%Y-%m-%d %H:%M:%S') + tm_delta
@api.multi
def copy(self, default=None):
'''
@param self: object pointer
@param default: dict of default values to be set
'''
return super(HotelFolio, self).copy(default=default)
_name = 'hotel.folio'
_description = 'hotel folio new'
_rec_name = 'order_id'
_order = 'id'
# _inherit = ['ir.needaction_mixin']
name = fields.Char('Folio Number', readonly=True, index=True,
default='New')
order_id = fields.Many2one('sale.order', 'Order', delegate=True,
required=True, ondelete='cascade')
checkin_date = fields.Datetime('Check In', required=True, readonly=True,
states={'draft': [('readonly', False)]},
default=_get_checkin_date)
checkout_date = fields.Datetime('Check Out', required=True, readonly=True,
states={'draft': [('readonly', False)]},
default=_get_checkout_date)
room_lines = fields.One2many('hotel.folio.line', 'folio_id',
readonly=True,
states={'draft': [('readonly', False)],
'sent': [('readonly', False)]},
help="Hotel room reservation detail.")
service_lines = fields.One2many('hotel.service.line', 'folio_id',
readonly=True,
states={'draft': [('readonly', False)],
'sent': [('readonly', False)]},
help="Hotel services detail provide to"
"customer and it will include in "
"main Invoice.")
hotel_policy = fields.Selection([('prepaid', 'On Booking'),
('manual', 'On Check In'),
('picking', 'On Checkout')],
'Hotel Policy', default='manual',
help="Hotel policy for payment that "
"either the guest has to payment at "
"booking time or check-in "
"check-out time.")
duration = fields.Float('Duration in Days',
help="Number of days which will automatically "
"count from the check-in and check-out date. ")
hotel_invoice_id = fields.Many2one('account.invoice', 'Invoice',
copy=False)
duration_dummy = fields.Float('Duration Dummy')
@api.constrains('room_lines')
def folio_room_lines(self):
'''
This method is used to validate the room_lines.
------------------------------------------------
@param self: object pointer
@return: raise warning depending on the validation
'''
folio_rooms = []
for room in self[0].room_lines:
if room.product_id.id in folio_rooms:
raise ValidationError(_('You Cannot Take Same Room Twice'))
folio_rooms.append(room.product_id.id)
@api.onchange('checkout_date', 'checkin_date')
def onchange_dates(self):
'''
        This method gives the duration between check-in and checkout.
        If the customer stays only for a few hours it is still counted
        as a whole day. If the stay runs over by at least the number of
        additional hours configured on the company, one more full day
        is counted.
--------------------------------------------------------------------
@param self: object pointer
@return: Duration and checkout_date
'''
configured_addition_hours = 0
wid = self.warehouse_id
whouse_com_id = wid or wid.company_id
if whouse_com_id:
configured_addition_hours = wid.company_id.additional_hours
myduration = 0
chckin = self.checkin_date
chckout = self.checkout_date
if chckin and chckout:
server_dt = DEFAULT_SERVER_DATETIME_FORMAT
chkin_dt = datetime.datetime.strptime(chckin, server_dt)
chkout_dt = datetime.datetime.strptime(chckout, server_dt)
dur = chkout_dt - chkin_dt
sec_dur = dur.seconds
if (not dur.days and not sec_dur) or (dur.days and not sec_dur):
myduration = dur.days
else:
myduration = dur.days + 1
            # Add an extra day when leftover hours reach the configured threshold
if configured_addition_hours > 0:
additional_hours = abs((dur.seconds / 60) / 60)
if additional_hours >= configured_addition_hours:
myduration += 1
self.duration = myduration
self.duration_dummy = self.duration
@api.model
def create(self, vals, check=True):
"""
Overrides orm create method.
@param self: The object pointer
@param vals: dictionary of fields value.
@return: new record set for hotel folio.
"""
if not 'service_lines' and 'folio_id' in vals:
tmp_room_lines = vals.get('room_lines', [])
vals['order_policy'] = vals.get('hotel_policy', 'manual')
vals.update({'room_lines': []})
folio_id = super(HotelFolio, self).create(vals)
for line in (tmp_room_lines):
line[2].update({'folio_id': folio_id})
vals.update({'room_lines': tmp_room_lines})
folio_id.write(vals)
else:
if not vals:
vals = {}
vals['name'] = self.env['ir.sequence'].next_by_code('hotel.folio')
vals['duration'] = vals.get('duration',
0.0) or vals.get('duration_dummy',
0.0)
folio_id = super(HotelFolio, self).create(vals)
folio_room_line_obj = self.env['folio.room.line']
h_room_obj = self.env['hotel.room']
try:
for rec in folio_id:
if not rec.reservation_id:
for room_rec in rec.room_lines:
prod = room_rec.product_id.name
room_obj = h_room_obj.search([('name', '=',
prod)])
room_obj.write({'isroom': False})
vals = {'room_id': room_obj.id,
'check_in': rec.checkin_date,
'check_out': rec.checkout_date,
'folio_id': rec.id,
}
folio_room_line_obj.create(vals)
except:
for rec in folio_id:
for room_rec in rec.room_lines:
prod = room_rec.product_id.name
room_obj = h_room_obj.search([('name', '=', prod)])
room_obj.write({'isroom': False})
vals = {'room_id': room_obj.id,
'check_in': rec.checkin_date,
'check_out': rec.checkout_date,
'folio_id': rec.id,
}
folio_room_line_obj.create(vals)
return folio_id
@api.multi
def write(self, vals):
"""
Overrides orm write method.
@param self: The object pointer
@param vals: dictionary of fields value.
"""
product_obj = self.env['product.product']
h_room_obj = self.env['hotel.room']
folio_room_line_obj = self.env['folio.room.line']
room_lst1 = []
for rec in self:
for res in rec.room_lines:
room_lst1.append(res.product_id.id)
room_lst = []
for folio_obj in self:
if vals and vals.get('duration_dummy', False):
vals['duration'] = vals.get('duration_dummy', 0.0)
else:
vals['duration'] = folio_obj.duration
for folio_rec in folio_obj.room_lines:
room_lst.append(folio_rec.product_id.id)
new_rooms = set(room_lst).difference(set(room_lst1))
if len(list(new_rooms)) != 0:
room_list = product_obj.browse(list(new_rooms))
for rm in room_list:
room_obj = h_room_obj.search([('name', '=', rm.name)])
room_obj.write({'isroom': False})
vals = {'room_id': room_obj.id,
'check_in': folio_obj.checkin_date,
'check_out': folio_obj.checkout_date,
'folio_id': folio_obj.id,
}
folio_room_line_obj.create(vals)
if len(list(new_rooms)) == 0:
room_list_obj = product_obj.browse(room_lst1)
for rom in room_list_obj:
room_obj = h_room_obj.search([('name', '=', rom.name)])
room_obj.write({'isroom': False})
room_vals = {'room_id': room_obj.id,
'check_in': folio_obj.checkin_date,
'check_out': folio_obj.checkout_date,
'folio_id': folio_obj.id,
}
folio_romline_rec = (folio_room_line_obj.search
([('folio_id', '=', folio_obj.id)]))
folio_romline_rec.write(room_vals)
return super(HotelFolio, self).write(vals)
@api.onchange('warehouse_id')
def onchange_warehouse_id(self):
'''
When you change warehouse it will update the warehouse of
the hotel folio as well
----------------------------------------------------------
@param self: object pointer
'''
return self.order_id._onchange_warehouse_id()
@api.onchange('partner_id')
def onchange_partner_id(self):
'''
When you change partner_id it will update the partner_invoice_id,
partner_shipping_id and pricelist_id of the hotel folio as well
---------------------------------------------------------------
@param self: object pointer
'''
if self.partner_id:
partner_rec = self.env['res.partner'].browse(self.partner_id.id)
order_ids = [folio.order_id.id for folio in self]
if not order_ids:
self.partner_invoice_id = partner_rec.id
self.partner_shipping_id = partner_rec.id
self.pricelist_id = partner_rec.property_product_pricelist.id
                raise UserError(_('No order found for %s.') % partner_rec.name)
else:
self.partner_invoice_id = partner_rec.id
self.partner_shipping_id = partner_rec.id
self.pricelist_id = partner_rec.property_product_pricelist.id
@api.multi
def button_dummy(self):
'''
@param self: object pointer
'''
for folio in self:
folio.order_id.button_dummy()
return True
@api.multi
def action_done(self):
self.state = 'done'
@api.multi
def action_invoice_create(self, grouped=False, final=False):
'''
@param self: object pointer
'''
room_lst = []
invoice_id = (self.order_id.action_invoice_create(grouped=False,
final=False))
for line in self:
values = {'invoiced': True,
'hotel_invoice_id': invoice_id
}
line.write(values)
for rec in line.room_lines:
room_lst.append(rec.product_id)
for room in room_lst:
room_obj = self.env['hotel.room'
].search([('name', '=', room.name)])
room_obj.write({'isroom': True})
return invoice_id
@api.multi
def action_invoice_cancel(self):
'''
@param self: object pointer
'''
if not self.order_id:
raise UserError(_('Order id is not available'))
for sale in self:
for line in sale.order_line:
line.write({'invoiced': 'invoiced'})
self.state = 'invoice_except'
return self.order_id.action_invoice_cancel
@api.multi
def action_cancel(self):
'''
@param self: object pointer
'''
if not self.order_id:
raise UserError(_('Order id is not available'))
for sale in self:
for invoice in sale.invoice_ids:
invoice.state = 'cancel'
return self.order_id.action_cancel()
@api.multi
def action_confirm(self):
for order in self.order_id:
order.state = 'sale'
# order.order_line._action_procurement_create()
if not order.analytic_account_id:
for line in order.order_line:
if line.product_id.invoice_policy == 'cost':
order._create_analytic_account()
break
config_parameter_obj = self.env['ir.config_parameter']
if config_parameter_obj.sudo().get_param('sale.auto_done_setting'):
self.order_id.action_done()
@api.multi
def test_state(self, mode):
'''
@param self: object pointer
@param mode: state of workflow
'''
write_done_ids = []
write_cancel_ids = []
if write_done_ids:
test_obj = self.env['sale.order.line'].browse(write_done_ids)
test_obj.write({'state': 'done'})
if write_cancel_ids:
test_obj = self.env['sale.order.line'].browse(write_cancel_ids)
test_obj.write({'state': 'cancel'})
@api.multi
def action_cancel_draft(self):
'''
@param self: object pointer
'''
if not len(self._ids):
return False
query = "select id from sale_order_line \
where order_id IN %s and state=%s"
self._cr.execute(query, (tuple(self._ids), 'cancel'))
cr1 = self._cr
line_ids = map(lambda x: x[0], cr1.fetchall())
self.write({'state': 'draft', 'invoice_ids': [], 'shipped': 0})
sale_line_obj = self.env['sale.order.line'].browse(line_ids)
sale_line_obj.write({'invoiced': False, 'state': 'draft',
'invoice_lines': [(6, 0, [])]})
return True
class HotelFolioLine(models.Model):
@api.multi
def copy(self, default=None):
'''
@param self: object pointer
@param default: dict of default values to be set
'''
return super(HotelFolioLine, self).copy(default=default)
@api.model
def _get_checkin_date(self):
if 'checkin' in self._context:
return self._context['checkin']
return time.strftime(DEFAULT_SERVER_DATETIME_FORMAT)
@api.model
def _get_checkout_date(self):
if 'checkout' in self._context:
return self._context['checkout']
return time.strftime(DEFAULT_SERVER_DATETIME_FORMAT)
_name = 'hotel.folio.line'
_description = 'hotel folio1 room line'
order_line_id = fields.Many2one('sale.order.line', string='Order Line',
required=True, delegate=True,
ondelete='cascade')
folio_id = fields.Many2one('hotel.folio', string='Folio',
ondelete='cascade')
checkin_date = fields.Datetime('Check In', required=True,
default=_get_checkin_date)
checkout_date = fields.Datetime('Check Out', required=True,
default=_get_checkout_date)
is_reserved = fields.Boolean('Is Reserved',
help='True when folio line created from \
Reservation')
@api.model
def create(self, vals, check=True):
"""
Overrides orm create method.
@param self: The object pointer
@param vals: dictionary of fields value.
@return: new record set for hotel folio line.
"""
if 'folio_id' in vals:
folio = self.env["hotel.folio"].browse(vals['folio_id'])
vals.update({'order_id': folio.order_id.id})
return super(HotelFolioLine, self).create(vals)
@api.constrains('checkin_date', 'checkout_date')
def check_dates(self):
'''
This method is used to validate the checkin_date and checkout_date.
-------------------------------------------------------------------
@param self: object pointer
@return: raise warning depending on the validation
'''
if self.checkin_date >= self.checkout_date:
raise ValidationError(_('Room line Check In Date Should be \
less than the Check Out Date!'))
if self.folio_id.date_order and self.checkin_date:
if self.checkin_date <= self.folio_id.date_order:
raise ValidationError(_('Room line check in date should be \
greater than the current date.'))
@api.multi
def unlink(self):
"""
Overrides orm unlink method.
@param self: The object pointer
@return: True/False.
"""
sale_line_obj = self.env['sale.order.line']
fr_obj = self.env['folio.room.line']
for line in self:
if line.order_line_id:
sale_unlink_obj = (sale_line_obj.browse
([line.order_line_id.id]))
for rec in sale_unlink_obj:
room_obj = self.env['hotel.room'
].search([('name', '=', rec.name)])
if room_obj.id:
folio_arg = [('folio_id', '=', line.folio_id.id),
('room_id', '=', room_obj.id)]
folio_room_line_myobj = fr_obj.search(folio_arg)
if folio_room_line_myobj.id:
folio_room_line_myobj.unlink()
room_obj.write({'isroom': True,
'status': 'available'})
sale_unlink_obj.unlink()
return super(HotelFolioLine, self).unlink()
@api.onchange('product_id')
def product_id_change(self):
        '''
        @param self: object pointer
        '''
context = dict(self._context)
if not context:
context = {}
if context.get('folio', False):
if self.product_id and self.folio_id.partner_id:
self.name = self.product_id.name
self.price_unit = self.product_id.list_price
self.product_uom = self.product_id.uom_id
tax_obj = self.env['account.tax']
pr = self.product_id
self.price_unit = tax_obj._fix_tax_included_price(pr.price,
pr.taxes_id,
self.tax_id)
else:
if not self.product_id:
return {'domain': {'product_uom': []}}
val = {}
pr = self.product_id.with_context(
lang=self.folio_id.partner_id.lang,
partner=self.folio_id.partner_id.id,
quantity=val.get('product_uom_qty') or self.product_uom_qty,
date=self.folio_id.date_order,
pricelist=self.folio_id.pricelist_id.id,
uom=self.product_uom.id
)
p = pr.with_context(pricelist=self.order_id.pricelist_id.id).price
if self.folio_id.pricelist_id and self.folio_id.partner_id:
obj = self.env['account.tax']
val['price_unit'] = obj._fix_tax_included_price(p,
pr.taxes_id,
self.tax_id)
@api.onchange('checkin_date', 'checkout_date')
def on_change_checkout(self):
'''
        When checkin_date or checkout_date changes, the dates are checked
        and the qty of the hotel folio line is updated
-----------------------------------------------------------------
@param self: object pointer
'''
configured_addition_hours = 0
fwhouse_id = self.folio_id.warehouse_id
fwc_id = fwhouse_id or fwhouse_id.company_id
if fwc_id:
configured_addition_hours = fwhouse_id.company_id.additional_hours
myduration = 0
if not self.checkin_date:
self.checkin_date = time.strftime(DEFAULT_SERVER_DATETIME_FORMAT)
if not self.checkout_date:
self.checkout_date = time.strftime(DEFAULT_SERVER_DATETIME_FORMAT)
chckin = self.checkin_date
chckout = self.checkout_date
if chckin and chckout:
server_dt = DEFAULT_SERVER_DATETIME_FORMAT
chkin_dt = datetime.datetime.strptime(chckin, server_dt)
chkout_dt = datetime.datetime.strptime(chckout, server_dt)
dur = chkout_dt - chkin_dt
sec_dur = dur.seconds
if (not dur.days and not sec_dur) or (dur.days and not sec_dur):
myduration = dur.days
else:
myduration = dur.days + 1
            # Add an extra day when leftover hours reach the configured threshold
if configured_addition_hours > 0:
additional_hours = abs((dur.seconds / 60) / 60)
if additional_hours >= configured_addition_hours:
myduration += 1
self.product_uom_qty = myduration
hotel_room_obj = self.env['hotel.room']
hotel_room_ids = hotel_room_obj.search([])
avail_prod_ids = []
for room in hotel_room_ids:
assigned = False
for rm_line in room.room_line_ids:
if rm_line.status != 'cancel':
if(self.checkin_date <= rm_line.check_in <=
self.checkout_date) or (self.checkin_date <=
rm_line.check_out <=
self.checkout_date):
assigned = True
elif (rm_line.check_in <= self.checkin_date <=
rm_line.check_out) or (rm_line.check_in <=
self.checkout_date <=
rm_line.check_out):
assigned = True
if not assigned:
avail_prod_ids.append(room.product_id.id)
domain = {'product_id': [('id', 'in', avail_prod_ids)]}
return {'domain': domain}
@api.multi
def button_confirm(self):
'''
@param self: object pointer
'''
for folio in self:
line = folio.order_line_id
line.button_confirm()
return True
@api.multi
def button_done(self):
'''
@param self: object pointer
'''
lines = [folio_line.order_line_id for folio_line in self]
lines.button_done()
self.state = 'done'
return True
@api.multi
def copy_data(self, default=None):
'''
@param self: object pointer
@param default: dict of default values to be set
'''
line_id = self.order_line_id.id
sale_line_obj = self.env['sale.order.line'].browse(line_id)
return sale_line_obj.copy_data(default=default)
class HotelServiceLine(models.Model):
@api.multi
def copy(self, default=None):
'''
@param self: object pointer
@param default: dict of default values to be set
'''
return super(HotelServiceLine, self).copy(default=default)
@api.model
def _service_checkin_date(self):
if 'checkin' in self._context:
return self._context['checkin']
return time.strftime(DEFAULT_SERVER_DATETIME_FORMAT)
@api.model
def _service_checkout_date(self):
if 'checkout' in self._context:
return self._context['checkout']
return time.strftime(DEFAULT_SERVER_DATETIME_FORMAT)
_name = 'hotel.service.line'
_description = 'hotel Service line'
service_line_id = fields.Many2one('sale.order.line', 'Service Line',
required=True, delegate=True,
ondelete='cascade')
folio_id = fields.Many2one('hotel.folio', 'Folio', ondelete='cascade')
ser_checkin_date = fields.Datetime('From Date', required=True,
default=_service_checkin_date)
ser_checkout_date = fields.Datetime('To Date', required=True,
default=_service_checkout_date)
@api.model
def create(self, vals, check=True):
"""
Overrides orm create method.
@param self: The object pointer
@param vals: dictionary of fields value.
@return: new record set for hotel service line.
"""
if 'folio_id' in vals:
folio = self.env['hotel.folio'].browse(vals['folio_id'])
vals.update({'order_id': folio.order_id.id})
return super(HotelServiceLine, self).create(vals)
@api.multi
def unlink(self):
"""
Overrides orm unlink method.
@param self: The object pointer
@return: True/False.
"""
s_line_obj = self.env['sale.order.line']
for line in self:
if line.service_line_id:
sale_unlink_obj = s_line_obj.browse([line.service_line_id.id])
sale_unlink_obj.unlink()
return super(HotelServiceLine, self).unlink()
@api.onchange('product_id')
def product_id_change(self):
'''
@param self: object pointer
'''
if self.product_id and self.folio_id.partner_id:
self.name = self.product_id.name
self.price_unit = self.product_id.list_price
self.product_uom = self.product_id.uom_id
tax_obj = self.env['account.tax']
prod = self.product_id
self.price_unit = tax_obj._fix_tax_included_price(prod.price,
prod.taxes_id,
self.tax_id)
@api.onchange('ser_checkin_date', 'ser_checkout_date')
def on_change_checkout(self):
'''
        When ser_checkin_date or ser_checkout_date changes, the dates are
        checked and the qty of the hotel service line is updated
-----------------------------------------------------------------
@param self: object pointer
'''
if not self.ser_checkin_date:
time_a = time.strftime(DEFAULT_SERVER_DATETIME_FORMAT)
self.ser_checkin_date = time_a
if not self.ser_checkout_date:
self.ser_checkout_date = time_a
if self.ser_checkout_date < self.ser_checkin_date:
            raise ValidationError(_('Checkout date must be greater than '
                                    'or equal to the check-in date.'))
if self.ser_checkin_date and self.ser_checkout_date:
date_a = time.strptime(self.ser_checkout_date,
DEFAULT_SERVER_DATETIME_FORMAT)[:5]
date_b = time.strptime(self.ser_checkin_date,
DEFAULT_SERVER_DATETIME_FORMAT)[:5]
diffDate = datetime.datetime(*date_a) - datetime.datetime(*date_b)
qty = diffDate.days + 1
self.product_uom_qty = qty
@api.multi
def button_confirm(self):
'''
@param self: object pointer
'''
for folio in self:
line = folio.service_line_id
x = line.button_confirm()
return x
@api.multi
def button_done(self):
'''
@param self: object pointer
'''
for folio in self:
line = folio.service_line_id
x = line.button_done()
return x
@api.multi
def copy_data(self, default=None):
'''
@param self: object pointer
@param default: dict of default values to be set
'''
sale_line_obj = self.env['sale.order.line'
].browse(self.service_line_id.id)
return sale_line_obj.copy_data(default=default)
class HotelServiceType(models.Model):
_name = "hotel.service.type"
_description = "Service Type"
name = fields.Char('Service Name', size=64, required=True)
service_id = fields.Many2one('hotel.service.type', 'Service Category')
child_id = fields.One2many('hotel.service.type', 'service_id',
'Child Categories')
@api.multi
def name_get(self):
def get_names(cat):
""" Return the list [cat.name, cat.service_id.name, ...] """
res = []
while cat:
res.append(cat.name)
cat = cat.service_id
return res
return [(cat.id, " / ".join(reversed(get_names(cat)))) for cat in self]
@api.model
def name_search(self, name, args=None, operator='ilike', limit=100):
if not args:
args = []
if name:
            # Be sure name_search is symmetric to name_get
category_names = name.split(' / ')
parents = list(category_names)
child = parents.pop()
domain = [('name', operator, child)]
if parents:
names_ids = self.name_search(' / '.join(parents), args=args,
operator='ilike', limit=limit)
category_ids = [name_id[0] for name_id in names_ids]
if operator in expression.NEGATIVE_TERM_OPERATORS:
categories = self.search([('id', 'not in', category_ids)])
domain = expression.OR([[('service_id', 'in',
categories.ids)], domain])
else:
domain = expression.AND([[('service_id', 'in',
category_ids)], domain])
for i in range(1, len(category_names)):
domain = [[('name', operator,
' / '.join(category_names[-1 - i:]))], domain]
if operator in expression.NEGATIVE_TERM_OPERATORS:
domain = expression.AND(domain)
else:
domain = expression.OR(domain)
categories = self.search(expression.AND([domain, args]),
limit=limit)
else:
categories = self.search(args, limit=limit)
return categories.name_get()
class HotelServices(models.Model):
_name = 'hotel.services'
_description = 'Hotel Services and its charges'
product_id = fields.Many2one('product.product', 'Service_id',
required=True, ondelete='cascade',
delegate=True)
categ_id = fields.Many2one('hotel.service.type', string='Service Category',
required=True)
product_manager = fields.Many2one('res.users', string='Product Manager')
class ResCompany(models.Model):
_inherit = 'res.company'
additional_hours = fields.Integer('Additional Hours',
help="Provide the min hours value for \
check in, checkout days, whatever the \
hours will be provided here based on \
that extra days will be calculated.")
class AccountInvoice(models.Model):
_inherit = 'account.invoice'
@api.model
def create(self, vals):
res = super(AccountInvoice, self).create(vals)
if self._context.get('folio_id'):
folio = self.env['hotel.folio'].browse(self._context['folio_id'])
folio.write({'hotel_invoice_id': res.id,
'invoice_status': 'invoiced'})
return res
| agpl-3.0 | -5,164,944,052,077,309,000 | 40.07645 | 79 | 0.507413 | false | 4.29681 | false | false | false |
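A minimal standalone sketch (not part of the record above) of the check-in/check-out duration rule that onchange_dates implements, assuming plain Python datetimes; the function name and sample dates are illustrative.
import datetime
def folio_duration(checkin, checkout, additional_hours=0):
    # Whole days between the two timestamps; any leftover seconds already
    # count as one extra day, and reaching the configured "additional hours"
    # threshold adds a further day, mirroring onchange_dates above.
    delta = checkout - checkin
    duration = delta.days if delta.seconds == 0 else delta.days + 1
    if additional_hours > 0 and (delta.seconds / 60) / 60 >= additional_hours:
        duration += 1
    return duration
# 2 days and 6 hours -> 3 days, plus 1 more because 6 h >= the 2 h threshold
print(folio_duration(datetime.datetime(2018, 5, 1, 12, 0),
                     datetime.datetime(2018, 5, 3, 18, 0),
                     additional_hours=2))   # -> 4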
go-bears/rally | samples/plugins/context/context_plugin.py | 2 | 3091 | # Copyright 2013: Mirantis Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from rally.common import log as logging
from rally import consts
from rally import osclients
from rally.task import context
LOG = logging.getLogger(__name__)
@context.configure(name="create_flavor", order=1000)
class CreateFlavorContext(context.Context):
"""Create sample flavor
    This sample context creates a flavor with the specified options before
    the task starts and deletes it after the task completes.
To create your own context plugin, inherit it from
rally.task.context.Context
"""
CONFIG_SCHEMA = {
"type": "object",
"$schema": consts.JSON_SCHEMA,
"additionalProperties": False,
"properties": {
"flavor_name": {
"type": "string",
},
"ram": {
"type": "integer",
"minimum": 1
},
"vcpus": {
"type": "integer",
"minimum": 1
},
"disk": {
"type": "integer",
"minimum": 1
}
}
}
def setup(self):
"""This method is called before the task start."""
try:
# use rally.osclients to get necessary client instance
nova = osclients.Clients(self.context["admin"]["endpoint"]).nova()
            # and then do what you need with this client
self.context["flavor"] = nova.flavors.create(
# context settings are stored in self.config
name=self.config.get("flavor_name", "rally_test_flavor"),
ram=self.config.get("ram", 1),
vcpus=self.config.get("vcpus", 1),
disk=self.config.get("disk", 1)).to_dict()
LOG.debug("Flavor with id '%s'" % self.context["flavor"]["id"])
except Exception as e:
msg = "Can't create flavor: %s" % e.message
if logging.is_debug():
LOG.exception(msg)
else:
LOG.warning(msg)
def cleanup(self):
"""This method is called after the task finish."""
try:
nova = osclients.Clients(self.context["admin"]["endpoint"]).nova()
nova.flavors.delete(self.context["flavor"]["id"])
LOG.debug("Flavor '%s' deleted" % self.context["flavor"]["id"])
except Exception as e:
msg = "Can't delete flavor: %s" % e.message
if logging.is_debug():
LOG.exception(msg)
else:
LOG.warning(msg)
| apache-2.0 | -6,515,033,223,136,696,000 | 34.125 | 78 | 0.572307 | false | 4.299026 | true | false | false |
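A hedged sketch of how a Rally task file might enable the create_flavor context registered above; the scenario name, its args and the users context are placeholders, and only the create_flavor keys come from CONFIG_SCHEMA.
task = {
    "NovaServers.boot_and_delete_server": [          # placeholder scenario
        {
            "args": {"flavor": {"name": "rally_test_flavor"},
                     "image": {"name": "cirros"}},
            "runner": {"type": "constant", "times": 1, "concurrency": 1},
            "context": {
                "users": {"tenants": 1, "users_per_tenant": 1},
                # keys below match CONFIG_SCHEMA of CreateFlavorContext
                "create_flavor": {
                    "flavor_name": "rally_test_flavor",
                    "ram": 512,
                    "vcpus": 1,
                    "disk": 1,
                },
            },
        }
    ]
}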
dennybaa/mistral | mistral/db/sqlalchemy/types.py | 1 | 3760 | # -*- coding: utf-8 -*-
#
# Copyright 2013 - Mirantis, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# This module implements SQLAlchemy-based types for dict and list
# expressed by json-strings
#
from oslo_serialization import jsonutils
import sqlalchemy as sa
from sqlalchemy.dialects import mysql
from sqlalchemy.ext import mutable
class JsonEncoded(sa.TypeDecorator):
"""Represents an immutable structure as a json-encoded string."""
impl = sa.Text
def process_bind_param(self, value, dialect):
if value is not None:
value = jsonutils.dumps(value)
return value
def process_result_value(self, value, dialect):
if value is not None:
value = jsonutils.loads(value)
return value
class MutableDict(mutable.Mutable, dict):
@classmethod
def coerce(cls, key, value):
"""Convert plain dictionaries to MutableDict."""
if not isinstance(value, MutableDict):
if isinstance(value, dict):
return MutableDict(value)
# this call will raise ValueError
return mutable.Mutable.coerce(key, value)
return value
def update(self, e=None, **f):
"""Detect dictionary update events and emit change events."""
dict.update(self, e, **f)
self.changed()
def __setitem__(self, key, value):
"""Detect dictionary set events and emit change events."""
dict.__setitem__(self, key, value)
self.changed()
def __delitem__(self, key):
"""Detect dictionary del events and emit change events."""
dict.__delitem__(self, key)
self.changed()
class MutableList(mutable.Mutable, list):
@classmethod
def coerce(cls, key, value):
"""Convert plain lists to MutableList."""
if not isinstance(value, MutableList):
if isinstance(value, list):
return MutableList(value)
# this call will raise ValueError
return mutable.Mutable.coerce(key, value)
return value
def __add__(self, value):
"""Detect list add events and emit change events."""
list.__add__(self, value)
self.changed()
def append(self, value):
"""Detect list add events and emit change events."""
list.append(self, value)
self.changed()
def __setitem__(self, key, value):
"""Detect list set events and emit change events."""
list.__setitem__(self, key, value)
self.changed()
def __delitem__(self, i):
"""Detect list del events and emit change events."""
list.__delitem__(self, i)
self.changed()
def JsonDictType():
"""Returns an SQLAlchemy Column Type suitable to store a Json dict."""
return MutableDict.as_mutable(JsonEncoded)
def JsonListType():
"""Returns an SQLAlchemy Column Type suitable to store a Json array."""
return MutableList.as_mutable(JsonEncoded)
def LongText():
# TODO(rakhmerov): Need to do for postgres.
return sa.Text().with_variant(mysql.LONGTEXT(), 'mysql')
class JsonEncodedLongText(JsonEncoded):
impl = LongText()
def JsonLongDictType():
return MutableDict.as_mutable(JsonEncodedLongText)
| apache-2.0 | -6,777,889,495,756,849,000 | 29.322581 | 77 | 0.645745 | false | 4.321839 | false | false | false |
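A self-contained sketch of the same pattern as JsonDictType above, assuming plain json instead of oslo_serialization and an SQLAlchemy 1.x-style declarative setup; it shows why the mutable wrapper matters: in-place edits are flagged to the session.
import json
import sqlalchemy as sa
from sqlalchemy.ext.declarative import declarative_base
from sqlalchemy.ext.mutable import MutableDict
from sqlalchemy.orm import sessionmaker
class JsonEncoded(sa.TypeDecorator):
    """Store a dict as a JSON-encoded string, like the type above."""
    impl = sa.Text
    def process_bind_param(self, value, dialect):
        return json.dumps(value) if value is not None else None
    def process_result_value(self, value, dialect):
        return json.loads(value) if value is not None else None
Base = declarative_base()
class Task(Base):
    __tablename__ = 'task'
    id = sa.Column(sa.Integer, primary_key=True)
    data = sa.Column(MutableDict.as_mutable(JsonEncoded))
engine = sa.create_engine('sqlite://')
Base.metadata.create_all(engine)
session = sessionmaker(bind=engine)()
task = Task(data={'state': 'RUNNING'})
session.add(task)
session.commit()
task.data['state'] = 'SUCCESS'   # tracked only because the dict is mutable-aware
session.commit()
print(session.query(Task).one().data)   # {'state': 'SUCCESS'}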
BD2KGenomics/toil-old | src/toil/batchSystems/mesos/test/__init__.py | 1 | 2292 | from abc import ABCMeta, abstractmethod
import logging
import shutil
import threading
import time
import subprocess
import multiprocessing
log = logging.getLogger(__name__)
class MesosTestSupport(object):
"""
A mixin for test cases that need a running Mesos master and slave on the local host
"""
def _startMesos(self, numCores=None):
if numCores is None:
numCores = multiprocessing.cpu_count()
shutil.rmtree('/tmp/mesos', ignore_errors=True)
self.master = self.MesosMasterThread(numCores)
self.master.start()
self.slave = self.MesosSlaveThread(numCores)
self.slave.start()
while self.master.popen is None or self.slave.popen is None:
log.info("Waiting for master and slave processes")
time.sleep(.1)
def _stopMesos(self):
self.slave.popen.kill()
self.slave.join()
self.master.popen.kill()
self.master.join()
class MesosThread(threading.Thread):
__metaclass__ = ABCMeta
# Lock is used because subprocess is NOT thread safe: http://tinyurl.com/pkp5pgq
lock = threading.Lock()
def __init__(self, numCores):
threading.Thread.__init__(self)
self.numCores = numCores
self.popen = None
@abstractmethod
def mesosCommand(self):
raise NotImplementedError
def run(self):
with self.lock:
self.popen = subprocess.Popen(self.mesosCommand())
self.popen.wait()
log.info('Exiting %s', self.__class__.__name__)
class MesosMasterThread(MesosThread):
def mesosCommand(self):
return ['mesos-master',
'--registry=in_memory',
'--ip=127.0.0.1',
'--allocation_interval=500ms']
class MesosSlaveThread(MesosThread):
def mesosCommand(self):
# NB: The --resources parameter forces this test to use a predictable number of cores, independent of how
# many cores the system running the test actually has.
return ['mesos-slave',
'--ip=127.0.0.1',
'--master=127.0.0.1:5050',
'--resources=cpus(*):%i' % self.numCores]
| mit | 4,546,301,206,306,912,000 | 32.217391 | 117 | 0.59075 | false | 4.071048 | false | false | false |
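A hedged sketch of how the mixin above could be used in a test case; it assumes the mesos-master/mesos-slave binaries are on PATH and that the mixin is importable as toil.batchSystems.mesos.test, so it is illustrative rather than a drop-in test.
import unittest
from toil.batchSystems.mesos.test import MesosTestSupport
class LocalMesosClusterTest(unittest.TestCase, MesosTestSupport):
    def setUp(self):
        # Bring up a single-host master/slave pair with two cores per test.
        self._startMesos(numCores=2)
    def tearDown(self):
        self._stopMesos()
    def test_cluster_processes_started(self):
        self.assertIsNotNone(self.master.popen)
        self.assertIsNotNone(self.slave.popen)
if __name__ == '__main__':
    unittest.main()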
ecolell/pfamserver | pfamserver/api/v0/pfam.py | 1 | 2139 | from flask_restplus import Resource, abort
from pfamserver.api.v0 import api, schemas
from pfamserver.services import pfam_service
from pfamserver.extensions import cache
from flask import request
from zlib import compress
from base64 import b64encode
ns = api.namespace('pfams', decorators=[
api.response(200, "success"),
api.response(400, "not found")])
@ns.errorhandler(pfam_service.PfamServiceError)
def handle_root_exception(error):
'''Return a custom message and 400 status code'''
return {'message': error.message}, 400
def make_cache_key(*args, **kwargs):
path = request.path
args = str(request.args.items())
return (path + args).encode('utf-8')
@ns.route('/<pfam>')
class PfamAAPI(Resource):
schema = schemas.PfamSchema()
@ns.response(200, "response")
@ns.doc('Obtain the pfam information.')
@cache.cached(timeout=3600)
def get(self, pfam):
pfam = pfam_service.get_pfam(pfam)
data, errors = self.schema.dump(pfam)
return data, 200
@ns.route('/<pfam>/sequence_descriptions')
class PfamASequenceDescriptionsAPI(Resource):
@ns.response(200, "response")
@ns.doc('Obtain a sequence_description list from a pfam.')
@cache.cached(timeout=3600, key_prefix=make_cache_key)
@ns.expect(schemas.pfam_a_query)
def get(self, pfam):
kwargs = schemas.pfam_a_query.parse_args()
with_pdb = kwargs['with_pdb']
sequence_descriptions = pfam_service.get_sequence_descriptions_from_pfam(pfam, with_pdb)
data = {'query': pfam,
'with_pdb': with_pdb,
'output': sequence_descriptions,
'size': len(sequence_descriptions)}
return data, 200
@ns.route('/<pfam>/stockholm')
class PfamAStockholmAPI(Resource):
@ns.response(200, "response")
    @ns.doc('Obtain a Stockholm alignment from a pfam.')
@cache.cached(timeout=3600, key_prefix=make_cache_key)
def get(self, pfam):
stockholm = pfam_service.get_stockholm_from_pfam(pfam)
data = {'query': pfam,
'output': b64encode(compress(stockholm))}
return data, 200
| agpl-3.0 | 22,869,584,441,214,160 | 30.455882 | 96 | 0.665732 | false | 3.363208 | false | false | false |
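A hedged client-side sketch for the three endpoints above; the base URL and the PF00131 accession are placeholders, while the with_pdb parameter and the output/size fields come from the handlers themselves (the stockholm payload is zlib-compressed and base64-encoded on the server).
import base64
import zlib
import requests
BASE = "http://localhost:5000/api/v0"   # assumption: wherever the API is mounted
print(requests.get(BASE + "/pfams/PF00131").json())
resp = requests.get(BASE + "/pfams/PF00131/sequence_descriptions",
                    params={"with_pdb": True})
print(resp.json()["size"])
data = requests.get(BASE + "/pfams/PF00131/stockholm").json()
alignment = zlib.decompress(base64.b64decode(data["output"]))
print(alignment[:120])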
PhillyPUG/phillypug | settings.py | 1 | 3579 | # Django settings for phillypug project.
DEBUG = True
TEMPLATE_DEBUG = DEBUG
ADMINS = (
('Jason Stelzer', '[email protected]'),
# ('Your Name', '[email protected]'),
)
MANAGERS = ADMINS
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.mysql', # Add 'postgresql_psycopg2', 'postgresql', 'mysql', 'sqlite3' or 'oracle'.
'NAME': 'phillypug', # Or path to database file if using sqlite3.
'USER': 'pugadmin', # Not used with sqlite3.
'PASSWORD': 'secret', # Not used with sqlite3.
'HOST': 'localhost', # Set to empty string for localhost. Not used with sqlite3.
'PORT': '', # Set to empty string for default. Not used with sqlite3.
}
}
# Local time zone for this installation. Choices can be found here:
# http://en.wikipedia.org/wiki/List_of_tz_zones_by_name
# although not all choices may be available on all operating systems.
# On Unix systems, a value of None will cause Django to use the same
# timezone as the operating system.
# If running in a Windows environment this must be set to the same as your
# system time zone.
TIME_ZONE = 'America/New_York'
# Language code for this installation. All choices can be found here:
# http://www.i18nguy.com/unicode/language-identifiers.html
LANGUAGE_CODE = 'en-us'
SITE_ID = 3
# If you set this to False, Django will make some optimizations so as not
# to load the internationalization machinery.
USE_I18N = True
# If you set this to False, Django will not format dates, numbers and
# calendars according to the current locale
USE_L10N = True
# Absolute path to the directory that holds media.
# Example: "/home/media/media.lawrence.com/"
MEDIA_ROOT = '/Users/cynic/projects/phillypug/phillypug/media'
# URL that handles the media served from MEDIA_ROOT. Make sure to use a
# trailing slash if there is a path component (optional in other cases).
# Examples: "http://media.lawrence.com", "http://example.com/media/"
MEDIA_URL = 'http://www.phillypug.org/static/'
# URL prefix for admin media -- CSS, JavaScript and images. Make sure to use a
# trailing slash.
# Examples: "http://foo.com/media/", "/media/".
ADMIN_MEDIA_PREFIX = '/media/'
# Make this unique, and don't share it with anybody.
SECRET_KEY = 'oq_dm6t5h99j7e$7vdbvh8xewt4idldki%7xsf-f#g4!#g8j0v'
# List of callables that know how to import templates from various sources.
TEMPLATE_LOADERS = (
'django.template.loaders.filesystem.Loader',
'django.template.loaders.app_directories.Loader',
# 'django.template.loaders.eggs.Loader',
)
MIDDLEWARE_CLASSES = (
'django.middleware.common.CommonMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
)
ROOT_URLCONF = 'urls'
TEMPLATE_DIRS = (
# Put strings here, like "/home/html/django_templates" or "C:/www/django/templates".
# Always use forward slashes, even on Windows.
# Don't forget to use absolute paths, not relative paths.
"/Users/cynic/projects/phillypug/phillypug/templates",
)
INSTALLED_APPS = (
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.sites',
'django.contrib.messages',
'django.contrib.admin',
'django.contrib.admindocs',
'memberlist',
'about',
'events',
)
MEETUP_CFG="/etc/phillypug/meetup.cfg"
| bsd-3-clause | 2,050,719,463,421,289,200 | 34.088235 | 120 | 0.690696 | false | 3.451302 | false | false | false |
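A hedged variation on the DATABASES block above, assuming the usual practice of reading credentials from the environment instead of committing them; the environment variable names are illustrative.
import os
DATABASES = {
    'default': {
        'ENGINE': 'django.db.backends.mysql',
        'NAME': 'phillypug',
        'USER': os.environ.get('PHILLYPUG_DB_USER', 'pugadmin'),
        'PASSWORD': os.environ.get('PHILLYPUG_DB_PASSWORD', ''),
        'HOST': 'localhost',
        'PORT': '',
    }
}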
madsbuch/GridDispenser | dispenserrun/printrun/printrun/gcoder.py | 1 | 21325 | #!/usr/bin/env python
# This file is copied from GCoder.
#
# GCoder is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# GCoder is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Printrun. If not, see <http://www.gnu.org/licenses/>.
import sys
import re
import math
import datetime
import logging
from array import array
from printrun_utils import install_locale
install_locale('pronterface')
gcode_parsed_args = ["x", "y", "e", "f", "z", "i", "j"]
gcode_parsed_nonargs = ["g", "t", "m", "n"]
to_parse = "".join(gcode_parsed_args + gcode_parsed_nonargs)
gcode_exp = re.compile("\([^\(\)]*\)|;.*|[/\*].*\n|([%s])([-+]?[0-9]*\.?[0-9]*)" % to_parse)
m114_exp = re.compile("\([^\(\)]*\)|[/\*].*\n|([XYZ]):?([-+]?[0-9]*\.?[0-9]*)")
specific_exp = "(?:\([^\(\)]*\))|(?:;.*)|(?:[/\*].*\n)|(%s[-+]?[0-9]*\.?[0-9]*)"
move_gcodes = ["G0", "G1", "G2", "G3"]
class PyLine(object):
__slots__ = ('x', 'y', 'z', 'e', 'f', 'i', 'j',
'raw', 'command', 'is_move',
'relative', 'relative_e',
'current_x', 'current_y', 'current_z', 'extruding',
'current_tool',
'gcview_end_vertex')
def __init__(self, l):
self.raw = l
def __getattr__(self, name):
return None
try:
import gcoder_line
Line = gcoder_line.GLine
except ImportError:
Line = PyLine
def find_specific_code(line, code):
exp = specific_exp % code
bits = [bit for bit in re.findall(exp, line.raw) if bit]
if not bits: return None
else: return float(bits[0][1:])
def S(line):
return find_specific_code(line, "S")
def P(line):
return find_specific_code(line, "P")
def split(line):
split_raw = gcode_exp.findall(line.raw.lower())
if not split_raw:
line.command = line.raw
line.is_move = False
logging.warning(_("raw G-Code line \"%s\" could not be parsed") % line.raw)
return [line.raw]
command = split_raw[0] if split_raw[0][0] != "n" else split_raw[1]
line.command = command[0].upper() + command[1]
line.is_move = line.command in move_gcodes
return split_raw
def parse_coordinates(line, split_raw, imperial = False, force = False):
# Not a G-line, we don't want to parse its arguments
if not force and line.command[0] != "G":
return
unit_factor = 25.4 if imperial else 1
for bit in split_raw:
code = bit[0]
if code not in gcode_parsed_nonargs and bit[1]:
setattr(line, code, unit_factor * float(bit[1]))
class Layer(list):
__slots__ = ("duration", "z")
def __init__(self, lines, z = None):
super(Layer, self).__init__(lines)
self.z = z
class GCode(object):
lines = None
layers = None
all_layers = None
layer_idxs = None
line_idxs = None
append_layer = None
append_layer_id = None
imperial = False
relative = False
relative_e = False
current_tool = 0
# Home position: current absolute position counted from machine origin
home_x = 0
home_y = 0
home_z = 0
# Current position: current absolute position counted from machine origin
current_x = 0
current_y = 0
current_z = 0
# For E this is the absolute position from machine start
current_e = 0
# Current feedrate
current_f = 0
# Offset: current offset between the machine origin and the machine current
# absolute coordinate system (as shifted by G92s)
offset_x = 0
offset_y = 0
offset_z = 0
offset_e = 0
# Expected behavior:
# - G28 X => X axis is homed, offset_x <- 0, current_x <- home_x
# - G92 Xk => X axis does not move, so current_x does not change
# and offset_x <- current_x - k,
# - absolute G1 Xk => X axis moves, current_x <- offset_x + k
# How to get...
# current abs X from machine origin: current_x
# current abs X in machine current coordinate system: current_x - offset_x
filament_length = None
duration = None
xmin = None
xmax = None
ymin = None
ymax = None
zmin = None
zmax = None
width = None
depth = None
height = None
est_layer_height = None
# abs_x is the current absolute X in machine current coordinate system
# (after the various G92 transformations) and can be used to store the
# absolute position of the head at a given time
def _get_abs_x(self):
return self.current_x - self.offset_x
abs_x = property(_get_abs_x)
def _get_abs_y(self):
return self.current_y - self.offset_y
abs_y = property(_get_abs_y)
def _get_abs_z(self):
return self.current_z - self.offset_z
abs_z = property(_get_abs_z)
def _get_abs_e(self):
return self.current_e - self.offset_e
abs_e = property(_get_abs_e)
def _get_abs_pos(self):
return (self.abs_x, self.abs_y, self.abs_z)
abs_pos = property(_get_abs_pos)
def _get_current_pos(self):
return (self.current_x, self.current_y, self.current_z)
current_pos = property(_get_current_pos)
def _get_home_pos(self):
return (self.home_x, self.home_y, self.home_z)
def _set_home_pos(self, home_pos):
if home_pos:
self.home_x, self.home_y, self.home_z = home_pos
home_pos = property(_get_home_pos, _set_home_pos)
def __init__(self, data = None, home_pos = None):
self.home_pos = home_pos
if data:
self.lines = [Line(l2) for l2 in
(l.strip() for l in data)
if l2]
self._preprocess_lines()
self.filament_length = self._preprocess_extrusion()
self._create_layers()
self._preprocess_layers()
else:
self.lines = []
def __len__(self):
return len(self.line_idxs)
def __iter__(self):
return self.lines.__iter__()
def append(self, command, store = True):
command = command.strip()
if not command:
return
gline = Line(command)
self._preprocess_lines([gline])
self._preprocess_extrusion([gline])
if store:
self.lines.append(gline)
self.append_layer.append(gline)
self.layer_idxs.append(self.append_layer_id)
self.line_idxs.append(len(self.append_layer))
return gline
def _preprocess_lines(self, lines = None):
"""Checks for imperial/relativeness settings and tool changes"""
if not lines:
lines = self.lines
imperial = self.imperial
relative = self.relative
relative_e = self.relative_e
current_tool = self.current_tool
current_x = self.current_x
current_y = self.current_y
current_z = self.current_z
offset_x = self.offset_x
offset_y = self.offset_y
offset_z = self.offset_z
for line in lines:
split_raw = split(line)
if not line.command:
continue
# Update properties
if line.is_move:
line.relative = relative
line.relative_e = relative_e
line.current_tool = current_tool
elif line.command == "G20":
imperial = True
elif line.command == "G21":
imperial = False
elif line.command == "G90":
relative = False
relative_e = False
elif line.command == "G91":
relative = True
relative_e = True
elif line.command == "M82":
relative_e = False
elif line.command == "M83":
relative_e = True
elif line.command[0] == "T":
current_tool = int(line.command[1:])
if line.command[0] == "G":
parse_coordinates(line, split_raw, imperial)
# Compute current position
if line.is_move:
x = line.x
y = line.y
z = line.z
if line.f is not None:
self.current_f = line.f
if line.relative:
x = current_x + (x or 0)
y = current_y + (y or 0)
z = current_z + (z or 0)
else:
if x is not None: x = x + offset_x
if y is not None: y = y + offset_y
if z is not None: z = z + offset_z
if x is not None: current_x = x
if y is not None: current_y = y
if z is not None: current_z = z
elif line.command == "G28":
home_all = not any([line.x, line.y, line.z])
if home_all or line.x is not None:
offset_x = 0
current_x = self.home_x
if home_all or line.y is not None:
offset_y = 0
current_y = self.home_y
if home_all or line.z is not None:
offset_z = 0
current_z = self.home_z
elif line.command == "G92":
if line.x is not None: offset_x = current_x - line.x
if line.y is not None: offset_y = current_y - line.y
if line.z is not None: offset_z = current_z - line.z
line.current_x = current_x
line.current_y = current_y
line.current_z = current_z
self.imperial = imperial
self.relative = relative
self.relative_e = relative_e
self.current_tool = current_tool
self.current_x = current_x
self.current_y = current_y
self.current_z = current_z
self.offset_x = offset_x
self.offset_y = offset_y
self.offset_z = offset_z
def _preprocess_extrusion(self, lines = None):
if not lines:
lines = self.lines
current_e = self.current_e
offset_e = self.offset_e
total_e = 0
max_e = 0
for line in lines:
if line.e is None:
continue
if line.is_move:
if line.relative_e:
line.extruding = line.e > 0
total_e += line.e
current_e += line.e
else:
new_e = line.e + offset_e
line.extruding = new_e > current_e
total_e += new_e - current_e
current_e = new_e
max_e = max(max_e, total_e)
elif line.command == "G92":
offset_e = current_e - line.e
self.current_e = current_e
self.offset_e = offset_e
return max_e
# FIXME : looks like this needs to be tested with list Z on move
def _create_layers(self):
layers = {}
all_layers = []
layer_idxs = []
line_idxs = []
layer_id = 0
layer_line = 0
last_layer_z = None
prev_z = None
prev_base_z = (None, None)
cur_z = None
cur_lines = []
for line in self.lines:
if line.command == "G92" and line.z is not None:
cur_z = line.z
elif line.is_move:
if line.z is not None:
if line.relative:
cur_z += line.z
else:
cur_z = line.z
# FIXME: the logic behind this code seems to work, but it might be
# broken
if cur_z != prev_z:
if prev_z is not None and last_layer_z is not None:
offset = self.est_layer_height if self.est_layer_height else 0.01
if abs(prev_z - last_layer_z) < offset:
if self.est_layer_height is None:
zs = sorted([l.z for l in all_layers if l.z is not None])
heights = [round(zs[i + 1] - zs[i], 3) for i in range(len(zs) - 1)]
if len(heights) >= 2: self.est_layer_height = heights[1]
elif heights: self.est_layer_height = heights[0]
else: self.est_layer_height = 0.1
base_z = round(prev_z - (prev_z % self.est_layer_height), 2)
else:
base_z = round(prev_z, 2)
else:
base_z = prev_z
if base_z != prev_base_z:
all_layers.append(Layer(cur_lines, base_z))
old_lines = layers.get(base_z, [])
old_lines += cur_lines
layers[base_z] = old_lines
cur_lines = []
layer_id += 1
layer_line = 0
last_layer_z = base_z
prev_base_z = base_z
cur_lines.append(line)
layer_idxs.append(layer_id)
line_idxs.append(layer_line)
layer_line += 1
prev_z = cur_z
if cur_lines:
all_layers.append(Layer(cur_lines, prev_z))
old_lines = layers.get(prev_z, [])
old_lines += cur_lines
layers[prev_z] = old_lines
for zindex in layers.keys():
cur_lines = layers[zindex]
has_movement = False
for l in layers[zindex]:
if l.is_move and l.e is not None:
has_movement = True
break
if has_movement:
layers[zindex] = Layer(cur_lines, zindex)
else:
del layers[zindex]
self.append_layer_id = len(all_layers)
self.append_layer = Layer([])
all_layers.append(self.append_layer)
self.all_layers = all_layers
self.layers = layers
self.layer_idxs = array('I', layer_idxs)
self.line_idxs = array('I', line_idxs)
def idxs(self, i):
return self.layer_idxs[i], self.line_idxs[i]
def num_layers(self):
return len(self.layers)
def _preprocess_layers(self):
xmin = float("inf")
ymin = float("inf")
zmin = 0
xmax = float("-inf")
ymax = float("-inf")
zmax = float("-inf")
# Count moves without extrusion if filament length is lower than 0
count_noe = self.filament_length <= 0
for line in self.lines:
if line.is_move and (line.extruding or count_noe):
if line.current_x is not None:
xmin = min(xmin, line.current_x)
xmax = max(xmax, line.current_x)
if line.current_y is not None:
ymin = min(ymin, line.current_y)
ymax = max(ymax, line.current_y)
if line.current_z is not None:
zmin = min(zmin, line.current_z)
zmax = max(zmax, line.current_z)
self.xmin = xmin if not math.isinf(xmin) else 0
self.xmax = xmax if not math.isinf(xmax) else 0
self.ymin = ymin if not math.isinf(ymin) else 0
self.ymax = ymax if not math.isinf(ymax) else 0
self.zmin = zmin if not math.isinf(zmin) else 0
self.zmax = zmax if not math.isinf(zmax) else 0
self.width = self.xmax - self.xmin
self.depth = self.ymax - self.ymin
self.height = self.zmax - self.zmin
def estimate_duration(self):
if self.duration is not None:
return self.duration
lastx = lasty = lastz = laste = lastf = 0.0
lastdx = 0
lastdy = 0
x = y = e = f = 0.0
currenttravel = 0.0
moveduration = 0.0
totalduration = 0.0
acceleration = 2000.0 # mm/s^2
layerbeginduration = 0.0
#TODO:
# get device caps from firmware: max speed, acceleration/axis
# (including extruder)
# calculate the maximum move duration accounting for above ;)
for layer in self.all_layers:
for line in layer:
if line.command not in ["G1", "G0", "G4"]:
continue
if line.command == "G4":
moveduration = line.p
if not moveduration:
continue
else:
moveduration /= 1000.0
else:
x = line.x if line.x is not None else lastx
y = line.y if line.y is not None else lasty
z = line.z if line.z is not None else lastz
e = line.e if line.e is not None else laste
# mm/s vs mm/m => divide by 60
f = line.f / 60.0 if line.f is not None else lastf
# given last feedrate and current feedrate calculate the
# distance needed to achieve current feedrate.
# if travel is longer than req'd distance, then subtract
# distance to achieve full speed, and add the time it took
# to get there.
# then calculate the time taken to complete the remaining
# distance
# FIXME: this code has been proven to be super wrong when 2
                    # subsequent moves are in opposite directions, as requested
                    # speed is constant but printer has to fully decelerate
# and reaccelerate
# The following code tries to fix it by forcing a full
# reacceleration if this move is in the opposite direction
# of the previous one
dx = x - lastx
dy = y - lasty
if dx * lastdx + dy * lastdy <= 0:
lastf = 0
currenttravel = math.hypot(dx, dy)
if currenttravel == 0:
if line.z is not None:
currenttravel = abs(line.z) if line.relative else abs(line.z - lastz)
elif line.e is not None:
currenttravel = abs(line.e) if line.relative_e else abs(line.e - laste)
                    # Feedrate hasn't changed, no acceleration/deceleration planned
if f == lastf:
moveduration = currenttravel / f if f != 0 else 0.
else:
# FIXME: review this better
# this looks wrong : there's little chance that the feedrate we'll decelerate to is the previous feedrate
# shouldn't we instead look at three consecutive moves ?
distance = 2 * abs(((lastf + f) * (f - lastf) * 0.5) / acceleration) # multiply by 2 because we have to accelerate and decelerate
if distance <= currenttravel and lastf + f != 0 and f != 0:
moveduration = 2 * distance / (lastf + f) # This is distance / mean(lastf, f)
moveduration += (currenttravel - distance) / f
else:
moveduration = 2 * currenttravel / (lastf + f) # This is currenttravel / mean(lastf, f)
# FIXME: probably a little bit optimistic, but probably a much better estimate than the previous one:
# moveduration = math.sqrt(2 * distance / acceleration) # probably buggy : not taking actual travel into account
lastdx = dx
lastdy = dy
totalduration += moveduration
lastx = x
lasty = y
lastz = z
laste = e
lastf = f
layer.duration = totalduration - layerbeginduration
layerbeginduration = totalduration
totaltime = datetime.timedelta(seconds = int(totalduration))
self.duration = totaltime
return "%d layers, %s" % (len(self.layers), str(totaltime))
def main():
if len(sys.argv) < 2:
print "usage: %s filename.gcode" % sys.argv[0]
return
print "Line object size:", sys.getsizeof(Line("G0 X0"))
gcode = GCode(open(sys.argv[1], "rU"))
print "Dimensions:"
xdims = (gcode.xmin, gcode.xmax, gcode.width)
print "\tX: %0.02f - %0.02f (%0.02f)" % xdims
ydims = (gcode.ymin, gcode.ymax, gcode.depth)
print "\tY: %0.02f - %0.02f (%0.02f)" % ydims
zdims = (gcode.zmin, gcode.zmax, gcode.height)
print "\tZ: %0.02f - %0.02f (%0.02f)" % zdims
print "Filament used: %0.02fmm" % gcode.filament_length
print "Number of layers: %d" % gcode.num_layers()
print "Estimated duration: %s" % gcode.estimate_duration()
if __name__ == '__main__':
main()
| gpl-2.0 | -9,158,954,328,203,161,000 | 35.205433 | 154 | 0.516952 | false | 3.82237 | false | false | false |
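A standalone restatement (assumption: simplified, outside the printrun package) of the per-move duration estimate in GCode.estimate_duration above, using the same 2000 mm/s^2 default acceleration; it reproduces the heuristic as written, including the caveats flagged in the FIXME comments.
def move_duration(distance, f_prev, f, acceleration=2000.0):
    # distance in mm, feedrates in mm/s, result in seconds
    if f == f_prev:
        return distance / f if f else 0.0
    # distance needed to change speed from f_prev to f (accelerate + decelerate)
    accel_distance = 2 * abs(((f_prev + f) * (f - f_prev) * 0.5) / acceleration)
    if accel_distance <= distance and f_prev + f != 0 and f != 0:
        return 2 * accel_distance / (f_prev + f) + (distance - accel_distance) / f
    return 2 * distance / (f_prev + f)
print(move_duration(10.0, 0.0, 50.0))   # about 0.225 s for a 10 mm move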
Weffe/NigelThornberry-Bot | PickleList.py | 1 | 2696 | __author__ = 'Rogelio Negrete - Weffe'
import pickle
import random
import time
from datetime import datetime
class PickleList:
def __init__(self, nameoffile):
self.nameoffile = nameoffile
self.pickle_list = self.load_pickle_list()
def load_pickle_list(self):
with open(self.nameoffile, "rb") as file:
try:
pickle_list = pickle.load(file)
return pickle_list
except:
return []
def add_to_pickle_list(self, user_input):
        # the pickle list is a list of tuples
        # user_input looks like (permalink, time_posted); the list is [(permalink, time_posted), ...]
self.pickle_list.append(user_input)
def save_pickle_list(self):
# save to existing list
with open(self.nameoffile, "wb") as file:
pickle.dump(self.pickle_list, file)
def manual_add_to_pickle_list(self):
endLoop = False
while (endLoop != True):
user_input = input("Enter in (Nigel) Link [Enter DONE to stop]: ")
            if user_input.lower() != 'done':
self.pickle_list.append(user_input)
print(self.pickle_list)
else:
endLoop = True
# save to existing list
with open(self.nameoffile, "wb") as file:
pickle.dump(self.pickle_list, file)
def empty_pickle_file(self):
#cheeky way of deleting the file content
#just open the file and save an empty list to it - which overwrites everything
with open(self.nameoffile, "wb") as file:
pickle.dump([], file)
def print_pickle(self):
print(self.pickle_list)
#-----------------------
#NIGEL Related method(s)
def choose_random_nigel_pic(self):
#choose a random nigel picture from the pickle list
return random.choice(self.pickle_list)
#----------------------
#permalinks Related method(s)
def is_link_in_list(self, permalink):
linkInList = False
#permalink list looks like: (permalink, date_posted)
for key in [y[0] for y in self.pickle_list]:
if permalink == key:
#print('Found old match. Ignoring comment. Link: ' + permalink)
linkInList = True
return linkInList
def clean_up_permalink_list(self):
day_ago = datetime.fromtimestamp(time.time() - (24 * 60 * 60)) # find date for 24 hours ago
for tupleItem in self.pickle_list:
permalink_date = tupleItem[1]
time_delta = int((day_ago - permalink_date).days) + 1
if time_delta >= 2:
#print("Found Old Link.")
self.pickle_list.remove(tupleItem)
| mit | 725,602,132,936,203,400 | 30.717647 | 100 | 0.570475 | false | 3.824113 | false | false | false |
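A hedged usage sketch for PickleList; the file name and permalink are made up, the pickle file is seeded first because __init__ opens it for reading, and entries are (permalink, date_posted) tuples as is_link_in_list expects.
import pickle
from datetime import datetime
from PickleList import PickleList   # assumption: run next to PickleList.py
with open("permalinks.pickle", "wb") as f:   # seed an empty list so loading succeeds
    pickle.dump([], f)
seen = PickleList("permalinks.pickle")
seen.add_to_pickle_list(("/r/pics/comments/abc123/", datetime.now()))
seen.save_pickle_list()
print(seen.is_link_in_list("/r/pics/comments/abc123/"))   # True
seen.clean_up_permalink_list()   # drops entries older than roughly a day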
tonningp/spritewalker | assets/space/ch09/space.py | 1 | 3672 | """ space.py
simulate a spacecraft with
no traction at all
"""
import pygame, math
pygame.init()
class Ship(pygame.sprite.Sprite):
def __init__(self, screen):
pygame.sprite.Sprite.__init__(self)
self.screen = screen
self.imageThrust = pygame.image.load("shipThrust.png")
self.imageThrust = self.imageThrust.convert()
self.imageCruise = pygame.image.load("shipCruise.png")
self.imageCruise = self.imageCruise.convert()
self.imageLeft = pygame.image.load("shipLeft.png")
self.imageLeft = self.imageLeft.convert()
self.imageRight = pygame.image.load("shipRight.png")
self.imageRight = self.imageRight.convert()
self.imageMaster = self.imageCruise
self.image = self.imageMaster
self.rect = self.image.get_rect()
self.x = 100
self.y = 100
self.dx = 0
self.dy = 0
self.dir = 0
self.turnRate = 5
self.thrust = 0
def update(self):
self.checkKeys()
self.rotate()
self.calcVector()
self.setPos()
self.checkBounds()
self.rect.center = (self.x, self.y)
def checkKeys(self):
keys = pygame.key.get_pressed()
self.imageMaster = self.imageCruise
if keys[pygame.K_RIGHT]:
self.dir -= self.turnRate
if self.dir < 0:
self.dir = 360 - self.turnRate
self.imageMaster = self.imageRight
if keys[pygame.K_LEFT]:
self.dir += self.turnRate
if self.dir > 360:
self.dir = self.turnRate
self.imageMaster = self.imageLeft
if keys[pygame.K_UP]:
self.thrust = .1
self.imageMaster = self.imageThrust
else:
self.thrust = 0
def rotate(self):
oldCenter = self.rect.center
self.image = pygame.transform.rotate(self.imageMaster, self.dir)
self.rect = self.image.get_rect()
self.rect.center = oldCenter
def calcVector(self):
radians = self.dir * math.pi / 180
thrustDx = self.thrust * math.cos(radians)
thrustDy = self.thrust * math.sin(radians)
thrustDy *= -1
self.dx += thrustDx
self.dy += thrustDy
self.speed = math.sqrt((self.dx * self.dx) + (self.dy * self.dy))
def setPos(self):
self.x += self.dx
self.y += self.dy
def checkBounds(self):
screen = self.screen
if self.x > screen.get_width():
self.x = 0
if self.x < 0:
self.x = screen.get_width()
if self.y > screen.get_height():
self.y = 0
if self.y < 0:
self.y = screen.get_height()
def main():
screen = pygame.display.set_mode((640, 480))
pygame.display.set_caption("Space craft")
background = pygame.Surface(screen.get_size())
background.fill((0, 0, 0))
screen.blit(background, (0, 0))
ship = Ship(screen)
allSprites = pygame.sprite.Group(ship)
clock = pygame.time.Clock()
keepGoing = True
while keepGoing:
clock.tick(30)
for event in pygame.event.get():
if event.type == pygame.QUIT:
keepGoing = False
allSprites.clear(screen, background)
allSprites.update()
allSprites.draw(screen)
pygame.display.flip()
if __name__ == "__main__":
main()
| gpl-3.0 | -5,523,270,527,935,922,000 | 27.869919 | 73 | 0.528322 | false | 3.817048 | false | false | false |
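A standalone sketch (not part of space.py) of the heading-to-velocity conversion in Ship.calcVector; the y component is negated because screen coordinates grow downward, and the function name is illustrative.
import math
def thrust_components(direction_deg, thrust):
    radians = direction_deg * math.pi / 180
    dx = thrust * math.cos(radians)
    dy = -thrust * math.sin(radians)   # flip sign: +y points down on screen
    return dx, dy
# Facing 90 degrees ("up"): all thrust goes into a negative dy
print(thrust_components(90, 0.1))   # approximately (0.0, -0.1)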