# Code dataset dump. Record columns: repo_name, path, copies, size, text,
# license, hash, line_mean, line_max, alpha_frac, autogenerated, ratio,
# config_test, has_no_keywords, few_assignments. Each file below starts with a
# one-line header giving its repo, path, and license.
# ===== umuzungu/zipline :: zipline/examples/buyapple.py (license: apache-2.0) =====
#!/usr/bin/env python
#
# Copyright 2014 Quantopian, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from zipline.api import order, record, symbol


def initialize(context):
    pass


def handle_data(context, data):
    order(symbol('AAPL'), 10)
    record(AAPL=data.current(symbol('AAPL'), 'price'))


# Note: this function can be removed if running
# this algorithm on quantopian.com
def analyze(context=None, results=None):
    import matplotlib.pyplot as plt

    # Plot the portfolio and asset data.
    ax1 = plt.subplot(211)
    results.portfolio_value.plot(ax=ax1)
    ax1.set_ylabel('Portfolio value (USD)')
    ax2 = plt.subplot(212, sharex=ax1)
    results.AAPL.plot(ax=ax2)
    ax2.set_ylabel('AAPL price (USD)')

    # Show the plot.
    plt.gcf().set_size_inches(18, 8)
    plt.show()


def _test_args():
    """Extra arguments to use when zipline's automated tests run this example.
    """
    import pandas as pd

    return {
        'start': pd.Timestamp('2014-01-01', tz='utc'),
        'end': pd.Timestamp('2014-11-01', tz='utc'),
    }
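
# --- Run sketch (added for illustration): one plausible way to execute this
# example with zipline's standard command line; the dates mirror _test_args()
# above.
#
#   zipline run -f buyapple.py --start 2014-1-1 --end 2014-11-1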
# ===== sophacles/todo-scripts :: todo.py (license: mit) =====
import re
import subprocess
from datetime import datetime as DT, date

CONFIG_FILE = "~/.todo.cfg"

_tagTest = re.compile(r'.+:.+')
_prioTest = re.compile(r'\([A-Z]\)$')
_validPrio = re.compile(r'[A-Z]')


def _makeDate(word):
    if word is None: return None
    if isinstance(word, date): return word
    return DT.strptime(word, "%Y-%m-%d").date()


def _isDate(word):
    # print "date testing:", word
    try:
        _makeDate(word)
    except Exception, e:
        # print "Failed date parse on: %s" % (word,)
        # print "exception", e
        return False
    return True


def _isPriority(word):
    return bool(_prioTest.match(word))


def _isProject(word):
    return word.startswith("+")


def _isContext(word):
    return word.startswith("@")


def _isTag(word):
    return bool(_tagTest.search(word))


def get_todo_env(key):
    cmd = ". %s; echo $%s"
    cmd %= (CONFIG_FILE, key)
    var = subprocess.check_output([cmd], shell=True)
    return var.strip()


class Task(object):
    def __init__(self, task="", projects=None, contexts=None, tags=None, autodate=False):
        self.priority = ''
        self._create = None
        self._finish = None
        self.task = task
        self.done = False
        self.projects = projects if projects else list()
        self.contexts = contexts if contexts else list()
        self.tags = tags if tags else dict()
        if autodate:
            self.create = date.today()

    # can "undo" - pass false
    def do(self, value=True):
        if bool(value):
            self.done = True
            self.finish = DT.now().date()
        else:
            self.done = False
            self.finish = None

    @property
    def priority(self):
        return self._priority

    @priority.setter
    def priority(self, value):
        if not value:
            self._priority = ""
            return
        value = value.upper()
        if _isPriority(value):
            self._priority = value
        elif len(value) == 1 and _validPrio.match(value):
            self._priority = "(%s)" % value
        else:
            raise Exception('Bad prio')

    @property
    def create(self):
        return self._create

    @create.setter
    def create(self, val):
        self._create = _makeDate(val)

    @property
    def finish(self):
        return self._finish

    @finish.setter
    def finish(self, val):
        self._finish = _makeDate(val)

    def __str__(self):
        # Question - strip prio as option?
        tok = []
        finish = str(self.finish) if self.finish else ""
        create = str(self.create) if self.create else ""
        if self.done:
            tok.append("x")
            # strip prio because:
            #   todo.sh do [TASK]
            # does it
            tok.extend([finish, create, self.task])
        else:
            tok.extend([self.priority, create, self.task])
        tok.extend(self.projects)
        tok.extend(self.contexts)
        tok.extend("%s:%s" % (k, v) for k, v in self.tags.iteritems())
        return " ".join(v for v in tok if v)

    @staticmethod
    def parse(todoline):
        leading_space = False
        bare_words = []
        task = Task()
        if todoline.strip(' \t\n') == "":
            return None
        if todoline.startswith(' '):
            leading_space = True
        tokens = todoline.split(" ")
        if not leading_space:
            # get rid of internal "" tokens
            tokens = [tok for tok in tokens if tok]
        else:
            # preserve leading ws
            leader = []
            while tokens[0] == '':
                leader.append(tokens.pop(0))
            tokens.insert(0, " ".join(leader))
        # Deal with leading space weirdness
        if not leading_space:
            if tokens[0] == 'x':
                task.done = True
                tokens.pop(0)
                if _isDate(tokens[0]):
                    task.finish = tokens.pop(0)
            if _isPriority(tokens[0]):
                task.priority = tokens.pop(0)
        else:
            bare_words.append(tokens.pop(0))
        # creation date still valid for leading space... TODO: verify
        if _isDate(tokens[0]):
            task.create = tokens.pop(0)
        # Now the meat
        for word in tokens:
            if _isProject(word):
                task.projects.append(word)
            elif _isContext(word):
                task.contexts.append(word)
            elif _isTag(word):
                k, v = word.partition(":")[::2]
                task.tags[k] = v
            else:
                bare_words.append(word)
        task.task = " ".join(bare_words)
        return task


class TodoFile(object):
    def __init__(self, filename=""):
        self.filename = filename

    def __str__(self):
        return "\n".join(str(task) for task in self.tasks) + "\n"

    def open(self):
        try:
            with open(self.filename, 'r') as fd:
                self.tasks = [Task.parse(x.strip()) for x in fd.readlines()]
            self.tasks = [x for x in self.tasks if x is not None]
        except:
            self.tasks = []

    def save(self):
        with open(self.filename, 'w') as fd:
            fd.write(str(self))
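

# --- Usage sketch (added for illustration; the todo line is made up) ---
if __name__ == "__main__":
    t = Task.parse("(A) 2014-01-02 call mom +family @phone due:2014-01-05")
    print t           # serializes back to a todo.txt-style line
    t.do()            # mark done; stamps today's date as the finish date
    print t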
# ===== diminishedprime/dotfiles :: i3/i3-pretty-mode/i3-pretty-mode.py (license: mit) =====
#!/usr/bin/env python3
import i3ipc
import tkinter as Tkinter  # Python 3 module name (the original "import Tkinter" is the Python 2 spelling)
import sys
import re
from functools import reduce


def parse_entry(acc, entry):
    key, value = entry.split("=")
    acc[key] = value
    return acc


def parse_args(args):
    rest = args[1:]
    arg_map = reduce(parse_entry, rest, {})
    return arg_map


args = parse_args(sys.argv)
i3 = i3ipc.Connection()


def center(toplevel):
    toplevel.update_idletasks()
    w = toplevel.winfo_screenwidth()
    h = toplevel.winfo_screenheight()
    size = tuple(int(_) for _ in toplevel.geometry().split('+')[0].split('x'))
    x = w/2 - size[0]/2
    y = h/2 - size[1]/2
    toplevel.geometry("%dx%d+%d+%d" % (size + (x, y)))


def show_window(label_list):
    fontName = args.get("--font", "Arial")
    fontSize = int(float(args.get("--font_size", "12")))
    regexFilter = args.get("--ignore_titles", "$^")
    regex = re.compile(regexFilter)
    if (regex.match(label_list[0]) != None):
        return
    root = Tkinter.Tk()
    root.attributes("-type", "dock")
    width = int(float(args.get("--min_width", "500")))
    root.minsize(width=width, height=1)
    labelText = reduce(lambda acc, s: acc + '\n' + s, label_list[1:])
    label = Tkinter.Label(root, text=labelText, justify=Tkinter.LEFT, anchor='w')
    foreground = args.get("--foreground", "#000000")
    background = args.get("--background", "#ffffff")
    label.config( font=(fontName, fontSize)
                , background=background
                , foreground=foreground)
    label.pack(fill='both')
    root.title("i3-pretty-mode-title")
    center(root)
    # TODO(me) figure out scaling
    # root.tk.call('tk', 'scaling', 4.0)
    return root


def destroy_root():
    global lastRoot
    if (lastRoot != None):
        lastRoot.destroy()
        lastRoot = None


lastRoot = None


def on_mode(i3, e):
    global lastRoot
    mode_string = e.change
    destroy_root()
    if (mode_string != "default"):
        label_list = mode_string.split(" :: ")
        lastRoot = show_window(label_list)


# Subscribe to "mode" events
i3.on("mode", on_mode)

# Start the main loop and wait for events to come in.
i3.main()
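
# --- Illustrative i3 wiring (added; the exact bindings are up to the user) ---
# The script renders whatever comes after each " :: " separator in the active
# mode's name, so a mode defined in the i3 config as, e.g.,
#   mode "resize :: h: shrink :: l: grow" { ... }
# shows "h: shrink" and "l: grow" as lines in the popup, while "resize" is the
# title that --ignore_titles can filter. The script itself can be launched
# from the i3 config, for example:
#   exec_always --no-startup-id ~/path/to/i3-pretty-mode.py --font_size=14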
# ===== Narfinger/confical :: confical.py (license: gpl-3.0) =====
#!/usr/bin/python
from bs4 import BeautifulSoup
from datetime import date
import datetime
from icalendar import Calendar, Event
import pytz
import urllib.request, urllib.error, urllib.parse
import re

DEBUG = False
start_index_conf_shortnames = 2
if DEBUG:
    OUTFILE = '/tmp/conferences.ics'
else:
    OUTFILE = '/var/www/confical/conferences.ics'


class Conf():
    def __init__(self, title, deadline, description, venue, dates=("", "")):
        self.title = title
        self.deadline = deadline
        self.description = description
        self.venue = venue
        self.dates = dates

    def getDescription(self):
        string = self.description + " at "
        if self.venue:
            string += self.venue
        string += "\n From: " + self.dates[0].strftime('%d.%m.%Y')
        if len(self.dates) == 2:
            string += " to: " + self.dates[1].strftime('%d.%m.%Y')
        return string

    def __repr__(self):
        return self.title + ": " + str(self.deadline.day) + "." + str(self.deadline.month)

    def isValid(self):
        return True
        # return (self.deadline!="None") and (self.dates[1]!="None")


class Website(object):
    def parse(self):
        site = self.download()
        conferences = self.parseConferencesFromString(site)
        return conferences


class Friedetzky(Website):
    URL = 'http://www.dur.ac.uk/tom.friedetzky/conf.html'

    def download(self):
        response = urllib.request.urlopen(self.URL)
        html = response.read()
        return html

    def parseConferencesFromString(self, string):
        soup = BeautifulSoup(string)
        header = [x for x in soup.findAll('ul')]
        confs = [x for x in header[0]]
        confs_cleaned = [str(x).strip() for x in confs if x]
        conferences = []
        for x in confs_cleaned:
            if x and len(x.strip()) != 0:
                tag = re.search("<strong>(.*?)</strong>", x).group(1)
                longname = re.search("</strong>\n.*?\((.*?)\)", x).group(1)
                deadline = re.search("Submission deadline: <b>(.*?)</b>", x).group(1)
                dates_and_location = re.search("Conference dates: (.*?)<br.*\n(.*?)<br", x)
                date_and_location = re.search("Conference date: (.*?)<br.*\n(.*?)<br", x)
                dates = dates_and_location.group(1) if dates_and_location else date_and_location.group(1)
                location = dates_and_location.group(2) if dates_and_location else date_and_location.group(2)
                link = re.search("More info: <a href.*>(.*)</a><p>", x).group(1)
                # convert dates
                deadline_date = datetime.datetime.strptime(deadline, "%B %d, %Y")
                datessplit = re.search("(.*) - (.*)", dates)
                if dates_and_location:
                    startconf_date = datetime.datetime.strptime(datessplit.group(1), "%B %d, %Y")
                    endconf_date = datetime.datetime.strptime(datessplit.group(2), "%B %d, %Y")
                    conf = Conf(tag, deadline_date, longname, location, (startconf_date, endconf_date))
                else:
                    # single-day conference: the lone date becomes the start date
                    startconf_date = datetime.datetime.strptime(dates, "%B %d, %Y")
                    conf = Conf(tag, deadline_date, longname, location, (startconf_date,))
                # print(conf)
                conferences.append(conf)
        # build conferences
        return conferences


class Farishi(Website):
    URL = 'http://cs.yazd.ac.ir/farshi/conf.html'

    def download(self):
        # the real download is complicated because frames are web2.0 and static html is even 3.0
        with open('test-farishi.html', 'r') as f:
            thefile = f.read()
        return thefile

    def parseConferencesFromString(self, string):
        # print(string)
        parsed_html = BeautifulSoup(string)
        trs = parsed_html.body.findAll('tr')
        conferences = []
        for elem in trs[2:]:  # the first two are junk tags
            tds = elem.findAll('td')
            # print(tds.decode("utf8"))
            for x in tds:
                print(x.text.encode("utf8"))
            print("done")
            tag = tds[0].text
            try:
                deadline_date = datetime.datetime.strptime(tds[1].text, "%d %b %Y")
            except:
                deadline_date = datetime.datetime.strptime(tds[1].text, "%d %B %Y")
            longname = ""
            location = ""
            # notification = datetime.datetime.strptime(tds[2].text, "%d %B %Y")
            datessplit = re.search("(.*) - (.*)", tds[3].text)
            # startconf_date = datetime.datetime.strptime(tds[2].text, "%d %b %Y")
            # endconf_date = datetime.datetime.strptime(tds[3].text, "%d %b %Y")
            conf = Conf(tag, deadline_date, longname, location)
            conferences.append(conf)
        return conferences
        # table = re.findall("<tr>(?iLmsux)*</tr>", string)
        # return table


def gatherTwo(list):
    list1 = [x for i, x in enumerate(list) if i % 2 == 0]
    list2 = [x for i, x in enumerate(list) if i % 2 == 1]
    gatherer = [(x, y) for x, y in zip(list1, list2)]
    return gatherer


def constructCalendar(conferences):
    cal = Calendar()
    cal.add('prodid', '-//conferences//mxm.dk')
    cal.add('version', '2.0')
    for c in conferences:
        # print(c)
        # if c.isValid():
        event = Event()
        event.add('summary', c.title.upper() + ' Deadline')  # str.upper(); the string module has no upper() in Python 3
        event.add('description', c.getDescription())
        year = c.deadline.year
        month = c.deadline.month
        day = c.deadline.day
        event.add('dtstart', datetime.datetime(year, month, day, 0, 0, tzinfo=pytz.utc))
        event.add('dtend', datetime.datetime(year, month, day, 20, 0, tzinfo=pytz.utc))
        event.add('dtstamp', datetime.datetime(year, month, day, 0, 0, tzinfo=pytz.utc))
        cal.add_component(event)
    return cal


def writeCal(calendar):
    with open(OUTFILE, 'wb') as f:
        f.write(calendar.to_ical())


website = Friedetzky()
tmp = website.parse()
writeCal(tmp)
# website = Farishi()
# tmp = website.parse()
# print(tmp)
# ===== enjaz/enjaz :: activities/migrations/0001_initial.py (license: agpl-3.0) =====
# -*- coding: utf-8 -*-
from __future__ import unicode_literals

from django.db import models, migrations
import django.db.models.deletion
from django.conf import settings


class Migration(migrations.Migration):

    dependencies = [
        ('clubs', '0001_initial'),
        migrations.swappable_dependency(settings.AUTH_USER_MODEL),
    ]

    operations = [
        migrations.CreateModel(
            name='Activity',
            fields=[
                ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
                ('name', models.CharField(max_length=200, verbose_name='\u0627\u0633\u0645 \u0627\u0644\u0646\u0634\u0627\u0637')),
                ('description', models.TextField(verbose_name='\u0648\u0635\u0641 \u0627\u0644\u0646\u0634\u0627\u0637')),
                ('public_description', models.TextField(help_text='\u0647\u0630\u0627 \u0647\u0648 \u0627\u0644\u0648\u0635\u0641 \u0627\u0644\u0630\u064a \u0633\u064a\u0639\u0631\u0636 \u0644\u0644\u0637\u0644\u0627\u0628', verbose_name='\u0627\u0644\u0648\u0635\u0641 \u0627\u0644\u0625\u0639\u0644\u0627\u0645\u064a')),
                ('requirements', models.TextField(verbose_name='\u0645\u062a\u0637\u0644\u0628\u0627\u062a \u0627\u0644\u0646\u0634\u0627\u0637', blank=True)),
                ('submission_date', models.DateTimeField(auto_now_add=True, verbose_name='\u062a\u0627\u0631\u064a\u062e \u0627\u0644\u0625\u0631\u0633\u0627\u0644')),
                ('edit_date', models.DateTimeField(auto_now=True, verbose_name='\u062a\u0627\u0631\u064a\u062e \u0627\u0644\u062a\u0639\u062f\u064a\u0644')),
                ('is_editable', models.BooleanField(default=True, verbose_name='\u0647\u0644 \u064a\u0645\u0643\u0646 \u062a\u0639\u062f\u064a\u0644\u0647\u061f')),
                ('is_deleted', models.BooleanField(default=False, verbose_name='\u0645\u062d\u0630\u0648\u0641\u061f')),
                ('inside_collaborators', models.TextField(verbose_name='\u0627\u0644\u0645\u062a\u0639\u0627\u0648\u0646\u0648\u0646 \u0645\u0646 \u062f\u0627\u062e\u0644 \u0627\u0644\u062c\u0627\u0645\u0639\u0629', blank=True)),
                ('outside_collaborators', models.TextField(verbose_name='\u0627\u0644\u0645\u062a\u0639\u0627\u0648\u0646\u0648\u0646 \u0645\u0646 \u062e\u0627\u0631\u062c \u0627\u0644\u062c\u0627\u0645\u0639\u0629', blank=True)),
                ('participants', models.IntegerField(help_text='\u0627\u0644\u0639\u062f\u062f \u0627\u0644\u0645\u062a\u0648\u0642\u0639 \u0644\u0644\u0645\u0633\u062a\u0641\u064a\u062f\u064a\u0646 \u0645\u0646 \u0627\u0644\u0646\u0634\u0627\u0637', verbose_name='\u0639\u062f\u062f \u0627\u0644\u0645\u0634\u0627\u0631\u0643\u064a\u0646')),
                ('organizers', models.IntegerField(help_text='\u0639\u062f\u062f \u0627\u0644\u0637\u0644\u0627\u0628 \u0627\u0644\u0630\u064a\u0646 \u0633\u064a\u0646\u0638\u0645\u0648\u0646 \u0627\u0644\u0646\u0634\u0627\u0637', verbose_name='\u0639\u062f\u062f \u0627\u0644\u0645\u0646\u0638\u0645\u064a\u0646')),
            ],
            options={
                'verbose_name': '\u0646\u0634\u0627\u0637',
                'verbose_name_plural': '\u0627\u0644\u0646\u0634\u0627\u0637\u0627\u062a',
                'permissions': (('view_activity', 'Can view all available activities.'), ('directly_add_activity', 'Can add activities directly, without approval.')),
            },
            bases=(models.Model,),
        ),
        migrations.CreateModel(
            name='Category',
            fields=[
                ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
                ('ar_name', models.CharField(max_length=50, verbose_name='\u0627\u0633\u0645 \u0627\u0644\u062a\u0635\u0646\u064a\u0641')),
                ('en_name', models.CharField(max_length=50, verbose_name='\u0627\u0633\u0645 \u0627\u0644\u0625\u0646\u062c\u0644\u064a\u0632\u064a')),
                ('description', models.TextField(verbose_name='\u0648\u0635\u0641 \u0627\u0644\u062a\u0635\u0646\u064a\u0641', blank=True)),
                ('parent', models.ForeignKey(on_delete=django.db.models.deletion.SET_NULL, verbose_name='\u0627\u0644\u062a\u0635\u0646\u064a\u0641 \u0627\u0644\u0623\u0628', blank=True, to='activities.Category', null=True)),
            ],
            options={
                'verbose_name': '\u062a\u0635\u0646\u064a\u0641',
                'verbose_name_plural': '\u0627\u0644\u062a\u0635\u0646\u064a\u0641\u0627\u062a',
            },
            bases=(models.Model,),
        ),
        migrations.CreateModel(
            name='Episode',
            fields=[
                ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
                ('start_date', models.DateField()),
                ('end_date', models.DateField()),
                ('start_time', models.TimeField()),
                ('end_time', models.TimeField()),
                ('location', models.CharField(max_length=128)),
                ('allow_multiple_niqati', models.BooleanField(default=False, verbose_name='\u0627\u0633\u0645\u062d \u0628\u0625\u062f\u062e\u0627\u0644 \u0623\u0643\u062b\u0631 \u0645\u0646 \u0631\u0645\u0632 \u0646\u0642\u0627\u0637\u064a\u061f')),
                ('requires_report', models.BooleanField(default=True)),
                ('can_report_early', models.BooleanField(default=False)),
                ('requires_story', models.BooleanField(default=True)),
                ('activity', models.ForeignKey(to='activities.Activity')),
            ],
            options={
            },
            bases=(models.Model,),
        ),
        migrations.CreateModel(
            name='Evaluation',
            fields=[
                ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
                ('quality', models.PositiveIntegerField(help_text='\u0643\u064a\u0641 \u062a\u0642\u064a\u0645 \u0639\u0645\u0644 \u0627\u0644\u0646\u0627\u062f\u064a \u0641\u064a \u062a\u0646\u0638\u064a\u0645 \u0627\u0644\u0646\u0634\u0627\u0637\u061f', verbose_name='\u062c\u0648\u062f\u0629 \u062a\u0646\u0638\u064a\u0645 \u0627\u0644\u0646\u0634\u0627\u0637')),
                ('relevance', models.PositiveIntegerField(help_text='\u0645\u0627 \u0645\u062f\u0649 \u0645\u0644\u0627\u0621\u0645\u0629 \u0627\u0644\u0646\u0634\u0627\u0637 \u0644\u0627\u0647\u062a\u0645\u0627\u0645 \u0627\u0644\u0637\u0644\u0627\u0628\u061f', verbose_name='\u0645\u0644\u0627\u0621\u0645\u0629 \u0627\u0644\u0646\u0634\u0627\u0637 \u0644\u0627\u0647\u062a\u0645\u0627\u0645 \u0627\u0644\u0637\u0644\u0627\u0628')),
                ('episode', models.ForeignKey(to='activities.Episode')),
                ('evaluator', models.ForeignKey(to=settings.AUTH_USER_MODEL)),
            ],
            options={
                'verbose_name': '\u062a\u0642\u064a\u064a\u0645',
                'verbose_name_plural': '\u0627\u0644\u062a\u0642\u064a\u064a\u0645\u0627\u062a',
            },
            bases=(models.Model,),
        ),
        migrations.CreateModel(
            name='Review',
            fields=[
                ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
                ('review_date', models.DateTimeField(auto_now_add=True, verbose_name='\u062a\u0627\u0631\u064a\u062e \u0627\u0644\u0645\u0631\u0627\u062c\u0639\u0629')),
                ('edit_date', models.DateTimeField(auto_now=True, verbose_name='\u062a\u0627\u0631\u064a\u062e \u0627\u0644\u062a\u0639\u062f\u064a\u0644', null=True)),
                ('clubs_notes', models.TextField(verbose_name='\u0645\u0644\u0627\u062d\u0638\u0627\u062a \u0639\u0644\u0649 \u0627\u0644\u0623\u0646\u062f\u064a\u0629', blank=True)),
                ('name_notes', models.TextField(verbose_name='\u0645\u0644\u0627\u062d\u0638\u0627\u062a \u0639\u0644\u0649 \u0627\u0644\u0627\u0633\u0645', blank=True)),
                ('datetime_notes', models.TextField(verbose_name='\u0645\u0644\u0627\u062d\u0638\u0627\u062a \u0639\u0644\u0649 \u0627\u0644\u062a\u0627\u0631\u064a\u062e \u0648\u0627\u0644\u0648\u0642\u062a', blank=True)),
                ('description_notes', models.TextField(verbose_name='\u0645\u0644\u0627\u062d\u0638\u0627\u062a \u0639\u0644\u0649 \u0627\u0644\u0648\u0635\u0641', blank=True)),
                ('requirement_notes', models.TextField(verbose_name='\u0645\u0644\u0627\u062d\u0638\u0627\u062a \u0639\u0644\u0649 \u0627\u0644\u0645\u062a\u0637\u0644\u0628\u0627\u062a', blank=True)),
                ('inside_notes', models.TextField(verbose_name='\u0645\u0644\u0627\u062d\u0638\u0627\u062a \u0627\u0644\u0645\u062a\u0639\u0627\u0648\u0646\u0648\u0646 \u0645\u0646 \u062f\u0627\u062e\u0644 \u0627\u0644\u062c\u0627\u0645\u0639\u0629', blank=True)),
                ('outside_notes', models.TextField(verbose_name='\u0645\u0644\u0627\u062d\u0638\u0627\u062a \u0627\u0644\u0645\u062a\u0639\u0627\u0648\u0646\u0648\u0646 \u0645\u0646 \u062e\u0627\u0631\u062c \u0627\u0644\u062c\u0627\u0645\u0639\u0629', blank=True)),
                ('participants_notes', models.TextField(verbose_name='\u0645\u0644\u0627\u062d\u0638\u0627\u062a \u0639\u0644\u0649 \u0639\u062f\u062f \u0627\u0644\u0645\u0634\u0627\u0631\u0643\u064a\u0646', blank=True)),
                ('organizers_notes', models.TextField(verbose_name='\u0645\u0644\u0627\u062d\u0638\u0627\u062a \u0639\u0644\u0649 \u0639\u062f\u062f \u0627\u0644\u0645\u0646\u0638\u0645\u064a\u0646', blank=True)),
                ('submission_date_notes', models.TextField(verbose_name='\u0645\u0644\u0627\u062d\u0638\u0627\u062a \u0639\u0644\u0649 \u062a\u0627\u0631\u064a\u062e \u062a\u0642\u062f\u064a\u0645 \u0627\u0644\u0637\u0644\u0628', blank=True)),
                ('review_type', models.CharField(default=b'P', max_length=1, verbose_name='\u0646\u0648\u0639 \u0627\u0644\u0645\u0631\u0627\u062c\u0639\u0629', choices=[(b'P', '\u0631\u0626\u0627\u0633\u0629 \u0646\u0627\u062f\u064a \u0627\u0644\u0637\u0644\u0627\u0628'), (b'D', '\u0639\u0645\u0627\u062f\u0629 \u0634\u0624\u0648\u0646 \u0627\u0644\u0637\u0644\u0627\u0628')])),
                ('is_approved', models.NullBooleanField(verbose_name='\u0627\u0644\u062d\u0627\u0644\u0629', choices=[(None, '\u0623\u0628\u0642\u0650 \u0645\u0639\u0644\u0642\u064b\u0627'), (True, '\u0627\u0642\u0628\u0644'), (False, '\u0627\u0631\u0641\u0636')])),
                ('activity', models.ForeignKey(verbose_name=' \u0627\u0644\u0646\u0634\u0627\u0637', to='activities.Activity')),
                ('reviewer', models.ForeignKey(on_delete=django.db.models.deletion.SET_NULL, to=settings.AUTH_USER_MODEL, null=True)),
            ],
            options={
                'verbose_name': '\u0645\u0631\u0627\u062c\u0639\u0629',
                'verbose_name_plural': '\u0627\u0644\u0645\u0631\u0627\u062c\u0639\u0627\u062a',
                'permissions': (('view_review', 'Can view all available reviews.'), ('add_deanship_review', 'Can add a review in the name of the deanship.'), ('add_presidency_review', 'Can add a review in the name of the presidency.'), ('view_deanship_review', 'Can view a review in the name of the deanship.'), ('view_presidency_review', 'Can view a review in the name of the presidency.')),
            },
            bases=(models.Model,),
        ),
        migrations.AddField(
            model_name='activity',
            name='category',
            field=models.ForeignKey(on_delete=django.db.models.deletion.SET_NULL, verbose_name='\u0627\u0644\u062a\u0635\u0646\u064a\u0641', to='activities.Category', null=True),
            preserve_default=True,
        ),
        migrations.AddField(
            model_name='activity',
            name='primary_club',
            field=models.ForeignKey(related_name='primary_activity', on_delete=django.db.models.deletion.SET_NULL, verbose_name='\u0627\u0644\u0646\u0627\u062f\u064a \u0627\u0644\u0645\u0646\u0638\u0645', to='clubs.Club', null=True),
            preserve_default=True,
        ),
        migrations.AddField(
            model_name='activity',
            name='secondary_clubs',
            field=models.ManyToManyField(related_name='secondary_activity', null=True, verbose_name='\u0627\u0644\u0623\u0646\u062f\u064a\u0629 \u0627\u0644\u0645\u062a\u0639\u0627\u0648\u0646\u0629', to='clubs.Club', blank=True),
            preserve_default=True,
        ),
        migrations.AddField(
            model_name='activity',
            name='submitter',
            field=models.ForeignKey(on_delete=django.db.models.deletion.SET_NULL, to=settings.AUTH_USER_MODEL, null=True),
            preserve_default=True,
        ),
    ]
# ===== houssemFat/MeeM-Dev :: teacher/apps/courses/quizzes/views.py (license: mit) =====
from django.http import Http404
import json
from django.contrib.auth.decorators import login_required
from django.views.decorators.http import require_http_methods

from core.apps.decorators import require_request_attributes
from core.apps.tools.common import dump_and_render_json
from . import helper

quizzes_fields = ['title', 'created_at', 'about', 'id']


@login_required
@require_http_methods(['GET', 'POST'])
def default(request):
    data = None
    if request.method == 'GET':
        module = 'course'
        if 'module' in request.GET:
            module = request.GET['module']
        try:
            id = request.GET['cid']
        except KeyError:
            raise Http404()
        data = helper.get_list(request, module, id)
    else:
        data = helper.create(request)
    return dump_and_render_json(request, data)
    raise Http404()


# read, update, delete
@login_required
@require_http_methods(['GET', 'PUT', 'DELETE'])
def rud(request, id):
    if request.method == 'GET':
        data = helper.get_item(request, id)
    elif request.method == 'PUT':
        data = helper.update(request, id)
    elif request.method == 'DELETE':
        data = helper.delete(request, id)
    return dump_and_render_json(request, data)
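
# --- Illustrative URLconf wiring (added; the URL patterns are assumptions) ---
# from django.conf.urls import url
# from . import views
#
# urlpatterns = [
#     url(r'^$', views.default),
#     url(r'^(?P<id>\d+)$', views.rud),
# ]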
# ===== LeZuse/psd-tools :: src/psd_tools/decoder/tagged_blocks.py (license: mit) =====
# -*- coding: utf-8 -*-
from __future__ import absolute_import, unicode_literals, print_function

import warnings
import collections
import io

from psd_tools.constants import TaggedBlock, SectionDivider
from psd_tools.decoder.actions import decode_descriptor, UnknownOSType
from psd_tools.utils import read_fmt, read_unicode_string, unpack, debug_view
from psd_tools.decoder import decoders
from psd_tools.reader.layers import Block

_tagged_block_decoders, register = decoders.new_registry()

_tagged_block_decoders.update({
    TaggedBlock.BLEND_CLIPPING_ELEMENTS: decoders.boolean("I"),
    TaggedBlock.BLEND_INTERIOR_ELEMENTS: decoders.boolean("I"),
    TaggedBlock.KNOCKOUT_SETTING: decoders.boolean("I"),
    TaggedBlock.UNICODE_LAYER_NAME: decoders.unicode_string,
    TaggedBlock.LAYER_ID: decoders.single_value("I")  # XXX: there are more fields in docs, but they seem to be incorrect
})

SolidColorSettings = collections.namedtuple('SolidColorSettings', 'version data')
MetadataItem = collections.namedtuple('MetadataItem', 'sig key copy_on_sheet_duplication data')
ProtectedSetting = collections.namedtuple('ProtectedSetting', 'transparency, composite, position')
TypeToolObjectSetting = collections.namedtuple('TypeToolObjectSetting',
                                               'version xx xy yx yy tx ty text_version descriptor_version1 text_data')
                                               #'warp_version descriptor_version2 warp_data'
                                               #'left top right bottom')


class Divider(collections.namedtuple('Divider', 'type key')):
    def __repr__(self):
        return "Divider(%r %s, %s)" % (self.type, SectionDivider.name_of(self.type), self.key)


def decode(tagged_blocks):
    """
    Replaces "data" attribute of a blocks from ``tagged_blocks`` list
    with parsed data structure if it is known how to parse it.
    """
    return [parse_tagged_block(block) for block in tagged_blocks]


def parse_tagged_block(block):
    """
    Replaces "data" attribute of a block with parsed data structure
    if it is known how to parse it.
    """
    key = block.key.decode('ascii')
    if not TaggedBlock.is_known(key):
        warnings.warn("Unknown tagged block (%s)" % block.key)

    decoder = _tagged_block_decoders.get(key, lambda data: data)
    return Block(key, decoder(block.data))


@register(TaggedBlock.SOLID_COLOR)
def _decode_soco(data):
    fp = io.BytesIO(data)
    version = read_fmt("I", fp)
    try:
        data = decode_descriptor(None, fp)
        return SolidColorSettings(version, data)
    except UnknownOSType as e:
        warnings.warn("Ignoring solid color tagged block (%s)" % e)


@register(TaggedBlock.REFERENCE_POINT)
def _decode_reference_point(data):
    return read_fmt("2d", io.BytesIO(data))


@register(TaggedBlock.SHEET_COLOR_SETTING)
def _decode_color_setting(data):
    return read_fmt("4H", io.BytesIO(data))


@register(TaggedBlock.SECTION_DIVIDER_SETTING)
def _decode_section_divider(data):
    fp = io.BytesIO(data)
    key = None

    tp = read_fmt("I", fp)[0]
    if not SectionDivider.is_known(tp):
        warnings.warn("Unknown section divider type (%s)" % tp)

    if len(data) == 12:
        sig = fp.read(4)
        if sig != b'8BIM':
            warnings.warn("Invalid signature in section divider block")
        key = fp.read(4).decode('ascii')

    return Divider(tp, key)


@register(TaggedBlock.METADATA_SETTING)
def _decode_metadata(data):
    fp = io.BytesIO(data)
    items_count = read_fmt("I", fp)[0]
    items = []
    for x in range(items_count):
        sig, key, copy_on_sheet, data_length = read_fmt("4s 4s ? 3x I", fp)
        data = fp.read(data_length)
        items.append(MetadataItem(sig, key, copy_on_sheet, data))
    return items


@register(TaggedBlock.PROTECTED_SETTING)
def _decode_protected(data):
    flag = unpack("I", data)[0]
    return ProtectedSetting(
        bool(flag & 1),
        bool(flag & 2),
        bool(flag & 4),
    )


@register(TaggedBlock.LAYER_32)
def _decode_layer32(data):
    from psd_tools.reader import layers
    from psd_tools.decoder.decoder import decode_layers
    fp = io.BytesIO(data)
    layers = layers._read_layers(fp, 'latin1', 32, length=len(data))
    return decode_layers(layers)


@register(TaggedBlock.LAYER_16)
def _decode_layer16(data):
    from psd_tools.reader import layers
    from psd_tools.decoder.decoder import decode_layers
    fp = io.BytesIO(data)
    layers = layers._read_layers(fp, 'latin1', 16, length=len(data))
    return decode_layers(layers)


@register(TaggedBlock.TYPE_TOOL_OBJECT_SETTING)
def _decode_type_tool_object_setting(data):
    fp = io.BytesIO(data)
    ver, xx, xy, yx, yy, tx, ty, txt_ver, desc_ver1 = read_fmt("H 6Q H I", fp)

    # This decoder needs to be updated if we have new formats.
    if ver != 1 or txt_ver != 50 or desc_ver1 != 16:
        warnings.warn("Ignoring type setting tagged block due to old versions")
        return

    try:
        text_data = decode_descriptor(None, fp)
    except UnknownOSType as e:
        warnings.warn("Ignoring type setting tagged block (%s)" % e)
        return

    # XXX: Until Engine Data is parsed properly, the following cannot be parsed.
    # The end of the engine data dictates where this starts.
    return TypeToolObjectSetting(ver, xx, xy, yx, yy, tx, ty, txt_ver, desc_ver1, text_data)

    warp_ver, desc_ver2 = read_fmt("H I", fp)
    if warp_ver != 1 or desc_ver2 != 16:
        warnings.warn("Ignoring type setting tagged block due to old versions")
        return

    try:
        warp_data = decode_descriptor(None, fp)
    except UnknownOSType as e:
        warnings.warn("Ignoring type setting tagged block (%s)" % e)
        return

    left, top, right, bottom = read_fmt("4Q", fp)
    return TypeToolObjectSetting(ver, xx, xy, yx, yy, tx, ty, txt_ver, desc_ver1,
                                 text_data, warp_ver, desc_ver2, warp_data,
                                 left, top, right, bottom)
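
# --- Usage sketch (added; the raw block bytes are illustrative) ---
# `decode` maps over reader-level blocks and parses the payloads it knows how
# to handle; everything else passes through unchanged. For example, a layer-ID
# block whose payload is a big-endian uint32 might decode like this:
#
#   from psd_tools.reader.layers import Block
#   blocks = [Block(b'lyid', b'\x00\x00\x00\x07')]
#   decode(blocks)   # -> [Block('lyid', 7)]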
# ===== amirgeva/coide :: gen/templates.py (license: gpl-2.0) =====
# -*- coding: utf-8 -*-

# Form implementation generated from reading ui file 'uis/templates.ui'
#
#
#
# WARNING! All changes made in this file will be lost!

from PyQt4 import QtCore, QtGui

try:
    _fromUtf8 = QtCore.QString.fromUtf8
except AttributeError:
    def _fromUtf8(s):
        return s

try:
    _encoding = QtGui.QApplication.UnicodeUTF8

    def _translate(context, text, disambig):
        return QtGui.QApplication.translate(context, text, disambig, _encoding)
except AttributeError:
    def _translate(context, text, disambig):
        return QtGui.QApplication.translate(context, text, disambig)


class Ui_Dialog(object):
    def setupUi(self, Dialog):
        Dialog.setObjectName(_fromUtf8("Dialog"))
        Dialog.resize(640, 480)
        Dialog.buttonBox = QtGui.QDialogButtonBox(Dialog)
        Dialog.buttonBox.setGeometry(QtCore.QRect(406, 440, 225, 32))
        Dialog.buttonBox.setOrientation(QtCore.Qt.Horizontal)
        Dialog.buttonBox.setStandardButtons(QtGui.QDialogButtonBox.Cancel|QtGui.QDialogButtonBox.Ok)
        Dialog.buttonBox.setObjectName(_fromUtf8("buttonBox"))
        Dialog.templatesList = QtGui.QListWidget(Dialog)
        Dialog.templatesList.setGeometry(QtCore.QRect(16, 64, 161, 353))
        Dialog.templatesList.setObjectName(_fromUtf8("templatesList"))
        Dialog.codeEdit = QtGui.QPlainTextEdit(Dialog)
        Dialog.codeEdit.setGeometry(QtCore.QRect(192, 61, 433, 353))
        font = QtGui.QFont()
        font.setFamily(_fromUtf8("Monospace"))
        font.setPointSize(12)
        Dialog.codeEdit.setFont(font)
        Dialog.codeEdit.setObjectName(_fromUtf8("codeEdit"))
        Dialog.addButton = QtGui.QPushButton(Dialog)
        Dialog.addButton.setGeometry(QtCore.QRect(16, 432, 65, 32))
        Dialog.addButton.setObjectName(_fromUtf8("addButton"))
        Dialog.delButton = QtGui.QPushButton(Dialog)
        Dialog.delButton.setGeometry(QtCore.QRect(96, 432, 81, 32))
        Dialog.delButton.setObjectName(_fromUtf8("delButton"))
        Dialog.macrosButton = QtGui.QPushButton(Dialog)
        Dialog.macrosButton.setGeometry(QtCore.QRect(258, 420, 101, 25))
        Dialog.macrosButton.setObjectName(_fromUtf8("macrosButton"))
        Dialog.label = QtGui.QLabel(Dialog)
        Dialog.label.setGeometry(QtCore.QRect(16, 16, 161, 17))
        Dialog.label.setObjectName(_fromUtf8("label"))
        Dialog.tmplDir = QtGui.QLineEdit(Dialog)
        Dialog.tmplDir.setGeometry(QtCore.QRect(192, 16, 369, 27))
        Dialog.tmplDir.setObjectName(_fromUtf8("tmplDir"))
        Dialog.tmplDirBrowseButton = QtGui.QPushButton(Dialog)
        Dialog.tmplDirBrowseButton.setGeometry(QtCore.QRect(577, 16, 49, 32))
        Dialog.tmplDirBrowseButton.setObjectName(_fromUtf8("tmplDirBrowseButton"))

        self.retranslateUi(Dialog)
        QtCore.QObject.connect(Dialog.buttonBox, QtCore.SIGNAL(_fromUtf8("accepted()")), Dialog.accept)
        QtCore.QObject.connect(Dialog.buttonBox, QtCore.SIGNAL(_fromUtf8("rejected()")), Dialog.reject)
        QtCore.QMetaObject.connectSlotsByName(Dialog)

    def retranslateUi(self, Dialog):
        Dialog.setWindowTitle(_translate("Dialog", "Code Templates", None))
        Dialog.addButton.setText(_translate("Dialog", "Add", None))
        Dialog.delButton.setText(_translate("Dialog", "Delete", None))
        Dialog.macrosButton.setText(_translate("Dialog", "Macros Help", None))
        Dialog.label.setText(_translate("Dialog", "Templates Directory:", None))
        Dialog.tmplDirBrowseButton.setText(_translate("Dialog", "...", None))
# ===== npdoty/bigbang :: bigbang/entity_resolution.py (license: agpl-3.0) =====
import pandas as pd
import numpy as np
import re

namesID = dict();
emailsID = dict();
allID = dict();
nameRegex = re.compile("(\(.*\))")

currID = 1;


def getID(name, email):
    global currID;
    global emailsID
    global namesID
    nameID = False;
    emailID = False;
    if name is not None:
        if name in namesID:
            nameID = namesID[name];
    if email is not None:
        if email in emailsID:
            emailID = emailsID[email]

    if (not emailID) and (not nameID):
        store(currID, name, email);
        currID += 1;
        return currID;

    if not emailID:
        store(nameID, name, email)
        return nameID

    if not nameID:
        store(emailID, name, email)
        return emailID

    if emailID != nameID:
        # print("ID MISMATCH! " + email + " " + name)
        store(nameID, name, email)
    else:
        if emailID not in allID:
            store(emailID, name, email);

    return nameID;


def store(id, name, email):
    if id not in allID:
        allID[id] = {"emails": list(), "names": list()}
    fullID = allID[id];
    namesID[name] = id;
    emailsID[email] = id;
    fullID["names"].append(name);
    fullID["emails"].append(email);


def name_for_id(id):
    if id in allID:
        if "names" in allID[id] and len(allID[id]["names"]) > 0:
            return allID[id]["names"][0]
    return "UNKNOWN " + str(id)


def entity_resolve(row, emailCol, nameCol):
    emailAddress = row[emailCol].upper();
    emailAddress = emailAddress.replace(" AT ", "@")

    match = nameRegex.search(emailAddress)
    name = None
    if (match is not None):
        name = match.group(0)  # unused info for now
        emailAddress = emailAddress.replace(name, "");
        name = name.replace("(", "")
        name = name.replace(")", "")
    if nameCol is not None:
        name = row[nameCol].upper()
    row["Person-ID"] = getID(name, emailAddress)
    return row
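
# --- Usage sketch (added; the column names are assumptions) ---
# entity_resolve is written to be mapped over a DataFrame row-wise:
#
#   df = df.apply(lambda row: entity_resolve(row, "From", None), axis=1)
#
# after which df["Person-ID"] holds one integer ID per resolved sender.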
# ===== ellisztamas/faps :: faps/pr_transition.py (license: mit) =====
import numpy as np


def pr_transition(offspring_diploid, maternal_diploid, male_diploid, offspring_genotype, maternal_genotype, male_genotype, mu):
    """
    Calculate the transition probability for a trio of putative diploid
    genotypes.

    Transition probabilities are then weighted by the probability that the true
    offspring, maternal, and male genotypes match the input genotypes given
    observed marker data. Generally one would need to sum over all 27 possible
    combinations of genotypes.

    This function works with diploid genotypes, rather than a genotypeArray.
    Generally this is not called directly, but through lik_sampled_fathers() or
    similar.

    Parameters
    ----------
    offspring_diploid, maternal_diploid, male_diploid: array
        arrays of diploid genotypes for the offspring, mothers and fathers.
    offspring_genotype, maternal_genotype, male_genotype: int
        0, 1 or 2 indicating homozygous, heterozygous or alternate homozygous
        genotype.
    mu: float
        point estimate of the genotyping error rate.

    Returns
    -------
    A 3-dimensional array of probabilities indexing offspring, candidate males,
    and loci. These are given in linear, rather than log space.
    """
    # an array of all possible transition probabilities indexed as [offspring, mother, father].
    trans_prob_array = np.array([[[1,   0.5,  0  ],
                                  [0.5, 0.25, 0  ],
                                  [0,   0,    0  ]],
                                 [[0,   0.5,  1  ],
                                  [0.5, 0.5,  0.5],
                                  [1,   0.5,  0  ]],
                                 [[0,   0,    0  ],
                                  [0,   0.25, 0.5],
                                  [0,   0.5,  1  ]]])
    # the transition probability for the given genotypes.
    trans_prob = trans_prob_array[offspring_genotype, maternal_genotype, male_genotype]

    # Probabilities that the observed offspring marker data match observed data.
    pr_offs = np.zeros([offspring_diploid.shape[0], offspring_diploid.shape[1]])
    pr_offs[offspring_diploid == offspring_genotype] = 1 - mu
    pr_offs[offspring_diploid != offspring_genotype] = mu

    # Probabilities that the observed maternal marker data match observed data.
    pr_mothers = np.zeros([maternal_diploid.shape[0], maternal_diploid.shape[1]])
    pr_mothers[maternal_diploid == maternal_genotype] = 1 - mu
    pr_mothers[maternal_diploid != maternal_genotype] = mu

    # Probabilities that the observed candidate male marker data match observed data.
    pr_males = np.zeros([male_diploid.shape[0], male_diploid.shape[1]])
    pr_males[male_diploid == male_genotype] = 1 - mu
    pr_males[male_diploid != male_genotype] = mu

    return trans_prob * pr_males[np.newaxis] * pr_mothers[:, np.newaxis] * pr_offs[:, np.newaxis]
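
# --- Usage sketch (added; array names stand for user-supplied genotype data) ---
# For genotype matrices of shape (n_offspring, n_loci), (n_mothers, n_loci) and
# (n_candidate_males, n_loci), the broadcasting above yields:
#
#   probs = pr_transition(offspring_diploid, maternal_diploid, male_diploid,
#                         offspring_genotype=1, maternal_genotype=0,
#                         male_genotype=2, mu=0.0015)
#   probs.shape  # -> (n_offspring, n_candidate_males, n_loci)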
# ===== firedrakeproject/firedrake-fluids :: firedrake_fluids/expression.py (license: gpl-3.0) =====
# Copyright (C) 2014 Imperial College London.

# This file is part of Firedrake-Fluids.
#
# Firedrake-Fluids is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Firedrake-Fluids is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Firedrake-Fluids.  If not, see <http://www.gnu.org/licenses/>.

import sys  # needed for the sys.exit() calls below

from firedrake import *

from firedrake_fluids import LOG
import libspud


class ExpressionFromOptions:
    """ A module for instantiating UFL Expression objects using information provided
    in a simulation's configuration/options file. """

    def __init__(self, path, t=None):
        """ Retrieve the expression's value from the options file.

        :param str path: The path to the expression's definition in the options file.
        :param float t: The current time. """

        try:
            if(libspud.have_option(path + "/constant")):
                self.val = libspud.get_option(path + "/constant")
                self.type = "constant"

            elif(libspud.have_option(path + "/python")):
                v = libspud.get_option(path + "/python")
                self.type = "python"
                exec v  # Make the 'val' function that the user has defined available for calling.
                self.val = val
                self.t = t

            elif(libspud.have_option(path + "/cpp")):
                # For C++ expressions.
                self.type = "cpp"
                v = libspud.get_option(path + "/cpp")
                exec v
                self.val = val
                self.t = t

            else:
                raise ValueError("Unknown expression type.")

        except ValueError as e:
            LOG.exception(e)
            sys.exit()

        return

    def get_expression(self):
        """ Create a UFL Expression object, whose value is obtained from self.val.
        Note that for time-dependent Expressions, the variable 't' will need to be updated manually.

        :returns: A UFL Expression object.
        :rtype: ufl.Expression
        """

        try:
            if(self.type == "constant"):
                return Expression(self.val)

            elif(self.type == "cpp"):
                return Expression(code=self.val(), t=self.t)

            elif(self.type == "python"):
                val = self.val
                t = self.t

                # Determine the value shape by plugging in some dummy coordinate and time.
                s = val(x=[0, 0, 0], t=t)

                class PythonExpression(Expression):
                    def eval(self, value, x, t=None):
                        value[:] = val(x, t)

                    if(not isinstance(s, float) and not isinstance(s, int)):
                        def value_shape(self):
                            return (len(s),)

                e = PythonExpression(t=t)
                return e

            else:
                raise ValueError("Unknown expression type: %s." % self.type)

        except ValueError as e:
            LOG.exception(e)
            sys.exit()
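
# --- Usage sketch (added; the option path below is hypothetical) ---
# opts = ExpressionFromOptions("/material_phase::Fluid/vector_field::Velocity/initial_condition", t=0.0)
# initial_condition = opts.get_expression()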
# ===== dalemyers/xcrun :: isim/base_types.py (license: mit) =====
"""Base types for `xcrun simctl`."""

import enum
import json
from typing import Any, Dict
import subprocess


class ErrorCodes(enum.Enum):
    """Simple lookup for all known error codes."""

    # Tried to access a file or directory (such as by searching for an app
    # container) that doesn't exist
    #no_such_file_or_directory = 2

    # Trying to perform an action on a device type, but supplied an invalid
    # device type
    #invalid_device_type = 161

    # Tried to perform an action on the device, but there was an
    # incompatibility, such as when trying to create a new Apple TV device with
    # a watchOS runtime.
    incompatible_device = 147

    # The device was in a state where it can't be shutdown. e.g. already
    # shutdown
    #unable_to_shutdown_device_in_current_state = 164


class SimulatorControlType(enum.Enum):
    """Which type of simulator control type is it."""

    device_pair = "pair"
    runtime = "runtime"
    device_type = "device_type"
    device = "device"

    def list_key(self):
        """Define the key passed into the list function for the type."""
        # Disable this false positive
        #pylint: disable=comparison-with-callable
        if self.name == "device_type":
            return "devicetypes"
        #pylint: enable=comparison-with-callable

        return self.value + "s"


class SimulatorControlBase:
    """Types defined by simctl should inherit from this."""

    raw_info: Dict[str, Any]
    simctl_type: SimulatorControlType

    def __init__(self, raw_info: Dict[str, Any], simctl_type: SimulatorControlType) -> None:
        self.raw_info = raw_info
        self.simctl_type = simctl_type

    #pylint: disable=no-self-use
    def _run_command(self, command: str) -> str:
        """Convenience method for running an xcrun simctl command."""
        return SimulatorControlBase.run_command(command)
    #pylint: enable=no-self-use

    def __eq__(self, other: object) -> bool:
        """Override the default Equals behavior"""
        if not isinstance(other, self.__class__):
            return False
        if not self.simctl_type == other.simctl_type:
            return False
        return self.raw_info == other.raw_info

    def __ne__(self, other: object) -> bool:
        """Define a non-equality test"""
        return not self.__eq__(other)

    @staticmethod
    def run_command(command: str) -> str:
        """Run an xcrun simctl command."""
        full_command = "xcrun simctl %s" % (command,)
        # Deliberately don't catch the exception - we want it to bubble up
        return subprocess.run(full_command, universal_newlines=True, shell=True, check=True, stdout=subprocess.PIPE).stdout

    @staticmethod
    def list_type(item: SimulatorControlType) -> Any:
        """Run an `xcrun simctl` command with JSON output."""
        full_command = "xcrun simctl list %s --json" % (item.list_key(),)
        # Deliberately don't catch the exception - we want it to bubble up
        output = subprocess.run(full_command, universal_newlines=True, shell=True, check=True, stdout=subprocess.PIPE).stdout

        json_output = json.loads(output)
        if not isinstance(json_output, dict):
            raise Exception("Unexpected list type: " + str(type(json_output)))
        if not json_output.get(item.list_key()):
            raise Exception("Unexpected format for " + item.list_key() + " list type: " + str(json_output))

        return json_output[item.list_key()]
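
# --- Usage sketch (added) ---
# `list_type` shells out to `xcrun simctl list <key> --json` and returns the
# parsed sub-dictionary for that key, while `run_command` returns raw text:
#
#   runtimes = SimulatorControlBase.list_type(SimulatorControlType.runtime)
#   raw = SimulatorControlBase.run_command("list devices")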
# ===== carloderamo/mushroom :: mushroom_rl/algorithms/actor_critic/deep_actor_critic/deep_actor_critic.py (license: mit) =====
from mushroom_rl.algorithms import Agent
from mushroom_rl.utils.torch import update_optimizer_parameters


class DeepAC(Agent):
    """
    Base class for algorithms that use the reparametrization trick, such as
    SAC, DDPG and TD3.

    """
    def __init__(self, mdp_info, policy, actor_optimizer, parameters):
        """
        Constructor.

        Args:
            actor_optimizer (dict): parameters to specify the actor optimizer
                algorithm;
            parameters: policy parameters to be optimized.

        """
        if actor_optimizer is not None:
            if parameters is not None and not isinstance(parameters, list):
                parameters = list(parameters)
            self._parameters = parameters
            self._optimizer = actor_optimizer['class'](
                parameters, **actor_optimizer['params']
            )

            self._clipping = None
            if 'clipping' in actor_optimizer:
                self._clipping = actor_optimizer['clipping']['method']
                self._clipping_params = actor_optimizer['clipping']['params']

        self._add_save_attr(
            _optimizer='torch',
            _clipping='torch',
            _clipping_params='pickle'
        )

        super().__init__(mdp_info, policy)

    def fit(self, dataset):
        """
        Fit step.

        Args:
            dataset (list): the dataset.

        """
        raise NotImplementedError('DeepAC is an abstract class')

    def _optimize_actor_parameters(self, loss):
        """
        Method used to update actor parameters to maximize a given loss.

        Args:
            loss (torch.tensor): the loss computed by the algorithm.

        """
        self._optimizer.zero_grad()
        loss.backward()
        self._clip_gradient()
        self._optimizer.step()

    def _clip_gradient(self):
        if self._clipping:
            self._clipping(self._parameters, **self._clipping_params)

    @staticmethod
    def _init_target(online, target):
        for i in range(len(target)):
            target[i].set_weights(online[i].get_weights())

    def _update_target(self, online, target):
        for i in range(len(target)):
            weights = self._tau * online[i].get_weights()
            weights += (1 - self._tau) * target[i].get_weights()
            target[i].set_weights(weights)

    def _update_optimizer_parameters(self, parameters):
        self._parameters = list(parameters)
        if self._optimizer is not None:
            update_optimizer_parameters(self._optimizer, self._parameters)

    def _post_load(self):
        raise NotImplementedError('DeepAC is an abstract class. Subclasses need '
                                  'to implement the `_post_load` method.')
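
# --- Subclass sketch (added; `_loss` is a hypothetical helper, not part of the API) ---
# class MyActorCritic(DeepAC):
#     def fit(self, dataset):
#         loss = self._loss(dataset)              # compute an actor loss somehow
#         self._optimize_actor_parameters(loss)   # zero_grad / backward / clip / step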
# ===== timo/zasim :: zasim/cagen/jvn.py (license: bsd-3-clause) =====
"""

"""
# This file is part of zasim. zasim is licensed under the BSD 3-clause license.
# See LICENSE.txt for details.

# Copyright (c) 2011, Felix Bondarenko

import numpy as np

## A map from states according to the bitmask to
#  pygame blittable states ( between 0 and 28 )
displayableStateDict = {
    0: 0,       # U
    2048: 1,    # C00   2048
    2049: 2,    # C10   2048+1
    2050: 3,    # C01   2048+2
    2051: 4,    # C11   2048+3
    4192: 5,    # S000  4096+96
    4160: 6,    # S00   4096+64
    4168: 7,    # S01   4096+64+8
    4128: 8,    # S0    4096+32
    4176: 9,    # S10   4096+64+16
    4184: 10,   # S11   4096+64+16+8
    4144: 11,   # S1    4096+32+16
    4096: 12,   # S     4096
    6144: 13,   # T000  6144
    6272: 14,   # T001  6144+128
    6400: 15,   # T010  6144+256
    6528: 16,   # T011  6144+128+256
    6656: 17,   # T020  6144+512
    6784: 18,   # T021  6144+128+512
    6912: 19,   # T030  6144+256+512
    7040: 20,   # T031  6144+128+256+512
    7168: 21,   # T100  6144+1024
    7296: 22,   # T101  6144+128+1024
    7424: 23,   # T110  6144+256+1024
    7552: 24,   # T111  6144+128+256+1024
    7680: 25,   # T120  6144+512+1024
    7808: 26,   # T121  6144+128+512+1024
    7936: 27,   # T130  6144+256+512+1024
    8064: 28,   # T131  6144+128+256+1024+512
}

## A map from human readable vonNeumann states ( such as 'U', 'T020' and 'C11' )
#  to actual states calculated via bitmask
nameStateDict = { "U": 0,
                  #"C00" : 2048, "C10" : 2049, "C01" : 2050, "C11" : 2051,
                  "C00" : 1,    "C10" : 2,    "C01" : 3,    "C11" : 4,
                  "S"   : 4096, "S0"  : 4128, "S1"  : 4144, "S00" : 4160,
                  "S01" : 4168, "S10" : 4176, "S11" : 4184, "S000": 4192,
                  "T000": 6144, "T001": 6272, "T010": 6400, "T011": 6528,
                  "T020": 6656, "T032": 6784, "T030": 6912, "T031": 7040,
                  "T100": 7168, "T101": 7296, "T110": 7424, "T111": 7552,
                  "T120": 7680, "T121": 7808, "T130": 7936, "T131": 8064 }

stateNameDict = {a: b for b, a in nameStateDict.iteritems()}
states = sorted(nameStateDict.values())

from os import path
from zasim.display.qt import generate_tile_atlas

# XXX get the absolute path if possible.
filename_map = {num: path.join("images/vonNeumann", stateNameDict[num]) + ".png" for num in states}
PALETTE_JVN_IMAGE, PALETTE_JVN_RECT = generate_tile_atlas(filename_map, "images/vonNeumann")
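
# --- Illustrative round-trip (added): the two dicts compose, e.g. the
# transmission state named "T020" has bitmask value 6656 and is drawn as
# tile index 17:
#
#   displayableStateDict[nameStateDict["T020"]]  # -> 17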
## The cellular automaton proposed by John von Neumann
# \verbatim
# All states are encoded in a bitmask:
#
# <--MSB 10 LSB
# ...... 0 0 0 0 0 0 0 0 0 X X u a1 a0 eps sc1 sc0 s2 s1 s0 e1 e0
# | | | | | | | | | | | | |-> current e
# XX = 00 -> U <--------| | | | | | | | | | | |----> next e
# XX = 01 -> C <----------| | | | | | | | | |
# XX = 10 -> S | | | | | | | | |-------> lsb on S
# XX = 11 -> T | | | | | | | |----------> ...
# | | | | | | |-------------> msb on S
# S{} is encoded as SMASK_111 | | | | | |-----------------> s-state counter
# | | | | |---------------------> s-state counter
# | | | |
# | | | |--------------------------> excited
# | | |-----------------------------> direction
# | |--------------------------------> direction
# |----------------------------------> special
#
# \endverbatim
class vonNeumann ( object ):
palette = []
## The constructor
def __init__( self, sizeX, sizeY, confFile ):
## The ca's title
self.title = "vonNeumann"
## The ca's dimension
self.dim = 2
self.size = self.sizeX, self.sizeY = sizeX, sizeY
## The current configuration is held here
# as usual, these two arrays contain the real configuration, that is used
# in every step ... (see vonNeumann::displayConf)
self.currConf = np.zeros( (sizeX, sizeY), int )
## The current configuration is held here
self.nextConf = np.zeros( (sizeX, sizeY), int )
# used when updating only some cells instead of all....
self.cActArr = np.zeros( (self.sizeX*self.sizeY), bool )
self.nActArr = np.zeros( (self.sizeX*self.sizeY), bool )
self.cList = np.zeros( (self.sizeX*self.sizeY), int )
self.nList = np.zeros( (self.sizeX*self.sizeY), int )
self.cCounter = 0
self.nCounter = 0
if confFile != "":
self.importConf( confFile )
self.nextConf = self.currConf.copy()
## The configuration that is blittet...
# But in this CA the states are not enumerable from 0..28, but scattered
# between 0 and ~2^13, so we need a dict (see vonNeumann::displayableStateDict)
# to map the states to 0..28, so the Display-module can display states
# without knowing the difference
self.displayConf = np.zeros( self.size, int )
## Used to append cells to the list of cells to handle in the next step
def enlist( self, x, y ):
for i in ( ( (x) + (y)*self.sizeX ),
( (x+1) + (y)*self.sizeX ),
( (x-1) + (y)*self.sizeX ),
( (x) + (y-1)*self.sizeX ),
( (x) + (y+1)*self.sizeX ) ):
if self.cActArr[ i ] == False:
self.cActArr[ i ] = True
self.cList[self.cCounter] = i
self.cCounter += 1
## Updates all cells using scipy.weave.inline
def updateAllCellsWeaveInline( self ):
#
# All states are encoded in a bitmask:
#
# <--MSB 10 LSB
# ...... 0 0 0 0 0 0 0 0 0 X X u a1 a0 eps sc1 sc0 s2 s1 s0 e1 e0
# | | | | | | | | | | | | |-> current e
# XX = 00 -> U <--------| | | | | | | | | | | |----> next e
# XX = 01 -> C <----------| | | | | | | | | |
# XX = 10 -> S | | | | | | | | |-------> lsb on S
# XX = 11 -> T | | | | | | | |----------> ...
# | | | | | | |-------------> msb on S
# S{} is encoded as SMASK_111 | | | | | |-----------------> s-state counter
# | | | | |---------------------> s-state counter
# | | | |
# | | | |--------------------------> excited
# | | |-----------------------------> direction
# | |--------------------------------> direction
# |----------------------------------> special
#
vonNeumannCode = """
#include <stdlib.h>
#include <stdio.h>
#line 1 "CA.py"
#define UMASK 0
#define CMASK 2048 // 1 << 11
#define SMASK 4096 // 2 << 11
#define TMASK 6144 // 3 << 11
#define CSTATEMASK 3 // 1|2
#define SSTATEMASK 28 // 4|8|16
#define TSTATEMASK 1920 // 128|256|512|1024
#define e0 1
#define e1 2
#define s0 4
#define s1 8
#define s2 16
#define s 28 // s2|s1|s0
#define sc0 32
#define sc1 64
#define sc 96 // sc1|sc0
#define eps 128
#define a0 256
#define a1 512
#define a 768 // a1|a0
#define u 1024
#define U(x) ((x) == 0)
#define C(x) (((x) & CMASK) == CMASK)
#define S(x) (((x) & SMASK) == SMASK)
#define T(x) (((x) & TMASK) == TMASK)
#define A_UNSHIFT(x) (((x)&a)>>8)
#define SC_SHIFT(x) ((x)<<5)
#define SC_UNSHIFT(x) (((x)&sc)>>5)
int i, j, k, l;
int nbs[4];
int state;
for ( i = 1; i < sizeX-1; i++ ) {
for ( j = 1; j < sizeY-1; j++ ) {
state = cconf( i, j );
nbs[0] = cconf( i+1, j );
nbs[1] = cconf( i, j-1 );
nbs[2] = cconf( i-1, j );
nbs[3] = cconf( i, j+1 );
if ( T(state) ) { // transmission state
// transisition rule (T.1):
for ( k = 0; k < 4; k++ ) {
if ( T(nbs[k]) && ( abs(k-(A_UNSHIFT(nbs[k]))) == 2)
&& ((nbs[k]&u) != (state&u)) && (nbs[k]&eps) ) {
// (T.1)(alpha)
nconf( i, j ) = UMASK;
break;
}
}
if ( k < 4 ) continue;
// (T.1)(beta)
for ( k = 0; k < 4; k++ ) {
if ( T(nbs[k]) && (abs((A_UNSHIFT(nbs[k]))-(A_UNSHIFT(state))) != 2)
&& (abs(k-(A_UNSHIFT(nbs[k]))) == 2)
&& ((nbs[k]&u) == (state&u) ) && (nbs[k]&eps) ) {
// (T.1)(beta)(a)
nconf( i, j ) = state | eps;
break;
}
if ( C(nbs[k]) && (nbs[k]&e0) && (k-(A_UNSHIFT(state)) != 0) ) {
// (T.1)(beta)(b)
nconf( i, j ) = state | eps;
break;
}
}
if ( k < 4 ) continue;
// (T.1)(gamma)
nconf( i, j ) = TMASK | (state&u) | (state&a);
} // end of T(state)
else if ( C(state) ) { // confluent state
// transistion rule (T.2)
for ( k = 0; k < 4; k++ ) {
if ( T(nbs[k]) && (abs(k-A_UNSHIFT(nbs[k])) == 2)
&& (nbs[k]&eps) && (nbs[k]&u) ) {
// (T.2)(alpha)
nconf( i, j ) = UMASK;
break;
}
}
if ( k < 4 ) continue;
// (T.2)(beta)
for( k = 0; k < 4; k++ ) {
if ( T(nbs[k]) && (abs(k-A_UNSHIFT(nbs[k])) == 2)
&& (nbs[k]&eps) && !(nbs[k]&u) ) {
// (T.2)(beta)(a)
break;
}
}
if ( k < 4 ) {
for ( k = 0; k < 4; k++ ) {
if ( T(nbs[k]) && (abs(k-A_UNSHIFT(nbs[k])) == 2)
&& !(nbs[k]&eps) && !(nbs[k]&u) ) {
// (T.2)(beta)(b)
break;
}
}
if ( k == 4 ) {
nconf( i, j ) = CMASK | e1 | ((state&e1)>>1);
continue;
}
}
// (T.2)(gamma)
nconf( i, j ) = CMASK | ((state&e1)>>1);
} // end of C(state)
else if ( U(state) ) { // unexcitable state
// transition rule (T.3)
for ( k = 0; k < 4; k++ ) {
if ( T(nbs[k]) && (abs(k-A_UNSHIFT(nbs[k])) == 2) && (nbs[k]&eps) ) {
// (T.3)(alpha)
nconf( i, j ) = SMASK;
break;
}
}
// (T.3)(beta)
// doesn' change the state
} // end of U(state)
else if ( S(state) ) { // sensitized state
if ( !(state&sc1) ) {
// transition rule (T.4)
for ( k = 0; k < 4; k++ ) {
if ( T(nbs[k]) && (abs(k-A_UNSHIFT(nbs[k])) == 2) && (nbs[k]&eps) ) {
// (T.4)(alpha)
nconf( i, j ) = state | (s0<<(2-SC_UNSHIFT(state)));
break;
}
}
// (T.4)(beta)
// doesn't change the state but the counter
nconf( i, j ) += sc0;
} else {
if ( (state&sc) == sc ) {
for ( k = 0; k < 4; k++ ) {
if ( T(nbs[k]) && (abs(k-A_UNSHIFT(nbs[k])) == 2) && (nbs[k]&eps) ) {
nconf( i, j ) = TMASK | a0;
break;
}
}
if ( k == 4 ) {
nconf( i, j ) = TMASK;
}
} else {
for ( k = 0; k < 4; k++ ) {
if ( T(nbs[k]) && (abs(k-A_UNSHIFT(nbs[k])) == 2) && (nbs[k]&eps) ) {
nconf( i, j ) = state | s0;
break;
}
}
nconf( i, j ) += sc0;
if ( nconf( i, j ) & s ) {
// make transition from sensitized to transmission or confluent state
l = nconf( i, j );
if ( (l & s) == s ) {
nconf( i, j ) = CMASK;
} else {
// other leaves of the S-to-T-transition tree of depth 3
l += s0;
nconf( i, j ) = TMASK | ((l&s)<<6);
}
}
}// else {
// stay for another run
//}
}
}
else {
// this state is undefined!
}
}
}
"""
## Update cells, but only those that changed or are in the neighbourhood of one of those.
# This is done via bitchecking, and hence admittedly difficult to read.
# Every subsection of the transitionfunction from von Neumann's paper is marked.
def updateAllCellsWeaveInlineFewStates( self ):
#
# All states are encoded in a bitmask:
#
# <--MSB 10 LSB
# ...... 0 0 0 0 0 0 0 0 0 X X u a1 a0 eps sc1 sc0 s2 s1 s0 e1 e0
# | | | | | | | | | | | | |-> current e
# XX = 00 -> U <--------| | | | | | | | | | | |----> next e
# XX = 01 -> C <----------| | | | | | | | | |
# XX = 10 -> S | | | | | | | | |-------> lsb on S
# XX = 11 -> T | | | | | | | |----------> ...
# | | | | | | |-------------> msb on S
# S{} is encoded as SMASK_111 | | | | | |-----------------> s-state counter
# | | | | |---------------------> s-state counter
# | | | |
# | | | |--------------------------> excited
# | | |-----------------------------> direction
# | |--------------------------------> direction
# |----------------------------------> special
#
#
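        # Illustrative encodings (derived from the masks below): an ordinary,
        # unexcited transmission state with direction bits 00 is TMASK (6144);
        # the same state excited is TMASK|eps (6144|128 = 6272); a confluent
        # state that is excited both now and next step is CMASK|e0|e1 (2051).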
vonNeumannCodeFewStates = """
#include <stdlib.h>
#include <stdio.h>
#line 1 "VonNeumannDefinesInCA.py"
#define UMASK 0
#define CMASK 2048 // 1 << 11
#define SMASK 4096 // 2 << 11
#define TMASK 6144 // 3 << 11
#define CSTATEMASK 3 // 1|2
#define SSTATEMASK 28 // 4|8|16
#define TSTATEMASK 1920 // 128|256|512|1024
#define e0 1
#define e1 2
#define s0 4
#define s1 8
#define s2 16
#define s 28 // s2|s1|s0
#define sc0 32
#define sc1 64
#define sc 96 // sc1|sc0
#define eps 128
#define a0 256
#define a1 512
#define a 768 // a1|a0
#define u 1024
/* checkers for different kinds of states */
#define U(x) ((x) == 0)
#define C(x) (((x) & CMASK) == CMASK)
#define S(x) (((x) & SMASK) == SMASK)
#define T(x) (((x) & TMASK) == TMASK)
/* get the direction of a T-state and the 'age' of an S-state */
#define A_UNSHIFT(x) (((x)&a)>>8)
#define SC_SHIFT(x) ((x)<<5)
#define SC_UNSHIFT(x) (((x)&sc)>>5)
/* enlist a cell to be checked in the next step */
#define ENLIST(id) if ( !nActArr( (id) ) ) {\
nActArr( id ) = true;\
nList( nCounter++ ) = id;\
}
/* enlist a cell and its neighbourhood to be checked in the next step */
#define MARKNBH(x,y) ENLIST( (x)+(y)*sizeX );\
ENLIST( (x+1)+(y)*sizeX );\
ENLIST( (x-1)+(y)*sizeX );\
ENLIST( (x)+(y-1)*sizeX );\
ENLIST( (x)+(y+1)*sizeX );
#line 1 "VonNeumannCodeInCA.py"
int i, j, k, l, x, y, aa;
/* the neighbours' states */
int nbs[4];
/* the 'own' state */
int state;
/* the number of cells that have to be checked in the next step and is returned as return_val */
int nCounter = 0;
for ( i = 0; i < cCounter; i++ ) {
x = cList( i ) % sizeX;
y = cList( i ) / sizeX;
cActArr( cList( i ) ) = false;
state = cconf( x, y );
nbs[0] = cconf( x+1, y );
nbs[1] = cconf( x, y-1 );
nbs[2] = cconf( x-1, y );
nbs[3] = cconf( x, y+1 );
if ( T(state) ) { // transmission state
      // transition rule (T.1):
for ( k = 0; k < 4; k++ ) {
if ( T(nbs[k]) && ( abs(k-(A_UNSHIFT(nbs[k]))) == 2)
&& ((nbs[k]&u) != (state&u)) && (nbs[k]&eps) ) {
// (T.1)(alpha)
nconf( x, y ) = UMASK;
break;
}
}
if ( k < 4 ) continue;
// (T.1)(beta)
for ( k = 0; k < 4; k++ ) {
if ( T(nbs[k]) && (abs((A_UNSHIFT(nbs[k]))-(A_UNSHIFT(state))) != 2)
&& (abs(k-(A_UNSHIFT(nbs[k]))) == 2)
&& ((nbs[k]&u) == (state&u) ) && (nbs[k]&eps) ) {
// (T.1)(beta)(a)
nconf( x, y ) = state | eps;
MARKNBH( x, y );
break;
}
if ( C(nbs[k]) && (nbs[k]&e0) && (k-(A_UNSHIFT(state)) != 0) ) {
// (T.1)(beta)(b)
nconf( x, y ) = state | eps;
MARKNBH( x, y );
break;
}
}
if ( k < 4 ) continue;
// (T.1)(gamma)
// don't enlist, since cell is not active
// MARKNBH( x, y );
nconf( x, y ) = TMASK | (state&u) | (state&a);
} // end of T(state)
else if ( C(state) ) { // confluent state
      // transition rule (T.2)
for ( k = 0; k < 4; k++ ) {
if ( T(nbs[k]) && (abs(k-A_UNSHIFT(nbs[k])) == 2)
&& (nbs[k]&eps) && (nbs[k]&u) ) {
// (T.2)(alpha)
// don't enlist, since cell is not active
nconf( x, y ) = UMASK;
break;
}
}
if ( k < 4 ) continue;
// (T.2)(beta)
for( k = 0; k < 4; k++ ) {
if ( T(nbs[k]) && (abs(k-A_UNSHIFT(nbs[k])) == 2)
&& (nbs[k]&eps) && !(nbs[k]&u) ) {
// (T.2)(beta)(a)
MARKNBH( x, y );
break;
}
}
if ( k < 4 ) {
for ( k = 0; k < 4; k++ ) {
if ( T(nbs[k]) && (abs(k-A_UNSHIFT(nbs[k])) == 2)
&& !(nbs[k]&eps) && !(nbs[k]&u) ) {
// (T.2)(beta)(b)
MARKNBH( x, y );
break;
}
}
if ( k == 4 ) {
nconf( x, y ) = CMASK | e1 | ((state&e1)>>1);
MARKNBH( x, y );
continue;
}
}
// (T.2)(gamma)
nconf( x, y ) = CMASK | ((state&e1)>>1);
MARKNBH( x, y );
} // end of C(state)
else if ( U(state) ) { // unexcitable state
// transition rule (T.3)
for ( k = 0; k < 4; k++ ) {
if ( T(nbs[k]) && (abs(k-A_UNSHIFT(nbs[k])) == 2) && (nbs[k]&eps) ) {
// (T.3)(alpha)
nconf( x, y ) = SMASK;
MARKNBH( x, y );
break;
}
}
// (T.3)(beta)
      // doesn't change the state
} // end of U(state)
else if ( S(state) ) { // sensitized state
MARKNBH( x, y );
if ( !(state&sc1) ) {
// transition rule (T.4)
for ( k = 0; k < 4; k++ ) {
if ( T(nbs[k]) && (abs(k-A_UNSHIFT(nbs[k])) == 2) && (nbs[k]&eps) ) {
// (T.4)(alpha)
nconf( x, y ) = state | (s0<<(2-SC_UNSHIFT(state)));
break;
}
}
// (T.4)(beta)
// doesn't change the state but the counter
nconf( x, y ) += sc0;
} else {
if ( (state&sc) == sc ) {
for ( k = 0; k < 4; k++ ) {
if ( T(nbs[k]) && (abs(k-A_UNSHIFT(nbs[k])) == 2) && (nbs[k]&eps) ) {
nconf( x, y ) = TMASK | a0;
break;
}
}
if ( k == 4 ) {
nconf( x, y ) = TMASK;
}
} else {
for ( k = 0; k < 4; k++ ) {
if ( T(nbs[k]) && (abs(k-A_UNSHIFT(nbs[k])) == 2) && (nbs[k]&eps) ) {
nconf( x, y ) = state | s0;
break;
}
}
nconf( x, y ) += sc0;
if ( nconf( x, y ) & s ) {
// make transition from sensitized to transmission or confluent state
l = nconf( x, y );
if ( (l & s) == s ) {
nconf( x, y ) = CMASK;
} else {
// other leaves of the S-to-T-transition tree of depth 3
l += s0;
nconf( x, y ) = TMASK | ((l&s)<<6);
}
}
}// else {
// stay for another run
//}
}
}
else {
// this state is undefined!
}
}
return_val = nCounter;
"""
| bsd-3-clause | 5,987,590,987,266,482,000 | 32.368687 | 98 | 0.401796 | false | 2.84172 | false | false | false |
nick-youngblut/SIPSim | SIPSim/Commands/KDE_plot.py | 1 | 3536 | #!/usr/bin/env python
"""
KDE_plot: make plots of each KDE (1D or 2D)
Usage:
KDE_plot [options] <kde>
KDE_plot -h | --help
KDE_plot --version
Options:
<kde> Pickled KDE object
('-' if input from STDIN)
-o=<o> Output file name.
[Default: KDE.png]
-n=<n> Number of taxon KDEs to plot (0 = all plotted).
[Default: 0]
--nCol=<nc> Number of subplot columns.
[Default: 1]
--xMin=<xm> Minimum x-axis value ('' = min value in dataset).
[Default: ]
--xMax=<xM> Maximum x-axis value ('' = max value in dataset).
[Default: ]
--yMin=<ym> Minimum y-axis value ('' = min value in dataset).
[Default: ]
--yMax=<yM> Maximum y-axis value ('' = max value in dataset).
[Default: ]
--xStep=<xs> X dimension granularity.
[Default: 0.0005]
  --yStep=<ys>    Y dimension granularity.
[Default: 100]
--xX=<xx> X dimension figure size multiplier (ncol * x)
[Default: 4]
--yX=<yx> Y dimension figure size multiplier (ncol * x)
[Default: 3.5]
--logY=<ly> Base for y-axis log scaling ('' = no log scaling).
[Default: ]
-h --help Show this screen.
--version Show version.
--debug Debug mode
Description:
  Plot each KDE (1D or 2D) in the provided multi-KDE object.
Output:
Image files written to `-o`
"""
# import
## batteries
from docopt import docopt
import sys,os
## application libraries
scriptDir = os.path.dirname(__file__)
libDir = os.path.join(scriptDir, '../lib/')
sys.path.append(libDir)
# application
from SIPSim import Utils
from SIPSim import FigGen
def main(args=None):
KDEs = Utils.load_kde(args['<kde>'])
try:
FigGen.KDE_ndims(KDEs)
except AttributeError:
outFile = os.path.splitext(args['-o'])
msg = 'Processing library: "{}"\n'
for lib,x in KDEs.items():
sys.stderr.write(msg.format(lib))
outName = ''.join([outFile[0], '_', str(lib), outFile[1]])
FigGen.make_kde_fig(x, outName,
n_subplot=args['-n'],
ncol=args['--nCol'],
xMin=args['--xMin'],
xMax=args['--xMax'],
yMin=args['--yMin'],
yMax=args['--yMax'],
xStep=args['--xStep'],
yStep=args['--yStep'],
xX=args['--xX'],
yX=args['--yX'],
logY=args['--logY'])
else:
FigGen.make_kde_fig(KDEs, args['-o'],
n_subplot=args['-n'],
ncol=args['--nCol'],
xMin=args['--xMin'],
xMax=args['--xMax'],
yMin=args['--yMin'],
yMax=args['--yMax'],
xStep=args['--xStep'],
yStep=args['--yStep'],
xX=args['--xX'],
yX=args['--yX'],
logY=args['--logY'])
def opt_parse(args=None):
if args is None:
args = docopt(__doc__, version='0.1')
else:
args = docopt(__doc__, version='0.1', argv=args)
main(args)
| mit | 8,313,887,363,895,623,000 | 33 | 70 | 0.451357 | false | 3.664249 | false | false | false |
fandrefh/AnjoMeu | anjo/core/forms.py | 2 | 1138 | #coding: utf-8
from django import forms
from localflavor.br.br_states import STATE_CHOICES
from django.contrib.auth.models import User
from .models import UserProfile, Testimonials, UserBank, Banks
class TestimonialsForm(forms.ModelForm):
class Meta:
model = Testimonials
exclude = ('active',)
class UserForm(forms.ModelForm):
first_name = forms.CharField(label='Nome')
last_name = forms.CharField(label='Sobrenome')
username = forms.CharField(label='Nome de usuário')
email = forms.EmailField(label='E-mail')
password = forms.CharField(label='Senha', widget=forms.PasswordInput())
class Meta:
model = User
fields = ('first_name', 'last_name', 'username', 'email', 'password')
class UserProfileForm(forms.ModelForm):
state = forms.ChoiceField(label='Estado', choices=STATE_CHOICES)
class Meta:
model = UserProfile
fields = ('cpf', 'birthday', 'gender', 'address', 'code_postal', 'neighborhood', 'state', 'city', 'phone_number')
class BanksForm(forms.ModelForm):
class Meta:
model = Banks
		exclude = ('code_bank',)
class UserBankForm(forms.ModelForm):
class Meta:
model = UserBank
		exclude = ('user',) | gpl-2.0 | 428,124,541,730,074,940 | 28.179487 | 115 | 0.729991 | false | 3.211864 | false | false | false
pvagner/orca | src/orca/liveregions.py | 2 | 21474 | import bisect
import copy
import pyatspi
import time
from gi.repository import GLib
from . import cmdnames
from . import chnames
from . import keybindings
from . import messages
from . import input_event
from . import orca_state
from . import settings_manager
_settingsManager = settings_manager.getManager()
# define 'live' property types
LIVE_OFF = -1
LIVE_NONE = 0
LIVE_POLITE = 1
LIVE_ASSERTIVE = 2
LIVE_RUDE = 3
# Seconds a message is held in the queue before it is discarded
MSG_KEEPALIVE_TIME = 45 # in seconds
# The number of messages that are cached and can later be reviewed via
# LiveRegionManager.reviewLiveAnnouncement.
CACHE_SIZE = 9 # corresponds to one of nine key bindings
class PriorityQueue:
""" This class represents a thread **UNSAFE** priority queue where priority
is determined by the given integer priority. The entries are also
maintained in chronological order.
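    Illustrative example (the LIVE_* constants are defined above):
        >>> q = PriorityQueue()
        >>> q.enqueue('polite message', LIVE_POLITE, None)
        >>> q.enqueue('rude message', LIVE_RUDE, None)
        >>> len(q)
        2
        >>> q.purgeByPriority(LIVE_POLITE)  # drops priority <= LIVE_POLITE
        >>> len(q)
        1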
TODO: experiment with Queue.Queue to make thread safe
"""
def __init__(self):
self.queue = []
def enqueue(self, data, priority, obj):
""" Add a new element to the queue according to 1) priority and
2) timestamp. """
bisect.insort_left(self.queue, (priority, time.time(), data, obj))
def dequeue(self):
"""get the highest priority element from the queue. """
return self.queue.pop(0)
def clear(self):
""" Clear the queue """
self.queue = []
def purgeByKeepAlive(self):
""" Purge items from the queue that are older than the keepalive
time """
currenttime = time.time()
myfilter = lambda item: item[1] + MSG_KEEPALIVE_TIME > currenttime
self.queue = list(filter(myfilter, self.queue))
def purgeByPriority(self, priority):
""" Purge items from the queue that have a lower than or equal priority
than the given argument """
myfilter = lambda item: item[0] > priority
self.queue = list(filter(myfilter, self.queue))
def __len__(self):
""" Return the length of the queue """
return len(self.queue)
class LiveRegionManager:
def __init__(self, script):
self._script = script
# message priority queue
self.msg_queue = PriorityQueue()
self.inputEventHandlers = self._getInputEventHandlers()
self.keyBindings = self._getKeyBindings()
# This is temporary.
self.functions = [self.advancePoliteness,
self.setLivePolitenessOff,
self.toggleMonitoring,
self.reviewLiveAnnouncement]
# Message cache. Used to store up to 9 previous messages so user can
# review if desired.
self.msg_cache = []
# User overrides for politeness settings.
self._politenessOverrides = None
self._restoreOverrides = None
# last live obj to be announced
self.lastliveobj = None
# Used to track whether a user wants to monitor all live regions
# Not to be confused with the global Gecko.liveRegionsOn which
# completely turns off live region support. This one is based on
# a user control by changing politeness levels to LIVE_OFF or back
# to the bookmark or markup politeness value.
self.monitoring = True
# Set up politeness level overrides and subscribe to bookmarks
# for load and save user events.
# We are initialized after bookmarks so call the load handler once
# to get initialized.
#
self.bookmarkLoadHandler()
script.bookmarks.addSaveObserver(self.bookmarkSaveHandler)
script.bookmarks.addLoadObserver(self.bookmarkLoadHandler)
def _getInputEventHandlers(self):
handlers = {}
handlers["advanceLivePoliteness"] = \
input_event.InputEventHandler(
self.advancePoliteness,
cmdnames.LIVE_REGIONS_ADVANCE_POLITENESS)
handlers["setLivePolitenessOff"] = \
input_event.InputEventHandler(
self.setLivePolitenessOff,
cmdnames.LIVE_REGIONS_SET_POLITENESS_OFF)
handlers["monitorLiveRegions"] = \
input_event.InputEventHandler(
self.toggleMonitoring,
cmdnames.LIVE_REGIONS_MONITOR)
handlers["reviewLiveAnnouncement"] = \
input_event.InputEventHandler(
self.reviewLiveAnnouncement,
cmdnames.LIVE_REGIONS_REVIEW)
return handlers
def _getKeyBindings(self):
keyBindings = keybindings.KeyBindings()
keyBindings.add(
keybindings.KeyBinding(
"backslash",
keybindings.defaultModifierMask,
keybindings.NO_MODIFIER_MASK,
self.inputEventHandlers.get("advanceLivePoliteness")))
keyBindings.add(
keybindings.KeyBinding(
"backslash",
keybindings.defaultModifierMask,
keybindings.SHIFT_MODIFIER_MASK,
self.inputEventHandlers.get("setLivePolitenessOff")))
keyBindings.add(
keybindings.KeyBinding(
"backslash",
keybindings.defaultModifierMask,
keybindings.ORCA_SHIFT_MODIFIER_MASK,
self.inputEventHandlers.get("monitorLiveRegions")))
for key in ["F1", "F2", "F3", "F4", "F5", "F6", "F7", "F8", "F9"]:
keyBindings.add(
keybindings.KeyBinding(
key,
keybindings.defaultModifierMask,
keybindings.ORCA_MODIFIER_MASK,
self.inputEventHandlers.get("reviewLiveAnnouncement")))
return keyBindings
def reset(self):
# First we will purge our politeness override dictionary of LIVE_NONE
# objects that are not registered for this page
newpoliteness = {}
currenturi = self._script.bookmarks.getURIKey()
for key, value in list(self._politenessOverrides.items()):
if key[0] == currenturi or value != LIVE_NONE:
newpoliteness[key] = value
self._politenessOverrides = newpoliteness
def bookmarkSaveHandler(self):
"""Bookmark save callback"""
self._script.bookmarks.saveBookmarksToDisk(self._politenessOverrides,
filename='politeness')
def bookmarkLoadHandler(self):
"""Bookmark load callback"""
# readBookmarksFromDisk() returns None on error. Just initialize to an
# empty dictionary if this is the case.
self._politenessOverrides = \
self._script.bookmarks.readBookmarksFromDisk(filename='politeness') \
or {}
def handleEvent(self, event):
"""Main live region event handler"""
politeness = self._getLiveType(event.source)
if politeness == LIVE_OFF:
return
if politeness == LIVE_NONE:
# All the 'registered' LIVE_NONE objects will be set to off
# if not monitoring. We will ignore LIVE_NONE objects that
# arrive after the user switches off monitoring.
if not self.monitoring:
return
elif politeness == LIVE_POLITE:
# Nothing to do for now
pass
elif politeness == LIVE_ASSERTIVE:
self.msg_queue.purgeByPriority(LIVE_POLITE)
elif politeness == LIVE_RUDE:
self.msg_queue.purgeByPriority(LIVE_ASSERTIVE)
message = self._getMessage(event)
if message:
if len(self.msg_queue) == 0:
GLib.timeout_add(100, self.pumpMessages)
self.msg_queue.enqueue(message, politeness, event.source)
def pumpMessages(self):
""" Main gobject callback for live region support. Handles both
purging the message queue and outputting any queued messages that
were queued up in the handleEvent() method.
"""
if len(self.msg_queue) > 0:
self.msg_queue.purgeByKeepAlive()
politeness, timestamp, message, obj = self.msg_queue.dequeue()
# Form output message. No need to repeat labels and content.
# TODO: really needs to be tested in real life cases. Perhaps
# a verbosity setting?
if message['labels'] == message['content']:
utts = message['content']
else:
utts = message['labels'] + message['content']
self._script.presentMessage(utts)
# set the last live obj to be announced
self.lastliveobj = obj
# cache our message
self._cacheMessage(utts)
# We still want to maintain our queue if we are not monitoring
if not self.monitoring:
self.msg_queue.purgeByKeepAlive()
# See you again soon, stay in event loop if we still have messages.
if len(self.msg_queue) > 0:
return True
else:
return False
def getLiveNoneObjects(self):
"""Return the live objects that are registered and have a politeness
of LIVE_NONE. """
retval = []
currenturi = self._script.bookmarks.getURIKey()
for uri, objectid in self._politenessOverrides:
if uri == currenturi and isinstance(objectid, tuple):
retval.append(self._script.bookmarks.pathToObj(objectid))
return retval
def advancePoliteness(self, script, inputEvent):
"""Advance the politeness level of the given object"""
if not _settingsManager.getSetting('inferLiveRegions'):
self._script.presentMessage(messages.LIVE_REGIONS_OFF)
return
obj = orca_state.locusOfFocus
objectid = self._getObjectId(obj)
uri = self._script.bookmarks.getURIKey()
try:
# The current priority is either a previous override or the
# live property. If an exception is thrown, an override for
# this object has never occurred and the object does not have
# live markup. In either case, set the override to LIVE_NONE.
cur_priority = self._politenessOverrides[(uri, objectid)]
except KeyError:
cur_priority = self._liveStringToType(obj)
if cur_priority == LIVE_OFF or cur_priority == LIVE_NONE:
self._politenessOverrides[(uri, objectid)] = LIVE_POLITE
self._script.presentMessage(messages.LIVE_REGIONS_LEVEL_POLITE)
elif cur_priority == LIVE_POLITE:
self._politenessOverrides[(uri, objectid)] = LIVE_ASSERTIVE
self._script.presentMessage(messages.LIVE_REGIONS_LEVEL_ASSERTIVE)
elif cur_priority == LIVE_ASSERTIVE:
self._politenessOverrides[(uri, objectid)] = LIVE_RUDE
self._script.presentMessage(messages.LIVE_REGIONS_LEVEL_RUDE)
elif cur_priority == LIVE_RUDE:
self._politenessOverrides[(uri, objectid)] = LIVE_OFF
self._script.presentMessage(messages.LIVE_REGIONS_LEVEL_OFF)
def goLastLiveRegion(self):
"""Move the caret to the last announced live region and speak the
contents of that object"""
if self.lastliveobj:
self._script.utilities.setCaretPosition(self.lastliveobj, 0)
self._script.speakContents(self._script.utilities.getObjectContentsAtOffset(
self.lastliveobj, 0))
def reviewLiveAnnouncement(self, script, inputEvent):
"""Speak the given number cached message"""
msgnum = int(inputEvent.event_string[1:])
if not _settingsManager.getSetting('inferLiveRegions'):
self._script.presentMessage(messages.LIVE_REGIONS_OFF)
return
if msgnum > len(self.msg_cache):
self._script.presentMessage(messages.LIVE_REGIONS_NO_MESSAGE)
else:
self._script.presentMessage(self.msg_cache[-msgnum])
def setLivePolitenessOff(self, script, inputEvent):
"""User toggle to set all live regions to LIVE_OFF or back to their
original politeness."""
if not _settingsManager.getSetting('inferLiveRegions'):
self._script.presentMessage(messages.LIVE_REGIONS_OFF)
return
# start at the document frame
docframe = self._script.utilities.documentFrame()
# get the URI of the page. It is used as a partial key.
uri = self._script.bookmarks.getURIKey()
# The user is currently monitoring live regions but now wants to
# change all live region politeness on page to LIVE_OFF
if self.monitoring:
self._script.presentMessage(messages.LIVE_REGIONS_ALL_OFF)
self.msg_queue.clear()
# First we'll save off a copy for quick restoration
self._restoreOverrides = copy.copy(self._politenessOverrides)
# Set all politeness overrides to LIVE_OFF.
for override in list(self._politenessOverrides.keys()):
self._politenessOverrides[override] = LIVE_OFF
# look through all the objects on the page and set/add to
# politeness overrides. This only adds live regions with good
# markup.
matches = pyatspi.findAllDescendants(docframe, self.matchLiveRegion)
for match in matches:
objectid = self._getObjectId(match)
self._politenessOverrides[(uri, objectid)] = LIVE_OFF
# Toggle our flag
self.monitoring = False
# The user wants to restore politeness levels
else:
for key, value in list(self._restoreOverrides.items()):
self._politenessOverrides[key] = value
self._script.presentMessage(messages.LIVE_REGIONS_ALL_RESTORED)
# Toggle our flag
self.monitoring = True
def generateLiveRegionDescription(self, obj, **args):
"""Used in conjuction with whereAmI to output description and
politeness of the given live region object"""
objectid = self._getObjectId(obj)
uri = self._script.bookmarks.getURIKey()
results = []
# get the description if there is one.
for relation in obj.getRelationSet():
relationtype = relation.getRelationType()
if relationtype == pyatspi.RELATION_DESCRIBED_BY:
targetobj = relation.getTarget(0)
try:
# We will add on descriptions if they don't duplicate
# what's already in the object's description.
# See http://bugzilla.gnome.org/show_bug.cgi?id=568467
# for more information.
#
description = targetobj.queryText().getText(0, -1)
if description.strip() != obj.description.strip():
results.append(description)
                except NotImplementedError:
pass
# get the politeness level as a string
try:
livepriority = self._politenessOverrides[(uri, objectid)]
liveprioritystr = self._liveTypeToString(livepriority)
except KeyError:
liveprioritystr = 'none'
# We will only output useful information
#
if results or liveprioritystr != 'none':
results.append(messages.LIVE_REGIONS_LEVEL % liveprioritystr)
return results
def matchLiveRegion(self, obj):
"""Predicate used to find a live region"""
attrs = self._getAttrDictionary(obj)
return 'container-live' in attrs
def _getMessage(self, event):
"""Gets the message associated with a given live event."""
attrs = self._getAttrDictionary(event.source)
content = ""
labels = ""
# A message is divided into two parts: labels and content. We
# will first try to get the content. If there is None,
# assume it is an invalid message and return None
if event.type.startswith('object:children-changed:add'):
if attrs.get('container-atomic') == 'true':
content = self._script.utilities.expandEOCs(event.source)
else:
content = self._script.utilities.expandEOCs(event.any_data)
elif event.type.startswith('object:text-changed:insert'):
if attrs.get('container-atomic') != 'true':
content = event.any_data
else:
text = self._script.utilities.queryNonEmptyText(event.source)
if text:
content = text.getText(0, -1)
if not content:
return None
content = content.strip()
if len(content) == 1:
content = chnames.getCharacterName(content)
# Proper live regions typically come with proper aria labels. These
# labels are typically exposed as names. Failing that, descriptions.
# Looking for actual labels seems a non-performant waste of time.
name = (event.source.name or event.source.description).strip()
if name and name != content:
labels = name
# instantly send out notify messages
if attrs.get('channel') == 'notify':
utts = labels + content
self._script.presentationInterrupt()
self._script.presentMessage(utts)
return None
return {'content':[content], 'labels':[labels]}
def flushMessages(self):
self.msg_queue.clear()
def _cacheMessage(self, utts):
"""Cache a message in our cache list of length CACHE_SIZE"""
self.msg_cache.append(utts)
if len(self.msg_cache) > CACHE_SIZE:
self.msg_cache.pop(0)
def _getLiveType(self, obj):
"""Returns the live politeness setting for a given object. Also,
registers LIVE_NONE objects in politeness overrides when monitoring."""
objectid = self._getObjectId(obj)
uri = self._script.bookmarks.getURIKey()
if (uri, objectid) in self._politenessOverrides:
# look to see if there is a user politeness override
return self._politenessOverrides[(uri, objectid)]
else:
livetype = self._liveStringToType(obj)
# We'll save off a reference to LIVE_NONE if we are monitoring
# to give the user a chance to change the politeness level. It
# is done here for performance sake (objectid, uri are expensive)
if livetype == LIVE_NONE and self.monitoring:
self._politenessOverrides[(uri, objectid)] = livetype
return livetype
def _getObjectId(self, obj):
"""Returns the HTML 'id' or a path to the object is an HTML id is
unavailable"""
attrs = self._getAttrDictionary(obj)
if attrs is None:
return self._getPath(obj)
try:
return attrs['id']
except KeyError:
return self._getPath(obj)
def _liveStringToType(self, obj, attributes=None):
"""Returns the politeness enum for a given object"""
attrs = attributes or self._getAttrDictionary(obj)
try:
if attrs['container-live'] == 'off':
return LIVE_OFF
elif attrs['container-live'] == 'polite':
return LIVE_POLITE
elif attrs['container-live'] == 'assertive':
return LIVE_ASSERTIVE
elif attrs['container-live'] == 'rude':
return LIVE_RUDE
else: return LIVE_NONE
except KeyError:
return LIVE_NONE
def _liveTypeToString(self, politeness):
"""Returns the politeness level as a string given a politeness enum"""
if politeness == LIVE_OFF:
return 'off'
elif politeness == LIVE_POLITE:
return 'polite'
elif politeness == LIVE_ASSERTIVE:
return 'assertive'
elif politeness == LIVE_RUDE:
return 'rude'
elif politeness == LIVE_NONE:
return 'none'
else: return 'unknown'
def _getAttrDictionary(self, obj):
try:
return dict([attr.split(':', 1) for attr in obj.getAttributes()])
except:
return {}
def _getPath(self, obj):
""" Returns, as a tuple of integers, the path from the given object
to the document frame."""
docframe = self._script.utilities.documentFrame()
path = []
while True:
if obj.parent is None or obj == docframe:
path.reverse()
return tuple(path)
try:
path.append(obj.getIndexInParent())
except Exception:
raise LookupError
obj = obj.parent
def toggleMonitoring(self, script, inputEvent):
if not _settingsManager.getSetting('inferLiveRegions'):
_settingsManager.setSetting('inferLiveRegions', True)
self._script.presentMessage(messages.LIVE_REGIONS_MONITORING_ON)
else:
_settingsManager.setSetting('inferLiveRegions', False)
self.flushMessages()
self._script.presentMessage(messages.LIVE_REGIONS_MONITORING_OFF)
| lgpl-2.1 | 1,968,413,401,298,374,400 | 38.043636 | 88 | 0.605476 | false | 4.356665 | false | false | false |
SeanDS/pygeosolve | pygeosolve/problem.py | 1 | 6569 | from __future__ import division
import numpy as np
import scipy.optimize as opt
import imp
import geometry
import plot
"""Problem classes"""
class Problem(object):
params = []
"""Parameters associated with this problem."""
constraints = []
"""Constraints associated with this problem."""
error_calc_count = 0
"""Number of times this problem's error has been calculated."""
def add_constraint(self, constraint):
"""Adds a constraint to this problem.
:param constraint: the \
:class:`~pygeosolve.constraints.AbstractConstraint` to add
"""
# add constraint
self.constraints.append(constraint)
# extract its parameters
self._add_constraint_params(constraint)
def _add_param(self, param):
"""Adds a parameter to this problem.
:param param: the :class:`~pygeosolve.parameters.Parameter` to add
"""
# add parameter
self.params.append(param)
def _add_constraint_params(self, constraint):
"""Adds the parameters from a constraint to this problem.
:param constraint: the \
:class:`~pygeosolve.constraints.AbstractConstraint` to extract the \
parameters from
"""
# loop over the constraint's parameters
for param in constraint.params:
# check if parameter already exists in list
if param not in self.params:
# add parameter
self._add_param(param)
def free_params(self):
"""Non-fixed parameters associated with this problem.
:return: list of free :class:`~pygeosolve.parameters.Parameter` objects
"""
# empty list of free parameters
free = []
# loop over this problem's parameters
for param in self.params:
# identify free parameters
if not param.fixed:
# add to list
free.append(param)
return free
def free_param_vals(self):
"""Values of non-fixed parameters associated with this problem.
:return: list of free :class:`~pygeosolve.parameters.Parameter` values
"""
# return values extracted from list of free parameters
return np.array([param.value for param in self.free_params()])
def _set_free_param_vals(self, values):
"""Sets values of non-fixed parameters in this problem.
:param values: list of new values to set, in the same order as the \
free parameters returned by `free_param_vals`
"""
# loop over free parameters and the new values
for param, value in zip(self.free_params(), values):
# set the new value of this parameter
param.value = value
def error(self):
"""Calculates the total error associated with this problem.
:return: total of individual \
:class:`~pygeosolve.constraints.AbstractConstraint` errors"""
# calculate error sum
error = sum([constraint.error() for constraint in self.constraints])
# increment error calculation count
self.error_calc_count += 1
return error
def _error_with_vals(self, vals):
"""Sets new free parameter values and returns the new error.
:param vals: the new free parameter values to set"""
# set free parameter values
self._set_free_param_vals(vals)
# return new error
return self.error()
def _error_methods(self):
"""Creates a list of error dicts in scipy.optimize format."""
# empty constraints list
constraints = []
# create list of dicts
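        # scipy.optimize treats each {'type': 'ineq', 'fun': f} entry as the
        # requirement f(x) >= 0 at the solution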
for constraint in self.constraints:
constraints.append({'type': 'ineq', 'fun': constraint.error})
return constraints
def solve(self):
"""Solves the problem.
This method attempts to minimise the error function given the
constraints defined within the problem. A successful minimisation
results in the new, optimised parameter values being assigned."""
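        # Illustrative usage (the constraint object here is hypothetical and
        # would come from this package's constraints module):
        #     problem = Problem()
        #     problem.add_constraint(some_length_constraint)
        #     problem.solve()
        #     print(problem.solution)  # scipy OptimizeResult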
# first guess at solution - just use current values
x0 = self.free_param_vals()
# call optimisation routine
self.solution = opt.minimize(fun=self._error_with_vals, x0=x0, \
method="COBYLA", tol=1e-10, constraints=self._error_methods())
#self.solution = opt.minimize(fun=self._error_with_vals, x0=x0, \
#method="SLSQP", constraints=self._constraint_functions(), \
#options={'maxiter': 1000000})
#self.solution = opt.basinhopping(self._error_with_vals, x0=x0, \
#niter=1000)
# update parameters from solution
self._update()
def solution_exists(self):
"""Checks if a solution has been computed.
:return: True if solution exists, otherwise False"""
return self.solution is not None
def _update(self):
"""Updates the list of free parameters associated with this problem.
This method retrieves the values from the optimisation result and
updates each one's corresponding parameter."""
# check if solution exists
if not self.solution_exists():
# cannot update values without a solution
raise Exception("Solution has not been computed")
# update values from the optimisation result's solution
self._set_free_param_vals(self.solution.x)
def plot(self, *args, **kwargs):
"""Plots the problem with its current values.
Requires the PyQt4 module."""
# try to find PyQt4 module
try:
imp.find_module("PyQt4")
except ImportError:
raise Exception("The PyQt4 module is required for plotting")
# create canvas
canvas = plot.Canvas()
# empty list of lines added to canvas
lines = []
# add lines to canvas
# TODO: add support for different primitives
for constraint in self.constraints:
for primitive in constraint.primitives:
if isinstance(primitive, geometry.Line):
canvas.add_line(primitive)
# show canvas
canvas.show(*args, **kwargs)
def __str__(self):
"""String representation of this problem.
:return: description of problem"""
# build list of parameter string representations
param_str = "\n\t" + "\n\t".join([str(param) for param in self.params])
# return description
return "Problem with parameters:{0}".format(param_str)
| gpl-3.0 | -8,406,412,917,677,966,000 | 29.696262 | 79 | 0.619577 | false | 4.665483 | false | false | false |
KarrLab/kinetic_datanator | tests/elasticsearch_kl/test_batch_load.py | 1 | 2253 | import unittest
from datanator.elasticsearch_kl import batch_load
from datanator_query_python.config import config
import tempfile
import shutil
import requests
class TestMongoToES(unittest.TestCase):
@classmethod
def setUpClass(cls):
cls.cache_dir = tempfile.mkdtemp()
cls.src = batch_load.MongoToES(profile_name='es-poweruser', credential_path='~/.wc/third_party/aws_credentials',
config_path='~/.wc/third_party/aws_config', elastic_path='~/.wc/third_party/elasticsearch.ini',
cache_dir=cls.cache_dir, service_name='es', index='test', max_entries=float('inf'), verbose=True)
cls.url = cls.src.es_endpoint + '/' + cls.src.index
requests.delete(cls.url, auth=cls.src.awsauth)
conf = config.Config()
cls.username = conf.USERNAME
cls.password = conf.PASSWORD
cls.server = conf.SERVER
cls.authDB = conf.AUTHDB
cls.db = 'datanator'
@classmethod
def tearDownClass(cls):
shutil.rmtree(cls.cache_dir)
requests.delete(cls.url, auth=cls.src.awsauth)
def test_connection(self):
result = self.src.client.list_domain_names()
self.assertEqual(result['ResponseMetadata']['HTTPStatusCode'], 200)
self.assertTrue('datanator-elasticsearch' in self.src.es_endpoint)
def test_data_from_mongo(self):
count, _ = self.src.data_from_mongo_protein(self.server, self.db, self.username,
self.password, authSource=self.authDB)
self.assertTrue(count >= 1000)
def test_data_from_metabolite(self):
_, count_0, _, count_1 = self.src.data_from_mongo_metabolite(self.server, self.db, self.username,
self.password, authSource=self.authDB)
self.assertTrue(count_0 >= 1000)
self.assertTrue(count_1 >= 1000)
def test_data_from_metabolites_meta(self):
doc = self.src.data_from_mongo_metabolites_meta(self.server, self.db, self.username,
self.password, authSource=self.authDB)
result = []
for i in range(5):
result.append(doc)
self.assertEqual(len(result), 5) | mit | -4,733,256,728,715,654,000 | 42.346154 | 120 | 0.620506 | false | 3.730132 | true | false | false |
BeyondTheClouds/rome | lib/rome/core/expression/expression.py | 1 | 15467 | __author__ = 'jonathan'
import datetime
import pytz
from lib.rome.core.dataformat import get_decoder
import re
import uuid
from sqlalchemy.sql.expression import BinaryExpression
from lib.rome.core.rows.rows import get_attribute, has_attribute
from lib.rome.core.utils import DATE_FORMAT, datetime_to_int
def uncapitalize(s):
return s[:1].lower() + s[1:] if s else ''
def get_attribute_reccursively(obj, attr, otherwise=None):
""" A reccursive getattr function.
:param obj: the object that will be use to perform the search
:param attr: the searched attribute
:param otherwise: value returned in case attr was not found
:return:
"""
try:
if not "." in attr:
return get_attribute(obj, attr.replace("\"", ""))
else:
current_key = attr[:attr.index(".")]
next_key = attr[attr.index(".") + 1:]
if has_attribute(obj, current_key):
current_object = get_attribute(obj, current_key)
elif has_attribute(obj, current_key.capitalize()):
current_object = get_attribute(obj, current_key.capitalize())
elif has_attribute(obj, uncapitalize(current_key)):
current_object = get_attribute(obj, uncapitalize(current_key))
else:
current_object = get_attribute(obj, current_key)
if type(obj) is dict and next_key in obj:
return obj[next_key]
return get_attribute_reccursively(current_object, next_key, otherwise)
except AttributeError:
return otherwise
class LazyDictionnary:
"""This temporary class is used to make a dict acting like an object. This code can be found at:
http://stackoverflow.com/questions/1305532/convert-python-dict-to-object
"""
def __init__(self, **entries):
self.entries = entries
self._cache = {}
self.deconverter = get_decoder()
def keys(self):
return self.entries.keys()
def __getattr__(self, item):
if item not in self._cache:
raw_value = self.entries[item] if item in self.entries else None
deconverted_value = self.deconverter.desimplify(raw_value)
self._cache[item] = deconverted_value
return self._cache[item]
boolean_expression_str_memory = {}
class BooleanExpression(object):
def __init__(self, operator, *exps):
def transform_exp(exp):
if type(exp) is not BooleanExpression and self.operator != "NORMAL":
return BooleanExpression("NORMAL", exp)
else:
return exp
self.operator = operator
self.exps = map(lambda x: transform_exp(x), exps)
self.deconverter = get_decoder()
self.compiled_expression = ""
self.uuid = str(uuid.uuid1()).replace("-", "")
self.is_joining_expression = True
self.tables_involved = []
""" Prepare the expression. """
self.variable_substitution_dict = {}
self.default_value_dict = {}
self.prepare_expression()
def is_boolean_expression(self):
return True
def extract_hint(self):
from lib.rome.core.terms.terms import Hint
result = []
for expression in self.exps:
try:
if hasattr(expression, "extract_hint"):
result += expression.extract_hint()
elif hasattr(expression, "right") and hasattr(expression.right, "value"):
table_name = str(expression.left.table)
attribute_name = str(expression.left.key)
# value = "%s" % (criterion.expression.right.value)
value = expression.right.value
if type(expression.left.type).__name__ == "Integer":
value = int(value)
if type(expression.left.type).__name__ == "Float":
value = float(value)
result += [Hint(table_name, attribute_name, value)]
except:
                # TODO: this catches errors that occur when there are "CASE WHEN" expressions (this is caused by _paginate_query in glance.db.api)
pass
return result
def extract_joining_pairs(self):
if self.operator == "NORMAL":
word_pattern = "[_a-zA-Z0-9]+"
joining_criterion_pattern = "%s\.%s == %s\.%s" % (word_pattern, word_pattern, word_pattern, word_pattern)
m = re.search(joining_criterion_pattern, self.raw_expression)
if m is not None:
joining_pair = self.raw_expression[1:-1].split("==")
joining_pair = map(lambda x: x.strip(), joining_pair)
joining_pair = sorted(joining_pair)
return [joining_pair]
else:
return []
result = []
for exp in self.exps:
if type(exp).__name__ == "BooleanExpression":
result += exp.extract_joining_pairs()
return result
def extract_nonjoining_criterions(self):
if self.operator == "NORMAL":
word_pattern = "[_a-zA-Z0-9]+"
joining_criterion_pattern = "%s\.%s == %s\.%s" % (word_pattern, word_pattern, word_pattern, word_pattern)
m = re.search(joining_criterion_pattern, self.raw_expression)
if m is None:
return [self]
else:
return []
return [self]
def prepare_expression(self):
def collect_expressions(exp):
if type(exp) is BooleanExpression:
return exp.compiled_expression
if type(exp) is BinaryExpression:
return self.prepare_criterion(exp)
else:
return exp
compiled_expressions = map(lambda x: "(%s)" % (collect_expressions(x)), self.exps)
joined_compiled_expressions = []
if self.operator == "and":
joined_compiled_expressions = " and ".join(compiled_expressions)
elif self.operator == "or":
joined_compiled_expressions = " or ".join(compiled_expressions)
elif self.operator == "NORMAL":
joined_compiled_expressions = " or ".join(compiled_expressions)
self.compiled_expression = joined_compiled_expressions
for criterion_str in compiled_expressions:
for expression in self.exps:
if type(expression) is BinaryExpression:
expression_parts = [expression.right, expression.left]
other_part = expression.left
for expression_part in expression_parts:
# other_parts = filter(lambda x: x != expression_part,expression_parts)
if hasattr(expression_part, "default") and expression_part.bind is None and expression_part.default is not None:
expression_part.bind = expression_part.default.arg
if ":" in str(expression_part):
""" Handle right part of the expression. """
if " in " in criterion_str:
count = 1
parts = getattr(expression_part, "element", [])
like_operator_used = False
if len(parts) == 0:
""" This case happens when the LIKE operator is used. """
like_operator_used = True
parts = [expression_part] if "BindParameter" in str(type(expression_part)) else []
for i in parts:
corrected_label = ("%s_%s_%i" % (i._orig_key, self.uuid, count))
key = ":%s_%i" % (i._orig_key, count)
self.variable_substitution_dict[key] = corrected_label
self.default_value_dict[corrected_label] = i.value
if like_operator_used:
""" Must remove the '%' used as the wildcard symbol in the LIKE synthax"""
self.default_value_dict[corrected_label] = self.default_value_dict[corrected_label].replace("%", "")
count += 1
elif not "." in str(expression_part):
original_label = str(expression_part)
corrected_label = ("%s_%s" % (original_label, self.uuid)).replace(":", "")
self.variable_substitution_dict[original_label] = corrected_label
value = expression_part.value
# if len(other_parts) > 0:
# other_part = other_parts[0]
if type(other_part.expression.type).__name__ == "Integer":
value = int(value)
if type(other_part.expression.type).__name__ == "Float":
value = float(value)
if isinstance(value, datetime.datetime):
value = datetime_to_int(value)
self.default_value_dict[corrected_label] = value
other_part = expression.right
for sub in self.variable_substitution_dict:
joined_compiled_expressions = joined_compiled_expressions.replace(sub, self.variable_substitution_dict[sub])
joined_compiled_expressions = joined_compiled_expressions.replace(":", "")
for exp in self.exps:
if type(exp) is BooleanExpression:
for default_value_key in exp.default_value_dict:
self.default_value_dict[default_value_key] = exp.default_value_dict[default_value_key]
self.compiled_expression = joined_compiled_expressions
self.raw_expression = "%s" % (self.compiled_expression)
keys = self.default_value_dict.keys()
keys = sorted(keys, reverse=True, key=lambda x: len(x))
for key in keys:
value = self.default_value_dict[key]
if type(value).__name__ in ["int", "float"]:
self.raw_expression = self.raw_expression.replace(key, "%s" % (self.default_value_dict[key]))
else:
self.raw_expression = self.raw_expression.replace(key, "\"%s\"" % (self.default_value_dict[key]))
return self.compiled_expression
def prepare_criterion(self, criterion):
criterion_str = criterion.__str__()
if criterion_str in boolean_expression_str_memory:
criterion_str = boolean_expression_str_memory[criterion_str]
else:
prev_criterion_str = criterion_str
subs = {
" = ": " == ",
# ":": "",
"\"": "",
"IN": " in ",
"IS": " is ",
"NOT": " not ",
"NULL": "None",
"(": "[",
")": "]"
}
compiled = re.compile('|'.join(map(re.escape, subs)))
criterion_str = compiled.sub(lambda x: subs[x.group(0)], criterion_str)
for sub in self.variable_substitution_dict:
criterion_str = criterion_str.replace(sub, self.variable_substitution_dict[sub])
# handle regex
if "REGEXP" in criterion_str:
tab = criterion_str.split("REGEXP")
a = tab[0]
b = tab[1]
criterion_str = ("""__import__('re').search(%s, %s) is not None\n""" % (b, a))
if "LIKE" in criterion_str:
left = criterion_str.split("LIKE")[0]
right = criterion_str.split("LIKE")[1]
criterion_str = "(%s in %s) or (%s in %s)" % (left, right, right, left)
boolean_expression_str_memory[prev_criterion_str] = criterion_str
return criterion_str
def evaluate(self, value, additional_parameters={}):
orig_value = value
# construct a dict with the values involved in the expression
values_dict = {}
if type(value) is not dict:
for key in value.keys():
try:
s = LazyDictionnary(**value[value.keys().index(key)])
values_dict[key] = s
except Exception as e:
print("[BUG] evaluation failed: %s -> %s" % (key, value))
# return False
else:
values_dict = value
for key in self.default_value_dict:
values_dict[key] = self.default_value_dict[key]
final_values_dict = {}
for key in values_dict.keys():
value = values_dict[key]
if key.startswith("id_"):
value = int(value)
final_values_dict[key] = value
for key in values_dict:
if key in self.variable_substitution_dict:
value = values_dict[key]
if key.startswith("id_"):
value = int(value)
final_values_dict[self.variable_substitution_dict[key]] = value
for expression in self.exps:
if type(expression) is BinaryExpression:
expression_parts = [expression.right, expression.left]
for expression_part in expression_parts:
if hasattr(expression_part, "default") and expression_part.default is not None:
key = str(expression_part).split(".")[0]
attr = str(expression_part).split(".")[1]
if getattr(final_values_dict[key], attr, None) is None:
value = expression_part.default.arg
setattr(final_values_dict[key], attr, value)
second_final_values_dict = {}
for key in additional_parameters:
value = LazyDictionnary(**additional_parameters[key])
second_final_values_dict[key] = value
for key in final_values_dict:
second_final_values_dict[key] = final_values_dict[key]
try:
result = eval(self.compiled_expression, second_final_values_dict)
except:
import traceback
traceback.print_exc()
if self.operator == "NORMAL":
return False
for exp in self.exps:
if exp.evaluate(orig_value):
if self.operator in ["or"]:
return True
else:
if self.operator in ["and"]:
return False
if self.operator in ["NORMAL", "or"]:
return False
else:
return True
pass
return result
def __repr__(self):
if self.operator == "NORMAL":
return str(self.raw_expression)
else:
op = " %s ".lower() % (self.operator)
return "(%s)" % (op.join(map(lambda x: str(x), self.exps)))
class JoiningBooleanExpression(BooleanExpression):
def __init__(self, operator, *exps):
BooleanExpression.__init__(self, operator, *exps)
self.is_joining_expression = True | mit | 6,101,530,661,296,925,000 | 43.576369 | 143 | 0.527446 | false | 4.480591 | false | false | false |
baloo/shinken | test/test_bad_timeperiods.py | 1 | 1428 | #!/usr/bin/env python2.6
#Copyright (C) 2009-2010 :
# Gabes Jean, [email protected]
# Gerhard Lausser, [email protected]
#
#This file is part of Shinken.
#
#Shinken is free software: you can redistribute it and/or modify
#it under the terms of the GNU Affero General Public License as published by
#the Free Software Foundation, either version 3 of the License, or
#(at your option) any later version.
#
#Shinken is distributed in the hope that it will be useful,
#but WITHOUT ANY WARRANTY; without even the implied warranty of
#MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
#GNU Affero General Public License for more details.
#
#You should have received a copy of the GNU Affero General Public License
#along with Shinken. If not, see <http://www.gnu.org/licenses/>.
#
# This file is used to test reading and processing of config files
#
#It's ugly I know....
from shinken_test import *
class TestConfig(ShinkenTest):
#setUp is in shinken_test
def setUp(self):
self.setup_with_file('etc/nagios_bad_timeperiods.cfg')
#Change ME :)
def test_dummy(self):
#
# Config is not correct because of a wrong relative path
# in the main config file
#
print "Get the bad timeperiod"
tp = self.sched.timeperiods.find_by_name("24x7")
self.assert_(tp.is_correct() == False)
if __name__ == '__main__':
unittest.main()
| agpl-3.0 | 3,856,600,961,667,422,000 | 28.75 | 76 | 0.696078 | false | 3.561097 | true | false | false |
zv1n/pycmef | pycmef/pygaze_eyetracker.py | 1 | 1322 |
import sys, os, random, constants
from pycmef.event_handler import returns_string, returns_dictionary
from pygaze import libscreen
from pygaze import libtime
from pygaze import liblog
from pygaze import libinput
from pygaze import eyetracker
class PygazeEyetracker:
def __init__(self):
# create display object
self.disp = libscreen.Display()
# create eyetracker object
self.tracker = eyetracker.EyeTracker(self.disp)
# create keyboard object
self.keyboard = libinput.Keyboard(keylist=['space'], timeout=None)
# create logfile object
self.log = liblog.Logfile()
def register(self, event_manager):
event_manager.register_events({
'calibrate_eyetracker': self.calibrate_eyetracker,
'start_eyetracker': self.start_tracking,
'stop_eyetracker': self.stop_tracking,
'log_to_eyetracker': self.log_to_eyetracker
})
@returns_string
def calibrate_eyetracker(self, args):
# calibrate eye tracker
self.tracker.calibrate()
return ""
@returns_string
def start_tracking(self, args):
self.tracker.start_recording()
return ""
@returns_string
def stop_tracking(self, args):
self.tracker.stop_recording()
return ""
@returns_string
def log_to_eyetracker(self, args):
self.tracker.log(args.message)
return ""
| bsd-2-clause | 1,446,169,945,404,340,200 | 22.607143 | 70 | 0.705749 | false | 3.42487 | false | false | false |
shellphish/rex | rex/exploit/chess/control.py | 1 | 1294 | import os
from ...enums import CrashInputType
from .chess_exploit import CHESSExploit
class CHESSExploitControl(CHESSExploit):
"""
    An exploit that crashes with a controlled instruction pointer and controlled registers
"""
def __init__(self, *args, registers=None, **kwargs):
if registers is None:
raise TypeError("Need to specify registers")
super().__init__(*args, **kwargs)
self.registers = registers
def _write_script(self, **kwargs):
if self.crash.input_type in (CrashInputType.STDIN, CrashInputType.POV_FILE):
actions = self._script_get_actions_stdin()
elif self.crash.input_type == CrashInputType.TCP:
actions = self._script_get_actions_tcp()
else:
raise NotImplementedError("Unsupported crash input type %s." % self.crash.input_type)
body = os.linesep.join(actions)
preamble = """
import sys
import time
import nclib
if len(sys.argv) < 3:
print("%s: <host> <port>" % sys.argv[0])
sys.exit(1)
r = nclib.Netcat((sys.argv[1], int(sys.argv[2])), udp=False)
"""
tail = "\nr.recvall()\n"
for reg in self.registers:
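            # both %s and %X are substituted here, at script-generation time,
            # so the generated script ends with literal lines such as:
            #   print("REGISTER_RIP=41414141")   (register name/value illustrative)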
tail += 'print("REGISTER_%s=%X")\n' % (reg.upper(), self.registers[reg])
return preamble + body + tail
| bsd-2-clause | 3,315,967,754,982,694,400 | 28.409091 | 97 | 0.620556 | false | 3.564738 | false | false | false |
chadwangcn/control_app | unit_test/udp_monitor_ut.py | 1 | 2355 | '''
Created on Aug 4, 2014
@author: lijun
'''
import unittest
import threading
import time
from socket import *
from engine import *
from engine.DataCenter import *
from engine.LogAdapter import *
from data_source.UdpMonitor import *
class udp_send(threading.Thread):
def __init__(self,_strIP,_nPort):
self.bExit = False
self.threadid= 0
self.nPort = _nPort
self.strIP = _strIP
threading.Thread.__init__(self)
def run(self):
while self.bExit == False:
self.SendData(" haha this is a nice tes t")
time.sleep(2)
def start_send(self):
self.start()
self.bExit = False
def stop_send(self):
self.bExit = True
def reTryConnectUdp(self):
try:
self.udpsocket = socket(AF_INET, SOCK_DGRAM)
self.address = (self.strIP,self.nPort)
'self.udpsocket.bind(self.address)'
self.net_status = True
except Exception,e:
self.net_status = False
print e
def SendData(self,_data):
error = False
try:
self.udpsocket.sendto( _data ,self.address)
except Exception,e:
print e
error = True
if error == True:
self.net_status = False
self.reTryConnectUdp()
def test( _data):
print "==>"+_data
class Test(unittest.TestCase):
def setUp(self):
self.udpsender = udp_send("127.0.0.1",45232)
self.udpsender.start_send()
def tearDown(self):
self.udpsender.stop_send()
def testDataCenter(self):
self.DataCenterObject = DataCenter.DataCenter()
object_src = UdpMonitor(3000,"192.168.11.5")
self.DataCenterObject.Register("UDP", object_src)
self.DataCenterObject.Subcribe("UDP", "test", test)
self.DataCenterObject.Start()
while True:
time.sleep(60)
if __name__ == "__main__":
#import sys;sys.argv = ['', 'Test.testName']
unittest.main() | lgpl-3.0 | 216,481,319,521,117,400 | 23.8 | 69 | 0.498938 | false | 4.032534 | true | false | false |
jbarlow83/OCRmyPDF | src/ocrmypdf/__main__.py | 1 | 2173 | #!/usr/bin/env python3
# © 2015-19 James R. Barlow: github.com/jbarlow83
#
# This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this
# file, You can obtain one at http://mozilla.org/MPL/2.0/.
import logging
import os
import signal
import sys
from multiprocessing import set_start_method
from ocrmypdf import __version__
from ocrmypdf._plugin_manager import get_parser_options_plugins
from ocrmypdf._sync import run_pipeline
from ocrmypdf._validation import check_closed_streams, check_options
from ocrmypdf.api import Verbosity, configure_logging
from ocrmypdf.exceptions import (
BadArgsError,
ExitCode,
InputFileError,
MissingDependencyError,
)
log = logging.getLogger('ocrmypdf')
def sigbus(*args):
raise InputFileError("Lost access to the input file")
def run(args=None):
_parser, options, plugin_manager = get_parser_options_plugins(args=args)
if not check_closed_streams(options):
return ExitCode.bad_args
if hasattr(os, 'nice'):
os.nice(5)
verbosity = options.verbose
if not os.isatty(sys.stderr.fileno()):
options.progress_bar = False
if options.quiet:
verbosity = Verbosity.quiet
options.progress_bar = False
configure_logging(
verbosity,
progress_bar_friendly=options.progress_bar,
manage_root_logger=True,
plugin_manager=plugin_manager,
)
log.debug('ocrmypdf %s', __version__)
try:
check_options(options, plugin_manager)
except ValueError as e:
log.error(e)
return ExitCode.bad_args
except BadArgsError as e:
log.error(e)
return e.exit_code
except MissingDependencyError as e:
log.error(e)
return ExitCode.missing_dependency
if hasattr(signal, 'SIGBUS'):
signal.signal(signal.SIGBUS, sigbus)
result = run_pipeline(options=options, plugin_manager=plugin_manager)
return result
if __name__ == '__main__':
if sys.platform == 'darwin' and sys.version_info < (3, 8):
set_start_method('spawn') # see python bpo-33725
sys.exit(run())
| gpl-3.0 | -3,380,931,195,360,879,000 | 26.846154 | 76 | 0.683702 | false | 3.62 | false | false | false |
dladd/pyFormex | pyformex/examples/Diamatic.py | 1 | 2859 | # $Id$
##
## This file is part of pyFormex 0.8.9 (Fri Nov 9 10:49:51 CET 2012)
## pyFormex is a tool for generating, manipulating and transforming 3D
## geometrical models by sequences of mathematical operations.
## Home page: http://pyformex.org
## Project page: http://savannah.nongnu.org/projects/pyformex/
## Copyright 2004-2012 (C) Benedict Verhegghe ([email protected])
## Distributed under the GNU General Public License version 3 or later.
##
##
## This program is free software: you can redistribute it and/or modify
## it under the terms of the GNU General Public License as published by
## the Free Software Foundation, either version 3 of the License, or
## (at your option) any later version.
##
## This program is distributed in the hope that it will be useful,
## but WITHOUT ANY WARRANTY; without even the implied warranty of
## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
## GNU General Public License for more details.
##
## You should have received a copy of the GNU General Public License
## along with this program. If not, see http://www.gnu.org/licenses/.
##
"""Diamatic dome
"""
from __future__ import print_function
_status = 'checked'
_level = 'beginner'
_topics = ['structure','domes']
_techniques = ['color']
from gui.draw import *
def run():
wireframe()
u = 3. # modular length
n = 6 # number of modules in one sector
r = 36. # radius of the dome
# Topology for 1 sector
T = Formex('l:164',3).replic2(n,n,1,1,0,1,0,-1)
# 4 sectors
m = 4
angle = 360./m
# circulize sector
D = T.scale(u).circulize(angle)
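    # project the flat grid onto a sphere of radius r: z = sqrt(r**2 - d**2),
    # where d is the in-plane distance from the dome axis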
D = D.mapd(2,lambda d:sqrt(r**2-d**2),[0,0,0],[0,1])
dome1=D.rosette(m,angle)
clear()
draw(dome1)
# 6 sectors
m = 6
angle = 360./m
a = sqrt(3.)/2
D = T.shear(0,1,0.5).scale([1,a,1])
#D = T.replic2(n,n,1,a,0,1,0.5,-1)
D = D.scale(u).circulize(angle)
D = D.mapd(2,lambda d:sqrt(r**2-d**2),[0,0,0],[0,1])
dome2=D.rosette(m,angle)
clear()
draw(dome2)
# 8 sectors
m = 8
angle = 360./m
a = sqrt(2.)/2
T = Formex([[[0,0],[1,0]],[[1,0],[a,a]],[[a,a],[0,0]]],3)
D = T.replic2(n,n,1,a,0,1,a,-1)
# circulize sector
D = D.scale(u).circulize(angle)
D = D.mapd(2,lambda d:sqrt(r**2-d**2),[0,0,0],[0,1])
dome3=D.rosette(m,angle)
clear()
draw(dome3)
# circulize1
m = 6
angle = 360./m
T = Formex('l:127',3)
D = T.replic2(n,n,1,1,0,1,1,-1)
D = D.scale(u).circulize1()
D = D.mapd(2,lambda d:sqrt(r**2-d**2),[0,0,0],[0,1])
dome4=D.rosette(m,angle)
clear()
draw(dome4)
clear()
dome4.setProp(1)
draw(dome2+dome4)
clear()
d=1.1*r
draw(dome1+dome2.translate([d,0,0])+dome3.translate([0,d,0])+dome4.translate([d,d,0]))
if __name__ == 'draw':
run()
# End
| gpl-3.0 | -2,082,022,288,859,585,300 | 25.971698 | 90 | 0.607555 | false | 2.594374 | false | false | false |
pymedusa/Medusa | ext/trans.py | 6 | 11206 | # coding: utf8
u""" This module translates national characters into similar
sounding latin characters (transliteration).
At the moment, Czech, Greek, Latvian, Polish, Turkish, Russian, Ukrainian
and Kazakh alphabets are supported (it covers 99% of needs).
Python 3:
>>> from trans import trans
>>> trans('Привет, Мир!')
'Privet, Mir!'
Python 2:
>>> import trans
>>> u'Привет, Мир!'.encode('trans')
u'Privet, Mir!'
>>> trans.trans(u'Привет, Мир!')
u'Privet, Mir!'
Source and full documentations can be found here:
https://github.com/zzzsochi/trans
"""
import sys
import codecs
__version__ = '2.1.0'
__author__ = 'Zelenyak Aleksander aka ZZZ <[email protected]>'
PY2 = sys.version_info[0] == 2
class Trans(object):
""" Main class for transliteration with tables.
"""
def __init__(self, tables=None, default_table=None):
self.tables = tables or {}
self.default_table = default_table
def __call__(self, input, table=None):
""" Translate unicode string, using 'table'.
Table may be tuple (diphthongs, other), dict (other) or string name of table.
"""
if table is None:
if self.default_table is not None:
table = self.default_table
else:
raise ValueError('Table not set.')
if not isinstance(input, unicode if PY2 else str): # noqa
raise TypeError(
'trans codec support only unicode string, {0!r} given.'.format(type(input))
)
if isinstance(table, basestring if PY2 else str): # noqa
try:
table = self.tables[table]
except KeyError:
raise ValueError(u'Table "{0}" not found in tables!'.format(table))
if isinstance(table, dict):
table = ({}, table)
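        # First pass: replace multi-character sequences (diphthongs) so they
        # are not split apart by the per-character pass below.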
first = input
for diphthong, value in table[0].items():
first = first.replace(diphthong, value)
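        # Second pass: map the remaining characters one by one, falling back
        # to the table's default (usually u'_') for anything unknown.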
default = table[1].get(None, u'_')
second = u''
for char in first:
second += table[1].get(char, default)
return second
latin = {
u'à': u'a', u'á': u'a', u'â': u'a', u'ã': u'a', u'ä': u'a', u'å': u'a',
u'æ': u'ae', u'ç': u'c', u'è': u'e', u'é': u'e', u'ê': u'e', u'ë': u'e',
u'ì': u'i', u'í': u'i', u'î': u'i', u'ï': u'i', u'ð': u'd', u'ñ': u'n',
u'ò': u'o', u'ó': u'o', u'ô': u'o', u'õ': u'o', u'ö': u'o', u'ő': u'o',
u'ø': u'o', u'ù': u'u', u'ú': u'u', u'û': u'u', u'ü': u'u', u'ű': u'u',
u'ý': u'y', u'þ': u'th', u'ÿ': u'y',
u'À': u'A', u'Á': u'A', u'Â': u'A', u'Ã': u'A', u'Ä': u'A', u'Å': u'A',
u'Æ': u'AE', u'Ç': u'C', u'È': u'E', u'É': u'E', u'Ê': u'E', u'Ë': u'E',
u'Ì': u'I', u'Í': u'I', u'Î': u'I', u'Ï': u'I', u'Ð': u'D', u'Ñ': u'N',
u'Ò': u'O', u'Ó': u'O', u'Ô': u'O', u'Õ': u'O', u'Ö': u'O', u'Ő': u'O',
u'Ø': u'O', u'Ù': u'U', u'Ú': u'U', u'Û': u'U', u'Ü': u'U', u'Ű': u'U',
u'Ý': u'Y', u'Þ': u'TH', u'ß': u'ss',
}
greek = {
u'α': u'a', u'β': u'b', u'γ': u'g', u'δ': u'd', u'ε': u'e', u'ζ': u'z',
u'η': u'h', u'θ': u'8', u'ι': u'i', u'κ': u'k', u'λ': u'l', u'μ': u'm',
u'ν': u'n', u'ξ': u'3', u'ο': u'o', u'π': u'p', u'ρ': u'r', u'σ': u's',
u'τ': u't', u'υ': u'y', u'φ': u'f', u'χ': u'x', u'ψ': u'ps', u'ω': u'w',
u'ά': u'a', u'έ': u'e', u'ί': u'i', u'ό': u'o', u'ύ': u'y', u'ή': u'h',
u'ώ': u'w', u'ς': u's', u'ϊ': u'i', u'ΰ': u'y', u'ϋ': u'y', u'ΐ': u'i',
u'Α': u'A', u'Β': u'B', u'Γ': u'G', u'Δ': u'D', u'Ε': u'E', u'Ζ': u'Z',
u'Η': u'H', u'Θ': u'8', u'Ι': u'I', u'Κ': u'K', u'Λ': u'L', u'Μ': u'M',
u'Ν': u'N', u'Ξ': u'3', u'Ο': u'O', u'Π': u'P', u'Ρ': u'R', u'Σ': u'S',
u'Τ': u'T', u'Υ': u'Y', u'Φ': u'F', u'Χ': u'X', u'Ψ': u'PS', u'Ω': u'W',
u'Ά': u'A', u'Έ': u'E', u'Ί': u'I', u'Ό': u'O', u'Ύ': u'Y', u'Ή': u'H',
u'Ώ': u'W', u'Ϊ': u'I', u'Ϋ': u'Y',
}
turkish = {
u'ş': u's', u'Ş': u'S', u'ı': u'i', u'İ': u'I', u'ç': u'c', u'Ç': u'C',
u'ü': u'u', u'Ü': u'U', u'ö': u'o', u'Ö': u'O', u'ğ': u'g', u'Ğ': u'G'
}
russian = (
{
u'юй': u'yuy', u'ей': u'yay',
u'Юй': u'Yuy', u'Ей': u'Yay'
},
{
u'а': u'a', u'б': u'b', u'в': u'v', u'г': u'g', u'д': u'd', u'е': u'e',
u'ё': u'yo', u'ж': u'zh', u'з': u'z', u'и': u'i', u'й': u'y', u'к': u'k',
u'л': u'l', u'м': u'm', u'н': u'n', u'о': u'o', u'п': u'p', u'р': u'r',
u'с': u's', u'т': u't', u'у': u'u', u'ф': u'f', u'х': u'h', u'ц': u'c',
u'ч': u'ch', u'ш': u'sh', u'щ': u'sh', u'ъ': u'', u'ы': u'y', u'ь': u'',
u'э': u'e', u'ю': u'yu', u'я': u'ya',
u'А': u'A', u'Б': u'B', u'В': u'V', u'Г': u'G', u'Д': u'D', u'Е': u'E',
u'Ё': u'Yo', u'Ж': u'Zh', u'З': u'Z', u'И': u'I', u'Й': u'Y', u'К': u'K',
u'Л': u'L', u'М': u'M', u'Н': u'N', u'О': u'O', u'П': u'P', u'Р': u'R',
u'С': u'S', u'Т': u'T', u'У': u'U', u'Ф': u'F', u'Х': u'H', u'Ц': u'C',
u'Ч': u'Ch', u'Ш': u'Sh', u'Щ': u'Sh', u'Ъ': u'', u'Ы': u'Y', u'Ь': u'',
u'Э': u'E', u'Ю': u'Yu', u'Я': u'Ya',
})
ukrainian = (russian[0].copy(), {
u'Є': u'Ye', u'І': u'I', u'Ї': u'Yi', u'Ґ': u'G',
u'є': u'ye', u'і': u'i', u'ї': u'yi', u'ґ': u'g',
})
ukrainian[1].update(russian[1])
czech = {
u'č': u'c', u'ď': u'd', u'ě': u'e', u'ň': u'n', u'ř': u'r', u'š': u's',
u'ť': u't', u'ů': u'u', u'ž': u'z',
u'Č': u'C', u'Ď': u'D', u'Ě': u'E', u'Ň': u'N', u'Ř': u'R', u'Š': u'S',
u'Ť': u'T', u'Ů': u'U', u'Ž': u'Z',
}
polish = {
u'ą': u'a', u'ć': u'c', u'ę': u'e', u'ł': u'l', u'ń': u'n', u'ó': u'o',
u'ś': u's', u'ź': u'z', u'ż': u'z',
u'Ą': u'A', u'Ć': u'C', u'Ę': u'E', u'Ł': u'L', u'Ń': u'N', u'Ó': u'O',
u'Ś': u'S', u'Ź': u'Z', u'Ż': u'Z',
}
latvian = {
u'ā': u'a', u'č': u'c', u'ē': u'e', u'ģ': u'g', u'ī': u'i', u'ķ': u'k',
u'ļ': u'l', u'ņ': u'n', u'š': u's', u'ū': u'u', u'ž': u'z',
u'Ā': u'A', u'Č': u'C', u'Ē': u'E', u'Ģ': u'G', u'Ī': u'i', u'Ķ': u'k',
u'Ļ': u'L', u'Ņ': u'N', u'Š': u'S', u'Ū': u'u', u'Ž': u'Z',
}
kazakh = (russian[0].copy(), {
u'ә': u'a', u'ғ': u'g', u'қ': u'k', u'ң': 'n', u'ө': u'o', u'ұ': u'u',
u'ү': u'u', u'һ': u'h', u'і': u'i',
u'Ә': u'A', u'Ғ': u'G', u'Қ': u'K', u'Ң': 'N', u'Ө': u'O', u'Ұ': u'U',
u'Ү': u'U', u'Һ': u'H', u'І': u'I',
})
kazakh[1].update(russian[1])
farsi = {
u'ا': u'a',
u'أ': u'a', u'\uFE81': u'a', u'\uFE82': u'a',
u'آ': u'a', u'\uFE83': u'a', u'\uFE84': u'a',
u'ب': u'b', u'\uFE8F': u'b', u'\uFE90': u'b', u'\uFE92': u'b', u'\uFE91': u'b',
u'ت': u't', u'\uFE95': u't', u'\uFE96': u't', u'\uFE98': u't', u'\uFE97': u't',
u'ث': u'th', u'\uFE99': u'th', u'\uFE9A': u'th', u'\uFE9C': u'th', u'\uFE9B': u'th',
u'ج': u'j', u'\uFE9D': u'j', u'\uFE9E': u'j', u'\uFEA0': u'j', u'\uFE9F': u'j',
u'ح': u'h', u'\uFEA1': u'h', u'\uFEA2': u'h', u'\uFEA4': u'h', u'\uFEA3': u'h',
u'خ': u'x', u'\uFEA5': u'x', u'\uFEA6': u'x', u'\uFEA8': u'x', u'\uFEA7': u'x',
u'د': u'd', u'\uFEA9': u'd', u'\uFEAA': u'd',
u'ذ': u'd', u'\uFEAB': u'd', u'\uFEAC': u'd',
u'ر': u'r', u'\uFEAD': u'r', u'\uFEAE': u'r',
u'ز': u'z', u'\uFEAF': u'z', u'\uFEB0': u'z',
u'س': u's', u'\uFEB1': u's', u'\uFEB2': u's', u'\uFEB4': u's', u'\uFEB3 ': u's',
u'ش': u'sh', u'\uFEB5': u'sh', u'\uFEB6': u'sh', u'\uFEB8': u'sh', u'\uFEB7': u'sh',
u'ص': u's', u'\uFEB9': u's', u'\uFEBA': u's', u'\uFEBC': u's', u'\uFEBB': u's',
u'ض': u'd', u'\uFEBD': u'd', u'\uFEBE': u'd', u'\uFEC0': u'd', u'\uFEBF': u'd',
u'ط': u't', u'\uFEC1': u't', u'\uFEC2': u't', u'\uFEC4': u't', u'\uFEC3': u't',
u'ظ': u'z', u'\uFEC5': u'z', u'\uFEC6': u'z', u'\uFEC8': u'z', u'\uFEC7': u'z',
u'ع': u'ao', u'\uFEC9': u'ao', u'\uFECA': u'ao', u'\uFECC': u'ao', u'\uFECB': u'ao',
u'غ': u'za', u'\uFECD': u'za', u'\uFECE': u'za', u'\uFED0': u'za', u'\uFECF': u'za',
u'ف': u'f', u'\uFED1': u'f', u'\uFED2': u'f', u'\uFED4': u'f', u'\uFED3': u'f',
u'ق': u'q', u'\uFED5': u'q', u'\uFED6': u'q', u'\uFED8': u'q', u'\uFED7': u'q',
u'ك': u'k', u'\uFED9': u'k', u'\uFEDA': u'k', u'\uFEDC': u'k', u'\uFEDB': u'k',
u'ل': u'l', u'\uFEDD': u'l', u'\uFEDE': u'l', u'\uFEE0': u'l', u'\uFEDF': u'l',
u'م': u'm', u'\uFEE1': u'm', u'\uFEE2': u'm', u'\uFEE4': u'm', u'\uFEE3': u'm',
u'ن': u'n', u'\uFEE5': u'n', u'\uFEE6': u'n', u'\uFEE8': u'n', u'\uFEE7': u'n',
u'ه': u'h', u'\uFEE9': u'h', u'\uFEEA': u'h', u'\uFEEC': u'h', u'\uFEEB': u'h',
u'و': u'wa', u'\uFEED': u'wa', u'\uFEEE': u'wa',
u'ي': u'ya', u'\uFEF1': u'ya', u'\uFEF2': u'ya', u'\uFEF4': u'ya', u'\uFEF3': u'ya',
u'ة': u'at', u'\uFE93': u'at', u'\uFE94': u'at',
u'ى': u'a', u'\uFEEF': u'a', u'\uFEF0': u'a',
u'ی': u'ye', u'\uFBFC': u'ye', u'\uFBFD': u'ye', u'\uFBFE': u'ye', u'\uFBFF': u'ye',
# Arabic Sukun
u'\u064B': u'', u'\u064C': u'', u'\u064D': u'', u'\u064E': u'', u'\u064F': u'',
u'\u0650': u'', u'\u0651': u'', u'\u0652': u'', u'\u0653': u'', u'\u0670': u'',
# Arabic punctuation
u'،': u',', u'؍': u'.', u'؟': u'?', u'٭': u'★', u'؞': u'...', u'٬': u'\'', u'\u200C': u'',
}
ascii_str = (u'_0123456789'
u'abcdefghijklmnopqrstuvwxyz'
u'ABCDEFGHIJKLMNOPQRSTUVWXYZ'
u'!"#$%&\'()*+,_-./:;<=>?@[\\]^`{|}~ \t\n\r\x0b\x0c')
ascii = ({}, dict(zip(ascii_str, ascii_str)))
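# Merge every language table into the generic 'ascii' table; later tables
# override earlier ones where keys collide.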
for t in [latin, greek, turkish, russian, ukrainian, czech, polish, latvian, kazakh, farsi]:
if isinstance(t, dict):
t = ({}, t)
ascii[0].update(t[0])
ascii[1].update(t[1])
del t
ascii[1][None] = u'_'
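# The 'slug' table is the 'ascii' table minus punctuation and whitespace,
# suitable for building identifiers and URL slugs.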
slug = (ascii[0].copy(), ascii[1].copy())
for c in u'''!"#$%&'()*+,_-./:;<=>?@[\\]^`{|}~ \t\n\r\x0b\x0c''':
del slug[1][c]
tables = {u'ascii': ascii, u'text': ascii, u'slug': slug, u'id': slug}
# Main Trans with default tales
# It uses for str.encode('trans')
trans = Trans(tables=tables, default_table='ascii')
# the trans codec works only with Python 2
if PY2:
def encode(input, errors='strict', table_name='ascii'):
try:
table = trans.tables[table_name]
except KeyError:
raise ValueError("Table {0!r} not found in tables!".format(table_name))
else:
data = trans(input, table)
return data, len(data)
def no_decode(input, errors='strict'):
raise TypeError("trans codec does not support decode.")
def trans_codec(enc):
if enc == 'trans':
return codecs.CodecInfo(encode, no_decode)
try:
enc_name, table_name = enc.split(u'/', 1)
except ValueError:
return None
if enc_name != 'trans':
return None
if table_name not in trans.tables:
raise ValueError(u"Table {0!r} not found in tables!").format(table_name)
return codecs.CodecInfo(lambda i, e='strict': encode(i, e, table_name), no_decode)
codecs.register(trans_codec)
| gpl-3.0 | 8,545,537,031,901,760,000 | 38.681319 | 94 | 0.442075 | false | 1.966419 | false | false | false |
nasfarley88/bslbot | convert_video_to_gif.py | 1 | 2119 | #/usr/bin/env python
from subprocess import call
from moviepy.editor import (
VideoFileClip,
CompositeVideoClip,
TextClip,
ImageClip,
concatenate
)
from numpy.testing import assert_approx_equal
from os import listdir
from os.path import expanduser, isfile, getsize
def process_video(filename, video_height=480, overwrite=False):
gif_name = 'gifs/' + filename + '.gif'
if isfile(gif_name) and overwrite == False:
print "Skipping " + gif_name + " as it already exists."
return
video_file = VideoFileClip(filename)
try:
assert_approx_equal(float(video_file.w)/float(video_file.h),16.0/9.0)
video_file = video_file.crop(x1=video_file.w/8, x2=7*video_file.w/8)
except:
print "Not resizing video."
video_file = video_file.resize(height=video_height)
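    # Freeze the first frame for 0.7 s and append it, so the looping GIF
    # pauses briefly before restarting.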
end_image = video_file.to_ImageClip(0).set_duration(0.7)
video_file = concatenate([video_file, end_image])
logo_size = video_height/6
text = ImageClip(expanduser("~/dropbox/bslparlour/twitter_logo2.png")).set_duration(video_file.duration).resize(width=logo_size).set_pos((video_file.w-logo_size,video_file.h-logo_size))
composite_video_file = CompositeVideoClip([video_file, text])
composite_video_file.write_gif(gif_name,fps=20)
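    # Shrink the GIF: gifsicle -O3 optimizes frames, then ImageMagick's
    # fuzz + ordered dither collapses near-identical colors.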
fuzz_amt = 5
commands = 'gifsicle "'+gif_name+'" -O3 | convert -fuzz '+str(fuzz_amt)+'% - -ordered-dither o8x8,16 -layers optimize-transparency "'+gif_name+'"'
process = call(commands, shell=True)
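    # If the result is still over 5 MB, re-encode at 75% of the current
    # height (recursively) until it fits.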
if getsize(gif_name) > 5*1024**2:
process_video(filename, video_height=video_height*0.75, overwrite=True)
if __name__ == '__main__':
from multiprocessing import Pool
p = Pool(processes=4)
q = Pool(processes=4)
p.map(process_video, [x for x in listdir('.') if x.find('.mp4') != -1])
q.map(process_video, [x for x in listdir('.') if x.find('.mov') != -1])
# for filename in [x for x in listdir('.') if x.find('.mp4') != -1]:
# process_video(filename)
# for filename in [x for x in listdir('.') if x.find('.mov') != -1]:
# process_video(filename)
| mit | 1,198,612,975,229,369,600 | 32.109375 | 189 | 0.650779 | false | 3.176912 | false | false | false |
pirate/bookmark-archiver | archivebox/cli/archivebox_oneshot.py | 1 | 2092 | #!/usr/bin/env python3
__package__ = 'archivebox.cli'
__command__ = 'archivebox oneshot'
import sys
import argparse
from pathlib import Path
from typing import List, Optional, IO
from ..main import oneshot
from ..util import docstring
from ..config import OUTPUT_DIR
from ..logging_util import SmartFormatter, accept_stdin, stderr
@docstring(oneshot.__doc__)
def main(args: Optional[List[str]]=None, stdin: Optional[IO]=None, pwd: Optional[str]=None) -> None:
parser = argparse.ArgumentParser(
prog=__command__,
description=oneshot.__doc__,
add_help=True,
formatter_class=SmartFormatter,
)
parser.add_argument(
'url',
type=str,
default=None,
help=(
'URLs or paths to archive e.g.:\n'
' https://getpocket.com/users/USERNAME/feed/all\n'
' https://example.com/some/rss/feed.xml\n'
' https://example.com\n'
' ~/Downloads/firefox_bookmarks_export.html\n'
' ~/Desktop/sites_list.csv\n'
)
)
parser.add_argument(
"--extract",
type=str,
help="Pass a list of the extractors to be used. If the method name is not correct, it will be ignored. \
This does not take precedence over the configuration",
default=""
)
parser.add_argument(
'--out-dir',
type=str,
default=OUTPUT_DIR,
help= "Path to save the single archive folder to, e.g. ./example.com_archive"
)
command = parser.parse_args(args or ())
stdin_url = None
url = command.url
if not url:
stdin_url = accept_stdin(stdin)
if (stdin_url and url) or (not stdin and not url):
stderr(
'[X] You must pass a URL/path to add via stdin or CLI arguments.\n',
color='red',
)
raise SystemExit(2)
oneshot(
url=stdin_url or url,
out_dir=Path(command.out_dir).resolve(),
extractors=command.extract,
)
if __name__ == '__main__':
main(args=sys.argv[1:], stdin=sys.stdin)
| mit | -7,624,070,805,743,249,000 | 27.657534 | 112 | 0.58652 | false | 3.683099 | false | false | false |
maxalbert/Pytess | pytess/__init__.py | 1 | 1265 | """
# Pytess
Pure Python tessellation of points into polygons, including
Delaunay/Thiessen, and Voronoi polygons. Built as a
convenient user interface for Bill Simons/Carson Farmer's Python port of
Steven Fortune's C++ version of a Delaunay triangulator.
## Platforms
Tested on Python version 2.x.
## Dependencies
Pure Python, no dependencies.
## Installing it
Pytess is installed with pip from the commandline:
pip install pytess
## Usage
To triangulate a set of points, simply do:
import pytess
points = [(1,1), (5,5), (3,5), (8,1)]
triangles = pytess.triangulate(points)
And for voronoi diagrams:
import pytess
points = [(1,1), (5,5), (3,5), (8,1)]
voronoipolys = pytess.voronoi(points)
## More Information:
- [Home Page](http://github.com/karimbahgat/Pytess)
- [API Documentation](http://pythonhosted.org/Pytess)
## License:
This code is free to share, use, reuse,
and modify according to the MIT license, see license.txt
## Credits:
I just made it more convenient to use for end-users and uploaded it to PyPi.
The real credit goes to Bill Simons/Carson Farmer and Steven Fortune for
implementing the algorithm in the first place.
Karim Bahgat (2015)
"""
__version__ = "0.1.0"
from .main import *
| mit | -9,201,779,272,006,112,000 | 17.071429 | 76 | 0.7083 | false | 2.955607 | false | false | false |
zeehio/META-SHARE | misc/tools/multitest/init_data/node_settings.py | 3 | 2472 | # Node specific local settings
DEBUG = True
TEMPLATE_DEBUG = DEBUG
# The URL for this META-SHARE node django application
DJANGO_URL = 'http://localhost:{0}/metashare'.format(%%DJANGO_PORT%%)
DJANGO_BASE = 'metashare/'
SECRET_KEY = 'fdklsc)dscdus8f7odc$slacud%%8so7cwp2fsFDASFWR/REFEsfjskdcjsdl3W'
#STORAGE_PATH = ROOT_PATH + '/storageFolder'
STORAGE_PATH = '%%STORAGE_PATH%%'
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3', # Add 'postgresql_psycopg2', 'mysql',
# 'postgresql', 'sqlite3', 'oracle'.
'NAME': '{0}'.format('%%DATABASE_FILE%%'), # Or path to file if using sqlite3.
# '{0}/development.db'.format(ROOT_PATH)
'USER': '', # Not used with sqlite3.
'PASSWORD': '', # Not used with sqlite3.
'HOST': '', # Set to empty string for localhost.
# Not used with sqlite3.
'PORT': '', # Set to empty string for default.
# Not used with sqlite3.
}
}
# the URL of the Solr server which is used as a search backend
HAYSTACK_SOLR_URL = 'http://127.0.0.1:{0}/solr'.format(%%SOLR_PORT%%)
# the URL of the Solr server (or server core) which is used as a search backend
SOLR_URL = 'http://127.0.0.1:{0}/solr/main'.format(%%SOLR_PORT%%)
# the URL of the Solr server (or server core) which is used as a search backend
# when running tests
TESTING_SOLR_URL = 'http://127.0.0.1:{0}/solr/testing'.format(%%SOLR_PORT%%)
# List of other META-SHARE Managing Nodes from which the local node imports
# resource descriptions. Any remote changes will later be updated
# ("synchronized"). Use this if you are a META-SHARE Managing Node!
%%CORE_NODES%%
# User accounts with the permission to access synchronization information on
# this node:
%%SYNC_USERS%%
# List of other META-SHARE Nodes from which the local node imports resource
# descriptions. Any remote changes will later be updated ("synchronized"). Any
# imported resource descriptions will also be shared with other nodes that
# synchronize with this local node, i.e., this node acts as a proxy for the
# listed nodes. This setting is meant to be used by META-SHARE Managing Nodes
# which make normal META-SHARE Node resource descriptions available on the
# META-SHARE Managing Nodes.
%%PROXIED_NODES%%
| bsd-3-clause | 4,710,058,495,416,672,000 | 42.368421 | 87 | 0.635113 | false | 3.684054 | false | false | false |
johnny-die-tulpe/illuminati | sauron/metrics/SphinxMetric.py | 1 | 2481 | #! /usr/bin/env python
#
# Copyright (c) 2011 SEOmoz
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
# LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
# WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
import os
import pymysql
import datetime
from sauron import logger
from sauron.metrics import Metric, MetricException
class SphinxMetric(Metric):
"""
  Collects status counters from a Sphinx searchd instance: uptime, query
  count, and average query wall time, CPU usage and read volume.
Attributes:
    host (string): the IP of your Sphinx host (default: 127.0.0.1)
port (int): the port for Sphinx (default: 9306)
"""
def reconfig(self, **kwargs):
Metric.reconfig(self, **kwargs)
self.conn = None
self.cur = None
def __del__(self):
try:
self.cur.close()
self.conn.close()
except AttributeError:
pass
def values(self):
try:
self.conn = pymysql.connect(host=self.host, port=self.port)
self.cur = self.conn.cursor()
self.cur.execute('show status')
r = dict(self.cur.fetchall())
return {
'results' : {
'uptime' : (r['uptime'], 'Seconds'),
'queries' : (r['queries'], 'Count'),
'avg_wall' : (r['avg_query_wall'], 'Seconds'),
'avg_cpu' : (r['avg_query_cpu'], 'Percent'),
'avg_read' : (r['avg_query_readkb'], 'Kilobytes')
}
}
except pymysql.err.MySQLError:
raise MetricException('Error connecting to sphinx searchd')
except KeyError:
raise MetricException('Could not find all keys in searchd status')
| mit | -2,778,664,131,293,459,500 | 33.943662 | 77 | 0.681983 | false | 3.956938 | false | false | false |
peastman/deepchem | deepchem/models/tests/test_mpnn.py | 2 | 3320 | import pytest
import tempfile
import numpy as np
import deepchem as dc
from deepchem.feat import MolGraphConvFeaturizer
from deepchem.models.tests.test_graph_models import get_dataset
try:
import dgl
import dgllife
import torch
from deepchem.models.torch_models import MPNNModel
has_torch_and_dgl = True
except:
has_torch_and_dgl = False
@pytest.mark.torch
def test_mpnn_regression():
# load datasets
featurizer = MolGraphConvFeaturizer(use_edges=True)
tasks, dataset, transformers, metric = get_dataset(
'regression', featurizer=featurizer)
# initialize models
n_tasks = len(tasks)
model = MPNNModel(mode='regression', n_tasks=n_tasks, learning_rate=0.0005)
# overfit test
model.fit(dataset, nb_epoch=400)
scores = model.evaluate(dataset, [metric], transformers)
assert scores['mean_absolute_error'] < 0.5
# test on a small MoleculeNet dataset
from deepchem.molnet import load_delaney
tasks, all_dataset, transformers = load_delaney(featurizer=featurizer)
train_set, _, _ = all_dataset
model = MPNNModel(
mode='regression',
n_tasks=len(tasks),
node_out_feats=2,
edge_hidden_feats=2,
num_step_message_passing=1,
num_step_set2set=1,
num_layer_set2set=1)
model.fit(train_set, nb_epoch=1)
@pytest.mark.torch
def test_mpnn_classification():
# load datasets
featurizer = MolGraphConvFeaturizer(use_edges=True)
tasks, dataset, transformers, metric = get_dataset(
'classification', featurizer=featurizer)
# initialize models
n_tasks = len(tasks)
model = MPNNModel(
mode='classification', n_tasks=n_tasks, learning_rate=0.0005)
# overfit test
model.fit(dataset, nb_epoch=200)
scores = model.evaluate(dataset, [metric], transformers)
assert scores['mean-roc_auc_score'] >= 0.80
# test on a small MoleculeNet dataset
from deepchem.molnet import load_bace_classification
tasks, all_dataset, transformers = load_bace_classification(
featurizer=featurizer)
train_set, _, _ = all_dataset
model = MPNNModel(
mode='classification',
n_tasks=len(tasks),
node_out_feats=2,
edge_hidden_feats=2,
num_step_message_passing=1,
num_step_set2set=1,
num_layer_set2set=1)
model.fit(train_set, nb_epoch=1)
@pytest.mark.torch
def test_mpnn_reload():
# load datasets
featurizer = MolGraphConvFeaturizer(use_edges=True)
tasks, dataset, transformers, metric = get_dataset(
'classification', featurizer=featurizer)
# initialize models
n_tasks = len(tasks)
model_dir = tempfile.mkdtemp()
model = MPNNModel(
mode='classification',
n_tasks=n_tasks,
model_dir=model_dir,
batch_size=10,
learning_rate=0.001)
model.fit(dataset, nb_epoch=200)
scores = model.evaluate(dataset, [metric], transformers)
assert scores['mean-roc_auc_score'] >= 0.80
reloaded_model = MPNNModel(
mode='classification',
n_tasks=n_tasks,
model_dir=model_dir,
batch_size=10,
learning_rate=0.001)
reloaded_model.restore()
pred_mols = ["CCCC", "CCCCCO", "CCCCC"]
X_pred = featurizer(pred_mols)
random_dataset = dc.data.NumpyDataset(X_pred)
original_pred = model.predict(random_dataset)
reload_pred = reloaded_model.predict(random_dataset)
assert np.all(original_pred == reload_pred)
| mit | -7,588,067,651,061,039,000 | 26.666667 | 77 | 0.700602 | false | 3.223301 | true | false | false |
rlucio/cinder-violin-driver-havana | cinder/volume/drivers/violin/vxg/vshare/vshare.py | 1 | 3295 | #!/usr/bin/env python
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2013 Violin Memory, Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from cinder.volume.drivers.violin.vxg.vshare import igroup as IGROUP
from cinder.volume.drivers.violin.vxg.vshare import iscsi as ISCSI
from cinder.volume.drivers.violin.vxg.vshare import lun as LUN
from cinder.volume.drivers.violin.vxg.vshare import snapshot as SNAPSHOT
CLASS_NAMES = 'VShare'
"""
Adding new classes to this module:
All new classes should be added at the bottom of this file (you can't inherit
from a class that hasn't been defined yet). Keep the most up-to-date class
named "VShare". When adding a new VShare class, rename the current VShare
to "VShare_x", where x is +1 of the highest named class in this file. This
will typically be +1 of whatever class the old "VShare" class is inheriting
from).
Here's an example snippet of old code before updating:
class VShare(VShare_5):
def __init__(self, session):
super(VShare, self).__init__(session)
...
Here's what this would change to (two updates):
class VShare_6(VShare_5):
def __init__(self, session):
super(VShare_6, self).__init__(session)
...
"""
class VShare_1(object):
versions = '5.0.2'
def __init__(self, session):
self.basic = session
self.lun = LUN.LUNManager(self.basic)
self.close = self.basic.close
def __del__(self):
try:
self.basic.close()
except Exception:
pass
@property
def debug(self):
return self.basic.debug
@debug.setter
def debug(self, value):
self.basic.debug = value
def __repr__(self):
return '<%s host:%s user:%s proto:%s>' % (self.__class__.__name__,
self.basic.host,
self.basic.user,
self.basic.proto)
class VShare_2(VShare_1):
versions = '5.1.0'
def __init__(self, session):
super(VShare_2, self).__init__(session)
self.lun = LUN.LUNManager_1(self.basic)
class VShare_3(VShare_2):
versions = '5.2.0'
def __init__(self, session):
super(VShare_3, self).__init__(session)
self.lun = LUN.LUNManager_2(self.basic)
self.igroup = IGROUP.IGroupManager(self.basic)
self.iscsi = ISCSI.ISCSIManager(self.basic)
class VShare(VShare_3):
versions = '6.0.0'
def __init__(self, session):
super(VShare, self).__init__(session)
self.lun = LUN.LUNManager_3(self.basic)
self.igroup = IGROUP.IGroupManager_1(self.basic)
self.snapshot = SNAPSHOT.SnapshotManager(self.basic)
| apache-2.0 | -2,200,932,792,427,783,000 | 29.509259 | 78 | 0.63308 | false | 3.535408 | false | false | false |
Shu-A/pybot | src/pybot/adapters/shell.py | 1 | 2120 | #!/usr/bin/env python
# -*- coding:utf-8 -*-
import os, sys
from pybot.adapter import Adapter
from pybot.message import TextMessage
if os.environ.get('HUBOT_SHELL_HISTSIZE'):
history_size = int(os.environ.get('HUBOT_SHELL_HISTSIZE'))
else:
history_size = 1024
class Shell(Adapter):
def send(self, envelope, *strings):
        if sys.platform != 'win32':
for string in strings:
print "\x1b[01;32m%s\x1b[0m" % string
else:
for string in strings:
print string
        # self.repl is never set in this module; guard so send() doesn't
        # raise AttributeError when no repl object is attached
        if getattr(self, 'repl', None) is not None:
            self.repl.prompt()
def emote(self, envelope, *strings):
        self.send(envelope, *["* %s" % string for string in strings])
def reply(self, envelope, *strings):
        strings = [envelope.user.name + ': ' + string for string in strings]
        self.send(envelope, *strings)
def run(self):
history_file_path = ".hubot_history"
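        # Load up to history_size previous commands, if a history file exists.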
try:
f = open(history_file_path, 'r')
history_lines = [ l[:-1] for l in f.readlines()[:history_size] ]
f.close()
except IOError:
history_lines = []
self.emit('connected')
f = open(history_file_path, 'w')
while True:
line = raw_input('> ')
if len(history_lines) >= history_size:
history_lines.pop(0)
history_lines.append(line)
if line == 'exit' or line == 'quit':
self.robot.shutdown()
break
elif line == 'history':
for history in history_lines:
print history
else:
user_id = int(os.environ.get('HUBOT_SHELL_USER_ID') or '1')
user_name = os.environ.get('HUBOT_SHELL_USER_NAME') or 'Shell'
options = { 'name': user_name, 'room': 'Shell' }
user = self.robot.brain.user_for_id(user_id, options)
                self.receive(TextMessage(user, line, 'messageId'))
for line in history_lines:
f.write(line + '\n')
f.close()
sys.exit(0)
def close(self):
pass
| mit | -3,812,921,751,053,811,000 | 27.648649 | 78 | 0.526415 | false | 3.752212 | false | false | false |
wingtk/icbuild | icbuild/environment.py | 1 | 1743 | # icbuild - a tool to ease building collections of source packages
# Copyright (C) 2001-2006 James Henstridge
# Copyright (C) 2007-2008 Frederic Peters
# Copyright (C) 2014 Canonical Limited
# Copyright (C) 2015 Ignacio Casal Quinteiro
#
# environment.py: environment variable setup
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
import sys
import os
from icbuild.errors import FatalError, CommandError
def addpath(envvar, path, prepend=True):
'''Adds a path to an environment variable.'''
pathsep = os.pathsep
envval = os.environ.get(envvar, path)
parts = envval.split(pathsep)
if prepend:
parts.insert(0, path)
else:
parts.append(path)
# remove duplicate entries:
i = 1
while i < len(parts):
if parts[i] in parts[:i]:
del parts[i]
else:
i += 1
envval = pathsep.join(parts)
os.environ[envvar] = envval
def setup_env(config):
'''set environment variables for using prefix'''
# PATH
msys2bindir = os.path.join(config.msys2dir, 'bin')
addpath('PATH', msys2bindir)
| gpl-2.0 | 2,001,456,193,785,049,000 | 31.886792 | 75 | 0.702238 | false | 3.700637 | false | false | false |
TetraAsh/baruwa2 | baruwa/lib/cluster.py | 1 | 2097 | # -*- coding: utf-8 -*-
# vim: ai ts=4 sts=4 et sw=4
# Baruwa - Web 2.0 MailScanner front-end.
# Copyright (C) 2010-2012 Andrew Colin Kissa <[email protected]>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
"Cluster functions"
from beaker.cache import cache_region
from celery.exceptions import TimeoutError, QueueNotFound
from baruwa.model.meta import Session
from baruwa.model.settings import Server
from baruwa.tasks.status import systemstatus
@cache_region('system_status', 'cluster-status')
def cluster_status():
"Check cluster status"
hosts = Session.query(Server.hostname)\
.filter(Server.enabled == True).all()
if not hosts:
return False
for host in hosts:
if host.hostname == 'default':
continue
if not host_status(host.hostname):
return False
return True
@cache_region('system_status', 'host-status')
def host_status(hostname):
"Check host status"
try:
task = systemstatus.apply_async(queue=hostname)
task.wait(30)
hoststatus = task.result
except (TimeoutError, QueueNotFound):
return False
# check load
if hoststatus['load'][0] > 15:
return False
# return quick if any service is not running
for service in ['mta', 'scanners', 'av']:
if hoststatus[service] == 0:
return False
# check disks
for part in hoststatus['partitions']:
if part['percent'] >= 95:
return False
return True
| gpl-3.0 | 5,157,463,058,498,966,000 | 31.276923 | 71 | 0.680019 | false | 3.926966 | false | false | false |
klbostee/spaws | spaws/spark_ec2.py | 1 | 45854 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from __future__ import with_statement
import logging
import os
import pipes
import random
import shutil
import string
import subprocess
import sys
import tempfile
import time
import urllib2
import warnings
from optparse import OptionParser
from sys import stderr
import boto
from boto.ec2.blockdevicemapping import BlockDeviceMapping, BlockDeviceType, EBSBlockDeviceType
from boto import ec2
DEFAULT_SPARK_VERSION = "1.1.0"
SPARK_EC2_DIR = os.path.dirname(os.path.realpath(__file__))
# cfr. https://issues.apache.org/jira/browse/SPARK-3821
MESOS_SPARK_EC2_BRANCH = "v4"
# A URL prefix from which to fetch AMI information
AMI_PREFIX = "https://raw.github.com/klbostee/spark-ec2/{b}/ami-list".format(b=MESOS_SPARK_EC2_BRANCH)
class UsageError(Exception):
pass
class TimeoutError(Exception):
pass
# Configure and parse our command-line arguments
def parse_args():
parser = OptionParser(
usage="spark-ec2 [options] <action> <cluster_name>"
+ "\n\n<action> can be: launch, destroy, login, stop, start, get-master, reboot-slaves",
add_help_option=False)
parser.add_option(
"-h", "--help", action="help",
help="Show this help message and exit")
parser.add_option(
"-s", "--slaves", type="int", default=1,
help="Number of slaves to launch (default: %default)")
parser.add_option(
"-w", "--wait", type="int",
help="DEPRECATED (no longer necessary) - Seconds to wait for nodes to start")
parser.add_option(
"-k", "--key-pair",
help="Key pair to use on instances")
parser.add_option(
"-i", "--identity-file",
help="SSH private key file to use for logging into instances")
parser.add_option(
"-t", "--instance-type", default="m1.large",
help="Type of instance to launch (default: %default). " +
"WARNING: must be 64-bit; small instances won't work")
parser.add_option(
"-m", "--master-instance-type", default="m1.medium",
help="Master instance type (leave empty for same as instance-type)")
parser.add_option(
"-r", "--region", default="us-east-1",
help="EC2 region zone to launch instances in")
parser.add_option(
"-z", "--zone", default="",
help="Availability zone to launch instances in, or 'all' to spread " +
"slaves across multiple (an additional $0.01/Gb for bandwidth" +
"between zones applies)")
parser.add_option("-a", "--ami", help="Amazon Machine Image ID to use")
parser.add_option(
"-v", "--spark-version", default=DEFAULT_SPARK_VERSION,
help="Version of Spark to use: 'X.Y.Z' or a specific git hash (default: %default)")
parser.add_option(
"-p", "--python", default="python2.7",
help="Python executable to use for PySpark (default: %default)")
parser.add_option(
"--spark-git-repo",
default="https://github.com/apache/spark",
help="Github repo from which to checkout supplied commit hash")
parser.add_option(
"--hadoop-major-version", default="2",
help="Major version of Hadoop (default: %default)")
parser.add_option(
"-D", metavar="[ADDRESS:]PORT", dest="proxy_port",
help="Use SSH dynamic port forwarding to create a SOCKS proxy at " +
"the given local address (for use with login)")
parser.add_option(
"--resume", action="store_true", default=False,
help="Resume installation on a previously launched cluster " +
"(for debugging)")
parser.add_option(
"--ebs-vol-size", metavar="SIZE", type="int", default=0,
help="Size (in GB) of each EBS volume.")
parser.add_option(
"--ebs-vol-type", default="standard",
help="EBS volume type (e.g. 'gp2', 'standard').")
parser.add_option(
"--ebs-vol-num", type="int", default=1,
help="Number of EBS volumes to attach to each node as /vol[x]. " +
"The volumes will be deleted when the instances terminate. " +
"Only possible on EBS-backed AMIs. " +
"EBS volumes are only attached if --ebs-vol-size > 0." +
"Only support up to 8 EBS volumes.")
parser.add_option(
"--swap", metavar="SWAP", type="int", default=1024,
help="Swap space to set up per node, in MB (default: %default)")
parser.add_option(
"--spot-price", metavar="PRICE", type="float",
help="If specified, launch slaves as spot instances with the given " +
"maximum price (in dollars)")
parser.add_option(
"--ganglia", action="store_true", default=True,
help="Setup Ganglia monitoring on cluster (default: %default). NOTE: " +
"the Ganglia page will be publicly accessible")
parser.add_option(
"--no-ganglia", action="store_false", dest="ganglia",
help="Disable Ganglia monitoring for the cluster")
parser.add_option(
"-u", "--user", default="root",
help="The SSH user you want to connect as (default: %default)")
parser.add_option(
"--delete-groups", action="store_true", default=False,
help="When destroying a cluster, delete the security groups that were created.")
parser.add_option(
"--use-existing-master", action="store_true", default=False,
help="Launch fresh slaves, but use an existing stopped master if possible")
parser.add_option(
"--worker-instances", type="int", default=1,
help="Number of instances per worker: variable SPARK_WORKER_INSTANCES (default: %default)")
parser.add_option(
"--master-opts", type="string", default="",
help="Extra options to give to master through SPARK_MASTER_OPTS variable " +
"(e.g -Dspark.worker.timeout=180)")
parser.add_option(
"--user-data", type="string", default="",
help="Path to a user-data file (most AMI's interpret this as an initialization script)")
parser.add_option(
"--security-group-prefix", type="string", default=None,
help="Use this prefix for the security group rather than the cluster name.")
parser.add_option(
"--authorized-address", type="string", default="0.0.0.0/0",
help="Address to authorize on created security groups (default: %default)")
parser.add_option(
"--additional-security-group", type="string", default="",
help="Additional security group to place the machines in")
parser.add_option(
"--copy-aws-credentials", action="store_true", default=False,
help="Add AWS credentials to hadoop configuration to allow Spark to access S3")
(opts, args) = parser.parse_args()
if len(args) != 2:
parser.print_help()
sys.exit(1)
(action, cluster_name) = args
# Boto config check
# http://boto.cloudhackers.com/en/latest/boto_config_tut.html
home_dir = os.getenv('HOME')
if home_dir is None or not os.path.isfile(home_dir + '/.boto'):
if not os.path.isfile('/etc/boto.cfg'):
if os.getenv('AWS_ACCESS_KEY_ID') is None:
print >> stderr, ("ERROR: The environment variable AWS_ACCESS_KEY_ID " +
"must be set")
sys.exit(1)
if os.getenv('AWS_SECRET_ACCESS_KEY') is None:
print >> stderr, ("ERROR: The environment variable AWS_SECRET_ACCESS_KEY " +
"must be set")
sys.exit(1)
return (opts, action, cluster_name)
# Get the EC2 security group of the given name, creating it if it doesn't exist
def get_or_make_group(conn, name):
groups = conn.get_all_security_groups()
group = [g for g in groups if g.name == name]
if len(group) > 0:
return group[0]
else:
print "Creating security group " + name
return conn.create_security_group(name, "Spark EC2 group")
# Check whether a given EC2 instance object is in a state we consider active,
# i.e. not terminating or terminated. We count both stopping and stopped as
# active since we can restart stopped clusters.
def is_active(instance):
return (instance.state in ['pending', 'running', 'stopping', 'stopped'])
# Return correct versions of Spark and Shark, given the supplied Spark version
def get_spark_shark_version(opts):
spark_shark_map = {
"0.7.3": "0.7.1",
"0.8.0": "0.8.0",
"0.8.1": "0.8.1",
"0.9.0": "0.9.0",
"0.9.1": "0.9.1",
"1.0.0": "1.0.0",
"1.0.1": "1.0.1",
"1.0.2": "1.0.2",
"1.1.0": "1.1.0",
}
version = opts.spark_version.replace("v", "")
if version not in spark_shark_map:
print >> stderr, "Don't know about Spark version: %s" % version
sys.exit(1)
return (version, spark_shark_map[version])
# Attempt to resolve an appropriate AMI given the architecture and region of the request.
# Source: http://aws.amazon.com/amazon-linux-ami/instance-type-matrix/
# Last Updated: 2015-06-19
# For easy maintainability, please keep this manually-inputted dictionary sorted by key.
def get_spark_ami(opts):
instance_types = {
"c1.medium": "pvm",
"c1.xlarge": "pvm",
"c3.large": "pvm",
"c3.xlarge": "pvm",
"c3.2xlarge": "pvm",
"c3.4xlarge": "pvm",
"c3.8xlarge": "pvm",
"c4.large": "hvm",
"c4.xlarge": "hvm",
"c4.2xlarge": "hvm",
"c4.4xlarge": "hvm",
"c4.8xlarge": "hvm",
"cc1.4xlarge": "hvm",
"cc2.8xlarge": "hvm",
"cg1.4xlarge": "hvm",
"cr1.8xlarge": "hvm",
"d2.xlarge": "hvm",
"d2.2xlarge": "hvm",
"d2.4xlarge": "hvm",
"d2.8xlarge": "hvm",
"g2.2xlarge": "hvm",
"g2.8xlarge": "hvm",
"hi1.4xlarge": "pvm",
"hs1.8xlarge": "pvm",
"i2.xlarge": "hvm",
"i2.2xlarge": "hvm",
"i2.4xlarge": "hvm",
"i2.8xlarge": "hvm",
"m1.small": "pvm",
"m1.medium": "pvm",
"m1.large": "pvm",
"m1.xlarge": "pvm",
"m2.xlarge": "pvm",
"m2.2xlarge": "pvm",
"m2.4xlarge": "pvm",
"m3.medium": "hvm",
"m3.large": "hvm",
"m3.xlarge": "hvm",
"m3.2xlarge": "hvm",
"m4.large": "hvm",
"m4.xlarge": "hvm",
"m4.2xlarge": "hvm",
"m4.4xlarge": "hvm",
"m4.10xlarge": "hvm",
"r3.large": "hvm",
"r3.xlarge": "hvm",
"r3.2xlarge": "hvm",
"r3.4xlarge": "hvm",
"r3.8xlarge": "hvm",
"t1.micro": "pvm",
"t2.micro": "hvm",
"t2.small": "hvm",
"t2.medium": "hvm",
"t2.large": "hvm",
}
if opts.instance_type in instance_types:
instance_type = instance_types[opts.instance_type]
else:
instance_type = "pvm"
print >> stderr,\
"Don't recognize %s, assuming type is pvm" % opts.instance_type
ami_path = "%s/%s/%s" % (AMI_PREFIX, opts.region, instance_type)
try:
ami = urllib2.urlopen(ami_path).read().strip()
print "Spark AMI: " + ami
except:
print >> stderr, "Could not resolve AMI at: " + ami_path
sys.exit(1)
return ami
# Launch a cluster of the given name, by setting up its security groups,
# and then starting new instances in them.
# Returns a tuple of EC2 reservation objects for the master and slaves
# Fails if there already instances running in the cluster's groups.
def launch_cluster(conn, opts, cluster_name):
if opts.identity_file is None:
print >> stderr, "ERROR: Must provide an identity file (-i) for ssh connections."
sys.exit(1)
if opts.key_pair is None:
print >> stderr, "ERROR: Must provide a key pair name (-k) to use on instances."
sys.exit(1)
user_data_content = None
if opts.user_data:
with open(opts.user_data) as user_data_file:
user_data_content = user_data_file.read()
print "Setting up security groups..."
if opts.security_group_prefix is None:
master_group = get_or_make_group(conn, cluster_name + "-master")
slave_group = get_or_make_group(conn, cluster_name + "-slaves")
else:
master_group = get_or_make_group(conn, opts.security_group_prefix + "-master")
slave_group = get_or_make_group(conn, opts.security_group_prefix + "-slaves")
authorized_address = opts.authorized_address
if master_group.rules == []: # Group was just now created
master_group.authorize(src_group=master_group)
master_group.authorize(src_group=slave_group)
master_group.authorize('tcp', 22, 22, authorized_address)
master_group.authorize('tcp', 8080, 8081, authorized_address)
master_group.authorize('tcp', 18080, 18080, authorized_address)
master_group.authorize('tcp', 19999, 19999, authorized_address)
master_group.authorize('tcp', 50030, 50030, authorized_address)
master_group.authorize('tcp', 50070, 50070, authorized_address)
master_group.authorize('tcp', 60070, 60070, authorized_address)
master_group.authorize('tcp', 4040, 4045, authorized_address)
if opts.ganglia:
master_group.authorize('tcp', 5080, 5080, authorized_address)
if slave_group.rules == []: # Group was just now created
slave_group.authorize(src_group=master_group)
slave_group.authorize(src_group=slave_group)
slave_group.authorize('tcp', 22, 22, authorized_address)
slave_group.authorize('tcp', 8080, 8081, authorized_address)
slave_group.authorize('tcp', 50060, 50060, authorized_address)
slave_group.authorize('tcp', 50075, 50075, authorized_address)
slave_group.authorize('tcp', 60060, 60060, authorized_address)
slave_group.authorize('tcp', 60075, 60075, authorized_address)
# Check if instances are already running with the cluster name
existing_masters, existing_slaves = get_existing_cluster(conn, opts, cluster_name,
die_on_error=False)
if existing_slaves or (existing_masters and not opts.use_existing_master):
print >> stderr, ("ERROR: There are already instances for name: %s " % cluster_name)
sys.exit(1)
# Figure out Spark AMI
if opts.ami is None:
opts.ami = get_spark_ami(opts)
additional_groups = []
if opts.additional_security_group:
additional_groups = [sg
for sg in conn.get_all_security_groups()
if opts.additional_security_group in (sg.name, sg.id)]
print "Launching instances..."
try:
image = conn.get_all_images(image_ids=[opts.ami])[0]
except:
print >> stderr, "Could not find AMI " + opts.ami
sys.exit(1)
# Create block device mapping so that we can add EBS volumes if asked to.
# The first drive is attached as /dev/sds, 2nd as /dev/sdt, ... /dev/sdz
block_map = BlockDeviceMapping()
if opts.ebs_vol_size > 0:
for i in range(opts.ebs_vol_num):
device = EBSBlockDeviceType()
device.size = opts.ebs_vol_size
device.volume_type = opts.ebs_vol_type
device.delete_on_termination = True
block_map["/dev/sd" + chr(ord('s') + i)] = device
# AWS ignores the AMI-specified block device mapping for M3 (see SPARK-3342).
if opts.instance_type.startswith('m3.'):
for i in range(get_num_disks(opts.instance_type)):
dev = BlockDeviceType()
dev.ephemeral_name = 'ephemeral%d' % i
# The first ephemeral drive is /dev/sdb.
name = '/dev/sd' + string.letters[i + 1]
block_map[name] = dev
# Launch slaves
if opts.spot_price is not None:
# Launch spot instances with the requested price
print ("Requesting %d slaves as spot instances with price $%.3f" %
(opts.slaves, opts.spot_price))
zones = get_zones(conn, opts)
num_zones = len(zones)
i = 0
my_req_ids = []
for zone in zones:
num_slaves_this_zone = get_partition(opts.slaves, num_zones, i)
slave_reqs = conn.request_spot_instances(
price=opts.spot_price,
image_id=opts.ami,
launch_group="launch-group-%s" % cluster_name,
placement=zone,
count=num_slaves_this_zone,
key_name=opts.key_pair,
security_groups=[slave_group] + additional_groups,
instance_type=opts.instance_type,
block_device_map=block_map,
user_data=user_data_content)
my_req_ids += [req.id for req in slave_reqs]
i += 1
print "Waiting for spot instances to be granted..."
try:
while True:
time.sleep(10)
reqs = conn.get_all_spot_instance_requests()
id_to_req = {}
for r in reqs:
id_to_req[r.id] = r
active_instance_ids = []
outstanding_request_ids = []
for i in my_req_ids:
if i in id_to_req:
if id_to_req[i].state == "active":
active_instance_ids.append(id_to_req[i].instance_id)
else:
outstanding_request_ids.append(i)
if len(active_instance_ids) == opts.slaves:
print "All %d slaves granted" % opts.slaves
reservations = conn.get_all_instances(active_instance_ids)
slave_nodes = []
for r in reservations:
slave_nodes += r.instances
break
else:
print "%d of %d slaves granted, waiting longer for request ids including %s" % (
len(active_instance_ids), opts.slaves, outstanding_request_ids[0:10])
except:
print "Canceling spot instance requests"
conn.cancel_spot_instance_requests(my_req_ids)
# Log a warning if any of these requests actually launched instances:
(master_nodes, slave_nodes) = get_existing_cluster(
conn, opts, cluster_name, die_on_error=False)
running = len(master_nodes) + len(slave_nodes)
if running:
print >> stderr, ("WARNING: %d instances are still running" % running)
sys.exit(0)
else:
# Launch non-spot instances
zones = get_zones(conn, opts)
num_zones = len(zones)
i = 0
slave_nodes = []
for zone in zones:
num_slaves_this_zone = get_partition(opts.slaves, num_zones, i)
if num_slaves_this_zone > 0:
slave_res = image.run(key_name=opts.key_pair,
security_groups=[slave_group] + additional_groups,
instance_type=opts.instance_type,
placement=zone,
min_count=num_slaves_this_zone,
max_count=num_slaves_this_zone,
block_device_map=block_map,
user_data=user_data_content)
slave_nodes += slave_res.instances
print "Launched %d slaves in %s, regid = %s" % (num_slaves_this_zone,
zone, slave_res.id)
i += 1
# Launch or resume masters
if existing_masters:
print "Starting master..."
for inst in existing_masters:
if inst.state not in ["shutting-down", "terminated"]:
inst.start()
master_nodes = existing_masters
else:
master_type = opts.master_instance_type
if master_type == "":
master_type = opts.instance_type
if opts.zone == 'all':
opts.zone = random.choice(conn.get_all_zones()).name
master_res = image.run(key_name=opts.key_pair,
security_groups=[master_group] + additional_groups,
instance_type=master_type,
placement=opts.zone,
min_count=1,
max_count=1,
block_device_map=block_map,
user_data=user_data_content)
master_nodes = master_res.instances
print "Launched master in %s, regid = %s" % (zone, master_res.id)
# Give the instances descriptive names
for master in master_nodes:
name = '{cn}-master-{iid}'.format(cn=cluster_name, iid=master.id)
tag_instance(master, name)
for slave in slave_nodes:
name = '{cn}-slave-{iid}'.format(cn=cluster_name, iid=slave.id)
tag_instance(slave, name)
# Return all the instances
return (master_nodes, slave_nodes)
def tag_instance(instance, name):
for i in range(0, 5):
try:
instance.add_tag(key='Name', value=name)
break
except:
print "Failed attempt %i of 5 to tag %s" % ((i + 1), name)
            if i == 4:
                raise Exception("Error - failed max attempts to add name tag")
time.sleep(5)
# Get the EC2 instances in an existing cluster if available.
# Returns a tuple of lists of EC2 instance objects for the masters and slaves
def get_existing_cluster(conn, opts, cluster_name, die_on_error=True):
print "Searching for existing cluster " + cluster_name + "..."
# Search all the spot instance requests, and copy any tags from the spot
# instance request to the cluster.
spot_instance_requests = conn.get_all_spot_instance_requests()
for req in spot_instance_requests:
if req.state != u'active':
continue
name = req.tags.get(u'Name', "")
if name.startswith(cluster_name):
reservations = conn.get_all_instances(instance_ids=[req.instance_id])
for res in reservations:
active = [i for i in res.instances if is_active(i)]
for instance in active:
if instance.tags.get(u'Name') is None:
tag_instance(instance, name)
# Now proceed to detect master and slaves instances.
reservations = conn.get_all_instances()
master_nodes = []
slave_nodes = []
for res in reservations:
active = [i for i in res.instances if is_active(i)]
for inst in active:
name = inst.tags.get(u'Name', "")
if name.startswith(cluster_name + "-master"):
master_nodes.append(inst)
elif name.startswith(cluster_name + "-slave"):
slave_nodes.append(inst)
if any((master_nodes, slave_nodes)):
print "Found %d master(s), %d slaves" % (len(master_nodes), len(slave_nodes))
if master_nodes != [] or not die_on_error:
return (master_nodes, slave_nodes)
else:
if master_nodes == [] and slave_nodes != []:
print >> sys.stderr, "ERROR: Could not find master in with name " + \
cluster_name + "-master"
else:
print >> sys.stderr, "ERROR: Could not find any existing cluster"
sys.exit(1)
# Deploy configuration files and run setup scripts on a newly launched
# or started EC2 cluster.
def setup_cluster(conn, master_nodes, slave_nodes, opts, deploy_ssh_key):
master = master_nodes[0].public_dns_name
if deploy_ssh_key:
print "Generating cluster's SSH key on master..."
key_setup = """
[ -f ~/.ssh/id_rsa ] ||
(ssh-keygen -q -t rsa -N '' -f ~/.ssh/id_rsa &&
cat ~/.ssh/id_rsa.pub >> ~/.ssh/authorized_keys)
"""
ssh(master, opts, key_setup)
dot_ssh_tar = ssh_read(master, opts, ['tar', 'c', '.ssh'])
print "Transferring cluster's SSH key to slaves..."
for slave in slave_nodes:
print slave.public_dns_name
ssh_write(slave.public_dns_name, opts, ['tar', 'x'], dot_ssh_tar)
modules = ['cleanup', 'python27', 'spark', 'shark', 'ephemeral-hdfs', 'mapreduce', 'spark-standalone']
if opts.ebs_vol_size > 0:
modules.append('persistent-hdfs')
if opts.hadoop_major_version == "1":
modules = filter(lambda x: x != "mapreduce", modules)
if opts.ganglia:
modules.append('ganglia')
# NOTE: We should clone the repository before running deploy_files to
# prevent ec2-variables.sh from being overwritten
ssh(
host=master,
opts=opts,
command="rm -rf spark-ec2"
+ " && "
+ "git clone https://github.com/klbostee/spark-ec2.git -b {b}".format(b=MESOS_SPARK_EC2_BRANCH)
)
print "Deploying files to master..."
deploy_files(
conn=conn,
root_dir=SPARK_EC2_DIR + "/" + "deploy.generic",
opts=opts,
master_nodes=master_nodes,
slave_nodes=slave_nodes,
modules=modules
)
print "Running setup on master..."
setup_spark_cluster(master, opts)
print "Done!"
def setup_standalone_cluster(master, slave_nodes, opts):
slave_ips = '\n'.join([i.public_dns_name for i in slave_nodes])
ssh(master, opts, "echo \"%s\" > spark/conf/slaves" % (slave_ips))
ssh(master, opts, "/root/spark/sbin/start-all.sh")
def setup_spark_cluster(master, opts):
ssh(master, opts, "chmod u+x spark-ec2/setup.sh")
ssh(master, opts, "spark-ec2/setup.sh")
print "Spark standalone cluster started at http://%s:8080" % master
if opts.ganglia:
print "Ganglia started at http://%s:5080/ganglia" % master
def is_ssh_available(host, opts):
"Checks if SSH is available on the host."
try:
with open(os.devnull, 'w') as devnull:
ret = subprocess.check_call(
ssh_command(opts) + ['-t', '-t', '-o', 'ConnectTimeout=3',
'%s@%s' % (opts.user, host), stringify_command('true')],
stdout=devnull,
stderr=devnull
)
return ret == 0
except subprocess.CalledProcessError as e:
return False
def is_cluster_ssh_available(cluster_instances, opts):
for i in cluster_instances:
if not is_ssh_available(host=i.ip_address, opts=opts):
return False
else:
return True
def wait_for_cluster_state(cluster_instances, cluster_state, opts, max_attempts=20):
"""
cluster_instances: a list of boto.ec2.instance.Instance
cluster_state: a string representing the desired state of all the instances in the cluster
value can be 'ssh-ready' or a valid value from boto.ec2.instance.InstanceState such as
'running', 'terminated', etc.
(would be nice to replace this with a proper enum: http://stackoverflow.com/a/1695250)
"""
sys.stdout.write(
"Waiting for all instances in cluster to enter '{s}' state.".format(s=cluster_state)
)
sys.stdout.flush()
num_attempts = 0
while num_attempts <= max_attempts:
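        # Linear back-off: sleep 0 s, 3 s, 6 s, ... between successive polls.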
time.sleep(3 * num_attempts)
for i in cluster_instances:
s = i.update() # capture output to suppress print to screen in newer versions of boto
if cluster_state == 'ssh-ready':
if all(i.state == 'running' for i in cluster_instances) and \
is_cluster_ssh_available(cluster_instances, opts):
break
else:
if all(i.state == cluster_state for i in cluster_instances):
break
num_attempts += 1
sys.stdout.write(".")
sys.stdout.flush()
else:
raise TimeoutError("Not able to SSH to instances even after {0} attempts.".format(num_attempts))
sys.stdout.write("\n")
# Get number of local disks available for a given EC2 instance type.
def get_num_disks(instance_type):
# Source: http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/InstanceStorage.html
# Last Updated: 2014-06-20
# For easy maintainability, please keep this manually-inputted dictionary sorted by key.
disks_by_instance = {
"c1.medium": 1,
"c1.xlarge": 4,
"c3.2xlarge": 2,
"c3.4xlarge": 2,
"c3.8xlarge": 2,
"c3.large": 2,
"c3.xlarge": 2,
"cc1.4xlarge": 2,
"cc2.8xlarge": 4,
"cg1.4xlarge": 2,
"cr1.8xlarge": 2,
"g2.2xlarge": 1,
"hi1.4xlarge": 2,
"hs1.8xlarge": 24,
"i2.2xlarge": 2,
"i2.4xlarge": 4,
"i2.8xlarge": 8,
"i2.xlarge": 1,
"m1.large": 2,
"m1.medium": 1,
"m1.small": 1,
"m1.xlarge": 4,
"m2.2xlarge": 1,
"m2.4xlarge": 2,
"m2.xlarge": 1,
"m3.2xlarge": 2,
"m3.large": 1,
"m3.medium": 1,
"m3.xlarge": 2,
"r3.2xlarge": 1,
"r3.4xlarge": 1,
"r3.8xlarge": 2,
"r3.large": 1,
"r3.xlarge": 1,
"t1.micro": 0,
}
if instance_type in disks_by_instance:
return disks_by_instance[instance_type]
else:
print >> stderr, ("WARNING: Don't know number of disks on instance type %s; assuming 1"
% instance_type)
return 1
# Deploy the configuration file templates in a given local directory to
# a cluster, filling in any template parameters with information about the
# cluster (e.g. lists of masters and slaves). Files are only deployed to
# the first master instance in the cluster, and we expect the setup
# script to be run on that instance to copy them to other nodes.
#
# root_dir should be an absolute path to the directory with the files we want to deploy.
def deploy_files(conn, root_dir, opts, master_nodes, slave_nodes, modules):
active_master = master_nodes[0].public_dns_name
num_disks = get_num_disks(opts.instance_type)
hdfs_data_dirs = "/mnt/ephemeral-hdfs/data"
mapred_local_dirs = "/mnt/hadoop/mrlocal"
spark_local_dirs = "/mnt/spark"
if num_disks > 1:
for i in range(2, num_disks + 1):
hdfs_data_dirs += ",/mnt%d/ephemeral-hdfs/data" % i
mapred_local_dirs += ",/mnt%d/hadoop/mrlocal" % i
spark_local_dirs += ",/mnt%d/spark" % i
cluster_url = "%s:7077" % active_master
if "." in opts.spark_version:
# Pre-built spark & shark deploy
(spark_v, shark_v) = get_spark_shark_version(opts)
else:
# Spark-only custom deploy
spark_v = "%s|%s" % (opts.spark_git_repo, opts.spark_version)
shark_v = ""
modules = filter(lambda x: x != "shark", modules)
template_vars = {
"master_list": '\n'.join([i.public_dns_name for i in master_nodes]),
"active_master": active_master,
"slave_list": '\n'.join([i.public_dns_name for i in slave_nodes]),
"cluster_url": cluster_url,
"hdfs_data_dirs": hdfs_data_dirs,
"mapred_local_dirs": mapred_local_dirs,
"spark_local_dirs": spark_local_dirs,
"swap": str(opts.swap),
"modules": '\n'.join(modules),
"spark_version": spark_v,
"shark_version": shark_v,
"hadoop_major_version": opts.hadoop_major_version,
"spark_worker_instances": "%d" % opts.worker_instances,
"spark_master_opts": opts.master_opts,
"pyspark_python": opts.python
}
if opts.copy_aws_credentials:
template_vars["aws_access_key_id"] = conn.aws_access_key_id
template_vars["aws_secret_access_key"] = conn.aws_secret_access_key
else:
template_vars["aws_access_key_id"] = ""
template_vars["aws_secret_access_key"] = ""
# Create a temp directory in which we will place all the files to be
    # deployed after we substitute template parameters in them
tmp_dir = tempfile.mkdtemp()
for path, dirs, files in os.walk(root_dir):
if path.find(".svn") == -1:
dest_dir = os.path.join('/', path[len(root_dir):])
local_dir = tmp_dir + dest_dir
if not os.path.exists(local_dir):
os.makedirs(local_dir)
for filename in files:
if filename[0] not in '#.~' and filename[-1] != '~':
dest_file = os.path.join(dest_dir, filename)
local_file = tmp_dir + dest_file
with open(os.path.join(path, filename)) as src:
with open(local_file, "w") as dest:
text = src.read()
for key in template_vars:
text = text.replace("{{" + key + "}}", template_vars[key])
dest.write(text)
dest.close()
# rsync the whole directory over to the master machine
command = [
'rsync', '-rv',
'-e', stringify_command(ssh_command(opts)),
"%s/" % tmp_dir,
"%s@%s:/" % (opts.user, active_master)
]
subprocess.check_call(command)
# Remove the temp directory we created above
shutil.rmtree(tmp_dir)
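# Minimal sketch of deploy_files' "{{var}}" substitution step in isolation
# (illustrative only; render_template is not part of spark-ec2). For example,
# with template_vars = {"active_master": "ec2-1-2-3-4.compute-1.amazonaws.com"},
# "export MASTER={{active_master}}" renders to
# "export MASTER=ec2-1-2-3-4.compute-1.amazonaws.com".
def render_template(text, template_vars):
    for key in template_vars:
        text = text.replace("{{" + key + "}}", template_vars[key])
    return text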
def stringify_command(parts):
if isinstance(parts, str):
return parts
else:
return ' '.join(map(pipes.quote, parts))
def ssh_args(opts):
parts = ['-o', 'StrictHostKeyChecking=no']
parts += ['-o', 'UserKnownHostsFile=/dev/null']
if opts.identity_file is not None:
parts += ['-i', opts.identity_file]
return parts
def ssh_command(opts):
return ['ssh'] + ssh_args(opts)
# Run a command on a host through ssh, retrying up to five times
# and then throwing an exception if ssh continues to fail.
def ssh(host, opts, command):
tries = 0
while True:
try:
return subprocess.check_call(
ssh_command(opts) + ['-t', '-t', '%s@%s' % (opts.user, host),
stringify_command(command)])
except subprocess.CalledProcessError as e:
if tries > 5:
# If this was an ssh failure, provide the user with hints.
if e.returncode == 255:
raise UsageError(
"Failed to SSH to remote host {0}.\n" +
"Please check that you have provided the correct --identity-file and " +
"--key-pair parameters and try again.".format(host))
else:
raise e
print >> stderr, \
"Error executing remote command, retrying after 30 seconds: {0}".format(e)
time.sleep(30)
tries = tries + 1
# Backported from Python 2.7 for compatibility with 2.6 (See SPARK-1990)
def _check_output(*popenargs, **kwargs):
if 'stdout' in kwargs:
raise ValueError('stdout argument not allowed, it will be overridden.')
process = subprocess.Popen(stdout=subprocess.PIPE, *popenargs, **kwargs)
output, unused_err = process.communicate()
retcode = process.poll()
if retcode:
cmd = kwargs.get("args")
if cmd is None:
cmd = popenargs[0]
raise subprocess.CalledProcessError(retcode, cmd, output=output)
return output
def ssh_read(host, opts, command):
return _check_output(
ssh_command(opts) + ['%s@%s' % (opts.user, host), stringify_command(command)])
def ssh_write(host, opts, command, arguments):
tries = 0
while True:
proc = subprocess.Popen(
ssh_command(opts) + ['%s@%s' % (opts.user, host), stringify_command(command)],
stdin=subprocess.PIPE)
proc.stdin.write(arguments)
proc.stdin.close()
status = proc.wait()
if status == 0:
break
elif tries > 5:
raise RuntimeError("ssh_write failed with error %s" % proc.returncode)
else:
print >> stderr, \
"Error {0} while executing remote command, retrying after 30 seconds".format(status)
time.sleep(30)
tries = tries + 1
# Gets a list of zones to launch instances in
def get_zones(conn, opts):
if opts.zone == 'all':
zones = [z.name for z in conn.get_all_zones()]
else:
zones = [opts.zone]
return zones
# Gets the number of items in a partition
def get_partition(total, num_partitions, current_partitions):
num_slaves_this_zone = total / num_partitions
if (total % num_partitions) - current_partitions > 0:
num_slaves_this_zone += 1
return num_slaves_this_zone
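# Worked example: spreading 10 slaves across 3 zones gives 4, 3, 3 --
# get_partition(10, 3, 0) == 4, get_partition(10, 3, 1) == 3 and
# get_partition(10, 3, 2) == 3: integer division assigns 3 slaves to each
# zone, and the remainder of 1 goes to the first zone.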
def real_main():
(opts, action, cluster_name) = parse_args()
# Input parameter validation
if opts.wait is not None:
# NOTE: DeprecationWarnings are silent in 2.7+ by default.
# To show them, run Python with the -Wdefault switch.
# See: https://docs.python.org/3.5/whatsnew/2.7.html
warnings.warn(
"This option is deprecated and has no effect. "
"spark-ec2 automatically waits as long as necessary for clusters to startup.",
DeprecationWarning
)
if opts.ebs_vol_num > 8:
print >> stderr, "ebs-vol-num cannot be greater than 8"
sys.exit(1)
try:
conn = ec2.connect_to_region(opts.region)
except Exception as e:
print >> stderr, (e)
sys.exit(1)
# Select an AZ at random if it was not specified.
if opts.zone == "":
opts.zone = random.choice(conn.get_all_zones()).name
if action == "launch":
if opts.slaves <= 0:
print >> sys.stderr, "ERROR: You have to start at least 1 slave"
sys.exit(1)
if opts.resume:
(master_nodes, slave_nodes) = get_existing_cluster(conn, opts, cluster_name)
else:
(master_nodes, slave_nodes) = launch_cluster(conn, opts, cluster_name)
wait_for_cluster_state(
cluster_instances=(master_nodes + slave_nodes),
cluster_state='ssh-ready',
opts=opts
)
setup_cluster(conn, master_nodes, slave_nodes, opts, True)
elif action == "destroy":
print "Are you sure you want to destroy the cluster %s?" % cluster_name
print "The following instances will be terminated:"
(master_nodes, slave_nodes) = get_existing_cluster(
conn, opts, cluster_name, die_on_error=False)
for inst in master_nodes + slave_nodes:
print "> %s" % inst.public_dns_name
msg = "ALL DATA ON ALL NODES WILL BE LOST!!\nDestroy cluster %s (y/N): " % cluster_name
response = raw_input(msg)
if response == "y":
print "Terminating master..."
for inst in master_nodes:
inst.terminate()
print "Terminating slaves..."
for inst in slave_nodes:
inst.terminate()
# Delete security groups as well
if opts.delete_groups:
print "Deleting security groups (this will take some time)..."
if opts.security_group_prefix is None:
group_names = [cluster_name + "-master", cluster_name + "-slaves"]
else:
group_names = [opts.security_group_prefix + "-master",
opts.security_group_prefix + "-slaves"]
wait_for_cluster_state(
cluster_instances=(master_nodes + slave_nodes),
cluster_state='terminated',
opts=opts
)
attempt = 1
while attempt <= 3:
print "Attempt %d" % attempt
groups = [g for g in conn.get_all_security_groups() if g.name in group_names]
success = True
# Delete individual rules in all groups before deleting groups to
# remove dependencies between them
for group in groups:
print "Deleting rules in security group " + group.name
for rule in group.rules:
for grant in rule.grants:
success &= group.revoke(ip_protocol=rule.ip_protocol,
from_port=rule.from_port,
to_port=rule.to_port,
src_group=grant)
# Sleep for AWS eventual-consistency to catch up, and for instances
# to terminate
time.sleep(30) # Yes, it does have to be this long :-(
for group in groups:
try:
conn.delete_security_group(group.name)
print "Deleted security group " + group.name
except boto.exception.EC2ResponseError:
success = False
print "Failed to delete security group " + group.name
# Unfortunately, group.revoke() returns True even if a rule was not
# deleted, so this needs to be rerun if something fails
if success:
break
attempt += 1
if not success:
print "Failed to delete all security groups after 3 tries."
print "Try re-running in a few minutes."
elif action == "login":
(master_nodes, slave_nodes) = get_existing_cluster(conn, opts, cluster_name)
master = master_nodes[0].public_dns_name
print "Logging into master " + master + "..."
proxy_opt = []
if opts.proxy_port is not None:
proxy_opt = ['-D', opts.proxy_port]
subprocess.check_call(
ssh_command(opts) + proxy_opt + ['-t', '-t', "%s@%s" % (opts.user, master)])
elif action == "reboot-slaves":
response = raw_input(
"Are you sure you want to reboot the cluster " +
cluster_name + " slaves?\n" +
"Reboot cluster slaves " + cluster_name + " (y/N): ")
if response == "y":
(master_nodes, slave_nodes) = get_existing_cluster(
conn, opts, cluster_name, die_on_error=False)
print "Rebooting slaves..."
for inst in slave_nodes:
if inst.state not in ["shutting-down", "terminated"]:
print "Rebooting " + inst.id
inst.reboot()
elif action == "get-master":
(master_nodes, slave_nodes) = get_existing_cluster(conn, opts, cluster_name)
print master_nodes[0].public_dns_name
elif action == "stop":
response = raw_input(
"Are you sure you want to stop the cluster " +
cluster_name + "?\nDATA ON EPHEMERAL DISKS WILL BE LOST, " +
"BUT THE CLUSTER WILL KEEP USING SPACE ON\n" +
"AMAZON EBS IF IT IS EBS-BACKED!!\n" +
"All data on spot-instance slaves will be lost.\n" +
"Stop cluster " + cluster_name + " (y/N): ")
if response == "y":
(master_nodes, slave_nodes) = get_existing_cluster(
conn, opts, cluster_name, die_on_error=False)
print "Stopping master..."
for inst in master_nodes:
if inst.state not in ["shutting-down", "terminated"]:
inst.stop()
print "Stopping slaves..."
for inst in slave_nodes:
if inst.state not in ["shutting-down", "terminated"]:
if inst.spot_instance_request_id:
inst.terminate()
else:
inst.stop()
elif action == "start":
(master_nodes, slave_nodes) = get_existing_cluster(conn, opts, cluster_name)
print "Starting slaves..."
for inst in slave_nodes:
if inst.state not in ["shutting-down", "terminated"]:
inst.start()
print "Starting master..."
for inst in master_nodes:
if inst.state not in ["shutting-down", "terminated"]:
inst.start()
wait_for_cluster_state(
cluster_instances=(master_nodes + slave_nodes),
cluster_state='ssh-ready',
opts=opts
)
setup_cluster(conn, master_nodes, slave_nodes, opts, False)
else:
print >> stderr, "Invalid action: %s" % action
sys.exit(1)
def main():
try:
real_main()
except UsageError, e:
print >> stderr, "\nError:\n", e
sys.exit(1)
if __name__ == "__main__":
logging.basicConfig()
main()
| apache-2.0 | -5,960,206,326,444,945,000 | 38.907746 | 106 | 0.568129 | false | 3.801525 | false | false | false |
xuzetan/gemini | gemini/tests/test_inheritance.py | 2 | 14866 | """
Create a setup so we can easily define families. Input is a ped file to define
the pedigree and a vector indicating the genotype.
>>> fam = TestFamily(\"\"\"
... #family_id sample_id paternal_id maternal_id sex phenotype
... 1 dad 0 0 1 1
... 1 mom 0 0 2 1
... 1 kid dad mom 1 2
... 1 kid2 dad mom 1 1
... 1 grandma 0 0 2 1
... 1 grandpa 0 0 1 1\"\"\")
>>> fam.gt_types = [HET, HET, HOM_ALT, HET, HET, HET]
>>> fam.gt_depths = [9] * 6
>>> _ = fam.dot()
<BLANKLINE>
auto_rec
--------
default True
strict=False True
only_affected=False True
both False True
<BLANKLINE>
auto_dom
--------
default False
strict=False False
only_affected=False False
both False False
>>> fam.auto_rec()
True
# attach granparents to mom
>>> fam.subjects[1].mom = fam.subjects[4]
>>> fam.subjects[1].dad = fam.subjects[5]
>>> fam.auto_rec()
True
>>> _ = fam.dot(tests=[])
# if grandpa is affected it is no longer autosomal recessive
>>> fam.subjects[5].affected = True
>>> fam.auto_rec()
False
>>> _ = fam.dot(tests=[])
# reset.
>>> fam.subjects[5].affected = False
# set both kids to HOM_ALT (including the unaffected kid2)
>>> fam.gt_types[3] = HOM_ALT
>>> fam.auto_rec(only_affected=True)
False
>>> fam.auto_rec(only_affected=False)
True
>>> fam.auto_rec(min_depth=10)
False
>>> fam.auto_dom()
False
# dad:un, mom:un, kid:aff, kid2:un, gma:un, gpa:un
>>> fam.gt_types = [HOM_REF, HOM_REF, HET, HET, HET, HET]
>>> fam.de_novo()
False
>>> fam.de_novo(only_affected=False)
True
>>> fam.gt_types = [HOM_ALT, HOM_REF, HET, HET, HET, HET]
>>> fam.de_novo()
False
>>> fam.gt_types = [HOM_ALT, HOM_ALT, HET, HET, HET, HET]
>>> fam.de_novo()
False
>>> fam.mendel_plausible_denovo()
True
>>> cfam = TestFamily(\"\"\"
... #family_id sample_id paternal_id maternal_id sex phenotype
... 1 dad 0 0 1 1
... 1 mom 0 0 2 1
... 1 akid dad mom 1 2
... 1 ukid dad mom 1 1
... 1 bkid dad mom 1 2\"\"\")
>>> gt_types1 = [HOM_REF, HET, HET, HOM_REF, HET]
>>> gt_bases1 = ["A/A", "A/T", "A/T", "A/A", "A/T"]
>>> gt_types2 = [HET, HOM_REF, HET, HOM_REF, HET]
>>> gt_bases2 = ["A/C", "A/A", "A/C", "A/A", "A/C"]
>>> cfam.gt_types = gt_types1
>>> cfam.comp_het()
True
>>> result = cfam.comp_het_pair(gt_types1, gt_bases1, gt_types2, gt_bases2)
# note that stuff got phased in-place:
#>>> gt_bases1, gt_bases2
#(['A/A', 'A/T', 'T|A', 'A/A', 'T|A'], ['G/C', 'G/G', 'G|C', 'G/G', 'G|C'])
>>> result['candidate']
True
>>> result['affected_phased']
[Sample(akid;affected;male), Sample(bkid;affected;male)]
>>> sorted(result.keys())
['affected_phased', 'affected_skipped', 'affected_unphased', 'candidate', 'candidates', 'priority', 'unaffected_phased', 'unaffected_unphased']
>>> assert result['affected_skipped'] == result['affected_unphased'] == result['unaffected_unphased'] == []
# remove as a candidate if even one of the affecteds doesn't share the het
# pair:
>>> gt_bases1[-1], gt_types1[-1] = "A/A", HOM_REF
>>> result = cfam.comp_het_pair(gt_types1, gt_bases1, gt_types2, gt_bases2, allow_unaffected=True)
>>> result['candidate']
False
# restore.
>>> gt_bases1[-1], gt_types1[-1] = "A/T", HET
>>> cfam.comp_het_pair(gt_types1, gt_bases1, gt_types2, gt_bases2, allow_unaffected=True)['candidate']
True
# a parent (unphaseable) has the same het pair so we know they will be phased to
# the same chrom in the affected kid.
>>> gt_types1 = [HET, HOM_REF, HET, HOM_REF, HET]
>>> gt_bases1 = ["A/T", "A/A", "A/T", "A/A", "A/T"]
>>> gt_types2 = [HET, HOM_REF, HET, HOM_REF, HET]
>>> gt_bases2 = ["G/C", "G/G", "G/C", "G/G", "G/C"]
>>> cfam.gt_types = gt_types1
>>> cfam.comp_het()
True
>>> result = cfam.comp_het_pair(gt_types1, gt_bases1, gt_types2, gt_bases2, allow_unaffected=True)
>>> gt_bases1, gt_bases2
(['A/T', 'A/A', 'A|T', 'A/A', 'A|T'], ['G/C', 'G/G', 'G|C', 'G/G', 'G|C'])
# NOTE how the variants are on the same chromosome (T, then C). so it's not a candidate.
>>> result['candidate']
False
>>> result['unaffected_unphased'], result['unaffected_phased'], result['candidate']
([Sample(dad;unaffected;male)], [], False)
# phase dad so he has same het pair (won't be a candidate):
>>> gt_bases1[0], gt_bases2[0] = "A|T", "G|C"
>>> result = cfam.comp_het_pair(gt_types1, gt_bases1, gt_types2, gt_bases2)
>>> result['unaffected_unphased'], result['unaffected_phased'], result['candidate']
([], [], False)
# unaffected kid has same het pair as affected.
>>> cfam = TestFamily(\"\"\"
... #family_id sample_id paternal_id maternal_id sex phenotype
... 1 dad 0 0 1 1
... 1 mom 0 0 2 1
... 1 akid dad mom 1 2
... 1 bkid dad mom 1 1\"\"\")
>>> gt_types1 = [HOM_REF, HET, HET, HET]
>>> gt_bases1 = ["A/A", "A/T", "A/T", "A/T"]
>>> gt_types2 = [HET, HOM_REF, HET, HET]
>>> gt_bases2 = ["G/C", "G/G", "G/C", "G/C"]
>>> cfam.gt_types = gt_types1
>>> cfam.comp_het()
True
>>> result = cfam.comp_het_pair(gt_types1, gt_bases1, gt_types2, gt_bases2)
>>> result['candidate']
False
# unaffected kid is ok, parent is hom_alt (this get's filtered in the first pass
# without even considering the pair).
>>> gt_bases1[-1], gt_types1[-1] = "A/A", HOM_REF
>>> gt_bases1[0], gt_types1[0] = "T/T", HOM_ALT
>>> cfam.comp_het()
False
###################################################
# comp_het: pattern only
###################################################
>>> cfam = TestFamily(\"\"\"
... #family_id sample_id paternal_id maternal_id sex phenotype
... 1 dad 0 0 1 -9
... 1 mom 0 0 2 -9
... 1 akid dad mom 1 -9
... 1 bkid dad mom 1 -9\"\"\")
>>> gt_types1 = [HOM_REF, HET, HET, HOM_REF]
>>> gt_bases1 = ["A/A", "A/T", "A/T", "A/A"]
>>> gt_types2 = [HET, HOM_REF, HET, HOM_REF]
>>> gt_bases2 = ["A/C", "A/A", "A/C", "A/A"]
>>> cfam.gt_types = gt_types1
>>> cfam.comp_het_pair(gt_types1, gt_bases1,
... gt_types2, gt_bases2)['candidate']
False
>>> cfam.comp_het_pair(gt_types1, gt_bases1,
... gt_types2, gt_bases2, pattern_only=True)
{'priority': 1, 'candidates': [Sample(akid;unknown;male)], 'candidate': True}
# get a higher priority with phased parents.
>>> gt_types1 = [HOM_REF, HET, HET, HOM_REF]
>>> gt_bases1 = ["A|A", "A|T", "A|T", "A|A"]
>>> gt_types2 = [HET, HOM_REF, HET, HOM_REF]
>>> gt_bases2 = ["A|C", "A|A", "A|C", "A|A"]
>>> cfam.comp_het_pair(gt_types1, gt_bases1,
... gt_types2, gt_bases2, pattern_only=True)
{'priority': 1, 'candidates': [Sample(akid;unknown;male)], 'candidate': True}
>>> fh = open('test/from_inheritance.vcf', 'w')
>>> cfam.to_vcf(fh)
>>> cfam.gt_types = gt_types2
>>> cfam.to_vcf(fh, header=False)
>>> fh.close()
>>> cfam.family.to_ped(open("test/from_inheritance.ped", "w"))
####################################################
# auto_dom penetrance
####################################################
>>> dfam = TestFamily(\"\"\"
... #family_id individual_id paternal_id maternal_id sex phenotype
... 1 DS134791 DS134793 DS134792 1 2
... 1 DS134792 0 0 2 0
... 1 DS134793 0 0 1 0\"\"\")
>>> dfam.gt_types = [HET, HOM_REF, HET]
>>> dfam.auto_dom()
False
>>> dfam.auto_dom(strict=False)
True
"""
from __future__ import print_function
import os
import sys
import tempfile
import atexit
from gemini import family
import itertools as it
HOM_REF, HET, UNKNOWN, HOM_ALT = range(4)
def tmp(pedstr, suf=".ped"):
t = tempfile.mktemp(suffix=suf)
atexit.register(os.unlink, t)
with open(t, "w") as fh:
for line in pedstr.split("\n"):
if not line.strip(): continue
print(line.strip(), file=fh)
return t
class TestFamily(object):
__slots__ = ('ped', 'family', 'gt_types', '_gt_types', 'gt_depths',
'_gt_depths', 'strict', 'subjects')
def draw(self, tests=('auto_rec', 'auto_dom')):
from IPython.display import Image, display
if isinstance(tests, basestring):
tests = (tests,)
img = self.dot(tests=tests)
return display(Image(filename=img))
def __init__(self, ped, fam_id=None, gt_types=None, gt_depths=None):
# can send in a family.
if isinstance(ped, family.Family):
self.family = ped
else:
if isinstance(ped, basestring) and len(ped.split("\n")) > 1:
self.ped = tmp(ped)
else:
self.ped = ped
self.family = family.Family.from_ped(self.ped) # always want 1 family
if fam_id is None:
assert len(self.family) == 1
self.family = self.family.values()[0]
else:
self.family = self.family[fam_id]
for s in self.family.subjects:
if s.sample_id[0].isdigit(): s.sample_id = "s" + s.sample_id
self.subjects = self.family.subjects
self._gt_types = None
self.gt_types = gt_types
self._gt_depths = None
self.gt_depths = gt_depths
def dot(self, comment=None, path="test.gv", view=False, tests=('auto_rec', 'auto_dom')):
from graphviz import Digraph
viz = Digraph(comment=comment)
subjects = self.family.subjects
lookup = ["HOM_REF", "HET", "UNKOWN", "HOM_ALT"]
for i, s in enumerate(subjects):
attrs = dict(style="filled", fontcolor="white")
attrs["fillcolor"] = {True: 'black', False: 'white', None: 'gray'}[s.affected]
attrs["shape"] = {'male': 'square', 'female': 'circle', None: 'octagon'}[s.gender]
if attrs["fillcolor"] == "black":
attrs["fontcolor"] = "white"
elif attrs["fillcolor"] == "white":
attrs["fontcolor"] = "black"
gt = lookup[self.gt_types[i]]
label = s.name
viz.node(s.name, label + "\n" + gt, **attrs)
for s in subjects:
if s.dad is not None:
viz.edge(s.dad.name, s.name)
if s.mom is not None:
viz.edge(s.mom.name, s.name)
for test in tests:
res = {}
res['default'] = getattr(self, test)()
res['strict=False'] = getattr(self, test)(strict=False)
res['only_affected=False'] = getattr(self, test)(only_affected=False)
res['both False'] = getattr(self, test)(only_affected=False, strict=False)
print("\n" + test)
print("-" * len(test))
for k in ("default", "strict=False", "only_affected=False", "both False"):
print("%-20s\t%s" % (k, res[k]))
viz._format = "png"
return viz.render(path, view=view)
@property
def gt_types(self):
return self._gt_types
@gt_types.setter
def gt_types(self, gt_types):
if gt_types is not None:
assert len(gt_types) == len(self.family)
self._gt_types = gt_types
@property
def gt_depths(self):
return self._gt_depths
@gt_depths.setter
def gt_depths(self, gt_depths):
if gt_depths is not None:
assert len(gt_depths) == len(self.family)
self._gt_depths = gt_depths
def __getattr__(self, gt):
assert self._gt_types
def func(*args, **kwargs):
if 'min_depth' in kwargs:
assert self._gt_depths is not None
debug = kwargs.pop('debug', False)
flt = getattr(self.family, gt)(*args, **kwargs)
if gt == "comp_het_pair":
return flt
env = {s.sample_id: i for i, s in enumerate(self.family.subjects)}
if debug:
print(flt, file=sys.stderr)
env['gt_types'] = self.gt_types
env['gt_depths'] = self.gt_depths
return eval(flt, env)
return func
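    # Added descriptive comment: family.<test>() returns a filter *expression
    # string* (e.g. one referencing gt_types[...] by sample), which func()
    # eval()s against an environment mapping each sample_id to its column
    # index plus this family's gt_types/gt_depths lists.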
def to_vcf(self, fh, var_dict=None, header=True, _POS=[100001]):
if header:
fh.write("##fileformat=VCFv4.1\n")
fh.write("""##FORMAT=<ID=GT,Number=1,Type=String,Description="Genotype">\n""")
fh.write("#CHROM\tPOS\tID\tREF\tALT\tQUAL\tFILTER\tINFO\tFORMAT\t")
fh.write("\t".join(s.name for s in self.subjects) + "\n")
if var_dict is None:
var_dict = {}
for k in ("ID", "QUAL", "INFO"):
if k not in var_dict:
var_dict[k] = "."
var_dict["FILTER"] = "PASS"
var_dict["FORMAT"] = "GT"
if not "CHROM" in var_dict:
var_dict["CHROM"] = "1"
if not "POS" in var_dict:
var_dict["POS"] = _POS[0]
_POS[0] += 1
if not "REF" in var_dict:
var_dict["REF"] = "A"
if not "ALT" in var_dict:
var_dict["ALT"] = "G"
# convert from number back to repr
x = ["0/0", "0/1", "./.", "1/1"]
formats = [x[t] for t in self.gt_types]
if self.gt_depths:
var_dict["FORMAT"] += ":DP"
for i, d in enumerate(self.gt_depths):
formats[i] += (":%d" % d)
"""
if self.gt_phred_ll_homref:
var_dict["FORMAT"] += ":PL"
for i, (hom, het, alt) in enumerate(it.izip(self.gt_phred_ll_homref,
self.gt_phred_ll_het,
self.gt_phred_ll_homalt)):
formats[i] += (":%s,%s,%s" % (hom, het, alt))
"""
fh.write("{CHROM}\t{POS}\t{ID}\t{REF}\t{ALT}\t{QUAL}\t{FILTER}\t{INFO}\t{FORMAT}\t".format(**var_dict))
fh.write("\t".join(formats) + "\n")
def main():
f = TestFamily("test/test.auto_rec.ped", "1")
f.gt_types = [HET, HET, HOM_ALT]
f.family.subjects[0].gender = "male"
f.family.subjects[1].gender = "female"
f.family.subjects[2].gender = "male"
print(f.family.subjects)
print(f.auto_rec(strict=False))
print(f.auto_rec(strict=True))
gm = family.Sample("grandma", False, gender="female")
f.family.subjects[1].mom = gm
gp = family.Sample("grandpa", None, gender="male")
f.family.subjects[1].dad = gp
f.gt_types.extend([HOM_REF, HET])
f.family.subjects.extend([gm, gp])
print(f.dot("autosomal recessive"))
#f.gt_depths = [9, 9, 9]
#print f.auto_rec(strict=True, min_depth=8)
#print f.auto_rec(strict=True, min_depth=18)
#f.gt_types = [HOM_ALT, HET, HOM_ALT]
#print f.auto_rec(strict=False)
#print f.auto_rec(strict=True)
f.to_vcf(open('a.vcf', 'w'))
f.family.to_ped(open('a.ped', 'w'))
import sys
import doctest
sys.stderr.write(str(doctest.testmod(optionflags=doctest.NORMALIZE_WHITESPACE | doctest.ELLIPSIS | doctest.REPORT_ONLY_FIRST_FAILURE, verbose=0)) + "\n")
if __name__ == "__main__":
main()
| mit | 2,982,564,929,376,766,000 | 30.362869 | 157 | 0.545406 | false | 2.824625 | true | false | false |
DarthMaulware/EquationGroupLeaks | Leak #5 - Lost In Translation/windows/Resources/Dsz/PyScripts/Lib/dsz/mca/status/cmd/performance/errors.py | 1 | 1265 | # uncompyle6 version 2.9.10
# Python bytecode 2.7 (62211)
# Decompiled from: Python 3.6.0b2 (default, Oct 11 2016, 05:27:10)
# [GCC 6.2.0 20161005]
# Embedded file name: errors.py
import mcl.status
ERR_SUCCESS = mcl.status.MCL_SUCCESS
ERR_INVALID_PARAM = mcl.status.framework.ERR_START
ERR_NOT_IMPLEMENTED = mcl.status.framework.ERR_START + 1
ERR_MARSHAL_FAILED = mcl.status.framework.ERR_START + 2
ERR_REG_OPEN_FAILED = mcl.status.framework.ERR_START + 3
ERR_ALLOC_FAILED = mcl.status.framework.ERR_START + 4
ERR_QUERY_FAILED = mcl.status.framework.ERR_START + 5
ERR_INVALID_FORMAT = mcl.status.framework.ERR_START + 6
ERR_UNHANDLED_REVISION = mcl.status.framework.ERR_START + 7
ERR_INTERNAL_ERROR = mcl.status.framework.ERR_START + 8
errorStrings = {ERR_INVALID_PARAM: 'Invalid parameter(s)',
ERR_NOT_IMPLEMENTED: 'Not implemented on this platform',
ERR_MARSHAL_FAILED: 'Marshaling data failed',
ERR_REG_OPEN_FAILED: 'Open of registry failed',
ERR_ALLOC_FAILED: 'Failed to allocate memory',
ERR_QUERY_FAILED: 'Failed to query performance data',
ERR_INVALID_FORMAT: 'Performance data is not formed correctly',
ERR_UNHANDLED_REVISION: 'Unhandled performance data revision',
ERR_INTERNAL_ERROR: 'Internal error parsing performance data'
} | unlicense | 8,984,621,947,516,781,000 | 47.692308 | 67 | 0.750988 | false | 3.070388 | false | true | false |
timlau/dnf-apiex | base.py | 1 | 4671 | from __future__ import print_function
from __future__ import absolute_import
from time import time
import dnf
import dnf.yum
import dnf.const
import dnf.conf
import dnf.subject
class Packages:
'''
Get access to packages in the dnf (hawkey) sack in an easy way
'''
def __init__(self, base):
self._base = base
self._sack = base.sack
self._inst_na = self._sack.query().installed().na_dict()
def _filter_packages(self, pkg_list, replace=True):
'''
Filter a list of package objects and replace
the installed ones with the installed object, instead
of the available object
'''
pkgs = []
for pkg in pkg_list:
key = (pkg.name, pkg.arch)
inst_pkg = self._inst_na.get(key, [None])[0]
if inst_pkg and inst_pkg.evr == pkg.evr:
if replace:
pkgs.append(inst_pkg)
else:
pkgs.append(pkg)
return pkgs
@property
def query(self):
'''
Get the query object from the current sack
'''
return self._sack.query()
@property
def installed(self):
'''
get installed packages
'''
return self.query.installed().run()
@property
def updates(self):
'''
get available updates
'''
return self.query.upgrades().run()
@property
    def all(self, showdups=False):
        '''
        all packages in the repositories;
        installed ones are replaced with the installed package objects
        '''
if showdups:
return self._filter_packages(self.query.available().run())
else:
return self._filter_packages(self.query.latest().run())
@property
    def available(self, showdups=False):
        '''
        available packages that are not installed yet
        '''
if showdups:
return self._filter_packages(self.query.available().run(), replace=False)
else:
return self._filter_packages(self.query.latest().run(), replace=False)
@property
def extras(self):
'''
installed packages, not in current repos
'''
# anything installed but not in a repo is an extra
avail_dict = self.query.available().pkgtup_dict()
inst_dict = self.query.installed().pkgtup_dict()
pkgs = []
for pkgtup in inst_dict:
if pkgtup not in avail_dict:
pkgs.extend(inst_dict[pkgtup])
return pkgs
@property
def obsoletes(self):
'''
        packages that obsolete some installed packages
'''
inst = self.query.installed()
return self.query.filter(obsoletes=inst)
@property
def recent(self, showdups=False):
'''
Get the recent packages
'''
recent = []
now = time()
recentlimit = now-(self._base.conf.recent*86400)
if showdups:
avail = self.query.available()
else:
avail = self.query.latest()
for po in avail:
if int(po.buildtime) > recentlimit:
recent.append(po)
return recent
class DnfBase(dnf.Base):
'''
class to encapsulate and extend the dnf.Base API
'''
def __init__(self, setup_sack=True):
dnf.Base.__init__(self)
# setup the dnf cache
RELEASEVER = dnf.rpm.detect_releasever(self.conf.installroot)
self.conf.substitutions['releasever'] = RELEASEVER
# read the repository infomation
self.read_all_repos()
if setup_sack:
# populate the dnf sack
self.fill_sack()
self._packages = Packages(self) # Define a Packages object
def setup_base(self):
self.fill_sack()
self._packages = Packages(self) # Define a Packages object
@property
def packages(self):
        ''' property to get easy access to packages'''
return self._packages
def cachedir_fit(self):
conf = self.conf
subst = conf.substitutions
# this is not public API, same procedure as dnf cli
suffix = dnf.conf.parser.substitute(dnf.const.CACHEDIR_SUFFIX, subst)
cli_cache = dnf.conf.CliCache(conf.cachedir, suffix)
return cli_cache.cachedir, cli_cache.system_cachedir
def setup_cache(self):
"""Setup the dnf cache, same as dnf cli"""
conf = self.conf
conf.substitutions['releasever'] = dnf.rpm.detect_releasever('/')
conf.cachedir, self._system_cachedir = self.cachedir_fit()
print("cachedir: %s" % conf.cachedir)
| gpl-3.0 | -300,041,233,186,839,550 | 27.309091 | 85 | 0.574395 | false | 4.119048 | false | false | false |
mcgoddard/HALON | models.py | 1 | 2698 | from sqlalchemy import Column, Integer, String, DateTime, Boolean, ForeignKey
from database import Base
import datetime
from sqlalchemy.orm import relationship, backref
from sqlalchemy.ext.declarative import declarative_base
class User(Base):
__tablename__ = 'users'
id = Column(Integer, primary_key=True)
username = Column(String(50), unique=True)
password = Column(String(128), unique=False)
active_until = Column(DateTime, unique=False, default=(datetime.datetime.now))
x = Column(Integer, unique=False)
y = Column(Integer, unique=False)
direction = Column(Integer, unique=False)
health = Column(Integer, unique=False)
moving = Column(Boolean)
character_id = Column(Integer, ForeignKey('characters.id'))
character = relationship('Character', backref=backref('users', lazy='dynamic'))
def __init__(self, username, password):
self.username = username
self.password = password
def __repr__(self):
return '<User %r>' % (self.username)
class Character(Base):
__tablename__ = 'characters'
id = Column(Integer, primary_key=True)
name = Column(String(50), unique=True)
description = Column(String(144))
max_health = Column(Integer)
speed = Column(Integer)
decode_time = Column(Integer)
regen_speed = Column(Integer)
def __init__(self, name, description, max_health, speed, decode_time, regen_speed):
self.name = name
self.description = description
self.max_health = max_health
self.speed = speed
self.decode_time = decode_time
self.regen_speed = regen_speed
def __repr__(self):
return '<Character %r>' % (self.name)
class Tile(Base):
__tablename__ = 'tiles'
id = Column(Integer, primary_key=True)
x = Column(Integer)
y = Column(Integer)
tile_type = Column(Integer)
status = Column(Integer)
next_change = Column(DateTime)
def __init__(self, x, y, tile_type, status):
self.x = x
self.y = y
self.tile_type = tile_type
self.status = status
def __repr__(self):
return '<Tile %r>' % (self.id)
class Message(Base):
__tablename__ = 'messages'
id = Column(Integer, primary_key=True)
text = Column(String(144), unique=False)
created_at = Column(DateTime, unique=False, default=datetime.datetime.now)
user_id = Column(Integer, ForeignKey('users.id'))
user = relationship('User', backref=backref('messages', lazy='dynamic'))
x = Column(Integer)
y = Column(Integer)
def __init__(self, text, user):
self.text = text
self.user = user
def __repr__(self):
return '<Message %r>' % (self.text)
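# Hedged usage sketch (names illustrative; assumes a SQLAlchemy session
# configured in database.py, which is not shown here):
# user = User('alice', 'hashed-password')
# message = Message('hello world', user)
# session.add_all([user, message])
# session.commit()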
| mit | -275,747,491,084,480,480 | 31.902439 | 87 | 0.642328 | false | 3.789326 | false | false | false |
pankajnits/vyked | vyked/bus.py | 1 | 12115 | import asyncio
from collections import defaultdict
from functools import partial
import json
import logging
import random
import uuid
from again.utils import unique_hex
from retrial.retrial import retry
import aiohttp
from .services import TCPServiceClient, HTTPServiceClient
from .pubsub import PubSub
from .packet import ControlPacket, MessagePacket
from .protocol_factory import get_vyked_protocol
HTTP = 'http'
TCP = 'tcp'
_logger = logging.getLogger(__name__)
def _retry_for_client_conn(result):
if isinstance(result, tuple):
return not isinstance(result[0], asyncio.transports.Transport) or not isinstance(result[1], asyncio.Protocol)
return True
def _retry_for_pub(result):
return not result
def _retry_for_exception(_):
return True
class HTTPBus:
def __init__(self, registry_client):
self._registry_client = registry_client
def send_http_request(self, app: str, service: str, version: str, method: str, entity: str, params: dict):
"""
A convenience method that allows you to send a well formatted http request to another service
"""
host, port, node_id, service_type = self._registry_client.resolve(service, version, entity, HTTP)
url = 'http://{}:{}{}'.format(host, port, params.pop('path'))
http_keys = ['data', 'headers', 'cookies', 'auth', 'allow_redirects', 'compress', 'chunked']
kwargs = {k: params[k] for k in http_keys if k in params}
query_params = params.pop('params', {})
if app is not None:
query_params['app'] = app
query_params['version'] = version
query_params['service'] = service
response = yield from aiohttp.request(method, url, params=query_params, **kwargs)
return response
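# Hedged usage sketch for HTTPBus.send_http_request (values illustrative; must
# be driven from a coroutine since the method uses `yield from`):
# bus = HTTPBus(registry_client)
# response = yield from bus.send_http_request(
#     app=None, service='accounts', version='1', method='GET', entity='user',
#     params={'path': '/users/42', 'params': {}})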
class TCPBus:
def __init__(self, registry_client):
self._registry_client = registry_client
self._client_protocols = {}
self._pingers = {}
self._node_clients = {}
self._service_clients = []
self._pending_requests = []
self.tcp_host = None
self.http_host = None
self._host_id = unique_hex()
self._ronin = False
self._registered = False
def _create_service_clients(self):
futures = []
for sc in self._service_clients:
for host, port, node_id, service_type in self._registry_client.get_all_addresses(sc.properties):
self._node_clients[node_id] = sc
future = self._connect_to_client(host, node_id, port, service_type, sc)
futures.append(future)
return asyncio.gather(*futures, return_exceptions=False)
def register(self, host, port, service, version, clients, service_type):
for client in clients:
if isinstance(client, (TCPServiceClient, HTTPServiceClient)):
client.bus = self
self._service_clients = clients
self._registry_client.register(host, port, service, version, clients, service_type)
def registration_complete(self):
if not self._registered:
f = self._create_service_clients()
self._registered = True
def fun(_):
if self.tcp_host:
self._clear_request_queue()
f.add_done_callback(fun)
def send(self, packet: dict):
packet['from'] = self._host_id
func = getattr(self, '_' + packet['type'] + '_sender')
func(packet)
def _request_sender(self, packet: dict):
"""
Sends a request to a server from a ServiceClient
auto dispatch method called from self.send()
"""
self._pending_requests.append(packet)
self._clear_request_queue()
@retry(should_retry_for_result=_retry_for_client_conn, should_retry_for_exception=_retry_for_exception, timeout=10,
strategy=[0, 2, 2, 4])
def _connect_to_client(self, host, node_id, port, service_type, service_client):
        _logger.info('node_id %s', node_id)
future = asyncio.async(
asyncio.get_event_loop().create_connection(partial(get_vyked_protocol, service_client), host, port))
future.add_done_callback(
partial(self._service_client_connection_callback, self._node_clients[node_id], node_id, service_type))
return future
def _service_client_connection_callback(self, sc, node_id, service_type, future):
_, protocol = future.result()
# TODO : handle pinging
# if service_type == TCP:
# pinger = Pinger(self, asyncio.get_event_loop())
# self._pingers[node_id] = pinger
# pinger.register_tcp_service(protocol, node_id)
# asyncio.async(pinger.start_ping())
self._client_protocols[node_id] = protocol
@staticmethod
def _create_json_service_name(app, service, version):
return {'app': app, 'service': service, 'version': version}
@staticmethod
def _handle_ping(packet, protocol):
protocol.send(ControlPacket.pong(packet['node_id']))
def _handle_pong(self, node_id, count):
pinger = self._pingers[node_id]
asyncio.async(pinger.pong_received(count))
def _clear_request_queue(self):
self._pending_requests[:] = [each for each in self._pending_requests if not self._send_packet(each)]
def _send_packet(self, packet):
node_id = self._get_node_id_for_packet(packet)
if node_id is not None:
client_protocol = self._client_protocols[node_id]
if client_protocol.is_connected():
packet['to'] = node_id
client_protocol.send(packet)
return True
else:
return False
return False
def _get_node_id_for_packet(self, packet):
app, service, version, entity = packet['app'], packet['service'], packet['version'], packet['entity']
node = self._registry_client.resolve(service, version, entity, TCP)
return node[2] if node else None
def handle_ping_timeout(self, node_id):
_logger.info("Service client connection timed out {}".format(node_id))
self._pingers.pop(node_id, None)
service_props = self._registry_client.get_for_node(node_id)
_logger.info('service client props {}'.format(service_props))
if service_props is not None:
host, port, _node_id, _type = service_props
            asyncio.async(self._connect_to_client(host, _node_id, port, _type, self._node_clients[_node_id]))
def receive(self, packet: dict, protocol, transport):
if packet['type'] == 'ping':
self._handle_ping(packet, protocol)
elif packet['type'] == 'pong':
self._handle_pong(packet['node_id'], packet['count'])
elif packet['type'] == 'publish':
self._handle_publish(packet, protocol)
else:
if self.tcp_host.is_for_me(packet['service'], packet['version']):
func = getattr(self, '_' + packet['type'] + '_receiver')
func(packet, protocol)
else:
                _logger.warning('wrongly routed packet: %s', packet)
def _request_receiver(self, packet, protocol):
api_fn = getattr(self.tcp_host, packet['endpoint'])
if api_fn.is_api:
from_node_id = packet['from']
entity = packet['entity']
future = asyncio.async(api_fn(from_id=from_node_id, entity=entity, **packet['payload']))
def send_result(f):
result_packet = f.result()
protocol.send(result_packet)
future.add_done_callback(send_result)
else:
print('no api found for packet: ', packet)
def _handle_publish(self, packet, protocol):
service, version, endpoint, payload, publish_id = packet['service'], packet['version'], packet['endpoint'], \
packet['payload'], packet['publish_id']
for client in self._service_clients:
if client.name == service and client.version == version:
fun = getattr(client, endpoint)
asyncio.async(fun(payload))
protocol.send(MessagePacket.ack(publish_id))
class PubSubBus:
PUBSUB_DELAY = 5
def __init__(self, registry_client):
self._pubsub_handler = None
self._registry_client = registry_client
self._clients = None
self._pending_publishes = {}
def create_pubsub_handler(self, host, port):
self._pubsub_handler = PubSub(host, port)
yield from self._pubsub_handler.connect()
def register_for_subscription(self, clients):
self._clients = clients
subscription_list = []
xsubscription_list = []
for client in clients:
if isinstance(client, TCPServiceClient):
for each in dir(client):
fn = getattr(client, each)
if callable(fn) and getattr(fn, 'is_subscribe', False):
subscription_list.append(self._get_pubsub_key(client.name, client.version, fn.__name__))
elif callable(fn) and getattr(fn, 'is_xsubscribe', False):
xsubscription_list.append((client.name, client.version, fn.__name__, getattr(fn, 'strategy')))
self._registry_client.x_subscribe(xsubscription_list)
yield from self._pubsub_handler.subscribe(subscription_list, handler=self.subscription_handler)
def publish(self, service, version, endpoint, payload):
endpoint_key = self._get_pubsub_key(service, version, endpoint)
asyncio.async(self._retry_publish(endpoint_key, json.dumps(payload)))
publish_id = str(uuid.uuid4())
future = asyncio.async(self.xpublish(publish_id, service, version, endpoint, payload))
self._pending_publishes[publish_id] = future
def xpublish(self, publish_id, service, version, endpoint, payload):
subscribers = yield from self._registry_client.get_subscribers(service, version, endpoint)
strategies = defaultdict(list)
for subscriber in subscribers:
strategies[(subscriber['service'], subscriber['version'])].append(
(subscriber['host'], subscriber['port'], subscriber['node_id'], subscriber['strategy']))
if not len(subscribers):
future = self._pending_publishes[publish_id]
future.cancel()
return
yield from self._connect_and_publish(publish_id, service, version, endpoint, strategies, payload)
yield from asyncio.sleep(self.PUBSUB_DELAY)
yield from self.xpublish(publish_id, service, version, endpoint, payload)
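    # Added descriptive comment: xpublish re-publishes every PUBSUB_DELAY
    # seconds until receive() sees the matching 'ack' packet and cancels the
    # pending future, giving at-least-once delivery (subscribers may
    # therefore see duplicates).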
def receive(self, packet, transport, protocol):
if packet['type'] == 'ack':
future = self._pending_publishes.pop(packet['request_id'])
future.cancel()
transport.close()
def _retry_publish(self, endpoint, payload):
return (yield from self._pubsub_handler.publish(endpoint, payload))
def subscription_handler(self, endpoint, payload):
service, version, endpoint = endpoint.split('/')
client = [sc for sc in self._clients if (sc.name == service and sc.version == int(version))][0]
func = getattr(client, endpoint)
asyncio.async(func(**json.loads(payload)))
@staticmethod
def _get_pubsub_key(service, version, endpoint):
return '/'.join((service, str(version), endpoint))
def _connect_and_publish(self, publish_id, service, version, endpoint, strategies, payload):
for key, value in strategies.items():
if value[0][3] == 'LEADER':
host, port = value[0][0], value[0][1]
else:
random_metadata = random.choice(value)
host, port = random_metadata[0], random_metadata[1]
transport, protocol = yield from asyncio.get_event_loop().create_connection(
partial(get_vyked_protocol, self), host, port)
packet = MessagePacket.publish(publish_id, service, version, endpoint, payload)
protocol.send(packet)
| mit | 537,956,476,833,558,800 | 39.654362 | 119 | 0.613372 | false | 4.070901 | false | false | false |
CalebBell/thermo | thermo/flash/flash_utils.py | 1 | 161519 | # -*- coding: utf-8 -*-
'''Chemical Engineering Design Library (ChEDL). Utilities for process modeling.
Copyright (C) 2019, 2020 Caleb Bell <[email protected]>
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
'''
from __future__ import division
__all__ = [
'sequential_substitution_2P',
'sequential_substitution_2P_functional',
'sequential_substitution_GDEM3_2P',
'dew_bubble_Michelsen_Mollerup',
'bubble_T_Michelsen_Mollerup',
'dew_T_Michelsen_Mollerup',
'bubble_P_Michelsen_Mollerup',
'dew_P_Michelsen_Mollerup',
'minimize_gibbs_2P_transformed',
'sequential_substitution_Mehra_2P',
'nonlin_2P',
'nonlin_n_2P',
'sequential_substitution_NP',
'minimize_gibbs_NP_transformed',
'TPV_HSGUA_guesses_1P_methods',
'TPV_solve_HSGUA_guesses_1P',
'sequential_substitution_2P_HSGUAbeta',
'sequential_substitution_2P_sat',
'TP_solve_VF_guesses',
'TPV_double_solve_1P',
'nonlin_2P_HSGUAbeta',
'sequential_substitution_2P_double',
'cm_flash_tol',
'nonlin_2P_newton',
'dew_bubble_newton_zs',
'existence_3P_Michelsen_Mollerup',
'SS_VF_simultaneous',
'stability_iteration_Michelsen',
'assert_stab_success_2P',
'nonlin_equilibrium_NP',
'nonlin_spec_NP',
'TPV_solve_HSGUA_guesses_VL',
'solve_P_VF_IG_K_composition_independent',
'solve_T_VF_IG_K_composition_independent'
]
from fluids.constants import R
from fluids.numerics import (UnconvergedError, trunc_exp, newton,
brenth, secant, translate_bound_f_jac,
numpy as np, assert_close, assert_close1d,
damping_maintain_sign, oscillation_checking_wrapper,
OscillationError, NotBoundedError, jacobian,
best_bounding_bounds, isclose, newton_system,
make_damp_initial, newton_minimize,
root, minimize, fsolve)
from fluids.numerics import py_solve, trunc_log
from chemicals.utils import (exp, log, copysign, normalize,
mixing_simple, property_mass_to_molar)
from chemicals.heat_capacity import (Dadgostar_Shaw_integral,
Dadgostar_Shaw_integral_over_T,
Lastovka_Shaw_integral,
Lastovka_Shaw_integral_over_T)
from chemicals.rachford_rice import (flash_inner_loop,
Rachford_Rice_solutionN,
Rachford_Rice_flash_error,
Rachford_Rice_solution_LN2)
from chemicals.phase_change import SMK
from chemicals.volume import COSTALD
from chemicals.flash_basic import flash_wilson, flash_Tb_Tc_Pc, flash_ideal
from chemicals.exceptions import TrivialSolutionError
from thermo.phases import Phase, CoolPropPhase, CEOSLiquid, CEOSGas, IAPWS95
from thermo.phases.phase_utils import lnphis_direct
from thermo.coolprop import CPiP_min
LASTOVKA_SHAW = 'Lastovka Shaw'
DADGOSTAR_SHAW_1 = 'Dadgostar Shaw 1'
STP_T_GUESS = '298.15 K'
LAST_CONVERGED = 'Last converged'
FIXED_GUESS = 'Fixed guess'
IG_ENTHALPY = 'Ideal gas'
IDEAL_LIQUID_ENTHALPY = 'Ideal liquid'
WILSON_GUESS = 'Wilson'
TB_TC_GUESS = 'Tb Tc'
IDEAL_PSAT = 'Ideal Psat'
PT_SS = 'SS'
PT_SS_MEHRA = 'SS Mehra'
PT_SS_GDEM3 = 'SS GDEM3'
PT_NEWTON_lNKVF = 'Newton lnK VF'
IDEAL_WILSON = 'Ideal Wilson'
SHAW_ELEMENTAL = 'Shaw Elemental'
PH_T_guesses_1P_methods = [LASTOVKA_SHAW, DADGOSTAR_SHAW_1, IG_ENTHALPY,
IDEAL_LIQUID_ENTHALPY, FIXED_GUESS, STP_T_GUESS,
LAST_CONVERGED]
TPV_HSGUA_guesses_1P_methods = PH_T_guesses_1P_methods
def sequential_substitution_2P(T, P, V, zs, xs_guess, ys_guess, liquid_phase,
gas_phase, maxiter=1000, tol=1E-13,
trivial_solution_tol=1e-5, V_over_F_guess=None,
check_G=False, check_V=False, dZ_allow=0.1):
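    # Added descriptive comment -- classic successive substitution: evaluate
    # fugacity coefficients for the trial liquid and gas, update
    # K_i = exp(ln phi_l_i - ln phi_g_i), solve the Rachford-Rice equation for
    # V/F and new compositions, and repeat until sum((K_i*x_i/y_i - 1)^2) < tol.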
xs, ys = xs_guess, ys_guess
if V_over_F_guess is None:
V_over_F = 0.5
else:
V_over_F = V_over_F_guess
cmps = range(len(zs))
err, err1, err2, err3 = 0.0, 0.0, 0.0, 0.0
G_old = None
V_over_F_old = V_over_F
restrained = 0
restrained_switch_count = 300
# Code for testing phis at zs
l, g = liquid_phase, gas_phase
if liquid_phase.T != T or liquid_phase.P != P:
liquid_phase = liquid_phase.to_TP_zs(T=T, P=P, zs=xs)
if gas_phase.T != T or gas_phase.P != P:
gas_phase = gas_phase.to_TP_zs(T=T, P=P, zs=ys)
for iteration in range(maxiter):
# g = gas_phase.to_TP_zs(T=T, P=P, zs=ys)
# l = liquid_phase.to_TP_zs(T=T, P=P, zs=xs)
# l = liquid_phase.to(xs, T=T, P=P, V=V)
# g = gas_phase.to(ys, T=T, P=P, V=V)
# lnphis_g = g.lnphis()
# lnphis_l = l.lnphis()
lnphis_g = gas_phase.lnphis_at_zs(ys)
lnphis_l = liquid_phase.lnphis_at_zs(xs)
limited_Z = False
try:
Ks = [exp(lnphis_l[i] - lnphis_g[i]) for i in cmps] # K_value(phi_l=l, phi_g=g)
except OverflowError:
Ks = [trunc_exp(lnphis_l[i] - lnphis_g[i]) for i in cmps] # K_value(phi_l=l, phi_g=g)
V_over_F_old = V_over_F
try:
V_over_F, xs_new, ys_new = flash_inner_loop(zs, Ks, guess=V_over_F)
except Exception as e:
V_over_F, xs_new, ys_new = flash_inner_loop(zs, Ks, guess=V_over_F, check=True)
# K_low, K_high = False, False
# for zi, Ki in zip(zs, Ks):
# if zi != 0.0:
# if Ki > 1.0:
# K_high = True
# else:
# K_low = True
# if K_high and K_low:
# break
# if not (K_high and K_low):
# raise TrivialSolutionError("Converged to trivial condition, all K same phase",
# comp_difference, iteration, err)
# else:
if check_G:
V_over_F_G = min(max(V_over_F_old, 0), 1)
G = g.G()*V_over_F_G + (1.0 - V_over_F_G)*l.G()
print('new G', G, 'old G', G_old)
if G_old is not None:
if G > G_old:
step = .5
while G > G_old and step > 1e-4:
# ys_working = normalize([step*xo + (1.0 - step)*xi for xi, xo in zip(xs, xs_old)])
# xs_working = normalize([step*xo + (1.0 - step)*xi for xi, xo in zip(ys, ys_old)])
# ys_working = normalize([step*xo + (1.0 - step)*xi for xo, xi in zip(xs, xs_old)])
# xs_working = normalize([step*xo + (1.0 - step)*xi for xo, xi in zip(ys, ys_old)])
# g = gas_phase.to(ys_working, T=T, P=P, V=V)
# l = liquid_phase.to(xs_working, T=T, P=P, V=V)
# lnphis_g = g.lnphis()
# lnphis_l = l.lnphis()
# try:
# Ks = [exp(lnphis_l[i] - lnphis_g[i]) for i in cmps]
# except OverflowError:
# Ks = [trunc_exp(lnphis_l[i] - lnphis_g[i]) for i in cmps]
Ks_working = [step*xo + (1.0 - step)*xi for xo, xi in zip(Ks_old, Ks)]
V_over_F, xs_new, ys_new = flash_inner_loop(zs, Ks_working, guess=V_over_F)
# V_over_F_G = min(max(V_over_F, 0), 1)
g = gas_phase.to(ys_new, T=T, P=P, V=V)
l = liquid_phase.to(xs_new, T=T, P=P, V=V)
G = g.G()*V_over_F_G + (1.0 - V_over_F_G)*l.G()
print('step', step, G, V_over_F, Ks)
step *= 0.5
# xs, ys = xs_working, ys_working
# print('Gibbs increased', G/G_old)
G_old = G
if check_V and iteration > 2:
big_Z_change = (abs(1.0 - l_old.Z()/l.Z()) > dZ_allow or abs(1.0 - g_old.Z()/g.Z()) > dZ_allow)
if restrained <= restrained_switch_count and big_Z_change:
limited_Z = True
step = .5 #.5
while (abs(1.0 - l_old.Z()/l.Z()) > dZ_allow or abs(1.0 - g_old.Z()/g.Z()) > dZ_allow ) and step > 1e-8:
# Ks_working = [step*xo + (1.0 - step)*xi for xo, xi in zip(Ks, Ks_old)]
# Ks_working = [Ks[i]*(Ks_old[i]/Ks[i])**(1.0 - step) for i in cmps] # step = 0 - all new; step = 1 - all old
# Ks_working = [Ks_old[i]*(exp(lnphis_l[i])/exp(lnphis_g[i])/Ks_old[i])**(1.0 - step) for i in cmps]
ys_new = normalize([step*xo + (1.0 - step)*xi for xo, xi in zip(ys, ys_old)])
xs_new = normalize([step*xo + (1.0 - step)*xi for xo, xi in zip(xs, xs_old)])
# V_over_F, xs_new, ys_new = flash_inner_loop(zs, Ks_working, guess=V_over_F)
l = liquid_phase.to(xs_new, T=T, P=P, V=V)
g = gas_phase.to(ys_new, T=T, P=P, V=V)
# lnphis_g = g.lnphis()
# lnphis_l = l.lnphis()
print('step', step, V_over_F, g.Z())
step *= 0.5
xs, ys = xs_new, ys_new
lnphis_g = g.lnphis()
lnphis_l = l.lnphis()
Ks = [exp(lnphis_l[i] - lnphis_g[i]) for i in cmps]
V_over_F, xs_new, ys_new = flash_inner_loop(zs, Ks, guess=V_over_F)
restrained += 1
elif restrained > restrained_switch_count and big_Z_change:
restrained = 0
# Check for negative fractions - normalize only if needed
for xi in xs_new:
if xi < 0.0:
xs_new_sum_inv = 1.0/sum(abs(i) for i in xs_new)
for i in cmps:
xs_new[i] = abs(xs_new[i])*xs_new_sum_inv
break
for yi in ys_new:
if yi < 0.0:
ys_new_sum_inv = 1.0/sum(abs(i) for i in ys_new)
for i in cmps:
ys_new[i] = abs(ys_new[i])*ys_new_sum_inv
break
# Calculate the error using the new Ks and old compositions
# Claimed error function in CONVENTIONAL AND RAPID FLASH
# CALCULATIONS FOR THE SOAVE-REDLICH-KWONG AND PENG-ROBINSON EQUATIONS OF STATE
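        # i.e. err = sum_i (K_i*x_i/y_i - 1)^2; with K_i = phi_l_i/phi_g_i
        # each term is the liquid-to-gas fugacity ratio f_l_i/f_g_i minus one.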
err = 0.0
# Suggested tolerance 1e-15
try:
for Ki, xi, yi in zip(Ks, xs, ys):
# equivalent of fugacity ratio
# Could divide by the old Ks as well.
err_i = Ki*xi/yi - 1.0
err += err_i*err_i
except ZeroDivisionError:
err = 0.0
for Ki, xi, yi in zip(Ks, xs, ys):
try:
err_i = Ki*xi/yi - 1.0
err += err_i*err_i
except ZeroDivisionError:
pass
if err > 0.0 and err in (err1, err2, err3):
raise OscillationError("Converged to cycle in errors, no progress being made")
# Accept the new compositions
xs_old, ys_old, Ks_old = xs, ys, Ks
# if not limited_Z:
# assert xs == l.zs
# assert ys == g.zs
xs, ys = xs_new, ys_new
lnphis_g_old, lnphis_l_old = lnphis_g, lnphis_l
l_old, g_old = l, g
# print(err, V_over_F, Ks) # xs, ys
        # Check for a trivial solution (both phase compositions converging together)
comp_difference = sum([abs(xi - yi) for xi, yi in zip(xs, ys)])
if comp_difference < trivial_solution_tol:
raise TrivialSolutionError("Converged to trivial condition, compositions of both phases equal",
comp_difference, iteration, err)
if err < tol and not limited_Z:
# Temporary!
# err_mole_balance = 0.0
# for i in cmps:
# err_mole_balance += abs(xs_old[i] * (1.0 - V_over_F_old) + ys_old[i] * V_over_F_old - zs[i])
# if err_mole_balance < mole_balance_tol:
# return V_over_F, xs, ys, l, g, iteration, err
if iteration == 0:
# We are composition independent!
g = gas_phase.to(ys_new, T=T, P=P, V=V)
l = liquid_phase.to(xs_new, T=T, P=P, V=V)
return V_over_F, xs_new, ys_new, l, g, iteration, err
else:
g = gas_phase.to(ys_old, T=T, P=P, V=V)
l = liquid_phase.to(xs_old, T=T, P=P, V=V)
return V_over_F_old, xs_old, ys_old, l, g, iteration, err
# elif err < tol and limited_Z:
# print(l.fugacities()/np.array(g.fugacities()))
err1, err2, err3 = err, err1, err2
raise UnconvergedError('End of SS without convergence')
def sequential_substitution_2P_functional(zs, xs_guess, ys_guess,
liquid_args, gas_args, maxiter=1000, tol=1E-13,
trivial_solution_tol=1e-5, V_over_F_guess=0.5):
xs, ys = xs_guess, ys_guess
V_over_F = V_over_F_guess
N = len(zs)
err = 0.0
V_over_F_old = V_over_F
Ks = [0.0]*N
for iteration in range(maxiter):
lnphis_g = lnphis_direct(ys, *gas_args)
lnphis_l = lnphis_direct(xs, *liquid_args)
for i in range(N):
Ks[i] = exp(lnphis_l[i] - lnphis_g[i])
V_over_F_old = V_over_F
try:
V_over_F, xs_new, ys_new = flash_inner_loop(zs, Ks, guess=V_over_F)
except:
V_over_F, xs_new, ys_new = flash_inner_loop(zs, Ks, guess=V_over_F, check=True)
for xi in xs_new:
if xi < 0.0:
# Remove negative mole fractions - may help or may still fail
xs_new_sum_inv = 0.0
for xj in xs_new:
xs_new_sum_inv += abs(xj)
xs_new_sum_inv = 1.0/xs_new_sum_inv
for i in range(N):
xs_new[i] = abs(xs_new[i])*xs_new_sum_inv
break
for yi in ys_new:
if yi < 0.0:
ys_new_sum_inv = 0.0
for yj in ys_new:
ys_new_sum_inv += abs(yj)
ys_new_sum_inv = 1.0/ys_new_sum_inv
for i in range(N):
ys_new[i] = abs(ys_new[i])*ys_new_sum_inv
break
err = 0.0
for Ki, xi, yi in zip(Ks, xs, ys):
# equivalent of fugacity ratio
# Could divide by the old Ks as well.
err_i = Ki*xi/yi - 1.0
err += err_i*err_i
xs_old, ys_old = xs, ys
xs, ys = xs_new, ys_new
comp_difference = 0.0
for xi, yi in zip(xs, ys):
comp_difference += abs(xi - yi)
if comp_difference < trivial_solution_tol:
raise ValueError("Converged to trivial condition, compositions of both phases equal")
if err < tol:
return V_over_F_old, xs_old, ys_old, iteration, err
raise ValueError('End of SS without convergence')
def sequential_substitution_NP(T, P, zs, compositions_guesses, betas_guesses,
phases, maxiter=1000, tol=1E-13,
trivial_solution_tol=1e-5, ref_phase=2):
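    # Added descriptive comment -- N-phase successive substitution: the phase
    # at index ref_phase is the reference; K-values of every other phase are
    # taken relative to it, and Rachford_Rice_solutionN solves for all phase
    # fractions and compositions each iteration.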
compositions = compositions_guesses
cmps = range(len(zs))
phase_count = len(phases)
phases_iter = range(phase_count)
phase_iter_n1 = range(phase_count - 1)
betas = betas_guesses
if len(betas) < len(phases):
betas.append(1.0 - sum(betas))
compositions_K_order = [compositions[i] for i in phases_iter if i != ref_phase]
compositions_ref = compositions_guesses[ref_phase]
for iteration in range(maxiter):
phases = [phases[i].to_TP_zs(T=T, P=P, zs=compositions[i]) for i in phases_iter]
lnphis = [phases[i].lnphis() for i in phases_iter]
Ks = []
lnphis_ref = lnphis[ref_phase]
for i in phases_iter:
if i != ref_phase:
lnphis_i = lnphis[i]
try:
Ks.append([exp(lnphis_ref[j] - lnphis_i[j]) for j in cmps])
except OverflowError:
Ks.append([trunc_exp(lnphis_ref[j] - lnphis_i[j]) for j in cmps])
beta_guesses = [betas[i] for i in phases_iter if i != ref_phase]
#if phase_count == 3:
# Rachford_Rice_solution2(zs, Ks[0], Ks[1], beta_y=beta_guesses[0], beta_z=beta_guesses[1])
betas_new, compositions_new = Rachford_Rice_solutionN(zs, Ks, beta_guesses)
# Sort the order back
beta_ref_new = betas_new[-1]
betas_new = betas_new[:-1]
betas_new.insert(ref_phase, beta_ref_new)
compositions_ref_new = compositions_new[-1]
compositions_K_order_new = compositions_new[:-1]
compositions_new = list(compositions_K_order_new)
compositions_new.insert(ref_phase, compositions_ref_new)
err = 0.0
for i in phase_iter_n1:
Ks_i = Ks[i]
ys = compositions_K_order[i]
try:
for Ki, xi, yi in zip(Ks_i, compositions_ref, ys):
err_i = Ki*xi/yi - 1.0
err += err_i*err_i
except ZeroDivisionError:
err = 0.0
for Ki, xi, yi in zip(Ks_i, compositions_ref, ys):
try:
err_i = Ki*xi/yi - 1.0
err += err_i*err_i
except ZeroDivisionError:
pass
# print(betas, Ks, 'calculated', err)
# print(err)
compositions = compositions_new
compositions_K_order = compositions_K_order_new
compositions_ref = compositions_ref_new
betas = betas_new
# TODO trivial solution check - how to handle - drop phase?
# Check for
# comp_difference = sum([abs(xi - yi) for xi, yi in zip(xs, ys)])
# if comp_difference < trivial_solution_tol:
# raise ValueError("Converged to trivial condition, compositions of both phases equal")
if err < tol:
return betas, compositions, phases, iteration, err
# if iteration > 100:
# return betas, compositions, phases, iteration, err
raise UnconvergedError('End of SS without convergence')
def sequential_substitution_Mehra_2P(T, P, zs, xs_guess, ys_guess, liquid_phase,
gas_phase, maxiter=1000, tol=1E-13,
trivial_solution_tol=1e-5,
acc_frequency=3, acc_delay=5,
lambda_max=3, lambda_min=0.0,
V_over_F_guess=None):
xs, ys = xs_guess, ys_guess
if V_over_F_guess is None:
V_over_F = 0.5
else:
V_over_F = V_over_F_guess
N = len(zs)
cmps = range(N)
lambdas = [1.0]*N
Ks = [ys[i]/xs[i] for i in cmps]
gs = []
for iteration in range(maxiter):
g = gas_phase.to_TP_zs(T=T, P=P, zs=ys)
l = liquid_phase.to_TP_zs(T=T, P=P, zs=xs)
fugacities_g = g.fugacities()
fugacities_l = l.fugacities()
# Ks = [fugacities_l[i]*ys[i]/(fugacities_g[i]*xs[i]) for i in cmps]
lnphis_g = g.lnphis()
lnphis_l = l.lnphis()
phis_g = g.phis()
phis_l = l.phis()
# Ks = [Ks[i]*exp(-lnphis_g[i]/lnphis_l[i]) for i in cmps]
# Ks = [Ks[i]*(phis_l[i]/phis_g[i]/Ks[i])**lambdas[i] for i in cmps]
# Ks = [Ks[i]*fugacities_l[i]/fugacities_g[i] for i in cmps]
# Ks = [Ks[i]*exp(-phis_g[i]/phis_l[i]) for i in cmps]
# Mehra, R. K., R. A. Heidemann, and K. Aziz. “An Accelerated Successive Substitution Algorithm.” The Canadian Journal of Chemical Engineering 61, no. 4 (August 1, 1983): 590-96. https://doi.org/10.1002/cjce.5450610414.
# Strongly believed correct
gis = np.log(fugacities_g) - np.log(fugacities_l)
if not (iteration % acc_frequency) and iteration > acc_delay:
gis_old = np.array(gs[-1])
            # lambdas = np.abs(gis_old.T*gis_old/(gis_old.T*(gis_old - gis))*lambdas).tolist() # Algorithm 3 also working
# lambdas = np.abs(gis_old.T*(gis_old-gis)/((gis_old-gis).T*(gis_old - gis))*lambdas).tolist() # WORKING
lambdas = np.abs(gis.T*gis/(gis_old.T*(gis - gis_old))).tolist() # 34, working
lambdas = [min(max(li, lambda_min), lambda_max) for li in lambdas]
# print(lambdas[0:5])
            # print(lambdas)  # debug output, disabled
# print('Ks', Ks, )
# print(Ks[-1], phis_l[-1], phis_g[-1], lambdas[-1], gis[-1], gis_old[-1])
Ks = [Ks[i]*(phis_l[i]/phis_g[i]/Ks[i])**lambdas[i] for i in cmps]
# print(Ks)
else:
Ks = [Ks[i]*fugacities_l[i]/fugacities_g[i] for i in cmps]
# print(Ks[0:5])
gs.append(gis)
# lnKs = [lnKs[i]*1.5 for i in cmps]
V_over_F, xs_new, ys_new = flash_inner_loop(zs, Ks, guess=V_over_F)
# Check for negative fractions - normalize only if needed
for xi in xs_new:
if xi < 0.0:
xs_new_sum = sum(abs(i) for i in xs_new)
xs_new = [abs(i)/xs_new_sum for i in xs_new]
break
for yi in ys_new:
if yi < 0.0:
ys_new_sum = sum(abs(i) for i in ys_new)
ys_new = [abs(i)/ys_new_sum for i in ys_new]
break
err = 0.0
# Suggested tolerance 1e-15
for Ki, xi, yi in zip(Ks, xs, ys):
# equivalent of fugacity ratio
# Could divide by the old Ks as well.
err_i = Ki*xi/yi - 1.0
err += err_i*err_i
        # print(err)  # debug output, disabled
# Accept the new compositions
xs, ys = xs_new, ys_new
        # Check for a trivial solution
comp_difference = sum([abs(xi - yi) for xi, yi in zip(xs, ys)])
if comp_difference < trivial_solution_tol:
raise TrivialSolutionError("Converged to trivial condition, compositions of both phases equal",
comp_difference, iteration, err)
if err < tol:
return V_over_F, xs, ys, l, g, iteration, err
raise UnconvergedError('End of SS without convergence')
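# Illustrative sketch only: the per-component Mehra acceleration factor used
# above, lambda_i = |g_i^2/(g_i_old*(g_i - g_i_old))| clipped to
# [lambda_min, lambda_max], evaluated with made-up residuals g = ln(f_g/f_l).
def _demo_Mehra_lambdas(lambda_min=0.0, lambda_max=3.0):
    import numpy as np
    gis_old = np.array([0.40, -0.20, 0.10])   # hypothetical residuals
    gis = np.array([0.24, -0.13, 0.055])      # residuals one iteration later
    lambdas = np.abs(gis*gis/(gis_old*(gis - gis_old))).tolist()
    return [min(max(li, lambda_min), lambda_max) for li in lambdas]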
def sequential_substitution_GDEM3_2P(T, P, zs, xs_guess, ys_guess, liquid_phase,
gas_phase, maxiter=1000, tol=1E-13,
trivial_solution_tol=1e-5, V_over_F_guess=None,
acc_frequency=3, acc_delay=3,
):
xs, ys = xs_guess, ys_guess
if V_over_F_guess is None:
V_over_F = 0.5
else:
V_over_F = V_over_F_guess
cmps = range(len(zs))
all_Ks = []
all_lnKs = []
for iteration in range(maxiter):
g = gas_phase.to_TP_zs(T=T, P=P, zs=ys)
l = liquid_phase.to_TP_zs(T=T, P=P, zs=xs)
lnphis_g = g.lnphis()
lnphis_l = l.lnphis()
# Mehra et al. (1983) is another option
# Ks = [exp(l - g) for l, g in zip(lnphis_l, lnphis_g)]
# if not (iteration %3) and iteration > 3:
# dKs = gdem(Ks, all_Ks[-1], all_Ks[-2], all_Ks[-3])
# print(iteration, dKs)
# Ks = [Ks[i] + dKs[i] for i in cmps]
# all_Ks.append(Ks)
# lnKs = [(l - g) for l, g in zip(lnphis_l, lnphis_g)]
# if not (iteration %3) and iteration > 3:
## dlnKs = gdem(lnKs, all_lnKs[-1], all_lnKs[-2], all_lnKs[-3])
#
# dlnKs = gdem(lnKs, all_lnKs[-1], all_lnKs[-2], all_lnKs[-3])
# lnKs = [lnKs[i] + dlnKs[i] for i in cmps]
# Mehra, R. K., R. A. Heidemann, and K. Aziz. “An Accelerated Successive Substitution Algorithm.” The Canadian Journal of Chemical Engineering 61, no. 4 (August 1, 1983): 590-96. https://doi.org/10.1002/cjce.5450610414.
lnKs = [(l - g) for l, g in zip(lnphis_l, lnphis_g)]
if not (iteration %acc_frequency) and iteration > acc_delay:
dlnKs = gdem(lnKs, all_lnKs[-1], all_lnKs[-2], all_lnKs[-3])
            # print(dlnKs)  # debug output, disabled
lnKs = [lnKs[i] + dlnKs[i] for i in cmps]
        # Try to test accelerated updates
all_lnKs.append(lnKs)
Ks = [exp(lnKi) for lnKi in lnKs]
V_over_F, xs_new, ys_new = flash_inner_loop(zs, Ks, guess=V_over_F)
# Check for negative fractions - normalize only if needed
for xi in xs_new:
if xi < 0.0:
xs_new_sum = sum(abs(i) for i in xs_new)
xs_new = [abs(i)/xs_new_sum for i in xs_new]
break
for yi in ys_new:
if yi < 0.0:
ys_new_sum = sum(abs(i) for i in ys_new)
ys_new = [abs(i)/ys_new_sum for i in ys_new]
break
err = 0.0
# Suggested tolerance 1e-15
for Ki, xi, yi in zip(Ks, xs, ys):
# equivalent of fugacity ratio
# Could divide by the old Ks as well.
err_i = Ki*xi/yi - 1.0
err += err_i*err_i
# Accept the new compositions
xs, ys = xs_new, ys_new
        # Check for a trivial solution
comp_difference = sum([abs(xi - yi) for xi, yi in zip(xs, ys)])
if comp_difference < trivial_solution_tol:
raise TrivialSolutionError("Converged to trivial condition, compositions of both phases equal",
comp_difference, iteration, err)
if err < tol:
return V_over_F, xs, ys, l, g, iteration, err
raise UnconvergedError('End of SS without convergence')
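# Illustrative sketch only: the negative-fraction guard used inside the SS
# loops above; a composition with any negative entry from the Rachford-Rice
# update is replaced by its absolute values and renormalized.
def _demo_negative_fraction_guard(xs=None):
    xs = [0.7, 0.35, -0.05] if xs is None else xs
    if any(xi < 0.0 for xi in xs):
        xs_sum = sum(abs(xi) for xi in xs)
        xs = [abs(xi)/xs_sum for xi in xs]
    return xs  # sums to 1.0 with all entries positive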
def nonlin_equilibrium_NP(T, P, zs, compositions_guesses, betas_guesses,
phases, maxiter=1000, tol=1E-13,
trivial_solution_tol=1e-5, ref_phase=-1,
method='hybr', solve_kwargs=None, debug=False):
if solve_kwargs is None:
solve_kwargs = {}
compositions = compositions_guesses
N = len(zs)
Nm1 = N - 1
cmps = range(N)
phase_count = len(phases)
phase_iter = range(phase_count)
if ref_phase < 0:
ref_phase = phase_count + ref_phase
phase_iter_n1 = [i for i in phase_iter if i != ref_phase]
phase_iter_n1_0 = range(phase_count-1)
betas = betas_guesses
if len(betas) < len(phases):
betas.append(1.0 - sum(betas))
flows_guess = [compositions_guesses[j][i]*betas[j] for j in phase_iter_n1 for i in cmps]
jac = True
if method in ('broyden1', 'broyden2', 'anderson', 'linearmixing',
'diagbroyden', 'excitingmixing', 'krylov'):
jac = False
global iterations, info
iterations = 0
info = []
def to_solve(flows, jac=jac):
global iterations, info
try:
flows = flows.tolist()
except:
flows = list(flows)
iterations += 1
iter_flows = []
iter_comps = []
iter_betas = []
iter_phases = []
jac_arr = None
remaining = zs
for i in range(len(flows)):
if flows[i] < 0.0:
flows[i] = 1e-100
for j, k in zip(phase_iter_n1, phase_iter_n1_0):
v = flows[k*N:k*N+N]
vs = v
vs_sum = sum(abs(i) for i in vs)
if vs_sum == 0.0:
                # Handle the case where the optimizer has already assigned the entire feed to the other phases
ys = zs
else:
vs_sum_inv = 1.0/vs_sum
ys = [abs(vs[i]*vs_sum_inv) for i in cmps]
ys = normalize(ys)
iter_flows.append(vs)
iter_comps.append(ys)
iter_betas.append(vs_sum) # Would be divided by feed but feed is zs = 1
iter_phases.append(phases[j].to_TP_zs(T=T, P=P, zs=ys))
remaining = [remaining[i] - vs[i] for i in cmps]
flows_ref = remaining
iter_flows.insert(ref_phase, remaining)
beta_ref = sum(remaining)
iter_betas.insert(ref_phase, beta_ref)
xs_ref = normalize([abs(i) for i in remaining])
iter_comps.insert(ref_phase, xs_ref)
phase_ref = phases[ref_phase].to_TP_zs(T=T, P=P, zs=xs_ref)
iter_phases.insert(ref_phase, phase_ref)
lnphis_ref = phase_ref.lnphis()
dlnfugacities_ref = phase_ref.dlnfugacities_dns()
errs = []
for k in phase_iter_n1:
phase = iter_phases[k]
lnphis = phase.lnphis()
xs = iter_comps[k]
for i in cmps:
# This is identical to lnfugacity(i)^j - lnfugacity(i)^ref
gi = trunc_log(xs[i]/xs_ref[i]) + lnphis[i] - lnphis_ref[i]
errs.append(gi)
if jac:
jac_arr = [[0.0]*N*(phase_count-1) for i in range(N*(phase_count-1))]
for ni, nj in zip(phase_iter_n1, phase_iter_n1_0):
p = iter_phases[ni]
dlnfugacities = p.dlnfugacities_dns()
# Begin with the first row using ni, nj;
for i in cmps:
for ki, kj in zip(phase_iter_n1, phase_iter_n1_0):
for j in cmps:
delta = 1.0 if nj == kj else 0.0
v_ref = dlnfugacities_ref[i][j]/beta_ref
jac_arr[nj*N + i][kj*N + j] = dlnfugacities[i][j]*delta/iter_betas[ni] + v_ref
info[:] = iter_betas, iter_comps, iter_phases, errs, jac_arr, flows
if jac:
return errs, jac_arr
return errs
if method == 'newton_system':
comp_val, iterations = newton_system(to_solve, flows_guess, jac=True,
xtol=tol, damping=1,
damping_func=damping_maintain_sign)
else:
def f_jac_numpy(flows_guess):
            # numpy conversion needed by scipy.optimize.root
ans = to_solve(flows_guess)
if jac:
return np.array(ans[0]), np.array(ans[1])
return np.array(ans)
sln = root(f_jac_numpy, flows_guess, tol=tol, jac=(True if jac else None), method=method, **solve_kwargs)
iterations = sln['nfev']
betas, compositions, phases, errs, jac, flows = info
sln = (betas, compositions, phases, errs, jac, iterations)
if debug:
return sln, flows, to_solve
return sln
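# Illustrative sketch only: how the flow variables used by
# nonlin_equilibrium_NP map back to phase fractions and compositions. The
# reference phase receives whatever feed is left over; numbers are made up
# and the feed basis is 1 mol.
def _demo_flows_to_phases(zs=None, flows=None):
    zs = [0.4, 0.6] if zs is None else zs            # feed composition
    flows = [0.1, 0.3] if flows is None else flows   # component flows, phase 0
    beta0 = sum(flows)                               # phase-0 fraction
    ys = [f/beta0 for f in flows]                    # phase-0 composition
    remaining = [zi - f for zi, f in zip(zs, flows)]
    beta_ref = sum(remaining)                        # reference-phase fraction
    xs_ref = [r/beta_ref for r in remaining]
    return (beta0, ys), (beta_ref, xs_ref)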
def nonlin_spec_NP(guess, fixed_val, spec_val, zs, compositions_guesses, betas_guesses,
phases, iter_var='T', fixed_var='P', spec='H',
maxiter=1000, tol=1E-13,
trivial_solution_tol=1e-5, ref_phase=-1,
# method='hybr',
method='fsolve',
solve_kwargs=None, debug=False,
analytical_jac=True):
if solve_kwargs is None:
solve_kwargs = {}
phase_kwargs = {fixed_var: fixed_val, iter_var: guess}
compositions = compositions_guesses
N = len(zs)
Nm1 = N - 1
cmps = range(N)
phase_count = len(phases)
phase_iter = range(phase_count)
if ref_phase < 0:
ref_phase = phase_count + ref_phase
phase_iter_n1 = [i for i in phase_iter if i != ref_phase]
phase_iter_n1_0 = range(phase_count-1)
betas = betas_guesses
if len(betas) < len(phases):
betas.append(1.0 - sum(betas))
guesses = [compositions_guesses[j][i]*betas[j] for j in phase_iter_n1 for i in cmps]
guesses.append(guess)
spec_callables = [getattr(phase.__class__, spec) for phase in phases]
dlnphis_diter_s = 'dlnphis_d' + iter_var
dlnphis_diter_callables = [getattr(phase.__class__, dlnphis_diter_s) for phase in phases]
dspec_diter_s = 'd%s_d%s' %(spec, iter_var)
dspec_diter_callables = [getattr(phase.__class__, dspec_diter_s) for phase in phases]
dspec_dn_s = 'd%s_dns' %(spec)
dspec_dn_callables = [getattr(phase.__class__, dspec_dn_s) for phase in phases]
jac = True
if method in ('broyden1', 'broyden2', 'anderson', 'linearmixing',
'diagbroyden', 'excitingmixing', 'krylov', 'fsolve'):
jac = False
global iterations, info
iterations = 0
info = []
def to_solve(flows, jac=jac, skip_err=False):
global iterations, info
try:
flows = flows.tolist()
except:
flows = list(flows)
iter_val = flows[-1]
phase_kwargs[iter_var] = iter_val
flows = flows[:-1]
iter_flows = []
iter_comps = []
iter_betas = []
iter_phases = []
jac_arr = None
remaining = zs
if not skip_err:
# print(flows, iter_val)
iterations += 1
for i in range(len(flows)):
if flows[i] < 0.0:
flows[i] = 1e-100
for j, k in zip(phase_iter_n1, phase_iter_n1_0):
v = flows[k*N:k*N+N]
vs = v
vs_sum = sum(abs(i) for i in vs)
if vs_sum == 0.0:
                    # Handle the case where the optimizer has already assigned the entire feed to the other phases
ys = zs
else:
vs_sum_inv = 1.0/vs_sum
ys = [abs(vs[i]*vs_sum_inv) for i in cmps]
ys = normalize(ys)
iter_flows.append(vs)
iter_comps.append(ys)
iter_betas.append(vs_sum) # Would be divided by feed but feed is zs = 1
iter_phases.append(phases[j].to_TP_zs(zs=ys, **phase_kwargs))
remaining = [remaining[i] - vs[i] for i in cmps]
flows_ref = remaining
iter_flows.insert(ref_phase, remaining)
beta_ref = sum(remaining)
iter_betas.insert(ref_phase, beta_ref)
xs_ref = normalize([abs(i) for i in remaining])
iter_comps.insert(ref_phase, xs_ref)
phase_ref = phases[ref_phase].to_TP_zs(zs=xs_ref, **phase_kwargs)
iter_phases.insert(ref_phase, phase_ref)
lnphis_ref = phase_ref.lnphis()
errs = []
for k in phase_iter_n1:
phase = iter_phases[k]
lnphis = phase.lnphis()
xs = iter_comps[k]
for i in cmps:
# This is identical to lnfugacity(i)^j - lnfugacity(i)^ref
gi = trunc_log(xs[i]/xs_ref[i]) + lnphis[i] - lnphis_ref[i]
errs.append(gi)
spec_phases = []
spec_calc = 0.0
for k in phase_iter:
spec_phase = spec_callables[k](iter_phases[k])
spec_phases.append(spec_phase)
spec_calc += spec_phase*iter_betas[k]
errs.append(spec_calc - spec_val)
else:
iter_betas, iter_comps, iter_phases, errs, jac_arr, flows, iter_val_check, spec_phases = info
beta_ref = iter_betas[ref_phase]
xs_ref = iter_comps[ref_phase]
phase_ref = iter_phases[ref_phase]
lnphis_ref = phase_ref.lnphis()
# print(errs[-1], 'err', iter_val, 'T')
if jac:
dlnfugacities_ref = phase_ref.dlnfugacities_dns()
jac_arr = [[0.0]*(N*(phase_count-1) + 1) for i in range(N*(phase_count-1)+1)]
for ni, nj in zip(phase_iter_n1, phase_iter_n1_0):
p = iter_phases[ni]
dlnfugacities = p.dlnfugacities_dns()
# Begin with the first row using ni, nj;
for i in cmps:
for ki, kj in zip(phase_iter_n1, phase_iter_n1_0):
for j in cmps:
delta = 1.0 if nj == kj else 0.0
v_ref = dlnfugacities_ref[i][j]/beta_ref
jac_arr[nj*N + i][kj*N + j] = dlnfugacities[i][j]*delta/iter_betas[ni] + v_ref
dlnphis_dspec = [dlnphis_diter_callables[i](phases[i]) for i in phase_iter]
dlnphis_dspec_ref = dlnphis_dspec[ref_phase]
for ni, nj in zip(phase_iter_n1, phase_iter_n1_0):
p = iter_phases[ni]
for i in cmps:
jac_arr[nj*N + i][-1] = dlnphis_dspec[ni][i] - dlnphis_dspec_ref[i]
dspec_calc = 0.0
for k in phase_iter:
dspec_calc += dspec_diter_callables[k](iter_phases[k])*iter_betas[k]
jac_arr[-1][-1] = dspec_calc
dspec_dns = [dspec_dn_callables[i](phases[i]) for i in phase_iter]
dspec_dns_ref = dspec_dns[ref_phase]
last_jac_row = jac_arr[-1]
for ni, nj in zip(phase_iter_n1, phase_iter_n1_0):
for i in cmps:
# What is wrong?
# H is multiplied by the phase fraction, of which this n is a part of
# So there must be two parts here
last_jac_row[nj*N + i] = ((iter_betas[ni]*dspec_dns[ni][i]/iter_betas[ni] - beta_ref*dspec_dns_ref[i]/beta_ref)
+ (spec_phases[ni] - spec_phases[ref_phase]))
if skip_err:
return jac_arr
info[:] = iter_betas, iter_comps, iter_phases, errs, jac_arr, flows, iter_val, spec_phases
if jac:
return errs, jac_arr
return errs
if method == 'newton_system':
comp_val, iterations = newton_system(to_solve, guesses, jac=True,
xtol=tol, damping=1,
damping_func=damping_maintain_sign)
else:
def f_jac_numpy(flows_guess):
            # numpy conversion needed by scipy's solvers
ans = to_solve(flows_guess)
if jac:
return np.array(ans[0]), np.array(ans[1])
return np.array(ans)
def jac_numpy(flows_guess):
if flows_guess.tolist() == info[5] + [info[6]]:
a = np.array(to_solve(flows_guess, jac=True, skip_err=True))
# b = np.array(to_solve(flows_guess, jac=True)[1])
# from numpy.testing import assert_allclose
# assert_allclose(a, b, rtol=1e-10)
return a
# print('fail jac', tuple(flows_guess.tolist()), tuple(info[5]))
# print('new jac')
return np.array(to_solve(flows_guess, jac=True)[1])
if method == 'fsolve':
# Need a function cache! 2 wasted fevals, 1 wasted jaceval
if analytical_jac:
jac = False
sln, infodict, _, _ = fsolve(f_jac_numpy, guesses, fprime=jac_numpy, xtol=tol, full_output=1, **solve_kwargs)
else:
sln, infodict, _, _ = fsolve(f_jac_numpy, guesses, xtol=tol, full_output=1, **solve_kwargs)
iterations = infodict['nfev']
else:
sln = root(f_jac_numpy, guesses, tol=tol, jac=(True if jac else None), method=method, **solve_kwargs)
iterations = sln['nfev']
betas, compositions, phases, errs, jac, flows, iter_val, spec_phases = info
sln = (iter_val, betas, compositions, phases, errs, jac, iterations)
if debug:
return sln, flows, to_solve
return sln
def nonlin_2P(T, P, zs, xs_guess, ys_guess, liquid_phase,
gas_phase, maxiter=1000, tol=1E-13,
trivial_solution_tol=1e-5, V_over_F_guess=None,
method='hybr'):
# Do with just n?
cmps = range(len(zs))
xs, ys = xs_guess, ys_guess
if V_over_F_guess is None:
V_over_F = 0.5
else:
V_over_F = V_over_F_guess
Ks_guess = [ys[i]/xs[i] for i in cmps]
info = [0, None, None, None]
def to_solve(lnKsVFTrans):
Ks = [trunc_exp(i) for i in lnKsVFTrans[:-1]]
V_over_F = (0.0 + (1.0 - 0.0)/(1.0 + trunc_exp(-lnKsVFTrans[-1]))) # Translation function - keep it zero to 1
xs = [zs[i]/(1.0 + V_over_F*(Ks[i] - 1.0)) for i in cmps]
ys = [Ks[i]*xs[i] for i in cmps]
g = gas_phase.to_TP_zs(T=T, P=P, zs=ys)
l = liquid_phase.to_TP_zs(T=T, P=P, zs=xs)
lnphis_g = g.lnphis()
lnphis_l = l.lnphis()
# print(g.fugacities(), l.fugacities())
new_Ks = [exp(lnphis_l[i] - lnphis_g[i]) for i in cmps]
VF_err = Rachford_Rice_flash_error(V_over_F, zs, new_Ks)
err = [new_Ks[i] - Ks[i] for i in cmps] + [VF_err]
info[1:] = l, g, err
info[0] += 1
return err
VF_guess_in_basis = -log((1.0-V_over_F)/(V_over_F-0.0))
guesses = [log(i) for i in Ks_guess]
guesses.append(VF_guess_in_basis)
# try:
sol = root(to_solve, guesses, tol=tol, method=method)
# No reliable way to get number of iterations from OptimizeResult
# solution, infodict, ier, mesg = fsolve(to_solve, guesses, full_output=True)
solution = sol.x.tolist()
V_over_F = (0.0 + (1.0 - 0.0)/(1.0 + exp(-solution[-1])))
Ks = [exp(solution[i]) for i in cmps]
xs = [zs[i]/(1.0 + V_over_F*(Ks[i] - 1.0)) for i in cmps]
ys = [Ks[i]*xs[i] for i in cmps]
# except Exception as e:
# raise UnconvergedError(e)
tot_err = 0.0
for i in info[3]:
tot_err += abs(i)
return V_over_F, xs, ys, info[1], info[2], info[0], tot_err
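# Illustrative sketch only: the bounded-variable transform nonlin_2P uses to
# keep the vapor fraction in (0, 1). The solver iterates on
# t = -log((1 - VF)/VF) and maps back with the logistic function.
def _demo_VF_transform(VF=0.3):
    from math import exp, log
    t = -log((1.0 - VF)/VF)          # into the unbounded basis
    VF_back = 1.0/(1.0 + exp(-t))    # back into (0, 1); recovers VF
    return t, VF_back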
def nonlin_2P_HSGUAbeta(spec, spec_var, iter_val, iter_var, fixed_val,
fixed_var, zs, xs_guess, ys_guess, liquid_phase,
gas_phase, maxiter=1000, tol=1E-13,
trivial_solution_tol=1e-5, V_over_F_guess=None,
method='hybr'
):
cmps = range(len(zs))
xs, ys = xs_guess, ys_guess
if V_over_F_guess is None:
V_over_F = 0.5
else:
V_over_F = V_over_F_guess
Ks_guess = [ys[i]/xs[i] for i in cmps]
kwargs_l = {'zs': xs_guess, fixed_var: fixed_val}
kwargs_g = {'zs': ys_guess, fixed_var: fixed_val}
info = [0, None, None, None, None]
def to_solve(lnKsVFTransHSGUABeta):
Ks = [trunc_exp(i) for i in lnKsVFTransHSGUABeta[:-2]]
V_over_F = (0.0 + (1.0 - 0.0)/(1.0 + trunc_exp(-lnKsVFTransHSGUABeta[-2]))) # Translation function - keep it zero to 1
iter_val = lnKsVFTransHSGUABeta[-1]
xs = [zs[i]/(1.0 + V_over_F*(Ks[i] - 1.0)) for i in cmps]
ys = [Ks[i]*xs[i] for i in cmps]
kwargs_l[iter_var] = iter_val
kwargs_l['zs'] = xs
kwargs_g[iter_var] = iter_val
kwargs_g['zs'] = ys
g = gas_phase.to(**kwargs_g)
l = liquid_phase.to(**kwargs_l)
lnphis_g = g.lnphis()
lnphis_l = l.lnphis()
new_Ks = [exp(lnphis_l[i] - lnphis_g[i]) for i in cmps]
VF_err = Rachford_Rice_flash_error(V_over_F, zs, new_Ks)
val_l = getattr(l, spec_var)()
val_g = getattr(g, spec_var)()
val = V_over_F*val_g + (1.0 - V_over_F)*val_l
other_err = val - spec
err = [new_Ks[i] - Ks[i] for i in cmps] + [VF_err, other_err]
info[1:] = l, g, err, other_err
info[0] += 1
# print(lnKsVFTransHSGUABeta, err)
return err
VF_guess_in_basis = -log((1.0-V_over_F)/(V_over_F-0.0))
guesses = [log(i) for i in Ks_guess]
guesses.append(VF_guess_in_basis)
guesses.append(iter_val)
# solution, iterations = broyden2(guesses, fun=to_solve, jac=False, xtol=1e-7,
# maxiter=maxiter, jac_has_fun=False, skip_J=True)
sol = root(to_solve, guesses, tol=tol, method=method)
solution = sol.x.tolist()
V_over_F = (0.0 + (1.0 - 0.0)/(1.0 + exp(-solution[-2])))
iter_val = solution[-1]
Ks = [exp(solution[i]) for i in cmps]
xs = [zs[i]/(1.0 + V_over_F*(Ks[i] - 1.0)) for i in cmps]
ys = [Ks[i]*xs[i] for i in cmps]
tot_err = 0.0
for v in info[3]:
tot_err += abs(v)
return V_over_F, solution[-1], xs, ys, info[1], info[2], info[0], tot_err
def nonlin_n_2P(T, P, zs, xs_guess, ys_guess, liquid_phase,
gas_phase, maxiter=1000, tol=1E-13,
trivial_solution_tol=1e-5, V_over_F_guess=None,
method='hybr'):
cmps = range(len(zs))
xs, ys = xs_guess, ys_guess
if V_over_F_guess is None:
V_over_F = 0.45
else:
V_over_F = V_over_F_guess
ns = [ys[i]*V_over_F for i in cmps]
info = [0, None, None, None]
def to_solve(ns):
ys = normalize(ns)
ns_l = [zs[i] - ns[i] for i in cmps]
# print(sum(ns)+sum(ns_l))
xs = normalize(ns_l)
# print(ys, xs)
g = gas_phase.to_TP_zs(T=T, P=P, zs=ys)
l = liquid_phase.to_TP_zs(T=T, P=P, zs=xs)
# print(np.array(g.dfugacities_dns()) - np.array(l.dfugacities_dns()) )
fugacities_g = g.fugacities()
fugacities_l = l.fugacities()
err = [fugacities_g[i] - fugacities_l[i] for i in cmps]
info[1:] = l, g, err
info[0] += 1
# print(err)
return err
# print(np.array(jacobian(to_solve, ns, scalar=False)))
# print('ignore')
sol = root(to_solve, ns, tol=tol, method=method)
ns_sln = sol.x.tolist()
ys = normalize(ns_sln)
xs_sln = [zs[i] - ns_sln[i] for i in cmps]
xs = normalize(xs_sln)
return xs, ys
def nonlin_2P_newton(T, P, zs, xs_guess, ys_guess, liquid_phase,
gas_phase, maxiter=1000, xtol=1E-10,
trivial_solution_tol=1e-5, V_over_F_guess=None):
N = len(zs)
cmps = range(N)
xs, ys = xs_guess, ys_guess
if V_over_F_guess is None:
V_over_F = 0.5
else:
V_over_F = V_over_F_guess
Ks_guess = [ys[i]/xs[i] for i in cmps]
info = []
def to_solve(lnKsVF):
# Jacobian verified. However, very sketchy - mole fractions may want
# to go negative.
lnKs = lnKsVF[:-1]
Ks = [exp(lnKi) for lnKi in lnKs]
VF = float(lnKsVF[-1])
# if VF > 1:
# VF = 1-1e-15
# if VF < 0:
# VF = 1e-15
xs = [zi/(1.0 + VF*(Ki - 1.0)) for zi, Ki in zip(zs, Ks)]
ys = [Ki*xi for Ki, xi in zip(Ks, xs)]
g = gas_phase.to_TP_zs(T=T, P=P, zs=ys)
l = liquid_phase.to_TP_zs(T=T, P=P, zs=xs)
lnphis_g = g.lnphis()
lnphis_l = l.lnphis()
size = N + 1
J = [[None]*size for i in range(size)]
d_lnphi_dxs = l.dlnphis_dzs()
d_lnphi_dys = g.dlnphis_dzs()
J[N][N] = 1.0
# Last column except last value; believed correct
# Was not correct when compared to numerical solution
Ksm1 = [Ki - 1.0 for Ki in Ks]
RR_denoms_inv2 = []
for i in cmps:
t = 1.0 + VF*Ksm1[i]
RR_denoms_inv2.append(1.0/(t*t))
RR_terms = [zs[k]*Ksm1[k]*RR_denoms_inv2[k] for k in cmps]
for i in cmps:
value = 0.0
d_lnphi_dxs_i, d_lnphi_dys_i = d_lnphi_dxs[i], d_lnphi_dys[i]
for k in cmps:
value += RR_terms[k]*(d_lnphi_dxs_i[k] - Ks[k]*d_lnphi_dys_i[k])
J[i][-1] = value
# Main body - expensive to compute! Lots of elements
zsKsRRinvs2 = [zs[j]*Ks[j]*RR_denoms_inv2[j] for j in cmps]
one_m_VF = 1.0 - VF
for i in cmps:
Ji = J[i]
d_lnphi_dxs_is, d_lnphi_dys_is = d_lnphi_dxs[i], d_lnphi_dys[i]
for j in cmps:
value = 1.0 if i == j else 0.0
value += zsKsRRinvs2[j]*(VF*d_lnphi_dxs_is[j] + one_m_VF*d_lnphi_dys_is[j])
Ji[j] = value
# Last row except last value - good, working
# Diff of RR w.r.t each log K
bottom_row = J[-1]
for j in cmps:
bottom_row[j] = zsKsRRinvs2[j]*(one_m_VF) + VF*zsKsRRinvs2[j]
# Last value - good, working, being overwritten
dF_ncp1_dB = 0.0
for i in cmps:
dF_ncp1_dB -= RR_terms[i]*Ksm1[i]
J[-1][-1] = dF_ncp1_dB
err_RR = Rachford_Rice_flash_error(VF, zs, Ks)
Fs = [lnKi - lnphi_l + lnphi_g for lnphi_l, lnphi_g, lnKi in zip(lnphis_l, lnphis_g, lnKs)]
Fs.append(err_RR)
info[:] = VF, xs, ys, l, g, Fs, J
return Fs, J
guesses = [log(i) for i in Ks_guess]
guesses.append(V_over_F)
# TODO trust-region
sln, iterations = newton_system(to_solve, guesses, jac=True, xtol=xtol,
maxiter=maxiter,
damping_func=make_damp_initial(steps=3),
damping=.5)
VF, xs, ys, l, g, Fs, J = info
tot_err = 0.0
for Fi in Fs:
tot_err += abs(Fi)
return VF, xs, ys, l, g, tot_err, J, iterations
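# Illustrative sketch only: a finite-difference check of the analytic
# d(Rachford-Rice error)/d(V/F) term used in the Jacobian above,
# dF/dVF = -sum(z_i*(K_i-1)^2/(1 + VF*(K_i-1))^2), with made-up K-values.
def _demo_RR_dVF_check(VF=0.4, h=1e-7):
    zs = [0.5, 0.3, 0.2]
    Ks = [2.5, 1.1, 0.3]
    def RR_err(VF):
        return sum(zi*(Ki - 1.0)/(1.0 + VF*(Ki - 1.0))
                   for zi, Ki in zip(zs, Ks))
    analytic = -sum(zi*(Ki - 1.0)**2/(1.0 + VF*(Ki - 1.0))**2
                    for zi, Ki in zip(zs, Ks))
    numeric = (RR_err(VF + h) - RR_err(VF - h))/(2.0*h)
    return analytic, numeric  # agree to roughly 1e-8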
def gdem(x, x1, x2, x3):
cmps = range(len(x))
dx2 = [x[i] - x3[i] for i in cmps]
dx1 = [x[i] - x2[i] for i in cmps]
dx = [x[i] - x1[i] for i in cmps]
b01, b02, b12, b11, b22 = 0.0, 0.0, 0.0, 0.0, 0.0
for i in cmps:
b01 += dx[i]*dx1[i]
b02 += dx[i]*dx2[i]
b12 += dx1[i]*dx2[i]
b11 += dx1[i]*dx1[i]
b22 += dx2[i]*dx2[i]
den_inv = 1.0/(b11*b22 - b12*b12)
mu1 = den_inv*(b02*b12 - b01*b22)
mu2 = den_inv*(b01*b12 - b02*b11)
factor = 1.0/(1.0 + mu1 + mu2)
return [factor*(dx[i] - mu2*dx1[i]) for i in cmps]
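# Illustrative sketch only: gdem applied to a toy two-mode sequence converging
# geometrically to a known limit. x is the newest iterate; x1, x2, x3 are the
# three before it. The extrapolated point lands closer to the limit than x
# does (not exactly on it for this construction, only accelerated).
def _demo_gdem():
    limit = [1.0, 2.0]
    seq = [[limit[0] + 0.6**k, limit[1] + 2.0*0.8**k] for k in range(4)]
    x3, x2, x1, x = seq
    dx = gdem(x, x1, x2, x3)
    return [xi + dxi for xi, dxi in zip(x, dx)]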
def minimize_gibbs_2P_transformed(T, P, zs, xs_guess, ys_guess, liquid_phase,
gas_phase, maxiter=1000, tol=1E-13,
trivial_solution_tol=1e-5, V_over_F_guess=None):
if V_over_F_guess is None:
V_over_F = 0.5
else:
V_over_F = V_over_F_guess
flows_v = [yi*V_over_F for yi in ys_guess]
cmps = range(len(zs))
calc_phases = []
def G(flows_v):
vs = [(0.0 + (zs[i] - 0.0)/(1.0 - flows_v[i])) for i in cmps]
ls = [zs[i] - vs[i] for i in cmps]
xs = normalize(ls)
ys = normalize(vs)
VF = flows_v[0]/ys[0]
g = gas_phase.to_TP_zs(T=T, P=P, zs=ys)
l = liquid_phase.to_TP_zs(T=T, P=P, zs=xs)
G_l = l.G()
G_g = g.G()
calc_phases[:] = G_l, G_g
GE_calc = (G_g*VF + (1.0 - VF)*G_l)/(R*T)
return GE_calc
ans = minimize(G, flows_v)
flows_v = ans['x']
vs = [(0.0 + (zs[i] - 0.0) / (1.0 - flows_v[i])) for i in cmps]
ls = [zs[i] - vs[i] for i in cmps]
xs = normalize(ls)
ys = normalize(vs)
V_over_F = flows_v[0] / ys[0]
return V_over_F, xs, ys, calc_phases[0], calc_phases[1], ans['nfev'], ans['fun']
def minimize_gibbs_NP_transformed(T, P, zs, compositions_guesses, phases,
betas, tol=1E-13,
method='L-BFGS-B', opt_kwargs=None, translate=False):
if opt_kwargs is None:
opt_kwargs = {}
N = len(zs)
cmps = range(N)
phase_count = len(phases)
phase_iter = range(phase_count)
phase_iter_n1 = range(phase_count-1)
if method == 'differential_evolution':
translate = True
# RT_inv = 1.0/(R*T)
# Only exist for the first n phases
# Do not multiply by zs - we are already multiplying by a composition
flows_guess = [compositions_guesses[j][i]*betas[j] for j in range(phase_count - 1) for i in cmps]
# Convert the flow guesses to the basis used
remaining = zs
if translate:
flows_guess_basis = []
for j in range(phase_count-1):
phase_guess = flows_guess[j*N:j*N+N]
flows_guess_basis.extend([-trunc_log((remaining[i]-phase_guess[i])/(phase_guess[i]-0.0)) for i in cmps])
remaining = [remaining[i] - phase_guess[i] for i in cmps]
else:
flows_guess_basis = flows_guess
global min_G, iterations
jac, hess = False, False
real_min = False
min_G = 1e100
iterations = 0
info = []
last = []
def G(flows):
global min_G, iterations
try:
flows = flows.tolist()
except:
flows = list(flows)
iterations += 1
iter_flows = []
iter_comps = []
iter_betas = []
iter_phases = []
remaining = zs
if not translate:
for i in range(len(flows)):
if flows[i] < 1e-10:
flows[i] = 1e-10
for j in phase_iter:
v = flows[j*N:j*N+N]
# Mole flows of phase0/vapor
if j == phase_count - 1:
vs = remaining
else:
if translate:
vs = [(0.0 + (remaining[i] - 0.0)/(1.0 + trunc_exp(-v[i]))) for i in cmps]
else:
vs = v
vs_sum = sum(abs(i) for i in vs)
if vs_sum == 0.0:
                # Handle the case where the optimizer has already assigned the entire feed to the other phases
ys = zs
else:
vs_sum_inv = 1.0/vs_sum
ys = [abs(vs[i]*vs_sum_inv) for i in cmps]
ys = normalize(ys)
iter_flows.append(vs)
iter_comps.append(ys)
iter_betas.append(vs_sum) # Would be divided by feed but feed is zs = 1
remaining = [remaining[i] - vs[i] for i in cmps]
G = 0.0
jac_array = []
for j in phase_iter:
comp = iter_comps[j]
phase = phases[j].to_TP_zs(T=T, P=P, zs=comp)
lnphis = phase.lnphis()
if real_min:
# fugacities = phase.fugacities()
# fugacities = phase.phis()
#G += sum([iter_flows[j][i]*trunc_log(fugacities[i]) for i in cmps])
G += phase.G()*iter_betas[j]
else:
for i in cmps:
G += iter_flows[j][i]*(trunc_log(comp[i]) + lnphis[i])
iter_phases.append(phase)
if 0:
fugacities_last = iter_phases[-1].fugacities()
# G = 0.0
for j in phase_iter_n1:
fugacities = iter_phases[j].fugacities()
G += sum([abs(fugacities_last[i] - fugacities[i]) for i in cmps])
# lnphis = phase.lnphis()
# if real_min:
# G += G_base
# # if not jac:
# for j in phase_iter:
# comp = iter_comps[j]
# G += phase.G()*iter_betas[j]
# if jac:
# r = []
# for i in cmps:
# v = (log())
# jac_array.append([log()])
jac_arr = []
comp = iter_comps[0]
phase = iter_phases[0]
lnphis = phase.lnphis()
base = [log(xi) + lnphii for xi, lnphii in zip(comp, lnphis)]
if jac:
for j in range(1, phase_count):
comp = iter_comps[j]
phase = iter_phases[j]
lnphis = phase.lnphis()
jac_arr.extend([ref - (log(xi) + lnphii) for ref, xi, lnphii in zip(base, comp, lnphis)])
        # Incomplete alternative gradient formulation, disabled: as written it
        # discarded the jac_arr computed above and indexed 1-D lists with two
        # subscripts, which cannot execute.
        # jac_arr = []
        # comp_last = iter_comps[-1]
        # phase_last = iter_phases[-1]
        # flows_last = iter_flows[-1]
        # lnphis_last = phase_last.lnphis()
        # dlnphis_dns_last = phase_last.dlnphis_dns()
        # for j in phase_iter_n1:
        #     comp = iter_comps[j]
        #     phase = iter_phases[j]
        #     flows = iter_flows[j]
        #     lnphis = phase.lnphis()
        #     dlnphis_dns = phase.dlnphis_dns()
        #     for i in cmps:
        #         v = 0
        #         for k in cmps:
        #             v += flows[k][i]*lnphis[k][i]
        #             v -= flows_last[i]*dlnphis_dns_last[k][i]
        #         v += lnphis[i] + log(comp[i])
if G < min_G:
# 'phases', iter_phases
print('new min G', G, 'betas', iter_betas, 'comp', iter_comps)
info[:] = iter_betas, iter_comps, iter_phases, G
min_G = G
last[:] = iter_betas, iter_comps, iter_phases, G
if hess:
base = iter_phases[0].dlnfugacities_dns()
p1 = iter_phases[1].dlnfugacities_dns()
dlnphis_dns = [i.dlnphis_dns() for i in iter_phases]
dlnphis_dns0 = iter_phases[0].dlnphis_dns()
dlnphis_dns1 = iter_phases[1].dlnphis_dns()
xs, ys = iter_comps[0], iter_comps[1]
hess_arr = []
beta = iter_betas[0]
hess_arr = [[0.0]*N*(phase_count-1) for i in range(N*(phase_count-1))]
for n in range(1, phase_count):
for m in range(1, phase_count):
for i in cmps:
for j in cmps:
delta = 1.0 if i == j else 0.0
v = 1.0/iter_betas[n]*(1.0/iter_comps[n][i]*delta
- 1.0 + dlnphis_dns[n][i][j])
v += 1.0/iter_betas[0]*(1.0/iter_comps[0][i]*delta
- 1.0 + dlnphis_dns[0][i][j])
hess_arr[(n-1)*N+i][(m-1)*N+j] = v
#
# for n in range(1, phase_count):
# for i in cmps:
# r = []
# for j in cmps:
# v = 0.0
# for m in phase_iter:
# delta = 1.0 if i ==j else 0.0
# v += 1.0/iter_betas[m]*(1.0/iter_comps[m][i]*delta
# - 1.0 + dlnphis_dns[m][i][j])
#
# # How the heck to make this multidimensional?
# # v = 1.0/(beta*(1.0 - beta))*(zs[i]*delta/(xs[i]*ys[i])
# # - 1.0 + (1.0 - beta)*dlnphis_dns0[i][j]
# # + beta*dlnphis_dns1[i][j])
#
# # v = base[i][j] + p1[i][j]
# r.append(v)
# hess_arr.append(r)
# Going to be hard to figure out
# for j in range(1, phase_count):
# comp = iter_comps[j]
# phase = iter_phases[j]
# dlnfugacities_dns = phase.dlnfugacities_dns()
# row = [base[i] + dlnfugacities_dns[i] for i in cmps]
# hess_arr = row
# hess_arr.append(row)
return G, jac_arr, hess_arr
if jac:
return G, np.array(jac_arr)
return G
# ans = None
if method == 'differential_evolution':
from scipy.optimize import differential_evolution
real_min = True
translate = True
G_base = 1e100
for p in phases:
G_calc = p.to(T=T,P=P, zs=zs).G()
if G_base > G_calc:
G_base = G_calc
jac = hess = False
# print(G(list(flows_guess_basis)))
ans = differential_evolution(G, [(-30.0, 30.0) for i in cmps for j in range(phase_count-1)], **opt_kwargs)
# ans = differential_evolution(G, [(-100.0, 100.0) for i in cmps for j in range(phase_count-1)], **opt_kwargs)
objf = float(ans['fun'])
elif method == 'newton_minimize':
        # import numdifftools as nd  # only needed by the disabled check below
        jac = True
        hess = True
        # initial_hess = nd.Hessian(lambda x: G(x)[0], step=1e-4)(flows_guess_basis)  # computed but never used
ans, iters = newton_minimize(G, flows_guess_basis, jac=True, hess=True, xtol=tol, ytol=None, maxiter=100, damping=1.0,
damping_func=damping_maintain_sign)
objf = None
else:
jac = True
hess = True
import numdifftools as nd
def hess_fun(flows):
return np.array(G(flows)[2])
# hess_fun = lambda flows_guess_basis: np.array(G(flows_guess_basis)[2])
# nd.Jacobian(G, step=1e-5)
# trust-constr special handling to add constraints
def fun_and_jac(x):
x, j, _ = G(x)
return x, np.array(j)
ans = minimize(fun_and_jac, flows_guess_basis, jac=True, hess=hess_fun, method=method, tol=tol, **opt_kwargs)
objf = float(ans['fun'])
# G(ans['x']) # Make sure info has right value
# ans['fun'] *= R*T
    betas, compositions, phases, objf = info
return betas, compositions, phases, iterations, objf
def TP_solve_VF_guesses(zs, method, constants, correlations,
T=None, P=None, VF=None,
maxiter=50, xtol=1E-7, ytol=None,
bounded=False,
user_guess=None, last_conv=None):
if method == IDEAL_PSAT:
return flash_ideal(zs=zs, funcs=correlations.VaporPressures, Tcs=constants.Tcs, T=T, P=P, VF=VF)
elif method == WILSON_GUESS:
return flash_wilson(zs, Tcs=constants.Tcs, Pcs=constants.Pcs, omegas=constants.omegas, T=T, P=P, VF=VF)
elif method == TB_TC_GUESS:
return flash_Tb_Tc_Pc(zs, Tbs=constants.Tbs, Tcs=constants.Tcs, Pcs=constants.Pcs, T=T, P=P, VF=VF)
# Simple return values - not going through a model
elif method == STP_T_GUESS:
return flash_ideal(zs=zs, funcs=correlations.VaporPressures, Tcs=constants.Tcs, T=298.15, P=101325.0)
elif method == LAST_CONVERGED:
if last_conv is None:
raise ValueError("No last converged")
return last_conv
else:
raise ValueError("Could not converge")
def dew_P_newton(P_guess, T, zs, liquid_phase, gas_phase,
maxiter=200, xtol=1E-10, xs_guess=None,
max_step_damping=1e5,
trivial_solution_tol=1e-4):
# Trial function only
V = None
N = len(zs)
cmps = range(N)
xs = zs if xs_guess is None else xs_guess
V_over_F = 1.0
def to_solve(lnKsP):
# d(fl_i - fg_i)/d(ln K,i) -
# rest is less important
# d d(fl_i - fg_i)/d(P) should be easy
Ks = [trunc_exp(i) for i in lnKsP[:-1]]
P = lnKsP[-1]
xs = [zs[i]/(1.0 + V_over_F*(Ks[i] - 1.0)) for i in cmps]
ys = [Ks[i]*xs[i] for i in cmps]
g = gas_phase.to(ys, T=T, P=P, V=V)
l = liquid_phase.to(xs, T=T, P=P, V=V)
fugacities_l = l.fugacities()
fugacities_g = g.fugacities()
VF_err = Rachford_Rice_flash_error(V_over_F, zs, Ks)
errs = [fi_l - fi_g for fi_l, fi_g in zip(fugacities_l, fugacities_g)]
errs.append(VF_err)
return errs
lnKs_guess = [log(zs[i]/xs[i]) for i in cmps]
lnKs_guess.append(P_guess)
def jac(lnKsP):
j = jacobian(to_solve, lnKsP, scalar=False)
return j
lnKsP, iterations = newton_system(to_solve, lnKs_guess, jac=jac, xtol=xtol)
xs = [zs[i]/(1.0 + V_over_F*(exp(lnKsP[i]) - 1.0)) for i in cmps]
# ys = [exp(lnKsP[i])*xs[i] for i in cmps]
return lnKsP[-1], xs, zs, iterations
def dew_bubble_newton_zs(guess, fixed_val, zs, liquid_phase, gas_phase,
iter_var='T', fixed_var='P', V_over_F=1, # 1 = dew, 0 = bubble
maxiter=200, xtol=1E-10, comp_guess=None,
max_step_damping=1e5, damping=1.0,
trivial_solution_tol=1e-4, debug=False,
method='newton', opt_kwargs=None):
V = None
N = len(zs)
cmps = range(N)
if comp_guess is None:
comp_guess = zs
if V_over_F == 1.0:
iter_phase, const_phase = liquid_phase, gas_phase
elif V_over_F == 0.0:
iter_phase, const_phase = gas_phase, liquid_phase
else:
raise ValueError("Supports only VF of 0 or 1")
lnKs = [0.0]*N
size = N + 1
errs = [0.0]*size
comp_invs = [0.0]*N
J = [[0.0]*size for i in range(size)]
#J[N][N] = 0.0 as well
JN = J[N]
for i in cmps:
JN[i] = -1.0
s = 'dlnphis_d%s' %(iter_var)
dlnphis_diter_var_iter = getattr(iter_phase.__class__, s)
dlnphis_diter_var_const = getattr(const_phase.__class__, s)
dlnphis_dzs = getattr(iter_phase.__class__, 'dlnphis_dzs')
info = []
kwargs = {}
kwargs[fixed_var] = fixed_val
kwargs['V'] = None
def to_solve_comp(iter_vals, jac=True):
comp = iter_vals[:-1]
iter_val = iter_vals[-1]
kwargs[iter_var] = iter_val
p_iter = iter_phase.to(comp, **kwargs)
p_const = const_phase.to(zs, **kwargs)
lnphis_iter = p_iter.lnphis()
lnphis_const = p_const.lnphis()
for i in cmps:
comp_invs[i] = comp_inv = 1.0/comp[i]
lnKs[i] = log(zs[i]*comp_inv)
errs[i] = lnKs[i] - lnphis_iter[i] + lnphis_const[i]
errs[-1] = 1.0 - sum(comp)
if jac:
dlnphis_dxs = dlnphis_dzs(p_iter)
dlnphis_dprop_iter = dlnphis_diter_var_iter(p_iter)
dlnphis_dprop_const = dlnphis_diter_var_const(p_const)
for i in cmps:
Ji = J[i]
Ji[-1] = dlnphis_dprop_const[i] - dlnphis_dprop_iter[i]
for j in cmps:
Ji[j] = -dlnphis_dxs[i][j]
Ji[i] -= comp_invs[i]
info[:] = [p_iter, p_const, errs, J]
return errs, J
return errs
guesses = list(comp_guess)
guesses.append(guess)
if method == 'newton':
comp_val, iterations = newton_system(to_solve_comp, guesses, jac=True,
xtol=xtol, damping=damping,
solve_func=py_solve,
# solve_func=lambda x, y:np.linalg.solve(x, y).tolist(),
damping_func=damping_maintain_sign)
elif method == 'odeint':
# Not even close to working
# equations are hard
from scipy.integrate import odeint
def fun_and_jac(x, t):
x, j = to_solve_comp(x.tolist() + [t])
return np.array(x), np.array(j)
def fun(x, t):
x, j = to_solve_comp(x.tolist() +[t])
return np.array(x)
def jac(x, t):
x, j = to_solve_comp(x.tolist() + [t])
return np.array(j)
ans = odeint(func=fun, y0=np.array(guesses), t=np.linspace(guess, guess*2, 5), Dfun=jac)
return ans
else:
if opt_kwargs is None:
opt_kwargs = {}
# def fun_and_jac(x):
# x, j = to_solve_comp(x.tolist())
# return np.array(x), np.array(j)
        low = [0.0]*N
low.append(1.0) # guess at minimum pressure
high = [1.0]*N
high.append(1e10) # guess at maximum pressure
f_j, into, outof = translate_bound_f_jac(to_solve_comp, jac=True, low=low, high=high, as_np=True)
ans = root(f_j, np.array(into(guesses)), jac=True, method=method, tol=xtol, **opt_kwargs)
comp_val = outof(ans['x']).tolist()
iterations = ans['nfev']
iter_val = comp_val[-1]
comp = comp_val[:-1]
comp_difference = 0.0
for i in cmps: comp_difference += abs(zs[i] - comp[i])
if comp_difference < trivial_solution_tol:
raise ValueError("Converged to trivial condition, compositions of both phases equal")
if iter_var == 'P' and iter_val > 1e10:
raise ValueError("Converged to unlikely point")
sln = [iter_val, comp]
sln.append(info[0])
sln.append(info[1])
sln.append(iterations)
tot_err = 0.0
for err_i in info[2]:
tot_err += abs(err_i)
sln.append(tot_err)
if debug:
return sln, to_solve_comp
return sln
l_undefined_T_msg = "Could not calculate liquid conditions at provided temperature %s K (mole fractions %s)"
g_undefined_T_msg = "Could not calculate vapor conditions at provided temperature %s K (mole fractions %s)"
l_undefined_P_msg = "Could not calculate liquid conditions at provided pressure %s Pa (mole fractions %s)"
g_undefined_P_msg = "Could not calculate vapor conditions at provided pressure %s Pa (mole fractions %s)"
def dew_bubble_Michelsen_Mollerup(guess, fixed_val, zs, liquid_phase, gas_phase,
iter_var='T', fixed_var='P', V_over_F=1,
maxiter=200, xtol=1E-10, comp_guess=None,
max_step_damping=.25, guess_update_frequency=1,
trivial_solution_tol=1e-7, V_diff=.00002, damping=1.0):
    # For near-critical points V_diff can be very wrong - .005 seen, with both phases gas or both liquid
kwargs = {fixed_var: fixed_val}
N = len(zs)
cmps = range(N)
comp_guess = zs if comp_guess is None else comp_guess
damping_orig = damping
if V_over_F == 1.0:
iter_phase, const_phase, bubble = liquid_phase, gas_phase, False
elif V_over_F == 0.0:
iter_phase, const_phase, bubble = gas_phase, liquid_phase, True
else:
raise ValueError("Supports only VF of 0 or 1")
if iter_var == 'T':
if V_over_F == 1.0:
iter_msg, const_msg = l_undefined_T_msg, g_undefined_T_msg
else:
iter_msg, const_msg = g_undefined_T_msg, l_undefined_T_msg
elif iter_var == 'P':
if V_over_F == 1.0:
iter_msg, const_msg = l_undefined_P_msg, g_undefined_P_msg
else:
iter_msg, const_msg = g_undefined_P_msg, l_undefined_P_msg
s = 'dlnphis_d%s' %(iter_var)
dlnphis_diter_var_iter = getattr(iter_phase.__class__, s)
dlnphis_diter_var_const = getattr(const_phase.__class__, s)
skip = 0
guess_old = None
V_ratio, V_ratio_last = None, None
V_iter_last, V_const_last = None, None
expect_phase = 'g' if V_over_F == 0.0 else 'l'
unwanted_phase = 'l' if expect_phase == 'g' else 'g'
successive_fails = 0
for iteration in range(maxiter):
kwargs[iter_var] = guess
try:
const_phase = const_phase.to_TP_zs(zs=zs, **kwargs)
lnphis_const = const_phase.lnphis()
dlnphis_dvar_const = dlnphis_diter_var_const(const_phase)
except Exception as e:
if guess_old is None:
raise ValueError(const_msg %(guess, zs), e)
successive_fails += 1
guess = guess_old + copysign(min(max_step_damping*guess, abs(step)), step)
continue
try:
skip -= 1
iter_phase = iter_phase.to_TP_zs(zs=comp_guess, **kwargs)
if V_diff is not None:
V_iter, V_const = iter_phase.V(), const_phase.V()
V_ratio = V_iter/V_const
if 1.0 - V_diff < V_ratio < 1.0 + V_diff or skip > 0 or V_iter_last and (abs(min(V_iter, V_iter_last)/max(V_iter, V_iter_last)) < .8):
                    # Relax the constraint on the iteration variable so that two different phases exist
#if iter_phase.eos_mix.phase in ('l', 'g') and iter_phase.eos_mix.phase == const_phase.eos_mix.phase:
# Alternatively, try a stability test here
if iter_phase.eos_mix.phase == unwanted_phase:
if skip < 0:
skip = 4
damping = .15
if iter_var == 'P':
split = min(iter_phase.eos_mix.P_discriminant_zeros()) # P_discriminant_zero_l
if bubble:
split *= 0.999999999
else:
split *= 1.000000001
elif iter_var == 'T':
split = iter_phase.eos_mix.T_discriminant_zero_l()
if bubble:
split *= 0.999999999
else:
split *= 1.000000001
kwargs[iter_var] = guess = split
iter_phase = iter_phase.to(zs=comp_guess, **kwargs)
const_phase = const_phase.to(zs=zs, **kwargs)
lnphis_const = const_phase.lnphis()
dlnphis_dvar_const = dlnphis_diter_var_const(const_phase)
                        # print('adj iter phase', split)  # debug output, disabled
elif const_phase.eos_mix.phase == expect_phase:
if skip < 0:
skip = 4
damping = .15
if iter_var == 'P':
split = min(const_phase.eos_mix.P_discriminant_zeros())
if bubble:
split *= 0.999999999
else:
split *= 1.000000001
elif iter_var == 'T':
split = const_phase.eos_mix.T_discriminant_zero_l()
if bubble:
split *= 0.999999999
else:
split *= 1.000000001
kwargs[iter_var] = guess = split
const_phase = const_phase.to(zs=zs, **kwargs)
lnphis_const = const_phase.lnphis()
dlnphis_dvar_const = dlnphis_diter_var_const(const_phase)
iter_phase = iter_phase.to(zs=comp_guess, **kwargs)
# Also need to adjust the other phase to keep it in sync
                        # print('adj const phase', split)  # debug output, disabled
lnphis_iter = iter_phase.lnphis()
dlnphis_dvar_iter = dlnphis_diter_var_iter(iter_phase)
except Exception as e:
if guess_old is None:
raise ValueError(iter_msg %(guess, zs), e)
successive_fails += 1
guess = guess_old + copysign(min(max_step_damping*guess, abs(step)), step)
continue
if successive_fails > 2:
raise ValueError("Stopped convergence procedure after multiple bad steps")
successive_fails = 0
Ks = [exp(a - b) for a, b in zip(lnphis_const, lnphis_iter)]
comp_guess = [zs[i]*Ks[i] for i in cmps]
y_sum = sum(comp_guess)
comp_guess = [y/y_sum for y in comp_guess]
if iteration % guess_update_frequency: # or skip > 0
continue
elif skip == 0:
damping = damping_orig
f_k = sum([zs[i]*Ks[i] for i in cmps]) - 1.0
dfk_dvar = 0.0
for i in cmps:
dfk_dvar += zs[i]*Ks[i]*(dlnphis_dvar_const[i] - dlnphis_dvar_iter[i])
guess_old = guess
step = -f_k/dfk_dvar
# if near_critical:
adj_step = copysign(min(max_step_damping*guess, abs(step), abs(step)*damping), step)
if guess + adj_step <= 0.0:
adj_step *= 0.5
guess = guess + adj_step
# else:
# guess = guess + step
comp_difference = 0.0
for i in cmps: comp_difference += abs(zs[i] - comp_guess[i])
if comp_difference < trivial_solution_tol and iteration:
for zi in zs:
if zi == 1.0:
# Turn off trivial check for pure components
trivial_solution_tol = -1.0
if comp_difference < trivial_solution_tol:
raise ValueError("Converged to trivial condition, compositions of both phases equal")
if abs(guess - guess_old) < xtol: #and not skip:
guess = guess_old
break
if V_diff is not None:
V_iter_last, V_const_last, V_ratio_last = V_iter, V_const, V_ratio
if abs(guess - guess_old) > xtol:
raise ValueError("Did not converge to specified tolerance")
return guess, comp_guess, iter_phase, const_phase, iteration, abs(guess - guess_old)
def existence_3P_Michelsen_Mollerup(guess, fixed_val, zs, iter_phase, liquid0, liquid1,
iter_var='T', fixed_var='P',
maxiter=200, xtol=1E-10, comp_guess=None,
liquid0_comp=None, liquid1_comp=None,
max_step_damping=.25, SS_tol=1e-10,
trivial_solution_tol=1e-7, damping=1.0,
beta=0.5):
# For convenience call the two phases that exist already liquid0, liquid1
# But one of them can be a gas, solid, etc.
kwargs = {fixed_var: fixed_val}
N = len(zs)
cmps = range(N)
comp_guess = zs if comp_guess is None else comp_guess
damping_orig = damping
if iter_var == 'T':
iter_msg, const_msg = g_undefined_T_msg, l_undefined_T_msg
elif iter_var == 'P':
iter_msg, const_msg = g_undefined_P_msg, l_undefined_P_msg
s = 'dlnphis_d%s' %(iter_var)
dlnphis_diter_var_iter = getattr(iter_phase.__class__, s)
dlnphis_diter_var_liquid0 = getattr(liquid0.__class__, s)
# dlnphis_diter_var_liquid1 = getattr(liquid1.__class__, s)
skip = 0
guess_old = None
successive_fails = 0
for iteration in range(maxiter):
kwargs[iter_var] = guess
try:
liquid0 = liquid0.to_TP_zs(zs=liquid0_comp, **kwargs)
lnphis_liquid0 = liquid0.lnphis()
dlnphis_dvar_liquid0 = dlnphis_diter_var_liquid0(liquid0)
except Exception as e:
if guess_old is None:
raise ValueError(const_msg %(guess, liquid0_comp), e)
successive_fails += 1
guess = guess_old + copysign(min(max_step_damping*guess, abs(step)), step)
continue
try:
liquid1 = liquid1.to_TP_zs(zs=liquid1_comp, **kwargs)
lnphis_liquid1 = liquid1.lnphis()
# dlnphis_dvar_liquid1 = dlnphis_diter_var_liquid1(liquid1)
except Exception as e:
if guess_old is None:
raise ValueError(const_msg %(guess, liquid0_comp), e)
successive_fails += 1
guess = guess_old + copysign(min(max_step_damping*guess, abs(step)), step)
continue
try:
iter_phase = iter_phase.to_TP_zs(zs=comp_guess, **kwargs)
lnphis_iter = iter_phase.lnphis()
dlnphis_dvar_iter = dlnphis_diter_var_iter(iter_phase)
except Exception as e:
if guess_old is None:
raise ValueError(iter_msg %(guess, zs), e)
successive_fails += 1
guess = guess_old + copysign(min(max_step_damping*guess, abs(step)), step)
continue
if successive_fails > 2:
raise ValueError("Stopped convergence procedure after multiple bad steps")
successive_fails = 0
Ks = [exp(a - b) for a, b in zip(lnphis_liquid0, lnphis_iter)]
comp_guess = [liquid0_comp[i]*Ks[i] for i in cmps]
y_sum_inv = 1.0/sum(comp_guess)
comp_guess = [y*y_sum_inv for y in comp_guess]
f_k = sum([liquid0_comp[i]*Ks[i] for i in cmps]) - 1.0
dfk_dvar = 0.0
for i in cmps:
dfk_dvar += liquid0_comp[i]*Ks[i]*(dlnphis_dvar_liquid0[i] - dlnphis_dvar_iter[i])
guess_old = guess
step = -f_k/dfk_dvar
adj_step = copysign(min(max_step_damping*guess, abs(step), abs(step)*damping), step)
if guess + adj_step <= 0.0:
adj_step *= 0.5
guess = guess + adj_step
comp_difference = 0.0
for i in cmps:
comp_difference += abs(liquid0_comp[i] - comp_guess[i])
if comp_difference < trivial_solution_tol and iteration:
if comp_difference < trivial_solution_tol:
raise ValueError("Converged to trivial condition, compositions of both phases equal")
# Do the SS part for the two phases
try:
Ks_SS = [exp(lnphis_liquid0[i] - lnphis_liquid1[i]) for i in cmps]
except OverflowError:
Ks_SS = [trunc_exp(lnphis_liquid0[i] - lnphis_liquid1[i]) for i in cmps]
beta, liquid0_comp_new, liquid1_comp_new = flash_inner_loop(zs, Ks_SS, guess=beta)
for xi in liquid0_comp_new:
if xi < 0.0:
xs_new_sum_inv = 1.0/sum(abs(i) for i in liquid0_comp_new)
for i in cmps:
liquid0_comp_new[i] = abs(liquid0_comp_new[i])*xs_new_sum_inv
break
for xi in liquid1_comp_new:
if xi < 0.0:
xs_new_sum_inv = 1.0/sum(abs(i) for i in liquid1_comp_new)
for i in cmps:
liquid1_comp_new[i] = abs(liquid1_comp_new[i])*xs_new_sum_inv
break
err_SS = 0.0
try:
for Ki, xi, yi in zip(Ks_SS, liquid0_comp, liquid1_comp):
err_i = Ki*xi/yi - 1.0
err_SS += err_i*err_i
except ZeroDivisionError:
err_SS = 0.0
            for Ki, xi, yi in zip(Ks_SS, liquid0_comp, liquid1_comp):
try:
err_i = Ki*xi/yi - 1.0
err_SS += err_i*err_i
except ZeroDivisionError:
pass
liquid0_comp, liquid1_comp = liquid0_comp_new, liquid1_comp_new
if abs(guess - guess_old) < xtol and err_SS < SS_tol:
err_VF = abs(guess - guess_old)
guess = guess_old
break
if abs(guess - guess_old) > xtol:
raise ValueError("Did not converge to specified tolerance")
return guess, [iter_phase, liquid0, liquid1], [0.0, 1.0-beta, beta], err_VF, err_SS, iteration
def bubble_T_Michelsen_Mollerup(T_guess, P, zs, liquid_phase, gas_phase,
maxiter=200, xtol=1E-10, ys_guess=None,
max_step_damping=5.0, T_update_frequency=1,
trivial_solution_tol=1e-4):
N = len(zs)
cmps = range(N)
ys = zs if ys_guess is None else ys_guess
T_guess_old = None
successive_fails = 0
for iteration in range(maxiter):
try:
g = gas_phase.to_TP_zs(T=T_guess, P=P, zs=ys)
lnphis_g = g.lnphis()
dlnphis_dT_g = g.dlnphis_dT()
except Exception as e:
if T_guess_old is None:
raise ValueError(g_undefined_T_msg %(T_guess, ys), e)
successive_fails += 1
T_guess = T_guess_old + copysign(min(max_step_damping, abs(step)), step)
continue
try:
l = liquid_phase.to_TP_zs(T=T_guess, P=P, zs=zs)
lnphis_l = l.lnphis()
dlnphis_dT_l = l.dlnphis_dT()
except Exception as e:
if T_guess_old is None:
raise ValueError(l_undefined_T_msg %(T_guess, zs), e)
successive_fails += 1
T_guess = T_guess_old + copysign(min(max_step_damping, abs(step)), step)
continue
if successive_fails > 2:
raise ValueError("Stopped convergence procedure after multiple bad steps")
successive_fails = 0
Ks = [exp(a - b) for a, b in zip(lnphis_l, lnphis_g)]
ys = [zs[i]*Ks[i] for i in cmps]
if iteration % T_update_frequency:
continue
f_k = sum([zs[i]*Ks[i] for i in cmps]) - 1.0
dfk_dT = 0.0
for i in cmps:
dfk_dT += zs[i]*Ks[i]*(dlnphis_dT_l[i] - dlnphis_dT_g[i])
T_guess_old = T_guess
step = -f_k/dfk_dT
# if near_critical:
T_guess = T_guess + copysign(min(max_step_damping, abs(step)), step)
# else:
# T_guess = T_guess + step
comp_difference = sum([abs(zi - yi) for zi, yi in zip(zs, ys)])
if comp_difference < trivial_solution_tol:
raise ValueError("Converged to trivial condition, compositions of both phases equal")
y_sum = sum(ys)
ys = [y/y_sum for y in ys]
if abs(T_guess - T_guess_old) < xtol:
T_guess = T_guess_old
break
if abs(T_guess - T_guess_old) > xtol:
raise ValueError("Did not converge to specified tolerance")
return T_guess, ys, l, g, iteration, abs(T_guess - T_guess_old)
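# Illustrative sketch only: the same Newton update as bubble_T_Michelsen_Mollerup
# but with composition-independent Wilson K-values standing in for the phase
# models, so the iteration is fully self-contained. Here f(T) = sum(z_i*K_i) - 1
# and, since dlnK_i/dT = 5.373*(1 + omega_i)*Tc_i/T^2 for Wilson Ks,
# df/dT = sum(z_i*K_i*dlnK_i/dT). Constants are rough values for an
# ethane/n-butane pair.
def _demo_bubble_T_wilson(P=1e5, T_guess=300.0, xtol=1e-10, maxiter=100):
    from math import exp
    zs = [0.5, 0.5]
    Tcs = [305.32, 425.12]
    Pcs = [4.872e6, 3.796e6]
    omegas = [0.098, 0.193]
    T = T_guess
    for _ in range(maxiter):
        Ks = [Pc/P*exp(5.373*(1.0 + w)*(1.0 - Tc/T))
              for Tc, Pc, w in zip(Tcs, Pcs, omegas)]
        f_k = sum(zi*Ki for zi, Ki in zip(zs, Ks)) - 1.0
        dfk_dT = sum(zi*Ki*5.373*(1.0 + w)*Tc/(T*T)
                     for zi, Ki, Tc, w in zip(zs, Ks, Tcs, omegas))
        T_old, T = T, T - f_k/dfk_dT
        if abs(T - T_old) < xtol:
            break
    return T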
def dew_T_Michelsen_Mollerup(T_guess, P, zs, liquid_phase, gas_phase,
maxiter=200, xtol=1E-10, xs_guess=None,
max_step_damping=5.0, T_update_frequency=1,
trivial_solution_tol=1e-4):
N = len(zs)
cmps = range(N)
xs = zs if xs_guess is None else xs_guess
T_guess_old = None
successive_fails = 0
for iteration in range(maxiter):
try:
g = gas_phase.to_TP_zs(T=T_guess, P=P, zs=zs)
lnphis_g = g.lnphis()
dlnphis_dT_g = g.dlnphis_dT()
except Exception as e:
if T_guess_old is None:
raise ValueError(g_undefined_T_msg %(T_guess, zs), e)
successive_fails += 1
T_guess = T_guess_old + copysign(min(max_step_damping, abs(step)), step)
continue
try:
l = liquid_phase.to_TP_zs(T=T_guess, P=P, zs=xs)
lnphis_l = l.lnphis()
dlnphis_dT_l = l.dlnphis_dT()
except Exception as e:
if T_guess_old is None:
raise ValueError(l_undefined_T_msg %(T_guess, xs), e)
successive_fails += 1
T_guess = T_guess_old + copysign(min(max_step_damping, abs(step)), step)
continue
if successive_fails > 2:
raise ValueError("Stopped convergence procedure after multiple bad steps")
successive_fails = 0
Ks = [exp(a - b) for a, b in zip(lnphis_l, lnphis_g)]
xs = [zs[i]/Ks[i] for i in cmps]
if iteration % T_update_frequency:
continue
f_k = sum(xs) - 1.0
dfk_dT = 0.0
for i in cmps:
dfk_dT += xs[i]*(dlnphis_dT_g[i] - dlnphis_dT_l[i])
T_guess_old = T_guess
step = -f_k/dfk_dT
# if near_critical:
T_guess = T_guess + copysign(min(max_step_damping, abs(step)), step)
# else:
# T_guess = T_guess + step
comp_difference = sum([abs(zi - xi) for zi, xi in zip(zs, xs)])
if comp_difference < trivial_solution_tol:
raise ValueError("Converged to trivial condition, compositions of both phases equal")
        x_sum = sum(xs)
        xs = [x/x_sum for x in xs]
if abs(T_guess - T_guess_old) < xtol:
T_guess = T_guess_old
break
if abs(T_guess - T_guess_old) > xtol:
raise ValueError("Did not converge to specified tolerance")
return T_guess, xs, l, g, iteration, abs(T_guess - T_guess_old)
def bubble_P_Michelsen_Mollerup(P_guess, T, zs, liquid_phase, gas_phase,
maxiter=200, xtol=1E-10, ys_guess=None,
max_step_damping=1e5, P_update_frequency=1,
trivial_solution_tol=1e-4):
N = len(zs)
cmps = range(N)
ys = zs if ys_guess is None else ys_guess
P_guess_old = None
successive_fails = 0
for iteration in range(maxiter):
try:
g = gas_phase = gas_phase.to_TP_zs(T=T, P=P_guess, zs=ys)
lnphis_g = g.lnphis()
dlnphis_dP_g = g.dlnphis_dP()
except Exception as e:
if P_guess_old is None:
raise ValueError(g_undefined_P_msg %(P_guess, ys), e)
successive_fails += 1
P_guess = P_guess_old + copysign(min(max_step_damping, abs(step)), step)
continue
try:
l = liquid_phase= liquid_phase.to_TP_zs(T=T, P=P_guess, zs=zs)
lnphis_l = l.lnphis()
dlnphis_dP_l = l.dlnphis_dP()
except Exception as e:
if P_guess_old is None:
raise ValueError(l_undefined_P_msg %(P_guess, zs), e)
successive_fails += 1
            P_guess = P_guess_old + copysign(min(max_step_damping, abs(step)), step)
continue
if successive_fails > 2:
raise ValueError("Stopped convergence procedure after multiple bad steps")
successive_fails = 0
Ks = [exp(a - b) for a, b in zip(lnphis_l, lnphis_g)]
ys = [zs[i]*Ks[i] for i in cmps]
if iteration % P_update_frequency:
continue
f_k = sum([zs[i]*Ks[i] for i in cmps]) - 1.0
dfk_dP = 0.0
for i in cmps:
dfk_dP += zs[i]*Ks[i]*(dlnphis_dP_l[i] - dlnphis_dP_g[i])
P_guess_old = P_guess
step = -f_k/dfk_dP
P_guess = P_guess + copysign(min(max_step_damping, abs(step)), step)
comp_difference = sum([abs(zi - yi) for zi, yi in zip(zs, ys)])
if comp_difference < trivial_solution_tol:
raise ValueError("Converged to trivial condition, compositions of both phases equal")
y_sum = sum(ys)
ys = [y/y_sum for y in ys]
if abs(P_guess - P_guess_old) < xtol:
P_guess = P_guess_old
break
if abs(P_guess - P_guess_old) > xtol:
raise ValueError("Did not converge to specified tolerance")
return P_guess, ys, l, g, iteration, abs(P_guess - P_guess_old)
def dew_P_Michelsen_Mollerup(P_guess, T, zs, liquid_phase, gas_phase,
maxiter=200, xtol=1E-10, xs_guess=None,
max_step_damping=1e5, P_update_frequency=1,
trivial_solution_tol=1e-4):
N = len(zs)
cmps = range(N)
xs = zs if xs_guess is None else xs_guess
P_guess_old = None
successive_fails = 0
for iteration in range(maxiter):
try:
g = gas_phase = gas_phase.to_TP_zs(T=T, P=P_guess, zs=zs)
lnphis_g = g.lnphis()
dlnphis_dP_g = g.dlnphis_dP()
except Exception as e:
if P_guess_old is None:
raise ValueError(g_undefined_P_msg %(P_guess, zs), e)
successive_fails += 1
P_guess = P_guess_old + copysign(min(max_step_damping, abs(step)), step)
continue
try:
l = liquid_phase= liquid_phase.to_TP_zs(T=T, P=P_guess, zs=xs)
lnphis_l = l.lnphis()
dlnphis_dP_l = l.dlnphis_dP()
except Exception as e:
if P_guess_old is None:
raise ValueError(l_undefined_P_msg %(P_guess, xs), e)
successive_fails += 1
            P_guess = P_guess_old + copysign(min(max_step_damping, abs(step)), step)
continue
if successive_fails > 2:
raise ValueError("Stopped convergence procedure after multiple bad steps")
successive_fails = 0
Ks = [exp(a - b) for a, b in zip(lnphis_l, lnphis_g)]
xs = [zs[i]/Ks[i] for i in cmps]
if iteration % P_update_frequency:
continue
f_k = sum(xs) - 1.0
dfk_dP = 0.0
for i in cmps:
dfk_dP += xs[i]*(dlnphis_dP_g[i] - dlnphis_dP_l[i])
P_guess_old = P_guess
step = -f_k/dfk_dP
P_guess = P_guess + copysign(min(max_step_damping, abs(step)), step)
comp_difference = sum([abs(zi - xi) for zi, xi in zip(zs, xs)])
if comp_difference < trivial_solution_tol:
raise ValueError("Converged to trivial condition, compositions of both phases equal")
x_sum_inv = 1.0/sum(xs)
xs = [x*x_sum_inv for x in xs]
if abs(P_guess - P_guess_old) < xtol:
P_guess = P_guess_old
break
if abs(P_guess - P_guess_old) > xtol:
raise ValueError("Did not converge to specified tolerance")
return P_guess, xs, l, g, iteration, abs(P_guess - P_guess_old)
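# Illustrative sketch only: with composition-independent K-values of the form
# K_i = A_i/P (Wilson-like), the dew condition sum(z_i/K_i) = 1 solved by the
# loop above has the closed form P_dew = 1/sum(z_i/A_i), where
# A_i = Pc_i*exp(5.373*(1 + omega_i)*(1 - Tc_i/T)). Rough ethane/n-butane
# constants are used.
def _demo_dew_P_wilson(T=350.0):
    from math import exp
    zs = [0.5, 0.5]
    Tcs = [305.32, 425.12]
    Pcs = [4.872e6, 3.796e6]
    omegas = [0.098, 0.193]
    As = [Pc*exp(5.373*(1.0 + w)*(1.0 - Tc/T))
          for Tc, Pc, w in zip(Tcs, Pcs, omegas)]
    return 1.0/sum(zi/Ai for zi, Ai in zip(zs, As))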
# spec, iter_var, fixed_var
strs_to_ders = {('H', 'T', 'P'): 'dH_dT_P',
('S', 'T', 'P'): 'dS_dT_P',
('G', 'T', 'P'): 'dG_dT_P',
('U', 'T', 'P'): 'dU_dT_P',
('A', 'T', 'P'): 'dA_dT_P',
('H', 'T', 'V'): 'dH_dT_V',
('S', 'T', 'V'): 'dS_dT_V',
('G', 'T', 'V'): 'dG_dT_V',
('U', 'T', 'V'): 'dU_dT_V',
('A', 'T', 'V'): 'dA_dT_V',
('H', 'P', 'T'): 'dH_dP_T',
('S', 'P', 'T'): 'dS_dP_T',
('G', 'P', 'T'): 'dG_dP_T',
('U', 'P', 'T'): 'dU_dP_T',
('A', 'P', 'T'): 'dA_dP_T',
('H', 'P', 'V'): 'dH_dP_V',
('S', 'P', 'V'): 'dS_dP_V',
('G', 'P', 'V'): 'dG_dP_V',
('U', 'P', 'V'): 'dU_dP_V',
('A', 'P', 'V'): 'dA_dP_V',
('H', 'V', 'T'): 'dH_dV_T',
('S', 'V', 'T'): 'dS_dV_T',
('G', 'V', 'T'): 'dG_dV_T',
('U', 'V', 'T'): 'dU_dV_T',
('A', 'V', 'T'): 'dA_dV_T',
('H', 'V', 'P'): 'dH_dV_P',
('S', 'V', 'P'): 'dS_dV_P',
('G', 'V', 'P'): 'dG_dV_P',
('U', 'V', 'P'): 'dU_dV_P',
('A', 'V', 'P'): 'dA_dV_P',
}
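# Illustrative sketch only: how the (spec, iter_var, fixed_var) table above
# turns a specification triple into a derivative method name, shown with a
# dummy stand-in class rather than a real Phase object.
def _demo_der_lookup(spec='H', iter_var='T', fixed_var='P'):
    class _DummyPhase(object):
        def dH_dT_P(self):
            return 35.0   # stands in for a real dH/dT at constant P, J/(mol*K)
    der_attr = strs_to_ders[(spec, iter_var, fixed_var)]   # -> 'dH_dT_P'
    return getattr(_DummyPhase, der_attr)(_DummyPhase())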
multiple_solution_sets = set([('T', 'S'), ('T', 'H'), ('T', 'U'), ('T', 'A'), ('T', 'G'),
('S', 'T'), ('H', 'T'), ('U', 'T'), ('A', 'T'), ('G', 'T'),
])
def TPV_solve_HSGUA_1P(zs, phase, guess, fixed_var_val, spec_val,
iter_var='T', fixed_var='P', spec='H',
maxiter=200, xtol=1E-10, ytol=None, fprime=False,
minimum_progress=0.3, oscillation_detection=True,
bounded=False, min_bound=None, max_bound=None,
multi_solution=False):
    r'''Solve a single-phase flash where one of `T`, `P`, or `V` is specified
    and one of `H`, `S`, `G`, `U`, or `A` is also specified. The iteration
    variable (the input which is changed) must be one of `T`, `P`, or `V`,
    but it cannot be the same as the fixed variable.

    This is a secant- or Newton-based solution method, optionally with
    oscillation detection to bail out of trying to solve the problem, to
    handle the case where the spec cannot be met because of a phase change
    (as in a cubic EOS case).
Parameters
----------
zs : list[float]
Mole fractions of the phase, [-]
phase : `Phase`
The phase object of the mixture, containing the information for
calculating properties at new conditions, [-]
guess : float
The guessed value for the iteration variable,
[K or Pa or m^3/mol]
fixed_var_val : float
The specified value of the fixed variable (one of T, P, or V);
[K or Pa, or m^3/mol]
spec_val : float
The specified value of H, S, G, U, or A, [J/(mol*K) or J/mol]
iter_var : str
One of 'T', 'P', 'V', [-]
fixed_var : str
One of 'T', 'P', 'V', [-]
spec : str
One of 'H', 'S', 'G', 'U', 'A', [-]
    maxiter : int
        Maximum number of iterations, [-]
xtol : float
Tolerance for secant-style convergence of the iteration variable,
[K or Pa, or m^3/mol]
ytol : float or None
Tolerance for convergence of the spec variable,
[J/(mol*K) or J/mol]
    Returns
    -------
    iter_var_val : float
        Converged value of the iteration variable, [K, Pa, or m^3/mol]
    phase : Phase
        Phase object evaluated at the converged conditions, [-]
    iterations : int
        Number of iterations used, [-]
    err : float
        Remaining error in the spec variable, [J/(mol*K) or J/mol]
Notes
-----
'''
# Needs lots of work but the idea is here
    # Can iterate changing any of T, P, V with a fixed other T, P, V to meet any
# H S G U A spec.
store = []
global iterations
iterations = 0
if fixed_var == iter_var:
raise ValueError("Fixed variable cannot be the same as iteration variable")
if fixed_var not in ('T', 'P', 'V'):
raise ValueError("Fixed variable must be one of `T`, `P`, `V`")
if iter_var not in ('T', 'P', 'V'):
raise ValueError("Iteration variable must be one of `T`, `P`, `V`")
# Little point in enforcing the spec - might want to repurpose the function later
if spec not in ('H', 'S', 'G', 'U', 'A'):
raise ValueError("Spec variable must be one of `H`, `S`, `G` `U`, `A`")
multiple_solutions = (fixed_var, spec) in multiple_solution_sets
phase_kwargs = {fixed_var: fixed_var_val, 'zs': zs}
spec_fun = getattr(phase.__class__, spec)
# print('spec_fun', spec_fun)
if fprime:
try:
# Gotta be a lookup by (spec, iter_var, fixed_var)
der_attr = strs_to_ders[(spec, iter_var, fixed_var)]
except KeyError:
der_attr = 'd' + spec + '_d' + iter_var
der_attr_fun = getattr(phase.__class__, der_attr)
# print('der_attr_fun', der_attr_fun)
def to_solve(guess, solved_phase=None):
global iterations
iterations += 1
if solved_phase is not None:
p = solved_phase
else:
phase_kwargs[iter_var] = guess
p = phase.to(**phase_kwargs)
err = spec_fun(p) - spec_val
# err = (spec_fun(p) - spec_val)/spec_val
store[:] = (p, err)
if fprime:
# print([err, guess, p.eos_mix.phase, der_attr])
derr = der_attr_fun(p)
# derr = der_attr_fun(p)/spec_val
return err, derr
# print(err)
return err
arg_fprime = fprime
high = None # Optional and not often used bound for newton
if fixed_var == 'V':
if iter_var == 'T':
max_phys = phase.T_max_at_V(fixed_var_val)
elif iter_var == 'P':
max_phys = phase.P_max_at_V(fixed_var_val)
if max_phys is not None:
if max_bound is None:
max_bound = high = max_phys
else:
max_bound = high = min(max_phys, max_bound)
# TV iterations
ignore_bound_fail = (fixed_var == 'T' and iter_var == 'P')
    if fixed_var == 'T' and iter_var in ('P', 'V'):  # equivalent to the original condition; the other clauses were unreachable
try:
fprime = False
if iter_var == 'V':
dummy_iter = 1e8
else:
dummy_iter = guess
            phase_kwargs[iter_var] = dummy_iter # Dummy value; the transition search depends only on the fixed variable
phase_temp = phase.to(**phase_kwargs)
lower_phase, higher_phase = None, None
delta = 1e-9
if fixed_var == 'T' and iter_var == 'P':
transitions = phase_temp.P_transitions()
# assert len(transitions) == 1
under_trans, above_trans = transitions[0] * (1.0 - delta), transitions[0] * (1.0 + delta)
elif fixed_var == 'P' and iter_var == 'T':
                transitions = phase_temp.T_transitions()
                assert len(transitions) == 1
                under_trans, above_trans = transitions[0] * (1.0 - delta), transitions[0] * (1.0 + delta)
elif fixed_var == 'T' and iter_var == 'V':
transitions = phase_temp.P_transitions()
delta = 1e-11
# not_separated = True
# while not_separated:
P_higher = transitions[0]*(1.0 + delta) # Dummy pressure does not matter
lower_phase = phase.to(T=fixed_var_val, zs=zs, P=P_higher)
P_lower = transitions[0]*(1.0 - delta) # Dummy pressure does not matter
higher_phase = phase.to(T=fixed_var_val, zs=zs, P=P_lower)
under_trans, above_trans = lower_phase.V(), higher_phase.V()
not_separated = isclose(under_trans, above_trans, rel_tol=1e-3)
# delta *= 10
# TODO is it possible to evaluate each limit at once, so half the work is avoided?
bracketed_high, bracketed_low = False, False
if min_bound is not None:
f_min = to_solve(min_bound)
f_low_trans = to_solve(under_trans, lower_phase)
if f_min*f_low_trans <= 0.0:
bracketed_low = True
bounding_pair = (min(min_bound, under_trans), max(min_bound, under_trans))
if max_bound is not None and (not bracketed_low or multiple_solutions):
f_max = to_solve(max_bound)
f_max_trans = to_solve(above_trans, higher_phase)
if f_max*f_max_trans <= 0.0:
bracketed_high = True
bounding_pair = (min(max_bound, above_trans), max(max_bound, above_trans))
            if min_bound is not None and max_bound is not None and not bracketed_low and not bracketed_high:
if not ignore_bound_fail:
raise NotBoundedError("Between phases")
if bracketed_high or bracketed_low:
oscillation_detection = False
high = bounding_pair[1] # restrict newton/secant just in case
min_bound, max_bound = bounding_pair
if not (min_bound < guess < max_bound):
guess = 0.5*(min_bound + max_bound)
else:
if min_bound is not None and transitions[0] < min_bound and not ignore_bound_fail:
raise NotBoundedError("Not likely to bound")
if max_bound is not None and transitions[0] > max_bound and not ignore_bound_fail:
raise NotBoundedError("Not likely to bound")
    except NotBoundedError:
        raise
except Exception:
pass
fprime = arg_fprime
# Plot the objective function
# tests = logspace(log10(10.6999), log10(10.70005), 15000)
# tests = logspace(log10(10.6), log10(10.8), 15000)
# tests = logspace(log10(min_bound), log10(max_bound), 1500)
# values = [to_solve(t)[0] for t in tests]
# values = [abs(t) for t in values]
# import matplotlib.pyplot as plt
# plt.loglog(tests, values)
# plt.show()
if oscillation_detection and ytol is not None:
to_solve2, checker = oscillation_checking_wrapper(to_solve, full=True,
minimum_progress=minimum_progress,
good_err=ytol*1e6)
else:
to_solve2 = to_solve
checker = None
solve_bounded = False
try:
        # All three variables P, T, V are positive but can grow unbounded, so
        # for the secant method, only the lower bound is set
if fprime:
iter_var_val = newton(to_solve2, guess, xtol=xtol, ytol=ytol, fprime=True,
maxiter=maxiter, bisection=True, low=min_bound, high=high, gap_detection=False)
else:
iter_var_val = secant(to_solve2, guess, xtol=xtol, ytol=ytol,
maxiter=maxiter, bisection=True, low=min_bound, high=high)
except (UnconvergedError, OscillationError, NotBoundedError):
solve_bounded = True
# Unconverged - from newton/secant; oscillation - from the oscillation detector;
# NotBounded - from when EOS needs to solve T and there is no solution
fprime = False
if solve_bounded:
if bounded and min_bound is not None and max_bound is not None:
if checker:
min_bound_prev, max_bound_prev, fa, fb = best_bounding_bounds(min_bound, max_bound,
f=to_solve, xs_pos=checker.xs_pos, ys_pos=checker.ys_pos,
xs_neg=checker.xs_neg, ys_neg=checker.ys_neg)
if abs(min_bound_prev/max_bound_prev - 1.0) > 2.5e-4:
# If the points are too close, odds are there is a discontinuity in the newton solution
min_bound, max_bound = min_bound_prev, max_bound_prev
# maxiter = 20
else:
fa, fb = None, None
else:
fa, fb = None, None
# try:
iter_var_val = brenth(to_solve, min_bound, max_bound, xtol=xtol,
ytol=ytol, maxiter=maxiter, fa=fa, fb=fb)
# except:
# # Not sure at all if good idea
# iter_var_val = secant(to_solve, guess, xtol=xtol, ytol=ytol,
# maxiter=maxiter, bisection=True, low=min_bound)
phase, err = store
return iter_var_val, phase, iterations, err
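# Minimal usage sketch (hypothetical objects; `gas` is assumed to be a configured
# Phase instance and `zs` a valid composition summing to 1):
# T, p, niter, err = TPV_solve_HSGUA_1P(zs, gas, guess=300.0, fixed_var_val=1e5,
#                                       spec_val=-100.0, iter_var='T',
#                                       fixed_var='P', spec='H')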
def solve_PTV_HSGUA_1P(phase, zs, fixed_var_val, spec_val, fixed_var,
spec, iter_var, constants, correlations, last_conv=None,
oscillation_detection=True, guess_maxiter=50,
guess_xtol=1e-7, maxiter=80, xtol=1e-10):
# TODO: replace oscillation detection with bounding parameters and translation
# The cost should be less.
if iter_var == 'T':
if isinstance(phase, CoolPropPhase):
min_bound = phase.AS.Tmin()
max_bound = phase.AS.Tmax()
else:
min_bound = phase.T_MIN_FIXED
max_bound = phase.T_MAX_FIXED
# if isinstance(phase, IAPWS95):
# min_bound = 235.0
# max_bound = 5000.0
elif iter_var == 'P':
min_bound = Phase.P_MIN_FIXED*(1.0 - 1e-12)
max_bound = Phase.P_MAX_FIXED*(1.0 + 1e-12)
if isinstance(phase, CoolPropPhase):
AS = phase.AS
max_bound = AS.pmax()*(1.0 - 1e-7)
min_bound = AS.trivial_keyed_output(CPiP_min)*(1.0 + 1e-7)
elif iter_var == 'V':
min_bound = Phase.V_MIN_FIXED
max_bound = Phase.V_MAX_FIXED
if isinstance(phase, (CEOSLiquid, CEOSGas)):
c2R = phase.eos_class.c2*R
Tcs, Pcs = constants.Tcs, constants.Pcs
b = sum([c2R*Tcs[i]*zs[i]/Pcs[i] for i in range(constants.N)])
min_bound = b*(1.0 + 1e-15)
if phase.is_gas:
methods = [LAST_CONVERGED, FIXED_GUESS, STP_T_GUESS, IG_ENTHALPY,
LASTOVKA_SHAW]
elif phase.is_liquid:
methods = [LAST_CONVERGED, FIXED_GUESS, STP_T_GUESS, IDEAL_LIQUID_ENTHALPY,
DADGOSTAR_SHAW_1]
else:
methods = [LAST_CONVERGED, FIXED_GUESS, STP_T_GUESS]
for method in methods:
try:
guess = TPV_solve_HSGUA_guesses_1P(zs, method, constants, correlations,
fixed_var_val, spec_val,
iter_var=iter_var, fixed_var=fixed_var, spec=spec,
maxiter=guess_maxiter, xtol=guess_xtol, ytol=abs(spec_val)*1e-5,
bounded=True, min_bound=min_bound, max_bound=max_bound,
user_guess=None, last_conv=last_conv, T_ref=298.15,
P_ref=101325.0)
break
except Exception:
pass
ytol = 1e-8*abs(spec_val)
if iter_var == 'T' and spec in ('S', 'H'):
ytol = ytol/100
if isinstance(phase, IAPWS95):
# Objective function isn't quite as nice and smooth as desired
ytol = None
_, phase, iterations, err = TPV_solve_HSGUA_1P(zs, phase, guess, fixed_var_val=fixed_var_val, spec_val=spec_val, ytol=ytol,
iter_var=iter_var, fixed_var=fixed_var, spec=spec, oscillation_detection=oscillation_detection,
minimum_progress=1e-4, maxiter=maxiter, fprime=True, xtol=xtol,
bounded=True, min_bound=min_bound, max_bound=max_bound)
T, P = phase.T, phase.P
return T, P, phase, iterations, err
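# Example call (a sketch; `liq`, `constants`, and `correlations` are assumed to
# be consistent, pre-built objects for the same components):
# T, P, p, niter, err = solve_PTV_HSGUA_1P(liq, zs, fixed_var_val=101325.0,
#                                          spec_val=-3000.0, fixed_var='P',
#                                          spec='H', iter_var='T',
#                                          constants=constants,
#                                          correlations=correlations)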
def TPV_solve_HSGUA_guesses_1P(zs, method, constants, correlations,
fixed_var_val, spec_val,
iter_var='T', fixed_var='P', spec='H',
maxiter=20, xtol=1E-7, ytol=None,
bounded=False, min_bound=None, max_bound=None,
user_guess=None, last_conv=None, T_ref=298.15,
P_ref=101325.0):
if fixed_var == iter_var:
raise ValueError("Fixed variable cannot be the same as iteration variable")
if fixed_var not in ('T', 'P', 'V'):
raise ValueError("Fixed variable must be one of `T`, `P`, `V`")
if iter_var not in ('T', 'P', 'V'):
raise ValueError("Iteration variable must be one of `T`, `P`, `V`")
if spec not in ('H', 'S', 'G', 'U', 'A'):
raise ValueError("Spec variable must be one of `H`, `S`, `G` `U`, `A`")
cmps = range(len(zs))
iter_T = iter_var == 'T'
iter_P = iter_var == 'P'
iter_V = iter_var == 'V'
fixed_P = fixed_var == 'P'
fixed_T = fixed_var == 'T'
fixed_V = fixed_var == 'V'
always_S = spec in ('S', 'G', 'A')
always_H = spec in ('H', 'G', 'U', 'A')
always_V = spec in ('U', 'A')
if always_S:
P_ref_inv = 1.0/P_ref
        dS_ideal = R*sum([zi*log(zi) for zi in zs if zi > 0.0]) # ideal mixing entropy contribution
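    # For example, for an equimolar binary dS_ideal = R*(0.5*log(0.5) + 0.5*log(0.5))
    # ~= -5.763 J/(mol*K); subtracting this (negative) term in `err` below adds the
    # ideal mixing entropy, and -R*log(P/P_ref) adds the pressure correction.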
def err(guess):
# Translate the fixed variable to a local variable
if fixed_P:
P = fixed_var_val
elif fixed_T:
T = fixed_var_val
elif fixed_V:
V = fixed_var_val
T = None
# Translate the iteration variable to a local variable
if iter_P:
P = guess
if not fixed_V:
V = None
elif iter_T:
T = guess
if not fixed_V:
V = None
elif iter_V:
V = guess
T = None
if T is None:
T = T_from_V(V, P)
# Compute S, H, V as necessary
if always_S:
S = S_model(T, P) - dS_ideal - R*log(P*P_ref_inv)
if always_H:
H = H_model(T, P)
if always_V and V is None:
V = V_model(T, P)
# print(H, S, V, 'hi')
# Return the objective function
if spec == 'H':
err = H - spec_val
elif spec == 'S':
err = S - spec_val
elif spec == 'G':
err = (H - T*S) - spec_val
elif spec == 'U':
err = (H - P*V) - spec_val
elif spec == 'A':
err = (H - P*V - T*S) - spec_val
# print(T, P, V, 'TPV', err)
return err
# Precompute some things depending on the method
if method in (LASTOVKA_SHAW, DADGOSTAR_SHAW_1):
MW = mixing_simple(zs, constants.MWs)
n_atoms = [sum(i.values()) for i in constants.atomss]
sv = mixing_simple(zs, n_atoms)/MW
if method == IG_ENTHALPY:
HeatCapacityGases = correlations.HeatCapacityGases
def H_model(T, P=None):
H_calc = 0.
for i in cmps:
H_calc += zs[i]*HeatCapacityGases[i].T_dependent_property_integral(T_ref, T)
return H_calc
def S_model(T, P=None):
S_calc = 0.
for i in cmps:
S_calc += zs[i]*HeatCapacityGases[i].T_dependent_property_integral_over_T(T_ref, T)
return S_calc
def V_model(T, P): return R*T/P
def T_from_V(V, P): return P*V/R
elif method == LASTOVKA_SHAW:
H_ref = Lastovka_Shaw_integral(T_ref, sv)
S_ref = Lastovka_Shaw_integral_over_T(T_ref, sv)
def H_model(T, P=None):
H1 = Lastovka_Shaw_integral(T, sv)
dH = H1 - H_ref
return property_mass_to_molar(dH, MW)
def S_model(T, P=None):
S1 = Lastovka_Shaw_integral_over_T(T, sv)
dS = S1 - S_ref
return property_mass_to_molar(dS, MW)
def V_model(T, P): return R*T/P
def T_from_V(V, P): return P*V/R
elif method == DADGOSTAR_SHAW_1:
Tc = mixing_simple(zs, constants.Tcs)
omega = mixing_simple(zs, constants.omegas)
H_ref = Dadgostar_Shaw_integral(T_ref, sv)
S_ref = Dadgostar_Shaw_integral_over_T(T_ref, sv)
def H_model(T, P=None):
H1 = Dadgostar_Shaw_integral(T, sv)
Hvap = SMK(T, Tc, omega)
return (property_mass_to_molar(H1 - H_ref, MW) - Hvap)
def S_model(T, P=None):
S1 = Dadgostar_Shaw_integral_over_T(T, sv)
dSvap = SMK(T, Tc, omega)/T
return (property_mass_to_molar(S1 - S_ref, MW) - dSvap)
Vc = mixing_simple(zs, constants.Vcs)
def V_model(T, P=None): return COSTALD(T, Tc, Vc, omega)
        def T_from_V(V, P): return secant(lambda T: COSTALD(T, Tc, Vc, omega) - V, .65*Tc)
elif method == IDEAL_LIQUID_ENTHALPY:
HeatCapacityGases = correlations.HeatCapacityGases
EnthalpyVaporizations = correlations.EnthalpyVaporizations
def H_model(T, P=None):
H_calc = 0.
for i in cmps:
H_calc += zs[i]*(HeatCapacityGases[i].T_dependent_property_integral(T_ref, T) - EnthalpyVaporizations[i](T))
return H_calc
def S_model(T, P=None):
S_calc = 0.
T_inv = 1.0/T
for i in cmps:
S_calc += zs[i]*(HeatCapacityGases[i].T_dependent_property_integral_over_T(T_ref, T) - T_inv*EnthalpyVaporizations[i](T))
return S_calc
VolumeLiquids = correlations.VolumeLiquids
def V_model(T, P=None):
V_calc = 0.
for i in cmps:
V_calc += zs[i]*VolumeLiquids[i].T_dependent_property(T)
return V_calc
def T_from_V(V, P):
T_calc = 0.
for i in cmps:
T_calc += zs[i]*VolumeLiquids[i].solve_property(V)
return T_calc
# Simple return values - not going through a model
if method == STP_T_GUESS:
if iter_T:
return 298.15
elif iter_P:
return 101325.0
elif iter_V:
return 0.024465403697038125
elif method == LAST_CONVERGED:
if last_conv is None:
raise ValueError("No last converged")
return last_conv
elif method == FIXED_GUESS:
if user_guess is None:
raise ValueError("No user guess")
return user_guess
try:
        # All three variables P, T, V are positive but can grow unbounded, so
        # for the secant method, only the lower bound is set
if iter_T:
guess = 298.15
elif iter_P:
guess = 101325.0
elif iter_V:
guess = 0.024465403697038125
return secant(err, guess, xtol=xtol, ytol=ytol,
maxiter=maxiter, bisection=True, low=min_bound)
except (UnconvergedError,):
# G and A specs are NOT MONOTONIC and the brackets will likely NOT BRACKET
# THE ROOTS!
return brenth(err, min_bound, max_bound, xtol=xtol, ytol=ytol, maxiter=maxiter)
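# Example (a sketch; a populated `constants`/`correlations` pair is assumed and
# LASTOVKA_SHAW is one of the method constants defined elsewhere in this module):
# T0 = TPV_solve_HSGUA_guesses_1P(zs, LASTOVKA_SHAW, constants, correlations,
#                                 fixed_var_val=1e5, spec_val=5000.0,
#                                 iter_var='T', fixed_var='P', spec='H',
#                                 min_bound=10.0, max_bound=5000.0)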
def PH_secant_1P(T_guess, P, H, zs, phase, maxiter=200, xtol=1E-10,
minimum_progress=0.3, oscillation_detection=True):
store = []
global iterations
iterations = 0
def to_solve(T):
global iterations
iterations += 1
p = phase.to_TP_zs(T, P, zs)
err = p.H() - H
store[:] = (p, err)
return err
if oscillation_detection:
to_solve, checker = oscillation_checking_wrapper(to_solve, full=True,
minimum_progress=minimum_progress)
T = secant(to_solve, T_guess, xtol=xtol, maxiter=maxiter)
phase, err = store
return T, phase, iterations, err
def PH_newton_1P(T_guess, P, H, zs, phase, maxiter=200, xtol=1E-10,
minimum_progress=0.3, oscillation_detection=True):
store = []
global iterations
iterations = 0
def to_solve(T):
global iterations
iterations += 1
p = phase.to_TP_zs(T, P, zs)
err = p.H() - H
derr_dT = p.dH_dT()
store[:] = (p, err)
return err, derr_dT
if oscillation_detection:
to_solve, checker = oscillation_checking_wrapper(to_solve, full=True,
minimum_progress=minimum_progress)
T = newton(to_solve, T_guess, fprime=True, xtol=xtol, maxiter=maxiter)
phase, err = store
return T, phase, iterations, err
def TVF_pure_newton(P_guess, T, liquids, gas, maxiter=200, xtol=1E-10):
one_liquid = len(liquids)
zs = [1.0]
store = []
global iterations
iterations = 0
def to_solve_newton(P):
global iterations
iterations += 1
g = gas.to_TP_zs(T, P, zs)
fugacity_gas = g.fugacities()[0]
dfugacities_dP_gas = g.dfugacities_dP()[0]
if one_liquid:
lowest_phase = liquids[0].to_TP_zs(T, P, zs)
else:
ls = [l.to_TP_zs(T, P, zs) for l in liquids]
G_min, lowest_phase = 1e100, None
for l in ls:
G = l.G()
if G < G_min:
G_min, lowest_phase = G, l
fugacity_liq = lowest_phase.fugacities()[0]
dfugacities_dP_liq = lowest_phase.dfugacities_dP()[0]
err = fugacity_liq - fugacity_gas
derr_dP = dfugacities_dP_liq - dfugacities_dP_gas
store[:] = (lowest_phase, g, err)
return err, derr_dP
Psat = newton(to_solve_newton, P_guess, xtol=xtol, maxiter=maxiter,
low=Phase.P_MIN_FIXED,
require_eval=True, bisection=False, fprime=True)
l, g, err = store
return Psat, l, g, iterations, err
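# Usage sketch for a pure-component saturation pressure at fixed T (hypothetical
# `liquid`/`gas` Phase objects for the same single component):
# Psat, l, g, niter, err = TVF_pure_newton(P_guess=1e5, T=350.0,
#                                          liquids=[liquid], gas=gas)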
def TVF_pure_secant(P_guess, T, liquids, gas, maxiter=200, xtol=1E-10):
one_liquid = len(liquids)
zs = [1.0]
store = []
global iterations
iterations = 0
def to_solve_secant(P):
global iterations
iterations += 1
g = gas.to_TP_zs(T, P, zs)
fugacity_gas = g.fugacities()[0]
if one_liquid:
lowest_phase = liquids[0].to_TP_zs(T, P, zs)
else:
ls = [l.to_TP_zs(T, P, zs) for l in liquids]
G_min, lowest_phase = 1e100, None
for l in ls:
G = l.G()
if G < G_min:
G_min, lowest_phase = G, l
fugacity_liq = lowest_phase.fugacities()[0]
err = fugacity_liq - fugacity_gas
store[:] = (lowest_phase, g, err)
return err
if P_guess < Phase.P_MIN_FIXED:
raise ValueError("Too low.")
# if P_guess < Phase.P_MIN_FIXED:
# low = None
# else:
# low = Phase.P_MIN_FIXED
Psat = secant(to_solve_secant, P_guess, xtol=xtol, maxiter=maxiter, low=Phase.P_MIN_FIXED*(1-1e-10))
l, g, err = store
return Psat, l, g, iterations, err
def PVF_pure_newton(T_guess, P, liquids, gas, maxiter=200, xtol=1E-10):
one_liquid = len(liquids)
zs = [1.0]
store = []
global iterations
iterations = 0
def to_solve_newton(T):
global iterations
iterations += 1
g = gas.to_TP_zs(T, P, zs)
fugacity_gas = g.fugacities()[0]
dfugacities_dT_gas = g.dfugacities_dT()[0]
if one_liquid:
lowest_phase = liquids[0].to_TP_zs(T, P, zs)
else:
ls = [l.to_TP_zs(T, P, zs) for l in liquids]
G_min, lowest_phase = 1e100, None
for l in ls:
G = l.G()
if G < G_min:
G_min, lowest_phase = G, l
fugacity_liq = lowest_phase.fugacities()[0]
dfugacities_dT_liq = lowest_phase.dfugacities_dT()[0]
err = fugacity_liq - fugacity_gas
derr_dT = dfugacities_dT_liq - dfugacities_dT_gas
store[:] = (lowest_phase, g, err)
return err, derr_dT
Tsat = newton(to_solve_newton, T_guess, xtol=xtol, maxiter=maxiter,
low=Phase.T_MIN_FIXED,
require_eval=True, bisection=False, fprime=True)
l, g, err = store
return Tsat, l, g, iterations, err
def PVF_pure_secant(T_guess, P, liquids, gas, maxiter=200, xtol=1E-10):
one_liquid = len(liquids)
zs = [1.0]
store = []
global iterations
iterations = 0
def to_solve_secant(T):
global iterations
iterations += 1
g = gas.to_TP_zs(T, P, zs)
fugacity_gas = g.fugacities()[0]
if one_liquid:
lowest_phase = liquids[0].to_TP_zs(T, P, zs)
else:
ls = [l.to_TP_zs(T, P, zs) for l in liquids]
G_min, lowest_phase = 1e100, None
for l in ls:
G = l.G()
if G < G_min:
G_min, lowest_phase = G, l
fugacity_liq = lowest_phase.fugacities()[0]
err = fugacity_liq - fugacity_gas
store[:] = (lowest_phase, g, err)
return err
Tsat = secant(to_solve_secant, T_guess, xtol=xtol, maxiter=maxiter,
low=Phase.T_MIN_FIXED)
l, g, err = store
return Tsat, l, g, iterations, err
def TSF_pure_newton(P_guess, T, other_phases, solids, maxiter=200, xtol=1E-10):
one_other = len(other_phases)
one_solid = len(solids)
zs = [1.0]
store = []
global iterations
iterations = 0
def to_solve_newton(P):
global iterations
iterations += 1
if one_solid:
lowest_solid = solids[0].to_TP_zs(T, P, zs)
else:
ss = [s.to_TP_zs(T, P, zs) for s in solids]
G_min, lowest_solid = 1e100, None
for o in ss:
G = o.G()
if G < G_min:
G_min, lowest_solid = G, o
fugacity_solid = lowest_solid.fugacities()[0]
dfugacities_dP_solid = lowest_solid.dfugacities_dP()[0]
if one_other:
lowest_other = other_phases[0].to_TP_zs(T, P, zs)
else:
others = [l.to_TP_zs(T, P, zs) for l in other_phases]
G_min, lowest_other = 1e100, None
for o in others:
G = o.G()
if G < G_min:
G_min, lowest_other = G, o
fugacity_other = lowest_other.fugacities()[0]
dfugacities_dP_other = lowest_other.dfugacities_dP()[0]
err = fugacity_other - fugacity_solid
derr_dP = dfugacities_dP_other - dfugacities_dP_solid
store[:] = (lowest_other, lowest_solid, err)
return err, derr_dP
Psub = newton(to_solve_newton, P_guess, xtol=xtol, maxiter=maxiter,
require_eval=True, bisection=False, fprime=True)
other, solid, err = store
return Psub, other, solid, iterations, err
def PSF_pure_newton(T_guess, P, other_phases, solids, maxiter=200, xtol=1E-10):
one_other = len(other_phases)
one_solid = len(solids)
zs = [1.0]
store = []
global iterations
iterations = 0
def to_solve_newton(T):
global iterations
iterations += 1
if one_solid:
lowest_solid = solids[0].to_TP_zs(T, P, zs)
else:
ss = [s.to_TP_zs(T, P, zs) for s in solids]
G_min, lowest_solid = 1e100, None
for o in ss:
G = o.G()
if G < G_min:
G_min, lowest_solid = G, o
fugacity_solid = lowest_solid.fugacities()[0]
dfugacities_dT_solid = lowest_solid.dfugacities_dT()[0]
if one_other:
lowest_other = other_phases[0].to_TP_zs(T, P, zs)
else:
others = [l.to_TP_zs(T, P, zs) for l in other_phases]
G_min, lowest_other = 1e100, None
for o in others:
G = o.G()
if G < G_min:
G_min, lowest_other = G, o
fugacity_other = lowest_other.fugacities()[0]
dfugacities_dT_other = lowest_other.dfugacities_dT()[0]
err = fugacity_other - fugacity_solid
derr_dT = dfugacities_dT_other - dfugacities_dT_solid
store[:] = (lowest_other, lowest_solid, err)
return err, derr_dT
Tsub = newton(to_solve_newton, T_guess, xtol=xtol, maxiter=maxiter,
require_eval=True, bisection=False, fprime=True)
other, solid, err = store
return Tsub, other, solid, iterations, err
def solve_T_VF_IG_K_composition_independent(VF, T, zs, gas, liq, xtol=1e-10):
'''from sympy import *
zi, P, VF = symbols('zi, P, VF')
l_phi, g_phi = symbols('l_phi, g_phi', cls=Function)
# g_phi = symbols('g_phi')
# Ki = l_phi(P)/g_phi(P)
Ki = l_phi(P)#/g_phi
err = zi*(Ki-1)/(1+VF*(Ki-1))
cse([diff(err, P), err], optimizations='basic')'''
# gas phis are all one in IG model
# gas.to(T=T, P=P, zs=zs)
cmps = range(liq.N)
global Ks, iterations, err
iterations = 0
err = 0.0
def to_solve(lnP):
global Ks, iterations, err
iterations += 1
P = exp(lnP)
l = liq.to(T=T, P=P, zs=zs)
Ks = liquid_phis = l.phis()
dlnphis_dP_l = l.dphis_dP()
err = derr = 0.0
for i in cmps:
x1 = liquid_phis[i] - 1.0
x2 = VF*x1
x3 = 1.0/(x2 + 1.0)
x4 = x3*zs[i]
err += x1*x4
derr += x4*(1.0 - x2*x3)*dlnphis_dP_l[i]
return err, P*derr
# estimate bubble point and dew point
# Make sure to overwrite the phase so the Psats get cached
P_base = 1e5
liq = liq.to(T=T, P=P_base, zs=zs)
phis = liq.phis()
P_bub, P_dew = 0.0, 0.0
for i in range(liq.N):
P_bub += phis[i]*zs[i]
P_dew += zs[i]/(phis[i]*P_base)
P_bub = P_bub*liq.P
P_dew = 1.0/P_dew
P_guess = VF*P_dew + (1.0 - VF)*P_bub
    # When the Poynting correction is on, these are only estimates; otherwise they
    # are exact and there is no need for a solver
if liq.use_Poynting or 0.0 < VF < 1.0:
lnP = newton(to_solve, log(P_guess), xtol=xtol, fprime=True)
P = exp(lnP)
else:
if VF == 0.0:
Ks = liq.to(T=T, P=P_bub, zs=zs).phis()
P = P_bub
elif VF == 1.0:
Ks = liq.to(T=T, P=P_dew, zs=zs).phis()
P = P_dew
else:
raise ValueError("Vapor fraction outside range 0 to 1")
xs = [zs[i]/(1.+VF*(Ks[i]-1.)) for i in cmps]
for i in cmps:
Ks[i] *= xs[i]
ys = Ks
return P, xs, ys, iterations, err
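# Sketch (assumes `liq` is an activity/Poynting-style model whose fugacity
# coefficients do not depend on composition, and an ideal-gas `gas` phase):
# P, xs, ys, niter, err = solve_T_VF_IG_K_composition_independent(
#     VF=0.5, T=320.0, zs=[0.4, 0.6], gas=gas, liq=liq)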
def solve_P_VF_IG_K_composition_independent(VF, P, zs, gas, liq, xtol=1e-10):
# gas phis are all one in IG model
# gas.to(T=T, P=P, zs=zs)
cmps = range(liq.N)
global Ks, iterations, err
iterations = 0
def to_solve(T):
global Ks, iterations, err
iterations += 1
dlnphis_dT_l, liquid_phis = liq.dphis_dT_at(T, P, zs, phis_also=True)
Ks = liquid_phis
# l = liq.to(T=T, P=P, zs=zs)
# Ks = liquid_phis = l.phis()
# dlnphis_dT_l = l.dphis_dT()
err = derr = 0.0
for i in cmps:
x1 = liquid_phis[i] - 1.0
x2 = VF*x1
x3 = 1.0/(x2 + 1.0)
x4 = x3*zs[i]
err += x1*x4
derr += x4*(1.0 - x2*x3)*dlnphis_dT_l[i]
return err, derr
    try:
        T = newton(to_solve, 300.0, xtol=xtol, fprime=True, low=1e-6)
    except Exception:
        try:
            T = brenth(lambda x: to_solve(x)[0], 300, 1000)
        except Exception:
            T = newton(to_solve, 400.0, xtol=xtol, fprime=True, low=1e-6)
xs = [zs[i]/(1.+VF*(Ks[i]-1.)) for i in cmps]
for i in cmps:
Ks[i] *= xs[i]
ys = Ks
return T, xs, ys, iterations, err
def sequential_substitution_2P_sat(T, P, V, zs_dry, xs_guess, ys_guess, liquid_phase,
gas_phase, idx, z0, z1=None, maxiter=1000, tol=1E-13,
trivial_solution_tol=1e-5, damping=1.0):
xs, ys = xs_guess, ys_guess
V_over_F = 1.0
cmps = range(len(zs_dry))
if z1 is None:
z1 = z0*1.0001 + 1e-4
if z1 > 1:
z1 = z0*1.0001 - 1e-4
# secant step/solving
p0, p1, err0, err1 = None, None, None, None
def step(p0, p1, err0, err1):
if p0 is None:
return z0
if p1 is None:
return z1
else:
new = p1 - err1*(p1 - p0)/(err1 - err0)*damping
return new
for iteration in range(maxiter):
p0, p1 = step(p0, p1, err0, err1), p0
zs = list(zs_dry)
zs[idx] = p0
zs = normalize(zs)
# print(zs, p0, p1)
g = gas_phase.to(ys, T=T, P=P, V=V)
l = liquid_phase.to(xs, T=T, P=P, V=V)
lnphis_g = g.lnphis()
lnphis_l = l.lnphis()
Ks = [exp(lnphis_l[i] - lnphis_g[i]) for i in cmps]
V_over_F, xs_new, ys_new = flash_inner_loop(zs, Ks, guess=V_over_F)
err0, err1 = 1.0 - V_over_F, err0
# Check for negative fractions - normalize only if needed
for xi in xs_new:
if xi < 0.0:
xs_new_sum = sum(abs(i) for i in xs_new)
xs_new = [abs(i)/xs_new_sum for i in xs_new]
break
for yi in ys_new:
if yi < 0.0:
ys_new_sum = sum(abs(i) for i in ys_new)
ys_new = [abs(i)/ys_new_sum for i in ys_new]
break
err, comp_diff = 0.0, 0.0
for i in cmps:
err_i = Ks[i]*xs[i]/ys[i] - 1.0
err += err_i*err_i + abs(ys[i] - zs[i])
comp_diff += abs(xs[i] - ys[i])
# Accept the new compositions
# xs, ys = xs_new, zs # This has worse convergence behavior?
xs, ys = xs_new, ys_new
if comp_diff < trivial_solution_tol:
raise ValueError("Converged to trivial condition, compositions of both phases equal")
if err < tol and abs(err0) < tol:
return V_over_F, xs, zs, l, g, iteration, err, err0
raise UnconvergedError('End of SS without convergence')
def SS_VF_simultaneous(guess, fixed_val, zs, liquid_phase, gas_phase,
iter_var='T', fixed_var='P', V_over_F=1,
maxiter=200, xtol=1E-10, comp_guess=None,
damping=0.8, tol_eq=1e-12, update_frequency=3):
if comp_guess is None:
comp_guess = zs
if V_over_F == 1 or V_over_F > 0.5:
dew = True
xs, ys = comp_guess, zs
else:
dew = False
xs, ys = zs, comp_guess
sln = sequential_substitution_2P_HSGUAbeta(zs=zs, xs_guess=xs, ys_guess=ys, liquid_phase=liquid_phase,
gas_phase=gas_phase, fixed_var_val=fixed_val, spec_val=V_over_F, tol_spec=xtol,
iter_var_0=guess, update_frequency=update_frequency,
iter_var=iter_var, fixed_var=fixed_var, spec='beta', damping=damping, tol_eq=tol_eq)
guess, _, xs, ys, l, g, iteration, err_eq, spec_err = sln
if dew:
comp_guess = xs
iter_phase, const_phase = l, g
else:
comp_guess = ys
iter_phase, const_phase = g, l
return guess, comp_guess, iter_phase, const_phase, iteration, {'err_eq': err_eq, 'spec_err': spec_err}
def sequential_substitution_2P_HSGUAbeta(zs, xs_guess, ys_guess, liquid_phase,
gas_phase, fixed_var_val, spec_val,
iter_var_0, iter_var_1=None,
iter_var='T', fixed_var='P', spec='H',
maxiter=1000, tol_eq=1E-13, tol_spec=1e-9,
trivial_solution_tol=1e-5, damping=1.0,
V_over_F_guess=None, fprime=True,
update_frequency=1, update_eq=1e-7):
xs, ys = xs_guess, ys_guess
if V_over_F_guess is None:
V_over_F = 0.5
else:
V_over_F = V_over_F_guess
cmps = range(len(zs))
if iter_var_1 is None:
iter_var_1 = iter_var_0*1.0001 + 1e-4
tol_spec_abs = tol_spec*abs(spec_val)
if tol_spec_abs == 0.0:
if spec == 'beta':
tol_spec_abs = 1e-9
else:
tol_spec_abs = 1e-7
# secant step/solving
p0, p1, spec_err, spec_err_old = None, None, None, None
def step(p0, p1, spec_err, spec_err_old, step_der):
if p0 is None:
return iter_var_0
if p1 is None:
return iter_var_1
else:
secant_step = spec_err_old*(p1 - p0)/(spec_err_old - spec_err)*damping
if fprime and step_der is not None:
if abs(step_der) < abs(secant_step):
step = step_der
new = p0 - step
else:
step = secant_step
new = p1 - step
else:
new = p1 - secant_step
if new < 1e-7:
# Only handle positive values, damped steps to .5
new = 0.5*(1e-7 + p0)
# print(p0, p1, new)
return new
TPV_args = {fixed_var: fixed_var_val, iter_var: iter_var_0}
VF_spec = spec == 'beta'
if not VF_spec:
spec_fun_l = getattr(liquid_phase.__class__, spec)
spec_fun_g = getattr(gas_phase.__class__, spec)
s_der = 'd%s_d%s_%s'%(spec, iter_var, fixed_var)
spec_der_fun_l = getattr(liquid_phase.__class__, s_der)
spec_der_fun_g = getattr(gas_phase.__class__, s_der)
else:
V_over_F = iter_var_0
step_der = None
    err_eq = 1e100  # ensure the update condition is well-defined on early iterations
    for iteration in range(maxiter):
        if iteration < 2 or not (iteration % update_frequency) or err_eq < update_eq:
p0, p1 = step(p0, p1, spec_err, spec_err_old, step_der), p0
TPV_args[iter_var] = p0
g = gas_phase.to(ys, **TPV_args)
l = liquid_phase.to(xs, **TPV_args)
lnphis_g = g.lnphis()
lnphis_l = l.lnphis()
Ks = [exp(lnphis_l[i] - lnphis_g[i]) for i in cmps]
V_over_F, xs_new, ys_new = flash_inner_loop(zs, Ks, guess=V_over_F)
if not VF_spec:
spec_calc = spec_fun_l(l)*(1.0 - V_over_F) + spec_fun_g(g)*V_over_F
spec_der_calc = spec_der_fun_l(l)*(1.0 - V_over_F) + spec_der_fun_g(g)*V_over_F
# print(spec_der_calc)
else:
spec_calc = V_over_F
        if iteration < 2 or not (iteration % update_frequency) or err_eq < update_eq:
spec_err_old = spec_err # Only update old error on an update iteration
spec_err = spec_calc - spec_val
try:
step_der = spec_err/spec_der_calc
# print(spec_err, step_der, p1-p0)
        except Exception:
pass
# Check for negative fractions - normalize only if needed
for xi in xs_new:
if xi < 0.0:
xs_new_sum_inv = 1.0/sum(abs(i) for i in xs_new)
xs_new = [abs(i)*xs_new_sum_inv for i in xs_new]
break
for yi in ys_new:
if yi < 0.0:
ys_new_sum_inv = 1.0/sum(abs(i) for i in ys_new)
ys_new = [abs(i)*ys_new_sum_inv for i in ys_new]
break
err_eq, comp_diff = 0.0, 0.0
for i in cmps:
err_i = Ks[i]*xs[i]/ys[i] - 1.0
err_eq += err_i*err_i
comp_diff += abs(xs[i] - ys[i])
# Accept the new compositions
# xs, ys = xs_new, zs # This has worse convergence behavior; seems to not even converge some of the time
xs, ys = xs_new, ys_new
if comp_diff < trivial_solution_tol and iteration: # Allow the first iteration to start with the same composition
raise ValueError("Converged to trivial condition, compositions of both phases equal")
        # print('Guess: %g, Eq Err: %g, Spec Err: %g, VF: %g' %(p0, err_eq, spec_err, V_over_F))
# print(p0, err_eq, spec_err, V_over_F)
# print(p0, err, spec_err, xs, ys, V_over_F)
if err_eq < tol_eq and abs(spec_err) < tol_spec_abs:
return p0, V_over_F, xs, ys, l, g, iteration, err_eq, spec_err
raise UnconvergedError('End of SS without convergence')
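# Sketch of a PH flash via SS with a secant/Newton outer loop on T (hypothetical
# phases and guesses; spec='beta' would instead target the vapor fraction):
# T, VF, xs, ys, l, g, niter, err_eq, spec_err = sequential_substitution_2P_HSGUAbeta(
#     zs, xs_guess, ys_guess, liquid_phase=liq, gas_phase=gas,
#     fixed_var_val=1e5, spec_val=100.0, iter_var_0=300.0,
#     iter_var='T', fixed_var='P', spec='H')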
def sequential_substitution_2P_double(zs, xs_guess, ys_guess, liquid_phase,
gas_phase, guess, spec_vals,
iter_var0='T', iter_var1='P',
spec_vars=['H', 'S'],
maxiter=1000, tol_eq=1E-13, tol_specs=1e-9,
trivial_solution_tol=1e-5, damping=1.0,
V_over_F_guess=None, fprime=True):
xs, ys = xs_guess, ys_guess
if V_over_F_guess is None:
V_over_F = 0.5
else:
V_over_F = V_over_F_guess
cmps = range(len(zs))
iter0_val = guess[0]
iter1_val = guess[1]
spec0_val = spec_vals[0]
spec1_val = spec_vals[1]
spec0_var = spec_vars[0]
spec1_var = spec_vars[1]
spec0_fun_l = getattr(liquid_phase.__class__, spec0_var)
spec0_fun_g = getattr(gas_phase.__class__, spec0_var)
spec1_fun_l = getattr(liquid_phase.__class__, spec1_var)
spec1_fun_g = getattr(gas_phase.__class__, spec1_var)
spec0_der0 = 'd%s_d%s_%s'%(spec0_var, iter_var0, iter_var1)
spec1_der0 = 'd%s_d%s_%s'%(spec1_var, iter_var0, iter_var1)
spec0_der1 = 'd%s_d%s_%s'%(spec0_var, iter_var1, iter_var0)
spec1_der1 = 'd%s_d%s_%s'%(spec1_var, iter_var1, iter_var0)
spec0_der0_fun_l = getattr(liquid_phase.__class__, spec0_der0)
spec0_der0_fun_g = getattr(gas_phase.__class__, spec0_der0)
spec1_der0_fun_l = getattr(liquid_phase.__class__, spec1_der0)
spec1_der0_fun_g = getattr(gas_phase.__class__, spec1_der0)
spec0_der1_fun_l = getattr(liquid_phase.__class__, spec0_der1)
spec0_der1_fun_g = getattr(gas_phase.__class__, spec0_der1)
spec1_der1_fun_l = getattr(liquid_phase.__class__, spec1_der1)
spec1_der1_fun_g = getattr(gas_phase.__class__, spec1_der1)
    TPV_args = {}  # filled with the two iteration variables each pass
    step_der = None
for iteration in range(maxiter):
TPV_args[iter_var0] = iter0_val
TPV_args[iter_var1] = iter1_val
g = gas_phase.to(zs=ys, **TPV_args)
l = liquid_phase.to(zs=xs, **TPV_args)
lnphis_g = g.lnphis()
lnphis_l = l.lnphis()
Ks = [exp(lnphis_l[i] - lnphis_g[i]) for i in cmps]
V_over_F, xs_new, ys_new = flash_inner_loop(zs, Ks, guess=V_over_F)
spec0_calc = spec0_fun_l(l)*(1.0 - V_over_F) + spec0_fun_g(g)*V_over_F
spec1_calc = spec1_fun_l(l)*(1.0 - V_over_F) + spec1_fun_g(g)*V_over_F
spec0_der0_calc = spec0_der0_fun_l(l)*(1.0 - V_over_F) + spec0_der0_fun_g(g)*V_over_F
spec0_der1_calc = spec0_der1_fun_l(l)*(1.0 - V_over_F) + spec0_der1_fun_g(g)*V_over_F
spec1_der0_calc = spec1_der0_fun_l(l)*(1.0 - V_over_F) + spec1_der0_fun_g(g)*V_over_F
spec1_der1_calc = spec1_der1_fun_l(l)*(1.0 - V_over_F) + spec1_der1_fun_g(g)*V_over_F
errs = [spec0_calc - spec0_val, spec1_calc - spec1_val]
jac = [[spec0_der0_calc, spec0_der1_calc], [spec1_der0_calc, spec1_der1_calc]]
# Do the newton step
dx = py_solve(jac, [-v for v in errs])
iter0_val, iter1_val = [xi + dxi*damping for xi, dxi in zip([iter0_val, iter1_val], dx)]
# Check for negative fractions - normalize only if needed
for xi in xs_new:
if xi < 0.0:
xs_new_sum = sum(abs(i) for i in xs_new)
xs_new = [abs(i)/xs_new_sum for i in xs_new]
break
for yi in ys_new:
if yi < 0.0:
ys_new_sum = sum(abs(i) for i in ys_new)
ys_new = [abs(i)/ys_new_sum for i in ys_new]
break
err, comp_diff = 0.0, 0.0
for i in cmps:
err_i = Ks[i]*xs[i]/ys[i] - 1.0
err += err_i*err_i
comp_diff += abs(xs[i] - ys[i])
xs, ys = xs_new, ys_new
if comp_diff < trivial_solution_tol:
raise ValueError("Converged to trivial condition, compositions of both phases equal")
        if err < tol_eq and abs(errs[0]) < tol_specs and abs(errs[1]) < tol_specs:
            return (iter0_val, iter1_val), V_over_F, xs, ys, l, g, iteration, err, errs
raise UnconvergedError('End of SS without convergence')
def stability_iteration_Michelsen(trial_phase, zs_test, test_phase=None,
maxiter=20, xtol=1E-12):
    # So long as both trial_phase and test_phase use the lowest-Gibbs-energy
    # fugacities, there is no need to test two phases.
    # Very much no need to converge using acceleration - just keep a low tolerance
    # At any point, the working Ks could be used to assume a drop of the new phase,
    # evaluate two new phases, and see if G drops; if it does, drop out early!
    # This implementation does not do that.
    # Should be possible to tell if converging to the trivial solution during the
    # process - and bail out then
    # It is possible to switch this function to operate on lnphis e.g.
    # corrections[i] = ci = zs[i]/zs_test[i]*trunc_exp(lnphis_trial[i] - lnphis_test[i])*sum_zs_test_inv
    # however numerical differences seem to be huge, and it behaves better operating
    # on fugacities with the trunc_exp function than anything else.
    # Can this whole function be switched to the functional approach?
    # Should be possible
if test_phase is None:
test_phase = trial_phase
T, P, zs = trial_phase.T, trial_phase.P, trial_phase.zs
N = trial_phase.N
fugacities_trial = trial_phase.fugacities_lowest_Gibbs()
    # Go through the feed composition - and the trial composition - if there are
    # zeros, they need to be made a trace amount;
zs_test2 = [0.0]*N
for i in range(N):
zs_test2[i] = zs_test[i]
zs_test = zs_test2
for i in range(N):
if zs_test[i] == 0.0:
zs_test[i] = 1e-50
# break
for i in range(N):
if zs[i] == 0.0:
zs2 = [0.0]*N
for i in range(N):
if zs[i] == 0.0:
zs2[i] = 1e-50
else:
zs2[i] = zs[i]
zs = zs2
# Requires another evaluation of the trial phase
trial_phase = trial_phase.to(T=T, P=P, zs=zs)
fugacities_trial = trial_phase.fugacities_lowest_Gibbs()
break
# Basis of equations is for the test phase being a gas, the trial phase assumed is a liquid
# makes no real difference
Ks = [0.0]*N
corrections = [1.0]*N
# Model converges towards fictional K values which, when evaluated, yield the
# stationary point composition
for i in range(N):
Ks[i] = zs_test[i]/zs[i]
sum_zs_test = sum_zs_test_inv = 1.0
converged = False
for _ in range(maxiter):
# test_phase = test_phase.to(T=T, P=P, zs=zs_test)
# fugacities_test = test_phase.fugacities_lowest_Gibbs()
fugacities_test = test_phase.fugacities_at_zs(zs_test)
err = 0.0
try:
for i in range(N):
corrections[i] = ci = fugacities_trial[i]/fugacities_test[i]*sum_zs_test_inv
Ks[i] *= ci
err += (ci - 1.0)*(ci - 1.0)
        except Exception:
# A test fugacity became zero
# May need special handling for this outside.
converged = True
break
if err < xtol:
converged = True
break
# Update compositions for the next iteration - might as well move this above the break check
for i in range(N):
zs_test[i] = Ks[i]*zs[i] # new test phase comp
# Cannot move the normalization above the error check - returning
# unnormalized sum_zs_test is used also to detect a trivial solution
sum_zs_test = 0.0
for i in range(N):
sum_zs_test += zs_test[i]
try:
sum_zs_test_inv = 1.0/sum_zs_test
        except ZeroDivisionError:
# Fugacities are all zero
converged = True
break
for i in range(N):
zs_test[i] *= sum_zs_test_inv
if converged:
        try:
            V_over_F, trial_zs, appearing_zs = flash_inner_loop(zs, Ks)
        except Exception:
            # Converged to trivial solution so closely the math does not work
            V_over_F, trial_zs, appearing_zs = 0.0, zs, zs
# Calculate the dG of the feed
dG_RT = 0.0
if V_over_F != 0.0:
lnphis_test = test_phase.lnphis_at_zs(zs_test) #test_phase.lnphis()
for i in range(N):
dG_RT += zs_test[i]*(log(zs_test[i]) + lnphis_test[i])
dG_RT *= V_over_F
# print(dG_RT)
return sum_zs_test, Ks, zs_test, V_over_F, trial_zs, appearing_zs, dG_RT
else:
raise UnconvergedError('End of stability_iteration_Michelsen without convergence')
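# Sketch (a trial Phase at fixed T, P and a Wilson-style trial composition are
# assumed; the interpretation below follows Michelsen's stability criteria):
# sum_zs, Ks, zs_out, VF, trial_zs, new_zs, dG_RT = stability_iteration_Michelsen(
#     trial_phase, zs_test=[0.99, 0.01])
# A converged sum_zs > 1.0 (or dG_RT < 0) suggests the trial phase is unstable,
# and trial_zs/new_zs are reasonable starting compositions for a 2-phase flash.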
def TPV_double_solve_1P(zs, phase, guesses, spec_vals,
goal_specs=('V', 'U'), state_specs=('T', 'P'),
maxiter=200, xtol=1E-10, ytol=None, spec_funs=None):
kwargs = {'zs': zs}
phase_cls = phase.__class__
s00 = 'd%s_d%s_%s' %(goal_specs[0], state_specs[0], state_specs[1])
s01 = 'd%s_d%s_%s' %(goal_specs[0], state_specs[1], state_specs[0])
s10 = 'd%s_d%s_%s' %(goal_specs[1], state_specs[0], state_specs[1])
s11 = 'd%s_d%s_%s' %(goal_specs[1], state_specs[1], state_specs[0])
try:
err0_fun = getattr(phase_cls, goal_specs[0])
err1_fun = getattr(phase_cls, goal_specs[1])
j00 = getattr(phase_cls, s00)
j01 = getattr(phase_cls, s01)
j10 = getattr(phase_cls, s10)
j11 = getattr(phase_cls, s11)
    except Exception:
        pass
cache = []
def to_solve(states):
kwargs[state_specs[0]] = float(states[0])
kwargs[state_specs[1]] = float(states[1])
new = phase.to(**kwargs)
try:
v0, v1 = err0_fun(new), err1_fun(new)
jac = [[j00(new), j01(new)],
[j10(new), j11(new)]]
        except Exception:
v0, v1 = new.value(goal_specs[0]), new.value(goal_specs[1])
jac = [[new.value(s00), new.value(s01)],
[new.value(s10), new.value(s11)]]
if spec_funs is not None:
err0 = v0 - spec_funs[0](new)
err1 = v1 - spec_funs[1](new)
else:
err0 = v0 - spec_vals[0]
err1 = v1 - spec_vals[1]
errs = [err0, err1]
cache[:] = [new, errs, jac]
        # print(kwargs, errs)  # debug output
        return errs, jac
states, iterations = newton_system(to_solve, x0=guesses, jac=True, xtol=xtol,
ytol=ytol, maxiter=maxiter, damping_func=damping_maintain_sign)
phase = cache[0]
err = cache[1]
jac = cache[2]
return states, phase, iterations, err, jac
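# Sketch of a two-variable Newton solve, e.g. matching V and U at unknown T, P
# (hypothetical single-phase object `phase` and rough guesses):
# states, p, niter, err, jac = TPV_double_solve_1P(
#     zs, phase, guesses=[300.0, 1e5], spec_vals=(0.02, -100.0),
#     goal_specs=('V', 'U'), state_specs=('T', 'P'))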
def assert_stab_success_2P(liq, gas, stab, T, P, zs, guess_name, xs=None,
ys=None, VF=None, SS_tol=1e-15, rtol=1e-7):
    r'''Basic test function - perform a specified stability test, then a two-phase
    flash using its result, and check specified variables to confirm the method
    is working.
    '''
gas = gas.to(T=T, P=P, zs=zs)
liq = liq.to(T=T, P=P, zs=zs)
trial_comp = stab.incipient_guess_named(T, P, zs, guess_name)
if liq.G() < gas.G():
min_phase, other_phase = liq, gas
else:
min_phase, other_phase = gas, liq
_, _, _, V_over_F, trial_zs, appearing_zs, dG_RT = stability_iteration_Michelsen(min_phase, trial_comp, test_phase=other_phase, maxiter=100)
V_over_F, xs_calc, ys_calc, l, g, iteration, err = sequential_substitution_2P(T=T, P=P, V=None,
zs=zs, xs_guess=trial_zs, ys_guess=appearing_zs,
liquid_phase=min_phase, tol=SS_tol,
gas_phase=other_phase)
if xs_calc is not None:
assert_close1d(xs, xs_calc, rtol)
if ys_calc is not None:
assert_close1d(ys, ys_calc, rtol)
if VF is not None:
assert_close(V_over_F, VF, rtol)
assert_close1d(l.fugacities(), g.fugacities(), rtol)
def TPV_solve_HSGUA_guesses_VL(zs, method, constants, correlations,
fixed_var_val, spec_val,
iter_var='T', fixed_var='P', spec='H',
maxiter=20, xtol=1E-7, ytol=None,
bounded=False, min_bound=None, max_bound=None,
user_guess=None, last_conv=None, T_ref=298.15,
P_ref=101325.0):
global V_over_F_guess
V_over_F_guess = 0.5
cmps = range(constants.N)
Tcs, Pcs, omegas = constants.Tcs, constants.Pcs, constants.omegas
if fixed_var == iter_var:
raise ValueError("Fixed variable cannot be the same as iteration variable")
if fixed_var not in ('T', 'P', 'V'):
raise ValueError("Fixed variable must be one of `T`, `P`, `V`")
if iter_var not in ('T', 'P', 'V'):
raise ValueError("Iteration variable must be one of `T`, `P`, `V`")
if spec not in ('H', 'S', 'G', 'U', 'A'):
raise ValueError("Spec variable must be one of `H`, `S`, `G` `U`, `A`")
cmps = range(len(zs))
iter_T = iter_var == 'T'
iter_P = iter_var == 'P'
iter_V = iter_var == 'V'
fixed_P = fixed_var == 'P'
fixed_T = fixed_var == 'T'
fixed_V = fixed_var == 'V'
if fixed_P:
P = fixed_var_val
elif fixed_T:
T = fixed_var_val
elif fixed_V:
V = fixed_var_val
always_S = spec in ('S', 'G', 'A')
always_H = spec in ('H', 'G', 'U', 'A')
always_V = spec in ('U', 'A')
def H_model(T, P, xs, ys, V_over_F):
if V_over_F >= 1.0:
return H_model_g(T, P, zs)
elif V_over_F <= 0.0:
return H_model_l(T, P, zs)
H_liq = H_model_l(T, P, xs)
H_gas = H_model_g(T, P, ys)
return H_liq*(1.0 - V_over_F) + V_over_F*H_gas
def S_model(T, P, xs, ys, V_over_F):
if V_over_F >= 1.0:
return S_model_g(T, P, zs)
elif V_over_F <= 0.0:
return S_model_l(T, P, zs)
S_liq = S_model_l(T, P, xs)
S_gas = S_model_g(T, P, ys)
return S_liq*(1.0 - V_over_F) + V_over_F*S_gas
def V_model(T, P, xs, ys, V_over_F):
if V_over_F >= 1.0:
return V_model_g(T, P, zs)
elif V_over_F <= 0.0:
return V_model_l(T, P, zs)
V_liq = V_model_l(T, P, xs)
V_gas = V_model_g(T, P, ys)
return V_liq*(1.0 - V_over_F) + V_over_F*V_gas
    # Reference-state terms used by entropy-containing specs
if always_S:
P_ref_inv = 1.0/P_ref
        dS_ideal = R*sum([zi*log(zi) for zi in zs if zi > 0.0]) # ideal mixing entropy contribution
info = []
def err(guess):
# Translate the fixed variable to a local variable
if fixed_P:
P = fixed_var_val
elif fixed_T:
T = fixed_var_val
elif fixed_V:
V = fixed_var_val
T = None
# Translate the iteration variable to a local variable
if iter_P:
P = guess
if not fixed_V:
V = None
elif iter_T:
T = guess
if not fixed_V:
V = None
elif iter_V:
V = guess
T = None
if T is None:
T = T_from_V(V, P, zs)
VF, xs, ys = flash_model(T, P, zs)
info[:] = VF, xs, ys
# Compute S, H, V as necessary
if always_S:
S = S_model(T, P, xs, ys, VF) - dS_ideal - R*log(P*P_ref_inv)
if always_H:
H = H_model(T, P, xs, ys, VF)
if always_V and V is None:
V = V_model(T, P, xs, ys, VF)
# Return the objective function
if spec == 'H':
err = H - spec_val
elif spec == 'S':
err = S - spec_val
elif spec == 'G':
err = (H - T*S) - spec_val
elif spec == 'U':
err = (H - P*V) - spec_val
elif spec == 'A':
err = (H - P*V - T*S) - spec_val
# print(T, P, V, 'TPV', err)
return err
# Common models
VolumeLiquids = correlations.VolumeLiquids
def V_model_l(T, P, zs):
V_calc = 0.
for i in cmps:
V_calc += zs[i]*VolumeLiquids[i].T_dependent_property(T)
return V_calc
def T_from_V_l(V, P, zs):
T_calc = 0.
for i in cmps:
T_calc += zs[i]*VolumeLiquids[i].solve_property(V)
return T_calc
def V_model_g(T, P, zs):
return R*T/P
def T_from_V_g(V, P, zs):
return P*V/R
if method == IDEAL_WILSON or method == SHAW_ELEMENTAL:
if iter_P:
if fixed_T:
T_inv = 1.0/T
Ks_P = [Pcs[i]*exp((5.37*(1.0 + omegas[i])*(1.0 - Tcs[i]*T_inv))) for i in cmps]
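            # Wilson correlation: K_i = (Pc_i/P)*exp(5.37*(1+omega_i)*(1-Tc_i/T)).
            # Worked check (assumed values): Tc=500 K, Pc=4e6 Pa, omega=0.3 at
            # T=400 K gives exp(5.37*1.3*(1-1.25)) ~= 0.1746, so K ~= 6.98 at 1e5 Pa.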
def flash_model(T, P, zs):
global V_over_F_guess
P_inv = 1.0/P
if not fixed_T:
T_inv = 1.0/T
Ks_P_local = [Pcs[i]*exp((5.37*(1.0 + omegas[i])*(1.0 - Tcs[i]*T_inv))) for i in cmps]
Ks = [Ki*P_inv for Ki in Ks_P_local]
else:
Ks = [Ki*P_inv for Ki in Ks_P]
K_low, K_high = False, False
for i in cmps:
if zs[i] != 0.0:
if Ks[i] > 1.0:
K_high = True
else:
K_low = True
if K_high and K_low:
break
if K_high and K_low:
V_over_F_guess, xs, ys = Rachford_Rice_solution_LN2(zs, Ks, V_over_F_guess)
return V_over_F_guess, xs, ys
elif K_high:
return 1.0, zs, zs
else:
return 0.0, zs, zs
else:
P_inv = 1.0/P
def flash_model(T, P, zs):
global V_over_F_guess
T_inv = 1.0/T
Ks = [Pcs[i]*P_inv*exp((5.37*(1.0 + omegas[i])*(1.0 - Tcs[i]*T_inv))) for i in cmps]
K_low, K_high = False, False
for i in cmps:
if zs[i] != 0.0:
if Ks[i] > 1.0:
K_high = True
else:
K_low = True
if K_high and K_low:
break
if K_high and K_low:
V_over_F_guess, xs, ys = Rachford_Rice_solution_LN2(zs, Ks, V_over_F_guess)
return V_over_F_guess, xs, ys
elif K_high:
return 1.0, zs, zs
else:
return 0.0, zs, zs
if method == SHAW_ELEMENTAL:
VolumeLiquids = correlations.VolumeLiquids
MWs, n_atoms = constants.MWs, constants.n_atoms
def H_model_g(T, P, zs):
MW_g, sv_g = 0.0, 0.0
for i in cmps:
MW_g += MWs[i]*zs[i]
sv_g += n_atoms[i]*zs[i]
sv_g /= MW_g
H_ref_LS = Lastovka_Shaw_integral(T_ref, sv_g)
H1 = Lastovka_Shaw_integral(T, sv_g)
dH = H1 - H_ref_LS
H_gas = 1e-3*dH*MW_g #property_mass_to_molar(dH, MW_g)
return H_gas
def S_model_g(T, P, zs):
MW_g, sv_g = 0.0, 0.0
for i in cmps:
MW_g += MWs[i]*zs[i]
sv_g += n_atoms[i]*zs[i]
sv_g /= MW_g
S_ref_LS = Lastovka_Shaw_integral_over_T(T_ref, sv_g)
S1 = Lastovka_Shaw_integral_over_T(T, sv_g)
dS = S1 - S_ref_LS
S_gas = 1e-3*dS*MW_g
return S_gas
def H_model_l(T, P, zs):
MW_l, sv_l, Tc_l, omega_l = 0.0, 0.0, 0.0, 0.0
for i in cmps:
MW_l += MWs[i]*zs[i]
sv_l += n_atoms[i]*zs[i]
Tc_l += Tcs[i]*zs[i]
omega_l += omegas[i]*zs[i]
sv_l /= MW_l
H_ref_DS = Dadgostar_Shaw_integral(T_ref, sv_l)
H1 = Dadgostar_Shaw_integral(T, sv_l)
Hvap = SMK(T, Tc_l, omega_l)
dH = H1 - H_ref_DS
H_liq = 1e-3*dH*MW_l #property_mass_to_molar(dH, MW_l)
return (H_liq - Hvap)
def S_model_l(T, P, zs):
MW_l, sv_l, Tc_l, omega_l = 0.0, 0.0, 0.0, 0.0
for i in cmps:
MW_l += MWs[i]*zs[i]
sv_l += n_atoms[i]*zs[i]
Tc_l += Tcs[i]*zs[i]
omega_l += omegas[i]*zs[i]
sv_l /= MW_l
S_ref_DS = Dadgostar_Shaw_integral_over_T(T_ref, sv_l)
S1 = Dadgostar_Shaw_integral_over_T(T, sv_l)
Hvap = SMK(T, Tc_l, omega_l)
dS = S1 - S_ref_DS
S_liq = 1e-3*dS*MW_l
return (S_liq - Hvap/T)
elif method == IDEAL_WILSON:
HeatCapacityGases = correlations.HeatCapacityGases
EnthalpyVaporizations = correlations.EnthalpyVaporizations
def flash_model(T, P, zs):
_, _, VF, xs, ys = flash_wilson(zs, constants.Tcs, constants.Pcs, constants.omegas, T=T, P=P)
return VF, xs, ys
def H_model_g(T, P, zs):
H_calc = 0.
for i in cmps:
H_calc += zs[i]*HeatCapacityGases[i].T_dependent_property_integral(T_ref, T)
return H_calc
def S_model_g(T, P, zs):
S_calc = 0.
for i in cmps:
S_calc += zs[i]*HeatCapacityGases[i].T_dependent_property_integral_over_T(T_ref, T)
return S_calc
def H_model_l(T, P, zs):
H_calc = 0.
for i in cmps:
H_calc += zs[i]*(HeatCapacityGases[i].T_dependent_property_integral(T_ref, T) - EnthalpyVaporizations[i](T))
return H_calc
def S_model_l(T, P, zs):
S_calc = 0.
T_inv = 1.0/T
for i in cmps:
S_calc += zs[i]*(HeatCapacityGases[i].T_dependent_property_integral_over_T(T_ref, T) - T_inv*EnthalpyVaporizations[i](T))
return S_calc
try:
        # All three variables P, T, V are positive but can grow unbounded, so
        # for the secant method, only the lower bound is set
if iter_T:
guess = 298.15
elif iter_P:
guess = 101325.0
elif iter_V:
guess = 0.024465403697038125
val = secant(err, guess, xtol=xtol, ytol=ytol,
maxiter=maxiter, bisection=True, low=min_bound, require_xtol=False)
return val, info[0], info[1], info[2]
    except UnconvergedError:
val = brenth(err, min_bound, max_bound, xtol=xtol, ytol=ytol, maxiter=maxiter)
return val, info[0], info[1], info[2]
global cm_flash
cm_flash = None
def cm_flash_tol():
global cm_flash
if cm_flash is not None:
return cm_flash
from matplotlib.colors import ListedColormap
N = 100
vals = np.zeros((N, 4))
vals[:, 3] = np.ones(N)
# Grey for 1e-10 to 1e-7
low = 40
vals[:low, 0] = np.linspace(100/256, 1, low)[::-1]
vals[:low, 1] = np.linspace(100/256, 1, low)[::-1]
vals[:low, 2] = np.linspace(100/256, 1, low)[::-1]
# green 1e-6 to 1e-5
ok = 50
vals[low:ok, 1] = np.linspace(100/256, 1, ok-low)[::-1]
# Blue 1e-5 to 1e-3
mid = 70
vals[ok:mid, 2] = np.linspace(100/256, 1, mid-ok)[::-1]
# Red 1e-3 and higher
vals[mid:101, 0] = np.linspace(100/256, 1, 100-mid)[::-1]
newcmp = ListedColormap(vals)
cm_flash = newcmp
return cm_flash
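# Sketch: color a grid of flash errors with this colormap (matplotlib assumed;
# `Ts`, `Ps`, `errs` are hypothetical 2D arrays of conditions and errors):
# import matplotlib.pyplot as plt
# plt.pcolormesh(Ts, Ps, errs, cmap=cm_flash_tol())
# plt.colorbar()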
def deduplicate_stab_results(results, tol_frac_err=5e-3):
if not results:
return results
N = len(results[0][0])
cmps = range(N)
results.sort(key=lambda x: (x[0][0], x[2]))
good_results = [results[0]]
for t in results[1:]:
xs_last, ys_last = good_results[-1][0], good_results[-1][1]
xs, ys = t[0], t[1]
diff_x = sum([abs(xs[i] - xs_last[i]) for i in cmps])/N
diff_y = sum([abs(ys[i] - ys_last[i]) for i in cmps])/N
if diff_x > tol_frac_err or diff_y > tol_frac_err:
good_results.append(t)
return good_results
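# e.g. stability results of the form [(xs, ys, V_over_F, ...), ...] with
# near-identical compositions collapse to one representative:
# unique = deduplicate_stab_results(all_results, tol_frac_err=5e-3)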
empty_flash_conv = {'iterations': 0, 'err': 0.0, 'stab_guess_name': None}
one_in_list = [1.0]
empty_list = []
| mit | 2,218,052,274,649,085,200 | 36.456169 | 227 | 0.514281 | false | 3.045998 | false | false | false |
google-research/sound-separation | models/train/train_with_estimator.py | 2 | 2729 | # Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Train helper for source separation using tf.estimator."""
import tensorflow.compat.v1 as tf
from . import inference_graph
def execute(model_fn, input_fn, **params):
"""Execute train or eval and/or inference graph writing.
Args:
model_fn: An estimator compatible function taking parameters
(features, labels, mode, params) that returns a EstimatorSpec.
input_fn: An estimator compatible function taking 'params' that returns a
dataset
**params: Dict of additional params to pass to both model_fn and input_fn.
"""
if params['write_inference_graph']:
inference_graph.write(model_fn, input_fn, params, params['model_dir'])
def estimator_model_fn(features, labels, mode, params):
spec = model_fn(features, labels, mode, params)
return spec
def train_input_fn():
train_params = params.copy()
train_params['input_data'] = params['input_data_train']
train_params['batch_size'] = params['train_batch_size']
if params['randomize_training']:
train_params['randomize_order'] = True
return input_fn(train_params)
def eval_input_fn():
eval_params = params.copy()
eval_params['input_data'] = params['input_data_eval']
eval_params['batch_size'] = params['eval_batch_size']
return input_fn(eval_params)
train_spec = tf.estimator.TrainSpec(input_fn=train_input_fn,
max_steps=params['train_steps'])
eval_steps = int(round(params['eval_examples'] / params['eval_batch_size']))
eval_spec = tf.estimator.EvalSpec(
name=params['eval_suffix'], input_fn=eval_input_fn, steps=eval_steps,
throttle_secs=params.get('eval_throttle_secs', 600))
run_config = tf.estimator.RunConfig(
model_dir=params['model_dir'],
save_summary_steps=params['save_summary_steps'],
save_checkpoints_secs=params['save_checkpoints_secs'],
keep_checkpoint_every_n_hours=params['keep_checkpoint_every_n_hours'])
estimator = tf.estimator.Estimator(
model_fn=estimator_model_fn,
params=params,
config=run_config)
tf.estimator.train_and_evaluate(estimator, train_spec, eval_spec)
| apache-2.0 | 7,421,683,461,617,904,000 | 36.383562 | 78 | 0.696592 | false | 3.790278 | false | false | false |
lampwins/netbox | netbox/dcim/migrations/0052_virtual_chassis.py | 2 | 1701 | # -*- coding: utf-8 -*-
# Generated by Django 1.11.6 on 2017-11-27 17:27
import django.core.validators
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('dcim', '0051_rackreservation_tenant'),
]
operations = [
migrations.CreateModel(
name='VirtualChassis',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('domain', models.CharField(blank=True, max_length=30)),
('master', models.OneToOneField(default=1, on_delete=django.db.models.deletion.PROTECT, related_name='vc_master_for', to='dcim.Device')),
],
),
migrations.AddField(
model_name='device',
name='virtual_chassis',
field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='members', to='dcim.VirtualChassis'),
),
migrations.AddField(
model_name='device',
name='vc_position',
field=models.PositiveSmallIntegerField(blank=True, null=True, validators=[django.core.validators.MaxValueValidator(255)]),
),
migrations.AddField(
model_name='device',
name='vc_priority',
field=models.PositiveSmallIntegerField(blank=True, null=True, validators=[django.core.validators.MaxValueValidator(255)]),
),
migrations.AlterUniqueTogether(
name='device',
unique_together=set([('virtual_chassis', 'vc_position'), ('rack', 'position', 'face')]),
),
]
| apache-2.0 | 352,704,305,173,732,540 | 39.5 | 155 | 0.611405 | false | 4.169118 | false | false | false |
Mester/demo-day-vikings | music_app/utils.py | 1 | 2858 | import re
import os
try:
import urllib.parse as urlparse
except ImportError:
import urlparse
try:
import collections.abc as collections
except ImportError:
import collections
from music_app.settings import DATABASE_NAME
from tinydb import TinyDB, Query
def parse_listing(data):
"""Method to parse the listing from data"""
songs = [{key:song[key] for key in song.keys() if key in ['url', 'score', 'created_utc', 'thumbnail',
'title']} for song in [flatten(thing['data']) for thing
in data['data']['children']
if thing['kind'] == 't3']]
for song in songs:
parsed = parse_title(song['title'])
if parsed is None:
continue
song.update(parsed)
return songs
def flatten(d, parent_key='', sep='_'):
"""Flatten a dictionary of dictionaries"""
items = []
for k, v in d.items():
new_key = parent_key + sep + k if parent_key else k
if isinstance(v, collections.MutableMapping):
items.extend(flatten(v, new_key, sep=sep).items())
else:
items.append((new_key, v))
return dict(items)
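# e.g. flatten({'a': {'b': 1}, 'c': 2}) -> {'a_b': 1, 'c': 2}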
def parse_title(title):
"""
Returns parsed contents of a post's title
"""
ro = re.compile(r"""
(?P<artist>.+[^- ]+) # The artist
\s*-+\s* # Skip some spaces and dashes
(?P<title>.*) # The title
\s*\[ # Skip some spaces and opening bracket
(?P<genre>.*) # The genre
\]\s*\( # Skip closing bracket, spaces and opening parenthesis
(?P<year>\d+) # The year
\) # Skip closing parenthesis
""", re.VERBOSE | re.IGNORECASE)
mo = ro.search(title)
if mo is None:
return
return {'artist': mo.group('artist'), 'title': mo.group('title'), 'genre': mo.group('genre'), 'year': mo.group(
'year')}
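# e.g. parse_title("Daft Punk - Around the World [House] (1997)") gives roughly:
# {'artist': 'Daft Punk', 'title': 'Around the World ', 'genre': 'House', 'year': '1997'}
# (the greedy title group can retain a trailing space)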
def get_genres(database_name):
"""Utility method to get all the genres as a set"""
db = TinyDB(os.path.join(os.getcwd(), database_name))
all_genres = { song['genre'] for song in db.all() }
specific_genres = set()
for genre in all_genres:
specific_genres = specific_genres.union(set(genre.strip().split('/')))
db.close()
return _strip_spaces(specific_genres)
def _strip_spaces(genres):
"""Helper method to strip spaces and remove duplicates from genres """
return { x.strip() for x in genres }
def get_total_songs(database_name):
"""Utility Method to get the total number of songs in the database"""
db = TinyDB(os.path.join(os.getcwd(), database_name))
total_length = len(db.all())
db.close()
return total_length
| unlicense | -3,554,098,026,968,036,400 | 35.641026 | 117 | 0.555983 | false | 3.986053 | false | false | false |
pacificclimate/climate-explorer-netcdf-tests | util/grids.py | 1 | 1158 | import datetime
# Approximate representation of PRISM 400m grid
bc_400m = {'lon': {'start': -140, 'step': 0.008333333, 'count': 1680 },
'lat': {'start': 48, 'step': 0.008333333, 'count': 3241 } }
# Approximate representation of BCSD/BCCAQ grid
canada_5k = {'lon': {'start': -141, 'step': 0.08333333, 'count': 1068 },
'lat': {'start': 41, 'step': 0.08333333, 'count': 510 } }
# Approximate representation of high res CMIP5 (MIROC5) grid
world_125k = {'lon': {'start': 0, 'step': 1.40625, 'count': 256 },
'lat': {'start': -89.296875, 'step': 1.40625, 'count': 128 } }
# Approximate representation of standard CMIP5 (CanESM) grid
world_250k = {'lon': {'start': 0, 'step': 2.8125, 'count': 128 },
'lat': {'start': -88.59375, 'step': 2.8125, 'count': 64 } }
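# Minimal helper sketch (an assumption, not part of the original file):
# expand one of the grid specs above into explicit coordinate lists.
def grid_coords(g):
    lons = [g['lon']['start'] + i * g['lon']['step'] for i in range(g['lon']['count'])]
    lats = [g['lat']['start'] + i * g['lat']['step'] for i in range(g['lat']['count'])]
    return lons, lats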
# Timescales
start_day = datetime.date(1950,1,1)
end_day = datetime.date(2100,1,1)
timescales = {'seasonal': range(17), # the seasonal index
'annual': range(1950, 2100), # the year
'monthly': range(12 * 150), # months since January 1950
'daily':range((end_day - start_day).days)} # days since January 1, 1950 | gpl-3.0 | -8,089,446,457,915,485,000 | 45.36 | 85 | 0.594991 | false | 2.909548 | false | true | false |
codl/forget | migrations/versions/7afc95e24778_init.py | 1 | 4244 | """init
Revision ID: 7afc95e24778
Revises:
Create Date: 2017-08-03 11:51:08.190298
"""
from alembic import op
import sqlalchemy as sa
# revision identifiers, used by Alembic.
revision = '7afc95e24778'
down_revision = None
branch_labels = None
depends_on = None
def upgrade():
op.create_table('accounts',
sa.Column('created_at', sa.DateTime(), server_default=sa.text('now()'), nullable=False),
sa.Column('updated_at', sa.DateTime(), server_default=sa.text('now()'), nullable=False),
sa.Column('id', sa.String(), nullable=False),
sa.Column('policy_enabled', sa.Boolean(), server_default='FALSE', nullable=False),
sa.Column('policy_keep_latest', sa.Integer(), server_default='0', nullable=False),
sa.Column('policy_keep_favourites', sa.Boolean(), server_default='TRUE', nullable=False),
sa.Column('policy_delete_every', sa.Interval(), server_default='0', nullable=False),
sa.Column('policy_keep_younger', sa.Interval(), server_default='0', nullable=False),
sa.Column('display_name', sa.String(), nullable=True),
sa.Column('screen_name', sa.String(), nullable=True),
sa.Column('avatar_url', sa.String(), nullable=True),
sa.Column('last_fetch', sa.DateTime(), server_default='epoch', nullable=True),
sa.Column('last_delete', sa.DateTime(), server_default='epoch', nullable=True),
sa.PrimaryKeyConstraint('id', name=op.f('pk_accounts'))
)
op.create_table('oauth_tokens',
sa.Column('created_at', sa.DateTime(), server_default=sa.text('now()'), nullable=False),
sa.Column('updated_at', sa.DateTime(), server_default=sa.text('now()'), nullable=False),
sa.Column('token', sa.String(), nullable=False),
sa.Column('token_secret', sa.String(), nullable=False),
sa.Column('account_id', sa.String(), nullable=True),
sa.ForeignKeyConstraint(['account_id'], ['accounts.id'], name=op.f('fk_oauth_tokens_account_id_accounts'), onupdate='CASCADE', ondelete='CASCADE'),
sa.PrimaryKeyConstraint('token', name=op.f('pk_oauth_tokens'))
)
op.create_table('posts',
sa.Column('created_at', sa.DateTime(), server_default=sa.text('now()'), nullable=False),
sa.Column('updated_at', sa.DateTime(), server_default=sa.text('now()'), nullable=False),
sa.Column('id', sa.String(), nullable=False),
sa.Column('body', sa.String(), nullable=True),
sa.Column('author_id', sa.String(), nullable=False),
sa.Column('favourite', sa.Boolean(), server_default='FALSE', nullable=False),
sa.ForeignKeyConstraint(['author_id'], ['accounts.id'], name=op.f('fk_posts_author_id_accounts'), onupdate='CASCADE', ondelete='CASCADE'),
sa.PrimaryKeyConstraint('id', name=op.f('pk_posts'))
)
op.create_table('sessions',
sa.Column('created_at', sa.DateTime(), server_default=sa.text('now()'), nullable=False),
sa.Column('updated_at', sa.DateTime(), server_default=sa.text('now()'), nullable=False),
sa.Column('id', sa.String(), nullable=False),
sa.Column('account_id', sa.String(), nullable=False),
sa.ForeignKeyConstraint(['account_id'], ['accounts.id'], name=op.f('fk_sessions_account_id_accounts'), onupdate='CASCADE', ondelete='CASCADE'),
sa.PrimaryKeyConstraint('id', name=op.f('pk_sessions'))
)
op.create_table('twitter_archives',
sa.Column('created_at', sa.DateTime(), server_default=sa.text('now()'), nullable=False),
sa.Column('updated_at', sa.DateTime(), server_default=sa.text('now()'), nullable=False),
sa.Column('id', sa.Integer(), nullable=False),
sa.Column('account_id', sa.String(), nullable=False),
sa.Column('body', sa.LargeBinary(), nullable=False),
sa.Column('chunks', sa.Integer(), nullable=True),
sa.Column('chunks_successful', sa.Integer(), server_default='0', nullable=False),
sa.Column('chunks_failed', sa.Integer(), server_default='0', nullable=False),
sa.ForeignKeyConstraint(['account_id'], ['accounts.id'], name=op.f('fk_twitter_archives_account_id_accounts'), onupdate='CASCADE', ondelete='CASCADE'),
sa.PrimaryKeyConstraint('id', name=op.f('pk_twitter_archives'))
)
def downgrade():
op.drop_table('twitter_archives')
op.drop_table('sessions')
op.drop_table('posts')
op.drop_table('oauth_tokens')
op.drop_table('accounts')
| isc | -6,913,526,836,987,514,000 | 50.756098 | 155 | 0.674599 | false | 3.425343 | false | false | false |
assafnativ/NativDebugging | src/Win32/BreakPoint.py | 1 | 1475 | #
# BreakPoint.py
#
# https://github.com/assafnativ/NativDebugging.git
# [email protected]
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>
BREAK_POINT_HIDE = 2
BREAK_POINT_ACTIVE = 1
BREAK_POINT_DEACTIVE    = 0     # Canceled, use ~BREAK_POINT_ACTIVE
BREAK_POINT_BYTE = ord('\xcc')
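# 0xCC is the single-byte x86 INT3 opcode: a software breakpoint is set by
# overwriting the target instruction with this byte, and the saved
# original_byte is written back when the breakpoint is removed.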
class BreakPoint:
def __init__( self, \
address = -1, \
state = BREAK_POINT_DEACTIVE, \
original_byte = None, \
proc = None ):
"""
Constructor of the BreakPoint class.
"""
self.address = address
self.state = state
self.original_byte = original_byte
self.proc = proc
| gpl-3.0 | 8,445,425,550,363,102,000 | 37.864865 | 74 | 0.568136 | false | 4.074586 | false | false | false |
vhazali/cs5331 | assignment3/verifier/scapy-2.3.1/scapy/arch/unix.py | 15 | 5795 | ## This file is part of Scapy
## See http://www.secdev.org/projects/scapy for more informations
## Copyright (C) Philippe Biondi <[email protected]>
## This program is published under a GPLv2 license
"""
Common customizations for all Unix-like operating systems other than Linux
"""
import sys,os,struct,socket,time
from fcntl import ioctl
from scapy.error import warning
import scapy.config
import scapy.utils
import scapy.utils6
import scapy.arch
scapy.config.conf.use_pcap = 1
scapy.config.conf.use_dnet = 1
from pcapdnet import *
##################
## Routes stuff ##
##################
def read_routes():
if scapy.arch.SOLARIS:
f=os.popen("netstat -rvn") # -f inet
elif scapy.arch.FREEBSD:
f=os.popen("netstat -rnW") # -W to handle long interface names
else:
f=os.popen("netstat -rn") # -f inet
ok = 0
mtu_present = False
prio_present = False
routes = []
pending_if = []
for l in f.readlines():
if not l:
break
l = l.strip()
if l.find("----") >= 0: # a separation line
continue
if not ok:
if l.find("Destination") >= 0:
ok = 1
mtu_present = l.find("Mtu") >= 0
prio_present = l.find("Prio") >= 0
continue
if not l:
break
if scapy.arch.SOLARIS:
lspl = l.split()
if len(lspl) == 10:
dest,mask,gw,netif,mxfrg,rtt,ref,flg = lspl[:8]
else: # missing interface
dest,mask,gw,mxfrg,rtt,ref,flg = lspl[:7]
netif=None
else:
rt = l.split()
dest,gw,flg = rt[:3]
netif = rt[5+mtu_present+prio_present]
if flg.find("Lc") >= 0:
continue
if dest == "default":
dest = 0L
netmask = 0L
else:
if scapy.arch.SOLARIS:
netmask = scapy.utils.atol(mask)
elif "/" in dest:
dest,netmask = dest.split("/")
netmask = scapy.utils.itom(int(netmask))
else:
netmask = scapy.utils.itom((dest.count(".") + 1) * 8)
dest += ".0"*(3-dest.count("."))
dest = scapy.utils.atol(dest)
if not "G" in flg:
gw = '0.0.0.0'
if netif is not None:
ifaddr = scapy.arch.get_if_addr(netif)
routes.append((dest,netmask,gw,netif,ifaddr))
else:
pending_if.append((dest,netmask,gw))
f.close()
# On Solaris, netstat does not provide output interfaces for some routes
# We need to parse completely the routing table to route their gw and
# know their output interface
for dest,netmask,gw in pending_if:
gw_l = scapy.utils.atol(gw)
max_rtmask,gw_if,gw_if_addr, = 0,None,None
for rtdst,rtmask,_,rtif,rtaddr in routes[:]:
if gw_l & rtmask == rtdst:
if rtmask >= max_rtmask:
max_rtmask = rtmask
gw_if = rtif
gw_if_addr = rtaddr
if gw_if:
routes.append((dest,netmask,gw,gw_if,gw_if_addr))
else:
warning("Did not find output interface to reach gateway %s" % gw)
return routes
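# Each entry returned by read_routes() is a 5-tuple:
#   (dest, netmask, gw, iface, ifaddr)
# where dest and netmask are integers (see scapy.utils.atol) and gw is a
# dotted-quad string.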
############
### IPv6 ###
############
def in6_getifaddr():
"""
Returns a list of 3-tuples of the form (addr, scope, iface) where
'addr' is the address of scope 'scope' associated to the interface
'ifcace'.
This is the list of all addresses of all interfaces available on
the system.
"""
ret = []
i = dnet.intf()
for int in i:
ifname = int['name']
v6 = []
if int.has_key('alias_addrs'):
v6 = int['alias_addrs']
for a in v6:
if a.type != dnet.ADDR_TYPE_IP6:
continue
xx = str(a).split('/')[0]
addr = scapy.utils6.in6_ptop(xx)
scope = scapy.utils6.in6_getscope(addr)
ret.append((xx, scope, ifname))
return ret
def read_routes6():
f = os.popen("netstat -rn -f inet6")
ok = False
mtu_present = False
prio_present = False
routes = []
lifaddr = in6_getifaddr()
for l in f.readlines():
if not l:
break
l = l.strip()
if not ok:
if l.find("Destination") >= 0:
ok = 1
mtu_present = l.find("Mtu") >= 0
prio_present = l.find("Prio") >= 0
continue
# gv 12/12/06: under debugging
if scapy.arch.NETBSD or scapy.arch.OPENBSD:
lspl = l.split()
d,nh,fl = lspl[:3]
dev = lspl[5+mtu_present+prio_present]
else: # FREEBSD or DARWIN
d,nh,fl,dev = l.split()[:4]
if filter(lambda x: x[2] == dev, lifaddr) == []:
continue
if 'L' in fl: # drop MAC addresses
continue
if 'link' in nh:
nh = '::'
cset = [] # candidate set (possible source addresses)
dp = 128
if d == 'default':
d = '::'
dp = 0
if '/' in d:
d,dp = d.split("/")
dp = int(dp)
if '%' in d:
d,dev = d.split('%')
if '%' in nh:
nh,dev = nh.split('%')
if scapy.arch.LOOPBACK_NAME in dev:
cset = ['::1']
nh = '::'
else:
devaddrs = filter(lambda x: x[2] == dev, lifaddr)
cset = scapy.utils6.construct_source_candidate_set(d, dp, devaddrs, scapy.arch.LOOPBACK_NAME)
if len(cset) != 0:
routes.append((d, dp, nh, dev, cset))
f.close()
return routes
| mit | 750,118,604,424,448,800 | 27.268293 | 105 | 0.497325 | false | 3.453516 | false | false | false |
barisser/Swift | main.py | 1 | 15798 | import ecdsa
import ecdsa.der
import ecdsa.util
import hashlib
import os
import re
import struct
import requests
import json
import math
import time
from bitcoin import *
try:
import cPickle as pickle
except:
import pickle
b58 = '123456789ABCDEFGHJKLMNPQRSTUVWXYZabcdefghijkmnopqrstuvwxyz'
master_address='1GgwA7c2ovgWDBoVYsHT5VYXw2QBey1EdF'
subkey_complexity=32
standard_fee=0.0001
minincrement=0.001 #min BTC per address (smallest addresses)
increment_base=2
def base58encode(n):
result = ''
while n > 0:
result = b58[n%58] + result
n /= 58
return result
def base256decode(s):
result = 0
for c in s:
result = result * 256 + ord(c)
return result
def countLeadingChars(s, ch):
count = 0
for c in s:
if c == ch:
count += 1
else:
break
return count
# https://en.bitcoin.it/wiki/Base58Check_encoding
def base58CheckEncode(version, payload):
s = chr(version) + payload
checksum = hashlib.sha256(hashlib.sha256(s).digest()).digest()[0:4]
result = s + checksum
leadingZeros = countLeadingChars(result, '\0')
return '1' * leadingZeros + base58encode(base256decode(result))
def privateKeyToWif(key_hex):
return base58CheckEncode(0x80, key_hex.decode('hex'))
def privateKeyToPublicKey(s):
sk = ecdsa.SigningKey.from_string(s.decode('hex'), curve=ecdsa.SECP256k1)
vk = sk.verifying_key
return ('\04' + sk.verifying_key.to_string()).encode('hex')
def pubKeyToAddr(s):
ripemd160 = hashlib.new('ripemd160')
ripemd160.update(hashlib.sha256(s.decode('hex')).digest())
return base58CheckEncode(0, ripemd160.digest())
def keyToAddr(s):
return pubKeyToAddr(privateKeyToPublicKey(s))
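# Address derivation pipeline used above (standard Bitcoin scheme):
#   secret exponent -> SECP256k1 public key -> RIPEMD160(SHA256(pubkey))
#   -> base58check with version byte 0x00.
# Illustrative call (hypothetical input, output omitted):
#   keyToAddr(hashlib.sha256('some passphrase').hexdigest())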
# Generate a random private key
def generate_subkeys():
a=[]
a.append(os.urandom(subkey_complexity).encode('hex')) #subkey1
a.append(os.urandom(subkey_complexity).encode('hex')) #subkey2
return a
def generate_privatekey(subkey1,subkey2):
keysum=subkey1+subkey2
secret_exponent=hashlib.sha256(keysum).hexdigest()
privkey=privateKeyToWif(secret_exponent)
return privkey
def generate_publicaddress(subkey1,subkey2):
keysum=subkey1+subkey2
secret_exponent=hashlib.sha256(keysum).hexdigest()
address=keyToAddr(secret_exponent)
return address
def check_address(public_address):
p='https://blockchain.info/q/addressbalance/'
p=p+public_address
h=requests.get(p)
if h.status_code==200:
return h.content
else:
return -1
def check_address_subkeys(subkey1,subkey2):
global h
address=generate_publicaddress(subkey1,subkey2)
return check_address(address)
def generate_receiving_address(destination_address):
global g,r
a='https://blockchain.info/api/receive?method=create&address='
a=a+destination_address
r=requests.get(a)
receiving_address=''
if r.status_code==200:
g=json.loads(str(r.content))
receiving_address=g['input_address']
return str(receiving_address)
else:
return "ERROR"
#'$receiving_address&callback=$callback_url
class subkeypair:
subkey1='' #user
subkey2='' #swiftcoin
referenceid=''
publicaddress=''
balance=0
myuser=''
received=False
def __init__(self):
self.subkey1=os.urandom(subkey_complexity).encode('hex')
self.subkey2=os.urandom(subkey_complexity).encode('hex')
self.referenceid=os.urandom(subkey_complexity).encode('hex')
self.publicaddress=generate_publicaddress(self.subkey1,self.subkey2)
#return self.publicaddress
def private_key(self):
return generate_privatekey(self.subkey1,self.subkey2)
def roundfloat(s, decimals):
n=s
n=n*math.pow(10,decimals)
n=int(n)
n=float(n/math.pow(10,decimals))
return n
def split_logarithmically(amt,base, min):
global r,s
s=amt
r=int(math.log(amt/min,base))
a=[0]*(r+1)
g=0
v=0
s=int(s/min)
min=1
h=s%min
s=s-h
while s>0.00000000:
print s
g=0
while g<r+1 and s+min/100>=math.pow(base,g)*min:
a[g]=a[g]+1
v=v+1
s=s-math.pow(base,g)*min
g=g+1
if s<1 and s>0:
s=-1
#print v
return a
def split_n(amt,base,min):
r=int(math.log(amt/min,base))
a=[0]*(r+1)
g=0
v=0
s=amt
s=s/min
min=1
while s>0.000000001:
g=0
print s
while g<r+1:# and s+min/100>=float(math.pow(base,g)*min):
a[g]=a[g]+1
v=v+1
s=s-float(int(math.pow(base,g)))*min
g=g+1
if s<1 and s>0:
s=-1
return v
def assemble_logarithmically(amt,base,min, storedset):
s=amt
s=s/min
min=1
a=[0]*len(storedset)
c=[]
for x in storedset:
c.append(x)
g=len(storedset)-1
while g>-1:
if c[g]>0 and s>=math.pow(base,g):
n=int(s/math.pow(base,g))
if n>c[g]:
n=c[g]
c[g]=c[g]-n
a[g]=a[g]+n
print s
s=s-math.pow(base,g)*n
g=g-1
return a
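# Illustrative notes (not part of the original logic): the module-level call
# below is a quick self-test. split_logarithmically(amt, base, min) returns
# per-denomination counts for denominations min*base**i, and
# assemble_logarithmically() greedily reassembles a target amount from a
# stored set of such counts, largest denomination first.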
a=split_logarithmically(100,2,1)
def convert_to_base(x,base):
a=''
n=30
found=False
while n>-1:
r=math.pow(base,n)
#print r
b=int(x/r)
if b>0:
found=True
if found==True:
a=a+str(b)
x=x-b*r
n=n-1
return a
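# Illustrative example: convert_to_base(10, 2) returns '1010'.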
class user:
name=''
totalbalance=0
inputaddress=''
inputsecretexponent='' #passphrase not yet hashed
outputaddress=''
#outputaddress==''
subkeypairs=[]
subkeys=[] #for memory purposes
def __init__(self):
self.inputsecretexponent=os.urandom(subkey_complexity).encode('hex')
self.inputaddress=generate_publicaddress(self.inputsecretexponent,'')
self.outputaddress=m #TEMPORARY
def generate_subaddresses(self, amt): #this takes way too long
a=0
n=split_n(amt,increment_base,minincrement)
while a<n:
#print a
k=subkeypair()
h1=k.subkey1
h2=k.subkey2
self.subkeys.append([h1,h2])
#UPLOAD SUBKEY2 TO OUR DATABASE AND BACK UP
#k.subkey2=''
save()
self.subkeypairs.append(k)
a=a+1
def checkinputaddress(self):
return check_address(self.inputaddress)
def check_and_split(self): #splits input address BTC into new subkeypairs, subkeypairs must already exist
global dests, outs
newsum=float(self.checkinputaddress())/100000000
newsum=newsum/(1+split_n(newsum,increment_base,minincrement)*standard_fee)
print "detected sum: "+str(newsum)
if newsum>0:
splitsums=split_logarithmically(newsum,increment_base,minincrement)
self.totalbalance=self.totalbalance+newsum
else:
splitsums=[]
a=0
outs=[]
dests=[]
s=0
while a<len(splitsums):#for each digit in splitsums
amt=minincrement*math.pow(increment_base,a)# +standard_fee #dont include standard fee in send_many
print str(amt)
#construct arrays for destinations, outputs
h=0
while h<splitsums[a]:
outputvalue=amt
#if h==0:
# outputvalue=outputvalue+standard_fee
outs.append(outputvalue)
try:
dest=self.subkeypairs[s].publicaddress
self.subkeypairs[s].balance=amt
self.subkeypairs[s].received=True
dests.append(dest)
except:
print "insufficient subkeypairs"
s=s+1
h=h+1
a=a+1
outs[0]=outs[0]+standard_fee
send_many(self.inputaddress,outs,dests,standard_fee,0,0,self.inputsecretexponent)
def redeem(self): #redeem received subkeypairs to outputwallet
global fromaddrs, subkey1s, subkey2s
fromaddrs=[]
dest=self.outputaddress
fee=standard_fee
subkey1s=[]
subkey2s=[]
for x in self.subkeypairs:
if x.received==True:
fromaddrs.append(x.publicaddress)
subkey1s.append(x.subkey1)
subkey2s.append(x.subkey2)
send_from_many(fromaddrs,dest,fee,subkey1s,subkey2s)
#def send_from_many(fromaddrs,destination,fee, subkey1,subkey2): #always sends ALL BTC in ALL SOURCE ADDRESSES
def send_to_output(self,amt):
sent=0
ok=True
h=0
while ok:
if sent>=amt:
ok=False
else:
if self.subkeypairs[h].balance>0:
fromaddr=self.subkeypairs[h].publicaddress
                    if self.subkeypairs[h].balance>amt-sent+standard_fee:
fromthisoneamt=amt-sent
else:
fromthisoneamt=self.subkeypairs[h].balance
subkey1=self.subkeypairs[h].subkey1
subkey2=self.subkeypairs[h].subkey2
send(fromaddr,fromthisoneamt,self.outputaddress,standard_fee,subkey1,subkey2)
self.subkeypairs[h].balance=self.subkeypairs[h].balance-fromthisoneamt-standard_fee
sent=sent+fromthisoneamt
h=h+1
def isinside(small,big):
a=len(small)
b=len(big)
f=0
found=False
while f<b-a:
g=''
for x in big[f:f+a]:
g=g+str(x.lower())
if g==small:
f=b-a
found=True
f=f+1
return found
def find_vanity(vanity,n):
k=math.pow(26,n)
a=0
while a<k:
print math.log(a+1,36)
d=os.urandom(subkey_complexity).encode('hex')
b=generate_publicaddress(d,'')
if isinside(vanity,b):
a=k
print "secret exponent: "+str(d)
print "public address: "+str(b)
a=a+1
def send_transaction(fromaddress,amount,destination, fee, privatekey):
#try:
global ins, outs,h, tx, tx2
fee=int(fee*100000000)
amount=int(amount*100000000)
h=unspent(fromaddress)
ins=[]
ok=False
outs=[]
totalfound=0
for x in h:
if not ok:
ins.append(x)
if x['value']>=fee+amount-totalfound:
outs.append({'value':amount,'address':destination})
if x['value']>fee+amount-totalfound:
outs.append({'value':x['value']-amount-fee,'address':fromaddress})
ok=True
totalfound=fee+amount
else:
outs.append({'value':x['value'],'address':destination})
totalfound=totalfound+x['value']
tx=mktx(ins,outs)
tx2=sign(tx,0,privatekey)
#tx3=sign(tx2,1,privatekey)
pushtx(tx2)
print "Sending "+str(amount)+" from "+str(fromaddress)+" to "+str(destination)+" with fee= "+str(fee)+" and secret exponent= "+str(privatekey)
#a='https://blockchain.info/pushtx/'
#b=requests.get(a+tx3)
#if b.response_code==200:
# print b.content
#except:
# print "failed"
def send_many(fromaddr,outputs,destinations,fee, subkey1,subkey2, secretexponent):
global outs,inp, tx, tx2,totalin,b,amounts, totalout
amounts=[]
outs=[]
ins=[]
totalout=0
fee=int(fee*100000000)
#feeouts=[]
for x in outputs:
amounts.append(int(x*100000000))
totalout=totalout+int(x*100000000)
#x in fees:
#feeouts.append(int(x*100000000))
inp=unspent(fromaddr)
totalin=0
for x in inp:
totalin=totalin+x['value']
ins=inp
a=0
b=0
while a<len(amounts):
amt=amounts[a]#+feeouts[a] #in satoshi
dest=destinations[a]
b=b+amt
outs.append({'value':amt,'address':dest})
a=a+1
unspentbtc=totalin-b-fee
if unspentbtc>0:
outs.append({'value':unspentbtc,'address':fromaddr})
if secretexponent<=0:
priv=hashlib.sha256(subkey1+subkey2).hexdigest()
else:
priv=hashlib.sha256(secretexponent).hexdigest()
tx=mktx(ins,outs)
p=0
tx2=tx
for x in inp:
tx2=sign(tx2,p,priv)
p=p+1
#tx2=sign(tx,0,priv)
pushtx(tx2)
def send_from_many(fromaddrs,destination,fee, subkey1,subkey2): #always sends ALL BTC in ALL SOURCE ADDRESSES
#fromaddrs and subkey1 and subkey2 need to be arrays of addresses and subkeys
global inps, tx, tx2, outs,r
#make inputs
privorder=[]
inps=[]
totalin=0
for x in fromaddrs:
r=unspent(x)
privorder.append(len(r)) # number of inputs from each input address
inps=inps+r
for y in r:
totalin=totalin+y['value']
#make output
sfee=int(fee*100000000)
outs=[]
amt=totalin-sfee
outs.append({'value':amt,'address':destination})
#send tx
tx=mktx(inps,outs)
tx2=tx
g=0
j=0
while g<len(subkey1):
for t in range(0,privorder[g]):
sk1=subkey1[g]
sk2=subkey2[g]
priv=hashlib.sha256(sk1+sk2).hexdigest()
tx2=sign(tx2,j,priv)
j=j+1
g=g+1
pushtx(tx2)
def send(fromaddr, amt, destination, fee, subkey1, subkey2):
pk=hashlib.sha256(subkey1+subkey2).hexdigest()
send_transaction(fromaddr,amt,destination,fee,pk)
users=[]
def add_user():
global users
a=user()
print a.inputaddress
k=len(users)
users.append(a)
return k
def load_user_db():
global users
filename='users.data'
f=open(filename)
users=[]
ok=True
while ok:
inputaddress=f.readline().strip()
if inputaddress=='END':
ok=False
else:
inputsecretexponent=f.readline().strip()
try:
nsubkeypairs=int(f.readline().strip())
except:
print "failed reading file"
r=user()
#r.append(inputaddress)
r.inputaddress=inputaddress
r.inputsecretexponent=inputsecretexponent
for i in range(0,nsubkeypairs):
subkey1=f.readline().strip()
subkey2=f.readline().strip()
referenceid=f.readline().strip()
publicaddress=f.readline().strip()
balance=f.readline().strip()
received=f.readline().strip()
g=subkeypair()
g.subkey1=subkey1
g.subkey2=subkey2
g.referenceid=referenceid
g.publicaddress=publicaddress
                g.balance=float(balance)
                if received=='received':
g.received=True
else:
g.received=False
r.subkeypairs.append(g)
#r.append(inputsecretexponent)
#r.append(nsubkeypairs)
users.append(r)
def save():
filename='users.data'
#pickle.dump(users,open('users.data','wb'))
f=open(filename,'wb')
for x in users:
f.write(x.inputaddress)
f.write('\r\n')
f.write(x.inputsecretexponent)
f.write('\r\n')
f.write(str(len(x.subkeypairs)))
f.write('\r\n')
if len(x.subkeypairs)>0:
for y in x.subkeypairs:
f.write(str(y.subkey1))
f.write('\r\n')
f.write(str(y.subkey2))
f.write('\r\n')
f.write(str(y.referenceid))
f.write('\r\n')
f.write(str(y.publicaddress))
f.write('\r\n')
f.write(str(y.balance))
f.write('\r\n')
if y.received==True:
f.write('received')
else:
f.write('not received')
f.write('\r\n')
f.write('END')
m='1GgwA7c2ovgWDBoVYsHT5VYXw2QBey1EdF'
load_user_db()
| apache-2.0 | -3,476,511,744,232,896,500 | 24.604538 | 148 | 0.580833 | false | 3.266749 | false | false | false |
fooelisa/pyiosxr | pyIOSXR/exceptions.py | 1 | 2670 | #!/usr/bin/env python
# coding=utf-8
"""Exceptions for pyiosxr, a module to interact with Cisco devices running IOS-XR."""
# Copyright 2015 Netflix. All rights reserved.
# Copyright 2016 BigWaveIT. All rights reserved.
#
# The contents of this file are licensed under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with the
# License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations under
# the License.
class IOSXRException(Exception):
def __init__(self, msg=None, dev=None):
super(IOSXRException, self).__init__(msg)
if dev:
self._xr = dev
# release the XML agent
if self._xr._xml_agent_locker.locked():
self._xr._xml_agent_locker.release()
class ConnectError(IOSXRException):
"""Exception while openning the connection."""
def __init__(self, msg=None, dev=None):
super(ConnectError, self).__init__(msg=msg, dev=dev)
if dev:
self._xr = dev
self._xr._xml_agent_alive = False
class CommitError(IOSXRException):
"""Raised when unable to commit. Mostly due to ERROR 0x41866c00"""
pass
class LockError(IOSXRException):
"""Throw this exception when unable to lock the config DB."""
pass
class UnlockError(IOSXRException):
"""Throw this exception when unable to unlock the config DB."""
pass
class CompareConfigError(IOSXRException):
"""Throw this exception when unable to compare config."""
pass
class UnknownError(IOSXRException):
"""UnknownError Exception."""
pass
class InvalidInputError(IOSXRException):
"""InvalidInputError Exception."""
pass
class XMLCLIError(IOSXRException):
"""XMLCLIError Exception."""
pass
class InvalidXMLResponse(IOSXRException):
"""Raised when unable to process properly the XML reply from the device."""
pass
class TimeoutError(IOSXRException):
"""TimeoutError Exception."""
def __init__(self, msg=None, dev=None):
super(TimeoutError, self).__init__(msg=msg, dev=dev)
if dev:
self._xr = dev
self._xr._xml_agent_alive = False
class EOFError(IOSXRException):
"""EOFError Exception."""
pass
class IteratorIDError(IOSXRException):
"""IteratorIDError Exception."""
pass
| apache-2.0 | 247,740,485,529,006,500 | 22.839286 | 85 | 0.675655 | false | 3.997006 | false | false | false |
meteoswiss-mdr/monti-pytroll | scripts/plot_odyssey.py | 1 | 10098 | from __future__ import division
from __future__ import print_function
#!/usr/bin/python
import datetime
import logging
from mpop.satellites import GeostationaryFactory
from mpop.projector import get_area_def
from mpop.utils import debug_on
from pyresample import plot
import numpy as np
from pydecorate import DecoratorAGG
import aggdraw
from trollimage.colormap import rainbow, RainRate
from trollimage.image import Image as trollimage
from PIL import ImageFont, ImageDraw
from pycoast import ContourWriterAGG
from datetime import timedelta
import sys
from os.path import dirname, exists
from os import makedirs
LOG = logging.getLogger(__name__)
delay=0
if len(sys.argv) > 1:
if len(sys.argv) < 6:
print("*** ")
print("*** Warning, please specify date and time completely, e.g.")
print("*** python plot_odyssey.py 2014 07 23 16 10 ")
print("*** ")
quit() # quit at this point
else:
year = int(sys.argv[1])
month = int(sys.argv[2])
day = int(sys.argv[3])
hour = int(sys.argv[4])
minute = int(sys.argv[5])
else:
    if True: # automatic choice of last 5min
from my_msg_module import get_last_SEVIRI_date
datetime1 = get_last_SEVIRI_date(False)
if delay != 0:
datetime1 -= timedelta(minutes=delay)
year = datetime1.year
month = datetime1.month
day = datetime1.day
hour = datetime1.hour
minute = datetime1.minute
    else: # fixed date for test reasons
year = 2015
month = 12
day = 16
hour = 13
minute = 30
prop_str='DBZH'
#prop_str='RATE'
#if len(sys.argv) > 1:
# prop_str = sys.argv[1]
yearS = str(year)
#yearS = yearS[2:]
monthS = "%02d" % month
dayS = "%02d" % day
hourS = "%02d" % hour
minS = "%02d" % minute
dateS=yearS+'-'+monthS+'-'+dayS
timeS=hourS+':'+minS+'UTC'
print(dateS, timeS)
#import sys, string, os
#sys.path.insert(0, "/opt/users/mbc/pytroll/install/lib/python2.6/site-packages")
debug_on()
time_slot = datetime.datetime(year, month, day, hour, minute)
global_data = GeostationaryFactory.create_scene("odyssey", "", "radar", time_slot)
global_data.load([prop_str])
print(global_data)
color_mode='RainRate'
#print "global_data[prop_str].product_name=",global_data[prop_str].product_name
#area='odyssey'
#area='odysseyS25'
area='EuroMercator' # should be the same as blitzortung
reproject=True
if reproject:
print('-------------------')
print("start projection")
# PROJECT data to new area
data = global_data.project(area, precompute=True)
#data[prop_str].product_name = global_data[prop_str].product_name
#data[prop_str].units = global_data[prop_str].units
global_data = data
#outputDir = "/data/cinesat/out/"
outputDir = time_slot.strftime('/data/COALITION2/PicturesSatellite/%Y-%m-%d/%Y-%m-%d_ODY_RATE_'+area+'/')
if not exists(outputDir):
makedirs(outputDir)
outputFile = outputDir+'ODY_'+prop_str+'-'+area+'_'+yearS[2:]+monthS+dayS+hourS+minS +'.png'
# define area
print('-------------------')
obj_area = get_area_def(area)
print('obj_area ', obj_area)
proj4_string = obj_area.proj4_string
# e.g. proj4_string = '+proj=geos +lon_0=0.0 +a=6378169.00 +b=6356583.80 +h=35785831.0'
print('proj4_string ',proj4_string)
area_extent = obj_area.area_extent
# e.g. area_extent = (-5570248.4773392612, -5567248.074173444, 5567248.074173444, 5570248.4773392612)
area_def = (proj4_string, area_extent)
print('-------------------')
print('area_def ', area_def)
prop=global_data[prop_str].data
fill_value=None # transparent background
#fill_value=(1,1,1) # white background
min_data = 0.0
max_data = 150
colormap = RainRate
# instantaneous rain rate in mm/h
if prop_str == 'RATE':
# prop = np.log10(prop)
# min_data = prop.min()
# #max_data = prop.max()
# #min_data = -0.25
# #max_data = 1.9
# min_data = -0.2 # log(0.63)
# max_data = 2.41 # log(260)
# units='log(RR)'
# tick_marks = 1 # default
# minor_tick_marks = 0.1 # default
lower_value=0.15
# instantaneous rain rate in mm/h
if prop_str == 'DBZH':
min_data = -20
max_data = 70
colormap = rainbow
lower_value=13
if prop_str == 'ACRR':
min_data = 0
max_data = 250
lower_value=0.15
if lower_value > -1000:
    prop[prop < lower_value] = np.ma.masked
LOG.debug("min_data/max_data: "+str(min_data)+" / "+str(max_data))
colormap.set_range(min_data, max_data)
# prop.mask[:,:]=True
img = trollimage(prop, mode="L", fill_value=fill_value)
img.colorize(colormap)
PIL_image=img.pil_image()
dc = DecoratorAGG(PIL_image)
add_logos=True
add_colorscale=True
add_title=True
add_map=True
find_maxima=True
verbose=True
layer=' 2nd layer'
add_borders=True
resolution='l'
if add_borders:
cw = ContourWriterAGG('/data/OWARNA/hau/pytroll/shapes/')
cw.add_coastlines(PIL_image, area_def, outline='white', resolution=resolution, outline_opacity=127, width=1, level=2) #, outline_opacity=0
#outline = (255, 0, 0)
outline = 'red'
#outline = 'white'
cw.add_coastlines(PIL_image, area_def, outline=outline, resolution=resolution, width=2) #, outline_opacity=0
cw.add_borders(PIL_image, area_def, outline=outline, resolution=resolution, width=2) #, outline_opacity=0
ticks=20
tick_marks=20 # default
minor_tick_marks=10 # default
title_color='white'
units=global_data[prop_str].info["units"]
#global_data[prop_str].units
if add_logos:
if verbose:
print('... add logos')
dc.align_right()
if add_colorscale:
dc.write_vertically()
#dc.add_logo("../logos/meteoSwiss3.jpg",height=60.0)
#dc.add_logo("../logos/pytroll3.jpg",height=60.0)
dc.add_logo("/opt/users/common/logos/meteoSwiss.png",height=40.0)
#font_scale = aggdraw.Font("black","/usr/share/fonts/truetype/ttf-dejavu/DejaVuSerif-Bold.ttf",size=16)
fontsize=18
#font = ImageFont.truetype("/usr/openv/java/jre/lib/fonts/LucidaTypewriterBold.ttf", fontsize)
font = ImageFont.truetype("/usr/openv/java/jre/lib/fonts/LucidaSansRegular.ttf", fontsize)
if add_colorscale:
print('... add colorscale ranging from min_data (',min_data,') to max_data (',max_data,')')
dc.align_right()
dc.write_vertically()
#font_scale = ImageFont.truetype("/usr/openv/java/jre/lib/fonts/LucidaTypewriterBold.ttf", fontsize)
colormap_r = colormap.reverse()
#rainbow_r.set_range(min_data, max_data)
dc.add_scale(colormap_r, extend=True, ticks=ticks, tick_marks=tick_marks, minor_tick_marks=minor_tick_marks, line_opacity=100, unit=units) #, font=font
indicate_range=True
if indicate_range:
mask = global_data[prop_str+'-MASK'].data
img = trollimage(mask, mode="L", fill_value=None) #fill_value,[1,1,1], None
from trollimage.colormap import greys
img.colorize(greys)
img.putalpha(mask*0+0.4)
PIL_mask = img.pil_image()
from PIL import Image as PILimage
PIL_image = PILimage.alpha_composite(PIL_mask, PIL_image)
if add_title:
draw = ImageDraw.Draw(PIL_image)
if layer.find('2nd') != -1:
y_pos_title=20
elif layer.find('3rd') != -1:
y_pos_title=40
else:
y_pos_title=5
layer = dateS+' '+timeS
if len(layer) > 0:
layer=layer+':'
#title = layer+' radar, '+prop_str+' ['+global_data[prop_str].units+']'
title = layer+' ODYSSEY, '+'precipitation rate'+' ['+global_data[prop_str].info["units"]+']'
draw.text((0, y_pos_title),title, title_color, font=font)
PIL_image.save(outputFile)
print('... save image as ', outputFile)
# Austria works with
# https://opencv-python-tutroals.readthedocs.io/en/latest/py_tutorials/py_imgproc/py_watershed/py_watershed.html
if find_maxima:
import numpy as np
import scipy
import scipy.ndimage as ndimage
import scipy.ndimage.filters as filters
import matplotlib.pyplot as plt
data = global_data[prop_str].data.data
#data = filters.gaussian_filter(global_data[prop_str].data,1) ### filter eliminates too many data points...
noise_removal = False
if noise_removal:
# ... need to install openCV2
import cv2
kernel = np.ones((3,3),np.uint8)
ret, thresh = cv2.threshold(data, 0, 75, cv2.THRESH_BINARY_INV+cv2.THRESH_OTSU)
opening = cv2.morphologyEx(thresh, cv2.MORPH_OPEN, kernel, iterations = 1)
sure_bg = cv2.dilate(opening,kernel,iterations=3)
# arbitrary settings
neighborhood_size = 6
threshold = 6
ref_min=43
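    # Detection criterion used below: a pixel is kept as a local maximum if
    # it equals the maximum_filter value in its neighborhood, the local
    # contrast (max - min) exceeds `threshold`, and the neighborhood maximum
    # is at least `ref_min` dBZ.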
data_max = filters.maximum_filter(data, neighborhood_size)
print(data_max.max())
maxima = (data == data_max)
data_min = filters.minimum_filter(data, neighborhood_size)
print(data_min.max())
diff = ((data_max - data_min) > threshold)
#print "diff: ", diff
maxima[diff == False] = 0
maxima[data_max < ref_min] = 0
labeled, num_objects = ndimage.label(maxima)
slices = ndimage.find_objects(labeled)
x, y = [], []
for dy,dx in slices:
x_center = (dx.start + dx.stop - 1)/2
x.append(x_center)
y_center = (dy.start + dy.stop - 1)/2
y.append(y_center)
plot_plt=True
if plot_plt:
plt.imshow(data, vmin=0, vmax=0.9*data_max.max())
#plt.imshow(data, vmin=0, vmax=50)
#plt.imshow(data)
plt.autoscale(False)
outputFile = outputDir+'odd_'+prop_str+'-'+area+'_'+yearS[2:]+monthS+dayS+hourS+minS +'.png'
plt.savefig(outputFile, bbox_inches = 'tight')
print("display "+outputFile+" &")
plt.autoscale(False)
plt.plot(x,y, 'ro', markersize=2.5)
outputFile = outputDir+'odm_'+prop_str+'-'+area+'_'+yearS[2:]+monthS+dayS+hourS+minS +'.png'
plt.savefig(outputFile, bbox_inches = 'tight')
print("display "+outputFile+" &")
else:
prop = np.full(data.shape, False, dtype=bool)
for i,j in zip(x,y):
prop[i,j]=True
from mpop.satin.swisslightning import unfold_lightning
img = trollimage(prop, mode="L", fill_value=fill_value)
# ... not yet finished ...
| lgpl-3.0 | 3,182,971,709,990,367,700 | 29.786585 | 154 | 0.647059 | false | 2.885967 | false | false | false |
Tinkerforge/brickv | src/brickv/plugin_system/plugins/current25/current25.py | 1 | 3393 | # -*- coding: utf-8 -*-
"""
Current25 Plugin
Copyright (C) 2011-2012 Olaf Lüke <[email protected]>
Copyright (C) 2014-2016 Matthias Bolte <[email protected]>
current25.py: Current25 Plugin Implementation
This program is free software; you can redistribute it and/or
modify it under the terms of the GNU General Public License
as published by the Free Software Foundation; either version 2
of the License, or (at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
General Public License for more details.
You should have received a copy of the GNU General Public
License along with this program; if not, write to the
Free Software Foundation, Inc., 59 Temple Place - Suite 330,
Boston, MA 02111-1307, USA.
"""
from PyQt5.QtCore import pyqtSignal, Qt
from PyQt5.QtWidgets import QVBoxLayout, QLabel, QPushButton, QFrame
from brickv.plugin_system.plugin_base import PluginBase
from brickv.bindings import ip_connection
from brickv.bindings.bricklet_current25 import BrickletCurrent25
from brickv.plot_widget import PlotWidget, CurveValueWrapper
from brickv.callback_emulator import CallbackEmulator
from brickv.utils import format_current
class Current25(PluginBase):
qtcb_over = pyqtSignal()
def __init__(self, *args):
super().__init__(BrickletCurrent25, *args)
self.cur = self.device
self.cbe_current = CallbackEmulator(self,
self.cur.get_current,
None,
self.cb_current,
self.increase_error_count)
self.qtcb_over.connect(self.cb_over)
self.cur.register_callback(self.cur.CALLBACK_OVER_CURRENT,
self.qtcb_over.emit)
self.over_label = QLabel('Over Current: No')
self.calibrate_button = QPushButton('Calibrate Zero')
self.calibrate_button.clicked.connect(self.calibrate_clicked)
self.current_current = CurveValueWrapper() # float, A
plots = [('Current', Qt.red, self.current_current, format_current)]
self.plot_widget = PlotWidget('Current [A]', plots, extra_key_widgets=[self.over_label], y_resolution=0.001)
line = QFrame()
line.setObjectName("line")
line.setFrameShape(QFrame.HLine)
line.setFrameShadow(QFrame.Sunken)
layout = QVBoxLayout(self)
layout.addWidget(self.plot_widget)
layout.addWidget(line)
layout.addWidget(self.calibrate_button)
def start(self):
self.cbe_current.set_period(100)
self.plot_widget.stop = False
def stop(self):
self.cbe_current.set_period(0)
self.plot_widget.stop = True
def destroy(self):
pass
@staticmethod
def has_device_identifier(device_identifier):
return device_identifier == BrickletCurrent25.DEVICE_IDENTIFIER
def cb_current(self, current):
self.current_current.value = current / 1000.0
def cb_over(self):
self.over_label.setText('Over Current: Yes')
def calibrate_clicked(self):
try:
self.cur.calibrate()
except ip_connection.Error:
return
| gpl-2.0 | 8,338,593,412,160,102,000 | 33.262626 | 116 | 0.666863 | false | 3.939605 | false | false | false |
trobanga/pifidelity | mp3database.py | 1 | 7571 | import json
import os
import mutagen
import mutagen.mp3
import mutagen.oggvorbis
import mutagen.flac
from mutagen.easyid3 import EasyID3
import itertools
class db_structure(object):
data_fields = ['artist', 'title', 'album', 'tracknumber']
def __init__(self,
path,
artist,
title,
album,
tracknumber):
self.path = path
self.artist = artist
self.title = title
self.album = album
if tracknumber:
t = type(tracknumber)
if t is str or t is unicode:
s = tracknumber.split('/')[0]
s = s.split('.')[0]
self.tracknumber = int(s)
else:
self.tracknumber = tracknumber
else:
self.tracknumber = None
self.name_dict = dict(zip(self.data_fields, [self.artist,
self.title,
self.album,
self.tracknumber]))
def __iter__(self):
return iter([self.path,
self.artist,
self.title,
self.album,
self.tracknumber])
def __getitem__(self, k):
if k == 'path':
return self.path
return self.name_dict[k]
def __repr__(self):
return repr((self.path,
self.artist,
self.title,
self.album,
self.tracknumber))
def to_list(self):
return (self.path,
self.artist,
self.title,
self.album,
self.tracknumber)
class MusicDB(object):
"""
Functions to use from outside:
scan_library,
get_albums_from_artist,
get_artists_from_album,
get_title, num_songs
"""
def __init__(self, filename='music.db'):
self.music_db = list() # main db
self.artist_db = set() # set of all artists
self.title_db = set()
self.path_db = set()
self.playlist_db = list()
self.filename = filename
self.initialized = False
self.file_types = frozenset(('mp3', 'flac', 'ogg', 'oga'))
def num_songs(self):
return len(self.music_db)
def save_db(self, db):
with open(self.filename, 'w') as f:
l = map(db_structure.to_list, db)
f.write(json.dumps(l))
def load_db(self):
try:
with open(self.filename, 'r') as f:
db = json.loads(*f.readlines())
print len(db)
# directories, db = db
db = map(lambda x: db_structure(*x), db)
self._update_db(db)
except Exception, e:
print e
def scan_library(self, directories=None):
"""
        Scans directories for music files and reports the elapsed time
"""
import time
t = time.time()
try:
self._create_db(directories)
except Exception, e:
print e
raise Exception( "Couldn't create DB")
print 'db created in ', time.time() - t, ' seconds'
def _parse_dirs(self, directories):
"""
Parses directories and returns mp3 files
"""
l = []
for dirs in directories:
try:
d = os.listdir(dirs)
except os.error, e:
continue
# ignore hidden files
d = filter(lambda x: not x.startswith('.'), d)
d = map(lambda x: dirs + '/' + x, d)
for f in d:
try:
if not os.path.isdir(f):
ending = f.split('.')[-1].lower()
if ending in self.file_types:
l.append(f)
else:
print f
print ending, "not supported"
else:
# parse subdirectories
p = self._parse_dirs([f])
for x in p:
l.append(x)
except Exception, e:
print e
if not l:
raise Exception('Parsing failed for {}'.format(directories))
return l
def _create_db(self, directories):
"""
Creates db from directories
"""
try:
d = self._parse_dirs(directories)
        except Exception, e:
            print e
            d = []
if not d:
raise Exception('No music in', directories)
def get_tags(f):
l = [f]
offset = len(l)
try:
t = mutagen.File(f, easy=True)
for tag in db_structure.data_fields:
if tag == 'artist':
# first try albumartist and others
if 'albumartist' in t:
i = t.get('albumartist')
elif 'albumartistsort' in t:
i = t.get('albumartistsort')
else:
i = t.get(tag)
else:
i = t.get(tag)
if i:
l.append(i[0])
else:
l.append(None)
except Exception, e:
print 'get tags'
print f, e
print db_structure, dir(db_structure)
for i in xrange(len(l),
len(db_structure.data_fields) + offset):
l.append(None)
return l
d = map(lambda x: db_structure(*get_tags(x)), d)
try:
self._update_db(d)
except Exception, e:
raise e
self.save_db(d)
def _find(self, db, wanted, t):
"""
Finds wanted e.g. 'artist'
for args e.g. 'album'
in db
"""
def crawl_db(key):
"""
Return set of all DB entries of key
"""
s = set()
for e in db:
s.add(e[key])
return s
d = dict()
for n in crawl_db(t):
w = set()
for a in self._filter_by(db, t, n):
name = a[wanted]
if name:
w.add(a[wanted])
else:
w.add("unknown")
d[n] = w
return d
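    # Illustrative example (assumed data): _find(db, 'album', 'artist')
    # returns {'Some Artist': set(['Album A', 'Album B']), ...}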
def _update_db(self, db):
"""
Updates DBS with album and artist entries
"""
self.artist_db = self._find(db, 'album', 'artist')
self.music_db = db
self.initialized = True
def _get(self, key, db, name):
return self._filter_by(db, key, name)
def get_album(self, name):
return self._sort_by(self._get('album', self.music_db, name),
'tracknumber')
def get_title(self, db, name):
return self._get('title', db, name)
def get_albums_from_artist(self, name):
if name not in self.artist_db:
return None
return self.artist_db[name]
def _sort_by(self, db, t):
return sorted(db, key=lambda db_structure: db_structure.name_dict[t])
def _filter_by(self, db, t, name):
        return filter(lambda x: name == x.name_dict[t], db)
| mit | 1,349,341,664,177,360,000 | 28.344961 | 77 | 0.434157 | false | 4.378832 | false | false | false |
pentestfail/TA-FireEye_TAP | bin/ta_fireeye_tap/solnlib/packages/schematics/contrib/mongo.py | 4 | 1248 | """This module contains fields that depend on importing `bson`. `bson` is
a part of the pymongo distribution.
"""
from __future__ import unicode_literals, absolute_import
import bson
from ..common import * # pylint: disable=redefined-builtin
from ..types import BaseType
from ..exceptions import ConversionError
class ObjectIdType(BaseType):
"""An field wrapper around MongoDB ObjectIds. It is correct to say they're
bson fields, but I am unaware of bson being used outside MongoDB.
`auto_fill` is disabled by default for ObjectIdType's as they are
typically obtained after a successful save to Mongo.
"""
MESSAGES = {
'convert': "Couldn't interpret value as an ObjectId.",
}
def __init__(self, auto_fill=False, **kwargs):
self.auto_fill = auto_fill
super(ObjectIdType, self).__init__(**kwargs)
def to_native(self, value, context=None):
if not isinstance(value, bson.objectid.ObjectId):
try:
value = bson.objectid.ObjectId(str(value))
except bson.objectid.InvalidId:
raise ConversionError(self.messages['convert'])
return value
def to_primitive(self, value, context=None):
return str(value)
| mit | 6,206,104,877,321,113,000 | 29.439024 | 79 | 0.668269 | false | 4.025806 | false | false | false |
rgreinho/craigomatic | craigomatic/apps/craigmine/models.py | 1 | 1322 | from django.db import models
from django.utils.html import format_html
# Create your models here.
class Search(models.Model):
"""
http://austin.craigslist.org/search/bia?sort=date&hasPic=1&minAsk=10&maxAsk=250&query=fixed
"""
server = models.CharField(max_length=200)
category = models.CharField(max_length=50)
has_pic = models.BooleanField(default=True)
min_ask = models.PositiveIntegerField(default=0)
max_ask = models.PositiveIntegerField(default=1000)
query = models.CharField(max_length=300, default='')
tag = models.CharField(max_length=20)
custom_search_args = models.CharField(max_length=300, default='')
last_update = models.DateTimeField(auto_now=True)
def __str__(self):
return self.tag
class Item(models.Model):
search = models.ForeignKey(Search)
id = models.CharField(primary_key=True, max_length=200)
link = models.URLField()
post_date = models.DateTimeField()
pnr = models.CharField(max_length=200)
price = models.PositiveIntegerField()
title = models.CharField(max_length=200)
retrieved = models.DateTimeField(auto_now_add=True)
def __str__(self):
return self.title
def external_link(self):
return format_html('<a href="{0}">{0}</a>', self.link)
external_link.allow_tags = True
| mit | 7,252,995,492,067,824,000 | 32.05 | 95 | 0.69062 | false | 3.544236 | false | false | false |
blakerouse/python-libmaas | maas/client/utils/multipart.py | 3 | 6112 | # Copyright 2016-2017 Canonical Ltd.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Encoding of MIME multipart data."""
__all__ = ["encode_multipart_data"]
from collections import Iterable, Mapping
from email.generator import BytesGenerator
from email.mime.application import MIMEApplication
from email.mime.multipart import MIMEMultipart
from io import BytesIO, IOBase
from itertools import chain
import mimetypes
def get_content_type(*names):
"""Return the MIME content type for the file with the given name."""
for name in names:
if name is not None:
mimetype, encoding = mimetypes.guess_type(name)
if mimetype is not None:
if isinstance(mimetype, bytes):
return mimetype.decode("ascii")
else:
return mimetype
else:
return "application/octet-stream"
def make_bytes_payload(name, content):
payload = MIMEApplication(content)
payload.add_header("Content-Disposition", "form-data", name=name)
return payload
def make_string_payload(name, content):
payload = MIMEApplication(content.encode("utf-8"), charset="utf-8")
payload.add_header("Content-Disposition", "form-data", name=name)
payload.set_type("text/plain")
return payload
def make_file_payload(name, content):
payload = MIMEApplication(content.read())
payload.add_header("Content-Disposition", "form-data", name=name, filename=name)
names = name, getattr(content, "name", None)
payload.set_type(get_content_type(*names))
return payload
def make_payloads(name, content):
"""Constructs payload(s) for the given `name` and `content`.
If `content` is a byte string, this calls `make_bytes_payload` to
construct the payload, which this then yields.
If `content` is a unicode string, this calls `make_string_payload`.
If `content` is file-like -- it inherits from `IOBase` or `file` --
this calls `make_file_payload`.
If `content` is iterable, this calls `make_payloads` for each item,
with the same name, and then re-yields each payload generated.
If `content` is callable, this calls it with no arguments, and then
uses the result as a context manager. This can be useful if the
callable returns an open file, for example, because the context
protocol means it will be closed after use.
This raises `AssertionError` if it encounters anything else.
"""
if content is None:
yield make_bytes_payload(name, b"")
elif isinstance(content, bool):
if content:
yield make_bytes_payload(name, b"true")
else:
yield make_bytes_payload(name, b"false")
elif isinstance(content, int):
yield make_bytes_payload(name, b"%d" % content)
elif isinstance(content, bytes):
yield make_bytes_payload(name, content)
elif isinstance(content, str):
yield make_string_payload(name, content)
elif isinstance(content, IOBase):
yield make_file_payload(name, content)
elif callable(content):
with content() as content:
for payload in make_payloads(name, content):
yield payload
elif isinstance(content, Iterable):
for part in content:
for payload in make_payloads(name, part):
yield payload
else:
raise AssertionError("%r is unrecognised: %r" % (name, content))
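# Illustrative examples (not part of the original module):
#
#   list(make_payloads("count", 3))    # one part whose body is b"3"
#   list(make_payloads("flag", True))  # one part whose body is b"true"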
def build_multipart_message(data):
message = MIMEMultipart("form-data")
for name, content in data:
for payload in make_payloads(name, content):
message.attach(payload)
return message
def encode_multipart_message(message):
# The message must be multipart.
assert message.is_multipart()
# The body length cannot yet be known.
assert "Content-Length" not in message
# So line-endings can be fixed-up later on, component payloads must have
# no Content-Length and their Content-Transfer-Encoding must be base64
# (and not quoted-printable, which Django doesn't appear to understand).
for part in message.get_payload():
assert "Content-Length" not in part
assert part["Content-Transfer-Encoding"] == "base64"
# Flatten the message without headers.
buf = BytesIO()
generator = BytesGenerator(buf, False) # Don't mangle "^From".
generator._write_headers = lambda self: None # Ignore.
generator.flatten(message)
# Ensure the body has CRLF-delimited lines. See
# http://bugs.python.org/issue1349106.
body = b"\r\n".join(buf.getvalue().splitlines())
# Only now is it safe to set the content length.
message.add_header("Content-Length", "%d" % len(body))
return message.items(), body
def encode_multipart_data(data=(), files=()):
"""Create a MIME multipart payload from L{data} and L{files}.
**Note** that this function is deprecated. Use `build_multipart_message`
and `encode_multipart_message` instead.
@param data: A mapping of names (ASCII strings) to data (byte string).
@param files: A mapping of names (ASCII strings) to file objects ready to
be read.
@return: A 2-tuple of C{(body, headers)}, where C{body} is a a byte string
and C{headers} is a dict of headers to add to the enclosing request in
which this payload will travel.
"""
if isinstance(data, Mapping):
data = data.items()
if isinstance(files, Mapping):
files = files.items()
message = build_multipart_message(chain(data, files))
headers, body = encode_multipart_message(message)
return body, dict(headers)
| agpl-3.0 | -3,004,474,281,309,078,000 | 36.728395 | 84 | 0.680465 | false | 4.099262 | false | false | false |
tejaskhot/deep-learning | test/nnet/optimize.py | 8 | 2575 | """Optimization method
Supported method:
+ Stochastic gradient descent
"""
from collections import OrderedDict;
import theano.tensor as T;
def gd_updates(cost,
params,
updates=None,
max_norm=5.0,
learning_rate=0.1,
eps=1e-6,
rho=0.95,
method="sgd"):
"""Gradient Descent based optimization
    Note: this should be a class to allow a more flexible call
Parameters
----------
cost : scalar
total cost of the cost function.
params : list
parameter list
method : string
        optimization method; currently only "sgd" is implemented
Returns
-------
updates : OrderedDict
dictionary of updates
"""
if updates is None:
updates=OrderedDict();
gparams=T.grad(cost, params);
for gparam, param in zip(gparams, params):
if method=="sgd":
updates[param]=param-learning_rate*gparam;
return updates;
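# Illustrative usage sketch (assumes Theano shared variables `params`, a
# scalar `cost`, and symbolic inputs `x`, `y`; the names are hypothetical):
#
#   import theano;
#   train = theano.function(inputs=[x, y], outputs=cost,
#                           updates=gd_updates(cost, params, learning_rate=0.1));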
theano_rng=T.shared_randomstreams.RandomStreams(1234);
def dropout(shape, prob=0.):
"""generate dropout mask
Parameters
----------
shape : tuple
shape of the dropout mask
prob : double
probability of each sample
Returns
-------
mask : tensor
dropout mask
"""
    mask=theano_rng.binomial(n=1, p=1-prob, size=shape);
return T.cast(x=mask, dtype="float32");
def multi_dropout(shapes, prob=0.):
"""generate a list of dropout mask
Parameters
----------
shapes : tuple of tuples
list of shapes of dropout masks
prob : double
probability of each sample
Returns
-------
masks : tuple of tensors
list of dropout masks
"""
    return [dropout(shape, prob) for shape in shapes];
def apply_dropout(X, mask=None):
"""apply dropout operation
Parameters
----------
X : tensor
data to be masked
mask : dropout mask
Returns
-------
masked_X : tensor
dropout masked data
"""
if mask is not None:
return X*mask;
else:
return X;
def corrupt_input(X, corruption_level=0.):
"""Add noise on data
Parameters
----------
X : tensor
data to be corrupted
corruption_level : double
probability of the corruption level
Returns
-------
corrupted_out : tensor
corrupted output
"""
return apply_dropout(X, dropout(X.shape, corruption_level)); | gpl-2.0 | -6,390,088,161,485,654,000 | 19.943089 | 64 | 0.553786 | false | 4.447323 | false | false | false |
valuehack/scholarium.at | Scholien/migrations/0007_markdownartikel.py | 1 | 1076 | # -*- coding: utf-8 -*-
# Generated by Django 1.11.6 on 2017-12-11 13:57
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('Scholien', '0006_artikel_prioritaet'),
]
operations = [
migrations.CreateModel(
name='MarkdownArtikel',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('bezeichnung', models.CharField(max_length=200)),
('slug', models.SlugField(blank=True, max_length=100, unique=True)),
('zeit_erstellt', models.DateTimeField(auto_now_add=True)),
('text', models.TextField()),
('prioritaet', models.PositiveSmallIntegerField(default=0)),
],
options={
'verbose_name': 'Markdown Artikel',
'verbose_name_plural': 'Markdown Artikel',
'ordering': ['-zeit_erstellt'],
},
),
]
| mit | 8,424,974,101,967,312,000 | 33.709677 | 114 | 0.556691 | false | 4.10687 | false | false | false |
NERSC/QENESAP | PP/tools/sum_states.py | 19 | 8217 | #! /usr/bin/python
###### SUM STATES #######
# Python script for summing and ploting the data from the Density Of States
# files obtained from projwfc.x. It can sum also k-solved dos, and make a plot
# with mathplotlib (if not available, gnuplot, if not avaible, print to file)
# if there is not X11 forwarding, plots in terminal.
# It does something very similar to sumpdos.f90, but with
# some extra features (use "-h" option).
#
# it takes two different inputs, the first one is the pw.x output
# ("-o" option), which is used for parsing the Fermi energy for fitting
# the PDOS curve to the right energy. The other files are the pDOS files
# ("-s" option), that can be given with shell syntax, i.e.
# pdos_atm*Fe*wfc*d* for summing all the d orbitals of Fe.
# It can also handle k solved dos files.
#
# One of the most useful features, compared to sumpdos.x, is the
# fact that it also builds the picture directly, so it can be directly
# visualized and exported for inclusion in a document.
# It uses matplotlib for plotting, but if no matplotlib is found in
# the $PYTHONPATH, it tries to use gnuplot; if no gnuplot is available,
# it dumps the output data to a file.
# In the case that no X11 forwarding is available (i.e. ssh to the cluster),
# it shows a rough graph in the terminal, so we get an idea of the shape
# of the results.
#
# Example of usage:
# cd ....../espresso-5.0/PP/examples/example02/results/
# ../../../src/sum_states.py -o ni.dos.out -s
# ni.pdos_atm#1\(Ni\)_wfc#2\(d\) -t "Example PP/02" -xr -6 2
#
#
# The procedure for obtaining the DOS files is explained
# i.e. in (espresso-dir)/PP/examples/example02/
#
# Author: Dr. Julen Larrucea
# University of Bremen,
# Bremen Centre for Computational Materials Science, HMI Group
# julenl [at] gmail.com or larrucea [at] hmi.uni-bremen.de
#
# This file is distributed under the terms of the GNU General Public
# License. See the file `License'
# in the root directory of the present distribution,
# or http://www.gnu.org/copyleft/gpl.txt .
#######################
import sys
import os
import fnmatch
import linecache
# Some default variables
version=0.2
pwout=""
selat="*"
graphtitle=""
min_x,max_x=-10,3
min_y,max_y="",""
output_file_name="sum_dos.out"
prt="no"
print " #### sum_states.py version "+str(version)+" #### "
# Check if X11, matplotlib and gnuplot are available
try:
os.popen("gnuplot -V").read()
prog_gnuplot="yes" # gnuplot is installed
except:
prog_gnuplot="no"
# Parse command line options
if len(sys.argv)>1:
for i in sys.argv:
if i.startswith('-'):
option=i.split('-')[1]
if option=="o":
pwout= sys.argv[sys.argv.index('-o')+1]
if option=="s":
selat= sys.argv[sys.argv.index('-s')+1]
if option=="p":
prt="yes"
if len(sys.argv) > sys.argv.index('-p')+1: # if there is a name after "-p" take it as an output name
if sys.argv[sys.argv.index('-p')+1] != "-": # otherwise default name sum_dos.out
      output_file_name=sys.argv[sys.argv.index('-p')+1]
if option=="t":
graphtitle= sys.argv[sys.argv.index('-t')+1]
if option=="xr":
min_x,max_x= float(sys.argv[sys.argv.index('-xr')+1]),float(sys.argv[sys.argv.index('-xr')+2])
if option=="yr":
min_y,max_y= float(sys.argv[sys.argv.index('-yr')+1]),float(sys.argv[sys.argv.index('-yr')+2])
if option=="v":
print "sum_dos.py version: "+version
sys.exit()
if option=="h":
print '''
-o QE output file name (for grepping Fermi E)
 -s Selection of atoms for summing the DOSes. "*" for all, *1*Fe*d* for the first Fe atom (def. "*")
 -p Print output to a file and additionally provide an output name (def. no output and "sum_dos.out")
-t set title in the head of the graph
-xr set min and max x value for the axes in the graph
-yr set min and max y value for the axes in the graph
-h print this help
-v print version
 Example: sum_states.py -s sys.pdos_atm#4\(Fe2\)_wfc#2\(d\) -t "Wustite LDA+U single Fe" -xr -9 4
'''
sys.exit()
# Check for matplotlib/gnuplot and import mpl if possible
if len(os.popen('echo $DISPLAY').read()) > 1:
graphic_plot="yes"
try:
from pylab import *
mplplot="yes"
print "pylab imported"
except:
print "There is no mathplotlib installed. Using gnuplot."
mplplot="no"
prt="yes"
else:
print "No X11. Trying to plot on terminal"
graphic_plot="no"
if prog_gnuplot=="no":
prt="yes"
# if not specified, try to find the espresso output, in order to parse the Fermi energy
if pwout == "":
for filen in filter(os.path.isfile, os.listdir('.')):
if "Program PWSCF" in linecache.getline(filen, 2):
print "Using " + filen + " as pw.x output. You can specify another one with the -o option."
pwout=filen
# Parse Fermi energy from the pw.x output
if pwout!="":
try:
os.popen("grep -a 'the Fermi energy is' "+pwout ).read()
fermi=float(os.popen("grep -a 'the Fermi energy is' "+pwout ).read().split()[4])
print "Fermi energy = ", fermi, "a.u."
except:
print "WARNING: No Fermi energy found. Using 0 e.V. instead"
fermi=0
else:
print "WARNING: No pw.x output found. Using E Fermi = 0 e.V."
fermi=0
# List of all DOS files to add
dosfiles=[]
for dfile in os.listdir('.'):
if fnmatch.fnmatch(dfile, selat):
dosfiles.append(dfile)
if len(dosfiles)==0:
print "ERROR: Provide a (list of) valid DOS file(s)"
sys.exit()
print "dosfiles list: ",
for dosfile in dosfiles:
print dosfile,
print ""
# Check whether we have k-solved DOS
if open(dosfiles[0],'r').readline().split()[1]=="E":
ksolved="no"
print "no ksolved"
elif open(dosfiles[0],'r').readline().split()[1]=="ik":
ksolved="yes"
print "ksolved"
# Sum over all k-points and files
mat=[] # matrix with total sum of ldos
for i in range(len(dosfiles)):
 mati=[] # temporary matrix for each DOS file "i"
k=0
for line in open(dosfiles[i],'r'):
if len(line) > 10 and line.split()[0] != "#":
if ksolved=="no":
mati.append([float(line.split()[0]),float(line.split()[1]),float(line.split()[2])])
if ksolved=="yes":
ik = int(line.split()[0])
if ik > k: #if it is a different k block
k=int(line.split()[0])
     oldmat=[] # temporary matrix for each k-point
if ik == 1:
mati.append([float(line.split()[1]),float(line.split()[2]),float(line.split()[3])]) # append: energy, ldosup, ldosdw
elif ik == k and k > 1:
oldmat.append([float(line.split()[1]),float(line.split()[2]),float(line.split()[3])])
elif len(line) < 5 and k > 1: #if blank line, sum k-frame to the total
for j in range(len(oldmat)):
mati[j]=[mati[j][0],mati[j][1]+oldmat[j][1],mati[j][2]+oldmat[j][2]]
if mat == []: # if it is the first dos file, copy total matrix (mat) = the first dos files's data
mat=mati[:]
else:
for j in range(len(mati)): # if it is not the first file, sum values
mat[j]=[mat[j][0],mat[j][1]+mati[j][1],mat[j][2]+mati[j][2]]
print "...ploting..."
if prt=="yes":
out=open(output_file_name,"w")
x,y1,y2=[],[],[]
for i in mat:
x.append(i[0]-fermi)
y1.append(i[1])
y2.append(-i[2])
if prt=="yes": # print to a file
print>>out, i[0]-fermi, i[1], i[2]
if prt=="yes":
out.close()
if graphic_plot=="yes":
# if there is matplotlib, generate a plot with it
if mplplot=="yes":
plot(x,y1,linewidth=1.0)
plot(x,y2,linewidth=1.0)
print min(y2),max(y1)
plt.title(graphtitle)
plt.xlabel('E (eV)')
plt.ylabel('States')
plt.grid(True)
plt.rcParams.update({'font.size': 22})
plt.fill(x,y1,color='0.8')
plt.fill(x,y2,color='0.9')
if min_x and max_x:
fromx,tox=min_x,max_x
plt.axis([fromx, tox, min(y2), max(y1)])
show()
elif mplplot=="no" and prog_gnuplot=="yes": # If no mathplotlib available, use gnuplot
os.system("echo \"plot '"+ output_file_name + "' using ($1-"+str(fermi)+"):2 w l, '' u ($1"+str(fermi)+"):3 w l\" | gnuplot -persist")
elif graphic_plot=="no": # If no X forwarding available, show graph in terminal
if prog_gnuplot=="yes":
os.system("echo \"set terminal dumb; plot '"+ output_file_name + "' using ($1-"+str(fermi)+"):2 w l, '' u ($1-"+str(fermi)+"):3 w l\" | gnuplot -persist")
| gpl-2.0 | 1,317,896,384,210,962,000 | 32.538776 | 159 | 0.638433 | false | 2.883158 | false | false | false |
StanfordBioinformatics/xppf | client/loomengine/auth.py | 2 | 2574 | #!/usr/bin/env python
import argparse
import os
from getpass import getpass
from loomengine import verify_has_connection_settings, \
verify_server_is_running, get_server_url, \
save_token, delete_token, get_token
from loomengine_utils.connection import Connection
from requests.exceptions import HTTPError
class AuthClient(object):
def __init__(self, args=None, silent=False):
# Parse arguments
if args is None:
args = _get_args()
verify_has_connection_settings()
server_url = get_server_url()
verify_server_is_running(url=server_url)
self.args = args
self.silent = silent
self._set_run_function()
self.connection = Connection(server_url, token=None)
def _print(self, text):
if not self.silent:
print text
def _set_run_function(self):
# Map user input command to method
commands = {
'login': self.login,
'logout': self.logout,
'print-token': self.print_token,
}
self.run = commands[self.args.command]
def login(self):
username = self.args.username
password = self.args.password
if password is None:
password = getpass("Password: ")
try:
token = self.connection.create_token(
username=username, password=password)
except HTTPError:
raise SystemExit("ERROR! Login failed")
save_token(token)
self._print("Login was successful. Token saved.")
def logout(self):
token = get_token()
if token is None:
self._print("No token found. You are logged out.")
else:
delete_token()
self._print("Token deleted.")
def print_token(self):
print get_token()
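# A minimal usage sketch (not part of the original module): drive the
# client programmatically instead of via the command line.  The username
# is hypothetical, and it assumes saved connection settings plus a
# running, reachable server.
#
#   args = get_parser().parse_args(['login', 'alice'])
#   AuthClient(args, silent=True).run()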
def get_parser(parser=None):
if parser is None:
parser = argparse.ArgumentParser(__file__)
subparsers = parser.add_subparsers(dest='command')
login_parser = subparsers.add_parser('login')
login_parser.add_argument('username', metavar='USERNAME')
login_parser.add_argument(
'--password', '-p', metavar='PASSWORD',
default=None,
help='Optional. Wait for the prompt to avoid displaying '
'password and writing it in your terminal history'
)
subparsers.add_parser('logout')
subparsers.add_parser('print-token')
return parser
def _get_args():
parser = get_parser()
args = parser.parse_args()
return args
if __name__ == '__main__':
AuthClient().run()
| agpl-3.0 | -1,499,549,989,239,108,000 | 26.677419 | 65 | 0.607615 | false | 4.15832 | false | false | false |
davesteele/pygsear-debian | pygsear/Widget.py | 1 | 43275 | # pygsear
# Copyright (C) 2003 Lee Harr
#
#
# This file is part of pygsear.
#
# pygsear is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# pygsear is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with pygsear; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
"""Graphical input devices"""
import time, random, math, os, sys, types
import colorsys
from code import InteractiveConsole
from code import compile_command
import pygame
import pygame.draw
from pygame.locals import K_RETURN, K_ESCAPE, K_BACKSPACE, K_F1, K_UP, K_DOWN
from pygame.locals import K_PAGEUP, K_PAGEDOWN, K_LEFT, K_RIGHT, K_DELETE
from pygame.locals import QUIT, MOUSEBUTTONUP
import conf
import Drawable
from Drawable import Rectangle
import Path
import Event
import Util
from locals import TRANSPARENT, BLACK, WHITE, LGREEN, LGRAY, GRAY, BLUE, RED
class Widget:
def __init__(self, callback=None, group=()):
self.set_callback(callback)
        self.events = Event.EventGroup()
        self.stop = 0  # modal() relies on this flag; _stop() sets it
def set_callback(self, callback):
if callback is None:
callback = self.nop
self.callback = callback
def nop(self, arg=None):
pass
def activate(self):
self.active = 1
def deactivate(self):
self.active = 0
def _stop(self, pygame_event=None):
self.stop = 1
def _quit(self, pygame_event=None):
ev = pygame.event.Event(QUIT)
pygame.event.post(ev)
self._stop()
def modal(self):
        stop = Event.KEYUP_Event(key=K_ESCAPE, callback=self._stop)
        self.events.add(stop)  # register it so self.events.check() sees it
        while not self.stop:
            self.events.check()
        stop.kill()
class Score(Widget, Drawable.Drawable):
"""Keep and display a score or value."""
def __init__(self,
w=None,
position=(100, 100),
text="Score:",
digits=6,
fontSize=40,
color=WHITE,
bgcolor=TRANSPARENT):
Drawable.Drawable.__init__(self, w)
self.score_position = position
self.text = text
self.digits = digits
self.color = color
self.bgcolor = bgcolor
self.font = pygame.font.Font(None, fontSize)
self.points = 0
self.updateScore()
self.set_position(position)
self.set_crect(self.rect)
def addPoints(self, n):
"""Add points to the score."""
self.points += n
def subtractPoints(self, n):
"""Subtract points from the score."""
self.points -= n
def set_points(self, p):
"""Set the score to a particular value."""
self.points = p
def updateScore(self):
"""Render the text for showing the score."""
if hasattr(self, 'image'):
self.uclear()
line = '%s %*d' % (self.text, self.digits, self.points)
self.image = self.font.render(line, 1, self.color, self.bgcolor)
self.rect = self.image.get_rect()
self.set_position(self.score_position)
if self.bgcolor == TRANSPARENT:
self.image.set_colorkey(TRANSPARENT)
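# A minimal usage sketch (not in the original file), assuming a pygsear
# window/game has already been initialized so that conf is populated:
#
#   score = Score(position=(10, 10), text="Score:")
#   score.addPoints(50)
#   score.updateScore()  # re-render after the points change
#   score.udraw()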
class ProgressBar(Widget, Rectangle):
"""Percentage bar graph."""
def __init__(self,
w=None,
steps=100,
position=None,
color=BLACK,
width=None,
height=10,
fill=1,
border=0,
borderColor=WHITE):
if width is None:
width = conf.WINWIDTH-60
self.colorOriginal = color
self.set_color(color)
self.width = width
self.height = height
Rectangle.__init__(self, w, width, height, color=color)
self.image.set_colorkey(TRANSPARENT)
if position is None:
self.center(y=-30)
else:
self.set_position(position)
self.fill = fill
self.set_steps(steps)
self.set_crect(self.image.get_rect())
def set_steps(self, steps):
"""
"""
self.steps = steps
self.perStep = float(self.width)/steps
if self.fill:
self.stepsLeft = steps
else:
self.stepsLeft = 0
self.show()
def step(self):
"""
"""
if self.fill:
self.stepsLeft -= 1
if self.stepsLeft < 1:
self.stepsLeft = 0
else:
self.stepsLeft += 1
if self.stepsLeft > self.steps:
self.stepsLeft = self.steps
self.show()
def unstep(self):
"""
"""
if not self.fill:
self.stepsLeft -= 1
if self.stepsLeft < 1:
self.stepsLeft = 0
else:
self.stepsLeft += 1
if self.stepsLeft > self.steps:
self.stepsLeft = self.steps
self.show()
def reset(self):
self.stepsLeft = self.steps
self.set_color(self.colorOriginal)
self.show()
def set_color(self, color):
"""set the color of the bar"""
self.color = color
def show(self):
"""
"""
width = int(self.stepsLeft * self.perStep)
height = self.height
bar = pygame.Surface((width, height))
bar.fill(self.color)
self.image.fill(TRANSPARENT)
self.image.blit(bar, (0, 0))
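# Sketch (not in the original file): a draining countdown bar.  Assumes
# an initialized window; with the default fill=1 each step() removes one
# step from the bar.
#
#   bar = ProgressBar(steps=60, color=RED)
#   for tick in range(60):
#       bar.step()
#       bar.udraw()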
class VProgressBar(ProgressBar):
def __init__(self,
w=None,
steps=100,
position=None,
color=BLACK,
width=10,
height=None,
fill=1):
if height is None:
height = conf.WINHEIGHT-60
self.colorOriginal = color
self.set_color(color)
self.width = width
self.height = height
Rectangle.__init__(self, w, width, height, color=color)
self.image.set_colorkey(TRANSPARENT)
if position is None:
self.center(x=30)
else:
self.set_position(position)
self.fill = fill
self.set_steps(steps)
self.set_crect(self.image.get_rect())
def set_steps(self, steps):
"""
"""
self.steps = steps
self.perStep = float(self.height)/steps
if self.fill:
self.stepsLeft = steps
else:
self.stepsLeft = 0
self.show()
def show(self):
"""
"""
width = self.width
height = int(self.stepsLeft * self.perStep)
bar = pygame.Surface((width, height))
bar.fill(self.color)
self.image.fill(TRANSPARENT)
self.image.blit(bar, (0, self.height-height))
class Button(Widget):
def __init__(self, callback=None, group=None):
Widget.__init__(self)
self.set_callback(callback)
#print 'offset', offset, callback
self.armed = 0
self.events.add(Event.MOUSEBUTTONDOWN_Event(callback=self.clicked))
self.events.add(Event.MOUSEBUTTONUP_Event(callback=self.released))
if group is not None:
group.add(self.events)
self.stop = 0
def arm(self):
self.armed = 1
def fire(self, pygameEvent):
self.armed = 0
self.callback(pygameEvent)
def clicked(self, pygameEvent):
pos = pygameEvent.pos
try:
offset = self.window.rect[0:2]
#print 'off', offset
except AttributeError:
offset = (0, 0)
if self.rect.move(offset[0], offset[1]).collidepoint(pos):
self.arm()
return 1
else:
return 0
def released(self, pygameEvent):
#print 'rel'
pos = pygameEvent.pos
try:
offset = self.window.rect[0:2]
#print 'off', offset
except AttributeError:
offset = (0, 0)
if self.rect.move(offset[0], offset[1]).collidepoint(pos) and self.armed:
self.fire(pygameEvent)
else:
self.armed = 0
class SpriteButton(Button, Drawable.Drawable):
"""Clickable button which is also a sprite."""
def __init__(self, sprite, callback=None, group=None):
"""Initialize the button.
@param sprite: Clickable sprite.
@param callback: Function to call when sprite is clicked.
@param group: Other C{EventGroup} to put this widget's events
in to also.
"""
pos = sprite.get_position()
Button.__init__(self, callback, group)
Drawable.Drawable.__init__(self, w=sprite.window)
self.image = sprite.image
self.rect = sprite.rect
self.set_path(sprite.path)
def modal(self):
quit = Event.QUIT_Event(callback=self._stop)
self.events.add(quit)
stop = Event.KEYUP_Event(key=K_ESCAPE, callback=self._stop)
self.events.add(stop)
while not self.stop:
self.clear()
self.events.check()
self.udraw()
quit.kill()
stop.kill()
self.uclear()
class ImageButton(SpriteButton):
filename = None
def __init__(self, filename=None, callback=None, group=None):
if filename is None:
filename = self.filename
sprite = Drawable.Image(filename=filename)
SpriteButton.__init__(self, sprite=sprite, callback=callback, group=group)
class StationaryButton(Drawable.Stationary, SpriteButton):
"""Clickable button which is a sprite but does not need to move."""
def __init__(self,
window=None,
sprite=None,
callback=None,
group=None):
pos = sprite.get_position()
Drawable.Stationary.__init__(self, w=window, sprite=sprite)
SpriteButton.__init__(self, sprite, callback, group)
self.image = sprite.image
self.rect = sprite.rect
self.set_position(pos)
class CloseButton(StationaryButton):
"""White square button with a black X."""
def __init__(self, window=None, callback=None, group=None):
b = Drawable.Square(w=window, side=15, color=WHITE)
w, h = b.image.get_size()
pygame.draw.line(b.image, BLACK, (0, 0), (w, h))
pygame.draw.line(b.image, BLACK, (w, 0), (0, h))
b.center(-5, 5)
#print 'bc', b.center, window.screen.get_size(), b.get_position()
StationaryButton.__init__(self, window, b, callback, group)
self.draw()
class TextButton:
"""Clickable button with text printed on it."""
def __init__(self,
window=None,
text='click',
length=None,
callback=None,
size=22,
color=WHITE,
border=2,
borderColor=LGREEN,
padding=5,
bgColor=BLACK):
self.window = window
#print window
self.text = text
self.length = length
self.size = size
self.color = color
self.border = border
self.borderColor = borderColor
self.padding = padding
self.bgColor = bgColor
self.makeButton()
def makeButton(self):
window = self.window
text = self.text
length = self.length
size = self.size
color = self.color
border = self.border
borderColor = self.borderColor
padding = self.padding
bgColor = self.bgColor
t = Drawable.String(message=text, fontSize=size, color=color,
bgcolor=bgColor)
# use inverse text at cursor position if cursor_pos is set
if hasattr(self, 'cursor_pos'):
c = self.cursor_pos
before = Drawable.String(message=text[:c], fontSize=size, color=color,
bgcolor=bgColor)
bw, bh = before.image.get_size()
cursor = Drawable.String(message=text[c:c+1], fontSize=size, color=bgColor,
bgcolor=color)
cw, ch = cursor.image.get_size()
t.image.blit(cursor.image, (bw, 0))
w, h = t.image.get_size()
if length is not None:
s = pygame.Surface((length, h))
s.fill(bgColor)
s.blit(t.image, (0, 0))
w = length
self.length = w
bw = w + 2*padding + 2*border
bh = h + 2*padding + 2*border
if border:
#print 'boxing', dir(window)
box = Drawable.Rectangle(w=window, width=bw, height=bh,
color=borderColor)
iw = w + 2*padding
ih = h + 2*padding
pygame.draw.rect(box.image, bgColor,
((border, border), (iw, ih)))
else:
#print 'boxing', dir(window)
box = Drawable.Rectangle(w=window, width=bw, height=bh, color=bgColor)
box.image.blit(t.image, (border+padding, border+padding))
if bgColor == TRANSPARENT:
box.image.set_colorkey(TRANSPARENT)
self.box = box
class SpriteTextButton(TextButton, SpriteButton):
"""Clickable button which is also a sprite with text printed on it."""
def __init__(self,
window=None,
text='',
length=None,
callback=None,
size=22,
color=WHITE,
border=2,
borderColor=LGREEN,
padding=5,
bgColor=BLACK,
group=None):
#print 'stb', window.offset
TextButton.__init__(self, window, text, length, callback, size,
color, border, borderColor, padding, bgColor)
SpriteButton.__init__(self, self.box, callback, group)
class StationaryTextButton(TextButton, StationaryButton):
"""Clickable button which is also a sprite with text printed on it
and does not need to move."""
def __init__(self,
window=None,
text="",
length=None,
callback=None,
size=22,
color=WHITE,
border=1,
borderColor=LGREEN,
padding=5,
bgColor=BLACK,
group=None):
TextButton.__init__(self, window, text, length, callback, size,
color, border, borderColor, padding, bgColor)
StationaryButton.__init__(self, sprite=self.box, callback=callback,
group=group)
class TextInput(SpriteTextButton):
"""Used to gather text input from the user."""
def __init__(self,
window=None,
text='',
prompt='',
maxLength=10,
length=150,
callback=None,
size=22,
color=WHITE,
border=1,
borderColor=LGREEN,
padding=5,
bgColor=BLACK,
inactiveColor=LGRAY,
inactiveBgColor=GRAY,
group=None):
"""
Initialize the TextInput widget.
@param window: Layer on which sprite lives.
@param text: Initial text in the window.
@param maxLength: Maximum number of characters in input.
@param length: Width of the text window in pixels.
@param callback: Function to call when RETURN is pressed.
@param size: Font size.
@param color: Text color.
@param border: Thickness of text window border (0 for no border)
@param borderColor: Color of window border (if any)
@param padding: Space between text and edge of window or border.
@param bgColor: Background color of text window.
@param inactiveColor: Text color when widget is inactive.
@param inactiveBgColor: Background color when widget is inactive.
@param group: Additional group/ groups that should watch for
this widget's events.
"""
self.maxLength = maxLength
self.text = text
self.prompt = prompt
self.text_content = text
t = prompt + text + " " * (maxLength - len(text))
self.active = 0
SpriteTextButton.__init__(self, window, t, length, callback, size, color,
border, borderColor, padding, bgColor, group)
repeater = Event.Repeat_KEY_Event(on_hold=self.addLetter, group=group)
self.events.add(repeater)
self.events.add(repeater.contains.events())
self.repeater = repeater
self.events.add(Event.KEYUP_Event(key=K_RETURN, callback=self.done, group=group))
self.activeColor = color
self.activeBgColor = bgColor
self.inactiveColor = inactiveColor
self.inactiveBgColor = inactiveBgColor
def done(self, pygame_event=None):
"""return the text_content.
If this is triggered from one of the widget's own events (ie K_RETURN),
it only returns the contents if the widget is active. Otherwise, if it
was called from outside (pygame_event is None) it returns the content
no matter what it's state was (active or inactive). This allows another
button to call in to the TextInput and force it to trigger its callback.
@param pygame_event: C{pygame.Event} triggering the call. If this is
None, C{done()} must have been called from outside the widget, and
so it should just go ahead and callback with its text.
"""
#print 'done', pygame_event, self.active
if self.active or pygame_event is None:
text = self.text_content
self.callback(text)
self.text = ""
self.text_content = ""
if hasattr(self, 'cursor_pos'):
del(self.cursor_pos)
self.updateButton()
else:
return
def addLetter(self, pygameEvent):
"""Process the next keypress.
@param pygameEvent: L{pygame.event.Event}. Usually passed in from
the pygsear Event handler.
"""
#print 'adding letter'
if not self.active:
return
k = pygameEvent.key
text = self.text_content
new_text = text
if k in (K_RETURN, K_ESCAPE):
return
elif k == K_LEFT:
self.cursor_left()
return
elif k == K_RIGHT:
self.cursor_right()
return
letter = pygameEvent.unicode
if letter:
if hasattr(self, 'cursor_pos'):
c = self.cursor_pos
t = list(text)
if k == K_BACKSPACE:
t.pop(c-1)
self.cursor_pos -= 1
elif k == K_DELETE:
t.pop(c)
if self.cursor_pos > len(t) - 1:
del(self.cursor_pos)
elif len(t) >= self.maxLength:
Util.beep()
#return
else:
t.insert(c, letter)
self.cursor_pos += 1
new_text = ''.join(t)
else:
if k == K_BACKSPACE:
if text:
new_text = text[0:-1]
else:
Util.beep()
elif k == K_DELETE:
Util.beep()
elif len(text) >= self.maxLength:
Util.beep()
#return
else:
new_text = text + letter
if new_text != text:
self.set_text(new_text)
def set_text(self, text):
"""Save a copy of the content of the text field and update.
Since the actual field is padded with spaces when it is rendered,
it is necessary to save a copy of the actual contents before going
to render.
"""
#self.text = text
self.text_content = text
self.updateButton()
def cursor_left(self):
text = self.text_content
if text:
if not hasattr(self, 'cursor_pos'):
pos = len(text) - 1
else:
pos = self.cursor_pos - 1
if pos < 0:
pos = 0
Util.beep()
self.cursor_pos = pos
self.updateButton()
else:
pass
Util.beep()
def cursor_right(self):
if not hasattr(self, 'cursor_pos'):
Util.beep()
else:
pos = self.cursor_pos + 1
if pos == len(self.text_content):
del(self.cursor_pos)
else:
self.cursor_pos = pos
self.updateButton()
def updateButton(self):
pos = self.get_position()
text = self.text_content
self.text = self.prompt + text + " " * (self.maxLength - len(text))
self.makeButton()
self.image = self.box.image
self.set_position(pos)
self.udraw()
def makeButton(self):
if self.prompt:
if hasattr(self, 'cursor_pos'):
promptlen = len(self.prompt)
self.cursor_pos += promptlen
TextButton.makeButton(self)
self.cursor_pos -= promptlen
else:
TextButton.makeButton(self)
else:
TextButton.makeButton(self)
def activate(self):
Widget.activate(self)
self.color = self.activeColor
self.bgColor = self.activeBgColor
self.updateButton()
def deactivate(self):
Widget.deactivate(self)
self.color = self.inactiveColor
self.bgColor = self.inactiveBgColor
self.updateButton()
def fire(self, pygameEvent):
self.armed = 0
self.callback(pygameEvent)
def clicked(self, pygameEvent):
pos = pygameEvent.pos
try:
offset = self.window.rect[0:2]
except AttributeError:
offset = (0, 0)
if self.rect.move(offset[0], offset[1]).collidepoint(pos):
self.activate()
return 1
else:
self.deactivate()
return 0
def released(self, pygameEvent):
pass
def modal(self):
quit = Event.QUIT_Event(callback=self._stop)
self.events.add(quit)
stop = Event.KEYUP_Event(key=K_ESCAPE, callback=self._stop)
self.events.add(stop)
self.stop = 0
self.activate()
while not self.stop and self.active:
try:
conf.ticks = min(20, conf.game.clock.tick(conf.MAX_FPS))
except AttributeError:
conf.ticks = 20
self.clear()
self.events.check()
self.udraw()
            if not self.repeater.key_held and not self.stop:  # TextInput owns its repeater directly
ev = pygame.event.wait()
pygame.event.post(ev)
self.deactivate()
quit.kill()
stop.kill()
self.uclear()
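# Sketch (not in the original file): collect one line of text from the
# user.  Assumes an initialized window; the callback name is ours.
#
#   def got_text(text):
#       print 'user typed:', text
#
#   entry = TextInput(prompt='Name: ', maxLength=20, callback=got_text)
#   entry.modal()  # returns when the user presses ENTER or ESC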
class Dialog(Drawable.Layer, Widget):
def __init__(self, window=None, size=None, callback=None):
Widget.__init__(self, callback)
if size is None:
w, h = conf.WINSIZE
w = int(0.5 * w)
h = int(0.3 * h)
else:
w, h = size
Drawable.Layer.__init__(self, w=window, size=(w, h))
self.center()
self.events.add(Event.KEYUP_Event(key=K_ESCAPE, callback=self.cancel))
self.set_background(color=BLACK)
self.border(width=3, color=RED)
def cancel(self, pygame_event=None):
self.teardown()
def teardown(self):
self._stop()
self.uclear()
self.kill()
self.events.kill()
def modal(self):
quit_ev = Event.QUIT_Event(callback=self._quit)
self.events.add(quit_ev)
stop_ev = Event.KEYUP_Event(key=K_ESCAPE, callback=self._stop)
self.events.add(stop_ev)
self.stop = 0
while not self.stop:
self.clear()
self.events.check()
self.udraw()
quit_ev.kill()
stop_ev.kill()
self.uclear()
class Dialog_OK(Dialog):
"""Pop up a window to get some input."""
message = None
def __init__(self,
window=None,
size=None,
message=None,
centertext=1,
callback=None):
"""Initialize dialog
@param window: Layer in which to draw the dialog box.
@param size: Tuple of C{(width, height)} for dialog box.
@param message: String message to be displayed. Text will be wrapped
automatically to fit inside the box, but an error will be raised
if the text will not fit.
@param centertext: Center justify the message by default.
@param callback: Function to call when the OK button is clicked
or the enter key is pressed.
"""
Dialog.__init__(self, window, size, callback)
if message is None:
if self.message is None:
message = 'OK ?'
else:
message = self.message
self.events.add(Event.KEYUP_Event(key=K_RETURN, callback=self.ok))
w, h = self.get_size()
rect_w = int(0.9 * w)
rect_h = int(h - 70)
rect = pygame.Rect(0, 0, rect_w, rect_h)
textrect = Util.render_textrect(message, rect, fontSize=24, justification=centertext)
s = Drawable.Image(w=self, image=textrect)
s.center(y=15)
s = Drawable.Stationary(w=self, sprite=s)
s.draw()
ok = SpriteTextButton(self, ' OK ', callback=self.ok,
group=self.events)
ok.center(y=-30)
ok = Drawable.Stationary(w=self, sprite=ok)
ok.draw()
self.return_ok = None
def ok(self, pygame_event=None):
self.teardown()
self.callback(pygame_event)
self.return_ok = 1
def modal(self):
Dialog.modal(self)
if self.return_ok == 1:
return 1
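# Sketch (not in the original file): a blocking confirmation box.
#
#   ok = Dialog_OK(message='Overwrite the saved game?')
#   if ok.modal():
#       print 'confirmed'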
class Dialog_LineInput(Dialog_OK):
"""Used to get a single line of input"""
def __init__(self, window=None, size=None, message='', default='', callback=None, group=None):
"""Initialize the line input dialog.
@param window: Layer in which to draw the dialog box.
@param message: Text message to print above the input box.
@param callback: Function to call when input is finished. returns
the input text to the callback function.
@param group: Other event group in which to include this widget's
events, in addition to its own event group.
"""
Dialog_OK.__init__(self, window, size, message, callback=callback)
w, h = self.get_size()
self.line = TextInput(self, callback=self.finished, text=default, maxLength=50, length=w-50, group=self.events)
self.events.add(self.line.events.events())
self.line.center()
self.line.activate()
self.has_finished = 0
if group is not None:
group.add(self.events.events())
group.add(self.line.events.events())
self.return_text = ''
def ok(self, pygame_event):
"""Called when OK button is clicked. Also called when widget is active
and ENTER key is pressed.
@param pygame_event: The pygame event that triggered the callback.
"""
if pygame_event.type == MOUSEBUTTONUP or self.line.active:
self.line.done()
if self.has_finished:
self.teardown()
def finished(self, text):
"""Default callback when text input is complete."""
if not self.has_finished:
self.return_text = text
self.callback(text)
self.line.deactivate()
self.has_finished = 1
def modal(self):
quit_ev = Event.QUIT_Event(callback=self._quit)
self.events.add(quit_ev)
stop_ev = Event.KEYUP_Event(key=K_ESCAPE, callback=self._stop)
self.events.add(stop_ev)
self.line.activate()
self.stop = 0
while not self.stop:
try:
conf.ticks = min(20, conf.game.clock.tick(conf.MAX_FPS))
except AttributeError:
conf.ticks = 20
self.clear()
self.events.check()
self.line.udraw()
self.udraw()
if not self.line.repeater.key_held and not self.stop:
ev = pygame.event.wait()
pygame.event.post(ev)
quit_ev.kill()
stop_ev.kill()
self.uclear()
if self.return_text:
return self.return_text
class Dialog_ColorSelector(Dialog_OK):
"""Used to choose a color interactively"""
def __init__(self, window=None):
"""Initialize the color selector"""
Dialog_OK.__init__(self, window=window, size=(400, 380))
self.color_square = Drawable.Square(w=self, side=256)
self.color_square.set_position((10, 10))
self.color_square_array = pygame.surfarray.array2d(self.color_square.image)
#self.R = 0
self.hue = 0
self.set_color_square()
self.color_rect = Drawable.Rectangle(w=self, width=20, height=360)
self.color_rect.set_position((370, 10))
self.set_color_rect()
self.show_square = Drawable.Square(w=self, side=50)
self.show_square.set_position((300, 10))
self.color_chosen = WHITE
self.set_color_chosen(self.color_chosen)
self.mousebuttonup(None)
def set_color_square(self):
"""Paint a square with possible colors.
This uses the ColorSelector C{hue} property for the hue of the
color, then ranges over all possible saturations and values to make
a square.
This is way too slow.
"""
image = self.color_square.image
h = self.hue
r, g, b = colorsys.hsv_to_rgb(h, 1, 1)
rmax = r * 255
gmax = g * 255
bmax = b * 255
dr = (255 - rmax) / 255.0
dg = (255 - gmax) / 255.0
db = (255 - bmax) / 255.0
for y in range(256):
r = g = b = 0
xdr = rmax / 255.0
xdg = gmax / 255.0
xdb = bmax / 255.0
for x in range(256):
image.set_at((x, y), (r, g, b))
r += xdr
g += xdg
b += xdb
rmax += dr
gmax += dg
bmax += db
self.color_square.udraw()
# image = self.color_square.image
#
# h = self.hue
# r, g, b = colorsys.hsv_to_rgb(h, 1, 1)
#
# x,y = N.indices((256,256), N.Float)
# y /= 256.0
# row_mul = 1-y
#
# y *= x
# x *= row_mul
#
# rgb = N.zeros((256,256,3), N.Float)
# rgb[...,0] = x * r + y
# rgb[...,1] = x * g + y
# rgb[...,2] = x * b + y
#
# a = pygame.surfarray.pixels3d(image)
# a[...] = rgb.astype(N.UnsignedInt8)
#
# self.color_square.udraw()
def set_color_rect(self):
"""Set up the chooser for the red value of the color."""
image = self.color_rect.image
# for R in range(256):
# pygame.draw.line(image, (R, 0, 0), (0, R), (19, R))
for hue in range(360):
h = hue / 360.0
s = v = 1.0
r, g, b = colorsys.hsv_to_rgb(h, s, v)
R, G, B = 255 * r, 255* g, 255 * b
pygame.draw.line(image, (R, G, B), (0, hue), (19, hue))
self.color_rect.udraw()
def set_color_chosen(self, color):
"""Set the chosen color, and update the display of the chosen color."""
self.color_chosen = color
self.show_square.set_color(color)
self.show_square.udraw()
def mousebuttondown(self, ev):
"""Set a flag indicating the mouse button is held down."""
self.button_pressed = 1
def mousebuttonup(self, ev):
"""Reset the mouse button held down flag."""
self.button_pressed = 0
def mousebutton_action(self):
"""Actions to perform any time the mouse button is held down.
Checks to see if the mouse is inside either of the C{color_square}
or the C{color_rect} and either sets the chosen color, or sets the
red value for possible colors and updates the C{color_square}.
"""
try:
offset = self.rect[0:2]
except AttributeError:
offset = (0, 0)
lx, ly = offset
x, y = pygame.mouse.get_pos()
pos = x-lx, y-ly
if self.color_square.rect.collidepoint(pos):
try:
pos = x-lx-10, y-ly-10
color = self.color_square.image.get_at(pos)
except IndexError:
pass
else:
self.set_color_chosen(color[0:3])
elif self.color_rect.rect.collidepoint(pos):
try:
pos = x-lx-370, y-ly-10
color = self.color_rect.image.get_at(pos)
except IndexError:
pass
else:
R, G, B = color[0:3]
r, g, b = R / 256.0, G / 256.0, B / 256.0
h, s, v = colorsys.rgb_to_hsv(r, g, b)
self.hue = h
self.set_color_square()
def modal(self):
quit_ev = Event.QUIT_Event(callback=self._quit)
self.events.add(quit_ev)
stop_ev = Event.KEYUP_Event(key=K_ESCAPE, callback=self._stop)
self.events.add(stop_ev)
down = Event.MOUSEBUTTONDOWN_Event(callback=self.mousebuttondown)
self.events.add(down)
up = Event.MOUSEBUTTONUP_Event(callback=self.mousebuttonup)
self.events.add(up)
self.stop = 0
while not self.stop:
self.clear()
self.events.check()
if self.button_pressed:
self.mousebutton_action()
self.udraw()
quit_ev.kill()
stop_ev.kill()
self.uclear()
if self.return_ok:
return self.color_chosen
class Console(Widget):
def __init__(self, locals={}, size=(600, 200)):
self.locals = locals
Widget.__init__(self)
self.size = size
self.history = []
self.history_curr_index = 0
self.buffer = []
self.paged_up = 0
self.save_stdout = sys.stdout
self.save_stderr = sys.stderr
self.make_widget()
def make_widget(self):
self.events.kill()
size = self.size
w, h = size
chars = int(w / 8.0)
self.layer = Drawable.Layer(size=size, color=BLACK)
self.layer.center(x=10, y=-10)
self.terp = InteractiveConsole(self.locals)
self.line = TextInput(self.layer, callback=self.run_command, text='', prompt='>>> ',
maxLength=chars, length=w, border=0, group=self.events)
self.events.add(self.line.events.events())
self.line.center(x=10, y=-10)
self.events.add(self.line.events)
#self.events.add(Event.KEYUP_Event(key=K_F1, callback=self.toggle_visible))
self.events.add(Event.KEYUP_Event(key=K_UP, callback=self.history_prev))
self.events.add(Event.KEYUP_Event(key=K_DOWN, callback=self.history_next))
self.events.add(Event.KEYUP_Event(key=K_PAGEUP, callback=self.handle_pageup))
self.events.add(Event.KEYUP_Event(key=K_PAGEDOWN, callback=self.handle_pagedown))
self.lines_width = int(0.95 * w)
self.lines_height = 5000
self.lines_per_screen = int(0.8 * (h-45))
self.lines = Drawable.Layer(w=self.layer, size=(self.lines_width, self.lines_height), color=BLACK)
self.lines.center(x=10, y=15)
self.lines_position = h - 52
def resize(self, size):
self.size = size
self.make_widget()
self.layer.udraw()
def activate(self):
Widget.activate(self)
self.line.activate()
self.layer.udraw()
def deactivate(self):
Widget.deactivate(self)
self.line.deactivate()
self.layer.uclear()
def new_line(self, text, prompt=''):
save_text = prompt + text
s = Drawable.String(w=self.lines, message=save_text, fontSize=22)
w, h = s.get_size()
# deal with output longer than one line
if w > self.lines_width:
try:
t = Util.render_textrect(save_text, pygame.Rect(0, 0, self.lines_width, 1500), fontSize=22, trim=1)
s = Drawable.Image(image=t)
except Exception, e:
s = Drawable.String(w=self.lines, message='Output too long for this window...', fontSize=22)
w, h = s.get_size()
s.set_position((5, self.lines_position))
self.lines_position += h
if self.lines_position > self.lines_height - 50:
# starting to run out of room in the lines surface...
# i am not sure how large to make this or if i should
# bother trying to extend it if it starts to get full.
Util.beep()
s = Drawable.Stationary(w=self.lines, sprite=s)
s.draw()
self.lines.clear()
self.lines.nudge(dy=-h)
self.lines.udraw()
def write(self, text):
self.handle_print(text)
def handle_print(self, text):
text = text.strip()
lines = str(text).split('\n')
for line in lines:
self.new_line(line, prompt='')
def handle_pageup(self, pygame_event=None):
self.paged_up += self.lines_per_screen
self.lines.clear()
self.lines.nudge(dy=self.lines_per_screen)
self.lines.udraw()
def handle_pagedown(self, pygame_event=None):
self.paged_up -= self.lines_per_screen
if self.paged_up >= 0:
self.lines.clear()
self.lines.nudge(dy=-self.lines_per_screen)
self.lines.udraw()
else:
self.paged_up = 0
Util.beep()
if self.paged_up == 0:
self.line.udraw()
def handle_exception(self, e):
for line in str(e).split('\n'):
self.new_line(line, prompt='')
self.line.prompt = '>>> '
def run_command(self, text):
"""Process the next line of input.
This is called when the user presses ENTER in the console widget.
If this new text completes a command, the command is executed,
otherwise this text is added to a buffer awaiting the next line.
@param text: The next line of input. Does not include the newline.
"""
if text:
self.history.append(text)
self.history_curr_index = len(self.history)
self.new_line(text, self.line.prompt)
self.buffer.append(text)
command = '\n'.join(self.buffer)
sys.stdout = self
sys.stderr = self
code = None
try:
code = compile_command(command)
except Exception, e:
self.handle_exception(e)
self.buffer = []
else:
if code is not None:
self.deactivate()
try:
self.terp.runcode(code)
except SyntaxError, e:
self.handle_exception(e)
except Exception, e:
self.handle_exception(e)
self.buffer = []
self.line.prompt = '>>> '
self.activate()
else:
self.line.prompt = '... '
sys.stdout = self.save_stdout
sys.stderr = self.save_stderr
def toggle_visible(self, pygame_event):
if self.active:
self.deactivate()
else:
self.activate()
def history_prev(self, pygame_event):
if self.history_curr_index == len(self.history):
self.partial_line = self.line.text_content
self.history_curr_index -= 1
if self.history_curr_index < 0:
Util.beep()
self.history_curr_index = 0
if self.history:
text = self.history[self.history_curr_index]
self.line.set_text(text)
def history_next(self, pygame_event):
self.history_curr_index += 1
if self.history_curr_index > len(self.history)-1:
if self.line.text_content == self.partial_line:
Util.beep()
self.history_curr_index = len(self.history)
self.line.set_text(self.partial_line)
elif self.history:
text = self.history[self.history_curr_index]
self.line.set_text(text)
else:
Util.beep()
def set_modal_events(self):
self.quit_ev = Event.QUIT_Event(callback=self._quit)
self.events.add(self.quit_ev)
self.stop_ev = Event.KEYUP_Event(key=K_ESCAPE, callback=self._stop)
self.events.add(self.stop_ev)
def modal(self):
self.activate()
self.set_modal_events()
self.new_line('Press ESC to close console')
self.line.udraw()
self.stop = 0
while not self.stop:
try:
conf.ticks = min(20, conf.game.clock.tick(conf.MAX_FPS))
except AttributeError:
conf.ticks = 20
self.layer.clear()
self.events.check()
if self.active:
self.line.udraw()
self.layer.udraw()
if not self.line.repeater.key_held and not self.stop:
ev = pygame.event.wait()
pygame.event.post(ev)
self.quit_ev.kill()
self.stop_ev.kill()
self.layer.uclear()
self.deactivate()
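# Sketch (not in the original file): an in-game Python console exposing
# a few of the game's objects for live inspection ("player" is a
# hypothetical object from the embedding game).
#
#   console = Console(locals={'player': player, 'conf': conf})
#   console.modal()  # ESC closes the console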
class EscCatcher(Widget):
def __init__(self, callback=None):
if callback is None:
callback = self._quit
self.set_callback(callback)
Event.KEYDOWN_Event(self.escape)
def escape(self, pygameEvent):
key = pygameEvent.key
if key == K_ESCAPE:
self.callback(self)
def _quit(self, pygame_event=None):
import sys
sys.exit()
| gpl-2.0 | 7,833,039,730,303,193,000 | 29.604668 | 119 | 0.534674 | false | 3.960373 | false | false | false |
openstack/vitrage | tools/datasource-scaffold/sample/transformer.py | 1 | 2558 | # Copyright 2018 - Vitrage team
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from oslo_log import log as logging
from vitrage.common.constants import EntityCategory
from vitrage.common.constants import VertexProperties as VProps
from vitrage.datasources.resource_transformer_base import \
ResourceTransformerBase
from vitrage.datasources.sample import SAMPLE_DATASOURCE
from vitrage.datasources.sample import SampleFields
from vitrage.datasources import transformer_base
import vitrage.graph.utils as graph_utils
LOG = logging.getLogger(__name__)
class SampleTransformer(ResourceTransformerBase):
def __init__(self, transformers):
super(SampleTransformer, self).__init__(transformers)
def _create_snapshot_entity_vertex(self, entity_event):
return self._create_vertex(entity_event)
def _create_update_entity_vertex(self, entity_event):
return self._create_vertex(entity_event)
def _create_snapshot_neighbors(self, entity_event):
return self._create_sample_neighbors(entity_event)
def _create_update_neighbors(self, entity_event):
return self._create_sample_neighbors(entity_event)
def _create_entity_key(self, entity_event):
"""the unique key of this entity"""
entity_id = entity_event[VProps.ID]
entity_type = entity_event[SampleFields.TYPE]
key_fields = self._key_values(entity_type, entity_id)
return transformer_base.build_key(key_fields)
@staticmethod
def get_vitrage_type():
return SAMPLE_DATASOURCE
def _create_vertex(self, entity_event):
return graph_utils.create_vertex(
self._create_entity_key(entity_event),
vitrage_category=EntityCategory.RESOURCE,
vitrage_type=None, # FIXME
vitrage_sample_timestamp=None, # FIXME
entity_id=None, # FIXME
update_timestamp=None, # FIXME
entity_state=None, # FIXME
metadata=None) # FIXME
def _create_sample_neighbors(self, entity_event):
return []
| apache-2.0 | -8,240,631,817,514,356,000 | 36.072464 | 75 | 0.710321 | false | 3.953632 | false | false | false |
belokop/indico_bare | indico/modules/events/requests/base.py | 2 | 7396 | # This file is part of Indico.
# Copyright (C) 2002 - 2016 European Organization for Nuclear Research (CERN).
#
# Indico is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License as
# published by the Free Software Foundation; either version 3 of the
# License, or (at your option) any later version.
#
# Indico is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Indico; if not, see <http://www.gnu.org/licenses/>.
from __future__ import unicode_literals
from flask import render_template
from flask_pluginengine import plugin_context
from wtforms.fields import TextAreaField, SubmitField
from indico.core.db import db
from indico.modules.events.requests.models.requests import RequestState
from indico.modules.events.requests.notifications import (notify_new_modified_request, notify_withdrawn_request,
notify_accepted_request, notify_rejected_request)
from indico.util.date_time import now_utc
from indico.util.i18n import _
from indico.web.flask.templating import get_overridable_template_name, get_template_module
from indico.web.forms.base import FormDefaults, IndicoForm
class RequestFormBase(IndicoForm):
def __init__(self, *args, **kwargs):
self.event = kwargs.pop('event')
self.request = kwargs.pop('request')
super(RequestFormBase, self).__init__(*args, **kwargs)
class RequestManagerForm(IndicoForm):
action_buttons = {'action_save', 'action_accept', 'action_reject'}
comment = TextAreaField(_('Comment'),
description=_('The comment will be shown only if the request is accepted or rejected.'))
action_save = SubmitField(_('Save'))
action_accept = SubmitField(_('Accept'))
action_reject = SubmitField(_('Reject'))
class RequestDefinitionBase(object):
"""Defines a service request which can be sent by event managers."""
#: the plugin containing this request definition - assigned automatically
plugin = None
#: the unique internal name of the request type
name = None
#: the title of the request type as shown to users
title = None
#: the :class:`IndicoForm` to use for the request form
form = None
#: the :class:`IndicoForm` to use for the request manager form
manager_form = RequestManagerForm
#: default values to use if there's no existing request
form_defaults = {}
@classmethod
def render_form(cls, **kwargs):
"""Renders the request form
:param kwargs: arguments passed to the template
"""
tpl = get_overridable_template_name('event_request_details.html', cls.plugin, 'events/requests/')
return render_template(tpl, **kwargs)
@classmethod
def create_form(cls, event, existing_request=None):
"""Creates the request form
:param event: the event the request is for
:param existing_request: the :class:`Request` if there's an existing request of this type
:return: an instance of an :class:`IndicoForm` subclass
"""
defaults = FormDefaults(existing_request.data if existing_request else cls.form_defaults)
with plugin_context(cls.plugin):
return cls.form(prefix='request-', obj=defaults, event=event, request=existing_request)
@classmethod
def create_manager_form(cls, req):
"""Creates the request management form
:param req: the :class:`Request` of the request
:return: an instance of an :class:`IndicoForm` subclass
"""
defaults = FormDefaults(req, **req.data)
with plugin_context(cls.plugin):
return cls.manager_form(prefix='request-manage-', obj=defaults)
@classmethod
def get_notification_template(cls, name, **context):
"""Gets the template module for a notification email
:param name: the template name
:param context: data passed to the template
"""
tpl = get_overridable_template_name(name, cls.plugin, 'events/requests/emails/', 'emails/')
return get_template_module(tpl, **context)
@classmethod
def can_be_managed(cls, user):
"""Checks whether the user is allowed to manage this request type
:param user: a :class:`.User`
"""
raise NotImplementedError
@classmethod
def get_manager_notification_emails(cls):
"""Returns the email addresses of users who manage requests of this type
The email addresses are used only for notifications.
It usually makes sense to return the email addresses of the users who
        pass the :meth:`can_be_managed` check.
:return: set of email addresses
"""
return set()
@classmethod
def send(cls, req, data):
"""Sends a new/modified request
:param req: the :class:`Request` of the request
:param data: the form data from the request form
"""
req.data = dict(req.data or {}, **data)
is_new = req.id is None
if is_new:
db.session.add(req)
db.session.flush() # we need the creation dt for the notification
notify_new_modified_request(req, is_new)
@classmethod
def withdraw(cls, req, notify_event_managers=True):
"""Withdraws the request
:param req: the :class:`Request` of the request
:param notify_event_managers: if event managers should be notified
"""
req.state = RequestState.withdrawn
notify_withdrawn_request(req, notify_event_managers)
@classmethod
def accept(cls, req, data, user):
"""Accepts the request
To ensure that additional data is saved, this method should
        call :meth:`manager_save`.
:param req: the :class:`Request` of the request
:param data: the form data from the management form
:param user: the user processing the request
"""
cls.manager_save(req, data)
req.state = RequestState.accepted
req.processed_by_user = user
req.processed_dt = now_utc()
notify_accepted_request(req)
@classmethod
def reject(cls, req, data, user):
"""Rejects the request
To ensure that additional data is saved, this method should
        call :meth:`manager_save`.
:param req: the :class:`Request` of the request
:param data: the form data from the management form
:param user: the user processing the request
"""
cls.manager_save(req, data)
req.state = RequestState.rejected
req.processed_by_user = user
req.processed_dt = now_utc()
notify_rejected_request(req)
@classmethod
def manager_save(cls, req, data):
"""Saves management-specific data
This method is called when the management form is submitted without
accepting/rejecting the request (which is guaranteed to be already
accepted or rejected).
:param req: the :class:`Request` of the request
:param data: the form data from the management form
"""
req.comment = data['comment']
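# Example (not part of Indico itself): the skeleton of a concrete
# request type a plugin could register.  All names below are
# hypothetical; a real plugin supplies its own form and access check.
#
# class AVRequest(RequestDefinitionBase):
#     name = 'audiovisual'
#     title = _('Audiovisual services')
#     form = RequestFormBase  # a real plugin would subclass this
#
#     @classmethod
#     def can_be_managed(cls, user):
#         return user.is_admin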
| gpl-3.0 | 1,224,264,384,494,363,000 | 36.353535 | 116 | 0.663467 | false | 4.302501 | false | false | false |
kusha/dialog | dialog/returns.py | 1 | 1993 | #!/usr/bin/env python3
"""
Return answers implementation.
"""
__author__ = "Mark Birger"
__date__ = "20 Jan 2015"
import multiprocessing
class Returns:
"""
Class manages routines.
"""
def __init__(self):
self.routines = []
self.processes = {}
def new_return(self, answers):
"""
Creates Queue, adds it to the pool.
"""
routine = {}
routine["answers"] = answers
routine["queue"] = multiprocessing.Queue(maxsize=0)
self.routines.append(routine)
return routine["queue"]
def get_returns(self):
"""
For each routine, get indexes of returns.
        Returns every "return" statement.
"""
# print("GETTING RETURNS", self.routines)
answers = []
for routine in self.routines:
while not routine["queue"].empty():
answer_idx = routine["queue"].get()
answers.append(routine["answers"][answer_idx])
to_delete = []
for name, routine in self.processes.items():
while not routine["responses_queue"].empty():
response = routine["responses_queue"].get()
for idx, case in enumerate(routine["cases"][0]):
if case == response:
answers.append(routine["cases"][1][idx])
if not routine["process"].is_alive():
                # TODO: check whether this is safe with respect to its child states
to_delete.append(name)
for each in to_delete:
del self.processes[each]
return answers
def new_routine(self, process, name, requests_queue, responses):
"""
Adds new routine to the list.
"""
self.processes[name] = {
"process": process,
"requests_queue": requests_queue, #TODO: remove, unused, realised with Scope module
"cases": responses[0],
"responses_queue": responses[1],
}
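# A minimal sketch (not part of the original module) of the queue
# protocol: a routine reports the *index* of the chosen answer and
# get_returns() translates it back into the answer itself.
if __name__ == "__main__":
    import time
    returns = Returns()
    queue = returns.new_return(["yes", "no"])
    queue.put(0)  # pretend a routine picked answer index 0
    time.sleep(0.1)  # let the queue's feeder thread flush
    print(returns.get_returns())  # -> ['yes']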
| mit | -4,341,747,657,530,639,400 | 30.634921 | 95 | 0.539388 | false | 4.419069 | false | false | false |
INTI-CMNB-FPGA/fpga_helpers | fpgahelpers/fpga_prog.py | 1 | 2865 | #!/usr/bin/python
#
# FPGA Prog, transfers a BitStream to a device
# Copyright (C) 2015-2019 INTI
# Copyright (C) 2015-2019 Rodrigo A. Melo
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
import os
import sys
import database as db
import common
def main():
options = common.get_options(__file__)
# Processing the options
if not os.path.exists(options.bit) and options.device not in ['detect','unlock'] and options.tool not in ['libero']:
sys.exit('fpga_prog (ERROR): bitstream needed but not found.')
if options.board and options.board not in db._boards:
sys.exit("fpga_prog (ERROR): unsupported board")
if options.board is not None and options.device not in ['detect','unlock']:
if options.device + '_name' not in db._boards[options.board]:
sys.exit(
"fpga_prog (ERROR): the device <%s> is not supported in the board <%s>." %
(options.device, options.board)
)
else:
options.position = db._boards[options.board]['fpga_pos']
if options.device != 'fpga':
options.memname = db._boards[options.board][options.device + '_name']
options.width = db._boards[options.board][options.device + '_width']
if not options.debug:
# Preparing files
        temp = None
if not os.path.exists('options.tcl'):
temp = open('options.tcl','w')
if 'memname' in options:
for dev in ['fpga', 'spi', 'bpi', 'xcf']:
temp.write("set %s_name %s\n" % (dev, options.memname))
if 'position' in options:
for dev in ['fpga', 'xcf']:
temp.write("set %s_pos %s\n" % (dev, options.position))
if 'width' in options:
for dev in ['spi', 'bpi', 'xcf']:
temp.write("set %s_width %s\n" % (dev, options.width))
temp.flush()
# Executing
text = common.get_makefile_content(
tool=options.tool, task=None, dev=options.device,
path=(common.get_script_path(__file__) + "/tcl")
)
common.execute_make(__file__, text)
if temp is not None:
temp.close()
os.remove('options.tcl')
else:
print(options)
if __name__ == "__main__":
main()
| gpl-3.0 | -7,260,991,947,493,755,000 | 37.2 | 120 | 0.611518 | false | 3.725618 | false | false | false |
bobcolner/pgrap | pgrap/pgrap.py | 1 | 2914 | import psycopg2
import logging
import os
_logger = logging.getLogger(__name__)
def query(conn, sql, results='namedtuple'):
"Issue SQL query that returns a result set."
return execute(conn, sql, results=results)
def execute(conn, sql, data=None, results=False):
"Issue a general SQL statment. Optinally specify results cursor type."
with conn:
if results:
from psycopg2 import extras
if results == 'pgdict':
cur_type = psycopg2.extras.DictCursor
elif results == 'dict':
cur_type = psycopg2.extras.RealDictCursor
elif results == 'logging':
cur_type = psycopg2.extras.LoggingCursor
elif results == 'namedtuple':
cur_type = psycopg2.extras.NamedTupleCursor
with conn.cursor(cursor_factory=cur_type) as cursor:
cursor.execute(query=sql, vars=data)
_logger.info('fetching results: {0}'.format(sql))
return cursor.fetchall()
else:
with conn.cursor() as cursor:
cursor.execute(query=sql, vars=data)
_logger.info('executing statment: {0}'.format(sql))
def exec_psql(conn, sql_path, results=True, **kwargs):
"Execute a parameterized .psql file"
with open(sql_path, 'r') as sql_file:
sql_template = sql_file.read()
sql = sql_template.format(**kwargs)
_logger.info('executing psql file: {0}'.format(sql_path))
execute(conn, sql, results=results)
def multi_insert(conn, data, table, column_list, schema='public'):
    "Issue a multi-row insert"
    # http://stackoverflow.com/questions/8134602/psycopg2-insert-multiple-rows-with-one-query
    placeholders = ",".join(["%s"] * len(data[0]))
    sql = '''insert into {schema}.{table} ({columns}) values ({values})
    '''.format(table=table, schema=schema, columns=column_list, values=placeholders)
    with conn:
        with conn.cursor() as cursor:
            cursor.executemany(sql, data)  # one parameterized insert per row of data
    _logger.info('multi-row insert into table: {0}'.format(table))
def copy_from(conn, file_obj, table, columns, sep="\t"):
"Stream file_obj into table"
with conn:
with conn.cursor() as cursor:
cursor.copy_from(file=file_obj, table=table, columns=columns, sep=sep)
_logger.info('psql copy to table: {0}'.format(table))
def drop_table(conn, table, schema='public'):
"Issue 'drop table if exists' statment."
sql = "drop table if exists {schema}.{table};".format(schema=schema, table=table)
execute(conn, sql)
_logger.info('dropped table: {0}'.format(table))
def drop_schema(conn, schema):
"Issue 'drop schema if exists .. cascade' statment."
sql = "drop schema if exists {schema} cascade;".format(schema=schema)
execute(conn, sql)
_logger.info('dropped schema: {0}'.format(schema))
def vacuum(conn, table, schema='public'):
"Vacume & analyze table"
execute(conn, sql="vacuum analyze {schema}.{table};".format(schema=schema, table=table))
| mit | 3,699,232,167,538,300,400 | 41.852941 | 93 | 0.635553 | false | 3.69797 | false | false | false |
manashmndl/bluebrain | src/python/examples/deprecated/xboxjoypad/xboxjoypad.py | 1 | 2251 | import time
import pygame
from time import sleep
from cannybots.radio import BLE
from cannybots.clients.joypad import SimpleJoypadClient
pygame.init()
def main():
xAxis=0
yAxis=0
lastUpdateTime = time.time()
joysticks = []
clock = pygame.time.Clock()
keepPlaying = True
ble = BLE()
myBot = ble.findNearest()
joypadClient = SimpleJoypadClient(myBot)
    # for all the connected joysticks
    for i in range(0, pygame.joystick.get_count()):
        # create a Joystick object in our list
        joysticks.append(pygame.joystick.Joystick(i))
        # initialize the joystick just appended (index -1)
        joysticks[-1].init()
# print a statement telling what the name of the controller is
print "Detected joystick '",joysticks[-1].get_name(),"'"
while keepPlaying:
if (time.time() - lastUpdateTime) > 0.05:
joypadClient.updateJoypadWithZ(int(xAxis*255), int(yAxis*255), 0,0)
lastUpdateTime=time.time()
clock.tick(60)
for event in pygame.event.get():
if event.type == pygame.QUIT:
print "Received event 'Quit', exiting."
keepPlaying = False
elif event.type == pygame.KEYDOWN and event.key == pygame.K_ESCAPE:
print "Escape key pressed, exiting."
keepPlaying = False
elif event.type == pygame.JOYAXISMOTION:
#print "Joystick '",joysticks[event.joy].get_name(),"' axis",event.axis,"motion."
if event.axis==0:
xAxis=joysticks[-1].get_axis(0)
if event.axis==1:
yAxis=joysticks[-1].get_axis(1)
elif event.type == pygame.JOYBUTTONDOWN:
print "Joystick '",joysticks[event.joy].get_name(),"' button",event.button,"down."
elif event.type == pygame.JOYBUTTONUP:
print "Joystick '",joysticks[event.joy].get_name(),"' button",event.button,"up."
elif event.type == pygame.JOYHATMOTION:
print "Joystick '",joysticks[event.joy].get_name(),"' hat",event.hat," moved."
main()
pygame.quit()
| mit | -6,557,765,234,474,229,000 | 37.152542 | 102 | 0.569969 | false | 3.678105 | false | false | false |
taynaud/sparkit-learn | splearn/decomposition/tests/test_truncated_svd.py | 1 | 3718 | import numpy as np
import scipy.linalg as ln
from sklearn.decomposition import TruncatedSVD
from splearn.decomposition import SparkTruncatedSVD
from splearn.decomposition.truncated_svd import svd, svd_em
from splearn.utils.testing import (SplearnTestCase, assert_array_almost_equal,
assert_array_equal, assert_true)
from splearn.utils.validation import check_rdd_dtype
def match_sign(a, b):
a_sign = np.sign(a)
b_sign = np.sign(b)
if np.array_equal(a_sign, -b_sign):
return -b
elif np.array_equal(a_sign, b_sign):
return b
else:
raise AssertionError("inconsistent matching of sign")
class TestSVDFunctions(SplearnTestCase):
def test_svd(self):
X, X_rdd = self.make_dense_rdd()
u, s, v = svd(X_rdd, 1)
u = np.squeeze(np.concatenate(np.array(u.collect()))).T
u_true, s_true, v_true = ln.svd(X)
assert_array_almost_equal(v[0], match_sign(v[0], v_true[0, :]))
assert_array_almost_equal(s[0], s_true[0])
assert_array_almost_equal(u, match_sign(u, u_true[:, 0]))
def test_svd_em(self):
X, X_rdd = self.make_dense_rdd((1000, 4))
u, s, v = svd_em(X_rdd, 1, seed=42, maxiter=50)
u = np.squeeze(np.concatenate(np.array(u.collect()))).T
u_true, s_true, v_true = ln.svd(X)
tol = 1e-1
assert(np.allclose(s[0], s_true[0], atol=tol))
assert(np.allclose(+v, v_true[0, :], atol=tol) |
np.allclose(-v, v_true[0, :], atol=tol))
assert(np.allclose(+u, u_true[:, 0], atol=tol) |
np.allclose(-u, u_true[:, 0], atol=tol))
def test_svd_em_sparse(self):
X, X_rdd = self.make_sparse_rdd((1000, 4))
u, s, v = svd_em(X_rdd, 1, seed=42, maxiter=50)
u = np.squeeze(np.concatenate(np.array(u.collect()))).T
u_true, s_true, v_true = ln.svd(X.toarray())
tol = 1e-1
assert(np.allclose(s[0], s_true[0], atol=tol))
assert(np.allclose(+v, v_true[0, :], atol=tol) |
np.allclose(-v, v_true[0, :], atol=tol))
assert(np.allclose(+u, u_true[:, 0], atol=tol) |
np.allclose(-u, u_true[:, 0], atol=tol))
class TestTruncatedSVD(SplearnTestCase):
def test_same_components(self):
X, X_rdd = self.make_dense_rdd((1000, 10))
n_components = 2
random_state = 42
tol = 1e-7
local = TruncatedSVD(n_components, n_iter=5, tol=tol,
random_state=random_state)
dist = SparkTruncatedSVD(n_components, n_iter=50, tol=tol,
random_state=random_state)
local.fit(X)
dist.fit(X_rdd)
v_true = local.components_
v = dist.components_
tol = 1e-1
assert(np.allclose(+v[0], v_true[0, :], atol=tol) |
np.allclose(-v[0], v_true[0, :], atol=tol))
def test_same_fit_transforms(self):
X, X_rdd = self.make_dense_rdd((1000, 12))
n_components = 4
random_state = 42
tol = 1e-7
local = TruncatedSVD(n_components, n_iter=5, tol=tol,
random_state=random_state)
dist = SparkTruncatedSVD(n_components, n_iter=50, tol=tol,
random_state=random_state)
Z_local = local.fit_transform(X)
Z_dist = dist.fit_transform(X_rdd)
Z_collected = Z_dist.toarray()
assert_true(check_rdd_dtype(Z_dist, (np.ndarray,)))
tol = 1e-1
assert_array_equal(Z_local.shape, Z_collected.shape)
assert(np.allclose(+Z_collected[:, 0], Z_local[:, 0], atol=tol) |
np.allclose(-Z_collected[:, 0], Z_local[:, 0], atol=tol))
| apache-2.0 | 795,953,697,992,878,100 | 36.18 | 78 | 0.562399 | false | 3.116513 | true | false | false |
parkouss/treeherder | treeherder/perf/models.py | 1 | 6070 | from django.core.validators import MinLengthValidator
from django.db import models
from django.utils.encoding import python_2_unicode_compatible
from jsonfield import JSONField
from treeherder.model.models import (MachinePlatform,
OptionCollection,
Repository)
SIGNATURE_HASH_LENGTH = 40L
@python_2_unicode_compatible
class PerformanceFramework(models.Model):
name = models.SlugField(max_length=255L, unique=True)
class Meta:
db_table = 'performance_framework'
def __str__(self):
return self.name
@python_2_unicode_compatible
class PerformanceSignature(models.Model):
signature_hash = models.CharField(max_length=SIGNATURE_HASH_LENGTH,
validators=[
MinLengthValidator(SIGNATURE_HASH_LENGTH)
],
db_index=True)
repository = models.ForeignKey(Repository, null=True) # null=True only temporary, until we update old entries
framework = models.ForeignKey(PerformanceFramework)
platform = models.ForeignKey(MachinePlatform)
option_collection = models.ForeignKey(OptionCollection)
suite = models.CharField(max_length=80L)
test = models.CharField(max_length=80L, blank=True)
lower_is_better = models.BooleanField(default=True)
last_updated = models.DateTimeField(db_index=True, null=True) # null=True only temporary, until we update old entries
# extra properties to distinguish the test (that don't fit into
# option collection for whatever reason)
extra_properties = JSONField(max_length=1024)
class Meta:
db_table = 'performance_signature'
        # make sure there is only one signature per repository with a
        # particular set of properties, and only one signature of any
        # hash per repository (the same hash in different repositories
        # is allowed)
        unique_together = (
            ('repository', 'framework', 'platform', 'option_collection',
             'suite', 'test', 'last_updated'),
            ('repository', 'signature_hash'),
        )
def __str__(self):
return self.signature_hash
@python_2_unicode_compatible
class PerformanceDatum(models.Model):
repository = models.ForeignKey(Repository)
job_id = models.PositiveIntegerField(db_index=True)
result_set_id = models.PositiveIntegerField(db_index=True)
signature = models.ForeignKey(PerformanceSignature)
value = models.FloatField()
push_timestamp = models.DateTimeField(db_index=True)
class Meta:
db_table = 'performance_datum'
index_together = [('repository', 'signature', 'push_timestamp'),
('repository', 'job_id'),
('repository', 'result_set_id')]
unique_together = ('repository', 'job_id', 'result_set_id',
'signature', 'push_timestamp')
def save(self, *args, **kwargs):
super(PerformanceDatum, self).save(*args, **kwargs) # Call the "real" save() method.
if not self.signature.last_updated or (self.signature.last_updated <
self.push_timestamp):
self.signature.last_updated = self.push_timestamp
self.signature.save()
def __str__(self):
return "{} {}".format(self.value, self.push_timestamp)
@python_2_unicode_compatible
class PerformanceAlertSummary(models.Model):
'''
A summarization of performance alerts
A summary of "alerts" that the performance numbers for a specific
repository have changed at a particular time.
See also the :ref:`PerformanceAlert` class below.
'''
id = models.AutoField(primary_key=True)
repository = models.ForeignKey(Repository)
prev_result_set_id = models.PositiveIntegerField()
result_set_id = models.PositiveIntegerField()
last_updated = models.DateTimeField(db_index=True)
class Meta:
db_table = "performance_alert_summary"
unique_together = ('repository', 'prev_result_set_id', 'result_set_id')
def __str__(self):
return "{} {}".format(self.repository, self.result_set_id)
@python_2_unicode_compatible
class PerformanceAlert(models.Model):
'''
A single performance alert
An individual "alert" that the numbers in a specific performance
series have consistently changed level at a specific time.
An alert is always a member of an alert summary, which groups all
the alerts associated with a particular result set and repository
together. In many cases at Mozilla, the original alert summary is not
correct, so we allow reassigning it to a different (revised) summary.
'''
id = models.AutoField(primary_key=True)
summary = models.ForeignKey(PerformanceAlertSummary,
related_name='alerts')
revised_summary = models.ForeignKey(PerformanceAlertSummary,
related_name='revised_alerts',
null=True)
series_signature = models.ForeignKey(PerformanceSignature)
is_regression = models.BooleanField()
amount_pct = models.FloatField(
help_text="Amount in percentage that series has changed")
amount_abs = models.FloatField(
help_text="Absolute amount that series has changed")
prev_value = models.FloatField(
help_text="Previous value of series before change")
new_value = models.FloatField(
help_text="New value of series after change")
t_value = models.FloatField(
help_text="t value out of analysis indicating confidence "
"that change is 'real'")
class Meta:
db_table = "performance_alert"
unique_together = ('summary', 'series_signature')
def __str__(self):
return "{} {} {}%".format(self.summary, self.series_signature,
self.amount_pct)
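
# Hypothetical ORM query (added for illustration; the repository name is
# invented). Fetches regressions above 10% via the FK chain defined above:
#   PerformanceAlert.objects.filter(
#       is_regression=True, amount_pct__gt=10,
#       summary__repository__name='mozilla-inbound')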
| mpl-2.0 | 6,666,924,374,772,804,000 | 38.16129 | 122 | 0.642175 | false | 4.56391 | false | false | false |
ketanmukadam/StockData | stockdata/SDxls.py | 1 | 6832 | from openpyxl import load_workbook
from openpyxl import Workbook
from openpyxl.utils import column_index_from_string
from copy import copy
import re
key_map = {
'Cash and Equivalents':'Cash And Cash Equivalents',
'Accounts Receivable, Net':'Trade Receivables',
'Inventory':'Inventories',
'Total Current Assets':'Total Current Assets',
'Net PP&E':'Tangible Assets',
'Intangible Assets': 'Intangible Assets',
'Total Assets': 'Total Assets',
'Accounts Payable':'Trade Payables',
'Taxes Payable':'Deferred Tax Liabilities [Net]',
'Total Current Liabilities':'Total Current Liabilities',
'Long-term Debt':'Long Term Borrowings',
"Total Stockholder's Equity":'Total Shareholders Funds',
'Total Liabilities and Equity':'Total Capital And Liabilities',
'Sales':'Total Operating Revenues',
'Depreciation and Amortization':'Depreciation And Amortisation Expenses',
'Interest Expense':'Finance Costs',
'Other Gains and Losses':'Exceptional Items',
'Pretax Income': 'Profit/Loss Before Tax',
'Income Tax Expense':'Total Tax Expenses',
'Net Income':'Profit/Loss For The Period',
'Net Cash from Operations':'Net CashFlow From Operating Activities',
'Net Cash from Investing Activities':'Net Cash Used In Investing Activities',
'Net Cash from Financing Activities':'Net Cash Used From Financing Activities',
'Change in cash':'Net Inc/Dec In Cash And Cash Equivalents',
'Earnings per share': 'Diluted EPS (Rs.)',
'Dividends per share': 'Dividend / Share(Rs.)',
'BookValue per share': 'Book Value [InclRevalReserve]/Share (Rs.)',
'Other Current Assets':'Total Current Assets - Inventories - Trade Receivables - Cash And Cash Equivalents',
'Other Current Liabilities':'Total Current Liabilities - Trade Payables',
'Other Liabilities': 'Total Non-Current Liabilities - Long Term Borrowings - Deferred Tax Liabilities [Net]',
'Total Liabilities': 'Total Current Liabilities + Total Non-Current Liabilities',
'Cost of Goods Sold':'Cost Of Materials Consumed + Purchase Of Stock-In Trade + Changes In Inventories Of FG,WIP And Stock-In Trade',
'Gross Profit':'Total Operating Revenues - Cost Of Materials Consumed - Purchase Of Stock-In Trade - Changes In Inventories Of FG,WIP And Stock-In Trade',
'Operating Income before Depr':'Total Operating Revenues - Cost Of Materials Consumed - Purchase Of Stock-In Trade - Changes In Inventories Of FG,WIP And Stock-In Trade - Employee Benefit Expenses - Other Expenses',
'Operating Profit':'Total Operating Revenues - Cost Of Materials Consumed - Purchase Of Stock-In Trade - Changes In Inventories Of FG,WIP And Stock-In Trade - Employee Benefit Expenses - Other Expenses - Depreciation And Amortisation Expenses',
'Selling, General, and Admin Exp':'Employee Benefit Expenses + Other Expenses'
}
class SDxlsMixin():
def copy_fulldata(self, dest):
wb = Workbook()
ws = wb.active
for i,row in enumerate(self.data):
for j,col in enumerate(row):
ws.cell(row=i+1, column=j+1, value=col)
wb.save(dest)
def oper_list_of_list(self, data, name, oper):
if oper :
tempdata = [sum(i) for i in zip(*data) if not str in [type(e) for e in i]]
else:
tempdata = [i[0] - i[1] for i in zip(*data) if not str in [type(e) for e in i]]
tempdata.insert(0,name)
return tempdata
def calculate_datarow(self,key,name):
datarow = []
if len(re.split(' \+ | - ', key)) < 2 :
for drow in self.data:
if drow[0] == key:
datarow = drow
else:
tempdata = []
delimt = ' - '
if ' + ' in key : delimt = ' + '
keys = key.split(delimt)
for k in keys:
for drow in self.data:
if drow[0] == k:
tempdata.append(drow)
if delimt == ' + ':
tempdata[0] = self.oper_list_of_list(tempdata, name, True)
if delimt == ' - ':
tempdata[1] = self.oper_list_of_list(tempdata[1:], name, True)
tempdata = tempdata[:2]
tempdata[0] = self.oper_list_of_list(tempdata, name, False)
datarow = tempdata[0]
return datarow
def copy_cellformat(self,incell, outcell):
if incell.has_style:
outcell.font = copy(incell.font)
outcell.border = copy(incell.border)
outcell.fill = copy(incell.fill)
outcell.number_format = copy(incell.number_format)
outcell.protection = copy(incell.protection)
outcell.alignment = copy(incell.alignment)
def update_mysheet(self,wb):
ws = wb.active
for row in ws.rows:
if not isinstance(row[0].value,str):continue
key = key_map.get(row[0].value.strip())
if not key: continue
datarow = self.calculate_datarow(key, row[0].value)
for idx, datacol in enumerate(datarow):
if not idx: continue
cell = row[idx+1]
col = column_index_from_string(cell.column)
if type(datacol) != float:
newcell = ws.cell(row=cell.row,column=col, value=float(datacol.replace(',','')))
else :
newcell = ws.cell(row=cell.row,column=col, value=float(datacol))
self.copy_cellformat(cell, newcell)
def zap_mysheet(self, ws):
for row in ws.rows:
for cell in row:
if isinstance(cell.value,float):
dcell = ws.cell(row=cell.row, column=column_index_from_string(cell.column), value=0.0)
self.copy_cellformat(cell, dcell)
def copy_mysheet(self,src, dest, sheetname):
dwb = Workbook()
dws = dwb.active
swb = load_workbook(filename = src, keep_vba=True)
sws = swb.get_sheet_by_name(sheetname)
dws.title = sws.title
dws.sheet_view.showGridLines = False
for row in sws.rows:
for cell in row:
dcell = dws.cell(row=cell.row, column=column_index_from_string(cell.column), value=cell.value)
self.copy_cellformat(cell, dcell)
self.zap_mysheet(dws)
self.update_mysheet(dwb)
dwb.save(dest)
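

# --- Illustration of the key_map expression convention (added sketch) ---
# Values in key_map are either a single row label or labels joined by
# ' + ' / ' - '; calculate_datarow() splits on those delimiters and folds
# the matching rows with oper_list_of_list(). The standalone demo below
# only exercises the split step, with an invented key.
if __name__ == '__main__':
    demo_key = 'Total Current Liabilities + Trade Payables'
    print(re.split(' \+ | - ', demo_key))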
| gpl-3.0 | -2,751,520,717,131,465,700 | 49.985075 | 255 | 0.578893 | false | 3.69497 | false | false | false |
jameshicks/pydigree | pydigree/io/genomesimla.py | 1 | 1171 | "Read GenomeSIMLA formatted chromosome templates"
import pydigree
from pydigree.io.smartopen import smartopen
def read_gs_chromosome_template(templatef):
"""
Reads a genomeSIMLA format chromosome template file
:param templatef: The filename of the template file
:type templatef: string
:rtype: A ChromosomeTemplate object corresponding to the file
"""
with smartopen(templatef) as f:
label = f.readline().strip() # The label and
f.readline() # the number of markers, both of which we dont need.
c = pydigree.ChromosomeTemplate(label=label)
# genomeSIMLA chromosome files have marginal recombination probs
# instead of map positions. We'll have to keep track of what the
# last position was and add to it to get it into the shape we want
# it to be in.
last_cm = 0
for line in f:
if line == '\n':
continue
label, _, minf, cm, bp = line.strip().split()
bp = int(bp)
cm = float(cm)
last_cm += cm
c.add_genotype(float(minf), last_cm, label=label, bp=bp)
return c
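

# Hypothetical usage (added; the filename is invented):
#   template = read_gs_chromosome_template('chrom1.dat')
#   print(template.label)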
| apache-2.0 | 6,838,976,471,412,323,000 | 33.441176 | 74 | 0.617421 | false | 3.969492 | false | false | false |
VirusTotal/vt-py | examples/upload_files.py | 1 | 2284 | # Copyright © 2019 The vt-py authors. All Rights Reserved.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import asyncio
import os
import sys
import vt
async def get_files_to_upload(queue, path):
"""Finds which files will be uploaded to VirusTotal."""
if os.path.isfile(path):
await queue.put(path)
return
with os.scandir(path) as it:
for entry in it:
if not entry.name.startswith('.') and entry.is_file():
await queue.put(entry.path)
async def upload_hashes(queue, apikey):
"""Uploads selected files to VirusTotal."""
async with vt.Client(apikey) as client:
while not queue.empty():
file_path = await queue.get()
await client.scan_file_async(file=file_path)
print(f'File {file_path} uploaded.')
queue.task_done()
def main():
parser = argparse.ArgumentParser(description='Upload files to VirusTotal.')
parser.add_argument('--apikey', required=True, help='your VirusTotal API key')
parser.add_argument('--path', required=True,
help='path to the file/directory to upload.')
parser.add_argument('--workers', type=int, required=False, default=4,
help='number of concurrent workers')
args = parser.parse_args()
if not os.path.exists(args.path):
print(f'ERROR: file {args.path} not found.')
sys.exit(1)
loop = asyncio.get_event_loop()
queue = asyncio.Queue(loop=loop)
loop.create_task(get_files_to_upload(queue, args.path))
_worker_tasks = []
for i in range(args.workers):
_worker_tasks.append(
loop.create_task(upload_hashes(queue, args.apikey)))
# Wait until all worker tasks has completed.
loop.run_until_complete(asyncio.gather(*_worker_tasks))
loop.close()
if __name__ == '__main__':
main()
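
# Example invocation (added; the key and path are placeholders):
#   python upload_files.py --apikey <YOUR_API_KEY> --path ./samples --workers 8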
| apache-2.0 | -5,028,977,853,249,549,000 | 30.273973 | 80 | 0.68813 | false | 3.682258 | false | false | false |
spudmind/undertheinfluence | scrapers/meetings/scrape_meetings.py | 1 | 6792 | # -*- coding: utf-8 -*-
import calendar
from datetime import datetime
import logging
import os.path
import re
import webbrowser
from utils import mongo, fuzzy_dates, unicode_csv
class ScrapeMeetings:
def __init__(self, **kwargs):
# fetch the logger
self._logger = logging.getLogger("spud")
# database stuff
self.db = mongo.MongoInterface()
self.PREFIX = "meetings"
if kwargs["refreshdb"]:
self.db.drop("%s_scrape" % self.PREFIX)
# get the current path
self.current_path = os.path.dirname(os.path.abspath(__file__))
self.STORE_DIR = "store"
def find_header_rows(self, meetings):
found_headers = []
headers_re = [
("date", re.compile(r"(?:date|month)", re.IGNORECASE)),
("organisation", re.compile(r"(?:organisation|individuals|senior executive)", re.IGNORECASE)),
("name", re.compile(r"(?:name|minister|officials|spad)", re.IGNORECASE)),
("purpose", re.compile(r"(?:purpose|nature|issues)", re.IGNORECASE)),
]
for row_idx, row in enumerate(meetings):
column_mappings = {}
# create a copy
current_headers = list(headers_re)
for column_idx, cell in enumerate(row):
for idx, header in enumerate(current_headers):
header_id, header_re = header
if header_re.search(cell):
# remove from the possible headers
column_mappings[header_id] = column_idx
current_headers.pop(idx)
break
found_header = column_mappings.keys()
if "date" in found_header and "organisation" in found_header:
if "name" not in found_header and 0 not in column_mappings.values():
# take a guess that the first column is the name
column_mappings["name"] = 0
found_headers.append((row_idx, column_mappings))
return found_headers
def read_csv(self, filename):
full_path = os.path.join(self.current_path, self.STORE_DIR, filename)
with open(full_path, "rU") as csv_file:
csv = unicode_csv.UnicodeReader(csv_file, encoding="latin1", strict=True)
# read in the whole csv
return [[cell.strip() for cell in row] for row in csv]
# strip empty columns; standardize row length
def normalise_csv(self, meetings):
row_length = max([len(row) for row in meetings])
not_empty = {}
for row in meetings:
if len(not_empty) == row_length:
break
for idx, cell in enumerate(row):
if idx in not_empty:
continue
if cell != "":
not_empty[idx] = None
not_empty = not_empty.keys()
return [[m[idx] if idx < len(m) else "" for idx in not_empty] for m in meetings]
# often, a cell is left blank to mean its value is
# the same as the value of the cell above. This function populates
# these blank cells.
def populate_empty_cells(self, meetings, header_mappings):
if len(meetings) <= 1:
return meetings
pop_meetings = [meetings[0]]
for idx, row in enumerate(meetings[1:]):
pop_meeting = {k: row.get(k) if row.get(k) is not None else pop_meetings[idx].get(k, "") for k in header_mappings.keys()}
pop_meetings.append(pop_meeting)
return pop_meetings
def csv_to_dicts(self, meeting_rows, header_mappings):
meeting_dicts = []
for meeting_row in meeting_rows:
meeting = {}
for k, v in header_mappings.items():
val = meeting_row[v]
if val == "":
continue
meeting[k] = val
# we avoid adding blank rows
if meeting != {}:
meeting_dicts.append(meeting)
return meeting_dicts
def parse_meetings(self, meetings, meta):
date_format = None
date_range = fuzzy_dates.extract_date_range(meta["title"])
# print meta
# for x in meetings:
# print x
# webbrowser.open(meta["source"]["url"] + "/preview")
# raw_input()
for meeting in meetings:
if "date" not in meeting:
self._logger.warning("Date missing from the following row:", meeting)
continue
meeting_date = fuzzy_dates.parse_date(meeting["date"], date_format=date_format, date_range=date_range)
if meeting_date:
meeting["date"] = str(meeting_date.date)
date_format = meeting_date.date_format
else:
self._logger.warning("Couldn't find '%s' in range %s" % (meeting["date"], date_range))
return meetings
def scrape_csv(self, meta):
self._logger.info("... %s" % meta["filename"])
meetings = self.read_csv(meta["filename"])
meetings = self.normalise_csv(meetings)
# find index(es) of header rows
header_rows = self.find_header_rows(meetings)
if header_rows == []:
# doesn't look like this file contains meeting data
return []
meetings_dicts = []
# sometimes a file contains multiple groups of meetings
for idx, header_row in enumerate(header_rows):
if idx == len(header_rows) - 1:
meetings_block = meetings[header_row[0]+1:]
else:
meetings_block = meetings[header_row[0]+1:header_rows[idx + 1][0]-1]
block_dicts = self.csv_to_dicts(meetings_block, header_row[1])
block_dicts = self.populate_empty_cells(block_dicts, header_row[1])
meetings_dicts += block_dicts
# if "name" not in header_row[1]:
return meetings_dicts
def run(self):
self._logger.info("Scraping Meetings")
_all_meetings = self.db.fetch_all("%s_fetch" % self.PREFIX, paged=False)
for meta in _all_meetings:
meetings = []
meta["published_at"] = str(datetime.strptime(meta["published_at"], "%d %B %Y").date())
if meta["file_type"] == "CSV":
meetings = self.scrape_csv(meta)
meetings = self.parse_meetings(meetings, meta)
elif meta["file_type"] == "PDF":
# TODO: Parse PDF
pass
for meeting in meetings:
for k in ["published_at", "department", "title", "source"]:
meeting[k] = meta[k]
self.db.save("%s_scrape" % self.PREFIX, meeting)
def scrape(**kwargs):
ScrapeMeetings(**kwargs).run()
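
# Hypothetical entry point (added): drop any previous scrape and re-run.
#   scrape(refreshdb=True)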
| mit | -2,568,683,875,341,646,000 | 38.719298 | 133 | 0.553592 | false | 3.958042 | false | false | false |
carljm/django-model-utils | tests/test_fields/test_urlsafe_token_field.py | 1 | 2105 | from unittest.mock import Mock
from django.db.models import NOT_PROVIDED
from django.test import TestCase
from model_utils.fields import UrlsafeTokenField
class UrlsafeTokenFieldTests(TestCase):
def test_editable_default(self):
field = UrlsafeTokenField()
self.assertFalse(field.editable)
def test_editable(self):
field = UrlsafeTokenField(editable=True)
self.assertTrue(field.editable)
def test_max_length_default(self):
field = UrlsafeTokenField()
self.assertEqual(field.max_length, 128)
def test_max_length(self):
field = UrlsafeTokenField(max_length=256)
self.assertEqual(field.max_length, 256)
def test_factory_default(self):
field = UrlsafeTokenField()
self.assertIsNone(field._factory)
def test_factory_not_callable(self):
with self.assertRaises(TypeError):
UrlsafeTokenField(factory='INVALID')
def test_get_default(self):
field = UrlsafeTokenField()
value = field.get_default()
self.assertEqual(len(value), field.max_length)
def test_get_default_with_non_default_max_length(self):
field = UrlsafeTokenField(max_length=64)
value = field.get_default()
self.assertEqual(len(value), 64)
def test_get_default_with_factory(self):
token = 'SAMPLE_TOKEN'
factory = Mock(return_value=token)
field = UrlsafeTokenField(factory=factory)
value = field.get_default()
self.assertEqual(value, token)
factory.assert_called_once_with(field.max_length)
def test_no_default_param(self):
field = UrlsafeTokenField(default='DEFAULT')
self.assertIs(field.default, NOT_PROVIDED)
def test_deconstruct(self):
def test_factory():
pass
instance = UrlsafeTokenField(factory=test_factory)
name, path, args, kwargs = instance.deconstruct()
new_instance = UrlsafeTokenField(*args, **kwargs)
self.assertIs(instance._factory, new_instance._factory)
self.assertIs(test_factory, new_instance._factory)
| bsd-3-clause | -7,383,633,666,099,288,000 | 31.890625 | 63 | 0.671259 | false | 3.73227 | true | false | false |
olivierdalang/stdm | security/authorization.py | 1 | 3515 | """
/***************************************************************************
Name : Auhtorization Service
Description : Checks whether the logged in user has permissions to
access the particular content item
Date : 2/June/2013
copyright : (C) 2013 by John Gitau
email : [email protected]
***************************************************************************/
/***************************************************************************
* *
* This program is free software; you can redistribute it and/or modify *
* it under the terms of the GNU General Public License as published by *
* the Free Software Foundation; either version 2 of the License, or *
* (at your option) any later version. *
* *
***************************************************************************/
"""
from roleprovider import RoleProvider
from exception import SecurityException
from stdm.data import Content, STDMDb, Base
from stdm.utils import *
from sqlalchemy import Table
from sqlalchemy.orm import relationship, mapper, clear_mappers
from sqlalchemy.exc import *
class RoleMapper(object):
pass
class Authorizer(object):
'''
This class has the responsibility of asserting whether an account with
the given user name has permissions to access a particular content item
'''
def __init__(self, username):
self.username = username
self.userRoles = []
self._getUserRoles()
def _getUserRoles(self):
'''
Get roles that the user belongs to
'''
roleProvider = RoleProvider()
self.userRoles = roleProvider.GetRolesForUser(self.username)
'''
If user name is postgres then add it to the list of user roles since
it is not a group role in PostgreSQL but content is initialized by
morphing it as a role in registering content items
'''
pg_account = 'postgres'
if self.username == pg_account:
self.userRoles.append(pg_account)
def CheckAccess(self, contentCode):
'''
Assert whether the given user has permissions to access a content
        item with the given code.
'''
hasPermission = False
#Get roles with permission
try:
cnt = Content()
qo = cnt.queryObject()
'''
cntRef = qo.filter(Content.code == contentCode).first()
'''
cntRef = qo.filter(Content.code == contentCode).first()
if cntRef != None:
cntRoles =cntRef.roles
for rl in cntRoles:
if getIndex(self.userRoles,rl.name) != -1:
hasPermission = True
break
except Exception:
'''
Current user does not have permission to access the content tables.
Catches all errors
'''
#pass
raise
return hasPermission
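
# Hypothetical usage (added; the user name and content code are invented):
#   auth = Authorizer('stdm_user')
#   if auth.CheckAccess('NEW_STR'):
#       ...  # show the content item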
| gpl-2.0 | -3,874,758,017,087,749,600 | 34.257732 | 79 | 0.462304 | false | 5.466563 | false | false | false |
nolze/ms-offcrypto-tool | msoffcrypto/__init__.py | 1 | 2006 | import olefile
import zipfile
__version__ = "4.11.0"
def OfficeFile(file):
'''Return an office file object based on the format of given file.
Args:
file (:obj:`_io.BufferedReader`): Input file.
Returns:
BaseOfficeFile object.
Examples:
>>> with open("tests/inputs/example_password.docx", "rb") as f:
... officefile = OfficeFile(f)
... officefile.keyTypes
('password', 'private_key', 'secret_key')
Given file handle will not be closed, the file position will most certainly
change.
'''
file.seek(0) # required by isOleFile
if olefile.isOleFile(file):
ole = olefile.OleFileIO(file)
elif zipfile.is_zipfile(file): # Heuristic
from .format.ooxml import OOXMLFile
return OOXMLFile(file)
else:
raise Exception("Unsupported file format")
# TODO: Make format specifiable by option in case of obstruction
# Try this first; see https://github.com/nolze/msoffcrypto-tool/issues/17
if ole.exists('EncryptionInfo'):
from .format.ooxml import OOXMLFile
return OOXMLFile(file)
# MS-DOC: The WordDocument stream MUST be present in the file.
# https://msdn.microsoft.com/en-us/library/dd926131(v=office.12).aspx
elif ole.exists('wordDocument'):
from .format.doc97 import Doc97File
return Doc97File(file)
# MS-XLS: A file MUST contain exactly one Workbook Stream, ...
# https://msdn.microsoft.com/en-us/library/dd911009(v=office.12).aspx
elif ole.exists('Workbook'):
from .format.xls97 import Xls97File
return Xls97File(file)
# MS-PPT: A required stream whose name MUST be "PowerPoint Document".
# https://docs.microsoft.com/en-us/openspecs/office_file_formats/ms-ppt/1fc22d56-28f9-4818-bd45-67c2bf721ccf
elif ole.exists('PowerPoint Document'):
from .format.ppt97 import Ppt97File
return Ppt97File(file)
else:
raise Exception("Unrecognized file format")
| mit | 7,344,676,869,608,963,000 | 34.821429 | 112 | 0.665005 | false | 3.452668 | false | false | false |
CTSNE/NodeDefender | NodeDefender/db/node.py | 1 | 2543 | from NodeDefender.db.sql import SQL, GroupModel, NodeModel, LocationModel, UserModel
import NodeDefender
from geopy.geocoders import Nominatim
def get_sql(name):
return NodeModel.query.filter_by(name = name).first()
def update_sql(original_name, **kwargs):
node = get_sql(original_name)
if node is None:
return False
for key, value in kwargs.items():
if key not in node.columns():
continue
setattr(node, key, value)
SQL.session.add(node)
SQL.session.commit()
return node
def create_sql(name):
if get_sql(name):
return get_sql(name)
node = NodeModel(name)
SQL.session.add(node)
SQL.session.commit()
return node
def save_sql(node):
SQL.session.add(node)
return SQL.session.commit()
def delete_sql(name):
if not get_sql(name):
return False
SQL.session.delete(get_sql(name))
SQL.session.commit()
return True
def get(name):
return get_sql(name)
def list(*groups):
return SQL.session.query(NodeModel).join(NodeModel.groups).\
filter(GroupModel.name.in_(groups)).all()
def unassigned():
return SQL.session.query(NodeModel).filter(NodeModel.groups == None).all()
def create(name):
node = create_sql(name)
NodeDefender.db.message.node_created(node)
return node
def update(original_name, **kwargs):
return update_sql(original_name, **kwargs)
def location(name, street, city, latitude = None, longitude = None):
node = get_sql(name)
if node is None:
return False
if not latitude and not longitude:
geo = Nominatim()
coord = geo.geocode(street + ' ' + city, timeout = 10)
if coord:
latitude = coord.latitude
longitude = coord.longitude
else:
latitude = 0.0
longitude = 0.0
node.location = LocationModel(street, city, latitude, longitude)
SQL.session.add(node)
SQL.session.commit()
return node
def delete(name):
return delete_sql(name)
def add_icpe(nodeName, icpeMac):
node = get_sql(nodeName)
icpe = NodeDefender.db.icpe.get_sql(icpeMac)
if icpe is None or node is None:
return False
node.icpe = icpe
SQL.session.add(node)
SQL.session.commit()
return node
def remove_icpe(nodeName, icpeMac):
node = get_sql(nodeName)
    icpe = NodeDefender.db.icpe.get(icpeMac)
if icpe is None or node is None:
return False
node.icpe = None
SQL.session.add(node)
SQL.session.commit()
return node
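

# Hypothetical usage sketch (added; all names are invented and an active
# SQLAlchemy session / NodeDefender context is assumed):
def _demo():
    node = create('lab-node')
    location('lab-node', street='1 Example St', city='Exampleville')
    add_icpe('lab-node', '00:11:22:33:44:55')
    return node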
| mit | 5,579,770,979,561,890,000 | 24.178218 | 84 | 0.646087 | false | 3.427224 | false | false | false |
eshoyfer/dwc_network_server_emulator | master_server.py | 3 | 2304 | # DWC Network Server Emulator
# Copyright (C) 2014 polaris-
# Copyright (C) 2014 ToadKing
# Copyright (C) 2014 AdmiralCurtiss
# Copyright (C) 2014 msoucy
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
from gamespy_player_search_server import GameSpyPlayerSearchServer
from gamespy_profile_server import GameSpyProfileServer
from gamespy_backend_server import GameSpyBackendServer
from gamespy_natneg_server import GameSpyNatNegServer
from gamespy_qr_server import GameSpyQRServer
from gamespy_server_browser_server import GameSpyServerBrowserServer
from gamespy_gamestats_server import GameSpyGamestatsServer
from nas_server import NasServer
from internal_stats_server import InternalStatsServer
from admin_page_server import AdminPageServer
from storage_server import StorageServer
from gamestats_server_http import GameStatsServer
import gamespy.gs_database as gs_database
import threading
if __name__ == "__main__":
# Let database initialize before starting any servers.
# This fixes any conflicts where two servers find an uninitialized database at the same time and both try to
# initialize it.
db = gs_database.GamespyDatabase()
db.initialize_database()
db.close()
servers = [
GameSpyBackendServer,
GameSpyQRServer,
GameSpyProfileServer,
GameSpyPlayerSearchServer,
GameSpyGamestatsServer,
#GameSpyServerBrowserServer,
GameSpyNatNegServer,
NasServer,
InternalStatsServer,
AdminPageServer,
StorageServer,
GameStatsServer,
]
for server in servers:
threading.Thread(target=server().start).start()
| agpl-3.0 | -4,606,263,240,929,476,000 | 36.770492 | 112 | 0.74566 | false | 4.006957 | false | false | false |
daniel-thompson/tintamp | tools/tube_transfer.py | 1 | 12222 | # -*- coding: utf-8 -*-
#
# Copyright (C) 2009, 2010 Hermann Meyer, James Warden, Andreas Degert
# Copyright (C) 2011 Pete Shorthose
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
#----------------------------------------------------------------
#
# lots of hints and some numerical values taken from
# Virtual Air Guitar (Matti Karjalainen, Teemu Maki-Patola, Aki Kanerva, Antti Huovilainen)
#
import sys
from pylab import *
from scipy.optimize import newton
#
# o V+
# |
# |
# |
# +-+
# | |
# Rp | |
# | | Co
# +-+
# | ||
# +-----++---o Vo
# Vp | ||
# |
# Ci --+--
# Ri /--+--
# || Vi +-----+ Vg / 12AX7
# o---++--+-------| |-----+- - - -
# || | +-----+ \
# | \/----
# | /-----
# | /
# +-+ Vk |
# | | +------+
# | | | |
# | | | |
# +-+ +-+ |
# | | | -+-
# | Rk | | -+- Ck
# | | | |
# ----- +-+ |
# --- | |
# o | |
# | |
# ----- -----
# --- ---
# o o
#
#
# mu Amplification factor.
# kx Exponent:
# kg1 Inversely proportional to overall plate current
# kp     Affects operation in region of large plate voltage and large negative grid voltage
# kvb Knee volts
#
names = ("mu", "kx", "kg1", "kg2", "kp", "kvb", "ccg", "cpg", "ccp", "rgi")
factor = ( 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1e-12, 1e-12, 1e-12, 1e3)
tubes = {
#TUBE MU EX KG1 KG2 KP KVB CCG* CPG* CCP* RGI**
"6DJ8" : ( 28.0, 1.3, 330.0, None, 320.0, 300.0, 2.3, 2.1, 0.7, 2.0),
"6L6CG": ( 8.7, 1.35, 1460.0, 4500.0, 48.0, 12.0, 14.0, 0.85, 12.0, 1.0),
"12AX7": (100.0, 1.4, 1060.0, None, 600.0, 300.0, 2.3, 2.4, 0.9, 2.0),
"12AT7": ( 60.0, 1.35, 460.0, None, 300.0, 300.0, 2.7, 2.2, 1.0, 2.0),
"12AU7": ( 21.5, 1.3, 1180.0, None, 84.0, 300.0, 2.3, 2.2, 1.0, 2.0),
"6550" : ( 7.9, 1.35, 890.0, 4800.0, 60.0, 24.0, 14.0, 0.85, 12.0, 1.0),
"KT88" : ( 8.8, 1.35, 730.0, 4200.0, 32.0, 16.0, 14.0, 0.85, 12.0, 1.0),
#"EL34" : ( 60.0, 4.8, 2000.0, None, 800.0, 50.0, None, None, None, None),
"EL34" : ( 11.0, 1.35, 650.0, 4200.0, 60.0, 24.0, 15.0, 1.0, 8.0, 1.0),
"2A3" : ( 4.2, 1.4, 1500.0, None, 60.0, 300.0, 8.0, 17.0, 6.0, 1.0),
"300B" : ( 3.95, 1.4, 1550.0, None, 65.0, 300.0, 10.0, 16.0, 5.0, 1.0),
"6C33C": ( 3.1, 1.4, 163.0, None, 15.0, 300.0, 31.0, 31.0, 11.0, 0.5),
"6C16": ( 42.2, 2.21, 393.0, None, 629.0, 446.0, 9.0, 1.8, 0.48, 2.0),
# * : 10^-12 (pF)
# **: 10^3 (kOhm)
}
class Circuit(object):
# Parameters for circuit / approximation
#table_size = 2001
table_size = 1001
Uin_range = (-5.0, 5.0)
Vp = 250
Rp = 100e3
Ri_values = (68e3, 250e3)
# class data
used_names = ("mu", "kx", "kg1", "kp", "kvb")
ipk_tab = { "triode": "Ipk_triode", "pentode": "Ipk_triode_pentode" }
Vi = linspace(Uin_range[0],Uin_range[1],table_size)
@classmethod
def help(self):
return ("tube: %s\nplate current functions: %s" % (
", ".join(sorted(tubes.keys())),
", ".join(sorted(self.ipk_tab.keys()))))
def __init__(self, tube, ipk_func):
self.tube = tube
self.ipk_func = ipk_func
error = False
if tube not in tubes:
print "tube '%s' not found" % tube
error = True
if ipk_func not in self.ipk_tab:
print "plate current function '%s' not found" % ipk_func
error = True
if error:
print
usage()
for n, f, v in zip(names, factor, tubes[tube]):
if v is not None:
setattr(self, n, f*v)
self.Ipk = getattr(self, self.ipk_tab[ipk_func])
self.FtubeV = vectorize(self.Ftube)
def Igk_Vgk(self, Vgk):
"""gate current as function of gate-kathode voltage"""
return exp(7.75*Vgk-10.3)
def Ipk_triode_pentode(self, Vgk, Vpk):
"""Koren model of pentode connected as class A triode
(screen connected to plate):
plate current as function of gate-kathode voltage
and plate-kathode voltage
"""
E1 = Vpk/self.kp*log(1+exp(self.kp*(1/self.mu+Vgk/Vpk)))
return 2*E1**self.kx/self.kg1*(E1>0.0)*arctan(Vpk/self.kvb)
def Ipk_triode(self, Vgk, Vpk):
"""
Koren model of triode:
plate current as function of gate-kathode voltage
and plate-kathode voltage
"""
E1 = Vpk/self.kp*log(1+exp(self.kp*(1/self.mu+Vgk/sqrt(self.kvb+Vpk*Vpk))))
return 2*E1**self.kx/self.kg1*(E1>0.0)
def Ftube(self, Vi, Ri):
"""calculate output voltage of a tube circuit as function of input voltage
Vi input voltage
Ri value of resistor Ri
"""
def fi(Vgk, Vi, Ri):
return Vi - Vgk - Ri * self.Igk_Vgk(Vgk) # sum of voltages -> zero
Vgk = newton(fi, self.Igk_Vgk(0), args=(Vi, Ri)) # Vgk(Vi)
def fp(Vpk, Vgk, Ipk):
return Vpk + self.Rp * Ipk(Vgk, Vpk) - self.Vp
return newton(fp, self.Vp/2, args=(Vgk,self.Ipk)) # Vpk(Vgk)
def Vk0(self, Ri, Rk):
v0 = 0
def f(v):
return (self.Ftube(-v, Ri)-self.Vp)*(Rk/self.Rp) + v
return newton(f, v0)
def write_ftube_table(self, Ri, Vi, Vp):
"""write C source"""
sys.stdout.write("\t{ // Ri = %dk\n" % (Ri/1e3))
sys.stdout.write('\t%g,%g,%g,%d, {' % (Vi[0], Vi[-1], (len(Vi)-1)/(Vi[-1]-Vi[0]), self.table_size))
s = ""
for i, v in enumerate(Vi):
if i % 5 == 0:
sys.stdout.write(s+"\n\t")
s = ""
sys.stdout.write(s+str(Vp[i]))
s = ","
sys.stdout.write("\n\t}}")
def write_tables(self, prgname):
sys.stdout.write("// generated by %s\n" % prgname)
sys.stdout.write("// tube: %s\n" % self.tube)
sys.stdout.write("// plate current function: %s\n" % self.ipk_func)
for n in self.used_names:
sys.stdout.write("// %s: %g\n" % (n, getattr(self, n)))
sys.stdout.write("\n")
sys.stdout.write("table1d_imp<%d> tubetable_%s[%d] = {\n"
% (self.table_size, self.tube, len(self.Ri_values)))
s = ""
for Ri in self.Ri_values:
sys.stdout.write(s)
s = ",\n"
Vp = self.FtubeV(self.Vi, Ri)
self.write_ftube_table(Ri, self.Vi, Vp)
sys.stdout.write("\n};\n")
def write_tt_ftube_table(self, Ri, Vi, Vp):
"""write C source"""
sys.stdout.write("\t{ // Ri = %dk\n" % (Ri/1e3))
sys.stdout.write('\t%g,%g,%g,%d, {' % (Vi[0], Vi[-1], (len(Vi)-1)/(Vi[-1]-Vi[0]), self.table_size))
s = ""
for i, v in enumerate(Vi):
if i % 3 == 0:
sys.stdout.write(s+"\n\t")
s = ""
sys.stdout.write(s+"TTFLOAT("+str(Vp[i])+")")
s = ","
sys.stdout.write("\n\t}};\n")
def write_tt_tables(self, prgname):
sys.stdout.write("// generated by %s\n" % prgname)
sys.stdout.write("// tube: %s\n" % self.tube)
sys.stdout.write("// plate current function: %s\n" % self.ipk_func)
for n in self.used_names:
sys.stdout.write("// %s: %g\n" % (n, getattr(self, n)))
sys.stdout.write("//\n")
sys.stdout.write("// struct tubetable {\n")
sys.stdout.write("// unsigned int min_Vi;\n")
sys.stdout.write("// unsigned int max_Vi;\n")
sys.stdout.write("// unsigned int mapping;\n")
sys.stdout.write("// unsigned int table_size;\n")
sys.stdout.write("// ttspl_t table[1001];\n")
sys.stdout.write("// }\n")
for Ri in self.Ri_values:
sys.stdout.write("\nstruct tubetable tubetable_%s_Ri%s = \n"
% (self.tube, self.R_name(Ri).upper()))
Vp = self.FtubeV(self.Vi, Ri)
self.write_tt_ftube_table(Ri, self.Vi, Vp)
sys.stdout.write("\n")
def R_name(self, r):
for f, n in (1e6,"M"),(1e3,"k"),(1,""):
if r >= f:
return ("%g%s" % (r/f, n)).replace(".","_")
def show_vk0(self, args):
if args:
Ri = float(args[0])
Rk = float(args[1])
else:
try:
while True:
vl = ["%d: %s" % (i, self.R_name(r)) for i, r in enumerate(self.Ri_values)]
i = raw_input("Ri [%s]: " % ", ".join(vl))
try:
i = int(i)
except ValueError:
pass
else:
if 0 <= i < len(self.Ri_values):
Ri = self.Ri_values[i]
break
print "error: illegal input"
while True:
try:
Rk = float(raw_input("Rk: "))
break
except ValueError:
print "error: please enter float value"
except KeyboardInterrupt:
print
return
print "%f" % self.Vk0(Ri,Rk)
def check_table_accuracy(self, Ri):
"""maximal relative table error at half interval"""
def ip(x):
return (x[:-1]+x[1:])/2
Vp = self.FtubeV(self.Vi, Ri)
VpM = self.FtubeV(ip(self.Vi), Ri)
VpD = (ip(Vp) - VpM) / VpM
return max(VpD)
def display_accuracy(self):
for Ri in self.Ri_values:
print "Ri=%dk: %g" % (Ri/1e3, self.check_table_accuracy(Ri))
def plot_Ftube(self):
title(self.tube)
for Ri in self.Ri_values:
Vp = self.FtubeV(self.Vi, Ri)
plot(self.Vi, Vp, label="Ri=%dk" % (Ri/1e3))
xlabel("Vik")
ylabel("Vp")
legend()
axis
show()
def usage():
print "usage: %s plot|accuracy|table|tt-table|vk0 tube-name plate-func" % sys.argv[0]
print Circuit.help()
raise SystemExit, 1
def main():
if len(sys.argv) < 4:
usage()
cmd = sys.argv[1]
c = Circuit(sys.argv[2], sys.argv[3])
if cmd == "plot":
c.plot_Ftube()
elif cmd == "accuracy":
c.display_accuracy()
elif cmd == "table":
c.write_tables(sys.argv[0])
elif cmd == "tt-table":
c.write_tt_tables(sys.argv[0])
elif cmd == "vk0":
c.show_vk0(sys.argv[4:])
else:
usage()
if __name__ == "__main__":
main()
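
# Example invocations (added; tube and plate-function names must be keys of
# `tubes` and `Circuit.ipk_tab` above):
#   python tube_transfer.py plot 12AX7 triode          # plot Vp(Vi) curves
#   python tube_transfer.py tt-table 12AX7 triode      # emit C lookup tables
#   python tube_transfer.py vk0 12AX7 triode 68e3 1500 # cathode bias point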
| lgpl-2.1 | 6,777,996,124,714,815,000 | 36.490798 | 107 | 0.448208 | false | 2.926023 | false | false | false |
ipfs/py-ipfs-api | docs/publish.py | 2 | 1473 | #!/usr/bin/python3
import os
import sys
__dir__ = os.path.dirname(__file__)
sys.path.insert(0, os.path.join(__dir__, ".."))
import sphinx.cmd.build
import ipfshttpclient
# Ensure working directory is script directory
os.chdir(__dir__)
def main(argv=sys.argv[1:], program=sys.argv[0]):
if len(argv) != 1:
print("Usage: {0} [IPNS-key]".format(os.path.basename(program)))
print()
print("!! Continuing without publishing to IPNS !!")
print()
# Invoke Sphinx like the Makefile does
result = sphinx.cmd.build.build_main(["-b", "html", "-d", "build/doctrees", ".", "build/html"])
if result != 0:
return result
print()
print("Exporting files to IPFS…")
client = ipfshttpclient.connect()
hash_docs = client.add("build/html", recursive=True, raw_leaves=True, pin=False)[-1]["Hash"]
hash_main = client.object.new("unixfs-dir")["Hash"]
hash_main = client.object.patch.add_link(hash_main, "docs", hash_docs)["Hash"]
client.pin.add(hash_main)
print("Final IPFS path: /ipfs/{0}".format(hash_main))
if len(argv) == 1:
key = argv[0]
print()
print("Exporting files to IPNS…")
name_main = client.name.publish(hash_main, key=key)["Name"]
print("Final IPNS path: /ipns/{0}".format(name_main))
print()
print("Run the following commandline on all systems that mirror this documentation:")
print(" ipfs pin add {0} && ipfs name publish -k {1} /ipfs/{0}".format(hash_main, name_main))
return 0
if __name__ == "__main__":
sys.exit(main()) | mit | -3,829,902,737,229,777,400 | 29.625 | 96 | 0.663036 | false | 2.857977 | false | false | false |
autrimpo/rlite | tests/perftests.py | 2 | 5765 | #!/usr/bin/env python
#
# Author: Vincenzo Maffione <[email protected]>
#
import multiprocessing
import subprocess
import statistics
import argparse
import time
import re
import os
def has_outliers(tuples):
for t in range(len(tuples[0])):
avg = statistics.mean([x[t] for x in tuples])
stdev = statistics.stdev([x[t] for x in tuples])
if stdev > avg*0.05:
return True
return False
def to_avg_stdev(vlist, nsamples):
    # Sort by kpps or ktps
tuples = sorted(vlist[-nsamples:], key=lambda x: x[1])
left = 0
vals = []
while left < len(tuples):
if not has_outliers(tuples[left:]):
for t in range(len(tuples[0])):
avg = statistics.mean([x[t] for x in tuples[left:]])
stdev = statistics.stdev([x[t] for x in tuples[left:]])
vals.append(avg)
vals.append(stdev)
break
left += 1
del vlist[-nsamples:]
vlist.append(tuple(vals))
description = "Python script to perform automated tests based on rinaperf"
epilog = "2017 Vincenzo Maffione <[email protected]>"
argparser = argparse.ArgumentParser(description = description,
epilog = epilog)
argparser.add_argument('--size-min', type = int, default = 2,
help = "Minimum size for the test")
argparser.add_argument('--size-max', type = int, default = 1400,
help = "Maximum size for the test")
argparser.add_argument('--size-step', type = int, default = 10,
help = "Packet size increment")
argparser.add_argument('--trials', type = int, default = 3,
help = "Number of trials for each combination "
"of parameters")
argparser.add_argument('-D', '--duration', type = int, default = 10,
help = "Duration of each test (in seconds)")
argparser.add_argument('-g', '--max-sdu-gap', type = int, default = -1,
help = "Max SDU gap")
argparser.add_argument('-t', '--test-type', type = str, default = "perf",
help = "Test type", choices = ["perf", "rr"])
argparser.add_argument('-d', '--dif', type = str,
help = "DIF to use for the tests")
argparser.add_argument('-o', '--output', type = str, help = "Output file for gnuplot data",
default = 'output.txt')
argparser.add_argument('--sleep', type = int, default = 2,
help = "How many seconds to sleep between two consecutive test runs")
args = argparser.parse_args()
stats = []
plotcols = ['size']
if args.test_type == 'perf':
plotcols += ['snd_kpps', 'rcv_kpps', 'snd_mbps', 'rcv_mbps']
elif args.test_type == 'rr':
plotcols += ['ktps', 'snd_mbps', 'snd_latency']
# build QoS
qosarg = ""
if args.max_sdu_gap >= 0:
qosarg += " -g %s" % args.max_sdu_gap
difarg = ""
if args.dif:
difarg = " -d %s" % args.dif
try:
for sz in range(args.size_min, args.size_max+1, args.size_step):
cmd = ("rinaperf -s %s -t %s -D %s %s %s"
% (sz, args.test_type, args.duration, qosarg, difarg))
print("Running: %s" % cmd)
t = 1
while t <= args.trials:
try:
out = subprocess.check_output(cmd.split())
except subprocess.CalledProcessError:
print("Test run #%d failed" % t)
continue
out = out.decode('ascii')
outl = out.split('\n')
if args.test_type == 'perf':
if len(outl) < 4:
print(out)
continue
m = re.match(r'^Sender\s+(\d+)\s+(\d+\.?\d*)\s+(\d+\.?\d*)', outl[2])
if m is None:
print(out)
continue
tpackets = int(m.group(1))
tkpps = float(m.group(2))
tmbps = float(m.group(3))
m = re.match(r'^Receiver\s+(\d+)\s+(\d+\.?\d*)\s+(\d+\.?\d*)', outl[3])
if m is None:
print(out)
continue
rpackets = int(m.group(1))
rkpps = float(m.group(2))
rmbps = float(m.group(3))
prtuple = (tpackets, rpackets, tkpps, rkpps, tmbps, rmbps)
stats.append((sz, tkpps, rkpps, tmbps, rmbps))
print("%d/%d pkts %.3f/%.3f Kpps %.3f/%.3f Mbps" % prtuple)
elif args.test_type == 'rr':
if len(outl) < 3:
print(out)
continue
m = re.match(r'^Sender\s+(\d+)\s+(\d+\.?\d*)\s+(\d+\.?\d*)\s+(\d+)', outl[2])
if m is None:
print(out)
continue
transactions = int(m.group(1))
ktps = float(m.group(2))
mbps = float(m.group(3))
latency = int(m.group(4))
prtuple = (transactions, ktps, mbps, latency)
stats.append((sz, ktps, mbps, latency))
print("%d transactions %.3f Ktps %.3f Mbps %d ns" % prtuple)
else:
assert(False)
t += 1
time.sleep(args.sleep)
# Transform the last args.trials element of the 'stats' vectors into
# a (avg, stddev) tuple.
to_avg_stdev(stats, args.trials)
except KeyboardInterrupt:
pass
# Dump statistics for gnuplot
fout = open(args.output, 'w')
s = '#'
for k in plotcols:
s += '%19s ' % k
fout.write("%s\n" % s)
for i in range(len(stats)): # num samples
s = ' '
for j in range(len(stats[i])):
s += '%9.1f ' % stats[i][j]
fout.write("%s\n" % s)
| lgpl-2.1 | 422,188,228,162,067,700 | 31.570621 | 93 | 0.505117 | false | 3.468712 | true | false | false |
rsmoorthy/docker | tally/typekeys.py | 1 | 2917 | # Type keys and specify shift key up/down
import subprocess
import sys
import argparse
import time
import os
class TypeKeys:
def __init__(self, *args, **kwargs):
self.shift = False
self.name = 'Tally.ERP 9'
self.window = 0
if 'WID' in os.environ:
self.window = os.environ['WID']
if 'WINDOWID' in os.environ:
self.window = os.environ['WINDOWID']
self.chars = {}
for x in range(ord('A'), ord('Z')+1):
self.chars[chr(x)] = True
for x in range(ord('a'), ord('z')+1):
self.chars[chr(x)] = False
for x in [' ', ',', '.', '/', ';', "'", '[', ']', '`', '1', '2', '3', '4', '5', '6', '7', '8', '9', '0', '-', '=', '\\']:
self.chars[x] = False
for x in ['<', '>', '?', ':', '"', '{', '}', '~', '!', '@', '#', '$', '%', '^', '&', '*', '(', ')', '_', '+', '|']:
self.chars[x] = True
self.keys = ["BackSpace", "Escape", "Return", "Down", "Up", "Left", "Right"]
def init(self):
if not self.window:
self.window = self.runxdo(["xdotool", "search", "--name", "%s" % (self.name)])
self.stop_shift()
def runxdo(self, cmd):
p = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
out, err = p.communicate()
return out
def start_shift(self):
if self.shift == True:
return
self.runxdo(["xdotool", "keydown", "--window", "%s" % (self.window), "Shift"])
self.shift = True
def stop_shift(self):
if self.shift == False:
return
self.runxdo(["xdotool", "keyup", "--window", "%s" % (self.window), "Shift"])
self.shift = False
def type(self, str):
if str in self.keys:
self.runxdo(["xdotool", "key", "--delay", "%s" % (self.delay), "--window", "%s" % (self.window), "%s" % (str)])
return
for x in list(str):
if self.chars[x]:
self.start_shift()
else:
self.stop_shift()
self.runxdo(["xdotool", "type", "--delay", "%s" % (self.delay), "--window", "%s" % (self.window), "%s" % (x)])
self.stop_shift()
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument("string", help="string to type")
parser.add_argument("--ender", help="Key to press at the end", default=None)
parser.add_argument("--delay", help="delay between characters", default=1)
parser.add_argument("--window", help="window id")
parser.add_argument("--sleep", type=float, help="sleep time after commands", default=0.1)
args = parser.parse_args()
tk = TypeKeys()
if args.delay:
tk.delay = args.delay
if args.window:
tk.window = args.window
tk.init()
tk.type(args.string)
if(args.ender):
tk.type(args.ender)
time.sleep(args.sleep)
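
# Example (added; WID/WINDOWID may be exported instead of passing --window):
#   python typekeys.py "Ledger" --ender Return --delay 5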
| mit | -6,232,246,684,279,853,000 | 34.573171 | 129 | 0.502571 | false | 3.399767 | false | false | false |
F5Networks/f5-common-python | f5/bigip/tm/sys/test/functional/test_hardware.py | 1 | 1360 | # Copyright 2018 F5 Networks Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from f5.bigip.tm.sys.hardware import Hardware
class TestHardware(object):
def test_load_refresh(self, mgmt_root):
h1 = mgmt_root.tm.sys.hardware.load()
assert isinstance(h1, Hardware)
assert hasattr(h1, 'entries')
assert h1.kind == 'tm:sys:hardware:hardwarestats'
assert 'https://localhost/mgmt/tm/sys/hardware/platform' in h1.entries.keys()
h2 = mgmt_root.tm.sys.hardware.load()
assert isinstance(h2, Hardware)
assert hasattr(h2, 'entries')
assert h2.kind == 'tm:sys:hardware:hardwarestats'
assert 'https://localhost/mgmt/tm/sys/hardware/platform' in h2.entries.keys()
h1.refresh()
assert h1.kind == h2.kind
assert h1.entries.keys() == h2.entries.keys()
| apache-2.0 | 7,374,813,908,562,533,000 | 35.756757 | 85 | 0.689706 | false | 3.597884 | false | false | false |
dwhickox/NCHS-Programming-1-Python-Programs | Chap 2/ch2-debug-a.py | 1 | 1245 | # This program computes the final value of an invested
# amount using the compound interest formula:
# amount = principle * (1 + rate / num) ** (num * term)
# Variables:
# amount ........... Final value of investment
# principle ........ Amount invested
# rate ............. Rate of interest as a decimal
# num .............. Number of times per year compounded
# term ............. Investment term in years
# percentagerate ... Rate of interest as a percentage
import locale
locale.setlocale( locale.LC_ALL, '' )
# welcome message
print ("Welcome to the Investing Program \n")
# Assign values of input data
principle = 4500
percentagerate = .096
term = 2
num = 4
# Compute final value of investment
rate = percentagerate * 100
amount = principle * (1 + percentagerate / num) ** (num * term)
# Display results
print ("Amount of money invested ....", principle, "dollars")
print ("Rate of interest ............", rate, "percent")
print ("Frequency of compounding ....", num, "times per year")
print ("Period of investment ........", term, "years")
print ()
print ("Final value of investment ...", locale.currency(amount), "dollars")
input("\nPress enter to exit")
| mit | -3,296,714,930,589,738,500 | 31.648649 | 75 | 0.624096 | false | 3.392371 | false | true | false |
akkana/scripts | censusdata.py | 1 | 6357 | #!/usr/bin/env python3
# Download Census data files for the 2000 Decadal Summary File 1:
# https://www2.census.gov/census_2000/datasets/Summary_File_1/STATENAME
# If you look for documentation, you'll see pointers to the 730-page
# PDF sf1.pdf. Don't bother: it's completely wrong and must be for
# some earlier dataset.
# Instead, the documentation is in the files inside:
# http://www.census.gov/support/2000/SF1/SF1SAS.zip
# inside which, SF101.Sas describes the fields in st00001.uf1
# where st is the state abbreviation.
import os, sys
import re
import argparse
import zipfile
from collections import OrderedDict
# While testing:
from pprint import pprint
# A dictionary: { fileno: dic } where fileno is an int from 1 to 39 or 'geo'
# and dic is another dictionary of 'censuscode': "long description"
# where censuscode is a 7-char string like P000001 or H016H018.
CensusCodes = {}
# Fields in the sf1geo file
GeoFields = {}
def codesFromZipFile(zipfilename):
zf = zipfile.ZipFile(zipfilename, 'r')
pat = re.compile(b" *([A-Z][0-9]{3}[0-9A-Z]{3,4})=' *(.*)'")
for name in zf.namelist():
if not name.lower().endswith('.sas'):
continue
# The sf1geo file is special, so parse it separately
if name == 'sf1geo.sas':
parse_geo_sas_lines(zf.read(name).split(b'\n'))
continue
filematch = re.match('sf([0-9]{3}).sas', name.lower())
if not filematch:
# print(name, "doesn't match filematch pattern")
continue
code_dict = OrderedDict()
fileno = int(filematch.group(1))
# basename = os.path.basename(name)
# root, ext = os.path.splitext(basename)
        # Every file starts with these five, which don't have p-numbers
code_dict['FILEID'] = 'File Identification'
code_dict['STUSAB'] = 'State/U.S.-Abbreviation (USPS)'
code_dict['CHARITER'] = 'Characteristic Iteration'
code_dict['CIFSN'] = 'Characteristic Iteration File Sequence Number'
code_dict['LOGRECNO'] = 'Logical Record Number'
saslines = zf.read(name).split(b'\n')
for line in saslines:
m = re.match(pat, line)
if m:
pcode, desc = [ s.decode() for s in m.groups() ]
# print("%7s -- %s" % (code, desc))
code_dict[pcode] = desc
# else:
# print("No match on line:", line)
CensusCodes[fileno] = code_dict
def parse_geo_sas_lines(lines):
"""lines are read from the sf1geo.sas file.
Create a dictionary of fields:
    { 'CODE': {'name': 'long name', 'start': int, 'end': int} }
"""
    labelpat = re.compile(rb"(LABEL )?([A-Z0-9]*)='(.*)'")
    fieldspat = re.compile(rb"([A-Z0-9]+) \$ ([0-9]+)-([0-9]+)")
for line in lines:
line = line.strip()
m = re.match(labelpat, line)
if m:
sys.stdout.flush()
# Assume here that labelpats all come before fieldspats,
# so if we're seeing a labelpat, it doesn't already exist
# inside GeoFields.
code = m.group(2).decode()
GeoFields[code] = { 'name': m.group(3).decode() }
continue
m = re.match(fieldspat, line)
if m:
# If there's a fieldspat for this code, it should have
# had a long description already using a labelpat,
# so the code (group(1)) should already be in GeoFields.
# print("groups:", m.groups())
code = m.group(1).decode()
GeoFields[code]['start'] = int(m.group(2)) - 1
GeoFields[code]['end'] = int(m.group(3))
continue
# pprint(GeoFields)
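# Illustrative result (input format assumed from the regexes above): SAS lines
#   LABEL STUSAB='State/U.S. Abbreviation (USPS)'
#   STUSAB $ 7-8
# produce GeoFields['STUSAB'] ==
#   {'name': 'State/U.S. Abbreviation (USPS)', 'start': 6, 'end': 8}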
def file_for_code(code):
for fileno in CensusCodes:
if code in CensusCodes[fileno]:
return fileno
return None
def codes_for_description(desc):
codes = []
desc = desc.lower()
for fileno in CensusCodes:
for pcode in CensusCodes[fileno]:
if desc in CensusCodes[fileno][pcode].lower():
codes.append((pcode, CensusCodes[fileno][pcode]))
return codes
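# Example (hypothetical data): codes_for_description("urban") returns a list
# of (code, description) pairs such as [('P002002', 'Urban'), ...] for every
# entry whose description contains the search string.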
counties = []
def parse_geo_file(filename):
with open(filename) as fp:
for line in fp:
geo = parse_geo_line(line)
c = geo['COUNTY'].strip()
if c:
c = int(c)
if c not in counties:
counties.append(c)
counties.sort()
print("Counties:", counties)
def parse_geo_line(line):
"""Parse the <st>geo.uf1 file according to the GeoFields.
"""
d = {}
for code in GeoFields:
try:
d[code] = line[GeoFields[code]['start']:GeoFields[code]['end']]
except KeyError:
print("Key error, GeoFields[%s] =" % code, GeoFields[code])
break
# print("Line:", line)
# for field in d:
# print(field, ":", d[field], ":", GeoFields[field]['name'])
# print()
# print(d['COUNTY'], d['TRACT'], d['BLKGRP'], d['BLOCK'])
return d
if __name__ == '__main__':
parser = argparse.ArgumentParser(
description="Parse US Decennial census data")
parser.add_argument('-c', action="store", dest="code",
help='Show filenumber containing 6-digit census code')
parser.add_argument('-d', action="store", dest="desc",
help='Show entries containing a long description')
parser.add_argument('-g', action="store", dest="geo",
help='Parse the <ST>geo.uf1 file')
parser.add_argument('zipfile', help="location of SF1SAS.zip file")
args = parser.parse_args(sys.argv[1:])
# print(args)
# Pass in the path to SF1SAS.zip
codesFromZipFile(args.zipfile)
if args.code:
print("Files with code %s:" % args.code, file_for_code(args.code))
elif args.desc:
codes = codes_for_description(args.desc)
print('Codes containing description "%s":' % args.desc)
for pair in codes:
print("%s: %s" % pair)
elif args.geo:
parse_geo_file(args.geo)
else:
for fileno in CensusCodes:
print("\n==== File", fileno)
for pcode in CensusCodes[fileno]:
print("%7s: %s" % (pcode, CensusCodes[fileno][pcode]))
| gpl-2.0 | -6,318,665,787,852,739,000 | 31.106061 | 78 | 0.575901 | false | 3.543478 | false | false | false |
lytex/multisplt | multisplt.py | 1 | 2762 | #!/usr/bin/python
# coding=UTF-8
import sys
import re
import os
import math
# Time formats
# mm:ss where mm may exceed 59 (and even 99)
# hh:mm:ss with mm <= 59, ss <= 59
# Arguments format
# 01 52nd Street Theme 00:11 02 A Night in Tunisia 05:03
# 03 All The Things You Are 08:07 04 Embraceable You 15:21
# 00:00:00 01 Serenade Grotesque 00:03:20 02 Menuet Antique
# 00:09:31 03 Pavane Pour Une infante defunte 00:15:55 04 Jeux D'eau
# The song names don't have to be between quotes and can be before or after the timestamps (the regex doesn't care)
# mp3splt format
# mp3splt foo.mp3 0.30 1.00 2.00 EOF -o @n
# 1.mp3 0:30 - 1:00
# 2.mp3 1:00 - 2:00
# 3.mp3 3:00 - EOF
def toMinSec(time):
    splited = re.split(":", time)
    if len(splited) == 2:
        # Already mm:ss -- no need to convert
        return splited[0] + "." + splited[1]
    elif len(splited) == 3:
        minutes = int(splited[0]) * 60 + int(splited[1])
        return str(minutes) + "." + splited[2]
else:
return None
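# Examples: toMinSec("05:03") == "05.03" (mm:ss passes through) and
# toMinSec("01:15:55") == "75.55" (hh:mm:ss collapsed to minutes)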
# TODO: if an argument contains a ' character, the quote must be closed on
# the command line even though the script never uses it; the shell handles
# quoting before the script runs
#inputfile = sys.argv[1]
argv = ' '.join(sys.argv[1:])
# Drops the program name (argv[0]) and joins the remaining arguments into
# one string separated by ' ' so the regexes below can work on it
# \d+:\d{2}:\d{2} -> hh:mm:ss
time_regex = r'\d+:\d{2}:\d{2}|\d+:\d{2}'
arg_time = re.findall(time_regex, argv)
num_time = len(arg_time)
arg_name = re.split(time_regex, argv)
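# Illustrative: with argv == "foo.mp3 00:00 01 Intro 03:20 02 Song",
# arg_time == ['00:00', '03:20'] and, before the cleanup below,
# arg_name == ['foo.mp3 ', ' 01 Intro ', ' 02 Song']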
inputfile = arg_name[0].strip()
del arg_name[0]
# arg_name still holds empty strings and the stray spaces left over from
# the regex split, so drop the empties and trim the rest
arg_name = [name.strip() for name in arg_name if name.strip()]
num_name = len(arg_name)
# TODO check that nun_name = num_time + 1
# Initial timestamp may be implicit and num_name = num_time + 2
if (num_name == num_time):
mp3args = inputfile+" "
for i in range(0, num_time):
mp3args += toMinSec(arg_time[i])+" "
mp3args += "EOF -o @n"
else:
sys.exit("The number of names and timestamps doesn't match")
os.system("mp3splt "+mp3args)
pad = int(math.floor(math.log10(num_name))) + 1
# The mp3splt name files will be str(i).zfill(pad)
for i in range(1, num_name+1):
print (str(i).zfill(pad)+".mp3")
seqname = str(i).zfill(pad)
filename = '"' +seqname+" - "+arg_name[i-1]+".mp3"+ '"'
os.system("mv "+seqname+".mp3"+" "+filename)
| gpl-3.0 | -3,314,878,754,560,369,000 | 23.22807 | 113 | 0.634685 | false | 2.673766 | false | false | false |
motech/perf | activemq-intercepter/client/stats.py | 1 | 1696 | from influxdb import client as influxdb
import json
from stompy.stomp import Stomp
import threading
import time
def listener():
# Connect to activemq and subscribe to the stats queue
stomp = Stomp("localhost")
stomp.connect()
stomp.subscribe({'destination':'/queue/stats', 'ack':'client'})
# Connect to influxdb
db = influxdb.InfluxDBClient(database="motech")
while True:
frame = stomp.receive_frame()
print(frame.headers['message-id'])
# Post to influxDB
msg = json.loads(frame.body)
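        # Assumed message shape (illustrative):
        #   {"timestamp": 1400425947368,
        #    "subjects": [{"subject": "foo.bar", "value": 3}, ...]}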
if len(msg["subjects"]):
data = []
for subject in msg["subjects"]:
data.append(
{
"name":"activemq_queue_depth_" + subject["subject"],
"columns":["timestamp", "value"],
"points":[[msg["timestamp"], subject["value"]]],
})
print(data)
try:
db.write_points(data)
            except Exception:
                # Reconnect and retry once if the write fails
                db = influxdb.InfluxDBClient(database="motech")
db.write_points(data)
stomp.ack(frame)
# [
# {
# "name": "activemq_queue_depth",
# "columns": ["time", "subject", "value"],
# "points": [
# [1400425947368, "", ]
# ]
# }
# ]
if __name__ == "__main__":
stomp = Stomp("localhost")
stomp.connect()
t = threading.Thread(target=listener)
t.daemon = True
t.start()
while True:
time.sleep(1)
# Send message to activemq
stomp.send({'destination': '/queue/foo.bar',
'body': 'Testing',
'reply-to': '/queue/stats'})
| bsd-3-clause | -6,311,227,152,657,556,000 | 24.313433 | 76 | 0.510613 | false | 3.907834 | false | false | false |
lc525/cmake-project | docs/ext/breathe-1.0.0/breathe/directives.py | 1 | 36150 |
from docutils import nodes
from docutils.parsers.rst.directives import unchanged_required, unchanged, flag
import os
import sys
import copy
import fnmatch
import re
import textwrap
import collections
from docutils.parsers import rst
from docutils.statemachine import ViewList
from sphinx.domains.cpp import DefinitionParser
from breathe.finder import FinderFactory, NoMatchesError, MultipleMatchesError
from breathe.parser import DoxygenParserFactory, CacheFactory, ParserError
from breathe.renderer.rst.doxygen import DoxygenToRstRendererFactoryCreatorConstructor, RstContentCreator
from breathe.renderer.rst.doxygen.domain import DomainHandlerFactoryCreator, NullDomainHandler
from breathe.renderer.rst.doxygen.domain import CppDomainHelper, CDomainHelper
from breathe.renderer.rst.doxygen.filter import FilterFactory, GlobFactory
from breathe.renderer.rst.doxygen.target import TargetHandlerFactory
from breathe.finder.doxygen import DoxygenItemFinderFactoryCreator, ItemMatcherFactory
import docutils.nodes
import sphinx.addnodes
import sphinx.ext.mathbase
# Somewhat outrageously, reach in and fix a Sphinx regex
import sphinx.domains.cpp
sphinx.domains.cpp._identifier_re = re.compile(r'(~?\b[a-zA-Z_][a-zA-Z0-9_]*)\b')
class BreatheException(Exception):
pass
class NoMatchingFunctionError(BreatheException):
pass
class UnableToResolveFunctionError(BreatheException):
pass
class NoDefaultProjectError(BreatheException):
    pass
class NodeNotFoundError(BreatheException):
    pass
class BaseDirective(rst.Directive):
def __init__(
self,
root_data_object,
renderer_factory_creator_constructor,
finder_factory,
matcher_factory,
project_info_factory,
filter_factory,
target_handler_factory,
*args
):
rst.Directive.__init__(self, *args)
self.root_data_object = root_data_object
self.renderer_factory_creator_constructor = renderer_factory_creator_constructor
self.finder_factory = finder_factory
self.matcher_factory = matcher_factory
self.project_info_factory = project_info_factory
self.filter_factory = filter_factory
self.target_handler_factory = target_handler_factory
# Directives
# ----------
class DoxygenIndexDirective(BaseDirective):
required_arguments = 0
optional_arguments = 2
option_spec = {
"path": unchanged_required,
"project": unchanged_required,
"outline": flag,
"no-link": flag,
}
has_content = False
def run(self):
try:
project_info = self.project_info_factory.create_project_info(self.options)
except NoDefaultProjectError as e:
warning = 'doxygenindex: %s' % e
return [docutils.nodes.warning("", docutils.nodes.paragraph("", "", docutils.nodes.Text(warning))),
self.state.document.reporter.warning(warning, line=self.lineno)]
try:
finder = self.finder_factory.create_finder(project_info)
except ParserError as e:
warning = 'doxygenindex: Unable to parse file "%s"' % e
return [docutils.nodes.warning("", docutils.nodes.paragraph("", "", docutils.nodes.Text(warning))),
self.state.document.reporter.warning(warning, line=self.lineno)]
data_object = finder.root()
target_handler = self.target_handler_factory.create(self.options, project_info, self.state.document)
filter_ = self.filter_factory.create_index_filter(self.options)
renderer_factory_creator = self.renderer_factory_creator_constructor.create_factory_creator(
project_info,
self.state.document,
self.options,
)
renderer_factory = renderer_factory_creator.create_factory(
data_object,
self.state,
self.state.document,
filter_,
target_handler,
)
object_renderer = renderer_factory.create_renderer(self.root_data_object, data_object)
node_list = object_renderer.render()
return node_list
class DoxygenFunctionDirective(BaseDirective):
required_arguments = 1
option_spec = {
"path": unchanged_required,
"project": unchanged_required,
"outline": flag,
"no-link": flag,
}
has_content = False
final_argument_whitespace = True
def run(self):
# Separate possible arguments (delimited by a "(") from the namespace::name
match = re.match( r"([^(]*)(.*)", self.arguments[0] )
namespaced_function, args = match.group(1), match.group(2)
# Split the namespace and the function name
try:
(namespace, function_name) = namespaced_function.rsplit( "::", 1 )
except ValueError:
(namespace, function_name) = "", namespaced_function
try:
project_info = self.project_info_factory.create_project_info(self.options)
except NoDefaultProjectError as e:
warning = 'doxygenfunction: %s' % e
return [docutils.nodes.warning("", docutils.nodes.paragraph("", "", docutils.nodes.Text(warning))),
self.state.document.reporter.warning(warning, line=self.lineno)]
finder = self.finder_factory.create_finder(project_info)
# Extract arguments from the function name.
args = self.parse_args(args)
matcher_stack = self.matcher_factory.create_matcher_stack(
{
"compound": self.matcher_factory.create_name_matcher(namespace),
"member": self.matcher_factory.create_name_type_matcher(function_name, "function")
},
"member"
)
results = finder.find(matcher_stack)
try:
data_object = self.resolve_function(results, args)
except NoMatchingFunctionError:
warning = ('doxygenfunction: Cannot find function "%s%s" in doxygen xml output '
'for project "%s" from directory: %s'
% (namespace, function_name, project_info.name(), project_info.path()))
return [docutils.nodes.warning("", docutils.nodes.paragraph("", "", docutils.nodes.Text(warning))),
self.state.document.reporter.warning(warning, line=self.lineno)]
except UnableToResolveFunctionError:
warning = ('doxygenfunction: Unable to resolve multiple matches for function "%s%s" with arguments (%s) in doxygen xml output '
'for project "%s" from directory: %s.'
% (namespace, function_name, ", ".join(args), project_info.name(), project_info.path()))
return [docutils.nodes.warning("", docutils.nodes.paragraph("", "", docutils.nodes.Text(warning))),
self.state.document.reporter.warning(warning, line=self.lineno)]
target_handler = self.target_handler_factory.create(self.options, project_info, self.state.document)
filter_ = self.filter_factory.create_outline_filter(self.options)
renderer_factory_creator = self.renderer_factory_creator_constructor.create_factory_creator(
project_info,
self.state.document,
self.options,
)
renderer_factory = renderer_factory_creator.create_factory(
data_object,
self.state,
self.state.document,
filter_,
target_handler,
)
object_renderer = renderer_factory.create_renderer(self.root_data_object, data_object)
node_list = object_renderer.render()
return node_list
def parse_args(self, function_description):
paren_index = function_description.find('(')
if paren_index == -1:
return []
else:
# Parse the function name string, eg. f(int, float) to
# extract the types so we can use them for matching
args = []
            num_open_brackets = -1
start = paren_index + 1
for i in range(paren_index, len(function_description)):
c = function_description[i]
if c == '(' or c == '<':
num_open_brackets += 1
elif c == ')' or c == '>':
num_open_brackets -= 1
elif c == ',' and num_open_brackets == 0:
args.append(function_description[start:i].strip())
start = i + 1
args.append(function_description[start:-1].strip())
return args
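    # For example (hypothetical input), parse_args("(int, std::map<int, float>)")
    # returns ["int", "std::map<int, float>"], while an argument string with no
    # parentheses returns []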
def resolve_function(self, matches, args):
if not matches:
raise NoMatchingFunctionError()
if len(matches) == 1:
return matches[0]
data_object = None
        # Tries to match the args array against the arguments listed in the
# doxygen data
# TODO: We don't have any doxygen xml dom accessing code at this level
# this might benefit from being abstracted away at some point
for entry in matches:
if len(args) == len(entry.param):
equal = True
for i in range(len(args)):
param_type = entry.param[i].type_.content_[0].value
                    if not isinstance(param_type, unicode):
param_type = param_type.valueOf_
if args[i] != param_type:
equal = False
break
if equal:
data_object = entry
break
if not data_object:
raise UnableToResolveFunctionError()
return data_object
class DoxygenClassDirective(BaseDirective):
kind = "class"
required_arguments = 1
optional_arguments = 1
option_spec = {
"path": unchanged_required,
"project": unchanged_required,
"members": unchanged,
"sections": unchanged,
"show": unchanged_required,
"outline": flag,
"no-link": flag,
}
has_content = False
def run(self):
name = self.arguments[0]
try:
project_info = self.project_info_factory.create_project_info(self.options)
except NoDefaultProjectError as e:
warning = 'doxygen%s: %s' % (self.kind, e)
return [docutils.nodes.warning("", docutils.nodes.paragraph("", "", docutils.nodes.Text(warning))),
self.state.document.reporter.warning(warning, line=self.lineno)]
finder = self.finder_factory.create_finder(project_info)
matcher_stack = self.matcher_factory.create_matcher_stack(
{
"compound": self.matcher_factory.create_name_type_matcher(name, self.kind)
},
"compound"
)
try:
data_object = finder.find_one(matcher_stack)
except NoMatchesError as e:
warning = ('doxygen%s: Cannot find %s "%s" in doxygen xml output for project "%s" from directory: %s'
% (self.kind, self.kind, name, project_info.name(), project_info.path()))
return [docutils.nodes.warning("", docutils.nodes.paragraph("", "", docutils.nodes.Text(warning))),
self.state.document.reporter.warning(warning, line=self.lineno)]
target_handler = self.target_handler_factory.create(self.options, project_info, self.state.document)
filter_ = self.filter_factory.create_class_filter(self.options)
renderer_factory_creator = self.renderer_factory_creator_constructor.create_factory_creator(
project_info,
self.state.document,
self.options,
)
renderer_factory = renderer_factory_creator.create_factory(
data_object,
self.state,
self.state.document,
filter_,
target_handler,
)
object_renderer = renderer_factory.create_renderer(self.root_data_object, data_object)
node_list = object_renderer.render()
return node_list
class DoxygenFileDirective(BaseDirective):
kind = "file"
required_arguments = 1
optional_arguments = 1
option_spec = {
"path": unchanged_required,
"project": unchanged_required,
"no-link": flag,
}
has_content = False
def run(self):
name = self.arguments[0]
try:
project_info = self.project_info_factory.create_project_info(self.options)
except NoDefaultProjectError as e:
warning = 'doxygenfile: %s' % e
return [docutils.nodes.warning("", docutils.nodes.paragraph("", "", docutils.nodes.Text(warning))),
self.state.document.reporter.warning(warning, line=self.lineno)]
finder = self.finder_factory.create_finder(project_info)
finder_filter = self.filter_factory.create_file_finder_filter(name)
matches = []
finder.filter_(finder_filter, matches)
if len(matches) > 1:
warning = ('doxygenfile: Found multiple matches for file "%s" in doxygen xml output for project "%s" '
'from directory: %s' % (name, project_info.name(), project_info.path()))
return [docutils.nodes.warning("", docutils.nodes.paragraph("", "", docutils.nodes.Text(warning))),
self.state.document.reporter.warning(warning, line=self.lineno)]
elif not matches:
warning = ('doxygenfile: Cannot find file "%s" in doxygen xml output for project "%s" from directory: %s'
% (name, project_info.name(), project_info.path()))
return [docutils.nodes.warning("", docutils.nodes.paragraph("", "", docutils.nodes.Text(warning))),
self.state.document.reporter.warning(warning, line=self.lineno)]
target_handler = self.target_handler_factory.create(self.options, project_info, self.state.document)
filter_ = self.filter_factory.create_file_filter(name, self.options)
renderer_factory_creator = self.renderer_factory_creator_constructor.create_factory_creator(
project_info,
self.state.document,
self.options,
)
node_list = []
for data_object in matches:
renderer_factory = renderer_factory_creator.create_factory(
data_object,
self.state,
self.state.document,
filter_,
target_handler,
)
object_renderer = renderer_factory.create_renderer(self.root_data_object, data_object)
node_list.extend(object_renderer.render())
return node_list
class DoxygenBaseDirective(BaseDirective):
required_arguments = 1
optional_arguments = 1
option_spec = {
"path": unchanged_required,
"project": unchanged_required,
"outline": flag,
"no-link": flag,
}
has_content = False
def run(self):
try:
namespace, name = self.arguments[0].rsplit("::", 1)
except ValueError:
namespace, name = "", self.arguments[0]
try:
project_info = self.project_info_factory.create_project_info(self.options)
except NoDefaultProjectError as e:
warning = 'doxygen%s: %s' % (self.kind, e)
return [docutils.nodes.warning("", docutils.nodes.paragraph("", "", docutils.nodes.Text(warning))),
self.state.document.reporter.warning(warning, line=self.lineno)]
finder = self.finder_factory.create_finder(project_info)
matcher_stack = self.create_matcher_stack(namespace, name)
try:
data_object = finder.find_one(matcher_stack)
except NoMatchesError as e:
display_name = "%s::%s" % (namespace, name) if namespace else name
warning = ('doxygen%s: Cannot find %s "%s" in doxygen xml output for project "%s" from directory: %s'
% (self.kind, self.kind, display_name, project_info.name(), project_info.path()))
return [docutils.nodes.warning("", docutils.nodes.paragraph("", "", docutils.nodes.Text(warning))),
self.state.document.reporter.warning(warning, line=self.lineno)]
target_handler = self.target_handler_factory.create(self.options, project_info, self.state.document)
filter_ = self.filter_factory.create_outline_filter(self.options)
renderer_factory_creator = self.renderer_factory_creator_constructor.create_factory_creator(
project_info,
self.state.document,
self.options,
)
renderer_factory = renderer_factory_creator.create_factory(
data_object,
self.state,
self.state.document,
filter_,
target_handler,
)
object_renderer = renderer_factory.create_renderer(self.root_data_object, data_object)
node_list = object_renderer.render()
return node_list
class DoxygenStructDirective(DoxygenBaseDirective):
kind = "struct"
def create_matcher_stack(self, namespace, name):
# Structs are stored in the xml file with their fully namespaced name
        # We're using C++ namespaces here; it might be best to make this
        # file-type dependent
#
xml_name = "%s::%s" % (namespace, name) if namespace else name
return self.matcher_factory.create_matcher_stack(
{
"compound": self.matcher_factory.create_name_type_matcher(xml_name, self.kind)
},
"compound"
)
# This class was the same as the DoxygenBaseDirective above, except that it
# wrapped the output in a definition_list before passing it back. This should
# be abstracted in a far nicer way to avoid repeating so much code.
#
# Now we've removed the definition_list wrap, so we really need to refactor this!
class DoxygenBaseItemDirective(BaseDirective):
required_arguments = 1
optional_arguments = 1
option_spec = {
"path": unchanged_required,
"project": unchanged_required,
"outline": flag,
"no-link": flag,
}
has_content = False
def run(self):
try:
namespace, name = self.arguments[0].rsplit("::", 1)
except ValueError:
namespace, name = "", self.arguments[0]
try:
project_info = self.project_info_factory.create_project_info(self.options)
except NoDefaultProjectError as e:
warning = 'doxygen%s: %s' % (self.kind, e)
return [docutils.nodes.warning("", docutils.nodes.paragraph("", "", docutils.nodes.Text(warning))),
self.state.document.reporter.warning(warning, line=self.lineno)]
finder = self.finder_factory.create_finder(project_info)
matcher_stack = self.create_matcher_stack(namespace, name)
try:
data_object = finder.find_one(matcher_stack)
except NoMatchesError as e:
display_name = "%s::%s" % (namespace, name) if namespace else name
warning = ('doxygen%s: Cannot find %s "%s" in doxygen xml output for project "%s" from directory: %s'
% (self.kind, self.kind, display_name, project_info.name(), project_info.path()))
return [docutils.nodes.warning("", docutils.nodes.paragraph("", "", docutils.nodes.Text(warning))),
self.state.document.reporter.warning(warning, line=self.lineno)]
target_handler = self.target_handler_factory.create(self.options, project_info, self.state.document)
filter_ = self.filter_factory.create_outline_filter(self.options)
renderer_factory_creator = self.renderer_factory_creator_constructor.create_factory_creator(
project_info,
self.state.document,
self.options,
)
renderer_factory = renderer_factory_creator.create_factory(
data_object,
self.state,
self.state.document,
filter_,
target_handler,
)
object_renderer = renderer_factory.create_renderer(self.root_data_object, data_object)
node_list = object_renderer.render()
return node_list
class DoxygenVariableDirective(DoxygenBaseItemDirective):
kind = "variable"
def create_matcher_stack(self, namespace, name):
return self.matcher_factory.create_matcher_stack(
{
"compound": self.matcher_factory.create_name_matcher(namespace),
"member": self.matcher_factory.create_name_type_matcher(name, self.kind)
},
"member"
)
class DoxygenDefineDirective(DoxygenBaseItemDirective):
kind = "define"
def create_matcher_stack(self, namespace, name):
return self.matcher_factory.create_matcher_stack(
{
"compound": self.matcher_factory.create_name_matcher(namespace),
"member": self.matcher_factory.create_name_type_matcher(name, self.kind)
},
"member"
)
class DoxygenEnumDirective(DoxygenBaseItemDirective):
kind = "enum"
def create_matcher_stack(self, namespace, name):
return self.matcher_factory.create_matcher_stack(
{
"compound": self.matcher_factory.create_name_matcher(namespace),
"member": self.matcher_factory.create_name_type_matcher(name, self.kind)
},
"member"
)
class DoxygenTypedefDirective(DoxygenBaseItemDirective):
kind = "typedef"
def create_matcher_stack(self, namespace, name):
return self.matcher_factory.create_matcher_stack(
{
"compound": self.matcher_factory.create_name_matcher(namespace),
"member": self.matcher_factory.create_name_type_matcher(name, self.kind)
},
"member"
)
# Setup Administration
# --------------------
class DirectiveContainer(object):
def __init__(
self,
directive,
*args
):
self.directive = directive
self.args = args
# Required for sphinx to inspect
self.required_arguments = directive.required_arguments
self.optional_arguments = directive.optional_arguments
self.option_spec = directive.option_spec
self.has_content = directive.has_content
self.final_argument_whitespace = directive.final_argument_whitespace
def __call__(self, *args):
call_args = []
call_args.extend(self.args)
call_args.extend(args)
return self.directive(*call_args)
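    # Illustrative (hypothetical names): DirectiveContainer(SomeDirective, dep1,
    # dep2)(*sphinx_args) instantiates SomeDirective(dep1, dep2, *sphinx_args),
    # currying the factory-supplied dependencies onto the directive constructor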
class ProjectInfo(object):
def __init__(self, name, path, reference, domain_by_extension, domain_by_file_pattern, match):
self._name = name
self._path = path
self._reference = reference
self._domain_by_extension = domain_by_extension
self._domain_by_file_pattern = domain_by_file_pattern
self._match = match
def name(self):
return self._name
def path(self):
return self._path
def reference(self):
return self._reference
def domain_for_file(self, file_):
domain = ""
extension = file_.split(".")[-1]
try:
domain = self._domain_by_extension[extension]
except KeyError:
pass
for pattern, pattern_domain in self._domain_by_file_pattern.items():
if self._match(file_, pattern):
domain = pattern_domain
return domain
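    # Illustrative: with domain_by_extension == {"h": "cpp"} and
    # domain_by_file_pattern == {"*.hpp": "cpp"}, domain_for_file("util.h")
    # returns "cpp"; files matching neither mapping return ""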
class ProjectInfoFactory(object):
def __init__(self, match):
self.match = match
self.projects = {}
self.default_project = None
self.domain_by_extension = {}
self.domain_by_file_pattern = {}
self.project_count = 0
self.project_info_store = {}
def update(
self,
projects,
default_project,
domain_by_extension,
domain_by_file_pattern,
):
self.projects = projects
self.default_project = default_project
self.domain_by_extension = domain_by_extension
self.domain_by_file_pattern = domain_by_file_pattern
def default_path(self):
if not self.default_project:
raise NoDefaultProjectError(
"No breathe_default_project config setting to fall back on "
"for directive with no 'project' or 'path' specified."
)
return self.projects[self.default_project]
def create_project_info(self, options):
name = ""
if "project" in options:
try:
path = self.projects[options["project"]]
name = options["project"]
            except KeyError:
                sys.stderr.write(
                    "Unable to find project '%s' in breathe_projects dictionary" % options["project"]
                )
                raise
elif "path" in options:
path = options["path"]
else:
path = self.default_path()
try:
return self.project_info_store[path]
except KeyError:
reference = name
if not name:
name = "project%s" % self.project_count
reference = path
self.project_count += 1
project_info = ProjectInfo(
name,
path,
reference,
self.domain_by_extension,
self.domain_by_file_pattern,
self.match
)
self.project_info_store[path] = project_info
return project_info
class DoxygenDirectiveFactory(object):
directives = {
"doxygenindex": DoxygenIndexDirective,
"doxygenfunction": DoxygenFunctionDirective,
"doxygenstruct": DoxygenStructDirective,
"doxygenclass": DoxygenClassDirective,
"doxygenvariable": DoxygenVariableDirective,
"doxygendefine": DoxygenDefineDirective,
"doxygenenum": DoxygenEnumDirective,
"doxygentypedef": DoxygenTypedefDirective,
"doxygenfile": DoxygenFileDirective,
}
def __init__(
self,
root_data_object,
renderer_factory_creator_constructor,
finder_factory,
matcher_factory,
project_info_factory,
filter_factory,
target_handler_factory
):
self.root_data_object = root_data_object
self.renderer_factory_creator_constructor = renderer_factory_creator_constructor
self.finder_factory = finder_factory
self.matcher_factory = matcher_factory
self.project_info_factory = project_info_factory
self.filter_factory = filter_factory
self.target_handler_factory = target_handler_factory
def create_index_directive_container(self):
return self.create_directive_container("doxygenindex")
def create_function_directive_container(self):
return self.create_directive_container("doxygenfunction")
def create_struct_directive_container(self):
return self.create_directive_container("doxygenstruct")
def create_enum_directive_container(self):
return self.create_directive_container("doxygenenum")
def create_typedef_directive_container(self):
return self.create_directive_container("doxygentypedef")
def create_class_directive_container(self):
return self.create_directive_container("doxygenclass")
def create_file_directive_container(self):
return self.create_directive_container("doxygenfile")
def create_variable_directive_container(self):
return self.create_directive_container("doxygenvariable")
def create_define_directive_container(self):
return self.create_directive_container("doxygendefine")
def create_directive_container(self, type_):
return DirectiveContainer(
self.directives[type_],
self.root_data_object,
self.renderer_factory_creator_constructor,
self.finder_factory,
self.matcher_factory,
self.project_info_factory,
self.filter_factory,
self.target_handler_factory
)
def get_config_values(self, app):
# All DirectiveContainers maintain references to this project info factory
# so we can update this to update them
self.project_info_factory.update(
app.config.breathe_projects,
app.config.breathe_default_project,
app.config.breathe_domain_by_extension,
app.config.breathe_domain_by_file_pattern,
)
class NodeFactory(object):
def __init__(self, *args):
self.sources = args
def __getattr__(self, node_name):
for source in self.sources:
try:
return getattr(source, node_name)
except AttributeError:
pass
raise NodeNotFoundError(node_name)
class RootDataObject(object):
node_type = "root"
class PathHandler(object):
def __init__(self, sep, basename, join):
self.sep = sep
self.basename = basename
self.join = join
def includes_directory(self, file_path):
return bool( file_path.count( self.sep ) )
class MTimer(object):
def __init__(self, getmtime):
self.getmtime = getmtime
def get_mtime(self, filename):
return self.getmtime(filename)
class FileStateCache(object):
"""
Stores the modified time of the various doxygen xml files against the
reStructuredText file that they are referenced from so that we know which
reStructuredText files to rebuild if the doxygen xml is modified.
We store the information in the environment object so that it is pickled
down and stored between builds as Sphinx is designed to do.
"""
def __init__(self, mtimer, app):
self.app = app
self.mtimer = mtimer
def update(self, source_file):
if not hasattr( self.app.env, "breathe_file_state" ):
self.app.env.breathe_file_state = {}
new_mtime = self.mtimer.get_mtime(source_file)
mtime, docnames = self.app.env.breathe_file_state.setdefault(source_file, (new_mtime, set()))
docnames.add(self.app.env.docname)
self.app.env.breathe_file_state[source_file] = (new_mtime, docnames)
def get_outdated(self, app, env, added, changed, removed):
if not hasattr( self.app.env, "breathe_file_state" ):
return []
stale = []
for filename, info in self.app.env.breathe_file_state.items():
old_mtime, docnames = info
if self.mtimer.get_mtime(filename) > old_mtime:
stale.extend(docnames)
return list(set(stale).difference(removed))
def purge_doc(self, app, env, docname):
if not hasattr( self.app.env, "breathe_file_state" ):
return
toremove = []
for filename, info in self.app.env.breathe_file_state.items():
_, docnames = info
docnames.discard(docname)
if not docnames:
toremove.append(filename)
for filename in toremove:
del self.app.env.breathe_file_state[filename]
# Setup
# -----
def setup(app):
cache_factory = CacheFactory()
cache = cache_factory.create_cache()
path_handler = PathHandler(os.sep, os.path.basename, os.path.join)
mtimer = MTimer(os.path.getmtime)
file_state_cache = FileStateCache(mtimer, app)
parser_factory = DoxygenParserFactory(cache, path_handler, file_state_cache)
matcher_factory = ItemMatcherFactory()
item_finder_factory_creator = DoxygenItemFinderFactoryCreator(parser_factory, matcher_factory)
index_parser = parser_factory.create_index_parser()
finder_factory = FinderFactory(index_parser, item_finder_factory_creator)
# Create a math_nodes object with a displaymath member for the displaymath
# node so that we can treat it in the same way as the nodes & addnodes
# modules in the NodeFactory
math_nodes = collections.namedtuple("MathNodes", ["displaymath"])
math_nodes.displaymath = sphinx.ext.mathbase.displaymath
node_factory = NodeFactory(docutils.nodes, sphinx.addnodes, math_nodes)
cpp_domain_helper = CppDomainHelper(DefinitionParser, re.sub)
c_domain_helper = CDomainHelper()
domain_helpers = {"c": c_domain_helper, "cpp": cpp_domain_helper}
domain_handler_factory_creator = DomainHandlerFactoryCreator(node_factory, domain_helpers)
rst_content_creator = RstContentCreator(ViewList, textwrap.dedent)
default_domain_handler = NullDomainHandler()
renderer_factory_creator_constructor = DoxygenToRstRendererFactoryCreatorConstructor(
node_factory,
parser_factory,
default_domain_handler,
domain_handler_factory_creator,
rst_content_creator
)
project_info_factory = ProjectInfoFactory(fnmatch.fnmatch)
glob_factory = GlobFactory(fnmatch.fnmatch)
filter_factory = FilterFactory(glob_factory, path_handler)
target_handler_factory = TargetHandlerFactory(node_factory)
root_data_object = RootDataObject()
directive_factory = DoxygenDirectiveFactory(
root_data_object,
renderer_factory_creator_constructor,
finder_factory,
matcher_factory,
project_info_factory,
filter_factory,
target_handler_factory
)
app.add_directive(
"doxygenindex",
directive_factory.create_index_directive_container(),
)
app.add_directive(
"doxygenfunction",
directive_factory.create_function_directive_container(),
)
app.add_directive(
"doxygenstruct",
directive_factory.create_struct_directive_container(),
)
app.add_directive(
"doxygenenum",
directive_factory.create_enum_directive_container(),
)
app.add_directive(
"doxygentypedef",
directive_factory.create_typedef_directive_container(),
)
app.add_directive(
"doxygenclass",
directive_factory.create_class_directive_container(),
)
app.add_directive(
"doxygenfile",
directive_factory.create_file_directive_container(),
)
app.add_directive(
"doxygenvariable",
directive_factory.create_variable_directive_container(),
)
app.add_directive(
"doxygendefine",
directive_factory.create_define_directive_container(),
)
app.add_config_value("breathe_projects", {}, True)
app.add_config_value("breathe_default_project", "", True)
app.add_config_value("breathe_domain_by_extension", {}, True)
app.add_config_value("breathe_domain_by_file_pattern", {}, True)
app.add_stylesheet("breathe.css")
app.connect("builder-inited", directive_factory.get_config_values)
app.connect("env-get-outdated", file_state_cache.get_outdated)
app.connect("env-purge-doc", file_state_cache.purge_doc)
| bsd-2-clause | -5,710,829,884,436,303,000 | 33.527221 | 139 | 0.602379 | false | 4.328823 | false | false | false |
qqalexqq/monkeys | test/test_views.py | 1 | 8060 | from hamcrest import *
from models import Monkey as M
from test_models import create_monkeys
def test_view_monkey_list(client, session):
monkey_ginger, monkey_john, monkey_melissa = create_monkeys(session)
monkey_john.add_friend(monkey_melissa)
session.commit()
request = client.get('/')
assert_that(request.status_code, equal_to(200))
for monkey in (monkey_ginger, monkey_john, monkey_melissa):
assert_that(request.data, contains_string(monkey.name))
assert_that(request.data, contains_string(str(monkey.friends_count)))
request = client.get('/?page={0}'.format(100), follow_redirects=True)
assert_that(request.status_code, equal_to(200))
for monkey in (monkey_ginger, monkey_john, monkey_melissa):
assert_that(request.data, contains_string(monkey.name))
assert_that(request.data, contains_string(str(monkey.friends_count)))
def test_view_monkey(client, session):
monkey_ginger, monkey_john, monkey_melissa = create_monkeys(session)
monkey_ginger.set_best_friend(monkey_melissa)
session.commit()
request = client.get('/monkey/{0}'.format(monkey_ginger.id))
assert_that(
request.data,
contains_string('[email protected]')
)
assert_that(
request.data,
contains_string('Melissa')
)
def test_add_monkey(client, session):
request = client.get('/monkey/add')
assert_that(
request.data,
contains_string('Add monkey')
)
data = dict(name='John', age=2, email='[email protected]')
request = client.post('/monkey/add', data=data, follow_redirects=True)
assert_that(request.status_code, equal_to(200))
monkey = M.query.filter(M.email == '[email protected]').one()
assert_that(monkey.name, equal_to('John'))
assert_that(monkey.email, equal_to('[email protected]'))
assert_that(monkey.age, equal_to(2))
data = dict(name='John', age='not_an_age', email='[email protected]')
request = client.post('/monkey/add', data=data, follow_redirects=True)
assert_that(request.status_code, equal_to(200))
assert_that(request.data, contains_string('Not a valid integer value'))
def test_edit_monkey(client, session):
data = dict(name='Melissa', age=19, email='[email protected]')
monkey = M(**data)
session.add(monkey)
session.commit()
request = client.get('/monkey/{0}/edit'.format(monkey.id))
assert_that(
request.data,
contains_string('Edit monkey')
)
assert_that(
request.data,
contains_string('[email protected]')
)
data['age'] = 20
request = client.post(
'/monkey/{0}/edit'.format(monkey.id),
data=data, follow_redirects=True
)
assert_that(request.status_code, equal_to(200))
assert_that(request.data, contains_string('Melissa'))
assert_that(request.data, contains_string('[email protected]'))
assert_that(request.data, contains_string('20'))
data['email'] = 123
request = client.post(
'/monkey/{0}/edit'.format(monkey.id),
data=data, follow_redirects=True
)
assert_that(request.status_code, equal_to(200))
assert_that(request.data, contains_string('Invalid email address'))
def test_delete_monkey(client, session):
monkey = M(name='John', age=2, email='[email protected]')
session.add(monkey)
session.commit()
request = client.get('/monkey/{0}/delete'.format(monkey.id))
assert_that(
request.data,
contains_string('Monkey to be deleted:')
)
assert_that(
request.data,
contains_string('[email protected]')
)
def test_delete_monkey_confirm(client, session):
monkey = M(name='Melissa', age=19, email='[email protected]')
session.add(monkey)
session.commit()
request = client.post(
'/monkey/{0}/delete'.format(monkey.id), follow_redirects=True
)
assert_that(request.status_code, equal_to(200))
assert_that(
request.data,
contains_string('{0} was succesfully deleted.'.format(monkey.name))
)
request = client.get(
'/monkey/{0}/delete/confirm'.format(-1), follow_redirects=True
)
assert_that(request.status_code, equal_to(404))
def test_view_friend_list(client, session):
monkey_ginger, monkey_john, monkey_melissa = create_monkeys(session)
monkey_john.add_friend(monkey_melissa)
session.commit()
request = client.get('/friend/{0}'.format(monkey_john.id))
assert_that(request.status_code, equal_to(200))
assert_that(request.data, contains_string(monkey_melissa.name))
assert_that(request.data, contains_string(str(monkey_melissa.age)))
assert_that(request.data, contains_string(monkey_melissa.email))
request = client.get(
'/friend/{0}?page={1}'.format(monkey_john.id, 100),
follow_redirects=True
)
assert_that(request.status_code, equal_to(200))
assert_that(request.data, contains_string(monkey_melissa.name))
assert_that(request.data, contains_string(str(monkey_melissa.age)))
assert_that(request.data, contains_string(monkey_melissa.email))
def test_view_add_friend(client, session):
monkey_ginger, monkey_john, monkey_melissa = create_monkeys(session)
request = client.get('/friend/{0}/add'.format(monkey_melissa.id))
assert_that(request.status_code, equal_to(200))
for monkey in (monkey_ginger, monkey_john):
assert_that(request.data, contains_string(monkey.name))
assert_that(request.data, contains_string(str(monkey.age)))
assert_that(request.data, contains_string(monkey.email))
request = client.get(
        '/friend/{0}/add?page={1}'.format(monkey_melissa.id, 100),
follow_redirects=True
)
assert_that(request.status_code, equal_to(200))
for monkey in (monkey_ginger, monkey_john):
assert_that(request.data, contains_string(monkey.name))
assert_that(request.data, contains_string(str(monkey.age)))
assert_that(request.data, contains_string(monkey.email))
def test_add_friend(client, session):
monkey_ginger, monkey_john, monkey_melissa = create_monkeys(session)
request = client.post(
'/friend/{0}/add/{1}'.format(monkey_melissa.id, monkey_john.id),
follow_redirects=True
)
assert_that(request.status_code, equal_to(200))
assert_that(request.data, contains_string(
'{0} added to monkey {1} friends.'
.format(monkey_john.name, monkey_melissa.name)
))
def test_delete_friend(client, session):
monkey_ginger, monkey_john, monkey_melissa = create_monkeys(session)
monkey_ginger.add_friend(monkey_melissa)
session.commit()
request = client.post(
'/friend/{0}/delete/{1}'.format(monkey_ginger.id, monkey_melissa.id),
follow_redirects=True
)
assert_that(request.status_code, equal_to(200))
assert_that(request.data, contains_string(
'{0} deleted from monkey {1} friends.'
.format(monkey_melissa.name, monkey_ginger.name)
))
def test_set_best_friend(client, session):
monkey_ginger, monkey_john, monkey_melissa = create_monkeys(session)
request = client.post(
'/best_friend/{0}/set/{1}'.format(monkey_melissa.id, monkey_john.id),
follow_redirects=True
)
assert_that(request.status_code, equal_to(200))
assert_that(request.data, contains_string(
'Best friend {0} set for monkey {1}.'
.format(monkey_john.name, monkey_melissa.name)
))
def test_unset_best_friend(client, session):
monkey_ginger, monkey_john, monkey_melissa = create_monkeys(session)
monkey_melissa.set_best_friend(monkey_john)
session.commit()
request = client.post(
'/best_friend/{0}/unset/{1}'.format(monkey_melissa.id, monkey_john.id),
follow_redirects=True
)
assert_that(request.status_code, equal_to(200))
assert_that(request.data, contains_string(
'Best friend {0} unset for monkey {1}.'
.format(monkey_john.name, monkey_melissa.name)
))
| mit | 575,900,874,263,612,540 | 28.962825 | 79 | 0.663151 | false | 3.34024 | true | false | false |
google-code-export/django-hotclub | libs/external_libs/gdata.py-1.0.13/src/gdata/docs/service.py | 10 | 10786 | #!/usr/bin/python
#
# Copyright (C) 2006 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""DocsService extends the GDataService to streamline Google Documents
operations.
DocsService: Provides methods to query feeds and manipulate items.
Extends GDataService.
DocumentQuery: Queries a Google Document list feed.
"""
__author__ = 'api.jfisher (Jeff Fisher)'
import urllib
import atom
import gdata.service
import gdata.docs
# XML Namespaces used in Google Documents entities.
DATA_KIND_SCHEME = 'http://schemas.google.com/g/2005#kind'
DOCUMENT_KIND_TERM = 'http://schemas.google.com/docs/2007#document'
SPREADSHEET_KIND_TERM = 'http://schemas.google.com/docs/2007#spreadsheet'
PRESENTATION_KIND_TERM = 'http://schemas.google.com/docs/2007#presentation'
# File extensions of documents that are permitted to be uploaded.
SUPPORTED_FILETYPES = {
'CSV': 'text/csv',
'TSV': 'text/tab-separated-values',
'TAB': 'text/tab-separated-values',
'DOC': 'application/msword',
'ODS': 'application/x-vnd.oasis.opendocument.spreadsheet',
'ODT': 'application/vnd.oasis.opendocument.text',
'RTF': 'application/rtf',
'SXW': 'application/vnd.sun.xml.writer',
'TXT': 'text/plain',
'XLS': 'application/vnd.ms-excel',
'PPT': 'application/vnd.ms-powerpoint',
'PPS': 'application/vnd.ms-powerpoint',
'HTM': 'text/html',
'HTML' : 'text/html'}
class DocsService(gdata.service.GDataService):
"""Client extension for the Google Documents service Document List feed."""
def __init__(self, email=None, password=None, source=None,
server='docs.google.com', additional_headers=None):
"""Constructor for the DocsService.
Args:
email: string (optional) The e-mail address of the account to use for
authentication.
password: string (optional) The password of the account to use for
authentication.
source: string (optional) The name of the user's application.
server: string (optional) The server the feed is hosted on.
additional_headers: dict (optional) Any additional HTTP headers to be
transmitted to the service in the form of key-value
pairs.
Yields:
A DocsService object used to communicate with the Google Documents
service.
"""
gdata.service.GDataService.__init__(self, email=email, password=password,
service='writely', source=source,
server=server,
additional_headers=additional_headers)
def Query(self, uri, converter=gdata.docs.DocumentListFeedFromString):
"""Queries the Document List feed and returns the resulting feed of
entries.
Args:
uri: string The full URI to be queried. This can contain query
parameters, a hostname, or simply the relative path to a Document
List feed. The DocumentQuery object is useful when constructing
query parameters.
converter: func (optional) A function which will be executed on the
retrieved item, generally to render it into a Python object.
By default the DocumentListFeedFromString function is used to
return a DocumentListFeed object. This is because most feed
queries will result in a feed and not a single entry.
"""
return self.Get(uri, converter=converter)
def QueryDocumentListFeed(self, uri):
"""Retrieves a DocumentListFeed by retrieving a URI based off the Document
List feed, including any query parameters. A DocumentQuery object can
be used to construct these parameters.
Args:
uri: string The URI of the feed being retrieved possibly with query
parameters.
Returns:
A DocumentListFeed object representing the feed returned by the server.
"""
return self.Get(uri, converter=gdata.docs.DocumentListFeedFromString)
def GetDocumentListEntry(self, uri):
"""Retrieves a particular DocumentListEntry by its unique URI.
Args:
uri: string The unique URI of an entry in a Document List feed.
Returns:
A DocumentListEntry object representing the retrieved entry.
"""
return self.Get(uri, converter=gdata.docs.DocumentListEntryFromString)
def GetDocumentListFeed(self):
"""Retrieves a feed containing all of a user's documents."""
    q = gdata.docs.service.DocumentQuery()
return self.QueryDocumentListFeed(q.ToUri())
def UploadPresentation(self, media_source, title):
"""Uploads a presentation inside of a MediaSource object to the Document
List feed with the given title.
Args:
media_source: MediaSource The MediaSource object containing a
presentation file to be uploaded.
title: string The title of the presentation on the server after being
uploaded.
Returns:
A GDataEntry containing information about the presentation created on the
Google Documents service.
"""
category = atom.Category(scheme=DATA_KIND_SCHEME,
term=PRESENTATION_KIND_TERM)
return self._UploadFile(media_source, title, category)
def UploadSpreadsheet(self, media_source, title):
"""Uploads a spreadsheet inside of a MediaSource object to the Document
List feed with the given title.
Args:
media_source: MediaSource The MediaSource object containing a spreadsheet
file to be uploaded.
title: string The title of the spreadsheet on the server after being
uploaded.
Returns:
A GDataEntry containing information about the spreadsheet created on the
Google Documents service.
"""
category = atom.Category(scheme=DATA_KIND_SCHEME,
term=SPREADSHEET_KIND_TERM)
return self._UploadFile(media_source, title, category)
def UploadDocument(self, media_source, title):
"""Uploads a document inside of a MediaSource object to the Document List
feed with the given title.
Args:
media_source: MediaSource The gdata.MediaSource object containing a
document file to be uploaded.
title: string The title of the document on the server after being
uploaded.
Returns:
A GDataEntry containing information about the document created on the
Google Documents service.
"""
category = atom.Category(scheme=DATA_KIND_SCHEME,
term=DOCUMENT_KIND_TERM)
return self._UploadFile(media_source, title, category)
def _UploadFile(self, media_source, title, category):
"""Uploads a file to the Document List feed.
Args:
media_source: A gdata.MediaSource object containing the file to be
uploaded.
title: string The title of the document on the server after being
uploaded.
category: An atom.Category object specifying the appropriate document
type
Returns:
A GDataEntry containing information about the document created on
the Google Documents service.
"""
media_entry = gdata.GDataEntry()
media_entry.title = atom.Title(text=title)
media_entry.category.append(category)
media_entry = self.Post(media_entry, '/feeds/documents/private/full',
media_source = media_source,
extra_headers = {'Slug' : media_source.file_name })
return media_entry
class DocumentQuery(gdata.service.Query):
"""Object used to construct a URI to query the Google Document List feed"""
def __init__(self, feed='/feeds/documents', visibility='private',
projection='full', text_query=None, params=None,
categories=None):
"""Constructor for Document List Query
Args:
feed: string (optional) The path for the feed. (e.g. '/feeds/documents')
visibility: string (optional) The visibility chosen for the current feed.
projection: string (optional) The projection chosen for the current feed.
text_query: string (optional) The contents of the q query parameter. This
string is URL escaped upon conversion to a URI.
params: dict (optional) Parameter value string pairs which become URL
params when translated to a URI. These parameters are added to
the query's items.
categories: list (optional) List of category strings which should be
included as query categories. See gdata.service.Query for
additional documentation.
Yields:
A DocumentQuery object used to construct a URI based on the Document
List feed.
"""
self.visibility = visibility
self.projection = projection
gdata.service.Query.__init__(self, feed, text_query, params, categories)
def ToUri(self):
"""Generates a URI from the query parameters set in the object.
Returns:
A string containing the URI used to retrieve entries from the Document
List feed.
"""
old_feed = self.feed
self.feed = '/'.join([old_feed, self.visibility, self.projection])
new_feed = gdata.service.Query.ToUri(self)
self.feed = old_feed
return new_feed
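  # Illustrative (assuming the base Query class appends categories as
  # '/-/<category>'): DocumentQuery(categories=['document']).ToUri() would
  # yield '/feeds/documents/private/full/-/document'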
def AddNamedFolder(self, email, folder_name):
"""Adds a named folder category, qualified by a schema.
This function lets you query for documents that are contained inside a
named folder without fear of collision with other categories.
Args:
email: string The email of the user who owns the folder.
folder_name: string The name of the folder.
Returns:
The string of the category that was added to the object.
"""
category = '{http://schemas.google.com/docs/2007/folders/'
category += email + '}' + folder_name
self.categories.append(category)
return category
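  # Illustrative (hypothetical email): AddNamedFolder('[email protected]', 'starred')
  # appends and returns
  # '{http://schemas.google.com/docs/2007/folders/[email protected]}starred'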
def RemoveNamedFolder(self, email, folder_name):
"""Removes a named folder category, qualified by a schema.
Args:
email: string The email of the user who owns the folder.
folder_name: string The name of the folder.
Returns:
The string of the category that was removed to the object.
"""
category = '{http://schemas.google.com/docs/2007/folders/'
category += email + '}' + folder_name
self.categories.remove(category)
return category
| mit | 4,083,827,386,476,407,000 | 35.938356 | 79 | 0.683479 | false | 4.418681 | false | false | false |
ahealy19/F-IDE-2016 | benchexec/tools/cpachecker.py | 1 | 7896 | """
BenchExec is a framework for reliable benchmarking.
This file is part of BenchExec.
Copyright (C) 2007-2015 Dirk Beyer
All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
# prepare for Python 3
from __future__ import absolute_import, division, print_function, unicode_literals
import logging
import subprocess
import sys
import os
import re
import benchexec.result as result
import benchexec.util as util
import benchexec.tools.template
from benchexec.model import SOFTTIMELIMIT
class Tool(benchexec.tools.template.BaseTool):
"""
Tool info for CPAchecker.
It has additional features such as building CPAchecker before running it
if executed within a source checkout.
It also supports extracting data from the statistics output of CPAchecker
for adding it to the result tables.
"""
REQUIRED_PATHS = [
"lib/java/runtime",
"lib/*.jar",
"lib/native/x86_64-linux",
"scripts",
"cpachecker.jar",
"config",
]
def executable(self):
executable = util.find_executable('cpa.sh', 'scripts/cpa.sh')
executableDir = os.path.join(os.path.dirname(executable), os.path.pardir)
if os.path.isdir(os.path.join(executableDir, 'src')):
self._buildCPAchecker(executableDir)
if not os.path.isfile(os.path.join(executableDir, "cpachecker.jar")):
logging.warning("Required JAR file for CPAchecker not found in {0}.".format(executableDir))
return executable
def program_files(self, executable):
installDir = os.path.join(os.path.dirname(executable), os.path.pardir)
return util.flatten(util.expand_filename_pattern(path, installDir) for path in self.REQUIRED_PATHS)
def _buildCPAchecker(self, executableDir):
logging.debug('Building CPAchecker in directory {0}.'.format(executableDir))
ant = subprocess.Popen(['ant', '-lib', 'lib/java/build', '-q', 'jar'], cwd=executableDir, shell=util.is_windows())
ant.communicate()
if ant.returncode:
sys.exit('Failed to build CPAchecker, please fix the build first.')
def version(self, executable):
stdout = self._version_from_tool(executable, '-help')
line = next(l for l in stdout.splitlines() if l.startswith('CPAchecker'))
line = line.replace('CPAchecker' , '')
line = line.split('(')[0]
return line.strip()
def name(self):
return 'CPAchecker'
def cmdline(self, executable, options, tasks, propertyfile=None, rlimits={}):
if SOFTTIMELIMIT in rlimits:
if "-timelimit" in options:
logging.warning('Time limit already specified in command-line options, not adding time limit from benchmark definition to the command line.')
else:
options = options + ["-timelimit", str(rlimits[SOFTTIMELIMIT]) + "s"] # benchmark-xml uses seconds as unit
# if data.MEMLIMIT in rlimits:
# if "-heap" not in options:
# heapsize = rlimits[MEMLIMIT]*0.8 # 20% overhead for non-java-memory
# options = options + ["-heap", str(int(heapsize))]
if ("-stats" not in options):
options = options + ["-stats"]
spec = ["-spec", propertyfile] if propertyfile is not None else []
return [executable] + options + spec + tasks
def determine_result(self, returncode, returnsignal, output, isTimeout):
"""
@param returncode: code returned by CPAchecker
@param returnsignal: signal, which terminated CPAchecker
@param output: the output of CPAchecker
@return: status of CPAchecker after executing a run
"""
def isOutOfNativeMemory(line):
return ('std::bad_alloc' in line # C++ out of memory exception (MathSAT)
or 'Cannot allocate memory' in line
or 'Native memory allocation (malloc) failed to allocate' in line # JNI
or line.startswith('out of memory') # CuDD
)
status = None
for line in output:
if 'java.lang.OutOfMemoryError' in line:
status = 'OUT OF JAVA MEMORY'
elif isOutOfNativeMemory(line):
status = 'OUT OF NATIVE MEMORY'
elif 'There is insufficient memory for the Java Runtime Environment to continue.' in line \
or 'cannot allocate memory for thread-local data: ABORT' in line:
status = 'OUT OF MEMORY'
elif 'SIGSEGV' in line:
status = 'SEGMENTATION FAULT'
elif (returncode == 0 or returncode == 1) and 'java.lang.AssertionError' in line:
status = 'ASSERTION'
elif ((returncode == 0 or returncode == 1)
and ('Exception:' in line or line.startswith('Exception in thread'))
and not line.startswith('cbmc')): # ignore "cbmc error output: ... Minisat::OutOfMemoryException"
status = 'EXCEPTION'
elif 'Could not reserve enough space for object heap' in line:
status = 'JAVA HEAP ERROR'
elif line.startswith('Error: ') and not status:
status = result.RESULT_ERROR
if 'Unsupported' in line:
if 'recursion' in line:
status += ' (recursion)'
elif 'threads' in line:
status += ' (threads)'
elif 'Parsing failed' in line:
status += ' (parsing failed)'
elif line.startswith('For your information: CPAchecker is currently hanging at') and not status and isTimeout:
status = 'TIMEOUT'
elif line.startswith('Verification result: '):
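                # Strip the 'Verification result: ' prefix (21 characters).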
line = line[21:].strip()
if line.startswith('TRUE'):
newStatus = result.RESULT_TRUE_PROP
elif line.startswith('FALSE'):
newStatus = result.RESULT_FALSE_REACH
                    match = re.match(r'.* Property violation \(([^:]*)(:.*)?\) found by chosen configuration.*', line)
if match and match.group(1) in ['valid-deref', 'valid-free', 'valid-memtrack', 'no-overflow']:
newStatus = result.STR_FALSE + '(' + match.group(1) + ')'
else:
newStatus = result.RESULT_UNKNOWN
if not status:
status = newStatus
elif newStatus != result.RESULT_UNKNOWN:
status = "{0} ({1})".format(status, newStatus)
if not status:
status = result.RESULT_ERROR
return status
def get_value_from_output(self, lines, identifier):
# search for the text in output and get its value,
# stop after the first line, that contains the searched text
for line in lines:
if identifier in line:
startPosition = line.find(':') + 1
endPosition = line.find('(', startPosition) # bracket maybe not found -> (-1)
                if endPosition == -1:
return line[startPosition:].strip()
else:
return line[startPosition: endPosition].strip()
return None
| apache-2.0 | 7,328,689,529,093,601,000 | 41.451613 | 157 | 0.596631 | false | 4.435955 | false | false | false |
natea/django-lfc | lfc/utils/__init__.py | 1 | 8891 | # python imports
import datetime
import urllib
import sys
# django settings
from django.conf import settings
from django.contrib.auth import SESSION_KEY
from django.contrib.auth import BACKEND_SESSION_KEY
from django.contrib.auth import load_backend
from django.contrib.auth.models import AnonymousUser
from django.core.cache import cache
from django.core.urlresolvers import reverse
from django.http import Http404
from django.http import HttpResponse
from django.http import HttpResponseRedirect
from django.utils import simplejson
from django.utils.functional import Promise
from django.utils.encoding import force_unicode
from django.utils import translation
# lfc imports
import lfc.models
class HttpJsonResponse(HttpResponse):
def __init__(self, content, mimetype=None, status=None, content_type=None, **kwargs):
if mimetype is None:
mimetype = "application/json"
content = render_to_json(content, **kwargs)
HttpResponse.__init__(self, content=content,
mimetype=mimetype, status=status, content_type=content_type)
# TODO: Checkout Django's new message feature
class MessageHttpResponseRedirect(HttpResponseRedirect):
"""Specific HttpResponseRedirect to set a cookie with a message.
"""
def __init__(self, redirect_to, message):
HttpResponseRedirect.__init__(self, redirect_to)
# We just keep the message two seconds.
max_age = 2
expires = datetime.datetime.strftime(
datetime.datetime.utcnow() +
datetime.timedelta(seconds=max_age), "%a, %d-%b-%Y %H:%M:%S GMT")
self.set_cookie("message", lfc_quote(message), max_age=max_age, expires=expires)
def set_message_to_reponse(response, msg):
"""Sets message cookie with passed message to passed response.
"""
# We just keep the message two seconds.
max_age = 2
expires = datetime.datetime.strftime(
datetime.datetime.utcnow() +
datetime.timedelta(seconds=max_age), "%a, %d-%b-%Y %H:%M:%S GMT")
response.set_cookie("message", lfc_quote(msg), max_age=max_age, expires=expires)
return response
def render_to_json(html, **kwargs):
"""Renders given data to jsnon
"""
data = { "html" : html }
data.update(**kwargs)
return simplejson.dumps(data, cls = LazyEncoder)
def return_as_json(html, message):
"""
"""
return HttpResponse(get_json(html, message))
def get_json(html, message):
"""Returns html and message json encoded.
"""
return simplejson.dumps({ "html" : html, "message" : message, }, cls = LazyEncoder)
class LazyEncoder(simplejson.JSONEncoder):
"""JSONEncoder which encodes django's lazy i18n strings.
This is mainly used to return status messages along with content to ajax
calls.
"""
def default(self, obj):
if isinstance(obj, Promise):
return force_unicode(obj)
return obj
def get_content_object(request=None, *args, **kwargs):
"""Returns specific content object based on passed parameters.
This method should be used if one wants the specific content object
instead of the BaseContent object.
You can consider this as the equivalent to Django's get method.
"""
obj = lfc.models.BaseContent.objects.get(*args, **kwargs)
return obj.get_content_object()
def get_content_objects(request=None, *args, **kwargs):
"""Returns specific content objects based on passed parameters.
This method should be used if one wants the specific content object
instead of the BaseContent object.
    Takes the permissions of the current user as well as the start_date and
    end_date of each object into account.
You can consider this as the equivalent to Django's filter method.
"""
objs = lfc.models.BaseContent.objects.filter(*args, **kwargs)
parent = kwargs.get("parent")
if parent and parent.order_by:
objs = objs.order_by(parent.order_by)
result = []
if request is None or request.user.is_superuser:
for obj in objs:
obj = obj.get_content_object()
if lfc.utils.registration.get_info(obj):
result.append(obj)
else:
for obj in objs:
obj = obj.get_content_object()
if lfc.utils.registration.get_info(obj) and \
obj.has_permission(request.user, "view") and \
obj.is_active(request.user):
                result.append(obj)
return result
def get_portal(pk=1):
"""Returns the default portal.
"""
# CACHE
cache_key = "%s-portal-%s" % (settings.CACHE_MIDDLEWARE_KEY_PREFIX, pk)
portal = cache.get(cache_key)
if portal:
return portal
# At the moment the default portal should always exist.
try:
portal = lfc.models.Portal.objects.get(pk=pk)
except lfc.models.Portal.DoesNotExist:
portal = lfc.models.Portal.objects.filter()[0]
cache.set(cache_key, portal)
return portal
def get_user_from_session_key(session_key):
"""Returns the user from the passes session_key.
This is a workaround for SWFUpload, which is used to mass upload images
and files.
"""
try:
session_engine = __import__(settings.SESSION_ENGINE, {}, {}, [''])
session_wrapper = session_engine.SessionStore(session_key)
user_id = session_wrapper.get(SESSION_KEY)
auth_backend = load_backend(session_wrapper.get(BACKEND_SESSION_KEY))
if user_id and auth_backend:
return auth_backend.get_user(user_id)
else:
return AnonymousUser()
except AttributeError:
return AnonymousUser()
def login_form(next=None):
"""Returns the lfc login form.
"""
if next:
url = "%s?next=%s" % (reverse("lfc_login"), next)
else:
url = reverse("lfc_login")
return HttpResponseRedirect(url)
def traverse_object(request, path):
"""Returns the the object with the given path.
"""
language = translation.get_language()
# CACHE
cache_key = "%s-traverse-obj-%s-%s-%s" % (settings.CACHE_MIDDLEWARE_KEY_PREFIX,
path, request.user.id, language)
obj = cache.get(cache_key)
if obj:
return obj
paths = path.split("/")
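    # Resolve the first slug at the root, then walk the remaining slugs as children.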
try:
obj = lfc.utils.get_content_object(request, slug=paths[0],
parent=None, language__in = ("0", language))
except lfc.models.BaseContent.DoesNotExist:
raise Http404
for path in paths[1:]:
try:
obj = obj.children.get(slug=path, language__in = ("0", obj.language)).get_content_object()
except lfc.models.BaseContent.DoesNotExist:
raise Http404
cache.set(cache_key, obj)
return obj
def clear_cache():
"""Clears the complete cache.
"""
# memcached
try:
cache._cache.flush_all()
except AttributeError:
pass
else:
return
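    # Fall back for backends with dict-like internals (e.g. locmem).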
try:
cache._cache.clear()
except AttributeError:
pass
try:
cache._expire_info.clear()
except AttributeError:
pass
def import_module(module):
"""Imports module with given dotted name.
"""
try:
module = sys.modules[module]
except KeyError:
__import__(module)
module = sys.modules[module]
return module
def getLOL(objects, objects_per_row=3):
"""Returns a list of list of given objects.
"""
result = []
row = []
for i, object in enumerate(objects):
row.append(object)
if (i+1) % objects_per_row == 0:
result.append(row)
row = []
if len(row) > 0:
result.append(row)
return result
def lfc_quote(string, encoding="utf-8"):
"""Encodes string to encoding before quoting.
"""
return urllib.quote(string.encode(encoding))
# TODO: Not used at the moment - what to do?
def get_related_pages_by_tags(page, num=None):
"""Returns a dict with related products by tags.
This is just a thin wrapper for the get_related method of the
TaggedItem manager of the tagging product in order to provide caching.
From the tagging product's doc string (mutatis mutantis):
Returns a list of products which share tags with the product with passed id
ordered by the number of shared tags in descending order.
See there for more.
"""
# CACHE
cache_key = "%s-related-page-by-tags-%s" % \
(settings.CACHE_MIDDLEWARE_KEY_PREFIX, page.id)
related_pages = cache.get(cache_key)
if related_pages is not None:
return {"related_pages" : related_pages}
    # Create related pages. Local import, as django-tagging is only needed by
    # this (currently unused) helper.
    from tagging.models import TaggedItem
    related_pages = TaggedItem.objects.get_related(page, lfc.models.Page, num)
# Save related pages to cache
cache.set(cache_key, related_pages)
return {"related_pages" : related_pages} | bsd-3-clause | -1,566,837,927,116,174,600 | 29.556701 | 102 | 0.647734 | false | 3.967425 | false | false | false |
Juzley/typingdefense | typingdefense/menu.py | 1 | 3132 | import ctypes
import numpy
import OpenGL.GL as GL
class MenuItem(object):
pass
class TextButtonItem(MenuItem):
def __init__(self, x, y, h, text):
self.x, self.y, self.h = (x, y, h)
self.text = text
self.selected = False
def draw(self):
pass
class MenuScreen(object):
def __init__(self, app, background=None, items=None):
self._background = background
if items is None:
self._items = []
else:
self._items = items
# Load the background image info.
if self._background:
# Load the shader program
self._bg_shader = app.resources.load_shader_program("ortho.vs",
"texture.fs")
# Set up the texture
self._bg_tex = app.resources.load_texture(self._background)
self._bg_texunit_uniform = self._bg_shader.uniform('texUnit')
# Set up geometry
self._bg_vao = GL.glGenVertexArrays(1)
GL.glBindVertexArray(self._bg_vao)
self._bg_vbo = GL.glGenBuffers(1)
GL.glBindBuffer(GL.GL_ARRAY_BUFFER, self._bg_vbo)
verts = numpy.array(
# X Y U V
[-1, -1, 0, 0,
-1, 1, 0, 1,
1, -1, 1, 0,
1, 1, 1, 1],
dtype=numpy.float32)
GL.glBufferData(GL.GL_ARRAY_BUFFER, verts.nbytes, verts,
GL.GL_STATIC_DRAW)
GL.glEnableVertexAttribArray(0)
GL.glEnableVertexAttribArray(1)
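            # Interleaved layout: 4 floats (X, Y, U, V) per vertex, so the
            # stride is 16 bytes and the UV data starts at byte offset 8.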
GL.glVertexAttribPointer(0, 2, GL.GL_FLOAT, GL.GL_FALSE, 16, None)
GL.glVertexAttribPointer(1, 2, GL.GL_FLOAT, GL.GL_FALSE, 16,
ctypes.c_void_p(8))
GL.glBindVertexArray(0)
    def __del__(self):
        # Best-effort GL cleanup; the context may already be gone at shutdown.
        try:
            GL.glDeleteBuffers(1, [self._bg_vbo])
            GL.glDeleteVertexArrays(1, [self._bg_vao])
        except Exception:
            pass
def draw(self):
if self._background:
self._bg_shader.use()
self._bg_tex.bind()
GL.glUniform1i(self._bg_texunit_uniform, 0)
GL.glBindVertexArray(self._bg_vao)
GL.glDrawArrays(GL.GL_TRIANGLE_STRIP, 0, 4)
GL.glBindVertexArray(0)
GL.glUseProgram(0)
class Menu(object):
"""Menu class.
Manages a number of menu screens and coordinates moving between them.
"""
def __init__(self, start_screen):
"""Initialize a menu."""
self._menu_stack = []
self.reset(start_screen)
def draw(self):
"""Draw the menu."""
self._menu_stack[-1].draw()
def reset(self, screen):
"""Reset the menu.
This discards the current menu stack and starts again at the given
screen.
"""
self._menu_stack = [screen]
def navigate_forward(self, screen):
"""Move to a new screen.
The current screen is kept on the stack so we can go back to it.
"""
self._menu_stack.append(screen)
def navigate_back(self):
"""Move to the previous screen."""
self._menu_stack.pop()
| mit | 1,557,668,990,168,349,700 | 27.472727 | 78 | 0.523308 | false | 3.805589 | false | false | false |
phil0522/anote | anote-web/anoteweb/util/mjson.py | 1 | 3073 | """
Converts a model to and from json string.
"""
import google.appengine.ext.ndb as ndb
import json
import logging
from anoteweb.util.time_util import from_epoch, to_epoch
from datetime import datetime
def _json_to_model(model_class, json_obj):
"""json to model string."""
_result = {}
url_safe_key = None
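  # 'key' is not a regular property; it is set on the entity after construction.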
for k, value in json_obj.iteritems():
if k == 'key':
url_safe_key = value
continue
prop = model_class._properties.get(k)
if prop is None:
      logging.debug('available properties: %s', dir(model_class))
      logging.fatal('cannot decode %s: property is not defined on %s.%s.', k,
                    model_class.__module__, model_class.__name__)
if isinstance(prop, ndb.model.ComputedProperty):
continue
if prop._repeated:
value = [_get_value_for_json_to_model(prop, val) for val in value]
else:
value = _get_value_for_json_to_model(prop, value)
_result[k] = value
  logging.debug('result=%s', repr(_result))
m = model_class(**_result)
if url_safe_key:
m.key = ndb.Key(urlsafe=url_safe_key)
return m
def _get_value_for_json_to_model(prop, v):
"""json to model."""
logging.info('_get_value_for_json_to_model: %s, vaue: %s',
repr(prop), repr(v))
if isinstance(prop, (ndb.DateTimeProperty, ndb.DateProperty,
ndb.TimeProperty)):
return from_epoch(v)
if isinstance(prop, ndb.KeyProperty):
return ndb.Key(urlsafe=v)
if isinstance(prop, (ndb.StructuredProperty, ndb.LocalStructuredProperty)):
return _json_to_model(prop._modelclass, v)
if isinstance(prop, (ndb.IntegerProperty, ndb.StringProperty,
ndb.TextProperty)):
return v
logging.fatal('unsupported property type: %s', prop)
def _remove_null_value_from_map(value):
if isinstance(value, ndb.Model):
kv_map = value.to_dict()
kv_map['key'] = value.key.urlsafe()
kv_map['key_id'] = value.key.id()
return _remove_null_value_from_map(kv_map)
if isinstance(value, list):
return [_remove_null_value_from_map(i) for i in value]
elif isinstance(value, datetime):
return to_epoch(value)
  elif isinstance(value, (str, int, unicode)):
return value
elif isinstance(value, dict):
result = {}
for k, v in value.iteritems():
logging.info('current key: %s', k)
if isinstance(v, (list, dict)) and not v:
continue
if v is None:
continue
result[k] = _remove_null_value_from_map(v)
return result
else:
logging.fatal('unknown type: %s %s', type(value), repr(value))
def json2model(model_class, json_str):
return _json_to_model(model_class, json.loads(json_str))
def model2json(model):
if isinstance(model, list):
logging.info('model is list %s', model)
non_empty_map = [_remove_null_value_from_map(m) for m in model]
return json.dumps(non_empty_map, ensure_ascii=False, sort_keys=True)
else:
non_empty_map = _remove_null_value_from_map(model)
# Keep it sorted to make test easilier.
return json.dumps(non_empty_map, ensure_ascii=False, sort_keys=True)
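# Hypothetical usage, assuming a Task model with a StringProperty 'title':
#   task = json2model(Task, '{"title": "write docs"}')
#   task.put()                  # model2json needs a stored key
#   json_str = model2json(task)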
| mit | -5,546,565,122,223,178,000 | 28.548077 | 78 | 0.653759 | false | 3.286631 | false | false | false |
peo3/cgroup-utils | setup.py | 1 | 1691 | #!/usr/bin/python
from setuptools import setup, Extension
from cgutils.version import VERSION
mod_linux = Extension('linux', sources=['cgutils/linux.c'])
classifiers = [
'Development Status :: 4 - Beta',
'Environment :: Console',
'Intended Audience :: System Administrators',
'License :: OSI Approved :: GNU General Public License v2 (GPLv2)',
'Operating System :: POSIX :: Linux',
'Programming Language :: C',
'Programming Language :: Python',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.4',
'Programming Language :: Python :: 3.5',
'Programming Language :: Python :: 3.6',
'Programming Language :: Python :: 3.7',
'Programming Language :: Python :: 3.8',
'Programming Language :: Python :: 3 :: Only',
'Topic :: System :: Operating System Kernels :: Linux',
'Topic :: System :: Systems Administration',
'Topic :: Utilities',
]
long_description = open('README').read() + '\n' + open('Changelog').read()
setup(name='cgroup-utils',
version=VERSION,
description='Utility tools for control groups of Linux',
long_description=long_description,
scripts=['bin/cgutil'],
packages=['cgutils', 'cgutils.commands'],
ext_package='cgutils',
ext_modules=[mod_linux],
author='peo3',
author_email='[email protected]',
url='https://github.com/peo3/cgroup-utils',
license='GPLv2',
classifiers=classifiers,
install_requires=['argparse'],
tests_require=['nose', 'pep8'],
test_suite='nose.collector',
extras_require=dict(
test=[
'nose',
'pep8',
]
),)
| gpl-2.0 | -8,136,616,997,294,508,000 | 32.156863 | 74 | 0.613838 | false | 4.055156 | false | false | false |
mrakitin/sirepo | sirepo/sim_data/flash.py | 1 | 1110 | # -*- coding: utf-8 -*-
u"""simulation data operations
:copyright: Copyright (c) 2019 RadiaSoft LLC. All Rights Reserved.
:license: http://www.apache.org/licenses/LICENSE-2.0.html
"""
from __future__ import absolute_import, division, print_function
import sirepo.sim_data
class SimData(sirepo.sim_data.SimDataBase):
@classmethod
def fixup_old_data(cls, data):
dm = data.models
cls._init_models(dm)
if dm.simulation.flashType == 'CapLaser':
dm.IO.update(
plot_var_5='magz',
plot_var_6='depo',
)
@classmethod
def _compute_job_fields(cls, data, r, compute_model):
return [r]
@classmethod
def _lib_file_basenames(cls, data):
t = data.models.simulation.flashType
#return ['flash.par', 'al-imx-004.cn4', 'h-imx-004.cn4']
#return ['flash.par', 'helm_table.dat']
if t == 'RTFlame':
return ['helm_table.dat']
if t == 'CapLaser':
return ['al-imx-004.cn4', 'h-imx-004.cn4']
raise AssertionError('invalid flashType: {}'.format(t))
| apache-2.0 | -3,410,153,178,655,896,600 | 29.833333 | 67 | 0.59009 | false | 3.284024 | false | false | false |
martinmoene/svn-churn | svn-churn.py | 1 | 5938 | #
# svn-churn.py - determine file churn and fix count for Subversion repository.
#
# Example: python svn-churn.py |sort -n -t , +2 | tail -n 50 |sort -r -n -t , +2
#
# Runs with Python 2.7, 3.3
#
# License: MIT, see accompanying LICENSE.txt
#
# ------------------------------------------------------------------------
# Configuration:
# Repository: working copy path, or URL
# cfg_reposes = ['https://svn.webkit.org/repository/webkit/trunk']
cfg_reposes = []
# Recognise as fix:
cfg_fixed_issues = (
'[Ii]ssue[s]? #',
'[Ff]ix',
'[Cc]orrect'
)
# Substitute partial path with replacement
cfg_edited_paths = (
# ( r'/trunk/Source/core/', '/trunk/Source/WebCore/' ),
# ( r'/trunk/Source/' , '' ),
)
# Subversion command:
cfg_svn = 'svn'
# ------------------------------------------------------------------------
import re, subprocess, sys
class Context:
def __init__( self, svn, fixed_issues, edited_paths ):
self.svn = svn
self.fixed_issues = fixed_issues
self.edited_paths = edited_paths
class Churn:
"""storage: { path : [ changed, fixed, [messages] ] }
"""
def __init__( self, context ):
self.context = context
self.storage = dict()
self.edits = self.create_edits( context.edited_paths )
def __call__( self, reposes, options ):
for repos in reposes:
self.parse_svn_log( self.svn_log( repos, options ) )
self.update_fixes()
self.print_results( reposes )
def svn_log( self, repos, options ):
command = [ self.context.svn, 'log', '-v' ] + options + [ repos ]
process = subprocess.Popen( command, stdout=subprocess.PIPE, universal_newlines=True )
out, err = process.communicate()
return out
    def issue_pattern( self ):
        return r'(' + '|'.join( self.context.fixed_issues ) + ')'
    def update_fixes( self ):
        pattern = re.compile( self.issue_pattern() )
        for k, v in self.storage.items():
            for m in v[2]:
                if pattern.search( m ):
                    v[1] += 1
def print_results( self, reposes ):
print( 'Churn,Fixes,Churn*Fixes,File {reposes}'.format( reposes=reposes) )
for k, v in self.storage.items():
print( "{chg},{fix},{prod},{path}".format( chg=v[0], fix=v[1], prod=v[0] * v[1], path=k ) )
def parse_svn_log( self, text ):
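        # 'svn log -v' entries look like: a dashed separator, a revision
        # header line, 'Changed paths:' with one path per line, a blank
        # line, then the log message until the next dashed separator.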
s_dash = 1
s_revision = 2
s_paths = 3
s_message = 4
state = s_dash
for line in text.split( '\n' ):
if state == s_dash:
state = s_revision
elif state == s_revision:
msg = ''
files = []
state = s_paths
elif state == s_paths:
if line.startswith( 'Changed paths:' ):
continue
elif line == '':
state = s_message
else:
files.append( line )
elif state == s_message:
if line.startswith( '-----' ):
for name in files:
self.store( name, msg )
state = s_revision
else:
if msg == '':
msg = line
else:
msg += '|' + line
def store( self, name, msg ):
name = self.edit_path( name )
if name in self.storage:
self.storage[ name ][0] += 1
self.storage[ name ][2].append( msg )
else:
self.storage[ name ] = [ 1, 0, [msg] ]
def edit_path( self, path ):
for (p,r) in self.edits:
path = p.sub( r, path )
return path
def create_edits( self, edited_paths ):
result = [ ( re.compile( r'\s+[ADMR] /' ), '/' ) ]
for (p,r) in edited_paths:
result.append( ( re.compile( p ), r ) )
return result
def usage():
print(
"""Usage: svn-churn [options] [repos...]
Options
-h, --help this help screen
-- end options section
Other options up to -- are passed on to the 'svn log' command.
svn-churn mines the log of the given Subversion repository
and presents the number of changes and fixes for each file.
Repos can be specified as a working copy path or a URL.
Examples
Use repositories configured in script:
./svn-churn.py
Use repositories configured in script and limit log used to latest 200 items:
./svn-churn.py --limit 200 --
Report 50 most changed and fixed files (sort on changes*fixes):
./svn-churn.py |sort -n -t , +2 | tail -n 50 |sort -r -n -t , +2
Note
Among a few other things, you can configure the SVN repository in the script.""" )
def split_arguments( arguments ):
options = []
inputs = []
opt_help = False
in_options = True
for arg in arguments:
if in_options:
if arg == '--' : in_options = False; continue
elif arg == '-h' or '--help' == arg: opt_help = True ; continue
else: options.append( arg ); continue
inputs.append( arg )
return ( opt_help, options, cfg_reposes if len(inputs) == 0 else inputs )
def help( opt_help, reposes ):
return opt_help or len( reposes ) == 0
def main( arguments ):
churn = Churn( Context( cfg_svn, cfg_fixed_issues, cfg_edited_paths ) )
( opt_help, svn_options, svn_reposes ) = split_arguments( arguments[1:] )
if help( opt_help, svn_reposes ):
return usage()
churn( svn_reposes, svn_options )
if __name__ == "__main__":
try:
main( sys.argv )
except Exception as e:
output = e # transform representation to message
print( "Error: {e}".format( e=output ) )
#
# end of file
#
| mit | 4,805,279,237,556,647,000 | 29.295918 | 103 | 0.518862 | false | 3.74401 | false | false | false |